define <4 x float> @pair_sum_v4f32_v4f32(<4 x float> %0, <4 x float> %1, <4 x float> %2, <4 x float> %3) {
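; Four independent pairwise-sum reductions, one per <4 x float> input, packed into the
; <4 x float> result. The '-' lines show the old SLOW lowering fusing two inputs into one
; haddps; the '+' lines split it into a per-input haddps + movshdup + addps chain.
; A sketch of the per-input IR, inferred from the v4i32 body shown further down
; (the f32 body itself is elided from this hunk):
;   %lo = shufflevector <4 x float> %0, <4 x float> poison, <2 x i32> <i32 0, i32 2>
;   %hi = shufflevector <4 x float> %0, <4 x float> poison, <2 x i32> <i32 1, i32 3>
;   %s  = fadd <2 x float> %lo, %hi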
; SSSE3-SLOW-LABEL: pair_sum_v4f32_v4f32:
; SSSE3-SLOW: # %bb.0:
-; SSSE3-SLOW-NEXT: haddps %xmm1, %xmm0
-; SSSE3-SLOW-NEXT: movaps %xmm0, %xmm1
-; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm0[1,3]
-; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3,1,3]
-; SSSE3-SLOW-NEXT: addps %xmm1, %xmm0
+; SSSE3-SLOW-NEXT: haddps %xmm0, %xmm0
+; SSSE3-SLOW-NEXT: movshdup {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; SSSE3-SLOW-NEXT: addps %xmm4, %xmm0
+; SSSE3-SLOW-NEXT: haddps %xmm1, %xmm1
+; SSSE3-SLOW-NEXT: movshdup {{.*#+}} xmm4 = xmm1[1,1,3,3]
+; SSSE3-SLOW-NEXT: addps %xmm1, %xmm4
+; SSSE3-SLOW-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
; SSSE3-SLOW-NEXT: haddps %xmm2, %xmm2
; SSSE3-SLOW-NEXT: movshdup {{.*#+}} xmm1 = xmm2[1,1,3,3]
-; SSSE3-SLOW-NEXT: movddup {{.*#+}} xmm2 = xmm2[0,0]
-; SSSE3-SLOW-NEXT: addps %xmm1, %xmm2
+; SSSE3-SLOW-NEXT: addps %xmm2, %xmm1
; SSSE3-SLOW-NEXT: haddps %xmm3, %xmm3
-; SSSE3-SLOW-NEXT: movshdup {{.*#+}} xmm1 = xmm3[1,1,3,3]
-; SSSE3-SLOW-NEXT: addps %xmm3, %xmm1
-; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3]
-; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,0]
+; SSSE3-SLOW-NEXT: movshdup {{.*#+}} xmm2 = xmm3[1,1,3,3]
+; SSSE3-SLOW-NEXT: addps %xmm3, %xmm2
+; SSSE3-SLOW-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm1[0]
+; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,0]
; SSSE3-SLOW-NEXT: retq
;
; SSSE3-FAST-LABEL: pair_sum_v4f32_v4f32:
;
; AVX1-SLOW-LABEL: pair_sum_v4f32_v4f32:
; AVX1-SLOW: # %bb.0:
-; AVX1-SLOW-NEXT: vhaddps %xmm1, %xmm0, %xmm0
+; AVX1-SLOW-NEXT: vhaddps %xmm0, %xmm0, %xmm0
+; AVX1-SLOW-NEXT: vmovshdup {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; AVX1-SLOW-NEXT: vaddps %xmm4, %xmm0, %xmm0
+; AVX1-SLOW-NEXT: vhaddps %xmm1, %xmm1, %xmm1
+; AVX1-SLOW-NEXT: vmovshdup {{.*#+}} xmm4 = xmm1[1,1,3,3]
+; AVX1-SLOW-NEXT: vaddps %xmm4, %xmm1, %xmm1
+; AVX1-SLOW-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; AVX1-SLOW-NEXT: vhaddps %xmm2, %xmm2, %xmm1
-; AVX1-SLOW-NEXT: vshufps {{.*#+}} xmm2 = xmm0[1,3],xmm1[1,1]
-; AVX1-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,1]
-; AVX1-SLOW-NEXT: vaddps %xmm2, %xmm0, %xmm0
+; AVX1-SLOW-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; AVX1-SLOW-NEXT: vaddps %xmm2, %xmm1, %xmm1
+; AVX1-SLOW-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX1-SLOW-NEXT: vhaddps %xmm3, %xmm3, %xmm1
; AVX1-SLOW-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX1-SLOW-NEXT: vaddps %xmm2, %xmm1, %xmm1
;
; AVX2-SLOW-LABEL: pair_sum_v4f32_v4f32:
; AVX2-SLOW: # %bb.0:
-; AVX2-SLOW-NEXT: vhaddps %xmm1, %xmm0, %xmm0
+; AVX2-SLOW-NEXT: vhaddps %xmm0, %xmm0, %xmm0
+; AVX2-SLOW-NEXT: vmovshdup {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; AVX2-SLOW-NEXT: vaddps %xmm4, %xmm0, %xmm0
+; AVX2-SLOW-NEXT: vhaddps %xmm1, %xmm1, %xmm1
+; AVX2-SLOW-NEXT: vmovshdup {{.*#+}} xmm4 = xmm1[1,1,3,3]
+; AVX2-SLOW-NEXT: vaddps %xmm4, %xmm1, %xmm1
+; AVX2-SLOW-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX2-SLOW-NEXT: vhaddps %xmm2, %xmm2, %xmm1
-; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm2 = xmm0[1,3],xmm1[1,1]
+; AVX2-SLOW-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; AVX2-SLOW-NEXT: vaddps %xmm2, %xmm1, %xmm1
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,3]
-; AVX2-SLOW-NEXT: vaddps %xmm2, %xmm0, %xmm0
; AVX2-SLOW-NEXT: vhaddps %xmm3, %xmm3, %xmm1
; AVX2-SLOW-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX2-SLOW-NEXT: vaddps %xmm2, %xmm1, %xmm1
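; Integer analogue of the test above: the same pairwise sums built from
; phaddd/pshufd/paddd. The first two lines of its IR body survive after the
; check blocks below; the rest of the body is elided.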
define <4 x i32> @pair_sum_v4i32_v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2, <4 x i32> %3) {
; SSSE3-SLOW-LABEL: pair_sum_v4i32_v4i32:
; SSSE3-SLOW: # %bb.0:
-; SSSE3-SLOW-NEXT: phaddd %xmm1, %xmm0
-; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,2,1,3]
-; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,1,3]
-; SSSE3-SLOW-NEXT: paddd %xmm1, %xmm0
+; SSSE3-SLOW-NEXT: phaddd %xmm0, %xmm0
+; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,1,1]
+; SSSE3-SLOW-NEXT: paddd %xmm4, %xmm0
+; SSSE3-SLOW-NEXT: phaddd %xmm1, %xmm1
+; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm4 = xmm1[1,1,1,1]
+; SSSE3-SLOW-NEXT: paddd %xmm1, %xmm4
+; SSSE3-SLOW-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
; SSSE3-SLOW-NEXT: phaddd %xmm2, %xmm2
; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,1,1]
-; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,0,1]
-; SSSE3-SLOW-NEXT: paddd %xmm1, %xmm2
+; SSSE3-SLOW-NEXT: paddd %xmm2, %xmm1
; SSSE3-SLOW-NEXT: phaddd %xmm3, %xmm3
-; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,1,1,1]
-; SSSE3-SLOW-NEXT: paddd %xmm3, %xmm1
-; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3]
-; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,0]
+; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm2 = xmm3[1,1,1,1]
+; SSSE3-SLOW-NEXT: paddd %xmm3, %xmm2
+; SSSE3-SLOW-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0]
+; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,0]
; SSSE3-SLOW-NEXT: retq
;
; SSSE3-FAST-LABEL: pair_sum_v4i32_v4i32:
;
; AVX1-SLOW-LABEL: pair_sum_v4i32_v4i32:
; AVX1-SLOW: # %bb.0:
-; AVX1-SLOW-NEXT: vphaddd %xmm1, %xmm0, %xmm0
-; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[0,2,1,3]
-; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,3,1,3]
-; AVX1-SLOW-NEXT: vpaddd %xmm0, %xmm1, %xmm0
+; AVX1-SLOW-NEXT: vphaddd %xmm0, %xmm0, %xmm0
+; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[1,1,1,1]
+; AVX1-SLOW-NEXT: vpaddd %xmm4, %xmm0, %xmm0
+; AVX1-SLOW-NEXT: vphaddd %xmm1, %xmm1, %xmm1
+; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[1,1,1,1]
+; AVX1-SLOW-NEXT: vpaddd %xmm4, %xmm1, %xmm1
+; AVX1-SLOW-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; AVX1-SLOW-NEXT: vphaddd %xmm2, %xmm2, %xmm1
; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,1,1]
; AVX1-SLOW-NEXT: vpaddd %xmm2, %xmm1, %xmm1
; AVX1-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX1-SLOW-NEXT: vphaddd %xmm3, %xmm3, %xmm1
; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,1,1]
-; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; AVX1-SLOW-NEXT: vpaddd %xmm2, %xmm1, %xmm1
+; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; AVX1-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],xmm1[6,7]
; AVX1-SLOW-NEXT: retq
;
; AVX2-SLOW-LABEL: pair_sum_v4i32_v4i32:
; AVX2-SLOW: # %bb.0:
-; AVX2-SLOW-NEXT: vphaddd %xmm1, %xmm0, %xmm0
-; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[3,3,3,3]
-; AVX2-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm4[0]
-; AVX2-SLOW-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX2-SLOW-NEXT: vphaddd %xmm0, %xmm0, %xmm0
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[1,1,1,1]
+; AVX2-SLOW-NEXT: vpaddd %xmm4, %xmm0, %xmm0
+; AVX2-SLOW-NEXT: vphaddd %xmm1, %xmm1, %xmm1
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[1,1,1,1]
+; AVX2-SLOW-NEXT: vpaddd %xmm4, %xmm1, %xmm1
+; AVX2-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX2-SLOW-NEXT: vphaddd %xmm2, %xmm2, %xmm1
; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,1,1]
; AVX2-SLOW-NEXT: vpaddd %xmm2, %xmm1, %xmm1
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,3]
; AVX2-SLOW-NEXT: vphaddd %xmm3, %xmm3, %xmm1
-; AVX2-SLOW-NEXT: vpbroadcastq %xmm1, %xmm2
-; AVX2-SLOW-NEXT: vpbroadcastd %xmm1, %xmm1
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,1,1]
; AVX2-SLOW-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; AVX2-SLOW-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3]
+; AVX2-SLOW-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
; AVX2-SLOW-NEXT: retq
%5 = shufflevector <4 x i32> %0, <4 x i32> poison, <2 x i32> <i32 0, i32 2>
%6 = shufflevector <4 x i32> %0, <4 x i32> poison, <2 x i32> <i32 1, i32 3>
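; As above, but eight <4 x float> inputs reduced into an <8 x float> result. Note the
; new code copies %xmm3 to %xmm8 up front so the original value is still live after the
; first round of horizontal adds.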
define <8 x float> @pair_sum_v8f32_v4f32(<4 x float> %0, <4 x float> %1, <4 x float> %2, <4 x float> %3, <4 x float> %4, <4 x float> %5, <4 x float> %6, <4 x float> %7) {
; SSSE3-SLOW-LABEL: pair_sum_v8f32_v4f32:
; SSSE3-SLOW: # %bb.0:
-; SSSE3-SLOW-NEXT: haddps %xmm1, %xmm0
-; SSSE3-SLOW-NEXT: movaps %xmm0, %xmm1
-; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm0[1,3]
-; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3,1,3]
-; SSSE3-SLOW-NEXT: addps %xmm1, %xmm0
+; SSSE3-SLOW-NEXT: movaps %xmm3, %xmm8
+; SSSE3-SLOW-NEXT: haddps %xmm0, %xmm0
+; SSSE3-SLOW-NEXT: movshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; SSSE3-SLOW-NEXT: addps %xmm3, %xmm0
+; SSSE3-SLOW-NEXT: haddps %xmm1, %xmm1
+; SSSE3-SLOW-NEXT: movshdup {{.*#+}} xmm3 = xmm1[1,1,3,3]
+; SSSE3-SLOW-NEXT: addps %xmm1, %xmm3
+; SSSE3-SLOW-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
; SSSE3-SLOW-NEXT: movaps %xmm2, %xmm1
-; SSSE3-SLOW-NEXT: haddps %xmm3, %xmm1
-; SSSE3-SLOW-NEXT: haddps %xmm2, %xmm3
+; SSSE3-SLOW-NEXT: haddps %xmm8, %xmm1
+; SSSE3-SLOW-NEXT: haddps %xmm2, %xmm8
; SSSE3-SLOW-NEXT: haddps %xmm4, %xmm5
; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm5[2,0]
-; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1],xmm5[3,1]
-; SSSE3-SLOW-NEXT: addps %xmm1, %xmm3
-; SSSE3-SLOW-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm3[0]
+; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm8 = xmm8[3,1],xmm5[3,1]
+; SSSE3-SLOW-NEXT: addps %xmm1, %xmm8
+; SSSE3-SLOW-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm8[0]
; SSSE3-SLOW-NEXT: haddps %xmm6, %xmm6
; SSSE3-SLOW-NEXT: haddps %xmm7, %xmm7
; SSSE3-SLOW-NEXT: haddps %xmm7, %xmm6
-; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,3],xmm6[0,2]
-; SSSE3-SLOW-NEXT: movaps %xmm3, %xmm1
+; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm8 = xmm8[2,3],xmm6[0,2]
+; SSSE3-SLOW-NEXT: movaps %xmm8, %xmm1
; SSSE3-SLOW-NEXT: retq
;
; SSSE3-FAST-LABEL: pair_sum_v8f32_v4f32:
;
; AVX1-SLOW-LABEL: pair_sum_v8f32_v4f32:
; AVX1-SLOW: # %bb.0:
-; AVX1-SLOW-NEXT: vhaddps %xmm1, %xmm0, %xmm0
-; AVX1-SLOW-NEXT: vpermilps {{.*#+}} xmm8 = xmm0[0,2,1,3]
-; AVX1-SLOW-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; AVX1-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[0,1]
-; AVX1-SLOW-NEXT: vaddps %xmm0, %xmm8, %xmm0
+; AVX1-SLOW-NEXT: vhaddps %xmm0, %xmm0, %xmm8
+; AVX1-SLOW-NEXT: vmovshdup {{.*#+}} xmm0 = xmm8[1,1,3,3]
+; AVX1-SLOW-NEXT: vaddps %xmm0, %xmm8, %xmm8
+; AVX1-SLOW-NEXT: vhaddps %xmm1, %xmm1, %xmm1
+; AVX1-SLOW-NEXT: vmovshdup {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; AVX1-SLOW-NEXT: vaddps %xmm0, %xmm1, %xmm0
+; AVX1-SLOW-NEXT: vunpcklps {{.*#+}} xmm0 = xmm8[0],xmm0[0],xmm8[1],xmm0[1]
; AVX1-SLOW-NEXT: vhaddps %xmm3, %xmm2, %xmm1
; AVX1-SLOW-NEXT: vhaddps %xmm4, %xmm4, %xmm2
; AVX1-SLOW-NEXT: vhaddps %xmm5, %xmm5, %xmm3
;
; AVX2-SLOW-LABEL: pair_sum_v8f32_v4f32:
; AVX2-SLOW: # %bb.0:
-; AVX2-SLOW-NEXT: vhaddps %xmm1, %xmm0, %xmm0
-; AVX2-SLOW-NEXT: vpermilps {{.*#+}} xmm8 = xmm0[0,2,1,3]
-; AVX2-SLOW-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[0,1]
-; AVX2-SLOW-NEXT: vaddps %xmm0, %xmm8, %xmm0
+; AVX2-SLOW-NEXT: vhaddps %xmm0, %xmm0, %xmm8
+; AVX2-SLOW-NEXT: vmovshdup {{.*#+}} xmm0 = xmm8[1,1,3,3]
+; AVX2-SLOW-NEXT: vaddps %xmm0, %xmm8, %xmm8
+; AVX2-SLOW-NEXT: vhaddps %xmm1, %xmm1, %xmm1
+; AVX2-SLOW-NEXT: vmovshdup {{.*#+}} xmm0 = xmm1[1,1,3,3]
+; AVX2-SLOW-NEXT: vaddps %xmm0, %xmm1, %xmm0
+; AVX2-SLOW-NEXT: vunpcklps {{.*#+}} xmm0 = xmm8[0],xmm0[0],xmm8[1],xmm0[1]
; AVX2-SLOW-NEXT: vhaddps %xmm4, %xmm4, %xmm1
; AVX2-SLOW-NEXT: vhaddps %xmm5, %xmm5, %xmm4
; AVX2-SLOW-NEXT: vhaddps %xmm3, %xmm2, %xmm2
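; Integer form of pair_sum_v8f32_v4f32; here %xmm2 is the value preserved in %xmm8
; across the phaddd sequence.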
define <8 x i32> @pair_sum_v8i32_v4i32(<4 x i32> %0, <4 x i32> %1, <4 x i32> %2, <4 x i32> %3, <4 x i32> %4, <4 x i32> %5, <4 x i32> %6, <4 x i32> %7) {
; SSSE3-SLOW-LABEL: pair_sum_v8i32_v4i32:
; SSSE3-SLOW: # %bb.0:
-; SSSE3-SLOW-NEXT: phaddd %xmm1, %xmm0
-; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,2,1,3]
-; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,1,3]
-; SSSE3-SLOW-NEXT: paddd %xmm1, %xmm0
-; SSSE3-SLOW-NEXT: phaddd %xmm3, %xmm2
+; SSSE3-SLOW-NEXT: movdqa %xmm2, %xmm8
+; SSSE3-SLOW-NEXT: phaddd %xmm0, %xmm0
+; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,1,1]
+; SSSE3-SLOW-NEXT: paddd %xmm2, %xmm0
+; SSSE3-SLOW-NEXT: phaddd %xmm1, %xmm1
+; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,1,1]
+; SSSE3-SLOW-NEXT: paddd %xmm1, %xmm2
+; SSSE3-SLOW-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSSE3-SLOW-NEXT: phaddd %xmm3, %xmm8
; SSSE3-SLOW-NEXT: phaddd %xmm4, %xmm5
; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm1 = xmm5[2,3,2,3]
-; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm3 = xmm5[0,1,0,1]
-; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,1],xmm1[1,1]
-; SSSE3-SLOW-NEXT: movdqa %xmm2, %xmm1
+; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm2 = xmm5[0,1,0,1]
+; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1],xmm1[1,1]
+; SSSE3-SLOW-NEXT: movdqa %xmm8, %xmm1
; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm5[2,0]
-; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,3],xmm3[2,0]
-; SSSE3-SLOW-NEXT: paddd %xmm1, %xmm2
-; SSSE3-SLOW-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm8 = xmm8[1,3],xmm2[2,0]
+; SSSE3-SLOW-NEXT: paddd %xmm1, %xmm8
+; SSSE3-SLOW-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm8[0]
; SSSE3-SLOW-NEXT: phaddd %xmm6, %xmm6
; SSSE3-SLOW-NEXT: phaddd %xmm7, %xmm7
; SSSE3-SLOW-NEXT: phaddd %xmm7, %xmm6
-; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,3],xmm6[0,2]
-; SSSE3-SLOW-NEXT: movaps %xmm2, %xmm1
+; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm8 = xmm8[2,3],xmm6[0,2]
+; SSSE3-SLOW-NEXT: movaps %xmm8, %xmm1
; SSSE3-SLOW-NEXT: retq
;
; SSSE3-FAST-LABEL: pair_sum_v8i32_v4i32:
;
; AVX1-SLOW-LABEL: pair_sum_v8i32_v4i32:
; AVX1-SLOW: # %bb.0:
-; AVX1-SLOW-NEXT: vphaddd %xmm1, %xmm0, %xmm0
-; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[0,2,1,3]
-; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,3,1,1]
+; AVX1-SLOW-NEXT: vphaddd %xmm0, %xmm0, %xmm8
+; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm8[1,1,1,1]
+; AVX1-SLOW-NEXT: vpaddd %xmm0, %xmm8, %xmm8
+; AVX1-SLOW-NEXT: vphaddd %xmm1, %xmm1, %xmm1
+; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
; AVX1-SLOW-NEXT: vpaddd %xmm0, %xmm1, %xmm0
+; AVX1-SLOW-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm8[0],xmm0[0],xmm8[1],xmm0[1]
; AVX1-SLOW-NEXT: vphaddd %xmm3, %xmm2, %xmm1
; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[0,2,1,3]
; AVX1-SLOW-NEXT: vphaddd %xmm4, %xmm4, %xmm3
;
; AVX2-SLOW-LABEL: pair_sum_v8i32_v4i32:
; AVX2-SLOW: # %bb.0:
-; AVX2-SLOW-NEXT: vphaddd %xmm1, %xmm0, %xmm0
-; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[0,2,1,3]
-; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,3,1,1]
+; AVX2-SLOW-NEXT: vphaddd %xmm0, %xmm0, %xmm8
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm8[1,1,1,1]
+; AVX2-SLOW-NEXT: vpaddd %xmm0, %xmm8, %xmm8
+; AVX2-SLOW-NEXT: vphaddd %xmm1, %xmm1, %xmm1
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
; AVX2-SLOW-NEXT: vpaddd %xmm0, %xmm1, %xmm0
+; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm8[0],xmm0[0],xmm8[1],xmm0[1]
; AVX2-SLOW-NEXT: vphaddd %xmm4, %xmm4, %xmm1
; AVX2-SLOW-NEXT: vphaddd %xmm5, %xmm5, %xmm4
; AVX2-SLOW-NEXT: vphaddd %xmm3, %xmm2, %xmm2
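; The sequential_sum tests sum the four lanes of each input in strict left-to-right
; order (no reassociation), so the lowering mixes haddps with explicit element
; shuffles. A hypothetical sketch of one lane, assuming the usual extractelement
; form (the define and body are elided from this hunk):
;   %hi2 = extractelement <4 x float> %2, i32 2
;   %hi3 = extractelement <4 x float> %2, i32 3
;   %t   = fadd float %lo01, %hi2    ; %lo01 = lane 0 + lane 1, via the hadd
;   %sum = fadd float %t, %hi3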
; SSSE3-SLOW-LABEL: sequential_sum_v4f32_v4f32:
; SSSE3-SLOW: # %bb.0:
; SSSE3-SLOW-NEXT: movaps %xmm0, %xmm4
-; SSSE3-SLOW-NEXT: haddps %xmm1, %xmm4
; SSSE3-SLOW-NEXT: movaps %xmm0, %xmm5
-; SSSE3-SLOW-NEXT: unpckhps {{.*#+}} xmm5 = xmm5[2],xmm1[2],xmm5[3],xmm1[3]
-; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,3],xmm0[3,3]
-; SSSE3-SLOW-NEXT: movshdup {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSSE3-SLOW-NEXT: addps %xmm2, %xmm0
-; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,2],xmm0[0,1]
-; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,1],xmm2[2,3]
-; SSSE3-SLOW-NEXT: addps %xmm4, %xmm5
-; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm2[3,3]
-; SSSE3-SLOW-NEXT: addps %xmm5, %xmm1
-; SSSE3-SLOW-NEXT: movshdup {{.*#+}} xmm0 = xmm3[1,1,3,3]
-; SSSE3-SLOW-NEXT: addps %xmm3, %xmm0
-; SSSE3-SLOW-NEXT: movaps %xmm3, %xmm2
-; SSSE3-SLOW-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm3[1]
-; SSSE3-SLOW-NEXT: addps %xmm0, %xmm2
+; SSSE3-SLOW-NEXT: haddps %xmm1, %xmm5
+; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,2,2,3]
+; SSSE3-SLOW-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSSE3-SLOW-NEXT: addps %xmm5, %xmm0
+; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,3],xmm4[3,3]
+; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0,2,3]
+; SSSE3-SLOW-NEXT: addps %xmm1, %xmm0
+; SSSE3-SLOW-NEXT: movshdup {{.*#+}} xmm1 = xmm2[1,1,3,3]
+; SSSE3-SLOW-NEXT: addps %xmm2, %xmm1
+; SSSE3-SLOW-NEXT: movaps %xmm2, %xmm4
+; SSSE3-SLOW-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1],xmm2[1]
+; SSSE3-SLOW-NEXT: addps %xmm1, %xmm4
+; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,3,3,3]
+; SSSE3-SLOW-NEXT: addps %xmm4, %xmm2
+; SSSE3-SLOW-NEXT: movshdup {{.*#+}} xmm1 = xmm3[1,1,3,3]
+; SSSE3-SLOW-NEXT: addps %xmm3, %xmm1
+; SSSE3-SLOW-NEXT: movaps %xmm3, %xmm4
+; SSSE3-SLOW-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1],xmm3[1]
+; SSSE3-SLOW-NEXT: addps %xmm1, %xmm4
; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,3,3,3]
-; SSSE3-SLOW-NEXT: addps %xmm2, %xmm3
-; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm1[2,3]
-; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,0]
-; SSSE3-SLOW-NEXT: movaps %xmm1, %xmm0
+; SSSE3-SLOW-NEXT: addps %xmm4, %xmm3
+; SSSE3-SLOW-NEXT: movlhps {{.*#+}} xmm3 = xmm3[0],xmm2[0]
+; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm3[2,0]
; SSSE3-SLOW-NEXT: retq
;
; SSSE3-FAST-LABEL: sequential_sum_v4f32_v4f32:
; SSSE3-FAST: # %bb.0:
; SSSE3-FAST-NEXT: movaps %xmm0, %xmm4
-; SSSE3-FAST-NEXT: haddps %xmm1, %xmm4
; SSSE3-FAST-NEXT: movaps %xmm0, %xmm5
-; SSSE3-FAST-NEXT: unpckhps {{.*#+}} xmm5 = xmm5[2],xmm1[2],xmm5[3],xmm1[3]
-; SSSE3-FAST-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,3],xmm0[3,3]
-; SSSE3-FAST-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,1],xmm2[2,3]
-; SSSE3-FAST-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm2[3,3]
-; SSSE3-FAST-NEXT: haddps %xmm2, %xmm2
-; SSSE3-FAST-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,2],xmm2[0,1]
-; SSSE3-FAST-NEXT: addps %xmm4, %xmm5
-; SSSE3-FAST-NEXT: addps %xmm5, %xmm1
-; SSSE3-FAST-NEXT: movaps %xmm3, %xmm0
-; SSSE3-FAST-NEXT: haddps %xmm3, %xmm0
-; SSSE3-FAST-NEXT: movaps %xmm3, %xmm2
-; SSSE3-FAST-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm3[1]
-; SSSE3-FAST-NEXT: addps %xmm0, %xmm2
+; SSSE3-FAST-NEXT: haddps %xmm1, %xmm5
+; SSSE3-FAST-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,2,2,3]
+; SSSE3-FAST-NEXT: unpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSSE3-FAST-NEXT: addps %xmm5, %xmm0
+; SSSE3-FAST-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,3],xmm4[3,3]
+; SSSE3-FAST-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0,2,3]
+; SSSE3-FAST-NEXT: addps %xmm1, %xmm0
+; SSSE3-FAST-NEXT: movaps %xmm2, %xmm1
+; SSSE3-FAST-NEXT: haddps %xmm2, %xmm1
+; SSSE3-FAST-NEXT: movaps %xmm2, %xmm4
+; SSSE3-FAST-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1],xmm2[1]
+; SSSE3-FAST-NEXT: addps %xmm1, %xmm4
+; SSSE3-FAST-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,3,3,3]
+; SSSE3-FAST-NEXT: addps %xmm4, %xmm2
+; SSSE3-FAST-NEXT: movaps %xmm3, %xmm1
+; SSSE3-FAST-NEXT: haddps %xmm3, %xmm1
+; SSSE3-FAST-NEXT: movaps %xmm3, %xmm4
+; SSSE3-FAST-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1],xmm3[1]
+; SSSE3-FAST-NEXT: addps %xmm1, %xmm4
; SSSE3-FAST-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,3,3,3]
-; SSSE3-FAST-NEXT: addps %xmm2, %xmm3
-; SSSE3-FAST-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm1[2,3]
-; SSSE3-FAST-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,0]
-; SSSE3-FAST-NEXT: movaps %xmm1, %xmm0
+; SSSE3-FAST-NEXT: addps %xmm4, %xmm3
+; SSSE3-FAST-NEXT: movlhps {{.*#+}} xmm3 = xmm3[0],xmm2[0]
+; SSSE3-FAST-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm3[2,0]
; SSSE3-FAST-NEXT: retq
;
; AVX-SLOW-LABEL: sequential_sum_v4f32_v4f32:
; AVX-SLOW: # %bb.0:
; AVX-SLOW-NEXT: vhaddps %xmm1, %xmm0, %xmm4
+; AVX-SLOW-NEXT: vpermilps {{.*#+}} xmm4 = xmm4[0,2,2,3]
; AVX-SLOW-NEXT: vunpckhps {{.*#+}} xmm5 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; AVX-SLOW-NEXT: vaddps %xmm4, %xmm5, %xmm4
; AVX-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX-SLOW-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[3],xmm1[1],zero,zero
+; AVX-SLOW-NEXT: vaddps %xmm4, %xmm0, %xmm0
; AVX-SLOW-NEXT: vmovshdup {{.*#+}} xmm1 = xmm2[1,1,3,3]
; AVX-SLOW-NEXT: vaddps %xmm2, %xmm1, %xmm1
-; AVX-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm4[0,2],xmm1[0,1]
-; AVX-SLOW-NEXT: vblendps {{.*#+}} xmm4 = xmm5[0,1],xmm2[2,3]
+; AVX-SLOW-NEXT: vpermilpd {{.*#+}} xmm4 = xmm2[1,0]
; AVX-SLOW-NEXT: vaddps %xmm1, %xmm4, %xmm1
-; AVX-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,1],xmm2[3,3]
-; AVX-SLOW-NEXT: vaddps %xmm1, %xmm0, %xmm0
+; AVX-SLOW-NEXT: vpermilps {{.*#+}} xmm2 = xmm2[3,3,3,3]
+; AVX-SLOW-NEXT: vaddps %xmm1, %xmm2, %xmm1
+; AVX-SLOW-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX-SLOW-NEXT: vmovshdup {{.*#+}} xmm1 = xmm3[1,1,3,3]
; AVX-SLOW-NEXT: vaddps %xmm3, %xmm1, %xmm1
; AVX-SLOW-NEXT: vpermilpd {{.*#+}} xmm2 = xmm3[1,0]
; AVX-FAST-LABEL: sequential_sum_v4f32_v4f32:
; AVX-FAST: # %bb.0:
; AVX-FAST-NEXT: vhaddps %xmm1, %xmm0, %xmm4
+; AVX-FAST-NEXT: vpermilps {{.*#+}} xmm4 = xmm4[0,2,2,3]
; AVX-FAST-NEXT: vunpckhps {{.*#+}} xmm5 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; AVX-FAST-NEXT: vaddps %xmm4, %xmm5, %xmm4
; AVX-FAST-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX-FAST-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[3],xmm1[1],zero,zero
+; AVX-FAST-NEXT: vaddps %xmm4, %xmm0, %xmm0
; AVX-FAST-NEXT: vhaddps %xmm2, %xmm2, %xmm1
-; AVX-FAST-NEXT: vshufps {{.*#+}} xmm1 = xmm4[0,2],xmm1[0,1]
-; AVX-FAST-NEXT: vblendps {{.*#+}} xmm4 = xmm5[0,1],xmm2[2,3]
+; AVX-FAST-NEXT: vpermilpd {{.*#+}} xmm4 = xmm2[1,0]
; AVX-FAST-NEXT: vaddps %xmm1, %xmm4, %xmm1
-; AVX-FAST-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,1],xmm2[3,3]
-; AVX-FAST-NEXT: vaddps %xmm1, %xmm0, %xmm0
+; AVX-FAST-NEXT: vpermilps {{.*#+}} xmm2 = xmm2[3,3,3,3]
+; AVX-FAST-NEXT: vaddps %xmm1, %xmm2, %xmm1
+; AVX-FAST-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX-FAST-NEXT: vhaddps %xmm3, %xmm3, %xmm1
; AVX-FAST-NEXT: vpermilpd {{.*#+}} xmm2 = xmm3[1,0]
; AVX-FAST-NEXT: vaddps %xmm1, %xmm2, %xmm1
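; The unlabelled SSSE3/AVX blocks from here to the next IR line belong to the integer
; variant, sequential_sum_v4i32_v4i32 (its define and LABEL lines were elided when this
; hunk was trimmed); the opening shufflevector of its body appears after the final
; AVX2-FAST block.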
; SSSE3-SLOW-NEXT: paddd %xmm4, %xmm1
; SSSE3-SLOW-NEXT: paddd %xmm1, %xmm0
; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,1,1]
-; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm4 = xmm2[3,3,3,3]
-; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm5 = xmm2[0,1,0,1]
-; SSSE3-SLOW-NEXT: paddd %xmm2, %xmm5
-; SSSE3-SLOW-NEXT: paddd %xmm4, %xmm5
-; SSSE3-SLOW-NEXT: paddd %xmm1, %xmm5
+; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm4 = xmm2[2,3,2,3]
+; SSSE3-SLOW-NEXT: paddd %xmm2, %xmm4
+; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm2 = xmm2[3,3,3,3]
+; SSSE3-SLOW-NEXT: paddd %xmm4, %xmm2
+; SSSE3-SLOW-NEXT: paddd %xmm1, %xmm2
; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm1 = xmm3[1,1,1,1]
-; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm2 = xmm3[2,3,2,3]
-; SSSE3-SLOW-NEXT: paddd %xmm3, %xmm2
+; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm4 = xmm3[2,3,2,3]
+; SSSE3-SLOW-NEXT: paddd %xmm3, %xmm4
; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm3 = xmm3[3,3,3,3]
-; SSSE3-SLOW-NEXT: paddd %xmm2, %xmm3
+; SSSE3-SLOW-NEXT: paddd %xmm4, %xmm3
; SSSE3-SLOW-NEXT: paddd %xmm1, %xmm3
-; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm5[2,3]
+; SSSE3-SLOW-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm2[0]
; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm3[2,0]
; SSSE3-SLOW-NEXT: retq
;
; SSSE3-FAST-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
; SSSE3-FAST-NEXT: paddd %xmm4, %xmm1
; SSSE3-FAST-NEXT: paddd %xmm1, %xmm0
-; SSSE3-FAST-NEXT: movdqa %xmm2, %xmm1
-; SSSE3-FAST-NEXT: phaddd %xmm2, %xmm1
+; SSSE3-FAST-NEXT: pshufd {{.*#+}} xmm1 = xmm2[2,3,2,3]
; SSSE3-FAST-NEXT: pshufd {{.*#+}} xmm4 = xmm2[3,3,3,3]
-; SSSE3-FAST-NEXT: paddd %xmm1, %xmm4
+; SSSE3-FAST-NEXT: phaddd %xmm2, %xmm2
; SSSE3-FAST-NEXT: paddd %xmm2, %xmm4
+; SSSE3-FAST-NEXT: paddd %xmm1, %xmm4
; SSSE3-FAST-NEXT: pshufd {{.*#+}} xmm1 = xmm3[2,3,2,3]
; SSSE3-FAST-NEXT: pshufd {{.*#+}} xmm2 = xmm3[3,3,3,3]
; SSSE3-FAST-NEXT: phaddd %xmm3, %xmm3
; SSSE3-FAST-NEXT: paddd %xmm3, %xmm2
; SSSE3-FAST-NEXT: paddd %xmm1, %xmm2
-; SSSE3-FAST-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm4[2,3]
+; SSSE3-FAST-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm4[0]
; SSSE3-FAST-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,0]
; SSSE3-FAST-NEXT: retq
;
; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
; AVX1-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
+; AVX1-SLOW-NEXT: vpaddd %xmm0, %xmm4, %xmm0
+; AVX1-SLOW-NEXT: vpaddd %xmm0, %xmm5, %xmm0
; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm2[1,1,1,1]
+; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm4 = xmm2[2,3,2,3]
+; AVX1-SLOW-NEXT: vpaddd %xmm4, %xmm2, %xmm4
+; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[3,3,3,3]
+; AVX1-SLOW-NEXT: vpaddd %xmm2, %xmm4, %xmm2
; AVX1-SLOW-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; AVX1-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm4[0],xmm1[0]
-; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm4 = xmm2[3,3,3,3]
-; AVX1-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm4[0]
-; AVX1-SLOW-NEXT: vpaddd %xmm0, %xmm1, %xmm0
-; AVX1-SLOW-NEXT: vpblendw {{.*#+}} xmm1 = xmm5[0,1,2,3],xmm2[4,5,6,7]
-; AVX1-SLOW-NEXT: vpaddd %xmm0, %xmm1, %xmm0
+; AVX1-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm3[1,1,1,1]
-; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm2 = xmm3[0,0,0,0]
-; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm4 = xmm3[2,2,2,2]
-; AVX1-SLOW-NEXT: vpaddd %xmm4, %xmm2, %xmm2
+; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm2 = xmm3[2,3,2,3]
+; AVX1-SLOW-NEXT: vpaddd %xmm2, %xmm3, %xmm2
+; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[3,3,3,3]
; AVX1-SLOW-NEXT: vpaddd %xmm3, %xmm2, %xmm2
; AVX1-SLOW-NEXT: vpaddd %xmm2, %xmm1, %xmm1
+; AVX1-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; AVX1-SLOW-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],xmm1[6,7]
; AVX1-SLOW-NEXT: retq
;
; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
; AVX1-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
+; AVX1-FAST-NEXT: vpaddd %xmm0, %xmm4, %xmm0
+; AVX1-FAST-NEXT: vpaddd %xmm0, %xmm5, %xmm0
; AVX1-FAST-NEXT: vphaddd %xmm2, %xmm2, %xmm1
-; AVX1-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm4[0],xmm1[0]
-; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm4 = xmm2[3,3,3,3]
-; AVX1-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm4[0]
-; AVX1-FAST-NEXT: vpaddd %xmm0, %xmm1, %xmm0
-; AVX1-FAST-NEXT: vpblendw {{.*#+}} xmm1 = xmm5[0,1,2,3],xmm2[4,5,6,7]
-; AVX1-FAST-NEXT: vpaddd %xmm0, %xmm1, %xmm0
+; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm4 = xmm2[2,3,2,3]
+; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[3,3,3,3]
+; AVX1-FAST-NEXT: vpaddd %xmm2, %xmm1, %xmm1
+; AVX1-FAST-NEXT: vpaddd %xmm1, %xmm4, %xmm1
+; AVX1-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX1-FAST-NEXT: vphaddd %xmm3, %xmm3, %xmm1
-; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm2 = xmm3[2,2,2,2]
-; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
+; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm2 = xmm3[2,3,2,3]
+; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[3,3,3,3]
; AVX1-FAST-NEXT: vpaddd %xmm3, %xmm1, %xmm1
; AVX1-FAST-NEXT: vpaddd %xmm1, %xmm2, %xmm1
+; AVX1-FAST-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
; AVX1-FAST-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],xmm1[6,7]
; AVX1-FAST-NEXT: retq
;
; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
; AVX2-SLOW-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
+; AVX2-SLOW-NEXT: vpaddd %xmm0, %xmm4, %xmm0
+; AVX2-SLOW-NEXT: vpaddd %xmm0, %xmm5, %xmm0
; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm2[1,1,1,1]
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm4 = xmm2[2,3,2,3]
+; AVX2-SLOW-NEXT: vpaddd %xmm4, %xmm2, %xmm4
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[3,3,3,3]
+; AVX2-SLOW-NEXT: vpaddd %xmm2, %xmm4, %xmm2
; AVX2-SLOW-NEXT: vpaddd %xmm2, %xmm1, %xmm1
-; AVX2-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm4[0],xmm1[0]
-; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm4 = xmm2[3,3,3,3]
-; AVX2-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm4[0]
-; AVX2-SLOW-NEXT: vpaddd %xmm0, %xmm1, %xmm0
-; AVX2-SLOW-NEXT: vpblendd {{.*#+}} xmm1 = xmm5[0,1],xmm2[2,3]
-; AVX2-SLOW-NEXT: vpaddd %xmm0, %xmm1, %xmm0
-; AVX2-SLOW-NEXT: vpbroadcastq %xmm3, %xmm1
-; AVX2-SLOW-NEXT: vpbroadcastd %xmm3, %xmm2
-; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm4 = xmm3[2,2,2,2]
-; AVX2-SLOW-NEXT: vpaddd %xmm4, %xmm2, %xmm2
+; AVX2-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm3[1,1,1,1]
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm2 = xmm3[2,3,2,3]
+; AVX2-SLOW-NEXT: vpaddd %xmm2, %xmm3, %xmm2
+; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[3,3,3,3]
; AVX2-SLOW-NEXT: vpaddd %xmm3, %xmm2, %xmm2
; AVX2-SLOW-NEXT: vpaddd %xmm2, %xmm1, %xmm1
+; AVX2-SLOW-NEXT: vpbroadcastd %xmm1, %xmm1
; AVX2-SLOW-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3]
; AVX2-SLOW-NEXT: retq
;
; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
; AVX2-FAST-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3]
+; AVX2-FAST-NEXT: vpaddd %xmm0, %xmm4, %xmm0
+; AVX2-FAST-NEXT: vpaddd %xmm0, %xmm5, %xmm0
; AVX2-FAST-NEXT: vphaddd %xmm2, %xmm2, %xmm1
-; AVX2-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm4[0],xmm1[0]
-; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm4 = xmm2[3,3,3,3]
-; AVX2-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm4[0]
-; AVX2-FAST-NEXT: vpaddd %xmm0, %xmm1, %xmm0
-; AVX2-FAST-NEXT: vpblendd {{.*#+}} xmm1 = xmm5[0,1],xmm2[2,3]
-; AVX2-FAST-NEXT: vpaddd %xmm0, %xmm1, %xmm0
+; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm4 = xmm2[2,3,2,3]
+; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[3,3,3,3]
+; AVX2-FAST-NEXT: vpaddd %xmm2, %xmm1, %xmm1
+; AVX2-FAST-NEXT: vpaddd %xmm1, %xmm4, %xmm1
+; AVX2-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX2-FAST-NEXT: vphaddd %xmm3, %xmm3, %xmm1
-; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm2 = xmm3[2,2,2,2]
-; AVX2-FAST-NEXT: vpbroadcastd %xmm1, %xmm1
+; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm2 = xmm3[2,3,2,3]
+; AVX2-FAST-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[3,3,3,3]
; AVX2-FAST-NEXT: vpaddd %xmm3, %xmm1, %xmm1
; AVX2-FAST-NEXT: vpaddd %xmm1, %xmm2, %xmm1
+; AVX2-FAST-NEXT: vpbroadcastd %xmm1, %xmm1
; AVX2-FAST-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3]
; AVX2-FAST-NEXT: retq
%5 = shufflevector <4 x i32> %0, <4 x i32> %1, <2 x i32> <i32 0, i32 4>
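; The reduction_sum tests pack four full vector reductions into one result. A sketch of
; one reduction, assuming the generic intrinsic form:
;   %r0 = call reassoc float @llvm.vector.reduce.fadd.v4f32(float -0.000000e+00, <4 x float> %0)
; The reassoc flag is what permits the movhlps/movshdup shuffle tree below instead of an
; ordered add chain.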
; SSSE3-SLOW-LABEL: reduction_sum_v4f32_v4f32_reassoc:
; SSSE3-SLOW: # %bb.0:
; SSSE3-SLOW-NEXT: movaps %xmm0, %xmm4
-; SSSE3-SLOW-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1],xmm0[1]
-; SSSE3-SLOW-NEXT: addps %xmm0, %xmm4
-; SSSE3-SLOW-NEXT: movshdup {{.*#+}} xmm0 = xmm4[1,1,3,3]
-; SSSE3-SLOW-NEXT: movaps %xmm1, %xmm5
-; SSSE3-SLOW-NEXT: unpckhpd {{.*#+}} xmm5 = xmm5[1],xmm1[1]
-; SSSE3-SLOW-NEXT: addps %xmm1, %xmm5
-; SSSE3-SLOW-NEXT: movshdup {{.*#+}} xmm1 = xmm5[1,1,3,3]
+; SSSE3-SLOW-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
+; SSSE3-SLOW-NEXT: addps %xmm4, %xmm0
+; SSSE3-SLOW-NEXT: movshdup {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; SSSE3-SLOW-NEXT: addps %xmm4, %xmm0
+; SSSE3-SLOW-NEXT: movaps %xmm1, %xmm4
+; SSSE3-SLOW-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1],xmm1[1]
+; SSSE3-SLOW-NEXT: addps %xmm1, %xmm4
+; SSSE3-SLOW-NEXT: movshdup {{.*#+}} xmm1 = xmm4[1,1,3,3]
+; SSSE3-SLOW-NEXT: addps %xmm4, %xmm1
; SSSE3-SLOW-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSSE3-SLOW-NEXT: movaps %xmm2, %xmm1
; SSSE3-SLOW-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm2[1]
; SSSE3-SLOW-NEXT: addps %xmm2, %xmm1
-; SSSE3-SLOW-NEXT: movaps %xmm3, %xmm2
-; SSSE3-SLOW-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm3[1]
-; SSSE3-SLOW-NEXT: addps %xmm3, %xmm2
-; SSSE3-SLOW-NEXT: movaps %xmm2, %xmm3
-; SSSE3-SLOW-NEXT: movlhps {{.*#+}} xmm3 = xmm3[0],xmm1[0]
-; SSSE3-SLOW-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
-; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,1],xmm3[2,0]
-; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1],xmm1[1,1]
-; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,0]
-; SSSE3-SLOW-NEXT: addps %xmm0, %xmm4
-; SSSE3-SLOW-NEXT: movaps %xmm4, %xmm0
+; SSSE3-SLOW-NEXT: movshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; SSSE3-SLOW-NEXT: addps %xmm1, %xmm2
+; SSSE3-SLOW-NEXT: movaps %xmm3, %xmm1
+; SSSE3-SLOW-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm3[1]
+; SSSE3-SLOW-NEXT: addps %xmm3, %xmm1
+; SSSE3-SLOW-NEXT: movshdup {{.*#+}} xmm3 = xmm1[1,1,3,3]
+; SSSE3-SLOW-NEXT: addps %xmm1, %xmm3
+; SSSE3-SLOW-NEXT: movlhps {{.*#+}} xmm3 = xmm3[0],xmm2[0]
+; SSSE3-SLOW-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm3[2,0]
; SSSE3-SLOW-NEXT: retq
;
; SSSE3-FAST-LABEL: reduction_sum_v4f32_v4f32_reassoc:
; AVX-SLOW: # %bb.0:
; AVX-SLOW-NEXT: vpermilpd {{.*#+}} xmm4 = xmm0[1,0]
; AVX-SLOW-NEXT: vaddps %xmm4, %xmm0, %xmm0
+; AVX-SLOW-NEXT: vmovshdup {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; AVX-SLOW-NEXT: vaddps %xmm4, %xmm0, %xmm0
; AVX-SLOW-NEXT: vpermilpd {{.*#+}} xmm4 = xmm1[1,0]
; AVX-SLOW-NEXT: vaddps %xmm4, %xmm1, %xmm1
-; AVX-SLOW-NEXT: vpermilpd {{.*#+}} xmm4 = xmm2[1,0]
-; AVX-SLOW-NEXT: vaddps %xmm4, %xmm2, %xmm2
-; AVX-SLOW-NEXT: vpermilpd {{.*#+}} xmm4 = xmm3[1,0]
-; AVX-SLOW-NEXT: vaddps %xmm4, %xmm3, %xmm3
-; AVX-SLOW-NEXT: vshufps {{.*#+}} xmm4 = xmm3[1,1],xmm2[1,1]
-; AVX-SLOW-NEXT: vinsertps {{.*#+}} xmm5 = xmm0[1],xmm1[1],zero,zero
-; AVX-SLOW-NEXT: vshufps {{.*#+}} xmm4 = xmm5[0,1],xmm4[2,0]
-; AVX-SLOW-NEXT: vmovlhps {{.*#+}} xmm2 = xmm3[0],xmm2[0]
+; AVX-SLOW-NEXT: vmovshdup {{.*#+}} xmm4 = xmm1[1,1,3,3]
+; AVX-SLOW-NEXT: vaddps %xmm4, %xmm1, %xmm1
; AVX-SLOW-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
-; AVX-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,0]
-; AVX-SLOW-NEXT: vaddps %xmm4, %xmm0, %xmm0
+; AVX-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm2[1,0]
+; AVX-SLOW-NEXT: vaddps %xmm1, %xmm2, %xmm1
+; AVX-SLOW-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; AVX-SLOW-NEXT: vaddps %xmm2, %xmm1, %xmm1
+; AVX-SLOW-NEXT: vpermilpd {{.*#+}} xmm2 = xmm3[1,0]
+; AVX-SLOW-NEXT: vaddps %xmm2, %xmm3, %xmm2
+; AVX-SLOW-NEXT: vmovshdup {{.*#+}} xmm3 = xmm2[1,1,3,3]
+; AVX-SLOW-NEXT: vaddps %xmm3, %xmm2, %xmm2
+; AVX-SLOW-NEXT: vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVX-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,0]
; AVX-SLOW-NEXT: retq
;
; AVX-FAST-LABEL: reduction_sum_v4f32_v4f32_reassoc:
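; Integer reduction variant, presumably @llvm.vector.reduce.add.v4i32; integer adds
; reassociate freely, so no flag is needed. The diff again trades shared shuffles for
; four self-contained reduce-then-pack sequences.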
; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm4 = xmm0[2,3,2,3]
; SSSE3-SLOW-NEXT: paddd %xmm0, %xmm4
; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,1,1]
-; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm5 = xmm1[2,3,2,3]
-; SSSE3-SLOW-NEXT: paddd %xmm1, %xmm5
-; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm1 = xmm5[1,1,1,1]
+; SSSE3-SLOW-NEXT: paddd %xmm4, %xmm0
+; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm4 = xmm1[2,3,2,3]
+; SSSE3-SLOW-NEXT: paddd %xmm1, %xmm4
+; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm1 = xmm4[1,1,1,1]
+; SSSE3-SLOW-NEXT: paddd %xmm4, %xmm1
; SSSE3-SLOW-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm1 = xmm2[2,3,2,3]
; SSSE3-SLOW-NEXT: paddd %xmm2, %xmm1
; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,1,1]
-; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm6 = xmm3[2,3,2,3]
-; SSSE3-SLOW-NEXT: paddd %xmm3, %xmm6
-; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm3 = xmm6[1,1,1,1]
+; SSSE3-SLOW-NEXT: paddd %xmm1, %xmm2
+; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm1 = xmm3[2,3,2,3]
+; SSSE3-SLOW-NEXT: paddd %xmm3, %xmm1
+; SSSE3-SLOW-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,1,1]
+; SSSE3-SLOW-NEXT: paddd %xmm1, %xmm3
; SSSE3-SLOW-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; SSSE3-SLOW-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
-; SSSE3-SLOW-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1]
-; SSSE3-SLOW-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
-; SSSE3-SLOW-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm1[0]
-; SSSE3-SLOW-NEXT: paddd %xmm0, %xmm4
-; SSSE3-SLOW-NEXT: movdqa %xmm4, %xmm0
; SSSE3-SLOW-NEXT: retq
;
; SSSE3-FAST-LABEL: reduction_sum_v4i32_v4i32:
; AVX-SLOW-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[2,3,2,3]
; AVX-SLOW-NEXT: vpaddd %xmm4, %xmm0, %xmm0
; AVX-SLOW-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[1,1,1,1]
-; AVX-SLOW-NEXT: vpshufd {{.*#+}} xmm5 = xmm1[2,3,2,3]
-; AVX-SLOW-NEXT: vpaddd %xmm5, %xmm1, %xmm1
-; AVX-SLOW-NEXT: vpshufd {{.*#+}} xmm5 = xmm1[1,1,1,1]
-; AVX-SLOW-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
-; AVX-SLOW-NEXT: vpshufd {{.*#+}} xmm5 = xmm2[2,3,2,3]
-; AVX-SLOW-NEXT: vpaddd %xmm5, %xmm2, %xmm2
-; AVX-SLOW-NEXT: vpshufd {{.*#+}} xmm5 = xmm2[1,1,1,1]
-; AVX-SLOW-NEXT: vpshufd {{.*#+}} xmm6 = xmm3[2,3,2,3]
-; AVX-SLOW-NEXT: vpaddd %xmm6, %xmm3, %xmm3
-; AVX-SLOW-NEXT: vpshufd {{.*#+}} xmm6 = xmm3[1,1,1,1]
-; AVX-SLOW-NEXT: vpunpckldq {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1]
-; AVX-SLOW-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
-; AVX-SLOW-NEXT: vpaddd %xmm5, %xmm2, %xmm2
-; AVX-SLOW-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; AVX-SLOW-NEXT: vpaddd %xmm4, %xmm0, %xmm0
-; AVX-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; AVX-SLOW-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[2,3,2,3]
+; AVX-SLOW-NEXT: vpaddd %xmm4, %xmm1, %xmm1
+; AVX-SLOW-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[1,1,1,1]
+; AVX-SLOW-NEXT: vpaddd %xmm4, %xmm1, %xmm1
+; AVX-SLOW-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; AVX-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm2[2,3,2,3]
+; AVX-SLOW-NEXT: vpaddd %xmm1, %xmm2, %xmm1
+; AVX-SLOW-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,1,1]
+; AVX-SLOW-NEXT: vpaddd %xmm2, %xmm1, %xmm1
+; AVX-SLOW-NEXT: vpshufd {{.*#+}} xmm2 = xmm3[2,3,2,3]
+; AVX-SLOW-NEXT: vpaddd %xmm2, %xmm3, %xmm2
+; AVX-SLOW-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[1,1,1,1]
+; AVX-SLOW-NEXT: vpaddd %xmm3, %xmm2, %xmm2
+; AVX-SLOW-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; AVX-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX-SLOW-NEXT: retq
;
; AVX-FAST-LABEL: reduction_sum_v4i32_v4i32:
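; The signbits_mask_ashr tests ashr each input by a non-uniform amount, take a min/max,
; splat lane 0, and mask. A sketch of the IR, with the shift amounts read back from the
; asm below (the body is elided from this hunk):
;   %s0 = ashr <4 x i32> %a0, <i32 25, i32 26, i32 27, i32 0>
;   %s1 = ashr <4 x i32> %a1, <i32 25, i32 26, i32 27, i32 0>
;   %mx = call <4 x i32> @llvm.smax.v4i32(<4 x i32> %s0, <4 x i32> %s1) ; or the SSE4.1 intrinsic
;   %sp = shufflevector <4 x i32> %mx, <4 x i32> poison, <4 x i32> zeroinitializer
; Previously a single vpsrad $25 per operand was splatted from lane 0 before the max;
; now all four shifted lanes are built with vpsrad/vpblendw and the splat follows the max.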
define <4 x i32> @signbits_mask_ashr_smax(<4 x i32> %a0, <4 x i32> %a1) {
; X86-LABEL: signbits_mask_ashr_smax:
; X86: # %bb.0:
+; X86-NEXT: vpsrad $26, %xmm0, %xmm2
+; X86-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm0[4,5,6,7]
+; X86-NEXT: vpsrad $27, %xmm0, %xmm3
; X86-NEXT: vpsrad $25, %xmm0, %xmm0
+; X86-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4,5,6,7]
+; X86-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
+; X86-NEXT: vpsrad $26, %xmm1, %xmm2
+; X86-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
+; X86-NEXT: vpsrad $27, %xmm1, %xmm3
; X86-NEXT: vpsrad $25, %xmm1, %xmm1
-; X86-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
-; X86-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; X86-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5,6,7]
+; X86-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
; X86-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
+; X86-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; X86-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-NEXT: retl
;
; X64-AVX1-LABEL: signbits_mask_ashr_smax:
; X64-AVX1: # %bb.0:
+; X64-AVX1-NEXT: vpsrad $26, %xmm0, %xmm2
+; X64-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm0[4,5,6,7]
+; X64-AVX1-NEXT: vpsrad $27, %xmm0, %xmm3
; X64-AVX1-NEXT: vpsrad $25, %xmm0, %xmm0
+; X64-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4,5,6,7]
+; X64-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
+; X64-AVX1-NEXT: vpsrad $26, %xmm1, %xmm2
+; X64-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
+; X64-AVX1-NEXT: vpsrad $27, %xmm1, %xmm3
; X64-AVX1-NEXT: vpsrad $25, %xmm1, %xmm1
-; X64-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
-; X64-AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; X64-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5,6,7]
+; X64-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
; X64-AVX1-NEXT: vpmaxsd %xmm1, %xmm0, %xmm0
+; X64-AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; X64-AVX1-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX1-NEXT: retq
;
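; Same pattern as signbits_mask_ashr_smax, using the signed minimum (vpminsd).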
define <4 x i32> @signbits_mask_ashr_smin(<4 x i32> %a0, <4 x i32> %a1) {
; X86-LABEL: signbits_mask_ashr_smin:
; X86: # %bb.0:
+; X86-NEXT: vpsrad $26, %xmm0, %xmm2
+; X86-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm0[4,5,6,7]
+; X86-NEXT: vpsrad $27, %xmm0, %xmm3
; X86-NEXT: vpsrad $25, %xmm0, %xmm0
+; X86-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4,5,6,7]
+; X86-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
+; X86-NEXT: vpsrad $26, %xmm1, %xmm2
+; X86-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
+; X86-NEXT: vpsrad $27, %xmm1, %xmm3
; X86-NEXT: vpsrad $25, %xmm1, %xmm1
-; X86-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
-; X86-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; X86-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5,6,7]
+; X86-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
; X86-NEXT: vpminsd %xmm1, %xmm0, %xmm0
+; X86-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; X86-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-NEXT: retl
;
; X64-AVX1-LABEL: signbits_mask_ashr_smin:
; X64-AVX1: # %bb.0:
+; X64-AVX1-NEXT: vpsrad $26, %xmm0, %xmm2
+; X64-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm0[4,5,6,7]
+; X64-AVX1-NEXT: vpsrad $27, %xmm0, %xmm3
; X64-AVX1-NEXT: vpsrad $25, %xmm0, %xmm0
+; X64-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4,5,6,7]
+; X64-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
+; X64-AVX1-NEXT: vpsrad $26, %xmm1, %xmm2
+; X64-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
+; X64-AVX1-NEXT: vpsrad $27, %xmm1, %xmm3
; X64-AVX1-NEXT: vpsrad $25, %xmm1, %xmm1
-; X64-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
-; X64-AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; X64-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5,6,7]
+; X64-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
; X64-AVX1-NEXT: vpminsd %xmm1, %xmm0, %xmm0
+; X64-AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; X64-AVX1-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX1-NEXT: retq
;
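; Same pattern again, using the unsigned maximum (vpmaxud).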
define <4 x i32> @signbits_mask_ashr_umax(<4 x i32> %a0, <4 x i32> %a1) {
; X86-LABEL: signbits_mask_ashr_umax:
; X86: # %bb.0:
+; X86-NEXT: vpsrad $26, %xmm0, %xmm2
+; X86-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm0[4,5,6,7]
+; X86-NEXT: vpsrad $27, %xmm0, %xmm3
; X86-NEXT: vpsrad $25, %xmm0, %xmm0
+; X86-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4,5,6,7]
+; X86-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
+; X86-NEXT: vpsrad $26, %xmm1, %xmm2
+; X86-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
+; X86-NEXT: vpsrad $27, %xmm1, %xmm3
; X86-NEXT: vpsrad $25, %xmm1, %xmm1
-; X86-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
-; X86-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; X86-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5,6,7]
+; X86-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
; X86-NEXT: vpmaxud %xmm1, %xmm0, %xmm0
+; X86-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; X86-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-NEXT: retl
;
; X64-AVX1-LABEL: signbits_mask_ashr_umax:
; X64-AVX1: # %bb.0:
+; X64-AVX1-NEXT: vpsrad $26, %xmm0, %xmm2
+; X64-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm0[4,5,6,7]
+; X64-AVX1-NEXT: vpsrad $27, %xmm0, %xmm3
; X64-AVX1-NEXT: vpsrad $25, %xmm0, %xmm0
+; X64-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4,5,6,7]
+; X64-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
+; X64-AVX1-NEXT: vpsrad $26, %xmm1, %xmm2
+; X64-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
+; X64-AVX1-NEXT: vpsrad $27, %xmm1, %xmm3
; X64-AVX1-NEXT: vpsrad $25, %xmm1, %xmm1
-; X64-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
-; X64-AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; X64-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5,6,7]
+; X64-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
; X64-AVX1-NEXT: vpmaxud %xmm1, %xmm0, %xmm0
+; X64-AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; X64-AVX1-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX1-NEXT: retq
;
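; Same pattern with the unsigned minimum (vpminud), completing the min/max matrix.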
define <4 x i32> @signbits_mask_ashr_umin(<4 x i32> %a0, <4 x i32> %a1) {
; X86-LABEL: signbits_mask_ashr_umin:
; X86: # %bb.0:
+; X86-NEXT: vpsrad $26, %xmm0, %xmm2
+; X86-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm0[4,5,6,7]
+; X86-NEXT: vpsrad $27, %xmm0, %xmm3
; X86-NEXT: vpsrad $25, %xmm0, %xmm0
+; X86-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4,5,6,7]
+; X86-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
+; X86-NEXT: vpsrad $26, %xmm1, %xmm2
+; X86-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
+; X86-NEXT: vpsrad $27, %xmm1, %xmm3
; X86-NEXT: vpsrad $25, %xmm1, %xmm1
-; X86-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
-; X86-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; X86-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5,6,7]
+; X86-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
; X86-NEXT: vpminud %xmm1, %xmm0, %xmm0
+; X86-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; X86-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X86-NEXT: retl
;
; X64-AVX1-LABEL: signbits_mask_ashr_umin:
; X64-AVX1: # %bb.0:
+; X64-AVX1-NEXT: vpsrad $26, %xmm0, %xmm2
+; X64-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm0[4,5,6,7]
+; X64-AVX1-NEXT: vpsrad $27, %xmm0, %xmm3
; X64-AVX1-NEXT: vpsrad $25, %xmm0, %xmm0
+; X64-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm3[4,5,6,7]
+; X64-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
+; X64-AVX1-NEXT: vpsrad $26, %xmm1, %xmm2
+; X64-AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
+; X64-AVX1-NEXT: vpsrad $27, %xmm1, %xmm3
; X64-AVX1-NEXT: vpsrad $25, %xmm1, %xmm1
-; X64-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,0,0]
-; X64-AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; X64-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5,6,7]
+; X64-AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
; X64-AVX1-NEXT: vpminud %xmm1, %xmm0, %xmm0
+; X64-AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; X64-AVX1-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-AVX1-NEXT: retq
;