; NOTE(review): this file is a unified-diff excerpt of an LLVM FileCheck test:
; '-' lines are the old expected shuffle+add codegen, '+' lines the new single
; horizontal-add. IR bodies appear truncated (no ret/closing brace) -- likely
; an extraction artifact; confirm against the full test file.
; Test: splat of lane 0 (<undef,0> mask) added to %x folds to haddpd/vhaddpd.
define <2 x double> @add_pd_003(<2 x double> %x) {
; SSE-LABEL: add_pd_003:
; SSE: # %bb.0:
-; SSE-NEXT: movddup {{.*#+}} xmm1 = xmm0[0,0]
-; SSE-NEXT: addpd %xmm1, %xmm0
+; SSE-NEXT: haddpd %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: add_pd_003:
; AVX: # %bb.0:
-; AVX-NEXT: vmovddup {{.*#+}} xmm1 = xmm0[0,0]
-; AVX-NEXT: vaddpd %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vhaddpd %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%l = shufflevector <2 x double> %x, <2 x double> undef, <2 x i32> <i32 undef, i32 0>
%add = fadd <2 x double> %l, %x
; Test: lane-swap shuffle (<1,0> mask) added to %x; diff replaces the explicit
; shufpd/vpermilpd + addpd pair with a single haddpd/vhaddpd.
define <2 x double> @add_pd_003_2(<2 x double> %x) {
; SSE-LABEL: add_pd_003_2:
; SSE: # %bb.0:
-; SSE-NEXT: movapd %xmm0, %xmm1
-; SSE-NEXT: shufpd {{.*#+}} xmm1 = xmm1[1],xmm0[0]
-; SSE-NEXT: addpd %xmm0, %xmm1
-; SSE-NEXT: movapd %xmm1, %xmm0
+; SSE-NEXT: haddpd %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: add_pd_003_2:
; AVX: # %bb.0:
-; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
-; AVX-NEXT: vaddpd %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vhaddpd %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%l = shufflevector <2 x double> %x, <2 x double> undef, <2 x i32> <i32 1, i32 0>
%add = fadd <2 x double> %l, %x
; Test: like add_pd_003 but with a swapped result (IR truncated here).
; NOTE(review): the new AVX expectation keeps a trailing vpermilpd [1,0] while
; the new SSE expectation shows no trailing shuffle -- verify this asymmetry
; against regenerated CHECK lines before applying.
define <2 x double> @add_pd_010(<2 x double> %x) {
; SSE-LABEL: add_pd_010:
; SSE: # %bb.0:
-; SSE-NEXT: movddup {{.*#+}} xmm1 = xmm0[0,0]
-; SSE-NEXT: addpd %xmm0, %xmm1
-; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1,1]
-; SSE-NEXT: movapd %xmm1, %xmm0
+; SSE-NEXT: haddpd %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: add_pd_010:
; AVX: # %bb.0:
-; AVX-NEXT: vmovddup {{.*#+}} xmm1 = xmm0[0,0]
-; AVX-NEXT: vaddpd %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vhaddpd %xmm0, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX-NEXT: retq
%l = shufflevector <2 x double> %x, <2 x double> undef, <2 x i32> <i32 undef, i32 0>
; Test: v4f32 even-lane (<..,0,2>) and odd-lane (<..,1,3>) shuffles of the same
; source, added together -- the classic single-source horizontal-add pattern;
; diff collapses two shuffles + addps into one haddps/vhaddps.
define <4 x float> @add_ps_007(<4 x float> %x) {
; SSE-LABEL: add_ps_007:
; SSE: # %bb.0:
-; SSE-NEXT: movaps %xmm0, %xmm1
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[0,2]
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1,1,3]
-; SSE-NEXT: addps %xmm1, %xmm0
+; SSE-NEXT: haddps %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: add_ps_007:
; AVX: # %bb.0:
-; AVX-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[0,1,0,2]
-; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,1,3]
-; AVX-NEXT: vaddps %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vhaddps %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%l = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 undef, i32 undef, i32 0, i32 2>
%r = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 undef, i32 undef, i32 1, i32 3>
; Test: same even/odd pattern as add_ps_007 but with a post-shuffle of the
; result ([3,2,2,3] stays in both old and new expectations); only the
; shuffle+add core becomes haddps/vhaddps.
define <4 x float> @add_ps_030(<4 x float> %x) {
; SSE-LABEL: add_ps_030:
; SSE: # %bb.0:
-; SSE-NEXT: movaps %xmm0, %xmm1
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm0[0,2]
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1,1,3]
-; SSE-NEXT: addps %xmm1, %xmm0
+; SSE-NEXT: haddps %xmm0, %xmm0
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,2,2,3]
; SSE-NEXT: retq
;
; AVX-LABEL: add_ps_030:
; AVX: # %bb.0:
-; AVX-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[0,1,0,2]
-; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,1,3]
-; AVX-NEXT: vaddps %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vhaddps %xmm0, %xmm0, %xmm0
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,2,2,3]
; AVX-NEXT: retq
%l = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 undef, i32 undef, i32 0, i32 2>
; Test: undef-heavy masks (only lane 2 of each operand is defined: <..,0,..>
; and <..,1,..>) still match the horizontal-add pattern.
define <4 x float> @add_ps_007_2(<4 x float> %x) {
; SSE-LABEL: add_ps_007_2:
; SSE: # %bb.0:
-; SSE-NEXT: movddup {{.*#+}} xmm1 = xmm0[0,0]
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1,1,3]
-; SSE-NEXT: addps %xmm1, %xmm0
+; SSE-NEXT: haddps %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: add_ps_007_2:
; AVX: # %bb.0:
-; AVX-NEXT: vmovddup {{.*#+}} xmm1 = xmm0[0,0]
-; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,1,3]
-; AVX-NEXT: vaddps %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vhaddps %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%l = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 undef, i32 undef, i32 0, i32 undef>
%r = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 undef, i32 undef, i32 1, i32 undef>
; Test: shuffle with only lane 3 defined (<..,..,..,2>) added to %x; the old
; movsldup+addps sequence becomes a single haddps/vhaddps.
define <4 x float> @add_ps_008(<4 x float> %x) {
; SSE-LABEL: add_ps_008:
; SSE: # %bb.0:
-; SSE-NEXT: movsldup {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE-NEXT: addps %xmm1, %xmm0
+; SSE-NEXT: haddps %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: add_ps_008:
; AVX: # %bb.0:
-; AVX-NEXT: vmovsldup {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; AVX-NEXT: vaddps %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vhaddps %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%l = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 undef, i32 undef, i32 undef, i32 2>
%add = fadd <4 x float> %l, %x
; Test: add_ps_008 variant with a [3,1,2,3] extract of the result (IR truncated
; here); new expectation is haddps followed by the same extract shuffle.
define <4 x float> @add_ps_017(<4 x float> %x) {
; SSE-LABEL: add_ps_017:
; SSE: # %bb.0:
-; SSE-NEXT: movsldup {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; SSE-NEXT: addps %xmm0, %xmm1
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
-; SSE-NEXT: movaps %xmm1, %xmm0
+; SSE-NEXT: haddps %xmm0, %xmm0
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; SSE-NEXT: retq
;
; AVX-LABEL: add_ps_017:
; AVX: # %bb.0:
-; AVX-NEXT: vmovsldup {{.*#+}} xmm1 = xmm0[0,0,2,2]
-; AVX-NEXT: vaddps %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vhaddps %xmm0, %xmm0, %xmm0
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; AVX-NEXT: retq
%l = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 undef, i32 undef, i32 undef, i32 2>
; Test: add_ps_007_2-style masks with a [0,2,2,3] post-shuffle of the result
; (IR truncated here); core shuffle+add becomes haddps/vhaddps.
define <4 x float> @add_ps_018(<4 x float> %x) {
; SSE-LABEL: add_ps_018:
; SSE: # %bb.0:
-; SSE-NEXT: movddup {{.*#+}} xmm1 = xmm0[0,0]
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1,1,3]
-; SSE-NEXT: addps %xmm1, %xmm0
+; SSE-NEXT: haddps %xmm0, %xmm0
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE-NEXT: retq
;
; AVX-LABEL: add_ps_018:
; AVX: # %bb.0:
-; AVX-NEXT: vmovddup {{.*#+}} xmm1 = xmm0[0,0]
-; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,1,3]
-; AVX-NEXT: vaddps %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vhaddps %xmm0, %xmm0, %xmm0
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX-NEXT: retq
%l = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 undef, i32 undef, i32 0, i32 undef>
; Integer (SSSE3 phaddd) counterpart of add_ps_007: even/odd lane shuffles of
; one v4i32 source, added -- folds to phaddd/vphaddd.
define <4 x i32> @phaddd_single_source1(<4 x i32> %x) {
; SSSE3-LABEL: phaddd_single_source1:
; SSSE3: # %bb.0:
-; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,1,0,2]
-; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,3]
-; SSSE3-NEXT: paddd %xmm1, %xmm0
+; SSSE3-NEXT: phaddd %xmm0, %xmm0
; SSSE3-NEXT: retq
;
; AVX-LABEL: phaddd_single_source1:
; AVX: # %bb.0:
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[0,1,0,2]
-; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,1,3]
-; AVX-NEXT: vpaddd %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vphaddd %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%l = shufflevector <4 x i32> %x, <4 x i32> undef, <4 x i32> <i32 undef, i32 undef, i32 0, i32 2>
%r = shufflevector <4 x i32> %x, <4 x i32> undef, <4 x i32> <i32 undef, i32 undef, i32 1, i32 3>
; Integer counterpart of add_ps_030: phaddd plus an unchanged [3,2,2,3]
; post-shuffle of the result (IR truncated here).
define <4 x i32> @phaddd_single_source2(<4 x i32> %x) {
; SSSE3-LABEL: phaddd_single_source2:
; SSSE3: # %bb.0:
-; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,1,0,2]
-; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,3]
-; SSSE3-NEXT: paddd %xmm1, %xmm0
+; SSSE3-NEXT: phaddd %xmm0, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,2,2,3]
; SSSE3-NEXT: retq
;
; AVX-LABEL: phaddd_single_source2:
; AVX: # %bb.0:
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[0,1,0,2]
-; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,1,3]
-; AVX-NEXT: vpaddd %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vphaddd %xmm0, %xmm0, %xmm0
; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,2,2,3]
; AVX-NEXT: retq
%l = shufflevector <4 x i32> %x, <4 x i32> undef, <4 x i32> <i32 undef, i32 undef, i32 0, i32 2>
; Integer counterpart of add_ps_007_2: undef-heavy masks (<..,0,..> + <..,1,..>)
; fold to phaddd; old AVX path even used vpmovzxdq as one operand shuffle.
define <4 x i32> @phaddd_single_source3(<4 x i32> %x) {
; SSSE3-LABEL: phaddd_single_source3:
; SSSE3: # %bb.0:
-; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,1,0,1]
-; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,3]
-; SSSE3-NEXT: paddd %xmm1, %xmm0
+; SSSE3-NEXT: phaddd %xmm0, %xmm0
; SSSE3-NEXT: retq
;
; AVX-LABEL: phaddd_single_source3:
; AVX: # %bb.0:
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[0,1,0,1]
-; AVX-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
-; AVX-NEXT: vpaddd %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vphaddd %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%l = shufflevector <4 x i32> %x, <4 x i32> undef, <4 x i32> <i32 undef, i32 undef, i32 0, i32 undef>
%r = shufflevector <4 x i32> %x, <4 x i32> undef, <4 x i32> <i32 undef, i32 undef, i32 1, i32 undef>
; Integer counterpart of add_ps_008: single shuffle (<..,..,..,2>) added to %x
; folds to phaddd/vphaddd.
define <4 x i32> @phaddd_single_source4(<4 x i32> %x) {
; SSSE3-LABEL: phaddd_single_source4:
; SSSE3: # %bb.0:
-; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,1,2,2]
-; SSSE3-NEXT: paddd %xmm1, %xmm0
+; SSSE3-NEXT: phaddd %xmm0, %xmm0
; SSSE3-NEXT: retq
;
; AVX-LABEL: phaddd_single_source4:
; AVX: # %bb.0:
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[0,1,2,2]
-; AVX-NEXT: vpaddd %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vphaddd %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%l = shufflevector <4 x i32> %x, <4 x i32> undef, <4 x i32> <i32 undef, i32 undef, i32 undef, i32 2>
%add = add <4 x i32> %l, %x
; Integer counterpart of add_ps_017: phaddd followed by the unchanged
; [3,1,2,3] extract shuffle (IR truncated here).
define <4 x i32> @phaddd_single_source5(<4 x i32> %x) {
; SSSE3-LABEL: phaddd_single_source5:
; SSSE3: # %bb.0:
-; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,1,2,2]
-; SSSE3-NEXT: paddd %xmm0, %xmm1
-; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[3,1,2,3]
+; SSSE3-NEXT: phaddd %xmm0, %xmm0
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
; SSSE3-NEXT: retq
;
; AVX-LABEL: phaddd_single_source5:
; AVX: # %bb.0:
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[0,1,2,2]
-; AVX-NEXT: vpaddd %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vphaddd %xmm0, %xmm0, %xmm0
; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,1,2,3]
; AVX-NEXT: retq
%l = shufflevector <4 x i32> %x, <4 x i32> undef, <4 x i32> <i32 undef, i32 undef, i32 undef, i32 2>
; Integer counterpart of add_ps_018: phaddd with an unchanged [2,2,3,3]
; post-shuffle (IR truncated here).
define <4 x i32> @phaddd_single_source6(<4 x i32> %x) {
; SSSE3-LABEL: phaddd_single_source6:
; SSSE3: # %bb.0:
-; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,1,0,1]
-; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,3]
-; SSSE3-NEXT: paddd %xmm1, %xmm0
+; SSSE3-NEXT: phaddd %xmm0, %xmm0
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
; SSSE3-NEXT: retq
;
; AVX-LABEL: phaddd_single_source6:
; AVX: # %bb.0:
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[0,1,0,1]
-; AVX-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
-; AVX-NEXT: vpaddd %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vphaddd %xmm0, %xmm0, %xmm0
; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
; AVX-NEXT: retq
%l = shufflevector <4 x i32> %x, <4 x i32> undef, <4 x i32> <i32 undef, i32 undef, i32 0, i32 undef>
; 16-bit variant: even-word (<..,0,2,4,6>) and odd-word (<..,1,3,5,7>) shuffles
; of one v8i16 source, added -- folds to phaddw/vphaddw, replacing two pshufb.
define <8 x i16> @phaddw_single_source1(<8 x i16> %x) {
; SSSE3-LABEL: phaddw_single_source1:
; SSSE3: # %bb.0:
-; SSSE3-NEXT: movdqa %xmm0, %xmm1
-; SSSE3-NEXT: pshufb {{.*#+}} xmm1 = xmm1[0,1,4,5,4,5,6,7,0,1,4,5,8,9,12,13]
-; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[6,7,2,3,4,5,6,7,2,3,6,7,10,11,14,15]
-; SSSE3-NEXT: paddw %xmm1, %xmm0
+; SSSE3-NEXT: phaddw %xmm0, %xmm0
; SSSE3-NEXT: retq
;
; AVX-LABEL: phaddw_single_source1:
; AVX: # %bb.0:
-; AVX-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[0,1,4,5,4,5,6,7,0,1,4,5,8,9,12,13]
-; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[6,7,2,3,4,5,6,7,2,3,6,7,10,11,14,15]
-; AVX-NEXT: vpaddw %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vphaddw %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%l = shufflevector <8 x i16> %x, <8 x i16> undef, <8 x i32> <i32 undef, i32 undef, i32 undef, i32 undef, i32 0, i32 2, i32 4, i32 6>
%r = shufflevector <8 x i16> %x, <8 x i16> undef, <8 x i32> <i32 undef, i32 undef, i32 undef, i32 undef, i32 1, i32 3, i32 5, i32 7>
; 16-bit variant with a post-shuffle of the result (pshufhw + pshufd both
; unchanged by the diff); the four-shuffle+paddw core becomes one phaddw.
; IR body not visible in this excerpt.
define <8 x i16> @phaddw_single_source2(<8 x i16> %x) {
; SSSE3-LABEL: phaddw_single_source2:
; SSSE3: # %bb.0:
-; SSSE3-NEXT: pshuflw {{.*#+}} xmm1 = xmm0[0,2,2,3,4,5,6,7]
-; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,3]
-; SSSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,3,2,3,4,5,6,7]
-; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,3]
-; SSSE3-NEXT: paddw %xmm1, %xmm0
+; SSSE3-NEXT: phaddw %xmm0, %xmm0
; SSSE3-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,4,6,7]
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,2,3]
; SSSE3-NEXT: retq
;
; AVX-LABEL: phaddw_single_source2:
; AVX: # %bb.0:
-; AVX-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[0,2,2,3,4,5,6,7]
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,3]
-; AVX-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[1,3,2,3,4,5,6,7]
-; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,3]
-; AVX-NEXT: vpaddw %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vphaddw %xmm0, %xmm0, %xmm0
; AVX-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,4,6,7]
; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,1,2,3]
; AVX-NEXT: retq
; 16-bit variant: only words 4-5 of each operand defined (<..,0,2,..> and
; <..,1,3,..> masks) still fold to a single phaddw/vphaddw.
define <8 x i16> @phaddw_single_source3(<8 x i16> %x) {
; SSSE3-LABEL: phaddw_single_source3:
; SSSE3: # %bb.0:
-; SSSE3-NEXT: pshuflw {{.*#+}} xmm1 = xmm0[0,2,2,3,4,5,6,7]
-; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,3]
-; SSSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,3,2,3,4,5,6,7]
-; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,3]
-; SSSE3-NEXT: paddw %xmm1, %xmm0
+; SSSE3-NEXT: phaddw %xmm0, %xmm0
; SSSE3-NEXT: retq
;
; AVX-LABEL: phaddw_single_source3:
; AVX: # %bb.0:
-; AVX-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[0,2,2,3,4,5,6,7]
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,3]
-; AVX-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[1,3,2,3,4,5,6,7]
-; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,1,0,3]
-; AVX-NEXT: vpaddw %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vphaddw %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%l = shufflevector <8 x i16> %x, <8 x i16> undef, <8 x i32> <i32 undef, i32 undef, i32 undef, i32 undef, i32 0, i32 2, i32 undef, i32 undef>
%r = shufflevector <8 x i16> %x, <8 x i16> undef, <8 x i32> <i32 undef, i32 undef, i32 undef, i32 undef, i32 1, i32 3, i32 undef, i32 undef>
; 16-bit variant: shuffle with only word 7 defined (<..,6>) added to %x;
; old codegen used a pslld-based word shift, new expectation is one phaddw.
define <8 x i16> @phaddw_single_source4(<8 x i16> %x) {
; SSSE3-LABEL: phaddw_single_source4:
; SSSE3: # %bb.0:
-; SSSE3-NEXT: movdqa %xmm0, %xmm1
-; SSSE3-NEXT: pslld $16, %xmm1
-; SSSE3-NEXT: paddw %xmm0, %xmm1
-; SSSE3-NEXT: movdqa %xmm1, %xmm0
+; SSSE3-NEXT: phaddw %xmm0, %xmm0
; SSSE3-NEXT: retq
;
; AVX-LABEL: phaddw_single_source4:
; AVX: # %bb.0:
-; AVX-NEXT: vpslld $16, %xmm0, %xmm1
-; AVX-NEXT: vpaddw %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vphaddw %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%l = shufflevector <8 x i16> %x, <8 x i16> undef, <8 x i32> <i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 6>
%add = add <8 x i16> %l, %x
; 16-bit variant with a byte-shift extract of the result (psrldq unchanged by
; the diff); the shuffle+paddw core becomes phaddw/vphaddw. IR truncated here.
define <8 x i16> @phaddw_single_source6(<8 x i16> %x) {
; SSSE3-LABEL: phaddw_single_source6:
; SSSE3: # %bb.0:
-; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,1,0,1]
-; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,3]
-; SSSE3-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,6,7]
-; SSSE3-NEXT: paddw %xmm1, %xmm0
+; SSSE3-NEXT: phaddw %xmm0, %xmm0
; SSSE3-NEXT: psrldq {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
; SSSE3-NEXT: retq
;
; AVX-LABEL: phaddw_single_source6:
; AVX: # %bb.0:
-; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[0,1,0,1]
-; AVX-NEXT: vpmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
-; AVX-NEXT: vpaddw %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vphaddw %xmm0, %xmm0, %xmm0
; AVX-NEXT: vpsrldq {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,zero
; AVX-NEXT: retq
%l = shufflevector <8 x i16> %x, <8 x i16> undef, <8 x i32> <i32 undef, i32 undef, i32 undef, i32 undef, i32 0, i32 undef, i32 undef, i32 undef>