          (VINSERTF128rr (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)),
                         (v4f32 (VPERMILPSri (v4f32 (COPY_TO_REGCLASS FR32:$src, VR128)), 0)), sub_xmm),
                         (v4f32 (VPERMILPSri (v4f32 (COPY_TO_REGCLASS FR32:$src, VR128)), 0)), 1)>;
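+ // As above, but the broadcast source is already in a 128-bit vector register:
+ // splat lane 0 with VPERMILPS, then duplicate the 128-bit half with VINSERTF128.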
+ def : Pat<(v8f32 (X86VBroadcast v4f32:$src)),
+           (VINSERTF128rr (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)),
+                          (v4f32 (VPERMILPSri VR128:$src, 0)), sub_xmm),
+                          (v4f32 (VPERMILPSri VR128:$src, 0)), 1)>;
def : Pat<(v4f64 (X86VBroadcast FR64:$src)),
          (VINSERTF128rr (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)),
                         (v2f64 (VMOVDDUPrr (v2f64 (COPY_TO_REGCLASS FR64:$src, VR128)))), sub_xmm),
store <2 x float> <float 1.000000e+00, float 1.000000e+00>, <2 x float>* bitcast (i8* getelementptr inbounds ([49216 x i8], [49216 x i8]* @qa_, i64 0, i64 47372) to <2 x float>*), align 4
ret void
}
+
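+; The v4f32 result takes elements 6 and 3 of the load from %a2, element 4 of the
+; load from %a0, and element 0 of the load from %a1, which the AVX1 lowering
+; materializes as a broadcast (vpermilps + vinsertf128).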
+define <4 x float> @concat_of_broadcast_v4f32_v8f32(<8 x float>* %a0, <8 x float>* %a1, <8 x float>* %a2) {
+; AVX1-LABEL: concat_of_broadcast_v4f32_v8f32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovaps (%rdi), %ymm0
+; AVX1-NEXT: vmovaps (%rsi), %ymm1
+; AVX1-NEXT: vmovaps (%rdx), %ymm2
+; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[0,0,0,0]
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
+; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
+; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4],ymm2[5,6],ymm0[7]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm2[3,0],xmm0[0,0]
+; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,3],xmm1[2,0]
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: concat_of_broadcast_v4f32_v8f32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vmovaps (%rdi), %ymm0
+; AVX2-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
+; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,2,0]
+; AVX2-NEXT: vmovaps {{.*#+}} xmm1 = [6,7,4,3]
+; AVX2-NEXT: vblendps {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4],mem[5,6],ymm0[7]
+; AVX2-NEXT: vpermps %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+ %ld0 = load volatile <8 x float>, <8 x float>* %a0
+ %ld1 = load volatile <8 x float>, <8 x float>* %a1
+ %ld2 = load volatile <8 x float>, <8 x float>* %a2
+ %shuffle = shufflevector <8 x float> %ld0, <8 x float> %ld1, <8 x i32> <i32 undef, i32 undef, i32 undef, i32 undef, i32 4, i32 undef, i32 undef, i32 8>
+ %shuffle1 = shufflevector <8 x float> %ld2, <8 x float> %shuffle, <4 x i32> <i32 6, i32 15, i32 12, i32 3>
+ ret <4 x float> %shuffle1
+}