From: Simon Pilgrim Date: Sun, 7 Nov 2021 12:59:35 +0000 (+0000) Subject: [X86][AVX] Add missing X86ISD::VBROADCAST(v4f32 -> v8f32) isel pattern for AVX1 targets X-Git-Tag: upstream/15.0.7~26528 X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=b5ef56f0bc9b51bd026a643d4a541af9993aaf89;p=platform%2Fupstream%2Fllvm.git [X86][AVX] Add missing X86ISD::VBROADCAST(v4f32 -> v8f32) isel pattern for AVX1 targets D109434 addressed the v2f64 -> v4f64 case, an internal test has found an equivalent crash for the v4f32 -> v8f32 case. --- diff --git a/llvm/lib/Target/X86/X86InstrSSE.td b/llvm/lib/Target/X86/X86InstrSSE.td index b918b57..035f139 100644 --- a/llvm/lib/Target/X86/X86InstrSSE.td +++ b/llvm/lib/Target/X86/X86InstrSSE.td @@ -7630,6 +7630,10 @@ let Predicates = [HasAVX1Only] in { (VINSERTF128rr (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)), (v4f32 (VPERMILPSri (v4f32 (COPY_TO_REGCLASS FR32:$src, VR128)), 0)), sub_xmm), (v4f32 (VPERMILPSri (v4f32 (COPY_TO_REGCLASS FR32:$src, VR128)), 0)), 1)>; + def : Pat<(v8f32 (X86VBroadcast v4f32:$src)), + (VINSERTF128rr (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)), + (v4f32 (VPERMILPSri VR128:$src, 0)), sub_xmm), + (v4f32 (VPERMILPSri VR128:$src, 0)), 1)>; def : Pat<(v4f64 (X86VBroadcast FR64:$src)), (VINSERTF128rr (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)), (v2f64 (VMOVDDUPrr (v2f64 (COPY_TO_REGCLASS FR64:$src, VR128)))), sub_xmm), diff --git a/llvm/test/CodeGen/X86/combine-concatvectors.ll b/llvm/test/CodeGen/X86/combine-concatvectors.ll index 3dffeae..c2f8827 100644 --- a/llvm/test/CodeGen/X86/combine-concatvectors.ll +++ b/llvm/test/CodeGen/X86/combine-concatvectors.ll @@ -85,3 +85,38 @@ ifmerge.1298: ; preds = %loop.4942 store <2 x float> , <2 x float>* bitcast (i8* getelementptr inbounds ([49216 x i8], [49216 x i8]* @qa_, i64 0, i64 47372) to <2 x float>*), align 4 ret void } + +define <4 x float> @concat_of_broadcast_v4f32_v8f32(<8 x float>* %a0, <8 x float>* %a1, <8 x float>* %a2) { +; AVX1-LABEL: concat_of_broadcast_v4f32_v8f32: +; 
AVX1: # %bb.0:
+; AVX1-NEXT: vmovaps (%rdi), %ymm0
+; AVX1-NEXT: vmovaps (%rsi), %ymm1
+; AVX1-NEXT: vmovaps (%rdx), %ymm2
+; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[0,0,0,0]
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
+; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
+; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4],ymm2[5,6],ymm0[7]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm2[3,0],xmm0[0,0]
+; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,3],xmm1[2,0]
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: concat_of_broadcast_v4f32_v8f32:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vmovaps (%rdi), %ymm0
+; AVX2-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5]
+; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,2,0]
+; AVX2-NEXT: vmovaps {{.*#+}} xmm1 = [6,7,4,3]
+; AVX2-NEXT: vblendps {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4],mem[5,6],ymm0[7]
+; AVX2-NEXT: vpermps %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+ %ld0 = load volatile <8 x float>, <8 x float>* %a0
+ %ld1 = load volatile <8 x float>, <8 x float>* %a1
+ %ld2 = load volatile <8 x float>, <8 x float>* %a2
+ %shuffle = shufflevector <8 x float> %ld0, <8 x float> %ld1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 8, i32 8>
+ %shuffle1 = shufflevector <8 x float> %ld2, <8 x float> %shuffle, <4 x i32> <i32 6, i32 15, i32 12, i32 3>
+ ret <4 x float> %shuffle1
+}