if (SimplifyDemandedVectorElts(Src, SrcElts, SrcUndef, SrcZero, TLO,
Depth + 1))
return true;
+ // Aggressively peek through src to get at the demanded elt.
+ // TODO - we should do this for all target/faux shuffle ops.
+ APInt SrcBits = APInt::getAllOnesValue(SrcVT.getScalarSizeInBits());
+ if (SDValue NewSrc = SimplifyMultipleUseDemandedBits(Src, SrcBits, SrcElts,
+ TLO.DAG, Depth + 1))
+ return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, NewSrc));
break;
}
case X86ISD::VPERMV: {
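
The hunk above lets a VBROADCAST bypass computations in its source vector that only write lanes the broadcast never reads: the broadcast demands just element 0, so e.g. a preceding insertion into lane 2 can be skipped for this one use (and the insertion may then become dead). Below is a minimal, self-contained sketch of that idea; the names (Node, peekThroughDemandedElts) are hypothetical illustrations, not LLVM API:

// Hypothetical toy model of demanded-elements peek-through; not LLVM code.
#include <cstdint>
#include <iostream>

// A toy DAG node: either a leaf (e.g. a load) or an insertion of a scalar
// into one lane of a source vector.
struct Node {
  enum Kind { Leaf, InsertElt } kind;
  const Node *src = nullptr; // vector operand for InsertElt
  unsigned lane = 0;         // lane written by InsertElt
  const char *name = "";
};

// Return a simpler operand that produces the same values for every lane in
// DemandedElts, or null if no simplification applies. This mirrors the
// spirit of SimplifyMultipleUseDemandedBits: the node itself is not
// rewritten (it may have other users); only this use bypasses it.
const Node *peekThroughDemandedElts(const Node *N, uint64_t DemandedElts) {
  if (N->kind == Node::InsertElt && !(DemandedElts & (1ull << N->lane)))
    return N->src; // the inserted lane is never read through this use
  return nullptr;
}

int main() {
  Node load{Node::Leaf, nullptr, 0, "load"};
  Node ins{Node::InsertElt, &load, 2, "insert into lane 2"};

  // A broadcast of lane 0 demands only element 0 of its source.
  uint64_t demanded = 1ull << 0;
  if (const Node *simpler = peekThroughDemandedElts(&ins, demanded))
    std::cout << "broadcast can read from: " << simpler->name << "\n";
}

In the patch itself, SimplifyMultipleUseDemandedBits performs this peek-through generically over the real SelectionDAG. The splat_v3i32 test diffs below show the effect: the vpinsrd $2 insertion drops out of the broadcast chain, and in the AVX2-SLOW case the remaining ops re-select into the FP domain (vmovsd/vxorps/vblendps/vbroadcastss).
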
;
; AVX2-SLOW-LABEL: splat_v3i32:
; AVX2-SLOW: # %bb.0:
-; AVX2-SLOW-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
-; AVX2-SLOW-NEXT: vpinsrd $2, 8(%rdi), %xmm0, %xmm1
-; AVX2-SLOW-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0],ymm0[1],ymm2[2,3,4,5,6,7]
-; AVX2-SLOW-NEXT: vpbroadcastd %xmm1, %ymm1
-; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1],ymm1[2],ymm2[3,4,5,6,7]
+; AVX2-SLOW-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX2-SLOW-NEXT: vxorps %xmm2, %xmm2, %xmm2
+; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0],ymm1[1],ymm2[2,3,4,5,6,7]
+; AVX2-SLOW-NEXT: vbroadcastss %xmm1, %ymm1
+; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1],ymm1[2],ymm2[3,4,5,6,7]
; AVX2-SLOW-NEXT: retq
;
; AVX2-FAST-LABEL: splat_v3i32:
; AVX2-FAST: # %bb.0:
-; AVX2-FAST-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
-; AVX2-FAST-NEXT: vpinsrd $2, 8(%rdi), %xmm0, %xmm1
+; AVX2-FAST-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX2-FAST-NEXT: vpxor %xmm0, %xmm0, %xmm0
; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3,4,5,6,7]
; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm1 = zero,zero,zero,zero,zero,zero,zero,zero,ymm1[0,1,2,3],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero