return true;
}
+/// Return true if every element in Mask, beginning
+/// from position Pos and ending in Pos+Size, falls within the specified
+/// sequential range [Low, Low+Size), is undef, or is zero.
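+/// For example, with Pos = 0, Size = 4 and Low = 0 the mask
+/// {0, 1, SM_SentinelUndef, SM_SentinelZero} passes, while {0, 2, 1, 3}
+/// does not (element 1 breaks the sequence).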
+static bool isSequentialOrUndefOrZeroInRange(ArrayRef<int> Mask, unsigned Pos,
+ unsigned Size, int Low) {
+ for (unsigned i = Pos, e = Pos + Size; i != e; ++i, ++Low)
+ if (!isUndefOrZero(Mask[i]) && Mask[i] != Low)
+ return false;
+ return true;
+}
+
/// Return true if the specified EXTRACT_SUBVECTOR operand specifies a vector
/// extract that is suitable for instructions that extract 128 or 256 bit vectors
static bool isVEXTRACTIndex(SDNode *N, unsigned vecWidth) {
return true;
}
+ // Attempt to blend with zero.
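+ // X86ISD::BLENDI takes an 8-bit immediate, so we can only encode blends of
+ // up to 8 elements.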
+ if (VT.getVectorNumElements() <= 8 &&
+ ((Subtarget.hasSSE41() && VT.is128BitVector()) ||
+ (Subtarget.hasAVX() && VT.is256BitVector()))) {
+ // Convert VT to a type compatible with X86ISD::BLENDI.
+ // TODO - add v16i16 support (requires lane duplication).
+ MVT ShuffleVT = VT;
+ if (Subtarget.hasAVX2()) {
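+ // With AVX2 we can use VPBLENDD, treating 64-bit elements as pairs of
+ // 32-bit elements.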
+ if (VT == MVT::v4i64)
+ ShuffleVT = MVT::v8i32;
+ else if (VT == MVT::v2i64)
+ ShuffleVT = MVT::v4i32;
+ } else {
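+ // Without AVX2 there is no per-dword integer blend - use PBLENDW for the
+ // 128-bit integer types and FP blends for the 256-bit types (AVX1 has no
+ // 256-bit integer blends).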
+ if (VT == MVT::v2i64 || VT == MVT::v4i32)
+ ShuffleVT = MVT::v8i16;
+ else if (VT == MVT::v4i64)
+ ShuffleVT = MVT::v4f64;
+ else if (VT == MVT::v8i32)
+ ShuffleVT = MVT::v8f32;
+ }
+
+ if (isSequentialOrUndefOrZeroInRange(Mask, /*Pos*/ 0, /*Size*/ Mask.size(),
+ /*Low*/ 0) &&
+ Mask.size() <= ShuffleVT.getVectorNumElements()) {
+ unsigned BlendMask = 0;
+ unsigned ShuffleSize = ShuffleVT.getVectorNumElements();
+ unsigned MaskRatio = ShuffleSize / Mask.size();
+
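+ // Each mask element covers MaskRatio lanes of the widened shuffle type;
+ // select the zero vector for every lane whose source element is undef or
+ // zero.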
+ for (unsigned i = 0; i != ShuffleSize; ++i)
+ if (Mask[i / MaskRatio] < 0)
+ BlendMask |= 1u << i;
+
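+ // Don't recreate an identical BLENDI root - that would just send the DAG
+ // combiner into an infinite loop.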
+ if (Root.getOpcode() != X86ISD::BLENDI ||
+ Root->getConstantOperandVal(2) != BlendMask) {
+ SDValue Zero = getZeroVector(ShuffleVT, Subtarget, DAG, DL);
+ Res = DAG.getBitcast(ShuffleVT, Input);
+ DCI.AddToWorklist(Res.getNode());
+ Res = DAG.getNode(X86ISD::BLENDI, DL, ShuffleVT, Res, Zero,
+ DAG.getConstant(BlendMask, DL, MVT::i8));
+ DCI.AddToWorklist(Res.getNode());
+ DCI.CombineTo(Root.getNode(), DAG.getBitcast(RootVT, Res),
+ /*AddTo*/ true);
+ return true;
+ }
+ }
+ }
+
// Don't try to re-form single instruction chains under any circumstances now
// that we've done encoding canonicalization for them.
if (Depth < 2)
; AVX-LABEL: insert_v4f64_0zz3:
; AVX: # BB#0:
; AVX-NEXT: vxorpd %ymm1, %ymm1, %ymm1
-; AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3]
-; AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3]
+; AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3]
; AVX-NEXT: retq
%1 = insertelement <4 x double> %a, double 0.0, i32 1
%2 = insertelement <4 x double> %1, double 0.0, i32 2
; AVX-LABEL: insert_v8f32_z12345z7:
; AVX: # BB#0:
; AVX-NEXT: vxorps %ymm1, %ymm1, %ymm1
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6,7]
-; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6],ymm0[7]
+; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5],ymm1[6],ymm0[7]
; AVX-NEXT: retq
%1 = insertelement <8 x float> %a, float 0.0, i32 0
%2 = insertelement <8 x float> %1, float 0.0, i32 6
; AVX1-LABEL: insert_v8i32_z12345z7:
; AVX1: # BB#0:
; AVX1-NEXT: vxorps %ymm1, %ymm1, %ymm1
-; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6,7]
-; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6],ymm0[7]
+; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5],ymm1[6],ymm0[7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: insert_v8i32_z12345z7:
; AVX2: # BB#0:
; AVX2-NEXT: vpxor %ymm1, %ymm1, %ymm1
-; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6,7]
-; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6],ymm0[7]
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5],ymm1[6],ymm0[7]
; AVX2-NEXT: retq
%1 = insertelement <8 x i32> %a, i32 0, i32 0
%2 = insertelement <8 x i32> %1, i32 0, i32 6
; SSE41-LABEL: insert_v8i16_z12345z7:
; SSE41: # BB#0:
; SSE41-NEXT: pxor %xmm1, %xmm1
-; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3,4,5,6,7]
-; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],xmm1[6],xmm0[7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3,4,5],xmm1[6],xmm0[7]
; SSE41-NEXT: retq
;
; AVX-LABEL: insert_v8i16_z12345z7:
; AVX: # BB#0:
; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3,4,5,6,7]
-; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],xmm1[6],xmm0[7]
+; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3,4,5],xmm1[6],xmm0[7]
; AVX-NEXT: retq
%1 = insertelement <8 x i16> %a, i16 0, i32 0
%2 = insertelement <8 x i16> %1, i16 0, i32 6
; SSE41-LABEL: insert_v16i16_z12345z789ABZDEz:
; SSE41: # BB#0:
; SSE41-NEXT: pxor %xmm2, %xmm2
-; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3,4,5,6,7]
-; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],xmm2[6],xmm0[7]
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3,4,5],xmm2[6],xmm0[7]
; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,6],xmm2[7]
; SSE41-NEXT: retq
;
define <4 x float> @insertps_undef_input1(<4 x float> %a0, <4 x float> %a1) {
; SSE-LABEL: insertps_undef_input1:
; SSE: # BB#0:
-; SSE-NEXT: insertps {{.*#+}} xmm0 = zero,zero,zero,xmm0[3]
+; SSE-NEXT: xorps %xmm1, %xmm1
+; SSE-NEXT: blendps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3]
; SSE-NEXT: retq
;
; AVX-LABEL: insertps_undef_input1:
; AVX: # BB#0:
-; AVX-NEXT: vinsertps {{.*#+}} xmm0 = zero,zero,zero,xmm0[3]
+; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3]
; AVX-NEXT: retq
%res0 = fadd <4 x float> %a1, <float 1.0, float 1.0, float 1.0, float 1.0>
%res1 = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a0, <4 x float> %res0, i8 21)
; X32-AVX-LABEL: merge_8f32_2f32_23z5:
; X32-AVX: # BB#0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX-NEXT: vxorpd %ymm0, %ymm0, %ymm0
-; X32-AVX-NEXT: vblendpd {{.*#+}} ymm0 = mem[0,1],ymm0[2],mem[3]
+; X32-AVX-NEXT: vxorps %ymm0, %ymm0, %ymm0
+; X32-AVX-NEXT: vblendps {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5],mem[6,7]
; X32-AVX-NEXT: retl
%ptr0 = getelementptr inbounds <2 x float>, <2 x float>* %ptr, i64 2
%ptr1 = getelementptr inbounds <2 x float>, <2 x float>* %ptr, i64 3
; CHECK-NEXT: pmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero
; CHECK-NEXT: movl $32, %eax
; CHECK-NEXT: pinsrd $0, %eax, %xmm0
-; CHECK-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,2,3],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; CHECK-NEXT: movq %xmm0, (%esp)
+; CHECK-NEXT: pxor %xmm1, %xmm1
+; CHECK-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3,4,5,6,7]
+; CHECK-NEXT: movq %xmm1, (%esp)
; CHECK-NEXT: movq (%esp), %mm0
; CHECK-NEXT: addl $20, %esp
; CHECK-NEXT: retl
; SSE41-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; SSE41-NEXT: xorps %xmm1, %xmm1
-; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5,6,7]
+; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; SSE41-NEXT: retq
;
; AVX1-LABEL: shuffle_v2i64_bitcast_z123:
; AVX1-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX1-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5,6,7]
+; AVX1-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v2i64_bitcast_z123:
; AVX1-LABEL: shuf_zext_4i32_to_4i64:
; AVX1: # BB#0: # %entry
; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero
-; AVX1-NEXT: vxorpd %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vblendpd {{.*#+}} xmm0 = xmm2[0],xmm0[1]
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0,1,2,3],xmm0[4,5,6,7]
; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,0,3,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
; AVX1-LABEL: shuf_zext_4i32_to_4i64_offset1:
; AVX1: # BB#0: # %entry
; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = xmm0[3],zero,zero,zero
-; AVX1-NEXT: vxorps %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vblendps {{.*#+}} xmm0 = xmm2[0,1],xmm0[2],xmm2[3]
+; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0,1,2,3],xmm0[4,5],xmm2[6,7]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;