bool MaskedValueIsZero(SDValue Op, const APInt &Mask,
const APInt &DemandedElts, unsigned Depth = 0) const;
- /// Return true if the DemandedElts of the vector Op are all zero. We
- /// use this predicate to simplify operations downstream.
- bool MaskedElementsAreZero(SDValue Op, const APInt &DemandedElts,
- unsigned Depth = 0) const;
-
/// Return true if '(Op & Mask) == Mask'.
/// Op and Mask are known to be the same type.
bool MaskedValueIsAllOnes(SDValue Op, const APInt &Mask,
return Mask.isSubsetOf(computeKnownBits(V, DemandedElts, Depth).Zero);
}
-/// Return true if the DemandedElts of the vector Op are all zero. We
-/// use this predicate to simplify operations downstream.
-bool SelectionDAG::MaskedElementsAreZero(SDValue Op, const APInt &DemandedElts,
- unsigned Depth) const {
- assert(Op.getValueType().isFixedLengthVector() &&
- Op.getValueType().getVectorNumElements() ==
- DemandedElts.getBitWidth() &&
- "MaskedElementsAreZero vector size mismatch");
- unsigned BitWidth = Op.getScalarValueSizeInBits();
- APInt DemandedBits = APInt::getAllOnesValue(BitWidth);
- return MaskedValueIsZero(Op, DemandedBits, DemandedElts, Depth);
-}
-
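// NOTE: a minimal sketch, not part of this patch, of how a caller can ask the
// same question once the helper is gone (using only the names declared above):
//   APInt DemandedBits = APInt::getAllOnesValue(Op.getScalarValueSizeInBits());
//   if (DAG.MaskedValueIsZero(Op, DemandedBits, DemandedElts))
//     ; // all demanded elements of Op are known to be zero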
/// MaskedValueIsAllOnes - Return true if '(Op & Mask) == Mask'.
bool SelectionDAG::MaskedValueIsAllOnes(SDValue V, const APInt &Mask,
unsigned Depth) const {
}
}
- // See if this is a blend with zero - in which case check if the zero'd
- // elements are already zero.
- if (isSequentialOrUndefOrZeroInRange(Mask, 0, NumMaskElts, 0)) {
- assert(!KnownZero.isNullValue() && "Shuffle has no zero elements");
- SDValue NewV1 = CanonicalizeShuffleInput(MaskVT, V1);
- if (DAG.MaskedElementsAreZero(NewV1, KnownZero))
- return DAG.getBitcast(RootVT, NewV1);
- }
-
SDValue NewV1 = V1; // Save operand in case early exit happens.
if (matchUnaryShuffle(MaskVT, Mask, AllowFloatDomain, AllowIntDomain, NewV1,
DL, DAG, Subtarget, Shuffle, ShuffleSrcVT,
--- /dev/null
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx | FileCheck %s
+
+; @fptoui_zext is legal to optimize to a single vcvttps2dq: if one of the i8
+; results of the fptoui is poison, the corresponding i32 result of the zext is
+; poison as well. We currently don't implement this optimization (see the
+; sketch after the function).
+
+define <16 x i8> @fptoui_zext(<4 x float> %arg) {
+; CHECK-LABEL: fptoui_zext:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vcvttps2dq %xmm0, %xmm0
+; CHECK-NEXT: vpackusdw %xmm0, %xmm0, %xmm0
+; CHECK-NEXT: vpackuswb %xmm0, %xmm0, %xmm0
+; CHECK-NEXT: vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
+; CHECK-NEXT: retq
+ %f = fptoui <4 x float> %arg to <4 x i8>
+ %z = zext <4 x i8> %f to <4 x i32>
+ %b = bitcast <4 x i32> %z to <16 x i8>
+ ret <16 x i8> %b
+}
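+; A sketch of the ideal lowering under that reasoning (not produced today):
+;   vcvttps2dq %xmm0, %xmm0
+;   retq
+; For in-range inputs the low byte of each i32 equals the fptoui result and
+; the upper bytes are zero, matching the zext; out-of-range inputs make the
+; corresponding reference bytes poison, so any value is acceptable there.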
+
+; In @fptoui_shuffle, we must preserve the vpand for correctness. Only the
+; i8 values extracted from %s may be poison; the values taken from the
+; zeroinitializer are not, so they must remain zero (see the note after the
+; function).
+
+define <16 x i8> @fptoui_shuffle(<4 x float> %arg) {
+; CHECK-LABEL: fptoui_shuffle:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vcvttps2dq %xmm0, %xmm0
+; CHECK-NEXT: vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; CHECK-NEXT: retq
+ %f = fptoui <4 x float> %arg to <4 x i8>
+ %s = shufflevector <4 x i8> %f, <4 x i8> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %ss = shufflevector <16 x i8> %s, <16 x i8> zeroinitializer, <16 x i32> <i32 0, i32 17, i32 18, i32 19, i32 1, i32 21, i32 22, i32 23, i32 2, i32 25, i32 26, i32 27, i32 3, i32 29, i32 30, i32 31>
+ ret <16 x i8> %ss
+}
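+; The and-constant presumably keeps only the low byte of each i32 lane
+; (0xFF per dword): those bytes carry the fptoui results, while the other
+; twelve bytes come from the zeroinitializer and must stay zero rather than
+; inherit poison.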
; AVX1-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX1-NEXT: vpinsrd $2, 8(%rdi), %xmm0, %xmm1
; AVX1-NEXT: vxorps %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0],ymm1[1],ymm2[2,3,4,5,6,7]
-; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[0,1,0,1]
+; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0],ymm0[1],ymm2[2,3,4,5,6,7]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1],ymm1[2],ymm2[3,4,5,6,7]
; AVX1-NEXT: retq
;
; XOP-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; XOP-NEXT: vpinsrd $2, 8(%rdi), %xmm0, %xmm1
; XOP-NEXT: vxorps %xmm2, %xmm2, %xmm2
-; XOP-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0],ymm1[1],ymm2[2,3,4,5,6,7]
-; XOP-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[0,1,0,1]
+; XOP-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0],ymm0[1],ymm2[2,3,4,5,6,7]
+; XOP-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
; XOP-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1],ymm1[2],ymm2[3,4,5,6,7]
; XOP-NEXT: retq
%1 = load <3 x i32>, <3 x i32>* %ptr, align 1
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-SSE2-NEXT: psrad $1, %xmm0
+; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-SSE2-NEXT: retq
;
-; X64-AVX-LABEL: test_128_i64_x_2_18446744065119617024_mask_ashr_1:
-; X64-AVX: # %bb.0:
-; X64-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; X64-AVX-NEXT: vpsrad $1, %xmm0, %xmm0
-; X64-AVX-NEXT: retq
+; X64-AVX1-LABEL: test_128_i64_x_2_18446744065119617024_mask_ashr_1:
+; X64-AVX1: # %bb.0:
+; X64-AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT: vpsrad $1, %xmm0, %xmm0
+; X64-AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; X64-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5],xmm0[6,7]
+; X64-AVX1-NEXT: retq
+;
+; X64-AVX2-LABEL: test_128_i64_x_2_18446744065119617024_mask_ashr_1:
+; X64-AVX2: # %bb.0:
+; X64-AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX2-NEXT: vpsrad $1, %xmm0, %xmm0
+; X64-AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; X64-AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3]
+; X64-AVX2-NEXT: retq
%t0 = and <2 x i64> %a0, <i64 18446744065119617024, i64 18446744065119617024>
%t1 = ashr <2 x i64> %t0, <i64 1, i64 1>
ret <2 x i64> %t1
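; Note: vpblendd is AVX2-only, which is presumably why the X64-AVX checks are
; split into X64-AVX1 (vpblendw) and X64-AVX2 (vpblendd) variants here.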