From beeacac0b32a94abd813148ff142152f74d0f78a Mon Sep 17 00:00:00 2001 From: Chandler Carruth Date: Mon, 7 Jul 2014 19:03:32 +0000 Subject: [PATCH] [x86] Revert r212324 which was too aggressive w.r.t. allowing undef lanes in vector splats. The core problem here is that undef lanes can't *unilaterally* be considered to contribute to splats. Their handling needs to be more cautious. There is also a reported failure of the nightly testers (thanks Tobias!) that may well stem from the same core issue. I'm going to fix this theoretical issue, factor the APIs a bit better, and then verify that I don't see anything bad with Tobias's reduction from the test suite before recommitting. Original commit message for r212324: [x86] Generalize BuildVectorSDNode::getConstantSplatValue to work for any constant, constant FP, or undef splat and to tolerate any undef lanes in a splat, then replace all uses of isSplatVector in X86's lowering with it. This fixes issues where undef lanes in an otherwise splat vector would prevent the splat logic from firing. It is a touch more awkward to use this interface, but it is much more accurate. Suggestions for better interface structuring welcome. With this fix, the code generated with the widening legalization strategy for widen_cast-4.ll is *dramatically* improved as the special lowering strategies for a v16i8 SRA kick in even though the high lanes are undef. We also get a slightly different choice for broadcasting an aligned memory location, and use vpshufd instead of vbroadcastss. This looks like a minor win for pipelining and domain crossing, but a minor loss for the number of micro-ops. I suspect it's a wash, but folks can easily tweak the lowering if they want. 
llvm-svn: 212475 --- llvm/include/llvm/CodeGen/SelectionDAGNodes.h | 8 +- llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 13 +- llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp | 28 ++--- llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp | 34 +++-- llvm/lib/Target/X86/X86ISelLowering.cpp | 151 +++++++++++------------ llvm/test/CodeGen/X86/vector-gep.ll | 2 +- llvm/test/CodeGen/X86/widen_cast-4.ll | 25 +--- 7 files changed, 113 insertions(+), 148 deletions(-) diff --git a/llvm/include/llvm/CodeGen/SelectionDAGNodes.h b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h index 0943fa1..5df4067 100644 --- a/llvm/include/llvm/CodeGen/SelectionDAGNodes.h +++ b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h @@ -1584,10 +1584,10 @@ public: bool isBigEndian = false) const; /// getConstantSplatValue - Check if this is a constant splat, and if so, - /// return the splatted value. Otherwise return a null SDValue. This is - /// a simpler form of isConstantSplat. Get the constant splat only if you - /// care about the splat value. - SDValue getConstantSplatValue() const; + /// return the splat value only if it is a ConstantSDNode. Otherwise + /// return nullptr. This is a simpler form of isConstantSplat. + /// Get the constant splat only if you care about the splat value. + ConstantSDNode *getConstantSplatValue() const; bool isConstant() const; diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp index 9a91dcc..7198203 100644 --- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp @@ -654,12 +654,13 @@ static ConstantSDNode *isConstOrConstSplat(SDValue N) { if (ConstantSDNode *CN = dyn_cast(N)) return CN; - if (BuildVectorSDNode *BV = dyn_cast(N)) - if (SDValue Splat = BV->getConstantSplatValue()) - if (auto *CN = dyn_cast(Splat)) - // BuildVectors can truncate their operands. Ignore that case here. 
- if (CN->getValueType(0) == N.getValueType().getScalarType()) - return CN; + if (BuildVectorSDNode *BV = dyn_cast(N)) { + ConstantSDNode *CN = BV->getConstantSplatValue(); + + // BuildVectors can truncate their operands. Ignore that case here. + if (CN && CN->getValueType(0) == N.getValueType().getScalarType()) + return CN; + } return nullptr; } diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp index fb7d1b1..3a8a5f9 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp @@ -6603,28 +6603,16 @@ bool BuildVectorSDNode::isConstantSplat(APInt &SplatValue, return true; } -SDValue BuildVectorSDNode::getConstantSplatValue() const { - SDValue Splatted; - for (unsigned i = 0, e = getNumOperands(); i != e; ++i) { - SDValue Op = getOperand(i); - if (Op.getOpcode() == ISD::UNDEF) - continue; - if (Op.getOpcode() != ISD::Constant && Op.getOpcode() != ISD::ConstantFP) - return SDValue(); - - if (!Splatted) - Splatted = Op; - else if (Splatted != Op) - return SDValue(); - } +ConstantSDNode *BuildVectorSDNode::getConstantSplatValue() const { + SDValue Op0 = getOperand(0); + if (Op0.getOpcode() != ISD::Constant) + return nullptr; - if (!Splatted) { - assert(getOperand(0).getOpcode() == ISD::UNDEF && - "Can only have a splat without a constant for all undefs."); - return getOperand(0); - } + for (unsigned i = 1, e = getNumOperands(); i != e; ++i) + if (getOperand(i) != Op0) + return nullptr; - return Splatted; + return cast(Op0); } bool BuildVectorSDNode::isConstant() const { diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp index 1b3e428..ad91d4a 100644 --- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp @@ -1152,15 +1152,14 @@ bool TargetLowering::isConstTrueVal(const SDNode *N) const { bool IsVec = false; const ConstantSDNode *CN = dyn_cast(N); 
- if (!CN) - if (auto *BV = dyn_cast(N)) - if (SDValue Splat = BV->getConstantSplatValue()) - if (auto *SplatCN = dyn_cast(Splat)) { - IsVec = true; - CN = SplatCN; - } - if (!CN) - return false; + if (!CN) { + const BuildVectorSDNode *BV = dyn_cast(N); + if (!BV) + return false; + + IsVec = true; + CN = BV->getConstantSplatValue(); + } switch (getBooleanContents(IsVec)) { case UndefinedBooleanContent: @@ -1180,15 +1179,14 @@ bool TargetLowering::isConstFalseVal(const SDNode *N) const { bool IsVec = false; const ConstantSDNode *CN = dyn_cast(N); - if (!CN) - if (auto *BV = dyn_cast(N)) - if (SDValue Splat = BV->getConstantSplatValue()) - if (auto *SplatCN = dyn_cast(Splat)) { - IsVec = true; - CN = SplatCN; - } - if (!CN) - return false; + if (!CN) { + const BuildVectorSDNode *BV = dyn_cast(N); + if (!BV) + return false; + + IsVec = true; + CN = BV->getConstantSplatValue(); + } if (getBooleanContents(IsVec) == UndefinedBooleanContent) return !CN->getAPIntValue()[0]; diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp index 61accea..4748daa 100644 --- a/llvm/lib/Target/X86/X86ISelLowering.cpp +++ b/llvm/lib/Target/X86/X86ISelLowering.cpp @@ -4858,6 +4858,19 @@ static bool ShouldXformToMOVLP(SDNode *V1, SDNode *V2, return true; } +/// isSplatVector - Returns true if N is a BUILD_VECTOR node whose elements are +/// all the same. +static bool isSplatVector(SDNode *N) { + if (N->getOpcode() != ISD::BUILD_VECTOR) + return false; + + SDValue SplatValue = N->getOperand(0); + for (unsigned i = 1, e = N->getNumOperands(); i != e; ++i) + if (N->getOperand(i) != SplatValue) + return false; + return true; +} + /// isZeroShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved /// to an zero vector. 
/// FIXME: move to dag combiner / method on ShuffleVectorSDNode @@ -5766,20 +5779,17 @@ static SDValue LowerVectorBroadcast(SDValue Op, const X86Subtarget* Subtarget, return SDValue(); case ISD::BUILD_VECTOR: { - auto *BVOp = cast(Op.getNode()); // The BUILD_VECTOR node must be a splat. - SDValue Splat = BVOp->getConstantSplatValue(); - if (!Splat) + if (!isSplatVector(Op.getNode())) return SDValue(); - Ld = Splat; + Ld = Op.getOperand(0); ConstSplatVal = (Ld.getOpcode() == ISD::Constant || Ld.getOpcode() == ISD::ConstantFP); // The suspected load node has several users. Make sure that all // of its users are from the BUILD_VECTOR node. // Constants may have multiple users. - // FIXME: This doesn't make sense if the build vector contains undefs. if (!ConstSplatVal && !Ld->hasNUsesOfValue(VT.getVectorNumElements(), 0)) return SDValue(); break; @@ -9406,12 +9416,8 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const { bool Commuted = false; // FIXME: This should also accept a bitcast of a splat? Be careful, not // 1,1,1,1 -> v8i16 though. - if (auto *BVOp = dyn_cast(V1.getNode())) - if (BVOp->getConstantSplatValue()) - V1IsSplat = true; - if (auto *BVOp = dyn_cast(V2.getNode())) - if (BVOp->getConstantSplatValue()) - V2IsSplat = true; + V1IsSplat = isSplatVector(V1.getNode()); + V2IsSplat = isSplatVector(V2.getNode()); // Canonicalize the splat or undef, if present, to be on the RHS. if (!V2IsUndef && V1IsSplat && !V2IsSplat) { @@ -15206,11 +15212,10 @@ static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG, SDValue Amt = Op.getOperand(1); // Optimize shl/srl/sra with constant shift amount. - if (auto *BVAmt = dyn_cast(Amt)) { - if (SDValue Splat = BVAmt->getConstantSplatValue()) { - uint64_t ShiftAmt = Splat.getOpcode() == ISD::UNDEF - ? 
0 - : cast(Splat)->getZExtValue(); + if (isSplatVector(Amt.getNode())) { + SDValue SclrAmt = Amt->getOperand(0); + if (ConstantSDNode *C = dyn_cast(SclrAmt)) { + uint64_t ShiftAmt = C->getZExtValue(); if (VT == MVT::v2i64 || VT == MVT::v4i32 || VT == MVT::v8i16 || (Subtarget->hasInt256() && @@ -19459,35 +19464,27 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG, Other->getOpcode() == ISD::SUB && DAG.isEqualTo(OpRHS, CondRHS)) return DAG.getNode(X86ISD::SUBUS, DL, VT, OpLHS, OpRHS); - if (auto *OpRHSBV = dyn_cast(OpRHS)) { - SDValue OpRHSSplat = OpRHSBV->getConstantSplatValue(); - auto *OpRHSSplatConst = dyn_cast(OpRHSSplat); - if (auto *CondRHSBV = dyn_cast(CondRHS)) { - // If the RHS is a constant we have to reverse the const - // canonicalization. - // x > C-1 ? x+-C : 0 --> subus x, C - SDValue CondRHSSplat = CondRHSBV->getConstantSplatValue(); - auto *CondRHSSplatConst = dyn_cast(CondRHSSplat); - if (CC == ISD::SETUGT && Other->getOpcode() == ISD::ADD && - CondRHSSplatConst && OpRHSSplatConst) { - APInt A = OpRHSSplatConst->getAPIntValue(); - if (CondRHSSplatConst->getAPIntValue() == -A - 1) - return DAG.getNode(X86ISD::SUBUS, DL, VT, OpLHS, - DAG.getConstant(-A, VT)); - } - } + // If the RHS is a constant we have to reverse the const canonicalization. + // x > C-1 ? x+-C : 0 --> subus x, C + if (CC == ISD::SETUGT && Other->getOpcode() == ISD::ADD && + isSplatVector(CondRHS.getNode()) && isSplatVector(OpRHS.getNode())) { + APInt A = cast(OpRHS.getOperand(0))->getAPIntValue(); + if (CondRHS.getConstantOperandVal(0) == -A-1) + return DAG.getNode(X86ISD::SUBUS, DL, VT, OpLHS, + DAG.getConstant(-A, VT)); + } - // Another special case: If C was a sign bit, the sub has been - // canonicalized into a xor. - // FIXME: Would it be better to use computeKnownBits to determine - // whether it's safe to decanonicalize the xor? - // x s< 0 ? 
x^C : 0 --> subus x, C - if (CC == ISD::SETLT && Other->getOpcode() == ISD::XOR && - ISD::isBuildVectorAllZeros(CondRHS.getNode()) && OpRHSSplatConst) { - APInt A = OpRHSSplatConst->getAPIntValue(); - if (A.isSignBit()) - return DAG.getNode(X86ISD::SUBUS, DL, VT, OpLHS, OpRHS); - } + // Another special case: If C was a sign bit, the sub has been + // canonicalized into a xor. + // FIXME: Would it be better to use computeKnownBits to determine whether + // it's safe to decanonicalize the xor? + // x s< 0 ? x^C : 0 --> subus x, C + if (CC == ISD::SETLT && Other->getOpcode() == ISD::XOR && + ISD::isBuildVectorAllZeros(CondRHS.getNode()) && + isSplatVector(OpRHS.getNode())) { + APInt A = cast(OpRHS.getOperand(0))->getAPIntValue(); + if (A.isSignBit()) + return DAG.getNode(X86ISD::SUBUS, DL, VT, OpLHS, OpRHS); } } } @@ -20196,16 +20193,16 @@ static SDValue PerformSHLCombine(SDNode *N, SelectionDAG &DAG) { // vector operations in many cases. Also, on sandybridge ADD is faster than // shl. // (shl V, 1) -> add V,V - if (auto *N1BV = dyn_cast(N1)) - if (SDValue N1Splat = N1BV->getConstantSplatValue()) { - assert(N0.getValueType().isVector() && "Invalid vector shift type"); - // We shift all of the values by one. In many cases we do not have - // hardware support for this operation. This is better expressed as an ADD - // of two values. - if (N1Splat.getOpcode() == ISD::Constant && - cast(N1Splat)->getZExtValue() == 1) - return DAG.getNode(ISD::ADD, SDLoc(N), VT, N0, N0); + if (isSplatVector(N1.getNode())) { + assert(N0.getValueType().isVector() && "Invalid vector shift type"); + ConstantSDNode *N1C = dyn_cast(N1->getOperand(0)); + // We shift all of the values by one. In many cases we do not have + // hardware support for this operation. This is better expressed as an ADD + // of two values. 
+ if (N1C && (1 == N1C->getZExtValue())) { + return DAG.getNode(ISD::ADD, SDLoc(N), VT, N0, N0); } + } return SDValue(); } @@ -20224,19 +20221,20 @@ static SDValue performShiftToAllZeros(SDNode *N, SelectionDAG &DAG, SDValue Amt = N->getOperand(1); SDLoc DL(N); - if (auto *AmtBV = dyn_cast(Amt)) - if (SDValue AmtSplat = AmtBV->getConstantSplatValue()) - if (auto *AmtConst = dyn_cast(AmtSplat)) { - APInt ShiftAmt = AmtConst->getAPIntValue(); - unsigned MaxAmount = VT.getVectorElementType().getSizeInBits(); - - // SSE2/AVX2 logical shifts always return a vector of 0s - // if the shift amount is bigger than or equal to - // the element size. The constant shift amount will be - // encoded as a 8-bit immediate. - if (ShiftAmt.trunc(8).uge(MaxAmount)) - return getZeroVector(VT, Subtarget, DAG, DL); - } + if (isSplatVector(Amt.getNode())) { + SDValue SclrAmt = Amt->getOperand(0); + if (ConstantSDNode *C = dyn_cast(SclrAmt)) { + APInt ShiftAmt = C->getAPIntValue(); + unsigned MaxAmount = VT.getVectorElementType().getSizeInBits(); + + // SSE2/AVX2 logical shifts always return a vector of 0s + // if the shift amount is bigger than or equal to + // the element size. The constant shift amount will be + // encoded as a 8-bit immediate. + if (ShiftAmt.trunc(8).uge(MaxAmount)) + return getZeroVector(VT, Subtarget, DAG, DL); + } + } return SDValue(); } @@ -20430,10 +20428,9 @@ static SDValue WidenMaskArithmetic(SDNode *N, SelectionDAG &DAG, // The right side has to be a 'trunc' or a constant vector. 
bool RHSTrunc = N1.getOpcode() == ISD::TRUNCATE; - SDValue RHSConstSplat; - if (auto *RHSBV = dyn_cast(N1)) - RHSConstSplat = RHSBV->getConstantSplatValue(); - if (!RHSTrunc && !RHSConstSplat) + bool RHSConst = (isSplatVector(N1.getNode()) && + isa(N1->getOperand(0))); + if (!RHSTrunc && !RHSConst) return SDValue(); const TargetLowering &TLI = DAG.getTargetLoweringInfo(); @@ -20443,9 +20440,9 @@ static SDValue WidenMaskArithmetic(SDNode *N, SelectionDAG &DAG, // Set N0 and N1 to hold the inputs to the new wide operation. N0 = N0->getOperand(0); - if (RHSConstSplat) { + if (RHSConst) { N1 = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT.getScalarType(), - RHSConstSplat); + N1->getOperand(0)); SmallVector C(WideVT.getVectorNumElements(), N1); N1 = DAG.getNode(ISD::BUILD_VECTOR, DL, WideVT, C); } else if (RHSTrunc) { @@ -20591,10 +20588,12 @@ static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG, unsigned EltBits = MaskVT.getVectorElementType().getSizeInBits(); unsigned SraAmt = ~0; if (Mask.getOpcode() == ISD::SRA) { - if (auto *AmtBV = dyn_cast(Mask.getOperand(1))) - if (SDValue AmtSplat = AmtBV->getConstantSplatValue()) - if (auto *AmtConst = dyn_cast(AmtSplat)) - SraAmt = AmtConst->getZExtValue(); + SDValue Amt = Mask.getOperand(1); + if (isSplatVector(Amt.getNode())) { + SDValue SclrAmt = Amt->getOperand(0); + if (ConstantSDNode *C = dyn_cast(SclrAmt)) + SraAmt = C->getZExtValue(); + } } else if (Mask.getOpcode() == X86ISD::VSRAI) { SDValue SraC = Mask.getOperand(1); SraAmt = cast(SraC)->getZExtValue(); diff --git a/llvm/test/CodeGen/X86/vector-gep.ll b/llvm/test/CodeGen/X86/vector-gep.ll index 291cc11..3f7ee3a 100644 --- a/llvm/test/CodeGen/X86/vector-gep.ll +++ b/llvm/test/CodeGen/X86/vector-gep.ll @@ -5,7 +5,7 @@ define <4 x i32*> @AGEP0(i32* %ptr) nounwind { entry: ;CHECK-LABEL: AGEP0 -;CHECK: vpshufd {{.*}} xmm0 = mem[0,0,0,0] +;CHECK: vbroadcast ;CHECK-NEXT: vpaddd ;CHECK-NEXT: ret %vecinit.i = insertelement <4 x i32*> undef, i32* %ptr, i32 0 diff --git 
a/llvm/test/CodeGen/X86/widen_cast-4.ll b/llvm/test/CodeGen/X86/widen_cast-4.ll index 19b84f1..1bc06a7 100644 --- a/llvm/test/CodeGen/X86/widen_cast-4.ll +++ b/llvm/test/CodeGen/X86/widen_cast-4.ll @@ -1,9 +1,8 @@ ; RUN: llc < %s -march=x86 -mattr=+sse4.2 | FileCheck %s -; RUN: llc < %s -march=x86 -mattr=+sse4.2 -x86-experimental-vector-widening-legalization | FileCheck %s --check-prefix=CHECK-WIDE +; CHECK: psraw +; CHECK: psraw define void @update(i64* %dst_i, i64* %src_i, i32 %n) nounwind { -; CHECK-LABEL: update: -; CHECK-WIDE-LABEL: update: entry: %dst_i.addr = alloca i64* ; [#uses=2] %src_i.addr = alloca i64* ; [#uses=2] @@ -45,26 +44,6 @@ forbody: ; preds = %forcond %shr = ashr <8 x i8> %add, < i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2 > ; <<8 x i8>> [#uses=1] store <8 x i8> %shr, <8 x i8>* %arrayidx10 br label %forinc -; CHECK: %forbody -; CHECK: pmovzxbw -; CHECK-NEXT: paddw -; CHECK-NEXT: psllw $8 -; CHECK-NEXT: psraw $8 -; CHECK-NEXT: psraw $2 -; CHECK-NEXT: pshufb -; CHECK-NEXT: movlpd -; -; FIXME: We shouldn't require both a movd and an insert. -; CHECK-WIDE: %forbody -; CHECK-WIDE: movd -; CHECK-WIDE-NEXT: pinsrd -; CHECK-WIDE-NEXT: paddb -; CHECK-WIDE-NEXT: psrlw $2 -; CHECK-WIDE-NEXT: pand -; CHECK-WIDE-NEXT: pxor -; CHECK-WIDE-NEXT: psubb -; CHECK-WIDE-NEXT: pextrd -; CHECK-WIDE-NEXT: movd forinc: ; preds = %forbody %tmp15 = load i32* %i ; [#uses=1] -- 2.7.4