From: Christopher Tetreault <ctetreau@quicinc.com>
Date: Thu, 9 Apr 2020 19:19:23 +0000 (-0700)
Subject: Clean up usages of asserting vector getters in Type
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=b96558f5e5904d9d01298fbb1c965acb15b92781;p=platform%2Fupstream%2Fllvm.git

Clean up usages of asserting vector getters in Type

Summary:
Remove usages of asserting vector getters in Type in preparation for the
VectorType refactor. The existence of these functions complicates the
refactor while adding little value.

Reviewers: sunfish, sdesmalen, efriedma

Reviewed By: efriedma

Subscribers: hiraditya, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D77273
---

diff --git a/llvm/include/llvm/Analysis/Utils/Local.h b/llvm/include/llvm/Analysis/Utils/Local.h
index 84e884e..c7be9ee 100644
--- a/llvm/include/llvm/Analysis/Utils/Local.h
+++ b/llvm/include/llvm/Analysis/Utils/Local.h
@@ -63,7 +63,8 @@ Value *EmitGEPOffset(IRBuilderTy *Builder, const DataLayout &DL, User *GEP,
 
         // Splat the constant if needed.
         if (IntIdxTy->isVectorTy() && !OpC->getType()->isVectorTy())
-          OpC = ConstantVector::getSplat(IntIdxTy->getVectorElementCount(), OpC);
+          OpC = ConstantVector::getSplat(
+              cast<VectorType>(IntIdxTy)->getElementCount(), OpC);
 
         Constant *Scale = ConstantInt::get(IntIdxTy, Size);
         Constant *OC = ConstantExpr::getIntegerCast(OpC, IntIdxTy, true /*SExt*/);
@@ -76,7 +77,8 @@ Value *EmitGEPOffset(IRBuilderTy *Builder, const DataLayout &DL, User *GEP,
 
     // Splat the index if needed.
     if (IntIdxTy->isVectorTy() && !Op->getType()->isVectorTy())
-      Op = Builder->CreateVectorSplat(IntIdxTy->getVectorNumElements(), Op);
+      Op = Builder->CreateVectorSplat(
+          cast<VectorType>(IntIdxTy)->getNumElements(), Op);
 
     // Convert to correct type.
     if (Op->getType() != IntIdxTy)
diff --git a/llvm/lib/Analysis/ConstantFolding.cpp b/llvm/lib/Analysis/ConstantFolding.cpp
index ed6bde8..351e36f 100644
--- a/llvm/lib/Analysis/ConstantFolding.cpp
+++ b/llvm/lib/Analysis/ConstantFolding.cpp
@@ -155,11 +155,11 @@ Constant *FoldBitCast(Constant *C, Type *DestTy, const DataLayout &DL) {
 
   // If the element types match, IR can fold it.
   unsigned NumDstElt = DestVTy->getNumElements();
-  unsigned NumSrcElt = C->getType()->getVectorNumElements();
+  unsigned NumSrcElt = cast<VectorType>(C->getType())->getNumElements();
   if (NumDstElt == NumSrcElt)
     return ConstantExpr::getBitCast(C, DestTy);
 
-  Type *SrcEltTy = C->getType()->getVectorElementType();
+  Type *SrcEltTy = cast<VectorType>(C->getType())->getElementType();
   Type *DstEltTy = DestVTy->getElementType();
 
   // Otherwise, we're changing the number of elements in a vector, which
@@ -218,7 +218,8 @@ Constant *FoldBitCast(Constant *C, Type *DestTy, const DataLayout &DL) {
     for (unsigned j = 0; j != Ratio; ++j) {
       Constant *Src = C->getAggregateElement(SrcElt++);
       if (Src && isa<UndefValue>(Src))
-        Src = Constant::getNullValue(C->getType()->getVectorElementType());
+        Src = Constant::getNullValue(
+            cast<VectorType>(C->getType())->getElementType());
       else
         Src = dyn_cast_or_null<ConstantInt>(Src);
       if (!Src)  // Reject constantexpr elements.
@@ -469,8 +470,8 @@ bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset, unsigned char *CurPtr,
       NumElts = AT->getNumElements();
       EltTy = AT->getElementType();
     } else {
-      NumElts = C->getType()->getVectorNumElements();
-      EltTy = C->getType()->getVectorElementType();
+      NumElts = cast<VectorType>(C->getType())->getNumElements();
+      EltTy = cast<VectorType>(C->getType())->getElementType();
     }
     uint64_t EltSize = DL.getTypeAllocSize(EltTy);
     uint64_t Index = ByteOffset / EltSize;
@@ -508,7 +509,7 @@ bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset, unsigned char *CurPtr,
 Constant *FoldReinterpretLoadFromConstPtr(Constant *C, Type *LoadTy,
                                           const DataLayout &DL) {
   // Bail out early. Not expect to load from scalable global variable.
-  if (LoadTy->isVectorTy() && LoadTy->getVectorIsScalable())
+  if (LoadTy->isVectorTy() && cast<VectorType>(LoadTy)->isScalable())
     return nullptr;
 
   auto *PTy = cast<PointerType>(C->getType());
@@ -836,7 +837,7 @@ Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
   Type *ResElemTy = GEP->getResultElementType();
   Type *ResTy = GEP->getType();
   if (!SrcElemTy->isSized() ||
-      (SrcElemTy->isVectorTy() && SrcElemTy->getVectorIsScalable()))
+      (SrcElemTy->isVectorTy() && cast<VectorType>(SrcElemTy)->isScalable()))
     return nullptr;
 
   if (Constant *C = CastGEPIndices(SrcElemTy, Ops, ResTy,
@@ -2571,7 +2572,7 @@ static Constant *ConstantFoldVectorCall(StringRef Name,
 
   // Do not iterate on scalable vector. The number of elements is unknown at
   // compile-time.
-  if (VTy->getVectorIsScalable())
+  if (VTy->isScalable())
     return nullptr;
 
   if (IntrinsicID == Intrinsic::masked_load) {
diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp
index 086ff74..111f64b 100644
--- a/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -945,8 +945,9 @@ static Value *simplifyDivRem(Value *Op0, Value *Op1, bool IsDiv) {
   // If any element of a constant divisor vector is zero or undef, the whole op
   // is undef.
   auto *Op1C = dyn_cast<Constant>(Op1);
-  if (Op1C && Ty->isVectorTy()) {
-    unsigned NumElts = Ty->getVectorNumElements();
+  auto *VTy = dyn_cast<VectorType>(Ty);
+  if (Op1C && VTy) {
+    unsigned NumElts = VTy->getNumElements();
     for (unsigned i = 0; i != NumElts; ++i) {
       Constant *Elt = Op1C->getAggregateElement(i);
       if (Elt && (Elt->isNullValue() || isa<UndefValue>(Elt)))
@@ -1221,7 +1222,8 @@ static bool isUndefShift(Value *Amount) {
 
   // If all lanes of a vector shift are undefined the whole shift is.
   if (isa<ConstantVector>(C) || isa<ConstantDataVector>(C)) {
-    for (unsigned I = 0, E = C->getType()->getVectorNumElements(); I != E; ++I)
+    for (unsigned I = 0, E = cast<VectorType>(C->getType())->getNumElements();
+         I != E; ++I)
       if (!isUndefShift(C->getAggregateElement(I)))
         return false;
     return true;
@@ -4011,7 +4013,7 @@ static Value *SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
   Constant *TrueC, *FalseC;
   if (TrueVal->getType()->isVectorTy() && match(TrueVal, m_Constant(TrueC)) &&
       match(FalseVal, m_Constant(FalseC))) {
-    unsigned NumElts = TrueC->getType()->getVectorNumElements();
+    unsigned NumElts = cast<VectorType>(TrueC->getType())->getNumElements();
     SmallVector<Constant *, 16> NewC;
     for (unsigned i = 0; i != NumElts; ++i) {
       // Bail out on incomplete vector constants.
@@ -4081,7 +4083,7 @@ static Value *SimplifyGEPInst(Type *SrcTy, ArrayRef<Value *> Ops,
     return UndefValue::get(GEPTy);
 
   bool IsScalableVec =
-      SrcTy->isVectorTy() ? SrcTy->getVectorIsScalable() : false;
+      isa<VectorType>(SrcTy) && cast<VectorType>(SrcTy)->isScalable();
 
   if (Ops.size() == 2) {
     // getelementptr P, 0 -> P.
@@ -4223,8 +4225,8 @@ Value *llvm::SimplifyInsertElementInst(Value *Vec, Value *Val, Value *Idx,
 
   // For fixed-length vector, fold into undef if index is out of bounds.
   if (auto *CI = dyn_cast<ConstantInt>(Idx)) {
-    if (!Vec->getType()->getVectorIsScalable() &&
-        CI->uge(Vec->getType()->getVectorNumElements()))
+    if (!cast<VectorType>(Vec->getType())->isScalable() &&
+        CI->uge(cast<VectorType>(Vec->getType())->getNumElements()))
       return UndefValue::get(Vec->getType());
   }
 
@@ -4280,6 +4282,7 @@ Value *llvm::SimplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
 /// If not, this returns null.
 static Value *SimplifyExtractElementInst(Value *Vec, Value *Idx,
                                          const SimplifyQuery &, unsigned) {
+  auto *VecVTy = cast<VectorType>(Vec->getType());
   if (auto *CVec = dyn_cast<Constant>(Vec)) {
     if (auto *CIdx = dyn_cast<Constant>(Idx))
       return ConstantFoldExtractElementInstruction(CVec, CIdx);
@@ -4289,16 +4292,15 @@ static Value *SimplifyExtractElementInst(Value *Vec, Value *Idx, const SimplifyQ
       return Splat;
 
     if (isa<UndefValue>(Vec))
-      return UndefValue::get(Vec->getType()->getVectorElementType());
+      return UndefValue::get(VecVTy->getElementType());
   }
 
   // If extracting a specified index from the vector, see if we can recursively
   // find a previously computed scalar that was inserted into the vector.
   if (auto *IdxC = dyn_cast<ConstantInt>(Idx)) {
     // For fixed-length vector, fold into undef if index is out of bounds.
-    if (!Vec->getType()->getVectorIsScalable() &&
-        IdxC->getValue().uge(Vec->getType()->getVectorNumElements()))
-      return UndefValue::get(Vec->getType()->getVectorElementType());
+    if (!VecVTy->isScalable() && IdxC->getValue().uge(VecVTy->getNumElements()))
+      return UndefValue::get(VecVTy->getElementType());
     if (Value *Elt = findScalarElement(Vec, IdxC->getZExtValue()))
       return Elt;
   }
@@ -4306,7 +4308,7 @@ static Value *SimplifyExtractElementInst(Value *Vec, Value *Idx, const SimplifyQ
   // An undef extract index can be arbitrarily chosen to be an out-of-range
   // index value, which would result in the instruction being undef.
   if (isa<UndefValue>(Idx))
-    return UndefValue::get(Vec->getType()->getVectorElementType());
+    return UndefValue::get(VecVTy->getElementType());
 
   return nullptr;
 }
@@ -4403,7 +4405,7 @@ static Value *foldIdentityShuffles(int DestElt, Value *Op0, Value *Op1,
     return nullptr;
 
   // The mask value chooses which source operand we need to look at next.
-  int InVecNumElts = Op0->getType()->getVectorNumElements();
+  int InVecNumElts = cast<VectorType>(Op0->getType())->getNumElements();
   int RootElt = MaskVal;
   Value *SourceOp = Op0;
   if (MaskVal >= InVecNumElts) {
@@ -4446,9 +4448,9 @@ static Value *SimplifyShuffleVectorInst(Value *Op0, Value *Op1,
   if (all_of(Mask, [](int Elem) { return Elem == UndefMaskElem; }))
     return UndefValue::get(RetTy);
 
-  Type *InVecTy = Op0->getType();
+  auto *InVecTy = cast<VectorType>(Op0->getType());
   unsigned MaskNumElts = Mask.size();
-  ElementCount InVecEltCount = InVecTy->getVectorElementCount();
+  ElementCount InVecEltCount = InVecTy->getElementCount();
 
   bool Scalable = InVecEltCount.Scalable;
 
diff --git a/llvm/lib/Analysis/Loads.cpp b/llvm/lib/Analysis/Loads.cpp
index b2d20ce..9919440 100644
--- a/llvm/lib/Analysis/Loads.cpp
+++ b/llvm/lib/Analysis/Loads.cpp
@@ -148,7 +148,8 @@ bool llvm::isDereferenceableAndAlignedPointer(const Value *V, Type *Ty,
                                               const DominatorTree *DT) {
   // For unsized types or scalable vectors we don't know exactly how many bytes
   // are dereferenced, so bail out.
-  if (!Ty->isSized() || (Ty->isVectorTy() && Ty->getVectorIsScalable()))
+  if (!Ty->isSized() ||
+      (Ty->isVectorTy() && cast<VectorType>(Ty)->isScalable()))
     return false;
 
   // When dereferenceability information is provided by a dereferenceable
diff --git a/llvm/lib/Analysis/MemoryBuiltins.cpp b/llvm/lib/Analysis/MemoryBuiltins.cpp
index ac72bda..7d4b634 100644
--- a/llvm/lib/Analysis/MemoryBuiltins.cpp
+++ b/llvm/lib/Analysis/MemoryBuiltins.cpp
@@ -650,7 +650,7 @@ SizeOffsetType ObjectSizeOffsetVisitor::visitAllocaInst(AllocaInst &I) {
     return unknown();
 
   if (I.getAllocatedType()->isVectorTy() &&
-      I.getAllocatedType()->getVectorIsScalable())
+      cast<VectorType>(I.getAllocatedType())->isScalable())
     return unknown();
 
   APInt Size(IntTyBits, DL.getTypeAllocSize(I.getAllocatedType()));
diff --git a/llvm/lib/Analysis/TargetTransformInfo.cpp b/llvm/lib/Analysis/TargetTransformInfo.cpp
index 150a395..50a8b60 100644
--- a/llvm/lib/Analysis/TargetTransformInfo.cpp
+++ b/llvm/lib/Analysis/TargetTransformInfo.cpp
@@ -874,7 +874,7 @@ static bool matchPairwiseShuffleMask(ShuffleVectorInst *SI, bool IsLeft,
   else if (!SI)
     return false;
 
-  SmallVector<int, 32> Mask(SI->getType()->getVectorNumElements(), -1);
+  SmallVector<int, 32> Mask(SI->getType()->getNumElements(), -1);
 
   // Build a mask of 0, 2, ... (left) or 1, 3, ... (right) depending on whether
   // we look at the left or right side.
@@ -1036,8 +1036,8 @@ static ReductionKind matchPairwiseReduction(const ExtractElementInst *ReduxRoot,
   if (!RD)
     return RK_None;
 
-  Type *VecTy = RdxStart->getType();
-  unsigned NumVecElems = VecTy->getVectorNumElements();
+  auto *VecTy = cast<VectorType>(RdxStart->getType());
+  unsigned NumVecElems = VecTy->getNumElements();
   if (!isPowerOf2_32(NumVecElems))
     return RK_None;
 
@@ -1101,8 +1101,8 @@ matchVectorSplittingReduction(const ExtractElementInst *ReduxRoot,
   if (!RD)
     return RK_None;
 
-  Type *VecTy = ReduxRoot->getOperand(0)->getType();
-  unsigned NumVecElems = VecTy->getVectorNumElements();
+  auto *VecTy = cast<VectorType>(ReduxRoot->getOperand(0)->getType());
+  unsigned NumVecElems = VecTy->getNumElements();
   if (!isPowerOf2_32(NumVecElems))
     return RK_None;
 
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index 6b61983..58b15f4 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -168,11 +168,12 @@ static bool getShuffleDemandedElts(const ShuffleVectorInst *Shuf,
                                    APInt &DemandedLHS, APInt &DemandedRHS) {
   // The length of scalable vectors is unknown at compile time, thus we
   // cannot check their values
-  if (Shuf->getType()->getVectorElementCount().Scalable)
+  if (Shuf->getType()->isScalable())
     return false;
 
-  int NumElts = Shuf->getOperand(0)->getType()->getVectorNumElements();
-  int NumMaskElts = Shuf->getType()->getVectorNumElements();
+  int NumElts =
+      cast<VectorType>(Shuf->getOperand(0)->getType())->getNumElements();
+  int NumMaskElts = Shuf->getType()->getNumElements();
   DemandedLHS = DemandedRHS = APInt::getNullValue(NumElts);
   if (DemandedElts.isNullValue())
     return true;
@@ -206,9 +207,10 @@ static void computeKnownBits(const Value *V, const APInt &DemandedElts,
 static void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
                             const Query &Q) {
   Type *Ty = V->getType();
-  APInt DemandedElts = Ty->isVectorTy()
-                           ?
 APInt::getAllOnesValue(Ty->getVectorNumElements())
-                           : APInt(1, 1);
+  APInt DemandedElts =
+      Ty->isVectorTy()
+          ? APInt::getAllOnesValue(cast<VectorType>(Ty)->getNumElements())
+          : APInt(1, 1);
   computeKnownBits(V, DemandedElts, Known, Depth, Q);
 }
 
@@ -373,9 +375,10 @@ static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts,
 static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
                                    const Query &Q) {
   Type *Ty = V->getType();
-  APInt DemandedElts = Ty->isVectorTy()
-                           ? APInt::getAllOnesValue(Ty->getVectorNumElements())
-                           : APInt(1, 1);
+  APInt DemandedElts =
+      Ty->isVectorTy()
+          ? APInt::getAllOnesValue(cast<VectorType>(Ty)->getNumElements())
+          : APInt(1, 1);
   return ComputeNumSignBits(V, DemandedElts, Depth, Q);
 }
 
@@ -1791,7 +1794,7 @@ static void computeKnownBitsFromOperator(const Operator *I,
     const Value *Vec = I->getOperand(0);
     const Value *Idx = I->getOperand(1);
     auto *CIdx = dyn_cast<ConstantInt>(Idx);
-    unsigned NumElts = Vec->getType()->getVectorNumElements();
+    unsigned NumElts = cast<VectorType>(Vec->getType())->getNumElements();
     APInt DemandedVecElts = APInt::getAllOnesValue(NumElts);
     if (CIdx && CIdx->getValue().ult(NumElts))
       DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue());
@@ -1870,8 +1873,8 @@ void computeKnownBits(const Value *V, const APInt &DemandedElts,
   Type *Ty = V->getType();
   assert((Ty->isIntOrIntVectorTy(BitWidth) || Ty->isPtrOrPtrVectorTy()) &&
          "Not integer or pointer type!");
-  assert(((Ty->isVectorTy() &&
-           Ty->getVectorNumElements() == DemandedElts.getBitWidth()) ||
+  assert(((Ty->isVectorTy() && cast<VectorType>(Ty)->getNumElements() ==
+                                   DemandedElts.getBitWidth()) ||
          (!Ty->isVectorTy() && DemandedElts == APInt(1, 1))) &&
         "Unexpected vector size");
 
@@ -2510,7 +2513,7 @@ bool isKnownNonZero(const Value *V, const APInt &DemandedElts, unsigned Depth,
     const Value *Vec = EEI->getVectorOperand();
     const Value *Idx = EEI->getIndexOperand();
     auto *CIdx = dyn_cast<ConstantInt>(Idx);
-    unsigned NumElts = Vec->getType()->getVectorNumElements();
+    unsigned NumElts =
        cast<VectorType>(Vec->getType())->getNumElements();
     APInt DemandedVecElts = APInt::getAllOnesValue(NumElts);
     if (CIdx && CIdx->getValue().ult(NumElts))
       DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue());
@@ -2524,9 +2527,10 @@ bool isKnownNonZero(const Value *V, const APInt &DemandedElts, unsigned Depth,
 
 bool isKnownNonZero(const Value* V, unsigned Depth, const Query& Q) {
   Type *Ty = V->getType();
-  APInt DemandedElts = Ty->isVectorTy()
-                           ? APInt::getAllOnesValue(Ty->getVectorNumElements())
-                           : APInt(1, 1);
+  APInt DemandedElts =
+      Ty->isVectorTy()
+          ? APInt::getAllOnesValue(cast<VectorType>(Ty)->getNumElements())
+          : APInt(1, 1);
   return isKnownNonZero(V, DemandedElts, Depth, Q);
 }
 
@@ -2627,7 +2631,7 @@ static unsigned computeNumSignBitsVectorConstant(const Value *V,
     return 0;
 
   unsigned MinSignBits = TyBits;
-  unsigned NumElts = CV->getType()->getVectorNumElements();
+  unsigned NumElts = cast<VectorType>(CV->getType())->getNumElements();
   for (unsigned i = 0; i != NumElts; ++i) {
     if (!DemandedElts[i])
       continue;
@@ -2670,8 +2674,8 @@ static unsigned ComputeNumSignBitsImpl(const Value *V,
   // same behavior for poison though -- that's a FIXME today.
 
   Type *Ty = V->getType();
-  assert(((Ty->isVectorTy() &&
-           Ty->getVectorNumElements() == DemandedElts.getBitWidth()) ||
+  assert(((Ty->isVectorTy() && cast<VectorType>(Ty)->getNumElements() ==
+                                   DemandedElts.getBitWidth()) ||
          (!Ty->isVectorTy() && DemandedElts == APInt(1, 1))) &&
         "Unexpected vector size");
 
@@ -3246,8 +3250,8 @@ static bool cannotBeOrderedLessThanZeroImpl(const Value *V,
 
   // Handle vector of constants.
   if (auto *CV = dyn_cast<Constant>(V)) {
-    if (CV->getType()->isVectorTy()) {
-      unsigned NumElts = CV->getType()->getVectorNumElements();
+    if (auto *CVVTy = dyn_cast<VectorType>(CV->getType())) {
+      unsigned NumElts = CVVTy->getNumElements();
       for (unsigned i = 0; i != NumElts; ++i) {
         auto *CFP = dyn_cast_or_null<ConstantFP>(CV->getAggregateElement(i));
         if (!CFP)
@@ -3423,7 +3427,7 @@ bool llvm::isKnownNeverInfinity(const Value *V, const TargetLibraryInfo *TLI,
       return false;
 
     // For vectors, verify that each element is not infinity.
-    unsigned NumElts = V->getType()->getVectorNumElements();
+    unsigned NumElts = cast<VectorType>(V->getType())->getNumElements();
     for (unsigned i = 0; i != NumElts; ++i) {
       Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
       if (!Elt)
@@ -3524,7 +3528,7 @@ bool llvm::isKnownNeverNaN(const Value *V, const TargetLibraryInfo *TLI,
       return false;
 
     // For vectors, verify that each element is not NaN.
-    unsigned NumElts = V->getType()->getVectorNumElements();
+    unsigned NumElts = cast<VectorType>(V->getType())->getNumElements();
     for (unsigned i = 0; i != NumElts; ++i) {
       Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
       if (!Elt)
diff --git a/llvm/lib/Analysis/VectorUtils.cpp b/llvm/lib/Analysis/VectorUtils.cpp
index 8b98e05..5468794 100644
--- a/llvm/lib/Analysis/VectorUtils.cpp
+++ b/llvm/lib/Analysis/VectorUtils.cpp
@@ -263,7 +263,7 @@ Value *llvm::findScalarElement(Value *V, unsigned EltNo) {
   assert(V->getType()->isVectorTy() && "Not looking at a vector?");
   VectorType *VTy = cast<VectorType>(V->getType());
   // For fixed-length vector, return undef for out of range access.
-  if (!V->getType()->getVectorIsScalable()) {
+  if (!VTy->isScalable()) {
     unsigned Width = VTy->getNumElements();
     if (EltNo >= Width)
       return UndefValue::get(VTy->getElementType());
   }
@@ -289,7 +289,8 @@ Value *llvm::findScalarElement(Value *V, unsigned EltNo) {
   }
 
   if (ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(V)) {
-    unsigned LHSWidth = SVI->getOperand(0)->getType()->getVectorNumElements();
+    unsigned LHSWidth =
+        cast<VectorType>(SVI->getOperand(0)->getType())->getNumElements();
     int InEl = SVI->getMaskValue(EltNo);
     if (InEl < 0)
       return UndefValue::get(VTy->getElementType());
@@ -805,8 +806,9 @@ bool llvm::maskIsAllZeroOrUndef(Value *Mask) {
     return false;
   if (ConstMask->isNullValue() || isa<UndefValue>(ConstMask))
     return true;
-  for (unsigned I = 0, E = ConstMask->getType()->getVectorNumElements(); I != E;
-       ++I) {
+  for (unsigned I = 0,
+                E = cast<VectorType>(ConstMask->getType())->getNumElements();
+       I != E; ++I) {
     if (auto *MaskElt = ConstMask->getAggregateElement(I))
       if (MaskElt->isNullValue() || isa<UndefValue>(MaskElt))
         continue;
@@ -822,8 +824,9 @@ bool llvm::maskIsAllOneOrUndef(Value *Mask) {
     return false;
   if (ConstMask->isAllOnesValue() || isa<UndefValue>(ConstMask))
     return true;
-  for (unsigned I = 0, E = ConstMask->getType()->getVectorNumElements(); I != E;
-       ++I) {
+  for (unsigned I = 0,
+                E = cast<VectorType>(ConstMask->getType())->getNumElements();
+       I != E; ++I) {
     if (auto *MaskElt = ConstMask->getAggregateElement(I))
      if (MaskElt->isAllOnesValue() || isa<UndefValue>(MaskElt))
        continue;