From: Peter Collingbourne
Date: Fri, 2 Dec 2016 02:24:42 +0000 (+0000)
Subject: IR: Change the gep_type_iterator API to avoid always exposing the "current" type.
X-Git-Tag: llvmorg-4.0.0-rc1~3191
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=ab85225be49b2abf5112823d84525e91d469d6bd;p=platform%2Fupstream%2Fllvm.git

IR: Change the gep_type_iterator API to avoid always exposing the "current" type.

Instead, expose whether the current type is an array or a struct; if it is an
array, expose the upper bound, and if it is a struct, expose the struct type
itself. This is in preparation for a later change which will make PointerType
derive from Type rather than SequentialType.

Differential Revision: https://reviews.llvm.org/D26594

llvm-svn: 288458
---

diff --git a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
index 4948bbd..2934e9ca 100644
--- a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
@@ -482,10 +482,7 @@ public:
     int64_t BaseOffset = 0;
     int64_t Scale = 0;
 
-    // Assumes the address space is 0 when Ptr is nullptr.
-    unsigned AS =
-        (Ptr == nullptr ? 0 : Ptr->getType()->getPointerAddressSpace());
-    auto GTI = gep_type_begin(PointeeType, AS, Operands);
+    auto GTI = gep_type_begin(PointeeType, Operands);
     for (auto I = Operands.begin(); I != Operands.end(); ++I, ++GTI) {
       // We assume that the cost of Scalar GEP with constant index and the
       // cost of Vector GEP with splat constant index are the same.
@@ -493,7 +490,12 @@ public:
       if (!ConstIdx)
         if (auto Splat = getSplatValue(*I))
           ConstIdx = dyn_cast<ConstantInt>(Splat);
-      if (isa<SequentialType>(*GTI)) {
+      if (StructType *STy = GTI.getStructTypeOrNull()) {
+        // For structures the index is always splat or scalar constant
+        assert(ConstIdx && "Unexpected GEP index");
+        uint64_t Field = ConstIdx->getZExtValue();
+        BaseOffset += DL.getStructLayout(STy)->getElementOffset(Field);
+      } else {
         int64_t ElementSize = DL.getTypeAllocSize(GTI.getIndexedType());
         if (ConstIdx)
           BaseOffset += ConstIdx->getSExtValue() * ElementSize;
@@ -504,17 +506,15 @@ public:
           return TTI::TCC_Basic;
         Scale = ElementSize;
       }
-    } else {
-      StructType *STy = cast<StructType>(*GTI);
-      // For structures the index is always splat or scalar constant
-      assert(ConstIdx && "Unexpected GEP index");
-      uint64_t Field = ConstIdx->getZExtValue();
-      BaseOffset += DL.getStructLayout(STy)->getElementOffset(Field);
     }
   }
+  // Assumes the address space is 0 when Ptr is nullptr.
+  unsigned AS =
+      (Ptr == nullptr ? 0 : Ptr->getType()->getPointerAddressSpace());
   if (static_cast<T *>(this)->isLegalAddressingMode(
-          PointerType::get(*GTI, AS), const_cast<GlobalValue *>(BaseGV),
+          PointerType::get(Type::getInt8Ty(PointeeType->getContext()), AS),
+          const_cast<GlobalValue *>(BaseGV),
           BaseOffset, HasBaseReg, Scale, AS)) {
     return TTI::TCC_Free;
   }
diff --git a/llvm/include/llvm/IR/GetElementPtrTypeIterator.h b/llvm/include/llvm/IR/GetElementPtrTypeIterator.h
index 1e89621..d9904c5 100644
--- a/llvm/include/llvm/IR/GetElementPtrTypeIterator.h
+++ b/llvm/include/llvm/IR/GetElementPtrTypeIterator.h
@@ -16,7 +16,7 @@
 #define LLVM_IR_GETELEMENTPTRTYPEITERATOR_H
 
 #include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/ADT/PointerUnion.h"
 #include "llvm/IR/DerivedTypes.h"
 #include "llvm/IR/Operator.h"
 #include "llvm/IR/User.h"
@@ -33,18 +33,15 @@ namespace llvm {
                           Type *, ptrdiff_t> super;
 
     ItTy OpIt;
-    PointerIntPair<Type *, 1> CurTy;
-    unsigned AddrSpace;
-
+    PointerUnion<StructType *, Type *> CurTy;
+    enum { Unbounded = -1ull };
+    uint64_t NumElements = Unbounded;
     generic_gep_type_iterator() = default;

   public:
-    static generic_gep_type_iterator begin(Type *Ty, unsigned AddrSpace,
-                                           ItTy It) {
+    static generic_gep_type_iterator begin(Type *Ty, ItTy It) {
       generic_gep_type_iterator I;
-      I.CurTy.setPointer(Ty);
-      I.CurTy.setInt(true);
-      I.AddrSpace = AddrSpace;
+      I.CurTy = Ty;
       I.OpIt = It;
       return I;
     }
@@ -63,34 +60,28 @@ namespace llvm {
       return !operator==(x);
     }
 
-    Type *operator*() const {
-      if (CurTy.getInt())
-        return CurTy.getPointer()->getPointerTo(AddrSpace);
-      return CurTy.getPointer();
-    }
-
+    // FIXME: Make this the iterator's operator*() after the 4.0 release.
+    // operator*() had a different meaning in earlier releases, so we're
+    // temporarily not giving this iterator an operator*() to avoid a subtle
+    // semantics break.
     Type *getIndexedType() const {
-      if (CurTy.getInt())
-        return CurTy.getPointer();
-      CompositeType *CT = cast<CompositeType>(CurTy.getPointer());
-      return CT->getTypeAtIndex(getOperand());
+      if (auto *T = CurTy.dyn_cast<Type *>())
+        return T;
+      return CurTy.get<StructType *>()->getTypeAtIndex(getOperand());
     }
 
-    // This is a non-standard operator->.  It allows you to call methods on the
-    // current type directly.
-    Type *operator->() const { return operator*(); }
-
     Value *getOperand() const { return const_cast<Value *>(&**OpIt); }
 
     generic_gep_type_iterator& operator++() {   // Preincrement
-      if (CurTy.getInt()) {
-        CurTy.setInt(false);
-      } else if (CompositeType *CT =
-                     dyn_cast<CompositeType>(CurTy.getPointer())) {
-        CurTy.setPointer(CT->getTypeAtIndex(getOperand()));
-      } else {
-        CurTy.setPointer(nullptr);
-      }
+      Type *Ty = getIndexedType();
+      if (auto *ATy = dyn_cast<ArrayType>(Ty)) {
+        CurTy = ATy->getElementType();
+        NumElements = ATy->getNumElements();
+      } else if (auto *VTy = dyn_cast<VectorType>(Ty)) {
+        CurTy = VTy->getElementType();
+        NumElements = VTy->getNumElements();
+      } else
+        CurTy = dyn_cast<StructType>(Ty);
       ++OpIt;
       return *this;
     }
@@ -98,6 +89,39 @@ namespace llvm {
     generic_gep_type_iterator operator++(int) { // Postincrement
       generic_gep_type_iterator tmp = *this; ++*this; return tmp;
     }
+
+    // All of the below API is for querying properties of the "outer type", i.e.
+    // the type that contains the indexed type. Most of the time this is just
+    // the type that was visited immediately prior to the indexed type, but for
+    // the first element this is an unbounded array of the GEP's source element
+    // type, for which there is no clearly corresponding IR type (we've
+    // historically used a pointer type as the outer type in this case, but
+    // pointers will soon lose their element type).
+    //
+    // FIXME: Most current users of this class are just interested in byte
+    // offsets (a few need to know whether the outer type is a struct because
+    // they are trying to replace a constant with a variable, which is only
+    // legal for arrays, e.g. canReplaceOperandWithVariable in SimplifyCFG.cpp);
+    // we should provide a more minimal API here that exposes not much more than
+    // that.
+
+    bool isStruct() const { return CurTy.is<StructType *>(); }
+    bool isSequential() const { return CurTy.is<Type *>(); }
+
+    StructType *getStructType() const { return CurTy.get<StructType *>(); }
+
+    StructType *getStructTypeOrNull() const {
+      return CurTy.dyn_cast<StructType *>();
+    }
+
+    bool isBoundedSequential() const {
+      return isSequential() && NumElements != Unbounded;
+    }
+
+    uint64_t getSequentialNumElements() const {
+      assert(isBoundedSequential());
+      return NumElements;
+    }
   };
 
   typedef generic_gep_type_iterator<> gep_type_iterator;
 
@@ -106,8 +130,6 @@ namespace llvm {
     auto *GEPOp = cast<GEPOperator>(GEP);
     return gep_type_iterator::begin(
         GEPOp->getSourceElementType(),
-        cast<PointerType>(GEPOp->getPointerOperandType()->getScalarType())
-            ->getAddressSpace(),
         GEP->op_begin() + 1);
   }
 
@@ -119,8 +141,6 @@ namespace llvm {
     auto &GEPOp = cast<GEPOperator>(GEP);
     return gep_type_iterator::begin(
         GEPOp.getSourceElementType(),
-        cast<PointerType>(GEPOp.getPointerOperandType()->getScalarType())
-            ->getAddressSpace(),
         GEP.op_begin() + 1);
   }
 
@@ -130,13 +150,13 @@ namespace llvm {
 
   template<typename T>
   inline generic_gep_type_iterator<const T *>
-  gep_type_begin(Type *Op0, unsigned AS, ArrayRef<T> A) {
-    return generic_gep_type_iterator<const T *>::begin(Op0, AS, A.begin());
+  gep_type_begin(Type *Op0, ArrayRef<T> A) {
+    return generic_gep_type_iterator<const T *>::begin(Op0, A.begin());
   }
 
   template<typename T>
   inline generic_gep_type_iterator<const T *>
-  gep_type_end(Type * /*Op0*/, unsigned /*AS*/, ArrayRef<T> A) {
+  gep_type_end(Type * /*Op0*/, ArrayRef<T> A) {
     return generic_gep_type_iterator<const T *>::end(A.end());
   }
diff --git a/llvm/include/llvm/Transforms/Utils/Local.h b/llvm/include/llvm/Transforms/Utils/Local.h
index 063e6aa..490a765 100644
--- a/llvm/include/llvm/Transforms/Utils/Local.h
+++ b/llvm/include/llvm/Transforms/Utils/Local.h
@@ -217,7 +217,7 @@ Value *EmitGEPOffset(IRBuilderTy *Builder, const DataLayout &DL, User *GEP,
       continue;
 
     // Handle a struct index, which adds its field offset to the pointer.
-    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+    if (StructType *STy = GTI.getStructTypeOrNull()) {
       if (OpC->getType()->isVectorTy())
         OpC = OpC->getSplatValue();
diff --git a/llvm/lib/Analysis/BasicAliasAnalysis.cpp b/llvm/lib/Analysis/BasicAliasAnalysis.cpp
index 942f036..761c6d9 100644
--- a/llvm/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/llvm/lib/Analysis/BasicAliasAnalysis.cpp
@@ -412,10 +412,10 @@ bool BasicAAResult::DecomposeGEPExpression(const Value *V,
     // Assume all GEP operands are constants until proven otherwise.
     bool GepHasConstantOffset = true;
     for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
-         I != E; ++I) {
+         I != E; ++I, ++GTI) {
       const Value *Index = *I;
       // Compute the (potentially symbolic) offset in bytes for this index.
-      if (StructType *STy = dyn_cast<StructType>(*GTI++)) {
+      if (StructType *STy = GTI.getStructTypeOrNull()) {
         // For a struct, add the member offset.
        unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
        if (FieldNo == 0)
          continue;
@@ -431,13 +431,13 @@ bool BasicAAResult::DecomposeGEPExpression(const Value *V,
        if (CIdx->isZero())
          continue;
        Decomposed.OtherOffset +=
-           DL.getTypeAllocSize(*GTI) * CIdx->getSExtValue();
+           DL.getTypeAllocSize(GTI.getIndexedType()) * CIdx->getSExtValue();
        continue;
      }
 
      GepHasConstantOffset = false;
 
-     uint64_t Scale = DL.getTypeAllocSize(*GTI);
+     uint64_t Scale = DL.getTypeAllocSize(GTI.getIndexedType());
      unsigned ZExtBits = 0, SExtBits = 0;
 
      // If the integer type is smaller than the pointer size, it is implicitly
diff --git a/llvm/lib/Analysis/InlineCost.cpp b/llvm/lib/Analysis/InlineCost.cpp
index 6c3525f..02a2753 100644
--- a/llvm/lib/Analysis/InlineCost.cpp
+++ b/llvm/lib/Analysis/InlineCost.cpp
@@ -318,7 +318,7 @@ bool CallAnalyzer::accumulateGEPOffset(GEPOperator &GEP, APInt &Offset) {
       continue;
 
     // Handle a struct index, which adds its field offset to the pointer.
-    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+    if (StructType *STy = GTI.getStructTypeOrNull()) {
       unsigned ElementIdx = OpC->getZExtValue();
       const StructLayout *SL = DL.getStructLayout(STy);
       Offset += APInt(IntPtrWidth, SL->getElementOffset(ElementIdx));
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index 234d42d..e7220f8 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -1231,7 +1231,7 @@ static void computeKnownBitsFromOperator(const Operator *I, APInt &KnownZero,
     gep_type_iterator GTI = gep_type_begin(I);
     for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
       Value *Index = I->getOperand(i);
-      if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+      if (StructType *STy = GTI.getStructTypeOrNull()) {
         // Handle struct member offset arithmetic.
 
         // Handle case when index is vector zeroinitializer
@@ -1730,7 +1730,7 @@ static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth,
   for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
        GTI != GTE; ++GTI) {
     // Struct types are easy -- they must always be indexed by a constant.
-    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+    if (StructType *STy = GTI.getStructTypeOrNull()) {
       ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand());
       unsigned ElementIdx = OpC->getZExtValue();
       const StructLayout *SL = Q.DL.getStructLayout(STy);
diff --git a/llvm/lib/Analysis/VectorUtils.cpp b/llvm/lib/Analysis/VectorUtils.cpp
index 5a51fd8..7e598f4 100644
--- a/llvm/lib/Analysis/VectorUtils.cpp
+++ b/llvm/lib/Analysis/VectorUtils.cpp
@@ -107,11 +107,11 @@ unsigned llvm::getGEPInductionOperand(const GetElementPtrInst *Gep) {
   while (LastOperand > 1 && match(Gep->getOperand(LastOperand), m_Zero())) {
     // Find the type we're currently indexing into.
     gep_type_iterator GEPTI = gep_type_begin(Gep);
-    std::advance(GEPTI, LastOperand - 1);
+    std::advance(GEPTI, LastOperand - 2);
 
     // If it's a type with the same allocation size as the result of the GEP we
     // can peel off the zero index.
-    if (DL.getTypeAllocSize(*GEPTI) != GEPAllocSize)
+    if (DL.getTypeAllocSize(GEPTI.getIndexedType()) != GEPAllocSize)
       break;
     --LastOperand;
   }
diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp
index 60ac557..0ae57ce 100644
--- a/llvm/lib/CodeGen/CodeGenPrepare.cpp
+++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -3261,7 +3261,7 @@ bool AddressingModeMatcher::matchOperationAddr(User *AddrInst, unsigned Opcode,
     int64_t ConstantOffset = 0;
     gep_type_iterator GTI = gep_type_begin(AddrInst);
     for (unsigned i = 1, e = AddrInst->getNumOperands(); i != e; ++i, ++GTI) {
-      if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+      if (StructType *STy = GTI.getStructTypeOrNull()) {
        const StructLayout *SL = DL.getStructLayout(STy);
        unsigned Idx =
            cast<ConstantInt>(AddrInst->getOperand(i))->getZExtValue();
diff --git a/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp b/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
index 7bc0c9d..e2f33bb 100644
--- a/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/FastISel.cpp
@@ -488,7 +488,7 @@ bool FastISel::selectGetElementPtr(const User *I) {
   for (gep_type_iterator GTI = gep_type_begin(I), E = gep_type_end(I);
        GTI != E; ++GTI) {
     const Value *Idx = GTI.getOperand();
-    if (auto *StTy = dyn_cast<StructType>(*GTI)) {
+    if (StructType *StTy = GTI.getStructTypeOrNull()) {
       uint64_t Field = cast<ConstantInt>(Idx)->getZExtValue();
       if (Field) {
         // N = N + Offset
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 01c7c14..09f2bbc 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -3274,7 +3274,7 @@ void SelectionDAGBuilder::visitGetElementPtr(const User &I) {
   for (gep_type_iterator GTI = gep_type_begin(&I), E = gep_type_end(&I);
        GTI != E; ++GTI) {
     const Value *Idx = GTI.getOperand();
-    if (StructType *StTy = dyn_cast<StructType>(*GTI)) {
+    if (StructType *StTy = GTI.getStructTypeOrNull()) {
       unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
       if (Field) {
         // N = N + Offset
diff --git a/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp b/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
index 1eb4f7d..923f6e7 100644
--- a/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
+++ b/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
@@ -999,7 +999,7 @@ GenericValue Interpreter::executeGEPOperation(Value *Ptr, gep_type_iterator I,
   uint64_t Total = 0;
 
   for (; I != E; ++I) {
-    if (StructType *STy = dyn_cast<StructType>(*I)) {
+    if (StructType *STy = I.getStructTypeOrNull()) {
       const StructLayout *SLO = getDataLayout().getStructLayout(STy);
 
       const ConstantInt *CPU = cast<ConstantInt>(I.getOperand());
@@ -1007,7 +1007,6 @@ GenericValue Interpreter::executeGEPOperation(Value *Ptr, gep_type_iterator I,
 
       Total += SLO->getElementOffset(Index);
     } else {
-      SequentialType *ST = cast<SequentialType>(*I);
      // Get the index number for the array... which must be long type...
      GenericValue IdxGV = getOperandValue(I.getOperand(), SF);
 
@@ -1020,7 +1019,7 @@ GenericValue Interpreter::executeGEPOperation(Value *Ptr, gep_type_iterator I,
        assert(BitWidth == 64 && "Invalid index type for getelementptr");
        Idx = (int64_t)IdxGV.IntVal.getZExtValue();
      }
-     Total += getDataLayout().getTypeAllocSize(ST->getElementType()) * Idx;
+     Total += getDataLayout().getTypeAllocSize(I.getIndexedType()) * Idx;
    }
  }
diff --git a/llvm/lib/IR/ConstantFold.cpp b/llvm/lib/IR/ConstantFold.cpp
index 37bab3a..60f6fbb 100644
--- a/llvm/lib/IR/ConstantFold.cpp
+++ b/llvm/lib/IR/ConstantFold.cpp
@@ -2019,22 +2019,8 @@ static bool isInBoundsIndices(ArrayRef<IndexTy> Idxs) {
 }
 
 /// Test whether a given ConstantInt is in-range for a SequentialType.
-static bool isIndexInRangeOfSequentialType(SequentialType *STy,
-                                           const ConstantInt *CI) {
-  // And indices are valid when indexing along a pointer
-  if (isa<PointerType>(STy))
-    return true;
-
-  uint64_t NumElements = 0;
-  // Determine the number of elements in our sequential type.
-  if (auto *ATy = dyn_cast<ArrayType>(STy))
-    NumElements = ATy->getNumElements();
-  else if (auto *VTy = dyn_cast<VectorType>(STy))
-    NumElements = VTy->getNumElements();
-
-  assert((isa<ArrayType>(STy) || NumElements > 0) &&
-         "didn't expect non-array type to have zero elements!");
-
+static bool isIndexInRangeOfArrayType(uint64_t NumElements,
+                                      const ConstantInt *CI) {
   // We cannot bounds check the index if it doesn't fit in an int64_t.
   if (CI->getValue().getActiveBits() > 64)
     return false;
@@ -2089,10 +2075,10 @@ Constant *llvm::ConstantFoldGetElementPtr(Type *PointeeTy, Constant *C,
       // getelementptr instructions into a single instruction.
       //
       if (CE->getOpcode() == Instruction::GetElementPtr) {
-        Type *LastTy = nullptr;
+        gep_type_iterator LastI = gep_type_end(CE);
         for (gep_type_iterator I = gep_type_begin(CE), E = gep_type_end(CE);
              I != E; ++I)
-          LastTy = *I;
+          LastI = I;
 
         // We cannot combine indices if doing so would take us outside of an
         // array or vector.  Doing otherwise could trick us if we evaluated such a
@@ -2115,9 +2101,11 @@ Constant *llvm::ConstantFoldGetElementPtr(Type *PointeeTy, Constant *C,
         bool PerformFold = false;
         if (Idx0->isNullValue())
           PerformFold = true;
-        else if (SequentialType *STy = dyn_cast_or_null<SequentialType>(LastTy))
+        else if (LastI.isSequential())
           if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx0))
-            PerformFold = isIndexInRangeOfSequentialType(STy, CI);
+            PerformFold =
+                !LastI.isBoundedSequential() ||
+                isIndexInRangeOfArrayType(LastI.getSequentialNumElements(), CI);
 
         if (PerformFold) {
           SmallVector<Value *, 16> NewIndices;
@@ -2228,7 +2216,10 @@ Constant *llvm::ConstantFoldGetElementPtr(Type *PointeeTy, Constant *C,
        Unknown = true;
        continue;
      }
-     if (isIndexInRangeOfSequentialType(STy, CI))
+     if (isIndexInRangeOfArrayType(isa<ArrayType>(STy)
+                                       ? cast<ArrayType>(STy)->getNumElements()
+                                       : cast<VectorType>(STy)->getNumElements(),
+                                   CI))
        // It's in range, skip to the next index.
        continue;
      if (!isa<SequentialType>(Prev)) {
diff --git a/llvm/lib/IR/Constants.cpp b/llvm/lib/IR/Constants.cpp
index 0e5fa24..b6af6ed 100644
--- a/llvm/lib/IR/Constants.cpp
+++ b/llvm/lib/IR/Constants.cpp
@@ -1073,19 +1073,14 @@ bool ConstantExpr::isGEPWithNoNotionalOverIndexing() const {
   gep_type_iterator GEPI = gep_type_begin(this), E = gep_type_end(this);
   User::const_op_iterator OI = std::next(this->op_begin());
 
-  // Skip the first index, as it has no static limit.
-  ++GEPI;
-  ++OI;
-
   // The remaining indices must be compile-time known integers within the
   // bounds of the corresponding notional static array types.
   for (; GEPI != E; ++GEPI, ++OI) {
     ConstantInt *CI = dyn_cast<ConstantInt>(*OI);
     if (!CI) return false;
-    if (ArrayType *ATy = dyn_cast<ArrayType>(*GEPI))
-      if (CI->getValue().getActiveBits() > 64 ||
-          CI->getZExtValue() >= ATy->getNumElements())
-        return false;
+    if (GEPI.isBoundedSequential() &&
+        (CI->getValue().getActiveBits() > 64 ||
+         CI->getZExtValue() >= GEPI.getSequentialNumElements()))
+      return false;
   }
 
   // All the indices checked out.
diff --git a/llvm/lib/IR/DataLayout.cpp b/llvm/lib/IR/DataLayout.cpp
index 3de1889..d15a34c 100644
--- a/llvm/lib/IR/DataLayout.cpp
+++ b/llvm/lib/IR/DataLayout.cpp
@@ -737,15 +737,12 @@ int64_t DataLayout::getIndexedOffsetInType(Type *ElemTy,
                                            ArrayRef<Value *> Indices) const {
   int64_t Result = 0;
 
-  // We can use 0 as the address space as we don't need
-  // to get pointer types back from gep_type_iterator.
-  unsigned AS = 0;
   generic_gep_type_iterator<Value *const *>
-    GTI = gep_type_begin(ElemTy, AS, Indices),
-    GTE = gep_type_end(ElemTy, AS, Indices);
+    GTI = gep_type_begin(ElemTy, Indices),
+    GTE = gep_type_end(ElemTy, Indices);
   for (; GTI != GTE; ++GTI) {
     Value *Idx = GTI.getOperand();
-    if (auto *STy = dyn_cast<StructType>(*GTI)) {
+    if (StructType *STy = GTI.getStructTypeOrNull()) {
       assert(Idx->getType()->isIntegerTy(32) && "Illegal struct idx");
       unsigned FieldNo = cast<ConstantInt>(Idx)->getZExtValue();
diff --git a/llvm/lib/IR/Operator.cpp b/llvm/lib/IR/Operator.cpp
index 8a94053..2fba24d 100644
--- a/llvm/lib/IR/Operator.cpp
+++ b/llvm/lib/IR/Operator.cpp
@@ -33,7 +33,7 @@ bool GEPOperator::accumulateConstantOffset(const DataLayout &DL,
       continue;
 
     // Handle a struct index, which adds its field offset to the pointer.
-    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+    if (StructType *STy = GTI.getStructTypeOrNull()) {
       unsigned ElementIdx = OpC->getZExtValue();
       const StructLayout *SL = DL.getStructLayout(STy);
       Offset += APInt(Offset.getBitWidth(), SL->getElementOffset(ElementIdx));
diff --git a/llvm/lib/Target/AArch64/AArch64FastISel.cpp b/llvm/lib/Target/AArch64/AArch64FastISel.cpp
index 405e7d0..fe2c2d4 100644
--- a/llvm/lib/Target/AArch64/AArch64FastISel.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FastISel.cpp
@@ -557,7 +557,7 @@ bool AArch64FastISel::computeAddress(const Value *Obj, Address &Addr, Type *Ty)
     for (gep_type_iterator GTI = gep_type_begin(U), E = gep_type_end(U);
          GTI != E; ++GTI) {
       const Value *Op = GTI.getOperand();
-      if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+      if (StructType *STy = GTI.getStructTypeOrNull()) {
        const StructLayout *SL = DL.getStructLayout(STy);
        unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
        TmpOffset += SL->getElementOffset(Idx);
@@ -4885,7 +4885,7 @@ bool AArch64FastISel::selectGetElementPtr(const Instruction *I) {
   for (gep_type_iterator GTI = gep_type_begin(I), E = gep_type_end(I);
        GTI != E; ++GTI) {
     const Value *Idx = GTI.getOperand();
-    if (auto *StTy = dyn_cast<StructType>(*GTI)) {
+    if (auto *StTy = GTI.getStructTypeOrNull()) {
       unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
       // N = N + Offset
       if (Field)
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 019d72d..581958c 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -7157,8 +7157,8 @@ bool AArch64TargetLowering::isExtFreeImpl(const Instruction *Ext) const {
   case Instruction::GetElementPtr: {
     gep_type_iterator GTI = gep_type_begin(Instr);
     auto &DL = Ext->getModule()->getDataLayout();
-    std::advance(GTI, U.getOperandNo());
-    Type *IdxTy = *GTI;
+    std::advance(GTI, U.getOperandNo()-1);
+    Type *IdxTy = GTI.getIndexedType();
     // This extension will end up with a shift because of the scaling factor.
    // 8-bit sized types have a scaling factor of 1, thus a shift amount of 0.
    // Get the shift amount based on the scaling factor:
diff --git a/llvm/lib/Target/ARM/ARMFastISel.cpp b/llvm/lib/Target/ARM/ARMFastISel.cpp
index 9b50458..df4dcb3 100644
--- a/llvm/lib/Target/ARM/ARMFastISel.cpp
+++ b/llvm/lib/Target/ARM/ARMFastISel.cpp
@@ -733,7 +733,7 @@ bool ARMFastISel::ARMComputeAddress(const Value *Obj, Address &Addr) {
       for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
            i != e; ++i, ++GTI) {
         const Value *Op = *i;
-        if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+        if (StructType *STy = GTI.getStructTypeOrNull()) {
          const StructLayout *SL = DL.getStructLayout(STy);
          unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
          TmpOffset += SL->getElementOffset(Idx);
diff --git a/llvm/lib/Target/Mips/MipsFastISel.cpp b/llvm/lib/Target/Mips/MipsFastISel.cpp
index cfce60c..29f3e2c 100644
--- a/llvm/lib/Target/Mips/MipsFastISel.cpp
+++ b/llvm/lib/Target/Mips/MipsFastISel.cpp
@@ -445,7 +445,7 @@ bool MipsFastISel::computeAddress(const Value *Obj, Address &Addr) {
     for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
          i != e; ++i, ++GTI) {
       const Value *Op = *i;
-      if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+      if (StructType *STy = GTI.getStructTypeOrNull()) {
        const StructLayout *SL = DL.getStructLayout(STy);
        unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
        TmpOffset += SL->getElementOffset(Idx);
diff --git a/llvm/lib/Target/PowerPC/PPCFastISel.cpp b/llvm/lib/Target/PowerPC/PPCFastISel.cpp
index b7f3603..9b91b9a 100644
--- a/llvm/lib/Target/PowerPC/PPCFastISel.cpp
+++ b/llvm/lib/Target/PowerPC/PPCFastISel.cpp
@@ -358,7 +358,7 @@ bool PPCFastISel::PPCComputeAddress(const Value *Obj, Address &Addr) {
     for (User::const_op_iterator II = U->op_begin() + 1, IE = U->op_end();
          II != IE; ++II, ++GTI) {
       const Value *Op = *II;
-      if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+      if (StructType *STy = GTI.getStructTypeOrNull()) {
        const StructLayout *SL = DL.getStructLayout(STy);
        unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
        TmpOffset += SL->getElementOffset(Idx);
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp
index 628b5a5..4bdf06a 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyFastISel.cpp
@@ -241,7 +241,7 @@ bool WebAssemblyFastISel::computeAddress(const Value *Obj, Address &Addr) {
     for (gep_type_iterator GTI = gep_type_begin(U), E = gep_type_end(U);
          GTI != E; ++GTI) {
       const Value *Op = GTI.getOperand();
-      if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+      if (StructType *STy = GTI.getStructTypeOrNull()) {
        const StructLayout *SL = DL.getStructLayout(STy);
        unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
        TmpOffset += SL->getElementOffset(Idx);
diff --git a/llvm/lib/Target/X86/X86FastISel.cpp b/llvm/lib/Target/X86/X86FastISel.cpp
index 1979433..0cc0615 100644
--- a/llvm/lib/Target/X86/X86FastISel.cpp
+++ b/llvm/lib/Target/X86/X86FastISel.cpp
@@ -936,7 +936,7 @@ redo_gep:
     for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
          i != e; ++i, ++GTI) {
       const Value *Op = *i;
-      if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+      if (StructType *STy = GTI.getStructTypeOrNull()) {
        const StructLayout *SL = DL.getStructLayout(STy);
        Disp += SL->getElementOffset(cast<ConstantInt>(Op)->getZExtValue());
        continue;
diff --git a/llvm/lib/Transforms/IPO/GlobalOpt.cpp b/llvm/lib/Transforms/IPO/GlobalOpt.cpp
index f1686a0..1df9ee7 100644
--- a/llvm/lib/Transforms/IPO/GlobalOpt.cpp
+++ b/llvm/lib/Transforms/IPO/GlobalOpt.cpp
@@ -371,14 +371,14 @@ static bool IsUserOfGlobalSafeForSRA(User *U, GlobalValue *GV) {
      ++GEPI;  // Skip over the pointer index.
 
      // If this is a use of an array allocation, do a bit more checking for sanity.
-     if (ArrayType *AT = dyn_cast<ArrayType>(*GEPI)) {
-       uint64_t NumElements = AT->getNumElements();
+     if (GEPI.isSequential()) {
        ConstantInt *Idx = cast<ConstantInt>(U->getOperand(2));
 
        // Check to make sure that index falls within the array.  If not,
        // something funny is going on, so we won't do the optimization.
        //
-       if (Idx->getZExtValue() >= NumElements)
+       if (GEPI.isBoundedSequential() &&
+           Idx->getZExtValue() >= GEPI.getSequentialNumElements())
          return false;
 
        // We cannot scalar repl this level of the array unless any array
@@ -391,19 +391,13 @@ static bool IsUserOfGlobalSafeForSRA(User *U, GlobalValue *GV) {
        for (++GEPI;  // Skip array index.
             GEPI != E;
             ++GEPI) {
-         uint64_t NumElements;
-         if (ArrayType *SubArrayTy = dyn_cast<ArrayType>(*GEPI))
-           NumElements = SubArrayTy->getNumElements();
-         else if (VectorType *SubVectorTy = dyn_cast<VectorType>(*GEPI))
-           NumElements = SubVectorTy->getNumElements();
-         else {
-           assert((*GEPI)->isStructTy() &&
-                  "Indexed GEP type is not array, vector, or struct!");
+         if (GEPI.isStruct())
            continue;
-         }
 
          ConstantInt *IdxVal = dyn_cast<ConstantInt>(GEPI.getOperand());
-         if (!IdxVal || IdxVal->getZExtValue() >= NumElements)
+         if (!IdxVal ||
+             (GEPI.isBoundedSequential() &&
+              IdxVal->getZExtValue() >= GEPI.getSequentialNumElements()))
            return false;
        }
      }
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
index bacb9ba..c610002 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -517,7 +517,7 @@ static Value *evaluateGEPOffsetExpression(User *GEP, InstCombiner &IC,
     if (CI->isZero()) continue;
 
     // Handle a struct index, which adds its field offset to the pointer.
-    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+    if (StructType *STy = GTI.getStructTypeOrNull()) {
       Offset += DL.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
     } else {
       uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
@@ -547,7 +547,7 @@ static Value *evaluateGEPOffsetExpression(User *GEP, InstCombiner &IC,
      if (CI->isZero()) continue;
 
      // Handle a struct index, which adds its field offset to the pointer.
-     if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+     if (StructType *STy = GTI.getStructTypeOrNull()) {
        Offset += DL.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
      } else {
        uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
index cdbc8eb..90eba68 100644
--- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -1389,7 +1389,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
     for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end();
          I != E; ++I, ++GTI) {
       // Skip indices into struct types.
-      if (isa<StructType>(*GTI))
+      if (GTI.isStruct())
        continue;
 
      // Index type should have the same width as IntPtr
@@ -1546,7 +1546,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
    bool EndsWithSequential = false;
    for (gep_type_iterator I = gep_type_begin(*Src), E = gep_type_end(*Src);
         I != E; ++I)
-     EndsWithSequential = !(*I)->isStructTy();
+     EndsWithSequential = I.isSequential();
 
    // Can we combine the two pointer arithmetics offsets?
    if (EndsWithSequential) {
diff --git a/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
index c74d911..b19e663 100644
--- a/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
+++ b/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
@@ -52,7 +52,7 @@ static int64_t GetOffsetFromIndex(const GEPOperator *GEP, unsigned Idx,
     if (OpC->isZero()) continue;  // No offset.
 
     // Handle struct indices, which add their field offset to the pointer.
-    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+    if (StructType *STy = GTI.getStructTypeOrNull()) {
       Offset += DL.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
       continue;
     }
diff --git a/llvm/lib/Transforms/Scalar/NaryReassociate.cpp b/llvm/lib/Transforms/Scalar/NaryReassociate.cpp
index 81744c0..0a3bf7b 100644
--- a/llvm/lib/Transforms/Scalar/NaryReassociate.cpp
+++ b/llvm/lib/Transforms/Scalar/NaryReassociate.cpp
@@ -281,9 +281,10 @@ Instruction *NaryReassociatePass::tryReassociateGEP(GetElementPtrInst *GEP) {
     return nullptr;
 
   gep_type_iterator GTI = gep_type_begin(*GEP);
-  for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I) {
-    if (isa<SequentialType>(*GTI++)) {
-      if (auto *NewGEP = tryReassociateGEPAtIndex(GEP, I - 1, *GTI)) {
+  for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
+    if (GTI.isSequential()) {
+      if (auto *NewGEP = tryReassociateGEPAtIndex(GEP, I - 1,
+                                                  GTI.getIndexedType())) {
        return NewGEP;
      }
    }
diff --git a/llvm/lib/Transforms/Scalar/SROA.cpp b/llvm/lib/Transforms/Scalar/SROA.cpp
index 54ce1d8..66de397 100644
--- a/llvm/lib/Transforms/Scalar/SROA.cpp
+++ b/llvm/lib/Transforms/Scalar/SROA.cpp
@@ -692,7 +692,7 @@ private:
        break;
 
      // Handle a struct index, which adds its field offset to the pointer.
-     if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+     if (StructType *STy = GTI.getStructTypeOrNull()) {
        unsigned ElementIdx = OpC->getZExtValue();
        const StructLayout *SL = DL.getStructLayout(STy);
        GEPOffset +=
diff --git a/llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp b/llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp
index 62c9f46..4d59453 100644
--- a/llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp
+++ b/llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp
@@ -722,7 +722,7 @@ bool SeparateConstOffsetFromGEP::canonicalizeArrayIndicesToPointerSize(
   for (User::op_iterator I = GEP->op_begin() + 1, E = GEP->op_end();
        I != E; ++I, ++GTI) {
     // Skip struct member indices which must be i32.
-    if (isa<SequentialType>(*GTI)) {
+    if (GTI.isSequential()) {
       if ((*I)->getType() != IntPtrTy) {
         *I = CastInst::CreateIntegerCast(*I, IntPtrTy, true, "idxprom", GEP);
         Changed = true;
@@ -739,7 +739,7 @@ SeparateConstOffsetFromGEP::accumulateByteOffset(GetElementPtrInst *GEP,
   int64_t AccumulativeByteOffset = 0;
   gep_type_iterator GTI = gep_type_begin(*GEP);
   for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
-    if (isa<SequentialType>(*GTI)) {
+    if (GTI.isSequential()) {
      // Tries to extract a constant offset from this GEP index.
      int64_t ConstantOffset =
          ConstantOffsetExtractor::Find(GEP->getOperand(I), GEP, DT);
@@ -752,7 +752,7 @@ SeparateConstOffsetFromGEP::accumulateByteOffset(GetElementPtrInst *GEP,
            ConstantOffset * DL->getTypeAllocSize(GTI.getIndexedType());
      }
    } else if (LowerGEP) {
-     StructType *StTy = cast<StructType>(*GTI);
+     StructType *StTy = GTI.getStructType();
      uint64_t Field = cast<ConstantInt>(GEP->getOperand(I))->getZExtValue();
      // Skip field 0 as the offset is always 0.
      if (Field != 0) {
@@ -787,7 +787,7 @@ void SeparateConstOffsetFromGEP::lowerToSingleIndexGEPs(
   // Create an ugly GEP for each sequential index. We don't create GEPs for
   // structure indices, as they are accumulated in the constant offset index.
   for (unsigned I = 1, E = Variadic->getNumOperands(); I != E; ++I, ++GTI) {
-    if (isa<SequentialType>(*GTI)) {
+    if (GTI.isSequential()) {
       Value *Idx = Variadic->getOperand(I);
       // Skip zero indices.
       if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx))
@@ -848,7 +848,7 @@ SeparateConstOffsetFromGEP::lowerToArithmetics(GetElementPtrInst *Variadic,
   // don't create arithmetics for structure indices, as they are accumulated
   // in the constant offset index.
   for (unsigned I = 1, E = Variadic->getNumOperands(); I != E; ++I, ++GTI) {
-    if (isa<SequentialType>(*GTI)) {
+    if (GTI.isSequential()) {
       Value *Idx = Variadic->getOperand(I);
       // Skip zero indices.
       if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx))
@@ -928,7 +928,7 @@ bool SeparateConstOffsetFromGEP::splitGEP(GetElementPtrInst *GEP) {
   // handle the constant offset and won't need a new structure index.
   gep_type_iterator GTI = gep_type_begin(*GEP);
   for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
-    if (isa<SequentialType>(*GTI)) {
+    if (GTI.isSequential()) {
       // Splits this GEP index into a variadic part and a constant offset, and
       // uses the variadic part as the new index.
       Value *OldIdx = GEP->getOperand(I);
diff --git a/llvm/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp b/llvm/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp
index c4d18f1..2be3f5c 100644
--- a/llvm/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp
+++ b/llvm/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp
@@ -490,8 +490,8 @@ void StraightLineStrengthReduce::allocateCandidatesAndFindBasisForGEP(
     IndexExprs.push_back(SE->getSCEV(*I));
 
   gep_type_iterator GTI = gep_type_begin(GEP);
-  for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I) {
-    if (!isa<SequentialType>(*GTI++))
+  for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
+    if (GTI.isStruct())
       continue;
 
     const SCEV *OrigIndexExpr = IndexExprs[I - 1];
@@ -501,7 +501,7 @@ void StraightLineStrengthReduce::allocateCandidatesAndFindBasisForGEP(
     // indices except this current one.
     const SCEV *BaseExpr = SE->getGEPExpr(cast<GEPOperator>(GEP), IndexExprs);
     Value *ArrayIdx = GEP->getOperand(I);
-    uint64_t ElementSize = DL->getTypeAllocSize(*GTI);
+    uint64_t ElementSize = DL->getTypeAllocSize(GTI.getIndexedType());
     if (ArrayIdx->getType()->getIntegerBitWidth() <=
         DL->getPointerSizeInBits(GEP->getAddressSpace())) {
       // Skip factoring if ArrayIdx is wider than the pointer size, because
diff --git a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
index 323b81c..2f4e3e9 100644
--- a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
+++ b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -1416,7 +1416,7 @@ static bool canReplaceOperandWithVariable(const Instruction *I,
     if (OpIdx == 0)
       return true;
     gep_type_iterator It = std::next(gep_type_begin(I), OpIdx - 1);
-    return !It->isStructTy();
+    return It.isSequential();
  }
}
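
For out-of-tree callers, the migration pattern the hunks above apply is mechanical:
instead of dereferencing the iterator to inspect the outer type, ask the iterator
what kind of outer type it is. Below is a minimal sketch of a byte-offset walk
written against the post-patch API; the helper name and the assumption that every
index is a ConstantInt are illustrative only, not part of this change.

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// Hypothetical helper: accumulate the constant byte offset of a GEP whose
// indices are all ConstantInts.
static int64_t accumulateConstantGEPOffset(const DataLayout &DL,
                                           GetElementPtrInst *GEP) {
  int64_t Offset = 0;
  for (gep_type_iterator GTI = gep_type_begin(GEP), E = gep_type_end(GEP);
       GTI != E; ++GTI) {
    auto *CI = cast<ConstantInt>(GTI.getOperand());
    if (StructType *STy = GTI.getStructTypeOrNull()) {
      // Outer type is a struct: add the field offset. Before this patch the
      // same branch was typically written
      // `if (StructType *STy = dyn_cast<StructType>(*GTI))`.
      unsigned Field = CI->getZExtValue();
      Offset += DL.getStructLayout(STy)->getElementOffset(Field);
    } else {
      // Outer type is sequential: an array, a vector, or the notional
      // unbounded array over the source element type for the first index.
      // Scale the index by the element size.
      Offset += CI->getSExtValue() *
                (int64_t)DL.getTypeAllocSize(GTI.getIndexedType());
    }
  }
  return Offset;
}

Callers that previously pattern-matched *GTI against ArrayType or SequentialType
in order to bounds-check an index can instead combine isBoundedSequential() with
getSequentialNumElements(), as the Constants.cpp and GlobalOpt.cpp hunks above do.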