int64_t BaseOffset = 0;
int64_t Scale = 0;
- // Assumes the address space is 0 when Ptr is nullptr.
- unsigned AS =
- (Ptr == nullptr ? 0 : Ptr->getType()->getPointerAddressSpace());
- auto GTI = gep_type_begin(PointeeType, AS, Operands);
+ auto GTI = gep_type_begin(PointeeType, Operands);
for (auto I = Operands.begin(); I != Operands.end(); ++I, ++GTI) {
// We assume that the cost of Scalar GEP with constant index and the
// cost of Vector GEP with splat constant index are the same.
if (!ConstIdx)
if (auto Splat = getSplatValue(*I))
ConstIdx = dyn_cast<ConstantInt>(Splat);
- if (isa<SequentialType>(*GTI)) {
+ if (StructType *STy = GTI.getStructTypeOrNull()) {
+ // For structures the index is always splat or scalar constant
+ assert(ConstIdx && "Unexpected GEP index");
+ uint64_t Field = ConstIdx->getZExtValue();
+ BaseOffset += DL.getStructLayout(STy)->getElementOffset(Field);
+ } else {
int64_t ElementSize = DL.getTypeAllocSize(GTI.getIndexedType());
        if (ConstIdx)
          BaseOffset += ConstIdx->getSExtValue() * ElementSize;
        else {
          // Needs scale register.
          if (Scale != 0)
            // No addressing mode takes two scale registers.
            return TTI::TCC_Basic;
          Scale = ElementSize;
}
- } else {
- StructType *STy = cast<StructType>(*GTI);
- // For structures the index is always splat or scalar constant
- assert(ConstIdx && "Unexpected GEP index");
- uint64_t Field = ConstIdx->getZExtValue();
- BaseOffset += DL.getStructLayout(STy)->getElementOffset(Field);
}
}
+ // Assumes the address space is 0 when Ptr is nullptr.
+ unsigned AS =
+ (Ptr == nullptr ? 0 : Ptr->getType()->getPointerAddressSpace());
if (static_cast<T *>(this)->isLegalAddressingMode(
- PointerType::get(*GTI, AS), const_cast<GlobalValue *>(BaseGV),
+ PointerType::get(Type::getInt8Ty(PointeeType->getContext()), AS),
+ const_cast<GlobalValue *>(BaseGV),
BaseOffset, HasBaseReg, Scale, AS)) {
return TTI::TCC_Free;
}
#define LLVM_IR_GETELEMENTPTRTYPEITERATOR_H
#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/ADT/PointerUnion.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/User.h"
Type *, ptrdiff_t> super;
ItTy OpIt;
- PointerIntPair<Type *, 1> CurTy;
- unsigned AddrSpace;
-
+ PointerUnion<StructType *, Type *> CurTy;
+ enum : uint64_t { Unbounded = -1ull };
+ uint64_t NumElements = Unbounded;
generic_gep_type_iterator() = default;
public:
- static generic_gep_type_iterator begin(Type *Ty, unsigned AddrSpace,
- ItTy It) {
+ static generic_gep_type_iterator begin(Type *Ty, ItTy It) {
generic_gep_type_iterator I;
- I.CurTy.setPointer(Ty);
- I.CurTy.setInt(true);
- I.AddrSpace = AddrSpace;
+ I.CurTy = Ty;
I.OpIt = It;
return I;
}
return !operator==(x);
}
- Type *operator*() const {
- if (CurTy.getInt())
- return CurTy.getPointer()->getPointerTo(AddrSpace);
- return CurTy.getPointer();
- }
-
+ // FIXME: Make this the iterator's operator*() after the 4.0 release.
+ // operator*() had a different meaning in earlier releases, so we're
+ // temporarily not giving this iterator an operator*() to avoid a subtle
+ // semantics break.
Type *getIndexedType() const {
- if (CurTy.getInt())
- return CurTy.getPointer();
- CompositeType *CT = cast<CompositeType>(CurTy.getPointer());
- return CT->getTypeAtIndex(getOperand());
+ if (auto *T = CurTy.dyn_cast<Type *>())
+ return T;
+ return CurTy.get<StructType *>()->getTypeAtIndex(getOperand());
}
- // This is a non-standard operator->. It allows you to call methods on the
- // current type directly.
- Type *operator->() const { return operator*(); }
-
Value *getOperand() const { return const_cast<Value *>(&**OpIt); }
generic_gep_type_iterator& operator++() { // Preincrement
- if (CurTy.getInt()) {
- CurTy.setInt(false);
- } else if (CompositeType *CT =
- dyn_cast<CompositeType>(CurTy.getPointer())) {
- CurTy.setPointer(CT->getTypeAtIndex(getOperand()));
- } else {
- CurTy.setPointer(nullptr);
- }
+ Type *Ty = getIndexedType();
+ if (auto *ATy = dyn_cast<ArrayType>(Ty)) {
+ CurTy = ATy->getElementType();
+ NumElements = ATy->getNumElements();
+ } else if (auto *VTy = dyn_cast<VectorType>(Ty)) {
+ CurTy = VTy->getElementType();
+ NumElements = VTy->getNumElements();
+ } else
+ CurTy = dyn_cast<StructType>(Ty);
++OpIt;
return *this;
}
generic_gep_type_iterator operator++(int) { // Postincrement
generic_gep_type_iterator tmp = *this; ++*this; return tmp;
}
+
+ // All of the below API is for querying properties of the "outer type", i.e.
+ // the type that contains the indexed type. Most of the time this is just
+ // the type that was visited immediately prior to the indexed type, but for
+ // the first element this is an unbounded array of the GEP's source element
+ // type, for which there is no clearly corresponding IR type (we've
+ // historically used a pointer type as the outer type in this case, but
+ // pointers will soon lose their element type).
+ //
+ // FIXME: Most current users of this class are just interested in byte
+ // offsets (a few need to know whether the outer type is a struct because
+ // they are trying to replace a constant with a variable, which is only
+ // legal for arrays, e.g. canReplaceOperandWithVariable in SimplifyCFG.cpp);
+ // we should provide a more minimal API here that exposes not much more than
+ // that.
+
+ bool isStruct() const { return CurTy.is<StructType *>(); }
+ bool isSequential() const { return CurTy.is<Type *>(); }
+
+ StructType *getStructType() const { return CurTy.get<StructType *>(); }
+
+ StructType *getStructTypeOrNull() const {
+ return CurTy.dyn_cast<StructType *>();
+ }
+
+ bool isBoundedSequential() const {
+ return isSequential() && NumElements != Unbounded;
+ }
+
+ uint64_t getSequentialNumElements() const {
+ assert(isBoundedSequential());
+ return NumElements;
+ }
};
typedef generic_gep_type_iterator<> gep_type_iterator;
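// Typical call-site migration (a sketch assuming every index is a
// ConstantInt; GEP, DL, and Offset are assumed in scope): the
// struct/sequential distinction is now queried on the iterator instead of
// pattern-matching the removed pointer-typed operator*():
//
//   for (gep_type_iterator GTI = gep_type_begin(GEP), E = gep_type_end(GEP);
//        GTI != E; ++GTI) {
//     auto *CI = cast<ConstantInt>(GTI.getOperand());
//     if (StructType *STy = GTI.getStructTypeOrNull())
//       Offset += DL.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
//     else
//       Offset += DL.getTypeAllocSize(GTI.getIndexedType()) *
//                 CI->getSExtValue();
//   }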
auto *GEPOp = cast<GEPOperator>(GEP);
return gep_type_iterator::begin(
GEPOp->getSourceElementType(),
- cast<PointerType>(GEPOp->getPointerOperandType()->getScalarType())
- ->getAddressSpace(),
GEP->op_begin() + 1);
}
auto &GEPOp = cast<GEPOperator>(GEP);
return gep_type_iterator::begin(
GEPOp.getSourceElementType(),
- cast<PointerType>(GEPOp.getPointerOperandType()->getScalarType())
- ->getAddressSpace(),
GEP.op_begin() + 1);
}
template<typename T>
inline generic_gep_type_iterator<const T *>
- gep_type_begin(Type *Op0, unsigned AS, ArrayRef<T> A) {
- return generic_gep_type_iterator<const T *>::begin(Op0, AS, A.begin());
+ gep_type_begin(Type *Op0, ArrayRef<T> A) {
+ return generic_gep_type_iterator<const T *>::begin(Op0, A.begin());
}
template<typename T>
inline generic_gep_type_iterator<const T *>
- gep_type_end(Type * /*Op0*/, unsigned /*AS*/, ArrayRef<T> A) {
+ gep_type_end(Type * /*Op0*/, ArrayRef<T> A) {
return generic_gep_type_iterator<const T *>::end(A.end());
}
continue;
// Handle a struct index, which adds its field offset to the pointer.
- if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+ if (StructType *STy = GTI.getStructTypeOrNull()) {
if (OpC->getType()->isVectorTy())
OpC = OpC->getSplatValue();
// Assume all GEP operands are constants until proven otherwise.
bool GepHasConstantOffset = true;
for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
- I != E; ++I) {
+ I != E; ++I, ++GTI) {
const Value *Index = *I;
// Compute the (potentially symbolic) offset in bytes for this index.
- if (StructType *STy = dyn_cast<StructType>(*GTI++)) {
+ if (StructType *STy = GTI.getStructTypeOrNull()) {
// For a struct, add the member offset.
unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
if (FieldNo == 0)
if (CIdx->isZero())
continue;
Decomposed.OtherOffset +=
- DL.getTypeAllocSize(*GTI) * CIdx->getSExtValue();
+ DL.getTypeAllocSize(GTI.getIndexedType()) * CIdx->getSExtValue();
continue;
}
GepHasConstantOffset = false;
- uint64_t Scale = DL.getTypeAllocSize(*GTI);
+ uint64_t Scale = DL.getTypeAllocSize(GTI.getIndexedType());
unsigned ZExtBits = 0, SExtBits = 0;
// If the integer type is smaller than the pointer size, it is implicitly
continue;
// Handle a struct index, which adds its field offset to the pointer.
- if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+ if (StructType *STy = GTI.getStructTypeOrNull()) {
unsigned ElementIdx = OpC->getZExtValue();
const StructLayout *SL = DL.getStructLayout(STy);
Offset += APInt(IntPtrWidth, SL->getElementOffset(ElementIdx));
gep_type_iterator GTI = gep_type_begin(I);
for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
Value *Index = I->getOperand(i);
- if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+ if (StructType *STy = GTI.getStructTypeOrNull()) {
// Handle struct member offset arithmetic.
// Handle case when index is vector zeroinitializer
for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
GTI != GTE; ++GTI) {
// Struct types are easy -- they must always be indexed by a constant.
- if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+ if (StructType *STy = GTI.getStructTypeOrNull()) {
ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand());
unsigned ElementIdx = OpC->getZExtValue();
const StructLayout *SL = Q.DL.getStructLayout(STy);
while (LastOperand > 1 && match(Gep->getOperand(LastOperand), m_Zero())) {
// Find the type we're currently indexing into.
gep_type_iterator GEPTI = gep_type_begin(Gep);
- std::advance(GEPTI, LastOperand - 1);
+ std::advance(GEPTI, LastOperand - 2);
// If it's a type with the same allocation size as the result of the GEP we
// can peel off the zero index.
- if (DL.getTypeAllocSize(*GEPTI) != GEPAllocSize)
+ if (DL.getTypeAllocSize(GEPTI.getIndexedType()) != GEPAllocSize)
break;
--LastOperand;
}
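// Note the LastOperand - 2: begin() no longer starts on a synthetic pointer
// level, so each position is reached with one fewer increment. Sketch
// (hypothetical IR): for
//   getelementptr [4 x [5 x i32]], [4 x [5 x i32]]* %p, i64 0, i64 %i, i64 0
// with LastOperand == 3, a single increment leaves getIndexedType() ==
// [5 x i32], the type that operand 3 indexes into; the old iterator needed
// LastOperand - 1 increments because it began on the pointer level.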
int64_t ConstantOffset = 0;
gep_type_iterator GTI = gep_type_begin(AddrInst);
for (unsigned i = 1, e = AddrInst->getNumOperands(); i != e; ++i, ++GTI) {
- if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+ if (StructType *STy = GTI.getStructTypeOrNull()) {
const StructLayout *SL = DL.getStructLayout(STy);
unsigned Idx =
cast<ConstantInt>(AddrInst->getOperand(i))->getZExtValue();
for (gep_type_iterator GTI = gep_type_begin(I), E = gep_type_end(I);
GTI != E; ++GTI) {
const Value *Idx = GTI.getOperand();
- if (auto *StTy = dyn_cast<StructType>(*GTI)) {
+ if (StructType *StTy = GTI.getStructTypeOrNull()) {
uint64_t Field = cast<ConstantInt>(Idx)->getZExtValue();
if (Field) {
// N = N + Offset
for (gep_type_iterator GTI = gep_type_begin(&I), E = gep_type_end(&I);
GTI != E; ++GTI) {
const Value *Idx = GTI.getOperand();
- if (StructType *StTy = dyn_cast<StructType>(*GTI)) {
+ if (StructType *StTy = GTI.getStructTypeOrNull()) {
unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
if (Field) {
// N = N + Offset
uint64_t Total = 0;
for (; I != E; ++I) {
- if (StructType *STy = dyn_cast<StructType>(*I)) {
+ if (StructType *STy = I.getStructTypeOrNull()) {
const StructLayout *SLO = getDataLayout().getStructLayout(STy);
      const ConstantInt *CPU = cast<ConstantInt>(I.getOperand());
      unsigned Index = unsigned(CPU->getZExtValue());
Total += SLO->getElementOffset(Index);
} else {
- SequentialType *ST = cast<SequentialType>(*I);
      // Get the index number for the array... which must be long type...
      GenericValue IdxGV = getOperandValue(I.getOperand(), SF);

      int64_t Idx;
      unsigned BitWidth =
          cast<IntegerType>(I.getOperand()->getType())->getBitWidth();
      if (BitWidth == 32)
        Idx = (int64_t)(int32_t)IdxGV.IntVal.getZExtValue();
      else {
        assert(BitWidth == 64 && "Invalid index type for getelementptr");
        Idx = (int64_t)IdxGV.IntVal.getZExtValue();
}
- Total += getDataLayout().getTypeAllocSize(ST->getElementType()) * Idx;
+ Total += getDataLayout().getTypeAllocSize(I.getIndexedType()) * Idx;
}
}
}
-/// Test whether a given ConstantInt is in-range for a SequentialType.
+/// Test whether a given ConstantInt is in-range for an array of a given
+/// number of elements.
-static bool isIndexInRangeOfSequentialType(SequentialType *STy,
- const ConstantInt *CI) {
- // And indices are valid when indexing along a pointer
- if (isa<PointerType>(STy))
- return true;
-
- uint64_t NumElements = 0;
- // Determine the number of elements in our sequential type.
- if (auto *ATy = dyn_cast<ArrayType>(STy))
- NumElements = ATy->getNumElements();
- else if (auto *VTy = dyn_cast<VectorType>(STy))
- NumElements = VTy->getNumElements();
-
- assert((isa<ArrayType>(STy) || NumElements > 0) &&
- "didn't expect non-array type to have zero elements!");
-
+static bool isIndexInRangeOfArrayType(uint64_t NumElements,
+ const ConstantInt *CI) {
// We cannot bounds check the index if it doesn't fit in an int64_t.
if (CI->getValue().getActiveBits() > 64)
return false;
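// Contract sketch for the slimmed-down helper (hypothetical values):
//   isIndexInRangeOfArrayType(8, CI)   // true only for CI in [0, 7]
// Indices with more than 64 active bits report false, and the rest of the
// body (not shown here) likewise rejects negative or too-large indices; the
// old "indexing along a pointer" escape hatch is gone because callers now
// filter unbounded levels with isBoundedSequential().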
// getelementptr instructions into a single instruction.
//
if (CE->getOpcode() == Instruction::GetElementPtr) {
- Type *LastTy = nullptr;
+ gep_type_iterator LastI = gep_type_end(CE);
for (gep_type_iterator I = gep_type_begin(CE), E = gep_type_end(CE);
I != E; ++I)
- LastTy = *I;
+ LastI = I;
// We cannot combine indices if doing so would take us outside of an
// array or vector. Doing otherwise could trick us if we evaluated such a
bool PerformFold = false;
if (Idx0->isNullValue())
PerformFold = true;
- else if (SequentialType *STy = dyn_cast_or_null<SequentialType>(LastTy))
+ else if (LastI.isSequential())
if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx0))
- PerformFold = isIndexInRangeOfSequentialType(STy, CI);
+ PerformFold =
+ !LastI.isBoundedSequential() ||
+ isIndexInRangeOfArrayType(LastI.getSequentialNumElements(), CI);
if (PerformFold) {
SmallVector<Value*, 16> NewIndices;
Unknown = true;
continue;
}
- if (isIndexInRangeOfSequentialType(STy, CI))
+ if (isIndexInRangeOfArrayType(isa<ArrayType>(STy)
+ ? cast<ArrayType>(STy)->getNumElements()
+ : cast<VectorType>(STy)->getNumElements(),
+ CI))
// It's in range, skip to the next index.
continue;
if (!isa<SequentialType>(Prev)) {
gep_type_iterator GEPI = gep_type_begin(this), E = gep_type_end(this);
User::const_op_iterator OI = std::next(this->op_begin());
- // Skip the first index, as it has no static limit.
- ++GEPI;
- ++OI;
-
// The remaining indices must be compile-time known integers within the
// bounds of the corresponding notional static array types.
for (; GEPI != E; ++GEPI, ++OI) {
ConstantInt *CI = dyn_cast<ConstantInt>(*OI);
    if (!CI) return false;
- if (ArrayType *ATy = dyn_cast<ArrayType>(*GEPI))
- if (CI->getValue().getActiveBits() > 64 ||
- CI->getZExtValue() >= ATy->getNumElements())
- return false;
+ if (GEPI.isBoundedSequential() &&
+ (CI->getValue().getActiveBits() > 64 ||
+ CI->getZExtValue() >= GEPI.getSequentialNumElements()))
+ return false;
}
// All the indices checked out.
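// The deleted "Skip the first index" prologue is subsumed by the new
// iterator: the first position models an unbounded array of the source
// element type, so isBoundedSequential() is false there and the loop skips
// the range check naturally. E.g. (hypothetical)
//   getelementptr [4 x i32], [4 x i32]* @g, i64 9, i64 1
// never range-checks the i64 9, but checks the i64 1 against 4.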
ArrayRef<Value *> Indices) const {
int64_t Result = 0;
- // We can use 0 as the address space as we don't need
- // to get pointer types back from gep_type_iterator.
- unsigned AS = 0;
generic_gep_type_iterator<Value* const*>
- GTI = gep_type_begin(ElemTy, AS, Indices),
- GTE = gep_type_end(ElemTy, AS, Indices);
+ GTI = gep_type_begin(ElemTy, Indices),
+ GTE = gep_type_end(ElemTy, Indices);
for (; GTI != GTE; ++GTI) {
Value *Idx = GTI.getOperand();
- if (auto *STy = dyn_cast<StructType>(*GTI)) {
+ if (StructType *STy = GTI.getStructTypeOrNull()) {
assert(Idx->getType()->isIntegerTy(32) && "Illegal struct idx");
unsigned FieldNo = cast<ConstantInt>(Idx)->getZExtValue();
continue;
// Handle a struct index, which adds its field offset to the pointer.
- if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+ if (StructType *STy = GTI.getStructTypeOrNull()) {
unsigned ElementIdx = OpC->getZExtValue();
const StructLayout *SL = DL.getStructLayout(STy);
Offset += APInt(Offset.getBitWidth(), SL->getElementOffset(ElementIdx));
for (gep_type_iterator GTI = gep_type_begin(U), E = gep_type_end(U);
GTI != E; ++GTI) {
const Value *Op = GTI.getOperand();
- if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+ if (StructType *STy = GTI.getStructTypeOrNull()) {
const StructLayout *SL = DL.getStructLayout(STy);
unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
TmpOffset += SL->getElementOffset(Idx);
for (gep_type_iterator GTI = gep_type_begin(I), E = gep_type_end(I);
GTI != E; ++GTI) {
const Value *Idx = GTI.getOperand();
- if (auto *StTy = dyn_cast<StructType>(*GTI)) {
+ if (auto *StTy = GTI.getStructTypeOrNull()) {
unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
// N = N + Offset
if (Field)
case Instruction::GetElementPtr: {
gep_type_iterator GTI = gep_type_begin(Instr);
auto &DL = Ext->getModule()->getDataLayout();
- std::advance(GTI, U.getOperandNo());
- Type *IdxTy = *GTI;
+ std::advance(GTI, U.getOperandNo()-1);
+ Type *IdxTy = GTI.getIndexedType();
// This extension will end up with a shift because of the scaling factor.
// 8-bit sized types have a scaling factor of 1, thus a shift amount of 0.
// Get the shift amount based on the scaling factor:
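// Same one-position shift as elsewhere: after U.getOperandNo() - 1
// increments, getIndexedType() names the type scaled by that operand.
// Sketch (hypothetical IR): for operand 1 of
//   getelementptr i16, i16* %p, i64 %idx
// no increment is needed, IdxTy is i16, and the 2-byte scaling factor
// yields a shift amount of 1.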
for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
i != e; ++i, ++GTI) {
const Value *Op = *i;
- if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+ if (StructType *STy = GTI.getStructTypeOrNull()) {
const StructLayout *SL = DL.getStructLayout(STy);
unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
TmpOffset += SL->getElementOffset(Idx);
for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end(); i != e;
++i, ++GTI) {
const Value *Op = *i;
- if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+ if (StructType *STy = GTI.getStructTypeOrNull()) {
const StructLayout *SL = DL.getStructLayout(STy);
unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
TmpOffset += SL->getElementOffset(Idx);
for (User::const_op_iterator II = U->op_begin() + 1, IE = U->op_end();
II != IE; ++II, ++GTI) {
const Value *Op = *II;
- if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+ if (StructType *STy = GTI.getStructTypeOrNull()) {
const StructLayout *SL = DL.getStructLayout(STy);
unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
TmpOffset += SL->getElementOffset(Idx);
for (gep_type_iterator GTI = gep_type_begin(U), E = gep_type_end(U);
GTI != E; ++GTI) {
const Value *Op = GTI.getOperand();
- if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+ if (StructType *STy = GTI.getStructTypeOrNull()) {
const StructLayout *SL = DL.getStructLayout(STy);
unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
TmpOffset += SL->getElementOffset(Idx);
for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
i != e; ++i, ++GTI) {
const Value *Op = *i;
- if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+ if (StructType *STy = GTI.getStructTypeOrNull()) {
const StructLayout *SL = DL.getStructLayout(STy);
Disp += SL->getElementOffset(cast<ConstantInt>(Op)->getZExtValue());
continue;
++GEPI; // Skip over the pointer index.
// If this is a use of an array allocation, do a bit more checking for sanity.
- if (ArrayType *AT = dyn_cast<ArrayType>(*GEPI)) {
- uint64_t NumElements = AT->getNumElements();
+ if (GEPI.isSequential()) {
ConstantInt *Idx = cast<ConstantInt>(U->getOperand(2));
// Check to make sure that index falls within the array. If not,
// something funny is going on, so we won't do the optimization.
//
- if (Idx->getZExtValue() >= NumElements)
+ if (GEPI.isBoundedSequential() &&
+ Idx->getZExtValue() >= GEPI.getSequentialNumElements())
return false;
// We cannot scalar repl this level of the array unless any array
for (++GEPI; // Skip array index.
GEPI != E;
++GEPI) {
- uint64_t NumElements;
- if (ArrayType *SubArrayTy = dyn_cast<ArrayType>(*GEPI))
- NumElements = SubArrayTy->getNumElements();
- else if (VectorType *SubVectorTy = dyn_cast<VectorType>(*GEPI))
- NumElements = SubVectorTy->getNumElements();
- else {
- assert((*GEPI)->isStructTy() &&
- "Indexed GEP type is not array, vector, or struct!");
+ if (GEPI.isStruct())
continue;
- }
ConstantInt *IdxVal = dyn_cast<ConstantInt>(GEPI.getOperand());
- if (!IdxVal || IdxVal->getZExtValue() >= NumElements)
+ if (!IdxVal ||
+ (GEPI.isBoundedSequential() &&
+ IdxVal->getZExtValue() >= GEPI.getSequentialNumElements()))
return false;
}
}
if (CI->isZero()) continue;
// Handle a struct index, which adds its field offset to the pointer.
- if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+ if (StructType *STy = GTI.getStructTypeOrNull()) {
Offset += DL.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
} else {
uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
if (CI->isZero()) continue;
// Handle a struct index, which adds its field offset to the pointer.
- if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+ if (StructType *STy = GTI.getStructTypeOrNull()) {
Offset += DL.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
} else {
uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end(); I != E;
++I, ++GTI) {
// Skip indices into struct types.
- if (isa<StructType>(*GTI))
+ if (GTI.isStruct())
continue;
// Index type should have the same width as IntPtr
bool EndsWithSequential = false;
for (gep_type_iterator I = gep_type_begin(*Src), E = gep_type_end(*Src);
I != E; ++I)
- EndsWithSequential = !(*I)->isStructTy();
+ EndsWithSequential = I.isSequential();
// Can we combine the two pointer arithmetics offsets?
if (EndsWithSequential) {
if (OpC->isZero()) continue; // No offset.
// Handle struct indices, which add their field offset to the pointer.
- if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+ if (StructType *STy = GTI.getStructTypeOrNull()) {
Offset += DL.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
continue;
}
return nullptr;
gep_type_iterator GTI = gep_type_begin(*GEP);
- for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I) {
- if (isa<SequentialType>(*GTI++)) {
- if (auto *NewGEP = tryReassociateGEPAtIndex(GEP, I - 1, *GTI)) {
+ for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
+ if (GTI.isSequential()) {
+ if (auto *NewGEP = tryReassociateGEPAtIndex(GEP, I - 1,
+ GTI.getIndexedType())) {
return NewGEP;
}
}
break;
// Handle a struct index, which adds its field offset to the pointer.
- if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+ if (StructType *STy = GTI.getStructTypeOrNull()) {
unsigned ElementIdx = OpC->getZExtValue();
const StructLayout *SL = DL.getStructLayout(STy);
GEPOffset +=
for (User::op_iterator I = GEP->op_begin() + 1, E = GEP->op_end();
I != E; ++I, ++GTI) {
// Skip struct member indices which must be i32.
- if (isa<SequentialType>(*GTI)) {
+ if (GTI.isSequential()) {
if ((*I)->getType() != IntPtrTy) {
*I = CastInst::CreateIntegerCast(*I, IntPtrTy, true, "idxprom", GEP);
Changed = true;
int64_t AccumulativeByteOffset = 0;
gep_type_iterator GTI = gep_type_begin(*GEP);
for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
- if (isa<SequentialType>(*GTI)) {
+ if (GTI.isSequential()) {
// Tries to extract a constant offset from this GEP index.
int64_t ConstantOffset =
ConstantOffsetExtractor::Find(GEP->getOperand(I), GEP, DT);
ConstantOffset * DL->getTypeAllocSize(GTI.getIndexedType());
}
} else if (LowerGEP) {
- StructType *StTy = cast<StructType>(*GTI);
+ StructType *StTy = GTI.getStructType();
uint64_t Field = cast<ConstantInt>(GEP->getOperand(I))->getZExtValue();
// Skip field 0 as the offset is always 0.
if (Field != 0) {
// Create an ugly GEP for each sequential index. We don't create GEPs for
// structure indices, as they are accumulated in the constant offset index.
for (unsigned I = 1, E = Variadic->getNumOperands(); I != E; ++I, ++GTI) {
- if (isa<SequentialType>(*GTI)) {
+ if (GTI.isSequential()) {
Value *Idx = Variadic->getOperand(I);
// Skip zero indices.
if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx))
// don't create arithmetics for structure indices, as they are accumulated
// in the constant offset index.
for (unsigned I = 1, E = Variadic->getNumOperands(); I != E; ++I, ++GTI) {
- if (isa<SequentialType>(*GTI)) {
+ if (GTI.isSequential()) {
Value *Idx = Variadic->getOperand(I);
// Skip zero indices.
if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx))
// handle the constant offset and won't need a new structure index.
gep_type_iterator GTI = gep_type_begin(*GEP);
for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
- if (isa<SequentialType>(*GTI)) {
+ if (GTI.isSequential()) {
// Splits this GEP index into a variadic part and a constant offset, and
// uses the variadic part as the new index.
Value *OldIdx = GEP->getOperand(I);
IndexExprs.push_back(SE->getSCEV(*I));
gep_type_iterator GTI = gep_type_begin(GEP);
- for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I) {
- if (!isa<SequentialType>(*GTI++))
+ for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
+ if (GTI.isStruct())
continue;
const SCEV *OrigIndexExpr = IndexExprs[I - 1];
// indices except this current one.
const SCEV *BaseExpr = SE->getGEPExpr(cast<GEPOperator>(GEP), IndexExprs);
Value *ArrayIdx = GEP->getOperand(I);
- uint64_t ElementSize = DL->getTypeAllocSize(*GTI);
+ uint64_t ElementSize = DL->getTypeAllocSize(GTI.getIndexedType());
if (ArrayIdx->getType()->getIntegerBitWidth() <=
DL->getPointerSizeInBits(GEP->getAddressSpace())) {
// Skip factoring if ArrayIdx is wider than the pointer size, because
if (OpIdx == 0)
return true;
gep_type_iterator It = std::next(gep_type_begin(I), OpIdx - 1);
- return !It->isStructTy();
+ return It.isSequential();
}
}
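// Sketch of the resulting behavior (hypothetical IR): for
//   getelementptr {i32, i32}, {i32, i32}* %p, i64 %a, i32 1
// operand 0 (%p) and operand 1 (%a) may be replaced with variables, but
// operand 2 lands on a struct level, so It.isSequential() is false and the
// index must stay constant: this is the canReplaceOperandWithVariable case
// called out by the FIXME in GetElementPtrTypeIterator.h.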