/// This is a helper function which calls the three-argument getUserCost
/// with \p Operands set to the operands \p U currently has.
int getUserCost(const User *U, TargetCostKind CostKind) const {
- SmallVector<const Value *, 4> Operands(U->value_op_begin(),
- U->value_op_end());
+ SmallVector<const Value *, 4> Operands(U->operand_values());
return getUserCost(U, Operands, CostKind);
}
}
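Most of these conversions rely on the same ADT facility: SmallVector has an
explicit constructor taking an llvm::iterator_range, so a range returned by
helpers such as operand_values(), operands(), users(), indices(), or
predecessors() can be handed to the constructor directly instead of spelling
out the begin()/end() iterator pair. A minimal sketch of the idiom, using a
hypothetical copyOperands() helper that is not part of the patch:

#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/User.h"

// Collect the operand values of U into a SmallVector. operand_values()
// returns an iterator_range, and SmallVector's range constructor forwards
// it to append(R.begin(), R.end()).
static llvm::SmallVector<const llvm::Value *, 4>
copyOperands(const llvm::User *U) {
  // Before: SmallVector<const Value *, 4> Ops(U->value_op_begin(),
  //                                           U->value_op_end());
  return llvm::SmallVector<const llvm::Value *, 4>(U->operand_values());
}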
int getInstructionLatency(const Instruction *I) {
- SmallVector<const Value *, 4> Operands(I->value_op_begin(),
- I->value_op_end());
+ SmallVector<const Value *, 4> Operands(I->operand_values());
if (getUserCost(I, Operands, TTI::TCK_Latency) == TTI::TCC_Free)
return 0;
StorageType Storage, bool ShouldCreate = true);
TempGenericDINode cloneImpl() const {
- return getTemporary(
- getContext(), getTag(), getHeader(),
- SmallVector<Metadata *, 4>(dwarf_op_begin(), dwarf_op_end()));
+ return getTemporary(getContext(), getTag(), getHeader(),
+ SmallVector<Metadata *, 4>(dwarf_operands()));
}
public:
StorageType Storage, bool ShouldCreate = true);
TempMDTuple cloneImpl() const {
- return getTemporary(getContext(),
- SmallVector<Metadata *, 4>(op_begin(), op_end()));
+ return getTemporary(getContext(), SmallVector<Metadata *, 4>(operands()));
}
public:
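The metadata clones above also rely on the constructor copying element by
element, so a range whose element type merely converts to the vector's element
type works as well: MDOperand converts implicitly to Metadata *, and the Use
ranges produced by operands() and indices() in the later hunks convert to
Value * the same way. A minimal sketch, with a hypothetical copyMDOperands()
helper:

#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Metadata.h"

// Copy an MDNode's operands into a SmallVector<Metadata *>. operands()
// yields MDOperand elements; each one is converted to Metadata * as the
// range constructor copies it.
static llvm::SmallVector<llvm::Metadata *, 4>
copyMDOperands(const llvm::MDNode *N) {
  return llvm::SmallVector<llvm::Metadata *, 4>(N->operands());
}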
if (Entry)
return Entry;
- SmallVector<BasicBlock *, 32> PredCache(pred_begin(BB), pred_end(BB));
+ SmallVector<BasicBlock *, 32> PredCache(predecessors(BB));
PredCache.push_back(nullptr); // null terminator.
BlockToPredCountMap[BB] = PredCache.size() - 1;
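The CFG helpers get the same treatment: llvm::predecessors() (and its
counterpart llvm::successors(), both declared in llvm/IR/CFG.h) return
iterator ranges, so a predecessor snapshot no longer needs the
pred_begin()/pred_end() pair. A minimal sketch, with a hypothetical
snapshotPreds() helper:

#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"

// Snapshot the predecessors of BB. predecessors() returns an
// iterator_range<pred_iterator>, which seeds the SmallVector directly.
static llvm::SmallVector<llvm::BasicBlock *, 8>
snapshotPreds(llvm::BasicBlock *BB) {
  return llvm::SmallVector<llvm::BasicBlock *, 8>(llvm::predecessors(BB));
}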
// expression GEP with the same indices and a null base pointer to see
// what constant folding can make out of it.
Constant *Null = Constant::getNullValue(GLHS->getPointerOperandType());
- SmallVector<Value *, 4> IndicesLHS(GLHS->idx_begin(), GLHS->idx_end());
+ SmallVector<Value *, 4> IndicesLHS(GLHS->indices());
Constant *NewLHS = ConstantExpr::getGetElementPtr(
GLHS->getSourceElementType(), Null, IndicesLHS);
I->getOperand(2), Q);
break;
case Instruction::GetElementPtr: {
- SmallVector<Value *, 8> Ops(I->op_begin(), I->op_end());
+ SmallVector<Value *, 8> Ops(I->operands());
Result = SimplifyGEPInst(cast<GetElementPtrInst>(I)->getSourceElementType(),
Ops, Q);
break;
// NLI + LI + {Start,+,Step} --> NLI + {LI+Start,+,Step}
LIOps.push_back(AddRec->getStart());
- SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
- AddRec->op_end());
+ SmallVector<const SCEV *, 4> AddRecOps(AddRec->operands());
// This follows from the fact that the no-wrap flags on the outer add
// expression are applicable on the 0th iteration, when the add recurrence
// will be equal to its start value.
"AddRecExprs are not sorted in reverse dominance order?");
if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
// Other + {A,+,B}<L> + {C,+,D}<L> --> Other + {A+C,+,B+D}<L>
- SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
- AddRec->op_end());
+ SmallVector<const SCEV *, 4> AddRecOps(AddRec->operands());
for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
++OtherIdx) {
const auto *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
const SCEV *Op = M->getOperand(i);
const SCEV *Div = getUDivExpr(Op, RHSC);
if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) {
- Operands = SmallVector<const SCEV *, 4>(M->op_begin(),
- M->op_end());
+ Operands = SmallVector<const SCEV *, 4>(M->operands());
Operands[i] = Div;
return getMulExpr(Operands);
}
? (L->getLoopDepth() < NestedLoop->getLoopDepth())
: (!NestedLoop->contains(L) &&
DT.dominates(L->getHeader(), NestedLoop->getHeader()))) {
- SmallVector<const SCEV *, 4> NestedOperands(NestedAR->op_begin(),
- NestedAR->op_end());
+ SmallVector<const SCEV *, 4> NestedOperands(NestedAR->operands());
Operands[0] = NestedAR->getStart();
// AddRecs require their operands be loop-invariant with respect to their
// loops. Don't perform this transformation if it would break this
// If the start is a non-zero constant, shift the range to simplify things.
if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart()))
if (!SC->getValue()->isZero()) {
- SmallVector<const SCEV *, 4> Operands(op_begin(), op_end());
+ SmallVector<const SCEV *, 4> Operands(operands());
Operands[0] = SE.getZero(SC->getType());
const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop(),
getNoWrapFlags(FlagNW));
// so that future queries will recompute the expressions using the new
// value.
Value *Old = getValPtr();
- SmallVector<User *, 16> Worklist(Old->user_begin(), Old->user_end());
+ SmallVector<User *, 16> Worklist(Old->users());
SmallPtrSet<User *, 8> Visited;
while (!Worklist.empty()) {
User *U = Worklist.pop_back_val();
}
Instruction *ConstantExpr::getAsInstruction() const {
- SmallVector<Value *, 4> ValueOperands(op_begin(), op_end());
+ SmallVector<Value *, 4> ValueOperands(operands());
ArrayRef<Value*> Ops(ValueOperands);
switch (getOpcode()) {
// Check constraints that this basic block imposes on all of the PHI nodes in
// it.
if (isa<PHINode>(BB.front())) {
- SmallVector<BasicBlock*, 8> Preds(pred_begin(&BB), pred_end(&BB));
+ SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
SmallVector<std::pair<BasicBlock*, Value*>, 8> Values;
llvm::sort(Preds);
for (const PHINode &PN : BB.phis()) {
"GEP base pointer is not a vector or a vector of pointers", &GEP);
Assert(GEP.getSourceElementType()->isSized(), "GEP into unsized type!", &GEP);
- SmallVector<Value*, 16> Idxs(GEP.idx_begin(), GEP.idx_end());
+ SmallVector<Value *, 16> Idxs(GEP.indices());
Assert(all_of(
Idxs, [](Value* V) { return V->getType()->isIntOrIntVectorTy(); }),
"GEP indexes must be integers", &GEP);
static RecordRecTy *resolveRecordTypes(RecordRecTy *T1, RecordRecTy *T2) {
SmallVector<Record *, 4> CommonSuperClasses;
- SmallVector<Record *, 4> Stack;
-
- Stack.insert(Stack.end(), T1->classes_begin(), T1->classes_end());
+ SmallVector<Record *, 4> Stack(T1->classes_begin(), T1->classes_end());
while (!Stack.empty()) {
Record *R = Stack.back();