Visited.clear();
return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
}
- for (Value *IncValue : PN->incoming_values())
- Worklist.push_back(IncValue);
+ append_range(Worklist, PN->incoming_values());
continue;
}
// directions.
BasicBlockListType BBList;
for (auto &SCC : make_range(scc_begin(&F), scc_end(&F)))
- for (BasicBlock *BB : SCC)
- BBList.push_back(BB);
+ append_range(BBList, SCC);
std::reverse(BBList.begin(), BBList.end());
DDGBuilder(*this, D, BBList).populate();
}
LoopBlocksDFS DFS(&L);
DFS.perform(&LI);
BasicBlockListType BBList;
- for (BasicBlock *BB : make_range(DFS.beginRPO(), DFS.endRPO()))
- BBList.push_back(BB);
+ append_range(BBList, make_range(DFS.beginRPO(), DFS.endRPO()));
DDGBuilder(*this, D, BBList).populate();
}
size_t OldSize = Graph.Nodes.size();
Graph.Nodes.clear();
- for (NodeType *N : reverse(NodesInPO))
- Graph.Nodes.push_back(N);
+ append_range(Graph.Nodes, reverse(NodesInPO));
if (Graph.Nodes.size() != OldSize)
assert(false &&
"Expected the number of nodes to stay the same after the sort");
FT.push_back(
{TensorSpec::createSpec<int64_t>(FeatureNameMap.at(I), {1}), None});
if (MUTR && MUTR->outputLoggedFeatureSpecs().size() > 1)
- FT.insert(FT.end(), MUTR->outputLoggedFeatureSpecs().begin() + 1,
- MUTR->outputLoggedFeatureSpecs().end());
+ append_range(FT, drop_begin(MUTR->outputLoggedFeatureSpecs()));
DefaultDecisionPos = FT.size();
FT.push_back(
for (size_t I = 0; I < NumberOfFeatures; ++I)
InputSpecs.push_back(
TensorSpec::createSpec<int64_t>(TFFeedPrefix + FeatureNameMap[I], {1}));
- InputSpecs.insert(InputSpecs.end(), TrainingOnlyFeatures.begin(),
- TrainingOnlyFeatures.end());
+ append_range(InputSpecs, TrainingOnlyFeatures);
if (auto MaybeOutSpecs =
loadOutputSpecs(Ctx, DecisionName, ModelPath, TFOutputSpecOverride))
OutputSpecs = std::move(*MaybeOutSpecs);
}
LoopVectorTy Loops;
- for (Loop *L : breadth_first(&Root))
- Loops.push_back(L);
+ append_range(Loops, breadth_first(&Root));
if (!getInnerMostLoop(Loops)) {
LLVM_DEBUG(dbgs() << "Cannot compute cache cost of loop nest with more "
LoopNest::LoopNest(Loop &Root, ScalarEvolution &SE)
: MaxPerfectDepth(getMaxPerfectDepth(Root, SE)) {
- for (Loop *L : breadth_first(&Root))
- Loops.push_back(L);
+ append_range(Loops, breadth_first(&Root));
}
std::unique_ptr<LoopNest> LoopNest::getLoopNest(Loop &Root,
} else {
// Seed DirtyBlocks with each of the preds of QueryInst's block.
BasicBlock *QueryBB = QueryCall->getParent();
- for (BasicBlock *Pred : PredCache.get(QueryBB))
- DirtyBlocks.push_back(Pred);
+ append_range(DirtyBlocks, PredCache.get(QueryBB));
++NumUncacheNonLocal;
}
// If the block *is* completely transparent to the load, we need to check
// the predecessors of this block. Add them to our worklist.
- for (BasicBlock *Pred : PredCache.get(DirtyBB))
- DirtyBlocks.push_back(Pred);
+ append_range(DirtyBlocks, PredCache.get(DirtyBB));
}
}
if (!TransfersExecution)
return nullptr;
- for (const BasicBlock *AdjacentBB : successors(ToBB))
- Worklist.push_back(AdjacentBB);
+ append_range(Worklist, successors(ToBB));
}
}
if (V == I || isSafeToSpeculativelyExecute(V)) {
EphValues.insert(V);
if (const User *U = dyn_cast<User>(V))
- for (User::const_op_iterator J = U->op_begin(), JE = U->op_end();
- J != JE; ++J)
- WorkSet.push_back(*J);
+ append_range(WorkSet, U->operands());
}
}
}
// underlying objects.
if (!LI || !LI->isLoopHeader(PN->getParent()) ||
isSameUnderlyingObjectInLoop(PN, LI))
- for (Value *IncValue : PN->incoming_values())
- Worklist.push_back(IncValue);
+ append_range(Worklist, PN->incoming_values());
continue;
}
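
Every hunk above swaps a hand-written element-by-element copy for llvm::append_range from llvm/ADT/STLExtras.h, which forwards a whole range to the container's insert at end(). A minimal sketch of the equivalence, assuming a SmallVector destination and an ArrayRef source (the copyRange/Out/In names are illustrative, not part of this patch):

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"

void copyRange(llvm::SmallVectorImpl<int> &Out, llvm::ArrayRef<int> In) {
  // Roughly equivalent to: Out.insert(Out.end(), In.begin(), In.end());
  llvm::append_range(Out, In);
}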