// Return true if edge destination should be visited.
template<typename NodeType>
bool insertEdge(NodeType *From, NodeType *To) {
- return Visited.insert(To);
+ return Visited.insert(To).second;
}
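+ // NOTE: insert() on these set ADTs now returns std::pair<iterator, bool>
+ // (the standard associative-container convention) instead of bool, so
+ // callers test the .second member. A minimal sketch of the pattern,
+ // assuming a SmallPtrSet-backed visited set and a worklist:
+ //
+ //   SmallPtrSet<NodeType *, 16> Visited;
+ //   if (Visited.insert(N).second) // true iff N was not already present
+ //     Worklist.push_back(N);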
// Called after all children of BB have been visited.
/// \brief Insert a new element into the SetVector.
/// \returns true iff the element was inserted into the SetVector.
bool insert(const value_type &X) {
- bool result = set_.insert(X);
+ bool result = set_.insert(X).second;
if (result)
vector_.push_back(X);
return result;
template<typename It>
void insert(It Start, It End) {
for (; Start != End; ++Start)
- if (set_.insert(*Start))
+ if (set_.insert(*Start).second)
vector_.push_back(*Start);
}
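+ // Note that SetVector::insert itself still returns a plain bool; only the
+ // calls into the underlying set type, whose insert() now returns
+ // std::pair<iterator, bool>, change, hence the .second above.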
- /// insert_imp - This returns true if the pointer was new to the set, false if
- /// it was already in the set. This is hidden from the client so that the
- /// derived class can check that the right type of pointer is passed in.
- bool insert_imp(const void * Ptr);
+ /// insert_imp - This returns a pair: a pointer to the slot that holds Ptr,
+ /// and a bool that is true if the pointer was new to the set. This is hidden
+ /// from the client so that the derived class can check that the right type
+ /// of pointer is passed in.
+ std::pair<const void *const *, bool> insert_imp(const void *Ptr);
/// erase_imp - If the set contains the specified pointer, remove it and
/// return true, otherwise return false. This is hidden from the client so
: SmallPtrSetImplBase(SmallStorage, SmallSize) {}
public:
+ typedef SmallPtrSetIterator<PtrType> iterator;
+ typedef SmallPtrSetIterator<PtrType> const_iterator;
+
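+ // The iterator typedefs are hoisted above insert() because its new return
+ // type, std::pair<iterator, bool>, requires iterator to be declared first.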
- /// insert - This returns true if the pointer was new to the set, false if it
- /// was already in the set.
+ /// insert - Inserts Ptr if it is not already in the set. The bool member of
+ /// the returned pair is true if and only if the insertion took place; the
+ /// iterator member points at the element equal to Ptr.
- bool insert(PtrType Ptr) {
- return insert_imp(PtrTraits::getAsVoidPointer(Ptr));
+ std::pair<iterator, bool> insert(PtrType Ptr) {
+ auto p = insert_imp(PtrTraits::getAsVoidPointer(Ptr));
+ return std::make_pair(iterator(p.first, CurArray + CurArraySize), p.second);
}
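+
+ // A minimal usage sketch (names hypothetical): the bool member answers the
+ // old "was it new?" question; the iterator member locates the element
+ // without a second lookup:
+ //
+ //   SmallPtrSet<Value *, 8> Seen;
+ //   auto I = Seen.insert(V);   // I.first: iterator, I.second: inserted?
+ //   if (I.second)
+ //     /* V was not in the set before */;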
/// erase - If the set contains the specified pointer, remove it and return
insert(*I);
}
- typedef SmallPtrSetIterator<PtrType> iterator;
- typedef SmallPtrSetIterator<PtrType> const_iterator;
inline iterator begin() const {
return iterator(CurArray, CurArray+CurArraySize);
}
#ifndef LLVM_ADT_SMALLSET_H
#define LLVM_ADT_SMALLSET_H
+#include "llvm/ADT/None.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include <set>
/// insert - Insert an element into the set if it isn't already there.
- /// Returns true if the element is inserted (it was not in the set before).
+ /// The second value of the returned pair is true if the element was
+ /// inserted (it was not in the set before).
- bool insert(const T &V) {
+ /// The first value of the returned pair is unused and provided for
+ /// partial compatibility with the standard library self-associative container
+ /// concept.
+ // FIXME: Add iterators that abstract over the small and large form, and then
+ // return those here.
+ std::pair<NoneType, bool> insert(const T &V) {
if (!isSmall())
- return Set.insert(V).second;
+ return std::make_pair(None, Set.insert(V).second);
VIterator I = vfind(V);
if (I != Vector.end()) // Don't reinsert if it already exists.
- return false;
+ return std::make_pair(None, false);
if (Vector.size() < N) {
Vector.push_back(V);
- return true;
+ return std::make_pair(None, true);
}
// Otherwise, grow from vector to set.
Vector.pop_back();
}
Set.insert(V);
- return true;
+ return std::make_pair(None, true);
}
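+ // Usage sketch (names hypothetical): the first member is a placeholder
+ // NoneType, so callers can only meaningfully test .second:
+ //
+ //   SmallSet<unsigned, 8> Seen;
+ //   if (Seen.insert(Reg).second)
+ //     /* first time Reg is seen */;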
template <typename IterT>
DomSetType &S = this->Frontiers[currentBB];
// Visit each block only once.
- if (visited.insert(currentBB)) {
+ if (visited.insert(currentBB).second) {
// Loop over CFG successors to calculate DFlocal[currentNode]
for (auto SI = BlockTraits::child_begin(currentBB),
SE = BlockTraits::child_end(currentBB);
SmallPtrSet<const SCEV *, 8> Visited;
void push(const SCEV *S) {
- if (Visited.insert(S) && Visitor.follow(S))
+ if (Visited.insert(S).second && Visitor.follow(S))
Worklist.push_back(S);
}
public:
void recordSplitCriticalEdge(MachineBasicBlock *FromBB,
MachineBasicBlock *ToBB,
MachineBasicBlock *NewBB) {
- bool Inserted = NewBBs.insert(NewBB);
+ bool Inserted = NewBBs.insert(NewBB).second;
(void)Inserted;
assert(Inserted &&
"A basic block inserted via edge splitting cannot appear twice");
typename GraphT::NodeType* VAncestor = DT.Vertex[VInfo.Parent];
// Process Ancestor first
- if (Visited.insert(VAncestor) && VInfo.Parent >= LastLinked) {
+ if (Visited.insert(VAncestor).second && VInfo.Parent >= LastLinked) {
Work.push_back(VAncestor);
continue;
}
Worklist.push_back(Loc.Ptr);
do {
const Value *V = GetUnderlyingObject(Worklist.pop_back_val(), DL);
- if (!Visited.insert(V)) {
+ if (!Visited.insert(V).second) {
Visited.clear();
return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal);
}
// sides are PHI nodes, in which case this is O(m x n) time where 'm'
// and 'n' are the number of PHI sources.
return MayAlias;
- if (UniqueSrc.insert(PV1))
+ if (UniqueSrc.insert(PV1).second)
V1Srcs.push_back(PV1);
}
bool FoundNew = false;
while (I != succ_end(ParentBB)) {
BB = *I++;
- if (Visited.insert(BB)) {
+ if (Visited.insert(BB).second) {
FoundNew = true;
break;
}
SmallSet<const BasicBlock*, 64> Visited;
do {
BasicBlock *BB = Worklist.pop_back_val();
- if (!Visited.insert(BB))
+ if (!Visited.insert(BB).second)
continue;
if (BB == StopBB)
return true;
if (Count++ >= Threshold)
return Tracker->tooManyUses();
- if (Visited.insert(&UU))
+ if (Visited.insert(&UU).second)
if (Tracker->shouldExplore(&UU))
Worklist.push_back(&UU);
}
const Value *V = WorkSet.front();
WorkSet.erase(WorkSet.begin());
- if (!Visited.insert(V))
+ if (!Visited.insert(V).second)
continue;
// If all uses of this value are ephemeral, then so is this value.
// Recursively fold the ConstantExpr's operands. If we have already folded
// a ConstantExpr, we don't have to process it again.
if (ConstantExpr *NewCE = dyn_cast<ConstantExpr>(NewC)) {
- if (FoldedOps.insert(NewCE))
+ if (FoldedOps.insert(NewCE).second)
NewC = ConstantFoldConstantExpressionImpl(NewCE, TD, TLI, FoldedOps);
}
Ops.push_back(NewC);
break;
}
assert(V->getType()->isPointerTy() && "Unexpected operand type!");
- } while (Visited.insert(V));
+ } while (Visited.insert(V).second);
Type *IntPtrTy = DL->getIntPtrType(V->getContext());
return cast<ConstantInt>(ConstantInt::get(IntPtrTy, Offset));
SmallPtrSetImpl<Loop*> &SimpleLoopNests) {
// Add this IV user to the Processed set before returning false to ensure that
// all IV users are members of the set. See IVUsers::isIVUserOrOperand.
- if (!Processed.insert(I))
+ if (!Processed.insert(I).second)
return true; // Instruction already handled.
if (!SE->isSCEVable(I->getType()))
SmallPtrSet<Instruction *, 4> UniqueUsers;
for (Use &U : I->uses()) {
Instruction *User = cast<Instruction>(U.getUser());
- if (!UniqueUsers.insert(User))
+ if (!UniqueUsers.insert(User).second)
continue;
// Do not infinitely recurse on PHI nodes.
}
assert(V->getType()->getScalarType()->isPointerTy() &&
"Unexpected operand type!");
- } while (Visited.insert(V));
+ } while (Visited.insert(V).second);
Constant *OffsetIntPtr = ConstantInt::get(IntPtrTy, Offset);
if (V->getType()->isVectorTy())
}
for (Value *Op : C->operand_values())
- if (Visited.insert(cast<Constant>(Op)))
+ if (Visited.insert(cast<Constant>(Op)).second)
Worklist.push_back(cast<Constant>(Op));
}
}
for (Instruction &I : BB)
for (Value *Op : I.operand_values())
if (Constant *C = dyn_cast<Constant>(Op))
- if (Visited.insert(C))
+ if (Visited.insert(C).second)
Worklist.push_back(C);
// We've collected all the constant (and thus potentially function or
SmallPtrSet<Constant *, 16> Visited;
for (GlobalVariable &GV : M.globals())
if (GV.hasInitializer())
- if (Visited.insert(GV.getInitializer()))
+ if (Visited.insert(GV.getInitializer()).second)
Worklist.push_back(GV.getInitializer());
DEBUG(dbgs() << " Adding functions referenced by global initializers to the "
SmallPtrSetImpl<LazyCallGraph::Node *> &Printed) {
// Recurse depth first through the nodes.
for (LazyCallGraph::Node &ChildN : N)
- if (Printed.insert(&ChildN))
+ if (Printed.insert(&ChildN).second)
printNodes(OS, ChildN, Printed);
OS << " Call edges in function: " << N.getFunction().getName() << "\n";
SmallPtrSet<LazyCallGraph::Node *, 16> Printed;
for (LazyCallGraph::Node &N : G)
- if (Printed.insert(&N))
+ if (Printed.insert(&N).second)
printNodes(OS, N, Printed);
for (LazyCallGraph::SCC &SCC : G.postorder_sccs())
Value *Lint::findValueImpl(Value *V, bool OffsetOk,
SmallPtrSetImpl<Value *> &Visited) const {
// Detect self-referential values.
- if (!Visited.insert(V))
+ if (!Visited.insert(V).second)
return UndefValue::get(V->getType());
// TODO: Look through sext or zext cast, when the result is known to
BasicBlock *BB = L->getParent();
SmallPtrSet<BasicBlock *, 4> VisitedBlocks;
for (;;) {
- if (!VisitedBlocks.insert(BB)) break;
+ if (!VisitedBlocks.insert(BB).second)
+ break;
if (Value *U = FindAvailableLoadedValue(L->getPointerOperand(),
BB, BBI, 6, AA))
return findValueImpl(U, OffsetOk, Visited);
if (Instruction *I = dyn_cast<Instruction>(V)) {
// If we have already seen this instruction, bail out. Cycles can happen in
// unreachable code after constant propagation.
- if (!SeenInsts.insert(I))
+ if (!SeenInsts.insert(I).second)
return unknown();
if (GEPOperator *GEP = dyn_cast<GEPOperator>(V))
// Record the pointers that were handled in this run, so that they can be
// cleaned later if something fails. We also use this set to break cycles that
// can occur in dead code.
- if (!SeenVals.insert(V)) {
+ if (!SeenVals.insert(V).second) {
Result = unknown();
} else if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
Result = visitGEPOperator(*GEP);
DirtyBlocks.pop_back();
// Already processed this block?
- if (!Visited.insert(DirtyBB))
+ if (!Visited.insert(DirtyBB).second)
continue;
// Do a binary search to see if we already have an entry for this block in
void detail::PtrUseVisitorBase::enqueueUsers(Instruction &I) {
for (Use &U : I.uses()) {
- if (VisitedUses.insert(&U)) {
+ if (VisitedUses.insert(&U).second) {
UseToVisit NewU = {
UseToVisit::UseAndIsOffsetKnownPair(&U, IsOffsetKnown),
Offset
Visited.insert(PN);
while (!Worklist.empty()) {
Instruction *I = Worklist.pop_back_val();
- if (!Visited.insert(I)) continue;
+ if (!Visited.insert(I).second)
+ continue;
ValueExprMapType::iterator It =
ValueExprMap.find_as(static_cast<Value *>(I));
SmallPtrSet<Instruction *, 8> Visited;
while (!Worklist.empty()) {
Instruction *I = Worklist.pop_back_val();
- if (!Visited.insert(I)) continue;
+ if (!Visited.insert(I).second)
+ continue;
ValueExprMapType::iterator It =
ValueExprMap.find_as(static_cast<Value *>(I));
SmallPtrSet<Instruction *, 8> Visited;
while (!Worklist.empty()) {
Instruction *I = Worklist.pop_back_val();
- if (!Visited.insert(I)) continue;
+ if (!Visited.insert(I).second)
+ continue;
ValueExprMapType::iterator It =
ValueExprMap.find_as(static_cast<Value *>(I));
SmallPtrSet<Instruction *, 8> Visited;
while (!Worklist.empty()) {
I = Worklist.pop_back_val();
- if (!Visited.insert(I)) continue;
+ if (!Visited.insert(I).second)
+ continue;
ValueExprMapType::iterator It =
ValueExprMap.find_as(static_cast<Value *>(I));
// that until everything else is done.
if (U == Old)
continue;
- if (!Visited.insert(U))
+ if (!Visited.insert(U).second)
continue;
if (PHINode *PN = dyn_cast<PHINode>(U))
SE->ConstantEvolutionLoopExitValue.erase(PN);
Constant *One = ConstantInt::get(Ty, 1);
for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
BasicBlock *HP = *HPI;
- if (!PredSeen.insert(HP)) {
+ if (!PredSeen.insert(HP).second) {
// There must be an incoming value for each predecessor, even the
// duplicates!
CanonicalIV->addIncoming(CanonicalIV->getIncomingValueForBlock(HP), HP);
SmallSet<StratifiedIndex, 16> Visited;
for (unsigned I = 0, E = Links.size(); I < E; ++I) {
auto CurrentIndex = getHighestParentAbove(I);
- if (!Visited.insert(CurrentIndex)) {
+ if (!Visited.insert(CurrentIndex).second) {
continue;
}
while (!WorkSet.empty()) {
const Value *V = WorkSet.pop_back_val();
- if (!Visited.insert(V))
+ if (!Visited.insert(V).second)
continue;
// If all uses of this value are ephemeral, then so is this value.
// If this is a PHI node, there are two cases: either we have already seen it
// or we haven't.
if (PHINode *PN = dyn_cast<PHINode>(V)) {
- if (!PHIs.insert(PN))
+ if (!PHIs.insert(PN).second)
return ~0ULL; // already in the set.
// If it was new, see if all the input strings are the same length.
Value *P = Worklist.pop_back_val();
P = GetUnderlyingObject(P, TD, MaxLookup);
- if (!Visited.insert(P))
+ if (!Visited.insert(P).second)
continue;
if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
ParseTypeAndBasicBlock(DestBB, PFS))
return true;
- if (!SeenCases.insert(Constant))
+ if (!SeenCases.insert(Constant).second)
return Error(CondLoc, "duplicate case value in switch");
if (!isa<ConstantInt>(Constant))
return Error(CondLoc, "case value is not a constant integer");
for (SUnit::const_pred_iterator P = SU->Preds.begin(), PE = SU->Preds.end();
P != PE; ++P) {
if ((P->getKind() == SDep::Anti) || (P->getKind() == SDep::Output)) {
- if (RegSet.insert(P->getReg()))
+ if (RegSet.insert(P->getReg()).second)
Edges.push_back(&*P);
}
}
const MCExpr *Base = TLI->getPICJumpTableRelocBaseExpr(MF,JTI,OutContext);
for (unsigned ii = 0, ee = JTBBs.size(); ii != ee; ++ii) {
const MachineBasicBlock *MBB = JTBBs[ii];
- if (!EmittedSets.insert(MBB)) continue;
+ if (!EmittedSets.insert(MBB).second)
+ continue;
// .set LJTSet, LBB32-base
const MCExpr *LHS =
for (unsigned i = 0, e = Variables.getNumElements(); i != e; ++i) {
DIVariable DV(Variables.getElement(i));
assert(DV.isVariable());
- if (!Processed.insert(DV))
+ if (!Processed.insert(DV).second)
continue;
if (LexicalScope *Scope = LScopes.findLexicalScope(DV.getContext())) {
ensureAbstractVariableIsCreatedIfScoped(DV, Scope->getScopeNode());
for (unsigned i = 0, e = Variables.getNumElements(); i != e; ++i) {
DIVariable DV(Variables.getElement(i));
assert(DV && DV.isVariable());
- if (!ProcessedVars.insert(DV))
+ if (!ProcessedVars.insert(DV).second)
continue;
ensureAbstractVariableIsCreated(DV, DV.getContext());
assert(LScopes.getAbstractScopesList().size() == NumAbstractScopes
continue;
// Visit each predecessor only once.
- if (!UniquePreds.insert(PBB))
+ if (!UniquePreds.insert(PBB).second)
continue;
// Skip blocks which may jump to a landing pad. Can't tail merge these.
numInstr++;
if (mi->isIdentityCopy() || mi->isImplicitDef() || mi->isDebugValue())
continue;
- if (!visited.insert(mi))
+ if (!visited.insert(mi).second)
continue;
float weight = 1.0f;
} else {
SmallPtrSet<BasicBlock*, 4> VisitedBBs;
for (pred_iterator PI = pred_begin(BB), PE = pred_end(BB); PI != PE; ++PI) {
- if (!VisitedBBs.insert(*PI))
+ if (!VisitedBBs.insert(*PI).second)
continue;
BasicBlock::InstListType &InstList = (*PI)->getInstList();
SmallPtrSetImpl<Instruction*> &ConsideredInsts,
const TargetLowering &TLI) {
// If we already considered this instruction, we're done.
- if (!ConsideredInsts.insert(I))
+ if (!ConsideredInsts.insert(I).second)
return false;
// If this is an obviously unfoldable instruction, bail out.
worklist.pop_back();
// Break use-def graph loops.
- if (!Visited.insert(V)) {
+ if (!Visited.insert(V).second) {
Consensus = nullptr;
break;
}
MachineInstr *DefMI = MRI->getVRegDef(Reg);
if (!DefMI || DefMI->getParent() != Head)
continue;
- if (InsertAfter.insert(DefMI))
+ if (InsertAfter.insert(DefMI).second)
DEBUG(dbgs() << "BB#" << MBB->getNumber() << " depends on " << *DefMI);
if (DefMI->isTerminator()) {
DEBUG(dbgs() << "Can't insert instructions below terminator.\n");
WorkList.push_back(std::make_pair(LI, VNI));
do {
std::tie(LI, VNI) = WorkList.pop_back_val();
- if (!UsedValues.insert(VNI))
+ if (!UsedValues.insert(VNI).second)
continue;
if (VNI->isPHIDef()) {
valnos.clear();
for (const_iterator I = begin(), E = end(); I != E; ++I) {
VNInfo *VNI = I->valno;
- if (!Seen.insert(VNI))
+ if (!Seen.insert(VNI).second)
continue;
assert(!VNI->isUnused() && "Unused valno used by live segment");
VNI->id = (unsigned)valnos.size();
(void)ExtVNI;
assert(ExtVNI == VNI && "Unexpected existing value number");
// Is this a PHIDef we haven't seen before?
- if (!VNI->isPHIDef() || VNI->def != BlockStart || !UsedPHIs.insert(VNI))
+ if (!VNI->isPHIDef() || VNI->def != BlockStart ||
+ !UsedPHIs.insert(VNI).second)
continue;
// The PHI is live, make sure the predecessors are live-out.
for (MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(),
PE = MBB->pred_end(); PI != PE; ++PI) {
- if (!LiveOut.insert(*PI))
+ if (!LiveOut.insert(*PI).second)
continue;
SlotIndex Stop = getMBBEndIdx(*PI);
// A predecessor is not required to have a live-out value for a PHI.
// Make sure VNI is live-out from the predecessors.
for (MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(),
PE = MBB->pred_end(); PI != PE; ++PI) {
- if (!LiveOut.insert(*PI))
+ if (!LiveOut.insert(*PI).second)
continue;
SlotIndex Stop = getMBBEndIdx(*PI);
assert(li->getVNInfoBefore(Stop) == VNI &&
/// Update a single live range, assuming an instruction has been moved from
/// OldIdx to NewIdx.
void updateRange(LiveRange &LR, unsigned Reg) {
- if (!Updated.insert(&LR))
+ if (!Updated.insert(&LR).second)
return;
DEBUG({
dbgs() << " ";
MachineBasicBlock::succ_iterator SI = succ_begin();
while (SI != succ_end()) {
const MachineBasicBlock *MBB = *SI;
- if (!SeenMBBs.insert(MBB) ||
+ if (!SeenMBBs.insert(MBB).second ||
(MBB != DestA && MBB != DestB && !MBB->isLandingPad())) {
// This is a superfluous edge, remove it.
SI = removeSuccessor(SI);
BE = L.block_end();
BI != BE; ++BI) {
BlockChain &Chain = *BlockToChain[*BI];
- if (!UpdatedPreds.insert(&Chain))
+ if (!UpdatedPreds.insert(&Chain).second)
continue;
assert(Chain.LoopPredecessors == 0);
for (MachineFunction::iterator FI = F.begin(), FE = F.end(); FI != FE; ++FI) {
MachineBasicBlock *BB = &*FI;
BlockChain &Chain = *BlockToChain[BB];
- if (!UpdatedPreds.insert(&Chain))
+ if (!UpdatedPreds.insert(&Chain).second)
continue;
assert(Chain.LoopPredecessors == 0);
// Internal def is now killed.
KilledDefSet.insert(Reg);
} else {
- if (ExternUseSet.insert(Reg)) {
+ if (ExternUseSet.insert(Reg).second) {
ExternUses.push_back(Reg);
if (MO.isUndef())
UndefUseSet.insert(Reg);
if (!Reg)
continue;
- if (LocalDefSet.insert(Reg)) {
+ if (LocalDefSet.insert(Reg).second) {
LocalDefs.push_back(Reg);
if (MO.isDead()) {
DeadDefSet.insert(Reg);
if (!MO.isDead()) {
for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs) {
unsigned SubReg = *SubRegs;
- if (LocalDefSet.insert(SubReg))
+ if (LocalDefSet.insert(SubReg).second)
LocalDefs.push_back(SubReg);
}
}
SmallSet<unsigned, 32> Added;
for (unsigned i = 0, e = LocalDefs.size(); i != e; ++i) {
unsigned Reg = LocalDefs[i];
- if (Added.insert(Reg)) {
+ if (Added.insert(Reg).second) {
// If it's not live beyond end of the bundle, mark it dead.
bool isDead = DeadDefSet.count(Reg) || KilledDefSet.count(Reg);
MIB.addReg(Reg, getDefRegState(true) | getDeadRegState(isDead) |
if (!TargetRegisterInfo::isVirtualRegister(Reg))
continue;
- bool isNew = RegSeen.insert(Reg);
+ bool isNew = RegSeen.insert(Reg).second;
unsigned RCId, RCCost;
getRegisterClassIDAndCost(MI, Reg, i, RCId, RCCost);
if (MO.isDef())
if (!TargetRegisterInfo::isVirtualRegister(Reg))
continue;
- bool isNew = RegSeen.insert(Reg);
+ bool isNew = RegSeen.insert(Reg).second;
if (MO.isDef())
Defs.push_back(Reg);
else if (!isNew && isOperandKill(MO, MRI)) {
// If the pass has already considered breaking this edge (during this pass
// through the function), then let's go ahead and break it. This means
// sinking multiple "cheap" instructions into the same block.
- if (!CEBCandidates.insert(std::make_pair(From, To)))
+ if (!CEBCandidates.insert(std::make_pair(From, To)).second)
return true;
if (!MI->isCopy() && !TII->isAsCheapAsAMove(MI))
}
// To is a new block. Mark the block as visited in case the CFG has cycles
// that MachineLoopInfo didn't recognize as a natural loop.
- return LB.Visited.insert(To);
+ return LB.Visited.insert(To).second;
}
};
}
unsigned DstReg = MI->getOperand(0).getReg();
// See if we already saw this register.
- if (!PHIsInCycle.insert(MI))
+ if (!PHIsInCycle.insert(MI).second)
return true;
// Don't scan crazily complex things.
"PHI destination is not a virtual register");
// See if we already saw this register.
- if (!PHIsInCycle.insert(MI))
+ if (!PHIsInCycle.insert(MI).second)
return true;
// Don't scan crazily complex things.
// Check to make sure we haven't already emitted the copy for this block.
// This can happen because PHI nodes may have multiple entries for the same
// basic block.
- if (!MBBsInsertedInto.insert(&opBlock))
+ if (!MBBsInsertedInto.insert(&opBlock).second)
continue; // If the copy has already been emitted, we're done.
// Find a safe location to insert the copy, this may be the first terminator
continue;
if (MO.isEarlyClobber() || MI->isRegTiedToDefOperand(i) ||
(MO.getSubReg() && MI->readsVirtualRegister(Reg))) {
- if (ThroughRegs.insert(Reg))
+ if (ThroughRegs.insert(Reg).second)
DEBUG(dbgs() << ' ' << PrintReg(Reg));
}
}
// the UseMI operands removes them from the SrcReg use-def chain, but when
// SrcReg is DstReg we could encounter UseMI twice if it has multiple
// operands mentioning the virtual register.
- if (SrcReg == DstReg && !Visited.insert(UseMI))
+ if (SrcReg == DstReg && !Visited.insert(UseMI).second)
continue;
SmallVector<unsigned,8> Ops;
for (SmallVectorImpl<Value *>::iterator I = Objs.begin(), IE = Objs.end();
I != IE; ++I) {
V = *I;
- if (!Visited.insert(V))
+ if (!Visited.insert(V).second)
continue;
if (Operator::getOpcode(V) == Instruction::IntToPtr) {
const Value *O =
return *Depth;
// Remember visited nodes.
- if (!Visited.insert(SUb))
+ if (!Visited.insert(SUb).second)
return *Depth;
// If there is _some_ dependency already in place, do not
// descend any further.
default:
// Only add if it isn't already in the list.
- if (SeenOps.insert(Op.getNode()))
+ if (SeenOps.insert(Op.getNode()).second)
Ops.push_back(Op);
else
Changed = true;
}
// Don't bother if we've been here before.
- if (!Visited.insert(Chain.getNode()))
+ if (!Visited.insert(Chain.getNode()).second)
continue;
switch (Chain.getOpcode()) {
for (SDNode::use_iterator UI = M->use_begin(),
UIE = M->use_end(); UI != UIE; ++UI)
- if (UI.getUse().getValueType() == MVT::Other && Visited.insert(*UI)) {
+ if (UI.getUse().getValueType() == MVT::Other &&
+ Visited.insert(*UI).second) {
if (isa<MemIntrinsicSDNode>(*UI) || isa<MemSDNode>(*UI)) {
// We've not visited this use, and we care about it (it could have an
// ordering dependency with the original node).
// If this terminator has multiple identical successors (common for
// switches), only handle each succ once.
- if (!SuccsHandled.insert(SuccMBB))
+ if (!SuccsHandled.insert(SuccMBB).second)
continue;
MachineBasicBlock::iterator MBBI = SuccMBB->begin();
continue;
}
- if (LegalizedNodes.insert(N)) {
+ if (LegalizedNodes.insert(N).second) {
AnyLegalized = true;
Legalizer.LegalizeOp(N);
bool Added = false;
for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) {
if (LiveRegDefs[*AI] && LiveRegDefs[*AI] != SU) {
- if (RegAdded.insert(*AI)) {
+ if (RegAdded.insert(*AI).second) {
LRegs.push_back(*AI);
Added = true;
}
if (LiveRegDefs[*AliasI] == SU) continue;
// Add Reg to the set of interfering live regs.
- if (RegAdded.insert(*AliasI)) {
+ if (RegAdded.insert(*AliasI).second) {
LRegs.push_back(*AliasI);
}
}
if (!LiveRegDefs[i]) continue;
if (LiveRegDefs[i] == SU) continue;
if (!MachineOperand::clobbersPhysReg(RegMask, i)) continue;
- if (RegAdded.insert(i))
+ if (RegAdded.insert(i).second)
LRegs.push_back(i);
}
}
SDNode *Gen = LiveRegGens[CallResource]->getNode();
while (SDNode *Glued = Gen->getGluedNode())
Gen = Glued;
- if (!IsChainDependent(Gen, Node, 0, TII) && RegAdded.insert(CallResource))
+ if (!IsChainDependent(Gen, Node, 0, TII) &&
+ RegAdded.insert(CallResource).second)
LRegs.push_back(CallResource);
}
}
for (SDNode::use_iterator I = Chain->use_begin(), E = Chain->use_end();
I != E && UseCount < 100; ++I, ++UseCount) {
SDNode *User = *I;
- if (User == Node || !Visited.insert(User))
+ if (User == Node || !Visited.insert(User).second)
continue;
int64_t Offset1, Offset2;
if (!TII->areLoadsFromSameBasePtr(Base, User, Offset1, Offset2) ||
// Add all operands to the worklist unless they've already been added.
for (unsigned i = 0, e = NI->getNumOperands(); i != e; ++i)
- if (Visited.insert(NI->getOperand(i).getNode()))
+ if (Visited.insert(NI->getOperand(i).getNode()).second)
Worklist.push_back(NI->getOperand(i).getNode());
if (isPassiveNode(NI)) // Leaf node, e.g. a TargetImmediate.
SmallVectorImpl<std::pair<unsigned, MachineInstr*> > &Orders,
SmallSet<unsigned, 8> &Seen) {
unsigned Order = N->getIROrder();
- if (!Order || !Seen.insert(Order)) {
+ if (!Order || !Seen.insert(Order).second) {
// Process any valid SDDbgValues even if node does not have any order
// assigned.
ProcessSDDbgValues(N, DAG, Emitter, Orders, VRBaseMap, 0);
const SDNode *M = Worklist.pop_back_val();
for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
SDNode *Op = M->getOperand(i).getNode();
- if (Visited.insert(Op))
+ if (Visited.insert(Op).second)
Worklist.push_back(Op);
if (Op == N)
return true;
// If a node has already been visited on this depth-first walk, reject it as
// a cycle.
- if (!Visited.insert(N)) {
+ if (!Visited.insert(N).second) {
errs() << "Detected cycle in SelectionDAG\n";
dbgs() << "Offending node:\n";
N->dumprFull(DAG); dbgs() << "\n";
SmallSet<BasicBlock*, 32> Done;
for (unsigned i = 0, e = I.getNumSuccessors(); i != e; ++i) {
BasicBlock *BB = I.getSuccessor(i);
- bool Inserted = Done.insert(BB);
+ bool Inserted = Done.insert(BB).second;
if (!Inserted)
continue;
// If this terminator has multiple identical successors (common for
// switches), only handle each succ once.
- if (!SuccsHandled.insert(SuccMBB)) continue;
+ if (!SuccsHandled.insert(SuccMBB).second)
+ continue;
MachineBasicBlock::iterator MBBI = SuccMBB->begin();
typedef SmallPtrSet<const SDNode *, 128> VisitedSDNodeSet;
static void DumpNodesr(raw_ostream &OS, const SDNode *N, unsigned indent,
const SelectionDAG *G, VisitedSDNodeSet &once) {
- if (!once.insert(N)) // If we've been here before, return now.
+ if (!once.insert(N).second) // If we've been here before, return now.
return;
// Dump the current SDNode, but don't end the line yet.
SDNode *N = Worklist.pop_back_val();
// If we've already seen this node, ignore it.
- if (!VisitedNodes.insert(N))
+ if (!VisitedNodes.insert(N).second)
continue;
// Otherwise, add all chain operands to the worklist.
// Don't revisit a node if we already scanned it and didn't fail; we know we
// won't fail if we scan it again.
- if (!Visited.insert(Use))
+ if (!Visited.insert(Use).second)
return false;
for (unsigned i = 0, e = Use->getNumOperands(); i != e; ++i) {
/// we reach blocks we've already seen.
static void MarkBlocksLiveIn(BasicBlock *BB,
SmallPtrSetImpl<BasicBlock *> &LiveBBs) {
- if (!LiveBBs.insert(BB))
+ if (!LiveBBs.insert(BB).second)
return; // already been here.
for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI)
} else if (const PHINode *PN = dyn_cast<PHINode>(U)) {
// Keep track of what PHI nodes we have already visited to ensure
// they are only visited once.
- if (VisitedPHIs.insert(PN))
+ if (VisitedPHIs.insert(PN).second)
if (HasAddressTaken(PN))
return true;
} else if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(U)) {
unsigned Reg = DstReg;
while (MachineInstr *UseMI = findOnlyInterestingUse(Reg, MBB, MRI, TII,IsCopy,
NewReg, IsDstPhys)) {
- if (IsCopy && !Processed.insert(UseMI))
+ if (IsCopy && !Processed.insert(UseMI).second)
break;
DenseMap<MachineInstr*, unsigned>::iterator DI = DistanceMap.find(UseMI);
// ConstantExpr traps if any operands can trap.
for (unsigned i = 0, e = C->getNumOperands(); i != e; ++i) {
if (ConstantExpr *Op = dyn_cast<ConstantExpr>(CE->getOperand(i))) {
- if (NonTrappingOps.insert(Op) && canTrapImpl(Op, NonTrappingOps))
+ if (NonTrappingOps.insert(Op).second && canTrapImpl(Op, NonTrappingOps))
return true;
}
}
const Constant *ConstOp = dyn_cast<Constant>(Op);
if (!ConstOp)
continue;
- if (Visited.insert(ConstOp))
+ if (Visited.insert(ConstOp).second)
WorkList.push_back(ConstOp);
}
}
// TrackingVHs back into Values.
SmallPtrSet<Value *, 16> RetainSet;
for (unsigned I = 0, E = AllRetainTypes.size(); I < E; I++)
- if (RetainSet.insert(AllRetainTypes[I]))
+ if (RetainSet.insert(AllRetainTypes[I]).second)
RetainValues.push_back(AllRetainTypes[I]);
DIArray RetainTypes = getOrCreateArray(RetainValues);
DIType(TempRetainTypes).replaceAllUsesWith(RetainTypes);
if (!DV.isVariable())
return;
- if (!NodesSeen.insert(DV))
+ if (!NodesSeen.insert(DV).second)
return;
processScope(DIVariable(N).getContext());
processType(DIVariable(N).getType().resolve(TypeIdentifierMap));
if (!DV.isVariable())
return;
- if (!NodesSeen.insert(DV))
+ if (!NodesSeen.insert(DV).second)
return;
processScope(DIVariable(N).getContext());
processType(DIVariable(N).getType().resolve(TypeIdentifierMap));
if (!DT)
return false;
- if (!NodesSeen.insert(DT))
+ if (!NodesSeen.insert(DT).second)
return false;
TYs.push_back(DT);
bool DebugInfoFinder::addCompileUnit(DICompileUnit CU) {
if (!CU)
return false;
- if (!NodesSeen.insert(CU))
+ if (!NodesSeen.insert(CU).second)
return false;
CUs.push_back(CU);
if (!DIG)
return false;
- if (!NodesSeen.insert(DIG))
+ if (!NodesSeen.insert(DIG).second)
return false;
GVs.push_back(DIG);
if (!SP)
return false;
- if (!NodesSeen.insert(SP))
+ if (!NodesSeen.insert(SP).second)
return false;
SPs.push_back(SP);
// as null for now.
if (Scope->getNumOperands() == 0)
return false;
- if (!NodesSeen.insert(Scope))
+ if (!NodesSeen.insert(Scope).second)
return false;
Scopes.push_back(Scope);
return true;
if (isOpaque())
return false;
- if (Visited && !Visited->insert(this))
+ if (Visited && !Visited->insert(this).second)
return false;
// Okay, our struct is sized if all of the elements are, but if one of the
#ifndef NDEBUG
static bool contains(SmallPtrSetImpl<ConstantExpr *> &Cache, ConstantExpr *Expr,
Constant *C) {
- if (!Cache.insert(Expr))
+ if (!Cache.insert(Expr).second)
return false;
for (auto &O : Expr->operands()) {
return V;
}
assert(V->getType()->isPointerTy() && "Unexpected operand type!");
- } while (Visited.insert(V));
+ } while (Visited.insert(V).second);
return V;
}
return V;
}
assert(V->getType()->isPointerTy() && "Unexpected operand type!");
- } while (Visited.insert(V));
+ } while (Visited.insert(V).second);
return V;
}
// For GEPs, determine if the indexing lands within the allocated object.
if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
// Conservatively require that the base pointer be fully dereferenceable.
- if (!Visited.insert(GEP->getOperand(0)))
+ if (!Visited.insert(GEP->getOperand(0)).second)
return false;
if (!isDereferenceablePointer(GEP->getOperand(0), DL, Visited))
return false;
while (!WorkStack.empty()) {
const Value *V = WorkStack.pop_back_val();
- if (!Visited.insert(V))
+ if (!Visited.insert(V).second)
continue;
if (const User *U = dyn_cast<User>(V)) {
Assert1(!GV->isDeclaration(), "Alias must point to a definition", &GA);
if (const auto *GA2 = dyn_cast<GlobalAlias>(GV)) {
- Assert1(Visited.insert(GA2), "Aliases cannot form a cycle", &GA);
+ Assert1(Visited.insert(GA2).second, "Aliases cannot form a cycle", &GA);
Assert1(!GA2->mayBeOverridden(), "Alias cannot point to a weak alias",
&GA);
void Verifier::visitMDNode(MDNode &MD, Function *F) {
// Only visit each node once. Metadata can be mutually recursive, so this
// avoids infinite recursion here, as well as being an optimization.
- if (!MDNodes.insert(&MD))
+ if (!MDNodes.insert(&MD).second)
return;
for (unsigned i = 0, e = MD.getNumOperands(); i != e; ++i) {
for (SwitchInst::CaseIt i = SI.case_begin(), e = SI.case_end(); i != e; ++i) {
Assert1(i.getCaseValue()->getType() == SwitchTy,
"Switch constants must all be same type as switch value!", &SI);
- Assert2(Constants.insert(i.getCaseValue()),
+ Assert2(Constants.insert(i.getCaseValue()).second,
"Duplicate integer as switch case", &SI, i.getCaseValue());
}
while (!Stack.empty()) {
const ConstantExpr *V = Stack.pop_back_val();
- if (!Visited.insert(V))
+ if (!Visited.insert(V).second)
continue;
VerifyConstantExprBitcastType(V);
// same opaque type then we fail.
if (cast<StructType>(DstTy)->isOpaque()) {
// We can only map one source type onto the opaque destination type.
- if (!DstResolvedOpaqueTypes.insert(cast<StructType>(DstTy)))
+ if (!DstResolvedOpaqueTypes.insert(cast<StructType>(DstTy)).second)
return false;
SrcDefinitionsToResolve.push_back(SSTy);
Entry = DstTy;
continue;
// If we've already seen this option, don't add it to the list again.
- if (!OptionSet.insert(I->second))
+ if (!OptionSet.insert(I->second).second)
continue;
Opts.push_back(std::pair<const char *, Option*>(I->getKey().data(),
memset(CurArray, -1, CurArraySize*sizeof(void*));
}
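+ // insert_imp now returns the slot that holds Ptr together with the
+ // "inserted" flag; the header-level insert() wraps that slot in an
+ // iterator, so no second hash lookup is needed.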
-bool SmallPtrSetImplBase::insert_imp(const void * Ptr) {
+std::pair<const void *const *, bool>
+SmallPtrSetImplBase::insert_imp(const void *Ptr) {
if (isSmall()) {
// Check to see if it is already in the set.
for (const void **APtr = SmallArray, **E = SmallArray+NumElements;
APtr != E; ++APtr)
if (*APtr == Ptr)
- return false;
-
+ return std::make_pair(APtr, false);
+
// Nope, there isn't. If we stay small, just 'pushback' now.
if (NumElements < CurArraySize) {
SmallArray[NumElements++] = Ptr;
- return true;
+ return std::make_pair(SmallArray + (NumElements - 1), true);
}
// Otherwise, hit the big set case, which will call grow.
}
// Okay, we know we have space. Find a hash bucket.
const void **Bucket = const_cast<const void**>(FindBucketFor(Ptr));
- if (*Bucket == Ptr) return false; // Already inserted, good.
-
+ if (*Bucket == Ptr)
+ return std::make_pair(Bucket, false); // Already inserted, good.
+
// Otherwise, insert it!
if (*Bucket == getTombstoneMarker())
--NumTombstones;
*Bucket = Ptr;
++NumElements; // Track density.
- return true;
+ return std::make_pair(Bucket, true);
}
bool SmallPtrSetImplBase::erase_imp(const void * Ptr) {
// global. Do not promote constant expressions either, as they may
// require some code expansion.
if (Cst && !isa<GlobalValue>(Cst) && !isa<ConstantExpr>(Cst) &&
- AlreadyChecked.insert(Cst))
+ AlreadyChecked.insert(Cst).second)
LocalChange |= promoteConstant(Cst);
}
}
for (std::vector<MachineBasicBlock*>::iterator
I = LPadList.begin(), E = LPadList.end(); I != E; ++I) {
MachineBasicBlock *CurMBB = *I;
- if (SeenMBBs.insert(CurMBB))
+ if (SeenMBBs.insert(CurMBB).second)
DispContBB->addSuccessor(CurMBB);
}
bool MemDefsUses::updateDefsUses(ValueType V, bool MayStore) {
if (MayStore)
- return !Defs.insert(V) || Uses.count(V) || SeenNoObjStore || SeenNoObjLoad;
+ return !Defs.insert(V).second || Uses.count(V) || SeenNoObjStore ||
+ SeenNoObjLoad;
Uses.insert(V);
return Defs.count(V) || SeenNoObjStore;
// nodes just above the top-level loads and token factors.
while (!Queue.empty()) {
SDNode *ChainNext = Queue.pop_back_val();
- if (!Visited.insert(ChainNext))
+ if (!Visited.insert(ChainNext).second)
continue;
if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(ChainNext)) {
while (!Queue.empty()) {
SDNode *LoadRoot = Queue.pop_back_val();
- if (!Visited.insert(LoadRoot))
+ if (!Visited.insert(LoadRoot).second)
continue;
if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(LoadRoot))
SDValue BinOp = BinOps.back();
BinOps.pop_back();
- if (!Visited.insert(BinOp.getNode()))
+ if (!Visited.insert(BinOp.getNode()).second)
continue;
PromOps.push_back(BinOp);
SDValue BinOp = BinOps.back();
BinOps.pop_back();
- if (!Visited.insert(BinOp.getNode()))
+ if (!Visited.insert(BinOp.getNode()).second)
continue;
PromOps.push_back(BinOp);
// Process any unreachable blocks in arbitrary order now.
if (MF.size() != Processed.size())
for (MachineFunction::iterator BB = MF.begin(), E = MF.end(); BB != E; ++BB)
- if (Processed.insert(BB))
+ if (Processed.insert(BB).second)
Changed |= processBasicBlock(MF, *BB);
LiveBundles.clear();
Value *V = WorkList.back();
WorkList.pop_back();
if (isa<GetElementPtrInst>(V) || isa<PHINode>(V)) {
- if (PtrValues.insert(V))
+ if (PtrValues.insert(V).second)
WorkList.insert(WorkList.end(), V->user_begin(), V->user_end());
} else if (StoreInst *Store = dyn_cast<StoreInst>(V)) {
Stores.push_back(Store);
case Instruction::AddrSpaceCast:
// The original value is not read/written via this if the new value isn't.
for (Use &UU : I->uses())
- if (Visited.insert(&UU))
+ if (Visited.insert(&UU).second)
Worklist.push_back(&UU);
break;
auto AddUsersToWorklistIfCapturing = [&] {
if (Captures)
for (Use &UU : I->uses())
- if (Visited.insert(&UU))
+ if (Visited.insert(&UU).second)
Worklist.push_back(&UU);
};
/// recursively mark anything that it uses as also needed.
void GlobalDCE::GlobalIsNeeded(GlobalValue *G) {
// If the global is already in the set, no need to reprocess it.
- if (!AliveGlobals.insert(G))
+ if (!AliveGlobals.insert(G).second)
return;
Module *M = G->getParent();
for (User::op_iterator I = C->op_begin(), E = C->op_end(); I != E; ++I) {
// If we've already processed this constant there's no need to do it again.
Constant *Op = dyn_cast<Constant>(*I);
- if (Op && SeenConstants.insert(Op))
+ if (Op && SeenConstants.insert(Op).second)
MarkUsedGlobalsAsNeeded(Op);
}
}
} else if (const PHINode *PN = dyn_cast<PHINode>(U)) {
// If we've already seen this phi node, ignore it, it has already been
// checked.
- if (PHIs.insert(PN) && !AllUsesOfValueWillTrapIfNull(PN, PHIs))
+ if (PHIs.insert(PN).second && !AllUsesOfValueWillTrapIfNull(PN, PHIs))
return false;
} else if (isa<ICmpInst>(U) &&
isa<ConstantPointerNull>(U->getOperand(1))) {
if (const PHINode *PN = dyn_cast<PHINode>(Inst)) {
// PHIs are ok if all uses are ok. Don't infinitely recurse through PHI
// cycles.
- if (PHIs.insert(PN))
+ if (PHIs.insert(PN).second)
if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(PN, GV, PHIs))
return false;
continue;
}
if (const PHINode *PN = dyn_cast<PHINode>(UI)) {
- if (!LoadUsingPHIsPerLoad.insert(PN))
+ if (!LoadUsingPHIsPerLoad.insert(PN).second)
// This means some phi nodes are dependent on each other.
// Avoid infinite looping!
return false;
- if (!LoadUsingPHIs.insert(PN))
+ if (!LoadUsingPHIs.insert(PN).second)
// If we have already analyzed this PHI, then it is safe.
continue;
SmallPtrSetImpl<Constant*> &SimpleConstants,
const DataLayout *DL) {
// If we already checked this constant, we win.
- if (!SimpleConstants.insert(C)) return true;
+ if (!SimpleConstants.insert(C).second)
+ return true;
// Check the constant.
return isSimpleEnoughValueToCommitHelper(C, SimpleConstants, DL);
}
// Okay, we succeeded in evaluating this control flow. See if we have
// executed the new block before. If so, we have a looping function,
// which we cannot evaluate in reasonable time.
- if (!ExecutedBlocks.insert(NextBB))
+ if (!ExecutedBlocks.insert(NextBB).second)
return false; // looped!
// Okay, we have never been in this block before. Check to see if there
}
bool usedErase(GlobalValue *GV) { return Used.erase(GV); }
bool compilerUsedErase(GlobalValue *GV) { return CompilerUsed.erase(GV); }
- bool usedInsert(GlobalValue *GV) { return Used.insert(GV); }
- bool compilerUsedInsert(GlobalValue *GV) { return CompilerUsed.insert(GV); }
+ bool usedInsert(GlobalValue *GV) { return Used.insert(GV).second; }
+ bool compilerUsedInsert(GlobalValue *GV) {
+ return CompilerUsed.insert(GV).second;
+ }
void syncVariablesAndSets() {
if (UsedV)
SmallPtrSet<const Function *, 8> NewCalledFunctions(CalledFunctions);
// Don't treat recursive functions as empty.
- if (!NewCalledFunctions.insert(CalledFn))
+ if (!NewCalledFunctions.insert(CalledFn).second)
return false;
if (!cxxDtorIsEmpty(*CalledFn, NewCalledFunctions))
// If the inlined function already uses this alloca then we can't reuse
// it.
- if (!UsedAllocas.insert(AvailableAlloca))
+ if (!UsedAllocas.insert(AvailableAlloca).second)
continue;
// Otherwise, we *can* reuse it, RAUW AI into AvailableAlloca and declare
assert(TermL->getNumSuccessors() == TermR->getNumSuccessors());
for (unsigned i = 0, e = TermL->getNumSuccessors(); i != e; ++i) {
- if (!VisitedBBs.insert(TermL->getSuccessor(i)))
+ if (!VisitedBBs.insert(TermL->getSuccessor(i)).second)
continue;
FnLBBs.push_back(TermL->getSuccessor(i));
if (!PN->hasOneUse()) return false;
// Remember this node, and if we find the cycle, return.
- if (!PotentiallyDeadPHIs.insert(PN))
+ if (!PotentiallyDeadPHIs.insert(PN).second)
return true;
// Don't scan crazily complex things.
static bool PHIsEqualValue(PHINode *PN, Value *NonPhiInVal,
SmallPtrSetImpl<PHINode*> &ValueEqualPHIs) {
// See if we already saw this PHI node.
- if (!ValueEqualPHIs.insert(PN))
+ if (!ValueEqualPHIs.insert(PN).second)
return true;
// Don't scan crazily complex things.
// If the user is a PHI, inspect its uses recursively.
if (PHINode *UserPN = dyn_cast<PHINode>(UserI)) {
- if (PHIsInspected.insert(UserPN))
+ if (PHIsInspected.insert(UserPN).second)
PHIsToSlice.push_back(UserPN);
continue;
}
// If we already saw this clause, there is no point in having a second
// copy of it.
- if (AlreadyCaught.insert(TypeInfo)) {
+ if (AlreadyCaught.insert(TypeInfo).second) {
// This catch clause was not already seen.
NewClauses.push_back(CatchClause);
} else {
continue;
// There is no point in having multiple copies of the same typeinfo in
// a filter, so only add it if we didn't already.
- if (SeenInFilter.insert(TypeInfo))
+ if (SeenInFilter.insert(TypeInfo).second)
NewFilterElts.push_back(cast<Constant>(Elt));
}
// A filter containing a catch-all cannot match anything by definition.
BB = Worklist.pop_back_val();
// We have now visited this block! If we've already been here, ignore it.
- if (!Visited.insert(BB)) continue;
+ if (!Visited.insert(BB).second)
+ continue;
for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
Instruction *Inst = BBI++;
if (Value *Addr =
isInterestingMemoryAccess(&Inst, &IsWrite, &Alignment)) {
if (ClOpt && ClOptSameTemp) {
- if (!TempsToInstrument.insert(Addr))
+ if (!TempsToInstrument.insert(Addr).second)
continue; // We've seen this temp in the current BB.
}
} else if (ClInvalidPointerPairs &&
// Add the predecessors to the worklist.
do {
BasicBlock *PredBB = *PI;
- if (Visited.insert(PredBB))
+ if (Visited.insert(PredBB).second)
Worklist.push_back(std::make_pair(PredBB, PredBB->end()));
} while (++PI != PE);
break;
if (isa<AllocaInst>(P))
return true;
- if (!Visited.insert(P))
+ if (!Visited.insert(P).second)
continue;
if (const SelectInst *SI = dyn_cast<const SelectInst>(P)) {
// that makes this a partial merge.
bool Partial = ReverseInsertPts.size() != Other.ReverseInsertPts.size();
for (Instruction *Inst : Other.ReverseInsertPts)
- Partial |= ReverseInsertPts.insert(Inst);
+ Partial |= ReverseInsertPts.insert(Inst).second;
return Partial;
}
while (SuccStack.back().second != SE) {
BasicBlock *SuccBB = *SuccStack.back().second++;
- if (Visited.insert(SuccBB)) {
+ if (Visited.insert(SuccBB).second) {
TerminatorInst *TI = cast<TerminatorInst>(&SuccBB->back());
SuccStack.push_back(std::make_pair(SuccBB, succ_iterator(TI)));
BBStates[CurrBB].addSucc(SuccBB);
BBState::edge_iterator PE = BBStates[PredStack.back().first].pred_end();
while (PredStack.back().second != PE) {
BasicBlock *BB = *PredStack.back().second++;
- if (Visited.insert(BB)) {
+ if (Visited.insert(BB).second) {
PredStack.push_back(std::make_pair(BB, BBStates[BB].pred_begin()));
goto reverse_dfs_next_succ;
}
if (!NewRetainReleaseRRI.Calls.count(NewRetain))
return false;
- if (ReleasesToMove.Calls.insert(NewRetainRelease)) {
+ if (ReleasesToMove.Calls.insert(NewRetainRelease).second) {
// If we overflow when we compute the path count, don't remove/move
// anything.
// Collect the optimal insertion points.
if (!KnownSafe)
for (Instruction *RIP : NewRetainReleaseRRI.ReverseInsertPts) {
- if (ReleasesToMove.ReverseInsertPts.insert(RIP)) {
+ if (ReleasesToMove.ReverseInsertPts.insert(RIP).second) {
// If we overflow when we compute the path count, don't
// remove/move anything.
const BBState &RIPBBState = BBStates[RIP->getParent()];
if (!NewReleaseRetainRRI.Calls.count(NewRelease))
return false;
- if (RetainsToMove.Calls.insert(NewReleaseRetain)) {
+ if (RetainsToMove.Calls.insert(NewReleaseRetain).second) {
// If we overflow when we compute the path count, don't remove/move
// anything.
const BBState &NRRBBState = BBStates[NewReleaseRetain->getParent()];
// Collect the optimal insertion points.
if (!KnownSafe)
for (Instruction *RIP : NewReleaseRetainRRI.ReverseInsertPts) {
- if (RetainsToMove.ReverseInsertPts.insert(RIP)) {
+ if (RetainsToMove.ReverseInsertPts.insert(RIP).second) {
// If we overflow when we compute the path count, don't
// remove/move anything.
const BBState &RIPBBState = BBStates[RIP->getParent()];
SmallPtrSet<const Value *, 4> UniqueSrc;
for (unsigned i = 0, e = A->getNumIncomingValues(); i != e; ++i) {
const Value *PV1 = A->getIncomingValue(i);
- if (UniqueSrc.insert(PV1) && related(PV1, B))
+ if (UniqueSrc.insert(PV1).second && related(PV1, B))
return true;
}
if (isa<PtrToIntInst>(P))
// Assume the worst.
return true;
- if (Visited.insert(Ur))
+ if (Visited.insert(Ur).second)
Worklist.push_back(Ur);
}
} while (!Worklist.empty());
for (Instruction::op_iterator OI = curr->op_begin(), OE = curr->op_end();
OI != OE; ++OI)
if (Instruction* Inst = dyn_cast<Instruction>(OI))
- if (alive.insert(Inst))
+ if (alive.insert(Inst).second)
worklist.push_back(Inst);
}
Instruction *NarrowUser = cast<Instruction>(U);
// Handle data flow merges and bizarre phi cycles.
- if (!Widened.insert(NarrowUser))
+ if (!Widened.insert(NarrowUser).second)
continue;
NarrowIVUsers.push_back(NarrowIVDefUse(NarrowDef, NarrowUser, WideDef));
static bool isHighCostExpansion(const SCEV *S, BranchInst *BI,
SmallPtrSetImpl<const SCEV*> &Processed,
ScalarEvolution *SE) {
- if (!Processed.insert(S))
+ if (!Processed.insert(S).second)
return false;
// If the backedge-taken count is a UDiv, it's very likely a UDiv that
// Optimistically handle other instructions.
for (User::op_iterator OI = I->op_begin(), E = I->op_end(); OI != E; ++OI) {
- if (!Visited.insert(*OI))
+ if (!Visited.insert(*OI).second)
continue;
if (!hasConcreteDefImpl(*OI, Visited, Depth+1))
return false;
BasicBlock *PredBB = *PI;
// If we already scanned this predecessor, skip it.
- if (!PredsScanned.insert(PredBB))
+ if (!PredsScanned.insert(PredBB).second)
continue;
// Scan the predecessor to see if the value is available in the pred.
for (unsigned i = 0, e = PredValues.size(); i != e; ++i) {
BasicBlock *Pred = PredValues[i].second;
- if (!SeenPreds.insert(Pred))
+ if (!SeenPreds.insert(Pred).second)
continue; // Duplicate predecessor entry.
// If the predecessor ends with an indirect goto, we can't change its
for (succ_iterator SI = succ_begin(BB), SE = succ_end(BB); SI != SE;
++SI) {
BasicBlock *SuccBB = *SI;
- if (!Visited.insert(SuccBB))
+ if (!Visited.insert(SuccBB).second)
continue;
const Loop *SuccLoop = LI->getLoopFor(SuccBB);
for (unsigned i = 0; i < SubLoopExitBlocks.size(); ++i) {
BasicBlock *ExitBB = SubLoopExitBlocks[i];
- if (LI->getLoopFor(ExitBB) == L && Visited.insert(ExitBB))
+ if (LI->getLoopFor(ExitBB) == L && Visited.insert(ExitBB).second)
VisitStack.push_back(WorklistItem(ExitBB, false));
}
Processed, SE);
}
- if (!Processed.insert(S))
+ if (!Processed.insert(S).second)
return false;
if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
Lose();
return;
}
- if (Regs.insert(Reg)) {
+ if (Regs.insert(Reg).second) {
RateRegister(Reg, Regs, L, SE, DT);
if (LoserRegs && isLoser())
LoserRegs->insert(Reg);
User::op_iterator IVOpIter = findIVOperand(I->op_begin(), IVOpEnd, L, SE);
while (IVOpIter != IVOpEnd) {
Instruction *IVOpInst = cast<Instruction>(*IVOpIter);
- if (UniqueOperands.insert(IVOpInst))
+ if (UniqueOperands.insert(IVOpInst).second)
ChainInstruction(I, IVOpInst, ChainUsersVec);
IVOpIter = findIVOperand(std::next(IVOpIter), IVOpEnd, L, SE);
}
const SCEV *S = Worklist.pop_back_val();
// Don't process the same SCEV twice
- if (!Visited.insert(S))
+ if (!Visited.insert(S).second)
continue;
if (const SCEVNAryExpr *N = dyn_cast<SCEVNAryExpr>(S))
for (int LUIdx = UsedByIndices.find_first(); LUIdx != -1;
LUIdx = UsedByIndices.find_next(LUIdx))
// Make a memo of this use, offset, and register tuple.
- if (UniqueItems.insert(std::make_pair(LUIdx, Imm)))
+ if (UniqueItems.insert(std::make_pair(LUIdx, Imm)).second)
WorkItems.push_back(WorkItem(LUIdx, Imm, OrigReg));
}
}
// If this is a binary operation of the right kind with only one use then
// add its operands to the expression.
if (BinaryOperator *BO = isReassociableOp(Op, Opcode)) {
- assert(Visited.insert(Op) && "Not first visit!");
+ assert(Visited.insert(Op).second && "Not first visit!");
DEBUG(dbgs() << "DIRECT ADD: " << *Op << " (" << Weight << ")\n");
Worklist.push_back(std::make_pair(BO, Weight));
continue;
LeafMap::iterator It = Leaves.find(Op);
if (It == Leaves.end()) {
// Not in the leaf map. Must be the first time we saw this operand.
- assert(Visited.insert(Op) && "Not first visit!");
+ assert(Visited.insert(Op).second && "Not first visit!");
if (!Op->hasOneUse()) {
// This value has uses not accounted for by the expression, so it is
// not safe to modify. Mark it as being a leaf.
SmallPtrSet<Value*, 8> Duplicates;
for (unsigned i = 0, e = Factors.size(); i != e; ++i) {
Value *Factor = Factors[i];
- if (!Duplicates.insert(Factor))
+ if (!Duplicates.insert(Factor).second)
continue;
unsigned Occ = ++FactorOccurrences[Factor];
// and add that since that's where optimization actually happens.
unsigned Opcode = Op->getOpcode();
while (Op->hasOneUse() && Op->user_back()->getOpcode() == Opcode &&
- Visited.insert(Op))
+ Visited.insert(Op).second)
Op = Op->user_back();
RedoInsts.insert(Op);
}
///
/// This returns true if the block was not considered live before.
bool MarkBlockExecutable(BasicBlock *BB) {
- if (!BBExecutable.insert(BB)) return false;
+ if (!BBExecutable.insert(BB).second)
+ return false;
DEBUG(dbgs() << "Marking Block Executable: " << BB->getName() << '\n');
BBWorkList.push_back(BB); // Add the block to the work list!
return true;
private:
void markAsDead(Instruction &I) {
- if (VisitedDeadInsts.insert(&I))
+ if (VisitedDeadInsts.insert(&I).second)
AS.DeadUsers.push_back(&I);
}
}
for (User *U : I->users())
- if (Visited.insert(cast<Instruction>(U)))
+ if (Visited.insert(cast<Instruction>(U)).second)
Uses.push_back(std::make_pair(I, cast<Instruction>(U)));
} while (!Uses.empty());
else
return false;
- } while (Visited.insert(Ptr));
+ } while (Visited.insert(Ptr).second);
return false;
}
break;
Offset += GEPOffset;
Ptr = GEP->getPointerOperand();
- if (!Visited.insert(Ptr))
+ if (!Visited.insert(Ptr).second)
break;
}
break;
}
assert(Ptr->getType()->isPointerTy() && "Unexpected operand type!");
- } while (Visited.insert(Ptr));
+ } while (Visited.insert(Ptr).second);
if (!OffsetPtr) {
if (!Int8Ptr) {
/// This uses a set to de-duplicate users.
void enqueueUsers(Instruction &I) {
for (Use &U : I.uses())
- if (Visited.insert(U.getUser()))
+ if (Visited.insert(U.getUser()).second)
Queue.push_back(&U);
}
SmallVectorImpl<Instruction *> &Worklist,
SmallPtrSetImpl<Instruction *> &Visited) {
for (User *U : I.users())
- if (Visited.insert(cast<Instruction>(U)))
+ if (Visited.insert(cast<Instruction>(U)).second)
Worklist.push_back(cast<Instruction>(U));
}
for (auto *BB2 : Descendants) {
bool IsDomParent = DomTree->dominates(BB2, BB1);
bool IsInSameLoop = LI->getLoopFor(BB1) == LI->getLoopFor(BB2);
- if (BB1 != BB2 && VisitedBlocks.insert(BB2) && IsDomParent &&
+ if (BB1 != BB2 && VisitedBlocks.insert(BB2).second && IsDomParent &&
IsInSameLoop) {
EquivalenceClass[BB2] = BB1;
<< " known. Set weight for block: ";
printBlockWeight(dbgs(), BB););
}
- if (VisitedBlocks.insert(BB))
+ if (VisitedBlocks.insert(BB).second)
Changed = true;
} else if (NumUnknownEdges == 1 && VisitedBlocks.count(BB)) {
// If there is a single unknown edge and the block has been
llvm_unreachable("Found a stale predecessors list in a basic block.");
for (pred_iterator PI = pred_begin(B1), PE = pred_end(B1); PI != PE; ++PI) {
BasicBlock *B2 = *PI;
- if (Visited.insert(B2))
+ if (Visited.insert(B2).second)
Predecessors[B1].push_back(B2);
}
llvm_unreachable("Found a stale successors list in a basic block.");
for (succ_iterator SI = succ_begin(B1), SE = succ_end(B1); SI != SE; ++SI) {
BasicBlock *B2 = *SI;
- if (Visited.insert(B2))
+ if (Visited.insert(B2).second)
Successors[B1].push_back(B2);
}
}
AllocaInfo &Info) {
// If we've already checked this PHI, don't do it again.
if (PHINode *PN = dyn_cast<PHINode>(I))
- if (!Info.CheckedPHIs.insert(PN))
+ if (!Info.CheckedPHIs.insert(PN).second)
return;
for (User *U : I->users()) {
auto AddUsesToWorklist = [&](Value *V) {
for (auto &U : V->uses()) {
- if (!Visited.insert(&U))
+ if (!Visited.insert(&U).second)
continue;
Worklist.push_back(&U);
}
} else if (const PHINode *PN = dyn_cast<PHINode>(I)) {
// PHI nodes we can check just like select or GEP instructions, but we
// have to be careful about infinite recursion.
- if (PhiUsers.insert(PN)) // Not already visited.
+ if (PhiUsers.insert(PN).second) // Not already visited.
if (analyzeGlobalAux(I, GS, PhiUsers))
return true;
} else if (isa<CmpInst>(I)) {
// If we find an instruction more than once, we're on a cycle that
// won't prove fruitful.
- if (!Visited.insert(I)) {
+ if (!Visited.insert(I).second) {
// Break the cycle and delete the instruction and its operands.
I->replaceAllUsesWith(UndefValue::get(I->getType()));
(void)RecursivelyDeleteTriviallyDeadInstructions(I, TLI);
Changed |= ConstantFoldTerminator(BB, true);
for (succ_iterator SI = succ_begin(BB), SE = succ_end(BB); SI != SE; ++SI)
- if (Reachable.insert(*SI))
+ if (Reachable.insert(*SI).second)
Worklist.push_back(*SI);
} while (!Worklist.empty());
return Changed;
if (LPM) {
if (ScalarEvolution *SE = LPM->getAnalysisIfAvailable<ScalarEvolution>()) {
if (Loop *L = LI->getLoopFor(BB)) {
- if (ForgottenLoops.insert(L))
+ if (ForgottenLoops.insert(L).second)
SE->forgetLoop(L);
}
}
// The block really is live in here, insert it into the set. If already in
// the set, then it has already been processed.
- if (!LiveInBlocks.insert(BB))
+ if (!LiveInBlocks.insert(BB).second)
continue;
// Since the value is live into BB, it is either defined in a predecessor or
if (SuccLevel > RootLevel)
continue;
- if (!Visited.insert(SuccNode))
+ if (!Visited.insert(SuccNode).second)
continue;
BasicBlock *SuccBB = SuccNode->getBlock();
}
// Don't revisit blocks.
- if (!Visited.insert(BB))
+ if (!Visited.insert(BB).second)
return;
for (BasicBlock::iterator II = BB->begin(); !isa<TerminatorInst>(II);) {
++I;
for (; I != E; ++I)
- if (VisitedSuccs.insert(*I))
+ if (VisitedSuccs.insert(*I).second)
Worklist.push_back(RenamePassData(*I, Pred, IncomingVals));
goto NextIteration;
SmallPtrSet<Value *, 8> Succs;
for (unsigned i = 0, e = IBI->getNumDestinations(); i != e; ++i) {
BasicBlock *Dest = IBI->getDestination(i);
- if (!Dest->hasAddressTaken() || !Succs.insert(Dest)) {
+ if (!Dest->hasAddressTaken() || !Succs.insert(Dest).second) {
Dest->removePredecessor(BB);
IBI->removeDestination(i);
--i; --e;
// Also ensure unique worklist users.
// If Def is a LoopPhi, it may not be in the Simplified set, so check for
// self edges first.
- if (UI != Def && Simplified.insert(UI))
+ if (UI != Def && Simplified.insert(UI).second)
SimpleIVUsers.push_back(std::make_pair(UI, Def));
}
}
// If we did *not* see this pointer before, insert it into the read-write
// list. At this phase it is only a 'write' list.
- if (Seen.insert(Ptr)) {
+ if (Seen.insert(Ptr).second) {
++NumReadWrites;
AliasAnalysis::Location Loc = AA->getLocation(ST);
// read a few words, modify, and write a few words, and some of the
// words may be written to the same address.
bool IsReadOnlyPtr = false;
- if (Seen.insert(Ptr) || !isStridedPtr(SE, DL, Ptr, TheLoop, Strides)) {
+ if (Seen.insert(Ptr).second ||
+ !isStridedPtr(SE, DL, Ptr, TheLoop, Strides)) {
++NumReads;
IsReadOnlyPtr = true;
}
// value must only be used once, except by phi nodes and min/max
// reductions which are represented as a cmp followed by a select.
ReductionInstDesc IgnoredVal(false, nullptr);
- if (VisitedInsts.insert(UI)) {
+ if (VisitedInsts.insert(UI).second) {
if (isa<PHINode>(UI))
PHIs.push_back(UI);
else
for (UserList::iterator I = ExternalUses.begin(), E = ExternalUses.end();
I != E; ++I) {
// We only add extract cost once for the same scalar.
- if (!ExtractCostCalculated.insert(I->Scalar))
+ if (!ExtractCostCalculated.insert(I->Scalar).second)
continue;
// Uses by ephemeral values are free (because the ephemeral value will be
ValueList Operands;
BasicBlock *IBB = PH->getIncomingBlock(i);
- if (!VisitedBBs.insert(IBB)) {
+ if (!VisitedBBs.insert(IBB).second) {
NewPhi->addIncoming(NewPhi->getIncomingValueForBlock(IBB), IBB);
continue;
}
for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; it++) {
// We may go through BB multiple times, so skip the ones we have already checked.
- if (!VisitedInstrs.insert(it))
+ if (!VisitedInstrs.insert(it).second)
continue;
if (isa<DbgInfoIntrinsic>(it))
// Recursively find all reachable SchedReadWrite records.
static void scanSchedRW(Record *RWDef, RecVec &RWDefs,
SmallPtrSet<Record*, 16> &RWSet) {
- if (!RWSet.insert(RWDef))
+ if (!RWSet.insert(RWDef).second)
return;
RWDefs.push_back(RWDef);
// Reads don't currently have sequence records, but they can be added later.
for (ArrayRef<Record*>::const_iterator
II = InstDefs.begin(), IE = InstDefs.end(); II != IE; ++II) {
unsigned OldSCIdx = InstrClassMap[*II];
- if (OldSCIdx && RemappedClassIDs.insert(OldSCIdx)) {
+ if (OldSCIdx && RemappedClassIDs.insert(OldSCIdx).second) {
for (RecIter RI = SchedClasses[OldSCIdx].InstRWs.begin(),
RE = SchedClasses[OldSCIdx].InstRWs.end(); RI != RE; ++RI) {
if ((*RI)->getValueAsDef("SchedModel") == RWModelDef) {
for (CodeGenSchedModels::ProcIter PI = SchedModels.procModelBegin(),
PE = SchedModels.procModelEnd(); PI != PE; ++PI) {
- if (!ItinsDefSet.insert(PI->ItinsDef))
+ if (!ItinsDefSet.insert(PI->ItinsDef).second)
continue;
std::vector<Record*> FUs = PI->ItinsDef->getValueAsListOfDefs("FU");
PE = SchedModels.procModelEnd(); PI != PE; ++PI, ++ProcItinListsIter) {
Record *ItinsDef = PI->ItinsDef;
- if (!ItinsDefSet.insert(ItinsDef))
+ if (!ItinsDefSet.insert(ItinsDef).second)
continue;
// Get processor itinerary name