Add more iterators compatible with range-based `for` syntax for various data structures. These iterators all assume (and some check) that the underlying data structures determining the iteration are not changed during the iteration. For example, don't use these to iterate over the predecessor edges if you are changing the order or contents of the predecessor edge list.
- BasicBlock: iterate over all blocks in the function, a subset starting not at the first block, or a specified range of blocks. Removed uses of the `foreach_block` macro. E.g.:
```
for (BasicBlock* const block : Blocks()) // all blocks in function
for (BasicBlock* const block : BasicBlockSimpleList(fgFirstBB->bbNext)) // all blocks starting at fgFirstBB->bbNext
for (BasicBlock* const testBlock : BasicBlockRangeList(firstNonLoopBlock, lastNonLoopBlock)) // all blocks in range (inclusive)
```
- block predecessors: iterate over all predecessor edges, or all predecessor blocks, e.g.:
```
for (flowList* const edge : block->PredEdges())
for (BasicBlock* const predBlock : block->PredBlocks())
```
- block successors: iterate over all block successors using the `NumSucc()/GetSucc()` or `NumSucc(Compiler*)/GetSucc(Compiler*)` pairs, e.g.:
```
for (BasicBlock* const succ : block->Succs())
for (BasicBlock* const succ : block->Succs(compiler))
```
Note that there already exists the "AllSuccessorsIter" which iterates over block successors including possible EH successors, e.g.:
```
for (BasicBlock* succ : block->GetAllSuccs(m_pCompiler))
```
- switch targets (namely, the successors of `BBJ_SWITCH` blocks), e.g.:
```
for (BasicBlock* const bTarget : block->SwitchTargets())
```
- loop blocks: iterate over all the blocks in a loop, e.g.:
```
for (BasicBlock* const blk : optLoopTable[loopInd].LoopBlocks())
```
- Statements: added an iterator shortcut for the non-phi statements, e.g.:
```
for (Statement* const stmt : block->NonPhiStatements())
```
Note that there already exists an iterator over all statements, e.g.:
```
for (Statement* const stmt : block->Statements())
```
- EH clauses, e.g.:
```
for (EHblkDsc* const HBtab : EHClauses(this))
```
- GenTree in linear order (but not LIR, which already has an iterator), namely, using the `gtNext` links, e.g.:
```
for (GenTree* const tree : stmt->TreeList())
```
This is a no-diff change.
{
ASSERT_TP* jumpDestGen = fgAllocateTypeForEachBlk<ASSERT_TP>();
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
ASSERT_TP valueGen = BitVecOps::MakeEmpty(apTraits);
GenTree* jtrue = nullptr;
// Walk the statement trees in this basic block.
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
- for (GenTree* tree = stmt->GetTreeList(); tree != nullptr; tree = tree->gtNext)
+ for (GenTree* const tree : stmt->TreeList())
{
if (tree->gtOper == GT_JTRUE)
{
// Initially estimate the OUT sets to everything except killed expressions
// Also set the IN sets to 1, so that we can perform the intersection.
- for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
block->bbAssertionIn = BitVecOps::MakeCopy(apTraits, apValidFull);
block->bbAssertionGen = BitVecOps::MakeEmpty(apTraits);
noway_assert(optAssertionCount == 0);
// First discover all value assignments and record them in the table.
- for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
compCurBB = block;
}
// Perform assertion gen for control flow based assertions.
- for (GenTree* tree = stmt->GetTreeList(); tree != nullptr; tree = tree->gtNext)
+ for (GenTree* const tree : stmt->TreeList())
{
optAssertionGen(tree);
}
// Zero out the bbAssertionIn values, as these can be referenced in RangeCheck::MergeAssertion
// and this is sharedstate with the CSE phase: bbCseIn
//
- for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
block->bbAssertionIn = BitVecOps::MakeEmpty(apTraits);
}
AssertionPropFlowCallback ap(this, bbJtrueAssertionOut, jumpDestGen);
flow.ForwardAnalysis(ap);
- for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
// Compute any implied non-Null assertions for block->bbAssertionIn
optImpliedByTypeOfAssertions(block->bbAssertionIn);
if (verbose)
{
printf("\n");
- for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
printf("\n" FMT_BB, block->bbNum);
printf(" valueIn = %s", BitVecOps::ToString(apTraits, block->bbAssertionIn));
ASSERT_TP assertions = BitVecOps::MakeEmpty(apTraits);
// Perform assertion propagation (and constant folding)
- for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
BitVecOps::Assign(apTraits, assertions, block->bbAssertionIn);
// Find the first block of the try.
EHblkDsc* ehblk = ehGetDsc(tryIndex);
BasicBlock* tryStart = ehblk->ebdTryBeg;
- for (flowList* tryStartPreds = tryStart->bbPreds; tryStartPreds != nullptr;
- tryStartPreds = tryStartPreds->flNext)
+ for (BasicBlock* const tryStartPredBlock : tryStart->PredBlocks())
{
- res = new (this, CMK_FlowList) flowList(tryStartPreds->getBlock(), res);
+ res = new (this, CMK_FlowList) flowList(tryStartPredBlock, res);
#if MEASURE_BLOCK_SIZE
genFlowNodeCnt += 1;
// (plus adding in any filter blocks outside the try whose exceptions are handled here).
// That doesn't work, however: funclets have caused us to sometimes split the body of a try into
// more than one sequence of contiguous blocks. We need to find a better way to do this.
- for (BasicBlock* bb = fgFirstBB; bb != nullptr; bb = bb->bbNext)
+ for (BasicBlock* const bb : Blocks())
{
if (bbInExnFlowRegions(tryIndex, bb) && !bb->isBBCallAlwaysPairTail())
{
bool BasicBlock::checkPredListOrder()
{
unsigned lastBBNum = 0;
- for (flowList* pred = bbPreds; pred != nullptr; pred = pred->flNext)
+ for (BasicBlock* const predBlock : PredBlocks())
{
- const unsigned bbNum = pred->getBlock()->bbNum;
+ const unsigned bbNum = predBlock->bbNum;
if (bbNum <= lastBBNum)
{
assert(bbNum != lastBBNum);
// Count number or entries.
//
int count = 0;
- for (flowList* pred = bbPreds; pred != nullptr; pred = pred->flNext)
+ for (flowList* const pred : PredEdges())
{
count++;
}
// Fill in the vector from the list.
//
- for (flowList* pred = bbPreds; pred != nullptr; pred = pred->flNext)
+ for (flowList* const pred : PredEdges())
{
sortVector->push_back(pred);
}
unsigned BasicBlock::dspPreds()
{
unsigned count = 0;
- for (flowList* pred = bbPreds; pred != nullptr; pred = pred->flNext)
+ for (flowList* const pred : PredEdges())
{
if (count != 0)
{
/*****************************************************************************
*
* Display the basic block successors.
- * Returns the count of successors.
*/
-unsigned BasicBlock::dspSuccs(Compiler* compiler)
+void BasicBlock::dspSuccs(Compiler* compiler)
{
- unsigned numSuccs = NumSucc(compiler);
- unsigned count = 0;
- for (unsigned i = 0; i < numSuccs; i++)
+ bool first = true;
+ for (BasicBlock* const succ : Succs(compiler))
{
- printf("%s", (count == 0) ? "" : ",");
- printf(FMT_BB, GetSucc(i, compiler)->bbNum);
- count++;
+ printf("%s" FMT_BB, first ? "" : ",", succ->bbNum);
+ first = false;
}
- return count;
}
// Display a compact representation of the bbJumpKind, that is, where this block branches.
to->bbTgtStkDepth = from->bbTgtStkDepth;
#endif // DEBUG
- for (Statement* fromStmt : from->Statements())
+ for (Statement* const fromStmt : from->Statements())
{
auto newExpr = compiler->gtCloneExpr(fromStmt->GetRootNode(), GTF_EMPTY, varNum, varVal);
if (!newExpr)
bbFlags |= BBF_IS_LIR;
}
-bool BasicBlock::IsLIR()
+bool BasicBlock::IsLIR() const
{
assert(isValid());
const bool isLIR = ((bbFlags & BBF_IS_LIR) != 0);
//------------------------------------------------------------------------
// BasicBlock::firstNode: Returns the first node in the block.
//
-GenTree* BasicBlock::firstNode()
+GenTree* BasicBlock::firstNode() const
{
return IsLIR() ? GetFirstLIRNode() : Compiler::fgGetFirstNode(firstStmt()->GetRootNode());
}
//------------------------------------------------------------------------
// BasicBlock::lastNode: Returns the last node in the block.
//
-GenTree* BasicBlock::lastNode()
+GenTree* BasicBlock::lastNode() const
{
return IsLIR() ? m_lastNode : lastStmt()->GetRootNode();
}
// a backedge), we never want to consider it "unique" because the prolog is an
// implicit predecessor.
-BasicBlock* BasicBlock::GetUniquePred(Compiler* compiler)
+BasicBlock* BasicBlock::GetUniquePred(Compiler* compiler) const
{
if ((bbPreds == nullptr) || (bbPreds->flNext != nullptr) || (this == compiler->fgFirstBB))
{
// Return Value:
// The unique successor of a block, or nullptr if there is no unique successor.
-BasicBlock* BasicBlock::GetUniqueSucc()
+BasicBlock* BasicBlock::GetUniqueSucc() const
{
if (bbJumpKind == BBJ_ALWAYS)
{
// True if block is empty, or contains only PHI assignments,
// or contains zero or more PHI assignments followed by NOPs.
//
-bool BasicBlock::isEmpty()
+bool BasicBlock::isEmpty() const
{
if (!IsLIR())
{
- Statement* stmt = FirstNonPhiDef();
-
- while (stmt != nullptr)
+ for (Statement* const stmt : NonPhiStatements())
{
if (!stmt->GetRootNode()->OperIs(GT_NOP))
{
return false;
}
-
- stmt = stmt->GetNextStmt();
}
}
else
// Return Value:
// True if it a valid basic block.
//
-bool BasicBlock::isValid()
+bool BasicBlock::isValid() const
{
const bool isLIR = ((bbFlags & BBF_IS_LIR) != 0);
if (isLIR)
}
}
-Statement* BasicBlock::FirstNonPhiDef()
+Statement* BasicBlock::FirstNonPhiDef() const
{
Statement* stmt = firstStmt();
if (stmt == nullptr)
return stmt;
}
-Statement* BasicBlock::FirstNonPhiDefOrCatchArgAsg()
+Statement* BasicBlock::FirstNonPhiDefOrCatchArgAsg() const
{
Statement* stmt = FirstNonPhiDef();
if (stmt == nullptr)
* Can a BasicBlock be inserted after this without altering the flowgraph
*/
-bool BasicBlock::bbFallsThrough()
+bool BasicBlock::bbFallsThrough() const
{
switch (bbJumpKind)
{
-
case BBJ_THROW:
case BBJ_EHFINALLYRET:
case BBJ_EHFILTERRET:
// Return Value:
// Count of block successors.
//
-unsigned BasicBlock::NumSucc()
+unsigned BasicBlock::NumSucc() const
{
switch (bbJumpKind)
{
// Return Value:
// Requested successor block
//
-BasicBlock* BasicBlock::GetSucc(unsigned i)
+BasicBlock* BasicBlock::GetSucc(unsigned i) const
{
assert(i < NumSucc()); // Index bounds check.
switch (bbJumpKind)
}
// Returns true if the basic block ends with GT_JMP
-bool BasicBlock::endsWithJmpMethod(Compiler* comp)
+bool BasicBlock::endsWithJmpMethod(Compiler* comp) const
{
if (comp->compJmpOpUsed && (bbJumpKind == BBJ_RETURN) && (bbFlags & BBF_HAS_JMP))
{
// comp - Compiler instance
// fastTailCallsOnly - Only consider fast tail calls excluding tail calls via helper.
//
-bool BasicBlock::endsWithTailCallOrJmp(Compiler* comp, bool fastTailCallsOnly /*=false*/)
+bool BasicBlock::endsWithTailCallOrJmp(Compiler* comp, bool fastTailCallsOnly /*=false*/) const
{
GenTree* tailCall = nullptr;
bool tailCallsConvertibleToLoopOnly = false;
bool BasicBlock::endsWithTailCall(Compiler* comp,
bool fastTailCallsOnly,
bool tailCallsConvertibleToLoopOnly,
- GenTree** tailCall)
+ GenTree** tailCall) const
{
assert(!fastTailCallsOnly || !tailCallsConvertibleToLoopOnly);
*tailCall = nullptr;
// Return Value:
// true if the block ends with a tail call convertible to loop.
//
-bool BasicBlock::endsWithTailCallConvertibleToLoop(Compiler* comp, GenTree** tailCall)
+bool BasicBlock::endsWithTailCallConvertibleToLoop(Compiler* comp, GenTree** tailCall) const
{
bool fastTailCallsOnly = false;
bool tailCallsConvertibleToLoopOnly = true;
//------------------------------------------------------------------------
// isBBCallAlwaysPair: Determine if this is the first block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair
-
+//
// Return Value:
// True iff "this" is the first block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair
// -- a block corresponding to an exit from the try of a try/finally.
// "retless" BBJ_CALLFINALLY blocks due to a requirement to use the BBJ_ALWAYS for
// generating code.
//
-bool BasicBlock::isBBCallAlwaysPair()
+bool BasicBlock::isBBCallAlwaysPair() const
{
#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM)
if (this->bbJumpKind == BBJ_CALLFINALLY)
// Notes:
// See notes on isBBCallAlwaysPair(), above.
//
-bool BasicBlock::isBBCallAlwaysPairTail()
+bool BasicBlock::isBBCallAlwaysPairTail() const
{
return (bbPrev != nullptr) && bbPrev->isBBCallAlwaysPair();
}
// this block might be entered via flow that is not represented by an edge
// in the flowgraph.
//
-bool BasicBlock::hasEHBoundaryIn()
+bool BasicBlock::hasEHBoundaryIn() const
{
bool returnVal = (bbCatchTyp != BBCT_NONE);
if (!returnVal)
// live in registers if any successor is a normal flow edge. That's because the
// EH write-thru semantics ensure that we always have an up-to-date value on the stack.
//
-bool BasicBlock::hasEHBoundaryOut()
+bool BasicBlock::hasEHBoundaryOut() const
{
bool returnVal = false;
if (bbJumpKind == BBJ_EHFILTERRET)
}
};
+// PredEdgeList: adapter class for forward iteration of the predecessor edge linked list using range-based `for`,
+// normally used via BasicBlock::PredEdges(), e.g.:
+//    for (flowList* const edge : block->PredEdges()) ...
+//
+class PredEdgeList
+{
+    // Head of the pred edge list; nullptr denotes an empty list (no iteration occurs).
+    flowList* m_begin;
+
+    // Forward iterator for the predecessor edges linked list.
+    // The caller can't make changes to the preds list when using this.
+    //
+    class iterator
+    {
+        flowList* m_pred;
+
+#ifdef DEBUG
+        // Try to guard against the user of the iterator from making changes to the IR that would invalidate
+        // the iterator: cache the edge we think should be next, then check it when we actually do the `++`
+        // operation. This is a bit conservative, but attempts to protect against callers assuming too much about
+        // this iterator implementation.
+        flowList* m_next;
+#endif
+
+    public:
+        // `pred` is the first edge to visit; nullptr constructs the end() sentinel.
+        iterator(flowList* pred);
+
+        flowList* operator*() const
+        {
+            return m_pred;
+        }
+
+        iterator& operator++();
+
+        // Only the current edge is compared; end() is represented by iterator(nullptr).
+        bool operator!=(const iterator& i) const
+        {
+            return m_pred != i.m_pred;
+        }
+    };
+
+public:
+    PredEdgeList(flowList* pred) : m_begin(pred)
+    {
+    }
+
+    iterator begin() const
+    {
+        return iterator(m_begin);
+    }
+
+    iterator end() const
+    {
+        return iterator(nullptr);
+    }
+};
+
+// PredBlockList: adapter class for forward iteration of the predecessor edge linked list yielding
+// predecessor blocks, using range-based `for`, normally used via BasicBlock::PredBlocks(), e.g.:
+//    for (BasicBlock* const predBlock : block->PredBlocks()) ...
+//
+class PredBlockList
+{
+    // Head of the pred edge list; nullptr denotes an empty list (no iteration occurs).
+    flowList* m_begin;
+
+    // Forward iterator for the predecessor edges linked list, yielding the predecessor block, not the edge.
+    // The caller can't make changes to the preds list when using this.
+    //
+    class iterator
+    {
+        flowList* m_pred;
+
+#ifdef DEBUG
+        // Try to guard against the user of the iterator from making changes to the IR that would invalidate
+        // the iterator: cache the edge we think should be next, then check it when we actually do the `++`
+        // operation. This is a bit conservative, but attempts to protect against callers assuming too much about
+        // this iterator implementation.
+        flowList* m_next;
+#endif
+
+    public:
+        // `pred` is the first edge to visit; nullptr constructs the end() sentinel.
+        iterator(flowList* pred);
+
+        // Yields the predecessor *block* of the current edge (defined out-of-class, after flowList).
+        BasicBlock* operator*() const;
+
+        iterator& operator++();
+
+        // Only the current edge is compared; end() is represented by iterator(nullptr).
+        bool operator!=(const iterator& i) const
+        {
+            return m_pred != i.m_pred;
+        }
+    };
+
+public:
+    PredBlockList(flowList* pred) : m_begin(pred)
+    {
+    }
+
+    iterator begin() const
+    {
+        return iterator(m_begin);
+    }
+
+    iterator end() const
+    {
+        return iterator(nullptr);
+    }
+};
+
+// BBArrayIterator: forward iterator for an array of BasicBlock*, such as the BBswtDesc->bbsDstTab.
+// It is an error (with assert) to yield a nullptr BasicBlock* in this array.
+// `m_bbEntry` can be nullptr, but it only makes sense if both the begin and end of an iteration range are nullptr
+// (meaning, no actual iteration will happen).
+//
+class BBArrayIterator
+{
+    BasicBlock* const* m_bbEntry;
+
+public:
+    BBArrayIterator(BasicBlock* const* bbEntry) : m_bbEntry(bbEntry)
+    {
+    }
+
+    BasicBlock* operator*() const
+    {
+        assert(m_bbEntry != nullptr);
+        BasicBlock* bTarget = *m_bbEntry;
+        assert(bTarget != nullptr);
+        return bTarget;
+    }
+
+    BBArrayIterator& operator++()
+    {
+        assert(m_bbEntry != nullptr);
+        ++m_bbEntry;
+        return *this;
+    }
+
+    // Pointer comparison suffices: begin and end always point into the same underlying array
+    // (or are both nullptr for an empty range).
+    bool operator!=(const BBArrayIterator& i) const
+    {
+        return m_bbEntry != i.m_bbEntry;
+    }
+};
+
+// BBSwitchTargetList: adapter class for forward iteration of switch targets, using range-based `for`,
+// normally used via BasicBlock::SwitchTargets(), e.g.:
+//    for (BasicBlock* const target : block->SwitchTargets()) ...
+//
+class BBSwitchTargetList
+{
+    // The switch descriptor whose bbsDstTab array is iterated; must be non-null (asserted in the constructor).
+    BBswtDesc* m_bbsDesc;
+
+public:
+    // Member functions are defined out-of-class, after BBswtDesc, due to C++ ordering requirements.
+    BBSwitchTargetList(BBswtDesc* bbsDesc);
+    BBArrayIterator begin() const;
+    BBArrayIterator end() const;
+};
+
//------------------------------------------------------------------------
// BasicBlockFlags: a bitmask of flags for BasicBlock
//
}
#ifdef DEBUG
- void dspFlags(); // Print the flags
- unsigned dspCheapPreds(); // Print the predecessors (bbCheapPreds)
- unsigned dspPreds(); // Print the predecessors (bbPreds)
- unsigned dspSuccs(Compiler* compiler); // Print the successors. The 'compiler' argument determines whether EH
- // regions are printed: see NumSucc() for details.
- void dspJumpKind(); // Print the block jump kind (e.g., BBJ_NONE, BBJ_COND, etc.).
+ void dspFlags(); // Print the flags
+ unsigned dspCheapPreds(); // Print the predecessors (bbCheapPreds)
+ unsigned dspPreds(); // Print the predecessors (bbPreds)
+ void dspSuccs(Compiler* compiler); // Print the successors. The 'compiler' argument determines whether EH
+ // regions are printed: see NumSucc() for details.
+ void dspJumpKind(); // Print the block jump kind (e.g., BBJ_NONE, BBJ_COND, etc.).
// Print a simple basic block header for various output, including a list of predecessors and successors.
void dspBlockHeader(Compiler* compiler, bool showKind = true, bool showFlags = false, bool showPreds = true);
// Returns "true" if the block is empty. Empty here means there are no statement
// trees *except* PHI definitions.
- bool isEmpty();
+ bool isEmpty() const;
- bool isValid();
+ bool isValid() const;
// Returns "true" iff "this" is the first block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair --
// a block corresponding to an exit from the try of a try/finally.
- bool isBBCallAlwaysPair();
+ bool isBBCallAlwaysPair() const;
+
// Returns "true" iff "this" is the last block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair --
// a block corresponding to an exit from the try of a try/finally.
- bool isBBCallAlwaysPairTail();
+ bool isBBCallAlwaysPairTail() const;
BBjumpKinds bbJumpKind; // jump (if any) at the end of this block
// Note that for BBJ_COND, which has two successors (fall through and condition true branch target),
// only the unique targets are returned. Thus, if both targets are the same, NumSucc() will only return 1
// instead of 2.
-
+ //
// NumSucc: Returns the number of successors of "this".
- unsigned NumSucc();
+ unsigned NumSucc() const;
unsigned NumSucc(Compiler* comp);
// GetSucc: Returns the "i"th successor. Requires (0 <= i < NumSucc()).
- BasicBlock* GetSucc(unsigned i);
+ BasicBlock* GetSucc(unsigned i) const;
BasicBlock* GetSucc(unsigned i, Compiler* comp);
- BasicBlock* GetUniquePred(Compiler* comp);
+ // SwitchTargets: convenience methods for enabling range-based `for` iteration over a switch block's targets, e.g.:
+ // for (BasicBlock* const bTarget : block->SwitchTargets()) ...
+ //
+ BBSwitchTargetList SwitchTargets() const
+ {
+ assert(bbJumpKind == BBJ_SWITCH);
+ return BBSwitchTargetList(bbJumpSwt);
+ }
+
+ BasicBlock* GetUniquePred(Compiler* comp) const;
- BasicBlock* GetUniqueSucc();
+ BasicBlock* GetUniqueSucc() const;
unsigned countOfInEdges() const
{
return sameTryRegion(blk1, blk2) && sameHndRegion(blk1, blk2);
}
- bool hasEHBoundaryIn();
- bool hasEHBoundaryOut();
+ bool hasEHBoundaryIn() const;
+ bool hasEHBoundaryOut() const;
// Some non-zero value that will not collide with real tokens for bbCatchTyp
#define BBCT_NONE 0x00000000
flowList* bbPreds; // ptr to list of predecessors
};
+ // PredEdges: convenience method for enabling range-based `for` iteration over predecessor edges, e.g.:
+ // for (flowList* const edge : block->PredEdges()) ...
+ //
+ PredEdgeList PredEdges() const
+ {
+ return PredEdgeList(bbPreds);
+ }
+
+ // PredBlocks: convenience method for enabling range-based `for` iteration over predecessor blocks, e.g.:
+ // for (BasicBlock* const predBlock : block->PredBlocks()) ...
+ //
+ PredBlockList PredBlocks() const
+ {
+ return PredBlockList(bbPreds);
+ }
+
// Pred list maintenance
//
bool checkPredListOrder();
static size_t s_Count;
#endif // MEASURE_BLOCK_SIZE
- bool bbFallsThrough();
+ bool bbFallsThrough() const;
// Our slop fraction is 1/128 of the block weight rounded off
static weight_t GetSlopFraction(weight_t weightBlk)
unsigned bbID;
#endif // DEBUG
- ThisInitState bbThisOnEntry();
- unsigned bbStackDepthOnEntry();
+ ThisInitState bbThisOnEntry() const;
+ unsigned bbStackDepthOnEntry() const;
void bbSetStack(void* stackBuffer);
- StackEntry* bbStackOnEntry();
+ StackEntry* bbStackOnEntry() const;
// "bbNum" is one-based (for unknown reasons); it is sometimes useful to have the corresponding
// zero-based number for use as an array index.
- unsigned bbInd()
+ unsigned bbInd() const
{
assert(bbNum > 0);
return bbNum - 1;
Statement* firstStmt() const;
Statement* lastStmt() const;
+ // Statements: convenience method for enabling range-based `for` iteration over the statement list, e.g.:
+ // for (Statement* const stmt : block->Statements())
+ //
StatementList Statements() const
{
return StatementList(firstStmt());
}
- GenTree* firstNode();
- GenTree* lastNode();
+ // NonPhiStatements: convenience method for enabling range-based `for` iteration over the statement list,
+ // excluding any initial PHI statements, e.g.:
+ // for (Statement* const stmt : block->NonPhiStatements())
+ //
+ StatementList NonPhiStatements() const
+ {
+ return StatementList(FirstNonPhiDef());
+ }
+
+ GenTree* firstNode() const;
+ GenTree* lastNode() const;
- bool endsWithJmpMethod(Compiler* comp);
+ bool endsWithJmpMethod(Compiler* comp) const;
bool endsWithTailCall(Compiler* comp,
bool fastTailCallsOnly,
bool tailCallsConvertibleToLoopOnly,
- GenTree** tailCall);
+ GenTree** tailCall) const;
- bool endsWithTailCallOrJmp(Compiler* comp, bool fastTailCallsOnly = false);
+ bool endsWithTailCallOrJmp(Compiler* comp, bool fastTailCallsOnly = false) const;
- bool endsWithTailCallConvertibleToLoop(Compiler* comp, GenTree** tailCall);
+ bool endsWithTailCallConvertibleToLoop(Compiler* comp, GenTree** tailCall) const;
// Returns the first statement in the statement list of "this" that is
// not an SSA definition (a lcl = phi(...) assignment).
- Statement* FirstNonPhiDef();
- Statement* FirstNonPhiDefOrCatchArgAsg();
+ Statement* FirstNonPhiDef() const;
+ Statement* FirstNonPhiDefOrCatchArgAsg() const;
BasicBlock() : bbStmtList(nullptr), bbLiveIn(VarSetOps::UninitVal()), bbLiveOut(VarSetOps::UninitVal())
{
return Successors<AllSuccessorIterPosition>(comp, this);
}
+    // BBSuccList: adapter class for forward iteration of block successors, using range-based `for`,
+    // normally used via BasicBlock::Succs(), e.g.:
+    //    for (BasicBlock* const target : block->Succs()) ...
+    //
+    class BBSuccList
+    {
+        // For one or two successors, pre-compute and stash the successors inline, in m_succs[], so we don't
+        // need to call a function or execute another `switch` to get them. Also, pre-compute the begin and end
+        // points of the iteration, for use by BBArrayIterator. `m_begin` and `m_end` will either point at
+        // `m_succs` or at the switch table successor array.
+        BasicBlock* m_succs[2];
+        BasicBlock* const* m_begin;
+        BasicBlock* const* m_end;
+
+    public:
+        // Constructor and iterator accessors are defined out-of-class, below.
+        BBSuccList(const BasicBlock* block);
+        BBArrayIterator begin() const;
+        BBArrayIterator end() const;
+    };
+
+    // BBCompilerSuccList: adapter class for forward iteration of block successors, using range-based `for`,
+    // normally used via BasicBlock::Succs(), e.g.:
+    //    for (BasicBlock* const target : block->Succs(compiler)) ...
+    //
+    // This version uses NumSucc(Compiler*)/GetSucc(Compiler*). See the documentation there for the explanation
+    // of the implications of this versus the version that does not take `Compiler*`.
+    class BBCompilerSuccList
+    {
+        Compiler* m_comp;
+        BasicBlock* m_block;
+
+        // iterator: forward iterator over the block's successor indices; each dereference computes the
+        // successor on demand via GetSucc(m_succNum, m_comp) (unlike BBArrayIterator, nothing is pre-computed).
+        // The block's successors must not change during the iteration.
+        //
+        class iterator
+        {
+            Compiler* m_comp;
+            BasicBlock* m_block;
+            unsigned m_succNum;
+
+        public:
+            iterator(Compiler* comp, BasicBlock* block, unsigned succNum)
+                : m_comp(comp), m_block(block), m_succNum(succNum)
+            {
+            }
+
+            BasicBlock* operator*() const
+            {
+                assert(m_block != nullptr);
+                BasicBlock* bTarget = m_block->GetSucc(m_succNum, m_comp);
+                assert(bTarget != nullptr);
+                return bTarget;
+            }
+
+            iterator& operator++()
+            {
+                ++m_succNum;
+                return *this;
+            }
+
+            // Only the successor index is compared; assumes both iterators refer to the same block.
+            bool operator!=(const iterator& i) const
+            {
+                return m_succNum != i.m_succNum;
+            }
+        };
+
+    public:
+        BBCompilerSuccList(Compiler* comp, BasicBlock* block) : m_comp(comp), m_block(block)
+        {
+        }
+
+        iterator begin() const
+        {
+            return iterator(m_comp, m_block, 0);
+        }
+
+        iterator end() const
+        {
+            return iterator(m_comp, m_block, m_block->NumSucc(m_comp));
+        }
+    };
+
+    // Succs: convenience methods for enabling range-based `for` iteration over a block's successors, e.g.:
+    //    for (BasicBlock* const succ : block->Succs()) ...
+    //
+    // There are two options: one that takes a Compiler* and one that doesn't. These correspond to the
+    // NumSucc()/GetSucc() functions that do or do not take a Compiler*. See the comment for NumSucc()/GetSucc()
+    // for the distinction.
+    BBSuccList Succs() const
+    {
+        return BBSuccList(this);
+    }
+
+    // Note: non-const because NumSucc(Compiler*)/GetSucc(Compiler*), used by BBCompilerSuccList, are non-const.
+    BBCompilerSuccList Succs(Compiler* comp)
+    {
+        return BBCompilerSuccList(comp, this);
+    }
+
// Try to clone block state and statements from `from` block to `to` block (which must be new/empty),
// optionally replacing uses of local `varNum` with IntCns `varVal`. Return true if all statements
// in the block are cloned successfully, false (with partially-populated `to` block) if one fails.
Compiler* compiler, BasicBlock* to, const BasicBlock* from, unsigned varNum = (unsigned)-1, int varVal = 0);
void MakeLIR(GenTree* firstNode, GenTree* lastNode);
- bool IsLIR();
+ bool IsLIR() const;
void SetDominatedByExceptionalEntryFlag()
{
bbFlags |= BBF_DOMINATED_BY_EXCEPTIONAL_ENTRY;
}
- bool IsDominatedByExceptionalEntryFlag()
+ bool IsDominatedByExceptionalEntryFlag() const
{
return (bbFlags & BBF_DOMINATED_BY_EXCEPTIONAL_ENTRY) != 0;
}
#ifdef DEBUG
- bool Contains(const GenTree* node)
+ bool Contains(const GenTree* node) const
{
assert(IsLIR());
for (Iterator iter = begin(); iter != end(); ++iter)
// Map from Block to Block. Used for a variety of purposes.
typedef JitHashTable<BasicBlock*, JitPtrKeyFuncs<BasicBlock>, BasicBlock*> BlockToBlockMap;
+// BasicBlockIterator: forward iterator for the BasicBlock linked list.
+// It is allowed to make changes to the BasicBlock list as long as the current block remains in the list.
+// E.g., the current block `m_bbNext` pointer can be altered (such as when inserting a following block),
+// as long as the current block is still in the list.
+// The block list is expected to be properly doubly-linked.
+//
+class BasicBlockIterator
+{
+    BasicBlock* m_block;
+
+public:
+    // `block` may be nullptr, which constructs an "end" iterator.
+    BasicBlockIterator(BasicBlock* block) : m_block(block)
+    {
+    }
+
+    BasicBlock* operator*() const
+    {
+        return m_block;
+    }
+
+    BasicBlockIterator& operator++()
+    {
+        assert(m_block != nullptr);
+        // Check that we haven't been spliced out of the list.
+        assert((m_block->bbNext == nullptr) || (m_block->bbNext->bbPrev == m_block));
+        assert((m_block->bbPrev == nullptr) || (m_block->bbPrev->bbNext == m_block));
+
+        m_block = m_block->bbNext;
+        return *this;
+    }
+
+    bool operator!=(const BasicBlockIterator& i) const
+    {
+        return m_block != i.m_block;
+    }
+};
+
+// BasicBlockSimpleList: adapter class for forward iteration of a lexically contiguous range of
+// BasicBlock, starting at `begin` and going to the end of the function, using range-based `for`,
+// normally used via Compiler::Blocks(), e.g.:
+//    for (BasicBlock* const block : Blocks()) ...
+//
+class BasicBlockSimpleList
+{
+    BasicBlock* m_begin;
+
+public:
+    // `begin` may be nullptr, in which case the range is empty.
+    BasicBlockSimpleList(BasicBlock* begin) : m_begin(begin)
+    {
+    }
+
+    BasicBlockIterator begin() const
+    {
+        return BasicBlockIterator(m_begin);
+    }
+
+    BasicBlockIterator end() const
+    {
+        return BasicBlockIterator(nullptr);
+    }
+};
+
+// BasicBlockRangeList: adapter class for forward iteration of a lexically contiguous range of
+// BasicBlock specified with both `begin` and `end` blocks. `begin` and `end` are *inclusive*
+// and must be non-null. E.g.,
+//    for (BasicBlock* const block : BasicBlockRangeList(startBlock, endBlock)) ...
+//
+// Note: `end` must be reachable from `begin` by following bbNext links, since end() below is
+// `m_end->bbNext`; otherwise the iteration will not terminate at `end`.
+//
+class BasicBlockRangeList
+{
+    BasicBlock* m_begin;
+    BasicBlock* m_end;
+
+public:
+    BasicBlockRangeList(BasicBlock* begin, BasicBlock* end) : m_begin(begin), m_end(end)
+    {
+        assert(begin != nullptr);
+        assert(end != nullptr);
+    }
+
+    BasicBlockIterator begin() const
+    {
+        return BasicBlockIterator(m_begin);
+    }
+
+    BasicBlockIterator end() const
+    {
+        return BasicBlockIterator(m_end->bbNext); // walk until we see the block *following* the `m_end` block
+    }
+};
+
// BBswtDesc -- descriptor for a switch block
//
// Things to know:
}
};
+// BBSwitchTargetList out-of-class-declaration implementations (here due to C++ ordering requirements).
+//
+
+inline BBSwitchTargetList::BBSwitchTargetList(BBswtDesc* bbsDesc) : m_bbsDesc(bbsDesc)
+{
+    assert(m_bbsDesc != nullptr);
+    assert(m_bbsDesc->bbsDstTab != nullptr);
+}
+
+inline BBArrayIterator BBSwitchTargetList::begin() const
+{
+    return BBArrayIterator(m_bbsDesc->bbsDstTab);
+}
+
+inline BBArrayIterator BBSwitchTargetList::end() const
+{
+    return BBArrayIterator(m_bbsDesc->bbsDstTab + m_bbsDesc->bbsCount);
+}
+
+// BBSuccList out-of-class-declaration implementations
+//
+// The constructor pre-computes the successor set for the block's jump kind, matching the set produced by
+// the NumSucc()/GetSucc() pair that does not take a Compiler* (see the BBJ_COND duplicate-target case below).
+//
+inline BasicBlock::BBSuccList::BBSuccList(const BasicBlock* block)
+{
+    assert(block != nullptr);
+    switch (block->bbJumpKind)
+    {
+        case BBJ_THROW:
+        case BBJ_RETURN:
+        case BBJ_EHFINALLYRET:
+        case BBJ_EHFILTERRET:
+            // We don't need m_succs.
+            m_begin = nullptr;
+            m_end = nullptr;
+            break;
+
+        case BBJ_CALLFINALLY:
+        case BBJ_ALWAYS:
+        case BBJ_EHCATCHRET:
+        case BBJ_LEAVE:
+            m_succs[0] = block->bbJumpDest;
+            m_begin = &m_succs[0];
+            m_end = &m_succs[1];
+            break;
+
+        case BBJ_NONE:
+            m_succs[0] = block->bbNext;
+            m_begin = &m_succs[0];
+            m_end = &m_succs[1];
+            break;
+
+        case BBJ_COND:
+            m_succs[0] = block->bbNext;
+            m_begin = &m_succs[0];
+
+            // If both fall-through and branch successors are identical, then only include
+            // them once in the iteration (this is the same behavior as NumSucc()/GetSucc()).
+            if (block->bbJumpDest == block->bbNext)
+            {
+                // m_succs[1] is intentionally left unset here; m_end excludes it, so it is never read.
+                m_end = &m_succs[1];
+            }
+            else
+            {
+                m_succs[1] = block->bbJumpDest;
+                m_end = &m_succs[2];
+            }
+            break;
+
+        case BBJ_SWITCH:
+            // We don't use the m_succs in-line data for switches; use the existing jump table in the block.
+            assert(block->bbJumpSwt != nullptr);
+            assert(block->bbJumpSwt->bbsDstTab != nullptr);
+            m_begin = block->bbJumpSwt->bbsDstTab;
+            m_end = block->bbJumpSwt->bbsDstTab + block->bbJumpSwt->bbsCount;
+            break;
+
+        default:
+            unreached();
+    }
+
+    // Sanity: the computed range is never negative (it may be empty).
+    assert(m_end >= m_begin);
+}
+
+inline BBArrayIterator BasicBlock::BBSuccList::begin() const
+{
+    return BBArrayIterator(m_begin);
+}
+
+inline BBArrayIterator BasicBlock::BBSuccList::end() const
+{
+    return BBArrayIterator(m_end);
+}
+
// In compiler terminology the control flow between two BasicBlocks
// is typically referred to as an "edge". Most well known are the
// backward branches for loops, which are often called "back-edges".
}
};
+// Pred list iterator implementations (that are required to be defined after the declaration of BasicBlock and flowList)
+
+inline PredEdgeList::iterator::iterator(flowList* pred) : m_pred(pred)
+{
+#ifdef DEBUG
+ // Remember the expected next edge so operator++ can assert that the pred
+ // list is not changed during the iteration.
+ m_next = (m_pred == nullptr) ? nullptr : m_pred->flNext;
+#endif
+}
+
+inline PredEdgeList::iterator& PredEdgeList::iterator::operator++()
+{
+ flowList* next = m_pred->flNext;
+
+#ifdef DEBUG
+ // Check that the next edge is the one we expect to see; if it isn't, the
+ // pred list was illegally mutated during the iteration.
+ assert(next == m_next);
+ m_next = (next == nullptr) ? nullptr : next->flNext;
+#endif // DEBUG
+
+ m_pred = next;
+ return *this;
+}
+
+inline PredBlockList::iterator::iterator(flowList* pred) : m_pred(pred)
+{
+#ifdef DEBUG
+ // Remember the expected next edge so operator++ can assert that the pred
+ // list is not changed during the iteration.
+ m_next = (m_pred == nullptr) ? nullptr : m_pred->flNext;
+#endif
+}
+
+// Dereference yields the predecessor block of the current edge (not the edge itself).
+inline BasicBlock* PredBlockList::iterator::operator*() const
+{
+ return m_pred->getBlock();
+}
+
+inline PredBlockList::iterator& PredBlockList::iterator::operator++()
+{
+ flowList* next = m_pred->flNext;
+
+#ifdef DEBUG
+ // Check that the next edge is the one we expect to see; if it isn't, the
+ // pred list was illegally mutated during the iteration.
+ assert(next == m_next);
+ m_next = (next == nullptr) ? nullptr : next->flNext;
+#endif // DEBUG
+
+ m_pred = next;
+ return *this;
+}
+
// This enum represents a pre/post-visit action state to emulate a depth-first
// spanning tree traversal of a tree or graph.
enum DfsStackState
#ifdef DEBUG
// No label flags should be set before this.
- for (BasicBlock* block = compiler->fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : compiler->Blocks())
{
assert((block->bbFlags & BBF_HAS_LABEL) == 0);
}
compiler->fgFirstBB->bbFlags |= BBF_HAS_LABEL;
}
- for (BasicBlock* block = compiler->fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : compiler->Blocks())
{
switch (block->bbJumpKind)
{
break;
case BBJ_SWITCH:
- unsigned jumpCnt;
- jumpCnt = block->bbJumpSwt->bbsCount;
- BasicBlock** jumpTab;
- jumpTab = block->bbJumpSwt->bbsDstTab;
- do
+ for (BasicBlock* const bTarget : block->SwitchTargets())
{
- JITDUMP(" " FMT_BB " : branch target\n", (*jumpTab)->bbNum);
- (*jumpTab)->bbFlags |= BBF_HAS_LABEL;
- } while (++jumpTab, --jumpCnt);
+ JITDUMP(" " FMT_BB " : branch target\n", bTarget->bbNum);
+ bTarget->bbFlags |= BBF_HAS_LABEL;
+ }
break;
case BBJ_CALLFINALLY:
add->acdDstBlk->bbFlags |= BBF_HAS_LABEL;
}
- EHblkDsc* HBtab;
- EHblkDsc* HBtabEnd;
-
- for (HBtab = compiler->compHndBBtab, HBtabEnd = compiler->compHndBBtab + compiler->compHndBBtabCount;
- HBtab < HBtabEnd; HBtab++)
+ for (EHblkDsc* const HBtab : EHClauses(compiler))
{
HBtab->ebdTryBeg->bbFlags |= BBF_HAS_LABEL;
HBtab->ebdHndBeg->bbFlags |= BBF_HAS_LABEL;
}
#endif // DEBUG
- unsigned XTnum;
- EHblkDsc* HBtab;
- EHblkDsc* HBtabEnd;
+ unsigned XTnum;
bool isCoreRTABI = compiler->IsTargetAbi(CORINFO_CORERT_ABI);
// clauses. If there aren't, we don't need to look for BBJ_CALLFINALLY.
bool anyFinallys = false;
- for (HBtab = compiler->compHndBBtab, HBtabEnd = compiler->compHndBBtab + compiler->compHndBBtabCount;
- HBtab < HBtabEnd; HBtab++)
+ for (EHblkDsc* const HBtab : EHClauses(compiler))
{
if (HBtab->HasFinallyHandler())
{
}
if (anyFinallys)
{
- for (BasicBlock* block = compiler->fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : compiler->Blocks())
{
if (block->bbJumpKind == BBJ_CALLFINALLY)
{
XTnum = 0; // This is the index we pass to the VM
- for (HBtab = compiler->compHndBBtab, HBtabEnd = compiler->compHndBBtab + compiler->compHndBBtabCount;
- HBtab < HBtabEnd; HBtab++)
+ for (EHblkDsc* const HBtab : EHClauses(compiler))
{
UNATIVE_OFFSET tryBeg, tryEnd, hndBeg, hndEnd, hndTyp;
if (duplicateClauseCount > 0)
{
- unsigned reportedDuplicateClauseCount = 0; // How many duplicated clauses have we reported?
- unsigned XTnum2;
+ unsigned reportedDuplicateClauseCount = 0; // How many duplicated clauses have we reported?
+ unsigned XTnum2;
+ EHblkDsc* HBtab;
for (XTnum2 = 0, HBtab = compiler->compHndBBtab; XTnum2 < compiler->compHndBBtabCount; XTnum2++, HBtab++)
{
unsigned enclosingTryIndex;
if (clonedFinallyCount > 0)
{
unsigned reportedClonedFinallyCount = 0;
- for (BasicBlock* block = compiler->fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : compiler->Blocks())
{
if (block->bbJumpKind == BBJ_CALLFINALLY)
{
//
//It turns out that the only thing we really have to assert is that the first statement in each basic
//block has an IL offset and appears in eeBoundaries.
- for (BasicBlock * block = compiler->fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : compiler->Blocks())
{
Statement* stmt = block->firstStmt();
if ((block->bbRefs > 1) && (stmt != nullptr))
{
compSizeEstimate = 0;
compCycleEstimate = 0;
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
compSizeEstimate += stmt->GetCostSz();
compCycleEstimate += stmt->GetCostEx();
fgSsaPassesCompleted = 0;
fgVNPassesCompleted = 0;
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
- for (GenTree* tree = stmt->GetTreeList(); tree != nullptr; tree = tree->gtNext)
+ for (GenTree* const tree : stmt->TreeList())
{
tree->ClearVN();
tree->ClearAssertion();
// Recompute reachability sets, dominators, and loops.
optLoopCount = 0;
fgDomsComputed = false;
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
block->bbFlags &= ~BBF_LOOP_FLAGS;
}
// Otherwise, iterate.
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
- for (Statement* stmt = block->FirstNonPhiDef(); stmt != nullptr; stmt = stmt->GetNextStmt())
+ for (Statement* const stmt : block->NonPhiStatements())
{
- for (GenTree* tree = stmt->GetTreeList(); tree != nullptr; tree = tree->gtNext)
+ for (GenTree* const tree : stmt->TreeList())
{
TestLabelAndNum tlAndN;
assert(fgStmtListThreaded);
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
- for (GenTree* call = stmt->GetTreeList(); call != nullptr; call = call->gtNext)
+ for (GenTree* const call : stmt->TreeList())
{
if (call->gtOper != GT_CALL)
continue;
- argNum =
-
- regArgNum = regArgDeferred = regArgTemp =
-
- regArgConst = regArgLclVar =
-
- argDWordNum = argLngNum = argFltNum = argDblNum = 0;
+ argNum = regArgNum = regArgDeferred = regArgTemp = regArgConst = regArgLclVar = argDWordNum =
+ argLngNum = argFltNum = argDblNum = 0;
argTotalCalls++;
GenTree* dFindTree(unsigned id)
{
- Compiler* comp = JitTls::GetCompiler();
- BasicBlock* block;
- GenTree* tree;
+ Compiler* comp = JitTls::GetCompiler();
+ GenTree* tree;
dbTreeBlock = nullptr;
dbTree = nullptr;
- for (block = comp->fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : comp->Blocks())
{
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
tree = dFindTree(stmt->GetRootNode(), id);
if (tree != nullptr)
Statement* dFindStmt(unsigned id)
{
- Compiler* comp = JitTls::GetCompiler();
- BasicBlock* block;
+ Compiler* comp = JitTls::GetCompiler();
dbStmt = nullptr;
unsigned stmtId = 0;
- for (block = comp->fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : comp->Blocks())
{
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
stmtId++;
if (stmtId == id)
void fgInterBlockLocalVarLiveness();
+ // Blocks: convenience methods for enabling range-based `for` iteration over the function's blocks, e.g.:
+ // 1. for (BasicBlock* const block : compiler->Blocks()) ...
+ // 2. for (BasicBlock* const block : compiler->Blocks(startBlock)) ...
+ // 3. for (BasicBlock* const block : compiler->Blocks(startBlock, endBlock)) ...
+ // In case (1), the block list can be empty. In case (2), `startBlock` can be nullptr. In case (3),
+ // both `startBlock` and `endBlock` must be non-null.
+ // These assume the block list is not changed during the iteration.
+ //
+ // Iterate over every block in the function, starting at `fgFirstBB`.
+ BasicBlockSimpleList Blocks() const
+ {
+ return BasicBlockSimpleList(fgFirstBB);
+ }
+
+ // Iterate from `startBlock` through the end of the block list.
+ BasicBlockSimpleList Blocks(BasicBlock* startBlock) const
+ {
+ return BasicBlockSimpleList(startBlock);
+ }
+
+ // Iterate from `startBlock` to `endBlock`, inclusive of both endpoints.
+ BasicBlockRangeList Blocks(BasicBlock* startBlock, BasicBlock* endBlock) const
+ {
+ return BasicBlockRangeList(startBlock, endBlock);
+ }
+
// The presence of a partial definition presents some difficulties for SSA: this is both a use of some SSA name
// of "x", and a def of a new SSA name for "x". The tree only has one local variable for "x", so it has to choose
// whether to treat that as the use or def. It chooses the "use", and thus the old SSA name. This map allows us
lpEntry->bbNum <= lpBottom->bbNum &&
(lpHead->bbNum < lpTop->bbNum || lpHead->bbNum > lpBottom->bbNum);
}
+
+ // LoopBlocks: convenience method for enabling range-based `for` iteration over all the
+ // blocks in a loop, e.g.:
+ // for (BasicBlock* const block : loop->LoopBlocks()) ...
+ // Currently, the loop blocks are expected to be in linear, lexical, `bbNext` order
+ // from `lpFirst` through `lpBottom`, inclusive. All blocks in this range are considered
+ // to be part of the loop.
+ // Note: assumes the block list is not changed during the iteration.
+ //
+ BasicBlockRangeList LoopBlocks() const
+ {
+ return BasicBlockRangeList(lpFirst, lpBottom);
+ }
};
protected:
}
};
+// EHClauses: adapter class for forward iteration of the exception handling table using range-based `for`, e.g.:
+// for (EHblkDsc* const ehDsc : EHClauses(compiler))
+//
+// Note: this assumes the EH table is not changed (e.g., reallocated or resized) during the iteration,
+// since the iterator walks raw pointers into the table.
+//
+class EHClauses
+{
+ EHblkDsc* m_begin;
+ EHblkDsc* m_end;
+
+ // Forward iterator for the exception handling table entries. Iteration is in table order.
+ //
+ class iterator
+ {
+ EHblkDsc* m_ehDsc;
+
+ public:
+ iterator(EHblkDsc* ehDsc) : m_ehDsc(ehDsc)
+ {
+ }
+
+ EHblkDsc* operator*() const
+ {
+ return m_ehDsc;
+ }
+
+ // Advance to the next table entry (the table is a contiguous array).
+ iterator& operator++()
+ {
+ ++m_ehDsc;
+ return *this;
+ }
+
+ bool operator!=(const iterator& i) const
+ {
+ return m_ehDsc != i.m_ehDsc;
+ }
+ };
+
+public:
+ EHClauses(Compiler* comp) : m_begin(comp->compHndBBtab), m_end(comp->compHndBBtab + comp->compHndBBtabCount)
+ {
+ // A null table pointer is only legal when the table is empty (begin == end).
+ assert((m_begin != nullptr) || (m_begin == m_end));
+ }
+
+ iterator begin() const
+ {
+ return iterator(m_begin);
+ }
+
+ iterator end() const
+ {
+ return iterator(m_end);
+ }
+};
+
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
/*****************************************************************************/
-// foreach_block: An iterator over all blocks in the function.
-// __compiler: the Compiler* object
-// __block : a BasicBlock*, already declared, that gets updated each iteration.
-
-#define foreach_block(__compiler, __block) \
- for ((__block) = (__compiler)->fgFirstBB; (__block); (__block) = (__block)->bbNext)
-
-/*****************************************************************************/
-/*****************************************************************************/
-
#ifdef DEBUG
void dumpConvertedVarSet(Compiler* comp, VARSET_VALARG_TP vars);
#endif // DEBUG
assert(fgStmtListThreaded);
- for (GenTree* tree = stmt->GetTreeList(); tree != nullptr; tree = tree->gtNext)
+ for (GenTree* const tree : stmt->TreeList())
{
if (tree->gtOper != GT_LCL_VAR)
{
*/
void Compiler::optBlockCopyPropPopStacks(BasicBlock* block, LclNumToGenTreePtrStack* curSsaName)
{
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
- for (GenTree* tree = stmt->GetTreeList(); tree != nullptr; tree = tree->gtNext)
+ for (GenTree* const tree : stmt->TreeList())
{
if (!tree->IsLocal())
{
// There are no definitions at the start of the block. So clear it.
compCurLifeTree = nullptr;
VarSetOps::Assign(this, compCurLife, block->bbLiveIn);
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
VarSetOps::ClearD(this, optCopyPropKillSet);
// Walk the tree to find if any local variable can be replaced with current live definitions.
- for (GenTree* tree = stmt->GetTreeList(); tree != nullptr; tree = tree->gtNext)
+ for (GenTree* const tree : stmt->TreeList())
{
treeLifeUpdater.UpdateLife(tree);
}
// This logic must be in sync with SSA renaming process.
- for (GenTree* tree = stmt->GetTreeList(); tree != nullptr; tree = tree->gtNext)
+ for (GenTree* const tree : stmt->TreeList())
{
const unsigned lclNum = optIsSsaLocal(tree);
if (lclNum == BAD_VAR_NUM)
// See the article on hints in MSDN for more information on their necessity and use:
// http://msdn.microsoft.com/en-us/library/dd997977.aspx
-#define foreach_block(__compiler, __block) for (;;)
-
#define FOREACH_REGISTER_FILE(file) for (;;)
// From jit.h
assert(fgSsaPassesCompleted == 1);
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
#ifndef DEBUG
if (!optDoEarlyPropForBlock(block))
noway_assert(newSwitchBlock != nullptr);
noway_assert(oldSwitchBlock->bbJumpKind == BBJ_SWITCH);
- unsigned jumpCnt = oldSwitchBlock->bbJumpSwt->bbsCount;
- BasicBlock** jumpTab = oldSwitchBlock->bbJumpSwt->bbsDstTab;
-
- unsigned i;
-
// Walk the switch's jump table, updating the predecessor for each branch.
- for (i = 0; i < jumpCnt; i++)
+ for (BasicBlock* const bJump : oldSwitchBlock->SwitchTargets())
{
- BasicBlock* bJump = jumpTab[i];
noway_assert(bJump != nullptr);
// Note that if there are duplicate branch targets in the switch jump table,
bool modified = false;
- for (flowList* pred = block->bbPreds; pred != nullptr; pred = pred->flNext)
+ for (flowList* const pred : block->PredEdges())
{
if (oldPred == pred->getBlock())
{
void Compiler::fgInitBBLookup()
{
BasicBlock** dscBBptr;
- BasicBlock* tmpBBdesc;
/* Allocate the basic block table */
/* Walk all the basic blocks, filling in the table */
- for (tmpBBdesc = fgFirstBB; tmpBBdesc; tmpBBdesc = tmpBBdesc->bbNext)
+ for (BasicBlock* const block : Blocks())
{
- *dscBBptr++ = tmpBBdesc;
+ *dscBBptr++ = block;
}
noway_assert(dscBBptr == fgBBs + fgBBcount);
{
noway_assert(targetBlock->bbNum <= sourceBlock->bbNum);
- for (BasicBlock* block = targetBlock; block != sourceBlock->bbNext; block = block->bbNext)
+ for (BasicBlock* const block : Blocks(targetBlock, sourceBlock))
{
if (((block->bbFlags & BBF_BACKWARD_JUMP) == 0) && (block->bbJumpKind != BBJ_RETURN))
{
/* Walk all the basic blocks, filling in the target addresses */
- for (BasicBlock* curBBdesc = fgFirstBB; curBBdesc; curBBdesc = curBBdesc->bbNext)
+ for (BasicBlock* const curBBdesc : Blocks())
{
switch (curBBdesc->bbJumpKind)
{
#if !defined(FEATURE_EH_FUNCLETS)
- EHblkDsc* HBtabEnd;
- for (HBtab = compHndBBtab, HBtabEnd = compHndBBtab + compHndBBtabCount; HBtab < HBtabEnd; HBtab++)
+ for (EHblkDsc* const HBtab : EHClauses(this))
{
if (ehMaxHndNestingCount <= HBtab->ebdHandlerNestingLevel)
ehMaxHndNestingCount = HBtab->ebdHandlerNestingLevel + 1;
EHblkDsc* HBtab;
- for (BasicBlock* blk = fgFirstBB; blk; blk = blk->bbNext)
+ for (BasicBlock* const blk : Blocks())
{
if (blk->bbFlags & BBF_INTERNAL)
{
break;
case BBJ_SWITCH: // block ends with a switch statement
-
- BBswtDesc* swtDesc;
- swtDesc = blk->bbJumpSwt;
-
- assert(swtDesc);
-
- unsigned i;
- for (i = 0; i < swtDesc->bbsCount; i++)
+ for (BasicBlock* const bTarget : blk->SwitchTargets())
{
- fgControlFlowPermitted(blk, swtDesc->bbsDstTab[i]);
+ fgControlFlowPermitted(blk, bTarget);
}
-
break;
case BBJ_EHCATCHRET: // block ends with a leave out of a catch (only #if defined(FEATURE_EH_FUNCLETS))
// could have a similar function for LIR that searches for GT_IL_OFFSET nodes.
assert(!block->IsLIR());
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
if (stmt->GetILOffsetX() != BAD_IL_OFFSET)
{
// Without these arcs, a block 'b' may not be a member of succs(preds(b))
if (curr->bbJumpKind != BBJ_SWITCH)
{
- unsigned numSuccs = curr->NumSucc(this);
- for (unsigned i = 0; i < numSuccs; i++)
+ for (BasicBlock* const succ : curr->Succs(this))
{
- BasicBlock* succ = curr->GetSucc(i, this);
if (succ != newBlock)
{
JITDUMP(FMT_BB " previous predecessor was " FMT_BB ", now is " FMT_BB "\n", succ->bbNum, curr->bbNum,
fgRemoveRefPred(succBlock, block);
- for (flowList* pred = block->bbPreds; pred; pred = pred->flNext)
+ for (flowList* const pred : block->PredEdges())
{
BasicBlock* predBlock = pred->getBlock();
//
if (renumbered && fgComputePredsDone)
{
- for (block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
block->ensurePredListOrder(this);
}
BitVecTraits blockVecTraits(fgBBNumMax + 1, this);
BitVec blocksSeen(BitVecOps::MakeEmpty(&blockVecTraits));
- for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
BitVecOps::AddElemD(&blockVecTraits, blocksSeen, block->bbNum);
#ifdef DEBUG
void Compiler::fgPrintEdgeWeights()
{
- BasicBlock* bSrc;
- BasicBlock* bDst;
- flowList* edge;
-
// Print out all of the edge weights
- for (bDst = fgFirstBB; bDst != nullptr; bDst = bDst->bbNext)
+ for (BasicBlock* const bDst : Blocks())
{
if (bDst->bbPreds != nullptr)
{
printf(" Edge weights into " FMT_BB " :", bDst->bbNum);
- for (edge = bDst->bbPreds; edge != nullptr; edge = edge->flNext)
+ for (flowList* const edge : bDst->PredEdges())
{
- bSrc = edge->getBlock();
+ BasicBlock* bSrc = edge->getBlock();
// This is the control flow edge (bSrc -> bDst)
printf(FMT_BB " ", bSrc->bbNum);
unsigned blockOrdinal = 1;
unsigned* blkMap = new (this, CMK_DebugOnly) unsigned[blkMapSize];
memset(blkMap, 0, sizeof(unsigned) * blkMapSize);
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
assert(block->bbNum < blkMapSize);
blkMap[block->bbNum] = blockOrdinal++;
targetWeightDivisor = (double)bTarget->bbWeight;
}
- flowList* edge;
- for (edge = bTarget->bbPreds; edge != nullptr; edge = edge->flNext, edgeNum++)
+ for (flowList* const edge : bTarget->PredEdges())
{
BasicBlock* bSource = edge->getBlock();
double sourceWeightDivisor;
fprintf(fgxFile, ">");
fprintf(fgxFile, "\n </edge>");
}
+
+ ++edgeNum;
}
}
}
//
if (createDotFile)
{
- for (BasicBlock* bSource = fgFirstBB; bSource != nullptr; bSource = bSource->bbNext)
+ for (BasicBlock* const bSource : Blocks())
{
if (constrained)
{
// Emit successor edges
//
- const unsigned numSuccs = bSource->NumSucc();
-
- for (unsigned i = 0; i < numSuccs; i++)
+ for (BasicBlock* const bTarget : bSource->Succs())
{
- BasicBlock* const bTarget = bSource->GetSucc(i);
fprintf(fgxFile, " " FMT_BB " -> " FMT_BB, bSource->bbNum, bTarget->bbNum);
if (blkMap[bSource->bbNum] > blkMap[bTarget->bbNum])
{
printf("BBnum Reachable by \n");
printf("------------------------------------------------\n");
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
printf(FMT_BB " : ", block->bbNum);
BlockSetOps::Iter iter(this, block->bbReach);
{
// Output a brace for every try region that this block opens
- EHblkDsc* HBtab;
- EHblkDsc* HBtabEnd;
-
- for (HBtab = compHndBBtab, HBtabEnd = compHndBBtab + compHndBBtabCount; HBtab < HBtabEnd; HBtab++)
+ for (EHblkDsc* const HBtab : EHClauses(this))
{
if (HBtab->ebdTryBeg == block)
{
}
}
- EHblkDsc* HBtab;
- EHblkDsc* HBtabEnd;
-
- for (HBtab = compHndBBtab, HBtabEnd = compHndBBtab + compHndBBtabCount; HBtab < HBtabEnd; HBtab++)
+ for (EHblkDsc* const HBtab : EHClauses(this))
{
if (HBtab->ebdTryLast == block)
{
if (!block->IsLIR())
{
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
fgDumpStmtTree(stmt, block->bbNum);
}
{
// Note that typically we have already called fgDispBasicBlocks()
// so we don't need to print the preds and succs again here.
- for (BasicBlock* block = firstBlock; block; block = block->bbNext)
+ for (BasicBlock* block = firstBlock; block != nullptr; block = block->bbNext)
{
fgDumpBlock(block);
}
unsigned blockRefs = 0;
- for (flowList* pred = block->bbPreds; pred != nullptr; pred = pred->flNext)
+ for (flowList* const pred : block->PredEdges())
{
blockRefs += pred->flDupCount;
break;
case BBJ_SWITCH:
- {
- unsigned jumpCnt = blockPred->bbJumpSwt->bbsCount;
-
- for (unsigned i = 0; i < jumpCnt; ++i)
+ for (BasicBlock* const bTarget : blockPred->SwitchTargets())
{
- BasicBlock* jumpTab = blockPred->bbJumpSwt->bbsDstTab[i];
- assert(jumpTab != nullptr);
- if (block == jumpTab)
+ if (block == bTarget)
{
return true;
}
}
-
assert(!"SWITCH in the predecessor list with no jump label to BLOCK!");
- }
- break;
+ break;
default:
assert(!"Unexpected bbJumpKind");
// we find a potential 'hit' we check if the funclet we're looking at is
// from the correct try region.
- for (BasicBlock* bcall = comp->fgFirstFuncletBB; bcall != nullptr; bcall = bcall->bbNext)
+ for (BasicBlock* const bcall : comp->Blocks(comp->fgFirstFuncletBB))
{
if (bcall->bbJumpKind != BBJ_CALLFINALLY || bcall->bbJumpDest != finBeg)
{
/* Check bbNum, bbRefs and bbPreds */
// First, pick a traversal stamp, and label all the blocks with it.
unsigned curTraversalStamp = unsigned(InterlockedIncrement((LONG*)&bbTraverseLabel));
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
block->bbTraversalStamp = curTraversalStamp;
}
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
if (checkBBNum)
{
{
// Check to see if this block is the beginning of a filter or a handler and adjust the ref count
// appropriately.
- for (EHblkDsc *HBtab = compHndBBtab, *HBtabEnd = &compHndBBtab[compHndBBtabCount]; HBtab != HBtabEnd;
- HBtab++)
+ for (EHblkDsc* const HBtab : EHClauses(this))
{
if (HBtab->ebdHndBeg == block)
{
fgDebugCheckBlockLinks();
// For each block check the links between the trees.
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
if (block->IsLIR())
{
void Compiler::fgDebugCheckStmtsList(BasicBlock* block, bool morphTrees)
{
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
// Verify that bbStmtList is threaded correctly.
// Note that for the statements list, the GetPrevStmt() list is circular.
{
assert(fgFirstBB->bbPrev == nullptr);
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
if (block->bbNext)
{
// about the BlockSet epoch.
BitVecTraits bitVecTraits(fgBBNumMax + 1, this);
BitVec succBlocks(BitVecOps::MakeEmpty(&bitVecTraits));
- BasicBlock** jumpTable = block->bbJumpSwt->bbsDstTab;
- unsigned jumpCount = block->bbJumpSwt->bbsCount;
- for (unsigned i = 0; i < jumpCount; i++)
+ for (BasicBlock* const bTarget : block->SwitchTargets())
{
- BitVecOps::AddElemD(&bitVecTraits, succBlocks, jumpTable[i]->bbNum);
+ BitVecOps::AddElemD(&bitVecTraits, succBlocks, bTarget->bbNum);
}
// Now we should have a set of unique successors that matches what's in the switchMap.
// First, check the number of entries, then make sure all the blocks in uniqueSuccSet
{
UniquenessCheckWalker walker(this);
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
if (block->IsLIR())
{
}
else
{
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
GenTree* root = stmt->GetRootNode();
fgWalkTreePre(&root, UniquenessCheckWalker::MarkTreeId, &walker);
assert(optLoopTable != nullptr);
}
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
if (optLoopCount == 0)
{
// Limit for now to finallys that contain only a GT_RETFILT.
bool isEmpty = true;
- for (Statement* stmt : firstBlock->Statements())
+ for (Statement* const stmt : firstBlock->Statements())
{
GenTree* stmtExpr = stmt->GetRootNode();
BasicBlock* const lastTryBlock = HBtab->ebdTryLast;
assert(firstTryBlock->getTryIndex() == XTnum);
- for (BasicBlock* block = firstTryBlock; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks(firstTryBlock))
{
// Look for blocks directly contained in this try, and
// update the try region appropriately.
BasicBlock* const lastTryBlock = HBtab->ebdTryLast;
BasicBlock* const firstHandlerBlock = HBtab->ebdHndBeg;
BasicBlock* const lastHandlerBlock = HBtab->ebdHndLast;
- BasicBlock* const endHandlerBlock = lastHandlerBlock->bbNext;
assert(firstTryBlock->getTryIndex() == XTnum);
// handler region (if any) won't change.
//
// Kind of overkill to loop here, but hey.
- for (BasicBlock* block = firstTryBlock; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks(firstTryBlock))
{
// Look for blocks directly contained in this try, and
// update the try region appropriately.
// remove the EH table entry. Change handler exits to jump to
// the continuation. Clear catch type on handler entry.
// Decrement nesting level of enclosed GT_END_LFINs.
- for (BasicBlock* block = firstHandlerBlock; block != endHandlerBlock; block = block->bbNext)
+ for (BasicBlock* const block : Blocks(firstHandlerBlock, lastHandlerBlock))
{
if (block == firstHandlerBlock)
{
// If we're in a non-funclet model, decrement the nesting
// level of any GT_END_LFIN we find in the handler region,
// since we're removing the enclosing handler.
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
GenTree* expr = stmt->GetRootNode();
if (expr->gtOper == GT_END_LFIN)
// Should we compute statement cost here, or is it
// premature...? For now just count statements I guess.
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
regionStmtCount++;
}
JITDUMP("Profile scale factor (" FMT_WT "/" FMT_WT ") => clone " FMT_WT " / original " FMT_WT "\n",
retargetedWeight, originalWeight, clonedScale, originalScale);
- for (BasicBlock* block = firstBlock; block != lastBlock->bbNext; block = block->bbNext)
+ for (BasicBlock* const block : Blocks(firstBlock, lastBlock))
{
if (block->hasProfileWeight())
{
BasicBlock* const lastTryBlock = HBtab->ebdTryLast;
assert(firstTryBlock->getTryIndex() <= XTnum);
assert(lastTryBlock->getTryIndex() <= XTnum);
- BasicBlock* const afterTryBlock = lastTryBlock->bbNext;
- BasicBlock* const finallyBlock = isFinally ? HBtab->ebdHndBeg : nullptr;
+ BasicBlock* const finallyBlock = isFinally ? HBtab->ebdHndBeg : nullptr;
- for (BasicBlock* block = firstTryBlock; block != afterTryBlock; block = block->bbNext)
+ for (BasicBlock* const block : Blocks(firstTryBlock, lastTryBlock))
{
// Only check the directly contained blocks.
assert(block->hasTryIndex());
}
// Look at each of the normal control flow possibilities.
- const unsigned numSuccs = block->NumSucc();
-
- for (unsigned i = 0; i < numSuccs; i++)
+ for (BasicBlock* const succBlock : block->Succs())
{
- BasicBlock* const succBlock = block->GetSucc(i);
-
if (succBlock->hasTryIndex() && succBlock->getTryIndex() <= XTnum)
{
// Successor does not exit this try region.
// Remove the GT_END_LFIN from the continuation,
// Note we only expect to see one such statement.
bool foundEndLFin = false;
- for (Statement* stmt : continuation->Statements())
+ for (Statement* const stmt : continuation->Statements())
{
GenTree* expr = stmt->GetRootNode();
if (expr->gtOper == GT_END_LFIN)
// in case bits are left over from EH clauses being deleted.
// Walk all blocks, and reset the target bits.
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
block->bbFlags &= ~BBF_FINALLY_TARGET;
}
return;
}
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
if (block->isBBCallAlwaysPair())
{
// Look for finallys.
bool hasFinally = false;
- for (unsigned XTnum = 0; XTnum < compHndBBtabCount; XTnum++)
+ for (EHblkDsc* const HBtab : EHClauses(this))
{
- EHblkDsc* const HBtab = &compHndBBtab[XTnum];
-
// Check if this is a try/finally.
if (HBtab->HasFinallyHandler())
{
assert(blockPred);
assert(!fgCheapPredsValid);
- flowList* pred;
-
- for (pred = block->bbPreds; pred != nullptr; pred = pred->flNext)
+ for (flowList* const pred : block->PredEdges())
{
if (blockPred == pred->getBlock())
{
break;
case BBJ_SWITCH:
- {
- unsigned jumpCnt = block->bbJumpSwt->bbsCount;
- BasicBlock** jumpTab = block->bbJumpSwt->bbsDstTab;
-
- do
+ for (BasicBlock* const bTarget : block->SwitchTargets())
{
- fgRemoveRefPred(*jumpTab, block);
- } while (++jumpTab, --jumpCnt);
-
+ fgRemoveRefPred(bTarget, block);
+ }
break;
- }
default:
noway_assert(!"Block doesn't have a valid bbJumpKind!!!!");
noway_assert(!fgComputePredsDone); // We can't do this if we've got the full preds.
noway_assert(fgFirstBB != nullptr);
- BasicBlock* block;
-
#ifdef DEBUG
if (verbose)
{
// Clear out the cheap preds lists.
fgRemovePreds();
- for (block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
switch (block->bbJumpKind)
{
break;
case BBJ_SWITCH:
- unsigned jumpCnt;
- jumpCnt = block->bbJumpSwt->bbsCount;
- BasicBlock** jumpTab;
- jumpTab = block->bbJumpSwt->bbsDstTab;
-
- do
+ for (BasicBlock* const bTarget : block->SwitchTargets())
{
- fgAddCheapPred(*jumpTab, block);
- } while (++jumpTab, --jumpCnt);
-
+ fgAddCheapPred(bTarget, block);
+ }
break;
case BBJ_EHFINALLYRET: // It's expensive to compute the preds for this case, so we don't for the cheap
// and are the same size. So, this function removes both.
static_assert_no_msg(sizeof(((BasicBlock*)nullptr)->bbPreds) == sizeof(((BasicBlock*)nullptr)->bbCheapPreds));
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
block->bbPreds = nullptr;
}
{
noway_assert(fgFirstBB != nullptr);
- BasicBlock* block;
-
#ifdef DEBUG
if (verbose)
{
// Check that the block numbers are increasing order.
unsigned lastBBnum = fgFirstBB->bbNum;
- for (BasicBlock* block = fgFirstBB->bbNext; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks(fgFirstBB->bbNext))
{
assert(lastBBnum < block->bbNum);
lastBBnum = block->bbNum;
#endif // DEBUG
// Reset everything pred related
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
block->bbPreds = nullptr;
block->bbLastPred = nullptr;
fgEntryBB->bbRefs = 1;
}
- for (block = fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
switch (block->bbJumpKind)
{
break;
case BBJ_SWITCH:
- unsigned jumpCnt;
- jumpCnt = block->bbJumpSwt->bbsCount;
- BasicBlock** jumpTab;
- jumpTab = block->bbJumpSwt->bbsDstTab;
-
- do
+ for (BasicBlock* const bTarget : block->SwitchTargets())
{
- fgAddRefPred(*jumpTab, block, nullptr, true);
- } while (++jumpTab, --jumpCnt);
-
+ fgAddRefPred(bTarget, block, nullptr, true);
+ }
break;
default:
}
}
- for (unsigned EHnum = 0; EHnum < compHndBBtabCount; EHnum++)
+ for (EHblkDsc* const ehDsc : EHClauses(this))
{
- EHblkDsc* ehDsc = ehGetDsc(EHnum);
-
if (ehDsc->HasFilter())
{
// The first block of a filter has an artifical extra refcount.
BitVecTraits blockVecTraits(fgBBNumMax + 1, this);
BitVec uniqueSuccBlocks(BitVecOps::MakeEmpty(&blockVecTraits));
- BasicBlock** jumpTable = switchBlk->bbJumpSwt->bbsDstTab;
- unsigned jumpCount = switchBlk->bbJumpSwt->bbsCount;
- for (unsigned i = 0; i < jumpCount; i++)
+ for (BasicBlock* const targ : switchBlk->SwitchTargets())
{
- BasicBlock* targ = jumpTable[i];
BitVecOps::AddElemD(&blockVecTraits, uniqueSuccBlocks, targ->bbNum);
}
// Now we have a set of unique successors.
unsigned nonDupInd = 0;
// At this point, all unique targets are in "uniqueSuccBlocks". As we encounter each,
// add to nonDups, remove from "uniqueSuccBlocks".
- for (unsigned i = 0; i < jumpCount; i++)
+ for (BasicBlock* const targ : switchBlk->SwitchTargets())
{
- BasicBlock* targ = jumpTable[i];
if (BitVecOps::IsMember(&blockVecTraits, uniqueSuccBlocks, targ->bbNum))
{
nonDups[nonDupInd] = targ;
BasicBlock* to)
{
assert(switchBlk->bbJumpKind == BBJ_SWITCH); // Precondition.
- unsigned jmpTabCnt = switchBlk->bbJumpSwt->bbsCount;
- BasicBlock** jmpTab = switchBlk->bbJumpSwt->bbsDstTab;
// Is "from" still in the switch table (because it had more than one entry before?)
bool fromStillPresent = false;
- for (unsigned i = 0; i < jmpTabCnt; i++)
+ for (BasicBlock* const bTarget : switchBlk->SwitchTargets())
{
- if (jmpTab[i] == from)
+ if (bTarget == from)
{
fromStillPresent = true;
break;
&info.compMethodInfo->args);
#endif // DEBUG
- BasicBlock* block = fgFirstBB;
- bool madeChanges = false;
- noway_assert(block != nullptr);
+ noway_assert(fgFirstBB != nullptr);
// Set the root inline context on all statements
InlineContext* rootContext = m_inlineStrategy->GetRootContext();
- for (; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
stmt->SetInlineContext(rootContext);
}
}
- // Reset block back to start for inlining
- block = fgFirstBB;
+ BasicBlock* block = fgFirstBB;
+ bool madeChanges = false;
do
{
// Make the current basic block address available globally
compCurBB = block;
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
#ifdef DEBUG
do
{
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
// Call Compiler::fgDebugCheckInlineCandidates on each node
fgWalkTreePre(stmt->GetRootNodePointer(), fgDebugCheckInlineCandidates);
GenTreeCall* iciCall = pInlineInfo->iciCall;
Statement* iciStmt = pInlineInfo->iciStmt;
BasicBlock* iciBlock = pInlineInfo->iciBlock;
- BasicBlock* block;
noway_assert(iciBlock->bbStmtList != nullptr);
noway_assert(iciStmt->GetRootNode() != nullptr);
// Create a new inline context and mark the inlined statements with it
InlineContext* calleeContext = m_inlineStrategy->NewSuccess(pInlineInfo);
- for (block = InlineeCompiler->fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : InlineeCompiler->Blocks())
{
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
stmt->SetInlineContext(calleeContext);
}
//
// Set the try and handler index and fix the jump types of inlinee's blocks.
//
- for (block = InlineeCompiler->fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : InlineeCompiler->Blocks())
{
noway_assert(!block->hasTryIndex());
noway_assert(!block->hasHndIndex());
return true;
}
- for (flowList* pred = b2->bbPreds; pred != nullptr; pred = pred->flNext)
+ for (BasicBlock* const predBlock : b2->PredBlocks())
{
- if (!fgDominate(b1, pred->getBlock()))
+ if (!fgDominate(b1, predBlock))
{
return false;
}
return true;
}
- for (flowList* pred = b2->bbPreds; pred != nullptr; pred = pred->flNext)
+ for (BasicBlock* const predBlock : b2->PredBlocks())
{
- if (fgReachable(b1, pred->getBlock()))
+ if (fgReachable(b1, predBlock))
{
return true;
}
fgReachabilitySetsValid = false;
#endif // DEBUG
- BasicBlock* block;
-
- for (block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
// Initialize the per-block bbReach sets. It creates a new empty set,
// because the block epoch could change since the previous initialization
{
change = false;
- for (block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
BlockSetOps::Assign(this, newReach, block->bbReach);
bool predGcSafe = (block->bbPreds != nullptr); // Do all of our predecessor blocks have a GC safe bit?
- for (flowList* pred = block->bbPreds; pred != nullptr; pred = pred->flNext)
+ for (BasicBlock* const predBlock : block->PredBlocks())
{
- BasicBlock* predBlock = pred->getBlock();
-
/* Union the predecessor's reachability set into newReach */
BlockSetOps::UnionD(this, newReach, predBlock->bbReach);
if (compHndBBtabCount > 0)
{
/* Also 'or' in the handler basic blocks */
- EHblkDsc* HBtab;
- EHblkDsc* HBtabEnd;
- for (HBtab = compHndBBtab, HBtabEnd = compHndBBtab + compHndBBtabCount; HBtab < HBtabEnd; HBtab++)
+ for (EHblkDsc* const HBtab : EHClauses(this))
{
if (HBtab->HasFilter())
{
// to the enter blocks is a bit of a compromise, because sometimes the blocks are already reachable,
// and it messes up DFS ordering to have them marked as enter block. We should prevent the
// creation of retless calls some other way.
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
if (block->bbJumpKind == BBJ_CALLFINALLY)
{
assert(!fgCheapPredsValid);
assert(fgReachabilitySetsValid);
- bool hasLoops = false;
- bool hasUnreachableBlocks = false;
- BasicBlock* block;
+ bool hasLoops = false;
+ bool hasUnreachableBlocks = false;
/* Record unreachable blocks */
- for (block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
/* Internal throw blocks are also reachable */
if (fgIsThrowHlpBlk(block))
// Set BBF_LOOP_HEAD if we have backwards branches to this block.
unsigned blockNum = block->bbNum;
- for (flowList* pred = block->bbPreds; pred != nullptr; pred = pred->flNext)
+ for (BasicBlock* const predBlock : block->PredBlocks())
{
- BasicBlock* predBlock = pred->getBlock();
if (blockNum <= predBlock->bbNum)
{
if (predBlock->bbJumpKind == BBJ_CALLFINALLY)
if (hasUnreachableBlocks)
{
// Now remove the unreachable blocks
- for (block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
{
// If we mark the block with BBF_REMOVED then
// we need to call fgRemovedBlock() on it
/* Create a list of all BBJ_RETURN blocks. The head of the list is 'fgReturnBlocks'. */
fgReturnBlocks = nullptr;
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
// If this is a BBJ_RETURN block, add it to our list of all BBJ_RETURN blocks. This list is only
// used to find return blocks.
// Call the flowgraph DFS traversal helper.
unsigned postIndex = 1;
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
// If the block has no predecessors, and we haven't already visited it (because it's in fgEnterBlks but also
// reachable from the first block), go ahead and traverse starting from this block.
//
BlockSet_ValRet_T Compiler::fgDomFindStartNodes()
{
- unsigned j;
- BasicBlock* block;
-
BlockSet startNodes(BlockSetOps::MakeFull(this));
- for (block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
- unsigned cSucc = block->NumSucc(this);
- for (j = 0; j < cSucc; ++j)
+ for (BasicBlock* const succ : block->Succs(this))
{
- BasicBlock* succ = block->GetSucc(j, this);
BlockSetOps::RemoveElemD(this, startNodes, succ->bbNum);
}
}
// pre and post actions are processed.
stack.Push(DfsBlockEntry(DSS_Post, currentBlock));
- unsigned cSucc = currentBlock->NumSucc(this);
- for (unsigned j = 0; j < cSucc; ++j)
+ for (BasicBlock* const succ : currentBlock->Succs(this))
{
- BasicBlock* succ = currentBlock->GetSucc(j, this);
-
// If this is a node we haven't seen before, go ahead and process
if (!BlockSetOps::IsMember(this, visited, succ->bbNum))
{
// Mark the EH blocks as entry blocks and also flag them as processed.
if (compHndBBtabCount > 0)
{
- EHblkDsc* HBtab;
- EHblkDsc* HBtabEnd;
- for (HBtab = compHndBBtab, HBtabEnd = compHndBBtab + compHndBBtabCount; HBtab < HBtabEnd; HBtab++)
+ for (EHblkDsc* const HBtab : EHClauses(this))
{
if (HBtab->HasFilter())
{
// As stated before, once we have computed immediate dominance we need to clear
// all the basic blocks whose predecessor list was set to flRoot. This
// reverts that and leaves the blocks the same as before.
- for (block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
if (block->bbPreds == &flRoot)
{
// Traverse the entire block list to build the dominator tree. Skip fgFirstBB
// as it is always a root of the dominator forest.
- for (BasicBlock* block = fgFirstBB->bbNext; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks(fgFirstBB->bbNext))
{
BasicBlock* parent = block->bbIDom;
//
void Compiler::fgInitBlockVarSets()
{
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
block->InitVarSets(this);
}
// If there is a switch predecessor don't bother because we'd have to update the uniquesuccs as well
// (if they are valid).
- for (flowList* pred = bNext->bbPreds; pred != nullptr; pred = pred->flNext)
+ for (BasicBlock* const predBlock : bNext->PredBlocks())
{
- if (pred->getBlock()->bbJumpKind == BBJ_SWITCH)
+ if (predBlock->bbJumpKind == BBJ_SWITCH)
{
return false;
}
JITDUMP("Second block has multiple incoming edges\n");
assert(block->isEmpty());
- for (flowList* pred = bNext->bbPreds; pred; pred = pred->flNext)
+ for (BasicBlock* const predBlock : bNext->PredBlocks())
{
- fgReplaceJumpTarget(pred->getBlock(), block, bNext);
+ fgReplaceJumpTarget(predBlock, block, bNext);
- if (pred->getBlock() != block)
+ if (predBlock != block)
{
- fgAddRefPred(block, pred->getBlock());
+ fgAddRefPred(block, predBlock);
}
}
bNext->bbPreds = nullptr;
// and we avoid most of the work if pred lists are already in order,
// we'll just ensure everything is properly ordered.
//
- for (BasicBlock* checkBlock = fgFirstBB; checkBlock != nullptr; checkBlock = checkBlock->bbNext)
+ for (BasicBlock* const checkBlock : Blocks())
{
checkBlock->ensurePredListOrder(this);
}
block->bbStmtList = firstNonPhi;
}
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
fgRemoveStmt(block, stmt);
}
// EH regions. Is this a case where they can't be merged?
bool okToMerge = true; // assume it's ok
- for (flowList* pred = block->bbPreds; pred; pred = pred->flNext)
+ for (BasicBlock* const predBlock : block->PredBlocks())
{
- if (pred->getBlock()->bbJumpKind == BBJ_EHCATCHRET)
+ if (predBlock->bbJumpKind == BBJ_EHCATCHRET)
{
- assert(pred->getBlock()->bbJumpDest == block);
+ assert(predBlock->bbJumpDest == block);
okToMerge = false; // we can't get rid of the empty block
break;
}
assert(!bDest->IsLIR());
unsigned estDupCostSz = 0;
- for (Statement* stmt : bDest->Statements())
+ for (Statement* const stmt : bDest->Statements())
{
// We want to compute the costs of the statement. Unfortunately, gtPrepareCost() / gtSetStmtInfo()
// call gtSetEvalOrder(), which can reorder nodes. If it does so, we need to re-thread the gtNext/gtPrev
/* Visit all the statements in bDest */
- for (Statement* curStmt : bDest->Statements())
+ for (Statement* const curStmt : bDest->Statements())
{
// Clone/substitute the expression.
Statement* stmt = gtCloneStmt(curStmt);
bool modified = false;
- for (BasicBlock* block = fgFirstBB; block != NULL; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
// Lowering expands switches, so calling this method on lowered IR
// does not make sense.
bool rare = true;
/* Make sure that block has at least one normal predecessor */
- for (flowList* pred = block->bbPreds; pred != nullptr; pred = pred->flNext)
+ for (BasicBlock* const predBlock : block->PredBlocks())
{
/* Find the fall through predecessor, if any */
- if (!pred->getBlock()->isRunRarely())
+ if (!predBlock->isRunRarely())
{
rare = false;
break;
noway_assert(edgeFromPrev != nullptr);
// Examine all of the other edges into bDest
- for (flowList* edge = bDest->bbPreds; edge != nullptr; edge = edge->flNext)
+ for (flowList* const edge : bDest->PredEdges())
{
if (edge != edgeFromPrev)
{
//
// Examine all of the other edges into bDest
- for (flowList* edge = bDest->bbPreds; edge != nullptr; edge = edge->flNext)
+ for (BasicBlock* const predBlock : bDest->PredBlocks())
{
- BasicBlock* bTemp = edge->getBlock();
-
- if ((bTemp != bPrev) && (bTemp->bbWeight >= bPrev->bbWeight))
+ if ((predBlock != bPrev) && (predBlock->bbWeight >= bPrev->bbWeight))
{
moveDestUp = false;
break;
// we will need to update ebdTryLast or ebdHndLast.
//
- EHblkDsc* HBtab;
- EHblkDsc* HBtabEnd;
-
- for (HBtab = compHndBBtab, HBtabEnd = compHndBBtab + compHndBBtabCount; HBtab < HBtabEnd;
- HBtab++)
+ for (EHblkDsc* const HBtab : EHClauses(this))
{
if ((HBtab->ebdTryLast == bNext) || (HBtab->ebdHndLast == bNext))
{
if (compRationalIRForm)
{
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
LIR::AsRange(block).CheckLIR(this);
}
break;
}
- for (Statement* stmt : StatementList(block->FirstNonPhiDef()))
+ for (Statement* const stmt : block->NonPhiStatements())
{
unsigned char cost = stmt->GetCostSz();
costSz += cost;
{
unsigned nodeCount = 0;
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
if (!block->IsLIR())
{
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
fgWalkTreePre(stmt->GetRootNodePointer(),
[](GenTree** slot, fgWalkData* data) -> Compiler::fgWalkResult {
calleeWeight, scale);
JITDUMP("Scaling inlinee blocks\n");
- for (BasicBlock* bb = fgFirstBB; bb != nullptr; bb = bb->bbNext)
+ for (BasicBlock* const block : Blocks())
{
- bb->scaleBBWeight(scale);
+ block->scaleBBWeight(scale);
}
}
#ifdef DEBUG
// Set schema index to invalid value
//
- for (BasicBlock* block = m_comp->fgFirstBB; (block != nullptr); block = block->bbNext)
+ for (BasicBlock* const block : m_comp->Blocks())
{
block->bbCountSchemaIndex = -1;
}
//
if (!compIsForInlining())
{
- EHblkDsc* HBtab = compHndBBtab;
- unsigned XTnum = 0;
-
- for (; XTnum < compHndBBtabCount; XTnum++, HBtab++)
+ for (EHblkDsc* const HBtab : EHClauses(this))
{
BasicBlock* hndBegBB = HBtab->ebdHndBeg;
stack.Push(hndBegBB);
#ifdef DEBUG
// Verify the edge still exists.
//
- const unsigned numSucc = block->NumSucc(comp);
- bool found = false;
- for (unsigned i = 0; i < numSucc && !found; i++)
+ bool found = false;
+ for (BasicBlock* const succ : block->Succs(comp))
{
- found = (target == block->GetSucc(i, comp));
+ if (target == succ)
+ {
+ found = true;
+ break;
+ }
}
assert(found);
#endif
#ifdef DEBUG
// Set schema index to invalid value
//
- for (BasicBlock* block = m_comp->fgFirstBB; (block != nullptr); block = block->bbNext)
+ for (BasicBlock* const block : m_comp->Blocks())
{
block->bbClassSchemaIndex = -1;
}
//
BuildClassProbeSchemaGen schemaGen(schema, m_schemaCount);
ClassProbeVisitor<BuildClassProbeSchemaGen> visitor(m_comp, schemaGen);
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
visitor.WalkTree(stmt->GetRootNodePointer(), nullptr);
}
ClassProbeInserter insertProbes(schema, profileMemory, &classSchemaIndex, m_instrCount);
ClassProbeVisitor<ClassProbeInserter> visitor(m_comp, insertProbes);
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
visitor.WalkTree(stmt->GetRootNodePointer(), nullptr);
}
SuppressProbesFunctor suppressProbes(cleanupCount);
ClassProbeVisitor<SuppressProbesFunctor> visitor(m_comp, suppressProbes);
- for (BasicBlock* block = m_comp->fgFirstBB; (block != nullptr); block = block->bbNext)
+ for (BasicBlock* const block : m_comp->Blocks())
{
if ((block->bbFlags & BBF_HAS_CLASS_PROFILE) == 0)
{
continue;
}
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
visitor.WalkTree(stmt->GetRootNodePointer(), nullptr);
}
// Walk the flow graph to build up the instrumentation schema.
//
Schema schema(getAllocator(CMK_Pgo));
- for (BasicBlock* block = fgFirstBB; (block != nullptr); block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
if (fgCountInstrumentor->ShouldProcess(block))
{
// Add the instrumentation code
//
- for (BasicBlock* block = fgFirstBB; (block != nullptr); block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
if (fgCountInstrumentor->ShouldProcess(block))
{
//
void Compiler::fgIncorporateBlockCounts()
{
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
BasicBlock::weight_t profileWeight;
{
// Create per-block info, and set up the key to block map.
//
- for (BasicBlock* block = m_comp->fgFirstBB; (block != nullptr); block = block->bbNext)
+ for (BasicBlock* const block : m_comp->Blocks())
{
m_keyToBlockMap.Set(BlockToKey(block), block);
BlockInfo* const info = new (m_allocator) BlockInfo();
// Set weight on all blocks.
//
- for (BasicBlock* block = m_comp->fgFirstBB; (block != nullptr); block = block->bbNext)
+ for (BasicBlock* const block : m_comp->Blocks())
{
BlockInfo* const info = BlockToInfo(block);
assert(info->m_weightKnown);
BasicBlock* bSrc;
BasicBlock* bDst;
- flowList* edge;
BasicBlock::weight_t slop;
unsigned goodEdgeCountCurrent = 0;
unsigned goodEdgeCountPrevious = 0;
bDstWeight -= fgCalledCount;
}
- for (edge = bDst->bbPreds; edge != nullptr; edge = edge->flNext)
+ for (flowList* const edge : bDst->PredEdges())
{
bool assignOK = true;
JITDUMP("\n -- step 1 --\n");
for (bDst = fgFirstBB; bDst != nullptr; bDst = bDst->bbNext)
{
- for (edge = bDst->bbPreds; edge != nullptr; edge = edge->flNext)
+ for (flowList* const edge : bDst->PredEdges())
{
bool assignOK = true;
BasicBlock::weight_t maxEdgeWeightSum = 0;
// Calculate the sums of the minimum and maximum edge weights
- for (edge = bDst->bbPreds; edge != nullptr; edge = edge->flNext)
+ for (flowList* const edge : bDst->PredEdges())
{
- // We are processing the control flow edge (bSrc -> bDst)
- bSrc = edge->getBlock();
-
maxEdgeWeightSum += edge->edgeWeightMax();
minEdgeWeightSum += edge->edgeWeightMin();
}
// maxEdgeWeightSum is the sum of all flEdgeWeightMax values into bDst
// minEdgeWeightSum is the sum of all flEdgeWeightMin values into bDst
- for (edge = bDst->bbPreds; edge != nullptr; edge = edge->flNext)
+ for (flowList* const edge : bDst->PredEdges())
{
bool assignOK = true;
// See if any edge weight are expressed in [min..max] form
- for (bDst = fgFirstBB; bDst != nullptr; bDst = bDst->bbNext)
+ for (BasicBlock* const bDst : Blocks())
{
if (bDst->bbPreds != nullptr)
{
- for (edge = bDst->bbPreds; edge != nullptr; edge = edge->flNext)
+ for (flowList* const edge : bDst->PredEdges())
{
- bSrc = edge->getBlock();
- // This is the control flow edge (bSrc -> bDst)
+ // This is the control flow edge (edge->getBlock() -> bDst)
if (edge->edgeWeightMin() != edge->edgeWeightMax())
{
// Verify each profiled block.
//
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
if (!block->hasProfileWeight())
{
BasicBlock::weight_t incomingWeightMax = 0;
bool foundPreds = false;
- for (flowList* predEdge = block->bbPreds; predEdge != nullptr; predEdge = predEdge->flNext)
+ for (flowList* const predEdge : block->PredEdges())
{
incomingWeightMin += predEdge->edgeWeightMin();
incomingWeightMax += predEdge->edgeWeightMax();
for (unsigned i = 0; i < numSuccs; i++)
{
BasicBlock* succBlock = block->GetSucc(i, this);
- flowList* succEdge = nullptr;
-
- for (flowList* edge = succBlock->bbPreds; edge != nullptr; edge = edge->flNext)
- {
- if (edge->getBlock() == block)
- {
- succEdge = edge;
- break;
- }
- }
+ flowList* succEdge = fgGetPredForBlock(succBlock, block);
if (succEdge == nullptr)
{
static bool blockNeedsGCPoll(BasicBlock* block)
{
bool blockMayNeedGCPoll = false;
- for (Statement* stmt = block->FirstNonPhiDef(); stmt != nullptr; stmt = stmt->GetNextStmt())
+ for (Statement* const stmt : block->NonPhiStatements())
{
if ((stmt->GetRootNode()->gtFlags & GTF_CALL) != 0)
{
- for (GenTree* tree = stmt->GetTreeList(); tree != nullptr; tree = tree->gtNext)
+ for (GenTree* const tree : stmt->TreeList())
{
if (tree->OperGet() == GT_CALL)
{
BasicBlock* block;
// Walk through the blocks and hunt for a block that needs a GC Poll
- for (block = fgFirstBB; block; block = block->bbNext)
+ for (block = fgFirstBB; block != nullptr; block = block->bbNext)
{
// When optimizations are enabled, we can't rely on BBF_HAS_SUPPRESSGC_CALL flag:
// the call could've been moved, e.g., hoisted from a loop, CSE'd, etc.
// Note this includes (to some extent) the impact of importer folded
// branches, provided the folded tree covered the entire block's IL.
unsigned importedILSize = 0;
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
if ((block->bbFlags & BBF_IMPORTED) != 0)
{
void Compiler::fgLoopCallMark()
{
- BasicBlock* block;
-
/* If we've already marked all the block, bail */
if (fgLoopCallMarked)
/* Walk the blocks, looking for backward edges */
- for (block = fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
switch (block->bbJumpKind)
{
break;
case BBJ_SWITCH:
-
- unsigned jumpCnt;
- jumpCnt = block->bbJumpSwt->bbsCount;
- BasicBlock** jumpPtr;
- jumpPtr = block->bbJumpSwt->bbsDstTab;
-
- do
+ for (BasicBlock* const bTarget : block->SwitchTargets())
{
- fgLoopCallTest(block, *jumpPtr);
- } while (++jumpPtr, --jumpCnt);
-
+ fgLoopCallTest(block, bTarget);
+ }
break;
default:
fgCreateMonitorTree(lvaMonAcquired, lvaCopyThis, faultBB, false /*exit*/);
// non-exceptional cases
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
if (block->bbJumpKind == BBJ_RETURN)
{
{
unsigned retCnt = 0;
- for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
if (block->bbJumpKind == BBJ_RETURN)
{
/* Walk the basic blocks and for each statement determine
* the evaluation order, cost, FP levels, etc... */
- for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
compCurBB = block;
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
/* Recursively process the statement */
unsigned outgoingArgSpaceSize = 0;
#endif // FEATURE_FIXED_OUT_ARGS
- for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
// Walk the statement trees in this basic block.
compCurBB = block; // Used in fgRngChkTarget.
// the handler go to the prolog. Edges coming from with the handler are back-edges, and
// go to the existing 'block'.
- for (flowList* pred = block->bbPreds; pred; pred = pred->flNext)
+ for (BasicBlock* const predBlock : block->PredBlocks())
{
- BasicBlock* predBlock = pred->getBlock();
if (!fgIsIntraHandlerPred(predBlock, block))
{
// It's a jump from outside the handler; add it to the newHead preds list and remove
noway_assert(!fgDomsComputed); // this function doesn't maintain the dom sets
assert(!fgFuncletsCreated);
- bool prologBlocksCreated = false;
- EHblkDsc* HBtabEnd;
- EHblkDsc* HBtab;
+ bool prologBlocksCreated = false;
- for (HBtab = compHndBBtab, HBtabEnd = compHndBBtab + compHndBBtabCount; HBtab < HBtabEnd; HBtab++)
+ for (EHblkDsc* const HBtab : EHClauses(this))
{
BasicBlock* head = HBtab->ebdHndBeg;
/* If we don't compute the doms, then we never mark blocks as loops. */
if (fgDomsComputed)
{
- for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
/* If this block is a loop header, mark it appropriately */
/* If we don't have the dominators, use an abbreviated test for fully interruptible. If there are
* any back edges, check the source and destination blocks to see if they're GC Safe. If not, then
* go fully interruptible. */
-
- /* XXX Mon 1/21/2008
- * Wouldn't it be nice to have a block iterator that can do this loop?
- */
- for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
// true if the edge is forward, or if it is a back edge and either the source and dest are GC safe.
#define EDGE_IS_GC_SAFE(src, dst) \
break;
case BBJ_SWITCH:
-
- unsigned jumpCnt;
- jumpCnt = block->bbJumpSwt->bbsCount;
- BasicBlock** jumpPtr;
- jumpPtr = block->bbJumpSwt->bbsDstTab;
-
- do
+ for (BasicBlock* const bTarget : block->SwitchTargets())
{
- partiallyInterruptible &= EDGE_IS_GC_SAFE(block, *jumpPtr);
- } while (++jumpPtr, --jumpCnt);
-
+ partiallyInterruptible &= EDGE_IS_GC_SAFE(block, bTarget);
+ }
break;
default:
}
}
- for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
#if FEATURE_FASTTAILCALL
void Compiler::fgSetBlockOrder(BasicBlock* block)
{
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
fgSetStmtSeq(stmt);
void GCInfo::gcMarkFilterVarsPinned()
{
assert(compiler->ehAnyFunclets());
- const EHblkDsc* endHBtab = &(compiler->compHndBBtab[compiler->compHndBBtabCount]);
- for (EHblkDsc* HBtab = compiler->compHndBBtab; HBtab < endHBtab; HBtab++)
+ for (EHblkDsc* const HBtab : EHClauses(compiler))
{
if (HBtab->HasFilter())
{
void Compiler::fgWalkAllTreesPre(fgWalkPreFn* visitor, void* pCallBackData)
{
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
fgWalkTreePre(stmt->GetRootNodePointer(), visitor, pCallBackData);
}
//
void Compiler::gtDispBlockStmts(BasicBlock* block)
{
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
gtDispStmt(stmt);
printf("\n");
#endif
};
+// GenTreeList: adapter class for forward iteration of the execution order GenTree linked list
+// using range-based `for`, normally used via Statement::TreeList(), e.g.:
+// for (GenTree* const tree : stmt->TreeList()) ...
+//
+// NOTE(review): assumes the gtNext chain is not modified during iteration — TODO confirm this
+// matches the contract of the other new iterators described in the change.
+//
+class GenTreeList
+{
+ GenTree* m_trees;
+
+ // Forward iterator for the execution order GenTree linked list (using `gtNext` pointer).
+ //
+ class iterator
+ {
+ GenTree* m_tree;
+
+ public:
+ iterator(GenTree* tree) : m_tree(tree)
+ {
+ }
+
+ // Dereference: the current tree node.
+ GenTree* operator*() const
+ {
+ return m_tree;
+ }
+
+ // Pre-increment: advance to the next node in execution order.
+ iterator& operator++()
+ {
+ m_tree = m_tree->gtNext;
+ return *this;
+ }
+
+ // Inequality: loop termination test against the end() sentinel.
+ bool operator!=(const iterator& i) const
+ {
+ return m_tree != i.m_tree;
+ }
+ };
+
+public:
+ GenTreeList(GenTree* trees) : m_trees(trees)
+ {
+ }
+
+ // begin: iterator at the head of the list (equals end() when the list is empty).
+ iterator begin() const
+ {
+ return iterator(m_trees);
+ }
+
+ // end: sentinel — the gtNext chain is nullptr-terminated.
+ iterator end() const
+ {
+ return iterator(nullptr);
+ }
+};
+
// We use the following format when printing the Statement number: Statement->GetID()
// This define is used with string concatenation to put this in printf format strings (Note that %u means unsigned int)
#define FMT_STMT "STMT%05u"
m_treeList = treeHead;
}
+ // TreeList: convenience method for enabling range-based `for` iteration over the
+ // execution order of the GenTree linked list, e.g.:
+ // for (GenTree* const tree : stmt->TreeList()) ...
+ //
+ // Returns: a GenTreeList wrapping GetTreeList() (the head of this statement's tree list).
+ //
+ GenTreeList TreeList() const
+ {
+ return GenTreeList(GetTreeList());
+ }
+
InlineContext* GetInlineContext() const
{
return m_inlineContext;
bool m_compilerAdded; // Was the statement created by optimizer?
};
-class StatementIterator
+// StatementList: adapter class for forward iteration of the statement linked list using range-based `for`,
+// normally used via BasicBlock::Statements(), e.g.:
+// for (Statement* const stmt : block->Statements()) ...
+// or:
+// for (Statement* const stmt : block->NonPhiStatements()) ...
+//
+class StatementList
{
- Statement* m_stmt;
+ Statement* m_stmts;
-public:
- StatementIterator(Statement* stmt) : m_stmt(stmt)
+ // Forward iterator for the statement linked list.
+ //
+ class iterator
{
- }
+ Statement* m_stmt;
- Statement* operator*() const
- {
- return m_stmt;
- }
+ public:
+ iterator(Statement* stmt) : m_stmt(stmt)
+ {
+ }
- StatementIterator& operator++()
- {
- m_stmt = m_stmt->GetNextStmt();
- return *this;
- }
+ // Dereference: the current statement.
+ Statement* operator*() const
+ {
+ return m_stmt;
+ }
- bool operator!=(const StatementIterator& i) const
- {
- return m_stmt != i.m_stmt;
- }
-};
+ // Pre-increment: advance to the next statement in the list.
+ iterator& operator++()
+ {
+ m_stmt = m_stmt->GetNextStmt();
+ return *this;
+ }
-class StatementList
-{
- Statement* m_stmts;
+ // Inequality: loop termination test against the end() sentinel.
+ bool operator!=(const iterator& i) const
+ {
+ return m_stmt != i.m_stmt;
+ }
+ };
public:
StatementList(Statement* stmts) : m_stmts(stmts)
{
}
- StatementIterator begin() const
+ // begin: iterator at the head statement (equals end() when the list is empty).
+ iterator begin() const
{
- return StatementIterator(m_stmts);
+ return iterator(m_stmts);
}
- StatementIterator end() const
+ // end: sentinel — the statement chain is nullptr-terminated.
+ iterator end() const
{
- return StatementIterator(nullptr);
+ return iterator(nullptr);
}
};
}
};
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
ReplaceShadowParamsVisitor replaceShadowParamsVisitor(this);
replaceShadowParamsVisitor.WalkTree(stmt->GetRootNodePointer(), nullptr);
{
// There could be more than one basic block ending with a "Jmp" type tail call.
// We would have to insert assignments in all such blocks, just before GT_JMP stmnt.
- for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
if (block->bbJumpKind != BBJ_RETURN)
{
void Compiler::impReimportMarkSuccessors(BasicBlock* block)
{
- const unsigned numSuccs = block->NumSucc();
- for (unsigned i = 0; i < numSuccs; i++)
+ for (BasicBlock* const succBlock : block->Succs())
{
- impReimportMarkBlock(block->GetSucc(i));
+ impReimportMarkBlock(succBlock);
}
}
JITDUMP("Marking BBF_INTERNAL block " FMT_BB " as BBF_IMPORTED\n", block->bbNum);
block->bbFlags |= BBF_IMPORTED;
- const unsigned numSuccs = block->NumSucc();
- for (unsigned i = 0; i < numSuccs; i++)
+ for (BasicBlock* const succBlock : block->Succs())
{
- impImportBlockPending(block->GetSucc(i));
+ impImportBlockPending(succBlock);
}
return;
break;
case BBJ_SWITCH:
-
- BasicBlock** jmpTab;
- unsigned jmpCnt;
-
addStmt = impExtractLastStmt();
assert(addStmt->GetRootNode()->gtOper == GT_SWITCH);
- jmpCnt = block->bbJumpSwt->bbsCount;
- jmpTab = block->bbJumpSwt->bbsDstTab;
-
- do
+ for (BasicBlock* const tgtBlock : block->SwitchTargets())
{
- tgtBlock = (*jmpTab);
-
multRef |= tgtBlock->bbRefs;
// Thanks to spill cliques, we should have assigned all or none
{
break;
}
- } while (++jmpTab, --jmpCnt);
-
+ }
break;
case BBJ_CALLFINALLY:
impReimportSpillClique(block);
// For blocks that haven't been imported yet, we still need to mark them as pending import.
- const unsigned numSuccs = block->NumSucc();
- for (unsigned i = 0; i < numSuccs; i++)
+ for (BasicBlock* const succ : block->Succs())
{
- BasicBlock* succ = block->GetSucc(i);
if ((succ->bbFlags & BBF_IMPORTED) == 0)
{
impImportBlockPending(succ);
// otherwise just import the successors of block
/* Does this block jump to any other blocks? */
- const unsigned numSuccs = block->NumSucc();
- for (unsigned i = 0; i < numSuccs; i++)
+ for (BasicBlock* const succ : block->Succs())
{
- impImportBlockPending(block->GetSucc(i));
+ impImportBlockPending(succ);
}
}
}
BasicBlock* blk = node->m_blk;
FreeBlockListNode(node);
- const unsigned numSuccs = blk->NumSucc();
- for (unsigned succNum = 0; succNum < numSuccs; succNum++)
+ for (BasicBlock* const succ : blk->Succs())
{
- BasicBlock* succ = blk->GetSucc(succNum);
// If it's not already in the clique, add it, and also add it
// as a member of the successor "toDo" set.
if (impSpillCliqueGetMember(SpillCliqueSucc, succ) == 0)
return;
}
+// bbThisOnEntry: return the recorded 'this'-pointer initialization state on
+// entry to this block, or TIS_Bottom when no entry state exists
+// (bbEntryState == nullptr). Read-only, hence const.
-ThisInitState BasicBlock::bbThisOnEntry()
+ThisInitState BasicBlock::bbThisOnEntry() const
{
return bbEntryState ? bbEntryState->thisInitialized : TIS_Bottom;
}
+// bbStackDepthOnEntry: return the recorded evaluation-stack depth on entry to
+// this block; 0 when no entry state exists. Read-only, hence const.
-unsigned BasicBlock::bbStackDepthOnEntry()
+unsigned BasicBlock::bbStackDepthOnEntry() const
{
return (bbEntryState ? bbEntryState->esStackDepth : 0);
}
bbEntryState->esStack = (StackEntry*)stackBuffer;
}
+// bbStackOnEntry: return the entry-state stack for this block. Unlike
+// bbStackDepthOnEntry, this asserts that an entry state has been recorded
+// rather than tolerating a null bbEntryState. Read-only, hence const.
-StackEntry* BasicBlock::bbStackOnEntry()
+StackEntry* BasicBlock::bbStackOnEntry() const
{
assert(bbEntryState);
return bbEntryState->esStack;
}
// Used in impImportBlockPending() for STRESS_CHK_REIMPORT
- for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
block->bbFlags &= ~BBF_VISITED;
}
{
int count = 0;
- for (BasicBlock* block = compiler->fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : compiler->Blocks())
{
count += TransformBlock(block);
}
{
int count = 0;
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
if (compiler->doesMethodHaveFatPointer() && ContainsFatCalli(stmt))
{
}
};
- for (Statement* nextStmt : remainderBlock->Statements())
+ for (Statement* const nextStmt : remainderBlock->Statements())
{
JITDUMP(" Scouting " FMT_STMT "\n", nextStmt->GetID());
assert(!doesMethodHaveGuardedDevirtualization());
assert(!doesMethodHaveExpRuntimeLookup());
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
fgWalkTreePre(stmt->GetRootNodePointer(), fgDebugCheckForTransformableIndirectCalls);
}
*/
void Compiler::ehUpdateLastBlocks(BasicBlock* oldLast, BasicBlock* newLast)
{
- EHblkDsc* HBtab;
- EHblkDsc* HBtabEnd;
-
- for (HBtab = compHndBBtab, HBtabEnd = compHndBBtab + compHndBBtabCount; HBtab < HBtabEnd; HBtab++)
+ for (EHblkDsc* const HBtab : EHClauses(this))
{
if (HBtab->ebdTryLast == oldLast)
{
unsigned Compiler::ehFuncletCount()
{
- unsigned funcletCnt = 0;
- EHblkDsc* HBtab;
- EHblkDsc* HBtabEnd;
+ unsigned funcletCnt = 0;
- for (HBtab = compHndBBtab, HBtabEnd = compHndBBtab + compHndBBtabCount; HBtab < HBtabEnd; HBtab++)
+ for (EHblkDsc* const HBtab : EHClauses(this))
{
if (HBtab->HasFilter())
{
HBtab = compHndBBtab + XTnum;
- EHblkDsc* xtabEnd;
- EHblkDsc* xtab;
- for (xtab = compHndBBtab, xtabEnd = compHndBBtab + compHndBBtabCount; xtab < xtabEnd; xtab++)
+ for (EHblkDsc* const xtab : EHClauses(this))
{
if ((xtab != HBtab) && (xtab->ebdEnclosingTryIndex != EHblkDsc::NO_ENCLOSING_INDEX) &&
(xtab->ebdEnclosingTryIndex >= XTnum))
/* We need to update all of the blocks' bbTryIndex */
- for (BasicBlock* blk = fgFirstBB; blk; blk = blk->bbNext)
+ for (BasicBlock* const blk : Blocks())
{
if (blk->hasTryIndex())
{
{
// Update all enclosing links that will get invalidated by inserting an entry at 'XTnum'
- EHblkDsc* xtabEnd;
- EHblkDsc* xtab;
- for (xtab = compHndBBtab, xtabEnd = compHndBBtab + compHndBBtabCount; xtab < xtabEnd; xtab++)
+ for (EHblkDsc* const xtab : EHClauses(this))
{
if ((xtab->ebdEnclosingTryIndex != EHblkDsc::NO_ENCLOSING_INDEX) && (xtab->ebdEnclosingTryIndex >= XTnum))
{
// We need to update the BasicBlock bbTryIndex and bbHndIndex field for all blocks
- for (BasicBlock* blk = fgFirstBB; blk; blk = blk->bbNext)
+ for (BasicBlock* const blk : Blocks())
{
if (blk->hasTryIndex() && (blk->getTryIndex() >= XTnum))
{
#ifdef DEBUG
// Make sure none of the remaining blocks have any EH.
- BasicBlock* blk;
- foreach_block(this, blk)
+ for (BasicBlock* const blk : Blocks())
{
assert(!blk->hasTryIndex());
assert(!blk->hasHndIndex());
unsigned* blockNumMap = (unsigned*)_alloca(blockNumBytes);
memset(blockNumMap, 0, blockNumBytes);
- BasicBlock* block;
- unsigned newBBnum = 1;
- for (block = fgFirstBB; block != nullptr; block = block->bbNext)
+ unsigned newBBnum = 1;
+ for (BasicBlock* const block : Blocks())
{
assert((block->bbFlags & BBF_REMOVED) == 0);
assert(1 <= block->bbNum && block->bbNum <= bbNumMax);
#endif
// To verify that bbCatchTyp is set properly on all blocks, and that some BBF_* flags are only set on the first
- // block
- // of 'try' or handlers, create two bool arrays indexed by block number: one for the set of blocks that are the
- // beginning
- // blocks of 'try' regions, and one for blocks that are the beginning of handlers (including filters). Note that
- // since
- // this checking function runs before EH normalization, we have to handle the case where blocks can be both the
- // beginning
- // of a 'try' as well as the beginning of a handler. After we've iterated over the EH table, loop
- // over all blocks and verify that only handler begin blocks have bbCatchTyp == BBCT_NONE, and some other things.
+ // block of 'try' or handlers, create two bool arrays indexed by block number: one for the set of blocks that
+ // are the beginning blocks of 'try' regions, and one for blocks that are the beginning of handlers (including
+ // filters). Note that since this checking function runs before EH normalization, we have to handle the case
+ // where blocks can be both the beginning of a 'try' as well as the beginning of a handler. After we've iterated
+ // over the EH table, loop over all blocks and verify that only handler begin blocks have bbCatchTyp == BBCT_NONE,
+ // and some other things.
size_t blockBoolSetBytes = (bbNumMax + 1) * sizeof(bool);
bool* blockTryBegSet = (bool*)_alloca(blockBoolSetBytes);
// otherwise set. The duplicate clause handler is truly a duplicate of
// a previously processed handler, so we ignore it.
+ BasicBlock* block;
+
size_t blockIndexBytes = (bbNumMax + 1) * sizeof(unsigned short);
unsigned short* blockTryIndex = (unsigned short*)_alloca(blockIndexBytes);
unsigned short* blockHndIndex = (unsigned short*)_alloca(blockIndexBytes);
#endif // FEATURE_EH_FUNCLETS
// Make sure that all blocks have the right index, including those blocks that should have zero (no EH region).
- for (block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
assert(block->bbTryIndex == blockTryIndex[block->bbNum]);
assert(block->bbHndIndex == blockHndIndex[block->bbNum]);
assert(fgComputePredsDone);
assert((block->bbFlags & BBF_FINALLY_TARGET) != 0);
- for (flowList* pred = block->bbPreds; pred; pred = pred->flNext)
+ for (BasicBlock* const predBlock : block->PredBlocks())
{
- if (pred->getBlock()->bbJumpKind == BBJ_ALWAYS && pred->getBlock()->bbJumpDest == block)
+ if (predBlock->bbJumpKind == BBJ_ALWAYS && predBlock->bbJumpDest == block)
{
- BasicBlock* pPrev = pred->getBlock()->bbPrev;
- if (pPrev != NULL)
+ BasicBlock* pPrev = predBlock->bbPrev;
+ if (pPrev != nullptr)
{
if (pPrev->bbJumpKind == BBJ_CALLFINALLY)
{
assert(block->hasHndIndex());
assert(fgFirstBlockOfHandler(block) == block); // this block is the first block of a handler
- flowList* pred;
-
- for (pred = block->bbPreds; pred; pred = pred->flNext)
+ for (BasicBlock* const predBlock : block->PredBlocks())
{
- BasicBlock* predBlock = pred->getBlock();
-
if (fgIsIntraHandlerPred(predBlock, block))
{
// We have a predecessor that is not from our try region
bPrev->bbCatchTyp = block->bbCatchTyp;
block->bbCatchTyp = BBCT_NONE;
- EHblkDsc* HBtab;
- EHblkDsc* HBtabEnd;
-
- for (HBtab = compHndBBtab, HBtabEnd = compHndBBtab + compHndBBtabCount; HBtab < HBtabEnd; HBtab++)
+ for (EHblkDsc* const HBtab : EHClauses(this))
{
/* Multiple pointers in EHblkDsc can point to same block. We can not early out after the first match. */
if (HBtab->ebdTryBeg == block)
LocalAddressVisitor visitor(this);
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
// Make the current basic block address available globally
compCurBB = block;
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
visitor.VisitStmt(stmt);
}
JITDUMP("\n*** %s local variables in block " FMT_BB " (weight=%s)\n", isRecompute ? "recomputing" : "marking",
block->bbNum, refCntWtd2str(block->getBBWeight(this)));
- for (Statement* stmt : StatementList(block->FirstNonPhiDef()))
+ for (Statement* const stmt : block->NonPhiStatements())
{
MarkLocalVarsVisitor visitor(this, block, stmt, isRecompute);
DISPSTMT(stmt);
JITDUMP("\n*** lvaComputeRefCounts -- explicit counts ***\n");
// Second, account for all explicit local variable references
- for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
if (block->IsLIR())
{
return *static_cast<Range*>(block);
}
+//------------------------------------------------------------------------
+// LIR::AsRange: Returns the LIR range backing the given (const) basic block.
+//
+// Const counterpart of the non-const AsRange overload above; the static_cast
+// presumes BasicBlock can be viewed as a Range (NOTE(review): relies on the
+// same BasicBlock/Range relationship the non-const overload uses -- confirm).
const LIR::Range& LIR::AsRange(const BasicBlock* block)
{
+ return *static_cast<const Range*>(block);
+}
+
//------------------------------------------------------------------------
// LIR::EmptyRange: Constructs and returns an empty range.
//
public:
static Range& AsRange(BasicBlock* block);
+ // Const overload of AsRange for const blocks. Note: the member name must
+ // not carry the "LIR::" qualifier inside the class definition -- an extra
+ // qualification on an in-class member declaration is ill-formed C++ and is
+ // rejected by gcc/clang ("extra qualification ... on member 'AsRange'").
+ static const Range& AsRange(const BasicBlock* block);
static Range EmptyRange();
static Range SeqTree(Compiler* compiler, GenTree* tree);
}
else
{
- for (Statement* stmt : StatementList(block->FirstNonPhiDef()))
+ for (Statement* const stmt : block->NonPhiStatements())
{
compCurStmt = stmt;
- for (GenTree* node = stmt->GetTreeList(); node != nullptr; node = node->gtNext)
+ for (GenTree* const node : stmt->TreeList())
{
fgPerNodeLocalVarLiveness(node);
}
{
printf("\nDebug scopes:\n");
- BasicBlock* block;
- for (block = fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
printf(FMT_BB ": ", block->bbNum);
dumpConvertedVarSet(this, block->bbScope);
// Mark all tracked LocalVars live over their scope - walk the blocks
// keeping track of the current life, and assign it to the blocks.
- for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
// If we get to a funclet, reset the scope lists and start again, since the block
// offsets will be out of order compared to the previous block.
// Mark all tracked LocalVars live over their scope - walk the blocks
// keeping track of the current life, and assign it to the blocks.
- BasicBlock* block;
- for (block = fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
// Find scopes becoming alive. If there is a gap in the instr
// sequence, we need to process any scopes on those missing offsets.
VARSET_TP initVars(VarSetOps::MakeEmpty(this)); // Vars which are artificially made alive
- for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
VarSetOps::ClearD(this, initVars);
break;
case BBJ_SWITCH:
- {
- BasicBlock** jmpTab;
- unsigned jmpCnt;
-
- jmpCnt = block->bbJumpSwt->bbsCount;
- jmpTab = block->bbJumpSwt->bbsDstTab;
-
- do
+ for (BasicBlock* const bTarget : block->SwitchTargets())
{
- VarSetOps::UnionD(this, initVars, (*jmpTab)->bbScope);
- } while (++jmpTab, --jmpCnt);
- }
- break;
+ VarSetOps::UnionD(this, initVars, bTarget->bbScope);
+ }
+ break;
case BBJ_EHFINALLYRET:
case BBJ_RETURN:
// Variables involved in exception-handlers and finally blocks need
// to be specially marked
//
- BasicBlock* block;
VARSET_TP exceptVars(VarSetOps::MakeEmpty(this)); // vars live on entry to a handler
VARSET_TP finallyVars(VarSetOps::MakeEmpty(this)); // vars live on exit of a 'finally' block
- for (block = fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
if (block->hasEHBoundaryIn())
{
* Now fill in liveness info within each basic block - Backward DataFlow
*/
- for (block = fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
/* Tell everyone what block we're working on */
void Compiler::fgDispBBLiveness()
{
- for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
fgDispBBLiveness(block);
}
// for the cloned loop (and its embedded EH regions).
//
// Also, count the number of return blocks within the loop for future use.
- BasicBlock* stopAt = loop.lpBottom->bbNext;
- unsigned loopRetCount = 0;
- for (BasicBlock* blk = loop.lpFirst; blk != stopAt; blk = blk->bbNext)
+ unsigned loopRetCount = 0;
+ for (BasicBlock* const blk : loop.LoopBlocks())
{
if (blk->bbJumpKind == BBJ_RETURN)
{
BlockToBlockMap* blockMap = new (getAllocator(CMK_LoopClone)) BlockToBlockMap(getAllocator(CMK_LoopClone));
blockMap->Set(e, h2);
- for (flowList* predEntry = e->bbPreds; predEntry != nullptr; predEntry = predEntry->flNext)
+ for (BasicBlock* const predBlock : e->PredBlocks())
{
- BasicBlock* predBlock = predEntry->getBlock();
-
// Skip if predBlock is in the loop.
if (t->bbNum <= predBlock->bbNum && predBlock->bbNum <= b->bbNum)
{
BasicBlock* newFirst = nullptr;
BlockToBlockMap* blockMap = new (getAllocator(CMK_LoopClone)) BlockToBlockMap(getAllocator(CMK_LoopClone));
- for (BasicBlock* blk = loop.lpFirst; blk != loop.lpBottom->bbNext; blk = blk->bbNext)
+ for (BasicBlock* const blk : loop.LoopBlocks())
{
BasicBlock* newBlk = fgNewBBafter(blk->bbJumpKind, newPred, /*extendRegion*/ true);
JITDUMP("Adding " FMT_BB " (copy of " FMT_BB ") after " FMT_BB "\n", newBlk->bbNum, blk->bbNum, newPred->bbNum);
// Now go through the new blocks, remapping their jump targets within the loop
// and updating the preds lists.
- for (BasicBlock* blk = loop.lpFirst; blk != loop.lpBottom->bbNext; blk = blk->bbNext)
+ for (BasicBlock* const blk : loop.LoopBlocks())
{
BasicBlock* newblk = nullptr;
bool b = blockMap->Lookup(blk, &newblk);
break;
case BBJ_SWITCH:
- {
- for (unsigned i = 0; i < newblk->bbJumpSwt->bbsCount; i++)
+ for (BasicBlock* const switchDest : newblk->SwitchTargets())
{
- BasicBlock* switchDest = newblk->bbJumpSwt->bbsDstTab[i];
fgAddRefPred(switchDest, newblk);
}
- }
- break;
+ break;
default:
break;
#ifdef DEBUG
// Display the preds for the new blocks, after all the new blocks have been redirected.
JITDUMP("Preds after loop copy:\n");
- for (BasicBlock* blk = loop.lpFirst; blk != loop.lpBottom->bbNext; blk = blk->bbNext)
+ for (BasicBlock* const blk : loop.LoopBlocks())
{
BasicBlock* newblk = nullptr;
bool b = blockMap->Lookup(blk, &newblk);
assert(b && newblk != nullptr);
JITDUMP(FMT_BB ":", newblk->bbNum);
- for (flowList* pred = newblk->bbPreds; pred != nullptr; pred = pred->flNext)
+ for (BasicBlock* const predBlock : newblk->PredBlocks())
{
- JITDUMP(" " FMT_BB, pred->getBlock()->bbNum);
+ JITDUMP(" " FMT_BB, predBlock->bbNum);
}
JITDUMP("\n");
}
//
bool Compiler::optIdentifyLoopOptInfo(unsigned loopNum, LoopCloneContext* context)
{
- noway_assert(loopNum < optLoopCount);
-
- LoopDsc* pLoop = &optLoopTable[loopNum];
-
- BasicBlock* head = pLoop->lpHead;
- BasicBlock* beg = head->bbNext; // should this be pLoop->lpFirst or pLoop->lpTop instead?
- BasicBlock* end = pLoop->lpBottom;
+ JITDUMP("Checking loop " FMT_LP " for optimization candidates\n", loopNum);
- JITDUMP("Checking blocks " FMT_BB ".." FMT_BB " for optimization candidates\n", beg->bbNum, end->bbNum);
+ const LoopDsc& loop = optLoopTable[loopNum];
LoopCloneVisitorInfo info(context, loopNum, nullptr);
- for (BasicBlock* block = beg; block != end->bbNext; block = block->bbNext)
+ for (BasicBlock* const block : loop.LoopBlocks())
{
compCurBB = block;
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
info.stmt = stmt;
const bool lclVarsOnly = false;
}
#endif // !defined(TARGET_64BIT)
- for (BasicBlock* block = comp->fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* const block : comp->Blocks())
{
/* Make the block publicly available */
comp->compCurBB = block;
}
bool hasUniquePred = (block->GetUniquePred(compiler) != nullptr);
- for (flowList* pred = block->bbPreds; pred != nullptr; pred = pred->flNext)
+ for (BasicBlock* const predBlock : block->PredBlocks())
{
- BasicBlock* predBlock = pred->getBlock();
if (!hasUniquePred)
{
if (predBlock->NumSucc(compiler) > 1)
// First, update the NORMAL successors of the current block, adding them to the worklist
// according to the desired order. We will handle the EH successors below.
- bool checkForCriticalOutEdge = (block->NumSucc(compiler) > 1);
+ const unsigned numSuccs = block->NumSucc(compiler);
+ bool checkForCriticalOutEdge = (numSuccs > 1);
if (!checkForCriticalOutEdge && block->bbJumpKind == BBJ_SWITCH)
{
assert(!"Switch with single successor");
}
- const unsigned numSuccs = block->NumSucc(compiler);
for (unsigned succIndex = 0; succIndex < numSuccs; succIndex++)
{
BasicBlock* succ = block->GetSucc(succIndex, compiler);
// connected (these are not removed)
// - EH blocks
- for (BasicBlock* seqBlock = compiler->fgFirstBB; seqBlock; seqBlock = seqBlock->bbNext)
+ for (BasicBlock* const seqBlock : compiler->Blocks())
{
if (!isBlockVisited(seqBlock))
{
#ifdef DEBUG
// Make sure that we've visited all the blocks.
- for (BasicBlock* block = compiler->fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : compiler->Blocks())
{
assert(isBlockVisited(block));
}
for (BasicBlock *block = startBlockSequence(); block != nullptr; ++i, block = moveToNextBlock())
{
JITDUMP(FMT_BB, block->bbNum);
-
JITDUMP("(%6s) ", refCntWtd2str(block->getBBWeight(compiler)));
if (blockInfo[block->bbNum].hasEHBoundaryIn)
// Get predSet of block
BlockSetOps::ClearD(compiler, predSet);
- flowList* pred;
- for (pred = block->bbPreds; pred != nullptr; pred = pred->flNext)
+ for (BasicBlock* const predBlock : block->PredBlocks())
{
- BlockSetOps::AddElemD(compiler, predSet, pred->getBlock()->bbNum);
+ BlockSetOps::AddElemD(compiler, predSet, predBlock->bbNum);
}
// If either a rarely run block or all its preds are already sequenced, use block's weight to sequence
//
void LinearScan::identifyCandidatesExceptionDataflow()
{
- BasicBlock* block;
-
- foreach_block(compiler, block)
+ for (BasicBlock* const block : compiler->Blocks())
{
if (block->hasEHBoundaryIn())
{
}
else
{
- for (flowList* pred = otherBlock->bbPreds; pred != nullptr; pred = pred->flNext)
+ for (BasicBlock* const otherPred : otherBlock->PredBlocks())
{
- BasicBlock* otherPred = pred->getBlock();
if (otherPred->bbNum == blockInfo[otherBlock->bbNum].predBBNum)
{
predBlock = otherPred;
}
else
{
- for (flowList* pred = block->bbPreds; pred != nullptr; pred = pred->flNext)
+ for (BasicBlock* const candidatePredBlock : block->PredBlocks())
{
- BasicBlock* candidatePredBlock = pred->getBlock();
-
if (isBlockVisited(candidatePredBlock))
{
if ((predBlock == nullptr) || (predBlock->bbWeight < candidatePredBlock->bbWeight))
printf("Has %sCritical Edges\n\n", hasCriticalEdges ? "" : "No ");
printf("Prior to Resolution\n");
- foreach_block(compiler, block)
+ for (BasicBlock* const block : compiler->Blocks())
{
printf("\n" FMT_BB, block->bbNum);
if (block->hasEHBoundaryIn())
return;
}
- BasicBlock *block, *prevBlock = nullptr;
-
// Handle all the critical edges first.
// We will try to avoid resolution across critical edges in cases where all the critical-edge
// targets of a block have the same home. We will then split the edges only for the
if (hasCriticalEdges)
{
- foreach_block(compiler, block)
+ for (BasicBlock* const block : compiler->Blocks())
{
if (block->bbNum > bbNumMaxBeforeResolution)
{
{
handleOutgoingCriticalEdges(block);
}
- prevBlock = block;
}
}
- prevBlock = nullptr;
- foreach_block(compiler, block)
+ for (BasicBlock* const block : compiler->Blocks())
{
if (block->bbNum > bbNumMaxBeforeResolution)
{
// would only improve the debug case, and would clutter up the code somewhat.
if (compiler->fgBBNumMax > bbNumMaxBeforeResolution)
{
- foreach_block(compiler, block)
+ for (BasicBlock* const block : compiler->Blocks())
{
if (block->bbNum > bbNumMaxBeforeResolution)
{
#ifdef DEBUG
// Make sure the varToRegMaps match up on all edges.
bool foundMismatch = false;
- foreach_block(compiler, block)
+ for (BasicBlock* const block : compiler->Blocks())
{
if (block->isEmpty() && block->bbNum > bbNumMaxBeforeResolution)
{
continue;
}
VarToRegMap toVarToRegMap = getInVarToRegMap(block->bbNum);
- for (flowList* pred = block->bbPreds; pred != nullptr; pred = pred->flNext)
+ for (BasicBlock* const predBlock : block->PredBlocks())
{
- BasicBlock* predBlock = pred->getBlock();
VarToRegMap fromVarToRegMap = getOutVarToRegMap(predBlock->bbNum);
VarSetOps::Iter iter(compiler, block->bbLiveIn);
unsigned varIndex = 0;
fprintf(file, "\n");
}
- for (BasicBlock* block = compiler->fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : compiler->Blocks())
{
if (block->bbNum > bbNumMaxBeforeResolution)
{
}
// blocks
- for (BasicBlock* block = compiler->fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : compiler->Blocks())
{
if (block->bbNum > bbNumMaxBeforeResolution)
{
// Now, verify the resolution blocks.
// Currently these are nearly always at the end of the method, but that may not always be the case.
// So, we'll go through all the BBs looking for blocks whose bbNum is greater than bbNumMaxBeforeResolution.
- for (BasicBlock* currentBlock = compiler->fgFirstBB; currentBlock != nullptr; currentBlock = currentBlock->bbNext)
+ for (BasicBlock* const currentBlock : compiler->Blocks())
{
if (currentBlock->bbNum > bbNumMaxBeforeResolution)
{
printf("\n-----------------\n");
printf("LIVENESS:\n");
printf("-----------------\n");
- foreach_block(compiler, block)
+ for (BasicBlock* const block : compiler->Blocks())
{
printf(FMT_BB " use def in out\n", block->bbNum);
dumpConvertedVarSet(compiler, block->bbVarUse);
#ifdef DEBUG
// Make sure we don't have any blocks that were not visited
- foreach_block(compiler, block)
+ for (BasicBlock* const block : compiler->Blocks())
{
assert(isBlockVisited(block));
}
fgCurrentlyInUseArgTemps = hashBv::Create(this);
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
if (fgRemoveRestOfBlock)
{
{
if (compQmarkUsed)
{
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
GenTree* expr = stmt->GetRootNode();
#ifdef DEBUG
*/
void Compiler::fgPostExpandQmarkChecks()
{
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
GenTree* expr = stmt->GetRootNode();
fgWalkTreePre(&expr, Compiler::fgAssertNoQmark, nullptr);
}
}
- BasicBlock* block;
-
- foreach_block(comp, block)
+ for (BasicBlock* const block : comp->Blocks())
{
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
BuildConnGraphVisitor buildConnGraphVisitor(this);
buildConnGraphVisitor.WalkTree(stmt->GetRootNodePointer(), nullptr);
m_PossiblyStackPointingPointers = BitVecOps::MakeEmpty(&m_bitVecTraits);
m_DefinitelyStackPointingPointers = BitVecOps::MakeEmpty(&m_bitVecTraits);
- BasicBlock* block;
-
- foreach_block(comp, block)
+ for (BasicBlock* const block : comp->Blocks())
{
const bool basicBlockHasNewObj = (block->bbFlags & BBF_HAS_NEWOBJ) == BBF_HAS_NEWOBJ;
const bool basicBlockHasBackwardJump = (block->bbFlags & BBF_BACKWARD_JUMP) == BBF_BACKWARD_JUMP;
}
#endif // DEBUG
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
GenTree* stmtExpr = stmt->GetRootNode();
GenTree* op2 = nullptr;
}
};
- BasicBlock* block;
-
- foreach_block(comp, block)
+ for (BasicBlock* const block : comp->Blocks())
{
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
RewriteUsesVisitor rewriteUsesVisitor(this);
rewriteUsesVisitor.WalkTree(stmt->GetRootNodePointer(), nullptr);
}
#endif
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
/* Make the block publicly available */
noway_assert((block->bbFlags & (BBF_VISITED | BBF_MARKED)) == 0);
/* Walk the statement trees in this basic block */
- for (Statement* stmt : StatementList(block->FirstNonPhiDef()))
+ for (Statement* const stmt : block->NonPhiStatements())
{
const bool isReturn = stmt->GetRootNode()->OperIs(GT_RETURN);
/* We walk the tree in the forwards direction (bottom up) */
bool stmtHasArrLenCandidate = false;
- for (GenTree* tree = stmt->GetTreeList(); tree != nullptr; tree = tree->gtNext)
+ for (GenTree* const tree : stmt->TreeList())
{
if (tree->OperIsCompare() && stmtHasArrLenCandidate)
{
BitVecOps::AddElemD(cseLivenessTraits, cseCallKillsMask, cseAvailBit);
}
- for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
/* Initialize the blocks's bbCseIn set */
}
}
- for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
// If the block doesn't contains a call then skip it...
//
if (verbose)
{
bool headerPrinted = false;
- for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
if (block->bbCseGen != nullptr)
{
{
printf("\nAfter performing DataFlow for ValnumCSE's\n");
- for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
printf(FMT_BB, block->bbNum);
printf(" cseIn = %s,", genES2str(cseLivenessTraits, block->bbCseIn));
#endif
EXPSET_TP available_cses = BitVecOps::MakeEmpty(cseLivenessTraits);
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
// Make the block publicly available
// Walk the statement trees in this basic block
- for (Statement* stmt : StatementList(block->FirstNonPhiDef()))
+ for (Statement* const stmt : block->NonPhiStatements())
{
// We walk the tree in the forwards direction (bottom up)
- for (GenTree* tree = stmt->GetTreeList(); tree != nullptr; tree = tree->gtNext)
+ for (GenTree* const tree : stmt->TreeList())
{
bool isUse = false;
bool isDef = false;
void Compiler::optCleanupCSEs()
{
// We must clear the BBF_VISITED and BBF_MARKED flags.
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
// And clear all the "visited" bits on the block.
block->bbFlags &= ~(BBF_VISITED | BBF_MARKED);
// Walk the statement trees in this basic block.
- for (Statement* stmt : StatementList(block->FirstNonPhiDef()))
+ for (Statement* const stmt : block->NonPhiStatements())
{
// We must clear the gtCSEnum field.
for (GenTree* tree = stmt->GetRootNode(); tree; tree = tree->gtPrev)
void Compiler::optEnsureClearCSEInfo()
{
- for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
assert((block->bbFlags & (BBF_VISITED | BBF_MARKED)) == 0);
- // Initialize 'stmt' to the first non-Phi statement
- // Walk the statement trees in this basic block
- for (Statement* stmt : StatementList(block->FirstNonPhiDef()))
+ for (Statement* const stmt : block->NonPhiStatements())
{
for (GenTree* tree = stmt->GetRootNode(); tree; tree = tree->gtPrev)
{
bool firstBBDominatesAllReturns = true;
const bool usingProfileWeights = fgIsUsingProfileWeights();
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
/* Blocks that can't be reached via the first block are rarely executed */
if (!fgReachable(fgFirstBB, block))
/* Build list of backedges for block begBlk */
flowList* backedgeList = nullptr;
- for (flowList* pred = begBlk->bbPreds; pred != nullptr; pred = pred->flNext)
+ for (BasicBlock* const predBlock : begBlk->PredBlocks())
{
/* Is this a backedge? */
- if (pred->getBlock()->bbNum >= begBlk->bbNum)
+ if (predBlock->bbNum >= begBlk->bbNum)
{
- flowList* flow = new (this, CMK_FlowList) flowList(pred->getBlock(), backedgeList);
+ backedgeList = new (this, CMK_FlowList) flowList(predBlock, backedgeList);
#if MEASURE_BLOCK_SIZE
genFlowNodeCnt += 1;
genFlowNodeSize += sizeof(flowList);
#endif // MEASURE_BLOCK_SIZE
-
- backedgeList = flow;
}
}
noway_assert(!opts.MinOpts());
- BasicBlock* curBlk;
- unsigned backEdgeCount = 0;
+ unsigned backEdgeCount = 0;
- for (flowList* pred = begBlk->bbPreds; pred != nullptr; pred = pred->flNext)
+ for (BasicBlock* const predBlock : begBlk->PredBlocks())
{
- curBlk = pred->getBlock();
-
- /* is this a backward edge? (from curBlk to begBlk) */
+ /* is this a backward edge? (from predBlock to begBlk) */
- if (begBlk->bbNum > curBlk->bbNum)
+ if (begBlk->bbNum > predBlock->bbNum)
{
continue;
}
/* We only consider back-edges that are BBJ_COND or BBJ_ALWAYS for loops */
- if ((curBlk->bbJumpKind != BBJ_COND) && (curBlk->bbJumpKind != BBJ_ALWAYS))
+ if ((predBlock->bbJumpKind != BBJ_COND) && (predBlock->bbJumpKind != BBJ_ALWAYS))
{
continue;
}
}
#endif
- curBlk = begBlk;
+ BasicBlock* curBlk = begBlk;
while (true)
{
noway_assert(curBlk);
switch (block->bbJumpKind)
{
- unsigned jumpCnt;
- BasicBlock** jumpTab;
-
case BBJ_NONE:
case BBJ_COND:
if (block->bbNext == loop.lpEntry)
break;
case BBJ_SWITCH:
- jumpCnt = block->bbJumpSwt->bbsCount;
- jumpTab = block->bbJumpSwt->bbsDstTab;
-
- do
+ for (BasicBlock* const bTarget : block->SwitchTargets())
{
- noway_assert(*jumpTab);
- if ((*jumpTab) == loop.lpEntry)
+ if (bTarget == loop.lpEntry)
{
removeLoop = true;
+ break;
}
- } while (++jumpTab, --jumpCnt);
+ }
break;
default:
/* Check if the entry has other predecessors outside the loop
* TODO: Replace this when predecessors are available */
- BasicBlock* auxBlock;
- for (auxBlock = fgFirstBB; auxBlock; auxBlock = auxBlock->bbNext)
+ for (BasicBlock* const auxBlock : Blocks())
{
/* Ignore blocks in the loop */
- if (auxBlock->bbNum > loop.lpHead->bbNum && auxBlock->bbNum <= loop.lpBottom->bbNum)
+ if (loop.lpContains(auxBlock))
{
continue;
}
switch (auxBlock->bbJumpKind)
{
- unsigned jumpCnt;
- BasicBlock** jumpTab;
-
case BBJ_NONE:
case BBJ_COND:
if (auxBlock->bbNext == loop.lpEntry)
break;
case BBJ_SWITCH:
- jumpCnt = auxBlock->bbJumpSwt->bbsCount;
- jumpTab = auxBlock->bbJumpSwt->bbsDstTab;
-
- do
+ for (BasicBlock* const bTarget : auxBlock->SwitchTargets())
{
- noway_assert(*jumpTab);
- if ((*jumpTab) == loop.lpEntry)
+ if (bTarget == loop.lpEntry)
{
removeLoop = false;
+ break;
}
- } while (++jumpTab, --jumpCnt);
+ }
break;
default:
// Make sure the "iterVar" initialization is never skipped,
// i.e. every pred of ENTRY other than HEAD is in the loop.
- for (flowList* predEdge = entry->bbPreds; predEdge; predEdge = predEdge->flNext)
+ for (BasicBlock* const predBlock : entry->PredBlocks())
{
- BasicBlock* predBlock = predEdge->getBlock();
if ((predBlock != head) && !optLoopTable[loopInd].lpContains(predBlock))
{
goto DONE_LOOP;
do
{
block = block->bbNext;
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
if (stmt->GetRootNode() == incr)
{
void Compiler::optCheckPreds()
{
- BasicBlock* block;
- BasicBlock* blockPred;
- flowList* pred;
-
- for (block = fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
- for (pred = block->bbPreds; pred; pred = pred->flNext)
+ for (BasicBlock* const predBlock : block->PredBlocks())
{
// make sure this pred is part of the BB list
- for (blockPred = fgFirstBB; blockPred; blockPred = blockPred->bbNext)
+ BasicBlock* bb;
+ for (bb = fgFirstBB; bb; bb = bb->bbNext)
{
- if (blockPred == pred->getBlock())
+ if (bb == predBlock)
{
break;
}
}
- noway_assert(blockPred);
- switch (blockPred->bbJumpKind)
+ noway_assert(bb);
+ switch (bb->bbJumpKind)
{
case BBJ_COND:
- if (blockPred->bbJumpDest == block)
+ if (bb->bbJumpDest == block)
{
break;
}
FALLTHROUGH;
case BBJ_NONE:
- noway_assert(blockPred->bbNext == block);
+ noway_assert(bb->bbNext == block);
break;
case BBJ_EHFILTERRET:
case BBJ_ALWAYS:
case BBJ_EHCATCHRET:
- noway_assert(blockPred->bbJumpDest == block);
+ noway_assert(bb->bbJumpDest == block);
break;
default:
break;
}
// Add preds to the worklist, checking for side-entries.
- for (flowList* predIter = block->bbPreds; predIter != nullptr; predIter = predIter->flNext)
+ for (BasicBlock* const predBlock : block->PredBlocks())
{
- BasicBlock* pred = predIter->getBlock();
-
- unsigned int testNum = PositionNum(pred);
+ unsigned int testNum = PositionNum(predBlock);
if ((testNum < top->bbNum) || (testNum > bottom->bbNum))
{
// Pred is out of loop range
if (block == entry)
{
- if (pred == head)
+ if (predBlock == head)
{
// This is the single entry we expect.
continue;
// ENTRY has some pred other than head outside the loop. If ENTRY does not
// dominate this pred, we'll consider this a side-entry and skip this loop;
// otherwise the loop is still valid and this may be a (flow-wise) back-edge
- // of an outer loop. For the dominance test, if `pred` is a new block, use
+ // of an outer loop. For the dominance test, if `predBlock` is a new block, use
// its unique predecessor since the dominator tree has info for that.
- BasicBlock* effectivePred = (pred->bbNum > oldBlockMaxNum ? pred->bbPrev : pred);
+ BasicBlock* effectivePred = (predBlock->bbNum > oldBlockMaxNum ? predBlock->bbPrev : predBlock);
if (comp->fgDominate(entry, effectivePred))
{
// Outer loop back-edge
}
bool isFirstVisit;
- if (pred == entry)
+ if (predBlock == entry)
{
// We have indeed found a cycle in the flow graph.
isFirstVisit = !foundCycle;
foundCycle = true;
- assert(loopBlocks.IsMember(pred->bbNum));
+ assert(loopBlocks.IsMember(predBlock->bbNum));
}
- else if (loopBlocks.TestAndInsert(pred->bbNum))
+ else if (loopBlocks.TestAndInsert(predBlock->bbNum))
{
// Already visited this pred
isFirstVisit = false;
}
else
{
- // Add this pred to the worklist
- worklist.push_back(pred);
+ // Add this predBlock to the worklist
+ worklist.push_back(predBlock);
isFirstVisit = true;
}
- if (isFirstVisit && (pred->bbNext != nullptr) && (PositionNum(pred->bbNext) == pred->bbNum))
+ if (isFirstVisit && (predBlock->bbNext != nullptr) &&
+ (PositionNum(predBlock->bbNext) == predBlock->bbNum))
{
- // We've created a new block immediately after `pred` to
+ // We've created a new block immediately after `predBlock` to
// reconnect what was fall-through. Mark it as in-loop also;
- // it needs to stay with `prev` and if it exits the loop we'd
+ // it needs to stay with `predBlock` and if it exits the loop we'd
// just need to re-create it if we tried to move it out.
- loopBlocks.Insert(pred->bbNext->bbNum);
+ loopBlocks.Insert(predBlock->bbNext->bbNum);
}
}
}
// of an edge from the run of blocks being moved to `newMoveAfter` -- doing so would
// introduce a new lexical back-edge, which could (maybe?) confuse the loop search
// algorithm, and isn't desirable layout anyway.
- for (flowList* predIter = newMoveAfter->bbPreds; predIter != nullptr; predIter = predIter->flNext)
+ for (BasicBlock* const predBlock : newMoveAfter->PredBlocks())
{
- unsigned int predNum = predIter->getBlock()->bbNum;
+ unsigned int predNum = predBlock->bbNum;
if ((predNum >= top->bbNum) && (predNum <= bottom->bbNum) && !loopBlocks.IsMember(predNum))
{
//
bool CanTreatAsLoopBlocks(BasicBlock* firstNonLoopBlock, BasicBlock* lastNonLoopBlock)
{
- BasicBlock* nextLoopBlock = lastNonLoopBlock->bbNext;
- for (BasicBlock* testBlock = firstNonLoopBlock; testBlock != nextLoopBlock; testBlock = testBlock->bbNext)
+ for (BasicBlock* const testBlock : comp->Blocks(firstNonLoopBlock, lastNonLoopBlock))
{
- for (flowList* predIter = testBlock->bbPreds; predIter != nullptr; predIter = predIter->flNext)
+ for (BasicBlock* const testPred : testBlock->PredBlocks())
{
- BasicBlock* testPred = predIter->getBlock();
unsigned int predPosNum = PositionNum(testPred);
unsigned int firstNonLoopPosNum = PositionNum(firstNonLoopBlock);
unsigned int lastNonLoopPosNum = PositionNum(lastNonLoopBlock);
break;
case BBJ_SWITCH:
-
- unsigned jumpCnt;
- jumpCnt = block->bbJumpSwt->bbsCount;
- BasicBlock** jumpTab;
- jumpTab = block->bbJumpSwt->bbsDstTab;
-
- do
+ for (BasicBlock* const exitPoint : block->SwitchTargets())
{
- noway_assert(*jumpTab);
- exitPoint = *jumpTab;
-
if (!loopBlocks.IsMember(exitPoint->bbNum))
{
lastExit = block;
exitCount++;
}
- } while (++jumpTab, --jumpCnt);
+ }
break;
default:
LoopSearch search(this);
- for (BasicBlock* head = fgFirstBB; head->bbNext; head = head->bbNext)
+ for (BasicBlock* head = fgFirstBB; head->bbNext != nullptr; head = head->bbNext)
{
BasicBlock* top = head->bbNext;
continue;
}
- for (flowList* pred = top->bbPreds; pred; pred = pred->flNext)
+ for (BasicBlock* const predBlock : top->PredBlocks())
{
- if (search.FindLoop(head, top, pred->getBlock()))
+ if (search.FindLoop(head, top, predBlock))
{
// Found a loop; record it and see if we've hit the limit.
bool recordedLoop = search.RecordLoop();
// this -- the innermost loop labeling will be done last.
for (unsigned char loopInd = 0; loopInd < optLoopCount; loopInd++)
{
- BasicBlock* first = optLoopTable[loopInd].lpFirst;
- BasicBlock* bottom = optLoopTable[loopInd].lpBottom;
- for (BasicBlock* blk = first; blk != nullptr; blk = blk->bbNext)
+ for (BasicBlock* const blk : optLoopTable[loopInd].LoopBlocks())
{
blk->bbNatLoopNum = loopInd;
- if (blk == bottom)
- {
- break;
- }
- assert(blk->bbNext != nullptr); // We should never reach nullptr.
}
}
break;
case BBJ_SWITCH:
- {
to->bbJumpSwt = new (this, CMK_BasicBlock) BBswtDesc(this, from->bbJumpSwt);
- }
- break;
+ break;
default:
break;
// This is ok, because after the first redirection, the topPredBlock branch target will no longer match the source
// edge of the blockMap, so nothing will happen.
bool firstPred = true;
- for (flowList* topPred = t->bbPreds; topPred != nullptr; topPred = topPred->flNext)
+ for (BasicBlock* const topPredBlock : t->PredBlocks())
{
- BasicBlock* topPredBlock = topPred->getBlock();
-
// Skip if topPredBlock is in the loop.
// Note that this uses block number to detect membership in the loop. We are adding blocks during
// canonicalization, and those block numbers will be new, and larger than previous blocks. However, we work
++loopRetCount;
}
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
gtSetStmtInfo(stmt);
loopCostSz += stmt->GetCostSz();
unsigned estDupCostSz = 0;
- for (Statement* stmt : bTest->Statements())
+ for (Statement* const stmt : bTest->Statements())
{
GenTree* tree = stmt->GetRootNode();
gtPrepareCost(tree);
//
// If the condition has array.Length operations, also boost, as they are likely to be CSE'd.
- for (Statement* stmt : bTest->Statements())
+ for (Statement* const stmt : bTest->Statements())
{
GenTree* tree = stmt->GetRootNode();
BasicBlock* bNewCond = fgNewBBafter(BBJ_COND, block, /*extendRegion*/ true);
// Clone each statement in bTest and append to bNewCond.
- for (Statement* stmt : bTest->Statements())
+ for (Statement* const stmt : bTest->Statements())
{
GenTree* originalTree = stmt->GetRootNode();
GenTree* clonedTree = gtCloneExpr(originalTree);
unsigned loopFirstNum = bNewCond->bbNext->bbNum;
unsigned loopBottomNum = bTest->bbNum;
- for (flowList* pred = bTest->bbPreds; pred != nullptr; pred = pred->flNext)
+ for (BasicBlock* const predBlock : bTest->PredBlocks())
{
- BasicBlock* predBlock = pred->getBlock();
- unsigned bNum = predBlock->bbNum;
+ unsigned bNum = predBlock->bbNum;
if ((loopFirstNum <= bNum) && (bNum <= loopBottomNum))
{
// Looks like the predecessor is from within the potential loop; skip it.
}
bool madeChanges = false; // Assume no changes made
- for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
// Make sure the appropriate fields are initialized
//
* lastBottom - used when we have multiple back-edges to the same top
*/
- flowList* pred;
-
- BasicBlock* top;
-
- for (top = fgFirstBB; top; top = top->bbNext)
+ for (BasicBlock* const top : Blocks())
{
BasicBlock* foundBottom = nullptr;
- for (pred = top->bbPreds; pred; pred = pred->flNext)
+ for (BasicBlock* const bottom : top->PredBlocks())
{
/* Is this a loop candidate? - We look for "back edges" */
- BasicBlock* bottom = pred->getBlock();
-
/* is this a backward edge? (from BOTTOM to TOP) */
if (top->bbNum > bottom->bbNum)
{
noway_assert(beg != nullptr);
- for (Statement* stmt : beg->Statements())
+ for (Statement* const stmt : beg->Statements())
{
if (fgWalkTreePre(stmt->GetRootNodePointer(), optIsVarAssgCB, &desc) != WALK_CONTINUE)
{
/*****************************************************************************/
int Compiler::optIsSetAssgLoop(unsigned lnum, ALLVARSET_VALARG_TP vars, varRefKinds inds)
{
- LoopDsc* loop;
-
- /* Get hold of the loop descriptor */
-
noway_assert(lnum < optLoopCount);
- loop = optLoopTable + lnum;
+ LoopDsc* loop = &optLoopTable[lnum];
/* Do we already know what variables are assigned within this loop? */
{
isVarAssgDsc desc;
- BasicBlock* beg;
- BasicBlock* end;
-
/* Prepare the descriptor used by the tree walker call-back */
desc.ivaVar = (unsigned)-1;
/* Now walk all the statements of the loop */
- beg = loop->lpHead->bbNext;
- end = loop->lpBottom;
-
- for (/**/; /**/; beg = beg->bbNext)
+ for (BasicBlock* const block : loop->LoopBlocks())
{
- noway_assert(beg);
-
- for (Statement* stmt : StatementList(beg->FirstNonPhiDef()))
+ for (Statement* const stmt : block->NonPhiStatements())
{
fgWalkTreePre(stmt->GetRootNodePointer(), optIsVarAssgCB, &desc);
loop->lpFlags |= LPFLG_ASGVARS_INC;
}
}
-
- if (beg == end)
- {
- break;
- }
}
AllVarSetOps::Assign(this, loop->lpAsgVars, desc.ivaMaskVal);
void HoistBlock(BasicBlock* block)
{
- for (Statement* stmt : StatementList(block->FirstNonPhiDef()))
+ for (Statement* const stmt : block->NonPhiStatements())
{
WalkTree(stmt->GetRootNodePointer(), nullptr);
assert(m_valueStack.TopRef().Node() == stmt->GetRootNode());
// into the phi via the loop header block will now flow through the preheader
// block from the header block.
- for (Statement* stmt : top->Statements())
+ for (Statement* const stmt : top->Statements())
{
GenTree* tree = stmt->GetRootNode();
if (tree->OperGet() != GT_ASG)
edgeToPreHeader->setEdgeWeights(preHead->bbWeight, preHead->bbWeight, preHead);
bool checkNestedLoops = false;
- for (flowList* pred = top->bbPreds; pred; pred = pred->flNext)
+ for (BasicBlock* const predBlock : top->PredBlocks())
{
- BasicBlock* predBlock = pred->getBlock();
-
if (fgDominate(top, predBlock))
{
// note: if 'top' dominates predBlock, 'head' dominates predBlock too
void Compiler::optComputeLoopNestSideEffects(unsigned lnum)
{
assert(optLoopTable[lnum].lpParent == BasicBlock::NOT_IN_LOOP); // Requires: lnum is outermost.
- BasicBlock* botNext = optLoopTable[lnum].lpBottom->bbNext;
- JITDUMP("optComputeLoopSideEffects botNext is " FMT_BB ", lnum is %d\n", botNext->bbNum, lnum);
- for (BasicBlock* bbInLoop = optLoopTable[lnum].lpFirst; bbInLoop != botNext; bbInLoop = bbInLoop->bbNext)
+ JITDUMP("optComputeLoopSideEffects lnum is %d\n", lnum);
+ for (BasicBlock* const bbInLoop : optLoopTable[lnum].LoopBlocks())
{
if (!optComputeLoopSideEffectsOfBlock(bbInLoop))
{
MemoryKindSet memoryHavoc = emptyMemoryKindSet;
// Now iterate over the remaining statements, and their trees.
- for (Statement* stmt : StatementList(blk->FirstNonPhiDef()))
+ for (Statement* const stmt : blk->NonPhiStatements())
{
- for (GenTree* tree = stmt->GetTreeList(); tree != nullptr; tree = tree->gtNext)
+ for (GenTree* const tree : stmt->TreeList())
{
genTreeOps oper = tree->OperGet();
{
change = false;
- for (BasicBlock* b1 = fgFirstBB; b1; b1 = b1->bbNext)
+ for (BasicBlock* const b1 : Blocks())
{
/* We're only interested in conditional jumps here */
for (Statement* stmt = block->FirstNonPhiDef(); stmt != nullptr;)
{
Statement* next = stmt->GetNextStmt();
- for (GenTree* tree = stmt->GetTreeList(); tree != nullptr; tree = tree->gtNext)
+ for (GenTree* const tree : stmt->TreeList())
{
if (((tree->gtFlags & GTF_CALL) != 0))
{
}
int count = 0;
- for (BasicBlock* block = compiler->fgFirstBB->bbNext; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : compiler->Blocks(compiler->fgFirstBB->bbNext))
{
if (block->bbFlags & BBF_PATCHPOINT)
{
void RangeCheck::MapMethodDefs()
{
// First, gather where all definitions occur in the program and store it in a map.
- for (BasicBlock* block = m_pCompiler->fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : m_pCompiler->Blocks())
{
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
MapMethodDefsData data(this, block, stmt);
m_pCompiler->fgWalkTreePre(stmt->GetRootNodePointer(), MapMethodDefsVisitor, &data, false, true);
#endif
// Walk through trees looking for arrBndsChk node and check if it can be optimized.
- for (BasicBlock* block = m_pCompiler->fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* const block : m_pCompiler->Blocks())
{
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
- for (GenTree* tree = stmt->GetTreeList(); tree; tree = tree->gtNext)
+ for (GenTree* const tree : stmt->TreeList())
{
if (IsOverBudget())
{
void Rationalizer::SanityCheck()
{
// TODO: assert(!IsLIR());
- BasicBlock* block;
- foreach_block(comp, block)
+ for (BasicBlock* const block : comp->Blocks())
{
- for (Statement* statement : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
- ValidateStatement(statement, block);
+ ValidateStatement(stmt, block);
- for (GenTree* tree = statement->GetTreeList(); tree; tree = tree->gtNext)
+ for (GenTree* const tree : stmt->TreeList())
{
// QMARK and PUT_ARG_TYPE nodes should have been removed before this phase.
assert(!tree->OperIs(GT_QMARK, GT_PUTARG_TYPE));
comp->fgOrder = Compiler::FGOrderLinear;
RationalizeVisitor visitor(*this);
- for (BasicBlock* block = comp->fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : comp->Blocks())
{
comp->compCurBB = block;
m_block = block;
continue;
}
- for (Statement* statement : StatementList(firstStatement))
+ for (Statement* const statement : block->Statements())
{
assert(statement->GetTreeList() != nullptr);
assert(statement->GetTreeList()->gtPrev == nullptr);
bool madeChanges = false;
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
// Skip over any removed blocks.
//
// Reset visited flags, in case we set any.
//
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
block->bbFlags &= ~BBF_VISITED;
}
//
Statement* const lastStmt = block->lastStmt();
- for (Statement* stmt = block->FirstNonPhiDef(); stmt != nullptr; stmt = stmt->GetNextStmt())
+ for (Statement* const stmt : block->NonPhiStatements())
{
GenTree* const tree = stmt->GetRootNode();
BasicBlock* const trueTarget = block->bbJumpDest;
BasicBlock* const falseTarget = block->bbNext;
- for (flowList* pred = block->bbPreds; pred != nullptr; pred = pred->flNext)
+ for (BasicBlock* const predBlock : block->PredBlocks())
{
- BasicBlock* const predBlock = pred->getBlock();
numPreds++;
// Treat switch preds as ambiguous for now.
// flow directly by changing their jump targets to the appropriate successor,
// provided it's a permissable flow in our EH model.
//
- for (flowList* pred = block->bbPreds; pred != nullptr; pred = pred->flNext)
+ for (BasicBlock* const predBlock : block->PredBlocks())
{
- BasicBlock* const predBlock = pred->getBlock();
-
if (predBlock->bbJumpKind == BBJ_SWITCH)
{
// Skip over switch preds, they will continue to flow to block.
return true;
}
- for (BasicBlock* block = fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : Blocks())
{
block->bbFlags &= ~BBF_VISITED;
}
m_memorySsaMap[memoryKind] = nullptr;
}
- for (BasicBlock* blk = fgFirstBB; blk != nullptr; blk = blk->bbNext)
+ for (BasicBlock* const blk : Blocks())
{
// Eliminate phis.
for (MemoryKind memoryKind : allMemoryKinds())
}
}
- for (Statement* stmt : blk->Statements())
+ for (Statement* const stmt : blk->Statements())
{
- for (GenTree* tree = stmt->GetTreeList(); tree != nullptr; tree = tree->gtNext)
+ for (GenTree* const tree : stmt->TreeList())
{
if (tree->IsLocal())
{
tree->AsLclVarCommon()->SetSsaNum(SsaConfig::RESERVED_SSA_NUM);
- continue;
}
}
}
static GenTree* GetPhiNode(BasicBlock* block, unsigned lclNum)
{
// Walk the statements for phi nodes.
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
// A prefix of the statements of the block are phi definition nodes. If we complete processing
// that prefix, exit.
#ifdef DEBUG
unsigned seqNum = 1;
- for (GenTree* node = stmt->GetTreeList(); node != nullptr; node = node->gtNext)
+ for (GenTree* const node : stmt->TreeList())
{
node->gtSeqNum = seqNum++;
}
#ifdef DEBUG
unsigned seqNum = 1;
- for (GenTree* node = stmt->GetTreeList(); node != nullptr; node = node->gtNext)
+ for (GenTree* const node : stmt->TreeList())
{
node->gtSeqNum = seqNum++;
}
bool phiFound = false;
#endif
// A prefix of blocks statements will be SSA definitions. Search those for "lclNum".
- for (Statement* stmt : handler->Statements())
+ for (Statement* const stmt : handler->Statements())
{
// If the tree is not an SSA def, break out of the loop: we're done.
if (!stmt->IsPhiDefnStmt())
}
// Walk the statements of the block and rename definitions and uses.
- for (Statement* stmt : block->Statements())
+ for (Statement* const stmt : block->Statements())
{
- for (GenTree* tree = stmt->GetTreeList(); tree != nullptr; tree = tree->gtNext)
+ for (GenTree* const tree : stmt->TreeList())
{
if (tree->OperIs(GT_ASG))
{
for (BasicBlock* succ : block->GetAllSuccs(m_pCompiler))
{
// Walk the statements for phi nodes.
- for (Statement* stmt : succ->Statements())
+ for (Statement* const stmt : succ->Statements())
{
// A prefix of the statements of the block are phi definition nodes. If we complete processing
// that prefix, exit.
// For a filter, we consider the filter to be the "real" handler.
BasicBlock* handlerStart = succTry->ExFlowBlock();
- for (Statement* stmt : handlerStart->Statements())
+ for (Statement* const stmt : handlerStart->Statements())
{
GenTree* tree = stmt->GetRootNode();
// Initialize the memory ssa numbers for unreachable blocks. ValueNum expects
// memory ssa numbers to have some intitial value.
- for (BasicBlock* block = m_pCompiler->fgFirstBB; block; block = block->bbNext)
+ for (BasicBlock* const block : m_pCompiler->Blocks())
{
if (block->bbIDom == nullptr)
{
// tree is built. The pre/post order numbers that were generated previously and used for loop
// recognition are still being used by optPerformHoistExpr via fgCreateLoopPreHeader. That's rather
// odd, considering that SetupBBRoot may have added a new block.
- for (BasicBlock* block = m_pCompiler->fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : m_pCompiler->Blocks())
{
block->bbIDom = nullptr;
block->bbPostOrderNum = 0;
//
PhaseStatus StackLevelSetter::DoPhase()
{
- for (BasicBlock* block = comp->fgFirstBB; block != nullptr; block = block->bbNext)
+ for (BasicBlock* const block : comp->Blocks())
{
ProcessBlock(block);
}
-#if !FEATURE_FIXED_OUT_ARGS
+#if !FEATURE_FIXED_OUT_ARGS
if (framePointerRequired)
{
comp->codeGen->setFramePointerRequired(true);
{
lvMemoryPerSsaData.GetSsaDefByIndex(i)->m_vnPair = noVnp;
}
- for (BasicBlock* blk = fgFirstBB; blk != nullptr; blk = blk->bbNext)
+ for (BasicBlock* const blk : Blocks())
{
- // Now iterate over the block's statements, and their trees.
- for (Statement* stmt : StatementList(blk->FirstNonPhiDef()))
+ for (Statement* const stmt : blk->NonPhiStatements())
{
- for (GenTree* tree = stmt->GetTreeList(); tree != nullptr; tree = tree->gtNext)
+ for (GenTree* const tree : stmt->TreeList())
{
tree->gtVNPair.SetBoth(ValueNumStore::NoVN);
}
}
#endif
- for (GenTree* tree = stmt->GetTreeList(); tree != nullptr; tree = tree->gtNext)
+ for (GenTree* const tree : stmt->TreeList())
{
fgValueNumberTree(tree);
}