1 // Licensed to the .NET Foundation under one or more agreements.
2 // The .NET Foundation licenses this file to you under the MIT license.
3 // See the LICENSE file in the project root for more information.
5 /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
6 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
11 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
12 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
15 /*****************************************************************************/
18 /*****************************************************************************/
20 #include "vartype.h" // For "var_types.h"
21 #include "_typeinfo.h"
22 /*****************************************************************************/
30 #include "jithashtable.h"
32 /*****************************************************************************/
33 typedef BitVec EXPSET_TP;
40 typedef BitVec ASSERT_TP;
41 typedef BitVec_ValArg_T ASSERT_VALARG_TP;
42 typedef BitVec_ValRet_T ASSERT_VALRET_TP;
44 /*****************************************************************************
46 * Each basic block ends with a jump which is described as a value
47 * of the following enumeration.
// BBjumpKinds: classifies how control leaves a basic block; stored in
// BasicBlock::bbJumpKind and interpreted together with the bbJumpDest/bbJumpSwt
// union declared further below.
52 enum BBjumpKinds : BYTE
54 BBJ_EHFINALLYRET,// block ends with 'endfinally' (for finally or fault)
55 BBJ_EHFILTERRET, // block ends with 'endfilter'
56 BBJ_EHCATCHRET, // block ends with a leave out of a catch (only #if FEATURE_EH_FUNCLETS)
57 BBJ_THROW, // block ends with 'throw'
58 BBJ_RETURN, // block ends with 'ret'
59 BBJ_NONE, // block flows into the next one (no jump)
60 BBJ_ALWAYS, // block always jumps to the target
61 BBJ_LEAVE, // block always jumps to the target, maybe out of guarded region. Only used until importing.
62 BBJ_CALLFINALLY, // block always calls the target finally
63 BBJ_COND, // block conditionally jumps to the target
64 BBJ_SWITCH, // block ends with a switch statement
76 struct BasicBlockList;
80 #if FEATURE_STACK_FP_X87
81 struct FlatFPStateX87;
84 /*****************************************************************************
86 * The following describes a switch block.
89 * 1. If bbsHasDefault is true, the default case is the last one in the array of basic block addresses
90 * namely bbsDstTab[bbsCount - 1].
91 * 2. bbsCount must be at least 1, for the default case. bbsCount cannot be zero. It appears that the ECMA spec
92 * allows for a degenerate switch with zero cases. Normally, the optimizer will optimize degenerate
93 * switches with just a default case to a BBJ_ALWAYS branch, and a switch with just two cases to a BBJ_COND.
94 * However, in debuggable code, we might not do that, so bbsCount might be 1.
98 unsigned bbsCount; // count of cases (includes 'default' if bbsHasDefault)
99 BasicBlock** bbsDstTab; // case label table address
// A newly-built descriptor assumes a default case is present (invariant 1 above:
// the default case, when present, is the last entry in bbsDstTab).
102 BBswtDesc() : bbsHasDefault(true)
// removeDefault: drop the default case. Requires a default to be present and a
// non-empty case table; afterwards bbsHasDefault is false.
// NOTE(review): any statement adjusting bbsCount is not visible here -- confirm.
108 assert(bbsHasDefault);
109 assert(bbsCount > 0);
110 bbsHasDefault = false;
// getDefault: return the default case's target; by invariant 1 above it is the
// last entry of bbsDstTab.
114 BasicBlock* getDefault()
116 assert(bbsHasDefault);
117 assert(bbsCount > 0);
118 return bbsDstTab[bbsCount - 1];
127 /*****************************************************************************/
// ThisInitState: verification lattice tracking whether the 'this' pointer has
// been initialized inside a constructor (see EntryState::thisInitialized below).
131 TIS_Bottom, // We don't know anything about the 'this' pointer.
132 TIS_Uninit, // The 'this' pointer for this constructor is known to be uninitialized.
133 TIS_Init, // The 'this' pointer for this constructor is known to be initialized.
134 TIS_Top, // This results from merging the state of two blocks one with TIS_Uninit and the other with TIS_Init.
135 // We use this in fault blocks to prevent us from accessing the 'this' pointer, but otherwise
136 // allowing the fault block to generate code.
// Verifier-tracked state on entry to a basic block: the 'this'-pointer
// initialization state plus a snapshot of the operand stack (see bbEntryState below).
141 ThisInitState thisInitialized; // used to track whether the this ptr is initialized.
142 unsigned esStackDepth; // size of esStack
143 StackEntry* esStack; // ptr to stack
146 // Enumeration of the kinds of memory whose state changes the compiler tracks
149 ByrefExposed = 0, // Includes anything byrefs can read/write (everything in GcHeap, address-taken locals,
150 // unmanaged heap, callers' locals, etc.)
151 GcHeap, // Includes actual GC heap, and also static fields
152 MemoryKindCount, // Number of MemoryKinds
// Display names for dumping; must stay in the same order as the MemoryKind enum above.
155 const char* const memoryKindNames[] = {"ByrefExposed", "GcHeap"};
158 // Bitmask describing a set of memory kinds (usable in bitfields)
159 typedef unsigned int MemoryKindSet;
161 // Bitmask for a MemoryKindSet containing just the specified MemoryKind
162 inline MemoryKindSet memoryKindSet(MemoryKind memoryKind)
164 return (1U << memoryKind);
167 // Bitmask for a MemoryKindSet containing the specified MemoryKinds
// Variadic overload: ORs together the single-bit mask of each supplied kind.
168 template <typename... MemoryKinds>
169 inline MemoryKindSet memoryKindSet(MemoryKind memoryKind, MemoryKinds... memoryKinds)
171 return memoryKindSet(memoryKind) | memoryKindSet(memoryKinds...);
174 // Bitmask containing all the MemoryKinds
// (all bits in [0, MemoryKindCount) set)
175 const MemoryKindSet fullMemoryKindSet = (1 << MemoryKindCount) - 1;
177 // Bitmask containing no MemoryKinds
178 const MemoryKindSet emptyMemoryKindSet = 0;
180 // Standard iterator class for iterating through MemoryKinds
181 class MemoryKindIterator
// Wraps an integer cursor ('value') ranging over [0, MemoryKindCount).
186 explicit inline MemoryKindIterator(int val) : value(val)
189 inline MemoryKindIterator& operator++()
// Post-increment: returns an iterator holding the pre-increment value.
194 inline MemoryKindIterator operator++(int)
196 return MemoryKindIterator(value++);
// Dereference yields the current MemoryKind.
198 inline MemoryKind operator*()
200 return static_cast<MemoryKind>(value);
202 friend bool operator==(const MemoryKindIterator& left, const MemoryKindIterator& right)
204 return left.value == right.value;
206 friend bool operator!=(const MemoryKindIterator& left, const MemoryKindIterator& right)
208 return left.value != right.value;
212 // Empty struct that allows enumerating memory kinds via `for(MemoryKind kind : allMemoryKinds())`
213 struct allMemoryKinds
215 inline allMemoryKinds()
// Iteration covers every MemoryKind value in [0, MemoryKindCount).
218 inline MemoryKindIterator begin()
220 return MemoryKindIterator(0);
222 inline MemoryKindIterator end()
224 return MemoryKindIterator(MemoryKindCount);
228 // This encapsulates the "exception handling" successors of a block. That is,
229 // if a basic block BB1 occurs in a try block, we consider the first basic block
230 // BB2 of the corresponding handler to be an "EH successor" of BB1. Because we
231 // make the conservative assumption that control flow can jump from a try block
232 // to its handler at any time, the immediate (regular control flow)
233 // predecessor(s) of the first block of a try block are also considered to
234 // have the first block of the handler as an EH successor. This makes variables that
235 // are "live-in" to the handler become "live-out" for these try-predecessor block,
236 // so that they become live-in to the try -- which we require.
237 class EHSuccessorIter
239 // The current compilation.
242 // The block whose EH successors we are iterating over.
245 // The current "regular" successor of "m_block" that we're considering.
246 BasicBlock* m_curRegSucc;
248 // The current try block. If non-null, then the current successor "m_curRegSucc"
249 // is the first block of the handler of this block. While this try block has
250 // enclosing try's that also start with "m_curRegSucc", the corresponding handlers will be
251 // further EH successors.
254 // The number of "regular" (i.e., non-exceptional) successors that remain to
255 // be considered. If BB1 has successor BB2, and BB2 is the first block of a
256 // try block, then we consider the catch block of BB2's try to be an EH
257 // successor of BB1. This captures the iteration over the successors of BB1
258 // for this purpose. (In reverse order; we're done when this field is 0).
259 int m_remainingRegSuccs;
261 // Requires that "m_curTry" is NULL. Determines whether there is, as
262 // discussed just above, a regular successor that's the first block of a
263 // try; if so, sets "m_curTry" to that try block. (As noted above, selecting
264 // the try containing the current regular successor as the "current try" may cause
265 // multiple first-blocks of catches to be yielded as EH successors: trys enclosing
266 // the current try are also included if they also start with the current EH successor.)
267 void FindNextRegSuccTry();
270 // Returns the standard "end" iterator.
// All members null/zero; operator== below then identifies this as the end of iteration.
272 : m_comp(nullptr), m_block(nullptr), m_curRegSucc(nullptr), m_curTry(nullptr), m_remainingRegSuccs(0)
276 // Initializes the iterator to represent the EH successors of "block".
277 EHSuccessorIter(Compiler* comp, BasicBlock* block);
279 // Go on to the next EH successor.
280 void operator++(void);
282 // Requires that "this" is not equal to the standard "end" iterator. Returns the
283 // current EH successor.
284 BasicBlock* operator*();
286 // Returns "true" iff "*this" is equal to "ehsi" -- ignoring the "m_comp"
287 // and "m_block" fields.
// Comparing only m_curTry and m_remainingRegSuccs is sufficient to detect the
// end iterator produced by the default constructor above.
288 bool operator==(const EHSuccessorIter& ehsi)
290 // Ignore the compiler; we'll assume that's the same.
291 return m_curTry == ehsi.m_curTry && m_remainingRegSuccs == ehsi.m_remainingRegSuccs;
294 bool operator!=(const EHSuccessorIter& ehsi)
296 return !((*this) == ehsi);
300 // Yields both normal and EH successors (in that order) in one iteration.
301 class AllSuccessorIter
303 // Normal succ state.
// NOTE(review): the declaration of the normal-successor cursor m_normSucc (used in
// the constructor and operator== below) is not visible in this view -- confirm.
307 unsigned m_numNormSuccs;
308 EHSuccessorIter m_ehIter;
310 // True iff m_blk is a BBJ_CALLFINALLY block, and the current try block of m_ehIter,
311 // the first block of whose handler would be next yielded, is the jump target of m_blk.
312 inline bool CurTryIsBlkCallFinallyTarget();
// Default constructor. NOTE(review): its initialized state is not visible here.
315 inline AllSuccessorIter()
319 // Initializes "this" to iterate over all successors of "block."
320 inline AllSuccessorIter(Compiler* comp, BasicBlock* block);
322 // Used for constructing an appropriate "end" iter. Should be called with
323 // the number of normal successors of the block being iterated.
324 AllSuccessorIter(unsigned numSuccs) : m_normSucc(numSuccs), m_numNormSuccs(numSuccs), m_ehIter()
328 // Go on to the next successor.
329 inline void operator++(void);
331 // Requires that "this" is not equal to the standard "end" iterator. Returns the
332 // current successor.
333 inline BasicBlock* operator*();
335 // Returns "true" iff "*this" is equal to "asi" -- ignoring the "m_comp"
336 // and "m_block" fields.
337 bool operator==(const AllSuccessorIter& asi)
339 return m_normSucc == asi.m_normSucc && m_ehIter == asi.m_ehIter;
342 bool operator!=(const AllSuccessorIter& asi)
344 return !((*this) == asi);
348 //------------------------------------------------------------------------
349 // BasicBlock: describes a basic block in the flowgraph.
351 // Note that this type derives from LIR::Range in order to make the LIR
352 // utilities that are polymorphic over basic block and scratch ranges
353 // faster and simpler.
355 struct BasicBlock : private LIR::Range
359 BasicBlock* bbNext; // next BB in ascending PC offset order
// setNext: mutator for bbNext. NOTE(review): the body is not visible in this
// view -- confirm whether it maintains any related state beyond the link itself.
362 void setNext(BasicBlock* next)
371 unsigned __int64 bbFlags; // see BBF_xxxx below
373 unsigned bbNum; // the block's number
375 unsigned bbPostOrderNum; // the block's post order number in the graph.
376 unsigned bbRefs; // number of blocks that can reach here, either by fall-through or a branch. If this falls to zero,
377 // the block is unreachable.
381 #define BBF_VISITED 0x00000001 // BB visited during optimizations
382 #define BBF_MARKED 0x00000002 // BB marked during optimizations
383 #define BBF_CHANGED 0x00000004 // input/output of this block has changed
384 #define BBF_REMOVED 0x00000008 // BB has been removed from bb-list
386 #define BBF_DONT_REMOVE 0x00000010 // BB should not be removed during flow graph optimizations
387 #define BBF_IMPORTED 0x00000020 // BB byte-code has been imported
388 #define BBF_INTERNAL 0x00000040 // BB has been added by the compiler
389 #define BBF_FAILED_VERIFICATION 0x00000080 // BB has verification exception
391 #define BBF_TRY_BEG 0x00000100 // BB starts a 'try' block
392 #define BBF_FUNCLET_BEG 0x00000200 // BB is the beginning of a funclet
393 #define BBF_HAS_NULLCHECK 0x00000400 // BB contains a null check
394 #define BBF_NEEDS_GCPOLL 0x00000800 // This BB is the source of a back edge and needs a GC Poll
396 #define BBF_RUN_RARELY 0x00001000 // BB is rarely run (catch clauses, blocks with throws etc)
397 #define BBF_LOOP_HEAD 0x00002000 // BB is the head of a loop
398 #define BBF_LOOP_CALL0 0x00004000 // BB starts a loop that sometimes won't call
399 #define BBF_LOOP_CALL1 0x00008000 // BB starts a loop that will always call
401 #define BBF_HAS_LABEL 0x00010000 // BB needs a label
402 #define BBF_JMP_TARGET 0x00020000 // BB is a target of an implicit/explicit jump
403 #define BBF_HAS_JMP 0x00040000 // BB executes a JMP instruction (instead of return)
404 #define BBF_GC_SAFE_POINT 0x00080000 // BB has a GC safe point (a call). More abstractly, BB does not require a
405 // (further) poll -- this may be because this BB has a call, or, in some
406 // cases, because the BB occurs in a loop, and we've determined that all
407 // paths in the loop body leading to BB include a call.
409 #define BBF_HAS_VTABREF 0x00100000 // BB contains reference of vtable
410 #define BBF_HAS_IDX_LEN 0x00200000 // BB contains simple index or length expressions on an array local var.
411 #define BBF_HAS_NEWARRAY 0x00400000 // BB contains 'new' of an array
412 #define BBF_HAS_NEWOBJ 0x00800000 // BB contains 'new' of an object type.
414 #if FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_)
416 #define BBF_FINALLY_TARGET 0x01000000 // BB is the target of a finally return: where a finally will return during
417 // non-exceptional flow. Because the ARM calling sequence for calling a
418 // finally explicitly sets the return address to the finally target and jumps
419 // to the finally, instead of using a call instruction, ARM needs this to
420 // generate correct code at the finally target, to allow for proper stack
421 // unwind from within a non-exceptional call to a finally.
423 #endif // FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_)
425 #define BBF_BACKWARD_JUMP 0x02000000 // BB is surrounded by a backward jump/switch arc
426 #define BBF_RETLESS_CALL 0x04000000 // BBJ_CALLFINALLY that will never return (and therefore, won't need a paired
427 // BBJ_ALWAYS); see isBBCallAlwaysPair().
428 #define BBF_LOOP_PREHEADER 0x08000000 // BB is a loop preheader block
430 #define BBF_COLD 0x10000000 // BB is cold
431 #define BBF_PROF_WEIGHT 0x20000000 // BB weight is computed from profile data
433 #ifdef LEGACY_BACKEND
435 #define BBF_FORWARD_SWITCH 0x40000000 // Aux flag used in FP codegen to know if a jmptable entry has been forwarded
437 #else // !LEGACY_BACKEND
439 #define BBF_IS_LIR 0x40000000 // Set if the basic block contains LIR (as opposed to HIR)
441 #endif // LEGACY_BACKEND
443 #define BBF_KEEP_BBJ_ALWAYS 0x80000000 // A special BBJ_ALWAYS block, used by EH code generation. Keep the jump kind
444 // as BBJ_ALWAYS. Used for the paired BBJ_ALWAYS block following the
445 // BBJ_CALLFINALLY block, as well as, on x86, the final step block out of a
448 #define BBF_CLONED_FINALLY_BEGIN 0x100000000 // First block of a cloned finally region
449 #define BBF_CLONED_FINALLY_END 0x200000000 // Last block of a cloned finally region
453 #define BBF_DOMINATED_BY_EXCEPTIONAL_ENTRY 0x400000000 // Block is dominated by exceptional entry.
455 // Flags that relate blocks to loop structure.
457 #define BBF_LOOP_FLAGS (BBF_LOOP_PREHEADER | BBF_LOOP_HEAD | BBF_LOOP_CALL0 | BBF_LOOP_CALL1)
// True iff this block has been flagged as rarely executed (BBF_RUN_RARELY).
459 bool isRunRarely() const
461 return ((bbFlags & BBF_RUN_RARELY) != 0);
// True iff this block has been flagged as the head of a loop (BBF_LOOP_HEAD).
463 bool isLoopHead() const
465 return ((bbFlags & BBF_LOOP_HEAD) != 0);
468 // Flags to update when two blocks are compacted
470 #define BBF_COMPACT_UPD \
471 (BBF_CHANGED | BBF_GC_SAFE_POINT | BBF_HAS_JMP | BBF_NEEDS_GCPOLL | BBF_HAS_IDX_LEN | BBF_BACKWARD_JUMP | \
472 BBF_HAS_NEWARRAY | BBF_HAS_NEWOBJ)
474 // Flags a block should not have had before it is split.
476 #ifdef LEGACY_BACKEND
477 #define BBF_SPLIT_NONEXIST \
478 (BBF_CHANGED | BBF_LOOP_HEAD | BBF_LOOP_CALL0 | BBF_LOOP_CALL1 | BBF_RETLESS_CALL | BBF_LOOP_PREHEADER | \
479 BBF_COLD | BBF_FORWARD_SWITCH)
480 #else // !LEGACY_BACKEND
481 #define BBF_SPLIT_NONEXIST \
482 (BBF_CHANGED | BBF_LOOP_HEAD | BBF_LOOP_CALL0 | BBF_LOOP_CALL1 | BBF_RETLESS_CALL | BBF_LOOP_PREHEADER | BBF_COLD)
483 #endif // LEGACY_BACKEND
485 // Flags lost by the top block when a block is split.
486 // Note, this is a conservative guess.
487 // For example, the top block might or might not have BBF_GC_SAFE_POINT,
488 // but we assume it does not have BBF_GC_SAFE_POINT any more.
490 #define BBF_SPLIT_LOST (BBF_GC_SAFE_POINT | BBF_HAS_JMP | BBF_KEEP_BBJ_ALWAYS | BBF_CLONED_FINALLY_END)
492 // Flags gained by the bottom block when a block is split.
493 // Note, this is a conservative guess.
494 // For example, the bottom block might or might not have BBF_HAS_NEWARRAY,
495 // but we assume it has BBF_HAS_NEWARRAY.
497 // TODO: Should BBF_RUN_RARELY be added to BBF_SPLIT_GAINED ?
499 #define BBF_SPLIT_GAINED \
500 (BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_HAS_JMP | BBF_BACKWARD_JUMP | BBF_HAS_IDX_LEN | BBF_HAS_NEWARRAY | \
501 BBF_PROF_WEIGHT | BBF_HAS_NEWOBJ | BBF_KEEP_BBJ_ALWAYS | BBF_CLONED_FINALLY_END)
503 #ifndef __GNUC__ // GCC doesn't like C_ASSERT at global scope
504 static_assert_no_msg((BBF_SPLIT_NONEXIST & BBF_SPLIT_LOST) == 0);
505 static_assert_no_msg((BBF_SPLIT_NONEXIST & BBF_SPLIT_GAINED) == 0);
509 void dspFlags(); // Print the flags
510 unsigned dspCheapPreds(); // Print the predecessors (bbCheapPreds)
511 unsigned dspPreds(); // Print the predecessors (bbPreds)
512 unsigned dspSuccs(Compiler* compiler); // Print the successors. The 'compiler' argument determines whether EH
513 // regions are printed: see NumSucc() for details.
514 void dspJumpKind(); // Print the block jump kind (e.g., BBJ_NONE, BBJ_COND, etc.).
515 void dspBlockHeader(Compiler* compiler,
516 bool showKind = true,
517 bool showFlags = false,
518 bool showPreds = true); // Print a simple basic block header for various output, including a
519 // list of predecessors and successors.
520 const char* dspToString(int blockNumPadding = 0);
using weight_t = unsigned; // Type used to hold block and edge weights.
                           // Note that for CLR v2.0 and earlier our
                           // block weights were stored using unsigned shorts.
527 #define BB_UNITY_WEIGHT 100 // how much a normal execute once block weights
528 #define BB_LOOP_WEIGHT 8 // how much more loops are weighted
529 #define BB_ZERO_WEIGHT 0
530 #define BB_MAX_WEIGHT ULONG_MAX // we're using an 'unsigned' for the weight
531 #define BB_VERY_HOT_WEIGHT 256 // how many average hits a BB has (per BBT scenario run) for this block
532 // to be considered as very hot
// Block weight; see the BB_*_WEIGHT macros above for the unit and limits.
534 weight_t bbWeight; // The dynamic execution weight of this block
536 // getCalledCount -- get the value used to normalize weights for this method
537 weight_t getCalledCount(Compiler* comp);
539 // getBBWeight -- get the normalized weight of this block
540 weight_t getBBWeight(Compiler* comp);
542 // hasProfileWeight -- Returns true if this block's weight came from profile data
// (BBF_PROF_WEIGHT is set by setBBProfileWeight below).
543 bool hasProfileWeight() const
545 return ((this->bbFlags & BBF_PROF_WEIGHT) != 0);
548 // setBBWeight -- if the block weight is not derived from a profile,
549 // then set the weight to the input weight, making sure to not overflow BB_MAX_WEIGHT
550 // Note to set the weight from profile data, instead use setBBProfileWeight
551 void setBBWeight(weight_t weight)
// Profile-derived weights are authoritative and never overwritten here.
553 if (!hasProfileWeight())
// Clamp to BB_MAX_WEIGHT (bbWeight is unsigned).
555 this->bbWeight = min(weight, BB_MAX_WEIGHT);
559 // setBBProfileWeight -- Set the profile-derived weight for a basic block
// Also marks the block profile-weighted so setBBWeight() won't overwrite it.
// NOTE(review): parameter is 'unsigned' rather than weight_t; the types are
// identical (weight_t is a typedef of unsigned) but weight_t would be clearer.
560 void setBBProfileWeight(unsigned weight)
562 this->bbFlags |= BBF_PROF_WEIGHT;
563 this->bbWeight = weight;
566 // modifyBBWeight -- same as setBBWeight, but also make sure that if the block is rarely run, it stays that
567 // way, and if it's not rarely run then its weight never drops below 1.
568 void modifyBBWeight(weight_t weight)
// A zero (rarely-run) weight stays zero; otherwise the new weight is at least 1.
570 if (this->bbWeight != BB_ZERO_WEIGHT)
572 setBBWeight(max(weight, 1));
576 // this block will inherit the same weight and relevant bbFlags as bSrc
577 void inheritWeight(BasicBlock* bSrc)
579 this->bbWeight = bSrc->bbWeight;
// Mirror the profile-derived flag from the source block.
581 if (bSrc->hasProfileWeight())
583 this->bbFlags |= BBF_PROF_WEIGHT;
587 this->bbFlags &= ~BBF_PROF_WEIGHT;
// Keep BBF_RUN_RARELY consistent with a zero weight.
590 if (this->bbWeight == 0)
592 this->bbFlags |= BBF_RUN_RARELY;
596 this->bbFlags &= ~BBF_RUN_RARELY;
600 // Similar to inheritWeight(), but we're splitting a block (such as creating blocks for qmark removal).
601 // So, specify a percentage (0 to 99; if it's 100, just use inheritWeight()) of the weight that we're
602 // going to inherit. Since the number isn't exact, clear the BBF_PROF_WEIGHT flag.
603 void inheritWeightPercentage(BasicBlock* bSrc, unsigned percentage)
605 assert(0 <= percentage && percentage < 100);
607 // Check for overflow
// If bSrc->bbWeight * 100 wrapped around (unsigned overflow), scaling is unsafe;
// fall back to inheriting the full weight.
608 if (bSrc->bbWeight * 100 <= bSrc->bbWeight)
610 this->bbWeight = bSrc->bbWeight;
614 this->bbWeight = bSrc->bbWeight * percentage / 100;
// The scaled weight is only an estimate, so drop any profile-derived claim.
617 this->bbFlags &= ~BBF_PROF_WEIGHT;
// Keep BBF_RUN_RARELY consistent with a zero weight.
619 if (this->bbWeight == 0)
621 this->bbFlags |= BBF_RUN_RARELY;
625 this->bbFlags &= ~BBF_RUN_RARELY;
630 // This is used to override any profiling data
631 // and force a block to be in the hot region.
632 // We only call this method for handler entry point
633 // and only when HANDLER_ENTRY_MUST_BE_IN_HOT_SECTION is 1.
634 // Doing this helps fgReorderBlocks() by telling
635 // it to try to move these blocks into the hot region.
636 // Note that we do this strictly as an optimization,
637 // not for correctness. fgDetermineFirstColdBlock()
638 // will find all handler entry points and ensure that
639 // for now we don't place them in the cold section.
643 if (this->bbWeight == BB_ZERO_WEIGHT)
645 this->bbFlags &= ~BBF_RUN_RARELY; // Clear any RarelyRun flag
646 this->bbFlags &= ~BBF_PROF_WEIGHT; // Clear any profile-derived flag
653 return (bbWeight == BB_MAX_WEIGHT);
656 // Returns "true" if the block is empty. Empty here means there are no statement
657 // trees *except* PHI definitions.
660 // Returns "true" iff "this" is the first block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair --
661 // a block corresponding to an exit from the try of a try/finally. In the flow graph,
662 // this becomes a block that calls the finally, and a second, immediately
663 // following empty block (in the bbNext chain) to which the finally will return, and which
664 // branches unconditionally to the next block to be executed outside the try/finally.
665 // Note that code is often generated differently than this description. For example, on ARM,
666 // the target of the BBJ_ALWAYS is loaded in LR (the return register), and a direct jump is
667 // made to the 'finally'. The effect is that the 'finally' returns directly to the target of
668 // the BBJ_ALWAYS. A "retless" BBJ_CALLFINALLY is one that has no corresponding BBJ_ALWAYS.
669 // This can happen if the finally is known to not return (e.g., it contains a 'throw'). In
670 // that case, the BBJ_CALLFINALLY flags has BBF_RETLESS_CALL set. Note that ARM never has
671 // "retless" BBJ_CALLFINALLY blocks due to a requirement to use the BBJ_ALWAYS for
673 bool isBBCallAlwaysPair()
675 #if FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_)
// ARM funclet variant: every BBJ_CALLFINALLY is paired (no retless calls on ARM;
// see the assert below).
676 if (this->bbJumpKind == BBJ_CALLFINALLY)
// General variant: a BBJ_CALLFINALLY is paired only when it is not retless.
// NOTE(review): the #else/#endif lines separating these two tests are not
// visible in this view -- confirm against the full file.
678 if ((this->bbJumpKind == BBJ_CALLFINALLY) && !(this->bbFlags & BBF_RETLESS_CALL))
681 #if FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_)
682 // On ARM, there are no retless BBJ_CALLFINALLY.
683 assert(!(this->bbFlags & BBF_RETLESS_CALL));
685 // Some asserts that the next block is a BBJ_ALWAYS of the proper form.
686 assert(this->bbNext != nullptr);
687 assert(this->bbNext->bbJumpKind == BBJ_ALWAYS);
688 assert(this->bbNext->bbFlags & BBF_KEEP_BBJ_ALWAYS);
689 assert(this->bbNext->isEmpty());
699 BBjumpKinds bbJumpKind; // jump (if any) at the end of this block
701 /* The following union describes the jump target(s) of this block */
703 unsigned bbJumpOffs; // PC offset (temporary only)
704 BasicBlock* bbJumpDest; // basic block
705 BBswtDesc* bbJumpSwt; // switch descriptor
708 // NumSucc() gives the number of successors, and GetSucc() returns a given numbered successor.
710 // There are two versions of these functions: ones that take a Compiler* and ones that don't. You must
711 // always use a matching set. Thus, if you call NumSucc() without a Compiler*, you must also call
712 // GetSucc() without a Compiler*.
714 // The behavior of NumSucc()/GetSucc() is different when passed a Compiler* for blocks that end in:
715 // (1) BBJ_EHFINALLYRET (a return from a finally or fault block)
716 // (2) BBJ_EHFILTERRET (a return from EH filter block)
719 // For BBJ_EHFINALLYRET, if no Compiler* is passed, then the block is considered to have no
720 // successor. If Compiler* is passed, we figure out the actual successors. Some cases will want one behavior,
721 // other cases the other. For example, IL verification requires that these blocks end in an empty operand
722 // stack, and since the dataflow analysis of IL verification is concerned only with the contents of the
723 // operand stack, we can consider the finally block to have no successors. But a more general dataflow
724 // analysis that is tracking the contents of local variables might want to consider *all* successors,
725 // and would pass the current Compiler object.
727 // Similarly, BBJ_EHFILTERRET blocks are assumed to have no successors if Compiler* is not passed; if
728 // Compiler* is passed, NumSucc/GetSucc yields the first block of the try block's handler.
730 // For BBJ_SWITCH, if Compiler* is not passed, then all switch successors are returned. If Compiler*
731 // is passed, then only unique switch successors are returned; the duplicate successors are omitted.
733 // Note that for BBJ_COND, which has two successors (fall through and condition true branch target),
734 // only the unique targets are returned. Thus, if both targets are the same, NumSucc() will only return 1
737 // NumSucc: Returns the number of successors of "this".
739 unsigned NumSucc(Compiler* comp);
741 // GetSucc: Returns the "i"th successor. Requires (0 <= i < NumSucc()).
742 BasicBlock* GetSucc(unsigned i);
743 BasicBlock* GetSucc(unsigned i, Compiler* comp);
745 BasicBlock* GetUniquePred(Compiler* comp);
747 BasicBlock* GetUniqueSucc();
749 unsigned countOfInEdges() const
754 __declspec(property(get = getBBTreeList, put = setBBTreeList)) GenTree* bbTreeList; // the body of the block.
756 GenTree* getBBTreeList() const
761 void setBBTreeList(GenTree* tree)
766 EntryState* bbEntryState; // verifier tracked state of all entries in stack.
768 #define NO_BASE_TMP UINT_MAX // base# to use when we have none
769 unsigned bbStkTempsIn; // base# for input stack temps
770 unsigned bbStkTempsOut; // base# for output stack temps
772 #define MAX_XCPTN_INDEX (USHRT_MAX - 1)
774 // It would be nice to make bbTryIndex and bbHndIndex private, but there is still code that uses them directly,
775 // especially Compiler::fgNewBBinRegion() and friends.
777 // index, into the compHndBBtab table, of innermost 'try' clause containing the BB (used for raising exceptions).
778 // Stored as index + 1; 0 means "no try index".
779 unsigned short bbTryIndex;
781 // index, into the compHndBBtab table, of innermost handler (filter, catch, fault/finally) containing the BB.
782 // Stored as index + 1; 0 means "no handler index".
783 unsigned short bbHndIndex;
785 // Given two EH indices that are either bbTryIndex or bbHndIndex (or related), determine if index1 might be more
786 // deeply nested than index2. Both index1 and index2 are in the range [0..compHndBBtabCount], where 0 means
787 // "main function" and otherwise the value is an index into compHndBBtab[]. Note that "sibling" EH regions will
788 // have a numeric index relationship that doesn't indicate nesting, whereas a more deeply nested region must have
789 // a lower index than the region it is nested within. Note that if you compare a single block's bbTryIndex and
790 // bbHndIndex, there is guaranteed to be a nesting relationship, since that block can't be simultaneously in two
791 // sibling EH regions. In that case, "maybe" is actually "definitely".
792 static bool ehIndexMaybeMoreNested(unsigned index1, unsigned index2)
// NOTE(review): the return statements of the first two branches fall outside this
// view; the comments indicate they return false and true respectively.
796 // index1 is in the main method. It can't be more deeply nested than index2.
799 else if (index2 == 0)
801 // index1 represents an EH region, whereas index2 is the main method. Thus, index1 is more deeply nested.
807 // If index1 has a smaller index, it might be more deeply nested than index2.
810 return index1 < index2;
814 // catch type: class token of handler, or one of BBCT_*. Only set on first block of catch handler.
// Accessors for bbTryIndex/bbHndIndex. Both fields are stored biased by +1
// (see the field comments above): 0 means "no enclosing try/handler".
817 bool hasTryIndex() const
819 return bbTryIndex != 0;
821 bool hasHndIndex() const
823 return bbHndIndex != 0;
// getTryIndex: unbiased index into compHndBBtab; requires hasTryIndex().
825 unsigned getTryIndex() const
827 assert(bbTryIndex != 0);
828 return bbTryIndex - 1;
// getHndIndex: unbiased index into compHndBBtab; requires hasHndIndex().
830 unsigned getHndIndex() const
832 assert(bbHndIndex != 0);
833 return bbHndIndex - 1;
// setTryIndex: store the biased value; the assert catches unsigned-short
// truncation (val + 1 wrapping to 0).
835 void setTryIndex(unsigned val)
837 bbTryIndex = (unsigned short)(val + 1);
838 assert(bbTryIndex != 0);
// setHndIndex: same biasing and truncation check as setTryIndex.
840 void setHndIndex(unsigned val)
842 bbHndIndex = (unsigned short)(val + 1);
843 assert(bbHndIndex != 0);
// copyEHRegion: place this block in the same try and handler regions as 'from'.
854 void copyEHRegion(const BasicBlock* from)
856 bbTryIndex = from->bbTryIndex;
857 bbHndIndex = from->bbHndIndex;
// The comparisons below compare the biased indices directly (0 == "none").
860 static bool sameTryRegion(const BasicBlock* blk1, const BasicBlock* blk2)
862 return blk1->bbTryIndex == blk2->bbTryIndex;
864 static bool sameHndRegion(const BasicBlock* blk1, const BasicBlock* blk2)
866 return blk1->bbHndIndex == blk2->bbHndIndex;
// Same EH region == same try region AND same handler region.
868 static bool sameEHRegion(const BasicBlock* blk1, const BasicBlock* blk2)
870 return sameTryRegion(blk1, blk2) && sameHndRegion(blk1, blk2);
873 // Some non-zero value that will not collide with real tokens for bbCatchTyp
874 #define BBCT_NONE 0x00000000
875 #define BBCT_FAULT 0xFFFFFFFC
876 #define BBCT_FINALLY 0xFFFFFFFD
877 #define BBCT_FILTER 0xFFFFFFFE
878 #define BBCT_FILTER_HANDLER 0xFFFFFFFF
879 #define handlerGetsXcptnObj(hndTyp) ((hndTyp) != BBCT_NONE && (hndTyp) != BBCT_FAULT && (hndTyp) != BBCT_FINALLY)
881 // TODO-Cleanup: Get rid of bbStkDepth and use bbStackDepthOnEntry() instead
883 unsigned short bbStkDepth; // stack depth on entry
884 unsigned short bbFPinVars; // number of inner enregistered FP vars
887 // Basic block predecessor lists. Early in compilation, some phases might need to compute "cheap" predecessor
888 // lists. These are stored in bbCheapPreds, computed by fgComputeCheapPreds(). If bbCheapPreds is valid,
889 // 'fgCheapPredsValid' will be 'true'. Later, the "full" predecessor lists are created by fgComputePreds(), stored
890 // in 'bbPreds', and then maintained throughout compilation. 'fgComputePredsDone' will be 'true' after the
891 // full predecessor lists are created. See the comment at fgComputeCheapPreds() to see how those differ from
892 // the "full" variant.
894 BasicBlockList* bbCheapPreds; // ptr to list of cheap predecessors (used before normal preds are computed)
895 flowList* bbPreds; // ptr to list of predecessors
898 BlockSet bbReach; // Set of all blocks that can reach this one
899 BasicBlock* bbIDom; // Represent the closest dominator to this block (called the Immediate
900 // Dominator) used to compute the dominance tree.
901 unsigned bbDfsNum; // The index of this block in DFS reverse post order
902 // relative to the flow graph.
904 IL_OFFSET bbCodeOffs; // IL offset of the beginning of the block
905 IL_OFFSET bbCodeOffsEnd; // IL offset past the end of the block. Thus, the [bbCodeOffs..bbCodeOffsEnd)
906 // range is not inclusive of the end offset. The count of IL bytes in the block
907 // is bbCodeOffsEnd - bbCodeOffs, assuming neither are BAD_IL_OFFSET.
    void dspBlockILRange(); // Display the block's IL range as [XXX...YYY), where XXX and YYY might be "???" for

    // Per-block local-variable dataflow sets for liveness analysis.
    VARSET_TP bbVarUse; // variables used by block (before an assignment)
    VARSET_TP bbVarDef; // variables assigned by block (before a use)

    VARSET_TP bbLiveIn;  // variables live on entry
    VARSET_TP bbLiveOut; // variables live on exit

    // Use, def, live in/out information for the implicit memory variable.
    MemoryKindSet bbMemoryUse : MemoryKindCount; // must be set for any MemoryKinds this block references
    MemoryKindSet bbMemoryDef : MemoryKindCount; // must be set for any MemoryKinds this block mutates
    MemoryKindSet bbMemoryLiveIn : MemoryKindCount;  // memory kinds live on entry to this block
    MemoryKindSet bbMemoryLiveOut : MemoryKindCount; // memory kinds live on exit from this block
    MemoryKindSet bbMemoryHavoc : MemoryKindCount; // If true, at some point the block does an operation
                                                   // that leaves memory in an unknown state. (E.g.,
                                                   // unanalyzed call, store through unknown pointer...)
    // We want to make phi functions for the special implicit var memory. But since this is not a real
    // lclVar, and thus has no local #, we can't use a GenTreePhiArg. Instead, we use this struct.
        unsigned      m_ssaNum;  // SSA# for incoming value.
        MemoryPhiArg* m_nextArg; // Next arg in the list, else NULL.

        // Construct a phi arg with the given SSA number, linked ahead of 'nextArg'.
        MemoryPhiArg(unsigned ssaNum, MemoryPhiArg* nextArg = nullptr) : m_ssaNum(ssaNum), m_nextArg(nextArg)

        // Placement-style allocation from the compiler's memory allocator.
        void* operator new(size_t sz, class Compiler* comp);

    static MemoryPhiArg* EmptyMemoryPhiDef; // Special value (0x1, FWIW) to represent a to-be-filled in Phi arg list
    MemoryPhiArg* bbMemorySsaPhiFunc[MemoryKindCount]; // If the "in" Heap SSA var is not a phi definition, this value
                                                       // Otherwise, it is either the special value EmptyMemoryPhiDef, to indicate
                                                       // that Heap needs a phi definition on entry, or else it is the linked list
                                                       // of the phi arguments.
    unsigned bbMemorySsaNumIn[MemoryKindCount];  // The SSA # of memory on entry to the block.
    unsigned bbMemorySsaNumOut[MemoryKindCount]; // The SSA # of memory on exit from the block.
    VARSET_TP bbScope; // variables in scope over the block

    // Initialize this block's variable sets; see the definition for details.
    void InitVarSets(class Compiler* comp);

    /* The following are the standard bit sets for dataflow analysis.
     * We perform CSE and range-checks at the same time
     * and assertion propagation separately,
     * thus we can union them since the two operations are completely disjoint.
    EXPSET_TP bbCseGen;       // CSEs computed by block
    ASSERT_TP bbAssertionGen; // value assignments computed by block
    EXPSET_TP bbCseIn;        // CSEs available on entry
    ASSERT_TP bbAssertionIn;  // value assignments available on entry
    EXPSET_TP bbCseOut;       // CSEs available on exit
    ASSERT_TP bbAssertionOut; // value assignments available on exit
#if FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_)
    void* bbUnwindNopEmitCookie; // NOTE(review): presumably an emitter cookie for the ARM funclet unwind NOP — confirm at use sites
#endif // FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_)

    // Verification-related entry/exit state for this block.
    stackDesc bbStackIn;  // stack descriptor for input
    stackDesc bbStackOut; // stack descriptor for output

    verTypeVal* bbTypesIn;  // list of variable types on input
    verTypeVal* bbTypesOut; // list of variable types on output

#if FEATURE_STACK_FP_X87
    FlatFPStateX87* bbFPStateX87; // State of FP stack on entry to the basic block
#endif // FEATURE_STACK_FP_X87

    /* The following fields used for loop detection */
    typedef unsigned char loopNumber;
    static const unsigned NOT_IN_LOOP = UCHAR_MAX; // sentinel for bbNatLoopNum: block is in no recognized loop

    // This is the label a loop gets as part of the second, reachability-based
    // loop discovery mechanism. This is apparently only used for debugging.
    // We hope we'll eventually just have one loop-discovery mechanism, and this will go away.
    loopNumber bbLoopNum; // set to 'n' for a loop #n header

    loopNumber bbNatLoopNum; // Index, in optLoopTable, of most-nested loop that contains this block,
                             // or else NOT_IN_LOOP if this block is not in a loop.

#define MAX_LOOP_NUM 16       // we're using a 'short' for the mask
#define LOOP_MASK_TP unsigned // must be big enough for a mask
    //-------------------------------------------------------------------------

#if MEASURE_BLOCK_SIZE
    static size_t s_Size;  // NOTE(review): presumably aggregate BasicBlock size statistic — confirm at use sites
    static size_t s_Count; // NOTE(review): presumably BasicBlock allocation count — confirm at use sites
#endif // MEASURE_BLOCK_SIZE

    // Whether control can flow from this block into its bbNext successor — see the definition.
    bool bbFallsThrough();

    // Our slop fraction is 1/128 of the block weight rounded off
    static weight_t GetSlopFraction(weight_t weightBlk)
        return ((weightBlk + 64) / 128);

    // Given the edge b1 -> b2, calculate the slop fraction by
    // using the higher of the two block weights
    static weight_t GetSlopFraction(BasicBlock* b1, BasicBlock* b2)
        return GetSlopFraction(max(b1->bbWeight, b2->bbWeight));
    unsigned bbTgtStkDepth; // Native stack depth on entry (for throw-blocks)
    static unsigned s_nMaxTrees; // The max # of tree nodes in any BB

    unsigned bbStmtNum; // The statement number of the first stmt in this block

    // This is used in integrity checks. We semi-randomly pick a traversal stamp, label all blocks
    // in the BB list with that stamp (in this field); then we can tell if (e.g.) predecessors are
    // still in the BB list by whether they have the same stamp (with high probability).
    unsigned bbTraversalStamp;

    // Entry-state accessors and mutators (declarations only; defined elsewhere).
    ThisInitState bbThisOnEntry();
    unsigned bbStackDepthOnEntry();
    void bbSetStack(void* stackBuffer);
    StackEntry* bbStackOnEntry();
    void bbSetRunRarely();

    // "bbNum" is one-based (for unknown reasons); it is sometimes useful to have the corresponding
    // zero-based number for use as an array index.
    // Statement/node accessors (declarations only; defined elsewhere).
    GenTreeStmt* firstStmt() const;
    GenTreeStmt* lastStmt() const;

    GenTree* firstNode();
    GenTree* lastNode();

    // Queries about how this block ends (declarations only; defined elsewhere).
    bool endsWithJmpMethod(Compiler* comp);

    bool endsWithTailCall(Compiler* comp,
                          bool fastTailCallsOnly,
                          bool tailCallsConvertibleToLoopOnly,
                          GenTree** tailCall);

    bool endsWithTailCallOrJmp(Compiler* comp, bool fastTailCallsOnly = false);

    bool endsWithTailCallConvertibleToLoop(Compiler* comp, GenTree** tailCall);

    // Returns the first statement in the statement list of "this" that is
    // not an SSA definition (a lcl = phi(...) assignment).
    GenTreeStmt* FirstNonPhiDef();
    GenTree* FirstNonPhiDefOrCatchArgAsg();
    // Default constructor: live-in/live-out var sets start uninitialized (see InitVarSets).
    BasicBlock() : bbLiveIn(VarSetOps::UninitVal()), bbLiveOut(VarSetOps::UninitVal())

    // Begin/end iterators over this block's EH successors.
    EHSuccessorIter StartEHSuccs(Compiler* comp)
        return EHSuccessorIter(comp, this);
    EHSuccessorIter EndEHSuccs()
        return EHSuccessorIter();

    friend struct EHSuccs;

    // Begin/end iterators over ALL of this block's successors (normal + EH).
    AllSuccessorIter StartAllSuccs(Compiler* comp)
        return AllSuccessorIter(comp, this);
    AllSuccessorIter EndAllSuccs(Compiler* comp)
        return AllSuccessorIter(NumSucc(comp));

    friend struct AllSuccs;

    // Iteratable collection of the EH successors of a block.
        BasicBlock* m_block;
        EHSuccs(Compiler* comp, BasicBlock* block) : m_comp(comp), m_block(block)
        EHSuccessorIter begin()
            return m_block->StartEHSuccs(m_comp);
        EHSuccessorIter end()
            return EHSuccessorIter();

    // Range object for range-style iteration over EH successors.
    EHSuccs GetEHSuccs(Compiler* comp)
        return EHSuccs(comp, this);

        BasicBlock* m_block;
        AllSuccs(Compiler* comp, BasicBlock* block) : m_comp(comp), m_block(block)
        AllSuccessorIter begin()
            return m_block->StartAllSuccs(m_comp);
        AllSuccessorIter end()
            return AllSuccessorIter(m_block->NumSucc(m_comp));

    // Range object for range-style iteration over all successors.
    AllSuccs GetAllSuccs(Compiler* comp)
        return AllSuccs(comp, this);
    // Try to clone block state and statements from `from` block to `to` block (which must be new/empty),
    // optionally replacing uses of local `varNum` with IntCns `varVal`. Return true if all statements
    // in the block are cloned successfully, false (with partially-populated `to` block) if one fails.
    static bool CloneBlockState(
        Compiler* compiler, BasicBlock* to, const BasicBlock* from, unsigned varNum = (unsigned)-1, int varVal = 0);

    // Switch this block to LIR form spanning [firstNode..lastNode] — see the definition.
    void MakeLIR(GenTree* firstNode, GenTree* lastNode);

    // Record that this block is dominated by the method's exceptional entry.
    void SetDominatedByExceptionalEntryFlag()
        bbFlags |= BBF_DOMINATED_BY_EXCEPTIONAL_ENTRY;

    // Whether the BBF_DOMINATED_BY_EXCEPTIONAL_ENTRY flag is set on this block.
    bool IsDominatedByExceptionalEntryFlag()
        return (bbFlags & BBF_DOMINATED_BY_EXCEPTIONAL_ENTRY) != 0;
struct JitPtrKeyFuncs<BasicBlock> : public JitKeyFuncsDefEquals<const BasicBlock*>
    // Make sure hashing is deterministic and not on "ptr."
    static unsigned GetHashCode(const BasicBlock* ptr);

// A set of blocks, implemented as a block->bool hash table.
typedef JitHashTable<BasicBlock*, JitPtrKeyFuncs<BasicBlock>, bool> BlkSet;

// A vector of blocks.
typedef jitstd::vector<BasicBlock*> BlkVector;

// A map of block -> set of blocks, can be used as sparse block trees.
typedef JitHashTable<BasicBlock*, JitPtrKeyFuncs<BasicBlock>, BlkSet*> BlkToBlkSetMap;

// A map of block -> vector of blocks, can be used as sparse block trees.
typedef JitHashTable<BasicBlock*, JitPtrKeyFuncs<BasicBlock>, BlkVector> BlkToBlkVectorMap;

// Map from Block to Block. Used for a variety of purposes.
typedef JitHashTable<BasicBlock*, JitPtrKeyFuncs<BasicBlock>, BasicBlock*> BlockToBlockMap;
1217 // In compiler terminology the control flow between two BasicBlocks
1218 // is typically referred to as an "edge". Most well known are the
1219 // backward branches for loops, which are often called "back-edges".
1221 // "struct flowList" is the type that represents our control flow edges.
1222 // This type is a linked list of zero or more "edges".
1223 // (The list of zero edges is represented by NULL.)
1224 // Every BasicBlock has a field called bbPreds of this type. This field
1225 // represents the list of "edges" that flow into this BasicBlock.
1226 // The flowList type only stores the BasicBlock* of the source for the
1227 // control flow edge. The destination block for the control flow edge
1228 // is implied to be the block which contained the bbPreds field.
1230 // For a switch branch target there may be multiple "edges" that have
1231 // the same source block (and destination block). We need to count the
1232 // number of these edges so that during optimization we will know when
1233 // we have zero of them. Rather than have extra flowList entries we
1234 // increment the flDupCount field.
1236 // When we have Profile weight for the BasicBlocks we can usually compute
1237 // the number of times each edge was executed by examining the adjacent
1238 // BasicBlock weights. As we are doing for BasicBlocks, we call the number
1239 // of times that a control flow edge was executed the "edge weight".
1240 // In order to compute the edge weights we need to use a bounded range
1241 // for every edge weight. These two fields, 'flEdgeWeightMin' and 'flEdgeWeightMax'
1242 // are used to hold a bounded range. Most often these will converge such
1243 // that both values are the same and that value is the exact edge weight.
// Sometimes we are left with a range of possible values between [Min..Max]
1245 // which represents an inexact edge weight.
1247 // The bbPreds list is initially created by Compiler::fgComputePreds()
1248 // and is incrementally kept up to date.
1250 // The edge weight are computed by Compiler::fgComputeEdgeWeights()
1251 // the edge weights are used to straighten conditional branches
1252 // by Compiler::fgReorderBlocks()
1254 // We have a simpler struct, BasicBlockList, which is simply a singly-linked
1255 // list of blocks. This is used for various purposes, but one is as a "cheap"
1256 // predecessor list, computed by fgComputeCheapPreds(), and stored as a list
1257 // on BasicBlock pointed to by bbCheapPreds.
struct BasicBlockList
    BasicBlockList* next;  // The next BasicBlock in the list, nullptr for end of list.
    BasicBlock*     block; // The BasicBlock of interest.

    // Default: an empty list node.
    BasicBlockList() : next(nullptr), block(nullptr)

    // Construct a node for 'blk' linked ahead of list 'rest'.
    BasicBlockList(BasicBlock* blk, BasicBlockList* rest) : next(rest), block(blk)

    flowList*   flNext;  // The next BasicBlock in the list, nullptr for end of list.
    BasicBlock* flBlock; // The BasicBlock of interest.

    BasicBlock::weight_t flEdgeWeightMin; // lower bound of this edge's weight (see comment above flowList)
    BasicBlock::weight_t flEdgeWeightMax; // upper bound of this edge's weight

    unsigned flDupCount; // The count of duplicate "edges" (use only for switch stmts)

    // These two methods are used to set new values for flEdgeWeightMin and flEdgeWeightMax
    // they are used only during the computation of the edge weights
    // They return false if the newWeight is not between the current [min..max]
    // when slop is non-zero we allow for the case where our weights might be off by 'slop'
    bool setEdgeWeightMinChecked(BasicBlock::weight_t newWeight, BasicBlock::weight_t slop, bool* wbUsedSlop);
    bool setEdgeWeightMaxChecked(BasicBlock::weight_t newWeight, BasicBlock::weight_t slop, bool* wbUsedSlop);

    // Default: an empty edge node with zero weight bounds and no duplicates.
    flowList() : flNext(nullptr), flBlock(nullptr), flEdgeWeightMin(0), flEdgeWeightMax(0), flDupCount(0)

    // Construct an edge from 'blk' linked ahead of list 'rest'; weight bounds start at zero.
    flowList(BasicBlock* blk, flowList* rest)
        : flNext(rest), flBlock(blk), flEdgeWeightMin(0), flEdgeWeightMax(0), flDupCount(0)
// This enum represents a pre/post-visit action state to emulate a depth-first
// spanning tree traversal of a tree or graph.
    DSS_Invalid, // The initialized, invalid error state
    DSS_Pre,     // The DFS pre-order (first visit) traversal state
    DSS_Post     // The DFS post-order (last visit) traversal state

// These structs represent an entry in a stack used to emulate a non-recursive
// depth-first spanning tree traversal of a graph. The entry contains either a
// block pointer or a block number depending on which is more useful.
struct DfsBlockEntry
    DfsStackState dfsStackState; // The pre/post traversal action for this entry
    BasicBlock*   dfsBlock;      // The corresponding block for the action

    // Default: invalid traversal state with no block.
    DfsBlockEntry() : dfsStackState(DSS_Invalid), dfsBlock(nullptr)

    // Construct an entry pairing a traversal action with a block.
    DfsBlockEntry(DfsStackState state, BasicBlock* basicBlock) : dfsStackState(state), dfsBlock(basicBlock)

    DfsStackState dfsStackState; // The pre/post traversal action for this entry
    unsigned      dfsNum;        // The corresponding block number for the action

    // Default: invalid traversal state with block number 0.
    DfsNumEntry() : dfsStackState(DSS_Invalid), dfsNum(0)

    // Construct an entry pairing a traversal action with a block number.
    DfsNumEntry(DfsStackState state, unsigned bbNum) : dfsStackState(state), dfsNum(bbNum)
1341 /*****************************************************************************/
1343 extern BasicBlock* __cdecl verAllocBasicBlock();
1346 extern void __cdecl verDispBasicBlocks();
1349 /*****************************************************************************
 *  The following call-backs are supplied by the client; they are used by the code
1352 * emitter to convert a basic block to its corresponding emitter cookie.
1355 void* emitCodeGetCookie(BasicBlock* block);
// Construct an iterator over all successors of 'block': the normal successors
// (indices [0..NumSucc)) are yielded first, then the EH successors.
AllSuccessorIter::AllSuccessorIter(Compiler* comp, BasicBlock* block)
    : m_comp(comp), m_blk(block), m_normSucc(0), m_numNormSuccs(block->NumSucc(comp)), m_ehIter(comp, block)
    // Skip an initial EH successor that duplicates a BBJ_CALLFINALLY's jump target.
    if (CurTryIsBlkCallFinallyTarget())

// Returns true when the block is a BBJ_CALLFINALLY and the current EH successor
// is its jump target — that block is already yielded as a normal successor.
bool AllSuccessorIter::CurTryIsBlkCallFinallyTarget()
    return (m_blk->bbJumpKind == BBJ_CALLFINALLY) && (m_ehIter != EHSuccessorIter()) &&
           (m_blk->bbJumpDest == (*m_ehIter));

// Advance: exhaust the normal successors first, then step the EH iterator.
void AllSuccessorIter::operator++(void)
    if (m_normSucc < m_numNormSuccs)

    // If the original block whose successors we're iterating over
    // is a BBJ_CALLFINALLY, that finally clause's first block
    // will be yielded as a normal successor. Don't also yield as
    // an exceptional successor.
    if (CurTryIsBlkCallFinallyTarget())

// Requires that "this" is not equal to the standard "end" iterator. Returns the
// current successor.
BasicBlock* AllSuccessorIter::operator*()
    if (m_normSucc < m_numNormSuccs)
        return m_blk->GetSucc(m_normSucc, m_comp);
1406 /*****************************************************************************/
1408 /*****************************************************************************/