1 // Licensed to the .NET Foundation under one or more agreements.
2 // The .NET Foundation licenses this file to you under the MIT license.
4 /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
5 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
9 XX This is the node in the semantic tree graph. It represents the operation XX
10 XX corresponding to the node, and other information during code-gen. XX
12 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
13 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
16 /*****************************************************************************/
19 /*****************************************************************************/
21 #include "vartype.h" // For "var_types"
22 #include "target.h" // For "regNumber"
23 #include "ssaconfig.h" // For "SsaConfig::RESERVED_SSA_NUM"
24 #include "valuenumtype.h"
26 #include "jithashtable.h"
27 #include "gentreeopsdef.h"
29 #include "namedintrinsiclist.h"
31 #include "debuginfo.h"
33 // Debugging GenTree is much easier if we add a magic virtual function to make the debugger able to figure out what type
34 // it's got. This is enabled by default in DEBUG. To enable it in RET builds (temporarily!), you need to change the
35 // build to define DEBUGGABLE_GENTREE=1, as well as pass /OPT:NOICF to the linker (or else all the vtables get merged,
36 // making the debugging value supplied by them useless).
37 #ifndef DEBUGGABLE_GENTREE
39 #define DEBUGGABLE_GENTREE 1
41 #define DEBUGGABLE_GENTREE 0
43 #endif // !DEBUGGABLE_GENTREE
45 // The SpecialCodeKind enum is used to indicate the type of special (unique)
46 // target block that will be targeted by an instruction.
48 // GenTreeBoundsChk nodes (SCK_RNGCHK_FAIL, SCK_ARG_EXCPN, SCK_ARG_RNG_EXCPN)
49 // - these nodes have a field (gtThrowKind) to indicate which kind
50 // GenTreeOps nodes, for which codegen will generate the branch
51 // - it will use the appropriate kind based on the opcode, though it's not
52 // clear why SCK_OVERFLOW == SCK_ARITH_EXCPN
57 SCK_RNGCHK_FAIL, // target when range check fails
58 SCK_DIV_BY_ZERO, // target for divide by zero (Not used on X86/X64)
59 SCK_ARITH_EXCPN, // target on arithmetic exception
60 SCK_OVERFLOW = SCK_ARITH_EXCPN, // target on overflow
61 SCK_ARG_EXCPN, // target on ArgumentException (currently used only for SIMD intrinsics)
62 SCK_ARG_RNG_EXCPN, // target on ArgumentOutOfRangeException (currently used only for SIMD intrinsics)
66 /*****************************************************************************/
68 // The following enum defines a set of bit flags that can be used
69 // to classify expression tree nodes.
73 GTK_SPECIAL = 0x00, // special operator
74 GTK_LEAF = 0x01, // leaf operator
75 GTK_UNOP = 0x02, // unary operator
76 GTK_BINOP = 0x04, // binary operator
78 GTK_KINDMASK = (GTK_SPECIAL | GTK_LEAF | GTK_UNOP | GTK_BINOP), // operator kind mask
79 GTK_SMPOP = (GTK_UNOP | GTK_BINOP),
81 GTK_COMMUTE = 0x08, // commutative operator
82 GTK_EXOP = 0x10, // Indicates that an oper for a node type that extends GenTreeOp (or GenTreeUnOp)
83 // by adding non-node fields to unary or binary operator.
84 GTK_NOVALUE = 0x20, // node does not produce a value
85 GTK_STORE = 0x40, // node represents a store
90 // The following enum defines a set of bit flags that describe opers for the purposes
91 // of DEBUG-only checks. This is separate from the above "GenTreeOperKind"s to avoid
92 // making the table for those larger in Release builds. However, it resides in the same
93 // "namespace" and so all values here must be distinct from those in "GenTreeOperKind".
95 enum GenTreeDebugOperKind
97 DBK_FIRST_FLAG = GTK_MASK + 1,
99 DBK_NOTHIR = DBK_FIRST_FLAG, // This oper is not supported in HIR (before rationalization).
100 DBK_NOTLIR = DBK_FIRST_FLAG << 1, // This oper is not supported in LIR (after rationalization).
101 DBK_NOCONTAIN = DBK_FIRST_FLAG << 2, // This oper produces a value, but may not be contained.
106 /*****************************************************************************/
108 enum gtCallTypes : BYTE
110 CT_USER_FUNC, // User function
111 CT_HELPER, // Jit-helper
112 CT_INDIRECT, // Indirect call
114 CT_COUNT // fake entry (must be last)
// ExceptionSetFlags: a bit set describing which exceptions an operation may throw.
// Implemented as an "enum class", so the usual bitwise operators are supplied below.
enum class ExceptionSetFlags : uint32_t
{
    // NOTE(review): "None = 0x0" restored from an elided line; confirm against upstream.
    None                     = 0x0,
    OverflowException        = 0x1,
    DivideByZeroException    = 0x2,
    ArithmeticException      = 0x4,
    NullReferenceException   = 0x8,
    IndexOutOfRangeException = 0x10,
    StackOverflowException   = 0x20,

    All = OverflowException | DivideByZeroException | ArithmeticException | NullReferenceException |
          IndexOutOfRangeException | StackOverflowException,
};

// Bitwise operators so ExceptionSetFlags can be combined and masked like a flag set.
inline constexpr ExceptionSetFlags operator~(ExceptionSetFlags a)
{
    return (ExceptionSetFlags)(~(uint32_t)a);
}

inline constexpr ExceptionSetFlags operator|(ExceptionSetFlags a, ExceptionSetFlags b)
{
    return (ExceptionSetFlags)((uint32_t)a | (uint32_t)b);
}

inline constexpr ExceptionSetFlags operator&(ExceptionSetFlags a, ExceptionSetFlags b)
{
    return (ExceptionSetFlags)((uint32_t)a & (uint32_t)b);
}

inline ExceptionSetFlags& operator|=(ExceptionSetFlags& a, ExceptionSetFlags b)
{
    return a = (ExceptionSetFlags)((uint32_t)a | (uint32_t)b);
}

inline ExceptionSetFlags& operator&=(ExceptionSetFlags& a, ExceptionSetFlags b)
{
    return a = (ExceptionSetFlags)((uint32_t)a & (uint32_t)b);
}
157 /*****************************************************************************
159 * TargetHandleTypes are used to determine the type of handle present inside GenTreeIntCon node.
160 * The values are such that they don't overlap with helper's or user function's handle.
162 enum TargetHandleType : BYTE
165 THT_GSCookieCheck = 4,
167 THT_InitializeArrayIntrinsics = 8
170 /*****************************************************************************/
173 enum BasicBlockFlags : unsigned __int64;
174 struct InlineCandidateInfo;
175 struct HandleHistogramProfileCandidateInfo;
176 struct LateDevirtualizationInfo;
typedef unsigned short AssertionIndex;

// Sentinel: "no assertion attached". AssertionIndex values are 1-based.
static const AssertionIndex NO_ASSERTION_INDEX = 0;

//------------------------------------------------------------------------
// GetAssertionIndex: return 1-based AssertionIndex from 0-based int index.
//
// Arguments:
//    index - 0-based index
//
// Return Value:
//    1-based AssertionIndex.
//
inline AssertionIndex GetAssertionIndex(unsigned index)
{
    return (AssertionIndex)(index + 1);
}
196 // true if the assertion holds on the bbNext edge instead of the bbJumpDest edge (for GT_JTRUE nodes)
197 unsigned short m_isNextEdgeAssertion : 1;
198 // 1-based index of the assertion
199 unsigned short m_assertionIndex : 15;
201 AssertionInfo(bool isNextEdgeAssertion, AssertionIndex assertionIndex)
202 : m_isNextEdgeAssertion(isNextEdgeAssertion), m_assertionIndex(assertionIndex)
204 assert(m_assertionIndex == assertionIndex);
208 AssertionInfo() : AssertionInfo(false, 0)
212 AssertionInfo(AssertionIndex assertionIndex) : AssertionInfo(false, assertionIndex)
216 static AssertionInfo ForNextEdge(AssertionIndex assertionIndex)
218 // Ignore the edge information if there's no assertion
219 bool isNextEdge = (assertionIndex != NO_ASSERTION_INDEX);
220 return AssertionInfo(isNextEdge, assertionIndex);
225 m_isNextEdgeAssertion = 0;
226 m_assertionIndex = NO_ASSERTION_INDEX;
229 bool HasAssertion() const
231 return m_assertionIndex != NO_ASSERTION_INDEX;
234 AssertionIndex GetAssertionIndex() const
236 return m_assertionIndex;
239 bool IsNextEdgeAssertion() const
241 return m_isNextEdgeAssertion;
245 // GT_FIELD_ADDR nodes will be lowered into more "code-gen-able" representations, like ADD's of addresses.
246 // For value numbering, we would like to preserve the aliasing information for class and static fields,
247 // and so will annotate such lowered addresses with "field sequences", representing the "base" static or
248 // class field and any additional struct fields. We only need to preserve the handle for the first field,
249 // so any struct fields will be represented implicitly (via offsets). See also "IsFieldAddr".
254 enum class FieldKind : uintptr_t
256 Instance = 0, // An instance field.
257 SimpleStatic = 1, // Simple static field - the handle represents a unique location.
258 SimpleStaticKnownAddress = 2, // Simple static field - the handle represents a known location.
259 SharedStatic = 3, // Static field on a shared generic type: "Class<__Canon>.StaticField".
263 static const uintptr_t FIELD_KIND_MASK = 0b11;
265 static_assert_no_msg(sizeof(CORINFO_FIELD_HANDLE) == sizeof(uintptr_t));
267 uintptr_t m_fieldHandleAndKind;
271 FieldSeq(CORINFO_FIELD_HANDLE fieldHnd, ssize_t offset, FieldKind fieldKind);
273 FieldKind GetKind() const
275 return static_cast<FieldKind>(m_fieldHandleAndKind & FIELD_KIND_MASK);
278 CORINFO_FIELD_HANDLE GetFieldHandle() const
280 return CORINFO_FIELD_HANDLE(m_fieldHandleAndKind & ~FIELD_KIND_MASK);
283 //------------------------------------------------------------------------
284 // GetOffset: Retrieve "the offset" for the field this node represents.
286 // For statics with a known (constant) address it will be the value of that address.
287 // For boxed statics, it will be TARGET_POINTER_SIZE (the method table pointer size).
288 // For other fields, it will be equal to the value "getFieldOffset" would return.
290 ssize_t GetOffset() const
295 bool IsStaticField() const
297 return (GetKind() == FieldKind::SimpleStatic) || (GetKind() == FieldKind::SharedStatic) ||
298 (GetKind() == FieldKind::SimpleStaticKnownAddress);
301 bool IsSharedStaticField() const
303 return GetKind() == FieldKind::SharedStatic;
307 // This class canonicalizes field sequences.
311 // Maps field handles to field sequence instances.
313 JitHashTable<CORINFO_FIELD_HANDLE, JitPtrKeyFuncs<CORINFO_FIELD_STRUCT_>, FieldSeq> m_map;
316 FieldSeqStore(CompAllocator alloc) : m_map(alloc)
320 FieldSeq* Create(CORINFO_FIELD_HANDLE fieldHnd, ssize_t offset, FieldSeq::FieldKind fieldKind);
322 FieldSeq* Append(FieldSeq* a, FieldSeq* b);
325 class GenTreeUseEdgeIterator;
326 class GenTreeOperandIterator;
330 /*****************************************************************************/
332 // Forward declarations of the subtypes
333 #define GTSTRUCT_0(fn, en) struct GenTree##fn;
334 #define GTSTRUCT_1(fn, en) struct GenTree##fn;
335 #define GTSTRUCT_2(fn, en, en2) struct GenTree##fn;
336 #define GTSTRUCT_3(fn, en, en2, en3) struct GenTree##fn;
337 #define GTSTRUCT_4(fn, en, en2, en3, en4) struct GenTree##fn;
338 #define GTSTRUCT_N(fn, ...) struct GenTree##fn;
339 #define GTSTRUCT_2_SPECIAL(fn, en, en2) GTSTRUCT_2(fn, en, en2)
340 #define GTSTRUCT_3_SPECIAL(fn, en, en2, en3) GTSTRUCT_3(fn, en, en2, en3)
341 #include "gtstructs.h"
343 /*****************************************************************************/
345 // Don't format the GenTreeFlags declaration
348 //------------------------------------------------------------------------
349 // GenTreeFlags: a bitmask of flags for GenTree stored in gtFlags
351 enum GenTreeFlags : unsigned int
355 //---------------------------------------------------------------------
356 // The first set of flags can be used with a large set of nodes, and
357 // thus they must all have distinct values. That is, one can test any
358 // expression node for one of these flags.
359 //---------------------------------------------------------------------
361 GTF_ASG = 0x00000001, // sub-expression contains an assignment
362 GTF_CALL = 0x00000002, // sub-expression contains a func. call
363 GTF_EXCEPT = 0x00000004, // sub-expression might throw an exception
364 GTF_GLOB_REF = 0x00000008, // sub-expression uses global variable(s)
365 GTF_ORDER_SIDEEFF = 0x00000010, // sub-expression has a re-ordering side effect
367 // If you set these flags, make sure that code:gtExtractSideEffList knows how to find the tree,
368 // otherwise the C# (run csc /o-) code:
369 // var v = side_eff_operation
370 // with no use of `v` will drop your tree on the floor.
372 GTF_PERSISTENT_SIDE_EFFECTS = GTF_ASG | GTF_CALL,
373 GTF_SIDE_EFFECT = GTF_PERSISTENT_SIDE_EFFECTS | GTF_EXCEPT,
374 GTF_GLOB_EFFECT = GTF_SIDE_EFFECT | GTF_GLOB_REF,
375 GTF_ALL_EFFECT = GTF_GLOB_EFFECT | GTF_ORDER_SIDEEFF,
377 GTF_REVERSE_OPS = 0x00000020, // operand op2 should be evaluated before op1 (normally, op1 is evaluated first and op2 is evaluated second)
378 GTF_CONTAINED = 0x00000040, // This node is contained (executed as part of its parent)
379 GTF_SPILLED = 0x00000080, // the value has been spilled
381 GTF_NOREG_AT_USE = 0x00000100, // tree node is in memory at the point of use
383 GTF_SET_FLAGS = 0x00000200, // Requires that codegen for this node set the flags. Use gtSetFlags() to check this flag.
386 GTF_DONT_EXTEND = 0x00000400, // This small-typed tree produces a value with undefined upper bits. Used on x86/x64 as a
387 // lowering optimization and tells the codegen to use instructions like "mov al, [addr]"
388 // instead of "movzx/movsx", when the user node doesn't need the upper bits.
389 #endif // TARGET_XARCH
391 GTF_MAKE_CSE = 0x00000800, // Hoisted expression: try hard to make this into CSE (see optPerformHoistExpr)
392 GTF_DONT_CSE = 0x00001000, // Don't bother CSE'ing this expr
393 GTF_COLON_COND = 0x00002000, // This node is conditionally executed (part of ? :)
395 GTF_NODE_MASK = GTF_COLON_COND,
397 GTF_BOOLEAN = 0x00004000, // value is known to be 0/1
399 GTF_UNSIGNED = 0x00008000, // With GT_CAST: the source operand is an unsigned type
400 // With operators: the specified node is an unsigned operator
401 GTF_SPILL = 0x00020000, // Needs to be spilled here
403 // The extra flag GTF_IS_IN_CSE is used to tell the consumer of the side effect flags
404 // that we are calling in the context of performing a CSE, thus we
405 // should allow the run-once side effects of running a class constructor.
407 // The only requirement of this flag is that it not overlap any of the
408 // side-effect flags. The actual bit used is otherwise arbitrary.
410 GTF_IS_IN_CSE = GTF_BOOLEAN,
412 GTF_COMMON_MASK = 0x0003FFFF, // mask of all the flags above
414 GTF_REUSE_REG_VAL = 0x00800000, // This is set by the register allocator on nodes whose value already exists in the
415 // register assigned to this node, so the code generator does not have to generate
416 // code to produce the value. It is currently used only on constant nodes.
417 // It CANNOT be set on var (GT_LCL*) nodes, or on indir (GT_IND or GT_STOREIND) nodes, since
418 // it is not needed for lclVars and is highly unlikely to be useful for indir nodes.
420 //---------------------------------------------------------------------
421 // The following flags can be used only with a small set of nodes, and
422 // thus their values need not be distinct (other than within the set
423 // that goes with a particular node/nodes, of course). That is, one can
424 // only test for one of these flags if the 'gtOper' value is tested as
425 // well to make sure it's the right operator for the particular flag.
426 //---------------------------------------------------------------------
428 GTF_VAR_DEF = 0x80000000, // GT_STORE_LCL_VAR/GT_STORE_LCL_FLD/GT_LCL_ADDR -- this is a definition
429 GTF_VAR_USEASG = 0x40000000, // GT_STORE_LCL_FLD/GT_STORE_LCL_FLD/GT_LCL_ADDR -- this is a partial definition, a use of
430 // the previous definition is implied. A partial definition usually occurs when a struct
431 // field is assigned to (s.f = ...) or when a scalar typed variable is assigned to via a
432 // narrow store (*((byte*)&i) = ...).
434 // Last-use bits. Also used by GenTreeCopyOrReload.
435 // Note that a node marked GTF_VAR_MULTIREG can only be a pure definition of all the fields, or a pure use of all the fields,
436 // so we don't need the equivalent of GTF_VAR_USEASG.
438 GTF_VAR_FIELD_DEATH0 = 0x04000000, // The last-use bit for the first field of a promoted local.
439 GTF_VAR_FIELD_DEATH1 = 0x08000000, // The last-use bit for the second field of a promoted local.
440 GTF_VAR_FIELD_DEATH2 = 0x10000000, // The last-use bit for the third field of a promoted local.
441 GTF_VAR_FIELD_DEATH3 = 0x20000000, // The last-use bit for the fourth field of a promoted local.
442 GTF_VAR_DEATH_MASK = GTF_VAR_FIELD_DEATH0 | GTF_VAR_FIELD_DEATH1 | GTF_VAR_FIELD_DEATH2 | GTF_VAR_FIELD_DEATH3,
443 GTF_VAR_DEATH = GTF_VAR_FIELD_DEATH0, // The last-use bit for a tracked local.
445 // This is the amount we have to shift, plus the index, to get the last use bit we want.
446 #define FIELD_LAST_USE_SHIFT 26
448 GTF_VAR_MULTIREG = 0x02000000, // This is a struct or (on 32-bit platforms) long variable that is used or defined
449 // to/from a multireg source or destination (e.g. a call arg or return, or an op
450 // that returns its result in multiple registers such as a long multiply). Set by
451 // (and thus only valid after) lowering.
453 GTF_LIVENESS_MASK = GTF_VAR_DEF | GTF_VAR_USEASG | GTF_VAR_DEATH_MASK,
455 GTF_VAR_ITERATOR = 0x01000000, // GT_LCL_VAR -- this is a iterator reference in the loop condition
456 GTF_VAR_MOREUSES = 0x00800000, // GT_LCL_VAR -- this node has additonal uses, for example due to cloning
457 GTF_VAR_CONTEXT = 0x00400000, // GT_LCL_VAR -- this node is part of a runtime lookup
458 GTF_VAR_EXPLICIT_INIT = 0x00200000, // GT_LCL_VAR -- this node is an "explicit init" store. Valid until rationalization.
460 // For additional flags for GT_CALL node see GTF_CALL_M_*
462 GTF_CALL_UNMANAGED = 0x80000000, // GT_CALL -- direct call to unmanaged code
463 GTF_CALL_INLINE_CANDIDATE = 0x40000000, // GT_CALL -- this call has been marked as an inline candidate
465 GTF_CALL_VIRT_KIND_MASK = 0x30000000, // GT_CALL -- mask of the below call kinds
466 GTF_CALL_NONVIRT = 0x00000000, // GT_CALL -- a non virtual call
467 GTF_CALL_VIRT_STUB = 0x10000000, // GT_CALL -- a stub-dispatch virtual call
468 GTF_CALL_VIRT_VTABLE = 0x20000000, // GT_CALL -- a vtable-based virtual call
470 GTF_CALL_NULLCHECK = 0x08000000, // GT_CALL -- must check instance pointer for null
471 GTF_CALL_POP_ARGS = 0x04000000, // GT_CALL -- caller pop arguments?
472 GTF_CALL_HOISTABLE = 0x02000000, // GT_CALL -- call is hoistable
474 GTF_MEMORYBARRIER_LOAD = 0x40000000, // GT_MEMORYBARRIER -- Load barrier
476 GTF_FLD_TLS = 0x80000000, // GT_FIELD_ADDR -- field address is a Windows x86 TLS reference
477 GTF_FLD_DEREFERENCED = 0x40000000, // GT_FIELD_ADDR -- used to preserve previous behavior
479 GTF_INX_RNGCHK = 0x80000000, // GT_INDEX_ADDR -- this array address should be range-checked
480 GTF_INX_ADDR_NONNULL = 0x40000000, // GT_INDEX_ADDR -- this array address is not null
482 GTF_IND_TGT_NOT_HEAP = 0x80000000, // GT_IND -- the target is not on the heap
483 GTF_IND_VOLATILE = 0x40000000, // OperIsIndir() -- the load or store must use volatile semantics (this is a nop on X86)
484 GTF_IND_NONFAULTING = 0x20000000, // OperIsIndir() -- An indir that cannot fault.
485 GTF_IND_TGT_HEAP = 0x10000000, // GT_IND -- the target is on the heap
486 GTF_IND_REQ_ADDR_IN_REG = 0x08000000, // GT_IND -- requires its addr operand to be evaluated into a register.
487 // This flag is useful in cases where it is required to generate register
488 // indirect addressing mode. One such case is virtual stub calls on xarch.
489 GTF_IND_UNALIGNED = 0x02000000, // OperIsIndir() -- the load or store is unaligned (we assume worst case alignment of 1 byte)
490 GTF_IND_INVARIANT = 0x01000000, // GT_IND -- the target is invariant (a prejit indirection)
491 GTF_IND_NONNULL = 0x00400000, // GT_IND -- the indirection never returns null (zero)
492 GTF_IND_INITCLASS = 0x00200000, // OperIsIndir() -- the indirection requires preceding static cctor
494 GTF_IND_COPYABLE_FLAGS = GTF_IND_VOLATILE | GTF_IND_NONFAULTING | GTF_IND_UNALIGNED | GTF_IND_INITCLASS,
495 GTF_IND_FLAGS = GTF_IND_COPYABLE_FLAGS | GTF_IND_NONNULL | GTF_IND_TGT_NOT_HEAP | GTF_IND_TGT_HEAP | GTF_IND_INVARIANT,
497 GTF_ADDRMODE_NO_CSE = 0x80000000, // GT_ADD/GT_MUL/GT_LSH -- Do not CSE this node only, forms complex
500 GTF_MUL_64RSLT = 0x40000000, // GT_MUL -- produce 64-bit result
502 GTF_RELOP_NAN_UN = 0x80000000, // GT_<relop> -- Is branch taken if ops are NaN?
503 GTF_RELOP_JMP_USED = 0x40000000, // GT_<relop> -- result of compare used for jump or ?:
504 GTF_RELOP_ZTT = 0x08000000, // GT_<relop> -- Loop test cloned for converting while-loops into do-while
505 // with explicit "loop test" in the header block.
507 GTF_RET_MERGED = 0x80000000, // GT_RETURN -- This is a return generated during epilog merging.
509 GTF_QMARK_CAST_INSTOF = 0x80000000, // GT_QMARK -- Is this a top (not nested) level qmark created for
510 // castclass or instanceof?
512 GTF_BOX_CLONED = 0x40000000, // GT_BOX -- this box and its operand has been cloned, cannot assume it to be single-use anymore
513 GTF_BOX_VALUE = 0x80000000, // GT_BOX -- "box" is on a value type
515 GTF_ARR_ADDR_NONNULL = 0x80000000, // GT_ARR_ADDR -- this array's address is not null
517 GTF_ICON_HDL_MASK = 0xFF000000, // Bits used by handle types below
518 GTF_ICON_SCOPE_HDL = 0x01000000, // GT_CNS_INT -- constant is a scope handle
519 GTF_ICON_CLASS_HDL = 0x02000000, // GT_CNS_INT -- constant is a class handle
520 GTF_ICON_METHOD_HDL = 0x03000000, // GT_CNS_INT -- constant is a method handle
521 GTF_ICON_FIELD_HDL = 0x04000000, // GT_CNS_INT -- constant is a field handle
522 GTF_ICON_STATIC_HDL = 0x05000000, // GT_CNS_INT -- constant is a handle to static data
523 GTF_ICON_STR_HDL = 0x06000000, // GT_CNS_INT -- constant is a pinned handle pointing to a string object
524 GTF_ICON_OBJ_HDL = 0x12000000, // GT_CNS_INT -- constant is an object handle (e.g. frozen string or Type object)
525 GTF_ICON_CONST_PTR = 0x07000000, // GT_CNS_INT -- constant is a pointer to immutable data, (e.g. IAT_PPVALUE)
526 GTF_ICON_GLOBAL_PTR = 0x08000000, // GT_CNS_INT -- constant is a pointer to mutable data (e.g. from the VM state)
527 GTF_ICON_VARG_HDL = 0x09000000, // GT_CNS_INT -- constant is a var arg cookie handle
528 GTF_ICON_PINVKI_HDL = 0x0A000000, // GT_CNS_INT -- constant is a pinvoke calli handle
529 GTF_ICON_TOKEN_HDL = 0x0B000000, // GT_CNS_INT -- constant is a token handle (other than class, method or field)
530 GTF_ICON_TLS_HDL = 0x0C000000, // GT_CNS_INT -- constant is a TLS ref with offset
531 GTF_ICON_FTN_ADDR = 0x0D000000, // GT_CNS_INT -- constant is a function address
532 GTF_ICON_CIDMID_HDL = 0x0E000000, // GT_CNS_INT -- constant is a class ID or a module ID
533 GTF_ICON_BBC_PTR = 0x0F000000, // GT_CNS_INT -- constant is a basic block count pointer
534 GTF_ICON_STATIC_BOX_PTR = 0x10000000, // GT_CNS_INT -- constant is an address of the box for a STATIC_IN_HEAP field
535 GTF_ICON_FIELD_SEQ = 0x11000000, // <--------> -- constant is a FieldSeq* (used only as VNHandle)
536 GTF_ICON_STATIC_ADDR_PTR = 0x13000000, // GT_CNS_INT -- constant is a pointer to a static base address
538 // GTF_ICON_REUSE_REG_VAL = 0x00800000 // GT_CNS_INT -- GTF_REUSE_REG_VAL, defined above
539 GTF_ICON_SIMD_COUNT = 0x00200000, // GT_CNS_INT -- constant is Vector<T>.Count
541 GTF_OVERFLOW = 0x10000000, // Supported for: GT_ADD, GT_SUB, GT_MUL and GT_CAST.
542 // Requires an overflow check. Use gtOverflow(Ex)() to check this flag.
544 GTF_DIV_MOD_NO_BY_ZERO = 0x20000000, // GT_DIV, GT_MOD -- Div or mod definitely does not divide-by-zero.
546 GTF_DIV_MOD_NO_OVERFLOW = 0x40000000, // GT_DIV, GT_MOD -- Div or mod definitely does not overflow.
548 GTF_DIV_BY_CNS_OPT = 0x80000000, // GT_DIV -- Uses the division by constant optimization to compute this division
550 GTF_CHK_INDEX_INBND = 0x80000000, // GT_BOUNDS_CHECK -- have proven this check is always in-bounds
552 GTF_ARRLEN_NONFAULTING = 0x20000000, // GT_ARR_LENGTH -- An array length operation that cannot fault. Same as GT_IND_NONFAULTING.
554 GTF_MDARRLEN_NONFAULTING = 0x20000000, // GT_MDARR_LENGTH -- An MD array length operation that cannot fault. Same as GT_IND_NONFAULTING.
556 GTF_MDARRLOWERBOUND_NONFAULTING = 0x20000000, // GT_MDARR_LOWER_BOUND -- An MD array lower bound operation that cannot fault. Same as GT_IND_NONFAULTING.
560 inline constexpr GenTreeFlags operator ~(GenTreeFlags a)
562 return (GenTreeFlags)(~(unsigned int)a);
565 inline constexpr GenTreeFlags operator |(GenTreeFlags a, GenTreeFlags b)
567 return (GenTreeFlags)((unsigned int)a | (unsigned int)b);
570 inline constexpr GenTreeFlags operator &(GenTreeFlags a, GenTreeFlags b)
572 return (GenTreeFlags)((unsigned int)a & (unsigned int)b);
575 inline GenTreeFlags& operator |=(GenTreeFlags& a, GenTreeFlags b)
577 return a = (GenTreeFlags)((unsigned int)a | (unsigned int)b);
580 inline GenTreeFlags& operator &=(GenTreeFlags& a, GenTreeFlags b)
582 return a = (GenTreeFlags)((unsigned int)a & (unsigned int)b);
585 inline GenTreeFlags& operator ^=(GenTreeFlags& a, GenTreeFlags b)
587 return a = (GenTreeFlags)((unsigned int)a ^ (unsigned int)b);
590 // Can any side-effects be observed externally, say by a caller method?
591 // For assignments, only assignments to global memory can be observed
592 // externally, whereas simple assignments to local variables can not.
594 // Be careful when using this inside a "try" protected region as the
595 // order of assignments to local variables would need to be preserved
596 // wrt side effects if the variables are alive on entry to the
597 // "catch/finally" region. In such cases, even assignments to locals
598 // will have to be restricted.
599 #define GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(flags) \
600 (((flags) & (GTF_CALL | GTF_EXCEPT)) || (((flags) & (GTF_ASG | GTF_GLOB_REF)) == (GTF_ASG | GTF_GLOB_REF)))
604 //------------------------------------------------------------------------
605 // GenTreeDebugFlags: a bitmask of debug-only flags for GenTree stored in gtDebugFlags
607 enum GenTreeDebugFlags : unsigned int
609 GTF_DEBUG_NONE = 0x00000000, // No debug flags.
611 GTF_DEBUG_NODE_MORPHED = 0x00000001, // the node has been morphed (in the global morphing phase)
612 GTF_DEBUG_NODE_SMALL = 0x00000002,
613 GTF_DEBUG_NODE_LARGE = 0x00000004,
614 GTF_DEBUG_NODE_CG_PRODUCED = 0x00000008, // genProduceReg has been called on this node
615 GTF_DEBUG_NODE_CG_CONSUMED = 0x00000010, // genConsumeReg has been called on this node
616 GTF_DEBUG_NODE_LSRA_ADDED = 0x00000020, // This node was added by LSRA
618 GTF_DEBUG_NODE_MASK = 0x0000003F, // These flags are all node (rather than operation) properties.
620 GTF_DEBUG_VAR_CSE_REF = 0x00800000, // GT_LCL_VAR -- This is a CSE LCL_VAR node
623 inline constexpr GenTreeDebugFlags operator ~(GenTreeDebugFlags a)
625 return (GenTreeDebugFlags)(~(unsigned int)a);
628 inline constexpr GenTreeDebugFlags operator |(GenTreeDebugFlags a, GenTreeDebugFlags b)
630 return (GenTreeDebugFlags)((unsigned int)a | (unsigned int)b);
633 inline constexpr GenTreeDebugFlags operator &(GenTreeDebugFlags a, GenTreeDebugFlags b)
635 return (GenTreeDebugFlags)((unsigned int)a & (unsigned int)b);
638 inline GenTreeDebugFlags& operator |=(GenTreeDebugFlags& a, GenTreeDebugFlags b)
640 return a = (GenTreeDebugFlags)((unsigned int)a | (unsigned int)b);
643 inline GenTreeDebugFlags& operator &=(GenTreeDebugFlags& a, GenTreeDebugFlags b)
645 return a = (GenTreeDebugFlags)((unsigned int)a & (unsigned int)b);
648 #endif // defined(DEBUG)
653 #include <pshpack4.h>
658 // We use GT_STRUCT_0 only for the category of simple ops.
659 #define GTSTRUCT_0(fn, en) \
660 GenTree##fn* As##fn() \
662 assert(OperIsSimple()); \
663 return reinterpret_cast<GenTree##fn*>(this); \
665 const GenTree##fn* As##fn() const \
667 assert(OperIsSimple()); \
668 return reinterpret_cast<const GenTree##fn*>(this); \
670 GenTree##fn& As##fn##Ref() \
675 #define GTSTRUCT_N(fn, ...) \
676 GenTree##fn* As##fn() \
678 assert(OperIs(__VA_ARGS__)); \
679 return reinterpret_cast<GenTree##fn*>(this); \
681 const GenTree##fn* As##fn() const \
683 assert(OperIs(__VA_ARGS__)); \
684 return reinterpret_cast<const GenTree##fn*>(this); \
686 GenTree##fn& As##fn##Ref() \
691 #define GTSTRUCT_1(fn, en) GTSTRUCT_N(fn, en)
692 #define GTSTRUCT_2(fn, en, en2) GTSTRUCT_N(fn, en, en2)
693 #define GTSTRUCT_3(fn, en, en2, en3) GTSTRUCT_N(fn, en, en2, en3)
694 #define GTSTRUCT_4(fn, en, en2, en3, en4) GTSTRUCT_N(fn, en, en2, en3, en4)
695 #define GTSTRUCT_2_SPECIAL(fn, en, en2) GTSTRUCT_2(fn, en, en2)
696 #define GTSTRUCT_3_SPECIAL(fn, en, en2, en3) GTSTRUCT_3(fn, en, en2, en3)
698 #include "gtstructs.h"
700 genTreeOps gtOper; // enum subtype BYTE
701 var_types gtType; // enum subtype BYTE
703 genTreeOps OperGet() const
707 var_types TypeGet() const
712 ClassLayout* GetLayout(Compiler* compiler) const;
715 genTreeOps gtOperSave; // Only used to save gtOper when we destroy a node, to aid debugging.
720 #define IS_CSE_INDEX(x) ((x) != 0)
721 #define IS_CSE_USE(x) ((x) > 0)
722 #define IS_CSE_DEF(x) ((x) < 0)
723 #define GET_CSE_INDEX(x) (((x) > 0) ? x : -(x))
724 #define TO_CSE_DEF(x) (-(x))
726 signed char gtCSEnum; // 0 or the CSE index (negated if def)
727 // valid only for CSE expressions
729 unsigned char gtLIRFlags; // Used for nodes that are in LIR. See LIR::Flags in lir.h for the various flags.
731 AssertionInfo gtAssertionInfo;
733 bool GeneratesAssertion() const
735 return gtAssertionInfo.HasAssertion();
738 void ClearAssertion()
740 gtAssertionInfo.Clear();
743 AssertionInfo GetAssertionInfo() const
745 return gtAssertionInfo;
748 void SetAssertionInfo(AssertionInfo info)
750 gtAssertionInfo = info;
754 // Cost metrics on the node. Don't allow direct access to the variable for setting.
759 // You are not allowed to read the cost values before they have been set in gtSetEvalOrder().
760 // Keep track of whether the costs have been initialized, and assert if they are read before being initialized.
761 // Obviously, this information does need to be initialized when a node is created.
762 // This is public so the dumpers can see it.
764 bool gtCostsInitialized;
767 #define MAX_COST UCHAR_MAX
768 #define IND_COST_EX 3 // execution cost for an indirection
770 unsigned char GetCostEx() const
772 assert(gtCostsInitialized);
775 unsigned char GetCostSz() const
777 assert(gtCostsInitialized);
781 // Set the costs. They are always both set at the same time.
782 // Don't use the "put" property: force calling this function, to make it more obvious in the few places
783 // that set the values.
784 // Note that costs are only set in gtSetEvalOrder() and its callees.
785 void SetCosts(unsigned costEx, unsigned costSz)
787 assert(costEx != (unsigned)-1); // looks bogus
788 assert(costSz != (unsigned)-1); // looks bogus
789 INDEBUG(gtCostsInitialized = true;)
791 _gtCostEx = (costEx > MAX_COST) ? MAX_COST : (unsigned char)costEx;
792 _gtCostSz = (costSz > MAX_COST) ? MAX_COST : (unsigned char)costSz;
795 // Opimized copy function, to avoid the SetCosts() function comparisons, and make it more clear that a node copy is
797 void CopyCosts(const GenTree* const tree)
799 // If the 'tree' costs aren't initialized, we'll hit an assert below.
800 INDEBUG(gtCostsInitialized = tree->gtCostsInitialized;)
801 _gtCostEx = tree->GetCostEx();
802 _gtCostSz = tree->GetCostSz();
805 // Same as CopyCosts, but avoids asserts if the costs we are copying have not been initialized.
806 // This is because the importer, for example, clones nodes, before these costs have been initialized.
807 // Note that we directly access the 'tree' costs, not going through the accessor functions (either
808 // directly or through the properties).
809 void CopyRawCosts(const GenTree* const tree)
811 INDEBUG(gtCostsInitialized = tree->gtCostsInitialized;)
812 _gtCostEx = tree->_gtCostEx;
813 _gtCostSz = tree->_gtCostSz;
817 unsigned char _gtCostEx; // estimate of expression execution cost
818 unsigned char _gtCostSz; // estimate of expression code size cost
821 // Register or register pair number of the node.
823 CLANG_FORMAT_COMMENT_ANCHOR;
// DEBUG-only tag recording whether _gtRegNum has been assigned yet.
830 GT_REGTAG_NONE, // Nothing has been assigned to _gtRegNum
831 GT_REGTAG_REG // _gtRegNum has been assigned
833 genRegTag GetRegTag() const
835 assert(gtRegTag == GT_REGTAG_NONE || gtRegTag == GT_REGTAG_REG);
840 genRegTag gtRegTag; // What is in _gtRegNum?
845 // This stores the register assigned to the node. If a register is not assigned, _gtRegNum is set to REG_NA.
846 regNumberSmall _gtRegNum;
848 // Count of operands. Used *only* by GenTreeMultiOp, exists solely due to padding constraints.
849 friend struct GenTreeMultiOp;
850 uint8_t m_operandCount;
853 // The register number is stored in a small format (8 bits), but the getters return and the setters take
854 // a full-size (unsigned) format, to localize the casts here.
855 CLANG_FORMAT_COMMENT_ANCHOR;
// Containment queries: a "contained" node is folded into its parent's code generation
// (e.g. an immediate or memory operand) instead of producing a value in its own register.
858 bool canBeContained() const;
861 // for codegen purposes, is this node a subnode of its parent
862 bool isContained() const;
864 bool isContainedIndir() const;
866 bool isIndirAddrMode();
868 // This returns true only for GT_IND and GT_STOREIND, and is used in contexts where a "true"
869 // indirection is expected (i.e. either a load to or a store from a single register).
870 // OperIsIndir() returns true also for indirection nodes such as GT_BLK, etc. as well as GT_NULLCHECK.
871 bool isIndir() const;
// True if this node is a contained integer immediate (and not spilled to a temp).
873 bool isContainedIntOrIImmed() const
875 return isContained() && IsCnsIntOrI() && !isUsedFromSpillTemp();
878 bool isContainedFltOrDblImmed() const
880 return isContained() && OperIs(GT_CNS_DBL);
883 bool isContainedVecImmed() const
885 return isContained() && OperIs(GT_CNS_VEC);
888 bool isLclField() const
890 return OperGet() == GT_LCL_FLD || OperGet() == GT_STORE_LCL_FLD;
893 bool isUsedFromSpillTemp() const;
895 // Indicates whether it is a memory op.
896 // Right now it includes Indir and LclField ops.
897 bool isMemoryOp() const
899 return isIndir() || isLclField();
// True if the parent will read this operand from memory (contained memory op /
// contained local or constant, or a value spilled to a temp).
902 bool isUsedFromMemory() const
904 return ((isContained() && (isMemoryOp() || OperIs(GT_LCL_VAR, GT_CNS_DBL, GT_CNS_VEC))) ||
905 isUsedFromSpillTemp());
908 bool isLclVarUsedFromMemory() const
910 return (OperGet() == GT_LCL_VAR) && (isContained() || isUsedFromSpillTemp());
913 bool isLclFldUsedFromMemory() const
915 return isLclField() && (isContained() || isUsedFromSpillTemp());
918 bool isUsedFromReg() const
920 return !isContained() && !isUsedFromSpillTemp();
// Full-size accessors for the small-format _gtRegNum field; casts are localized here.
923 regNumber GetRegNum() const
925 assert((gtRegTag == GT_REGTAG_REG) || (gtRegTag == GT_REGTAG_NONE)); // TODO-Cleanup: get rid of the NONE case,
926 // and fix everyplace that reads undefined
928 regNumber reg = (regNumber)_gtRegNum;
929 assert((gtRegTag == GT_REGTAG_NONE) || // TODO-Cleanup: get rid of the NONE case, and fix everyplace that reads
931 (reg >= REG_FIRST && reg <= REG_COUNT));
935 void SetRegNum(regNumber reg)
937 assert(reg >= REG_FIRST && reg <= REG_COUNT);
938 _gtRegNum = (regNumberSmall)reg;
939 INDEBUG(gtRegTag = GT_REGTAG_REG;)
// Verify the value survived the narrowing cast to regNumberSmall.
940 assert(_gtRegNum == reg);
946 INDEBUG(gtRegTag = GT_REGTAG_NONE;)
949 // Copy the _gtRegNum/gtRegTag fields
950 void CopyReg(GenTree* from);
951 bool gtHasReg(Compiler* comp) const;
953 int GetRegisterDstCount(Compiler* compiler) const;
955 regMaskTP gtGetRegMask() const;
956 regMaskTP gtGetContainedRegMask();
// Per-node flag word and (DEBUG-only) debug flags.
958 GenTreeFlags gtFlags;
961 GenTreeDebugFlags gtDebugFlags;
962 #endif // defined(DEBUG)
// Liberal/conservative value-number pair for this node.
964 ValueNumPair gtVNPair;
966 regMaskSmall gtRsvdRegs; // set of fixed trashed registers
// Internal (reserved) temporary-register accessors; masks default to "any register".
968 unsigned AvailableTempRegCount(regMaskTP mask = (regMaskTP)-1) const;
969 regNumber GetSingleTempReg(regMaskTP mask = (regMaskTP)-1);
970 regNumber ExtractTempReg(regMaskTP mask = (regMaskTP)-1);
972 void SetVNsFromNode(GenTree* tree)
974 gtVNPair = tree->gtVNPair;
977 ValueNum GetVN(ValueNumKind vnk) const
979 if (vnk == VNK_Liberal)
981 return gtVNPair.GetLiberal();
985 assert(vnk == VNK_Conservative);
986 return gtVNPair.GetConservative();
989 void SetVN(ValueNumKind vnk, ValueNum vn)
991 if (vnk == VNK_Liberal)
993 return gtVNPair.SetLiberal(vn);
997 assert(vnk == VNK_Conservative);
998 return gtVNPair.SetConservative(vn);
1001 void SetVNs(ValueNumPair vnp)
1007 gtVNPair = ValueNumPair(); // Initializes both elements to "NoVN".
1015 unsigned gtSeqNum; // liveness traversal order within the current statement
1017 int gtUseNum; // use-ordered traversal within the function
// Per-oper kind bits (GTK_*), indexed by genTreeOps value.
1020 static const unsigned char gtOperKindTable[];
1022 static unsigned OperKind(unsigned gtOper)
1024 assert(gtOper < GT_COUNT);
1026 return gtOperKindTable[gtOper];
1029 unsigned OperKind() const
1031 assert(gtOper < GT_COUNT);
1033 return gtOperKindTable[gtOper];
1036 static bool IsExOp(unsigned opKind)
1038 return (opKind & GTK_EXOP) != 0;
// Does this node produce a value (as opposed to being a pure effect, e.g. a store)?
1041 bool IsValue() const
1043 if ((OperKind(gtOper) & GTK_NOVALUE) != 0)
1048 if (gtType == TYP_VOID)
1050 // These are the only operators which can produce either VOID or non-VOID results.
1051 assert(OperIs(GT_NOP, GT_CALL, GT_COMMA) || OperIsCompare() || OperIsLong() || OperIsHWIntrinsic() ||
// True if this node can never be a GC pointer definition (zero constant or local address).
1059 bool IsNotGcDef() const
1061 return IsIntegralConst(0) || OperIs(GT_LCL_ADDR);
1065 // These helper methods, along with the flag values they manipulate, are defined in lir.h
1067 // UnusedValue indicates that, although this node produces a value, it is unused.
1068 inline void SetUnusedValue();
1069 inline void ClearUnusedValue();
1070 inline bool IsUnusedValue() const;
1071 // RegOptional indicates that codegen can still generate code even if it isn't allocated a register.
1072 inline bool IsRegOptional() const;
1073 inline void SetRegOptional();
1074 inline void ClearRegOptional();
1076 void dumpLIRFlags();
// Type/oper membership tests. The variadic overloads expand recursively so that
// TypeIs(a, b, c) / OperIs(a, b, c) mean "matches any of these".
1079 bool TypeIs(var_types type) const
1081 return gtType == type;
1084 template <typename... T>
1085 bool TypeIs(var_types type, T... rest) const
1087 return TypeIs(type) || TypeIs(rest...);
// Compile-time oper comparison against a list of opers.
1090 static constexpr bool StaticOperIs(genTreeOps operCompare, genTreeOps oper)
1092 return operCompare == oper;
1095 template <typename... T>
1096 static bool StaticOperIs(genTreeOps operCompare, genTreeOps oper, T... rest)
1098 return StaticOperIs(operCompare, oper) || StaticOperIs(operCompare, rest...);
1101 bool OperIs(genTreeOps oper) const
1103 return OperGet() == oper;
1106 template <typename... T>
1107 bool OperIs(genTreeOps oper, T... rest) const
1109 return OperIs(oper) || OperIs(rest...);
// Range checks below rely on the listed opers being declared contiguously;
// the static_asserts guard against enum reordering.
1112 static bool OperIsConst(genTreeOps gtOper)
1114 static_assert_no_msg(AreContiguous(GT_CNS_INT, GT_CNS_LNG, GT_CNS_DBL, GT_CNS_STR, GT_CNS_VEC));
1115 return (GT_CNS_INT <= gtOper) && (gtOper <= GT_CNS_VEC);
1118 bool OperIsConst() const
1120 return OperIsConst(gtOper);
1123 static bool OperIsLeaf(genTreeOps gtOper)
1125 return (OperKind(gtOper) & GTK_LEAF) != 0;
1128 bool OperIsLeaf() const
1130 return (OperKind(gtOper) & GTK_LEAF) != 0;
1133 static bool OperIsLocal(genTreeOps gtOper)
1135 static_assert_no_msg(AreContiguous(GT_PHI_ARG, GT_LCL_VAR, GT_LCL_FLD, GT_STORE_LCL_VAR, GT_STORE_LCL_FLD));
1136 return (GT_PHI_ARG <= gtOper) && (gtOper <= GT_STORE_LCL_FLD);
// "AnyLocal" additionally includes GT_LCL_ADDR (local address-taken nodes).
1139 static bool OperIsAnyLocal(genTreeOps gtOper)
1141 static_assert_no_msg(
1142 AreContiguous(GT_PHI_ARG, GT_LCL_VAR, GT_LCL_FLD, GT_STORE_LCL_VAR, GT_STORE_LCL_FLD, GT_LCL_ADDR));
1143 return (GT_PHI_ARG <= gtOper) && (gtOper <= GT_LCL_ADDR);
1146 static bool OperIsLocalField(genTreeOps gtOper)
1148 return (gtOper == GT_LCL_FLD || gtOper == GT_LCL_ADDR || gtOper == GT_STORE_LCL_FLD);
1151 bool OperIsLocalField() const
1153 return OperIsLocalField(gtOper);
1156 static bool OperIsScalarLocal(genTreeOps gtOper)
1158 return (gtOper == GT_LCL_VAR || gtOper == GT_STORE_LCL_VAR);
1161 static bool OperIsNonPhiLocal(genTreeOps gtOper)
1163 return OperIsLocal(gtOper) && (gtOper != GT_PHI_ARG);
1166 static bool OperIsLocalRead(genTreeOps gtOper)
1168 return (OperIsLocal(gtOper) && !OperIsLocalStore(gtOper));
1171 static bool OperIsLocalStore(genTreeOps gtOper)
1173 return (gtOper == GT_STORE_LCL_VAR || gtOper == GT_STORE_LCL_FLD);
1176 static bool OperIsAddrMode(genTreeOps gtOper)
1178 return (gtOper == GT_LEA);
1181 static bool OperIsInitVal(genTreeOps gtOper)
1183 return (gtOper == GT_INIT_VAL);
1186 bool OperIsInitVal() const
1188 return OperIsInitVal(OperGet());
1191 bool IsInitVal() const
1193 return IsIntegralConst(0) || OperIsInitVal();
1196 bool IsConstInitVal() const
1198 return (gtOper == GT_CNS_INT) || (OperIsInitVal() && (gtGetOp1()->gtOper == GT_CNS_INT));
// Block (struct copy/init) operation predicates.
1202 bool OperIsCopyBlkOp();
1203 bool OperIsInitBlkOp();
1205 static bool OperIsBlk(genTreeOps gtOper)
1207 return (gtOper == GT_BLK) || OperIsStoreBlk(gtOper);
1210 bool OperIsBlk() const
1212 return OperIsBlk(OperGet());
1215 static bool OperIsStoreBlk(genTreeOps gtOper)
1217 return StaticOperIs(gtOper, GT_STORE_BLK, GT_STORE_DYN_BLK);
1220 bool OperIsStoreBlk() const
1222 return OperIsStoreBlk(OperGet());
// Call-argument placement predicates. PUTARG_SPLIT only exists on targets with
// FEATURE_ARG_SPLIT (argument split between registers and stack).
1225 bool OperIsPutArgSplit() const
1227 #if FEATURE_ARG_SPLIT
1228 assert((gtOper != GT_PUTARG_SPLIT) || compFeatureArgSplit());
1229 return gtOper == GT_PUTARG_SPLIT;
1230 #else // !FEATURE_ARG_SPLIT
1235 bool OperIsPutArgStk() const
1237 return gtOper == GT_PUTARG_STK;
1240 bool OperIsPutArgStkOrSplit() const
1242 return OperIsPutArgStk() || OperIsPutArgSplit();
1245 bool OperIsPutArgReg() const
1247 return gtOper == GT_PUTARG_REG;
1250 bool OperIsPutArg() const
1252 return OperIsPutArgStk() || OperIsPutArgReg() || OperIsPutArgSplit();
1255 bool OperIsFieldList() const
1257 return OperIs(GT_FIELD_LIST);
// Multi-register ops only occur on 32-bit targets (e.g. MUL_LONG) and ARM.
1260 bool OperIsMultiRegOp() const
1262 #if !defined(TARGET_64BIT)
1263 if (OperIs(GT_MUL_LONG))
1267 #if defined(TARGET_ARM)
1268 if (OperIs(GT_PUTARG_REG, GT_BITCAST))
1272 #endif // TARGET_ARM
1273 #endif // TARGET_64BIT
// Instance-method wrappers over the static predicates above.
1277 bool OperIsAddrMode() const
1279 return OperIsAddrMode(OperGet());
1282 bool OperIsLocal() const
1284 return OperIsLocal(OperGet());
1287 bool OperIsAnyLocal() const
1289 return OperIsAnyLocal(OperGet());
1292 bool OperIsScalarLocal() const
1294 return OperIsScalarLocal(OperGet());
1297 bool OperIsNonPhiLocal() const
1299 return OperIsNonPhiLocal(OperGet());
1302 bool OperIsLocalStore() const
1304 return OperIsLocalStore(OperGet());
1307 bool OperIsLocalRead() const
1309 return OperIsLocalRead(OperGet());
1312 static bool OperIsCompare(genTreeOps gtOper)
1314 // Note that only GT_EQ to GT_GT are HIR nodes, GT_TEST and GT_BITTEST
1315 // nodes are backend nodes only.
1316 CLANG_FORMAT_COMMENT_ANCHOR;
1318 static_assert_no_msg(AreContiguous(GT_EQ, GT_NE, GT_LT, GT_LE, GT_GE, GT_GT, GT_TEST_EQ, GT_TEST_NE,
1319 GT_BITTEST_EQ, GT_BITTEST_NE));
1320 return (GT_EQ <= gtOper) && (gtOper <= GT_BITTEST_NE);
1322 static_assert_no_msg(AreContiguous(GT_EQ, GT_NE, GT_LT, GT_LE, GT_GE, GT_GT, GT_TEST_EQ, GT_TEST_NE));
1323 return (GT_EQ <= gtOper) && (gtOper <= GT_TEST_NE);
1327 bool OperIsCompare() const
1329 return OperIsCompare(OperGet());
1332 // Oper is a compare that generates a cmp instruction (as opposed to a test instruction).
1333 static bool OperIsCmpCompare(genTreeOps gtOper)
1335 static_assert_no_msg(AreContiguous(GT_EQ, GT_NE, GT_LT, GT_LE, GT_GE, GT_GT));
1336 return (GT_EQ <= gtOper) && (gtOper <= GT_GT);
1339 bool OperIsCmpCompare() const
1341 return OperIsCmpCompare(OperGet());
1344 static bool OperIsConditional(genTreeOps gtOper)
1346 return (GT_SELECT == gtOper);
1349 bool OperIsConditional() const
1351 return OperIsConditional(OperGet());
// CC nodes consume a previously-set condition-flags value.
1354 static bool OperIsCC(genTreeOps gtOper)
1356 return (gtOper == GT_JCC) || (gtOper == GT_SETCC);
1359 bool OperIsCC() const
1361 return OperIsCC(OperGet());
// Shift/rotate predicates. "ShiftLong" covers the 32-bit-target long-shift helpers.
1364 static bool OperIsShift(genTreeOps gtOper)
1366 return (gtOper == GT_LSH) || (gtOper == GT_RSH) || (gtOper == GT_RSZ);
1369 bool OperIsShift() const
1371 return OperIsShift(OperGet());
1374 static bool OperIsShiftLong(genTreeOps gtOper)
1379 return (gtOper == GT_LSH_HI) || (gtOper == GT_RSH_LO);
1383 bool OperIsShiftLong() const
1385 return OperIsShiftLong(OperGet());
1388 static bool OperIsRotate(genTreeOps gtOper)
1390 return (gtOper == GT_ROL) || (gtOper == GT_ROR);
1393 bool OperIsRotate() const
1395 return OperIsRotate(OperGet());
1398 static bool OperIsShiftOrRotate(genTreeOps gtOper)
1400 return OperIsShift(gtOper) || OperIsRotate(gtOper) || OperIsShiftLong(gtOper);
1403 bool OperIsShiftOrRotate() const
1405 return OperIsShiftOrRotate(OperGet());
// GT_MUL_LONG exists on 32-bit targets and ARM64.
1408 static bool OperIsMul(genTreeOps gtOper)
1410 return (gtOper == GT_MUL) || (gtOper == GT_MULHI)
1411 #if !defined(TARGET_64BIT) || defined(TARGET_ARM64)
1412 || (gtOper == GT_MUL_LONG)
1417 bool OperIsMul() const
1419 return OperIsMul(gtOper);
1422 bool OperIsArithmetic() const
1424 genTreeOps op = OperGet();
1425 return op == GT_ADD || op == GT_SUB || op == GT_MUL || op == GT_DIV || op == GT_MOD
1427 || op == GT_UDIV || op == GT_UMOD
1429 || op == GT_OR || op == GT_XOR || op == GT_AND
1431 || OperIsShiftOrRotate(op);
// Read-modify-write memory operations (xarch-specific; see #endif below).
1435 static bool OperIsRMWMemOp(genTreeOps gtOper)
1437 // Return if binary op is one of the supported operations for RMW of memory.
1438 return (gtOper == GT_ADD || gtOper == GT_SUB || gtOper == GT_AND || gtOper == GT_OR || gtOper == GT_XOR ||
1439 gtOper == GT_NOT || gtOper == GT_NEG || OperIsShiftOrRotate(gtOper));
1441 bool OperIsRMWMemOp() const
1443 // Return if binary op is one of the supported operations for RMW of memory.
1444 return OperIsRMWMemOp(gtOper);
1446 #endif // TARGET_XARCH
// Arity predicates, driven by the GTK_* kind bits.
1448 static bool OperIsUnary(genTreeOps gtOper)
1450 return (OperKind(gtOper) & GTK_UNOP) != 0;
1453 bool OperIsUnary() const
1455 return OperIsUnary(gtOper);
1458 static bool OperIsBinary(genTreeOps gtOper)
1460 return (OperKind(gtOper) & GTK_BINOP) != 0;
1463 bool OperIsBinary() const
1465 return OperIsBinary(gtOper);
1468 static bool OperIsSimple(genTreeOps gtOper)
1470 return (OperKind(gtOper) & GTK_SMPOP) != 0;
1473 static bool OperIsSpecial(genTreeOps gtOper)
1475 return ((OperKind(gtOper) & GTK_KINDMASK) == GTK_SPECIAL);
1478 bool OperIsSimple() const
1480 return OperIsSimple(gtOper);
// HW-intrinsic queries; real implementations only exist when FEATURE_HW_INTRINSICS
// is defined, otherwise the stub overloads below are used.
1483 #ifdef FEATURE_HW_INTRINSICS
1484 bool isCommutativeHWIntrinsic() const;
1485 bool isContainableHWIntrinsic() const;
1486 bool isRMWHWIntrinsic(Compiler* comp);
1487 bool isEvexCompatibleHWIntrinsic();
1489 bool isCommutativeHWIntrinsic() const
1494 bool isContainableHWIntrinsic() const
1499 bool isRMWHWIntrinsic(Compiler* comp)
1504 bool isEvexCompatibleHWIntrinsic()
1508 #endif // FEATURE_HW_INTRINSICS
1510 static bool OperIsCommutative(genTreeOps gtOper)
1512 return (OperKind(gtOper) & GTK_COMMUTE) != 0;
1515 bool OperIsCommutative()
1517 return OperIsCommutative(gtOper) || (OperIsHWIntrinsic(gtOper) && isCommutativeHWIntrinsic());
// Opers that can carry the GTF_OVERFLOW flag (checked arithmetic / casts).
1520 static bool OperMayOverflow(genTreeOps gtOper)
1522 return ((gtOper == GT_ADD) || (gtOper == GT_SUB) || (gtOper == GT_MUL) || (gtOper == GT_CAST)
1523 #if !defined(TARGET_64BIT)
1524 || (gtOper == GT_ADD_HI) || (gtOper == GT_SUB_HI)
1529 bool OperMayOverflow() const
1531 return OperMayOverflow(gtOper)
1534 // This returns true only for GT_IND and GT_STOREIND, and is used in contexts where a "true"
1535 // indirection is expected (i.e. either a load to or a store from a single register).
1536 // OperIsIndir() returns true also for indirection nodes such as GT_BLK, etc. as well as GT_NULLCHECK.
1537 static bool OperIsIndir(genTreeOps gtOper)
1539 static_assert_no_msg(AreContiguous(GT_IND, GT_STOREIND, GT_BLK, GT_STORE_BLK, GT_STORE_DYN_BLK, GT_NULLCHECK));
1540 return (GT_IND <= gtOper) && (gtOper <= GT_NULLCHECK);
// Array meta-data accesses: SZ-array length, MD-array length / lower bound.
1543 static bool OperIsArrLength(genTreeOps gtOper)
1545 return (gtOper == GT_ARR_LENGTH) || (gtOper == GT_MDARR_LENGTH);
1548 static bool OperIsMDArr(genTreeOps gtOper)
1550 return (gtOper == GT_MDARR_LENGTH) || (gtOper == GT_MDARR_LOWER_BOUND);
1553 // Is this an access of an SZ array length, MD array length, or MD array lower bounds?
1554 static bool OperIsArrMetaData(genTreeOps gtOper)
1556 return (gtOper == GT_ARR_LENGTH) || (gtOper == GT_MDARR_LENGTH) || (gtOper == GT_MDARR_LOWER_BOUND);
1559 static bool OperIsIndirOrArrMetaData(genTreeOps gtOper)
1561 return OperIsIndir(gtOper) || OperIsArrMetaData(gtOper);
1564 bool OperIsIndir() const
1566 return OperIsIndir(gtOper);
1569 bool OperIsArrLength() const
1571 return OperIsArrLength(gtOper);
1574 bool OperIsMDArr() const
1576 return OperIsMDArr(gtOper);
1579 bool OperIsIndirOrArrMetaData() const
1581 return OperIsIndirOrArrMetaData(gtOper);
1584 // Helper function to return the array reference of an array length node.
1585 GenTree* GetArrLengthArrRef();
1587 // Helper function to return the address of an indir or array meta-data node.
1588 GenTree* GetIndirOrArrMetaDataAddr();
1590 bool IndirMayFault(Compiler* compiler);
1592 bool OperIsImplicitIndir() const;
// Atomic read-modify-write / interlocked operations.
1594 static bool OperIsAtomicOp(genTreeOps gtOper)
1610 bool OperIsAtomicOp() const
1612 return OperIsAtomicOp(gtOper);
1615 static bool OperIsStore(genTreeOps gtOper)
1617 return (OperKind(gtOper) & GTK_STORE) != 0;
1620 bool OperIsStore() const
1622 return OperIsStore(gtOper);
// "MultiOp" nodes have a variable operand count (currently only HW intrinsics).
1625 static bool OperIsMultiOp(genTreeOps gtOper)
1627 return OperIsHWIntrinsic(gtOper);
1630 bool OperIsMultiOp() const
1632 return OperIsMultiOp(OperGet());
// Opers that can define an SSA name: local stores and calls.
1635 bool OperIsSsaDef() const
1637 return OperIsLocalStore() || OperIs(GT_CALL);
1640 static bool OperIsHWIntrinsic(genTreeOps gtOper)
1642 #ifdef FEATURE_HW_INTRINSICS
1643 return gtOper == GT_HWINTRINSIC;
1646 #endif // FEATURE_HW_INTRINSICS
1649 bool OperIsHWIntrinsic() const
1651 return OperIsHWIntrinsic(gtOper);
1654 bool OperIsHWIntrinsic(NamedIntrinsic intrinsicId) const;
1656 // This is here for cleaner GT_LONG #ifdefs.
1657 static bool OperIsLong(genTreeOps gtOper)
1659 #if defined(TARGET_64BIT)
1662 return gtOper == GT_LONG;
1666 bool OperIsLong() const
1668 return OperIsLong(gtOper);
1671 bool OperIsConditionalJump() const
1673 return OperIs(GT_JTRUE, GT_JCMP, GT_JTEST, GT_JCC);
// Does this node read the condition flags set by a preceding node?
1676 bool OperConsumesFlags() const
1678 #if !defined(TARGET_64BIT)
1679 if (OperIs(GT_ADD_HI, GT_SUB_HI))
1684 #if defined(TARGET_ARM64)
1685 if (OperIs(GT_CCMP, GT_SELECT_INCCC, GT_SELECT_INVCC, GT_SELECT_NEGCC))
1690 return OperIs(GT_JCC, GT_SETCC, GT_SELECTCC);
// DEBUG-only per-oper kind bits (DBK_*), used for IR validity checks.
1694 static const GenTreeDebugOperKind gtDebugOperKindTable[];
1696 static GenTreeDebugOperKind DebugOperKind(genTreeOps oper)
1698 assert(oper < GT_COUNT);
1700 return gtDebugOperKindTable[oper];
1703 GenTreeDebugOperKind DebugOperKind() const
1705 return DebugOperKind(OperGet());
// May a simple-op node legally have a null op1/op2? Used by IR validation.
1708 bool NullOp1Legal() const
1710 assert(OperIsSimple());
1719 return gtType == TYP_VOID;
1725 bool NullOp2Legal() const
1727 assert(OperIsSimple(gtOper) || OperIsBlk(gtOper));
1728 if (!OperIsBinary(gtOper))
1736 #if defined(TARGET_ARM)
1738 #endif // defined(TARGET_ARM)
1739 #if defined(TARGET_ARM64)
1740 case GT_SELECT_NEGCC:
1741 case GT_SELECT_INCCC:
1742 #endif // defined(TARGET_ARM64)
// Is this oper permitted in LIR (backend linear IR)?
1750 bool OperIsLIR() const
1754 // NOPs may only be present in LIR if they do not produce a value.
1755 return IsNothingNode();
1758 return (DebugOperKind() & DBK_NOTLIR) == 0;
1766 inline bool IsIntegralConst(ssize_t constVal) const;
1767 inline bool IsFloatAllBitsSet() const;
1768 inline bool IsFloatNaN() const;
1769 inline bool IsFloatPositiveZero() const;
1770 inline bool IsFloatNegativeZero() const;
1771 inline bool IsVectorZero() const;
1772 inline bool IsVectorCreate() const;
1773 inline bool IsVectorAllBitsSet() const;
1774 inline bool IsVectorConst();
1776 inline uint64_t GetIntegralVectorConstElement(size_t index, var_types simdBaseType);
1778 inline bool IsBoxedValue();
1780 inline GenTree* gtGetOp1() const;
1782 // Directly return op2. Asserts the node is binary. Might return nullptr if the binary node allows
1783 // a nullptr op2, such as GT_LEA. This is more efficient than gtGetOp2IfPresent() if you know what
1784 // node type you have.
1785 inline GenTree* gtGetOp2() const;
1787 // The returned pointer might be nullptr if the node is not binary, or if non-null op2 is not required.
1788 inline GenTree* gtGetOp2IfPresent() const;
1790 inline GenTree*& Data();
1792 bool TryGetUse(GenTree* operand, GenTree*** pUse);
1794 bool TryGetUse(GenTree* operand)
1796 GenTree** unusedUse = nullptr;
1797 return TryGetUse(operand, &unusedUse);
1801 bool TryGetUseBinOp(GenTree* operand, GenTree*** pUse);
1804 GenTree* gtGetParent(GenTree*** pUse);
1806 void ReplaceOperand(GenTree** useEdge, GenTree* replacement);
1808 inline GenTree* gtEffectiveVal(bool commaOnly = false);
1810 inline GenTree* gtCommaStoreVal();
1812 // Return the child of this node if it is a GT_RELOAD or GT_COPY; otherwise simply return the node itself
1813 inline GenTree* gtSkipReloadOrCopy();
1815 // Returns true if it is a call node returning its value in more than one register
1816 inline bool IsMultiRegCall() const;
1818 // Returns true if it is a struct lclVar node residing in multiple registers.
1819 inline bool IsMultiRegLclVar() const;
1821 // Returns true if it is a node returning its value in more than one register
1822 bool IsMultiRegNode() const;
1824 // Returns the number of registers defined by a multireg node.
1825 unsigned GetMultiRegCount(Compiler* comp) const;
1827 // Returns the regIndex'th register defined by a possibly-multireg node.
1828 regNumber GetRegByIndex(int regIndex) const;
1830 // Returns the type of the regIndex'th register defined by a multi-reg node.
1831 var_types GetRegTypeByIndex(int regIndex) const;
1833 // Returns the GTF flag equivalent for the regIndex'th register of a multi-reg node.
1834 GenTreeFlags GetRegSpillFlagByIdx(int regIndex) const;
1836 // Sets the GTF flag equivalent for the regIndex'th register of a multi-reg node.
1837 void SetRegSpillFlagByIdx(GenTreeFlags flags, int regIndex);
1839 // Last-use information for either GenTreeLclVar or GenTreeCopyOrReload nodes.
1841 GenTreeFlags GetLastUseBit(int regIndex) const;
1844 bool IsLastUse(int fieldIndex) const;
1845 bool HasLastUse() const;
1846 void SetLastUse(int fieldIndex);
1847 void ClearLastUse(int fieldIndex);
1849 // Returns true if it is a GT_COPY or GT_RELOAD node
1850 inline bool IsCopyOrReload() const;
1852 // Returns true if it is a GT_COPY or GT_RELOAD of a multi-reg call node
1853 inline bool IsCopyOrReloadOfMultiRegCall() const;
1855 bool OperRequiresAsgFlag();
1857 bool OperRequiresCallFlag(Compiler* comp);
1859 bool OperMayThrow(Compiler* comp);
1860 ExceptionSetFlags OperExceptions(Compiler* comp);
1862 unsigned GetScaleIndexMul();
1863 unsigned GetScaleIndexShf();
1864 unsigned GetScaledIndex();
1867 static unsigned char s_gtNodeSizes[];
1868 #if NODEBASH_STATS || MEASURE_NODE_SIZE || COUNT_AST_OPERS
1869 static unsigned char s_gtTrueSizes[];
1872 static unsigned s_gtNodeCounts[];
1875 static void InitNodeSize();
1877 size_t GetNodeSize() const;
1879 bool IsNodeProperlySized() const;
1881 void ReplaceWith(GenTree* src, Compiler* comp);
1883 static genTreeOps ReverseRelop(genTreeOps relop);
1885 static genTreeOps SwapRelop(genTreeOps relop);
1887 //---------------------------------------------------------------------
// Structural equality of two trees; 'swapOK' permits commutative-operand swaps.
1889 static bool Compare(GenTree* op1, GenTree* op2, bool swapOK = false);
1891 //---------------------------------------------------------------------
1893 #if defined(DEBUG) || NODEBASH_STATS || MEASURE_NODE_SIZE || COUNT_AST_OPERS || DUMP_FLOWGRAPHS
1894 static const char* OpName(genTreeOps op);
1897 #if MEASURE_NODE_SIZE
1898 static const char* OpStructName(genTreeOps op);
1901 //---------------------------------------------------------------------
1903 bool IsNothingNode() const;
1906 // Value number update action enumeration
1907 enum ValueNumberUpdate
1909 CLEAR_VN, // Clear value number
1910 PRESERVE_VN // Preserve value number
// Oper mutation. SetOper/ChangeOper optionally preserve the value number;
// SetOperRaw changes only the oper field.
1913 void SetOper(genTreeOps oper, ValueNumberUpdate vnUpdate = CLEAR_VN);
1914 void ChangeOper(genTreeOps oper, ValueNumberUpdate vnUpdate = CLEAR_VN);
1915 void SetOperRaw(genTreeOps oper);
// Retype this node; for a GT_COMMA chain the new type is propagated down the
// op2 spine so the comma's type stays consistent with its value operand.
1917 void ChangeType(var_types newType)
1919 var_types oldType = gtType;
1921 GenTree* node = this;
1922 while (node->gtOper == GT_COMMA)
1924 node = node->gtGetOp2();
1925 if (node->gtType != newType)
1927 assert(node->gtType == oldType);
1928 node->gtType = newType;
// "Bashing" rewrites this node in place into a different node kind.
1933 template <typename T>
1934 void BashToConst(T value, var_types type = TYP_UNDEF);
1935 void BashToZeroConst(var_types type);
1936 GenTreeLclVar* BashToLclVar(Compiler* comp, unsigned lclNum);
1939 static void RecordOperBashing(genTreeOps operOld, genTreeOps operNew);
1940 static void ReportOperBashing(FILE* fp);
1942 static void RecordOperBashing(genTreeOps operOld, genTreeOps operNew)
1945 static void ReportOperBashing(FILE* fp)
1950 bool IsLocal() const
1952 return OperIsLocal(OperGet());
1955 bool IsAnyLocal() const
1957 return OperIsAnyLocal(OperGet());
1960 bool IsLclVarAddr() const;
1962 // Returns "true" iff 'this' is a GT_LCL_FLD or GT_STORE_LCL_FLD on which the type
1963 // is not the same size as the type of the GT_LCL_VAR.
1964 bool IsPartialLclFld(Compiler* comp);
// Does this node define a local? Optionally reports the defined local node,
// whether the whole local is defined, and the store's offset/size.
1966 bool DefinesLocal(Compiler* comp,
1967 GenTreeLclVarCommon** pLclVarTree,
1968 bool* pIsEntire = nullptr,
1969 ssize_t* pOffset = nullptr,
1970 unsigned* pSize = nullptr);
1972 GenTreeLclVarCommon* IsImplicitByrefParameterValuePreMorph(Compiler* compiler);
1973 GenTreeLclVar* IsImplicitByrefParameterValuePostMorph(Compiler* compiler, GenTree** addr);
1975 // Determine whether this is an assignment tree of the form X = X (op) Y,
1976 // where Y is an arbitrary tree, and X is a lclVar.
1977 unsigned IsLclVarUpdateTree(GenTree** otherTree, genTreeOps* updateOper);
1979 // Determine whether this tree is a basic block profile count update.
1980 bool IsBlockProfileUpdate();
1982 bool IsFieldAddr(Compiler* comp, GenTree** pBaseAddr, FieldSeq** pFldSeq, ssize_t* pOffset);
1984 bool IsArrayAddr(GenTreeArrAddr** pArrAddr);
1986 bool SupportsSettingZeroFlag();
1988 // These are only used for dumping.
1989 // The GetRegNum() is only valid in LIR, but the dumping methods are not easily
1990 // modified to check this.
1991 CLANG_FORMAT_COMMENT_ANCHOR;
1996 return (GetRegTag() != GT_REGTAG_NONE) ? true : false;
1998 regNumber GetReg() const
2000 return (GetRegTag() != GT_REGTAG_NONE) ? GetRegNum() : REG_NA;
// Flag manipulation helpers. Each Set*/Clear* pair toggles a single GTF_* bit;
// the asserts document which opers/states the flag is valid for.
2004 static bool IsContained(unsigned flags)
2006 return ((flags & GTF_CONTAINED) != 0);
2012 gtFlags |= GTF_CONTAINED;
2013 assert(isContained());
2016 void ClearContained()
2019 gtFlags &= ~GTF_CONTAINED;
// CSE eligibility: GTF_DONT_CSE set means this node must not be CSE'd.
2025 return ((gtFlags & GTF_DONT_CSE) == 0);
2030 gtFlags |= GTF_DONT_CSE;
2033 void ClearDoNotCSE()
2035 gtFlags &= ~GTF_DONT_CSE;
// GTF_REVERSE_OPS: evaluate op2 before op1.
2038 bool IsReverseOp() const
2040 return (gtFlags & GTF_REVERSE_OPS) ? true : false;
2045 gtFlags |= GTF_REVERSE_OPS;
2048 void ClearReverseOp()
2050 gtFlags &= ~GTF_REVERSE_OPS;
// xarch-only: GTF_DONT_EXTEND suppresses widening of a small-typed load.
2053 #if defined(TARGET_XARCH)
2054 void SetDontExtend()
2056 assert(varTypeIsSmall(TypeGet()) && OperIs(GT_IND, GT_LCL_FLD));
2057 gtFlags |= GTF_DONT_EXTEND;
2060 void ClearDontExtend()
2062 gtFlags &= ~GTF_DONT_EXTEND;
2065 bool DontExtend() const
2067 assert(varTypeIsSmall(TypeGet()) || ((gtFlags & GTF_DONT_EXTEND) == 0));
2068 return (gtFlags & GTF_DONT_EXTEND) != 0;
2070 #endif // TARGET_XARCH
2072 bool IsUnsigned() const
2074 return ((gtFlags & GTF_UNSIGNED) != 0);
2079 assert(OperIs(GT_ADD, GT_SUB, GT_CAST, GT_LE, GT_LT, GT_GT, GT_GE) || OperIsMul());
2080 gtFlags |= GTF_UNSIGNED;
2083 void ClearUnsigned()
2085 assert(OperIs(GT_ADD, GT_SUB, GT_CAST) || OperIsMul());
2086 gtFlags &= ~GTF_UNSIGNED;
// GTF_OVERFLOW is only meaningful on opers for which OperMayOverflow() holds.
2091 assert(OperMayOverflow());
2092 gtFlags |= GTF_OVERFLOW;
2095 void ClearOverflow()
2097 assert(OperMayOverflow());
2098 gtFlags &= ~GTF_OVERFLOW;
2101 bool Is64RsltMul() const
2103 return (gtFlags & GTF_MUL_64RSLT) != 0;
2108 gtFlags |= GTF_MUL_64RSLT;
2111 void Clear64RsltMul()
2113 gtFlags &= ~GTF_MUL_64RSLT;
// Replace (Set*) or union-in (Add*) this node's GTF_ALL_EFFECT bits from sources.
2116 void SetAllEffectsFlags(GenTree* source)
2118 SetAllEffectsFlags(source->gtFlags & GTF_ALL_EFFECT);
2121 void SetAllEffectsFlags(GenTree* firstSource, GenTree* secondSource)
2123 SetAllEffectsFlags((firstSource->gtFlags | secondSource->gtFlags) & GTF_ALL_EFFECT);
2126 void SetAllEffectsFlags(GenTree* firstSource, GenTree* secondSource, GenTree* thirdSource)
2128 SetAllEffectsFlags((firstSource->gtFlags | secondSource->gtFlags | thirdSource->gtFlags) & GTF_ALL_EFFECT);
2131 void SetAllEffectsFlags(GenTreeFlags sourceFlags)
2133 assert((sourceFlags & ~GTF_ALL_EFFECT) == 0);
2135 gtFlags &= ~GTF_ALL_EFFECT;
2136 gtFlags |= sourceFlags;
2139 void AddAllEffectsFlags(GenTree* source)
2141 AddAllEffectsFlags(source->gtFlags & GTF_ALL_EFFECT);
2144 void AddAllEffectsFlags(GenTree* firstSource, GenTree* secondSource)
2146 AddAllEffectsFlags((firstSource->gtFlags | secondSource->gtFlags) & GTF_ALL_EFFECT);
2149 void AddAllEffectsFlags(GenTreeFlags sourceFlags)
2151 assert((sourceFlags & ~GTF_ALL_EFFECT) == 0);
2152 gtFlags |= sourceFlags;
// Constant-node classification (definitions are inline, elsewhere).
2155 inline bool IsCnsIntOrI() const;
2157 inline bool IsIntegralConst() const;
2159 inline bool IsIntegralConstPow2() const;
2161 inline bool IsIntegralConstUnsignedPow2() const;
2163 inline bool IsIntegralConstAbsPow2() const;
2165 inline bool IsIntCnsFitsInI32(); // Constant fits in INT32
2167 inline bool IsCnsFltOrDbl() const;
2169 inline bool IsCnsNonZeroFltOrDbl() const;
2171 inline bool IsCnsVec() const;
// Handle constants: GT_CNS_INT nodes whose GTF_ICON_HDL_MASK bits classify the
// kind of runtime handle (class, method, field, string, ...).
2173 bool IsIconHandle() const
2175 return (gtOper == GT_CNS_INT) && ((gtFlags & GTF_ICON_HDL_MASK) != 0);
2178 bool IsIconHandle(GenTreeFlags handleType) const
2180 // check that handleType is one of the valid GTF_ICON_* values
2181 assert((handleType & GTF_ICON_HDL_MASK) != 0);
2182 assert((handleType & ~GTF_ICON_HDL_MASK) == 0);
2183 return (gtOper == GT_CNS_INT) && ((gtFlags & GTF_ICON_HDL_MASK) == handleType);
2186 template <typename... T>
2187 bool IsIconHandle(GenTreeFlags handleType, T... rest) const
2189 return IsIconHandle(handleType) || IsIconHandle(rest...);
2192 // Return just the part of the flags corresponding to the GTF_ICON_*_HDL flag.
2193 // For non-icon handle trees, returns GTF_EMPTY.
2194 GenTreeFlags GetIconHandleFlag() const
2196 return (gtOper == GT_CNS_INT) ? (gtFlags & GTF_ICON_HDL_MASK) : GTF_EMPTY;
2199 // Mark this node as no longer being a handle; clear its GTF_ICON_*_HDL bits.
2200 void ClearIconHandleMask()
2202 assert(gtOper == GT_CNS_INT);
2203 gtFlags &= ~GTF_ICON_HDL_MASK;
2208 return OperGet() == GT_CALL;
2210 inline bool IsHelperCall();
// Overflow/flags queries for checked arithmetic and flag-setting codegen.
2212 bool gtOverflow() const;
2213 bool gtOverflowEx() const;
2214 bool gtSetFlags() const;
2215 bool gtRequestSetFlags();
2218 static int gtDispFlags(GenTreeFlags flags, GenTreeDebugFlags debugFlags);
2222 inline var_types CastFromType();
2223 inline var_types& CastToType();
2225 // Returns "true" iff "this" is a phi-related node (i.e. a GT_PHI_ARG, GT_PHI, or a PhiDefn).
2228 // Returns "true" iff "*this" is a store (GT_STORE_LCL_VAR) tree that defines an SSA name (lcl = phi(...));
2231 // Because of the fact that we hid the assignment operator of "BitSet" (in DEBUG),
2232 // we can't synthesize an assignment operator.
2233 // TODO-Cleanup: Could change this w/o liveset on tree nodes
2234 // (This is also necessary for the VTable trick.)
2239 // Returns an iterator that will produce the use edge to each operand of this node. Differs
2240 // from the sequence of nodes produced by a loop over `GetChild` in its handling of call, phi,
2241 // and block op nodes.
2242 GenTreeUseEdgeIterator UseEdgesBegin();
2243 GenTreeUseEdgeIterator UseEdgesEnd();
2245 IteratorPair<GenTreeUseEdgeIterator> UseEdges();
2247 // Returns an iterator that will produce each operand of this node, in execution order.
2248 GenTreeOperandIterator OperandsBegin();
2249 GenTreeOperandIterator OperandsEnd();
2251 // Returns a range that will produce the operands of this node in execution order.
2252 IteratorPair<GenTreeOperandIterator> Operands();
// Result returned by an operand-visitor callback to continue or stop the walk.
2254 enum class VisitResult
2260 // Visits each operand of this node. The operand must be either a lambda, function, or functor with the signature
2261 // `GenTree::VisitResult VisitorFunction(GenTree* operand)`. Here is a simple example:
2263 // unsigned operandCount = 0;
2264 // node->VisitOperands([&](GenTree* operand) -> GenTree::VisitResult)
2267 // return GenTree::VisitResult::Continue;
2270 // This function is generally more efficient than the operand iterator and should be preferred over that API for
2271 // hot code, as it affords better opportunities for inlining and achieves shorter dynamic path lengths when
2272 // deciding how operands need to be accessed.
2274 // Note that this function does not respect `GTF_REVERSE_OPS`. This is always safe in LIR, but may be dangerous
2275 // in HIR if for some reason you need to visit operands in the order in which they will execute.
2276 template <typename TVisitor>
2277 void VisitOperands(TVisitor visitor);
2280 template <typename TVisitor>
2281 void VisitBinOpOperands(TVisitor visitor);
2284 bool Precedes(GenTree* other);
2286 bool IsInvariant() const;
// Value-range facts used by optimizations (e.g. div/mod transforms).
2288 bool IsNeverNegative(Compiler* comp) const;
2289 bool IsNeverNegativeOne(Compiler* comp) const;
2290 bool IsNeverZero() const;
2291 bool CanDivOrModPossiblyOverflow(Compiler* comp) const;
// GTF_REUSE_REG_VAL: the register already holds this constant, so codegen may skip
// re-materializing it. Only valid on constant nodes.
2293 bool IsReuseRegVal() const
2295 // This can be extended to non-constant nodes, but not to local or indir nodes.
2296 return OperIsConst() && ((gtFlags & GTF_REUSE_REG_VAL) != 0);
2299 void SetReuseRegVal()
2301 assert(OperIsConst());
2302 gtFlags |= GTF_REUSE_REG_VAL;
2305 void ResetReuseRegVal()
2307 assert(OperIsConst());
2308 gtFlags &= ~GTF_REUSE_REG_VAL;
2311 void SetIndirExceptionFlags(Compiler* comp);
2313 #if MEASURE_NODE_SIZE
2314 static void DumpNodeSizes();
2320 GenTree& operator=(const GenTree& gt)
2322 assert(!"Don't copy");
2327 #if DEBUGGABLE_GENTREE
2328 // In DEBUG builds, add a dummy virtual method, to give the debugger run-time type information.
2329 virtual void DummyVirt()
2333 typedef void* VtablePtr;
2335 VtablePtr GetVtableForOper(genTreeOps oper);
2336 void SetVtableForOper(genTreeOps oper);
2338 static VtablePtr s_vtablesForOpers[GT_COUNT];
2339 static VtablePtr s_vtableForOp;
2340 #endif // DEBUGGABLE_GENTREE
2343 inline void* operator new(size_t sz, class Compiler*, genTreeOps oper);
2345 inline GenTree(genTreeOps oper, var_types type DEBUGARG(bool largeNode = false));
2348 // Represents a GT_PHI node - a variable sized list of GT_PHI_ARG nodes.
2349 // All PHI_ARG nodes must represent uses of the same local variable and
2350 // the PHI node's type must be the same as the local variable's type.
2352 // The PHI node does not represent a definition by itself, it is always
2353 // the value operand of a STORE_LCL_VAR node. The local store node itself
2354 // is the definition for the same local variable referenced by all the
2355 // used PHI_ARG nodes:
2357 // STORE_LCL_VAR<V01>(PHI(PHI_ARG(V01), PHI_ARG(V01), PHI_ARG(V01)))
2359 // The order of the PHI_ARG uses is not currently relevant and it may be
2360 // the same or not as the order of the predecessor blocks.
2362 struct GenTreePhi final : public GenTree
// Use: a singly-linked list node wrapping one GT_PHI_ARG operand; every
// constructor/setter asserts the wrapped node is a GT_PHI_ARG.
2370 Use(GenTree* node, Use* next = nullptr) : m_node(node), m_next(next)
2372 assert(node->OperIs(GT_PHI_ARG));
2380 GenTree* GetNode() const
2382 assert(m_node->OperIs(GT_PHI_ARG));
2386 void SetNode(GenTree* node)
2388 assert(node->OperIs(GT_PHI_ARG));
2397 Use* GetNext() const
// Forward iterator over the Use list; end() is represented by a null Use*.
2408 UseIterator(Use* use) : m_use(use)
2412 Use& operator*() const
2417 Use* operator->() const
2422 UseIterator& operator++()
2424 m_use = m_use->GetNext();
2428 bool operator==(const UseIterator& i) const
2430 return m_use == i.m_use;
2433 bool operator!=(const UseIterator& i) const
2435 return m_use != i.m_use;
2444 UseList(Use* uses) : m_uses(uses)
2448 UseIterator begin() const
2450 return UseIterator(m_uses);
2453 UseIterator end() const
2455 return UseIterator(nullptr);
2461 GenTreePhi(var_types type) : GenTree(GT_PHI, type), gtUses(nullptr)
2467 return UseList(gtUses);
2470 //--------------------------------------------------------------------------
2471 // Equals: Checks if 2 PHI nodes are equal.
2474 // phi1 - The first PHI node
2475 // phi2 - The second PHI node
2478 // true if the 2 PHI nodes have the same type, number of uses, and the
2482 // The order of uses must be the same for equality, even if the
2483 // order is not usually relevant and is not guaranteed to reflect
2484 // a particular order of the predecessor blocks.
2486 static bool Equals(GenTreePhi* phi1, GenTreePhi* phi2)
2488 if (phi1->TypeGet() != phi2->TypeGet())
2493 GenTreePhi::UseIterator i1 = phi1->Uses().begin();
2494 GenTreePhi::UseIterator end1 = phi1->Uses().end();
2495 GenTreePhi::UseIterator i2 = phi2->Uses().begin();
2496 GenTreePhi::UseIterator end2 = phi2->Uses().end();
// Walk both use lists in lockstep; any mismatched operand fails the comparison.
2498 for (; (i1 != end1) && (i2 != end2); ++i1, ++i2)
2500 if (!Compare(i1->GetNode(), i2->GetNode()))
// Equal only if both lists were exhausted together (same use count).
2506 return (i1 == end1) && (i2 == end2);
2509 #if DEBUGGABLE_GENTREE
2510 GenTreePhi() : GenTree()
2516 // Represents a list of fields constituting a struct, when it is passed as an argument.
2518 struct GenTreeFieldList : public GenTree
// Use: one field entry - the field's value node, its offset within the struct,
// and its type. Entries form a singly-linked list with head/tail tracking.
2528 Use(GenTree* node, unsigned offset, var_types type)
2529 : m_node(node), m_next(nullptr), m_offset(static_cast<uint16_t>(offset)), m_type(type)
2531 // We can save space on 32 bit hosts by storing the offset as uint16_t. Struct promotion
2532 // only accepts structs which are much smaller than that - 128 bytes = max 4 fields * max
2533 // SIMD vector size (32 bytes).
2534 assert(offset <= UINT16_MAX);
2542 GenTree* GetNode() const
2547 void SetNode(GenTree* node)
2549 assert(node != nullptr);
2558 Use* GetNext() const
2563 void SetNext(Use* next)
2568 unsigned GetOffset() const
2573 var_types GetType() const
2578 void SetType(var_types type)
// Forward iterator over the Use list (null Use* acts as the end sentinel).
2589 UseIterator(Use* use) : use(use)
2605 use = use->GetNext();
2608 bool operator==(const UseIterator& other)
2610 return use == other.use;
2613 bool operator!=(const UseIterator& other)
2615 return use != other.use;
2625 UseList() : m_head(nullptr), m_tail(nullptr)
2629 Use* GetHead() const
2634 UseIterator begin() const
2639 UseIterator end() const
// Append 'newUse' (which must not already be linked) at the tail of the list.
2644 void AddUse(Use* newUse)
2646 assert(newUse->GetNext() == nullptr);
2648 if (m_head == nullptr)
2654 m_tail->SetNext(newUse);
// Splice 'newUse' immediately after 'insertAfter', updating the tail if needed.
2660 void InsertUse(Use* insertAfter, Use* newUse)
2662 assert(newUse->GetNext() == nullptr);
2664 newUse->SetNext(insertAfter->GetNext());
2665 insertAfter->SetNext(newUse);
2667 if (m_tail == insertAfter)
2678 for (Use *next, *use = m_tail; use != nullptr; use = next)
2680 next = use->GetNext();
2681 use->SetNext(m_head);
// True if the uses appear in non-decreasing offset order.
2686 bool IsSorted() const
2688 unsigned offset = 0;
2689 for (GenTreeFieldList::Use& use : *this)
2691 if (use.GetOffset() < offset)
2695 offset = use.GetOffset();
2705 GenTreeFieldList() : GenTree(GT_FIELD_LIST, TYP_STRUCT)
2715 // Add a new field use to the end of the use list and update side effect flags.
2716 void AddField(Compiler* compiler, GenTree* node, unsigned offset, var_types type);
2717 // Add a new field use to the end of the use list without updating side effect flags.
2718 void AddFieldLIR(Compiler* compiler, GenTree* node, unsigned offset, var_types type);
2719 // Insert a new field use after the specified use and update side effect flags.
2720 void InsertField(Compiler* compiler, Use* insertAfter, GenTree* node, unsigned offset, var_types type);
2721 // Insert a new field use after the specified use without updating side effect flags.
2722 void InsertFieldLIR(Compiler* compiler, Use* insertAfter, GenTree* node, unsigned offset, var_types type);
2724 //--------------------------------------------------------------------------
2725 // Equals: Check if 2 FIELD_LIST nodes are equal.
2728 // list1 - The first FIELD_LIST node
2729 // list2 - The second FIELD_LIST node
2732 // true if the 2 FIELD_LIST nodes have the same type, number of uses, and the
2735 static bool Equals(GenTreeFieldList* list1, GenTreeFieldList* list2)
2737 assert(list1->TypeGet() == TYP_STRUCT);
2738 assert(list2->TypeGet() == TYP_STRUCT);
2740 UseIterator i1 = list1->Uses().begin();
2741 UseIterator end1 = list1->Uses().end();
2742 UseIterator i2 = list2->Uses().begin();
2743 UseIterator end2 = list2->Uses().end();
// Lockstep walk: each pair of uses must match on node, offset, and type.
2745 for (; (i1 != end1) && (i2 != end2); ++i1, ++i2)
2747 if (!Compare(i1->GetNode(), i2->GetNode()) || (i1->GetOffset() != i2->GetOffset()) ||
2748 (i1->GetType() != i2->GetType()))
2754 return (i1 == end1) && (i2 == end2);
2758 //------------------------------------------------------------------------
2759 // GenTreeUseEdgeIterator: an iterator that will produce each use edge of a GenTree node in the order in which
2762 // Operand iteration is common enough in the back end of the compiler that the implementation of this type has
2763 // traded some simplicity for speed:
2764 // - As much work as is reasonable is done in the constructor rather than during operand iteration
2765 // - Node-specific functionality is handled by a small class of "advance" functions called by operator++
2766 // rather than making operator++ itself handle all nodes
2767 // - Some specialization has been performed for specific node types/shapes (e.g. the advance function for
2768 // binary nodes is specialized based on whether or not the node has the GTF_REVERSE_OPS flag set)
2770 // Valid values of this type may be obtained by calling `GenTree::UseEdgesBegin` and `GenTree::UseEdgesEnd`.
2772 class GenTreeUseEdgeIterator final
2774 friend class GenTreeOperandIterator;
2775 friend GenTreeUseEdgeIterator GenTree::UseEdgesBegin();
2776 friend GenTreeUseEdgeIterator GenTree::UseEdgesEnd();
2782 CALL_CONTROL_EXPR = 2,
// Node-kind-specific stepping is done through this member-function pointer,
// selected once in the constructor; operator++ just invokes it.
2788 typedef void (GenTreeUseEdgeIterator::*AdvanceFn)();
2790 AdvanceFn m_advance;
2793 // Pointer sized state storage, GenTreePhi::Use* or CallArg*
2794 // or the exclusive end/beginning of GenTreeMultiOp's operand array.
2796 // Integer sized state storage, usually the operand index for non-list based nodes.
2799 GenTreeUseEdgeIterator(GenTree* node);
2801 // Advance functions for special nodes
2802 void AdvanceCmpXchg();
2803 void AdvanceArrElem();
2804 void AdvanceStoreDynBlk();
2805 void AdvanceFieldList();
2807 void AdvanceConditional();
// Specialized on GTF_REVERSE_OPS so the flag is tested once, not per step.
2809 template <bool ReverseOperands>
2810 void AdvanceBinOp();
2811 void SetEntryStateForBinOp();
2813 // The advance function for call nodes
2814 template <int state>
2817 #if defined(FEATURE_SIMD) || defined(FEATURE_HW_INTRINSICS)
2818 void AdvanceMultiOp();
2819 void AdvanceReversedMultiOp();
2820 void SetEntryStateForMultiOp();
2826 GenTreeUseEdgeIterator();
// m_state == -1 marks the end iterator; dereferencing it is illegal.
2828 inline GenTree** operator*()
2830 assert(m_state != -1);
2834 inline GenTree** operator->()
2836 assert(m_state != -1);
2840 inline bool operator==(const GenTreeUseEdgeIterator& other) const
// End iterators compare by state alone; live iterators compare all fields.
2842 if (m_state == -1 || other.m_state == -1)
2844 return m_state == other.m_state;
2847 return (m_node == other.m_node) && (m_edge == other.m_edge) && (m_statePtr == other.m_statePtr) &&
2848 (m_state == other.m_state);
2851 inline bool operator!=(const GenTreeUseEdgeIterator& other) const
2853 return !(operator==(other));
2856 GenTreeUseEdgeIterator& operator++();
2859 //------------------------------------------------------------------------
2860 // GenTreeOperandIterator: an iterator that will produce each operand of a
2861 // GenTree node in the order in which they are
2862 // used. This uses `GenTreeUseEdgeIterator` under
2865 // Note: valid values of this type may be obtained by calling
2866 // `GenTree::OperandsBegin` and `GenTree::OperandsEnd`.
// Thin adapter over GenTreeUseEdgeIterator that yields operand nodes
// (GenTree*) instead of use edges (GenTree**).
2867 class GenTreeOperandIterator final
2869 friend GenTreeOperandIterator GenTree::OperandsBegin();
2870 friend GenTreeOperandIterator GenTree::OperandsEnd();
2872 GenTreeUseEdgeIterator m_useEdges;
2874 GenTreeOperandIterator(GenTree* node) : m_useEdges(node)
2879 GenTreeOperandIterator() : m_useEdges()
// Dereference the underlying use edge (GenTree**) to produce the operand.
2883 inline GenTree* operator*()
2885 return *(*m_useEdges);
2888 inline GenTree* operator->()
2890 return *(*m_useEdges);
2893 inline bool operator==(const GenTreeOperandIterator& other) const
2895 return m_useEdges == other.m_useEdges;
2898 inline bool operator!=(const GenTreeOperandIterator& other) const
2900 return !(operator==(other));
2903 inline GenTreeOperandIterator& operator++()
2910 /*****************************************************************************/
2911 // In the current design, we never instantiate GenTreeUnOp: it exists only to be
2912 // used as a base class. For unary operators, we instantiate GenTreeOp, with a NULL second
2913 // argument. We check that this is true dynamically. We could tighten this and get static
2914 // checking, but that would entail accessing the first child of a unary operator via something
2915 // like gtUnOp.gtOp1 instead of AsOp()->gtOp1.
2916 struct GenTreeUnOp : public GenTree
// Constructor for unary ops created without an operand yet (gtOp1 filled later).
2921 GenTreeUnOp(genTreeOps oper, var_types type DEBUGARG(bool largeNode = false))
2922 : GenTree(oper, type DEBUGARG(largeNode)), gtOp1(nullptr)
2926 GenTreeUnOp(genTreeOps oper, var_types type, GenTree* op1 DEBUGARG(bool largeNode = false))
2927 : GenTree(oper, type DEBUGARG(largeNode)), gtOp1(op1)
2929 assert(op1 != nullptr || NullOp1Legal());
2931 { // Propagate effects flags from child.
2932 gtFlags |= op1->gtFlags & GTF_ALL_EFFECT;
2936 #if DEBUGGABLE_GENTREE
2937 GenTreeUnOp() : GenTree(), gtOp1(nullptr)
2943 struct GenTreeOp : public GenTreeUnOp
2947 GenTreeOp(genTreeOps oper, var_types type, GenTree* op1, GenTree* op2 DEBUGARG(bool largeNode = false))
2948 : GenTreeUnOp(oper, type, op1 DEBUGARG(largeNode)), gtOp2(op2)
2950 // comparisons are always integral types
2951 assert(!GenTree::OperIsCompare(oper) || varTypeIsIntegral(type));
2952 // Binary operators, with a few exceptions, require a non-nullptr
2954 assert(op2 != nullptr || NullOp2Legal());
2955 // Unary operators, on the other hand, require a null second argument.
2956 assert(!OperIsUnary(oper) || op2 == nullptr);
2957 // Propagate effects flags from child. (UnOp handled this for first child.)
2960 gtFlags |= op2->gtFlags & GTF_ALL_EFFECT;
2964 // A small set of types are unary operators with optional arguments. We use
2965 // this constructor to build those.
2966 GenTreeOp(genTreeOps oper, var_types type DEBUGARG(bool largeNode = false))
2967 : GenTreeUnOp(oper, type DEBUGARG(largeNode)), gtOp2(nullptr)
2969 // Unary operators with optional arguments:
2970 assert(oper == GT_NOP || oper == GT_RETURN || oper == GT_RETFILT || OperIsBlk(oper));
2973 // returns true if we will use the division by constant optimization for this node.
2974 bool UsesDivideByConstOptimized(Compiler* comp);
2976 // checks if we will use the division by constant optimization for this node;
2977 // then sets the flag GTF_DIV_BY_CNS_OPT and GTF_DONT_CSE on the constant
2978 void CheckDivideByConstOptimized(Compiler* comp);
2980 // True if this node is marked as using the division by constant optimization
2981 bool MarkedDivideByConstOptimized() const
2983 return (gtFlags & GTF_DIV_BY_CNS_OPT) != 0;
2986 #if !defined(TARGET_64BIT) || defined(TARGET_ARM64)
2987 bool IsValidLongMul();
2990 #if !defined(TARGET_64BIT) && defined(DEBUG)
2991 void DebugCheckLongMul();
2994 #if DEBUGGABLE_GENTREE
2995 GenTreeOp() : GenTreeUnOp(), gtOp2(nullptr)
// A node carrying a single raw ssize_t payload (gtVal1); the meaning of the
// value depends on the oper it is created with.
3001 struct GenTreeVal : public GenTree
3005 GenTreeVal(genTreeOps oper, var_types type, ssize_t val) : GenTree(oper, type), gtVal1(val)
3008 #if DEBUGGABLE_GENTREE
3009 GenTreeVal() : GenTree()
// Common base of the integral constant nodes (GT_CNS_INT / GT_CNS_LNG);
// accessors below are defined out-of-line further down in this header.
3015 struct GenTreeIntConCommon : public GenTree
3017 inline INT64 LngValue() const;
3018 inline void SetLngValue(INT64 val);
3019 inline ssize_t IconValue() const;
3020 inline void SetIconValue(ssize_t val);
3021 inline INT64 IntegralValue() const;
3022 inline void SetIntegralValue(int64_t value);
3024 template <typename T>
3025 inline void SetValueTruncating(T value);
3027 GenTreeIntConCommon(genTreeOps oper, var_types type DEBUGARG(bool largeNode = false))
3028 : GenTree(oper, type DEBUGARG(largeNode))
3032 bool FitsInI8() // IconValue() fits into 8-bit signed storage
3034 return FitsInI8(IconValue());
3037 static bool FitsInI8(ssize_t val) // Constant fits into 8-bit signed storage
3039 return (int8_t)val == val;
3042 bool FitsInI32() // IconValue() fits into 32-bit signed storage
3044 return FitsInI32(IconValue());
3047 static bool FitsInI32(ssize_t val) // Constant fits into 32-bit signed storage
3050 return (int32_t)val == val;
3056 bool ImmedValNeedsReloc(Compiler* comp);
3057 bool ImmedValCanBeFolded(Compiler* comp, genTreeOps op);
3060 bool FitsInAddrBase(Compiler* comp);
3061 bool AddrNeedsReloc(Compiler* comp);
3064 #if DEBUGGABLE_GENTREE
3065 GenTreeIntConCommon() : GenTree()
3071 // node representing a read from a physical register
3072 struct GenTreePhysReg : public GenTree
3074 // physregs need a field beyond GetRegNum() because
3075 // GetRegNum() indicates the destination (and can be changed)
3076 // whereas reg indicates the source
// gtSrcReg: the physical register being read (fixed at construction).
3078 GenTreePhysReg(regNumber r, var_types type = TYP_I_IMPL) : GenTree(GT_PHYSREG, type), gtSrcReg(r)
3081 #if DEBUGGABLE_GENTREE
3082 GenTreePhysReg() : GenTree()
3088 /* gtIntCon -- integer constant (GT_CNS_INT) */
3089 struct GenTreeIntCon : public GenTreeIntConCommon
3092 * This is the GT_CNS_INT struct definition.
3093 * It's used to hold both int constants and pointer handle constants.
3094 * For the 64-bit targets we will only use GT_CNS_INT as it is used to represent all the possible sizes
3095 * For the 32-bit targets we use a GT_CNS_LNG to hold a 64-bit integer constant and GT_CNS_INT for all others.
3096 * In the future when we retarget the JIT for x86 we should consider eliminating GT_CNS_LNG
3098 ssize_t gtIconVal; // Must overlap and have the same offset with the gtLconVal field in GenTreeLngCon below.
3100 /* The InitializeArray intrinsic needs to go back to the newarray statement
3101 to find the class handle of the array so that we can get its size. However,
3102 in ngen mode, the handle in that statement does not correspond to the compile
3103 time handle (rather it lets you get a handle at run-time). In that case, we also
3104 need to store a compile time handle, which goes in this gtCompileTimeHandle field.
3106 ssize_t gtCompileTimeHandle;
3108 // TODO-Cleanup: It's not clear what characterizes the cases where the field
3109 // above is used. It may be that its uses and those of the "gtFieldSeq" field below
3110 // are mutually exclusive, and they could be put in a union. Or else we should separate
3111 // this type into three subtypes.
3113 // If this constant represents the offset of one or more fields, "gtFieldSeq" represents that
3114 // sequence of fields.
3115 FieldSeq* gtFieldSeq;
3118 // If the value represents target address (for a field or call), holds the handle of the field (or call).
3119 size_t gtTargetHandle = 0;
3122 GenTreeIntCon(var_types type, ssize_t value DEBUGARG(bool largeNode = false))
3123 : GenTreeIntConCommon(GT_CNS_INT, type DEBUGARG(largeNode))
3125 , gtCompileTimeHandle(0)
3126 , gtFieldSeq(nullptr)
// Variant that records the field sequence this constant offset refers to.
3130 GenTreeIntCon(var_types type, ssize_t value, FieldSeq* fields DEBUGARG(bool largeNode = false))
3131 : GenTreeIntConCommon(GT_CNS_INT, type DEBUGARG(largeNode))
3133 , gtCompileTimeHandle(0)
3134 , gtFieldSeq(fields)
3138 void FixupInitBlkValue(var_types type);
3140 #if DEBUGGABLE_GENTREE
3141 GenTreeIntCon() : GenTreeIntConCommon()
3147 /* gtLngCon -- long constant (GT_CNS_LNG) */
3149 struct GenTreeLngCon : public GenTreeIntConCommon
3151 INT64 gtLconVal; // Must overlap and have the same offset with the gtIconVal field in GenTreeIntCon above.
// Low 32 bits of the 64-bit constant.
3154 return (INT32)(gtLconVal & 0xffffffff);
// High 32 bits of the 64-bit constant.
3159 return (INT32)(gtLconVal >> 32);
3162 GenTreeLngCon(INT64 val) : GenTreeIntConCommon(GT_CNS_NATIVELONG, TYP_LONG)
3166 #if DEBUGGABLE_GENTREE
3167 GenTreeLngCon() : GenTreeIntConCommon()
// LngValue: read the constant as a 64-bit value. On 32-bit targets this is
// only legal on GT_CNS_LNG nodes (see the assert below).
3173 inline INT64 GenTreeIntConCommon::LngValue() const
3175 #ifndef TARGET_64BIT
3176 assert(gtOper == GT_CNS_LNG);
3177 return AsLngCon()->gtLconVal;
// SetLngValue: store a 64-bit constant value. On 32-bit targets it writes
// gtLconVal directly; otherwise it forwards to SetIconValue, relying on the
// compile-time overlap asserts below.
3183 inline void GenTreeIntConCommon::SetLngValue(INT64 val)
3185 #ifndef TARGET_64BIT
3186 assert(gtOper == GT_CNS_LNG);
3187 AsLngCon()->gtLconVal = val;
3189 // Compile time asserts that these two fields overlap and have the same offsets: gtIconVal and gtLconVal
3190 C_ASSERT(offsetof(GenTreeLngCon, gtLconVal) == offsetof(GenTreeIntCon, gtIconVal));
3191 C_ASSERT(sizeof(AsLngCon()->gtLconVal) == sizeof(AsIntCon()->gtIconVal));
3193 SetIconValue(ssize_t(val));
// IconValue: read the native-int constant; only valid on GT_CNS_INT nodes.
3197 inline ssize_t GenTreeIntConCommon::IconValue() const
3199 assert(gtOper == GT_CNS_INT); // We should never see a GT_CNS_LNG for a 64-bit target!
3200 return AsIntCon()->gtIconVal;
// SetIconValue: store the native-int constant; only valid on GT_CNS_INT nodes.
3203 inline void GenTreeIntConCommon::SetIconValue(ssize_t val)
3205 assert(gtOper == GT_CNS_INT); // We should never see a GT_CNS_LNG for a 64-bit target!
3206 AsIntCon()->gtIconVal = val;
// IntegralValue: read the constant as INT64 regardless of node kind -
// selects the long or int payload based on the oper (32-bit targets only path
// shown here; guarded by TARGET_64BIT, per the #endif below).
3209 inline INT64 GenTreeIntConCommon::IntegralValue() const
3214 return gtOper == GT_CNS_LNG ? LngValue() : (INT64)IconValue();
3215 #endif // TARGET_64BIT
// SetIntegralValue: store a 64-bit value into whichever payload this node
// kind uses; non-long nodes assert the value fits in 32 bits first.
3218 inline void GenTreeIntConCommon::SetIntegralValue(int64_t value)
3221 SetIconValue(value);
3223 if (OperIs(GT_CNS_LNG))
3229 assert(FitsIn<int32_t>(value));
3230 SetIconValue(static_cast<int32_t>(value));
3232 #endif // TARGET_64BIT
3235 //------------------------------------------------------------------------
3236 // SetValueTruncating: Set the value, truncating to TYP_INT if necessary.
3238 // The function will truncate the supplied value to a 32 bit signed
3239 // integer if the node's type is not TYP_LONG, otherwise setting it
3240 // as-is. Note that this function intentionally does not check for
3241 // small types (such nodes are created in lowering) for TP reasons.
3243 // This function is intended to be used where its truncating behavior is
3244 // desirable. One example is folding of ADD(CNS_INT, CNS_INT) performed in
3245 // wider integers, which is typical when compiling on 64 bit hosts, as
3246 // most arithmetic is done in ssize_t's aka int64_t's in that case, while
3247 // the node itself can be of a narrower type.
3250 // value - Value to set, truncating to TYP_INT if the node is not of TYP_LONG
3253 // This function is templated so that it works well with compiler warnings of
3254 // the form "Operation may overflow before being assigned to a wider type", in
3255 // case "value" is of type ssize_t, which is common.
3257 template <typename T>
3258 inline void GenTreeIntConCommon::SetValueTruncating(T value)
// Only the three integral source types below are supported, enforced at compile time.
3260 static_assert_no_msg(
3261 (std::is_same<T, int32_t>::value || std::is_same<T, int64_t>::value || std::is_same<T, ssize_t>::value));
3263 if (TypeIs(TYP_LONG))
3269 SetIconValue(static_cast<int32_t>(value));
3273 /* gtDblCon -- double constant (GT_CNS_DBL) */
3275 struct GenTreeDblCon : public GenTree
3281 double DconValue() const
// Normalize on store so that equivalent floating-point values share one bit pattern.
3286 void SetDconValue(double value)
3288 gtDconVal = FloatingPointUtils::normalize(value);
// isBitwiseEqual: compare two double constants by their raw 64-bit patterns,
// so that e.g. +0.0 and -0.0 (different bits) are NOT considered equal.
3291 bool isBitwiseEqual(GenTreeDblCon* other)
3293 unsigned __int64 bits = *(unsigned __int64*)(&gtDconVal);
3294 unsigned __int64 otherBits = *(unsigned __int64*)(&(other->gtDconVal));
3295 return (bits == otherBits);
3298 GenTreeDblCon(double val, var_types type = TYP_DOUBLE) : GenTree(GT_CNS_DBL, type)
3300 assert(varTypeIsFloating(type));
3303 #if DEBUGGABLE_GENTREE
3304 GenTreeDblCon() : GenTree()
3310 /* gtStrCon -- string constant (GT_CNS_STR) */
// Sentinel constant-pool index used to represent the empty string.
3312 #define EMPTY_STRING_SCON (unsigned)-1
3314 struct GenTreeStrCon : public GenTree
3317 CORINFO_MODULE_HANDLE gtScpHnd;
3319 // Returns true if this GT_CNS_STR was imported for String.Empty field
3320 bool IsStringEmptyField()
3322 return gtSconCPX == EMPTY_STRING_SCON && gtScpHnd == nullptr;
3325 // Because this node can come from an inlined method we need to
3326 // have the scope handle, since it will become a helper call.
3327 GenTreeStrCon(unsigned sconCPX, CORINFO_MODULE_HANDLE mod DEBUGARG(bool largeNode = false))
3328 : GenTree(GT_CNS_STR, TYP_REF DEBUGARG(largeNode)), gtSconCPX(sconCPX), gtScpHnd(mod)
3331 #if DEBUGGABLE_GENTREE
3332 GenTreeStrCon() : GenTree()
3338 // Encapsulates the SSA info carried by local nodes. Most local nodes have simple 1-to-1
3339 // relationships with their SSA refs. However, defs of promoted structs can represent
3340 // many SSA defs at the same time, and we need to efficiently encode that.
3342 class SsaNumInfo final
3344 // This can be in one of four states:
3345 // 1. Single SSA name: > RESERVED_SSA_NUM (0).
3346 // 2. RESERVED_SSA_NUM (0)
3347 // 3. "Inline composite name": packed SSA numbers of field locals (each could be RESERVED):
3348 // [byte 3]: [top bit][ssa num 3] (7 bits)
3349 // [byte 2]: [ssa num 2] (8 bits)
3350 // [byte 1]: [compact encoding bit][ssa num 1] (7 bits)
3351 // [byte 0]: [ssa num 0] (8 bits)
3352 // We expect this encoding to cover the 99%+ case of composite names: locals with more
3353 // than 127 defs, maximum for this encoding, are rare, and the current limit on the count
3354 // of promoted fields is 4.
3355 // 4. "Outlined composite name": index into the "composite SSA nums" table. The table itself
3356 // will have the very simple format of N (the total number of fields / simple names) slots
3357 // with full SSA numbers, starting at the encoded index. Notably, the table entries will
3358 // include "empty" slots (for untracked fields), as we don't expect to use the table in
3359 // the common case, and in the pathological cases, the space overhead should be mitigated
3360 // by the cap on the number of tracked locals.
// Encoding constants for the packed representation described above.
3362 static const int BITS_PER_SIMPLE_NUM = 8;
3363 static const int MAX_SIMPLE_NUM = (1 << (BITS_PER_SIMPLE_NUM - 1)) - 1;
3364 static const int SIMPLE_NUM_MASK = MAX_SIMPLE_NUM;
3365 static const int SIMPLE_NUM_COUNT = (sizeof(int) * BITS_PER_BYTE) / BITS_PER_SIMPLE_NUM;
3366 static const int COMPOSITE_ENCODING_BIT = 1 << 31;
3367 static const int OUTLINED_ENCODING_BIT = 1 << 15;
3368 static const int OUTLINED_INDEX_LOW_MASK = OUTLINED_ENCODING_BIT - 1;
3369 static const int OUTLINED_INDEX_HIGH_MASK =
3370 ~(COMPOSITE_ENCODING_BIT | OUTLINED_ENCODING_BIT | OUTLINED_INDEX_LOW_MASK);
3371 static_assert_no_msg(SsaConfig::RESERVED_SSA_NUM == 0); // A lot in the encoding relies on this.
3375 SsaNumInfo(int value) : m_value(value)
// Default state: the reserved (invalid) SSA number.
3380 SsaNumInfo() : m_value(SsaConfig::RESERVED_SSA_NUM)
3384 bool IsSimple() const
3386 return IsInvalid() || IsSsaNum(m_value);
3389 bool IsComposite() const
3394 bool IsInvalid() const
3396 return m_value == SsaConfig::RESERVED_SSA_NUM;
3399 unsigned GetNum() const
3405 unsigned GetNum(Compiler* compiler, unsigned index) const;
// Factory for state 1/2: a plain (or reserved) SSA number.
3407 static SsaNumInfo Simple(unsigned ssaNum)
3409 assert(IsSsaNum(ssaNum) || (ssaNum == SsaConfig::RESERVED_SSA_NUM));
3410 return SsaNumInfo(ssaNum);
3413 static SsaNumInfo Composite(
3414 SsaNumInfo baseNum, Compiler* compiler, unsigned parentLclNum, unsigned index, unsigned ssaNum);
// True for state 3 (inline composite), i.e. the outlined bit is clear.
3417 bool HasCompactFormat() const
3419 assert(IsComposite());
3420 return (m_value & OUTLINED_ENCODING_BIT) == 0;
3423 unsigned* GetOutlinedNumSlot(Compiler* compiler, unsigned index) const;
3425 static bool NumCanBeEncodedCompactly(unsigned index, unsigned ssaNum);
3427 static bool IsSsaNum(int value)
3429 return value > SsaConfig::RESERVED_SSA_NUM;
3433 // Common supertype of [STORE_]LCL_VAR, [STORE_]LCL_FLD, PHI_ARG, LCL_VAR_ADDR, LCL_FLD_ADDR.
3434 // This inherits from UnOp because lclvar stores are unary.
3436 struct GenTreeLclVarCommon : public GenTreeUnOp
3439 unsigned m_lclNum; // The local number. An index into the Compiler::lvaTable array.
3440 SsaNumInfo m_ssaNum; // The SSA info.
3443 GenTreeLclVarCommon(genTreeOps oper, var_types type, unsigned lclNum DEBUGARG(bool largeNode = false))
3444 : GenTreeUnOp(oper, type DEBUGARG(largeNode))
// Store-form constructor: 'data' becomes the unary operand (the value stored).
3449 GenTreeLclVarCommon(genTreeOps oper, var_types type, unsigned lclNum, GenTree* data)
3450 : GenTreeUnOp(oper, type, data DEBUGARG(/* largeNode */ false))
3452 assert(OperIsLocalStore());
3458 assert(OperIsLocalStore());
3462 unsigned GetLclNum() const
// Changing the local invalidates any SSA info, so it is reset here.
3467 void SetLclNum(unsigned lclNum)
3470 m_ssaNum = SsaNumInfo();
3473 uint16_t GetLclOffs() const;
3475 ClassLayout* GetLayout(Compiler* compiler) const;
// Simple (whole-local) SSA number, or RESERVED_SSA_NUM if composite/invalid.
3477 unsigned GetSsaNum() const
3479 return m_ssaNum.IsSimple() ? m_ssaNum.GetNum() : SsaConfig::RESERVED_SSA_NUM;
// Per-field SSA number for composite (promoted struct) defs.
3482 unsigned GetSsaNum(Compiler* compiler, unsigned index) const
3484 return m_ssaNum.IsComposite() ? m_ssaNum.GetNum(compiler, index) : SsaConfig::RESERVED_SSA_NUM;
3487 void SetSsaNum(unsigned ssaNum)
3489 m_ssaNum = SsaNumInfo::Simple(ssaNum);
3492 void SetSsaNum(Compiler* compiler, unsigned index, unsigned ssaNum)
3494 m_ssaNum = SsaNumInfo::Composite(m_ssaNum, compiler, GetLclNum(), index, ssaNum);
3497 bool HasSsaName() const
3499 return GetSsaNum() != SsaConfig::RESERVED_SSA_NUM;
3502 bool HasCompositeSsaName() const
3504 return m_ssaNum.IsComposite();
3507 #if DEBUGGABLE_GENTREE
3508 GenTreeLclVarCommon() : GenTreeUnOp()
3514 //------------------------------------------------------------------------
3515 // MultiRegSpillFlags
3517 // GTF_SPILL or GTF_SPILLED flag on a multi-reg node indicates that one or
3518 // more of its result regs are in that state. The spill flags of each register
3519 // are stored here. We only need 2 bits per returned register,
3520 // so this is treated as a 2-bit array. No architecture needs more than 8 bits.
3522 typedef unsigned char MultiRegSpillFlags;
// Packed 2-bit encoding per register: bit 0 = spill pending, bit 1 = spilled.
3523 static const unsigned PACKED_GTF_SPILL = 1;
3524 static const unsigned PACKED_GTF_SPILLED = 2;
3526 //----------------------------------------------------------------------
3527 // GetMultiRegSpillFlagsByIdx: get spill flag associated with the return register
3528 // specified by its index.
// flags - The packed MultiRegSpillFlags value to read from
3531 // idx - Position or index of the return register
3534 // Returns GTF_* flags associated with the register. Only GTF_SPILL and GTF_SPILLED are considered.
3536 inline GenTreeFlags GetMultiRegSpillFlagsByIdx(MultiRegSpillFlags flags, unsigned idx)
3538 static_assert_no_msg(MAX_MULTIREG_COUNT * 2 <= sizeof(unsigned char) * BITS_PER_BYTE);
3539 assert(idx < MAX_MULTIREG_COUNT);
3541 unsigned bits = flags >> (idx * 2); // It doesn't matter that we possibly leave other high bits here.
3542 GenTreeFlags spillFlags = GTF_EMPTY;
// Unpack each 1-bit packed flag into its corresponding GTF_* flag.
3543 if (bits & PACKED_GTF_SPILL)
3545 spillFlags |= GTF_SPILL;
3547 if (bits & PACKED_GTF_SPILLED)
3549 spillFlags |= GTF_SPILLED;
3554 //----------------------------------------------------------------------
3555 // SetMultiRegSpillFlagsByIdx: set spill flags for the register specified by its index.
3558 // oldFlags - The current value of the MultiRegSpillFlags for a node.
3559 // flagsToSet - GTF_* flags. Only GTF_SPILL and GTF_SPILLED are allowed.
3560 // Note that these are the flags used on non-multireg nodes,
3561 // and this method adds the appropriate flags to the
3562 // incoming MultiRegSpillFlags and returns it.
3563 // idx - Position or index of the register
3566 // The new value for the node's MultiRegSpillFlags.
3568 inline MultiRegSpillFlags SetMultiRegSpillFlagsByIdx(MultiRegSpillFlags oldFlags, GenTreeFlags flagsToSet, unsigned idx)
3570 static_assert_no_msg(MAX_MULTIREG_COUNT * 2 <= sizeof(unsigned char) * BITS_PER_BYTE);
3571 assert(idx < MAX_MULTIREG_COUNT);
3573 MultiRegSpillFlags newFlags = oldFlags;
// Pack the requested GTF_* flags into the 2-bit encoding for this register.
3575 if (flagsToSet & GTF_SPILL)
3577 bits |= PACKED_GTF_SPILL;
3579 if (flagsToSet & GTF_SPILLED)
3581 bits |= PACKED_GTF_SPILLED;
3584 const unsigned char packedFlags = PACKED_GTF_SPILL | PACKED_GTF_SPILLED;
3586 // Clear anything that was already there by masking out the bits before 'or'ing in what we want there.
3587 newFlags = (unsigned char)((newFlags & ~(packedFlags << (idx * 2))) | (bits << (idx * 2)));
3591 // gtLclVar -- load/store/addr of local variable
3593 struct GenTreeLclVar : public GenTreeLclVarCommon
// Registers beyond the first for multi-reg locals; GetRegNum() holds register 0.
3596 regNumberSmall gtOtherReg[MAX_MULTIREG_COUNT - 1];
3597 MultiRegSpillFlags gtSpillFlags;
3600 INDEBUG(IL_OFFSET gtLclILoffs = BAD_IL_OFFSET;) // instr offset of ref (only for JIT dumps)
3603 bool IsMultiReg() const
3605 return ((gtFlags & GTF_VAR_MULTIREG) != 0);
3607 void ClearMultiReg()
3609 gtFlags &= ~GTF_VAR_MULTIREG;
3613 gtFlags |= GTF_VAR_MULTIREG;
3614 ClearOtherRegFlags();
// Index 0 maps to GetRegNum(); higher indices map into gtOtherReg.
3617 regNumber GetRegNumByIdx(int regIndex) const
3619 assert(regIndex < MAX_MULTIREG_COUNT);
3620 return (regIndex == 0) ? GetRegNum() : (regNumber)gtOtherReg[regIndex - 1];
3623 void SetRegNumByIdx(regNumber reg, int regIndex)
3625 assert(regIndex < MAX_MULTIREG_COUNT);
3632 gtOtherReg[regIndex - 1] = regNumberSmall(reg);
3636 GenTreeFlags GetRegSpillFlagByIdx(unsigned idx) const
3638 return GetMultiRegSpillFlagsByIdx(gtSpillFlags, idx);
3641 void SetRegSpillFlagByIdx(GenTreeFlags flags, unsigned idx)
3643 gtSpillFlags = SetMultiRegSpillFlagsByIdx(gtSpillFlags, flags, idx);
3646 unsigned int GetFieldCount(Compiler* compiler) const;
3647 var_types GetFieldTypeByIndex(Compiler* compiler, unsigned idx);
3649 bool IsNeverNegative(Compiler* comp) const;
3651 //-------------------------------------------------------------------
3652 // clearOtherRegFlags: clear GTF_* flags associated with gtOtherRegs
3659 void ClearOtherRegFlags()
3664 //-------------------------------------------------------------------------
3665 // CopyOtherRegFlags: copy GTF_* flags associated with gtOtherRegs from
3666 // the given LclVar node.
3669 // from - GenTreeLclVar node from which to copy
3674 void CopyOtherRegFlags(GenTreeLclVar* from)
3676 this->gtSpillFlags = from->gtSpillFlags;
3680 void ResetLclILoffs()
3682 gtLclILoffs = BAD_IL_OFFSET;
3686 GenTreeLclVar(genTreeOps oper,
3688 unsigned lclNum DEBUGARG(IL_OFFSET ilOffs = BAD_IL_OFFSET) DEBUGARG(bool largeNode = false))
3689 : GenTreeLclVarCommon(oper, type, lclNum DEBUGARG(largeNode)) DEBUGARG(gtLclILoffs(ilOffs))
3691 assert(OperIsScalarLocal(oper));
// Store-form constructor for GT_STORE_LCL_VAR.
3694 GenTreeLclVar(var_types type, unsigned lclNum, GenTree* data)
3695 : GenTreeLclVarCommon(GT_STORE_LCL_VAR, type, lclNum, data)
3699 #if DEBUGGABLE_GENTREE
3700 GenTreeLclVar() : GenTreeLclVarCommon()
3706 // gtLclFld -- load/store/addr of local variable field
3708 struct GenTreeLclFld : public GenTreeLclVarCommon
// Offsets are stored in 16 bits; every setter asserts lclOffs <= UINT16_MAX.
3711 uint16_t m_lclOffs; // offset into the variable to access
3712 ClassLayout* m_layout; // The struct layout for this local field.
// Constructor for load/addr forms.
3715 GenTreeLclFld(genTreeOps oper, var_types type, unsigned lclNum, unsigned lclOffs, ClassLayout* layout = nullptr)
3716 : GenTreeLclVarCommon(oper, type, lclNum), m_lclOffs(static_cast<uint16_t>(lclOffs))
3718 assert(lclOffs <= UINT16_MAX);
// Constructor for the store form (GT_STORE_LCL_FLD) carrying the stored value.
3722 GenTreeLclFld(var_types type, unsigned lclNum, unsigned lclOffs, GenTree* data, ClassLayout* layout)
3723 : GenTreeLclVarCommon(GT_STORE_LCL_FLD, type, lclNum, data), m_lclOffs(static_cast<uint16_t>(lclOffs))
3725 assert(lclOffs <= UINT16_MAX);
3729 uint16_t GetLclOffs() const
3734 void SetLclOffs(unsigned lclOffs)
3736 assert(lclOffs <= UINT16_MAX);
3737 m_lclOffs = static_cast<uint16_t>(lclOffs);
// A TYP_STRUCT local field must always have a layout.
3740 ClassLayout* GetLayout() const
3742 assert(!TypeIs(TYP_STRUCT) || (m_layout != nullptr))
3746 void SetLayout(ClassLayout* layout)
3751 unsigned GetSize() const;
// ARM-only query (guarded by TARGET_ARM — opening #ifdef is elided above).
3754 bool IsOffsetMisaligned() const;
3755 #endif // TARGET_ARM
3757 #if DEBUGGABLE_GENTREE
3758 GenTreeLclFld() : GenTreeLclVarCommon()
3764 // GenTreeCast - conversion to a different type (GT_CAST).
3766 // This node represents all "conv[.ovf].{type}[.un]" IL opcodes.
3768 // There are four semantically significant values that determine what it does:
3770 // 1) "genActualType(CastOp())" - the type being cast from.
3771 // 2) "gtCastType" - the type being cast to.
3772 // 3) "IsUnsigned" (the "GTF_UNSIGNED" flag) - whether the cast is "unsigned".
3773 // 4) "gtOverflow" (the "GTF_OVERFLOW" flag) - whether the cast is checked.
3775 // Different "kinds" of casts use these values differently; not all are always
3776 // meaningful or legal:
3778 // 1) For casts from FP types, "IsUnsigned" will always be "false".
3779 // 2) Checked casts use "IsUnsigned" to represent the fact the type being cast
3780 // from is unsigned. The target type's signedness is similarly significant.
3781 // 3) For unchecked casts, "IsUnsigned" is significant for "int -> long", where
3782 // it decides whether the cast sign- or zero-extends its source, and "integer
3783 // -> FP" cases. For all other unchecked casts, "IsUnsigned" is meaningless.
3784 // 4) For unchecked casts, signedness of the target type is only meaningful if
3785 // the cast is to an FP or small type. In the latter case (and everywhere
3786 // else in IR) it decides whether the value will be sign- or zero-extended.
3788 // For additional context on "GT_CAST"'s semantics, see "IntegralRange::ForCast"
3789 // methods and "GenIntCastDesc"'s constructor.
3791 struct GenTreeCast : public GenTreeOp
// The target type of the conversion (point 2 in the comment above).
3797 var_types gtCastType;
3799 GenTreeCast(var_types type, GenTree* op, bool fromUnsigned, var_types castType DEBUGARG(bool largeNode = false))
3800 : GenTreeOp(GT_CAST, type, op, nullptr DEBUGARG(largeNode)), gtCastType(castType)
3802 // We do not allow casts from floating point types to be treated as from
3803 // unsigned to avoid bugs related to wrong GTF_UNSIGNED in case the
3804 // CastOp's type changes.
3805 assert(!varTypeIsFloating(op) || !fromUnsigned);
3807 gtFlags |= fromUnsigned ? GTF_UNSIGNED : GTF_EMPTY;
3809 #if DEBUGGABLE_GENTREE
3810 GenTreeCast() : GenTreeOp()
// IsZeroExtending: true if this integral cast zero-extends (rather than
// sign-extends) its source. Small target types extend per the target's
// signedness; int -> long extends per the GTF_UNSIGNED flag.
3815 bool IsZeroExtending()
3817 assert(varTypeIsIntegral(CastOp()) && varTypeIsIntegral(CastToType()));
3819 if (varTypeIsSmall(CastToType()))
3821 return varTypeIsUnsigned(CastToType());
3823 if (TypeIs(TYP_LONG) && genActualTypeIsInt(CastOp()))
3825 return IsUnsigned();
3832 // GT_BOX nodes are place markers for boxed values. The "real" tree
3833 // for most purposes is in gtBoxOp.
3834 struct GenTreeBox : public GenTreeUnOp
3836 // An expanded helper call to implement the "box" if we don't get
3837 // rid of it any other way. Must be in same position as op1.
3843 // This is the statement that contains the assignment tree when the node is an inlined GT_BOX on a value
3845 Statement* gtDefStmtWhenInlinedBoxValue;
3846 // And this is the statement that copies from the value being boxed to the box payload
3847 Statement* gtCopyStmtWhenInlinedBoxValue;
// Constructor: records the boxed operand plus the two statements that the
// box-removal optimization needs to find and possibly delete later.
3849 GenTreeBox(var_types type,
3851 Statement* defStmtWhenInlinedBoxValue,
3852 Statement* copyStmtWhenInlinedBoxValue)
3853 : GenTreeUnOp(GT_BOX, type, boxOp)
3854 , gtDefStmtWhenInlinedBoxValue(defStmtWhenInlinedBoxValue)
3855 , gtCopyStmtWhenInlinedBoxValue(copyStmtWhenInlinedBoxValue)
3858 #if DEBUGGABLE_GENTREE
3859 GenTreeBox() : GenTreeUnOp()
// Cloned state is tracked via the GTF_BOX_CLONED node flag.
3866 return (gtFlags & GTF_BOX_CLONED) != 0;
3871 gtFlags |= GTF_BOX_CLONED;
3875 // GenTreeFieldAddr -- data member address (GT_FIELD_ADDR)
3876 struct GenTreeFieldAddr : public GenTreeUnOp
// Handle identifying the field being addressed.
3878 CORINFO_FIELD_HANDLE gtFldHnd;
3880 bool gtFldMayOverlap : 1;
3883 bool gtFldIsSpanLength : 1;
3886 #ifdef FEATURE_READYTORUN
// R2R lookup for the field; addr == nullptr means the offset is known
// statically (see IsOffsetKnown below).
3887 CORINFO_CONST_LOOKUP gtFieldLookup;
3890 GenTreeFieldAddr(var_types type, GenTree* obj, CORINFO_FIELD_HANDLE fldHnd, DWORD offs)
3891 : GenTreeUnOp(GT_FIELD_ADDR, type, obj)
3894 , gtFldMayOverlap(false)
3895 , gtFldIsSpanLength(false)
3897 #ifdef FEATURE_READYTORUN
3898 gtFieldLookup.addr = nullptr;
3902 #if DEBUGGABLE_GENTREE
3903 GenTreeFieldAddr() : GenTreeUnOp()
3908 // The object this field belongs to. Will be "nullptr" for static fields.
3909 // Note that this is an address, i. e. for struct fields it will be ADDR(STRUCT).
3910 GenTree* GetFldObj() const
3915 bool IsSpanLength() const
3917 // This is limited to span length today rather than a more general "IsNeverNegative"
3918 // to help avoid confusion around propagating the value to promoted lcl vars.
3920 // Extending this support more in the future will require additional work and
3921 // considerations to help ensure it is correctly used since people may want
3922 // or intend to use this as more of a "point in time" feature like GTF_IND_NONNULL
3923 return gtFldIsSpanLength;
3926 void SetIsSpanLength(bool value)
3928 gtFldIsSpanLength = value;
// Instance vs. static is determined by whether an object operand is present.
3931 bool IsInstance() const
3933 return GetFldObj() != nullptr;
3936 bool IsStatic() const
3938 return !IsInstance();
// TLS statics are flagged via GTF_FLD_TLS; only static fields may carry it.
3941 bool IsTlsStatic() const
3943 assert(((gtFlags & GTF_FLD_TLS) == 0) || IsStatic());
3944 return (gtFlags & GTF_FLD_TLS) != 0;
3947 bool IsOffsetKnown() const
3949 #ifdef FEATURE_READYTORUN
3950 return gtFieldLookup.addr == nullptr;
3951 #endif // FEATURE_READYTORUN
3956 // There was quite a bit of confusion in the code base about which of gtOp1 and gtOp2 was the
3957 // 'then' and 'else' clause of a colon node. Adding these accessors, while not enforcing anything,
3958 // at least *allows* the programmer to be obviously correct.
3959 // However, these conventions seem backward.
3960 // TODO-Cleanup: If we could get these accessors used everywhere, then we could switch them.
3961 struct GenTreeColon : public GenTreeOp
3963 GenTree*& ThenNode()
3967 GenTree*& ElseNode()
3972 #if DEBUGGABLE_GENTREE
3973 GenTreeColon() : GenTreeOp()
// Note the operand order in the base-class call: the 'else' node is passed as
// op1 and the 'then' node as op2 (the "backward" convention described above).
3978 GenTreeColon(var_types typ, GenTree* thenNode, GenTree* elseNode) : GenTreeOp(GT_COLON, typ, elseNode, thenNode)
3983 // GenTreeConditional -- Conditionally do an operation
3985 struct GenTreeConditional : public GenTreeOp
// In addition to the two GenTreeOp operands, this node carries the condition
// tree ('gtCond'); it must always be non-null.
3990 genTreeOps oper, var_types type, GenTree* cond, GenTree* op1, GenTree* op2 DEBUGARG(bool largeNode = false))
3991 : GenTreeOp(oper, type, op1, op2 DEBUGARG(largeNode)), gtCond(cond)
3993 assert(cond != nullptr);
3996 #if DEBUGGABLE_GENTREE
3997 GenTreeConditional() : GenTreeOp()
4003 // gtCall -- method call (GT_CALL)
4004 enum class InlineObservation;
4006 //------------------------------------------------------------------------
4007 // GenTreeCallFlags: a bitmask of flags for GenTreeCall stored in gtCallMoreFlags.
4010 enum GenTreeCallFlags : unsigned int
4012 GTF_CALL_M_EMPTY = 0,
4014 GTF_CALL_M_EXPLICIT_TAILCALL = 0x00000001, // the call is "tail" prefixed and importer has performed tail call checks
4015 GTF_CALL_M_TAILCALL = 0x00000002, // the call is a tailcall
4016 GTF_CALL_M_RETBUFFARG = 0x00000004, // the ABI dictates that this call needs a ret buffer
4017 GTF_CALL_M_RETBUFFARG_LCLOPT = 0x00000008, // Does this call have a local ret buffer that we are optimizing?
4018 GTF_CALL_M_DELEGATE_INV = 0x00000010, // call to Delegate.Invoke
4019 GTF_CALL_M_NOGCCHECK = 0x00000020, // not a call for computing full interruptability and therefore no GC check is required.
4020 GTF_CALL_M_SPECIAL_INTRINSIC = 0x00000040, // function that could be optimized as an intrinsic
4021 // in special cases. Used to optimize fast way out in morphing
// Note: the next two flags deliberately share the value 0x80 — they apply to
// mutually exclusive call kinds (virtual-stub vs. non-virtual calls).
4022 GTF_CALL_M_VIRTSTUB_REL_INDIRECT = 0x00000080, // the virtstub is indirected through a relative address (only for GTF_CALL_VIRT_STUB)
4023 GTF_CALL_M_NONVIRT_SAME_THIS = 0x00000080, // callee "this" pointer is equal to caller this pointer (only for GTF_CALL_NONVIRT)
4024 GTF_CALL_M_FRAME_VAR_DEATH = 0x00000100, // the compLvFrameListRoot variable dies here (last use)
4025 GTF_CALL_M_TAILCALL_VIA_JIT_HELPER = 0x00000200, // call is a tail call dispatched via tail call JIT helper.
4027 #if FEATURE_TAILCALL_OPT
4028 GTF_CALL_M_IMPLICIT_TAILCALL = 0x00000400, // call is an opportunistic tail call and importer has performed tail call checks
4029 GTF_CALL_M_TAILCALL_TO_LOOP = 0x00000800, // call is a fast recursive tail call that can be converted into a loop
4032 GTF_CALL_M_PINVOKE = 0x00001000, // call is a pinvoke. This mirrors VM flag CORINFO_FLG_PINVOKE.
4033 // A call marked as Pinvoke is not necessarily a GT_CALL_UNMANAGED. For e.g.
4034 // an IL Stub dynamically generated for a PInvoke declaration is flagged as
4035 // a Pinvoke but not as an unmanaged call. See impCheckForPInvokeCall() to
4036 // know when these flags are set.
4038 GTF_CALL_M_R2R_REL_INDIRECT = 0x00002000, // ready to run call is indirected through a relative address
4039 GTF_CALL_M_DOES_NOT_RETURN = 0x00004000, // call does not return
4040 GTF_CALL_M_WRAPPER_DELEGATE_INV = 0x00008000, // call is in wrapper delegate
4041 GTF_CALL_M_FAT_POINTER_CHECK = 0x00010000, // NativeAOT managed calli needs transformation, that checks
4042 // special bit in calli address. If it is set, then it is necessary
4043 // to restore real function address and load hidden argument
4044 // as the first argument for calli. It is NativeAOT replacement for instantiating
4045 // stubs, because executable code cannot be generated at runtime.
4046 GTF_CALL_M_HELPER_SPECIAL_DCE = 0x00020000, // this helper call can be removed if it is part of a comma and
4047 // the comma result is unused.
4048 GTF_CALL_M_DEVIRTUALIZED = 0x00040000, // this call was devirtualized
4049 GTF_CALL_M_UNBOXED = 0x00080000, // this call was optimized to use the unboxed entry point
4050 GTF_CALL_M_GUARDED_DEVIRT = 0x00100000, // this call is a candidate for guarded devirtualization
// Note: this entry is out of numeric order — it uses the high bit (0x80000000).
4051 GTF_CALL_M_GUARDED_DEVIRT_EXACT = 0x80000000, // this call is a candidate for guarded devirtualization without a fallback
4052 GTF_CALL_M_GUARDED_DEVIRT_CHAIN = 0x00200000, // this call is a candidate for chained guarded devirtualization
4053 GTF_CALL_M_GUARDED = 0x00400000, // this call was transformed by guarded devirtualization
4054 GTF_CALL_M_ALLOC_SIDE_EFFECTS = 0x00800000, // this is a call to an allocator with side effects
4055 GTF_CALL_M_SUPPRESS_GC_TRANSITION = 0x01000000, // suppress the GC transition (i.e. during a pinvoke) but a separate GC safe point is required.
4056 GTF_CALL_M_EXP_RUNTIME_LOOKUP = 0x02000000, // this call needs to be transformed into CFG for the dynamic dictionary expansion feature.
4057 GTF_CALL_M_STRESS_TAILCALL = 0x04000000, // the call is NOT "tail" prefixed but GTF_CALL_M_EXPLICIT_TAILCALL was added because of tail call stress mode
4058 GTF_CALL_M_EXPANDED_EARLY = 0x08000000, // the Virtual Call target address is expanded and placed in gtControlExpr in Morph rather than in Lower
4059 GTF_CALL_M_HAS_LATE_DEVIRT_INFO = 0x10000000, // this call has late devirtualzation info
4060 GTF_CALL_M_LDVIRTFTN_INTERFACE = 0x20000000, // ldvirtftn on an interface type
4061 GTF_CALL_M_EXP_TLS_ACCESS = 0x40000000, // this call is a helper for access TLS marked field
// Bitwise operators for GenTreeCallFlags: since it is a plain (unscoped but
// typed) enum, these overloads let flag values be combined and masked without
// explicit casts at every use site.
4064 inline constexpr GenTreeCallFlags operator ~(GenTreeCallFlags a)
4066 return (GenTreeCallFlags)(~(unsigned int)a);
4069 inline constexpr GenTreeCallFlags operator |(GenTreeCallFlags a, GenTreeCallFlags b)
4071 return (GenTreeCallFlags)((unsigned int)a | (unsigned int)b);
4074 inline constexpr GenTreeCallFlags operator &(GenTreeCallFlags a, GenTreeCallFlags b)
4076 return (GenTreeCallFlags)((unsigned int)a & (unsigned int)b);
// Compound assignments are not constexpr; they mutate 'a' in place.
4079 inline GenTreeCallFlags& operator |=(GenTreeCallFlags& a, GenTreeCallFlags b)
4081 return a = (GenTreeCallFlags)((unsigned int)a | (unsigned int)b);
4084 inline GenTreeCallFlags& operator &=(GenTreeCallFlags& a, GenTreeCallFlags b)
4086 return a = (GenTreeCallFlags)((unsigned int)a & (unsigned int)b);
4091 // Return type descriptor of a GT_CALL node.
4092 // x64 Unix, Arm64, Arm32 and x86 allow a value to be returned in multiple
4093 // registers. For such calls this struct provides the following info
4094 // on their return type
4095 // - type of value returned in each return register
4096 // - ABI return register numbers in which the value is returned
4097 // - count of return registers in which the value is returned
4099 // TODO-ARM: Update this to meet the needs of Arm64 and Arm32
4101 // TODO-AllArch: Right now it is used for describing multi-reg returned types.
4102 // Eventually we would want to use it for describing even single-reg
4103 // returned types (e.g. structs returned in single register x64/arm).
4104 // This would allow us not to lie or normalize single struct return
4105 // values in importer/morph.
4106 struct ReturnTypeDesc
// Per-register return types; unused trailing slots hold TYP_UNKNOWN.
4109 var_types m_regType[MAX_RET_REG_COUNT];
4121 // Initialize the Return Type Descriptor for a method that returns a struct type
4122 void InitializeStructReturnType(Compiler* comp, CORINFO_CLASS_HANDLE retClsHnd, CorInfoCallConvExtension callConv);
4124 // Initialize the Return Type Descriptor for a method that returns a TYP_LONG
4125 // Only needed for X86 and arm32.
4126 void InitializeLongReturnType();
4128 // Initialize the Return Type Descriptor.
4129 void InitializeReturnType(Compiler* comp,
4131 CORINFO_CLASS_HANDLE retClsHnd,
4132 CorInfoCallConvExtension callConv);
4134 // Reset type descriptor to defaults
4137 for (unsigned i = 0; i < MAX_RET_REG_COUNT; ++i)
4139 m_regType[i] = TYP_UNKNOWN;
4147 // NOTE: we only use this function when writing out IR dumps. These dumps may take place before the ReturnTypeDesc
4148 // has been initialized.
4149 unsigned TryGetReturnRegCount() const
4151 return m_inited ? GetReturnRegCount() : 0;
4155 //--------------------------------------------------------------------------------------------
4156 // GetReturnRegCount: Get the count of return registers in which the return value is returned.
4162 //    Count of return registers.
4163 //    Returns 0 if the return type is not returned in registers.
4165 unsigned GetReturnRegCount() const
// Count leading non-TYP_UNKNOWN entries; the first TYP_UNKNOWN terminates.
4170 for (unsigned i = 0; i < MAX_RET_REG_COUNT; ++i)
4172 if (m_regType[i] == TYP_UNKNOWN)
4181 // Any remaining elements in m_regTypes[] should also be TYP_UNKNOWN
4182 for (unsigned i = regCount + 1; i < MAX_RET_REG_COUNT; ++i)
4184 assert(m_regType[i] == TYP_UNKNOWN);
4191 //-----------------------------------------------------------------------
4192 // IsMultiRegRetType: check whether the type is returned in multiple
4193 // return registers.
4199 //    Returns true if the type is returned in multiple return registers.
4201 // Note that we only have to examine the first two values to determine this
4203 bool IsMultiRegRetType() const
// When fewer than two return registers exist, a multi-reg return is impossible.
4205 if (MAX_RET_REG_COUNT < 2)
4212 return ((m_regType[0] != TYP_UNKNOWN) && (m_regType[1] != TYP_UNKNOWN));
4216 //--------------------------------------------------------------------------
4217 // GetReturnRegType:  Get var_type of the return register specified by index.
4220 //    index - Index of the return register.
4221 //            First return register will have an index 0 and so on.
4224 //    var_type of the return register specified by its index.
4225 //    asserts if the index does not have a valid register return type.
4227 var_types GetReturnRegType(unsigned index) const
4229 var_types result = m_regType[index];
4230 assert(result != TYP_UNKNOWN);
4235 // Get i'th ABI return register
4236 regNumber GetABIReturnReg(unsigned idx) const;
4238 // Get reg mask of ABI return registers
4239 regMaskTP GetABIReturnRegs() const;
// Records how a tail call site was encoded in IL (call / callvirt / calli)
// together with the callee signature and, for token-based calls, the token.
4242 class TailCallSiteInfo
4244 bool m_isCallvirt : 1;
4246 CORINFO_SIG_INFO m_sig;
4247 CORINFO_RESOLVED_TOKEN m_token;
4250 // Is the tailcall a callvirt instruction?
4253 return m_isCallvirt;
4256 // Is the tailcall a calli instruction?
4262 // Get the token of the callee
4263 CORINFO_RESOLVED_TOKEN* GetToken()
4269 // Get the signature of the callee
4270 CORINFO_SIG_INFO* GetSig()
4275 // Mark the tailcall as a calli with the given signature
4276 void SetCalli(CORINFO_SIG_INFO* sig)
4278 m_isCallvirt = false;
4283 // Mark the tailcall as a callvirt with the given signature and token
4284 void SetCallvirt(CORINFO_SIG_INFO* sig, CORINFO_RESOLVED_TOKEN* token)
4286 m_isCallvirt = true;
4292 // Mark the tailcall as a call with the given signature and token
4293 void SetCall(CORINFO_SIG_INFO* sig, CORINFO_RESOLVED_TOKEN* token)
4295 m_isCallvirt = false;
// Kinds of control-flow-guard call expansion. (Enumerators are elided here.)
4302 enum class CFGCallKind
// Identifies "well known" (non-standard / implicitly added) call arguments,
// e.g. the ret buffer or 'this'; used by CallArg/CallArgs below.
4310 enum class WellKnownArg
4319 WrapperDelegateCell,
4326 ValidateIndirectCallTarget,
4327 DispatchIndirectCallTarget,
// Debug helper: printable name for a WellKnownArg value.
4331 const char* getWellKnownArgName(WellKnownArg arg);
// Describes how a single call argument is passed per the target ABI:
// registers used, stack offset/alignment, HFA info, split state, etc.
4334 struct CallArgABIInformation
4336 CallArgABIInformation()
4341 #ifdef UNIX_AMD64_ABI
4343 , StructFloatRegs(0)
4345 #if defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
4346 , StructFloatFieldType()
4348 , ArgType(TYP_UNDEF)
4349 , IsBackFilled(false)
4351 , PassedByRef(false)
4352 #if FEATURE_ARG_SPLIT
4355 #ifdef FEATURE_HFA_FIELDS_PRESENT
4356 , m_hfaElemKind(CORINFO_HFA_ELEM_NONE)
// All register slots start out unassigned.
4359 for (size_t i = 0; i < MAX_ARG_REG_COUNT; i++)
4361 RegNums[i] = REG_NA;
4366 // The registers to use when passing this argument, set to REG_STK for
4367 // arguments passed on the stack
4368 regNumberSmall RegNums[MAX_ARG_REG_COUNT];
4371 // Count of number of registers that this argument uses. Note that on ARM,
4372 // if we have a double hfa, this reflects the number of DOUBLE registers.
4374 unsigned ByteOffset;
4376 unsigned ByteAlignment;
4377 #if defined(UNIX_AMD64_ABI)
4378 // Unix amd64 will split floating point types and integer types in structs
4379 // between floating point and general purpose registers. Keep track of that
4380 // information so we do not need to recompute it later.
4381 unsigned StructIntRegs;
4382 unsigned StructFloatRegs;
4383 SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR StructDesc;
4384 #endif // UNIX_AMD64_ABI
4385 #if defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
4386 // For LoongArch64's ABI, the struct which has float field(s) and no more than two fields
4387 // may be passed by float register(s).
4388 // e.g  `struct {int a; float b;}` passed by an integer register and a float register.
4389 var_types StructFloatFieldType[2];
4391 // The type used to pass this argument. This is generally the original
4392 // argument type, but when a struct is passed as a scalar type, this is
4393 // that type. Note that if a struct is passed by reference, this will still
4394 // be the struct type.
4395 var_types ArgType : 5;
4396 // True when the argument fills a register slot skipped due to alignment
4397 // requirements of previous arguments.
4398 bool IsBackFilled : 1;
4399 // True if this is a struct arg
4401 // True iff the argument is passed by reference.
4402 bool PassedByRef : 1;
4405 #if FEATURE_ARG_SPLIT
4406 // True when this argument is split between the registers and OutArg area
4410 #ifdef FEATURE_HFA_FIELDS_PRESENT
4411 // What kind of an HFA this is (CORINFO_HFA_ELEM_NONE if it is not an HFA).
4412 CorInfoHFAElemType m_hfaElemKind : 3;
// HFA kind accessors fail with NOWAY on targets without HFA fields.
4416 CorInfoHFAElemType GetHfaElemKind() const
4418 #ifdef FEATURE_HFA_FIELDS_PRESENT
4419 return m_hfaElemKind;
4421 NOWAY_MSG("GetHfaElemKind");
4422 return CORINFO_HFA_ELEM_NONE;
4426 void SetHfaElemKind(CorInfoHFAElemType elemKind)
4428 #ifdef FEATURE_HFA_FIELDS_PRESENT
4429 m_hfaElemKind = elemKind;
4431 NOWAY_MSG("SetHfaElemKind");
4435 bool IsHfaArg() const;
4436 bool IsHfaRegArg() const;
4437 var_types GetHfaType() const;
4438 void SetHfaType(var_types type, unsigned hfaSlots);
// RegNums[0] is the primary register; RegNums[1] the "other" register.
4440 regNumber GetRegNum() const
4442 return (regNumber)RegNums[0];
4445 regNumber GetOtherRegNum() const
4447 return (regNumber)RegNums[1];
4449 regNumber GetRegNum(unsigned int i)
4451 assert(i < MAX_ARG_REG_COUNT);
4452 return (regNumber)RegNums[i];
4454 void SetRegNum(unsigned int i, regNumber regNum)
4456 assert(i < MAX_ARG_REG_COUNT);
4457 RegNums[i] = (regNumberSmall)regNum;
// Split state only exists when FEATURE_ARG_SPLIT is enabled for the target.
4460 bool IsSplit() const
4462 #if FEATURE_ARG_SPLIT
4463 return compFeatureArgSplit() && m_isSplit;
4464 #else // FEATURE_ARG_SPLIT
4468 void SetSplit(bool value)
4470 #if FEATURE_ARG_SPLIT
4475 bool IsPassedInRegisters() const
4477 return !IsSplit() && (NumRegs != 0);
4480 bool IsPassedInFloatRegisters() const
4485 return isValidFloatArgReg(GetRegNum());
// LoongArch64/RISCV64: a float-typed arg can land in an integer register.
4489 bool IsMismatchedArgType() const
4491 #if defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
4492 return isValidIntArgReg(GetRegNum()) && varTypeUsesFloatReg(ArgType);
4495 #endif // TARGET_LOONGARCH64 || TARGET_RISCV64
4498 void SetByteSize(unsigned byteSize, unsigned byteAlignment, bool isStruct, bool isFloatHfa);
4500 // Get the number of bytes that this argument is occupying on the stack,
4501 // including padding up to the target pointer size for platforms
4502 // where a stack argument can't take less.
4503 unsigned GetStackByteSize() const;
4505 // Set the register numbers for a multireg argument.
4506 // There's nothing to do on x64/Ux because the structDesc has already been used to set the
4507 // register numbers.
4508 void SetMultiRegNums();
4510 // Return number of stack slots that this argument is taking.
4511 // This value is not meaningful on macOS arm64 where multiple arguments can
4512 // be passed in the same stack slot.
4513 unsigned GetStackSlotsNumber() const
4515 return roundUp(GetStackByteSize(), TARGET_POINTER_SIZE) / TARGET_POINTER_SIZE;
4521 // The node being passed.
4522 GenTree* Node = nullptr;
4523 // The signature type of the node.
4524 var_types SignatureType = TYP_UNDEF;
4525 // The class handle if SignatureType == TYP_STRUCT.
4526 CORINFO_CLASS_HANDLE SignatureClsHnd = NO_CLASS_HANDLE;
4527 // The type of well known arg
4528 WellKnownArg WellKnownArg = ::WellKnownArg::None;
// WellKnown: return a copy of this arg descriptor tagged with the given
// well-known-arg kind (builder-style, leaves 'this' unmodified).
4530 NewCallArg WellKnown(::WellKnownArg type) const
4532 NewCallArg copy = *this;
4533 copy.WellKnownArg = type;
// Factory for a struct-typed argument; both node and signature type must be
// struct types, and the class handle supplies the layout.
4537 static NewCallArg Struct(GenTree* node, var_types type, CORINFO_CLASS_HANDLE clsHnd)
4539 assert(varTypeIsStruct(node) && varTypeIsStruct(type));
4542 arg.SignatureType = type;
4543 arg.SignatureClsHnd = clsHnd;
4544 arg.ValidateTypes();
// Factory for a non-struct argument; TYP_UNDEF means "use the node's type".
4548 static NewCallArg Primitive(GenTree* node, var_types type = TYP_UNDEF)
4550 assert(!varTypeIsStruct(node) && !varTypeIsStruct(type));
4553 arg.SignatureType = type == TYP_UNDEF ? node->TypeGet() : type;
4554 arg.ValidateTypes();
// ValidateTypes: checked in DEBUG; no-op declaration otherwise (the DEBUG
// conditional surrounding these two declarations is elided here).
4559 void ValidateTypes();
4561 void ValidateTypes()
4569 friend class CallArgs;
// The arg's IR: 'early' node is the original tree; 'late' node is the
// (possibly temp/placeholder-replaced) tree evaluated in late-arg order.
4571 GenTree* m_earlyNode;
4572 GenTree* m_lateNode;
4574 CallArg* m_lateNext;
4576 // The class handle for the signature type (when varTypeIsStruct(SignatureType)).
4577 CORINFO_CLASS_HANDLE m_signatureClsHnd;
4578 // The LclVar number if we had to force evaluation of this arg.
4580 // The type of the argument in the signature.
4581 var_types m_signatureType : 5;
4582 // The type of well-known argument this is.
4583 WellKnownArg m_wellKnownArg : 5;
4584 // True when we force this argument's evaluation into a temp LclVar.
4586 // True when we must replace this argument with a placeholder node.
4587 bool m_needPlace : 1;
4588 // True when we setup a temp LclVar for this argument.
4590 // True when we have decided the evaluation order for this argument in LateArgs
4591 bool m_processed : 1;
// Default constructor: all links null, no temp, default signature info.
4595 : m_earlyNode(nullptr)
4596 , m_lateNode(nullptr)
4598 , m_lateNext(nullptr)
4599 , m_signatureClsHnd(NO_CLASS_HANDLE)
4600 , m_tmpNum(BAD_VAR_NUM)
4601 , m_signatureType(TYP_UNDEF)
4602 , m_wellKnownArg(WellKnownArg::None)
4604 , m_needPlace(false)
4606 , m_processed(false)
// Per-arg ABI placement info, computed by CallArgs.
4611 CallArgABIInformation AbiInfo;
// Construct from a NewCallArg descriptor; the node becomes the early node.
4613 CallArg(const NewCallArg& arg) : CallArg()
4615 m_earlyNode = arg.Node;
4616 m_wellKnownArg = arg.WellKnownArg;
4617 m_signatureType = arg.SignatureType;
4618 m_signatureClsHnd = arg.SignatureClsHnd;
// CallArg nodes live in intrusive lists owned by CallArgs; no copying.
4621 CallArg(const CallArg&) = delete;
4622 CallArg& operator=(CallArg&) = delete;
4625 GenTree*& EarlyNodeRef() { return m_earlyNode; }
4626 GenTree* GetEarlyNode() { return m_earlyNode; }
4627 void SetEarlyNode(GenTree* node) { m_earlyNode = node; }
4628 GenTree*& LateNodeRef() { return m_lateNode; }
4629 GenTree* GetLateNode() { return m_lateNode; }
4630 void SetLateNode(GenTree* lateNode) { m_lateNode = lateNode; }
4631 CallArg*& NextRef() { return m_next; }
4632 CallArg* GetNext() { return m_next; }
4633 void SetNext(CallArg* next) { m_next = next; }
4634 CallArg*& LateNextRef() { return m_lateNext; }
4635 CallArg* GetLateNext() { return m_lateNext; }
4636 void SetLateNext(CallArg* lateNext) { m_lateNext = lateNext; }
4637 CORINFO_CLASS_HANDLE GetSignatureClassHandle() { return m_signatureClsHnd; }
4638 var_types GetSignatureType() { return m_signatureType; }
4639 WellKnownArg GetWellKnownArg() { return m_wellKnownArg; }
4640 bool IsTemp() { return m_isTmp; }
4643 // Get the real argument node, i.e. not a setup or placeholder node.
4644 // This is the same as GetEarlyNode() until morph.
4645 // After lowering, this is a PUTARG_* node.
4648 return m_lateNode == nullptr ? m_earlyNode : m_lateNode;
4651 bool IsArgAddedLate() const;
4653 bool IsUserArg() const;
// DEBUG-only diagnostics (surrounding #ifdef DEBUG is elided here).
4656 void Dump(Compiler* comp);
4657 // Check that the value of 'AbiInfo.IsStruct' is consistent.
4658 // A struct arg must be one of the following:
4659 // - A node of struct type,
4660 // - A GT_FIELD_LIST, or
4661 // - A node of a scalar type, passed in a single register or slot
4662 //   (or two slots in the case of a struct pass on the stack as TYP_DOUBLE).
4664 void CheckIsStruct();
// Head of the late-arg list (evaluation-order list built by EvalArgsToTemps).
4671 CallArg* m_lateHead;
4673 unsigned m_nextStackByteOffset;
4675 // Number of stack bytes pushed before we start pushing these arguments.
4676 unsigned m_stkSizeBytes;
4677 // Stack alignment in bytes required before arguments are pushed for this
4678 // call. Computed dynamically during codegen, based on m_stkSizeBytes and the
4679 // current stack level (genStackLevel) when the first stack adjustment is
4680 // made for this call.
4681 unsigned m_padStkAlign;
// Cached properties of the arg list, maintained as args are added/removed.
4683 bool m_hasThisPointer : 1;
4684 bool m_hasRetBuffer : 1;
4685 bool m_isVarArgs : 1;
4686 bool m_abiInformationDetermined : 1;
4687 // True if we have one or more register arguments.
4688 bool m_hasRegArgs : 1;
4689 // True if we have one or more stack arguments.
4690 bool m_hasStackArgs : 1;
4691 bool m_argsComplete : 1;
4692 // One or more arguments must be copied to a temp by EvalArgsToTemps.
4693 bool m_needsTemps : 1;
4695 // Updateable flag, set to 'true' after we've done any required alignment.
4696 bool m_alignmentDone : 1;
// Internal bookkeeping used when args are added/removed and laid out.
4699 void AddedWellKnownArg(WellKnownArg arg);
4700 void RemovedWellKnownArg(WellKnownArg arg);
4701 regNumber GetCustomRegister(Compiler* comp, CorInfoCallConvExtension cc, WellKnownArg arg);
4702 void SplitArg(CallArg* arg, unsigned numRegs, unsigned numSlots);
4703 void SortArgs(Compiler* comp, GenTreeCall* call, CallArg** sortedArgs);
// CallArgs owns intrusive lists of CallArg; copying is disallowed.
4707 CallArgs(const CallArgs&) = delete;
4708 CallArgs& operator=(CallArgs&) = delete;
// Lookup helpers over the arg list.
4710 CallArg* FindByNode(GenTree* node);
4711 CallArg* FindWellKnownArg(WellKnownArg arg);
4712 CallArg* GetThisArg();
4713 CallArg* GetRetBufferArg();
4714 CallArg* GetArgByIndex(unsigned index);
4715 CallArg* GetUserArgByIndex(unsigned index);
4716 unsigned GetIndex(CallArg* arg);
4718 bool IsEmpty() const
4720 return m_head == nullptr;
4723 // Reverse the args from [index..index + count) in place.
4724 void Reverse(unsigned index, unsigned count);
// Mutators that insert/remove args at various positions in the list.
4726 CallArg* PushFront(Compiler* comp, const NewCallArg& arg);
4727 CallArg* PushBack(Compiler* comp, const NewCallArg& arg);
4728 CallArg* InsertAfter(Compiler* comp, CallArg* after, const NewCallArg& arg);
4729 CallArg* InsertAfterUnchecked(Compiler* comp, CallArg* after, const NewCallArg& arg);
4730 CallArg* InsertInstParam(Compiler* comp, GenTree* node);
4731 CallArg* InsertAfterThisOrFirst(Compiler* comp, const NewCallArg& arg);
4732 void PushLateBack(CallArg* arg);
4733 void Remove(CallArg* arg);
4735 template <typename CopyNodeFunc>
4736 void InternalCopyFrom(Compiler* comp, CallArgs* other, CopyNodeFunc copyFunc);
// Variadic PushFront: pushes 'rest' first so that 'arg' ends up frontmost,
// i.e. arguments appear in the order they were written at the call site.
4738 template <typename... Args>
4739 void PushFront(Compiler* comp, const NewCallArg& arg, Args&&... rest)
4741 PushFront(comp, std::forward<Args>(rest)...);
4742 PushFront(comp, arg);
4745 void ResetFinalArgsAndABIInfo();
4746 void AddFinalArgsAndDetermineABIInfo(Compiler* comp, GenTreeCall* call);
4748 void ArgsComplete(Compiler* comp, GenTreeCall* call);
4749 void EvalArgsToTemps(Compiler* comp, GenTreeCall* call);
4750 void SetNeedsTemp(CallArg* arg);
4751 bool IsNonStandard(Compiler* comp, GenTreeCall* call, CallArg* arg);
4753 GenTree* MakeTmpArgNode(Compiler* comp, CallArg* arg);
4754 void SetTemp(CallArg* arg, unsigned tmpNum);
// Simple state queries backed by the bitfields above.
4757 bool HasThisPointer() const { return m_hasThisPointer; }
4758 bool HasRetBuffer() const { return m_hasRetBuffer; }
4759 bool IsVarArgs() const { return m_isVarArgs; }
4760 void SetIsVarArgs() { m_isVarArgs = true; }
4761 void ClearIsVarArgs() { m_isVarArgs = false; }
4762 bool IsAbiInformationDetermined() const { return m_abiInformationDetermined; }
4763 bool AreArgsComplete() const { return m_argsComplete; }
4764 bool HasRegArgs() const { return m_hasRegArgs; }
4765 bool HasStackArgs() const { return m_hasStackArgs; }
4766 bool NeedsTemps() const { return m_needsTemps; }
// Stack alignment bookkeeping used by codegen (see m_padStkAlign above).
4769 void ComputeStackAlignment(unsigned curStackLevelInBytes)
4771 m_padStkAlign = AlignmentPad(curStackLevelInBytes, STACK_ALIGN);
4773 unsigned GetStkAlign() const { return m_padStkAlign; }
4774 unsigned GetStkSizeBytes() { return m_stkSizeBytes; }
4775 void SetStkSizeBytes(unsigned bytes) { m_stkSizeBytes = bytes; }
4776 bool IsStkAlignmentDone() const { return m_alignmentDone; }
4777 void SetStkAlignmentDone() { m_alignmentDone = true; }
4781 unsigned OutgoingArgsStackSize() const;
4783 unsigned CountArgs();
4784 unsigned CountUserArgs();
// Generic forward iterator over a CallArg list; 'Next' selects which link
// (GetNext or GetLateNext) is followed.
4786 template <CallArg* (CallArg::*Next)()>
4787 class CallArgIterator
4792 explicit CallArgIterator(CallArg* arg) : m_arg(arg)
4797 CallArg& operator*() const { return *m_arg; }
4798 CallArg* operator->() const { return m_arg; }
4799 CallArg* GetArg() const { return m_arg; }
4802 CallArgIterator& operator++()
4804 m_arg = (m_arg->*Next)();
4808 bool operator==(const CallArgIterator& i) const
4810 return m_arg == i.m_arg;
4813 bool operator!=(const CallArgIterator& i) const
4815 return m_arg != i.m_arg;
// Iterator over args that have a (non-null) early node, skipping the rest.
4819 class EarlyArgIterator
4821 friend class CallArgs;
// Advance from 'cur' to the first arg (inclusive) with an early node.
4825 static CallArg* NextEarlyArg(CallArg* cur)
4827 while ((cur != nullptr) && (cur->GetEarlyNode() == nullptr))
4829 cur = cur->GetNext();
4836 explicit EarlyArgIterator(CallArg* arg) : m_arg(arg)
4841 CallArg& operator*() const { return *m_arg; }
4842 CallArg* operator->() const { return m_arg; }
4843 CallArg* GetArg() const { return m_arg; }
4846 EarlyArgIterator& operator++()
4848 m_arg = NextEarlyArg(m_arg->GetNext());
4852 bool operator==(const EarlyArgIterator& i) const
4854 return m_arg == i.m_arg;
4857 bool operator!=(const EarlyArgIterator& i) const
4859 return m_arg != i.m_arg;
// Convenience range factories for range-for iteration over the arg lists.
4863 using ArgIterator = CallArgIterator<&CallArg::GetNext>;
4864 using LateArgIterator = CallArgIterator<&CallArg::GetLateNext>;
4866 IteratorPair<ArgIterator> Args()
4868 return IteratorPair<ArgIterator>(ArgIterator(m_head), ArgIterator(nullptr));
4871 IteratorPair<EarlyArgIterator> EarlyArgs()
4873 CallArg* firstEarlyArg = EarlyArgIterator::NextEarlyArg(m_head);
4874 return IteratorPair<EarlyArgIterator>(EarlyArgIterator(firstEarlyArg), EarlyArgIterator(nullptr));
4877 IteratorPair<LateArgIterator> LateArgs()
4879 return IteratorPair<LateArgIterator>(LateArgIterator(m_lateHead), LateArgIterator(nullptr));
4883 struct GenTreeCall final : public GenTree
4888 // Used to register callsites with the EE
4889 CORINFO_SIG_INFO* callSig;
4893 TailCallSiteInfo* tailCallInfo;
4894 // Only used for unmanaged calls, which cannot be tail-called
4895 CorInfoCallConvExtension unmgdCallConv;
4898 #if FEATURE_MULTIREG_RET
4900 // State required to support multi-reg returning call nodes.
4902 // TODO-AllArch: enable for all call nodes to unify single-reg and multi-reg returns.
4903 ReturnTypeDesc gtReturnTypeDesc;
4905 // GetRegNum() would always be the first return reg.
4906 // The following array holds the other reg numbers of multi-reg return.
4907 regNumberSmall gtOtherRegs[MAX_RET_REG_COUNT - 1];
4909 MultiRegSpillFlags gtSpillFlags;
4911 #endif // FEATURE_MULTIREG_RET
4913 //-----------------------------------------------------------------------
4914 // GetReturnTypeDesc: get the type descriptor of return value of the call
4920 // Type descriptor of the value returned by call
4922 // TODO-AllArch: enable for all call nodes to unify single-reg and multi-reg returns.
4923 const ReturnTypeDesc* GetReturnTypeDesc() const
4925 #if FEATURE_MULTIREG_RET
4926 return >ReturnTypeDesc;
4932 void InitializeLongReturnType()
4934 #if FEATURE_MULTIREG_RET
4935 gtReturnTypeDesc.InitializeLongReturnType();
4939 void InitializeStructReturnType(Compiler* comp, CORINFO_CLASS_HANDLE retClsHnd, CorInfoCallConvExtension callConv)
4941 #if FEATURE_MULTIREG_RET
4942 gtReturnTypeDesc.InitializeStructReturnType(comp, retClsHnd, callConv);
4946 void ResetReturnType()
4948 #if FEATURE_MULTIREG_RET
4949 gtReturnTypeDesc.Reset();
4953 //---------------------------------------------------------------------------
4954 // GetRegNumByIdx: get i'th return register allocated to this call node.
4957 // idx - index of the return register
4960 // Return regNumber of i'th return register of call node.
4961 // Returns REG_NA if there is no valid return register for the given index.
4963 regNumber GetRegNumByIdx(unsigned idx) const
4965 assert(idx < MAX_RET_REG_COUNT);
4972 #if FEATURE_MULTIREG_RET
4973 return (regNumber)gtOtherRegs[idx - 1];
4979 //----------------------------------------------------------------------
4980 // SetRegNumByIdx: set i'th return register of this call node
4984 // idx - index of the return register
4989 void SetRegNumByIdx(regNumber reg, unsigned idx)
4991 assert(idx < MAX_RET_REG_COUNT);
4997 #if FEATURE_MULTIREG_RET
5000 gtOtherRegs[idx - 1] = (regNumberSmall)reg;
5001 assert(gtOtherRegs[idx - 1] == reg);
5008 //----------------------------------------------------------------------------
5009 // ClearOtherRegs: clear multi-reg state to indicate no regs are allocated
5017 void ClearOtherRegs()
5019 #if FEATURE_MULTIREG_RET
5020 for (unsigned i = 0; i < MAX_RET_REG_COUNT - 1; ++i)
5022 gtOtherRegs[i] = REG_NA;
5027 //----------------------------------------------------------------------------
5028 // CopyOtherRegs: copy multi-reg state from the given call node to this node
5031 // fromCall - GenTreeCall node from which to copy multi-reg state
5036 void CopyOtherRegs(GenTreeCall* fromCall)
5038 #if FEATURE_MULTIREG_RET
5039 for (unsigned i = 0; i < MAX_RET_REG_COUNT - 1; ++i)
5041 this->gtOtherRegs[i] = fromCall->gtOtherRegs[i];
5046 // Get reg mask of all the valid registers of gtOtherRegs array
5047 regMaskTP GetOtherRegMask() const;
5049 GenTreeFlags GetRegSpillFlagByIdx(unsigned idx) const
5051 #if FEATURE_MULTIREG_RET
5052 return GetMultiRegSpillFlagsByIdx(gtSpillFlags, idx);
5054 assert(!"unreached");
5059 void SetRegSpillFlagByIdx(GenTreeFlags flags, unsigned idx)
5061 #if FEATURE_MULTIREG_RET
5062 gtSpillFlags = SetMultiRegSpillFlagsByIdx(gtSpillFlags, flags, idx);
5066 //-------------------------------------------------------------------
5067 // clearOtherRegFlags: clear GTF_* flags associated with gtOtherRegs
5074 void ClearOtherRegFlags()
5076 #if FEATURE_MULTIREG_RET
5081 //-------------------------------------------------------------------------
5082 // CopyOtherRegFlags: copy GTF_* flags associated with gtOtherRegs from
5083 // the given call node.
5086 // fromCall - GenTreeCall node from which to copy
5091 void CopyOtherRegFlags(GenTreeCall* fromCall)
5093 #if FEATURE_MULTIREG_RET
5094 this->gtSpillFlags = fromCall->gtSpillFlags;
// The following predicates each test a single bit (or bit-field) of gtFlags.
// True when the call targets unmanaged code (GTF_CALL_UNMANAGED set).
5098 bool IsUnmanaged() const
5100 return (gtFlags & GTF_CALL_UNMANAGED) != 0;
// True when a null check must be performed on the 'this'/target before the call.
5102 bool NeedsNullCheck() const
5104 return (gtFlags & GTF_CALL_NULLCHECK) != 0;
// True when the caller (not the callee) pops the arguments.
5106 bool CallerPop() const
5108 return (gtFlags & GTF_CALL_POP_ARGS) != 0;
// True for any virtual dispatch kind (the virt-kind bit-field is not NONVIRT).
5110 bool IsVirtual() const
5112 return (gtFlags & GTF_CALL_VIRT_KIND_MASK) != GTF_CALL_NONVIRT;
// True specifically for stub-based virtual dispatch.
5114 bool IsVirtualStub() const
5116 return (gtFlags & GTF_CALL_VIRT_KIND_MASK) == GTF_CALL_VIRT_STUB;
// True specifically for vtable-based virtual dispatch.
5118 bool IsVirtualVtable() const
5120 return (gtFlags & GTF_CALL_VIRT_KIND_MASK) == GTF_CALL_VIRT_VTABLE;
// True when this call has been marked as a candidate for inlining.
5122 bool IsInlineCandidate() const
5124 return (gtFlags & GTF_CALL_INLINE_CANDIDATE) != 0;
5127 bool IsR2ROrVirtualStubRelativeIndir()
5129 #if defined(FEATURE_READYTORUN)
5130 if (IsR2RRelativeIndir())
5136 return IsVirtualStubRelativeIndir();
5139 bool HasNonStandardAddedArgs(Compiler* compiler) const;
5140 int GetNonStandardAddedArgCount(Compiler* compiler) const;
5142 // Returns true if the ABI dictates that this call should get a ret buf
5143 // arg. This may be out of sync with gtArgs.HasRetBuffer during import
5144 // until we actually create the ret buffer.
5145 bool ShouldHaveRetBufArg() const
5147 return (gtCallMoreFlags & GTF_CALL_M_RETBUFFARG) != 0;
5150 //-------------------------------------------------------------------------
5151 // TreatAsShouldHaveRetBufArg:
5154 // compiler, the compiler instance so that we can call eeGetHelperNum
5157 // Returns true if we treat the call as if it has a retBuf argument
5158 // This method may actually have a retBuf argument
5159 // or it could be a JIT helper that we are still transforming during
5160 // the importer phase.
5163 // On ARM64 marking the method with the GTF_CALL_M_RETBUFFARG flag
5164 // will make ShouldHaveRetBufArg() return true, but will also force the
5165 // use of register x8 to pass the RetBuf argument.
5167 bool TreatAsShouldHaveRetBufArg(Compiler* compiler) const;
5169 //-----------------------------------------------------------------------------------------
5170 // HasMultiRegRetVal: whether the call node returns its value in multiple return registers.
5176 // True if the call is returning a multi-reg return value. False otherwise.
5178 bool HasMultiRegRetVal() const
5180 #ifdef FEATURE_MULTIREG_RET
5181 #if defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
5182 return (gtType == TYP_STRUCT) && (gtReturnTypeDesc.GetReturnRegCount() > 1);
5185 #if defined(TARGET_X86) || defined(TARGET_ARM)
5186 if (varTypeIsLong(gtType))
5192 if (!varTypeIsStruct(gtType) || ShouldHaveRetBufArg())
5196 // Now it is a struct that is returned in registers.
5197 return GetReturnTypeDesc()->IsMultiRegRetType();
5200 #else // !FEATURE_MULTIREG_RET
5202 #endif // !FEATURE_MULTIREG_RET
5205 // Returns true if VM has flagged this method as CORINFO_FLG_PINVOKE.
5206 bool IsPInvoke() const
5208 return (gtCallMoreFlags & GTF_CALL_M_PINVOKE) != 0;
5211 // Note that the distinction of whether tail prefixed or an implicit tail call
5212 // is maintained on a call node till fgMorphCall() after which it will be
5213 // either a tail call (i.e. IsTailCall() is true) or a non-tail call.
5214 bool IsTailPrefixedCall() const
5216 return (gtCallMoreFlags & GTF_CALL_M_EXPLICIT_TAILCALL) != 0;
5219 // Returns true if this call didn't have an explicit tail. prefix in the IL
5220 // but was marked as an explicit tail call because of tail call stress mode.
5221 bool IsStressTailCall() const
5223 return (gtCallMoreFlags & GTF_CALL_M_STRESS_TAILCALL) != 0;
5226 // This method returning "true" implies that tail call flowgraph morhphing has
5227 // performed final checks and committed to making a tail call.
5228 bool IsTailCall() const
5230 return (gtCallMoreFlags & GTF_CALL_M_TAILCALL) != 0;
5233 // This method returning "true" implies that importer has performed tail call checks
5234 // and providing a hint that this can be converted to a tail call.
5235 bool CanTailCall() const
5237 return IsTailPrefixedCall() || IsImplicitTailCall();
5240 // Check whether this is a tailcall dispatched via JIT helper. We only use
5241 // this mechanism on x86 as it is faster than our other more general
5242 // tailcall mechanism.
5243 bool IsTailCallViaJitHelper() const
5246 return IsTailCall() && (gtCallMoreFlags & GTF_CALL_M_TAILCALL_VIA_JIT_HELPER);
5252 #if FEATURE_FASTTAILCALL
5253 bool IsFastTailCall() const
5256 return IsTailCall() && !(gtCallMoreFlags & GTF_CALL_M_TAILCALL_VIA_JIT_HELPER);
5258 return IsTailCall();
5261 #else // !FEATURE_FASTTAILCALL
5262 bool IsFastTailCall() const
5266 #endif // !FEATURE_FASTTAILCALL
5268 #if FEATURE_TAILCALL_OPT
5269 // Returns true if this is marked for opportunistic tail calling.
5270 // That is, can be tail called though not explicitly prefixed with "tail" prefix.
5271 bool IsImplicitTailCall() const
5273 return (gtCallMoreFlags & GTF_CALL_M_IMPLICIT_TAILCALL) != 0;
5275 bool IsTailCallConvertibleToLoop() const
5277 return (gtCallMoreFlags & GTF_CALL_M_TAILCALL_TO_LOOP) != 0;
5279 #else // !FEATURE_TAILCALL_OPT
5280 bool IsImplicitTailCall() const
5284 bool IsTailCallConvertibleToLoop() const
5288 #endif // !FEATURE_TAILCALL_OPT
5290 bool NormalizesSmallTypesOnReturn() const
5292 return GetUnmanagedCallConv() == CorInfoCallConvExtension::Managed;
5295 bool IsSameThis() const
5297 return (gtCallMoreFlags & GTF_CALL_M_NONVIRT_SAME_THIS) != 0;
5299 bool IsDelegateInvoke() const
5301 return (gtCallMoreFlags & GTF_CALL_M_DELEGATE_INV) != 0;
5303 bool IsVirtualStubRelativeIndir() const
5305 return IsVirtualStub() && (gtCallMoreFlags & GTF_CALL_M_VIRTSTUB_REL_INDIRECT) != 0;
5308 bool IsR2RRelativeIndir() const
5310 #ifdef FEATURE_READYTORUN
5311 return (gtCallMoreFlags & GTF_CALL_M_R2R_REL_INDIRECT) != 0;
5316 #ifdef FEATURE_READYTORUN
5317 void setEntryPoint(const CORINFO_CONST_LOOKUP& entryPoint)
5319 gtEntryPoint = entryPoint;
5320 if (gtEntryPoint.accessType == IAT_PVALUE)
5322 gtCallMoreFlags |= GTF_CALL_M_R2R_REL_INDIRECT;
5325 #endif // FEATURE_READYTORUN
5327 bool IsVarargs() const
5329 return gtArgs.IsVarArgs();
5332 bool IsNoReturn() const
5334 return (gtCallMoreFlags & GTF_CALL_M_DOES_NOT_RETURN) != 0;
5337 bool IsFatPointerCandidate() const
5339 return (gtCallMoreFlags & GTF_CALL_M_FAT_POINTER_CHECK) != 0;
5342 bool IsGuardedDevirtualizationCandidate() const
5344 return (gtCallMoreFlags & GTF_CALL_M_GUARDED_DEVIRT) != 0;
5347 bool IsPure(Compiler* compiler) const;
5349 bool HasSideEffects(Compiler* compiler, bool ignoreExceptions = false, bool ignoreCctors = false) const;
// Setter/getter cluster over gtCallMoreFlags: each method sets, clears, or
// tests one GTF_CALL_M_* bit.
5351 void ClearFatPointerCandidate()
5353 gtCallMoreFlags &= ~GTF_CALL_M_FAT_POINTER_CHECK;
5356 void SetFatPointerCandidate()
5358 gtCallMoreFlags |= GTF_CALL_M_FAT_POINTER_CHECK;
5361 bool IsDevirtualized() const
5363 return (gtCallMoreFlags & GTF_CALL_M_DEVIRTUALIZED) != 0;
5366 bool IsGuarded() const
5368 return (gtCallMoreFlags & GTF_CALL_M_GUARDED) != 0;
5371 bool IsUnboxed() const
5373 return (gtCallMoreFlags & GTF_CALL_M_UNBOXED) != 0;
5376 bool IsSuppressGCTransition() const
5378 return (gtCallMoreFlags & GTF_CALL_M_SUPPRESS_GC_TRANSITION) != 0;
// Clears both GUARDED_DEVIRT bits together (candidate and exact-type flavor).
5381 void ClearGuardedDevirtualizationCandidate()
5383 gtCallMoreFlags &= ~(GTF_CALL_M_GUARDED_DEVIRT | GTF_CALL_M_GUARDED_DEVIRT_EXACT);
// NOTE(review): the declaration line for the setter owning this statement
// (presumably SetIsGuarded) is not visible in this excerpt — confirm against
// the full file.
5388 gtCallMoreFlags |= GTF_CALL_M_GUARDED;
5391 void SetExpRuntimeLookup()
5393 gtCallMoreFlags |= GTF_CALL_M_EXP_RUNTIME_LOOKUP;
5396 void ClearExpRuntimeLookup()
5398 gtCallMoreFlags &= ~GTF_CALL_M_EXP_RUNTIME_LOOKUP;
5401 bool IsExpRuntimeLookup() const
5403 return (gtCallMoreFlags & GTF_CALL_M_EXP_RUNTIME_LOOKUP) != 0;
5406 void SetExpTLSFieldAccess()
5408 gtCallMoreFlags |= GTF_CALL_M_EXP_TLS_ACCESS;
5411 void ClearExpTLSFieldAccess()
5413 gtCallMoreFlags &= ~GTF_CALL_M_EXP_TLS_ACCESS;
5416 bool IsExpTLSFieldAccess() const
5418 return (gtCallMoreFlags & GTF_CALL_M_EXP_TLS_ACCESS) != 0;
5421 void SetExpandedEarly()
5423 gtCallMoreFlags |= GTF_CALL_M_EXPANDED_EARLY;
5426 void ClearExpandedEarly()
5428 gtCallMoreFlags &= ~GTF_CALL_M_EXPANDED_EARLY;
5431 bool IsExpandedEarly() const
5433 return (gtCallMoreFlags & GTF_CALL_M_EXPANDED_EARLY) != 0;
5436 bool IsOptimizingRetBufAsLocal() const
5438 return (gtCallMoreFlags & GTF_CALL_M_RETBUFFARG_LCLOPT) != 0;
5441 InlineCandidateInfo* GetSingleInlineCandidateInfo()
5443 // gtInlineInfoCount can be 0 (not an inline candidate) or 1
5444 if (gtInlineInfoCount == 0)
5446 assert(!IsInlineCandidate());
5447 assert(gtInlineCandidateInfo == nullptr);
5450 else if (gtInlineInfoCount > 1)
5452 assert(!"Call has multiple inline candidates");
5454 return gtInlineCandidateInfo;
5457 void SetSingleInlineCandidateInfo(InlineCandidateInfo* candidateInfo);
5459 InlineCandidateInfo* GetGDVCandidateInfo(uint8_t index);
5461 void AddGDVCandidateInfo(Compiler* comp, InlineCandidateInfo* candidateInfo);
5463 void RemoveGDVCandidateInfo(Compiler* comp, uint8_t index);
5465 void ClearInlineInfo()
5467 SetSingleInlineCandidateInfo(nullptr);
5470 uint8_t GetInlineCandidatesCount()
5472 return gtInlineInfoCount;
5475 //-----------------------------------------------------------------------------------------
5476 // GetIndirectionCellArgKind: Get the kind of indirection cell used by this call.
5482 // The kind (either R2RIndirectionCell or VirtualStubCell),
5483 // or WellKnownArg::None if this call does not have an indirection cell.
5485 WellKnownArg GetIndirectionCellArgKind() const
5487 if (IsVirtualStub())
5489 return WellKnownArg::VirtualStubCell;
5492 #if defined(TARGET_ARMARCH) || defined(TARGET_RISCV64)
5493 // For ARM architectures, we always use an indirection cell for R2R calls.
5494 if (IsR2RRelativeIndir() && !IsDelegateInvoke())
5496 return WellKnownArg::R2RIndirectionCell;
5498 #elif defined(TARGET_XARCH)
5499 // On XARCH we disassemble it from callsite except for tailcalls that need indirection cell.
5500 if (IsR2RRelativeIndir() && IsFastTailCall())
5502 return WellKnownArg::R2RIndirectionCell;
5506 return WellKnownArg::None;
5509 CFGCallKind GetCFGCallKind() const
5511 #if defined(TARGET_AMD64)
5512 // On x64 the dispatcher is more performant, but we cannot use it when
5513 // we need to pass indirection cells as those go into registers that
5514 // are clobbered by the dispatch helper.
5515 bool mayUseDispatcher = GetIndirectionCellArgKind() == WellKnownArg::None;
5516 bool shouldUseDispatcher = true;
5517 #elif defined(TARGET_ARM64)
5518 bool mayUseDispatcher = true;
5519 // Branch predictors on ARM64 generally do not handle the dispatcher as
5520 // well as on x64 hardware, so only use the validator by default.
5521 bool shouldUseDispatcher = false;
5523 // Other platforms do not even support the dispatcher.
5524 bool mayUseDispatcher = false;
5525 bool shouldUseDispatcher = false;
5529 switch (JitConfig.JitCFGUseDispatcher())
5532 shouldUseDispatcher = false;
5535 shouldUseDispatcher = true;
5542 return mayUseDispatcher && shouldUseDispatcher ? CFGCallKind::Dispatch : CFGCallKind::ValidateAndCall;
5545 GenTreeCallFlags gtCallMoreFlags; // in addition to gtFlags
5546 gtCallTypes gtCallType : 3; // value from the gtCallTypes enumeration
5547 var_types gtReturnType : 5; // exact return type
5549 uint8_t gtInlineInfoCount; // number of inline candidates for the given call
5551 CORINFO_CLASS_HANDLE gtRetClsHnd; // The return type handle of the call if it is a struct; always available
5553 void* gtStubCallStubAddr; // GTF_CALL_VIRT_STUB - these are never inlined
5554 CORINFO_CLASS_HANDLE gtInitClsHnd; // Used by static init helpers, represents a class they init
5558 // only used for CALLI unmanaged calls (CT_INDIRECT)
5559 GenTree* gtCallCookie;
5561 // gtInlineCandidateInfo is only used when inlining methods
5562 InlineCandidateInfo* gtInlineCandidateInfo;
5563 // gtInlineCandidateInfoList is used when we have more than one GDV candidate
5564 jitstd::vector<InlineCandidateInfo*>* gtInlineCandidateInfoList;
5566 HandleHistogramProfileCandidateInfo* gtHandleHistogramProfileCandidateInfo;
5567 LateDevirtualizationInfo* gtLateDevirtualizationInfo;
5568 CORINFO_GENERIC_HANDLE compileTimeHelperArgumentHandle; // Used to track type handle argument of dynamic helpers
5569 void* gtDirectCallAddress; // Used to pass direct call address between lower and codegen
5572 // expression evaluated after args are placed which determines the control target
5573 GenTree* gtControlExpr;
5576 CORINFO_METHOD_HANDLE gtCallMethHnd; // CT_USER_FUNC or CT_HELPER
5577 GenTree* gtCallAddr; // CT_INDIRECT
5580 #ifdef FEATURE_READYTORUN
5581 // Call target lookup info for method call from a Ready To Run module
5582 CORINFO_CONST_LOOKUP gtEntryPoint;
5585 #if defined(DEBUG) || defined(INLINE_DATA)
5586 // For non-inline candidates, track the first observation
5587 // that blocks candidacy.
5588 InlineObservation gtInlineObservation;
5590 // IL offset of the call wrt its parent method.
5591 IL_OFFSET gtRawILOffset;
5593 // In DEBUG we report even non inline candidates in the inline tree in
5594 // fgNoteNonInlineCandidate. We need to keep around the inline context for
5595 // this as normally it's part of the candidate info.
5596 class InlineContext* gtInlineContext;
5597 #endif // defined(DEBUG) || defined(INLINE_DATA)
// True when this call targets a JIT helper (call type CT_HELPER).
5599 bool IsHelperCall() const
5601 return gtCallType == CT_HELPER;
// True when this is a helper call to the specific method handle given.
5604 bool IsHelperCall(CORINFO_METHOD_HANDLE callMethHnd) const
5606 return IsHelperCall() && (callMethHnd == gtCallMethHnd);
// Overload resolved out-of-line: compares against a helper identified by number.
5609 bool IsHelperCall(Compiler* compiler, unsigned helper) const;
5611 CorInfoHelpFunc GetHelperNum() const;
5613 bool AreArgsComplete() const;
// Returns the effective unmanaged calling convention; managed calls report
// CorInfoCallConvExtension::Managed regardless of the stored unmgdCallConv.
5615 CorInfoCallConvExtension GetUnmanagedCallConv() const
5617 return IsUnmanaged() ? unmgdCallConv : CorInfoCallConvExtension::Managed;
5620 static bool Equals(GenTreeCall* c1, GenTreeCall* c2);
5622 GenTreeCall(var_types type) : GenTree(GT_CALL, type)
5625 #if DEBUGGABLE_GENTREE
5626 GenTreeCall() : GenTree()
5632 struct GenTreeCmpXchg : public GenTree
5634 GenTree* gtOpLocation;
5636 GenTree* gtOpComparand;
5638 GenTreeCmpXchg(var_types type, GenTree* loc, GenTree* val, GenTree* comparand)
5639 : GenTree(GT_CMPXCHG, type), gtOpLocation(loc), gtOpValue(val), gtOpComparand(comparand)
5641 // There's no reason to do a compare-exchange on a local location, so we'll assume that all of these
5642 // have global effects.
5643 gtFlags |= (GTF_GLOB_REF | GTF_ASG);
5645 // Merge in flags from operands
5646 gtFlags |= gtOpLocation->gtFlags & GTF_ALL_EFFECT;
5647 gtFlags |= gtOpValue->gtFlags & GTF_ALL_EFFECT;
5648 gtFlags |= gtOpComparand->gtFlags & GTF_ALL_EFFECT;
5650 #if DEBUGGABLE_GENTREE
5651 GenTreeCmpXchg() : GenTree()
5657 #if !defined(TARGET_64BIT)
5658 struct GenTreeMultiRegOp : public GenTreeOp
5660 regNumber gtOtherReg;
5662 // GTF_SPILL or GTF_SPILLED flag on a multi-reg node indicates that one or
5663 // more of its result regs are in that state. The spill flag of each of the
5664 // return register is stored here. We only need 2 bits per returned register,
5665 // so this is treated as a 2-bit array. No architecture needs more than 8 bits.
5667 MultiRegSpillFlags gtSpillFlags;
5669 GenTreeMultiRegOp(genTreeOps oper, var_types type, GenTree* op1, GenTree* op2)
5670 : GenTreeOp(oper, type, op1, op2), gtOtherReg(REG_NA)
5672 ClearOtherRegFlags();
5675 unsigned GetRegCount() const
5677 return (TypeGet() == TYP_LONG) ? 2 : 1;
5680 //---------------------------------------------------------------------------
5681 // GetRegNumByIdx: get i'th register allocated to this struct argument.
5684 // idx - index of the register
5687 // Return regNumber of i'th register of this register argument
5689 regNumber GetRegNumByIdx(unsigned idx) const
5701 GenTreeFlags GetRegSpillFlagByIdx(unsigned idx) const
5703 return GetMultiRegSpillFlagsByIdx(gtSpillFlags, idx);
5706 void SetRegSpillFlagByIdx(GenTreeFlags flags, unsigned idx)
5708 #if FEATURE_MULTIREG_RET
5709 gtSpillFlags = SetMultiRegSpillFlagsByIdx(gtSpillFlags, flags, idx);
5713 //--------------------------------------------------------------------------
5714 // GetRegType: Get var_type of the register specified by index.
5717 // index - Index of the register.
5718 // First register will have an index 0 and so on.
5721 // var_type of the register specified by its index.
5723 var_types GetRegType(unsigned index) const
5726 // The type of register is usually the same as GenTree type, since GenTreeMultiRegOp usually defines a single
5728 // The special case is when we have TYP_LONG, which may be a MUL_LONG, or a DOUBLE arg passed as LONG,
5729 // in which case we need to separate them into int for each index.
5730 var_types result = TypeGet();
5731 if (result == TYP_LONG)
5738 //-------------------------------------------------------------------
5739 // clearOtherRegFlags: clear GTF_* flags associated with gtOtherRegs
5747 void ClearOtherRegFlags()
5752 #if DEBUGGABLE_GENTREE
5753 GenTreeMultiRegOp() : GenTreeOp()
5758 #endif // !defined(TARGET_64BIT)
5760 struct GenTreeFptrVal : public GenTree
5762 CORINFO_METHOD_HANDLE gtFptrMethod;
5764 bool gtFptrDelegateTarget;
5766 #ifdef FEATURE_READYTORUN
5767 CORINFO_CONST_LOOKUP gtEntryPoint;
5770 GenTreeFptrVal(var_types type, CORINFO_METHOD_HANDLE meth)
5771 : GenTree(GT_FTN_ADDR, type), gtFptrMethod(meth), gtFptrDelegateTarget(false)
5773 #ifdef FEATURE_READYTORUN
5774 gtEntryPoint.addr = nullptr;
5775 gtEntryPoint.accessType = IAT_VALUE;
5778 #if DEBUGGABLE_GENTREE
5779 GenTreeFptrVal() : GenTree()
5786 struct GenTreeQmark : public GenTreeOp
5788 GenTreeQmark(var_types type, GenTree* cond, GenTreeColon* colon) : GenTreeOp(GT_QMARK, type, cond, colon)
5790 // These must follow a specific form.
5791 assert((cond != nullptr) && cond->TypeIs(TYP_INT));
5792 assert((colon != nullptr) && colon->OperIs(GT_COLON));
5797 return gtOp2->AsColon()->ThenNode();
5802 return gtOp2->AsColon()->ElseNode();
5805 #if DEBUGGABLE_GENTREE
5806 GenTreeQmark() : GenTreeOp()
5812 /* gtIntrinsic -- intrinsic (possibly-binary op [NULL op2 is allowed] with an additional field) */
5814 struct GenTreeIntrinsic : public GenTreeOp
5816 NamedIntrinsic gtIntrinsicName;
5817 CORINFO_METHOD_HANDLE gtMethodHandle; // Method handle of the method which is treated as an intrinsic.
5819 #ifdef FEATURE_READYTORUN
5820 // Call target lookup info for method call from a Ready To Run module
5821 CORINFO_CONST_LOOKUP gtEntryPoint;
5824 GenTreeIntrinsic(var_types type, GenTree* op1, NamedIntrinsic intrinsicName, CORINFO_METHOD_HANDLE methodHandle)
5825 : GenTreeOp(GT_INTRINSIC, type, op1, nullptr), gtIntrinsicName(intrinsicName), gtMethodHandle(methodHandle)
5827 assert(intrinsicName != NI_Illegal);
5831 var_types type, GenTree* op1, GenTree* op2, NamedIntrinsic intrinsicName, CORINFO_METHOD_HANDLE methodHandle)
5832 : GenTreeOp(GT_INTRINSIC, type, op1, op2), gtIntrinsicName(intrinsicName), gtMethodHandle(methodHandle)
5834 assert(intrinsicName != NI_Illegal);
5837 #if DEBUGGABLE_GENTREE
5838 GenTreeIntrinsic() : GenTreeOp()
5844 // GenTreeMultiOp - a node with a flexible count of operands stored in an array.
5845 // The array can be an inline one, or a dynamic one, or both, with switching
5846 // between them supported. See GenTreeJitIntrinsic for an example of a node
5847 // utilizing GenTreeMultiOp. GTF_REVERSE_OPS is supported for GenTreeMultiOp's
5848 // with two operands.
5850 struct GenTreeMultiOp : public GenTree
5858 Iterator(GenTree** use) : m_use(use)
5863 Iterator& operator++()
5869 bool operator==(const Iterator& other) const
5871 return m_use == other.m_use;
5874 bool operator!=(const Iterator& other) const
5876 return m_use != other.m_use;
5880 class OperandsIterator final : public Iterator
5883 OperandsIterator(GenTree** use) : Iterator(use)
5887 GenTree* operator*()
5893 class UseEdgesIterator final : public Iterator
5896 UseEdgesIterator(GenTree** use) : Iterator(use)
5900 GenTree** operator*()
5907 GenTree** m_operands;
5910 template <unsigned InlineOperandCount, typename... Operands>
5911 GenTreeMultiOp(genTreeOps oper,
5913 CompAllocator allocator,
5914 GenTree* (&inlineOperands)[InlineOperandCount] DEBUGARG(bool largeNode),
5915 Operands... operands)
5916 : GenTree(oper, type DEBUGARG(largeNode))
5918 const size_t OperandCount = sizeof...(Operands);
5920 m_operands = (OperandCount <= InlineOperandCount) ? inlineOperands : allocator.allocate<GenTree*>(OperandCount);
5922 // "OperandCount + 1" so that it works well when OperandCount is 0.
5923 GenTree* operandsArray[OperandCount + 1]{operands...};
5924 InitializeOperands(operandsArray, OperandCount);
5927 // Note that this constructor takes the owndership of the "operands" array.
5928 template <unsigned InlineOperandCount>
5929 GenTreeMultiOp(genTreeOps oper,
5932 size_t operandCount,
5933 GenTree* (&inlineOperands)[InlineOperandCount] DEBUGARG(bool largeNode))
5934 : GenTree(oper, type DEBUGARG(largeNode))
5936 m_operands = (operandCount <= InlineOperandCount) ? inlineOperands : operands;
5938 InitializeOperands(operands, operandCount);
5942 #if DEBUGGABLE_GENTREE
5943 GenTreeMultiOp() : GenTree()
5948 GenTree*& Op(size_t index)
5950 size_t actualIndex = index - 1;
5951 assert(actualIndex < m_operandCount);
5952 assert(m_operands[actualIndex] != nullptr);
5954 return m_operands[actualIndex];
5957 GenTree* Op(size_t index) const
5959 return const_cast<GenTreeMultiOp*>(this)->Op(index);
5962 // Note that unlike the general "Operands" iterator, this specialized version does not respect GTF_REVERSE_OPS.
5963 IteratorPair<OperandsIterator> Operands()
5965 return MakeIteratorPair(OperandsIterator(GetOperandArray()),
5966 OperandsIterator(GetOperandArray() + GetOperandCount()));
5969 // Note that unlike the general "UseEdges" iterator, this specialized version does not respect GTF_REVERSE_OPS.
5970 IteratorPair<UseEdgesIterator> UseEdges()
5972 return MakeIteratorPair(UseEdgesIterator(GetOperandArray()),
5973 UseEdgesIterator(GetOperandArray() + GetOperandCount()));
5976 size_t GetOperandCount() const
5978 return m_operandCount;
5981 GenTree** GetOperandArray(size_t startIndex = 0) const
5983 return m_operands + startIndex;
5987 // Reconfigures the operand array, leaving it in a "dirty" state.
5988 void ResetOperandArray(size_t newOperandCount,
5990 GenTree** inlineOperands,
5991 size_t inlineOperandCount);
5993 static bool OperandsAreEqual(GenTreeMultiOp* op1, GenTreeMultiOp* op2);
5996 void InitializeOperands(GenTree** operands, size_t operandCount);
5998 void SetOperandCount(size_t newOperandCount)
6000 assert(FitsIn<uint8_t>(newOperandCount));
6001 m_operandCount = static_cast<uint8_t>(newOperandCount);
6005 // Helper class used to implement the constructor of GenTreeJitIntrinsic which
6006 // transfers the ownership of the passed-in array to the underlying MultiOp node.
6007 class IntrinsicNodeBuilder final
6009 friend struct GenTreeJitIntrinsic;
6011 GenTree** m_operands;
6012 size_t m_operandCount;
6013 GenTree* m_inlineOperands[2];
6016 IntrinsicNodeBuilder(CompAllocator allocator, size_t operandCount) : m_operandCount(operandCount)
6019 (operandCount <= ArrLen(m_inlineOperands)) ? m_inlineOperands : allocator.allocate<GenTree*>(operandCount);
6021 for (size_t i = 0; i < operandCount; i++)
6023 m_operands[i] = nullptr;
6028 IntrinsicNodeBuilder(CompAllocator allocator, GenTreeMultiOp* source) : m_operandCount(source->GetOperandCount())
6030 m_operands = (m_operandCount <= ArrLen(m_inlineOperands)) ? m_inlineOperands
6031 : allocator.allocate<GenTree*>(m_operandCount);
6032 for (size_t i = 0; i < m_operandCount; i++)
6034 m_operands[i] = source->Op(i + 1);
6038 void AddOperand(size_t index, GenTree* operand)
6040 assert(index < m_operandCount);
6041 assert(m_operands[index] == nullptr);
6042 m_operands[index] = operand;
6045 GenTree* GetOperand(size_t index) const
6047 assert(index < m_operandCount);
6048 assert(m_operands[index] != nullptr);
6049 return m_operands[index];
6052 size_t GetOperandCount() const
6054 return m_operandCount;
6058 GenTree** GetBuiltOperands()
6061 for (size_t i = 0; i < m_operandCount; i++)
6063 assert(m_operands[i] != nullptr);
6071 struct GenTreeJitIntrinsic : public GenTreeMultiOp
6074 GenTree* gtInlineOperands[2];
6075 regNumberSmall gtOtherReg; // The second register for multi-reg intrinsics.
// Data members and accessors of GenTreeJitIntrinsic (the enclosing class declaration
// begins above this chunk). Small fields are stored narrowed (unsigned char) and
// widened back through typed accessors; each setter asserts the narrowing round-trips.
6076 MultiRegSpillFlags gtSpillFlags; // Spill flags for multi-reg intrinsics.
6077 unsigned char gtAuxiliaryJitType; // For intrinsics that need another type (e.g. Avx2.Gather* or SIMD (by element))
6078 unsigned char gtSimdBaseJitType; // SIMD vector base JIT type
6079 unsigned char gtSimdSize; // SIMD vector size in bytes, use 0 for scalar intrinsics
6080 NamedIntrinsic gtHWIntrinsicId;
// Secondary register used by multi-reg intrinsics; stored narrowed as regNumberSmall.
6083 regNumber GetOtherReg() const
6085 return (regNumber)gtOtherReg;
6088 void SetOtherReg(regNumber reg)
6090 gtOtherReg = (regNumberSmall)reg;
// The narrowing cast must be lossless.
6091 assert(gtOtherReg == reg);
// Per-register spill flags for multi-reg results, packed into gtSpillFlags.
6094 GenTreeFlags GetRegSpillFlagByIdx(unsigned idx) const
6096 return GetMultiRegSpillFlagsByIdx(gtSpillFlags, idx);
6099 void SetRegSpillFlagByIdx(GenTreeFlags flags, unsigned idx)
6101 gtSpillFlags = SetMultiRegSpillFlagsByIdx(gtSpillFlags, flags, idx);
// Auxiliary JIT type, for intrinsics that carry a second element type.
6104 CorInfoType GetAuxiliaryJitType() const
6106 return (CorInfoType)gtAuxiliaryJitType;
6109 void SetAuxiliaryJitType(CorInfoType auxiliaryJitType)
6111 gtAuxiliaryJitType = (unsigned char)auxiliaryJitType;
6112 assert(gtAuxiliaryJitType == auxiliaryJitType);
6115 var_types GetAuxiliaryType() const;
6117 CorInfoType GetSimdBaseJitType() const
6119 return (CorInfoType)gtSimdBaseJitType;
// Normalizes the pointer-sized CORINFO_TYPE_NATIVE(U)INT down to the fixed-size
// LONG/ULONG or INT/UINT equivalent (target-width dependent); all other base
// types are returned unchanged.
6122 CorInfoType GetNormalizedSimdBaseJitType() const
6124 CorInfoType simdBaseJitType = GetSimdBaseJitType();
6125 switch (simdBaseJitType)
6127 case CORINFO_TYPE_NATIVEINT:
6130 return CORINFO_TYPE_LONG;
6132 return CORINFO_TYPE_INT;
6136 case CORINFO_TYPE_NATIVEUINT:
6139 return CORINFO_TYPE_ULONG;
6141 return CORINFO_TYPE_UINT;
6146 return simdBaseJitType;
6150 void SetSimdBaseJitType(CorInfoType simdBaseJitType)
6152 gtSimdBaseJitType = (unsigned char)simdBaseJitType;
6153 assert(gtSimdBaseJitType == simdBaseJitType);
6156 var_types GetSimdBaseType() const;
6158 unsigned char GetSimdSize() const
6163 void SetSimdSize(unsigned simdSize)
6165 gtSimdSize = (unsigned char)simdSize;
6166 assert(gtSimdSize == simdSize);
// Constructor taking the operands directly as a parameter pack; stores them via
// the inline operand array of GenTreeMultiOp.
6169 template <typename... Operands>
6170 GenTreeJitIntrinsic(genTreeOps oper,
6172 CompAllocator allocator,
6173 CorInfoType simdBaseJitType,
6175 Operands... operands)
6176 : GenTreeMultiOp(oper, type, allocator, gtInlineOperands DEBUGARG(false), operands...)
6177 , gtOtherReg(REG_NA)
6179 , gtAuxiliaryJitType(CORINFO_TYPE_UNDEF)
6180 , gtSimdBaseJitType((unsigned char)simdBaseJitType)
6181 , gtSimdSize((unsigned char)simdSize)
6182 , gtHWIntrinsicId(NI_Illegal)
6184 assert(gtSimdBaseJitType == simdBaseJitType);
6185 assert(gtSimdSize == simdSize);
6188 #if DEBUGGABLE_GENTREE
6189 GenTreeJitIntrinsic() : GenTreeMultiOp()
// Constructor taking the operands from an IntrinsicNodeBuilder (used when the
// operand count is not known statically).
6195 GenTreeJitIntrinsic(genTreeOps oper,
6197 IntrinsicNodeBuilder&& nodeBuilder,
6198 CorInfoType simdBaseJitType,
6200 : GenTreeMultiOp(oper,
6202 nodeBuilder.GetBuiltOperands(),
6203 nodeBuilder.GetOperandCount(),
6204 gtInlineOperands DEBUGARG(false))
6205 , gtOtherReg(REG_NA)
6207 , gtAuxiliaryJitType(CORINFO_TYPE_UNDEF)
6208 , gtSimdBaseJitType((unsigned char)simdBaseJitType)
6209 , gtSimdSize((unsigned char)simdSize)
6210 , gtHWIntrinsicId(NI_Illegal)
6212 assert(gtSimdBaseJitType == simdBaseJitType);
6213 assert(gtSimdSize == simdSize);
// A nonzero SIMD size distinguishes vector intrinsics from scalar ones.
6219 return gtSimdSize != 0;
6223 #ifdef FEATURE_HW_INTRINSICS
// GenTreeHWIntrinsic -- hardware intrinsic node (GT_HWINTRINSIC). Wraps a
// NamedIntrinsic id plus the SIMD base type/size inherited from GenTreeJitIntrinsic.
6224 struct GenTreeHWIntrinsic : public GenTreeJitIntrinsic
6226 GenTreeHWIntrinsic(var_types type,
6227 IntrinsicNodeBuilder&& nodeBuilder,
6228 NamedIntrinsic hwIntrinsicID,
6229 CorInfoType simdBaseJitType,
6231 : GenTreeJitIntrinsic(GT_HWINTRINSIC, type, std::move(nodeBuilder), simdBaseJitType, simdSize)
6233 Initialize(hwIntrinsicID);
6236 template <typename... Operands>
6237 GenTreeHWIntrinsic(var_types type,
6238 CompAllocator allocator,
6239 NamedIntrinsic hwIntrinsicID,
6240 CorInfoType simdBaseJitType,
6242 Operands... operands)
6243 : GenTreeJitIntrinsic(GT_HWINTRINSIC, type, allocator, simdBaseJitType, simdSize, operands...)
6245 Initialize(hwIntrinsicID);
6248 #if DEBUGGABLE_GENTREE
6249 GenTreeHWIntrinsic() : GenTreeJitIntrinsic()
// Classification queries implemented out-of-line (they consult intrinsic tables).
6254 bool OperIsMemoryLoad(GenTree** pAddr = nullptr) const;
6255 bool OperIsMemoryStore(GenTree** pAddr = nullptr) const;
6256 bool OperIsMemoryLoadOrStore() const;
6257 bool OperIsMemoryStoreOrBarrier() const;
6258 bool OperIsEmbBroadcastCompatible() const;
6259 bool OperIsBroadcastScalar() const;
6260 bool OperIsCreateScalarUnsafe() const;
6262 bool OperRequiresAsgFlag() const;
6263 bool OperRequiresCallFlag() const;
6265 unsigned GetResultOpNumForRmwIntrinsic(GenTree* use, GenTree* op1, GenTree* op2, GenTree* op3);
6267 ClassLayout* GetLayout(Compiler* compiler) const;
6269 NamedIntrinsic GetHWIntrinsicId() const;
6271 //---------------------------------------------------------------------------------------
6272 // ChangeHWIntrinsicId: Change the intrinsic id for this node.
6274 // This method just sets the intrinsic id, asserting that the new intrinsic
6275 // has the same number of operands as the old one, optionally setting some of
6276 // the new operands. Intrinsics with an unknown number of operands are exempt
6277 // from the "do I have the same number of operands" check however, so this method must
6278 // be used with care. Use "ResetHWIntrinsicId" if you need to fully reconfigure
6279 // the node for a different intrinsic, with a possibly different number of operands.
6282 // intrinsicId - the new intrinsic id for the node
6283 // operands - optional operands to set while changing the id
6286 // It is the caller's responsibility to update side effect flags.
6288 template <typename... Operands>
6289 void ChangeHWIntrinsicId(NamedIntrinsic intrinsicId, Operands... operands)
6291 const size_t OperandCount = sizeof...(Operands);
6292 assert(OperandCount <= GetOperandCount());
6294 SetHWIntrinsicId(intrinsicId);
// "+ 1" avoids a zero-length array when no operands are supplied; the loop only
// copies the first OperandCount entries.
6296 GenTree* operandsArray[OperandCount + 1]{operands...};
6297 GenTree** operandsStore = GetOperandArray();
6299 for (size_t i = 0; i < OperandCount; i++)
6301 operandsStore[i] = operandsArray[i];
6305 //---------------------------------------------------------------------------------------
6306 // ResetHWIntrinsicId: Reset the intrinsic id for this node.
6308 // This method resets the intrinsic id, fully reconfiguring the node. It must
6309 // be supplied with all the operands the new node needs, and can allocate a
6310 // new dynamic array if the operands do not fit into in an inline one, in which
6311 // case a compiler argument is used to get the memory allocator.
6313 // This method is similar to "ChangeHWIntrinsicId" but is more versatile and
6314 // thus more expensive. Use it when you need to bash to an intrinsic id with
6315 // a different number of operands than what the original node had, or, which
6316 // is equivalent, when you do not know the original number of operands.
6319 // intrinsicId - the new intrinsic id for the node
6320 // compiler - compiler to allocate memory with, can be "nullptr" if the
6321 // number of new operands does not exceed the length of the
6322 // inline array (so, there are 2 or fewer of them)
6323 // operands - *all* operands for the new node
6326 // It is the caller's responsibility to update side effect flags.
6328 template <typename... Operands>
6329 void ResetHWIntrinsicId(NamedIntrinsic intrinsicId, Compiler* compiler, Operands... operands)
6331 const size_t NewOperandCount = sizeof...(Operands);
6332 assert((compiler != nullptr) || (NewOperandCount <= ArrLen(gtInlineOperands)));
6334 ResetOperandArray(NewOperandCount, compiler, gtInlineOperands, ArrLen(gtInlineOperands));
6335 ChangeHWIntrinsicId(intrinsicId, operands...);
// Convenience overloads for the common 0/1/2-operand cases (inline array suffices,
// so no compiler is needed for allocation).
6338 void ResetHWIntrinsicId(NamedIntrinsic intrinsicId, GenTree* op1, GenTree* op2)
6340 ResetHWIntrinsicId(intrinsicId, static_cast<Compiler*>(nullptr), op1, op2);
6343 void ResetHWIntrinsicId(NamedIntrinsic intrinsicId, GenTree* op1)
6345 ResetHWIntrinsicId(intrinsicId, static_cast<Compiler*>(nullptr), op1);
6348 void ResetHWIntrinsicId(NamedIntrinsic intrinsicId)
6350 ResetHWIntrinsicId(intrinsicId, static_cast<Compiler*>(nullptr));
6353 static bool Equals(GenTreeHWIntrinsic* op1, GenTreeHWIntrinsic* op2);
6356 void SetHWIntrinsicId(NamedIntrinsic intrinsicId);
6358 void Initialize(NamedIntrinsic intrinsicId);
6360 #endif // FEATURE_HW_INTRINSICS
6362 // GenTreeVecCon -- vector constant (GT_CNS_VEC)
6364 struct GenTreeVecCon : public GenTree
6368 simd12_t gtSimd12Val;
6369 simd16_t gtSimd16Val;
6371 #if defined(TARGET_XARCH)
6372 simd32_t gtSimd32Val;
6373 simd64_t gtSimd64Val;
6374 #endif // TARGET_XARCH
6379 #if defined(FEATURE_HW_INTRINSICS)
6380 static unsigned ElementCount(unsigned simdSize, var_types simdBaseType);
// IsHWIntrinsicCreateConstant: if "node" is a VectorXxx.Create/CreateScalar[Unsafe]
// intrinsic whose arguments are all constants, fills "simdVal" with the resulting
// vector constant and returns true; otherwise returns false.
6382 template <typename simdTypename>
6383 static bool IsHWIntrinsicCreateConstant(GenTreeHWIntrinsic* node, simdTypename& simdVal)
6385 NamedIntrinsic intrinsic = node->GetHWIntrinsicId();
6386 var_types simdType = node->TypeGet();
6387 var_types simdBaseType = node->GetSimdBaseType();
6388 unsigned simdSize = node->GetSimdSize();
6390 size_t argCnt = node->GetOperandCount();
6391 size_t cnsArgCnt = 0;
6395 case NI_Vector128_Create:
6396 case NI_Vector128_CreateScalar:
6397 case NI_Vector128_CreateScalarUnsafe:
6398 #if defined(TARGET_XARCH)
6399 case NI_Vector256_Create:
6400 case NI_Vector512_Create:
6401 case NI_Vector256_CreateScalar:
6402 case NI_Vector512_CreateScalar:
6403 case NI_Vector256_CreateScalarUnsafe:
6404 case NI_Vector512_CreateScalarUnsafe:
6405 #elif defined(TARGET_ARM64)
6406 case NI_Vector64_Create:
6407 case NI_Vector64_CreateScalar:
6408 case NI_Vector64_CreateScalarUnsafe:
6411 // Zero out the simdVal
6414 // These intrinsics are meant to set the same value to every element.
6415 if ((argCnt == 1) && HandleArgForHWIntrinsicCreate<simdTypename>(node->Op(1), 0, simdVal, simdBaseType))
6417 // CreateScalar leaves the upper bits as zero
6419 #if defined(TARGET_XARCH)
6420 if ((intrinsic != NI_Vector128_CreateScalar) && (intrinsic != NI_Vector256_CreateScalar) &&
6421 (intrinsic != NI_Vector512_CreateScalar))
6422 #elif defined(TARGET_ARM64)
6423 if ((intrinsic != NI_Vector64_CreateScalar) && (intrinsic != NI_Vector128_CreateScalar))
6426 // Now assign the rest of the arguments.
6427 for (unsigned i = 1; i < ElementCount(simdSize, simdBaseType); i++)
// Broadcast: every element is the same (single) constant operand.
6429 HandleArgForHWIntrinsicCreate<simdTypename>(node->Op(1), i, simdVal, simdBaseType);
// Multi-operand Create: each operand supplies one element.
6437 for (unsigned i = 1; i <= argCnt; i++)
6439 if (HandleArgForHWIntrinsicCreate<simdTypename>(node->Op(i), i - 1, simdVal, simdBaseType))
6446 assert((argCnt == 1) || (argCnt == ElementCount(simdSize, simdBaseType)));
6447 return argCnt == cnsArgCnt;
6457 //----------------------------------------------------------------------------------------------
6458 // HandleArgForHWIntrinsicCreate: Processes an argument for the GenTreeVecCon::IsHWIntrinsicCreateConstant method
6461 // arg - The argument to process
6462 // argIdx - The index of the argument being processed
6463 // simdVal - The vector constant being constructed
6464 // baseType - The base type of the vector constant
6467 // true if arg was a constant; otherwise, false
6468 template <typename simdTypename>
6469 static bool HandleArgForHWIntrinsicCreate(GenTree* arg, int argIdx, simdTypename& simdVal, var_types baseType)
6476 if (arg->IsCnsIntOrI())
6478 simdVal.i8[argIdx] = static_cast<int8_t>(arg->AsIntCon()->gtIconVal);
6483 // We expect the constant to have been already zeroed
6484 assert(simdVal.i8[argIdx] == 0);
6492 if (arg->IsCnsIntOrI())
6494 simdVal.i16[argIdx] = static_cast<int16_t>(arg->AsIntCon()->gtIconVal);
6499 // We expect the constant to have been already zeroed
6500 assert(simdVal.i16[argIdx] == 0);
6508 if (arg->IsCnsIntOrI())
6510 simdVal.i32[argIdx] = static_cast<int32_t>(arg->AsIntCon()->gtIconVal);
6515 // We expect the constant to have been already zeroed
6516 assert(simdVal.i32[argIdx] == 0);
6524 #if defined(TARGET_64BIT)
6525 if (arg->IsCnsIntOrI())
6527 simdVal.i64[argIdx] = static_cast<int64_t>(arg->AsIntCon()->gtIconVal);
6531 if (arg->OperIsLong() && arg->AsOp()->gtOp1->IsCnsIntOrI() && arg->AsOp()->gtOp2->IsCnsIntOrI())
6533 // 32-bit targets will decompose GT_CNS_LNG into two GT_CNS_INT
6534 // We need to reconstruct the 64-bit value in order to handle this
6536 INT64 gtLconVal = arg->AsOp()->gtOp2->AsIntCon()->gtIconVal;
6538 gtLconVal |= arg->AsOp()->gtOp1->AsIntCon()->gtIconVal;
6540 simdVal.i64[argIdx] = gtLconVal;
6543 #endif // TARGET_64BIT
6546 // We expect the constant to have been already zeroed
6547 assert(simdVal.i64[argIdx] == 0);
6554 if (arg->IsCnsFltOrDbl())
6556 simdVal.f32[argIdx] = static_cast<float>(arg->AsDblCon()->DconValue());
6561 // We expect the constant to have been already zeroed
6562 // We check against the i32, rather than f32, to account for -0.0
6563 assert(simdVal.i32[argIdx] == 0);
6570 if (arg->IsCnsFltOrDbl())
6572 simdVal.f64[argIdx] = static_cast<double>(arg->AsDblCon()->DconValue());
6577 // We expect the constant to have been already zeroed
6578 // We check against the i64, rather than f64, to account for -0.0
6579 assert(simdVal.i64[argIdx] == 0);
6593 #endif // FEATURE_HW_INTRINSICS
// True if every bit of the constant (for this node's SIMD type) is set.
6595 bool IsAllBitsSet() const
6599 #if defined(FEATURE_SIMD)
6602 return gtSimd8Val.IsAllBitsSet();
6607 return gtSimd12Val.IsAllBitsSet();
6612 return gtSimd16Val.IsAllBitsSet();
6615 #if defined(TARGET_XARCH)
6618 return gtSimd32Val.IsAllBitsSet();
6623 return gtSimd64Val.IsAllBitsSet();
6625 #endif // TARGET_XARCH
6626 #endif // FEATURE_SIMD
// Structural equality: same TYP_SIMD type and identical constant bits.
6635 static bool Equals(const GenTreeVecCon* left, const GenTreeVecCon* right)
6637 var_types gtType = left->TypeGet();
6639 if (gtType != right->TypeGet())
6646 #if defined(FEATURE_SIMD)
6649 return left->gtSimd8Val == right->gtSimd8Val;
6654 return left->gtSimd12Val == right->gtSimd12Val;
6659 return left->gtSimd16Val == right->gtSimd16Val;
6662 #if defined(TARGET_XARCH)
6665 return left->gtSimd32Val == right->gtSimd32Val;
6670 return left->gtSimd64Val == right->gtSimd64Val;
6672 #endif // TARGET_XARCH
6673 #endif // FEATURE_SIMD
// True if the constant is all-zero for this node's SIMD type.
6686 #if defined(FEATURE_SIMD)
6689 return gtSimd8Val.IsZero();
6694 return gtSimd12Val.IsZero();
6699 return gtSimd16Val.IsZero();
6702 #if defined(TARGET_XARCH)
6705 return gtSimd32Val.IsZero();
6710 return gtSimd64Val.IsZero();
6712 #endif // TARGET_XARCH
6713 #endif // FEATURE_SIMD
6722 GenTreeVecCon(var_types type) : GenTree(GT_CNS_VEC, type)
6724 assert(varTypeIsSIMD(type));
6726 // Some uses of GenTreeVecCon do not specify all bits in the vector they are using but failing to zero out the
6727 // buffer will cause determinism issues with the compiler.
6728 memset(>SimdVal, 0, sizeof(gtSimdVal));
6730 #if defined(TARGET_XARCH)
6731 assert(sizeof(simd_t) == sizeof(simd64_t));
6733 assert(sizeof(simd_t) == sizeof(simd16_t));
6737 #if DEBUGGABLE_GENTREE
// Parameterless constructor, used only when DEBUGGABLE_GENTREE vtables are enabled.
6738 GenTreeVecCon() : GenTree()
6744 // GenTreeIndexAddr: Given an array object and an index, checks that the index is within the bounds of the array if
6745 // necessary and produces the address of the value at that index of the array.
6747 struct GenTreeIndexAddr : public GenTreeOp
6758 CORINFO_CLASS_HANDLE gtStructElemClass; // If the element type is a struct, this is the struct type.
6760 BasicBlock* gtIndRngFailBB; // Basic block to jump to for array-index-out-of-range
6762 var_types gtElemType; // The element type of the array.
6763 unsigned gtElemSize; // size of elements in the array
6764 unsigned gtLenOffset; // The offset from the array's base address to its length.
6765 unsigned gtElemOffset; // The offset from the array's base address to its first element.
6767 GenTreeIndexAddr(GenTree* arr,
6770 CORINFO_CLASS_HANDLE structElemClass,
6773 unsigned elemOffset,
6775 : GenTreeOp(GT_INDEX_ADDR, TYP_BYREF, arr, ind)
6776 , gtStructElemClass(structElemClass)
6777 , gtIndRngFailBB(nullptr)
6778 , gtElemType(elemType)
6779 , gtElemSize(elemSize)
6780 , gtLenOffset(lenOffset)
6781 , gtElemOffset(elemOffset)
// A struct element type requires a valid class handle.
6783 assert(!varTypeIsStruct(elemType) || (structElemClass != NO_CLASS_HANDLE));
// When a bounds check is required the node is marked GTF_INX_RNGCHK and can throw.
6788 gtFlags |= GTF_INX_RNGCHK;
6791 gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
6794 #if DEBUGGABLE_GENTREE
6795 GenTreeIndexAddr() : GenTreeOp()
6800 bool IsBoundsChecked() const
6802 return (gtFlags & GTF_INX_RNGCHK) != 0;
6805 bool IsNotNull() const
// A bounds-checked index implies the array reference was non-null.
6807 return IsBoundsChecked() || ((gtFlags & GTF_INX_ADDR_NONNULL) != 0);
6811 // GenTreeArrAddr - GT_ARR_ADDR, carries information about the array type from morph to VN.
6812 // This node is just a wrapper (similar to GenTreeBox), the real address
6813 // expression is contained in its first operand.
6815 struct GenTreeArrAddr : GenTreeUnOp
6818 CORINFO_CLASS_HANDLE m_elemClassHandle; // The array element class. Currently only used for arrays of TYP_STRUCT.
6819 var_types m_elemType; // The normalized (TYP_SIMD != TYP_STRUCT) array element type.
6820 uint8_t m_firstElemOffset; // Offset to the first element of the array.
6823 GenTreeArrAddr(GenTree* addr, var_types elemType, CORINFO_CLASS_HANDLE elemClassHandle, uint8_t firstElemOffset)
6824 : GenTreeUnOp(GT_ARR_ADDR, TYP_BYREF, addr DEBUGARG(/* largeNode */ false))
6825 , m_elemClassHandle(elemClassHandle)
6826 , m_elemType(elemType)
6827 , m_firstElemOffset(firstElemOffset)
6829 assert(addr->TypeIs(TYP_BYREF));
// The class handle is only meaningful (and required) for struct elements.
6830 assert(((elemType == TYP_STRUCT) && (elemClassHandle != NO_CLASS_HANDLE)) ||
6831 (elemClassHandle == NO_CLASS_HANDLE));
6834 #if DEBUGGABLE_GENTREE
6835 GenTreeArrAddr() : GenTreeUnOp()
6845 CORINFO_CLASS_HANDLE GetElemClassHandle() const
6847 return m_elemClassHandle;
6850 var_types GetElemType() const
6855 uint8_t GetFirstElemOffset() const
6857 return m_firstElemOffset;
// Decomposes the wrapped address expression into the array object and index
// value number (implemented out-of-line).
6860 void ParseArrayAddress(Compiler* comp, GenTree** pArr, ValueNum* pInxVN);
6863 static void ParseArrayAddressWork(GenTree* tree,
6865 target_ssize_t inputMul,
6868 target_ssize_t* pOffset);
6871 // GenTreeArrCommon -- A parent class for GenTreeArrLen, GenTreeMDArr
6872 // (so, accessing array meta-data for either single-dimensional or multi-dimensional arrays).
6873 // Mostly just a convenience to use the ArrRef() accessor.
6875 struct GenTreeArrCommon : public GenTreeUnOp
6877 GenTree*& ArrRef() // the array address node
6882 GenTreeArrCommon(genTreeOps oper, var_types type, GenTree* arrRef) : GenTreeUnOp(oper, type, arrRef)
6886 #if DEBUGGABLE_GENTREE
6887 GenTreeArrCommon() : GenTreeUnOp()
6893 // GenTreeArrLen (GT_ARR_LENGTH) -- single-dimension (SZ) array length. Used for `array.Length`.
6895 struct GenTreeArrLen : public GenTreeArrCommon
6897 GenTree*& ArrRef() // the array address node
6903 int gtArrLenOffset; // constant to add to "ArrRef()" to get the address of the array length.
6906 int ArrLenOffset() const
6908 return gtArrLenOffset;
6911 GenTreeArrLen(var_types type, GenTree* arrRef, int lenOffset)
6912 : GenTreeArrCommon(GT_ARR_LENGTH, type, arrRef), gtArrLenOffset(lenOffset)
6916 #if DEBUGGABLE_GENTREE
6917 GenTreeArrLen() : GenTreeArrCommon()
6923 // GenTreeMDArr (GT_MDARR_LENGTH, GT_MDARR_LOWER_BOUND) -- multi-dimension (MD) array length
6924 // or lower bound for a dimension. Used for `array.GetLength(n)`, `array.GetLowerBound(n)`.
6926 struct GenTreeMDArr : public GenTreeArrCommon
6929 unsigned gtDim; // array dimension of this array length
6930 unsigned gtRank; // array rank of the array
6933 unsigned Dim() const
6938 unsigned Rank() const
// The result is always TYP_INT regardless of which query (length/lower bound) it is.
6943 GenTreeMDArr(genTreeOps oper, GenTree* arrRef, unsigned dim, unsigned rank)
6944 : GenTreeArrCommon(oper, TYP_INT, arrRef), gtDim(dim), gtRank(rank)
6946 assert(OperIs(GT_MDARR_LENGTH, GT_MDARR_LOWER_BOUND));
6949 #if DEBUGGABLE_GENTREE
6950 GenTreeMDArr() : GenTreeArrCommon()
// GenTreeBoundsChk (GT_BOUNDS_CHECK): a bounds check taking:
6958 // - an index value, and
6959 // - the label to jump to if the index is out of range.
6960 // - the "kind" of the throw block to branch to on failure
6961 // It generates no result.
6963 struct GenTreeBoundsChk : public GenTreeOp
6965 BasicBlock* gtIndRngFailBB; // Basic block to jump to for index-out-of-range
6966 SpecialCodeKind gtThrowKind; // Kind of throw block to branch to on failure
6968 // Store some information about the array element type that was in the GT_INDEX_ADDR node before morphing.
6969 // Note that this information is also stored in the ARR_ADDR node of the morphed tree, but that can be hard
6971 var_types gtInxType; // The array element type.
6973 GenTreeBoundsChk(GenTree* index, GenTree* length, SpecialCodeKind kind)
6974 : GenTreeOp(GT_BOUNDS_CHECK, TYP_VOID, index, length)
6975 , gtIndRngFailBB(nullptr)
6977 , gtInxType(TYP_UNKNOWN)
// A failed check throws, so the node always carries GTF_EXCEPT.
6979 gtFlags |= GTF_EXCEPT;
6981 #if DEBUGGABLE_GENTREE
6982 GenTreeBoundsChk() : GenTreeOp()
6987 // If this check is against GT_ARR_LENGTH or GT_MDARR_LENGTH, returns array reference, else nullptr.
6988 GenTree* GetArray() const
6990 if (GetArrayLength()->OperIsArrLength())
6992 return GetArrayLength()->GetArrLengthArrRef();
7000 // The index expression.
7001 GenTree* GetIndex() const
7006 // An expression for the length.
7007 GenTree* GetArrayLength() const
7013 // GenTreeArrElem - bounds checked address (byref) of a general array element,
7014 // for multidimensional arrays, or 1-d arrays with non-zero lower bounds.
7016 struct GenTreeArrElem : public GenTree
7020 #define GT_ARR_MAX_RANK 3
7021 GenTree* gtArrInds[GT_ARR_MAX_RANK]; // Indices
7022 unsigned char gtArrRank; // Rank of the array
7024 unsigned char gtArrElemSize; // !!! Caution, this is an "unsigned char", it is used only
7025 // on the optimization path of array intrinsics.
7026 // It stores the size of array elements WHEN it can fit
7027 // into an "unsigned char".
7028 // This has caused VSW 571394.
7030 // Requires that "inds" is a pointer to an array of "rank" nodes for the indices.
7031 GenTreeArrElem(var_types type, GenTree* arr, unsigned char rank, unsigned char elemSize, GenTree** inds)
7032 : GenTree(GT_ARR_ELEM, type), gtArrObj(arr), gtArrRank(rank), gtArrElemSize(elemSize)
7034 assert(rank <= ArrLen(gtArrInds));
// Effect flags are the union of all operand effects, plus GTF_EXCEPT for the
// implied bounds checks.
7035 gtFlags |= (arr->gtFlags & GTF_ALL_EFFECT);
7036 for (unsigned char i = 0; i < rank; i++)
7038 gtArrInds[i] = inds[i];
7039 gtFlags |= (inds[i]->gtFlags & GTF_ALL_EFFECT);
7041 gtFlags |= GTF_EXCEPT;
7043 #if DEBUGGABLE_GENTREE
7044 GenTreeArrElem() : GenTree()
7050 /* gtAddrMode -- Target-specific canonicalized addressing expression (GT_LEA) */
7052 struct GenTreeAddrMode : public GenTreeOp
7054 // Address is Base + Index*Scale + Offset.
7055 // These are the legal patterns:
7057 // Base // Base != nullptr && Index == nullptr && Scale == 0 && Offset == 0
7058 // Base + Index*Scale // Base != nullptr && Index != nullptr && Scale != 0 && Offset == 0
7059 // Base + Offset // Base != nullptr && Index == nullptr && Scale == 0 && Offset != 0
7060 // Base + Index*Scale + Offset // Base != nullptr && Index != nullptr && Scale != 0 && Offset != 0
7061 // Index*Scale // Base == nullptr && Index != nullptr && Scale > 1 && Offset == 0
7062 // Index*Scale + Offset // Base == nullptr && Index != nullptr && Scale > 1 && Offset != 0
7063 // Offset // Base == nullptr && Index == nullptr && Scale == 0 && Offset != 0
7066 // 1. Base + Index is legal with Scale==1
7067 // 2. If Index is null, Scale should be zero (or uninitialized / unused)
7068 // 3. If Scale==1, then we should have "Base" instead of "Index*Scale", and "Base + Offset" instead of
7069 // "Index*Scale + Offset".
7071 // First operand is base address/pointer
7072 bool HasBase() const
7074 return gtOp1 != nullptr;
7081 void SetBase(GenTree* base)
7086 // Second operand is scaled index value
7087 bool HasIndex() const
7089 return gtOp2 != nullptr;
7096 void SetIndex(GenTree* index)
7101 unsigned GetScale() const
7106 void SetScale(unsigned scale)
// Offset is stored as ssize_t but exposed to callers as int.
7113 return static_cast<int>(gtOffset);
7116 void SetOffset(int offset)
7121 unsigned gtScale; // The scale factor
7124 ssize_t gtOffset; // The offset to add
7127 GenTreeAddrMode(var_types type, GenTree* base, GenTree* index, unsigned scale, ssize_t offset)
7128 : GenTreeOp(GT_LEA, type, base, index)
// At least one of base/index must be present (pure-offset modes still pass one).
7130 assert(base != nullptr || index != nullptr);
7134 #if DEBUGGABLE_GENTREE
7137 // Used only for GenTree::GetVtableForOper()
7138 GenTreeAddrMode() : GenTreeOp()
7144 // Indir is just an op, no additional data, but some additional abstractions
7145 struct GenTreeIndir : public GenTreeOp
7147 // The address for the indirection.
7153 void SetAddr(GenTree* addr)
// Addresses must be pointer-sized integral values.
7155 assert(varTypeIsI(addr));
7161 assert(OperIs(GT_STOREIND) || OperIsStoreBlk());
7165 // these methods provide an interface to the indirection node which
7173 unsigned Size() const;
7175 GenTreeIndir(genTreeOps oper, var_types type, GenTree* addr, GenTree* data) : GenTreeOp(oper, type, addr, data)
7179 // True if this indirection is a volatile memory operation.
7180 bool IsVolatile() const
7182 return (gtFlags & GTF_IND_VOLATILE) != 0;
7185 // True if this indirection is an unaligned memory operation.
7186 bool IsUnaligned() const
7188 return (gtFlags & GTF_IND_UNALIGNED) != 0;
7191 #if DEBUGGABLE_GENTREE
7192 // Used only for GenTree::GetVtableForOper()
7193 GenTreeIndir() : GenTreeOp()
7197 // Used by XARCH codegen to construct temporary trees to pass to the emitter.
7198 GenTreeIndir() : GenTreeOp(GT_NOP, TYP_UNDEF)
7204 // gtBlk -- 'block' (GT_BLK, GT_STORE_BLK).
7206 // This is the base type for all of the nodes that represent block or struct
7208 // Since it can be a store, it includes gtBlkOpKind to specify the type of
7209 // code generation that will be used for the block operation.
7211 struct GenTreeBlk : public GenTreeIndir
7214 ClassLayout* m_layout;
7217 ClassLayout* GetLayout() const
7222 void SetLayout(ClassLayout* layout)
// Only GT_STORE_DYN_BLK (dynamically-sized) may carry a null layout.
7224 assert((layout != nullptr) || OperIs(GT_STORE_DYN_BLK));
7228 // The data to be stored (null for GT_BLK)
7233 void SetData(GenTree* dataNode)
7238 // The size of the buffer to be copied.
7239 unsigned Size() const
7241 assert((m_layout != nullptr) || OperIs(GT_STORE_DYN_BLK));
7242 return (m_layout != nullptr) ? m_layout->GetSize() : 0;
7245 // Instruction selection: during codegen time, what code sequence we will be using
7246 // to encode this operation.
7250 BlkOpKindCpObjUnroll,
7252 BlkOpKindCpObjRepInstr,
7261 BlkOpKindUnrollMemmove,
7264 #ifndef JIT32_GCENCODER
7265 bool gtBlkOpGcUnsafe;
7269 bool IsOnHeapAndContainsReferences()
// Local (GT_LCL_ADDR) destinations are on the stack and need no GC write barriers.
7271 return (m_layout != nullptr) && m_layout->HasGCPtr() && !Addr()->OperIs(GT_LCL_ADDR);
7275 GenTreeBlk(genTreeOps oper, var_types type, GenTree* addr, ClassLayout* layout)
7276 : GenTreeIndir(oper, type, addr, nullptr)
7281 GenTreeBlk(genTreeOps oper, var_types type, GenTree* addr, GenTree* data, ClassLayout* layout)
7282 : GenTreeIndir(oper, type, addr, data)
// Shared constructor tail: validates the layout and resets codegen state.
7287 void Initialize(ClassLayout* layout)
7289 assert(OperIsBlk(OperGet()) && ((layout != nullptr) || OperIs(GT_STORE_DYN_BLK)));
7290 assert((layout == nullptr) || (layout->GetSize() != 0));
7293 gtBlkOpKind = BlkOpKindInvalid;
7294 #ifndef JIT32_GCENCODER
7295 gtBlkOpGcUnsafe = false;
7299 #if DEBUGGABLE_GENTREE
7302 GenTreeBlk() : GenTreeIndir()
7305 #endif // DEBUGGABLE_GENTREE
7308 // GenTreeStoreDynBlk -- 'dynamic block store' (GT_STORE_DYN_BLK).
7310 // This node is used to represent stores that have a dynamic size - the "cpblk" and "initblk"
7311 // IL instructions are implemented with it. Note that such stores assume the input has no GC
7312 // pointers in it, and as such do not ever use write barriers.
7314 // The "Data()" member of this node will either be a "dummy" IND(struct) node, for "cpblk", or
7315 // the zero constant/INIT_VAL for "initblk".
7317 struct GenTreeStoreDynBlk : public GenTreeBlk
// The size operand; evaluated at runtime (hence the null layout passed below).
7320 GenTree* gtDynamicSize;
7322 GenTreeStoreDynBlk(GenTree* dstAddr, GenTree* data, GenTree* dynamicSize)
7323 : GenTreeBlk(GT_STORE_DYN_BLK, TYP_VOID, dstAddr, data, nullptr), gtDynamicSize(dynamicSize)
7325 // Conservatively the 'dstAddr' could be null or point into the global heap.
7326 // Likewise, this is a store and so must be marked with the GTF_ASG flag.
7327 gtFlags |= (GTF_ASG | GTF_EXCEPT | GTF_GLOB_REF);
7328 gtFlags |= (dynamicSize->gtFlags & GTF_ALL_EFFECT);
7331 #if DEBUGGABLE_GENTREE
7334 GenTreeStoreDynBlk() : GenTreeBlk()
7337 #endif // DEBUGGABLE_GENTREE
7340 // Read-modify-write status of a RMW memory op rooted at a storeInd
// (enumerators of the RMWStatus enum consumed by GenTreeStoreInd below)
7343 STOREIND_RMW_STATUS_UNKNOWN, // RMW status of storeInd unknown
7344 // Default status unless modified by IsRMWMemOpRootedAtStoreInd()
7346 // One of these denote storeind is a RMW memory operation.
7347 STOREIND_RMW_DST_IS_OP1, // StoreInd is known to be a RMW memory op and dst candidate is op1
7348 STOREIND_RMW_DST_IS_OP2, // StoreInd is known to be a RMW memory op and dst candidate is op2
7350 // One of these denote the reason for storeind is marked as non-RMW operation
7351 STOREIND_RMW_UNSUPPORTED_ADDR, // Addr mode is not yet supported for RMW memory
7352 STOREIND_RMW_UNSUPPORTED_OPER, // Operation is not supported for RMW memory
7353 STOREIND_RMW_UNSUPPORTED_TYPE, // Type is not supported for RMW memory
7354 STOREIND_RMW_INDIR_UNEQUAL // Indir to read value is not equivalent to indir that writes the value
// RMWStatusDescription: maps an RMWStatus value to a human-readable string
// (used for dump/diagnostic output). Strings must stay in sync with the enum.
7358 inline const char* RMWStatusDescription(RMWStatus status)
7362 case STOREIND_RMW_STATUS_UNKNOWN:
7363 return "RMW status unknown";
7364 case STOREIND_RMW_DST_IS_OP1:
7365 return "dst candidate is op1";
7366 case STOREIND_RMW_DST_IS_OP2:
7367 return "dst candidate is op2";
7368 case STOREIND_RMW_UNSUPPORTED_ADDR:
7369 return "address mode is not supported";
7370 case STOREIND_RMW_UNSUPPORTED_OPER:
7371 return "oper is not supported";
7372 case STOREIND_RMW_UNSUPPORTED_TYPE:
7373 return "type is not supported";
7374 case STOREIND_RMW_INDIR_UNEQUAL:
7375 return "read indir is not equivalent to write indir";
7382 // StoreInd is just a BinOp, with additional RMW status
7383 struct GenTreeStoreInd : public GenTreeIndir
7385 #if !CPU_LOAD_STORE_ARCH
7386 // The below flag is set and used during lowering
7387 RMWStatus gtRMWStatus;
7389 bool IsRMWStatusUnknown()
7391 return gtRMWStatus == STOREIND_RMW_STATUS_UNKNOWN;
7393 bool IsNonRMWMemoryOp()
7395 return gtRMWStatus == STOREIND_RMW_UNSUPPORTED_ADDR || gtRMWStatus == STOREIND_RMW_UNSUPPORTED_OPER ||
7396 gtRMWStatus == STOREIND_RMW_UNSUPPORTED_TYPE || gtRMWStatus == STOREIND_RMW_INDIR_UNEQUAL;
7398 bool IsRMWMemoryOp()
7400 return gtRMWStatus == STOREIND_RMW_DST_IS_OP1 || gtRMWStatus == STOREIND_RMW_DST_IS_OP2;
7404 return gtRMWStatus == STOREIND_RMW_DST_IS_OP1;
7408 return gtRMWStatus == STOREIND_RMW_DST_IS_OP2;
7410 #endif //! CPU_LOAD_STORE_ARCH
// NOTE(review): on CPU_LOAD_STORE_ARCH targets the gtRMWStatus field does not
// exist; these accessors appear to degrade to "status unknown" no-ops there —
// confirm against the elided #else branches.
7412 RMWStatus GetRMWStatus()
7414 #if !CPU_LOAD_STORE_ARCH
7417 return STOREIND_RMW_STATUS_UNKNOWN;
7421 void SetRMWStatusDefault()
7423 #if !CPU_LOAD_STORE_ARCH
7424 gtRMWStatus = STOREIND_RMW_STATUS_UNKNOWN;
7428 void SetRMWStatus(RMWStatus status)
7430 #if !CPU_LOAD_STORE_ARCH
7431 gtRMWStatus = status;
7440 GenTreeStoreInd(var_types type, GenTree* destPtr, GenTree* data) : GenTreeIndir(GT_STOREIND, type, destPtr, data)
7442 SetRMWStatusDefault();
7445 #if DEBUGGABLE_GENTREE
7448 // Used only for GenTree::GetVtableForOper()
7449 GenTreeStoreInd() : GenTreeIndir()
7451 SetRMWStatusDefault();
7456 /* gtRetExp -- Place holder for the return expression from an inline candidate (GT_RET_EXPR) */
7458 struct GenTreeRetExpr : public GenTree
// The call being considered for inlining.
7460 GenTreeCall* gtInlineCandidate;
7462 // Expression representing gtInlineCandidate's value (e.g. spill temp or
7463 // expression from inlinee, or call itself for unsuccessful inlines).
7464 // Substituted by UpdateInlineReturnExpressionPlaceHolder.
7465 // This tree is nullptr during the import that created the GenTreeRetExpr and is set later
7466 // when handling the actual inline candidate.
7467 GenTree* gtSubstExpr;
7469 // The basic block that gtSubstExpr comes from, to enable propagating mandatory flags.
7470 // nullptr for cases where gtSubstExpr is not a tree from the inlinee.
7471 BasicBlock* gtSubstBB;
7473 GenTreeRetExpr(var_types type) : GenTree(GT_RET_EXPR, type)
7476 #if DEBUGGABLE_GENTREE
7477 GenTreeRetExpr() : GenTree()
7483 // In LIR there are no longer statements so debug information is inserted linearly using these nodes.
// GenTreeILOffset (GT_IL_OFFSET): a void-typed marker node carrying DebugInfo.
7484 struct GenTreeILOffset : public GenTree
7486 DebugInfo gtStmtDI; // debug info
7488 IL_OFFSET gtStmtLastILoffs; // instr offset at end of stmt
7491 GenTreeILOffset(const DebugInfo& di DEBUGARG(IL_OFFSET lastOffset = BAD_IL_OFFSET))
7492 : GenTree(GT_IL_OFFSET, TYP_VOID)
7495 , gtStmtLastILoffs(lastOffset)
7500 #if DEBUGGABLE_GENTREE
7501 GenTreeILOffset() : GenTree(GT_IL_OFFSET, TYP_VOID)
7507 // GenTreeList: adapter class for forward iteration of the execution order GenTree linked list
7508 // using range-based `for`, normally used via Statement::TreeList(), e.g.:
7509 // for (GenTree* const tree : stmt->TreeList()) ...
7516 // Forward iterator for the execution order GenTree linked list (using `gtNext` pointer).
7523 explicit iterator(GenTree* tree) : m_tree(tree)
7527 GenTree* operator*() const
7532 iterator& operator++()
7534 m_tree = m_tree->gtNext;
7538 bool operator!=(const iterator& i) const
7540 return m_tree != i.m_tree;
// The list is delimited by the head pointer and a null-sentinel end().
7544 explicit GenTreeList(GenTree* trees) : m_trees(trees)
7548 iterator begin() const
7550 return iterator(m_trees);
7553 iterator end() const
7555 return iterator(nullptr);
7559 class LocalsGenTreeList
// Bidirectional iterator over the locals-only list threaded through gtNext/gtPrev;
// each step asserts the neighbor is a local node or GT_LCL_ADDR.
7566 GenTreeLclVarCommon* m_tree;
7569 explicit iterator(GenTreeLclVarCommon* tree) : m_tree(tree)
7573 GenTreeLclVarCommon* operator*() const
7578 iterator& operator++()
7580 assert((m_tree->gtNext == nullptr) || m_tree->gtNext->OperIsLocal() || m_tree->gtNext->OperIs(GT_LCL_ADDR));
7581 m_tree = static_cast<GenTreeLclVarCommon*>(m_tree->gtNext);
7585 iterator& operator--()
7587 assert((m_tree->gtPrev == nullptr) || m_tree->gtPrev->OperIsLocal() || m_tree->gtPrev->OperIs(GT_LCL_ADDR));
7588 m_tree = static_cast<GenTreeLclVarCommon*>(m_tree->gtPrev);
7592 bool operator!=(const iterator& i) const
7594 return m_tree != i.m_tree;
7598 explicit LocalsGenTreeList(Statement* stmt) : m_stmt(stmt)
7602 iterator begin() const;
7604 iterator end() const
7606 return iterator(nullptr);
// Remove/Replace unlink or splice local nodes while keeping the threading intact
// (implemented out-of-line).
7609 void Remove(GenTreeLclVarCommon* node);
7610 void Replace(GenTreeLclVarCommon* firstNode,
7611 GenTreeLclVarCommon* lastNode,
7612 GenTreeLclVarCommon* newFirstNode,
7613 GenTreeLclVarCommon* newLastNode);
7616 GenTree** GetForwardEdge(GenTreeLclVarCommon* node);
7617 GenTree** GetBackwardEdge(GenTreeLclVarCommon* node);
7620 // We use the following format when printing the Statement number: Statement->GetID()
7621 // This define is used with string concatenation to put this in printf format strings (Note that %u means unsigned int)
7622 #define FMT_STMT "STMT%05u"
7627 Statement(GenTree* expr DEBUGARG(unsigned stmtID))
7629 , m_treeList(nullptr)
7630 , m_treeListEnd(nullptr)
7634 , m_lastILOffset(BAD_IL_OFFSET)
7640 GenTree* GetRootNode() const
7645 GenTree** GetRootNodePointer()
7650 void SetRootNode(GenTree* treeRoot)
7652 m_rootNode = treeRoot;
7655 GenTree* GetTreeList() const
7660 GenTree** GetTreeListPointer()
7665 void SetTreeList(GenTree* treeHead)
7667 m_treeList = treeHead;
7670 GenTree* GetTreeListEnd() const
7672 return m_treeListEnd;
7675 GenTree** GetTreeListEndPointer()
7677 return &m_treeListEnd;
7680 void SetTreeListEnd(GenTree* end)
7682 m_treeListEnd = end;
7685 GenTreeList TreeList() const;
7686 LocalsGenTreeList LocalsTreeList();
7688 const DebugInfo& GetDebugInfo() const
7693 void SetDebugInfo(const DebugInfo& di)
7701 IL_OFFSET GetLastILOffset() const
7703 return m_lastILOffset;
7706 void SetLastILOffset(IL_OFFSET lastILOffset)
7708 m_lastILOffset = lastILOffset;
7711 unsigned GetID() const
7717 Statement* GetNextStmt() const
7722 void SetNextStmt(Statement* nextStmt)
7727 Statement* GetPrevStmt() const
7732 void SetPrevStmt(Statement* prevStmt)
7737 bool IsPhiDefnStmt() const
7739 return m_rootNode->IsPhiDefn();
7742 unsigned char GetCostSz() const
7744 return m_rootNode->GetCostSz();
7747 unsigned char GetCostEx() const
7749 return m_rootNode->GetCostEx();
7753 // The root of the expression tree.
7754 // Note: It will be the last node in evaluation order.
7755 GenTree* m_rootNode;
7757 // The tree list head (for forward walks in evaluation order).
7758 // The value is `nullptr` until we have set the sequencing of the nodes.
7759 GenTree* m_treeList;
7761 // The tree list tail. Only valid when locals are linked (fgNodeThreading
7762 // == AllLocals), in which case this is the last local.
7763 // When all nodes are linked (fgNodeThreading == AllTrees), m_rootNode
7764 // should be considered the last node.
7765 GenTree* m_treeListEnd;
7767 // The statement nodes are doubly-linked. The first statement node in a block points
7768 // to the last node in the block via its `m_prev` link. Note that the last statement node
7769 // does not point to the first: it has `m_next == nullptr`; that is, the list is not fully circular.
7773 DebugInfo m_debugInfo;
7776 IL_OFFSET m_lastILOffset; // The instr offset at the end of this statement.
7781 // StatementList: adapter class for forward iteration of the statement linked list using range-based `for`,
7782 // normally used via BasicBlock::Statements(), e.g.:
7783 // for (Statement* const stmt : block->Statements()) ...
7785 // for (Statement* const stmt : block->NonPhiStatements()) ...
7791 // Forward iterator for the statement linked list.
7798 iterator(Statement* stmt) : m_stmt(stmt)
7802 Statement* operator*() const
7807 iterator& operator++()
7809 m_stmt = m_stmt->GetNextStmt();
7813 bool operator!=(const iterator& i) const
7815 return m_stmt != i.m_stmt;
7820 StatementList(Statement* stmts) : m_stmts(stmts)
7824 iterator begin() const
7826 return iterator(m_stmts);
7829 iterator end() const
7831 return iterator(nullptr);
7835 /* NOTE: Any tree nodes that are larger than 8 bytes (two ints or
7836 pointers) must be flagged as 'large' in GenTree::InitNodeSize().
// GenTreeClsVar: data address node (GT_CLS_VAR_ADDR).
//
// Produces the address of a (class/static) field, identified by a field handle.
struct GenTreeClsVar : public GenTree
    CORINFO_FIELD_HANDLE gtClsVarHnd; // handle of the field whose address this node produces
    // Construct a GT_CLS_VAR_ADDR node of the given type for the given field handle.
    GenTreeClsVar(var_types type, CORINFO_FIELD_HANDLE clsVarHnd)
        : GenTree(GT_CLS_VAR_ADDR, type), gtClsVarHnd(clsVarHnd)
#if DEBUGGABLE_GENTREE
    // Default ctor: only for the debugger-visibility vtable trick (see DEBUGGABLE_GENTREE above).
    GenTreeClsVar() : GenTree()
/* gtPhiArg -- phi node rhs argument, var = phi(phiarg, phiarg, phiarg...); GT_PHI_ARG */
struct GenTreePhiArg : public GenTreeLclVarCommon
    BasicBlock* gtPredBB; // the predecessor block from which this phi argument's value flows
    // Construct a GT_PHI_ARG for local `lclNum` (SSA number `ssaNum`) coming in from `block`.
    GenTreePhiArg(var_types type, unsigned lclNum, unsigned ssaNum, BasicBlock* block)
        : GenTreeLclVarCommon(GT_PHI_ARG, type, lclNum), gtPredBB(block)
#if DEBUGGABLE_GENTREE
    // Default ctor: only for the debugger-visibility vtable trick.
    GenTreePhiArg() : GenTreeLclVarCommon()
7875 /* gtPutArgStk -- Argument passed on stack (GT_PUTARG_STK) */
7877 struct GenTreePutArgStk : public GenTreeUnOp
7880 unsigned m_byteOffset;
7881 #ifdef FEATURE_PUT_STRUCT_ARG_STK
7882 unsigned m_byteSize; // The number of bytes that this argument is occupying on the stack with padding.
7886 #if defined(UNIX_X86_ABI)
7887 unsigned gtPadAlign; // Number of padding slots for stack alignment
7889 #if defined(DEBUG) || defined(UNIX_X86_ABI)
7890 GenTreeCall* gtCall; // the call node to which this argument belongs
7893 #if FEATURE_FASTTAILCALL
7895 bool gtPutInIncomingArgArea; // Whether this arg needs to be placed in incoming arg area.
7896 // By default this is false and will be placed in out-going arg area.
7897 // Fast tail calls set this to true.
7898 // In future if we need to add more such bool fields consider bit fields.
7901 #ifdef FEATURE_PUT_STRUCT_ARG_STK
7902 // Instruction selection: during codegen time, what code sequence we will be using
7903 // to encode this operation.
7904 // TODO-Throughput: The following information should be obtained from the child
7907 enum class Kind : int8_t{
7908 Invalid, RepInstr, PartialRepInstr, Unroll, Push,
7910 Kind gtPutArgStkKind;
7914 uint8_t m_argLoadSizeDelta;
7915 #endif // TARGET_XARCH
7916 #endif // FEATURE_PUT_STRUCT_ARG_STK
7919 GenTreePutArgStk(genTreeOps oper,
7922 unsigned stackByteOffset,
7923 #if defined(FEATURE_PUT_STRUCT_ARG_STK)
7924 unsigned stackByteSize,
7926 GenTreeCall* callNode,
7927 bool putInIncomingArgArea)
7928 : GenTreeUnOp(oper, type, op1 DEBUGARG(/*largeNode*/ false))
7929 , m_byteOffset(stackByteOffset)
7930 #if defined(FEATURE_PUT_STRUCT_ARG_STK)
7931 , m_byteSize(stackByteSize)
7933 #if defined(UNIX_X86_ABI)
7936 #if defined(DEBUG) || defined(UNIX_X86_ABI)
7939 #if FEATURE_FASTTAILCALL
7940 , gtPutInIncomingArgArea(putInIncomingArgArea)
7941 #endif // FEATURE_FASTTAILCALL
7942 #if defined(FEATURE_PUT_STRUCT_ARG_STK)
7943 , gtPutArgStkKind(Kind::Invalid)
7944 #if defined(TARGET_XARCH)
7945 , m_argLoadSizeDelta(UINT8_MAX)
7947 #endif // FEATURE_PUT_STRUCT_ARG_STK
7956 #if FEATURE_FASTTAILCALL
7957 bool putInIncomingArgArea() const
7959 return gtPutInIncomingArgArea;
7962 #else // !FEATURE_FASTTAILCALL
7964 bool putInIncomingArgArea() const
7969 #endif // !FEATURE_FASTTAILCALL
7971 unsigned getArgOffset() const
7973 return m_byteOffset;
7976 #if defined(UNIX_X86_ABI)
7977 unsigned getArgPadding() const
7982 void setArgPadding(unsigned padAlign)
7984 gtPadAlign = padAlign;
7988 #ifdef FEATURE_PUT_STRUCT_ARG_STK
7989 unsigned GetStackByteSize() const
7995 //------------------------------------------------------------------------
7996 // SetArgLoadSize: Set the optimal number of bytes to load for this argument.
7998 // On XARCH, it is profitable to use wider loads when our source is a local
7999 // variable. To not duplicate the logic between lowering, LSRA and codegen,
8000 // we do the legality check once, in lowering, and save the result here, as
8001 // a negative delta relative to the size of the argument with padding.
8004 // loadSize - The optimal number of bytes to load
8006 void SetArgLoadSize(unsigned loadSize)
8008 unsigned argSize = GetStackByteSize();
8009 assert(roundUp(loadSize, TARGET_POINTER_SIZE) == argSize);
8011 m_argLoadSizeDelta = static_cast<uint8_t>(argSize - loadSize);
8014 //------------------------------------------------------------------------
8015 // GetArgLoadSize: Get the optimal number of bytes to load for this argument.
8017 unsigned GetArgLoadSize() const
8019 assert(m_argLoadSizeDelta != UINT8_MAX);
8020 return GetStackByteSize() - m_argLoadSizeDelta;
8022 #endif // TARGET_XARCH
8024 // Return true if this is a PutArgStk of a SIMD12 struct.
8025 // This is needed because such values are re-typed to SIMD16, and the type of PutArgStk is VOID.
8026 unsigned isSIMD12() const
8028 return (varTypeIsSIMD(gtOp1) && (GetStackByteSize() == 12));
8031 bool isPushKind() const
8033 return gtPutArgStkKind == Kind::Push;
8035 #else // !FEATURE_PUT_STRUCT_ARG_STK
8036 unsigned GetStackByteSize() const;
8037 #endif // !FEATURE_PUT_STRUCT_ARG_STK
8039 #if DEBUGGABLE_GENTREE
8040 GenTreePutArgStk() : GenTreeUnOp()
8046 #if FEATURE_ARG_SPLIT
8047 // Represent the struct argument: split value in register(s) and stack
8048 struct GenTreePutArgSplit : public GenTreePutArgStk
8052 GenTreePutArgSplit(GenTree* op1,
8053 unsigned stackByteOffset,
8054 #if defined(FEATURE_PUT_STRUCT_ARG_STK)
8055 unsigned stackByteSize,
8058 GenTreeCall* callNode,
8059 bool putIncomingArgArea)
8060 : GenTreePutArgStk(GT_PUTARG_SPLIT,
8064 #if defined(FEATURE_PUT_STRUCT_ARG_STK)
8069 , gtNumRegs(numRegs)
8072 ClearOtherRegFlags();
8075 // Type required to support multi-reg struct arg.
8076 var_types m_regType[MAX_REG_ARG];
8078 // First reg of struct is always given by GetRegNum().
8079 // gtOtherRegs holds the other reg numbers of struct.
8080 regNumberSmall gtOtherRegs[MAX_REG_ARG - 1];
8082 MultiRegSpillFlags gtSpillFlags;
8084 //---------------------------------------------------------------------------
8085 // GetRegNumByIdx: get i'th register allocated to this struct argument.
8088 // idx - index of the struct
8091 // Return regNumber of i'th register of this struct argument
8093 regNumber GetRegNumByIdx(unsigned idx) const
8095 assert(idx < MAX_REG_ARG);
8102 return (regNumber)gtOtherRegs[idx - 1];
8105 //----------------------------------------------------------------------
8106 // SetRegNumByIdx: set i'th register of this struct argument
8110 // idx - index of the struct
8115 void SetRegNumByIdx(regNumber reg, unsigned idx)
8117 assert(idx < MAX_REG_ARG);
8124 gtOtherRegs[idx - 1] = (regNumberSmall)reg;
8125 assert(gtOtherRegs[idx - 1] == reg);
8129 //----------------------------------------------------------------------------
8130 // ClearOtherRegs: clear multi-reg state to indicate no regs are allocated
8138 void ClearOtherRegs()
8140 for (unsigned i = 0; i < MAX_REG_ARG - 1; ++i)
8142 gtOtherRegs[i] = REG_NA;
8146 GenTreeFlags GetRegSpillFlagByIdx(unsigned idx) const
8148 return GetMultiRegSpillFlagsByIdx(gtSpillFlags, idx);
8151 void SetRegSpillFlagByIdx(GenTreeFlags flags, unsigned idx)
8153 #if FEATURE_MULTIREG_RET
8154 gtSpillFlags = SetMultiRegSpillFlagsByIdx(gtSpillFlags, flags, idx);
8158 //--------------------------------------------------------------------------
8159 // GetRegType: Get var_type of the register specified by index.
8162 // index - Index of the register.
8163 // First register will have an index 0 and so on.
8166 // var_type of the register specified by its index.
8168 var_types GetRegType(unsigned index) const
8170 assert(index < gtNumRegs);
8171 var_types result = m_regType[index];
8175 //-------------------------------------------------------------------
8176 // clearOtherRegFlags: clear GTF_* flags associated with gtOtherRegs
8184 void ClearOtherRegFlags()
8189 #if DEBUGGABLE_GENTREE
8190 GenTreePutArgSplit() : GenTreePutArgStk()
8195 #endif // FEATURE_ARG_SPLIT
8197 // Represents GT_COPY or GT_RELOAD node
8199 // Needed to support multi-reg ops.
8201 struct GenTreeCopyOrReload : public GenTreeUnOp
8203 // State required to support copy/reload of a multi-reg call node.
8204 // The first register is always given by GetRegNum().
8206 regNumberSmall gtOtherRegs[MAX_MULTIREG_COUNT - 1];
8208 //----------------------------------------------------------
8209 // ClearOtherRegs: set gtOtherRegs to REG_NA.
8217 void ClearOtherRegs()
8219 for (unsigned i = 0; i < MAX_MULTIREG_COUNT - 1; ++i)
8221 gtOtherRegs[i] = REG_NA;
8225 //-----------------------------------------------------------
8226 // GetRegNumByIdx: Get regNumber of i'th position.
8229 // idx - register position.
8232 // Returns regNumber assigned to i'th position.
8234 regNumber GetRegNumByIdx(unsigned idx) const
8236 assert(idx < MAX_MULTIREG_COUNT);
8243 return (regNumber)gtOtherRegs[idx - 1];
8246 //-----------------------------------------------------------
8247 // SetRegNumByIdx: Set the regNumber for i'th position.
8251 // idx - register position.
8256 void SetRegNumByIdx(regNumber reg, unsigned idx)
8258 assert(idx < MAX_MULTIREG_COUNT);
8266 gtOtherRegs[idx - 1] = (regNumberSmall)reg;
8267 assert(gtOtherRegs[idx - 1] == reg);
8271 //----------------------------------------------------------------------------
8272 // CopyOtherRegs: copy multi-reg state from the given copy/reload node to this
8276 // from - GenTree node from which to copy multi-reg state
8281 // TODO-ARM: Implement this routine for Arm64 and Arm32
8282 // TODO-X86: Implement this routine for x86
8283 void CopyOtherRegs(GenTreeCopyOrReload* from)
8285 assert(OperGet() == from->OperGet());
8287 #ifdef UNIX_AMD64_ABI
8288 for (unsigned i = 0; i < MAX_MULTIREG_COUNT - 1; ++i)
8290 gtOtherRegs[i] = from->gtOtherRegs[i];
8295 unsigned GetRegCount() const
8297 // We need to return the highest index for which we have a valid register.
8298 // Note that the gtOtherRegs array is off by one (the 0th register is GetRegNum()).
8299 // If there's no valid register in gtOtherRegs, GetRegNum() must be valid.
8300 // Note that for most nodes, the set of valid registers must be contiguous,
8301 // but for COPY or RELOAD there is only a valid register for the register positions
8302 // that must be copied or reloaded.
8304 for (unsigned i = MAX_MULTIREG_COUNT; i > 1; i--)
8306 if (gtOtherRegs[i - 2] != REG_NA)
8312 // We should never have a COPY or RELOAD with no valid registers.
8313 assert(GetRegNum() != REG_NA);
8317 GenTreeCopyOrReload(genTreeOps oper, var_types type, GenTree* op1) : GenTreeUnOp(oper, type, op1)
8319 assert(type != TYP_STRUCT || op1->IsMultiRegNode());
8324 #if DEBUGGABLE_GENTREE
8325 GenTreeCopyOrReload() : GenTreeUnOp()
8331 // Represents GT_ALLOCOBJ node
8333 struct GenTreeAllocObj final : public GenTreeUnOp
8335 unsigned int gtNewHelper; // Value returned by ICorJitInfo::getNewHelper
8336 bool gtHelperHasSideEffects;
8337 CORINFO_CLASS_HANDLE gtAllocObjClsHnd;
8338 #ifdef FEATURE_READYTORUN
8339 CORINFO_CONST_LOOKUP gtEntryPoint;
8343 var_types type, unsigned int helper, bool helperHasSideEffects, CORINFO_CLASS_HANDLE clsHnd, GenTree* op)
8344 : GenTreeUnOp(GT_ALLOCOBJ, type, op DEBUGARG(/*largeNode*/ TRUE))
8345 , // This node in most cases will be changed to a call node
8347 , gtHelperHasSideEffects(helperHasSideEffects)
8348 , gtAllocObjClsHnd(clsHnd)
8350 #ifdef FEATURE_READYTORUN
8351 gtEntryPoint.addr = nullptr;
8354 #if DEBUGGABLE_GENTREE
8355 GenTreeAllocObj() : GenTreeUnOp()
8361 // Represents GT_RUNTIMELOOKUP node
8363 struct GenTreeRuntimeLookup final : public GenTreeUnOp
8365 CORINFO_GENERIC_HANDLE gtHnd;
8366 CorInfoGenericHandleType gtHndType;
8368 GenTreeRuntimeLookup(CORINFO_GENERIC_HANDLE hnd, CorInfoGenericHandleType hndTyp, GenTree* tree)
8369 : GenTreeUnOp(GT_RUNTIMELOOKUP, tree->gtType, tree DEBUGARG(/*largeNode*/ FALSE)), gtHnd(hnd), gtHndType(hndTyp)
8371 assert(hnd != nullptr);
8373 #if DEBUGGABLE_GENTREE
8374 GenTreeRuntimeLookup() : GenTreeUnOp()
8379 // Return reference to the actual tree that does the lookup
8385 bool IsClassHandle() const
8387 return gtHndType == CORINFO_HANDLETYPE_CLASS;
8389 bool IsMethodHandle() const
8391 return gtHndType == CORINFO_HANDLETYPE_METHOD;
8393 bool IsFieldHandle() const
8395 return gtHndType == CORINFO_HANDLETYPE_FIELD;
8398 // Note these operations describe the handle that is input to the
8399 // lookup, not the handle produced by the lookup.
8400 CORINFO_CLASS_HANDLE GetClassHandle() const
8402 assert(IsClassHandle());
8403 return (CORINFO_CLASS_HANDLE)gtHnd;
8405 CORINFO_METHOD_HANDLE GetMethodHandle() const
8407 assert(IsMethodHandle());
8408 return (CORINFO_METHOD_HANDLE)gtHnd;
8410 CORINFO_FIELD_HANDLE GetFieldHandle() const
8412 assert(IsMethodHandle());
8413 return (CORINFO_FIELD_HANDLE)gtHnd;
8417 // Represents the condition of a GT_JCC or GT_SETCC node.
8422 enum Code : unsigned char
8426 Unordered = Unsigned,
8429 // 0 would be the encoding of "signed EQ" but since equality is sign insensitive
8430 // we'll use 0 as invalid/uninitialized condition code. This will also leave 1
8441 EQ = Unsigned | 0, // = 8
8442 NE = Unsigned | 1, // = 9
8443 ULT = Unsigned | SLT, // = 10
8444 ULE = Unsigned | SLE, // = 11
8445 UGE = Unsigned | SGE, // = 12
8446 UGT = Unsigned | SGT, // = 13
8447 C = Unsigned | S, // = 14
8448 NC = Unsigned | NS, // = 15
8450 FEQ = Float | 0, // = 16
8451 FNE = Float | 1, // = 17
8452 FLT = Float | SLT, // = 18
8453 FLE = Float | SLE, // = 19
8454 FGE = Float | SGE, // = 20
8455 FGT = Float | SGT, // = 21
8456 O = Float | S, // = 22
8457 NO = Float | NS, // = 23
8459 FEQU = Unordered | FEQ, // = 24
8460 FNEU = Unordered | FNE, // = 25
8461 FLTU = Unordered | FLT, // = 26
8462 FLEU = Unordered | FLE, // = 27
8463 FGEU = Unordered | FGE, // = 28
8464 FGTU = Unordered | FGT, // = 29
8465 P = Unordered | O, // = 30
8466 NP = Unordered | NO, // = 31
    // GetCode: return the raw condition code value.
    Code GetCode() const
    // NOTE(review): the line below appears to be the body of an elided `IsFlag()` member
    // (true for the "flag" codes S/NS/C/NC/O/NO/P/NP, which occupy the top of each group) —
    // the signature line is missing from this view; confirm against the full header.
    return (m_code & OperMask) >= S;
    // IsUnsigned: true only for the unsigned relational codes ULT..UGT.
    // Deliberately excludes EQ/NE (also encoded in the Unsigned group, but sign-insensitive).
    bool IsUnsigned() const
    return (ULT <= m_code) && (m_code <= UGT);
    // IsFloat: true for any floating-point condition (ordered or unordered), excluding flag codes.
    bool IsFloat() const
    return !IsFlag() && (m_code & Float) != 0;
    // IsUnordered: true for the "unordered" float conditions (both Float and Unordered bits set).
    bool IsUnordered() const
    return !IsFlag() && (m_code & (Float | Unordered)) == (Float | Unordered);
    // Is: exact match against a single condition code.
    bool Is(Code cond) const
    return m_code == cond;
    // Is: variadic overload — true if this condition matches any of the given codes.
    template <typename... TRest>
    bool Is(Code c, TRest... rest) const
    return Is(c) || Is(rest...);
    // Indicate whether the condition should be swapped in order to avoid generating
    // multiple branches. This happens for certain floating point conditions on XARCH,
    // see GenConditionDesc and its associated mapping table for more details.
    bool PreferSwap() const
    return Is(GenCondition::FLT, GenCondition::FLE, GenCondition::FGTU, GenCondition::FGEU);
    // Name: printable name of this condition (for dumps/debugging).
    // The table is indexed directly by the condition code encoding, 32 entries.
    const char* Name() const
    static const char* names[]
    "NONE", "???", "SLT", "SLE", "SGE", "SGT", "S", "NS",
    "UEQ", "UNE", "ULT", "ULE", "UGE", "UGT", "C", "NC",
    "FEQ", "FNE", "FLT", "FLE", "FGE", "FGT", "O", "NO",
    "FEQU", "FNEU", "FLTU", "FLEU", "FGEU", "FGTU", "P", "NP"
    assert(m_code < ArrLen(names));
    return names[m_code];
    // Default ctor: zero-initialized code (NONE / invalid).
    GenCondition() : m_code()
    // Implicit conversion from a raw condition code.
    GenCondition(Code cond) : m_code(cond)
8546 static_assert((GT_NE - GT_EQ) == (NE & ~Unsigned), "bad relop");
8547 static_assert((GT_LT - GT_EQ) == SLT, "bad relop");
8548 static_assert((GT_LE - GT_EQ) == SLE, "bad relop");
8549 static_assert((GT_GE - GT_EQ) == SGE, "bad relop");
8550 static_assert((GT_GT - GT_EQ) == SGT, "bad relop");
8551 static_assert((GT_TEST_NE - GT_TEST_EQ) == (NE & ~Unsigned), "bad relop");
8553 static GenCondition FromRelop(GenTree* relop)
8555 assert(relop->OperIsCompare());
8557 if (varTypeIsFloating(relop->gtGetOp1()))
8559 return FromFloatRelop(relop);
8563 return FromIntegralRelop(relop);
    // FromFloatRelop: build a float condition from a floating-point relop node,
    // honoring the GTF_RELOP_NAN_UN flag to select the unordered variant.
    static GenCondition FromFloatRelop(GenTree* relop)
    assert(varTypeIsFloating(relop->gtGetOp1()) && varTypeIsFloating(relop->gtGetOp2()));
    return FromFloatRelop(relop->OperGet(), (relop->gtFlags & GTF_RELOP_NAN_UN) != 0);
    // FromFloatRelop: build a float condition from a relop oper and an "unordered" flag.
    // Relies on the genTreeOps relops being laid out in the same order as the codes
    // (the static_asserts above pin this down).
    static GenCondition FromFloatRelop(genTreeOps oper, bool isUnordered)
    assert(GenTree::OperIsCompare(oper));
    unsigned code = oper - GT_EQ;
    assert(code <= SGT);
    // NOTE(review): the lines that OR in the Float/Unordered bits are elided from this view.
    return GenCondition(static_cast<Code>(code));
    // FromIntegralRelop: build an integral condition from a relop node, honoring
    // the node's unsigned flag.
    static GenCondition FromIntegralRelop(GenTree* relop)
    assert(!varTypeIsFloating(relop->gtGetOp1()) && !varTypeIsFloating(relop->gtGetOp2()));
    return FromIntegralRelop(relop->OperGet(), relop->IsUnsigned());
    // FromIntegralRelop: map a relop oper (+ signedness) to a condition code via a
    // table indexed by (oper - GT_EQ); the static_asserts verify the table layout.
    static GenCondition FromIntegralRelop(genTreeOps oper, bool isUnsigned)
    assert(GenTree::OperIsCompare(oper));
    static constexpr unsigned s_codes[] = {EQ, NE, SLT, SLE, SGE, SGT, EQ, NE, NC, C};
    static_assert_no_msg(s_codes[GT_EQ - GT_EQ] == EQ);
    static_assert_no_msg(s_codes[GT_NE - GT_EQ] == NE);
    static_assert_no_msg(s_codes[GT_LT - GT_EQ] == SLT);
    static_assert_no_msg(s_codes[GT_LE - GT_EQ] == SLE);
    static_assert_no_msg(s_codes[GT_GE - GT_EQ] == SGE);
    static_assert_no_msg(s_codes[GT_GT - GT_EQ] == SGT);
    static_assert_no_msg(s_codes[GT_TEST_EQ - GT_EQ] == EQ);
    static_assert_no_msg(s_codes[GT_TEST_NE - GT_EQ] == NE);
    // Generated via bt instruction that sets C flag to the specified bit.
    static_assert_no_msg(s_codes[GT_BITTEST_EQ - GT_EQ] == NC);
    static_assert_no_msg(s_codes[GT_BITTEST_NE - GT_EQ] == C);
    unsigned code = s_codes[oper - GT_EQ];
    // EQ/NE (codes 0/1) are sign-insensitive, so they always take the unsigned encoding.
    if (isUnsigned || (code <= 1)) // EQ/NE are treated as unsigned
    // NOTE(review): the line that ORs in the Unsigned bit is elided from this view.
    return GenCondition(static_cast<Code>(code));
8625 static GenCondition Reverse(GenCondition condition)
8628 static const Code reverse[]
8630 // EQ NE LT LE GE GT F NF
8631 NONE, NONE, SGE, SGT, SLT, SLE, NS, S,
8632 NE, EQ, UGE, UGT, ULT, ULE, NC, C,
8633 FNEU, FEQU, FGEU, FGTU, FLTU, FLEU, NO, O,
8634 FNE, FEQ, FGE, FGT, FLT, FGT, NP, P
8638 assert(condition.m_code < ArrLen(reverse));
8639 return GenCondition(reverse[condition.m_code]);
    // Swap: map a condition to the equivalent one with the operands exchanged
    // (a < b  <=>  b > a). Equality, flag, and ordered/unordered properties are
    // preserved; only the direction of the relation flips. Table is indexed by
    // the raw condition code, in groups of 8 (signed, unsigned, float, float-unordered).
    static GenCondition Swap(GenCondition condition)
    static const Code swap[]
    // EQ NE LT LE GE GT F NF
    NONE, NONE, SGT, SGE, SLE, SLT, S, NS,
    EQ, NE, UGT, UGE, ULE, ULT, C, NC,
    FEQ, FNE, FGT, FGE, FLE, FLT, O, NO,
    FEQU, FNEU, FGTU, FGEU, FLEU, FLTU, P, NP
    assert(condition.m_code < ArrLen(swap));
    return GenCondition(swap[condition.m_code]);
// Represents a GT_JCC or GT_SETCC node.
//
// Carries the GenCondition that the jump/set tests; the condition flags
// themselves are assumed to have been produced by a preceding node.
struct GenTreeCC final : public GenTree
    GenCondition gtCondition; // the condition this JCC/SETCC evaluates
    GenTreeCC(genTreeOps oper, var_types type, GenCondition condition)
        : GenTree(oper, type DEBUGARG(/*largeNode*/ FALSE)), gtCondition(condition)
    assert(OperIs(GT_JCC, GT_SETCC));
#if DEBUGGABLE_GENTREE
    // Default ctor: only for the debugger-visibility vtable trick.
    GenTreeCC() : GenTree()
#endif // DEBUGGABLE_GENTREE
8678 // Represents a node with two operands and a condition.
8679 struct GenTreeOpCC : public GenTreeOp
8681 GenCondition gtCondition;
8683 GenTreeOpCC(genTreeOps oper, var_types type, GenCondition condition, GenTree* op1 = nullptr, GenTree* op2 = nullptr)
8684 : GenTreeOp(oper, type, op1, op2 DEBUGARG(/*largeNode*/ FALSE)), gtCondition(condition)
8687 assert(OperIs(GT_SELECTCC, GT_SELECT_INCCC, GT_SELECT_INVCC, GT_SELECT_NEGCC));
8689 assert(OperIs(GT_SELECTCC));
8693 #if DEBUGGABLE_GENTREE
8694 GenTreeOpCC() : GenTreeOp()
8697 #endif // DEBUGGABLE_GENTREE
8701 enum insCflags : unsigned
8724 struct GenTreeCCMP final : public GenTreeOpCC
8726 insCflags gtFlagsVal;
8728 GenTreeCCMP(var_types type, GenCondition condition, GenTree* op1, GenTree* op2, insCflags flagsVal)
8729 : GenTreeOpCC(GT_CCMP, type, condition, op1, op2), gtFlagsVal(flagsVal)
8733 #if DEBUGGABLE_GENTREE
8734 GenTreeCCMP() : GenTreeOpCC()
8737 #endif // DEBUGGABLE_GENTREE
8741 //------------------------------------------------------------------------
8742 // Deferred inline functions of GenTree -- these need the subtypes above to
8743 // be defined already.
8744 //------------------------------------------------------------------------
// OperIsBlkOp: whether this node is a block-store style operation
// (GT_STORE_DYN_BLK, or a struct-typed store — interior lines elided in this view).
inline bool GenTree::OperIsBlkOp()
if (OperIs(GT_STORE_DYN_BLK))
return varTypeIsStruct(this);
// OperIsInitBlkOp: whether this block op initializes (fills) rather than copies:
// the source is a TYP_INT init value (possibly under a RELOAD/COPY).
inline bool GenTree::OperIsInitBlkOp()
GenTree* src = Data();
bool isInitBlk = src->TypeIs(TYP_INT);
// Cross-check: a TYP_INT source must actually be an init value, and vice versa.
assert(isInitBlk == src->gtSkipReloadOrCopy()->IsInitVal());
// OperIsCopyBlkOp: a block op that is not an init is a copy.
inline bool GenTree::OperIsCopyBlkOp()
return OperIsBlkOp() && !OperIsInitBlkOp();
//------------------------------------------------------------------------
// IsIntegralConst: Checks whether this is a constant node with the given value
//
// Arguments:
//    constVal - the value of interest
//
// Return Value:
//    Returns true iff the tree is an integral constant opcode, with
//    the given value.
//
// Notes:
//    Like gtIconVal, the argument is of ssize_t, so cannot check for
//    long constants in a target-independent way.
//
inline bool GenTree::IsIntegralConst(ssize_t constVal) const
// GT_CNS_INT: compare the native-int icon value.
if ((gtOper == GT_CNS_INT) && (AsIntConCommon()->IconValue() == constVal))
// GT_CNS_LNG: compare the 64-bit long value (32-bit targets).
if ((gtOper == GT_CNS_LNG) && (AsIntConCommon()->LngValue() == constVal))
//-------------------------------------------------------------------
// IsFloatAllBitsSet: returns true if this is exactly a const float value representing AllBitsSet.
//
// Return Value:
//    True if this represents a const floating-point value representing AllBitsSet.
//    Will return false otherwise.
//
inline bool GenTree::IsFloatAllBitsSet() const
if (IsCnsFltOrDbl())
double constValue = AsDblCon()->DconValue();
if (TypeIs(TYP_FLOAT))
// For TYP_FLOAT, check the all-bits-set pattern at float (32-bit) precision.
return FloatingPointUtils::isAllBitsSet(static_cast<float>(constValue));
// Only TYP_FLOAT and TYP_DOUBLE are possible for a float constant node.
assert(TypeIs(TYP_DOUBLE));
return FloatingPointUtils::isAllBitsSet(constValue);
//-------------------------------------------------------------------
// IsFloatNaN: returns true if this is exactly a const float value of NaN
//
// Return Value:
//    True if this represents a const floating-point value of NaN.
//    Will return false otherwise.
//
inline bool GenTree::IsFloatNaN() const
if (IsCnsFltOrDbl())
double constValue = AsDblCon()->DconValue();
return FloatingPointUtils::isNaN(constValue);
//-------------------------------------------------------------------
// IsFloatNegativeZero: returns true if this is exactly a const float value of negative zero (-0.0)
//
// Return Value:
//    True if this represents a const floating-point value of exactly negative zero (-0.0).
//    Will return false if the value is positive zero (+0.0).
//
inline bool GenTree::IsFloatNegativeZero() const
if (IsCnsFltOrDbl())
double constValue = AsDblCon()->DconValue();
return FloatingPointUtils::isNegativeZero(constValue);
//-------------------------------------------------------------------
// IsFloatPositiveZero: returns true if this is exactly a const float value of positive zero (+0.0)
//
// Return Value:
//    True if this represents a const floating-point value of exactly positive zero (+0.0).
//    Will return false if the value is negative zero (-0.0).
//
inline bool GenTree::IsFloatPositiveZero() const
if (IsCnsFltOrDbl())
// This implementation is almost identical to IsCnsNonZeroFltOrDbl
// but it is easier to parse out
// rather than using !IsCnsNonZeroFltOrDbl.
double constValue = AsDblCon()->DconValue();
return FloatingPointUtils::isPositiveZero(constValue);
8892 //-------------------------------------------------------------------
8893 // IsVectorZero: returns true if this node is a vector constant with all bits zero.
8896 // True if this node is a vector constant with all bits zero
8898 inline bool GenTree::IsVectorZero() const
8900 return IsCnsVec() && AsVecCon()->IsZero();
8903 //-------------------------------------------------------------------
8904 // IsVectorCreate: returns true if this node is the creation of a vector.
8905 // Does not include "Unsafe" method calls.
8908 // True if this node is the creation of a vector
8910 inline bool GenTree::IsVectorCreate() const
8912 #ifdef FEATURE_HW_INTRINSICS
8913 if (OperIs(GT_HWINTRINSIC))
8915 switch (AsHWIntrinsic()->GetHWIntrinsicId())
8917 case NI_Vector128_Create:
8918 #if defined(TARGET_XARCH)
8919 case NI_Vector256_Create:
8920 case NI_Vector512_Create:
8921 #elif defined(TARGET_ARMARCH)
8922 case NI_Vector64_Create:
8930 #endif // FEATURE_HW_INTRINSICS
8935 //-------------------------------------------------------------------
8936 // IsVectorAllBitsSet: returns true if this node is a vector constant with all bits set.
8939 // True if this node is a vector constant with all bits set
8941 inline bool GenTree::IsVectorAllBitsSet() const
8944 if (OperIs(GT_CNS_VEC))
8946 return AsVecCon()->IsAllBitsSet();
8948 #endif // FEATURE_SIMD
8953 //-------------------------------------------------------------------
8954 // IsVectorConst: returns true if this node is a HWIntrinsic that represents a constant.
8957 // True if this represents a HWIntrinsic node that represents a constant.
8959 inline bool GenTree::IsVectorConst()
8962 if (OperIs(GT_CNS_VEC))
8966 #endif // FEATURE_SIMD
8971 //-------------------------------------------------------------------
8972 // GetIntegralVectorConstElement: Gets the value of a given element in an integral vector constant
8975 // The value of a given element in an integral vector constant
8977 inline uint64_t GenTree::GetIntegralVectorConstElement(size_t index, var_types simdBaseType)
8979 #ifdef FEATURE_HW_INTRINSICS
8982 const GenTreeVecCon* node = AsVecCon();
8984 switch (simdBaseType)
8988 return node->gtSimdVal.i8[index];
8993 return node->gtSimdVal.u8[index];
8998 return node->gtSimdVal.i16[index];
9003 return node->gtSimdVal.u16[index];
9009 return node->gtSimdVal.i32[index];
9014 return node->gtSimdVal.u32[index];
9020 return node->gtSimdVal.i64[index];
9025 return node->gtSimdVal.u64[index];
9034 #endif // FEATURE_HW_INTRINSICS
9039 inline bool GenTree::IsBoxedValue()
9041 assert(gtOper != GT_BOX || AsBox()->BoxOp() != nullptr);
9042 return (gtOper == GT_BOX) && (gtFlags & GTF_BOX_VALUE);
9046 //------------------------------------------------------------------------
9047 // IsValidCallArgument: Given an GenTree node that represents an argument
9048 // enforce (or don't enforce) the following invariant.
9051 // instance method for a GenTree node
9054 // true: the GenTree node is accepted as a valid argument
9055 // false: the GenTree node is not accepted as a valid argument
9058 // For targets that don't support arguments as a list of fields, we do not support GT_FIELD_LIST.
9060 // Currently for AMD64 UNIX we allow a limited case where a GT_FIELD_LIST is
9061 // allowed but every element must be a GT_LCL_FLD.
9063 // For the future targets that allow for Multireg args (and this includes the current ARM64 target),
9064 // or that allow for passing promoted structs, we allow a GT_FIELD_LIST of arbitrary nodes.
9065 // These would typically start out as GT_LCL_VARs or GT_LCL_FLDS or GT_INDs,
9066 // but could be changed into constants or GT_COMMA trees by the later
9067 // optimization phases.
9069 inline bool GenTree::IsValidCallArgument()
9071 if (OperIs(GT_FIELD_LIST))
9073 #if !FEATURE_MULTIREG_ARGS && !FEATURE_PUT_STRUCT_ARG_STK
9077 #else // FEATURE_MULTIREG_ARGS or FEATURE_PUT_STRUCT_ARG_STK
9079 // We allow this GT_FIELD_LIST as an argument
9082 #endif // FEATURE_MULTIREG_ARGS or FEATURE_PUT_STRUCT_ARG_STK
9084 // We don't have either kind of list, so it satisfies the invariant.
9089 inline GenTree* GenTree::gtGetOp1() const
9091 return AsOp()->gtOp1;
9095 /* static */ inline bool GenTree::RequiresNonNullOp2(genTreeOps oper)
9132 inline GenTree* GenTree::gtGetOp2() const
9134 assert(OperIsBinary());
9136 GenTree* op2 = AsOp()->gtOp2;
9138 // Only allow null op2 if the node type allows it, e.g. GT_LEA.
9139 assert((op2 != nullptr) || !RequiresNonNullOp2(gtOper));
9144 inline GenTree* GenTree::gtGetOp2IfPresent() const
9146 /* AsOp()->gtOp2 is only valid for GTK_BINOP nodes. */
9148 GenTree* op2 = OperIsBinary() ? AsOp()->gtOp2 : nullptr;
9150 // This documents the genTreeOps for which AsOp()->gtOp2 cannot be nullptr.
9151 // This helps prefix in its analysis of code which calls gtGetOp2()
9153 assert((op2 != nullptr) || !RequiresNonNullOp2(gtOper));
9158 inline GenTree*& GenTree::Data()
9160 assert(OperIsStore() || OperIs(GT_STORE_DYN_BLK));
9161 return OperIsLocalStore() ? AsLclVarCommon()->Data() : AsIndir()->Data();
// gtEffectiveVal: peel off wrapper nodes to find the value-producing node.
// Skips down the op2 chain of COMMAs; unless commaOnly is set, also skips
// NOP nodes that wrap an operand. (The enclosing loop construct is elided
// from this view; the skipping repeats until a non-wrapper node is reached.)
inline GenTree* GenTree::gtEffectiveVal(bool commaOnly /* = false */)
GenTree* effectiveVal = this;
if (effectiveVal->gtOper == GT_COMMA)
// A COMMA's value is its second operand.
effectiveVal = effectiveVal->AsOp()->gtGetOp2();
else if (!commaOnly && (effectiveVal->gtOper == GT_NOP) && (effectiveVal->AsOp()->gtOp1 != nullptr))
// A NOP with an operand just forwards that operand's value.
effectiveVal = effectiveVal->AsOp()->gtOp1;
return effectiveVal;
//-------------------------------------------------------------------------
// gtCommaStoreVal - find value being assigned to a comma wrapped store
//
// Returns:
//    tree representing value being stored if this tree represents a
//    comma-wrapped local definition and use.
//
//    original tree, if not.
//
inline GenTree* GenTree::gtCommaStoreVal()
GenTree* result = this;
if (OperIs(GT_COMMA))
GenTree* commaOp1 = AsOp()->gtOp1;
GenTree* commaOp2 = AsOp()->gtOp2;
// Pattern: COMMA(STORE_LCL_VAR<N>(value), LCL_VAR<N>) — a store immediately
// followed by a read of the same local; the "value" is what's being stored.
if (commaOp2->OperIs(GT_LCL_VAR) && commaOp1->OperIs(GT_STORE_LCL_VAR) &&
    (commaOp1->AsLclVar()->GetLclNum() == commaOp2->AsLclVar()->GetLclNum()))
result = commaOp1->AsLclVar()->Data();
9212 inline GenTree* GenTree::gtSkipReloadOrCopy()
9214 // There can be only one reload or copy (we can't have a reload/copy of a reload/copy)
9215 if (gtOper == GT_RELOAD || gtOper == GT_COPY)
9217 assert(gtGetOp1()->OperGet() != GT_RELOAD && gtGetOp1()->OperGet() != GT_COPY);
9223 //-----------------------------------------------------------------------------------
9224 // IsMultiRegCall: whether a call node returns its value in more than one register
9230 // Returns true if this GenTree is a multi register returning call
9232 inline bool GenTree::IsMultiRegCall() const
9236 return AsCall()->HasMultiRegRetVal();
9242 //-----------------------------------------------------------------------------------
9243 // IsMultiRegLclVar: whether a local var node defines multiple registers
9249 // Returns true if this GenTree is a multi register defining local var
9251 inline bool GenTree::IsMultiRegLclVar() const
9253 if (OperIsScalarLocal())
9255 return AsLclVar()->IsMultiReg();
9260 //-----------------------------------------------------------------------------------
9261 // GetRegByIndex: Get a specific register, based on regIndex, that is produced by this node.
9264 // regIndex - which register to return (must be 0 for non-multireg nodes)
9267 // The register, if any, assigned to this index for this node.
9270 // All targets that support multi-reg ops of any kind also support multi-reg return
9271 // values for calls. Should that change with a future target, this method will need
9272 // to change accordingly.
9274 inline regNumber GenTree::GetRegByIndex(int regIndex) const
9281 #if FEATURE_MULTIREG_RET
9283 if (IsMultiRegCall())
9285 return AsCall()->GetRegNumByIdx(regIndex);
9288 #if FEATURE_ARG_SPLIT
9289 if (OperIsPutArgSplit())
9291 return AsPutArgSplit()->GetRegNumByIdx(regIndex);
9295 #if !defined(TARGET_64BIT)
9296 if (OperIsMultiRegOp())
9298 return AsMultiRegOp()->GetRegNumByIdx(regIndex);
9301 #endif // FEATURE_MULTIREG_RET
9303 if (OperIs(GT_COPY, GT_RELOAD))
9305 return AsCopyOrReload()->GetRegNumByIdx(regIndex);
9308 #ifdef FEATURE_HW_INTRINSICS
9309 if (OperIs(GT_HWINTRINSIC))
9311 assert(regIndex == 1);
9312 // TODO-ARM64-NYI: Support hardware intrinsics operating on multiple contiguous registers.
9313 return AsHWIntrinsic()->GetOtherReg();
9315 #endif // FEATURE_HW_INTRINSICS
9317 if (OperIsScalarLocal())
9319 return AsLclVar()->GetRegNumByIdx(regIndex);
9322 assert(!"Invalid regIndex for GetRegFromMultiRegNode");
9326 //-----------------------------------------------------------------------------------
9327 // GetRegTypeByIndex: Get a specific register's type, based on regIndex, that is produced
9328 // by this multi-reg node.
9331 // regIndex - index of register whose type will be returned
9334 // The register type assigned to this index for this node.
9337 // This must be a multireg node that is *not* a copy or reload (which must retrieve the
9338 // type from its source), and 'regIndex' must be a valid index for this node.
9340 // All targets that support multi-reg ops of any kind also support multi-reg return
9341 // values for calls. Should that change with a future target, this method will need
9342 // to change accordingly.
9344 inline var_types GenTree::GetRegTypeByIndex(int regIndex) const
9346 #if FEATURE_MULTIREG_RET
9347 if (IsMultiRegCall())
9349 return AsCall()->AsCall()->GetReturnTypeDesc()->GetReturnRegType(regIndex);
9352 #if FEATURE_ARG_SPLIT
9353 if (OperIsPutArgSplit())
9355 return AsPutArgSplit()->GetRegType(regIndex);
9357 #endif // FEATURE_ARG_SPLIT
9359 #if !defined(TARGET_64BIT)
9360 if (OperIsMultiRegOp())
9362 return AsMultiRegOp()->GetRegType(regIndex);
9364 #endif // !defined(TARGET_64BIT)
9365 #endif // FEATURE_MULTIREG_RET
9367 #ifdef FEATURE_HW_INTRINSICS
9368 if (OperIsHWIntrinsic())
9370 assert(TypeGet() == TYP_STRUCT);
9372 if (AsHWIntrinsic()->GetSimdSize() == 16)
9378 assert(AsHWIntrinsic()->GetSimdSize() == 8);
9381 #elif defined(TARGET_XARCH)
9382 // At this time, the only multi-reg HW intrinsics all return the type of their
9383 // arguments. If this changes, we will need a way to record or determine this.
9384 return AsHWIntrinsic()->Op(1)->TypeGet();
9387 #endif // FEATURE_HW_INTRINSICS
9389 if (OperIsScalarLocal())
9391 if (TypeGet() == TYP_LONG)
9395 assert(TypeGet() == TYP_STRUCT);
9396 assert((gtFlags & GTF_VAR_MULTIREG) != 0);
9397 // The register type for a multireg lclVar requires looking at the LclVarDsc,
9398 // which requires a Compiler instance. The caller must use the GetFieldTypeByIndex
9399 // on GenTreeLclVar.
9400 assert(!"GetRegTypeByIndex for LclVar");
9403 assert(!"Invalid node type for GetRegTypeByIndex");
9407 //-----------------------------------------------------------------------------------
9408 // GetRegSpillFlagByIdx: Get a specific register's spill flags, based on regIndex,
9409 // for this multi-reg node.
9412 // regIndex - which register's spill flags to return
9415 // The spill flags (GTF_SPILL GTF_SPILLED) for this register.
9418 // This must be a multireg node and 'regIndex' must be a valid index for this node.
9419 // This method returns the GTF "equivalent" flags based on the packed flags on the multireg node.
9421 inline GenTreeFlags GenTree::GetRegSpillFlagByIdx(int regIndex) const
9423 #if FEATURE_MULTIREG_RET
9424 if (IsMultiRegCall())
9426 return AsCall()->GetRegSpillFlagByIdx(regIndex);
9429 #if FEATURE_ARG_SPLIT
9430 if (OperIsPutArgSplit())
9432 return AsPutArgSplit()->GetRegSpillFlagByIdx(regIndex);
9434 #endif // FEATURE_ARG_SPLIT
9436 #if !defined(TARGET_64BIT)
9437 if (OperIsMultiRegOp())
9439 return AsMultiRegOp()->GetRegSpillFlagByIdx(regIndex);
9441 #endif // !defined(TARGET_64BIT)
9442 #endif // FEATURE_MULTIREG_RET
9444 #ifdef FEATURE_HW_INTRINSICS
9445 if (OperIsHWIntrinsic())
9447 return AsHWIntrinsic()->GetRegSpillFlagByIdx(regIndex);
9449 #endif // FEATURE_HW_INTRINSICS
9451 if (OperIsScalarLocal())
9453 return AsLclVar()->GetRegSpillFlagByIdx(regIndex);
9456 assert(!"Invalid node type for GetRegSpillFlagByIdx");
9460 //-----------------------------------------------------------------------------------
9461 // SetRegSpillFlagByIdx: Set a specific register's spill flags, based on regIndex,
9462 // for this multi-reg node.
9465 // flags - the flags to set
9466 // regIndex - which register's spill flags to set
9469 // This must be a multireg node and 'regIndex' must be a valid index for this node.
9470 // This method takes the GTF "equivalent" flags and sets the packed flags on the
9473 inline void GenTree::SetRegSpillFlagByIdx(GenTreeFlags flags, int regIndex)
9475 #if FEATURE_MULTIREG_RET
9476 if (IsMultiRegCall())
9478 AsCall()->SetRegSpillFlagByIdx(flags, regIndex);
9482 #if FEATURE_ARG_SPLIT
9483 if (OperIsPutArgSplit())
9485 AsPutArgSplit()->SetRegSpillFlagByIdx(flags, regIndex);
9488 #endif // FEATURE_ARG_SPLIT
9490 #if !defined(TARGET_64BIT)
9491 if (OperIsMultiRegOp())
9493 AsMultiRegOp()->SetRegSpillFlagByIdx(flags, regIndex);
9496 #endif // !defined(TARGET_64BIT)
9498 #endif // FEATURE_MULTIREG_RET
9500 #ifdef FEATURE_HW_INTRINSICS
9501 if (OperIsHWIntrinsic())
9503 AsHWIntrinsic()->SetRegSpillFlagByIdx(flags, regIndex);
9506 #endif // FEATURE_HW_INTRINSICS
9508 if (OperIsScalarLocal())
9510 AsLclVar()->SetRegSpillFlagByIdx(flags, regIndex);
9514 assert(!"Invalid node type for SetRegSpillFlagByIdx");
9517 //-----------------------------------------------------------------------------------
9518 // GetLastUseBit: Get the last use bit for regIndex
9521 // fieldIndex - the field index
9524 // The bit to set, clear or query for the last-use of the fieldIndex'th value.
9527 // This must be a GenTreeLclVar or GenTreeCopyOrReload node.
9529 inline GenTreeFlags GenTree::GetLastUseBit(int fieldIndex) const
9531 assert(fieldIndex < 4);
9532 assert(OperIs(GT_LCL_VAR, GT_STORE_LCL_VAR, GT_LCL_FLD, GT_STORE_LCL_FLD, GT_LCL_ADDR, GT_COPY, GT_RELOAD));
9533 static_assert_no_msg((1 << FIELD_LAST_USE_SHIFT) == GTF_VAR_FIELD_DEATH0);
9534 return (GenTreeFlags)(1 << (FIELD_LAST_USE_SHIFT + fieldIndex));
9537 //-----------------------------------------------------------------------------------
9538 // IsLastUse: Determine whether this node is a last use of a promoted field.
9541 // fieldIndex - the index of the field
9544 // true iff this is a last use.
9547 // This must be a GenTreeLclVar or GenTreeCopyOrReload node.
9549 inline bool GenTree::IsLastUse(int fieldIndex) const
9551 assert(OperIs(GT_LCL_VAR, GT_STORE_LCL_VAR, GT_LCL_FLD, GT_STORE_LCL_FLD, GT_LCL_ADDR, GT_COPY, GT_RELOAD));
9552 return (gtFlags & GetLastUseBit(fieldIndex)) != 0;
9555 //-----------------------------------------------------------------------------------
9556 // IsLastUse: Determine whether this node is a last use of any value
9559 // true iff this has any last uses (i.e. at any index).
9562 // This must be a GenTreeLclVar or GenTreeCopyOrReload node.
9564 inline bool GenTree::HasLastUse() const
9566 assert(OperIs(GT_LCL_VAR, GT_STORE_LCL_VAR, GT_LCL_FLD, GT_STORE_LCL_FLD, GT_LCL_ADDR, GT_COPY, GT_RELOAD));
9567 return (gtFlags & (GTF_VAR_DEATH_MASK)) != 0;
9570 //-----------------------------------------------------------------------------------
9571 // SetLastUse: Set the last use bit for the given index
9574 // fieldIndex - the index
9577 // This must be a GenTreeLclVar or GenTreeCopyOrReload node.
9579 inline void GenTree::SetLastUse(int fieldIndex)
9581 gtFlags |= GetLastUseBit(fieldIndex);
9584 //-----------------------------------------------------------------------------------
9585 // ClearLastUse: Clear the last use bit for the given index
9588 // fieldIndex - the index
9591 // This must be a GenTreeLclVar or GenTreeCopyOrReload node.
9593 inline void GenTree::ClearLastUse(int fieldIndex)
9595 gtFlags &= ~GetLastUseBit(fieldIndex);
9598 //-------------------------------------------------------------------------
9599 // IsCopyOrReload: whether this is a GT_COPY or GT_RELOAD node.
9605 // Returns true if this GenTree is a copy or reload node.
9607 inline bool GenTree::IsCopyOrReload() const
9609 return (gtOper == GT_COPY || gtOper == GT_RELOAD);
9612 //-----------------------------------------------------------------------------------
9613 // IsCopyOrReloadOfMultiRegCall: whether this is a GT_COPY or GT_RELOAD of a multi-reg
9620 // Returns true if this GenTree is a copy or reload of multi-reg call node.
9622 inline bool GenTree::IsCopyOrReloadOfMultiRegCall() const
9624 if (IsCopyOrReload())
9626 return gtGetOp1()->IsMultiRegCall();
9632 inline bool GenTree::IsCnsIntOrI() const
9634 return (gtOper == GT_CNS_INT);
9637 inline bool GenTree::IsIntegralConst() const
9640 return IsCnsIntOrI();
9641 #else // !TARGET_64BIT
9642 return ((gtOper == GT_CNS_INT) || (gtOper == GT_CNS_LNG));
9643 #endif // !TARGET_64BIT
9646 //-------------------------------------------------------------------------
9647 // IsIntegralConstPow2: Determines whether an integral constant is
9651 // Returns true if the GenTree's integral constant
9652 // is the power of 2.
9654 inline bool GenTree::IsIntegralConstPow2() const
9656 if (IsIntegralConst())
9658 return isPow2(AsIntConCommon()->IntegralValue());
9664 //-------------------------------------------------------------------------
9665 // IsIntegralConstUnsignedPow2: Determines whether the unsigned value of
9666 // an integral constant is the power of 2.
9669 // Returns true if the unsigned value of a GenTree's integral constant
9670 // is the power of 2.
9673 // Integral constant nodes store its value in signed form.
9674 // This should handle cases where an unsigned-int was logically used in
9677 inline bool GenTree::IsIntegralConstUnsignedPow2() const
9679 if (IsIntegralConst())
9681 return isPow2((UINT64)AsIntConCommon()->IntegralValue());
9687 //-------------------------------------------------------------------------
9688 // IsIntegralConstAbsPow2: Determines whether the absolute value of
9689 // an integral constant is the power of 2.
9692 // Returns true if the absolute value of a GenTree's integral constant
9693 // is the power of 2.
9695 inline bool GenTree::IsIntegralConstAbsPow2() const
9697 if (IsIntegralConst())
9699 INT64 svalue = AsIntConCommon()->IntegralValue();
9700 size_t value = (svalue == SSIZE_T_MIN) ? static_cast<size_t>(svalue) : static_cast<size_t>(abs(svalue));
9701 return isPow2(value);
9707 // Is this node an integer constant that fits in a 32-bit signed integer (INT32)
9708 inline bool GenTree::IsIntCnsFitsInI32()
9711 return IsCnsIntOrI() && AsIntCon()->FitsInI32();
9712 #else // !TARGET_64BIT
9713 return IsCnsIntOrI();
9714 #endif // !TARGET_64BIT
9717 inline bool GenTree::IsCnsFltOrDbl() const
9719 return OperIs(GT_CNS_DBL);
9722 inline bool GenTree::IsCnsNonZeroFltOrDbl() const
9724 if (IsCnsFltOrDbl())
9726 double constValue = AsDblCon()->DconValue();
9727 return *(__int64*)&constValue != 0;
9733 inline bool GenTree::IsCnsVec() const
9735 return OperIs(GT_CNS_VEC);
9738 inline bool GenTree::IsHelperCall()
9740 return OperGet() == GT_CALL && AsCall()->gtCallType == CT_HELPER;
9743 inline var_types GenTree::CastFromType()
9745 return this->AsCast()->CastOp()->TypeGet();
9747 inline var_types& GenTree::CastToType()
9749 return this->AsCast()->gtCastType;
9752 inline bool GenTree::isUsedFromSpillTemp() const
9754 // If spilled and no reg at use, then it is used from the spill temp location rather than being reloaded.
9755 if (((gtFlags & GTF_SPILLED) != 0) && ((gtFlags & GTF_NOREG_AT_USE) != 0))
9763 // Helper function to return the array reference of an array length node.
9764 inline GenTree* GenTree::GetArrLengthArrRef()
9766 assert(OperIsArrLength());
9767 return AsArrCommon()->ArrRef();
9770 // Helper function to return the address of an indir or array meta-data node.
9771 inline GenTree* GenTree::GetIndirOrArrMetaDataAddr()
9773 assert(OperIsIndirOrArrMetaData());
9777 return AsIndir()->Addr();
9781 return AsArrCommon()->ArrRef();
9785 /*****************************************************************************/
9788 #include <poppack.h>
9791 /*****************************************************************************/
// Allocation size (in bytes) for "small" GenTree nodes.
9793 const size_t TREE_NODE_SZ_SMALL = sizeof(GenTreeLclFld);
9795 // For some configurations, such as x86 release, GenTreeVecCon is
9796 // the largest by a small margin due to needing to carry a simd64_t
9797 // constant value. Otherwise, GenTreeCall is the largest.
// Allocation size (in bytes) for "large" GenTree nodes: the larger of the two
// biggest node types, chosen at compile time.
9799 const size_t TREE_NODE_SZ_LARGE =
9800     (sizeof(GenTreeVecCon) < sizeof(GenTreeCall)) ? sizeof(GenTreeCall) : sizeof(GenTreeVecCon);
// Enumerators classifying the kind of variable/memory reference a value represents.
// NOTE(review): the enum's declaration line is elided just above this span — confirm
// the enum name (presumably varRefKinds) against the full file.
9804     VR_INVARIANT = 0x00, // an invariant value
9806     VR_IND_REF   = 0x01, // an object reference
9807     VR_IND_SCL   = 0x02, // a non-object reference
9808     VR_GLB_VAR   = 0x04, // a global (clsVar)
9811 /*****************************************************************************/
9812 #endif // !GENTREE_H
9813 /*****************************************************************************/