1 // Licensed to the .NET Foundation under one or more agreements.
2 // The .NET Foundation licenses this file to you under the MIT license.
3 // See the LICENSE file in the project root for more information.
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX                                                                           XX
XX                           Importer                                        XX
XX                                                                           XX
XX   Imports the given method and converts it to semantic trees              XX
XX                                                                           XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
#define Verify(cond, msg)                                                                                              \
    if (!(cond))                                                                                                       \
    {                                                                                                                  \
        verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__));                           \
    }
#define VerifyOrReturn(cond, msg)                                                                                      \
    if (!(cond))                                                                                                       \
    {                                                                                                                  \
        verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__));                           \
        return;                                                                                                        \
    }
#define VerifyOrReturnSpeculative(cond, msg, speculative)                                                              \
    if (!(cond))                                                                                                       \
    {                                                                                                                  \
        if (!(speculative))                                                                                            \
        {                                                                                                              \
            verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__));                       \
        }                                                                                                              \
        return false;                                                                                                  \
    }
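
// Illustrative use of the macros above (a sketch; the call below is hypothetical,
// not a line from this file):
//
//     VerifyOrReturn(tiRetVal.IsObjRef(), "return type must be an object reference");
//
// If the condition fails, verRaiseVerifyExceptionIfNeeded() records the failure
// (and throws when verification is actually required) and the caller returns early.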
62 /*****************************************************************************/
64 void Compiler::impInit()
68 impTreeList = nullptr;
69 impTreeLast = nullptr;
70 impInlinedCodeSize = 0;
74 /*****************************************************************************
76 * Pushes the given tree on the stack.
79 void Compiler::impPushOnStack(GenTreePtr tree, typeInfo ti)
81 /* Check for overflow. If inlining, we may be using a bigger stack */
83 if ((verCurrentState.esStackDepth >= info.compMaxStack) &&
84 (verCurrentState.esStackDepth >= impStkSize || ((compCurBB->bbFlags & BBF_IMPORTED) == 0)))
86 BADCODE("stack overflow");
90 // If we are pushing a struct, make certain we know the precise type!
91 if (tree->TypeGet() == TYP_STRUCT)
93 assert(ti.IsType(TI_STRUCT));
94 CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandle();
95 assert(clsHnd != NO_CLASS_HANDLE);
98 if (tiVerificationNeeded && !ti.IsDead())
100 assert(typeInfo::AreEquivalent(NormaliseForStack(ti), ti)); // types are normalized
102 // The ti type is consistent with the tree type.
105 // On 64-bit systems, nodes whose "proper" type is "native int" get labeled TYP_LONG.
106 // In the verification type system, we always transform "native int" to "TI_INT".
107 // Ideally, we would keep track of which nodes labeled "TYP_LONG" are really "native int", but
108 // attempts to do that have proved too difficult. Instead, we'll assume that in checks like this,
109 // when there's a mismatch, it's because of this reason -- the typeInfo::AreEquivalentModuloNativeInt
110 // method used in the last disjunct allows exactly this mismatch.
111 assert(ti.IsDead() || ti.IsByRef() && (tree->TypeGet() == TYP_I_IMPL || tree->TypeGet() == TYP_BYREF) ||
112 ti.IsUnboxedGenericTypeVar() && tree->TypeGet() == TYP_REF ||
113 ti.IsObjRef() && tree->TypeGet() == TYP_REF || ti.IsMethod() && tree->TypeGet() == TYP_I_IMPL ||
114 ti.IsType(TI_STRUCT) && tree->TypeGet() != TYP_REF ||
115 typeInfo::AreEquivalentModuloNativeInt(NormaliseForStack(ti),
116 NormaliseForStack(typeInfo(tree->TypeGet()))));
118 // If it is a struct type, make certain we normalized the primitive types
119 assert(!ti.IsType(TI_STRUCT) ||
120 info.compCompHnd->getTypeForPrimitiveValueClass(ti.GetClassHandle()) == CORINFO_TYPE_UNDEF);
#ifdef VERBOSE_VERIFY
    if (VERBOSE && tiVerificationNeeded)
    {
        printf(TI_DUMP_PADDING);
        printf("About to push to stack: ");
        ti.Dump();
    }
#endif // VERBOSE_VERIFY
135 verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo = ti;
136 verCurrentState.esStack[verCurrentState.esStackDepth++].val = tree;
    if ((tree->gtType == TYP_LONG) && (compLongUsed == false))
    {
        compLongUsed = true;
    }
    else if (((tree->gtType == TYP_FLOAT) || (tree->gtType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
    {
        compFloatingPointUsed = true;
    }
148 inline void Compiler::impPushNullObjRefOnStack()
150 impPushOnStack(gtNewIconNode(0, TYP_REF), typeInfo(TI_NULL));
153 // This method gets called when we run into unverifiable code
154 // (and we are verifying the method)
156 inline void Compiler::verRaiseVerifyExceptionIfNeeded(INDEBUG(const char* msg) DEBUGARG(const char* file)
157 DEBUGARG(unsigned line))
159 // Remember that the code is not verifiable
160 // Note that the method may yet pass canSkipMethodVerification(),
161 // and so the presence of unverifiable code may not be an issue.
162 tiIsVerifiableCode = FALSE;
165 const char* tail = strrchr(file, '\\');
171 if (JitConfig.JitBreakOnUnsafeCode())
173 assert(!"Unsafe code detected");
177 JITLOG((LL_INFO10000, "Detected unsafe code: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
178 msg, info.compFullName, impCurOpcName, impCurOpcOffs));
180 if (verNeedsVerification() || compIsForImportOnly())
182 JITLOG((LL_ERROR, "Verification failure: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
183 msg, info.compFullName, impCurOpcName, impCurOpcOffs));
184 verRaiseVerifyException(INDEBUG(msg) DEBUGARG(file) DEBUGARG(line));
188 inline void DECLSPEC_NORETURN Compiler::verRaiseVerifyException(INDEBUG(const char* msg) DEBUGARG(const char* file)
189 DEBUGARG(unsigned line))
191 JITLOG((LL_ERROR, "Verification failure: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
192 msg, info.compFullName, impCurOpcName, impCurOpcOffs));
195 // BreakIfDebuggerPresent();
196 if (getBreakOnBadCode())
198 assert(!"Typechecking error");
202 RaiseException(SEH_VERIFICATION_EXCEPTION, EXCEPTION_NONCONTINUABLE, 0, nullptr);
// Helper function that tells us whether the IL instruction at the passed-in
// address consumes an address at the top of the stack. We use it to avoid
// unnecessarily marking locals as address-taken.
bool Compiler::impILConsumesAddr(const BYTE* codeAddr, CORINFO_METHOD_HANDLE fncHandle, CORINFO_MODULE_HANDLE scpHandle)
211 assert(!compIsForInlining());
215 opcode = (OPCODE)getU1LittleEndian(codeAddr);
        // case CEE_LDFLDA: We're taking this one out as if you have a sequence
        // like
        //
        //          ldloca.0
        //          ldflda whatever
        //
        // of a primitive-like struct, you end up after morphing with the addr of a local
        // that's not marked as addrtaken, which is wrong. Also ldflda is usually used
        // for structs that contain other structs, which isn't a case we handle very
        // well now for other reasons.
            // We won't collapse small fields. This is probably not the right place to have this
            // check, but we're only using the function for this purpose, and it is easy to factor
            // out if we need to do so.
236 CORINFO_RESOLVED_TOKEN resolvedToken;
237 impResolveToken(codeAddr + sizeof(__int8), &resolvedToken, CORINFO_TOKENKIND_Field);
239 CORINFO_CLASS_HANDLE clsHnd;
240 var_types lclTyp = JITtype2varType(info.compCompHnd->getFieldType(resolvedToken.hField, &clsHnd));
            // Preserve 'small' int types
            if (lclTyp > TYP_INT)
            {
                lclTyp = genActualType(lclTyp);
            }

            if (varTypeIsSmall(lclTyp))
            {
                return false;
            }

            return true;
262 void Compiler::impResolveToken(const BYTE* addr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoTokenKind kind)
264 pResolvedToken->tokenContext = impTokenLookupContextHandle;
265 pResolvedToken->tokenScope = info.compScopeHnd;
266 pResolvedToken->token = getU4LittleEndian(addr);
267 pResolvedToken->tokenType = kind;
269 if (!tiVerificationNeeded)
271 info.compCompHnd->resolveToken(pResolvedToken);
275 Verify(eeTryResolveToken(pResolvedToken), "Token resolution failed");
279 /*****************************************************************************
281 * Pop one tree from the stack.
284 StackEntry Compiler::impPopStack()
286 if (verCurrentState.esStackDepth == 0)
288 BADCODE("stack underflow");
#ifdef VERBOSE_VERIFY
    if (VERBOSE && tiVerificationNeeded)
    {
        printf(TI_DUMP_PADDING);
        printf("About to pop from the stack: ");
        const typeInfo& ti = verCurrentState.esStack[verCurrentState.esStackDepth - 1].seTypeInfo;
        ti.Dump();
    }
#endif // VERBOSE_VERIFY
304 return verCurrentState.esStack[--verCurrentState.esStackDepth];
307 /*****************************************************************************
 *  Peek at the n'th (0-based) tree on the top of the stack.
312 StackEntry& Compiler::impStackTop(unsigned n)
314 if (verCurrentState.esStackDepth <= n)
316 BADCODE("stack underflow");
319 return verCurrentState.esStack[verCurrentState.esStackDepth - n - 1];
322 unsigned Compiler::impStackHeight()
324 return verCurrentState.esStackDepth;
327 /*****************************************************************************
 *  Some of the trees are spilled specially. While unspilling them, or
 *  making a copy, these need special handling. The function
 *  enumerates the operators possible after spilling.
333 #ifdef DEBUG // only used in asserts
334 static bool impValidSpilledStackEntry(GenTreePtr tree)
    if (tree->gtOper == GT_LCL_VAR)
    {
        return true;
    }

    if (tree->OperIsConst())
    {
        return true;
    }

    return false;
350 /*****************************************************************************
352 * The following logic is used to save/restore stack contents.
 *  If 'copy' is true, then we make a copy of the trees on the stack. These
 *  must all be cloneable/spilled values.
 */
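
// Typical pairing (an illustrative sketch, not code from this file):
//
//     SavedStack blockState;
//     impSaveStackState(&blockState, true); // clone the stack entries
//     // ... import a speculative path ...
//     impRestoreStackState(&blockState);    // put the original stack back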
357 void Compiler::impSaveStackState(SavedStack* savePtr, bool copy)
359 savePtr->ssDepth = verCurrentState.esStackDepth;
361 if (verCurrentState.esStackDepth)
363 savePtr->ssTrees = new (this, CMK_ImpStack) StackEntry[verCurrentState.esStackDepth];
364 size_t saveSize = verCurrentState.esStackDepth * sizeof(*savePtr->ssTrees);
368 StackEntry* table = savePtr->ssTrees;
370 /* Make a fresh copy of all the stack entries */
372 for (unsigned level = 0; level < verCurrentState.esStackDepth; level++, table++)
374 table->seTypeInfo = verCurrentState.esStack[level].seTypeInfo;
375 GenTreePtr tree = verCurrentState.esStack[level].val;
377 assert(impValidSpilledStackEntry(tree));
            switch (tree->gtOper)
            {
                case GT_CNS_INT:
                case GT_CNS_LNG:
                case GT_CNS_DBL:
                case GT_CNS_STR:
                case GT_LCL_VAR:
                    table->val = gtCloneExpr(tree);
                    break;

                default:
                    assert(!"Bad oper - Not covered by impValidSpilledStackEntry()");
                    break;
            }
397 memcpy(savePtr->ssTrees, verCurrentState.esStack, saveSize);
402 void Compiler::impRestoreStackState(SavedStack* savePtr)
404 verCurrentState.esStackDepth = savePtr->ssDepth;
406 if (verCurrentState.esStackDepth)
408 memcpy(verCurrentState.esStack, savePtr->ssTrees,
409 verCurrentState.esStackDepth * sizeof(*verCurrentState.esStack));
413 /*****************************************************************************
415 * Get the tree list started for a new basic block.
417 inline void Compiler::impBeginTreeList()
419 assert(impTreeList == nullptr && impTreeLast == nullptr);
421 impTreeList = impTreeLast = new (this, GT_BEG_STMTS) GenTree(GT_BEG_STMTS, TYP_VOID);
424 /*****************************************************************************
426 * Store the given start and end stmt in the given basic block. This is
427 * mostly called by impEndTreeList(BasicBlock *block). It is called
 *  directly only for handling CEE_LEAVEs out of finally-protected tries.
431 inline void Compiler::impEndTreeList(BasicBlock* block, GenTreePtr firstStmt, GenTreePtr lastStmt)
433 assert(firstStmt->gtOper == GT_STMT);
434 assert(lastStmt->gtOper == GT_STMT);
436 /* Make the list circular, so that we can easily walk it backwards */
438 firstStmt->gtPrev = lastStmt;
440 /* Store the tree list in the basic block */
442 block->bbTreeList = firstStmt;
444 /* The block should not already be marked as imported */
445 assert((block->bbFlags & BBF_IMPORTED) == 0);
447 block->bbFlags |= BBF_IMPORTED;
450 /*****************************************************************************
452 * Store the current tree list in the given basic block.
455 inline void Compiler::impEndTreeList(BasicBlock* block)
457 assert(impTreeList->gtOper == GT_BEG_STMTS);
459 GenTreePtr firstTree = impTreeList->gtNext;
463 /* The block should not already be marked as imported */
464 assert((block->bbFlags & BBF_IMPORTED) == 0);
466 // Empty block. Just mark it as imported
467 block->bbFlags |= BBF_IMPORTED;
471 // Ignore the GT_BEG_STMTS
472 assert(firstTree->gtPrev == impTreeList);
474 impEndTreeList(block, firstTree, impTreeLast);
478 if (impLastILoffsStmt != nullptr)
480 impLastILoffsStmt->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
481 impLastILoffsStmt = nullptr;
484 impTreeList = impTreeLast = nullptr;
488 /*****************************************************************************
 *  Check that storing the given tree doesn't mess up the semantic order. Note
 *  that this has only limited value as we can only check [0..chkLevel).
494 inline void Compiler::impAppendStmtCheck(GenTreePtr stmt, unsigned chkLevel)
499 assert(stmt->gtOper == GT_STMT);
501 if (chkLevel == (unsigned)CHECK_SPILL_ALL)
503 chkLevel = verCurrentState.esStackDepth;
506 if (verCurrentState.esStackDepth == 0 || chkLevel == 0 || chkLevel == (unsigned)CHECK_SPILL_NONE)
511 GenTreePtr tree = stmt->gtStmt.gtStmtExpr;
513 // Calls can only be appended if there are no GTF_GLOB_EFFECT on the stack
515 if (tree->gtFlags & GTF_CALL)
517 for (unsigned level = 0; level < chkLevel; level++)
519 assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_EFFECT) == 0);
523 if (tree->gtOper == GT_ASG)
525 // For an assignment to a local variable, all references of that
526 // variable have to be spilled. If it is aliased, all calls and
527 // indirect accesses have to be spilled
529 if (tree->gtOp.gtOp1->gtOper == GT_LCL_VAR)
531 unsigned lclNum = tree->gtOp.gtOp1->gtLclVarCommon.gtLclNum;
532 for (unsigned level = 0; level < chkLevel; level++)
534 assert(!gtHasRef(verCurrentState.esStack[level].val, lclNum, false));
535 assert(!lvaTable[lclNum].lvAddrExposed ||
536 (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT) == 0);
540 // If the access may be to global memory, all side effects have to be spilled.
542 else if (tree->gtOp.gtOp1->gtFlags & GTF_GLOB_REF)
544 for (unsigned level = 0; level < chkLevel; level++)
546 assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_REF) == 0);
553 /*****************************************************************************
555 * Append the given GT_STMT node to the current block's tree list.
556 * [0..chkLevel) is the portion of the stack which we will check for
 *  interference with stmt and spill if needed.
 */
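
// For example (an illustrative call shape; similar calls appear later in this file),
//
//     impAppendTree(tree, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
//
// checks the entire evaluation stack for interference and spills as needed, while
// CHECK_SPILL_NONE appends the statement without any interference checks.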
560 inline void Compiler::impAppendStmt(GenTreePtr stmt, unsigned chkLevel)
562 assert(stmt->gtOper == GT_STMT);
563 noway_assert(impTreeLast != nullptr);
565 /* If the statement being appended has any side-effects, check the stack
566 to see if anything needs to be spilled to preserve correct ordering. */
568 GenTreePtr expr = stmt->gtStmt.gtStmtExpr;
569 unsigned flags = expr->gtFlags & GTF_GLOB_EFFECT;
    // Assignments to (unaliased) locals don't count as a side-effect as
    // we handle them specially using impSpillLclRefs(). Temp locals should
    // be fine too.
575 if ((expr->gtOper == GT_ASG) && (expr->gtOp.gtOp1->gtOper == GT_LCL_VAR) &&
576 !(expr->gtOp.gtOp1->gtFlags & GTF_GLOB_REF) && !gtHasLocalsWithAddrOp(expr->gtOp.gtOp2))
578 unsigned op2Flags = expr->gtOp.gtOp2->gtFlags & GTF_GLOB_EFFECT;
579 assert(flags == (op2Flags | GTF_ASG));
583 if (chkLevel == (unsigned)CHECK_SPILL_ALL)
585 chkLevel = verCurrentState.esStackDepth;
588 if (chkLevel && chkLevel != (unsigned)CHECK_SPILL_NONE)
590 assert(chkLevel <= verCurrentState.esStackDepth);
594 // If there is a call, we have to spill global refs
595 bool spillGlobEffects = (flags & GTF_CALL) ? true : false;
597 if (expr->gtOper == GT_ASG)
599 GenTree* lhs = expr->gtGetOp1();
600 // If we are assigning to a global ref, we have to spill global refs on stack.
601 // TODO-1stClassStructs: Previously, spillGlobEffects was set to true for
602 // GT_INITBLK and GT_COPYBLK, but this is overly conservative, and should be
603 // revisited. (Note that it was NOT set to true for GT_COPYOBJ.)
604 if (!expr->OperIsBlkOp())
606 // If we are assigning to a global ref, we have to spill global refs on stack
607 if ((lhs->gtFlags & GTF_GLOB_REF) != 0)
609 spillGlobEffects = true;
612 else if ((lhs->OperIsBlk() && !lhs->AsBlk()->HasGCPtr()) ||
613 ((lhs->OperGet() == GT_LCL_VAR) &&
614 (lvaTable[lhs->AsLclVarCommon()->gtLclNum].lvStructGcCount == 0)))
616 spillGlobEffects = true;
620 impSpillSideEffects(spillGlobEffects, chkLevel DEBUGARG("impAppendStmt"));
624 impSpillSpecialSideEff();
628 impAppendStmtCheck(stmt, chkLevel);
630 /* Point 'prev' at the previous node, so that we can walk backwards */
632 stmt->gtPrev = impTreeLast;
634 /* Append the expression statement to the list */
636 impTreeLast->gtNext = stmt;
640 impMarkContiguousSIMDFieldAssignments(stmt);
643 /* Once we set impCurStmtOffs in an appended tree, we are ready to
644 report the following offsets. So reset impCurStmtOffs */
646 if (impTreeLast->gtStmt.gtStmtILoffsx == impCurStmtOffs)
648 impCurStmtOffsSet(BAD_IL_OFFSET);
652 if (impLastILoffsStmt == nullptr)
654 impLastILoffsStmt = stmt;
665 /*****************************************************************************
667 * Insert the given GT_STMT "stmt" before GT_STMT "stmtBefore"
670 inline void Compiler::impInsertStmtBefore(GenTreePtr stmt, GenTreePtr stmtBefore)
672 assert(stmt->gtOper == GT_STMT);
673 assert(stmtBefore->gtOper == GT_STMT);
675 GenTreePtr stmtPrev = stmtBefore->gtPrev;
676 stmt->gtPrev = stmtPrev;
677 stmt->gtNext = stmtBefore;
678 stmtPrev->gtNext = stmt;
679 stmtBefore->gtPrev = stmt;
682 /*****************************************************************************
684 * Append the given expression tree to the current block's tree list.
685 * Return the newly created statement.
688 GenTreePtr Compiler::impAppendTree(GenTreePtr tree, unsigned chkLevel, IL_OFFSETX offset)
692 /* Allocate an 'expression statement' node */
694 GenTreePtr expr = gtNewStmt(tree, offset);
696 /* Append the statement to the current block's stmt list */
698 impAppendStmt(expr, chkLevel);
703 /*****************************************************************************
 *  Insert the given expression tree before GT_STMT "stmtBefore"
708 void Compiler::impInsertTreeBefore(GenTreePtr tree, IL_OFFSETX offset, GenTreePtr stmtBefore)
710 assert(stmtBefore->gtOper == GT_STMT);
712 /* Allocate an 'expression statement' node */
714 GenTreePtr expr = gtNewStmt(tree, offset);
716 /* Append the statement to the current block's stmt list */
718 impInsertStmtBefore(expr, stmtBefore);
721 /*****************************************************************************
723 * Append an assignment of the given value to a temp to the current tree list.
724 * curLevel is the stack level for which the spill to the temp is being done.
727 void Compiler::impAssignTempGen(unsigned tmp,
730 GenTreePtr* pAfterStmt, /* = NULL */
731 IL_OFFSETX ilOffset, /* = BAD_IL_OFFSET */
732 BasicBlock* block /* = NULL */
735 GenTreePtr asg = gtNewTempAssign(tmp, val);
737 if (!asg->IsNothingNode())
741 GenTreePtr asgStmt = gtNewStmt(asg, ilOffset);
742 *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, asgStmt);
746 impAppendTree(asg, curLevel, impCurStmtOffs);
751 /*****************************************************************************
752 * same as above, but handle the valueclass case too
755 void Compiler::impAssignTempGen(unsigned tmpNum,
757 CORINFO_CLASS_HANDLE structType,
759 GenTreePtr* pAfterStmt, /* = NULL */
760 IL_OFFSETX ilOffset, /* = BAD_IL_OFFSET */
761 BasicBlock* block /* = NULL */
766 if (varTypeIsStruct(val))
768 assert(tmpNum < lvaCount);
769 assert(structType != NO_CLASS_HANDLE);
        // If the method is non-verifiable the assert does not hold,
        // so at least ignore it when verification is turned on, since
        // any block that tries to use the temp would have failed verification.
774 var_types varType = lvaTable[tmpNum].lvType;
775 assert(tiVerificationNeeded || varType == TYP_UNDEF || varTypeIsStruct(varType));
776 lvaSetStruct(tmpNum, structType, false);
778 // Now, set the type of the struct value. Note that lvaSetStruct may modify the type
779 // of the lclVar to a specialized type (e.g. TYP_SIMD), based on the handle (structType)
780 // that has been passed in for the value being assigned to the temp, in which case we
781 // need to set 'val' to that same type.
782 // Note also that if we always normalized the types of any node that might be a struct
783 // type, this would not be necessary - but that requires additional JIT/EE interface
784 // calls that may not actually be required - e.g. if we only access a field of a struct.
786 val->gtType = lvaTable[tmpNum].lvType;
788 GenTreePtr dst = gtNewLclvNode(tmpNum, val->gtType);
789 asg = impAssignStruct(dst, val, structType, curLevel, pAfterStmt, block);
793 asg = gtNewTempAssign(tmpNum, val);
796 if (!asg->IsNothingNode())
800 GenTreePtr asgStmt = gtNewStmt(asg, ilOffset);
801 *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, asgStmt);
805 impAppendTree(asg, curLevel, impCurStmtOffs);
810 /*****************************************************************************
 *  Pop the given number of values from the stack and return a list node with
 *  their values.
 *  The 'prefixTree' argument may optionally contain an argument
 *  list that is prepended to the list returned from this function.
817 * The notion of prepended is a bit misleading in that the list is backwards
818 * from the way I would expect: The first element popped is at the end of
 *  the returned list, and prefixTree is 'before' that, meaning closer to
 *  the end of the list. To get to prefixTree, you have to walk to the
 *  end of the list.
823 * For ARG_ORDER_R2L prefixTree is only used to insert extra arguments, as
824 * such we reverse its meaning such that returnValue has a reversed
 *  prefixTree at the head of the list.
 */
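
// A small worked example (hypothetical values, not from the original source):
// with the stack [... A, B] (B on top), a non-null prefixTree P, and an
// ARG_ORDER_L2R target, popping count=2 yields
//
//     A -> B -> P
//
// i.e. the first value popped (B) sits just before P, and P is reached only by
// walking Rest() to the end of the list.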
828 GenTreeArgList* Compiler::impPopList(unsigned count, CORINFO_SIG_INFO* sig, GenTreeArgList* prefixTree)
830 assert(sig == nullptr || count == sig->numArgs);
832 CORINFO_CLASS_HANDLE structType;
833 GenTreeArgList* treeList;
    if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L)
    {
        treeList = nullptr;
    }
    else
    { // ARG_ORDER_L2R
        treeList = prefixTree;
    }
846 StackEntry se = impPopStack();
847 typeInfo ti = se.seTypeInfo;
848 GenTreePtr temp = se.val;
850 if (varTypeIsStruct(temp))
852 // Morph trees that aren't already OBJs or MKREFANY to be OBJs
853 assert(ti.IsType(TI_STRUCT));
854 structType = ti.GetClassHandleForValueClass();
855 temp = impNormStructVal(temp, structType, (unsigned)CHECK_SPILL_ALL);
858 /* NOTE: we defer bashing the type for I_IMPL to fgMorphArgs */
859 treeList = gtNewListNode(temp, treeList);
864 if (sig->retTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS &&
865 sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR && sig->retType != CORINFO_TYPE_VAR)
867 // Make sure that all valuetypes (including enums) that we push are loaded.
        // This is to guarantee that if a GC is triggered from the prestub of this method,
869 // all valuetypes in the method signature are already loaded.
870 // We need to be able to find the size of the valuetypes, but we cannot
871 // do a class-load from within GC.
872 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(sig->retTypeSigClass);
875 CORINFO_ARG_LIST_HANDLE argLst = sig->args;
876 CORINFO_CLASS_HANDLE argClass;
877 CORINFO_CLASS_HANDLE argRealClass;
878 GenTreeArgList* args;
881 for (args = treeList, count = sig->numArgs; count > 0; args = args->Rest(), count--)
883 PREFIX_ASSUME(args != nullptr);
885 CorInfoType corType = strip(info.compCompHnd->getArgType(sig, argLst, &argClass));
887 // insert implied casts (from float to double or double to float)
889 if (corType == CORINFO_TYPE_DOUBLE && args->Current()->TypeGet() == TYP_FLOAT)
891 args->Current() = gtNewCastNode(TYP_DOUBLE, args->Current(), TYP_DOUBLE);
893 else if (corType == CORINFO_TYPE_FLOAT && args->Current()->TypeGet() == TYP_DOUBLE)
895 args->Current() = gtNewCastNode(TYP_FLOAT, args->Current(), TYP_FLOAT);
898 // insert any widening or narrowing casts for backwards compatibility
900 args->Current() = impImplicitIorI4Cast(args->Current(), JITtype2varType(corType));
902 if (corType != CORINFO_TYPE_CLASS && corType != CORINFO_TYPE_BYREF && corType != CORINFO_TYPE_PTR &&
903 corType != CORINFO_TYPE_VAR && (argRealClass = info.compCompHnd->getArgClass(sig, argLst)) != nullptr)
            // Everett MC++ could generate IL with mismatched valuetypes. It used to work with the Everett JIT,
            // but it stopped working in Whidbey when we started passing simple valuetypes as underlying
            // primitive types.
            // We will try to adjust for this case here to avoid breaking customers' code (see VSW 485789 for
            // details).
910 if (corType == CORINFO_TYPE_VALUECLASS && !varTypeIsStruct(args->Current()))
912 args->Current() = impNormStructVal(args->Current(), argRealClass, (unsigned)CHECK_SPILL_ALL, true);
915 // Make sure that all valuetypes (including enums) that we push are loaded.
            // This is to guarantee that if a GC is triggered from the prestub of this method,
917 // all valuetypes in the method signature are already loaded.
918 // We need to be able to find the size of the valuetypes, but we cannot
919 // do a class-load from within GC.
920 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(argRealClass);
923 argLst = info.compCompHnd->getArgNext(argLst);
927 if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L)
929 // Prepend the prefixTree
931 // Simple in-place reversal to place treeList
932 // at the end of a reversed prefixTree
        while (prefixTree != nullptr)
        {
            GenTreeArgList* next = prefixTree->Rest();
            prefixTree->Rest()   = treeList;
            treeList             = prefixTree;
            prefixTree           = next;
        }
944 /*****************************************************************************
946 * Pop the given number of values from the stack in reverse order (STDCALL/CDECL etc.)
947 * The first "skipReverseCount" items are not reversed.
950 GenTreeArgList* Compiler::impPopRevList(unsigned count, CORINFO_SIG_INFO* sig, unsigned skipReverseCount)
953 assert(skipReverseCount <= count);
955 GenTreeArgList* list = impPopList(count, sig);
958 if (list == nullptr || skipReverseCount == count)
963 GenTreeArgList* ptr = nullptr; // Initialized to the first node that needs to be reversed
964 GenTreeArgList* lastSkipNode = nullptr; // Will be set to the last node that does not need to be reversed
    if (skipReverseCount == 0)
    {
        ptr = list;
    }
    else
    {
        lastSkipNode = list;
        // Get to the first node that needs to be reversed
        for (unsigned i = 0; i < skipReverseCount - 1; i++)
        {
            lastSkipNode = lastSkipNode->Rest();
        }

        PREFIX_ASSUME(lastSkipNode != nullptr);
        ptr = lastSkipNode->Rest();
    }
    GenTreeArgList* reversedList = nullptr;

    do
    {
        GenTreeArgList* tmp = ptr->Rest();
        ptr->Rest()         = reversedList;
        reversedList        = ptr;
        ptr                 = tmp;
    } while (ptr != nullptr);
    if (skipReverseCount)
    {
        lastSkipNode->Rest() = reversedList;
        return list;
    }
    else
    {
        return reversedList;
    }
1004 /*****************************************************************************
1005 Assign (copy) the structure from 'src' to 'dest'. The structure is a value
1006 class of type 'clsHnd'. It returns the tree that should be appended to the
1007 statement list that represents the assignment.
1008 Temp assignments may be appended to impTreeList if spilling is necessary.
   curLevel is the stack level for which a spill may be done.
1012 GenTreePtr Compiler::impAssignStruct(GenTreePtr dest,
1014 CORINFO_CLASS_HANDLE structHnd,
1016 GenTreePtr* pAfterStmt, /* = NULL */
1017 BasicBlock* block /* = NULL */
1020 assert(varTypeIsStruct(dest));
1022 while (dest->gtOper == GT_COMMA)
1024 assert(varTypeIsStruct(dest->gtOp.gtOp2)); // Second thing is the struct
1026 // Append all the op1 of GT_COMMA trees before we evaluate op2 of the GT_COMMA tree.
1029 *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(dest->gtOp.gtOp1, impCurStmtOffs));
1033 impAppendTree(dest->gtOp.gtOp1, curLevel, impCurStmtOffs); // do the side effect
1036 // set dest to the second thing
1037 dest = dest->gtOp.gtOp2;
1040 assert(dest->gtOper == GT_LCL_VAR || dest->gtOper == GT_RETURN || dest->gtOper == GT_FIELD ||
1041 dest->gtOper == GT_IND || dest->gtOper == GT_OBJ || dest->gtOper == GT_INDEX);
1043 if (dest->OperGet() == GT_LCL_VAR && src->OperGet() == GT_LCL_VAR &&
1044 src->gtLclVarCommon.gtLclNum == dest->gtLclVarCommon.gtLclNum)
1047 return gtNewNothingNode();
1050 // TODO-1stClassStructs: Avoid creating an address if it is not needed,
1051 // or re-creating a Blk node if it is.
1052 GenTreePtr destAddr;
1054 if (dest->gtOper == GT_IND || dest->OperIsBlk())
1056 destAddr = dest->gtOp.gtOp1;
1060 destAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, dest);
1063 return (impAssignStructPtr(destAddr, src, structHnd, curLevel, pAfterStmt, block));
1066 /*****************************************************************************/
1068 GenTreePtr Compiler::impAssignStructPtr(GenTreePtr destAddr,
1070 CORINFO_CLASS_HANDLE structHnd,
1072 GenTreePtr* pAfterStmt, /* = NULL */
1073 BasicBlock* block /* = NULL */
1077 GenTreePtr dest = nullptr;
1078 unsigned destFlags = 0;
1080 #if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1081 assert(varTypeIsStruct(src) || (src->gtOper == GT_ADDR && src->TypeGet() == TYP_BYREF));
1082 // TODO-ARM-BUG: Does ARM need this?
1083 // TODO-ARM64-BUG: Does ARM64 need this?
1084 assert(src->gtOper == GT_LCL_VAR || src->gtOper == GT_FIELD || src->gtOper == GT_IND || src->gtOper == GT_OBJ ||
1085 src->gtOper == GT_CALL || src->gtOper == GT_MKREFANY || src->gtOper == GT_RET_EXPR ||
1086 src->gtOper == GT_COMMA || src->gtOper == GT_ADDR ||
1087 (src->TypeGet() != TYP_STRUCT && (GenTree::OperIsSIMD(src->gtOper) || src->gtOper == GT_LCL_FLD)));
1088 #else // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1089 assert(varTypeIsStruct(src));
1091 assert(src->gtOper == GT_LCL_VAR || src->gtOper == GT_FIELD || src->gtOper == GT_IND || src->gtOper == GT_OBJ ||
1092 src->gtOper == GT_CALL || src->gtOper == GT_MKREFANY || src->gtOper == GT_RET_EXPR ||
1093 src->gtOper == GT_COMMA ||
1094 (src->TypeGet() != TYP_STRUCT && (GenTree::OperIsSIMD(src->gtOper) || src->gtOper == GT_LCL_FLD)));
1095 #endif // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1096 if (destAddr->OperGet() == GT_ADDR)
1098 GenTree* destNode = destAddr->gtGetOp1();
1099 // If the actual destination is a local (for non-LEGACY_BACKEND), or already a block node, or is a node that
1100 // will be morphed, don't insert an OBJ(ADDR).
1101 if (destNode->gtOper == GT_INDEX || destNode->OperIsBlk()
1102 #ifndef LEGACY_BACKEND
1103 || ((destNode->OperGet() == GT_LCL_VAR) && (destNode->TypeGet() == src->TypeGet()))
1104 #endif // !LEGACY_BACKEND
1109 destType = destNode->TypeGet();
1113 destType = src->TypeGet();
1116 var_types asgType = src->TypeGet();
1118 if (src->gtOper == GT_CALL)
1120 if (src->AsCall()->TreatAsHasRetBufArg(this))
1122 // Case of call returning a struct via hidden retbuf arg
1124 // insert the return value buffer into the argument list as first byref parameter
1125 src->gtCall.gtCallArgs = gtNewListNode(destAddr, src->gtCall.gtCallArgs);
1127 // now returns void, not a struct
1128 src->gtType = TYP_VOID;
1130 // return the morphed call node
1135 // Case of call returning a struct in one or more registers.
1137 var_types returnType = (var_types)src->gtCall.gtReturnType;
1139 // We won't use a return buffer, so change the type of src->gtType to 'returnType'
1140 src->gtType = genActualType(returnType);
1142 // First we try to change this to "LclVar/LclFld = call"
1144 if ((destAddr->gtOper == GT_ADDR) && (destAddr->gtOp.gtOp1->gtOper == GT_LCL_VAR))
1146 // If it is a multi-reg struct return, don't change the oper to GT_LCL_FLD.
1147 // That is, the IR will be of the form lclVar = call for multi-reg return
1149 GenTreePtr lcl = destAddr->gtOp.gtOp1;
1150 if (src->AsCall()->HasMultiRegRetVal())
1152 // Mark the struct LclVar as used in a MultiReg return context
1153 // which currently makes it non promotable.
1154 // TODO-1stClassStructs: Eliminate this pessimization when we can more generally
1155 // handle multireg returns.
1156 lcl->gtFlags |= GTF_DONT_CSE;
1157 lvaTable[lcl->gtLclVarCommon.gtLclNum].lvIsMultiRegRet = true;
1159 else // The call result is not a multireg return
1161 // We change this to a GT_LCL_FLD (from a GT_ADDR of a GT_LCL_VAR)
1162 lcl->ChangeOper(GT_LCL_FLD);
1163 fgLclFldAssign(lcl->gtLclVarCommon.gtLclNum);
1166 lcl->gtType = src->gtType;
1167 asgType = src->gtType;
1170 #if defined(_TARGET_ARM_)
1171 // TODO-Cleanup: This should have been taken care of in the above HasMultiRegRetVal() case,
            // but that method has not been updated to include ARM.
1173 impMarkLclDstNotPromotable(lcl->gtLclVarCommon.gtLclNum, src, structHnd);
1174 lcl->gtFlags |= GTF_DONT_CSE;
1175 #elif defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
            // Not allowed for FEATURE_CORECLR, which is the only SKU available for System V OSs.
1177 assert(!src->gtCall.IsVarargs() && "varargs not allowed for System V OSs.");
1179 // Make the struct non promotable. The eightbytes could contain multiple fields.
1180 // TODO-1stClassStructs: Eliminate this pessimization when we can more generally
1181 // handle multireg returns.
            // TODO-Cleanup: Why is this needed here? It seems that this will be set even for
            // non-multireg returns.
1184 lcl->gtFlags |= GTF_DONT_CSE;
1185 lvaTable[lcl->gtLclVarCommon.gtLclNum].lvIsMultiRegRet = true;
1188 else // we don't have a GT_ADDR of a GT_LCL_VAR
1190 // !!! The destination could be on stack. !!!
1191 // This flag will let us choose the correct write barrier.
1192 asgType = returnType;
1193 destFlags = GTF_IND_TGTANYWHERE;
1197 else if (src->gtOper == GT_RET_EXPR)
1199 GenTreeCall* call = src->gtRetExpr.gtInlineCandidate->AsCall();
1200 noway_assert(call->gtOper == GT_CALL);
1202 if (call->HasRetBufArg())
1204 // insert the return value buffer into the argument list as first byref parameter
1205 call->gtCallArgs = gtNewListNode(destAddr, call->gtCallArgs);
1207 // now returns void, not a struct
1208 src->gtType = TYP_VOID;
1209 call->gtType = TYP_VOID;
            // We have already appended the write to 'dest' via the GT_CALL's args,
            // so now we just return an empty node (pruning the GT_RET_EXPR).
1217 // Case of inline method returning a struct in one or more registers.
1219 var_types returnType = (var_types)call->gtReturnType;
1221 // We won't need a return buffer
1222 asgType = returnType;
1223 src->gtType = genActualType(returnType);
1224 call->gtType = src->gtType;
1226 // If we've changed the type, and it no longer matches a local destination,
1227 // we must use an indirection.
1228 if ((dest != nullptr) && (dest->OperGet() == GT_LCL_VAR) && (dest->TypeGet() != asgType))
1233 // !!! The destination could be on stack. !!!
1234 // This flag will let us choose the correct write barrier.
1235 destFlags = GTF_IND_TGTANYWHERE;
1238 else if (src->OperIsBlk())
1240 asgType = impNormStructType(structHnd);
1241 if (src->gtOper == GT_OBJ)
1243 assert(src->gtObj.gtClass == structHnd);
1246 else if (src->gtOper == GT_INDEX)
1248 asgType = impNormStructType(structHnd);
1249 assert(src->gtIndex.gtStructElemClass == structHnd);
1251 else if (src->gtOper == GT_MKREFANY)
1253 // Since we are assigning the result of a GT_MKREFANY,
1254 // "destAddr" must point to a refany.
1256 GenTreePtr destAddrClone;
1258 impCloneExpr(destAddr, &destAddrClone, structHnd, curLevel, pAfterStmt DEBUGARG("MKREFANY assignment"));
1260 assert(offsetof(CORINFO_RefAny, dataPtr) == 0);
1261 assert(destAddr->gtType == TYP_I_IMPL || destAddr->gtType == TYP_BYREF);
1262 GetZeroOffsetFieldMap()->Set(destAddr, GetFieldSeqStore()->CreateSingleton(GetRefanyDataField()));
1263 GenTreePtr ptrSlot = gtNewOperNode(GT_IND, TYP_I_IMPL, destAddr);
1264 GenTreeIntCon* typeFieldOffset = gtNewIconNode(offsetof(CORINFO_RefAny, type), TYP_I_IMPL);
1265 typeFieldOffset->gtFieldSeq = GetFieldSeqStore()->CreateSingleton(GetRefanyTypeField());
1266 GenTreePtr typeSlot =
1267 gtNewOperNode(GT_IND, TYP_I_IMPL, gtNewOperNode(GT_ADD, destAddr->gtType, destAddrClone, typeFieldOffset));
1269 // append the assign of the pointer value
1270 GenTreePtr asg = gtNewAssignNode(ptrSlot, src->gtOp.gtOp1);
1273 *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(asg, impCurStmtOffs));
1277 impAppendTree(asg, curLevel, impCurStmtOffs);
1280 // return the assign of the type value, to be appended
1281 return gtNewAssignNode(typeSlot, src->gtOp.gtOp2);
1283 else if (src->gtOper == GT_COMMA)
1285 // The second thing is the struct or its address.
1286 assert(varTypeIsStruct(src->gtOp.gtOp2) || src->gtOp.gtOp2->gtType == TYP_BYREF);
1289 *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(src->gtOp.gtOp1, impCurStmtOffs));
1293 impAppendTree(src->gtOp.gtOp1, curLevel, impCurStmtOffs); // do the side effect
1296 // Evaluate the second thing using recursion.
1297 return impAssignStructPtr(destAddr, src->gtOp.gtOp2, structHnd, curLevel, pAfterStmt, block);
1299 else if (src->IsLocal())
1301 asgType = src->TypeGet();
1303 else if (asgType == TYP_STRUCT)
1305 asgType = impNormStructType(structHnd);
1306 src->gtType = asgType;
1307 #ifdef LEGACY_BACKEND
1308 if (asgType == TYP_STRUCT)
1310 GenTree* srcAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, src);
1311 src = gtNewOperNode(GT_IND, TYP_STRUCT, srcAddr);
1315 if (dest == nullptr)
1317 // TODO-1stClassStructs: We shouldn't really need a block node as the destination
1318 // if this is a known struct type.
1319 if (asgType == TYP_STRUCT)
1321 dest = gtNewObjNode(structHnd, destAddr);
1322 gtSetObjGcInfo(dest->AsObj());
1323 // Although an obj as a call argument was always assumed to be a globRef
1324 // (which is itself overly conservative), that is not true of the operands
1325 // of a block assignment.
1326 dest->gtFlags &= ~GTF_GLOB_REF;
1327 dest->gtFlags |= (destAddr->gtFlags & GTF_GLOB_REF);
1329 else if (varTypeIsStruct(asgType))
1331 dest = new (this, GT_BLK) GenTreeBlk(GT_BLK, asgType, destAddr, genTypeSize(asgType));
1335 dest = gtNewOperNode(GT_IND, asgType, destAddr);
1340 dest->gtType = asgType;
1343 dest->gtFlags |= destFlags;
1344 destFlags = dest->gtFlags;
1346 // return an assignment node, to be appended
1347 GenTree* asgNode = gtNewAssignNode(dest, src);
1348 gtBlockOpInit(asgNode, dest, src, false);
1350 // TODO-1stClassStructs: Clean up the settings of GTF_DONT_CSE on the lhs
1352 if ((destFlags & GTF_DONT_CSE) == 0)
1354 dest->gtFlags &= ~(GTF_DONT_CSE);
1359 /*****************************************************************************
1360 Given a struct value, and the class handle for that structure, return
1361 the expression for the address for that structure value.
1363 willDeref - does the caller guarantee to dereference the pointer.
1366 GenTreePtr Compiler::impGetStructAddr(GenTreePtr structVal,
1367 CORINFO_CLASS_HANDLE structHnd,
1371 assert(varTypeIsStruct(structVal) || eeIsValueClass(structHnd));
1373 var_types type = structVal->TypeGet();
1375 genTreeOps oper = structVal->gtOper;
1377 if (oper == GT_OBJ && willDeref)
1379 assert(structVal->gtObj.gtClass == structHnd);
1380 return (structVal->gtObj.Addr());
1382 else if (oper == GT_CALL || oper == GT_RET_EXPR || oper == GT_OBJ || oper == GT_MKREFANY)
1384 unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));
1386 impAssignTempGen(tmpNum, structVal, structHnd, curLevel);
1388 // The 'return value' is now the temp itself
1390 type = genActualType(lvaTable[tmpNum].TypeGet());
1391 GenTreePtr temp = gtNewLclvNode(tmpNum, type);
1392 temp = gtNewOperNode(GT_ADDR, TYP_BYREF, temp);
1395 else if (oper == GT_COMMA)
1397 assert(structVal->gtOp.gtOp2->gtType == type); // Second thing is the struct
1399 GenTreePtr oldTreeLast = impTreeLast;
1400 structVal->gtOp.gtOp2 = impGetStructAddr(structVal->gtOp.gtOp2, structHnd, curLevel, willDeref);
1401 structVal->gtType = TYP_BYREF;
1403 if (oldTreeLast != impTreeLast)
1405 // Some temp assignment statement was placed on the statement list
1406 // for Op2, but that would be out of order with op1, so we need to
1407 // spill op1 onto the statement list after whatever was last
1408 // before we recursed on Op2 (i.e. before whatever Op2 appended).
1409 impInsertTreeBefore(structVal->gtOp.gtOp1, impCurStmtOffs, oldTreeLast->gtNext);
1410 structVal->gtOp.gtOp1 = gtNewNothingNode();
1416 return (gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1419 //------------------------------------------------------------------------
1420 // impNormStructType: Given a (known to be) struct class handle structHnd, normalize its type,
1421 // and optionally determine the GC layout of the struct.
1424 // structHnd - The class handle for the struct type of interest.
1425 // gcLayout - (optional, default nullptr) - a BYTE pointer, allocated by the caller,
1426 // into which the gcLayout will be written.
1427 // pNumGCVars - (optional, default nullptr) - if non-null, a pointer to an unsigned,
1428 // which will be set to the number of GC fields in the struct.
1429 // pSimdBaseType - (optional, default nullptr) - if non-null, and the struct is a SIMD
1430 // type, set to the SIMD base type
1433 // The JIT type for the struct (e.g. TYP_STRUCT, or TYP_SIMD*).
1434 // The gcLayout will be returned using the pointers provided by the caller, if non-null.
1435 // It may also modify the compFloatingPointUsed flag if the type is a SIMD type.
1438 // The caller must set gcLayout to nullptr OR ensure that it is large enough
1439 // (see ICorStaticInfo::getClassGClayout in corinfo.h).
1442 // Normalizing the type involves examining the struct type to determine if it should
1443 // be modified to one that is handled specially by the JIT, possibly being a candidate
1444 // for full enregistration, e.g. TYP_SIMD16.
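//
// For instance (an assumed example, not asserted by this code): a 16-byte struct of
// four floats that the VM recognizes as a SIMD type would normalize to TYP_SIMD16
// rather than TYP_STRUCT, making it a candidate for an XMM register, while a struct
// containing GC references stays TYP_STRUCT.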
1446 var_types Compiler::impNormStructType(CORINFO_CLASS_HANDLE structHnd,
1448 unsigned* pNumGCVars,
1449 var_types* pSimdBaseType)
1451 assert(structHnd != NO_CLASS_HANDLE);
1453 const DWORD structFlags = info.compCompHnd->getClassAttribs(structHnd);
1454 var_types structType = TYP_STRUCT;
    // On CoreCLR the check for GC includes a "may" to account for the special
    // ByRef-like span structs. The "CONTAINS_STACK_PTR" check is the particular bit.
    // When this is set the struct will contain a ByRef that could be a GC pointer or a
    // native pointer.
    const bool mayContainGCPtrs =
1461 ((structFlags & CORINFO_FLG_CONTAINS_STACK_PTR) != 0 || ((structFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0));
#ifdef FEATURE_SIMD
    // Check to see if this is a SIMD type.
    if (featureSIMD && !mayContainGCPtrs)
1467 unsigned originalSize = info.compCompHnd->getClassSize(structHnd);
1469 if ((originalSize >= minSIMDStructBytes()) && (originalSize <= maxSIMDStructBytes()))
1471 unsigned int sizeBytes;
1472 var_types simdBaseType = getBaseTypeAndSizeOfSIMDType(structHnd, &sizeBytes);
1473 if (simdBaseType != TYP_UNKNOWN)
1475 assert(sizeBytes == originalSize);
1476 structType = getSIMDTypeForSize(sizeBytes);
1477 if (pSimdBaseType != nullptr)
1479 *pSimdBaseType = simdBaseType;
1481 // Also indicate that we use floating point registers.
1482 compFloatingPointUsed = true;
1486 #endif // FEATURE_SIMD
1488 // Fetch GC layout info if requested
1489 if (gcLayout != nullptr)
1491 unsigned numGCVars = info.compCompHnd->getClassGClayout(structHnd, gcLayout);
1493 // Verify that the quick test up above via the class attributes gave a
1494 // safe view of the type's GCness.
1496 // Note there are cases where mayContainGCPtrs is true but getClassGClayout
1497 // does not report any gc fields.
1499 assert(mayContainGCPtrs || (numGCVars == 0));
1501 if (pNumGCVars != nullptr)
1503 *pNumGCVars = numGCVars;
1508 // Can't safely ask for number of GC pointers without also
1509 // asking for layout.
1510 assert(pNumGCVars == nullptr);
1516 //****************************************************************************
1517 // Given TYP_STRUCT value 'structVal', make sure it is 'canonical', that is
1518 // it is either an OBJ or a MKREFANY node, or a node (e.g. GT_INDEX) that will be morphed.
1520 GenTreePtr Compiler::impNormStructVal(GenTreePtr structVal,
1521 CORINFO_CLASS_HANDLE structHnd,
1523 bool forceNormalization /*=false*/)
1525 assert(forceNormalization || varTypeIsStruct(structVal));
1526 assert(structHnd != NO_CLASS_HANDLE);
1527 var_types structType = structVal->TypeGet();
1528 bool makeTemp = false;
1529 if (structType == TYP_STRUCT)
1531 structType = impNormStructType(structHnd);
1533 bool alreadyNormalized = false;
1534 GenTreeLclVarCommon* structLcl = nullptr;
1536 genTreeOps oper = structVal->OperGet();
1539 // GT_RETURN and GT_MKREFANY don't capture the handle.
1543 alreadyNormalized = true;
1547 structVal->gtCall.gtRetClsHnd = structHnd;
1552 structVal->gtRetExpr.gtRetClsHnd = structHnd;
1557 structVal->gtArgPlace.gtArgPlaceClsHnd = structHnd;
1561 // This will be transformed to an OBJ later.
1562 alreadyNormalized = true;
1563 structVal->gtIndex.gtStructElemClass = structHnd;
1564 structVal->gtIndex.gtIndElemSize = info.compCompHnd->getClassSize(structHnd);
1568 // Wrap it in a GT_OBJ.
1569 structVal->gtType = structType;
1570 structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1575 structLcl = structVal->AsLclVarCommon();
1576 // Wrap it in a GT_OBJ.
1577 structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1584 // These should already have the appropriate type.
1585 assert(structVal->gtType == structType);
1586 alreadyNormalized = true;
1590 assert(structVal->gtType == structType);
1591 structVal = gtNewObjNode(structHnd, structVal->gtGetOp1());
1592 alreadyNormalized = true;
1597 assert(varTypeIsSIMD(structVal) && (structVal->gtType == structType));
1599 #endif // FEATURE_SIMD
1603 // The second thing could either be a block node or a GT_SIMD or a GT_COMMA node.
1604 GenTree* blockNode = structVal->gtOp.gtOp2;
1605 assert(blockNode->gtType == structType);
1607 // Is this GT_COMMA(op1, GT_COMMA())?
1608 GenTree* parent = structVal;
1609 if (blockNode->OperGet() == GT_COMMA)
1611 // Find the last node in the comma chain.
1614 assert(blockNode->gtType == structType);
1616 blockNode = blockNode->gtOp.gtOp2;
1617 } while (blockNode->OperGet() == GT_COMMA);
1621 if (blockNode->OperGet() == GT_SIMD)
1623 parent->gtOp.gtOp2 = impNormStructVal(blockNode, structHnd, curLevel, forceNormalization);
1624 alreadyNormalized = true;
1629 assert(blockNode->OperIsBlk());
1631 // Sink the GT_COMMA below the blockNode addr.
        // That is, GT_COMMA(op1, op2=blockNode) is transformed into
1633 // blockNode(GT_COMMA(TYP_BYREF, op1, op2's op1)).
1635 // In case of a chained GT_COMMA case, we sink the last
1636 // GT_COMMA below the blockNode addr.
1637 GenTree* blockNodeAddr = blockNode->gtOp.gtOp1;
1638 assert(blockNodeAddr->gtType == TYP_BYREF);
1639 GenTree* commaNode = parent;
1640 commaNode->gtType = TYP_BYREF;
1641 commaNode->gtOp.gtOp2 = blockNodeAddr;
1642 blockNode->gtOp.gtOp1 = commaNode;
1643 if (parent == structVal)
1645 structVal = blockNode;
1647 alreadyNormalized = true;
1653 assert(!"Unexpected node in impNormStructVal()");
1656 structVal->gtType = structType;
1657 GenTree* structObj = structVal;
1659 if (!alreadyNormalized || forceNormalization)
1663 unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));
1665 impAssignTempGen(tmpNum, structVal, structHnd, curLevel);
1667 // The structVal is now the temp itself
1669 structLcl = gtNewLclvNode(tmpNum, structType)->AsLclVarCommon();
1670 // TODO-1stClassStructs: Avoid always wrapping in GT_OBJ.
1671 structObj = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structLcl));
1673 else if (varTypeIsStruct(structType) && !structVal->OperIsBlk())
1675 // Wrap it in a GT_OBJ
1676 structObj = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1680 if (structLcl != nullptr)
        // An OBJ on an ADDR(LCL_VAR) can never raise an exception,
        // so we don't set GTF_EXCEPT here.
1684 if (!lvaIsImplicitByRefLocal(structLcl->gtLclNum))
1686 structObj->gtFlags &= ~GTF_GLOB_REF;
        // In general an OBJ is an indirection and could raise an exception.
1692 structObj->gtFlags |= GTF_EXCEPT;
1697 /******************************************************************************/
1698 // Given a type token, generate code that will evaluate to the correct
1699 // handle representation of that token (type handle, field handle, or method handle)
1701 // For most cases, the handle is determined at compile-time, and the code
1702 // generated is simply an embedded handle.
// Run-time lookup is required if the enclosing method is shared between instantiations
// and the token refers to formal type parameters whose instantiation is not known
// at compile-time.
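//
// For example (an illustrative shape, not emitted verbatim by this code): a token
// that resolves at compile time becomes a simple handle constant,
//
//     GT_CNS_INT(handle)    // embedded handle, possibly read through an indirection
//
// whereas a token mentioning a shared type parameter produces a dictionary lookup
// tree built by impRuntimeLookupToTree().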
1708 GenTreePtr Compiler::impTokenToHandle(CORINFO_RESOLVED_TOKEN* pResolvedToken,
1709 BOOL* pRuntimeLookup /* = NULL */,
1710 BOOL mustRestoreHandle /* = FALSE */,
1711 BOOL importParent /* = FALSE */)
1713 assert(!fgGlobalMorph);
1715 CORINFO_GENERICHANDLE_RESULT embedInfo;
1716 info.compCompHnd->embedGenericHandle(pResolvedToken, importParent, &embedInfo);
1720 *pRuntimeLookup = embedInfo.lookup.lookupKind.needsRuntimeLookup;
1723 if (mustRestoreHandle && !embedInfo.lookup.lookupKind.needsRuntimeLookup)
1725 switch (embedInfo.handleType)
1727 case CORINFO_HANDLETYPE_CLASS:
1728 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun((CORINFO_CLASS_HANDLE)embedInfo.compileTimeHandle);
1731 case CORINFO_HANDLETYPE_METHOD:
1732 info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun((CORINFO_METHOD_HANDLE)embedInfo.compileTimeHandle);
1735 case CORINFO_HANDLETYPE_FIELD:
1736 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(
1737 info.compCompHnd->getFieldClass((CORINFO_FIELD_HANDLE)embedInfo.compileTimeHandle));
1745 return impLookupToTree(pResolvedToken, &embedInfo.lookup, gtTokenToIconFlags(pResolvedToken->token),
1746 embedInfo.compileTimeHandle);
1749 GenTreePtr Compiler::impLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
1750 CORINFO_LOOKUP* pLookup,
1751 unsigned handleFlags,
1752 void* compileTimeHandle)
1754 if (!pLookup->lookupKind.needsRuntimeLookup)
1756 // No runtime lookup is required.
1757 // Access is direct or memory-indirect (of a fixed address) reference
1759 CORINFO_GENERIC_HANDLE handle = nullptr;
1760 void* pIndirection = nullptr;
1761 assert(pLookup->constLookup.accessType != IAT_PPVALUE);
1763 if (pLookup->constLookup.accessType == IAT_VALUE)
1765 handle = pLookup->constLookup.handle;
1767 else if (pLookup->constLookup.accessType == IAT_PVALUE)
1769 pIndirection = pLookup->constLookup.addr;
1771 return gtNewIconEmbHndNode(handle, pIndirection, handleFlags, 0, nullptr, compileTimeHandle);
1773 else if (compIsForInlining())
1775 // Don't import runtime lookups when inlining
1776 // Inlining has to be aborted in such a case
1777 compInlineResult->NoteFatal(InlineObservation::CALLSITE_GENERIC_DICTIONARY_LOOKUP);
1782 // Need to use dictionary-based access which depends on the typeContext
1783 // which is only available at runtime, not at compile-time.
1785 return impRuntimeLookupToTree(pResolvedToken, pLookup, compileTimeHandle);
1789 #ifdef FEATURE_READYTORUN_COMPILER
1790 GenTreePtr Compiler::impReadyToRunLookupToTree(CORINFO_CONST_LOOKUP* pLookup,
1791 unsigned handleFlags,
1792 void* compileTimeHandle)
1794 CORINFO_GENERIC_HANDLE handle = nullptr;
1795 void* pIndirection = nullptr;
1796 assert(pLookup->accessType != IAT_PPVALUE);
1798 if (pLookup->accessType == IAT_VALUE)
1800 handle = pLookup->handle;
1802 else if (pLookup->accessType == IAT_PVALUE)
1804 pIndirection = pLookup->addr;
1806 return gtNewIconEmbHndNode(handle, pIndirection, handleFlags, 0, nullptr, compileTimeHandle);
1809 GenTreeCall* Compiler::impReadyToRunHelperToTree(
1810 CORINFO_RESOLVED_TOKEN* pResolvedToken,
1811 CorInfoHelpFunc helper,
1813 GenTreeArgList* args /* =NULL*/,
1814 CORINFO_LOOKUP_KIND* pGenericLookupKind /* =NULL. Only used with generics */)
1816 CORINFO_CONST_LOOKUP lookup;
1817 if (!info.compCompHnd->getReadyToRunHelper(pResolvedToken, pGenericLookupKind, helper, &lookup))
1822 GenTreeCall* op1 = gtNewHelperCallNode(helper, type, args);
1824 op1->setEntryPoint(lookup);
1830 GenTreePtr Compiler::impMethodPointer(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo)
1832 GenTreePtr op1 = nullptr;
1834 switch (pCallInfo->kind)
1837 op1 = new (this, GT_FTN_ADDR) GenTreeFptrVal(TYP_I_IMPL, pCallInfo->hMethod);
1839 #ifdef FEATURE_READYTORUN_COMPILER
            if (opts.IsReadyToRun())
            {
                op1->gtFptrVal.gtEntryPoint = pCallInfo->codePointerLookup.constLookup;
            }
            else
            {
                op1->gtFptrVal.gtEntryPoint.addr = nullptr;
            }
1851 case CORINFO_CALL_CODE_POINTER:
1852 if (compIsForInlining())
1854 // Don't import runtime lookups when inlining
1855 // Inlining has to be aborted in such a case
1856 compInlineResult->NoteFatal(InlineObservation::CALLSITE_GENERIC_DICTIONARY_LOOKUP);
1860 op1 = impLookupToTree(pResolvedToken, &pCallInfo->codePointerLookup, GTF_ICON_FTN_ADDR, pCallInfo->hMethod);
1864 noway_assert(!"unknown call kind");
1871 //------------------------------------------------------------------------
1872 // getRuntimeContextTree: find pointer to context for runtime lookup.
1875 // kind - lookup kind.
1878 // Return GenTree pointer to generic shared context.
//    Reports the use of the generic context to the runtime.
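
// Sketch of the two shapes produced (informal, matching the cases below):
//
//     CORINFO_LOOKUP_THISOBJ:      ctx = GT_IND(this)    // vtable pointer of 'this'
//     CORINFO_LOOKUP_METHODPARAM /
//     CORINFO_LOOKUP_CLASSPARAM:   ctx = compTypeCtxtArg // hidden context argument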
1883 GenTreePtr Compiler::getRuntimeContextTree(CORINFO_RUNTIME_LOOKUP_KIND kind)
1885 GenTreePtr ctxTree = nullptr;
    // Collectible types require that for shared generic code, if we use the generic context parameter,
    // we report it. (This is a conservative approach; we could detect some cases, particularly when the
    // context parameter is 'this', where we don't need the eager reporting logic.)
1890 lvaGenericsContextUseCount++;
    if (kind == CORINFO_LOOKUP_THISOBJ)
    {
        // this object
        ctxTree = gtNewLclvNode(info.compThisArg, TYP_REF);

        // Vtable pointer of this object
        ctxTree = gtNewOperNode(GT_IND, TYP_I_IMPL, ctxTree);
        ctxTree->gtFlags |= GTF_EXCEPT; // Null-pointer exception
        ctxTree->gtFlags |= GTF_IND_INVARIANT;
    }
    else
    {
        assert(kind == CORINFO_LOOKUP_METHODPARAM || kind == CORINFO_LOOKUP_CLASSPARAM);

        ctxTree = gtNewLclvNode(info.compTypeCtxtArg, TYP_I_IMPL); // Exact method descriptor passed in as the last arg
    }
    return ctxTree;
1911 /*****************************************************************************/
1912 /* Import a dictionary lookup to access a handle in code shared between
1913 generic instantiations.
1914 The lookup depends on the typeContext which is only available at
1915 runtime, and not at compile-time.
1916 pLookup->token1 and pLookup->token2 specify the handle that is needed.
1919 1. pLookup->indirections == CORINFO_USEHELPER : Call a helper passing it the
1920 instantiation-specific handle, and the tokens to lookup the handle.
1921 2. pLookup->indirections != CORINFO_USEHELPER :
    2a. pLookup->testForNull == false : Dereference the instantiation-specific handle
        to get the handle.
1924 2b. pLookup->testForNull == true : Dereference the instantiation-specific handle.
1925 If it is non-NULL, it is the handle required. Else, call a helper
        to lookup the handle.
*/
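
// A concrete sketch of case 2a (assumed offsets, for illustration only): with
// indirections == 2 and no null test, the tree built below is equivalent to
//
//     handle = *(*(ctx + offsets[0]) + offsets[1]);
//
// where ctx is the tree returned by getRuntimeContextTree().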
1929 GenTreePtr Compiler::impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
1930 CORINFO_LOOKUP* pLookup,
1931 void* compileTimeHandle)
1934 // This method can only be called from the importer instance of the Compiler.
    // In other words, it cannot be called by the instance of the Compiler for the inlinee.
1936 assert(!compIsForInlining());
1938 GenTreePtr ctxTree = getRuntimeContextTree(pLookup->lookupKind.runtimeLookupKind);
1940 CORINFO_RUNTIME_LOOKUP* pRuntimeLookup = &pLookup->runtimeLookup;
1941 // It's available only via the run-time helper function
1942 if (pRuntimeLookup->indirections == CORINFO_USEHELPER)
1944 #ifdef FEATURE_READYTORUN_COMPILER
1945 if (opts.IsReadyToRun())
1947 return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL,
1948 gtNewArgList(ctxTree), &pLookup->lookupKind);
1952 GenTreeArgList* helperArgs =
1953 gtNewArgList(ctxTree, gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_TOKEN_HDL, 0,
1954 nullptr, compileTimeHandle));
1956 return gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, helperArgs);
1960 GenTreePtr slotPtrTree = ctxTree;
1962 if (pRuntimeLookup->testForNull)
1964 slotPtrTree = impCloneExpr(ctxTree, &ctxTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
1965 nullptr DEBUGARG("impRuntimeLookup slot"));
1968 GenTreePtr indOffTree = nullptr;
    // Apply repeated indirections
1971 for (WORD i = 0; i < pRuntimeLookup->indirections; i++)
1973 if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset))
1975 indOffTree = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
1976 nullptr DEBUGARG("impRuntimeLookup indirectOffset"));
1981 slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
1982 slotPtrTree->gtFlags |= GTF_IND_NONFAULTING;
1983 slotPtrTree->gtFlags |= GTF_IND_INVARIANT;
1986 if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset))
1988 slotPtrTree = gtNewOperNode(GT_ADD, TYP_I_IMPL, indOffTree, slotPtrTree);
        if (pRuntimeLookup->offsets[i] != 0)
        {
            slotPtrTree =
                gtNewOperNode(GT_ADD, TYP_I_IMPL, slotPtrTree, gtNewIconNode(pRuntimeLookup->offsets[i], TYP_I_IMPL));
        }
1998 // No null test required
1999 if (!pRuntimeLookup->testForNull)
2001 if (pRuntimeLookup->indirections == 0)
2006 slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
2007 slotPtrTree->gtFlags |= GTF_IND_NONFAULTING;
2009 if (!pRuntimeLookup->testForFixup)
2014 impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark0"));
2016 unsigned slotLclNum = lvaGrabTemp(true DEBUGARG("impRuntimeLookup test"));
2017 impAssignTempGen(slotLclNum, slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr, impCurStmtOffs);
2019 GenTree* slot = gtNewLclvNode(slotLclNum, TYP_I_IMPL);
2020 // downcast the pointer to a TYP_INT on 64-bit targets
2021 slot = impImplicitIorI4Cast(slot, TYP_INT);
2022 // Use a GT_AND to check for the lowest bit and indirect if it is set
2023 GenTree* test = gtNewOperNode(GT_AND, TYP_INT, slot, gtNewIconNode(1));
2024 GenTree* relop = gtNewOperNode(GT_EQ, TYP_INT, test, gtNewIconNode(0));
2025 relop->gtFlags |= GTF_RELOP_QMARK;
2027 // slot = GT_IND(slot - 1)
2028 slot = gtNewLclvNode(slotLclNum, TYP_I_IMPL);
2029 GenTree* add = gtNewOperNode(GT_ADD, TYP_I_IMPL, slot, gtNewIconNode(-1, TYP_I_IMPL));
2030 GenTree* indir = gtNewOperNode(GT_IND, TYP_I_IMPL, add);
2031 slot = gtNewLclvNode(slotLclNum, TYP_I_IMPL);
2032 GenTree* asg = gtNewAssignNode(slot, indir);
2034 GenTree* colon = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), asg);
2035 GenTree* qmark = gtNewQmarkNode(TYP_VOID, relop, colon);
2036 impAppendTree(qmark, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
2038 return gtNewLclvNode(slotLclNum, TYP_I_IMPL);
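        // In pseudocode, the fixup test appended above behaves roughly as (an
        // editorial sketch of the qmark, not literal emitted code):
        //
        //     if ((slot & 1) != 0)   // low bit set: slot still holds a fixup cell
        //         slot = *(slot - 1);
        //     return slot;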
2041 assert(pRuntimeLookup->indirections != 0);
2043 impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark1"));
2045 // Extract the handle
2046 GenTreePtr handle = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
2047 handle->gtFlags |= GTF_IND_NONFAULTING;
2049 GenTreePtr handleCopy = impCloneExpr(handle, &handle, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2050 nullptr DEBUGARG("impRuntimeLookup typehandle"));
2053 GenTreeArgList* helperArgs =
2054 gtNewArgList(ctxTree, gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_TOKEN_HDL, 0, nullptr,
2055 compileTimeHandle));
2056 GenTreePtr helperCall = gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, helperArgs);
2058 // Check for null and possibly call helper
2059 GenTreePtr relop = gtNewOperNode(GT_NE, TYP_INT, handle, gtNewIconNode(0, TYP_I_IMPL));
2060 relop->gtFlags |= GTF_RELOP_QMARK;
2062 GenTreePtr colon = new (this, GT_COLON) GenTreeColon(TYP_I_IMPL,
2063 gtNewNothingNode(), // do nothing if nonnull
2066 GenTreePtr qmark = gtNewQmarkNode(TYP_I_IMPL, relop, colon);
2069 if (handleCopy->IsLocal())
2071 tmp = handleCopy->gtLclVarCommon.gtLclNum;
2075 tmp = lvaGrabTemp(true DEBUGARG("spilling QMark1"));
2078 impAssignTempGen(tmp, qmark, (unsigned)CHECK_SPILL_NONE);
2079 return gtNewLclvNode(tmp, TYP_I_IMPL);
2082 /******************************************************************************
2083 * Spills the stack at verCurrentState.esStack[level] and replaces it with a temp.
2084  *  If tnum != BAD_VAR_NUM, the temp var used to replace the tree is tnum;
2085 * else, grab a new temp.
2086 * For structs (which can be pushed on the stack using obj, etc),
2087 * special handling is needed
2090 struct RecursiveGuard
2095 m_pAddress = nullptr;
2102 *m_pAddress = false;
2106 void Init(bool* pAddress, bool bInitialize)
2108 assert(pAddress && *pAddress == false && "Recursive guard violation");
2109 m_pAddress = pAddress;
2121 bool Compiler::impSpillStackEntry(unsigned level,
2125 bool bAssertOnRecursion,
2132 RecursiveGuard guard;
2133 guard.Init(&impNestedStackSpill, bAssertOnRecursion);
2136 GenTreePtr tree = verCurrentState.esStack[level].val;
2138 /* Allocate a temp if we haven't been asked to use a particular one */
2140 if (tiVerificationNeeded)
2142 // Ignore bad temp requests (they will happen with bad code and will be
2143         // caught when importing the destblock)
2144 if ((tnum != BAD_VAR_NUM && tnum >= lvaCount) && verNeedsVerification())
2151 if (tnum != BAD_VAR_NUM && (tnum >= lvaCount))
2157 bool isNewTemp = false;
2159 if (tnum == BAD_VAR_NUM)
2161 tnum = lvaGrabTemp(true DEBUGARG(reason));
2164 else if (tiVerificationNeeded && lvaTable[tnum].TypeGet() != TYP_UNDEF)
2166 // if verification is needed and tnum's type is incompatible with
2167         // the type on the stack, we grab a new temp. This is safe since
2168 // we will throw a verification exception in the dest block.
2170 var_types valTyp = tree->TypeGet();
2171 var_types dstTyp = lvaTable[tnum].TypeGet();
2173 // if the two types are different, we return. This will only happen with bad code and will
2174         // be caught when importing the destblock. We still allow int/byref and float/double differences.
2175 if ((genActualType(valTyp) != genActualType(dstTyp)) &&
2177 #ifndef _TARGET_64BIT_
2178 (valTyp == TYP_I_IMPL && dstTyp == TYP_BYREF) || (valTyp == TYP_BYREF && dstTyp == TYP_I_IMPL) ||
2179 #endif // !_TARGET_64BIT_
2180 (varTypeIsFloating(dstTyp) && varTypeIsFloating(valTyp))))
2182 if (verNeedsVerification())
2189 /* Assign the spilled entry to the temp */
2190 impAssignTempGen(tnum, tree, verCurrentState.esStack[level].seTypeInfo.GetClassHandle(), level);
2192 // If temp is newly introduced and a ref type, grab what type info we can.
2193 if (isNewTemp && (lvaTable[tnum].lvType == TYP_REF))
2195 CORINFO_CLASS_HANDLE stkHnd = verCurrentState.esStack[level].seTypeInfo.GetClassHandle();
2196 lvaSetClass(tnum, tree, stkHnd);
2199 // The tree type may be modified by impAssignTempGen, so use the type of the lclVar.
2200 var_types type = genActualType(lvaTable[tnum].TypeGet());
2201 GenTreePtr temp = gtNewLclvNode(tnum, type);
2202 verCurrentState.esStack[level].val = temp;
2207 /*****************************************************************************
2209 * Ensure that the stack has only spilled values
2212 void Compiler::impSpillStackEnsure(bool spillLeaves)
2214 assert(!spillLeaves || opts.compDbgCode);
2216 for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2218 GenTreePtr tree = verCurrentState.esStack[level].val;
2220 if (!spillLeaves && tree->OperIsLeaf())
2225 // Temps introduced by the importer itself don't need to be spilled
2227 bool isTempLcl = (tree->OperGet() == GT_LCL_VAR) && (tree->gtLclVarCommon.gtLclNum >= info.compLocalsCount);
2234 impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillStackEnsure"));
2238 void Compiler::impSpillEvalStack()
2240 for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2242 impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillEvalStack"));
2246 /*****************************************************************************
2248 * If the stack contains any trees with side effects in them, assign those
2249 * trees to temps and append the assignments to the statement list.
2250 * On return the stack is guaranteed to be empty.
2253 inline void Compiler::impEvalSideEffects()
2255 impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("impEvalSideEffects"));
2256 verCurrentState.esStackDepth = 0;
2259 /*****************************************************************************
2261 * If the stack contains any trees with side effects in them, assign those
2262 * trees to temps and replace them on the stack with refs to their temps.
2263 * [0..chkLevel) is the portion of the stack which will be checked and spilled.
2266 inline void Compiler::impSpillSideEffects(bool spillGlobEffects, unsigned chkLevel DEBUGARG(const char* reason))
2268 assert(chkLevel != (unsigned)CHECK_SPILL_NONE);
2270 /* Before we make any appends to the tree list we must spill the
2271 * "special" side effects (GTF_ORDER_SIDEEFF on a GT_CATCH_ARG) */
2273 impSpillSpecialSideEff();
2275 if (chkLevel == (unsigned)CHECK_SPILL_ALL)
2277 chkLevel = verCurrentState.esStackDepth;
2280 assert(chkLevel <= verCurrentState.esStackDepth);
2282 unsigned spillFlags = spillGlobEffects ? GTF_GLOB_EFFECT : GTF_SIDE_EFFECT;
2284 for (unsigned i = 0; i < chkLevel; i++)
2286 GenTreePtr tree = verCurrentState.esStack[i].val;
2288 GenTreePtr lclVarTree;
2290 if ((tree->gtFlags & spillFlags) != 0 ||
2291 (spillGlobEffects && // Only consider the following when spillGlobEffects == TRUE
2292 !impIsAddressInLocal(tree, &lclVarTree) && // No need to spill the GT_ADDR node on a local.
2293 gtHasLocalsWithAddrOp(tree))) // Spill if we still see GT_LCL_VAR that contains lvHasLdAddrOp or
2294 // lvAddrTaken flag.
2296 impSpillStackEntry(i, BAD_VAR_NUM DEBUGARG(false) DEBUGARG(reason));
2301 /*****************************************************************************
2303 * If the stack contains any trees with special side effects in them, assign
2304 * those trees to temps and replace them on the stack with refs to their temps.
2307 inline void Compiler::impSpillSpecialSideEff()
2309 // Only exception objects need to be carefully handled
2311 if (!compCurBB->bbCatchTyp)
2316 for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2318 GenTreePtr tree = verCurrentState.esStack[level].val;
2319         // Make sure that if we have an exception object in the subtree, we spill it.
2320 if (gtHasCatchArg(tree))
2322 impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillSpecialSideEff"));
2327 /*****************************************************************************
2329 * Spill all stack references to value classes (TYP_STRUCT nodes)
2332 void Compiler::impSpillValueClasses()
2334 for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2336 GenTreePtr tree = verCurrentState.esStack[level].val;
2338 if (fgWalkTreePre(&tree, impFindValueClasses) == WALK_ABORT)
2340 // Tree walk was aborted, which means that we found a
2341 // value class on the stack. Need to spill that
2344 impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillValueClasses"));
2349 /*****************************************************************************
2351 * Callback that checks if a tree node is TYP_STRUCT
2354 Compiler::fgWalkResult Compiler::impFindValueClasses(GenTreePtr* pTree, fgWalkData* data)
2356 fgWalkResult walkResult = WALK_CONTINUE;
2358 if ((*pTree)->gtType == TYP_STRUCT)
2360 // Abort the walk and indicate that we found a value class
2362 walkResult = WALK_ABORT;
2368 /*****************************************************************************
2370 * If the stack contains any trees with references to local #lclNum, assign
2371  *  those trees to temps and replace their place on the stack with refs to their temps.
2375 void Compiler::impSpillLclRefs(ssize_t lclNum)
2377 /* Before we make any appends to the tree list we must spill the
2378 * "special" side effects (GTF_ORDER_SIDEEFF) - GT_CATCH_ARG */
2380 impSpillSpecialSideEff();
2382 for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2384 GenTreePtr tree = verCurrentState.esStack[level].val;
2386 /* If the tree may throw an exception, and the block has a handler,
2387 then we need to spill assignments to the local if the local is
2388 live on entry to the handler.
2389 Just spill 'em all without considering the liveness */
2391 bool xcptnCaught = ehBlockHasExnFlowDsc(compCurBB) && (tree->gtFlags & (GTF_CALL | GTF_EXCEPT));
2393 /* Skip the tree if it doesn't have an affected reference,
2394 unless xcptnCaught */
2396 if (xcptnCaught || gtHasRef(tree, lclNum, false))
2398 impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillLclRefs"));
2403 /*****************************************************************************
2405 * Push catch arg onto the stack.
2406 * If there are jumps to the beginning of the handler, insert basic block
2407 * and spill catch arg to a temp. Update the handler block if necessary.
2409 * Returns the basic block of the actual handler.
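/* A sketch of the transformation when the spill block is injected (editorial;
   names are illustrative). A handler H with multiple inbound edges becomes:

       newBlk (BBJ_NONE, falls through into H):  tmp = CATCH_ARG
       H:                                        stack on entry = { tmp }
*/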
2412 BasicBlock* Compiler::impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_HANDLE clsHnd, bool isSingleBlockFilter)
2414 // Do not inject the basic block twice on reimport. This should be
2415 // hit only under JIT stress. See if the block is the one we injected.
2416 // Note that EH canonicalization can inject internal blocks here. We might
2417 // be able to re-use such a block (but we don't, right now).
2418 if ((hndBlk->bbFlags & (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET)) ==
2419 (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET))
2421 GenTreePtr tree = hndBlk->bbTreeList;
2423 if (tree != nullptr && tree->gtOper == GT_STMT)
2425 tree = tree->gtStmt.gtStmtExpr;
2426 assert(tree != nullptr);
2428 if ((tree->gtOper == GT_ASG) && (tree->gtOp.gtOp1->gtOper == GT_LCL_VAR) &&
2429 (tree->gtOp.gtOp2->gtOper == GT_CATCH_ARG))
2431 tree = gtNewLclvNode(tree->gtOp.gtOp1->gtLclVarCommon.gtLclNum, TYP_REF);
2433 impPushOnStack(tree, typeInfo(TI_REF, clsHnd));
2435 return hndBlk->bbNext;
2439 // If we get here, it must have been some other kind of internal block. It's possible that
2440 // someone prepended something to our injected block, but that's unlikely.
2443 /* Push the exception address value on the stack */
2444 GenTreePtr arg = new (this, GT_CATCH_ARG) GenTree(GT_CATCH_ARG, TYP_REF);
2446 /* Mark the node as having a side-effect - i.e. cannot be
2447 * moved around since it is tied to a fixed location (EAX) */
2448 arg->gtFlags |= GTF_ORDER_SIDEEFF;
2450 #if defined(JIT32_GCENCODER)
2451 const bool forceInsertNewBlock = isSingleBlockFilter || compStressCompile(STRESS_CATCH_ARG, 5);
2453 const bool forceInsertNewBlock = compStressCompile(STRESS_CATCH_ARG, 5);
2454 #endif // defined(JIT32_GCENCODER)
2456 /* Spill GT_CATCH_ARG to a temp if there are jumps to the beginning of the handler */
2457 if (hndBlk->bbRefs > 1 || forceInsertNewBlock)
2459 if (hndBlk->bbRefs == 1)
2464 /* Create extra basic block for the spill */
2465 BasicBlock* newBlk = fgNewBBbefore(BBJ_NONE, hndBlk, /* extendRegion */ true);
2466 newBlk->bbFlags |= BBF_IMPORTED | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET;
2467 newBlk->setBBWeight(hndBlk->bbWeight);
2468 newBlk->bbCodeOffs = hndBlk->bbCodeOffs;
2470 /* Account for the new link we are about to create */
2473 /* Spill into a temp */
2474 unsigned tempNum = lvaGrabTemp(false DEBUGARG("SpillCatchArg"));
2475 lvaTable[tempNum].lvType = TYP_REF;
2476 arg = gtNewTempAssign(tempNum, arg);
2478 hndBlk->bbStkTempsIn = tempNum;
2480 /* Report the debug info. impImportBlockCode won't treat
2481          * the actual handler as an exception block and thus won't do it for us. */
2482 if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES)
2484 impCurStmtOffs = newBlk->bbCodeOffs | IL_OFFSETX_STKBIT;
2485 arg = gtNewStmt(arg, impCurStmtOffs);
2488 fgInsertStmtAtEnd(newBlk, arg);
2490 arg = gtNewLclvNode(tempNum, TYP_REF);
2493 impPushOnStack(arg, typeInfo(TI_REF, clsHnd));
2498 /*****************************************************************************
2500 * Given a tree, clone it. *pClone is set to the cloned tree.
2501 * Returns the original tree if the cloning was easy,
2502  *  else returns the temp to which the tree had to be spilled.
2503 * If the tree has side-effects, it will be spilled to a temp.
2506 GenTreePtr Compiler::impCloneExpr(GenTreePtr tree,
2508 CORINFO_CLASS_HANDLE structHnd,
2510 GenTreePtr* pAfterStmt DEBUGARG(const char* reason))
2512 if (!(tree->gtFlags & GTF_GLOB_EFFECT))
2514 GenTreePtr clone = gtClone(tree, true);
2523 /* Store the operand in a temp and return the temp */
2525 unsigned temp = lvaGrabTemp(true DEBUGARG(reason));
2527 // impAssignTempGen() may change tree->gtType to TYP_VOID for calls which
2528 // return a struct type. It also may modify the struct type to a more
2529 // specialized type (e.g. a SIMD type). So we will get the type from
2530 // the lclVar AFTER calling impAssignTempGen().
2532 impAssignTempGen(temp, tree, structHnd, curLevel, pAfterStmt, impCurStmtOffs);
2533 var_types type = genActualType(lvaTable[temp].TypeGet());
2535 *pClone = gtNewLclvNode(temp, type);
2536 return gtNewLclvNode(temp, type);
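// A typical usage sketch (hypothetical caller, mirroring the real call sites
// earlier in this file): duplicate a value that must be consumed twice without
// re-evaluating its side effects.
//
//     GenTreePtr copy;
//     tree = impCloneExpr(tree, &copy, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
//                         nullptr DEBUGARG("example clone"));
//     // use 'tree' for the first consumer and 'copy' for the second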
2539 /*****************************************************************************
2540  *  Remember the IL offset (including stack-empty info) for the trees we will generate now.
2544 inline void Compiler::impCurStmtOffsSet(IL_OFFSET offs)
2546 if (compIsForInlining())
2548 GenTreePtr callStmt = impInlineInfo->iciStmt;
2549 assert(callStmt->gtOper == GT_STMT);
2550 impCurStmtOffs = callStmt->gtStmt.gtStmtILoffsx;
2554 assert(offs == BAD_IL_OFFSET || (offs & IL_OFFSETX_BITS) == 0);
2555 IL_OFFSETX stkBit = (verCurrentState.esStackDepth > 0) ? IL_OFFSETX_STKBIT : 0;
2556 impCurStmtOffs = offs | stkBit;
2560 /*****************************************************************************
2561 * Returns current IL offset with stack-empty and call-instruction info incorporated
2563 inline IL_OFFSETX Compiler::impCurILOffset(IL_OFFSET offs, bool callInstruction)
2565 if (compIsForInlining())
2567 return BAD_IL_OFFSET;
2571 assert(offs == BAD_IL_OFFSET || (offs & IL_OFFSETX_BITS) == 0);
2572 IL_OFFSETX stkBit = (verCurrentState.esStackDepth > 0) ? IL_OFFSETX_STKBIT : 0;
2573 IL_OFFSETX callInstructionBit = callInstruction ? IL_OFFSETX_CALLINSTRUCTIONBIT : 0;
2574 return offs | stkBit | callInstructionBit;
2578 //------------------------------------------------------------------------
2579 // impCanSpillNow: check whether it is possible to spill all values from eeStack to local variables.
2582 //    prevOpcode - last importer opcode
2585 //    true if it is legal, false if it could be a sequence that we do not want to split.
2586 bool Compiler::impCanSpillNow(OPCODE prevOpcode)
2588 // Don't spill after ldtoken, because it could be a part of the InitializeArray sequence.
2589     // Avoid breaking up the sequence, to guarantee that impInitializeArrayIntrinsic can succeed.
2590 return prevOpcode != CEE_LDTOKEN;
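// For reference, the protected sequence is the InitializeArray pattern matched by
// impInitializeArrayIntrinsic (see below); spilling between ldtoken and the call
// would hide the token from the pattern match:
//
//     dup
//     ldtoken  <field handle>
//     call     System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray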
2593 /*****************************************************************************
2595 * Remember the instr offset for the statements
2597 * When we do impAppendTree(tree), we can't set tree->gtStmtLastILoffs to
2598 * impCurOpcOffs, if the append was done because of a partial stack spill,
2599 * as some of the trees corresponding to code up to impCurOpcOffs might
2600 * still be sitting on the stack.
2601 * So we delay marking of gtStmtLastILoffs until impNoteLastILoffs().
2602 * This should be called when an opcode finally/explicitly causes
2603 * impAppendTree(tree) to be called (as opposed to being called because of
2604 * a spill caused by the opcode)
2609 void Compiler::impNoteLastILoffs()
2611 if (impLastILoffsStmt == nullptr)
2613 // We should have added a statement for the current basic block
2614         // Is this assert correct?
2616 assert(impTreeLast);
2617 assert(impTreeLast->gtOper == GT_STMT);
2619 impTreeLast->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
2623 impLastILoffsStmt->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
2624 impLastILoffsStmt = nullptr;
2630 /*****************************************************************************
2631 * We don't create any GenTree (excluding spills) for a branch.
2632 * For debugging info, we need a placeholder so that we can note
2633 * the IL offset in gtStmt.gtStmtOffs. So append an empty statement.
2636 void Compiler::impNoteBranchOffs()
2638 if (opts.compDbgCode)
2640 impAppendTree(gtNewNothingNode(), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
2644 /*****************************************************************************
2645 * Locate the next stmt boundary for which we need to record info.
2646  *  We will have to spill the stack at such boundaries if it is not already empty.
2648 * Returns the next stmt boundary (after the start of the block)
2651 unsigned Compiler::impInitBlockLineInfo()
2653 /* Assume the block does not correspond with any IL offset. This prevents
2654 us from reporting extra offsets. Extra mappings can cause confusing
2655 stepping, especially if the extra mapping is a jump-target, and the
2656 debugger does not ignore extra mappings, but instead rewinds to the
2657 nearest known offset */
2659 impCurStmtOffsSet(BAD_IL_OFFSET);
2661 if (compIsForInlining())
2666 IL_OFFSET blockOffs = compCurBB->bbCodeOffs;
2668 if ((verCurrentState.esStackDepth == 0) && (info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES))
2670 impCurStmtOffsSet(blockOffs);
2673 if (false && (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES))
2675 impCurStmtOffsSet(blockOffs);
2678 /* Always report IL offset 0 or some tests get confused.
2679        Probably a good idea anyway */
2683 impCurStmtOffsSet(blockOffs);
2686 if (!info.compStmtOffsetsCount)
2691 /* Find the lowest explicit stmt boundary within the block */
2693 /* Start looking at an entry that is based on our instr offset */
2695 unsigned index = (info.compStmtOffsetsCount * blockOffs) / info.compILCodeSize;
2697 if (index >= info.compStmtOffsetsCount)
2699 index = info.compStmtOffsetsCount - 1;
2702 /* If we've guessed too far, back up */
2704 while (index > 0 && info.compStmtOffsets[index - 1] >= blockOffs)
2709 /* If we guessed short, advance ahead */
2711 while (info.compStmtOffsets[index] < blockOffs)
2715 if (index == info.compStmtOffsetsCount)
2717 return info.compStmtOffsetsCount;
2721 assert(index < info.compStmtOffsetsCount);
2723 if (info.compStmtOffsets[index] == blockOffs)
2725 /* There is an explicit boundary for the start of this basic block.
2726 So we will start with bbCodeOffs. Else we will wait until we
2727 get to the next explicit boundary */
2729 impCurStmtOffsSet(blockOffs);
2737 /*****************************************************************************/
2739 static inline bool impOpcodeIsCallOpcode(OPCODE opcode)
2753 /*****************************************************************************/
2755 static inline bool impOpcodeIsCallSiteBoundary(OPCODE opcode)
2772 /*****************************************************************************/
2774 // One might think it is worth caching these values, but results indicate that it isn't.
2776 // In addition, caching them causes SuperPMI to be unable to completely
2777 // encapsulate an individual method context.
2778 CORINFO_CLASS_HANDLE Compiler::impGetRefAnyClass()
2780 CORINFO_CLASS_HANDLE refAnyClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPED_BYREF);
2781 assert(refAnyClass != (CORINFO_CLASS_HANDLE) nullptr);
2785 CORINFO_CLASS_HANDLE Compiler::impGetTypeHandleClass()
2787 CORINFO_CLASS_HANDLE typeHandleClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPE_HANDLE);
2788 assert(typeHandleClass != (CORINFO_CLASS_HANDLE) nullptr);
2789 return typeHandleClass;
2792 CORINFO_CLASS_HANDLE Compiler::impGetRuntimeArgumentHandle()
2794 CORINFO_CLASS_HANDLE argIteratorClass = info.compCompHnd->getBuiltinClass(CLASSID_ARGUMENT_HANDLE);
2795 assert(argIteratorClass != (CORINFO_CLASS_HANDLE) nullptr);
2796 return argIteratorClass;
2799 CORINFO_CLASS_HANDLE Compiler::impGetStringClass()
2801 CORINFO_CLASS_HANDLE stringClass = info.compCompHnd->getBuiltinClass(CLASSID_STRING);
2802 assert(stringClass != (CORINFO_CLASS_HANDLE) nullptr);
2806 CORINFO_CLASS_HANDLE Compiler::impGetObjectClass()
2808 CORINFO_CLASS_HANDLE objectClass = info.compCompHnd->getBuiltinClass(CLASSID_SYSTEM_OBJECT);
2809 assert(objectClass != (CORINFO_CLASS_HANDLE) nullptr);
2813 /*****************************************************************************
2814 * "&var" can be used either as TYP_BYREF or TYP_I_IMPL, but we
2815  *  set its type to TYP_BYREF when we create it. Whether it can be
2816  *  changed to TYP_I_IMPL is known only at the point where we use it.
2820 void Compiler::impBashVarAddrsToI(GenTreePtr tree1, GenTreePtr tree2)
2822 if (tree1->IsVarAddr())
2824 tree1->gtType = TYP_I_IMPL;
2827 if (tree2 && tree2->IsVarAddr())
2829 tree2->gtType = TYP_I_IMPL;
2833 /*****************************************************************************
2834 * TYP_INT and TYP_I_IMPL can be used almost interchangeably, but we want
2835 * to make that an explicit cast in our trees, so any implicit casts that
2836 * exist in the IL (at least on 64-bit where TYP_I_IMPL != TYP_INT) are
2837 * turned into explicit casts here.
2838 * We also allow an implicit conversion of a ldnull into a TYP_I_IMPL(0)
2841 GenTreePtr Compiler::impImplicitIorI4Cast(GenTreePtr tree, var_types dstTyp)
2843 var_types currType = genActualType(tree->gtType);
2844 var_types wantedType = genActualType(dstTyp);
2846 if (wantedType != currType)
2848 // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
2849 if ((tree->OperGet() == GT_CNS_INT) && varTypeIsI(dstTyp))
2851 if (!varTypeIsI(tree->gtType) || ((tree->gtType == TYP_REF) && (tree->gtIntCon.gtIconVal == 0)))
2853 tree->gtType = TYP_I_IMPL;
2856 #ifdef _TARGET_64BIT_
2857 else if (varTypeIsI(wantedType) && (currType == TYP_INT))
2859 // Note that this allows TYP_INT to be cast to a TYP_I_IMPL when wantedType is a TYP_BYREF or TYP_REF
2860 tree = gtNewCastNode(TYP_I_IMPL, tree, TYP_I_IMPL);
2862 else if ((wantedType == TYP_INT) && varTypeIsI(currType))
2864 // Note that this allows TYP_BYREF or TYP_REF to be cast to a TYP_INT
2865 tree = gtNewCastNode(TYP_INT, tree, TYP_INT);
2867 #endif // _TARGET_64BIT_
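// Example (editorial sketch): on a 64-bit target, combining a TYP_INT index with a
// TYP_BYREF pointer, as the Span<T>.get_Item expansion below does, passes the index
// through this helper so the widening becomes an explicit node:
//
//     GT_ADD(TYP_BYREF, ptr, GT_CAST(TYP_I_IMPL <- TYP_INT, index))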
2873 /*****************************************************************************
2874 * TYP_FLOAT and TYP_DOUBLE can be used almost interchangeably in some cases,
2875 * but we want to make that an explicit cast in our trees, so any implicit casts
2876 * that exist in the IL are turned into explicit casts here.
2879 GenTreePtr Compiler::impImplicitR4orR8Cast(GenTreePtr tree, var_types dstTyp)
2881 #ifndef LEGACY_BACKEND
2882 if (varTypeIsFloating(tree) && varTypeIsFloating(dstTyp) && (dstTyp != tree->gtType))
2884 tree = gtNewCastNode(dstTyp, tree, dstTyp);
2886 #endif // !LEGACY_BACKEND
2891 //------------------------------------------------------------------------
2892 // impInitializeArrayIntrinsic: Attempts to replace a call to InitializeArray
2893 // with a GT_COPYBLK node.
2896 // sig - The InitializeArray signature.
2899 // A pointer to the newly created GT_COPYBLK node if the replacement succeeds or
2900 // nullptr otherwise.
2903 // The function recognizes the following IL pattern:
2904 // ldc <length> or a list of ldc <lower bound>/<length>
2907 // ldtoken <field handle>
2908 // call InitializeArray
2909 // The lower bounds need not be constant except when the array rank is 1.
2910 //    The function recognizes all kinds of arrays, thus enabling a small runtime
2911 // such as CoreRT to skip providing an implementation for InitializeArray.
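//    As an illustration (an assumption about typical C# compiler output, not taken
//    from this file), a field initializer such as
//
//        static readonly int[] Primes = { 2, 3, 5, 7 };
//
//    is emitted as "ldc.i4.4; newarr int32; dup; ldtoken <RVA data field>;
//    call InitializeArray", which is the shape recognized here.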
2913 GenTreePtr Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig)
2915 assert(sig->numArgs == 2);
2917 GenTreePtr fieldTokenNode = impStackTop(0).val;
2918 GenTreePtr arrayLocalNode = impStackTop(1).val;
2921     // Verify that the field token is known and valid. Note that it's also
2922 // possible for the token to come from reflection, in which case we cannot do
2923 // the optimization and must therefore revert to calling the helper. You can
2924 // see an example of this in bvt\DynIL\initarray2.exe (in Main).
2927 // Check to see if the ldtoken helper call is what we see here.
2928 if (fieldTokenNode->gtOper != GT_CALL || (fieldTokenNode->gtCall.gtCallType != CT_HELPER) ||
2929 (fieldTokenNode->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD)))
2934 // Strip helper call away
2935 fieldTokenNode = fieldTokenNode->gtCall.gtCallArgs->Current();
2937 if (fieldTokenNode->gtOper == GT_IND)
2939 fieldTokenNode = fieldTokenNode->gtOp.gtOp1;
2942 // Check for constant
2943 if (fieldTokenNode->gtOper != GT_CNS_INT)
2948 CORINFO_FIELD_HANDLE fieldToken = (CORINFO_FIELD_HANDLE)fieldTokenNode->gtIntCon.gtCompileTimeHandle;
2949 if (!fieldTokenNode->IsIconHandle(GTF_ICON_FIELD_HDL) || (fieldToken == nullptr))
2955 // We need to get the number of elements in the array and the size of each element.
2956 // We verify that the newarr statement is exactly what we expect it to be.
2957     // If it's not, then we just return nullptr and don't optimize this call.
2961     // It is possible that we don't have any statements in the block yet.
2963 if (impTreeLast->gtOper != GT_STMT)
2965 assert(impTreeLast->gtOper == GT_BEG_STMTS);
2970 // We start by looking at the last statement, making sure it's an assignment, and
2971 // that the target of the assignment is the array passed to InitializeArray.
2973 GenTreePtr arrayAssignment = impTreeLast->gtStmt.gtStmtExpr;
2974 if ((arrayAssignment->gtOper != GT_ASG) || (arrayAssignment->gtOp.gtOp1->gtOper != GT_LCL_VAR) ||
2975 (arrayLocalNode->gtOper != GT_LCL_VAR) ||
2976 (arrayAssignment->gtOp.gtOp1->gtLclVarCommon.gtLclNum != arrayLocalNode->gtLclVarCommon.gtLclNum))
2982 // Make sure that the object being assigned is a helper call.
2985 GenTreePtr newArrayCall = arrayAssignment->gtOp.gtOp2;
2986 if ((newArrayCall->gtOper != GT_CALL) || (newArrayCall->gtCall.gtCallType != CT_HELPER))
2992 // Verify that it is one of the new array helpers.
2995 bool isMDArray = false;
2997 if (newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_DIRECT) &&
2998 newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_OBJ) &&
2999 newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_VC) &&
3000 newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_ALIGN8)
3001 #ifdef FEATURE_READYTORUN_COMPILER
3002 && newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_R2R_DIRECT) &&
3003 newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1)
3007 if (newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEW_MDARR_NONVARARG))
3015 CORINFO_CLASS_HANDLE arrayClsHnd = (CORINFO_CLASS_HANDLE)newArrayCall->gtCall.compileTimeHelperArgumentHandle;
3018 // Make sure we found a compile time handle to the array
3027 S_UINT32 numElements;
3031 rank = info.compCompHnd->getArrayRank(arrayClsHnd);
3038 GenTreeArgList* tokenArg = newArrayCall->gtCall.gtCallArgs;
3039 assert(tokenArg != nullptr);
3040 GenTreeArgList* numArgsArg = tokenArg->Rest();
3041 assert(numArgsArg != nullptr);
3042 GenTreeArgList* argsArg = numArgsArg->Rest();
3043 assert(argsArg != nullptr);
3046         // The number of arguments should be a constant between 1 and 64. The rank can't be 0,
3047         // so at least one length must be present, and the rank can't exceed 32, so there can
3048         // be at most 64 arguments - 32 lengths and 32 lower bounds.
3051 if ((!numArgsArg->Current()->IsCnsIntOrI()) || (numArgsArg->Current()->AsIntCon()->IconValue() < 1) ||
3052 (numArgsArg->Current()->AsIntCon()->IconValue() > 64))
3057 unsigned numArgs = static_cast<unsigned>(numArgsArg->Current()->AsIntCon()->IconValue());
3058 bool lowerBoundsSpecified;
3060 if (numArgs == rank * 2)
3062 lowerBoundsSpecified = true;
3064 else if (numArgs == rank)
3066 lowerBoundsSpecified = false;
3069             // If the rank is 1 and a lower bound isn't specified, then the runtime creates
3070             // an SDArray. Note that even if a lower bound is specified it can be 0 and then
3071             // we get an SDArray as well; see the for loop below.
3085 // The rank is known to be at least 1 so we can start with numElements being 1
3086 // to avoid the need to special case the first dimension.
3089 numElements = S_UINT32(1);
3093 static bool IsArgsFieldInit(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs)
3095 return (tree->OperGet() == GT_ASG) && IsArgsFieldIndir(tree->gtGetOp1(), index, lvaNewObjArrayArgs) &&
3096 IsArgsAddr(tree->gtGetOp1()->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs);
3099 static bool IsArgsFieldIndir(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs)
3101 return (tree->OperGet() == GT_IND) && (tree->gtGetOp1()->OperGet() == GT_ADD) &&
3102 (tree->gtGetOp1()->gtGetOp2()->IsIntegralConst(sizeof(INT32) * index)) &&
3103 IsArgsAddr(tree->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs);
3106 static bool IsArgsAddr(GenTree* tree, unsigned lvaNewObjArrayArgs)
3108 return (tree->OperGet() == GT_ADDR) && (tree->gtGetOp1()->OperGet() == GT_LCL_VAR) &&
3109 (tree->gtGetOp1()->AsLclVar()->GetLclNum() == lvaNewObjArrayArgs);
3112 static bool IsComma(GenTree* tree)
3114 return (tree != nullptr) && (tree->OperGet() == GT_COMMA);
3118 unsigned argIndex = 0;
3121 for (comma = argsArg->Current(); Match::IsComma(comma); comma = comma->gtGetOp2())
3123 if (lowerBoundsSpecified)
3126 // In general lower bounds can be ignored because they're not needed to
3127                 // calculate the total number of elements. But for single-dimensional arrays
3128                 // we need to know if the lower bound is 0, because in this case the runtime
3129                 // creates an SDArray and this affects the way the array data offset is calculated.
3134 GenTree* lowerBoundAssign = comma->gtGetOp1();
3135 assert(Match::IsArgsFieldInit(lowerBoundAssign, argIndex, lvaNewObjArrayArgs));
3136 GenTree* lowerBoundNode = lowerBoundAssign->gtGetOp2();
3138 if (lowerBoundNode->IsIntegralConst(0))
3144 comma = comma->gtGetOp2();
3148 GenTree* lengthNodeAssign = comma->gtGetOp1();
3149 assert(Match::IsArgsFieldInit(lengthNodeAssign, argIndex, lvaNewObjArrayArgs));
3150 GenTree* lengthNode = lengthNodeAssign->gtGetOp2();
3152 if (!lengthNode->IsCnsIntOrI())
3157 numElements *= S_SIZE_T(lengthNode->AsIntCon()->IconValue());
3161 assert((comma != nullptr) && Match::IsArgsAddr(comma, lvaNewObjArrayArgs));
3163 if (argIndex != numArgs)
3171 // Make sure there are exactly two arguments: the array class and
3172 // the number of elements.
3175 GenTreePtr arrayLengthNode;
3177 GenTreeArgList* args = newArrayCall->gtCall.gtCallArgs;
3178 #ifdef FEATURE_READYTORUN_COMPILER
3179 if (newArrayCall->gtCall.gtCallMethHnd == eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1))
3181 // Array length is 1st argument for readytorun helper
3182 arrayLengthNode = args->Current();
3187 // Array length is 2nd argument for regular helper
3188 arrayLengthNode = args->Rest()->Current();
3192     // Make sure that the number of elements looks valid.
3194 if (arrayLengthNode->gtOper != GT_CNS_INT)
3199 numElements = S_SIZE_T(arrayLengthNode->gtIntCon.gtIconVal);
3201 if (!info.compCompHnd->isSDArray(arrayClsHnd))
3207 CORINFO_CLASS_HANDLE elemClsHnd;
3208 var_types elementType = JITtype2varType(info.compCompHnd->getChildType(arrayClsHnd, &elemClsHnd));
3211     // Note that genTypeSize will return zero for non-primitive types, which is exactly
3212 // what we want (size will then be 0, and we will catch this in the conditional below).
3213 // Note that we don't expect this to fail for valid binaries, so we assert in the
3214 // non-verification case (the verification case should not assert but rather correctly
3215 // handle bad binaries). This assert is not guarding any specific invariant, but rather
3216 // saying that we don't expect this to happen, and if it is hit, we need to investigate
3220 S_UINT32 elemSize(genTypeSize(elementType));
3221 S_UINT32 size = elemSize * S_UINT32(numElements);
3223 if (size.IsOverflow())
3228 if ((size.Value() == 0) || (varTypeIsGC(elementType)))
3230 assert(verNeedsVerification());
3234 void* initData = info.compCompHnd->getArrayInitializationData(fieldToken, size.Value());
3241 // At this point we are ready to commit to implementing the InitializeArray
3242 // intrinsic using a struct assignment. Pop the arguments from the stack and
3243 // return the struct assignment node.
3249 const unsigned blkSize = size.Value();
3250 unsigned dataOffset;
3254 dataOffset = eeGetMDArrayDataOffset(elementType, rank);
3258 dataOffset = eeGetArrayDataOffset(elementType);
3261 GenTreePtr dst = gtNewOperNode(GT_ADD, TYP_BYREF, arrayLocalNode, gtNewIconNode(dataOffset, TYP_I_IMPL));
3262 GenTreePtr blk = gtNewBlockVal(dst, blkSize);
3263 GenTreePtr srcAddr = gtNewIconHandleNode((size_t)initData, GTF_ICON_STATIC_HDL);
3264 GenTreePtr src = gtNewOperNode(GT_IND, TYP_STRUCT, srcAddr);
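    // Net effect (an editorial sketch): a block copy of the static initialization
    // data into the array's data section, roughly
    //
    //     memcpy(array + dataOffset, initData, blkSize);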
3266 return gtNewBlkOpNode(blk, // dst
3273 /*****************************************************************************/
3274 // Returns the GenTree that should be used to do the intrinsic instead of the call.
3275 // Returns NULL if an intrinsic cannot be used
3277 GenTreePtr Compiler::impIntrinsic(GenTreePtr newobjThis,
3278 CORINFO_CLASS_HANDLE clsHnd,
3279 CORINFO_METHOD_HANDLE method,
3280 CORINFO_SIG_INFO* sig,
3284 bool isJitIntrinsic,
3285 CorInfoIntrinsics* pIntrinsicID)
3287 bool mustExpand = false;
3288 CorInfoIntrinsics intrinsicID = info.compCompHnd->getIntrinsicID(method, &mustExpand);
3289 *pIntrinsicID = intrinsicID;
3291     // Jit intrinsics are always optional to expand, and won't have an intrinsic ID.
3295 assert(!mustExpand);
3296 assert(intrinsicID == CORINFO_INTRINSIC_Illegal);
3299 #ifndef _TARGET_ARM_
3300 genTreeOps interlockedOperator;
3303 if (intrinsicID == CORINFO_INTRINSIC_StubHelpers_GetStubContext)
3305 // must be done regardless of DbgCode and MinOpts
3306 return gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL);
3308 #ifdef _TARGET_64BIT_
3309 if (intrinsicID == CORINFO_INTRINSIC_StubHelpers_GetStubContextAddr)
3311 // must be done regardless of DbgCode and MinOpts
3312 return gtNewOperNode(GT_ADDR, TYP_I_IMPL, gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL));
3315 assert(intrinsicID != CORINFO_INTRINSIC_StubHelpers_GetStubContextAddr);
3318 GenTreePtr retNode = nullptr;
3321     // We disable the inlining of intrinsics for MinOpts.
3323 if (!mustExpand && (opts.compDbgCode || opts.MinOpts()))
3325 *pIntrinsicID = CORINFO_INTRINSIC_Illegal;
3329     // Currently we don't have CORINFO_INTRINSIC_Exp because it does not
3330     // seem to work properly for Infinity values; we don't do
3331     // CORINFO_INTRINSIC_Pow because it needs a helper, which we currently don't have.
3333 var_types callType = JITtype2varType(sig->retType);
3335 /* First do the intrinsics which are always smaller than a call */
3337 switch (intrinsicID)
3339 GenTreePtr op1, op2;
3341 case CORINFO_INTRINSIC_Sin:
3342 case CORINFO_INTRINSIC_Sqrt:
3343 case CORINFO_INTRINSIC_Abs:
3344 case CORINFO_INTRINSIC_Cos:
3345 case CORINFO_INTRINSIC_Round:
3346 case CORINFO_INTRINSIC_Cosh:
3347 case CORINFO_INTRINSIC_Sinh:
3348 case CORINFO_INTRINSIC_Tan:
3349 case CORINFO_INTRINSIC_Tanh:
3350 case CORINFO_INTRINSIC_Asin:
3351 case CORINFO_INTRINSIC_Acos:
3352 case CORINFO_INTRINSIC_Atan:
3353 case CORINFO_INTRINSIC_Atan2:
3354 case CORINFO_INTRINSIC_Log10:
3355 case CORINFO_INTRINSIC_Pow:
3356 case CORINFO_INTRINSIC_Exp:
3357 case CORINFO_INTRINSIC_Ceiling:
3358 case CORINFO_INTRINSIC_Floor:
3360 // These are math intrinsics
3362 assert(callType != TYP_STRUCT);
3366 #if defined(LEGACY_BACKEND)
3367 if (IsTargetIntrinsic(intrinsicID))
3368 #elif !defined(_TARGET_X86_)
3369 // Intrinsics that are not implemented directly by target instructions will
3370         // be re-materialized as user calls in the rationalizer. For prefixed tail calls,
3371         // don't do this optimization, because
3372         //  a) For backward-compatibility reasons on desktop .NET 4.6 / 4.6.1
3373         //  b) It would be a non-trivial task, or too late, to re-materialize a surviving
3374         //     tail-prefixed GT_INTRINSIC as a tail call in the rationalizer.
3375 if (!IsIntrinsicImplementedByUserCall(intrinsicID) || !tailCall)
3377 // On x86 RyuJIT, importing intrinsics that are implemented as user calls can cause incorrect calculation
3378 // of the depth of the stack if these intrinsics are used as arguments to another call. This causes bad
3379 // code generation for certain EH constructs.
3380 if (!IsIntrinsicImplementedByUserCall(intrinsicID))
3383 switch (sig->numArgs)
3386 op1 = impPopStack().val;
3388 #if FEATURE_X87_DOUBLES
3390 // X87 stack doesn't differentiate between float/double
3391 // so it doesn't need a cast, but everybody else does
3392 // Just double check it is at least a FP type
3393 noway_assert(varTypeIsFloating(op1));
3395 #else // FEATURE_X87_DOUBLES
3397 if (op1->TypeGet() != callType)
3399 op1 = gtNewCastNode(callType, op1, callType);
3402 #endif // FEATURE_X87_DOUBLES
3404 op1 = new (this, GT_INTRINSIC)
3405 GenTreeIntrinsic(genActualType(callType), op1, intrinsicID, method);
3409 op2 = impPopStack().val;
3410 op1 = impPopStack().val;
3412 #if FEATURE_X87_DOUBLES
3414 // X87 stack doesn't differentiate between float/double
3415 // so it doesn't need a cast, but everybody else does
3416 // Just double check it is at least a FP type
3417 noway_assert(varTypeIsFloating(op2));
3418 noway_assert(varTypeIsFloating(op1));
3420 #else // FEATURE_X87_DOUBLES
3422 if (op2->TypeGet() != callType)
3424 op2 = gtNewCastNode(callType, op2, callType);
3426 if (op1->TypeGet() != callType)
3428 op1 = gtNewCastNode(callType, op1, callType);
3431 #endif // FEATURE_X87_DOUBLES
3433 op1 = new (this, GT_INTRINSIC)
3434 GenTreeIntrinsic(genActualType(callType), op1, op2, intrinsicID, method);
3438                     NO_WAY("Unsupported number of args for Math Intrinsic");
3441 #ifndef LEGACY_BACKEND
3442 if (IsIntrinsicImplementedByUserCall(intrinsicID))
3444 op1->gtFlags |= GTF_CALL;
3452 #ifdef _TARGET_XARCH_
3453 // TODO-ARM-CQ: reenable treating Interlocked operation as intrinsic
3454 case CORINFO_INTRINSIC_InterlockedAdd32:
3455 interlockedOperator = GT_LOCKADD;
3456 goto InterlockedBinOpCommon;
3457 case CORINFO_INTRINSIC_InterlockedXAdd32:
3458 interlockedOperator = GT_XADD;
3459 goto InterlockedBinOpCommon;
3460 case CORINFO_INTRINSIC_InterlockedXchg32:
3461 interlockedOperator = GT_XCHG;
3462 goto InterlockedBinOpCommon;
3464 #ifdef _TARGET_AMD64_
3465 case CORINFO_INTRINSIC_InterlockedAdd64:
3466 interlockedOperator = GT_LOCKADD;
3467 goto InterlockedBinOpCommon;
3468 case CORINFO_INTRINSIC_InterlockedXAdd64:
3469 interlockedOperator = GT_XADD;
3470 goto InterlockedBinOpCommon;
3471 case CORINFO_INTRINSIC_InterlockedXchg64:
3472 interlockedOperator = GT_XCHG;
3473 goto InterlockedBinOpCommon;
3474 #endif // _TARGET_AMD64_
3476 InterlockedBinOpCommon:
3477 assert(callType != TYP_STRUCT);
3478 assert(sig->numArgs == 2);
3480 op2 = impPopStack().val;
3481 op1 = impPopStack().val;
3483             // This creates:
3484             //   val
3485             // XAdd
3486             //   addr
3487             //     field (for example)
3489 // In the case where the first argument is the address of a local, we might
3490 // want to make this *not* make the var address-taken -- but atomic instructions
3491 // on a local are probably pretty useless anyway, so we probably don't care.
3493 op1 = gtNewOperNode(interlockedOperator, genActualType(callType), op1, op2);
3494 op1->gtFlags |= GTF_GLOB_REF | GTF_ASG;
3497 #endif // _TARGET_XARCH_
3499 case CORINFO_INTRINSIC_MemoryBarrier:
3501 assert(sig->numArgs == 0);
3503 op1 = new (this, GT_MEMORYBARRIER) GenTree(GT_MEMORYBARRIER, TYP_VOID);
3504 op1->gtFlags |= GTF_GLOB_REF | GTF_ASG;
3508 #ifdef _TARGET_XARCH_
3509 // TODO-ARM-CQ: reenable treating InterlockedCmpXchg32 operation as intrinsic
3510 case CORINFO_INTRINSIC_InterlockedCmpXchg32:
3511 #ifdef _TARGET_AMD64_
3512 case CORINFO_INTRINSIC_InterlockedCmpXchg64:
3515 assert(callType != TYP_STRUCT);
3516 assert(sig->numArgs == 3);
3519 op3 = impPopStack().val; // comparand
3520 op2 = impPopStack().val; // value
3521 op1 = impPopStack().val; // location
3523 GenTreePtr node = new (this, GT_CMPXCHG) GenTreeCmpXchg(genActualType(callType), op1, op2, op3);
3525 node->gtCmpXchg.gtOpLocation->gtFlags |= GTF_DONT_CSE;
3531 case CORINFO_INTRINSIC_StringLength:
3532 op1 = impPopStack().val;
3533 if (!opts.MinOpts() && !opts.compDbgCode)
3535 GenTreeArrLen* arrLen = gtNewArrLen(TYP_INT, op1, offsetof(CORINFO_String, stringLen));
3540 /* Create the expression "*(str_addr + stringLengthOffset)" */
3541 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
3542 gtNewIconNode(offsetof(CORINFO_String, stringLen), TYP_I_IMPL));
3543 op1 = gtNewOperNode(GT_IND, TYP_INT, op1);
3546 // Getting the length of a null string should throw
3547 op1->gtFlags |= GTF_EXCEPT;
3552 case CORINFO_INTRINSIC_StringGetChar:
3553 op2 = impPopStack().val;
3554 op1 = impPopStack().val;
3555 op1 = gtNewIndexRef(TYP_CHAR, op1, op2);
3556 op1->gtFlags |= GTF_INX_STRING_LAYOUT;
3560 case CORINFO_INTRINSIC_InitializeArray:
3561 retNode = impInitializeArrayIntrinsic(sig);
3564 case CORINFO_INTRINSIC_Array_Address:
3565 case CORINFO_INTRINSIC_Array_Get:
3566 case CORINFO_INTRINSIC_Array_Set:
3567 retNode = impArrayAccessIntrinsic(clsHnd, sig, memberRef, readonlyCall, intrinsicID);
3570 case CORINFO_INTRINSIC_GetTypeFromHandle:
3571 op1 = impStackTop(0).val;
3572 if (op1->gtOper == GT_CALL && (op1->gtCall.gtCallType == CT_HELPER) &&
3573 gtIsTypeHandleToRuntimeTypeHelper(op1->AsCall()))
3575 op1 = impPopStack().val;
3576 // Change call to return RuntimeType directly.
3577 op1->gtType = TYP_REF;
3580 // Call the regular function.
3583 case CORINFO_INTRINSIC_RTH_GetValueInternal:
3584 op1 = impStackTop(0).val;
3585 if (op1->gtOper == GT_CALL && (op1->gtCall.gtCallType == CT_HELPER) &&
3586 gtIsTypeHandleToRuntimeTypeHelper(op1->AsCall()))
3588                 // Old tree:
3589                 // Helper-RuntimeTypeHandle -> TreeToGetNativeTypeHandle
3591                 // New tree:
3592                 // TreeToGetNativeTypeHandle
3594 // Remove call to helper and return the native TypeHandle pointer that was the parameter
3597 op1 = impPopStack().val;
3599 // Get native TypeHandle argument to old helper
3600 op1 = op1->gtCall.gtCallArgs;
3601 assert(op1->OperIsList());
3602 assert(op1->gtOp.gtOp2 == nullptr);
3603 op1 = op1->gtOp.gtOp1;
3606 // Call the regular function.
3609 #ifndef LEGACY_BACKEND
3610 case CORINFO_INTRINSIC_Object_GetType:
3612 op1 = impPopStack().val;
3613 op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, intrinsicID, method);
3615 // Set the CALL flag to indicate that the operator is implemented by a call.
3616 // Set also the EXCEPTION flag because the native implementation of
3617 // CORINFO_INTRINSIC_Object_GetType intrinsic can throw NullReferenceException.
3618 op1->gtFlags |= (GTF_CALL | GTF_EXCEPT);
3622 // Implement ByReference Ctor. This wraps the assignment of the ref into a byref-like field
3623 // in a value type. The canonical example of this is Span<T>. In effect this is just a
3624 // substitution. The parameter byref will be assigned into the newly allocated object.
3625 case CORINFO_INTRINSIC_ByReference_Ctor:
3627 // Remove call to constructor and directly assign the byref passed
3628 // to the call to the first slot of the ByReference struct.
3629 op1 = impPopStack().val;
3630 GenTreePtr thisptr = newobjThis;
3631 CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(clsHnd, 0);
3632 GenTreePtr field = gtNewFieldRef(TYP_BYREF, fldHnd, thisptr, 0, false);
3633 GenTreePtr assign = gtNewAssignNode(field, op1);
3634 GenTreePtr byReferenceStruct = gtCloneExpr(thisptr->gtGetOp1());
3635 assert(byReferenceStruct != nullptr);
3636 impPushOnStack(byReferenceStruct, typeInfo(TI_STRUCT, clsHnd));
3640 // Implement ptr value getter for ByReference struct.
3641 case CORINFO_INTRINSIC_ByReference_Value:
3643 op1 = impPopStack().val;
3644 CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(clsHnd, 0);
3645 GenTreePtr field = gtNewFieldRef(TYP_BYREF, fldHnd, op1, 0, false);
3649 case CORINFO_INTRINSIC_Span_GetItem:
3650 case CORINFO_INTRINSIC_ReadOnlySpan_GetItem:
3652             // Have index, stack pointer-to Span<T> s on the stack. Expand to:
3654             // For Span<T>
3656 // BoundsCheck(index, s->_length)
3657 // s->_pointer + index * sizeof(T)
3659 // For ReadOnlySpan<T>
3661 // BoundsCheck(index, s->_length)
3662 // *(s->_pointer + index * sizeof(T))
3664 // Signature should show one class type parameter, which
3665 // we need to examine.
3666 assert(sig->sigInst.classInstCount == 1);
3667 CORINFO_CLASS_HANDLE spanElemHnd = sig->sigInst.classInst[0];
3668 const unsigned elemSize = info.compCompHnd->getClassSize(spanElemHnd);
3669 assert(elemSize > 0);
3671 const bool isReadOnly = (intrinsicID == CORINFO_INTRINSIC_ReadOnlySpan_GetItem);
3673 JITDUMP("\nimpIntrinsic: Expanding %sSpan<T>.get_Item, T=%s, sizeof(T)=%u\n", isReadOnly ? "ReadOnly" : "",
3674 info.compCompHnd->getClassName(spanElemHnd), elemSize);
3676 GenTreePtr index = impPopStack().val;
3677 GenTreePtr ptrToSpan = impPopStack().val;
3678 GenTreePtr indexClone = nullptr;
3679 GenTreePtr ptrToSpanClone = nullptr;
3684 printf("with ptr-to-span\n");
3685 gtDispTree(ptrToSpan);
3686 printf("and index\n");
3689 #endif // defined(DEBUG)
3691 // We need to use both index and ptr-to-span twice, so clone or spill.
3692 index = impCloneExpr(index, &indexClone, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
3693 nullptr DEBUGARG("Span.get_Item index"));
3694 ptrToSpan = impCloneExpr(ptrToSpan, &ptrToSpanClone, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
3695 nullptr DEBUGARG("Span.get_Item ptrToSpan"));
3698 CORINFO_FIELD_HANDLE lengthHnd = info.compCompHnd->getFieldInClass(clsHnd, 1);
3699 const unsigned lengthOffset = info.compCompHnd->getFieldOffset(lengthHnd);
3700 GenTreePtr length = gtNewFieldRef(TYP_INT, lengthHnd, ptrToSpan, lengthOffset, false);
3701 GenTreePtr boundsCheck = new (this, GT_ARR_BOUNDS_CHECK)
3702 GenTreeBoundsChk(GT_ARR_BOUNDS_CHECK, TYP_VOID, index, length, SCK_RNGCHK_FAIL);
3705 GenTreePtr indexIntPtr = impImplicitIorI4Cast(indexClone, TYP_I_IMPL);
3706 GenTreePtr sizeofNode = gtNewIconNode(elemSize);
3707 GenTreePtr mulNode = gtNewOperNode(GT_MUL, TYP_I_IMPL, indexIntPtr, sizeofNode);
3708 CORINFO_FIELD_HANDLE ptrHnd = info.compCompHnd->getFieldInClass(clsHnd, 0);
3709 const unsigned ptrOffset = info.compCompHnd->getFieldOffset(ptrHnd);
3710 GenTreePtr data = gtNewFieldRef(TYP_BYREF, ptrHnd, ptrToSpanClone, ptrOffset, false);
3711 GenTreePtr result = gtNewOperNode(GT_ADD, TYP_BYREF, data, mulNode);
3714 var_types resultType = JITtype2varType(sig->retType);
3718 result = gtNewOperNode(GT_IND, resultType, result);
3722 assert(resultType == result->TypeGet());
3725 retNode = gtNewOperNode(GT_COMMA, resultType, boundsCheck, result);
3730 case CORINFO_INTRINSIC_GetRawHandle:
3732 noway_assert(IsTargetAbi(CORINFO_CORERT_ABI)); // Only CoreRT supports it.
3733 CORINFO_RESOLVED_TOKEN resolvedToken;
3734 resolvedToken.tokenContext = MAKE_METHODCONTEXT(info.compMethodHnd);
3735 resolvedToken.tokenScope = info.compScopeHnd;
3736 resolvedToken.token = memberRef;
3737 resolvedToken.tokenType = CORINFO_TOKENKIND_Method;
3739 CORINFO_GENERICHANDLE_RESULT embedInfo;
3740 info.compCompHnd->expandRawHandleIntrinsic(&resolvedToken, &embedInfo);
3742 GenTreePtr rawHandle = impLookupToTree(&resolvedToken, &embedInfo.lookup, gtTokenToIconFlags(memberRef),
3743 embedInfo.compileTimeHandle);
3744 if (rawHandle == nullptr)
3749 noway_assert(genTypeSize(rawHandle->TypeGet()) == genTypeSize(TYP_I_IMPL));
3751 unsigned rawHandleSlot = lvaGrabTemp(true DEBUGARG("rawHandle"));
3752 impAssignTempGen(rawHandleSlot, rawHandle, clsHnd, (unsigned)CHECK_SPILL_NONE);
3754 GenTreePtr lclVar = gtNewLclvNode(rawHandleSlot, TYP_I_IMPL);
3755 GenTreePtr lclVarAddr = gtNewOperNode(GT_ADDR, TYP_I_IMPL, lclVar);
3756 var_types resultType = JITtype2varType(sig->retType);
3757 retNode = gtNewOperNode(GT_IND, resultType, lclVarAddr);
3763 /* Unknown intrinsic */
3768     // Sample code showing how to use the new intrinsic mechanism.
3771 assert(retNode == nullptr);
3772 const char* className = nullptr;
3773 const char* namespaceName = nullptr;
3774 const char* methodName = info.compCompHnd->getMethodNameFromMetadata(method, &className, &namespaceName);
3776 if ((namespaceName != nullptr) && strcmp(namespaceName, "System") == 0)
3778 if ((className != nullptr) && strcmp(className, "Enum") == 0)
3780 if ((methodName != nullptr) && strcmp(methodName, "HasFlag") == 0)
3782 // Todo: plug in the intrinsic expansion
3783 JITDUMP("Found Intrinsic call to Enum.HasFlag\n");
3792 if (retNode == nullptr)
3794 NO_WAY("JIT must expand the intrinsic!");
3801 /*****************************************************************************/
3803 GenTreePtr Compiler::impArrayAccessIntrinsic(
3804 CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, int memberRef, bool readonlyCall, CorInfoIntrinsics intrinsicID)
3806 /* If we are generating SMALL_CODE, we don't want to use intrinsics for
3807        the following, as they generate fatter code.
3810 if (compCodeOpt() == SMALL_CODE)
3815 /* These intrinsics generate fatter (but faster) code and are only
3816 done if we don't need SMALL_CODE */
3818 unsigned rank = (intrinsicID == CORINFO_INTRINSIC_Array_Set) ? (sig->numArgs - 1) : sig->numArgs;
3820     // The rank 1 case is special because it has to handle two array formats;
3821     // we simply don't handle that case.
3822 if (rank > GT_ARR_MAX_RANK || rank <= 1)
3827 CORINFO_CLASS_HANDLE arrElemClsHnd = nullptr;
3828 var_types elemType = JITtype2varType(info.compCompHnd->getChildType(clsHnd, &arrElemClsHnd));
3830     // For the ref case, we will only be able to inline if the types match
3831     // (the verifier checks for this; we don't care about the non-verified case)
3832     // and the type is final (so we don't need to do the cast).
3833 if ((intrinsicID != CORINFO_INTRINSIC_Array_Get) && !readonlyCall && varTypeIsGC(elemType))
3835 // Get the call site signature
3836 CORINFO_SIG_INFO LocalSig;
3837 eeGetCallSiteSig(memberRef, info.compScopeHnd, impTokenLookupContextHandle, &LocalSig);
3838 assert(LocalSig.hasThis());
3840 CORINFO_CLASS_HANDLE actualElemClsHnd;
3842 if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
3844 // Fetch the last argument, the one that indicates the type we are setting.
3845 CORINFO_ARG_LIST_HANDLE argType = LocalSig.args;
3846 for (unsigned r = 0; r < rank; r++)
3848 argType = info.compCompHnd->getArgNext(argType);
3851 typeInfo argInfo = verParseArgSigToTypeInfo(&LocalSig, argType);
3852 actualElemClsHnd = argInfo.GetClassHandle();
3856 assert(intrinsicID == CORINFO_INTRINSIC_Array_Address);
3858 // Fetch the return type
3859 typeInfo retInfo = verMakeTypeInfo(LocalSig.retType, LocalSig.retTypeClass);
3860 assert(retInfo.IsByRef());
3861 actualElemClsHnd = retInfo.GetClassHandle();
3864 // if it's not final, we can't do the optimization
3865 if (!(info.compCompHnd->getClassAttribs(actualElemClsHnd) & CORINFO_FLG_FINAL))
3871 unsigned arrayElemSize;
3872 if (elemType == TYP_STRUCT)
3874 assert(arrElemClsHnd);
3876 arrayElemSize = info.compCompHnd->getClassSize(arrElemClsHnd);
3880 arrayElemSize = genTypeSize(elemType);
3883 if ((unsigned char)arrayElemSize != arrayElemSize)
3885 // arrayElemSize would be truncated as an unsigned char.
3886 // This means the array element is too large. Don't do the optimization.
3890 GenTreePtr val = nullptr;
3892 if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
3894 // Assignment of a struct is more work, and there are more gets than sets.
3895 if (elemType == TYP_STRUCT)
3900 val = impPopStack().val;
3901 assert(genActualType(elemType) == genActualType(val->gtType) ||
3902 (elemType == TYP_FLOAT && val->gtType == TYP_DOUBLE) ||
3903 (elemType == TYP_INT && val->gtType == TYP_BYREF) ||
3904 (elemType == TYP_DOUBLE && val->gtType == TYP_FLOAT));
3907 noway_assert((unsigned char)GT_ARR_MAX_RANK == GT_ARR_MAX_RANK);
3909 GenTreePtr inds[GT_ARR_MAX_RANK];
3910 for (unsigned k = rank; k > 0; k--)
3912 inds[k - 1] = impPopStack().val;
3915 GenTreePtr arr = impPopStack().val;
3916 assert(arr->gtType == TYP_REF);
3918 GenTreePtr arrElem =
3919 new (this, GT_ARR_ELEM) GenTreeArrElem(TYP_BYREF, arr, static_cast<unsigned char>(rank),
3920 static_cast<unsigned char>(arrayElemSize), elemType, &inds[0]);
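    // At this point arrElem denotes the element address &arr[inds[0], inds[1], ...].
    // As a summary of the code below: Array_Address returns it as-is, Array_Get wraps
    // it in an indirection, and Array_Set assigns 'val' through that indirection.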
3922 if (intrinsicID != CORINFO_INTRINSIC_Array_Address)
3924 arrElem = gtNewOperNode(GT_IND, elemType, arrElem);
3927 if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
3929 assert(val != nullptr);
3930 return gtNewAssignNode(arrElem, val);
3938 BOOL Compiler::verMergeEntryStates(BasicBlock* block, bool* changed)
3942 // do some basic checks first
3943 if (block->bbStackDepthOnEntry() != verCurrentState.esStackDepth)
3948 if (verCurrentState.esStackDepth > 0)
3950 // merge stack types
3951 StackEntry* parentStack = block->bbStackOnEntry();
3952 StackEntry* childStack = verCurrentState.esStack;
3954 for (i = 0; i < verCurrentState.esStackDepth; i++, parentStack++, childStack++)
3956 if (tiMergeToCommonParent(&parentStack->seTypeInfo, &childStack->seTypeInfo, changed) == FALSE)
3963 // merge initialization status of this ptr
3965 if (verTrackObjCtorInitState)
3967 // If we're tracking the CtorInitState, then it must not be unknown in the current state.
3968 assert(verCurrentState.thisInitialized != TIS_Bottom);
3970 // If the successor block's thisInit state is unknown, copy it from the current state.
3971 if (block->bbThisOnEntry() == TIS_Bottom)
3974 verSetThisInit(block, verCurrentState.thisInitialized);
3976 else if (verCurrentState.thisInitialized != block->bbThisOnEntry())
3978 if (block->bbThisOnEntry() != TIS_Top)
3981 verSetThisInit(block, TIS_Top);
3983 if (block->bbFlags & BBF_FAILED_VERIFICATION)
3985 // The block is bad. Control can flow through the block to any handler that catches the
3986 // verification exception, but the importer ignores bad blocks and therefore won't model
3987 // this flow in the normal way. To complete the merge into the bad block, the new state
3988 // needs to be manually pushed to the handlers that may be reached after the verification
3989 // exception occurs.
3991 // Usually, the new state was already propagated to the relevant handlers while processing
3992 // the predecessors of the bad block. The exception is when the bad block is at the start
// of a try region, meaning it is protected by additional handlers that do not protect its
// predecessors.
3996 if (block->hasTryIndex() && ((block->bbFlags & BBF_TRY_BEG) != 0))
3998 // Push TIS_Top to the handlers that protect the bad block. Note that this can cause
3999 // recursive calls back into this code path (if successors of the current bad block are
4000 // also bad blocks).
4002 ThisInitState origTIS = verCurrentState.thisInitialized;
4003 verCurrentState.thisInitialized = TIS_Top;
4004 impVerifyEHBlock(block, true);
4005 verCurrentState.thisInitialized = origTIS;
4013 assert(verCurrentState.thisInitialized == TIS_Bottom && block->bbThisOnEntry() == TIS_Bottom);
4019 /*****************************************************************************
 * 'logMsg' is true if a log message needs to be logged; false if the caller has
 * already logged it (presumably in a more detailed fashion than done here).
4022 * 'bVerificationException' is true for a verification exception, false for a
4023 * "call unauthorized by host" exception.
4026 void Compiler::verConvertBBToThrowVerificationException(BasicBlock* block DEBUGARG(bool logMsg))
4028 block->bbJumpKind = BBJ_THROW;
4029 block->bbFlags |= BBF_FAILED_VERIFICATION;
4031 impCurStmtOffsSet(block->bbCodeOffs);
4034 // we need this since BeginTreeList asserts otherwise
4035 impTreeList = impTreeLast = nullptr;
4036 block->bbFlags &= ~BBF_IMPORTED;
4040 JITLOG((LL_ERROR, "Verification failure: while compiling %s near IL offset %x..%xh \n", info.compFullName,
4041 block->bbCodeOffs, block->bbCodeOffsEnd));
4044 printf("\n\nVerification failure: %s near IL %xh \n", info.compFullName, block->bbCodeOffs);
4048 if (JitConfig.DebugBreakOnVerificationFailure())
// if the stack is non-empty, evaluate all the side effects
4057 if (verCurrentState.esStackDepth > 0)
4059 impEvalSideEffects();
4061 assert(verCurrentState.esStackDepth == 0);
GenTreePtr op1 =
    gtNewHelperCallNode(CORINFO_HELP_VERIFICATION, TYP_VOID, gtNewArgList(gtNewIconNode(block->bbCodeOffs)));
4065 // verCurrentState.esStackDepth = 0;
4066 impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
// The inliner is not able to handle methods that require a throw block, so
// make sure this method never gets inlined.
4070 info.compCompHnd->setMethodAttribs(info.compMethodHnd, CORINFO_FLG_BAD_INLINEE);
4073 /*****************************************************************************
4076 void Compiler::verHandleVerificationFailure(BasicBlock* block DEBUGARG(bool logMsg))
4079 // In AMD64, for historical reasons involving design limitations of JIT64, the VM has a
4080 // slightly different mechanism in which it calls the JIT to perform IL verification:
4081 // in the case of transparent methods the VM calls for a predicate IsVerifiable()
4082 // that consists of calling the JIT with the IMPORT_ONLY flag and with the IL verify flag on.
4083 // If the JIT determines the method is not verifiable, it should raise the exception to the VM and let
// it bubble up until reported by the runtime. Currently in RyuJIT, this method doesn't bubble
// up the exception; instead it embeds a throw inside the offending basic block and lets it
// fail at runtime when the jitted method executes.
4088 // For AMD64 we don't want this behavior when the JIT has been called only for verification (i.e.
4089 // with the IMPORT_ONLY and IL Verification flag set) because this won't actually generate code,
4090 // just try to find out whether to fail this method before even actually jitting it. So, in case
4091 // we detect these two conditions, instead of generating a throw statement inside the offending
// basic block, we immediately fail to JIT and notify the VM so the IsVerifiable() predicate
// returns false, making RyuJIT behave the same way JIT64 does.
4095 // The rationale behind this workaround is to avoid modifying the VM and maintain compatibility between JIT64 and
4096 // RyuJIT for the time being until we completely replace JIT64.
// TODO-ARM64-Cleanup: We probably want to actually modify the VM in the future to avoid the unnecessary two passes.
4099 // In AMD64 we must make sure we're behaving the same way as JIT64, meaning we should only raise the verification
4100 // exception if we are only importing and verifying. The method verNeedsVerification() can also modify the
4101 // tiVerificationNeeded flag in the case it determines it can 'skip verification' during importation and defer it
4102 // to a runtime check. That's why we must assert one or the other (since the flag tiVerificationNeeded can
4103 // be turned off during importation).
4104 CLANG_FORMAT_COMMENT_ANCHOR;
4106 #ifdef _TARGET_64BIT_
4109 bool canSkipVerificationResult =
4110 info.compCompHnd->canSkipMethodVerification(info.compMethodHnd) != CORINFO_VERIFICATION_CANNOT_SKIP;
4111 assert(tiVerificationNeeded || canSkipVerificationResult);
// Add the non-verifiable flag to the compiler
4115 if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IMPORT_ONLY))
4117 tiIsVerifiableCode = FALSE;
4119 #endif //_TARGET_64BIT_
4120 verResetCurrentState(block, &verCurrentState);
4121 verConvertBBToThrowVerificationException(block DEBUGARG(logMsg));
4124 impNoteLastILoffs(); // Remember at which BC offset the tree was finished
4128 /******************************************************************************/
4129 typeInfo Compiler::verMakeTypeInfo(CorInfoType ciType, CORINFO_CLASS_HANDLE clsHnd)
4131 assert(ciType < CORINFO_TYPE_COUNT);
4136 case CORINFO_TYPE_STRING:
4137 case CORINFO_TYPE_CLASS:
4138 tiResult = verMakeTypeInfo(clsHnd);
4139 if (!tiResult.IsType(TI_REF))
4140 { // type must be consistent with element type
4145 #ifdef _TARGET_64BIT_
4146 case CORINFO_TYPE_NATIVEINT:
4147 case CORINFO_TYPE_NATIVEUINT:
4150 // If we have more precise information, use it
4151 return verMakeTypeInfo(clsHnd);
4155 return typeInfo::nativeInt();
4158 #endif // _TARGET_64BIT_
4160 case CORINFO_TYPE_VALUECLASS:
4161 case CORINFO_TYPE_REFANY:
4162 tiResult = verMakeTypeInfo(clsHnd);
// type must be consistent with element type
4164 if (!tiResult.IsValueClass())
4169 case CORINFO_TYPE_VAR:
4170 return verMakeTypeInfo(clsHnd);
4172 case CORINFO_TYPE_PTR: // for now, pointers are treated as an error
4173 case CORINFO_TYPE_VOID:
4177 case CORINFO_TYPE_BYREF:
4179 CORINFO_CLASS_HANDLE childClassHandle;
4180 CorInfoType childType = info.compCompHnd->getChildType(clsHnd, &childClassHandle);
4181 return ByRef(verMakeTypeInfo(childType, childClassHandle));
4187 { // If we have more precise information, use it
4188 return typeInfo(TI_STRUCT, clsHnd);
4192 return typeInfo(JITtype2tiType(ciType));
4198 /******************************************************************************/
4200 typeInfo Compiler::verMakeTypeInfo(CORINFO_CLASS_HANDLE clsHnd, bool bashStructToRef /* = false */)
4202 if (clsHnd == nullptr)
4207 // Byrefs should only occur in method and local signatures, which are accessed
4208 // using ICorClassInfo and ICorClassInfo.getChildType.
4209 // So findClass() and getClassAttribs() should not be called for byrefs
4211 if (JITtype2varType(info.compCompHnd->asCorInfoType(clsHnd)) == TYP_BYREF)
4213 assert(!"Did findClass() return a Byref?");
4217 unsigned attribs = info.compCompHnd->getClassAttribs(clsHnd);
4219 if (attribs & CORINFO_FLG_VALUECLASS)
4221 CorInfoType t = info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd);
// Meta-data validation should ensure that CORINFO_TYPE_BYREF should
// not occur here, so we may want to change this to an assert instead.
4225 if (t == CORINFO_TYPE_VOID || t == CORINFO_TYPE_BYREF || t == CORINFO_TYPE_PTR)
4230 #ifdef _TARGET_64BIT_
4231 if (t == CORINFO_TYPE_NATIVEINT || t == CORINFO_TYPE_NATIVEUINT)
4233 return typeInfo::nativeInt();
4235 #endif // _TARGET_64BIT_
4237 if (t != CORINFO_TYPE_UNDEF)
4239 return (typeInfo(JITtype2tiType(t)));
4241 else if (bashStructToRef)
4243 return (typeInfo(TI_REF, clsHnd));
4247 return (typeInfo(TI_STRUCT, clsHnd));
4250 else if (attribs & CORINFO_FLG_GENERIC_TYPE_VARIABLE)
4252 // See comment in _typeInfo.h for why we do it this way.
4253 return (typeInfo(TI_REF, clsHnd, true));
4257 return (typeInfo(TI_REF, clsHnd));
4261 /******************************************************************************/
4262 BOOL Compiler::verIsSDArray(typeInfo ti)
4264 if (ti.IsNullObjRef())
4265 { // nulls are SD arrays
4269 if (!ti.IsType(TI_REF))
4274 if (!info.compCompHnd->isSDArray(ti.GetClassHandleForObjRef()))
4281 /******************************************************************************/
4282 /* Given 'arrayObjectType' which is an array type, fetch the element type. */
4283 /* Returns an error type if anything goes wrong */
4285 typeInfo Compiler::verGetArrayElemType(typeInfo arrayObjectType)
assert(!arrayObjectType.IsNullObjRef()); // you need to check for null explicitly since that is a success case
4289 if (!verIsSDArray(arrayObjectType))
4294 CORINFO_CLASS_HANDLE childClassHandle = nullptr;
4295 CorInfoType ciType = info.compCompHnd->getChildType(arrayObjectType.GetClassHandleForObjRef(), &childClassHandle);
4297 return verMakeTypeInfo(ciType, childClassHandle);
4300 /*****************************************************************************
4302 typeInfo Compiler::verParseArgSigToTypeInfo(CORINFO_SIG_INFO* sig, CORINFO_ARG_LIST_HANDLE args)
4304 CORINFO_CLASS_HANDLE classHandle;
4305 CorInfoType ciType = strip(info.compCompHnd->getArgType(sig, args, &classHandle));
4307 var_types type = JITtype2varType(ciType);
4308 if (varTypeIsGC(type))
4310 // For efficiency, getArgType only returns something in classHandle for
// value types. For other types that have additional type info, you
4312 // have to call back explicitly
4313 classHandle = info.compCompHnd->getArgClass(sig, args);
4316 NO_WAY("Could not figure out Class specified in argument or local signature");
4320 return verMakeTypeInfo(ciType, classHandle);
4323 /*****************************************************************************/
4325 // This does the expensive check to figure out whether the method
4326 // needs to be verified. It is called only when we fail verification,
4327 // just before throwing the verification exception.
4329 BOOL Compiler::verNeedsVerification()
4331 // If we have previously determined that verification is NOT needed
4332 // (for example in Compiler::compCompile), that means verification is really not needed.
4333 // Return the same decision we made before.
4334 // (Note: This literally means that tiVerificationNeeded can never go from 0 to 1.)
4336 if (!tiVerificationNeeded)
4338 return tiVerificationNeeded;
4341 assert(tiVerificationNeeded);
4343 // Ok, we haven't concluded that verification is NOT needed. Consult the EE now to
4344 // obtain the answer.
4345 CorInfoCanSkipVerificationResult canSkipVerificationResult =
4346 info.compCompHnd->canSkipMethodVerification(info.compMethodHnd);
// canSkipVerification will return one of the following values:
//    CORINFO_VERIFICATION_CANNOT_SKIP = 0,   // Cannot skip verification during jit time.
//    CORINFO_VERIFICATION_CAN_SKIP = 1,      // Can skip verification during jit time.
//    CORINFO_VERIFICATION_RUNTIME_CHECK = 2, // Skip verification during jit time,
//                                            // but need to insert a callout to the VM to ask during runtime
//                                            // whether to skip verification or not.
//    CORINFO_VERIFICATION_DONT_JIT = 3,      // Don't jit this method if verification fails (handled below).
4355 // Set tiRuntimeCalloutNeeded if canSkipVerification() instructs us to insert a callout for runtime check
4356 if (canSkipVerificationResult == CORINFO_VERIFICATION_RUNTIME_CHECK)
4358 tiRuntimeCalloutNeeded = true;
4361 if (canSkipVerificationResult == CORINFO_VERIFICATION_DONT_JIT)
4363 // Dev10 706080 - Testers don't like the assert, so just silence it
4364 // by not using the macros that invoke debugAssert.
4368 // When tiVerificationNeeded is true, JIT will do the verification during JIT time.
4369 // The following line means we will NOT do jit time verification if canSkipVerification
4370 // returns CORINFO_VERIFICATION_CAN_SKIP or CORINFO_VERIFICATION_RUNTIME_CHECK.
4371 tiVerificationNeeded = (canSkipVerificationResult == CORINFO_VERIFICATION_CANNOT_SKIP);
4372 return tiVerificationNeeded;
4375 BOOL Compiler::verIsByRefLike(const typeInfo& ti)
4381 if (!ti.IsType(TI_STRUCT))
4385 return info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_CONTAINS_STACK_PTR;
4388 BOOL Compiler::verIsSafeToReturnByRef(const typeInfo& ti)
4390 if (ti.IsPermanentHomeByRef())
4400 BOOL Compiler::verIsBoxable(const typeInfo& ti)
4402 return (ti.IsPrimitiveType() || ti.IsObjRef() // includes boxed generic type variables
4403 || ti.IsUnboxedGenericTypeVar() ||
4404 (ti.IsType(TI_STRUCT) &&
4405 // exclude byreflike structs
4406 !(info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_CONTAINS_STACK_PTR)));
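// For example (illustrative): int32, object refs (including boxed generic
// type variables), and ordinary value classes are boxable; value classes
// flagged with CORINFO_FLG_CONTAINS_STACK_PTR (byref-like structs such as
// TypedReference) are not.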
4409 // Is it a boxed value type?
4410 bool Compiler::verIsBoxedValueType(typeInfo ti)
4412 if (ti.GetType() == TI_REF)
4414 CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandleForObjRef();
4415 return !!eeIsValueClass(clsHnd);
4423 /*****************************************************************************
4425 * Check if a TailCall is legal.
4428 bool Compiler::verCheckTailCallConstraint(
4430 CORINFO_RESOLVED_TOKEN* pResolvedToken,
4431 CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, // Is this a "constrained." call on a type parameter?
bool speculative // If true, won't throw if verification fails. Instead it will
4433 // return false to the caller.
4434 // If false, it will throw.
4438 CORINFO_SIG_INFO sig;
4439 unsigned int popCount = 0; // we can't pop the stack since impImportCall needs it, so
// this counter is used to keep track of how many items have been
// virtually popped
4443 CORINFO_METHOD_HANDLE methodHnd = nullptr;
4444 CORINFO_CLASS_HANDLE methodClassHnd = nullptr;
4445 unsigned methodClassFlgs = 0;
4447 assert(impOpcodeIsCallOpcode(opcode));
4449 if (compIsForInlining())
4454 // for calli, VerifyOrReturn that this is not a virtual method
4455 if (opcode == CEE_CALLI)
4457 /* Get the call sig */
4458 eeGetSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &sig);
4460 // We don't know the target method, so we have to infer the flags, or
4461 // assume the worst-case.
4462 mflags = (sig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC;
4466 methodHnd = pResolvedToken->hMethod;
4468 mflags = info.compCompHnd->getMethodAttribs(methodHnd);
4470 // When verifying generic code we pair the method handle with its
4471 // owning class to get the exact method signature.
4472 methodClassHnd = pResolvedToken->hClass;
4473 assert(methodClassHnd);
4475 eeGetMethodSig(methodHnd, &sig, methodClassHnd);
4477 // opcode specific check
4478 methodClassFlgs = info.compCompHnd->getClassAttribs(methodClassHnd);
4481 // We must have got the methodClassHnd if opcode is not CEE_CALLI
4482 assert((methodHnd != nullptr && methodClassHnd != nullptr) || opcode == CEE_CALLI);
4484 if ((sig.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
4486 eeGetCallSiteSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &sig);
4489 // check compatibility of the arguments
4490 unsigned int argCount;
4491 argCount = sig.numArgs;
4492 CORINFO_ARG_LIST_HANDLE args;
4496 typeInfo tiDeclared = verParseArgSigToTypeInfo(&sig, args).NormaliseForStack();
4498 // check that the argument is not a byref for tailcalls
4499 VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclared), "tailcall on byrefs", speculative);
// For unsafe code, we might have parameters containing a pointer to a stack location.
// Disallow the tailcall for this kind.
4503 CORINFO_CLASS_HANDLE classHandle;
4504 CorInfoType ciType = strip(info.compCompHnd->getArgType(&sig, args, &classHandle));
4505 VerifyOrReturnSpeculative(ciType != CORINFO_TYPE_PTR, "tailcall on CORINFO_TYPE_PTR", speculative);
4507 args = info.compCompHnd->getArgNext(args);
4511 popCount += sig.numArgs;
4513 // check for 'this' which is on non-static methods, not called via NEWOBJ
4514 if (!(mflags & CORINFO_FLG_STATIC))
4516 // Always update the popCount.
4517 // This is crucial for the stack calculation to be correct.
4518 typeInfo tiThis = impStackTop(popCount).seTypeInfo;
4521 if (opcode == CEE_CALLI)
4523 // For CALLI, we don't know the methodClassHnd. Therefore, let's check the "this" object
4525 if (tiThis.IsValueClass())
4529 VerifyOrReturnSpeculative(!verIsByRefLike(tiThis), "byref in tailcall", speculative);
4533 // Check type compatibility of the this argument
4534 typeInfo tiDeclaredThis = verMakeTypeInfo(methodClassHnd);
4535 if (tiDeclaredThis.IsValueClass())
4537 tiDeclaredThis.MakeByRef();
4540 VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclaredThis), "byref in tailcall", speculative);
4544 // Tail calls on constrained calls should be illegal too:
4545 // when instantiated at a value type, a constrained call may pass the address of a stack allocated value
4546 VerifyOrReturnSpeculative(!pConstrainedResolvedToken, "byref in constrained tailcall", speculative);
4548 // Get the exact view of the signature for an array method
4549 if (sig.retType != CORINFO_TYPE_VOID)
4551 if (methodClassFlgs & CORINFO_FLG_ARRAY)
4553 assert(opcode != CEE_CALLI);
4554 eeGetCallSiteSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &sig);
4558 typeInfo tiCalleeRetType = verMakeTypeInfo(sig.retType, sig.retTypeClass);
4559 typeInfo tiCallerRetType =
4560 verMakeTypeInfo(info.compMethodInfo->args.retType, info.compMethodInfo->args.retTypeClass);
// void return type gets morphed into the error type, so we have to treat it specially here
4563 if (sig.retType == CORINFO_TYPE_VOID)
4565 VerifyOrReturnSpeculative(info.compMethodInfo->args.retType == CORINFO_TYPE_VOID, "tailcall return mismatch",
4570 VerifyOrReturnSpeculative(tiCompatibleWith(NormaliseForStack(tiCalleeRetType),
4571 NormaliseForStack(tiCallerRetType), true),
4572 "tailcall return mismatch", speculative);
4575 // for tailcall, stack must be empty
4576 VerifyOrReturnSpeculative(verCurrentState.esStackDepth == popCount, "stack non-empty on tailcall", speculative);
4578 return true; // Yes, tailcall is legal
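// For illustration (a hedged sketch of IL the checks above reject; Callee is
// a hypothetical target):
//     ldarg.0                            // arg 0 is "int32&"
//     tail. call int32 Callee(int32&)    // fails "tailcall on byrefs"
//     ret
// since the byref may point into the caller's frame, which is gone once the
// tailcall transfers control.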
4581 /*****************************************************************************
4583 * Checks the IL verification rules for the call
4586 void Compiler::verVerifyCall(OPCODE opcode,
4587 CORINFO_RESOLVED_TOKEN* pResolvedToken,
4588 CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
4591 const BYTE* delegateCreateStart,
4592 const BYTE* codeAddr,
4593 CORINFO_CALL_INFO* callInfo DEBUGARG(const char* methodName))
4596 CORINFO_SIG_INFO* sig = nullptr;
4597 unsigned int popCount = 0; // we can't pop the stack since impImportCall needs it, so
// this counter is used to keep track of how many items have been
// virtually popped
4601 // for calli, VerifyOrReturn that this is not a virtual method
4602 if (opcode == CEE_CALLI)
4604 Verify(false, "Calli not verifiable");
4608 //<NICE> It would be nice to cache the rest of it, but eeFindMethod is the big ticket item.
4609 mflags = callInfo->verMethodFlags;
4611 sig = &callInfo->verSig;
4613 if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
4615 eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
4618 // opcode specific check
4619 unsigned methodClassFlgs = callInfo->classFlags;
4623 // cannot do callvirt on valuetypes
4624 VerifyOrReturn(!(methodClassFlgs & CORINFO_FLG_VALUECLASS), "callVirt on value class");
4625 VerifyOrReturn(sig->hasThis(), "CallVirt on static method");
4630 assert(!tailCall); // Importer should not allow this
4631 VerifyOrReturn((mflags & CORINFO_FLG_CONSTRUCTOR) && !(mflags & CORINFO_FLG_STATIC),
4632 "newobj must be on instance");
4634 if (methodClassFlgs & CORINFO_FLG_DELEGATE)
4636 VerifyOrReturn(sig->numArgs == 2, "wrong number args to delegate ctor");
4637 typeInfo tiDeclaredObj = verParseArgSigToTypeInfo(sig, sig->args).NormaliseForStack();
4638 typeInfo tiDeclaredFtn =
4639 verParseArgSigToTypeInfo(sig, info.compCompHnd->getArgNext(sig->args)).NormaliseForStack();
4640 VerifyOrReturn(tiDeclaredFtn.IsNativeIntType(), "ftn arg needs to be a native int type");
4642 assert(popCount == 0);
4643 typeInfo tiActualObj = impStackTop(1).seTypeInfo;
4644 typeInfo tiActualFtn = impStackTop(0).seTypeInfo;
4646 VerifyOrReturn(tiActualFtn.IsMethod(), "delegate needs method as first arg");
4647 VerifyOrReturn(tiCompatibleWith(tiActualObj, tiDeclaredObj, true), "delegate object type mismatch");
4648 VerifyOrReturn(tiActualObj.IsNullObjRef() || tiActualObj.IsType(TI_REF),
4649 "delegate object type mismatch");
4651 CORINFO_CLASS_HANDLE objTypeHandle =
4652 tiActualObj.IsNullObjRef() ? nullptr : tiActualObj.GetClassHandleForObjRef();
4654 // the method signature must be compatible with the delegate's invoke method
4656 // check that for virtual functions, the type of the object used to get the
4657 // ftn ptr is the same as the type of the object passed to the delegate ctor.
// since this is a bit of work to determine in general, we pattern match stylized
// code sequences

// the delegate creation code check, which used to be done later, is now done here
// so we can read delegateMethodRef directly from
// the preceding LDFTN or CEE_LDVIRTFTN instruction sequence;
4664 // we then use it in our call to isCompatibleDelegate().
4666 mdMemberRef delegateMethodRef = mdMemberRefNil;
4667 VerifyOrReturn(verCheckDelegateCreation(delegateCreateStart, codeAddr, delegateMethodRef),
4668 "must create delegates with certain IL");
4670 CORINFO_RESOLVED_TOKEN delegateResolvedToken;
4671 delegateResolvedToken.tokenContext = impTokenLookupContextHandle;
4672 delegateResolvedToken.tokenScope = info.compScopeHnd;
4673 delegateResolvedToken.token = delegateMethodRef;
4674 delegateResolvedToken.tokenType = CORINFO_TOKENKIND_Method;
4675 info.compCompHnd->resolveToken(&delegateResolvedToken);
4677 CORINFO_CALL_INFO delegateCallInfo;
4678 eeGetCallInfo(&delegateResolvedToken, nullptr /* constraint typeRef */,
4679 addVerifyFlag(CORINFO_CALLINFO_SECURITYCHECKS), &delegateCallInfo);
4681 BOOL isOpenDelegate = FALSE;
4682 VerifyOrReturn(info.compCompHnd->isCompatibleDelegate(objTypeHandle, delegateResolvedToken.hClass,
4683 tiActualFtn.GetMethod(), pResolvedToken->hClass,
4685 "function incompatible with delegate");
4687 // check the constraints on the target method
4688 VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(delegateResolvedToken.hClass),
4689 "delegate target has unsatisfied class constraints");
4690 VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(delegateResolvedToken.hClass,
4691 tiActualFtn.GetMethod()),
4692 "delegate target has unsatisfied method constraints");
4694 // See ECMA spec section 1.8.1.5.2 (Delegating via instance dispatch)
4695 // for additional verification rules for delegates
4696 CORINFO_METHOD_HANDLE actualMethodHandle = tiActualFtn.GetMethod();
4697 DWORD actualMethodAttribs = info.compCompHnd->getMethodAttribs(actualMethodHandle);
4698 if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr))
4701 if ((actualMethodAttribs & CORINFO_FLG_VIRTUAL) && ((actualMethodAttribs & CORINFO_FLG_FINAL) == 0)
4703 && StrictCheckForNonVirtualCallToVirtualMethod()
4707 if (info.compCompHnd->shouldEnforceCallvirtRestriction(info.compScopeHnd))
4709 VerifyOrReturn(tiActualObj.IsThisPtr() && lvaIsOriginalThisReadOnly() ||
4710 verIsBoxedValueType(tiActualObj),
4711 "The 'this' parameter to the call must be either the calling method's "
4712 "'this' parameter or "
4713 "a boxed value type.");
4718 if (actualMethodAttribs & CORINFO_FLG_PROTECTED)
4720 BOOL targetIsStatic = actualMethodAttribs & CORINFO_FLG_STATIC;
4722 Verify(targetIsStatic || !isOpenDelegate,
4723 "Unverifiable creation of an open instance delegate for a protected member.");
4725 CORINFO_CLASS_HANDLE instanceClassHnd = (tiActualObj.IsNullObjRef() || targetIsStatic)
4727 : tiActualObj.GetClassHandleForObjRef();
4729 // In the case of protected methods, it is a requirement that the 'this'
4730 // pointer be a subclass of the current context. Perform this check.
4731 Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
4732 "Accessing protected method through wrong type.");
4737 // fall thru to default checks
4739 VerifyOrReturn(!(mflags & CORINFO_FLG_ABSTRACT), "method abstract");
4741 VerifyOrReturn(!((mflags & CORINFO_FLG_CONSTRUCTOR) && (methodClassFlgs & CORINFO_FLG_DELEGATE)),
4742 "can only newobj a delegate constructor");
4744 // check compatibility of the arguments
4745 unsigned int argCount;
4746 argCount = sig->numArgs;
4747 CORINFO_ARG_LIST_HANDLE args;
4751 typeInfo tiActual = impStackTop(popCount + argCount).seTypeInfo;
4753 typeInfo tiDeclared = verParseArgSigToTypeInfo(sig, args).NormaliseForStack();
4754 VerifyOrReturn(tiCompatibleWith(tiActual, tiDeclared, true), "type mismatch");
4756 args = info.compCompHnd->getArgNext(args);
4762 popCount += sig->numArgs;
// check for 'this' which is on non-static methods, not called via NEWOBJ
4765 CORINFO_CLASS_HANDLE instanceClassHnd = info.compClassHnd;
4766 if (!(mflags & CORINFO_FLG_STATIC) && (opcode != CEE_NEWOBJ))
4768 typeInfo tiThis = impStackTop(popCount).seTypeInfo;
4771 // If it is null, we assume we can access it (since it will AV shortly)
4772 // If it is anything but a reference class, there is no hierarchy, so
4773 // again, we don't need the precise instance class to compute 'protected' access
4774 if (tiThis.IsType(TI_REF))
4776 instanceClassHnd = tiThis.GetClassHandleForObjRef();
4779 // Check type compatibility of the this argument
4780 typeInfo tiDeclaredThis = verMakeTypeInfo(pResolvedToken->hClass);
4781 if (tiDeclaredThis.IsValueClass())
4783 tiDeclaredThis.MakeByRef();
// If this is a call to the base class .ctor, set thisPtr Init for
// this block.
4788 if (mflags & CORINFO_FLG_CONSTRUCTOR)
4790 if (verTrackObjCtorInitState && tiThis.IsThisPtr() &&
4791 verIsCallToInitThisPtr(info.compClassHnd, pResolvedToken->hClass))
4793 assert(verCurrentState.thisInitialized !=
4794 TIS_Bottom); // This should never be the case just from the logic of the verifier.
4795 VerifyOrReturn(verCurrentState.thisInitialized == TIS_Uninit,
4796 "Call to base class constructor when 'this' is possibly initialized");
4797 // Otherwise, 'this' is now initialized.
4798 verCurrentState.thisInitialized = TIS_Init;
4799 tiThis.SetInitialisedObjRef();
4803 // We allow direct calls to value type constructors
4804 // NB: we have to check that the contents of tiThis is a value type, otherwise we could use a
4805 // constrained callvirt to illegally re-enter a .ctor on a value of reference type.
4806 VerifyOrReturn(tiThis.IsByRef() && DereferenceByRef(tiThis).IsValueClass(),
4807 "Bad call to a constructor");
4811 if (pConstrainedResolvedToken != nullptr)
4813 VerifyOrReturn(tiThis.IsByRef(), "non-byref this type in constrained call");
4815 typeInfo tiConstraint = verMakeTypeInfo(pConstrainedResolvedToken->hClass);
4817 // We just dereference this and test for equality
4818 tiThis.DereferenceByRef();
4819 VerifyOrReturn(typeInfo::AreEquivalent(tiThis, tiConstraint),
4820 "this type mismatch with constrained type operand");
4822 // Now pretend the this type is the boxed constrained type, for the sake of subsequent checks
4823 tiThis = typeInfo(TI_REF, pConstrainedResolvedToken->hClass);
4826 // To support direct calls on readonly byrefs, just pretend tiDeclaredThis is readonly too
4827 if (tiDeclaredThis.IsByRef() && tiThis.IsReadonlyByRef())
4829 tiDeclaredThis.SetIsReadonlyByRef();
4832 VerifyOrReturn(tiCompatibleWith(tiThis, tiDeclaredThis, true), "this type mismatch");
4834 if (tiThis.IsByRef())
4836 // Find the actual type where the method exists (as opposed to what is declared
4837 // in the metadata). This is to prevent passing a byref as the "this" argument
4838 // while calling methods like System.ValueType.GetHashCode() which expect boxed objects.
4840 CORINFO_CLASS_HANDLE actualClassHnd = info.compCompHnd->getMethodClass(pResolvedToken->hMethod);
4841 VerifyOrReturn(eeIsValueClass(actualClassHnd),
4842 "Call to base type of valuetype (which is never a valuetype)");
4845 // Rules for non-virtual call to a non-final virtual method:
4848 // The "this" pointer is considered to be "possibly written" if
// 1. Its address has been taken (LDARGA 0) anywhere in the method.
4851 // 2. It has been stored to (STARG.0) anywhere in the method.
4853 // A non-virtual call to a non-final virtual method is only allowed if
4854 // 1. The this pointer passed to the callee is an instance of a boxed value type.
4856 // 2. The this pointer passed to the callee is the current method's this pointer.
4857 // (and) The current method's this pointer is not "possibly written".
4859 // Thus the rule is that if you assign to this ANYWHERE you can't make "base" calls to
// virtual methods. (Luckily this does not affect .ctors, since they are not virtual).
// This is stronger than is strictly needed, but implementing a laxer rule is significantly
// harder and more error prone.
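// For illustration (a hedged sketch): in a method that also contains
// "starg.0" or "ldarga.s 0", the non-virtual sequence
//     ldarg.0
//     call instance string System.Object::ToString()
// is rejected here, because 'this' is "possibly written" and ToString is a
// non-final virtual method.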
4864 if (opcode == CEE_CALL && (mflags & CORINFO_FLG_VIRTUAL) && ((mflags & CORINFO_FLG_FINAL) == 0)
4866 && StrictCheckForNonVirtualCallToVirtualMethod()
4870 if (info.compCompHnd->shouldEnforceCallvirtRestriction(info.compScopeHnd))
4873 tiThis.IsThisPtr() && lvaIsOriginalThisReadOnly() || verIsBoxedValueType(tiThis),
4874 "The 'this' parameter to the call must be either the calling method's 'this' parameter or "
4875 "a boxed value type.");
4880 // check any constraints on the callee's class and type parameters
4881 VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(pResolvedToken->hClass),
4882 "method has unsatisfied class constraints");
4883 VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(pResolvedToken->hClass, pResolvedToken->hMethod),
4884 "method has unsatisfied method constraints");
4886 if (mflags & CORINFO_FLG_PROTECTED)
4888 VerifyOrReturn(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
4889 "Can't access protected method");
4892 // Get the exact view of the signature for an array method
4893 if (sig->retType != CORINFO_TYPE_VOID)
4895 eeGetMethodSig(pResolvedToken->hMethod, sig, pResolvedToken->hClass);
4898 // "readonly." prefixed calls only allowed for the Address operation on arrays.
4899 // The methods supported by array types are under the control of the EE
4900 // so we can trust that only the Address operation returns a byref.
4903 typeInfo tiCalleeRetType = verMakeTypeInfo(sig->retType, sig->retTypeClass);
4904 VerifyOrReturn((methodClassFlgs & CORINFO_FLG_ARRAY) && tiCalleeRetType.IsByRef(),
4905 "unexpected use of readonly prefix");
4908 // Verify the tailcall
4911 verCheckTailCallConstraint(opcode, pResolvedToken, pConstrainedResolvedToken, false);
4915 /*****************************************************************************
4916 * Checks that a delegate creation is done using the following pattern:
 *     dup
 *     ldvirtftn targetMemberRef
 *  or
 *     ldftn targetMemberRef
4922 * 'delegateCreateStart' points at the last dup or ldftn in this basic block (null if
4923 * not in this basic block)
4925 * targetMemberRef is read from the code sequence.
4926 * targetMemberRef is validated iff verificationNeeded.
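 *
 *  For illustration (a hedged sketch of a matching creation sequence; C and
 *  SomeDelegate are hypothetical names):
 *      ldloc.0
 *      dup
 *      ldvirtftn  instance void C::M()
 *      newobj     instance void SomeDelegate::.ctor(object, native int)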
4929 BOOL Compiler::verCheckDelegateCreation(const BYTE* delegateCreateStart,
4930 const BYTE* codeAddr,
4931 mdMemberRef& targetMemberRef)
4933 if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr))
4935 targetMemberRef = getU4LittleEndian(&delegateCreateStart[2]);
4938 else if (impIsDUP_LDVIRTFTN_TOKEN(delegateCreateStart, codeAddr))
4940 targetMemberRef = getU4LittleEndian(&delegateCreateStart[3]);
4947 typeInfo Compiler::verVerifySTIND(const typeInfo& tiTo, const typeInfo& value, const typeInfo& instrType)
4949 Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
4950 typeInfo ptrVal = verVerifyLDIND(tiTo, instrType);
4951 typeInfo normPtrVal = typeInfo(ptrVal).NormaliseForStack();
4952 if (!tiCompatibleWith(value, normPtrVal, true))
4954 Verify(tiCompatibleWith(value, normPtrVal, true), "type mismatch");
4955 compUnsafeCastUsed = true;
4960 typeInfo Compiler::verVerifyLDIND(const typeInfo& ptr, const typeInfo& instrType)
4962 assert(!instrType.IsStruct());
4967 ptrVal = DereferenceByRef(ptr);
4968 if (instrType.IsObjRef() && !ptrVal.IsObjRef())
4970 Verify(false, "bad pointer");
4971 compUnsafeCastUsed = true;
4973 else if (!instrType.IsObjRef() && !typeInfo::AreEquivalent(instrType, ptrVal))
4975 Verify(false, "pointer not consistent with instr");
4976 compUnsafeCastUsed = true;
4981 Verify(false, "pointer not byref");
4982 compUnsafeCastUsed = true;
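// For example (illustrative): "ldind.i4" on a ByRef(int32) yields int32,
// while "ldind.ref" requires the dereferenced type to be an object ref; any
// other pointer/instruction pairing falls into the mismatch cases above.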
4988 // Verify that the field is used properly. 'tiThis' is NULL for statics,
// 'fieldFlags' is the field's attributes, and mutator is TRUE if it is a
4990 // ld*flda or a st*fld.
4991 // 'enclosingClass' is given if we are accessing a field in some specific type.
4993 void Compiler::verVerifyField(CORINFO_RESOLVED_TOKEN* pResolvedToken,
4994 const CORINFO_FIELD_INFO& fieldInfo,
4995 const typeInfo* tiThis,
4997 BOOL allowPlainStructAsThis)
4999 CORINFO_CLASS_HANDLE enclosingClass = pResolvedToken->hClass;
5000 unsigned fieldFlags = fieldInfo.fieldFlags;
5001 CORINFO_CLASS_HANDLE instanceClass =
5002 info.compClassHnd; // for statics, we imagine the instance is the current class.
5004 bool isStaticField = ((fieldFlags & CORINFO_FLG_FIELD_STATIC) != 0);
Verify(!(fieldFlags & CORINFO_FLG_FIELD_UNMANAGED), "mutating an RVA based static");
5008 if ((fieldFlags & CORINFO_FLG_FIELD_FINAL))
5010 Verify((info.compFlags & CORINFO_FLG_CONSTRUCTOR) && enclosingClass == info.compClassHnd &&
5011 info.compIsStatic == isStaticField,
5012 "bad use of initonly field (set or address taken)");
5016 if (tiThis == nullptr)
5018 Verify(isStaticField, "used static opcode with non-static field");
5022 typeInfo tThis = *tiThis;
5024 if (allowPlainStructAsThis && tThis.IsValueClass())
5029 // If it is null, we assume we can access it (since it will AV shortly)
// If it is anything but a reference class, there is no hierarchy, so
5031 // again, we don't need the precise instance class to compute 'protected' access
5032 if (tiThis->IsType(TI_REF))
5034 instanceClass = tiThis->GetClassHandleForObjRef();
5037 // Note that even if the field is static, we require that the this pointer
// satisfy the same constraints as a non-static field. This happens to
5039 // be simpler and seems reasonable
5040 typeInfo tiDeclaredThis = verMakeTypeInfo(enclosingClass);
5041 if (tiDeclaredThis.IsValueClass())
5043 tiDeclaredThis.MakeByRef();
// We allow a read-only tThis on any field access (even stores!) because if the
// class implementor wants to prohibit stores, they should make the field private.
// We do this by setting the read-only bit on the type we compare tThis to.
5048 tiDeclaredThis.SetIsReadonlyByRef();
5050 else if (verTrackObjCtorInitState && tThis.IsThisPtr())
5052 // Any field access is legal on "uninitialized" this pointers.
5053 // The easiest way to implement this is to simply set the
5054 // initialized bit for the duration of the type check on the
5055 // field access only. It does not change the state of the "this"
5056 // for the function as a whole. Note that the "tThis" is a copy
5057 // of the original "this" type (*tiThis) passed in.
5058 tThis.SetInitialisedObjRef();
5061 Verify(tiCompatibleWith(tThis, tiDeclaredThis, true), "this type mismatch");
5064 // Presently the JIT does not check that we don't store or take the address of init-only fields
5065 // since we cannot guarantee their immutability and it is not a security issue.
// check any constraints on the field's class --- accessing the field might cause a class constructor to run.
5068 VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(enclosingClass),
5069 "field has unsatisfied class constraints");
5070 if (fieldFlags & CORINFO_FLG_FIELD_PROTECTED)
5072 Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClass),
"Accessing protected field through wrong type.");
5077 void Compiler::verVerifyCond(const typeInfo& tiOp1, const typeInfo& tiOp2, unsigned opcode)
5079 if (tiOp1.IsNumberType())
5081 #ifdef _TARGET_64BIT_
5082 Verify(tiCompatibleWith(tiOp1, tiOp2, true), "Cond type mismatch");
5083 #else // _TARGET_64BIT
5084 // [10/17/2013] Consider changing this: to put on my verification lawyer hat,
5085 // this is non-conforming to the ECMA Spec: types don't have to be equivalent,
5086 // but compatible, since we can coalesce native int with int32 (see section III.1.5).
5087 Verify(typeInfo::AreEquivalent(tiOp1, tiOp2), "Cond type mismatch");
5088 #endif // !_TARGET_64BIT_
5090 else if (tiOp1.IsObjRef())
5102 Verify(FALSE, "Cond not allowed on object types");
5104 Verify(tiOp2.IsObjRef(), "Cond type mismatch");
5106 else if (tiOp1.IsByRef())
5108 Verify(tiOp2.IsByRef(), "Cond type mismatch");
5112 Verify(tiOp1.IsMethod() && tiOp2.IsMethod(), "Cond type mismatch");
5116 void Compiler::verVerifyThisPtrInitialised()
5118 if (verTrackObjCtorInitState)
5120 Verify(verCurrentState.thisInitialized == TIS_Init, "this ptr is not initialized");
5124 BOOL Compiler::verIsCallToInitThisPtr(CORINFO_CLASS_HANDLE context, CORINFO_CLASS_HANDLE target)
5126 // Either target == context, in this case calling an alternate .ctor
5127 // Or target is the immediate parent of context
5129 return ((target == context) || (target == info.compCompHnd->getParentType(context)));
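// For example (illustrative): inside Derived::.ctor, delegating to another
// Derived constructor or to Base::.ctor (the immediate parent) initializes
// 'this'; a call to a grandparent's .ctor does not qualify.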
5132 GenTreePtr Compiler::impImportLdvirtftn(GenTreePtr thisPtr,
5133 CORINFO_RESOLVED_TOKEN* pResolvedToken,
5134 CORINFO_CALL_INFO* pCallInfo)
5136 if ((pCallInfo->methodFlags & CORINFO_FLG_EnC) && !(pCallInfo->classFlags & CORINFO_FLG_INTERFACE))
5138 NO_WAY("Virtual call to a function added via EnC is not supported");
5141 // CoreRT generic virtual method
5142 if ((pCallInfo->sig.sigInst.methInstCount != 0) && IsTargetAbi(CORINFO_CORERT_ABI))
5144 GenTreePtr runtimeMethodHandle = nullptr;
5145 if (pCallInfo->exactContextNeedsRuntimeLookup)
5147 runtimeMethodHandle =
5148 impRuntimeLookupToTree(pResolvedToken, &pCallInfo->codePointerLookup, pCallInfo->hMethod);
5152 runtimeMethodHandle = gtNewIconEmbMethHndNode(pResolvedToken->hMethod);
5154 return gtNewHelperCallNode(CORINFO_HELP_GVMLOOKUP_FOR_SLOT, TYP_I_IMPL,
5155 gtNewArgList(thisPtr, runtimeMethodHandle));
5158 #ifdef FEATURE_READYTORUN_COMPILER
5159 if (opts.IsReadyToRun())
5161 if (!pCallInfo->exactContextNeedsRuntimeLookup)
5164 gtNewHelperCallNode(CORINFO_HELP_READYTORUN_VIRTUAL_FUNC_PTR, TYP_I_IMPL, gtNewArgList(thisPtr));
5166 call->setEntryPoint(pCallInfo->codePointerLookup.constLookup);
5171 // We need a runtime lookup. CoreRT has a ReadyToRun helper for that too.
5172 if (IsTargetAbi(CORINFO_CORERT_ABI))
5174 GenTreePtr ctxTree = getRuntimeContextTree(pCallInfo->codePointerLookup.lookupKind.runtimeLookupKind);
5176 return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL,
5177 gtNewArgList(ctxTree), &pCallInfo->codePointerLookup.lookupKind);
5182 // Get the exact descriptor for the static callsite
5183 GenTreePtr exactTypeDesc = impParentClassTokenToHandle(pResolvedToken);
5184 if (exactTypeDesc == nullptr)
5185 { // compDonotInline()
5189 GenTreePtr exactMethodDesc = impTokenToHandle(pResolvedToken);
5190 if (exactMethodDesc == nullptr)
5191 { // compDonotInline()
5195 GenTreeArgList* helpArgs = gtNewArgList(exactMethodDesc);
5197 helpArgs = gtNewListNode(exactTypeDesc, helpArgs);
5199 helpArgs = gtNewListNode(thisPtr, helpArgs);
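// (The resulting argument order is therefore: thisPtr, exactTypeDesc,
// exactMethodDesc.)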
5201 // Call helper function. This gets the target address of the final destination callsite.
5203 return gtNewHelperCallNode(CORINFO_HELP_VIRTUAL_FUNC_PTR, TYP_I_IMPL, helpArgs);
5206 //------------------------------------------------------------------------
5207 // impImportAndPushBox: build and import a value-type box
5210 // pResolvedToken - resolved token from the box operation
5216 // The value to be boxed is popped from the stack, and a tree for
5217 // the boxed value is pushed. This method may create upstream
5218 // statements, spill side effecting trees, and create new temps.
5220 // If importing an inlinee, we may also discover the inline must
5221 // fail. If so there is no new value pushed on the stack. Callers
5222 // should use CompDoNotInline after calling this method to see if
5223 // ongoing importation should be aborted.
5226 // Boxing of ref classes results in the same value as the value on
5227 // the top of the stack, so is handled inline in impImportBlockCode
// for the CEE_BOX case. Only value or primitive type boxes make it
// here.
5231 // Boxing for nullable types is done via a helper call; boxing
5232 // of other value types is expanded inline or handled via helper
5233 // call, depending on the jit's codegen mode.
5235 // When the jit is operating in size and time constrained modes,
5236 // using a helper call here can save jit time and code size. But it
// may also inhibit cleanup optimizations that could have had an
// even greater effect on code size and jit time. An optimal
5239 // strategy may need to peek ahead and see if it is easy to tell how
5240 // the box is being used. For now, we defer.
5242 void Compiler::impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken)
5244 // Spill any special side effects
5245 impSpillSpecialSideEff();
// Get the expression to box from the stack.
5248 GenTreePtr op1 = nullptr;
5249 GenTreePtr op2 = nullptr;
5250 StackEntry se = impPopStack();
5251 CORINFO_CLASS_HANDLE operCls = se.seTypeInfo.GetClassHandle();
5252 GenTreePtr exprToBox = se.val;
5254 // Look at what helper we should use.
5255 CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pResolvedToken->hClass);
5257 // Determine what expansion to prefer.
5259 // In size/time/debuggable constrained modes, the helper call
5260 // expansion for box is generally smaller and is preferred, unless
5261 // the value to box is a struct that comes from a call. In that
5262 // case the call can construct its return value directly into the
5263 // box payload, saving possibly some up-front zeroing.
5265 // Currently primitive type boxes always get inline expanded. We may
5266 // want to do the same for small structs if they don't come from
5267 // calls and don't have GC pointers, since explicitly copying such
5268 // structs is cheap.
5269 JITDUMP("\nCompiler::impImportAndPushBox -- handling BOX(value class) via");
5270 bool canExpandInline = (boxHelper == CORINFO_HELP_BOX);
5271 bool optForSize = !exprToBox->IsCall() && (operCls != nullptr) && (opts.compDbgCode || opts.MinOpts());
5272 bool expandInline = canExpandInline && !optForSize;
5276 JITDUMP(" inline allocate/copy sequence\n");
5278 // we are doing 'normal' boxing. This means that we can inline the box operation
5279 // Box(expr) gets morphed into
5280 // temp = new(clsHnd)
5281 // cpobj(temp+4, expr, clsHnd)
5283 // The code paths differ slightly below for structs and primitives because
5284 // "cpobj" differs in these cases. In one case you get
5285 // impAssignStructPtr(temp+4, expr, clsHnd)
// and the other you get
//    *(temp+4) = expr
5289 if (opts.MinOpts() || opts.compDbgCode)
5291 // For minopts/debug code, try and minimize the total number
5292 // of box temps by reusing an existing temp when possible.
5293 if (impBoxTempInUse || impBoxTemp == BAD_VAR_NUM)
5295 impBoxTemp = lvaGrabTemp(true DEBUGARG("Reusable Box Helper"));
5300 // When optimizing, use a new temp for each box operation
5301 // since we then know the exact class of the box temp.
5302 impBoxTemp = lvaGrabTemp(true DEBUGARG("Single-def Box Helper"));
5303 lvaTable[impBoxTemp].lvType = TYP_REF;
5304 const bool isExact = true;
5305 lvaSetClass(impBoxTemp, pResolvedToken->hClass, isExact);
// needs to stay in use until this box expression is appended to
// some other node. We approximate this by keeping it alive until
5310 // the opcode stack becomes empty
5311 impBoxTempInUse = true;
5313 #ifdef FEATURE_READYTORUN_COMPILER
5314 bool usingReadyToRunHelper = false;
5316 if (opts.IsReadyToRun())
5318 op1 = impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_NEW, TYP_REF);
5319 usingReadyToRunHelper = (op1 != nullptr);
5322 if (!usingReadyToRunHelper)
5325 // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
5326 // and the newfast call with a single call to a dynamic R2R cell that will:
5327 // 1) Load the context
5328 // 2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
5329 // 3) Allocate and return the new object for boxing
5330 // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
5332 // Ensure that the value class is restored
5333 op2 = impTokenToHandle(pResolvedToken, nullptr, TRUE /* mustRestoreHandle */);
5336 // We must be backing out of an inline.
5337 assert(compDonotInline());
5341 op1 = gtNewHelperCallNode(info.compCompHnd->getNewHelper(pResolvedToken, info.compMethodHnd), TYP_REF,
5345 /* Remember that this basic block contains 'new' of an object */
5346 compCurBB->bbFlags |= BBF_HAS_NEWOBJ;
5348 GenTreePtr asg = gtNewTempAssign(impBoxTemp, op1);
5350 GenTreePtr asgStmt = impAppendTree(asg, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
5352 op1 = gtNewLclvNode(impBoxTemp, TYP_REF);
5353 op2 = gtNewIconNode(sizeof(void*), TYP_I_IMPL);
5354 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, op2);
5356 if (varTypeIsStruct(exprToBox))
5358 assert(info.compCompHnd->getClassSize(pResolvedToken->hClass) == info.compCompHnd->getClassSize(operCls));
5359 op1 = impAssignStructPtr(op1, exprToBox, operCls, (unsigned)CHECK_SPILL_ALL);
5363 var_types lclTyp = exprToBox->TypeGet();
5364 if (lclTyp == TYP_BYREF)
5366 lclTyp = TYP_I_IMPL;
5368 CorInfoType jitType = info.compCompHnd->asCorInfoType(pResolvedToken->hClass);
5369 if (impIsPrimitive(jitType))
5371 lclTyp = JITtype2varType(jitType);
5373 assert(genActualType(exprToBox->TypeGet()) == genActualType(lclTyp) ||
5374 varTypeIsFloating(lclTyp) == varTypeIsFloating(exprToBox->TypeGet()));
5375 var_types srcTyp = exprToBox->TypeGet();
5376 var_types dstTyp = lclTyp;
5378 if (srcTyp != dstTyp)
5380 assert((varTypeIsFloating(srcTyp) && varTypeIsFloating(dstTyp)) ||
5381 (varTypeIsIntegral(srcTyp) && varTypeIsIntegral(dstTyp)));
5382 exprToBox = gtNewCastNode(dstTyp, exprToBox, dstTyp);
5384 op1 = gtNewAssignNode(gtNewOperNode(GT_IND, lclTyp, op1), exprToBox);
5387 // Spill eval stack to flush out any pending side effects.
5388 impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportAndPushBox"));
5390 // Set up this copy as a second assignment.
5391 GenTreePtr copyStmt = impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
5393 op1 = gtNewLclvNode(impBoxTemp, TYP_REF);
5395 // Record that this is a "box" node and keep track of the matching parts.
5396 op1 = new (this, GT_BOX) GenTreeBox(TYP_REF, op1, asgStmt, copyStmt);
5398 // If it is a value class, mark the "box" node. We can use this information
5399 // to optimise several cases:
5400 // "box(x) == null" --> false
5401 // "(box(x)).CallAnInterfaceMethod(...)" --> "(&x).CallAValueTypeMethod"
5402 // "(box(x)).CallAnObjectMethod(...)" --> "(&x).CallAValueTypeMethod"
5404 op1->gtFlags |= GTF_BOX_VALUE;
5405 assert(op1->IsBoxedValue());
5406 assert(asg->gtOper == GT_ASG);
5410 // Don't optimize, just call the helper and be done with it.
5411 JITDUMP(" helper call because: %s\n", canExpandInline ? "optimizing for size" : "nullable");
5412 assert(operCls != nullptr);
5414 // Ensure that the value class is restored
5415 op2 = impTokenToHandle(pResolvedToken, nullptr, TRUE /* mustRestoreHandle */);
5418 // We must be backing out of an inline.
5419 assert(compDonotInline());
5423 GenTreeArgList* args = gtNewArgList(op2, impGetStructAddr(exprToBox, operCls, (unsigned)CHECK_SPILL_ALL, true));
5424 op1 = gtNewHelperCallNode(boxHelper, TYP_REF, args);
5427 /* Push the result back on the stack, */
5428 /* even if clsHnd is a value class we want the TI_REF */
5429 typeInfo tiRetVal = typeInfo(TI_REF, info.compCompHnd->getTypeForBox(pResolvedToken->hClass));
5430 impPushOnStack(op1, tiRetVal);
5433 //------------------------------------------------------------------------
// impImportNewObjArray: Build and import `new` of multi-dimensional array
5437 // pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized
5438 // by a call to CEEInfo::resolveToken().
5439 // pCallInfo - The CORINFO_CALL_INFO that has been initialized
5440 // by a call to CEEInfo::getCallInfo().
5443 // The multi-dimensional array constructor arguments (array dimensions) are
5444 // pushed on the IL stack on entry to this method.
5447 // Multi-dimensional array constructors are imported as calls to a JIT
5448 // helper, not as regular calls.
5450 void Compiler::impImportNewObjArray(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo)
5452 GenTreePtr classHandle = impParentClassTokenToHandle(pResolvedToken);
5453 if (classHandle == nullptr)
5454 { // compDonotInline()
5458 assert(pCallInfo->sig.numArgs);
5461 GenTreeArgList* args;
5464 // There are two different JIT helpers that can be used to allocate
5465 // multi-dimensional arrays:
5467 // - CORINFO_HELP_NEW_MDARR - takes the array dimensions as varargs.
5468 // This variant is deprecated. It should be eventually removed.
5470 // - CORINFO_HELP_NEW_MDARR_NONVARARG - takes the array dimensions as
5471 // pointer to block of int32s. This variant is more portable.
5473 // The non-varargs helper is enabled for CoreRT only for now. Enabling this
5474 // unconditionally would require ReadyToRun version bump.
5476 CLANG_FORMAT_COMMENT_ANCHOR;
5478 if (!opts.IsReadyToRun() || IsTargetAbi(CORINFO_CORERT_ABI))
5480 LclVarDsc* newObjArrayArgsVar;
5482 // Reuse the temp used to pass the array dimensions to avoid bloating
5483 // the stack frame in case there are multiple calls to multi-dim array
5484 // constructors within a single method.
5485 if (lvaNewObjArrayArgs == BAD_VAR_NUM)
5487 lvaNewObjArrayArgs = lvaGrabTemp(false DEBUGARG("NewObjArrayArgs"));
5488 lvaTable[lvaNewObjArrayArgs].lvType = TYP_BLK;
5489 lvaTable[lvaNewObjArrayArgs].lvExactSize = 0;
5492 // Increase size of lvaNewObjArrayArgs to be the largest size needed to hold 'numArgs' integers
5493 // for our call to CORINFO_HELP_NEW_MDARR_NONVARARG.
5494 lvaTable[lvaNewObjArrayArgs].lvExactSize =
5495 max(lvaTable[lvaNewObjArrayArgs].lvExactSize, pCallInfo->sig.numArgs * sizeof(INT32));
5497 // The side-effects may include allocation of more multi-dimensional arrays. Spill all side-effects
5498 // to ensure that the shared lvaNewObjArrayArgs local variable is only ever used to pass arguments
5499 // to one allocation at a time.
5500 impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportNewObjArray"));
5503 // The arguments of the CORINFO_HELP_NEW_MDARR_NONVARARG helper are:
5504 // - Array class handle
5505 // - Number of dimension arguments
5506 // - Pointer to block of int32 dimensions - address of lvaNewObjArrayArgs temp.
5509 node = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK);
5510 node = gtNewOperNode(GT_ADDR, TYP_I_IMPL, node);
// Pop dimension arguments from the stack one at a time and store them
5513 // into lvaNewObjArrayArgs temp.
5514 for (int i = pCallInfo->sig.numArgs - 1; i >= 0; i--)
5516 GenTreePtr arg = impImplicitIorI4Cast(impPopStack().val, TYP_INT);
5518 GenTreePtr dest = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK);
5519 dest = gtNewOperNode(GT_ADDR, TYP_I_IMPL, dest);
5520 dest = gtNewOperNode(GT_ADD, TYP_I_IMPL, dest,
5521 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(INT32) * i));
5522 dest = gtNewOperNode(GT_IND, TYP_INT, dest);
5524 node = gtNewOperNode(GT_COMMA, node->TypeGet(), gtNewAssignNode(dest, arg), node);
5527 args = gtNewArgList(node);
5529 // pass number of arguments to the helper
5530 args = gtNewListNode(gtNewIconNode(pCallInfo->sig.numArgs), args);
5532 args = gtNewListNode(classHandle, args);
5534 node = gtNewHelperCallNode(CORINFO_HELP_NEW_MDARR_NONVARARG, TYP_REF, args);
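// Resulting call shape (a sketch): for a two-dimensional "new T[d0, d1]"
// this is roughly
//     CORINFO_HELP_NEW_MDARR_NONVARARG(classHandle, 2, &lvaNewObjArrayArgs)
// with the dimensions stored into the temp by the comma chain built above.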
5539 // The varargs helper needs the type and method handles as last
5540 // and last-1 param (this is a cdecl call, so args will be
5541 // pushed in reverse order on the CPU stack)
5544 args = gtNewArgList(classHandle);
5546 // pass number of arguments to the helper
5547 args = gtNewListNode(gtNewIconNode(pCallInfo->sig.numArgs), args);
5549 unsigned argFlags = 0;
5550 args = impPopList(pCallInfo->sig.numArgs, &pCallInfo->sig, args);
5552 node = gtNewHelperCallNode(CORINFO_HELP_NEW_MDARR, TYP_REF, args);
5554 // varargs, so we pop the arguments
5555 node->gtFlags |= GTF_CALL_POP_ARGS;
5558 // At the present time we don't track Caller pop arguments
5559 // that have GC references in them
5560 for (GenTreeArgList* temp = args; temp; temp = temp->Rest())
5562 assert(temp->Current()->gtType != TYP_REF);
5567 node->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
5568 node->gtCall.compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)pResolvedToken->hClass;
5570 // Remember that this basic block contains 'new' of a md array
5571 compCurBB->bbFlags |= BBF_HAS_NEWARRAY;
5573 impPushOnStack(node, typeInfo(TI_REF, pResolvedToken->hClass));
5576 GenTreePtr Compiler::impTransformThis(GenTreePtr thisPtr,
5577 CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
5578 CORINFO_THIS_TRANSFORM transform)
5582 case CORINFO_DEREF_THIS:
5584 GenTreePtr obj = thisPtr;
// This does a LDIND on the obj, which should be a byref pointing to a ref
5587 impBashVarAddrsToI(obj);
5588 assert(genActualType(obj->gtType) == TYP_I_IMPL || obj->gtType == TYP_BYREF);
5589 CorInfoType constraintTyp = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass);
5591 obj = gtNewOperNode(GT_IND, JITtype2varType(constraintTyp), obj);
// ldind could point anywhere, for example a boxed class static int
5593 obj->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
5598 case CORINFO_BOX_THIS:
5600 // Constraint calls where there might be no
5601 // unboxed entry point require us to implement the call via helper.
5602 // These only occur when a possible target of the call
5603 // may have inherited an implementation of an interface
5604 // method from System.Object or System.ValueType. The EE does not provide us with
5605 // "unboxed" versions of these methods.
5607 GenTreePtr obj = thisPtr;
5609 assert(obj->TypeGet() == TYP_BYREF || obj->TypeGet() == TYP_I_IMPL);
5610 obj = gtNewObjNode(pConstrainedResolvedToken->hClass, obj);
5611 obj->gtFlags |= GTF_EXCEPT;
5613 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass);
5614 var_types objType = JITtype2varType(jitTyp);
5615 if (impIsPrimitive(jitTyp))
5617 if (obj->OperIsBlk())
5619 obj->ChangeOperUnchecked(GT_IND);
// Obj could point anywhere, for example a boxed class static int
5622 obj->gtFlags |= GTF_IND_TGTANYWHERE;
5623 obj->gtOp.gtOp2 = nullptr; // must be zero for tree walkers
5626 obj->gtType = JITtype2varType(jitTyp);
5627 assert(varTypeIsArithmetic(obj->gtType));
5630 // This pushes on the dereferenced byref
5631 // This is then used immediately to box.
5632 impPushOnStack(obj, verMakeTypeInfo(pConstrainedResolvedToken->hClass).NormaliseForStack());
5634 // This pops off the byref-to-a-value-type remaining on the stack and
5635 // replaces it with a boxed object.
5636 // This is then used as the object to the virtual call immediately below.
5637 impImportAndPushBox(pConstrainedResolvedToken);
5638 if (compDonotInline())
5643 obj = impPopStack().val;
5646 case CORINFO_NO_THIS_TRANSFORM:
5652 //------------------------------------------------------------------------
// impCanPInvokeInline: check whether PInvoke inlining should be enabled in the current method.
5656 // true if PInvoke inlining should be enabled in current method, false otherwise
5659 // Checks a number of ambient conditions where we could pinvoke but choose not to
5661 bool Compiler::impCanPInvokeInline()
5663 return getInlinePInvokeEnabled() && (!opts.compDbgCode) && (compCodeOpt() != SMALL_CODE) &&
5664 (!opts.compNoPInvokeInlineCB) // profiler is preventing inline pinvoke
5668 //------------------------------------------------------------------------
5669 // impCanPInvokeInlineCallSite: basic legality checks using information
5670 // from a call to see if the call qualifies as an inline pinvoke.
// block - block containing the call, or for inlinees, block
5674 // containing the call being inlined
5677 // true if this call can legally qualify as an inline pinvoke, false otherwise
5680 // For runtimes that support exception handling interop there are
5681 // restrictions on using inline pinvoke in handler regions.
5683 // * We have to disable pinvoke inlining inside of filters because
5684 // in case the main execution (i.e. in the try block) is inside
5685 // unmanaged code, we cannot reuse the inlined stub (we still need
5686 // the original state until we are in the catch handler)
5688 // * We disable pinvoke inlining inside handlers since the GSCookie
5689 // is in the inlined Frame (see
5690 // CORINFO_EE_INFO::InlinedCallFrameInfo::offsetOfGSCookie), but
5691 // this would not protect framelets/return-address of handlers.
5693 // These restrictions are currently also in place for CoreCLR but
5694 // can be relaxed when coreclr/#8459 is addressed.
5696 bool Compiler::impCanPInvokeInlineCallSite(BasicBlock* block)
5698 if (block->hasHndIndex())
5703 // The remaining limitations do not apply to CoreRT
5704 if (IsTargetAbi(CORINFO_CORERT_ABI))
5709 #ifdef _TARGET_AMD64_
5710 // On x64, we disable pinvoke inlining inside of try regions.
5711 // Here is the comment from JIT64 explaining why:
5713 // [VSWhidbey: 611015] - because the jitted code links in the
5714 // Frame (instead of the stub) we rely on the Frame not being
5715 // 'active' until inside the stub. This normally happens by the
5716 // stub setting the return address pointer in the Frame object
5717 // inside the stub. On a normal return, the return address
5718 // pointer is zeroed out so the Frame can be safely re-used, but
5719 // if an exception occurs, nobody zeros out the return address
5720 // pointer. Thus if we re-used the Frame object, it would go
5721 // 'active' as soon as we link it into the Frame chain.
5723 // Technically we only need to disable PInvoke inlining if we're
5724 // in a handler or if we're in a try body with a catch or
5725 // filter/except where other non-handler code in this method
5726 // might run and try to re-use the dirty Frame object.
5728 // A desktop test case where this seems to matter is
5729 // jit\jit64\ebvts\mcpp\sources2\ijw\__clrcall\vector_ctor_dtor.02\deldtor_clr.exe
5730 if (block->hasTryIndex())
5734 #endif // _TARGET_AMD64_
5739 //------------------------------------------------------------------------
5740 // impCheckForPInvokeCall examine call to see if it is a pinvoke and if so
5741 // if it can be expressed as an inline pinvoke.
5744 // call - tree for the call
5745 // methHnd - handle for the method being called (may be null)
5746 // sig - signature of the method being called
5747 // mflags - method flags for the method being called
// block - block containing the call, or for inlinees, block
5749 // containing the call being inlined
5752 // Sets GTF_CALL_M_PINVOKE on the call for pinvokes.
5754 // Also sets GTF_CALL_UNMANAGED on call for inline pinvokes if the
// call passes a combination of legality and profitability checks.
5757 // If GTF_CALL_UNMANAGED is set, increments info.compCallUnmanaged
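// For illustration (hypothetical C# source, not from the original code):
// a call site such as
//
//     [DllImport("kernel32.dll")]
//     static extern uint GetTickCount();
//     ...
//     uint t = GetTickCount();
//
// arrives here with CORINFO_FLG_PINVOKE set in mflags. If the legality and
// profitability checks below pass, the call is marked GTF_CALL_UNMANAGED
// and the managed-to-native transition is inlined at the call site.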
5759 void Compiler::impCheckForPInvokeCall(
5760 GenTreeCall* call, CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* sig, unsigned mflags, BasicBlock* block)
5762 CorInfoUnmanagedCallConv unmanagedCallConv;
5764 // If VM flagged it as Pinvoke, flag the call node accordingly
5765 if ((mflags & CORINFO_FLG_PINVOKE) != 0)
5767 call->gtCallMoreFlags |= GTF_CALL_M_PINVOKE;
5772 if ((mflags & CORINFO_FLG_PINVOKE) == 0 || (mflags & CORINFO_FLG_NOSECURITYWRAP) == 0)
5777 unmanagedCallConv = info.compCompHnd->getUnmanagedCallConv(methHnd);
5781 CorInfoCallConv callConv = CorInfoCallConv(sig->callConv & CORINFO_CALLCONV_MASK);
5782 if (callConv == CORINFO_CALLCONV_NATIVEVARARG)
5784 // Used by the IL Stubs.
5785 callConv = CORINFO_CALLCONV_C;
5787 static_assert_no_msg((unsigned)CORINFO_CALLCONV_C == (unsigned)CORINFO_UNMANAGED_CALLCONV_C);
5788 static_assert_no_msg((unsigned)CORINFO_CALLCONV_STDCALL == (unsigned)CORINFO_UNMANAGED_CALLCONV_STDCALL);
5789 static_assert_no_msg((unsigned)CORINFO_CALLCONV_THISCALL == (unsigned)CORINFO_UNMANAGED_CALLCONV_THISCALL);
5790 unmanagedCallConv = CorInfoUnmanagedCallConv(callConv);
5792 assert(!call->gtCallCookie);
5795 if (unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_C && unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_STDCALL &&
5796 unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_THISCALL)
5800 optNativeCallCount++;
5802 if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && methHnd == nullptr)
5804 // PInvoke CALLI in IL stubs must be inlined
5809 if (!impCanPInvokeInlineCallSite(block))
// PInvoke CALL in IL stubs must be inlined on CoreRT. Skip the ambient condition checks and
5815 // profitability checks
5816 if (!(opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && IsTargetAbi(CORINFO_CORERT_ABI)))
5818 if (!impCanPInvokeInline())
5823 // Size-speed tradeoff: don't use inline pinvoke at rarely
// executed call sites. The non-inline version is more compact.
5826 if (block->isRunRarely())
5832 // The expensive check should be last
5833 if (info.compCompHnd->pInvokeMarshalingRequired(methHnd, sig))
5839 JITLOG((LL_INFO1000000, "\nInline a CALLI PINVOKE call from method %s", info.compFullName));
5841 call->gtFlags |= GTF_CALL_UNMANAGED;
5842 info.compCallUnmanaged++;
5844 // AMD64 convention is same for native and managed
5845 if (unmanagedCallConv == CORINFO_UNMANAGED_CALLCONV_C)
5847 call->gtFlags |= GTF_CALL_POP_ARGS;
5850 if (unmanagedCallConv == CORINFO_UNMANAGED_CALLCONV_THISCALL)
5852 call->gtCallMoreFlags |= GTF_CALL_M_UNMGD_THISCALL;
5856 GenTreeCall* Compiler::impImportIndirectCall(CORINFO_SIG_INFO* sig, IL_OFFSETX ilOffset)
5858 var_types callRetTyp = JITtype2varType(sig->retType);
/* The function pointer is on top of the stack - it may be a
5861 * complex expression. As it is evaluated after the args,
5862 * it may cause registered args to be spilled. Simply spill it.
5865 // Ignore this trivial case.
5866 if (impStackTop().val->gtOper != GT_LCL_VAR)
5868 impSpillStackEntry(verCurrentState.esStackDepth - 1,
5869 BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impImportIndirectCall"));
5872 /* Get the function pointer */
5874 GenTreePtr fptr = impPopStack().val;
// The function pointer is typically sized to match the target pointer size.
5877 // However, stubgen IL optimization can change LDC.I8 to LDC.I4
5878 // See ILCodeStream::LowerOpcode
5879 assert(genActualType(fptr->gtType) == TYP_I_IMPL || genActualType(fptr->gtType) == TYP_INT);
5882 // This temporary must never be converted to a double in stress mode,
5883 // because that can introduce a call to the cast helper after the
5884 // arguments have already been evaluated.
5886 if (fptr->OperGet() == GT_LCL_VAR)
5888 lvaTable[fptr->gtLclVarCommon.gtLclNum].lvKeepType = 1;
5892 /* Create the call node */
5894 GenTreeCall* call = gtNewIndCallNode(fptr, callRetTyp, nullptr, ilOffset);
5896 call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
5901 /*****************************************************************************/
5903 void Compiler::impPopArgsForUnmanagedCall(GenTreePtr call, CORINFO_SIG_INFO* sig)
5905 assert(call->gtFlags & GTF_CALL_UNMANAGED);
/* Since we push the arguments in reverse order (i.e. right -> left),
 * spill any side effects from the stack.
 * OBS: If there is only one side effect, we do not need to spill it;
 * thus we spill all side effects except the last one.
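 *
 * For illustration (hypothetical example): for an unmanaged x86 stdcall
 * f(g(), h()), the IL pushes the arguments left to right, but the arg list
 * is reversed for the native call; spilling g()'s side effect to a temp
 * preserves the g-before-h evaluation order, while the last side effect
 * can safely remain on the stack.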
5914 unsigned lastLevelWithSideEffects = UINT_MAX;
5916 unsigned argsToReverse = sig->numArgs;
5918 // For "thiscall", the first argument goes in a register. Since its
5919 // order does not need to be changed, we do not need to spill it
5921 if (call->gtCall.gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
5923 assert(argsToReverse);
5927 #ifndef _TARGET_X86_
5928 // Don't reverse args on ARM or x64 - first four args always placed in regs in order
5932 for (unsigned level = verCurrentState.esStackDepth - argsToReverse; level < verCurrentState.esStackDepth; level++)
5934 if (verCurrentState.esStack[level].val->gtFlags & GTF_ORDER_SIDEEFF)
5936 assert(lastLevelWithSideEffects == UINT_MAX);
5938 impSpillStackEntry(level,
5939 BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - other side effect"));
5941 else if (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT)
5943 if (lastLevelWithSideEffects != UINT_MAX)
5945 /* We had a previous side effect - must spill it */
5946 impSpillStackEntry(lastLevelWithSideEffects,
5947 BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - side effect"));
5949 /* Record the level for the current side effect in case we will spill it */
5950 lastLevelWithSideEffects = level;
5954 /* This is the first side effect encountered - record its level */
5956 lastLevelWithSideEffects = level;
5961 /* The argument list is now "clean" - no out-of-order side effects
5962 * Pop the argument list in reverse order */
5964 GenTreePtr args = call->gtCall.gtCallArgs = impPopRevList(sig->numArgs, sig, sig->numArgs - argsToReverse);
5966 if (call->gtCall.gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
5968 GenTreePtr thisPtr = args->Current();
5969 impBashVarAddrsToI(thisPtr);
5970 assert(thisPtr->TypeGet() == TYP_I_IMPL || thisPtr->TypeGet() == TYP_BYREF);
5975 call->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
5979 //------------------------------------------------------------------------
5980 // impInitClass: Build a node to initialize the class before accessing the
5981 // field if necessary
5984 // pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized
5985 // by a call to CEEInfo::resolveToken().
5987 // Return Value: If needed, a pointer to the node that will perform the class
// initialization. Otherwise, nullptr.
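// For illustration (hypothetical C# source): the first access to S.Field
// below may require running S's static constructor, which this function
// arranges via a helper call:
//
//     static class S { static S() { /* ... */ } public static int Field; }
//     int x = S.Field;   // may expand to: CORINFO_HELP_INITCLASS(S); then the load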
5991 GenTreePtr Compiler::impInitClass(CORINFO_RESOLVED_TOKEN* pResolvedToken)
5993 CorInfoInitClassResult initClassResult =
5994 info.compCompHnd->initClass(pResolvedToken->hField, info.compMethodHnd, impTokenLookupContextHandle);
5996 if ((initClassResult & CORINFO_INITCLASS_USE_HELPER) == 0)
6002 GenTreePtr node = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup);
6004 if (node == nullptr)
6006 assert(compDonotInline());
6012 node = gtNewHelperCallNode(CORINFO_HELP_INITCLASS, TYP_VOID, gtNewArgList(node));
// Call the shared non-gc static helper, as it's the fastest
6017 node = fgGetSharedCCtor(pResolvedToken->hClass);
6023 GenTreePtr Compiler::impImportStaticReadOnlyField(void* fldAddr, var_types lclTyp)
6025 GenTreePtr op1 = nullptr;
6034 ival = *((bool*)fldAddr);
6038 ival = *((signed char*)fldAddr);
6042 ival = *((unsigned char*)fldAddr);
6046 ival = *((short*)fldAddr);
6051 ival = *((unsigned short*)fldAddr);
6056 ival = *((int*)fldAddr);
6058 op1 = gtNewIconNode(ival);
6063 lval = *((__int64*)fldAddr);
6064 op1 = gtNewLconNode(lval);
6068 dval = *((float*)fldAddr);
6069 op1 = gtNewDconNode(dval);
6070 #if !FEATURE_X87_DOUBLES
6071 // X87 stack doesn't differentiate between float/double
6072 // so R4 is treated as R8, but everybody else does
6073 op1->gtType = TYP_FLOAT;
6074 #endif // FEATURE_X87_DOUBLES
6078 dval = *((double*)fldAddr);
6079 op1 = gtNewDconNode(dval);
6083 assert(!"Unexpected lclTyp");
6090 GenTreePtr Compiler::impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolvedToken,
6091 CORINFO_ACCESS_FLAGS access,
6092 CORINFO_FIELD_INFO* pFieldInfo,
6097 switch (pFieldInfo->fieldAccessor)
6099 case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
6101 assert(!compIsForInlining());
6103 // We first call a special helper to get the statics base pointer
6104 op1 = impParentClassTokenToHandle(pResolvedToken);
// compIsForInlining() is false so we should never get NULL here
6107 assert(op1 != nullptr);
6109 var_types type = TYP_BYREF;
6111 switch (pFieldInfo->helper)
6113 case CORINFO_HELP_GETGENERICS_NONGCTHREADSTATIC_BASE:
6116 case CORINFO_HELP_GETGENERICS_GCSTATIC_BASE:
6117 case CORINFO_HELP_GETGENERICS_NONGCSTATIC_BASE:
6118 case CORINFO_HELP_GETGENERICS_GCTHREADSTATIC_BASE:
6121 assert(!"unknown generic statics helper");
6125 op1 = gtNewHelperCallNode(pFieldInfo->helper, type, gtNewArgList(op1));
6127 FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
6128 op1 = gtNewOperNode(GT_ADD, type, op1,
6129 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, pFieldInfo->offset, fs));
6133 case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
6135 #ifdef FEATURE_READYTORUN_COMPILER
6136 if (opts.IsReadyToRun())
6138 unsigned callFlags = 0;
6140 if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT)
6142 callFlags |= GTF_CALL_HOISTABLE;
6145 op1 = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_STATIC_BASE, TYP_BYREF);
6146 op1->gtFlags |= callFlags;
6148 op1->gtCall.setEntryPoint(pFieldInfo->fieldLookup);
6153 op1 = fgGetStaticsCCtorHelper(pResolvedToken->hClass, pFieldInfo->helper);
6157 FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
6158 op1 = gtNewOperNode(GT_ADD, op1->TypeGet(), op1,
6159 new (this, GT_CNS_INT) GenTreeIntCon(TYP_INT, pFieldInfo->offset, fs));
6164 case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
6166 #ifdef FEATURE_READYTORUN_COMPILER
6167 noway_assert(opts.IsReadyToRun());
6168 CORINFO_LOOKUP_KIND kind = info.compCompHnd->getLocationOfThisType(info.compMethodHnd);
6169 assert(kind.needsRuntimeLookup);
6171 GenTreePtr ctxTree = getRuntimeContextTree(kind.runtimeLookupKind);
6172 GenTreeArgList* args = gtNewArgList(ctxTree);
6174 unsigned callFlags = 0;
6176 if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT)
6178 callFlags |= GTF_CALL_HOISTABLE;
6180 var_types type = TYP_BYREF;
6181 op1 = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE, type, args);
6182 op1->gtFlags |= callFlags;
6184 op1->gtCall.setEntryPoint(pFieldInfo->fieldLookup);
6185 FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
6186 op1 = gtNewOperNode(GT_ADD, type, op1,
6187 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, pFieldInfo->offset, fs));
6190 #endif // FEATURE_READYTORUN_COMPILER
6196 if (!(access & CORINFO_ACCESS_ADDRESS))
6198 // In future, it may be better to just create the right tree here instead of folding it later.
6199 op1 = gtNewFieldRef(lclTyp, pResolvedToken->hField);
6201 if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
6203 op1->gtFlags |= GTF_FLD_INITCLASS;
6206 if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP)
6208 op1->gtType = TYP_REF; // points at boxed object
6209 FieldSeqNode* firstElemFldSeq =
6210 GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField);
op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
6213 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(void*), firstElemFldSeq));
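// For illustration (layout note, an assumption spelled out here): the boxed
// static lives on the GC heap as an object whose payload starts just past
// the method table pointer, which is why sizeof(void*) is added above to
// reach the field data.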
6215 if (varTypeIsStruct(lclTyp))
6217 // Constructor adds GTF_GLOB_REF. Note that this is *not* GTF_EXCEPT.
6218 op1 = gtNewObjNode(pFieldInfo->structType, op1);
6222 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
6223 op1->gtFlags |= GTF_GLOB_REF | GTF_IND_NONFAULTING;
6231 void** pFldAddr = nullptr;
6232 void* fldAddr = info.compCompHnd->getFieldAddress(pResolvedToken->hField, (void**)&pFldAddr);
6234 FieldSeqNode* fldSeq = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
6236 /* Create the data member node */
6237 op1 = gtNewIconHandleNode(pFldAddr == nullptr ? (size_t)fldAddr : (size_t)pFldAddr, GTF_ICON_STATIC_HDL,
6240 if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
6242 op1->gtFlags |= GTF_ICON_INITCLASS;
6245 if (pFldAddr != nullptr)
// There are two cases here: either the static is RVA-based,
// in which case the type of the FIELD node is not a GC type
// and the handle to the RVA is a TYP_I_IMPL; or the FIELD node is
// a GC type and the handle to it is a TYP_BYREF into the GC heap,
// because handles to statics now go into the large object heap.
6253 var_types handleTyp = (var_types)(varTypeIsGC(lclTyp) ? TYP_BYREF : TYP_I_IMPL);
6254 op1 = gtNewOperNode(GT_IND, handleTyp, op1);
6255 op1->gtFlags |= GTF_IND_INVARIANT | GTF_IND_NONFAULTING;
6262 if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP)
6264 op1 = gtNewOperNode(GT_IND, TYP_REF, op1);
6266 FieldSeqNode* fldSeq = GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField);
6268 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
6269 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(void*), fldSeq));
6272 if (!(access & CORINFO_ACCESS_ADDRESS))
6274 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
6275 op1->gtFlags |= GTF_GLOB_REF;
6281 // In general try to call this before most of the verification work. Most people expect the access
// exceptions before the verification exceptions. If you do this after, that usually doesn't happen. It turns
// out that if you can't access something, we also consider you unverifiable for other reasons.
6284 void Compiler::impHandleAccessAllowed(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall)
6286 if (result != CORINFO_ACCESS_ALLOWED)
6288 impHandleAccessAllowedInternal(result, helperCall);
6292 void Compiler::impHandleAccessAllowedInternal(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall)
6296 case CORINFO_ACCESS_ALLOWED:
6298 case CORINFO_ACCESS_ILLEGAL:
6299 // if we're verifying, then we need to reject the illegal access to ensure that we don't think the
6300 // method is verifiable. Otherwise, delay the exception to runtime.
6301 if (compIsForImportOnly())
6303 info.compCompHnd->ThrowExceptionForHelper(helperCall);
6307 impInsertHelperCall(helperCall);
6310 case CORINFO_ACCESS_RUNTIME_CHECK:
6311 impInsertHelperCall(helperCall);
6316 void Compiler::impInsertHelperCall(CORINFO_HELPER_DESC* helperInfo)
6318 // Construct the argument list
6319 GenTreeArgList* args = nullptr;
6320 assert(helperInfo->helperNum != CORINFO_HELP_UNDEF);
6321 for (unsigned i = helperInfo->numArgs; i > 0; --i)
6323 const CORINFO_HELPER_ARG& helperArg = helperInfo->args[i - 1];
6324 GenTreePtr currentArg = nullptr;
6325 switch (helperArg.argType)
6327 case CORINFO_HELPER_ARG_TYPE_Field:
6328 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(
6329 info.compCompHnd->getFieldClass(helperArg.fieldHandle));
6330 currentArg = gtNewIconEmbFldHndNode(helperArg.fieldHandle);
6332 case CORINFO_HELPER_ARG_TYPE_Method:
6333 info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(helperArg.methodHandle);
6334 currentArg = gtNewIconEmbMethHndNode(helperArg.methodHandle);
6336 case CORINFO_HELPER_ARG_TYPE_Class:
6337 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(helperArg.classHandle);
6338 currentArg = gtNewIconEmbClsHndNode(helperArg.classHandle);
6340 case CORINFO_HELPER_ARG_TYPE_Module:
6341 currentArg = gtNewIconEmbScpHndNode(helperArg.moduleHandle);
6343 case CORINFO_HELPER_ARG_TYPE_Const:
6344 currentArg = gtNewIconNode(helperArg.constant);
6347 NO_WAY("Illegal helper arg type");
args = (args == nullptr) ? gtNewArgList(currentArg) : gtNewListNode(currentArg, args);
6353 * Mark as CSE'able, and hoistable. Consider marking hoistable unless you're in the inlinee.
6354 * Also, consider sticking this in the first basic block.
6356 GenTreePtr callout = gtNewHelperCallNode(helperInfo->helperNum, TYP_VOID, args);
6357 impAppendTree(callout, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
6360 // Checks whether the return types of caller and callee are compatible
// so that the callee can be tail called. Note that here we don't check
// compatibility in the IL Verifier sense, but rather that the return type
// sizes are equal and the values get returned in the same return register.
6364 bool Compiler::impTailCallRetTypeCompatible(var_types callerRetType,
6365 CORINFO_CLASS_HANDLE callerRetTypeClass,
6366 var_types calleeRetType,
6367 CORINFO_CLASS_HANDLE calleeRetTypeClass)
// Note that we cannot relax this condition with genActualType() as the
// calling convention dictates that the caller of a function with a
// small-typed return value is responsible for normalizing the return val.
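// For illustration (hypothetical example): if a caller declared to return
// int tail calls a callee declared to return short, the caller's caller
// performs no widening (it expects a full int), yet the callee may leave a
// non-normalized short in the return register; hence the exact-match check.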
6372 if (callerRetType == calleeRetType)
6377 // If the class handles are the same and not null, the return types are compatible.
6378 if ((callerRetTypeClass != nullptr) && (callerRetTypeClass == calleeRetTypeClass))
6383 #if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)
6385 if (callerRetType == TYP_VOID)
// This needs to be allowed to support the following IL pattern that Jit64 allows:
//     tail.call
//     pop
//     ret
6392 // Note that the above IL pattern is not valid as per IL verification rules.
6393 // Therefore, only full trust code can take advantage of this pattern.
6397 // These checks return true if the return value type sizes are the same and
6398 // get returned in the same return register i.e. caller doesn't need to normalize
6399 // return value. Some of the tail calls permitted by below checks would have
6400 // been rejected by IL Verifier before we reached here. Therefore, only full
6401 // trust code can make those tail calls.
6402 unsigned callerRetTypeSize = 0;
6403 unsigned calleeRetTypeSize = 0;
6404 bool isCallerRetTypMBEnreg =
6405 VarTypeIsMultiByteAndCanEnreg(callerRetType, callerRetTypeClass, &callerRetTypeSize, true);
6406 bool isCalleeRetTypMBEnreg =
6407 VarTypeIsMultiByteAndCanEnreg(calleeRetType, calleeRetTypeClass, &calleeRetTypeSize, true);
6409 if (varTypeIsIntegral(callerRetType) || isCallerRetTypMBEnreg)
6411 return (varTypeIsIntegral(calleeRetType) || isCalleeRetTypMBEnreg) && (callerRetTypeSize == calleeRetTypeSize);
6413 #endif // _TARGET_AMD64_ || _TARGET_ARM64_
6421 PREFIX_TAILCALL_EXPLICIT = 0x00000001, // call has "tail" IL prefix
6422 PREFIX_TAILCALL_IMPLICIT =
6423 0x00000010, // call is treated as having "tail" prefix even though there is no "tail" IL prefix
6424 PREFIX_TAILCALL = (PREFIX_TAILCALL_EXPLICIT | PREFIX_TAILCALL_IMPLICIT),
6425 PREFIX_VOLATILE = 0x00000100,
6426 PREFIX_UNALIGNED = 0x00001000,
6427 PREFIX_CONSTRAINED = 0x00010000,
6428 PREFIX_READONLY = 0x00100000
6431 /********************************************************************************
* Returns true if the current opcode and the opcodes following it correspond
6434 * to a supported tail call IL pattern.
6437 bool Compiler::impIsTailCallILPattern(bool tailPrefixed,
6439 const BYTE* codeAddrOfNextOpcode,
6440 const BYTE* codeEnd,
6442 bool* isCallPopAndRet /* = nullptr */)
6444 // Bail out if the current opcode is not a call.
6445 if (!impOpcodeIsCallOpcode(curOpcode))
6450 #if !FEATURE_TAILCALL_OPT_SHARED_RETURN
6451 // If shared ret tail opt is not enabled, we will enable
6452 // it for recursive methods.
// we can actually handle the case where the ret is in a fall-through block, as long as that is the only part of the
6457 // sequence. Make sure we don't go past the end of the IL however.
6458 codeEnd = min(codeEnd + 1, info.compCode + info.compILCodeSize);
6461 // Bail out if there is no next opcode after call
6462 if (codeAddrOfNextOpcode >= codeEnd)
6467 // Scan the opcodes to look for the following IL patterns if either
6468 // i) the call is not tail prefixed (i.e. implicit tail call) or
6469 // ii) if tail prefixed, IL verification is not needed for the method.
// Only in the above two cases can we allow the tail call patterns below,
// which violate the ECMA spec.
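// For illustration (a sketch consistent with the checks below), the
// accepted shapes are:
//
//     call   int32 C::M()     // call + ret
//     ret
//
//     call   int32 C::M()     // call + pop + ret, when the caller
//     pop                     // returns void
//     ret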
6488 #if !defined(FEATURE_CORECLR) && defined(_TARGET_AMD64_)
6491 nextOpcode = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode);
6492 codeAddrOfNextOpcode += sizeof(__int8);
6493 } while ((codeAddrOfNextOpcode < codeEnd) && // Haven't reached end of method
6494 (!tailPrefixed || !tiVerificationNeeded) && // Not ".tail" prefixed or method requires no IL verification
6495 ((nextOpcode == CEE_NOP) || ((nextOpcode == CEE_POP) && (++cntPop == 1)))); // Next opcode = nop or exactly
6496 // one pop seen so far.
6498 nextOpcode = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode);
6499 #endif // !FEATURE_CORECLR && _TARGET_AMD64_
6501 if (isCallPopAndRet)
6503 // Allow call+pop+ret to be tail call optimized if caller ret type is void
6504 *isCallPopAndRet = (nextOpcode == CEE_RET) && (cntPop == 1);
6507 #if !defined(FEATURE_CORECLR) && defined(_TARGET_AMD64_)
6509 // Tail call IL pattern could be either of the following
6510 // 1) call/callvirt/calli + ret
6511 // 2) call/callvirt/calli + pop + ret in a method returning void.
6512 return (nextOpcode == CEE_RET) && ((cntPop == 0) || ((cntPop == 1) && (info.compRetType == TYP_VOID)));
6514 return (nextOpcode == CEE_RET) && (cntPop == 0);
6515 #endif // !FEATURE_CORECLR && _TARGET_AMD64_
6518 /*****************************************************************************
6520 * Determine whether the call could be converted to an implicit tail call
6523 bool Compiler::impIsImplicitTailCallCandidate(
6524 OPCODE opcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, int prefixFlags, bool isRecursive)
6527 #if FEATURE_TAILCALL_OPT
6528 if (!opts.compTailCallOpt)
6533 if (opts.compDbgCode || opts.MinOpts())
6538 // must not be tail prefixed
6539 if (prefixFlags & PREFIX_TAILCALL_EXPLICIT)
6544 #if !FEATURE_TAILCALL_OPT_SHARED_RETURN
6545 // the block containing call is marked as BBJ_RETURN
6546 // We allow shared ret tail call optimization on recursive calls even under
6547 // !FEATURE_TAILCALL_OPT_SHARED_RETURN.
6548 if (!isRecursive && (compCurBB->bbJumpKind != BBJ_RETURN))
6550 #endif // !FEATURE_TAILCALL_OPT_SHARED_RETURN
6552 // must be call+ret or call+pop+ret
6553 if (!impIsTailCallILPattern(false, opcode, codeAddrOfNextOpcode, codeEnd, isRecursive))
6561 #endif // FEATURE_TAILCALL_OPT
6564 //------------------------------------------------------------------------
6565 // impImportCall: import a call-inspiring opcode
6568 // opcode - opcode that inspires the call
6569 // pResolvedToken - resolved token for the call target
6570 // pConstrainedResolvedToken - resolved constraint token (or nullptr)
// newObjThis - tree for this pointer or uninitialized newobj temp (or nullptr)
6572 // prefixFlags - IL prefix flags for the call
6573 // callInfo - EE supplied info for the call
6574 // rawILOffset - IL offset of the opcode
6577 // Type of the call's return value.
6580 // opcode can be CEE_CALL, CEE_CALLI, CEE_CALLVIRT, or CEE_NEWOBJ.
6582 // For CEE_NEWOBJ, newobjThis should be the temp grabbed for the allocated
// uninitialized object.
6586 #pragma warning(push)
6587 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
6590 var_types Compiler::impImportCall(OPCODE opcode,
6591 CORINFO_RESOLVED_TOKEN* pResolvedToken,
6592 CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
6593 GenTreePtr newobjThis,
6595 CORINFO_CALL_INFO* callInfo,
6596 IL_OFFSET rawILOffset)
6598 assert(opcode == CEE_CALL || opcode == CEE_CALLVIRT || opcode == CEE_NEWOBJ || opcode == CEE_CALLI);
6600 IL_OFFSETX ilOffset = impCurILOffset(rawILOffset, true);
6601 var_types callRetTyp = TYP_COUNT;
6602 CORINFO_SIG_INFO* sig = nullptr;
6603 CORINFO_METHOD_HANDLE methHnd = nullptr;
6604 CORINFO_CLASS_HANDLE clsHnd = nullptr;
6605 unsigned clsFlags = 0;
6606 unsigned mflags = 0;
6607 unsigned argFlags = 0;
6608 GenTreePtr call = nullptr;
6609 GenTreeArgList* args = nullptr;
6610 CORINFO_THIS_TRANSFORM constraintCallThisTransform = CORINFO_NO_THIS_TRANSFORM;
6611 CORINFO_CONTEXT_HANDLE exactContextHnd = nullptr;
6612 bool exactContextNeedsRuntimeLookup = false;
6613 bool canTailCall = true;
6614 const char* szCanTailCallFailReason = nullptr;
6615 int tailCall = prefixFlags & PREFIX_TAILCALL;
6616 bool readonlyCall = (prefixFlags & PREFIX_READONLY) != 0;
6618 CORINFO_RESOLVED_TOKEN* ldftnToken = nullptr;
6620 // Synchronized methods need to call CORINFO_HELP_MON_EXIT at the end. We could
6621 // do that before tailcalls, but that is probably not the intended
6622 // semantic. So just disallow tailcalls from synchronized methods.
6623 // Also, popping arguments in a varargs function is more work and NYI
6624 // If we have a security object, we have to keep our frame around for callers
6625 // to see any imperative security.
6626 if (info.compFlags & CORINFO_FLG_SYNCH)
6628 canTailCall = false;
6629 szCanTailCallFailReason = "Caller is synchronized";
6631 #if !FEATURE_FIXED_OUT_ARGS
6632 else if (info.compIsVarArgs)
6634 canTailCall = false;
6635 szCanTailCallFailReason = "Caller is varargs";
6637 #endif // FEATURE_FIXED_OUT_ARGS
6638 else if (opts.compNeedSecurityCheck)
6640 canTailCall = false;
6641 szCanTailCallFailReason = "Caller requires a security check.";
6644 // We only need to cast the return value of pinvoke inlined calls that return small types
6646 // TODO-AMD64-Cleanup: Remove this when we stop interoperating with JIT64, or if we decide to stop
6647 // widening everything! CoreCLR does not support JIT64 interoperation so no need to widen there.
6648 // The existing x64 JIT doesn't bother widening all types to int, so we have to assume for
6649 // the time being that the callee might be compiled by the other JIT and thus the return
6650 // value will need to be widened by us (or not widened at all...)
6652 // ReadyToRun code sticks with default calling convention that does not widen small return types.
6654 bool checkForSmallType = opts.IsJit64Compat() || opts.IsReadyToRun();
6655 bool bIntrinsicImported = false;
6657 CORINFO_SIG_INFO calliSig;
6658 GenTreeArgList* extraArg = nullptr;
6660 /*-------------------------------------------------------------------------
6661 * First create the call node
6664 if (opcode == CEE_CALLI)
6666 /* Get the call site sig */
6667 eeGetSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &calliSig);
6669 callRetTyp = JITtype2varType(calliSig.retType);
6671 call = impImportIndirectCall(&calliSig, ilOffset);
6673 // We don't know the target method, so we have to infer the flags, or
6674 // assume the worst-case.
6675 mflags = (calliSig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC;
6680 unsigned structSize =
6681 (callRetTyp == TYP_STRUCT) ? info.compCompHnd->getClassSize(calliSig.retTypeSigClass) : 0;
6682 printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n",
6683 opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize);
6686 // This should be checked in impImportBlockCode.
6687 assert(!compIsForInlining() || !(impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY));
6692 // We cannot lazily obtain the signature of a CALLI call because it has no method
6693 // handle that we can use, so we need to save its full call signature here.
6694 assert(call->gtCall.callSig == nullptr);
6695 call->gtCall.callSig = new (this, CMK_CorSig) CORINFO_SIG_INFO;
6696 *call->gtCall.callSig = calliSig;
6699 if (IsTargetAbi(CORINFO_CORERT_ABI))
6701 bool managedCall = (((calliSig.callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_STDCALL) &&
6702 ((calliSig.callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_C) &&
6703 ((calliSig.callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_THISCALL) &&
6704 ((calliSig.callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_FASTCALL));
6707 addFatPointerCandidate(call->AsCall());
6711 else // (opcode != CEE_CALLI)
6713 CorInfoIntrinsics intrinsicID = CORINFO_INTRINSIC_Count;
6715 // Passing CORINFO_CALLINFO_ALLOWINSTPARAM indicates that this JIT is prepared to
6716 // supply the instantiation parameters necessary to make direct calls to underlying
6717 // shared generic code, rather than calling through instantiating stubs. If the
6718 // returned signature has CORINFO_CALLCONV_PARAMTYPE then this indicates that the JIT
6719 // must indeed pass an instantiation parameter.
6721 methHnd = callInfo->hMethod;
6723 sig = &(callInfo->sig);
6724 callRetTyp = JITtype2varType(sig->retType);
6726 mflags = callInfo->methodFlags;
6731 unsigned structSize = (callRetTyp == TYP_STRUCT) ? info.compCompHnd->getClassSize(sig->retTypeSigClass) : 0;
6732 printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n",
6733 opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize);
6736 if (compIsForInlining())
6738 /* Does this call site have security boundary restrictions? */
6740 if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
6742 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_SECURITY);
6746 /* Does the inlinee need a security check token on the frame */
6748 if (mflags & CORINFO_FLG_SECURITYCHECK)
6750 compInlineResult->NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
6754 /* Does the inlinee use StackCrawlMark */
6756 if (mflags & CORINFO_FLG_DONT_INLINE_CALLER)
6758 compInlineResult->NoteFatal(InlineObservation::CALLEE_STACK_CRAWL_MARK);
6762 /* For now ignore delegate invoke */
6764 if (mflags & CORINFO_FLG_DELEGATE_INVOKE)
6766 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_DELEGATE_INVOKE);
6770 /* For now ignore varargs */
6771 if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
6773 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NATIVE_VARARGS);
6777 if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
6779 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS);
6783 if ((mflags & CORINFO_FLG_VIRTUAL) && (sig->sigInst.methInstCount != 0) && (opcode == CEE_CALLVIRT))
6785 compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_GENERIC_VIRTUAL);
6790 clsHnd = pResolvedToken->hClass;
6792 clsFlags = callInfo->classFlags;
6795 // If this is a call to JitTestLabel.Mark, do "early inlining", and record the test attribute.
6797 // This recognition should really be done by knowing the methHnd of the relevant Mark method(s).
6798 // These should be in mscorlib.h, and available through a JIT/EE interface call.
6799 const char* modName;
6800 const char* className;
6801 const char* methodName;
6802 if ((className = eeGetClassName(clsHnd)) != nullptr &&
6803 strcmp(className, "System.Runtime.CompilerServices.JitTestLabel") == 0 &&
6804 (methodName = eeGetMethodName(methHnd, &modName)) != nullptr && strcmp(methodName, "Mark") == 0)
6806 return impImportJitTestLabelMark(sig->numArgs);
6810 // <NICE> Factor this into getCallInfo </NICE>
6811 const bool isIntrinsic = (mflags & CORINFO_FLG_INTRINSIC) != 0;
6812 const bool isJitIntrinsic = (mflags & CORINFO_FLG_JIT_INTRINSIC) != 0;
6813 if ((isIntrinsic || isJitIntrinsic) && !pConstrainedResolvedToken)
6815 call = impIntrinsic(newobjThis, clsHnd, methHnd, sig, pResolvedToken->token, readonlyCall,
6816 (canTailCall && (tailCall != 0)), isJitIntrinsic, &intrinsicID);
6818 if (compIsForInlining() && compInlineResult->IsFailure())
6823 if (call != nullptr)
6825 assert(!(mflags & CORINFO_FLG_VIRTUAL) || (mflags & CORINFO_FLG_FINAL) ||
6826 (clsFlags & CORINFO_FLG_FINAL));
6828 #ifdef FEATURE_READYTORUN_COMPILER
6829 if (call->OperGet() == GT_INTRINSIC)
6831 if (opts.IsReadyToRun())
6833 noway_assert(callInfo->kind == CORINFO_CALL);
6834 call->gtIntrinsic.gtEntryPoint = callInfo->codePointerLookup.constLookup;
6838 call->gtIntrinsic.gtEntryPoint.addr = nullptr;
6843 bIntrinsicImported = true;
6851 call = impSIMDIntrinsic(opcode, newobjThis, clsHnd, methHnd, sig, pResolvedToken->token);
6852 if (call != nullptr)
6854 bIntrinsicImported = true;
6858 #endif // FEATURE_SIMD
6860 if ((mflags & CORINFO_FLG_VIRTUAL) && (mflags & CORINFO_FLG_EnC) && (opcode == CEE_CALLVIRT))
6862 NO_WAY("Virtual call to a function added via EnC is not supported");
6865 if ((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_DEFAULT &&
6866 (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
6867 (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG)
6869 BADCODE("Bad calling convention");
6872 //-------------------------------------------------------------------------
6873 // Construct the call node
6875 // Work out what sort of call we're making.
6876 // Dispense with virtual calls implemented via LDVIRTFTN immediately.
6878 constraintCallThisTransform = callInfo->thisTransform;
6879 exactContextHnd = callInfo->contextHandle;
6880 exactContextNeedsRuntimeLookup = callInfo->exactContextNeedsRuntimeLookup == TRUE;
// A recursive call is treated as a loop back to the beginning of the method.
6883 if (methHnd == info.compMethodHnd)
6888 JITDUMP("\nFound recursive call in the method. Mark BB%02u to BB%02u as having a backward branch.\n",
6889 fgFirstBB->bbNum, compCurBB->bbNum);
6892 fgMarkBackwardJump(fgFirstBB, compCurBB);
6895 switch (callInfo->kind)
6898 case CORINFO_VIRTUALCALL_STUB:
6900 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
6901 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
6902 if (callInfo->stubLookup.lookupKind.needsRuntimeLookup)
6905 if (compIsForInlining())
6907 // Don't import runtime lookups when inlining
6908 // Inlining has to be aborted in such a case
6909 /* XXX Fri 3/20/2009
6910 * By the way, this would never succeed. If the handle lookup is into the generic
6911 * dictionary for a candidate, you'll generate different dictionary offsets and the
6912 * inlined code will crash.
6914 * To anyone code reviewing this, when could this ever succeed in the future? It'll
6915 * always have a handle lookup. These lookups are safe intra-module, but we're just
6918 compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_COMPLEX_HANDLE);
6922 GenTreePtr stubAddr = impRuntimeLookupToTree(pResolvedToken, &callInfo->stubLookup, methHnd);
6923 assert(!compDonotInline());
6925 // This is the rough code to set up an indirect stub call
6926 assert(stubAddr != nullptr);
6928 // The stubAddr may be a
6929 // complex expression. As it is evaluated after the args,
6930 // it may cause registered args to be spilled. Simply spill it.
6932 unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall with runtime lookup"));
6933 impAssignTempGen(lclNum, stubAddr, (unsigned)CHECK_SPILL_ALL);
6934 stubAddr = gtNewLclvNode(lclNum, TYP_I_IMPL);
6936 // Create the actual call node
6938 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
6939 (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
6941 call = gtNewIndCallNode(stubAddr, callRetTyp, nullptr);
6943 call->gtFlags |= GTF_EXCEPT | (stubAddr->gtFlags & GTF_GLOB_EFFECT);
6944 call->gtFlags |= GTF_CALL_VIRT_STUB;
6947 // No tailcalls allowed for these yet...
6948 canTailCall = false;
6949 szCanTailCallFailReason = "VirtualCall with runtime lookup";
// OK, the stub is available at compile time.
6956 call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
6957 call->gtCall.gtStubCallStubAddr = callInfo->stubLookup.constLookup.addr;
6958 call->gtFlags |= GTF_CALL_VIRT_STUB;
6959 assert(callInfo->stubLookup.constLookup.accessType != IAT_PPVALUE);
6960 if (callInfo->stubLookup.constLookup.accessType == IAT_PVALUE)
6962 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_VIRTSTUB_REL_INDIRECT;
6966 #ifdef FEATURE_READYTORUN_COMPILER
6967 if (opts.IsReadyToRun())
6969 // Null check is sometimes needed for ready to run to handle
6970 // non-virtual <-> virtual changes between versions
6971 if (callInfo->nullInstanceCheck)
6973 call->gtFlags |= GTF_CALL_NULLCHECK;
6981 case CORINFO_VIRTUALCALL_VTABLE:
6983 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
6984 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
6985 call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
6986 call->gtFlags |= GTF_CALL_VIRT_VTABLE;
6990 case CORINFO_VIRTUALCALL_LDVIRTFTN:
6992 if (compIsForInlining())
6994 compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_CALL_VIA_LDVIRTFTN);
6998 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
6999 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
// OK, we've been told to call via LDVIRTFTN, so just
7001 // take the call now....
7003 args = impPopList(sig->numArgs, sig);
7005 GenTreePtr thisPtr = impPopStack().val;
7006 thisPtr = impTransformThis(thisPtr, pConstrainedResolvedToken, callInfo->thisTransform);
7007 if (compDonotInline())
7012 // Clone the (possibly transformed) "this" pointer
7013 GenTreePtr thisPtrCopy;
7014 thisPtr = impCloneExpr(thisPtr, &thisPtrCopy, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
7015 nullptr DEBUGARG("LDVIRTFTN this pointer"));
7017 GenTreePtr fptr = impImportLdvirtftn(thisPtr, pResolvedToken, callInfo);
7019 if (compDonotInline())
7024 thisPtr = nullptr; // can't reuse it
7026 // Now make an indirect call through the function pointer
7028 unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall through function pointer"));
7029 impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL);
7030 fptr = gtNewLclvNode(lclNum, TYP_I_IMPL);
7032 // Create the actual call node
7034 call = gtNewIndCallNode(fptr, callRetTyp, args, ilOffset);
7035 call->gtCall.gtCallObjp = thisPtrCopy;
7036 call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
7038 if ((sig->sigInst.methInstCount != 0) && IsTargetAbi(CORINFO_CORERT_ABI))
7040 // CoreRT generic virtual method: need to handle potential fat function pointers
7041 addFatPointerCandidate(call->AsCall());
7043 #ifdef FEATURE_READYTORUN_COMPILER
7044 if (opts.IsReadyToRun())
7046 // Null check is needed for ready to run to handle
7047 // non-virtual <-> virtual changes between versions
7048 call->gtFlags |= GTF_CALL_NULLCHECK;
// Since we are jumping over some code, check that it's OK to skip that code
7053 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
7054 (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
7060 // This is for a non-virtual, non-interface etc. call
7061 call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
// We remove the nullcheck for the GetType call intrinsic.
7064 // TODO-CQ: JIT64 does not introduce the null check for many more helper calls
7066 if (callInfo->nullInstanceCheck &&
7067 !((mflags & CORINFO_FLG_INTRINSIC) != 0 && (intrinsicID == CORINFO_INTRINSIC_Object_GetType)))
7069 call->gtFlags |= GTF_CALL_NULLCHECK;
7072 #ifdef FEATURE_READYTORUN_COMPILER
7073 if (opts.IsReadyToRun())
7075 call->gtCall.setEntryPoint(callInfo->codePointerLookup.constLookup);
7081 case CORINFO_CALL_CODE_POINTER:
7083 // The EE has asked us to call by computing a code pointer and then doing an
7084 // indirect call. This is because a runtime lookup is required to get the code entry point.
7086 // These calls always follow a uniform calling convention, i.e. no extra hidden params
7087 assert((sig->callConv & CORINFO_CALLCONV_PARAMTYPE) == 0);
7089 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG);
7090 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
GenTreePtr fptr = impLookupToTree(pResolvedToken, &callInfo->codePointerLookup, GTF_ICON_FTN_ADDR, callInfo->hMethod);
7095 if (compDonotInline())
7100 // Now make an indirect call through the function pointer
7102 unsigned lclNum = lvaGrabTemp(true DEBUGARG("Indirect call through function pointer"));
7103 impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL);
7104 fptr = gtNewLclvNode(lclNum, TYP_I_IMPL);
7106 call = gtNewIndCallNode(fptr, callRetTyp, nullptr, ilOffset);
7107 call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
7108 if (callInfo->nullInstanceCheck)
7110 call->gtFlags |= GTF_CALL_NULLCHECK;
7117 assert(!"unknown call kind");
7121 //-------------------------------------------------------------------------
7124 PREFIX_ASSUME(call != nullptr);
7126 if (mflags & CORINFO_FLG_NOGCCHECK)
7128 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_NOGCCHECK;
7131 // Mark call if it's one of the ones we will maybe treat as an intrinsic
7132 if (intrinsicID == CORINFO_INTRINSIC_Object_GetType || intrinsicID == CORINFO_INTRINSIC_TypeEQ ||
7133 intrinsicID == CORINFO_INTRINSIC_TypeNEQ || intrinsicID == CORINFO_INTRINSIC_GetCurrentManagedThread ||
7134 intrinsicID == CORINFO_INTRINSIC_GetManagedThreadId)
7136 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_SPECIAL_INTRINSIC;
7140 assert(clsHnd || (opcode == CEE_CALLI)); // We're never verifying for CALLI, so this is not set.
7142 /* Some sanity checks */
7144 // CALL_VIRT and NEWOBJ must have a THIS pointer
7145 assert((opcode != CEE_CALLVIRT && opcode != CEE_NEWOBJ) || (sig->callConv & CORINFO_CALLCONV_HASTHIS));
7146 // static bit and hasThis are negations of one another
7147 assert(((mflags & CORINFO_FLG_STATIC) != 0) == ((sig->callConv & CORINFO_CALLCONV_HASTHIS) == 0));
7148 assert(call != nullptr);
7150 /*-------------------------------------------------------------------------
7151 * Check special-cases etc
7154 /* Special case - Check if it is a call to Delegate.Invoke(). */
7156 if (mflags & CORINFO_FLG_DELEGATE_INVOKE)
7158 assert(!compIsForInlining());
7159 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
7160 assert(mflags & CORINFO_FLG_FINAL);
7162 /* Set the delegate flag */
7163 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_DELEGATE_INV;
7165 if (callInfo->secureDelegateInvoke)
7167 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_SECURE_DELEGATE_INV;
7170 if (opcode == CEE_CALLVIRT)
7172 assert(mflags & CORINFO_FLG_FINAL);
7174 /* It should have the GTF_CALL_NULLCHECK flag set. Reset it */
7175 assert(call->gtFlags & GTF_CALL_NULLCHECK);
7176 call->gtFlags &= ~GTF_CALL_NULLCHECK;
7180 CORINFO_CLASS_HANDLE actualMethodRetTypeSigClass;
7181 actualMethodRetTypeSigClass = sig->retTypeSigClass;
7182 if (varTypeIsStruct(callRetTyp))
7184 callRetTyp = impNormStructType(actualMethodRetTypeSigClass);
7185 call->gtType = callRetTyp;
7189 /* Check for varargs */
7190 if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG ||
7191 (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
7193 BADCODE("Varargs not supported.");
7195 #endif // !FEATURE_VARARG
7198 if (call->gtCall.callSig == nullptr)
7200 call->gtCall.callSig = new (this, CMK_CorSig) CORINFO_SIG_INFO;
7201 *call->gtCall.callSig = *sig;
7203 #endif // UNIX_X86_ABI
7205 if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG ||
7206 (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
7208 assert(!compIsForInlining());
7210 /* Set the right flags */
7212 call->gtFlags |= GTF_CALL_POP_ARGS;
7213 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_VARARGS;
7215 /* Can't allow tailcall for varargs as it is caller-pop. The caller
7216 will be expecting to pop a certain number of arguments, but if we
7217 tailcall to a function with a different number of arguments, we
7218 are hosed. There are ways around this (caller remembers esp value,
7219 varargs is not caller-pop, etc), but not worth it. */
7220 CLANG_FORMAT_COMMENT_ANCHOR;
7225 canTailCall = false;
7226 szCanTailCallFailReason = "Callee is varargs";
7230 /* Get the total number of arguments - this is already correct
7231 * for CALLI - for methods we have to get it from the call site */
7233 if (opcode != CEE_CALLI)
7236 unsigned numArgsDef = sig->numArgs;
7238 eeGetCallSiteSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, sig);
7241 // We cannot lazily obtain the signature of a vararg call because using its method
7242 // handle will give us only the declared argument list, not the full argument list.
7243 assert(call->gtCall.callSig == nullptr);
7244 call->gtCall.callSig = new (this, CMK_CorSig) CORINFO_SIG_INFO;
7245 *call->gtCall.callSig = *sig;
7248 // For vararg calls we must be sure to load the return type of the
// method actually being called, as well as the return types
// specified in the vararg signature. With type equivalency, these types
7251 // may not be the same.
7252 if (sig->retTypeSigClass != actualMethodRetTypeSigClass)
7254 if (actualMethodRetTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS &&
7255 sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR &&
7256 sig->retType != CORINFO_TYPE_VAR)
7258 // Make sure that all valuetypes (including enums) that we push are loaded.
// This is to guarantee that if a GC is triggered from the prestub of this method,
7260 // all valuetypes in the method signature are already loaded.
7261 // We need to be able to find the size of the valuetypes, but we cannot
7262 // do a class-load from within GC.
7263 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(actualMethodRetTypeSigClass);
7267 assert(numArgsDef <= sig->numArgs);
7270 /* We will have "cookie" as the last argument but we cannot push
7271 * it on the operand stack because we may overflow, so we append it
7272 * to the arg list next after we pop them */
7275 if (mflags & CORINFO_FLG_SECURITYCHECK)
7277 assert(!compIsForInlining());
7279 // Need security prolog/epilog callouts when there is
7280 // imperative security in the method. This is to give security a
7281 // chance to do any setup in the prolog and cleanup in the epilog if needed.
7283 if (compIsForInlining())
// Cannot handle this if the method being imported is itself an inlinee,
// because an inlinee does not have its own frame.
7288 compInlineResult->NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
7293 tiSecurityCalloutNeeded = true;
7295 // If the current method calls a method which needs a security check,
7296 // (i.e. the method being compiled has imperative security)
7297 // we need to reserve a slot for the security object in
7298 // the current method's stack frame
7299 opts.compNeedSecurityCheck = true;
7303 //--------------------------- Inline NDirect ------------------------------
7305 // For inline cases we technically should look at both the current
7306 // block and the call site block (or just the latter if we've
7307 // fused the EH trees). However the block-related checks pertain to
7308 // EH and we currently won't inline a method with EH. So for
7309 // inlinees, just checking the call site block is sufficient.
7311 // New lexical block here to avoid compilation errors because of GOTOs.
7312 BasicBlock* block = compIsForInlining() ? impInlineInfo->iciBlock : compCurBB;
7313 impCheckForPInvokeCall(call->AsCall(), methHnd, sig, mflags, block);
7316 if (call->gtFlags & GTF_CALL_UNMANAGED)
7318 // We set up the unmanaged call by linking the frame, disabling GC, etc
7319 // This needs to be cleaned up on return
7322 canTailCall = false;
7323 szCanTailCallFailReason = "Callee is native";
7326 checkForSmallType = true;
7328 impPopArgsForUnmanagedCall(call, sig);
7332 else if ((opcode == CEE_CALLI) && (((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_STDCALL) ||
7333 ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_C) ||
7334 ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_THISCALL) ||
7335 ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_FASTCALL)))
7337 if (!info.compCompHnd->canGetCookieForPInvokeCalliSig(sig))
7339 // Normally this only happens with inlining.
7340 // However, a generic method (or type) being NGENd into another module
7341 // can run into this issue as well. There's not an easy fall-back for NGEN
// so instead we fall back to JIT.
7343 if (compIsForInlining())
7345 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_PINVOKE_COOKIE);
7349 IMPL_LIMITATION("Can't get PInvoke cookie (cross module generics)");
7355 GenTreePtr cookie = eeGetPInvokeCookie(sig);
7357 // This cookie is required to be either a simple GT_CNS_INT or
7358 // an indirection of a GT_CNS_INT
7360 GenTreePtr cookieConst = cookie;
7361 if (cookie->gtOper == GT_IND)
7363 cookieConst = cookie->gtOp.gtOp1;
7365 assert(cookieConst->gtOper == GT_CNS_INT);
7367 // Setting GTF_DONT_CSE on the GT_CNS_INT as well as on the GT_IND (if it exists) will ensure that
7368 // we won't allow this tree to participate in any CSE logic
7370 cookie->gtFlags |= GTF_DONT_CSE;
7371 cookieConst->gtFlags |= GTF_DONT_CSE;
7373 call->gtCall.gtCallCookie = cookie;
7377 canTailCall = false;
7378 szCanTailCallFailReason = "PInvoke calli";
7382 /*-------------------------------------------------------------------------
7383 * Create the argument list
7386 //-------------------------------------------------------------------------
7387 // Special case - for varargs we have an implicit last argument
7389 if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
7391 assert(!compIsForInlining());
7393 void *varCookie, *pVarCookie;
7394 if (!info.compCompHnd->canGetVarArgsHandle(sig))
7396 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_VARARGS_COOKIE);
7400 varCookie = info.compCompHnd->getVarArgsHandle(sig, &pVarCookie);
7401 assert((!varCookie) != (!pVarCookie));
7402 GenTreePtr cookie = gtNewIconEmbHndNode(varCookie, pVarCookie, GTF_ICON_VARG_HDL);
7404 assert(extraArg == nullptr);
7405 extraArg = gtNewArgList(cookie);
7408 //-------------------------------------------------------------------------
7409 // Extra arg for shared generic code and array methods
7411 // Extra argument containing instantiation information is passed in the
7412 // following circumstances:
7413 // (a) To the "Address" method on array classes; the extra parameter is
7414 // the array's type handle (a TypeDesc)
7415 // (b) To shared-code instance methods in generic structs; the extra parameter
7416 // is the struct's type handle (a vtable ptr)
7417 // (c) To shared-code per-instantiation non-generic static methods in generic
7418 // classes and structs; the extra parameter is the type handle
7419 // (d) To shared-code generic methods; the extra parameter is an
7420 // exact-instantiation MethodDesc
7422 // We also set the exact type context associated with the call so we can
7423 // inline the call correctly later on.
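// For illustration (hypothetical example): a static method M() on a generic
// class G<T> has one native body shared by all reference-type instantiations;
// a call to G<string>.M() passes the type handle for G<string> as the extra
// argument (case (c) above), so the shared body can recover T at runtime.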
7425 if (sig->callConv & CORINFO_CALLCONV_PARAMTYPE)
7427 assert(call->gtCall.gtCallType == CT_USER_FUNC);
7428 if (clsHnd == nullptr)
7430 NO_WAY("CALLI on parameterized type");
7433 assert(opcode != CEE_CALLI);
7435 GenTreePtr instParam;
7438 // Instantiated generic method
7439 if (((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_METHOD)
7441 CORINFO_METHOD_HANDLE exactMethodHandle =
7442 (CORINFO_METHOD_HANDLE)((SIZE_T)exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK);
7444 if (!exactContextNeedsRuntimeLookup)
7446 #ifdef FEATURE_READYTORUN_COMPILER
7447 if (opts.IsReadyToRun())
instParam = impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_METHOD_HDL, exactMethodHandle);
7451 if (instParam == nullptr)
7459 instParam = gtNewIconEmbMethHndNode(exactMethodHandle);
7460 info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(exactMethodHandle);
7465 instParam = impTokenToHandle(pResolvedToken, &runtimeLookup, TRUE /*mustRestoreHandle*/);
7466 if (instParam == nullptr)
7473 // otherwise must be an instance method in a generic struct,
7474 // a static method in a generic type, or a runtime-generated array method
7477 assert(((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_CLASS);
7478 CORINFO_CLASS_HANDLE exactClassHandle =
7479 (CORINFO_CLASS_HANDLE)((SIZE_T)exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK);
7481 if (compIsForInlining() && (clsFlags & CORINFO_FLG_ARRAY) != 0)
7483 compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_ARRAY_METHOD);
7487 if ((clsFlags & CORINFO_FLG_ARRAY) && readonlyCall)
// We indicate "readonly" to the Address operation by using a null instParam.
7491 instParam = gtNewIconNode(0, TYP_REF);
7493 else if (!exactContextNeedsRuntimeLookup)
7495 #ifdef FEATURE_READYTORUN_COMPILER
7496 if (opts.IsReadyToRun())
instParam = impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_CLASS_HDL, exactClassHandle);
7500 if (instParam == nullptr)
7508 instParam = gtNewIconEmbClsHndNode(exactClassHandle);
7509 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(exactClassHandle);
// If the EE was able to resolve a constrained call, the instantiating parameter to use is the type
// by which the call was constrained. We embed pConstrainedResolvedToken as the extra argument
// because pResolvedToken refers to an interface method and interface types make a poor generic context.
7517 if (pConstrainedResolvedToken)
7519 instParam = impTokenToHandle(pConstrainedResolvedToken, &runtimeLookup, TRUE /*mustRestoreHandle*/,
7520 FALSE /* importParent */);
7524 instParam = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup, TRUE /*mustRestoreHandle*/);
7527 if (instParam == nullptr)
7534 assert(extraArg == nullptr);
7535 extraArg = gtNewArgList(instParam);
7538 // Inlining may need the exact type context (exactContextHnd) if we're inlining shared generic code, in particular
7539 // to inline 'polytypic' operations such as static field accesses, type tests and method calls which
7540 // rely on the exact context. The exactContextHnd is passed back to the JitInterface at appropriate points.
7541 // exactContextHnd is not currently required when inlining shared generic code into shared
7542 // generic code, since the inliner aborts whenever shared code polytypic operations are encountered
7543 // (e.g. anything marked needsRuntimeLookup)
7544 if (exactContextNeedsRuntimeLookup)
7546 exactContextHnd = nullptr;
7549 if ((opcode == CEE_NEWOBJ) && ((clsFlags & CORINFO_FLG_DELEGATE) != 0))
7551 // Only verifiable cases are supported.
7552 // dup; ldvirtftn; newobj; or ldftn; newobj.
7553 // The IL could contain an unverifiable sequence; in that case this optimization should not be done.
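// For reference, the verifiable IL shapes look like (hypothetical method/delegate names):
//   ldftn  instance void C::M(...)            (or: dup; ldvirtftn instance void C::M(...))
//   newobj instance void D::.ctor(object, native int)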
7554 if (impStackHeight() > 0)
7556 typeInfo delegateTypeInfo = impStackTop().seTypeInfo;
7557 if (delegateTypeInfo.IsToken())
7559 ldftnToken = delegateTypeInfo.GetToken();
7564 //-------------------------------------------------------------------------
7565 // The main group of arguments
7567 args = call->gtCall.gtCallArgs = impPopList(sig->numArgs, sig, extraArg);
7571 call->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
7574 //-------------------------------------------------------------------------
7575 // The "this" pointer
7577 if (!(mflags & CORINFO_FLG_STATIC) && !((opcode == CEE_NEWOBJ) && (newobjThis == nullptr)))
7581 if (opcode == CEE_NEWOBJ)
7587 obj = impPopStack().val;
7588 obj = impTransformThis(obj, pConstrainedResolvedToken, constraintCallThisTransform);
7589 if (compDonotInline())
7595 /* Is this a virtual or interface call? */
7597 if ((call->gtFlags & GTF_CALL_VIRT_KIND_MASK) != GTF_CALL_NONVIRT)
7599 /* only true object pointers can be virtual */
7600 assert(obj->gtType == TYP_REF);
7602 // See if we can devirtualize.
7603 impDevirtualizeCall(call->AsCall(), obj, &callInfo->hMethod, &callInfo->methodFlags,
7604 &callInfo->contextHandle, &exactContextHnd);
7610 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_NONVIRT_SAME_THIS;
7614 /* Store the "this" value in the call */
7616 call->gtFlags |= obj->gtFlags & GTF_GLOB_EFFECT;
7617 call->gtCall.gtCallObjp = obj;
7620 //-------------------------------------------------------------------------
7621 // The "this" pointer for "newobj"
7623 if (opcode == CEE_NEWOBJ)
7625 if (clsFlags & CORINFO_FLG_VAROBJSIZE)
7627 assert(!(clsFlags & CORINFO_FLG_ARRAY)); // arrays handled separately
7628 // This is a 'new' of a variable sized object, where
7629 // the constructor is to return the object. In this case
7630 // the constructor claims to return VOID but we know it
7631 // actually returns the new object
7632 assert(callRetTyp == TYP_VOID);
7633 callRetTyp = TYP_REF;
7634 call->gtType = TYP_REF;
7635 impSpillSpecialSideEff();
7637 impPushOnStack(call, typeInfo(TI_REF, clsHnd));
7641 if (clsFlags & CORINFO_FLG_DELEGATE)
7643 // The new inliner morphs it in impImportCall.
7644 // This will allow us to inline the call to the delegate constructor.
7645 call = fgOptimizeDelegateConstructor(call->AsCall(), &exactContextHnd, ldftnToken);
7648 if (!bIntrinsicImported)
7651 #if defined(DEBUG) || defined(INLINE_DATA)
7653 // Keep track of the raw IL offset of the call
7654 call->gtCall.gtRawILOffset = rawILOffset;
7656 #endif // defined(DEBUG) || defined(INLINE_DATA)
7658 // Is it an inline candidate?
7659 impMarkInlineCandidate(call, exactContextHnd, exactContextNeedsRuntimeLookup, callInfo);
7662 // append the call node.
7663 impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
7665 // Now push the value of the 'new' onto the stack
7667 // This is a 'new' of a non-variable sized object.
7668 // Append the new node (op1) to the statement list,
7669 // and then push the local holding the value of this
7670 // new instruction on the stack.
7672 if (clsFlags & CORINFO_FLG_VALUECLASS)
7674 assert(newobjThis->gtOper == GT_ADDR && newobjThis->gtOp.gtOp1->gtOper == GT_LCL_VAR);
7676 unsigned tmp = newobjThis->gtOp.gtOp1->gtLclVarCommon.gtLclNum;
7677 impPushOnStack(gtNewLclvNode(tmp, lvaGetRealType(tmp)), verMakeTypeInfo(clsHnd).NormaliseForStack());
7681 if (newobjThis->gtOper == GT_COMMA)
7683 // In coreclr the callout can be inserted even if verification is disabled
7684 // so we cannot rely on tiVerificationNeeded alone
7686 // We must have inserted the callout. Get the real newobj.
7687 newobjThis = newobjThis->gtOp.gtOp2;
7690 assert(newobjThis->gtOper == GT_LCL_VAR);
7691 impPushOnStack(gtNewLclvNode(newobjThis->gtLclVarCommon.gtLclNum, TYP_REF), typeInfo(TI_REF, clsHnd));
7701 // This check cannot be performed for implicit tail calls because
7702 // impIsImplicitTailCallCandidate() does not check whether return
7703 // types are compatible before marking a call node with PREFIX_TAILCALL_IMPLICIT.
7704 // As a result it is possible that in the following case, we find that
7705 // the type stack is non-empty if Callee() is considered for implicit tail calling:
7707 // int Caller(..) { .... void Callee(); ret val; ... }
7709 // Note that we cannot check return type compatibility before impImportCall()
7710 // as we don't have the required info, or we would need to duplicate some of the logic in impImportCall().
7713 // For implicit tail calls, we perform this check after return types are
7714 // known to be compatible.
7715 if ((tailCall & PREFIX_TAILCALL_EXPLICIT) && (verCurrentState.esStackDepth != 0))
7717 BADCODE("Stack should be empty after tailcall");
7720 // Note that we cannot relax this condition with genActualType() as
7721 // the calling convention dictates that the caller of a function with
7722 // a small-typed return value is responsible for normalizing the return value.
7725 !impTailCallRetTypeCompatible(info.compRetType, info.compMethodInfo->args.retTypeClass, callRetTyp,
7726 callInfo->sig.retTypeClass))
7728 canTailCall = false;
7729 szCanTailCallFailReason = "Return types are not tail call compatible";
7732 // Stack empty check for implicit tail calls.
7733 if (canTailCall && (tailCall & PREFIX_TAILCALL_IMPLICIT) && (verCurrentState.esStackDepth != 0))
7735 #ifdef _TARGET_AMD64_
7736 // JIT64 Compatibility: Opportunistic tail call stack mismatch throws a VerificationException
7737 // in JIT64, not an InvalidProgramException.
7738 Verify(false, "Stack should be empty after tailcall");
7739 #else // !_TARGET_AMD64_
7740 BADCODE("Stack should be empty after tailcall");
7741 #endif // !_TARGET_AMD64_
7744 // assert(compCurBB is not a catch, finally or filter block);
7745 // assert(compCurBB is not a try block protected by a finally block);
7747 // Check for permission to tailcall
7748 bool explicitTailCall = (tailCall & PREFIX_TAILCALL_EXPLICIT) != 0;
7750 assert(!explicitTailCall || compCurBB->bbJumpKind == BBJ_RETURN);
7754 // True virtual or indirect calls shouldn't pass in a callee handle.
7755 CORINFO_METHOD_HANDLE exactCalleeHnd = ((call->gtCall.gtCallType != CT_USER_FUNC) ||
7756 ((call->gtFlags & GTF_CALL_VIRT_KIND_MASK) != GTF_CALL_NONVIRT))
7759 GenTreePtr thisArg = call->gtCall.gtCallObjp;
7761 if (info.compCompHnd->canTailCall(info.compMethodHnd, methHnd, exactCalleeHnd, explicitTailCall))
7764 if (explicitTailCall)
7766 // In case of explicit tail calls, mark it so that it is not considered an inline candidate.
7768 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_EXPLICIT_TAILCALL;
7772 printf("\nGTF_CALL_M_EXPLICIT_TAILCALL bit set for call ");
7780 #if FEATURE_TAILCALL_OPT
7781 // Must be an implicit tail call.
7782 assert((tailCall & PREFIX_TAILCALL_IMPLICIT) != 0);
7784 // It is possible that a call node is both an inline candidate and marked
7785 // for opportunistic tail calling. In-lining happens before morphing of
7786 // trees. If in-lining of an in-line candidate gets aborted for whatever
7787 // reason, it will survive to the morphing stage at which point it will be
7788 // transformed into a tail call after performing additional checks.
7790 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_IMPLICIT_TAILCALL;
7794 printf("\nGTF_CALL_M_IMPLICIT_TAILCALL bit set for call ");
7800 #else //! FEATURE_TAILCALL_OPT
7801 NYI("Implicit tail call prefix on a target which doesn't support opportunistic tail calls");
7803 #endif // FEATURE_TAILCALL_OPT
7806 // we can't report success just yet...
7810 canTailCall = false;
7811 // canTailCall reported its reasons already
7815 printf("\ninfo.compCompHnd->canTailCall returned false for call ");
7824 // If this assert fires it means that canTailCall was set to false without setting a reason!
7825 assert(szCanTailCallFailReason != nullptr);
7830 printf("\nRejecting %splicit tail call for call ", explicitTailCall ? "ex" : "im");
7832 printf(": %s\n", szCanTailCallFailReason);
7835 info.compCompHnd->reportTailCallDecision(info.compMethodHnd, methHnd, explicitTailCall, TAILCALL_FAIL,
7836 szCanTailCallFailReason);
7840 // Note: we assume that small return types are already normalized by the managed callee
7841 // or by the pinvoke stub for calls to unmanaged code.
7843 if (!bIntrinsicImported)
7846 // Things needed to be checked when bIntrinsicImported is false.
7849 assert(call->gtOper == GT_CALL);
7850 assert(sig != nullptr);
7852 // Tail calls require us to save the call site's sig info so we can obtain an argument
7853 // copying thunk from the EE later on.
7854 if (call->gtCall.callSig == nullptr)
7856 call->gtCall.callSig = new (this, CMK_CorSig) CORINFO_SIG_INFO;
7857 *call->gtCall.callSig = *sig;
7860 if (compIsForInlining() && opcode == CEE_CALLVIRT)
7862 GenTreePtr callObj = call->gtCall.gtCallObjp;
7863 assert(callObj != nullptr);
7865 unsigned callKind = call->gtFlags & GTF_CALL_VIRT_KIND_MASK;
7867 if (((callKind != GTF_CALL_NONVIRT) || (call->gtFlags & GTF_CALL_NULLCHECK)) &&
7868 impInlineIsGuaranteedThisDerefBeforeAnySideEffects(call->gtCall.gtCallArgs, callObj,
7869 impInlineInfo->inlArgInfo))
7871 impInlineInfo->thisDereferencedFirst = true;
7875 #if defined(DEBUG) || defined(INLINE_DATA)
7877 // Keep track of the raw IL offset of the call
7878 call->gtCall.gtRawILOffset = rawILOffset;
7880 #endif // defined(DEBUG) || defined(INLINE_DATA)
7882 // Is it an inline candidate?
7883 impMarkInlineCandidate(call, exactContextHnd, exactContextNeedsRuntimeLookup, callInfo);
7887 // Push or append the result of the call
7888 if (callRetTyp == TYP_VOID)
7890 if (opcode == CEE_NEWOBJ)
7892 // we actually did push something, so don't spill the thing we just pushed.
7893 assert(verCurrentState.esStackDepth > 0);
7894 impAppendTree(call, verCurrentState.esStackDepth - 1, impCurStmtOffs);
7898 impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
7903 impSpillSpecialSideEff();
7905 if (clsFlags & CORINFO_FLG_ARRAY)
7907 eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
7910 // Find the return type used for verification by interpreting the method signature.
7911 // NB: we are clobbering the already established sig.
7912 if (tiVerificationNeeded)
7914 // Actually, we never get the sig for the original method.
7915 sig = &(callInfo->verSig);
7918 typeInfo tiRetVal = verMakeTypeInfo(sig->retType, sig->retTypeClass);
7919 tiRetVal.NormaliseForStack();
7921 // The CEE_READONLY prefix modifies the verification semantics of an Address
7922 // operation on an array type.
7923 if ((clsFlags & CORINFO_FLG_ARRAY) && readonlyCall && tiRetVal.IsByRef())
7925 tiRetVal.SetIsReadonlyByRef();
7928 if (tiVerificationNeeded)
7930 // We assume all calls return permanent home byrefs. If they
7931 // didn't they wouldn't be verifiable. This is also covering
7932 // the Address() helper for multidimensional arrays.
7933 if (tiRetVal.IsByRef())
7935 tiRetVal.SetIsPermanentHomeByRef();
7941 // Sometimes "call" is not a GT_CALL (if we imported an intrinsic that didn't turn into a call)
7943 bool fatPointerCandidate = call->AsCall()->IsFatPointerCandidate();
7944 if (varTypeIsStruct(callRetTyp))
7946 call = impFixupCallStructReturn(call->AsCall(), sig->retTypeClass);
7949 if ((call->gtFlags & GTF_CALL_INLINE_CANDIDATE) != 0)
7951 assert(opts.OptEnabled(CLFLG_INLINING));
7952 assert(!fatPointerCandidate); // We should not try to inline calli.
7954 // Make the call its own tree (spill the stack if needed).
7955 impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
7957 // TODO: Still using the widened type.
7958 call = gtNewInlineCandidateReturnExpr(call, genActualType(callRetTyp));
7962 if (fatPointerCandidate)
7964 // fatPointer candidates should be in statements of the form call() or var = call().
7965 // This form makes it possible to find statements with fat calls without walking whole trees,
7966 // and avoids problems with cutting trees.
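// E.g. (illustrative tree shapes): a top-level GT_CALL statement, or an assignment
// statement of the form GT_ASG(GT_LCL_VAR, GT_CALL(...)) such as the one created by
// impAssignTempGen below.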
7967 assert(!bIntrinsicImported);
7968 assert(IsTargetAbi(CORINFO_CORERT_ABI));
7969 if (call->OperGet() != GT_LCL_VAR) // can be already converted by impFixupCallStructReturn.
7971 unsigned calliSlot = lvaGrabTemp(true DEBUGARG("calli"));
7972 LclVarDsc* varDsc = &lvaTable[calliSlot];
7973 varDsc->lvVerTypeInfo = tiRetVal;
7974 impAssignTempGen(calliSlot, call, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_NONE);
7975 // impAssignTempGen can change src arg list and return type for call that returns struct.
7976 var_types type = genActualType(lvaTable[calliSlot].TypeGet());
7977 call = gtNewLclvNode(calliSlot, type);
7981 // For non-candidates we must also spill, since we
7982 // might have locals live on the eval stack that this call can modify.
7985 // Suppress this for certain well-known call targets
7986 // that we know won't modify locals, e.g. calls that are
7987 // recognized in gtCanOptimizeTypeEquality. Otherwise
7988 // we may break key fragile pattern matches later on.
7989 bool spillStack = true;
7992 GenTreeCall* callNode = call->AsCall();
7993 if ((callNode->gtCallType == CT_HELPER) && gtIsTypeHandleToRuntimeTypeHelper(callNode))
7997 else if ((callNode->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) != 0)
8005 impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("non-inline candidate call"));
8010 if (!bIntrinsicImported)
8012 //-------------------------------------------------------------------------
8014 /* If the call is of a small type and the callee is managed, the callee will normalize the result before returning.
8016 However, we need to normalize small type values returned by unmanaged
8017 functions (pinvoke). The pinvoke stub does the normalization, but we need to do it here
8018 if we use the shorter inlined pinvoke stub. */
8020 if (checkForSmallType && varTypeIsIntegral(callRetTyp) && genTypeSize(callRetTyp) < genTypeSize(TYP_INT))
8022 call = gtNewCastNode(genActualType(callRetTyp), call, callRetTyp);
8026 impPushOnStack(call, tiRetVal);
8029 // VSD functions get a new call target each time we getCallInfo, so clear the cache.
8030 // Also, the call info cache for CALLI instructions is largely incomplete, so clear it out.
8031 // if ( (opcode == CEE_CALLI) || (callInfoCache.fetchCallInfo().kind == CORINFO_VIRTUALCALL_STUB))
8032 // callInfoCache.uncacheCallInfo();
8037 #pragma warning(pop)
8040 bool Compiler::impMethodInfo_hasRetBuffArg(CORINFO_METHOD_INFO* methInfo)
8042 CorInfoType corType = methInfo->args.retType;
8044 if ((corType == CORINFO_TYPE_VALUECLASS) || (corType == CORINFO_TYPE_REFANY))
8046 // We have some kind of STRUCT being returned
8048 structPassingKind howToReturnStruct = SPK_Unknown;
8050 var_types returnType = getReturnTypeForStruct(methInfo->args.retTypeClass, &howToReturnStruct);
8052 if (howToReturnStruct == SPK_ByReference)
8063 var_types Compiler::impImportJitTestLabelMark(int numArgs)
8065 TestLabelAndNum tlAndN;
8069 StackEntry se = impPopStack();
8070 assert(se.seTypeInfo.GetType() == TI_INT);
8071 GenTreePtr val = se.val;
8072 assert(val->IsCnsIntOrI());
8073 tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue();
8075 else if (numArgs == 3)
8077 StackEntry se = impPopStack();
8078 assert(se.seTypeInfo.GetType() == TI_INT);
8079 GenTreePtr val = se.val;
8080 assert(val->IsCnsIntOrI());
8081 tlAndN.m_num = val->AsIntConCommon()->IconValue();
8083 assert(se.seTypeInfo.GetType() == TI_INT);
8085 assert(val->IsCnsIntOrI());
8086 tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue();
8093 StackEntry expSe = impPopStack();
8094 GenTreePtr node = expSe.val;
8096 // There are a small number of special cases, where we actually put the annotation on a subnode.
8097 if (tlAndN.m_tl == TL_LoopHoist && tlAndN.m_num >= 100)
8099 // A loop hoist annotation with value >= 100 means that the expression should be a static field access,
8100 // a GT_IND of a static field address, which should be the sum of a (hoistable) helper call and possibly some
8101 // offset within the static field block whose address is returned by the helper call.
8102 // The annotation is saying that this address calculation, but not the entire access, should be hoisted.
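// E.g. (illustrative): for node = GT_IND(GT_ADD(helperCall, offset)), the annotation
// is moved from the GT_IND onto its address operand below.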
8103 GenTreePtr helperCall = nullptr;
8104 assert(node->OperGet() == GT_IND);
8105 tlAndN.m_num -= 100;
8106 GetNodeTestData()->Set(node->gtOp.gtOp1, tlAndN);
8107 GetNodeTestData()->Remove(node);
8111 GetNodeTestData()->Set(node, tlAndN);
8114 impPushOnStack(node, expSe.seTypeInfo);
8115 return node->TypeGet();
8119 //-----------------------------------------------------------------------------------
8120 // impFixupCallStructReturn: For a call node that returns a struct type either
8121 // adjust the return type to an enregisterable type, or set the flag to indicate
8122 // struct return via retbuf arg.
8125 // call - GT_CALL GenTree node
8126 // retClsHnd - Class handle of return type of the call
8129 // Returns new GenTree node after fixing struct return of call node
8131 GenTreePtr Compiler::impFixupCallStructReturn(GenTreeCall* call, CORINFO_CLASS_HANDLE retClsHnd)
8133 if (!varTypeIsStruct(call))
8138 call->gtRetClsHnd = retClsHnd;
8140 #if FEATURE_MULTIREG_RET
8141 // Initialize Return type descriptor of call node
8142 ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
8143 retTypeDesc->InitializeStructReturnType(this, retClsHnd);
8144 #endif // FEATURE_MULTIREG_RET
8146 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
8148 // Not allowed for FEATURE_CORECLR, which is the only SKU available for System V OSs.
8149 assert(!call->IsVarargs() && "varargs not allowed for System V OSs.");
8151 // The return type will remain as the incoming struct type unless normalized to a
8152 // single eightbyte return type below.
8153 call->gtReturnType = call->gtType;
8155 unsigned retRegCount = retTypeDesc->GetReturnRegCount();
8156 if (retRegCount != 0)
8158 if (retRegCount == 1)
8160 // struct returned in a single register
8161 call->gtReturnType = retTypeDesc->GetReturnRegType(0);
8165 // must be a struct returned in two registers
8166 assert(retRegCount == 2);
8168 if ((!call->CanTailCall()) && (!call->IsInlineCandidate()))
8170 // Force a call returning a multi-reg struct to be always of the IR form: tmp = call
8173 // No need to assign a multi-reg struct to a local var if:
8174 // - It is a tail call or
8175 // - The call is marked for in-lining later
8176 return impAssignMultiRegTypeToVar(call, retClsHnd);
8182 // struct not returned in registers, i.e. returned via hidden retbuf arg.
8183 call->gtCallMoreFlags |= GTF_CALL_M_RETBUFFARG;
8186 #else // not FEATURE_UNIX_AMD64_STRUCT_PASSING
8188 #if FEATURE_MULTIREG_RET && defined(_TARGET_ARM_)
8189 // There is no fixup necessary if the return type is a HFA struct.
8190 // HFA structs are returned in registers for ARM32 and ARM64
8192 if (!call->IsVarargs() && IsHfa(retClsHnd))
8194 if (call->CanTailCall())
8196 if (info.compIsVarArgs)
8198 // We cannot tail call because control needs to return to fixup the calling
8199 // convention for result return.
8200 call->gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
8204 // If we can tail call returning HFA, then don't assign it to
8205 // a variable back and forth.
8210 if (call->gtFlags & GTF_CALL_INLINE_CANDIDATE)
8215 unsigned retRegCount = retTypeDesc->GetReturnRegCount();
8216 if (retRegCount >= 2)
8218 return impAssignMultiRegTypeToVar(call, retClsHnd);
8221 #endif // _TARGET_ARM_
8223 // Check for TYP_STRUCT type that wraps a primitive type
8224 // Such structs are returned using a single register
8225 // and we change the return type on those calls here.
8227 structPassingKind howToReturnStruct;
8228 var_types returnType = getReturnTypeForStruct(retClsHnd, &howToReturnStruct);
8230 if (howToReturnStruct == SPK_ByReference)
8232 assert(returnType == TYP_UNKNOWN);
8233 call->gtCallMoreFlags |= GTF_CALL_M_RETBUFFARG;
8237 assert(returnType != TYP_UNKNOWN);
8238 call->gtReturnType = returnType;
8240 // ToDo: Refactor this common code sequence into its own method as it is used 4+ times
8241 if ((returnType == TYP_LONG) && (compLongUsed == false))
8243 compLongUsed = true;
8245 else if (((returnType == TYP_FLOAT) || (returnType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
8247 compFloatingPointUsed = true;
8250 #if FEATURE_MULTIREG_RET
8251 unsigned retRegCount = retTypeDesc->GetReturnRegCount();
8252 assert(retRegCount != 0);
8254 if (retRegCount >= 2)
8256 if ((!call->CanTailCall()) && (!call->IsInlineCandidate()))
8258 // Force a call returning a multi-reg struct to be always of the IR form: tmp = call
8261 // No need to assign a multi-reg struct to a local var if:
8262 // - It is a tail call or
8263 // - The call is marked for in-lining later
8264 return impAssignMultiRegTypeToVar(call, retClsHnd);
8267 #endif // FEATURE_MULTIREG_RET
8270 #endif // not FEATURE_UNIX_AMD64_STRUCT_PASSING
8275 /*****************************************************************************
8276 For struct return values, re-type the operand in the case where the ABI
8277 does not use a struct return buffer.
8278 Note that this method is only called for !_TARGET_X86_.
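   For example (a minimal illustration): on x64, an 8-byte struct wrapping two ints fits
   in a single return register, so the operand is retyped from TYP_STRUCT to the
   corresponding integral type (TYP_LONG) rather than going through a return buffer.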
8281 GenTreePtr Compiler::impFixupStructReturnType(GenTreePtr op, CORINFO_CLASS_HANDLE retClsHnd)
8283 assert(varTypeIsStruct(info.compRetType));
8284 assert(info.compRetBuffArg == BAD_VAR_NUM);
8286 #if defined(_TARGET_XARCH_)
8288 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
8289 // No VarArgs for CoreCLR on x64 Unix
8290 assert(!info.compIsVarArgs);
8292 // Is method returning a multi-reg struct?
8293 if (varTypeIsStruct(info.compRetNativeType) && IsMultiRegReturnedType(retClsHnd))
8295 // In case of multi-reg struct return, we force IR to be one of the following:
8296 // GT_RETURN(lclvar) or GT_RETURN(call). If op is anything other than a
8297 // lclvar or call, it is assigned to a temp to create: temp = op and GT_RETURN(tmp).
8299 if (op->gtOper == GT_LCL_VAR)
8301 // Make sure that this struct stays in memory and doesn't get promoted.
8302 unsigned lclNum = op->gtLclVarCommon.gtLclNum;
8303 lvaTable[lclNum].lvIsMultiRegRet = true;
8305 // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
8306 op->gtFlags |= GTF_DONT_CSE;
8311 if (op->gtOper == GT_CALL)
8316 return impAssignMultiRegTypeToVar(op, retClsHnd);
8318 #else // !FEATURE_UNIX_AMD64_STRUCT_PASSING
8319 assert(info.compRetNativeType != TYP_STRUCT);
8320 #endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING
8322 #elif FEATURE_MULTIREG_RET && defined(_TARGET_ARM_)
8324 if (varTypeIsStruct(info.compRetNativeType) && !info.compIsVarArgs && IsHfa(retClsHnd))
8326 if (op->gtOper == GT_LCL_VAR)
8328 // This LCL_VAR is an HFA return value; it stays as a TYP_STRUCT
8329 unsigned lclNum = op->gtLclVarCommon.gtLclNum;
8330 // Make sure this struct type stays as struct so that we can return it as an HFA
8331 lvaTable[lclNum].lvIsMultiRegRet = true;
8333 // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
8334 op->gtFlags |= GTF_DONT_CSE;
8339 if (op->gtOper == GT_CALL)
8341 if (op->gtCall.IsVarargs())
8343 // We cannot tail call because control needs to return to fixup the calling
8344 // convention for result return.
8345 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL;
8346 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
8353 return impAssignMultiRegTypeToVar(op, retClsHnd);
8356 #elif FEATURE_MULTIREG_RET && defined(_TARGET_ARM64_)
8358 // Is method returning a multi-reg struct?
8359 if (IsMultiRegReturnedType(retClsHnd))
8361 if (op->gtOper == GT_LCL_VAR)
8363 // This LCL_VAR stays as a TYP_STRUCT
8364 unsigned lclNum = op->gtLclVarCommon.gtLclNum;
8366 // Make sure this struct type is not struct promoted
8367 lvaTable[lclNum].lvIsMultiRegRet = true;
8369 // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
8370 op->gtFlags |= GTF_DONT_CSE;
8375 if (op->gtOper == GT_CALL)
8377 if (op->gtCall.IsVarargs())
8379 // We cannot tail call because control needs to return to fixup the calling
8380 // convention for result return.
8381 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL;
8382 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
8389 return impAssignMultiRegTypeToVar(op, retClsHnd);
8392 #endif // FEATURE_MULTIREG_RET && FEATURE_HFA
8394 REDO_RETURN_NODE:
8395 // adjust the type away from struct to integral
8396 // with no normalizing
8397 if (op->gtOper == GT_LCL_VAR)
8399 op->ChangeOper(GT_LCL_FLD);
8401 else if (op->gtOper == GT_OBJ)
8403 GenTreePtr op1 = op->AsObj()->Addr();
8405 // We will fold away OBJ/ADDR
8406 // except for OBJ/ADDR/INDEX
8407 // as the array type influences the array element's offset
8408 // Later in this method we change op->gtType to info.compRetNativeType
8409 // This is not correct when op is a GT_INDEX as the starting offset
8410 // for the array elements 'elemOffs' is different for an array of
8411 // TYP_REF than an array of TYP_STRUCT (which simply wraps a TYP_REF)
8412 // Also refer to the GTF_INX_REFARR_LAYOUT flag
8414 if ((op1->gtOper == GT_ADDR) && (op1->gtOp.gtOp1->gtOper != GT_INDEX))
8416 // Change '*(&X)' to 'X' and see if we can do better
8417 op = op1->gtOp.gtOp1;
8418 goto REDO_RETURN_NODE;
8420 op->gtObj.gtClass = NO_CLASS_HANDLE;
8421 op->ChangeOperUnchecked(GT_IND);
8422 op->gtFlags |= GTF_IND_TGTANYWHERE;
8424 else if (op->gtOper == GT_CALL)
8426 if (op->AsCall()->TreatAsHasRetBufArg(this))
8428 // This must be one of those 'special' helpers that don't
8429 // really have a return buffer, but instead use it as a way
8430 // to keep the trees cleaner with fewer address-taken temps.
8432 // Well, now we have to materialize the return buffer as
8433 // an address-taken temp. Then we can return the temp.
8435 // NOTE: this code assumes that since the call directly
8436 // feeds the return, then the call must be returning the
8437 // same structure/class/type.
8439 unsigned tmpNum = lvaGrabTemp(true DEBUGARG("pseudo return buffer"));
8441 // No need to spill anything as we're about to return.
8442 impAssignTempGen(tmpNum, op, info.compMethodInfo->args.retTypeClass, (unsigned)CHECK_SPILL_NONE);
8444 // Don't create both a GT_ADDR & GT_OBJ just to undo all of that; instead,
8445 // jump directly to a GT_LCL_FLD.
8446 op = gtNewLclvNode(tmpNum, info.compRetNativeType);
8447 op->ChangeOper(GT_LCL_FLD);
8451 assert(info.compRetNativeType == op->gtCall.gtReturnType);
8453 // Don't change the gtType of the node just yet, it will get changed later.
8457 else if (op->gtOper == GT_COMMA)
8459 op->gtOp.gtOp2 = impFixupStructReturnType(op->gtOp.gtOp2, retClsHnd);
8462 op->gtType = info.compRetNativeType;
8467 /*****************************************************************************
8468 CEE_LEAVE may be jumping out of a protected block, viz, a catch or a
8469 finally-protected try. We find the finally blocks protecting the current
8470 offset (in order) by walking over the complete exception table and
8471 finding enclosing clauses. This assumes that the table is sorted.
8472 This will create a series of BBJ_CALLFINALLY -> BBJ_CALLFINALLY ... -> BBJ_ALWAYS.
8474 If we are leaving a catch handler, we need to attach the
8475 CPX_ENDCATCHes to the correct BBJ_CALLFINALLY blocks.
8477 After this function, the BBJ_LEAVE block has been converted to a different type.
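   For example (illustrative): a CEE_LEAVE exiting two nested finally-protected 'try'
   regions produces BBJ_CALLFINALLY (inner finally) -> BBJ_CALLFINALLY (outer finally)
   -> BBJ_ALWAYS -> leave target.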
8480 #if !FEATURE_EH_FUNCLETS
8482 void Compiler::impImportLeave(BasicBlock* block)
8487 printf("\nBefore import CEE_LEAVE:\n");
8488 fgDispBasicBlocks();
8493 bool invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created)
8494 unsigned blkAddr = block->bbCodeOffs;
8495 BasicBlock* leaveTarget = block->bbJumpDest;
8496 unsigned jmpAddr = leaveTarget->bbCodeOffs;
8498 // LEAVE clears the stack: spill side effects and set the stack depth to 0
8500 impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave"));
8501 verCurrentState.esStackDepth = 0;
8503 assert(block->bbJumpKind == BBJ_LEAVE);
8504 assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != NULL); // should be a BB boundary
8506 BasicBlock* step = DUMMY_INIT(NULL);
8507 unsigned encFinallies = 0; // Number of enclosing finallies.
8508 GenTreePtr endCatches = NULL;
8509 GenTreePtr endLFin = NULL; // The statement tree to indicate the end of locally-invoked finally.
8514 for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
8516 // Grab the handler offsets
8518 IL_OFFSET tryBeg = HBtab->ebdTryBegOffs();
8519 IL_OFFSET tryEnd = HBtab->ebdTryEndOffs();
8520 IL_OFFSET hndBeg = HBtab->ebdHndBegOffs();
8521 IL_OFFSET hndEnd = HBtab->ebdHndEndOffs();
8523 /* Is this a catch-handler we are CEE_LEAVEing out of?
8524 * If so, we need to call CORINFO_HELP_ENDCATCH.
8527 if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd))
8529 // Can't CEE_LEAVE out of a finally/fault handler
8530 if (HBtab->HasFinallyOrFaultHandler())
8531 BADCODE("leave out of fault/finally block");
8533 // Create the call to CORINFO_HELP_ENDCATCH
8534 GenTreePtr endCatch = gtNewHelperCallNode(CORINFO_HELP_ENDCATCH, TYP_VOID);
8536 // Make a list of all the currently pending endCatches
8538 endCatches = gtNewOperNode(GT_COMMA, TYP_VOID, endCatches, endCatch);
8540 endCatches = endCatch;
8545 printf("impImportLeave - BB%02u jumping out of catch handler EH#%u, adding call to "
8546 "CORINFO_HELP_ENDCATCH\n",
8547 block->bbNum, XTnum);
8551 else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
8552 !jitIsBetween(jmpAddr, tryBeg, tryEnd))
8554 /* This is a finally-protected try we are jumping out of */
8556 /* If there are any pending endCatches, and we have already
8557 jumped out of a finally-protected try, then the endCatches
8558 have to be put in a block in an outer try for async
8559 exceptions to work correctly.
8560 Else, just append them to the original block */
8562 BasicBlock* callBlock;
8564 assert(!encFinallies == !endLFin); // if we have finallies, we better have an endLFin tree, and vice-versa
8566 if (encFinallies == 0)
8568 assert(step == DUMMY_INIT(NULL));
8570 callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY
8573 impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
8578 printf("impImportLeave - jumping out of a finally-protected try, convert block to BBJ_CALLFINALLY "
8580 callBlock->dspToString());
8586 assert(step != DUMMY_INIT(NULL));
8588 /* Calling the finally block */
8589 callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, XTnum + 1, 0, step);
8590 assert(step->bbJumpKind == BBJ_ALWAYS);
8591 step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next
8592 // finally in the chain)
8593 step->bbJumpDest->bbRefs++;
8595 /* The new block will inherit this block's weight */
8596 callBlock->setBBWeight(block->bbWeight);
8597 callBlock->bbFlags |= block->bbFlags & BBF_RUN_RARELY;
8602 printf("impImportLeave - jumping out of a finally-protected try, new BBJ_CALLFINALLY block %s\n",
8603 callBlock->dspToString());
8607 GenTreePtr lastStmt;
8611 lastStmt = gtNewStmt(endCatches);
8612 endLFin->gtNext = lastStmt;
8613 lastStmt->gtPrev = endLFin;
8620 // note that this sets BBF_IMPORTED on the block
8621 impEndTreeList(callBlock, endLFin, lastStmt);
8624 step = fgNewBBafter(BBJ_ALWAYS, callBlock, true);
8625 /* The new block will inherit this block's weight */
8626 step->setBBWeight(block->bbWeight);
8627 step->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS;
8632 printf("impImportLeave - jumping out of a finally-protected try, created step (BBJ_ALWAYS) block %s\n",
8633 step->dspToString());
8637 unsigned finallyNesting = compHndBBtab[XTnum].ebdHandlerNestingLevel;
8638 assert(finallyNesting <= compHndBBtabCount);
8640 callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler.
8641 endLFin = new (this, GT_END_LFIN) GenTreeVal(GT_END_LFIN, TYP_VOID, finallyNesting);
8642 endLFin = gtNewStmt(endLFin);
8647 invalidatePreds = true;
8651 /* Append any remaining endCatches, if any */
8653 assert(!encFinallies == !endLFin);
8655 if (encFinallies == 0)
8657 assert(step == DUMMY_INIT(NULL));
8658 block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS
8661 impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
8666 printf("impImportLeave - no enclosing finally-protected try blocks; convert CEE_LEAVE block to BBJ_ALWAYS "
8668 block->dspToString());
8674 // If leaveTarget is the start of another try block, we want to make sure that
8675 // we do not insert finalStep into that try block. Hence, we find the enclosing try region.
8677 unsigned tryIndex = bbFindInnermostCommonTryRegion(step, leaveTarget);
8679 // Insert a new BB either in the try region indicated by tryIndex or
8680 // the handler region indicated by leaveTarget->bbHndIndex,
8681 // depending on which is the inner region.
8682 BasicBlock* finalStep = fgNewBBinRegion(BBJ_ALWAYS, tryIndex, leaveTarget->bbHndIndex, step);
8683 finalStep->bbFlags |= BBF_KEEP_BBJ_ALWAYS;
8684 step->bbJumpDest = finalStep;
8686 /* The new block will inherit this block's weight */
8687 finalStep->setBBWeight(block->bbWeight);
8688 finalStep->bbFlags |= block->bbFlags & BBF_RUN_RARELY;
8693 printf("impImportLeave - finalStep block required (encFinallies(%d) > 0), new block %s\n", encFinallies,
8694 finalStep->dspToString());
8698 GenTreePtr lastStmt;
8702 lastStmt = gtNewStmt(endCatches);
8703 endLFin->gtNext = lastStmt;
8704 lastStmt->gtPrev = endLFin;
8711 impEndTreeList(finalStep, endLFin, lastStmt);
8713 finalStep->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE
8715 // Queue up the jump target for importing
8717 impImportBlockPending(leaveTarget);
8719 invalidatePreds = true;
8722 if (invalidatePreds && fgComputePredsDone)
8724 JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n");
8729 fgVerifyHandlerTab();
8733 printf("\nAfter import CEE_LEAVE:\n");
8734 fgDispBasicBlocks();
8740 #else // FEATURE_EH_FUNCLETS
8742 void Compiler::impImportLeave(BasicBlock* block)
8747 printf("\nBefore import CEE_LEAVE in BB%02u (targeting BB%02u):\n", block->bbNum, block->bbJumpDest->bbNum);
8748 fgDispBasicBlocks();
8753 bool invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created)
8754 unsigned blkAddr = block->bbCodeOffs;
8755 BasicBlock* leaveTarget = block->bbJumpDest;
8756 unsigned jmpAddr = leaveTarget->bbCodeOffs;
8758 // LEAVE clears the stack: spill side effects and set the stack depth to 0
8760 impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave"));
8761 verCurrentState.esStackDepth = 0;
8763 assert(block->bbJumpKind == BBJ_LEAVE);
8764 assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != nullptr); // should be a BB boundary
8766 BasicBlock* step = nullptr;
8770 // No step type; step == NULL.
8773 // Is the step block the BBJ_ALWAYS block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair?
8774 // That is, is step->bbJumpDest where a finally will return to?
8777 // The step block is a catch return.
8780 // The step block is in a "try", created as the target for a finally return or the target for a catch return.
8783 StepType stepType = ST_None;
8788 for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
8790 // Grab the handler offsets
8792 IL_OFFSET tryBeg = HBtab->ebdTryBegOffs();
8793 IL_OFFSET tryEnd = HBtab->ebdTryEndOffs();
8794 IL_OFFSET hndBeg = HBtab->ebdHndBegOffs();
8795 IL_OFFSET hndEnd = HBtab->ebdHndEndOffs();
8797 /* Is this a catch-handler we are CEE_LEAVEing out of?
8800 if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd))
8802 // Can't CEE_LEAVE out of a finally/fault handler
8803 if (HBtab->HasFinallyOrFaultHandler())
8805 BADCODE("leave out of fault/finally block");
8808 /* We are jumping out of a catch */
8810 if (step == nullptr)
8813 step->bbJumpKind = BBJ_EHCATCHRET; // convert the BBJ_LEAVE to BBJ_EHCATCHRET
8814 stepType = ST_Catch;
8819 printf("impImportLeave - jumping out of a catch (EH#%u), convert block BB%02u to BBJ_EHCATCHRET "
8821 XTnum, step->bbNum);
8827 BasicBlock* exitBlock;
8829 /* Create a new catch exit block in the catch region for the existing step block to jump to in this scope */
8831 exitBlock = fgNewBBinRegion(BBJ_EHCATCHRET, 0, XTnum + 1, step);
8833 assert(step->bbJumpKind == BBJ_ALWAYS || step->bbJumpKind == BBJ_EHCATCHRET);
8834 step->bbJumpDest = exitBlock; // the previous step (maybe a call to a nested finally, or a nested catch
8835 // exit) returns to this block
8836 step->bbJumpDest->bbRefs++;
8838 #if defined(_TARGET_ARM_)
8839 if (stepType == ST_FinallyReturn)
8841 assert(step->bbJumpKind == BBJ_ALWAYS);
8842 // Mark the target of a finally return
8843 step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
8845 #endif // defined(_TARGET_ARM_)
8847 /* The new block will inherit this block's weight */
8848 exitBlock->setBBWeight(block->bbWeight);
8849 exitBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8851 /* This exit block is the new step */
8853 stepType = ST_Catch;
8855 invalidatePreds = true;
8860 printf("impImportLeave - jumping out of a catch (EH#%u), new BBJ_EHCATCHRET block BB%02u\n", XTnum,
8866 else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
8867 !jitIsBetween(jmpAddr, tryBeg, tryEnd))
8869 /* We are jumping out of a finally-protected try */
8871 BasicBlock* callBlock;
8873 if (step == nullptr)
8875 #if FEATURE_EH_CALLFINALLY_THUNKS
8877 // Put the call to the finally in the enclosing region.
8878 unsigned callFinallyTryIndex =
8879 (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1;
8880 unsigned callFinallyHndIndex =
8881 (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1;
8882 callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, block);
8884 // Convert the BBJ_LEAVE to BBJ_ALWAYS, jumping to the new BBJ_CALLFINALLY. This is because
8885 // the new BBJ_CALLFINALLY is in a different EH region, thus it can't just replace the BBJ_LEAVE,
8886 // which might be in the middle of the "try". In most cases, the BBJ_ALWAYS will jump to the
8887 // next block, and flow optimizations will remove it.
8888 block->bbJumpKind = BBJ_ALWAYS;
8889 block->bbJumpDest = callBlock;
8890 block->bbJumpDest->bbRefs++;
8892 /* The new block will inherit this block's weight */
8893 callBlock->setBBWeight(block->bbWeight);
8894 callBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8899 printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block BB%02u to "
8900 "BBJ_ALWAYS, add BBJ_CALLFINALLY block BB%02u\n",
8901 XTnum, block->bbNum, callBlock->bbNum);
8905 #else // !FEATURE_EH_CALLFINALLY_THUNKS
8908 callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY
8913 printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block BB%02u to "
8914 "BBJ_CALLFINALLY block\n",
8915 XTnum, callBlock->bbNum);
8919 #endif // !FEATURE_EH_CALLFINALLY_THUNKS
8923 // Calling the finally block. We already have a step block that is either the call-to-finally from a
8924 // more nested try/finally (thus we are jumping out of multiple nested 'try' blocks, each protected by
8925 // a 'finally'), or the step block is the return from a catch.
8927 // Due to ThreadAbortException, we can't have the catch return target the call-to-finally block
8928 // directly. Note that if a 'catch' ends without resetting the ThreadAbortException, the VM will
8929 // automatically re-raise the exception, using the return address of the catch (that is, the target
8930 // block of the BBJ_EHCATCHRET) as the re-raise address. If this address is in a finally, the VM will
8931 // refuse to do the re-raise, and the ThreadAbortException will get eaten (and lost). On AMD64/ARM64,
8932 // we put the call-to-finally thunk in a special "cloned finally" EH region that does look like a
8933 // finally clause to the VM. Thus, on these platforms, we can't have BBJ_EHCATCHRET target a
8934 // BBJ_CALLFINALLY directly. (Note that on ARM32, we don't mark the thunk specially -- it lives directly
8935 // within the 'try' region protected by the finally, since we generate code in such a way that execution
8936 // never returns to the call-to-finally call, and the finally-protected 'try' region doesn't appear on the call stack.)
8939 assert(step->bbJumpKind == BBJ_ALWAYS || step->bbJumpKind == BBJ_EHCATCHRET);
8941 #if FEATURE_EH_CALLFINALLY_THUNKS
8942 if (step->bbJumpKind == BBJ_EHCATCHRET)
8944 // Need to create another step block in the 'try' region that will actually branch to the
8945 // call-to-finally thunk.
8946 BasicBlock* step2 = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step);
8947 step->bbJumpDest = step2;
8948 step->bbJumpDest->bbRefs++;
8949 step2->setBBWeight(block->bbWeight);
8950 step2->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8955 printf("impImportLeave - jumping out of a finally-protected try (EH#%u), step block is "
8956 "BBJ_EHCATCHRET (BB%02u), new BBJ_ALWAYS step-step block BB%02u\n",
8957 XTnum, step->bbNum, step2->bbNum);
8962 assert(stepType == ST_Catch); // Leave it as catch type for now.
8964 #endif // FEATURE_EH_CALLFINALLY_THUNKS
8966 #if FEATURE_EH_CALLFINALLY_THUNKS
8967 unsigned callFinallyTryIndex =
8968 (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1;
8969 unsigned callFinallyHndIndex =
8970 (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1;
8971 #else // !FEATURE_EH_CALLFINALLY_THUNKS
8972 unsigned callFinallyTryIndex = XTnum + 1;
8973 unsigned callFinallyHndIndex = 0; // don't care
8974 #endif // !FEATURE_EH_CALLFINALLY_THUNKS
8976 callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, step);
8977 step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next
8978 // finally in the chain)
8979 step->bbJumpDest->bbRefs++;
8981 #if defined(_TARGET_ARM_)
8982 if (stepType == ST_FinallyReturn)
8984 assert(step->bbJumpKind == BBJ_ALWAYS);
8985 // Mark the target of a finally return
8986 step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
8988 #endif // defined(_TARGET_ARM_)
8990 /* The new block will inherit this block's weight */
8991 callBlock->setBBWeight(block->bbWeight);
8992 callBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8997 printf("impImportLeave - jumping out of a finally-protected try (EH#%u), new BBJ_CALLFINALLY block "
8999 XTnum, callBlock->bbNum);
9004 step = fgNewBBafter(BBJ_ALWAYS, callBlock, true);
9005 stepType = ST_FinallyReturn;
9007 /* The new block will inherit this block's weight */
9008 step->setBBWeight(block->bbWeight);
9009 step->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS;
9014 printf("impImportLeave - jumping out of a finally-protected try (EH#%u), created step (BBJ_ALWAYS) "
9016 XTnum, step->bbNum);
9020 callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler.
9022 invalidatePreds = true;
9024 else if (HBtab->HasCatchHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
9025 !jitIsBetween(jmpAddr, tryBeg, tryEnd))
9027 // We are jumping out of a catch-protected try.
9029 // If we are returning from a call to a finally, then we must have a step block within a try
9030 // that is protected by a catch. This is so when unwinding from that finally (e.g., if code within the
9031 // finally raises an exception), the VM will find this step block, notice that it is in a protected region,
9032 // and invoke the appropriate catch.
9034 // We also need to handle a special case with the handling of ThreadAbortException. If a try/catch
9035 // catches a ThreadAbortException (which might be because it catches a parent, e.g. System.Exception),
9036 // and the catch doesn't call System.Threading.Thread::ResetAbort(), then when the catch returns to the VM,
9037 // the VM will automatically re-raise the ThreadAbortException. When it does this, it uses the target
9038 // address of the catch return as the new exception address. That is, the re-raised exception appears to
9039 // occur at the catch return address. If this exception return address skips an enclosing try/catch that
9040 // catches ThreadAbortException, then the enclosing try/catch will not catch the exception, as it should.
9045 // // something here raises ThreadAbortException
9046 // LEAVE LABEL_1; // no need to stop at LABEL_2
9047 // } catch (Exception) {
9048 // // This catches ThreadAbortException, but doesn't call System.Threading.Thread::ResetAbort(), so
9049 // // ThreadAbortException is re-raised by the VM at the address specified by the LEAVE opcode.
9050 // // This is bad, since it means the outer try/catch won't get a chance to catch the re-raised
9051 // // ThreadAbortException. So, instead, create step block LABEL_2 and LEAVE to that. We only
9052 // // need to do this transformation if the current EH block is a try/catch that catches
9053 // // ThreadAbortException (or one of its parents), however we might not be able to find that
9054 // // information, so currently we do it for all catch types.
9055 // LEAVE LABEL_1; // Convert this to LEAVE LABEL2;
9057 // LABEL_2: LEAVE LABEL_1; // inserted by this step creation code
9058 // } catch (ThreadAbortException) {
9062 // Note that this pattern isn't theoretical: it occurs in ASP.NET, in IL code generated by the Roslyn C# compiler.
9065 if ((stepType == ST_FinallyReturn) || (stepType == ST_Catch))
9067 BasicBlock* catchStep;
9071 if (stepType == ST_FinallyReturn)
9073 assert(step->bbJumpKind == BBJ_ALWAYS);
9077 assert(stepType == ST_Catch);
9078 assert(step->bbJumpKind == BBJ_EHCATCHRET);
9081 /* Create a new exit block in the try region for the existing step block to jump to in this scope */
9082 catchStep = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step);
9083 step->bbJumpDest = catchStep;
9084 step->bbJumpDest->bbRefs++;
9086 #if defined(_TARGET_ARM_)
9087 if (stepType == ST_FinallyReturn)
9089 // Mark the target of a finally return
9090 step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
9092 #endif // defined(_TARGET_ARM_)
9094 /* The new block will inherit this block's weight */
9095 catchStep->setBBWeight(block->bbWeight);
9096 catchStep->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
9101 if (stepType == ST_FinallyReturn)
9103 printf("impImportLeave - return from finally jumping out of a catch-protected try (EH#%u), new "
9104 "BBJ_ALWAYS block BB%02u\n",
9105 XTnum, catchStep->bbNum);
9109 assert(stepType == ST_Catch);
9110 printf("impImportLeave - return from catch jumping out of a catch-protected try (EH#%u), new "
9111 "BBJ_ALWAYS block BB%02u\n",
9112 XTnum, catchStep->bbNum);
9117 /* This block is the new step */
9121 invalidatePreds = true;
9126 if (step == nullptr)
9128 block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS
9133 printf("impImportLeave - no enclosing finally-protected try blocks or catch handlers; convert CEE_LEAVE "
9134 "block BB%02u to BBJ_ALWAYS\n",
9141 step->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE
9143 #if defined(_TARGET_ARM_)
9144 if (stepType == ST_FinallyReturn)
9146 assert(step->bbJumpKind == BBJ_ALWAYS);
9147 // Mark the target of a finally return
9148 step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
9150 #endif // defined(_TARGET_ARM_)
9155 printf("impImportLeave - final destination of step blocks set to BB%02u\n", leaveTarget->bbNum);
9159 // Queue up the jump target for importing
9161 impImportBlockPending(leaveTarget);
9164 if (invalidatePreds && fgComputePredsDone)
9166 JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n");
9171 fgVerifyHandlerTab();
9175 printf("\nAfter import CEE_LEAVE:\n");
9176 fgDispBasicBlocks();
9182 #endif // FEATURE_EH_FUNCLETS
9184 /*****************************************************************************/
9185 // This is called when reimporting a leave block. It resets the JumpKind,
9186 // JumpDest, and bbNext to the original values
9188 void Compiler::impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr)
9190 #if FEATURE_EH_FUNCLETS
9191 // With EH funclets, while importing a leave opcode we create another block ending with BBJ_ALWAYS (call it B1),
9192 // and the block containing the leave (say B0) is marked as BBJ_CALLFINALLY. Say for some reason we reimport B0:
9193 // it is reset (in this routine) by marking it as ending with BBJ_LEAVE, and further down, when B0 is reimported, we
9194 // create another BBJ_ALWAYS (call it B2). In this process B1 gets orphaned, and any blocks for which B1 is the
9195 // only predecessor are also considered orphans, which the JIT then attempts to delete.
9202 // leave OUTSIDE; // B0 is the block containing this leave, following this would be B1
9207 // In the above nested try-finally example, we create a step block (call it Bstep) which branches to a block
9208 // that a finally would branch to (such a block is marked as a finally target). Block B1 branches to the step block.
9209 // Because of re-import of B0, Bstep is also orphaned. Since Bstep is a finally target it cannot be removed. To
9210 // work around this we will duplicate B0 (call it B0Dup) before resetting. B0Dup is marked as BBJ_CALLFINALLY and
9211 // only serves to pair up with B1 (BBJ_ALWAYS) that got orphaned. Now during orphan block deletion B0Dup and B1
9212 // will be treated as a pair and handled correctly.
9213 if (block->bbJumpKind == BBJ_CALLFINALLY)
9215 BasicBlock* dupBlock = bbNewBasicBlock(block->bbJumpKind);
9216 dupBlock->bbFlags = block->bbFlags;
9217 dupBlock->bbJumpDest = block->bbJumpDest;
9218 dupBlock->copyEHRegion(block);
9219 dupBlock->bbCatchTyp = block->bbCatchTyp;
9221 // Mark this block as
9222 // a) not referenced by any other block to make sure that it gets deleted
9223 // b) weight zero
9224 // c) prevent from being imported
9225 // d) as internal
9226 // e) as rarely run
9227 dupBlock->bbRefs = 0;
9228 dupBlock->bbWeight = 0;
9229 dupBlock->bbFlags |= BBF_IMPORTED | BBF_INTERNAL | BBF_RUN_RARELY;
9231 // Insert the block right after the block which is getting reset so that BBJ_CALLFINALLY and BBJ_ALWAYS
9232 // will be next to each other.
9233 fgInsertBBafter(block, dupBlock);
9238 printf("New Basic Block BB%02u duplicate of BB%02u created.\n", dupBlock->bbNum, block->bbNum);
9242 #endif // FEATURE_EH_FUNCLETS
9244 block->bbJumpKind = BBJ_LEAVE;
9246 block->bbJumpDest = fgLookupBB(jmpAddr);
9248 // We will leave the BBJ_ALWAYS block we introduced. When it's reimported
9249 // the BBJ_ALWAYS block will be unreachable, and will be removed after. The
9250 // reason we don't want to remove the block at this point is that if we call
9251 // fgInitBBLookup() again we will do it wrong as the BBJ_ALWAYS block won't be
9252 // added and the linked list length will be different than fgBBcount.
9255 /*****************************************************************************/
9256 // Get the first non-prefix opcode. Used for verification of valid combinations
9257 // of prefixes and actual opcodes.
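// For example (illustrative): given the IL sequence "unaligned. volatile. ldind.i4",
// this walks past both prefixes and returns CEE_LDIND_I4.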
9259 static OPCODE impGetNonPrefixOpcode(const BYTE* codeAddr, const BYTE* codeEndp)
9261 while (codeAddr < codeEndp)
9263 OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr);
9264 codeAddr += sizeof(__int8);
9266 if (opcode == CEE_PREFIX1)
9268 if (codeAddr >= codeEndp)
9272 opcode = (OPCODE)(getU1LittleEndian(codeAddr) + 256);
9273 codeAddr += sizeof(__int8);
9281 case CEE_CONSTRAINED:
9288 codeAddr += opcodeSizes[opcode];
9294 /*****************************************************************************/
9295 // Checks whether the opcode is a valid opcode for volatile. and unaligned. prefixes
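// For example: "volatile. ldsfld" passes this check, while "unaligned. ldsfld" does not,
// since the unaligned. prefix is not meaningful for a static field access.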
9297 static void impValidateMemoryAccessOpcode(const BYTE* codeAddr, const BYTE* codeEndp, bool volatilePrefix)
9299 OPCODE opcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
9302 // The opcodes of all the ldind and stind instructions happen to be contiguous, except stind.i.
9303 ((CEE_LDIND_I1 <= opcode) && (opcode <= CEE_STIND_R8)) || (opcode == CEE_STIND_I) ||
9304 (opcode == CEE_LDFLD) || (opcode == CEE_STFLD) || (opcode == CEE_LDOBJ) || (opcode == CEE_STOBJ) ||
9305 (opcode == CEE_INITBLK) || (opcode == CEE_CPBLK) ||
9306 // volatile. prefix is allowed with the ldsfld and stsfld
9307 (volatilePrefix && ((opcode == CEE_LDSFLD) || (opcode == CEE_STSFLD)))))
9309 BADCODE("Invalid opcode for unaligned. or volatile. prefix");
9313 /*****************************************************************************/
9317 #undef RETURN // undef contracts RETURN macro
9332 const static controlFlow_t controlFlow[] = {
9333 #define OPDEF(c, s, pop, push, args, type, l, s1, s2, flow) flow,
9334 #include "opcode.def"
9340 /*****************************************************************************
9341 * Determine the result type of an arithmetic operation
9342 * On 64-bit inserts upcasts when native int is mixed with int32
9344 var_types Compiler::impGetByRefResultType(genTreeOps oper, bool fUnsigned, GenTreePtr* pOp1, GenTreePtr* pOp2)
9346 var_types type = TYP_UNDEF;
9347 GenTreePtr op1 = *pOp1, op2 = *pOp2;
9349 // Arithmetic operations are generally only allowed with
9350 // primitive types, but certain operations are allowed with byrefs.
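// Informal summary of the result-type rules implemented below:
//   byref - byref            => native int
//   byref +/- [native] int   => byref
//   [native] int - byref     => native int (see the VSW 318822 note below)
//   int32 op native int/long => native int/long (upcasts inserted on 64-bit targets)
//   float op double          => double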
9353 if ((oper == GT_SUB) && (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF))
9355 if ((genActualType(op1->TypeGet()) == TYP_BYREF) && (genActualType(op2->TypeGet()) == TYP_BYREF))
9357 // byref1-byref2 => gives a native int
9360 else if (genActualTypeIsIntOrI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_BYREF))
9362 // [native] int - byref => gives a native int
9365 // The reason is that it is possible, in managed C++,
9366 // to have a tree like this:
9373 // GT_ADD( const(h) int, addr byref )
9375 // <BUGNUM> VSW 318822 </BUGNUM>
9377 // So here we decide to make the resulting type a native int.
9378 CLANG_FORMAT_COMMENT_ANCHOR;
9380 #ifdef _TARGET_64BIT_
9381 if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
9383 // insert an explicit upcast
9384 op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9386 #endif // _TARGET_64BIT_
9392 // byref - [native] int => gives a byref
9393 assert(genActualType(op1->TypeGet()) == TYP_BYREF && genActualTypeIsIntOrI(op2->TypeGet()));
9395 #ifdef _TARGET_64BIT_
9396 if ((genActualType(op2->TypeGet()) != TYP_I_IMPL))
9398 // insert an explicit upcast
9399 op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9401 #endif // _TARGET_64BIT_
9406 else if ((oper == GT_ADD) &&
9407 (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF))
9409 // byref + [native] int => gives a byref
9411 // [native] int + byref => gives a byref
9413 // only one can be a byref : byref op byref not allowed
9414 assert(genActualType(op1->TypeGet()) != TYP_BYREF || genActualType(op2->TypeGet()) != TYP_BYREF);
9415 assert(genActualTypeIsIntOrI(op1->TypeGet()) || genActualTypeIsIntOrI(op2->TypeGet()));
9417 #ifdef _TARGET_64BIT_
9418 if (genActualType(op2->TypeGet()) == TYP_BYREF)
9420 if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
9422 // insert an explicit upcast
9423 op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9426 else if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
9428 // insert an explicit upcast
9429 op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9431 #endif // _TARGET_64BIT_
9435 #ifdef _TARGET_64BIT_
9436 else if (genActualType(op1->TypeGet()) == TYP_I_IMPL || genActualType(op2->TypeGet()) == TYP_I_IMPL)
9438 assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType));
9440 // int + long => gives long
9441 // long + int => gives long
9442 // we get this because in the IL the long isn't Int64, it's just IntPtr
9444 if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
9446 // insert an explicit upcast
9447 op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9449 else if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
9451 // insert an explicit upcast
9452 op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9457 #else // 32-bit TARGET
9458 else if (genActualType(op1->TypeGet()) == TYP_LONG || genActualType(op2->TypeGet()) == TYP_LONG)
9460 assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType));
9462 // int + long => gives long
9463 // long + int => gives long
9467 #endif // _TARGET_64BIT_
9470 // int + int => gives an int
9471 assert(genActualType(op1->TypeGet()) != TYP_BYREF && genActualType(op2->TypeGet()) != TYP_BYREF);
9473 assert(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
9474 varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
9476 type = genActualType(op1->gtType);
9478 #if FEATURE_X87_DOUBLES
9480 // For x87, since we only have 1 size of registers, prefer double
9481 // For everybody else, be more precise
9482 if (type == TYP_FLOAT)
9485 #else // !FEATURE_X87_DOUBLES
9487 // If both operands are TYP_FLOAT, then leave it as TYP_FLOAT.
9488 // Otherwise, turn floats into doubles
9489 if ((type == TYP_FLOAT) && (genActualType(op2->gtType) != TYP_FLOAT))
9491 assert(genActualType(op2->gtType) == TYP_DOUBLE);
9495 #endif // FEATURE_X87_DOUBLES
9498 #if FEATURE_X87_DOUBLES
9499 assert(type == TYP_BYREF || type == TYP_DOUBLE || type == TYP_LONG || type == TYP_INT);
9500 #else // FEATURE_X87_DOUBLES
9501 assert(type == TYP_BYREF || type == TYP_DOUBLE || type == TYP_FLOAT || type == TYP_LONG || type == TYP_INT);
9502 #endif // FEATURE_X87_DOUBLES
9507 //------------------------------------------------------------------------
9508 // impCastClassOrIsInstToTree: build and import castclass/isinst
9511 // op1 - value to cast
9512 // op2 - type handle for type to cast to
9513 // pResolvedToken - resolved token from the cast operation
9514 // isCastClass - true if this is a castclass, false if it is an isinst
9517 // Tree representing the cast
9520 // May expand into a series of runtime checks or a helper call.
9522 GenTreePtr Compiler::impCastClassOrIsInstToTree(GenTreePtr op1,
9524 CORINFO_RESOLVED_TOKEN* pResolvedToken,
9527 assert(op1->TypeGet() == TYP_REF);
9529 // Optimistically assume the jit should expand this as an inline test
9530 bool shouldExpandInline = true;
9532 // Profitability check.
9534 // Don't bother with inline expansion when jit is trying to
9535 // generate code quickly, or the cast is in code that won't run very
9536 // often, or the method already is pretty big.
9537 if (compCurBB->isRunRarely() || opts.compDbgCode || opts.MinOpts())
9539 // not worth the code expansion if jitting fast or in a rarely run block
9540 shouldExpandInline = false;
9542 else if ((op1->gtFlags & GTF_GLOB_EFFECT) && lvaHaveManyLocals())
9544 // not worth creating an untracked local variable
9545 shouldExpandInline = false;
9548 // Pessimistically assume the jit cannot expand this as an inline test
9549 bool canExpandInline = false;
9550 const CorInfoHelpFunc helper = info.compCompHnd->getCastingHelper(pResolvedToken, isCastClass);
9554 // Not all castclass/isinst operations can be inline expanded.
9555 // Check legality only if an inline expansion is desirable.
9556 if (shouldExpandInline)
9560 // Jit can only inline expand the normal CHKCASTCLASS helper.
9561 canExpandInline = (helper == CORINFO_HELP_CHKCASTCLASS);
9565 if (helper == CORINFO_HELP_ISINSTANCEOFCLASS)
9567 // Check the class attributes.
9568 DWORD flags = info.compCompHnd->getClassAttribs(pResolvedToken->hClass);
9570 // If the class is final and is not marshal byref or
9571 // contextful, the jit can expand the IsInst check inline.
9572 DWORD flagsMask = CORINFO_FLG_FINAL | CORINFO_FLG_MARSHAL_BYREF | CORINFO_FLG_CONTEXTFUL;
9573 canExpandInline = ((flags & flagsMask) == CORINFO_FLG_FINAL);
9578 const bool expandInline = canExpandInline && shouldExpandInline;
9582 JITDUMP("\nExpanding %s as call because %s\n", isCastClass ? "castclass" : "isinst",
9583 canExpandInline ? "want smaller code or faster jitting" : "inline expansion not legal");
9585 // If we CSE this class handle we prevent assertionProp from making SubType assertions
9586 // so instead we force the CSE logic to not consider CSE-ing this class handle.
9588 op2->gtFlags |= GTF_DONT_CSE;
9590 return gtNewHelperCallNode(helper, TYP_REF, gtNewArgList(op2, op1));
9593 JITDUMP("\nExpanding %s inline\n", isCastClass ? "castclass" : "isinst");
9595 impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark2"));
9600 // expand the methodtable match:
9604 // GT_IND op2 (typically CNS_INT)
9609 // This can replace op1 with a GT_COMMA that evaluates op1 into a local
9611 op1 = impCloneExpr(op1, &temp, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("CASTCLASS eval op1"));
9613 // op1 is now known to be a non-complex tree
9614 // thus we can use gtClone(op1) from now on
9617 GenTreePtr op2Var = op2;
9620 op2Var = fgInsertCommaFormTemp(&op2);
9621 lvaTable[op2Var->AsLclVarCommon()->GetLclNum()].lvIsCSE = true;
9623 temp = gtNewOperNode(GT_IND, TYP_I_IMPL, temp);
9624 temp->gtFlags |= GTF_EXCEPT;
9625 condMT = gtNewOperNode(GT_NE, TYP_INT, temp, op2);
9627 GenTreePtr condNull;
9629 // expand the null check:
9631 // condNull ==> GT_EQ
9636 condNull = gtNewOperNode(GT_EQ, TYP_INT, gtClone(op1), gtNewIconNode(0, TYP_REF));
9639 // expand the true and false trees for the condMT
9641 GenTreePtr condFalse = gtClone(op1);
9642 GenTreePtr condTrue;
9646 // use the special helper that skips the cases checked by our inlined cast
9648 const CorInfoHelpFunc specialHelper = CORINFO_HELP_CHKCASTCLASS_SPECIAL;
9650 condTrue = gtNewHelperCallNode(specialHelper, TYP_REF, gtNewArgList(op2Var, gtClone(op1)));
9654 condTrue = gtNewIconNode(0, TYP_REF);
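// A sketch (pseudocode, not actual GenTree syntax) of the tree built below:
//   result = (op1 == null)             ? op1
//          : (methodTable(op1) != op2) ? condTrue // helper call (castclass)
//                                                 // or null const (isinst)
//          : op1;
// so the fast paths (null object, exact method-table match) avoid the helper.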
9657 #define USE_QMARK_TREES
9659 #ifdef USE_QMARK_TREES
9662 // Generate first QMARK - COLON tree
9664 // qmarkMT ==> GT_QMARK
9668 // condFalse condTrue
9670 temp = new (this, GT_COLON) GenTreeColon(TYP_REF, condTrue, condFalse);
9671 qmarkMT = gtNewQmarkNode(TYP_REF, condMT, temp);
9672 condMT->gtFlags |= GTF_RELOP_QMARK;
9674 GenTreePtr qmarkNull;
9676 // Generate second QMARK - COLON tree
9678 // qmarkNull ==> GT_QMARK
9680 // condNull GT_COLON
9684 temp = new (this, GT_COLON) GenTreeColon(TYP_REF, gtClone(op1), qmarkMT);
9685 qmarkNull = gtNewQmarkNode(TYP_REF, condNull, temp);
9686 qmarkNull->gtFlags |= GTF_QMARK_CAST_INSTOF;
9687 condNull->gtFlags |= GTF_RELOP_QMARK;
9689 // Make QMark node a top level node by spilling it.
9690 unsigned tmp = lvaGrabTemp(true DEBUGARG("spilling QMark2"));
9691 impAssignTempGen(tmp, qmarkNull, (unsigned)CHECK_SPILL_NONE);
9693 // TODO: Is it possible op1 has a better type?
9694 lvaSetClass(tmp, pResolvedToken->hClass);
9695 return gtNewLclvNode(tmp, TYP_REF);
9700 #define assertImp(cond) ((void)0)
9702 #define assertImp(cond) \
9707 const int cchAssertImpBuf = 600; \
9708 char* assertImpBuf = (char*)alloca(cchAssertImpBuf); \
9709 _snprintf_s(assertImpBuf, cchAssertImpBuf, cchAssertImpBuf - 1, \
9710 "%s : Possibly bad IL with CEE_%s at offset %04Xh (op1=%s op2=%s stkDepth=%d)", #cond, \
9711 impCurOpcName, impCurOpcOffs, op1 ? varTypeName(op1->TypeGet()) : "NULL", \
9712 op2 ? varTypeName(op2->TypeGet()) : "NULL", verCurrentState.esStackDepth); \
9713 assertAbort(assertImpBuf, __FILE__, __LINE__); \
9719 #pragma warning(push)
9720 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
9722 /*****************************************************************************
9723 * Import the instructions for the given basic block
9725 void Compiler::impImportBlockCode(BasicBlock* block)
9727 #define _impResolveToken(kind) impResolveToken(codeAddr, &resolvedToken, kind)
9733 printf("\nImporting BB%02u (PC=%03u) of '%s'", block->bbNum, block->bbCodeOffs, info.compFullName);
9737 unsigned nxtStmtIndex = impInitBlockLineInfo();
9738 IL_OFFSET nxtStmtOffs;
9740 GenTreePtr arrayNodeFrom, arrayNodeTo, arrayNodeToIndex;
9741 CorInfoHelpFunc helper;
9742 CorInfoIsAccessAllowedResult accessAllowedResult;
9743 CORINFO_HELPER_DESC calloutHelper;
9744 const BYTE* lastLoadToken = nullptr;
9746 // reject cyclic constraints
9747 if (tiVerificationNeeded)
9749 Verify(!info.hasCircularClassConstraints, "Method parent has circular class type parameter constraints.");
9750 Verify(!info.hasCircularMethodConstraints, "Method has circular method type parameter constraints.");
9753 /* Get the tree list started */
9757 /* Walk the opcodes that comprise the basic block */
9759 const BYTE* codeAddr = info.compCode + block->bbCodeOffs;
9760 const BYTE* codeEndp = info.compCode + block->bbCodeOffsEnd;
9762 IL_OFFSET opcodeOffs = block->bbCodeOffs;
9763 IL_OFFSET lastSpillOffs = opcodeOffs;
9767 /* remember the start of the delegate creation sequence (used for verification) */
9768 const BYTE* delegateCreateStart = nullptr;
9770 int prefixFlags = 0;
9771 bool explicitTailCall, constraintCall, readonlyCall;
9775 unsigned numArgs = info.compArgsCount;
9777 /* Now process all the opcodes in the block */
9779 var_types callTyp = TYP_COUNT;
9780 OPCODE prevOpcode = CEE_ILLEGAL;
9782 if (block->bbCatchTyp)
9784 if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES)
9786 impCurStmtOffsSet(block->bbCodeOffs);
9789 // We will spill the GT_CATCH_ARG and the input of the BB_QMARK block
9790 // to a temp. This is a trade-off for code simplicity
9791 impSpillSpecialSideEff();
9794 while (codeAddr < codeEndp)
9796 bool usingReadyToRunHelper = false;
9797 CORINFO_RESOLVED_TOKEN resolvedToken;
9798 CORINFO_RESOLVED_TOKEN constrainedResolvedToken;
9799 CORINFO_CALL_INFO callInfo;
9800 CORINFO_FIELD_INFO fieldInfo;
9802 tiRetVal = typeInfo(); // Default type info
9804 //---------------------------------------------------------------------
9806 /* We need to restrict the max tree depth as many of the Compiler
9807 functions are recursive. We do this by spilling the stack */
9809 if (verCurrentState.esStackDepth)
9811 /* Has it been a while since we last saw a non-empty stack (which
9812 guarantees that the tree depth isn't accumulating)? */
9814 if ((opcodeOffs - lastSpillOffs) > MAX_TREE_SIZE && impCanSpillNow(prevOpcode))
9816 impSpillStackEnsure();
9817 lastSpillOffs = opcodeOffs;
9822 lastSpillOffs = opcodeOffs;
9823 impBoxTempInUse = false; // nothing on the stack, box temp OK to use again
9826 /* Compute the current instr offset */
9828 opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
9831 if (opts.compDbgInfo)
9834 if (!compIsForInlining())
9837 (nxtStmtIndex < info.compStmtOffsetsCount) ? info.compStmtOffsets[nxtStmtIndex] : BAD_IL_OFFSET;
9839 /* Have we reached the next stmt boundary ? */
9841 if (nxtStmtOffs != BAD_IL_OFFSET && opcodeOffs >= nxtStmtOffs)
9843 assert(nxtStmtOffs == info.compStmtOffsets[nxtStmtIndex]);
9845 if (verCurrentState.esStackDepth != 0 && opts.compDbgCode)
9847 /* We need to provide accurate IP-mapping at this point.
9848 So spill anything on the stack so that it will form
9849 gtStmts with the correct stmt offset noted */
9851 impSpillStackEnsure(true);
9854 // Has impCurStmtOffs been reported in any tree?
9856 if (impCurStmtOffs != BAD_IL_OFFSET && opts.compDbgCode)
9858 GenTreePtr placeHolder = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
9859 impAppendTree(placeHolder, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
9861 assert(impCurStmtOffs == BAD_IL_OFFSET);
9864 if (impCurStmtOffs == BAD_IL_OFFSET)
9866 /* Make sure that nxtStmtIndex is in sync with opcodeOffs.
9867 If opcodeOffs has gone past nxtStmtIndex, catch up */
9869 while ((nxtStmtIndex + 1) < info.compStmtOffsetsCount &&
9870 info.compStmtOffsets[nxtStmtIndex + 1] <= opcodeOffs)
9875 /* Go to the new stmt */
9877 impCurStmtOffsSet(info.compStmtOffsets[nxtStmtIndex]);
9879 /* Update the stmt boundary index */
9882 assert(nxtStmtIndex <= info.compStmtOffsetsCount);
9884 /* Are there any more line# entries after this one? */
9886 if (nxtStmtIndex < info.compStmtOffsetsCount)
9888 /* Remember where the next line# starts */
9890 nxtStmtOffs = info.compStmtOffsets[nxtStmtIndex];
9894 /* No more line# entries */
9896 nxtStmtOffs = BAD_IL_OFFSET;
9900 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES) &&
9901 (verCurrentState.esStackDepth == 0))
9903 /* At stack-empty locations, we have already added the tree to
9904 the stmt list with the last offset. We just need to update
9908 impCurStmtOffsSet(opcodeOffs);
9910 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES) &&
9911 impOpcodeIsCallSiteBoundary(prevOpcode))
9913 /* Make sure we have a type cached */
9914 assert(callTyp != TYP_COUNT);
9916 if (callTyp == TYP_VOID)
9918 impCurStmtOffsSet(opcodeOffs);
9920 else if (opts.compDbgCode)
9922 impSpillStackEnsure(true);
9923 impCurStmtOffsSet(opcodeOffs);
9926 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::NOP_BOUNDARIES) && (prevOpcode == CEE_NOP))
9928 if (opts.compDbgCode)
9930 impSpillStackEnsure(true);
9933 impCurStmtOffsSet(opcodeOffs);
9936 assert(impCurStmtOffs == BAD_IL_OFFSET || nxtStmtOffs == BAD_IL_OFFSET ||
9937 jitGetILoffs(impCurStmtOffs) <= nxtStmtOffs);
9941 CORINFO_CLASS_HANDLE clsHnd = DUMMY_INIT(NULL);
9942 CORINFO_CLASS_HANDLE ldelemClsHnd = DUMMY_INIT(NULL);
9943 CORINFO_CLASS_HANDLE stelemClsHnd = DUMMY_INIT(NULL);
9945 var_types lclTyp, ovflType = TYP_UNKNOWN;
9946 GenTreePtr op1 = DUMMY_INIT(NULL);
9947 GenTreePtr op2 = DUMMY_INIT(NULL);
9948 GenTreeArgList* args = nullptr; // What good do these "DUMMY_INIT"s do?
9949 GenTreePtr newObjThisPtr = DUMMY_INIT(NULL);
9950 bool uns = DUMMY_INIT(false);
9951 bool isLocal = false;
9953 /* Get the next opcode and the size of its parameters */
9955 OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr);
9956 codeAddr += sizeof(__int8);
9959 impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1);
9960 JITDUMP("\n [%2u] %3u (0x%03x) ", verCurrentState.esStackDepth, impCurOpcOffs, impCurOpcOffs);
9965 // Return if any previous code has caused inline to fail.
9966 if (compDonotInline())
9971 /* Get the size of additional parameters */
9973 signed int sz = opcodeSizes[opcode];
9976 clsHnd = NO_CLASS_HANDLE;
9978 callTyp = TYP_COUNT;
9980 impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1);
9981 impCurOpcName = opcodeNames[opcode];
9983 if (verbose && (opcode != CEE_PREFIX1))
9985 printf("%s", impCurOpcName);
9988 /* Use assertImp() to display the opcode */
9990 op1 = op2 = nullptr;
9993 /* See what kind of an opcode we have, then */
9995 unsigned mflags = 0;
9996 unsigned clsFlags = 0;
10009 CORINFO_SIG_INFO sig;
10012 bool ovfl, unordered, callNode;
10014 CORINFO_CLASS_HANDLE tokenType;
10024 opcode = (OPCODE)(getU1LittleEndian(codeAddr) + 256);
10025 opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
10026 codeAddr += sizeof(__int8);
10027 goto DECODE_OPCODE;
10031 // We need to call impSpillLclRefs() for a struct type lclVar.
10032 // This is done for non-block assignments in the handling of stloc.
10033 if ((op1->OperGet() == GT_ASG) && varTypeIsStruct(op1->gtOp.gtOp1) &&
10034 (op1->gtOp.gtOp1->gtOper == GT_LCL_VAR))
10036 impSpillLclRefs(op1->gtOp.gtOp1->AsLclVarCommon()->gtLclNum);
10039 /* Append 'op1' to the list of statements */
10040 impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
10045 /* Append 'op1' to the list of statements */
10047 impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
10053 // Remember at which BC offset the tree was finished
10054 impNoteLastILoffs();
10059 impPushNullObjRefOnStack();
10062 case CEE_LDC_I4_M1:
10072 cval.intVal = (opcode - CEE_LDC_I4_0);
10073 assert(-1 <= cval.intVal && cval.intVal <= 8);
10077 cval.intVal = getI1LittleEndian(codeAddr);
10080 cval.intVal = getI4LittleEndian(codeAddr);
10083 JITDUMP(" %d", cval.intVal);
10084 impPushOnStack(gtNewIconNode(cval.intVal), typeInfo(TI_INT));
10088 cval.lngVal = getI8LittleEndian(codeAddr);
10089 JITDUMP(" 0x%016llx", cval.lngVal);
10090 impPushOnStack(gtNewLconNode(cval.lngVal), typeInfo(TI_LONG));
10094 cval.dblVal = getR8LittleEndian(codeAddr);
10095 JITDUMP(" %#.17g", cval.dblVal);
10096 impPushOnStack(gtNewDconNode(cval.dblVal), typeInfo(TI_DOUBLE));
10100 cval.dblVal = getR4LittleEndian(codeAddr);
10101 JITDUMP(" %#.17g", cval.dblVal);
10103 GenTreePtr cnsOp = gtNewDconNode(cval.dblVal);
10104 #if !FEATURE_X87_DOUBLES
10105 // The x87 stack doesn't differentiate between float and double,
10106 // so there R4 is treated as R8; everybody else keeps the distinction
10107 cnsOp->gtType = TYP_FLOAT;
10108 #endif // FEATURE_X87_DOUBLES
10109 impPushOnStack(cnsOp, typeInfo(TI_DOUBLE));
10115 if (compIsForInlining())
10117 if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_NO_CALLEE_LDSTR)
10119 compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_LDSTR_RESTRICTION);
10124 val = getU4LittleEndian(codeAddr);
10125 JITDUMP(" %08X", val);
10126 if (tiVerificationNeeded)
10128 Verify(info.compCompHnd->isValidStringRef(info.compScopeHnd, val), "bad string");
10129 tiRetVal = typeInfo(TI_REF, impGetStringClass());
10131 impPushOnStack(gtNewSconNode(val, info.compScopeHnd), tiRetVal);
10136 lclNum = getU2LittleEndian(codeAddr);
10137 JITDUMP(" %u", lclNum);
10138 impLoadArg(lclNum, opcodeOffs + sz + 1);
10142 lclNum = getU1LittleEndian(codeAddr);
10143 JITDUMP(" %u", lclNum);
10144 impLoadArg(lclNum, opcodeOffs + sz + 1);
10151 lclNum = (opcode - CEE_LDARG_0);
10152 assert(lclNum >= 0 && lclNum < 4);
10153 impLoadArg(lclNum, opcodeOffs + sz + 1);
10157 lclNum = getU2LittleEndian(codeAddr);
10158 JITDUMP(" %u", lclNum);
10159 impLoadLoc(lclNum, opcodeOffs + sz + 1);
10163 lclNum = getU1LittleEndian(codeAddr);
10164 JITDUMP(" %u", lclNum);
10165 impLoadLoc(lclNum, opcodeOffs + sz + 1);
10172 lclNum = (opcode - CEE_LDLOC_0);
10173 assert(lclNum >= 0 && lclNum < 4);
10174 impLoadLoc(lclNum, opcodeOffs + sz + 1);
10178 lclNum = getU2LittleEndian(codeAddr);
10182 lclNum = getU1LittleEndian(codeAddr);
10184 JITDUMP(" %u", lclNum);
10186 if (tiVerificationNeeded)
10188 Verify(lclNum < info.compILargsCount, "bad arg num");
10191 if (compIsForInlining())
10193 op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo);
10194 noway_assert(op1->gtOper == GT_LCL_VAR);
10195 lclNum = op1->AsLclVar()->gtLclNum;
10200 lclNum = compMapILargNum(lclNum); // account for possible hidden param
10201 assertImp(lclNum < numArgs);
10203 if (lclNum == info.compThisArg)
10205 lclNum = lvaArg0Var;
10208 // We should have seen this arg write in the prescan
10209 assert(lvaTable[lclNum].lvHasILStoreOp);
10211 if (tiVerificationNeeded)
10213 typeInfo& tiLclVar = lvaTable[lclNum].lvVerTypeInfo;
10214 Verify(tiCompatibleWith(impStackTop().seTypeInfo, NormaliseForStack(tiLclVar), true),
10217 if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
10219 Verify(!tiLclVar.IsThisPtr(), "storing to uninit this ptr");
10226 lclNum = getU2LittleEndian(codeAddr);
10228 JITDUMP(" %u", lclNum);
10232 lclNum = getU1LittleEndian(codeAddr);
10234 JITDUMP(" %u", lclNum);
10242 lclNum = (opcode - CEE_STLOC_0);
10243 assert(lclNum >= 0 && lclNum < 4);
10246 if (tiVerificationNeeded)
10248 Verify(lclNum < info.compMethodInfo->locals.numArgs, "bad local num");
10249 Verify(tiCompatibleWith(impStackTop().seTypeInfo,
10250 NormaliseForStack(lvaTable[lclNum + numArgs].lvVerTypeInfo), true),
10254 if (compIsForInlining())
10256 lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
10258 /* Have we allocated a temp for this local? */
10260 lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline stloc first use temp"));
10269 if (lclNum >= info.compLocalsCount && lclNum != lvaArg0Var)
10271 assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
10277 /* if it is a struct assignment, make certain we don't overflow the buffer */
10278 assert(lclTyp != TYP_STRUCT || lvaLclSize(lclNum) >= info.compCompHnd->getClassSize(clsHnd));
10280 if (lvaTable[lclNum].lvNormalizeOnLoad())
10282 lclTyp = lvaGetRealType(lclNum);
10286 lclTyp = lvaGetActualType(lclNum);
10290 /* Pop the value being assigned */
10293 StackEntry se = impPopStack();
10294 clsHnd = se.seTypeInfo.GetClassHandle();
10296 tiRetVal = se.seTypeInfo;
10299 #ifdef FEATURE_SIMD
10300 if (varTypeIsSIMD(lclTyp) && (lclTyp != op1->TypeGet()))
10302 assert(op1->TypeGet() == TYP_STRUCT);
10303 op1->gtType = lclTyp;
10305 #endif // FEATURE_SIMD
10307 op1 = impImplicitIorI4Cast(op1, lclTyp);
10309 #ifdef _TARGET_64BIT_
10310 // Downcast the TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
10311 if (varTypeIsI(op1->TypeGet()) && (genActualType(lclTyp) == TYP_INT))
10313 assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
10314 op1 = gtNewCastNode(TYP_INT, op1, TYP_INT);
10316 #endif // _TARGET_64BIT_
10318 // We had better assign it a value of the correct type
10320 genActualType(lclTyp) == genActualType(op1->gtType) ||
10321 genActualType(lclTyp) == TYP_I_IMPL && op1->IsVarAddr() ||
10322 (genActualType(lclTyp) == TYP_I_IMPL && (op1->gtType == TYP_BYREF || op1->gtType == TYP_REF)) ||
10323 (genActualType(op1->gtType) == TYP_I_IMPL && lclTyp == TYP_BYREF) ||
10324 (varTypeIsFloating(lclTyp) && varTypeIsFloating(op1->TypeGet())) ||
10325 ((genActualType(lclTyp) == TYP_BYREF) && genActualType(op1->TypeGet()) == TYP_REF));
10327 /* If op1 is "&var" then its type is the transient "*" and it can
10328 be used either as TYP_BYREF or TYP_I_IMPL */
10330 if (op1->IsVarAddr())
10332 assertImp(genActualType(lclTyp) == TYP_I_IMPL || lclTyp == TYP_BYREF);
10334 /* When "&var" is created, we assume it is a byref. If it is
10335 being assigned to a TYP_I_IMPL var, change the type to
10336 prevent unnecessary GC info */
10338 if (genActualType(lclTyp) == TYP_I_IMPL)
10340 op1->gtType = TYP_I_IMPL;
10344 // If this is a local and the local is a ref type, see
10345 // if we can improve type information based on the
10346 // value being assigned.
10347 if (isLocal && (lclTyp == TYP_REF))
10349 // We should have seen a stloc in our IL prescan.
10350 assert(lvaTable[lclNum].lvHasILStoreOp);
10352 const bool isSingleILStoreLocal =
10353 !lvaTable[lclNum].lvHasMultipleILStoreOp && !lvaTable[lclNum].lvHasLdAddrOp;
10355 // Conservative check that there is just one
10356 // definition that reaches this store.
10357 const bool hasSingleReachingDef = (block->bbStackDepthOnEntry() == 0);
10359 if (isSingleILStoreLocal && hasSingleReachingDef)
10361 lvaUpdateClass(lclNum, op1, clsHnd);
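// For example (illustrative C#): for "object o = new MyClass();" where this
// stloc is the only IL store to 'o' and the block is entered with an empty
// stack, the known class of 'o' can be sharpened from object to MyClass.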
10365 /* Filter out simple assignments to itself */
10367 if (op1->gtOper == GT_LCL_VAR && lclNum == op1->gtLclVarCommon.gtLclNum)
10369 if (opts.compDbgCode)
10371 op1 = gtNewNothingNode();
10380 /* Create the assignment node */
10382 op2 = gtNewLclvNode(lclNum, lclTyp, opcodeOffs + sz + 1);
10384 /* If the local is aliased, we need to spill calls and
10385 indirections from the stack. */
10387 if ((lvaTable[lclNum].lvAddrExposed || lvaTable[lclNum].lvHasLdAddrOp) &&
10388 verCurrentState.esStackDepth > 0)
10390 impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("Local could be aliased"));
10393 /* Spill any refs to the local from the stack */
10395 impSpillLclRefs(lclNum);
10397 #if !FEATURE_X87_DOUBLES
10398 // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE
10399 // We insert a cast to the dest 'op2' type
10401 if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) &&
10402 varTypeIsFloating(op2->gtType))
10404 op1 = gtNewCastNode(op2->TypeGet(), op1, op2->TypeGet());
10406 #endif // !FEATURE_X87_DOUBLES
10408 if (varTypeIsStruct(lclTyp))
10410 op1 = impAssignStruct(op2, op1, clsHnd, (unsigned)CHECK_SPILL_ALL);
10414 // The code generator generates GC tracking information
10415 // based on the RHS of the assignment. Later the LHS (which
10416 // is a BYREF) gets used and the emitter checks that that variable
10417 // is being tracked. It is not (since the RHS was an int and did
10418 // not need tracking). To keep this assert happy, we change the RHS
10419 if (lclTyp == TYP_BYREF && !varTypeIsGC(op1->gtType))
10421 op1->gtType = TYP_BYREF;
10423 op1 = gtNewAssignNode(op2, op1);
10429 lclNum = getU2LittleEndian(codeAddr);
10433 lclNum = getU1LittleEndian(codeAddr);
10435 JITDUMP(" %u", lclNum);
10436 if (tiVerificationNeeded)
10438 Verify(lclNum < info.compMethodInfo->locals.numArgs, "bad local num");
10439 Verify(info.compInitMem, "initLocals not set");
10442 if (compIsForInlining())
10444 // Get the local type
10445 lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
10447 /* Have we allocated a temp for this local? */
10449 lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline ldloca(s) first use temp"));
10451 op1 = gtNewLclvNode(lclNum, lvaGetActualType(lclNum));
10457 assertImp(lclNum < info.compLocalsCount);
10461 lclNum = getU2LittleEndian(codeAddr);
10465 lclNum = getU1LittleEndian(codeAddr);
10467 JITDUMP(" %u", lclNum);
10468 Verify(lclNum < info.compILargsCount, "bad arg num");
10470 if (compIsForInlining())
10472 // In IL, LDARGA(_S) is used to load the byref managed pointer of a struct argument,
10473 // followed by a ldfld to load the field.
10475 op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo);
10476 if (op1->gtOper != GT_LCL_VAR)
10478 compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDARGA_NOT_LOCAL_VAR);
10482 assert(op1->gtOper == GT_LCL_VAR);
10487 lclNum = compMapILargNum(lclNum); // account for possible hidden param
10488 assertImp(lclNum < numArgs);
10490 if (lclNum == info.compThisArg)
10492 lclNum = lvaArg0Var;
10499 op1 = gtNewLclvNode(lclNum, lvaGetActualType(lclNum), opcodeOffs + sz + 1);
10502 assert(op1->gtOper == GT_LCL_VAR);
10504 /* Note that this is supposed to create the transient type "*"
10505 which may be used as a TYP_I_IMPL. However we catch places
10506 where it is used as a TYP_I_IMPL and change the node if needed.
10507 Thus we are pessimistic and may report byrefs in the GC info
10508 where it was not absolutely needed, but it is safer this way.
10510 op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, op1);
10512 // &aliasedVar doesn't need GTF_GLOB_REF, though aliasedVar does
10513 assert((op1->gtFlags & GTF_GLOB_REF) == 0);
10515 tiRetVal = lvaTable[lclNum].lvVerTypeInfo;
10516 if (tiVerificationNeeded)
10518 // Don't allow taking address of uninit this ptr.
10519 if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
10521 Verify(!tiRetVal.IsThisPtr(), "address of uninit this ptr");
10524 if (!tiRetVal.IsByRef())
10526 tiRetVal.MakeByRef();
10530 Verify(false, "byref to byref");
10534 impPushOnStack(op1, tiRetVal);
10539 if (!info.compIsVarArgs)
10541 BADCODE("arglist in non-vararg method");
10544 if (tiVerificationNeeded)
10546 tiRetVal = typeInfo(TI_STRUCT, impGetRuntimeArgumentHandle());
10548 assertImp((info.compMethodInfo->args.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG);
10550 /* The ARGLIST cookie is a hidden 'last' parameter; we have already
10551 adjusted the arg count because this is like fetching the last param */
10552 assertImp(0 < numArgs);
10553 assert(lvaTable[lvaVarargsHandleArg].lvAddrExposed);
10554 lclNum = lvaVarargsHandleArg;
10555 op1 = gtNewLclvNode(lclNum, TYP_I_IMPL, opcodeOffs + sz + 1);
10556 op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, op1);
10557 impPushOnStack(op1, tiRetVal);
10560 case CEE_ENDFINALLY:
10562 if (compIsForInlining())
10564 assert(!"Shouldn't have exception handlers in the inliner!");
10565 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFINALLY);
10569 if (verCurrentState.esStackDepth > 0)
10571 impEvalSideEffects();
10574 if (info.compXcptnsCount == 0)
10576 BADCODE("endfinally outside finally");
10579 assert(verCurrentState.esStackDepth == 0);
10581 op1 = gtNewOperNode(GT_RETFILT, TYP_VOID, nullptr);
10584 case CEE_ENDFILTER:
10586 if (compIsForInlining())
10588 assert(!"Shouldn't have exception handlers in the inliner!");
10589 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFILTER);
10593 block->bbSetRunRarely(); // filters are rare
10595 if (info.compXcptnsCount == 0)
10597 BADCODE("endfilter outside filter");
10600 if (tiVerificationNeeded)
10602 Verify(impStackTop().seTypeInfo.IsType(TI_INT), "bad endfilt arg");
10605 op1 = impPopStack().val;
10606 assertImp(op1->gtType == TYP_INT);
10607 if (!bbInFilterILRange(block))
10609 BADCODE("EndFilter outside a filter handler");
10612 /* Mark current bb as end of filter */
10614 assert(compCurBB->bbFlags & BBF_DONT_REMOVE);
10615 assert(compCurBB->bbJumpKind == BBJ_EHFILTERRET);
10617 /* Mark catch handler as successor */
10619 op1 = gtNewOperNode(GT_RETFILT, op1->TypeGet(), op1);
10620 if (verCurrentState.esStackDepth != 0)
10622 verRaiseVerifyException(INDEBUG("stack must be 1 on end of filter") DEBUGARG(__FILE__)
10623 DEBUGARG(__LINE__));
10628 prefixFlags &= ~PREFIX_TAILCALL; // ret without call before it
10630 if (!impReturnInstruction(block, prefixFlags, opcode))
10641 assert(!compIsForInlining());
10643 if (tiVerificationNeeded)
10645 Verify(false, "Invalid opcode: CEE_JMP");
10648 if ((info.compFlags & CORINFO_FLG_SYNCH) || block->hasTryIndex() || block->hasHndIndex())
10650 /* CEE_JMP does not make sense in some "protected" regions. */
10652 BADCODE("Jmp not allowed in protected region");
10655 if (verCurrentState.esStackDepth != 0)
10657 BADCODE("Stack must be empty after CEE_JMPs");
10660 _impResolveToken(CORINFO_TOKENKIND_Method);
10662 JITDUMP(" %08X", resolvedToken.token);
10664 /* The signature of the target has to be identical to ours.
10665 At least check that argCnt and returnType match */
10667 eeGetMethodSig(resolvedToken.hMethod, &sig);
10668 if (sig.numArgs != info.compMethodInfo->args.numArgs ||
10669 sig.retType != info.compMethodInfo->args.retType ||
10670 sig.callConv != info.compMethodInfo->args.callConv)
10672 BADCODE("Incompatible target for CEE_JMPs");
10675 #if defined(_TARGET_XARCH_) || defined(_TARGET_ARMARCH_)
10677 op1 = new (this, GT_JMP) GenTreeVal(GT_JMP, TYP_VOID, (size_t)resolvedToken.hMethod);
10679 /* Mark the basic block as being a JUMP instead of RETURN */
10681 block->bbFlags |= BBF_HAS_JMP;
10683 /* Set this flag to make sure register arguments have a location assigned
10684 * even if we don't use them inside the method */
10686 compJmpOpUsed = true;
10688 fgNoStructPromotion = true;
10692 #else // !_TARGET_XARCH_ && !_TARGET_ARMARCH_
10694 // Import this just like a series of LDARGs + tail. + call + ret
10696 if (info.compIsVarArgs)
10698 // For now we don't implement true tail calls, so this breaks varargs.
10699 // So warn the user instead of generating bad code.
10700 // This is a semi-temporary workaround for DevDiv 173860, until we can properly
10701 // implement true tail calls.
10702 IMPL_LIMITATION("varargs + CEE_JMP doesn't work yet");
10705 // First load up the arguments (0 - N)
10706 for (unsigned argNum = 0; argNum < info.compILargsCount; argNum++)
10708 impLoadArg(argNum, opcodeOffs + sz + 1);
10711 // Now generate the tail call
10712 noway_assert(prefixFlags == 0);
10713 prefixFlags = PREFIX_TAILCALL_EXPLICIT;
10716 eeGetCallInfo(&resolvedToken, NULL,
10717 combine(CORINFO_CALLINFO_ALLOWINSTPARAM, CORINFO_CALLINFO_SECURITYCHECKS), &callInfo);
10719 // All calls and delegates need a security callout.
10720 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
10722 callTyp = impImportCall(CEE_CALL, &resolvedToken, NULL, NULL, PREFIX_TAILCALL_EXPLICIT, &callInfo,
10725 // And finish with the ret
10728 #endif // _TARGET_XARCH_ || _TARGET_ARMARCH_
10731 assertImp(sz == sizeof(unsigned));
10733 _impResolveToken(CORINFO_TOKENKIND_Class);
10735 JITDUMP(" %08X", resolvedToken.token);
10737 ldelemClsHnd = resolvedToken.hClass;
10739 if (tiVerificationNeeded)
10741 typeInfo tiArray = impStackTop(1).seTypeInfo;
10742 typeInfo tiIndex = impStackTop().seTypeInfo;
10744 // As per ECMA, the specified 'index' can be either int32 or native int.
10745 Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10747 typeInfo arrayElemType = verMakeTypeInfo(ldelemClsHnd);
10748 Verify(tiArray.IsNullObjRef() ||
10749 typeInfo::AreEquivalent(verGetArrayElemType(tiArray), arrayElemType),
10752 tiRetVal = arrayElemType;
10753 tiRetVal.MakeByRef();
10754 if (prefixFlags & PREFIX_READONLY)
10756 tiRetVal.SetIsReadonlyByRef();
10759 // an array interior pointer is always in the heap
10760 tiRetVal.SetIsPermanentHomeByRef();
10763 // If it's a value class array we just do a simple address-of
10764 if (eeIsValueClass(ldelemClsHnd))
10766 CorInfoType cit = info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd);
10767 if (cit == CORINFO_TYPE_UNDEF)
10769 lclTyp = TYP_STRUCT;
10773 lclTyp = JITtype2varType(cit);
10775 goto ARR_LD_POST_VERIFY;
10778 // Similarly, if it's a readonly access, we can do a simple address-of
10779 // without doing a runtime type-check
10780 if (prefixFlags & PREFIX_READONLY)
10783 goto ARR_LD_POST_VERIFY;
10786 // Otherwise we need the full helper function with run-time type check
10787 op1 = impTokenToHandle(&resolvedToken);
10788 if (op1 == nullptr)
10789 { // compDonotInline()
10793 args = gtNewArgList(op1); // Type
10794 args = gtNewListNode(impPopStack().val, args); // index
10795 args = gtNewListNode(impPopStack().val, args); // array
10796 op1 = gtNewHelperCallNode(CORINFO_HELP_LDELEMA_REF, TYP_BYREF, args);
10798 impPushOnStack(op1, tiRetVal);
10801 // ldelem for reference and value types
10803 assertImp(sz == sizeof(unsigned));
10805 _impResolveToken(CORINFO_TOKENKIND_Class);
10807 JITDUMP(" %08X", resolvedToken.token);
10809 ldelemClsHnd = resolvedToken.hClass;
10811 if (tiVerificationNeeded)
10813 typeInfo tiArray = impStackTop(1).seTypeInfo;
10814 typeInfo tiIndex = impStackTop().seTypeInfo;
10816 // As per ECMA, the specified 'index' can be either int32 or native int.
10817 Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10818 tiRetVal = verMakeTypeInfo(ldelemClsHnd);
10820 Verify(tiArray.IsNullObjRef() || tiCompatibleWith(verGetArrayElemType(tiArray), tiRetVal, false),
10821 "type of array incompatible with type operand");
10822 tiRetVal.NormaliseForStack();
10825 // If it's a reference type or generic variable type
10826 // then just generate code as though it's a ldelem.ref instruction
10827 if (!eeIsValueClass(ldelemClsHnd))
10830 opcode = CEE_LDELEM_REF;
10834 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(ldelemClsHnd);
10835 lclTyp = JITtype2varType(jitTyp);
10836 tiRetVal = verMakeTypeInfo(ldelemClsHnd); // precise type always needed for struct
10837 tiRetVal.NormaliseForStack();
10839 goto ARR_LD_POST_VERIFY;
10841 case CEE_LDELEM_I1:
10844 case CEE_LDELEM_I2:
10845 lclTyp = TYP_SHORT;
10848 lclTyp = TYP_I_IMPL;
10851 // Should be UINT, but since no platform widens 4->8 bytes it doesn't matter
10852 // and treating it as TYP_INT avoids other asserts.
10853 case CEE_LDELEM_U4:
10857 case CEE_LDELEM_I4:
10860 case CEE_LDELEM_I8:
10863 case CEE_LDELEM_REF:
10866 case CEE_LDELEM_R4:
10867 lclTyp = TYP_FLOAT;
10869 case CEE_LDELEM_R8:
10870 lclTyp = TYP_DOUBLE;
10872 case CEE_LDELEM_U1:
10873 lclTyp = TYP_UBYTE;
10875 case CEE_LDELEM_U2:
10881 if (tiVerificationNeeded)
10883 typeInfo tiArray = impStackTop(1).seTypeInfo;
10884 typeInfo tiIndex = impStackTop().seTypeInfo;
10886 // As per ECMA, the specified 'index' can be either int32 or native int.
10887 Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10888 if (tiArray.IsNullObjRef())
10890 if (lclTyp == TYP_REF)
10891 { // we will say a deref of a null array yields a null ref
10892 tiRetVal = typeInfo(TI_NULL);
10896 tiRetVal = typeInfo(lclTyp);
10901 tiRetVal = verGetArrayElemType(tiArray);
10902 typeInfo arrayElemTi = typeInfo(lclTyp);
10903 #ifdef _TARGET_64BIT_
10904 if (opcode == CEE_LDELEM_I)
10906 arrayElemTi = typeInfo::nativeInt();
10909 if (lclTyp != TYP_REF && lclTyp != TYP_STRUCT)
10911 Verify(typeInfo::AreEquivalent(tiRetVal, arrayElemTi), "bad array");
10914 #endif // _TARGET_64BIT_
10916 Verify(tiRetVal.IsType(arrayElemTi.GetType()), "bad array");
10919 tiRetVal.NormaliseForStack();
10921 ARR_LD_POST_VERIFY:
10923 /* Pull the index value and array address */
10924 op2 = impPopStack().val;
10925 op1 = impPopStack().val;
10926 assertImp(op1->gtType == TYP_REF);
10928 /* Check for null pointer - in the inliner case we simply abort */
10930 if (compIsForInlining())
10932 if (op1->gtOper == GT_CNS_INT)
10934 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NULL_FOR_LDELEM);
10939 op1 = impCheckForNullPointer(op1);
10941 /* Mark the block as containing an index expression */
10943 if (op1->gtOper == GT_LCL_VAR)
10945 if (op2->gtOper == GT_LCL_VAR || op2->gtOper == GT_CNS_INT || op2->gtOper == GT_ADD)
10947 block->bbFlags |= BBF_HAS_IDX_LEN;
10948 optMethodFlags |= OMF_HAS_ARRAYREF;
10952 /* Create the index node and push it on the stack */
10954 op1 = gtNewIndexRef(lclTyp, op1, op2);
10956 ldstruct = (opcode == CEE_LDELEM && lclTyp == TYP_STRUCT);
10958 if ((opcode == CEE_LDELEMA) || ldstruct ||
10959 (ldelemClsHnd != DUMMY_INIT(NULL) && eeIsValueClass(ldelemClsHnd)))
10961 assert(ldelemClsHnd != DUMMY_INIT(NULL));
10963 // remember the element size
10964 if (lclTyp == TYP_REF)
10966 op1->gtIndex.gtIndElemSize = sizeof(void*);
10970 // If ldElemClass is precisely a primitive type, use that, otherwise, preserve the struct type.
10971 if (info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd) == CORINFO_TYPE_UNDEF)
10973 op1->gtIndex.gtStructElemClass = ldelemClsHnd;
10975 assert(lclTyp != TYP_STRUCT || op1->gtIndex.gtStructElemClass != nullptr);
10976 if (lclTyp == TYP_STRUCT)
10978 size = info.compCompHnd->getClassSize(ldelemClsHnd);
10979 op1->gtIndex.gtIndElemSize = size;
10980 op1->gtType = lclTyp;
10984 if ((opcode == CEE_LDELEMA) || ldstruct)
10987 lclTyp = TYP_BYREF;
10989 op1 = gtNewOperNode(GT_ADDR, lclTyp, op1);
10993 assert(lclTyp != TYP_STRUCT);
10999 // Create an OBJ for the result
11000 op1 = gtNewObjNode(ldelemClsHnd, op1);
11001 op1->gtFlags |= GTF_EXCEPT;
11003 impPushOnStack(op1, tiRetVal);
11006 // stelem for reference and value types
11009 assertImp(sz == sizeof(unsigned));
11011 _impResolveToken(CORINFO_TOKENKIND_Class);
11013 JITDUMP(" %08X", resolvedToken.token);
11015 stelemClsHnd = resolvedToken.hClass;
11017 if (tiVerificationNeeded)
11019 typeInfo tiArray = impStackTop(2).seTypeInfo;
11020 typeInfo tiIndex = impStackTop(1).seTypeInfo;
11021 typeInfo tiValue = impStackTop().seTypeInfo;
11023 // As per ECMA, the specified 'index' can be either int32 or native int.
11024 Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
11025 typeInfo arrayElem = verMakeTypeInfo(stelemClsHnd);
11027 Verify(tiArray.IsNullObjRef() || tiCompatibleWith(arrayElem, verGetArrayElemType(tiArray), false),
11028 "type operand incompatible with array element type");
11029 arrayElem.NormaliseForStack();
11030 Verify(tiCompatibleWith(tiValue, arrayElem, true), "value incompatible with type operand");
11033 // If it's a reference type just behave as though it's a stelem.ref instruction
11034 if (!eeIsValueClass(stelemClsHnd))
11036 goto STELEM_REF_POST_VERIFY;
11039 // Otherwise extract the type
11041 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(stelemClsHnd);
11042 lclTyp = JITtype2varType(jitTyp);
11043 goto ARR_ST_POST_VERIFY;
11046 case CEE_STELEM_REF:
11048 if (tiVerificationNeeded)
11050 typeInfo tiArray = impStackTop(2).seTypeInfo;
11051 typeInfo tiIndex = impStackTop(1).seTypeInfo;
11052 typeInfo tiValue = impStackTop().seTypeInfo;
11054 // As per ECMA, the specified 'index' can be either int32 or native int.
11055 Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
11056 Verify(tiValue.IsObjRef(), "bad value");
11058 // we only check that it is an object reference; the helper does additional checks
11059 Verify(tiArray.IsNullObjRef() || verGetArrayElemType(tiArray).IsType(TI_REF), "bad array");
11062 arrayNodeTo = impStackTop(2).val;
11063 arrayNodeToIndex = impStackTop(1).val;
11064 arrayNodeFrom = impStackTop().val;
11067 // Note that it is not legal to optimize away CORINFO_HELP_ARRADDR_ST in a
11068 // lot of cases because of covariance, e.g. foo[] can be cast to object[].
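// For example (illustrative C#): with a "string[] s" viewed through an
// "object[] o" reference, the store "o[0] = new object()" must throw
// ArrayTypeMismatchException at run time; the helper performs that check.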
11071 // Check for assignment to the same array, i.e. arrLcl[i] = arrLcl[j]
11072 // This does not need CORINFO_HELP_ARRADDR_ST
11074 if (arrayNodeFrom->OperGet() == GT_INDEX && arrayNodeFrom->gtOp.gtOp1->gtOper == GT_LCL_VAR &&
11075 arrayNodeTo->gtOper == GT_LCL_VAR &&
11076 arrayNodeTo->gtLclVarCommon.gtLclNum == arrayNodeFrom->gtOp.gtOp1->gtLclVarCommon.gtLclNum &&
11077 !lvaTable[arrayNodeTo->gtLclVarCommon.gtLclNum].lvAddrExposed)
11080 goto ARR_ST_POST_VERIFY;
11083 // Check for assignment of NULL. This does not need CORINFO_HELP_ARRADDR_ST
11085 if (arrayNodeFrom->OperGet() == GT_CNS_INT)
11087 assert(arrayNodeFrom->gtType == TYP_REF && arrayNodeFrom->gtIntCon.gtIconVal == 0);
11090 goto ARR_ST_POST_VERIFY;
11093 STELEM_REF_POST_VERIFY:
11095 /* Call a helper function to do the assignment */
11096 op1 = gtNewHelperCallNode(CORINFO_HELP_ARRADDR_ST, TYP_VOID, impPopList(3, nullptr));
11100 case CEE_STELEM_I1:
11103 case CEE_STELEM_I2:
11104 lclTyp = TYP_SHORT;
11107 lclTyp = TYP_I_IMPL;
11109 case CEE_STELEM_I4:
11112 case CEE_STELEM_I8:
11115 case CEE_STELEM_R4:
11116 lclTyp = TYP_FLOAT;
11118 case CEE_STELEM_R8:
11119 lclTyp = TYP_DOUBLE;
11124 if (tiVerificationNeeded)
11126 typeInfo tiArray = impStackTop(2).seTypeInfo;
11127 typeInfo tiIndex = impStackTop(1).seTypeInfo;
11128 typeInfo tiValue = impStackTop().seTypeInfo;
11130 // As per ECMA, the specified 'index' can be either int32 or native int.
11131 Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
11132 typeInfo arrayElem = typeInfo(lclTyp);
11133 #ifdef _TARGET_64BIT_
11134 if (opcode == CEE_STELEM_I)
11136 arrayElem = typeInfo::nativeInt();
11138 #endif // _TARGET_64BIT_
11139 Verify(tiArray.IsNullObjRef() || typeInfo::AreEquivalent(verGetArrayElemType(tiArray), arrayElem),
11142 Verify(tiCompatibleWith(NormaliseForStack(tiValue), arrayElem.NormaliseForStack(), true),
11146 ARR_ST_POST_VERIFY:
11147 /* The strict order of evaluation is LHS-operands, RHS-operands,
11148 range-check, and then assignment. However, codegen currently
11149 does the range-check before evaluating the RHS-operands. So to
11150 maintain strict ordering, we spill the stack. */
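// For example (illustrative): in "arr[i] = M()", any side effects and
// exceptions of M() must be observed before a potential
// IndexOutOfRangeException from the range check, hence the spill just below.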
11152 if (impStackTop().val->gtFlags & GTF_SIDE_EFFECT)
11154 impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
11155 "Strict ordering of exceptions for Array store"));
11158 /* Pull the new value from the stack */
11159 op2 = impPopStack().val;
11161 /* Pull the index value */
11162 op1 = impPopStack().val;
11164 /* Pull the array address */
11165 op3 = impPopStack().val;
11167 assertImp(op3->gtType == TYP_REF);
11168 if (op2->IsVarAddr())
11170 op2->gtType = TYP_I_IMPL;
11173 op3 = impCheckForNullPointer(op3);
11175 // Mark the block as containing an index expression
11177 if (op3->gtOper == GT_LCL_VAR)
11179 if (op1->gtOper == GT_LCL_VAR || op1->gtOper == GT_CNS_INT || op1->gtOper == GT_ADD)
11181 block->bbFlags |= BBF_HAS_IDX_LEN;
11182 optMethodFlags |= OMF_HAS_ARRAYREF;
11186 /* Create the index node */
11188 op1 = gtNewIndexRef(lclTyp, op3, op1);
11190 /* Create the assignment node and append it */
11192 if (lclTyp == TYP_STRUCT)
11194 assert(stelemClsHnd != DUMMY_INIT(NULL));
11196 op1->gtIndex.gtStructElemClass = stelemClsHnd;
11197 op1->gtIndex.gtIndElemSize = info.compCompHnd->getClassSize(stelemClsHnd);
11199 if (varTypeIsStruct(op1))
11201 op1 = impAssignStruct(op1, op2, stelemClsHnd, (unsigned)CHECK_SPILL_ALL);
11205 op2 = impImplicitR4orR8Cast(op2, op1->TypeGet());
11206 op1 = gtNewAssignNode(op1, op2);
11209 /* Mark the expression as containing an assignment */
11211 op1->gtFlags |= GTF_ASG;
11222 case CEE_ADD_OVF_UN:
11230 goto MATH_OP2_FLAGS;
11239 case CEE_SUB_OVF_UN:
11247 goto MATH_OP2_FLAGS;
11251 goto MATH_MAYBE_CALL_NO_OVF;
11256 case CEE_MUL_OVF_UN:
11263 goto MATH_MAYBE_CALL_OVF;
11265 // Other binary math operations
11269 goto MATH_MAYBE_CALL_NO_OVF;
11273 goto MATH_MAYBE_CALL_NO_OVF;
11277 goto MATH_MAYBE_CALL_NO_OVF;
11281 goto MATH_MAYBE_CALL_NO_OVF;
11283 MATH_MAYBE_CALL_NO_OVF:
11285 MATH_MAYBE_CALL_OVF:
11286 // Morpher has some complex logic about when to turn different
11287 // typed nodes on different platforms into helper calls. We
11288 // need to either duplicate that logic here, or just
11289 // pessimistically make all the nodes large enough to become
11290 // call nodes. Since call nodes aren't that much larger and
11291 // these opcodes are infrequent enough I chose the latter.
11293 goto MATH_OP2_FLAGS;
11305 MATH_OP2: // For default values of 'ovfl' and 'callNode'
11310 MATH_OP2_FLAGS: // If 'ovfl' and 'callNode' have already been set
11312 /* Pull two values and push back the result */
11314 if (tiVerificationNeeded)
11316 const typeInfo& tiOp1 = impStackTop(1).seTypeInfo;
11317 const typeInfo& tiOp2 = impStackTop().seTypeInfo;
11319 Verify(tiCompatibleWith(tiOp1, tiOp2, true), "different arg type");
11320 if (oper == GT_ADD || oper == GT_DIV || oper == GT_SUB || oper == GT_MUL || oper == GT_MOD)
11322 Verify(tiOp1.IsNumberType(), "not number");
11326 Verify(tiOp1.IsIntegerType(), "not integer");
11329 Verify(!ovfl || tiOp1.IsIntegerType(), "not integer");
11333 #ifdef _TARGET_64BIT_
11334 if (tiOp2.IsNativeIntType())
11338 #endif // _TARGET_64BIT_
11341 op2 = impPopStack().val;
11342 op1 = impPopStack().val;
11344 #if !CPU_HAS_FP_SUPPORT
11345 if (varTypeIsFloating(op1->gtType))
11350 /* Can't do arithmetic with references */
11351 assertImp(genActualType(op1->TypeGet()) != TYP_REF && genActualType(op2->TypeGet()) != TYP_REF);
11353 // Change both to TYP_I_IMPL (impBashVarAddrsToI won't change if it's a true byref, only
11354 // if it is in the stack)
11355 impBashVarAddrsToI(op1, op2);
11357 type = impGetByRefResultType(oper, uns, &op1, &op2);
11359 assert(!ovfl || !varTypeIsFloating(op1->gtType));
11361 /* Special case: "int+0", "int-0", "int*1", "int/1" */
11363 if (op2->gtOper == GT_CNS_INT)
11365 if ((op2->IsIntegralConst(0) && (oper == GT_ADD || oper == GT_SUB)) ||
11366 (op2->IsIntegralConst(1) && (oper == GT_MUL || oper == GT_DIV)))
11369 impPushOnStack(op1, tiRetVal);
11374 #if !FEATURE_X87_DOUBLES
11375 // We can generate a TYP_FLOAT operation that has a TYP_DOUBLE operand
11377 if (varTypeIsFloating(type) && varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType))
11379 if (op1->TypeGet() != type)
11381 // We insert a cast of op1 to 'type'
11382 op1 = gtNewCastNode(type, op1, type);
11384 if (op2->TypeGet() != type)
11386 // We insert a cast of op2 to 'type'
11387 op2 = gtNewCastNode(type, op2, type);
11390 #endif // !FEATURE_X87_DOUBLES
11392 #if SMALL_TREE_NODES
11395 /* These operators can later be transformed into 'GT_CALL' */
11397 assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MUL]);
11398 #ifndef _TARGET_ARM_
11399 assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_DIV]);
11400 assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UDIV]);
11401 assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MOD]);
11402 assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UMOD]);
11404 // It's tempting to use LargeOpOpcode() here, but this logic is *not* saying
11405 // that we'll need to transform into a general large node, but rather specifically
11406 // to a call: by doing it this way, things keep working if there are multiple sizes,
11407 // and a CALL is no longer the largest.
11408 // That said, as of now it *is* a large node, so we'll do this with an assert rather
11410 assert(GenTree::s_gtNodeSizes[GT_CALL] == TREE_NODE_SZ_LARGE);
11411 op1 = new (this, GT_CALL) GenTreeOp(oper, type, op1, op2 DEBUGARG(/*largeNode*/ true));
11414 #endif // SMALL_TREE_NODES
11416 op1 = gtNewOperNode(oper, type, op1, op2);
11419 /* Special case: integer/long division may throw an exception */
11421 if (varTypeIsIntegral(op1->TypeGet()) && op1->OperMayThrow(this))
11423 op1->gtFlags |= GTF_EXCEPT;
11428 assert(oper == GT_ADD || oper == GT_SUB || oper == GT_MUL);
11429 if (ovflType != TYP_UNKNOWN)
11431 op1->gtType = ovflType;
11433 op1->gtFlags |= (GTF_EXCEPT | GTF_OVERFLOW);
11436 op1->gtFlags |= GTF_UNSIGNED;
11440 impPushOnStack(op1, tiRetVal);
11455 if (tiVerificationNeeded)
11457 const typeInfo& tiVal = impStackTop(1).seTypeInfo;
11458 const typeInfo& tiShift = impStackTop(0).seTypeInfo;
11459 Verify(tiVal.IsIntegerType() && tiShift.IsType(TI_INT), "Bad shift args");
11462 op2 = impPopStack().val;
11463 op1 = impPopStack().val; // operand to be shifted
11464 impBashVarAddrsToI(op1, op2);
11466 type = genActualType(op1->TypeGet());
11467 op1 = gtNewOperNode(oper, type, op1, op2);
11469 impPushOnStack(op1, tiRetVal);
11473 if (tiVerificationNeeded)
11475 tiRetVal = impStackTop().seTypeInfo;
11476 Verify(tiRetVal.IsIntegerType(), "bad int value");
11479 op1 = impPopStack().val;
11480 impBashVarAddrsToI(op1, nullptr);
11481 type = genActualType(op1->TypeGet());
11482 impPushOnStack(gtNewOperNode(GT_NOT, type, op1), tiRetVal);
11486 if (tiVerificationNeeded)
11488 tiRetVal = impStackTop().seTypeInfo;
11489 Verify(tiRetVal.IsType(TI_DOUBLE), "bad R value");
11491 op1 = impPopStack().val;
11492 type = op1->TypeGet();
11493 op1 = gtNewOperNode(GT_CKFINITE, type, op1);
11494 op1->gtFlags |= GTF_EXCEPT;
11496 impPushOnStack(op1, tiRetVal);
11501 val = getI4LittleEndian(codeAddr); // jump distance
11502 jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int32)) + val);
11506 val = getI1LittleEndian(codeAddr); // jump distance
11507 jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int8)) + val);
11511 if (compIsForInlining())
11513 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_LEAVE);
11517 JITDUMP(" %04X", jmpAddr);
11518 if (block->bbJumpKind != BBJ_LEAVE)
11520 impResetLeaveBlock(block, jmpAddr);
11523 assert(jmpAddr == block->bbJumpDest->bbCodeOffs);
11524 impImportLeave(block);
11525 impNoteBranchOffs();
11531 jmpDist = (sz == 1) ? getI1LittleEndian(codeAddr) : getI4LittleEndian(codeAddr);
11533 if (compIsForInlining() && jmpDist == 0)
11538 impNoteBranchOffs();
11544 case CEE_BRFALSE_S:
11546 /* Pop the comparand (now there's a neat term) from the stack */
11547 if (tiVerificationNeeded)
11549 typeInfo& tiVal = impStackTop().seTypeInfo;
11550 Verify(tiVal.IsObjRef() || tiVal.IsByRef() || tiVal.IsIntegerType() || tiVal.IsMethod(),
11554 op1 = impPopStack().val;
11555 type = op1->TypeGet();
11557 // brfalse and brtrue are only allowed on I4, refs, and byrefs.
11558 if (!opts.MinOpts() && !opts.compDbgCode && block->bbJumpDest == block->bbNext)
11560 block->bbJumpKind = BBJ_NONE;
11562 if (op1->gtFlags & GTF_GLOB_EFFECT)
11564 op1 = gtUnusedValNode(op1);
11573 if (op1->OperIsCompare())
11575 if (opcode == CEE_BRFALSE || opcode == CEE_BRFALSE_S)
11577 // Flip the sense of the compare
11579 op1 = gtReverseCond(op1);
11584 /* We'll compare against an equally-sized integer 0 */
11585 /* For small types, we always compare against int */
11586 op2 = gtNewZeroConNode(genActualType(op1->gtType));
11588 /* Create the comparison operator and try to fold it */
11590 oper = (opcode == CEE_BRTRUE || opcode == CEE_BRTRUE_S) ? GT_NE : GT_EQ;
11591 op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
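// For example (illustrative IL): "ldloc.0; brtrue L" yields a GT_NE(local0, 0)
// that becomes the GT_JTRUE condition below, while brfalse yields GT_EQ; if
// the operand is already a comparison, brfalse just reverses its sense.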
11598 /* Fold comparison if we can */
11600 op1 = gtFoldExpr(op1);
11602 /* Try to fold the really simple cases like 'iconst *, ifne/ifeq'*/
11603 /* Don't make any blocks unreachable in import only mode */
11605 if ((op1->gtOper == GT_CNS_INT) && !compIsForImportOnly())
11607 /* gtFoldExpr() should prevent this as we don't want to make any blocks
11608 unreachable under compDbgCode */
11609 assert(!opts.compDbgCode);
11611 BBjumpKinds foldedJumpKind = (BBjumpKinds)(op1->gtIntCon.gtIconVal ? BBJ_ALWAYS : BBJ_NONE);
11612 assertImp((block->bbJumpKind == BBJ_COND) // normal case
11613 || (block->bbJumpKind == foldedJumpKind)); // this can happen if we are reimporting the
11614 // block for the second time
11616 block->bbJumpKind = foldedJumpKind;
11620 if (op1->gtIntCon.gtIconVal)
11622 printf("\nThe conditional jump becomes an unconditional jump to BB%02u\n",
11623 block->bbJumpDest->bbNum);
11627 printf("\nThe block falls through into the next BB%02u\n", block->bbNext->bbNum);
11634 op1 = gtNewOperNode(GT_JTRUE, TYP_VOID, op1);
11636 /* GT_JTRUE is handled specially for non-empty stacks. See 'addStmt'
11637 in impImportBlock(block). For correct line numbers, spill stack. */
11639 if (opts.compDbgCode && impCurStmtOffs != BAD_IL_OFFSET)
11641 impSpillStackEnsure(true);
11668 if (tiVerificationNeeded)
11670 verVerifyCond(impStackTop(1).seTypeInfo, impStackTop().seTypeInfo, opcode);
11671 tiRetVal = typeInfo(TI_INT);
11674 op2 = impPopStack().val;
11675 op1 = impPopStack().val;
11677 #ifdef _TARGET_64BIT_
11678 if (varTypeIsI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_INT))
11680 op2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
11682 else if (varTypeIsI(op2->TypeGet()) && (genActualType(op1->TypeGet()) == TYP_INT))
11684 op1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
11686 #endif // _TARGET_64BIT_
11688 assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
11689 varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet()) ||
11690 varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
11692 /* Create the comparison node */
11694 op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
11696 /* TODO: setting both flags when only one is appropriate */
11697 if (opcode == CEE_CGT_UN || opcode == CEE_CLT_UN)
11699 op1->gtFlags |= GTF_RELOP_NAN_UN | GTF_UNSIGNED;
11702 impPushOnStack(op1, tiRetVal);
11708 goto CMP_2_OPs_AND_BR;
11713 goto CMP_2_OPs_AND_BR;
11718 goto CMP_2_OPs_AND_BR_UN;
11723 goto CMP_2_OPs_AND_BR;
11728 goto CMP_2_OPs_AND_BR_UN;
11733 goto CMP_2_OPs_AND_BR;
11738 goto CMP_2_OPs_AND_BR_UN;
11743 goto CMP_2_OPs_AND_BR;
11748 goto CMP_2_OPs_AND_BR_UN;
11753 goto CMP_2_OPs_AND_BR_UN;
11755 CMP_2_OPs_AND_BR_UN:
11758 goto CMP_2_OPs_AND_BR_ALL;
11762 goto CMP_2_OPs_AND_BR_ALL;
11763 CMP_2_OPs_AND_BR_ALL:
11765 if (tiVerificationNeeded)
11767 verVerifyCond(impStackTop(1).seTypeInfo, impStackTop().seTypeInfo, opcode);
11770 /* Pull two values */
11771 op2 = impPopStack().val;
11772 op1 = impPopStack().val;
11774 #ifdef _TARGET_64BIT_
11775 if ((op1->TypeGet() == TYP_I_IMPL) && (genActualType(op2->TypeGet()) == TYP_INT))
11777 op2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
11779 else if ((op2->TypeGet() == TYP_I_IMPL) && (genActualType(op1->TypeGet()) == TYP_INT))
11781 op1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
11783 #endif // _TARGET_64BIT_
11785 assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
11786 varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet()) ||
11787 varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
11789 if (!opts.MinOpts() && !opts.compDbgCode && block->bbJumpDest == block->bbNext)
11791 block->bbJumpKind = BBJ_NONE;
11793 if (op1->gtFlags & GTF_GLOB_EFFECT)
11795 impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
11796 "Branch to next Optimization, op1 side effect"));
11797 impAppendTree(gtUnusedValNode(op1), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
11799 if (op2->gtFlags & GTF_GLOB_EFFECT)
11801 impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
11802 "Branch to next Optimization, op2 side effect"));
11803 impAppendTree(gtUnusedValNode(op2), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
11807 if ((op1->gtFlags | op2->gtFlags) & GTF_GLOB_EFFECT)
11809 impNoteLastILoffs();
11814 #if !FEATURE_X87_DOUBLES
11815 // We can generate an compare of different sized floating point op1 and op2
11816 // We insert a cast
11818 if (varTypeIsFloating(op1->TypeGet()))
11820 if (op1->TypeGet() != op2->TypeGet())
11822 assert(varTypeIsFloating(op2->TypeGet()));
11824 // say op1=double, op2=float. To avoid loss of precision
11825 // while comparing, op2 is converted to double and a double
11826 // comparison is done.
11827 if (op1->TypeGet() == TYP_DOUBLE)
11829 // We insert a cast of op2 to TYP_DOUBLE
11830 op2 = gtNewCastNode(TYP_DOUBLE, op2, TYP_DOUBLE);
11832 else if (op2->TypeGet() == TYP_DOUBLE)
11834 // We insert a cast of op1 to TYP_DOUBLE
11835 op1 = gtNewCastNode(TYP_DOUBLE, op1, TYP_DOUBLE);
11839 #endif // !FEATURE_X87_DOUBLES
11841 /* Create and append the operator */
11843 op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
11847 op1->gtFlags |= GTF_UNSIGNED;
11852 op1->gtFlags |= GTF_RELOP_NAN_UN;
11858 assert(!compIsForInlining());
11860 if (tiVerificationNeeded)
11862 Verify(impStackTop().seTypeInfo.IsType(TI_INT), "Bad switch val");
11864 /* Pop the switch value off the stack */
11865 op1 = impPopStack().val;
11866 assertImp(genActualTypeIsIntOrI(op1->TypeGet()));
11868 /* We can create a switch node */
11870 op1 = gtNewOperNode(GT_SWITCH, TYP_VOID, op1);
11872 val = (int)getU4LittleEndian(codeAddr);
11873 codeAddr += 4 + val * 4; // skip over the switch-table
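// In the IL stream, the switch opcode is followed by a 4-byte case count and
// then that many 4-byte relative jump targets. The targets were already
// recorded as block successors when the basic blocks were built, so only the
// switch value is needed here and the table itself is skipped.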
11877 /************************** Casting OPCODES ***************************/
11879 case CEE_CONV_OVF_I1:
11882 case CEE_CONV_OVF_I2:
11883 lclTyp = TYP_SHORT;
11885 case CEE_CONV_OVF_I:
11886 lclTyp = TYP_I_IMPL;
11888 case CEE_CONV_OVF_I4:
11891 case CEE_CONV_OVF_I8:
11895 case CEE_CONV_OVF_U1:
11896 lclTyp = TYP_UBYTE;
11898 case CEE_CONV_OVF_U2:
11901 case CEE_CONV_OVF_U:
11902 lclTyp = TYP_U_IMPL;
11904 case CEE_CONV_OVF_U4:
11907 case CEE_CONV_OVF_U8:
11908 lclTyp = TYP_ULONG;
11911 case CEE_CONV_OVF_I1_UN:
11914 case CEE_CONV_OVF_I2_UN:
11915 lclTyp = TYP_SHORT;
11917 case CEE_CONV_OVF_I_UN:
11918 lclTyp = TYP_I_IMPL;
11920 case CEE_CONV_OVF_I4_UN:
11923 case CEE_CONV_OVF_I8_UN:
11927 case CEE_CONV_OVF_U1_UN:
11928 lclTyp = TYP_UBYTE;
11930 case CEE_CONV_OVF_U2_UN:
11933 case CEE_CONV_OVF_U_UN:
11934 lclTyp = TYP_U_IMPL;
11936 case CEE_CONV_OVF_U4_UN:
11939 case CEE_CONV_OVF_U8_UN:
11940 lclTyp = TYP_ULONG;
11945 goto CONV_OVF_COMMON;
11948 goto CONV_OVF_COMMON;
11958 lclTyp = TYP_SHORT;
11961 lclTyp = TYP_I_IMPL;
11971 lclTyp = TYP_UBYTE;
11976 #if (REGSIZE_BYTES == 8)
11978 lclTyp = TYP_U_IMPL;
11982 lclTyp = TYP_U_IMPL;
11989 lclTyp = TYP_ULONG;
11993 lclTyp = TYP_FLOAT;
11996 lclTyp = TYP_DOUBLE;
11999 case CEE_CONV_R_UN:
12000 lclTyp = TYP_DOUBLE;
12014 // just check that we have a number on the stack
12015 if (tiVerificationNeeded)
12017 const typeInfo& tiVal = impStackTop().seTypeInfo;
12018 Verify(tiVal.IsNumberType(), "bad arg");
12020 #ifdef _TARGET_64BIT_
12021 bool isNative = false;
12025 case CEE_CONV_OVF_I:
12026 case CEE_CONV_OVF_I_UN:
12028 case CEE_CONV_OVF_U:
12029 case CEE_CONV_OVF_U_UN:
12033 // leave 'isNative' = false;
12038 tiRetVal = typeInfo::nativeInt();
12041 #endif // _TARGET_64BIT_
12043 tiRetVal = typeInfo(lclTyp).NormaliseForStack();
// Only conversions from FLOAT or DOUBLE to an integer type, and conversions
// from ULONG (or LONG on ARM) to DOUBLE, are morphed into helper calls
12050 if (varTypeIsFloating(lclTyp))
12052 callNode = varTypeIsLong(impStackTop().val) || uns // uint->dbl gets turned into uint->long->dbl
12053 #ifdef _TARGET_64BIT_
12054 // TODO-ARM64-Bug?: This was AMD64; I enabled it for ARM64 also. OK?
12055 // TYP_BYREF could be used as TYP_I_IMPL which is long.
12056 // TODO-CQ: remove this when we lower casts long/ulong --> float/double
12057 // and generate SSE2 code instead of going through helper calls.
12058 || (impStackTop().val->TypeGet() == TYP_BYREF)
12064 callNode = varTypeIsFloating(impStackTop().val->TypeGet());
// At this point uns, ovf and callNode are all set
12069 op1 = impPopStack().val;
12070 impBashVarAddrsToI(op1);
12072 if (varTypeIsSmall(lclTyp) && !ovfl && op1->gtType == TYP_INT && op1->gtOper == GT_AND)
12074 op2 = op1->gtOp.gtOp2;
12076 if (op2->gtOper == GT_CNS_INT)
12078 ssize_t ival = op2->gtIntCon.gtIconVal;
12079 ssize_t mask, umask;
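// (Set per target type below: 'mask' keeps every bit the small type can
// represent and 'umask' every bit except the sign bit -- e.g. 0x00FF and
// 0x007F for an 8-bit target. If the AND constant already fits in those
// bits, as in '(x & 0x7F)' followed by conv.i1, the conversion cannot
// change the value.)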
12095 assert(!"unexpected type");
12099 if (((ival & umask) == ival) || ((ival & mask) == ival && uns))
12101 /* Toss the cast, it's a waste of time */
12103 impPushOnStack(op1, tiRetVal);
12106 else if (ival == mask)
/* Toss the masking; it's a waste of time, since
   we sign-extend from the small value anyway */
12111 op1 = op1->gtOp.gtOp1;
12116 /* The 'op2' sub-operand of a cast is the 'real' type number,
12117 since the result of a cast to one of the 'small' integer
types is an integer. */
12121 type = genActualType(lclTyp);
12123 #if SMALL_TREE_NODES
12126 op1 = gtNewCastNodeL(type, op1, lclTyp);
12129 #endif // SMALL_TREE_NODES
12131 op1 = gtNewCastNode(type, op1, lclTyp);
12136 op1->gtFlags |= (GTF_OVERFLOW | GTF_EXCEPT);
12140 op1->gtFlags |= GTF_UNSIGNED;
12142 impPushOnStack(op1, tiRetVal);
12146 if (tiVerificationNeeded)
12148 tiRetVal = impStackTop().seTypeInfo;
12149 Verify(tiRetVal.IsNumberType(), "Bad arg");
12152 op1 = impPopStack().val;
12153 impBashVarAddrsToI(op1, nullptr);
12154 impPushOnStack(gtNewOperNode(GT_NEG, genActualType(op1->gtType), op1), tiRetVal);
12159 /* Pull the top value from the stack */
12161 StackEntry se = impPopStack();
clsHnd = se.seTypeInfo.GetClassHandle();
op1 = se.val;
12165 /* Get hold of the type of the value being duplicated */
12167 lclTyp = genActualType(op1->gtType);
12169 /* Does the value have any side effects? */
12171 if ((op1->gtFlags & GTF_SIDE_EFFECT) || opts.compDbgCode)
12173 // Since we are throwing away the value, just normalize
12174 // it to its address. This is more efficient.
12176 if (varTypeIsStruct(op1))
12178 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
12179 // Non-calls, such as obj or ret_expr, have to go through this.
12180 // Calls with large struct return value have to go through this.
12181 // Helper calls with small struct return value also have to go
12182 // through this since they do not follow Unix calling convention.
12183 if (op1->gtOper != GT_CALL || !IsMultiRegReturnedType(clsHnd) ||
12184 op1->AsCall()->gtCallType == CT_HELPER)
12185 #endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
12187 op1 = impGetStructAddr(op1, clsHnd, (unsigned)CHECK_SPILL_ALL, false);
12191 // If op1 is non-overflow cast, throw it away since it is useless.
12192 // Another reason for throwing away the useless cast is in the context of
12193 // implicit tail calls when the operand of pop is GT_CAST(GT_CALL(..)).
12194 // The cast gets added as part of importing GT_CALL, which gets in the way
12195 // of fgMorphCall() on the forms of tail call nodes that we assert.
12196 if ((op1->gtOper == GT_CAST) && !op1->gtOverflow())
12198 op1 = op1->gtOp.gtOp1;
12201 // If 'op1' is an expression, create an assignment node.
12202 // Helps analyses (like CSE) to work fine.
12204 if (op1->gtOper != GT_CALL)
12206 op1 = gtUnusedValNode(op1);
12209 /* Append the value to the tree list */
12213 /* No side effects - just throw the <BEEP> thing away */
12219 if (tiVerificationNeeded)
// Dup could mark the beginning of a delegate creation sequence; remember that
12222 delegateCreateStart = codeAddr - 1;
12226 // If the expression to dup is simple, just clone it.
12227 // Otherwise spill it to a temp, and reload the temp
12229 StackEntry se = impPopStack();
tiRetVal = se.seTypeInfo;
op1 = se.val;
12233 if (!opts.compDbgCode && !op1->IsIntegralConst(0) && !op1->IsFPZero() && !op1->IsLocal())
12235 const unsigned tmpNum = lvaGrabTemp(true DEBUGARG("dup spill"));
12236 impAssignTempGen(tmpNum, op1, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_ALL);
12237 var_types type = genActualType(lvaTable[tmpNum].TypeGet());
12238 op1 = gtNewLclvNode(tmpNum, type);
12240 // Propagate type info to the temp
12241 if (type == TYP_REF)
12243 lvaSetClass(tmpNum, op1, tiRetVal.GetClassHandle());
12247 op1 = impCloneExpr(op1, &op2, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_ALL,
12248 nullptr DEBUGARG("DUP instruction"));
12250 assert(!(op1->gtFlags & GTF_GLOB_EFFECT) && !(op2->gtFlags & GTF_GLOB_EFFECT));
12251 impPushOnStack(op1, tiRetVal);
12252 impPushOnStack(op2, tiRetVal);
12260 lclTyp = TYP_SHORT;
12269 lclTyp = TYP_I_IMPL;
12271 case CEE_STIND_REF:
12275 lclTyp = TYP_FLOAT;
12278 lclTyp = TYP_DOUBLE;
12282 if (tiVerificationNeeded)
12284 typeInfo instrType(lclTyp);
12285 #ifdef _TARGET_64BIT_
12286 if (opcode == CEE_STIND_I)
12288 instrType = typeInfo::nativeInt();
12290 #endif // _TARGET_64BIT_
12291 verVerifySTIND(impStackTop(1).seTypeInfo, impStackTop(0).seTypeInfo, instrType);
12295 compUnsafeCastUsed = true; // Have to go conservative
12300 op2 = impPopStack().val; // value to store
12301 op1 = impPopStack().val; // address to store to
12303 // you can indirect off of a TYP_I_IMPL (if we are in C) or a BYREF
12304 assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
12306 impBashVarAddrsToI(op1, op2);
12308 op2 = impImplicitR4orR8Cast(op2, lclTyp);
12310 #ifdef _TARGET_64BIT_
12311 // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
12312 if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType))
12314 op2->gtType = TYP_I_IMPL;
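// (For example, with 'ldc.i4.0; stind.i' the zero constant arrives as
// TYP_INT; retyping it in place avoids materializing a cast node.)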
// Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
12320 if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT))
12322 assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
12323 op2 = gtNewCastNode(TYP_INT, op2, TYP_INT);
// Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
12327 if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT))
12329 assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
12330 op2 = gtNewCastNode(TYP_I_IMPL, op2, TYP_I_IMPL);
12333 #endif // _TARGET_64BIT_
12335 if (opcode == CEE_STIND_REF)
12337 // STIND_REF can be used to store TYP_INT, TYP_I_IMPL, TYP_REF, or TYP_BYREF
12338 assertImp(varTypeIsIntOrI(op2->gtType) || varTypeIsGC(op2->gtType));
12339 lclTyp = genActualType(op2->TypeGet());
12342 // Check target type.
12344 if (op2->gtType == TYP_BYREF || lclTyp == TYP_BYREF)
12346 if (op2->gtType == TYP_BYREF)
12348 assertImp(lclTyp == TYP_BYREF || lclTyp == TYP_I_IMPL);
12350 else if (lclTyp == TYP_BYREF)
12352 assertImp(op2->gtType == TYP_BYREF || varTypeIsIntOrI(op2->gtType));
12357 assertImp(genActualType(op2->gtType) == genActualType(lclTyp) ||
12358 ((lclTyp == TYP_I_IMPL) && (genActualType(op2->gtType) == TYP_INT)) ||
12359 (varTypeIsFloating(op2->gtType) && varTypeIsFloating(lclTyp)));
12363 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
// stind could point anywhere; for example, at a boxed class's static int
12366 op1->gtFlags |= GTF_IND_TGTANYWHERE;
12368 if (prefixFlags & PREFIX_VOLATILE)
12370 assert(op1->OperGet() == GT_IND);
12371 op1->gtFlags |= GTF_DONT_CSE; // Can't CSE a volatile
12372 op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
12373 op1->gtFlags |= GTF_IND_VOLATILE;
12376 if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp))
12378 assert(op1->OperGet() == GT_IND);
12379 op1->gtFlags |= GTF_IND_UNALIGNED;
12382 op1 = gtNewAssignNode(op1, op2);
12383 op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
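// GTF_EXCEPT because the store may fault on a bad address; GTF_GLOB_REF
// because an indirect store can alias any location and so must be treated
// as touching global state.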
12385 // Spill side-effects AND global-data-accesses
12386 if (verCurrentState.esStackDepth > 0)
12388 impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STIND"));
12397 lclTyp = TYP_SHORT;
12406 case CEE_LDIND_REF:
12410 lclTyp = TYP_I_IMPL;
12413 lclTyp = TYP_FLOAT;
12416 lclTyp = TYP_DOUBLE;
12419 lclTyp = TYP_UBYTE;
12426 if (tiVerificationNeeded)
12428 typeInfo lclTiType(lclTyp);
12429 #ifdef _TARGET_64BIT_
12430 if (opcode == CEE_LDIND_I)
12432 lclTiType = typeInfo::nativeInt();
12434 #endif // _TARGET_64BIT_
12435 tiRetVal = verVerifyLDIND(impStackTop().seTypeInfo, lclTiType);
12436 tiRetVal.NormaliseForStack();
12440 compUnsafeCastUsed = true; // Have to go conservative
12445 op1 = impPopStack().val; // address to load from
12446 impBashVarAddrsToI(op1);
12448 #ifdef _TARGET_64BIT_
// Allow an upcast of op1 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
12451 if (genActualType(op1->gtType) == TYP_INT)
12453 assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
12454 op1 = gtNewCastNode(TYP_I_IMPL, op1, TYP_I_IMPL);
12458 assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
12460 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
// ldind could point anywhere; for example, at a boxed class's static int
12463 op1->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
12465 if (prefixFlags & PREFIX_VOLATILE)
12467 assert(op1->OperGet() == GT_IND);
12468 op1->gtFlags |= GTF_DONT_CSE; // Can't CSE a volatile
12469 op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
12470 op1->gtFlags |= GTF_IND_VOLATILE;
12473 if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp))
12475 assert(op1->OperGet() == GT_IND);
12476 op1->gtFlags |= GTF_IND_UNALIGNED;
12479 impPushOnStack(op1, tiRetVal);
12483 case CEE_UNALIGNED:
12486 val = getU1LittleEndian(codeAddr);
12488 JITDUMP(" %u", val);
12489 if ((val != 1) && (val != 2) && (val != 4))
12491 BADCODE("Alignment unaligned. must be 1, 2, or 4");
12494 Verify(!(prefixFlags & PREFIX_UNALIGNED), "Multiple unaligned. prefixes");
12495 prefixFlags |= PREFIX_UNALIGNED;
12497 impValidateMemoryAccessOpcode(codeAddr, codeEndp, false);
12500 opcode = (OPCODE)getU1LittleEndian(codeAddr);
12501 opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
12502 codeAddr += sizeof(__int8);
12503 goto DECODE_OPCODE;
12507 Verify(!(prefixFlags & PREFIX_VOLATILE), "Multiple volatile. prefixes");
12508 prefixFlags |= PREFIX_VOLATILE;
12510 impValidateMemoryAccessOpcode(codeAddr, codeEndp, true);
12517 // Need to do a lookup here so that we perform an access check
12518 // and do a NOWAY if protections are violated
12519 _impResolveToken(CORINFO_TOKENKIND_Method);
12521 JITDUMP(" %08X", resolvedToken.token);
12523 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef*/,
12524 addVerifyFlag(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN)),
12527 // This check really only applies to intrinsic Array.Address methods
12528 if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE)
12530 NO_WAY("Currently do not support LDFTN of Parameterized functions");
12533 // Do this before DO_LDFTN since CEE_LDVIRTFN does it on its own.
12534 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12536 if (tiVerificationNeeded)
// LDFTN could mark the beginning of a delegate creation sequence; remember that
12539 delegateCreateStart = codeAddr - 2;
12541 // check any constraints on the callee's class and type parameters
12542 VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
12543 "method has unsatisfied class constraints");
12544 VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(resolvedToken.hClass,
12545 resolvedToken.hMethod),
12546 "method has unsatisfied method constraints");
12548 mflags = callInfo.verMethodFlags;
12549 Verify(!(mflags & CORINFO_FLG_CONSTRUCTOR), "LDFTN on a constructor");
12553 op1 = impMethodPointer(&resolvedToken, &callInfo);
12554 if (compDonotInline())
12559 CORINFO_RESOLVED_TOKEN* heapToken = impAllocateToken(resolvedToken);
12560 impPushOnStack(op1, typeInfo(heapToken));
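// The resolved token travels with the stack entry so that a subsequent
// 'newobj' of a delegate constructor can recover exactly which method this
// function pointer refers to.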
12565 case CEE_LDVIRTFTN:
12567 /* Get the method token */
12569 _impResolveToken(CORINFO_TOKENKIND_Method);
12571 JITDUMP(" %08X", resolvedToken.token);
12573 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef */,
12574 addVerifyFlag(combine(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN),
12575 CORINFO_CALLINFO_CALLVIRT)),
12578 // This check really only applies to intrinsic Array.Address methods
12579 if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE)
12581 NO_WAY("Currently do not support LDFTN of Parameterized functions");
12584 mflags = callInfo.methodFlags;
12586 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12588 if (compIsForInlining())
12590 if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL))
12592 compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDVIRTFN_ON_NON_VIRTUAL);
12597 CORINFO_SIG_INFO& ftnSig = callInfo.sig;
12599 if (tiVerificationNeeded)
12602 Verify(ftnSig.hasThis(), "ldvirtftn on a static method");
12603 Verify(!(mflags & CORINFO_FLG_CONSTRUCTOR), "LDVIRTFTN on a constructor");
12605 // JIT32 verifier rejects verifiable ldvirtftn pattern
12606 typeInfo declType =
12607 verMakeTypeInfo(resolvedToken.hClass, true); // Change TI_STRUCT to TI_REF when necessary
12609 typeInfo arg = impStackTop().seTypeInfo;
12610 Verify((arg.IsType(TI_REF) || arg.IsType(TI_NULL)) && tiCompatibleWith(arg, declType, true),
12613 CORINFO_CLASS_HANDLE instanceClassHnd = info.compClassHnd;
12614 if (!(arg.IsType(TI_NULL) || (mflags & CORINFO_FLG_STATIC)))
12616 instanceClassHnd = arg.GetClassHandleForObjRef();
12619 // check any constraints on the method's class and type parameters
12620 VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
12621 "method has unsatisfied class constraints");
12622 VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(resolvedToken.hClass,
12623 resolvedToken.hMethod),
12624 "method has unsatisfied method constraints");
12626 if (mflags & CORINFO_FLG_PROTECTED)
12628 Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
12629 "Accessing protected method through wrong type.");
12633 /* Get the object-ref */
12634 op1 = impPopStack().val;
12635 assertImp(op1->gtType == TYP_REF);
12637 if (opts.IsReadyToRun())
12639 if (callInfo.kind != CORINFO_VIRTUALCALL_LDVIRTFTN)
12641 if (op1->gtFlags & GTF_SIDE_EFFECT)
12643 op1 = gtUnusedValNode(op1);
12644 impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
12649 else if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL))
12651 if (op1->gtFlags & GTF_SIDE_EFFECT)
12653 op1 = gtUnusedValNode(op1);
12654 impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
12659 GenTreePtr fptr = impImportLdvirtftn(op1, &resolvedToken, &callInfo);
12660 if (compDonotInline())
12665 CORINFO_RESOLVED_TOKEN* heapToken = impAllocateToken(resolvedToken);
12666 assert(heapToken->tokenType == CORINFO_TOKENKIND_Method);
12667 heapToken->tokenType = CORINFO_TOKENKIND_Ldvirtftn;
12668 impPushOnStack(fptr, typeInfo(heapToken));
12673 case CEE_CONSTRAINED:
12675 assertImp(sz == sizeof(unsigned));
12676 impResolveToken(codeAddr, &constrainedResolvedToken, CORINFO_TOKENKIND_Constrained);
12677 codeAddr += sizeof(unsigned); // prefix instructions must increment codeAddr manually
12678 JITDUMP(" (%08X) ", constrainedResolvedToken.token);
12680 Verify(!(prefixFlags & PREFIX_CONSTRAINED), "Multiple constrained. prefixes");
12681 prefixFlags |= PREFIX_CONSTRAINED;
12684 OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
12685 if (actualOpcode != CEE_CALLVIRT)
12687 BADCODE("constrained. has to be followed by callvirt");
12694 JITDUMP(" readonly.");
12696 Verify(!(prefixFlags & PREFIX_READONLY), "Multiple readonly. prefixes");
12697 prefixFlags |= PREFIX_READONLY;
12700 OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
12701 if (actualOpcode != CEE_LDELEMA && !impOpcodeIsCallOpcode(actualOpcode))
12703 BADCODE("readonly. has to be followed by ldelema or call");
12713 Verify(!(prefixFlags & PREFIX_TAILCALL_EXPLICIT), "Multiple tailcall. prefixes");
12714 prefixFlags |= PREFIX_TAILCALL_EXPLICIT;
12717 OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
12718 if (!impOpcodeIsCallOpcode(actualOpcode))
12720 BADCODE("tailcall. has to be followed by call, callvirt or calli");
12728 /* Since we will implicitly insert newObjThisPtr at the start of the
12729 argument list, spill any GTF_ORDER_SIDEEFF */
12730 impSpillSpecialSideEff();
12732 /* NEWOBJ does not respond to TAIL */
12733 prefixFlags &= ~PREFIX_TAILCALL_EXPLICIT;
12735 /* NEWOBJ does not respond to CONSTRAINED */
12736 prefixFlags &= ~PREFIX_CONSTRAINED;
12738 _impResolveToken(CORINFO_TOKENKIND_NewObj);
12740 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef*/,
12741 addVerifyFlag(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_ALLOWINSTPARAM)),
12744 if (compIsForInlining())
12746 if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
12748 // Check to see if this call violates the boundary.
12749 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_SECURITY);
12754 mflags = callInfo.methodFlags;
12756 if ((mflags & (CORINFO_FLG_STATIC | CORINFO_FLG_ABSTRACT)) != 0)
12758 BADCODE("newobj on static or abstract method");
12761 // Insert the security callout before any actual code is generated
12762 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
// There are three different cases for 'new':
//   1) The object is an array (arrays are treated specially by the EE)
//   2) The object is some other variable-sized object (e.g. String)
//   3) The class size can be determined beforehand (the normal case)
// In the first two cases the object size depends on the arguments.
// In the first case we call a NEWOBJ helper (multinewarray);
// in the second case we call the constructor with a null 'this' pointer;
// in the third case we allocate the memory and then call the constructor.
12773 clsFlags = callInfo.classFlags;
12774 if (clsFlags & CORINFO_FLG_ARRAY)
12776 if (tiVerificationNeeded)
12778 CORINFO_CLASS_HANDLE elemTypeHnd;
12779 INDEBUG(CorInfoType corType =)
12780 info.compCompHnd->getChildType(resolvedToken.hClass, &elemTypeHnd);
12781 assert(!(elemTypeHnd == nullptr && corType == CORINFO_TYPE_VALUECLASS));
12782 Verify(elemTypeHnd == nullptr ||
12783 !(info.compCompHnd->getClassAttribs(elemTypeHnd) & CORINFO_FLG_CONTAINS_STACK_PTR),
12784 "newarr of byref-like objects");
12785 verVerifyCall(opcode, &resolvedToken, nullptr, ((prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0),
12786 ((prefixFlags & PREFIX_READONLY) != 0), delegateCreateStart, codeAddr - 1,
12787 &callInfo DEBUGARG(info.compFullName));
12789 // Arrays need to call the NEWOBJ helper.
12790 assertImp(clsFlags & CORINFO_FLG_VAROBJSIZE);
12792 impImportNewObjArray(&resolvedToken, &callInfo);
12793 if (compDonotInline())
12801 // At present this can only be String
12802 else if (clsFlags & CORINFO_FLG_VAROBJSIZE)
12804 if (IsTargetAbi(CORINFO_CORERT_ABI))
12806 // The dummy argument does not exist in CoreRT
12807 newObjThisPtr = nullptr;
12811 // This is the case for variable-sized objects that are not
12812 // arrays. In this case, call the constructor with a null 'this'
12814 newObjThisPtr = gtNewIconNode(0, TYP_REF);
12817 /* Remember that this basic block contains 'new' of an object */
12818 block->bbFlags |= BBF_HAS_NEWOBJ;
12819 optMethodFlags |= OMF_HAS_NEWOBJ;
12823 // This is the normal case where the size of the object is
12824 // fixed. Allocate the memory and call the constructor.
// Note: We cannot add a peephole to avoid the use of a temp here
// because we don't have enough interference info to detect when the
// sources and destination interfere, e.g. s = new S(ref);

// TODO: Find the correct place to introduce a general
// reverse copy prop for struct return values from newobj or
// any function returning structs.
12834 /* get a temporary for the new object */
12835 lclNum = lvaGrabTemp(true DEBUGARG("NewObj constructor temp"));
12836 if (compDonotInline())
12838 // Fail fast if lvaGrabTemp fails with CALLSITE_TOO_MANY_LOCALS.
12839 assert(compInlineResult->GetObservation() == InlineObservation::CALLSITE_TOO_MANY_LOCALS);
12843 // In the value class case we only need clsHnd for size calcs.
12845 // The lookup of the code pointer will be handled by CALL in this case
12846 if (clsFlags & CORINFO_FLG_VALUECLASS)
12848 if (compIsForInlining())
// If the value class has GC fields, inform the inliner. It may choose to
// bail out on the inline.
12852 DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
12853 if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0)
12855 compInlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT);
12856 if (compInlineResult->IsFailure())
12861 // Do further notification in the case where the call site is rare;
12862 // some policies do not track the relative hotness of call sites for
12863 // "always" inline cases.
12864 if (impInlineInfo->iciBlock->isRunRarely())
12866 compInlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT);
12867 if (compInlineResult->IsFailure())
12875 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
12876 unsigned size = info.compCompHnd->getClassSize(resolvedToken.hClass);
12878 if (impIsPrimitive(jitTyp))
12880 lvaTable[lclNum].lvType = JITtype2varType(jitTyp);
// The local variable itself is the allocated space.
// Here we need the unsafe value cls check, since the address of the struct
// is taken for further use and is potentially exploitable.
12887 lvaSetStruct(lclNum, resolvedToken.hClass, true /* unsafe value cls check */);
12890 // Append a tree to zero-out the temp
12891 newObjThisPtr = gtNewLclvNode(lclNum, lvaTable[lclNum].TypeGet());
12893 newObjThisPtr = gtNewBlkOpNode(newObjThisPtr, // Dest
12894 gtNewIconNode(0), // Value
12896 false, // isVolatile
12897 false); // not copyBlock
12898 impAppendTree(newObjThisPtr, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
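// ('newobj' semantics guarantee a zero-initialized instance before the
// constructor runs; since this temp stands in for the allocation, it must
// be cleared explicitly.)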
12900 // Obtain the address of the temp
newObjThisPtr = gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewLclvNode(lclNum, lvaTable[lclNum].TypeGet()));
12906 #ifdef FEATURE_READYTORUN_COMPILER
12907 if (opts.IsReadyToRun())
12909 op1 = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_NEW, TYP_REF);
12910 usingReadyToRunHelper = (op1 != nullptr);
12913 if (!usingReadyToRunHelper)
12916 op1 = impParentClassTokenToHandle(&resolvedToken, nullptr, TRUE);
12917 if (op1 == nullptr)
12918 { // compDonotInline()
12922 // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
12923 // and the newfast call with a single call to a dynamic R2R cell that will:
12924 // 1) Load the context
// 2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
12927 // 3) Allocate and return the new object
12928 // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
12930 op1 = gtNewAllocObjNode(info.compCompHnd->getNewHelper(&resolvedToken, info.compMethodHnd),
12931 resolvedToken.hClass, TYP_REF, op1);
12934 // Remember that this basic block contains 'new' of an object
12935 block->bbFlags |= BBF_HAS_NEWOBJ;
12936 optMethodFlags |= OMF_HAS_NEWOBJ;
// Append the assignment to the temp/local. We don't need to spill
// at all, as we are just calling an EE-JIT helper which can only
// cause an (async) OutOfMemoryException.
12942 // We assign the newly allocated object (by a GT_ALLOCOBJ node)
12943 // to a temp. Note that the pattern "temp = allocObj" is required
12944 // by ObjectAllocator phase to be able to determine GT_ALLOCOBJ nodes
12945 // without exhaustive walk over all expressions.
12947 impAssignTempGen(lclNum, op1, (unsigned)CHECK_SPILL_NONE);
12948 lvaSetClass(lclNum, resolvedToken.hClass, true /* is Exact */);
12950 newObjThisPtr = gtNewLclvNode(lclNum, TYP_REF);
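// The constructor is then imported as an ordinary call with this temp as
// its 'this' argument; the temp, not the call itself, becomes the value of
// the 'newobj' on the evaluation stack.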
12957 /* CALLI does not respond to CONSTRAINED */
12958 prefixFlags &= ~PREFIX_CONSTRAINED;
12960 if (compIsForInlining())
12962 // CALLI doesn't have a method handle, so assume the worst.
12963 if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
12965 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_CALLI);
12975 // We can't call getCallInfo on the token from a CALLI, but we need it in
12976 // many other places. We unfortunately embed that knowledge here.
12977 if (opcode != CEE_CALLI)
12979 _impResolveToken(CORINFO_TOKENKIND_Method);
12981 eeGetCallInfo(&resolvedToken,
12982 (prefixFlags & PREFIX_CONSTRAINED) ? &constrainedResolvedToken : nullptr,
12983 // this is how impImportCall invokes getCallInfo
12985 combine(combine(CORINFO_CALLINFO_ALLOWINSTPARAM, CORINFO_CALLINFO_SECURITYCHECKS),
12986 (opcode == CEE_CALLVIRT) ? CORINFO_CALLINFO_CALLVIRT
12987 : CORINFO_CALLINFO_NONE)),
12992 // Suppress uninitialized use warning.
12993 memset(&resolvedToken, 0, sizeof(resolvedToken));
12994 memset(&callInfo, 0, sizeof(callInfo));
12996 resolvedToken.token = getU4LittleEndian(codeAddr);
12999 CALL: // memberRef should be set.
13000 // newObjThisPtr should be set for CEE_NEWOBJ
13002 JITDUMP(" %08X", resolvedToken.token);
13003 constraintCall = (prefixFlags & PREFIX_CONSTRAINED) != 0;
13005 bool newBBcreatedForTailcallStress;
13007 newBBcreatedForTailcallStress = false;
13009 if (compIsForInlining())
13011 if (compDonotInline())
13015 // We rule out inlinees with explicit tail calls in fgMakeBasicBlocks.
13016 assert((prefixFlags & PREFIX_TAILCALL_EXPLICIT) == 0);
13020 if (compTailCallStress())
13022 // Have we created a new BB after the "call" instruction in fgMakeBasicBlocks()?
13023 // Tail call stress only recognizes call+ret patterns and forces them to be
13024 // explicit tail prefixed calls. Also fgMakeBasicBlocks() under tail call stress
// doesn't import the 'ret' opcode following the call into the basic block
// containing the call; instead it imports it into a new basic block. Note that fgMakeBasicBlocks()
13027 // is already checking that there is an opcode following call and hence it is
13028 // safe here to read next opcode without bounds check.
13029 newBBcreatedForTailcallStress =
13030 impOpcodeIsCallOpcode(opcode) && // Current opcode is a CALL, (not a CEE_NEWOBJ). So, don't
13031 // make it jump to RET.
13032 (OPCODE)getU1LittleEndian(codeAddr + sz) == CEE_RET; // Next opcode is a CEE_RET
13034 if (newBBcreatedForTailcallStress &&
13035 !(prefixFlags & PREFIX_TAILCALL_EXPLICIT) && // User hasn't set "tail." prefix yet.
13036 verCheckTailCallConstraint(opcode, &resolvedToken,
13037 constraintCall ? &constrainedResolvedToken : nullptr,
true) // Is it legal to do a tailcall?
13041 // Stress the tailcall.
13042 JITDUMP(" (Tailcall stress: prefixFlags |= PREFIX_TAILCALL_EXPLICIT)");
13043 prefixFlags |= PREFIX_TAILCALL_EXPLICIT;
13048 // This is split up to avoid goto flow warnings.
13050 isRecursive = !compIsForInlining() && (callInfo.hMethod == info.compMethodHnd);
13052 // Note that when running under tail call stress, a call will be marked as explicit tail prefixed
13053 // hence will not be considered for implicit tail calling.
13054 if (impIsImplicitTailCallCandidate(opcode, codeAddr + sz, codeEndp, prefixFlags, isRecursive))
13056 if (compIsForInlining())
13058 #if FEATURE_TAILCALL_OPT_SHARED_RETURN
// Are we inlining at an implicit tail call site? If so, we can flag
// implicit tail call sites in the inline body. These call sites
// often end up in non-BBJ_RETURN blocks, so only flag them when
13062 // we're able to handle shared returns.
13063 if (impInlineInfo->iciCall->IsImplicitTailCall())
13065 JITDUMP(" (Inline Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)");
13066 prefixFlags |= PREFIX_TAILCALL_IMPLICIT;
13068 #endif // FEATURE_TAILCALL_OPT_SHARED_RETURN
13072 JITDUMP(" (Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)");
13073 prefixFlags |= PREFIX_TAILCALL_IMPLICIT;
13077 // Treat this call as tail call for verification only if "tail" prefixed (i.e. explicit tail call).
13078 explicitTailCall = (prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0;
13079 readonlyCall = (prefixFlags & PREFIX_READONLY) != 0;
13081 if (opcode != CEE_CALLI && opcode != CEE_NEWOBJ)
13083 // All calls and delegates need a security callout.
// For delegates, this is the call to the delegate constructor, not the access check on the LD(virt)FTN.
13086 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
13088 #if 0 // DevDiv 410397 - This breaks too many obfuscated apps to do this in an in-place release
13090 // DevDiv 291703 - we need to check for accessibility between the caller of InitializeArray
13091 // and the field it is reading, thus it is now unverifiable to not immediately precede with
// ldtoken <field token>, and we now check accessibility
13093 if ((callInfo.methodFlags & CORINFO_FLG_INTRINSIC) &&
13094 (info.compCompHnd->getIntrinsicID(callInfo.hMethod) == CORINFO_INTRINSIC_InitializeArray))
13096 if (prevOpcode != CEE_LDTOKEN)
13098 Verify(prevOpcode == CEE_LDTOKEN, "Need ldtoken for InitializeArray");
13102 assert(lastLoadToken != NULL);
13103 // Now that we know we have a token, verify that it is accessible for loading
13104 CORINFO_RESOLVED_TOKEN resolvedLoadField;
13105 impResolveToken(lastLoadToken, &resolvedLoadField, CORINFO_TOKENKIND_Field);
13106 eeGetFieldInfo(&resolvedLoadField, CORINFO_ACCESS_INIT_ARRAY, &fieldInfo);
13107 impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
13111 #endif // DevDiv 410397
13114 if (tiVerificationNeeded)
13116 verVerifyCall(opcode, &resolvedToken, constraintCall ? &constrainedResolvedToken : nullptr,
13117 explicitTailCall, readonlyCall, delegateCreateStart, codeAddr - 1,
13118 &callInfo DEBUGARG(info.compFullName));
13121 // Insert delegate callout here.
13122 if (opcode == CEE_NEWOBJ && (mflags & CORINFO_FLG_CONSTRUCTOR) && (clsFlags & CORINFO_FLG_DELEGATE))
13125 // We should do this only if verification is enabled
13126 // If verification is disabled, delegateCreateStart will not be initialized correctly
13127 if (tiVerificationNeeded)
13129 mdMemberRef delegateMethodRef = mdMemberRefNil;
13130 // We should get here only for well formed delegate creation.
13131 assert(verCheckDelegateCreation(delegateCreateStart, codeAddr - 1, delegateMethodRef));
13136 callTyp = impImportCall(opcode, &resolvedToken, constraintCall ? &constrainedResolvedToken : nullptr,
13137 newObjThisPtr, prefixFlags, &callInfo, opcodeOffs);
13138 if (compDonotInline())
13143 if (explicitTailCall || newBBcreatedForTailcallStress) // If newBBcreatedForTailcallStress is true, we
13144 // have created a new BB after the "call"
13145 // instruction in fgMakeBasicBlocks(). So we need to jump to RET regardless.
13147 assert(!compIsForInlining());
13159 BOOL isLoadAddress = (opcode == CEE_LDFLDA || opcode == CEE_LDSFLDA);
13160 BOOL isLoadStatic = (opcode == CEE_LDSFLD || opcode == CEE_LDSFLDA);
13162 /* Get the CP_Fieldref index */
13163 assertImp(sz == sizeof(unsigned));
13165 _impResolveToken(CORINFO_TOKENKIND_Field);
13167 JITDUMP(" %08X", resolvedToken.token);
13169 int aflags = isLoadAddress ? CORINFO_ACCESS_ADDRESS : CORINFO_ACCESS_GET;
13171 GenTreePtr obj = nullptr;
13172 typeInfo* tiObj = nullptr;
13173 CORINFO_CLASS_HANDLE objType = nullptr; // used for fields
13175 if (opcode == CEE_LDFLD || opcode == CEE_LDFLDA)
13177 tiObj = &impStackTop().seTypeInfo;
13178 StackEntry se = impPopStack();
objType = se.seTypeInfo.GetClassHandle();
obj = se.val;
13182 if (impIsThis(obj))
13184 aflags |= CORINFO_ACCESS_THIS;
13186 // An optimization for Contextful classes:
13187 // we unwrap the proxy when we have a 'this reference'
13189 if (info.compUnwrapContextful)
13191 aflags |= CORINFO_ACCESS_UNWRAP;
13196 eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo);
13198 // Figure out the type of the member. We always call canAccessField, so you always need this
13200 CorInfoType ciType = fieldInfo.fieldType;
13201 clsHnd = fieldInfo.structType;
13203 lclTyp = JITtype2varType(ciType);
#ifdef _TARGET_AMD64_
noway_assert(varTypeIsIntegralOrI(lclTyp) || varTypeIsFloating(lclTyp) || lclTyp == TYP_STRUCT);
#endif // _TARGET_AMD64_
13209 if (compIsForInlining())
13211 switch (fieldInfo.fieldAccessor)
13213 case CORINFO_FIELD_INSTANCE_HELPER:
13214 case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
13215 case CORINFO_FIELD_STATIC_ADDR_HELPER:
13216 case CORINFO_FIELD_STATIC_TLS:
13218 compInlineResult->NoteFatal(InlineObservation::CALLEE_LDFLD_NEEDS_HELPER);
13221 case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
13222 case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
/* We may be able to inline the field accessors in specific instantiations of generic methods */
13225 compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDFLD_NEEDS_HELPER);
13232 if (!isLoadAddress && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && lclTyp == TYP_STRUCT &&
13235 if ((info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd) == CORINFO_TYPE_UNDEF) &&
13236 !(info.compFlags & CORINFO_FLG_FORCEINLINE))
13238 // Loading a static valuetype field usually will cause a JitHelper to be called
13239 // for the static base. This will bloat the code.
13240 compInlineResult->Note(InlineObservation::CALLEE_LDFLD_STATIC_VALUECLASS);
13242 if (compInlineResult->IsFailure())
13250 tiRetVal = verMakeTypeInfo(ciType, clsHnd);
13253 tiRetVal.MakeByRef();
13257 tiRetVal.NormaliseForStack();
13260 // Perform this check always to ensure that we get field access exceptions even with
13261 // SkipVerification.
13262 impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
13264 if (tiVerificationNeeded)
13266 // You can also pass the unboxed struct to LDFLD
13267 BOOL bAllowPlainValueTypeAsThis = FALSE;
13268 if (opcode == CEE_LDFLD && impIsValueType(tiObj))
13270 bAllowPlainValueTypeAsThis = TRUE;
13273 verVerifyField(&resolvedToken, fieldInfo, tiObj, isLoadAddress, bAllowPlainValueTypeAsThis);
13275 // If we're doing this on a heap object or from a 'safe' byref
13276 // then the result is a safe byref too
13277 if (isLoadAddress) // load address
13279 if (fieldInfo.fieldFlags &
13280 CORINFO_FLG_FIELD_STATIC) // statics marked as safe will have permanent home
13282 if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_SAFESTATIC_BYREF_RETURN)
13284 tiRetVal.SetIsPermanentHomeByRef();
13287 else if (tiObj->IsObjRef() || tiObj->IsPermanentHomeByRef())
// ldflda of a byref is safe if done on a GC object or on a byref with a permanent home
13291 tiRetVal.SetIsPermanentHomeByRef();
13297 // tiVerificationNeeded is false.
13298 // Raise InvalidProgramException if static load accesses non-static field
13299 if (isLoadStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0))
13301 BADCODE("static access on an instance field");
13305 // We are using ldfld/a on a static field. We allow it, but need to get side-effect from obj.
13306 if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr)
13308 if (obj->gtFlags & GTF_SIDE_EFFECT)
13310 obj = gtUnusedValNode(obj);
13311 impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13316 /* Preserve 'small' int types */
13317 if (lclTyp > TYP_INT)
13319 lclTyp = genActualType(lclTyp);
13322 bool usesHelper = false;
13324 switch (fieldInfo.fieldAccessor)
13326 case CORINFO_FIELD_INSTANCE:
13327 #ifdef FEATURE_READYTORUN_COMPILER
13328 case CORINFO_FIELD_INSTANCE_WITH_BASE:
13331 bool nullcheckNeeded = false;
13333 obj = impCheckForNullPointer(obj);
13335 if (isLoadAddress && (obj->gtType == TYP_BYREF) && fgAddrCouldBeNull(obj))
13337 nullcheckNeeded = true;
13340 // If the object is a struct, what we really want is
13341 // for the field to operate on the address of the struct.
13342 if (!varTypeGCtype(obj->TypeGet()) && impIsValueType(tiObj))
13344 assert(opcode == CEE_LDFLD && objType != nullptr);
13346 obj = impGetStructAddr(obj, objType, (unsigned)CHECK_SPILL_ALL, true);
13349 /* Create the data member node */
13350 op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset, nullcheckNeeded);
13352 #ifdef FEATURE_READYTORUN_COMPILER
13353 if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE)
13355 op1->gtField.gtFieldLookup = fieldInfo.fieldLookup;
13359 op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT);
13361 if (fgAddrCouldBeNull(obj))
13363 op1->gtFlags |= GTF_EXCEPT;
// If gtFldObj is a BYREF then our target is a value class and
// it could point anywhere; for example, at a boxed class's static int
13368 if (obj->gtType == TYP_BYREF)
13370 op1->gtFlags |= GTF_IND_TGTANYWHERE;
13373 DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
13374 if (StructHasOverlappingFields(typeFlags))
13376 op1->gtField.gtFldMayOverlap = true;
// wrap it in an address-of operator if necessary
13382 op1 = gtNewOperNode(GT_ADDR,
13383 (var_types)(varTypeIsGC(obj->TypeGet()) ? TYP_BYREF : TYP_I_IMPL), op1);
13387 if (compIsForInlining() &&
13388 impInlineIsGuaranteedThisDerefBeforeAnySideEffects(nullptr, obj,
13389 impInlineInfo->inlArgInfo))
13391 impInlineInfo->thisDereferencedFirst = true;
13397 case CORINFO_FIELD_STATIC_TLS:
13398 #ifdef _TARGET_X86_
// Legacy TLS access is implemented as an intrinsic on x86 only
13401 /* Create the data member node */
13402 op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset);
13403 op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation
13407 op1 = gtNewOperNode(GT_ADDR, (var_types)TYP_I_IMPL, op1);
13411 fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER;
13416 case CORINFO_FIELD_STATIC_ADDR_HELPER:
13417 case CORINFO_FIELD_INSTANCE_HELPER:
13418 case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
13419 op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp,
13424 case CORINFO_FIELD_STATIC_ADDRESS:
13425 // Replace static read-only fields with constant if possible
13426 if ((aflags & CORINFO_ACCESS_GET) && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_FINAL) &&
13427 !(fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP) &&
13428 (varTypeIsIntegral(lclTyp) || varTypeIsFloating(lclTyp)))
13430 CorInfoInitClassResult initClassResult =
13431 info.compCompHnd->initClass(resolvedToken.hField, info.compMethodHnd,
13432 impTokenLookupContextHandle);
13434 if (initClassResult & CORINFO_INITCLASS_INITIALIZED)
13436 void** pFldAddr = nullptr;
void* fldAddr = info.compCompHnd->getFieldAddress(resolvedToken.hField, (void**)&pFldAddr);
13440 // We should always be able to access this static's address directly
13441 assert(pFldAddr == nullptr);
13443 op1 = impImportStaticReadOnlyField(fldAddr, lclTyp);
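// The class is already initialized and the field is final, so its value can
// never change again; read it now and bake it into the code as a constant.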
13450 case CORINFO_FIELD_STATIC_RVA_ADDRESS:
13451 case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
13452 case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
13453 case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
13454 op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo,
13458 case CORINFO_FIELD_INTRINSIC_ZERO:
13460 assert(aflags & CORINFO_ACCESS_GET);
13461 op1 = gtNewIconNode(0, lclTyp);
13466 case CORINFO_FIELD_INTRINSIC_EMPTY_STRING:
13468 assert(aflags & CORINFO_ACCESS_GET);
void* pValue;
InfoAccessType iat = info.compCompHnd->emptyStringLiteral(&pValue);
13472 op1 = gtNewStringLiteralNode(iat, pValue);
13477 case CORINFO_FIELD_INTRINSIC_ISLITTLEENDIAN:
13479 assert(aflags & CORINFO_ACCESS_GET);
#if BIGENDIAN
op1 = gtNewIconNode(0, lclTyp);
#else
op1 = gtNewIconNode(1, lclTyp);
#endif
13490 assert(!"Unexpected fieldAccessor");
13493 if (!isLoadAddress)
13496 if (prefixFlags & PREFIX_VOLATILE)
13498 op1->gtFlags |= GTF_DONT_CSE; // Can't CSE a volatile
13499 op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
13503 assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) ||
13504 (op1->OperGet() == GT_OBJ));
13505 op1->gtFlags |= GTF_IND_VOLATILE;
13509 if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp))
13513 assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) ||
13514 (op1->OperGet() == GT_OBJ));
13515 op1->gtFlags |= GTF_IND_UNALIGNED;
13520 /* Check if the class needs explicit initialization */
13522 if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
13524 GenTreePtr helperNode = impInitClass(&resolvedToken);
13525 if (compDonotInline())
13529 if (helperNode != nullptr)
13531 op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1);
13536 impPushOnStack(op1, tiRetVal);
13544 BOOL isStoreStatic = (opcode == CEE_STSFLD);
13546 CORINFO_CLASS_HANDLE fieldClsHnd; // class of the field (if it's a ref type)
13548 /* Get the CP_Fieldref index */
13550 assertImp(sz == sizeof(unsigned));
13552 _impResolveToken(CORINFO_TOKENKIND_Field);
13554 JITDUMP(" %08X", resolvedToken.token);
13556 int aflags = CORINFO_ACCESS_SET;
13557 GenTreePtr obj = nullptr;
13558 typeInfo* tiObj = nullptr;
13561 /* Pull the value from the stack */
13562 StackEntry se = impPopStack();
op2 = se.val;
tiVal = se.seTypeInfo;
13565 clsHnd = tiVal.GetClassHandle();
13567 if (opcode == CEE_STFLD)
13569 tiObj = &impStackTop().seTypeInfo;
13570 obj = impPopStack().val;
13572 if (impIsThis(obj))
13574 aflags |= CORINFO_ACCESS_THIS;
13576 // An optimization for Contextful classes:
13577 // we unwrap the proxy when we have a 'this reference'
13579 if (info.compUnwrapContextful)
13581 aflags |= CORINFO_ACCESS_UNWRAP;
13586 eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo);
13588 // Figure out the type of the member. We always call canAccessField, so you always need this
13590 CorInfoType ciType = fieldInfo.fieldType;
13591 fieldClsHnd = fieldInfo.structType;
13593 lclTyp = JITtype2varType(ciType);
13595 if (compIsForInlining())
/* Is this a 'special' (COM) field? Or a TLS-ref static field? A field
 * stored in the GC heap? Or a per-inst static? */
13600 switch (fieldInfo.fieldAccessor)
13602 case CORINFO_FIELD_INSTANCE_HELPER:
13603 case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
13604 case CORINFO_FIELD_STATIC_ADDR_HELPER:
13605 case CORINFO_FIELD_STATIC_TLS:
13607 compInlineResult->NoteFatal(InlineObservation::CALLEE_STFLD_NEEDS_HELPER);
13610 case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
13611 case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
/* We may be able to inline the field accessors in specific instantiations of generic methods */
13614 compInlineResult->NoteFatal(InlineObservation::CALLSITE_STFLD_NEEDS_HELPER);
13622 impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
13624 if (tiVerificationNeeded)
13626 verVerifyField(&resolvedToken, fieldInfo, tiObj, TRUE);
13627 typeInfo fieldType = verMakeTypeInfo(ciType, fieldClsHnd);
13628 Verify(tiCompatibleWith(tiVal, fieldType.NormaliseForStack(), true), "type mismatch");
// tiVerificationNeeded is false.
13633 // Raise InvalidProgramException if static store accesses non-static field
13634 if (isStoreStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0))
13636 BADCODE("static access on an instance field");
13640 // We are using stfld on a static field.
13641 // We allow it, but need to eval any side-effects for obj
13642 if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr)
13644 if (obj->gtFlags & GTF_SIDE_EFFECT)
13646 obj = gtUnusedValNode(obj);
13647 impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13652 /* Preserve 'small' int types */
13653 if (lclTyp > TYP_INT)
13655 lclTyp = genActualType(lclTyp);
13658 switch (fieldInfo.fieldAccessor)
13660 case CORINFO_FIELD_INSTANCE:
13661 #ifdef FEATURE_READYTORUN_COMPILER
13662 case CORINFO_FIELD_INSTANCE_WITH_BASE:
13665 obj = impCheckForNullPointer(obj);
13667 /* Create the data member node */
13668 op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset);
13669 DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
13670 if (StructHasOverlappingFields(typeFlags))
13672 op1->gtField.gtFldMayOverlap = true;
13675 #ifdef FEATURE_READYTORUN_COMPILER
13676 if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE)
13678 op1->gtField.gtFieldLookup = fieldInfo.fieldLookup;
13682 op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT);
13684 if (fgAddrCouldBeNull(obj))
13686 op1->gtFlags |= GTF_EXCEPT;
// If gtFldObj is a BYREF then our target is a value class and
// it could point anywhere; for example, at a boxed class's static int
13691 if (obj->gtType == TYP_BYREF)
13693 op1->gtFlags |= GTF_IND_TGTANYWHERE;
13696 if (compIsForInlining() &&
13697 impInlineIsGuaranteedThisDerefBeforeAnySideEffects(op2, obj, impInlineInfo->inlArgInfo))
13699 impInlineInfo->thisDereferencedFirst = true;
13704 case CORINFO_FIELD_STATIC_TLS:
13705 #ifdef _TARGET_X86_
// Legacy TLS access is implemented as an intrinsic on x86 only
13708 /* Create the data member node */
13709 op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset);
13710 op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation
13714 fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER;
13719 case CORINFO_FIELD_STATIC_ADDR_HELPER:
13720 case CORINFO_FIELD_INSTANCE_HELPER:
13721 case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
13722 op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp,
13726 case CORINFO_FIELD_STATIC_ADDRESS:
13727 case CORINFO_FIELD_STATIC_RVA_ADDRESS:
13728 case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
13729 case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
13730 case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
13731 op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo,
13736 assert(!"Unexpected fieldAccessor");
13739 // Create the member assignment, unless we have a struct.
13740 // TODO-1stClassStructs: This could be limited to TYP_STRUCT, to avoid extra copies.
13741 bool deferStructAssign = varTypeIsStruct(lclTyp);
13743 if (!deferStructAssign)
13745 if (prefixFlags & PREFIX_VOLATILE)
13747 assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND));
13748 op1->gtFlags |= GTF_DONT_CSE; // Can't CSE a volatile
13749 op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
13750 op1->gtFlags |= GTF_IND_VOLATILE;
13752 if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp))
13754 assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND));
13755 op1->gtFlags |= GTF_IND_UNALIGNED;
/* V4.0 allows assignment of i4 constant values to i8 type vars when the IL verifier is bypassed (full
   trust apps). The reason this works is that the JIT stores an i4 constant in the GenTree union during
   importation and reads from the union as if it were a long during code generation. Though this
   can potentially read garbage, one can get lucky and have it work correctly.

   This code pattern is generated by the Dev10 MC++ compiler when storing to fields compiled with the
   /O2 switch (the default when compiling retail configs in Dev10), and a customer app has taken a
   dependency on it. To be backward compatible, we will explicitly add an upward cast here so that
   it always works correctly.

   Note that this was originally limited to x86 alone, as there was no back-compat to be addressed
   for the ARM JIT. */
13771 CLANG_FORMAT_COMMENT_ANCHOR;
13773 #ifndef _TARGET_64BIT_
13774 // In UWP6.0 and beyond (post-.NET Core 2.0), we decided to let this cast from int to long be
13775 // generated for ARM as well as x86, so the following IR will be accepted:
// (IR sketch: an ASG long whose destination is a CLS_VAR long and whose
// source is a CNS_INT int 2)
13781 if ((op1->TypeGet() != op2->TypeGet()) && op2->OperIsConst() && varTypeIsIntOrI(op2->TypeGet()) &&
13782 varTypeIsLong(op1->TypeGet()))
13784 op2 = gtNewCastNode(op1->TypeGet(), op2, op1->TypeGet());
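// (For example, 'ldc.i4.2; stsfld <int64 field>': the constant is TYP_INT
// while the field is TYP_LONG, so an explicit widening cast is inserted.)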
13788 #ifdef _TARGET_64BIT_
13789 // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
13790 if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType))
13792 op2->gtType = TYP_I_IMPL;
13796 // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatiblity
13798 if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT))
13800 op2 = gtNewCastNode(TYP_INT, op2, TYP_INT);
13802 // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatiblity
13804 if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT))
13806 op2 = gtNewCastNode(TYP_I_IMPL, op2, TYP_I_IMPL);
13811 #if !FEATURE_X87_DOUBLES
13812 // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE
13813 // We insert a cast to the dest 'op1' type
13815 if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) &&
13816 varTypeIsFloating(op2->gtType))
13818 op2 = gtNewCastNode(op1->TypeGet(), op2, op1->TypeGet());
13820 #endif // !FEATURE_X87_DOUBLES
13822 op1 = gtNewAssignNode(op1, op2);
13824 /* Mark the expression as containing an assignment */
13826 op1->gtFlags |= GTF_ASG;
13829 /* Check if the class needs explicit initialization */
13831 if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
13833 GenTreePtr helperNode = impInitClass(&resolvedToken);
13834 if (compDonotInline())
13838 if (helperNode != nullptr)
13840 op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1);
13844 /* stfld can interfere with value classes (consider the sequence
13845 ldloc, ldloca, ..., stfld, stloc). We will be conservative and
13846 spill all value class references from the stack. */
13848 if (obj && ((obj->gtType == TYP_BYREF) || (obj->gtType == TYP_I_IMPL)))
13852 if (impIsValueType(tiObj))
13854 impSpillEvalStack();
13858 impSpillValueClasses();
13862 /* Spill any refs to the same member from the stack */
13864 impSpillLclRefs((ssize_t)resolvedToken.hField);
/* stsfld also interferes with indirect accesses (for aliased
   statics) and calls. But we don't need to spill other statics,
   as we have explicitly spilled this particular static field. */
13870 impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STFLD"));
13872 if (deferStructAssign)
13874 op1 = impAssignStruct(op1, op2, clsHnd, (unsigned)CHECK_SPILL_ALL);
13882 /* Get the class type index operand */
13884 _impResolveToken(CORINFO_TOKENKIND_Newarr);
13886 JITDUMP(" %08X", resolvedToken.token);
13888 if (!opts.IsReadyToRun())
13890 // Need to restore array classes before creating array objects on the heap
13891 op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE /*mustRestoreHandle*/);
13892 if (op1 == nullptr)
13893 { // compDonotInline()
13898 if (tiVerificationNeeded)
// As per ECMA, the 'numElems' specified can be either int32 or native int.
13901 Verify(impStackTop().seTypeInfo.IsIntOrNativeIntType(), "bad bound");
13903 CORINFO_CLASS_HANDLE elemTypeHnd;
13904 info.compCompHnd->getChildType(resolvedToken.hClass, &elemTypeHnd);
13905 Verify(elemTypeHnd == nullptr ||
13906 !(info.compCompHnd->getClassAttribs(elemTypeHnd) & CORINFO_FLG_CONTAINS_STACK_PTR),
13907 "array of byref-like type");
13910 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
13912 accessAllowedResult =
13913 info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
13914 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
13916 /* Form the arglist: array class handle, size */
13917 op2 = impPopStack().val;
13918 assertImp(genActualTypeIsIntOrI(op2->gtType));
13920 #ifdef FEATURE_READYTORUN_COMPILER
13921 if (opts.IsReadyToRun())
13923 op1 = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_NEWARR_1, TYP_REF,
13924 gtNewArgList(op2));
13925 usingReadyToRunHelper = (op1 != nullptr);
13927 if (!usingReadyToRunHelper)
13929 // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
13930 // and the newarr call with a single call to a dynamic R2R cell that will:
13931 // 1) Load the context
13932 // 2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
13933 // 3) Allocate the new array
13934 // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
13936 // Need to restore array classes before creating array objects on the heap
13937 op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE /*mustRestoreHandle*/);
13938 if (op1 == nullptr)
13939 { // compDonotInline()
13945 if (!usingReadyToRunHelper)
13948 args = gtNewArgList(op1, op2);
13950 /* Create a call to 'new' */
13952 // Note that this only works for shared generic code because the same helper is used for all
13953 // reference array types
13954 op1 = gtNewHelperCallNode(info.compCompHnd->getNewArrHelper(resolvedToken.hClass), TYP_REF, args);
13957 op1->gtCall.compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)resolvedToken.hClass;
/* Remember that this basic block contains 'new' of a single-dimensional array */
13961 block->bbFlags |= BBF_HAS_NEWARRAY;
13962 optMethodFlags |= OMF_HAS_NEWARRAY;
13964 /* Push the result of the call on the stack */
13966 impPushOnStack(op1, tiRetVal);
13973 assert(!compIsForInlining());
13975 if (tiVerificationNeeded)
13977 Verify(false, "bad opcode");
13980 // We don't allow locallocs inside handlers
13981 if (block->hasHndIndex())
13983 BADCODE("Localloc can't be inside handler");
13986 /* The FP register may not be back to the original value at the end
13987 of the method, even if the frame size is 0, as localloc may
13988 have modified it. So we will HAVE to reset it */
13990 compLocallocUsed = true;
13991 setNeedsGSSecurityCookie();
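// (localloc makes the frame layout dynamic, so a GS security cookie is
// forced on to protect the return address from overruns of the newly
// allocated region.)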
13993 // Get the size to allocate
13995 op2 = impPopStack().val;
13996 assertImp(genActualTypeIsIntOrI(op2->gtType));
13998 if (verCurrentState.esStackDepth != 0)
14000 BADCODE("Localloc can only be used when the stack is empty");
14003 op1 = gtNewOperNode(GT_LCLHEAP, TYP_I_IMPL, op2);
14005 // May throw a stack overflow exception. Obviously, we don't want locallocs to be CSE'd.
14007 op1->gtFlags |= (GTF_EXCEPT | GTF_DONT_CSE);
14009 impPushOnStack(op1, tiRetVal);
14014 /* Get the type token */
14015 assertImp(sz == sizeof(unsigned));
14017 _impResolveToken(CORINFO_TOKENKIND_Casting);
14019 JITDUMP(" %08X", resolvedToken.token);
14021 if (!opts.IsReadyToRun())
14023 op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
14024 if (op2 == nullptr)
14025 { // compDonotInline()
14030 if (tiVerificationNeeded)
14032 Verify(impStackTop().seTypeInfo.IsObjRef(), "obj reference needed");
14033 // Even if this is a value class, we know it is boxed.
14034 tiRetVal = typeInfo(TI_REF, resolvedToken.hClass);
14036 accessAllowedResult =
14037 info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14038 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14040 op1 = impPopStack().val;
14042 #ifdef FEATURE_READYTORUN_COMPILER
14043 if (opts.IsReadyToRun())
14045 GenTreeCall* opLookup =
14046 impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_ISINSTANCEOF, TYP_REF,
14047 gtNewArgList(op1));
14048 usingReadyToRunHelper = (opLookup != nullptr);
14049 op1 = (usingReadyToRunHelper ? opLookup : op1);
14051 if (!usingReadyToRunHelper)
14053 // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
14054 // and the isinstanceof_any call with a single call to a dynamic R2R cell that will:
14055 // 1) Load the context
14056 // 2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
14057 // 3) Perform the 'is instance' check on the input object
14058 // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
14060 op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
14061 if (op2 == nullptr)
14062 { // compDonotInline()
14068 if (!usingReadyToRunHelper)
14071 op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, false);
14073 if (compDonotInline())
14078 impPushOnStack(op1, tiRetVal);
14082 case CEE_REFANYVAL:
14084 // get the class handle and make a ICON node out of it
14086 _impResolveToken(CORINFO_TOKENKIND_Class);
14088 JITDUMP(" %08X", resolvedToken.token);
14090 op2 = impTokenToHandle(&resolvedToken);
14091 if (op2 == nullptr)
14092 { // compDonotInline()
14096 if (tiVerificationNeeded)
14098 Verify(typeInfo::AreEquivalent(impStackTop().seTypeInfo, verMakeTypeInfo(impGetRefAnyClass())),
14099 "need refany");
14100 tiRetVal = verMakeTypeInfo(resolvedToken.hClass).MakeByRef();
14103 op1 = impPopStack().val;
14104 // make certain it is normalized;
14105 op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL);
14107 // Call helper GETREFANY(classHandle, op1);
14108 args = gtNewArgList(op2, op1);
14109 op1 = gtNewHelperCallNode(CORINFO_HELP_GETREFANY, TYP_BYREF, args);
14111 impPushOnStack(op1, tiRetVal);
14114 case CEE_REFANYTYPE:
14116 if (tiVerificationNeeded)
14118 Verify(typeInfo::AreEquivalent(impStackTop().seTypeInfo, verMakeTypeInfo(impGetRefAnyClass())),
14119 "need refany");
14122 op1 = impPopStack().val;
14124 // make certain it is normalized;
14125 op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL);
14127 if (op1->gtOper == GT_OBJ)
14129 // Get the address of the refany
14130 op1 = op1->gtOp.gtOp1;
14132 // Fetch the type from the correct slot
14133 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
14134 gtNewIconNode(offsetof(CORINFO_RefAny, type), TYP_I_IMPL));
14135 op1 = gtNewOperNode(GT_IND, TYP_BYREF, op1);
14139 assertImp(op1->gtOper == GT_MKREFANY);
14141 // The pointer may have side-effects
14142 if (op1->gtOp.gtOp1->gtFlags & GTF_SIDE_EFFECT)
14144 impAppendTree(op1->gtOp.gtOp1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
14146 impNoteLastILoffs();
14150 // We already have the class handle
14151 op1 = op1->gtOp.gtOp2;
14154 // convert native TypeHandle to RuntimeTypeHandle
14156 GenTreeArgList* helperArgs = gtNewArgList(op1);
14158 op1 = gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE_MAYBENULL, TYP_STRUCT, helperArgs);
14160 // The handle struct is returned in register
14161 op1->gtCall.gtReturnType = TYP_REF;
14163 tiRetVal = typeInfo(TI_STRUCT, impGetTypeHandleClass());
14166 impPushOnStack(op1, tiRetVal);
14171 /* Get the Class index */
14172 assertImp(sz == sizeof(unsigned));
14173 lastLoadToken = codeAddr;
14174 _impResolveToken(CORINFO_TOKENKIND_Ldtoken);
14176 tokenType = info.compCompHnd->getTokenTypeAsHandle(&resolvedToken);
14178 op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE);
14179 if (op1 == nullptr)
14180 { // compDonotInline()
14184 helper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE;
14185 assert(resolvedToken.hClass != nullptr);
14187 if (resolvedToken.hMethod != nullptr)
14189 helper = CORINFO_HELP_METHODDESC_TO_STUBRUNTIMEMETHOD;
14191 else if (resolvedToken.hField != nullptr)
14193 helper = CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD;
14196 GenTreeArgList* helperArgs = gtNewArgList(op1);
14198 op1 = gtNewHelperCallNode(helper, TYP_STRUCT, helperArgs);
14200 // The handle struct is returned in register
14201 op1->gtCall.gtReturnType = TYP_REF;
14203 tiRetVal = verMakeTypeInfo(tokenType);
14204 impPushOnStack(op1, tiRetVal);
14209 case CEE_UNBOX_ANY:
14211 /* Get the Class index */
14212 assertImp(sz == sizeof(unsigned));
14214 _impResolveToken(CORINFO_TOKENKIND_Class);
14216 JITDUMP(" %08X", resolvedToken.token);
14218 BOOL runtimeLookup;
14219 op2 = impTokenToHandle(&resolvedToken, &runtimeLookup);
14220 if (op2 == nullptr)
14222 assert(compDonotInline());
14226 // Run this always so we can get access exceptions even with SkipVerification.
14227 accessAllowedResult =
14228 info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14229 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14231 if (opcode == CEE_UNBOX_ANY && !eeIsValueClass(resolvedToken.hClass))
14233 if (tiVerificationNeeded)
14235 typeInfo tiUnbox = impStackTop().seTypeInfo;
14236 Verify(tiUnbox.IsObjRef(), "bad unbox.any arg");
14237 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
14238 tiRetVal.NormaliseForStack();
14240 JITDUMP("\n Importing UNBOX.ANY(refClass) as CASTCLASS\n");
14241 op1 = impPopStack().val;
14245 /* Pop the object and create the unbox helper call */
14246 /* You might think that for UNBOX_ANY we need to push a different */
14247 /* (non-byref) type, but here we're making the tiRetVal that is used */
14248 /* for the intermediate pointer which we then transfer onto the OBJ */
14249 /* instruction. OBJ then creates the appropriate tiRetVal. */
14250 if (tiVerificationNeeded)
14252 typeInfo tiUnbox = impStackTop().seTypeInfo;
14253 Verify(tiUnbox.IsObjRef(), "Bad unbox arg");
14255 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
14256 Verify(tiRetVal.IsValueClass(), "not value class");
14257 tiRetVal.MakeByRef();
14259 // We always come from an objref, so this is a safe byref
14260 tiRetVal.SetIsPermanentHomeByRef();
14261 tiRetVal.SetIsReadonlyByRef();
14264 op1 = impPopStack().val;
14265 assertImp(op1->gtType == TYP_REF);
14267 helper = info.compCompHnd->getUnBoxHelper(resolvedToken.hClass);
14268 assert(helper == CORINFO_HELP_UNBOX || helper == CORINFO_HELP_UNBOX_NULLABLE);
14270 // Check legality and profitability of inline expansion for unboxing.
14271 const bool canExpandInline = (helper == CORINFO_HELP_UNBOX);
14272 const bool shouldExpandInline = !(compCurBB->isRunRarely() || opts.compDbgCode || opts.MinOpts());
14274 if (canExpandInline && shouldExpandInline)
14276 JITDUMP("\n Importing %s as inline sequence\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY");
14277 // we are doing normal unboxing
14278 // inline the common case of the unbox helper
14279 // UNBOX(exp) morphs into
14280 // clone = pop(exp);
14281 // ((*clone == typeToken) ? nop : helper(clone, typeToken));
14282 // push(clone + sizeof(void*))
14284 GenTreePtr cloneOperand;
14285 op1 = impCloneExpr(op1, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
14286 nullptr DEBUGARG("inline UNBOX clone1"));
14287 op1 = gtNewOperNode(GT_IND, TYP_I_IMPL, op1);
14289 GenTreePtr condBox = gtNewOperNode(GT_EQ, TYP_INT, op1, op2);
14291 op1 = impCloneExpr(cloneOperand, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
14292 nullptr DEBUGARG("inline UNBOX clone2"));
14293 op2 = impTokenToHandle(&resolvedToken);
14294 if (op2 == nullptr)
14295 { // compDonotInline()
14298 args = gtNewArgList(op2, op1);
14299 op1 = gtNewHelperCallNode(helper, TYP_VOID, args);
14301 op1 = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), op1);
14302 op1 = gtNewQmarkNode(TYP_VOID, condBox, op1);
14303 condBox->gtFlags |= GTF_RELOP_QMARK;
14305 // QMARK nodes cannot reside on the evaluation stack. Because there
14306 // may be other trees on the evaluation stack that side-effect the
14307 // sources of the UNBOX operation we must spill the stack.
14309 impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
14311 // Create the address-expression to reference past the object header
14312 // to the beginning of the value-type. Today this means adjusting
14313 // past the base of the object's vtable field, which is pointer sized.
14315 op2 = gtNewIconNode(sizeof(void*), TYP_I_IMPL);
14316 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, cloneOperand, op2);
14320 JITDUMP("\n Importing %s as helper call because %s\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY",
14321 canExpandInline ? "want smaller code or faster jitting" : "inline expansion not legal");
14323 // Don't optimize, just call the helper and be done with it
14324 args = gtNewArgList(op2, op1);
14325 op1 =
14326 gtNewHelperCallNode(helper,
14327 (var_types)((helper == CORINFO_HELP_UNBOX) ? TYP_BYREF : TYP_STRUCT), args);
14330 assert(helper == CORINFO_HELP_UNBOX && op1->gtType == TYP_BYREF || // Unbox helper returns a byref.
14331 helper == CORINFO_HELP_UNBOX_NULLABLE &&
14332 varTypeIsStruct(op1) // UnboxNullable helper returns a struct.
14333 );
14335 /*
14336 ---------------------------------------------------------------------
14339 | \         | CORINFO_HELP_UNBOX      | CORINFO_HELP_UNBOX_NULLABLE  |
14340 |   \       | (which returns a BYREF) | (which returns a STRUCT)     |
14342 |---------------------------------------------------------------------
14343 | UNBOX     | push the BYREF          | spill the STRUCT to a local, |
14344 |           |                         | push the BYREF to this local |
14345 |---------------------------------------------------------------------
14346 | UNBOX_ANY | push a GT_OBJ of        | push the STRUCT; for Linux,  |
14347 |           | the BYREF               | when the struct is returned  |
14348 |           |                         | in two registers, create a   |
14349 |           |                         | temp whose address is passed |
14350 |           |                         | to the unbox_nullable helper |
14352 |---------------------------------------------------------------------
14353 */
14355 if (opcode == CEE_UNBOX)
14357 if (helper == CORINFO_HELP_UNBOX_NULLABLE)
14359 // Unbox nullable helper returns a struct type.
14360 // We need to spill it to a temp so that we can take its address.
14361 // Here we need the unsafe value cls check, since the address of the struct is taken to be used
14362 // further along and could potentially be exploitable.
14364 unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a nullable"));
14365 lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */);
14367 op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14368 op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
14369 assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp.
14371 op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14372 op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2);
14373 op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2);
14376 assert(op1->gtType == TYP_BYREF);
14377 assert(!tiVerificationNeeded || tiRetVal.IsByRef());
14381 assert(opcode == CEE_UNBOX_ANY);
14383 if (helper == CORINFO_HELP_UNBOX)
14385 // Normal unbox helper returns a TYP_BYREF.
14386 impPushOnStack(op1, tiRetVal);
14391 assert(helper == CORINFO_HELP_UNBOX_NULLABLE && "Make sure the helper is nullable!");
14393 #if FEATURE_MULTIREG_RET
14395 if (varTypeIsStruct(op1) && IsMultiRegReturnedType(resolvedToken.hClass))
14397 // Unbox nullable helper returns a TYP_STRUCT.
14398 // For the multi-reg case we need to spill it to a temp so that
14399 // we can pass the address to the unbox_nullable jit helper.
14401 unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a register returnable nullable"));
14402 lvaTable[tmp].lvIsMultiRegArg = true;
14403 lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */);
14405 op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14406 op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
14407 assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp.
14409 op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14410 op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2);
14411 op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2);
14413 // In this case the return value of the unbox helper is TYP_BYREF.
14414 // Make sure the right type is placed on the operand type stack.
14415 impPushOnStack(op1, tiRetVal);
14417 // Load the struct.
14420 assert(op1->gtType == TYP_BYREF);
14421 assert(!tiVerificationNeeded || tiRetVal.IsByRef());
14427 #endif // !FEATURE_MULTIREG_RET
14430 // A struct that is not passable in registers is materialized in the RetBuf.
14431 assert(op1->gtType == TYP_STRUCT);
14432 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
14433 assert(tiRetVal.IsValueClass());
14437 impPushOnStack(op1, tiRetVal);
14443 /* Get the Class index */
14444 assertImp(sz == sizeof(unsigned));
14446 _impResolveToken(CORINFO_TOKENKIND_Box);
14448 JITDUMP(" %08X", resolvedToken.token);
14450 if (tiVerificationNeeded)
14452 typeInfo tiActual = impStackTop().seTypeInfo;
14453 typeInfo tiBox = verMakeTypeInfo(resolvedToken.hClass);
14455 Verify(verIsBoxable(tiBox), "boxable type expected");
14457 // check the class constraints of the boxed type in case we are boxing an uninitialized value
14458 Verify(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
14459 "boxed type has unsatisfied class constraints");
14461 Verify(tiCompatibleWith(tiActual, tiBox.NormaliseForStack(), true), "type mismatch");
14463 // Observation: the following code introduces a boxed value class on the stack, but,
14464 // according to the ECMA spec, one would simply expect: tiRetVal =
14465 // typeInfo(TI_REF,impGetObjectClass());
14467 // Push the result back on the stack,
14468 // even if clsHnd is a value class we want the TI_REF
14469 // we call back to the EE to find out what type we should push (for Nullable<T> we push T)
14470 tiRetVal = typeInfo(TI_REF, info.compCompHnd->getTypeForBox(resolvedToken.hClass));
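// For example: boxing a Nullable<int> pushes either a boxed int or null on the
// stack, never a boxed Nullable<int>, which is why getTypeForBox is consulted
// here instead of using resolvedToken.hClass directly.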
14473 accessAllowedResult =
14474 info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14475 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14477 // Note BOX can be used on things that are not value classes, in which
14478 // case we get a NOP. However the verifier's view of the type on the
14479 // stack changes (in generic code a 'T' becomes a 'boxed T')
14480 if (!eeIsValueClass(resolvedToken.hClass))
14482 JITDUMP("\n Importing BOX(refClass) as NOP\n");
14483 verCurrentState.esStack[verCurrentState.esStackDepth - 1].seTypeInfo = tiRetVal;
14487 // Look ahead for unbox.any
14488 if (codeAddr + (sz + 1 + sizeof(mdToken)) <= codeEndp && codeAddr[sz] == CEE_UNBOX_ANY)
14490 DWORD classAttribs = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
14491 if (!(classAttribs & CORINFO_FLG_SHAREDINST))
14493 CORINFO_RESOLVED_TOKEN unboxResolvedToken;
14495 impResolveToken(codeAddr + (sz + 1), &unboxResolvedToken, CORINFO_TOKENKIND_Class);
14497 if (unboxResolvedToken.hClass == resolvedToken.hClass)
14499 JITDUMP("\n Importing BOX; UNBOX.ANY as NOP\n");
14500 // Skip the next unbox.any instruction
14501 sz += sizeof(mdToken) + 1;
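// For example (illustrative IL): on a non-shared instantiation the sequence
//     ldloc.0
//     box       !!T
//     unbox.any !!T
// imports as just 'ldloc.0'; the box/unbox.any pair is elided entirely, with
// the unbox.any token consumed above.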
14507 impImportAndPushBox(&resolvedToken);
14508 if (compDonotInline())
14517 /* Get the Class index */
14518 assertImp(sz == sizeof(unsigned));
14520 _impResolveToken(CORINFO_TOKENKIND_Class);
14522 JITDUMP(" %08X", resolvedToken.token);
14524 if (tiVerificationNeeded)
14526 tiRetVal = typeInfo(TI_INT);
14529 op1 = gtNewIconNode(info.compCompHnd->getClassSize(resolvedToken.hClass));
14530 impPushOnStack(op1, tiRetVal);
14533 case CEE_CASTCLASS:
14535 /* Get the Class index */
14537 assertImp(sz == sizeof(unsigned));
14539 _impResolveToken(CORINFO_TOKENKIND_Casting);
14541 JITDUMP(" %08X", resolvedToken.token);
14543 if (!opts.IsReadyToRun())
14545 op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
14546 if (op2 == nullptr)
14547 { // compDonotInline()
14552 if (tiVerificationNeeded)
14554 Verify(impStackTop().seTypeInfo.IsObjRef(), "object ref expected");
14556 tiRetVal = typeInfo(TI_REF, resolvedToken.hClass);
14559 accessAllowedResult =
14560 info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14561 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14563 op1 = impPopStack().val;
14565 /* Pop the address and create the 'checked cast' helper call */
14567 // At this point we expect typeRef to contain the token, op1 to contain the value being cast,
14568 // and op2 to contain code that creates the type handle corresponding to typeRef
14571 #ifdef FEATURE_READYTORUN_COMPILER
14572 if (opts.IsReadyToRun())
14574 GenTreeCall* opLookup = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_CHKCAST,
14575 TYP_REF, gtNewArgList(op1));
14576 usingReadyToRunHelper = (opLookup != nullptr);
14577 op1 = (usingReadyToRunHelper ? opLookup : op1);
14579 if (!usingReadyToRunHelper)
14581 // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
14582 // and the chkcastany call with a single call to a dynamic R2R cell that will:
14583 // 1) Load the context
14584 // 2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
14585 // 3) Check the object on the stack for the type-cast
14586 // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
14588 op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
14589 if (op2 == nullptr)
14590 { // compDonotInline()
14596 if (!usingReadyToRunHelper)
14599 op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, true);
14601 if (compDonotInline())
14606 /* Push the result back on the stack */
14607 impPushOnStack(op1, tiRetVal);
14612 if (compIsForInlining())
14614 // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
14615 // TODO: Will this be too strict, given that we will inline many basic blocks?
14616 // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
14618 /* Do we have just the exception on the stack ?*/
14620 if (verCurrentState.esStackDepth != 1)
14622 /* if not, just don't inline the method */
14624 compInlineResult->NoteFatal(InlineObservation::CALLEE_THROW_WITH_INVALID_STACK);
14629 if (tiVerificationNeeded)
14631 tiRetVal = impStackTop().seTypeInfo;
14632 Verify(tiRetVal.IsObjRef(), "object ref expected");
14633 if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
14635 Verify(!tiRetVal.IsThisPtr(), "throw uninitialized this");
14639 block->bbSetRunRarely(); // any block with a throw is rare
14640 /* Pop the exception object and create the 'throw' helper call */
14642 op1 = gtNewHelperCallNode(CORINFO_HELP_THROW, TYP_VOID, gtNewArgList(impPopStack().val));
14645 if (verCurrentState.esStackDepth > 0)
14647 impEvalSideEffects();
14650 assert(verCurrentState.esStackDepth == 0);
14656 assert(!compIsForInlining());
14658 if (info.compXcptnsCount == 0)
14660 BADCODE("rethrow outside catch");
14663 if (tiVerificationNeeded)
14665 Verify(block->hasHndIndex(), "rethrow outside catch");
14666 if (block->hasHndIndex())
14668 EHblkDsc* HBtab = ehGetDsc(block->getHndIndex());
14669 Verify(!HBtab->HasFinallyOrFaultHandler(), "rethrow in finally or fault");
14670 if (HBtab->HasFilter())
14672 // we better be in the handler clause part, not the filter part
14673 Verify(jitIsBetween(compCurBB->bbCodeOffs, HBtab->ebdHndBegOffs(), HBtab->ebdHndEndOffs()),
14674 "rethrow in filter");
14679 /* Create the 'rethrow' helper call */
14681 op1 = gtNewHelperCallNode(CORINFO_HELP_RETHROW, TYP_VOID);
14687 assertImp(sz == sizeof(unsigned));
14689 _impResolveToken(CORINFO_TOKENKIND_Class);
14691 JITDUMP(" %08X", resolvedToken.token);
14693 if (tiVerificationNeeded)
14695 typeInfo tiTo = impStackTop().seTypeInfo;
14696 typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
14698 Verify(tiTo.IsByRef(), "byref expected");
14699 Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
14701 Verify(tiCompatibleWith(tiInstr, tiTo.DereferenceByRef(), false),
14702 "type operand incompatible with type of address");
14705 size = info.compCompHnd->getClassSize(resolvedToken.hClass); // Size
14706 op2 = gtNewIconNode(0); // Value
14707 op1 = impPopStack().val; // Dest
14708 op1 = gtNewBlockVal(op1, size);
14709 op1 = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, false);
14714 if (tiVerificationNeeded)
14716 Verify(false, "bad opcode");
14719 op3 = impPopStack().val; // Size
14720 op2 = impPopStack().val; // Value
14721 op1 = impPopStack().val; // Dest
14723 if (op3->IsCnsIntOrI())
14725 size = (unsigned)op3->AsIntConCommon()->IconValue();
14726 op1 = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, size);
14730 op1 = new (this, GT_DYN_BLK) GenTreeDynBlk(op1, op3);
14733 op1 = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, false);
14739 if (tiVerificationNeeded)
14741 Verify(false, "bad opcode");
14743 op3 = impPopStack().val; // Size
14744 op2 = impPopStack().val; // Src
14745 op1 = impPopStack().val; // Dest
14747 if (op3->IsCnsIntOrI())
14749 size = (unsigned)op3->AsIntConCommon()->IconValue();
14750 op1 = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, size);
14754 op1 = new (this, GT_DYN_BLK) GenTreeDynBlk(op1, op3);
14757 if (op2->OperGet() == GT_ADDR)
14759 op2 = op2->gtOp.gtOp1;
14763 op2 = gtNewOperNode(GT_IND, TYP_STRUCT, op2);
14766 op1 = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, true);
14771 assertImp(sz == sizeof(unsigned));
14773 _impResolveToken(CORINFO_TOKENKIND_Class);
14775 JITDUMP(" %08X", resolvedToken.token);
14777 if (tiVerificationNeeded)
14779 typeInfo tiFrom = impStackTop().seTypeInfo;
14780 typeInfo tiTo = impStackTop(1).seTypeInfo;
14781 typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
14783 Verify(tiFrom.IsByRef(), "expected byref source");
14784 Verify(tiTo.IsByRef(), "expected byref destination");
14786 Verify(tiCompatibleWith(tiFrom.DereferenceByRef(), tiInstr, false),
14787 "type of source address incompatible with type operand");
14788 Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
14789 Verify(tiCompatibleWith(tiInstr, tiTo.DereferenceByRef(), false),
14790 "type operand incompatible with type of destination address");
14793 if (!eeIsValueClass(resolvedToken.hClass))
14795 op1 = impPopStack().val; // address to load from
14797 impBashVarAddrsToI(op1);
14799 assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
14801 op1 = gtNewOperNode(GT_IND, TYP_REF, op1);
14802 op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
14804 impPushOnStack(op1, typeInfo());
14805 opcode = CEE_STIND_REF;
14807 goto STIND_POST_VERIFY;
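// (Illustrative) 'cpobj' on a reference type thus degenerates into the pair
// 'ldind.ref; stind.ref', i.e. a plain object-reference copy handled by the
// common STIND_REF import code.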
14810 op2 = impPopStack().val; // Src
14811 op1 = impPopStack().val; // Dest
14812 op1 = gtNewCpObjNode(op1, op2, resolvedToken.hClass, ((prefixFlags & PREFIX_VOLATILE) != 0));
14817 assertImp(sz == sizeof(unsigned));
14819 _impResolveToken(CORINFO_TOKENKIND_Class);
14821 JITDUMP(" %08X", resolvedToken.token);
14823 if (eeIsValueClass(resolvedToken.hClass))
14825 lclTyp = TYP_STRUCT;
14832 if (tiVerificationNeeded)
14835 typeInfo tiPtr = impStackTop(1).seTypeInfo;
14837 // Make sure we have a good looking byref
14838 Verify(tiPtr.IsByRef(), "pointer not byref");
14839 Verify(!tiPtr.IsReadonlyByRef(), "write to readonly byref");
14840 if (!tiPtr.IsByRef() || tiPtr.IsReadonlyByRef())
14842 compUnsafeCastUsed = true;
14845 typeInfo ptrVal = DereferenceByRef(tiPtr);
14846 typeInfo argVal = verMakeTypeInfo(resolvedToken.hClass);
14848 if (!tiCompatibleWith(impStackTop(0).seTypeInfo, NormaliseForStack(argVal), true))
14850 Verify(false, "type of value incompatible with type operand");
14851 compUnsafeCastUsed = true;
14854 if (!tiCompatibleWith(argVal, ptrVal, false))
14856 Verify(false, "type operand incompatible with type of address");
14857 compUnsafeCastUsed = true;
14862 compUnsafeCastUsed = true;
14865 if (lclTyp == TYP_REF)
14867 opcode = CEE_STIND_REF;
14868 goto STIND_POST_VERIFY;
14871 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
14872 if (impIsPrimitive(jitTyp))
14874 lclTyp = JITtype2varType(jitTyp);
14875 goto STIND_POST_VERIFY;
14878 op2 = impPopStack().val; // Value
14879 op1 = impPopStack().val; // Ptr
14881 assertImp(varTypeIsStruct(op2));
14883 op1 = impAssignStructPtr(op1, op2, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
14885 if (op1->OperIsBlkOp() && (prefixFlags & PREFIX_UNALIGNED))
14887 op1->gtFlags |= GTF_BLK_UNALIGNED;
14894 assert(!compIsForInlining());
14896 // Being lazy here. Refanys are tricky in terms of gc tracking.
14897 // Since it is uncommon, just don't perform struct promotion in any method that contains mkrefany.
14899 JITDUMP("disabling struct promotion because of mkrefany\n");
14900 fgNoStructPromotion = true;
14902 oper = GT_MKREFANY;
14903 assertImp(sz == sizeof(unsigned));
14905 _impResolveToken(CORINFO_TOKENKIND_Class);
14907 JITDUMP(" %08X", resolvedToken.token);
14909 op2 = impTokenToHandle(&resolvedToken, nullptr, TRUE);
14910 if (op2 == nullptr)
14911 { // compDonotInline()
14915 if (tiVerificationNeeded)
14917 typeInfo tiPtr = impStackTop().seTypeInfo;
14918 typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
14920 Verify(!verIsByRefLike(tiInstr), "mkrefany of byref-like class");
14921 Verify(!tiPtr.IsReadonlyByRef(), "readonly byref used with mkrefany");
14922 Verify(typeInfo::AreEquivalent(tiPtr.DereferenceByRef(), tiInstr), "type mismatch");
14925 accessAllowedResult =
14926 info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14927 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14929 op1 = impPopStack().val;
14931 // @SPECVIOLATION: TYP_INT should not be allowed here by a strict reading of the spec.
14932 // But JIT32 allowed it, so we continue to allow it.
14933 assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL || op1->TypeGet() == TYP_INT);
14935 // MKREFANY returns a struct. op2 is the class token.
14936 op1 = gtNewOperNode(oper, TYP_STRUCT, op1, op2);
14938 impPushOnStack(op1, verMakeTypeInfo(impGetRefAnyClass()));
14944 assertImp(sz == sizeof(unsigned));
14946 _impResolveToken(CORINFO_TOKENKIND_Class);
14948 JITDUMP(" %08X", resolvedToken.token);
14952 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
14954 if (tiVerificationNeeded)
14956 typeInfo tiPtr = impStackTop().seTypeInfo;
14958 // Make sure we have a byref
14959 if (!tiPtr.IsByRef())
14961 Verify(false, "pointer not byref");
14962 compUnsafeCastUsed = true;
14964 typeInfo tiPtrVal = DereferenceByRef(tiPtr);
14966 if (!tiCompatibleWith(tiPtrVal, tiRetVal, false))
14968 Verify(false, "type of address incompatible with type operand");
14969 compUnsafeCastUsed = true;
14971 tiRetVal.NormaliseForStack();
14975 compUnsafeCastUsed = true;
14978 if (eeIsValueClass(resolvedToken.hClass))
14980 lclTyp = TYP_STRUCT;
14985 opcode = CEE_LDIND_REF;
14986 goto LDIND_POST_VERIFY;
14989 op1 = impPopStack().val;
14991 assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL);
14993 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
14994 if (impIsPrimitive(jitTyp))
14996 op1 = gtNewOperNode(GT_IND, JITtype2varType(jitTyp), op1);
14998 // Could point anywhere, e.g. a boxed class static int
14999 op1->gtFlags |= GTF_IND_TGTANYWHERE | GTF_GLOB_REF;
15000 assertImp(varTypeIsArithmetic(op1->gtType));
15004 // OBJ returns a struct
15005 // and an inline argument which is the class token of the loaded obj
15006 op1 = gtNewObjNode(resolvedToken.hClass, op1);
15008 op1->gtFlags |= GTF_EXCEPT;
15010 if (prefixFlags & PREFIX_UNALIGNED)
15012 op1->gtFlags |= GTF_IND_UNALIGNED;
15015 impPushOnStack(op1, tiRetVal);
15020 if (tiVerificationNeeded)
15022 typeInfo tiArray = impStackTop().seTypeInfo;
15023 Verify(verIsSDArray(tiArray), "bad array");
15024 tiRetVal = typeInfo(TI_INT);
15027 op1 = impPopStack().val;
15028 if (!opts.MinOpts() && !opts.compDbgCode)
15030 /* Use GT_ARR_LENGTH operator so rng check opts see this */
15031 GenTreeArrLen* arrLen = gtNewArrLen(TYP_INT, op1, offsetof(CORINFO_Array, length));
15033 /* Mark the block as containing a length expression */
15035 if (op1->gtOper == GT_LCL_VAR)
15037 block->bbFlags |= BBF_HAS_IDX_LEN;
15044 /* Create the expression "*(array_addr + ArrLenOffs)" */
15045 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
15046 gtNewIconNode(offsetof(CORINFO_Array, length), TYP_I_IMPL));
15047 op1 = gtNewIndir(TYP_INT, op1);
15048 op1->gtFlags |= GTF_IND_ARR_LEN;
15051 /* Push the result back on the stack */
15052 impPushOnStack(op1, tiRetVal);
15056 op1 = gtNewHelperCallNode(CORINFO_HELP_USER_BREAKPOINT, TYP_VOID);
15060 if (opts.compDbgCode)
15062 op1 = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
15067 /******************************** NYI *******************************/
15070 OutputDebugStringA("CLR: Invalid x86 breakpoint in IL stream\n");
15073 case CEE_MACRO_END:
15076 BADCODE3("unknown opcode", ": %02X", (int)opcode);
15080 prevOpcode = opcode;
15086 #undef _impResolveToken
15089 #pragma warning(pop)
15092 // Push a local/argument tree on the operand stack
15093 void Compiler::impPushVar(GenTree* op, typeInfo tiRetVal)
15095 tiRetVal.NormaliseForStack();
15097 if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init) && tiRetVal.IsThisPtr())
15099 tiRetVal.SetUninitialisedObjRef();
15102 impPushOnStack(op, tiRetVal);
15105 // Load a local/argument on the operand stack
15106 // lclNum is an index into lvaTable *NOT* the arg/lcl index in the IL
15107 void Compiler::impLoadVar(unsigned lclNum, IL_OFFSET offset, typeInfo tiRetVal)
15111 if (lvaTable[lclNum].lvNormalizeOnLoad())
15113 lclTyp = lvaGetRealType(lclNum);
15117 lclTyp = lvaGetActualType(lclNum);
15120 impPushVar(gtNewLclvNode(lclNum, lclTyp, offset), tiRetVal);
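// For example (illustrative): a small-typed local with lvNormalizeOnLoad() set is
// loaded with its exact type (say TYP_BYTE) so that the widening to INT happens at
// the load itself; all other locals are loaded with their actual (widened) type.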
15123 // Load an argument on the operand stack
15124 // Shared by the various CEE_LDARG opcodes
15125 // ilArgNum is the argument index as specified in IL.
15126 // It will be mapped to the correct lvaTable index
15127 void Compiler::impLoadArg(unsigned ilArgNum, IL_OFFSET offset)
15129 Verify(ilArgNum < info.compILargsCount, "bad arg num");
15131 if (compIsForInlining())
15133 if (ilArgNum >= info.compArgsCount)
15135 compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_ARGUMENT_NUMBER);
15139 impPushVar(impInlineFetchArg(ilArgNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo),
15140 impInlineInfo->lclVarInfo[ilArgNum].lclVerTypeInfo);
15144 if (ilArgNum >= info.compArgsCount)
15146 BADCODE("Bad IL");
15149 unsigned lclNum = compMapILargNum(ilArgNum); // account for possible hidden param
15151 if (lclNum == info.compThisArg)
15153 lclNum = lvaArg0Var;
15156 impLoadVar(lclNum, offset);
15160 // Load a local on the operand stack
15161 // Shared by the various CEE_LDLOC opcodes
15162 // ilLclNum is the local index as specified in IL.
15163 // It will be mapped to the correct lvaTable index
15164 void Compiler::impLoadLoc(unsigned ilLclNum, IL_OFFSET offset)
15166 if (tiVerificationNeeded)
15168 Verify(ilLclNum < info.compMethodInfo->locals.numArgs, "bad loc num");
15169 Verify(info.compInitMem, "initLocals not set");
15172 if (compIsForInlining())
15174 if (ilLclNum >= info.compMethodInfo->locals.numArgs)
15176 compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_LOCAL_NUMBER);
15180 // Get the local type
15181 var_types lclTyp = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclTypeInfo;
15183 typeInfo tiRetVal = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclVerTypeInfo;
15185 /* Have we allocated a temp for this local? */
15187 unsigned lclNum = impInlineFetchLocal(ilLclNum DEBUGARG("Inline ldloc first use temp"));
15189 // All vars of inlined methods should be !lvNormalizeOnLoad()
15191 assert(!lvaTable[lclNum].lvNormalizeOnLoad());
15192 lclTyp = genActualType(lclTyp);
15194 impPushVar(gtNewLclvNode(lclNum, lclTyp), tiRetVal);
15198 if (ilLclNum >= info.compMethodInfo->locals.numArgs)
15200 BADCODE("Bad IL");
15203 unsigned lclNum = info.compArgsCount + ilLclNum;
15205 impLoadVar(lclNum, offset);
15209 #ifdef _TARGET_ARM_
15210 /**************************************************************************************
15212 * When assigning a vararg call src to a HFA lcl dest, mark that we cannot promote the
15213 * dst struct, because struct promotion will turn it into a float/double variable while
15214 * the rhs will be an int/long variable. We don't code generate assignment of int into
15215 a float, but there is nothing that might prevent us from doing so. The tree, however,
15216 would look like: (=, (typ_float, typ_int)) or (GT_TRANSFER, (typ_float, typ_int))
15218 * tmpNum - the lcl dst variable num that is a struct.
15219 * src - the src tree assigned to the dest that is a struct/int (when varargs call.)
15220 * hClass - the type handle for the struct variable.
15222 * TODO-ARM-CQ: [301608] This is a rare scenario with varargs and struct promotion coming into play,
15223 * however, we could do a codegen of transferring from int to float registers
15224 * (transfer, not a cast.)
15227 void Compiler::impMarkLclDstNotPromotable(unsigned tmpNum, GenTreePtr src, CORINFO_CLASS_HANDLE hClass)
15229 if (src->gtOper == GT_CALL && src->gtCall.IsVarargs() && IsHfa(hClass))
15231 int hfaSlots = GetHfaCount(hClass);
15232 var_types hfaType = GetHfaType(hClass);
15234 // If we have varargs we morph the method's return type to be "int" irrespective of its original
15235 // type: struct/float at importer because the ABI calls out return in integer registers.
15236 // We don't want struct promotion to replace an expression like this:
15237 // lclFld_int = callvar_int() into lclFld_float = callvar_int();
15238 // This means an int is getting assigned to a float without a cast. Prevent the promotion.
15239 if ((hfaType == TYP_DOUBLE && hfaSlots == sizeof(double) / REGSIZE_BYTES) ||
15240 (hfaType == TYP_FLOAT && hfaSlots == sizeof(float) / REGSIZE_BYTES))
15242 // Make sure this struct type stays as struct so we can receive the call in a struct.
15243 lvaTable[tmpNum].lvIsMultiRegRet = true;
15247 #endif // _TARGET_ARM_
15249 #if FEATURE_MULTIREG_RET
15250 GenTreePtr Compiler::impAssignMultiRegTypeToVar(GenTreePtr op, CORINFO_CLASS_HANDLE hClass)
15252 unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Return value temp for multireg return."));
15253 impAssignTempGen(tmpNum, op, hClass, (unsigned)CHECK_SPILL_ALL);
15254 GenTreePtr ret = gtNewLclvNode(tmpNum, op->gtType);
15256 // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
15257 ret->gtFlags |= GTF_DONT_CSE;
15259 assert(IsMultiRegReturnedType(hClass));
15261 // Mark the var so that fields are not promoted and stay together.
15262 lvaTable[tmpNum].lvIsMultiRegRet = true;
15266 #endif // FEATURE_MULTIREG_RET
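// A typical (illustrative) use: after importing a call whose struct result comes
// back in multiple registers, spill it through impAssignMultiRegTypeToVar so that
// later uses see a single local, e.g.:
//     op1 = impAssignMultiRegTypeToVar(call, retClsHnd);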
15268 // do import for a return
15269 // returns false if inlining was aborted
15270 // opcode can be ret or call in the case of a tail.call
15271 bool Compiler::impReturnInstruction(BasicBlock* block, int prefixFlags, OPCODE& opcode)
15273 if (tiVerificationNeeded)
15275 verVerifyThisPtrInitialised();
15277 unsigned expectedStack = 0;
15278 if (info.compRetType != TYP_VOID)
15280 typeInfo tiVal = impStackTop().seTypeInfo;
15281 typeInfo tiDeclared =
15282 verMakeTypeInfo(info.compMethodInfo->args.retType, info.compMethodInfo->args.retTypeClass);
15284 Verify(!verIsByRefLike(tiDeclared) || verIsSafeToReturnByRef(tiVal), "byref return");
15286 Verify(tiCompatibleWith(tiVal, tiDeclared.NormaliseForStack(), true), "type mismatch");
15287 expectedStack = 1;
15289 Verify(verCurrentState.esStackDepth == expectedStack, "stack non-empty on return");
15293 // If we are importing an inlinee and have GC ref locals we always
15294 // need to have a spill temp for the return value. This temp
15295 // should have been set up in advance, over in fgFindBasicBlocks.
15296 if (compIsForInlining() && impInlineInfo->HasGcRefLocals() && (info.compRetType != TYP_VOID))
15298 assert(lvaInlineeReturnSpillTemp != BAD_VAR_NUM);
15302 GenTree* op2 = nullptr;
15303 GenTree* op1 = nullptr;
15304 CORINFO_CLASS_HANDLE retClsHnd = nullptr;
15306 if (info.compRetType != TYP_VOID)
15308 StackEntry se = impPopStack();
15309 retClsHnd = se.seTypeInfo.GetClassHandle();
15310 op2 = se.val;
15312 if (!compIsForInlining())
15314 impBashVarAddrsToI(op2);
15315 op2 = impImplicitIorI4Cast(op2, info.compRetType);
15316 op2 = impImplicitR4orR8Cast(op2, info.compRetType);
15317 assertImp((genActualType(op2->TypeGet()) == genActualType(info.compRetType)) ||
15318 ((op2->TypeGet() == TYP_I_IMPL) && (info.compRetType == TYP_BYREF)) ||
15319 ((op2->TypeGet() == TYP_BYREF) && (info.compRetType == TYP_I_IMPL)) ||
15320 (varTypeIsFloating(op2->gtType) && varTypeIsFloating(info.compRetType)) ||
15321 (varTypeIsStruct(op2) && varTypeIsStruct(info.compRetType)));
15324 if (opts.compGcChecks && info.compRetType == TYP_REF)
15326 // DDB 3483 : JIT Stress: early termination of GC ref's life time in exception code path
15327 // VSW 440513: Incorrect gcinfo on the return value under COMPlus_JitGCChecks=1 for methods with
15330 assert(op2->gtType == TYP_REF);
15332 // confirm that the argument is a GC pointer (for debugging (GC stress))
15333 GenTreeArgList* args = gtNewArgList(op2);
15334 op2 = gtNewHelperCallNode(CORINFO_HELP_CHECK_OBJ, TYP_REF, args);
15338 printf("\ncompGcChecks tree:\n");
15346 // inlinee's stack should be empty now.
15347 assert(verCurrentState.esStackDepth == 0);
15352 printf("\n\n Inlinee Return expression (before normalization) =>\n");
15357 // Make sure the type matches the original call.
15359 var_types returnType = genActualType(op2->gtType);
15360 var_types originalCallType = impInlineInfo->inlineCandidateInfo->fncRetType;
15361 if ((returnType != originalCallType) && (originalCallType == TYP_STRUCT))
15363 originalCallType = impNormStructType(impInlineInfo->inlineCandidateInfo->methInfo.args.retTypeClass);
15366 if (returnType != originalCallType)
15368 compInlineResult->NoteFatal(InlineObservation::CALLSITE_RETURN_TYPE_MISMATCH);
15372 // Below, we are going to set impInlineInfo->retExpr to the tree with the return
15373 // expression. At this point, retExpr could already be set if there are multiple
15374 // return blocks (meaning lvaInlineeReturnSpillTemp != BAD_VAR_NUM) and one of
15375 // the other blocks already set it. If there is only a single return block,
15376 // retExpr shouldn't be set. However, this is not true if we reimport a block
15377 // with a return. In that case, retExpr will be set, then the block will be
15378 // reimported, but retExpr won't get cleared as part of setting the block to
15379 // be reimported. The reimported retExpr value should be the same, so even if
15380 // we don't unconditionally overwrite it, it shouldn't matter.
15381 if (info.compRetNativeType != TYP_STRUCT)
15383 // compRetNativeType is not TYP_STRUCT.
15384 // This implies it could be either a scalar type or SIMD vector type or
15385 // a struct type that can be normalized to a scalar type.
15387 if (varTypeIsStruct(info.compRetType))
15389 noway_assert(info.compRetBuffArg == BAD_VAR_NUM);
15390 // adjust the type away from struct to integral
15391 // and no normalizing
15392 op2 = impFixupStructReturnType(op2, retClsHnd);
15396 // Do we have to normalize?
15397 var_types fncRealRetType = JITtype2varType(info.compMethodInfo->args.retType);
15398 if ((varTypeIsSmall(op2->TypeGet()) || varTypeIsSmall(fncRealRetType)) &&
15399 fgCastNeeded(op2, fncRealRetType))
15401 // Small-typed return values are normalized by the callee
15402 op2 = gtNewCastNode(TYP_INT, op2, fncRealRetType);
15406 if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15408 assert(info.compRetNativeType != TYP_VOID &&
15409 (fgMoreThanOneReturnBlock() || impInlineInfo->HasGcRefLocals()));
15411 // This is a bit of a workaround...
15412 // If we are inlining a call that returns a struct, where the actual "native" return type is
15413 // not a struct (for example, the struct is composed of exactly one int, and the native
15414 // return type is thus an int), and the inlinee has multiple return blocks (thus,
15415 // lvaInlineeReturnSpillTemp is != BAD_VAR_NUM, and is the index of a local var that is set
15416 // to the *native* return type), and at least one of the return blocks is the result of
15417 // a call, then we have a problem. The situation is like this (from a failed test case):
15420 // // Note: valuetype plinq_devtests.LazyTests/LIX is a struct with only a single int
15421 // call !!0 [mscorlib]System.Threading.LazyInitializer::EnsureInitialized<valuetype
15422 // plinq_devtests.LazyTests/LIX>(!!0&, bool&, object&, class [mscorlib]System.Func`1<!!0>)
15426 // ldobj !!T // this gets bashed to a GT_LCL_FLD, type TYP_INT
15429 // call !!0 System.Threading.LazyInitializer::EnsureInitializedCore<!!0>(!!0&, bool&,
15430 // object&, class System.Func`1<!!0>)
15433 // In the code above, when we call impFixupStructReturnType(), we will change the op2 return type
15434 // of the inlinee return node, but we don't do that for GT_CALL nodes, which we delay until
15435 // morphing when we call fgFixupStructReturn(). We do this, apparently, to handle nested
15436 // inlining properly by leaving the correct type on the GT_CALL node through importing.
15438 // To fix this, for this case, we temporarily change the GT_CALL node type to the
15439 // native return type, which is what it will be set to eventually. We generate the
15440 // assignment to the return temp, using the correct type, and then restore the GT_CALL
15441 // node type. During morphing, the GT_CALL will get the correct, final, native return type.
15443 bool restoreType = false;
15444 if ((op2->OperGet() == GT_CALL) && (info.compRetType == TYP_STRUCT))
15446 noway_assert(op2->TypeGet() == TYP_STRUCT);
15447 op2->gtType = info.compRetNativeType;
15448 restoreType = true;
15451 impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(),
15452 (unsigned)CHECK_SPILL_ALL);
15454 GenTreePtr tmpOp2 = gtNewLclvNode(lvaInlineeReturnSpillTemp, op2->TypeGet());
15456 if (restoreType)
15458 op2->gtType = TYP_STRUCT; // restore it to what it was
15461 op2 = tmpOp2;
15464 if (impInlineInfo->retExpr)
15466 // Some other block(s) have seen the CEE_RET first.
15467 // Better they spilled to the same temp.
15468 assert(impInlineInfo->retExpr->gtOper == GT_LCL_VAR);
15469 assert(impInlineInfo->retExpr->gtLclVarCommon.gtLclNum == op2->gtLclVarCommon.gtLclNum);
15477 printf("\n\n Inlinee Return expression (after normalization) =>\n");
15482 // Report the return expression
15483 impInlineInfo->retExpr = op2;
15487 // compRetNativeType is TYP_STRUCT.
15488 // This implies that struct return via RetBuf arg or multi-reg struct return
15490 GenTreeCall* iciCall = impInlineInfo->iciCall->AsCall();
15492 // Assign the inlinee return into a spill temp.
15493 // spill temp only exists if there are multiple return points
15494 if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15496 // in this case we have to insert multiple struct copies to the temp
15497 // and the retexpr is just the temp.
15498 assert(info.compRetNativeType != TYP_VOID);
15499 assert(fgMoreThanOneReturnBlock() || impInlineInfo->HasGcRefLocals());
15501 impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(),
15502 (unsigned)CHECK_SPILL_ALL);
15505 #if defined(_TARGET_ARM_) || defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15506 #if defined(_TARGET_ARM_)
15507 // TODO-ARM64-NYI: HFA
15508 // TODO-AMD64-Unix and TODO-ARM: once the ARM64 functionality is implemented, the
15509 // next ifdefs could be refactored into a single method with the ifdef inside.
15510 if (IsHfa(retClsHnd))
15512 // Same as !IsHfa but just don't bother with impAssignStructPtr.
15513 #else // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15514 ReturnTypeDesc retTypeDesc;
15515 retTypeDesc.InitializeStructReturnType(this, retClsHnd);
15516 unsigned retRegCount = retTypeDesc.GetReturnRegCount();
15518 if (retRegCount != 0)
15520 // If single eightbyte, the return type would have been normalized and there won't be a temp var.
15521 // This code will be called only if the struct return has not been normalized (i.e. 2 eightbytes -
15523 assert(retRegCount == MAX_RET_REG_COUNT);
15524 // Same as !structDesc.passedInRegisters but just don't bother with impAssignStructPtr.
15525 CLANG_FORMAT_COMMENT_ANCHOR;
15526 #endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15528 if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15530 if (!impInlineInfo->retExpr)
15532 #if defined(_TARGET_ARM_)
15533 impInlineInfo->retExpr = gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType);
15534 #else // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15535 // The inlinee compiler has figured out the type of the temp already. Use it here.
15536 impInlineInfo->retExpr =
15537 gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType);
15538 #endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15543 impInlineInfo->retExpr = op2;
15547 #elif defined(_TARGET_ARM64_)
15548 ReturnTypeDesc retTypeDesc;
15549 retTypeDesc.InitializeStructReturnType(this, retClsHnd);
15550 unsigned retRegCount = retTypeDesc.GetReturnRegCount();
15552 if (retRegCount != 0)
15554 assert(!iciCall->HasRetBufArg());
15555 assert(retRegCount >= 2);
15556 if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15558 if (!impInlineInfo->retExpr)
15560 // The inlinee compiler has figured out the type of the temp already. Use it here.
15561 impInlineInfo->retExpr =
15562 gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType);
15567 impInlineInfo->retExpr = op2;
15571 #endif // defined(_TARGET_ARM64_)
15573 assert(iciCall->HasRetBufArg());
15574 GenTreePtr dest = gtCloneExpr(iciCall->gtCallArgs->gtOp.gtOp1);
15575 // spill temp only exists if there are multiple return points
15576 if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15578 // if this is the first return we have seen set the retExpr
15579 if (!impInlineInfo->retExpr)
15581 impInlineInfo->retExpr =
15582 impAssignStructPtr(dest, gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType),
15583 retClsHnd, (unsigned)CHECK_SPILL_ALL);
15588 impInlineInfo->retExpr = impAssignStructPtr(dest, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL);
15595 if (compIsForInlining())
15600 if (info.compRetType == TYP_VOID)
15603 op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID);
15605 else if (info.compRetBuffArg != BAD_VAR_NUM)
15607 // Assign value to return buff (first param)
15608 GenTreePtr retBuffAddr = gtNewLclvNode(info.compRetBuffArg, TYP_BYREF, impCurStmtOffs);
15610 op2 = impAssignStructPtr(retBuffAddr, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL);
15611 impAppendTree(op2, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
15613 // There are cases where the address of the implicit RetBuf should be returned explicitly (in RAX).
15614 CLANG_FORMAT_COMMENT_ANCHOR;
15616 #if defined(_TARGET_AMD64_)
15618 // The x64 (System V and Win64) calling convention requires the implicit
15619 // return buffer to be returned explicitly (in RAX).
15620 // Change the return type to be BYREF.
15621 op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF));
15622 #else // !defined(_TARGET_AMD64_)
15623 // On non-AMD64 targets the profiler hook requires the implicit RetBuf to be returned explicitly (in the return register).
15624 // In that case the return value of the function is changed to BYREF.
15625 // If profiler hook is not needed the return type of the function is TYP_VOID.
15626 if (compIsProfilerHookNeeded())
15628 op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF));
15633 op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID);
15635 #endif // !defined(_TARGET_AMD64_)
15637 else if (varTypeIsStruct(info.compRetType))
15639 #if !FEATURE_MULTIREG_RET
15640 // For both ARM architectures the HFA native types are maintained as structs.
15641 // Also on System V AMD64 the multireg structs returns are also left as structs.
15642 noway_assert(info.compRetNativeType != TYP_STRUCT);
15644 op2 = impFixupStructReturnType(op2, retClsHnd);
15646 op1 = gtNewOperNode(GT_RETURN, genActualType(info.compRetNativeType), op2);
15651 op1 = gtNewOperNode(GT_RETURN, genActualType(info.compRetType), op2);
15654 // We must have imported a tailcall and jumped to RET
15655 if (prefixFlags & PREFIX_TAILCALL)
15657 #if defined(FEATURE_CORECLR) || !defined(_TARGET_AMD64_)
15659 // This cannot be asserted on Amd64 since we permit the following IL pattern:
15663 assert(verCurrentState.esStackDepth == 0 && impOpcodeIsCallOpcode(opcode));
15664 #endif // FEATURE_CORECLR || !_TARGET_AMD64_
15666 opcode = CEE_RET; // To prevent trying to spill if CALL_SITE_BOUNDARIES
15668 // impImportCall() would have already appended TYP_VOID calls
15669 if (info.compRetType == TYP_VOID)
15675 impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
15677 // Remember at which BC offset the tree was finished
15678 impNoteLastILoffs();
15683 /*****************************************************************************
15684 * Mark the block as unimported.
15685 * Note that the caller is responsible for calling impImportBlockPending(),
15686 * with the appropriate stack-state
15689 inline void Compiler::impReimportMarkBlock(BasicBlock* block)
15692 if (verbose && (block->bbFlags & BBF_IMPORTED))
15694 printf("\nBB%02u will be reimported\n", block->bbNum);
15698 block->bbFlags &= ~BBF_IMPORTED;
15701 /*****************************************************************************
15702 * Mark the successors of the given block as unimported.
15703 * Note that the caller is responsible for calling impImportBlockPending()
15704 * for all the successors, with the appropriate stack-state.
15707 void Compiler::impReimportMarkSuccessors(BasicBlock* block)
15709 const unsigned numSuccs = block->NumSucc();
15710 for (unsigned i = 0; i < numSuccs; i++)
15712 impReimportMarkBlock(block->GetSucc(i));
15716 /*****************************************************************************
15718 * Filter wrapper to handle only passed in exception code
15722 LONG FilterVerificationExceptions(PEXCEPTION_POINTERS pExceptionPointers, LPVOID lpvParam)
15724 if (pExceptionPointers->ExceptionRecord->ExceptionCode == SEH_VERIFICATION_EXCEPTION)
15726 return EXCEPTION_EXECUTE_HANDLER;
15729 return EXCEPTION_CONTINUE_SEARCH;
15732 void Compiler::impVerifyEHBlock(BasicBlock* block, bool isTryStart)
15734 assert(block->hasTryIndex());
15735 assert(!compIsForInlining());
15737 unsigned tryIndex = block->getTryIndex();
15738 EHblkDsc* HBtab = ehGetDsc(tryIndex);
15742 assert(block->bbFlags & BBF_TRY_BEG);
15744 // The Stack must be empty
15746 if (block->bbStkDepth != 0)
15748 BADCODE("Evaluation stack must be empty on entry into a try block");
15752 // Save the stack contents, we'll need to restore it later
15754 SavedStack blockState;
15755 impSaveStackState(&blockState, false);
15757 while (HBtab != nullptr)
15761 // Are we verifying that an instance constructor properly initializes its 'this' pointer once?
15762 // We do not allow the 'this' pointer to be uninitialized when entering most kinds of try regions
15764 if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
15766 // We trigger an invalid program exception here unless we have a try/fault region.
15768 if (HBtab->HasCatchHandler() || HBtab->HasFinallyHandler() || HBtab->HasFilter())
15771 "The 'this' pointer of an instance constructor is not intialized upon entry to a try region");
15775 // Allow a try/fault region to proceed.
15776 assert(HBtab->HasFaultHandler());
15780 /* Recursively process the handler block */
15781 BasicBlock* hndBegBB = HBtab->ebdHndBeg;
15783 // Construct the proper verification stack state
15784 // either empty or one that contains just
15785 // the Exception Object that we are dealing with
15787 verCurrentState.esStackDepth = 0;
15789 if (handlerGetsXcptnObj(hndBegBB->bbCatchTyp))
15791 CORINFO_CLASS_HANDLE clsHnd;
15793 if (HBtab->HasFilter())
15795 clsHnd = impGetObjectClass();
15799 CORINFO_RESOLVED_TOKEN resolvedToken;
15801 resolvedToken.tokenContext = impTokenLookupContextHandle;
15802 resolvedToken.tokenScope = info.compScopeHnd;
15803 resolvedToken.token = HBtab->ebdTyp;
15804 resolvedToken.tokenType = CORINFO_TOKENKIND_Class;
15805 info.compCompHnd->resolveToken(&resolvedToken);
15807 clsHnd = resolvedToken.hClass;
15810 // push catch arg onto the stack, spill to a temp if necessary
15811 // Note: can update HBtab->ebdHndBeg!
15812 hndBegBB = impPushCatchArgOnStack(hndBegBB, clsHnd, false);
15815 // Queue up the handler for importing
15817 impImportBlockPending(hndBegBB);
15819 if (HBtab->HasFilter())
15821 /* @VERIFICATION : Ideally the end-of-filter state should get
15822 propagated to the catch handler; this is an incompleteness,
15823 but not a security/compliance issue, since the only
15824 interesting state is the 'thisInit' state.
15827 verCurrentState.esStackDepth = 0;
15829 BasicBlock* filterBB = HBtab->ebdFilter;
15831 // push catch arg onto the stack, spill to a temp if necessary
15832 // Note: can update HBtab->ebdFilter!
15833 const bool isSingleBlockFilter = (filterBB->bbNext == hndBegBB);
15834 filterBB = impPushCatchArgOnStack(filterBB, impGetObjectClass(), isSingleBlockFilter);
15836 impImportBlockPending(filterBB);
15839 else if (verTrackObjCtorInitState && HBtab->HasFaultHandler())
15841 /* Recursively process the handler block */
15843 verCurrentState.esStackDepth = 0;
15845 // Queue up the fault handler for importing
15847 impImportBlockPending(HBtab->ebdHndBeg);
15850 // Now process our enclosing try index (if any)
15852 tryIndex = HBtab->ebdEnclosingTryIndex;
15853 if (tryIndex == EHblkDsc::NO_ENCLOSING_INDEX)
15859 HBtab = ehGetDsc(tryIndex);
15863 // Restore the stack contents
15864 impRestoreStackState(&blockState);
15867 //***************************************************************
15868 // Import the instructions for the given basic block. Perform
15869 // verification, throwing an exception on failure. Push any successor blocks that are enabled for the first
15870 // time, or whose verification pre-state is changed.
15873 #pragma warning(push)
15874 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
15876 void Compiler::impImportBlock(BasicBlock* block)
15878 // BBF_INTERNAL blocks only exist during importation due to EH canonicalization. We need to
15879 // handle them specially. In particular, there is no IL to import for them, but we do need
15880 // to mark them as imported and put their successors on the pending import list.
15881 if (block->bbFlags & BBF_INTERNAL)
15883 JITDUMP("Marking BBF_INTERNAL block BB%02u as BBF_IMPORTED\n", block->bbNum);
15884 block->bbFlags |= BBF_IMPORTED;
15886 const unsigned numSuccs = block->NumSucc();
15887 for (unsigned i = 0; i < numSuccs; i++)
15889 impImportBlockPending(block->GetSucc(i));
15899 /* Make the block globally available */
15904 /* Initialize the debug variables */
15905 impCurOpcName = "unknown";
15906 impCurOpcOffs = block->bbCodeOffs;
15909 /* Set the current stack state to the merged result */
15910 verResetCurrentState(block, &verCurrentState);
15912 /* Now walk the code and import the IL into GenTrees */
15914 struct FilterVerificationExceptionsParam
15919 FilterVerificationExceptionsParam param;
15921 param.pThis = this;
15922 param.block = block;
15924 PAL_TRY(FilterVerificationExceptionsParam*, pParam, ¶m)
15926 /* @VERIFICATION : For now, the only state propagation from a try
15927 to its handler is the "thisInit" state (stack is empty at start of try).
15928 In general, for state that we track in verification, we need to
15929 model the possibility that an exception might happen at any IL
15930 instruction, so we really need to merge all states that obtain
15931 between IL instructions in a try block into the start states of
15934 However, we do not allow the 'this' pointer to be uninitialized when
15935 entering most kinds of try regions (only try/fault are allowed to have
15936 an uninitialized this pointer on entry to the try)
15938 Fortunately, the stack is thrown away when an exception
15939 leads to a handler, so we don't have to worry about that.
15940 We DO, however, have to worry about the "thisInit" state.
15941 But only for the try/fault case.
15943 The only allowed transition is from TIS_Uninit to TIS_Init.
15945 So for a try/fault region for the fault handler block
15946 we will merge the start state of the try begin
15947 and the post-state of each block that is part of this try region
        // merge the start state of the try begin
        //
        if (pParam->block->bbFlags & BBF_TRY_BEG)
        {
            pParam->pThis->impVerifyEHBlock(pParam->block, true);
        }

        pParam->pThis->impImportBlockCode(pParam->block);

        // As discussed above:
        // merge the post-state of each block that is part of this try region
        //
        if (pParam->block->hasTryIndex())
        {
            pParam->pThis->impVerifyEHBlock(pParam->block, false);
        }
    }
    PAL_EXCEPT_FILTER(FilterVerificationExceptions)
    {
        verHandleVerificationFailure(block DEBUGARG(false));
    }
    PAL_ENDTRY

    if (compDonotInline())
    {
        return;
    }

    assert(!compDonotInline());

    markImport = false;

SPILLSTACK:

    unsigned    baseTmp             = NO_BASE_TMP; // input temps assigned to successor blocks
    bool        reimportSpillClique = false;
    BasicBlock* tgtBlock            = nullptr;

    /* If the stack is non-empty, we might have to spill its contents */

    if (verCurrentState.esStackDepth != 0)
    {
        impBoxTemp = BAD_VAR_NUM; // if a box temp is used in a block that leaves something
                                  // on the stack, its lifetime is hard to determine, simply
                                  // don't reuse such temps.

        GenTreePtr addStmt = nullptr;

        /* Do the successors of 'block' have any other predecessors?
           We do not want to do some of the optimizations related to multiRef
           if we can reimport blocks */

        unsigned multRef = impCanReimport ? unsigned(~0) : 0;
        switch (block->bbJumpKind)
        {
            case BBJ_COND:

                /* Temporarily remove the 'jtrue' from the end of the tree list */

                assert(impTreeLast);
                assert(impTreeLast->gtOper == GT_STMT);
                assert(impTreeLast->gtStmt.gtStmtExpr->gtOper == GT_JTRUE);

                addStmt     = impTreeLast;
                impTreeLast = impTreeLast->gtPrev;

                /* Note if the next block has more than one ancestor */

                multRef |= block->bbNext->bbRefs;

                /* Does the next block have temps assigned? */

                baseTmp  = block->bbNext->bbStkTempsIn;
                tgtBlock = block->bbNext;

                if (baseTmp != NO_BASE_TMP)
                {
                    break;
                }

                /* Try the target of the jump then */

                multRef |= block->bbJumpDest->bbRefs;
                baseTmp  = block->bbJumpDest->bbStkTempsIn;
                tgtBlock = block->bbJumpDest;
                break;

            case BBJ_ALWAYS:
                multRef |= block->bbJumpDest->bbRefs;
                baseTmp  = block->bbJumpDest->bbStkTempsIn;
                tgtBlock = block->bbJumpDest;
                break;

            case BBJ_NONE:
                multRef |= block->bbNext->bbRefs;
                baseTmp  = block->bbNext->bbStkTempsIn;
                tgtBlock = block->bbNext;
                break;

            case BBJ_SWITCH:

                BasicBlock** jmpTab;
                unsigned     jmpCnt;

                /* Temporarily remove the GT_SWITCH from the end of the tree list */

                assert(impTreeLast);
                assert(impTreeLast->gtOper == GT_STMT);
                assert(impTreeLast->gtStmt.gtStmtExpr->gtOper == GT_SWITCH);

                addStmt     = impTreeLast;
                impTreeLast = impTreeLast->gtPrev;

                jmpCnt = block->bbJumpSwt->bbsCount;
                jmpTab = block->bbJumpSwt->bbsDstTab;

                do
                {
                    tgtBlock = (*jmpTab);

                    multRef |= tgtBlock->bbRefs;

                    // Thanks to spill cliques, we should have assigned all or none
                    assert((baseTmp == NO_BASE_TMP) || (baseTmp == tgtBlock->bbStkTempsIn));
                    baseTmp = tgtBlock->bbStkTempsIn;
                    if (multRef > 1)
                    {
                        break;
                    }
                } while (++jmpTab, --jmpCnt);

                break;

            case BBJ_CALLFINALLY:
            case BBJ_EHCATCHRET:
            case BBJ_RETURN:
            case BBJ_EHFINALLYRET:
            case BBJ_EHFILTERRET:
            case BBJ_THROW:
                NO_WAY("can't have 'unreached' end of BB with non-empty stack");
                break;

            default:
                noway_assert(!"Unexpected bbJumpKind");
                break;
        }

        assert(multRef >= 1);

        /* Do we have a base temp number? */

        bool newTemps = (baseTmp == NO_BASE_TMP);

        if (newTemps)
        {
            /* Grab enough temps for the whole stack */
            baseTmp = impGetSpillTmpBase(block);
        }
        /* Spill all stack entries into temps */
        unsigned level, tempNum;

        JITDUMP("\nSpilling stack entries into temps\n");
        for (level = 0, tempNum = baseTmp; level < verCurrentState.esStackDepth; level++, tempNum++)
        {
            GenTreePtr tree = verCurrentState.esStack[level].val;

            /* VC generates code where it pushes a byref from one branch, and an int (ldc.i4 0) from
               the other. This should merge to a byref in unverifiable code.
               However, if the branch which leaves the TYP_I_IMPL on the stack is imported first, the
               successor would be imported assuming there was a TYP_I_IMPL on
               the stack. Thus the value would not get GC-tracked. Hence,
               change the temp to TYP_BYREF and reimport the successors.
               Note: We should only allow this in unverifiable code.
            */
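            /* For illustration only (an assumed example, not from the original
               sources), IL of roughly this shape produces the situation above:

                       ldarg.1
                       brtrue  PUSH_BYREF
                       ldc.i4.0          // one branch leaves an int
                       br      JOIN
                   PUSH_BYREF:
                       ldloca.s 0        // the other branch leaves a byref
                   JOIN:                 // stack is non-empty here, so both paths
                                         // spill into the same temp

               Whichever branch is imported first decides the temp's initial type;
               the fixup below corrects it to TYP_BYREF when necessary. */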
            if (tree->gtType == TYP_BYREF && lvaTable[tempNum].lvType == TYP_I_IMPL && !verNeedsVerification())
            {
                lvaTable[tempNum].lvType = TYP_BYREF;
                impReimportMarkSuccessors(block);
                markImport = true;
            }

#ifdef _TARGET_64BIT_
            if (genActualType(tree->gtType) == TYP_I_IMPL && lvaTable[tempNum].lvType == TYP_INT)
            {
                if (tiVerificationNeeded && tgtBlock->bbEntryState != nullptr &&
                    (tgtBlock->bbFlags & BBF_FAILED_VERIFICATION) == 0)
                {
                    // Merge the current state into the entry state of block;
                    // the call to verMergeEntryStates must have changed
                    // the entry state of the block by merging the int local var
                    // and the native-int stack entry.
                    bool changed = false;
                    if (verMergeEntryStates(tgtBlock, &changed))
                    {
                        impRetypeEntryStateTemps(tgtBlock);
                        impReimportBlockPending(tgtBlock);
                        assert(changed);
                    }
                    else
                    {
                        tgtBlock->bbFlags |= BBF_FAILED_VERIFICATION;
                        break;
                    }
                }

                // Some other block in the spill clique set this to "int", but now we have "native int".
                // Change the type and go back to re-import any blocks that used the wrong type.
                lvaTable[tempNum].lvType = TYP_I_IMPL;
                reimportSpillClique      = true;
            }
            else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_I_IMPL)
            {
                // Spill clique has decided this should be "native int", but this block only pushes an "int".
                // Insert a sign-extension to "native int" so we match the clique.
                verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, TYP_I_IMPL);
            }
            // Consider the case where one branch left a 'byref' on the stack and the other leaves
            // an 'int'. On 32-bit, this is allowed (in non-verifiable code) since they are the same
            // size. JIT64 managed to make this work on 64-bit. For compatibility, we support JIT64
            // behavior instead of asserting and then generating bad code (where we save/restore the
            // low 32 bits of a byref pointer to an 'int' sized local). If the 'int' side has been
            // imported already, we need to change the type of the local and reimport the spill clique.
            // If the 'byref' side has imported, we insert a cast from int to 'native int' to match
            // the 'byref' size.
            if (!tiVerificationNeeded)
            {
                if (genActualType(tree->gtType) == TYP_BYREF && lvaTable[tempNum].lvType == TYP_INT)
                {
                    // Some other block in the spill clique set this to "int", but now we have "byref".
                    // Change the type and go back to re-import any blocks that used the wrong type.
                    lvaTable[tempNum].lvType = TYP_BYREF;
                    reimportSpillClique      = true;
                }
                else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_BYREF)
                {
                    // Spill clique has decided this should be "byref", but this block only pushes an "int".
                    // Insert a sign-extension to "native int" so we match the clique size.
                    verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, TYP_I_IMPL);
                }
            }
#endif // _TARGET_64BIT_
#if FEATURE_X87_DOUBLES
            // X87 stack doesn't differentiate between float/double
            // so promoting is no big deal.
            // For everybody else keep it as float until we have a collision and then promote
            // Just like for x64's TYP_INT<->TYP_I_IMPL

            if (multRef > 1 && tree->gtType == TYP_FLOAT)
            {
                verCurrentState.esStack[level].val = gtNewCastNode(TYP_DOUBLE, tree, TYP_DOUBLE);
            }

#else // !FEATURE_X87_DOUBLES

            if (tree->gtType == TYP_DOUBLE && lvaTable[tempNum].lvType == TYP_FLOAT)
            {
                // Some other block in the spill clique set this to "float", but now we have "double".
                // Change the type and go back to re-import any blocks that used the wrong type.
                lvaTable[tempNum].lvType = TYP_DOUBLE;
                reimportSpillClique      = true;
            }
            else if (tree->gtType == TYP_FLOAT && lvaTable[tempNum].lvType == TYP_DOUBLE)
            {
                // Spill clique has decided this should be "double", but this block only pushes a "float".
                // Insert a cast to "double" so we match the clique.
                verCurrentState.esStack[level].val = gtNewCastNode(TYP_DOUBLE, tree, TYP_DOUBLE);
            }

#endif // FEATURE_X87_DOUBLES
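            // Summary of the collision handling above (added commentary): when two
            // blocks in one spill clique disagree on a slot's type, the shared temp
            // is widened to the more general type (int -> native int, int -> byref,
            // float -> double); the narrower producer gets a cast inserted, and any
            // blocks already imported with the narrower type are queued for re-import.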
            /* If addStmt has a reference to tempNum (can only happen if we
               are spilling to the temps already used by a previous block),
               we need to spill addStmt */

            if (addStmt && !newTemps && gtHasRef(addStmt->gtStmt.gtStmtExpr, tempNum, false))
            {
                GenTreePtr addTree = addStmt->gtStmt.gtStmtExpr;

                if (addTree->gtOper == GT_JTRUE)
                {
                    GenTreePtr relOp = addTree->gtOp.gtOp1;
                    assert(relOp->OperIsCompare());

                    var_types type = genActualType(relOp->gtOp.gtOp1->TypeGet());

                    if (gtHasRef(relOp->gtOp.gtOp1, tempNum, false))
                    {
                        unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op1"));
                        impAssignTempGen(temp, relOp->gtOp.gtOp1, level);
                        type              = genActualType(lvaTable[temp].TypeGet());
                        relOp->gtOp.gtOp1 = gtNewLclvNode(temp, type);
                    }

                    if (gtHasRef(relOp->gtOp.gtOp2, tempNum, false))
                    {
                        unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op2"));
                        impAssignTempGen(temp, relOp->gtOp.gtOp2, level);
                        type              = genActualType(lvaTable[temp].TypeGet());
                        relOp->gtOp.gtOp2 = gtNewLclvNode(temp, type);
                    }
                }
                else
                {
                    assert(addTree->gtOper == GT_SWITCH && genActualTypeIsIntOrI(addTree->gtOp.gtOp1->TypeGet()));

                    unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt SWITCH"));
                    impAssignTempGen(temp, addTree->gtOp.gtOp1, level);
                    addTree->gtOp.gtOp1 = gtNewLclvNode(temp, genActualType(addTree->gtOp.gtOp1->TypeGet()));
                }
            }

            /* Spill the stack entry, and replace with the temp */

            if (!impSpillStackEntry(level, tempNum
#ifdef DEBUG
                                    ,
                                    true, "Spill Stack Entry"
#endif
                                    ))
            {
                if (markImport)
                {
                    BADCODE("bad stack state");
                }

                // Oops. Something went wrong when spilling. Bad code.
                verHandleVerificationFailure(block DEBUGARG(true));

                goto SPILLSTACK;
            }
        }

        /* Put back the 'jtrue'/'switch' if we removed it earlier */

        if (addStmt)
        {
            impAppendStmt(addStmt, (unsigned)CHECK_SPILL_NONE);
        }
    }
    // Some of the append/spill logic works on compCurBB

    assert(compCurBB == block);

    /* Save the tree list in the block */
    impEndTreeList(block);

    // impEndTreeList sets BBF_IMPORTED on the block
    // We do *NOT* want to set it later than this because
    // impReimportSpillClique might clear it if this block is both a
    // predecessor and successor in the current spill clique
    assert(block->bbFlags & BBF_IMPORTED);

    // If we had an int/native int, or float/double collision, we need to re-import
    if (reimportSpillClique)
    {
        // This will re-import all the successors of block (as well as each of their predecessors)
        impReimportSpillClique(block);

        // For blocks that haven't been imported yet, we still need to mark them as pending import.
        const unsigned numSuccs = block->NumSucc();
        for (unsigned i = 0; i < numSuccs; i++)
        {
            BasicBlock* succ = block->GetSucc(i);
            if ((succ->bbFlags & BBF_IMPORTED) == 0)
            {
                impImportBlockPending(succ);
            }
        }
    }
    else // the normal case
    {
        // otherwise just import the successors of block

        /* Does this block jump to any other blocks? */
        const unsigned numSuccs = block->NumSucc();
        for (unsigned i = 0; i < numSuccs; i++)
        {
            impImportBlockPending(block->GetSucc(i));
        }
    }
}
#ifdef _PREFAST_
#pragma warning(pop)
#endif
/*****************************************************************************/
//
// Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if
// necessary (and ensures that it is a member of the set of BB's on the list, by setting its byte in
// impPendingBlockMembers). Merges the current verification state into the verification state of "block"
// (its "pre-state").

void Compiler::impImportBlockPending(BasicBlock* block)
{
#ifdef DEBUG
    if (verbose)
    {
        printf("\nimpImportBlockPending for BB%02u\n", block->bbNum);
    }
#endif

    // We will add a block to the pending set if it has not already been imported (or needs to be re-imported),
    // or if it has, but merging in a predecessor's post-state changes the block's pre-state.
    // (When we're doing verification, we always attempt the merge to detect verification errors.)

    // If the block has not been imported, add to pending set.
    bool addToPending = ((block->bbFlags & BBF_IMPORTED) == 0);

    // Initialize bbEntryState just the first time we try to add this block to the pending list.
    // Just because bbEntryState is NULL doesn't mean the pre-state wasn't previously set;
    // we use NULL to indicate the 'common' state to avoid memory allocation.
    if ((block->bbEntryState == nullptr) && ((block->bbFlags & (BBF_IMPORTED | BBF_FAILED_VERIFICATION)) == 0) &&
        (impGetPendingBlockMember(block) == 0))
    {
        verInitBBEntryState(block, &verCurrentState);
        assert(block->bbStkDepth == 0);
        block->bbStkDepth = static_cast<unsigned short>(verCurrentState.esStackDepth);
        assert(addToPending);
        assert(impGetPendingBlockMember(block) == 0);
    }
    else
    {
        // The stack should have the same height on entry to the block from all its predecessors.
        if (block->bbStkDepth != verCurrentState.esStackDepth)
        {
#ifdef DEBUG
            char buffer[400];
            sprintf_s(buffer, sizeof(buffer),
                      "Block at offset %4.4x to %4.4x in %s entered with different stack depths.\n"
                      "Previous depth was %d, current depth is %d",
                      block->bbCodeOffs, block->bbCodeOffsEnd, info.compFullName, block->bbStkDepth,
                      verCurrentState.esStackDepth);
            buffer[400 - 1] = 0;
            NO_WAY(buffer);
#else  // !DEBUG
            NO_WAY("Block entered with different stack depths");
#endif // DEBUG
        }
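        // For illustration only (an assumed example, not from the original sources):
        // IL like
        //
        //        ldc.i4.1
        //        br   JOIN       // reaches JOIN with stack depth 1
        //        ...
        //        br   JOIN       // reaches JOIN with stack depth 0
        //     JOIN:
        //
        // reaches this point with inconsistent depths and is rejected as bad IL
        // rather than silently miscompiled.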
        // Additionally, if we need to verify, merge the verification state.
        if (tiVerificationNeeded)
        {
            // Merge the current state into the entry state of block; if this does not change the entry state
            // by merging, do not add the block to the pending-list.
            bool changed = false;
            if (!verMergeEntryStates(block, &changed))
            {
                block->bbFlags |= BBF_FAILED_VERIFICATION;
                addToPending = true; // We will pop it off, and check the flag set above.
            }
            else if (changed)
            {
                addToPending = true;

                JITDUMP("Adding BB%02u to pending set due to new merge result\n", block->bbNum);
            }
        }

        if (!addToPending)
        {
            return;
        }

        if (block->bbStkDepth > 0)
        {
            // We need to fix the types of any spill temps that might have changed:
            //   int->native int, float->double, int->byref, etc.
            impRetypeEntryStateTemps(block);
        }

        // OK, we must add to the pending list, if it's not already in it.
        if (impGetPendingBlockMember(block) != 0)
        {
            return;
        }
    }

    // Get an entry to add to the pending list

    PendingDsc* dsc;

    if (impPendingFree)
    {
        // We can reuse one of the freed up dscs.
        dsc            = impPendingFree;
        impPendingFree = dsc->pdNext;
    }
    else
    {
        // We have to create a new dsc
        dsc = new (this, CMK_Unknown) PendingDsc;
    }

    dsc->pdBB                 = block;
    dsc->pdSavedStack.ssDepth = verCurrentState.esStackDepth;
    dsc->pdThisPtrInit        = verCurrentState.thisInitialized;

    // Save the stack trees for later

    if (verCurrentState.esStackDepth)
    {
        impSaveStackState(&dsc->pdSavedStack, false);
    }

    // Add the entry to the pending list

    dsc->pdNext    = impPendingList;
    impPendingList = dsc;
    impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set.

    // Various assertions require us to now consider the block as not imported (at least for
    // the final time...)
    block->bbFlags &= ~BBF_IMPORTED;

#ifdef DEBUG
    if (verbose && 0)
    {
        printf("Added PendingDsc - %08p for BB%02u\n", dspPtr(dsc), block->bbNum);
    }
#endif
}
/*****************************************************************************/
//
// Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if
// necessary (and ensures that it is a member of the set of BB's on the list, by setting its byte in
// impPendingBlockMembers). Does *NOT* change the existing "pre-state" of the block.

void Compiler::impReimportBlockPending(BasicBlock* block)
{
    JITDUMP("\nimpReimportBlockPending for BB%02u", block->bbNum);

    assert(block->bbFlags & BBF_IMPORTED);

    // OK, we must add to the pending list, if it's not already in it.
    if (impGetPendingBlockMember(block) != 0)
    {
        return;
    }

    // Get an entry to add to the pending list

    PendingDsc* dsc;

    if (impPendingFree)
    {
        // We can reuse one of the freed up dscs.
        dsc            = impPendingFree;
        impPendingFree = dsc->pdNext;
    }
    else
    {
        // We have to create a new dsc
        dsc = new (this, CMK_ImpStack) PendingDsc;
    }

    dsc->pdBB = block;

    if (block->bbEntryState)
    {
        dsc->pdThisPtrInit        = block->bbEntryState->thisInitialized;
        dsc->pdSavedStack.ssDepth = block->bbEntryState->esStackDepth;
        dsc->pdSavedStack.ssTrees = block->bbEntryState->esStack;
    }
    else
    {
        dsc->pdThisPtrInit        = TIS_Bottom;
        dsc->pdSavedStack.ssDepth = 0;
        dsc->pdSavedStack.ssTrees = nullptr;
    }

    // Add the entry to the pending list

    dsc->pdNext    = impPendingList;
    impPendingList = dsc;
    impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set.

    // Various assertions require us to now consider the block as not imported (at least for
    // the final time...)
    block->bbFlags &= ~BBF_IMPORTED;

#ifdef DEBUG
    if (verbose && 0)
    {
        printf("Added PendingDsc - %08p for BB%02u\n", dspPtr(dsc), block->bbNum);
    }
#endif
}
void* Compiler::BlockListNode::operator new(size_t sz, Compiler* comp)
{
    if (comp->impBlockListNodeFreeList == nullptr)
    {
        return (BlockListNode*)comp->compGetMem(sizeof(BlockListNode), CMK_BasicBlock);
    }
    else
    {
        BlockListNode* res             = comp->impBlockListNodeFreeList;
        comp->impBlockListNodeFreeList = res->m_next;
        return res;
    }
}

void Compiler::FreeBlockListNode(Compiler::BlockListNode* node)
{
    node->m_next             = impBlockListNodeFreeList;
    impBlockListNodeFreeList = node;
}

void Compiler::impWalkSpillCliqueFromPred(BasicBlock* block, SpillCliqueWalker* callback)
{
    bool toDo = true;

    noway_assert(!fgComputePredsDone);
    if (!fgCheapPredsValid)
    {
        fgComputeCheapPreds();
    }

    BlockListNode* succCliqueToDo = nullptr;
    BlockListNode* predCliqueToDo = new (this) BlockListNode(block);
    while (toDo)
    {
        toDo = false;
        // Look at the successors of every member of the predecessor to-do list.
        while (predCliqueToDo != nullptr)
        {
            BlockListNode* node = predCliqueToDo;
            predCliqueToDo      = node->m_next;
            BasicBlock* blk     = node->m_blk;
            FreeBlockListNode(node);

            const unsigned numSuccs = blk->NumSucc();
            for (unsigned succNum = 0; succNum < numSuccs; succNum++)
            {
                BasicBlock* succ = blk->GetSucc(succNum);
                // If it's not already in the clique, add it, and also add it
                // as a member of the successor "toDo" set.
                if (impSpillCliqueGetMember(SpillCliqueSucc, succ) == 0)
                {
                    callback->Visit(SpillCliqueSucc, succ);
                    impSpillCliqueSetMember(SpillCliqueSucc, succ, 1);
                    succCliqueToDo = new (this) BlockListNode(succ, succCliqueToDo);
                    toDo           = true;
                }
            }
        }
        // Look at the predecessors of every member of the successor to-do list.
        while (succCliqueToDo != nullptr)
        {
            BlockListNode* node = succCliqueToDo;
            succCliqueToDo      = node->m_next;
            BasicBlock* blk     = node->m_blk;
            FreeBlockListNode(node);

            for (BasicBlockList* pred = blk->bbCheapPreds; pred != nullptr; pred = pred->next)
            {
                BasicBlock* predBlock = pred->block;
                // If it's not already in the clique, add it, and also add it
                // as a member of the predecessor "toDo" set.
                if (impSpillCliqueGetMember(SpillCliquePred, predBlock) == 0)
                {
                    callback->Visit(SpillCliquePred, predBlock);
                    impSpillCliqueSetMember(SpillCliquePred, predBlock, 1);
                    predCliqueToDo = new (this) BlockListNode(predBlock, predCliqueToDo);
                    toDo           = true;
                }
            }
        }
    }

    // If this fails, it means we didn't walk the spill clique properly and somehow managed
    // to miss walking back to include the predecessor we started from.
    // The most likely cause: missing or out-of-date bbPreds.
    assert(impSpillCliqueGetMember(SpillCliquePred, block) != 0);
}
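// Example (added commentary, not from the original sources): if BB1 and BB2 both
// branch to BB3 with a non-empty stack, and BB2 also falls through to BB4 with a
// non-empty stack, the alternating walk above converges on the closure
//     SpillCliquePred = {BB1, BB2},  SpillCliqueSucc = {BB3, BB4}
// so that every block involved agrees on a single base temp for the live stack slots.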
void Compiler::SetSpillTempsBase::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk)
{
    if (predOrSucc == SpillCliqueSucc)
    {
        assert(blk->bbStkTempsIn == NO_BASE_TMP); // Should not already be a member of a clique as a successor.
        blk->bbStkTempsIn = m_baseTmp;
    }
    else
    {
        assert(predOrSucc == SpillCliquePred);
        assert(blk->bbStkTempsOut == NO_BASE_TMP); // Should not already be a member of a clique as a predecessor.
        blk->bbStkTempsOut = m_baseTmp;
    }
}

void Compiler::ReimportSpillClique::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk)
{
    // For Preds we could be a little smarter and just find the existing store
    // and re-type it/add a cast, but that is complicated and hopefully very rare, so
    // just re-import the whole block (just like we do for successors)

    if (((blk->bbFlags & BBF_IMPORTED) == 0) && (m_pComp->impGetPendingBlockMember(blk) == 0))
    {
        // If we haven't imported this block and we're not going to (because it isn't on
        // the pending list) then just ignore it for now.

        // This block has either never been imported (EntryState == NULL) or it failed
        // verification. Neither state requires us to force it to be imported now.
        assert((blk->bbEntryState == nullptr) || (blk->bbFlags & BBF_FAILED_VERIFICATION));
        return;
    }

    // For successors we have a valid verCurrentState, so just mark them for reimport
    // the 'normal' way.
    // Unlike predecessors, we *DO* need to reimport the current block because the
    // initial import had the wrong entry state types.
    // Similarly, blocks that are currently on the pending list still need to call
    // impImportBlockPending to fixup their entry state.
    if (predOrSucc == SpillCliqueSucc)
    {
        m_pComp->impReimportMarkBlock(blk);

        // Set the current stack state to that of the blk->bbEntryState
        m_pComp->verResetCurrentState(blk, &m_pComp->verCurrentState);
        assert(m_pComp->verCurrentState.thisInitialized == blk->bbThisOnEntry());

        m_pComp->impImportBlockPending(blk);
    }
    else if ((blk != m_pComp->compCurBB) && ((blk->bbFlags & BBF_IMPORTED) != 0))
    {
        // As described above, we are only visiting predecessors so they can
        // add the appropriate casts; since we have already done that for the current
        // block, it does not need to be reimported.
        // Nor do we need to reimport blocks that are still pending, but not yet
        // imported.
        //
        // For predecessors, we have no state to seed the EntryState, so we just have
        // to assume the existing one is correct.
        // If the block is also a successor, it will get the EntryState properly
        // updated when it is visited as a successor in the above "if" block.
        assert(predOrSucc == SpillCliquePred);
        m_pComp->impReimportBlockPending(blk);
    }
}
// Re-type the incoming lclVar nodes to match the varDsc.
void Compiler::impRetypeEntryStateTemps(BasicBlock* blk)
{
    if (blk->bbEntryState != nullptr)
    {
        EntryState* es = blk->bbEntryState;
        for (unsigned level = 0; level < es->esStackDepth; level++)
        {
            GenTreePtr tree = es->esStack[level].val;
            if ((tree->gtOper == GT_LCL_VAR) || (tree->gtOper == GT_LCL_FLD))
            {
                unsigned lclNum = tree->gtLclVarCommon.gtLclNum;
                noway_assert(lclNum < lvaCount);
                LclVarDsc* varDsc              = lvaTable + lclNum;
                es->esStack[level].val->gtType = varDsc->TypeGet();
            }
        }
    }
}

unsigned Compiler::impGetSpillTmpBase(BasicBlock* block)
{
    if (block->bbStkTempsOut != NO_BASE_TMP)
    {
        return block->bbStkTempsOut;
    }

#ifdef DEBUG
    if (verbose)
    {
        printf("\n*************** In impGetSpillTmpBase(BB%02u)\n", block->bbNum);
    }
#endif // DEBUG

    // Otherwise, choose one, and propagate to all members of the spill clique.
    // Grab enough temps for the whole stack.
    unsigned baseTmp = lvaGrabTemps(verCurrentState.esStackDepth DEBUGARG("IL Stack Entries"));
    SetSpillTempsBase callback(baseTmp);

    // We do *NOT* need to reset the SpillClique*Members because a block can only be the predecessor
    // to one spill clique, and similarly can only be the successor to one spill clique
    impWalkSpillCliqueFromPred(block, &callback);

    return baseTmp;
}
void Compiler::impReimportSpillClique(BasicBlock* block)
{
#ifdef DEBUG
    if (verbose)
    {
        printf("\n*************** In impReimportSpillClique(BB%02u)\n", block->bbNum);
    }
#endif // DEBUG

    // If we get here, it is because this block is already part of a spill clique
    // and one predecessor had an outgoing live stack slot of type int, and this
    // block has an outgoing live stack slot of type native int.
    // We need to reset these before traversal because they have already been set
    // by the previous walk to determine all the members of the spill clique.
    impInlineRoot()->impSpillCliquePredMembers.Reset();
    impInlineRoot()->impSpillCliqueSuccMembers.Reset();

    ReimportSpillClique callback(this);

    impWalkSpillCliqueFromPred(block, &callback);
}
// Set the pre-state of "block" (which should not have a pre-state allocated) to
// a copy of "srcState", cloning tree pointers as required.
void Compiler::verInitBBEntryState(BasicBlock* block, EntryState* srcState)
{
    if (srcState->esStackDepth == 0 && srcState->thisInitialized == TIS_Bottom)
    {
        block->bbEntryState = nullptr;
        return;
    }

    block->bbEntryState = (EntryState*)compGetMemA(sizeof(EntryState));

    // block->bbEntryState.esRefcount = 1;

    block->bbEntryState->esStackDepth    = srcState->esStackDepth;
    block->bbEntryState->thisInitialized = TIS_Bottom;

    if (srcState->esStackDepth > 0)
    {
        block->bbSetStack(new (this, CMK_Unknown) StackEntry[srcState->esStackDepth]);
        unsigned stackSize = srcState->esStackDepth * sizeof(StackEntry);

        memcpy(block->bbEntryState->esStack, srcState->esStack, stackSize);
        for (unsigned level = 0; level < srcState->esStackDepth; level++)
        {
            GenTreePtr tree                         = srcState->esStack[level].val;
            block->bbEntryState->esStack[level].val = gtCloneExpr(tree);
        }
    }
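    // Note (added commentary): the stack trees are deep-copied via gtCloneExpr
    // above so that later mutation of the importer's live trees cannot alter
    // this recorded pre-state.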
    if (verTrackObjCtorInitState)
    {
        verSetThisInit(block, srcState->thisInitialized);
    }

    return;
}
void Compiler::verSetThisInit(BasicBlock* block, ThisInitState tis)
{
    assert(tis != TIS_Bottom); // Precondition.
    if (block->bbEntryState == nullptr)
    {
        block->bbEntryState = new (this, CMK_Unknown) EntryState();
    }

    block->bbEntryState->thisInitialized = tis;
}

/*****************************************************************************
 * Resets the current state to the state at the start of the basic block
 */
void Compiler::verResetCurrentState(BasicBlock* block, EntryState* destState)
{
    if (block->bbEntryState == nullptr)
    {
        destState->esStackDepth    = 0;
        destState->thisInitialized = TIS_Bottom;
        return;
    }

    destState->esStackDepth = block->bbEntryState->esStackDepth;

    if (destState->esStackDepth > 0)
    {
        unsigned stackSize = destState->esStackDepth * sizeof(StackEntry);

        memcpy(destState->esStack, block->bbStackOnEntry(), stackSize);
    }

    destState->thisInitialized = block->bbThisOnEntry();

    return;
}

ThisInitState BasicBlock::bbThisOnEntry()
{
    return bbEntryState ? bbEntryState->thisInitialized : TIS_Bottom;
}

unsigned BasicBlock::bbStackDepthOnEntry()
{
    return (bbEntryState ? bbEntryState->esStackDepth : 0);
}

void BasicBlock::bbSetStack(void* stackBuffer)
{
    assert(bbEntryState);
    assert(stackBuffer);
    bbEntryState->esStack = (StackEntry*)stackBuffer;
}

StackEntry* BasicBlock::bbStackOnEntry()
{
    assert(bbEntryState);
    return bbEntryState->esStack;
}
void Compiler::verInitCurrentState()
{
    verTrackObjCtorInitState        = FALSE;
    verCurrentState.thisInitialized = TIS_Bottom;

    if (tiVerificationNeeded)
    {
        // Track this ptr initialization
        if (!info.compIsStatic && (info.compFlags & CORINFO_FLG_CONSTRUCTOR) && lvaTable[0].lvVerTypeInfo.IsObjRef())
        {
            verTrackObjCtorInitState        = TRUE;
            verCurrentState.thisInitialized = TIS_Uninit;
        }
    }

    // initialize stack info

    verCurrentState.esStackDepth = 0;
    assert(verCurrentState.esStack != nullptr);

    // copy current state to entry state of first BB
    verInitBBEntryState(fgFirstBB, &verCurrentState);
}

Compiler* Compiler::impInlineRoot()
{
    if (impInlineInfo == nullptr)
    {
        return this;
    }
    else
    {
        return impInlineInfo->InlineRoot;
    }
}

BYTE Compiler::impSpillCliqueGetMember(SpillCliqueDir predOrSucc, BasicBlock* blk)
{
    if (predOrSucc == SpillCliquePred)
    {
        return impInlineRoot()->impSpillCliquePredMembers.Get(blk->bbInd());
    }
    else
    {
        assert(predOrSucc == SpillCliqueSucc);
        return impInlineRoot()->impSpillCliqueSuccMembers.Get(blk->bbInd());
    }
}

void Compiler::impSpillCliqueSetMember(SpillCliqueDir predOrSucc, BasicBlock* blk, BYTE val)
{
    if (predOrSucc == SpillCliquePred)
    {
        impInlineRoot()->impSpillCliquePredMembers.Set(blk->bbInd(), val);
    }
    else
    {
        assert(predOrSucc == SpillCliqueSucc);
        impInlineRoot()->impSpillCliqueSuccMembers.Set(blk->bbInd(), val);
    }
}
/*****************************************************************************
 *
 *  Convert the instrs ("import") into our internal format (trees). The
 *  basic flowgraph has already been constructed and is passed in.
 */

void Compiler::impImport(BasicBlock* method)
{
#ifdef DEBUG
    if (verbose)
    {
        printf("*************** In impImport() for %s\n", info.compFullName);
    }
#endif

    /* Allocate the stack contents */

    if (info.compMaxStack <= sizeof(impSmallStack) / sizeof(impSmallStack[0]))
    {
        /* Use local variable, don't waste time allocating on the heap */

        impStkSize              = sizeof(impSmallStack) / sizeof(impSmallStack[0]);
        verCurrentState.esStack = impSmallStack;
    }
    else
    {
        impStkSize              = info.compMaxStack;
        verCurrentState.esStack = new (this, CMK_ImpStack) StackEntry[impStkSize];
    }

    // initialize the entry state at start of method
    verInitCurrentState();

    // Initialize stuff related to figuring "spill cliques" (see spec comment for impGetSpillTmpBase).
    Compiler* inlineRoot = impInlineRoot();
    if (this == inlineRoot) // These are only used on the root of the inlining tree.
    {
        // We have initialized these previously, but to size 0. Make them larger.
        impPendingBlockMembers.Init(getAllocator(), fgBBNumMax * 2);
        impSpillCliquePredMembers.Init(getAllocator(), fgBBNumMax * 2);
        impSpillCliqueSuccMembers.Init(getAllocator(), fgBBNumMax * 2);
    }
    inlineRoot->impPendingBlockMembers.Reset(fgBBNumMax * 2);
    inlineRoot->impSpillCliquePredMembers.Reset(fgBBNumMax * 2);
    inlineRoot->impSpillCliqueSuccMembers.Reset(fgBBNumMax * 2);
    impBlockListNodeFreeList = nullptr;

#ifdef DEBUG
    impLastILoffsStmt   = nullptr;
    impNestedStackSpill = false;
#endif
    impBoxTemp = BAD_VAR_NUM;

    impPendingList = impPendingFree = nullptr;

    /* Add the entry-point to the worker-list */

    // Skip leading internal blocks. There can be one as a leading scratch BB, and more
    // from EH normalization.
    // NOTE: It might be possible to always just put fgFirstBB on the pending list, and let everything else
    // just fall out.
    for (; method->bbFlags & BBF_INTERNAL; method = method->bbNext)
    {
        // Treat these as imported.
        assert(method->bbJumpKind == BBJ_NONE); // We assume all the leading ones are fallthrough.
        JITDUMP("Marking leading BBF_INTERNAL block BB%02u as BBF_IMPORTED\n", method->bbNum);
        method->bbFlags |= BBF_IMPORTED;
    }

    impImportBlockPending(method);

    /* Import blocks in the worker-list until there are no more */

    while (impPendingList)
    {
        /* Remove the entry at the front of the list */

        PendingDsc* dsc = impPendingList;
        impPendingList  = impPendingList->pdNext;
        impSetPendingBlockMember(dsc->pdBB, 0);

        /* Restore the stack state */

        verCurrentState.thisInitialized = dsc->pdThisPtrInit;
        verCurrentState.esStackDepth    = dsc->pdSavedStack.ssDepth;
        if (verCurrentState.esStackDepth)
        {
            impRestoreStackState(&dsc->pdSavedStack);
        }

        /* Add the entry to the free list for reuse */

        dsc->pdNext    = impPendingFree;
        impPendingFree = dsc;

        /* Now import the block */

        if (dsc->pdBB->bbFlags & BBF_FAILED_VERIFICATION)
        {

#ifdef _TARGET_64BIT_
            // On AMD64, during verification we have to match JIT64 behavior since the VM is very tightly
            // coupled with the JIT64 IL Verification logic. Look inside verHandleVerificationFailure
            // method for further explanation on why we raise this exception instead of making the jitted
            // code throw the verification exception during execution.
            if (tiVerificationNeeded && opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IMPORT_ONLY))
            {
                BADCODE("Basic block marked as not verifiable");
            }
            else
#endif // _TARGET_64BIT_
            {
                verConvertBBToThrowVerificationException(dsc->pdBB DEBUGARG(true));
                impEndTreeList(dsc->pdBB);
            }
        }
        else
        {
            impImportBlock(dsc->pdBB);

            if (compDonotInline())
            {
                return;
            }
            if (compIsForImportOnly() && !tiVerificationNeeded)
            {
                return;
            }
        }
    }

#ifdef DEBUG
    if (verbose && info.compXcptnsCount)
    {
        printf("\nAfter impImport() added block for try,catch,finally");
        fgDispBasicBlocks();
        printf("\n");
    }

    // Used in impImportBlockPending() for STRESS_CHK_REIMPORT
    for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
    {
        block->bbFlags &= ~BBF_VISITED;
    }
#endif

    assert(!compIsForInlining() || !tiVerificationNeeded);
}
// Checks if a typeinfo (usually stored in the type stack) is a struct.
// The invariant here is that if it's not a ref or a method and has a class handle,
// it's a valuetype.
bool Compiler::impIsValueType(typeInfo* pTypeInfo)
{
    if (pTypeInfo && pTypeInfo->IsValueClassWithClsHnd())
    {
        return true;
    }
    else
    {
        return false;
    }
}

/*****************************************************************************
 *  Check to see if the tree is the address of a local or
 *  the address of a field in a local.
 *
 *  *lclVarTreeOut will contain the GT_LCL_VAR tree when it returns TRUE.
 */

BOOL Compiler::impIsAddressInLocal(GenTreePtr tree, GenTreePtr* lclVarTreeOut)
{
    if (tree->gtOper != GT_ADDR)
    {
        return FALSE;
    }

    GenTreePtr op = tree->gtOp.gtOp1;
    while (op->gtOper == GT_FIELD)
    {
        op = op->gtField.gtFldObj;
        if (op && op->gtOper == GT_ADDR) // Skip static fields where op will be NULL.
        {
            op = op->gtOp.gtOp1;
        }
        else
        {
            return FALSE;
        }
    }

    if (op->gtOper == GT_LCL_VAR)
    {
        *lclVarTreeOut = op;
        return TRUE;
    }
    else
    {
        return FALSE;
    }
}
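// Example (added commentary, not from the original sources): the walk above
// accepts trees such as ADDR(LCL_VAR) or ADDR(FIELD(ADDR(LCL_VAR))) -- i.e.
// "&local" or "&local.field" -- and reports the underlying GT_LCL_VAR through
// lclVarTreeOut, while rejecting addresses of static fields.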
//------------------------------------------------------------------------
// impMakeDiscretionaryInlineObservations: make observations that help
// determine the profitability of a discretionary inline
//
// Arguments:
//    pInlineInfo -- InlineInfo for the inline, or null for the prejit root
//    inlineResult -- InlineResult accumulating information about this inline
//
// Notes:
//    If inlining or prejitting the root, this method also makes
//    various observations about the method that factor into inline
//    decisions. It sets `compNativeSizeEstimate` as a side effect.

void Compiler::impMakeDiscretionaryInlineObservations(InlineInfo* pInlineInfo, InlineResult* inlineResult)
{
    assert(pInlineInfo != nullptr && compIsForInlining() || // Perform the actual inlining.
           pInlineInfo == nullptr && !compIsForInlining()   // Calculate the static inlining hint for ngen.
           );

    // If we're really inlining, we should just have one result in play.
    assert((pInlineInfo == nullptr) || (inlineResult == pInlineInfo->inlineResult));

    // If this is a "forceinline" method, the JIT probably shouldn't have gone
    // to the trouble of estimating the native code size. Even if it did, it
    // shouldn't be relying on the result of this method.
    assert(inlineResult->GetObservation() == InlineObservation::CALLEE_IS_DISCRETIONARY_INLINE);

    // Note if the caller contains NEWOBJ or NEWARR.
    Compiler* rootCompiler = impInlineRoot();

    if ((rootCompiler->optMethodFlags & OMF_HAS_NEWARRAY) != 0)
    {
        inlineResult->Note(InlineObservation::CALLER_HAS_NEWARRAY);
    }

    if ((rootCompiler->optMethodFlags & OMF_HAS_NEWOBJ) != 0)
    {
        inlineResult->Note(InlineObservation::CALLER_HAS_NEWOBJ);
    }

    bool calleeIsStatic  = (info.compFlags & CORINFO_FLG_STATIC) != 0;
    bool isSpecialMethod = (info.compFlags & CORINFO_FLG_CONSTRUCTOR) != 0;

    if (isSpecialMethod)
    {
        if (calleeIsStatic)
        {
            inlineResult->Note(InlineObservation::CALLEE_IS_CLASS_CTOR);
        }
        else
        {
            inlineResult->Note(InlineObservation::CALLEE_IS_INSTANCE_CTOR);
        }
    }
    else if (!calleeIsStatic)
    {
        // Callee is an instance method.
        //
        // Check if the callee has the same 'this' as the root.
        if (pInlineInfo != nullptr)
        {
            GenTreePtr thisArg = pInlineInfo->iciCall->gtCall.gtCallObjp;

            bool isSameThis = impIsThis(thisArg);
            inlineResult->NoteBool(InlineObservation::CALLSITE_IS_SAME_THIS, isSameThis);
        }
    }

    // Note if the callee's class is a promotable struct
    if ((info.compClassAttr & CORINFO_FLG_VALUECLASS) != 0)
    {
        lvaStructPromotionInfo structPromotionInfo;
        lvaCanPromoteStructType(info.compClassHnd, &structPromotionInfo, false);
        if (structPromotionInfo.canPromote)
        {
            inlineResult->Note(InlineObservation::CALLEE_CLASS_PROMOTABLE);
        }
    }

#ifdef FEATURE_SIMD

    // Note if this method has SIMD args or return value
    if (pInlineInfo != nullptr && pInlineInfo->hasSIMDTypeArgLocalOrReturn)
    {
        inlineResult->Note(InlineObservation::CALLEE_HAS_SIMD);
    }

#endif // FEATURE_SIMD

    // Roughly classify callsite frequency.
    InlineCallsiteFrequency frequency = InlineCallsiteFrequency::UNUSED;

    // If this is a prejit root, or a maximally hot block...
    if ((pInlineInfo == nullptr) || (pInlineInfo->iciBlock->bbWeight >= BB_MAX_WEIGHT))
    {
        frequency = InlineCallsiteFrequency::HOT;
    }
    // No training data. Look for loop-like things.
    // We consider a recursive call loop-like. Do not give the inlining boost to the method itself.
    // However, give it to things nearby.
    else if ((pInlineInfo->iciBlock->bbFlags & BBF_BACKWARD_JUMP) &&
             (pInlineInfo->fncHandle != pInlineInfo->inlineCandidateInfo->ilCallerHandle))
    {
        frequency = InlineCallsiteFrequency::LOOP;
    }
    else if (pInlineInfo->iciBlock->hasProfileWeight() && (pInlineInfo->iciBlock->bbWeight > BB_ZERO_WEIGHT))
    {
        frequency = InlineCallsiteFrequency::WARM;
    }
    // Now modify the multiplier based on where we're called from.
    else if (pInlineInfo->iciBlock->isRunRarely() || ((info.compFlags & FLG_CCTOR) == FLG_CCTOR))
    {
        frequency = InlineCallsiteFrequency::RARE;
    }
    else
    {
        frequency = InlineCallsiteFrequency::BORING;
    }

    // Also capture the block weight of the call site. In the prejit
    // root case, assume there's some hot call site for this method.
    unsigned weight = 0;

    if (pInlineInfo != nullptr)
    {
        weight = pInlineInfo->iciBlock->bbWeight;
    }
    else
    {
        weight = BB_MAX_WEIGHT;
    }

    inlineResult->NoteInt(InlineObservation::CALLSITE_FREQUENCY, static_cast<int>(frequency));
    inlineResult->NoteInt(InlineObservation::CALLSITE_WEIGHT, static_cast<int>(weight));
}
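// For reference (added commentary summarizing the classification above):
//
//    HOT    -- prejit root, or call site in a maximally hot block
//    LOOP   -- call site in a block that ends in a backward jump (and the
//              callee is not the method itself)
//    WARM   -- call site in a block with a nonzero profile weight
//    RARE   -- call site in a run-rarely block, or the caller is a cctor
//    BORING -- everything else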
/*****************************************************************************
 This method makes a STATIC inlining decision based on the IL code.
 It should not make any inlining decision based on the context.
 If forceInline is true, then the inlining decision should not depend on
 performance heuristics (code size, etc.).
 */

void Compiler::impCanInlineIL(CORINFO_METHOD_HANDLE fncHandle,
                              CORINFO_METHOD_INFO*  methInfo,
                              bool                  forceInline,
                              InlineResult*         inlineResult)
{
    unsigned codeSize = methInfo->ILCodeSize;

    // We shouldn't have made up our minds yet...
    assert(!inlineResult->IsDecided());

    if (methInfo->EHcount)
    {
        inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_EH);
        return;
    }

    if ((methInfo->ILCode == nullptr) || (codeSize == 0))
    {
        inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NO_BODY);
        return;
    }

    // For now we don't inline varargs (import code can't handle it)

    if (methInfo->args.isVarArg())
    {
        inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS);
        return;
    }

    // Reject if it has too many locals.
    // This is currently an implementation limit due to fixed-size arrays in the
    // inline info, rather than a performance heuristic.

    inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_LOCALS, methInfo->locals.numArgs);

    if (methInfo->locals.numArgs > MAX_INL_LCLS)
    {
        inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_LOCALS);
        return;
    }

    // Make sure there aren't too many arguments.
    // This is currently an implementation limit due to fixed-size arrays in the
    // inline info, rather than a performance heuristic.

    inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_ARGUMENTS, methInfo->args.numArgs);

    if (methInfo->args.numArgs > MAX_INL_ARGS)
    {
        inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_ARGUMENTS);
        return;
    }

    // Note force inline state

    inlineResult->NoteBool(InlineObservation::CALLEE_IS_FORCE_INLINE, forceInline);

    // Note IL code size

    inlineResult->NoteInt(InlineObservation::CALLEE_IL_CODE_SIZE, codeSize);

    if (inlineResult->IsFailure())
    {
        return;
    }

    // Make sure maxstack is not too big

    inlineResult->NoteInt(InlineObservation::CALLEE_MAXSTACK, methInfo->maxStack);

    if (inlineResult->IsFailure())
    {
        return;
    }
}
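// For reference (added commentary summarizing the hard limits above): a callee
// is rejected outright for EH clauses, a missing/empty body, managed varargs,
// more than MAX_INL_LCLS locals, or more than MAX_INL_ARGS arguments. These are
// implementation limits; forceInline only exempts a callee from the later
// performance heuristics, not from these checks.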
/*****************************************************************************
 *
 */

void Compiler::impCheckCanInline(GenTreePtr             call,
                                 CORINFO_METHOD_HANDLE  fncHandle,
                                 unsigned               methAttr,
                                 CORINFO_CONTEXT_HANDLE exactContextHnd,
                                 InlineCandidateInfo**  ppInlineCandidateInfo,
                                 InlineResult*          inlineResult)
{
    // Either EE or JIT might throw exceptions below.
    // If that happens, just don't inline the method.

    struct Param
    {
        Compiler*              pThis;
        GenTreePtr             call;
        CORINFO_METHOD_HANDLE  fncHandle;
        unsigned               methAttr;
        CORINFO_CONTEXT_HANDLE exactContextHnd;
        InlineResult*          result;
        InlineCandidateInfo**  ppInlineCandidateInfo;
    } param;
    memset(&param, 0, sizeof(param));

    param.pThis                 = this;
    param.call                  = call;
    param.fncHandle             = fncHandle;
    param.methAttr              = methAttr;
    param.exactContextHnd       = (exactContextHnd != nullptr) ? exactContextHnd : MAKE_METHODCONTEXT(fncHandle);
    param.result                = inlineResult;
    param.ppInlineCandidateInfo = ppInlineCandidateInfo;

    bool success = eeRunWithErrorTrap<Param>(
        [](Param* pParam) {
            DWORD                  dwRestrictions = 0;
            CorInfoInitClassResult initClassResult;

#ifdef DEBUG
            const char* methodName;
            const char* className;
            methodName = pParam->pThis->eeGetMethodName(pParam->fncHandle, &className);

            if (JitConfig.JitNoInline())
            {
                pParam->result->NoteFatal(InlineObservation::CALLEE_IS_JIT_NOINLINE);
                goto _exit;
            }
#endif

            /* Try to get the code address/size for the method */

            CORINFO_METHOD_INFO methInfo;
            if (!pParam->pThis->info.compCompHnd->getMethodInfo(pParam->fncHandle, &methInfo))
            {
                pParam->result->NoteFatal(InlineObservation::CALLEE_NO_METHOD_INFO);
                goto _exit;
            }

            bool forceInline;
            forceInline = !!(pParam->methAttr & CORINFO_FLG_FORCEINLINE);

            pParam->pThis->impCanInlineIL(pParam->fncHandle, &methInfo, forceInline, pParam->result);

            if (pParam->result->IsFailure())
            {
                assert(pParam->result->IsNever());
                goto _exit;
            }

            // Speculatively check if initClass() can be done.
            // If it can be done, we will try to inline the method. If inlining
            // succeeds, then we will do the non-speculative initClass() and commit it.
            // If this speculative call to initClass() fails, there is no point
            // trying to inline this method.
            initClassResult =
                pParam->pThis->info.compCompHnd->initClass(nullptr /* field */, pParam->fncHandle /* method */,
                                                           pParam->exactContextHnd /* context */,
                                                           TRUE /* speculative */);

            if (initClassResult & CORINFO_INITCLASS_DONT_INLINE)
            {
                pParam->result->NoteFatal(InlineObservation::CALLSITE_CLASS_INIT_FAILURE_SPEC);
                goto _exit;
            }

            // Give the EE the final say in whether to inline or not.
            // This should be last since for verifiable code, this can be expensive.

            /* VM Inline check also ensures that the method is verifiable if needed */
            CorInfoInline vmResult;
            vmResult = pParam->pThis->info.compCompHnd->canInline(pParam->pThis->info.compMethodHnd, pParam->fncHandle,
                                                                  &dwRestrictions);

            if (vmResult == INLINE_FAIL)
            {
                pParam->result->NoteFatal(InlineObservation::CALLSITE_IS_VM_NOINLINE);
            }
            else if (vmResult == INLINE_NEVER)
            {
                pParam->result->NoteFatal(InlineObservation::CALLEE_IS_VM_NOINLINE);
            }

            if (pParam->result->IsFailure())
            {
                // Make sure not to report this one. It was already reported by the VM.
                pParam->result->SetReported();
                goto _exit;
            }

            // check for unsupported inlining restrictions
            assert((dwRestrictions & ~(INLINE_RESPECT_BOUNDARY | INLINE_NO_CALLEE_LDSTR | INLINE_SAME_THIS)) == 0);

            if (dwRestrictions & INLINE_SAME_THIS)
            {
                GenTreePtr thisArg = pParam->call->gtCall.gtCallObjp;
                assert(thisArg);

                if (!pParam->pThis->impIsThis(thisArg))
                {
                    pParam->result->NoteFatal(InlineObservation::CALLSITE_REQUIRES_SAME_THIS);
                    goto _exit;
                }
            }

            /* Get the method properties */

            CORINFO_CLASS_HANDLE clsHandle;
            clsHandle = pParam->pThis->info.compCompHnd->getMethodClass(pParam->fncHandle);
            unsigned clsAttr;
            clsAttr = pParam->pThis->info.compCompHnd->getClassAttribs(clsHandle);

            /* Get the return type */

            var_types fncRetType;
            fncRetType = pParam->call->TypeGet();

#ifdef DEBUG
            var_types fncRealRetType;
            fncRealRetType = JITtype2varType(methInfo.args.retType);

            assert((genActualType(fncRealRetType) == genActualType(fncRetType)) ||
                   // <BUGNUM> VSW 288602 </BUGNUM>
                   // In case of IJW, we allow to assign a native pointer to a BYREF.
                   (fncRetType == TYP_BYREF && methInfo.args.retType == CORINFO_TYPE_PTR) ||
                   (varTypeIsStruct(fncRetType) && (fncRealRetType == TYP_STRUCT)));
#endif

            //
            // Allocate an InlineCandidateInfo structure
            //
            InlineCandidateInfo* pInfo;
            pInfo = new (pParam->pThis, CMK_Inlining) InlineCandidateInfo;

            pInfo->dwRestrictions  = dwRestrictions;
            pInfo->methInfo        = methInfo;
            pInfo->methAttr        = pParam->methAttr;
            pInfo->clsHandle       = clsHandle;
            pInfo->clsAttr         = clsAttr;
            pInfo->fncRetType      = fncRetType;
            pInfo->exactContextHnd = pParam->exactContextHnd;
            pInfo->ilCallerHandle  = pParam->pThis->info.compMethodHnd;
            pInfo->initClassResult = initClassResult;

            *(pParam->ppInlineCandidateInfo) = pInfo;

        _exit:;
        },
        &param);
    if (!success)
    {
        param.result->NoteFatal(InlineObservation::CALLSITE_COMPILATION_ERROR);
    }
}
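// For reference (added commentary summarizing the flow above): a call becomes an
// inline candidate only after passing, in order, the (debug-only) JitNoInline
// config check, getMethodInfo, the static IL checks in impCanInlineIL, a
// speculative initClass, and the VM's canInline callback; any failure notes the
// observation on the InlineResult and bails out through the _exit label.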
void Compiler::impInlineRecordArgInfo(InlineInfo*   pInlineInfo,
                                      GenTreePtr    curArgVal,
                                      unsigned      argNum,
                                      InlineResult* inlineResult)
{
    InlArgInfo* inlCurArgInfo = &pInlineInfo->inlArgInfo[argNum];

    if (curArgVal->gtOper == GT_MKREFANY)
    {
        inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_IS_MKREFANY);
        return;
    }

    inlCurArgInfo->argNode = curArgVal;

    GenTreePtr lclVarTree;
    if (impIsAddressInLocal(curArgVal, &lclVarTree) && varTypeIsStruct(lclVarTree))
    {
        inlCurArgInfo->argIsByRefToStructLocal = true;
#ifdef FEATURE_SIMD
        if (lvaTable[lclVarTree->AsLclVarCommon()->gtLclNum].lvSIMDType)
        {
            pInlineInfo->hasSIMDTypeArgLocalOrReturn = true;
        }
#endif // FEATURE_SIMD
    }

    if (curArgVal->gtFlags & GTF_ALL_EFFECT)
    {
        inlCurArgInfo->argHasGlobRef = (curArgVal->gtFlags & GTF_GLOB_REF) != 0;
        inlCurArgInfo->argHasSideEff = (curArgVal->gtFlags & (GTF_ALL_EFFECT & ~GTF_GLOB_REF)) != 0;
    }

    if (curArgVal->gtOper == GT_LCL_VAR)
    {
        inlCurArgInfo->argIsLclVar = true;

        /* Remember the "original" argument number */
        curArgVal->gtLclVar.gtLclILoffs = argNum;
    }

    if ((curArgVal->OperKind() & GTK_CONST) ||
        ((curArgVal->gtOper == GT_ADDR) && (curArgVal->gtOp.gtOp1->gtOper == GT_LCL_VAR)))
    {
        inlCurArgInfo->argIsInvariant = true;
        if (inlCurArgInfo->argIsThis && (curArgVal->gtOper == GT_CNS_INT) && (curArgVal->gtIntCon.gtIconVal == 0))
        {
            /* Abort, but do not mark as not inlinable */
            inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_HAS_NULL_THIS);
            return;
        }
    }

    if (!inlCurArgInfo->argIsInvariant && gtHasLocalsWithAddrOp(curArgVal))
    {
        inlCurArgInfo->argHasLdargaOp = true;
    }

#ifdef DEBUG
    if (verbose)
    {
        if (inlCurArgInfo->argIsThis)
        {
            printf("thisArg:");
        }
        else
        {
            printf("\nArgument #%u:", argNum);
        }
        if (inlCurArgInfo->argIsLclVar)
        {
            printf(" is a local var");
        }
        if (inlCurArgInfo->argIsInvariant)
        {
            printf(" is a constant");
        }
        if (inlCurArgInfo->argHasGlobRef)
        {
            printf(" has global refs");
        }
        if (inlCurArgInfo->argHasSideEff)
        {
            printf(" has side effects");
        }
        if (inlCurArgInfo->argHasLdargaOp)
        {
            printf(" has ldarga effect");
        }
        if (inlCurArgInfo->argHasStargOp)
        {
            printf(" has starg effect");
        }
        if (inlCurArgInfo->argIsByRefToStructLocal)
        {
            printf(" is byref to a struct local");
        }

        printf("\n");
        gtDispTree(curArgVal);
        printf("\n");
    }
#endif
}
/*****************************************************************************
 *
 */

void Compiler::impInlineInitVars(InlineInfo* pInlineInfo)
{
    assert(!compIsForInlining());

    GenTreePtr           call         = pInlineInfo->iciCall;
    CORINFO_METHOD_INFO* methInfo     = &pInlineInfo->inlineCandidateInfo->methInfo;
    unsigned             clsAttr      = pInlineInfo->inlineCandidateInfo->clsAttr;
    InlArgInfo*          inlArgInfo   = pInlineInfo->inlArgInfo;
    InlLclVarInfo*       lclVarInfo   = pInlineInfo->lclVarInfo;
    InlineResult*        inlineResult = pInlineInfo->inlineResult;

    const bool hasRetBuffArg = impMethodInfo_hasRetBuffArg(methInfo);

    /* init the argument struct */

    memset(inlArgInfo, 0, (MAX_INL_ARGS + 1) * sizeof(inlArgInfo[0]));

    /* Get hold of the 'this' pointer and the argument list proper */

    GenTreePtr thisArg = call->gtCall.gtCallObjp;
    GenTreePtr argList = call->gtCall.gtCallArgs;
    unsigned   argCnt  = 0; // Count of the arguments

    assert((methInfo->args.hasThis()) == (thisArg != nullptr));

    if (thisArg)
    {
        inlArgInfo[0].argIsThis = true;

        impInlineRecordArgInfo(pInlineInfo, thisArg, argCnt, inlineResult);

        if (inlineResult->IsFailure())
        {
            return;
        }

        /* Increment the argument count */
        argCnt++;
    }

    /* Record some information about each of the arguments */
    bool hasTypeCtxtArg = (methInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) != 0;

#if USER_ARGS_COME_LAST
    unsigned typeCtxtArg = thisArg ? 1 : 0;
#else  // USER_ARGS_COME_LAST
    unsigned typeCtxtArg = methInfo->args.totalILArgs();
#endif // USER_ARGS_COME_LAST

    for (GenTreePtr argTmp = argList; argTmp; argTmp = argTmp->gtOp.gtOp2)
    {
        if (argTmp == argList && hasRetBuffArg)
        {
            continue;
        }

        // Ignore the type context argument
        if (hasTypeCtxtArg && (argCnt == typeCtxtArg))
        {
            pInlineInfo->typeContextArg = typeCtxtArg;
            typeCtxtArg                 = 0xFFFFFFFF;
            continue;
        }

        assert(argTmp->gtOper == GT_LIST);
        GenTreePtr argVal = argTmp->gtOp.gtOp1;

        impInlineRecordArgInfo(pInlineInfo, argVal, argCnt, inlineResult);

        if (inlineResult->IsFailure())
        {
            return;
        }

        /* Increment the argument count */
        argCnt++;
    }

    /* Make sure we got the arg number right */
    assert(argCnt == methInfo->args.totalILArgs());

#ifdef FEATURE_SIMD
    bool foundSIMDType = pInlineInfo->hasSIMDTypeArgLocalOrReturn;
#endif // FEATURE_SIMD
    /* We have typeless opcodes, get type information from the signature */

    if (thisArg)
    {
        var_types sigType;

        if (clsAttr & CORINFO_FLG_VALUECLASS)
        {
            sigType = TYP_BYREF;
        }
        else
        {
            sigType = TYP_REF;
        }

        lclVarInfo[0].lclVerTypeInfo = verMakeTypeInfo(pInlineInfo->inlineCandidateInfo->clsHandle);
        lclVarInfo[0].lclHasLdlocaOp = false;

#ifdef FEATURE_SIMD
        // We always want to check isSIMDClass, since we want to set foundSIMDType (to increase
        // the inlining multiplier) for anything in that assembly.
        // But we only need to normalize it if it is a TYP_STRUCT
        // (which we need to do even if we have already set foundSIMDType).
        if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDClass(&(lclVarInfo[0].lclVerTypeInfo)))
        {
            if (sigType == TYP_STRUCT)
            {
                sigType = impNormStructType(lclVarInfo[0].lclVerTypeInfo.GetClassHandle());
            }
            foundSIMDType = true;
        }
#endif // FEATURE_SIMD
        lclVarInfo[0].lclTypeInfo = sigType;

        assert(varTypeIsGC(thisArg->gtType) ||   // "this" is managed
               (thisArg->gtType == TYP_I_IMPL && // "this" is unmgd but the method's class doesn't care
                (clsAttr & CORINFO_FLG_VALUECLASS)));

        if (genActualType(thisArg->gtType) != genActualType(sigType))
        {
            if (sigType == TYP_REF)
            {
                /* The argument cannot be bashed into a ref (see bug 750871) */
                inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_REF);
                return;
            }

            /* This can only happen with byrefs <-> ints/shorts */

            assert(genActualType(sigType) == TYP_I_IMPL || sigType == TYP_BYREF);
            assert(genActualType(thisArg->gtType) == TYP_I_IMPL || thisArg->gtType == TYP_BYREF);

            if (sigType == TYP_BYREF)
            {
                lclVarInfo[0].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
            }
            else if (thisArg->gtType == TYP_BYREF)
            {
                assert(sigType == TYP_I_IMPL);

                /* If possible change the BYREF to an int */
                if (thisArg->IsVarAddr())
                {
                    thisArg->gtType              = TYP_I_IMPL;
                    lclVarInfo[0].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
                }
                else
                {
                    /* Arguments 'int <- byref' cannot be bashed */
                    inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_INT);
                    return;
                }
            }
        }
    }
17795 /* Init the types of the arguments and make sure the types
 * from the trees match the types in the signature */

    CORINFO_ARG_LIST_HANDLE argLst;
    argLst = methInfo->args.args;

    unsigned i;
    for (i = (thisArg ? 1 : 0); i < argCnt; i++, argLst = info.compCompHnd->getArgNext(argLst))
    {
        var_types sigType = (var_types)eeGetArgType(argLst, &methInfo->args);

        lclVarInfo[i].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->args, argLst);

#ifdef FEATURE_SIMD
        if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDClass(&(lclVarInfo[i].lclVerTypeInfo)))
        {
            // If this is a SIMD class (i.e. in the SIMD assembly), then we will consider that we've
            // found a SIMD type, even if this may not be a type we recognize (the assumption is that
            // it is likely to use a SIMD type, and therefore we want to increase the inlining multiplier).
            foundSIMDType = true;
            if (sigType == TYP_STRUCT)
            {
                var_types structType = impNormStructType(lclVarInfo[i].lclVerTypeInfo.GetClassHandle());
                sigType              = structType;
            }
        }
#endif // FEATURE_SIMD

        lclVarInfo[i].lclTypeInfo    = sigType;
        lclVarInfo[i].lclHasLdlocaOp = false;

        /* Does the tree type match the signature type? */

        GenTreePtr inlArgNode = inlArgInfo[i].argNode;

        if (sigType != inlArgNode->gtType)
        {
            /* In valid IL, this can only happen for short integer types or byrefs <-> [native] ints,
               but in bad IL cases with caller-callee signature mismatches we can see other types.
               Intentionally reject the mismatched cases here so the jit remains robust when
               encountering bad IL. */

            bool isPlausibleTypeMatch = (genActualType(sigType) == genActualType(inlArgNode->gtType)) ||
                                        (genActualTypeIsIntOrI(sigType) && inlArgNode->gtType == TYP_BYREF) ||
                                        (sigType == TYP_BYREF && genActualTypeIsIntOrI(inlArgNode->gtType));

            if (!isPlausibleTypeMatch)
            {
                inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_TYPES_INCOMPATIBLE);
                return;
            }

            /* Is it a narrowing or widening cast?
             * Widening casts are ok since the value computed is already
             * normalized to an int (on the IL stack) */

            if (genTypeSize(inlArgNode->gtType) >= genTypeSize(sigType))
            {
                if (sigType == TYP_BYREF)
                {
                    lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
                }
                else if (inlArgNode->gtType == TYP_BYREF)
                {
                    assert(varTypeIsIntOrI(sigType));

                    /* If possible bash the BYREF to an int */
                    if (inlArgNode->IsVarAddr())
                    {
                        inlArgNode->gtType           = TYP_I_IMPL;
                        lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
                    }
                    else
                    {
                        /* Arguments 'int <- byref' cannot be changed */
                        inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_INT);
                        return;
                    }
                }
                else if (genTypeSize(sigType) < EA_PTRSIZE)
                {
                    /* Narrowing cast */

                    if (inlArgNode->gtOper == GT_LCL_VAR &&
                        !lvaTable[inlArgNode->gtLclVarCommon.gtLclNum].lvNormalizeOnLoad() &&
                        sigType == lvaGetRealType(inlArgNode->gtLclVarCommon.gtLclNum))
                    {
                        /* We don't need to insert a cast here as the variable
                           was assigned a normalized value of the right type */

                        continue;
                    }

                    inlArgNode = inlArgInfo[i].argNode = gtNewCastNode(TYP_INT, inlArgNode, sigType);

                    inlArgInfo[i].argIsLclVar = false;

                    /* Try to fold the node in case we have constant arguments */

                    if (inlArgInfo[i].argIsInvariant)
                    {
                        inlArgNode            = gtFoldExprConst(inlArgNode);
                        inlArgInfo[i].argNode = inlArgNode;
                        assert(inlArgNode->OperIsConst());
                    }
                }
#ifdef _TARGET_64BIT_
                else if (genTypeSize(genActualType(inlArgNode->gtType)) < genTypeSize(sigType))
                {
                    // This should only happen for int -> native int widening
                    inlArgNode = inlArgInfo[i].argNode = gtNewCastNode(genActualType(sigType), inlArgNode, sigType);

                    inlArgInfo[i].argIsLclVar = false;

                    /* Try to fold the node in case we have constant arguments */

                    if (inlArgInfo[i].argIsInvariant)
                    {
                        inlArgNode            = gtFoldExprConst(inlArgNode);
                        inlArgInfo[i].argNode = inlArgNode;
                        assert(inlArgNode->OperIsConst());
                    }
                }
#endif // _TARGET_64BIT_
            }
        }
    }
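// For illustration, a hypothetical caller/callee pair (a C#-level sketch, not
// from the original tests) that exercises the narrowing-cast path above. The
// value on the IL stack is int-normalized while the callee signature says
// 'short', so the importer must wrap the argument in a cast node:
//
//     static int Callee(short s) { return s + 1; }      // sigType is TYP_SHORT
//     static int Caller(int x)   { return Callee((short)x); }
//
// Here the argument tree arrives as TYP_INT; since genTypeSize(TYP_INT) >=
// genTypeSize(TYP_SHORT) and TYP_SHORT is smaller than a pointer, the loop
// above creates GT_CAST(int -> short), unless the argument is already a
// normalize-on-store local known to hold a value of the right type.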
    /* Init the types of the local variables */

    CORINFO_ARG_LIST_HANDLE localsSig;
    localsSig = methInfo->locals.args;

    for (i = 0; i < methInfo->locals.numArgs; i++)
    {
        bool      isPinned;
        var_types type = (var_types)eeGetArgType(localsSig, &methInfo->locals, &isPinned);

        lclVarInfo[i + argCnt].lclHasLdlocaOp = false;
        lclVarInfo[i + argCnt].lclIsPinned    = isPinned;
        lclVarInfo[i + argCnt].lclTypeInfo    = type;

        if (varTypeIsGC(type))
        {
            pInlineInfo->numberOfGcRefLocals++;
        }

        if (isPinned)
        {
            // Pinned locals may cause inlines to fail.
            inlineResult->Note(InlineObservation::CALLEE_HAS_PINNED_LOCALS);
            if (inlineResult->IsFailure())
            {
                return;
            }
        }

        lclVarInfo[i + argCnt].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->locals, localsSig);

        // If this local is a struct type with GC fields, inform the inliner. It may choose to bail
        // out on the inline.
        if (type == TYP_STRUCT)
        {
            CORINFO_CLASS_HANDLE lclHandle = lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle();
            DWORD                typeFlags = info.compCompHnd->getClassAttribs(lclHandle);
            if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0)
            {
                inlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT);
                if (inlineResult->IsFailure())
                {
                    return;
                }

                // Do further notification in the case where the call site is rare; some policies do
                // not track the relative hotness of call sites for "always" inline cases.
                if (pInlineInfo->iciBlock->isRunRarely())
                {
                    inlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT);
                    if (inlineResult->IsFailure())
                    {
                        return;
                    }
                }
            }
        }

        localsSig = info.compCompHnd->getArgNext(localsSig);

#ifdef FEATURE_SIMD
        if ((!foundSIMDType || (type == TYP_STRUCT)) && isSIMDClass(&(lclVarInfo[i + argCnt].lclVerTypeInfo)))
        {
            foundSIMDType = true;
            if (featureSIMD && type == TYP_STRUCT)
            {
                var_types structType = impNormStructType(lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle());
                lclVarInfo[i + argCnt].lclTypeInfo = structType;
            }
        }
#endif // FEATURE_SIMD
    }
#ifdef FEATURE_SIMD
    if (!foundSIMDType && (call->AsCall()->gtRetClsHnd != nullptr) && isSIMDClass(call->AsCall()->gtRetClsHnd))
    {
        foundSIMDType = true;
    }
    pInlineInfo->hasSIMDTypeArgLocalOrReturn = foundSIMDType;
#endif // FEATURE_SIMD
}
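// For illustration (hypothetical, not from the original tests): an inlinee such
// as
//
//     static Vector4 Scale(Vector4 v, float s) { return v * s; }  // arg 0 is a SIMD class
//
// sets foundSIMDType in the argument loop above even before the struct is
// normalized to a TYP_SIMD* type. The resulting hasSIMDTypeArgLocalOrReturn
// flag is what lets the inline policy increase the inlining multiplier for
// SIMD-heavy candidates, per the comment in the argument loop.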
//------------------------------------------------------------------------
// impInlineFetchLocal: get a local var that represents an inlinee local
//
// Arguments:
//    lclNum -- number of the inlinee local
//    reason -- debug string describing purpose of the local var
//
// Returns:
//    Number of the local to use
//
// Notes:
//    This method is invoked only for locals actually used in the
//    inlinee body.
//
//    Allocates a new temp if necessary, and copies key properties
//    over from the inlinee local var info.

unsigned Compiler::impInlineFetchLocal(unsigned lclNum DEBUGARG(const char* reason))
{
    assert(compIsForInlining());

    unsigned tmpNum = impInlineInfo->lclTmpNum[lclNum];

    if (tmpNum == BAD_VAR_NUM)
    {
        const InlLclVarInfo& inlineeLocal = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt];
        const var_types      lclTyp       = inlineeLocal.lclTypeInfo;

        // The lifetime of this local might span multiple BBs.
        // So it is a long lifetime local.
        impInlineInfo->lclTmpNum[lclNum] = tmpNum = lvaGrabTemp(false DEBUGARG(reason));

        // Copy over key info
        lvaTable[tmpNum].lvType                 = lclTyp;
        lvaTable[tmpNum].lvHasLdAddrOp          = inlineeLocal.lclHasLdlocaOp;
        lvaTable[tmpNum].lvPinned               = inlineeLocal.lclIsPinned;
        lvaTable[tmpNum].lvHasILStoreOp         = inlineeLocal.lclHasStlocOp;
        lvaTable[tmpNum].lvHasMultipleILStoreOp = inlineeLocal.lclHasMultipleStlocOp;

        // Copy over class handle for ref types. Note this may be a
        // shared type -- someday perhaps we can get the exact
        // signature and pass in a more precise type.
        if (lclTyp == TYP_REF)
        {
            lvaSetClass(tmpNum, inlineeLocal.lclVerTypeInfo.GetClassHandleForObjRef());
        }

        if (inlineeLocal.lclVerTypeInfo.IsStruct())
        {
            if (varTypeIsStruct(lclTyp))
            {
                lvaSetStruct(tmpNum, inlineeLocal.lclVerTypeInfo.GetClassHandle(), true /* unsafe value cls check */);
            }
            else
            {
                // This is a wrapped primitive. Make sure the verstate knows that
                lvaTable[tmpNum].lvVerTypeInfo = inlineeLocal.lclVerTypeInfo;
            }
        }

#ifdef DEBUG
        // Sanity check that we're properly prepared for gc ref locals.
        if (varTypeIsGC(lclTyp))
        {
            // Since there are gc locals we should have seen them earlier
            // and if there was a return value, set up the spill temp.
            assert(impInlineInfo->HasGcRefLocals());
            assert((info.compRetNativeType == TYP_VOID) || (lvaInlineeReturnSpillTemp != BAD_VAR_NUM));
        }
        else
        {
            // Make sure all pinned locals count as gc refs.
            assert(!inlineeLocal.lclIsPinned);
        }
#endif // DEBUG
    }

    return tmpNum;
}
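// For illustration, a hypothetical sketch (not from the original sources) of
// how the importer consumes this helper. An inlinee with '.locals init
// (int32 V_0)' whose V_0 is written in two different blocks reaches here with
// lclTmpNum[0] == BAD_VAR_NUM on the first use; a long-lifetime temp is
// grabbed and the inlinee's type info is copied onto it, and every later use
// of V_0 maps to that same caller-side temp:
//
//     unsigned   tmp = impInlineFetchLocal(0 DEBUGARG("inlinee local 0"));
//     GenTreePtr use = gtNewLclvNode(tmp, lvaTable[tmp].lvType);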
//------------------------------------------------------------------------
// impInlineFetchArg: return tree node for argument value in an inlinee
//
// Arguments:
//    lclNum -- argument number in inlinee IL
//    inlArgInfo -- argument info for inlinee
//    lclVarInfo -- var info for inlinee
//
// Returns:
//    Tree for the argument's value. Often an inlinee-scoped temp
//    GT_LCL_VAR but can be other tree kinds, if the argument
//    expression from the caller can be directly substituted into the
//    inlinee body.
//
// Notes:
//    Must be used only for arguments -- use impInlineFetchLocal for
//    inlinee locals.
//
//    Direct substitution is performed when the formal argument cannot
//    change value in the inlinee body (no starg or ldarga), and the
//    actual argument expression's value cannot be changed if it is
//    substituted into the inlinee body.
//
//    Even if an inlinee-scoped temp is returned here, it may later be
//    "bashed" to a caller-supplied tree when arguments are actually
//    passed (see fgInlinePrependStatements). Bashing can happen if
//    the argument ends up being single use and other conditions are
//    met. So the contents of the tree returned here may not end up
//    being the ones ultimately used for the argument.
//
//    This method will side effect inlArgInfo. It should only be called
//    for actual uses of the argument in the inlinee.
GenTreePtr Compiler::impInlineFetchArg(unsigned lclNum, InlArgInfo* inlArgInfo, InlLclVarInfo* lclVarInfo)
{
    // Cache the relevant arg and lcl info for this argument.
    // We will modify argInfo but not lclVarInfo.
    InlArgInfo&          argInfo          = inlArgInfo[lclNum];
    const InlLclVarInfo& lclInfo          = lclVarInfo[lclNum];
    const bool           argCanBeModified = argInfo.argHasLdargaOp || argInfo.argHasStargOp;
    const var_types      lclTyp           = lclInfo.lclTypeInfo;
    GenTreePtr           op1              = nullptr;

    if (argInfo.argIsInvariant && !argCanBeModified)
    {
        // Directly substitute constants or addresses of locals
        //
        // Clone the constant. Note that we cannot directly use
        // argNode in the trees even if !argInfo.argIsUsed as this
        // would introduce aliasing between inlArgInfo[].argNode and
        // impInlineExpr. Then gtFoldExpr() could change it, causing
        // further references to the argument working off of the
        // bashed copy.
        op1 = gtCloneExpr(argInfo.argNode);
        PREFIX_ASSUME(op1 != nullptr);
        argInfo.argTmpNum = BAD_VAR_NUM;
    }
    else if (argInfo.argIsLclVar && !argCanBeModified)
    {
        // Directly substitute caller locals
        //
        // Use the caller-supplied node if this is the first use.
        op1               = argInfo.argNode;
        argInfo.argTmpNum = op1->gtLclVarCommon.gtLclNum;

        // Use an equivalent copy if this is the second or subsequent
        // use, or if we need to retype.
        //
        // Note argument type mismatches that prevent inlining should
        // have been caught in impInlineInitVars.
        if (argInfo.argIsUsed || (op1->TypeGet() != lclTyp))
        {
            assert(op1->gtOper == GT_LCL_VAR);
            assert(lclNum == op1->gtLclVar.gtLclILoffs);

            var_types newTyp = lclTyp;

            if (!lvaTable[op1->gtLclVarCommon.gtLclNum].lvNormalizeOnLoad())
            {
                newTyp = genActualType(lclTyp);
            }

            // Create a new lcl var node - remember the argument lclNum
            op1 = gtNewLclvNode(op1->gtLclVarCommon.gtLclNum, newTyp, op1->gtLclVar.gtLclILoffs);
        }
    }
    else if (argInfo.argIsByRefToStructLocal && !argInfo.argHasStargOp)
    {
        /* Argument is a by-ref address to a struct, a normed struct, or its field.
           In these cases, don't spill the byref to a local, simply clone the tree and use it.
           This way we will increase the chance for this byref to be optimized away by
           a subsequent "dereference" operation.

           From Dev11 bug #139955: Argument node can also be TYP_I_IMPL if we've bashed the tree
           (in impInlineInitVars()), if the arg has argHasLdargaOp as well as argIsByRefToStructLocal.
           For example, if the caller is:
                ldloca.s   V_1  // V_1 is a local struct
                call       void Test.ILPart::RunLdargaOnPointerArg(int32*)
           and the callee being inlined has:
                .method public static void  RunLdargaOnPointerArg(int32* ptrToInts) cil managed
                    ldarga.s   ptrToInts
                    call       void Test.FourInts::NotInlined_SetExpectedValuesThroughPointerToPointer(int32**)
           then we change the argument tree (of "ldloca.s V_1") to TYP_I_IMPL to match the callee signature. We'll
           soon afterwards reject the inlining anyway, since the tree we return isn't a GT_LCL_VAR.
        */
        assert(argInfo.argNode->TypeGet() == TYP_BYREF || argInfo.argNode->TypeGet() == TYP_I_IMPL);
        op1 = gtCloneExpr(argInfo.argNode);
    }
    else
    {
        /* Argument is a complex expression - it must be evaluated into a temp */

        if (argInfo.argHasTmp)
        {
            assert(argInfo.argIsUsed);
            assert(argInfo.argTmpNum < lvaCount);

            /* Create a new lcl var node - remember the argument lclNum */
            op1 = gtNewLclvNode(argInfo.argTmpNum, genActualType(lclTyp));

            /* This is the second or later use of this argument,
               so we have to use the temp (instead of the actual arg) */
            argInfo.argBashTmpNode = nullptr;
        }
        else
        {
            /* First time use */
            assert(!argInfo.argIsUsed);

            /* Reserve a temp for the expression.
             * Use a large size node as we may change it later */

            const unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Inlining Arg"));

            lvaTable[tmpNum].lvType = lclTyp;

            // For ref types, determine the type of the temp.
            if (lclTyp == TYP_REF)
            {
                if (!argCanBeModified)
                {
                    // If the arg can't be modified in the method
                    // body, use the type of the value, if
                    // known. Otherwise, use the declared type.
                    lvaSetClass(tmpNum, argInfo.argNode, lclInfo.lclVerTypeInfo.GetClassHandleForObjRef());
                }
                else
                {
                    // Arg might be modified, use the declared type of
                    // the argument.
                    lvaSetClass(tmpNum, lclInfo.lclVerTypeInfo.GetClassHandleForObjRef());
                }
            }

            assert(lvaTable[tmpNum].lvAddrExposed == 0);
            if (argInfo.argHasLdargaOp)
            {
                lvaTable[tmpNum].lvHasLdAddrOp = 1;
            }

            if (lclInfo.lclVerTypeInfo.IsStruct())
            {
                if (varTypeIsStruct(lclTyp))
                {
                    lvaSetStruct(tmpNum, lclInfo.lclVerTypeInfo.GetClassHandle(), true /* unsafe value cls check */);
                }
                else
                {
                    // This is a wrapped primitive. Make sure the verstate knows that
                    lvaTable[tmpNum].lvVerTypeInfo = lclInfo.lclVerTypeInfo;
                }
            }

            argInfo.argHasTmp = true;
            argInfo.argTmpNum = tmpNum;

            // If we require strict exception order, then arguments must
            // be evaluated in sequence before the body of the inlined method.
            // So we need to evaluate them to a temp.
            // Also, if arguments have global references, we need to
            // evaluate them to a temp before the inlined body as the
            // inlined body may be modifying the global ref.
            // TODO-1stClassStructs: We currently do not reuse an existing lclVar
            // if it is a struct, because it requires some additional handling.

            if (!varTypeIsStruct(lclTyp) && !argInfo.argHasSideEff && !argInfo.argHasGlobRef)
            {
                /* Get a *LARGE* LCL_VAR node */
                op1 = gtNewLclLNode(tmpNum, genActualType(lclTyp), lclNum);

                /* Record op1 as the very first use of this argument.
                   If there are no further uses of the arg, we may be
                   able to use the actual arg node instead of the temp.
                   If we do see any further uses, we will clear this. */
                argInfo.argBashTmpNode = op1;
            }
            else
            {
                /* Get a small LCL_VAR node */
                op1 = gtNewLclvNode(tmpNum, genActualType(lclTyp));
                /* No bashing of this argument */
                argInfo.argBashTmpNode = nullptr;
            }
        }
    }

    // Mark this argument as used.
    argInfo.argIsUsed = true;

    return op1;
}
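// For illustration (a hypothetical example, not from the original sources): if
// the callee is 'static int Add(int a, int b) { return a + b; }' and the call
// site is 'Add(3, 4)', both arguments are invariant and never stored to, so
// impInlineFetchArg takes the direct-substitution path above and clones the
// constants straight into the inlinee expression; gtFoldExprConst can then
// reduce the inlined body to the constant 7. A call such as 'Add(x * y, F())'
// instead lands in the temp path, since F() has side effects that must be
// evaluated exactly once, in order, before the inlined body runs.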
/******************************************************************************
 Is this the original "this" argument to the call being inlined?

 Note that we do not inline methods with "starg 0", and so we do not need to
 worry about it.
*/

BOOL Compiler::impInlineIsThis(GenTreePtr tree, InlArgInfo* inlArgInfo)
{
    assert(compIsForInlining());
    return (tree->gtOper == GT_LCL_VAR && tree->gtLclVarCommon.gtLclNum == inlArgInfo[0].argTmpNum);
}
//-----------------------------------------------------------------------------
// This function checks if a dereference in the inlinee can guarantee that
// the "this" pointer is non-NULL.
// If we haven't hit a branch or a side effect, and we are dereferencing
// from 'this' to access a field or make a GTF_CALL_NULLCHECK call,
// then we can avoid a separate null pointer check.
//
// "additionalTreesToBeEvaluatedBefore"
// is the set of pending trees that have not yet been added to the statement list,
// and which have been removed from verCurrentState.esStack[]

BOOL Compiler::impInlineIsGuaranteedThisDerefBeforeAnySideEffects(GenTreePtr  additionalTreesToBeEvaluatedBefore,
                                                                  GenTreePtr  variableBeingDereferenced,
                                                                  InlArgInfo* inlArgInfo)
{
    assert(compIsForInlining());
    assert(opts.OptEnabled(CLFLG_INLINING));

    BasicBlock* block = compCurBB;

    GenTreePtr stmt;
    GenTreePtr expr;

    if (block != fgFirstBB)
    {
        return FALSE;
    }

    if (!impInlineIsThis(variableBeingDereferenced, inlArgInfo))
    {
        return FALSE;
    }

    if (additionalTreesToBeEvaluatedBefore &&
        GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(additionalTreesToBeEvaluatedBefore->gtFlags))
    {
        return FALSE;
    }

    for (stmt = impTreeList->gtNext; stmt; stmt = stmt->gtNext)
    {
        expr = stmt->gtStmt.gtStmtExpr;

        if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(expr->gtFlags))
        {
            return FALSE;
        }
    }

    for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
    {
        unsigned stackTreeFlags = verCurrentState.esStack[level].val->gtFlags;
        if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(stackTreeFlags))
        {
            return FALSE;
        }
    }

    return TRUE;
}
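// For illustration (a hypothetical C# inlinee, not from the original sources):
//
//     class C { int f; int Get() { return this.f; } }
//
// When Get() is inlined, the field load off 'this' is encountered in the first
// block with no side-effecting trees pending, so the field access itself
// guarantees the null check and the caller can skip emitting a separate
// explicit null check for the 'this' argument.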
//------------------------------------------------------------------------
// impMarkInlineCandidate: determine if this call can be subsequently inlined
//
// Arguments:
//    callNode -- call under scrutiny
//    exactContextHnd -- context handle for inlining
//    exactContextNeedsRuntimeLookup -- true if context required runtime lookup
//    callInfo -- call info from VM
//
// Notes:
//    If callNode is an inline candidate, this method sets the flag
//    GTF_CALL_INLINE_CANDIDATE, and ensures that helper methods have
//    filled in the associated InlineCandidateInfo.
//
//    If callNode is not an inline candidate, and the reason is
//    something that is inherent to the method being called, the
//    method may be marked as "noinline" to short-circuit any
//    future assessments of calls to this method.

void Compiler::impMarkInlineCandidate(GenTreePtr             callNode,
                                      CORINFO_CONTEXT_HANDLE exactContextHnd,
                                      bool                   exactContextNeedsRuntimeLookup,
                                      CORINFO_CALL_INFO*     callInfo)
{
    // Let the strategy know there's another call
    impInlineRoot()->m_inlineStrategy->NoteCall();

    if (!opts.OptEnabled(CLFLG_INLINING))
    {
        /* XXX Mon 8/18/2008
         * This assert is misleading. The caller does not ensure that we have CLFLG_INLINING set before
         * calling impMarkInlineCandidate. However, if this assert trips it means that we're an inlinee and
         * CLFLG_MINOPT is set. That doesn't make a lot of sense. If you hit this assert, work back and
         * figure out why we did not set MAXOPT for this compile.
         */
        assert(!compIsForInlining());
        return;
    }

    if (compIsForImportOnly())
    {
        // Don't bother creating the inline candidate during verification.
        // Otherwise the call to info.compCompHnd->canInline will trigger a recursive verification
        // that leads to the creation of multiple instances of Compiler.
        return;
    }

    GenTreeCall* call = callNode->AsCall();
    InlineResult inlineResult(this, call, nullptr, "impMarkInlineCandidate");

    // Don't inline if not optimizing root method
    if (opts.compDbgCode)
    {
        inlineResult.NoteFatal(InlineObservation::CALLER_DEBUG_CODEGEN);
        return;
    }

    // Don't inline if inlining into root method is disabled.
    if (InlineStrategy::IsNoInline(info.compCompHnd, info.compMethodHnd))
    {
        inlineResult.NoteFatal(InlineObservation::CALLER_IS_JIT_NOINLINE);
        return;
    }

    // Inlining candidate determination needs to honor only IL tail prefix.
    // Inlining takes precedence over implicit tail call optimization (if the call is not directly recursive).
    if (call->IsTailPrefixedCall())
    {
        inlineResult.NoteFatal(InlineObservation::CALLSITE_EXPLICIT_TAIL_PREFIX);
        return;
    }

    // Tail recursion elimination takes precedence over inlining.
    // TODO: We may want to do some of the additional checks from fgMorphCall
    // here to reduce the chance we don't inline a call that won't be optimized
    // as a fast tail call or turned into a loop.
    if (gtIsRecursiveCall(call) && call->IsImplicitTailCall())
    {
        inlineResult.NoteFatal(InlineObservation::CALLSITE_IMPLICIT_REC_TAIL_CALL);
        return;
    }

    if ((call->gtFlags & GTF_CALL_VIRT_KIND_MASK) != GTF_CALL_NONVIRT)
    {
        inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT);
        return;
    }

    /* Ignore helper calls */

    if (call->gtCallType == CT_HELPER)
    {
        inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_CALL_TO_HELPER);
        return;
    }

    /* Ignore indirect calls */
    if (call->gtCallType == CT_INDIRECT)
    {
        inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT_MANAGED);
        return;
    }

    /* I removed the check for BBJ_THROW. BBJ_THROW is usually marked as rarely run. This more or less
     * restricts the inliner to non-expanding inlines. I removed the check to allow for non-expanding
     * inlining in throw blocks. I should consider the same thing for catch and filter regions. */

    CORINFO_METHOD_HANDLE fncHandle = call->gtCallMethHnd;
    unsigned              methAttr;

    // Reuse method flags from the original callInfo if possible
    if (fncHandle == callInfo->hMethod)
    {
        methAttr = callInfo->methodFlags;
    }
    else
    {
        methAttr = info.compCompHnd->getMethodAttribs(fncHandle);
    }

#ifdef DEBUG
    if (compStressCompile(STRESS_FORCE_INLINE, 0))
    {
        methAttr |= CORINFO_FLG_FORCEINLINE;
    }
#endif

    // Check for COMPlus_AggressiveInlining
    if (compDoAggressiveInlining)
    {
        methAttr |= CORINFO_FLG_FORCEINLINE;
    }

    if (!(methAttr & CORINFO_FLG_FORCEINLINE))
    {
        /* Don't bother inlining blocks that are in the catch handler region */
        if (bbInCatchHandlerILRange(compCurBB))
        {
#ifdef DEBUG
            if (verbose)
            {
                printf("\nWill not inline blocks that are in the catch handler region\n");
            }

#endif

            inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_CATCH);
            return;
        }

        if (bbInFilterILRange(compCurBB))
        {
#ifdef DEBUG
            if (verbose)
            {
                printf("\nWill not inline blocks that are in the filter region\n");
            }
#endif

            inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_FILTER);
            return;
        }
    }

    /* If the caller's stack frame is marked, then we can't do any inlining. Period. */

    if (opts.compNeedSecurityCheck)
    {
        inlineResult.NoteFatal(InlineObservation::CALLER_NEEDS_SECURITY_CHECK);
        return;
    }

    /* Check if we tried to inline this method before */

    if (methAttr & CORINFO_FLG_DONT_INLINE)
    {
        inlineResult.NoteFatal(InlineObservation::CALLEE_IS_NOINLINE);
        return;
    }

    /* Cannot inline synchronized methods */

    if (methAttr & CORINFO_FLG_SYNCH)
    {
        inlineResult.NoteFatal(InlineObservation::CALLEE_IS_SYNCHRONIZED);
        return;
    }

    /* Do not inline if callee needs security checks (since they would then mark the wrong frame) */

    if (methAttr & CORINFO_FLG_SECURITYCHECK)
    {
        inlineResult.NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
        return;
    }

    InlineCandidateInfo* inlineCandidateInfo = nullptr;
    impCheckCanInline(call, fncHandle, methAttr, exactContextHnd, &inlineCandidateInfo, &inlineResult);

    if (inlineResult.IsFailure())
    {
        return;
    }

    // The old value should be NULL
    assert(call->gtInlineCandidateInfo == nullptr);

    // The new value should not be NULL.
    assert(inlineCandidateInfo != nullptr);
    inlineCandidateInfo->exactContextNeedsRuntimeLookup = exactContextNeedsRuntimeLookup;

    call->gtInlineCandidateInfo = inlineCandidateInfo;

    // Mark the call node as inline candidate.
    call->gtFlags |= GTF_CALL_INLINE_CANDIDATE;

    // Let the strategy know there's another candidate.
    impInlineRoot()->m_inlineStrategy->NoteCandidate();

    // Since we're not actually inlining yet, and this call site is
    // still just an inline candidate, there's nothing to report.
    inlineResult.SetReported();
}
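// For illustration (hypothetical, not from the original sources): a call site
// targeting
//
//     [MethodImpl(MethodImplOptions.Synchronized)]
//     static void M() { }
//
// fails candidacy above with CALLEE_IS_SYNCHRONIZED before impCheckCanInline
// is ever consulted, while a plain 'static int N() { return 1; }' call site
// typically survives all of the early screens and gets marked with
// GTF_CALL_INLINE_CANDIDATE for the inliner to evaluate later.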
/******************************************************************************/
// Returns true if the given intrinsic will be implemented by target-specific
// instructions

bool Compiler::IsTargetIntrinsic(CorInfoIntrinsics intrinsicId)
{
#if defined(_TARGET_AMD64_) || (defined(_TARGET_X86_) && !defined(LEGACY_BACKEND))
    switch (intrinsicId)
    {
        // AMD64 only has SSE2 instructions to directly compute sqrt/abs.
        //
        // TODO: Because the x86 backend only targets SSE for floating-point code,
        //       it does not treat Sine, Cosine, or Round as intrinsics (JIT32
        //       implemented those intrinsics as x87 instructions). If this poses
        //       a CQ problem, it may be necessary to change the implementation of
        //       the helper calls to decrease call overhead or switch back to the
        //       x87 instructions. This is tracked by #7097.
        case CORINFO_INTRINSIC_Sqrt:
        case CORINFO_INTRINSIC_Abs:
            return true;

        default:
            return false;
    }
#elif defined(_TARGET_ARM64_)
    switch (intrinsicId)
    {
        case CORINFO_INTRINSIC_Sqrt:
        case CORINFO_INTRINSIC_Abs:
        case CORINFO_INTRINSIC_Round:
            return true;

        default:
            return false;
    }
#elif defined(_TARGET_ARM_)
    switch (intrinsicId)
    {
        case CORINFO_INTRINSIC_Sqrt:
        case CORINFO_INTRINSIC_Abs:
        case CORINFO_INTRINSIC_Round:
            return true;

        default:
            return false;
    }
#elif defined(_TARGET_X86_)
    switch (intrinsicId)
    {
        case CORINFO_INTRINSIC_Sin:
        case CORINFO_INTRINSIC_Cos:
        case CORINFO_INTRINSIC_Sqrt:
        case CORINFO_INTRINSIC_Abs:
        case CORINFO_INTRINSIC_Round:
            return true;

        default:
            return false;
    }
#else
    // TODO: This portion of the logic is not implemented for other architectures.
    // We return true because on all other architectures the only intrinsics
    // enabled are target intrinsics.
    return true;
#endif //_TARGET_AMD64_
}
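// For illustration (hypothetical, not from the original sources): on AMD64,
// IsTargetIntrinsic(CORINFO_INTRINSIC_Sqrt) returns true, so Math.Sqrt(x)
// becomes a GT_INTRINSIC node that codegen can expand to the SSE2 'sqrtsd'
// instruction, while CORINFO_INTRINSIC_Sin fails the predicate and remains a
// call into System.Math (see IsIntrinsicImplementedByUserCall below).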
/******************************************************************************/
// Returns true if the given intrinsic will be implemented by calling System.Math
// methods.

bool Compiler::IsIntrinsicImplementedByUserCall(CorInfoIntrinsics intrinsicId)
{
    // Currently, if a math intrinsic is not implemented by target-specific
    // instructions, it will be implemented by a System.Math call. In the
    // future, if we turn to implementing some of them with helper calls,
    // this predicate needs to be revisited.
    return !IsTargetIntrinsic(intrinsicId);
}

bool Compiler::IsMathIntrinsic(CorInfoIntrinsics intrinsicId)
{
    switch (intrinsicId)
    {
        case CORINFO_INTRINSIC_Sin:
        case CORINFO_INTRINSIC_Sqrt:
        case CORINFO_INTRINSIC_Abs:
        case CORINFO_INTRINSIC_Cos:
        case CORINFO_INTRINSIC_Round:
        case CORINFO_INTRINSIC_Cosh:
        case CORINFO_INTRINSIC_Sinh:
        case CORINFO_INTRINSIC_Tan:
        case CORINFO_INTRINSIC_Tanh:
        case CORINFO_INTRINSIC_Asin:
        case CORINFO_INTRINSIC_Acos:
        case CORINFO_INTRINSIC_Atan:
        case CORINFO_INTRINSIC_Atan2:
        case CORINFO_INTRINSIC_Log10:
        case CORINFO_INTRINSIC_Pow:
        case CORINFO_INTRINSIC_Exp:
        case CORINFO_INTRINSIC_Ceiling:
        case CORINFO_INTRINSIC_Floor:
            return true;

        default:
            return false;
    }
}

bool Compiler::IsMathIntrinsic(GenTreePtr tree)
{
    return (tree->OperGet() == GT_INTRINSIC) && IsMathIntrinsic(tree->gtIntrinsic.gtIntrinsicId);
}
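// For illustration (a hypothetical sketch, not from the original sources), the
// predicates above compose like this:
//
//     CorInfoIntrinsics id = CORINFO_INTRINSIC_Pow;
//     bool isMath   = IsMathIntrinsic(id);                   // true: Pow is in the list above
//     bool isByCall = IsIntrinsicImplementedByUserCall(id);  // true on targets with no
//                                                            // direct Pow instruction
//
// so Math.Pow stays a call into System.Math on such targets, whereas Sqrt on
// AMD64/ARM32/ARM64 reports isByCall == false and is expanded inline.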
//------------------------------------------------------------------------
// impDevirtualizeCall: Attempt to change a virtual vtable call into a
//   normal call
//
// Arguments:
//     call -- the call node to examine/modify
//     thisObj -- the value of 'this' for the call
//     method -- [IN/OUT] the method handle for call. Updated iff call devirtualized.
//     methodFlags -- [IN/OUT] flags for the method to call. Updated iff call devirtualized.
//     contextHandle -- [IN/OUT] context handle for the call. Updated iff call devirtualized.
//     exactContextHandle -- [OUT] updated context handle iff call devirtualized
//
// Notes:
//     Virtual calls in IL will always "invoke" the base class method.
//
//     This transformation looks for evidence that the type of 'this'
//     in the call is exactly known, is a final class or would invoke
//     a final method, and if that and other safety checks pan out,
//     modifies the call and the call info to create a direct call.
//
//     This transformation is initially done in the importer and not
//     in some subsequent optimization pass because we want it to be
//     upstream of inline candidate identification.
//
//     However, later phases may supply improved type information that
//     can enable further devirtualization. We currently reinvoke this
//     code after inlining, if the return value of the inlined call is
//     the 'this obj' of a subsequent virtual call.

void Compiler::impDevirtualizeCall(GenTreeCall*            call,
                                   GenTreePtr              thisObj,
                                   CORINFO_METHOD_HANDLE*  method,
                                   unsigned*               methodFlags,
                                   CORINFO_CONTEXT_HANDLE* contextHandle,
                                   CORINFO_CONTEXT_HANDLE* exactContextHandle)
{
    assert(call != nullptr);
    assert(method != nullptr);
    assert(methodFlags != nullptr);
    assert(contextHandle != nullptr);

    // This should be a virtual vtable or virtual stub call.
    assert(call->IsVirtual());

    // Bail if not optimizing
    if (opts.MinOpts())
    {
        return;
    }

    // Bail if debuggable codegen
    if (opts.compDbgCode)
    {
        return;
    }

#if defined(DEBUG)
    // Bail if devirt is disabled.
    if (JitConfig.JitEnableDevirtualization() == 0)
    {
        return;
    }

    const bool doPrint = JitConfig.JitPrintDevirtualizedMethods() == 1;
#endif // DEBUG

    // Fetch information about the virtual method we're calling.
    CORINFO_METHOD_HANDLE baseMethod        = *method;
    unsigned              baseMethodAttribs = *methodFlags;

    if (baseMethodAttribs == 0)
    {
        // For late devirt we may not have method attributes, so fetch them.
        baseMethodAttribs = info.compCompHnd->getMethodAttribs(baseMethod);
    }
    else
    {
#if defined(DEBUG)
        // Validate that callInfo has up to date method flags
        const DWORD freshBaseMethodAttribs = info.compCompHnd->getMethodAttribs(baseMethod);

        // All the base method attributes should agree, save that
        // CORINFO_FLG_DONT_INLINE may have changed from 0 to 1
        // because of concurrent jitting activity.
        //
        // Note we don't look at this particular flag bit below, and
        // later on (if we do try and inline) we will rediscover why
        // the method can't be inlined, so there's no danger here in
        // seeing this particular flag bit in different states between
        // the cached and fresh values.
        if ((freshBaseMethodAttribs & ~CORINFO_FLG_DONT_INLINE) != (baseMethodAttribs & ~CORINFO_FLG_DONT_INLINE))
        {
            assert(!"mismatched method attributes");
        }
#endif // DEBUG
    }

    // In R2R mode, we might see virtual stub calls to
    // non-virtuals. For instance cases where the non-virtual method
    // is in a different assembly but is called via CALLVIRT. For
    // version resilience we must allow for the fact that the method
    // might become virtual in some update.
    //
    // In non-R2R modes CALLVIRT <nonvirtual> will be turned into a
    // regular call+nullcheck upstream, so we won't reach this
    // point.
    if ((baseMethodAttribs & CORINFO_FLG_VIRTUAL) == 0)
    {
        assert(call->IsVirtualStub());
        assert(opts.IsReadyToRun());
        JITDUMP("\nimpDevirtualizeCall: [R2R] base method not virtual, sorry\n");
        return;
    }
    // See what we know about the type of 'this' in the call.
    bool                 isExact      = false;
    bool                 objIsNonNull = false;
    CORINFO_CLASS_HANDLE objClass     = gtGetClassHandle(thisObj, &isExact, &objIsNonNull);

    // Bail if we know nothing.
    if (objClass == nullptr)
    {
        JITDUMP("\nimpDevirtualizeCall: no type available (op=%s)\n", GenTree::OpName(thisObj->OperGet()));
        return;
    }

    // Fetch information about the class that introduced the virtual method.
    CORINFO_CLASS_HANDLE baseClass        = info.compCompHnd->getMethodClass(baseMethod);
    const DWORD          baseClassAttribs = info.compCompHnd->getClassAttribs(baseClass);

#if !defined(FEATURE_CORECLR)
    // If base class is not beforefieldinit then devirtualizing may
    // cause us to miss a base class init trigger. Spec says we don't
    // need a trigger for ref class callvirts but desktop seems to
    // have one anyways. So defer.
    if ((baseClassAttribs & CORINFO_FLG_BEFOREFIELDINIT) == 0)
    {
        JITDUMP("\nimpDevirtualizeCall: base class has precise initialization, sorry\n");
        return;
    }
#endif // FEATURE_CORECLR

    // Is the call an interface call?
    const bool isInterface = (baseClassAttribs & CORINFO_FLG_INTERFACE) != 0;

    // If the objClass is sealed (final), then we may be able to devirtualize.
    const DWORD objClassAttribs = info.compCompHnd->getClassAttribs(objClass);
    const bool  objClassIsFinal = (objClassAttribs & CORINFO_FLG_FINAL) != 0;

#if defined(DEBUG)
    const char* callKind       = isInterface ? "interface" : "virtual";
    const char* objClassNote   = "[?]";
    const char* objClassName   = "?objClass";
    const char* baseClassName  = "?baseClass";
    const char* baseMethodName = "?baseMethod";

    if (verbose || doPrint)
    {
        objClassNote   = isExact ? " [exact]" : objClassIsFinal ? " [final]" : "";
        objClassName   = info.compCompHnd->getClassName(objClass);
        baseClassName  = info.compCompHnd->getClassName(baseClass);
        baseMethodName = eeGetMethodName(baseMethod, nullptr);

        if (verbose)
        {
            printf("\nimpDevirtualizeCall: Trying to devirtualize %s call:\n"
                   "    class for 'this' is %s%s (attrib %08x)\n"
                   "    base method is %s::%s\n",
                   callKind, objClassName, objClassNote, objClassAttribs, baseClassName, baseMethodName);
        }
    }
#endif // defined(DEBUG)

    // Bail if obj class is an interface.
    // See for instance System.ValueTuple`8::GetHashCode, where lcl 0 is System.IValueTupleInternal
    //   IL_021d:  ldloc.0
    //   IL_021e:  callvirt   instance int32 System.Object::GetHashCode()
    if ((objClassAttribs & CORINFO_FLG_INTERFACE) != 0)
    {
        JITDUMP("--- obj class is interface, sorry\n");
        return;
    }

    if (isInterface)
    {
        assert(call->IsVirtualStub());
        JITDUMP("--- base class is interface\n");
    }

    // Fetch the method that would be called based on the declared type of 'this'
    CORINFO_CONTEXT_HANDLE ownerType     = *contextHandle;
    CORINFO_METHOD_HANDLE  derivedMethod = info.compCompHnd->resolveVirtualMethod(baseMethod, objClass, ownerType);

    // If we failed to get a handle, we can't devirtualize. This can
    // happen when prejitting, if the devirtualization crosses
    // servicing bubble boundaries.
    if (derivedMethod == nullptr)
    {
        JITDUMP("--- no derived method, sorry\n");
        return;
    }

    // Fetch method attributes to see if method is marked final.
    const DWORD derivedMethodAttribs = info.compCompHnd->getMethodAttribs(derivedMethod);
    const bool  derivedMethodIsFinal = ((derivedMethodAttribs & CORINFO_FLG_FINAL) != 0);

#if defined(DEBUG)
    const char* derivedClassName  = "?derivedClass";
    const char* derivedMethodName = "?derivedMethod";

    const char* note = "speculative";
    if (isExact)
    {
        note = "exact";
    }
    else if (objClassIsFinal)
    {
        note = "final class";
    }
    else if (derivedMethodIsFinal)
    {
        note = "final method";
    }

    if (verbose || doPrint)
    {
        derivedMethodName = eeGetMethodName(derivedMethod, &derivedClassName);
        if (verbose)
        {
            printf("    devirt to %s::%s -- %s\n", derivedClassName, derivedMethodName, note);
        }
    }
#endif // defined(DEBUG)

    if (!isExact && !objClassIsFinal && !derivedMethodIsFinal)
    {
        // Type is not exact, and neither class or method is final.
        //
        // We could speculatively devirtualize, but there's no
        // reason to believe the derived method is the one that
        // is likely to be invoked.
        //
        // If there's currently no further overriding (that is, at
        // the time of jitting, objClass has no subclasses that
        // override this method), then perhaps we'd be willing to
        // make a bet...?
        JITDUMP("    Class not final or exact, method not final, no devirtualization\n");
        return;
    }

    // For interface calls we must have an exact type or final class.
    if (isInterface && !isExact && !objClassIsFinal)
    {
        JITDUMP("    Class not final or exact for interface, no devirtualization\n");
        return;
    }

    JITDUMP("    %s; can devirtualize\n", note);
    // Make the updates.
    call->gtFlags &= ~GTF_CALL_VIRT_VTABLE;
    call->gtFlags &= ~GTF_CALL_VIRT_STUB;
    call->gtCallMethHnd = derivedMethod;
    call->gtCallType    = CT_USER_FUNC;

    // Virtual calls include an implicit null check, which we may
    // now need to make explicit.
    if (!objIsNonNull)
    {
        call->gtFlags |= GTF_CALL_NULLCHECK;
    }

    // Clear the inline candidate info (may be non-null since
    // it's a union field used for other things by virtual
    // calls)
    call->gtInlineCandidateInfo = nullptr;

    // Fetch the class that introduced the derived method.
    //
    // Note this may not equal objClass, if there is a
    // final method that objClass inherits.
    CORINFO_CLASS_HANDLE derivedClass = info.compCompHnd->getMethodClass(derivedMethod);

#ifdef FEATURE_READYTORUN_COMPILER
    if (opts.IsReadyToRun())
    {
        // For R2R, getCallInfo triggers bookkeeping on the zap
        // side so we need to call it here.
        //
        // First, cons up a suitable resolved token.
        CORINFO_RESOLVED_TOKEN derivedResolvedToken = {};

        derivedResolvedToken.tokenScope   = info.compScopeHnd;
        derivedResolvedToken.tokenContext = *contextHandle;
        derivedResolvedToken.token        = info.compCompHnd->getMethodDefFromMethod(derivedMethod);
        derivedResolvedToken.tokenType    = CORINFO_TOKENKIND_Method;
        derivedResolvedToken.hClass       = derivedClass;
        derivedResolvedToken.hMethod      = derivedMethod;

        // Look up the new call info.
        CORINFO_CALL_INFO derivedCallInfo;
        eeGetCallInfo(&derivedResolvedToken, nullptr, addVerifyFlag(CORINFO_CALLINFO_ALLOWINSTPARAM), &derivedCallInfo);

        // Update the call.
        call->gtCallMoreFlags &= ~GTF_CALL_M_VIRTSTUB_REL_INDIRECT;
        call->gtCallMoreFlags &= ~GTF_CALL_M_R2R_REL_INDIRECT;
        call->setEntryPoint(derivedCallInfo.codePointerLookup.constLookup);
    }
#endif // FEATURE_READYTORUN_COMPILER

    // Need to update call info too. This is fragile
    // but hopefully the derived method conforms to
    // the base in most other ways.
    *method        = derivedMethod;
    *methodFlags   = derivedMethodAttribs;
    *contextHandle = MAKE_METHODCONTEXT(derivedMethod);

    // Update context handle.
    if ((exactContextHandle != nullptr) && (*exactContextHandle != nullptr))
    {
        *exactContextHandle = MAKE_METHODCONTEXT(derivedMethod);
    }

#if defined(DEBUG)
    if (verbose)
    {
        printf("... after devirt...\n");
        gtDispTree(call);
    }

    if (doPrint)
    {
        printf("Devirtualized %s call to %s:%s; now direct call to %s:%s [%s]\n", callKind, baseClassName,
               baseMethodName, derivedClassName, derivedMethodName, note);
    }
#endif // defined(DEBUG)
}
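// For illustration, a hypothetical C# example (not from the original sources)
// of a call this transformation can rewrite:
//
//     class Base                  { public virtual  int M() { return 0; } }
//     sealed class Derived : Base { public override int M() { return 1; } }
//
//     static int Test()
//     {
//         Base b = new Derived();   // exact type of 'b' is visible here
//         return b.M();             // IL: callvirt Base::M
//     }
//
// If gtGetClassHandle can report Derived as the exact class of 'b' (e.g. via
// lvaSetClass at the allocation site), the callvirt is rewritten as a direct
// call to Derived.M, which then also becomes eligible as an inline candidate.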
//------------------------------------------------------------------------
// impAllocateToken: create a CORINFO_RESOLVED_TOKEN in jit-allocated memory and initialize it.
//
// Arguments:
//    token - init value for the allocated token.
//
// Return Value:
//    Pointer to the token in jit-allocated memory.
CORINFO_RESOLVED_TOKEN* Compiler::impAllocateToken(CORINFO_RESOLVED_TOKEN token)
{
    CORINFO_RESOLVED_TOKEN* memory = (CORINFO_RESOLVED_TOKEN*)compGetMem(sizeof(token));