// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX                                                                           XX
XX                           Importer                                        XX
XX                                                                           XX
XX   Imports the given method and converts it to semantic trees              XX
XX                                                                           XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
#define Verify(cond, msg) \
    verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__)); \

#define VerifyOrReturn(cond, msg) \
    verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__)); \

#define VerifyOrReturnSpeculative(cond, msg, speculative) \
    verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__)); \
62 /*****************************************************************************/
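// Initialize (reset) the importer state used while building trees for the method.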
64 void Compiler::impInit()
68 impTreeList = nullptr;
69 impTreeLast = nullptr;
70 impInlinedCodeSize = 0;
74 /*****************************************************************************
76 * Pushes the given tree on the stack.
79 void Compiler::impPushOnStack(GenTreePtr tree, typeInfo ti)
81 /* Check for overflow. If inlining, we may be using a bigger stack */
83 if ((verCurrentState.esStackDepth >= info.compMaxStack) &&
84 (verCurrentState.esStackDepth >= impStkSize || ((compCurBB->bbFlags & BBF_IMPORTED) == 0)))
86 BADCODE("stack overflow");
90 // If we are pushing a struct, make certain we know the precise type!
91 if (tree->TypeGet() == TYP_STRUCT)
93 assert(ti.IsType(TI_STRUCT));
94 CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandle();
95 assert(clsHnd != NO_CLASS_HANDLE);
98 if (tiVerificationNeeded && !ti.IsDead())
100 assert(typeInfo::AreEquivalent(NormaliseForStack(ti), ti)); // types are normalized
102 // The ti type is consistent with the tree type.
105 // On 64-bit systems, nodes whose "proper" type is "native int" get labeled TYP_LONG.
106 // In the verification type system, we always transform "native int" to "TI_INT".
107 // Ideally, we would keep track of which nodes labeled "TYP_LONG" are really "native int", but
108 // attempts to do that have proved too difficult. Instead, we'll assume that in checks like this,
109 // when there's a mismatch, it's because of this reason -- the typeInfo::AreEquivalentModuloNativeInt
110 // method used in the last disjunct allows exactly this mismatch.
111 assert(ti.IsDead() || ti.IsByRef() && (tree->TypeGet() == TYP_I_IMPL || tree->TypeGet() == TYP_BYREF) ||
112 ti.IsUnboxedGenericTypeVar() && tree->TypeGet() == TYP_REF ||
113 ti.IsObjRef() && tree->TypeGet() == TYP_REF || ti.IsMethod() && tree->TypeGet() == TYP_I_IMPL ||
114 ti.IsType(TI_STRUCT) && tree->TypeGet() != TYP_REF ||
115 typeInfo::AreEquivalentModuloNativeInt(NormaliseForStack(ti),
116 NormaliseForStack(typeInfo(tree->TypeGet()))));
118 // If it is a struct type, make certain we normalized the primitive types
119 assert(!ti.IsType(TI_STRUCT) ||
120 info.compCompHnd->getTypeForPrimitiveValueClass(ti.GetClassHandle()) == CORINFO_TYPE_UNDEF);
124 if (VERBOSE && tiVerificationNeeded)
127 printf(TI_DUMP_PADDING);
128 printf("About to push to stack: ");
131 #endif // VERBOSE_VERIFY
135 verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo = ti;
136 verCurrentState.esStack[verCurrentState.esStackDepth++].val = tree;
138 if ((tree->gtType == TYP_LONG) && (compLongUsed == false))
142 else if (((tree->gtType == TYP_FLOAT) || (tree->gtType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
144 compFloatingPointUsed = true;
/******************************************************************************/
// Used in the inliner, where we can assume typesafe code. Please don't use in the importer!!
inline void Compiler::impPushOnStackNoType(GenTreePtr tree)
152 assert(verCurrentState.esStackDepth < impStkSize);
153 INDEBUG(verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo = typeInfo());
154 verCurrentState.esStack[verCurrentState.esStackDepth++].val = tree;
156 if ((tree->gtType == TYP_LONG) && (compLongUsed == false))
160 else if (((tree->gtType == TYP_FLOAT) || (tree->gtType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
162 compFloatingPointUsed = true;
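// Push a null object reference (a zero TYP_REF constant with TI_NULL type info) on the stack.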
166 inline void Compiler::impPushNullObjRefOnStack()
168 impPushOnStack(gtNewIconNode(0, TYP_REF), typeInfo(TI_NULL));
171 // This method gets called when we run into unverifiable code
172 // (and we are verifying the method)
174 inline void Compiler::verRaiseVerifyExceptionIfNeeded(INDEBUG(const char* msg) DEBUGARG(const char* file)
175 DEBUGARG(unsigned line))
177 // Remember that the code is not verifiable
178 // Note that the method may yet pass canSkipMethodVerification(),
179 // and so the presence of unverifiable code may not be an issue.
180 tiIsVerifiableCode = FALSE;
183 const char* tail = strrchr(file, '\\');
189 if (JitConfig.JitBreakOnUnsafeCode())
191 assert(!"Unsafe code detected");
195 JITLOG((LL_INFO10000, "Detected unsafe code: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
196 msg, info.compFullName, impCurOpcName, impCurOpcOffs));
198 if (verNeedsVerification() || compIsForImportOnly())
200 JITLOG((LL_ERROR, "Verification failure: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
201 msg, info.compFullName, impCurOpcName, impCurOpcOffs));
202 verRaiseVerifyException(INDEBUG(msg) DEBUGARG(file) DEBUGARG(line));
206 inline void DECLSPEC_NORETURN Compiler::verRaiseVerifyException(INDEBUG(const char* msg) DEBUGARG(const char* file)
207 DEBUGARG(unsigned line))
209 JITLOG((LL_ERROR, "Verification failure: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
210 msg, info.compFullName, impCurOpcName, impCurOpcOffs));
213 // BreakIfDebuggerPresent();
214 if (getBreakOnBadCode())
216 assert(!"Typechecking error");
220 RaiseException(SEH_VERIFICATION_EXCEPTION, EXCEPTION_NONCONTINUABLE, 0, nullptr);
// Helper function that tells us whether the IL instruction at the given address
// consumes an address from the top of the stack. We use it to avoid unnecessarily
// marking a local as address-taken.
bool Compiler::impILConsumesAddr(const BYTE* codeAddr, CORINFO_METHOD_HANDLE fncHandle, CORINFO_MODULE_HANDLE scpHandle)
229 assert(!compIsForInlining());
233 opcode = (OPCODE)getU1LittleEndian(codeAddr);
// case CEE_LDFLDA: We're taking this one out because if you have a sequence like
// ldloca.0; ldflda <field> of a primitive-like struct, you end up after morphing
// with the address of a local that's not marked as address-taken, which is wrong.
// Also, ldflda is usually used for structs that contain other structs, which isn't
// a case we handle very well now for other reasons.

// We won't collapse small fields. This is probably not the right place to have this
// check, but we're only using the function for this purpose, and it is easy to factor
// out if we need to do so.
254 CORINFO_RESOLVED_TOKEN resolvedToken;
255 impResolveToken(codeAddr + sizeof(__int8), &resolvedToken, CORINFO_TOKENKIND_Field);
257 CORINFO_CLASS_HANDLE clsHnd;
258 var_types lclTyp = JITtype2varType(info.compCompHnd->getFieldType(resolvedToken.hField, &clsHnd));
260 // Preserve 'small' int types
261 if (lclTyp > TYP_INT)
263 lclTyp = genActualType(lclTyp);
266 if (varTypeIsSmall(lclTyp))
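// Resolve the given IL token in the context of the method being imported, filling in
// '*pResolvedToken'. Under verification, a resolution failure raises a verification error
// instead of throwing.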
280 void Compiler::impResolveToken(const BYTE* addr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoTokenKind kind)
282 pResolvedToken->tokenContext = impTokenLookupContextHandle;
283 pResolvedToken->tokenScope = info.compScopeHnd;
284 pResolvedToken->token = getU4LittleEndian(addr);
285 pResolvedToken->tokenType = kind;
287 if (!tiVerificationNeeded)
289 info.compCompHnd->resolveToken(pResolvedToken);
293 Verify(eeTryResolveToken(pResolvedToken), "Token resolution failed");
297 /*****************************************************************************
299 * Pop one tree from the stack.
302 StackEntry Compiler::impPopStack()
304 if (verCurrentState.esStackDepth == 0)
306 BADCODE("stack underflow");
311 if (VERBOSE && tiVerificationNeeded)
314 printf(TI_DUMP_PADDING);
315 printf("About to pop from the stack: ");
316 const typeInfo& ti = verCurrentState.esStack[verCurrentState.esStackDepth - 1].seTypeInfo;
319 #endif // VERBOSE_VERIFY
322 return verCurrentState.esStack[--verCurrentState.esStackDepth];
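// Pop one tree from the stack, also returning the class handle recorded in its stack type info.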
325 StackEntry Compiler::impPopStack(CORINFO_CLASS_HANDLE& structType)
327 StackEntry ret = impPopStack();
328 structType = verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo.GetClassHandle();
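// Pop one tree from the stack, returning its verification type info through 'ti'.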
332 GenTreePtr Compiler::impPopStack(typeInfo& ti)
334 StackEntry ret = impPopStack();
/*****************************************************************************
 *
 * Peek at the n'th (0-based) tree on the top of the stack.
 */

StackEntry& Compiler::impStackTop(unsigned n)
346 if (verCurrentState.esStackDepth <= n)
348 BADCODE("stack underflow");
351 return verCurrentState.esStack[verCurrentState.esStackDepth - n - 1];
/*****************************************************************************
 * Some of the trees are spilled specially. While unspilling them, or
 * making a copy, these need to be handled specially. This function
 * enumerates the operators that are possible after spilling.
 */

#ifdef DEBUG // only used in asserts
static bool impValidSpilledStackEntry(GenTreePtr tree)
362 if (tree->gtOper == GT_LCL_VAR)
367 if (tree->OperIsConst())
376 /*****************************************************************************
378 * The following logic is used to save/restore stack contents.
379 * If 'copy' is true, then we make a copy of the trees on the stack. These
380 * have to all be cloneable/spilled values.
383 void Compiler::impSaveStackState(SavedStack* savePtr, bool copy)
385 savePtr->ssDepth = verCurrentState.esStackDepth;
387 if (verCurrentState.esStackDepth)
389 savePtr->ssTrees = new (this, CMK_ImpStack) StackEntry[verCurrentState.esStackDepth];
390 size_t saveSize = verCurrentState.esStackDepth * sizeof(*savePtr->ssTrees);
394 StackEntry* table = savePtr->ssTrees;
396 /* Make a fresh copy of all the stack entries */
398 for (unsigned level = 0; level < verCurrentState.esStackDepth; level++, table++)
400 table->seTypeInfo = verCurrentState.esStack[level].seTypeInfo;
401 GenTreePtr tree = verCurrentState.esStack[level].val;
403 assert(impValidSpilledStackEntry(tree));
405 switch (tree->gtOper)
412 table->val = gtCloneExpr(tree);
416 assert(!"Bad oper - Not covered by impValidSpilledStackEntry()");
423 memcpy(savePtr->ssTrees, verCurrentState.esStack, saveSize);
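// Restore the stack contents previously captured by impSaveStackState.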
428 void Compiler::impRestoreStackState(SavedStack* savePtr)
430 verCurrentState.esStackDepth = savePtr->ssDepth;
432 if (verCurrentState.esStackDepth)
434 memcpy(verCurrentState.esStack, savePtr->ssTrees,
435 verCurrentState.esStackDepth * sizeof(*verCurrentState.esStack));
439 /*****************************************************************************
441 * Get the tree list started for a new basic block.
443 inline void Compiler::impBeginTreeList()
445 assert(impTreeList == nullptr && impTreeLast == nullptr);
447 impTreeList = impTreeLast = new (this, GT_BEG_STMTS) GenTree(GT_BEG_STMTS, TYP_VOID);
/*****************************************************************************
 * Store the given start and end stmt in the given basic block. This is
 * mostly called by impEndTreeList(BasicBlock *block). It is called
 * directly only for handling CEE_LEAVEs out of finally-protected try regions.
 */
457 inline void Compiler::impEndTreeList(BasicBlock* block, GenTreePtr firstStmt, GenTreePtr lastStmt)
459 assert(firstStmt->gtOper == GT_STMT);
460 assert(lastStmt->gtOper == GT_STMT);
462 /* Make the list circular, so that we can easily walk it backwards */
464 firstStmt->gtPrev = lastStmt;
466 /* Store the tree list in the basic block */
468 block->bbTreeList = firstStmt;
470 /* The block should not already be marked as imported */
471 assert((block->bbFlags & BBF_IMPORTED) == 0);
473 block->bbFlags |= BBF_IMPORTED;
476 /*****************************************************************************
478 * Store the current tree list in the given basic block.
481 inline void Compiler::impEndTreeList(BasicBlock* block)
483 assert(impTreeList->gtOper == GT_BEG_STMTS);
485 GenTreePtr firstTree = impTreeList->gtNext;
489 /* The block should not already be marked as imported */
490 assert((block->bbFlags & BBF_IMPORTED) == 0);
492 // Empty block. Just mark it as imported
493 block->bbFlags |= BBF_IMPORTED;
497 // Ignore the GT_BEG_STMTS
498 assert(firstTree->gtPrev == impTreeList);
500 impEndTreeList(block, firstTree, impTreeLast);
504 if (impLastILoffsStmt != nullptr)
506 impLastILoffsStmt->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
507 impLastILoffsStmt = nullptr;
510 impTreeList = impTreeLast = nullptr;
/*****************************************************************************
 * Check that storing the given tree doesn't mess up the semantic order. Note
 * that this has only limited value as we can only check [0..chkLevel).
 */
520 inline void Compiler::impAppendStmtCheck(GenTreePtr stmt, unsigned chkLevel)
525 assert(stmt->gtOper == GT_STMT);
527 if (chkLevel == (unsigned)CHECK_SPILL_ALL)
529 chkLevel = verCurrentState.esStackDepth;
532 if (verCurrentState.esStackDepth == 0 || chkLevel == 0 || chkLevel == (unsigned)CHECK_SPILL_NONE)
537 GenTreePtr tree = stmt->gtStmt.gtStmtExpr;
// Calls can only be appended if there is nothing with GTF_GLOB_EFFECT on the stack

if (tree->gtFlags & GTF_CALL)
543 for (unsigned level = 0; level < chkLevel; level++)
545 assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_EFFECT) == 0);
549 if (tree->gtOper == GT_ASG)
// For an assignment to a local variable, all references to that
// variable have to be spilled. If it is aliased, all calls and
// indirect accesses have to be spilled.
555 if (tree->gtOp.gtOp1->gtOper == GT_LCL_VAR)
557 unsigned lclNum = tree->gtOp.gtOp1->gtLclVarCommon.gtLclNum;
558 for (unsigned level = 0; level < chkLevel; level++)
560 assert(!gtHasRef(verCurrentState.esStack[level].val, lclNum, false));
561 assert(!lvaTable[lclNum].lvAddrExposed ||
562 (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT) == 0);
566 // If the access may be to global memory, all side effects have to be spilled.
568 else if (tree->gtOp.gtOp1->gtFlags & GTF_GLOB_REF)
570 for (unsigned level = 0; level < chkLevel; level++)
572 assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_REF) == 0);
579 /*****************************************************************************
581 * Append the given GT_STMT node to the current block's tree list.
582 * [0..chkLevel) is the portion of the stack which we will check for
583 * interference with stmt and spill if needed.
586 inline void Compiler::impAppendStmt(GenTreePtr stmt, unsigned chkLevel)
588 assert(stmt->gtOper == GT_STMT);
589 noway_assert(impTreeLast != nullptr);
591 /* If the statement being appended has any side-effects, check the stack
592 to see if anything needs to be spilled to preserve correct ordering. */
594 GenTreePtr expr = stmt->gtStmt.gtStmtExpr;
595 unsigned flags = expr->gtFlags & GTF_GLOB_EFFECT;
// Assignments to (unaliased) locals don't count as a side effect, as
// we handle them specially using impSpillLclRefs(). Temp locals should
// be fine too.
601 if ((expr->gtOper == GT_ASG) && (expr->gtOp.gtOp1->gtOper == GT_LCL_VAR) &&
602 !(expr->gtOp.gtOp1->gtFlags & GTF_GLOB_REF) && !gtHasLocalsWithAddrOp(expr->gtOp.gtOp2))
604 unsigned op2Flags = expr->gtOp.gtOp2->gtFlags & GTF_GLOB_EFFECT;
605 assert(flags == (op2Flags | GTF_ASG));
609 if (chkLevel == (unsigned)CHECK_SPILL_ALL)
611 chkLevel = verCurrentState.esStackDepth;
614 if (chkLevel && chkLevel != (unsigned)CHECK_SPILL_NONE)
616 assert(chkLevel <= verCurrentState.esStackDepth);
620 // If there is a call, we have to spill global refs
621 bool spillGlobEffects = (flags & GTF_CALL) ? true : false;
623 if (expr->gtOper == GT_ASG)
625 GenTree* lhs = expr->gtGetOp1();
626 // If we are assigning to a global ref, we have to spill global refs on stack.
627 // TODO-1stClassStructs: Previously, spillGlobEffects was set to true for
628 // GT_INITBLK and GT_COPYBLK, but this is overly conservative, and should be
629 // revisited. (Note that it was NOT set to true for GT_COPYOBJ.)
630 if (!expr->OperIsBlkOp())
632 // If we are assigning to a global ref, we have to spill global refs on stack
633 if ((lhs->gtFlags & GTF_GLOB_REF) != 0)
635 spillGlobEffects = true;
638 else if ((lhs->OperIsBlk() && !lhs->AsBlk()->HasGCPtr()) ||
639 ((lhs->OperGet() == GT_LCL_VAR) &&
640 (lvaTable[lhs->AsLclVarCommon()->gtLclNum].lvStructGcCount == 0)))
642 spillGlobEffects = true;
646 impSpillSideEffects(spillGlobEffects, chkLevel DEBUGARG("impAppendStmt"));
650 impSpillSpecialSideEff();
654 impAppendStmtCheck(stmt, chkLevel);
656 /* Point 'prev' at the previous node, so that we can walk backwards */
658 stmt->gtPrev = impTreeLast;
660 /* Append the expression statement to the list */
662 impTreeLast->gtNext = stmt;
666 impMarkContiguousSIMDFieldAssignments(stmt);
669 /* Once we set impCurStmtOffs in an appended tree, we are ready to
670 report the following offsets. So reset impCurStmtOffs */
672 if (impTreeLast->gtStmt.gtStmtILoffsx == impCurStmtOffs)
674 impCurStmtOffsSet(BAD_IL_OFFSET);
678 if (impLastILoffsStmt == nullptr)
680 impLastILoffsStmt = stmt;
691 /*****************************************************************************
693 * Insert the given GT_STMT "stmt" before GT_STMT "stmtBefore"
696 inline void Compiler::impInsertStmtBefore(GenTreePtr stmt, GenTreePtr stmtBefore)
698 assert(stmt->gtOper == GT_STMT);
699 assert(stmtBefore->gtOper == GT_STMT);
701 GenTreePtr stmtPrev = stmtBefore->gtPrev;
702 stmt->gtPrev = stmtPrev;
703 stmt->gtNext = stmtBefore;
704 stmtPrev->gtNext = stmt;
705 stmtBefore->gtPrev = stmt;
708 /*****************************************************************************
710 * Append the given expression tree to the current block's tree list.
711 * Return the newly created statement.
714 GenTreePtr Compiler::impAppendTree(GenTreePtr tree, unsigned chkLevel, IL_OFFSETX offset)
718 /* Allocate an 'expression statement' node */
720 GenTreePtr expr = gtNewStmt(tree, offset);
722 /* Append the statement to the current block's stmt list */
724 impAppendStmt(expr, chkLevel);
/*****************************************************************************
 * Insert the given expression tree before GT_STMT "stmtBefore".
 */
734 void Compiler::impInsertTreeBefore(GenTreePtr tree, IL_OFFSETX offset, GenTreePtr stmtBefore)
736 assert(stmtBefore->gtOper == GT_STMT);
738 /* Allocate an 'expression statement' node */
740 GenTreePtr expr = gtNewStmt(tree, offset);
/* Insert the statement before 'stmtBefore' */

impInsertStmtBefore(expr, stmtBefore);
747 /*****************************************************************************
749 * Append an assignment of the given value to a temp to the current tree list.
750 * curLevel is the stack level for which the spill to the temp is being done.
753 void Compiler::impAssignTempGen(unsigned tmp,
756 GenTreePtr* pAfterStmt, /* = NULL */
757 IL_OFFSETX ilOffset, /* = BAD_IL_OFFSET */
758 BasicBlock* block /* = NULL */
761 GenTreePtr asg = gtNewTempAssign(tmp, val);
763 if (!asg->IsNothingNode())
767 GenTreePtr asgStmt = gtNewStmt(asg, ilOffset);
768 *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, asgStmt);
772 impAppendTree(asg, curLevel, impCurStmtOffs);
/*****************************************************************************
 * Same as above, but handles the value-class case too.
 */
781 void Compiler::impAssignTempGen(unsigned tmpNum,
783 CORINFO_CLASS_HANDLE structType,
785 GenTreePtr* pAfterStmt, /* = NULL */
786 IL_OFFSETX ilOffset, /* = BAD_IL_OFFSET */
787 BasicBlock* block /* = NULL */
792 if (varTypeIsStruct(val))
794 assert(tmpNum < lvaCount);
795 assert(structType != NO_CLASS_HANDLE);
// If the method is non-verifiable, the assert below is not true, so at least
// ignore it when verification is turned on, since any block that tries to use
// the temp would have failed verification.
800 var_types varType = lvaTable[tmpNum].lvType;
801 assert(tiVerificationNeeded || varType == TYP_UNDEF || varTypeIsStruct(varType));
802 lvaSetStruct(tmpNum, structType, false);
804 // Now, set the type of the struct value. Note that lvaSetStruct may modify the type
805 // of the lclVar to a specialized type (e.g. TYP_SIMD), based on the handle (structType)
806 // that has been passed in for the value being assigned to the temp, in which case we
807 // need to set 'val' to that same type.
808 // Note also that if we always normalized the types of any node that might be a struct
809 // type, this would not be necessary - but that requires additional JIT/EE interface
810 // calls that may not actually be required - e.g. if we only access a field of a struct.
812 val->gtType = lvaTable[tmpNum].lvType;
814 GenTreePtr dst = gtNewLclvNode(tmpNum, val->gtType);
815 asg = impAssignStruct(dst, val, structType, curLevel, pAfterStmt, block);
819 asg = gtNewTempAssign(tmpNum, val);
822 if (!asg->IsNothingNode())
826 GenTreePtr asgStmt = gtNewStmt(asg, ilOffset);
827 *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, asgStmt);
831 impAppendTree(asg, curLevel, impCurStmtOffs);
/*****************************************************************************
 * Pop the given number of values from the stack and return a list node with
 * their values.
 * The 'prefixTree' argument may optionally contain an argument
 * list that is prepended to the list returned from this function.
 *
 * The notion of prepended is a bit misleading in that the list is backwards
 * from the way one would expect: the first element popped is at the end of
 * the returned list, and prefixTree is 'before' that, meaning closer to
 * the end of the list. To get to prefixTree, you have to walk to the
 * end of the list.
 *
 * For ARG_ORDER_R2L, prefixTree is only used to insert extra arguments; as
 * such, we reverse its meaning so that the return value has a reversed
 * prefixTree at the head of the list.
 */
854 GenTreeArgList* Compiler::impPopList(unsigned count,
856 CORINFO_SIG_INFO* sig,
857 GenTreeArgList* prefixTree)
859 assert(sig == nullptr || count == sig->numArgs);
862 CORINFO_CLASS_HANDLE structType;
863 GenTreeArgList* treeList;
865 if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L)
871 treeList = prefixTree;
876 StackEntry se = impPopStack();
877 typeInfo ti = se.seTypeInfo;
878 GenTreePtr temp = se.val;
880 if (varTypeIsStruct(temp))
882 // Morph trees that aren't already OBJs or MKREFANY to be OBJs
883 assert(ti.IsType(TI_STRUCT));
884 structType = ti.GetClassHandleForValueClass();
885 temp = impNormStructVal(temp, structType, (unsigned)CHECK_SPILL_ALL);
888 /* NOTE: we defer bashing the type for I_IMPL to fgMorphArgs */
889 flags |= temp->gtFlags;
890 treeList = gtNewListNode(temp, treeList);
897 if (sig->retTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS &&
898 sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR && sig->retType != CORINFO_TYPE_VAR)
// Make sure that all valuetypes (including enums) that we push are loaded.
// This is to guarantee that if a GC is triggered from the prestub of this method,
// all valuetypes in the method signature are already loaded.
// We need to be able to find the size of the valuetypes, but we cannot
// do a class-load from within GC.
905 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(sig->retTypeSigClass);
908 CORINFO_ARG_LIST_HANDLE argLst = sig->args;
909 CORINFO_CLASS_HANDLE argClass;
910 CORINFO_CLASS_HANDLE argRealClass;
911 GenTreeArgList* args;
914 for (args = treeList, count = sig->numArgs; count > 0; args = args->Rest(), count--)
916 PREFIX_ASSUME(args != nullptr);
918 CorInfoType corType = strip(info.compCompHnd->getArgType(sig, argLst, &argClass));
920 // insert implied casts (from float to double or double to float)
922 if (corType == CORINFO_TYPE_DOUBLE && args->Current()->TypeGet() == TYP_FLOAT)
924 args->Current() = gtNewCastNode(TYP_DOUBLE, args->Current(), TYP_DOUBLE);
926 else if (corType == CORINFO_TYPE_FLOAT && args->Current()->TypeGet() == TYP_DOUBLE)
928 args->Current() = gtNewCastNode(TYP_FLOAT, args->Current(), TYP_FLOAT);
931 // insert any widening or narrowing casts for backwards compatibility
933 args->Current() = impImplicitIorI4Cast(args->Current(), JITtype2varType(corType));
935 if (corType != CORINFO_TYPE_CLASS && corType != CORINFO_TYPE_BYREF && corType != CORINFO_TYPE_PTR &&
936 corType != CORINFO_TYPE_VAR && (argRealClass = info.compCompHnd->getArgClass(sig, argLst)) != nullptr)
// Everett MC++ could generate IL with mismatched valuetypes. It used to work with the Everett JIT,
// but it stopped working in Whidbey when we started passing simple valuetypes as their underlying
// primitive types. We will try to adjust for this case here to avoid breaking customer code (see
// VSW 485789 for details).
943 if (corType == CORINFO_TYPE_VALUECLASS && !varTypeIsStruct(args->Current()))
945 args->Current() = impNormStructVal(args->Current(), argRealClass, (unsigned)CHECK_SPILL_ALL, true);
// Make sure that all valuetypes (including enums) that we push are loaded.
// This is to guarantee that if a GC is triggered from the prestub of this method,
// all valuetypes in the method signature are already loaded.
// We need to be able to find the size of the valuetypes, but we cannot
// do a class-load from within GC.
953 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(argRealClass);
956 argLst = info.compCompHnd->getArgNext(argLst);
960 if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L)
962 // Prepend the prefixTree
964 // Simple in-place reversal to place treeList
965 // at the end of a reversed prefixTree
966 while (prefixTree != nullptr)
968 GenTreeArgList* next = prefixTree->Rest();
969 prefixTree->Rest() = treeList;
970 treeList = prefixTree;
977 /*****************************************************************************
979 * Pop the given number of values from the stack in reverse order (STDCALL/CDECL etc.)
980 * The first "skipReverseCount" items are not reversed.
983 GenTreeArgList* Compiler::impPopRevList(unsigned count,
985 CORINFO_SIG_INFO* sig,
986 unsigned skipReverseCount)
989 assert(skipReverseCount <= count);
991 GenTreeArgList* list = impPopList(count, flagsPtr, sig);
994 if (list == nullptr || skipReverseCount == count)
999 GenTreeArgList* ptr = nullptr; // Initialized to the first node that needs to be reversed
1000 GenTreeArgList* lastSkipNode = nullptr; // Will be set to the last node that does not need to be reversed
1002 if (skipReverseCount == 0)
1008 lastSkipNode = list;
1009 // Get to the first node that needs to be reversed
1010 for (unsigned i = 0; i < skipReverseCount - 1; i++)
1012 lastSkipNode = lastSkipNode->Rest();
1015 PREFIX_ASSUME(lastSkipNode != nullptr);
1016 ptr = lastSkipNode->Rest();
1019 GenTreeArgList* reversedList = nullptr;
1023 GenTreeArgList* tmp = ptr->Rest();
1024 ptr->Rest() = reversedList;
1027 } while (ptr != nullptr);
1029 if (skipReverseCount)
1031 lastSkipNode->Rest() = reversedList;
1036 return reversedList;
/*****************************************************************************
 * Assign (copy) the structure from 'src' to 'dest'. The structure is a value
 * class of type 'structHnd'. It returns the tree that should be appended to the
 * statement list that represents the assignment.
 * Temp assignments may be appended to impTreeList if spilling is necessary.
 * curLevel is the stack level for which a spill may be done.
 */
1048 GenTreePtr Compiler::impAssignStruct(GenTreePtr dest,
1050 CORINFO_CLASS_HANDLE structHnd,
1052 GenTreePtr* pAfterStmt, /* = NULL */
1053 BasicBlock* block /* = NULL */
1056 assert(varTypeIsStruct(dest));
1058 while (dest->gtOper == GT_COMMA)
1060 assert(varTypeIsStruct(dest->gtOp.gtOp2)); // Second thing is the struct
1062 // Append all the op1 of GT_COMMA trees before we evaluate op2 of the GT_COMMA tree.
1065 *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(dest->gtOp.gtOp1, impCurStmtOffs));
1069 impAppendTree(dest->gtOp.gtOp1, curLevel, impCurStmtOffs); // do the side effect
1072 // set dest to the second thing
1073 dest = dest->gtOp.gtOp2;
1076 assert(dest->gtOper == GT_LCL_VAR || dest->gtOper == GT_RETURN || dest->gtOper == GT_FIELD ||
1077 dest->gtOper == GT_IND || dest->gtOper == GT_OBJ || dest->gtOper == GT_INDEX);
1079 if (dest->OperGet() == GT_LCL_VAR && src->OperGet() == GT_LCL_VAR &&
1080 src->gtLclVarCommon.gtLclNum == dest->gtLclVarCommon.gtLclNum)
1083 return gtNewNothingNode();
1086 // TODO-1stClassStructs: Avoid creating an address if it is not needed,
1087 // or re-creating a Blk node if it is.
1088 GenTreePtr destAddr;
1090 if (dest->gtOper == GT_IND || dest->OperIsBlk())
1092 destAddr = dest->gtOp.gtOp1;
1096 destAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, dest);
1099 return (impAssignStructPtr(destAddr, src, structHnd, curLevel, pAfterStmt, block));
1102 /*****************************************************************************/
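// Assign the struct value 'src' (of class 'structHnd') through the address 'destAddr', returning
// the assignment tree to be appended. Handles call, GT_RET_EXPR, GT_MKREFANY, GT_COMMA and
// block-typed sources.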
1104 GenTreePtr Compiler::impAssignStructPtr(GenTreePtr destAddr,
1106 CORINFO_CLASS_HANDLE structHnd,
1108 GenTreePtr* pAfterStmt, /* = NULL */
1109 BasicBlock* block /* = NULL */
1113 GenTreePtr dest = nullptr;
1114 unsigned destFlags = 0;
1116 #if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1117 assert(varTypeIsStruct(src) || (src->gtOper == GT_ADDR && src->TypeGet() == TYP_BYREF));
1118 // TODO-ARM-BUG: Does ARM need this?
1119 // TODO-ARM64-BUG: Does ARM64 need this?
1120 assert(src->gtOper == GT_LCL_VAR || src->gtOper == GT_FIELD || src->gtOper == GT_IND || src->gtOper == GT_OBJ ||
1121 src->gtOper == GT_CALL || src->gtOper == GT_MKREFANY || src->gtOper == GT_RET_EXPR ||
1122 src->gtOper == GT_COMMA || src->gtOper == GT_ADDR ||
1123 (src->TypeGet() != TYP_STRUCT && (GenTree::OperIsSIMD(src->gtOper) || src->gtOper == GT_LCL_FLD)));
1124 #else // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1125 assert(varTypeIsStruct(src));
1127 assert(src->gtOper == GT_LCL_VAR || src->gtOper == GT_FIELD || src->gtOper == GT_IND || src->gtOper == GT_OBJ ||
1128 src->gtOper == GT_CALL || src->gtOper == GT_MKREFANY || src->gtOper == GT_RET_EXPR ||
1129 src->gtOper == GT_COMMA ||
1130 (src->TypeGet() != TYP_STRUCT && (GenTree::OperIsSIMD(src->gtOper) || src->gtOper == GT_LCL_FLD)));
1131 #endif // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1132 if (destAddr->OperGet() == GT_ADDR)
1134 GenTree* destNode = destAddr->gtGetOp1();
1135 // If the actual destination is a local (for non-LEGACY_BACKEND), or already a block node, or is a node that
1136 // will be morphed, don't insert an OBJ(ADDR).
1137 if (destNode->gtOper == GT_INDEX || destNode->OperIsBlk()
1138 #ifndef LEGACY_BACKEND
1139 || ((destNode->OperGet() == GT_LCL_VAR) && (destNode->TypeGet() == src->TypeGet()))
1140 #endif // !LEGACY_BACKEND
1145 destType = destNode->TypeGet();
1149 destType = src->TypeGet();
1152 var_types asgType = src->TypeGet();
1154 if (src->gtOper == GT_CALL)
1156 if (src->AsCall()->TreatAsHasRetBufArg(this))
1158 // Case of call returning a struct via hidden retbuf arg
1160 // insert the return value buffer into the argument list as first byref parameter
1161 src->gtCall.gtCallArgs = gtNewListNode(destAddr, src->gtCall.gtCallArgs);
1163 // now returns void, not a struct
1164 src->gtType = TYP_VOID;
1166 // return the morphed call node
1171 // Case of call returning a struct in one or more registers.
1173 var_types returnType = (var_types)src->gtCall.gtReturnType;
1175 // We won't use a return buffer, so change the type of src->gtType to 'returnType'
1176 src->gtType = genActualType(returnType);
1178 // First we try to change this to "LclVar/LclFld = call"
1180 if ((destAddr->gtOper == GT_ADDR) && (destAddr->gtOp.gtOp1->gtOper == GT_LCL_VAR))
1182 // If it is a multi-reg struct return, don't change the oper to GT_LCL_FLD.
1183 // That is, the IR will be of the form lclVar = call for multi-reg return
1185 GenTreePtr lcl = destAddr->gtOp.gtOp1;
1186 if (src->AsCall()->HasMultiRegRetVal())
1188 // Mark the struct LclVar as used in a MultiReg return context
1189 // which currently makes it non promotable.
1190 // TODO-1stClassStructs: Eliminate this pessimization when we can more generally
1191 // handle multireg returns.
1192 lcl->gtFlags |= GTF_DONT_CSE;
1193 lvaTable[lcl->gtLclVarCommon.gtLclNum].lvIsMultiRegRet = true;
1195 else // The call result is not a multireg return
1197 // We change this to a GT_LCL_FLD (from a GT_ADDR of a GT_LCL_VAR)
1198 lcl->ChangeOper(GT_LCL_FLD);
1199 fgLclFldAssign(lcl->gtLclVarCommon.gtLclNum);
1202 lcl->gtType = src->gtType;
1203 asgType = src->gtType;
#if defined(_TARGET_ARM_)
// TODO-Cleanup: This should have been taken care of in the above HasMultiRegRetVal() case,
// but that method has not been updated to include ARM.
1209 impMarkLclDstNotPromotable(lcl->gtLclVarCommon.gtLclNum, src, structHnd);
1210 lcl->gtFlags |= GTF_DONT_CSE;
1211 #elif defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
// Not allowed for FEATURE_CORECLR which is the only SKU available for System V OSs.
1213 assert(!src->gtCall.IsVarargs() && "varargs not allowed for System V OSs.");
1215 // Make the struct non promotable. The eightbytes could contain multiple fields.
1216 // TODO-1stClassStructs: Eliminate this pessimization when we can more generally
1217 // handle multireg returns.
1218 // TODO-Cleanup: Why is this needed here? This seems that it will set this even for
1219 // non-multireg returns.
1220 lcl->gtFlags |= GTF_DONT_CSE;
1221 lvaTable[lcl->gtLclVarCommon.gtLclNum].lvIsMultiRegRet = true;
1224 else // we don't have a GT_ADDR of a GT_LCL_VAR
1226 // !!! The destination could be on stack. !!!
1227 // This flag will let us choose the correct write barrier.
1228 asgType = returnType;
1229 destFlags = GTF_IND_TGTANYWHERE;
1233 else if (src->gtOper == GT_RET_EXPR)
1235 GenTreePtr call = src->gtRetExpr.gtInlineCandidate;
1236 noway_assert(call->gtOper == GT_CALL);
1238 if (call->AsCall()->HasRetBufArg())
1240 // insert the return value buffer into the argument list as first byref parameter
1241 call->gtCall.gtCallArgs = gtNewListNode(destAddr, call->gtCall.gtCallArgs);
1243 // now returns void, not a struct
1244 src->gtType = TYP_VOID;
1245 call->gtType = TYP_VOID;
// We have already appended the write to 'dest' to the GT_CALL's args,
// so now we just return an empty node (pruning the GT_RET_EXPR).
1253 // Case of inline method returning a struct in one or more registers.
1255 var_types returnType = (var_types)call->gtCall.gtReturnType;
1257 // We won't need a return buffer
1258 asgType = returnType;
1259 src->gtType = genActualType(returnType);
1260 call->gtType = src->gtType;
1262 // If we've changed the type, and it no longer matches a local destination,
1263 // we must use an indirection.
1264 if ((dest != nullptr) && (dest->OperGet() == GT_LCL_VAR) && (dest->TypeGet() != asgType))
1269 // !!! The destination could be on stack. !!!
1270 // This flag will let us choose the correct write barrier.
1271 destFlags = GTF_IND_TGTANYWHERE;
1274 else if (src->OperIsBlk())
1276 asgType = impNormStructType(structHnd);
1277 if (src->gtOper == GT_OBJ)
1279 assert(src->gtObj.gtClass == structHnd);
1282 else if (src->gtOper == GT_INDEX)
1284 asgType = impNormStructType(structHnd);
1285 assert(src->gtIndex.gtStructElemClass == structHnd);
1287 else if (src->gtOper == GT_MKREFANY)
1289 // Since we are assigning the result of a GT_MKREFANY,
1290 // "destAddr" must point to a refany.
1292 GenTreePtr destAddrClone;
1294 impCloneExpr(destAddr, &destAddrClone, structHnd, curLevel, pAfterStmt DEBUGARG("MKREFANY assignment"));
1296 assert(offsetof(CORINFO_RefAny, dataPtr) == 0);
1297 assert(destAddr->gtType == TYP_I_IMPL || destAddr->gtType == TYP_BYREF);
1298 GetZeroOffsetFieldMap()->Set(destAddr, GetFieldSeqStore()->CreateSingleton(GetRefanyDataField()));
1299 GenTreePtr ptrSlot = gtNewOperNode(GT_IND, TYP_I_IMPL, destAddr);
1300 GenTreeIntCon* typeFieldOffset = gtNewIconNode(offsetof(CORINFO_RefAny, type), TYP_I_IMPL);
1301 typeFieldOffset->gtFieldSeq = GetFieldSeqStore()->CreateSingleton(GetRefanyTypeField());
1302 GenTreePtr typeSlot =
1303 gtNewOperNode(GT_IND, TYP_I_IMPL, gtNewOperNode(GT_ADD, destAddr->gtType, destAddrClone, typeFieldOffset));
1305 // append the assign of the pointer value
1306 GenTreePtr asg = gtNewAssignNode(ptrSlot, src->gtOp.gtOp1);
1309 *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(asg, impCurStmtOffs));
1313 impAppendTree(asg, curLevel, impCurStmtOffs);
1316 // return the assign of the type value, to be appended
1317 return gtNewAssignNode(typeSlot, src->gtOp.gtOp2);
1319 else if (src->gtOper == GT_COMMA)
1321 // The second thing is the struct or its address.
1322 assert(varTypeIsStruct(src->gtOp.gtOp2) || src->gtOp.gtOp2->gtType == TYP_BYREF);
1325 *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(src->gtOp.gtOp1, impCurStmtOffs));
1329 impAppendTree(src->gtOp.gtOp1, curLevel, impCurStmtOffs); // do the side effect
1332 // Evaluate the second thing using recursion.
1333 return impAssignStructPtr(destAddr, src->gtOp.gtOp2, structHnd, curLevel, pAfterStmt, block);
1335 else if (src->IsLocal())
1337 asgType = src->TypeGet();
1339 else if (asgType == TYP_STRUCT)
1341 asgType = impNormStructType(structHnd);
1342 src->gtType = asgType;
1343 #ifdef LEGACY_BACKEND
1344 if (asgType == TYP_STRUCT)
1346 GenTree* srcAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, src);
1347 src = gtNewOperNode(GT_IND, TYP_STRUCT, srcAddr);
1351 if (dest == nullptr)
1353 // TODO-1stClassStructs: We shouldn't really need a block node as the destination
1354 // if this is a known struct type.
1355 if (asgType == TYP_STRUCT)
1357 dest = gtNewObjNode(structHnd, destAddr);
1358 gtSetObjGcInfo(dest->AsObj());
1359 // Although an obj as a call argument was always assumed to be a globRef
1360 // (which is itself overly conservative), that is not true of the operands
1361 // of a block assignment.
1362 dest->gtFlags &= ~GTF_GLOB_REF;
1363 dest->gtFlags |= (destAddr->gtFlags & GTF_GLOB_REF);
1365 else if (varTypeIsStruct(asgType))
1367 dest = new (this, GT_BLK) GenTreeBlk(GT_BLK, asgType, destAddr, genTypeSize(asgType));
1371 dest = gtNewOperNode(GT_IND, asgType, destAddr);
1376 dest->gtType = asgType;
1379 dest->gtFlags |= destFlags;
1380 destFlags = dest->gtFlags;
1382 // return an assignment node, to be appended
1383 GenTree* asgNode = gtNewAssignNode(dest, src);
1384 gtBlockOpInit(asgNode, dest, src, false);
1386 // TODO-1stClassStructs: Clean up the settings of GTF_DONT_CSE on the lhs
1388 if ((destFlags & GTF_DONT_CSE) == 0)
1390 dest->gtFlags &= ~(GTF_DONT_CSE);
/*****************************************************************************
 * Given a struct value, and the class handle for that structure, return
 * the expression for the address of that structure value.
 *
 * willDeref - does the caller guarantee to dereference the pointer.
 */
1402 GenTreePtr Compiler::impGetStructAddr(GenTreePtr structVal,
1403 CORINFO_CLASS_HANDLE structHnd,
1407 assert(varTypeIsStruct(structVal) || eeIsValueClass(structHnd));
1409 var_types type = structVal->TypeGet();
1411 genTreeOps oper = structVal->gtOper;
1413 if (oper == GT_OBJ && willDeref)
1415 assert(structVal->gtObj.gtClass == structHnd);
1416 return (structVal->gtObj.Addr());
1418 else if (oper == GT_CALL || oper == GT_RET_EXPR || oper == GT_OBJ || oper == GT_MKREFANY)
1420 unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));
1422 impAssignTempGen(tmpNum, structVal, structHnd, curLevel);
1424 // The 'return value' is now the temp itself
1426 type = genActualType(lvaTable[tmpNum].TypeGet());
1427 GenTreePtr temp = gtNewLclvNode(tmpNum, type);
1428 temp = gtNewOperNode(GT_ADDR, TYP_BYREF, temp);
1431 else if (oper == GT_COMMA)
1433 assert(structVal->gtOp.gtOp2->gtType == type); // Second thing is the struct
1435 GenTreePtr oldTreeLast = impTreeLast;
1436 structVal->gtOp.gtOp2 = impGetStructAddr(structVal->gtOp.gtOp2, structHnd, curLevel, willDeref);
1437 structVal->gtType = TYP_BYREF;
1439 if (oldTreeLast != impTreeLast)
1441 // Some temp assignment statement was placed on the statement list
1442 // for Op2, but that would be out of order with op1, so we need to
1443 // spill op1 onto the statement list after whatever was last
1444 // before we recursed on Op2 (i.e. before whatever Op2 appended).
1445 impInsertTreeBefore(structVal->gtOp.gtOp1, impCurStmtOffs, oldTreeLast->gtNext);
1446 structVal->gtOp.gtOp1 = gtNewNothingNode();
1452 return (gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
//------------------------------------------------------------------------
// impNormStructType: Given a (known to be) struct class handle structHnd, normalize its type,
//                    and optionally determine the GC layout of the struct.
//
// Arguments:
//    structHnd     - The class handle for the struct type of interest.
//    gcLayout      - (optional, default nullptr) - a BYTE pointer, allocated by the caller,
//                    into which the gcLayout will be written.
//    pNumGCVars    - (optional, default nullptr) - if non-null, a pointer to an unsigned,
//                    which will be set to the number of GC fields in the struct.
//    pSimdBaseType - (optional, default nullptr) - if non-null, and the struct is a SIMD
//                    type, set to the SIMD base type.
//
// Return Value:
//    The JIT type for the struct (e.g. TYP_STRUCT, or TYP_SIMD*).
//    The gcLayout will be returned using the pointers provided by the caller, if non-null.
//    It may also modify the compFloatingPointUsed flag if the type is a SIMD type.
//
// Assumptions:
//    The caller must set gcLayout to nullptr OR ensure that it is large enough
//    (see ICorStaticInfo::getClassGClayout in corinfo.h).
//
// Notes:
//    Normalizing the type involves examining the struct type to determine if it should
//    be modified to one that is handled specially by the JIT, possibly being a candidate
//    for full enregistration, e.g. TYP_SIMD16.
1482 var_types Compiler::impNormStructType(CORINFO_CLASS_HANDLE structHnd,
1484 unsigned* pNumGCVars,
1485 var_types* pSimdBaseType)
1487 assert(structHnd != NO_CLASS_HANDLE);
1489 const DWORD structFlags = info.compCompHnd->getClassAttribs(structHnd);
1490 var_types structType = TYP_STRUCT;
1492 #ifdef FEATURE_CORECLR
1493 const bool hasGCPtrs = (structFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0;
1495 // Desktop CLR won't report FLG_CONTAINS_GC_PTR for RefAnyClass - need to check explicitly.
1496 const bool isRefAny = (structHnd == impGetRefAnyClass());
1497 const bool hasGCPtrs = isRefAny || ((structFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0);
1501 // Check to see if this is a SIMD type.
1502 if (featureSIMD && !hasGCPtrs)
1504 unsigned originalSize = info.compCompHnd->getClassSize(structHnd);
1506 if ((originalSize >= minSIMDStructBytes()) && (originalSize <= maxSIMDStructBytes()))
1508 unsigned int sizeBytes;
1509 var_types simdBaseType = getBaseTypeAndSizeOfSIMDType(structHnd, &sizeBytes);
1510 if (simdBaseType != TYP_UNKNOWN)
1512 assert(sizeBytes == originalSize);
1513 structType = getSIMDTypeForSize(sizeBytes);
1514 if (pSimdBaseType != nullptr)
1516 *pSimdBaseType = simdBaseType;
1518 #ifdef _TARGET_AMD64_
1519 // Amd64: also indicate that we use floating point registers
1520 compFloatingPointUsed = true;
1525 #endif // FEATURE_SIMD
1527 // Fetch GC layout info if requested
1528 if (gcLayout != nullptr)
1530 unsigned numGCVars = info.compCompHnd->getClassGClayout(structHnd, gcLayout);
1532 // Verify that the quick test up above via the class attributes gave a
1533 // safe view of the type's GCness.
1535 // Note there are cases where hasGCPtrs is true but getClassGClayout
1536 // does not report any gc fields.
1537 assert(hasGCPtrs || (numGCVars == 0));
1539 if (pNumGCVars != nullptr)
1541 *pNumGCVars = numGCVars;
1546 // Can't safely ask for number of GC pointers without also
1547 // asking for layout.
1548 assert(pNumGCVars == nullptr);
//****************************************************************************
// Given TYP_STRUCT value 'structVal', make sure it is 'canonical', that is,
// it is either an OBJ or a MKREFANY node, or a node (e.g. GT_INDEX) that will be morphed.
1558 GenTreePtr Compiler::impNormStructVal(GenTreePtr structVal,
1559 CORINFO_CLASS_HANDLE structHnd,
1561 bool forceNormalization /*=false*/)
1563 assert(forceNormalization || varTypeIsStruct(structVal));
1564 assert(structHnd != NO_CLASS_HANDLE);
1565 var_types structType = structVal->TypeGet();
1566 bool makeTemp = false;
1567 if (structType == TYP_STRUCT)
1569 structType = impNormStructType(structHnd);
1571 bool alreadyNormalized = false;
1572 GenTreeLclVarCommon* structLcl = nullptr;
1574 genTreeOps oper = structVal->OperGet();
1577 // GT_RETURN and GT_MKREFANY don't capture the handle.
1581 alreadyNormalized = true;
1585 structVal->gtCall.gtRetClsHnd = structHnd;
1590 structVal->gtRetExpr.gtRetClsHnd = structHnd;
1595 structVal->gtArgPlace.gtArgPlaceClsHnd = structHnd;
1599 // This will be transformed to an OBJ later.
1600 alreadyNormalized = true;
1601 structVal->gtIndex.gtStructElemClass = structHnd;
1602 structVal->gtIndex.gtIndElemSize = info.compCompHnd->getClassSize(structHnd);
1606 // Wrap it in a GT_OBJ.
1607 structVal->gtType = structType;
1608 structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1613 structLcl = structVal->AsLclVarCommon();
1614 // Wrap it in a GT_OBJ.
1615 structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1622 // These should already have the appropriate type.
1623 assert(structVal->gtType == structType);
1624 alreadyNormalized = true;
1628 assert(structVal->gtType == structType);
1629 structVal = gtNewObjNode(structHnd, structVal->gtGetOp1());
1630 alreadyNormalized = true;
1635 assert(varTypeIsSIMD(structVal) && (structVal->gtType == structType));
1637 #endif // FEATURE_SIMD
1641 // The second thing could either be a block node or a GT_SIMD or a GT_COMMA node.
1642 GenTree* blockNode = structVal->gtOp.gtOp2;
1643 assert(blockNode->gtType == structType);
1645 // Is this GT_COMMA(op1, GT_COMMA())?
1646 GenTree* parent = structVal;
1647 if (blockNode->OperGet() == GT_COMMA)
1649 // Find the last node in the comma chain.
1652 assert(blockNode->gtType == structType);
1654 blockNode = blockNode->gtOp.gtOp2;
1655 } while (blockNode->OperGet() == GT_COMMA);
1659 if (blockNode->OperGet() == GT_SIMD)
1661 parent->gtOp.gtOp2 = impNormStructVal(blockNode, structHnd, curLevel, forceNormalization);
1662 alreadyNormalized = true;
1667 assert(blockNode->OperIsBlk());
// Sink the GT_COMMA below the blockNode addr.
// That is, GT_COMMA(op1, op2=blockNode) is transformed into
// blockNode(GT_COMMA(TYP_BYREF, op1, op2's op1)).
//
// In the case of a chained GT_COMMA, we sink the last
// GT_COMMA below the blockNode addr.
1675 GenTree* blockNodeAddr = blockNode->gtOp.gtOp1;
1676 assert(blockNodeAddr->gtType == TYP_BYREF);
1677 GenTree* commaNode = parent;
1678 commaNode->gtType = TYP_BYREF;
1679 commaNode->gtOp.gtOp2 = blockNodeAddr;
1680 blockNode->gtOp.gtOp1 = commaNode;
1681 if (parent == structVal)
1683 structVal = blockNode;
1685 alreadyNormalized = true;
1691 assert(!"Unexpected node in impNormStructVal()");
1694 structVal->gtType = structType;
1695 GenTree* structObj = structVal;
1697 if (!alreadyNormalized || forceNormalization)
1701 unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));
1703 impAssignTempGen(tmpNum, structVal, structHnd, curLevel);
1705 // The structVal is now the temp itself
1707 structLcl = gtNewLclvNode(tmpNum, structType)->AsLclVarCommon();
1708 // TODO-1stClassStructs: Avoid always wrapping in GT_OBJ.
1709 structObj = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structLcl));
1711 else if (varTypeIsStruct(structType) && !structVal->OperIsBlk())
1713 // Wrap it in a GT_OBJ
1714 structObj = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1718 if (structLcl != nullptr)
// An OBJ on an ADDR(LCL_VAR) can never raise an exception,
// so we don't set GTF_EXCEPT here.
1722 if (!lvaIsImplicitByRefLocal(structLcl->gtLclNum))
1724 structObj->gtFlags &= ~GTF_GLOB_REF;
// In general an OBJ is an indirection and could raise an exception.
structObj->gtFlags |= GTF_EXCEPT;
1735 /******************************************************************************/
// Given a type token, generate code that will evaluate to the correct
// handle representation of that token (type handle, field handle, or method handle).
//
// For most cases, the handle is determined at compile-time, and the code
// generated is simply an embedded handle.
//
// Run-time lookup is required if the enclosing method is shared between instantiations
// and the token refers to formal type parameters whose instantiation is not known
// at compile-time.
1746 GenTreePtr Compiler::impTokenToHandle(CORINFO_RESOLVED_TOKEN* pResolvedToken,
1747 BOOL* pRuntimeLookup /* = NULL */,
1748 BOOL mustRestoreHandle /* = FALSE */,
1749 BOOL importParent /* = FALSE */)
1751 assert(!fgGlobalMorph);
1753 CORINFO_GENERICHANDLE_RESULT embedInfo;
1754 info.compCompHnd->embedGenericHandle(pResolvedToken, importParent, &embedInfo);
1758 *pRuntimeLookup = embedInfo.lookup.lookupKind.needsRuntimeLookup;
1761 if (mustRestoreHandle && !embedInfo.lookup.lookupKind.needsRuntimeLookup)
1763 switch (embedInfo.handleType)
1765 case CORINFO_HANDLETYPE_CLASS:
1766 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun((CORINFO_CLASS_HANDLE)embedInfo.compileTimeHandle);
1769 case CORINFO_HANDLETYPE_METHOD:
1770 info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun((CORINFO_METHOD_HANDLE)embedInfo.compileTimeHandle);
1773 case CORINFO_HANDLETYPE_FIELD:
1774 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(
1775 info.compCompHnd->getFieldClass((CORINFO_FIELD_HANDLE)embedInfo.compileTimeHandle));
1783 return impLookupToTree(pResolvedToken, &embedInfo.lookup, gtTokenToIconFlags(pResolvedToken->token),
1784 embedInfo.compileTimeHandle);
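// Build a tree that evaluates to the handle described by 'pLookup': an embedded handle
// (possibly through one indirection) when no runtime lookup is needed, or a
// dictionary-based runtime lookup otherwise.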
1787 GenTreePtr Compiler::impLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
1788 CORINFO_LOOKUP* pLookup,
1789 unsigned handleFlags,
1790 void* compileTimeHandle)
1792 if (!pLookup->lookupKind.needsRuntimeLookup)
// No runtime lookup is required.
// Access is a direct or memory-indirect (of a fixed address) reference.
1797 CORINFO_GENERIC_HANDLE handle = nullptr;
1798 void* pIndirection = nullptr;
1799 assert(pLookup->constLookup.accessType != IAT_PPVALUE);
1801 if (pLookup->constLookup.accessType == IAT_VALUE)
1803 handle = pLookup->constLookup.handle;
1805 else if (pLookup->constLookup.accessType == IAT_PVALUE)
1807 pIndirection = pLookup->constLookup.addr;
1809 return gtNewIconEmbHndNode(handle, pIndirection, handleFlags, 0, nullptr, compileTimeHandle);
1811 else if (compIsForInlining())
1813 // Don't import runtime lookups when inlining
1814 // Inlining has to be aborted in such a case
1815 compInlineResult->NoteFatal(InlineObservation::CALLSITE_GENERIC_DICTIONARY_LOOKUP);
1820 // Need to use dictionary-based access which depends on the typeContext
1821 // which is only available at runtime, not at compile-time.
1823 return impRuntimeLookupToTree(pResolvedToken, pLookup, compileTimeHandle);
1827 #ifdef FEATURE_READYTORUN_COMPILER
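// Build a tree for a ReadyToRun constant lookup: an embedded handle for IAT_VALUE,
// or an indirection through the cell address for IAT_PVALUE.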
1828 GenTreePtr Compiler::impReadyToRunLookupToTree(CORINFO_CONST_LOOKUP* pLookup,
1829 unsigned handleFlags,
1830 void* compileTimeHandle)
1832 CORINFO_GENERIC_HANDLE handle = nullptr;
1833 void* pIndirection = nullptr;
1834 assert(pLookup->accessType != IAT_PPVALUE);
1836 if (pLookup->accessType == IAT_VALUE)
1838 handle = pLookup->handle;
1840 else if (pLookup->accessType == IAT_PVALUE)
1842 pIndirection = pLookup->addr;
1844 return gtNewIconEmbHndNode(handle, pIndirection, handleFlags, 0, nullptr, compileTimeHandle);
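// Create a ReadyToRun helper call node for the given resolved token and attach the
// entry-point lookup provided by the EE.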
1847 GenTreePtr Compiler::impReadyToRunHelperToTree(
1848 CORINFO_RESOLVED_TOKEN* pResolvedToken,
1849 CorInfoHelpFunc helper,
1851 GenTreeArgList* args /* =NULL*/,
1852 CORINFO_LOOKUP_KIND* pGenericLookupKind /* =NULL. Only used with generics */)
1854 CORINFO_CONST_LOOKUP lookup;
1855 #if COR_JIT_EE_VERSION > 460
1856 if (!info.compCompHnd->getReadyToRunHelper(pResolvedToken, pGenericLookupKind, helper, &lookup))
1861 info.compCompHnd->getReadyToRunHelper(pResolvedToken, helper, &lookup);
1864 GenTreePtr op1 = gtNewHelperCallNode(helper, type, GTF_EXCEPT, args);
1866 op1->gtCall.setEntryPoint(lookup);
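// Build a tree that yields a method pointer: a direct GT_FTN_ADDR when the target is
// known, or a runtime lookup for the CORINFO_CALL_CODE_POINTER case.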
1872 GenTreePtr Compiler::impMethodPointer(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo)
1874 GenTreePtr op1 = nullptr;
1876 switch (pCallInfo->kind)
1879 op1 = new (this, GT_FTN_ADDR) GenTreeFptrVal(TYP_I_IMPL, pCallInfo->hMethod);
1881 #ifdef FEATURE_READYTORUN_COMPILER
1882 if (opts.IsReadyToRun())
1884 op1->gtFptrVal.gtEntryPoint = pCallInfo->codePointerLookup.constLookup;
1885 op1->gtFptrVal.gtLdftnResolvedToken = new (this, CMK_Unknown) CORINFO_RESOLVED_TOKEN;
1886 *op1->gtFptrVal.gtLdftnResolvedToken = *pResolvedToken;
1890 op1->gtFptrVal.gtEntryPoint.addr = nullptr;
1895 case CORINFO_CALL_CODE_POINTER:
1896 if (compIsForInlining())
1898 // Don't import runtime lookups when inlining
1899 // Inlining has to be aborted in such a case
1900 compInlineResult->NoteFatal(InlineObservation::CALLSITE_GENERIC_DICTIONARY_LOOKUP);
1904 op1 = impLookupToTree(pResolvedToken, &pCallInfo->codePointerLookup, GTF_ICON_FTN_ADDR, pCallInfo->hMethod);
1908 noway_assert(!"unknown call kind");
//------------------------------------------------------------------------
// getRuntimeContextTree: find the pointer to the context for a runtime lookup.
//
// Arguments:
//    kind - lookup kind.
//
// Return Value:
//    GenTree pointer to the generic shared context.
//
// Notes:
//    Reports that the generic context is used.
1927 GenTreePtr Compiler::getRuntimeContextTree(CORINFO_RUNTIME_LOOKUP_KIND kind)
1929 GenTreePtr ctxTree = nullptr;
// Collectible types require that for shared generic code, if we use the generic context
// parameter, we report it. (This is a conservative approach; we could detect some cases,
// particularly when the context parameter is 'this', where we don't need the eager
// reporting logic.)
1934 lvaGenericsContextUsed = true;
1936 if (kind == CORINFO_LOOKUP_THISOBJ)
1939 ctxTree = gtNewLclvNode(info.compThisArg, TYP_REF);
1941 // Vtable pointer of this object
1942 ctxTree = gtNewOperNode(GT_IND, TYP_I_IMPL, ctxTree);
1943 ctxTree->gtFlags |= GTF_EXCEPT; // Null-pointer exception
1944 ctxTree->gtFlags |= GTF_IND_INVARIANT;
1948 assert(kind == CORINFO_LOOKUP_METHODPARAM || kind == CORINFO_LOOKUP_CLASSPARAM);
1950 ctxTree = gtNewLclvNode(info.compTypeCtxtArg, TYP_I_IMPL); // Exact method descriptor as passed in as last arg
1955 /*****************************************************************************/
/* Import a dictionary lookup to access a handle in code shared between
   generic instantiations.
   The lookup depends on the typeContext which is only available at
   runtime, and not at compile-time.
   pLookup->token1 and pLookup->token2 specify the handle that is needed.
   The cases are:

   1. pLookup->indirections == CORINFO_USEHELPER : Call a helper passing it the
      instantiation-specific handle, and the tokens to lookup the handle.
   2. pLookup->indirections != CORINFO_USEHELPER :
      2a. pLookup->testForNull == false : Dereference the instantiation-specific handle
          to get the handle.
      2b. pLookup->testForNull == true : Dereference the instantiation-specific handle.
          If it is non-NULL, it is the handle required. Else, call a helper
          to lookup the handle.
 */
1973 GenTreePtr Compiler::impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
1974 CORINFO_LOOKUP* pLookup,
1975 void* compileTimeHandle)
// This method can only be called from the importer instance of the Compiler.
// In other words, it cannot be called by the instance of the Compiler for the inlinee.
1980 assert(!compIsForInlining());
1982 GenTreePtr ctxTree = getRuntimeContextTree(pLookup->lookupKind.runtimeLookupKind);
1984 #ifdef FEATURE_READYTORUN_COMPILER
1985 if (opts.IsReadyToRun())
1987 return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL,
1988 gtNewArgList(ctxTree), &pLookup->lookupKind);
1992 CORINFO_RUNTIME_LOOKUP* pRuntimeLookup = &pLookup->runtimeLookup;
1993 // It's available only via the run-time helper function
1994 if (pRuntimeLookup->indirections == CORINFO_USEHELPER)
1996 GenTreeArgList* helperArgs =
1997 gtNewArgList(ctxTree, gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_TOKEN_HDL, 0,
1998 nullptr, compileTimeHandle));
2000 return gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, GTF_EXCEPT, helperArgs);
2004 GenTreePtr slotPtrTree = ctxTree;
2006 if (pRuntimeLookup->testForNull)
2008 slotPtrTree = impCloneExpr(ctxTree, &ctxTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2009 nullptr DEBUGARG("impRuntimeLookup slot"));
2012 // Apply repeated indirections
2013 for (WORD i = 0; i < pRuntimeLookup->indirections; i++)
2017 slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
2018 slotPtrTree->gtFlags |= GTF_IND_NONFAULTING;
2019 slotPtrTree->gtFlags |= GTF_IND_INVARIANT;
2021 if (pRuntimeLookup->offsets[i] != 0)
2024 gtNewOperNode(GT_ADD, TYP_I_IMPL, slotPtrTree, gtNewIconNode(pRuntimeLookup->offsets[i], TYP_I_IMPL));
2028 // No null test required
2029 if (!pRuntimeLookup->testForNull)
2031 if (pRuntimeLookup->indirections == 0)
2036 slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
2037 slotPtrTree->gtFlags |= GTF_IND_NONFAULTING;
2039 if (!pRuntimeLookup->testForFixup)
2044 impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark0"));
2046 GenTreePtr op1 = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2047 nullptr DEBUGARG("impRuntimeLookup test"));
2048 op1 = impImplicitIorI4Cast(op1, TYP_INT); // downcast the pointer to a TYP_INT on 64-bit targets
2050 // Use a GT_AND to check for the lowest bit and indirect if it is set
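// In C-like terms the fixup check computes, roughly (illustrative sketch only):
//     result = ((slot & 1) == 0) ? slot : *(slot - 1);
// i.e. if the low bit is set, the slot still holds a tagged fixup pointer, so strip the bit and indirect once more.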
2051 GenTreePtr testTree = gtNewOperNode(GT_AND, TYP_INT, op1, gtNewIconNode(1));
2052 GenTreePtr relop = gtNewOperNode(GT_EQ, TYP_INT, testTree, gtNewIconNode(0));
2053 relop->gtFlags |= GTF_RELOP_QMARK;
2055 op1 = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2056 nullptr DEBUGARG("impRuntimeLookup indir"));
2057 op1 = gtNewOperNode(GT_ADD, TYP_I_IMPL, op1, gtNewIconNode(-1, TYP_I_IMPL)); // subtract 1 from the pointer
2058 GenTreePtr indirTree = gtNewOperNode(GT_IND, TYP_I_IMPL, op1);
2059 GenTreePtr colon = new (this, GT_COLON) GenTreeColon(TYP_I_IMPL, slotPtrTree, indirTree);
2061 GenTreePtr qmark = gtNewQmarkNode(TYP_I_IMPL, relop, colon);
2063 unsigned tmp = lvaGrabTemp(true DEBUGARG("spilling QMark0"));
2064 impAssignTempGen(tmp, qmark, (unsigned)CHECK_SPILL_NONE);
2065 return gtNewLclvNode(tmp, TYP_I_IMPL);
2068 assert(pRuntimeLookup->indirections != 0);
2070 impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark1"));
2072 // Extract the handle
2073 GenTreePtr handle = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
2074 handle->gtFlags |= GTF_IND_NONFAULTING;
2076 GenTreePtr handleCopy = impCloneExpr(handle, &handle, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2077 nullptr DEBUGARG("impRuntimeLookup typehandle"));
2080 GenTreeArgList* helperArgs =
2081 gtNewArgList(ctxTree, gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_TOKEN_HDL, 0, nullptr,
2082 compileTimeHandle));
2083 GenTreePtr helperCall = gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, GTF_EXCEPT, helperArgs);
2085 // Check for null and possibly call helper
2086 GenTreePtr relop = gtNewOperNode(GT_NE, TYP_INT, handle, gtNewIconNode(0, TYP_I_IMPL));
2087 relop->gtFlags |= GTF_RELOP_QMARK;
2089 GenTreePtr colon = new (this, GT_COLON) GenTreeColon(TYP_I_IMPL,
2090 gtNewNothingNode(), // do nothing if nonnull
2093 GenTreePtr qmark = gtNewQmarkNode(TYP_I_IMPL, relop, colon);
2096 if (handleCopy->IsLocal())
2098 tmp = handleCopy->gtLclVarCommon.gtLclNum;
2102 tmp = lvaGrabTemp(true DEBUGARG("spilling QMark1"));
2105 impAssignTempGen(tmp, qmark, (unsigned)CHECK_SPILL_NONE);
2106 return gtNewLclvNode(tmp, TYP_I_IMPL);
2109 /******************************************************************************
2110 * Spills the stack at verCurrentState.esStack[level] and replaces it with a temp.
2111 * If tnum!=BAD_VAR_NUM, the temp var used to replace the tree is tnum,
2112 * else, grab a new temp.
2113 * For structs (which can be pushed on the stack using obj, etc),
2114 * special handling is needed.
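 *
 * As an illustrative sketch, the effect on the stack entry is roughly:
 *     tempN = <tree at esStack[level]>;            (appended as a new assignment statement)
 *     esStack[level].val = GT_LCL_VAR(tempN);      (the entry now refers to the temp)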
2117 struct RecursiveGuard
2122 m_pAddress = nullptr;
2129 *m_pAddress = false;
2133 void Init(bool* pAddress, bool bInitialize)
2135 assert(pAddress && *pAddress == false && "Recursive guard violation");
2136 m_pAddress = pAddress;
2148 bool Compiler::impSpillStackEntry(unsigned level,
2152 bool bAssertOnRecursion,
2159 RecursiveGuard guard;
2160 guard.Init(&impNestedStackSpill, bAssertOnRecursion);
2163 GenTreePtr tree = verCurrentState.esStack[level].val;
2165 /* Allocate a temp if we haven't been asked to use a particular one */
2167 if (tiVerificationNeeded)
2169 // Ignore bad temp requests (they will happen with bad code and will be
2170 // caught when importing the destblock)
2171 if ((tnum != BAD_VAR_NUM && tnum >= lvaCount) && verNeedsVerification())
2178 if (tnum != BAD_VAR_NUM && (tnum >= lvaCount))
2184 if (tnum == BAD_VAR_NUM)
2186 tnum = lvaGrabTemp(true DEBUGARG(reason));
2188 else if (tiVerificationNeeded && lvaTable[tnum].TypeGet() != TYP_UNDEF)
2190 // if verification is needed and tnum's type is incompatible with
2191 // type on that stack, we grab a new temp. This is safe since
2192 // we will throw a verification exception in the dest block.
2194 var_types valTyp = tree->TypeGet();
2195 var_types dstTyp = lvaTable[tnum].TypeGet();
2197 // if the two types are different, we return. This will only happen with bad code and will
2198 // be caught when importing the destblock. We still allow int/byref and float/double differences.
2199 if ((genActualType(valTyp) != genActualType(dstTyp)) &&
2201 #ifndef _TARGET_64BIT_
2202 (valTyp == TYP_I_IMPL && dstTyp == TYP_BYREF) || (valTyp == TYP_BYREF && dstTyp == TYP_I_IMPL) ||
2203 #endif // !_TARGET_64BIT_
2204 (varTypeIsFloating(dstTyp) && varTypeIsFloating(valTyp))))
2206 if (verNeedsVerification())
2213 /* Assign the spilled entry to the temp */
2214 impAssignTempGen(tnum, tree, verCurrentState.esStack[level].seTypeInfo.GetClassHandle(), level);
2216 // The tree type may be modified by impAssignTempGen, so use the type of the lclVar.
2217 var_types type = genActualType(lvaTable[tnum].TypeGet());
2218 GenTreePtr temp = gtNewLclvNode(tnum, type);
2219 verCurrentState.esStack[level].val = temp;
2224 /*****************************************************************************
2226 * Ensure that the stack has only spilled values
2229 void Compiler::impSpillStackEnsure(bool spillLeaves)
2231 assert(!spillLeaves || opts.compDbgCode);
2233 for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2235 GenTreePtr tree = verCurrentState.esStack[level].val;
2237 if (!spillLeaves && tree->OperIsLeaf())
2242 // Temps introduced by the importer itself don't need to be spilled
2244 bool isTempLcl = (tree->OperGet() == GT_LCL_VAR) && (tree->gtLclVarCommon.gtLclNum >= info.compLocalsCount);
2251 impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillStackEnsure"));
2255 void Compiler::impSpillEvalStack()
2257 for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2259 impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillEvalStack"));
2263 /*****************************************************************************
2265 * If the stack contains any trees with side effects in them, assign those
2266 * trees to temps and append the assignments to the statement list.
2267 * On return the stack is guaranteed to be empty.
2270 inline void Compiler::impEvalSideEffects()
2272 impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("impEvalSideEffects"));
2273 verCurrentState.esStackDepth = 0;
2276 /*****************************************************************************
2278 * If the stack contains any trees with side effects in them, assign those
2279 * trees to temps and replace them on the stack with refs to their temps.
2280 * [0..chkLevel) is the portion of the stack which will be checked and spilled.
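 *
 * A representative call, as used elsewhere in this file (e.g. just before a QMARK is built):
 *     impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("bubbling QMark0"));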
2283 inline void Compiler::impSpillSideEffects(bool spillGlobEffects, unsigned chkLevel DEBUGARG(const char* reason))
2285 assert(chkLevel != (unsigned)CHECK_SPILL_NONE);
2287 /* Before we make any appends to the tree list we must spill the
2288 * "special" side effects (GTF_ORDER_SIDEEFF on a GT_CATCH_ARG) */
2290 impSpillSpecialSideEff();
2292 if (chkLevel == (unsigned)CHECK_SPILL_ALL)
2294 chkLevel = verCurrentState.esStackDepth;
2297 assert(chkLevel <= verCurrentState.esStackDepth);
2299 unsigned spillFlags = spillGlobEffects ? GTF_GLOB_EFFECT : GTF_SIDE_EFFECT;
2301 for (unsigned i = 0; i < chkLevel; i++)
2303 GenTreePtr tree = verCurrentState.esStack[i].val;
2305 GenTreePtr lclVarTree;
2307 if ((tree->gtFlags & spillFlags) != 0 ||
2308 (spillGlobEffects && // Only consider the following when spillGlobEffects == TRUE
2309 !impIsAddressInLocal(tree, &lclVarTree) && // No need to spill the GT_ADDR node on a local.
2310 gtHasLocalsWithAddrOp(tree))) // Spill if we still see GT_LCL_VAR that contains lvHasLdAddrOp or
2311 // lvAddrTaken flag.
2313 impSpillStackEntry(i, BAD_VAR_NUM DEBUGARG(false) DEBUGARG(reason));
2318 /*****************************************************************************
2320 * If the stack contains any trees with special side effects in them, assign
2321 * those trees to temps and replace them on the stack with refs to their temps.
2324 inline void Compiler::impSpillSpecialSideEff()
2326 // Only exception objects need to be carefully handled
2328 if (!compCurBB->bbCatchTyp)
2333 for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2335 GenTreePtr tree = verCurrentState.esStack[level].val;
2336 // Make sure that if there is an exception object anywhere in the subtree, we spill this entry.
2337 if (gtHasCatchArg(tree))
2339 impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillSpecialSideEff"));
2344 /*****************************************************************************
2346 * Spill all stack references to value classes (TYP_STRUCT nodes)
2349 void Compiler::impSpillValueClasses()
2351 for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2353 GenTreePtr tree = verCurrentState.esStack[level].val;
2355 if (fgWalkTreePre(&tree, impFindValueClasses) == WALK_ABORT)
2357 // Tree walk was aborted, which means that we found a
2358 // value class on the stack. Need to spill that
2361 impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillValueClasses"));
2366 /*****************************************************************************
2368 * Callback that checks if a tree node is TYP_STRUCT
2371 Compiler::fgWalkResult Compiler::impFindValueClasses(GenTreePtr* pTree, fgWalkData* data)
2373 fgWalkResult walkResult = WALK_CONTINUE;
2375 if ((*pTree)->gtType == TYP_STRUCT)
2377 // Abort the walk and indicate that we found a value class
2379 walkResult = WALK_ABORT;
2385 /*****************************************************************************
2387 * If the stack contains any trees with references to local #lclNum, assign
2388 * those trees to temps and replace them on the stack with refs to their temps.
2392 void Compiler::impSpillLclRefs(ssize_t lclNum)
2394 /* Before we make any appends to the tree list we must spill the
2395 * "special" side effects (GTF_ORDER_SIDEEFF) - GT_CATCH_ARG */
2397 impSpillSpecialSideEff();
2399 for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2401 GenTreePtr tree = verCurrentState.esStack[level].val;
2403 /* If the tree may throw an exception, and the block has a handler,
2404 then we need to spill assignments to the local if the local is
2405 live on entry to the handler.
2406 Just spill 'em all without considering the liveness */
2408 bool xcptnCaught = ehBlockHasExnFlowDsc(compCurBB) && (tree->gtFlags & (GTF_CALL | GTF_EXCEPT));
2410 /* Skip the tree if it doesn't have an affected reference,
2411 unless xcptnCaught */
2413 if (xcptnCaught || gtHasRef(tree, lclNum, false))
2415 impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillLclRefs"));
2420 /*****************************************************************************
2422 * Push catch arg onto the stack.
2423 * If there are jumps to the beginning of the handler, insert basic block
2424 * and spill catch arg to a temp. Update the handler block if necessary.
2426 * Returns the basic block of the actual handler.
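 *
 * As an illustrative sketch, when the handler entry has extra predecessors the importer builds
 * (with tempNum standing for the grabbed temp):
 *     newBlk (prepended):  tempNum = GT_CATCH_ARG
 *     hndBlk:              push GT_LCL_VAR(tempNum)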
2429 BasicBlock* Compiler::impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_HANDLE clsHnd)
2431 // Do not inject the basic block twice on reimport. This should be
2432 // hit only under JIT stress. See if the block is the one we injected.
2433 // Note that EH canonicalization can inject internal blocks here. We might
2434 // be able to re-use such a block (but we don't, right now).
2435 if ((hndBlk->bbFlags & (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET)) ==
2436 (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET))
2438 GenTreePtr tree = hndBlk->bbTreeList;
2440 if (tree != nullptr && tree->gtOper == GT_STMT)
2442 tree = tree->gtStmt.gtStmtExpr;
2443 assert(tree != nullptr);
2445 if ((tree->gtOper == GT_ASG) && (tree->gtOp.gtOp1->gtOper == GT_LCL_VAR) &&
2446 (tree->gtOp.gtOp2->gtOper == GT_CATCH_ARG))
2448 tree = gtNewLclvNode(tree->gtOp.gtOp1->gtLclVarCommon.gtLclNum, TYP_REF);
2450 impPushOnStack(tree, typeInfo(TI_REF, clsHnd));
2452 return hndBlk->bbNext;
2456 // If we get here, it must have been some other kind of internal block. It's possible that
2457 // someone prepended something to our injected block, but that's unlikely.
2460 /* Push the exception address value on the stack */
2461 GenTreePtr arg = new (this, GT_CATCH_ARG) GenTree(GT_CATCH_ARG, TYP_REF);
2463 /* Mark the node as having a side-effect - i.e. cannot be
2464 * moved around since it is tied to a fixed location (EAX) */
2465 arg->gtFlags |= GTF_ORDER_SIDEEFF;
2467 /* Spill GT_CATCH_ARG to a temp if there are jumps to the beginning of the handler */
2468 if (hndBlk->bbRefs > 1 || compStressCompile(STRESS_CATCH_ARG, 5))
2470 if (hndBlk->bbRefs == 1)
2475 /* Create extra basic block for the spill */
2476 BasicBlock* newBlk = fgNewBBbefore(BBJ_NONE, hndBlk, /* extendRegion */ true);
2477 newBlk->bbFlags |= BBF_IMPORTED | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET;
2478 newBlk->setBBWeight(hndBlk->bbWeight);
2479 newBlk->bbCodeOffs = hndBlk->bbCodeOffs;
2481 /* Account for the new link we are about to create */
2484 /* Spill into a temp */
2485 unsigned tempNum = lvaGrabTemp(false DEBUGARG("SpillCatchArg"));
2486 lvaTable[tempNum].lvType = TYP_REF;
2487 arg = gtNewTempAssign(tempNum, arg);
2489 hndBlk->bbStkTempsIn = tempNum;
2491 /* Report the debug info. impImportBlockCode won't treat
2492 * the actual handler as an exception block and thus won't do it for us. */
2493 if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES)
2495 impCurStmtOffs = newBlk->bbCodeOffs | IL_OFFSETX_STKBIT;
2496 arg = gtNewStmt(arg, impCurStmtOffs);
2499 fgInsertStmtAtEnd(newBlk, arg);
2501 arg = gtNewLclvNode(tempNum, TYP_REF);
2504 impPushOnStack(arg, typeInfo(TI_REF, clsHnd));
2509 /*****************************************************************************
2511 * Given a tree, clone it. *pClone is set to the cloned tree.
2512 * Returns the original tree if the cloning was easy,
2513 * else returns the temp to which the tree had to be spilled.
2514 * If the tree has side-effects, it will be spilled to a temp.
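 *
 * A representative call, as used above in impRuntimeLookupToTree:
 *     op1 = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
 *                        nullptr DEBUGARG("impRuntimeLookup test"));
 * After this, op1 and slotPtrTree can each be evaluated once without duplicating side effects.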
2517 GenTreePtr Compiler::impCloneExpr(GenTreePtr tree,
2519 CORINFO_CLASS_HANDLE structHnd,
2521 GenTreePtr* pAfterStmt DEBUGARG(const char* reason))
2523 if (!(tree->gtFlags & GTF_GLOB_EFFECT))
2525 GenTreePtr clone = gtClone(tree, true);
2534 /* Store the operand in a temp and return the temp */
2536 unsigned temp = lvaGrabTemp(true DEBUGARG(reason));
2538 // impAssignTempGen() may change tree->gtType to TYP_VOID for calls which
2539 // return a struct type. It also may modify the struct type to a more
2540 // specialized type (e.g. a SIMD type). So we will get the type from
2541 // the lclVar AFTER calling impAssignTempGen().
2543 impAssignTempGen(temp, tree, structHnd, curLevel, pAfterStmt, impCurStmtOffs);
2544 var_types type = genActualType(lvaTable[temp].TypeGet());
2546 *pClone = gtNewLclvNode(temp, type);
2547 return gtNewLclvNode(temp, type);
2550 /*****************************************************************************
2551 * Remember the IL offset (including stack-empty info) for the trees we will generate now.
2555 inline void Compiler::impCurStmtOffsSet(IL_OFFSET offs)
2557 if (compIsForInlining())
2559 GenTreePtr callStmt = impInlineInfo->iciStmt;
2560 assert(callStmt->gtOper == GT_STMT);
2561 impCurStmtOffs = callStmt->gtStmt.gtStmtILoffsx;
2565 assert(offs == BAD_IL_OFFSET || (offs & IL_OFFSETX_BITS) == 0);
2566 IL_OFFSETX stkBit = (verCurrentState.esStackDepth > 0) ? IL_OFFSETX_STKBIT : 0;
2567 impCurStmtOffs = offs | stkBit;
2571 /*****************************************************************************
2572 * Returns current IL offset with stack-empty and call-instruction info incorporated
2574 inline IL_OFFSETX Compiler::impCurILOffset(IL_OFFSET offs, bool callInstruction)
2576 if (compIsForInlining())
2578 return BAD_IL_OFFSET;
2582 assert(offs == BAD_IL_OFFSET || (offs & IL_OFFSETX_BITS) == 0);
2583 IL_OFFSETX stkBit = (verCurrentState.esStackDepth > 0) ? IL_OFFSETX_STKBIT : 0;
2584 IL_OFFSETX callInstructionBit = callInstruction ? IL_OFFSETX_CALLINSTRUCTIONBIT : 0;
2585 return offs | stkBit | callInstructionBit;
2589 /*****************************************************************************
2591 * Remember the instr offset for the statements
2593 * When we do impAppendTree(tree), we can't set tree->gtStmtLastILoffs to
2594 * impCurOpcOffs, if the append was done because of a partial stack spill,
2595 * as some of the trees corresponding to code up to impCurOpcOffs might
2596 * still be sitting on the stack.
2597 * So we delay marking of gtStmtLastILoffs until impNoteLastILoffs().
2598 * This should be called when an opcode finally/explicitly causes
2599 * impAppendTree(tree) to be called (as opposed to being called because of
2600 * a spill caused by the opcode)
2605 void Compiler::impNoteLastILoffs()
2607 if (impLastILoffsStmt == nullptr)
2609 // We should have added a statement for the current basic block
2610 // Is this assert correct ?
2612 assert(impTreeLast);
2613 assert(impTreeLast->gtOper == GT_STMT);
2615 impTreeLast->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
2619 impLastILoffsStmt->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
2620 impLastILoffsStmt = nullptr;
2626 /*****************************************************************************
2627 * We don't create any GenTree (excluding spills) for a branch.
2628 * For debugging info, we need a placeholder so that we can note
2629 * the IL offset in gtStmt.gtStmtOffs. So append an empty statement.
2632 void Compiler::impNoteBranchOffs()
2634 if (opts.compDbgCode)
2636 impAppendTree(gtNewNothingNode(), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
2640 /*****************************************************************************
2641 * Locate the next stmt boundary for which we need to record info.
2642 * We will have to spill the stack at such boundaries if it is not already empty.
2644 * Returns the next stmt boundary (after the start of the block)
2647 unsigned Compiler::impInitBlockLineInfo()
2649 /* Assume the block does not correspond with any IL offset. This prevents
2650 us from reporting extra offsets. Extra mappings can cause confusing
2651 stepping, especially if the extra mapping is a jump-target, and the
2652 debugger does not ignore extra mappings, but instead rewinds to the
2653 nearest known offset */
2655 impCurStmtOffsSet(BAD_IL_OFFSET);
2657 if (compIsForInlining())
2662 IL_OFFSET blockOffs = compCurBB->bbCodeOffs;
2664 if ((verCurrentState.esStackDepth == 0) && (info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES))
2666 impCurStmtOffsSet(blockOffs);
2669 if (false && (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES))
2671 impCurStmtOffsSet(blockOffs);
2674 /* Always report IL offset 0 or some tests get confused.
2675 Probably a good idea anyway */
2679 impCurStmtOffsSet(blockOffs);
2682 if (!info.compStmtOffsetsCount)
2687 /* Find the lowest explicit stmt boundary within the block */
2689 /* Start looking at an entry that is based on our instr offset */
2691 unsigned index = (info.compStmtOffsetsCount * blockOffs) / info.compILCodeSize;
2693 if (index >= info.compStmtOffsetsCount)
2695 index = info.compStmtOffsetsCount - 1;
2698 /* If we've guessed too far, back up */
2700 while (index > 0 && info.compStmtOffsets[index - 1] >= blockOffs)
2705 /* If we guessed short, advance ahead */
2707 while (info.compStmtOffsets[index] < blockOffs)
2711 if (index == info.compStmtOffsetsCount)
2713 return info.compStmtOffsetsCount;
2717 assert(index < info.compStmtOffsetsCount);
2719 if (info.compStmtOffsets[index] == blockOffs)
2721 /* There is an explicit boundary for the start of this basic block.
2722 So we will start with bbCodeOffs. Else we will wait until we
2723 get to the next explicit boundary */
2725 impCurStmtOffsSet(blockOffs);
2733 /*****************************************************************************/
2735 static inline bool impOpcodeIsCallOpcode(OPCODE opcode)
2749 /*****************************************************************************/
2751 static inline bool impOpcodeIsCallSiteBoundary(OPCODE opcode)
2768 /*****************************************************************************/
2770 // One might think it is worth caching these values, but results indicate there is little benefit.
2772 // In addition, caching them causes SuperPMI to be unable to completely
2773 // encapsulate an individual method context.
2774 CORINFO_CLASS_HANDLE Compiler::impGetRefAnyClass()
2776 CORINFO_CLASS_HANDLE refAnyClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPED_BYREF);
2777 assert(refAnyClass != (CORINFO_CLASS_HANDLE) nullptr);
2781 CORINFO_CLASS_HANDLE Compiler::impGetTypeHandleClass()
2783 CORINFO_CLASS_HANDLE typeHandleClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPE_HANDLE);
2784 assert(typeHandleClass != (CORINFO_CLASS_HANDLE) nullptr);
2785 return typeHandleClass;
2788 CORINFO_CLASS_HANDLE Compiler::impGetRuntimeArgumentHandle()
2790 CORINFO_CLASS_HANDLE argIteratorClass = info.compCompHnd->getBuiltinClass(CLASSID_ARGUMENT_HANDLE);
2791 assert(argIteratorClass != (CORINFO_CLASS_HANDLE) nullptr);
2792 return argIteratorClass;
2795 CORINFO_CLASS_HANDLE Compiler::impGetStringClass()
2797 CORINFO_CLASS_HANDLE stringClass = info.compCompHnd->getBuiltinClass(CLASSID_STRING);
2798 assert(stringClass != (CORINFO_CLASS_HANDLE) nullptr);
2802 CORINFO_CLASS_HANDLE Compiler::impGetObjectClass()
2804 CORINFO_CLASS_HANDLE objectClass = info.compCompHnd->getBuiltinClass(CLASSID_SYSTEM_OBJECT);
2805 assert(objectClass != (CORINFO_CLASS_HANDLE) nullptr);
2809 /*****************************************************************************
2810 * "&var" can be used either as TYP_BYREF or TYP_I_IMPL, but we
2811 * set its type to TYP_BYREF when we create it. Only at the point where we
2812 * use it do we know whether it can be changed to TYP_I_IMPL
2816 void Compiler::impBashVarAddrsToI(GenTreePtr tree1, GenTreePtr tree2)
2818 if (tree1->IsVarAddr())
2820 tree1->gtType = TYP_I_IMPL;
2823 if (tree2 && tree2->IsVarAddr())
2825 tree2->gtType = TYP_I_IMPL;
2829 /*****************************************************************************
2830 * TYP_INT and TYP_I_IMPL can be used almost interchangeably, but we want
2831 * to make that an explicit cast in our trees, so any implicit casts that
2832 * exist in the IL (at least on 64-bit where TYP_I_IMPL != TYP_INT) are
2833 * turned into explicit casts here.
2834 * We also allow an implicit conversion of a ldnull into a TYP_I_IMPL(0)
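 *
 * For example (illustrative only), on a 64-bit target adding a TYP_INT index to a TYP_BYREF
 * pointer inserts GT_CAST(TYP_I_IMPL, index) so the widening is explicit in the tree.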
2837 GenTreePtr Compiler::impImplicitIorI4Cast(GenTreePtr tree, var_types dstTyp)
2839 var_types currType = genActualType(tree->gtType);
2840 var_types wantedType = genActualType(dstTyp);
2842 if (wantedType != currType)
2844 // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
2845 if ((tree->OperGet() == GT_CNS_INT) && varTypeIsI(dstTyp))
2847 if (!varTypeIsI(tree->gtType) || ((tree->gtType == TYP_REF) && (tree->gtIntCon.gtIconVal == 0)))
2849 tree->gtType = TYP_I_IMPL;
2852 #ifdef _TARGET_64BIT_
2853 else if (varTypeIsI(wantedType) && (currType == TYP_INT))
2855 // Note that this allows TYP_INT to be cast to a TYP_I_IMPL when wantedType is a TYP_BYREF or TYP_REF
2856 tree = gtNewCastNode(TYP_I_IMPL, tree, TYP_I_IMPL);
2858 else if ((wantedType == TYP_INT) && varTypeIsI(currType))
2860 // Note that this allows TYP_BYREF or TYP_REF to be cast to a TYP_INT
2861 tree = gtNewCastNode(TYP_INT, tree, TYP_INT);
2863 #endif // _TARGET_64BIT_
2869 /*****************************************************************************
2870 * TYP_FLOAT and TYP_DOUBLE can be used almost interchangeably in some cases,
2871 * but we want to make that an explicit cast in our trees, so any implicit casts
2872 * that exist in the IL are turned into explicit casts here.
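 *
 * For example (illustrative only), storing a TYP_FLOAT value into a TYP_DOUBLE destination
 * becomes GT_CAST(TYP_DOUBLE, value) rather than relying on an implicit conversion.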
2875 GenTreePtr Compiler::impImplicitR4orR8Cast(GenTreePtr tree, var_types dstTyp)
2877 #ifndef LEGACY_BACKEND
2878 if (varTypeIsFloating(tree) && varTypeIsFloating(dstTyp) && (dstTyp != tree->gtType))
2880 tree = gtNewCastNode(dstTyp, tree, dstTyp);
2882 #endif // !LEGACY_BACKEND
2887 //------------------------------------------------------------------------
2888 // impInitializeArrayIntrinsic: Attempts to replace a call to InitializeArray
2889 // with a GT_COPYBLK node.
2892 // sig - The InitializeArray signature.
2895 // A pointer to the newly created GT_COPYBLK node if the replacement succeeds or
2896 // nullptr otherwise.
2899 // The function recognizes the following IL pattern:
2900 // ldc <length> or a list of ldc <lower bound>/<length>
2903 // ldtoken <field handle>
2904 // call InitializeArray
2905 // The lower bounds need not be constant except when the array rank is 1.
2906 // The function recognizes all kinds of arrays thus enabling a small runtime
2907 // such as CoreRT to skip providing an implementation for InitializeArray.
2909 GenTreePtr Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig)
2911 assert(sig->numArgs == 2);
2913 GenTreePtr fieldTokenNode = impStackTop(0).val;
2914 GenTreePtr arrayLocalNode = impStackTop(1).val;
2917 // Verify that the field token is known and valid. Note that it's also
2918 // possible for the token to come from reflection, in which case we cannot do
2919 // the optimization and must therefore revert to calling the helper. You can
2920 // see an example of this in bvt\DynIL\initarray2.exe (in Main).
2923 // Check to see if the ldtoken helper call is what we see here.
2924 if (fieldTokenNode->gtOper != GT_CALL || (fieldTokenNode->gtCall.gtCallType != CT_HELPER) ||
2925 (fieldTokenNode->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD)))
2930 // Strip helper call away
2931 fieldTokenNode = fieldTokenNode->gtCall.gtCallArgs->Current();
2933 if (fieldTokenNode->gtOper == GT_IND)
2935 fieldTokenNode = fieldTokenNode->gtOp.gtOp1;
2938 // Check for constant
2939 if (fieldTokenNode->gtOper != GT_CNS_INT)
2944 CORINFO_FIELD_HANDLE fieldToken = (CORINFO_FIELD_HANDLE)fieldTokenNode->gtIntCon.gtCompileTimeHandle;
2945 if (!fieldTokenNode->IsIconHandle(GTF_ICON_FIELD_HDL) || (fieldToken == nullptr))
2951 // We need to get the number of elements in the array and the size of each element.
2952 // We verify that the newarr statement is exactly what we expect it to be.
2953 // If it's not, then we just return nullptr and don't optimize this call
2957 // It is possible that we don't have any statements in the block yet
2959 if (impTreeLast->gtOper != GT_STMT)
2961 assert(impTreeLast->gtOper == GT_BEG_STMTS);
2966 // We start by looking at the last statement, making sure it's an assignment, and
2967 // that the target of the assignment is the array passed to InitializeArray.
2969 GenTreePtr arrayAssignment = impTreeLast->gtStmt.gtStmtExpr;
2970 if ((arrayAssignment->gtOper != GT_ASG) || (arrayAssignment->gtOp.gtOp1->gtOper != GT_LCL_VAR) ||
2971 (arrayLocalNode->gtOper != GT_LCL_VAR) ||
2972 (arrayAssignment->gtOp.gtOp1->gtLclVarCommon.gtLclNum != arrayLocalNode->gtLclVarCommon.gtLclNum))
2978 // Make sure that the object being assigned is a helper call.
2981 GenTreePtr newArrayCall = arrayAssignment->gtOp.gtOp2;
2982 if ((newArrayCall->gtOper != GT_CALL) || (newArrayCall->gtCall.gtCallType != CT_HELPER))
2988 // Verify that it is one of the new array helpers.
2991 bool isMDArray = false;
2993 if (newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_DIRECT) &&
2994 newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_OBJ) &&
2995 newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_VC) &&
2996 newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_ALIGN8)
2997 #ifdef FEATURE_READYTORUN_COMPILER
2998 && newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1)
3002 #if COR_JIT_EE_VERSION > 460
3003 if (newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEW_MDARR_NONVARARG))
3012 CORINFO_CLASS_HANDLE arrayClsHnd = (CORINFO_CLASS_HANDLE)newArrayCall->gtCall.compileTimeHelperArgumentHandle;
3015 // Make sure we found a compile time handle to the array
3024 S_UINT32 numElements;
3028 rank = info.compCompHnd->getArrayRank(arrayClsHnd);
3035 GenTreeArgList* tokenArg = newArrayCall->gtCall.gtCallArgs;
3036 assert(tokenArg != nullptr);
3037 GenTreeArgList* numArgsArg = tokenArg->Rest();
3038 assert(numArgsArg != nullptr);
3039 GenTreeArgList* argsArg = numArgsArg->Rest();
3040 assert(argsArg != nullptr);
3043 // The number of arguments should be a constant between 1 and 64. The rank can't be 0
3044 // so at least one length must be present and the rank can't exceed 32 so there can
3045 // be at most 64 arguments - 32 lengths and 32 lower bounds.
3048 if ((!numArgsArg->Current()->IsCnsIntOrI()) || (numArgsArg->Current()->AsIntCon()->IconValue() < 1) ||
3049 (numArgsArg->Current()->AsIntCon()->IconValue() > 64))
3054 unsigned numArgs = static_cast<unsigned>(numArgsArg->Current()->AsIntCon()->IconValue());
3055 bool lowerBoundsSpecified;
3057 if (numArgs == rank * 2)
3059 lowerBoundsSpecified = true;
3061 else if (numArgs == rank)
3063 lowerBoundsSpecified = false;
3066 // If the rank is 1 and a lower bound isn't specified then the runtime creates
3067 // a SDArray. Note that even if a lower bound is specified it can be 0 and then
3068 // we get a SDArray as well, see the for loop below.
3082 // The rank is known to be at least 1 so we can start with numElements being 1
3083 // to avoid the need to special case the first dimension.
3086 numElements = S_UINT32(1);
3090 static bool IsArgsFieldInit(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs)
3092 return (tree->OperGet() == GT_ASG) && IsArgsFieldIndir(tree->gtGetOp1(), index, lvaNewObjArrayArgs) &&
3093 IsArgsAddr(tree->gtGetOp1()->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs);
3096 static bool IsArgsFieldIndir(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs)
3098 return (tree->OperGet() == GT_IND) && (tree->gtGetOp1()->OperGet() == GT_ADD) &&
3099 (tree->gtGetOp1()->gtGetOp2()->IsIntegralConst(sizeof(INT32) * index)) &&
3100 IsArgsAddr(tree->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs);
3103 static bool IsArgsAddr(GenTree* tree, unsigned lvaNewObjArrayArgs)
3105 return (tree->OperGet() == GT_ADDR) && (tree->gtGetOp1()->OperGet() == GT_LCL_VAR) &&
3106 (tree->gtGetOp1()->AsLclVar()->GetLclNum() == lvaNewObjArrayArgs);
3109 static bool IsComma(GenTree* tree)
3111 return (tree != nullptr) && (tree->OperGet() == GT_COMMA);
3115 unsigned argIndex = 0;
3118 for (comma = argsArg->Current(); Match::IsComma(comma); comma = comma->gtGetOp2())
3120 if (lowerBoundsSpecified)
3123 // In general lower bounds can be ignored because they're not needed to
3124 // calculate the total number of elements. But for single dimensional arrays
3125 // we need to know if the lower bound is 0 because in this case the runtime
3126 // creates a SDArray and this affects the way the array data offset is calculated.
3131 GenTree* lowerBoundAssign = comma->gtGetOp1();
3132 assert(Match::IsArgsFieldInit(lowerBoundAssign, argIndex, lvaNewObjArrayArgs));
3133 GenTree* lowerBoundNode = lowerBoundAssign->gtGetOp2();
3135 if (lowerBoundNode->IsIntegralConst(0))
3141 comma = comma->gtGetOp2();
3145 GenTree* lengthNodeAssign = comma->gtGetOp1();
3146 assert(Match::IsArgsFieldInit(lengthNodeAssign, argIndex, lvaNewObjArrayArgs));
3147 GenTree* lengthNode = lengthNodeAssign->gtGetOp2();
3149 if (!lengthNode->IsCnsIntOrI())
3154 numElements *= S_SIZE_T(lengthNode->AsIntCon()->IconValue());
3158 assert((comma != nullptr) && Match::IsArgsAddr(comma, lvaNewObjArrayArgs));
3160 if (argIndex != numArgs)
3168 // Make sure there are exactly two arguments: the array class and
3169 // the number of elements.
3172 GenTreePtr arrayLengthNode;
3174 GenTreeArgList* args = newArrayCall->gtCall.gtCallArgs;
3175 #ifdef FEATURE_READYTORUN_COMPILER
3176 if (newArrayCall->gtCall.gtCallMethHnd == eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1))
3178 // Array length is 1st argument for readytorun helper
3179 arrayLengthNode = args->Current();
3184 // Array length is 2nd argument for regular helper
3185 arrayLengthNode = args->Rest()->Current();
3189 // Make sure that the number of elements looks valid.
3191 if (arrayLengthNode->gtOper != GT_CNS_INT)
3196 numElements = S_SIZE_T(arrayLengthNode->gtIntCon.gtIconVal);
3198 if (!info.compCompHnd->isSDArray(arrayClsHnd))
3204 CORINFO_CLASS_HANDLE elemClsHnd;
3205 var_types elementType = JITtype2varType(info.compCompHnd->getChildType(arrayClsHnd, &elemClsHnd));
3208 // Note that genTypeSize will return zero for non primitive types, which is exactly
3209 // what we want (size will then be 0, and we will catch this in the conditional below).
3210 // Note that we don't expect this to fail for valid binaries, so we assert in the
3211 // non-verification case (the verification case should not assert but rather correctly
3212 // handle bad binaries). This assert is not guarding any specific invariant, but rather
3213 // saying that we don't expect this to happen, and if it is hit, we need to investigate
3217 S_UINT32 elemSize(genTypeSize(elementType));
3218 S_UINT32 size = elemSize * S_UINT32(numElements);
3220 if (size.IsOverflow())
3225 if ((size.Value() == 0) || (varTypeIsGC(elementType)))
3227 assert(verNeedsVerification());
3231 void* initData = info.compCompHnd->getArrayInitializationData(fieldToken, size.Value());
3238 // At this point we are ready to commit to implementing the InitializeArray
3239 // intrinsic using a struct assignment. Pop the arguments from the stack and
3240 // return the struct assignment node.
3246 const unsigned blkSize = size.Value();
3251 unsigned dataOffset = eeGetMDArrayDataOffset(elementType, rank);
3253 dst = gtNewOperNode(GT_ADD, TYP_BYREF, arrayLocalNode, gtNewIconNode(dataOffset, TYP_I_IMPL));
3257 dst = gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewIndexRef(elementType, arrayLocalNode, gtNewIconNode(0)));
3259 GenTreePtr blk = gtNewBlockVal(dst, blkSize);
3260 GenTreePtr srcAddr = gtNewIconHandleNode((size_t)initData, GTF_ICON_STATIC_HDL);
3261 GenTreePtr src = gtNewOperNode(GT_IND, TYP_STRUCT, srcAddr);
3263 return gtNewBlkOpNode(blk, // dst
3270 /*****************************************************************************/
3271 // Returns the GenTree that should be used to do the intrinsic instead of the call.
3272 // Returns NULL if an intrinsic cannot be used
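// For example (illustrative only), a call to Math.Sqrt(x) can come back from here as a
// GT_INTRINSIC node (CORINFO_INTRINSIC_Sqrt) wrapping the popped operand instead of a GT_CALL.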
3274 GenTreePtr Compiler::impIntrinsic(CORINFO_CLASS_HANDLE clsHnd,
3275 CORINFO_METHOD_HANDLE method,
3276 CORINFO_SIG_INFO* sig,
3280 CorInfoIntrinsics* pIntrinsicID)
3282 bool mustExpand = false;
3283 #if COR_JIT_EE_VERSION > 460
3284 CorInfoIntrinsics intrinsicID = info.compCompHnd->getIntrinsicID(method, &mustExpand);
3286 CorInfoIntrinsics intrinsicID = info.compCompHnd->getIntrinsicID(method);
3288 *pIntrinsicID = intrinsicID;
3290 #ifndef _TARGET_ARM_
3291 genTreeOps interlockedOperator;
3294 if (intrinsicID == CORINFO_INTRINSIC_StubHelpers_GetStubContext)
3296 // must be done regardless of DbgCode and MinOpts
3297 return gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL);
3299 #ifdef _TARGET_64BIT_
3300 if (intrinsicID == CORINFO_INTRINSIC_StubHelpers_GetStubContextAddr)
3302 // must be done regardless of DbgCode and MinOpts
3303 return gtNewOperNode(GT_ADDR, TYP_I_IMPL, gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL));
3306 assert(intrinsicID != CORINFO_INTRINSIC_StubHelpers_GetStubContextAddr);
3309 GenTreePtr retNode = nullptr;
3312 // We disable the inlining of intrinsics for MinOpts.
3314 if (!mustExpand && (opts.compDbgCode || opts.MinOpts()))
3316 *pIntrinsicID = CORINFO_INTRINSIC_Illegal;
3320 // Currently we don't have CORINFO_INTRINSIC_Exp because it does not
3321 // seem to work properly for Infinity values; we don't do
3322 // CORINFO_INTRINSIC_Pow because it needs a helper which we currently don't have
3324 var_types callType = JITtype2varType(sig->retType);
3326 /* First do the intrinsics which are always smaller than a call */
3328 switch (intrinsicID)
3330 GenTreePtr op1, op2;
3332 case CORINFO_INTRINSIC_Sin:
3333 case CORINFO_INTRINSIC_Sqrt:
3334 case CORINFO_INTRINSIC_Abs:
3335 case CORINFO_INTRINSIC_Cos:
3336 case CORINFO_INTRINSIC_Round:
3337 case CORINFO_INTRINSIC_Cosh:
3338 case CORINFO_INTRINSIC_Sinh:
3339 case CORINFO_INTRINSIC_Tan:
3340 case CORINFO_INTRINSIC_Tanh:
3341 case CORINFO_INTRINSIC_Asin:
3342 case CORINFO_INTRINSIC_Acos:
3343 case CORINFO_INTRINSIC_Atan:
3344 case CORINFO_INTRINSIC_Atan2:
3345 case CORINFO_INTRINSIC_Log10:
3346 case CORINFO_INTRINSIC_Pow:
3347 case CORINFO_INTRINSIC_Exp:
3348 case CORINFO_INTRINSIC_Ceiling:
3349 case CORINFO_INTRINSIC_Floor:
3351 // These are math intrinsics
3353 assert(callType != TYP_STRUCT);
3357 #if defined(LEGACY_BACKEND)
3358 if (IsTargetIntrinsic(intrinsicID))
3359 #elif !defined(_TARGET_X86_)
3360 // Intrinsics that are not implemented directly by target instructions will
3361 // be re-materialized as user calls in the rationalizer. For prefixed tail calls,
3362 // don't do this optimization, because
3363 // a) For back compatibility reasons on desktop.Net 4.6 / 4.6.1
3364 // b) It will be a non-trivial task or too late to re-materialize a surviving
3365 // tail prefixed GT_INTRINSIC as tail call in rationalizer.
3366 if (!IsIntrinsicImplementedByUserCall(intrinsicID) || !tailCall)
3368 // On x86 RyuJIT, importing intrinsics that are implemented as user calls can cause incorrect calculation
3369 // of the depth of the stack if these intrinsics are used as arguments to another call. This causes bad
3370 // code generation for certain EH constructs.
3371 if (!IsIntrinsicImplementedByUserCall(intrinsicID))
3374 switch (sig->numArgs)
3377 op1 = impPopStack().val;
3379 #if FEATURE_X87_DOUBLES
3381 // X87 stack doesn't differentiate between float/double
3382 // so it doesn't need a cast, but everybody else does
3383 // Just double check it is at least a FP type
3384 noway_assert(varTypeIsFloating(op1));
3386 #else // FEATURE_X87_DOUBLES
3388 if (op1->TypeGet() != callType)
3390 op1 = gtNewCastNode(callType, op1, callType);
3393 #endif // FEATURE_X87_DOUBLES
3395 op1 = new (this, GT_INTRINSIC)
3396 GenTreeIntrinsic(genActualType(callType), op1, intrinsicID, method);
3400 op2 = impPopStack().val;
3401 op1 = impPopStack().val;
3403 #if FEATURE_X87_DOUBLES
3405 // X87 stack doesn't differentiate between float/double
3406 // so it doesn't need a cast, but everybody else does
3407 // Just double check it is at least a FP type
3408 noway_assert(varTypeIsFloating(op2));
3409 noway_assert(varTypeIsFloating(op1));
3411 #else // FEATURE_X87_DOUBLES
3413 if (op2->TypeGet() != callType)
3415 op2 = gtNewCastNode(callType, op2, callType);
3417 if (op1->TypeGet() != callType)
3419 op1 = gtNewCastNode(callType, op1, callType);
3422 #endif // FEATURE_X87_DOUBLES
3424 op1 = new (this, GT_INTRINSIC)
3425 GenTreeIntrinsic(genActualType(callType), op1, op2, intrinsicID, method);
3429 NO_WAY("Unsupported number of args for Math Intrinsic");
3432 #ifndef LEGACY_BACKEND
3433 if (IsIntrinsicImplementedByUserCall(intrinsicID))
3435 op1->gtFlags |= GTF_CALL;
3443 #ifdef _TARGET_XARCH_
3444 // TODO-ARM-CQ: reenable treating Interlocked operation as intrinsic
3445 case CORINFO_INTRINSIC_InterlockedAdd32:
3446 interlockedOperator = GT_LOCKADD;
3447 goto InterlockedBinOpCommon;
3448 case CORINFO_INTRINSIC_InterlockedXAdd32:
3449 interlockedOperator = GT_XADD;
3450 goto InterlockedBinOpCommon;
3451 case CORINFO_INTRINSIC_InterlockedXchg32:
3452 interlockedOperator = GT_XCHG;
3453 goto InterlockedBinOpCommon;
3455 #ifdef _TARGET_AMD64_
3456 case CORINFO_INTRINSIC_InterlockedAdd64:
3457 interlockedOperator = GT_LOCKADD;
3458 goto InterlockedBinOpCommon;
3459 case CORINFO_INTRINSIC_InterlockedXAdd64:
3460 interlockedOperator = GT_XADD;
3461 goto InterlockedBinOpCommon;
3462 case CORINFO_INTRINSIC_InterlockedXchg64:
3463 interlockedOperator = GT_XCHG;
3464 goto InterlockedBinOpCommon;
3465 #endif // _TARGET_AMD64_
3467 InterlockedBinOpCommon:
3468 assert(callType != TYP_STRUCT);
3469 assert(sig->numArgs == 2);
3471 op2 = impPopStack().val;
3472 op1 = impPopStack().val;
3478 // field (for example)
3480 // In the case where the first argument is the address of a local, we might
3481 // want to make this *not* make the var address-taken -- but atomic instructions
3482 // on a local are probably pretty useless anyway, so we probably don't care.
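// As an illustrative sketch, Interlocked.Add(ref loc, val) is imported here as roughly
//     GT_LOCKADD(addrOfLoc, val)
// with GTF_GLOB_EFFECT set, since the operation reads and writes shared memory.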
3484 op1 = gtNewOperNode(interlockedOperator, genActualType(callType), op1, op2);
3485 op1->gtFlags |= GTF_GLOB_EFFECT;
3488 #endif // _TARGET_XARCH_
3490 case CORINFO_INTRINSIC_MemoryBarrier:
3492 assert(sig->numArgs == 0);
3494 op1 = new (this, GT_MEMORYBARRIER) GenTree(GT_MEMORYBARRIER, TYP_VOID);
3495 op1->gtFlags |= GTF_GLOB_EFFECT;
3499 #ifdef _TARGET_XARCH_
3500 // TODO-ARM-CQ: reenable treating InterlockedCmpXchg32 operation as intrinsic
3501 case CORINFO_INTRINSIC_InterlockedCmpXchg32:
3502 #ifdef _TARGET_AMD64_
3503 case CORINFO_INTRINSIC_InterlockedCmpXchg64:
3506 assert(callType != TYP_STRUCT);
3507 assert(sig->numArgs == 3);
3510 op3 = impPopStack().val; // comparand
3511 op2 = impPopStack().val; // value
3512 op1 = impPopStack().val; // location
3514 GenTreePtr node = new (this, GT_CMPXCHG) GenTreeCmpXchg(genActualType(callType), op1, op2, op3);
3516 node->gtCmpXchg.gtOpLocation->gtFlags |= GTF_DONT_CSE;
3522 case CORINFO_INTRINSIC_StringLength:
3523 op1 = impPopStack().val;
3524 if (!opts.MinOpts() && !opts.compDbgCode)
3526 GenTreeArrLen* arrLen =
3527 new (this, GT_ARR_LENGTH) GenTreeArrLen(TYP_INT, op1, offsetof(CORINFO_String, stringLen));
3532 /* Create the expression "*(str_addr + stringLengthOffset)" */
3533 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
3534 gtNewIconNode(offsetof(CORINFO_String, stringLen), TYP_I_IMPL));
3535 op1 = gtNewOperNode(GT_IND, TYP_INT, op1);
3540 case CORINFO_INTRINSIC_StringGetChar:
3541 op2 = impPopStack().val;
3542 op1 = impPopStack().val;
3543 op1 = gtNewIndexRef(TYP_CHAR, op1, op2);
3544 op1->gtFlags |= GTF_INX_STRING_LAYOUT;
3548 case CORINFO_INTRINSIC_InitializeArray:
3549 retNode = impInitializeArrayIntrinsic(sig);
3552 case CORINFO_INTRINSIC_Array_Address:
3553 case CORINFO_INTRINSIC_Array_Get:
3554 case CORINFO_INTRINSIC_Array_Set:
3555 retNode = impArrayAccessIntrinsic(clsHnd, sig, memberRef, readonlyCall, intrinsicID);
3558 case CORINFO_INTRINSIC_GetTypeFromHandle:
3559 op1 = impStackTop(0).val;
3560 if (op1->gtOper == GT_CALL && (op1->gtCall.gtCallType == CT_HELPER) &&
3561 gtIsTypeHandleToRuntimeTypeHelper(op1))
3563 op1 = impPopStack().val;
3564 // Change call to return RuntimeType directly.
3565 op1->gtType = TYP_REF;
3568 // Call the regular function.
3571 case CORINFO_INTRINSIC_RTH_GetValueInternal:
3572 op1 = impStackTop(0).val;
3573 if (op1->gtOper == GT_CALL && (op1->gtCall.gtCallType == CT_HELPER) &&
3574 gtIsTypeHandleToRuntimeTypeHelper(op1))
3577 // Helper-RuntimeTypeHandle -> TreeToGetNativeTypeHandle
3580 // TreeToGetNativeTypeHandle
3582 // Remove call to helper and return the native TypeHandle pointer that was the parameter
3585 op1 = impPopStack().val;
3587 // Get native TypeHandle argument to old helper
3588 op1 = op1->gtCall.gtCallArgs;
3589 assert(op1->OperIsList());
3590 assert(op1->gtOp.gtOp2 == nullptr);
3591 op1 = op1->gtOp.gtOp1;
3594 // Call the regular function.
3597 #ifndef LEGACY_BACKEND
3598 case CORINFO_INTRINSIC_Object_GetType:
3600 op1 = impPopStack().val;
3601 op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, intrinsicID, method);
3603 // Set the CALL flag to indicate that the operator is implemented by a call.
3604 // Set also the EXCEPTION flag because the native implementation of
3605 // CORINFO_INTRINSIC_Object_GetType intrinsic can throw NullReferenceException.
3606 op1->gtFlags |= (GTF_CALL | GTF_EXCEPT);
3612 /* Unknown intrinsic */
3618 if (retNode == nullptr)
3620 NO_WAY("JIT must expand the intrinsic!");
3627 /*****************************************************************************/
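// impArrayAccessIntrinsic expands the Array Get/Set/Address intrinsics for multi-dimensional
// arrays into GT_ARR_ELEM form. As an illustrative sketch, a rank-2 get of a[i, j] becomes
// roughly IND(ARR_ELEM(a, i, j)); a set additionally assigns the popped value into that
// location, and Address returns the GT_ARR_ELEM byref itself.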
3629 GenTreePtr Compiler::impArrayAccessIntrinsic(
3630 CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, int memberRef, bool readonlyCall, CorInfoIntrinsics intrinsicID)
3632 /* If we are generating SMALL_CODE, we don't want to use intrinsics for
3633 the following, as it generates fatter code.
3636 if (compCodeOpt() == SMALL_CODE)
3641 /* These intrinsics generate fatter (but faster) code and are only
3642 done if we don't need SMALL_CODE */
3644 unsigned rank = (intrinsicID == CORINFO_INTRINSIC_Array_Set) ? (sig->numArgs - 1) : sig->numArgs;
3646 // The rank 1 case is special because it has to handle two array formats;
3647 // we will simply not handle that case
3648 if (rank > GT_ARR_MAX_RANK || rank <= 1)
3653 CORINFO_CLASS_HANDLE arrElemClsHnd = nullptr;
3654 var_types elemType = JITtype2varType(info.compCompHnd->getChildType(clsHnd, &arrElemClsHnd));
3656 // For the ref case, we will only be able to inline if the types match
3657 // (the verifier checks for this; we don't care for the nonverified case) and the
3658 // type is final (so we don't need to do the cast)
3659 if ((intrinsicID != CORINFO_INTRINSIC_Array_Get) && !readonlyCall && varTypeIsGC(elemType))
3661 // Get the call site signature
3662 CORINFO_SIG_INFO LocalSig;
3663 eeGetCallSiteSig(memberRef, info.compScopeHnd, impTokenLookupContextHandle, &LocalSig);
3664 assert(LocalSig.hasThis());
3666 CORINFO_CLASS_HANDLE actualElemClsHnd;
3668 if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
3670 // Fetch the last argument, the one that indicates the type we are setting.
3671 CORINFO_ARG_LIST_HANDLE argType = LocalSig.args;
3672 for (unsigned r = 0; r < rank; r++)
3674 argType = info.compCompHnd->getArgNext(argType);
3677 typeInfo argInfo = verParseArgSigToTypeInfo(&LocalSig, argType);
3678 actualElemClsHnd = argInfo.GetClassHandle();
3682 assert(intrinsicID == CORINFO_INTRINSIC_Array_Address);
3684 // Fetch the return type
3685 typeInfo retInfo = verMakeTypeInfo(LocalSig.retType, LocalSig.retTypeClass);
3686 assert(retInfo.IsByRef());
3687 actualElemClsHnd = retInfo.GetClassHandle();
3690 // if it's not final, we can't do the optimization
3691 if (!(info.compCompHnd->getClassAttribs(actualElemClsHnd) & CORINFO_FLG_FINAL))
3697 unsigned arrayElemSize;
3698 if (elemType == TYP_STRUCT)
3700 assert(arrElemClsHnd);
3702 arrayElemSize = info.compCompHnd->getClassSize(arrElemClsHnd);
3706 arrayElemSize = genTypeSize(elemType);
3709 if ((unsigned char)arrayElemSize != arrayElemSize)
3711 // arrayElemSize would be truncated as an unsigned char.
3712 // This means the array element is too large. Don't do the optimization.
3716 GenTreePtr val = nullptr;
3718 if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
3720 // Assignment of a struct is more work, and there are more gets than sets.
3721 if (elemType == TYP_STRUCT)
3726 val = impPopStack().val;
3727 assert(genActualType(elemType) == genActualType(val->gtType) ||
3728 (elemType == TYP_FLOAT && val->gtType == TYP_DOUBLE) ||
3729 (elemType == TYP_INT && val->gtType == TYP_BYREF) ||
3730 (elemType == TYP_DOUBLE && val->gtType == TYP_FLOAT));
3733 noway_assert((unsigned char)GT_ARR_MAX_RANK == GT_ARR_MAX_RANK);
3735 GenTreePtr inds[GT_ARR_MAX_RANK];
3736 for (unsigned k = rank; k > 0; k--)
3738 inds[k - 1] = impPopStack().val;
3741 GenTreePtr arr = impPopStack().val;
3742 assert(arr->gtType == TYP_REF);
3744 GenTreePtr arrElem =
3745 new (this, GT_ARR_ELEM) GenTreeArrElem(TYP_BYREF, arr, static_cast<unsigned char>(rank),
3746 static_cast<unsigned char>(arrayElemSize), elemType, &inds[0]);
3748 if (intrinsicID != CORINFO_INTRINSIC_Array_Address)
3750 arrElem = gtNewOperNode(GT_IND, elemType, arrElem);
3753 if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
3755 assert(val != nullptr);
3756 return gtNewAssignNode(arrElem, val);
3764 BOOL Compiler::verMergeEntryStates(BasicBlock* block, bool* changed)
3768 // do some basic checks first
3769 if (block->bbStackDepthOnEntry() != verCurrentState.esStackDepth)
3774 if (verCurrentState.esStackDepth > 0)
3776 // merge stack types
3777 StackEntry* parentStack = block->bbStackOnEntry();
3778 StackEntry* childStack = verCurrentState.esStack;
3780 for (i = 0; i < verCurrentState.esStackDepth; i++, parentStack++, childStack++)
3782 if (tiMergeToCommonParent(&parentStack->seTypeInfo, &childStack->seTypeInfo, changed) == FALSE)
3789 // merge initialization status of this ptr
3791 if (verTrackObjCtorInitState)
3793 // If we're tracking the CtorInitState, then it must not be unknown in the current state.
3794 assert(verCurrentState.thisInitialized != TIS_Bottom);
3796 // If the successor block's thisInit state is unknown, copy it from the current state.
3797 if (block->bbThisOnEntry() == TIS_Bottom)
3800 verSetThisInit(block, verCurrentState.thisInitialized);
3802 else if (verCurrentState.thisInitialized != block->bbThisOnEntry())
3804 if (block->bbThisOnEntry() != TIS_Top)
3807 verSetThisInit(block, TIS_Top);
3809 if (block->bbFlags & BBF_FAILED_VERIFICATION)
3811 // The block is bad. Control can flow through the block to any handler that catches the
3812 // verification exception, but the importer ignores bad blocks and therefore won't model
3813 // this flow in the normal way. To complete the merge into the bad block, the new state
3814 // needs to be manually pushed to the handlers that may be reached after the verification
3815 // exception occurs.
3817 // Usually, the new state was already propagated to the relevant handlers while processing
3818 // the predecessors of the bad block. The exception is when the bad block is at the start
3819 // of a try region, meaning it is protected by additional handlers that do not protect its
3822 if (block->hasTryIndex() && ((block->bbFlags & BBF_TRY_BEG) != 0))
3824 // Push TIS_Top to the handlers that protect the bad block. Note that this can cause
3825 // recursive calls back into this code path (if successors of the current bad block are
3826 // also bad blocks).
3828 ThisInitState origTIS = verCurrentState.thisInitialized;
3829 verCurrentState.thisInitialized = TIS_Top;
3830 impVerifyEHBlock(block, true);
3831 verCurrentState.thisInitialized = origTIS;
3839 assert(verCurrentState.thisInitialized == TIS_Bottom && block->bbThisOnEntry() == TIS_Bottom);
3845 /*****************************************************************************
3846 * 'logMsg' is true if a log message needs to be logged, false if the caller has
3847 * already logged it (presumably in a more detailed fashion than done here)
3848 * 'bVerificationException' is true for a verification exception, false for a
3849 * "call unauthorized by host" exception.
3852 void Compiler::verConvertBBToThrowVerificationException(BasicBlock* block DEBUGARG(bool logMsg))
3854 block->bbJumpKind = BBJ_THROW;
3855 block->bbFlags |= BBF_FAILED_VERIFICATION;
3857 impCurStmtOffsSet(block->bbCodeOffs);
3860 // we need this since BeginTreeList asserts otherwise
3861 impTreeList = impTreeLast = nullptr;
3862 block->bbFlags &= ~BBF_IMPORTED;
3866 JITLOG((LL_ERROR, "Verification failure: while compiling %s near IL offset %x..%xh \n", info.compFullName,
3867 block->bbCodeOffs, block->bbCodeOffsEnd));
3870 printf("\n\nVerification failure: %s near IL %xh \n", info.compFullName, block->bbCodeOffs);
3874 if (JitConfig.DebugBreakOnVerificationFailure())
3882 // if the stack is non-empty evaluate all the side-effects
3883 if (verCurrentState.esStackDepth > 0)
3885 impEvalSideEffects();
3887 assert(verCurrentState.esStackDepth == 0);
3889 GenTreePtr op1 = gtNewHelperCallNode(CORINFO_HELP_VERIFICATION, TYP_VOID, GTF_EXCEPT,
3890 gtNewArgList(gtNewIconNode(block->bbCodeOffs)));
3891 // verCurrentState.esStackDepth = 0;
3892 impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
3894 // The inliner is not able to handle methods that require a throw block, so
3895 // make sure this method never gets inlined.
3896 info.compCompHnd->setMethodAttribs(info.compMethodHnd, CORINFO_FLG_BAD_INLINEE);
3899 /*****************************************************************************
3902 void Compiler::verHandleVerificationFailure(BasicBlock* block DEBUGARG(bool logMsg))
3905 // In AMD64, for historical reasons involving design limitations of JIT64, the VM has a
3906 // slightly different mechanism in which it calls the JIT to perform IL verification:
3907 // in the case of transparent methods the VM calls for a predicate IsVerifiable()
3908 // that consists of calling the JIT with the IMPORT_ONLY flag and with the IL verify flag on.
3909 // If the JIT determines the method is not verifiable, it should raise the exception to the VM and let
3910 // it bubble up until reported by the runtime. Currently in RyuJIT, this method doesn't bubble
3911 // up the exception; instead it embeds a throw inside the offending basic block and lets the
3912 // jitted method fail at run time.
3914 // For AMD64 we don't want this behavior when the JIT has been called only for verification (i.e.
3915 // with the IMPORT_ONLY and IL Verification flag set) because this won't actually generate code,
3916 // just try to find out whether to fail this method before even actually jitting it. So, in case
3917 // we detect these two conditions, instead of generating a throw statement inside the offending
3918 // basic block, we immediately fail to JIT and notify the VM to make the IsVerifiable() predicate
3919 // to return false and make RyuJIT behave the same way JIT64 does.
3921 // The rationale behind this workaround is to avoid modifying the VM and maintain compatibility between JIT64 and
3922 // RyuJIT for the time being until we completely replace JIT64.
3923 // TODO-ARM64-Cleanup: We probably want to actually modify the VM in the future to avoid the unnecessary two passes.
3925 // In AMD64 we must make sure we're behaving the same way as JIT64, meaning we should only raise the verification
3926 // exception if we are only importing and verifying. The method verNeedsVerification() can also modify the
3927 // tiVerificationNeeded flag in the case it determines it can 'skip verification' during importation and defer it
3928 // to a runtime check. That's why we must assert one or the other (since the flag tiVerificationNeeded can
3929 // be turned off during importation).
3930 CLANG_FORMAT_COMMENT_ANCHOR;
3932 #ifdef _TARGET_64BIT_
3935 bool canSkipVerificationResult =
3936 info.compCompHnd->canSkipMethodVerification(info.compMethodHnd) != CORINFO_VERIFICATION_CANNOT_SKIP;
3937 assert(tiVerificationNeeded || canSkipVerificationResult);
3940 // Add the non verifiable flag to the compiler
3941 if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IMPORT_ONLY))
3943 tiIsVerifiableCode = FALSE;
3945 #endif //_TARGET_64BIT_
3946 verResetCurrentState(block, &verCurrentState);
3947 verConvertBBToThrowVerificationException(block DEBUGARG(logMsg));
3950 impNoteLastILoffs(); // Remember at which BC offset the tree was finished
3954 /******************************************************************************/
3955 typeInfo Compiler::verMakeTypeInfo(CorInfoType ciType, CORINFO_CLASS_HANDLE clsHnd)
3957 assert(ciType < CORINFO_TYPE_COUNT);
3962 case CORINFO_TYPE_STRING:
3963 case CORINFO_TYPE_CLASS:
3964 tiResult = verMakeTypeInfo(clsHnd);
3965 if (!tiResult.IsType(TI_REF))
3966 { // type must be consistent with element type
3971 #ifdef _TARGET_64BIT_
3972 case CORINFO_TYPE_NATIVEINT:
3973 case CORINFO_TYPE_NATIVEUINT:
3976 // If we have more precise information, use it
3977 return verMakeTypeInfo(clsHnd);
3981 return typeInfo::nativeInt();
3984 #endif // _TARGET_64BIT_
3986 case CORINFO_TYPE_VALUECLASS:
3987 case CORINFO_TYPE_REFANY:
3988 tiResult = verMakeTypeInfo(clsHnd);
3989 // type must be consistent with the element type;
3990 if (!tiResult.IsValueClass())
3995 case CORINFO_TYPE_VAR:
3996 return verMakeTypeInfo(clsHnd);
3998 case CORINFO_TYPE_PTR: // for now, pointers are treated as an error
3999 case CORINFO_TYPE_VOID:
4003 case CORINFO_TYPE_BYREF:
4005 CORINFO_CLASS_HANDLE childClassHandle;
4006 CorInfoType childType = info.compCompHnd->getChildType(clsHnd, &childClassHandle);
4007 return ByRef(verMakeTypeInfo(childType, childClassHandle));
4013 { // If we have more precise information, use it
4014 return typeInfo(TI_STRUCT, clsHnd);
4018 return typeInfo(JITtype2tiType(ciType));
4024 /******************************************************************************/
4026 typeInfo Compiler::verMakeTypeInfo(CORINFO_CLASS_HANDLE clsHnd, bool bashStructToRef /* = false */)
4028 if (clsHnd == nullptr)
4033 // Byrefs should only occur in method and local signatures, which are accessed
4034 // using ICorClassInfo and ICorClassInfo.getChildType.
4035 // So findClass() and getClassAttribs() should not be called for byrefs
4037 if (JITtype2varType(info.compCompHnd->asCorInfoType(clsHnd)) == TYP_BYREF)
4039 assert(!"Did findClass() return a Byref?");
4043 unsigned attribs = info.compCompHnd->getClassAttribs(clsHnd);
4045 if (attribs & CORINFO_FLG_VALUECLASS)
4047 CorInfoType t = info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd);
4049 // Meta-data validation should ensure that CORINFO_TYPE_BYREF does
4050 // not occur here, so we may want to change this to an assert instead.
4051 if (t == CORINFO_TYPE_VOID || t == CORINFO_TYPE_BYREF || t == CORINFO_TYPE_PTR)
4056 #ifdef _TARGET_64BIT_
4057 if (t == CORINFO_TYPE_NATIVEINT || t == CORINFO_TYPE_NATIVEUINT)
4059 return typeInfo::nativeInt();
4061 #endif // _TARGET_64BIT_
4063 if (t != CORINFO_TYPE_UNDEF)
4065 return (typeInfo(JITtype2tiType(t)));
4067 else if (bashStructToRef)
4069 return (typeInfo(TI_REF, clsHnd));
4073 return (typeInfo(TI_STRUCT, clsHnd));
4076 else if (attribs & CORINFO_FLG_GENERIC_TYPE_VARIABLE)
4078 // See comment in _typeInfo.h for why we do it this way.
4079 return (typeInfo(TI_REF, clsHnd, true));
4083 return (typeInfo(TI_REF, clsHnd));
4087 /******************************************************************************/
4088 BOOL Compiler::verIsSDArray(typeInfo ti)
4090 if (ti.IsNullObjRef())
4091 { // nulls are SD arrays
4095 if (!ti.IsType(TI_REF))
4100 if (!info.compCompHnd->isSDArray(ti.GetClassHandleForObjRef()))
4107 /******************************************************************************/
4108 /* Given 'arrayObjectType' which is an array type, fetch the element type. */
4109 /* Returns an error type if anything goes wrong */
4111 typeInfo Compiler::verGetArrayElemType(typeInfo arrayObjectType)
4113 assert(!arrayObjectType.IsNullObjRef()); // you need to check for null explicitly since that is a success case
4115 if (!verIsSDArray(arrayObjectType))
4120 CORINFO_CLASS_HANDLE childClassHandle = nullptr;
4121 CorInfoType ciType = info.compCompHnd->getChildType(arrayObjectType.GetClassHandleForObjRef(), &childClassHandle);
4123 return verMakeTypeInfo(ciType, childClassHandle);
4126 /*****************************************************************************
4128 typeInfo Compiler::verParseArgSigToTypeInfo(CORINFO_SIG_INFO* sig, CORINFO_ARG_LIST_HANDLE args)
4130 CORINFO_CLASS_HANDLE classHandle;
4131 CorInfoType ciType = strip(info.compCompHnd->getArgType(sig, args, &classHandle));
4133 var_types type = JITtype2varType(ciType);
4134 if (varTypeIsGC(type))
4136 // For efficiency, getArgType only returns something in classHandle for
4137 // value types. For other types that have additional type info, you
4138 // have to call back explicitly
4139 classHandle = info.compCompHnd->getArgClass(sig, args);
4142 NO_WAY("Could not figure out Class specified in argument or local signature");
4146 return verMakeTypeInfo(ciType, classHandle);
4149 /*****************************************************************************/
4151 // This does the expensive check to figure out whether the method
4152 // needs to be verified. It is called only when we fail verification,
4153 // just before throwing the verification exception.
4155 BOOL Compiler::verNeedsVerification()
4157 // If we have previously determined that verification is NOT needed
4158 // (for example in Compiler::compCompile), that means verification is really not needed.
4159 // Return the same decision we made before.
4160 // (Note: This literally means that tiVerificationNeeded can never go from 0 to 1.)
4162 if (!tiVerificationNeeded)
4164 return tiVerificationNeeded;
4167 assert(tiVerificationNeeded);
4169 // Ok, we haven't concluded that verification is NOT needed. Consult the EE now to
4170 // obtain the answer.
4171 CorInfoCanSkipVerificationResult canSkipVerificationResult =
4172 info.compCompHnd->canSkipMethodVerification(info.compMethodHnd);
4174 // canSkipVerification will return one of the following values:
4175 // CORINFO_VERIFICATION_CANNOT_SKIP = 0, // Cannot skip verification during jit time.
4176 // CORINFO_VERIFICATION_CAN_SKIP = 1, // Can skip verification during jit time.
4177 // CORINFO_VERIFICATION_RUNTIME_CHECK = 2, // Skip verification during jit time,
4178 // but need to insert a callout to the VM to ask during runtime
4179 // whether to skip verification or not.
4181 // Set tiRuntimeCalloutNeeded if canSkipVerification() instructs us to insert a callout for runtime check
4182 if (canSkipVerificationResult == CORINFO_VERIFICATION_RUNTIME_CHECK)
4184 tiRuntimeCalloutNeeded = true;
4187 if (canSkipVerificationResult == CORINFO_VERIFICATION_DONT_JIT)
4189 // Dev10 706080 - Testers don't like the assert, so just silence it
4190 // by not using the macros that invoke debugAssert.
4194 // When tiVerificationNeeded is true, JIT will do the verification during JIT time.
4195 // The following line means we will NOT do jit time verification if canSkipVerification
4196 // returns CORINFO_VERIFICATION_CAN_SKIP or CORINFO_VERIFICATION_RUNTIME_CHECK.
4197 tiVerificationNeeded = (canSkipVerificationResult == CORINFO_VERIFICATION_CANNOT_SKIP);
4198 return tiVerificationNeeded;
4201 BOOL Compiler::verIsByRefLike(const typeInfo& ti)
4207 if (!ti.IsType(TI_STRUCT))
4211 return info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_CONTAINS_STACK_PTR;
4214 BOOL Compiler::verIsSafeToReturnByRef(const typeInfo& ti)
4216 if (ti.IsPermanentHomeByRef())
4226 BOOL Compiler::verIsBoxable(const typeInfo& ti)
4228 return (ti.IsPrimitiveType() || ti.IsObjRef() // includes boxed generic type variables
4229 || ti.IsUnboxedGenericTypeVar() ||
4230 (ti.IsType(TI_STRUCT) &&
4231 // exclude byreflike structs
4232 !(info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_CONTAINS_STACK_PTR)));
4235 // Is it a boxed value type?
4236 bool Compiler::verIsBoxedValueType(typeInfo ti)
4238 if (ti.GetType() == TI_REF)
4240 CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandleForObjRef();
4241 return !!eeIsValueClass(clsHnd);
4249 /*****************************************************************************
4251 * Check if a TailCall is legal.
4254 bool Compiler::verCheckTailCallConstraint(
4256 CORINFO_RESOLVED_TOKEN* pResolvedToken,
4257 CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, // Is this a "constrained." call on a type parameter?
4258 bool speculative // If true, won't throw if verification fails. Instead it will
4259 // return false to the caller.
4260 // If false, it will throw.
4264 CORINFO_SIG_INFO sig;
4265 unsigned int popCount = 0; // we can't pop the stack since impImportCall needs it, so
4266 // this counter is used to keep track of how many items have been virtually popped
4269 CORINFO_METHOD_HANDLE methodHnd = nullptr;
4270 CORINFO_CLASS_HANDLE methodClassHnd = nullptr;
4271 unsigned methodClassFlgs = 0;
4273 assert(impOpcodeIsCallOpcode(opcode));
4275 if (compIsForInlining())
4280 // for calli, VerifyOrReturn that this is not a virtual method
4281 if (opcode == CEE_CALLI)
4283 /* Get the call sig */
4284 eeGetSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &sig);
4286 // We don't know the target method, so we have to infer the flags, or
4287 // assume the worst-case.
4288 mflags = (sig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC;
4292 methodHnd = pResolvedToken->hMethod;
4294 mflags = info.compCompHnd->getMethodAttribs(methodHnd);
4296 // When verifying generic code we pair the method handle with its
4297 // owning class to get the exact method signature.
4298 methodClassHnd = pResolvedToken->hClass;
4299 assert(methodClassHnd);
4301 eeGetMethodSig(methodHnd, &sig, methodClassHnd);
4303 // opcode specific check
4304 methodClassFlgs = info.compCompHnd->getClassAttribs(methodClassHnd);
4307 // We must have got the methodClassHnd if opcode is not CEE_CALLI
4308 assert((methodHnd != nullptr && methodClassHnd != nullptr) || opcode == CEE_CALLI);
4310 if ((sig.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
4312 eeGetCallSiteSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &sig);
4315 // check compatibility of the arguments
4316 unsigned int argCount;
4317 argCount = sig.numArgs;
4318 CORINFO_ARG_LIST_HANDLE args;
4322 typeInfo tiDeclared = verParseArgSigToTypeInfo(&sig, args).NormaliseForStack();
4324 // check that the argument is not a byref for tailcalls
4325 VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclared), "tailcall on byrefs", speculative);
4327 // For unsafe code, we might have parameters containing a pointer to a stack location.
4328 // Disallow the tailcall for this kind.
4329 CORINFO_CLASS_HANDLE classHandle;
4330 CorInfoType ciType = strip(info.compCompHnd->getArgType(&sig, args, &classHandle));
4331 VerifyOrReturnSpeculative(ciType != CORINFO_TYPE_PTR, "tailcall on CORINFO_TYPE_PTR", speculative);
4333 args = info.compCompHnd->getArgNext(args);
4337 popCount += sig.numArgs;
4339 // check for 'this' which is on non-static methods, not called via NEWOBJ
4340 if (!(mflags & CORINFO_FLG_STATIC))
4342 // Always update the popCount.
4343 // This is crucial for the stack calculation to be correct.
4344 typeInfo tiThis = impStackTop(popCount).seTypeInfo;
4347 if (opcode == CEE_CALLI)
4349 // For CALLI, we don't know the methodClassHnd. Therefore, let's check the "this" object
4351 if (tiThis.IsValueClass())
4355 VerifyOrReturnSpeculative(!verIsByRefLike(tiThis), "byref in tailcall", speculative);
4359 // Check type compatibility of the this argument
4360 typeInfo tiDeclaredThis = verMakeTypeInfo(methodClassHnd);
4361 if (tiDeclaredThis.IsValueClass())
4363 tiDeclaredThis.MakeByRef();
4366 VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclaredThis), "byref in tailcall", speculative);
4370 // Tail calls on constrained calls should be illegal too:
4371 // when instantiated at a value type, a constrained call may pass the address of a stack allocated value
4372 VerifyOrReturnSpeculative(!pConstrainedResolvedToken, "byref in constrained tailcall", speculative);
4374 // Get the exact view of the signature for an array method
4375 if (sig.retType != CORINFO_TYPE_VOID)
4377 if (methodClassFlgs & CORINFO_FLG_ARRAY)
4379 assert(opcode != CEE_CALLI);
4380 eeGetCallSiteSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &sig);
4384 typeInfo tiCalleeRetType = verMakeTypeInfo(sig.retType, sig.retTypeClass);
4385 typeInfo tiCallerRetType =
4386 verMakeTypeInfo(info.compMethodInfo->args.retType, info.compMethodInfo->args.retTypeClass);
4388 // void return type gets morphed into the error type, so we have to treat it specially here
4389 if (sig.retType == CORINFO_TYPE_VOID)
4391 VerifyOrReturnSpeculative(info.compMethodInfo->args.retType == CORINFO_TYPE_VOID, "tailcall return mismatch",
4396 VerifyOrReturnSpeculative(tiCompatibleWith(NormaliseForStack(tiCalleeRetType),
4397 NormaliseForStack(tiCallerRetType), true),
4398 "tailcall return mismatch", speculative);
4401 // for tailcall, stack must be empty
4402 VerifyOrReturnSpeculative(verCurrentState.esStackDepth == popCount, "stack non-empty on tailcall", speculative);
4404 return true; // Yes, tailcall is legal
4407 /*****************************************************************************
4409 * Checks the IL verification rules for the call
4412 void Compiler::verVerifyCall(OPCODE opcode,
4413 CORINFO_RESOLVED_TOKEN* pResolvedToken,
4414 CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
4417 const BYTE* delegateCreateStart,
4418 const BYTE* codeAddr,
4419 CORINFO_CALL_INFO* callInfo DEBUGARG(const char* methodName))
4422 CORINFO_SIG_INFO* sig = nullptr;
4423 unsigned int popCount = 0; // we can't pop the stack since impImportCall needs it, so
4424 // this counter is used to keep track of how many items have been virtually popped
4427 // for calli, VerifyOrReturn that this is not a virtual method
4428 if (opcode == CEE_CALLI)
4430 Verify(false, "Calli not verifiable");
4434 //<NICE> It would be nice to cache the rest of it, but eeFindMethod is the big ticket item.
4435 mflags = callInfo->verMethodFlags;
4437 sig = &callInfo->verSig;
4439 if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
4441 eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
4444 // opcode specific check
4445 unsigned methodClassFlgs = callInfo->classFlags;
4449 // cannot do callvirt on valuetypes
4450 VerifyOrReturn(!(methodClassFlgs & CORINFO_FLG_VALUECLASS), "callVirt on value class");
4451 VerifyOrReturn(sig->hasThis(), "CallVirt on static method");
4456 assert(!tailCall); // Importer should not allow this
4457 VerifyOrReturn((mflags & CORINFO_FLG_CONSTRUCTOR) && !(mflags & CORINFO_FLG_STATIC),
4458 "newobj must be on instance");
4460 if (methodClassFlgs & CORINFO_FLG_DELEGATE)
4462 VerifyOrReturn(sig->numArgs == 2, "wrong number args to delegate ctor");
4463 typeInfo tiDeclaredObj = verParseArgSigToTypeInfo(sig, sig->args).NormaliseForStack();
4464 typeInfo tiDeclaredFtn =
4465 verParseArgSigToTypeInfo(sig, info.compCompHnd->getArgNext(sig->args)).NormaliseForStack();
4466 VerifyOrReturn(tiDeclaredFtn.IsNativeIntType(), "ftn arg needs to be a native int type");
4468 assert(popCount == 0);
4469 typeInfo tiActualObj = impStackTop(1).seTypeInfo;
4470 typeInfo tiActualFtn = impStackTop(0).seTypeInfo;
4472 VerifyOrReturn(tiActualFtn.IsMethod(), "delegate needs method as first arg");
4473 VerifyOrReturn(tiCompatibleWith(tiActualObj, tiDeclaredObj, true), "delegate object type mismatch");
4474 VerifyOrReturn(tiActualObj.IsNullObjRef() || tiActualObj.IsType(TI_REF),
4475 "delegate object type mismatch");
4477 CORINFO_CLASS_HANDLE objTypeHandle =
4478 tiActualObj.IsNullObjRef() ? nullptr : tiActualObj.GetClassHandleForObjRef();
4480 // the method signature must be compatible with the delegate's invoke method
4482 // check that for virtual functions, the type of the object used to get the
4483 // ftn ptr is the same as the type of the object passed to the delegate ctor.
4484 // since this is a bit of work to determine in general, we pattern match stylized code sequences
4487 // the delegate creation code check, which used to be done later, is now done here
4488 // so we can read delegateMethodRef directly from
4489 // the preceding LDFTN or CEE_LDVIRTFN instruction sequence;
4490 // we then use it in our call to isCompatibleDelegate().
4492 mdMemberRef delegateMethodRef = mdMemberRefNil;
4493 VerifyOrReturn(verCheckDelegateCreation(delegateCreateStart, codeAddr, delegateMethodRef),
4494 "must create delegates with certain IL");
4496 CORINFO_RESOLVED_TOKEN delegateResolvedToken;
4497 delegateResolvedToken.tokenContext = impTokenLookupContextHandle;
4498 delegateResolvedToken.tokenScope = info.compScopeHnd;
4499 delegateResolvedToken.token = delegateMethodRef;
4500 delegateResolvedToken.tokenType = CORINFO_TOKENKIND_Method;
4501 info.compCompHnd->resolveToken(&delegateResolvedToken);
4503 CORINFO_CALL_INFO delegateCallInfo;
4504 eeGetCallInfo(&delegateResolvedToken, nullptr /* constraint typeRef */,
4505 addVerifyFlag(CORINFO_CALLINFO_SECURITYCHECKS), &delegateCallInfo);
4507 BOOL isOpenDelegate = FALSE;
4508 VerifyOrReturn(info.compCompHnd->isCompatibleDelegate(objTypeHandle, delegateResolvedToken.hClass,
4509 tiActualFtn.GetMethod(), pResolvedToken->hClass,
4511 "function incompatible with delegate");
4513 // check the constraints on the target method
4514 VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(delegateResolvedToken.hClass),
4515 "delegate target has unsatisfied class constraints");
4516 VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(delegateResolvedToken.hClass,
4517 tiActualFtn.GetMethod()),
4518 "delegate target has unsatisfied method constraints");
4520 // See ECMA spec section 1.8.1.5.2 (Delegating via instance dispatch)
4521 // for additional verification rules for delegates
4522 CORINFO_METHOD_HANDLE actualMethodHandle = tiActualFtn.GetMethod();
4523 DWORD actualMethodAttribs = info.compCompHnd->getMethodAttribs(actualMethodHandle);
4524 if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr))
4527 if ((actualMethodAttribs & CORINFO_FLG_VIRTUAL) && ((actualMethodAttribs & CORINFO_FLG_FINAL) == 0)
4529 && StrictCheckForNonVirtualCallToVirtualMethod()
4533 if (info.compCompHnd->shouldEnforceCallvirtRestriction(info.compScopeHnd))
4535 VerifyOrReturn(tiActualObj.IsThisPtr() && lvaIsOriginalThisReadOnly() ||
4536 verIsBoxedValueType(tiActualObj),
4537 "The 'this' parameter to the call must be either the calling method's "
4538 "'this' parameter or "
4539 "a boxed value type.");
4544 if (actualMethodAttribs & CORINFO_FLG_PROTECTED)
4546 BOOL targetIsStatic = actualMethodAttribs & CORINFO_FLG_STATIC;
4548 Verify(targetIsStatic || !isOpenDelegate,
4549 "Unverifiable creation of an open instance delegate for a protected member.");
4551 CORINFO_CLASS_HANDLE instanceClassHnd = (tiActualObj.IsNullObjRef() || targetIsStatic)
4553 : tiActualObj.GetClassHandleForObjRef();
4555 // In the case of protected methods, it is a requirement that the 'this'
4556 // pointer be a subclass of the current context. Perform this check.
4557 Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
4558 "Accessing protected method through wrong type.");
4563 // fall thru to default checks
4565 VerifyOrReturn(!(mflags & CORINFO_FLG_ABSTRACT), "method abstract");
4567 VerifyOrReturn(!((mflags & CORINFO_FLG_CONSTRUCTOR) && (methodClassFlgs & CORINFO_FLG_DELEGATE)),
4568 "can only newobj a delegate constructor");
4570 // check compatibility of the arguments
4571 unsigned int argCount;
4572 argCount = sig->numArgs;
4573 CORINFO_ARG_LIST_HANDLE args;
4577 typeInfo tiActual = impStackTop(popCount + argCount).seTypeInfo;
4579 typeInfo tiDeclared = verParseArgSigToTypeInfo(sig, args).NormaliseForStack();
4580 VerifyOrReturn(tiCompatibleWith(tiActual, tiDeclared, true), "type mismatch");
4582 args = info.compCompHnd->getArgNext(args);
4588 popCount += sig->numArgs;
4590 // check for 'this', which is on non-static methods not called via NEWOBJ
4591 CORINFO_CLASS_HANDLE instanceClassHnd = info.compClassHnd;
4592 if (!(mflags & CORINFO_FLG_STATIC) && (opcode != CEE_NEWOBJ))
4594 typeInfo tiThis = impStackTop(popCount).seTypeInfo;
4597 // If it is null, we assume we can access it (since it will AV shortly)
4598 // If it is anything but a reference class, there is no hierarchy, so
4599 // again, we don't need the precise instance class to compute 'protected' access
4600 if (tiThis.IsType(TI_REF))
4602 instanceClassHnd = tiThis.GetClassHandleForObjRef();
4605 // Check type compatibility of the this argument
4606 typeInfo tiDeclaredThis = verMakeTypeInfo(pResolvedToken->hClass);
4607 if (tiDeclaredThis.IsValueClass())
4609 tiDeclaredThis.MakeByRef();
4612 // If this is a call to the base class .ctor, set thisPtr Init for this block.
4614 if (mflags & CORINFO_FLG_CONSTRUCTOR)
4616 if (verTrackObjCtorInitState && tiThis.IsThisPtr() &&
4617 verIsCallToInitThisPtr(info.compClassHnd, pResolvedToken->hClass))
4619 assert(verCurrentState.thisInitialized !=
4620 TIS_Bottom); // This should never be the case just from the logic of the verifier.
4621 VerifyOrReturn(verCurrentState.thisInitialized == TIS_Uninit,
4622 "Call to base class constructor when 'this' is possibly initialized");
4623 // Otherwise, 'this' is now initialized.
4624 verCurrentState.thisInitialized = TIS_Init;
4625 tiThis.SetInitialisedObjRef();
4629 // We allow direct calls to value type constructors
4630 // NB: we have to check that the contents of tiThis is a value type, otherwise we could use a
4631 // constrained callvirt to illegally re-enter a .ctor on a value of reference type.
4632 VerifyOrReturn(tiThis.IsByRef() && DereferenceByRef(tiThis).IsValueClass(),
4633 "Bad call to a constructor");
4637 if (pConstrainedResolvedToken != nullptr)
4639 VerifyOrReturn(tiThis.IsByRef(), "non-byref this type in constrained call");
4641 typeInfo tiConstraint = verMakeTypeInfo(pConstrainedResolvedToken->hClass);
4643 // We just dereference this and test for equality
4644 tiThis.DereferenceByRef();
4645 VerifyOrReturn(typeInfo::AreEquivalent(tiThis, tiConstraint),
4646 "this type mismatch with constrained type operand");
4648 // Now pretend the this type is the boxed constrained type, for the sake of subsequent checks
4649 tiThis = typeInfo(TI_REF, pConstrainedResolvedToken->hClass);
4652 // To support direct calls on readonly byrefs, just pretend tiDeclaredThis is readonly too
4653 if (tiDeclaredThis.IsByRef() && tiThis.IsReadonlyByRef())
4655 tiDeclaredThis.SetIsReadonlyByRef();
4658 VerifyOrReturn(tiCompatibleWith(tiThis, tiDeclaredThis, true), "this type mismatch");
4660 if (tiThis.IsByRef())
4662 // Find the actual type where the method exists (as opposed to what is declared
4663 // in the metadata). This is to prevent passing a byref as the "this" argument
4664 // while calling methods like System.ValueType.GetHashCode() which expect boxed objects.
4666 CORINFO_CLASS_HANDLE actualClassHnd = info.compCompHnd->getMethodClass(pResolvedToken->hMethod);
4667 VerifyOrReturn(eeIsValueClass(actualClassHnd),
4668 "Call to base type of valuetype (which is never a valuetype)");
4671 // Rules for non-virtual call to a non-final virtual method:
4674 // The "this" pointer is considered to be "possibly written" if
4675 // 1. Its address has been taken (LDARGA 0) anywhere in the method.
4677 // 2. It has been stored to (STARG.0) anywhere in the method.
4679 // A non-virtual call to a non-final virtual method is only allowed if
4680 // 1. The this pointer passed to the callee is an instance of a boxed value type.
4682 // 2. The this pointer passed to the callee is the current method's this pointer.
4683 // (and) The current method's this pointer is not "possibly written".
4685 // Thus the rule is that if you assign to this ANYWHERE you can't make "base" calls to
4686 // virtual methods. (Luckily this does not affect .ctors, since they are not virtual).
4687 // This is stronger than is strictly needed, but implementing a laxer rule is significantly
4688 // harder and more error prone.
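// As an illustrative sketch (hypothetical IL, not from any particular test), the rejected shape is:
//
//     ldarga.s 0                          // 'this' has its address taken somewhere in the method
//     ...
//     ldarg.0
//     call instance void Base::Virt()     // non-virtual call to a non-final virtual method
//
// Without the ldarga.s 0 (or starg.0) on 'this', the same call would satisfy rule 2 above and verify.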
4690 if (opcode == CEE_CALL && (mflags & CORINFO_FLG_VIRTUAL) && ((mflags & CORINFO_FLG_FINAL) == 0)
4692 && StrictCheckForNonVirtualCallToVirtualMethod()
4696 if (info.compCompHnd->shouldEnforceCallvirtRestriction(info.compScopeHnd))
4699 tiThis.IsThisPtr() && lvaIsOriginalThisReadOnly() || verIsBoxedValueType(tiThis),
4700 "The 'this' parameter to the call must be either the calling method's 'this' parameter or "
4701 "a boxed value type.");
4706 // check any constraints on the callee's class and type parameters
4707 VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(pResolvedToken->hClass),
4708 "method has unsatisfied class constraints");
4709 VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(pResolvedToken->hClass, pResolvedToken->hMethod),
4710 "method has unsatisfied method constraints");
4712 if (mflags & CORINFO_FLG_PROTECTED)
4714 VerifyOrReturn(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
4715 "Can't access protected method");
4718 // Get the exact view of the signature for an array method
4719 if (sig->retType != CORINFO_TYPE_VOID)
4721 eeGetMethodSig(pResolvedToken->hMethod, sig, pResolvedToken->hClass);
4724 // "readonly." prefixed calls only allowed for the Address operation on arrays.
4725 // The methods supported by array types are under the control of the EE
4726 // so we can trust that only the Address operation returns a byref.
4729 typeInfo tiCalleeRetType = verMakeTypeInfo(sig->retType, sig->retTypeClass);
4730 VerifyOrReturn((methodClassFlgs & CORINFO_FLG_ARRAY) && tiCalleeRetType.IsByRef(),
4731 "unexpected use of readonly prefix");
4734 // Verify the tailcall
4737 verCheckTailCallConstraint(opcode, pResolvedToken, pConstrainedResolvedToken, false);
4741 /*****************************************************************************
4742 * Checks that a delegate creation is done using the following pattern:
 * dup
4744 * ldvirtftn targetMemberRef
 * or
4746 * ldftn targetMemberRef
4748 * 'delegateCreateStart' points at the last dup or ldftn in this basic block (null if
4749 * not in this basic block)
4751 * targetMemberRef is read from the code sequence.
4752 * targetMemberRef is validated iff verificationNeeded.
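 *
 * As a hedged illustration (hypothetical IL, identifiers invented), the two accepted shapes are:
 *
 *     ldftn      instance void C::Handler()
 *     newobj     instance void SomeDelegate::.ctor(object, native int)
 *
 * or, for the virtual-dispatch form:
 *
 *     dup
 *     ldvirtftn  instance void C::Handler()
 *     newobj     instance void SomeDelegate::.ctor(object, native int)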
4755 BOOL Compiler::verCheckDelegateCreation(const BYTE* delegateCreateStart,
4756 const BYTE* codeAddr,
4757 mdMemberRef& targetMemberRef)
4759 if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr))
4761 targetMemberRef = getU4LittleEndian(&delegateCreateStart[2]);
4764 else if (impIsDUP_LDVIRTFTN_TOKEN(delegateCreateStart, codeAddr))
4766 targetMemberRef = getU4LittleEndian(&delegateCreateStart[3]);
4773 typeInfo Compiler::verVerifySTIND(const typeInfo& tiTo, const typeInfo& value, const typeInfo& instrType)
4775 Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
4776 typeInfo ptrVal = verVerifyLDIND(tiTo, instrType);
4777 typeInfo normPtrVal = typeInfo(ptrVal).NormaliseForStack();
4778 if (!tiCompatibleWith(value, normPtrVal, true))
4780 Verify(tiCompatibleWith(value, normPtrVal, true), "type mismatch");
4781 compUnsafeCastUsed = true;
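// Verify an indirect load: 'ptr' must be a byref whose pointee type is consistent with the type
// implied by the instruction ('instrType'); on a mismatch a verification error is raised (and
// compUnsafeCastUsed is noted). The pointee type is returned for use by the caller.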
4786 typeInfo Compiler::verVerifyLDIND(const typeInfo& ptr, const typeInfo& instrType)
4788 assert(!instrType.IsStruct());
4793 ptrVal = DereferenceByRef(ptr);
4794 if (instrType.IsObjRef() && !ptrVal.IsObjRef())
4796 Verify(false, "bad pointer");
4797 compUnsafeCastUsed = true;
4799 else if (!instrType.IsObjRef() && !typeInfo::AreEquivalent(instrType, ptrVal))
4801 Verify(false, "pointer not consistent with instr");
4802 compUnsafeCastUsed = true;
4807 Verify(false, "pointer not byref");
4808 compUnsafeCastUsed = true;
4814 // Verify that the field is used properly. 'tiThis' is NULL for statics,
4815 // 'fieldFlags' is the field's attributes, and mutator is TRUE if it is a
4816 // ld*flda or a st*fld.
4817 // 'enclosingClass' is given if we are accessing a field in some specific type.
4819 void Compiler::verVerifyField(CORINFO_RESOLVED_TOKEN* pResolvedToken,
4820 const CORINFO_FIELD_INFO& fieldInfo,
4821 const typeInfo* tiThis,
4823 BOOL allowPlainStructAsThis)
4825 CORINFO_CLASS_HANDLE enclosingClass = pResolvedToken->hClass;
4826 unsigned fieldFlags = fieldInfo.fieldFlags;
4827 CORINFO_CLASS_HANDLE instanceClass =
4828 info.compClassHnd; // for statics, we imagine the instance is the current class.
4830 bool isStaticField = ((fieldFlags & CORINFO_FLG_FIELD_STATIC) != 0);
4833 Verify(!(fieldFlags & CORINFO_FLG_FIELD_UNMANAGED), "mutating an RVA based static");
4834 if ((fieldFlags & CORINFO_FLG_FIELD_FINAL))
4836 Verify((info.compFlags & CORINFO_FLG_CONSTRUCTOR) && enclosingClass == info.compClassHnd &&
4837 info.compIsStatic == isStaticField,
4838 "bad use of initonly field (set or address taken)");
4842 if (tiThis == nullptr)
4844 Verify(isStaticField, "used static opcode with non-static field");
4848 typeInfo tThis = *tiThis;
4850 if (allowPlainStructAsThis && tThis.IsValueClass())
4855 // If it is null, we assume we can access it (since it will AV shortly)
4856 // If it is anything but a reference class, there is no hierarchy, so
4857 // again, we don't need the precise instance class to compute 'protected' access
4858 if (tiThis->IsType(TI_REF))
4860 instanceClass = tiThis->GetClassHandleForObjRef();
4863 // Note that even if the field is static, we require that the this pointer
4864 // satisfy the same constraints as a non-static field. This happens to
4865 // be simpler and seems reasonable.
4866 typeInfo tiDeclaredThis = verMakeTypeInfo(enclosingClass);
4867 if (tiDeclaredThis.IsValueClass())
4869 tiDeclaredThis.MakeByRef();
4871 // we allow read-only tThis, on any field access (even stores!), because if the
4872 // class implementor wants to prohibit stores he should make the field private.
4873 // we do this by setting the read-only bit on the type we compare tThis to.
4874 tiDeclaredThis.SetIsReadonlyByRef();
4876 else if (verTrackObjCtorInitState && tThis.IsThisPtr())
4878 // Any field access is legal on "uninitialized" this pointers.
4879 // The easiest way to implement this is to simply set the
4880 // initialized bit for the duration of the type check on the
4881 // field access only. It does not change the state of the "this"
4882 // for the function as a whole. Note that the "tThis" is a copy
4883 // of the original "this" type (*tiThis) passed in.
4884 tThis.SetInitialisedObjRef();
4887 Verify(tiCompatibleWith(tThis, tiDeclaredThis, true), "this type mismatch");
4890 // Presently the JIT does not check that we don't store or take the address of init-only fields
4891 // since we cannot guarantee their immutability and it is not a security issue.
4893 // check any constraints on the field's class --- accessing the field might cause a class constructor to run.
4894 VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(enclosingClass),
4895 "field has unsatisfied class constraints");
4896 if (fieldFlags & CORINFO_FLG_FIELD_PROTECTED)
4898 Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClass),
4899 "Accessing protected method through wrong type.");
4903 void Compiler::verVerifyCond(const typeInfo& tiOp1, const typeInfo& tiOp2, unsigned opcode)
4905 if (tiOp1.IsNumberType())
4907 #ifdef _TARGET_64BIT_
4908 Verify(tiCompatibleWith(tiOp1, tiOp2, true), "Cond type mismatch");
4909 #else // _TARGET_64BIT
4910 // [10/17/2013] Consider changing this: to put on my verification lawyer hat,
4911 // this is non-conforming to the ECMA Spec: types don't have to be equivalent,
4912 // but compatible, since we can coalesce native int with int32 (see section III.1.5).
4913 Verify(typeInfo::AreEquivalent(tiOp1, tiOp2), "Cond type mismatch");
4914 #endif // !_TARGET_64BIT_
4916 else if (tiOp1.IsObjRef())
4928 Verify(FALSE, "Cond not allowed on object types");
4930 Verify(tiOp2.IsObjRef(), "Cond type mismatch");
4932 else if (tiOp1.IsByRef())
4934 Verify(tiOp2.IsByRef(), "Cond type mismatch");
4938 Verify(tiOp1.IsMethod() && tiOp2.IsMethod(), "Cond type mismatch");
4942 void Compiler::verVerifyThisPtrInitialised()
4944 if (verTrackObjCtorInitState)
4946 Verify(verCurrentState.thisInitialized == TIS_Init, "this ptr is not initialized");
4950 BOOL Compiler::verIsCallToInitThisPtr(CORINFO_CLASS_HANDLE context, CORINFO_CLASS_HANDLE target)
4952 // Either target == context, in which case we are calling an alternate .ctor,
4953 // or target is the immediate parent of context.
4955 return ((target == context) || (target == info.compCompHnd->getParentType(context)));
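/*****************************************************************************
 *
 *  Import an LDVIRTFTN: given the 'this' tree and the resolved method token, build the tree
 *  that computes the target address of the virtual method, via a ReadyToRun helper call when
 *  available and otherwise via CORINFO_HELP_VIRTUAL_FUNC_PTR.
 */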
4958 GenTreePtr Compiler::impImportLdvirtftn(GenTreePtr thisPtr,
4959 CORINFO_RESOLVED_TOKEN* pResolvedToken,
4960 CORINFO_CALL_INFO* pCallInfo)
4962 if ((pCallInfo->methodFlags & CORINFO_FLG_EnC) && !(pCallInfo->classFlags & CORINFO_FLG_INTERFACE))
4964 NO_WAY("Virtual call to a function added via EnC is not supported");
4967 #ifdef FEATURE_READYTORUN_COMPILER
4968 if (opts.IsReadyToRun())
4970 if (!pCallInfo->exactContextNeedsRuntimeLookup)
4972 GenTreeCall* call = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_VIRTUAL_FUNC_PTR, TYP_I_IMPL, GTF_EXCEPT,
4973 gtNewArgList(thisPtr));
4975 call->setEntryPoint(pCallInfo->codePointerLookup.constLookup);
4980 // We need a runtime lookup. CoreRT has a ReadyToRun helper for that too.
4981 if (IsTargetAbi(CORINFO_CORERT_ABI))
4983 GenTreePtr ctxTree = getRuntimeContextTree(pCallInfo->codePointerLookup.lookupKind.runtimeLookupKind);
4985 return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL,
4986 gtNewArgList(ctxTree), &pCallInfo->codePointerLookup.lookupKind);
4991 // Get the exact descriptor for the static callsite
4992 GenTreePtr exactTypeDesc = impParentClassTokenToHandle(pResolvedToken);
4993 if (exactTypeDesc == nullptr)
4994 { // compDonotInline()
4998 GenTreePtr exactMethodDesc = impTokenToHandle(pResolvedToken);
4999 if (exactMethodDesc == nullptr)
5000 { // compDonotInline()
5004 GenTreeArgList* helpArgs = gtNewArgList(exactMethodDesc);
5006 helpArgs = gtNewListNode(exactTypeDesc, helpArgs);
5008 helpArgs = gtNewListNode(thisPtr, helpArgs);
5010 // Call helper function. This gets the target address of the final destination callsite.
5012 return gtNewHelperCallNode(CORINFO_HELP_VIRTUAL_FUNC_PTR, TYP_I_IMPL, GTF_EXCEPT, helpArgs);
5015 /*****************************************************************************
5017 * Build and import a box node
5020 void Compiler::impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken)
5022 // Get the tree for the type handle for the boxed object. In the case
5023 // of shared generic code or ngen'd code this might be an embedded indirection.
5025 // Note we can only do this if the class constructor has been called;
5026 // we can always do it on primitive types.
5028 GenTreePtr op1 = nullptr;
5029 GenTreePtr op2 = nullptr;
5032 impSpillSpecialSideEff();
5034 // Now get the expression to box from the stack.
5035 CORINFO_CLASS_HANDLE operCls;
5036 GenTreePtr exprToBox = impPopStack(operCls).val;
5038 CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pResolvedToken->hClass);
5039 if (boxHelper == CORINFO_HELP_BOX)
5041 // we are doing 'normal' boxing. This means that we can inline the box operation
5042 // Box(expr) gets morphed into
5043 // temp = new(clsHnd)
5044 // cpobj(temp+4, expr, clsHnd)
5046 // The code paths differ slightly below for structs and primitives because
5047 // "cpobj" differs in these cases. In one case you get
5048 // impAssignStructPtr(temp+4, expr, clsHnd)
5049 // and in the other you get an assignment through a GT_IND of the primitive type.
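// Roughly, the trees built below have this shape (a sketch, not an exact GenTree dump):
//     asgStmt:  boxTemp = allocation helper call for clsHnd
//     op1:      GT_BOX( GT_COMMA( <store of exprToBox into [boxTemp + sizeof(void*)]>, boxTemp ) )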
5052 if (impBoxTempInUse || impBoxTemp == BAD_VAR_NUM)
5054 impBoxTemp = lvaGrabTemp(true DEBUGARG("Box Helper"));
5057 // needs to stay in use until this box expression is appended to
5058 // some other node. We approximate this by keeping it alive until
5059 // the opcode stack becomes empty
5060 impBoxTempInUse = true;
5062 #ifdef FEATURE_READYTORUN_COMPILER
5063 bool usingReadyToRunHelper = false;
5065 if (opts.IsReadyToRun())
5067 op1 = impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_NEW, TYP_REF);
5068 usingReadyToRunHelper = (op1 != nullptr);
5071 if (!usingReadyToRunHelper)
5074 // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
5075 // and the newfast call with a single call to a dynamic R2R cell that will:
5076 // 1) Load the context
5077 // 2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
5078 // 3) Allocate and return the new object for boxing
5079 // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
5081 // Ensure that the value class is restored
5082 op2 = impTokenToHandle(pResolvedToken, nullptr, TRUE /* mustRestoreHandle */);
5084 { // compDonotInline()
5088 op1 = gtNewHelperCallNode(info.compCompHnd->getNewHelper(pResolvedToken, info.compMethodHnd), TYP_REF, 0,
5092 /* Remember that this basic block contains 'new' of an object */
5093 compCurBB->bbFlags |= BBF_HAS_NEWOBJ;
5095 GenTreePtr asg = gtNewTempAssign(impBoxTemp, op1);
5097 GenTreePtr asgStmt = impAppendTree(asg, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
5099 op1 = gtNewLclvNode(impBoxTemp, TYP_REF);
5100 op2 = gtNewIconNode(sizeof(void*), TYP_I_IMPL);
5101 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, op2);
5103 if (varTypeIsStruct(exprToBox))
5105 assert(info.compCompHnd->getClassSize(pResolvedToken->hClass) == info.compCompHnd->getClassSize(operCls));
5106 op1 = impAssignStructPtr(op1, exprToBox, operCls, (unsigned)CHECK_SPILL_ALL);
5110 lclTyp = exprToBox->TypeGet();
5111 if (lclTyp == TYP_BYREF)
5113 lclTyp = TYP_I_IMPL;
5115 CorInfoType jitType = info.compCompHnd->asCorInfoType(pResolvedToken->hClass);
5116 if (impIsPrimitive(jitType))
5118 lclTyp = JITtype2varType(jitType);
5120 assert(genActualType(exprToBox->TypeGet()) == genActualType(lclTyp) ||
5121 varTypeIsFloating(lclTyp) == varTypeIsFloating(exprToBox->TypeGet()));
5122 var_types srcTyp = exprToBox->TypeGet();
5123 var_types dstTyp = lclTyp;
5125 if (srcTyp != dstTyp)
5127 assert((varTypeIsFloating(srcTyp) && varTypeIsFloating(dstTyp)) ||
5128 (varTypeIsIntegral(srcTyp) && varTypeIsIntegral(dstTyp)));
5129 exprToBox = gtNewCastNode(dstTyp, exprToBox, dstTyp);
5131 op1 = gtNewAssignNode(gtNewOperNode(GT_IND, lclTyp, op1), exprToBox);
5134 op2 = gtNewLclvNode(impBoxTemp, TYP_REF);
5135 op1 = gtNewOperNode(GT_COMMA, TYP_REF, op1, op2);
5137 // Record that this is a "box" node.
5138 op1 = new (this, GT_BOX) GenTreeBox(TYP_REF, op1, asgStmt);
5140 // If it is a value class, mark the "box" node. We can use this information
5141 // to optimise several cases:
5142 // "box(x) == null" --> false
5143 // "(box(x)).CallAnInterfaceMethod(...)" --> "(&x).CallAValueTypeMethod"
5144 // "(box(x)).CallAnObjectMethod(...)" --> "(&x).CallAValueTypeMethod"
5146 op1->gtFlags |= GTF_BOX_VALUE;
5147 assert(op1->IsBoxedValue());
5148 assert(asg->gtOper == GT_ASG);
5152 // Don't optimize, just call the helper and be done with it
5154 // Ensure that the value class is restored
5155 op2 = impTokenToHandle(pResolvedToken, nullptr, TRUE /* mustRestoreHandle */);
5157 { // compDonotInline()
5161 GenTreeArgList* args = gtNewArgList(op2, impGetStructAddr(exprToBox, operCls, (unsigned)CHECK_SPILL_ALL, true));
5162 op1 = gtNewHelperCallNode(boxHelper, TYP_REF, GTF_EXCEPT, args);
5165 /* Push the result back on the stack, */
5166 /* even if clsHnd is a value class we want the TI_REF */
5167 typeInfo tiRetVal = typeInfo(TI_REF, info.compCompHnd->getTypeForBox(pResolvedToken->hClass));
5168 impPushOnStack(op1, tiRetVal);
5171 //------------------------------------------------------------------------
5172 // impImportNewObjArray: Build and import `new` of multi-dimensional array
5175 // pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized
5176 // by a call to CEEInfo::resolveToken().
5177 // pCallInfo - The CORINFO_CALL_INFO that has been initialized
5178 // by a call to CEEInfo::getCallInfo().
5181 // The multi-dimensional array constructor arguments (array dimensions) are
5182 // pushed on the IL stack on entry to this method.
5185 // Multi-dimensional array constructors are imported as calls to a JIT
5186 // helper, not as regular calls.
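// For example (illustrative only), C# code such as "new int[2, 3]" reaches here as a newobj on the
// array type's constructor with the dimension values 2 and 3 already on the IL stack; instead of a
// regular call, one of the CORINFO_HELP_NEW_MDARR* helper calls built below is emitted.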
5188 void Compiler::impImportNewObjArray(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo)
5190 GenTreePtr classHandle = impParentClassTokenToHandle(pResolvedToken);
5191 if (classHandle == nullptr)
5192 { // compDonotInline()
5196 assert(pCallInfo->sig.numArgs);
5199 GenTreeArgList* args;
5202 // There are two different JIT helpers that can be used to allocate
5203 // multi-dimensional arrays:
5205 // - CORINFO_HELP_NEW_MDARR - takes the array dimensions as varargs.
5206 // This variant is deprecated. It should be eventually removed.
5208 // - CORINFO_HELP_NEW_MDARR_NONVARARG - takes the array dimensions as
5209 // pointer to block of int32s. This variant is more portable.
5211 // The non-varargs helper is enabled for CoreRT only for now. Enabling this
5212 // unconditionally would require ReadyToRun version bump.
5214 CLANG_FORMAT_COMMENT_ANCHOR;
5216 #if COR_JIT_EE_VERSION > 460
5217 if (!opts.IsReadyToRun() || IsTargetAbi(CORINFO_CORERT_ABI))
5219 LclVarDsc* newObjArrayArgsVar;
5221 // Reuse the temp used to pass the array dimensions to avoid bloating
5222 // the stack frame in case there are multiple calls to multi-dim array
5223 // constructors within a single method.
5224 if (lvaNewObjArrayArgs == BAD_VAR_NUM)
5226 lvaNewObjArrayArgs = lvaGrabTemp(false DEBUGARG("NewObjArrayArgs"));
5227 lvaTable[lvaNewObjArrayArgs].lvType = TYP_BLK;
5228 lvaTable[lvaNewObjArrayArgs].lvExactSize = 0;
5231 // Increase size of lvaNewObjArrayArgs to be the largest size needed to hold 'numArgs' integers
5232 // for our call to CORINFO_HELP_NEW_MDARR_NONVARARG.
5233 lvaTable[lvaNewObjArrayArgs].lvExactSize =
5234 max(lvaTable[lvaNewObjArrayArgs].lvExactSize, pCallInfo->sig.numArgs * sizeof(INT32));
5236 // The side-effects may include allocation of more multi-dimensional arrays. Spill all side-effects
5237 // to ensure that the shared lvaNewObjArrayArgs local variable is only ever used to pass arguments
5238 // to one allocation at a time.
5239 impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportNewObjArray"));
5242 // The arguments of the CORINFO_HELP_NEW_MDARR_NONVARARG helper are:
5243 // - Array class handle
5244 // - Number of dimension arguments
5245 // - Pointer to block of int32 dimensions - address of lvaNewObjArrayArgs temp.
5248 node = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK);
5249 node = gtNewOperNode(GT_ADDR, TYP_I_IMPL, node);
5251 // Pop the dimension arguments from the stack one at a time and store them
5252 // into the lvaNewObjArrayArgs temp.
5253 for (int i = pCallInfo->sig.numArgs - 1; i >= 0; i--)
5255 GenTreePtr arg = impImplicitIorI4Cast(impPopStack().val, TYP_INT);
5257 GenTreePtr dest = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK);
5258 dest = gtNewOperNode(GT_ADDR, TYP_I_IMPL, dest);
5259 dest = gtNewOperNode(GT_ADD, TYP_I_IMPL, dest,
5260 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(INT32) * i));
5261 dest = gtNewOperNode(GT_IND, TYP_INT, dest);
5263 node = gtNewOperNode(GT_COMMA, node->TypeGet(), gtNewAssignNode(dest, arg), node);
5266 args = gtNewArgList(node);
5268 // pass number of arguments to the helper
5269 args = gtNewListNode(gtNewIconNode(pCallInfo->sig.numArgs), args);
5271 args = gtNewListNode(classHandle, args);
5273 node = gtNewHelperCallNode(CORINFO_HELP_NEW_MDARR_NONVARARG, TYP_REF, 0, args);
5279 // The varargs helper needs the type and method handles as last
5280 // and last-1 param (this is a cdecl call, so args will be
5281 // pushed in reverse order on the CPU stack)
5284 args = gtNewArgList(classHandle);
5286 // pass number of arguments to the helper
5287 args = gtNewListNode(gtNewIconNode(pCallInfo->sig.numArgs), args);
5289 unsigned argFlags = 0;
5290 args = impPopList(pCallInfo->sig.numArgs, &argFlags, &pCallInfo->sig, args);
5292 node = gtNewHelperCallNode(CORINFO_HELP_NEW_MDARR, TYP_REF, 0, args);
5294 // varargs, so we pop the arguments
5295 node->gtFlags |= GTF_CALL_POP_ARGS;
5298 // At the present time we don't track Caller pop arguments
5299 // that have GC references in them
5300 for (GenTreeArgList* temp = args; temp; temp = temp->Rest())
5302 assert(temp->Current()->gtType != TYP_REF);
5307 node->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
5308 node->gtCall.compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)pResolvedToken->hClass;
5310 // Remember that this basic block contains 'new' of a md array
5311 compCurBB->bbFlags |= BBF_HAS_NEWARRAY;
5313 impPushOnStack(node, typeInfo(TI_REF, pResolvedToken->hClass));
5316 GenTreePtr Compiler::impTransformThis(GenTreePtr thisPtr,
5317 CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
5318 CORINFO_THIS_TRANSFORM transform)
5322 case CORINFO_DEREF_THIS:
5324 GenTreePtr obj = thisPtr;
5326 // This does an LDIND on the obj, which should be a byref pointing to a ref.
5327 impBashVarAddrsToI(obj);
5328 assert(genActualType(obj->gtType) == TYP_I_IMPL || obj->gtType == TYP_BYREF);
5329 CorInfoType constraintTyp = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass);
5331 obj = gtNewOperNode(GT_IND, JITtype2varType(constraintTyp), obj);
5332 // ldind could point anywhere, e.g. a boxed class static int
5333 obj->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
5338 case CORINFO_BOX_THIS:
5340 // Constraint calls where there might be no
5341 // unboxed entry point require us to implement the call via helper.
5342 // These only occur when a possible target of the call
5343 // may have inherited an implementation of an interface
5344 // method from System.Object or System.ValueType. The EE does not provide us with
5345 // "unboxed" versions of these methods.
5347 GenTreePtr obj = thisPtr;
5349 assert(obj->TypeGet() == TYP_BYREF || obj->TypeGet() == TYP_I_IMPL);
5350 obj = gtNewObjNode(pConstrainedResolvedToken->hClass, obj);
5351 obj->gtFlags |= GTF_EXCEPT;
5353 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass);
5354 var_types objType = JITtype2varType(jitTyp);
5355 if (impIsPrimitive(jitTyp))
5357 if (obj->OperIsBlk())
5359 obj->ChangeOperUnchecked(GT_IND);
5361 // Obj could point anywhere, e.g. a boxed class static int
5362 obj->gtFlags |= GTF_IND_TGTANYWHERE;
5363 obj->gtOp.gtOp2 = nullptr; // must be zero for tree walkers
5366 obj->gtType = JITtype2varType(jitTyp);
5367 assert(varTypeIsArithmetic(obj->gtType));
5370 // This pushes on the dereferenced byref
5371 // This is then used immediately to box.
5372 impPushOnStack(obj, verMakeTypeInfo(pConstrainedResolvedToken->hClass).NormaliseForStack());
5374 // This pops off the byref-to-a-value-type remaining on the stack and
5375 // replaces it with a boxed object.
5376 // This is then used as the object to the virtual call immediately below.
5377 impImportAndPushBox(pConstrainedResolvedToken);
5378 if (compDonotInline())
5383 obj = impPopStack().val;
5386 case CORINFO_NO_THIS_TRANSFORM:
5392 //------------------------------------------------------------------------
5393 // impCanPInvokeInline: check whether PInvoke inlining should be enabled in the current method.
5396 // true if PInvoke inlining should be enabled in current method, false otherwise
5399 // Checks a number of ambient conditions where we could pinvoke but choose not to
5401 bool Compiler::impCanPInvokeInline()
5403 return getInlinePInvokeEnabled() && (!opts.compDbgCode) && (compCodeOpt() != SMALL_CODE) &&
5404 (!opts.compNoPInvokeInlineCB) // profiler is preventing inline pinvoke
5408 //------------------------------------------------------------------------
5409 // impCanPInvokeInlineCallSite: basic legality checks using information
5410 // from a call to see if the call qualifies as an inline pinvoke.
5413 // block - block containing the call, or for inlinees, block
5414 // containing the call being inlined
5417 // true if this call can legally qualify as an inline pinvoke, false otherwise
5420 // For runtimes that support exception handling interop there are
5421 // restrictions on using inline pinvoke in handler regions.
5423 // * We have to disable pinvoke inlining inside of filters because
5424 // in case the main execution (i.e. in the try block) is inside
5425 // unmanaged code, we cannot reuse the inlined stub (we still need
5426 // the original state until we are in the catch handler)
5428 // * We disable pinvoke inlining inside handlers since the GSCookie
5429 // is in the inlined Frame (see
5430 // CORINFO_EE_INFO::InlinedCallFrameInfo::offsetOfGSCookie), but
5431 // this would not protect framelets/return-address of handlers.
5433 // These restrictions are currently also in place for CoreCLR but
5434 // can be relaxed when coreclr/#8459 is addressed.
5436 bool Compiler::impCanPInvokeInlineCallSite(BasicBlock* block)
5438 if (block->hasHndIndex())
5443 // The remaining limitations do not apply to CoreRT
5444 if (IsTargetAbi(CORINFO_CORERT_ABI))
5449 #ifdef _TARGET_AMD64_
5450 // On x64, we disable pinvoke inlining inside of try regions.
5451 // Here is the comment from JIT64 explaining why:
5453 // [VSWhidbey: 611015] - because the jitted code links in the
5454 // Frame (instead of the stub) we rely on the Frame not being
5455 // 'active' until inside the stub. This normally happens by the
5456 // stub setting the return address pointer in the Frame object
5457 // inside the stub. On a normal return, the return address
5458 // pointer is zeroed out so the Frame can be safely re-used, but
5459 // if an exception occurs, nobody zeros out the return address
5460 // pointer. Thus if we re-used the Frame object, it would go
5461 // 'active' as soon as we link it into the Frame chain.
5463 // Technically we only need to disable PInvoke inlining if we're
5464 // in a handler or if we're in a try body with a catch or
5465 // filter/except where other non-handler code in this method
5466 // might run and try to re-use the dirty Frame object.
5468 // A desktop test case where this seems to matter is
5469 // jit\jit64\ebvts\mcpp\sources2\ijw\__clrcall\vector_ctor_dtor.02\deldtor_clr.exe
5470 if (block->hasTryIndex())
5474 #endif // _TARGET_AMD64_
5479 //------------------------------------------------------------------------
5480 // impCheckForPInvokeCall: examine the call to see if it is a pinvoke and, if so,
5481 // whether it can be expressed as an inline pinvoke.
5484 // call - tree for the call
5485 // methHnd - handle for the method being called (may be null)
5486 // sig - signature of the method being called
5487 // mflags - method flags for the method being called
5488 // block - block containing the call, or for inlinees, block
5489 // containing the call being inlined
5492 // Sets GTF_CALL_M_PINVOKE on the call for pinvokes.
5494 // Also sets GTF_CALL_UNMANAGED on call for inline pinvokes if the
5495 // call passes a combination of legality and profitability checks.
5497 // If GTF_CALL_UNMANAGED is set, increments info.compCallUnmanaged
5499 void Compiler::impCheckForPInvokeCall(
5500 GenTreePtr call, CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* sig, unsigned mflags, BasicBlock* block)
5502 CorInfoUnmanagedCallConv unmanagedCallConv;
5504 // If VM flagged it as Pinvoke, flag the call node accordingly
5505 if ((mflags & CORINFO_FLG_PINVOKE) != 0)
5507 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_PINVOKE;
5512 if ((mflags & CORINFO_FLG_PINVOKE) == 0 || (mflags & CORINFO_FLG_NOSECURITYWRAP) == 0)
5517 unmanagedCallConv = info.compCompHnd->getUnmanagedCallConv(methHnd);
5521 CorInfoCallConv callConv = CorInfoCallConv(sig->callConv & CORINFO_CALLCONV_MASK);
5522 if (callConv == CORINFO_CALLCONV_NATIVEVARARG)
5524 // Used by the IL Stubs.
5525 callConv = CORINFO_CALLCONV_C;
5527 static_assert_no_msg((unsigned)CORINFO_CALLCONV_C == (unsigned)CORINFO_UNMANAGED_CALLCONV_C);
5528 static_assert_no_msg((unsigned)CORINFO_CALLCONV_STDCALL == (unsigned)CORINFO_UNMANAGED_CALLCONV_STDCALL);
5529 static_assert_no_msg((unsigned)CORINFO_CALLCONV_THISCALL == (unsigned)CORINFO_UNMANAGED_CALLCONV_THISCALL);
5530 unmanagedCallConv = CorInfoUnmanagedCallConv(callConv);
5532 assert(!call->gtCall.gtCallCookie);
5535 if (unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_C && unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_STDCALL &&
5536 unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_THISCALL)
5540 optNativeCallCount++;
5542 if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && methHnd == nullptr)
5544 // PInvoke CALLI in IL stubs must be inlined
5549 if (!impCanPInvokeInlineCallSite(block))
5554 // PInvoke CALL in IL stubs must be inlined on CoreRT. Skip the ambient conditions checks and
5555 // profitability checks
5556 if (!(opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && IsTargetAbi(CORINFO_CORERT_ABI)))
5558 if (impCanPInvokeInline())
5563 // Size-speed tradeoff: don't use inline pinvoke at rarely
5564 // executed call sites. The non-inline version is more compact.
5566 if (block->isRunRarely())
5572 // The expensive check should be last
5573 if (info.compCompHnd->pInvokeMarshalingRequired(methHnd, sig))
5579 JITLOG((LL_INFO1000000, "\nInline a CALLI PINVOKE call from method %s", info.compFullName));
5581 call->gtFlags |= GTF_CALL_UNMANAGED;
5582 info.compCallUnmanaged++;
5584 // AMD64 convention is same for native and managed
5585 if (unmanagedCallConv == CORINFO_UNMANAGED_CALLCONV_C)
5587 call->gtFlags |= GTF_CALL_POP_ARGS;
5590 if (unmanagedCallConv == CORINFO_UNMANAGED_CALLCONV_THISCALL)
5592 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_UNMGD_THISCALL;
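/*****************************************************************************
 *
 *  Import an indirect call (CALLI): pop the function pointer from the stack, spilling it to a
 *  temp first if it is not a simple local (it is evaluated after the arguments), and build an
 *  indirect GT_CALL node for it.
 */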
5596 GenTreePtr Compiler::impImportIndirectCall(CORINFO_SIG_INFO* sig, IL_OFFSETX ilOffset)
5598 var_types callRetTyp = JITtype2varType(sig->retType);
5600 /* The function pointer is on top of the stack - It may be a
5601 * complex expression. As it is evaluated after the args,
5602 * it may cause registered args to be spilled. Simply spill it.
5605 // Ignore this trivial case.
5606 if (impStackTop().val->gtOper != GT_LCL_VAR)
5608 impSpillStackEntry(verCurrentState.esStackDepth - 1,
5609 BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impImportIndirectCall"));
5612 /* Get the function pointer */
5614 GenTreePtr fptr = impPopStack().val;
5615 assert(genActualType(fptr->gtType) == TYP_I_IMPL);
5618 // This temporary must never be converted to a double in stress mode,
5619 // because that can introduce a call to the cast helper after the
5620 // arguments have already been evaluated.
5622 if (fptr->OperGet() == GT_LCL_VAR)
5624 lvaTable[fptr->gtLclVarCommon.gtLclNum].lvKeepType = 1;
5628 /* Create the call node */
5630 GenTreePtr call = gtNewIndCallNode(fptr, callRetTyp, nullptr, ilOffset);
5632 call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
5637 /*****************************************************************************/
5639 void Compiler::impPopArgsForUnmanagedCall(GenTreePtr call, CORINFO_SIG_INFO* sig)
5641 assert(call->gtFlags & GTF_CALL_UNMANAGED);
5643 /* Since we push the arguments in reverse order (i.e. right -> left),
5644 * spill any side effects from the stack.
5646 * OBS: If there is only one side effect we do not need to spill it;
5647 * thus we have to spill all side effects except the last one.
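 *
 * For example (an illustrative case): for f(a(), b(), c()) where only a() and b() have side
 * effects, a() is spilled to a temp once b() is seen, while b(), being the last side effect,
 * can stay on the stack; popping the arguments in reverse order then still observes the
 * original left-to-right ordering of the side effects.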
5650 unsigned lastLevelWithSideEffects = UINT_MAX;
5652 unsigned argsToReverse = sig->numArgs;
5654 // For "thiscall", the first argument goes in a register. Since its
5655 // order does not need to be changed, we do not need to spill it
5657 if (call->gtCall.gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
5659 assert(argsToReverse);
5663 #ifndef _TARGET_X86_
5664 // Don't reverse args on ARM or x64 - first four args always placed in regs in order
5668 for (unsigned level = verCurrentState.esStackDepth - argsToReverse; level < verCurrentState.esStackDepth; level++)
5670 if (verCurrentState.esStack[level].val->gtFlags & GTF_ORDER_SIDEEFF)
5672 assert(lastLevelWithSideEffects == UINT_MAX);
5674 impSpillStackEntry(level,
5675 BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - other side effect"));
5677 else if (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT)
5679 if (lastLevelWithSideEffects != UINT_MAX)
5681 /* We had a previous side effect - must spill it */
5682 impSpillStackEntry(lastLevelWithSideEffects,
5683 BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - side effect"));
5685 /* Record the level for the current side effect in case we will spill it */
5686 lastLevelWithSideEffects = level;
5690 /* This is the first side effect encountered - record its level */
5692 lastLevelWithSideEffects = level;
5697 /* The argument list is now "clean" - no out-of-order side effects
5698 * Pop the argument list in reverse order */
5700 unsigned argFlags = 0;
5701 GenTreePtr args = call->gtCall.gtCallArgs =
5702 impPopRevList(sig->numArgs, &argFlags, sig, sig->numArgs - argsToReverse);
5704 if (call->gtCall.gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
5706 GenTreePtr thisPtr = args->Current();
5707 impBashVarAddrsToI(thisPtr);
5708 assert(thisPtr->TypeGet() == TYP_I_IMPL || thisPtr->TypeGet() == TYP_BYREF);
5713 call->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
5717 //------------------------------------------------------------------------
5718 // impInitClass: Build a node to initialize the class before accessing the
5719 // field if necessary
5722 // pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized
5723 // by a call to CEEInfo::resolveToken().
5725 // Return Value: If needed, a pointer to the node that will perform the class
5726 // initialization. Otherwise, nullptr.
5729 GenTreePtr Compiler::impInitClass(CORINFO_RESOLVED_TOKEN* pResolvedToken)
5731 CorInfoInitClassResult initClassResult =
5732 info.compCompHnd->initClass(pResolvedToken->hField, info.compMethodHnd, impTokenLookupContextHandle);
5734 if ((initClassResult & CORINFO_INITCLASS_USE_HELPER) == 0)
5740 GenTreePtr node = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup);
5742 if (node == nullptr)
5744 assert(compDonotInline());
5750 node = gtNewHelperCallNode(CORINFO_HELP_INITCLASS, TYP_VOID, 0, gtNewArgList(node));
5754 // Call the shared non-GC static helper, as it's the fastest
5755 node = fgGetSharedCCtor(pResolvedToken->hClass);
5761 GenTreePtr Compiler::impImportStaticReadOnlyField(void* fldAddr, var_types lclTyp)
5763 GenTreePtr op1 = nullptr;
5772 ival = *((bool*)fldAddr);
5776 ival = *((signed char*)fldAddr);
5780 ival = *((unsigned char*)fldAddr);
5784 ival = *((short*)fldAddr);
5789 ival = *((unsigned short*)fldAddr);
5794 ival = *((int*)fldAddr);
5796 op1 = gtNewIconNode(ival);
5801 lval = *((__int64*)fldAddr);
5802 op1 = gtNewLconNode(lval);
5806 dval = *((float*)fldAddr);
5807 op1 = gtNewDconNode(dval);
5808 #if !FEATURE_X87_DOUBLES
5809 // The x87 stack doesn't differentiate between float and double,
5810 // so R4 is treated as R8 there; every other target does differentiate
5811 op1->gtType = TYP_FLOAT;
5812 #endif // FEATURE_X87_DOUBLES
5816 dval = *((double*)fldAddr);
5817 op1 = gtNewDconNode(dval);
5821 assert(!"Unexpected lclTyp");
5828 GenTreePtr Compiler::impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolvedToken,
5829 CORINFO_ACCESS_FLAGS access,
5830 CORINFO_FIELD_INFO* pFieldInfo,
5835 switch (pFieldInfo->fieldAccessor)
5837 case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
5839 assert(!compIsForInlining());
5841 // We first call a special helper to get the statics base pointer
5842 op1 = impParentClassTokenToHandle(pResolvedToken);
5844 // compIsForInlining() is false so we should never get NULL here
5845 assert(op1 != nullptr);
5847 var_types type = TYP_BYREF;
5849 switch (pFieldInfo->helper)
5851 case CORINFO_HELP_GETGENERICS_NONGCTHREADSTATIC_BASE:
5854 case CORINFO_HELP_GETGENERICS_GCSTATIC_BASE:
5855 case CORINFO_HELP_GETGENERICS_NONGCSTATIC_BASE:
5856 case CORINFO_HELP_GETGENERICS_GCTHREADSTATIC_BASE:
5859 assert(!"unknown generic statics helper");
5863 op1 = gtNewHelperCallNode(pFieldInfo->helper, type, 0, gtNewArgList(op1));
5865 FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
5866 op1 = gtNewOperNode(GT_ADD, type, op1,
5867 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, pFieldInfo->offset, fs));
5871 case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
5873 #ifdef FEATURE_READYTORUN_COMPILER
5874 if (opts.IsReadyToRun())
5876 unsigned callFlags = 0;
5878 if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT)
5880 callFlags |= GTF_CALL_HOISTABLE;
5883 op1 = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_STATIC_BASE, TYP_BYREF, callFlags);
5885 op1->gtCall.setEntryPoint(pFieldInfo->fieldLookup);
5890 op1 = fgGetStaticsCCtorHelper(pResolvedToken->hClass, pFieldInfo->helper);
5894 FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
5895 op1 = gtNewOperNode(GT_ADD, op1->TypeGet(), op1,
5896 new (this, GT_CNS_INT) GenTreeIntCon(TYP_INT, pFieldInfo->offset, fs));
5900 #if COR_JIT_EE_VERSION > 460
5901 case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
5903 #ifdef FEATURE_READYTORUN_COMPILER
5904 noway_assert(opts.IsReadyToRun());
5905 CORINFO_LOOKUP_KIND kind = info.compCompHnd->getLocationOfThisType(info.compMethodHnd);
5906 assert(kind.needsRuntimeLookup);
5908 GenTreePtr ctxTree = getRuntimeContextTree(kind.runtimeLookupKind);
5909 GenTreeArgList* args = gtNewArgList(ctxTree);
5911 unsigned callFlags = 0;
5913 if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT)
5915 callFlags |= GTF_CALL_HOISTABLE;
5917 var_types type = TYP_BYREF;
5918 op1 = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE, type, callFlags, args);
5920 op1->gtCall.setEntryPoint(pFieldInfo->fieldLookup);
5921 FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
5922 op1 = gtNewOperNode(GT_ADD, type, op1,
5923 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, pFieldInfo->offset, fs));
5926 #endif // FEATURE_READYTORUN_COMPILER
5929 #endif // COR_JIT_EE_VERSION > 460
5932 if (!(access & CORINFO_ACCESS_ADDRESS))
5934 // In the future, it may be better to just create the right tree here instead of folding it later.
5935 op1 = gtNewFieldRef(lclTyp, pResolvedToken->hField);
5937 if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP)
5939 op1->gtType = TYP_REF; // points at boxed object
5940 FieldSeqNode* firstElemFldSeq =
5941 GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField);
5943 gtNewOperNode(GT_ADD, TYP_BYREF, op1,
5944 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(void*), firstElemFldSeq));
5946 if (varTypeIsStruct(lclTyp))
5948 // Constructor adds GTF_GLOB_REF. Note that this is *not* GTF_EXCEPT.
5949 op1 = gtNewObjNode(pFieldInfo->structType, op1);
5953 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
5954 op1->gtFlags |= GTF_GLOB_REF | GTF_IND_NONFAULTING;
5962 void** pFldAddr = nullptr;
5963 void* fldAddr = info.compCompHnd->getFieldAddress(pResolvedToken->hField, (void**)&pFldAddr);
5965 FieldSeqNode* fldSeq = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
5967 /* Create the data member node */
5968 if (pFldAddr == nullptr)
5970 op1 = gtNewIconHandleNode((size_t)fldAddr, GTF_ICON_STATIC_HDL, fldSeq);
5974 op1 = gtNewIconHandleNode((size_t)pFldAddr, GTF_ICON_STATIC_HDL, fldSeq);
5976 // There are two cases here, either the static is RVA based,
5977 // in which case the type of the FIELD node is not a GC type
5978 // and the handle to the RVA is a TYP_I_IMPL. Or the FIELD node is
5979 // a GC type and the handle to it is a TYP_BYREF in the GC heap
5980 // because handles to statics now go into the large object heap
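// An illustrative sketch of the handle indirection built below (assumed shapes, not from
// the original comment):
//     non-GC static (e.g. int):    IND<I_IMPL>( ICON<STATIC_HDL> pFldAddr )
//     GC static (e.g. string):     IND<byref> ( ICON<STATIC_HDL> pFldAddr )
// and, when the value itself is needed, a further IND of the field's type is layered on top.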
5982 var_types handleTyp = (var_types)(varTypeIsGC(lclTyp) ? TYP_BYREF : TYP_I_IMPL);
5983 op1 = gtNewOperNode(GT_IND, handleTyp, op1);
5984 op1->gtFlags |= GTF_IND_INVARIANT | GTF_IND_NONFAULTING;
5991 if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP)
5993 op1 = gtNewOperNode(GT_IND, TYP_REF, op1);
5995 FieldSeqNode* fldSeq = GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField);
5997 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
5998 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(void*), fldSeq));
6001 if (!(access & CORINFO_ACCESS_ADDRESS))
6003 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
6004 op1->gtFlags |= GTF_GLOB_REF;
6010 // In general, try to call this before most of the verification work. Most people expect the access
6011 // exceptions before the verification exceptions. If you call this afterwards, that usually doesn't happen. It
6012 // turns out that if you can't access something, we also consider you unverifiable for other reasons.
6013 void Compiler::impHandleAccessAllowed(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall)
6015 if (result != CORINFO_ACCESS_ALLOWED)
6017 impHandleAccessAllowedInternal(result, helperCall);
6021 void Compiler::impHandleAccessAllowedInternal(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall)
6025 case CORINFO_ACCESS_ALLOWED:
6027 case CORINFO_ACCESS_ILLEGAL:
6028 // if we're verifying, then we need to reject the illegal access to ensure that we don't think the
6029 // method is verifiable. Otherwise, delay the exception to runtime.
6030 if (compIsForImportOnly())
6032 info.compCompHnd->ThrowExceptionForHelper(helperCall);
6036 impInsertHelperCall(helperCall);
6039 case CORINFO_ACCESS_RUNTIME_CHECK:
6040 impInsertHelperCall(helperCall);
6045 void Compiler::impInsertHelperCall(CORINFO_HELPER_DESC* helperInfo)
6047 // Construct the argument list
6048 GenTreeArgList* args = nullptr;
6049 assert(helperInfo->helperNum != CORINFO_HELP_UNDEF);
6050 for (unsigned i = helperInfo->numArgs; i > 0; --i)
6052 const CORINFO_HELPER_ARG& helperArg = helperInfo->args[i - 1];
6053 GenTreePtr currentArg = nullptr;
6054 switch (helperArg.argType)
6056 case CORINFO_HELPER_ARG_TYPE_Field:
6057 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(
6058 info.compCompHnd->getFieldClass(helperArg.fieldHandle));
6059 currentArg = gtNewIconEmbFldHndNode(helperArg.fieldHandle);
6061 case CORINFO_HELPER_ARG_TYPE_Method:
6062 info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(helperArg.methodHandle);
6063 currentArg = gtNewIconEmbMethHndNode(helperArg.methodHandle);
6065 case CORINFO_HELPER_ARG_TYPE_Class:
6066 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(helperArg.classHandle);
6067 currentArg = gtNewIconEmbClsHndNode(helperArg.classHandle);
6069 case CORINFO_HELPER_ARG_TYPE_Module:
6070 currentArg = gtNewIconEmbScpHndNode(helperArg.moduleHandle);
6072 case CORINFO_HELPER_ARG_TYPE_Const:
6073 currentArg = gtNewIconNode(helperArg.constant);
6076 NO_WAY("Illegal helper arg type");
6078 args = (currentArg == nullptr) ? gtNewArgList(currentArg) : gtNewListNode(currentArg, args);
6082 * Mark as CSE'able, and hoistable. Consider marking hoistable unless you're in the inlinee.
6083 * Also, consider sticking this in the first basic block.
6085 GenTreePtr callout = gtNewHelperCallNode(helperInfo->helperNum, TYP_VOID, GTF_EXCEPT, args);
6086 impAppendTree(callout, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
6089 void Compiler::impInsertCalloutForDelegate(CORINFO_METHOD_HANDLE callerMethodHnd,
6090 CORINFO_METHOD_HANDLE calleeMethodHnd,
6091 CORINFO_CLASS_HANDLE delegateTypeHnd)
6093 #ifdef FEATURE_CORECLR
6094 if (!info.compCompHnd->isDelegateCreationAllowed(delegateTypeHnd, calleeMethodHnd))
6096 // Call the JIT_DelegateSecurityCheck helper before calling the actual function.
6097 // This helper throws an exception if the CLR host disallows the call.
6099 GenTreePtr helper = gtNewHelperCallNode(CORINFO_HELP_DELEGATE_SECURITY_CHECK, TYP_VOID, GTF_EXCEPT,
6100 gtNewArgList(gtNewIconEmbClsHndNode(delegateTypeHnd),
6101 gtNewIconEmbMethHndNode(calleeMethodHnd)));
6102 // Append the callout statement
6103 impAppendTree(helper, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
6105 #endif // FEATURE_CORECLR
6108 // Checks whether the return types of caller and callee are compatible
6109 // so that callee can be tail called. Note that here we don't check
6110 // compatibility in the IL Verifier sense, but rather that the return type
6111 // sizes are equal and the values get returned in the same return register.
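// For example (an illustrative note, not part of the original comment): on AMD64 a caller
// returning int may tail call a callee returning a 4-byte struct that comes back in a
// register, since the sizes match and the same return register is used; but a caller
// returning short may not tail call a callee returning int, because the caller's callers
// expect a normalized small-typed return value.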
6112 bool Compiler::impTailCallRetTypeCompatible(var_types callerRetType,
6113 CORINFO_CLASS_HANDLE callerRetTypeClass,
6114 var_types calleeRetType,
6115 CORINFO_CLASS_HANDLE calleeRetTypeClass)
6117 // Note that we cannot relax this condition with genActualType() as the
6118 // calling convention dictates that the caller of a function with a small-
6119 // typed return value is responsible for normalizing the return value.
6120 if (callerRetType == calleeRetType)
6125 #if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)
6127 if (callerRetType == TYP_VOID)
6129 // This needs to be allowed to support the following IL pattern that Jit64 allows:
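//     (an illustrative sketch of the pattern, reconstructed from the checks below since the
//      original example lines are elided; the exact IL is an assumption)
//         tail. call instance int32 SomeClass::SomeMethod()
//         pop
//         ret     // inside a caller whose own return type is void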
6134 // Note that the above IL pattern is not valid as per IL verification rules.
6135 // Therefore, only full trust code can take advantage of this pattern.
6139 // These checks return true if the return value type sizes are the same and
6140 // get returned in the same return register i.e. caller doesn't need to normalize
6141 // return value. Some of the tail calls permitted by below checks would have
6142 // been rejected by IL Verifier before we reached here. Therefore, only full
6143 // trust code can make those tail calls.
6144 unsigned callerRetTypeSize = 0;
6145 unsigned calleeRetTypeSize = 0;
6146 bool isCallerRetTypMBEnreg =
6147 VarTypeIsMultiByteAndCanEnreg(callerRetType, callerRetTypeClass, &callerRetTypeSize, true);
6148 bool isCalleeRetTypMBEnreg =
6149 VarTypeIsMultiByteAndCanEnreg(calleeRetType, calleeRetTypeClass, &calleeRetTypeSize, true);
6151 if (varTypeIsIntegral(callerRetType) || isCallerRetTypMBEnreg)
6153 return (varTypeIsIntegral(calleeRetType) || isCalleeRetTypMBEnreg) && (callerRetTypeSize == calleeRetTypeSize);
6155 #endif // _TARGET_AMD64_ || _TARGET_ARM64_
6163 PREFIX_TAILCALL_EXPLICIT = 0x00000001, // call has "tail" IL prefix
6164 PREFIX_TAILCALL_IMPLICIT =
6165 0x00000010, // call is treated as having "tail" prefix even though there is no "tail" IL prefix
6166 PREFIX_TAILCALL = (PREFIX_TAILCALL_EXPLICIT | PREFIX_TAILCALL_IMPLICIT),
6167 PREFIX_VOLATILE = 0x00000100,
6168 PREFIX_UNALIGNED = 0x00001000,
6169 PREFIX_CONSTRAINED = 0x00010000,
6170 PREFIX_READONLY = 0x00100000
6173 /********************************************************************************
6175 * Returns true if the current opcode and the opcodes following it correspond
6176 * to a supported tail call IL pattern.
6179 bool Compiler::impIsTailCallILPattern(bool tailPrefixed,
6181 const BYTE* codeAddrOfNextOpcode,
6182 const BYTE* codeEnd,
6184 bool* isCallPopAndRet /* = nullptr */)
6186 // Bail out if the current opcode is not a call.
6187 if (!impOpcodeIsCallOpcode(curOpcode))
6192 #if !FEATURE_TAILCALL_OPT_SHARED_RETURN
6193 // If shared ret tail opt is not enabled, we will enable
6194 // it for recursive methods.
6198 // we can actually handle the case where the ret is in a fallthrough block, as long as that is the only part of
6199 // the sequence. Make sure we don't go past the end of the IL however.
6200 codeEnd = min(codeEnd + 1, info.compCode + info.compILCodeSize);
6203 // Bail out if there is no next opcode after call
6204 if (codeAddrOfNextOpcode >= codeEnd)
6209 // Scan the opcodes to look for the following IL patterns if either
6210 // i) the call is not tail prefixed (i.e. implicit tail call) or
6211 // ii) if tail prefixed, IL verification is not needed for the method.
6213 // Only in the above two cases can we allow the below tail call patterns
6214 // that violate the ECMA spec.
6230 #ifdef _TARGET_AMD64_
6233 nextOpcode = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode);
6234 codeAddrOfNextOpcode += sizeof(__int8);
6235 } while ((codeAddrOfNextOpcode < codeEnd) && // Haven't reached end of method
6236 (!tailPrefixed || !tiVerificationNeeded) && // Not ".tail" prefixed or method requires no IL verification
6237 ((nextOpcode == CEE_NOP) || ((nextOpcode == CEE_POP) && (++cntPop == 1)))); // Next opcode = nop or exactly
6238 // one pop seen so far.
6240 nextOpcode = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode);
6243 if (isCallPopAndRet)
6245 // Allow call+pop+ret to be tail call optimized if caller ret type is void
6246 *isCallPopAndRet = (nextOpcode == CEE_RET) && (cntPop == 1);
6249 #ifdef _TARGET_AMD64_
6251 // Tail call IL pattern could be either of the following
6252 // 1) call/callvirt/calli + ret
6253 // 2) call/callvirt/calli + pop + ret in a method returning void.
6254 return (nextOpcode == CEE_RET) && ((cntPop == 0) || ((cntPop == 1) && (info.compRetType == TYP_VOID)));
6255 #else //!_TARGET_AMD64_
6256 return (nextOpcode == CEE_RET) && (cntPop == 0);
6260 /*****************************************************************************
6262 * Determine whether the call could be converted to an implicit tail call
6265 bool Compiler::impIsImplicitTailCallCandidate(
6266 OPCODE opcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, int prefixFlags, bool isRecursive)
6269 #if FEATURE_TAILCALL_OPT
6270 if (!opts.compTailCallOpt)
6275 if (opts.compDbgCode || opts.MinOpts())
6280 // must not be tail prefixed
6281 if (prefixFlags & PREFIX_TAILCALL_EXPLICIT)
6286 #if !FEATURE_TAILCALL_OPT_SHARED_RETURN
6287 // the block containing call is marked as BBJ_RETURN
6288 // We allow shared ret tail call optimization on recursive calls even under
6289 // !FEATURE_TAILCALL_OPT_SHARED_RETURN.
6290 if (!isRecursive && (compCurBB->bbJumpKind != BBJ_RETURN))
6292 #endif // !FEATURE_TAILCALL_OPT_SHARED_RETURN
6294 // must be call+ret or call+pop+ret
6295 if (!impIsTailCallILPattern(false, opcode, codeAddrOfNextOpcode, codeEnd, isRecursive))
6303 #endif // FEATURE_TAILCALL_OPT
6306 //------------------------------------------------------------------------
6307 // impImportCall: import a call-inspiring opcode
6310 // opcode - opcode that inspires the call
6311 // pResolvedToken - resolved token for the call target
6312 // pConstrainedResolvedToken - resolved constraint token (or nullptr)
6313 // newobjThis - tree for this pointer or uninitialized newobj temp (or nullptr)
6314 // prefixFlags - IL prefix flags for the call
6315 // callInfo - EE supplied info for the call
6316 // rawILOffset - IL offset of the opcode
6319 // Type of the call's return value.
6322 // opcode can be CEE_CALL, CEE_CALLI, CEE_CALLVIRT, or CEE_NEWOBJ.
6324 // For CEE_NEWOBJ, newobjThis should be the temp grabbed for the allocated
6325 // uninitialized object.
6328 #pragma warning(push)
6329 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
6332 var_types Compiler::impImportCall(OPCODE opcode,
6333 CORINFO_RESOLVED_TOKEN* pResolvedToken,
6334 CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
6335 GenTreePtr newobjThis,
6337 CORINFO_CALL_INFO* callInfo,
6338 IL_OFFSET rawILOffset)
6340 assert(opcode == CEE_CALL || opcode == CEE_CALLVIRT || opcode == CEE_NEWOBJ || opcode == CEE_CALLI);
6342 IL_OFFSETX ilOffset = impCurILOffset(rawILOffset, true);
6343 var_types callRetTyp = TYP_COUNT;
6344 CORINFO_SIG_INFO* sig = nullptr;
6345 CORINFO_METHOD_HANDLE methHnd = nullptr;
6346 CORINFO_CLASS_HANDLE clsHnd = nullptr;
6347 unsigned clsFlags = 0;
6348 unsigned mflags = 0;
6349 unsigned argFlags = 0;
6350 GenTreePtr call = nullptr;
6351 GenTreeArgList* args = nullptr;
6352 CORINFO_THIS_TRANSFORM constraintCallThisTransform = CORINFO_NO_THIS_TRANSFORM;
6353 CORINFO_CONTEXT_HANDLE exactContextHnd = nullptr;
6354 BOOL exactContextNeedsRuntimeLookup = FALSE;
6355 bool canTailCall = true;
6356 const char* szCanTailCallFailReason = nullptr;
6357 int tailCall = prefixFlags & PREFIX_TAILCALL;
6358 bool readonlyCall = (prefixFlags & PREFIX_READONLY) != 0;
6360 // Synchronized methods need to call CORINFO_HELP_MON_EXIT at the end. We could
6361 // do that before tailcalls, but that is probably not the intended
6362 // semantic. So just disallow tailcalls from synchronized methods.
6363 // Also, popping arguments in a varargs function is more work and NYI
6364 // If we have a security object, we have to keep our frame around for callers
6365 // to see any imperative security.
6366 if (info.compFlags & CORINFO_FLG_SYNCH)
6368 canTailCall = false;
6369 szCanTailCallFailReason = "Caller is synchronized";
6371 #if !FEATURE_FIXED_OUT_ARGS
6372 else if (info.compIsVarArgs)
6374 canTailCall = false;
6375 szCanTailCallFailReason = "Caller is varargs";
6377 #endif // FEATURE_FIXED_OUT_ARGS
6378 else if (opts.compNeedSecurityCheck)
6380 canTailCall = false;
6381 szCanTailCallFailReason = "Caller requires a security check.";
6384 // We only need to cast the return value of pinvoke inlined calls that return small types
6386 // TODO-AMD64-Cleanup: Remove this when we stop interoperating with JIT64, or if we decide to stop
6387 // widening everything! CoreCLR does not support JIT64 interoperation so no need to widen there.
6388 // The existing x64 JIT doesn't bother widening all types to int, so we have to assume for
6389 // the time being that the callee might be compiled by the other JIT and thus the return
6390 // value will need to be widened by us (or not widened at all...)
6392 // ReadyToRun code sticks with default calling convention that does not widen small return types.
6394 bool checkForSmallType = opts.IsJit64Compat() || opts.IsReadyToRun();
6395 bool bIntrinsicImported = false;
6397 CORINFO_SIG_INFO calliSig;
6398 GenTreeArgList* extraArg = nullptr;
6400 /*-------------------------------------------------------------------------
6401 * First create the call node
6404 if (opcode == CEE_CALLI)
6406 /* Get the call site sig */
6407 eeGetSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &calliSig);
6409 callRetTyp = JITtype2varType(calliSig.retType);
6411 call = impImportIndirectCall(&calliSig, ilOffset);
6413 // We don't know the target method, so we have to infer the flags, or
6414 // assume the worst-case.
6415 mflags = (calliSig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC;
6420 unsigned structSize =
6421 (callRetTyp == TYP_STRUCT) ? info.compCompHnd->getClassSize(calliSig.retTypeSigClass) : 0;
6422 printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n",
6423 opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize);
6426 // This should be checked in impImportBlockCode.
6427 assert(!compIsForInlining() || !(impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY));
6432 // We cannot lazily obtain the signature of a CALLI call because it has no method
6433 // handle that we can use, so we need to save its full call signature here.
6434 assert(call->gtCall.callSig == nullptr);
6435 call->gtCall.callSig = new (this, CMK_CorSig) CORINFO_SIG_INFO;
6436 *call->gtCall.callSig = calliSig;
6439 else // (opcode != CEE_CALLI)
6441 CorInfoIntrinsics intrinsicID = CORINFO_INTRINSIC_Count;
6443 // Passing CORINFO_CALLINFO_ALLOWINSTPARAM indicates that this JIT is prepared to
6444 // supply the instantiation parameters necessary to make direct calls to underlying
6445 // shared generic code, rather than calling through instantiating stubs. If the
6446 // returned signature has CORINFO_CALLCONV_PARAMTYPE then this indicates that the JIT
6447 // must indeed pass an instantiation parameter.
6449 methHnd = callInfo->hMethod;
6451 sig = &(callInfo->sig);
6452 callRetTyp = JITtype2varType(sig->retType);
6454 mflags = callInfo->methodFlags;
6459 unsigned structSize = (callRetTyp == TYP_STRUCT) ? info.compCompHnd->getClassSize(sig->retTypeSigClass) : 0;
6460 printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n",
6461 opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize);
6464 if (compIsForInlining())
6466 /* Does this call site have security boundary restrictions? */
6468 if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
6470 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_SECURITY);
6474 /* Does the inlinee need a security check token on the frame */
6476 if (mflags & CORINFO_FLG_SECURITYCHECK)
6478 compInlineResult->NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
6482 /* Does the inlinee use StackCrawlMark */
6484 if (mflags & CORINFO_FLG_DONT_INLINE_CALLER)
6486 compInlineResult->NoteFatal(InlineObservation::CALLEE_STACK_CRAWL_MARK);
6490 /* For now ignore delegate invoke */
6492 if (mflags & CORINFO_FLG_DELEGATE_INVOKE)
6494 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_DELEGATE_INVOKE);
6498 /* For now ignore varargs */
6499 if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
6501 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NATIVE_VARARGS);
6505 if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
6507 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS);
6511 if ((mflags & CORINFO_FLG_VIRTUAL) && (sig->sigInst.methInstCount != 0) && (opcode == CEE_CALLVIRT))
6513 compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_GENERIC_VIRTUAL);
6518 clsHnd = pResolvedToken->hClass;
6520 clsFlags = callInfo->classFlags;
6523 // If this is a call to JitTestLabel.Mark, do "early inlining", and record the test attribute.
6525 // This recognition should really be done by knowing the methHnd of the relevant Mark method(s).
6526 // These should be in mscorlib.h, and available through a JIT/EE interface call.
6527 const char* modName;
6528 const char* className;
6529 const char* methodName;
6530 if ((className = eeGetClassName(clsHnd)) != nullptr &&
6531 strcmp(className, "System.Runtime.CompilerServices.JitTestLabel") == 0 &&
6532 (methodName = eeGetMethodName(methHnd, &modName)) != nullptr && strcmp(methodName, "Mark") == 0)
6534 return impImportJitTestLabelMark(sig->numArgs);
6538 // <NICE> Factor this into getCallInfo </NICE>
6539 if ((mflags & CORINFO_FLG_INTRINSIC) && !pConstrainedResolvedToken)
6541 call = impIntrinsic(clsHnd, methHnd, sig, pResolvedToken->token, readonlyCall,
6542 (canTailCall && (tailCall != 0)), &intrinsicID);
6544 if (call != nullptr)
6546 assert(!(mflags & CORINFO_FLG_VIRTUAL) || (mflags & CORINFO_FLG_FINAL) ||
6547 (clsFlags & CORINFO_FLG_FINAL));
6549 #ifdef FEATURE_READYTORUN_COMPILER
6550 if (call->OperGet() == GT_INTRINSIC)
6552 if (opts.IsReadyToRun())
6554 noway_assert(callInfo->kind == CORINFO_CALL);
6555 call->gtIntrinsic.gtEntryPoint = callInfo->codePointerLookup.constLookup;
6559 call->gtIntrinsic.gtEntryPoint.addr = nullptr;
6564 bIntrinsicImported = true;
6572 call = impSIMDIntrinsic(opcode, newobjThis, clsHnd, methHnd, sig, pResolvedToken->token);
6573 if (call != nullptr)
6575 bIntrinsicImported = true;
6579 #endif // FEATURE_SIMD
6581 if ((mflags & CORINFO_FLG_VIRTUAL) && (mflags & CORINFO_FLG_EnC) && (opcode == CEE_CALLVIRT))
6583 NO_WAY("Virtual call to a function added via EnC is not supported");
6587 if ((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_DEFAULT &&
6588 (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
6589 (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG)
6591 BADCODE("Bad calling convention");
6594 //-------------------------------------------------------------------------
6595 // Construct the call node
6597 // Work out what sort of call we're making.
6598 // Dispense with virtual calls implemented via LDVIRTFTN immediately.
6600 constraintCallThisTransform = callInfo->thisTransform;
6602 exactContextHnd = callInfo->contextHandle;
6603 exactContextNeedsRuntimeLookup = callInfo->exactContextNeedsRuntimeLookup;
6605 // A recursive call is treated as a loop back to the beginning of the method.
6606 if (methHnd == info.compMethodHnd)
6611 JITDUMP("\nFound recursive call in the method. Mark BB%02u to BB%02u as having a backward branch.\n",
6612 fgFirstBB->bbNum, compCurBB->bbNum);
6615 fgMarkBackwardJump(fgFirstBB, compCurBB);
6618 switch (callInfo->kind)
6621 case CORINFO_VIRTUALCALL_STUB:
6623 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
6624 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
6625 if (callInfo->stubLookup.lookupKind.needsRuntimeLookup)
6628 if (compIsForInlining())
6630 // Don't import runtime lookups when inlining
6631 // Inlining has to be aborted in such a case
6632 /* XXX Fri 3/20/2009
6633 * By the way, this would never succeed. If the handle lookup is into the generic
6634 * dictionary for a candidate, you'll generate different dictionary offsets and the
6635 * inlined code will crash.
6637 * To anyone code reviewing this, when could this ever succeed in the future? It'll
6638 * always have a handle lookup. These lookups are safe intra-module, but we're just
6641 compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_COMPLEX_HANDLE);
6645 GenTreePtr stubAddr = impRuntimeLookupToTree(pResolvedToken, &callInfo->stubLookup, methHnd);
6646 assert(!compDonotInline());
6648 // This is the rough code to set up an indirect stub call
6649 assert(stubAddr != nullptr);
6651 // The stubAddr may be a
6652 // complex expression. As it is evaluated after the args,
6653 // it may cause registered args to be spilled. Simply spill it.
6655 unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall with runtime lookup"));
6656 impAssignTempGen(lclNum, stubAddr, (unsigned)CHECK_SPILL_ALL);
6657 stubAddr = gtNewLclvNode(lclNum, TYP_I_IMPL);
6659 // Create the actual call node
6661 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
6662 (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
6664 call = gtNewIndCallNode(stubAddr, callRetTyp, nullptr);
6666 call->gtFlags |= GTF_EXCEPT | (stubAddr->gtFlags & GTF_GLOB_EFFECT);
6667 call->gtFlags |= GTF_CALL_VIRT_STUB;
6670 // No tailcalls allowed for these yet...
6671 canTailCall = false;
6672 szCanTailCallFailReason = "VirtualCall with runtime lookup";
6677 // OK, the stub is available at compile time.
6679 call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
6680 call->gtCall.gtStubCallStubAddr = callInfo->stubLookup.constLookup.addr;
6681 call->gtFlags |= GTF_CALL_VIRT_STUB;
6682 assert(callInfo->stubLookup.constLookup.accessType != IAT_PPVALUE);
6683 if (callInfo->stubLookup.constLookup.accessType == IAT_PVALUE)
6685 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_VIRTSTUB_REL_INDIRECT;
6689 #ifdef FEATURE_READYTORUN_COMPILER
6690 if (opts.IsReadyToRun())
6692 // Null check is sometimes needed for ready to run to handle
6693 // non-virtual <-> virtual changes between versions
6694 if (callInfo->nullInstanceCheck)
6696 call->gtFlags |= GTF_CALL_NULLCHECK;
6704 case CORINFO_VIRTUALCALL_VTABLE:
6706 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
6707 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
6708 call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
6709 call->gtFlags |= GTF_CALL_VIRT_VTABLE;
6713 case CORINFO_VIRTUALCALL_LDVIRTFTN:
6715 if (compIsForInlining())
6717 compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_CALL_VIA_LDVIRTFTN);
6721 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
6722 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
6723 // OK, We've been told to call via LDVIRTFTN, so just
6724 // take the call now....
6726 args = impPopList(sig->numArgs, &argFlags, sig);
6728 GenTreePtr thisPtr = impPopStack().val;
6729 thisPtr = impTransformThis(thisPtr, pConstrainedResolvedToken, callInfo->thisTransform);
6730 if (compDonotInline())
6735 // Clone the (possibly transformed) "this" pointer
6736 GenTreePtr thisPtrCopy;
6737 thisPtr = impCloneExpr(thisPtr, &thisPtrCopy, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
6738 nullptr DEBUGARG("LDVIRTFTN this pointer"));
6740 GenTreePtr fptr = impImportLdvirtftn(thisPtr, pResolvedToken, callInfo);
6741 if (compDonotInline())
6746 thisPtr = nullptr; // can't reuse it
6748 // Now make an indirect call through the function pointer
6750 unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall through function pointer"));
6751 impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL);
6752 fptr = gtNewLclvNode(lclNum, TYP_I_IMPL);
6754 // Create the actual call node
6756 call = gtNewIndCallNode(fptr, callRetTyp, args, ilOffset);
6757 call->gtCall.gtCallObjp = thisPtrCopy;
6758 call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
6760 #ifdef FEATURE_READYTORUN_COMPILER
6761 if (opts.IsReadyToRun())
6763 // Null check is needed for ready to run to handle
6764 // non-virtual <-> virtual changes between versions
6765 call->gtFlags |= GTF_CALL_NULLCHECK;
6769 // Since we are jumping over some code, check that it's OK to skip that code
6770 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
6771 (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
6777 // This is for a non-virtual, non-interface etc. call
6778 call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
6780 // We remove the nullcheck for the GetType call intrinsic.
6781 // TODO-CQ: JIT64 does not introduce the null check for many more helper calls
6783 if (callInfo->nullInstanceCheck &&
6784 !((mflags & CORINFO_FLG_INTRINSIC) != 0 && (intrinsicID == CORINFO_INTRINSIC_Object_GetType)))
6786 call->gtFlags |= GTF_CALL_NULLCHECK;
6789 #ifdef FEATURE_READYTORUN_COMPILER
6790 if (opts.IsReadyToRun())
6792 call->gtCall.setEntryPoint(callInfo->codePointerLookup.constLookup);
6798 case CORINFO_CALL_CODE_POINTER:
6800 // The EE has asked us to call by computing a code pointer and then doing an
6801 // indirect call. This is because a runtime lookup is required to get the code entry point.
6803 // These calls always follow a uniform calling convention, i.e. no extra hidden params
6804 assert((sig->callConv & CORINFO_CALLCONV_PARAMTYPE) == 0);
6806 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG);
6807 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
6810 impLookupToTree(pResolvedToken, &callInfo->codePointerLookup, GTF_ICON_FTN_ADDR, callInfo->hMethod);
6812 if (compDonotInline())
6817 // Now make an indirect call through the function pointer
6819 unsigned lclNum = lvaGrabTemp(true DEBUGARG("Indirect call through function pointer"));
6820 impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL);
6821 fptr = gtNewLclvNode(lclNum, TYP_I_IMPL);
6823 call = gtNewIndCallNode(fptr, callRetTyp, nullptr, ilOffset);
6824 call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
6825 if (callInfo->nullInstanceCheck)
6827 call->gtFlags |= GTF_CALL_NULLCHECK;
6834 assert(!"unknown call kind");
6838 //-------------------------------------------------------------------------
6841 PREFIX_ASSUME(call != nullptr);
6843 if (mflags & CORINFO_FLG_NOGCCHECK)
6845 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_NOGCCHECK;
6848 // Mark call if it's one of the ones we will maybe treat as an intrinsic
6849 if (intrinsicID == CORINFO_INTRINSIC_Object_GetType || intrinsicID == CORINFO_INTRINSIC_TypeEQ ||
6850 intrinsicID == CORINFO_INTRINSIC_TypeNEQ || intrinsicID == CORINFO_INTRINSIC_GetCurrentManagedThread ||
6851 intrinsicID == CORINFO_INTRINSIC_GetManagedThreadId)
6853 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_SPECIAL_INTRINSIC;
6857 assert(clsHnd || (opcode == CEE_CALLI)); // We're never verifying for CALLI, so this is not set.
6859 /* Some sanity checks */
6861 // CALL_VIRT and NEWOBJ must have a THIS pointer
6862 assert((opcode != CEE_CALLVIRT && opcode != CEE_NEWOBJ) || (sig->callConv & CORINFO_CALLCONV_HASTHIS));
6863 // static bit and hasThis are negations of one another
6864 assert(((mflags & CORINFO_FLG_STATIC) != 0) == ((sig->callConv & CORINFO_CALLCONV_HASTHIS) == 0));
6865 assert(call != nullptr);
6867 /*-------------------------------------------------------------------------
6868 * Check special-cases etc
6871 /* Special case - Check if it is a call to Delegate.Invoke(). */
6873 if (mflags & CORINFO_FLG_DELEGATE_INVOKE)
6875 assert(!compIsForInlining());
6876 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
6877 assert(mflags & CORINFO_FLG_FINAL);
6879 /* Set the delegate flag */
6880 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_DELEGATE_INV;
6882 if (callInfo->secureDelegateInvoke)
6884 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_SECURE_DELEGATE_INV;
6887 if (opcode == CEE_CALLVIRT)
6889 assert(mflags & CORINFO_FLG_FINAL);
6891 /* It should have the GTF_CALL_NULLCHECK flag set. Reset it */
6892 assert(call->gtFlags & GTF_CALL_NULLCHECK);
6893 call->gtFlags &= ~GTF_CALL_NULLCHECK;
6897 CORINFO_CLASS_HANDLE actualMethodRetTypeSigClass;
6898 actualMethodRetTypeSigClass = sig->retTypeSigClass;
6899 if (varTypeIsStruct(callRetTyp))
6901 callRetTyp = impNormStructType(actualMethodRetTypeSigClass);
6902 call->gtType = callRetTyp;
6906 /* Check for varargs */
6907 if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG ||
6908 (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
6910 BADCODE("Varargs not supported.");
6912 #endif // !FEATURE_VARARG
6914 if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG ||
6915 (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
6917 assert(!compIsForInlining());
6919 /* Set the right flags */
6921 call->gtFlags |= GTF_CALL_POP_ARGS;
6922 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_VARARGS;
6924 /* Can't allow tailcall for varargs as it is caller-pop. The caller
6925 will be expecting to pop a certain number of arguments, but if we
6926 tailcall to a function with a different number of arguments, we
6927 are hosed. There are ways around this (caller remembers esp value,
6928 varargs is not caller-pop, etc), but not worth it. */
6929 CLANG_FORMAT_COMMENT_ANCHOR;
6934 canTailCall = false;
6935 szCanTailCallFailReason = "Callee is varargs";
6939 /* Get the total number of arguments - this is already correct
6940 * for CALLI - for methods we have to get it from the call site */
6942 if (opcode != CEE_CALLI)
6945 unsigned numArgsDef = sig->numArgs;
6947 eeGetCallSiteSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, sig);
6950 // We cannot lazily obtain the signature of a vararg call because using its method
6951 // handle will give us only the declared argument list, not the full argument list.
6952 assert(call->gtCall.callSig == nullptr);
6953 call->gtCall.callSig = new (this, CMK_CorSig) CORINFO_SIG_INFO;
6954 *call->gtCall.callSig = *sig;
6957 // For vararg calls we must be sure to load the return type of the
6958 // method actually being called, as well as the return types
6959 // specified in the vararg signature. With type equivalency, these types
6960 // may not be the same.
6961 if (sig->retTypeSigClass != actualMethodRetTypeSigClass)
6963 if (actualMethodRetTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS &&
6964 sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR &&
6965 sig->retType != CORINFO_TYPE_VAR)
6967 // Make sure that all valuetypes (including enums) that we push are loaded.
6968 // This is to guarantee that if a GC is triggered from the prestub of this method,
6969 // all valuetypes in the method signature are already loaded.
6970 // We need to be able to find the size of the valuetypes, but we cannot
6971 // do a class-load from within GC.
6972 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(actualMethodRetTypeSigClass);
6976 assert(numArgsDef <= sig->numArgs);
6979 /* We will have "cookie" as the last argument but we cannot push
6980 * it on the operand stack because we may overflow, so we append it
6981 * to the arg list right after we pop them */
6984 if (mflags & CORINFO_FLG_SECURITYCHECK)
6986 assert(!compIsForInlining());
6988 // Need security prolog/epilog callouts when there is
6989 // imperative security in the method. This is to give security a
6990 // chance to do any setup in the prolog and cleanup in the epilog if needed.
6992 if (compIsForInlining())
6994 // Cannot handle this if the method being imported is itself an inlinee,
6995 // because the inlinee method does not have its own frame.
6997 compInlineResult->NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
7002 tiSecurityCalloutNeeded = true;
7004 // If the current method calls a method which needs a security check,
7005 // (i.e. the method being compiled has imperative security)
7006 // we need to reserve a slot for the security object in
7007 // the current method's stack frame
7008 opts.compNeedSecurityCheck = true;
7012 //--------------------------- Inline NDirect ------------------------------
7014 // For inline cases we technically should look at both the current
7015 // block and the call site block (or just the latter if we've
7016 // fused the EH trees). However the block-related checks pertain to
7017 // EH and we currently won't inline a method with EH. So for
7018 // inlinees, just checking the call site block is sufficient.
7020 // New lexical block here to avoid compilation errors because of GOTOs.
7021 BasicBlock* block = compIsForInlining() ? impInlineInfo->iciBlock : compCurBB;
7022 impCheckForPInvokeCall(call, methHnd, sig, mflags, block);
7025 if (call->gtFlags & GTF_CALL_UNMANAGED)
7027 // We set up the unmanaged call by linking the frame, disabling GC, etc
7028 // This needs to be cleaned up on return
7031 canTailCall = false;
7032 szCanTailCallFailReason = "Callee is native";
7035 checkForSmallType = true;
7037 impPopArgsForUnmanagedCall(call, sig);
7041 else if ((opcode == CEE_CALLI) && (((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_STDCALL) ||
7042 ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_C) ||
7043 ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_THISCALL) ||
7044 ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_FASTCALL)))
7046 if (!info.compCompHnd->canGetCookieForPInvokeCalliSig(sig))
7048 // Normally this only happens with inlining.
7049 // However, a generic method (or type) being NGENd into another module
7050 // can run into this issue as well. There's not an easy fall-back for NGEN
7051 // so instead we fall back to JIT.
7052 if (compIsForInlining())
7054 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_PINVOKE_COOKIE);
7058 IMPL_LIMITATION("Can't get PInvoke cookie (cross module generics)");
7064 GenTreePtr cookie = eeGetPInvokeCookie(sig);
7066 // This cookie is required to be either a simple GT_CNS_INT or
7067 // an indirection of a GT_CNS_INT
7069 GenTreePtr cookieConst = cookie;
7070 if (cookie->gtOper == GT_IND)
7072 cookieConst = cookie->gtOp.gtOp1;
7074 assert(cookieConst->gtOper == GT_CNS_INT);
7076 // Setting GTF_DONT_CSE on the GT_CNS_INT as well as on the GT_IND (if it exists) will ensure that
7077 // we won't allow this tree to participate in any CSE logic
7079 cookie->gtFlags |= GTF_DONT_CSE;
7080 cookieConst->gtFlags |= GTF_DONT_CSE;
7082 call->gtCall.gtCallCookie = cookie;
7086 canTailCall = false;
7087 szCanTailCallFailReason = "PInvoke calli";
7091 /*-------------------------------------------------------------------------
7092 * Create the argument list
7095 //-------------------------------------------------------------------------
7096 // Special case - for varargs we have an implicit last argument
7098 if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
7100 assert(!compIsForInlining());
7102 void *varCookie, *pVarCookie;
7103 if (!info.compCompHnd->canGetVarArgsHandle(sig))
7105 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_VARARGS_COOKIE);
7109 varCookie = info.compCompHnd->getVarArgsHandle(sig, &pVarCookie);
7110 assert((!varCookie) != (!pVarCookie));
7111 GenTreePtr cookie = gtNewIconEmbHndNode(varCookie, pVarCookie, GTF_ICON_VARG_HDL);
7113 assert(extraArg == nullptr);
7114 extraArg = gtNewArgList(cookie);
7117 //-------------------------------------------------------------------------
7118 // Extra arg for shared generic code and array methods
7120 // Extra argument containing instantiation information is passed in the
7121 // following circumstances:
7122 // (a) To the "Address" method on array classes; the extra parameter is
7123 // the array's type handle (a TypeDesc)
7124 // (b) To shared-code instance methods in generic structs; the extra parameter
7125 // is the struct's type handle (a vtable ptr)
7126 // (c) To shared-code per-instantiation non-generic static methods in generic
7127 // classes and structs; the extra parameter is the type handle
7128 // (d) To shared-code generic methods; the extra parameter is an
7129 // exact-instantiation MethodDesc
7131 // We also set the exact type context associated with the call so we can
7132 // inline the call correctly later on.
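//
// For example (an illustrative sketch, not from the original comment; the method name is
// hypothetical): a call to a shared-code generic method Foo<T>() compiled once for all
// reference-type instantiations would receive the exact-instantiation MethodDesc for, say,
// Foo<string> as this hidden extra argument -- case (d) above.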
7134 if (sig->callConv & CORINFO_CALLCONV_PARAMTYPE)
7136 assert(call->gtCall.gtCallType == CT_USER_FUNC);
7137 if (clsHnd == nullptr)
7139 NO_WAY("CALLI on parameterized type");
7142 assert(opcode != CEE_CALLI);
7144 GenTreePtr instParam;
7147 // Instantiated generic method
7148 if (((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_METHOD)
7150 CORINFO_METHOD_HANDLE exactMethodHandle =
7151 (CORINFO_METHOD_HANDLE)((SIZE_T)exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK);
7153 if (!exactContextNeedsRuntimeLookup)
7155 #ifdef FEATURE_READYTORUN_COMPILER
7156 if (opts.IsReadyToRun())
7159 impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_METHOD_HDL, exactMethodHandle);
7160 if (instParam == nullptr)
7168 instParam = gtNewIconEmbMethHndNode(exactMethodHandle);
7169 info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(exactMethodHandle);
7174 instParam = impTokenToHandle(pResolvedToken, &runtimeLookup, TRUE /*mustRestoreHandle*/);
7175 if (instParam == nullptr)
7182 // otherwise must be an instance method in a generic struct,
7183 // a static method in a generic type, or a runtime-generated array method
7186 assert(((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_CLASS);
7187 CORINFO_CLASS_HANDLE exactClassHandle =
7188 (CORINFO_CLASS_HANDLE)((SIZE_T)exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK);
7190 if (compIsForInlining() && (clsFlags & CORINFO_FLG_ARRAY) != 0)
7192 compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_ARRAY_METHOD);
7196 if ((clsFlags & CORINFO_FLG_ARRAY) && readonlyCall)
7198 // We indicate "readonly" to the Address operation by using a null instParam
7200 instParam = gtNewIconNode(0, TYP_REF);
7203 if (!exactContextNeedsRuntimeLookup)
7205 #ifdef FEATURE_READYTORUN_COMPILER
7206 if (opts.IsReadyToRun())
7209 impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_CLASS_HDL, exactClassHandle);
7210 if (instParam == nullptr)
7218 instParam = gtNewIconEmbClsHndNode(exactClassHandle);
7219 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(exactClassHandle);
7224 instParam = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup, TRUE /*mustRestoreHandle*/);
7225 if (instParam == nullptr)
7232 assert(extraArg == nullptr);
7233 extraArg = gtNewArgList(instParam);
7236 // Inlining may need the exact type context (exactContextHnd) if we're inlining shared generic code, in particular
7237 // to inline 'polytypic' operations such as static field accesses, type tests and method calls which
7238 // rely on the exact context. The exactContextHnd is passed back to the JitInterface at appropriate points.
7239 // exactContextHnd is not currently required when inlining shared generic code into shared
7240 // generic code, since the inliner aborts whenever shared code polytypic operations are encountered
7241 // (e.g. anything marked needsRuntimeLookup)
7242 if (exactContextNeedsRuntimeLookup)
7244 exactContextHnd = nullptr;
7247 //-------------------------------------------------------------------------
7248 // The main group of arguments
7250 args = call->gtCall.gtCallArgs = impPopList(sig->numArgs, &argFlags, sig, extraArg);
7254 call->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
7257 //-------------------------------------------------------------------------
7258 // The "this" pointer
7260 if (!(mflags & CORINFO_FLG_STATIC) && !((opcode == CEE_NEWOBJ) && (newobjThis == nullptr)))
7264 if (opcode == CEE_NEWOBJ)
7270 obj = impPopStack().val;
7271 obj = impTransformThis(obj, pConstrainedResolvedToken, constraintCallThisTransform);
7272 if (compDonotInline())
7278 /* Is this a virtual or interface call? */
7280 if ((call->gtFlags & GTF_CALL_VIRT_KIND_MASK) != GTF_CALL_NONVIRT)
7282 /* only true object pointers can be virtual */
7284 assert(obj->gtType == TYP_REF);
7290 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_NONVIRT_SAME_THIS;
7294 /* Store the "this" value in the call */
7296 call->gtFlags |= obj->gtFlags & GTF_GLOB_EFFECT;
7297 call->gtCall.gtCallObjp = obj;
7300 //-------------------------------------------------------------------------
7301 // The "this" pointer for "newobj"
7303 if (opcode == CEE_NEWOBJ)
7305 if (clsFlags & CORINFO_FLG_VAROBJSIZE)
7307 assert(!(clsFlags & CORINFO_FLG_ARRAY)); // arrays handled separately
7308 // This is a 'new' of a variable-sized object, where
7309 // the constructor is to return the object. In this case
7310 // the constructor claims to return VOID but we know it
7311 // actually returns the new object
7312 assert(callRetTyp == TYP_VOID);
7313 callRetTyp = TYP_REF;
7314 call->gtType = TYP_REF;
7315 impSpillSpecialSideEff();
7317 impPushOnStack(call, typeInfo(TI_REF, clsHnd));
7321 if (clsFlags & CORINFO_FLG_DELEGATE)
7323 // The new inliner morphs it in impImportCall.
7324 // This will allow us to inline the call to the delegate constructor.
7325 call = fgOptimizeDelegateConstructor(call, &exactContextHnd);
7328 if (!bIntrinsicImported)
7331 #if defined(DEBUG) || defined(INLINE_DATA)
7333 // Keep track of the raw IL offset of the call
7334 call->gtCall.gtRawILOffset = rawILOffset;
7336 #endif // defined(DEBUG) || defined(INLINE_DATA)
7338 // Is it an inline candidate?
7339 impMarkInlineCandidate(call, exactContextHnd, callInfo);
7342 // append the call node.
7343 impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
7345 // Now push the value of the 'new' onto the stack
7347 // This is a 'new' of a non-variable sized object.
7348 // Append the new node (op1) to the statement list,
7349 // and then push the local holding the value of this
7350 // new instruction on the stack.
7352 if (clsFlags & CORINFO_FLG_VALUECLASS)
7354 assert(newobjThis->gtOper == GT_ADDR && newobjThis->gtOp.gtOp1->gtOper == GT_LCL_VAR);
7356 unsigned tmp = newobjThis->gtOp.gtOp1->gtLclVarCommon.gtLclNum;
7357 impPushOnStack(gtNewLclvNode(tmp, lvaGetRealType(tmp)), verMakeTypeInfo(clsHnd).NormaliseForStack());
7361 if (newobjThis->gtOper == GT_COMMA)
7363 // In coreclr the callout can be inserted even if verification is disabled
7364 // so we cannot rely on tiVerificationNeeded alone
7366 // We must have inserted the callout. Get the real newobj.
7367 newobjThis = newobjThis->gtOp.gtOp2;
7370 assert(newobjThis->gtOper == GT_LCL_VAR);
7371 impPushOnStack(gtNewLclvNode(newobjThis->gtLclVarCommon.gtLclNum, TYP_REF), typeInfo(TI_REF, clsHnd));
7381 // This check cannot be performed for implicit tail calls for the reason
7382 // that impIsImplicitTailCallCandidate() is not checking whether return
7383 // types are compatible before marking a call node with PREFIX_TAILCALL_IMPLICIT.
7384 // As a result it is possible that in the following case, we find that
7385 // the type stack is non-empty if Callee() is considered for implicit tail calling:
7387 // int Caller(..) { .... void Callee(); ret val; ... }
7389 // Note that we cannot check return type compatibility before ImpImportCall()
7390 // as we don't have required info or need to duplicate some of the logic of
7393 // For implicit tail calls, we perform this check after return types are
7394 // known to be compatible.
7395 if ((tailCall & PREFIX_TAILCALL_EXPLICIT) && (verCurrentState.esStackDepth != 0))
7397 BADCODE("Stack should be empty after tailcall");
7400 // Note that we cannot relax this condition with genActualType() as
7401 // the calling convention dictates that the caller of a function with
7402 // a small-typed return value is responsible for normalizing the return value
7405 !impTailCallRetTypeCompatible(info.compRetType, info.compMethodInfo->args.retTypeClass, callRetTyp,
7406 callInfo->sig.retTypeClass))
7408 canTailCall = false;
7409 szCanTailCallFailReason = "Return types are not tail call compatible";
7412 // Stack empty check for implicit tail calls.
7413 if (canTailCall && (tailCall & PREFIX_TAILCALL_IMPLICIT) && (verCurrentState.esStackDepth != 0))
7415 #ifdef _TARGET_AMD64_
7416 // JIT64 Compatibility: Opportunistic tail call stack mismatch throws a VerificationException
7417 // in JIT64, not an InvalidProgramException.
7418 Verify(false, "Stack should be empty after tailcall");
7419 #else // _TARGET_64BIT_
7420 BADCODE("Stack should be empty after tailcall");
7421 #endif //!_TARGET_64BIT_
7424 // assert(compCurBB is not a catch, finally or filter block);
7425 // assert(compCurBB is not a try block protected by a finally block);
7427 // Check for permission to tailcall
7428 bool explicitTailCall = (tailCall & PREFIX_TAILCALL_EXPLICIT) != 0;
7430 assert(!explicitTailCall || compCurBB->bbJumpKind == BBJ_RETURN);
7434 // True virtual or indirect calls, shouldn't pass in a callee handle.
7435 CORINFO_METHOD_HANDLE exactCalleeHnd = ((call->gtCall.gtCallType != CT_USER_FUNC) ||
7436 ((call->gtFlags & GTF_CALL_VIRT_KIND_MASK) != GTF_CALL_NONVIRT))
7439 GenTreePtr thisArg = call->gtCall.gtCallObjp;
7441 if (info.compCompHnd->canTailCall(info.compMethodHnd, methHnd, exactCalleeHnd, explicitTailCall))
7444 if (explicitTailCall)
7446 // In case of explicit tail calls, mark it so that it is not considered for inlining.
7448 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_EXPLICIT_TAILCALL;
7452 printf("\nGTF_CALL_M_EXPLICIT_TAILCALL bit set for call ");
7460 #if FEATURE_TAILCALL_OPT
7461 // Must be an implicit tail call.
7462 assert((tailCall & PREFIX_TAILCALL_IMPLICIT) != 0);
7464 // It is possible that a call node is both an inline candidate and marked
7465 // for opportunistic tail calling. In-lining happens before morphing of
7466 // trees. If in-lining of an in-line candidate gets aborted for whatever
7467 // reason, it will survive to the morphing stage at which point it will be
7468 // transformed into a tail call after performing additional checks.
7470 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_IMPLICIT_TAILCALL;
7474 printf("\nGTF_CALL_M_IMPLICIT_TAILCALL bit set for call ");
7480 #else //! FEATURE_TAILCALL_OPT
7481 NYI("Implicit tail call prefix on a target which doesn't support opportunistic tail calls");
7483 #endif // FEATURE_TAILCALL_OPT
7486 // we can't report success just yet...
7490 canTailCall = false;
7491 // canTailCall reported its reasons already
7495 printf("\ninfo.compCompHnd->canTailCall returned false for call ");
7504 // If this assert fires it means that canTailCall was set to false without setting a reason!
7505 assert(szCanTailCallFailReason != nullptr);
7510 printf("\nRejecting %splicit tail call for call ", explicitTailCall ? "ex" : "im");
7512 printf(": %s\n", szCanTailCallFailReason);
7515 info.compCompHnd->reportTailCallDecision(info.compMethodHnd, methHnd, explicitTailCall, TAILCALL_FAIL,
7516 szCanTailCallFailReason);
7520 // Note: we assume that small return types are already normalized by the managed callee
7521 // or by the pinvoke stub for calls to unmanaged code.
7525 if (!bIntrinsicImported)
7528 // Things that need to be checked when bIntrinsicImported is false.
7531 assert(call->gtOper == GT_CALL);
7532 assert(sig != nullptr);
7534 // Tail calls require us to save the call site's sig info so we can obtain an argument
7535 // copying thunk from the EE later on.
7536 if (call->gtCall.callSig == nullptr)
7538 call->gtCall.callSig = new (this, CMK_CorSig) CORINFO_SIG_INFO;
7539 *call->gtCall.callSig = *sig;
7542 if (compIsForInlining() && opcode == CEE_CALLVIRT)
7544 GenTreePtr callObj = call->gtCall.gtCallObjp;
7545 assert(callObj != nullptr);
7547 unsigned callKind = call->gtFlags & GTF_CALL_VIRT_KIND_MASK;
7549 if (((callKind != GTF_CALL_NONVIRT) || (call->gtFlags & GTF_CALL_NULLCHECK)) &&
7550 impInlineIsGuaranteedThisDerefBeforeAnySideEffects(call->gtCall.gtCallArgs, callObj,
7551 impInlineInfo->inlArgInfo))
7553 impInlineInfo->thisDereferencedFirst = true;
7557 #if defined(DEBUG) || defined(INLINE_DATA)
7559 // Keep track of the raw IL offset of the call
7560 call->gtCall.gtRawILOffset = rawILOffset;
7562 #endif // defined(DEBUG) || defined(INLINE_DATA)
7564 // Is it an inline candidate?
7565 impMarkInlineCandidate(call, exactContextHnd, callInfo);
7568 // Push or append the result of the call
7569 if (callRetTyp == TYP_VOID)
7571 if (opcode == CEE_NEWOBJ)
7573 // we actually did push something, so don't spill the thing we just pushed.
7574 assert(verCurrentState.esStackDepth > 0);
7575 impAppendTree(call, verCurrentState.esStackDepth - 1, impCurStmtOffs);
7579 impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
7584 impSpillSpecialSideEff();
7586 if (clsFlags & CORINFO_FLG_ARRAY)
7588 eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
7591 // Find the return type used for verification by interpreting the method signature.
7592 // NB: we are clobbering the already established sig.
7593 if (tiVerificationNeeded)
7595 // Actually, we never get the sig for the original method.
7596 sig = &(callInfo->verSig);
7599 typeInfo tiRetVal = verMakeTypeInfo(sig->retType, sig->retTypeClass);
7600 tiRetVal.NormaliseForStack();
7602 // The CEE_READONLY prefix modifies the verification semantics of an Address
7603 // operation on an array type.
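// For example (an illustrative note, not from the original comment): IL of the form
//     readonly. call instance int32& int32[0...,0...]::Address(int32, int32)
// produces a controlled-mutability byref that may only be read through, which is why the
// returned byref is marked readonly below.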
7604 if ((clsFlags & CORINFO_FLG_ARRAY) && readonlyCall && tiRetVal.IsByRef())
7606 tiRetVal.SetIsReadonlyByRef();
7609 if (tiVerificationNeeded)
7611 // We assume all calls return permanent home byrefs. If they
7612 // didn't they wouldn't be verifiable. This is also covering
7613 // the Address() helper for multidimensional arrays.
7614 if (tiRetVal.IsByRef())
7616 tiRetVal.SetIsPermanentHomeByRef();
7620 if (call->gtOper == GT_CALL)
7622 // Sometimes "call" is not a GT_CALL (if we imported an intrinsic that didn't turn into a call)
7623 if (varTypeIsStruct(callRetTyp))
7625 call = impFixupCallStructReturn(call, sig->retTypeClass);
7628 if ((call->gtFlags & GTF_CALL_INLINE_CANDIDATE) != 0)
7630 assert(opts.OptEnabled(CLFLG_INLINING));
7632 // Make the call its own tree (spill the stack if needed).
7633 impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
7635 // TODO: Still using the widened type.
7636 call = gtNewInlineCandidateReturnExpr(call, genActualType(callRetTyp));
7640 // For non-candidates we must also spill, since we
7641 // might have locals live on the eval stack that this call can modify.
7643 impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("non-inline candidate call"));
7647 if (!bIntrinsicImported)
7649 //-------------------------------------------------------------------------
7651 /* If the call is of a small type and the callee is managed, the callee will normalize the result.
7653 However, we need to normalize small type values returned by unmanaged
7654 functions (pinvoke). The pinvoke stub does the normalization, but we need to do it here
7655 if we use the shorter inlined pinvoke stub. */
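/* For example (illustrative only): an unmanaged function declared to return an 8-bit value may leave
   garbage in the upper bits of the return register when the inlined pinvoke stub is used, so the cast
   inserted below re-normalizes the small-typed result before it is pushed on the stack. */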
7657 if (checkForSmallType && varTypeIsIntegral(callRetTyp) && genTypeSize(callRetTyp) < genTypeSize(TYP_INT))
7659 call = gtNewCastNode(genActualType(callRetTyp), call, callRetTyp);
7663 impPushOnStack(call, tiRetVal);
7666 // VSD functions get a new call target each time we getCallInfo, so clear the cache.
7667 // Also, the call info cache for CALLI instructions is largely incomplete, so clear it out.
7668 // if ( (opcode == CEE_CALLI) || (callInfoCache.fetchCallInfo().kind == CORINFO_VIRTUALCALL_STUB))
7669 // callInfoCache.uncacheCallInfo();
7674 #pragma warning(pop)
7677 bool Compiler::impMethodInfo_hasRetBuffArg(CORINFO_METHOD_INFO* methInfo)
7679 CorInfoType corType = methInfo->args.retType;
7681 if ((corType == CORINFO_TYPE_VALUECLASS) || (corType == CORINFO_TYPE_REFANY))
7683 // We have some kind of STRUCT being returned
7685 structPassingKind howToReturnStruct = SPK_Unknown;
7687 var_types returnType = getReturnTypeForStruct(methInfo->args.retTypeClass, &howToReturnStruct);
7689 if (howToReturnStruct == SPK_ByReference)
7700 var_types Compiler::impImportJitTestLabelMark(int numArgs)
7702 TestLabelAndNum tlAndN;
7706 StackEntry se = impPopStack();
7707 assert(se.seTypeInfo.GetType() == TI_INT);
7708 GenTreePtr val = se.val;
7709 assert(val->IsCnsIntOrI());
7710 tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue();
7712 else if (numArgs == 3)
7714 StackEntry se = impPopStack();
7715 assert(se.seTypeInfo.GetType() == TI_INT);
7716 GenTreePtr val = se.val;
7717 assert(val->IsCnsIntOrI());
7718 tlAndN.m_num = val->AsIntConCommon()->IconValue();
7720 assert(se.seTypeInfo.GetType() == TI_INT);
7722 assert(val->IsCnsIntOrI());
7723 tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue();
7730 StackEntry expSe = impPopStack();
7731 GenTreePtr node = expSe.val;
7733 // There are a small number of special cases, where we actually put the annotation on a subnode.
7734 if (tlAndN.m_tl == TL_LoopHoist && tlAndN.m_num >= 100)
7736 // A loop hoist annotation with value >= 100 means that the expression should be a static field access,
7737 // a GT_IND of a static field address, which should be the sum of a (hoistable) helper call and possibly some
7738 // offset within the static field block whose address is returned by the helper call.
7739 // The annotation is saying that this address calculation, but not the entire access, should be hoisted.
7740 GenTreePtr helperCall = nullptr;
7741 assert(node->OperGet() == GT_IND);
7742 tlAndN.m_num -= 100;
7743 GetNodeTestData()->Set(node->gtOp.gtOp1, tlAndN);
7744 GetNodeTestData()->Remove(node);
7748 GetNodeTestData()->Set(node, tlAndN);
7751 impPushOnStack(node, expSe.seTypeInfo);
7752 return node->TypeGet();
7756 //-----------------------------------------------------------------------------------
7757 // impFixupCallStructReturn: For a call node that returns a struct type either
7758 // adjust the return type to an enregisterable type, or set the flag to indicate
7759 // struct return via retbuf arg.
7762 // call - GT_CALL GenTree node
7763 // retClsHnd - Class handle of return type of the call
7766 // Returns new GenTree node after fixing struct return of call node
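// For example (a sketch of the cases handled below, not an exhaustive list): a struct that fits in a
// single return register has the call's gtReturnType changed to that register type; a struct returned
// in multiple registers is assigned to a temp unless the call is a tail call or an inline candidate;
// otherwise the call is flagged as returning via a hidden retbuf arg.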
7768 GenTreePtr Compiler::impFixupCallStructReturn(GenTreePtr call, CORINFO_CLASS_HANDLE retClsHnd)
7770 assert(call->gtOper == GT_CALL);
7772 if (!varTypeIsStruct(call))
7777 call->gtCall.gtRetClsHnd = retClsHnd;
7779 GenTreeCall* callNode = call->AsCall();
7781 #if FEATURE_MULTIREG_RET
7782 // Initialize Return type descriptor of call node
7783 ReturnTypeDesc* retTypeDesc = callNode->GetReturnTypeDesc();
7784 retTypeDesc->InitializeStructReturnType(this, retClsHnd);
7785 #endif // FEATURE_MULTIREG_RET
7787 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
7789 // Not allowed for FEATURE_CORCLR which is the only SKU available for System V OSs.
7790 assert(!callNode->IsVarargs() && "varargs not allowed for System V OSs.");
7792 // The return type will remain as the incoming struct type unless normalized to a
7793 // single eightbyte return type below.
7794 callNode->gtReturnType = call->gtType;
7796 unsigned retRegCount = retTypeDesc->GetReturnRegCount();
7797 if (retRegCount != 0)
7799 if (retRegCount == 1)
7801 // struct returned in a single register
7802 callNode->gtReturnType = retTypeDesc->GetReturnRegType(0);
7806 // must be a struct returned in two registers
7807 assert(retRegCount == 2);
7809 if ((!callNode->CanTailCall()) && (!callNode->IsInlineCandidate()))
7811 // Force a call returning a multi-reg struct to always be of the IR form: tmp = call
7814 // No need to assign a multi-reg struct to a local var if:
7815 // - It is a tail call or
7816 // - The call is marked for in-lining later
7817 return impAssignMultiRegTypeToVar(call, retClsHnd);
7823 // struct not returned in registers, i.e., returned via a hidden retbuf arg.
7824 callNode->gtCallMoreFlags |= GTF_CALL_M_RETBUFFARG;
7827 #else // not FEATURE_UNIX_AMD64_STRUCT_PASSING
7829 #if FEATURE_MULTIREG_RET && defined(_TARGET_ARM_)
7830 // There is no fixup necessary if the return type is a HFA struct.
7831 // HFA structs are returned in registers for ARM32 and ARM64
7833 if (!call->gtCall.IsVarargs() && IsHfa(retClsHnd))
7835 if (call->gtCall.CanTailCall())
7837 if (info.compIsVarArgs)
7839 // We cannot tail call because control needs to return to fixup the calling
7840 // convention for result return.
7841 call->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
7845 // If we can tail call returning HFA, then don't assign it to
7846 // a variable back and forth.
7851 if (call->gtFlags & GTF_CALL_INLINE_CANDIDATE)
7856 unsigned retRegCount = retTypeDesc->GetReturnRegCount();
7857 if (retRegCount >= 2)
7859 return impAssignMultiRegTypeToVar(call, retClsHnd);
7862 #endif // _TARGET_ARM_
7864 // Check for a TYP_STRUCT type that wraps a primitive type.
7865 // Such structs are returned using a single register,
7866 // and we change the return type on those calls here.
7868 structPassingKind howToReturnStruct;
7869 var_types returnType = getReturnTypeForStruct(retClsHnd, &howToReturnStruct);
7871 if (howToReturnStruct == SPK_ByReference)
7873 assert(returnType == TYP_UNKNOWN);
7874 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_RETBUFFARG;
7878 assert(returnType != TYP_UNKNOWN);
7879 call->gtCall.gtReturnType = returnType;
7881 // ToDo: Refactor this common code sequence into its own method as it is used 4+ times
7882 if ((returnType == TYP_LONG) && (compLongUsed == false))
7884 compLongUsed = true;
7886 else if (((returnType == TYP_FLOAT) || (returnType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
7888 compFloatingPointUsed = true;
7891 #if FEATURE_MULTIREG_RET
7892 unsigned retRegCount = retTypeDesc->GetReturnRegCount();
7893 assert(retRegCount != 0);
7895 if (retRegCount >= 2)
7897 if ((!callNode->CanTailCall()) && (!callNode->IsInlineCandidate()))
7899 // Force a call returning a multi-reg struct to always be of the IR form: tmp = call
7902 // No need to assign a multi-reg struct to a local var if:
7903 // - It is a tail call or
7904 // - The call is marked for in-lining later
7905 return impAssignMultiRegTypeToVar(call, retClsHnd);
7908 #endif // FEATURE_MULTIREG_RET
7911 #endif // not FEATURE_UNIX_AMD64_STRUCT_PASSING
7916 /*****************************************************************************
7917 For struct return values, re-type the operand in the case where the ABI
7918 does not use a struct return buffer.
7919 Note that this method is only called for !_TARGET_X86_
7922 GenTreePtr Compiler::impFixupStructReturnType(GenTreePtr op, CORINFO_CLASS_HANDLE retClsHnd)
7924 assert(varTypeIsStruct(info.compRetType));
7925 assert(info.compRetBuffArg == BAD_VAR_NUM);
7927 #if defined(_TARGET_XARCH_)
7929 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
7930 // No VarArgs for CoreCLR on x64 Unix
7931 assert(!info.compIsVarArgs);
7933 // Is method returning a multi-reg struct?
7934 if (varTypeIsStruct(info.compRetNativeType) && IsMultiRegReturnedType(retClsHnd))
7936 // In case of multi-reg struct return, we force IR to be one of the following:
7937 // GT_RETURN(lclvar) or GT_RETURN(call). If op is anything other than a
7938 // lclvar or call, it is assigned to a temp to create: temp = op and GT_RETURN(tmp).
7940 if (op->gtOper == GT_LCL_VAR)
7942 // Make sure that this struct stays in memory and doesn't get promoted.
7943 unsigned lclNum = op->gtLclVarCommon.gtLclNum;
7944 lvaTable[lclNum].lvIsMultiRegRet = true;
7946 // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
7947 op->gtFlags |= GTF_DONT_CSE;
7952 if (op->gtOper == GT_CALL)
7957 return impAssignMultiRegTypeToVar(op, retClsHnd);
7959 #else // !FEATURE_UNIX_AMD64_STRUCT_PASSING
7960 assert(info.compRetNativeType != TYP_STRUCT);
7961 #endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING
7963 #elif FEATURE_MULTIREG_RET && defined(_TARGET_ARM_)
7965 if (varTypeIsStruct(info.compRetNativeType) && !info.compIsVarArgs && IsHfa(retClsHnd))
7967 if (op->gtOper == GT_LCL_VAR)
7969 // This LCL_VAR is an HFA return value, it stays as a TYP_STRUCT
7970 unsigned lclNum = op->gtLclVarCommon.gtLclNum;
7971 // Make sure this struct type stays as struct so that we can return it as an HFA
7972 lvaTable[lclNum].lvIsMultiRegRet = true;
7974 // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
7975 op->gtFlags |= GTF_DONT_CSE;
7980 if (op->gtOper == GT_CALL)
7982 if (op->gtCall.IsVarargs())
7984 // We cannot tail call because control needs to return to fixup the calling
7985 // convention for result return.
7986 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL;
7987 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
7994 return impAssignMultiRegTypeToVar(op, retClsHnd);
7997 #elif FEATURE_MULTIREG_RET && defined(_TARGET_ARM64_)
7999 // Is method returning a multi-reg struct?
8000 if (IsMultiRegReturnedType(retClsHnd))
8002 if (op->gtOper == GT_LCL_VAR)
8004 // This LCL_VAR stays as a TYP_STRUCT
8005 unsigned lclNum = op->gtLclVarCommon.gtLclNum;
8007 // Make sure this struct type is not struct promoted
8008 lvaTable[lclNum].lvIsMultiRegRet = true;
8010 // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
8011 op->gtFlags |= GTF_DONT_CSE;
8016 if (op->gtOper == GT_CALL)
8018 if (op->gtCall.IsVarargs())
8020 // We cannot tail call because control needs to return to fixup the calling
8021 // convention for result return.
8022 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL;
8023 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
8030 return impAssignMultiRegTypeToVar(op, retClsHnd);
8033 #endif // FEATURE_MULTIREG_RET && FEATURE_HFA
8036 // Adjust the type away from struct to integral,
8037 // with no normalizing.
8038 if (op->gtOper == GT_LCL_VAR)
8040 op->ChangeOper(GT_LCL_FLD);
8042 else if (op->gtOper == GT_OBJ)
8044 GenTreePtr op1 = op->AsObj()->Addr();
8046 // We will fold away OBJ/ADDR
8047 // except for OBJ/ADDR/INDEX
8048 // as the array type influences the array element's offset
8049 // Later in this method we change op->gtType to info.compRetNativeType.
8050 // This is not correct when op is a GT_INDEX as the starting offset
8051 // for the array elements 'elemOffs' is different for an array of
8052 // TYP_REF than an array of TYP_STRUCT (which simply wraps a TYP_REF)
8053 // Also refer to the GTF_INX_REFARR_LAYOUT flag
8055 if ((op1->gtOper == GT_ADDR) && (op1->gtOp.gtOp1->gtOper != GT_INDEX))
8057 // Change '*(&X)' to 'X' and see if we can do better
8058 op = op1->gtOp.gtOp1;
8059 goto REDO_RETURN_NODE;
8061 op->gtObj.gtClass = NO_CLASS_HANDLE;
8062 op->ChangeOperUnchecked(GT_IND);
8063 op->gtFlags |= GTF_IND_TGTANYWHERE;
8065 else if (op->gtOper == GT_CALL)
8067 if (op->AsCall()->TreatAsHasRetBufArg(this))
8069 // This must be one of those 'special' helpers that don't
8070 // really have a return buffer, but instead use it as a way
8071 // to keep the trees cleaner with fewer address-taken temps.
8073 // Well now we have to materialize the return buffer as
8074 // an address-taken temp. Then we can return the temp.
8076 // NOTE: this code assumes that since the call directly
8077 // feeds the return, then the call must be returning the
8078 // same structure/class/type.
8080 unsigned tmpNum = lvaGrabTemp(true DEBUGARG("pseudo return buffer"));
8082 // No need to spill anything as we're about to return.
8083 impAssignTempGen(tmpNum, op, info.compMethodInfo->args.retTypeClass, (unsigned)CHECK_SPILL_NONE);
8085 // Don't create both a GT_ADDR & GT_OBJ just to undo all of that; instead,
8086 // jump directly to a GT_LCL_FLD.
8087 op = gtNewLclvNode(tmpNum, info.compRetNativeType);
8088 op->ChangeOper(GT_LCL_FLD);
8092 assert(info.compRetNativeType == op->gtCall.gtReturnType);
8094 // Don't change the gtType of the node just yet, it will get changed later.
8098 else if (op->gtOper == GT_COMMA)
8100 op->gtOp.gtOp2 = impFixupStructReturnType(op->gtOp.gtOp2, retClsHnd);
8103 op->gtType = info.compRetNativeType;
8108 /*****************************************************************************
8109 CEE_LEAVE may be jumping out of a protected block, viz, a catch or a
8110 finally-protected try. We find the finally blocks protecting the current
8111 offset (in order) by walking over the complete exception table and
8112 finding enclosing clauses. This assumes that the table is sorted.
8113 This will create a series of BBJ_CALLFINALLY -> BBJ_CALLFINALLY ... -> BBJ_ALWAYS.
8115 If we are leaving a catch handler, we need to attach the
8116 CPX_ENDCATCHes to the correct BBJ_CALLFINALLY blocks.
8118 After this function, the BBJ_LEAVE block has been converted to a different type.
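For example (a sketch): a leave that exits two nested finally-protected 'try' regions ends up as

    BBJ_CALLFINALLY (inner finally) -> BBJ_CALLFINALLY (outer finally) -> BBJ_ALWAYS -> leave target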
8121 #if !FEATURE_EH_FUNCLETS
8123 void Compiler::impImportLeave(BasicBlock* block)
8128 printf("\nBefore import CEE_LEAVE:\n");
8129 fgDispBasicBlocks();
8134 bool invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created)
8135 unsigned blkAddr = block->bbCodeOffs;
8136 BasicBlock* leaveTarget = block->bbJumpDest;
8137 unsigned jmpAddr = leaveTarget->bbCodeOffs;
8139 // LEAVE clears the stack: spill side effects and set the stack depth to 0
8141 impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave"));
8142 verCurrentState.esStackDepth = 0;
8144 assert(block->bbJumpKind == BBJ_LEAVE);
8145 assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != NULL); // should be a BB boundary
8147 BasicBlock* step = DUMMY_INIT(NULL);
8148 unsigned encFinallies = 0; // Number of enclosing finallies.
8149 GenTreePtr endCatches = NULL;
8150 GenTreePtr endLFin = NULL; // The statement tree to indicate the end of locally-invoked finally.
8155 for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
8157 // Grab the handler offsets
8159 IL_OFFSET tryBeg = HBtab->ebdTryBegOffs();
8160 IL_OFFSET tryEnd = HBtab->ebdTryEndOffs();
8161 IL_OFFSET hndBeg = HBtab->ebdHndBegOffs();
8162 IL_OFFSET hndEnd = HBtab->ebdHndEndOffs();
8164 /* Is this a catch-handler we are CEE_LEAVEing out of?
8165 * If so, we need to call CORINFO_HELP_ENDCATCH.
8168 if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd))
8170 // Can't CEE_LEAVE out of a finally/fault handler
8171 if (HBtab->HasFinallyOrFaultHandler())
8172 BADCODE("leave out of fault/finally block");
8174 // Create the call to CORINFO_HELP_ENDCATCH
8175 GenTreePtr endCatch = gtNewHelperCallNode(CORINFO_HELP_ENDCATCH, TYP_VOID);
8177 // Make a list of all the currently pending endCatches
8179 endCatches = gtNewOperNode(GT_COMMA, TYP_VOID, endCatches, endCatch);
8181 endCatches = endCatch;
8186 printf("impImportLeave - BB%02u jumping out of catch handler EH#%u, adding call to "
8187 "CORINFO_HELP_ENDCATCH\n",
8188 block->bbNum, XTnum);
8192 else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
8193 !jitIsBetween(jmpAddr, tryBeg, tryEnd))
8195 /* This is a finally-protected try we are jumping out of */
8197 /* If there are any pending endCatches, and we have already
8198 jumped out of a finally-protected try, then the endCatches
8199 have to be put in a block in an outer try for async
8200 exceptions to work correctly.
8201 Else, just append to the original block */
8203 BasicBlock* callBlock;
8205 assert(!encFinallies == !endLFin); // if we have finallies, we better have an endLFin tree, and vice-versa
8207 if (encFinallies == 0)
8209 assert(step == DUMMY_INIT(NULL));
8211 callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY
8214 impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
8219 printf("impImportLeave - jumping out of a finally-protected try, convert block to BBJ_CALLFINALLY "
8220 "block BB%02u [%08p]\n",
8221 callBlock->bbNum, dspPtr(callBlock));
8227 assert(step != DUMMY_INIT(NULL));
8229 /* Calling the finally block */
8230 callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, XTnum + 1, 0, step);
8231 assert(step->bbJumpKind == BBJ_ALWAYS);
8232 step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next
8233 // finally in the chain)
8234 step->bbJumpDest->bbRefs++;
8236 /* The new block will inherit this block's weight */
8237 callBlock->setBBWeight(block->bbWeight);
8238 callBlock->bbFlags |= block->bbFlags & BBF_RUN_RARELY;
8243 printf("impImportLeave - jumping out of a finally-protected try, new BBJ_CALLFINALLY block BB%02u "
8245 callBlock->bbNum, dspPtr(callBlock));
8249 GenTreePtr lastStmt;
8253 lastStmt = gtNewStmt(endCatches);
8254 endLFin->gtNext = lastStmt;
8255 lastStmt->gtPrev = endLFin;
8262 // note that this sets BBF_IMPORTED on the block
8263 impEndTreeList(callBlock, endLFin, lastStmt);
8266 step = fgNewBBafter(BBJ_ALWAYS, callBlock, true);
8267 /* The new block will inherit this block's weight */
8268 step->setBBWeight(block->bbWeight);
8269 step->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS;
8274 printf("impImportLeave - jumping out of a finally-protected try, created step (BBJ_ALWAYS) block "
8276 step->bbNum, dspPtr(step));
8280 unsigned finallyNesting = compHndBBtab[XTnum].ebdHandlerNestingLevel;
8281 assert(finallyNesting <= compHndBBtabCount);
8283 callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler.
8284 endLFin = new (this, GT_END_LFIN) GenTreeVal(GT_END_LFIN, TYP_VOID, finallyNesting);
8285 endLFin = gtNewStmt(endLFin);
8290 invalidatePreds = true;
8294 /* Append any remaining endCatches, if any */
8296 assert(!encFinallies == !endLFin);
8298 if (encFinallies == 0)
8300 assert(step == DUMMY_INIT(NULL));
8301 block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS
8304 impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
8309 printf("impImportLeave - no enclosing finally-protected try blocks; convert CEE_LEAVE block to BBJ_ALWAYS "
8310 "block BB%02u [%08p]\n",
8311 block->bbNum, dspPtr(block));
8317 // If leaveTarget is the start of another try block, we want to make sure that
8318 // we do not insert finalStep into that try block. Hence, we find the enclosing try block.
8320 unsigned tryIndex = bbFindInnermostCommonTryRegion(step, leaveTarget);
8322 // Insert a new BB either in the try region indicated by tryIndex or
8323 // the handler region indicated by leaveTarget->bbHndIndex,
8324 // depending on which is the inner region.
8325 BasicBlock* finalStep = fgNewBBinRegion(BBJ_ALWAYS, tryIndex, leaveTarget->bbHndIndex, step);
8326 finalStep->bbFlags |= BBF_KEEP_BBJ_ALWAYS;
8327 step->bbJumpDest = finalStep;
8329 /* The new block will inherit this block's weight */
8330 finalStep->setBBWeight(block->bbWeight);
8331 finalStep->bbFlags |= block->bbFlags & BBF_RUN_RARELY;
8336 printf("impImportLeave - finalStep block required (encFinallies(%d) > 0), new block BB%02u [%08p]\n",
8337 encFinallies, finalStep->bbNum, dspPtr(finalStep));
8341 GenTreePtr lastStmt;
8345 lastStmt = gtNewStmt(endCatches);
8346 endLFin->gtNext = lastStmt;
8347 lastStmt->gtPrev = endLFin;
8354 impEndTreeList(finalStep, endLFin, lastStmt);
8356 finalStep->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE
8358 // Queue up the jump target for importing
8360 impImportBlockPending(leaveTarget);
8362 invalidatePreds = true;
8365 if (invalidatePreds && fgComputePredsDone)
8367 JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n");
8372 fgVerifyHandlerTab();
8376 printf("\nAfter import CEE_LEAVE:\n");
8377 fgDispBasicBlocks();
8383 #else // FEATURE_EH_FUNCLETS
8385 void Compiler::impImportLeave(BasicBlock* block)
8390 printf("\nBefore import CEE_LEAVE in BB%02u (targeting BB%02u):\n", block->bbNum, block->bbJumpDest->bbNum);
8391 fgDispBasicBlocks();
8396 bool invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created)
8397 unsigned blkAddr = block->bbCodeOffs;
8398 BasicBlock* leaveTarget = block->bbJumpDest;
8399 unsigned jmpAddr = leaveTarget->bbCodeOffs;
8401 // LEAVE clears the stack: spill side effects and set the stack depth to 0
8403 impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave"));
8404 verCurrentState.esStackDepth = 0;
8406 assert(block->bbJumpKind == BBJ_LEAVE);
8407 assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != nullptr); // should be a BB boundary
8409 BasicBlock* step = nullptr;
8413 // No step type; step == NULL.
8416 // Is the step block the BBJ_ALWAYS block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair?
8417 // That is, is step->bbJumpDest where a finally will return to?
8420 // The step block is a catch return.
8423 // The step block is in a "try", created as the target for a finally return or the target for a catch return.
8426 StepType stepType = ST_None;
8431 for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
8433 // Grab the handler offsets
8435 IL_OFFSET tryBeg = HBtab->ebdTryBegOffs();
8436 IL_OFFSET tryEnd = HBtab->ebdTryEndOffs();
8437 IL_OFFSET hndBeg = HBtab->ebdHndBegOffs();
8438 IL_OFFSET hndEnd = HBtab->ebdHndEndOffs();
8440 /* Is this a catch-handler we are CEE_LEAVEing out of?
8443 if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd))
8445 // Can't CEE_LEAVE out of a finally/fault handler
8446 if (HBtab->HasFinallyOrFaultHandler())
8448 BADCODE("leave out of fault/finally block");
8451 /* We are jumping out of a catch */
8453 if (step == nullptr)
8456 step->bbJumpKind = BBJ_EHCATCHRET; // convert the BBJ_LEAVE to BBJ_EHCATCHRET
8457 stepType = ST_Catch;
8462 printf("impImportLeave - jumping out of a catch (EH#%u), convert block BB%02u to BBJ_EHCATCHRET "
8464 XTnum, step->bbNum);
8470 BasicBlock* exitBlock;
8472 /* Create a new catch exit block in the catch region for the existing step block to jump to in this
8474 exitBlock = fgNewBBinRegion(BBJ_EHCATCHRET, 0, XTnum + 1, step);
8476 assert(step->bbJumpKind == BBJ_ALWAYS || step->bbJumpKind == BBJ_EHCATCHRET);
8477 step->bbJumpDest = exitBlock; // the previous step (maybe a call to a nested finally, or a nested catch
8478 // exit) returns to this block
8479 step->bbJumpDest->bbRefs++;
8481 #if defined(_TARGET_ARM_)
8482 if (stepType == ST_FinallyReturn)
8484 assert(step->bbJumpKind == BBJ_ALWAYS);
8485 // Mark the target of a finally return
8486 step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
8488 #endif // defined(_TARGET_ARM_)
8490 /* The new block will inherit this block's weight */
8491 exitBlock->setBBWeight(block->bbWeight);
8492 exitBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8494 /* This exit block is the new step */
8496 stepType = ST_Catch;
8498 invalidatePreds = true;
8503 printf("impImportLeave - jumping out of a catch (EH#%u), new BBJ_EHCATCHRET block BB%02u\n", XTnum,
8509 else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
8510 !jitIsBetween(jmpAddr, tryBeg, tryEnd))
8512 /* We are jumping out of a finally-protected try */
8514 BasicBlock* callBlock;
8516 if (step == nullptr)
8518 #if FEATURE_EH_CALLFINALLY_THUNKS
8520 // Put the call to the finally in the enclosing region.
8521 unsigned callFinallyTryIndex =
8522 (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1;
8523 unsigned callFinallyHndIndex =
8524 (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1;
8525 callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, block);
8527 // Convert the BBJ_LEAVE to BBJ_ALWAYS, jumping to the new BBJ_CALLFINALLY. This is because
8528 // the new BBJ_CALLFINALLY is in a different EH region, thus it can't just replace the BBJ_LEAVE,
8529 // which might be in the middle of the "try". In most cases, the BBJ_ALWAYS will jump to the
8530 // next block, and flow optimizations will remove it.
8531 block->bbJumpKind = BBJ_ALWAYS;
8532 block->bbJumpDest = callBlock;
8533 block->bbJumpDest->bbRefs++;
8535 /* The new block will inherit this block's weight */
8536 callBlock->setBBWeight(block->bbWeight);
8537 callBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8542 printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block BB%02u to "
8543 "BBJ_ALWAYS, add BBJ_CALLFINALLY block BB%02u\n",
8544 XTnum, block->bbNum, callBlock->bbNum);
8548 #else // !FEATURE_EH_CALLFINALLY_THUNKS
8551 callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY
8556 printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block BB%02u to "
8557 "BBJ_CALLFINALLY block\n",
8558 XTnum, callBlock->bbNum);
8562 #endif // !FEATURE_EH_CALLFINALLY_THUNKS
8566 // Calling the finally block. We already have a step block that is either the call-to-finally from a
8567 // more nested try/finally (thus we are jumping out of multiple nested 'try' blocks, each protected by
8568 // a 'finally'), or the step block is the return from a catch.
8570 // Due to ThreadAbortException, we can't have the catch return target the call-to-finally block
8571 // directly. Note that if a 'catch' ends without resetting the ThreadAbortException, the VM will
8572 // automatically re-raise the exception, using the return address of the catch (that is, the target
8573 // block of the BBJ_EHCATCHRET) as the re-raise address. If this address is in a finally, the VM will
8574 // refuse to do the re-raise, and the ThreadAbortException will get eaten (and lost). On AMD64/ARM64,
8575 // we put the call-to-finally thunk in a special "cloned finally" EH region that does look like a
8576 // finally clause to the VM. Thus, on these platforms, we can't have BBJ_EHCATCHRET target a
8577 // BBJ_CALLFINALLY directly. (Note that on ARM32, we don't mark the thunk specially -- it lives directly
8578 // within the 'try' region protected by the finally, since we generate code in such a way that execution
8579 // never returns to the call-to-finally call, and the finally-protected 'try' region doesn't appear on
8582 assert(step->bbJumpKind == BBJ_ALWAYS || step->bbJumpKind == BBJ_EHCATCHRET);
8584 #if FEATURE_EH_CALLFINALLY_THUNKS
8585 if (step->bbJumpKind == BBJ_EHCATCHRET)
8587 // Need to create another step block in the 'try' region that will actually branch to the
8588 // call-to-finally thunk.
8589 BasicBlock* step2 = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step);
8590 step->bbJumpDest = step2;
8591 step->bbJumpDest->bbRefs++;
8592 step2->setBBWeight(block->bbWeight);
8593 step2->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8598 printf("impImportLeave - jumping out of a finally-protected try (EH#%u), step block is "
8599 "BBJ_EHCATCHRET (BB%02u), new BBJ_ALWAYS step-step block BB%02u\n",
8600 XTnum, step->bbNum, step2->bbNum);
8605 assert(stepType == ST_Catch); // Leave it as catch type for now.
8607 #endif // FEATURE_EH_CALLFINALLY_THUNKS
8609 #if FEATURE_EH_CALLFINALLY_THUNKS
8610 unsigned callFinallyTryIndex =
8611 (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1;
8612 unsigned callFinallyHndIndex =
8613 (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1;
8614 #else // !FEATURE_EH_CALLFINALLY_THUNKS
8615 unsigned callFinallyTryIndex = XTnum + 1;
8616 unsigned callFinallyHndIndex = 0; // don't care
8617 #endif // !FEATURE_EH_CALLFINALLY_THUNKS
8619 callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, step);
8620 step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next
8621 // finally in the chain)
8622 step->bbJumpDest->bbRefs++;
8624 #if defined(_TARGET_ARM_)
8625 if (stepType == ST_FinallyReturn)
8627 assert(step->bbJumpKind == BBJ_ALWAYS);
8628 // Mark the target of a finally return
8629 step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
8631 #endif // defined(_TARGET_ARM_)
8633 /* The new block will inherit this block's weight */
8634 callBlock->setBBWeight(block->bbWeight);
8635 callBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8640 printf("impImportLeave - jumping out of a finally-protected try (EH#%u), new BBJ_CALLFINALLY block "
8642 XTnum, callBlock->bbNum);
8647 step = fgNewBBafter(BBJ_ALWAYS, callBlock, true);
8648 stepType = ST_FinallyReturn;
8650 /* The new block will inherit this block's weight */
8651 step->setBBWeight(block->bbWeight);
8652 step->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS;
8657 printf("impImportLeave - jumping out of a finally-protected try (EH#%u), created step (BBJ_ALWAYS) "
8659 XTnum, step->bbNum);
8663 callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler.
8665 invalidatePreds = true;
8667 else if (HBtab->HasCatchHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
8668 !jitIsBetween(jmpAddr, tryBeg, tryEnd))
8670 // We are jumping out of a catch-protected try.
8672 // If we are returning from a call to a finally, then we must have a step block within a try
8673 // that is protected by a catch. This is so when unwinding from that finally (e.g., if code within the
8674 // finally raises an exception), the VM will find this step block, notice that it is in a protected region,
8675 // and invoke the appropriate catch.
8677 // We also need to handle a special case with the handling of ThreadAbortException. If a try/catch
8678 // catches a ThreadAbortException (which might be because it catches a parent, e.g. System.Exception),
8679 // and the catch doesn't call System.Threading.Thread::ResetAbort(), then when the catch returns to the VM,
8680 // the VM will automatically re-raise the ThreadAbortException. When it does this, it uses the target
8681 // address of the catch return as the new exception address. That is, the re-raised exception appears to
8682 // occur at the catch return address. If this exception return address skips an enclosing try/catch that
8683 // catches ThreadAbortException, then the enclosing try/catch will not catch the exception, as it should.
8688 // // something here raises ThreadAbortException
8689 // LEAVE LABEL_1; // no need to stop at LABEL_2
8690 // } catch (Exception) {
8691 // // This catches ThreadAbortException, but doesn't call System.Threading.Thread::ResetAbort(), so
8692 // // ThreadAbortException is re-raised by the VM at the address specified by the LEAVE opcode.
8693 // // This is bad, since it means the outer try/catch won't get a chance to catch the re-raised
8694 // // ThreadAbortException. So, instead, create step block LABEL_2 and LEAVE to that. We only
8695 // // need to do this transformation if the current EH block is a try/catch that catches
8696 // // ThreadAbortException (or one of its parents), however we might not be able to find that
8697 // // information, so currently we do it for all catch types.
8698 // LEAVE LABEL_1; // Convert this to LEAVE LABEL2;
8700 // LABEL_2: LEAVE LABEL_1; // inserted by this step creation code
8701 // } catch (ThreadAbortException) {
8705 // Note that this pattern isn't theoretical: it occurs in ASP.NET, in IL code generated by the Roslyn C# compiler.
8708 if ((stepType == ST_FinallyReturn) || (stepType == ST_Catch))
8710 BasicBlock* catchStep;
8714 if (stepType == ST_FinallyReturn)
8716 assert(step->bbJumpKind == BBJ_ALWAYS);
8720 assert(stepType == ST_Catch);
8721 assert(step->bbJumpKind == BBJ_EHCATCHRET);
8724 /* Create a new exit block in the try region for the existing step block to jump to in this scope */
8725 catchStep = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step);
8726 step->bbJumpDest = catchStep;
8727 step->bbJumpDest->bbRefs++;
8729 #if defined(_TARGET_ARM_)
8730 if (stepType == ST_FinallyReturn)
8732 // Mark the target of a finally return
8733 step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
8735 #endif // defined(_TARGET_ARM_)
8737 /* The new block will inherit this block's weight */
8738 catchStep->setBBWeight(block->bbWeight);
8739 catchStep->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8744 if (stepType == ST_FinallyReturn)
8746 printf("impImportLeave - return from finally jumping out of a catch-protected try (EH#%u), new "
8747 "BBJ_ALWAYS block BB%02u\n",
8748 XTnum, catchStep->bbNum);
8752 assert(stepType == ST_Catch);
8753 printf("impImportLeave - return from catch jumping out of a catch-protected try (EH#%u), new "
8754 "BBJ_ALWAYS block BB%02u\n",
8755 XTnum, catchStep->bbNum);
8760 /* This block is the new step */
8764 invalidatePreds = true;
8769 if (step == nullptr)
8771 block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS
8776 printf("impImportLeave - no enclosing finally-protected try blocks or catch handlers; convert CEE_LEAVE "
8777 "block BB%02u to BBJ_ALWAYS\n",
8784 step->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE
8786 #if defined(_TARGET_ARM_)
8787 if (stepType == ST_FinallyReturn)
8789 assert(step->bbJumpKind == BBJ_ALWAYS);
8790 // Mark the target of a finally return
8791 step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
8793 #endif // defined(_TARGET_ARM_)
8798 printf("impImportLeave - final destination of step blocks set to BB%02u\n", leaveTarget->bbNum);
8802 // Queue up the jump target for importing
8804 impImportBlockPending(leaveTarget);
8807 if (invalidatePreds && fgComputePredsDone)
8809 JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n");
8814 fgVerifyHandlerTab();
8818 printf("\nAfter import CEE_LEAVE:\n");
8819 fgDispBasicBlocks();
8825 #endif // FEATURE_EH_FUNCLETS
8827 /*****************************************************************************/
8828 // This is called when reimporting a leave block. It resets the JumpKind,
8829 // JumpDest, and bbNext to the original values
8831 void Compiler::impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr)
8833 #if FEATURE_EH_FUNCLETS
8834 // With EH Funclets, while importing leave opcode we create another block ending with BBJ_ALWAYS (call it B1)
8835 // and the block containing leave (say B0) is marked as BBJ_CALLFINALLY. Say for some reason we reimport B0,
8836 // it is reset (in this routine) by marking as ending with BBJ_LEAVE and further down when B0 is reimported, we
8837 // create another BBJ_ALWAYS (call it B2). In this process B1 gets orphaned and any blocks to which B1 is the
8838 // only predecessor are also considered orphans and attempted to be deleted.
8845 // leave OUTSIDE; // B0 is the block containing this leave, following this would be B1
8850 // In the above nested try-finally example, we create a step block (call it Bstep) which in turn branches to a block
8851 // where a finally would branch to (and such block is marked as finally target). Block B1 branches to step block.
8852 // Because of re-import of B0, Bstep is also orphaned. Since Bstep is a finally target it cannot be removed. To
8853 // work around this we will duplicate B0 (call it B0Dup) before resetting. B0Dup is marked as BBJ_CALLFINALLY and
8854 // only serves to pair up with B1 (BBJ_ALWAYS) that got orphaned. Now during orphan block deletion B0Dup and B1
8855 // will be treated as pair and handled correctly.
8856 if (block->bbJumpKind == BBJ_CALLFINALLY)
8858 BasicBlock* dupBlock = bbNewBasicBlock(block->bbJumpKind);
8859 dupBlock->bbFlags = block->bbFlags;
8860 dupBlock->bbJumpDest = block->bbJumpDest;
8861 dupBlock->copyEHRegion(block);
8862 dupBlock->bbCatchTyp = block->bbCatchTyp;
8864 // Mark this block as
8865 // a) not referenced by any other block to make sure that it gets deleted
8867 // b) weight zero, and c) prevented from being imported
8870 dupBlock->bbRefs = 0;
8871 dupBlock->bbWeight = 0;
8872 dupBlock->bbFlags |= BBF_IMPORTED | BBF_INTERNAL | BBF_RUN_RARELY;
8874 // Insert the block right after the block which is getting reset so that BBJ_CALLFINALLY and BBJ_ALWAYS
8875 // will be next to each other.
8876 fgInsertBBafter(block, dupBlock);
8881 printf("New Basic Block BB%02u duplicate of BB%02u created.\n", dupBlock->bbNum, block->bbNum);
8885 #endif // FEATURE_EH_FUNCLETS
8887 block->bbJumpKind = BBJ_LEAVE;
8889 block->bbJumpDest = fgLookupBB(jmpAddr);
8891 // We will leave the BBJ_ALWAYS block we introduced. When it's reimported
8892 // the BBJ_ALWAYS block will be unreachable, and will be removed after. The
8893 // reason we don't want to remove the block at this point is that if we call
8894 // fgInitBBLookup() again we will do it wrong as the BBJ_ALWAYS block won't be
8895 // added and the linked list length will be different than fgBBcount.
8898 /*****************************************************************************/
8899 // Get the first non-prefix opcode. Used for verification of valid combinations
8900 // of prefixes and actual opcodes.
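// For example (illustrative): given the IL byte sequence for "volatile. unaligned. ldind.i4", this
// walks past both prefix opcodes and returns CEE_LDIND_I4.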
8902 static OPCODE impGetNonPrefixOpcode(const BYTE* codeAddr, const BYTE* codeEndp)
8904 while (codeAddr < codeEndp)
8906 OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr);
8907 codeAddr += sizeof(__int8);
8909 if (opcode == CEE_PREFIX1)
8911 if (codeAddr >= codeEndp)
8915 opcode = (OPCODE)(getU1LittleEndian(codeAddr) + 256);
8916 codeAddr += sizeof(__int8);
8924 case CEE_CONSTRAINED:
8931 codeAddr += opcodeSizes[opcode];
8937 /*****************************************************************************/
8938 // Checks whether the opcode is a valid opcode for volatile. and unaligned. prefixes
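// For example (illustrative): "volatile. ldsfld" and "unaligned. ldind.i4" are accepted, while a
// volatile. or unaligned. prefix on something like CEE_ADD is rejected as invalid IL below.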
8940 static void impValidateMemoryAccessOpcode(const BYTE* codeAddr, const BYTE* codeEndp, bool volatilePrefix)
8942 OPCODE opcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
8945 // The opcodes of all ldind and stind instructions happen to be contiguous, except stind.i.
8946 ((CEE_LDIND_I1 <= opcode) && (opcode <= CEE_STIND_R8)) || (opcode == CEE_STIND_I) ||
8947 (opcode == CEE_LDFLD) || (opcode == CEE_STFLD) || (opcode == CEE_LDOBJ) || (opcode == CEE_STOBJ) ||
8948 (opcode == CEE_INITBLK) || (opcode == CEE_CPBLK) ||
8949 // the volatile. prefix is allowed with ldsfld and stsfld
8950 (volatilePrefix && ((opcode == CEE_LDSFLD) || (opcode == CEE_STSFLD)))))
8952 BADCODE("Invalid opcode for unaligned. or volatile. prefix");
8956 /*****************************************************************************/
8960 #undef RETURN // undef contracts RETURN macro
8975 const static controlFlow_t controlFlow[] = {
8976 #define OPDEF(c, s, pop, push, args, type, l, s1, s2, flow) flow,
8977 #include "opcode.def"
8983 /*****************************************************************************
8984 * Determine the result type of an arithmetic operation.
8985 * On 64-bit targets, inserts upcasts when native int is mixed with int32
8987 var_types Compiler::impGetByRefResultType(genTreeOps oper, bool fUnsigned, GenTreePtr* pOp1, GenTreePtr* pOp2)
8989 var_types type = TYP_UNDEF;
8990 GenTreePtr op1 = *pOp1, op2 = *pOp2;
8992 // Arithmetic operations are generally only allowed with
8993 // primitive types, but certain operations are allowed with byrefs
8996 if ((oper == GT_SUB) && (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF))
8998 if ((genActualType(op1->TypeGet()) == TYP_BYREF) && (genActualType(op2->TypeGet()) == TYP_BYREF))
9000 // byref1-byref2 => gives a native int
9003 else if (genActualTypeIsIntOrI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_BYREF))
9005 // [native] int - byref => gives a native int
9008 // The reason is that it is possible, in managed C++, to have a tree where
9009 // an int (e.g. a 'const(h)' handle) has a byref (e.g. an 'addr') subtracted from it.
9018 // <BUGNUM> VSW 318822 </BUGNUM>
9020 // So here we decide to make the resulting type a native int.
9021 CLANG_FORMAT_COMMENT_ANCHOR;
9023 #ifdef _TARGET_64BIT_
9024 if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
9026 // insert an explicit upcast
9027 op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9029 #endif // _TARGET_64BIT_
9035 // byref - [native] int => gives a byref
9036 assert(genActualType(op1->TypeGet()) == TYP_BYREF && genActualTypeIsIntOrI(op2->TypeGet()));
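// For example (a sketch): on a 64-bit target, "byref - int32" becomes "byref - cast<native int>(int32)"
// below, so that both operands of the subtraction have pointer width.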
9038 #ifdef _TARGET_64BIT_
9039 if ((genActualType(op2->TypeGet()) != TYP_I_IMPL))
9041 // insert an explicit upcast
9042 op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9044 #endif // _TARGET_64BIT_
9049 else if ((oper == GT_ADD) &&
9050 (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF))
9052 // byref + [native] int => gives a byref
9054 // [native] int + byref => gives a byref
9056 // only one can be a byref : byref op byref not allowed
9057 assert(genActualType(op1->TypeGet()) != TYP_BYREF || genActualType(op2->TypeGet()) != TYP_BYREF);
9058 assert(genActualTypeIsIntOrI(op1->TypeGet()) || genActualTypeIsIntOrI(op2->TypeGet()));
9060 #ifdef _TARGET_64BIT_
9061 if (genActualType(op2->TypeGet()) == TYP_BYREF)
9063 if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
9065 // insert an explicit upcast
9066 op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9069 else if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
9071 // insert an explicit upcast
9072 op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9074 #endif // _TARGET_64BIT_
9078 #ifdef _TARGET_64BIT_
9079 else if (genActualType(op1->TypeGet()) == TYP_I_IMPL || genActualType(op2->TypeGet()) == TYP_I_IMPL)
9081 assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType));
9083 // int + long => gives long
9084 // long + int => gives long
9085 // we get this because in the IL the long isn't Int64, it's just IntPtr
9087 if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
9089 // insert an explicit upcast
9090 op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9092 else if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
9094 // insert an explicit upcast
9095 op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9100 #else // 32-bit TARGET
9101 else if (genActualType(op1->TypeGet()) == TYP_LONG || genActualType(op2->TypeGet()) == TYP_LONG)
9103 assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType));
9105 // int + long => gives long
9106 // long + int => gives long
9110 #endif // _TARGET_64BIT_
9113 // int + int => gives an int
9114 assert(genActualType(op1->TypeGet()) != TYP_BYREF && genActualType(op2->TypeGet()) != TYP_BYREF);
9116 assert(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
9117 varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
9119 type = genActualType(op1->gtType);
9121 #if FEATURE_X87_DOUBLES
9123 // For x87, since we only have 1 size of registers, prefer double
9124 // For everybody else, be more precise
9125 if (type == TYP_FLOAT)
9128 #else // !FEATURE_X87_DOUBLES
9130 // If both operands are TYP_FLOAT, then leave it as TYP_FLOAT.
9131 // Otherwise, turn floats into doubles
9132 if ((type == TYP_FLOAT) && (genActualType(op2->gtType) != TYP_FLOAT))
9134 assert(genActualType(op2->gtType) == TYP_DOUBLE);
9138 #endif // FEATURE_X87_DOUBLES
9141 #if FEATURE_X87_DOUBLES
9142 assert(type == TYP_BYREF || type == TYP_DOUBLE || type == TYP_LONG || type == TYP_INT);
9143 #else // FEATURE_X87_DOUBLES
9144 assert(type == TYP_BYREF || type == TYP_DOUBLE || type == TYP_FLOAT || type == TYP_LONG || type == TYP_INT);
9145 #endif // FEATURE_X87_DOUBLES
9150 /*****************************************************************************
9151 * Casting Helper Function to service both CEE_CASTCLASS and CEE_ISINST
9153 * typeRef contains the token, op1 contains the value being cast,
9154 * and op2 contains code that creates the type handle corresponding to typeRef
9155 * isCastClass = true means CEE_CASTCLASS, false means CEE_ISINST
9157 GenTreePtr Compiler::impCastClassOrIsInstToTree(GenTreePtr op1,
9159 CORINFO_RESOLVED_TOKEN* pResolvedToken,
9164 assert(op1->TypeGet() == TYP_REF);
9166 CorInfoHelpFunc helper = info.compCompHnd->getCastingHelper(pResolvedToken, isCastClass);
9170 // We only want to expand inline the normal CHKCASTCLASS helper;
9171 expandInline = (helper == CORINFO_HELP_CHKCASTCLASS);
9175 if (helper == CORINFO_HELP_ISINSTANCEOFCLASS)
9177 // Get the Class Handle and class attributes for the type we are casting to
9179 DWORD flags = info.compCompHnd->getClassAttribs(pResolvedToken->hClass);
9182 // If the class handle is marked as final we can also expand the IsInst check inline
9184 expandInline = ((flags & CORINFO_FLG_FINAL) != 0);
9187 // But don't expand inline these two cases
9189 if (flags & CORINFO_FLG_MARSHAL_BYREF)
9191 expandInline = false;
9193 else if (flags & CORINFO_FLG_CONTEXTFUL)
9195 expandInline = false;
9201 // We can't expand inline any other helpers
9203 expandInline = false;
9209 if (compCurBB->isRunRarely())
9211 expandInline = false; // not worth the code expansion in a rarely run block
9214 if ((op1->gtFlags & GTF_GLOB_EFFECT) && lvaHaveManyLocals())
9216 expandInline = false; // not worth creating an untracked local variable
9222 // If we CSE this class handle we prevent assertionProp from making SubType assertions
9223 // so instead we force the CSE logic to not consider CSE-ing this class handle.
9225 op2->gtFlags |= GTF_DONT_CSE;
9227 return gtNewHelperCallNode(helper, TYP_REF, 0, gtNewArgList(op2, op1));
9230 impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark2"));
9235 // expand the methodtable match:
9239 //    condMT = GT_NE( GT_IND(op1 copy) /* the object's method table */ , op2 /* typically a CNS_INT class handle */ )
9244 // This can replace op1 with a GT_COMMA that evaluates op1 into a local
9246 op1 = impCloneExpr(op1, &temp, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("CASTCLASS eval op1"));
9248 // op1 is now known to be a non-complex tree
9249 // thus we can use gtClone(op1) from now on
9252 GenTreePtr op2Var = op2;
9255 op2Var = fgInsertCommaFormTemp(&op2);
9256 lvaTable[op2Var->AsLclVarCommon()->GetLclNum()].lvIsCSE = true;
9258 temp = gtNewOperNode(GT_IND, TYP_I_IMPL, temp);
9259 temp->gtFlags |= GTF_EXCEPT;
9260 condMT = gtNewOperNode(GT_NE, TYP_INT, temp, op2);
9262 GenTreePtr condNull;
9264 // expand the null check:
9266 //    condNull = GT_EQ( op1 copy, null )
9271 condNull = gtNewOperNode(GT_EQ, TYP_INT, gtClone(op1), gtNewIconNode(0, TYP_REF));
9274 // expand the true and false trees for the condMT
9276 GenTreePtr condFalse = gtClone(op1);
9277 GenTreePtr condTrue;
9281 // use the special helper that skips the cases checked by our inlined cast
9283 helper = CORINFO_HELP_CHKCASTCLASS_SPECIAL;
9285 condTrue = gtNewHelperCallNode(helper, TYP_REF, 0, gtNewArgList(op2Var, gtClone(op1)));
9289 condTrue = gtNewIconNode(0, TYP_REF);
9292 #define USE_QMARK_TREES
9294 #ifdef USE_QMARK_TREES
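// In effect (a sketch of the expansion built below):
//    (op1 == null) ? op1 : ((op1's methodtable != op2) ? condTrue : op1)
// where condTrue is the special cast helper call for CEE_CASTCLASS, or a null constant for CEE_ISINST.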
9297 // Generate first QMARK - COLON tree:
9299 //    qmarkMT = GT_QMARK( condMT, GT_COLON( condTrue, condFalse ) )
9305 temp = new (this, GT_COLON) GenTreeColon(TYP_REF, condTrue, condFalse);
9306 qmarkMT = gtNewQmarkNode(TYP_REF, condMT, temp);
9307 condMT->gtFlags |= GTF_RELOP_QMARK;
9309 GenTreePtr qmarkNull;
9311 // Generate second QMARK - COLON tree:
9313 //    qmarkNull = GT_QMARK( condNull, GT_COLON( op1 copy, qmarkMT ) )
9319 temp = new (this, GT_COLON) GenTreeColon(TYP_REF, gtClone(op1), qmarkMT);
9320 qmarkNull = gtNewQmarkNode(TYP_REF, condNull, temp);
9321 qmarkNull->gtFlags |= GTF_QMARK_CAST_INSTOF;
9322 condNull->gtFlags |= GTF_RELOP_QMARK;
9324 // Make QMark node a top level node by spilling it.
9325 unsigned tmp = lvaGrabTemp(true DEBUGARG("spilling QMark2"));
9326 impAssignTempGen(tmp, qmarkNull, (unsigned)CHECK_SPILL_NONE);
9327 return gtNewLclvNode(tmp, TYP_REF);
9332 #define assertImp(cond) ((void)0)
9334 #define assertImp(cond) \
9339 const int cchAssertImpBuf = 600; \
9340 char* assertImpBuf = (char*)alloca(cchAssertImpBuf); \
9341 _snprintf_s(assertImpBuf, cchAssertImpBuf, cchAssertImpBuf - 1, \
9342 "%s : Possibly bad IL with CEE_%s at offset %04Xh (op1=%s op2=%s stkDepth=%d)", #cond, \
9343 impCurOpcName, impCurOpcOffs, op1 ? varTypeName(op1->TypeGet()) : "NULL", \
9344 op2 ? varTypeName(op2->TypeGet()) : "NULL", verCurrentState.esStackDepth); \
9345 assertAbort(assertImpBuf, __FILE__, __LINE__); \
9351 #pragma warning(push)
9352 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
9354 /*****************************************************************************
9355 * Import the instr for the given basic block
9357 void Compiler::impImportBlockCode(BasicBlock* block)
9359 #define _impResolveToken(kind) impResolveToken(codeAddr, &resolvedToken, kind)
9365 printf("\nImporting BB%02u (PC=%03u) of '%s'", block->bbNum, block->bbCodeOffs, info.compFullName);
9369 unsigned nxtStmtIndex = impInitBlockLineInfo();
9370 IL_OFFSET nxtStmtOffs;
9372 GenTreePtr arrayNodeFrom, arrayNodeTo, arrayNodeToIndex;
9374 CorInfoHelpFunc helper;
9375 CorInfoIsAccessAllowedResult accessAllowedResult;
9376 CORINFO_HELPER_DESC calloutHelper;
9377 const BYTE* lastLoadToken = nullptr;
9379 // reject cyclic constraints
9380 if (tiVerificationNeeded)
9382 Verify(!info.hasCircularClassConstraints, "Method parent has circular class type parameter constraints.");
9383 Verify(!info.hasCircularMethodConstraints, "Method has circular method type parameter constraints.");
9386 /* Get the tree list started */
9390 /* Walk the opcodes that comprise the basic block */
9392 const BYTE* codeAddr = info.compCode + block->bbCodeOffs;
9393 const BYTE* codeEndp = info.compCode + block->bbCodeOffsEnd;
9395 IL_OFFSET opcodeOffs = block->bbCodeOffs;
9396 IL_OFFSET lastSpillOffs = opcodeOffs;
9400 /* remember the start of the delegate creation sequence (used for verification) */
9401 const BYTE* delegateCreateStart = nullptr;
9403 int prefixFlags = 0;
9404 bool explicitTailCall, constraintCall, readonlyCall;
9406 bool insertLdloc = false; // set by CEE_DUP and cleared by following store
9409 unsigned numArgs = info.compArgsCount;
9411 /* Now process all the opcodes in the block */
9413 var_types callTyp = TYP_COUNT;
9414 OPCODE prevOpcode = CEE_ILLEGAL;
9416 if (block->bbCatchTyp)
9418 if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES)
9420 impCurStmtOffsSet(block->bbCodeOffs);
9423 // We will spill the GT_CATCH_ARG and the input of the BB_QMARK block
9424 // to a temp. This is a trade off for code simplicity
9425 impSpillSpecialSideEff();
9428 while (codeAddr < codeEndp)
9430 bool usingReadyToRunHelper = false;
9431 CORINFO_RESOLVED_TOKEN resolvedToken;
9432 CORINFO_RESOLVED_TOKEN constrainedResolvedToken;
9433 CORINFO_CALL_INFO callInfo;
9434 CORINFO_FIELD_INFO fieldInfo;
9436 tiRetVal = typeInfo(); // Default type info
9438 //---------------------------------------------------------------------
9440 /* We need to restrict the max tree depth as many of the Compiler
9441 functions are recursive. We do this by spilling the stack */
9443 if (verCurrentState.esStackDepth)
9445 /* Has it been a while since we last saw a non-empty stack (which
9446 guarantees that the tree depth isn't accumulating). */
9448 if ((opcodeOffs - lastSpillOffs) > 200)
9450 impSpillStackEnsure();
9451 lastSpillOffs = opcodeOffs;
9456 lastSpillOffs = opcodeOffs;
9457 impBoxTempInUse = false; // nothing on the stack, box temp OK to use again
9460 /* Compute the current instr offset */
9462 opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
9465 if (opts.compDbgInfo)
9468 if (!compIsForInlining())
9471 (nxtStmtIndex < info.compStmtOffsetsCount) ? info.compStmtOffsets[nxtStmtIndex] : BAD_IL_OFFSET;
9473 /* Have we reached the next stmt boundary ? */
9475 if (nxtStmtOffs != BAD_IL_OFFSET && opcodeOffs >= nxtStmtOffs)
9477 assert(nxtStmtOffs == info.compStmtOffsets[nxtStmtIndex]);
9479 if (verCurrentState.esStackDepth != 0 && opts.compDbgCode)
9481 /* We need to provide accurate IP-mapping at this point.
9482 So spill anything on the stack so that it will form
9483 gtStmts with the correct stmt offset noted */
9485 impSpillStackEnsure(true);
9488 // Has impCurStmtOffs been reported in any tree?
9490 if (impCurStmtOffs != BAD_IL_OFFSET && opts.compDbgCode)
9492 GenTreePtr placeHolder = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
9493 impAppendTree(placeHolder, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
9495 assert(impCurStmtOffs == BAD_IL_OFFSET);
9498 if (impCurStmtOffs == BAD_IL_OFFSET)
9500 /* Make sure that nxtStmtIndex is in sync with opcodeOffs.
9501 If opcodeOffs has gone past nxtStmtIndex, catch up */
9503 while ((nxtStmtIndex + 1) < info.compStmtOffsetsCount &&
9504 info.compStmtOffsets[nxtStmtIndex + 1] <= opcodeOffs)
9509 /* Go to the new stmt */
9511 impCurStmtOffsSet(info.compStmtOffsets[nxtStmtIndex]);
9513 /* Update the stmt boundary index */
9516 assert(nxtStmtIndex <= info.compStmtOffsetsCount);
9518 /* Are there any more line# entries after this one? */
9520 if (nxtStmtIndex < info.compStmtOffsetsCount)
9522 /* Remember where the next line# starts */
9524 nxtStmtOffs = info.compStmtOffsets[nxtStmtIndex];
9528 /* No more line# entries */
9530 nxtStmtOffs = BAD_IL_OFFSET;
9534 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES) &&
9535 (verCurrentState.esStackDepth == 0))
9537 /* At stack-empty locations, we have already added the tree to
9538 the stmt list with the last offset. We just need to update impCurStmtOffs. */
9542 impCurStmtOffsSet(opcodeOffs);
9544 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES) &&
9545 impOpcodeIsCallSiteBoundary(prevOpcode))
9547 /* Make sure we have a type cached */
9548 assert(callTyp != TYP_COUNT);
9550 if (callTyp == TYP_VOID)
9552 impCurStmtOffsSet(opcodeOffs);
9554 else if (opts.compDbgCode)
9556 impSpillStackEnsure(true);
9557 impCurStmtOffsSet(opcodeOffs);
9560 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::NOP_BOUNDARIES) && (prevOpcode == CEE_NOP))
9562 if (opts.compDbgCode)
9564 impSpillStackEnsure(true);
9567 impCurStmtOffsSet(opcodeOffs);
9570 assert(impCurStmtOffs == BAD_IL_OFFSET || nxtStmtOffs == BAD_IL_OFFSET ||
9571 jitGetILoffs(impCurStmtOffs) <= nxtStmtOffs);
9575 CORINFO_CLASS_HANDLE clsHnd = DUMMY_INIT(NULL);
9576 CORINFO_CLASS_HANDLE ldelemClsHnd = DUMMY_INIT(NULL);
9577 CORINFO_CLASS_HANDLE stelemClsHnd = DUMMY_INIT(NULL);
9579 var_types lclTyp, ovflType = TYP_UNKNOWN;
9580 GenTreePtr op1 = DUMMY_INIT(NULL);
9581 GenTreePtr op2 = DUMMY_INIT(NULL);
9582 GenTreeArgList* args = nullptr; // What good do these "DUMMY_INIT"s do?
9583 GenTreePtr newObjThisPtr = DUMMY_INIT(NULL);
9584 bool uns = DUMMY_INIT(false);
9586 /* Get the next opcode and the size of its parameters */
9588 OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr);
9589 codeAddr += sizeof(__int8);
9592 impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1);
9593 JITDUMP("\n [%2u] %3u (0x%03x) ", verCurrentState.esStackDepth, impCurOpcOffs, impCurOpcOffs);
9598 // Return if any previous code has caused inline to fail.
9599 if (compDonotInline())
9604 /* Get the size of additional parameters */
9606 signed int sz = opcodeSizes[opcode];
9609 clsHnd = NO_CLASS_HANDLE;
9611 callTyp = TYP_COUNT;
9613 impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1);
9614 impCurOpcName = opcodeNames[opcode];
9616 if (verbose && (opcode != CEE_PREFIX1))
9618 printf("%s", impCurOpcName);
9621 /* Use assertImp() to display the opcode */
9623 op1 = op2 = nullptr;
9626 /* See what kind of an opcode we have, then */
9628 unsigned mflags = 0;
9629 unsigned clsFlags = 0;
9642 CORINFO_SIG_INFO sig;
9645 bool ovfl, unordered, callNode;
9647 CORINFO_CLASS_HANDLE tokenType;
9657 opcode = (OPCODE)(getU1LittleEndian(codeAddr) + 256);
9658 codeAddr += sizeof(__int8);
9659 opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
9664 // We need to call impSpillLclRefs() for a struct type lclVar.
9665 // This is done for non-block assignments in the handling of stloc.
9666 if ((op1->OperGet() == GT_ASG) && varTypeIsStruct(op1->gtOp.gtOp1) &&
9667 (op1->gtOp.gtOp1->gtOper == GT_LCL_VAR))
9669 impSpillLclRefs(op1->gtOp.gtOp1->AsLclVarCommon()->gtLclNum);
9672 /* Append 'op1' to the list of statements */
9673 impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
9678 /* Append 'op1' to the list of statements */
9680 impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
9686 // Remember at which BC offset the tree was finished
9687 impNoteLastILoffs();
9692 impPushNullObjRefOnStack();
9705 cval.intVal = (opcode - CEE_LDC_I4_0);
9706 assert(-1 <= cval.intVal && cval.intVal <= 8);
9710 cval.intVal = getI1LittleEndian(codeAddr);
9713 cval.intVal = getI4LittleEndian(codeAddr);
9716 JITDUMP(" %d", cval.intVal);
9717 impPushOnStack(gtNewIconNode(cval.intVal), typeInfo(TI_INT));
9721 cval.lngVal = getI8LittleEndian(codeAddr);
9722 JITDUMP(" 0x%016llx", cval.lngVal);
9723 impPushOnStack(gtNewLconNode(cval.lngVal), typeInfo(TI_LONG));
9727 cval.dblVal = getR8LittleEndian(codeAddr);
9728 JITDUMP(" %#.17g", cval.dblVal);
9729 impPushOnStack(gtNewDconNode(cval.dblVal), typeInfo(TI_DOUBLE));
9733 cval.dblVal = getR4LittleEndian(codeAddr);
9734 JITDUMP(" %#.17g", cval.dblVal);
9736 GenTreePtr cnsOp = gtNewDconNode(cval.dblVal);
9737 #if !FEATURE_X87_DOUBLES
9738 // X87 stack doesn't differentiate between float/double
9739 // so R4 is treated as R8, but everybody else does
9740 cnsOp->gtType = TYP_FLOAT;
9741 #endif // FEATURE_X87_DOUBLES
9742 impPushOnStack(cnsOp, typeInfo(TI_DOUBLE));
9748 if (compIsForInlining())
9750 if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_NO_CALLEE_LDSTR)
9752 compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_LDSTR_RESTRICTION);
9757 val = getU4LittleEndian(codeAddr);
9758 JITDUMP(" %08X", val);
9759 if (tiVerificationNeeded)
9761 Verify(info.compCompHnd->isValidStringRef(info.compScopeHnd, val), "bad string");
9762 tiRetVal = typeInfo(TI_REF, impGetStringClass());
9764 impPushOnStack(gtNewSconNode(val, info.compScopeHnd), tiRetVal);
9769 lclNum = getU2LittleEndian(codeAddr);
9770 JITDUMP(" %u", lclNum);
9771 impLoadArg(lclNum, opcodeOffs + sz + 1);
9775 lclNum = getU1LittleEndian(codeAddr);
9776 JITDUMP(" %u", lclNum);
9777 impLoadArg(lclNum, opcodeOffs + sz + 1);
9784 lclNum = (opcode - CEE_LDARG_0);
9785 assert(lclNum >= 0 && lclNum < 4);
9786 impLoadArg(lclNum, opcodeOffs + sz + 1);
9790 lclNum = getU2LittleEndian(codeAddr);
9791 JITDUMP(" %u", lclNum);
9792 impLoadLoc(lclNum, opcodeOffs + sz + 1);
9796 lclNum = getU1LittleEndian(codeAddr);
9797 JITDUMP(" %u", lclNum);
9798 impLoadLoc(lclNum, opcodeOffs + sz + 1);
9805 lclNum = (opcode - CEE_LDLOC_0);
9806 assert(lclNum >= 0 && lclNum < 4);
9807 impLoadLoc(lclNum, opcodeOffs + sz + 1);
9811 lclNum = getU2LittleEndian(codeAddr);
9815 lclNum = getU1LittleEndian(codeAddr);
9817 JITDUMP(" %u", lclNum);
9819 if (tiVerificationNeeded)
9821 Verify(lclNum < info.compILargsCount, "bad arg num");
9824 if (compIsForInlining())
9826 op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo);
9827 noway_assert(op1->gtOper == GT_LCL_VAR);
9828 lclNum = op1->AsLclVar()->gtLclNum;
9833 lclNum = compMapILargNum(lclNum); // account for possible hidden param
9834 assertImp(lclNum < numArgs);
9836 if (lclNum == info.compThisArg)
9838 lclNum = lvaArg0Var;
9840 lvaTable[lclNum].lvArgWrite = 1;
9842 if (tiVerificationNeeded)
9844 typeInfo& tiLclVar = lvaTable[lclNum].lvVerTypeInfo;
9845 Verify(tiCompatibleWith(impStackTop().seTypeInfo, NormaliseForStack(tiLclVar), true),
9848 if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
9850 Verify(!tiLclVar.IsThisPtr(), "storing to uninit this ptr");
9857 lclNum = getU2LittleEndian(codeAddr);
9858 JITDUMP(" %u", lclNum);
9862 lclNum = getU1LittleEndian(codeAddr);
9863 JITDUMP(" %u", lclNum);
9870 lclNum = (opcode - CEE_STLOC_0);
9871 assert(lclNum >= 0 && lclNum < 4);
9874 if (tiVerificationNeeded)
9876 Verify(lclNum < info.compMethodInfo->locals.numArgs, "bad local num");
9877 Verify(tiCompatibleWith(impStackTop().seTypeInfo,
9878 NormaliseForStack(lvaTable[lclNum + numArgs].lvVerTypeInfo), true),
9882 if (compIsForInlining())
9884 lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
9886 /* Have we allocated a temp for this local? */
9888 lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline stloc first use temp"));
9897 if (lclNum >= info.compLocalsCount && lclNum != lvaArg0Var)
9899 assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
9905 /* if it is a struct assignment, make certain we don't overflow the buffer */
9906 assert(lclTyp != TYP_STRUCT || lvaLclSize(lclNum) >= info.compCompHnd->getClassSize(clsHnd));
9908 if (lvaTable[lclNum].lvNormalizeOnLoad())
9910 lclTyp = lvaGetRealType(lclNum);
9914 lclTyp = lvaGetActualType(lclNum);
9918 /* Pop the value being assigned */
9921 StackEntry se = impPopStack(clsHnd);
9923 tiRetVal = se.seTypeInfo;
9927 if (varTypeIsSIMD(lclTyp) && (lclTyp != op1->TypeGet()))
9929 assert(op1->TypeGet() == TYP_STRUCT);
9930 op1->gtType = lclTyp;
9932 #endif // FEATURE_SIMD
9934 op1 = impImplicitIorI4Cast(op1, lclTyp);
9936 #ifdef _TARGET_64BIT_
9937 // Downcast the TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
9938 if (varTypeIsI(op1->TypeGet()) && (genActualType(lclTyp) == TYP_INT))
9940 assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
9941 op1 = gtNewCastNode(TYP_INT, op1, TYP_INT);
9943 #endif // _TARGET_64BIT_
9945 // We had better assign it a value of the correct type
9947 assertImp(genActualType(lclTyp) == genActualType(op1->gtType) ||
9948 genActualType(lclTyp) == TYP_I_IMPL && op1->IsVarAddr() ||
9949 (genActualType(lclTyp) == TYP_I_IMPL && (op1->gtType == TYP_BYREF || op1->gtType == TYP_REF)) ||
9950 (genActualType(op1->gtType) == TYP_I_IMPL && lclTyp == TYP_BYREF) ||
9951 (varTypeIsFloating(lclTyp) && varTypeIsFloating(op1->TypeGet())) ||
9952 ((genActualType(lclTyp) == TYP_BYREF) && genActualType(op1->TypeGet()) == TYP_REF));
9954 /* If op1 is "&var" then its type is the transient "*" and it can
9955 be used either as TYP_BYREF or TYP_I_IMPL */
9957 if (op1->IsVarAddr())
9959 assertImp(genActualType(lclTyp) == TYP_I_IMPL || lclTyp == TYP_BYREF);
9961 /* When "&var" is created, we assume it is a byref. If it is
9962 being assigned to a TYP_I_IMPL var, change the type to
9963 prevent unnecessary GC info */
9965 if (genActualType(lclTyp) == TYP_I_IMPL)
9967 op1->gtType = TYP_I_IMPL;
9971 /* Filter out simple assignments to itself */
9973 if (op1->gtOper == GT_LCL_VAR && lclNum == op1->gtLclVarCommon.gtLclNum)
9977 // This is a sequence of (ldloc, dup, stloc). Can simplify
9978 // to (ldloc, stloc). Goto LDVAR to reconstruct the ldloc node.
9979 CLANG_FORMAT_COMMENT_ANCHOR;
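// Illustrative IL for this case (hypothetical local V_0, not from this method):
//     ldloc.0
//     dup
//     stloc.0     // stores V_0 back into itself
// The self-assignment is dropped and the pending ldloc is simply re-created below.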
9982 if (tiVerificationNeeded)
9985 typeInfo::AreEquivalent(tiRetVal, NormaliseForStack(lvaTable[lclNum].lvVerTypeInfo)));
9990 insertLdloc = false;
9992 impLoadVar(lclNum, opcodeOffs + sz + 1);
9995 else if (opts.compDbgCode)
9997 op1 = gtNewNothingNode();
10006 /* Create the assignment node */
10008 op2 = gtNewLclvNode(lclNum, lclTyp, opcodeOffs + sz + 1);
10010 /* If the local is aliased, we need to spill calls and
10011 indirections from the stack. */
10013 if ((lvaTable[lclNum].lvAddrExposed || lvaTable[lclNum].lvHasLdAddrOp) &&
10014 verCurrentState.esStackDepth > 0)
10016 impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("Local could be aliased"));
10019 /* Spill any refs to the local from the stack */
10021 impSpillLclRefs(lclNum);
10023 #if !FEATURE_X87_DOUBLES
10024 // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE
10025 // We insert a cast to the dest 'op2' type
10027 if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) &&
10028 varTypeIsFloating(op2->gtType))
10030 op1 = gtNewCastNode(op2->TypeGet(), op1, op2->TypeGet());
10032 #endif // !FEATURE_X87_DOUBLES
10034 if (varTypeIsStruct(lclTyp))
10036 op1 = impAssignStruct(op2, op1, clsHnd, (unsigned)CHECK_SPILL_ALL);
10040 // The code generator generates GC tracking information
10041 // based on the RHS of the assignment. Later the LHS (which is
10042 // a BYREF) gets used and the emitter checks that that variable
10043 // is being tracked. It is not (since the RHS was an int and did
10044 // not need tracking). To keep this assert happy, we change the RHS
10045 if (lclTyp == TYP_BYREF && !varTypeIsGC(op1->gtType))
10047 op1->gtType = TYP_BYREF;
10049 op1 = gtNewAssignNode(op2, op1);
10052 /* If insertLdloc is true, then we need to insert a ldloc following the
10053 stloc. This is done when converting a (dup, stloc) sequence into
10054 a (stloc, ldloc) sequence. */
10058 // From SPILL_APPEND
10059 impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
10062 // From DONE_APPEND
10063 impNoteLastILoffs();
10066 insertLdloc = false;
10068 impLoadVar(lclNum, opcodeOffs + sz + 1, tiRetVal);
10075 lclNum = getU2LittleEndian(codeAddr);
10079 lclNum = getU1LittleEndian(codeAddr);
10081 JITDUMP(" %u", lclNum);
10082 if (tiVerificationNeeded)
10084 Verify(lclNum < info.compMethodInfo->locals.numArgs, "bad local num");
10085 Verify(info.compInitMem, "initLocals not set");
10088 if (compIsForInlining())
10090 // Get the local type
10091 lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
10093 /* Have we allocated a temp for this local? */
10095 lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline ldloca(s) first use temp"));
10097 op1 = gtNewLclvNode(lclNum, lvaGetActualType(lclNum));
10103 assertImp(lclNum < info.compLocalsCount);
10107 lclNum = getU2LittleEndian(codeAddr);
10111 lclNum = getU1LittleEndian(codeAddr);
10113 JITDUMP(" %u", lclNum);
10114 Verify(lclNum < info.compILargsCount, "bad arg num");
10116 if (compIsForInlining())
10118 // In IL, LDARGA(_S) is used to load the byref managed pointer of a struct argument,
10119 // followed by a ldfld to load the field.
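// Illustrative IL (hypothetical struct/field names, not from this method):
//     ldarga.s   1
//     ldfld      int32 SomeStruct::someField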
10121 op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo);
10122 if (op1->gtOper != GT_LCL_VAR)
10124 compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDARGA_NOT_LOCAL_VAR);
10128 assert(op1->gtOper == GT_LCL_VAR);
10133 lclNum = compMapILargNum(lclNum); // account for possible hidden param
10134 assertImp(lclNum < numArgs);
10136 if (lclNum == info.compThisArg)
10138 lclNum = lvaArg0Var;
10145 op1 = gtNewLclvNode(lclNum, lvaGetActualType(lclNum), opcodeOffs + sz + 1);
10148 assert(op1->gtOper == GT_LCL_VAR);
10150 /* Note that this is supposed to create the transient type "*"
10151 which may be used as a TYP_I_IMPL. However we catch places
10152 where it is used as a TYP_I_IMPL and change the node if needed.
10153 Thus we are pessimistic and may report byrefs in the GC info
10154 where it was not absolutely needed, but it is safer this way.
10156 op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, op1);
10158 // &aliasedVar doesn't need GTF_GLOB_REF, though aliasedVar does
10159 assert((op1->gtFlags & GTF_GLOB_REF) == 0);
10161 tiRetVal = lvaTable[lclNum].lvVerTypeInfo;
10162 if (tiVerificationNeeded)
10164 // Don't allow taking address of uninit this ptr.
10165 if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
10167 Verify(!tiRetVal.IsThisPtr(), "address of uninit this ptr");
10170 if (!tiRetVal.IsByRef())
10172 tiRetVal.MakeByRef();
10176 Verify(false, "byref to byref");
10180 impPushOnStack(op1, tiRetVal);
10185 if (!info.compIsVarArgs)
10187 BADCODE("arglist in non-vararg method");
10190 if (tiVerificationNeeded)
10192 tiRetVal = typeInfo(TI_STRUCT, impGetRuntimeArgumentHandle());
10194 assertImp((info.compMethodInfo->args.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG);
10196 /* The ARGLIST cookie is a hidden 'last' parameter, we have already
10197 adjusted the arg count because this is like fetching the last param */
10198 assertImp(0 < numArgs);
10199 assert(lvaTable[lvaVarargsHandleArg].lvAddrExposed);
10200 lclNum = lvaVarargsHandleArg;
10201 op1 = gtNewLclvNode(lclNum, TYP_I_IMPL, opcodeOffs + sz + 1);
10202 op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, op1);
10203 impPushOnStack(op1, tiRetVal);
10206 case CEE_ENDFINALLY:
10208 if (compIsForInlining())
10210 assert(!"Shouldn't have exception handlers in the inliner!");
10211 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFINALLY);
10215 if (verCurrentState.esStackDepth > 0)
10217 impEvalSideEffects();
10220 if (info.compXcptnsCount == 0)
10222 BADCODE("endfinally outside finally");
10225 assert(verCurrentState.esStackDepth == 0);
10227 op1 = gtNewOperNode(GT_RETFILT, TYP_VOID, nullptr);
10230 case CEE_ENDFILTER:
10232 if (compIsForInlining())
10234 assert(!"Shouldn't have exception handlers in the inliner!");
10235 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFILTER);
10239 block->bbSetRunRarely(); // filters are rare
10241 if (info.compXcptnsCount == 0)
10243 BADCODE("endfilter outside filter");
10246 if (tiVerificationNeeded)
10248 Verify(impStackTop().seTypeInfo.IsType(TI_INT), "bad endfilt arg");
10251 op1 = impPopStack().val;
10252 assertImp(op1->gtType == TYP_INT);
10253 if (!bbInFilterILRange(block))
10255 BADCODE("EndFilter outside a filter handler");
10258 /* Mark current bb as end of filter */
10260 assert(compCurBB->bbFlags & BBF_DONT_REMOVE);
10261 assert(compCurBB->bbJumpKind == BBJ_EHFILTERRET);
10263 /* Mark catch handler as successor */
10265 op1 = gtNewOperNode(GT_RETFILT, op1->TypeGet(), op1);
10266 if (verCurrentState.esStackDepth != 0)
10268 verRaiseVerifyException(INDEBUG("stack must be 1 on end of filter") DEBUGARG(__FILE__)
10269 DEBUGARG(__LINE__));
10274 prefixFlags &= ~PREFIX_TAILCALL; // ret without call before it
10276 if (!impReturnInstruction(block, prefixFlags, opcode))
10287 assert(!compIsForInlining());
10289 if (tiVerificationNeeded)
10291 Verify(false, "Invalid opcode: CEE_JMP");
10294 if ((info.compFlags & CORINFO_FLG_SYNCH) || block->hasTryIndex() || block->hasHndIndex())
10296 /* CEE_JMP does not make sense in some "protected" regions. */
10298 BADCODE("Jmp not allowed in protected region");
10301 if (verCurrentState.esStackDepth != 0)
10303 BADCODE("Stack must be empty after CEE_JMPs");
10306 _impResolveToken(CORINFO_TOKENKIND_Method);
10308 JITDUMP(" %08X", resolvedToken.token);
10310 /* The signature of the target has to be identical to ours.
10311 At least check that argCnt and returnType match */
10313 eeGetMethodSig(resolvedToken.hMethod, &sig);
10314 if (sig.numArgs != info.compMethodInfo->args.numArgs ||
10315 sig.retType != info.compMethodInfo->args.retType ||
10316 sig.callConv != info.compMethodInfo->args.callConv)
10318 BADCODE("Incompatible target for CEE_JMPs");
10321 #if defined(_TARGET_XARCH_) || defined(_TARGET_ARMARCH_)
10323 op1 = new (this, GT_JMP) GenTreeVal(GT_JMP, TYP_VOID, (size_t)resolvedToken.hMethod);
10325 /* Mark the basic block as being a JUMP instead of RETURN */
10327 block->bbFlags |= BBF_HAS_JMP;
10329 /* Set this flag to make sure register arguments have a location assigned
10330 * even if we don't use them inside the method */
10332 compJmpOpUsed = true;
10334 fgNoStructPromotion = true;
10338 #else // !_TARGET_XARCH_ && !_TARGET_ARMARCH_
10340 // Import this just like a series of LDARGs + tail. + call + ret
10342 if (info.compIsVarArgs)
10344 // For now we don't implement true tail calls, so this breaks varargs.
10345 // So warn the user instead of generating bad code.
10346 // This is a semi-temporary workaround for DevDiv 173860, until we can properly
10347 // implement true tail calls.
10348 IMPL_LIMITATION("varargs + CEE_JMP doesn't work yet");
10351 // First load up the arguments (0 - N)
10352 for (unsigned argNum = 0; argNum < info.compILargsCount; argNum++)
10354 impLoadArg(argNum, opcodeOffs + sz + 1);
10357 // Now generate the tail call
10358 noway_assert(prefixFlags == 0);
10359 prefixFlags = PREFIX_TAILCALL_EXPLICIT;
10362 eeGetCallInfo(&resolvedToken, NULL,
10363 combine(CORINFO_CALLINFO_ALLOWINSTPARAM, CORINFO_CALLINFO_SECURITYCHECKS), &callInfo);
10365 // All calls and delegates need a security callout.
10366 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
10368 callTyp = impImportCall(CEE_CALL, &resolvedToken, NULL, NULL, PREFIX_TAILCALL_EXPLICIT, &callInfo,
10371 // And finish with the ret
10374 #endif // _TARGET_XARCH_ || _TARGET_ARMARCH_
10377 assertImp(sz == sizeof(unsigned));
10379 _impResolveToken(CORINFO_TOKENKIND_Class);
10381 JITDUMP(" %08X", resolvedToken.token);
10383 ldelemClsHnd = resolvedToken.hClass;
10385 if (tiVerificationNeeded)
10387 typeInfo tiArray = impStackTop(1).seTypeInfo;
10388 typeInfo tiIndex = impStackTop().seTypeInfo;
10390 // As per ECMA 'index' specified can be either int32 or native int.
10391 Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10393 typeInfo arrayElemType = verMakeTypeInfo(ldelemClsHnd);
10394 Verify(tiArray.IsNullObjRef() ||
10395 typeInfo::AreEquivalent(verGetArrayElemType(tiArray), arrayElemType),
10398 tiRetVal = arrayElemType;
10399 tiRetVal.MakeByRef();
10400 if (prefixFlags & PREFIX_READONLY)
10402 tiRetVal.SetIsReadonlyByRef();
10405 // an array interior pointer is always in the heap
10406 tiRetVal.SetIsPermanentHomeByRef();
10409 // If it's a value class array we just do a simple address-of
10410 if (eeIsValueClass(ldelemClsHnd))
10412 CorInfoType cit = info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd);
10413 if (cit == CORINFO_TYPE_UNDEF)
10415 lclTyp = TYP_STRUCT;
10419 lclTyp = JITtype2varType(cit);
10421 goto ARR_LD_POST_VERIFY;
10424 // Similarly, if it's a readonly access, we can do a simple address-of
10425 // without doing a runtime type-check
10426 if (prefixFlags & PREFIX_READONLY)
10429 goto ARR_LD_POST_VERIFY;
10432 // Otherwise we need the full helper function with run-time type check
10433 op1 = impTokenToHandle(&resolvedToken);
10434 if (op1 == nullptr)
10435 { // compDonotInline()
10439 args = gtNewArgList(op1); // Type
10440 args = gtNewListNode(impPopStack().val, args); // index
10441 args = gtNewListNode(impPopStack().val, args); // array
10442 op1 = gtNewHelperCallNode(CORINFO_HELP_LDELEMA_REF, TYP_BYREF, GTF_EXCEPT, args);
10444 impPushOnStack(op1, tiRetVal);
10447 // ldelem for reference and value types
10449 assertImp(sz == sizeof(unsigned));
10451 _impResolveToken(CORINFO_TOKENKIND_Class);
10453 JITDUMP(" %08X", resolvedToken.token);
10455 ldelemClsHnd = resolvedToken.hClass;
10457 if (tiVerificationNeeded)
10459 typeInfo tiArray = impStackTop(1).seTypeInfo;
10460 typeInfo tiIndex = impStackTop().seTypeInfo;
10462 // As per ECMA 'index' specified can be either int32 or native int.
10463 Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10464 tiRetVal = verMakeTypeInfo(ldelemClsHnd);
10466 Verify(tiArray.IsNullObjRef() || tiCompatibleWith(verGetArrayElemType(tiArray), tiRetVal, false),
10467 "type of array incompatible with type operand");
10468 tiRetVal.NormaliseForStack();
10471 // If it's a reference type or generic variable type
10472 // then just generate code as though it's a ldelem.ref instruction
10473 if (!eeIsValueClass(ldelemClsHnd))
10476 opcode = CEE_LDELEM_REF;
10480 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(ldelemClsHnd);
10481 lclTyp = JITtype2varType(jitTyp);
10482 tiRetVal = verMakeTypeInfo(ldelemClsHnd); // precise type always needed for struct
10483 tiRetVal.NormaliseForStack();
10485 goto ARR_LD_POST_VERIFY;
10487 case CEE_LDELEM_I1:
10490 case CEE_LDELEM_I2:
10491 lclTyp = TYP_SHORT;
10494 lclTyp = TYP_I_IMPL;
10497 // Should be UINT, but since no platform widens 4->8 bytes it doesn't matter
10498 // and treating it as TYP_INT avoids other asserts.
10499 case CEE_LDELEM_U4:
10503 case CEE_LDELEM_I4:
10506 case CEE_LDELEM_I8:
10509 case CEE_LDELEM_REF:
10512 case CEE_LDELEM_R4:
10513 lclTyp = TYP_FLOAT;
10515 case CEE_LDELEM_R8:
10516 lclTyp = TYP_DOUBLE;
10518 case CEE_LDELEM_U1:
10519 lclTyp = TYP_UBYTE;
10521 case CEE_LDELEM_U2:
10527 if (tiVerificationNeeded)
10529 typeInfo tiArray = impStackTop(1).seTypeInfo;
10530 typeInfo tiIndex = impStackTop().seTypeInfo;
10532 // As per ECMA 'index' specified can be either int32 or native int.
10533 Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10534 if (tiArray.IsNullObjRef())
10536 if (lclTyp == TYP_REF)
10537 { // we will say a deref of a null array yields a null ref
10538 tiRetVal = typeInfo(TI_NULL);
10542 tiRetVal = typeInfo(lclTyp);
10547 tiRetVal = verGetArrayElemType(tiArray);
10548 typeInfo arrayElemTi = typeInfo(lclTyp);
10549 #ifdef _TARGET_64BIT_
10550 if (opcode == CEE_LDELEM_I)
10552 arrayElemTi = typeInfo::nativeInt();
10555 if (lclTyp != TYP_REF && lclTyp != TYP_STRUCT)
10557 Verify(typeInfo::AreEquivalent(tiRetVal, arrayElemTi), "bad array");
10560 #endif // _TARGET_64BIT_
10562 Verify(tiRetVal.IsType(arrayElemTi.GetType()), "bad array");
10565 tiRetVal.NormaliseForStack();
10567 ARR_LD_POST_VERIFY:
10569 /* Pull the index value and array address */
10570 op2 = impPopStack().val;
10571 op1 = impPopStack().val;
10572 assertImp(op1->gtType == TYP_REF);
10574 /* Check for null pointer - in the inliner case we simply abort */
10576 if (compIsForInlining())
10578 if (op1->gtOper == GT_CNS_INT)
10580 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NULL_FOR_LDELEM);
10585 op1 = impCheckForNullPointer(op1);
10587 /* Mark the block as containing an index expression */
10589 if (op1->gtOper == GT_LCL_VAR)
10591 if (op2->gtOper == GT_LCL_VAR || op2->gtOper == GT_CNS_INT || op2->gtOper == GT_ADD)
10593 block->bbFlags |= BBF_HAS_IDX_LEN;
10594 optMethodFlags |= OMF_HAS_ARRAYREF;
10598 /* Create the index node and push it on the stack */
10600 op1 = gtNewIndexRef(lclTyp, op1, op2);
10602 ldstruct = (opcode == CEE_LDELEM && lclTyp == TYP_STRUCT);
10604 if ((opcode == CEE_LDELEMA) || ldstruct ||
10605 (ldelemClsHnd != DUMMY_INIT(NULL) && eeIsValueClass(ldelemClsHnd)))
10607 assert(ldelemClsHnd != DUMMY_INIT(NULL));
10609 // remember the element size
10610 if (lclTyp == TYP_REF)
10612 op1->gtIndex.gtIndElemSize = sizeof(void*);
10616 // If ldElemClass is precisely a primitive type, use that; otherwise, preserve the struct type.
10617 if (info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd) == CORINFO_TYPE_UNDEF)
10619 op1->gtIndex.gtStructElemClass = ldelemClsHnd;
10621 assert(lclTyp != TYP_STRUCT || op1->gtIndex.gtStructElemClass != nullptr);
10622 if (lclTyp == TYP_STRUCT)
10624 size = info.compCompHnd->getClassSize(ldelemClsHnd);
10625 op1->gtIndex.gtIndElemSize = size;
10626 op1->gtType = lclTyp;
10630 if ((opcode == CEE_LDELEMA) || ldstruct)
10633 lclTyp = TYP_BYREF;
10635 op1 = gtNewOperNode(GT_ADDR, lclTyp, op1);
10639 assert(lclTyp != TYP_STRUCT);
10645 // Create an OBJ for the result
10646 op1 = gtNewObjNode(ldelemClsHnd, op1);
10647 op1->gtFlags |= GTF_EXCEPT;
10649 impPushOnStack(op1, tiRetVal);
10652 // stelem for reference and value types
10655 assertImp(sz == sizeof(unsigned));
10657 _impResolveToken(CORINFO_TOKENKIND_Class);
10659 JITDUMP(" %08X", resolvedToken.token);
10661 stelemClsHnd = resolvedToken.hClass;
10663 if (tiVerificationNeeded)
10665 typeInfo tiArray = impStackTop(2).seTypeInfo;
10666 typeInfo tiIndex = impStackTop(1).seTypeInfo;
10667 typeInfo tiValue = impStackTop().seTypeInfo;
10669 // As per ECMA 'index' specified can be either int32 or native int.
10670 Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10671 typeInfo arrayElem = verMakeTypeInfo(stelemClsHnd);
10673 Verify(tiArray.IsNullObjRef() || tiCompatibleWith(arrayElem, verGetArrayElemType(tiArray), false),
10674 "type operand incompatible with array element type");
10675 arrayElem.NormaliseForStack();
10676 Verify(tiCompatibleWith(tiValue, arrayElem, true), "value incompatible with type operand");
10679 // If it's a reference type just behave as though it's a stelem.ref instruction
10680 if (!eeIsValueClass(stelemClsHnd))
10682 goto STELEM_REF_POST_VERIFY;
10685 // Otherwise extract the type
10687 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(stelemClsHnd);
10688 lclTyp = JITtype2varType(jitTyp);
10689 goto ARR_ST_POST_VERIFY;
10692 case CEE_STELEM_REF:
10694 if (tiVerificationNeeded)
10696 typeInfo tiArray = impStackTop(2).seTypeInfo;
10697 typeInfo tiIndex = impStackTop(1).seTypeInfo;
10698 typeInfo tiValue = impStackTop().seTypeInfo;
10700 // As per ECMA 'index' specified can be either int32 or native int.
10701 Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10702 Verify(tiValue.IsObjRef(), "bad value");
10704 // we only check that it is an object reference; the helper does additional checks
10705 Verify(tiArray.IsNullObjRef() || verGetArrayElemType(tiArray).IsType(TI_REF), "bad array");
10708 arrayNodeTo = impStackTop(2).val;
10709 arrayNodeToIndex = impStackTop(1).val;
10710 arrayNodeFrom = impStackTop().val;
10713 // Note that it is not legal to optimize away CORINFO_HELP_ARRADDR_ST in a
10714 // lot of cases because of covariance, i.e. foo[] can be cast to object[].
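// For example (illustrative C#):
//     object[] arr = new string[1];
//     arr[0] = new object();   // must throw ArrayTypeMismatchException at run time
// so in general the store has to go through the helper's element-type check.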
10717 // Check for assignment to same array, ie. arrLcl[i] = arrLcl[j]
10718 // This does not need CORINFO_HELP_ARRADDR_ST
10720 if (arrayNodeFrom->OperGet() == GT_INDEX && arrayNodeFrom->gtOp.gtOp1->gtOper == GT_LCL_VAR &&
10721 arrayNodeTo->gtOper == GT_LCL_VAR &&
10722 arrayNodeTo->gtLclVarCommon.gtLclNum == arrayNodeFrom->gtOp.gtOp1->gtLclVarCommon.gtLclNum &&
10723 !lvaTable[arrayNodeTo->gtLclVarCommon.gtLclNum].lvAddrExposed)
10726 goto ARR_ST_POST_VERIFY;
10729 // Check for assignment of NULL. This does not need CORINFO_HELP_ARRADDR_ST
10731 if (arrayNodeFrom->OperGet() == GT_CNS_INT)
10733 assert(arrayNodeFrom->gtType == TYP_REF && arrayNodeFrom->gtIntCon.gtIconVal == 0);
10736 goto ARR_ST_POST_VERIFY;
10739 STELEM_REF_POST_VERIFY:
10741 /* Call a helper function to do the assignment */
10742 op1 = gtNewHelperCallNode(CORINFO_HELP_ARRADDR_ST, TYP_VOID, 0, impPopList(3, &flags, nullptr));
10746 case CEE_STELEM_I1:
10749 case CEE_STELEM_I2:
10750 lclTyp = TYP_SHORT;
10753 lclTyp = TYP_I_IMPL;
10755 case CEE_STELEM_I4:
10758 case CEE_STELEM_I8:
10761 case CEE_STELEM_R4:
10762 lclTyp = TYP_FLOAT;
10764 case CEE_STELEM_R8:
10765 lclTyp = TYP_DOUBLE;
10770 if (tiVerificationNeeded)
10772 typeInfo tiArray = impStackTop(2).seTypeInfo;
10773 typeInfo tiIndex = impStackTop(1).seTypeInfo;
10774 typeInfo tiValue = impStackTop().seTypeInfo;
10776 // As per ECMA 'index' specified can be either int32 or native int.
10777 Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10778 typeInfo arrayElem = typeInfo(lclTyp);
10779 #ifdef _TARGET_64BIT_
10780 if (opcode == CEE_STELEM_I)
10782 arrayElem = typeInfo::nativeInt();
10784 #endif // _TARGET_64BIT_
10785 Verify(tiArray.IsNullObjRef() || typeInfo::AreEquivalent(verGetArrayElemType(tiArray), arrayElem),
10788 Verify(tiCompatibleWith(NormaliseForStack(tiValue), arrayElem.NormaliseForStack(), true),
10792 ARR_ST_POST_VERIFY:
10793 /* The strict order of evaluation is LHS-operands, RHS-operands,
10794 range-check, and then assignment. However, codegen currently
10795 does the range-check before evaluating the RHS-operands. So to
10796 maintain strict ordering, we spill the stack. */
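/* For example (illustrative): in "arr[i] = F()", F()'s side effects must still be observable
   even if the index check later throws, so a side-effecting value sitting on the stack is
   spilled to a temp before the GT_INDEX range check is created. */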
10798 if (impStackTop().val->gtFlags & GTF_SIDE_EFFECT)
10800 impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
10801 "Strict ordering of exceptions for Array store"));
10804 /* Pull the new value from the stack */
10805 op2 = impPopStack().val;
10807 /* Pull the index value */
10808 op1 = impPopStack().val;
10810 /* Pull the array address */
10811 op3 = impPopStack().val;
10813 assertImp(op3->gtType == TYP_REF);
10814 if (op2->IsVarAddr())
10816 op2->gtType = TYP_I_IMPL;
10819 op3 = impCheckForNullPointer(op3);
10821 // Mark the block as containing an index expression
10823 if (op3->gtOper == GT_LCL_VAR)
10825 if (op1->gtOper == GT_LCL_VAR || op1->gtOper == GT_CNS_INT || op1->gtOper == GT_ADD)
10827 block->bbFlags |= BBF_HAS_IDX_LEN;
10828 optMethodFlags |= OMF_HAS_ARRAYREF;
10832 /* Create the index node */
10834 op1 = gtNewIndexRef(lclTyp, op3, op1);
10836 /* Create the assignment node and append it */
10838 if (lclTyp == TYP_STRUCT)
10840 assert(stelemClsHnd != DUMMY_INIT(NULL));
10842 op1->gtIndex.gtStructElemClass = stelemClsHnd;
10843 op1->gtIndex.gtIndElemSize = info.compCompHnd->getClassSize(stelemClsHnd);
10845 if (varTypeIsStruct(op1))
10847 op1 = impAssignStruct(op1, op2, stelemClsHnd, (unsigned)CHECK_SPILL_ALL);
10851 op2 = impImplicitR4orR8Cast(op2, op1->TypeGet());
10852 op1 = gtNewAssignNode(op1, op2);
10855 /* Mark the expression as containing an assignment */
10857 op1->gtFlags |= GTF_ASG;
10868 case CEE_ADD_OVF_UN:
10876 goto MATH_OP2_FLAGS;
10885 case CEE_SUB_OVF_UN:
10893 goto MATH_OP2_FLAGS;
10897 goto MATH_MAYBE_CALL_NO_OVF;
10902 case CEE_MUL_OVF_UN:
10909 goto MATH_MAYBE_CALL_OVF;
10911 // Other binary math operations
10915 goto MATH_MAYBE_CALL_NO_OVF;
10919 goto MATH_MAYBE_CALL_NO_OVF;
10923 goto MATH_MAYBE_CALL_NO_OVF;
10927 goto MATH_MAYBE_CALL_NO_OVF;
10929 MATH_MAYBE_CALL_NO_OVF:
10931 MATH_MAYBE_CALL_OVF:
10932 // Morpher has some complex logic about when to turn different
10933 // typed nodes on different platforms into helper calls. We
10934 // need to either duplicate that logic here, or just
10935 // pessimistically make all the nodes large enough to become
10936 // call nodes. Since call nodes aren't that much larger and
10937 // these opcodes are infrequent enough I chose the latter.
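// For example (illustrative): a 64-bit division on a 32-bit target is later morphed into a
// helper call (e.g. CORINFO_HELP_LDIV), so the arithmetic node must already be big enough
// to be converted into a GT_CALL in place.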
10939 goto MATH_OP2_FLAGS;
10951 MATH_OP2: // For default values of 'ovfl' and 'callNode'
10956 MATH_OP2_FLAGS: // If 'ovfl' and 'callNode' have already been set
10958 /* Pull two values and push back the result */
10960 if (tiVerificationNeeded)
10962 const typeInfo& tiOp1 = impStackTop(1).seTypeInfo;
10963 const typeInfo& tiOp2 = impStackTop().seTypeInfo;
10965 Verify(tiCompatibleWith(tiOp1, tiOp2, true), "different arg type");
10966 if (oper == GT_ADD || oper == GT_DIV || oper == GT_SUB || oper == GT_MUL || oper == GT_MOD)
10968 Verify(tiOp1.IsNumberType(), "not number");
10972 Verify(tiOp1.IsIntegerType(), "not integer");
10975 Verify(!ovfl || tiOp1.IsIntegerType(), "not integer");
10979 #ifdef _TARGET_64BIT_
10980 if (tiOp2.IsNativeIntType())
10984 #endif // _TARGET_64BIT_
10987 op2 = impPopStack().val;
10988 op1 = impPopStack().val;
10990 #if !CPU_HAS_FP_SUPPORT
10991 if (varTypeIsFloating(op1->gtType))
10996 /* Can't do arithmetic with references */
10997 assertImp(genActualType(op1->TypeGet()) != TYP_REF && genActualType(op2->TypeGet()) != TYP_REF);
10999 // Change both to TYP_I_IMPL (impBashVarAddrsToI won't change it if it's a true byref, only
11000 // if it is in the stack)
11001 impBashVarAddrsToI(op1, op2);
11003 type = impGetByRefResultType(oper, uns, &op1, &op2);
11005 assert(!ovfl || !varTypeIsFloating(op1->gtType));
11007 /* Special case: "int+0", "int-0", "int*1", "int/1" */
11009 if (op2->gtOper == GT_CNS_INT)
11011 if ((op2->IsIntegralConst(0) && (oper == GT_ADD || oper == GT_SUB)) ||
11012 (op2->IsIntegralConst(1) && (oper == GT_MUL || oper == GT_DIV)))
11015 impPushOnStack(op1, tiRetVal);
11020 #if !FEATURE_X87_DOUBLES
11021 // We can generate a TYP_FLOAT operation that has a TYP_DOUBLE operand
11023 if (varTypeIsFloating(type) && varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType))
11025 if (op1->TypeGet() != type)
11027 // We insert a cast of op1 to 'type'
11028 op1 = gtNewCastNode(type, op1, type);
11030 if (op2->TypeGet() != type)
11032 // We insert a cast of op2 to 'type'
11033 op2 = gtNewCastNode(type, op2, type);
11036 #endif // !FEATURE_X87_DOUBLES
11038 #if SMALL_TREE_NODES
11041 /* These operators can later be transformed into 'GT_CALL' */
11043 assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MUL]);
11044 #ifndef _TARGET_ARM_
11045 assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_DIV]);
11046 assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UDIV]);
11047 assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MOD]);
11048 assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UMOD]);
11050 // It's tempting to use LargeOpOpcode() here, but this logic is *not* saying
11051 // that we'll need to transform into a general large node, but rather specifically
11052 // to a call: by doing it this way, things keep working if there are multiple sizes,
11053 // and a CALL is no longer the largest.
11054 // That said, as of now it *is* a large node, so we'll do this with an assert rather than a runtime check.
11056 assert(GenTree::s_gtNodeSizes[GT_CALL] == TREE_NODE_SZ_LARGE);
11057 op1 = new (this, GT_CALL) GenTreeOp(oper, type, op1, op2 DEBUGARG(/*largeNode*/ true));
11060 #endif // SMALL_TREE_NODES
11062 op1 = gtNewOperNode(oper, type, op1, op2);
11065 /* Special case: integer/long division may throw an exception */
11067 if (varTypeIsIntegral(op1->TypeGet()) && op1->OperMayThrow())
11069 op1->gtFlags |= GTF_EXCEPT;
11074 assert(oper == GT_ADD || oper == GT_SUB || oper == GT_MUL);
11075 if (ovflType != TYP_UNKNOWN)
11077 op1->gtType = ovflType;
11079 op1->gtFlags |= (GTF_EXCEPT | GTF_OVERFLOW);
11082 op1->gtFlags |= GTF_UNSIGNED;
11086 impPushOnStack(op1, tiRetVal);
11101 if (tiVerificationNeeded)
11103 const typeInfo& tiVal = impStackTop(1).seTypeInfo;
11104 const typeInfo& tiShift = impStackTop(0).seTypeInfo;
11105 Verify(tiVal.IsIntegerType() && tiShift.IsType(TI_INT), "Bad shift args");
11108 op2 = impPopStack().val;
11109 op1 = impPopStack().val; // operand to be shifted
11110 impBashVarAddrsToI(op1, op2);
11112 type = genActualType(op1->TypeGet());
11113 op1 = gtNewOperNode(oper, type, op1, op2);
11115 impPushOnStack(op1, tiRetVal);
11119 if (tiVerificationNeeded)
11121 tiRetVal = impStackTop().seTypeInfo;
11122 Verify(tiRetVal.IsIntegerType(), "bad int value");
11125 op1 = impPopStack().val;
11126 impBashVarAddrsToI(op1, nullptr);
11127 type = genActualType(op1->TypeGet());
11128 impPushOnStack(gtNewOperNode(GT_NOT, type, op1), tiRetVal);
11132 if (tiVerificationNeeded)
11134 tiRetVal = impStackTop().seTypeInfo;
11135 Verify(tiRetVal.IsType(TI_DOUBLE), "bad R value");
11137 op1 = impPopStack().val;
11138 type = op1->TypeGet();
11139 op1 = gtNewOperNode(GT_CKFINITE, type, op1);
11140 op1->gtFlags |= GTF_EXCEPT;
11142 impPushOnStack(op1, tiRetVal);
11147 val = getI4LittleEndian(codeAddr); // jump distance
11148 jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int32)) + val);
11152 val = getI1LittleEndian(codeAddr); // jump distance
11153 jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int8)) + val);
11157 if (compIsForInlining())
11159 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_LEAVE);
11163 JITDUMP(" %04X", jmpAddr);
11164 if (block->bbJumpKind != BBJ_LEAVE)
11166 impResetLeaveBlock(block, jmpAddr);
11169 assert(jmpAddr == block->bbJumpDest->bbCodeOffs);
11170 impImportLeave(block);
11171 impNoteBranchOffs();
11177 jmpDist = (sz == 1) ? getI1LittleEndian(codeAddr) : getI4LittleEndian(codeAddr);
11179 if (compIsForInlining() && jmpDist == 0)
11184 impNoteBranchOffs();
11190 case CEE_BRFALSE_S:
11192 /* Pop the comparand (now there's a neat term) from the stack */
11193 if (tiVerificationNeeded)
11195 typeInfo& tiVal = impStackTop().seTypeInfo;
11196 Verify(tiVal.IsObjRef() || tiVal.IsByRef() || tiVal.IsIntegerType() || tiVal.IsMethod(),
11200 op1 = impPopStack().val;
11201 type = op1->TypeGet();
11203 // brfalse and brtrue are only allowed on I4, refs, and byrefs.
11204 if (!opts.MinOpts() && !opts.compDbgCode && block->bbJumpDest == block->bbNext)
11206 block->bbJumpKind = BBJ_NONE;
11208 if (op1->gtFlags & GTF_GLOB_EFFECT)
11210 op1 = gtUnusedValNode(op1);
11219 if (op1->OperIsCompare())
11221 if (opcode == CEE_BRFALSE || opcode == CEE_BRFALSE_S)
11223 // Flip the sense of the compare
11225 op1 = gtReverseCond(op1);
11230 /* We'll compare against an equally-sized integer 0 */
11231 /* For small types, we always compare against int */
11232 op2 = gtNewZeroConNode(genActualType(op1->gtType));
11234 /* Create the comparison operator and try to fold it */
11236 oper = (opcode == CEE_BRTRUE || opcode == CEE_BRTRUE_S) ? GT_NE : GT_EQ;
11237 op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
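/* e.g. (illustrative): "brtrue" on an int value v becomes GT_NE(v, 0), and "brfalse" becomes
   GT_EQ(v, 0); the GT_JTRUE wrapper is added further below. */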
11244 /* Fold comparison if we can */
11246 op1 = gtFoldExpr(op1);
11248 /* Try to fold the really simple cases like 'iconst *, ifne/ifeq'*/
11249 /* Don't make any blocks unreachable in import only mode */
11251 if ((op1->gtOper == GT_CNS_INT) && !compIsForImportOnly())
11253 /* gtFoldExpr() should prevent this as we don't want to make any blocks
11254 unreachable under compDbgCode */
11255 assert(!opts.compDbgCode);
11257 BBjumpKinds foldedJumpKind = (BBjumpKinds)(op1->gtIntCon.gtIconVal ? BBJ_ALWAYS : BBJ_NONE);
11258 assertImp((block->bbJumpKind == BBJ_COND) // normal case
11259 || (block->bbJumpKind == foldedJumpKind)); // this can happen if we are reimporting the
11260 // block for the second time
11262 block->bbJumpKind = foldedJumpKind;
11266 if (op1->gtIntCon.gtIconVal)
11268 printf("\nThe conditional jump becomes an unconditional jump to BB%02u\n",
11269 block->bbJumpDest->bbNum);
11273 printf("\nThe block falls through into the next BB%02u\n", block->bbNext->bbNum);
11280 op1 = gtNewOperNode(GT_JTRUE, TYP_VOID, op1);
11282 /* GT_JTRUE is handled specially for non-empty stacks. See 'addStmt'
11283 in impImportBlock(block). For correct line numbers, spill stack. */
11285 if (opts.compDbgCode && impCurStmtOffs != BAD_IL_OFFSET)
11287 impSpillStackEnsure(true);
11314 if (tiVerificationNeeded)
11316 verVerifyCond(impStackTop(1).seTypeInfo, impStackTop().seTypeInfo, opcode);
11317 tiRetVal = typeInfo(TI_INT);
11320 op2 = impPopStack().val;
11321 op1 = impPopStack().val;
11323 #ifdef _TARGET_64BIT_
11324 if (varTypeIsI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_INT))
11326 op2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
11328 else if (varTypeIsI(op2->TypeGet()) && (genActualType(op1->TypeGet()) == TYP_INT))
11330 op1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
11332 #endif // _TARGET_64BIT_
11334 assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
11335 varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet()) ||
11336 varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
11338 /* Create the comparison node */
11340 op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
11342 /* TODO: setting both flags when only one is appropriate */
11343 if (opcode == CEE_CGT_UN || opcode == CEE_CLT_UN)
11345 op1->gtFlags |= GTF_RELOP_NAN_UN | GTF_UNSIGNED;
11348 impPushOnStack(op1, tiRetVal);
11354 goto CMP_2_OPs_AND_BR;
11359 goto CMP_2_OPs_AND_BR;
11364 goto CMP_2_OPs_AND_BR_UN;
11369 goto CMP_2_OPs_AND_BR;
11374 goto CMP_2_OPs_AND_BR_UN;
11379 goto CMP_2_OPs_AND_BR;
11384 goto CMP_2_OPs_AND_BR_UN;
11389 goto CMP_2_OPs_AND_BR;
11394 goto CMP_2_OPs_AND_BR_UN;
11399 goto CMP_2_OPs_AND_BR_UN;
11401 CMP_2_OPs_AND_BR_UN:
11404 goto CMP_2_OPs_AND_BR_ALL;
11408 goto CMP_2_OPs_AND_BR_ALL;
11409 CMP_2_OPs_AND_BR_ALL:
11411 if (tiVerificationNeeded)
11413 verVerifyCond(impStackTop(1).seTypeInfo, impStackTop().seTypeInfo, opcode);
11416 /* Pull two values */
11417 op2 = impPopStack().val;
11418 op1 = impPopStack().val;
11420 #ifdef _TARGET_64BIT_
11421 if ((op1->TypeGet() == TYP_I_IMPL) && (genActualType(op2->TypeGet()) == TYP_INT))
11423 op2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
11425 else if ((op2->TypeGet() == TYP_I_IMPL) && (genActualType(op1->TypeGet()) == TYP_INT))
11427 op1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
11429 #endif // _TARGET_64BIT_
11431 assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
11432 varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet()) ||
11433 varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
11435 if (!opts.MinOpts() && !opts.compDbgCode && block->bbJumpDest == block->bbNext)
11437 block->bbJumpKind = BBJ_NONE;
11439 if (op1->gtFlags & GTF_GLOB_EFFECT)
11441 impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
11442 "Branch to next Optimization, op1 side effect"));
11443 impAppendTree(gtUnusedValNode(op1), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
11445 if (op2->gtFlags & GTF_GLOB_EFFECT)
11447 impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
11448 "Branch to next Optimization, op2 side effect"));
11449 impAppendTree(gtUnusedValNode(op2), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
11453 if ((op1->gtFlags | op2->gtFlags) & GTF_GLOB_EFFECT)
11455 impNoteLastILoffs();
11460 #if !FEATURE_X87_DOUBLES
11461 // We can generate a compare of different-sized floating point op1 and op2
11462 // We insert a cast
11464 if (varTypeIsFloating(op1->TypeGet()))
11466 if (op1->TypeGet() != op2->TypeGet())
11468 assert(varTypeIsFloating(op2->TypeGet()));
11470 // say op1=double, op2=float. To avoid loss of precision
11471 // while comparing, op2 is converted to double and double
11472 // comparison is done.
11473 if (op1->TypeGet() == TYP_DOUBLE)
11475 // We insert a cast of op2 to TYP_DOUBLE
11476 op2 = gtNewCastNode(TYP_DOUBLE, op2, TYP_DOUBLE);
11478 else if (op2->TypeGet() == TYP_DOUBLE)
11480 // We insert a cast of op1 to TYP_DOUBLE
11481 op1 = gtNewCastNode(TYP_DOUBLE, op1, TYP_DOUBLE);
11485 #endif // !FEATURE_X87_DOUBLES
11487 /* Create and append the operator */
11489 op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
11493 op1->gtFlags |= GTF_UNSIGNED;
11498 op1->gtFlags |= GTF_RELOP_NAN_UN;
11504 assert(!compIsForInlining());
11506 if (tiVerificationNeeded)
11508 Verify(impStackTop().seTypeInfo.IsType(TI_INT), "Bad switch val");
11510 /* Pop the switch value off the stack */
11511 op1 = impPopStack().val;
11512 assertImp(genActualTypeIsIntOrI(op1->TypeGet()));
11514 #ifdef _TARGET_64BIT_
11515 // Widen 'op1' on 64-bit targets
11516 if (op1->TypeGet() != TYP_I_IMPL)
11518 if (op1->OperGet() == GT_CNS_INT)
11520 op1->gtType = TYP_I_IMPL;
11524 op1 = gtNewCastNode(TYP_I_IMPL, op1, TYP_I_IMPL);
11527 #endif // _TARGET_64BIT_
11528 assert(genActualType(op1->TypeGet()) == TYP_I_IMPL);
11530 /* We can create a switch node */
11532 op1 = gtNewOperNode(GT_SWITCH, TYP_VOID, op1);
11534 val = (int)getU4LittleEndian(codeAddr);
11535 codeAddr += 4 + val * 4; // skip over the switch-table
11539 /************************** Casting OPCODES ***************************/
11541 case CEE_CONV_OVF_I1:
11544 case CEE_CONV_OVF_I2:
11545 lclTyp = TYP_SHORT;
11547 case CEE_CONV_OVF_I:
11548 lclTyp = TYP_I_IMPL;
11550 case CEE_CONV_OVF_I4:
11553 case CEE_CONV_OVF_I8:
11557 case CEE_CONV_OVF_U1:
11558 lclTyp = TYP_UBYTE;
11560 case CEE_CONV_OVF_U2:
11563 case CEE_CONV_OVF_U:
11564 lclTyp = TYP_U_IMPL;
11566 case CEE_CONV_OVF_U4:
11569 case CEE_CONV_OVF_U8:
11570 lclTyp = TYP_ULONG;
11573 case CEE_CONV_OVF_I1_UN:
11576 case CEE_CONV_OVF_I2_UN:
11577 lclTyp = TYP_SHORT;
11579 case CEE_CONV_OVF_I_UN:
11580 lclTyp = TYP_I_IMPL;
11582 case CEE_CONV_OVF_I4_UN:
11585 case CEE_CONV_OVF_I8_UN:
11589 case CEE_CONV_OVF_U1_UN:
11590 lclTyp = TYP_UBYTE;
11592 case CEE_CONV_OVF_U2_UN:
11595 case CEE_CONV_OVF_U_UN:
11596 lclTyp = TYP_U_IMPL;
11598 case CEE_CONV_OVF_U4_UN:
11601 case CEE_CONV_OVF_U8_UN:
11602 lclTyp = TYP_ULONG;
11607 goto CONV_OVF_COMMON;
11610 goto CONV_OVF_COMMON;
11620 lclTyp = TYP_SHORT;
11623 lclTyp = TYP_I_IMPL;
11633 lclTyp = TYP_UBYTE;
11638 #if (REGSIZE_BYTES == 8)
11640 lclTyp = TYP_U_IMPL;
11644 lclTyp = TYP_U_IMPL;
11651 lclTyp = TYP_ULONG;
11655 lclTyp = TYP_FLOAT;
11658 lclTyp = TYP_DOUBLE;
11661 case CEE_CONV_R_UN:
11662 lclTyp = TYP_DOUBLE;
11676 // just check that we have a number on the stack
11677 if (tiVerificationNeeded)
11679 const typeInfo& tiVal = impStackTop().seTypeInfo;
11680 Verify(tiVal.IsNumberType(), "bad arg");
11682 #ifdef _TARGET_64BIT_
11683 bool isNative = false;
11687 case CEE_CONV_OVF_I:
11688 case CEE_CONV_OVF_I_UN:
11690 case CEE_CONV_OVF_U:
11691 case CEE_CONV_OVF_U_UN:
11695 // leave 'isNative' = false;
11700 tiRetVal = typeInfo::nativeInt();
11703 #endif // _TARGET_64BIT_
11705 tiRetVal = typeInfo(lclTyp).NormaliseForStack();
11709 // Only conversions from FLOAT or DOUBLE to an integer type,
11710 // and conversions from ULONG (or LONG on ARM) to DOUBLE, are morphed to calls.
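// For example (illustrative): conv.r.un applied to a 64-bit operand ends up as a helper call
// during morph (e.g. CORINFO_HELP_ULNG2DBL), so 'callNode' is set and the cast node is
// allocated large enough to be turned into a call.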
11712 if (varTypeIsFloating(lclTyp))
11714 callNode = varTypeIsLong(impStackTop().val) || uns // uint->dbl gets turned into uint->long->dbl
11715 #ifdef _TARGET_64BIT_
11716 // TODO-ARM64-Bug?: This was AMD64; I enabled it for ARM64 also. OK?
11717 // TYP_BYREF could be used as TYP_I_IMPL which is long.
11718 // TODO-CQ: remove this when we lower casts long/ulong --> float/double
11719 // and generate SSE2 code instead of going through helper calls.
11720 || (impStackTop().val->TypeGet() == TYP_BYREF)
11726 callNode = varTypeIsFloating(impStackTop().val->TypeGet());
11729 // At this point uns, ovf, callNode all set
11731 op1 = impPopStack().val;
11732 impBashVarAddrsToI(op1);
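// If the value being narrowed is already masked by an AND with a suitable constant, the
// cast (or the mask) is redundant. For example (illustrative): "(x & 0x3F)" followed by
// conv.i1 -- the AND already limits the value to [0..0x3F], so the narrowing cast below
// can simply be dropped.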
11734 if (varTypeIsSmall(lclTyp) && !ovfl && op1->gtType == TYP_INT && op1->gtOper == GT_AND)
11736 op2 = op1->gtOp.gtOp2;
11738 if (op2->gtOper == GT_CNS_INT)
11740 ssize_t ival = op2->gtIntCon.gtIconVal;
11741 ssize_t mask, umask;
11757 assert(!"unexpected type");
11761 if (((ival & umask) == ival) || ((ival & mask) == ival && uns))
11763 /* Toss the cast, it's a waste of time */
11765 impPushOnStack(op1, tiRetVal);
11768 else if (ival == mask)
11770 /* Toss the masking, it's a waste of time, since
11771 we sign-extend from the small value anyway */
11773 op1 = op1->gtOp.gtOp1;
11778 /* The 'op2' sub-operand of a cast is the 'real' type number,
11779 since the result of a cast to one of the 'small' integer
11780 types is an integer.
11783 type = genActualType(lclTyp);
11785 #if SMALL_TREE_NODES
11788 op1 = gtNewCastNodeL(type, op1, lclTyp);
11791 #endif // SMALL_TREE_NODES
11793 op1 = gtNewCastNode(type, op1, lclTyp);
11798 op1->gtFlags |= (GTF_OVERFLOW | GTF_EXCEPT);
11802 op1->gtFlags |= GTF_UNSIGNED;
11804 impPushOnStack(op1, tiRetVal);
11808 if (tiVerificationNeeded)
11810 tiRetVal = impStackTop().seTypeInfo;
11811 Verify(tiRetVal.IsNumberType(), "Bad arg");
11814 op1 = impPopStack().val;
11815 impBashVarAddrsToI(op1, nullptr);
11816 impPushOnStack(gtNewOperNode(GT_NEG, genActualType(op1->gtType), op1), tiRetVal);
11820 if (tiVerificationNeeded)
11825 /* Pull the top value from the stack */
11827 op1 = impPopStack(clsHnd).val;
11829 /* Get hold of the type of the value being duplicated */
11831 lclTyp = genActualType(op1->gtType);
11833 /* Does the value have any side effects? */
11835 if ((op1->gtFlags & GTF_SIDE_EFFECT) || opts.compDbgCode)
11837 // Since we are throwing away the value, just normalize
11838 // it to its address. This is more efficient.
11840 if (varTypeIsStruct(op1))
11842 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
11843 // Non-calls, such as obj or ret_expr, have to go through this.
11844 // Calls with large struct return value have to go through this.
11845 // Helper calls with small struct return value also have to go
11846 // through this since they do not follow Unix calling convention.
11847 if (op1->gtOper != GT_CALL || !IsMultiRegReturnedType(clsHnd) ||
11848 op1->AsCall()->gtCallType == CT_HELPER)
11849 #endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
11851 op1 = impGetStructAddr(op1, clsHnd, (unsigned)CHECK_SPILL_ALL, false);
11855 // If op1 is a non-overflow cast, throw it away since it is useless.
11856 // Another reason for throwing away the useless cast is in the context of
11857 // implicit tail calls when the operand of pop is GT_CAST(GT_CALL(..)).
11858 // The cast gets added as part of importing GT_CALL, which gets in the way
11859 // of fgMorphCall() on the forms of tail call nodes that we assert.
11860 if ((op1->gtOper == GT_CAST) && !op1->gtOverflow())
11862 op1 = op1->gtOp.gtOp1;
11865 // If 'op1' is an expression, create an assignment node.
11866 // Helps analyses (like CSE) to work fine.
11868 if (op1->gtOper != GT_CALL)
11870 op1 = gtUnusedValNode(op1);
11873 /* Append the value to the tree list */
11877 /* No side effects - just throw the <BEEP> thing away */
11882 if (tiVerificationNeeded)
11884 // Dup could start the beginning of a delegate creation sequence, so remember that
11885 delegateCreateStart = codeAddr - 1;
11889 // Convert a (dup, stloc) sequence into a (stloc, ldloc) sequence in the following cases:
11890 // - If this is non-debug code - so that CSE will recognize the two as equal.
11891 // This helps eliminate a redundant bounds check in cases such as:
11892 // ariba[i+3] += some_value;
11893 // - If the top of the stack is a non-leaf that may be expensive to clone.
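// Illustrative IL for the rewrite (hypothetical local V_1, not from this method):
//     dup            -->    stloc.1
//     stloc.1               ldloc.1
// The value is still both stored and left on the stack, but as a reloadable local that CSE
// can recognize, instead of a cloned expression tree.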
11895 if (codeAddr < codeEndp)
11897 OPCODE nextOpcode = (OPCODE)getU1LittleEndian(codeAddr);
11898 if (impIsAnySTLOC(nextOpcode))
11900 if (!opts.compDbgCode)
11902 insertLdloc = true;
11905 GenTree* stackTop = impStackTop().val;
11906 if (!stackTop->IsIntegralConst(0) && !stackTop->IsFPZero() && !stackTop->IsLocal())
11908 insertLdloc = true;
11914 /* Pull the top value from the stack */
11915 op1 = impPopStack(tiRetVal);
11917 /* Clone the value */
11918 op1 = impCloneExpr(op1, &op2, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_ALL,
11919 nullptr DEBUGARG("DUP instruction"));
11921 /* Either the tree started with no global effects, or impCloneExpr
11922 evaluated the tree to a temp and returned two copies of that
11923 temp. Either way, neither op1 nor op2 should have side effects.
11925 assert(!(op1->gtFlags & GTF_GLOB_EFFECT) && !(op2->gtFlags & GTF_GLOB_EFFECT));
11927 /* Push the tree/temp back on the stack */
11928 impPushOnStack(op1, tiRetVal);
11930 /* Push the copy on the stack */
11931 impPushOnStack(op2, tiRetVal);
11939 lclTyp = TYP_SHORT;
11948 lclTyp = TYP_I_IMPL;
11950 case CEE_STIND_REF:
11954 lclTyp = TYP_FLOAT;
11957 lclTyp = TYP_DOUBLE;
11961 if (tiVerificationNeeded)
11963 typeInfo instrType(lclTyp);
11964 #ifdef _TARGET_64BIT_
11965 if (opcode == CEE_STIND_I)
11967 instrType = typeInfo::nativeInt();
11969 #endif // _TARGET_64BIT_
11970 verVerifySTIND(impStackTop(1).seTypeInfo, impStackTop(0).seTypeInfo, instrType);
11974 compUnsafeCastUsed = true; // Have to go conservative
11979 op2 = impPopStack().val; // value to store
11980 op1 = impPopStack().val; // address to store to
11982 // you can indirect off of a TYP_I_IMPL (if we are in C) or a BYREF
11983 assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
11985 impBashVarAddrsToI(op1, op2);
11987 op2 = impImplicitR4orR8Cast(op2, lclTyp);
11989 #ifdef _TARGET_64BIT_
11990 // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
11991 if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType))
11993 op2->gtType = TYP_I_IMPL;
11997 // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
11999 if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT))
12001 assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
12002 op2 = gtNewCastNode(TYP_INT, op2, TYP_INT);
12004 // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
12006 if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT))
12008 assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
12009 op2 = gtNewCastNode(TYP_I_IMPL, op2, TYP_I_IMPL);
12012 #endif // _TARGET_64BIT_
12014 if (opcode == CEE_STIND_REF)
12016 // STIND_REF can be used to store TYP_INT, TYP_I_IMPL, TYP_REF, or TYP_BYREF
12017 assertImp(varTypeIsIntOrI(op2->gtType) || varTypeIsGC(op2->gtType));
12018 lclTyp = genActualType(op2->TypeGet());
12021 // Check target type.
12023 if (op2->gtType == TYP_BYREF || lclTyp == TYP_BYREF)
12025 if (op2->gtType == TYP_BYREF)
12027 assertImp(lclTyp == TYP_BYREF || lclTyp == TYP_I_IMPL);
12029 else if (lclTyp == TYP_BYREF)
12031 assertImp(op2->gtType == TYP_BYREF || varTypeIsIntOrI(op2->gtType));
12036 assertImp(genActualType(op2->gtType) == genActualType(lclTyp) ||
12037 ((lclTyp == TYP_I_IMPL) && (genActualType(op2->gtType) == TYP_INT)) ||
12038 (varTypeIsFloating(op2->gtType) && varTypeIsFloating(lclTyp)));
12042 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
12044 // stind could point anywhere, for example a boxed class static int
12045 op1->gtFlags |= GTF_IND_TGTANYWHERE;
12047 if (prefixFlags & PREFIX_VOLATILE)
12049 assert(op1->OperGet() == GT_IND);
12050 op1->gtFlags |= GTF_DONT_CSE; // Can't CSE a volatile
12051 op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
12052 op1->gtFlags |= GTF_IND_VOLATILE;
12055 if (prefixFlags & PREFIX_UNALIGNED)
12057 assert(op1->OperGet() == GT_IND);
12058 op1->gtFlags |= GTF_IND_UNALIGNED;
12061 op1 = gtNewAssignNode(op1, op2);
12062 op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
12064 // Spill side-effects AND global-data-accesses
12065 if (verCurrentState.esStackDepth > 0)
12067 impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STIND"));
12076 lclTyp = TYP_SHORT;
12085 case CEE_LDIND_REF:
12089 lclTyp = TYP_I_IMPL;
12092 lclTyp = TYP_FLOAT;
12095 lclTyp = TYP_DOUBLE;
12098 lclTyp = TYP_UBYTE;
12105 if (tiVerificationNeeded)
12107 typeInfo lclTiType(lclTyp);
12108 #ifdef _TARGET_64BIT_
12109 if (opcode == CEE_LDIND_I)
12111 lclTiType = typeInfo::nativeInt();
12113 #endif // _TARGET_64BIT_
12114 tiRetVal = verVerifyLDIND(impStackTop().seTypeInfo, lclTiType);
12115 tiRetVal.NormaliseForStack();
12119 compUnsafeCastUsed = true; // Have to go conservative
12124 op1 = impPopStack().val; // address to load from
12125 impBashVarAddrsToI(op1);
12127 #ifdef _TARGET_64BIT_
12128 // Allow an upcast of op1 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
12130 if (genActualType(op1->gtType) == TYP_INT)
12132 assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
12133 op1 = gtNewCastNode(TYP_I_IMPL, op1, TYP_I_IMPL);
12137 assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
12139 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
12141 // ldind could point anywhere, for example a boxed class static int
12142 op1->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
12144 if (prefixFlags & PREFIX_VOLATILE)
12146 assert(op1->OperGet() == GT_IND);
12147 op1->gtFlags |= GTF_DONT_CSE; // Can't CSE a volatile
12148 op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
12149 op1->gtFlags |= GTF_IND_VOLATILE;
12152 if (prefixFlags & PREFIX_UNALIGNED)
12154 assert(op1->OperGet() == GT_IND);
12155 op1->gtFlags |= GTF_IND_UNALIGNED;
12158 impPushOnStack(op1, tiRetVal);
12162 case CEE_UNALIGNED:
12165 val = getU1LittleEndian(codeAddr);
12167 JITDUMP(" %u", val);
12168 if ((val != 1) && (val != 2) && (val != 4))
12170 BADCODE("Alignment unaligned. must be 1, 2, or 4");
12173 Verify(!(prefixFlags & PREFIX_UNALIGNED), "Multiple unaligned. prefixes");
12174 prefixFlags |= PREFIX_UNALIGNED;
12176 impValidateMemoryAccessOpcode(codeAddr, codeEndp, false);
12179 opcode = (OPCODE)getU1LittleEndian(codeAddr);
12180 codeAddr += sizeof(__int8);
12181 opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
12182 goto DECODE_OPCODE;
12186 Verify(!(prefixFlags & PREFIX_VOLATILE), "Multiple volatile. prefixes");
12187 prefixFlags |= PREFIX_VOLATILE;
12189 impValidateMemoryAccessOpcode(codeAddr, codeEndp, true);
12196 // Need to do a lookup here so that we perform an access check
12197 // and do a NOWAY if protections are violated
12198 _impResolveToken(CORINFO_TOKENKIND_Method);
12200 JITDUMP(" %08X", resolvedToken.token);
12202 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef*/,
12203 addVerifyFlag(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN)),
12206 // This check really only applies to intrinsic Array.Address methods
12207 if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE)
12209 NO_WAY("Currently do not support LDFTN of Parameterized functions");
12212 // Do this before DO_LDFTN since CEE_LDVIRTFN does it on its own.
12213 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12215 if (tiVerificationNeeded)
12217 // LDFTN could start the beginning of a delegate creation sequence, so remember that
12218 delegateCreateStart = codeAddr - 2;
12220 // check any constraints on the callee's class and type parameters
12221 VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
12222 "method has unsatisfied class constraints");
12223 VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(resolvedToken.hClass,
12224 resolvedToken.hMethod),
12225 "method has unsatisfied method constraints");
12227 mflags = callInfo.verMethodFlags;
12228 Verify(!(mflags & CORINFO_FLG_CONSTRUCTOR), "LDFTN on a constructor");
12232 op1 = impMethodPointer(&resolvedToken, &callInfo);
12233 if (compDonotInline())
12238 impPushOnStack(op1, typeInfo(resolvedToken.hMethod));
12243 case CEE_LDVIRTFTN:
12245 /* Get the method token */
12247 _impResolveToken(CORINFO_TOKENKIND_Method);
12249 JITDUMP(" %08X", resolvedToken.token);
12251 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef */,
12252 addVerifyFlag(combine(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN),
12253 CORINFO_CALLINFO_CALLVIRT)),
12256 // This check really only applies to intrinsic Array.Address methods
12257 if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE)
12259 NO_WAY("Currently do not support LDFTN of Parameterized functions");
12262 mflags = callInfo.methodFlags;
12264 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12266 if (compIsForInlining())
12268 if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL))
12270 compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDVIRTFN_ON_NON_VIRTUAL);
12275 CORINFO_SIG_INFO& ftnSig = callInfo.sig;
12277 if (tiVerificationNeeded)
12280 Verify(ftnSig.hasThis(), "ldvirtftn on a static method");
12281 Verify(!(mflags & CORINFO_FLG_CONSTRUCTOR), "LDVIRTFTN on a constructor");
12283 // JIT32 verifier rejects verifiable ldvirtftn pattern
12284 typeInfo declType =
12285 verMakeTypeInfo(resolvedToken.hClass, true); // Change TI_STRUCT to TI_REF when necessary
12287 typeInfo arg = impStackTop().seTypeInfo;
12288 Verify((arg.IsType(TI_REF) || arg.IsType(TI_NULL)) && tiCompatibleWith(arg, declType, true),
12291 CORINFO_CLASS_HANDLE instanceClassHnd = info.compClassHnd;
12292 if (!(arg.IsType(TI_NULL) || (mflags & CORINFO_FLG_STATIC)))
12294 instanceClassHnd = arg.GetClassHandleForObjRef();
12297 // check any constraints on the method's class and type parameters
12298 VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
12299 "method has unsatisfied class constraints");
12300 VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(resolvedToken.hClass,
12301 resolvedToken.hMethod),
12302 "method has unsatisfied method constraints");
12304 if (mflags & CORINFO_FLG_PROTECTED)
12306 Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
12307 "Accessing protected method through wrong type.");
12311 /* Get the object-ref */
12312 op1 = impPopStack().val;
12313 assertImp(op1->gtType == TYP_REF);
12315 if (opts.IsReadyToRun())
12317 if (callInfo.kind != CORINFO_VIRTUALCALL_LDVIRTFTN)
12319 if (op1->gtFlags & GTF_SIDE_EFFECT)
12321 op1 = gtUnusedValNode(op1);
12322 impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
12327 else if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL))
12329 if (op1->gtFlags & GTF_SIDE_EFFECT)
12331 op1 = gtUnusedValNode(op1);
12332 impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
12337 GenTreePtr fptr = impImportLdvirtftn(op1, &resolvedToken, &callInfo);
12338 if (compDonotInline())
12343 impPushOnStack(fptr, typeInfo(resolvedToken.hMethod));
12348 case CEE_CONSTRAINED:
12350 assertImp(sz == sizeof(unsigned));
12351 impResolveToken(codeAddr, &constrainedResolvedToken, CORINFO_TOKENKIND_Constrained);
12352 codeAddr += sizeof(unsigned); // prefix instructions must increment codeAddr manually
12353 JITDUMP(" (%08X) ", constrainedResolvedToken.token);
12355 Verify(!(prefixFlags & PREFIX_CONSTRAINED), "Multiple constrained. prefixes");
12356 prefixFlags |= PREFIX_CONSTRAINED;
12359 OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
12360 if (actualOpcode != CEE_CALLVIRT)
12362 BADCODE("constrained. has to be followed by callvirt");
12369 JITDUMP(" readonly.");
12371 Verify(!(prefixFlags & PREFIX_READONLY), "Multiple readonly. prefixes");
12372 prefixFlags |= PREFIX_READONLY;
12375 OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
12376 if (actualOpcode != CEE_LDELEMA && !impOpcodeIsCallOpcode(actualOpcode))
12378 BADCODE("readonly. has to be followed by ldelema or call");
12388 Verify(!(prefixFlags & PREFIX_TAILCALL_EXPLICIT), "Multiple tailcall. prefixes");
12389 prefixFlags |= PREFIX_TAILCALL_EXPLICIT;
12392 OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
12393 if (!impOpcodeIsCallOpcode(actualOpcode))
12395 BADCODE("tailcall. has to be followed by call, callvirt or calli");
12403 /* Since we will implicitly insert newObjThisPtr at the start of the
12404 argument list, spill any GTF_ORDER_SIDEEFF */
12405 impSpillSpecialSideEff();
12407 /* NEWOBJ does not respond to TAIL */
12408 prefixFlags &= ~PREFIX_TAILCALL_EXPLICIT;
12410 /* NEWOBJ does not respond to CONSTRAINED */
12411 prefixFlags &= ~PREFIX_CONSTRAINED;
12413 #if COR_JIT_EE_VERSION > 460
12414 _impResolveToken(CORINFO_TOKENKIND_NewObj);
12416 _impResolveToken(CORINFO_TOKENKIND_Method);
12419 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef*/,
12420 addVerifyFlag(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_ALLOWINSTPARAM)),
12423 if (compIsForInlining())
12425 if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
12427 // Check to see if this call violates the boundary.
12428 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_SECURITY);
12433 mflags = callInfo.methodFlags;
12435 if ((mflags & (CORINFO_FLG_STATIC | CORINFO_FLG_ABSTRACT)) != 0)
12437 BADCODE("newobj on static or abstract method");
12440 // Insert the security callout before any actual code is generated
12441 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12443 // There are three different cases for new
12444 // Object size is variable (depends on arguments)
12445 // 1) Object is an array (arrays treated specially by the EE)
12446 // 2) Object is some other variable sized object (e.g. String)
12447 // 3) Class Size can be determined beforehand (normal case)
12448 // In the first case, we need to call a NEWOBJ helper (multinewarray).
12449 // In the second case, we call the constructor with a null 'this' pointer.
12450 // In the third case, we allocate the memory, then call the constructor.
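// For example (illustrative only, not tied to any particular test case):
//   1) newobj instance void int32[,]::.ctor(int32, int32)      - array; imported via the NEWOBJ helper
//   2) newobj instance void System.String::.ctor(char*, int32) - variable-sized; ctor called with a null 'this'
//   3) newobj instance void SomeValueOrRefType::.ctor(int32)   - fixed size; allocate, then call the ctor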
12452 clsFlags = callInfo.classFlags;
12453 if (clsFlags & CORINFO_FLG_ARRAY)
12455 if (tiVerificationNeeded)
12457 CORINFO_CLASS_HANDLE elemTypeHnd;
12458 INDEBUG(CorInfoType corType =)
12459 info.compCompHnd->getChildType(resolvedToken.hClass, &elemTypeHnd);
12460 assert(!(elemTypeHnd == nullptr && corType == CORINFO_TYPE_VALUECLASS));
12461 Verify(elemTypeHnd == nullptr ||
12462 !(info.compCompHnd->getClassAttribs(elemTypeHnd) & CORINFO_FLG_CONTAINS_STACK_PTR),
12463 "newarr of byref-like objects");
12464 verVerifyCall(opcode, &resolvedToken, nullptr, ((prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0),
12465 ((prefixFlags & PREFIX_READONLY) != 0), delegateCreateStart, codeAddr - 1,
12466 &callInfo DEBUGARG(info.compFullName));
12468 // Arrays need to call the NEWOBJ helper.
12469 assertImp(clsFlags & CORINFO_FLG_VAROBJSIZE);
12471 impImportNewObjArray(&resolvedToken, &callInfo);
12472 if (compDonotInline())
12480 // At present this can only be String
12481 else if (clsFlags & CORINFO_FLG_VAROBJSIZE)
12483 if (IsTargetAbi(CORINFO_CORERT_ABI))
12485 // The dummy argument does not exist in CoreRT
12486 newObjThisPtr = nullptr;
12490 // This is the case for variable-sized objects that are not
12491 // arrays. In this case, call the constructor with a null 'this'
12493 newObjThisPtr = gtNewIconNode(0, TYP_REF);
12496 /* Remember that this basic block contains 'new' of an object */
12497 block->bbFlags |= BBF_HAS_NEWOBJ;
12498 optMethodFlags |= OMF_HAS_NEWOBJ;
12502 // This is the normal case where the size of the object is
12503 // fixed. Allocate the memory and call the constructor.
12505 // Note: We cannot add a peephole to avoid the use of a temp here
12506 // because we don't have enough interference info to detect when
12507 // the sources and the destination interfere, for example: s = new S(ref);
12509 // TODO: Find the correct place to introduce a general
12510 // reverse copy prop for struct return values from newobj or
12511 // any function returning structs.
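// Illustrative sketch of the current behavior (hypothetical struct 'S'): for "s = new S(ref x)" we emit
//     tmp = <zeroed S>; S::.ctor(&tmp, ref x); s = tmp;
// rather than constructing directly into 's', since 'ref x' could alias 's' and we lack the interference
// info to prove otherwise.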
12513 /* get a temporary for the new object */
12514 lclNum = lvaGrabTemp(true DEBUGARG("NewObj constructor temp"));
12516 // In the value class case we only need clsHnd for size calcs.
12518 // The lookup of the code pointer will be handled by CALL in this case
12519 if (clsFlags & CORINFO_FLG_VALUECLASS)
12521 if (compIsForInlining())
12523 // If value class has GC fields, inform the inliner. It may choose to
12524 // bail out on the inline.
12525 DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
12526 if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0)
12528 compInlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT);
12529 if (compInlineResult->IsFailure())
12534 // Do further notification in the case where the call site is rare;
12535 // some policies do not track the relative hotness of call sites for
12536 // "always" inline cases.
12537 if (impInlineInfo->iciBlock->isRunRarely())
12539 compInlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT);
12540 if (compInlineResult->IsFailure())
12548 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
12549 unsigned size = info.compCompHnd->getClassSize(resolvedToken.hClass);
12551 if (impIsPrimitive(jitTyp))
12553 lvaTable[lclNum].lvType = JITtype2varType(jitTyp);
12557 // The local variable itself is the allocated space.
12558 // Here we need unsafe value cls check, since the address of struct is taken for further use
12559 // and is potentially exploitable.
12560 lvaSetStruct(lclNum, resolvedToken.hClass, true /* unsafe value cls check */);
12563 // Append a tree to zero-out the temp
12564 newObjThisPtr = gtNewLclvNode(lclNum, lvaTable[lclNum].TypeGet());
12566 newObjThisPtr = gtNewBlkOpNode(newObjThisPtr, // Dest
12567 gtNewIconNode(0), // Value
12569 false, // isVolatile
12570 false); // not copyBlock
12571 impAppendTree(newObjThisPtr, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
12573 // Obtain the address of the temp
12575 gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewLclvNode(lclNum, lvaTable[lclNum].TypeGet()));
12579 #ifdef FEATURE_READYTORUN_COMPILER
12580 if (opts.IsReadyToRun())
12582 op1 = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_NEW, TYP_REF);
12583 usingReadyToRunHelper = (op1 != nullptr);
12586 if (!usingReadyToRunHelper)
12589 op1 = impParentClassTokenToHandle(&resolvedToken, nullptr, TRUE);
12590 if (op1 == nullptr)
12591 { // compDonotInline()
12595 // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
12596 // and the newfast call with a single call to a dynamic R2R cell that will:
12597 // 1) Load the context
12598 // 2) Perform the generic dictionary lookup and caching, and generate the appropriate
12600 // 3) Allocate and return the new object
12601 // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
12603 op1 = gtNewAllocObjNode(info.compCompHnd->getNewHelper(&resolvedToken, info.compMethodHnd),
12604 resolvedToken.hClass, TYP_REF, op1);
12607 // Remember that this basic block contains 'new' of an object
12608 block->bbFlags |= BBF_HAS_NEWOBJ;
12609 optMethodFlags |= OMF_HAS_NEWOBJ;
12611 // Append the assignment to the temp/local. We don't need to spill
12612 // anything, as we are just calling an EE/JIT helper which can only
12613 // cause an (async) OutOfMemoryException.
12615 // We assign the newly allocated object (by a GT_ALLOCOBJ node)
12616 // to a temp. Note that the pattern "temp = allocObj" is required
12617 // by ObjectAllocator phase to be able to determine GT_ALLOCOBJ nodes
12618 // without exhaustive walk over all expressions.
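// Rough sketch of the IR shape produced on this path (names are illustrative):
//     tmpN = GT_ALLOCOBJ(clsHnd)          // appended below via impAssignTempGen
//     CALL  .ctor(tmpN, ...)              // emitted later by the common CALL path
// ObjectAllocator then finds allocations simply by looking for "lclVar = GT_ALLOCOBJ" assignments.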
12620 impAssignTempGen(lclNum, op1, (unsigned)CHECK_SPILL_NONE);
12622 newObjThisPtr = gtNewLclvNode(lclNum, TYP_REF);
12629 /* CALLI does not respond to CONSTRAINED */
12630 prefixFlags &= ~PREFIX_CONSTRAINED;
12632 if (compIsForInlining())
12634 // CALLI doesn't have a method handle, so assume the worst.
12635 if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
12637 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_CALLI);
12647 // We can't call getCallInfo on the token from a CALLI, but we need it in
12648 // many other places. We unfortunately embed that knowledge here.
12649 if (opcode != CEE_CALLI)
12651 _impResolveToken(CORINFO_TOKENKIND_Method);
12653 eeGetCallInfo(&resolvedToken,
12654 (prefixFlags & PREFIX_CONSTRAINED) ? &constrainedResolvedToken : nullptr,
12655 // this is how impImportCall invokes getCallInfo
12657 combine(combine(CORINFO_CALLINFO_ALLOWINSTPARAM, CORINFO_CALLINFO_SECURITYCHECKS),
12658 (opcode == CEE_CALLVIRT) ? CORINFO_CALLINFO_CALLVIRT
12659 : CORINFO_CALLINFO_NONE)),
12664 // Suppress uninitialized use warning.
12665 memset(&resolvedToken, 0, sizeof(resolvedToken));
12666 memset(&callInfo, 0, sizeof(callInfo));
12668 resolvedToken.token = getU4LittleEndian(codeAddr);
12671 CALL: // memberRef should be set.
12672 // newObjThisPtr should be set for CEE_NEWOBJ
12674 JITDUMP(" %08X", resolvedToken.token);
12675 constraintCall = (prefixFlags & PREFIX_CONSTRAINED) != 0;
12677 bool newBBcreatedForTailcallStress;
12679 newBBcreatedForTailcallStress = false;
12681 if (compIsForInlining())
12683 if (compDonotInline())
12687 // We rule out inlinees with explicit tail calls in fgMakeBasicBlocks.
12688 assert((prefixFlags & PREFIX_TAILCALL_EXPLICIT) == 0);
12692 if (compTailCallStress())
12694 // Have we created a new BB after the "call" instruction in fgMakeBasicBlocks()?
12695 // Tail call stress only recognizes call+ret patterns and forces them to be
12696 // explicit tail prefixed calls. Also fgMakeBasicBlocks() under tail call stress
12697 // doesn't import the 'ret' opcode following the call into the basic block containing
12698 // the call; instead it imports it into a new basic block. Note that fgMakeBasicBlocks()
12699 // has already checked that there is an opcode following the call, so it is
12700 // safe here to read the next opcode without a bounds check.
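// For example, under tail call stress an IL sequence such as
//     call   SomeMethod   // hypothetical callee
//     ret
// is imported as if it were "tail. call SomeMethod; ret", provided the constraint check below says
// such a tail call would be legal.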
12701 newBBcreatedForTailcallStress =
12702 impOpcodeIsCallOpcode(opcode) && // Current opcode is a CALL, (not a CEE_NEWOBJ). So, don't
12703 // make it jump to RET.
12704 (OPCODE)getU1LittleEndian(codeAddr + sz) == CEE_RET; // Next opcode is a CEE_RET
12706 if (newBBcreatedForTailcallStress &&
12707 !(prefixFlags & PREFIX_TAILCALL_EXPLICIT) && // User hasn't set "tail." prefix yet.
12708 verCheckTailCallConstraint(opcode, &resolvedToken,
12709 constraintCall ? &constrainedResolvedToken : nullptr,
12710 true) // Is it legal to do a tailcall?
12713 // Stress the tailcall.
12714 JITDUMP(" (Tailcall stress: prefixFlags |= PREFIX_TAILCALL_EXPLICIT)");
12715 prefixFlags |= PREFIX_TAILCALL_EXPLICIT;
12719 // Note that when running under tail call stress, a call will be marked as explicit tail prefixed
12720 // hence will not be considered for implicit tail calling.
12721 bool isRecursive = (callInfo.hMethod == info.compMethodHnd);
12722 if (impIsImplicitTailCallCandidate(opcode, codeAddr + sz, codeEndp, prefixFlags, isRecursive))
12724 JITDUMP(" (Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)");
12725 prefixFlags |= PREFIX_TAILCALL_IMPLICIT;
12729 // Treat this call as tail call for verification only if "tail" prefixed (i.e. explicit tail call).
12730 explicitTailCall = (prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0;
12731 readonlyCall = (prefixFlags & PREFIX_READONLY) != 0;
12733 if (opcode != CEE_CALLI && opcode != CEE_NEWOBJ)
12735 // All calls and delegates need a security callout.
12736 // For delegates, this is the call to the delegate constructor, not the access check on the
12738 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12740 #if 0 // DevDiv 410397 - This breaks too many obfuscated apps to do this in an in-place release
12742 // DevDiv 291703 - we need to check for accessibility between the caller of InitializeArray
12743 // and the field it is reading, thus it is now unverifiable to not immediately precede with
12744 // ldtoken <field token>, and we now check accessibility
12745 if ((callInfo.methodFlags & CORINFO_FLG_INTRINSIC) &&
12746 (info.compCompHnd->getIntrinsicID(callInfo.hMethod) == CORINFO_INTRINSIC_InitializeArray))
12748 if (prevOpcode != CEE_LDTOKEN)
12750 Verify(prevOpcode == CEE_LDTOKEN, "Need ldtoken for InitializeArray");
12754 assert(lastLoadToken != NULL);
12755 // Now that we know we have a token, verify that it is accessible for loading
12756 CORINFO_RESOLVED_TOKEN resolvedLoadField;
12757 impResolveToken(lastLoadToken, &resolvedLoadField, CORINFO_TOKENKIND_Field);
12758 eeGetFieldInfo(&resolvedLoadField, CORINFO_ACCESS_INIT_ARRAY, &fieldInfo);
12759 impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
12763 #endif // DevDiv 410397
12766 if (tiVerificationNeeded)
12768 verVerifyCall(opcode, &resolvedToken, constraintCall ? &constrainedResolvedToken : nullptr,
12769 explicitTailCall, readonlyCall, delegateCreateStart, codeAddr - 1,
12770 &callInfo DEBUGARG(info.compFullName));
12773 // Insert delegate callout here.
12774 if (opcode == CEE_NEWOBJ && (mflags & CORINFO_FLG_CONSTRUCTOR) && (clsFlags & CORINFO_FLG_DELEGATE))
12777 // We should do this only if verification is enabled
12778 // If verification is disabled, delegateCreateStart will not be initialized correctly
12779 if (tiVerificationNeeded)
12781 mdMemberRef delegateMethodRef = mdMemberRefNil;
12782 // We should get here only for well formed delegate creation.
12783 assert(verCheckDelegateCreation(delegateCreateStart, codeAddr - 1, delegateMethodRef));
12787 #ifdef FEATURE_CORECLR
12788 // In coreclr the delegate transparency rule needs to be enforced even if verification is disabled
12789 typeInfo tiActualFtn = impStackTop(0).seTypeInfo;
12790 CORINFO_METHOD_HANDLE delegateMethodHandle = tiActualFtn.GetMethod2();
12792 impInsertCalloutForDelegate(info.compMethodHnd, delegateMethodHandle, resolvedToken.hClass);
12793 #endif // FEATURE_CORECLR
12796 callTyp = impImportCall(opcode, &resolvedToken, constraintCall ? &constrainedResolvedToken : nullptr,
12797 newObjThisPtr, prefixFlags, &callInfo, opcodeOffs);
12798 if (compDonotInline())
12803 if (explicitTailCall || newBBcreatedForTailcallStress) // If newBBcreatedForTailcallStress is true, we
12804 // have created a new BB after the "call"
12805 // instruction in fgMakeBasicBlocks(). So we need to jump to RET regardless.
12807 assert(!compIsForInlining());
12819 BOOL isLoadAddress = (opcode == CEE_LDFLDA || opcode == CEE_LDSFLDA);
12820 BOOL isLoadStatic = (opcode == CEE_LDSFLD || opcode == CEE_LDSFLDA);
12822 /* Get the CP_Fieldref index */
12823 assertImp(sz == sizeof(unsigned));
12825 _impResolveToken(CORINFO_TOKENKIND_Field);
12827 JITDUMP(" %08X", resolvedToken.token);
12829 int aflags = isLoadAddress ? CORINFO_ACCESS_ADDRESS : CORINFO_ACCESS_GET;
12831 GenTreePtr obj = nullptr;
12832 typeInfo* tiObj = nullptr;
12833 CORINFO_CLASS_HANDLE objType = nullptr; // used for fields
12835 if (opcode == CEE_LDFLD || opcode == CEE_LDFLDA)
12837 tiObj = &impStackTop().seTypeInfo;
12838 obj = impPopStack(objType).val;
12840 if (impIsThis(obj))
12842 aflags |= CORINFO_ACCESS_THIS;
12844 // An optimization for Contextful classes:
12845 // we unwrap the proxy when we have a 'this reference'
12847 if (info.compUnwrapContextful)
12849 aflags |= CORINFO_ACCESS_UNWRAP;
12854 eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo);
12856 // Figure out the type of the member. We always call canAccessField, so you always need this
12858 CorInfoType ciType = fieldInfo.fieldType;
12859 clsHnd = fieldInfo.structType;
12861 lclTyp = JITtype2varType(ciType);
12863 #ifdef _TARGET_AMD64_
12864 noway_assert(varTypeIsIntegralOrI(lclTyp) || varTypeIsFloating(lclTyp) || lclTyp == TYP_STRUCT);
12865 #endif // _TARGET_AMD64
12867 if (compIsForInlining())
12869 switch (fieldInfo.fieldAccessor)
12871 case CORINFO_FIELD_INSTANCE_HELPER:
12872 case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
12873 case CORINFO_FIELD_STATIC_ADDR_HELPER:
12874 case CORINFO_FIELD_STATIC_TLS:
12876 compInlineResult->NoteFatal(InlineObservation::CALLEE_LDFLD_NEEDS_HELPER);
12879 case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
12880 #if COR_JIT_EE_VERSION > 460
12881 case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
12883 /* We may be able to inline the field accessors in specific instantiations of generic
12885 compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDFLD_NEEDS_HELPER);
12892 if (!isLoadAddress && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && lclTyp == TYP_STRUCT &&
12895 if ((info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd) == CORINFO_TYPE_UNDEF) &&
12896 !(info.compFlags & CORINFO_FLG_FORCEINLINE))
12898 // Loading a static valuetype field usually will cause a JitHelper to be called
12899 // for the static base. This will bloat the code.
12900 compInlineResult->Note(InlineObservation::CALLEE_LDFLD_STATIC_VALUECLASS);
12902 if (compInlineResult->IsFailure())
12910 tiRetVal = verMakeTypeInfo(ciType, clsHnd);
12913 tiRetVal.MakeByRef();
12917 tiRetVal.NormaliseForStack();
12920 // Perform this check always to ensure that we get field access exceptions even with
12921 // SkipVerification.
12922 impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
12924 if (tiVerificationNeeded)
12926 // You can also pass the unboxed struct to LDFLD
12927 BOOL bAllowPlainValueTypeAsThis = FALSE;
12928 if (opcode == CEE_LDFLD && impIsValueType(tiObj))
12930 bAllowPlainValueTypeAsThis = TRUE;
12933 verVerifyField(&resolvedToken, fieldInfo, tiObj, isLoadAddress, bAllowPlainValueTypeAsThis);
12935 // If we're doing this on a heap object or from a 'safe' byref
12936 // then the result is a safe byref too
12937 if (isLoadAddress) // load address
12939 if (fieldInfo.fieldFlags &
12940 CORINFO_FLG_FIELD_STATIC) // statics marked as safe will have permanent home
12942 if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_SAFESTATIC_BYREF_RETURN)
12944 tiRetVal.SetIsPermanentHomeByRef();
12947 else if (tiObj->IsObjRef() || tiObj->IsPermanentHomeByRef())
12949 // ldflda of byref is safe if done on a gc object or on a
12951 tiRetVal.SetIsPermanentHomeByRef();
12957 // tiVerificationNeeded is false.
12958 // Raise InvalidProgramException if static load accesses non-static field
12959 if (isLoadStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0))
12961 BADCODE("static access on an instance field");
12965 // We are using ldfld/a on a static field. We allow it, but need to get side-effect from obj.
12966 if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr)
12968 if (obj->gtFlags & GTF_SIDE_EFFECT)
12970 obj = gtUnusedValNode(obj);
12971 impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
12976 /* Preserve 'small' int types */
12977 if (lclTyp > TYP_INT)
12979 lclTyp = genActualType(lclTyp);
12982 bool usesHelper = false;
12984 switch (fieldInfo.fieldAccessor)
12986 case CORINFO_FIELD_INSTANCE:
12987 #ifdef FEATURE_READYTORUN_COMPILER
12988 case CORINFO_FIELD_INSTANCE_WITH_BASE:
12991 bool nullcheckNeeded = false;
12993 obj = impCheckForNullPointer(obj);
12995 if (isLoadAddress && (obj->gtType == TYP_BYREF) && fgAddrCouldBeNull(obj))
12997 nullcheckNeeded = true;
13000 // If the object is a struct, what we really want is
13001 // for the field to operate on the address of the struct.
13002 if (!varTypeGCtype(obj->TypeGet()) && impIsValueType(tiObj))
13004 assert(opcode == CEE_LDFLD && objType != nullptr);
13006 obj = impGetStructAddr(obj, objType, (unsigned)CHECK_SPILL_ALL, true);
13009 /* Create the data member node */
13010 op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset, nullcheckNeeded);
13012 #ifdef FEATURE_READYTORUN_COMPILER
13013 if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE)
13015 op1->gtField.gtFieldLookup = fieldInfo.fieldLookup;
13019 op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT);
13021 if (fgAddrCouldBeNull(obj))
13023 op1->gtFlags |= GTF_EXCEPT;
13026 // If gtFldObj is a BYREF then our target is a value class and
13028 // it could point anywhere, for example a boxed class static int
13028 if (obj->gtType == TYP_BYREF)
13030 op1->gtFlags |= GTF_IND_TGTANYWHERE;
13033 DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
13034 if (StructHasOverlappingFields(typeFlags))
13036 op1->gtField.gtFldMayOverlap = true;
13039 // wrap it in an address-of operator if necessary
13042 op1 = gtNewOperNode(GT_ADDR,
13043 (var_types)(varTypeIsGC(obj->TypeGet()) ? TYP_BYREF : TYP_I_IMPL), op1);
13047 if (compIsForInlining() &&
13048 impInlineIsGuaranteedThisDerefBeforeAnySideEffects(nullptr, obj,
13049 impInlineInfo->inlArgInfo))
13051 impInlineInfo->thisDereferencedFirst = true;
13057 case CORINFO_FIELD_STATIC_TLS:
13058 #ifdef _TARGET_X86_
13059 // Legacy TLS access is implemented as intrinsic on x86 only
13061 /* Create the data member node */
13062 op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset);
13063 op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation
13067 op1 = gtNewOperNode(GT_ADDR, (var_types)TYP_I_IMPL, op1);
13071 fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER;
13076 case CORINFO_FIELD_STATIC_ADDR_HELPER:
13077 case CORINFO_FIELD_INSTANCE_HELPER:
13078 case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
13079 op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp,
13084 case CORINFO_FIELD_STATIC_ADDRESS:
13085 // Replace static read-only fields with constant if possible
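// Sketch of the intent (hypothetical field): for "static readonly int X = 42;", once the EE reports
// that the class initializer has already run, we can read the field's value here and import "ldsfld X"
// as the constant 42 instead of a runtime field access.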
13086 if ((aflags & CORINFO_ACCESS_GET) && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_FINAL) &&
13087 !(fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP) &&
13088 (varTypeIsIntegral(lclTyp) || varTypeIsFloating(lclTyp)))
13090 CorInfoInitClassResult initClassResult =
13091 info.compCompHnd->initClass(resolvedToken.hField, info.compMethodHnd,
13092 impTokenLookupContextHandle);
13094 if (initClassResult & CORINFO_INITCLASS_INITIALIZED)
13096 void** pFldAddr = nullptr;
13098 info.compCompHnd->getFieldAddress(resolvedToken.hField, (void**)&pFldAddr);
13100 // We should always be able to access this static's address directly
13101 assert(pFldAddr == nullptr);
13103 op1 = impImportStaticReadOnlyField(fldAddr, lclTyp);
13110 case CORINFO_FIELD_STATIC_RVA_ADDRESS:
13111 case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
13112 case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
13113 #if COR_JIT_EE_VERSION > 460
13114 case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
13116 op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo,
13120 case CORINFO_FIELD_INTRINSIC_ZERO:
13122 assert(aflags & CORINFO_ACCESS_GET);
13123 op1 = gtNewIconNode(0, lclTyp);
13128 case CORINFO_FIELD_INTRINSIC_EMPTY_STRING:
13130 assert(aflags & CORINFO_ACCESS_GET);
13133 InfoAccessType iat = info.compCompHnd->emptyStringLiteral(&pValue);
13134 op1 = gtNewStringLiteralNode(iat, pValue);
13140 assert(!"Unexpected fieldAccessor");
13143 if (!isLoadAddress)
13146 if (prefixFlags & PREFIX_VOLATILE)
13148 op1->gtFlags |= GTF_DONT_CSE; // Can't CSE a volatile
13149 op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
13153 assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) ||
13154 (op1->OperGet() == GT_OBJ));
13155 op1->gtFlags |= GTF_IND_VOLATILE;
13159 if (prefixFlags & PREFIX_UNALIGNED)
13163 assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) ||
13164 (op1->OperGet() == GT_OBJ));
13165 op1->gtFlags |= GTF_IND_UNALIGNED;
13170 /* Check if the class needs explicit initialization */
13172 if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
13174 GenTreePtr helperNode = impInitClass(&resolvedToken);
13175 if (compDonotInline())
13179 if (helperNode != nullptr)
13181 op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1);
13186 impPushOnStack(op1, tiRetVal);
13194 BOOL isStoreStatic = (opcode == CEE_STSFLD);
13196 CORINFO_CLASS_HANDLE fieldClsHnd; // class of the field (if it's a ref type)
13198 /* Get the CP_Fieldref index */
13200 assertImp(sz == sizeof(unsigned));
13202 _impResolveToken(CORINFO_TOKENKIND_Field);
13204 JITDUMP(" %08X", resolvedToken.token);
13206 int aflags = CORINFO_ACCESS_SET;
13207 GenTreePtr obj = nullptr;
13208 typeInfo* tiObj = nullptr;
13211 /* Pull the value from the stack */
13212 op2 = impPopStack(tiVal);
13213 clsHnd = tiVal.GetClassHandle();
13215 if (opcode == CEE_STFLD)
13217 tiObj = &impStackTop().seTypeInfo;
13218 obj = impPopStack().val;
13220 if (impIsThis(obj))
13222 aflags |= CORINFO_ACCESS_THIS;
13224 // An optimization for Contextful classes:
13225 // we unwrap the proxy when we have a 'this reference'
13227 if (info.compUnwrapContextful)
13229 aflags |= CORINFO_ACCESS_UNWRAP;
13234 eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo);
13236 // Figure out the type of the member. We always call canAccessField, so you always need this
13238 CorInfoType ciType = fieldInfo.fieldType;
13239 fieldClsHnd = fieldInfo.structType;
13241 lclTyp = JITtype2varType(ciType);
13243 if (compIsForInlining())
13245 /* Is this a 'special' (COM) field? A TLS ref static field? A field stored in the GC heap? Or a
13246 * per-inst static? */
13248 switch (fieldInfo.fieldAccessor)
13250 case CORINFO_FIELD_INSTANCE_HELPER:
13251 case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
13252 case CORINFO_FIELD_STATIC_ADDR_HELPER:
13253 case CORINFO_FIELD_STATIC_TLS:
13255 compInlineResult->NoteFatal(InlineObservation::CALLEE_STFLD_NEEDS_HELPER);
13258 case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
13259 #if COR_JIT_EE_VERSION > 460
13260 case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
13263 /* We may be able to inline the field accessors in specific instantiations of generic
13265 compInlineResult->NoteFatal(InlineObservation::CALLSITE_STFLD_NEEDS_HELPER);
13273 impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
13275 if (tiVerificationNeeded)
13277 verVerifyField(&resolvedToken, fieldInfo, tiObj, TRUE);
13278 typeInfo fieldType = verMakeTypeInfo(ciType, fieldClsHnd);
13279 Verify(tiCompatibleWith(tiVal, fieldType.NormaliseForStack(), true), "type mismatch");
13283 // tiVerificationNeeded is false.
13284 // Raise InvalidProgramException if static store accesses non-static field
13285 if (isStoreStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0))
13287 BADCODE("static access on an instance field");
13291 // We are using stfld on a static field.
13292 // We allow it, but need to eval any side-effects for obj
13293 if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr)
13295 if (obj->gtFlags & GTF_SIDE_EFFECT)
13297 obj = gtUnusedValNode(obj);
13298 impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13303 /* Preserve 'small' int types */
13304 if (lclTyp > TYP_INT)
13306 lclTyp = genActualType(lclTyp);
13309 switch (fieldInfo.fieldAccessor)
13311 case CORINFO_FIELD_INSTANCE:
13312 #ifdef FEATURE_READYTORUN_COMPILER
13313 case CORINFO_FIELD_INSTANCE_WITH_BASE:
13316 obj = impCheckForNullPointer(obj);
13318 /* Create the data member node */
13319 op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset);
13320 DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
13321 if (StructHasOverlappingFields(typeFlags))
13323 op1->gtField.gtFldMayOverlap = true;
13326 #ifdef FEATURE_READYTORUN_COMPILER
13327 if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE)
13329 op1->gtField.gtFieldLookup = fieldInfo.fieldLookup;
13333 op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT);
13335 if (fgAddrCouldBeNull(obj))
13337 op1->gtFlags |= GTF_EXCEPT;
13340 // If gtFldObj is a BYREF then our target is a value class and
13341 // it could point anywhere, for example a boxed class static int
13342 if (obj->gtType == TYP_BYREF)
13344 op1->gtFlags |= GTF_IND_TGTANYWHERE;
13347 if (compIsForInlining() &&
13348 impInlineIsGuaranteedThisDerefBeforeAnySideEffects(op2, obj, impInlineInfo->inlArgInfo))
13350 impInlineInfo->thisDereferencedFirst = true;
13355 case CORINFO_FIELD_STATIC_TLS:
13356 #ifdef _TARGET_X86_
13357 // Legacy TLS access is implemented as intrinsic on x86 only
13359 /* Create the data member node */
13360 op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset);
13361 op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation
13365 fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER;
13370 case CORINFO_FIELD_STATIC_ADDR_HELPER:
13371 case CORINFO_FIELD_INSTANCE_HELPER:
13372 case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
13373 op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp,
13377 case CORINFO_FIELD_STATIC_ADDRESS:
13378 case CORINFO_FIELD_STATIC_RVA_ADDRESS:
13379 case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
13380 case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
13381 #if COR_JIT_EE_VERSION > 460
13382 case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
13384 op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo,
13389 assert(!"Unexpected fieldAccessor");
13392 // Create the member assignment, unless we have a struct.
13393 // TODO-1stClassStructs: This could be limited to TYP_STRUCT, to avoid extra copies.
13394 bool deferStructAssign = varTypeIsStruct(lclTyp);
13396 if (!deferStructAssign)
13398 if (prefixFlags & PREFIX_VOLATILE)
13400 assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND));
13401 op1->gtFlags |= GTF_DONT_CSE; // Can't CSE a volatile
13402 op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
13403 op1->gtFlags |= GTF_IND_VOLATILE;
13405 if (prefixFlags & PREFIX_UNALIGNED)
13407 assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND));
13408 op1->gtFlags |= GTF_IND_UNALIGNED;
13411 /* V4.0 allows assignment of i4 constant values to i8 type vars when the IL verifier is bypassed (full
13413 trust apps). The reason this works is that the JIT stores an i4 constant in the GenTree union during
13415 importation and reads from the union as if it were a long during code generation. Though this can
13416 potentially read garbage, one can get lucky and have this work correctly.
13418 This code pattern is generated by the Dev10 MC++ compiler when storing to fields compiled with the /O2
13420 switch (the default when compiling retail configs in Dev10), and a customer app has taken a dependency on
13422 it. To be backward compatible, we will explicitly add an upward cast here so that it always works correctly.
13426 Note that this is limited to x86 alone, as there is no back-compat to be addressed for the ARM JIT for V4.0. */
13429 CLANG_FORMAT_COMMENT_ANCHOR;
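// Illustrative IL (hypothetical field) of the pattern being tolerated here:
//     ldc.i4  42
//     stsfld  int64 Foo::s_value
// On x86 we add the explicit widening cast below so the i4 constant is converted to an i8 before
// the store instead of relying on reading the GenTree union as a long.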
13431 #ifdef _TARGET_X86_
13432 if (op1->TypeGet() != op2->TypeGet() && op2->OperIsConst() && varTypeIsIntOrI(op2->TypeGet()) &&
13433 varTypeIsLong(op1->TypeGet()))
13435 op2 = gtNewCastNode(op1->TypeGet(), op2, op1->TypeGet());
13439 #ifdef _TARGET_64BIT_
13440 // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
13441 if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType))
13443 op2->gtType = TYP_I_IMPL;
13447 // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
13449 if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT))
13451 op2 = gtNewCastNode(TYP_INT, op2, TYP_INT);
13455 // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
13455 if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT))
13457 op2 = gtNewCastNode(TYP_I_IMPL, op2, TYP_I_IMPL);
13462 #if !FEATURE_X87_DOUBLES
13463 // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE
13464 // We insert a cast to the dest 'op1' type
13466 if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) &&
13467 varTypeIsFloating(op2->gtType))
13469 op2 = gtNewCastNode(op1->TypeGet(), op2, op1->TypeGet());
13471 #endif // !FEATURE_X87_DOUBLES
13473 op1 = gtNewAssignNode(op1, op2);
13475 /* Mark the expression as containing an assignment */
13477 op1->gtFlags |= GTF_ASG;
13480 /* Check if the class needs explicit initialization */
13482 if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
13484 GenTreePtr helperNode = impInitClass(&resolvedToken);
13485 if (compDonotInline())
13489 if (helperNode != nullptr)
13491 op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1);
13495 /* stfld can interfere with value classes (consider the sequence
13496 ldloc, ldloca, ..., stfld, stloc). We will be conservative and
13497 spill all value class references from the stack. */
13499 if (obj && ((obj->gtType == TYP_BYREF) || (obj->gtType == TYP_I_IMPL)))
13503 if (impIsValueType(tiObj))
13505 impSpillEvalStack();
13509 impSpillValueClasses();
13513 /* Spill any refs to the same member from the stack */
13515 impSpillLclRefs((ssize_t)resolvedToken.hField);
13517 /* stsfld also interferes with indirect accesses (for aliased
13518 statics) and calls. But don't need to spill other statics
13519 as we have explicitly spilled this particular static field. */
13521 impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STFLD"));
13523 if (deferStructAssign)
13525 op1 = impAssignStruct(op1, op2, clsHnd, (unsigned)CHECK_SPILL_ALL);
13533 /* Get the class type index operand */
13535 _impResolveToken(CORINFO_TOKENKIND_Newarr);
13537 JITDUMP(" %08X", resolvedToken.token);
13539 if (!opts.IsReadyToRun())
13541 // Need to restore array classes before creating array objects on the heap
13542 op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE /*mustRestoreHandle*/);
13543 if (op1 == nullptr)
13544 { // compDonotInline()
13549 if (tiVerificationNeeded)
13551 // As per ECMA, the 'numElems' specified can be either an int32 or a native int.
13552 Verify(impStackTop().seTypeInfo.IsIntOrNativeIntType(), "bad bound");
13554 CORINFO_CLASS_HANDLE elemTypeHnd;
13555 info.compCompHnd->getChildType(resolvedToken.hClass, &elemTypeHnd);
13556 Verify(elemTypeHnd == nullptr ||
13557 !(info.compCompHnd->getClassAttribs(elemTypeHnd) & CORINFO_FLG_CONTAINS_STACK_PTR),
13558 "array of byref-like type");
13559 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
13562 accessAllowedResult =
13563 info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
13564 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
13566 /* Form the arglist: array class handle, size */
13567 op2 = impPopStack().val;
13568 assertImp(genActualTypeIsIntOrI(op2->gtType));
13570 #ifdef FEATURE_READYTORUN_COMPILER
13571 if (opts.IsReadyToRun())
13573 op1 = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_NEWARR_1, TYP_REF,
13574 gtNewArgList(op2));
13575 usingReadyToRunHelper = (op1 != nullptr);
13577 if (!usingReadyToRunHelper)
13579 // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
13580 // and the newarr call with a single call to a dynamic R2R cell that will:
13581 // 1) Load the context
13582 // 2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
13583 // 3) Allocate the new array
13584 // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
13586 // Need to restore array classes before creating array objects on the heap
13587 op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE /*mustRestoreHandle*/);
13588 if (op1 == nullptr)
13589 { // compDonotInline()
13595 if (!usingReadyToRunHelper)
13598 args = gtNewArgList(op1, op2);
13600 /* Create a call to 'new' */
13602 // Note that this only works for shared generic code because the same helper is used for all
13603 // reference array types
13605 gtNewHelperCallNode(info.compCompHnd->getNewArrHelper(resolvedToken.hClass), TYP_REF, 0, args);
13608 op1->gtCall.compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)resolvedToken.hClass;
13610 /* Remember that this basic block contains 'new' of an sd array */
13612 block->bbFlags |= BBF_HAS_NEWARRAY;
13613 optMethodFlags |= OMF_HAS_NEWARRAY;
13615 /* Push the result of the call on the stack */
13617 impPushOnStack(op1, tiRetVal);
13624 assert(!compIsForInlining());
13626 if (tiVerificationNeeded)
13628 Verify(false, "bad opcode");
13631 // We don't allow locallocs inside handlers
13632 if (block->hasHndIndex())
13634 BADCODE("Localloc can't be inside handler");
13637 /* The FP register may not be back to the original value at the end
13638 of the method, even if the frame size is 0, as localloc may
13639 have modified it. So we will HAVE to reset it */
13641 compLocallocUsed = true;
13642 setNeedsGSSecurityCookie();
13644 // Get the size to allocate
13646 op2 = impPopStack().val;
13647 assertImp(genActualTypeIsIntOrI(op2->gtType));
13649 if (verCurrentState.esStackDepth != 0)
13651 BADCODE("Localloc can only be used when the stack is empty");
13654 op1 = gtNewOperNode(GT_LCLHEAP, TYP_I_IMPL, op2);
13656 // May throw a stack overflow exception. Obviously, we don't want locallocs to be CSE'd.
13658 op1->gtFlags |= (GTF_EXCEPT | GTF_DONT_CSE);
13660 impPushOnStack(op1, tiRetVal);
13665 /* Get the type token */
13666 assertImp(sz == sizeof(unsigned));
13668 _impResolveToken(CORINFO_TOKENKIND_Casting);
13670 JITDUMP(" %08X", resolvedToken.token);
13672 if (!opts.IsReadyToRun())
13674 op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
13675 if (op2 == nullptr)
13676 { // compDonotInline()
13681 if (tiVerificationNeeded)
13683 Verify(impStackTop().seTypeInfo.IsObjRef(), "obj reference needed");
13684 // Even if this is a value class, we know it is boxed.
13685 tiRetVal = typeInfo(TI_REF, resolvedToken.hClass);
13687 accessAllowedResult =
13688 info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
13689 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
13691 op1 = impPopStack().val;
13693 #ifdef FEATURE_READYTORUN_COMPILER
13694 if (opts.IsReadyToRun())
13696 GenTreePtr opLookup =
13697 impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_ISINSTANCEOF, TYP_REF,
13698 gtNewArgList(op1));
13699 usingReadyToRunHelper = (opLookup != nullptr);
13700 op1 = (usingReadyToRunHelper ? opLookup : op1);
13702 if (!usingReadyToRunHelper)
13704 // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
13705 // and the isinstanceof_any call with a single call to a dynamic R2R cell that will:
13706 // 1) Load the context
13707 // 2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
13708 // 3) Perform the 'is instance' check on the input object
13709 // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
13711 op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
13712 if (op2 == nullptr)
13713 { // compDonotInline()
13719 if (!usingReadyToRunHelper)
13722 op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, false);
13724 if (compDonotInline())
13729 impPushOnStack(op1, tiRetVal);
13733 case CEE_REFANYVAL:
13735 // get the class handle and make a ICON node out of it
13737 _impResolveToken(CORINFO_TOKENKIND_Class);
13739 JITDUMP(" %08X", resolvedToken.token);
13741 op2 = impTokenToHandle(&resolvedToken);
13742 if (op2 == nullptr)
13743 { // compDonotInline()
13747 if (tiVerificationNeeded)
13749 Verify(typeInfo::AreEquivalent(impStackTop().seTypeInfo, verMakeTypeInfo(impGetRefAnyClass())),
13751 tiRetVal = verMakeTypeInfo(resolvedToken.hClass).MakeByRef();
13754 op1 = impPopStack().val;
13755 // make certain it is normalized;
13756 op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL);
13758 // Call helper GETREFANY(classHandle, op1);
13759 args = gtNewArgList(op2, op1);
13760 op1 = gtNewHelperCallNode(CORINFO_HELP_GETREFANY, TYP_BYREF, 0, args);
13762 impPushOnStack(op1, tiRetVal);
13765 case CEE_REFANYTYPE:
13767 if (tiVerificationNeeded)
13769 Verify(typeInfo::AreEquivalent(impStackTop().seTypeInfo, verMakeTypeInfo(impGetRefAnyClass())),
13773 op1 = impPopStack().val;
13775 // make certain it is normalized;
13776 op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL);
13778 if (op1->gtOper == GT_OBJ)
13780 // Get the address of the refany
13781 op1 = op1->gtOp.gtOp1;
13783 // Fetch the type from the correct slot
13784 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
13785 gtNewIconNode(offsetof(CORINFO_RefAny, type), TYP_I_IMPL));
13786 op1 = gtNewOperNode(GT_IND, TYP_BYREF, op1);
13790 assertImp(op1->gtOper == GT_MKREFANY);
13792 // The pointer may have side-effects
13793 if (op1->gtOp.gtOp1->gtFlags & GTF_SIDE_EFFECT)
13795 impAppendTree(op1->gtOp.gtOp1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13797 impNoteLastILoffs();
13801 // We already have the class handle
13802 op1 = op1->gtOp.gtOp2;
13805 // convert native TypeHandle to RuntimeTypeHandle
13807 GenTreeArgList* helperArgs = gtNewArgList(op1);
13809 op1 = gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE_MAYBENULL, TYP_STRUCT, GTF_EXCEPT,
13812 // The handle struct is returned in register
13813 op1->gtCall.gtReturnType = TYP_REF;
13815 tiRetVal = typeInfo(TI_STRUCT, impGetTypeHandleClass());
13818 impPushOnStack(op1, tiRetVal);
13823 /* Get the Class index */
13824 assertImp(sz == sizeof(unsigned));
13825 lastLoadToken = codeAddr;
13826 _impResolveToken(CORINFO_TOKENKIND_Ldtoken);
13828 tokenType = info.compCompHnd->getTokenTypeAsHandle(&resolvedToken);
13830 op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE);
13831 if (op1 == nullptr)
13832 { // compDonotInline()
13836 helper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE;
13837 assert(resolvedToken.hClass != nullptr);
13839 if (resolvedToken.hMethod != nullptr)
13841 helper = CORINFO_HELP_METHODDESC_TO_STUBRUNTIMEMETHOD;
13843 else if (resolvedToken.hField != nullptr)
13845 helper = CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD;
13848 GenTreeArgList* helperArgs = gtNewArgList(op1);
13850 op1 = gtNewHelperCallNode(helper, TYP_STRUCT, GTF_EXCEPT, helperArgs);
13852 // The handle struct is returned in register
13853 op1->gtCall.gtReturnType = TYP_REF;
13855 tiRetVal = verMakeTypeInfo(tokenType);
13856 impPushOnStack(op1, tiRetVal);
13861 case CEE_UNBOX_ANY:
13863 /* Get the Class index */
13864 assertImp(sz == sizeof(unsigned));
13866 _impResolveToken(CORINFO_TOKENKIND_Class);
13868 JITDUMP(" %08X", resolvedToken.token);
13870 BOOL runtimeLookup;
13871 op2 = impTokenToHandle(&resolvedToken, &runtimeLookup);
13872 if (op2 == nullptr)
13873 { // compDonotInline()
13877 // Run this always so we can get access exceptions even with SkipVerification.
13878 accessAllowedResult =
13879 info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
13880 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
13882 if (opcode == CEE_UNBOX_ANY && !eeIsValueClass(resolvedToken.hClass))
13884 if (tiVerificationNeeded)
13886 typeInfo tiUnbox = impStackTop().seTypeInfo;
13887 Verify(tiUnbox.IsObjRef(), "bad unbox.any arg");
13888 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
13889 tiRetVal.NormaliseForStack();
13891 op1 = impPopStack().val;
13895 /* Pop the object and create the unbox helper call */
13896 /* You might think that for UNBOX_ANY we need to push a different */
13897 /* (non-byref) type, but here we're making the tiRetVal that is used */
13898 /* for the intermediate pointer which we then transfer onto the OBJ */
13899 /* instruction. OBJ then creates the appropriate tiRetVal. */
13900 if (tiVerificationNeeded)
13902 typeInfo tiUnbox = impStackTop().seTypeInfo;
13903 Verify(tiUnbox.IsObjRef(), "Bad unbox arg");
13905 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
13906 Verify(tiRetVal.IsValueClass(), "not value class");
13907 tiRetVal.MakeByRef();
13909 // We always come from an objref, so this is safe byref
13910 tiRetVal.SetIsPermanentHomeByRef();
13911 tiRetVal.SetIsReadonlyByRef();
13914 op1 = impPopStack().val;
13915 assertImp(op1->gtType == TYP_REF);
13917 helper = info.compCompHnd->getUnBoxHelper(resolvedToken.hClass);
13918 assert(helper == CORINFO_HELP_UNBOX || helper == CORINFO_HELP_UNBOX_NULLABLE);
13920 // We only want to expand inline the normal UNBOX helper;
13921 expandInline = (helper == CORINFO_HELP_UNBOX);
13925 if (compCurBB->isRunRarely())
13927 expandInline = false; // not worth the code expansion
13933 // we are doing normal unboxing
13934 // inline the common case of the unbox helper
13935 // UNBOX(exp) morphs into
13936 // clone = pop(exp);
13937 // ((*clone == typeToken) ? nop : helper(clone, typeToken));
13938 // push(clone + sizeof(void*))
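// Note on the fast path: '*clone' reads the first pointer-sized field of the object, i.e. its
// method table pointer, so the GT_EQ built below compares the object's runtime type against the
// type token and the helper is only called on a mismatch (a sketch of the intent, not new behavior).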
13940 GenTreePtr cloneOperand;
13941 op1 = impCloneExpr(op1, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
13942 nullptr DEBUGARG("inline UNBOX clone1"));
13943 op1 = gtNewOperNode(GT_IND, TYP_I_IMPL, op1);
13945 GenTreePtr condBox = gtNewOperNode(GT_EQ, TYP_INT, op1, op2);
13947 op1 = impCloneExpr(cloneOperand, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
13948 nullptr DEBUGARG("inline UNBOX clone2"));
13949 op2 = impTokenToHandle(&resolvedToken);
13950 if (op2 == nullptr)
13951 { // compDonotInline()
13954 args = gtNewArgList(op2, op1);
13955 op1 = gtNewHelperCallNode(helper, TYP_VOID, 0, args);
13957 op1 = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), op1);
13958 op1 = gtNewQmarkNode(TYP_VOID, condBox, op1);
13959 condBox->gtFlags |= GTF_RELOP_QMARK;
13961 // QMARK nodes cannot reside on the evaluation stack. Because there
13962 // may be other trees on the evaluation stack that side-effect the
13963 // sources of the UNBOX operation we must spill the stack.
13965 impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13967 // Create the address-expression to reference past the object header
13968 // to the beginning of the value-type. Today this means adjusting
13969 // past the base of the object's vtable field, which is pointer sized.
13971 op2 = gtNewIconNode(sizeof(void*), TYP_I_IMPL);
13972 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, cloneOperand, op2);
13976 unsigned callFlags = (helper == CORINFO_HELP_UNBOX) ? 0 : GTF_EXCEPT;
13978 // Don't optimize, just call the helper and be done with it
13979 args = gtNewArgList(op2, op1);
13980 op1 = gtNewHelperCallNode(helper,
13981 (var_types)((helper == CORINFO_HELP_UNBOX) ? TYP_BYREF : TYP_STRUCT),
13985 assert(helper == CORINFO_HELP_UNBOX && op1->gtType == TYP_BYREF || // Unbox helper returns a byref.
13986 helper == CORINFO_HELP_UNBOX_NULLABLE &&
13987 varTypeIsStruct(op1) // UnboxNullable helper returns a struct.
13991 ----------------------------------------------------------------------
13994 |           | CORINFO_HELP_UNBOX      | CORINFO_HELP_UNBOX_NULLABLE  |
13995 |           | (which returns a BYREF) | (which returns a STRUCT)     |
13997 |---------------------------------------------------------------------
13998 | UNBOX     | push the BYREF          | spill the STRUCT to a local, |
13999 |           |                         | push the BYREF to this local |
14000 |---------------------------------------------------------------------
14001 | UNBOX_ANY | push a GT_OBJ of        | push the STRUCT;             |
14002 |           | the BYREF               | on Linux, when the           |
14003 |           |                         | struct is returned in two    |
14004 |           |                         | registers, create a temp     |
14005 |           |                         | whose address is passed to   |
14006 |           |                         | the unbox_nullable helper.   |
14007 |---------------------------------------------------------------------
14010 if (opcode == CEE_UNBOX)
14012 if (helper == CORINFO_HELP_UNBOX_NULLABLE)
14014 // Unbox nullable helper returns a struct type.
14015 // We need to spill it to a temp so that we can take its address.
14016 // Here we need the unsafe value cls check, since the address of the struct is taken for
14017 // further use and is potentially exploitable.
14019 unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a nullable"));
14020 lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */);
14022 op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14023 op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
14024 assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp.
14026 op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14027 op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2);
14028 op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2);
14031 assert(op1->gtType == TYP_BYREF);
14032 assert(!tiVerificationNeeded || tiRetVal.IsByRef());
14036 assert(opcode == CEE_UNBOX_ANY);
14038 if (helper == CORINFO_HELP_UNBOX)
14040 // Normal unbox helper returns a TYP_BYREF.
14041 impPushOnStack(op1, tiRetVal);
14046 assert(helper == CORINFO_HELP_UNBOX_NULLABLE && "Make sure the helper is nullable!");
14048 #if FEATURE_MULTIREG_RET
14050 if (varTypeIsStruct(op1) && IsMultiRegReturnedType(resolvedToken.hClass))
14052 // Unbox nullable helper returns a TYP_STRUCT.
14053 // For the multi-reg case we need to spill it to a temp so that
14054 // we can pass the address to the unbox_nullable jit helper.
14056 unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a register returnable nullable"));
14057 lvaTable[tmp].lvIsMultiRegArg = true;
14058 lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */);
14060 op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14061 op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
14062 assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp.
14064 op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14065 op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2);
14066 op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2);
14068 // In this case the return value of the unbox helper is TYP_BYREF.
14069 // Make sure the right type is placed on the operand type stack.
14070 impPushOnStack(op1, tiRetVal);
14072 // Load the struct.
14075 assert(op1->gtType == TYP_BYREF);
14076 assert(!tiVerificationNeeded || tiRetVal.IsByRef());
14082 #endif // !FEATURE_MULTIREG_RET
14085 // If the struct is not register-passable, it is materialized in the RetBuf.
14086 assert(op1->gtType == TYP_STRUCT);
14087 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
14088 assert(tiRetVal.IsValueClass());
14092 impPushOnStack(op1, tiRetVal);
14098 /* Get the Class index */
14099 assertImp(sz == sizeof(unsigned));
14101 _impResolveToken(CORINFO_TOKENKIND_Box);
14103 JITDUMP(" %08X", resolvedToken.token);
14105 if (tiVerificationNeeded)
14107 typeInfo tiActual = impStackTop().seTypeInfo;
14108 typeInfo tiBox = verMakeTypeInfo(resolvedToken.hClass);
14110 Verify(verIsBoxable(tiBox), "boxable type expected");
14112 // check the class constraints of the boxed type in case we are boxing an uninitialized value
14113 Verify(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
14114 "boxed type has unsatisfied class constraints");
14116 Verify(tiCompatibleWith(tiActual, tiBox.NormaliseForStack(), true), "type mismatch");
14118 // Observation: the following code introduces a boxed value class on the stack, but,
14119 // according to the ECMA spec, one would simply expect: tiRetVal =
14120 // typeInfo(TI_REF,impGetObjectClass());
14122 // Push the result back on the stack,
14123 // even if clsHnd is a value class we want the TI_REF
14124 // we call back to the EE to find out what type we should push (for Nullable<T> we push T)
14125 tiRetVal = typeInfo(TI_REF, info.compCompHnd->getTypeForBox(resolvedToken.hClass));
14128 accessAllowedResult =
14129 info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14130 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14132 // Note BOX can be used on things that are not value classes, in which
14133 // case we get a NOP. However the verifier's view of the type on the
14134 // stack changes (in generic code a 'T' becomes a 'boxed T')
14135 if (!eeIsValueClass(resolvedToken.hClass))
14137 verCurrentState.esStack[verCurrentState.esStackDepth - 1].seTypeInfo = tiRetVal;
14141 // Look ahead for unbox.any
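// For example, the IL pair
//     box        !T
//     unbox.any  !T
// is a round trip: when both tokens resolve to the same (non-shared) class we can skip the
// unbox.any and never import the box, leaving the original value on the stack unchanged.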
14142 if (codeAddr + (sz + 1 + sizeof(mdToken)) <= codeEndp && codeAddr[sz] == CEE_UNBOX_ANY)
14144 DWORD classAttribs = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
14145 if (!(classAttribs & CORINFO_FLG_SHAREDINST))
14147 CORINFO_RESOLVED_TOKEN unboxResolvedToken;
14149 impResolveToken(codeAddr + (sz + 1), &unboxResolvedToken, CORINFO_TOKENKIND_Class);
14151 if (unboxResolvedToken.hClass == resolvedToken.hClass)
14153 // Skip the next unbox.any instruction
14154 sz += sizeof(mdToken) + 1;
14160 impImportAndPushBox(&resolvedToken);
14161 if (compDonotInline())
14170 /* Get the Class index */
14171 assertImp(sz == sizeof(unsigned));
14173 _impResolveToken(CORINFO_TOKENKIND_Class);
14175 JITDUMP(" %08X", resolvedToken.token);
14177 if (tiVerificationNeeded)
14179 tiRetVal = typeInfo(TI_INT);
14182 op1 = gtNewIconNode(info.compCompHnd->getClassSize(resolvedToken.hClass));
14183 impPushOnStack(op1, tiRetVal);
14186 case CEE_CASTCLASS:
14188 /* Get the Class index */
14190 assertImp(sz == sizeof(unsigned));
14192 _impResolveToken(CORINFO_TOKENKIND_Casting);
14194 JITDUMP(" %08X", resolvedToken.token);
14196 if (!opts.IsReadyToRun())
14198 op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
14199 if (op2 == nullptr)
14200 { // compDonotInline()
14205 if (tiVerificationNeeded)
14207 Verify(impStackTop().seTypeInfo.IsObjRef(), "object ref expected");
14209 tiRetVal = typeInfo(TI_REF, resolvedToken.hClass);
14212 accessAllowedResult =
14213 info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14214 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14216 op1 = impPopStack().val;
14218 /* Pop the address and create the 'checked cast' helper call */
14220 // At this point we expect typeRef to contain the token, op1 to contain the value being cast,
14221 // and op2 to contain code that creates the type handle corresponding to typeRef
14224 #ifdef FEATURE_READYTORUN_COMPILER
14225 if (opts.IsReadyToRun())
14227 GenTreePtr opLookup = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_CHKCAST,
14228 TYP_REF, gtNewArgList(op1));
14229 usingReadyToRunHelper = (opLookup != nullptr);
14230 op1 = (usingReadyToRunHelper ? opLookup : op1);
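// If the ready-to-run helper could not be created (e.g. the cast needs a runtime generic
// dictionary lookup), fall back to the normal expansion below.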
14232 if (!usingReadyToRunHelper)
14234 // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
14235 // and the chkcastany call with a single call to a dynamic R2R cell that will:
14236 // 1) Load the context
14237 // 2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
14238 // 3) Check the object on the stack for the type-cast
14239 // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
14241 op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
14242 if (op2 == nullptr)
14243 { // compDonotInline()
14249 if (!usingReadyToRunHelper)
14252 op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, true);
14254 if (compDonotInline())
14259 /* Push the result back on the stack */
14260 impPushOnStack(op1, tiRetVal);
14265 if (compIsForInlining())
14267 // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
14268 // TODO: Will this be too strict, given that we will inline many basic blocks?
14269 // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
14271 /* Do we have just the exception on the stack ?*/
14273 if (verCurrentState.esStackDepth != 1)
14275 /* if not, just don't inline the method */
14277 compInlineResult->NoteFatal(InlineObservation::CALLEE_THROW_WITH_INVALID_STACK);
14282 if (tiVerificationNeeded)
14284 tiRetVal = impStackTop().seTypeInfo;
14285 Verify(tiRetVal.IsObjRef(), "object ref expected");
14286 if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
14288 Verify(!tiRetVal.IsThisPtr(), "throw uninitialized this");
14292 block->bbSetRunRarely(); // any block with a throw is rare
14293 /* Pop the exception object and create the 'throw' helper call */
14295 op1 = gtNewHelperCallNode(CORINFO_HELP_THROW, TYP_VOID, GTF_EXCEPT, gtNewArgList(impPopStack().val));
14298 if (verCurrentState.esStackDepth > 0)
14300 impEvalSideEffects();
14303 assert(verCurrentState.esStackDepth == 0);
14309 assert(!compIsForInlining());
14311 if (info.compXcptnsCount == 0)
14313 BADCODE("rethrow outside catch");
14316 if (tiVerificationNeeded)
14318 Verify(block->hasHndIndex(), "rethrow outside catch");
14319 if (block->hasHndIndex())
14321 EHblkDsc* HBtab = ehGetDsc(block->getHndIndex());
14322 Verify(!HBtab->HasFinallyOrFaultHandler(), "rethrow in finally or fault");
14323 if (HBtab->HasFilter())
14325 // we better be in the handler clause part, not the filter part
14326 Verify(jitIsBetween(compCurBB->bbCodeOffs, HBtab->ebdHndBegOffs(), HBtab->ebdHndEndOffs()),
14327 "rethrow in filter");
14332 /* Create the 'rethrow' helper call */
14334 op1 = gtNewHelperCallNode(CORINFO_HELP_RETHROW, TYP_VOID, GTF_EXCEPT);
14340 assertImp(sz == sizeof(unsigned));
14342 _impResolveToken(CORINFO_TOKENKIND_Class);
14344 JITDUMP(" %08X", resolvedToken.token);
14346 if (tiVerificationNeeded)
14348 typeInfo tiTo = impStackTop().seTypeInfo;
14349 typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
14351 Verify(tiTo.IsByRef(), "byref expected");
14352 Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
14354 Verify(tiCompatibleWith(tiInstr, tiTo.DereferenceByRef(), false),
14355 "type operand incompatible with type of address");
14358 size = info.compCompHnd->getClassSize(resolvedToken.hClass); // Size
14359 op2 = gtNewIconNode(0); // Value
14360 op1 = impPopStack().val; // Dest
14361 op1 = gtNewBlockVal(op1, size);
14362 op1 = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, false);
14367 if (tiVerificationNeeded)
14369 Verify(false, "bad opcode");
14372 op3 = impPopStack().val; // Size
14373 op2 = impPopStack().val; // Value
14374 op1 = impPopStack().val; // Dest
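// A constant size lets us build a GT_BLK node of known size; otherwise we need a GT_DYN_BLK
// whose size operand is evaluated at run time.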
14376 if (op3->IsCnsIntOrI())
14378 size = (unsigned)op3->AsIntConCommon()->IconValue();
14379 op1 = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, size);
14383 op1 = new (this, GT_DYN_BLK) GenTreeDynBlk(op1, op3);
14386 op1 = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, false);
14392 if (tiVerificationNeeded)
14394 Verify(false, "bad opcode");
14396 op3 = impPopStack().val; // Size
14397 op2 = impPopStack().val; // Src
14398 op1 = impPopStack().val; // Dest
14400 if (op3->IsCnsIntOrI())
14402 size = (unsigned)op3->AsIntConCommon()->IconValue();
14403 op1 = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, size);
14407 op1 = new (this, GT_DYN_BLK) GenTreeDynBlk(op1, op3);
14410 if (op2->OperGet() == GT_ADDR)
14412 op2 = op2->gtOp.gtOp1;
14416 op2 = gtNewOperNode(GT_IND, TYP_STRUCT, op2);
14419 op1 = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, true);
14424 assertImp(sz == sizeof(unsigned));
14426 _impResolveToken(CORINFO_TOKENKIND_Class);
14428 JITDUMP(" %08X", resolvedToken.token);
14430 if (tiVerificationNeeded)
14432 typeInfo tiFrom = impStackTop().seTypeInfo;
14433 typeInfo tiTo = impStackTop(1).seTypeInfo;
14434 typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
14436 Verify(tiFrom.IsByRef(), "expected byref source");
14437 Verify(tiTo.IsByRef(), "expected byref destination");
14439 Verify(tiCompatibleWith(tiFrom.DereferenceByRef(), tiInstr, false),
14440 "type of source address incompatible with type operand");
14441 Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
14442 Verify(tiCompatibleWith(tiInstr, tiTo.DereferenceByRef(), false),
14443 "type operand incompatible with type of destination address");
14446 if (!eeIsValueClass(resolvedToken.hClass))
14448 op1 = impPopStack().val; // address to load from
14450 impBashVarAddrsToI(op1);
14452 assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
14454 op1 = gtNewOperNode(GT_IND, TYP_REF, op1);
14455 op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
14457 impPushOnStackNoType(op1);
14458 opcode = CEE_STIND_REF;
14460 goto STIND_POST_VERIFY;
14463 op2 = impPopStack().val; // Src
14464 op1 = impPopStack().val; // Dest
14465 op1 = gtNewCpObjNode(op1, op2, resolvedToken.hClass, ((prefixFlags & PREFIX_VOLATILE) != 0));
14470 assertImp(sz == sizeof(unsigned));
14472 _impResolveToken(CORINFO_TOKENKIND_Class);
14474 JITDUMP(" %08X", resolvedToken.token);
14476 if (eeIsValueClass(resolvedToken.hClass))
14478 lclTyp = TYP_STRUCT;
14485 if (tiVerificationNeeded)
14488 typeInfo tiPtr = impStackTop(1).seTypeInfo;
14490 // Make sure we have a valid-looking byref
14491 Verify(tiPtr.IsByRef(), "pointer not byref");
14492 Verify(!tiPtr.IsReadonlyByRef(), "write to readonly byref");
14493 if (!tiPtr.IsByRef() || tiPtr.IsReadonlyByRef())
14495 compUnsafeCastUsed = true;
14498 typeInfo ptrVal = DereferenceByRef(tiPtr);
14499 typeInfo argVal = verMakeTypeInfo(resolvedToken.hClass);
14501 if (!tiCompatibleWith(impStackTop(0).seTypeInfo, NormaliseForStack(argVal), true))
14503 Verify(false, "type of value incompatible with type operand");
14504 compUnsafeCastUsed = true;
14507 if (!tiCompatibleWith(argVal, ptrVal, false))
14509 Verify(false, "type operand incompatible with type of address");
14510 compUnsafeCastUsed = true;
14515 compUnsafeCastUsed = true;
14518 if (lclTyp == TYP_REF)
14520 opcode = CEE_STIND_REF;
14521 goto STIND_POST_VERIFY;
14524 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
14525 if (impIsPrimitive(jitTyp))
14527 lclTyp = JITtype2varType(jitTyp);
14528 goto STIND_POST_VERIFY;
14531 op2 = impPopStack().val; // Value
14532 op1 = impPopStack().val; // Ptr
14534 assertImp(varTypeIsStruct(op2));
14536 op1 = impAssignStructPtr(op1, op2, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
14542 assert(!compIsForInlining());
14544 // Being lazy here. Refanys are tricky in terms of gc tracking.
14545 // Since it is uncommon, just don't perform struct promotion in any method that contains mkrefany.
14547 JITDUMP("disabling struct promotion because of mkrefany\n");
14548 fgNoStructPromotion = true;
14550 oper = GT_MKREFANY;
14551 assertImp(sz == sizeof(unsigned));
14553 _impResolveToken(CORINFO_TOKENKIND_Class);
14555 JITDUMP(" %08X", resolvedToken.token);
14557 op2 = impTokenToHandle(&resolvedToken, nullptr, TRUE);
14558 if (op2 == nullptr)
14559 { // compDonotInline()
14563 if (tiVerificationNeeded)
14565 typeInfo tiPtr = impStackTop().seTypeInfo;
14566 typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
14568 Verify(!verIsByRefLike(tiInstr), "mkrefany of byref-like class");
14569 Verify(!tiPtr.IsReadonlyByRef(), "readonly byref used with mkrefany");
14570 Verify(typeInfo::AreEquivalent(tiPtr.DereferenceByRef(), tiInstr), "type mismatch");
14573 accessAllowedResult =
14574 info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14575 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14577 op1 = impPopStack().val;
14579 // @SPECVIOLATION: TYP_INT should not be allowed here by a strict reading of the spec.
14580 // But JIT32 allowed it, so we continue to allow it.
14581 assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL || op1->TypeGet() == TYP_INT);
14583 // MKREFANY returns a struct. op2 is the class token.
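// The struct produced here is a System.TypedReference, pairing the byref (op1) with its type (op2).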
14584 op1 = gtNewOperNode(oper, TYP_STRUCT, op1, op2);
14586 impPushOnStack(op1, verMakeTypeInfo(impGetRefAnyClass()));
14592 assertImp(sz == sizeof(unsigned));
14594 _impResolveToken(CORINFO_TOKENKIND_Class);
14596 JITDUMP(" %08X", resolvedToken.token);
14600 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
14602 if (tiVerificationNeeded)
14604 typeInfo tiPtr = impStackTop().seTypeInfo;
14606 // Make sure we have a byref
14607 if (!tiPtr.IsByRef())
14609 Verify(false, "pointer not byref");
14610 compUnsafeCastUsed = true;
14612 typeInfo tiPtrVal = DereferenceByRef(tiPtr);
14614 if (!tiCompatibleWith(tiPtrVal, tiRetVal, false))
14616 Verify(false, "type of address incompatible with type operand");
14617 compUnsafeCastUsed = true;
14619 tiRetVal.NormaliseForStack();
14623 compUnsafeCastUsed = true;
14626 if (eeIsValueClass(resolvedToken.hClass))
14628 lclTyp = TYP_STRUCT;
14633 opcode = CEE_LDIND_REF;
14634 goto LDIND_POST_VERIFY;
14637 op1 = impPopStack().val;
14639 assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL);
14641 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
14642 if (impIsPrimitive(jitTyp))
14644 op1 = gtNewOperNode(GT_IND, JITtype2varType(jitTyp), op1);
14646 // Could point anywhere, for example a boxed class static int
14647 op1->gtFlags |= GTF_IND_TGTANYWHERE | GTF_GLOB_REF;
14648 assertImp(varTypeIsArithmetic(op1->gtType));
14652 // OBJ returns a struct
14653 // and carries an inline argument which is the class token of the loaded obj
14654 op1 = gtNewObjNode(resolvedToken.hClass, op1);
14656 op1->gtFlags |= GTF_EXCEPT;
14658 impPushOnStack(op1, tiRetVal);
14663 if (tiVerificationNeeded)
14665 typeInfo tiArray = impStackTop().seTypeInfo;
14666 Verify(verIsSDArray(tiArray), "bad array");
14667 tiRetVal = typeInfo(TI_INT);
14670 op1 = impPopStack().val;
14671 if (!opts.MinOpts() && !opts.compDbgCode)
14673 /* Use GT_ARR_LENGTH operator so rng check opts see this */
14674 GenTreeArrLen* arrLen =
14675 new (this, GT_ARR_LENGTH) GenTreeArrLen(TYP_INT, op1, offsetof(CORINFO_Array, length));
14677 /* Mark the block as containing a length expression */
14679 if (op1->gtOper == GT_LCL_VAR)
14681 block->bbFlags |= BBF_HAS_IDX_LEN;
14688 /* Create the expression "*(array_addr + ArrLenOffs)" */
14689 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
14690 gtNewIconNode(offsetof(CORINFO_Array, length), TYP_I_IMPL));
14691 op1 = gtNewOperNode(GT_IND, TYP_INT, op1);
14692 op1->gtFlags |= GTF_IND_ARR_LEN;
14695 /* An indirection will cause a GPF if the address is null */
14696 op1->gtFlags |= GTF_EXCEPT;
14698 /* Push the result back on the stack */
14699 impPushOnStack(op1, tiRetVal);
14703 op1 = gtNewHelperCallNode(CORINFO_HELP_USER_BREAKPOINT, TYP_VOID);
14707 if (opts.compDbgCode)
14709 op1 = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
14714 /******************************** NYI *******************************/
14717 OutputDebugStringA("CLR: Invalid x86 breakpoint in IL stream\n");
14720 case CEE_MACRO_END:
14723 BADCODE3("unknown opcode", ": %02X", (int)opcode);
14727 prevOpcode = opcode;
14730 assert(!insertLdloc || opcode == CEE_DUP);
14733 assert(!insertLdloc);
14736 #undef _impResolveToken
14739 #pragma warning(pop)
14742 // Push a local/argument tree on the operand stack
14743 void Compiler::impPushVar(GenTree* op, typeInfo tiRetVal)
14745 tiRetVal.NormaliseForStack();
14747 if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init) && tiRetVal.IsThisPtr())
14749 tiRetVal.SetUninitialisedObjRef();
14752 impPushOnStack(op, tiRetVal);
14755 // Load a local/argument on the operand stack
14756 // lclNum is an index into lvaTable *NOT* the arg/lcl index in the IL
14757 void Compiler::impLoadVar(unsigned lclNum, IL_OFFSET offset, typeInfo tiRetVal)
14761 if (lvaTable[lclNum].lvNormalizeOnLoad())
14763 lclTyp = lvaGetRealType(lclNum);
14767 lclTyp = lvaGetActualType(lclNum);
14770 impPushVar(gtNewLclvNode(lclNum, lclTyp, offset), tiRetVal);
14773 // Load an argument on the operand stack
14774 // Shared by the various CEE_LDARG opcodes
14775 // ilArgNum is the argument index as specified in IL.
14776 // It will be mapped to the correct lvaTable index
14777 void Compiler::impLoadArg(unsigned ilArgNum, IL_OFFSET offset)
14779 Verify(ilArgNum < info.compILargsCount, "bad arg num");
14781 if (compIsForInlining())
14783 if (ilArgNum >= info.compArgsCount)
14785 compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_ARGUMENT_NUMBER);
14789 impPushVar(impInlineFetchArg(ilArgNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo),
14790 impInlineInfo->lclVarInfo[ilArgNum].lclVerTypeInfo);
14794 if (ilArgNum >= info.compArgsCount)
14799 unsigned lclNum = compMapILargNum(ilArgNum); // account for possible hidden param
14801 if (lclNum == info.compThisArg)
14803 lclNum = lvaArg0Var;
14806 impLoadVar(lclNum, offset);
14810 // Load a local on the operand stack
14811 // Shared by the various CEE_LDLOC opcodes
14812 // ilLclNum is the local index as specified in IL.
14813 // It will be mapped to the correct lvaTable index
14814 void Compiler::impLoadLoc(unsigned ilLclNum, IL_OFFSET offset)
14816 if (tiVerificationNeeded)
14818 Verify(ilLclNum < info.compMethodInfo->locals.numArgs, "bad loc num");
14819 Verify(info.compInitMem, "initLocals not set");
14822 if (compIsForInlining())
14824 if (ilLclNum >= info.compMethodInfo->locals.numArgs)
14826 compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_LOCAL_NUMBER);
14830 // Get the local type
14831 var_types lclTyp = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclTypeInfo;
14833 typeInfo tiRetVal = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclVerTypeInfo;
14835 /* Have we allocated a temp for this local? */
14837 unsigned lclNum = impInlineFetchLocal(ilLclNum DEBUGARG("Inline ldloc first use temp"));
14839 // All vars of inlined methods should be !lvNormalizeOnLoad()
14841 assert(!lvaTable[lclNum].lvNormalizeOnLoad());
14842 lclTyp = genActualType(lclTyp);
14844 impPushVar(gtNewLclvNode(lclNum, lclTyp), tiRetVal);
14848 if (ilLclNum >= info.compMethodInfo->locals.numArgs)
14853 unsigned lclNum = info.compArgsCount + ilLclNum;
14855 impLoadVar(lclNum, offset);
14859 #ifdef _TARGET_ARM_
14860 /**************************************************************************************
14862 * When assigning a vararg call src to a HFA lcl dest, mark that we cannot promote the
14863 * dst struct, because struct promotion will turn it into a float/double variable while
14864 * the rhs will be an int/long variable. We don't generate code for assigning an int into
14865 * a float, but there is nothing that would prevent such a tree from being created. The tree
14866 * would look like: (=, (typ_float, typ_int)) or (GT_TRANSFER, (typ_float, typ_int))
14868 * tmpNum - the lcl dst variable num that is a struct.
14869 * src - the src tree assigned to the dest that is a struct/int (when varargs call.)
14870 * hClass - the type handle for the struct variable.
14872 * TODO-ARM-CQ: [301608] This is a rare scenario with varargs and struct promotion coming into play,
14873 * however, we could do a codegen of transferring from int to float registers
14874 * (transfer, not a cast.)
14877 void Compiler::impMarkLclDstNotPromotable(unsigned tmpNum, GenTreePtr src, CORINFO_CLASS_HANDLE hClass)
14879 if (src->gtOper == GT_CALL && src->gtCall.IsVarargs() && IsHfa(hClass))
14881 int hfaSlots = GetHfaCount(hClass);
14882 var_types hfaType = GetHfaType(hClass);
14884 // For varargs calls we morph the method's return type to "int" at the importer, irrespective of its
14885 // original struct/float type, because the ABI calls for the return to be in integer registers.
14886 // We don't want struct promotion to replace an expression like this:
14887 // lclFld_int = callvar_int() into lclFld_float = callvar_int();
14888 // This means an int is getting assigned to a float without a cast. Prevent the promotion.
14889 if ((hfaType == TYP_DOUBLE && hfaSlots == sizeof(double) / REGSIZE_BYTES) ||
14890 (hfaType == TYP_FLOAT && hfaSlots == sizeof(float) / REGSIZE_BYTES))
14892 // Make sure this struct type stays as struct so we can receive the call in a struct.
14893 lvaTable[tmpNum].lvIsMultiRegRet = true;
14897 #endif // _TARGET_ARM_
14899 #if FEATURE_MULTIREG_RET
14900 GenTreePtr Compiler::impAssignMultiRegTypeToVar(GenTreePtr op, CORINFO_CLASS_HANDLE hClass)
14902 unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Return value temp for multireg return."));
14903 impAssignTempGen(tmpNum, op, hClass, (unsigned)CHECK_SPILL_NONE);
14904 GenTreePtr ret = gtNewLclvNode(tmpNum, op->gtType);
14906 // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
14907 ret->gtFlags |= GTF_DONT_CSE;
14909 assert(IsMultiRegReturnedType(hClass));
14911 // Mark the var so that fields are not promoted and stay together.
14912 lvaTable[tmpNum].lvIsMultiRegRet = true;
14916 #endif // FEATURE_MULTIREG_RET
14918 // Do the import for a return.
14919 // Returns false if inlining was aborted.
14920 // The opcode can be ret, or call in the case of a tail.call.
14921 bool Compiler::impReturnInstruction(BasicBlock* block, int prefixFlags, OPCODE& opcode)
14923 if (tiVerificationNeeded)
14925 verVerifyThisPtrInitialised();
14927 unsigned expectedStack = 0;
14928 if (info.compRetType != TYP_VOID)
14930 typeInfo tiVal = impStackTop().seTypeInfo;
14931 typeInfo tiDeclared =
14932 verMakeTypeInfo(info.compMethodInfo->args.retType, info.compMethodInfo->args.retTypeClass);
14934 Verify(!verIsByRefLike(tiDeclared) || verIsSafeToReturnByRef(tiVal), "byref return");
14936 Verify(tiCompatibleWith(tiVal, tiDeclared.NormaliseForStack(), true), "type mismatch");
14939 Verify(verCurrentState.esStackDepth == expectedStack, "stack non-empty on return");
14942 GenTree* op2 = nullptr;
14943 GenTree* op1 = nullptr;
14944 CORINFO_CLASS_HANDLE retClsHnd = nullptr;
14946 if (info.compRetType != TYP_VOID)
14948 StackEntry se = impPopStack(retClsHnd);
14951 if (!compIsForInlining())
14953 impBashVarAddrsToI(op2);
14954 op2 = impImplicitIorI4Cast(op2, info.compRetType);
14955 op2 = impImplicitR4orR8Cast(op2, info.compRetType);
14956 assertImp((genActualType(op2->TypeGet()) == genActualType(info.compRetType)) ||
14957 ((op2->TypeGet() == TYP_I_IMPL) && (info.compRetType == TYP_BYREF)) ||
14958 ((op2->TypeGet() == TYP_BYREF) && (info.compRetType == TYP_I_IMPL)) ||
14959 (varTypeIsFloating(op2->gtType) && varTypeIsFloating(info.compRetType)) ||
14960 (varTypeIsStruct(op2) && varTypeIsStruct(info.compRetType)));
14963 if (opts.compGcChecks && info.compRetType == TYP_REF)
14965 // DDB 3483 : JIT Stress: early termination of GC ref's life time in exception code path
14966 // VSW 440513: Incorrect gcinfo on the return value under COMPlus_JitGCChecks=1 for methods with
14969 assert(op2->gtType == TYP_REF);
14971 // confirm that the argument is a GC pointer (for debugging (GC stress))
14972 GenTreeArgList* args = gtNewArgList(op2);
14973 op2 = gtNewHelperCallNode(CORINFO_HELP_CHECK_OBJ, TYP_REF, 0, args);
14977 printf("\ncompGcChecks tree:\n");
14985 // inlinee's stack should be empty now.
14986 assert(verCurrentState.esStackDepth == 0);
14991 printf("\n\n Inlinee Return expression (before normalization) =>\n");
14996 // Make sure the type matches the original call.
14998 var_types returnType = genActualType(op2->gtType);
14999 var_types originalCallType = impInlineInfo->inlineCandidateInfo->fncRetType;
15000 if ((returnType != originalCallType) && (originalCallType == TYP_STRUCT))
15002 originalCallType = impNormStructType(impInlineInfo->inlineCandidateInfo->methInfo.args.retTypeClass);
15005 if (returnType != originalCallType)
15007 compInlineResult->NoteFatal(InlineObservation::CALLSITE_RETURN_TYPE_MISMATCH);
15011 // Below, we are going to set impInlineInfo->retExpr to the tree with the return
15012 // expression. At this point, retExpr could already be set if there are multiple
15013 // return blocks (meaning lvaInlineeReturnSpillTemp != BAD_VAR_NUM) and one of
15014 // the other blocks already set it. If there is only a single return block,
15015 // retExpr shouldn't be set. However, this is not true if we reimport a block
15016 // with a return. In that case, retExpr will be set, then the block will be
15017 // reimported, but retExpr won't get cleared as part of setting the block to
15018 // be reimported. The reimported retExpr value should be the same, so even if
15019 // we don't unconditionally overwrite it, it shouldn't matter.
15020 if (info.compRetNativeType != TYP_STRUCT)
15022 // compRetNativeType is not TYP_STRUCT.
15023 // This implies it could be either a scalar type or SIMD vector type or
15024 // a struct type that can be normalized to a scalar type.
15026 if (varTypeIsStruct(info.compRetType))
15028 noway_assert(info.compRetBuffArg == BAD_VAR_NUM);
15029 // adjust the type away from struct to integral
15030 // and no normalizing
15031 op2 = impFixupStructReturnType(op2, retClsHnd);
15035 // Do we have to normalize?
15036 var_types fncRealRetType = JITtype2varType(info.compMethodInfo->args.retType);
15037 if ((varTypeIsSmall(op2->TypeGet()) || varTypeIsSmall(fncRealRetType)) &&
15038 fgCastNeeded(op2, fncRealRetType))
15040 // Small-typed return values are normalized by the callee
15041 op2 = gtNewCastNode(TYP_INT, op2, fncRealRetType);
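// For example, an inlinee declared to return 'short' gets its TYP_INT result narrowed to
// short here, since small-typed return values are normalized on the callee side.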
15045 if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15047 assert(info.compRetNativeType != TYP_VOID &&
15048 (fgMoreThanOneReturnBlock() || impInlineInfo->hasPinnedLocals));
15050 // This is a bit of a workaround...
15051 // If we are inlining a call that returns a struct, where the actual "native" return type is
15052 // not a struct (for example, the struct is composed of exactly one int, and the native
15053 // return type is thus an int), and the inlinee has multiple return blocks (thus,
15054 // lvaInlineeReturnSpillTemp is != BAD_VAR_NUM, and is the index of a local var that is set
15055 // to the *native* return type), and at least one of the return blocks is the result of
15056 // a call, then we have a problem. The situation is like this (from a failed test case):
15059 // // Note: valuetype plinq_devtests.LazyTests/LIX is a struct with only a single int
15060 // call !!0 [mscorlib]System.Threading.LazyInitializer::EnsureInitialized<valuetype
15061 // plinq_devtests.LazyTests/LIX>(!!0&, bool&, object&, class [mscorlib]System.Func`1<!!0>)
15065 // ldobj !!T // this gets bashed to a GT_LCL_FLD, type TYP_INT
15068 // call !!0 System.Threading.LazyInitializer::EnsureInitializedCore<!!0>(!!0&, bool&,
15069 // object&, class System.Func`1<!!0>)
15072 // In the code above, when we call impFixupStructReturnType(), we will change the op2 return type
15073 // of the inlinee return node, but we don't do that for GT_CALL nodes, which we delay until
15074 // morphing when we call fgFixupStructReturn(). We do this, apparently, to handle nested
15075 // inlining properly by leaving the correct type on the GT_CALL node through importing.
15077 // To fix this, for this case, we temporarily change the GT_CALL node type to the
15078 // native return type, which is what it will be set to eventually. We generate the
15079 // assignment to the return temp, using the correct type, and then restore the GT_CALL
15080 // node type. During morphing, the GT_CALL will get the correct, final, native return type.
15082 bool restoreType = false;
15083 if ((op2->OperGet() == GT_CALL) && (info.compRetType == TYP_STRUCT))
15085 noway_assert(op2->TypeGet() == TYP_STRUCT);
15086 op2->gtType = info.compRetNativeType;
15087 restoreType = true;
15090 impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(),
15091 (unsigned)CHECK_SPILL_ALL);
15093 GenTreePtr tmpOp2 = gtNewLclvNode(lvaInlineeReturnSpillTemp, op2->TypeGet());
15097 op2->gtType = TYP_STRUCT; // restore it to what it was
15103 if (impInlineInfo->retExpr)
15105 // Some other block(s) have seen the CEE_RET first.
15106 // Better they spilled to the same temp.
15107 assert(impInlineInfo->retExpr->gtOper == GT_LCL_VAR);
15108 assert(impInlineInfo->retExpr->gtLclVarCommon.gtLclNum == op2->gtLclVarCommon.gtLclNum);
15116 printf("\n\n Inlinee Return expression (after normalization) =>\n");
15121 // Report the return expression
15122 impInlineInfo->retExpr = op2;
15126 // compRetNativeType is TYP_STRUCT.
15127 // This implies the struct is returned either via a RetBuf arg or as a multi-reg struct return.
15129 GenTreePtr iciCall = impInlineInfo->iciCall;
15130 assert(iciCall->gtOper == GT_CALL);
15132 // Assign the inlinee return into a spill temp.
15133 // spill temp only exists if there are multiple return points
15134 if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15136 // in this case we have to insert multiple struct copies to the temp
15137 // and the retexpr is just the temp.
15138 assert(info.compRetNativeType != TYP_VOID);
15139 assert(fgMoreThanOneReturnBlock() || impInlineInfo->hasPinnedLocals);
15141 impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(),
15142 (unsigned)CHECK_SPILL_ALL);
15145 #if defined(_TARGET_ARM_) || defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15146 #if defined(_TARGET_ARM_)
15147 // TODO-ARM64-NYI: HFA
15148 // TODO-AMD64-Unix and TODO-ARM once the ARM64 functionality is implemented the
15149 // next ifdefs could be refactored in a single method with the ifdef inside.
15150 if (IsHfa(retClsHnd))
15152 // Same as !IsHfa but just don't bother with impAssignStructPtr.
15153 #else // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15154 ReturnTypeDesc retTypeDesc;
15155 retTypeDesc.InitializeStructReturnType(this, retClsHnd);
15156 unsigned retRegCount = retTypeDesc.GetReturnRegCount();
15158 if (retRegCount != 0)
15160 // If single eightbyte, the return type would have been normalized and there won't be a temp var.
15161 // This code will be called only if the struct return has not been normalized (i.e. 2 eightbytes -
15163 assert(retRegCount == MAX_RET_REG_COUNT);
15164 // Same as !structDesc.passedInRegisters but just don't bother with impAssignStructPtr.
15165 CLANG_FORMAT_COMMENT_ANCHOR;
15166 #endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15168 if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15170 if (!impInlineInfo->retExpr)
15172 #if defined(_TARGET_ARM_)
15173 impInlineInfo->retExpr = gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType);
15174 #else // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15175 // The inlinee compiler has figured out the type of the temp already. Use it here.
15176 impInlineInfo->retExpr =
15177 gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType);
15178 #endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15183 impInlineInfo->retExpr = op2;
15187 #elif defined(_TARGET_ARM64_)
15188 ReturnTypeDesc retTypeDesc;
15189 retTypeDesc.InitializeStructReturnType(this, retClsHnd);
15190 unsigned retRegCount = retTypeDesc.GetReturnRegCount();
15192 if (retRegCount != 0)
15194 assert(!iciCall->AsCall()->HasRetBufArg());
15195 assert(retRegCount >= 2);
15196 if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15198 if (!impInlineInfo->retExpr)
15200 // The inlinee compiler has figured out the type of the temp already. Use it here.
15201 impInlineInfo->retExpr =
15202 gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType);
15207 impInlineInfo->retExpr = op2;
15211 #endif // defined(_TARGET_ARM64_)
15213 assert(iciCall->AsCall()->HasRetBufArg());
15214 GenTreePtr dest = gtCloneExpr(iciCall->gtCall.gtCallArgs->gtOp.gtOp1);
15215 // spill temp only exists if there are multiple return points
15216 if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15218 // if this is the first return we have seen, set the retExpr
15219 if (!impInlineInfo->retExpr)
15221 impInlineInfo->retExpr =
15222 impAssignStructPtr(dest, gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType),
15223 retClsHnd, (unsigned)CHECK_SPILL_ALL);
15228 impInlineInfo->retExpr = impAssignStructPtr(dest, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL);
15235 if (compIsForInlining())
15240 if (info.compRetType == TYP_VOID)
15243 op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID);
15245 else if (info.compRetBuffArg != BAD_VAR_NUM)
15247 // Assign value to return buff (first param)
15248 GenTreePtr retBuffAddr = gtNewLclvNode(info.compRetBuffArg, TYP_BYREF, impCurStmtOffs);
15250 op2 = impAssignStructPtr(retBuffAddr, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL);
15251 impAppendTree(op2, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
15253 // There are cases where the address of the implicit RetBuf should be returned explicitly (in RAX).
15254 CLANG_FORMAT_COMMENT_ANCHOR;
15256 #if defined(_TARGET_AMD64_)
15258 // The x64 (System V and Win64) calling convention requires us to
15259 // return the implicit return buffer explicitly (in RAX).
15260 // Change the return type to be BYREF.
15261 op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF));
15262 #else // !defined(_TARGET_AMD64_)
15263 // On non-AMD64 targets the profiler hook requires us to return the implicit RetBuf explicitly (in the return register).
15264 // In such a case the return type of the function is changed to BYREF.
15265 // If the profiler hook is not needed, the return type of the function is TYP_VOID.
15266 if (compIsProfilerHookNeeded())
15268 op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF));
15273 op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID);
15275 #endif // !defined(_TARGET_AMD64_)
15277 else if (varTypeIsStruct(info.compRetType))
15279 #if !FEATURE_MULTIREG_RET
15280 // For both ARM architectures the HFA native types are maintained as structs.
15281 // On System V AMD64 the multireg struct returns are also left as structs.
15282 noway_assert(info.compRetNativeType != TYP_STRUCT);
15284 op2 = impFixupStructReturnType(op2, retClsHnd);
15286 op1 = gtNewOperNode(GT_RETURN, genActualType(info.compRetNativeType), op2);
15291 op1 = gtNewOperNode(GT_RETURN, genActualType(info.compRetType), op2);
15294 // We must have imported a tailcall and jumped to RET
15295 if (prefixFlags & PREFIX_TAILCALL)
15297 #ifndef _TARGET_AMD64_
15299 // This cannot be asserted on Amd64 since we permit the following IL pattern:
15303 assert(verCurrentState.esStackDepth == 0 && impOpcodeIsCallOpcode(opcode));
15306 opcode = CEE_RET; // To prevent trying to spill if CALL_SITE_BOUNDARIES
15308 // impImportCall() would have already appended TYP_VOID calls
15309 if (info.compRetType == TYP_VOID)
15315 impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
15317 // Remember at which BC offset the tree was finished
15318 impNoteLastILoffs();
15323 /*****************************************************************************
15324 * Mark the block as unimported.
15325 * Note that the caller is responsible for calling impImportBlockPending(),
15326 * with the appropriate stack-state
15329 inline void Compiler::impReimportMarkBlock(BasicBlock* block)
15332 if (verbose && (block->bbFlags & BBF_IMPORTED))
15334 printf("\nBB%02u will be reimported\n", block->bbNum);
15338 block->bbFlags &= ~BBF_IMPORTED;
15341 /*****************************************************************************
15342 * Mark the successors of the given block as unimported.
15343 * Note that the caller is responsible for calling impImportBlockPending()
15344 * for all the successors, with the appropriate stack-state.
15347 void Compiler::impReimportMarkSuccessors(BasicBlock* block)
15349 for (unsigned i = 0; i < block->NumSucc(); i++)
15351 impReimportMarkBlock(block->GetSucc(i));
15355 /*****************************************************************************
15357 * Filter wrapper to handle only the passed-in exception code
15361 LONG FilterVerificationExceptions(PEXCEPTION_POINTERS pExceptionPointers, LPVOID lpvParam)
15363 if (pExceptionPointers->ExceptionRecord->ExceptionCode == SEH_VERIFICATION_EXCEPTION)
15365 return EXCEPTION_EXECUTE_HANDLER;
15368 return EXCEPTION_CONTINUE_SEARCH;
15371 void Compiler::impVerifyEHBlock(BasicBlock* block, bool isTryStart)
15373 assert(block->hasTryIndex());
15374 assert(!compIsForInlining());
15376 unsigned tryIndex = block->getTryIndex();
15377 EHblkDsc* HBtab = ehGetDsc(tryIndex);
15381 assert(block->bbFlags & BBF_TRY_BEG);
15383 // The Stack must be empty
15385 if (block->bbStkDepth != 0)
15387 BADCODE("Evaluation stack must be empty on entry into a try block");
15391 // Save the stack contents, we'll need to restore it later
15393 SavedStack blockState;
15394 impSaveStackState(&blockState, false);
15396 while (HBtab != nullptr)
15400 // Are we verifying that an instance constructor properly initializes its 'this' pointer once?
15401 // We do not allow the 'this' pointer to be uninitialized when entering most kinds of try regions
15403 if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
15405 // We trigger an invalid program exception here unless we have a try/fault region.
15407 if (HBtab->HasCatchHandler() || HBtab->HasFinallyHandler() || HBtab->HasFilter())
15410 "The 'this' pointer of an instance constructor is not initialized upon entry to a try region");
15414 // Allow a try/fault region to proceed.
15415 assert(HBtab->HasFaultHandler());
15419 /* Recursively process the handler block */
15420 BasicBlock* hndBegBB = HBtab->ebdHndBeg;
15422 // Construct the proper verification stack state
15423 // either empty or one that contains just
15424 // the Exception Object that we are dealing with
15426 verCurrentState.esStackDepth = 0;
15428 if (handlerGetsXcptnObj(hndBegBB->bbCatchTyp))
15430 CORINFO_CLASS_HANDLE clsHnd;
15432 if (HBtab->HasFilter())
15434 clsHnd = impGetObjectClass();
15438 CORINFO_RESOLVED_TOKEN resolvedToken;
15440 resolvedToken.tokenContext = impTokenLookupContextHandle;
15441 resolvedToken.tokenScope = info.compScopeHnd;
15442 resolvedToken.token = HBtab->ebdTyp;
15443 resolvedToken.tokenType = CORINFO_TOKENKIND_Class;
15444 info.compCompHnd->resolveToken(&resolvedToken);
15446 clsHnd = resolvedToken.hClass;
15449 // push the catch arg on the stack, spilling to a temp if necessary
15450 // Note: can update HBtab->ebdHndBeg!
15451 hndBegBB = impPushCatchArgOnStack(hndBegBB, clsHnd);
15454 // Queue up the handler for importing
15456 impImportBlockPending(hndBegBB);
15458 if (HBtab->HasFilter())
15460 /* @VERIFICATION : Ideally the end-of-filter state should get
15461 propagated to the catch handler; this is an incompleteness,
15462 but not a security/compliance issue, since the only
15463 interesting state is the 'thisInit' state.
15466 verCurrentState.esStackDepth = 0;
15468 BasicBlock* filterBB = HBtab->ebdFilter;
15470 // push the catch arg on the stack, spilling to a temp if necessary
15471 // Note: can update HBtab->ebdFilter!
15472 filterBB = impPushCatchArgOnStack(filterBB, impGetObjectClass());
15474 impImportBlockPending(filterBB);
15477 else if (verTrackObjCtorInitState && HBtab->HasFaultHandler())
15479 /* Recursively process the handler block */
15481 verCurrentState.esStackDepth = 0;
15483 // Queue up the fault handler for importing
15485 impImportBlockPending(HBtab->ebdHndBeg);
15488 // Now process our enclosing try index (if any)
15490 tryIndex = HBtab->ebdEnclosingTryIndex;
15491 if (tryIndex == EHblkDsc::NO_ENCLOSING_INDEX)
15497 HBtab = ehGetDsc(tryIndex);
15501 // Restore the stack contents
15502 impRestoreStackState(&blockState);
15505 //***************************************************************
15506 // Import the instructions for the given basic block. Perform
15507 // verification, throwing an exception on failure. Push any successor blocks that are enabled for the first
15508 // time, or whose verification pre-state is changed.
15511 #pragma warning(push)
15512 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
15514 void Compiler::impImportBlock(BasicBlock* block)
15516 // BBF_INTERNAL blocks only exist during importation due to EH canonicalization. We need to
15517 // handle them specially. In particular, there is no IL to import for them, but we do need
15518 // to mark them as imported and put their successors on the pending import list.
15519 if (block->bbFlags & BBF_INTERNAL)
15521 JITDUMP("Marking BBF_INTERNAL block BB%02u as BBF_IMPORTED\n", block->bbNum);
15522 block->bbFlags |= BBF_IMPORTED;
15524 for (unsigned i = 0; i < block->NumSucc(); i++)
15526 impImportBlockPending(block->GetSucc(i));
15536 /* Make the block globally available */
15541 /* Initialize the debug variables */
15542 impCurOpcName = "unknown";
15543 impCurOpcOffs = block->bbCodeOffs;
15546 /* Set the current stack state to the merged result */
15547 verResetCurrentState(block, &verCurrentState);
15549 /* Now walk the code and import the IL into GenTrees */
15551 struct FilterVerificationExceptionsParam
15556 FilterVerificationExceptionsParam param;
15558 param.pThis = this;
15559 param.block = block;
15561 PAL_TRY(FilterVerificationExceptionsParam*, pParam, ¶m)
15563 /* @VERIFICATION : For now, the only state propagation from try
15564 to its handler is the "thisInit" state (the stack is empty at the start of a try).
15565 In general, for state that we track in verification, we need to
15566 model the possibility that an exception might happen at any IL
15567 instruction, so we really need to merge all states that obtain
15568 between IL instructions in a try block into the start states of
15571 However, we do not allow the 'this' pointer to be uninitialized when
15572 entering most kinds of try regions (only a try/fault is allowed to have
15573 an uninitialized 'this' pointer on entry to the try).
15575 Fortunately, the stack is thrown away when an exception
15576 leads to a handler, so we don't have to worry about that.
15577 We DO, however, have to worry about the "thisInit" state.
15578 But only for the try/fault case.
15580 The only allowed transition is from TIS_Uninit to TIS_Init.
15582 So for a try/fault region for the fault handler block
15583 we will merge the start state of the try begin
15584 and the post-state of each block that is part of this try region
15587 // merge the start state of the try begin
15589 if (pParam->block->bbFlags & BBF_TRY_BEG)
15591 pParam->pThis->impVerifyEHBlock(pParam->block, true);
15594 pParam->pThis->impImportBlockCode(pParam->block);
15596 // As discussed above:
15597 // merge the post-state of each block that is part of this try region
15599 if (pParam->block->hasTryIndex())
15601 pParam->pThis->impVerifyEHBlock(pParam->block, false);
15604 PAL_EXCEPT_FILTER(FilterVerificationExceptions)
15606 verHandleVerificationFailure(block DEBUGARG(false));
15610 if (compDonotInline())
15615 assert(!compDonotInline());
15617 markImport = false;
15621 unsigned baseTmp = NO_BASE_TMP; // input temps assigned to successor blocks
15622 bool reimportSpillClique = false;
15623 BasicBlock* tgtBlock = nullptr;
15625 /* If the stack is non-empty, we might have to spill its contents */
15627 if (verCurrentState.esStackDepth != 0)
15629 impBoxTemp = BAD_VAR_NUM; // if a box temp is used in a block that leaves something
15630 // on the stack, its lifetime is hard to determine, simply
15631 // don't reuse such temps.
15633 GenTreePtr addStmt = nullptr;
15635 /* Do the successors of 'block' have any other predecessors ?
15636 We do not want to do some of the optimizations related to multiRef
15637 if we can reimport blocks */
15639 unsigned multRef = impCanReimport ? unsigned(~0) : 0;
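// If reimporting is possible, seed multRef with all bits set so every successor looks
// multiply-referenced and the single-reference optimizations below are not applied.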
15641 switch (block->bbJumpKind)
15645 /* Temporarily remove the 'jtrue' from the end of the tree list */
15647 assert(impTreeLast);
15648 assert(impTreeLast->gtOper == GT_STMT);
15649 assert(impTreeLast->gtStmt.gtStmtExpr->gtOper == GT_JTRUE);
15651 addStmt = impTreeLast;
15652 impTreeLast = impTreeLast->gtPrev;
15654 /* Note if the next block has more than one ancestor */
15656 multRef |= block->bbNext->bbRefs;
15658 /* Does the next block have temps assigned? */
15660 baseTmp = block->bbNext->bbStkTempsIn;
15661 tgtBlock = block->bbNext;
15663 if (baseTmp != NO_BASE_TMP)
15668 /* Try the target of the jump then */
15670 multRef |= block->bbJumpDest->bbRefs;
15671 baseTmp = block->bbJumpDest->bbStkTempsIn;
15672 tgtBlock = block->bbJumpDest;
15676 multRef |= block->bbJumpDest->bbRefs;
15677 baseTmp = block->bbJumpDest->bbStkTempsIn;
15678 tgtBlock = block->bbJumpDest;
15682 multRef |= block->bbNext->bbRefs;
15683 baseTmp = block->bbNext->bbStkTempsIn;
15684 tgtBlock = block->bbNext;
15689 BasicBlock** jmpTab;
15692 /* Temporarily remove the GT_SWITCH from the end of the tree list */
15694 assert(impTreeLast);
15695 assert(impTreeLast->gtOper == GT_STMT);
15696 assert(impTreeLast->gtStmt.gtStmtExpr->gtOper == GT_SWITCH);
15698 addStmt = impTreeLast;
15699 impTreeLast = impTreeLast->gtPrev;
15701 jmpCnt = block->bbJumpSwt->bbsCount;
15702 jmpTab = block->bbJumpSwt->bbsDstTab;
15706 tgtBlock = (*jmpTab);
15708 multRef |= tgtBlock->bbRefs;
15710 // Thanks to spill cliques, we should have assigned all or none
15711 assert((baseTmp == NO_BASE_TMP) || (baseTmp == tgtBlock->bbStkTempsIn));
15712 baseTmp = tgtBlock->bbStkTempsIn;
15717 } while (++jmpTab, --jmpCnt);
15721 case BBJ_CALLFINALLY:
15722 case BBJ_EHCATCHRET:
15724 case BBJ_EHFINALLYRET:
15725 case BBJ_EHFILTERRET:
15727 NO_WAY("can't have 'unreached' end of BB with non-empty stack");
15731 noway_assert(!"Unexpected bbJumpKind");
15735 assert(multRef >= 1);
15737 /* Do we have a base temp number? */
15739 bool newTemps = (baseTmp == NO_BASE_TMP);
15743 /* Grab enough temps for the whole stack */
15744 baseTmp = impGetSpillTmpBase(block);
15747 /* Spill all stack entries into temps */
15748 unsigned level, tempNum;
15750 JITDUMP("\nSpilling stack entries into temps\n");
15751 for (level = 0, tempNum = baseTmp; level < verCurrentState.esStackDepth; level++, tempNum++)
15753 GenTreePtr tree = verCurrentState.esStack[level].val;
15755 /* VC generates code where it pushes a byref from one branch, and an int (ldc.i4 0) from
15756 the other. This should merge to a byref in unverifiable code.
15757 However, if the branch which leaves the TYP_I_IMPL on the stack is imported first, the
15758 successor would be imported assuming there was a TYP_I_IMPL on
15759 the stack. Thus the value would not get GC-tracked. Hence,
15760 change the temp to TYP_BYREF and reimport the successors.
15761 Note: We should only allow this in unverifiable code.
15763 if (tree->gtType == TYP_BYREF && lvaTable[tempNum].lvType == TYP_I_IMPL && !verNeedsVerification())
15765 lvaTable[tempNum].lvType = TYP_BYREF;
15766 impReimportMarkSuccessors(block);
15770 #ifdef _TARGET_64BIT_
15771 if (genActualType(tree->gtType) == TYP_I_IMPL && lvaTable[tempNum].lvType == TYP_INT)
15773 if (tiVerificationNeeded && tgtBlock->bbEntryState != nullptr &&
15774 (tgtBlock->bbFlags & BBF_FAILED_VERIFICATION) == 0)
15776 // Merge the current state into the entry state of block;
15777 // the call to verMergeEntryStates must have changed
15778 // the entry state of the block by merging the int local var
15779 // and the native-int stack entry.
15780 bool changed = false;
15781 if (verMergeEntryStates(tgtBlock, &changed))
15783 impRetypeEntryStateTemps(tgtBlock);
15784 impReimportBlockPending(tgtBlock);
15789 tgtBlock->bbFlags |= BBF_FAILED_VERIFICATION;
15794 // Some other block in the spill clique set this to "int", but now we have "native int".
15795 // Change the type and go back to re-import any blocks that used the wrong type.
15796 lvaTable[tempNum].lvType = TYP_I_IMPL;
15797 reimportSpillClique = true;
15799 else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_I_IMPL)
15801 // Spill clique has decided this should be "native int", but this block only pushes an "int".
15802 // Insert a sign-extension to "native int" so we match the clique.
15803 verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, TYP_I_IMPL);
15806 // Consider the case where one branch left a 'byref' on the stack and the other leaves
15807 // an 'int'. On 32-bit, this is allowed (in non-verifiable code) since they are the same
15808 // size. JIT64 managed to make this work on 64-bit. For compatibility, we support JIT64
15809 // behavior instead of asserting and then generating bad code (where we save/restore the
15810 // low 32 bits of a byref pointer to an 'int' sized local). If the 'int' side has been
15811 // imported already, we need to change the type of the local and reimport the spill clique.
15812 // If the 'byref' side has imported, we insert a cast from int to 'native int' to match
15813 // the 'byref' size.
15814 if (!tiVerificationNeeded)
15816 if (genActualType(tree->gtType) == TYP_BYREF && lvaTable[tempNum].lvType == TYP_INT)
15818 // Some other block in the spill clique set this to "int", but now we have "byref".
15819 // Change the type and go back to re-import any blocks that used the wrong type.
15820 lvaTable[tempNum].lvType = TYP_BYREF;
15821 reimportSpillClique = true;
15823 else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_BYREF)
15825 // Spill clique has decided this should be "byref", but this block only pushes an "int".
15826 // Insert a sign-extension to "native int" so we match the clique size.
15827 verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, TYP_I_IMPL);
15830 #endif // _TARGET_64BIT_
15832 #if FEATURE_X87_DOUBLES
15833 // X87 stack doesn't differentiate between float/double
15834 // so promoting is no big deal.
15835 // For everybody else keep it as float until we have a collision and then promote
15836 // Just like for x64's TYP_INT<->TYP_I_IMPL
15838 if (multRef > 1 && tree->gtType == TYP_FLOAT)
15840 verCurrentState.esStack[level].val = gtNewCastNode(TYP_DOUBLE, tree, TYP_DOUBLE);
15843 #else // !FEATURE_X87_DOUBLES
15845 if (tree->gtType == TYP_DOUBLE && lvaTable[tempNum].lvType == TYP_FLOAT)
15847 // Some other block in the spill clique set this to "float", but now we have "double".
15848 // Change the type and go back to re-import any blocks that used the wrong type.
15849 lvaTable[tempNum].lvType = TYP_DOUBLE;
15850 reimportSpillClique = true;
15852 else if (tree->gtType == TYP_FLOAT && lvaTable[tempNum].lvType == TYP_DOUBLE)
15854 // Spill clique has decided this should be "double", but this block only pushes a "float".
15855 // Insert a cast to "double" so we match the clique.
15856 verCurrentState.esStack[level].val = gtNewCastNode(TYP_DOUBLE, tree, TYP_DOUBLE);
15859 #endif // FEATURE_X87_DOUBLES
15861 /* If addStmt has a reference to tempNum (can only happen if we
15862 are spilling to the temps already used by a previous block),
15863 we need to spill addStmt */
15865 if (addStmt && !newTemps && gtHasRef(addStmt->gtStmt.gtStmtExpr, tempNum, false))
15867 GenTreePtr addTree = addStmt->gtStmt.gtStmtExpr;
15869 if (addTree->gtOper == GT_JTRUE)
15871 GenTreePtr relOp = addTree->gtOp.gtOp1;
15872 assert(relOp->OperIsCompare());
15874 var_types type = genActualType(relOp->gtOp.gtOp1->TypeGet());
15876 if (gtHasRef(relOp->gtOp.gtOp1, tempNum, false))
15878 unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op1"));
15879 impAssignTempGen(temp, relOp->gtOp.gtOp1, level);
15880 type = genActualType(lvaTable[temp].TypeGet());
15881 relOp->gtOp.gtOp1 = gtNewLclvNode(temp, type);
15884 if (gtHasRef(relOp->gtOp.gtOp2, tempNum, false))
15886 unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op2"));
15887 impAssignTempGen(temp, relOp->gtOp.gtOp2, level);
15888 type = genActualType(lvaTable[temp].TypeGet());
15889 relOp->gtOp.gtOp2 = gtNewLclvNode(temp, type);
15894 assert(addTree->gtOper == GT_SWITCH && genActualType(addTree->gtOp.gtOp1->gtType) == TYP_I_IMPL);
15896 unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt SWITCH"));
15897 impAssignTempGen(temp, addTree->gtOp.gtOp1, level);
15898 addTree->gtOp.gtOp1 = gtNewLclvNode(temp, TYP_I_IMPL);
15902 /* Spill the stack entry, and replace with the temp */
15904 if (!impSpillStackEntry(level, tempNum
15907 true, "Spill Stack Entry"
15913 BADCODE("bad stack state");
15916 // Oops. Something went wrong when spilling. Bad code.
15917 verHandleVerificationFailure(block DEBUGARG(true));
15923 /* Put back the 'jtrue'/'switch' if we removed it earlier */
15927 impAppendStmt(addStmt, (unsigned)CHECK_SPILL_NONE);
15931 // Some of the append/spill logic works on compCurBB
15933 assert(compCurBB == block);
15935 /* Save the tree list in the block */
15936 impEndTreeList(block);
15938 // impEndTreeList sets BBF_IMPORTED on the block
15939 // We do *NOT* want to set it later than this because
15940 // impReimportSpillClique might clear it if this block is both a
15941 // predecessor and successor in the current spill clique
15942 assert(block->bbFlags & BBF_IMPORTED);
15944 // If we had a int/native int, or float/double collision, we need to re-import
15945 if (reimportSpillClique)
15947 // This will re-import all the successors of block (as well as each of their predecessors)
15948 impReimportSpillClique(block);
15950 // For blocks that haven't been imported yet, we still need to mark them as pending import.
15951 for (unsigned i = 0; i < block->NumSucc(); i++)
15953 BasicBlock* succ = block->GetSucc(i);
15954 if ((succ->bbFlags & BBF_IMPORTED) == 0)
15956 impImportBlockPending(succ);
15960 else // the normal case
15962 // otherwise just import the successors of block
15964 /* Does this block jump to any other blocks? */
15965 for (unsigned i = 0; i < block->NumSucc(); i++)
15967 impImportBlockPending(block->GetSucc(i));
15972 #pragma warning(pop)
15975 /*****************************************************************************/
15977 // Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if
15978 // necessary (and ensures that it is a member of the set of BB's on the list, by setting its byte in
15979 // impPendingBlockMembers). Merges the current verification state into the verification state of "block"
15980 // (its "pre-state").
15982 void Compiler::impImportBlockPending(BasicBlock* block)
15987 printf("\nimpImportBlockPending for BB%02u\n", block->bbNum);
15991 // We will add a block to the pending set if it has not already been imported (or needs to be re-imported),
15992 // or if it has, but merging in a predecessor's post-state changes the block's pre-state.
15993 // (When we're doing verification, we always attempt the merge to detect verification errors.)
15995 // If the block has not been imported, add to pending set.
15996 bool addToPending = ((block->bbFlags & BBF_IMPORTED) == 0);
15998 // Initialize bbEntryState just the first time we try to add this block to the pending list
15999 // Just because bbEntryState is NULL doesn't mean the pre-state wasn't previously set.
16000 // We use NULL to indicate the 'common' state to avoid memory allocation
16001 if ((block->bbEntryState == nullptr) && ((block->bbFlags & (BBF_IMPORTED | BBF_FAILED_VERIFICATION)) == 0) &&
16002 (impGetPendingBlockMember(block) == 0))
16004 verInitBBEntryState(block, &verCurrentState);
16005 assert(block->bbStkDepth == 0);
16006 block->bbStkDepth = static_cast<unsigned short>(verCurrentState.esStackDepth);
16007 assert(addToPending);
16008 assert(impGetPendingBlockMember(block) == 0);
16012 // The stack should have the same height on entry to the block from all its predecessors.
16013 if (block->bbStkDepth != verCurrentState.esStackDepth)
16017 sprintf_s(buffer, sizeof(buffer),
16018 "Block at offset %4.4x to %4.4x in %s entered with different stack depths.\n"
16019 "Previous depth was %d, current depth is %d",
16020 block->bbCodeOffs, block->bbCodeOffsEnd, info.compFullName, block->bbStkDepth,
16021 verCurrentState.esStackDepth);
16022 buffer[400 - 1] = 0;
16025 NO_WAY("Block entered with different stack depths");
16029 // Additionally, if we need to verify, merge the verification state.
16030 if (tiVerificationNeeded)
16032 // Merge the current state into the entry state of block; if this does not change the entry state
16033 // by merging, do not add the block to the pending-list.
16034 bool changed = false;
16035 if (!verMergeEntryStates(block, &changed))
16037 block->bbFlags |= BBF_FAILED_VERIFICATION;
16038 addToPending = true; // We will pop it off, and check the flag set above.
16042 addToPending = true;
16044 JITDUMP("Adding BB%02u to pending set due to new merge result\n", block->bbNum);
16053 if (block->bbStkDepth > 0)
16055 // We need to fix the types of any spill temps that might have changed:
16056 // int->native int, float->double, int->byref, etc.
16057 impRetypeEntryStateTemps(block);
16060 // OK, we must add to the pending list, if it's not already in it.
16061 if (impGetPendingBlockMember(block) != 0)
16067 // Get an entry to add to the pending list
16071 if (impPendingFree)
16073 // We can reuse one of the freed up dscs.
16074 dsc = impPendingFree;
16075 impPendingFree = dsc->pdNext;
16079 // We have to create a new dsc
16080 dsc = new (this, CMK_Unknown) PendingDsc;
16084 dsc->pdSavedStack.ssDepth = verCurrentState.esStackDepth;
16085 dsc->pdThisPtrInit = verCurrentState.thisInitialized;
16087 // Save the stack trees for later
16089 if (verCurrentState.esStackDepth)
16091 impSaveStackState(&dsc->pdSavedStack, false);
16094 // Add the entry to the pending list
16096 dsc->pdNext = impPendingList;
16097 impPendingList = dsc;
16098 impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set.
16100 // Various assertions require us to now consider the block as not imported (at least for
16101 // the final time...)
16102 block->bbFlags &= ~BBF_IMPORTED;
16107 printf("Added PendingDsc - %08p for BB%02u\n", dspPtr(dsc), block->bbNum);
16112 /*****************************************************************************/
16114 // Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if
16115 // necessary (and ensures that it is a member of the set of BB's on the list, by setting its byte in
16116 // impPendingBlockMembers). Does *NOT* change the existing "pre-state" of the block.
16118 void Compiler::impReimportBlockPending(BasicBlock* block)
16120 JITDUMP("\nimpReimportBlockPending for BB%02u", block->bbNum);
16122 assert(block->bbFlags & BBF_IMPORTED);
16124 // OK, we must add to the pending list, if it's not already in it.
16125 if (impGetPendingBlockMember(block) != 0)
16130 // Get an entry to add to the pending list
16134 if (impPendingFree)
16136 // We can reuse one of the freed up dscs.
16137 dsc = impPendingFree;
16138 impPendingFree = dsc->pdNext;
16142 // We have to create a new dsc
16143 dsc = new (this, CMK_ImpStack) PendingDsc;
16148 if (block->bbEntryState)
16150 dsc->pdThisPtrInit = block->bbEntryState->thisInitialized;
16151 dsc->pdSavedStack.ssDepth = block->bbEntryState->esStackDepth;
16152 dsc->pdSavedStack.ssTrees = block->bbEntryState->esStack;
16156 dsc->pdThisPtrInit = TIS_Bottom;
16157 dsc->pdSavedStack.ssDepth = 0;
16158 dsc->pdSavedStack.ssTrees = nullptr;
16161 // Add the entry to the pending list
16163 dsc->pdNext = impPendingList;
16164 impPendingList = dsc;
16165 impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set.
16167 // Various assertions require us to now consider the block as not imported (at least for
16168 // the final time...)
16169 block->bbFlags &= ~BBF_IMPORTED;
16174 printf("Added PendingDsc - %08p for BB%02u\n", dspPtr(dsc), block->bbNum);
16179 void* Compiler::BlockListNode::operator new(size_t sz, Compiler* comp)
16181 if (comp->impBlockListNodeFreeList == nullptr)
16183 return (BlockListNode*)comp->compGetMem(sizeof(BlockListNode), CMK_BasicBlock);
16187 BlockListNode* res = comp->impBlockListNodeFreeList;
16188 comp->impBlockListNodeFreeList = res->m_next;
16193 void Compiler::FreeBlockListNode(Compiler::BlockListNode* node)
16195 node->m_next = impBlockListNodeFreeList;
16196 impBlockListNodeFreeList = node;
16199 void Compiler::impWalkSpillCliqueFromPred(BasicBlock* block, SpillCliqueWalker* callback)
16203 noway_assert(!fgComputePredsDone);
16204 if (!fgCheapPredsValid)
16206 fgComputeCheapPreds();
16209 BlockListNode* succCliqueToDo = nullptr;
16210 BlockListNode* predCliqueToDo = new (this) BlockListNode(block);
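// The spill clique is the closure of "successors of predecessors" and "predecessors
// of successors": the two worklists below are drained alternately, and each block is
// reported to the callback at most once per direction thanks to the membership sets.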
16214 // Look at the successors of every member of the predecessor to-do list.
16215 while (predCliqueToDo != nullptr)
16217 BlockListNode* node = predCliqueToDo;
16218 predCliqueToDo = node->m_next;
16219 BasicBlock* blk = node->m_blk;
16220 FreeBlockListNode(node);
16222 for (unsigned succNum = 0; succNum < blk->NumSucc(); succNum++)
16224 BasicBlock* succ = blk->GetSucc(succNum);
16225 // If it's not already in the clique, add it, and also add it
16226 // as a member of the successor "toDo" set.
16227 if (impSpillCliqueGetMember(SpillCliqueSucc, succ) == 0)
16229 callback->Visit(SpillCliqueSucc, succ);
16230 impSpillCliqueSetMember(SpillCliqueSucc, succ, 1);
16231 succCliqueToDo = new (this) BlockListNode(succ, succCliqueToDo);
16236 // Look at the predecessors of every member of the successor to-do list.
16237 while (succCliqueToDo != nullptr)
16239 BlockListNode* node = succCliqueToDo;
16240 succCliqueToDo = node->m_next;
16241 BasicBlock* blk = node->m_blk;
16242 FreeBlockListNode(node);
16244 for (BasicBlockList* pred = blk->bbCheapPreds; pred != nullptr; pred = pred->next)
16246 BasicBlock* predBlock = pred->block;
16247 // If it's not already in the clique, add it, and also add it
16248 // as a member of the predecessor "toDo" set.
16249 if (impSpillCliqueGetMember(SpillCliquePred, predBlock) == 0)
16251 callback->Visit(SpillCliquePred, predBlock);
16252 impSpillCliqueSetMember(SpillCliquePred, predBlock, 1);
16253 predCliqueToDo = new (this) BlockListNode(predBlock, predCliqueToDo);
16260 // If this fails, it means we didn't walk the spill clique properly and somehow managed
16261 // to miss walking back to include the predecessor we started from.
16262 // The most likely cause: missing or out-of-date bbPreds
16263 assert(impSpillCliqueGetMember(SpillCliquePred, block) != 0);
16266 void Compiler::SetSpillTempsBase::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk)
16268 if (predOrSucc == SpillCliqueSucc)
16270 assert(blk->bbStkTempsIn == NO_BASE_TMP); // Should not already be a member of a clique as a successor.
16271 blk->bbStkTempsIn = m_baseTmp;
16275 assert(predOrSucc == SpillCliquePred);
16276 assert(blk->bbStkTempsOut == NO_BASE_TMP); // Should not already be a member of a clique as a predecessor.
16277 blk->bbStkTempsOut = m_baseTmp;
16281 void Compiler::ReimportSpillClique::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk)
16283 // For Preds we could be a little smarter and just find the existing store
16284 // and re-type it/add a cast, but that is complicated and hopefully very rare, so
16285 // just re-import the whole block (just like we do for successors)
16287 if (((blk->bbFlags & BBF_IMPORTED) == 0) && (m_pComp->impGetPendingBlockMember(blk) == 0))
16289 // If we haven't imported this block and we're not going to (because it isn't on
16290 // the pending list) then just ignore it for now.
16292 // This block has either never been imported (EntryState == NULL) or it failed
16293 // verification. Neither state requires us to force it to be imported now.
16294 assert((blk->bbEntryState == nullptr) || (blk->bbFlags & BBF_FAILED_VERIFICATION));
16298 // For successors we have a valid verCurrentState, so just mark them for reimport
16299 // the 'normal' way
16300 // Unlike predecessors, we *DO* need to reimport the current block because the
16301 // initial import had the wrong entry state types.
16302 // Similarly, blocks that are currently on the pending list, still need to call
16303 // impImportBlockPending to fixup their entry state.
16304 if (predOrSucc == SpillCliqueSucc)
16306 m_pComp->impReimportMarkBlock(blk);
16308 // Set the current stack state to that of the blk->bbEntryState
16309 m_pComp->verResetCurrentState(blk, &m_pComp->verCurrentState);
16310 assert(m_pComp->verCurrentState.thisInitialized == blk->bbThisOnEntry());
16312 m_pComp->impImportBlockPending(blk);
16314 else if ((blk != m_pComp->compCurBB) && ((blk->bbFlags & BBF_IMPORTED) != 0))
16316 // As described above, we are only visiting predecessors so they can
16317 // add the appropriate casts, since we have already done that for the current
16318 // block, it does not need to be reimported.
16319 // Nor do we need to reimport blocks that are still pending, but not yet
16322 // For predecessors, we have no state to seed the EntryState, so we just have
16323 // to assume the existing one is correct.
16324 // If the block is also a successor, it will get the EntryState properly
16325 // updated when it is visited as a successor in the above "if" block.
16326 assert(predOrSucc == SpillCliquePred);
16327 m_pComp->impReimportBlockPending(blk);
16331 // Re-type the incoming lclVar nodes to match the varDsc.
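// For example (illustrative only): if a spill-clique walk widened a spill temp from
// TYP_INT to TYP_I_IMPL, a GT_LCL_VAR of that temp recorded in this block's entry
// stack still carries the old type; it is refreshed here from the lclVar's varDsc.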
16332 void Compiler::impRetypeEntryStateTemps(BasicBlock* blk)
16334 if (blk->bbEntryState != nullptr)
16336 EntryState* es = blk->bbEntryState;
16337 for (unsigned level = 0; level < es->esStackDepth; level++)
16339 GenTreePtr tree = es->esStack[level].val;
16340 if ((tree->gtOper == GT_LCL_VAR) || (tree->gtOper == GT_LCL_FLD))
16342 unsigned lclNum = tree->gtLclVarCommon.gtLclNum;
16343 noway_assert(lclNum < lvaCount);
16344 LclVarDsc* varDsc = lvaTable + lclNum;
16345 es->esStack[level].val->gtType = varDsc->TypeGet();
16351 unsigned Compiler::impGetSpillTmpBase(BasicBlock* block)
16353 if (block->bbStkTempsOut != NO_BASE_TMP)
16355 return block->bbStkTempsOut;
16361 printf("\n*************** In impGetSpillTmpBase(BB%02u)\n", block->bbNum);
16365 // Otherwise, choose one, and propagate to all members of the spill clique.
16366 // Grab enough temps for the whole stack.
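// SetSpillTempsBase::Visit (above) records this base in bbStkTempsIn for blocks
// reached as successors and in bbStkTempsOut for blocks reached as predecessors.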
16367 unsigned baseTmp = lvaGrabTemps(verCurrentState.esStackDepth DEBUGARG("IL Stack Entries"));
16368 SetSpillTempsBase callback(baseTmp);
16370 // We do *NOT* need to reset the SpillClique*Members because a block can only be the predecessor
16371 // to one spill clique, and similarly can only be the successor to one spill clique
16372 impWalkSpillCliqueFromPred(block, &callback);
16377 void Compiler::impReimportSpillClique(BasicBlock* block)
16382 printf("\n*************** In impReimportSpillClique(BB%02u)\n", block->bbNum);
16386 // If we get here, it is because this block is already part of a spill clique
16387 // and one predecessor had an outgoing live stack slot of type int, and this
16388 // block has an outgoing live stack slot of type native int.
16389 // We need to reset these before traversal because they have already been set
16390 // by the previous walk to determine all the members of the spill clique.
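// (Illustrative scenario only: one predecessor might leave an int constant on the
// stack while another leaves a native int, forcing the shared spill temp to widen
// and every member of the clique to be re-imported with the wider type.)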
16391 impInlineRoot()->impSpillCliquePredMembers.Reset();
16392 impInlineRoot()->impSpillCliqueSuccMembers.Reset();
16394 ReimportSpillClique callback(this);
16396 impWalkSpillCliqueFromPred(block, &callback);
16399 // Set the pre-state of "block" (which should not have a pre-state allocated) to
16400 // a copy of "srcState", cloning tree pointers as required.
16401 void Compiler::verInitBBEntryState(BasicBlock* block, EntryState* srcState)
16403 if (srcState->esStackDepth == 0 && srcState->thisInitialized == TIS_Bottom)
16405 block->bbEntryState = nullptr;
16409 block->bbEntryState = (EntryState*)compGetMemA(sizeof(EntryState));
16411 // block->bbEntryState.esRefcount = 1;
16413 block->bbEntryState->esStackDepth = srcState->esStackDepth;
16414 block->bbEntryState->thisInitialized = TIS_Bottom;
16416 if (srcState->esStackDepth > 0)
16418 block->bbSetStack(new (this, CMK_Unknown) StackEntry[srcState->esStackDepth]);
16419 unsigned stackSize = srcState->esStackDepth * sizeof(StackEntry);
16421 memcpy(block->bbEntryState->esStack, srcState->esStack, stackSize);
16422 for (unsigned level = 0; level < srcState->esStackDepth; level++)
16424 GenTreePtr tree = srcState->esStack[level].val;
16425 block->bbEntryState->esStack[level].val = gtCloneExpr(tree);
16429 if (verTrackObjCtorInitState)
16431 verSetThisInit(block, srcState->thisInitialized);
16437 void Compiler::verSetThisInit(BasicBlock* block, ThisInitState tis)
16439 assert(tis != TIS_Bottom); // Precondition.
16440 if (block->bbEntryState == nullptr)
16442 block->bbEntryState = new (this, CMK_Unknown) EntryState();
16445 block->bbEntryState->thisInitialized = tis;
16449 * Resets the current state to the state at the start of the basic block
16451 void Compiler::verResetCurrentState(BasicBlock* block, EntryState* destState)
16454 if (block->bbEntryState == nullptr)
16456 destState->esStackDepth = 0;
16457 destState->thisInitialized = TIS_Bottom;
16461 destState->esStackDepth = block->bbEntryState->esStackDepth;
16463 if (destState->esStackDepth > 0)
16465 unsigned stackSize = destState->esStackDepth * sizeof(StackEntry);
16467 memcpy(destState->esStack, block->bbStackOnEntry(), stackSize);
16470 destState->thisInitialized = block->bbThisOnEntry();
16475 ThisInitState BasicBlock::bbThisOnEntry()
16477 return bbEntryState ? bbEntryState->thisInitialized : TIS_Bottom;
16480 unsigned BasicBlock::bbStackDepthOnEntry()
16482 return (bbEntryState ? bbEntryState->esStackDepth : 0);
16485 void BasicBlock::bbSetStack(void* stackBuffer)
16487 assert(bbEntryState);
16488 assert(stackBuffer);
16489 bbEntryState->esStack = (StackEntry*)stackBuffer;
16492 StackEntry* BasicBlock::bbStackOnEntry()
16494 assert(bbEntryState);
16495 return bbEntryState->esStack;
16498 void Compiler::verInitCurrentState()
16500 verTrackObjCtorInitState = FALSE;
16501 verCurrentState.thisInitialized = TIS_Bottom;
16503 if (tiVerificationNeeded)
16505 // Track this ptr initialization
16506 if (!info.compIsStatic && (info.compFlags & CORINFO_FLG_CONSTRUCTOR) && lvaTable[0].lvVerTypeInfo.IsObjRef())
16508 verTrackObjCtorInitState = TRUE;
16509 verCurrentState.thisInitialized = TIS_Uninit;
16513 // initialize stack info
16515 verCurrentState.esStackDepth = 0;
16516 assert(verCurrentState.esStack != nullptr);
16518 // copy current state to entry state of first BB
16519 verInitBBEntryState(fgFirstBB, &verCurrentState);
16522 Compiler* Compiler::impInlineRoot()
16524 if (impInlineInfo == nullptr)
16530 return impInlineInfo->InlineRoot;
16534 BYTE Compiler::impSpillCliqueGetMember(SpillCliqueDir predOrSucc, BasicBlock* blk)
16536 if (predOrSucc == SpillCliquePred)
16538 return impInlineRoot()->impSpillCliquePredMembers.Get(blk->bbInd());
16542 assert(predOrSucc == SpillCliqueSucc);
16543 return impInlineRoot()->impSpillCliqueSuccMembers.Get(blk->bbInd());
16547 void Compiler::impSpillCliqueSetMember(SpillCliqueDir predOrSucc, BasicBlock* blk, BYTE val)
16549 if (predOrSucc == SpillCliquePred)
16551 impInlineRoot()->impSpillCliquePredMembers.Set(blk->bbInd(), val);
16555 assert(predOrSucc == SpillCliqueSucc);
16556 impInlineRoot()->impSpillCliqueSuccMembers.Set(blk->bbInd(), val);
16560 /*****************************************************************************
16562 * Convert the instrs ("import") into our internal format (trees). The
16563 * basic flowgraph has already been constructed and is passed in.
16566 void Compiler::impImport(BasicBlock* method)
16571 printf("*************** In impImport() for %s\n", info.compFullName);
16575 /* Allocate the stack contents */
16577 if (info.compMaxStack <= sizeof(impSmallStack) / sizeof(impSmallStack[0]))
16579 /* Use local variable, don't waste time allocating on the heap */
16581 impStkSize = sizeof(impSmallStack) / sizeof(impSmallStack[0]);
16582 verCurrentState.esStack = impSmallStack;
16586 impStkSize = info.compMaxStack;
16587 verCurrentState.esStack = new (this, CMK_ImpStack) StackEntry[impStkSize];
16590 // initialize the entry state at start of method
16591 verInitCurrentState();
16593 // Initialize stuff related to figuring "spill cliques" (see spec comment for impGetSpillTmpBase).
16594 Compiler* inlineRoot = impInlineRoot();
16595 if (this == inlineRoot) // These are only used on the root of the inlining tree.
16597 // We have initialized these previously, but to size 0. Make them larger.
16598 impPendingBlockMembers.Init(getAllocator(), fgBBNumMax * 2);
16599 impSpillCliquePredMembers.Init(getAllocator(), fgBBNumMax * 2);
16600 impSpillCliqueSuccMembers.Init(getAllocator(), fgBBNumMax * 2);
16602 inlineRoot->impPendingBlockMembers.Reset(fgBBNumMax * 2);
16603 inlineRoot->impSpillCliquePredMembers.Reset(fgBBNumMax * 2);
16604 inlineRoot->impSpillCliqueSuccMembers.Reset(fgBBNumMax * 2);
16605 impBlockListNodeFreeList = nullptr;
16608 impLastILoffsStmt = nullptr;
16609 impNestedStackSpill = false;
16611 impBoxTemp = BAD_VAR_NUM;
16613 impPendingList = impPendingFree = nullptr;
16615 /* Add the entry-point to the worker-list */
16617 // Skip leading internal blocks. There can be one as a leading scratch BB, and more
16618 // from EH normalization.
16619 // NOTE: It might be possible to always just put fgFirstBB on the pending list, and let everything else just fall out.
16621 for (; method->bbFlags & BBF_INTERNAL; method = method->bbNext)
16623 // Treat these as imported.
16624 assert(method->bbJumpKind == BBJ_NONE); // We assume all the leading ones are fallthrough.
16625 JITDUMP("Marking leading BBF_INTERNAL block BB%02u as BBF_IMPORTED\n", method->bbNum);
16626 method->bbFlags |= BBF_IMPORTED;
16629 impImportBlockPending(method);
16631 /* Import blocks in the worker-list until there are no more */
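// Each iteration pops a pending descriptor, restores the stack state it recorded,
// recycles the descriptor onto impPendingFree, and then either imports the block or,
// if verification already failed for it, converts it into a throw block.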
16633 while (impPendingList)
16635 /* Remove the entry at the front of the list */
16637 PendingDsc* dsc = impPendingList;
16638 impPendingList = impPendingList->pdNext;
16639 impSetPendingBlockMember(dsc->pdBB, 0);
16641 /* Restore the stack state */
16643 verCurrentState.thisInitialized = dsc->pdThisPtrInit;
16644 verCurrentState.esStackDepth = dsc->pdSavedStack.ssDepth;
16645 if (verCurrentState.esStackDepth)
16647 impRestoreStackState(&dsc->pdSavedStack);
16650 /* Add the entry to the free list for reuse */
16652 dsc->pdNext = impPendingFree;
16653 impPendingFree = dsc;
16655 /* Now import the block */
16657 if (dsc->pdBB->bbFlags & BBF_FAILED_VERIFICATION)
16660 #ifdef _TARGET_64BIT_
16661 // On AMD64, during verification we have to match JIT64 behavior since the VM is very tightly
16662 // coupled with the JIT64 IL Verification logic. Look inside verHandleVerificationFailure
16663 // method for further explanation on why we raise this exception instead of making the jitted
16664 // code throw the verification exception during execution.
16665 if (tiVerificationNeeded && opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IMPORT_ONLY))
16667 BADCODE("Basic block marked as not verifiable");
16670 #endif // _TARGET_64BIT_
16672 verConvertBBToThrowVerificationException(dsc->pdBB DEBUGARG(true));
16673 impEndTreeList(dsc->pdBB);
16678 impImportBlock(dsc->pdBB);
16680 if (compDonotInline())
16684 if (compIsForImportOnly() && !tiVerificationNeeded)
16692 if (verbose && info.compXcptnsCount)
16694 printf("\nAfter impImport() added block for try,catch,finally");
16695 fgDispBasicBlocks();
16699 // Used in impImportBlockPending() for STRESS_CHK_REIMPORT
16700 for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
16702 block->bbFlags &= ~BBF_VISITED;
16706 assert(!compIsForInlining() || !tiVerificationNeeded);
16709 // Checks if a typeinfo (usually stored in the type stack) is a struct.
16710 // The invariant here is that if it's not a ref or a method and has a class handle
16711 // it's a valuetype
16712 bool Compiler::impIsValueType(typeInfo* pTypeInfo)
16714 if (pTypeInfo && pTypeInfo->IsValueClassWithClsHnd())
16724 /*****************************************************************************
16725 * Check to see if the tree is the address of a local or
16726 the address of a field in a local.
16728 *lclVarTreeOut will contain the GT_LCL_VAR tree when it returns TRUE.
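// (Illustrative only: shapes such as ADDR(LCL_VAR) or ADDR(FIELD(...(LCL_VAR)...)),
// e.g. produced by ldloca / ldflda on a local struct, qualify; addresses rooted in a
// static field, where the field's object is null, do not.)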
16732 BOOL Compiler::impIsAddressInLocal(GenTreePtr tree, GenTreePtr* lclVarTreeOut)
16734 if (tree->gtOper != GT_ADDR)
16739 GenTreePtr op = tree->gtOp.gtOp1;
16740 while (op->gtOper == GT_FIELD)
16742 op = op->gtField.gtFldObj;
16743 if (op && op->gtOper == GT_ADDR) // Skip static fields where op will be NULL.
16745 op = op->gtOp.gtOp1;
16753 if (op->gtOper == GT_LCL_VAR)
16755 *lclVarTreeOut = op;
16764 //------------------------------------------------------------------------
16765 // impMakeDiscretionaryInlineObservations: make observations that help
16766 // determine the profitability of a discretionary inline
16769 // pInlineInfo -- InlineInfo for the inline, or null for the prejit root
16770 // inlineResult -- InlineResult accumulating information about this inline
16773 // If inlining or prejitting the root, this method also makes
16774 // various observations about the method that factor into inline
16775 // decisions. It sets `compNativeSizeEstimate` as a side effect.
16777 void Compiler::impMakeDiscretionaryInlineObservations(InlineInfo* pInlineInfo, InlineResult* inlineResult)
16779 assert(pInlineInfo != nullptr && compIsForInlining() || // Perform the actual inlining.
16780 pInlineInfo == nullptr && !compIsForInlining() // Calculate the static inlining hint for ngen.
16783 // If we're really inlining, we should just have one result in play.
16784 assert((pInlineInfo == nullptr) || (inlineResult == pInlineInfo->inlineResult));
16786 // If this is a "forceinline" method, the JIT probably shouldn't have gone
16787 // to the trouble of estimating the native code size. Even if it did, it
16788 // shouldn't be relying on the result of this method.
16789 assert(inlineResult->GetObservation() == InlineObservation::CALLEE_IS_DISCRETIONARY_INLINE);
16791 // Note if the caller contains NEWOBJ or NEWARR.
16792 Compiler* rootCompiler = impInlineRoot();
16794 if ((rootCompiler->optMethodFlags & OMF_HAS_NEWARRAY) != 0)
16796 inlineResult->Note(InlineObservation::CALLER_HAS_NEWARRAY);
16799 if ((rootCompiler->optMethodFlags & OMF_HAS_NEWOBJ) != 0)
16801 inlineResult->Note(InlineObservation::CALLER_HAS_NEWOBJ);
16804 bool calleeIsStatic = (info.compFlags & CORINFO_FLG_STATIC) != 0;
16805 bool isSpecialMethod = (info.compFlags & CORINFO_FLG_CONSTRUCTOR) != 0;
16807 if (isSpecialMethod)
16809 if (calleeIsStatic)
16811 inlineResult->Note(InlineObservation::CALLEE_IS_CLASS_CTOR);
16815 inlineResult->Note(InlineObservation::CALLEE_IS_INSTANCE_CTOR);
16818 else if (!calleeIsStatic)
16820 // Callee is an instance method.
16822 // Check if the callee has the same 'this' as the root.
16823 if (pInlineInfo != nullptr)
16825 GenTreePtr thisArg = pInlineInfo->iciCall->gtCall.gtCallObjp;
16827 bool isSameThis = impIsThis(thisArg);
16828 inlineResult->NoteBool(InlineObservation::CALLSITE_IS_SAME_THIS, isSameThis);
16832 // Note if the callee's class is a promotable struct
16833 if ((info.compClassAttr & CORINFO_FLG_VALUECLASS) != 0)
16835 lvaStructPromotionInfo structPromotionInfo;
16836 lvaCanPromoteStructType(info.compClassHnd, &structPromotionInfo, false);
16837 if (structPromotionInfo.canPromote)
16839 inlineResult->Note(InlineObservation::CALLEE_CLASS_PROMOTABLE);
16843 #ifdef FEATURE_SIMD
16845 // Note if this method has SIMD args or return value
16846 if (pInlineInfo != nullptr && pInlineInfo->hasSIMDTypeArgLocalOrReturn)
16848 inlineResult->Note(InlineObservation::CALLEE_HAS_SIMD);
16851 #endif // FEATURE_SIMD
16853 // Roughly classify callsite frequency.
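// The tiers assigned below, from strongest to weakest inlining boost: HOT (prejit
// root or maximally hot block), LOOP (backward-jump block, not directly recursive),
// WARM (profile-weighted block), RARE (run-rarely site or a cctor), BORING (rest).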
16854 InlineCallsiteFrequency frequency = InlineCallsiteFrequency::UNUSED;
16856 // If this is a prejit root, or a maximally hot block...
16857 if ((pInlineInfo == nullptr) || (pInlineInfo->iciBlock->bbWeight >= BB_MAX_WEIGHT))
16859 frequency = InlineCallsiteFrequency::HOT;
16861 // No training data. Look for loop-like things.
16862 // We consider a recursive call loop-like. Do not give the inlining boost to the method itself.
16863 // However, give it to things nearby.
16864 else if ((pInlineInfo->iciBlock->bbFlags & BBF_BACKWARD_JUMP) &&
16865 (pInlineInfo->fncHandle != pInlineInfo->inlineCandidateInfo->ilCallerHandle))
16867 frequency = InlineCallsiteFrequency::LOOP;
16869 else if ((pInlineInfo->iciBlock->bbFlags & BBF_PROF_WEIGHT) && (pInlineInfo->iciBlock->bbWeight > BB_ZERO_WEIGHT))
16871 frequency = InlineCallsiteFrequency::WARM;
16873 // Now modify the multiplier based on where we're called from.
16874 else if (pInlineInfo->iciBlock->isRunRarely() || ((info.compFlags & FLG_CCTOR) == FLG_CCTOR))
16876 frequency = InlineCallsiteFrequency::RARE;
16880 frequency = InlineCallsiteFrequency::BORING;
16883 // Also capture the block weight of the call site. In the prejit
16884 // root case, assume there's some hot call site for this method.
16885 unsigned weight = 0;
16887 if (pInlineInfo != nullptr)
16889 weight = pInlineInfo->iciBlock->bbWeight;
16893 weight = BB_MAX_WEIGHT;
16896 inlineResult->NoteInt(InlineObservation::CALLSITE_FREQUENCY, static_cast<int>(frequency));
16897 inlineResult->NoteInt(InlineObservation::CALLSITE_WEIGHT, static_cast<int>(weight));
16900 /*****************************************************************************
16901 This method makes STATIC inlining decision based on the IL code.
16902 It should not make any inlining decision based on the context.
16903 If forceInline is true, then the inlining decision should not depend on
16904 performance heuristics (code size, etc.).
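  The hard requirements checked here: no EH clauses, a non-empty body, no managed
  varargs, and fixed limits on the number of locals and arguments. IL size, maxstack
  and the forceinline bit are only reported to the inline policy, which may still
  fail the inline based on them.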
16907 void Compiler::impCanInlineIL(CORINFO_METHOD_HANDLE fncHandle,
16908 CORINFO_METHOD_INFO* methInfo,
16910 InlineResult* inlineResult)
16912 unsigned codeSize = methInfo->ILCodeSize;
16914 // We shouldn't have made up our minds yet...
16915 assert(!inlineResult->IsDecided());
16917 if (methInfo->EHcount)
16919 inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_EH);
16923 if ((methInfo->ILCode == nullptr) || (codeSize == 0))
16925 inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NO_BODY);
16929 // For now we don't inline varargs (import code can't handle it)
16931 if (methInfo->args.isVarArg())
16933 inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS);
16937 // Reject if it has too many locals.
16938 // This is currently an implementation limit due to fixed-size arrays in the
16939 // inline info, rather than a performance heuristic.
16941 inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_LOCALS, methInfo->locals.numArgs);
16943 if (methInfo->locals.numArgs > MAX_INL_LCLS)
16945 inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_LOCALS);
16949 // Make sure there aren't too many arguments.
16950 // This is currently an implementation limit due to fixed-size arrays in the
16951 // inline info, rather than a performance heuristic.
16953 inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_ARGUMENTS, methInfo->args.numArgs);
16955 if (methInfo->args.numArgs > MAX_INL_ARGS)
16957 inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_ARGUMENTS);
16961 // Note force inline state
16963 inlineResult->NoteBool(InlineObservation::CALLEE_IS_FORCE_INLINE, forceInline);
16965 // Note IL code size
16967 inlineResult->NoteInt(InlineObservation::CALLEE_IL_CODE_SIZE, codeSize);
16969 if (inlineResult->IsFailure())
16974 // Make sure maxstack is not too big
16976 inlineResult->NoteInt(InlineObservation::CALLEE_MAXSTACK, methInfo->maxStack);
16978 if (inlineResult->IsFailure())
16984 /*****************************************************************************
16987 void Compiler::impCheckCanInline(GenTreePtr call,
16988 CORINFO_METHOD_HANDLE fncHandle,
16990 CORINFO_CONTEXT_HANDLE exactContextHnd,
16991 InlineCandidateInfo** ppInlineCandidateInfo,
16992 InlineResult* inlineResult)
16994 // Either EE or JIT might throw exceptions below.
16995 // If that happens, just don't inline the method.
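// The interesting work happens under eeRunWithErrorTrap below; the lambda it runs
// cannot capture, so all inputs and outputs are threaded through the local 'param'
// struct instead.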
17001 CORINFO_METHOD_HANDLE fncHandle;
17003 CORINFO_CONTEXT_HANDLE exactContextHnd;
17004 InlineResult* result;
17005 InlineCandidateInfo** ppInlineCandidateInfo;
17006 } param = {nullptr};
17008 param.pThis = this;
17010 param.fncHandle = fncHandle;
17011 param.methAttr = methAttr;
17012 param.exactContextHnd = (exactContextHnd != nullptr) ? exactContextHnd : MAKE_METHODCONTEXT(fncHandle);
17013 param.result = inlineResult;
17014 param.ppInlineCandidateInfo = ppInlineCandidateInfo;
17016 bool success = eeRunWithErrorTrap<Param>(
17017 [](Param* pParam) {
17018 DWORD dwRestrictions = 0;
17019 CorInfoInitClassResult initClassResult;
17022 const char* methodName;
17023 const char* className;
17024 methodName = pParam->pThis->eeGetMethodName(pParam->fncHandle, &className);
17026 if (JitConfig.JitNoInline())
17028 pParam->result->NoteFatal(InlineObservation::CALLEE_IS_JIT_NOINLINE);
17033 /* Try to get the code address/size for the method */
17035 CORINFO_METHOD_INFO methInfo;
17036 if (!pParam->pThis->info.compCompHnd->getMethodInfo(pParam->fncHandle, &methInfo))
17038 pParam->result->NoteFatal(InlineObservation::CALLEE_NO_METHOD_INFO);
17043 forceInline = !!(pParam->methAttr & CORINFO_FLG_FORCEINLINE);
17045 pParam->pThis->impCanInlineIL(pParam->fncHandle, &methInfo, forceInline, pParam->result);
17047 if (pParam->result->IsFailure())
17049 assert(pParam->result->IsNever());
17053 // Speculatively check if initClass() can be done.
17054 // If it can be done, we will try to inline the method. If inlining
17055 // succeeds, then we will do the non-speculative initClass() and commit it.
17056 // If this speculative call to initClass() fails, there is no point
17057 // trying to inline this method.
17059 pParam->pThis->info.compCompHnd->initClass(nullptr /* field */, pParam->fncHandle /* method */,
17060 pParam->exactContextHnd /* context */,
17061 TRUE /* speculative */);
17063 if (initClassResult & CORINFO_INITCLASS_DONT_INLINE)
17065 pParam->result->NoteFatal(InlineObservation::CALLSITE_CLASS_INIT_FAILURE_SPEC);
17069 // Give the EE the final say in whether to inline or not.
17070 // This should be last since for verifiable code, this can be expensive
17072 /* VM Inline check also ensures that the method is verifiable if needed */
17073 CorInfoInline vmResult;
17074 vmResult = pParam->pThis->info.compCompHnd->canInline(pParam->pThis->info.compMethodHnd, pParam->fncHandle,
17077 if (vmResult == INLINE_FAIL)
17079 pParam->result->NoteFatal(InlineObservation::CALLSITE_IS_VM_NOINLINE);
17081 else if (vmResult == INLINE_NEVER)
17083 pParam->result->NoteFatal(InlineObservation::CALLEE_IS_VM_NOINLINE);
17086 if (pParam->result->IsFailure())
17088 // Make sure not to report this one. It was already reported by the VM.
17089 pParam->result->SetReported();
17093 // check for unsupported inlining restrictions
17094 assert((dwRestrictions & ~(INLINE_RESPECT_BOUNDARY | INLINE_NO_CALLEE_LDSTR | INLINE_SAME_THIS)) == 0);
17096 if (dwRestrictions & INLINE_SAME_THIS)
17098 GenTreePtr thisArg = pParam->call->gtCall.gtCallObjp;
17101 if (!pParam->pThis->impIsThis(thisArg))
17103 pParam->result->NoteFatal(InlineObservation::CALLSITE_REQUIRES_SAME_THIS);
17108 /* Get the method properties */
17110 CORINFO_CLASS_HANDLE clsHandle;
17111 clsHandle = pParam->pThis->info.compCompHnd->getMethodClass(pParam->fncHandle);
17113 clsAttr = pParam->pThis->info.compCompHnd->getClassAttribs(clsHandle);
17115 /* Get the return type */
17117 var_types fncRetType;
17118 fncRetType = pParam->call->TypeGet();
17121 var_types fncRealRetType;
17122 fncRealRetType = JITtype2varType(methInfo.args.retType);
17124 assert((genActualType(fncRealRetType) == genActualType(fncRetType)) ||
17125 // <BUGNUM> VSW 288602 </BUGNUM>
17126 // In case of IJW, we allow to assign a native pointer to a BYREF.
17127 (fncRetType == TYP_BYREF && methInfo.args.retType == CORINFO_TYPE_PTR) ||
17128 (varTypeIsStruct(fncRetType) && (fncRealRetType == TYP_STRUCT)));
17132 // Allocate an InlineCandidateInfo structure
17134 InlineCandidateInfo* pInfo;
17135 pInfo = new (pParam->pThis, CMK_Inlining) InlineCandidateInfo;
17137 pInfo->dwRestrictions = dwRestrictions;
17138 pInfo->methInfo = methInfo;
17139 pInfo->methAttr = pParam->methAttr;
17140 pInfo->clsHandle = clsHandle;
17141 pInfo->clsAttr = clsAttr;
17142 pInfo->fncRetType = fncRetType;
17143 pInfo->exactContextHnd = pParam->exactContextHnd;
17144 pInfo->ilCallerHandle = pParam->pThis->info.compMethodHnd;
17145 pInfo->initClassResult = initClassResult;
17147 *(pParam->ppInlineCandidateInfo) = pInfo;
17154 param.result->NoteFatal(InlineObservation::CALLSITE_COMPILATION_ERROR);
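// Records per-argument facts that drive how impInlineFetchArg will substitute each
// argument: whether it is invariant (constant or address of a local), a caller
// local, a byref to a struct local, has global refs or other side effects, or has
// been exposed via ldarga. GT_MKREFANY args and a constant-null 'this' abort the
// inline outright.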
17158 void Compiler::impInlineRecordArgInfo(InlineInfo* pInlineInfo,
17159 GenTreePtr curArgVal,
17161 InlineResult* inlineResult)
17163 InlArgInfo* inlCurArgInfo = &pInlineInfo->inlArgInfo[argNum];
17165 if (curArgVal->gtOper == GT_MKREFANY)
17167 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_IS_MKREFANY);
17171 inlCurArgInfo->argNode = curArgVal;
17173 GenTreePtr lclVarTree;
17174 if (impIsAddressInLocal(curArgVal, &lclVarTree) && varTypeIsStruct(lclVarTree))
17176 inlCurArgInfo->argIsByRefToStructLocal = true;
17177 #ifdef FEATURE_SIMD
17178 if (lvaTable[lclVarTree->AsLclVarCommon()->gtLclNum].lvSIMDType)
17180 pInlineInfo->hasSIMDTypeArgLocalOrReturn = true;
17182 #endif // FEATURE_SIMD
17185 if (curArgVal->gtFlags & GTF_ALL_EFFECT)
17187 inlCurArgInfo->argHasGlobRef = (curArgVal->gtFlags & GTF_GLOB_REF) != 0;
17188 inlCurArgInfo->argHasSideEff = (curArgVal->gtFlags & (GTF_ALL_EFFECT & ~GTF_GLOB_REF)) != 0;
17191 if (curArgVal->gtOper == GT_LCL_VAR)
17193 inlCurArgInfo->argIsLclVar = true;
17195 /* Remember the "original" argument number */
17196 curArgVal->gtLclVar.gtLclILoffs = argNum;
17199 if ((curArgVal->OperKind() & GTK_CONST) ||
17200 ((curArgVal->gtOper == GT_ADDR) && (curArgVal->gtOp.gtOp1->gtOper == GT_LCL_VAR)))
17202 inlCurArgInfo->argIsInvariant = true;
17203 if (inlCurArgInfo->argIsThis && (curArgVal->gtOper == GT_CNS_INT) && (curArgVal->gtIntCon.gtIconVal == 0))
17205 /* Abort, but do not mark as not inlinable */
17206 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_HAS_NULL_THIS);
17211 if (!inlCurArgInfo->argIsInvariant && gtHasLocalsWithAddrOp(curArgVal))
17213 inlCurArgInfo->argHasLdargaOp = true;
17219 if (inlCurArgInfo->argIsThis)
17221 printf("thisArg:");
17225 printf("\nArgument #%u:", argNum);
17227 if (inlCurArgInfo->argIsLclVar)
17229 printf(" is a local var");
17231 if (inlCurArgInfo->argIsInvariant)
17233 printf(" is a constant");
17235 if (inlCurArgInfo->argHasGlobRef)
17237 printf(" has global refs");
17239 if (inlCurArgInfo->argHasSideEff)
17241 printf(" has side effects");
17243 if (inlCurArgInfo->argHasLdargaOp)
17245 printf(" has ldarga effect");
17247 if (inlCurArgInfo->argHasStargOp)
17249 printf(" has starg effect");
17251 if (inlCurArgInfo->argIsByRefToStructLocal)
17253 printf(" is byref to a struct local");
17257 gtDispTree(curArgVal);
17263 /*****************************************************************************
17267 void Compiler::impInlineInitVars(InlineInfo* pInlineInfo)
17269 assert(!compIsForInlining());
17271 GenTreePtr call = pInlineInfo->iciCall;
17272 CORINFO_METHOD_INFO* methInfo = &pInlineInfo->inlineCandidateInfo->methInfo;
17273 unsigned clsAttr = pInlineInfo->inlineCandidateInfo->clsAttr;
17274 InlArgInfo* inlArgInfo = pInlineInfo->inlArgInfo;
17275 InlLclVarInfo* lclVarInfo = pInlineInfo->lclVarInfo;
17276 InlineResult* inlineResult = pInlineInfo->inlineResult;
17278 const bool hasRetBuffArg = impMethodInfo_hasRetBuffArg(methInfo);
17280 /* init the argument struct */
17282 memset(inlArgInfo, 0, (MAX_INL_ARGS + 1) * sizeof(inlArgInfo[0]));
17284 /* Get hold of the 'this' pointer and the argument list proper */
17286 GenTreePtr thisArg = call->gtCall.gtCallObjp;
17287 GenTreePtr argList = call->gtCall.gtCallArgs;
17288 unsigned argCnt = 0; // Count of the arguments
17290 assert((methInfo->args.hasThis()) == (thisArg != nullptr));
17294 inlArgInfo[0].argIsThis = true;
17296 impInlineRecordArgInfo(pInlineInfo, thisArg, argCnt, inlineResult);
17298 if (inlineResult->IsFailure())
17303 /* Increment the argument count */
17307 /* Record some information about each of the arguments */
17308 bool hasTypeCtxtArg = (methInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) != 0;
17310 #if USER_ARGS_COME_LAST
17311 unsigned typeCtxtArg = thisArg ? 1 : 0;
17312 #else // USER_ARGS_COME_LAST
17313 unsigned typeCtxtArg = methInfo->args.totalILArgs();
17314 #endif // USER_ARGS_COME_LAST
17316 for (GenTreePtr argTmp = argList; argTmp; argTmp = argTmp->gtOp.gtOp2)
17318 if (argTmp == argList && hasRetBuffArg)
17323 // Ignore the type context argument
17324 if (hasTypeCtxtArg && (argCnt == typeCtxtArg))
17326 typeCtxtArg = 0xFFFFFFFF;
17330 assert(argTmp->gtOper == GT_LIST);
17331 GenTreePtr argVal = argTmp->gtOp.gtOp1;
17333 impInlineRecordArgInfo(pInlineInfo, argVal, argCnt, inlineResult);
17335 if (inlineResult->IsFailure())
17340 /* Increment the argument count */
17344 /* Make sure we got the arg number right */
17345 assert(argCnt == methInfo->args.totalILArgs());
17347 #ifdef FEATURE_SIMD
17348 bool foundSIMDType = pInlineInfo->hasSIMDTypeArgLocalOrReturn;
17349 #endif // FEATURE_SIMD
17351 /* We have typeless opcodes, get type information from the signature */
17357 if (clsAttr & CORINFO_FLG_VALUECLASS)
17359 sigType = TYP_BYREF;
17366 lclVarInfo[0].lclVerTypeInfo = verMakeTypeInfo(pInlineInfo->inlineCandidateInfo->clsHandle);
17367 lclVarInfo[0].lclHasLdlocaOp = false;
17369 #ifdef FEATURE_SIMD
17370 // We always want to check isSIMDClass, since we want to set foundSIMDType (to increase
17371 // the inlining multiplier) for anything in that assembly.
17372 // But we only need to normalize it if it is a TYP_STRUCT
17373 // (which we need to do even if we have already set foundSIMDType).
17374 if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDClass(&(lclVarInfo[0].lclVerTypeInfo)))
17376 if (sigType == TYP_STRUCT)
17378 sigType = impNormStructType(lclVarInfo[0].lclVerTypeInfo.GetClassHandle());
17380 foundSIMDType = true;
17382 #endif // FEATURE_SIMD
17383 lclVarInfo[0].lclTypeInfo = sigType;
17385 assert(varTypeIsGC(thisArg->gtType) || // "this" is managed
17386 (thisArg->gtType == TYP_I_IMPL && // "this" is unmgd but the method's class doesn't care
17387 (clsAttr & CORINFO_FLG_VALUECLASS)));
17389 if (genActualType(thisArg->gtType) != genActualType(sigType))
17391 if (sigType == TYP_REF)
17393 /* The argument cannot be bashed into a ref (see bug 750871) */
17394 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_REF);
17398 /* This can only happen with byrefs <-> ints/shorts */
17400 assert(genActualType(sigType) == TYP_I_IMPL || sigType == TYP_BYREF);
17401 assert(genActualType(thisArg->gtType) == TYP_I_IMPL || thisArg->gtType == TYP_BYREF);
17403 if (sigType == TYP_BYREF)
17405 lclVarInfo[0].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
17407 else if (thisArg->gtType == TYP_BYREF)
17409 assert(sigType == TYP_I_IMPL);
17411 /* If possible change the BYREF to an int */
17412 if (thisArg->IsVarAddr())
17414 thisArg->gtType = TYP_I_IMPL;
17415 lclVarInfo[0].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
17419 /* Arguments 'int <- byref' cannot be bashed */
17420 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_INT);
17427 /* Init the types of the arguments and make sure the types
17428 * from the trees match the types in the signature */
17430 CORINFO_ARG_LIST_HANDLE argLst;
17431 argLst = methInfo->args.args;
17434 for (i = (thisArg ? 1 : 0); i < argCnt; i++, argLst = info.compCompHnd->getArgNext(argLst))
17436 var_types sigType = (var_types)eeGetArgType(argLst, &methInfo->args);
17438 lclVarInfo[i].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->args, argLst);
17440 #ifdef FEATURE_SIMD
17441 if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDClass(&(lclVarInfo[i].lclVerTypeInfo)))
17443 // If this is a SIMD class (i.e. in the SIMD assembly), then we will consider that we've
17444 // found a SIMD type, even if this may not be a type we recognize (the assumption is that
17445 // it is likely to use a SIMD type, and therefore we want to increase the inlining multiplier).
17446 foundSIMDType = true;
17447 if (sigType == TYP_STRUCT)
17449 var_types structType = impNormStructType(lclVarInfo[i].lclVerTypeInfo.GetClassHandle());
17450 sigType = structType;
17453 #endif // FEATURE_SIMD
17455 lclVarInfo[i].lclTypeInfo = sigType;
17456 lclVarInfo[i].lclHasLdlocaOp = false;
17458 /* Does the tree type match the signature type? */
17460 GenTreePtr inlArgNode = inlArgInfo[i].argNode;
17462 if (sigType != inlArgNode->gtType)
17464 /* In valid IL, this can only happen for short integer types or byrefs <-> [native] ints,
17465 but in bad IL cases with caller-callee signature mismatches we can see other types.
17466 Intentionally reject cases with mismatches so the jit is more flexible when
17467 encountering bad IL. */
17469 bool isPlausibleTypeMatch = (genActualType(sigType) == genActualType(inlArgNode->gtType)) ||
17470 (genActualTypeIsIntOrI(sigType) && inlArgNode->gtType == TYP_BYREF) ||
17471 (sigType == TYP_BYREF && genActualTypeIsIntOrI(inlArgNode->gtType));
17473 if (!isPlausibleTypeMatch)
17475 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_TYPES_INCOMPATIBLE);
17479 /* Is it a narrowing or widening cast?
17480 * Widening casts are ok since the value computed is already
17481 * normalized to an int (on the IL stack) */
17483 if (genTypeSize(inlArgNode->gtType) >= genTypeSize(sigType))
17485 if (sigType == TYP_BYREF)
17487 lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
17489 else if (inlArgNode->gtType == TYP_BYREF)
17491 assert(varTypeIsIntOrI(sigType));
17493 /* If possible bash the BYREF to an int */
17494 if (inlArgNode->IsVarAddr())
17496 inlArgNode->gtType = TYP_I_IMPL;
17497 lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
17501 /* Arguments 'int <- byref' cannot be changed */
17502 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_INT);
17506 else if (genTypeSize(sigType) < EA_PTRSIZE)
17508 /* Narrowing cast */
17510 if (inlArgNode->gtOper == GT_LCL_VAR &&
17511 !lvaTable[inlArgNode->gtLclVarCommon.gtLclNum].lvNormalizeOnLoad() &&
17512 sigType == lvaGetRealType(inlArgNode->gtLclVarCommon.gtLclNum))
17514 /* We don't need to insert a cast here as the variable
17515 was assigned a normalized value of the right type */
17520 inlArgNode = inlArgInfo[i].argNode = gtNewCastNode(TYP_INT, inlArgNode, sigType);
17522 inlArgInfo[i].argIsLclVar = false;
17524 /* Try to fold the node in case we have constant arguments */
17526 if (inlArgInfo[i].argIsInvariant)
17528 inlArgNode = gtFoldExprConst(inlArgNode);
17529 inlArgInfo[i].argNode = inlArgNode;
17530 assert(inlArgNode->OperIsConst());
17533 #ifdef _TARGET_64BIT_
17534 else if (genTypeSize(genActualType(inlArgNode->gtType)) < genTypeSize(sigType))
17536 // This should only happen for int -> native int widening
17537 inlArgNode = inlArgInfo[i].argNode = gtNewCastNode(genActualType(sigType), inlArgNode, sigType);
17539 inlArgInfo[i].argIsLclVar = false;
17541 /* Try to fold the node in case we have constant arguments */
17543 if (inlArgInfo[i].argIsInvariant)
17545 inlArgNode = gtFoldExprConst(inlArgNode);
17546 inlArgInfo[i].argNode = inlArgNode;
17547 assert(inlArgNode->OperIsConst());
17550 #endif // _TARGET_64BIT_
17555 /* Init the types of the local variables */
17557 CORINFO_ARG_LIST_HANDLE localsSig;
17558 localsSig = methInfo->locals.args;
17560 for (i = 0; i < methInfo->locals.numArgs; i++)
17563 var_types type = (var_types)eeGetArgType(localsSig, &methInfo->locals, &isPinned);
17565 lclVarInfo[i + argCnt].lclHasLdlocaOp = false;
17566 lclVarInfo[i + argCnt].lclIsPinned = isPinned;
17567 lclVarInfo[i + argCnt].lclTypeInfo = type;
17571 // Pinned locals may cause inlines to fail.
17572 inlineResult->Note(InlineObservation::CALLEE_HAS_PINNED_LOCALS);
17573 if (inlineResult->IsFailure())
17579 lclVarInfo[i + argCnt].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->locals, localsSig);
17581 // If this local is a struct type with GC fields, inform the inliner. It may choose to bail
17582 // out on the inline.
17583 if (type == TYP_STRUCT)
17585 CORINFO_CLASS_HANDLE lclHandle = lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle();
17586 DWORD typeFlags = info.compCompHnd->getClassAttribs(lclHandle);
17587 if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0)
17589 inlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT);
17590 if (inlineResult->IsFailure())
17595 // Do further notification in the case where the call site is rare; some policies do
17596 // not track the relative hotness of call sites for "always" inline cases.
17597 if (pInlineInfo->iciBlock->isRunRarely())
17599 inlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT);
17600 if (inlineResult->IsFailure())
17609 localsSig = info.compCompHnd->getArgNext(localsSig);
17611 #ifdef FEATURE_SIMD
17612 if ((!foundSIMDType || (type == TYP_STRUCT)) && isSIMDClass(&(lclVarInfo[i + argCnt].lclVerTypeInfo)))
17614 foundSIMDType = true;
17615 if (featureSIMD && type == TYP_STRUCT)
17617 var_types structType = impNormStructType(lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle());
17618 lclVarInfo[i + argCnt].lclTypeInfo = structType;
17621 #endif // FEATURE_SIMD
17624 #ifdef FEATURE_SIMD
17625 if (!foundSIMDType && (call->AsCall()->gtRetClsHnd != nullptr) && isSIMDClass(call->AsCall()->gtRetClsHnd))
17627 foundSIMDType = true;
17629 pInlineInfo->hasSIMDTypeArgLocalOrReturn = foundSIMDType;
17630 #endif // FEATURE_SIMD
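// Maps an inlinee local to a temp in the inliner's frame, allocating the temp lazily
// on first use and caching the mapping in impInlineInfo->lclTmpNum.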
17633 unsigned Compiler::impInlineFetchLocal(unsigned lclNum DEBUGARG(const char* reason))
17635 assert(compIsForInlining());
17637 unsigned tmpNum = impInlineInfo->lclTmpNum[lclNum];
17639 if (tmpNum == BAD_VAR_NUM)
17641 var_types lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
17643 // The lifetime of this local might span multiple BBs.
17644 // So it is a long lifetime local.
17645 impInlineInfo->lclTmpNum[lclNum] = tmpNum = lvaGrabTemp(false DEBUGARG(reason));
17647 lvaTable[tmpNum].lvType = lclTyp;
17648 if (impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclHasLdlocaOp)
17650 lvaTable[tmpNum].lvHasLdAddrOp = 1;
17653 if (impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclIsPinned)
17655 lvaTable[tmpNum].lvPinned = 1;
17657 if (!impInlineInfo->hasPinnedLocals)
17659 // If the inlinee returns a value, use a spill temp
17660 // for the return value to ensure that even in case
17661 // where the return expression refers to one of the
17662 // pinned locals, we can unpin the local right after
17663 // the inlined method body.
17664 if ((info.compRetNativeType != TYP_VOID) && (lvaInlineeReturnSpillTemp == BAD_VAR_NUM))
17666 lvaInlineeReturnSpillTemp =
17667 lvaGrabTemp(false DEBUGARG("Inline candidate pinned local return spill temp"));
17668 lvaTable[lvaInlineeReturnSpillTemp].lvType = info.compRetNativeType;
17672 impInlineInfo->hasPinnedLocals = true;
17675 if (impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclVerTypeInfo.IsStruct())
17677 if (varTypeIsStruct(lclTyp))
17679 lvaSetStruct(tmpNum,
17680 impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclVerTypeInfo.GetClassHandle(),
17681 true /* unsafe value cls check */);
17685 // This is a wrapped primitive. Make sure the verstate knows that
17686 lvaTable[tmpNum].lvVerTypeInfo =
17687 impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclVerTypeInfo;
17695 // A method used to return the GenTree (usually a GT_LCL_VAR) representing the arguments of the inlined method.
17696 // Only use this method for the arguments of the inlinee method.
17697 // !!! Do not use it for the locals of the inlinee method. !!!!
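// Summary of the cases handled below: invariant arguments are cloned in place,
// caller locals are re-used via a fresh GT_LCL_VAR node, byrefs to struct locals are
// cloned as-is, and everything else is evaluated into (and later read back from) an
// inlinee argument temp.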
17699 GenTreePtr Compiler::impInlineFetchArg(unsigned lclNum, InlArgInfo* inlArgInfo, InlLclVarInfo* lclVarInfo)
17701 /* Get the argument type */
17702 var_types lclTyp = lclVarInfo[lclNum].lclTypeInfo;
17704 GenTreePtr op1 = nullptr;
17706 // constant or address of local
17707 if (inlArgInfo[lclNum].argIsInvariant && !inlArgInfo[lclNum].argHasLdargaOp && !inlArgInfo[lclNum].argHasStargOp)
17709 /* Clone the constant. Note that we cannot directly use argNode
17710 in the trees even if inlArgInfo[lclNum].argIsUsed==false as this
17711 would introduce aliasing between inlArgInfo[].argNode and
17712 impInlineExpr. Then gtFoldExpr() could change it, causing further
17713 references to the argument working off of the bashed copy. */
17715 op1 = gtCloneExpr(inlArgInfo[lclNum].argNode);
17716 PREFIX_ASSUME(op1 != nullptr);
17717 inlArgInfo[lclNum].argTmpNum = (unsigned)-1; // illegal temp
17719 else if (inlArgInfo[lclNum].argIsLclVar && !inlArgInfo[lclNum].argHasLdargaOp && !inlArgInfo[lclNum].argHasStargOp)
17721 /* Argument is a local variable (of the caller)
17722 * Can we re-use the passed argument node? */
17724 op1 = inlArgInfo[lclNum].argNode;
17725 inlArgInfo[lclNum].argTmpNum = op1->gtLclVarCommon.gtLclNum;
17727 if (inlArgInfo[lclNum].argIsUsed)
17729 assert(op1->gtOper == GT_LCL_VAR);
17730 assert(lclNum == op1->gtLclVar.gtLclILoffs);
17732 if (!lvaTable[op1->gtLclVarCommon.gtLclNum].lvNormalizeOnLoad())
17734 lclTyp = genActualType(lclTyp);
17737 /* Create a new lcl var node - remember the argument lclNum */
17738 op1 = gtNewLclvNode(op1->gtLclVarCommon.gtLclNum, lclTyp, op1->gtLclVar.gtLclILoffs);
17741 else if (inlArgInfo[lclNum].argIsByRefToStructLocal && !inlArgInfo[lclNum].argHasStargOp)
17743 /* Argument is a by-ref address to a struct, a normed struct, or its field.
17744 In these cases, don't spill the byref to a local, simply clone the tree and use it.
17745 This way we will increase the chance for this byref to be optimized away by
17746 a subsequent "dereference" operation.
17748 From Dev11 bug #139955: Argument node can also be TYP_I_IMPL if we've bashed the tree
17749 (in impInlineInitVars()), if the arg has argHasLdargaOp as well as argIsByRefToStructLocal.
17750 For example, if the caller is:
17751 ldloca.s V_1 // V_1 is a local struct
17752 call void Test.ILPart::RunLdargaOnPointerArg(int32*)
17753 and the callee being inlined has:
17754 .method public static void RunLdargaOnPointerArg(int32* ptrToInts) cil managed
17756 call void Test.FourInts::NotInlined_SetExpectedValuesThroughPointerToPointer(int32**)
17757 then we change the argument tree (of "ldloca.s V_1") to TYP_I_IMPL to match the callee signature. We'll
17758 soon afterwards reject the inlining anyway, since the tree we return isn't a GT_LCL_VAR.
17760 assert(inlArgInfo[lclNum].argNode->TypeGet() == TYP_BYREF ||
17761 inlArgInfo[lclNum].argNode->TypeGet() == TYP_I_IMPL);
17762 op1 = gtCloneExpr(inlArgInfo[lclNum].argNode);
17766 /* Argument is a complex expression - it must be evaluated into a temp */
17768 if (inlArgInfo[lclNum].argHasTmp)
17770 assert(inlArgInfo[lclNum].argIsUsed);
17771 assert(inlArgInfo[lclNum].argTmpNum < lvaCount);
17773 /* Create a new lcl var node - remember the argument lclNum */
17774 op1 = gtNewLclvNode(inlArgInfo[lclNum].argTmpNum, genActualType(lclTyp));
17776 /* This is the second or later use of the this argument,
17777 so we have to use the temp (instead of the actual arg) */
17778 inlArgInfo[lclNum].argBashTmpNode = nullptr;
17782 /* First time use */
17783 assert(inlArgInfo[lclNum].argIsUsed == false);
17785 /* Reserve a temp for the expression.
17786 * Use a large size node as we may change it later */
17788 unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Inlining Arg"));
17790 lvaTable[tmpNum].lvType = lclTyp;
17791 assert(lvaTable[tmpNum].lvAddrExposed == 0);
17792 if (inlArgInfo[lclNum].argHasLdargaOp)
17794 lvaTable[tmpNum].lvHasLdAddrOp = 1;
17797 if (lclVarInfo[lclNum].lclVerTypeInfo.IsStruct())
17799 if (varTypeIsStruct(lclTyp))
17801 lvaSetStruct(tmpNum, impInlineInfo->lclVarInfo[lclNum].lclVerTypeInfo.GetClassHandle(),
17802 true /* unsafe value cls check */);
17806 // This is a wrapped primitive. Make sure the verstate knows that
17807 lvaTable[tmpNum].lvVerTypeInfo = impInlineInfo->lclVarInfo[lclNum].lclVerTypeInfo;
17811 inlArgInfo[lclNum].argHasTmp = true;
17812 inlArgInfo[lclNum].argTmpNum = tmpNum;
17814 // If we require strict exception order, then arguments must
17815 // be evaluated in sequence before the body of the inlined method.
17816 // So we need to evaluate them to a temp.
17817 // Also, if arguments have global references, we need to
17818 // evaluate them to a temp before the inlined body as the
17819 // inlined body may be modifying the global ref.
17820 // TODO-1stClassStructs: We currently do not reuse an existing lclVar
17821 // if it is a struct, because it requires some additional handling.
17823 if (!varTypeIsStruct(lclTyp) && (!inlArgInfo[lclNum].argHasSideEff) && (!inlArgInfo[lclNum].argHasGlobRef))
17825 /* Get a *LARGE* LCL_VAR node */
17826 op1 = gtNewLclLNode(tmpNum, genActualType(lclTyp), lclNum);
17828 /* Record op1 as the very first use of this argument.
17829 If there are no further uses of the arg, we may be
17830 able to use the actual arg node instead of the temp.
17831 If we do see any further uses, we will clear this. */
17832 inlArgInfo[lclNum].argBashTmpNode = op1;
17836 /* Get a small LCL_VAR node */
17837 op1 = gtNewLclvNode(tmpNum, genActualType(lclTyp));
17838 /* No bashing of this argument */
17839 inlArgInfo[lclNum].argBashTmpNode = nullptr;
17844 /* Mark the argument as used */
17846 inlArgInfo[lclNum].argIsUsed = true;
17851 /******************************************************************************
17852 Is this the original "this" argument to the call being inlined?
17854 Note that we do not inline methods with "starg 0", and so we do not need to worry about it.
17858 BOOL Compiler::impInlineIsThis(GenTreePtr tree, InlArgInfo* inlArgInfo)
17860 assert(compIsForInlining());
17861 return (tree->gtOper == GT_LCL_VAR && tree->gtLclVarCommon.gtLclNum == inlArgInfo[0].argTmpNum);
17864 //-----------------------------------------------------------------------------
17865 // This function checks if a dereference in the inlinee can guarantee that
17866 // the "this" is non-NULL.
17867 // If we haven't hit a branch or a side effect, and we are dereferencing
17868 // from 'this' to access a field or make GTF_CALL_NULLCHECK call,
17869 // then we can avoid a separate null pointer check.
17871 // "additionalTreesToBeEvaluatedBefore"
17872 // is the set of pending trees that have not yet been added to the statement list,
17873 // and which have been removed from verCurrentState.esStack[]
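// Concretely, the checks below require that we are still importing the inlinee's
// first block, that the dereferenced variable is the inlinee's "this", and that no
// globally visible side effect is pending in "additionalTreesToBeEvaluatedBefore",
// the statement list, or the evaluation stack.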
17875 BOOL Compiler::impInlineIsGuaranteedThisDerefBeforeAnySideEffects(GenTreePtr additionalTreesToBeEvaluatedBefore,
17876 GenTreePtr variableBeingDereferenced,
17877 InlArgInfo* inlArgInfo)
17879 assert(compIsForInlining());
17880 assert(opts.OptEnabled(CLFLG_INLINING));
17882 BasicBlock* block = compCurBB;
17887 if (block != fgFirstBB)
17892 if (!impInlineIsThis(variableBeingDereferenced, inlArgInfo))
17897 if (additionalTreesToBeEvaluatedBefore &&
17898 GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(additionalTreesToBeEvaluatedBefore->gtFlags))
17903 for (stmt = impTreeList->gtNext; stmt; stmt = stmt->gtNext)
17905 expr = stmt->gtStmt.gtStmtExpr;
17907 if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(expr->gtFlags))
17913 for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
17915 unsigned stackTreeFlags = verCurrentState.esStack[level].val->gtFlags;
17916 if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(stackTreeFlags))
17925 /******************************************************************************/
17926 // Check the inlining eligibility of this GT_CALL node.
17927 // Mark GTF_CALL_INLINE_CANDIDATE on the GT_CALL node
17929 // Todo: find a way to record the failure reasons in the IR (or
17930 // otherwise build tree context) so when we do the inlining pass we
17931 // can capture these reasons
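// A series of cheap early-out checks (opt settings, tail-call interactions, call
// kind, EH region, method attributes) runs before impCheckCanInline, which consults
// the EE and is the expensive part.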
17933 void Compiler::impMarkInlineCandidate(GenTreePtr callNode,
17934 CORINFO_CONTEXT_HANDLE exactContextHnd,
17935 CORINFO_CALL_INFO* callInfo)
17937 // Let the strategy know there's another call
17938 impInlineRoot()->m_inlineStrategy->NoteCall();
17940 if (!opts.OptEnabled(CLFLG_INLINING))
17942 /* XXX Mon 8/18/2008
17943 * This assert is misleading. The caller does not ensure that we have CLFLG_INLINING set before
17944 * calling impMarkInlineCandidate. However, if this assert trips it means that we're an inlinee and
17945 * CLFLG_MINOPT is set. That doesn't make a lot of sense. If you hit this assert, work back and
17946 * figure out why we did not set MAXOPT for this compile.
17948 assert(!compIsForInlining());
17952 if (compIsForImportOnly())
17954 // Don't bother creating the inline candidate during verification.
17955 // Otherwise the call to info.compCompHnd->canInline will trigger a recursive verification
17956 // that leads to the creation of multiple instances of Compiler.
17960 GenTreeCall* call = callNode->AsCall();
17961 InlineResult inlineResult(this, call, nullptr, "impMarkInlineCandidate");
17963 // Don't inline if not optimizing root method
17964 if (opts.compDbgCode)
17966 inlineResult.NoteFatal(InlineObservation::CALLER_DEBUG_CODEGEN);
17970 // Don't inline if inlining into root method is disabled.
17971 if (InlineStrategy::IsNoInline(info.compCompHnd, info.compMethodHnd))
17973 inlineResult.NoteFatal(InlineObservation::CALLER_IS_JIT_NOINLINE);
17977 // Inlining candidate determination needs to honor only IL tail prefix.
17978 // Inlining takes precedence over implicit tail call optimization (if the call is not directly recursive).
17979 if (call->IsTailPrefixedCall())
17981 inlineResult.NoteFatal(InlineObservation::CALLSITE_EXPLICIT_TAIL_PREFIX);
17985 // Tail recursion elimination takes precedence over inlining.
17986 // TODO: We may want to do some of the additional checks from fgMorphCall
17987 // here to reduce the chance we don't inline a call that won't be optimized
17988 // as a fast tail call or turned into a loop.
17989 if (gtIsRecursiveCall(call) && call->IsImplicitTailCall())
17991 inlineResult.NoteFatal(InlineObservation::CALLSITE_IMPLICIT_REC_TAIL_CALL);
17995 if ((call->gtFlags & GTF_CALL_VIRT_KIND_MASK) != GTF_CALL_NONVIRT)
17997 inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT);
18001 /* Ignore helper calls */
18003 if (call->gtCallType == CT_HELPER)
18005 inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_CALL_TO_HELPER);
18009 /* Ignore indirect calls */
18010 if (call->gtCallType == CT_INDIRECT)
18012 inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT_MANAGED);
18016 /* I removed the check for BBJ_THROW. BBJ_THROW is usually marked as rarely run. This more or less
18017 * restricts the inliner to non-expanding inlines. I removed the check to allow for non-expanding
18018 * inlining in throw blocks. I should consider the same thing for catch and filter regions. */
18020 CORINFO_METHOD_HANDLE fncHandle = call->gtCallMethHnd;
18023 // Reuse method flags from the original callInfo if possible
18024 if (fncHandle == callInfo->hMethod)
18026 methAttr = callInfo->methodFlags;
18030 methAttr = info.compCompHnd->getMethodAttribs(fncHandle);
18034 if (compStressCompile(STRESS_FORCE_INLINE, 0))
18036 methAttr |= CORINFO_FLG_FORCEINLINE;
18040 // Check for COMPlus_AggressiveInlining
18041 if (compDoAggressiveInlining)
18043 methAttr |= CORINFO_FLG_FORCEINLINE;
    if (!(methAttr & CORINFO_FLG_FORCEINLINE))
    {
        /* Don't bother inlining blocks that are in the catch handler region */
        if (bbInCatchHandlerILRange(compCurBB))
        {
#ifdef DEBUG
            if (verbose)
            {
                printf("\nWill not inline blocks that are in the catch handler region\n");
            }
#endif

            inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_CATCH);
            return;
        }
        /* Don't bother inlining blocks that are in the filter region */
        if (bbInFilterILRange(compCurBB))
        {
#ifdef DEBUG
            if (verbose)
            {
                printf("\nWill not inline blocks that are in the filter region\n");
            }
#endif

            inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_FILTER);
            return;
        }
    }
    /* If the caller's stack frame is marked, then we can't do any inlining. Period. */

    if (opts.compNeedSecurityCheck)
    {
        inlineResult.NoteFatal(InlineObservation::CALLER_NEEDS_SECURITY_CHECK);
        return;
    }
    /* Check if we tried to inline this method before */

    if (methAttr & CORINFO_FLG_DONT_INLINE)
    {
        inlineResult.NoteFatal(InlineObservation::CALLEE_IS_NOINLINE);
        return;
    }
    /* Cannot inline synchronized methods */

    if (methAttr & CORINFO_FLG_SYNCH)
    {
        inlineResult.NoteFatal(InlineObservation::CALLEE_IS_SYNCHRONIZED);
        return;
    }
    /* Do not inline if callee needs security checks (since they would then mark the wrong frame) */

    if (methAttr & CORINFO_FLG_SECURITYCHECK)
    {
        inlineResult.NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
        return;
    }
    InlineCandidateInfo* inlineCandidateInfo = nullptr;
    impCheckCanInline(call, fncHandle, methAttr, exactContextHnd, &inlineCandidateInfo, &inlineResult);

    if (inlineResult.IsFailure())
    {
        return;
    }
    // The old value should be NULL
    assert(call->gtInlineCandidateInfo == nullptr);

    call->gtInlineCandidateInfo = inlineCandidateInfo;

    // Mark the call node as an inline candidate.
    call->gtFlags |= GTF_CALL_INLINE_CANDIDATE;

    // Let the strategy know there's another candidate.
    impInlineRoot()->m_inlineStrategy->NoteCandidate();

    // Since we're not actually inlining yet, and this call site is
    // still just an inline candidate, there's nothing to report.
    inlineResult.SetReported();
}
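// Illustrative note (not part of the original source): later phases discover the
// candidates marked above by testing the flag on the call node. A minimal sketch,
// assuming a GenTreeCall* named 'call' and that GenTreeCall::IsInlineCandidate()
// simply tests GTF_CALL_INLINE_CANDIDATE:
//
//     if (call->IsInlineCandidate())
//     {
//         // The inlining phase performs the actual inline substitution here,
//         // consuming the InlineCandidateInfo stored in call->gtInlineCandidateInfo above.
//     }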
/******************************************************************************/
// Returns true if the given intrinsic will be implemented by target-specific
// instructions

bool Compiler::IsTargetIntrinsic(CorInfoIntrinsics intrinsicId)
{
#if defined(_TARGET_AMD64_) || (defined(_TARGET_X86_) && !defined(LEGACY_BACKEND))
    switch (intrinsicId)
    {
        // Amd64 only has SSE2 instructions to directly compute sqrt/abs.

        // TODO: Because the x86 backend only targets SSE for floating-point code,
        //       it does not treat Sine, Cosine, or Round as intrinsics (JIT32
        //       implemented those intrinsics as x87 instructions). If this poses
        //       a CQ problem, it may be necessary to change the implementation of
        //       the helper calls to decrease call overhead or switch back to the
        //       x87 instructions. This is tracked by #7097.
        case CORINFO_INTRINSIC_Sqrt:
        case CORINFO_INTRINSIC_Abs:
            return true;

        default:
            return false;
    }
#elif defined(_TARGET_ARM64_)
    switch (intrinsicId)
    {
        case CORINFO_INTRINSIC_Sqrt:
        case CORINFO_INTRINSIC_Abs:
        case CORINFO_INTRINSIC_Round:
            return true;

        default:
            return false;
    }
#elif defined(_TARGET_ARM_)
    switch (intrinsicId)
    {
        case CORINFO_INTRINSIC_Sqrt:
        case CORINFO_INTRINSIC_Abs:
        case CORINFO_INTRINSIC_Round:
            return true;

        default:
            return false;
    }
#elif defined(_TARGET_X86_)
    switch (intrinsicId)
    {
        case CORINFO_INTRINSIC_Sin:
        case CORINFO_INTRINSIC_Cos:
        case CORINFO_INTRINSIC_Sqrt:
        case CORINFO_INTRINSIC_Abs:
        case CORINFO_INTRINSIC_Round:
            return true;

        default:
            return false;
    }
#else
    // TODO: This portion of the logic is not implemented for other architectures.
    // The reason for returning true is that, on all other architectures, the only
    // intrinsics enabled are target intrinsics.
    return true;
#endif //_TARGET_AMD64_
}
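// Illustrative sketch (not part of the original source): a caller deciding how to
// expand a math intrinsic could branch on this predicate, assuming 'intrinsicId'
// names one of the CORINFO_INTRINSIC_* math operations:
//
//     if (IsTargetIntrinsic(intrinsicId))
//     {
//         // Keep the operation as a GT_INTRINSIC node; codegen emits the native
//         // instruction directly (e.g. sqrtsd for CORINFO_INTRINSIC_Sqrt on x64).
//     }
//     else
//     {
//         // Leave the operation as an ordinary call to the System.Math method.
//     }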
/******************************************************************************/
// Returns true if the given intrinsic will be implemented by calling System.Math
// methods.

bool Compiler::IsIntrinsicImplementedByUserCall(CorInfoIntrinsics intrinsicId)
{
    // Currently, if a math intrinsic is not implemented by target-specific
    // instructions, it will be implemented by a System.Math call. In the
    // future, if we turn to implementing some of them with helper calls,
    // this predicate needs to be revisited.
    return !IsTargetIntrinsic(intrinsicId);
}
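// Illustrative invariant (not part of the original source): because this predicate
// is defined as the negation of IsTargetIntrinsic, every math intrinsic is handled
// by exactly one of the two strategies. Assuming 'id' is a CorInfoIntrinsics value:
//
//     assert(!IsMathIntrinsic(id) || (IsTargetIntrinsic(id) != IsIntrinsicImplementedByUserCall(id)));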
bool Compiler::IsMathIntrinsic(CorInfoIntrinsics intrinsicId)
{
    switch (intrinsicId)
    {
        case CORINFO_INTRINSIC_Sin:
        case CORINFO_INTRINSIC_Sqrt:
        case CORINFO_INTRINSIC_Abs:
        case CORINFO_INTRINSIC_Cos:
        case CORINFO_INTRINSIC_Round:
        case CORINFO_INTRINSIC_Cosh:
        case CORINFO_INTRINSIC_Sinh:
        case CORINFO_INTRINSIC_Tan:
        case CORINFO_INTRINSIC_Tanh:
        case CORINFO_INTRINSIC_Asin:
        case CORINFO_INTRINSIC_Acos:
        case CORINFO_INTRINSIC_Atan:
        case CORINFO_INTRINSIC_Atan2:
        case CORINFO_INTRINSIC_Log10:
        case CORINFO_INTRINSIC_Pow:
        case CORINFO_INTRINSIC_Exp:
        case CORINFO_INTRINSIC_Ceiling:
        case CORINFO_INTRINSIC_Floor:
            return true;

        default:
            return false;
    }
}
bool Compiler::IsMathIntrinsic(GenTreePtr tree)
{
    return (tree->OperGet() == GT_INTRINSIC) && IsMathIntrinsic(tree->gtIntrinsic.gtIntrinsicId);
}
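// Illustrative usage (not part of the original source): the tree overload above lets
// a caller classify an arbitrary node and then ask how the intrinsic will be lowered.
// A minimal sketch, assuming 'tree' is a valid GenTreePtr:
//
//     if (IsMathIntrinsic(tree))
//     {
//         CorInfoIntrinsics id      = tree->gtIntrinsic.gtIntrinsicId;
//         bool              viaCall = IsIntrinsicImplementedByUserCall(id);
//         printf("math intrinsic %d implemented %s\n", id,
//                viaCall ? "via System.Math call" : "via target instruction");
//     }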
/*****************************************************************************/