// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.

/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX  Imports the given method and converts it to semantic trees              XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
#define Verify(cond, msg)                                                                        \
    if (!(cond))                                                                                 \
        verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__));

#define VerifyOrReturn(cond, msg)                                                                \
    if (!(cond))                                                                                 \
    {                                                                                            \
        verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__));     \
        return;                                                                                  \
    }

#define VerifyOrReturnSpeculative(cond, msg, speculative)                                        \
    if (!(cond))                                                                                 \
    {                                                                                            \
        if (!(speculative))                                                                      \
            verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__)); \
        return false;                                                                            \
    }
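// Note: the Verify* macros above raise a verification exception (via
// verRaiseVerifyExceptionIfNeeded, defined below) when 'cond' does not hold;
// the *OrReturn variants additionally return from the calling function so
// that importation of the offending instruction stops.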
/*****************************************************************************/
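// impInit: initialize the importer's per-method state (the statement list
// pointers and the inlined-code-size bookkeeping) before importing IL.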
void Compiler::impInit()
    impTreeList        = nullptr;
    impTreeLast        = nullptr;
    impInlinedCodeSize = 0;
/*****************************************************************************
 *
 *  Pushes the given tree on the stack.
 */

void Compiler::impPushOnStack(GenTreePtr tree, typeInfo ti)
    /* Check for overflow. If inlining, we may be using a bigger stack */

    if ((verCurrentState.esStackDepth >= info.compMaxStack) &&
        (verCurrentState.esStackDepth >= impStkSize || ((compCurBB->bbFlags & BBF_IMPORTED) == 0)))
        BADCODE("stack overflow");

    // If we are pushing a struct, make certain we know the precise type!
    if (tree->TypeGet() == TYP_STRUCT)
        assert(ti.IsType(TI_STRUCT));
        CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandle();
        assert(clsHnd != NO_CLASS_HANDLE);

    if (tiVerificationNeeded && !ti.IsDead())
        assert(typeInfo::AreEquivalent(NormaliseForStack(ti), ti)); // types are normalized

        // The ti type is consistent with the tree type.

        // On 64-bit systems, nodes whose "proper" type is "native int" get labeled TYP_LONG.
        // In the verification type system, we always transform "native int" to "TI_INT".
        // Ideally, we would keep track of which nodes labeled "TYP_LONG" are really "native int", but
        // attempts to do that have proved too difficult. Instead, we'll assume that in checks like this,
        // when there's a mismatch, it's because of this reason -- the typeInfo::AreEquivalentModuloNativeInt
        // method used in the last disjunct allows exactly this mismatch.
        assert(ti.IsDead() || ti.IsByRef() && (tree->TypeGet() == TYP_I_IMPL || tree->TypeGet() == TYP_BYREF) ||
               ti.IsUnboxedGenericTypeVar() && tree->TypeGet() == TYP_REF ||
               ti.IsObjRef() && tree->TypeGet() == TYP_REF || ti.IsMethod() && tree->TypeGet() == TYP_I_IMPL ||
               ti.IsType(TI_STRUCT) && tree->TypeGet() != TYP_REF ||
               typeInfo::AreEquivalentModuloNativeInt(NormaliseForStack(ti),
                                                      NormaliseForStack(typeInfo(tree->TypeGet()))));

        // If it is a struct type, make certain we normalized the primitive types
        assert(!ti.IsType(TI_STRUCT) ||
               info.compCompHnd->getTypeForPrimitiveValueClass(ti.GetClassHandle()) == CORINFO_TYPE_UNDEF);
    if (VERBOSE && tiVerificationNeeded)
        printf(TI_DUMP_PADDING);
        printf("About to push to stack: ");
#endif // VERBOSE_VERIFY

    verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo = ti;
    verCurrentState.esStack[verCurrentState.esStackDepth++].val      = tree;

    if ((tree->gtType == TYP_LONG) && (compLongUsed == false))
    else if (((tree->gtType == TYP_FLOAT) || (tree->gtType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
        compFloatingPointUsed = true;
/******************************************************************************/
// Used in the inliner, where we can assume typesafe code. Please don't use in the importer!!
inline void Compiler::impPushOnStackNoType(GenTreePtr tree)
    assert(verCurrentState.esStackDepth < impStkSize);
    INDEBUG(verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo = typeInfo());
    verCurrentState.esStack[verCurrentState.esStackDepth++].val              = tree;

    if ((tree->gtType == TYP_LONG) && (compLongUsed == false))
    else if (((tree->gtType == TYP_FLOAT) || (tree->gtType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
        compFloatingPointUsed = true;
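// Pushes a null object reference (an integer constant 0 of type TYP_REF,
// with verification type TI_NULL) on the stack.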
inline void Compiler::impPushNullObjRefOnStack()
    impPushOnStack(gtNewIconNode(0, TYP_REF), typeInfo(TI_NULL));

// This method gets called when we run into unverifiable code
// (and we are verifying the method)

inline void Compiler::verRaiseVerifyExceptionIfNeeded(INDEBUG(const char* msg) DEBUGARG(const char* file)
                                                          DEBUGARG(unsigned line))
    // Remember that the code is not verifiable
    // Note that the method may yet pass canSkipMethodVerification(),
    // and so the presence of unverifiable code may not be an issue.
    tiIsVerifiableCode = FALSE;

    const char* tail = strrchr(file, '\\');

    if (JitConfig.JitBreakOnUnsafeCode())
        assert(!"Unsafe code detected");

    JITLOG((LL_INFO10000, "Detected unsafe code: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
            msg, info.compFullName, impCurOpcName, impCurOpcOffs));

    if (verNeedsVerification() || compIsForImportOnly())
        JITLOG((LL_ERROR, "Verification failure: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
                msg, info.compFullName, impCurOpcName, impCurOpcOffs));
        verRaiseVerifyException(INDEBUG(msg) DEBUGARG(file) DEBUGARG(line));

inline void DECLSPEC_NORETURN Compiler::verRaiseVerifyException(INDEBUG(const char* msg) DEBUGARG(const char* file)
                                                                    DEBUGARG(unsigned line))
    JITLOG((LL_ERROR, "Verification failure: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
            msg, info.compFullName, impCurOpcName, impCurOpcOffs));

    // BreakIfDebuggerPresent();
    if (getBreakOnBadCode())
        assert(!"Typechecking error");

    RaiseException(SEH_VERIFICATION_EXCEPTION, EXCEPTION_NONCONTINUABLE, 0, nullptr);
// Helper function that tells us whether the IL instruction at codeAddr
// consumes an address at the top of the stack. We use it to avoid
// unnecessarily marking locals as address-taken (lvAddrTaken).
bool Compiler::impILConsumesAddr(const BYTE* codeAddr, CORINFO_METHOD_HANDLE fncHandle, CORINFO_MODULE_HANDLE scpHandle)
    assert(!compIsForInlining());

    opcode = (OPCODE)getU1LittleEndian(codeAddr);
            // case CEE_LDFLDA: We're taking this one out as if you have a sequence
            // like
            //
            //          ldloca.0
            //          ldflda whatever
            //
            // of a primitive-like struct, you end up after morphing with addr of a local
            // that's not marked as addrtaken, which is wrong. Also ldflda is usually used
            // for structs that contain other structs, which isn't a case we handle very
            // well now for other reasons.

            // We won't collapse small fields. This is probably not the right place to have this
            // check, but we're only using the function for this purpose, and it is easy to factor
            // out if we need to do so.
            CORINFO_RESOLVED_TOKEN resolvedToken;
            impResolveToken(codeAddr + sizeof(__int8), &resolvedToken, CORINFO_TOKENKIND_Field);

            CORINFO_CLASS_HANDLE clsHnd;
            var_types lclTyp = JITtype2varType(info.compCompHnd->getFieldType(resolvedToken.hField, &clsHnd));

            // Preserve 'small' int types
            if (lclTyp > TYP_INT)
                lclTyp = genActualType(lclTyp);

            if (varTypeIsSmall(lclTyp))
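//------------------------------------------------------------------------
// impResolveToken: resolve the metadata token at 'addr' to a handle of the
//    given kind. When verification is needed we use the non-throwing
//    eeTryResolveToken and raise a verification error on failure; otherwise
//    we call resolveToken directly.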
void Compiler::impResolveToken(const BYTE* addr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoTokenKind kind)
    pResolvedToken->tokenContext = impTokenLookupContextHandle;
    pResolvedToken->tokenScope   = info.compScopeHnd;
    pResolvedToken->token        = getU4LittleEndian(addr);
    pResolvedToken->tokenType    = kind;

    if (!tiVerificationNeeded)
        info.compCompHnd->resolveToken(pResolvedToken);

        Verify(eeTryResolveToken(pResolvedToken), "Token resolution failed");
/*****************************************************************************
 *
 *  Pop one tree from the stack.
 */

StackEntry Compiler::impPopStack()
    if (verCurrentState.esStackDepth == 0)
        BADCODE("stack underflow");

    if (VERBOSE && tiVerificationNeeded)
        printf(TI_DUMP_PADDING);
        printf("About to pop from the stack: ");
        const typeInfo& ti = verCurrentState.esStack[verCurrentState.esStackDepth - 1].seTypeInfo;
#endif // VERBOSE_VERIFY

    return verCurrentState.esStack[--verCurrentState.esStackDepth];
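// Pop the top entry from the stack, and also return the class handle
// recorded in its verification type info (useful for struct values).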
StackEntry Compiler::impPopStack(CORINFO_CLASS_HANDLE& structType)
    StackEntry ret = impPopStack();
    structType     = verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo.GetClassHandle();

GenTreePtr Compiler::impPopStack(typeInfo& ti)
    StackEntry ret = impPopStack();
/*****************************************************************************
 *
 *  Peek at the n'th (0-based) tree on the top of the stack.
 */

StackEntry& Compiler::impStackTop(unsigned n)
    if (verCurrentState.esStackDepth <= n)
        BADCODE("stack underflow");

    return verCurrentState.esStack[verCurrentState.esStackDepth - n - 1];
/*****************************************************************************
 *  Some of the trees are spilled specially. While unspilling them, or
 *  making a copy, these need to be handled specially. The function
 *  enumerates the operators possible after spilling.
 */

#ifdef DEBUG // only used in asserts
static bool impValidSpilledStackEntry(GenTreePtr tree)
    if (tree->gtOper == GT_LCL_VAR)
    if (tree->OperIsConst())

/*****************************************************************************
 *
 *  The following logic is used to save/restore stack contents.
 *  If 'copy' is true, then we make a copy of the trees on the stack. These
 *  have to all be cloneable/spilled values.
 */

void Compiler::impSaveStackState(SavedStack* savePtr, bool copy)
    savePtr->ssDepth = verCurrentState.esStackDepth;

    if (verCurrentState.esStackDepth)
        savePtr->ssTrees = new (this, CMK_ImpStack) StackEntry[verCurrentState.esStackDepth];
        size_t saveSize  = verCurrentState.esStackDepth * sizeof(*savePtr->ssTrees);

        StackEntry* table = savePtr->ssTrees;

        /* Make a fresh copy of all the stack entries */

        for (unsigned level = 0; level < verCurrentState.esStackDepth; level++, table++)
            table->seTypeInfo = verCurrentState.esStack[level].seTypeInfo;
            GenTreePtr tree   = verCurrentState.esStack[level].val;

            assert(impValidSpilledStackEntry(tree));

            switch (tree->gtOper)
                table->val = gtCloneExpr(tree);
                assert(!"Bad oper - Not covered by impValidSpilledStackEntry()");

        memcpy(savePtr->ssTrees, verCurrentState.esStack, saveSize);

void Compiler::impRestoreStackState(SavedStack* savePtr)
    verCurrentState.esStackDepth = savePtr->ssDepth;

    if (verCurrentState.esStackDepth)
        memcpy(verCurrentState.esStack, savePtr->ssTrees,
               verCurrentState.esStackDepth * sizeof(*verCurrentState.esStack));
/*****************************************************************************
 *
 *  Get the tree list started for a new basic block.
 */
inline void Compiler::impBeginTreeList()
    assert(impTreeList == nullptr && impTreeLast == nullptr);

    impTreeList = impTreeLast = new (this, GT_BEG_STMTS) GenTree(GT_BEG_STMTS, TYP_VOID);

/*****************************************************************************
 *
 *  Store the given start and end stmt in the given basic block. This is
 *  mostly called by impEndTreeList(BasicBlock *block). It is called
 *  directly only for handling CEE_LEAVEs out of finally-protected try's.
 */

inline void Compiler::impEndTreeList(BasicBlock* block, GenTreePtr firstStmt, GenTreePtr lastStmt)
    assert(firstStmt->gtOper == GT_STMT);
    assert(lastStmt->gtOper == GT_STMT);

    /* Make the list circular, so that we can easily walk it backwards */

    firstStmt->gtPrev = lastStmt;

    /* Store the tree list in the basic block */

    block->bbTreeList = firstStmt;

    /* The block should not already be marked as imported */
    assert((block->bbFlags & BBF_IMPORTED) == 0);

    block->bbFlags |= BBF_IMPORTED;

/*****************************************************************************
 *
 *  Store the current tree list in the given basic block.
 */

inline void Compiler::impEndTreeList(BasicBlock* block)
    assert(impTreeList->gtOper == GT_BEG_STMTS);

    GenTreePtr firstTree = impTreeList->gtNext;

        /* The block should not already be marked as imported */
        assert((block->bbFlags & BBF_IMPORTED) == 0);

        // Empty block. Just mark it as imported
        block->bbFlags |= BBF_IMPORTED;

        // Ignore the GT_BEG_STMTS
        assert(firstTree->gtPrev == impTreeList);

        impEndTreeList(block, firstTree, impTreeLast);

    if (impLastILoffsStmt != nullptr)
        impLastILoffsStmt->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
        impLastILoffsStmt                          = nullptr;

    impTreeList = impTreeLast = nullptr;
/*****************************************************************************
 *
 *  Check that storing the given tree doesn't mess up the semantic order. Note
 *  that this has only limited value as we can only check [0..chkLevel).
 */
inline void Compiler::impAppendStmtCheck(GenTreePtr stmt, unsigned chkLevel)
    assert(stmt->gtOper == GT_STMT);

    if (chkLevel == (unsigned)CHECK_SPILL_ALL)
        chkLevel = verCurrentState.esStackDepth;

    if (verCurrentState.esStackDepth == 0 || chkLevel == 0 || chkLevel == (unsigned)CHECK_SPILL_NONE)

    GenTreePtr tree = stmt->gtStmt.gtStmtExpr;
    // Calls can only be appended if there are no GTF_GLOB_EFFECT trees on the stack

    if (tree->gtFlags & GTF_CALL)
        for (unsigned level = 0; level < chkLevel; level++)
            assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_EFFECT) == 0);

    if (tree->gtOper == GT_ASG)
        // For an assignment to a local variable, all references to that
        // variable have to be spilled. If it is aliased, all calls and
        // indirect accesses have to be spilled

        if (tree->gtOp.gtOp1->gtOper == GT_LCL_VAR)
            unsigned lclNum = tree->gtOp.gtOp1->gtLclVarCommon.gtLclNum;
            for (unsigned level = 0; level < chkLevel; level++)
                assert(!gtHasRef(verCurrentState.esStack[level].val, lclNum, false));
                assert(!lvaTable[lclNum].lvAddrExposed ||
                       (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT) == 0);
        // If the access may be to global memory, all side effects have to be spilled.

        else if (tree->gtOp.gtOp1->gtFlags & GTF_GLOB_REF)
            for (unsigned level = 0; level < chkLevel; level++)
                assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_REF) == 0);

/*****************************************************************************
 *
 *  Append the given GT_STMT node to the current block's tree list.
 *  [0..chkLevel) is the portion of the stack which we will check for
 *    interference with stmt and spill if needed.
 */

inline void Compiler::impAppendStmt(GenTreePtr stmt, unsigned chkLevel)
    assert(stmt->gtOper == GT_STMT);
    noway_assert(impTreeLast != nullptr);

    /* If the statement being appended has any side-effects, check the stack
       to see if anything needs to be spilled to preserve correct ordering. */

    GenTreePtr expr  = stmt->gtStmt.gtStmtExpr;
    unsigned   flags = expr->gtFlags & GTF_GLOB_EFFECT;
    // Assignments to (unaliased) locals don't count as a side-effect as
    // we handle them specially using impSpillLclRefs(). Temp locals should
    // be fine too.

    if ((expr->gtOper == GT_ASG) && (expr->gtOp.gtOp1->gtOper == GT_LCL_VAR) &&
        !(expr->gtOp.gtOp1->gtFlags & GTF_GLOB_REF) && !gtHasLocalsWithAddrOp(expr->gtOp.gtOp2))
        unsigned op2Flags = expr->gtOp.gtOp2->gtFlags & GTF_GLOB_EFFECT;
        assert(flags == (op2Flags | GTF_ASG));
    if (chkLevel == (unsigned)CHECK_SPILL_ALL)
        chkLevel = verCurrentState.esStackDepth;

    if (chkLevel && chkLevel != (unsigned)CHECK_SPILL_NONE)
        assert(chkLevel <= verCurrentState.esStackDepth);

        // If there is a call, we have to spill global refs
        bool spillGlobEffects = (flags & GTF_CALL) ? true : false;

        if (expr->gtOper == GT_ASG)
            GenTree* lhs = expr->gtGetOp1();
            // If we are assigning to a global ref, we have to spill global refs on stack.
            // TODO-1stClassStructs: Previously, spillGlobEffects was set to true for
            // GT_INITBLK and GT_COPYBLK, but this is overly conservative, and should be
            // revisited. (Note that it was NOT set to true for GT_COPYOBJ.)
            if (!expr->OperIsBlkOp())
                // If we are assigning to a global ref, we have to spill global refs on stack
                if ((lhs->gtFlags & GTF_GLOB_REF) != 0)
                    spillGlobEffects = true;
            else if ((lhs->OperIsBlk() && !lhs->AsBlk()->HasGCPtr()) ||
                     ((lhs->OperGet() == GT_LCL_VAR) &&
                      (lvaTable[lhs->AsLclVarCommon()->gtLclNum].lvStructGcCount == 0)))
                spillGlobEffects = true;

        impSpillSideEffects(spillGlobEffects, chkLevel DEBUGARG("impAppendStmt"));

        impSpillSpecialSideEff();

    impAppendStmtCheck(stmt, chkLevel);

    /* Point 'prev' at the previous node, so that we can walk backwards */

    stmt->gtPrev = impTreeLast;

    /* Append the expression statement to the list */

    impTreeLast->gtNext = stmt;

    impMarkContiguousSIMDFieldAssignments(stmt);

    /* Once we set impCurStmtOffs in an appended tree, we are ready to
       report the following offsets. So reset impCurStmtOffs */

    if (impTreeLast->gtStmt.gtStmtILoffsx == impCurStmtOffs)
        impCurStmtOffsSet(BAD_IL_OFFSET);

    if (impLastILoffsStmt == nullptr)
        impLastILoffsStmt = stmt;
/*****************************************************************************
 *
 *  Insert the given GT_STMT "stmt" before GT_STMT "stmtBefore"
 */

inline void Compiler::impInsertStmtBefore(GenTreePtr stmt, GenTreePtr stmtBefore)
    assert(stmt->gtOper == GT_STMT);
    assert(stmtBefore->gtOper == GT_STMT);

    GenTreePtr stmtPrev = stmtBefore->gtPrev;
    stmt->gtPrev        = stmtPrev;
    stmt->gtNext        = stmtBefore;
    stmtPrev->gtNext    = stmt;
    stmtBefore->gtPrev  = stmt;

/*****************************************************************************
 *
 *  Append the given expression tree to the current block's tree list.
 *  Return the newly created statement.
 */

GenTreePtr Compiler::impAppendTree(GenTreePtr tree, unsigned chkLevel, IL_OFFSETX offset)
    /* Allocate an 'expression statement' node */

    GenTreePtr expr = gtNewStmt(tree, offset);

    /* Append the statement to the current block's stmt list */

    impAppendStmt(expr, chkLevel);
/*****************************************************************************
 *
 *  Insert the given expression tree before GT_STMT "stmtBefore"
 */

void Compiler::impInsertTreeBefore(GenTreePtr tree, IL_OFFSETX offset, GenTreePtr stmtBefore)
    assert(stmtBefore->gtOper == GT_STMT);

    /* Allocate an 'expression statement' node */

    GenTreePtr expr = gtNewStmt(tree, offset);

    /* Insert the statement before 'stmtBefore' */

    impInsertStmtBefore(expr, stmtBefore);
/*****************************************************************************
 *
 *  Append an assignment of the given value to a temp to the current tree list.
 *  curLevel is the stack level for which the spill to the temp is being done.
 */

void Compiler::impAssignTempGen(unsigned    tmp,
                                GenTreePtr* pAfterStmt, /* = NULL */
                                IL_OFFSETX  ilOffset,   /* = BAD_IL_OFFSET */
                                BasicBlock* block       /* = NULL */
    GenTreePtr asg = gtNewTempAssign(tmp, val);

    if (!asg->IsNothingNode())
        GenTreePtr asgStmt = gtNewStmt(asg, ilOffset);
        *pAfterStmt        = fgInsertStmtAfter(block, *pAfterStmt, asgStmt);

        impAppendTree(asg, curLevel, impCurStmtOffs);
/*****************************************************************************
 *  Same as above, but handle the valueclass case too
 */

void Compiler::impAssignTempGen(unsigned             tmpNum,
                                CORINFO_CLASS_HANDLE structType,
                                GenTreePtr*          pAfterStmt, /* = NULL */
                                IL_OFFSETX           ilOffset,   /* = BAD_IL_OFFSET */
                                BasicBlock*          block       /* = NULL */

    if (varTypeIsStruct(val))
        assert(tmpNum < lvaCount);
        assert(structType != NO_CLASS_HANDLE);

        // If the method is non-verifiable, the assert is not true, so at least
        // ignore it when verification is turned on, since any block that tries
        // to use the temp would have failed verification.
        var_types varType = lvaTable[tmpNum].lvType;
        assert(tiVerificationNeeded || varType == TYP_UNDEF || varTypeIsStruct(varType));
        lvaSetStruct(tmpNum, structType, false);

        // Now, set the type of the struct value. Note that lvaSetStruct may modify the type
        // of the lclVar to a specialized type (e.g. TYP_SIMD), based on the handle (structType)
        // that has been passed in for the value being assigned to the temp, in which case we
        // need to set 'val' to that same type.
        // Note also that if we always normalized the types of any node that might be a struct
        // type, this would not be necessary - but that requires additional JIT/EE interface
        // calls that may not actually be required - e.g. if we only access a field of a struct.

        val->gtType = lvaTable[tmpNum].lvType;

        GenTreePtr dst = gtNewLclvNode(tmpNum, val->gtType);
        asg            = impAssignStruct(dst, val, structType, curLevel, pAfterStmt, block);

        asg = gtNewTempAssign(tmpNum, val);

    if (!asg->IsNothingNode())
        GenTreePtr asgStmt = gtNewStmt(asg, ilOffset);
        *pAfterStmt        = fgInsertStmtAfter(block, *pAfterStmt, asgStmt);

        impAppendTree(asg, curLevel, impCurStmtOffs);
/*****************************************************************************
 *
 *  Pop the given number of values from the stack and return a list node with
 *  their values.
 *  The 'prefixTree' argument may optionally contain an argument
 *  list that is prepended to the list returned from this function.
 *
 *  The notion of prepended is a bit misleading in that the list is backwards
 *  from the way I would expect: The first element popped is at the end of
 *  the returned list, and prefixTree is 'before' that, meaning closer to
 *  the end of the list. To get to prefixTree, you have to walk to the
 *  end of the list.
 *
 *  For ARG_ORDER_R2L prefixTree is only used to insert extra arguments, as
 *  such we reverse its meaning such that returnValue has a reversed
 *  prefixTree at the head of the list.
 */
GenTreeArgList* Compiler::impPopList(unsigned          count,
                                     CORINFO_SIG_INFO* sig,
                                     GenTreeArgList*   prefixTree)
    assert(sig == nullptr || count == sig->numArgs);

    CORINFO_CLASS_HANDLE structType;
    GenTreeArgList*      treeList;

    if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L)
        treeList = prefixTree;

        StackEntry se   = impPopStack();
        typeInfo   ti   = se.seTypeInfo;
        GenTreePtr temp = se.val;

        if (varTypeIsStruct(temp))
            // Morph trees that aren't already OBJs or MKREFANY to be OBJs
            assert(ti.IsType(TI_STRUCT));
            structType = ti.GetClassHandleForValueClass();
            temp       = impNormStructVal(temp, structType, (unsigned)CHECK_SPILL_ALL);

        /* NOTE: we defer bashing the type for I_IMPL to fgMorphArgs */
        flags |= temp->gtFlags;
        treeList = gtNewListNode(temp, treeList);
    if (sig->retTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS &&
        sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR && sig->retType != CORINFO_TYPE_VAR)
        // Make sure that all valuetypes (including enums) that we push are loaded.
        // This is to guarantee that if a GC is triggered from the prestub of this method,
        // all valuetypes in the method signature are already loaded.
        // We need to be able to find the size of the valuetypes, but we cannot
        // do a class-load from within GC.
        info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(sig->retTypeSigClass);
    CORINFO_ARG_LIST_HANDLE argLst = sig->args;
    CORINFO_CLASS_HANDLE    argClass;
    CORINFO_CLASS_HANDLE    argRealClass;
    GenTreeArgList*         args;

    for (args = treeList, count = sig->numArgs; count > 0; args = args->Rest(), count--)
        PREFIX_ASSUME(args != nullptr);

        CorInfoType corType = strip(info.compCompHnd->getArgType(sig, argLst, &argClass));

        // insert implied casts (from float to double or double to float)

        if (corType == CORINFO_TYPE_DOUBLE && args->Current()->TypeGet() == TYP_FLOAT)
            args->Current() = gtNewCastNode(TYP_DOUBLE, args->Current(), TYP_DOUBLE);
        else if (corType == CORINFO_TYPE_FLOAT && args->Current()->TypeGet() == TYP_DOUBLE)
            args->Current() = gtNewCastNode(TYP_FLOAT, args->Current(), TYP_FLOAT);

        // insert any widening or narrowing casts for backwards compatibility

        args->Current() = impImplicitIorI4Cast(args->Current(), JITtype2varType(corType));

        if (corType != CORINFO_TYPE_CLASS && corType != CORINFO_TYPE_BYREF && corType != CORINFO_TYPE_PTR &&
            corType != CORINFO_TYPE_VAR && (argRealClass = info.compCompHnd->getArgClass(sig, argLst)) != nullptr)
            // Everett MC++ could generate IL with mismatched valuetypes. It used to work with the Everett JIT,
            // but it stopped working in Whidbey when we started passing simple valuetypes as underlying
            // primitive types.
            // We will try to adjust for this case here to avoid breaking customers' code (see VSW 485789 for
            // details).
            if (corType == CORINFO_TYPE_VALUECLASS && !varTypeIsStruct(args->Current()))
                args->Current() = impNormStructVal(args->Current(), argRealClass, (unsigned)CHECK_SPILL_ALL, true);
            // Make sure that all valuetypes (including enums) that we push are loaded.
            // This is to guarantee that if a GC is triggered from the prestub of this method,
            // all valuetypes in the method signature are already loaded.
            // We need to be able to find the size of the valuetypes, but we cannot
            // do a class-load from within GC.
            info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(argRealClass);

        argLst = info.compCompHnd->getArgNext(argLst);
    if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L)
        // Prepend the prefixTree

        // Simple in-place reversal to place treeList
        // at the end of a reversed prefixTree
        while (prefixTree != nullptr)
            GenTreeArgList* next = prefixTree->Rest();
            prefixTree->Rest()   = treeList;
            treeList             = prefixTree;

/*****************************************************************************
 *
 *  Pop the given number of values from the stack in reverse order (STDCALL/CDECL etc.)
 *  The first "skipReverseCount" items are not reversed.
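 *
 *  For example, with count == 4 and skipReverseCount == 1, the first item of
 *  the popped list keeps its position and the remaining three are reversed.
 */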
GenTreeArgList* Compiler::impPopRevList(unsigned          count,
                                        CORINFO_SIG_INFO* sig,
                                        unsigned          skipReverseCount)
    assert(skipReverseCount <= count);

    GenTreeArgList* list = impPopList(count, flagsPtr, sig);

    if (list == nullptr || skipReverseCount == count)

    GenTreeArgList* ptr          = nullptr; // Initialized to the first node that needs to be reversed
    GenTreeArgList* lastSkipNode = nullptr; // Will be set to the last node that does not need to be reversed

    if (skipReverseCount == 0)
        lastSkipNode = list;
        // Get to the first node that needs to be reversed
        for (unsigned i = 0; i < skipReverseCount - 1; i++)
            lastSkipNode = lastSkipNode->Rest();

        PREFIX_ASSUME(lastSkipNode != nullptr);
        ptr = lastSkipNode->Rest();

    GenTreeArgList* reversedList = nullptr;

        GenTreeArgList* tmp = ptr->Rest();
        ptr->Rest()         = reversedList;
    } while (ptr != nullptr);

    if (skipReverseCount)
        lastSkipNode->Rest() = reversedList;

    return reversedList;
/*****************************************************************************
   Assign (copy) the structure from 'src' to 'dest'. The structure is a value
   class of type 'structHnd'. It returns the tree that should be appended to the
   statement list that represents the assignment.
   Temp assignments may be appended to impTreeList if spilling is necessary.
   curLevel is the stack level for which a spill may be being done.
 */
GenTreePtr Compiler::impAssignStruct(GenTreePtr           dest,
                                     CORINFO_CLASS_HANDLE structHnd,
                                     GenTreePtr*          pAfterStmt, /* = NULL */
                                     BasicBlock*          block       /* = NULL */
    assert(varTypeIsStruct(dest));

    while (dest->gtOper == GT_COMMA)
        assert(varTypeIsStruct(dest->gtOp.gtOp2)); // Second thing is the struct

        // Append all the op1 of GT_COMMA trees before we evaluate op2 of the GT_COMMA tree.
            *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(dest->gtOp.gtOp1, impCurStmtOffs));
            impAppendTree(dest->gtOp.gtOp1, curLevel, impCurStmtOffs); // do the side effect

        // set dest to the second thing
        dest = dest->gtOp.gtOp2;

    assert(dest->gtOper == GT_LCL_VAR || dest->gtOper == GT_RETURN || dest->gtOper == GT_FIELD ||
           dest->gtOper == GT_IND || dest->gtOper == GT_OBJ || dest->gtOper == GT_INDEX);

    if (dest->OperGet() == GT_LCL_VAR && src->OperGet() == GT_LCL_VAR &&
        src->gtLclVarCommon.gtLclNum == dest->gtLclVarCommon.gtLclNum)
        return gtNewNothingNode();

    // TODO-1stClassStructs: Avoid creating an address if it is not needed,
    // or re-creating a Blk node if it is.
    GenTreePtr destAddr;

    if (dest->gtOper == GT_IND || dest->OperIsBlk())
        destAddr = dest->gtOp.gtOp1;

        destAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, dest);

    return (impAssignStructPtr(destAddr, src, structHnd, curLevel, pAfterStmt, block));
/*****************************************************************************/

GenTreePtr Compiler::impAssignStructPtr(GenTreePtr           destAddr,
                                        CORINFO_CLASS_HANDLE structHnd,
                                        GenTreePtr*          pAfterStmt, /* = NULL */
                                        BasicBlock*          block       /* = NULL */
    GenTreePtr dest      = nullptr;
    unsigned   destFlags = 0;

#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
    assert(varTypeIsStruct(src) || (src->gtOper == GT_ADDR && src->TypeGet() == TYP_BYREF));
    // TODO-ARM-BUG: Does ARM need this?
    // TODO-ARM64-BUG: Does ARM64 need this?
    assert(src->gtOper == GT_LCL_VAR || src->gtOper == GT_FIELD || src->gtOper == GT_IND || src->gtOper == GT_OBJ ||
           src->gtOper == GT_CALL || src->gtOper == GT_MKREFANY || src->gtOper == GT_RET_EXPR ||
           src->gtOper == GT_COMMA || src->gtOper == GT_ADDR ||
           (src->TypeGet() != TYP_STRUCT && (GenTree::OperIsSIMD(src->gtOper) || src->gtOper == GT_LCL_FLD)));
#else  // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
    assert(varTypeIsStruct(src));

    assert(src->gtOper == GT_LCL_VAR || src->gtOper == GT_FIELD || src->gtOper == GT_IND || src->gtOper == GT_OBJ ||
           src->gtOper == GT_CALL || src->gtOper == GT_MKREFANY || src->gtOper == GT_RET_EXPR ||
           src->gtOper == GT_COMMA ||
           (src->TypeGet() != TYP_STRUCT && (GenTree::OperIsSIMD(src->gtOper) || src->gtOper == GT_LCL_FLD)));
#endif // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
    if (destAddr->OperGet() == GT_ADDR)
        GenTree* destNode = destAddr->gtGetOp1();
        // If the actual destination is a local (for non-LEGACY_BACKEND), or already a block node, or is a node that
        // will be morphed, don't insert an OBJ(ADDR).
        if (destNode->gtOper == GT_INDEX || destNode->OperIsBlk()
#ifndef LEGACY_BACKEND
            || ((destNode->OperGet() == GT_LCL_VAR) && (destNode->TypeGet() == src->TypeGet()))
#endif // !LEGACY_BACKEND
            destType = destNode->TypeGet();

        destType = src->TypeGet();

    var_types asgType = src->TypeGet();

    if (src->gtOper == GT_CALL)
        if (src->AsCall()->TreatAsHasRetBufArg(this))
            // Case of call returning a struct via hidden retbuf arg

            // insert the return value buffer into the argument list as first byref parameter
            src->gtCall.gtCallArgs = gtNewListNode(destAddr, src->gtCall.gtCallArgs);

            // now returns void, not a struct
            src->gtType = TYP_VOID;

            // return the morphed call node

            // Case of call returning a struct in one or more registers.
            var_types returnType = (var_types)src->gtCall.gtReturnType;

            // We won't use a return buffer, so change the type of src->gtType to 'returnType'
            src->gtType = genActualType(returnType);

            // First we try to change this to "LclVar/LclFld = call"

            if ((destAddr->gtOper == GT_ADDR) && (destAddr->gtOp.gtOp1->gtOper == GT_LCL_VAR))
                // If it is a multi-reg struct return, don't change the oper to GT_LCL_FLD.
                // That is, the IR will be of the form lclVar = call for multi-reg return

                GenTreePtr lcl = destAddr->gtOp.gtOp1;
                if (src->AsCall()->HasMultiRegRetVal())
                    // Mark the struct LclVar as used in a MultiReg return context
                    // which currently makes it non promotable.
                    // TODO-1stClassStructs: Eliminate this pessimization when we can more generally
                    // handle multireg returns.
                    lcl->gtFlags |= GTF_DONT_CSE;
                    lvaTable[lcl->gtLclVarCommon.gtLclNum].lvIsMultiRegRet = true;
                else // The call result is not a multireg return
                    // We change this to a GT_LCL_FLD (from a GT_ADDR of a GT_LCL_VAR)
                    lcl->ChangeOper(GT_LCL_FLD);
                    fgLclFldAssign(lcl->gtLclVarCommon.gtLclNum);

                lcl->gtType = src->gtType;
                asgType     = src->gtType;

#if defined(_TARGET_ARM_)
                // TODO-Cleanup: This should have been taken care of in the above HasMultiRegRetVal() case,
                // but that method has not been updated to include ARM.
                impMarkLclDstNotPromotable(lcl->gtLclVarCommon.gtLclNum, src, structHnd);
                lcl->gtFlags |= GTF_DONT_CSE;
#elif defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
                // Not allowed for FEATURE_CORCLR which is the only SKU available for System V OSs.
                assert(!src->gtCall.IsVarargs() && "varargs not allowed for System V OSs.");

                // Make the struct non promotable. The eightbytes could contain multiple fields.
                // TODO-1stClassStructs: Eliminate this pessimization when we can more generally
                // handle multireg returns.
                // TODO-Cleanup: Why is this needed here? This seems that it will set this even for
                // non-multireg returns.
                lcl->gtFlags |= GTF_DONT_CSE;
                lvaTable[lcl->gtLclVarCommon.gtLclNum].lvIsMultiRegRet = true;
            else // we don't have a GT_ADDR of a GT_LCL_VAR
                // !!! The destination could be on stack. !!!
                // This flag will let us choose the correct write barrier.
                asgType   = returnType;
                destFlags = GTF_IND_TGTANYWHERE;

    else if (src->gtOper == GT_RET_EXPR)
        GenTreePtr call = src->gtRetExpr.gtInlineCandidate;
        noway_assert(call->gtOper == GT_CALL);

        if (call->AsCall()->HasRetBufArg())
            // insert the return value buffer into the argument list as first byref parameter
            call->gtCall.gtCallArgs = gtNewListNode(destAddr, call->gtCall.gtCallArgs);

            // now returns void, not a struct
            src->gtType  = TYP_VOID;
            call->gtType = TYP_VOID;
            // We have already appended the write to 'dest' via the GT_CALL's args,
            // so now we just return an empty node (pruning the GT_RET_EXPR).
            // Case of inline method returning a struct in one or more registers.

            var_types returnType = (var_types)call->gtCall.gtReturnType;

            // We won't need a return buffer
            asgType      = returnType;
            src->gtType  = genActualType(returnType);
            call->gtType = src->gtType;

            // If we've changed the type, and it no longer matches a local destination,
            // we must use an indirection.
            if ((dest != nullptr) && (dest->OperGet() == GT_LCL_VAR) && (dest->TypeGet() != asgType))

            // !!! The destination could be on stack. !!!
            // This flag will let us choose the correct write barrier.
            destFlags = GTF_IND_TGTANYWHERE;

    else if (src->OperIsBlk())
        asgType = impNormStructType(structHnd);
        if (src->gtOper == GT_OBJ)
            assert(src->gtObj.gtClass == structHnd);
    else if (src->gtOper == GT_INDEX)
        asgType = impNormStructType(structHnd);
        assert(src->gtIndex.gtStructElemClass == structHnd);
    else if (src->gtOper == GT_MKREFANY)
        // Since we are assigning the result of a GT_MKREFANY,
        // "destAddr" must point to a refany.

        GenTreePtr destAddrClone;
        impCloneExpr(destAddr, &destAddrClone, structHnd, curLevel, pAfterStmt DEBUGARG("MKREFANY assignment"));

        assert(offsetof(CORINFO_RefAny, dataPtr) == 0);
        assert(destAddr->gtType == TYP_I_IMPL || destAddr->gtType == TYP_BYREF);
        GetZeroOffsetFieldMap()->Set(destAddr, GetFieldSeqStore()->CreateSingleton(GetRefanyDataField()));
        GenTreePtr     ptrSlot         = gtNewOperNode(GT_IND, TYP_I_IMPL, destAddr);
        GenTreeIntCon* typeFieldOffset = gtNewIconNode(offsetof(CORINFO_RefAny, type), TYP_I_IMPL);
        typeFieldOffset->gtFieldSeq    = GetFieldSeqStore()->CreateSingleton(GetRefanyTypeField());
        GenTreePtr typeSlot =
            gtNewOperNode(GT_IND, TYP_I_IMPL, gtNewOperNode(GT_ADD, destAddr->gtType, destAddrClone, typeFieldOffset));

        // append the assign of the pointer value
        GenTreePtr asg = gtNewAssignNode(ptrSlot, src->gtOp.gtOp1);
            *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(asg, impCurStmtOffs));
            impAppendTree(asg, curLevel, impCurStmtOffs);

        // return the assign of the type value, to be appended
        return gtNewAssignNode(typeSlot, src->gtOp.gtOp2);
    else if (src->gtOper == GT_COMMA)
        // The second thing is the struct or its address.
        assert(varTypeIsStruct(src->gtOp.gtOp2) || src->gtOp.gtOp2->gtType == TYP_BYREF);
            *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(src->gtOp.gtOp1, impCurStmtOffs));
            impAppendTree(src->gtOp.gtOp1, curLevel, impCurStmtOffs); // do the side effect

        // Evaluate the second thing using recursion.
        return impAssignStructPtr(destAddr, src->gtOp.gtOp2, structHnd, curLevel, pAfterStmt, block);
    else if (src->IsLocal())
        asgType = src->TypeGet();
    else if (asgType == TYP_STRUCT)
        asgType     = impNormStructType(structHnd);
        src->gtType = asgType;
#ifdef LEGACY_BACKEND
        if (asgType == TYP_STRUCT)
            GenTree* srcAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, src);
            src              = gtNewOperNode(GT_IND, TYP_STRUCT, srcAddr);

    if (dest == nullptr)
        // TODO-1stClassStructs: We shouldn't really need a block node as the destination
        // if this is a known struct type.
        if (asgType == TYP_STRUCT)
            dest = gtNewObjNode(structHnd, destAddr);
            gtSetObjGcInfo(dest->AsObj());
            // Although an obj as a call argument was always assumed to be a globRef
            // (which is itself overly conservative), that is not true of the operands
            // of a block assignment.
            dest->gtFlags &= ~GTF_GLOB_REF;
            dest->gtFlags |= (destAddr->gtFlags & GTF_GLOB_REF);
        else if (varTypeIsStruct(asgType))
            dest = new (this, GT_BLK) GenTreeBlk(GT_BLK, asgType, destAddr, genTypeSize(asgType));

            dest = gtNewOperNode(GT_IND, asgType, destAddr);

        dest->gtType = asgType;

    dest->gtFlags |= destFlags;
    destFlags = dest->gtFlags;

    // return an assignment node, to be appended
    GenTree* asgNode = gtNewAssignNode(dest, src);
    gtBlockOpInit(asgNode, dest, src, false);

    // TODO-1stClassStructs: Clean up the settings of GTF_DONT_CSE on the lhs

    if ((destFlags & GTF_DONT_CSE) == 0)
        dest->gtFlags &= ~(GTF_DONT_CSE);
/*****************************************************************************
   Given a struct value, and the class handle for that structure, return
   the expression for the address of that structure value.

   willDeref - does the caller guarantee to dereference the pointer.
 */
GenTreePtr Compiler::impGetStructAddr(GenTreePtr           structVal,
                                      CORINFO_CLASS_HANDLE structHnd,
    assert(varTypeIsStruct(structVal) || eeIsValueClass(structHnd));

    var_types type = structVal->TypeGet();

    genTreeOps oper = structVal->gtOper;

    if (oper == GT_OBJ && willDeref)
        assert(structVal->gtObj.gtClass == structHnd);
        return (structVal->gtObj.Addr());
    else if (oper == GT_CALL || oper == GT_RET_EXPR || oper == GT_OBJ || oper == GT_MKREFANY)
        unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));

        impAssignTempGen(tmpNum, structVal, structHnd, curLevel);

        // The 'return value' is now the temp itself

        type            = genActualType(lvaTable[tmpNum].TypeGet());
        GenTreePtr temp = gtNewLclvNode(tmpNum, type);
        temp            = gtNewOperNode(GT_ADDR, TYP_BYREF, temp);
    else if (oper == GT_COMMA)
        assert(structVal->gtOp.gtOp2->gtType == type); // Second thing is the struct

        GenTreePtr oldTreeLast = impTreeLast;
        structVal->gtOp.gtOp2  = impGetStructAddr(structVal->gtOp.gtOp2, structHnd, curLevel, willDeref);
        structVal->gtType      = TYP_BYREF;

        if (oldTreeLast != impTreeLast)
            // Some temp assignment statement was placed on the statement list
            // for Op2, but that would be out of order with op1, so we need to
            // spill op1 onto the statement list after whatever was last
            // before we recursed on Op2 (i.e. before whatever Op2 appended).
            impInsertTreeBefore(structVal->gtOp.gtOp1, impCurStmtOffs, oldTreeLast->gtNext);
            structVal->gtOp.gtOp1 = gtNewNothingNode();

    return (gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
//------------------------------------------------------------------------
// impNormStructType: Given a (known to be) struct class handle structHnd, normalize its type,
//                    and optionally determine the GC layout of the struct.
//
//    structHnd     - The class handle for the struct type of interest.
//    gcLayout      - (optional, default nullptr) - a BYTE pointer, allocated by the caller,
//                    into which the gcLayout will be written.
//    pNumGCVars    - (optional, default nullptr) - if non-null, a pointer to an unsigned,
//                    which will be set to the number of GC fields in the struct.
//    pSimdBaseType - (optional, default nullptr) - if non-null, and the struct is a SIMD
//                    type, set to the SIMD base type
//
//    The JIT type for the struct (e.g. TYP_STRUCT, or TYP_SIMD*).
//    The gcLayout will be returned using the pointers provided by the caller, if non-null.
//    It may also modify the compFloatingPointUsed flag if the type is a SIMD type.
//
//    The caller must set gcLayout to nullptr OR ensure that it is large enough
//    (see ICorStaticInfo::getClassGClayout in corinfo.h).
//
//    Normalizing the type involves examining the struct type to determine if it should
//    be modified to one that is handled specially by the JIT, possibly being a candidate
//    for full enregistration, e.g. TYP_SIMD16.

var_types Compiler::impNormStructType(CORINFO_CLASS_HANDLE structHnd,
                                      unsigned*            pNumGCVars,
                                      var_types*           pSimdBaseType)
    assert(structHnd != NO_CLASS_HANDLE);

    const DWORD structFlags = info.compCompHnd->getClassAttribs(structHnd);
    var_types   structType  = TYP_STRUCT;
    // On coreclr the check for GC includes a "may" to account for the special
    // ByRef-like span structs. The added check for "CONTAINS_STACK_PTR" is the particular bit.
    // When this is set the struct will contain a ByRef that could be a GC pointer or a native
    // pointer.
    const bool mayContainGCPtrs =
        ((structFlags & CORINFO_FLG_CONTAINS_STACK_PTR) != 0 || ((structFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0));
    // Check to see if this is a SIMD type.
    if (featureSIMD && !mayContainGCPtrs)
        unsigned originalSize = info.compCompHnd->getClassSize(structHnd);

        if ((originalSize >= minSIMDStructBytes()) && (originalSize <= maxSIMDStructBytes()))
            unsigned int sizeBytes;
            var_types    simdBaseType = getBaseTypeAndSizeOfSIMDType(structHnd, &sizeBytes);
            if (simdBaseType != TYP_UNKNOWN)
                assert(sizeBytes == originalSize);
                structType = getSIMDTypeForSize(sizeBytes);
                if (pSimdBaseType != nullptr)
                    *pSimdBaseType = simdBaseType;
                // Also indicate that we use floating point registers.
                compFloatingPointUsed = true;
#endif // FEATURE_SIMD

    // Fetch GC layout info if requested
    if (gcLayout != nullptr)
        unsigned numGCVars = info.compCompHnd->getClassGClayout(structHnd, gcLayout);

        // Verify that the quick test up above via the class attributes gave a
        // safe view of the type's GCness.
        //
        // Note there are cases where mayContainGCPtrs is true but getClassGClayout
        // does not report any gc fields.

        assert(mayContainGCPtrs || (numGCVars == 0));

        if (pNumGCVars != nullptr)
            *pNumGCVars = numGCVars;

        // Can't safely ask for number of GC pointers without also
        // asking for layout.
        assert(pNumGCVars == nullptr);
//****************************************************************************
//  Given TYP_STRUCT value 'structVal', make sure it is 'canonical'; that is,
//  it is either an OBJ or a MKREFANY node, or a node (e.g. GT_INDEX) that will be morphed.
//
GenTreePtr Compiler::impNormStructVal(GenTreePtr           structVal,
                                      CORINFO_CLASS_HANDLE structHnd,
                                      bool                 forceNormalization /*=false*/)
    assert(forceNormalization || varTypeIsStruct(structVal));
    assert(structHnd != NO_CLASS_HANDLE);
    var_types structType = structVal->TypeGet();
    bool      makeTemp   = false;
    if (structType == TYP_STRUCT)
        structType = impNormStructType(structHnd);

    bool                 alreadyNormalized = false;
    GenTreeLclVarCommon* structLcl         = nullptr;

    genTreeOps oper = structVal->OperGet();

        // GT_RETURN and GT_MKREFANY don't capture the handle.
            alreadyNormalized = true;

            structVal->gtCall.gtRetClsHnd = structHnd;

            structVal->gtRetExpr.gtRetClsHnd = structHnd;

            structVal->gtArgPlace.gtArgPlaceClsHnd = structHnd;

            // This will be transformed to an OBJ later.
            alreadyNormalized                    = true;
            structVal->gtIndex.gtStructElemClass = structHnd;
            structVal->gtIndex.gtIndElemSize     = info.compCompHnd->getClassSize(structHnd);

            // Wrap it in a GT_OBJ.
            structVal->gtType = structType;
            structVal         = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));

            structLcl = structVal->AsLclVarCommon();
            // Wrap it in a GT_OBJ.
            structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));

            // These should already have the appropriate type.
            assert(structVal->gtType == structType);
            alreadyNormalized = true;

            assert(structVal->gtType == structType);
            structVal         = gtNewObjNode(structHnd, structVal->gtGetOp1());
            alreadyNormalized = true;

            assert(varTypeIsSIMD(structVal) && (structVal->gtType == structType));
#endif // FEATURE_SIMD

            // The second thing could either be a block node or a GT_SIMD or a GT_COMMA node.
            GenTree* blockNode = structVal->gtOp.gtOp2;
            assert(blockNode->gtType == structType);

            // Is this GT_COMMA(op1, GT_COMMA())?
            GenTree* parent = structVal;
            if (blockNode->OperGet() == GT_COMMA)
                // Find the last node in the comma chain.
                    assert(blockNode->gtType == structType);
                    blockNode = blockNode->gtOp.gtOp2;
                } while (blockNode->OperGet() == GT_COMMA);

            if (blockNode->OperGet() == GT_SIMD)
                parent->gtOp.gtOp2 = impNormStructVal(blockNode, structHnd, curLevel, forceNormalization);
                alreadyNormalized  = true;

            assert(blockNode->OperIsBlk());
            // Sink the GT_COMMA below the blockNode addr.
            // That is, GT_COMMA(op1, op2=blockNode) is transformed into
            // blockNode(GT_COMMA(TYP_BYREF, op1, op2's op1)).
            //
            // In case of a chained GT_COMMA, we sink the last
            // GT_COMMA below the blockNode addr.
            GenTree* blockNodeAddr = blockNode->gtOp.gtOp1;
            assert(blockNodeAddr->gtType == TYP_BYREF);
            GenTree* commaNode    = parent;
            commaNode->gtType     = TYP_BYREF;
            commaNode->gtOp.gtOp2 = blockNodeAddr;
            blockNode->gtOp.gtOp1 = commaNode;
            if (parent == structVal)
                structVal = blockNode;
            alreadyNormalized = true;

            assert(!"Unexpected node in impNormStructVal()");

    structVal->gtType  = structType;
    GenTree* structObj = structVal;

    if (!alreadyNormalized || forceNormalization)
        unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));

        impAssignTempGen(tmpNum, structVal, structHnd, curLevel);

        // The structVal is now the temp itself

        structLcl = gtNewLclvNode(tmpNum, structType)->AsLclVarCommon();
        // TODO-1stClassStructs: Avoid always wrapping in GT_OBJ.
        structObj = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structLcl));
    else if (varTypeIsStruct(structType) && !structVal->OperIsBlk())
        // Wrap it in a GT_OBJ
        structObj = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));

    if (structLcl != nullptr)
        // An OBJ on an ADDR(LCL_VAR) can never raise an exception,
        // so we don't set GTF_EXCEPT here.
        if (!lvaIsImplicitByRefLocal(structLcl->gtLclNum))
            structObj->gtFlags &= ~GTF_GLOB_REF;

        // In general an OBJ is an indirection and could raise an exception.
        structObj->gtFlags |= GTF_EXCEPT;
/******************************************************************************/
// Given a type token, generate code that will evaluate to the correct
// handle representation of that token (type handle, field handle, or method handle).
//
// For most cases, the handle is determined at compile-time, and the code
// generated is simply an embedded handle.
//
// Run-time lookup is required if the enclosing method is shared between instantiations
// and the token refers to formal type parameters whose instantiation is not known
// at compile-time.
//
GenTreePtr Compiler::impTokenToHandle(CORINFO_RESOLVED_TOKEN* pResolvedToken,
                                      BOOL*                   pRuntimeLookup /* = NULL */,
                                      BOOL                    mustRestoreHandle /* = FALSE */,
                                      BOOL                    importParent /* = FALSE */)
    assert(!fgGlobalMorph);

    CORINFO_GENERICHANDLE_RESULT embedInfo;
    info.compCompHnd->embedGenericHandle(pResolvedToken, importParent, &embedInfo);

        *pRuntimeLookup = embedInfo.lookup.lookupKind.needsRuntimeLookup;

    if (mustRestoreHandle && !embedInfo.lookup.lookupKind.needsRuntimeLookup)
        switch (embedInfo.handleType)
            case CORINFO_HANDLETYPE_CLASS:
                info.compCompHnd->classMustBeLoadedBeforeCodeIsRun((CORINFO_CLASS_HANDLE)embedInfo.compileTimeHandle);

            case CORINFO_HANDLETYPE_METHOD:
                info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun((CORINFO_METHOD_HANDLE)embedInfo.compileTimeHandle);

            case CORINFO_HANDLETYPE_FIELD:
                info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(
                    info.compCompHnd->getFieldClass((CORINFO_FIELD_HANDLE)embedInfo.compileTimeHandle));

    return impLookupToTree(pResolvedToken, &embedInfo.lookup, gtTokenToIconFlags(pResolvedToken->token),
                           embedInfo.compileTimeHandle);
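// impLookupToTree: build a tree for the handle described by 'pLookup'. When
// no runtime lookup is needed this is an embedded handle (possibly accessed
// via one indirection); otherwise a dictionary-based runtime lookup tree is
// generated (and inlining, if in progress, is aborted).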
GenTreePtr Compiler::impLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
                                     CORINFO_LOOKUP*         pLookup,
                                     unsigned                handleFlags,
                                     void*                   compileTimeHandle)
    if (!pLookup->lookupKind.needsRuntimeLookup)
        // No runtime lookup is required.
        // Access is a direct or memory-indirect (of a fixed address) reference.

        CORINFO_GENERIC_HANDLE handle       = nullptr;
        void*                  pIndirection = nullptr;
        assert(pLookup->constLookup.accessType != IAT_PPVALUE);

        if (pLookup->constLookup.accessType == IAT_VALUE)
            handle = pLookup->constLookup.handle;
        else if (pLookup->constLookup.accessType == IAT_PVALUE)
            pIndirection = pLookup->constLookup.addr;
        return gtNewIconEmbHndNode(handle, pIndirection, handleFlags, 0, nullptr, compileTimeHandle);
    else if (compIsForInlining())
        // Don't import runtime lookups when inlining;
        // inlining has to be aborted in such a case.
        compInlineResult->NoteFatal(InlineObservation::CALLSITE_GENERIC_DICTIONARY_LOOKUP);

        // Need to use dictionary-based access, which depends on the typeContext;
        // this is only available at runtime, not at compile-time.

        return impRuntimeLookupToTree(pResolvedToken, pLookup, compileTimeHandle);
#ifdef FEATURE_READYTORUN_COMPILER
GenTreePtr Compiler::impReadyToRunLookupToTree(CORINFO_CONST_LOOKUP* pLookup,
                                               unsigned              handleFlags,
                                               void*                 compileTimeHandle)
    CORINFO_GENERIC_HANDLE handle       = nullptr;
    void*                  pIndirection = nullptr;
    assert(pLookup->accessType != IAT_PPVALUE);

    if (pLookup->accessType == IAT_VALUE)
        handle = pLookup->handle;
    else if (pLookup->accessType == IAT_PVALUE)
        pIndirection = pLookup->addr;
    return gtNewIconEmbHndNode(handle, pIndirection, handleFlags, 0, nullptr, compileTimeHandle);
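// impReadyToRunHelperToTree: create a call node for the given ready-to-run
// helper with the given arguments, and attach the entry point obtained from
// the runtime to the call.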
GenTreePtr Compiler::impReadyToRunHelperToTree(
    CORINFO_RESOLVED_TOKEN* pResolvedToken,
    CorInfoHelpFunc         helper,
    GenTreeArgList*         args /* =NULL*/,
    CORINFO_LOOKUP_KIND*    pGenericLookupKind /* =NULL. Only used with generics */)
    CORINFO_CONST_LOOKUP lookup;
#if COR_JIT_EE_VERSION > 460
    if (!info.compCompHnd->getReadyToRunHelper(pResolvedToken, pGenericLookupKind, helper, &lookup))
    info.compCompHnd->getReadyToRunHelper(pResolvedToken, helper, &lookup);

    GenTreePtr op1 = gtNewHelperCallNode(helper, type, GTF_EXCEPT, args);

    op1->gtCall.setEntryPoint(lookup);
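// impMethodPointer: import a method pointer (e.g. for CEE_LDFTN), producing
// either a direct GT_FTN_ADDR node or, for CORINFO_CALL_CODE_POINTER, a
// runtime lookup tree.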
GenTreePtr Compiler::impMethodPointer(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo)
    GenTreePtr op1 = nullptr;

    switch (pCallInfo->kind)
            op1 = new (this, GT_FTN_ADDR) GenTreeFptrVal(TYP_I_IMPL, pCallInfo->hMethod);

#ifdef FEATURE_READYTORUN_COMPILER
            if (opts.IsReadyToRun())
                op1->gtFptrVal.gtEntryPoint          = pCallInfo->codePointerLookup.constLookup;
                op1->gtFptrVal.gtLdftnResolvedToken  = new (this, CMK_Unknown) CORINFO_RESOLVED_TOKEN;
                *op1->gtFptrVal.gtLdftnResolvedToken = *pResolvedToken;

                op1->gtFptrVal.gtEntryPoint.addr = nullptr;

        case CORINFO_CALL_CODE_POINTER:
            if (compIsForInlining())
                // Don't import runtime lookups when inlining;
                // inlining has to be aborted in such a case.
                compInlineResult->NoteFatal(InlineObservation::CALLSITE_GENERIC_DICTIONARY_LOOKUP);

            op1 = impLookupToTree(pResolvedToken, &pCallInfo->codePointerLookup, GTF_ICON_FTN_ADDR, pCallInfo->hMethod);

            noway_assert(!"unknown call kind");
//------------------------------------------------------------------------
// getRuntimeContextTree: find pointer to context for runtime lookup.
//
//    kind - lookup kind.
//
//    Return GenTree pointer to generic shared context.
//
//    Notes that the generic context is used (sets lvaGenericsContextUsed).
GenTreePtr Compiler::getRuntimeContextTree(CORINFO_RUNTIME_LOOKUP_KIND kind)
    GenTreePtr ctxTree = nullptr;

    // Collectible types require that for shared generic code, if we use the generic
    // context parameter, we report it. (This is a conservative approach; we could
    // detect some cases, particularly when the context parameter is 'this', where
    // we don't need the eager reporting logic.)
    lvaGenericsContextUsed = true;

    if (kind == CORINFO_LOOKUP_THISOBJ)
        ctxTree = gtNewLclvNode(info.compThisArg, TYP_REF);

        // Vtable pointer of this object
        ctxTree = gtNewOperNode(GT_IND, TYP_I_IMPL, ctxTree);
        ctxTree->gtFlags |= GTF_EXCEPT; // Null-pointer exception
        ctxTree->gtFlags |= GTF_IND_INVARIANT;

        assert(kind == CORINFO_LOOKUP_METHODPARAM || kind == CORINFO_LOOKUP_CLASSPARAM);

        ctxTree = gtNewLclvNode(info.compTypeCtxtArg, TYP_I_IMPL); // Exact method descriptor as passed in as last arg
/*****************************************************************************/
/* Import a dictionary lookup to access a handle in code shared between
   generic instantiations.
   The lookup depends on the typeContext which is only available at
   runtime, and not at compile-time.
   pLookup->token1 and pLookup->token2 specify the handle that is needed.
   The cases are:

   1. pLookup->indirections == CORINFO_USEHELPER : Call a helper passing it the
      instantiation-specific handle, and the tokens to lookup the handle.
   2. pLookup->indirections != CORINFO_USEHELPER :
      2a. pLookup->testForNull == false : Dereference the instantiation-specific handle
          to get the handle.
      2b. pLookup->testForNull == true : Dereference the instantiation-specific handle.
          If it is non-NULL, it is the handle required. Else, call a helper
          to lookup the handle.
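
   For example, with indirections == 2 and testForNull == false, the handle
   is computed roughly as *(*(ctx + offsets[0]) + offsets[1]), where ctx is
   the shared context obtained from getRuntimeContextTree.
 */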
GenTreePtr Compiler::impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
                                            CORINFO_LOOKUP*         pLookup,
                                            void*                   compileTimeHandle)
    // This method can only be called from the importer instance of the Compiler.
    // In other words, it cannot be called by the instance of the Compiler for the inlinee.
    assert(!compIsForInlining());
    GenTreePtr ctxTree = getRuntimeContextTree(pLookup->lookupKind.runtimeLookupKind);

#ifdef FEATURE_READYTORUN_COMPILER
    if (opts.IsReadyToRun())
        return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL,
                                         gtNewArgList(ctxTree), &pLookup->lookupKind);

    CORINFO_RUNTIME_LOOKUP* pRuntimeLookup = &pLookup->runtimeLookup;
    // It's available only via the run-time helper function
    if (pRuntimeLookup->indirections == CORINFO_USEHELPER)
        GenTreeArgList* helperArgs =
            gtNewArgList(ctxTree, gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_TOKEN_HDL, 0,
                                                      nullptr, compileTimeHandle));

        return gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, GTF_EXCEPT, helperArgs);

    GenTreePtr slotPtrTree = ctxTree;

    if (pRuntimeLookup->testForNull)
        slotPtrTree = impCloneExpr(ctxTree, &ctxTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
                                   nullptr DEBUGARG("impRuntimeLookup slot"));
2010 // Apply repeated indirections
2011 for (WORD i = 0; i < pRuntimeLookup->indirections; i++)
2015 slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
2016 slotPtrTree->gtFlags |= GTF_IND_NONFAULTING;
2017 slotPtrTree->gtFlags |= GTF_IND_INVARIANT;
2019 if (pRuntimeLookup->offsets[i] != 0)
2022 gtNewOperNode(GT_ADD, TYP_I_IMPL, slotPtrTree, gtNewIconNode(pRuntimeLookup->offsets[i], TYP_I_IMPL));
2026 // No null test required
2027 if (!pRuntimeLookup->testForNull)
2029 if (pRuntimeLookup->indirections == 0)
2034 slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
2035 slotPtrTree->gtFlags |= GTF_IND_NONFAULTING;
2037 if (!pRuntimeLookup->testForFixup)
2042 impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark0"));
2044 GenTreePtr op1 = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2045 nullptr DEBUGARG("impRuntimeLookup test"));
2046 op1 = impImplicitIorI4Cast(op1, TYP_INT); // downcast the pointer to a TYP_INT on 64-bit targets
2048 // Use a GT_AND to check for the lowest bit and indirect if it is set
2049 GenTreePtr testTree = gtNewOperNode(GT_AND, TYP_INT, op1, gtNewIconNode(1));
2050 GenTreePtr relop = gtNewOperNode(GT_EQ, TYP_INT, testTree, gtNewIconNode(0));
2051 relop->gtFlags |= GTF_RELOP_QMARK;
2053 op1 = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2054 nullptr DEBUGARG("impRuntimeLookup indir"));
2055 op1 = gtNewOperNode(GT_ADD, TYP_I_IMPL, op1, gtNewIconNode(-1, TYP_I_IMPL)); // subtract 1 from the pointer
2056 GenTreePtr indirTree = gtNewOperNode(GT_IND, TYP_I_IMPL, op1);
2057 GenTreePtr colon = new (this, GT_COLON) GenTreeColon(TYP_I_IMPL, slotPtrTree, indirTree);
2059 GenTreePtr qmark = gtNewQmarkNode(TYP_I_IMPL, relop, colon);
2061 unsigned tmp = lvaGrabTemp(true DEBUGARG("spilling QMark0"));
2062 impAssignTempGen(tmp, qmark, (unsigned)CHECK_SPILL_NONE);
2063 return gtNewLclvNode(tmp, TYP_I_IMPL);
2066 assert(pRuntimeLookup->indirections != 0);
2068 impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark1"));
2070 // Extract the handle
2071 GenTreePtr handle = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
2072 handle->gtFlags |= GTF_IND_NONFAULTING;
2074 GenTreePtr handleCopy = impCloneExpr(handle, &handle, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2075 nullptr DEBUGARG("impRuntimeLookup typehandle"));
2078 GenTreeArgList* helperArgs =
2079 gtNewArgList(ctxTree, gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_TOKEN_HDL, 0, nullptr,
2080 compileTimeHandle));
2081 GenTreePtr helperCall = gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, GTF_EXCEPT, helperArgs);
2083 // Check for null and possibly call helper
2084 GenTreePtr relop = gtNewOperNode(GT_NE, TYP_INT, handle, gtNewIconNode(0, TYP_I_IMPL));
2085 relop->gtFlags |= GTF_RELOP_QMARK;
2087 GenTreePtr colon = new (this, GT_COLON) GenTreeColon(TYP_I_IMPL,
2088 gtNewNothingNode(), // do nothing if nonnull
2091 GenTreePtr qmark = gtNewQmarkNode(TYP_I_IMPL, relop, colon);
2094 if (handleCopy->IsLocal())
2096 tmp = handleCopy->gtLclVarCommon.gtLclNum;
2100 tmp = lvaGrabTemp(true DEBUGARG("spilling QMark1"));
2103 impAssignTempGen(tmp, qmark, (unsigned)CHECK_SPILL_NONE);
2104 return gtNewLclvNode(tmp, TYP_I_IMPL);
2107 /******************************************************************************
2108 * Spills the stack at verCurrentState.esStack[level] and replaces it with a temp.
2109 * If tnum!=BAD_VAR_NUM, the temp var used to replace the tree is tnum,
2110 * else, grab a new temp.
2111 * For structs (which can be pushed on the stack using obj, etc),
2112 * special handling is needed
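 * For example, spilling level 0 rewrites
 *     esStack[0] = GT_ADD(x, y)
 * into
 *     tmpN = GT_ADD(x, y);   esStack[0] = GT_LCL_VAR(tmpN)
 * (a sketch of the effect; see the assignment and replacement below)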
2115 struct RecursiveGuard
2120 m_pAddress = nullptr;
2127 *m_pAddress = false;
2131 void Init(bool* pAddress, bool bInitialize)
2133 assert(pAddress && *pAddress == false && "Recursive guard violation");
2134 m_pAddress = pAddress;
2146 bool Compiler::impSpillStackEntry(unsigned level,
2150 bool bAssertOnRecursion,
2157 RecursiveGuard guard;
2158 guard.Init(&impNestedStackSpill, bAssertOnRecursion);
2161 GenTreePtr tree = verCurrentState.esStack[level].val;
2163 /* Allocate a temp if we haven't been asked to use a particular one */
2165 if (tiVerificationNeeded)
2167 // Ignore bad temp requests (they will happen with bad code and will be
2168 // caught when importing the destblock)
2169 if ((tnum != BAD_VAR_NUM && tnum >= lvaCount) && verNeedsVerification())
2176 if (tnum != BAD_VAR_NUM && (tnum >= lvaCount))
2182 if (tnum == BAD_VAR_NUM)
2184 tnum = lvaGrabTemp(true DEBUGARG(reason));
2186 else if (tiVerificationNeeded && lvaTable[tnum].TypeGet() != TYP_UNDEF)
2188 // if verification is needed and tnum's type is incompatible with
2189 // type on that stack, we grab a new temp. This is safe since
2190 // we will throw a verification exception in the dest block.
2192 var_types valTyp = tree->TypeGet();
2193 var_types dstTyp = lvaTable[tnum].TypeGet();
2195 // if the two types are different, we return. This will only happen with bad code and will
2196 // be caught when importing the destblock. We still allow int/byrefs and float/double differences.
2197 if ((genActualType(valTyp) != genActualType(dstTyp)) &&
2199 #ifndef _TARGET_64BIT_
2200 (valTyp == TYP_I_IMPL && dstTyp == TYP_BYREF) || (valTyp == TYP_BYREF && dstTyp == TYP_I_IMPL) ||
2201 #endif // !_TARGET_64BIT_
2202 (varTypeIsFloating(dstTyp) && varTypeIsFloating(valTyp))))
2204 if (verNeedsVerification())
2211 /* Assign the spilled entry to the temp */
2212 impAssignTempGen(tnum, tree, verCurrentState.esStack[level].seTypeInfo.GetClassHandle(), level);
2214 // The tree type may be modified by impAssignTempGen, so use the type of the lclVar.
2215 var_types type = genActualType(lvaTable[tnum].TypeGet());
2216 GenTreePtr temp = gtNewLclvNode(tnum, type);
2217 verCurrentState.esStack[level].val = temp;
2222 /*****************************************************************************
2224 * Ensure that the stack has only spilled values
2227 void Compiler::impSpillStackEnsure(bool spillLeaves)
2229 assert(!spillLeaves || opts.compDbgCode);
2231 for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2233 GenTreePtr tree = verCurrentState.esStack[level].val;
2235 if (!spillLeaves && tree->OperIsLeaf())
2240 // Temps introduced by the importer itself don't need to be spilled
2242 bool isTempLcl = (tree->OperGet() == GT_LCL_VAR) && (tree->gtLclVarCommon.gtLclNum >= info.compLocalsCount);
2249 impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillStackEnsure"));
2253 void Compiler::impSpillEvalStack()
2255 for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2257 impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillEvalStack"));
2261 /*****************************************************************************
2263 * If the stack contains any trees with side effects in them, assign those
2264 * trees to temps and append the assignments to the statement list.
2265 * On return the stack is guaranteed to be empty.
2268 inline void Compiler::impEvalSideEffects()
2270 impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("impEvalSideEffects"));
2271 verCurrentState.esStackDepth = 0;
2274 /*****************************************************************************
2276 * If the stack contains any trees with side effects in them, assign those
2277 * trees to temps and replace them on the stack with refs to their temps.
2278 * [0..chkLevel) is the portion of the stack which will be checked and spilled.
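 * For example, impSpillSideEffects(false, 2 DEBUGARG("reason")) examines
 * esStack[0] and esStack[1] only, spilling each entry whose tree has
 * GTF_SIDE_EFFECT flags and leaving higher stack levels untouched
 * (a sketch of the behavior).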
2281 inline void Compiler::impSpillSideEffects(bool spillGlobEffects, unsigned chkLevel DEBUGARG(const char* reason))
2283 assert(chkLevel != (unsigned)CHECK_SPILL_NONE);
2285 /* Before we make any appends to the tree list we must spill the
2286 * "special" side effects (GTF_ORDER_SIDEEFF on a GT_CATCH_ARG) */
2288 impSpillSpecialSideEff();
2290 if (chkLevel == (unsigned)CHECK_SPILL_ALL)
2292 chkLevel = verCurrentState.esStackDepth;
2295 assert(chkLevel <= verCurrentState.esStackDepth);
2297 unsigned spillFlags = spillGlobEffects ? GTF_GLOB_EFFECT : GTF_SIDE_EFFECT;
2299 for (unsigned i = 0; i < chkLevel; i++)
2301 GenTreePtr tree = verCurrentState.esStack[i].val;
2303 GenTreePtr lclVarTree;
2305 if ((tree->gtFlags & spillFlags) != 0 ||
2306 (spillGlobEffects && // Only consider the following when spillGlobEffects == TRUE
2307 !impIsAddressInLocal(tree, &lclVarTree) && // No need to spill the GT_ADDR node on a local.
2308 gtHasLocalsWithAddrOp(tree))) // Spill if we still see GT_LCL_VAR that contains lvHasLdAddrOp or
2309 // lvAddrTaken flag.
2311 impSpillStackEntry(i, BAD_VAR_NUM DEBUGARG(false) DEBUGARG(reason));
2316 /*****************************************************************************
2318 * If the stack contains any trees with special side effects in them, assign
2319 * those trees to temps and replace them on the stack with refs to their temps.
2322 inline void Compiler::impSpillSpecialSideEff()
2324 // Only exception objects need to be carefully handled
2326 if (!compCurBB->bbCatchTyp)
2331 for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2333 GenTreePtr tree = verCurrentState.esStack[level].val;
2334 // If there is an exception object (GT_CATCH_ARG) anywhere in the sub-tree, spill this entry.
2335 if (gtHasCatchArg(tree))
2337 impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillSpecialSideEff"));
2342 /*****************************************************************************
2344 * Spill all stack references to value classes (TYP_STRUCT nodes)
2347 void Compiler::impSpillValueClasses()
2349 for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2351 GenTreePtr tree = verCurrentState.esStack[level].val;
2353 if (fgWalkTreePre(&tree, impFindValueClasses) == WALK_ABORT)
2355 // Tree walk was aborted, which means that we found a
2356 // value class on the stack. Need to spill that
2359 impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillValueClasses"));
2364 /*****************************************************************************
2366 * Callback that checks if a tree node is TYP_STRUCT
2369 Compiler::fgWalkResult Compiler::impFindValueClasses(GenTreePtr* pTree, fgWalkData* data)
2371 fgWalkResult walkResult = WALK_CONTINUE;
2373 if ((*pTree)->gtType == TYP_STRUCT)
2375 // Abort the walk and indicate that we found a value class
2377 walkResult = WALK_ABORT;
2383 /*****************************************************************************
2385 * If the stack contains any trees with references to local #lclNum, assign
2386 * those trees to temps and replace their place on the stack with refs to
2390 void Compiler::impSpillLclRefs(ssize_t lclNum)
2392 /* Before we make any appends to the tree list we must spill the
2393 * "special" side effects (GTF_ORDER_SIDEEFF) - GT_CATCH_ARG */
2395 impSpillSpecialSideEff();
2397 for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2399 GenTreePtr tree = verCurrentState.esStack[level].val;
2401 /* If the tree may throw an exception, and the block has a handler,
2402 then we need to spill assignments to the local if the local is
2403 live on entry to the handler.
2404 Just spill 'em all without considering the liveness */
2406 bool xcptnCaught = ehBlockHasExnFlowDsc(compCurBB) && (tree->gtFlags & (GTF_CALL | GTF_EXCEPT));
2408 /* Skip the tree if it doesn't have an affected reference,
2409 unless xcptnCaught */
2411 if (xcptnCaught || gtHasRef(tree, lclNum, false))
2413 impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillLclRefs"));
2418 /*****************************************************************************
2420 * Push catch arg onto the stack.
2421 * If there are jumps to the beginning of the handler, insert basic block
2422 * and spill catch arg to a temp. Update the handler block if necessary.
2424 * Returns the basic block of the actual handler.
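 * For example, when the handler entry has multiple incoming references, the
 * catch arg is spilled into an injected block, roughly:
 *     newBlk (injected before hndBlk):  tmpN = GT_CATCH_ARG
 *     hndBlk:                           starts with GT_LCL_VAR(tmpN) on the stack
 * (a sketch; the code below also handles debug info and block flags)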
2427 BasicBlock* Compiler::impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_HANDLE clsHnd)
2429 // Do not inject the basic block twice on reimport. This should be
2430 // hit only under JIT stress. See if the block is the one we injected.
2431 // Note that EH canonicalization can inject internal blocks here. We might
2432 // be able to re-use such a block (but we don't, right now).
2433 if ((hndBlk->bbFlags & (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET)) ==
2434 (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET))
2436 GenTreePtr tree = hndBlk->bbTreeList;
2438 if (tree != nullptr && tree->gtOper == GT_STMT)
2440 tree = tree->gtStmt.gtStmtExpr;
2441 assert(tree != nullptr);
2443 if ((tree->gtOper == GT_ASG) && (tree->gtOp.gtOp1->gtOper == GT_LCL_VAR) &&
2444 (tree->gtOp.gtOp2->gtOper == GT_CATCH_ARG))
2446 tree = gtNewLclvNode(tree->gtOp.gtOp1->gtLclVarCommon.gtLclNum, TYP_REF);
2448 impPushOnStack(tree, typeInfo(TI_REF, clsHnd));
2450 return hndBlk->bbNext;
2454 // If we get here, it must have been some other kind of internal block. It's possible that
2455 // someone prepended something to our injected block, but that's unlikely.
2458 /* Push the exception address value on the stack */
2459 GenTreePtr arg = new (this, GT_CATCH_ARG) GenTree(GT_CATCH_ARG, TYP_REF);
2461 /* Mark the node as having a side-effect - i.e. cannot be
2462 * moved around since it is tied to a fixed location (EAX) */
2463 arg->gtFlags |= GTF_ORDER_SIDEEFF;
2465 /* Spill GT_CATCH_ARG to a temp if there are jumps to the beginning of the handler */
2466 if (hndBlk->bbRefs > 1 || compStressCompile(STRESS_CATCH_ARG, 5))
2468 if (hndBlk->bbRefs == 1)
2473 /* Create extra basic block for the spill */
2474 BasicBlock* newBlk = fgNewBBbefore(BBJ_NONE, hndBlk, /* extendRegion */ true);
2475 newBlk->bbFlags |= BBF_IMPORTED | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET;
2476 newBlk->setBBWeight(hndBlk->bbWeight);
2477 newBlk->bbCodeOffs = hndBlk->bbCodeOffs;
2479 /* Account for the new link we are about to create */
2482 /* Spill into a temp */
2483 unsigned tempNum = lvaGrabTemp(false DEBUGARG("SpillCatchArg"));
2484 lvaTable[tempNum].lvType = TYP_REF;
2485 arg = gtNewTempAssign(tempNum, arg);
2487 hndBlk->bbStkTempsIn = tempNum;
2489 /* Report the debug info. impImportBlockCode won't treat
2490 * the actual handler as an exception block and thus won't do it for us. */
2491 if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES)
2493 impCurStmtOffs = newBlk->bbCodeOffs | IL_OFFSETX_STKBIT;
2494 arg = gtNewStmt(arg, impCurStmtOffs);
2497 fgInsertStmtAtEnd(newBlk, arg);
2499 arg = gtNewLclvNode(tempNum, TYP_REF);
2502 impPushOnStack(arg, typeInfo(TI_REF, clsHnd));
2507 /*****************************************************************************
2509 * Given a tree, clone it. *pClone is set to the cloned tree.
2510 * Returns the original tree if the cloning was easy,
2511 * else returns the temp to which the tree had to be spilled.
2512 * If the tree has side-effects, it will be spilled to a temp.
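 * Typical usage, after which op1 and op2 can each be evaluated once:
 *     op2 = impCloneExpr(op1, &op1, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
 *                        nullptr DEBUGARG("reason"));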
2515 GenTreePtr Compiler::impCloneExpr(GenTreePtr tree,
2517 CORINFO_CLASS_HANDLE structHnd,
2519 GenTreePtr* pAfterStmt DEBUGARG(const char* reason))
2521 if (!(tree->gtFlags & GTF_GLOB_EFFECT))
2523 GenTreePtr clone = gtClone(tree, true);
2532 /* Store the operand in a temp and return the temp */
2534 unsigned temp = lvaGrabTemp(true DEBUGARG(reason));
2536 // impAssignTempGen() may change tree->gtType to TYP_VOID for calls which
2537 // return a struct type. It also may modify the struct type to a more
2538 // specialized type (e.g. a SIMD type). So we will get the type from
2539 // the lclVar AFTER calling impAssignTempGen().
2541 impAssignTempGen(temp, tree, structHnd, curLevel, pAfterStmt, impCurStmtOffs);
2542 var_types type = genActualType(lvaTable[temp].TypeGet());
2544 *pClone = gtNewLclvNode(temp, type);
2545 return gtNewLclvNode(temp, type);
2548 /*****************************************************************************
2549 * Remember the IL offset (including stack-empty info) for the trees we will
2553 inline void Compiler::impCurStmtOffsSet(IL_OFFSET offs)
2555 if (compIsForInlining())
2557 GenTreePtr callStmt = impInlineInfo->iciStmt;
2558 assert(callStmt->gtOper == GT_STMT);
2559 impCurStmtOffs = callStmt->gtStmt.gtStmtILoffsx;
2563 assert(offs == BAD_IL_OFFSET || (offs & IL_OFFSETX_BITS) == 0);
2564 IL_OFFSETX stkBit = (verCurrentState.esStackDepth > 0) ? IL_OFFSETX_STKBIT : 0;
2565 impCurStmtOffs = offs | stkBit;
2569 /*****************************************************************************
2570 * Returns current IL offset with stack-empty and call-instruction info incorporated
2572 inline IL_OFFSETX Compiler::impCurILOffset(IL_OFFSET offs, bool callInstruction)
2574 if (compIsForInlining())
2576 return BAD_IL_OFFSET;
2580 assert(offs == BAD_IL_OFFSET || (offs & IL_OFFSETX_BITS) == 0);
2581 IL_OFFSETX stkBit = (verCurrentState.esStackDepth > 0) ? IL_OFFSETX_STKBIT : 0;
2582 IL_OFFSETX callInstructionBit = callInstruction ? IL_OFFSETX_CALLINSTRUCTIONBIT : 0;
2583 return offs | stkBit | callInstructionBit;
2587 /*****************************************************************************
2589 * Remember the instr offset for the statements
2591 * When we do impAppendTree(tree), we can't set tree->gtStmtLastILoffs to
2592 * impCurOpcOffs, if the append was done because of a partial stack spill,
2593 * as some of the trees corresponding to code up to impCurOpcOffs might
2594 * still be sitting on the stack.
2595 * So we delay marking of gtStmtLastILoffs until impNoteLastILoffs().
2596 * This should be called when an opcode finally/explicitly causes
2597 * impAppendTree(tree) to be called (as opposed to being called because of
2598 * a spill caused by the opcode)
2603 void Compiler::impNoteLastILoffs()
2605 if (impLastILoffsStmt == nullptr)
2607 // We should have added a statement for the current basic block
2608 // Is this assert correct?
2610 assert(impTreeLast);
2611 assert(impTreeLast->gtOper == GT_STMT);
2613 impTreeLast->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
2617 impLastILoffsStmt->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
2618 impLastILoffsStmt = nullptr;
2624 /*****************************************************************************
2625 * We don't create any GenTree (excluding spills) for a branch.
2626 * For debugging info, we need a placeholder so that we can note
2627 * the IL offset in gtStmt.gtStmtOffs. So append an empty statement.
2630 void Compiler::impNoteBranchOffs()
2632 if (opts.compDbgCode)
2634 impAppendTree(gtNewNothingNode(), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
2638 /*****************************************************************************
2639 * Locate the next stmt boundary for which we need to record info.
2640 * We will have to spill the stack at such boundaries if it is not
2642 * Returns the next stmt boundary (after the start of the block)
2645 unsigned Compiler::impInitBlockLineInfo()
2647 /* Assume the block does not correspond with any IL offset. This prevents
2648 us from reporting extra offsets. Extra mappings can cause confusing
2649 stepping, especially if the extra mapping is a jump-target, and the
2650 debugger does not ignore extra mappings, but instead rewinds to the
2651 nearest known offset */
2653 impCurStmtOffsSet(BAD_IL_OFFSET);
2655 if (compIsForInlining())
2660 IL_OFFSET blockOffs = compCurBB->bbCodeOffs;
2662 if ((verCurrentState.esStackDepth == 0) && (info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES))
2664 impCurStmtOffsSet(blockOffs);
2667 if (false && (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES))
2669 impCurStmtOffsSet(blockOffs);
2672 /* Always report IL offset 0 or some tests get confused.
2673 Probably a good idea anyway */
2677 impCurStmtOffsSet(blockOffs);
2680 if (!info.compStmtOffsetsCount)
2685 /* Find the lowest explicit stmt boundary within the block */
2687 /* Start looking at an entry that is based on our instr offset */
2689 unsigned index = (info.compStmtOffsetsCount * blockOffs) / info.compILCodeSize;
2691 if (index >= info.compStmtOffsetsCount)
2693 index = info.compStmtOffsetsCount - 1;
2696 /* If we've guessed too far, back up */
2698 while (index > 0 && info.compStmtOffsets[index - 1] >= blockOffs)
2703 /* If we guessed short, advance ahead */
2705 while (info.compStmtOffsets[index] < blockOffs)
2709 if (index == info.compStmtOffsetsCount)
2711 return info.compStmtOffsetsCount;
2715 assert(index < info.compStmtOffsetsCount);
2717 if (info.compStmtOffsets[index] == blockOffs)
2719 /* There is an explicit boundary for the start of this basic block.
2720 So we will start with bbCodeOffs. Else we will wait until we
2721 get to the next explicit boundary */
2723 impCurStmtOffsSet(blockOffs);
2731 /*****************************************************************************/
2733 static inline bool impOpcodeIsCallOpcode(OPCODE opcode)
2747 /*****************************************************************************/
2749 static inline bool impOpcodeIsCallSiteBoundary(OPCODE opcode)
2766 /*****************************************************************************/
2768 // One might think it is worth caching these values, but results indicate that it isn't.
2770 // In addition, caching them causes SuperPMI to be unable to completely
2771 // encapsulate an individual method context.
2772 CORINFO_CLASS_HANDLE Compiler::impGetRefAnyClass()
2774 CORINFO_CLASS_HANDLE refAnyClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPED_BYREF);
2775 assert(refAnyClass != (CORINFO_CLASS_HANDLE) nullptr);
2779 CORINFO_CLASS_HANDLE Compiler::impGetTypeHandleClass()
2781 CORINFO_CLASS_HANDLE typeHandleClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPE_HANDLE);
2782 assert(typeHandleClass != (CORINFO_CLASS_HANDLE) nullptr);
2783 return typeHandleClass;
2786 CORINFO_CLASS_HANDLE Compiler::impGetRuntimeArgumentHandle()
2788 CORINFO_CLASS_HANDLE argIteratorClass = info.compCompHnd->getBuiltinClass(CLASSID_ARGUMENT_HANDLE);
2789 assert(argIteratorClass != (CORINFO_CLASS_HANDLE) nullptr);
2790 return argIteratorClass;
2793 CORINFO_CLASS_HANDLE Compiler::impGetStringClass()
2795 CORINFO_CLASS_HANDLE stringClass = info.compCompHnd->getBuiltinClass(CLASSID_STRING);
2796 assert(stringClass != (CORINFO_CLASS_HANDLE) nullptr);
2800 CORINFO_CLASS_HANDLE Compiler::impGetObjectClass()
2802 CORINFO_CLASS_HANDLE objectClass = info.compCompHnd->getBuiltinClass(CLASSID_SYSTEM_OBJECT);
2803 assert(objectClass != (CORINFO_CLASS_HANDLE) nullptr);
2807 /*****************************************************************************
2808 * "&var" can be used either as TYP_BYREF or TYP_I_IMPL, but we
2809 * set its type to TYP_BYREF when we create it. We know if it can be
2810 * changed to TYP_I_IMPL only at the point where we use it
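 * For example, an "&lclVar" node created as TYP_BYREF is retyped to
 * TYP_I_IMPL here once we see it used where a native int is expected.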
2814 void Compiler::impBashVarAddrsToI(GenTreePtr tree1, GenTreePtr tree2)
2816 if (tree1->IsVarAddr())
2818 tree1->gtType = TYP_I_IMPL;
2821 if (tree2 && tree2->IsVarAddr())
2823 tree2->gtType = TYP_I_IMPL;
2827 /*****************************************************************************
2828 * TYP_INT and TYP_I_IMPL can be used almost interchangeably, but we want
2829 * to make that an explicit cast in our trees, so any implicit casts that
2830 * exist in the IL (at least on 64-bit where TYP_I_IMPL != TYP_INT) are
2831 * turned into explicit casts here.
2832 * We also allow an implicit conversion of a ldnull into a TYP_I_IMPL(0)
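 * For example, on a 64-bit target a TYP_INT operand used where TYP_I_IMPL is
 * expected gets an explicit GT_CAST to TYP_I_IMPL here, unless it is a
 * GT_CNS_INT that can simply be retyped (a sketch of the cases handled below).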
2835 GenTreePtr Compiler::impImplicitIorI4Cast(GenTreePtr tree, var_types dstTyp)
2837 var_types currType = genActualType(tree->gtType);
2838 var_types wantedType = genActualType(dstTyp);
2840 if (wantedType != currType)
2842 // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
2843 if ((tree->OperGet() == GT_CNS_INT) && varTypeIsI(dstTyp))
2845 if (!varTypeIsI(tree->gtType) || ((tree->gtType == TYP_REF) && (tree->gtIntCon.gtIconVal == 0)))
2847 tree->gtType = TYP_I_IMPL;
2850 #ifdef _TARGET_64BIT_
2851 else if (varTypeIsI(wantedType) && (currType == TYP_INT))
2853 // Note that this allows TYP_INT to be cast to a TYP_I_IMPL when wantedType is a TYP_BYREF or TYP_REF
2854 tree = gtNewCastNode(TYP_I_IMPL, tree, TYP_I_IMPL);
2856 else if ((wantedType == TYP_INT) && varTypeIsI(currType))
2858 // Note that this allows TYP_BYREF or TYP_REF to be cast to a TYP_INT
2859 tree = gtNewCastNode(TYP_INT, tree, TYP_INT);
2861 #endif // _TARGET_64BIT_
2867 /*****************************************************************************
2868 * TYP_FLOAT and TYP_DOUBLE can be used almost interchangeably in some cases,
2869 * but we want to make that an explicit cast in our trees, so any implicit casts
2870 * that exist in the IL are turned into explicit casts here.
2873 GenTreePtr Compiler::impImplicitR4orR8Cast(GenTreePtr tree, var_types dstTyp)
2875 #ifndef LEGACY_BACKEND
2876 if (varTypeIsFloating(tree) && varTypeIsFloating(dstTyp) && (dstTyp != tree->gtType))
2878 tree = gtNewCastNode(dstTyp, tree, dstTyp);
2880 #endif // !LEGACY_BACKEND
2885 //------------------------------------------------------------------------
2886 // impInitializeArrayIntrinsic: Attempts to replace a call to InitializeArray
2887 // with a GT_COPYBLK node.
2890 // sig - The InitializeArray signature.
2893 // A pointer to the newly created GT_COPYBLK node if the replacement succeeds or
2894 // nullptr otherwise.
2897 // The function recognizes the following IL pattern:
2898 // ldc <length> or a list of ldc <lower bound>/<length>
2901 // ldtoken <field handle>
2902 // call InitializeArray
2903 // The lower bounds need not be constant except when the array rank is 1.
2904 // The function recognizes all kinds of arrays thus enabling a small runtime
2905 // such as CoreRT to skip providing an implementation for InitializeArray.
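// For example, a C# array initializer such as "new int[] { 1, 2, 3 }"
// typically compiles to the newarr/ldtoken/call InitializeArray sequence
// recognized here, which is then replaced by a single block copy from the
// static initialization data.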
2907 GenTreePtr Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig)
2909 assert(sig->numArgs == 2);
2911 GenTreePtr fieldTokenNode = impStackTop(0).val;
2912 GenTreePtr arrayLocalNode = impStackTop(1).val;
2915 // Verify that the field token is known and valid. Note that it's also
2916 // possible for the token to come from reflection, in which case we cannot do
2917 // the optimization and must therefore revert to calling the helper. You can
2918 // see an example of this in bvt\DynIL\initarray2.exe (in Main).
2921 // Check to see if the ldtoken helper call is what we see here.
2922 if (fieldTokenNode->gtOper != GT_CALL || (fieldTokenNode->gtCall.gtCallType != CT_HELPER) ||
2923 (fieldTokenNode->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD)))
2928 // Strip helper call away
2929 fieldTokenNode = fieldTokenNode->gtCall.gtCallArgs->Current();
2931 if (fieldTokenNode->gtOper == GT_IND)
2933 fieldTokenNode = fieldTokenNode->gtOp.gtOp1;
2936 // Check for constant
2937 if (fieldTokenNode->gtOper != GT_CNS_INT)
2942 CORINFO_FIELD_HANDLE fieldToken = (CORINFO_FIELD_HANDLE)fieldTokenNode->gtIntCon.gtCompileTimeHandle;
2943 if (!fieldTokenNode->IsIconHandle(GTF_ICON_FIELD_HDL) || (fieldToken == nullptr))
2949 // We need to get the number of elements in the array and the size of each element.
2950 // We verify that the newarr statement is exactly what we expect it to be.
2951 // If it's not, then we just return NULL and don't optimize this call
2955 // It is possible that we don't have any statements in the block yet
2957 if (impTreeLast->gtOper != GT_STMT)
2959 assert(impTreeLast->gtOper == GT_BEG_STMTS);
2964 // We start by looking at the last statement, making sure it's an assignment, and
2965 // that the target of the assignment is the array passed to InitializeArray.
2967 GenTreePtr arrayAssignment = impTreeLast->gtStmt.gtStmtExpr;
2968 if ((arrayAssignment->gtOper != GT_ASG) || (arrayAssignment->gtOp.gtOp1->gtOper != GT_LCL_VAR) ||
2969 (arrayLocalNode->gtOper != GT_LCL_VAR) ||
2970 (arrayAssignment->gtOp.gtOp1->gtLclVarCommon.gtLclNum != arrayLocalNode->gtLclVarCommon.gtLclNum))
2976 // Make sure that the object being assigned is a helper call.
2979 GenTreePtr newArrayCall = arrayAssignment->gtOp.gtOp2;
2980 if ((newArrayCall->gtOper != GT_CALL) || (newArrayCall->gtCall.gtCallType != CT_HELPER))
2986 // Verify that it is one of the new array helpers.
2989 bool isMDArray = false;
2991 if (newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_DIRECT) &&
2992 newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_OBJ) &&
2993 newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_VC) &&
2994 newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_ALIGN8)
2995 #ifdef FEATURE_READYTORUN_COMPILER
2996 && newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1)
3000 #if COR_JIT_EE_VERSION > 460
3001 if (newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEW_MDARR_NONVARARG))
3010 CORINFO_CLASS_HANDLE arrayClsHnd = (CORINFO_CLASS_HANDLE)newArrayCall->gtCall.compileTimeHelperArgumentHandle;
3013 // Make sure we found a compile time handle to the array
3022 S_UINT32 numElements;
3026 rank = info.compCompHnd->getArrayRank(arrayClsHnd);
3033 GenTreeArgList* tokenArg = newArrayCall->gtCall.gtCallArgs;
3034 assert(tokenArg != nullptr);
3035 GenTreeArgList* numArgsArg = tokenArg->Rest();
3036 assert(numArgsArg != nullptr);
3037 GenTreeArgList* argsArg = numArgsArg->Rest();
3038 assert(argsArg != nullptr);
3041 // The number of arguments should be a constant between 1 and 64. The rank can't be 0,
3042 // so at least one length must be present, and the rank can't exceed 32, so there can
3043 // be at most 64 arguments - 32 lengths and 32 lower bounds.
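// (For example, a rank-2 array created with lower bounds passes 4 arguments:
// lowerBound0, length0, lowerBound1, length1.)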
3046 if ((!numArgsArg->Current()->IsCnsIntOrI()) || (numArgsArg->Current()->AsIntCon()->IconValue() < 1) ||
3047 (numArgsArg->Current()->AsIntCon()->IconValue() > 64))
3052 unsigned numArgs = static_cast<unsigned>(numArgsArg->Current()->AsIntCon()->IconValue());
3053 bool lowerBoundsSpecified;
3055 if (numArgs == rank * 2)
3057 lowerBoundsSpecified = true;
3059 else if (numArgs == rank)
3061 lowerBoundsSpecified = false;
3064 // If the rank is 1 and a lower bound isn't specified, then the runtime creates
3065 // an SDArray. Note that even if a lower bound is specified it can be 0, and then
3066 // we get an SDArray as well; see the for loop below.
3080 // The rank is known to be at least 1 so we can start with numElements being 1
3081 // to avoid the need to special case the first dimension.
3084 numElements = S_UINT32(1);
3088 static bool IsArgsFieldInit(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs)
3090 return (tree->OperGet() == GT_ASG) && IsArgsFieldIndir(tree->gtGetOp1(), index, lvaNewObjArrayArgs) &&
3091 IsArgsAddr(tree->gtGetOp1()->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs);
3094 static bool IsArgsFieldIndir(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs)
3096 return (tree->OperGet() == GT_IND) && (tree->gtGetOp1()->OperGet() == GT_ADD) &&
3097 (tree->gtGetOp1()->gtGetOp2()->IsIntegralConst(sizeof(INT32) * index)) &&
3098 IsArgsAddr(tree->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs);
3101 static bool IsArgsAddr(GenTree* tree, unsigned lvaNewObjArrayArgs)
3103 return (tree->OperGet() == GT_ADDR) && (tree->gtGetOp1()->OperGet() == GT_LCL_VAR) &&
3104 (tree->gtGetOp1()->AsLclVar()->GetLclNum() == lvaNewObjArrayArgs);
3107 static bool IsComma(GenTree* tree)
3109 return (tree != nullptr) && (tree->OperGet() == GT_COMMA);
3113 unsigned argIndex = 0;
3116 for (comma = argsArg->Current(); Match::IsComma(comma); comma = comma->gtGetOp2())
3118 if (lowerBoundsSpecified)
3121 // In general lower bounds can be ignored because they're not needed to
3122 // calculate the total number of elements. But for single-dimensional arrays
3123 // we need to know if the lower bound is 0, because in this case the runtime
3124 // creates an SDArray and this affects the way the array data offset is calculated.
3129 GenTree* lowerBoundAssign = comma->gtGetOp1();
3130 assert(Match::IsArgsFieldInit(lowerBoundAssign, argIndex, lvaNewObjArrayArgs));
3131 GenTree* lowerBoundNode = lowerBoundAssign->gtGetOp2();
3133 if (lowerBoundNode->IsIntegralConst(0))
3139 comma = comma->gtGetOp2();
3143 GenTree* lengthNodeAssign = comma->gtGetOp1();
3144 assert(Match::IsArgsFieldInit(lengthNodeAssign, argIndex, lvaNewObjArrayArgs));
3145 GenTree* lengthNode = lengthNodeAssign->gtGetOp2();
3147 if (!lengthNode->IsCnsIntOrI())
3152 numElements *= S_SIZE_T(lengthNode->AsIntCon()->IconValue());
3156 assert((comma != nullptr) && Match::IsArgsAddr(comma, lvaNewObjArrayArgs));
3158 if (argIndex != numArgs)
3166 // Make sure there are exactly two arguments: the array class and
3167 // the number of elements.
3170 GenTreePtr arrayLengthNode;
3172 GenTreeArgList* args = newArrayCall->gtCall.gtCallArgs;
3173 #ifdef FEATURE_READYTORUN_COMPILER
3174 if (newArrayCall->gtCall.gtCallMethHnd == eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1))
3176 // Array length is 1st argument for readytorun helper
3177 arrayLengthNode = args->Current();
3182 // Array length is 2nd argument for regular helper
3183 arrayLengthNode = args->Rest()->Current();
3187 // Make sure that the number of elements looks valid.
3189 if (arrayLengthNode->gtOper != GT_CNS_INT)
3194 numElements = S_SIZE_T(arrayLengthNode->gtIntCon.gtIconVal);
3196 if (!info.compCompHnd->isSDArray(arrayClsHnd))
3202 CORINFO_CLASS_HANDLE elemClsHnd;
3203 var_types elementType = JITtype2varType(info.compCompHnd->getChildType(arrayClsHnd, &elemClsHnd));
3206 // Note that genTypeSize will return zero for non-primitive types, which is exactly
3207 // what we want (size will then be 0, and we will catch this in the conditional below).
3208 // Note that we don't expect this to fail for valid binaries, so we assert in the
3209 // non-verification case (the verification case should not assert but rather correctly
3210 // handle bad binaries). This assert is not guarding any specific invariant, but rather
3211 // saying that we don't expect this to happen, and if it is hit, we need to investigate
3215 S_UINT32 elemSize(genTypeSize(elementType));
3216 S_UINT32 size = elemSize * S_UINT32(numElements);
3218 if (size.IsOverflow())
3223 if ((size.Value() == 0) || (varTypeIsGC(elementType)))
3225 assert(verNeedsVerification());
3229 void* initData = info.compCompHnd->getArrayInitializationData(fieldToken, size.Value());
3236 // At this point we are ready to commit to implementing the InitializeArray
3237 // intrinsic using a struct assignment. Pop the arguments from the stack and
3238 // return the struct assignment node.
3244 const unsigned blkSize = size.Value();
3249 unsigned dataOffset = eeGetMDArrayDataOffset(elementType, rank);
3251 dst = gtNewOperNode(GT_ADD, TYP_BYREF, arrayLocalNode, gtNewIconNode(dataOffset, TYP_I_IMPL));
3255 dst = gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewIndexRef(elementType, arrayLocalNode, gtNewIconNode(0)));
3257 GenTreePtr blk = gtNewBlockVal(dst, blkSize);
3258 GenTreePtr srcAddr = gtNewIconHandleNode((size_t)initData, GTF_ICON_STATIC_HDL);
3259 GenTreePtr src = gtNewOperNode(GT_IND, TYP_STRUCT, srcAddr);
3261 return gtNewBlkOpNode(blk, // dst
3268 /*****************************************************************************/
3269 // Returns the GenTree that should be used to do the intrinsic instead of the call.
3270 // Returns NULL if an intrinsic cannot be used
3272 GenTreePtr Compiler::impIntrinsic(GenTreePtr newobjThis,
3273 CORINFO_CLASS_HANDLE clsHnd,
3274 CORINFO_METHOD_HANDLE method,
3275 CORINFO_SIG_INFO* sig,
3279 CorInfoIntrinsics* pIntrinsicID)
3281 bool mustExpand = false;
3282 #if COR_JIT_EE_VERSION > 460
3283 CorInfoIntrinsics intrinsicID = info.compCompHnd->getIntrinsicID(method, &mustExpand);
3285 CorInfoIntrinsics intrinsicID = info.compCompHnd->getIntrinsicID(method);
3287 *pIntrinsicID = intrinsicID;
3289 #ifndef _TARGET_ARM_
3290 genTreeOps interlockedOperator;
3293 if (intrinsicID == CORINFO_INTRINSIC_StubHelpers_GetStubContext)
3295 // must be done regardless of DbgCode and MinOpts
3296 return gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL);
3298 #ifdef _TARGET_64BIT_
3299 if (intrinsicID == CORINFO_INTRINSIC_StubHelpers_GetStubContextAddr)
3301 // must be done regardless of DbgCode and MinOpts
3302 return gtNewOperNode(GT_ADDR, TYP_I_IMPL, gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL));
3305 assert(intrinsicID != CORINFO_INTRINSIC_StubHelpers_GetStubContextAddr);
3308 GenTreePtr retNode = nullptr;
3311 // We disable the inlining of intrinsics for MinOpts.
3313 if (!mustExpand && (opts.compDbgCode || opts.MinOpts()))
3315 *pIntrinsicID = CORINFO_INTRINSIC_Illegal;
3319 // Currently we don't have CORINFO_INTRINSIC_Exp because it does not
3320 // seem to work properly for Infinity values, and we don't do
3321 // CORINFO_INTRINSIC_Pow because it needs a helper which we currently don't have
3323 var_types callType = JITtype2varType(sig->retType);
3325 /* First do the intrinsics which are always smaller than a call */
3327 switch (intrinsicID)
3329 GenTreePtr op1, op2;
3331 case CORINFO_INTRINSIC_Sin:
3332 case CORINFO_INTRINSIC_Sqrt:
3333 case CORINFO_INTRINSIC_Abs:
3334 case CORINFO_INTRINSIC_Cos:
3335 case CORINFO_INTRINSIC_Round:
3336 case CORINFO_INTRINSIC_Cosh:
3337 case CORINFO_INTRINSIC_Sinh:
3338 case CORINFO_INTRINSIC_Tan:
3339 case CORINFO_INTRINSIC_Tanh:
3340 case CORINFO_INTRINSIC_Asin:
3341 case CORINFO_INTRINSIC_Acos:
3342 case CORINFO_INTRINSIC_Atan:
3343 case CORINFO_INTRINSIC_Atan2:
3344 case CORINFO_INTRINSIC_Log10:
3345 case CORINFO_INTRINSIC_Pow:
3346 case CORINFO_INTRINSIC_Exp:
3347 case CORINFO_INTRINSIC_Ceiling:
3348 case CORINFO_INTRINSIC_Floor:
3350 // These are math intrinsics
3352 assert(callType != TYP_STRUCT);
3356 #if defined(LEGACY_BACKEND)
3357 if (IsTargetIntrinsic(intrinsicID))
3358 #elif !defined(_TARGET_X86_)
3359 // Intrinsics that are not implemented directly by target instructions will
3360 // be re-materialized as users calls in rationalizer. For prefixed tail calls,
3361 // don't do this optimization, because
3362 // a) For back compatibility reasons on desktop.Net 4.6 / 4.6.1
3363 // b) It would be a non-trivial task, or too late, to re-materialize a surviving
3364 // tail prefixed GT_INTRINSIC as tail call in rationalizer.
3365 if (!IsIntrinsicImplementedByUserCall(intrinsicID) || !tailCall)
3367 // On x86 RyuJIT, importing intrinsics that are implemented as user calls can cause incorrect calculation
3368 // of the depth of the stack if these intrinsics are used as arguments to another call. This causes bad
3369 // code generation for certain EH constructs.
3370 if (!IsIntrinsicImplementedByUserCall(intrinsicID))
3373 switch (sig->numArgs)
3376 op1 = impPopStack().val;
3378 #if FEATURE_X87_DOUBLES
3380 // X87 stack doesn't differentiate between float/double
3381 // so it doesn't need a cast, but everybody else does
3382 // Just double-check that it is at least an FP type
3383 noway_assert(varTypeIsFloating(op1));
3385 #else // FEATURE_X87_DOUBLES
3387 if (op1->TypeGet() != callType)
3389 op1 = gtNewCastNode(callType, op1, callType);
3392 #endif // FEATURE_X87_DOUBLES
3394 op1 = new (this, GT_INTRINSIC)
3395 GenTreeIntrinsic(genActualType(callType), op1, intrinsicID, method);
3399 op2 = impPopStack().val;
3400 op1 = impPopStack().val;
3402 #if FEATURE_X87_DOUBLES
3404 // X87 stack doesn't differentiate between float/double
3405 // so it doesn't need a cast, but everybody else does
3406 // Just double-check that it is at least an FP type
3407 noway_assert(varTypeIsFloating(op2));
3408 noway_assert(varTypeIsFloating(op1));
3410 #else // FEATURE_X87_DOUBLES
3412 if (op2->TypeGet() != callType)
3414 op2 = gtNewCastNode(callType, op2, callType);
3416 if (op1->TypeGet() != callType)
3418 op1 = gtNewCastNode(callType, op1, callType);
3421 #endif // FEATURE_X87_DOUBLES
3423 op1 = new (this, GT_INTRINSIC)
3424 GenTreeIntrinsic(genActualType(callType), op1, op2, intrinsicID, method);
3428 NO_WAY("Unsupported number of args for Math Intrinsic");
3431 #ifndef LEGACY_BACKEND
3432 if (IsIntrinsicImplementedByUserCall(intrinsicID))
3434 op1->gtFlags |= GTF_CALL;
3442 #ifdef _TARGET_XARCH_
3443 // TODO-ARM-CQ: reenable treating Interlocked operation as intrinsic
3444 case CORINFO_INTRINSIC_InterlockedAdd32:
3445 interlockedOperator = GT_LOCKADD;
3446 goto InterlockedBinOpCommon;
3447 case CORINFO_INTRINSIC_InterlockedXAdd32:
3448 interlockedOperator = GT_XADD;
3449 goto InterlockedBinOpCommon;
3450 case CORINFO_INTRINSIC_InterlockedXchg32:
3451 interlockedOperator = GT_XCHG;
3452 goto InterlockedBinOpCommon;
3454 #ifdef _TARGET_AMD64_
3455 case CORINFO_INTRINSIC_InterlockedAdd64:
3456 interlockedOperator = GT_LOCKADD;
3457 goto InterlockedBinOpCommon;
3458 case CORINFO_INTRINSIC_InterlockedXAdd64:
3459 interlockedOperator = GT_XADD;
3460 goto InterlockedBinOpCommon;
3461 case CORINFO_INTRINSIC_InterlockedXchg64:
3462 interlockedOperator = GT_XCHG;
3463 goto InterlockedBinOpCommon;
3464 #endif // _TARGET_AMD64_
3466 InterlockedBinOpCommon:
3467 assert(callType != TYP_STRUCT);
3468 assert(sig->numArgs == 2);
3470 op2 = impPopStack().val;
3471 op1 = impPopStack().val;
3477 // field (for example)
3479 // In the case where the first argument is the address of a local, we might
3480 // want to make this *not* make the var address-taken -- but atomic instructions
3481 // on a local are probably pretty useless anyway, so we probably don't care.
3483 op1 = gtNewOperNode(interlockedOperator, genActualType(callType), op1, op2);
3484 op1->gtFlags |= GTF_GLOB_EFFECT;
3487 #endif // _TARGET_XARCH_
3489 case CORINFO_INTRINSIC_MemoryBarrier:
3491 assert(sig->numArgs == 0);
3493 op1 = new (this, GT_MEMORYBARRIER) GenTree(GT_MEMORYBARRIER, TYP_VOID);
3494 op1->gtFlags |= GTF_GLOB_EFFECT;
3498 #ifdef _TARGET_XARCH_
3499 // TODO-ARM-CQ: reenable treating InterlockedCmpXchg32 operation as intrinsic
3500 case CORINFO_INTRINSIC_InterlockedCmpXchg32:
3501 #ifdef _TARGET_AMD64_
3502 case CORINFO_INTRINSIC_InterlockedCmpXchg64:
3505 assert(callType != TYP_STRUCT);
3506 assert(sig->numArgs == 3);
3509 op3 = impPopStack().val; // comparand
3510 op2 = impPopStack().val; // value
3511 op1 = impPopStack().val; // location
3513 GenTreePtr node = new (this, GT_CMPXCHG) GenTreeCmpXchg(genActualType(callType), op1, op2, op3);
3515 node->gtCmpXchg.gtOpLocation->gtFlags |= GTF_DONT_CSE;
3521 case CORINFO_INTRINSIC_StringLength:
3522 op1 = impPopStack().val;
3523 if (!opts.MinOpts() && !opts.compDbgCode)
3525 GenTreeArrLen* arrLen =
3526 new (this, GT_ARR_LENGTH) GenTreeArrLen(TYP_INT, op1, offsetof(CORINFO_String, stringLen));
3531 /* Create the expression "*(str_addr + stringLengthOffset)" */
3532 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
3533 gtNewIconNode(offsetof(CORINFO_String, stringLen), TYP_I_IMPL));
3534 op1 = gtNewOperNode(GT_IND, TYP_INT, op1);
3539 case CORINFO_INTRINSIC_StringGetChar:
3540 op2 = impPopStack().val;
3541 op1 = impPopStack().val;
3542 op1 = gtNewIndexRef(TYP_CHAR, op1, op2);
3543 op1->gtFlags |= GTF_INX_STRING_LAYOUT;
3547 case CORINFO_INTRINSIC_InitializeArray:
3548 retNode = impInitializeArrayIntrinsic(sig);
3551 case CORINFO_INTRINSIC_Array_Address:
3552 case CORINFO_INTRINSIC_Array_Get:
3553 case CORINFO_INTRINSIC_Array_Set:
3554 retNode = impArrayAccessIntrinsic(clsHnd, sig, memberRef, readonlyCall, intrinsicID);
3557 case CORINFO_INTRINSIC_GetTypeFromHandle:
3558 op1 = impStackTop(0).val;
3559 if (op1->gtOper == GT_CALL && (op1->gtCall.gtCallType == CT_HELPER) &&
3560 gtIsTypeHandleToRuntimeTypeHelper(op1))
3562 op1 = impPopStack().val;
3563 // Change call to return RuntimeType directly.
3564 op1->gtType = TYP_REF;
3567 // Call the regular function.
3570 case CORINFO_INTRINSIC_RTH_GetValueInternal:
3571 op1 = impStackTop(0).val;
3572 if (op1->gtOper == GT_CALL && (op1->gtCall.gtCallType == CT_HELPER) &&
3573 gtIsTypeHandleToRuntimeTypeHelper(op1))
3576 // Helper-RuntimeTypeHandle -> TreeToGetNativeTypeHandle
3579 // TreeToGetNativeTypeHandle
3581 // Remove call to helper and return the native TypeHandle pointer that was the parameter
3584 op1 = impPopStack().val;
3586 // Get native TypeHandle argument to old helper
3587 op1 = op1->gtCall.gtCallArgs;
3588 assert(op1->OperIsList());
3589 assert(op1->gtOp.gtOp2 == nullptr);
3590 op1 = op1->gtOp.gtOp1;
3593 // Call the regular function.
3596 #ifndef LEGACY_BACKEND
3597 case CORINFO_INTRINSIC_Object_GetType:
3599 op1 = impPopStack().val;
3600 op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, intrinsicID, method);
3602 // Set the CALL flag to indicate that the operator is implemented by a call.
3603 // Set also the EXCEPTION flag because the native implementation of
3604 // CORINFO_INTRINSIC_Object_GetType intrinsic can throw NullReferenceException.
3605 op1->gtFlags |= (GTF_CALL | GTF_EXCEPT);
3609 // Implement ByReference Ctor. This wraps the assignment of the ref into a byref-like field
3610 // in a value type. The canonical example of this is Span<T>. In effect this is just a
3611 // substitution. The parameter byref will be assigned into the newly allocated object.
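// Roughly, "new ByReference<T>(ref value)" becomes an assignment of the
// incoming byref into the struct's first (and only) field, with the struct
// value itself pushed back on the stack (a sketch of the effect, not the
// exact trees).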
3612 case CORINFO_INTRINSIC_ByReference_Ctor:
3614 // Remove call to constructor and directly assign the byref passed
3615 // to the call to the first slot of the ByReference struct.
3616 op1 = impPopStack().val;
3617 GenTreePtr thisptr = newobjThis;
3618 CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(clsHnd, 0);
3619 GenTreePtr field = gtNewFieldRef(TYP_BYREF, fldHnd, thisptr, 0, false);
3620 GenTreePtr assign = gtNewAssignNode(field, op1);
3621 GenTreePtr byReferenceStruct = gtCloneExpr(thisptr->gtGetOp1());
3622 assert(byReferenceStruct != nullptr);
3623 impPushOnStack(byReferenceStruct, typeInfo(TI_STRUCT, clsHnd));
3627 // Implement ptr value getter for ByReference struct.
3628 case CORINFO_INTRINSIC_ByReference_Value:
3630 op1 = impPopStack().val;
3631 CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(clsHnd, 0);
3632 GenTreePtr field = gtNewFieldRef(TYP_BYREF, fldHnd, op1, 0, false);
3637 /* Unknown intrinsic */
3643 if (retNode == nullptr)
3645 NO_WAY("JIT must expand the intrinsic!");
3652 /*****************************************************************************/
3654 GenTreePtr Compiler::impArrayAccessIntrinsic(
3655 CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, int memberRef, bool readonlyCall, CorInfoIntrinsics intrinsicID)
3657 /* If we are generating SMALL_CODE, we don't want to use intrinsics for
3658 the following, as it generates fatter code.
3661 if (compCodeOpt() == SMALL_CODE)
3666 /* These intrinsics generate fatter (but faster) code and are only
3667 done if we don't need SMALL_CODE */
3669 unsigned rank = (intrinsicID == CORINFO_INTRINSIC_Array_Set) ? (sig->numArgs - 1) : sig->numArgs;
3671 // The rank 1 case is special because it has to handle two array formats;
3672 // we will simply not handle that case
3673 if (rank > GT_ARR_MAX_RANK || rank <= 1)
3678 CORINFO_CLASS_HANDLE arrElemClsHnd = nullptr;
3679 var_types elemType = JITtype2varType(info.compCompHnd->getChildType(clsHnd, &arrElemClsHnd));
3681 // For the ref case, we will only be able to inline if the types match
3682 // (the verifier checks for this; we don't care about the nonverified case) and the
3683 // type is final (so we don't need to do the cast)
3684 if ((intrinsicID != CORINFO_INTRINSIC_Array_Get) && !readonlyCall && varTypeIsGC(elemType))
3686 // Get the call site signature
3687 CORINFO_SIG_INFO LocalSig;
3688 eeGetCallSiteSig(memberRef, info.compScopeHnd, impTokenLookupContextHandle, &LocalSig);
3689 assert(LocalSig.hasThis());
3691 CORINFO_CLASS_HANDLE actualElemClsHnd;
3693 if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
3695 // Fetch the last argument, the one that indicates the type we are setting.
3696 CORINFO_ARG_LIST_HANDLE argType = LocalSig.args;
3697 for (unsigned r = 0; r < rank; r++)
3699 argType = info.compCompHnd->getArgNext(argType);
3702 typeInfo argInfo = verParseArgSigToTypeInfo(&LocalSig, argType);
3703 actualElemClsHnd = argInfo.GetClassHandle();
3707 assert(intrinsicID == CORINFO_INTRINSIC_Array_Address);
3709 // Fetch the return type
3710 typeInfo retInfo = verMakeTypeInfo(LocalSig.retType, LocalSig.retTypeClass);
3711 assert(retInfo.IsByRef());
3712 actualElemClsHnd = retInfo.GetClassHandle();
3715 // if it's not final, we can't do the optimization
3716 if (!(info.compCompHnd->getClassAttribs(actualElemClsHnd) & CORINFO_FLG_FINAL))
3722 unsigned arrayElemSize;
3723 if (elemType == TYP_STRUCT)
3725 assert(arrElemClsHnd);
3727 arrayElemSize = info.compCompHnd->getClassSize(arrElemClsHnd);
3731 arrayElemSize = genTypeSize(elemType);
3734 if ((unsigned char)arrayElemSize != arrayElemSize)
3736 // arrayElemSize would be truncated as an unsigned char.
3737 // This means the array element is too large. Don't do the optimization.
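// (That is, the element size exceeds 255 bytes and cannot be encoded in the
// unsigned char field of the GT_ARR_ELEM node created below.)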
3741 GenTreePtr val = nullptr;
3743 if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
3745 // Assignment of a struct is more work, and there are more gets than sets.
3746 if (elemType == TYP_STRUCT)
3751 val = impPopStack().val;
3752 assert(genActualType(elemType) == genActualType(val->gtType) ||
3753 (elemType == TYP_FLOAT && val->gtType == TYP_DOUBLE) ||
3754 (elemType == TYP_INT && val->gtType == TYP_BYREF) ||
3755 (elemType == TYP_DOUBLE && val->gtType == TYP_FLOAT));
3758 noway_assert((unsigned char)GT_ARR_MAX_RANK == GT_ARR_MAX_RANK);
3760 GenTreePtr inds[GT_ARR_MAX_RANK];
3761 for (unsigned k = rank; k > 0; k--)
3763 inds[k - 1] = impPopStack().val;
3766 GenTreePtr arr = impPopStack().val;
3767 assert(arr->gtType == TYP_REF);
3769 GenTreePtr arrElem =
3770 new (this, GT_ARR_ELEM) GenTreeArrElem(TYP_BYREF, arr, static_cast<unsigned char>(rank),
3771 static_cast<unsigned char>(arrayElemSize), elemType, &inds[0]);
3773 if (intrinsicID != CORINFO_INTRINSIC_Array_Address)
3775 arrElem = gtNewOperNode(GT_IND, elemType, arrElem);
3778 if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
3780 assert(val != nullptr);
3781 return gtNewAssignNode(arrElem, val);
3789 BOOL Compiler::verMergeEntryStates(BasicBlock* block, bool* changed)
3793 // do some basic checks first
3794 if (block->bbStackDepthOnEntry() != verCurrentState.esStackDepth)
3799 if (verCurrentState.esStackDepth > 0)
3801 // merge stack types
3802 StackEntry* parentStack = block->bbStackOnEntry();
3803 StackEntry* childStack = verCurrentState.esStack;
3805 for (i = 0; i < verCurrentState.esStackDepth; i++, parentStack++, childStack++)
3807 if (tiMergeToCommonParent(&parentStack->seTypeInfo, &childStack->seTypeInfo, changed) == FALSE)
3814 // merge initialization status of this ptr
3816 if (verTrackObjCtorInitState)
3818 // If we're tracking the CtorInitState, then it must not be unknown in the current state.
3819 assert(verCurrentState.thisInitialized != TIS_Bottom);
3821 // If the successor block's thisInit state is unknown, copy it from the current state.
3822 if (block->bbThisOnEntry() == TIS_Bottom)
3825 verSetThisInit(block, verCurrentState.thisInitialized);
3827 else if (verCurrentState.thisInitialized != block->bbThisOnEntry())
3829 if (block->bbThisOnEntry() != TIS_Top)
3832 verSetThisInit(block, TIS_Top);
3834 if (block->bbFlags & BBF_FAILED_VERIFICATION)
3836 // The block is bad. Control can flow through the block to any handler that catches the
3837 // verification exception, but the importer ignores bad blocks and therefore won't model
3838 // this flow in the normal way. To complete the merge into the bad block, the new state
3839 // needs to be manually pushed to the handlers that may be reached after the verification
3840 // exception occurs.
3842 // Usually, the new state was already propagated to the relevant handlers while processing
3843 // the predecessors of the bad block. The exception is when the bad block is at the start
3844 // of a try region, meaning it is protected by additional handlers that do not protect its predecessors.
3847 if (block->hasTryIndex() && ((block->bbFlags & BBF_TRY_BEG) != 0))
3849 // Push TIS_Top to the handlers that protect the bad block. Note that this can cause
3850 // recursive calls back into this code path (if successors of the current bad block are
3851 // also bad blocks).
3853 ThisInitState origTIS = verCurrentState.thisInitialized;
3854 verCurrentState.thisInitialized = TIS_Top;
3855 impVerifyEHBlock(block, true);
3856 verCurrentState.thisInitialized = origTIS;
3864 assert(verCurrentState.thisInitialized == TIS_Bottom && block->bbThisOnEntry() == TIS_Bottom);
3870 /*****************************************************************************
3871 * 'logMsg' is true if a log message needs to be logged, false if the caller has
3872 * already logged it (presumably in a more detailed fashion than done here)
3873 * 'bVerificationException' is true for a verification exception, false for a
3874 * "call unauthorized by host" exception.
3877 void Compiler::verConvertBBToThrowVerificationException(BasicBlock* block DEBUGARG(bool logMsg))
3879 block->bbJumpKind = BBJ_THROW;
3880 block->bbFlags |= BBF_FAILED_VERIFICATION;
3882 impCurStmtOffsSet(block->bbCodeOffs);
3885 // we need this since BeginTreeList asserts otherwise
3886 impTreeList = impTreeLast = nullptr;
3887 block->bbFlags &= ~BBF_IMPORTED;
3891 JITLOG((LL_ERROR, "Verification failure: while compiling %s near IL offset %x..%xh \n", info.compFullName,
3892 block->bbCodeOffs, block->bbCodeOffsEnd));
3895 printf("\n\nVerification failure: %s near IL %xh \n", info.compFullName, block->bbCodeOffs);
3899 if (JitConfig.DebugBreakOnVerificationFailure())
3907 // if the stack is non-empty, evaluate all the side-effects
3908 if (verCurrentState.esStackDepth > 0)
3910 impEvalSideEffects();
3912 assert(verCurrentState.esStackDepth == 0);
3914 GenTreePtr op1 = gtNewHelperCallNode(CORINFO_HELP_VERIFICATION, TYP_VOID, GTF_EXCEPT,
3915 gtNewArgList(gtNewIconNode(block->bbCodeOffs)));
3916 // verCurrentState.esStackDepth = 0;
3917 impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
3919 // The inliner is not able to handle methods that require a throw block, so
3920 // make sure this method never gets inlined.
3921 info.compCompHnd->setMethodAttribs(info.compMethodHnd, CORINFO_FLG_BAD_INLINEE);
3924 /*****************************************************************************
3927 void Compiler::verHandleVerificationFailure(BasicBlock* block DEBUGARG(bool logMsg))
3930 // In AMD64, for historical reasons involving design limitations of JIT64, the VM has a
3931 // slightly different mechanism in which it calls the JIT to perform IL verification:
3932 // in the case of transparent methods the VM calls for a predicate IsVerifiable()
3933 // that consists of calling the JIT with the IMPORT_ONLY flag and with the IL verify flag on.
3934 // If the JIT determines the method is not verifiable, it should raise the exception to the VM and let
3935 // it bubble up until reported by the runtime. Currently in RyuJIT, this method doesn't bubble
3936 // up the exception; instead it embeds a throw inside the offending basic block and lets the
3937 // jitted method fail at run time.
3939 // For AMD64 we don't want this behavior when the JIT has been called only for verification (i.e.
3940 // with the IMPORT_ONLY and IL Verification flag set) because this won't actually generate code,
3941 // just try to find out whether to fail this method before even actually jitting it. So, in case
3942 // we detect these two conditions, instead of generating a throw statement inside the offending
3943 // basic block, we immediately fail to JIT and notify the VM so that the IsVerifiable() predicate
3944 // returns false, making RyuJIT behave the same way JIT64 does.
3946 // The rationale behind this workaround is to avoid modifying the VM and maintain compatibility between JIT64 and
3947 // RyuJIT for the time being until we completely replace JIT64.
3948 // TODO-ARM64-Cleanup: We probably want to actually modify the VM in the future to avoid the unnecessary two passes.
3950 // In AMD64 we must make sure we're behaving the same way as JIT64, meaning we should only raise the verification
3951 // exception if we are only importing and verifying. The method verNeedsVerification() can also modify the
3952 // tiVerificationNeeded flag in the case it determines it can 'skip verification' during importation and defer it
3953 // to a runtime check. That's why we must assert one or the other (since the flag tiVerificationNeeded can
3954 // be turned off during importation).
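// For illustration, the 64-bit flow described above, as assumed pseudocode:
//     if (JIT_FLAG_IMPORT_ONLY && verifying)   // the VM's IsVerifiable() probe
//         tiIsVerifiableCode = FALSE;          // report failure; no code is generated
//     // in all cases, the offending block still becomes a BBJ_THROW block below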
3955 CLANG_FORMAT_COMMENT_ANCHOR;
3957 #ifdef _TARGET_64BIT_
3960 bool canSkipVerificationResult =
3961 info.compCompHnd->canSkipMethodVerification(info.compMethodHnd) != CORINFO_VERIFICATION_CANNOT_SKIP;
3962 assert(tiVerificationNeeded || canSkipVerificationResult);
3965 // Add the non-verifiable flag to the compiler
3966 if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IMPORT_ONLY))
3968 tiIsVerifiableCode = FALSE;
3970 #endif //_TARGET_64BIT_
3971 verResetCurrentState(block, &verCurrentState);
3972 verConvertBBToThrowVerificationException(block DEBUGARG(logMsg));
3975 impNoteLastILoffs(); // Remember at which BC offset the tree was finished
3979 /******************************************************************************/
3980 typeInfo Compiler::verMakeTypeInfo(CorInfoType ciType, CORINFO_CLASS_HANDLE clsHnd)
3982 assert(ciType < CORINFO_TYPE_COUNT);
3987 case CORINFO_TYPE_STRING:
3988 case CORINFO_TYPE_CLASS:
3989 tiResult = verMakeTypeInfo(clsHnd);
3990 if (!tiResult.IsType(TI_REF))
3991 { // type must be consistent with element type
3996 #ifdef _TARGET_64BIT_
3997 case CORINFO_TYPE_NATIVEINT:
3998 case CORINFO_TYPE_NATIVEUINT:
4001 // If we have more precise information, use it
4002 return verMakeTypeInfo(clsHnd);
4006 return typeInfo::nativeInt();
4009 #endif // _TARGET_64BIT_
4011 case CORINFO_TYPE_VALUECLASS:
4012 case CORINFO_TYPE_REFANY:
4013 tiResult = verMakeTypeInfo(clsHnd);
4014 // type must be consistent with element type;
4015 if (!tiResult.IsValueClass())
4020 case CORINFO_TYPE_VAR:
4021 return verMakeTypeInfo(clsHnd);
4023 case CORINFO_TYPE_PTR: // for now, pointers are treated as an error
4024 case CORINFO_TYPE_VOID:
4028 case CORINFO_TYPE_BYREF:
4030 CORINFO_CLASS_HANDLE childClassHandle;
4031 CorInfoType childType = info.compCompHnd->getChildType(clsHnd, &childClassHandle);
4032 return ByRef(verMakeTypeInfo(childType, childClassHandle));
4038 { // If we have more precise information, use it
4039 return typeInfo(TI_STRUCT, clsHnd);
4043 return typeInfo(JITtype2tiType(ciType));
4049 /******************************************************************************/
4051 typeInfo Compiler::verMakeTypeInfo(CORINFO_CLASS_HANDLE clsHnd, bool bashStructToRef /* = false */)
4053 if (clsHnd == nullptr)
4058 // Byrefs should only occur in method and local signatures, which are accessed
4059 // using ICorClassInfo and ICorClassInfo.getChildType.
4060 // So findClass() and getClassAttribs() should not be called for byrefs
4062 if (JITtype2varType(info.compCompHnd->asCorInfoType(clsHnd)) == TYP_BYREF)
4064 assert(!"Did findClass() return a Byref?");
4068 unsigned attribs = info.compCompHnd->getClassAttribs(clsHnd);
4070 if (attribs & CORINFO_FLG_VALUECLASS)
4072 CorInfoType t = info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd);
4074 // Meta-data validation should ensure that CORINFO_TYPE_BYREF does
4075 // not occur here, so we may want to change this to an assert instead.
4076 if (t == CORINFO_TYPE_VOID || t == CORINFO_TYPE_BYREF || t == CORINFO_TYPE_PTR)
4081 #ifdef _TARGET_64BIT_
4082 if (t == CORINFO_TYPE_NATIVEINT || t == CORINFO_TYPE_NATIVEUINT)
4084 return typeInfo::nativeInt();
4086 #endif // _TARGET_64BIT_
4088 if (t != CORINFO_TYPE_UNDEF)
4090 return (typeInfo(JITtype2tiType(t)));
4092 else if (bashStructToRef)
4094 return (typeInfo(TI_REF, clsHnd));
4098 return (typeInfo(TI_STRUCT, clsHnd));
4101 else if (attribs & CORINFO_FLG_GENERIC_TYPE_VARIABLE)
4103 // See comment in _typeInfo.h for why we do it this way.
4104 return (typeInfo(TI_REF, clsHnd, true));
4108 return (typeInfo(TI_REF, clsHnd));
4112 /******************************************************************************/
4113 BOOL Compiler::verIsSDArray(typeInfo ti)
4115 if (ti.IsNullObjRef())
4116 { // nulls are SD arrays
4120 if (!ti.IsType(TI_REF))
4125 if (!info.compCompHnd->isSDArray(ti.GetClassHandleForObjRef()))
4132 /******************************************************************************/
4133 /* Given 'arrayObjectType' which is an array type, fetch the element type. */
4134 /* Returns an error type if anything goes wrong */
4136 typeInfo Compiler::verGetArrayElemType(typeInfo arrayObjectType)
4138 assert(!arrayObjectType.IsNullObjRef()); // you need to check for null explicitly since that is a success case
4140 if (!verIsSDArray(arrayObjectType))
4145 CORINFO_CLASS_HANDLE childClassHandle = nullptr;
4146 CorInfoType ciType = info.compCompHnd->getChildType(arrayObjectType.GetClassHandleForObjRef(), &childClassHandle);
4148 return verMakeTypeInfo(ciType, childClassHandle);
4151 /*****************************************************************************
4153 typeInfo Compiler::verParseArgSigToTypeInfo(CORINFO_SIG_INFO* sig, CORINFO_ARG_LIST_HANDLE args)
4155 CORINFO_CLASS_HANDLE classHandle;
4156 CorInfoType ciType = strip(info.compCompHnd->getArgType(sig, args, &classHandle));
4158 var_types type = JITtype2varType(ciType);
4159 if (varTypeIsGC(type))
4161 // For efficiency, getArgType only returns something in classHandle for
4162 // value types. For other types that have additional type info, you
4163 // have to call back explicitly
4164 classHandle = info.compCompHnd->getArgClass(sig, args);
4167 NO_WAY("Could not figure out Class specified in argument or local signature");
4171 return verMakeTypeInfo(ciType, classHandle);
4174 /*****************************************************************************/
4176 // This does the expensive check to figure out whether the method
4177 // needs to be verified. It is called only when we fail verification,
4178 // just before throwing the verification exception.
4180 BOOL Compiler::verNeedsVerification()
4182 // If we have previously determined that verification is NOT needed
4183 // (for example in Compiler::compCompile), that means verification is really not needed.
4184 // Return the same decision we made before.
4185 // (Note: This literally means that tiVerificationNeeded can never go from 0 to 1.)
4187 if (!tiVerificationNeeded)
4189 return tiVerificationNeeded;
4192 assert(tiVerificationNeeded);
4194 // Ok, we haven't concluded that verification is NOT needed. Consult the EE now to
4195 // obtain the answer.
4196 CorInfoCanSkipVerificationResult canSkipVerificationResult =
4197 info.compCompHnd->canSkipMethodVerification(info.compMethodHnd);
4199 // canSkipVerification will return one of the following three values:
4200 // CORINFO_VERIFICATION_CANNOT_SKIP = 0, // Cannot skip verification during jit time.
4201 // CORINFO_VERIFICATION_CAN_SKIP = 1, // Can skip verification during jit time.
4202 // CORINFO_VERIFICATION_RUNTIME_CHECK = 2, // Skip verification during jit time,
4203 // but need to insert a callout to the VM to ask during runtime
4204 // whether to skip verification or not.
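// For illustration, how the three answers are consumed below (a sketch):
//     CANNOT_SKIP   -> tiVerificationNeeded stays true; verify at jit time
//     CAN_SKIP      -> tiVerificationNeeded becomes false; no jit-time verification
//     RUNTIME_CHECK -> tiVerificationNeeded becomes false, but tiRuntimeCalloutNeeded
//                      is set so the VM is asked again at runtime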
4206 // Set tiRuntimeCalloutNeeded if canSkipVerification() instructs us to insert a callout for runtime check
4207 if (canSkipVerificationResult == CORINFO_VERIFICATION_RUNTIME_CHECK)
4209 tiRuntimeCalloutNeeded = true;
4212 if (canSkipVerificationResult == CORINFO_VERIFICATION_DONT_JIT)
4214 // Dev10 706080 - Testers don't like the assert, so just silence it
4215 // by not using the macros that invoke debugAssert.
4219 // When tiVerificationNeeded is true, JIT will do the verification during JIT time.
4220 // The following line means we will NOT do jit time verification if canSkipVerification
4221 // returns CORINFO_VERIFICATION_CAN_SKIP or CORINFO_VERIFICATION_RUNTIME_CHECK.
4222 tiVerificationNeeded = (canSkipVerificationResult == CORINFO_VERIFICATION_CANNOT_SKIP);
4223 return tiVerificationNeeded;
4226 BOOL Compiler::verIsByRefLike(const typeInfo& ti)
4232 if (!ti.IsType(TI_STRUCT))
4236 return info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_CONTAINS_STACK_PTR;
4239 BOOL Compiler::verIsSafeToReturnByRef(const typeInfo& ti)
4241 if (ti.IsPermanentHomeByRef())
4251 BOOL Compiler::verIsBoxable(const typeInfo& ti)
4253 return (ti.IsPrimitiveType() || ti.IsObjRef() // includes boxed generic type variables
4254 || ti.IsUnboxedGenericTypeVar() ||
4255 (ti.IsType(TI_STRUCT) &&
4256 // exclude byreflike structs
4257 !(info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_CONTAINS_STACK_PTR)));
4260 // Is it a boxed value type?
4261 bool Compiler::verIsBoxedValueType(typeInfo ti)
4263 if (ti.GetType() == TI_REF)
4265 CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandleForObjRef();
4266 return !!eeIsValueClass(clsHnd);
4274 /*****************************************************************************
4276 * Check if a TailCall is legal.
4279 bool Compiler::verCheckTailCallConstraint(
4281 CORINFO_RESOLVED_TOKEN* pResolvedToken,
4282 CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, // Is this a "constrained." call on a type parameter?
4283 bool speculative // If true, won't throw if verification fails. Instead it will
4284 // return false to the caller.
4285 // If false, it will throw.
4289 CORINFO_SIG_INFO sig;
4290 unsigned int popCount = 0; // we can't pop the stack since impImportCall needs it, so
4291 // this counter is used to keep track of how many items have been virtually popped
4294 CORINFO_METHOD_HANDLE methodHnd = nullptr;
4295 CORINFO_CLASS_HANDLE methodClassHnd = nullptr;
4296 unsigned methodClassFlgs = 0;
4298 assert(impOpcodeIsCallOpcode(opcode));
4300 if (compIsForInlining())
4305 // for calli, VerifyOrReturn that this is not a virtual method
4306 if (opcode == CEE_CALLI)
4308 /* Get the call sig */
4309 eeGetSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &sig);
4311 // We don't know the target method, so we have to infer the flags, or
4312 // assume the worst-case.
4313 mflags = (sig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC;
4317 methodHnd = pResolvedToken->hMethod;
4319 mflags = info.compCompHnd->getMethodAttribs(methodHnd);
4321 // When verifying generic code we pair the method handle with its
4322 // owning class to get the exact method signature.
4323 methodClassHnd = pResolvedToken->hClass;
4324 assert(methodClassHnd);
4326 eeGetMethodSig(methodHnd, &sig, methodClassHnd);
4328 // opcode specific check
4329 methodClassFlgs = info.compCompHnd->getClassAttribs(methodClassHnd);
4332 // We must have obtained the methodClassHnd if the opcode is not CEE_CALLI
4333 assert((methodHnd != nullptr && methodClassHnd != nullptr) || opcode == CEE_CALLI);
4335 if ((sig.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
4337 eeGetCallSiteSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &sig);
4340 // check compatibility of the arguments
4341 unsigned int argCount;
4342 argCount = sig.numArgs;
4343 CORINFO_ARG_LIST_HANDLE args;
4347 typeInfo tiDeclared = verParseArgSigToTypeInfo(&sig, args).NormaliseForStack();
4349 // check that the argument is not a byref for tailcalls
4350 VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclared), "tailcall on byrefs", speculative);
4352 // For unsafe code, we might have parameters containing a pointer to a stack location.
4353 // Disallow the tailcall in that case.
4354 CORINFO_CLASS_HANDLE classHandle;
4355 CorInfoType ciType = strip(info.compCompHnd->getArgType(&sig, args, &classHandle));
4356 VerifyOrReturnSpeculative(ciType != CORINFO_TYPE_PTR, "tailcall on CORINFO_TYPE_PTR", speculative);
4358 args = info.compCompHnd->getArgNext(args);
4362 popCount += sig.numArgs;
4364 // check for 'this', which is present on non-static methods not called via NEWOBJ
4365 if (!(mflags & CORINFO_FLG_STATIC))
4367 // Always update the popCount.
4368 // This is crucial for the stack calculation to be correct.
4369 typeInfo tiThis = impStackTop(popCount).seTypeInfo;
4372 if (opcode == CEE_CALLI)
4374 // For CALLI, we don't know the methodClassHnd. Therefore, let's check the "this" object
4376 if (tiThis.IsValueClass())
4380 VerifyOrReturnSpeculative(!verIsByRefLike(tiThis), "byref in tailcall", speculative);
4384 // Check type compatibility of the this argument
4385 typeInfo tiDeclaredThis = verMakeTypeInfo(methodClassHnd);
4386 if (tiDeclaredThis.IsValueClass())
4388 tiDeclaredThis.MakeByRef();
4391 VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclaredThis), "byref in tailcall", speculative);
4395 // Tail calls on constrained calls should be illegal too:
4396 // when instantiated at a value type, a constrained call may pass the address of a stack allocated value
4397 VerifyOrReturnSpeculative(!pConstrainedResolvedToken, "byref in constrained tailcall", speculative);
4399 // Get the exact view of the signature for an array method
4400 if (sig.retType != CORINFO_TYPE_VOID)
4402 if (methodClassFlgs & CORINFO_FLG_ARRAY)
4404 assert(opcode != CEE_CALLI);
4405 eeGetCallSiteSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &sig);
4409 typeInfo tiCalleeRetType = verMakeTypeInfo(sig.retType, sig.retTypeClass);
4410 typeInfo tiCallerRetType =
4411 verMakeTypeInfo(info.compMethodInfo->args.retType, info.compMethodInfo->args.retTypeClass);
4413 // void return type gets morphed into the error type, so we have to treat it specially here
4414 if (sig.retType == CORINFO_TYPE_VOID)
4416 VerifyOrReturnSpeculative(info.compMethodInfo->args.retType == CORINFO_TYPE_VOID, "tailcall return mismatch",
4421 VerifyOrReturnSpeculative(tiCompatibleWith(NormaliseForStack(tiCalleeRetType),
4422 NormaliseForStack(tiCallerRetType), true),
4423 "tailcall return mismatch", speculative);
4426 // for tailcall, stack must be empty
4427 VerifyOrReturnSpeculative(verCurrentState.esStackDepth == popCount, "stack non-empty on tailcall", speculative);
4429 return true; // Yes, tailcall is legal
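// For illustration (hypothetical C# shape): a 'tail. call' such as
//     return Callee(ref someLocal);
// fails the byref check above ("tailcall on byrefs"), since the verifier
// cannot prove the byref does not point into the caller's frame, which the
// tailcall destroys.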
4432 /*****************************************************************************
4434 * Checks the IL verification rules for the call
4437 void Compiler::verVerifyCall(OPCODE opcode,
4438 CORINFO_RESOLVED_TOKEN* pResolvedToken,
4439 CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
4442 const BYTE* delegateCreateStart,
4443 const BYTE* codeAddr,
4444 CORINFO_CALL_INFO* callInfo DEBUGARG(const char* methodName))
4447 CORINFO_SIG_INFO* sig = nullptr;
4448 unsigned int popCount = 0; // we can't pop the stack since impImportCall needs it, so
4449 // this counter is used to keep track of how many items have been virtually popped
4452 // for calli, VerifyOrReturn that this is not a virtual method
4453 if (opcode == CEE_CALLI)
4455 Verify(false, "Calli not verifiable");
4459 //<NICE> It would be nice to cache the rest of it, but eeFindMethod is the big ticket item.
4460 mflags = callInfo->verMethodFlags;
4462 sig = &callInfo->verSig;
4464 if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
4466 eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
4469 // opcode specific check
4470 unsigned methodClassFlgs = callInfo->classFlags;
4474 // cannot do callvirt on valuetypes
4475 VerifyOrReturn(!(methodClassFlgs & CORINFO_FLG_VALUECLASS), "callVirt on value class");
4476 VerifyOrReturn(sig->hasThis(), "CallVirt on static method");
4481 assert(!tailCall); // Importer should not allow this
4482 VerifyOrReturn((mflags & CORINFO_FLG_CONSTRUCTOR) && !(mflags & CORINFO_FLG_STATIC),
4483 "newobj must be on instance");
4485 if (methodClassFlgs & CORINFO_FLG_DELEGATE)
4487 VerifyOrReturn(sig->numArgs == 2, "wrong number args to delegate ctor");
4488 typeInfo tiDeclaredObj = verParseArgSigToTypeInfo(sig, sig->args).NormaliseForStack();
4489 typeInfo tiDeclaredFtn =
4490 verParseArgSigToTypeInfo(sig, info.compCompHnd->getArgNext(sig->args)).NormaliseForStack();
4491 VerifyOrReturn(tiDeclaredFtn.IsNativeIntType(), "ftn arg needs to be a native int type");
4493 assert(popCount == 0);
4494 typeInfo tiActualObj = impStackTop(1).seTypeInfo;
4495 typeInfo tiActualFtn = impStackTop(0).seTypeInfo;
4497 VerifyOrReturn(tiActualFtn.IsMethod(), "delegate needs method as first arg");
4498 VerifyOrReturn(tiCompatibleWith(tiActualObj, tiDeclaredObj, true), "delegate object type mismatch");
4499 VerifyOrReturn(tiActualObj.IsNullObjRef() || tiActualObj.IsType(TI_REF),
4500 "delegate object type mismatch");
4502 CORINFO_CLASS_HANDLE objTypeHandle =
4503 tiActualObj.IsNullObjRef() ? nullptr : tiActualObj.GetClassHandleForObjRef();
4505 // the method signature must be compatible with the delegate's invoke method
4507 // check that for virtual functions, the type of the object used to get the
4508 // ftn ptr is the same as the type of the object passed to the delegate ctor.
4509 // since this is a bit of work to determine in general, we pattern match stylized code sequences.
4512 // the delegate creation code check, which used to be done later, is now done here
4513 // so we can read delegateMethodRef directly from
4514 // the preceding LDFTN or CEE_LDVIRTFTN instruction sequence;
4515 // we then use it in our call to isCompatibleDelegate().
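// For illustration, the two stylized IL shapes matched below (assumed forms,
// see impIsLDFTN_TOKEN / impIsDUP_LDVIRTFTN_TOKEN):
//     ldloc obj                      ldloc obj
//     ldftn token             OR     dup
//     newobj DelegateCtor            ldvirtftn token
//                                    newobj DelegateCtor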
4517 mdMemberRef delegateMethodRef = mdMemberRefNil;
4518 VerifyOrReturn(verCheckDelegateCreation(delegateCreateStart, codeAddr, delegateMethodRef),
4519 "must create delegates with certain IL");
4521 CORINFO_RESOLVED_TOKEN delegateResolvedToken;
4522 delegateResolvedToken.tokenContext = impTokenLookupContextHandle;
4523 delegateResolvedToken.tokenScope = info.compScopeHnd;
4524 delegateResolvedToken.token = delegateMethodRef;
4525 delegateResolvedToken.tokenType = CORINFO_TOKENKIND_Method;
4526 info.compCompHnd->resolveToken(&delegateResolvedToken);
4528 CORINFO_CALL_INFO delegateCallInfo;
4529 eeGetCallInfo(&delegateResolvedToken, nullptr /* constraint typeRef */,
4530 addVerifyFlag(CORINFO_CALLINFO_SECURITYCHECKS), &delegateCallInfo);
4532 BOOL isOpenDelegate = FALSE;
4533 VerifyOrReturn(info.compCompHnd->isCompatibleDelegate(objTypeHandle, delegateResolvedToken.hClass,
4534 tiActualFtn.GetMethod(), pResolvedToken->hClass,
4536 "function incompatible with delegate");
4538 // check the constraints on the target method
4539 VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(delegateResolvedToken.hClass),
4540 "delegate target has unsatisfied class constraints");
4541 VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(delegateResolvedToken.hClass,
4542 tiActualFtn.GetMethod()),
4543 "delegate target has unsatisfied method constraints");
4545 // See ECMA spec section 1.8.1.5.2 (Delegating via instance dispatch)
4546 // for additional verification rules for delegates
4547 CORINFO_METHOD_HANDLE actualMethodHandle = tiActualFtn.GetMethod();
4548 DWORD actualMethodAttribs = info.compCompHnd->getMethodAttribs(actualMethodHandle);
4549 if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr))
4552 if ((actualMethodAttribs & CORINFO_FLG_VIRTUAL) && ((actualMethodAttribs & CORINFO_FLG_FINAL) == 0)
4554 && StrictCheckForNonVirtualCallToVirtualMethod()
4558 if (info.compCompHnd->shouldEnforceCallvirtRestriction(info.compScopeHnd))
4560 VerifyOrReturn(tiActualObj.IsThisPtr() && lvaIsOriginalThisReadOnly() ||
4561 verIsBoxedValueType(tiActualObj),
4562 "The 'this' parameter to the call must be either the calling method's "
4563 "'this' parameter or "
4564 "a boxed value type.");
4569 if (actualMethodAttribs & CORINFO_FLG_PROTECTED)
4571 BOOL targetIsStatic = actualMethodAttribs & CORINFO_FLG_STATIC;
4573 Verify(targetIsStatic || !isOpenDelegate,
4574 "Unverifiable creation of an open instance delegate for a protected member.");
4576 CORINFO_CLASS_HANDLE instanceClassHnd = (tiActualObj.IsNullObjRef() || targetIsStatic)
4578 : tiActualObj.GetClassHandleForObjRef();
4580 // In the case of protected methods, it is a requirement that the 'this'
4581 // pointer be a subclass of the current context. Perform this check.
4582 Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
4583 "Accessing protected method through wrong type.");
4588 // fall thru to default checks
4590 VerifyOrReturn(!(mflags & CORINFO_FLG_ABSTRACT), "method abstract");
4592 VerifyOrReturn(!((mflags & CORINFO_FLG_CONSTRUCTOR) && (methodClassFlgs & CORINFO_FLG_DELEGATE)),
4593 "can only newobj a delegate constructor");
4595 // check compatibility of the arguments
4596 unsigned int argCount;
4597 argCount = sig->numArgs;
4598 CORINFO_ARG_LIST_HANDLE args;
4602 typeInfo tiActual = impStackTop(popCount + argCount).seTypeInfo;
4604 typeInfo tiDeclared = verParseArgSigToTypeInfo(sig, args).NormaliseForStack();
4605 VerifyOrReturn(tiCompatibleWith(tiActual, tiDeclared, true), "type mismatch");
4607 args = info.compCompHnd->getArgNext(args);
4613 popCount += sig->numArgs;
4615 // check for 'this', which is present on non-static methods not called via NEWOBJ
4616 CORINFO_CLASS_HANDLE instanceClassHnd = info.compClassHnd;
4617 if (!(mflags & CORINFO_FLG_STATIC) && (opcode != CEE_NEWOBJ))
4619 typeInfo tiThis = impStackTop(popCount).seTypeInfo;
4622 // If it is null, we assume we can access it (since it will AV shortly)
4623 // If it is anything but a reference class, there is no hierarchy, so
4624 // again, we don't need the precise instance class to compute 'protected' access
4625 if (tiThis.IsType(TI_REF))
4627 instanceClassHnd = tiThis.GetClassHandleForObjRef();
4630 // Check type compatibility of the this argument
4631 typeInfo tiDeclaredThis = verMakeTypeInfo(pResolvedToken->hClass);
4632 if (tiDeclaredThis.IsValueClass())
4634 tiDeclaredThis.MakeByRef();
4637 // If this is a call to the base class .ctor, set thisPtr Init for this block.
4639 if (mflags & CORINFO_FLG_CONSTRUCTOR)
4641 if (verTrackObjCtorInitState && tiThis.IsThisPtr() &&
4642 verIsCallToInitThisPtr(info.compClassHnd, pResolvedToken->hClass))
4644 assert(verCurrentState.thisInitialized !=
4645 TIS_Bottom); // This should never be the case just from the logic of the verifier.
4646 VerifyOrReturn(verCurrentState.thisInitialized == TIS_Uninit,
4647 "Call to base class constructor when 'this' is possibly initialized");
4648 // Otherwise, 'this' is now initialized.
4649 verCurrentState.thisInitialized = TIS_Init;
4650 tiThis.SetInitialisedObjRef();
4654 // We allow direct calls to value type constructors
4655 // NB: we have to check that the contents of tiThis is a value type, otherwise we could use a
4656 // constrained callvirt to illegally re-enter a .ctor on a value of reference type.
4657 VerifyOrReturn(tiThis.IsByRef() && DereferenceByRef(tiThis).IsValueClass(),
4658 "Bad call to a constructor");
4662 if (pConstrainedResolvedToken != nullptr)
4664 VerifyOrReturn(tiThis.IsByRef(), "non-byref this type in constrained call");
4666 typeInfo tiConstraint = verMakeTypeInfo(pConstrainedResolvedToken->hClass);
4668 // We just dereference this and test for equality
4669 tiThis.DereferenceByRef();
4670 VerifyOrReturn(typeInfo::AreEquivalent(tiThis, tiConstraint),
4671 "this type mismatch with constrained type operand");
4673 // Now pretend the this type is the boxed constrained type, for the sake of subsequent checks
4674 tiThis = typeInfo(TI_REF, pConstrainedResolvedToken->hClass);
4677 // To support direct calls on readonly byrefs, just pretend tiDeclaredThis is readonly too
4678 if (tiDeclaredThis.IsByRef() && tiThis.IsReadonlyByRef())
4680 tiDeclaredThis.SetIsReadonlyByRef();
4683 VerifyOrReturn(tiCompatibleWith(tiThis, tiDeclaredThis, true), "this type mismatch");
4685 if (tiThis.IsByRef())
4687 // Find the actual type where the method exists (as opposed to what is declared
4688 // in the metadata). This is to prevent passing a byref as the "this" argument
4689 // while calling methods like System.ValueType.GetHashCode() which expect boxed objects.
4691 CORINFO_CLASS_HANDLE actualClassHnd = info.compCompHnd->getMethodClass(pResolvedToken->hMethod);
4692 VerifyOrReturn(eeIsValueClass(actualClassHnd),
4693 "Call to base type of valuetype (which is never a valuetype)");
4696 // Rules for non-virtual call to a non-final virtual method:
4699 // The "this" pointer is considered to be "possibly written" if
4700 // 1. Its address has been taken (LDARGA 0) anywhere in the method, or
4702 // 2. It has been stored to (STARG.0) anywhere in the method.
4704 // A non-virtual call to a non-final virtual method is only allowed if
4705 // 1. The this pointer passed to the callee is an instance of a boxed value type, or
4707 // 2. The this pointer passed to the callee is the current method's this pointer,
4708 // (and) the current method's this pointer is not "possibly written".
4710 // Thus the rule is that if you assign to this ANYWHERE you can't make "base" calls to
4711 // virtual methods. (Luckily this doesn't affect .ctors, since they are not virtual).
4712 // This is stronger than is strictly needed, but implementing a laxer rule is significantly
4713 // harder and more error prone.
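// For illustration (hypothetical C# shape): in
//     class D : B { public override void M() { base.M(); } }
// 'base.M()' compiles to a non-virtual 'call B::M'. Under the strict check
// below, it verifies only because 'this' is the method's own, never-written
// 'this' pointer; IL that also did LDARGA 0 or STARG.0 would make the same
// call unverifiable.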
4715 if (opcode == CEE_CALL && (mflags & CORINFO_FLG_VIRTUAL) && ((mflags & CORINFO_FLG_FINAL) == 0)
4717 && StrictCheckForNonVirtualCallToVirtualMethod()
4721 if (info.compCompHnd->shouldEnforceCallvirtRestriction(info.compScopeHnd))
4724 tiThis.IsThisPtr() && lvaIsOriginalThisReadOnly() || verIsBoxedValueType(tiThis),
4725 "The 'this' parameter to the call must be either the calling method's 'this' parameter or "
4726 "a boxed value type.");
4731 // check any constraints on the callee's class and type parameters
4732 VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(pResolvedToken->hClass),
4733 "method has unsatisfied class constraints");
4734 VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(pResolvedToken->hClass, pResolvedToken->hMethod),
4735 "method has unsatisfied method constraints");
4737 if (mflags & CORINFO_FLG_PROTECTED)
4739 VerifyOrReturn(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
4740 "Can't access protected method");
4743 // Get the exact view of the signature for an array method
4744 if (sig->retType != CORINFO_TYPE_VOID)
4746 eeGetMethodSig(pResolvedToken->hMethod, sig, pResolvedToken->hClass);
4749 // "readonly." prefixed calls only allowed for the Address operation on arrays.
4750 // The methods supported by array types are under the control of the EE
4751 // so we can trust that only the Address operation returns a byref.
4754 typeInfo tiCalleeRetType = verMakeTypeInfo(sig->retType, sig->retTypeClass);
4755 VerifyOrReturn((methodClassFlgs & CORINFO_FLG_ARRAY) && tiCalleeRetType.IsByRef(),
4756 "unexpected use of readonly prefix");
4759 // Verify the tailcall
4762 verCheckTailCallConstraint(opcode, pResolvedToken, pConstrainedResolvedToken, false);
4766 /*****************************************************************************
4767 * Checks that a delegate creation is done using the following pattern:
4769 * dup; ldvirtftn targetMemberRef
4771 * OR: ldftn targetMemberRef
4773 * 'delegateCreateStart' points at the last dup or ldftn in this basic block (null if
4774 * not in this basic block)
4776 * targetMemberRef is read from the code sequence.
4777 * targetMemberRef is validated iff verificationNeeded.
4780 BOOL Compiler::verCheckDelegateCreation(const BYTE* delegateCreateStart,
4781 const BYTE* codeAddr,
4782 mdMemberRef& targetMemberRef)
4784 if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr))
4786 targetMemberRef = getU4LittleEndian(&delegateCreateStart[2]);
4789 else if (impIsDUP_LDVIRTFTN_TOKEN(delegateCreateStart, codeAddr))
4791 targetMemberRef = getU4LittleEndian(&delegateCreateStart[3]);
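// For reference (ECMA-335 opcode encodings, stated here as background for
// the offsets above): 'ldftn' is the two-byte opcode FE 06, so its token
// starts at delegateCreateStart[2]; 'dup' (25) followed by 'ldvirtftn'
// (FE 07) places the token at delegateCreateStart[3].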
4798 typeInfo Compiler::verVerifySTIND(const typeInfo& tiTo, const typeInfo& value, const typeInfo& instrType)
4800 Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
4801 typeInfo ptrVal = verVerifyLDIND(tiTo, instrType);
4802 typeInfo normPtrVal = typeInfo(ptrVal).NormaliseForStack();
4803 if (!tiCompatibleWith(value, normPtrVal, true))
4805 Verify(tiCompatibleWith(value, normPtrVal, true), "type mismatch");
4806 compUnsafeCastUsed = true;
4811 typeInfo Compiler::verVerifyLDIND(const typeInfo& ptr, const typeInfo& instrType)
4813 assert(!instrType.IsStruct());
4818 ptrVal = DereferenceByRef(ptr);
4819 if (instrType.IsObjRef() && !ptrVal.IsObjRef())
4821 Verify(false, "bad pointer");
4822 compUnsafeCastUsed = true;
4824 else if (!instrType.IsObjRef() && !typeInfo::AreEquivalent(instrType, ptrVal))
4826 Verify(false, "pointer not consistent with instr");
4827 compUnsafeCastUsed = true;
4832 Verify(false, "pointer not byref");
4833 compUnsafeCastUsed = true;
4839 // Verify that the field is used properly. 'tiThis' is NULL for statics,
4840 // 'fieldFlags' is the field's attributes, and mutator is TRUE if it is a
4841 // ld*flda or a st*fld.
4842 // 'enclosingClass' is given if we are accessing a field in some specific type.
4844 void Compiler::verVerifyField(CORINFO_RESOLVED_TOKEN* pResolvedToken,
4845 const CORINFO_FIELD_INFO& fieldInfo,
4846 const typeInfo* tiThis,
4848 BOOL allowPlainStructAsThis)
4850 CORINFO_CLASS_HANDLE enclosingClass = pResolvedToken->hClass;
4851 unsigned fieldFlags = fieldInfo.fieldFlags;
4852 CORINFO_CLASS_HANDLE instanceClass =
4853 info.compClassHnd; // for statics, we imagine the instance is the current class.
4855 bool isStaticField = ((fieldFlags & CORINFO_FLG_FIELD_STATIC) != 0);
4858 Verify(!(fieldFlags & CORINFO_FLG_FIELD_UNMANAGED), "mutating an RVA-based static");
4859 if ((fieldFlags & CORINFO_FLG_FIELD_FINAL))
4861 Verify((info.compFlags & CORINFO_FLG_CONSTRUCTOR) && enclosingClass == info.compClassHnd &&
4862 info.compIsStatic == isStaticField,
4863 "bad use of initonly field (set or address taken)");
4867 if (tiThis == nullptr)
4869 Verify(isStaticField, "used static opcode with non-static field");
4873 typeInfo tThis = *tiThis;
4875 if (allowPlainStructAsThis && tThis.IsValueClass())
4880 // If it is null, we assume we can access it (since it will AV shortly)
4881 // If it is anything but a reference class, there is no hierarchy, so
4882 // again, we don't need the precise instance class to compute 'protected' access
4883 if (tiThis->IsType(TI_REF))
4885 instanceClass = tiThis->GetClassHandleForObjRef();
4888 // Note that even if the field is static, we require that the this pointer
4889 // satisfy the same constraints as a non-static field. This happens to
4890 // be simpler and seems reasonable
4891 typeInfo tiDeclaredThis = verMakeTypeInfo(enclosingClass);
4892 if (tiDeclaredThis.IsValueClass())
4894 tiDeclaredThis.MakeByRef();
4896 // we allow a read-only tThis on any field access (even stores!), because if the
4897 // class implementor wants to prohibit stores, he should make the field private.
4898 // we do this by setting the read-only bit on the type we compare tThis to.
4899 tiDeclaredThis.SetIsReadonlyByRef();
4901 else if (verTrackObjCtorInitState && tThis.IsThisPtr())
4903 // Any field access is legal on "uninitialized" this pointers.
4904 // The easiest way to implement this is to simply set the
4905 // initialized bit for the duration of the type check on the
4906 // field access only. It does not change the state of the "this"
4907 // for the function as a whole. Note that the "tThis" is a copy
4908 // of the original "this" type (*tiThis) passed in.
4909 tThis.SetInitialisedObjRef();
4912 Verify(tiCompatibleWith(tThis, tiDeclaredThis, true), "this type mismatch");
4915 // Presently the JIT does not check that we don't store or take the address of init-only fields
4916 // since we cannot guarantee their immutability and it is not a security issue.
4918 // check any constraints on the field's class --- accessing the field might cause a class constructor to run.
4919 VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(enclosingClass),
4920 "field has unsatisfied class constraints");
4921 if (fieldFlags & CORINFO_FLG_FIELD_PROTECTED)
4923 Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClass),
4924 "Accessing protected method through wrong type.");
4928 void Compiler::verVerifyCond(const typeInfo& tiOp1, const typeInfo& tiOp2, unsigned opcode)
4930 if (tiOp1.IsNumberType())
4932 #ifdef _TARGET_64BIT_
4933 Verify(tiCompatibleWith(tiOp1, tiOp2, true), "Cond type mismatch");
4934 #else // _TARGET_64BIT
4935 // [10/17/2013] Consider changing this: to put on my verification lawyer hat,
4936 // this is non-conforming to the ECMA Spec: types don't have to be equivalent,
4937 // but compatible, since we can coalesce native int with int32 (see section III.1.5).
4938 Verify(typeInfo::AreEquivalent(tiOp1, tiOp2), "Cond type mismatch");
4939 #endif // !_TARGET_64BIT_
4941 else if (tiOp1.IsObjRef())
4953 Verify(FALSE, "Cond not allowed on object types");
4955 Verify(tiOp2.IsObjRef(), "Cond type mismatch");
4957 else if (tiOp1.IsByRef())
4959 Verify(tiOp2.IsByRef(), "Cond type mismatch");
4963 Verify(tiOp1.IsMethod() && tiOp2.IsMethod(), "Cond type mismatch");
4967 void Compiler::verVerifyThisPtrInitialised()
4969 if (verTrackObjCtorInitState)
4971 Verify(verCurrentState.thisInitialized == TIS_Init, "this ptr is not initialized");
4975 BOOL Compiler::verIsCallToInitThisPtr(CORINFO_CLASS_HANDLE context, CORINFO_CLASS_HANDLE target)
4977 // Either target == context, in which case we are calling an alternate .ctor,
4978 // or target is the immediate parent of context.
4980 return ((target == context) || (target == info.compCompHnd->getParentType(context)));
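// For illustration (hypothetical C# shape): inside D..ctor, both
//     : this(...)   ->  call D..ctor    (target == context)
//     : base(...)   ->  call B..ctor    (target == parent of context)
// satisfy this predicate; a call to a grandparent's .ctor would not.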
4983 GenTreePtr Compiler::impImportLdvirtftn(GenTreePtr thisPtr,
4984 CORINFO_RESOLVED_TOKEN* pResolvedToken,
4985 CORINFO_CALL_INFO* pCallInfo)
4987 if ((pCallInfo->methodFlags & CORINFO_FLG_EnC) && !(pCallInfo->classFlags & CORINFO_FLG_INTERFACE))
4989 NO_WAY("Virtual call to a function added via EnC is not supported");
4992 #ifdef FEATURE_READYTORUN_COMPILER
4993 if (opts.IsReadyToRun())
4995 if (!pCallInfo->exactContextNeedsRuntimeLookup)
4997 GenTreeCall* call = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_VIRTUAL_FUNC_PTR, TYP_I_IMPL, GTF_EXCEPT,
4998 gtNewArgList(thisPtr));
5000 call->setEntryPoint(pCallInfo->codePointerLookup.constLookup);
5005 // We need a runtime lookup. CoreRT has a ReadyToRun helper for that too.
5006 if (IsTargetAbi(CORINFO_CORERT_ABI))
5008 GenTreePtr ctxTree = getRuntimeContextTree(pCallInfo->codePointerLookup.lookupKind.runtimeLookupKind);
5010 return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL,
5011 gtNewArgList(ctxTree), &pCallInfo->codePointerLookup.lookupKind);
5016 // Get the exact descriptor for the static callsite
5017 GenTreePtr exactTypeDesc = impParentClassTokenToHandle(pResolvedToken);
5018 if (exactTypeDesc == nullptr)
5019 { // compDonotInline()
5023 GenTreePtr exactMethodDesc = impTokenToHandle(pResolvedToken);
5024 if (exactMethodDesc == nullptr)
5025 { // compDonotInline()
5029 GenTreeArgList* helpArgs = gtNewArgList(exactMethodDesc);
5031 helpArgs = gtNewListNode(exactTypeDesc, helpArgs);
5033 helpArgs = gtNewListNode(thisPtr, helpArgs);
5035 // Call helper function. This gets the target address of the final destination callsite.
5037 return gtNewHelperCallNode(CORINFO_HELP_VIRTUAL_FUNC_PTR, TYP_I_IMPL, GTF_EXCEPT, helpArgs);
5040 /*****************************************************************************
5042 * Build and import a box node
5045 void Compiler::impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken)
5047 // Get the tree for the type handle for the boxed object. In the case
5048 // of shared generic code or ngen'd code this might be an embedded computation.
5050 // Note we can only do it if the class constructor has been called;
5051 // we can always do it on primitive types.
5053 GenTreePtr op1 = nullptr;
5054 GenTreePtr op2 = nullptr;
5057 impSpillSpecialSideEff();
5059 // Now get the expression to box from the stack.
5060 CORINFO_CLASS_HANDLE operCls;
5061 GenTreePtr exprToBox = impPopStack(operCls).val;
5063 CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pResolvedToken->hClass);
5064 if (boxHelper == CORINFO_HELP_BOX)
5066 // we are doing 'normal' boxing. This means that we can inline the box operation
5067 // Box(expr) gets morphed into
5068 // temp = new(clsHnd)
5069 // cpobj(temp+4, expr, clsHnd)
5071 // The code paths differ slightly below for structs and primitives because
5072 // "cpobj" differs in these cases. In one case you get
5073 // impAssignStructPtr(temp+4, expr, clsHnd)
5074 // and the other you get *(temp+4) = expr
5077 if (impBoxTempInUse || impBoxTemp == BAD_VAR_NUM)
5079 impBoxTemp = lvaGrabTemp(true DEBUGARG("Box Helper"));
5082 // needs to stay in use until this box expression is appended to
5083 // some other node. We approximate this by keeping it alive until
5084 // the opcode stack becomes empty
5085 impBoxTempInUse = true;
5087 #ifdef FEATURE_READYTORUN_COMPILER
5088 bool usingReadyToRunHelper = false;
5090 if (opts.IsReadyToRun())
5092 op1 = impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_NEW, TYP_REF);
5093 usingReadyToRunHelper = (op1 != nullptr);
5096 if (!usingReadyToRunHelper)
5099 // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
5100 // and the newfast call with a single call to a dynamic R2R cell that will:
5101 // 1) Load the context
5102 // 2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
5103 // 3) Allocate and return the new object for boxing
5104 // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
5106 // Ensure that the value class is restored
5107 op2 = impTokenToHandle(pResolvedToken, nullptr, TRUE /* mustRestoreHandle */);
5109 { // compDonotInline()
5113 op1 = gtNewHelperCallNode(info.compCompHnd->getNewHelper(pResolvedToken, info.compMethodHnd), TYP_REF, 0,
5117 /* Remember that this basic block contains 'new' of an object */
5118 compCurBB->bbFlags |= BBF_HAS_NEWOBJ;
5120 GenTreePtr asg = gtNewTempAssign(impBoxTemp, op1);
5122 GenTreePtr asgStmt = impAppendTree(asg, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
5124 op1 = gtNewLclvNode(impBoxTemp, TYP_REF);
5125 op2 = gtNewIconNode(sizeof(void*), TYP_I_IMPL);
5126 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, op2);
5128 if (varTypeIsStruct(exprToBox))
5130 assert(info.compCompHnd->getClassSize(pResolvedToken->hClass) == info.compCompHnd->getClassSize(operCls));
5131 op1 = impAssignStructPtr(op1, exprToBox, operCls, (unsigned)CHECK_SPILL_ALL);
5135 lclTyp = exprToBox->TypeGet();
5136 if (lclTyp == TYP_BYREF)
5138 lclTyp = TYP_I_IMPL;
5140 CorInfoType jitType = info.compCompHnd->asCorInfoType(pResolvedToken->hClass);
5141 if (impIsPrimitive(jitType))
5143 lclTyp = JITtype2varType(jitType);
5145 assert(genActualType(exprToBox->TypeGet()) == genActualType(lclTyp) ||
5146 varTypeIsFloating(lclTyp) == varTypeIsFloating(exprToBox->TypeGet()));
5147 var_types srcTyp = exprToBox->TypeGet();
5148 var_types dstTyp = lclTyp;
5150 if (srcTyp != dstTyp)
5152 assert((varTypeIsFloating(srcTyp) && varTypeIsFloating(dstTyp)) ||
5153 (varTypeIsIntegral(srcTyp) && varTypeIsIntegral(dstTyp)));
5154 exprToBox = gtNewCastNode(dstTyp, exprToBox, dstTyp);
5156 op1 = gtNewAssignNode(gtNewOperNode(GT_IND, lclTyp, op1), exprToBox);
5159 op2 = gtNewLclvNode(impBoxTemp, TYP_REF);
5160 op1 = gtNewOperNode(GT_COMMA, TYP_REF, op1, op2);
5162 // Record that this is a "box" node.
5163 op1 = new (this, GT_BOX) GenTreeBox(TYP_REF, op1, asgStmt);
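// For illustration, a sketch of the inline-box tree built above:
//     GT_BOX( COMMA( <store of exprToBox into *(boxTemp + sizeof(void*))>,
//                    LCL_VAR boxTemp ) )
// The GT_BOX node records 'asgStmt' so later phases can locate the
// allocation statement when optimizing the patterns listed below.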
5165 // If it is a value class, mark the "box" node. We can use this information
5166 // to optimise several cases:
5167 // "box(x) == null" --> false
5168 // "(box(x)).CallAnInterfaceMethod(...)" --> "(&x).CallAValueTypeMethod"
5169 // "(box(x)).CallAnObjectMethod(...)" --> "(&x).CallAValueTypeMethod"
5171 op1->gtFlags |= GTF_BOX_VALUE;
5172 assert(op1->IsBoxedValue());
5173 assert(asg->gtOper == GT_ASG);
5177 // Don't optimize, just call the helper and be done with it
5179 // Ensure that the value class is restored
5180 op2 = impTokenToHandle(pResolvedToken, nullptr, TRUE /* mustRestoreHandle */);
5182 { // compDonotInline()
5186 GenTreeArgList* args = gtNewArgList(op2, impGetStructAddr(exprToBox, operCls, (unsigned)CHECK_SPILL_ALL, true));
5187 op1 = gtNewHelperCallNode(boxHelper, TYP_REF, GTF_EXCEPT, args);
5190 /* Push the result back on the stack, */
5191 /* even if clsHnd is a value class we want the TI_REF */
5192 typeInfo tiRetVal = typeInfo(TI_REF, info.compCompHnd->getTypeForBox(pResolvedToken->hClass));
5193 impPushOnStack(op1, tiRetVal);
5196 //------------------------------------------------------------------------
5197 // impImportNewObjArray: Build and import `new` of multi-dimensional array
5200 // pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized
5201 // by a call to CEEInfo::resolveToken().
5202 // pCallInfo - The CORINFO_CALL_INFO that has been initialized
5203 // by a call to CEEInfo::getCallInfo().
5206 // The multi-dimensional array constructor arguments (array dimensions) are
5207 // pushed on the IL stack on entry to this method.
5210 // Multi-dimensional array constructors are imported as calls to a JIT
5211 // helper, not as regular calls.
5213 void Compiler::impImportNewObjArray(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo)
5215 GenTreePtr classHandle = impParentClassTokenToHandle(pResolvedToken);
5216 if (classHandle == nullptr)
5217 { // compDonotInline()
5221 assert(pCallInfo->sig.numArgs);
5224 GenTreeArgList* args;
5227 // There are two different JIT helpers that can be used to allocate
5228 // multi-dimensional arrays:
5230 // - CORINFO_HELP_NEW_MDARR - takes the array dimensions as varargs.
5231 // This variant is deprecated. It should eventually be removed.
5233 // - CORINFO_HELP_NEW_MDARR_NONVARARG - takes the array dimensions as
5234 // pointer to block of int32s. This variant is more portable.
5236 // The non-varargs helper is enabled for CoreRT only for now. Enabling this
5237 // unconditionally would require a ReadyToRun version bump.
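// For illustration (a sketch, assuming a hypothetical 'new string[2,3]'):
// the non-varargs path below ends up building roughly
//     CALL help CORINFO_HELP_NEW_MDARR_NONVARARG(clsHnd, 2, &lvaNewObjArrayArgs)
// after storing the two dimension values into the TYP_BLK temp.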
5239 CLANG_FORMAT_COMMENT_ANCHOR;
5241 #if COR_JIT_EE_VERSION > 460
5242 if (!opts.IsReadyToRun() || IsTargetAbi(CORINFO_CORERT_ABI))
5244 LclVarDsc* newObjArrayArgsVar;
5246 // Reuse the temp used to pass the array dimensions to avoid bloating
5247 // the stack frame in case there are multiple calls to multi-dim array
5248 // constructors within a single method.
5249 if (lvaNewObjArrayArgs == BAD_VAR_NUM)
5251 lvaNewObjArrayArgs = lvaGrabTemp(false DEBUGARG("NewObjArrayArgs"));
5252 lvaTable[lvaNewObjArrayArgs].lvType = TYP_BLK;
5253 lvaTable[lvaNewObjArrayArgs].lvExactSize = 0;
5256 // Increase size of lvaNewObjArrayArgs to be the largest size needed to hold 'numArgs' integers
5257 // for our call to CORINFO_HELP_NEW_MDARR_NONVARARG.
5258 lvaTable[lvaNewObjArrayArgs].lvExactSize =
5259 max(lvaTable[lvaNewObjArrayArgs].lvExactSize, pCallInfo->sig.numArgs * sizeof(INT32));
5261 // The side-effects may include allocation of more multi-dimensional arrays. Spill all side-effects
5262 // to ensure that the shared lvaNewObjArrayArgs local variable is only ever used to pass arguments
5263 // to one allocation at a time.
5264 impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportNewObjArray"));
5267 // The arguments of the CORINFO_HELP_NEW_MDARR_NONVARARG helper are:
5268 // - Array class handle
5269 // - Number of dimension arguments
5270 // - Pointer to block of int32 dimensions - address of lvaNewObjArrayArgs temp.
5273 node = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK);
5274 node = gtNewOperNode(GT_ADDR, TYP_I_IMPL, node);
5276 // Pop dimension arguments from the stack one at a time and store them
5277 // into the lvaNewObjArrayArgs temp.
5278 for (int i = pCallInfo->sig.numArgs - 1; i >= 0; i--)
5280 GenTreePtr arg = impImplicitIorI4Cast(impPopStack().val, TYP_INT);
5282 GenTreePtr dest = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK);
5283 dest = gtNewOperNode(GT_ADDR, TYP_I_IMPL, dest);
5284 dest = gtNewOperNode(GT_ADD, TYP_I_IMPL, dest,
5285 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(INT32) * i));
5286 dest = gtNewOperNode(GT_IND, TYP_INT, dest);
5288 node = gtNewOperNode(GT_COMMA, node->TypeGet(), gtNewAssignNode(dest, arg), node);
5291 args = gtNewArgList(node);
5293 // pass number of arguments to the helper
5294 args = gtNewListNode(gtNewIconNode(pCallInfo->sig.numArgs), args);
5296 args = gtNewListNode(classHandle, args);
5298 node = gtNewHelperCallNode(CORINFO_HELP_NEW_MDARR_NONVARARG, TYP_REF, 0, args);
5304 // The varargs helper needs the type and method handles as last
5305 // and last-1 param (this is a cdecl call, so args will be
5306 // pushed in reverse order on the CPU stack)
5309 args = gtNewArgList(classHandle);
5311 // pass number of arguments to the helper
5312 args = gtNewListNode(gtNewIconNode(pCallInfo->sig.numArgs), args);
5314 unsigned argFlags = 0;
5315 args = impPopList(pCallInfo->sig.numArgs, &argFlags, &pCallInfo->sig, args);
5317 node = gtNewHelperCallNode(CORINFO_HELP_NEW_MDARR, TYP_REF, 0, args);
5319 // varargs, so we pop the arguments
5320 node->gtFlags |= GTF_CALL_POP_ARGS;
5323 // At the present time we don't track Caller pop arguments
5324 // that have GC references in them
5325 for (GenTreeArgList* temp = args; temp; temp = temp->Rest())
5327 assert(temp->Current()->gtType != TYP_REF);
5332 node->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
5333 node->gtCall.compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)pResolvedToken->hClass;
5335 // Remember that this basic block contains 'new' of a md array
5336 compCurBB->bbFlags |= BBF_HAS_NEWARRAY;
5338 impPushOnStack(node, typeInfo(TI_REF, pResolvedToken->hClass));
5341 GenTreePtr Compiler::impTransformThis(GenTreePtr thisPtr,
5342 CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
5343 CORINFO_THIS_TRANSFORM transform)
5347 case CORINFO_DEREF_THIS:
5349 GenTreePtr obj = thisPtr;
5351 // This does a LDIND on the obj, which should be a byref pointing to a ref
5352 impBashVarAddrsToI(obj);
5353 assert(genActualType(obj->gtType) == TYP_I_IMPL || obj->gtType == TYP_BYREF);
5354 CorInfoType constraintTyp = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass);
5356 obj = gtNewOperNode(GT_IND, JITtype2varType(constraintTyp), obj);
5357 // ldind could point anywhere, for example a boxed class static int
5358 obj->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
5363 case CORINFO_BOX_THIS:
5365 // Constraint calls where there might be no
5366 // unboxed entry point require us to implement the call via a helper.
5367 // These only occur when a possible target of the call
5368 // may have inherited an implementation of an interface
5369 // method from System.Object or System.ValueType. The EE does not provide us with
5370 // "unboxed" versions of these methods.
5372 GenTreePtr obj = thisPtr;
5374 assert(obj->TypeGet() == TYP_BYREF || obj->TypeGet() == TYP_I_IMPL);
5375 obj = gtNewObjNode(pConstrainedResolvedToken->hClass, obj);
5376 obj->gtFlags |= GTF_EXCEPT;
5378 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass);
5379 var_types objType = JITtype2varType(jitTyp);
5380 if (impIsPrimitive(jitTyp))
5382 if (obj->OperIsBlk())
5384 obj->ChangeOperUnchecked(GT_IND);
5386 // Obj could point anywhere, for example a boxed class static int
5387 obj->gtFlags |= GTF_IND_TGTANYWHERE;
5388 obj->gtOp.gtOp2 = nullptr; // must be zero for tree walkers
5391 obj->gtType = JITtype2varType(jitTyp);
5392 assert(varTypeIsArithmetic(obj->gtType));
5395 // This pushes on the dereferenced byref
5396 // This is then used immediately to box.
5397 impPushOnStack(obj, verMakeTypeInfo(pConstrainedResolvedToken->hClass).NormaliseForStack());
5399 // This pops off the byref-to-a-value-type remaining on the stack and
5400 // replaces it with a boxed object.
5401 // This is then used as the object to the virtual call immediately below.
5402 impImportAndPushBox(pConstrainedResolvedToken);
5403 if (compDonotInline())
5408 obj = impPopStack().val;
5411 case CORINFO_NO_THIS_TRANSFORM:
5417 //------------------------------------------------------------------------
5418 // impCanPInvokeInline: check whether PInvoke inlining should be enabled in the current method.
5421 // true if PInvoke inlining should be enabled in the current method, false otherwise
5424 // Checks a number of ambient conditions where we could pinvoke but choose not to
5426 bool Compiler::impCanPInvokeInline()
5428 return getInlinePInvokeEnabled() && (!opts.compDbgCode) && (compCodeOpt() != SMALL_CODE) &&
5429 (!opts.compNoPInvokeInlineCB) // profiler is preventing inline pinvoke
5433 //------------------------------------------------------------------------
5434 // impCanPInvokeInlineCallSite: basic legality checks using information
5435 // from a call to see if the call qualifies as an inline pinvoke.
5438 // block - block containing the call, or for inlinees, block
5439 // containing the call being inlined
5442 // true if this call can legally qualify as an inline pinvoke, false otherwise
5445 // For runtimes that support exception handling interop there are
5446 // restrictions on using inline pinvoke in handler regions.
5448 // * We have to disable pinvoke inlining inside of filters because
5449 // in case the main execution (i.e. in the try block) is inside
5450 // unmanaged code, we cannot reuse the inlined stub (we still need
5451 // the original state until we are in the catch handler)
5453 // * We disable pinvoke inlining inside handlers since the GSCookie
5454 // is in the inlined Frame (see
5455 // CORINFO_EE_INFO::InlinedCallFrameInfo::offsetOfGSCookie), but
5456 // this would not protect framelets/return-address of handlers.
5458 // These restrictions are currently also in place for CoreCLR but
5459 // can be relaxed when coreclr/#8459 is addressed.
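// For illustration, the checks below as assumed pseudocode:
//     if (block->hasHndIndex())            return false; // in a handler/filter
//     if (IsTargetAbi(CORINFO_CORERT_ABI)) return true;  // remaining limits don't apply
//     if (AMD64 && block->hasTryIndex())   return false; // see JIT64 note below
//     return true;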
5461 bool Compiler::impCanPInvokeInlineCallSite(BasicBlock* block)
5463 if (block->hasHndIndex())
5468 // The remaining limitations do not apply to CoreRT
5469 if (IsTargetAbi(CORINFO_CORERT_ABI))
5474 #ifdef _TARGET_AMD64_
5475 // On x64, we disable pinvoke inlining inside of try regions.
5476 // Here is the comment from JIT64 explaining why:
5478 // [VSWhidbey: 611015] - because the jitted code links in the
5479 // Frame (instead of the stub) we rely on the Frame not being
5480 // 'active' until inside the stub. This normally happens by the
5481 // stub setting the return address pointer in the Frame object
5482 // inside the stub. On a normal return, the return address
5483 // pointer is zeroed out so the Frame can be safely re-used, but
5484 // if an exception occurs, nobody zeros out the return address
5485 // pointer. Thus if we re-used the Frame object, it would go
5486 // 'active' as soon as we link it into the Frame chain.
5488 // Technically we only need to disable PInvoke inlining if we're
5489 // in a handler or if we're in a try body with a catch or
5490 // filter/except where other non-handler code in this method
5491 // might run and try to re-use the dirty Frame object.
5493 // A desktop test case where this seems to matter is
5494 // jit\jit64\ebvts\mcpp\sources2\ijw\__clrcall\vector_ctor_dtor.02\deldtor_clr.exe
5495 if (block->hasTryIndex())
5499 #endif // _TARGET_AMD64_
5504 //------------------------------------------------------------------------
5505 // impCheckForPInvokeCall examine call to see if it is a pinvoke and if so
5506 // if it can be expressed as an inline pinvoke.
5509 // call - tree for the call
5510 // methHnd - handle for the method being called (may be null)
5511 // sig - signature of the method being called
5512 // mflags - method flags for the method being called
5513 // block - block containing the call, or for inlinees, block
5514 // containing the call being inlined
5517 // Sets GTF_CALL_M_PINVOKE on the call for pinvokes.
5519 // Also sets GTF_CALL_UNMANAGED on call for inline pinvokes if the
5520 // call passes a combination of legality and profitability checks.
5522 // If GTF_CALL_UNMANAGED is set, increments info.compCallUnmanaged
5524 void Compiler::impCheckForPInvokeCall(
5525 GenTreePtr call, CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* sig, unsigned mflags, BasicBlock* block)
5527 CorInfoUnmanagedCallConv unmanagedCallConv;
5529 // If VM flagged it as Pinvoke, flag the call node accordingly
5530 if ((mflags & CORINFO_FLG_PINVOKE) != 0)
5532 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_PINVOKE;
5537 if ((mflags & CORINFO_FLG_PINVOKE) == 0 || (mflags & CORINFO_FLG_NOSECURITYWRAP) == 0)
5542 unmanagedCallConv = info.compCompHnd->getUnmanagedCallConv(methHnd);
5546 CorInfoCallConv callConv = CorInfoCallConv(sig->callConv & CORINFO_CALLCONV_MASK);
5547 if (callConv == CORINFO_CALLCONV_NATIVEVARARG)
5549 // Used by the IL Stubs.
5550 callConv = CORINFO_CALLCONV_C;
5552 static_assert_no_msg((unsigned)CORINFO_CALLCONV_C == (unsigned)CORINFO_UNMANAGED_CALLCONV_C);
5553 static_assert_no_msg((unsigned)CORINFO_CALLCONV_STDCALL == (unsigned)CORINFO_UNMANAGED_CALLCONV_STDCALL);
5554 static_assert_no_msg((unsigned)CORINFO_CALLCONV_THISCALL == (unsigned)CORINFO_UNMANAGED_CALLCONV_THISCALL);
5555 unmanagedCallConv = CorInfoUnmanagedCallConv(callConv);
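// The static_asserts above guarantee that the managed and unmanaged calling
// convention enums share numeric values, which is what makes the plain
// CorInfoUnmanagedCallConv(callConv) cast safe (e.g. CORINFO_CALLCONV_STDCALL
// maps to CORINFO_UNMANAGED_CALLCONV_STDCALL).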
5557 assert(!call->gtCall.gtCallCookie);
5560 if (unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_C && unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_STDCALL &&
5561 unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_THISCALL)
5565 optNativeCallCount++;
5567 if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && methHnd == nullptr)
5569 // PInvoke CALLI in IL stubs must be inlined
5574 if (!impCanPInvokeInlineCallSite(block))
5579 // PInvoke CALL in IL stubs must be inlined on CoreRT. Skip the ambient conditions checks and
5580 // profitability checks
5581 if (!(opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && IsTargetAbi(CORINFO_CORERT_ABI)))
5583 if (!impCanPInvokeInline())
5588 // Size-speed tradeoff: don't use inline pinvoke at rarely
5589 // executed call sites. The non-inline version is more compact.
5591 if (block->isRunRarely())
5597 // The expensive check should be last
5598 if (info.compCompHnd->pInvokeMarshalingRequired(methHnd, sig))
5604 JITLOG((LL_INFO1000000, "\nInline a CALLI PINVOKE call from method %s", info.compFullName));
5606 call->gtFlags |= GTF_CALL_UNMANAGED;
5607 info.compCallUnmanaged++;
5609 // AMD64 convention is same for native and managed
5610 if (unmanagedCallConv == CORINFO_UNMANAGED_CALLCONV_C)
5612 call->gtFlags |= GTF_CALL_POP_ARGS;
5615 if (unmanagedCallConv == CORINFO_UNMANAGED_CALLCONV_THISCALL)
5617 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_UNMGD_THISCALL;
5621 GenTreePtr Compiler::impImportIndirectCall(CORINFO_SIG_INFO* sig, IL_OFFSETX ilOffset)
5623 var_types callRetTyp = JITtype2varType(sig->retType);
5625 /* The function pointer is on top of the stack - It may be a
5626 * complex expression. As it is evaluated after the args,
5627 * it may cause registered args to be spilled. Simply spill it.
5630 // Ignore this trivial case.
5631 if (impStackTop().val->gtOper != GT_LCL_VAR)
5633 impSpillStackEntry(verCurrentState.esStackDepth - 1,
5634 BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impImportIndirectCall"));
5637 /* Get the function pointer */
5639 GenTreePtr fptr = impPopStack().val;
5640 assert(genActualType(fptr->gtType) == TYP_I_IMPL);
5643 // This temporary must never be converted to a double in stress mode,
5644 // because that can introduce a call to the cast helper after the
5645 // arguments have already been evaluated.
5647 if (fptr->OperGet() == GT_LCL_VAR)
5649 lvaTable[fptr->gtLclVarCommon.gtLclNum].lvKeepType = 1;
5653 /* Create the call node */
5655 GenTreePtr call = gtNewIndCallNode(fptr, callRetTyp, nullptr, ilOffset);
5657 call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
5662 /*****************************************************************************/
5664 void Compiler::impPopArgsForUnmanagedCall(GenTreePtr call, CORINFO_SIG_INFO* sig)
5666 assert(call->gtFlags & GTF_CALL_UNMANAGED);
5668 /* Since we push the arguments in reverse order (i.e. right -> left)
5669 * spill any side effects from the stack
5671 * OBS: if there is only one side effect, we do not need to spill it;
5672 * thus we have to spill all side effects except the last one
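// For illustration (hypothetical operands): with args A, B, C evaluated
// left-to-right in IL but passed right-to-left to the unmanaged callee,
// two side-effecting argument trees could observably reorder; spilling
// every side effect except the last one into temps pins the IL order.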
5675 unsigned lastLevelWithSideEffects = UINT_MAX;
5677 unsigned argsToReverse = sig->numArgs;
5679 // For "thiscall", the first argument goes in a register. Since its
5680 // order does not need to be changed, we do not need to spill it
5682 if (call->gtCall.gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
5684 assert(argsToReverse);
5688 #ifndef _TARGET_X86_
5689 // Don't reverse args on ARM or x64 - first four args always placed in regs in order
5693 for (unsigned level = verCurrentState.esStackDepth - argsToReverse; level < verCurrentState.esStackDepth; level++)
5695 if (verCurrentState.esStack[level].val->gtFlags & GTF_ORDER_SIDEEFF)
5697 assert(lastLevelWithSideEffects == UINT_MAX);
5699 impSpillStackEntry(level,
5700 BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - other side effect"));
5702 else if (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT)
5704 if (lastLevelWithSideEffects != UINT_MAX)
5706 /* We had a previous side effect - must spill it */
5707 impSpillStackEntry(lastLevelWithSideEffects,
5708 BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - side effect"));
5710 /* Record the level for the current side effect in case we will spill it */
5711 lastLevelWithSideEffects = level;
5715 /* This is the first side effect encountered - record its level */
5717 lastLevelWithSideEffects = level;
5722 /* The argument list is now "clean" - no out-of-order side effects
5723 * Pop the argument list in reverse order */
5725 unsigned argFlags = 0;
5726 GenTreePtr args = call->gtCall.gtCallArgs =
5727 impPopRevList(sig->numArgs, &argFlags, sig, sig->numArgs - argsToReverse);
5729 if (call->gtCall.gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
5731 GenTreePtr thisPtr = args->Current();
5732 impBashVarAddrsToI(thisPtr);
5733 assert(thisPtr->TypeGet() == TYP_I_IMPL || thisPtr->TypeGet() == TYP_BYREF);
5738 call->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
5742 //------------------------------------------------------------------------
5743 // impInitClass: Build a node to initialize the class before accessing the
5744 // field if necessary
5747 // pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized
5748 // by a call to CEEInfo::resolveToken().
5750 // Return Value: If needed, a pointer to the node that will perform the class
5751 // initialization. Otherwise, nullptr.
5754 GenTreePtr Compiler::impInitClass(CORINFO_RESOLVED_TOKEN* pResolvedToken)
5756 CorInfoInitClassResult initClassResult =
5757 info.compCompHnd->initClass(pResolvedToken->hField, info.compMethodHnd, impTokenLookupContextHandle);
5759 if ((initClassResult & CORINFO_INITCLASS_USE_HELPER) == 0)
5765 GenTreePtr node = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup);
5767 if (node == nullptr)
5769 assert(compDonotInline());
5775 node = gtNewHelperCallNode(CORINFO_HELP_INITCLASS, TYP_VOID, 0, gtNewArgList(node));
5779 // Call the shared non-GC static helper, as it's the fastest
5780 node = fgGetSharedCCtor(pResolvedToken->hClass);
5786 GenTreePtr Compiler::impImportStaticReadOnlyField(void* fldAddr, var_types lclTyp)
5788 GenTreePtr op1 = nullptr;
5797 ival = *((bool*)fldAddr);
5801 ival = *((signed char*)fldAddr);
5805 ival = *((unsigned char*)fldAddr);
5809 ival = *((short*)fldAddr);
5814 ival = *((unsigned short*)fldAddr);
5819 ival = *((int*)fldAddr);
5821 op1 = gtNewIconNode(ival);
5826 lval = *((__int64*)fldAddr);
5827 op1 = gtNewLconNode(lval);
5831 dval = *((float*)fldAddr);
5832 op1 = gtNewDconNode(dval);
5833 #if !FEATURE_X87_DOUBLES
5834 // X87 stack doesn't differentiate between float/double
5835 // so R4 is treated as R8, but everybody else does
5836 op1->gtType = TYP_FLOAT;
5837 #endif // FEATURE_X87_DOUBLES
5841 dval = *((double*)fldAddr);
5842 op1 = gtNewDconNode(dval);
5846 assert(!"Unexpected lclTyp");
5853 GenTreePtr Compiler::impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolvedToken,
5854 CORINFO_ACCESS_FLAGS access,
5855 CORINFO_FIELD_INFO* pFieldInfo,
5860 switch (pFieldInfo->fieldAccessor)
5862 case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
5864 assert(!compIsForInlining());
5866 // We first call a special helper to get the statics base pointer
5867 op1 = impParentClassTokenToHandle(pResolvedToken);
5869 // compIsForInlining() is false so we should never get NULL here
5870 assert(op1 != nullptr);
5872 var_types type = TYP_BYREF;
5874 switch (pFieldInfo->helper)
5876 case CORINFO_HELP_GETGENERICS_NONGCTHREADSTATIC_BASE:
5879 case CORINFO_HELP_GETGENERICS_GCSTATIC_BASE:
5880 case CORINFO_HELP_GETGENERICS_NONGCSTATIC_BASE:
5881 case CORINFO_HELP_GETGENERICS_GCTHREADSTATIC_BASE:
5884 assert(!"unknown generic statics helper");
5888 op1 = gtNewHelperCallNode(pFieldInfo->helper, type, 0, gtNewArgList(op1));
5890 FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
5891 op1 = gtNewOperNode(GT_ADD, type, op1,
5892 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, pFieldInfo->offset, fs));
5896 case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
5898 #ifdef FEATURE_READYTORUN_COMPILER
5899 if (opts.IsReadyToRun())
5901 unsigned callFlags = 0;
5903 if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT)
5905 callFlags |= GTF_CALL_HOISTABLE;
5908 op1 = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_STATIC_BASE, TYP_BYREF, callFlags);
5910 op1->gtCall.setEntryPoint(pFieldInfo->fieldLookup);
5915 op1 = fgGetStaticsCCtorHelper(pResolvedToken->hClass, pFieldInfo->helper);
5919 FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
5920 op1 = gtNewOperNode(GT_ADD, op1->TypeGet(), op1,
5921 new (this, GT_CNS_INT) GenTreeIntCon(TYP_INT, pFieldInfo->offset, fs));
5925 #if COR_JIT_EE_VERSION > 460
5926 case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
5928 #ifdef FEATURE_READYTORUN_COMPILER
5929 noway_assert(opts.IsReadyToRun());
5930 CORINFO_LOOKUP_KIND kind = info.compCompHnd->getLocationOfThisType(info.compMethodHnd);
5931 assert(kind.needsRuntimeLookup);
5933 GenTreePtr ctxTree = getRuntimeContextTree(kind.runtimeLookupKind);
5934 GenTreeArgList* args = gtNewArgList(ctxTree);
5936 unsigned callFlags = 0;
5938 if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT)
5940 callFlags |= GTF_CALL_HOISTABLE;
5942 var_types type = TYP_BYREF;
5943 op1 = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE, type, callFlags, args);
5945 op1->gtCall.setEntryPoint(pFieldInfo->fieldLookup);
5946 FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
5947 op1 = gtNewOperNode(GT_ADD, type, op1,
5948 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, pFieldInfo->offset, fs));
5951 #endif // FEATURE_READYTORUN_COMPILER
5954 #endif // COR_JIT_EE_VERSION > 460
5957 if (!(access & CORINFO_ACCESS_ADDRESS))
5959 // In the future, it may be better to just create the right tree here instead of folding it later.
5960 op1 = gtNewFieldRef(lclTyp, pResolvedToken->hField);
5962 if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP)
5964 op1->gtType = TYP_REF; // points at boxed object
5965 FieldSeqNode* firstElemFldSeq =
5966 GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField);
5968 gtNewOperNode(GT_ADD, TYP_BYREF, op1,
5969 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(void*), firstElemFldSeq));
5971 if (varTypeIsStruct(lclTyp))
5973 // Constructor adds GTF_GLOB_REF. Note that this is *not* GTF_EXCEPT.
5974 op1 = gtNewObjNode(pFieldInfo->structType, op1);
5978 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
5979 op1->gtFlags |= GTF_GLOB_REF | GTF_IND_NONFAULTING;
5987 void** pFldAddr = nullptr;
5988 void* fldAddr = info.compCompHnd->getFieldAddress(pResolvedToken->hField, (void**)&pFldAddr);
5990 FieldSeqNode* fldSeq = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
5992 /* Create the data member node */
5993 if (pFldAddr == nullptr)
5995 op1 = gtNewIconHandleNode((size_t)fldAddr, GTF_ICON_STATIC_HDL, fldSeq);
5999 op1 = gtNewIconHandleNode((size_t)pFldAddr, GTF_ICON_STATIC_HDL, fldSeq);
6001 // There are two cases here, either the static is RVA based,
6002 // in which case the type of the FIELD node is not a GC type
6003 // and the handle to the RVA is a TYP_I_IMPL. Or the FIELD node is
6004 // a GC type and the handle to it is a TYP_BYREF in the GC heap
6005 // because handles to statics now go into the large object heap
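                // For example (illustrative): an RVA-based 'int' static is read through a
                // TYP_I_IMPL handle, while a 'string' static is read through a TYP_BYREF
                // into the large object heap.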
6007 var_types handleTyp = (var_types)(varTypeIsGC(lclTyp) ? TYP_BYREF : TYP_I_IMPL);
6008 op1 = gtNewOperNode(GT_IND, handleTyp, op1);
6009 op1->gtFlags |= GTF_IND_INVARIANT | GTF_IND_NONFAULTING;
6016 if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP)
6018 op1 = gtNewOperNode(GT_IND, TYP_REF, op1);
6020 FieldSeqNode* fldSeq = GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField);
6022 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
6023 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(void*), fldSeq));
6026 if (!(access & CORINFO_ACCESS_ADDRESS))
6028 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
6029 op1->gtFlags |= GTF_GLOB_REF;
6035 // In general, try to call this before most of the verification work, since most people expect the access
6036 // exceptions before the verification exceptions. If you do this afterwards, that usually doesn't happen. It turns
6037 // out that if you can't access something, we also consider you unverifiable for other reasons.
6038 void Compiler::impHandleAccessAllowed(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall)
6040 if (result != CORINFO_ACCESS_ALLOWED)
6042 impHandleAccessAllowedInternal(result, helperCall);
6046 void Compiler::impHandleAccessAllowedInternal(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall)
6050 case CORINFO_ACCESS_ALLOWED:
6052 case CORINFO_ACCESS_ILLEGAL:
6053 // if we're verifying, then we need to reject the illegal access to ensure that we don't think the
6054 // method is verifiable. Otherwise, delay the exception to runtime.
6055 if (compIsForImportOnly())
6057 info.compCompHnd->ThrowExceptionForHelper(helperCall);
6061 impInsertHelperCall(helperCall);
6064 case CORINFO_ACCESS_RUNTIME_CHECK:
6065 impInsertHelperCall(helperCall);
6070 void Compiler::impInsertHelperCall(CORINFO_HELPER_DESC* helperInfo)
6072 // Construct the argument list
6073 GenTreeArgList* args = nullptr;
6074 assert(helperInfo->helperNum != CORINFO_HELP_UNDEF);
6075 for (unsigned i = helperInfo->numArgs; i > 0; --i)
6077 const CORINFO_HELPER_ARG& helperArg = helperInfo->args[i - 1];
6078 GenTreePtr currentArg = nullptr;
6079 switch (helperArg.argType)
6081 case CORINFO_HELPER_ARG_TYPE_Field:
6082 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(
6083 info.compCompHnd->getFieldClass(helperArg.fieldHandle));
6084 currentArg = gtNewIconEmbFldHndNode(helperArg.fieldHandle);
6086 case CORINFO_HELPER_ARG_TYPE_Method:
6087 info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(helperArg.methodHandle);
6088 currentArg = gtNewIconEmbMethHndNode(helperArg.methodHandle);
6090 case CORINFO_HELPER_ARG_TYPE_Class:
6091 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(helperArg.classHandle);
6092 currentArg = gtNewIconEmbClsHndNode(helperArg.classHandle);
6094 case CORINFO_HELPER_ARG_TYPE_Module:
6095 currentArg = gtNewIconEmbScpHndNode(helperArg.moduleHandle);
6097 case CORINFO_HELPER_ARG_TYPE_Const:
6098 currentArg = gtNewIconNode(helperArg.constant);
6101 NO_WAY("Illegal helper arg type");
6103 args = (args == nullptr) ? gtNewArgList(currentArg) : gtNewListNode(currentArg, args);
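        // Note that the loop visits the helper args last-to-first and prepends each
        // one, so the finished list ends up in declaration order.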
6107 * Mark as CSE'able, and hoistable. Consider marking hoistable unless you're in the inlinee.
6108 * Also, consider sticking this in the first basic block.
6110 GenTreePtr callout = gtNewHelperCallNode(helperInfo->helperNum, TYP_VOID, GTF_EXCEPT, args);
6111 impAppendTree(callout, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
6114 void Compiler::impInsertCalloutForDelegate(CORINFO_METHOD_HANDLE callerMethodHnd,
6115 CORINFO_METHOD_HANDLE calleeMethodHnd,
6116 CORINFO_CLASS_HANDLE delegateTypeHnd)
6118 #ifdef FEATURE_CORECLR
6119 if (!info.compCompHnd->isDelegateCreationAllowed(delegateTypeHnd, calleeMethodHnd))
6121 // Call the JIT_DelegateSecurityCheck helper before calling the actual function.
6122 // This helper throws an exception if the CLR host disallows the call.
6124 GenTreePtr helper = gtNewHelperCallNode(CORINFO_HELP_DELEGATE_SECURITY_CHECK, TYP_VOID, GTF_EXCEPT,
6125 gtNewArgList(gtNewIconEmbClsHndNode(delegateTypeHnd),
6126 gtNewIconEmbMethHndNode(calleeMethodHnd)));
6127 // Append the callout statement
6128 impAppendTree(helper, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
6130 #endif // FEATURE_CORECLR
6133 // Checks whether the return types of caller and callee are compatible
6134 // so that callee can be tail called. Note that here we don't check
6135 // compatibility in IL Verifier sense, but on the lines of return type
6136 // sizes are equal and get returned in the same return register.
6137 bool Compiler::impTailCallRetTypeCompatible(var_types callerRetType,
6138 CORINFO_CLASS_HANDLE callerRetTypeClass,
6139 var_types calleeRetType,
6140 CORINFO_CLASS_HANDLE calleeRetTypeClass)
6142 // Note that we cannot relax this condition with genActualType() as the
6143 // calling convention dictates that the caller of a function with a small
6144 // typed return value is responsible for normalizing the return val.
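    // For example (illustrative): a caller declared to return 'short' must not tail
    // call a callee returning 'int'; genActualType() would equate the two, but the
    // caller's caller still expects a normalized 16-bit result.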
6145 if (callerRetType == calleeRetType)
6150 #if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)
6152 if (callerRetType == TYP_VOID)
6154 // This needs to be allowed to support the following IL pattern that Jit64 allows:
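        //     tail.call   (illustrative; callee returns a value)
        //     pop
        //     ret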
6159 // Note that the above IL pattern is not valid as per IL verification rules.
6160 // Therefore, only full trust code can take advantage of this pattern.
6164 // These checks return true if the return value type sizes are the same and
6165 // get returned in the same return register i.e. caller doesn't need to normalize
6166 // return value. Some of the tail calls permitted by below checks would have
6167 // been rejected by IL Verifier before we reached here. Therefore, only full
6168 // trust code can make those tail calls.
6169 unsigned callerRetTypeSize = 0;
6170 unsigned calleeRetTypeSize = 0;
6171 bool isCallerRetTypMBEnreg =
6172 VarTypeIsMultiByteAndCanEnreg(callerRetType, callerRetTypeClass, &callerRetTypeSize, true);
6173 bool isCalleeRetTypMBEnreg =
6174 VarTypeIsMultiByteAndCanEnreg(calleeRetType, calleeRetTypeClass, &calleeRetTypeSize, true);
6176 if (varTypeIsIntegral(callerRetType) || isCallerRetTypMBEnreg)
6178 return (varTypeIsIntegral(calleeRetType) || isCalleeRetTypMBEnreg) && (callerRetTypeSize == calleeRetTypeSize);
6180 #endif // _TARGET_AMD64_ || _TARGET_ARM64_
6188 PREFIX_TAILCALL_EXPLICIT = 0x00000001, // call has "tail" IL prefix
6189 PREFIX_TAILCALL_IMPLICIT =
6190 0x00000010, // call is treated as having "tail" prefix even though there is no "tail" IL prefix
6191 PREFIX_TAILCALL = (PREFIX_TAILCALL_EXPLICIT | PREFIX_TAILCALL_IMPLICIT),
6192 PREFIX_VOLATILE = 0x00000100,
6193 PREFIX_UNALIGNED = 0x00001000,
6194 PREFIX_CONSTRAINED = 0x00010000,
6195 PREFIX_READONLY = 0x00100000
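// For example, (prefixFlags & PREFIX_TAILCALL) != 0 matches either form of tail
// call, while (prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0 distinguishes an
// actual "tail." IL prefix from one the JIT inferred.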
6198 /********************************************************************************
6200 * Returns true if the current opcode and the opcodes following it correspond
6201 * to a supported tail call IL pattern.
6204 bool Compiler::impIsTailCallILPattern(bool tailPrefixed,
6206 const BYTE* codeAddrOfNextOpcode,
6207 const BYTE* codeEnd,
6209 bool* isCallPopAndRet /* = nullptr */)
6211 // Bail out if the current opcode is not a call.
6212 if (!impOpcodeIsCallOpcode(curOpcode))
6217 #if !FEATURE_TAILCALL_OPT_SHARED_RETURN
6218 // If shared ret tail opt is not enabled, we will enable
6219 // it for recursive methods.
6223 // we can actually handle the case where the ret is in a fallthrough block, as long as that is the only part of the
6224 // sequence. Make sure we don't go past the end of the IL however.
6225 codeEnd = min(codeEnd + 1, info.compCode + info.compILCodeSize);
6228 // Bail out if there is no next opcode after call
6229 if (codeAddrOfNextOpcode >= codeEnd)
6234 // Scan the opcodes to look for the following IL patterns if either
6235 // i) the call is not tail prefixed (i.e. implicit tail call) or
6236 // ii) if tail prefixed, IL verification is not needed for the method.
6238 // Only in the above two cases we can allow the below tail call patterns
6239 // violating ECMA spec.
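    //   e.g. (illustrative):
    //     Pattern 1:  call ; nop* ; ret
    //     Pattern 2:  call ; nop* ; pop ; nop* ; ret   (caller returns void)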
6255 #ifdef _TARGET_AMD64_
6258 nextOpcode = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode);
6259 codeAddrOfNextOpcode += sizeof(__int8);
6260 } while ((codeAddrOfNextOpcode < codeEnd) && // Haven't reached end of method
6261 (!tailPrefixed || !tiVerificationNeeded) && // Not ".tail" prefixed or method requires no IL verification
6262 ((nextOpcode == CEE_NOP) || ((nextOpcode == CEE_POP) && (++cntPop == 1)))); // Next opcode = nop or exactly
6263 // one pop seen so far.
6265 nextOpcode = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode);
6268 if (isCallPopAndRet)
6270 // Allow call+pop+ret to be tail call optimized if caller ret type is void
6271 *isCallPopAndRet = (nextOpcode == CEE_RET) && (cntPop == 1);
6274 #ifdef _TARGET_AMD64_
6276 // Tail call IL pattern could be either of the following
6277 // 1) call/callvirt/calli + ret
6278 // 2) call/callvirt/calli + pop + ret in a method returning void.
6279 return (nextOpcode == CEE_RET) && ((cntPop == 0) || ((cntPop == 1) && (info.compRetType == TYP_VOID)));
6280 #else //!_TARGET_AMD64_
6281 return (nextOpcode == CEE_RET) && (cntPop == 0);
6285 /*****************************************************************************
6287 * Determine whether the call could be converted to an implicit tail call
6290 bool Compiler::impIsImplicitTailCallCandidate(
6291 OPCODE opcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, int prefixFlags, bool isRecursive)
6294 #if FEATURE_TAILCALL_OPT
6295 if (!opts.compTailCallOpt)
6300 if (opts.compDbgCode || opts.MinOpts())
6305 // must not be tail prefixed
6306 if (prefixFlags & PREFIX_TAILCALL_EXPLICIT)
6311 #if !FEATURE_TAILCALL_OPT_SHARED_RETURN
6312 // the block containing call is marked as BBJ_RETURN
6313 // We allow shared ret tail call optimization on recursive calls even under
6314 // !FEATURE_TAILCALL_OPT_SHARED_RETURN.
6315 if (!isRecursive && (compCurBB->bbJumpKind != BBJ_RETURN))
6317 #endif // !FEATURE_TAILCALL_OPT_SHARED_RETURN
6319 // must be call+ret or call+pop+ret
6320 if (!impIsTailCallILPattern(false, opcode, codeAddrOfNextOpcode, codeEnd, isRecursive))
6328 #endif // FEATURE_TAILCALL_OPT
6331 //------------------------------------------------------------------------
6332 // impImportCall: import a call-inspiring opcode
6335 // opcode - opcode that inspires the call
6336 // pResolvedToken - resolved token for the call target
6337 // pConstrainedResolvedToken - resolved constraint token (or nullptr)
6338 // newobjThis - tree for this pointer or uninitialized newobj temp (or nullptr)
6339 // prefixFlags - IL prefix flags for the call
6340 // callInfo - EE supplied info for the call
6341 // rawILOffset - IL offset of the opcode
6344 // Type of the call's return value.
6347 // opcode can be CEE_CALL, CEE_CALLI, CEE_CALLVIRT, or CEE_NEWOBJ.
6349 // For CEE_NEWOBJ, newobjThis should be the temp grabbed for the allocated
6350 // uninitialized object.
6353 #pragma warning(push)
6354 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
6357 var_types Compiler::impImportCall(OPCODE opcode,
6358 CORINFO_RESOLVED_TOKEN* pResolvedToken,
6359 CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
6360 GenTreePtr newobjThis,
6362 CORINFO_CALL_INFO* callInfo,
6363 IL_OFFSET rawILOffset)
6365 assert(opcode == CEE_CALL || opcode == CEE_CALLVIRT || opcode == CEE_NEWOBJ || opcode == CEE_CALLI);
6367 IL_OFFSETX ilOffset = impCurILOffset(rawILOffset, true);
6368 var_types callRetTyp = TYP_COUNT;
6369 CORINFO_SIG_INFO* sig = nullptr;
6370 CORINFO_METHOD_HANDLE methHnd = nullptr;
6371 CORINFO_CLASS_HANDLE clsHnd = nullptr;
6372 unsigned clsFlags = 0;
6373 unsigned mflags = 0;
6374 unsigned argFlags = 0;
6375 GenTreePtr call = nullptr;
6376 GenTreeArgList* args = nullptr;
6377 CORINFO_THIS_TRANSFORM constraintCallThisTransform = CORINFO_NO_THIS_TRANSFORM;
6378 CORINFO_CONTEXT_HANDLE exactContextHnd = nullptr;
6379 BOOL exactContextNeedsRuntimeLookup = FALSE;
6380 bool canTailCall = true;
6381 const char* szCanTailCallFailReason = nullptr;
6382 int tailCall = prefixFlags & PREFIX_TAILCALL;
6383 bool readonlyCall = (prefixFlags & PREFIX_READONLY) != 0;
6385 // Synchronized methods need to call CORINFO_HELP_MON_EXIT at the end. We could
6386 // do that before tailcalls, but that is probably not the intended
6387 // semantic. So just disallow tailcalls from synchronized methods.
6388 // Also, popping arguments in a varargs function is more work and NYI
6389 // If we have a security object, we have to keep our frame around for callers
6390 // to see any imperative security.
6391 if (info.compFlags & CORINFO_FLG_SYNCH)
6393 canTailCall = false;
6394 szCanTailCallFailReason = "Caller is synchronized";
6396 #if !FEATURE_FIXED_OUT_ARGS
6397 else if (info.compIsVarArgs)
6399 canTailCall = false;
6400 szCanTailCallFailReason = "Caller is varargs";
6402 #endif // FEATURE_FIXED_OUT_ARGS
6403 else if (opts.compNeedSecurityCheck)
6405 canTailCall = false;
6406 szCanTailCallFailReason = "Caller requires a security check.";
6409 // We only need to cast the return value of pinvoke inlined calls that return small types
6411 // TODO-AMD64-Cleanup: Remove this when we stop interoperating with JIT64, or if we decide to stop
6412 // widening everything! CoreCLR does not support JIT64 interoperation so no need to widen there.
6413 // The existing x64 JIT doesn't bother widening all types to int, so we have to assume for
6414 // the time being that the callee might be compiled by the other JIT and thus the return
6415 // value will need to be widened by us (or not widened at all...)
6417 // ReadyToRun code sticks with default calling convention that does not widen small return types.
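    // For example (illustrative): an inlined pinvoke returning 'bool' may leave the
    // upper bits of the return register undefined, so the importer casts the result
    // back to its small type to normalize it.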
6419 bool checkForSmallType = opts.IsJit64Compat() || opts.IsReadyToRun();
6420 bool bIntrinsicImported = false;
6422 CORINFO_SIG_INFO calliSig;
6423 GenTreeArgList* extraArg = nullptr;
6425 /*-------------------------------------------------------------------------
6426 * First create the call node
6429 if (opcode == CEE_CALLI)
6431 /* Get the call site sig */
6432 eeGetSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &calliSig);
6434 callRetTyp = JITtype2varType(calliSig.retType);
6435 clsHnd = calliSig.retTypeClass;
6437 call = impImportIndirectCall(&calliSig, ilOffset);
6439 // We don't know the target method, so we have to infer the flags, or
6440 // assume the worst-case.
6441 mflags = (calliSig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC;
6446 unsigned structSize =
6447 (callRetTyp == TYP_STRUCT) ? info.compCompHnd->getClassSize(calliSig.retTypeSigClass) : 0;
6448 printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n",
6449 opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize);
6452 // This should be checked in impImportBlockCode.
6453 assert(!compIsForInlining() || !(impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY));
6458 // We cannot lazily obtain the signature of a CALLI call because it has no method
6459 // handle that we can use, so we need to save its full call signature here.
6460 assert(call->gtCall.callSig == nullptr);
6461 call->gtCall.callSig = new (this, CMK_CorSig) CORINFO_SIG_INFO;
6462 *call->gtCall.callSig = calliSig;
6465 if (IsTargetAbi(CORINFO_CORERT_ABI))
6467 bool managedCall = ((calliSig.callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_STDCALL) &&
                        ((calliSig.callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_C) &&
                        ((calliSig.callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_THISCALL) &&
                        ((calliSig.callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_FASTCALL);
6470 addFatPointerCandidate(call->AsCall());
6474 else // (opcode != CEE_CALLI)
6476 CorInfoIntrinsics intrinsicID = CORINFO_INTRINSIC_Count;
6478 // Passing CORINFO_CALLINFO_ALLOWINSTPARAM indicates that this JIT is prepared to
6479 // supply the instantiation parameters necessary to make direct calls to underlying
6480 // shared generic code, rather than calling through instantiating stubs. If the
6481 // returned signature has CORINFO_CALLCONV_PARAMTYPE then this indicates that the JIT
6482 // must indeed pass an instantiation parameter.
6484 methHnd = callInfo->hMethod;
6486 sig = &(callInfo->sig);
6487 callRetTyp = JITtype2varType(sig->retType);
6489 mflags = callInfo->methodFlags;
6494 unsigned structSize = (callRetTyp == TYP_STRUCT) ? info.compCompHnd->getClassSize(sig->retTypeSigClass) : 0;
6495 printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n",
6496 opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize);
6499 if (compIsForInlining())
6501 /* Does this call site have security boundary restrictions? */
6503 if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
6505 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_SECURITY);
6509 /* Does the inlinee need a security check token on the frame */
6511 if (mflags & CORINFO_FLG_SECURITYCHECK)
6513 compInlineResult->NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
6517 /* Does the inlinee use StackCrawlMark */
6519 if (mflags & CORINFO_FLG_DONT_INLINE_CALLER)
6521 compInlineResult->NoteFatal(InlineObservation::CALLEE_STACK_CRAWL_MARK);
6525 /* For now ignore delegate invoke */
6527 if (mflags & CORINFO_FLG_DELEGATE_INVOKE)
6529 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_DELEGATE_INVOKE);
6533 /* For now ignore varargs */
6534 if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
6536 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NATIVE_VARARGS);
6540 if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
6542 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS);
6546 if ((mflags & CORINFO_FLG_VIRTUAL) && (sig->sigInst.methInstCount != 0) && (opcode == CEE_CALLVIRT))
6548 compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_GENERIC_VIRTUAL);
6553 clsHnd = pResolvedToken->hClass;
6555 clsFlags = callInfo->classFlags;
6558 // If this is a call to JitTestLabel.Mark, do "early inlining", and record the test attribute.
6560 // This recognition should really be done by knowing the methHnd of the relevant Mark method(s).
6561 // These should be in mscorlib.h, and available through a JIT/EE interface call.
6562 const char* modName;
6563 const char* className;
6564 const char* methodName;
6565 if ((className = eeGetClassName(clsHnd)) != nullptr &&
6566 strcmp(className, "System.Runtime.CompilerServices.JitTestLabel") == 0 &&
6567 (methodName = eeGetMethodName(methHnd, &modName)) != nullptr && strcmp(methodName, "Mark") == 0)
6569 return impImportJitTestLabelMark(sig->numArgs);
6573 // <NICE> Factor this into getCallInfo </NICE>
6574 if ((mflags & CORINFO_FLG_INTRINSIC) && !pConstrainedResolvedToken)
6576 call = impIntrinsic(newobjThis, clsHnd, methHnd, sig, pResolvedToken->token, readonlyCall,
6577 (canTailCall && (tailCall != 0)), &intrinsicID);
6579 if (call != nullptr)
6581 assert(!(mflags & CORINFO_FLG_VIRTUAL) || (mflags & CORINFO_FLG_FINAL) ||
6582 (clsFlags & CORINFO_FLG_FINAL));
6584 #ifdef FEATURE_READYTORUN_COMPILER
6585 if (call->OperGet() == GT_INTRINSIC)
6587 if (opts.IsReadyToRun())
6589 noway_assert(callInfo->kind == CORINFO_CALL);
6590 call->gtIntrinsic.gtEntryPoint = callInfo->codePointerLookup.constLookup;
6594 call->gtIntrinsic.gtEntryPoint.addr = nullptr;
6599 bIntrinsicImported = true;
6607 call = impSIMDIntrinsic(opcode, newobjThis, clsHnd, methHnd, sig, pResolvedToken->token);
6608 if (call != nullptr)
6610 bIntrinsicImported = true;
6614 #endif // FEATURE_SIMD
6616 if ((mflags & CORINFO_FLG_VIRTUAL) && (mflags & CORINFO_FLG_EnC) && (opcode == CEE_CALLVIRT))
6618 NO_WAY("Virtual call to a function added via EnC is not supported");
6621 if ((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_DEFAULT &&
6622 (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
6623 (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG)
6625 BADCODE("Bad calling convention");
6628 //-------------------------------------------------------------------------
6629 // Construct the call node
6631 // Work out what sort of call we're making.
6632 // Dispense with virtual calls implemented via LDVIRTFTN immediately.
6634 constraintCallThisTransform = callInfo->thisTransform;
6636 exactContextHnd = callInfo->contextHandle;
6637 exactContextNeedsRuntimeLookup = callInfo->exactContextNeedsRuntimeLookup;
6639 // A recursive call is treated as a loop back to the beginning of the method.
6640 if (methHnd == info.compMethodHnd)
6645 JITDUMP("\nFound recursive call in the method. Mark BB%02u to BB%02u as having a backward branch.\n",
6646 fgFirstBB->bbNum, compCurBB->bbNum);
6649 fgMarkBackwardJump(fgFirstBB, compCurBB);
6652 switch (callInfo->kind)
6655 case CORINFO_VIRTUALCALL_STUB:
6657 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
6658 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
6659 if (callInfo->stubLookup.lookupKind.needsRuntimeLookup)
6662 if (compIsForInlining())
6664 // Don't import runtime lookups when inlining
6665 // Inlining has to be aborted in such a case
6666 /* XXX Fri 3/20/2009
6667 * By the way, this would never succeed. If the handle lookup is into the generic
6668 * dictionary for a candidate, you'll generate different dictionary offsets and the
6669 * inlined code will crash.
6671 * To anyone code reviewing this, when could this ever succeed in the future? It'll
6672 * always have a handle lookup. These lookups are safe intra-module, but we're just failing here.
6675 compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_COMPLEX_HANDLE);
6679 GenTreePtr stubAddr = impRuntimeLookupToTree(pResolvedToken, &callInfo->stubLookup, methHnd);
6680 assert(!compDonotInline());
6682 // This is the rough code to set up an indirect stub call
6683 assert(stubAddr != nullptr);
6685 // The stubAddr may be a
6686 // complex expression. As it is evaluated after the args,
6687 // it may cause registered args to be spilled. Simply spill it.
6689 unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall with runtime lookup"));
6690 impAssignTempGen(lclNum, stubAddr, (unsigned)CHECK_SPILL_ALL);
6691 stubAddr = gtNewLclvNode(lclNum, TYP_I_IMPL);
6693 // Create the actual call node
6695 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
6696 (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
6698 call = gtNewIndCallNode(stubAddr, callRetTyp, nullptr);
6700 call->gtFlags |= GTF_EXCEPT | (stubAddr->gtFlags & GTF_GLOB_EFFECT);
6701 call->gtFlags |= GTF_CALL_VIRT_STUB;
6704 // No tailcalls allowed for these yet...
6705 canTailCall = false;
6706 szCanTailCallFailReason = "VirtualCall with runtime lookup";
6711 // OK, the stub is available at compile time.
6713 call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
6714 call->gtCall.gtStubCallStubAddr = callInfo->stubLookup.constLookup.addr;
6715 call->gtFlags |= GTF_CALL_VIRT_STUB;
6716 assert(callInfo->stubLookup.constLookup.accessType != IAT_PPVALUE);
6717 if (callInfo->stubLookup.constLookup.accessType == IAT_PVALUE)
6719 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_VIRTSTUB_REL_INDIRECT;
6723 #ifdef FEATURE_READYTORUN_COMPILER
6724 if (opts.IsReadyToRun())
6726 // Null check is sometimes needed for ready to run to handle
6727 // non-virtual <-> virtual changes between versions
6728 if (callInfo->nullInstanceCheck)
6730 call->gtFlags |= GTF_CALL_NULLCHECK;
6738 case CORINFO_VIRTUALCALL_VTABLE:
6740 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
6741 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
6742 call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
6743 call->gtFlags |= GTF_CALL_VIRT_VTABLE;
6747 case CORINFO_VIRTUALCALL_LDVIRTFTN:
6749 if (compIsForInlining())
6751 compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_CALL_VIA_LDVIRTFTN);
6755 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
6756 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
6757 // OK, We've been told to call via LDVIRTFTN, so just
6758 // take the call now....
6760 args = impPopList(sig->numArgs, &argFlags, sig);
6762 GenTreePtr thisPtr = impPopStack().val;
6763 thisPtr = impTransformThis(thisPtr, pConstrainedResolvedToken, callInfo->thisTransform);
6764 if (compDonotInline())
6769 // Clone the (possibly transformed) "this" pointer
6770 GenTreePtr thisPtrCopy;
6771 thisPtr = impCloneExpr(thisPtr, &thisPtrCopy, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
6772 nullptr DEBUGARG("LDVIRTFTN this pointer"));
6774 GenTreePtr fptr = nullptr;
6775 bool coreRTGenericVirtualMethod =
6776 ((sig->callConv & CORINFO_CALLCONV_GENERIC) != 0) && IsTargetAbi(CORINFO_CORERT_ABI);
6777 #if COR_JIT_EE_VERSION > 460
6778 if (coreRTGenericVirtualMethod)
6780 GenTreePtr runtimeMethodHandle = nullptr;
6781 if (callInfo->exactContextNeedsRuntimeLookup)
6783 runtimeMethodHandle =
6784 impRuntimeLookupToTree(pResolvedToken, &callInfo->codePointerLookup, methHnd);
6788 runtimeMethodHandle = gtNewIconEmbMethHndNode(pResolvedToken->hMethod);
6790 fptr = gtNewHelperCallNode(CORINFO_HELP_GVMLOOKUP_FOR_SLOT, TYP_I_IMPL, GTF_EXCEPT,
6791 gtNewArgList(thisPtr, runtimeMethodHandle));
6794 #endif // COR_JIT_EE_VERSION
6796 fptr = impImportLdvirtftn(thisPtr, pResolvedToken, callInfo);
6799 if (compDonotInline())
6804 thisPtr = nullptr; // can't reuse it
6806 // Now make an indirect call through the function pointer
6808 unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall through function pointer"));
6809 impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL);
6810 fptr = gtNewLclvNode(lclNum, TYP_I_IMPL);
6812 // Create the actual call node
6814 call = gtNewIndCallNode(fptr, callRetTyp, args, ilOffset);
6815 call->gtCall.gtCallObjp = thisPtrCopy;
6816 call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
6818 if (coreRTGenericVirtualMethod)
6820 addFatPointerCandidate(call->AsCall());
6822 #ifdef FEATURE_READYTORUN_COMPILER
6823 if (opts.IsReadyToRun())
6825 // Null check is needed for ready to run to handle
6826 // non-virtual <-> virtual changes between versions
6827 call->gtFlags |= GTF_CALL_NULLCHECK;
6831 // Since we are jumping over some code, check that it's OK to skip that code
6832 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
6833 (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
6839 // This is for a non-virtual, non-interface etc. call
6840 call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
6842 // We remove the nullcheck for the GetType call intrinsic.
6843 // TODO-CQ: JIT64 does not introduce the null check for many more helper calls
6845 if (callInfo->nullInstanceCheck &&
6846 !((mflags & CORINFO_FLG_INTRINSIC) != 0 && (intrinsicID == CORINFO_INTRINSIC_Object_GetType)))
6848 call->gtFlags |= GTF_CALL_NULLCHECK;
6851 #ifdef FEATURE_READYTORUN_COMPILER
6852 if (opts.IsReadyToRun())
6854 call->gtCall.setEntryPoint(callInfo->codePointerLookup.constLookup);
6860 case CORINFO_CALL_CODE_POINTER:
6862 // The EE has asked us to call by computing a code pointer and then doing an
6863 // indirect call. This is because a runtime lookup is required to get the code entry point.
6865 // These calls always follow a uniform calling convention, i.e. no extra hidden params
6866 assert((sig->callConv & CORINFO_CALLCONV_PARAMTYPE) == 0);
6868 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG);
6869 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
6872 impLookupToTree(pResolvedToken, &callInfo->codePointerLookup, GTF_ICON_FTN_ADDR, callInfo->hMethod);
6874 if (compDonotInline())
6879 // Now make an indirect call through the function pointer
6881 unsigned lclNum = lvaGrabTemp(true DEBUGARG("Indirect call through function pointer"));
6882 impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL);
6883 fptr = gtNewLclvNode(lclNum, TYP_I_IMPL);
6885 call = gtNewIndCallNode(fptr, callRetTyp, nullptr, ilOffset);
6886 call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
6887 if (callInfo->nullInstanceCheck)
6889 call->gtFlags |= GTF_CALL_NULLCHECK;
6896 assert(!"unknown call kind");
6900 //-------------------------------------------------------------------------
6903 PREFIX_ASSUME(call != nullptr);
6905 if (mflags & CORINFO_FLG_NOGCCHECK)
6907 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_NOGCCHECK;
6910 // Mark call if it's one of the ones we will maybe treat as an intrinsic
6911 if (intrinsicID == CORINFO_INTRINSIC_Object_GetType || intrinsicID == CORINFO_INTRINSIC_TypeEQ ||
6912 intrinsicID == CORINFO_INTRINSIC_TypeNEQ || intrinsicID == CORINFO_INTRINSIC_GetCurrentManagedThread ||
6913 intrinsicID == CORINFO_INTRINSIC_GetManagedThreadId)
6915 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_SPECIAL_INTRINSIC;
6919 assert(clsHnd || (opcode == CEE_CALLI)); // We're never verifying for CALLI, so this is not set.
6921 /* Some sanity checks */
6923 // CALL_VIRT and NEWOBJ must have a THIS pointer
6924 assert((opcode != CEE_CALLVIRT && opcode != CEE_NEWOBJ) || (sig->callConv & CORINFO_CALLCONV_HASTHIS));
6925 // static bit and hasThis are negations of one another
6926 assert(((mflags & CORINFO_FLG_STATIC) != 0) == ((sig->callConv & CORINFO_CALLCONV_HASTHIS) == 0));
6927 assert(call != nullptr);
6929 /*-------------------------------------------------------------------------
6930 * Check special-cases etc
6933 /* Special case - Check if it is a call to Delegate.Invoke(). */
6935 if (mflags & CORINFO_FLG_DELEGATE_INVOKE)
6937 assert(!compIsForInlining());
6938 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
6939 assert(mflags & CORINFO_FLG_FINAL);
6941 /* Set the delegate flag */
6942 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_DELEGATE_INV;
6944 if (callInfo->secureDelegateInvoke)
6946 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_SECURE_DELEGATE_INV;
6949 if (opcode == CEE_CALLVIRT)
6951 assert(mflags & CORINFO_FLG_FINAL);
6953 /* It should have the GTF_CALL_NULLCHECK flag set. Reset it */
6954 assert(call->gtFlags & GTF_CALL_NULLCHECK);
6955 call->gtFlags &= ~GTF_CALL_NULLCHECK;
6959 CORINFO_CLASS_HANDLE actualMethodRetTypeSigClass;
6960 actualMethodRetTypeSigClass = sig->retTypeSigClass;
6961 if (varTypeIsStruct(callRetTyp))
6963 callRetTyp = impNormStructType(actualMethodRetTypeSigClass);
6964 call->gtType = callRetTyp;
6968 /* Check for varargs */
6969 if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG ||
6970 (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
6972 BADCODE("Varargs not supported.");
6974 #endif // !FEATURE_VARARG
6976 if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG ||
6977 (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
6979 assert(!compIsForInlining());
6981 /* Set the right flags */
6983 call->gtFlags |= GTF_CALL_POP_ARGS;
6984 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_VARARGS;
6986 /* Can't allow tailcall for varargs as it is caller-pop. The caller
6987 will be expecting to pop a certain number of arguments, but if we
6988 tailcall to a function with a different number of arguments, we
6989 are hosed. There are ways around this (caller remembers esp value,
6990 varargs is not caller-pop, etc), but not worth it. */
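            // For example (illustrative): a caller that was invoked with 2 stack args
            // and tail calls a varargs method taking 4 would leave its own caller
            // popping the wrong number of arguments.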
6991 CLANG_FORMAT_COMMENT_ANCHOR;
6996 canTailCall = false;
6997 szCanTailCallFailReason = "Callee is varargs";
7001 /* Get the total number of arguments - this is already correct
7002 * for CALLI - for methods we have to get it from the call site */
7004 if (opcode != CEE_CALLI)
7007 unsigned numArgsDef = sig->numArgs;
7009 eeGetCallSiteSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, sig);
7012 // We cannot lazily obtain the signature of a vararg call because using its method
7013 // handle will give us only the declared argument list, not the full argument list.
7014 assert(call->gtCall.callSig == nullptr);
7015 call->gtCall.callSig = new (this, CMK_CorSig) CORINFO_SIG_INFO;
7016 *call->gtCall.callSig = *sig;
7019 // For vararg calls we must be sure to load the return type of the
7020 // method actually being called, as well as the return type
7021 // specified in the vararg signature. With type equivalency, these types
7022 // may not be the same.
7023 if (sig->retTypeSigClass != actualMethodRetTypeSigClass)
7025 if (actualMethodRetTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS &&
7026 sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR &&
7027 sig->retType != CORINFO_TYPE_VAR)
7029 // Make sure that all valuetypes (including enums) that we push are loaded.
7030 // This is to guarantee that if a GC is triggered from the prestub of this method,
7031 // all valuetypes in the method signature are already loaded.
7032 // We need to be able to find the size of the valuetypes, but we cannot
7033 // do a class-load from within GC.
7034 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(actualMethodRetTypeSigClass);
7038 assert(numArgsDef <= sig->numArgs);
7041 /* We will have "cookie" as the last argument but we cannot push
7042 * it on the operand stack because we may overflow, so we append it
7043 * to the arg list next after we pop them */
7046 if (mflags & CORINFO_FLG_SECURITYCHECK)
7048 assert(!compIsForInlining());
7050 // Need security prolog/epilog callouts when there is
7051 // imperative security in the method. This is to give security a
7052 // chance to do any setup in the prolog and cleanup in the epilog if needed.
7054 if (compIsForInlining())
7056 // Cannot handle this if the method being imported is an inlinee by itself.
7057 // Because inlinee method does not have its own frame.
7059 compInlineResult->NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
7064 tiSecurityCalloutNeeded = true;
7066 // If the current method calls a method which needs a security check,
7067 // (i.e. the method being compiled has imperative security)
7068 // we need to reserve a slot for the security object in
7069 // the current method's stack frame
7070 opts.compNeedSecurityCheck = true;
7074 //--------------------------- Inline NDirect ------------------------------
7076 // For inline cases we technically should look at both the current
7077 // block and the call site block (or just the latter if we've
7078 // fused the EH trees). However the block-related checks pertain to
7079 // EH and we currently won't inline a method with EH. So for
7080 // inlinees, just checking the call site block is sufficient.
7082 // New lexical block here to avoid compilation errors because of GOTOs.
7083 BasicBlock* block = compIsForInlining() ? impInlineInfo->iciBlock : compCurBB;
7084 impCheckForPInvokeCall(call, methHnd, sig, mflags, block);
7087 if (call->gtFlags & GTF_CALL_UNMANAGED)
7089 // We set up the unmanaged call by linking the frame, disabling GC, etc
7090 // This needs to be cleaned up on return
7093 canTailCall = false;
7094 szCanTailCallFailReason = "Callee is native";
7097 checkForSmallType = true;
7099 impPopArgsForUnmanagedCall(call, sig);
7103 else if ((opcode == CEE_CALLI) && (((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_STDCALL) ||
7104 ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_C) ||
7105 ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_THISCALL) ||
7106 ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_FASTCALL)))
7108 if (!info.compCompHnd->canGetCookieForPInvokeCalliSig(sig))
7110 // Normally this only happens with inlining.
7111 // However, a generic method (or type) being NGENd into another module
7112 // can run into this issue as well. There's not an easy fall-back for NGEN,
7113 // so instead we fall back to JIT.
7114 if (compIsForInlining())
7116 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_PINVOKE_COOKIE);
7120 IMPL_LIMITATION("Can't get PInvoke cookie (cross module generics)");
7126 GenTreePtr cookie = eeGetPInvokeCookie(sig);
7128 // This cookie is required to be either a simple GT_CNS_INT or
7129 // an indirection of a GT_CNS_INT
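            // i.e. (illustrative) either GT_CNS_INT(cookie) or GT_IND(GT_CNS_INT(&cookie)).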
7131 GenTreePtr cookieConst = cookie;
7132 if (cookie->gtOper == GT_IND)
7134 cookieConst = cookie->gtOp.gtOp1;
7136 assert(cookieConst->gtOper == GT_CNS_INT);
7138 // Setting GTF_DONT_CSE on the GT_CNS_INT as well as on the GT_IND (if it exists) will ensure that
7139 // we won't allow this tree to participate in any CSE logic
7141 cookie->gtFlags |= GTF_DONT_CSE;
7142 cookieConst->gtFlags |= GTF_DONT_CSE;
7144 call->gtCall.gtCallCookie = cookie;
7148 canTailCall = false;
7149 szCanTailCallFailReason = "PInvoke calli";
7153 /*-------------------------------------------------------------------------
7154 * Create the argument list
7157 //-------------------------------------------------------------------------
7158 // Special case - for varargs we have an implicit last argument
7160 if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
7162 assert(!compIsForInlining());
7164 void *varCookie, *pVarCookie;
7165 if (!info.compCompHnd->canGetVarArgsHandle(sig))
7167 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_VARARGS_COOKIE);
7171 varCookie = info.compCompHnd->getVarArgsHandle(sig, &pVarCookie);
7172 assert((!varCookie) != (!pVarCookie));
7173 GenTreePtr cookie = gtNewIconEmbHndNode(varCookie, pVarCookie, GTF_ICON_VARG_HDL);
7175 assert(extraArg == nullptr);
7176 extraArg = gtNewArgList(cookie);
7179 //-------------------------------------------------------------------------
7180 // Extra arg for shared generic code and array methods
7182 // Extra argument containing instantiation information is passed in the
7183 // following circumstances:
7184 // (a) To the "Address" method on array classes; the extra parameter is
7185 // the array's type handle (a TypeDesc)
7186 // (b) To shared-code instance methods in generic structs; the extra parameter
7187 // is the struct's type handle (a vtable ptr)
7188 // (c) To shared-code per-instantiation non-generic static methods in generic
7189 // classes and structs; the extra parameter is the type handle
7190 // (d) To shared-code generic methods; the extra parameter is an
7191 // exact-instantiation MethodDesc
7193 // We also set the exact type context associated with the call so we can
7194 // inline the call correctly later on.
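    // For example (illustrative): shared code for a generic method M<T>() receives
    // the exact MethodDesc for M<string> as the hidden extra argument when invoked
    // with T = string -- case (d) above.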
7196 if (sig->callConv & CORINFO_CALLCONV_PARAMTYPE)
7198 assert(call->gtCall.gtCallType == CT_USER_FUNC);
7199 if (clsHnd == nullptr)
7201 NO_WAY("CALLI on parameterized type");
7204 assert(opcode != CEE_CALLI);
7206 GenTreePtr instParam;
7209 // Instantiated generic method
7210 if (((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_METHOD)
7212 CORINFO_METHOD_HANDLE exactMethodHandle =
7213 (CORINFO_METHOD_HANDLE)((SIZE_T)exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK);
7215 if (!exactContextNeedsRuntimeLookup)
7217 #ifdef FEATURE_READYTORUN_COMPILER
7218 if (opts.IsReadyToRun())
7221 impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_METHOD_HDL, exactMethodHandle);
7222 if (instParam == nullptr)
7230 instParam = gtNewIconEmbMethHndNode(exactMethodHandle);
7231 info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(exactMethodHandle);
7236 instParam = impTokenToHandle(pResolvedToken, &runtimeLookup, TRUE /*mustRestoreHandle*/);
7237 if (instParam == nullptr)
7244 // otherwise must be an instance method in a generic struct,
7245 // a static method in a generic type, or a runtime-generated array method
7248 assert(((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_CLASS);
7249 CORINFO_CLASS_HANDLE exactClassHandle =
7250 (CORINFO_CLASS_HANDLE)((SIZE_T)exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK);
7252 if (compIsForInlining() && (clsFlags & CORINFO_FLG_ARRAY) != 0)
7254 compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_ARRAY_METHOD);
7258 if ((clsFlags & CORINFO_FLG_ARRAY) && readonlyCall)
7260 // We indicate "readonly" to the Address operation by using a null
7262 instParam = gtNewIconNode(0, TYP_REF);
7265 if (!exactContextNeedsRuntimeLookup)
7267 #ifdef FEATURE_READYTORUN_COMPILER
7268 if (opts.IsReadyToRun())
7271 impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_CLASS_HDL, exactClassHandle);
7272 if (instParam == nullptr)
7280 instParam = gtNewIconEmbClsHndNode(exactClassHandle);
7281 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(exactClassHandle);
7286 instParam = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup, TRUE /*mustRestoreHandle*/);
7287 if (instParam == nullptr)
7294 assert(extraArg == nullptr);
7295 extraArg = gtNewArgList(instParam);
7298 // Inlining may need the exact type context (exactContextHnd) if we're inlining shared generic code, in particular
7299 // to inline 'polytypic' operations such as static field accesses, type tests and method calls which
7300 // rely on the exact context. The exactContextHnd is passed back to the JitInterface at appropriate points.
7301 // exactContextHnd is not currently required when inlining shared generic code into shared
7302 // generic code, since the inliner aborts whenever shared code polytypic operations are encountered
7303 // (e.g. anything marked needsRuntimeLookup)
7304 if (exactContextNeedsRuntimeLookup)
7306 exactContextHnd = nullptr;
7309 //-------------------------------------------------------------------------
7310 // The main group of arguments
7312 args = call->gtCall.gtCallArgs = impPopList(sig->numArgs, &argFlags, sig, extraArg);
7316 call->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
7319 //-------------------------------------------------------------------------
7320 // The "this" pointer
7322 if (!(mflags & CORINFO_FLG_STATIC) && !((opcode == CEE_NEWOBJ) && (newobjThis == nullptr)))
7326 if (opcode == CEE_NEWOBJ)
7332 obj = impPopStack().val;
7333 obj = impTransformThis(obj, pConstrainedResolvedToken, constraintCallThisTransform);
7334 if (compDonotInline())
7340 /* Is this a virtual or interface call? */
7342 if ((call->gtFlags & GTF_CALL_VIRT_KIND_MASK) != GTF_CALL_NONVIRT)
7344 /* only true object pointers can be virtual */
7346 assert(obj->gtType == TYP_REF);
7352 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_NONVIRT_SAME_THIS;
7356 /* Store the "this" value in the call */
7358 call->gtFlags |= obj->gtFlags & GTF_GLOB_EFFECT;
7359 call->gtCall.gtCallObjp = obj;
7362 //-------------------------------------------------------------------------
7363 // The "this" pointer for "newobj"
7365 if (opcode == CEE_NEWOBJ)
7367 if (clsFlags & CORINFO_FLG_VAROBJSIZE)
7369 assert(!(clsFlags & CORINFO_FLG_ARRAY)); // arrays handled separately
7370 // This is a 'new' of a variable-sized object, where
7371 // the constructor is to return the object. In this case
7372 // the constructor claims to return VOID but we know it
7373 // actually returns the new object
7374 assert(callRetTyp == TYP_VOID);
7375 callRetTyp = TYP_REF;
7376 call->gtType = TYP_REF;
7377 impSpillSpecialSideEff();
7379 impPushOnStack(call, typeInfo(TI_REF, clsHnd));
7383 if (clsFlags & CORINFO_FLG_DELEGATE)
7385 // The new inliner morphs it in impImportCall.
7386 // This will allow us to inline the call to the delegate constructor.
7387 call = fgOptimizeDelegateConstructor(call, &exactContextHnd);
7390 if (!bIntrinsicImported)
7393 #if defined(DEBUG) || defined(INLINE_DATA)
7395 // Keep track of the raw IL offset of the call
7396 call->gtCall.gtRawILOffset = rawILOffset;
7398 #endif // defined(DEBUG) || defined(INLINE_DATA)
7400 // Is it an inline candidate?
7401 impMarkInlineCandidate(call, exactContextHnd, callInfo);
7404 // append the call node.
7405 impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
7407 // Now push the value of the 'new' onto the stack
7409 // This is a 'new' of a non-variable sized object.
7410 // Append the new node (op1) to the statement list,
7411 // and then push the local holding the value of this
7412 // new instruction on the stack.
7414 if (clsFlags & CORINFO_FLG_VALUECLASS)
7416 assert(newobjThis->gtOper == GT_ADDR && newobjThis->gtOp.gtOp1->gtOper == GT_LCL_VAR);
7418 unsigned tmp = newobjThis->gtOp.gtOp1->gtLclVarCommon.gtLclNum;
7419 impPushOnStack(gtNewLclvNode(tmp, lvaGetRealType(tmp)), verMakeTypeInfo(clsHnd).NormaliseForStack());
7423 if (newobjThis->gtOper == GT_COMMA)
7425 // In coreclr the callout can be inserted even if verification is disabled
7426 // so we cannot rely on tiVerificationNeeded alone
7428 // We must have inserted the callout. Get the real newobj.
7429 newobjThis = newobjThis->gtOp.gtOp2;
7432 assert(newobjThis->gtOper == GT_LCL_VAR);
7433 impPushOnStack(gtNewLclvNode(newobjThis->gtLclVarCommon.gtLclNum, TYP_REF), typeInfo(TI_REF, clsHnd));
7443 // This check cannot be performed for implicit tail calls for the reason
7444 // that impIsImplicitTailCallCandidate() is not checking whether return
7445 // types are compatible before marking a call node with PREFIX_TAILCALL_IMPLICIT.
7446 // As a result it is possible that in the following case, we find that
7447 // the type stack is non-empty if Callee() is considered for implicit tail calling:
7449 // int Caller(..) { .... void Callee(); ret val; ... }
7451 // Note that we cannot check return type compatibility before ImpImportCall()
7452 // as we don't have required info or need to duplicate some of the logic of
7455 // For implicit tail calls, we perform this check after return types are
7456 // known to be compatible.
7457 if ((tailCall & PREFIX_TAILCALL_EXPLICIT) && (verCurrentState.esStackDepth != 0))
7459 BADCODE("Stack should be empty after tailcall");
7462 // Note that we cannot relax this condition with genActualType() as
7463 // the calling convention dictates that the caller of a function with
7464 // a small-typed return value is responsible for normalizing the return val
7467 !impTailCallRetTypeCompatible(info.compRetType, info.compMethodInfo->args.retTypeClass, callRetTyp,
7468 callInfo->sig.retTypeClass))
7470 canTailCall = false;
7471 szCanTailCallFailReason = "Return types are not tail call compatible";
7474 // Stack empty check for implicit tail calls.
7475 if (canTailCall && (tailCall & PREFIX_TAILCALL_IMPLICIT) && (verCurrentState.esStackDepth != 0))
7477 #ifdef _TARGET_AMD64_
7478 // JIT64 Compatibility: Opportunistic tail call stack mismatch throws a VerificationException
7479 // in JIT64, not an InvalidProgramException.
7480 Verify(false, "Stack should be empty after tailcall");
7481 #else // !_TARGET_AMD64_
7482 BADCODE("Stack should be empty after tailcall");
7483 #endif // !_TARGET_AMD64_
7486 // assert(compCurBB is not a catch, finally or filter block);
7487 // assert(compCurBB is not a try block protected by a finally block);
7489 // Check for permission to tailcall
7490 bool explicitTailCall = (tailCall & PREFIX_TAILCALL_EXPLICIT) != 0;
7492 assert(!explicitTailCall || compCurBB->bbJumpKind == BBJ_RETURN);
7496 // True virtual or indirect calls shouldn't pass in a callee handle.
7497 CORINFO_METHOD_HANDLE exactCalleeHnd = ((call->gtCall.gtCallType != CT_USER_FUNC) ||
7498 ((call->gtFlags & GTF_CALL_VIRT_KIND_MASK) != GTF_CALL_NONVIRT))
7501 GenTreePtr thisArg = call->gtCall.gtCallObjp;
7503 if (info.compCompHnd->canTailCall(info.compMethodHnd, methHnd, exactCalleeHnd, explicitTailCall))
7506 if (explicitTailCall)
7508 // In case of explicit tail calls, mark it so that it is not considered
7510 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_EXPLICIT_TAILCALL;
7514 printf("\nGTF_CALL_M_EXPLICIT_TAILCALL bit set for call ");
7522 #if FEATURE_TAILCALL_OPT
7523 // Must be an implicit tail call.
7524 assert((tailCall & PREFIX_TAILCALL_IMPLICIT) != 0);
7526 // It is possible that a call node is both an inline candidate and marked
7527 // for opportunistic tail calling. Inlining happens before morphing of
7528 // trees. If inlining of an inline candidate gets aborted for whatever
7529 // reason, it will survive to the morphing stage at which point it will be
7530 // transformed into a tail call after performing additional checks.
7532 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_IMPLICIT_TAILCALL;
7536 printf("\nGTF_CALL_M_IMPLICIT_TAILCALL bit set for call ");
7542 #else //! FEATURE_TAILCALL_OPT
7543 NYI("Implicit tail call prefix on a target which doesn't support opportunistic tail calls");
7545 #endif // FEATURE_TAILCALL_OPT
7548 // we can't report success just yet...
7552 canTailCall = false;
7553 // canTailCall reported its reasons already
7557 printf("\ninfo.compCompHnd->canTailCall returned false for call ");
7566 // If this assert fires it means that canTailCall was set to false without setting a reason!
7567 assert(szCanTailCallFailReason != nullptr);
7572 printf("\nRejecting %splicit tail call for call ", explicitTailCall ? "ex" : "im");
7574 printf(": %s\n", szCanTailCallFailReason);
7577 info.compCompHnd->reportTailCallDecision(info.compMethodHnd, methHnd, explicitTailCall, TAILCALL_FAIL,
7578 szCanTailCallFailReason);
7582 // Note: we assume that small return types are already normalized by the managed callee
7583 // or by the pinvoke stub for calls to unmanaged code.
7585 if (!bIntrinsicImported)
7588 // Things needed to be checked when bIntrinsicImported is false.
7591 assert(call->gtOper == GT_CALL);
7592 assert(sig != nullptr);
7594 // Tail calls require us to save the call site's sig info so we can obtain an argument
7595 // copying thunk from the EE later on.
7596 if (call->gtCall.callSig == nullptr)
7598 call->gtCall.callSig = new (this, CMK_CorSig) CORINFO_SIG_INFO;
7599 *call->gtCall.callSig = *sig;
7602 if (compIsForInlining() && opcode == CEE_CALLVIRT)
7604 GenTreePtr callObj = call->gtCall.gtCallObjp;
7605 assert(callObj != nullptr);
7607 unsigned callKind = call->gtFlags & GTF_CALL_VIRT_KIND_MASK;
7609 if (((callKind != GTF_CALL_NONVIRT) || (call->gtFlags & GTF_CALL_NULLCHECK)) &&
7610 impInlineIsGuaranteedThisDerefBeforeAnySideEffects(call->gtCall.gtCallArgs, callObj,
7611 impInlineInfo->inlArgInfo))
7613 impInlineInfo->thisDereferencedFirst = true;
7617 #if defined(DEBUG) || defined(INLINE_DATA)
7619 // Keep track of the raw IL offset of the call
7620 call->gtCall.gtRawILOffset = rawILOffset;
7622 #endif // defined(DEBUG) || defined(INLINE_DATA)
7624 // Is it an inline candidate?
7625 impMarkInlineCandidate(call, exactContextHnd, callInfo);
7629 // Push or append the result of the call
7630 if (callRetTyp == TYP_VOID)
7632 if (opcode == CEE_NEWOBJ)
7634 // we actually did push something, so don't spill the thing we just pushed.
7635 assert(verCurrentState.esStackDepth > 0);
7636 impAppendTree(call, verCurrentState.esStackDepth - 1, impCurStmtOffs);
7640 impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
7645 impSpillSpecialSideEff();
7647 if (clsFlags & CORINFO_FLG_ARRAY)
7649 eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
7652 // Find the return type used for verification by interpreting the method signature.
7653 // NB: we are clobbering the already established sig.
7654 if (tiVerificationNeeded)
7656 // Actually, we never get the sig for the original method.
7657 sig = &(callInfo->verSig);
7660 typeInfo tiRetVal = verMakeTypeInfo(sig->retType, sig->retTypeClass);
7661 tiRetVal.NormaliseForStack();
7663 // The CEE_READONLY prefix modifies the verification semantics of an Address
7664 // operation on an array type.
7665 if ((clsFlags & CORINFO_FLG_ARRAY) && readonlyCall && tiRetVal.IsByRef())
7667 tiRetVal.SetIsReadonlyByRef();
7670 if (tiVerificationNeeded)
7672 // We assume all calls return permanent home byrefs. If they
// didn't, they wouldn't be verifiable. This also covers
// the Address() helper for multidimensional arrays.
7675 if (tiRetVal.IsByRef())
7677 tiRetVal.SetIsPermanentHomeByRef();
7683 // Sometimes "call" is not a GT_CALL (if we imported an intrinsic that didn't turn into a call)
7685 bool fatPointerCandidate = call->AsCall()->IsFatPointerCandidate();
7686 if (varTypeIsStruct(callRetTyp))
7688 call = impFixupCallStructReturn(call, sig->retTypeClass);
7691 if ((call->gtFlags & GTF_CALL_INLINE_CANDIDATE) != 0)
7693 assert(opts.OptEnabled(CLFLG_INLINING));
7694 assert(!fatPointerCandidate); // We should not try to inline calli.
7696 // Make the call its own tree (spill the stack if needed).
7697 impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
7699 // TODO: Still using the widened type.
7700 call = gtNewInlineCandidateReturnExpr(call, genActualType(callRetTyp));
7704 if (fatPointerCandidate)
7706 // fatPointer candidates should be in statements of the form call() or var = call().
// Such a form lets us find statements with fat calls without walking through whole trees
// and avoids problems with cutting trees.
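//
// For illustration: instead of letting the calli's value flow into a larger
// expression tree, the code below spills it, producing
//   calliSlot = CALLI(ftn)
// and pushing LCL_VAR calliSlot, so any consuming statement contains the fat
// call only in the canonical call() / var = call() position.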
7709 assert(!bIntrinsicImported);
7710 assert(IsTargetAbi(CORINFO_CORERT_ABI));
7711 if (call->OperGet() != GT_LCL_VAR) // can be already converted by impFixupCallStructReturn.
7713 unsigned calliSlot = lvaGrabTemp(true DEBUGARG("calli"));
7714 LclVarDsc* varDsc = &lvaTable[calliSlot];
7715 varDsc->lvVerTypeInfo = tiRetVal;
7716 impAssignTempGen(calliSlot, call, clsHnd, (unsigned)CHECK_SPILL_NONE);
7717 // impAssignTempGen can change src arg list and return type for call that returns struct.
7718 var_types type = genActualType(lvaTable[calliSlot].TypeGet());
7719 call = gtNewLclvNode(calliSlot, type);
7723 // For non-candidates we must also spill, since we
// might have locals live on the eval stack that this call can modify.
7727 // Suppress this for certain well-known call targets
7728 // that we know won't modify locals, eg calls that are
7729 // recognized in gtCanOptimizeTypeEquality. Otherwise
7730 // we may break key fragile pattern matches later on.
7731 bool spillStack = true;
7734 GenTreeCall* callNode = call->AsCall();
7735 if ((callNode->gtCallType == CT_HELPER) && gtIsTypeHandleToRuntimeTypeHelper(callNode))
7739 else if ((callNode->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) != 0)
7747 impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("non-inline candidate call"));
7752 if (!bIntrinsicImported)
7754 //-------------------------------------------------------------------------
/* If the call is of a small type and the callee is managed, the callee will normalize the result before returning.
7758 However, we need to normalize small type values returned by unmanaged
7759 functions (pinvoke). The pinvoke stub does the normalization, but we need to do it here
7760 if we use the shorter inlined pinvoke stub. */
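// For example, an unmanaged callee declared as returning an 8-bit value may
// leave garbage in the upper bits of the return register; the cast appended
// below (conceptually CAST(small type <- int)) re-normalizes the value.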
7762 if (checkForSmallType && varTypeIsIntegral(callRetTyp) && genTypeSize(callRetTyp) < genTypeSize(TYP_INT))
7764 call = gtNewCastNode(genActualType(callRetTyp), call, callRetTyp);
7768 impPushOnStack(call, tiRetVal);
7771 // VSD functions get a new call target each time we getCallInfo, so clear the cache.
7772 // Also, the call info cache for CALLI instructions is largely incomplete, so clear it out.
7773 // if ( (opcode == CEE_CALLI) || (callInfoCache.fetchCallInfo().kind == CORINFO_VIRTUALCALL_STUB))
7774 // callInfoCache.uncacheCallInfo();
7779 #pragma warning(pop)
7782 bool Compiler::impMethodInfo_hasRetBuffArg(CORINFO_METHOD_INFO* methInfo)
7784 CorInfoType corType = methInfo->args.retType;
7786 if ((corType == CORINFO_TYPE_VALUECLASS) || (corType == CORINFO_TYPE_REFANY))
7788 // We have some kind of STRUCT being returned
7790 structPassingKind howToReturnStruct = SPK_Unknown;
7792 var_types returnType = getReturnTypeForStruct(methInfo->args.retTypeClass, &howToReturnStruct);
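// For example (illustrative; the exact classification is ABI-dependent): on
// Windows x64 an 8-byte struct comes back in a register (SPK_PrimitiveType),
// while a 32-byte struct yields SPK_ByReference, i.e. a hidden retbuf arg.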
7794 if (howToReturnStruct == SPK_ByReference)
7805 var_types Compiler::impImportJitTestLabelMark(int numArgs)
7807 TestLabelAndNum tlAndN;
7811 StackEntry se = impPopStack();
7812 assert(se.seTypeInfo.GetType() == TI_INT);
7813 GenTreePtr val = se.val;
7814 assert(val->IsCnsIntOrI());
7815 tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue();
7817 else if (numArgs == 3)
7819 StackEntry se = impPopStack();
7820 assert(se.seTypeInfo.GetType() == TI_INT);
7821 GenTreePtr val = se.val;
7822 assert(val->IsCnsIntOrI());
7823 tlAndN.m_num = val->AsIntConCommon()->IconValue();
7825 assert(se.seTypeInfo.GetType() == TI_INT);
7827 assert(val->IsCnsIntOrI());
7828 tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue();
7835 StackEntry expSe = impPopStack();
7836 GenTreePtr node = expSe.val;
// There are a small number of special cases where we actually put the annotation on a subnode.
7839 if (tlAndN.m_tl == TL_LoopHoist && tlAndN.m_num >= 100)
7841 // A loop hoist annotation with value >= 100 means that the expression should be a static field access,
7842 // a GT_IND of a static field address, which should be the sum of a (hoistable) helper call and possibly some
// offset within the static field block whose address is returned by the helper call.
7844 // The annotation is saying that this address calculation, but not the entire access, should be hoisted.
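// For example, an annotation value of 102 becomes a TL_LoopHoist annotation
// with value 2 placed on the address computation under the GT_IND, rather
// than on the GT_IND itself.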
7845 GenTreePtr helperCall = nullptr;
7846 assert(node->OperGet() == GT_IND);
7847 tlAndN.m_num -= 100;
7848 GetNodeTestData()->Set(node->gtOp.gtOp1, tlAndN);
7849 GetNodeTestData()->Remove(node);
7853 GetNodeTestData()->Set(node, tlAndN);
7856 impPushOnStack(node, expSe.seTypeInfo);
7857 return node->TypeGet();
7861 //-----------------------------------------------------------------------------------
7862 // impFixupCallStructReturn: For a call node that returns a struct type either
7863 // adjust the return type to an enregisterable type, or set the flag to indicate
7864 // struct return via retbuf arg.
7867 // call - GT_CALL GenTree node
7868 // retClsHnd - Class handle of return type of the call
7871 // Returns new GenTree node after fixing struct return of call node
7873 GenTreePtr Compiler::impFixupCallStructReturn(GenTreePtr call, CORINFO_CLASS_HANDLE retClsHnd)
7875 assert(call->gtOper == GT_CALL);
7877 if (!varTypeIsStruct(call))
7882 call->gtCall.gtRetClsHnd = retClsHnd;
7884 GenTreeCall* callNode = call->AsCall();
7886 #if FEATURE_MULTIREG_RET
7887 // Initialize Return type descriptor of call node
7888 ReturnTypeDesc* retTypeDesc = callNode->GetReturnTypeDesc();
7889 retTypeDesc->InitializeStructReturnType(this, retClsHnd);
7890 #endif // FEATURE_MULTIREG_RET
7892 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
// Not allowed for FEATURE_CORECLR, which is the only SKU available for System V OSs.
7895 assert(!callNode->IsVarargs() && "varargs not allowed for System V OSs.");
7897 // The return type will remain as the incoming struct type unless normalized to a
7898 // single eightbyte return type below.
7899 callNode->gtReturnType = call->gtType;
7901 unsigned retRegCount = retTypeDesc->GetReturnRegCount();
7902 if (retRegCount != 0)
7904 if (retRegCount == 1)
7906 // struct returned in a single register
7907 callNode->gtReturnType = retTypeDesc->GetReturnRegType(0);
7911 // must be a struct returned in two registers
7912 assert(retRegCount == 2);
7914 if ((!callNode->CanTailCall()) && (!callNode->IsInlineCandidate()))
// Force a call returning a multi-reg struct to always be of the IR form: tmp = call
7919 // No need to assign a multi-reg struct to a local var if:
7920 // - It is a tail call or
7921 // - The call is marked for in-lining later
7922 return impAssignMultiRegTypeToVar(call, retClsHnd);
// struct not returned in registers, i.e., returned via a hidden retbuf arg.
7929 callNode->gtCallMoreFlags |= GTF_CALL_M_RETBUFFARG;
7932 #else // not FEATURE_UNIX_AMD64_STRUCT_PASSING
7934 #if FEATURE_MULTIREG_RET && defined(_TARGET_ARM_)
7935 // There is no fixup necessary if the return type is a HFA struct.
7936 // HFA structs are returned in registers for ARM32 and ARM64
7938 if (!call->gtCall.IsVarargs() && IsHfa(retClsHnd))
7940 if (call->gtCall.CanTailCall())
7942 if (info.compIsVarArgs)
7944 // We cannot tail call because control needs to return to fixup the calling
7945 // convention for result return.
7946 call->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
7950 // If we can tail call returning HFA, then don't assign it to
7951 // a variable back and forth.
7956 if (call->gtFlags & GTF_CALL_INLINE_CANDIDATE)
7961 unsigned retRegCount = retTypeDesc->GetReturnRegCount();
7962 if (retRegCount >= 2)
7964 return impAssignMultiRegTypeToVar(call, retClsHnd);
7967 #endif // _TARGET_ARM_
7969 // Check for TYP_STRUCT type that wraps a primitive type
7970 // Such structs are returned using a single register
7971 // and we change the return type on those calls here.
7973 structPassingKind howToReturnStruct;
7974 var_types returnType = getReturnTypeForStruct(retClsHnd, &howToReturnStruct);
7976 if (howToReturnStruct == SPK_ByReference)
7978 assert(returnType == TYP_UNKNOWN);
7979 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_RETBUFFARG;
7983 assert(returnType != TYP_UNKNOWN);
7984 call->gtCall.gtReturnType = returnType;
7986 // ToDo: Refactor this common code sequence into its own method as it is used 4+ times
7987 if ((returnType == TYP_LONG) && (compLongUsed == false))
7989 compLongUsed = true;
7991 else if (((returnType == TYP_FLOAT) || (returnType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
7993 compFloatingPointUsed = true;
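// Re the ToDo above, a minimal sketch of what such a helper could look like
// (hypothetical, not existing code):
//
//   void Compiler::impUpdateTypeUsage(var_types type)
//   {
//       if (type == TYP_LONG)
//       {
//           compLongUsed = true;
//       }
//       else if ((type == TYP_FLOAT) || (type == TYP_DOUBLE))
//       {
//           compFloatingPointUsed = true;
//       }
//   }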
7996 #if FEATURE_MULTIREG_RET
7997 unsigned retRegCount = retTypeDesc->GetReturnRegCount();
7998 assert(retRegCount != 0);
8000 if (retRegCount >= 2)
8002 if ((!callNode->CanTailCall()) && (!callNode->IsInlineCandidate()))
// Force a call returning a multi-reg struct to always be of the IR form: tmp = call
8007 // No need to assign a multi-reg struct to a local var if:
8008 // - It is a tail call or
8009 // - The call is marked for in-lining later
8010 return impAssignMultiRegTypeToVar(call, retClsHnd);
8013 #endif // FEATURE_MULTIREG_RET
8016 #endif // not FEATURE_UNIX_AMD64_STRUCT_PASSING
8021 /*****************************************************************************
8022 For struct return values, re-type the operand in the case where the ABI
does not use a struct return buffer.
Note that this method is only called for !_TARGET_X86_.
8027 GenTreePtr Compiler::impFixupStructReturnType(GenTreePtr op, CORINFO_CLASS_HANDLE retClsHnd)
8029 assert(varTypeIsStruct(info.compRetType));
8030 assert(info.compRetBuffArg == BAD_VAR_NUM);
8032 #if defined(_TARGET_XARCH_)
8034 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
8035 // No VarArgs for CoreCLR on x64 Unix
8036 assert(!info.compIsVarArgs);
8038 // Is method returning a multi-reg struct?
8039 if (varTypeIsStruct(info.compRetNativeType) && IsMultiRegReturnedType(retClsHnd))
8041 // In case of multi-reg struct return, we force IR to be one of the following:
8042 // GT_RETURN(lclvar) or GT_RETURN(call). If op is anything other than a
// lclvar or call, it is assigned to a temp to create: tmp = op and GT_RETURN(tmp).
8045 if (op->gtOper == GT_LCL_VAR)
8047 // Make sure that this struct stays in memory and doesn't get promoted.
8048 unsigned lclNum = op->gtLclVarCommon.gtLclNum;
8049 lvaTable[lclNum].lvIsMultiRegRet = true;
8051 // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
8052 op->gtFlags |= GTF_DONT_CSE;
8057 if (op->gtOper == GT_CALL)
8062 return impAssignMultiRegTypeToVar(op, retClsHnd);
8064 #else // !FEATURE_UNIX_AMD64_STRUCT_PASSING
8065 assert(info.compRetNativeType != TYP_STRUCT);
8066 #endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING
8068 #elif FEATURE_MULTIREG_RET && defined(_TARGET_ARM_)
8070 if (varTypeIsStruct(info.compRetNativeType) && !info.compIsVarArgs && IsHfa(retClsHnd))
8072 if (op->gtOper == GT_LCL_VAR)
// This LCL_VAR is an HFA return value; it stays as a TYP_STRUCT
8075 unsigned lclNum = op->gtLclVarCommon.gtLclNum;
8076 // Make sure this struct type stays as struct so that we can return it as an HFA
8077 lvaTable[lclNum].lvIsMultiRegRet = true;
8079 // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
8080 op->gtFlags |= GTF_DONT_CSE;
8085 if (op->gtOper == GT_CALL)
8087 if (op->gtCall.IsVarargs())
8089 // We cannot tail call because control needs to return to fixup the calling
8090 // convention for result return.
8091 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL;
8092 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
8099 return impAssignMultiRegTypeToVar(op, retClsHnd);
8102 #elif FEATURE_MULTIREG_RET && defined(_TARGET_ARM64_)
8104 // Is method returning a multi-reg struct?
8105 if (IsMultiRegReturnedType(retClsHnd))
8107 if (op->gtOper == GT_LCL_VAR)
8109 // This LCL_VAR stays as a TYP_STRUCT
8110 unsigned lclNum = op->gtLclVarCommon.gtLclNum;
8112 // Make sure this struct type is not struct promoted
8113 lvaTable[lclNum].lvIsMultiRegRet = true;
8115 // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
8116 op->gtFlags |= GTF_DONT_CSE;
8121 if (op->gtOper == GT_CALL)
8123 if (op->gtCall.IsVarargs())
8125 // We cannot tail call because control needs to return to fixup the calling
8126 // convention for result return.
8127 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL;
8128 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
8135 return impAssignMultiRegTypeToVar(op, retClsHnd);
8138 #endif // FEATURE_MULTIREG_RET && FEATURE_HFA
// Adjust the type away from struct to integral,
// with no normalization.
8143 if (op->gtOper == GT_LCL_VAR)
8145 op->ChangeOper(GT_LCL_FLD);
8147 else if (op->gtOper == GT_OBJ)
8149 GenTreePtr op1 = op->AsObj()->Addr();
8151 // We will fold away OBJ/ADDR
8152 // except for OBJ/ADDR/INDEX
8153 // as the array type influences the array element's offset
// Later in this method we change op->gtType to info.compRetNativeType.
8155 // This is not correct when op is a GT_INDEX as the starting offset
8156 // for the array elements 'elemOffs' is different for an array of
8157 // TYP_REF than an array of TYP_STRUCT (which simply wraps a TYP_REF)
8158 // Also refer to the GTF_INX_REFARR_LAYOUT flag
8160 if ((op1->gtOper == GT_ADDR) && (op1->gtOp.gtOp1->gtOper != GT_INDEX))
8162 // Change '*(&X)' to 'X' and see if we can do better
8163 op = op1->gtOp.gtOp1;
8164 goto REDO_RETURN_NODE;
8166 op->gtObj.gtClass = NO_CLASS_HANDLE;
8167 op->ChangeOperUnchecked(GT_IND);
8168 op->gtFlags |= GTF_IND_TGTANYWHERE;
8170 else if (op->gtOper == GT_CALL)
8172 if (op->AsCall()->TreatAsHasRetBufArg(this))
8174 // This must be one of those 'special' helpers that don't
8175 // really have a return buffer, but instead use it as a way
8176 // to keep the trees cleaner with fewer address-taken temps.
// Well, now we have to materialize the return buffer as
8179 // an address-taken temp. Then we can return the temp.
8181 // NOTE: this code assumes that since the call directly
8182 // feeds the return, then the call must be returning the
8183 // same structure/class/type.
8185 unsigned tmpNum = lvaGrabTemp(true DEBUGARG("pseudo return buffer"));
8187 // No need to spill anything as we're about to return.
8188 impAssignTempGen(tmpNum, op, info.compMethodInfo->args.retTypeClass, (unsigned)CHECK_SPILL_NONE);
8190 // Don't create both a GT_ADDR & GT_OBJ just to undo all of that; instead,
8191 // jump directly to a GT_LCL_FLD.
8192 op = gtNewLclvNode(tmpNum, info.compRetNativeType);
8193 op->ChangeOper(GT_LCL_FLD);
8197 assert(info.compRetNativeType == op->gtCall.gtReturnType);
8199 // Don't change the gtType of the node just yet, it will get changed later.
8203 else if (op->gtOper == GT_COMMA)
8205 op->gtOp.gtOp2 = impFixupStructReturnType(op->gtOp.gtOp2, retClsHnd);
8208 op->gtType = info.compRetNativeType;
8213 /*****************************************************************************
8214 CEE_LEAVE may be jumping out of a protected block, viz, a catch or a
8215 finally-protected try. We find the finally blocks protecting the current
8216 offset (in order) by walking over the complete exception table and
8217 finding enclosing clauses. This assumes that the table is sorted.
8218 This will create a series of BBJ_CALLFINALLY -> BBJ_CALLFINALLY ... -> BBJ_ALWAYS.
8220 If we are leaving a catch handler, we need to attach the
8221 CPX_ENDCATCHes to the correct BBJ_CALLFINALLY blocks.
8223 After this function, the BBJ_LEAVE block has been converted to a different type.
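
For illustration (hypothetical IL):

    .try {
        .try { ... leave L }
        finally { ... endfinally }
    } finally { ... endfinally }
    L: ...

imports to the chain BBJ_CALLFINALLY (inner finally) -> BBJ_CALLFINALLY
(outer finally) -> BBJ_ALWAYS -> the block at L.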
8226 #if !FEATURE_EH_FUNCLETS
8228 void Compiler::impImportLeave(BasicBlock* block)
8233 printf("\nBefore import CEE_LEAVE:\n");
8234 fgDispBasicBlocks();
8239 bool invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created)
8240 unsigned blkAddr = block->bbCodeOffs;
8241 BasicBlock* leaveTarget = block->bbJumpDest;
8242 unsigned jmpAddr = leaveTarget->bbCodeOffs;
// LEAVE clears the stack: spill any side effects and set the stack depth to 0
8246 impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave"));
8247 verCurrentState.esStackDepth = 0;
8249 assert(block->bbJumpKind == BBJ_LEAVE);
8250 assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != NULL); // should be a BB boundary
8252 BasicBlock* step = DUMMY_INIT(NULL);
8253 unsigned encFinallies = 0; // Number of enclosing finallies.
8254 GenTreePtr endCatches = NULL;
8255 GenTreePtr endLFin = NULL; // The statement tree to indicate the end of locally-invoked finally.
8260 for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
8262 // Grab the handler offsets
8264 IL_OFFSET tryBeg = HBtab->ebdTryBegOffs();
8265 IL_OFFSET tryEnd = HBtab->ebdTryEndOffs();
8266 IL_OFFSET hndBeg = HBtab->ebdHndBegOffs();
8267 IL_OFFSET hndEnd = HBtab->ebdHndEndOffs();
8269 /* Is this a catch-handler we are CEE_LEAVEing out of?
8270 * If so, we need to call CORINFO_HELP_ENDCATCH.
8273 if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd))
8275 // Can't CEE_LEAVE out of a finally/fault handler
8276 if (HBtab->HasFinallyOrFaultHandler())
8277 BADCODE("leave out of fault/finally block");
8279 // Create the call to CORINFO_HELP_ENDCATCH
8280 GenTreePtr endCatch = gtNewHelperCallNode(CORINFO_HELP_ENDCATCH, TYP_VOID);
8282 // Make a list of all the currently pending endCatches
8284 endCatches = gtNewOperNode(GT_COMMA, TYP_VOID, endCatches, endCatch);
8286 endCatches = endCatch;
8291 printf("impImportLeave - BB%02u jumping out of catch handler EH#%u, adding call to "
8292 "CORINFO_HELP_ENDCATCH\n",
8293 block->bbNum, XTnum);
8297 else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
8298 !jitIsBetween(jmpAddr, tryBeg, tryEnd))
8300 /* This is a finally-protected try we are jumping out of */
8302 /* If there are any pending endCatches, and we have already
8303 jumped out of a finally-protected try, then the endCatches
8304 have to be put in a block in an outer try for async
8305 exceptions to work correctly.
Else, just append to the original block */
8308 BasicBlock* callBlock;
8310 assert(!encFinallies == !endLFin); // if we have finallies, we better have an endLFin tree, and vice-versa
8312 if (encFinallies == 0)
8314 assert(step == DUMMY_INIT(NULL));
8316 callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY
8319 impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
8324 printf("impImportLeave - jumping out of a finally-protected try, convert block to BBJ_CALLFINALLY "
8325 "block BB%02u [%08p]\n",
8326 callBlock->bbNum, dspPtr(callBlock));
8332 assert(step != DUMMY_INIT(NULL));
8334 /* Calling the finally block */
8335 callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, XTnum + 1, 0, step);
8336 assert(step->bbJumpKind == BBJ_ALWAYS);
8337 step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next
8338 // finally in the chain)
8339 step->bbJumpDest->bbRefs++;
8341 /* The new block will inherit this block's weight */
8342 callBlock->setBBWeight(block->bbWeight);
8343 callBlock->bbFlags |= block->bbFlags & BBF_RUN_RARELY;
8348 printf("impImportLeave - jumping out of a finally-protected try, new BBJ_CALLFINALLY block BB%02u "
8350 callBlock->bbNum, dspPtr(callBlock));
8354 GenTreePtr lastStmt;
8358 lastStmt = gtNewStmt(endCatches);
8359 endLFin->gtNext = lastStmt;
8360 lastStmt->gtPrev = endLFin;
8367 // note that this sets BBF_IMPORTED on the block
8368 impEndTreeList(callBlock, endLFin, lastStmt);
8371 step = fgNewBBafter(BBJ_ALWAYS, callBlock, true);
8372 /* The new block will inherit this block's weight */
8373 step->setBBWeight(block->bbWeight);
8374 step->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS;
8379 printf("impImportLeave - jumping out of a finally-protected try, created step (BBJ_ALWAYS) block "
8381 step->bbNum, dspPtr(step));
8385 unsigned finallyNesting = compHndBBtab[XTnum].ebdHandlerNestingLevel;
8386 assert(finallyNesting <= compHndBBtabCount);
8388 callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler.
8389 endLFin = new (this, GT_END_LFIN) GenTreeVal(GT_END_LFIN, TYP_VOID, finallyNesting);
8390 endLFin = gtNewStmt(endLFin);
8395 invalidatePreds = true;
8399 /* Append any remaining endCatches, if any */
8401 assert(!encFinallies == !endLFin);
8403 if (encFinallies == 0)
8405 assert(step == DUMMY_INIT(NULL));
8406 block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS
8409 impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
8414 printf("impImportLeave - no enclosing finally-protected try blocks; convert CEE_LEAVE block to BBJ_ALWAYS "
8415 "block BB%02u [%08p]\n",
8416 block->bbNum, dspPtr(block));
8422 // If leaveTarget is the start of another try block, we want to make sure that
// we do not insert finalStep into that try block. Hence, we find the enclosing try block.
8425 unsigned tryIndex = bbFindInnermostCommonTryRegion(step, leaveTarget);
8427 // Insert a new BB either in the try region indicated by tryIndex or
8428 // the handler region indicated by leaveTarget->bbHndIndex,
8429 // depending on which is the inner region.
8430 BasicBlock* finalStep = fgNewBBinRegion(BBJ_ALWAYS, tryIndex, leaveTarget->bbHndIndex, step);
8431 finalStep->bbFlags |= BBF_KEEP_BBJ_ALWAYS;
8432 step->bbJumpDest = finalStep;
8434 /* The new block will inherit this block's weight */
8435 finalStep->setBBWeight(block->bbWeight);
8436 finalStep->bbFlags |= block->bbFlags & BBF_RUN_RARELY;
8441 printf("impImportLeave - finalStep block required (encFinallies(%d) > 0), new block BB%02u [%08p]\n",
8442 encFinallies, finalStep->bbNum, dspPtr(finalStep));
8446 GenTreePtr lastStmt;
8450 lastStmt = gtNewStmt(endCatches);
8451 endLFin->gtNext = lastStmt;
8452 lastStmt->gtPrev = endLFin;
8459 impEndTreeList(finalStep, endLFin, lastStmt);
8461 finalStep->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE
8463 // Queue up the jump target for importing
8465 impImportBlockPending(leaveTarget);
8467 invalidatePreds = true;
8470 if (invalidatePreds && fgComputePredsDone)
8472 JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n");
8477 fgVerifyHandlerTab();
8481 printf("\nAfter import CEE_LEAVE:\n");
8482 fgDispBasicBlocks();
8488 #else // FEATURE_EH_FUNCLETS
8490 void Compiler::impImportLeave(BasicBlock* block)
printf("\nBefore import CEE_LEAVE in BB%02u (targeting BB%02u):\n", block->bbNum, block->bbJumpDest->bbNum);
8496 fgDispBasicBlocks();
8501 bool invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created)
8502 unsigned blkAddr = block->bbCodeOffs;
8503 BasicBlock* leaveTarget = block->bbJumpDest;
8504 unsigned jmpAddr = leaveTarget->bbCodeOffs;
// LEAVE clears the stack: spill any side effects and set the stack depth to 0
8508 impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave"));
8509 verCurrentState.esStackDepth = 0;
8511 assert(block->bbJumpKind == BBJ_LEAVE);
8512 assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != nullptr); // should be a BB boundary
8514 BasicBlock* step = nullptr;
8518 // No step type; step == NULL.
8521 // Is the step block the BBJ_ALWAYS block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair?
8522 // That is, is step->bbJumpDest where a finally will return to?
8525 // The step block is a catch return.
8528 // The step block is in a "try", created as the target for a finally return or the target for a catch return.
8531 StepType stepType = ST_None;
8536 for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
8538 // Grab the handler offsets
8540 IL_OFFSET tryBeg = HBtab->ebdTryBegOffs();
8541 IL_OFFSET tryEnd = HBtab->ebdTryEndOffs();
8542 IL_OFFSET hndBeg = HBtab->ebdHndBegOffs();
8543 IL_OFFSET hndEnd = HBtab->ebdHndEndOffs();
8545 /* Is this a catch-handler we are CEE_LEAVEing out of?
8548 if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd))
8550 // Can't CEE_LEAVE out of a finally/fault handler
8551 if (HBtab->HasFinallyOrFaultHandler())
8553 BADCODE("leave out of fault/finally block");
8556 /* We are jumping out of a catch */
8558 if (step == nullptr)
8561 step->bbJumpKind = BBJ_EHCATCHRET; // convert the BBJ_LEAVE to BBJ_EHCATCHRET
8562 stepType = ST_Catch;
8567 printf("impImportLeave - jumping out of a catch (EH#%u), convert block BB%02u to BBJ_EHCATCHRET "
8569 XTnum, step->bbNum);
8575 BasicBlock* exitBlock;
8577 /* Create a new catch exit block in the catch region for the existing step block to jump to in this
8579 exitBlock = fgNewBBinRegion(BBJ_EHCATCHRET, 0, XTnum + 1, step);
8581 assert(step->bbJumpKind == BBJ_ALWAYS || step->bbJumpKind == BBJ_EHCATCHRET);
8582 step->bbJumpDest = exitBlock; // the previous step (maybe a call to a nested finally, or a nested catch
8583 // exit) returns to this block
8584 step->bbJumpDest->bbRefs++;
8586 #if defined(_TARGET_ARM_)
8587 if (stepType == ST_FinallyReturn)
8589 assert(step->bbJumpKind == BBJ_ALWAYS);
8590 // Mark the target of a finally return
8591 step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
8593 #endif // defined(_TARGET_ARM_)
8595 /* The new block will inherit this block's weight */
8596 exitBlock->setBBWeight(block->bbWeight);
8597 exitBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8599 /* This exit block is the new step */
8601 stepType = ST_Catch;
8603 invalidatePreds = true;
8608 printf("impImportLeave - jumping out of a catch (EH#%u), new BBJ_EHCATCHRET block BB%02u\n", XTnum,
8614 else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
8615 !jitIsBetween(jmpAddr, tryBeg, tryEnd))
8617 /* We are jumping out of a finally-protected try */
8619 BasicBlock* callBlock;
8621 if (step == nullptr)
8623 #if FEATURE_EH_CALLFINALLY_THUNKS
8625 // Put the call to the finally in the enclosing region.
8626 unsigned callFinallyTryIndex =
8627 (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1;
8628 unsigned callFinallyHndIndex =
8629 (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1;
8630 callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, block);
8632 // Convert the BBJ_LEAVE to BBJ_ALWAYS, jumping to the new BBJ_CALLFINALLY. This is because
8633 // the new BBJ_CALLFINALLY is in a different EH region, thus it can't just replace the BBJ_LEAVE,
8634 // which might be in the middle of the "try". In most cases, the BBJ_ALWAYS will jump to the
8635 // next block, and flow optimizations will remove it.
8636 block->bbJumpKind = BBJ_ALWAYS;
8637 block->bbJumpDest = callBlock;
8638 block->bbJumpDest->bbRefs++;
8640 /* The new block will inherit this block's weight */
8641 callBlock->setBBWeight(block->bbWeight);
8642 callBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8647 printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block BB%02u to "
8648 "BBJ_ALWAYS, add BBJ_CALLFINALLY block BB%02u\n",
8649 XTnum, block->bbNum, callBlock->bbNum);
8653 #else // !FEATURE_EH_CALLFINALLY_THUNKS
8656 callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY
8661 printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block BB%02u to "
8662 "BBJ_CALLFINALLY block\n",
8663 XTnum, callBlock->bbNum);
8667 #endif // !FEATURE_EH_CALLFINALLY_THUNKS
8671 // Calling the finally block. We already have a step block that is either the call-to-finally from a
8672 // more nested try/finally (thus we are jumping out of multiple nested 'try' blocks, each protected by
8673 // a 'finally'), or the step block is the return from a catch.
8675 // Due to ThreadAbortException, we can't have the catch return target the call-to-finally block
8676 // directly. Note that if a 'catch' ends without resetting the ThreadAbortException, the VM will
8677 // automatically re-raise the exception, using the return address of the catch (that is, the target
8678 // block of the BBJ_EHCATCHRET) as the re-raise address. If this address is in a finally, the VM will
8679 // refuse to do the re-raise, and the ThreadAbortException will get eaten (and lost). On AMD64/ARM64,
8680 // we put the call-to-finally thunk in a special "cloned finally" EH region that does look like a
8681 // finally clause to the VM. Thus, on these platforms, we can't have BBJ_EHCATCHRET target a
8682 // BBJ_CALLFINALLY directly. (Note that on ARM32, we don't mark the thunk specially -- it lives directly
8683 // within the 'try' region protected by the finally, since we generate code in such a way that execution
// never returns to the call-to-finally call, and the finally-protected 'try' region doesn't appear on stack walks.)
8687 assert(step->bbJumpKind == BBJ_ALWAYS || step->bbJumpKind == BBJ_EHCATCHRET);
8689 #if FEATURE_EH_CALLFINALLY_THUNKS
8690 if (step->bbJumpKind == BBJ_EHCATCHRET)
8692 // Need to create another step block in the 'try' region that will actually branch to the
8693 // call-to-finally thunk.
8694 BasicBlock* step2 = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step);
8695 step->bbJumpDest = step2;
8696 step->bbJumpDest->bbRefs++;
8697 step2->setBBWeight(block->bbWeight);
8698 step2->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8703 printf("impImportLeave - jumping out of a finally-protected try (EH#%u), step block is "
8704 "BBJ_EHCATCHRET (BB%02u), new BBJ_ALWAYS step-step block BB%02u\n",
8705 XTnum, step->bbNum, step2->bbNum);
8710 assert(stepType == ST_Catch); // Leave it as catch type for now.
8712 #endif // FEATURE_EH_CALLFINALLY_THUNKS
8714 #if FEATURE_EH_CALLFINALLY_THUNKS
8715 unsigned callFinallyTryIndex =
8716 (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1;
8717 unsigned callFinallyHndIndex =
8718 (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1;
8719 #else // !FEATURE_EH_CALLFINALLY_THUNKS
8720 unsigned callFinallyTryIndex = XTnum + 1;
8721 unsigned callFinallyHndIndex = 0; // don't care
8722 #endif // !FEATURE_EH_CALLFINALLY_THUNKS
8724 callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, step);
8725 step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next
8726 // finally in the chain)
8727 step->bbJumpDest->bbRefs++;
8729 #if defined(_TARGET_ARM_)
8730 if (stepType == ST_FinallyReturn)
8732 assert(step->bbJumpKind == BBJ_ALWAYS);
8733 // Mark the target of a finally return
8734 step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
8736 #endif // defined(_TARGET_ARM_)
8738 /* The new block will inherit this block's weight */
8739 callBlock->setBBWeight(block->bbWeight);
8740 callBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8745 printf("impImportLeave - jumping out of a finally-protected try (EH#%u), new BBJ_CALLFINALLY block "
8747 XTnum, callBlock->bbNum);
8752 step = fgNewBBafter(BBJ_ALWAYS, callBlock, true);
8753 stepType = ST_FinallyReturn;
8755 /* The new block will inherit this block's weight */
8756 step->setBBWeight(block->bbWeight);
8757 step->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS;
8762 printf("impImportLeave - jumping out of a finally-protected try (EH#%u), created step (BBJ_ALWAYS) "
8764 XTnum, step->bbNum);
8768 callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler.
8770 invalidatePreds = true;
8772 else if (HBtab->HasCatchHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
8773 !jitIsBetween(jmpAddr, tryBeg, tryEnd))
8775 // We are jumping out of a catch-protected try.
8777 // If we are returning from a call to a finally, then we must have a step block within a try
8778 // that is protected by a catch. This is so when unwinding from that finally (e.g., if code within the
8779 // finally raises an exception), the VM will find this step block, notice that it is in a protected region,
8780 // and invoke the appropriate catch.
8782 // We also need to handle a special case with the handling of ThreadAbortException. If a try/catch
8783 // catches a ThreadAbortException (which might be because it catches a parent, e.g. System.Exception),
8784 // and the catch doesn't call System.Threading.Thread::ResetAbort(), then when the catch returns to the VM,
8785 // the VM will automatically re-raise the ThreadAbortException. When it does this, it uses the target
8786 // address of the catch return as the new exception address. That is, the re-raised exception appears to
8787 // occur at the catch return address. If this exception return address skips an enclosing try/catch that
8788 // catches ThreadAbortException, then the enclosing try/catch will not catch the exception, as it should.
8793 // // something here raises ThreadAbortException
8794 // LEAVE LABEL_1; // no need to stop at LABEL_2
8795 // } catch (Exception) {
8796 // // This catches ThreadAbortException, but doesn't call System.Threading.Thread::ResetAbort(), so
8797 // // ThreadAbortException is re-raised by the VM at the address specified by the LEAVE opcode.
8798 // // This is bad, since it means the outer try/catch won't get a chance to catch the re-raised
8799 // // ThreadAbortException. So, instead, create step block LABEL_2 and LEAVE to that. We only
8800 // // need to do this transformation if the current EH block is a try/catch that catches
8801 // // ThreadAbortException (or one of its parents), however we might not be able to find that
8802 // // information, so currently we do it for all catch types.
8803 // LEAVE LABEL_1; // Convert this to LEAVE LABEL2;
8805 // LABEL_2: LEAVE LABEL_1; // inserted by this step creation code
8806 // } catch (ThreadAbortException) {
// Note that this pattern isn't theoretical: it occurs in ASP.NET, in IL code generated by the Roslyn C# compiler.
8813 if ((stepType == ST_FinallyReturn) || (stepType == ST_Catch))
8815 BasicBlock* catchStep;
8819 if (stepType == ST_FinallyReturn)
8821 assert(step->bbJumpKind == BBJ_ALWAYS);
8825 assert(stepType == ST_Catch);
8826 assert(step->bbJumpKind == BBJ_EHCATCHRET);
8829 /* Create a new exit block in the try region for the existing step block to jump to in this scope */
8830 catchStep = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step);
8831 step->bbJumpDest = catchStep;
8832 step->bbJumpDest->bbRefs++;
8834 #if defined(_TARGET_ARM_)
8835 if (stepType == ST_FinallyReturn)
8837 // Mark the target of a finally return
8838 step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
8840 #endif // defined(_TARGET_ARM_)
8842 /* The new block will inherit this block's weight */
8843 catchStep->setBBWeight(block->bbWeight);
8844 catchStep->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8849 if (stepType == ST_FinallyReturn)
8851 printf("impImportLeave - return from finally jumping out of a catch-protected try (EH#%u), new "
8852 "BBJ_ALWAYS block BB%02u\n",
8853 XTnum, catchStep->bbNum);
8857 assert(stepType == ST_Catch);
8858 printf("impImportLeave - return from catch jumping out of a catch-protected try (EH#%u), new "
8859 "BBJ_ALWAYS block BB%02u\n",
8860 XTnum, catchStep->bbNum);
8865 /* This block is the new step */
8869 invalidatePreds = true;
8874 if (step == nullptr)
8876 block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS
8881 printf("impImportLeave - no enclosing finally-protected try blocks or catch handlers; convert CEE_LEAVE "
8882 "block BB%02u to BBJ_ALWAYS\n",
8889 step->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE
8891 #if defined(_TARGET_ARM_)
8892 if (stepType == ST_FinallyReturn)
8894 assert(step->bbJumpKind == BBJ_ALWAYS);
8895 // Mark the target of a finally return
8896 step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
8898 #endif // defined(_TARGET_ARM_)
8903 printf("impImportLeave - final destination of step blocks set to BB%02u\n", leaveTarget->bbNum);
8907 // Queue up the jump target for importing
8909 impImportBlockPending(leaveTarget);
8912 if (invalidatePreds && fgComputePredsDone)
8914 JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n");
8919 fgVerifyHandlerTab();
8923 printf("\nAfter import CEE_LEAVE:\n");
8924 fgDispBasicBlocks();
8930 #endif // FEATURE_EH_FUNCLETS
8932 /*****************************************************************************/
8933 // This is called when reimporting a leave block. It resets the JumpKind,
8934 // JumpDest, and bbNext to the original values
8936 void Compiler::impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr)
8938 #if FEATURE_EH_FUNCLETS
8939 // With EH Funclets, while importing leave opcode we create another block ending with BBJ_ALWAYS (call it B1)
8940 // and the block containing leave (say B0) is marked as BBJ_CALLFINALLY. Say for some reason we reimport B0,
8941 // it is reset (in this routine) by marking as ending with BBJ_LEAVE and further down when B0 is reimported, we
8942 // create another BBJ_ALWAYS (call it B2). In this process B1 gets orphaned and any blocks to which B1 is the
8943 // only predecessor are also considered orphans and attempted to be deleted.
8950 // leave OUTSIDE; // B0 is the block containing this leave, following this would be B1
// In the above nested try-finally example, we create a step block (call it Bstep) which branches to a block
// that a finally would branch to (such a block is marked as a finally target). Block B1 branches to the step block.
// Because of the re-import of B0, Bstep is also orphaned. Since Bstep is a finally target it cannot be removed. To
// work around this we duplicate B0 (call it B0Dup) before resetting it. B0Dup is marked as BBJ_CALLFINALLY and
// only serves to pair up with B1 (BBJ_ALWAYS) that got orphaned. Now during orphan block deletion B0Dup and B1
// will be treated as a pair and handled correctly.
8961 if (block->bbJumpKind == BBJ_CALLFINALLY)
8963 BasicBlock* dupBlock = bbNewBasicBlock(block->bbJumpKind);
8964 dupBlock->bbFlags = block->bbFlags;
8965 dupBlock->bbJumpDest = block->bbJumpDest;
8966 dupBlock->copyEHRegion(block);
8967 dupBlock->bbCatchTyp = block->bbCatchTyp;
8969 // Mark this block as
8970 // a) not referenced by any other block to make sure that it gets deleted
// c) prevented from being imported
8975 dupBlock->bbRefs = 0;
8976 dupBlock->bbWeight = 0;
8977 dupBlock->bbFlags |= BBF_IMPORTED | BBF_INTERNAL | BBF_RUN_RARELY;
8979 // Insert the block right after the block which is getting reset so that BBJ_CALLFINALLY and BBJ_ALWAYS
8980 // will be next to each other.
8981 fgInsertBBafter(block, dupBlock);
8986 printf("New Basic Block BB%02u duplicate of BB%02u created.\n", dupBlock->bbNum, block->bbNum);
8990 #endif // FEATURE_EH_FUNCLETS
8992 block->bbJumpKind = BBJ_LEAVE;
8994 block->bbJumpDest = fgLookupBB(jmpAddr);
8996 // We will leave the BBJ_ALWAYS block we introduced. When it's reimported
8997 // the BBJ_ALWAYS block will be unreachable, and will be removed after. The
8998 // reason we don't want to remove the block at this point is that if we call
// fgInitBBLookup() again the lookup will be built incorrectly, as the BBJ_ALWAYS block won't be
// added and the linked list length will differ from fgBBcount.
9003 /*****************************************************************************/
9004 // Get the first non-prefix opcode. Used for verification of valid combinations
9005 // of prefixes and actual opcodes.
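// For example, given the bytes for "volatile. ldind.i4", this walks past the
// volatile. prefix and returns CEE_LDIND_I4.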
9007 static OPCODE impGetNonPrefixOpcode(const BYTE* codeAddr, const BYTE* codeEndp)
9009 while (codeAddr < codeEndp)
9011 OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr);
9012 codeAddr += sizeof(__int8);
9014 if (opcode == CEE_PREFIX1)
9016 if (codeAddr >= codeEndp)
9020 opcode = (OPCODE)(getU1LittleEndian(codeAddr) + 256);
9021 codeAddr += sizeof(__int8);
9029 case CEE_CONSTRAINED:
9036 codeAddr += opcodeSizes[opcode];
9042 /*****************************************************************************/
9043 // Checks whether the opcode is a valid opcode for volatile. and unaligned. prefixes
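// For example, "volatile. ldind.i4" passes the check below, while
// "volatile. add" does not and is rejected with BADCODE.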
9045 static void impValidateMemoryAccessOpcode(const BYTE* codeAddr, const BYTE* codeEndp, bool volatilePrefix)
9047 OPCODE opcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
// The opcodes of all the ldind and stind variants happen to be contiguous, except stind.i.
9051 ((CEE_LDIND_I1 <= opcode) && (opcode <= CEE_STIND_R8)) || (opcode == CEE_STIND_I) ||
9052 (opcode == CEE_LDFLD) || (opcode == CEE_STFLD) || (opcode == CEE_LDOBJ) || (opcode == CEE_STOBJ) ||
9053 (opcode == CEE_INITBLK) || (opcode == CEE_CPBLK) ||
// the volatile. prefix is also allowed with ldsfld and stsfld
9055 (volatilePrefix && ((opcode == CEE_LDSFLD) || (opcode == CEE_STSFLD)))))
9057 BADCODE("Invalid opcode for unaligned. or volatile. prefix");
9061 /*****************************************************************************/
9065 #undef RETURN // undef contracts RETURN macro
9080 const static controlFlow_t controlFlow[] = {
9081 #define OPDEF(c, s, pop, push, args, type, l, s1, s2, flow) flow,
9082 #include "opcode.def"
9088 /*****************************************************************************
* Determine the result type of an arithmetic operation.
* On 64-bit targets, inserts upcasts when native int is mixed with int32.
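*
* For example:
*   byref - byref          => native int
*   byref +/- native int   => byref
*   int32 op native int    => native int (the int32 operand is upcast on 64-bit)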
9092 var_types Compiler::impGetByRefResultType(genTreeOps oper, bool fUnsigned, GenTreePtr* pOp1, GenTreePtr* pOp2)
9094 var_types type = TYP_UNDEF;
9095 GenTreePtr op1 = *pOp1, op2 = *pOp2;
// Arithmetic operations are generally only allowed with
// primitive types, but certain operations are allowed with byrefs.
9101 if ((oper == GT_SUB) && (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF))
9103 if ((genActualType(op1->TypeGet()) == TYP_BYREF) && (genActualType(op2->TypeGet()) == TYP_BYREF))
9105 // byref1-byref2 => gives a native int
9108 else if (genActualTypeIsIntOrI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_BYREF))
9110 // [native] int - byref => gives a native int
9113 // The reason is that it is possible, in managed C++,
// to have a subtraction tree whose operands are a handle constant int and a
// byref address, i.e. GT_SUB(const(h) int, addr byref).
9123 // <BUGNUM> VSW 318822 </BUGNUM>
// So here we decide to make the resulting type a native int.
9126 CLANG_FORMAT_COMMENT_ANCHOR;
9128 #ifdef _TARGET_64BIT_
9129 if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
9131 // insert an explicit upcast
9132 op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9134 #endif // _TARGET_64BIT_
9140 // byref - [native] int => gives a byref
9141 assert(genActualType(op1->TypeGet()) == TYP_BYREF && genActualTypeIsIntOrI(op2->TypeGet()));
9143 #ifdef _TARGET_64BIT_
9144 if ((genActualType(op2->TypeGet()) != TYP_I_IMPL))
9146 // insert an explicit upcast
9147 op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9149 #endif // _TARGET_64BIT_
9154 else if ((oper == GT_ADD) &&
9155 (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF))
9157 // byref + [native] int => gives a byref
9159 // [native] int + byref => gives a byref
9161 // only one can be a byref : byref op byref not allowed
9162 assert(genActualType(op1->TypeGet()) != TYP_BYREF || genActualType(op2->TypeGet()) != TYP_BYREF);
9163 assert(genActualTypeIsIntOrI(op1->TypeGet()) || genActualTypeIsIntOrI(op2->TypeGet()));
9165 #ifdef _TARGET_64BIT_
9166 if (genActualType(op2->TypeGet()) == TYP_BYREF)
9168 if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
9170 // insert an explicit upcast
9171 op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9174 else if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
9176 // insert an explicit upcast
9177 op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9179 #endif // _TARGET_64BIT_
9183 #ifdef _TARGET_64BIT_
9184 else if (genActualType(op1->TypeGet()) == TYP_I_IMPL || genActualType(op2->TypeGet()) == TYP_I_IMPL)
9186 assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType));
9188 // int + long => gives long
9189 // long + int => gives long
9190 // we get this because in the IL the long isn't Int64, it's just IntPtr
9192 if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
9194 // insert an explicit upcast
9195 op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9197 else if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
9199 // insert an explicit upcast
9200 op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9205 #else // 32-bit TARGET
9206 else if (genActualType(op1->TypeGet()) == TYP_LONG || genActualType(op2->TypeGet()) == TYP_LONG)
9208 assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType));
9210 // int + long => gives long
9211 // long + int => gives long
9215 #endif // _TARGET_64BIT_
9218 // int + int => gives an int
9219 assert(genActualType(op1->TypeGet()) != TYP_BYREF && genActualType(op2->TypeGet()) != TYP_BYREF);
9221 assert(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
9222 varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
9224 type = genActualType(op1->gtType);
9226 #if FEATURE_X87_DOUBLES
9228 // For x87, since we only have 1 size of registers, prefer double
9229 // For everybody else, be more precise
9230 if (type == TYP_FLOAT)
9233 #else // !FEATURE_X87_DOUBLES
9235 // If both operands are TYP_FLOAT, then leave it as TYP_FLOAT.
9236 // Otherwise, turn floats into doubles
9237 if ((type == TYP_FLOAT) && (genActualType(op2->gtType) != TYP_FLOAT))
9239 assert(genActualType(op2->gtType) == TYP_DOUBLE);
9243 #endif // FEATURE_X87_DOUBLES
9246 #if FEATURE_X87_DOUBLES
9247 assert(type == TYP_BYREF || type == TYP_DOUBLE || type == TYP_LONG || type == TYP_INT);
9248 #else // FEATURE_X87_DOUBLES
9249 assert(type == TYP_BYREF || type == TYP_DOUBLE || type == TYP_FLOAT || type == TYP_LONG || type == TYP_INT);
9250 #endif // FEATURE_X87_DOUBLES
9255 /*****************************************************************************
9256 * Casting Helper Function to service both CEE_CASTCLASS and CEE_ISINST
9258 * typeRef contains the token, op1 to contain the value being cast,
9259 * and op2 to contain code that creates the type handle corresponding to typeRef
9260 * isCastClass = true means CEE_CASTCLASS, false means CEE_ISINST
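*
* As a rough sketch, when the check can be expanded inline the result built
* below is a QMARK tree of the form:
*
*   (op1 == null) ? op1
*                 : (methodTable(op1) != op2) ? mismatchResult : op1
*
* where mismatchResult is a call to CORINFO_HELP_CHKCASTCLASS_SPECIAL for
* castclass, or a null constant for isinst.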
9262 GenTreePtr Compiler::impCastClassOrIsInstToTree(GenTreePtr op1,
9264 CORINFO_RESOLVED_TOKEN* pResolvedToken,
9269 assert(op1->TypeGet() == TYP_REF);
9271 CorInfoHelpFunc helper = info.compCompHnd->getCastingHelper(pResolvedToken, isCastClass);
9275 // We only want to expand inline the normal CHKCASTCLASS helper;
9276 expandInline = (helper == CORINFO_HELP_CHKCASTCLASS);
9280 if (helper == CORINFO_HELP_ISINSTANCEOFCLASS)
// Get the Class Handle and class attributes for the type we are casting to
9284 DWORD flags = info.compCompHnd->getClassAttribs(pResolvedToken->hClass);
9287 // If the class handle is marked as final we can also expand the IsInst check inline
9289 expandInline = ((flags & CORINFO_FLG_FINAL) != 0);
9292 // But don't expand inline these two cases
9294 if (flags & CORINFO_FLG_MARSHAL_BYREF)
9296 expandInline = false;
9298 else if (flags & CORINFO_FLG_CONTEXTFUL)
9300 expandInline = false;
9306 // We can't expand inline any other helpers
9308 expandInline = false;
9314 if (compCurBB->isRunRarely())
9316 expandInline = false; // not worth the code expansion in a rarely run block
9319 if ((op1->gtFlags & GTF_GLOB_EFFECT) && lvaHaveManyLocals())
9321 expandInline = false; // not worth creating an untracked local variable
// If we CSE this class handle we prevent assertionProp from making SubType assertions,
// so instead we force the CSE logic to not consider CSE-ing this class handle.
9330 op2->gtFlags |= GTF_DONT_CSE;
9332 return gtNewHelperCallNode(helper, TYP_REF, 0, gtNewArgList(op2, op1));
9335 impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark2"));
9340 // expand the methodtable match:
//   condMT ==> GT_NE(GT_IND(op1Copy), op2)   // op2 is typically a CNS_INT
9349 // This can replace op1 with a GT_COMMA that evaluates op1 into a local
9351 op1 = impCloneExpr(op1, &temp, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("CASTCLASS eval op1"));
9353 // op1 is now known to be a non-complex tree
9354 // thus we can use gtClone(op1) from now on
9357 GenTreePtr op2Var = op2;
9360 op2Var = fgInsertCommaFormTemp(&op2);
9361 lvaTable[op2Var->AsLclVarCommon()->GetLclNum()].lvIsCSE = true;
9363 temp = gtNewOperNode(GT_IND, TYP_I_IMPL, temp);
9364 temp->gtFlags |= GTF_EXCEPT;
9365 condMT = gtNewOperNode(GT_NE, TYP_INT, temp, op2);
9367 GenTreePtr condNull;
9369 // expand the null check:
//   condNull ==> GT_EQ(op1Copy, null)
9376 condNull = gtNewOperNode(GT_EQ, TYP_INT, gtClone(op1), gtNewIconNode(0, TYP_REF));
9379 // expand the true and false trees for the condMT
9381 GenTreePtr condFalse = gtClone(op1);
9382 GenTreePtr condTrue;
9386 // use the special helper that skips the cases checked by our inlined cast
9388 helper = CORINFO_HELP_CHKCASTCLASS_SPECIAL;
9390 condTrue = gtNewHelperCallNode(helper, TYP_REF, 0, gtNewArgList(op2Var, gtClone(op1)));
9394 condTrue = gtNewIconNode(0, TYP_REF);
9397 #define USE_QMARK_TREES
9399 #ifdef USE_QMARK_TREES
9402 // Generate first QMARK - COLON tree
//   qmarkMT ==> GT_QMARK(condMT, GT_COLON(condFalse, condTrue))
9410 temp = new (this, GT_COLON) GenTreeColon(TYP_REF, condTrue, condFalse);
9411 qmarkMT = gtNewQmarkNode(TYP_REF, condMT, temp);
9412 condMT->gtFlags |= GTF_RELOP_QMARK;
9414 GenTreePtr qmarkNull;
9416 // Generate second QMARK - COLON tree
//   qmarkNull ==> GT_QMARK(condNull, GT_COLON(qmarkMT, op1Copy))
9424 temp = new (this, GT_COLON) GenTreeColon(TYP_REF, gtClone(op1), qmarkMT);
9425 qmarkNull = gtNewQmarkNode(TYP_REF, condNull, temp);
9426 qmarkNull->gtFlags |= GTF_QMARK_CAST_INSTOF;
9427 condNull->gtFlags |= GTF_RELOP_QMARK;
9429 // Make QMark node a top level node by spilling it.
9430 unsigned tmp = lvaGrabTemp(true DEBUGARG("spilling QMark2"));
9431 impAssignTempGen(tmp, qmarkNull, (unsigned)CHECK_SPILL_NONE);
9432 return gtNewLclvNode(tmp, TYP_REF);
9437 #define assertImp(cond) ((void)0)
9439 #define assertImp(cond) \
9444 const int cchAssertImpBuf = 600; \
9445 char* assertImpBuf = (char*)alloca(cchAssertImpBuf); \
9446 _snprintf_s(assertImpBuf, cchAssertImpBuf, cchAssertImpBuf - 1, \
9447 "%s : Possibly bad IL with CEE_%s at offset %04Xh (op1=%s op2=%s stkDepth=%d)", #cond, \
9448 impCurOpcName, impCurOpcOffs, op1 ? varTypeName(op1->TypeGet()) : "NULL", \
9449 op2 ? varTypeName(op2->TypeGet()) : "NULL", verCurrentState.esStackDepth); \
9450 assertAbort(assertImpBuf, __FILE__, __LINE__); \
9456 #pragma warning(push)
9457 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
9459 /*****************************************************************************
9460 * Import the instr for the given basic block
9462 void Compiler::impImportBlockCode(BasicBlock* block)
9464 #define _impResolveToken(kind) impResolveToken(codeAddr, &resolvedToken, kind)
9470 printf("\nImporting BB%02u (PC=%03u) of '%s'", block->bbNum, block->bbCodeOffs, info.compFullName);
9474 unsigned nxtStmtIndex = impInitBlockLineInfo();
9475 IL_OFFSET nxtStmtOffs;
9477 GenTreePtr arrayNodeFrom, arrayNodeTo, arrayNodeToIndex;
9479 CorInfoHelpFunc helper;
9480 CorInfoIsAccessAllowedResult accessAllowedResult;
9481 CORINFO_HELPER_DESC calloutHelper;
9482 const BYTE* lastLoadToken = nullptr;
9484 // reject cyclic constraints
9485 if (tiVerificationNeeded)
9487 Verify(!info.hasCircularClassConstraints, "Method parent has circular class type parameter constraints.");
9488 Verify(!info.hasCircularMethodConstraints, "Method has circular method type parameter constraints.");
9491 /* Get the tree list started */
9495 /* Walk the opcodes that comprise the basic block */
9497 const BYTE* codeAddr = info.compCode + block->bbCodeOffs;
9498 const BYTE* codeEndp = info.compCode + block->bbCodeOffsEnd;
9500 IL_OFFSET opcodeOffs = block->bbCodeOffs;
9501 IL_OFFSET lastSpillOffs = opcodeOffs;
9505 /* remember the start of the delegate creation sequence (used for verification) */
9506 const BYTE* delegateCreateStart = nullptr;
9508 int prefixFlags = 0;
9509 bool explicitTailCall, constraintCall, readonlyCall;
9511 bool insertLdloc = false; // set by CEE_DUP and cleared by following store
9514 unsigned numArgs = info.compArgsCount;
9516 /* Now process all the opcodes in the block */
9518 var_types callTyp = TYP_COUNT;
9519 OPCODE prevOpcode = CEE_ILLEGAL;
9521 if (block->bbCatchTyp)
9523 if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES)
9525 impCurStmtOffsSet(block->bbCodeOffs);
9528 // We will spill the GT_CATCH_ARG and the input of the BB_QMARK block
9529 // to a temp. This is a trade off for code simplicity
9530 impSpillSpecialSideEff();
9533 while (codeAddr < codeEndp)
9535 bool usingReadyToRunHelper = false;
9536 CORINFO_RESOLVED_TOKEN resolvedToken;
9537 CORINFO_RESOLVED_TOKEN constrainedResolvedToken;
9538 CORINFO_CALL_INFO callInfo;
9539 CORINFO_FIELD_INFO fieldInfo;
9541 tiRetVal = typeInfo(); // Default type info
9543 //---------------------------------------------------------------------
9545 /* We need to restrict the max tree depth as many of the Compiler
9546 functions are recursive. We do this by spilling the stack */
9548 if (verCurrentState.esStackDepth)
9550 /* Has it been a while since we last saw a non-empty stack (which
guarantees that the tree depth isn't accumulating). */
9553 if ((opcodeOffs - lastSpillOffs) > 200)
9555 impSpillStackEnsure();
9556 lastSpillOffs = opcodeOffs;
9561 lastSpillOffs = opcodeOffs;
9562 impBoxTempInUse = false; // nothing on the stack, box temp OK to use again
9565 /* Compute the current instr offset */
9567 opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
9570 if (opts.compDbgInfo)
9573 if (!compIsForInlining())
9576 (nxtStmtIndex < info.compStmtOffsetsCount) ? info.compStmtOffsets[nxtStmtIndex] : BAD_IL_OFFSET;
9578 /* Have we reached the next stmt boundary ? */
9580 if (nxtStmtOffs != BAD_IL_OFFSET && opcodeOffs >= nxtStmtOffs)
9582 assert(nxtStmtOffs == info.compStmtOffsets[nxtStmtIndex]);
9584 if (verCurrentState.esStackDepth != 0 && opts.compDbgCode)
9586 /* We need to provide accurate IP-mapping at this point.
9587 So spill anything on the stack so that it will form
9588 gtStmts with the correct stmt offset noted */
9590 impSpillStackEnsure(true);
9593 // Has impCurStmtOffs been reported in any tree?
9595 if (impCurStmtOffs != BAD_IL_OFFSET && opts.compDbgCode)
9597 GenTreePtr placeHolder = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
9598 impAppendTree(placeHolder, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
9600 assert(impCurStmtOffs == BAD_IL_OFFSET);
9603 if (impCurStmtOffs == BAD_IL_OFFSET)
9605 /* Make sure that nxtStmtIndex is in sync with opcodeOffs.
9606 If opcodeOffs has gone past nxtStmtIndex, catch up */
9608 while ((nxtStmtIndex + 1) < info.compStmtOffsetsCount &&
9609 info.compStmtOffsets[nxtStmtIndex + 1] <= opcodeOffs)
9614 /* Go to the new stmt */
9616 impCurStmtOffsSet(info.compStmtOffsets[nxtStmtIndex]);
9618 /* Update the stmt boundary index */
9621 assert(nxtStmtIndex <= info.compStmtOffsetsCount);
9623 /* Are there any more line# entries after this one? */
9625 if (nxtStmtIndex < info.compStmtOffsetsCount)
9627 /* Remember where the next line# starts */
9629 nxtStmtOffs = info.compStmtOffsets[nxtStmtIndex];
9633 /* No more line# entries */
9635 nxtStmtOffs = BAD_IL_OFFSET;
9639 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES) &&
9640 (verCurrentState.esStackDepth == 0))
9642 /* At stack-empty locations, we have already added the tree to
9643 the stmt list with the last offset. We just need to update
9647 impCurStmtOffsSet(opcodeOffs);
9649 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES) &&
9650 impOpcodeIsCallSiteBoundary(prevOpcode))
9652 /* Make sure we have a type cached */
9653 assert(callTyp != TYP_COUNT);
9655 if (callTyp == TYP_VOID)
9657 impCurStmtOffsSet(opcodeOffs);
9659 else if (opts.compDbgCode)
9661 impSpillStackEnsure(true);
9662 impCurStmtOffsSet(opcodeOffs);
9665 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::NOP_BOUNDARIES) && (prevOpcode == CEE_NOP))
9667 if (opts.compDbgCode)
9669 impSpillStackEnsure(true);
9672 impCurStmtOffsSet(opcodeOffs);
9675 assert(impCurStmtOffs == BAD_IL_OFFSET || nxtStmtOffs == BAD_IL_OFFSET ||
9676 jitGetILoffs(impCurStmtOffs) <= nxtStmtOffs);
9680 CORINFO_CLASS_HANDLE clsHnd = DUMMY_INIT(NULL);
9681 CORINFO_CLASS_HANDLE ldelemClsHnd = DUMMY_INIT(NULL);
9682 CORINFO_CLASS_HANDLE stelemClsHnd = DUMMY_INIT(NULL);
9684 var_types lclTyp, ovflType = TYP_UNKNOWN;
9685 GenTreePtr op1 = DUMMY_INIT(NULL);
9686 GenTreePtr op2 = DUMMY_INIT(NULL);
9687 GenTreeArgList* args = nullptr; // What good do these "DUMMY_INIT"s do?
9688 GenTreePtr newObjThisPtr = DUMMY_INIT(NULL);
9689 bool uns = DUMMY_INIT(false);
9691 /* Get the next opcode and the size of its parameters */
9693 OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr);
9694 codeAddr += sizeof(__int8);
9697 impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1);
9698 JITDUMP("\n [%2u] %3u (0x%03x) ", verCurrentState.esStackDepth, impCurOpcOffs, impCurOpcOffs);
9703 // Return if any previous code has caused the inline to fail.
9704 if (compDonotInline())
9709 /* Get the size of additional parameters */
9711 signed int sz = opcodeSizes[opcode];
9714 clsHnd = NO_CLASS_HANDLE;
9716 callTyp = TYP_COUNT;
9718 impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1);
9719 impCurOpcName = opcodeNames[opcode];
9721 if (verbose && (opcode != CEE_PREFIX1))
9723 printf("%s", impCurOpcName);
9726 /* Use assertImp() to display the opcode */
9728 op1 = op2 = nullptr;
9731 /* See what kind of an opcode we have, then */
9733 unsigned mflags = 0;
9734 unsigned clsFlags = 0;
9747 CORINFO_SIG_INFO sig;
9750 bool ovfl, unordered, callNode;
9752 CORINFO_CLASS_HANDLE tokenType;
9762 opcode = (OPCODE)(getU1LittleEndian(codeAddr) + 256);
9763 codeAddr += sizeof(__int8);
9764 opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
9769 // We need to call impSpillLclRefs() for a struct type lclVar.
9770 // This is done for non-block assignments in the handling of stloc.
9771 if ((op1->OperGet() == GT_ASG) && varTypeIsStruct(op1->gtOp.gtOp1) &&
9772 (op1->gtOp.gtOp1->gtOper == GT_LCL_VAR))
9774 impSpillLclRefs(op1->gtOp.gtOp1->AsLclVarCommon()->gtLclNum);
9777 /* Append 'op1' to the list of statements */
9778 impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
9783 /* Append 'op1' to the list of statements */
9785 impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
9791 // Remember at which BC offset the tree was finished
9792 impNoteLastILoffs();
9797 impPushNullObjRefOnStack();
9810 cval.intVal = (opcode - CEE_LDC_I4_0);
9811 assert(-1 <= cval.intVal && cval.intVal <= 8);
9815 cval.intVal = getI1LittleEndian(codeAddr);
9818 cval.intVal = getI4LittleEndian(codeAddr);
9821 JITDUMP(" %d", cval.intVal);
9822 impPushOnStack(gtNewIconNode(cval.intVal), typeInfo(TI_INT));
9826 cval.lngVal = getI8LittleEndian(codeAddr);
9827 JITDUMP(" 0x%016llx", cval.lngVal);
9828 impPushOnStack(gtNewLconNode(cval.lngVal), typeInfo(TI_LONG));
9832 cval.dblVal = getR8LittleEndian(codeAddr);
9833 JITDUMP(" %#.17g", cval.dblVal);
9834 impPushOnStack(gtNewDconNode(cval.dblVal), typeInfo(TI_DOUBLE));
9838 cval.dblVal = getR4LittleEndian(codeAddr);
9839 JITDUMP(" %#.17g", cval.dblVal);
9841 GenTreePtr cnsOp = gtNewDconNode(cval.dblVal);
9842 #if !FEATURE_X87_DOUBLES
9843 // The X87 stack doesn't differentiate between float/double,
9844 // so R4 is treated as R8; all other targets keep the types distinct.
9845 cnsOp->gtType = TYP_FLOAT;
9846 #endif // FEATURE_X87_DOUBLES
9847 impPushOnStack(cnsOp, typeInfo(TI_DOUBLE));
9853 if (compIsForInlining())
9855 if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_NO_CALLEE_LDSTR)
9857 compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_LDSTR_RESTRICTION);
9862 val = getU4LittleEndian(codeAddr);
9863 JITDUMP(" %08X", val);
9864 if (tiVerificationNeeded)
9866 Verify(info.compCompHnd->isValidStringRef(info.compScopeHnd, val), "bad string");
9867 tiRetVal = typeInfo(TI_REF, impGetStringClass());
9869 impPushOnStack(gtNewSconNode(val, info.compScopeHnd), tiRetVal);
9874 lclNum = getU2LittleEndian(codeAddr);
9875 JITDUMP(" %u", lclNum);
9876 impLoadArg(lclNum, opcodeOffs + sz + 1);
9880 lclNum = getU1LittleEndian(codeAddr);
9881 JITDUMP(" %u", lclNum);
9882 impLoadArg(lclNum, opcodeOffs + sz + 1);
9889 lclNum = (opcode - CEE_LDARG_0);
9890 assert(lclNum >= 0 && lclNum < 4);
9891 impLoadArg(lclNum, opcodeOffs + sz + 1);
9895 lclNum = getU2LittleEndian(codeAddr);
9896 JITDUMP(" %u", lclNum);
9897 impLoadLoc(lclNum, opcodeOffs + sz + 1);
9901 lclNum = getU1LittleEndian(codeAddr);
9902 JITDUMP(" %u", lclNum);
9903 impLoadLoc(lclNum, opcodeOffs + sz + 1);
9910 lclNum = (opcode - CEE_LDLOC_0);
9911 assert(lclNum >= 0 && lclNum < 4);
9912 impLoadLoc(lclNum, opcodeOffs + sz + 1);
9916 lclNum = getU2LittleEndian(codeAddr);
9920 lclNum = getU1LittleEndian(codeAddr);
9922 JITDUMP(" %u", lclNum);
9924 if (tiVerificationNeeded)
9926 Verify(lclNum < info.compILargsCount, "bad arg num");
9929 if (compIsForInlining())
9931 op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo);
9932 noway_assert(op1->gtOper == GT_LCL_VAR);
9933 lclNum = op1->AsLclVar()->gtLclNum;
9938 lclNum = compMapILargNum(lclNum); // account for possible hidden param
9939 assertImp(lclNum < numArgs);
9941 if (lclNum == info.compThisArg)
9943 lclNum = lvaArg0Var;
9945 lvaTable[lclNum].lvArgWrite = 1;
9947 if (tiVerificationNeeded)
9949 typeInfo& tiLclVar = lvaTable[lclNum].lvVerTypeInfo;
9950 Verify(tiCompatibleWith(impStackTop().seTypeInfo, NormaliseForStack(tiLclVar), true),
9953 if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
9955 Verify(!tiLclVar.IsThisPtr(), "storing to uninit this ptr");
9962 lclNum = getU2LittleEndian(codeAddr);
9963 JITDUMP(" %u", lclNum);
9967 lclNum = getU1LittleEndian(codeAddr);
9968 JITDUMP(" %u", lclNum);
9975 lclNum = (opcode - CEE_STLOC_0);
9976 assert(lclNum >= 0 && lclNum < 4);
9979 if (tiVerificationNeeded)
9981 Verify(lclNum < info.compMethodInfo->locals.numArgs, "bad local num");
9982 Verify(tiCompatibleWith(impStackTop().seTypeInfo,
9983 NormaliseForStack(lvaTable[lclNum + numArgs].lvVerTypeInfo), true),
9987 if (compIsForInlining())
9989 lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
9991 /* Have we allocated a temp for this local? */
9993 lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline stloc first use temp"));
10002 if (lclNum >= info.compLocalsCount && lclNum != lvaArg0Var)
10004 assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
10010 /* if it is a struct assignment, make certain we don't overflow the buffer */
10011 assert(lclTyp != TYP_STRUCT || lvaLclSize(lclNum) >= info.compCompHnd->getClassSize(clsHnd));
10013 if (lvaTable[lclNum].lvNormalizeOnLoad())
10015 lclTyp = lvaGetRealType(lclNum);
10019 lclTyp = lvaGetActualType(lclNum);
10023 /* Pop the value being assigned */
10026 StackEntry se = impPopStack(clsHnd);
10028 tiRetVal = se.seTypeInfo;
10031 #ifdef FEATURE_SIMD
10032 if (varTypeIsSIMD(lclTyp) && (lclTyp != op1->TypeGet()))
10034 assert(op1->TypeGet() == TYP_STRUCT);
10035 op1->gtType = lclTyp;
10037 #endif // FEATURE_SIMD
10039 op1 = impImplicitIorI4Cast(op1, lclTyp);
10041 #ifdef _TARGET_64BIT_
10042 // Downcast the TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
10043 if (varTypeIsI(op1->TypeGet()) && (genActualType(lclTyp) == TYP_INT))
10045 assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
10046 op1 = gtNewCastNode(TYP_INT, op1, TYP_INT);
10048 #endif // _TARGET_64BIT_
10050 // We had better assign it a value of the correct type
10052 genActualType(lclTyp) == genActualType(op1->gtType) ||
10053 genActualType(lclTyp) == TYP_I_IMPL && op1->IsVarAddr() ||
10054 (genActualType(lclTyp) == TYP_I_IMPL && (op1->gtType == TYP_BYREF || op1->gtType == TYP_REF)) ||
10055 (genActualType(op1->gtType) == TYP_I_IMPL && lclTyp == TYP_BYREF) ||
10056 (varTypeIsFloating(lclTyp) && varTypeIsFloating(op1->TypeGet())) ||
10057 ((genActualType(lclTyp) == TYP_BYREF) && genActualType(op1->TypeGet()) == TYP_REF));
10059 /* If op1 is "&var" then its type is the transient "*" and it can
10060 be used either as TYP_BYREF or TYP_I_IMPL */
10062 if (op1->IsVarAddr())
10064 assertImp(genActualType(lclTyp) == TYP_I_IMPL || lclTyp == TYP_BYREF);
10066 /* When "&var" is created, we assume it is a byref. If it is
10067 being assigned to a TYP_I_IMPL var, change the type to
10068 prevent unnecessary GC info */
10070 if (genActualType(lclTyp) == TYP_I_IMPL)
10072 op1->gtType = TYP_I_IMPL;
10076 /* Filter out simple assignments to itself */
10078 if (op1->gtOper == GT_LCL_VAR && lclNum == op1->gtLclVarCommon.gtLclNum)
10082 // This is a sequence of (ldloc, dup, stloc). Can simplify
10083 // to (ldloc, stloc). Goto LDVAR to reconstruct the ldloc node.
10084 CLANG_FORMAT_COMMENT_ANCHOR;
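// For example (illustrative IL): ldloc.0; dup; stloc.0 -- the store
// writes local #0 back into itself, so we drop the assignment and only
// reconstruct the load.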
10087 if (tiVerificationNeeded)
10090 typeInfo::AreEquivalent(tiRetVal, NormaliseForStack(lvaTable[lclNum].lvVerTypeInfo)));
10095 insertLdloc = false;
10097 impLoadVar(lclNum, opcodeOffs + sz + 1);
10100 else if (opts.compDbgCode)
10102 op1 = gtNewNothingNode();
10111 /* Create the assignment node */
10113 op2 = gtNewLclvNode(lclNum, lclTyp, opcodeOffs + sz + 1);
10115 /* If the local is aliased, we need to spill calls and
10116 indirections from the stack. */
10118 if ((lvaTable[lclNum].lvAddrExposed || lvaTable[lclNum].lvHasLdAddrOp) &&
10119 verCurrentState.esStackDepth > 0)
10121 impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("Local could be aliased"));
10124 /* Spill any refs to the local from the stack */
10126 impSpillLclRefs(lclNum);
10128 #if !FEATURE_X87_DOUBLES
10129 // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE
10130 // We insert a cast to the dest 'op2' type
10132 if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) &&
10133 varTypeIsFloating(op2->gtType))
10135 op1 = gtNewCastNode(op2->TypeGet(), op1, op2->TypeGet());
10137 #endif // !FEATURE_X87_DOUBLES
10139 if (varTypeIsStruct(lclTyp))
10141 op1 = impAssignStruct(op2, op1, clsHnd, (unsigned)CHECK_SPILL_ALL);
10145 // The code generator generates GC tracking information
10146 // based on the RHS of the assignment. Later the LHS (which
10147 // is a BYREF) gets used and the emitter checks that that variable
10148 // is being tracked. It is not (since the RHS was an int and did
10149 // not need tracking). To keep this assert happy, we change the RHS
10150 if (lclTyp == TYP_BYREF && !varTypeIsGC(op1->gtType))
10152 op1->gtType = TYP_BYREF;
10154 op1 = gtNewAssignNode(op2, op1);
10157 /* If insertLdloc is true, then we need to insert a ldloc following the
10158 stloc. This is done when converting a (dup, stloc) sequence into
10159 a (stloc, ldloc) sequence. */
10163 // From SPILL_APPEND
10164 impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
10167 // From DONE_APPEND
10168 impNoteLastILoffs();
10171 insertLdloc = false;
10173 impLoadVar(lclNum, opcodeOffs + sz + 1, tiRetVal);
10180 lclNum = getU2LittleEndian(codeAddr);
10184 lclNum = getU1LittleEndian(codeAddr);
10186 JITDUMP(" %u", lclNum);
10187 if (tiVerificationNeeded)
10189 Verify(lclNum < info.compMethodInfo->locals.numArgs, "bad local num");
10190 Verify(info.compInitMem, "initLocals not set");
10193 if (compIsForInlining())
10195 // Get the local type
10196 lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
10198 /* Have we allocated a temp for this local? */
10200 lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline ldloca(s) first use temp"));
10202 op1 = gtNewLclvNode(lclNum, lvaGetActualType(lclNum));
10208 assertImp(lclNum < info.compLocalsCount);
10212 lclNum = getU2LittleEndian(codeAddr);
10216 lclNum = getU1LittleEndian(codeAddr);
10218 JITDUMP(" %u", lclNum);
10219 Verify(lclNum < info.compILargsCount, "bad arg num");
10221 if (compIsForInlining())
10223 // In IL, LDARGA(_S) is used to load the byref managed pointer of a struct argument,
10224 // followed by a ldfld to load the field.
10226 op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo);
10227 if (op1->gtOper != GT_LCL_VAR)
10229 compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDARGA_NOT_LOCAL_VAR);
10233 assert(op1->gtOper == GT_LCL_VAR);
10238 lclNum = compMapILargNum(lclNum); // account for possible hidden param
10239 assertImp(lclNum < numArgs);
10241 if (lclNum == info.compThisArg)
10243 lclNum = lvaArg0Var;
10250 op1 = gtNewLclvNode(lclNum, lvaGetActualType(lclNum), opcodeOffs + sz + 1);
10253 assert(op1->gtOper == GT_LCL_VAR);
10255 /* Note that this is supposed to create the transient type "*"
10256 which may be used as a TYP_I_IMPL. However we catch places
10257 where it is used as a TYP_I_IMPL and change the node if needed.
10258 Thus we are pessimistic and may report byrefs in the GC info
10259 where it was not absolutely needed, but it is safer this way.
10261 op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, op1);
10263 // &aliasedVar doesn't need GTF_GLOB_REF, though aliasedVar does
10264 assert((op1->gtFlags & GTF_GLOB_REF) == 0);
10266 tiRetVal = lvaTable[lclNum].lvVerTypeInfo;
10267 if (tiVerificationNeeded)
10269 // Don't allow taking address of uninit this ptr.
10270 if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
10272 Verify(!tiRetVal.IsThisPtr(), "address of uninit this ptr");
10275 if (!tiRetVal.IsByRef())
10277 tiRetVal.MakeByRef();
10281 Verify(false, "byref to byref");
10285 impPushOnStack(op1, tiRetVal);
10290 if (!info.compIsVarArgs)
10292 BADCODE("arglist in non-vararg method");
10295 if (tiVerificationNeeded)
10297 tiRetVal = typeInfo(TI_STRUCT, impGetRuntimeArgumentHandle());
10299 assertImp((info.compMethodInfo->args.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG);
10301 /* The ARGLIST cookie is a hidden 'last' parameter; we have already
10302 adjusted the arg count because this is like fetching the last param */
10303 assertImp(0 < numArgs);
10304 assert(lvaTable[lvaVarargsHandleArg].lvAddrExposed);
10305 lclNum = lvaVarargsHandleArg;
10306 op1 = gtNewLclvNode(lclNum, TYP_I_IMPL, opcodeOffs + sz + 1);
10307 op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, op1);
10308 impPushOnStack(op1, tiRetVal);
10311 case CEE_ENDFINALLY:
10313 if (compIsForInlining())
10315 assert(!"Shouldn't have exception handlers in the inliner!");
10316 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFINALLY);
10320 if (verCurrentState.esStackDepth > 0)
10322 impEvalSideEffects();
10325 if (info.compXcptnsCount == 0)
10327 BADCODE("endfinally outside finally");
10330 assert(verCurrentState.esStackDepth == 0);
10332 op1 = gtNewOperNode(GT_RETFILT, TYP_VOID, nullptr);
10335 case CEE_ENDFILTER:
10337 if (compIsForInlining())
10339 assert(!"Shouldn't have exception handlers in the inliner!");
10340 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFILTER);
10344 block->bbSetRunRarely(); // filters are rare
10346 if (info.compXcptnsCount == 0)
10348 BADCODE("endfilter outside filter");
10351 if (tiVerificationNeeded)
10353 Verify(impStackTop().seTypeInfo.IsType(TI_INT), "bad endfilt arg");
10356 op1 = impPopStack().val;
10357 assertImp(op1->gtType == TYP_INT);
10358 if (!bbInFilterILRange(block))
10360 BADCODE("EndFilter outside a filter handler");
10363 /* Mark current bb as end of filter */
10365 assert(compCurBB->bbFlags & BBF_DONT_REMOVE);
10366 assert(compCurBB->bbJumpKind == BBJ_EHFILTERRET);
10368 /* Mark catch handler as successor */
10370 op1 = gtNewOperNode(GT_RETFILT, op1->TypeGet(), op1);
10371 if (verCurrentState.esStackDepth != 0)
10373 verRaiseVerifyException(INDEBUG("stack must be 1 on end of filter") DEBUGARG(__FILE__)
10374 DEBUGARG(__LINE__));
10379 prefixFlags &= ~PREFIX_TAILCALL; // ret without call before it
10381 if (!impReturnInstruction(block, prefixFlags, opcode))
10392 assert(!compIsForInlining());
10394 if (tiVerificationNeeded)
10396 Verify(false, "Invalid opcode: CEE_JMP");
10399 if ((info.compFlags & CORINFO_FLG_SYNCH) || block->hasTryIndex() || block->hasHndIndex())
10401 /* CEE_JMP does not make sense in some "protected" regions. */
10403 BADCODE("Jmp not allowed in protected region");
10406 if (verCurrentState.esStackDepth != 0)
10408 BADCODE("Stack must be empty after CEE_JMPs");
10411 _impResolveToken(CORINFO_TOKENKIND_Method);
10413 JITDUMP(" %08X", resolvedToken.token);
10415 /* The signature of the target has to be identical to ours.
10416 At least check that argCnt and returnType match */
10418 eeGetMethodSig(resolvedToken.hMethod, &sig);
10419 if (sig.numArgs != info.compMethodInfo->args.numArgs ||
10420 sig.retType != info.compMethodInfo->args.retType ||
10421 sig.callConv != info.compMethodInfo->args.callConv)
10423 BADCODE("Incompatible target for CEE_JMPs");
10426 #if defined(_TARGET_XARCH_) || defined(_TARGET_ARMARCH_)
10428 op1 = new (this, GT_JMP) GenTreeVal(GT_JMP, TYP_VOID, (size_t)resolvedToken.hMethod);
10430 /* Mark the basic block as being a JUMP instead of RETURN */
10432 block->bbFlags |= BBF_HAS_JMP;
10434 /* Set this flag to make sure register arguments have a location assigned
10435 * even if we don't use them inside the method */
10437 compJmpOpUsed = true;
10439 fgNoStructPromotion = true;
10443 #else // !_TARGET_XARCH_ && !_TARGET_ARMARCH_
10445 // Import this just like a series of LDARGs + tail. + call + ret
10447 if (info.compIsVarArgs)
10449 // For now we don't implement true tail calls, so this breaks varargs.
10450 // So warn the user instead of generating bad code.
10451 // This is a semi-temporary workaround for DevDiv 173860, until we can properly
10452 // implement true tail calls.
10453 IMPL_LIMITATION("varargs + CEE_JMP doesn't work yet");
10456 // First load up the arguments (0 - N)
10457 for (unsigned argNum = 0; argNum < info.compILargsCount; argNum++)
10459 impLoadArg(argNum, opcodeOffs + sz + 1);
10462 // Now generate the tail call
10463 noway_assert(prefixFlags == 0);
10464 prefixFlags = PREFIX_TAILCALL_EXPLICIT;
10467 eeGetCallInfo(&resolvedToken, NULL,
10468 combine(CORINFO_CALLINFO_ALLOWINSTPARAM, CORINFO_CALLINFO_SECURITYCHECKS), &callInfo);
10470 // All calls and delegates need a security callout.
10471 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
10473 callTyp = impImportCall(CEE_CALL, &resolvedToken, NULL, NULL, PREFIX_TAILCALL_EXPLICIT, &callInfo,
10476 // And finish with the ret
10479 #endif // _TARGET_XARCH_ || _TARGET_ARMARCH_
10482 assertImp(sz == sizeof(unsigned));
10484 _impResolveToken(CORINFO_TOKENKIND_Class);
10486 JITDUMP(" %08X", resolvedToken.token);
10488 ldelemClsHnd = resolvedToken.hClass;
10490 if (tiVerificationNeeded)
10492 typeInfo tiArray = impStackTop(1).seTypeInfo;
10493 typeInfo tiIndex = impStackTop().seTypeInfo;
10495 // As per ECMA, the 'index' specified can be either int32 or native int.
10496 Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10498 typeInfo arrayElemType = verMakeTypeInfo(ldelemClsHnd);
10499 Verify(tiArray.IsNullObjRef() ||
10500 typeInfo::AreEquivalent(verGetArrayElemType(tiArray), arrayElemType),
10503 tiRetVal = arrayElemType;
10504 tiRetVal.MakeByRef();
10505 if (prefixFlags & PREFIX_READONLY)
10507 tiRetVal.SetIsReadonlyByRef();
10510 // an array interior pointer is always in the heap
10511 tiRetVal.SetIsPermanentHomeByRef();
10514 // If it's a value class array we just do a simple address-of
10515 if (eeIsValueClass(ldelemClsHnd))
10517 CorInfoType cit = info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd);
10518 if (cit == CORINFO_TYPE_UNDEF)
10520 lclTyp = TYP_STRUCT;
10524 lclTyp = JITtype2varType(cit);
10526 goto ARR_LD_POST_VERIFY;
10529 // Similarly, if it's a readonly access, we can do a simple address-of
10530 // without doing a runtime type-check
10531 if (prefixFlags & PREFIX_READONLY)
10534 goto ARR_LD_POST_VERIFY;
10537 // Otherwise we need the full helper function with run-time type check
10538 op1 = impTokenToHandle(&resolvedToken);
10539 if (op1 == nullptr)
10540 { // compDonotInline()
10544 args = gtNewArgList(op1); // Type
10545 args = gtNewListNode(impPopStack().val, args); // index
10546 args = gtNewListNode(impPopStack().val, args); // array
10547 op1 = gtNewHelperCallNode(CORINFO_HELP_LDELEMA_REF, TYP_BYREF, GTF_EXCEPT, args);
10549 impPushOnStack(op1, tiRetVal);
10552 // ldelem for reference and value types
10554 assertImp(sz == sizeof(unsigned));
10556 _impResolveToken(CORINFO_TOKENKIND_Class);
10558 JITDUMP(" %08X", resolvedToken.token);
10560 ldelemClsHnd = resolvedToken.hClass;
10562 if (tiVerificationNeeded)
10564 typeInfo tiArray = impStackTop(1).seTypeInfo;
10565 typeInfo tiIndex = impStackTop().seTypeInfo;
10567 // As per ECMA, the 'index' specified can be either int32 or native int.
10568 Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10569 tiRetVal = verMakeTypeInfo(ldelemClsHnd);
10571 Verify(tiArray.IsNullObjRef() || tiCompatibleWith(verGetArrayElemType(tiArray), tiRetVal, false),
10572 "type of array incompatible with type operand");
10573 tiRetVal.NormaliseForStack();
10576 // If it's a reference type or generic variable type
10577 // then just generate code as though it's a ldelem.ref instruction
10578 if (!eeIsValueClass(ldelemClsHnd))
10581 opcode = CEE_LDELEM_REF;
10585 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(ldelemClsHnd);
10586 lclTyp = JITtype2varType(jitTyp);
10587 tiRetVal = verMakeTypeInfo(ldelemClsHnd); // precise type always needed for struct
10588 tiRetVal.NormaliseForStack();
10590 goto ARR_LD_POST_VERIFY;
10592 case CEE_LDELEM_I1:
10595 case CEE_LDELEM_I2:
10596 lclTyp = TYP_SHORT;
10599 lclTyp = TYP_I_IMPL;
10602 // Should be UINT, but since no platform widens 4->8 bytes it doesn't matter
10603 // and treating it as TYP_INT avoids other asserts.
10604 case CEE_LDELEM_U4:
10608 case CEE_LDELEM_I4:
10611 case CEE_LDELEM_I8:
10614 case CEE_LDELEM_REF:
10617 case CEE_LDELEM_R4:
10618 lclTyp = TYP_FLOAT;
10620 case CEE_LDELEM_R8:
10621 lclTyp = TYP_DOUBLE;
10623 case CEE_LDELEM_U1:
10624 lclTyp = TYP_UBYTE;
10626 case CEE_LDELEM_U2:
10632 if (tiVerificationNeeded)
10634 typeInfo tiArray = impStackTop(1).seTypeInfo;
10635 typeInfo tiIndex = impStackTop().seTypeInfo;
10637 // As per ECMA, the 'index' specified can be either int32 or native int.
10638 Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10639 if (tiArray.IsNullObjRef())
10641 if (lclTyp == TYP_REF)
10642 { // we will say a deref of a null array yields a null ref
10643 tiRetVal = typeInfo(TI_NULL);
10647 tiRetVal = typeInfo(lclTyp);
10652 tiRetVal = verGetArrayElemType(tiArray);
10653 typeInfo arrayElemTi = typeInfo(lclTyp);
10654 #ifdef _TARGET_64BIT_
10655 if (opcode == CEE_LDELEM_I)
10657 arrayElemTi = typeInfo::nativeInt();
10660 if (lclTyp != TYP_REF && lclTyp != TYP_STRUCT)
10662 Verify(typeInfo::AreEquivalent(tiRetVal, arrayElemTi), "bad array");
10665 #endif // _TARGET_64BIT_
10667 Verify(tiRetVal.IsType(arrayElemTi.GetType()), "bad array");
10670 tiRetVal.NormaliseForStack();
10672 ARR_LD_POST_VERIFY:
10674 /* Pull the index value and array address */
10675 op2 = impPopStack().val;
10676 op1 = impPopStack().val;
10677 assertImp(op1->gtType == TYP_REF);
10679 /* Check for null pointer - in the inliner case we simply abort */
10681 if (compIsForInlining())
10683 if (op1->gtOper == GT_CNS_INT)
10685 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NULL_FOR_LDELEM);
10690 op1 = impCheckForNullPointer(op1);
10692 /* Mark the block as containing an index expression */
10694 if (op1->gtOper == GT_LCL_VAR)
10696 if (op2->gtOper == GT_LCL_VAR || op2->gtOper == GT_CNS_INT || op2->gtOper == GT_ADD)
10698 block->bbFlags |= BBF_HAS_IDX_LEN;
10699 optMethodFlags |= OMF_HAS_ARRAYREF;
10703 /* Create the index node and push it on the stack */
10705 op1 = gtNewIndexRef(lclTyp, op1, op2);
10707 ldstruct = (opcode == CEE_LDELEM && lclTyp == TYP_STRUCT);
10709 if ((opcode == CEE_LDELEMA) || ldstruct ||
10710 (ldelemClsHnd != DUMMY_INIT(NULL) && eeIsValueClass(ldelemClsHnd)))
10712 assert(ldelemClsHnd != DUMMY_INIT(NULL));
10714 // remember the element size
10715 if (lclTyp == TYP_REF)
10717 op1->gtIndex.gtIndElemSize = sizeof(void*);
10721 // If ldElemClass is precisely a primitive type, use that; otherwise, preserve the struct type.
10722 if (info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd) == CORINFO_TYPE_UNDEF)
10724 op1->gtIndex.gtStructElemClass = ldelemClsHnd;
10726 assert(lclTyp != TYP_STRUCT || op1->gtIndex.gtStructElemClass != nullptr);
10727 if (lclTyp == TYP_STRUCT)
10729 size = info.compCompHnd->getClassSize(ldelemClsHnd);
10730 op1->gtIndex.gtIndElemSize = size;
10731 op1->gtType = lclTyp;
10735 if ((opcode == CEE_LDELEMA) || ldstruct)
10738 lclTyp = TYP_BYREF;
10740 op1 = gtNewOperNode(GT_ADDR, lclTyp, op1);
10744 assert(lclTyp != TYP_STRUCT);
10750 // Create an OBJ for the result
10751 op1 = gtNewObjNode(ldelemClsHnd, op1);
10752 op1->gtFlags |= GTF_EXCEPT;
10754 impPushOnStack(op1, tiRetVal);
10757 // stelem for reference and value types
10760 assertImp(sz == sizeof(unsigned));
10762 _impResolveToken(CORINFO_TOKENKIND_Class);
10764 JITDUMP(" %08X", resolvedToken.token);
10766 stelemClsHnd = resolvedToken.hClass;
10768 if (tiVerificationNeeded)
10770 typeInfo tiArray = impStackTop(2).seTypeInfo;
10771 typeInfo tiIndex = impStackTop(1).seTypeInfo;
10772 typeInfo tiValue = impStackTop().seTypeInfo;
10774 // As per ECMA, the 'index' specified can be either int32 or native int.
10775 Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10776 typeInfo arrayElem = verMakeTypeInfo(stelemClsHnd);
10778 Verify(tiArray.IsNullObjRef() || tiCompatibleWith(arrayElem, verGetArrayElemType(tiArray), false),
10779 "type operand incompatible with array element type");
10780 arrayElem.NormaliseForStack();
10781 Verify(tiCompatibleWith(tiValue, arrayElem, true), "value incompatible with type operand");
10784 // If it's a reference type just behave as though it's a stelem.ref instruction
10785 if (!eeIsValueClass(stelemClsHnd))
10787 goto STELEM_REF_POST_VERIFY;
10790 // Otherwise extract the type
10792 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(stelemClsHnd);
10793 lclTyp = JITtype2varType(jitTyp);
10794 goto ARR_ST_POST_VERIFY;
10797 case CEE_STELEM_REF:
10799 if (tiVerificationNeeded)
10801 typeInfo tiArray = impStackTop(2).seTypeInfo;
10802 typeInfo tiIndex = impStackTop(1).seTypeInfo;
10803 typeInfo tiValue = impStackTop().seTypeInfo;
10805 // As per ECMA, the 'index' specified can be either int32 or native int.
10806 Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10807 Verify(tiValue.IsObjRef(), "bad value");
10809 // we only check that it is an object reference; the helper does additional checks
10810 Verify(tiArray.IsNullObjRef() || verGetArrayElemType(tiArray).IsType(TI_REF), "bad array");
10813 arrayNodeTo = impStackTop(2).val;
10814 arrayNodeToIndex = impStackTop(1).val;
10815 arrayNodeFrom = impStackTop().val;
10818 // Note that it is not legal to optimize away CORINFO_HELP_ARRADDR_ST in a
10819 // lot of cases because of covariance, i.e. foo[] can be cast to object[].
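// For example (illustrative C#):
//     object[] arr = new string[1];
//     arr[0] = new object(); // must throw ArrayTypeMismatchException
// so the helper's run-time element-type check cannot be elided in general.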
10822 // Check for assignment to same array, i.e. arrLcl[i] = arrLcl[j]
10823 // This does not need CORINFO_HELP_ARRADDR_ST
10825 if (arrayNodeFrom->OperGet() == GT_INDEX && arrayNodeFrom->gtOp.gtOp1->gtOper == GT_LCL_VAR &&
10826 arrayNodeTo->gtOper == GT_LCL_VAR &&
10827 arrayNodeTo->gtLclVarCommon.gtLclNum == arrayNodeFrom->gtOp.gtOp1->gtLclVarCommon.gtLclNum &&
10828 !lvaTable[arrayNodeTo->gtLclVarCommon.gtLclNum].lvAddrExposed)
10831 goto ARR_ST_POST_VERIFY;
10834 // Check for assignment of NULL. This does not need CORINFO_HELP_ARRADDR_ST
10836 if (arrayNodeFrom->OperGet() == GT_CNS_INT)
10838 assert(arrayNodeFrom->gtType == TYP_REF && arrayNodeFrom->gtIntCon.gtIconVal == 0);
10841 goto ARR_ST_POST_VERIFY;
10844 STELEM_REF_POST_VERIFY:
10846 /* Call a helper function to do the assignment */
10847 op1 = gtNewHelperCallNode(CORINFO_HELP_ARRADDR_ST, TYP_VOID, 0, impPopList(3, &flags, nullptr));
10851 case CEE_STELEM_I1:
10854 case CEE_STELEM_I2:
10855 lclTyp = TYP_SHORT;
10858 lclTyp = TYP_I_IMPL;
10860 case CEE_STELEM_I4:
10863 case CEE_STELEM_I8:
10866 case CEE_STELEM_R4:
10867 lclTyp = TYP_FLOAT;
10869 case CEE_STELEM_R8:
10870 lclTyp = TYP_DOUBLE;
10875 if (tiVerificationNeeded)
10877 typeInfo tiArray = impStackTop(2).seTypeInfo;
10878 typeInfo tiIndex = impStackTop(1).seTypeInfo;
10879 typeInfo tiValue = impStackTop().seTypeInfo;
10882 // As per ECMA, the 'index' specified can be either int32 or native int.
10882 Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10883 typeInfo arrayElem = typeInfo(lclTyp);
10884 #ifdef _TARGET_64BIT_
10885 if (opcode == CEE_STELEM_I)
10887 arrayElem = typeInfo::nativeInt();
10889 #endif // _TARGET_64BIT_
10890 Verify(tiArray.IsNullObjRef() || typeInfo::AreEquivalent(verGetArrayElemType(tiArray), arrayElem),
10893 Verify(tiCompatibleWith(NormaliseForStack(tiValue), arrayElem.NormaliseForStack(), true),
10897 ARR_ST_POST_VERIFY:
10898 /* The strict order of evaluation is LHS-operands, RHS-operands,
10899 range-check, and then assignment. However, codegen currently
10900 does the range-check before evaluating the RHS-operands. So to
10901 maintain strict ordering, we spill the stack. */
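// (Illustrative: for "arr[i] = F()" where F() has side effects, those
// effects must still be observed even when "i" is out of range, because
// the range check is specified to happen after the RHS is evaluated.)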
10903 if (impStackTop().val->gtFlags & GTF_SIDE_EFFECT)
10905 impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
10906 "Strict ordering of exceptions for Array store"));
10909 /* Pull the new value from the stack */
10910 op2 = impPopStack().val;
10912 /* Pull the index value */
10913 op1 = impPopStack().val;
10915 /* Pull the array address */
10916 op3 = impPopStack().val;
10918 assertImp(op3->gtType == TYP_REF);
10919 if (op2->IsVarAddr())
10921 op2->gtType = TYP_I_IMPL;
10924 op3 = impCheckForNullPointer(op3);
10926 // Mark the block as containing an index expression
10928 if (op3->gtOper == GT_LCL_VAR)
10930 if (op1->gtOper == GT_LCL_VAR || op1->gtOper == GT_CNS_INT || op1->gtOper == GT_ADD)
10932 block->bbFlags |= BBF_HAS_IDX_LEN;
10933 optMethodFlags |= OMF_HAS_ARRAYREF;
10937 /* Create the index node */
10939 op1 = gtNewIndexRef(lclTyp, op3, op1);
10941 /* Create the assignment node and append it */
10943 if (lclTyp == TYP_STRUCT)
10945 assert(stelemClsHnd != DUMMY_INIT(NULL));
10947 op1->gtIndex.gtStructElemClass = stelemClsHnd;
10948 op1->gtIndex.gtIndElemSize = info.compCompHnd->getClassSize(stelemClsHnd);
10950 if (varTypeIsStruct(op1))
10952 op1 = impAssignStruct(op1, op2, stelemClsHnd, (unsigned)CHECK_SPILL_ALL);
10956 op2 = impImplicitR4orR8Cast(op2, op1->TypeGet());
10957 op1 = gtNewAssignNode(op1, op2);
10960 /* Mark the expression as containing an assignment */
10962 op1->gtFlags |= GTF_ASG;
10973 case CEE_ADD_OVF_UN:
10981 goto MATH_OP2_FLAGS;
10990 case CEE_SUB_OVF_UN:
10998 goto MATH_OP2_FLAGS;
11002 goto MATH_MAYBE_CALL_NO_OVF;
11007 case CEE_MUL_OVF_UN:
11014 goto MATH_MAYBE_CALL_OVF;
11016 // Other binary math operations
11020 goto MATH_MAYBE_CALL_NO_OVF;
11024 goto MATH_MAYBE_CALL_NO_OVF;
11028 goto MATH_MAYBE_CALL_NO_OVF;
11032 goto MATH_MAYBE_CALL_NO_OVF;
11034 MATH_MAYBE_CALL_NO_OVF:
11036 MATH_MAYBE_CALL_OVF:
11037 // Morpher has some complex logic about when to turn different
11038 // typed nodes on different platforms into helper calls. We
11039 // need to either duplicate that logic here, or just
11040 // pessimistically make all the nodes large enough to become
11041 // call nodes. Since call nodes aren't that much larger and
11042 // these opcodes are infrequent enough I chose the latter.
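// (Illustrative: a 64-bit GT_DIV on a 32-bit target is later morphed into
// a call to a division helper, so the node allocated here must already be
// large enough to be rewritten in place as a GT_CALL.)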
11044 goto MATH_OP2_FLAGS;
11056 MATH_OP2: // For default values of 'ovfl' and 'callNode'
11061 MATH_OP2_FLAGS: // If 'ovfl' and 'callNode' have already been set
11063 /* Pull two values and push back the result */
11065 if (tiVerificationNeeded)
11067 const typeInfo& tiOp1 = impStackTop(1).seTypeInfo;
11068 const typeInfo& tiOp2 = impStackTop().seTypeInfo;
11070 Verify(tiCompatibleWith(tiOp1, tiOp2, true), "different arg type");
11071 if (oper == GT_ADD || oper == GT_DIV || oper == GT_SUB || oper == GT_MUL || oper == GT_MOD)
11073 Verify(tiOp1.IsNumberType(), "not number");
11077 Verify(tiOp1.IsIntegerType(), "not integer");
11080 Verify(!ovfl || tiOp1.IsIntegerType(), "not integer");
11084 #ifdef _TARGET_64BIT_
11085 if (tiOp2.IsNativeIntType())
11089 #endif // _TARGET_64BIT_
11092 op2 = impPopStack().val;
11093 op1 = impPopStack().val;
11095 #if !CPU_HAS_FP_SUPPORT
11096 if (varTypeIsFloating(op1->gtType))
11101 /* Can't do arithmetic with references */
11102 assertImp(genActualType(op1->TypeGet()) != TYP_REF && genActualType(op2->TypeGet()) != TYP_REF);
11104 // Change both to TYP_I_IMPL (impBashVarAddrsToI won't change if it's a true byref, only
11105 // if it is in the stack)
11106 impBashVarAddrsToI(op1, op2);
11108 type = impGetByRefResultType(oper, uns, &op1, &op2);
11110 assert(!ovfl || !varTypeIsFloating(op1->gtType));
11112 /* Special case: "int+0", "int-0", "int*1", "int/1" */
11114 if (op2->gtOper == GT_CNS_INT)
11116 if ((op2->IsIntegralConst(0) && (oper == GT_ADD || oper == GT_SUB)) ||
11117 (op2->IsIntegralConst(1) && (oper == GT_MUL || oper == GT_DIV)))
11120 impPushOnStack(op1, tiRetVal);
11125 #if !FEATURE_X87_DOUBLES
11126 // We can generate a TYP_FLOAT operation that has a TYP_DOUBLE operand
11128 if (varTypeIsFloating(type) && varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType))
11130 if (op1->TypeGet() != type)
11132 // We insert a cast of op1 to 'type'
11133 op1 = gtNewCastNode(type, op1, type);
11135 if (op2->TypeGet() != type)
11137 // We insert a cast of op2 to 'type'
11138 op2 = gtNewCastNode(type, op2, type);
11141 #endif // !FEATURE_X87_DOUBLES
11143 #if SMALL_TREE_NODES
11146 /* These operators can later be transformed into 'GT_CALL' */
11148 assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MUL]);
11149 #ifndef _TARGET_ARM_
11150 assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_DIV]);
11151 assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UDIV]);
11152 assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MOD]);
11153 assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UMOD]);
11155 // It's tempting to use LargeOpOpcode() here, but this logic is *not* saying
11156 // that we'll need to transform into a general large node, but rather specifically
11157 // to a call: by doing it this way, things keep working if there are multiple sizes,
11158 // and a CALL is no longer the largest.
11159 // That said, as of now it *is* a large node, so we'll do this with an assert rather
11161 assert(GenTree::s_gtNodeSizes[GT_CALL] == TREE_NODE_SZ_LARGE);
11162 op1 = new (this, GT_CALL) GenTreeOp(oper, type, op1, op2 DEBUGARG(/*largeNode*/ true));
11165 #endif // SMALL_TREE_NODES
11167 op1 = gtNewOperNode(oper, type, op1, op2);
11170 /* Special case: integer/long division may throw an exception */
11172 if (varTypeIsIntegral(op1->TypeGet()) && op1->OperMayThrow())
11174 op1->gtFlags |= GTF_EXCEPT;
11179 assert(oper == GT_ADD || oper == GT_SUB || oper == GT_MUL);
11180 if (ovflType != TYP_UNKNOWN)
11182 op1->gtType = ovflType;
11184 op1->gtFlags |= (GTF_EXCEPT | GTF_OVERFLOW);
11187 op1->gtFlags |= GTF_UNSIGNED;
11191 impPushOnStack(op1, tiRetVal);
11206 if (tiVerificationNeeded)
11208 const typeInfo& tiVal = impStackTop(1).seTypeInfo;
11209 const typeInfo& tiShift = impStackTop(0).seTypeInfo;
11210 Verify(tiVal.IsIntegerType() && tiShift.IsType(TI_INT), "Bad shift args");
11213 op2 = impPopStack().val;
11214 op1 = impPopStack().val; // operand to be shifted
11215 impBashVarAddrsToI(op1, op2);
11217 type = genActualType(op1->TypeGet());
11218 op1 = gtNewOperNode(oper, type, op1, op2);
11220 impPushOnStack(op1, tiRetVal);
11224 if (tiVerificationNeeded)
11226 tiRetVal = impStackTop().seTypeInfo;
11227 Verify(tiRetVal.IsIntegerType(), "bad int value");
11230 op1 = impPopStack().val;
11231 impBashVarAddrsToI(op1, nullptr);
11232 type = genActualType(op1->TypeGet());
11233 impPushOnStack(gtNewOperNode(GT_NOT, type, op1), tiRetVal);
11237 if (tiVerificationNeeded)
11239 tiRetVal = impStackTop().seTypeInfo;
11240 Verify(tiRetVal.IsType(TI_DOUBLE), "bad R value");
11242 op1 = impPopStack().val;
11243 type = op1->TypeGet();
11244 op1 = gtNewOperNode(GT_CKFINITE, type, op1);
11245 op1->gtFlags |= GTF_EXCEPT;
11247 impPushOnStack(op1, tiRetVal);
11252 val = getI4LittleEndian(codeAddr); // jump distance
11253 jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int32)) + val);
11257 val = getI1LittleEndian(codeAddr); // jump distance
11258 jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int8)) + val);
11262 if (compIsForInlining())
11264 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_LEAVE);
11268 JITDUMP(" %04X", jmpAddr);
11269 if (block->bbJumpKind != BBJ_LEAVE)
11271 impResetLeaveBlock(block, jmpAddr);
11274 assert(jmpAddr == block->bbJumpDest->bbCodeOffs);
11275 impImportLeave(block);
11276 impNoteBranchOffs();
11282 jmpDist = (sz == 1) ? getI1LittleEndian(codeAddr) : getI4LittleEndian(codeAddr);
11284 if (compIsForInlining() && jmpDist == 0)
11289 impNoteBranchOffs();
11295 case CEE_BRFALSE_S:
11297 /* Pop the comparand (now there's a neat term) from the stack */
11298 if (tiVerificationNeeded)
11300 typeInfo& tiVal = impStackTop().seTypeInfo;
11301 Verify(tiVal.IsObjRef() || tiVal.IsByRef() || tiVal.IsIntegerType() || tiVal.IsMethod(),
11305 op1 = impPopStack().val;
11306 type = op1->TypeGet();
11308 // brfalse and brtrue are only allowed on I4, refs, and byrefs.
11309 if (!opts.MinOpts() && !opts.compDbgCode && block->bbJumpDest == block->bbNext)
11311 block->bbJumpKind = BBJ_NONE;
11313 if (op1->gtFlags & GTF_GLOB_EFFECT)
11315 op1 = gtUnusedValNode(op1);
11324 if (op1->OperIsCompare())
11326 if (opcode == CEE_BRFALSE || opcode == CEE_BRFALSE_S)
11328 // Flip the sense of the compare
11330 op1 = gtReverseCond(op1);
11335 /* We'll compare against an equally-sized integer 0 */
11336 /* For small types, we always compare against int */
11337 op2 = gtNewZeroConNode(genActualType(op1->gtType));
11339 /* Create the comparison operator and try to fold it */
11341 oper = (opcode == CEE_BRTRUE || opcode == CEE_BRTRUE_S) ? GT_NE : GT_EQ;
11342 op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
11349 /* Fold comparison if we can */
11351 op1 = gtFoldExpr(op1);
11353 /* Try to fold the really simple cases like 'iconst *, ifne/ifeq'*/
11354 /* Don't make any blocks unreachable in import only mode */
11356 if ((op1->gtOper == GT_CNS_INT) && !compIsForImportOnly())
11358 /* gtFoldExpr() should prevent this as we don't want to make any blocks
11359 unreachable under compDbgCode */
11360 assert(!opts.compDbgCode);
11362 BBjumpKinds foldedJumpKind = (BBjumpKinds)(op1->gtIntCon.gtIconVal ? BBJ_ALWAYS : BBJ_NONE);
11363 assertImp((block->bbJumpKind == BBJ_COND) // normal case
11364 || (block->bbJumpKind == foldedJumpKind)); // this can happen if we are reimporting the
11365 // block for the second time
11367 block->bbJumpKind = foldedJumpKind;
11371 if (op1->gtIntCon.gtIconVal)
11373 printf("\nThe conditional jump becomes an unconditional jump to BB%02u\n",
11374 block->bbJumpDest->bbNum);
11378 printf("\nThe block falls through into the next BB%02u\n", block->bbNext->bbNum);
11385 op1 = gtNewOperNode(GT_JTRUE, TYP_VOID, op1);
11387 /* GT_JTRUE is handled specially for non-empty stacks. See 'addStmt'
11388 in impImportBlock(block). For correct line numbers, spill stack. */
11390 if (opts.compDbgCode && impCurStmtOffs != BAD_IL_OFFSET)
11392 impSpillStackEnsure(true);
11419 if (tiVerificationNeeded)
11421 verVerifyCond(impStackTop(1).seTypeInfo, impStackTop().seTypeInfo, opcode);
11422 tiRetVal = typeInfo(TI_INT);
11425 op2 = impPopStack().val;
11426 op1 = impPopStack().val;
11428 #ifdef _TARGET_64BIT_
11429 if (varTypeIsI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_INT))
11431 op2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
11433 else if (varTypeIsI(op2->TypeGet()) && (genActualType(op1->TypeGet()) == TYP_INT))
11435 op1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
11437 #endif // _TARGET_64BIT_
11439 assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
11440 varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet()) ||
11441 varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
11443 /* Create the comparison node */
11445 op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
11447 /* TODO: setting both flags when only one is appropriate */
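// (The two flags serve different operand kinds: GTF_RELOP_NAN_UN gives the
// "unordered" NaN semantics for floating-point compares, while GTF_UNSIGNED
// gives unsigned semantics for integer compares; any one compare only ever
// needs one of them.)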
11448 if (opcode == CEE_CGT_UN || opcode == CEE_CLT_UN)
11450 op1->gtFlags |= GTF_RELOP_NAN_UN | GTF_UNSIGNED;
11453 impPushOnStack(op1, tiRetVal);
11459 goto CMP_2_OPs_AND_BR;
11464 goto CMP_2_OPs_AND_BR;
11469 goto CMP_2_OPs_AND_BR_UN;
11474 goto CMP_2_OPs_AND_BR;
11479 goto CMP_2_OPs_AND_BR_UN;
11484 goto CMP_2_OPs_AND_BR;
11489 goto CMP_2_OPs_AND_BR_UN;
11494 goto CMP_2_OPs_AND_BR;
11499 goto CMP_2_OPs_AND_BR_UN;
11504 goto CMP_2_OPs_AND_BR_UN;
11506 CMP_2_OPs_AND_BR_UN:
11509 goto CMP_2_OPs_AND_BR_ALL;
11513 goto CMP_2_OPs_AND_BR_ALL;
11514 CMP_2_OPs_AND_BR_ALL:
11516 if (tiVerificationNeeded)
11518 verVerifyCond(impStackTop(1).seTypeInfo, impStackTop().seTypeInfo, opcode);
11521 /* Pull two values */
11522 op2 = impPopStack().val;
11523 op1 = impPopStack().val;
11525 #ifdef _TARGET_64BIT_
11526 if ((op1->TypeGet() == TYP_I_IMPL) && (genActualType(op2->TypeGet()) == TYP_INT))
11528 op2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
11530 else if ((op2->TypeGet() == TYP_I_IMPL) && (genActualType(op1->TypeGet()) == TYP_INT))
11532 op1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
11534 #endif // _TARGET_64BIT_
11536 assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
11537 varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet()) ||
11538 varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
11540 if (!opts.MinOpts() && !opts.compDbgCode && block->bbJumpDest == block->bbNext)
11542 block->bbJumpKind = BBJ_NONE;
11544 if (op1->gtFlags & GTF_GLOB_EFFECT)
11546 impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
11547 "Branch to next Optimization, op1 side effect"));
11548 impAppendTree(gtUnusedValNode(op1), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
11550 if (op2->gtFlags & GTF_GLOB_EFFECT)
11552 impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
11553 "Branch to next Optimization, op2 side effect"));
11554 impAppendTree(gtUnusedValNode(op2), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
11558 if ((op1->gtFlags | op2->gtFlags) & GTF_GLOB_EFFECT)
11560 impNoteLastILoffs();
11565 #if !FEATURE_X87_DOUBLES
11566 // We can generate a compare of different-sized floating point op1 and op2
11567 // We insert a cast
11569 if (varTypeIsFloating(op1->TypeGet()))
11571 if (op1->TypeGet() != op2->TypeGet())
11573 assert(varTypeIsFloating(op2->TypeGet()));
11575 // say op1=double, op2=float. To avoid loss of precision
11576 // while comparing, op2 is converted to double and double
11577 // comparison is done.
11578 if (op1->TypeGet() == TYP_DOUBLE)
11580 // We insert a cast of op2 to TYP_DOUBLE
11581 op2 = gtNewCastNode(TYP_DOUBLE, op2, TYP_DOUBLE);
11583 else if (op2->TypeGet() == TYP_DOUBLE)
11585 // We insert a cast of op1 to TYP_DOUBLE
11586 op1 = gtNewCastNode(TYP_DOUBLE, op1, TYP_DOUBLE);
11590 #endif // !FEATURE_X87_DOUBLES
11592 /* Create and append the operator */
11594 op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
11598 op1->gtFlags |= GTF_UNSIGNED;
11603 op1->gtFlags |= GTF_RELOP_NAN_UN;
11609 assert(!compIsForInlining());
11611 if (tiVerificationNeeded)
11613 Verify(impStackTop().seTypeInfo.IsType(TI_INT), "Bad switch val");
11615 /* Pop the switch value off the stack */
11616 op1 = impPopStack().val;
11617 assertImp(genActualTypeIsIntOrI(op1->TypeGet()));
11619 #ifdef _TARGET_64BIT_
11620 // Widen 'op1' on 64-bit targets
11621 if (op1->TypeGet() != TYP_I_IMPL)
11623 if (op1->OperGet() == GT_CNS_INT)
11625 op1->gtType = TYP_I_IMPL;
11629 op1 = gtNewCastNode(TYP_I_IMPL, op1, TYP_I_IMPL);
11632 #endif // _TARGET_64BIT_
11633 assert(genActualType(op1->TypeGet()) == TYP_I_IMPL);
11635 /* We can create a switch node */
11637 op1 = gtNewOperNode(GT_SWITCH, TYP_VOID, op1);
11639 val = (int)getU4LittleEndian(codeAddr);
11640 codeAddr += 4 + val * 4; // skip over the switch-table
11644 /************************** Casting OPCODES ***************************/
11646 case CEE_CONV_OVF_I1:
11649 case CEE_CONV_OVF_I2:
11650 lclTyp = TYP_SHORT;
11652 case CEE_CONV_OVF_I:
11653 lclTyp = TYP_I_IMPL;
11655 case CEE_CONV_OVF_I4:
11658 case CEE_CONV_OVF_I8:
11662 case CEE_CONV_OVF_U1:
11663 lclTyp = TYP_UBYTE;
11665 case CEE_CONV_OVF_U2:
11668 case CEE_CONV_OVF_U:
11669 lclTyp = TYP_U_IMPL;
11671 case CEE_CONV_OVF_U4:
11674 case CEE_CONV_OVF_U8:
11675 lclTyp = TYP_ULONG;
11678 case CEE_CONV_OVF_I1_UN:
11681 case CEE_CONV_OVF_I2_UN:
11682 lclTyp = TYP_SHORT;
11684 case CEE_CONV_OVF_I_UN:
11685 lclTyp = TYP_I_IMPL;
11687 case CEE_CONV_OVF_I4_UN:
11690 case CEE_CONV_OVF_I8_UN:
11694 case CEE_CONV_OVF_U1_UN:
11695 lclTyp = TYP_UBYTE;
11697 case CEE_CONV_OVF_U2_UN:
11700 case CEE_CONV_OVF_U_UN:
11701 lclTyp = TYP_U_IMPL;
11703 case CEE_CONV_OVF_U4_UN:
11706 case CEE_CONV_OVF_U8_UN:
11707 lclTyp = TYP_ULONG;
11712 goto CONV_OVF_COMMON;
11715 goto CONV_OVF_COMMON;
11725 lclTyp = TYP_SHORT;
11728 lclTyp = TYP_I_IMPL;
11738 lclTyp = TYP_UBYTE;
11743 #if (REGSIZE_BYTES == 8)
11745 lclTyp = TYP_U_IMPL;
11749 lclTyp = TYP_U_IMPL;
11756 lclTyp = TYP_ULONG;
11760 lclTyp = TYP_FLOAT;
11763 lclTyp = TYP_DOUBLE;
11766 case CEE_CONV_R_UN:
11767 lclTyp = TYP_DOUBLE;
11781 // just check that we have a number on the stack
11782 if (tiVerificationNeeded)
11784 const typeInfo& tiVal = impStackTop().seTypeInfo;
11785 Verify(tiVal.IsNumberType(), "bad arg");
11787 #ifdef _TARGET_64BIT_
11788 bool isNative = false;
11792 case CEE_CONV_OVF_I:
11793 case CEE_CONV_OVF_I_UN:
11795 case CEE_CONV_OVF_U:
11796 case CEE_CONV_OVF_U_UN:
11800 // leave 'isNative' = false;
11805 tiRetVal = typeInfo::nativeInt();
11808 #endif // _TARGET_64BIT_
11810 tiRetVal = typeInfo(lclTyp).NormaliseForStack();
11814 // Only conversions from FLOAT or DOUBLE to an integer type
11815 // and conversions from ULONG (or LONG on ARM) to DOUBLE are morphed to calls
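// (Illustrative: e.g. conv.r8 applied to a ulong typically becomes a call
// to a helper such as CORINFO_HELP_ULNG2DBL on targets without a direct
// unsigned-long-to-double instruction.)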
11817 if (varTypeIsFloating(lclTyp))
11819 callNode = varTypeIsLong(impStackTop().val) || uns // uint->dbl gets turned into uint->long->dbl
11820 #ifdef _TARGET_64BIT_
11821 // TODO-ARM64-Bug?: This was AMD64; I enabled it for ARM64 also. OK?
11822 // TYP_BYREF could be used as TYP_I_IMPL which is long.
11823 // TODO-CQ: remove this when we lower casts long/ulong --> float/double
11824 // and generate SSE2 code instead of going through helper calls.
11825 || (impStackTop().val->TypeGet() == TYP_BYREF)
11831 callNode = varTypeIsFloating(impStackTop().val->TypeGet());
11834 // At this point uns, ovfl, callNode are all set
11836 op1 = impPopStack().val;
11837 impBashVarAddrsToI(op1);
11839 if (varTypeIsSmall(lclTyp) && !ovfl && op1->gtType == TYP_INT && op1->gtOper == GT_AND)
11841 op2 = op1->gtOp.gtOp2;
11843 if (op2->gtOper == GT_CNS_INT)
11845 ssize_t ival = op2->gtIntCon.gtIconVal;
11846 ssize_t mask, umask;
11862 assert(!"unexpected type");
11866 if (((ival & umask) == ival) || ((ival & mask) == ival && uns))
11868 /* Toss the cast, it's a waste of time */
11870 impPushOnStack(op1, tiRetVal);
11873 else if (ival == mask)
11875 /* Toss the masking, it's a waste of time, since
11876 we sign-extend from the small value anyway */
11878 op1 = op1->gtOp.gtOp1;
11883 /* The 'op2' sub-operand of a cast is the 'real' type number,
11884 since the result of a cast to one of the 'small' integer
11885 types is an integer.
11888 type = genActualType(lclTyp);
11890 #if SMALL_TREE_NODES
11893 op1 = gtNewCastNodeL(type, op1, lclTyp);
11896 #endif // SMALL_TREE_NODES
11898 op1 = gtNewCastNode(type, op1, lclTyp);
11903 op1->gtFlags |= (GTF_OVERFLOW | GTF_EXCEPT);
11907 op1->gtFlags |= GTF_UNSIGNED;
11909 impPushOnStack(op1, tiRetVal);
11913 if (tiVerificationNeeded)
11915 tiRetVal = impStackTop().seTypeInfo;
11916 Verify(tiRetVal.IsNumberType(), "Bad arg");
11919 op1 = impPopStack().val;
11920 impBashVarAddrsToI(op1, nullptr);
11921 impPushOnStack(gtNewOperNode(GT_NEG, genActualType(op1->gtType), op1), tiRetVal);
11925 if (tiVerificationNeeded)
11930 /* Pull the top value from the stack */
11932 op1 = impPopStack(clsHnd).val;
11934 /* Get hold of the type of the value being duplicated */
11936 lclTyp = genActualType(op1->gtType);
11938 /* Does the value have any side effects? */
11940 if ((op1->gtFlags & GTF_SIDE_EFFECT) || opts.compDbgCode)
11942 // Since we are throwing away the value, just normalize
11943 // it to its address. This is more efficient.
11945 if (varTypeIsStruct(op1))
11947 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
11948 // Non-calls, such as obj or ret_expr, have to go through this.
11949 // Calls with large struct return value have to go through this.
11950 // Helper calls with small struct return value also have to go
11951 // through this since they do not follow Unix calling convention.
11952 if (op1->gtOper != GT_CALL || !IsMultiRegReturnedType(clsHnd) ||
11953 op1->AsCall()->gtCallType == CT_HELPER)
11954 #endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
11956 op1 = impGetStructAddr(op1, clsHnd, (unsigned)CHECK_SPILL_ALL, false);
11960 // If op1 is a non-overflow cast, throw it away since it is useless.
11961 // Another reason for throwing away the useless cast is in the context of
11962 // implicit tail calls when the operand of pop is GT_CAST(GT_CALL(..)).
11963 // The cast gets added as part of importing GT_CALL, which gets in the way
11964 // of fgMorphCall() on the forms of tail call nodes that we assert.
11965 if ((op1->gtOper == GT_CAST) && !op1->gtOverflow())
11967 op1 = op1->gtOp.gtOp1;
11970 // If 'op1' is an expression, create an assignment node.
11971 // Helps analyses (like CSE) work properly.
11973 if (op1->gtOper != GT_CALL)
11975 op1 = gtUnusedValNode(op1);
11978 /* Append the value to the tree list */
11982 /* No side effects - just throw the <BEEP> thing away */
11987 if (tiVerificationNeeded)
11989 // Dup could start the beginning of a delegate creation sequence; remember that
11990 delegateCreateStart = codeAddr - 1;
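// A typical verifiable delegate-creation sequence looks like
// (illustrative IL):
//     dup
//     ldvirtftn  instance void C::M()
//     newobj     instance void SomeDelegate::.ctor(object, native int)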
11994 // Convert a (dup, stloc) sequence into a (stloc, ldloc) sequence in the following cases:
11995 // - If this is non-debug code - so that CSE will recognize the two as equal.
11996 // This helps eliminate a redundant bounds check in cases such as:
11997 // ariba[i+3] += some_value;
11998 // - If the top of the stack is a non-leaf that may be expensive to clone.
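// For example (illustrative IL):
//     <expr>; dup; stloc.1   becomes   <expr>; stloc.1; ldloc.1
// so both consumers of the value read the same local, which CSE (and
// redundant range-check elimination) can then recognize as equal.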
12000 if (codeAddr < codeEndp)
12002 OPCODE nextOpcode = (OPCODE)getU1LittleEndian(codeAddr);
12003 if (impIsAnySTLOC(nextOpcode))
12005 if (!opts.compDbgCode)
12007 insertLdloc = true;
12010 GenTree* stackTop = impStackTop().val;
12011 if (!stackTop->IsIntegralConst(0) && !stackTop->IsFPZero() && !stackTop->IsLocal())
12013 insertLdloc = true;
12019 /* Pull the top value from the stack */
12020 op1 = impPopStack(tiRetVal);
12022 /* Clone the value */
12023 op1 = impCloneExpr(op1, &op2, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_ALL,
12024 nullptr DEBUGARG("DUP instruction"));
12026 /* Either the tree started with no global effects, or impCloneExpr
12027 evaluated the tree to a temp and returned two copies of that
12028 temp. Either way, neither op1 nor op2 should have side effects.
12030 assert(!(op1->gtFlags & GTF_GLOB_EFFECT) && !(op2->gtFlags & GTF_GLOB_EFFECT));
12032 /* Push the tree/temp back on the stack */
12033 impPushOnStack(op1, tiRetVal);
12035 /* Push the copy on the stack */
12036 impPushOnStack(op2, tiRetVal);
12044 lclTyp = TYP_SHORT;
12053 lclTyp = TYP_I_IMPL;
12055 case CEE_STIND_REF:
12059 lclTyp = TYP_FLOAT;
12062 lclTyp = TYP_DOUBLE;
12066 if (tiVerificationNeeded)
12068 typeInfo instrType(lclTyp);
12069 #ifdef _TARGET_64BIT_
12070 if (opcode == CEE_STIND_I)
12072 instrType = typeInfo::nativeInt();
12074 #endif // _TARGET_64BIT_
12075 verVerifySTIND(impStackTop(1).seTypeInfo, impStackTop(0).seTypeInfo, instrType);
12079 compUnsafeCastUsed = true; // Have to go conservative
12084 op2 = impPopStack().val; // value to store
12085 op1 = impPopStack().val; // address to store to
12087 // you can indirect off of a TYP_I_IMPL (if we are in C) or a BYREF
12088 assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
12090 impBashVarAddrsToI(op1, op2);
12092 op2 = impImplicitR4orR8Cast(op2, lclTyp);
12094 #ifdef _TARGET_64BIT_
12095 // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
12096 if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType))
12098 op2->gtType = TYP_I_IMPL;
12102 // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
12104 if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT))
12106 assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
12107 op2 = gtNewCastNode(TYP_INT, op2, TYP_INT);
12111 // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
12111 if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT))
12113 assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
12114 op2 = gtNewCastNode(TYP_I_IMPL, op2, TYP_I_IMPL);
12117 #endif // _TARGET_64BIT_
12119 if (opcode == CEE_STIND_REF)
12121 // STIND_REF can be used to store TYP_INT, TYP_I_IMPL, TYP_REF, or TYP_BYREF
12122 assertImp(varTypeIsIntOrI(op2->gtType) || varTypeIsGC(op2->gtType));
12123 lclTyp = genActualType(op2->TypeGet());
12126 // Check target type.
12128 if (op2->gtType == TYP_BYREF || lclTyp == TYP_BYREF)
12130 if (op2->gtType == TYP_BYREF)
12132 assertImp(lclTyp == TYP_BYREF || lclTyp == TYP_I_IMPL);
12134 else if (lclTyp == TYP_BYREF)
12136 assertImp(op2->gtType == TYP_BYREF || varTypeIsIntOrI(op2->gtType));
12141 assertImp(genActualType(op2->gtType) == genActualType(lclTyp) ||
12142 ((lclTyp == TYP_I_IMPL) && (genActualType(op2->gtType) == TYP_INT)) ||
12143 (varTypeIsFloating(op2->gtType) && varTypeIsFloating(lclTyp)));
12147 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
12149 // stind could point anywhere, for example a boxed class static int
12150 op1->gtFlags |= GTF_IND_TGTANYWHERE;
12152 if (prefixFlags & PREFIX_VOLATILE)
12154 assert(op1->OperGet() == GT_IND);
12155 op1->gtFlags |= GTF_DONT_CSE; // Can't CSE a volatile
12156 op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
12157 op1->gtFlags |= GTF_IND_VOLATILE;
12160 if (prefixFlags & PREFIX_UNALIGNED)
12162 assert(op1->OperGet() == GT_IND);
12163 op1->gtFlags |= GTF_IND_UNALIGNED;
12166 op1 = gtNewAssignNode(op1, op2);
12167 op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
12169 // Spill side-effects AND global-data-accesses
12170 if (verCurrentState.esStackDepth > 0)
12172 impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STIND"));
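// The CEE_LDIND_* family below mirrors the stores above: each opcode selects
// the loaded type in lclTyp, after which a single GT_IND(lclTyp, addr) tree
// is built and pushed.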
lclTyp = TYP_SHORT;

case CEE_LDIND_REF:
lclTyp = TYP_I_IMPL;
lclTyp = TYP_FLOAT;
lclTyp = TYP_DOUBLE;
lclTyp = TYP_UBYTE;

if (tiVerificationNeeded)
typeInfo lclTiType(lclTyp);
#ifdef _TARGET_64BIT_
if (opcode == CEE_LDIND_I)
lclTiType = typeInfo::nativeInt();
#endif // _TARGET_64BIT_
tiRetVal = verVerifyLDIND(impStackTop().seTypeInfo, lclTiType);
tiRetVal.NormaliseForStack();
compUnsafeCastUsed = true; // Have to go conservative

op1 = impPopStack().val; // address to load from
impBashVarAddrsToI(op1);

#ifdef _TARGET_64BIT_
// Allow an upcast of op1 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
if (genActualType(op1->gtType) == TYP_INT)
assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
op1 = gtNewCastNode(TYP_I_IMPL, op1, TYP_I_IMPL);
#endif // _TARGET_64BIT_

assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);

op1 = gtNewOperNode(GT_IND, lclTyp, op1);

// ldind could point anywhere, for example a boxed class static int
op1->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);

if (prefixFlags & PREFIX_VOLATILE)
assert(op1->OperGet() == GT_IND);
op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
op1->gtFlags |= GTF_IND_VOLATILE;

if (prefixFlags & PREFIX_UNALIGNED)
assert(op1->OperGet() == GT_IND);
op1->gtFlags |= GTF_IND_UNALIGNED;

impPushOnStack(op1, tiRetVal);
case CEE_UNALIGNED:

val = getU1LittleEndian(codeAddr);
JITDUMP(" %u", val);
if ((val != 1) && (val != 2) && (val != 4))
BADCODE("Alignment unaligned. must be 1, 2, or 4");

Verify(!(prefixFlags & PREFIX_UNALIGNED), "Multiple unaligned. prefixes");
prefixFlags |= PREFIX_UNALIGNED;

impValidateMemoryAccessOpcode(codeAddr, codeEndp, false);

opcode     = (OPCODE)getU1LittleEndian(codeAddr);
codeAddr += sizeof(__int8);
opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
goto DECODE_OPCODE;
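// Prefix opcodes do not produce a tree of their own: after recording the
// prefix flag we jump straight back to DECODE_OPCODE so that the flag is
// applied to the instruction that follows the prefix.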
Verify(!(prefixFlags & PREFIX_VOLATILE), "Multiple volatile. prefixes");
prefixFlags |= PREFIX_VOLATILE;

impValidateMemoryAccessOpcode(codeAddr, codeEndp, true);
// Need to do a lookup here so that we perform an access check
// and do a NOWAY if protections are violated
_impResolveToken(CORINFO_TOKENKIND_Method);

JITDUMP(" %08X", resolvedToken.token);

eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef*/,
              addVerifyFlag(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN)),
              &callInfo);

// This check really only applies to intrinsic Array.Address methods
if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE)
NO_WAY("Currently do not support LDFTN of Parameterized functions");

// Do this before DO_LDFTN since CEE_LDVIRTFN does it on its own.
impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);

if (tiVerificationNeeded)
// LDFTN could start the beginning of a delegate creation sequence; remember that
delegateCreateStart = codeAddr - 2;

// check any constraints on the callee's class and type parameters
VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
               "method has unsatisfied class constraints");
VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(resolvedToken.hClass,
                                                            resolvedToken.hMethod),
               "method has unsatisfied method constraints");

mflags = callInfo.verMethodFlags;
Verify(!(mflags & CORINFO_FLG_CONSTRUCTOR), "LDFTN on a constructor");

op1 = impMethodPointer(&resolvedToken, &callInfo);
if (compDonotInline())

impPushOnStack(op1, typeInfo(resolvedToken.hMethod));
case CEE_LDVIRTFTN:

/* Get the method token */

_impResolveToken(CORINFO_TOKENKIND_Method);

JITDUMP(" %08X", resolvedToken.token);

eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef */,
              addVerifyFlag(combine(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN),
                                    CORINFO_CALLINFO_CALLVIRT)),
              &callInfo);

// This check really only applies to intrinsic Array.Address methods
if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE)
NO_WAY("Currently do not support LDFTN of Parameterized functions");

mflags = callInfo.methodFlags;

impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);

if (compIsForInlining())
if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL))
compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDVIRTFN_ON_NON_VIRTUAL);

CORINFO_SIG_INFO& ftnSig = callInfo.sig;

if (tiVerificationNeeded)
Verify(ftnSig.hasThis(), "ldvirtftn on a static method");
Verify(!(mflags & CORINFO_FLG_CONSTRUCTOR), "LDVIRTFTN on a constructor");

// JIT32 verifier rejects verifiable ldvirtftn pattern
typeInfo declType =
    verMakeTypeInfo(resolvedToken.hClass, true); // Change TI_STRUCT to TI_REF when necessary

typeInfo arg = impStackTop().seTypeInfo;
Verify((arg.IsType(TI_REF) || arg.IsType(TI_NULL)) && tiCompatibleWith(arg, declType, true),

CORINFO_CLASS_HANDLE instanceClassHnd = info.compClassHnd;
if (!(arg.IsType(TI_NULL) || (mflags & CORINFO_FLG_STATIC)))
instanceClassHnd = arg.GetClassHandleForObjRef();

// check any constraints on the method's class and type parameters
VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
               "method has unsatisfied class constraints");
VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(resolvedToken.hClass,
                                                            resolvedToken.hMethod),
               "method has unsatisfied method constraints");

if (mflags & CORINFO_FLG_PROTECTED)
Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
       "Accessing protected method through wrong type.");

/* Get the object-ref */
op1 = impPopStack().val;
assertImp(op1->gtType == TYP_REF);

if (opts.IsReadyToRun())
if (callInfo.kind != CORINFO_VIRTUALCALL_LDVIRTFTN)
if (op1->gtFlags & GTF_SIDE_EFFECT)
op1 = gtUnusedValNode(op1);
impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
else if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL))
if (op1->gtFlags & GTF_SIDE_EFFECT)
op1 = gtUnusedValNode(op1);
impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);

GenTreePtr fptr = impImportLdvirtftn(op1, &resolvedToken, &callInfo);
if (compDonotInline())

impPushOnStack(fptr, typeInfo(resolvedToken.hMethod));
case CEE_CONSTRAINED:

assertImp(sz == sizeof(unsigned));
impResolveToken(codeAddr, &constrainedResolvedToken, CORINFO_TOKENKIND_Constrained);
codeAddr += sizeof(unsigned); // prefix instructions must increment codeAddr manually
JITDUMP(" (%08X) ", constrainedResolvedToken.token);

Verify(!(prefixFlags & PREFIX_CONSTRAINED), "Multiple constrained. prefixes");
prefixFlags |= PREFIX_CONSTRAINED;

OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
if (actualOpcode != CEE_CALLVIRT)
BADCODE("constrained. has to be followed by callvirt");

JITDUMP(" readonly.");

Verify(!(prefixFlags & PREFIX_READONLY), "Multiple readonly. prefixes");
prefixFlags |= PREFIX_READONLY;

OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
if (actualOpcode != CEE_LDELEMA && !impOpcodeIsCallOpcode(actualOpcode))
BADCODE("readonly. has to be followed by ldelema or call");

Verify(!(prefixFlags & PREFIX_TAILCALL_EXPLICIT), "Multiple tailcall. prefixes");
prefixFlags |= PREFIX_TAILCALL_EXPLICIT;

OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
if (!impOpcodeIsCallOpcode(actualOpcode))
BADCODE("tailcall. has to be followed by call, callvirt or calli");
/* Since we will implicitly insert newObjThisPtr at the start of the
   argument list, spill any GTF_ORDER_SIDEEFF */
impSpillSpecialSideEff();

/* NEWOBJ does not respond to TAIL */
prefixFlags &= ~PREFIX_TAILCALL_EXPLICIT;

/* NEWOBJ does not respond to CONSTRAINED */
prefixFlags &= ~PREFIX_CONSTRAINED;

#if COR_JIT_EE_VERSION > 460
_impResolveToken(CORINFO_TOKENKIND_NewObj);
#else
_impResolveToken(CORINFO_TOKENKIND_Method);
#endif

eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef*/,
              addVerifyFlag(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_ALLOWINSTPARAM)),
              &callInfo);

if (compIsForInlining())
if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
// Check to see if this call violates the boundary.
compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_SECURITY);

mflags = callInfo.methodFlags;

if ((mflags & (CORINFO_FLG_STATIC | CORINFO_FLG_ABSTRACT)) != 0)
BADCODE("newobj on static or abstract method");

// Insert the security callout before any actual code is generated
impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);

// There are three different cases for new:
// Object size is variable (depends on arguments):
//   1) Object is an array (arrays are treated specially by the EE)
//   2) Object is some other variable-sized object (e.g. String)
// 3) Class size can be determined beforehand (the normal case)
// In the first case we need to call a NEWOBJ helper (multinewarray),
// in the second case we call the constructor with a null 'this' pointer,
// and in the third case we allocate the memory and then call the constructor.
clsFlags = callInfo.classFlags;
if (clsFlags & CORINFO_FLG_ARRAY)
if (tiVerificationNeeded)
CORINFO_CLASS_HANDLE elemTypeHnd;
INDEBUG(CorInfoType corType =)
info.compCompHnd->getChildType(resolvedToken.hClass, &elemTypeHnd);
assert(!(elemTypeHnd == nullptr && corType == CORINFO_TYPE_VALUECLASS));
Verify(elemTypeHnd == nullptr ||
           !(info.compCompHnd->getClassAttribs(elemTypeHnd) & CORINFO_FLG_CONTAINS_STACK_PTR),
       "newarr of byref-like objects");
verVerifyCall(opcode, &resolvedToken, nullptr, ((prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0),
              ((prefixFlags & PREFIX_READONLY) != 0), delegateCreateStart, codeAddr - 1,
              &callInfo DEBUGARG(info.compFullName));

// Arrays need to call the NEWOBJ helper.
assertImp(clsFlags & CORINFO_FLG_VAROBJSIZE);

impImportNewObjArray(&resolvedToken, &callInfo);
if (compDonotInline())

// At present this can only be String
else if (clsFlags & CORINFO_FLG_VAROBJSIZE)
if (IsTargetAbi(CORINFO_CORERT_ABI))
// The dummy argument does not exist in CoreRT
newObjThisPtr = nullptr;

// This is the case for variable-sized objects that are not
// arrays. In this case, call the constructor with a null 'this'
newObjThisPtr = gtNewIconNode(0, TYP_REF);

/* Remember that this basic block contains 'new' of an object */
block->bbFlags |= BBF_HAS_NEWOBJ;
optMethodFlags |= OMF_HAS_NEWOBJ;

// This is the normal case where the size of the object is
// fixed. Allocate the memory and call the constructor.
//
// Note: We cannot add a peep to avoid use of a temp here
// because we don't have enough interference info to detect when
// sources and destination interfere, for example: s = new S(ref);
//
// TODO: We should find the correct place to introduce a general
// reverse copy prop for struct return values from newobj or
// any function returning structs.
/* get a temporary for the new object */
lclNum = lvaGrabTemp(true DEBUGARG("NewObj constructor temp"));

// In the value class case we only need clsHnd for size calcs.
//
// The lookup of the code pointer will be handled by CALL in this case
if (clsFlags & CORINFO_FLG_VALUECLASS)
if (compIsForInlining())
// If the value class has GC fields, inform the inliner. It may choose to
// bail out on the inline.
DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0)
compInlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT);
if (compInlineResult->IsFailure())

// Do further notification in the case where the call site is rare;
// some policies do not track the relative hotness of call sites for
// "always" inline cases.
if (impInlineInfo->iciBlock->isRunRarely())
compInlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT);
if (compInlineResult->IsFailure())

CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
unsigned    size   = info.compCompHnd->getClassSize(resolvedToken.hClass);

if (impIsPrimitive(jitTyp))
lvaTable[lclNum].lvType = JITtype2varType(jitTyp);

// The local variable itself is the allocated space.
// Here we need unsafe value cls check, since the address of the struct is taken for further use
// and is potentially exploitable.
lvaSetStruct(lclNum, resolvedToken.hClass, true /* unsafe value cls check */);

// Append a tree to zero-out the temp
newObjThisPtr = gtNewLclvNode(lclNum, lvaTable[lclNum].TypeGet());

newObjThisPtr = gtNewBlkOpNode(newObjThisPtr,    // Dest
                               gtNewIconNode(0), // Value
                               size,             // Size
                               false,            // isVolatile
                               false);           // not copyBlock
impAppendTree(newObjThisPtr, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);

// Obtain the address of the temp
newObjThisPtr =
    gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewLclvNode(lclNum, lvaTable[lclNum].TypeGet()));
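// The temp is zeroed before the constructor runs, presumably so that any GC
// references inside the struct are in a valid state even if the constructor
// throws partway through initialization.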
#ifdef FEATURE_READYTORUN_COMPILER
if (opts.IsReadyToRun())
op1                   = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_NEW, TYP_REF);
usingReadyToRunHelper = (op1 != nullptr);

if (!usingReadyToRunHelper)
op1 = impParentClassTokenToHandle(&resolvedToken, nullptr, TRUE);
if (op1 == nullptr)
{ // compDonotInline()

// TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
// and the newfast call with a single call to a dynamic R2R cell that will:
//   1) Load the context
//   2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
//   3) Allocate and return the new object
// Reason: performance (today, we'll always use the slow helper for the R2R generics case)

op1 = gtNewAllocObjNode(info.compCompHnd->getNewHelper(&resolvedToken, info.compMethodHnd),
                        resolvedToken.hClass, TYP_REF, op1);

// Remember that this basic block contains 'new' of an object
block->bbFlags |= BBF_HAS_NEWOBJ;
optMethodFlags |= OMF_HAS_NEWOBJ;

// Append the assignment to the temp/local. Don't need to spill
// at all as we are just calling an EE-Jit helper which can only
// cause an (async) OutOfMemoryException.
//
// We assign the newly allocated object (by a GT_ALLOCOBJ node)
// to a temp. Note that the pattern "temp = allocObj" is required
// by the ObjectAllocator phase to be able to determine GT_ALLOCOBJ nodes
// without an exhaustive walk over all expressions.

impAssignTempGen(lclNum, op1, (unsigned)CHECK_SPILL_NONE);

newObjThisPtr = gtNewLclvNode(lclNum, TYP_REF);
/* CALLI does not respond to CONSTRAINED */
prefixFlags &= ~PREFIX_CONSTRAINED;

if (compIsForInlining())
// CALLI doesn't have a method handle, so assume the worst.
if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_CALLI);

// We can't call getCallInfo on the token from a CALLI, but we need it in
// many other places. We unfortunately embed that knowledge here.
if (opcode != CEE_CALLI)
_impResolveToken(CORINFO_TOKENKIND_Method);

eeGetCallInfo(&resolvedToken,
              (prefixFlags & PREFIX_CONSTRAINED) ? &constrainedResolvedToken : nullptr,
              // this is how impImportCall invokes getCallInfo
              addVerifyFlag(
                  combine(combine(CORINFO_CALLINFO_ALLOWINSTPARAM, CORINFO_CALLINFO_SECURITYCHECKS),
                          (opcode == CEE_CALLVIRT) ? CORINFO_CALLINFO_CALLVIRT
                                                   : CORINFO_CALLINFO_NONE)),
              &callInfo);
else
// Suppress uninitialized use warning.
memset(&resolvedToken, 0, sizeof(resolvedToken));
memset(&callInfo, 0, sizeof(callInfo));

resolvedToken.token = getU4LittleEndian(codeAddr);
CALL: // memberRef should be set.
// newObjThisPtr should be set for CEE_NEWOBJ

JITDUMP(" %08X", resolvedToken.token);
constraintCall = (prefixFlags & PREFIX_CONSTRAINED) != 0;

bool newBBcreatedForTailcallStress;

newBBcreatedForTailcallStress = false;

if (compIsForInlining())
if (compDonotInline())
// We rule out inlinees with explicit tail calls in fgMakeBasicBlocks.
assert((prefixFlags & PREFIX_TAILCALL_EXPLICIT) == 0);

if (compTailCallStress())
// Have we created a new BB after the "call" instruction in fgMakeBasicBlocks()?
// Tail call stress only recognizes call+ret patterns and forces them to be
// explicit tail prefixed calls. Also fgMakeBasicBlocks() under tail call stress
// doesn't import the 'ret' opcode following the call into the basic block containing
// the call, but instead imports it into a new basic block. Note that fgMakeBasicBlocks()
// is already checking that there is an opcode following the call, so it is
// safe here to read the next opcode without a bounds check.
newBBcreatedForTailcallStress =
    impOpcodeIsCallOpcode(opcode) && // Current opcode is a CALL (not a CEE_NEWOBJ), so don't
                                     // make it jump to RET.
    (OPCODE)getU1LittleEndian(codeAddr + sz) == CEE_RET; // Next opcode is a CEE_RET

if (newBBcreatedForTailcallStress &&
    !(prefixFlags & PREFIX_TAILCALL_EXPLICIT) && // User hasn't set "tail." prefix yet.
    verCheckTailCallConstraint(opcode, &resolvedToken,
                               constraintCall ? &constrainedResolvedToken : nullptr,
                               true) // Is it legal to do a tailcall?
// Stress the tailcall.
JITDUMP(" (Tailcall stress: prefixFlags |= PREFIX_TAILCALL_EXPLICIT)");
prefixFlags |= PREFIX_TAILCALL_EXPLICIT;
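// Illustrative transformation under tail call stress (assumed IL):
//     call   SomeMethod      -->     tail. call SomeMethod
//     ret                            ret
// i.e. any call immediately followed by ret is forced into an explicit tail
// call whenever the constraint check above says doing so is legal.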
// This is split up to avoid goto flow warnings.
bool isRecursive;
isRecursive = !compIsForInlining() && (callInfo.hMethod == info.compMethodHnd);

// Note that when running under tail call stress, a call will be marked as explicit tail prefixed
// and hence will not be considered for implicit tail calling.
if (impIsImplicitTailCallCandidate(opcode, codeAddr + sz, codeEndp, prefixFlags, isRecursive))
if (compIsForInlining())
#if FEATURE_TAILCALL_OPT_SHARED_RETURN
// Are we inlining at an implicit tail call site? If so then we can flag
// implicit tail call sites in the inline body. These call sites
// often end up in non-BBJ_RETURN blocks, so only flag them when
// we're able to handle shared returns.
if (impInlineInfo->iciCall->IsImplicitTailCall())
JITDUMP(" (Inline Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)");
prefixFlags |= PREFIX_TAILCALL_IMPLICIT;
#endif // FEATURE_TAILCALL_OPT_SHARED_RETURN

JITDUMP(" (Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)");
prefixFlags |= PREFIX_TAILCALL_IMPLICIT;

// Treat this call as a tail call for verification only if "tail" prefixed (i.e. an explicit tail call).
explicitTailCall = (prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0;
readonlyCall     = (prefixFlags & PREFIX_READONLY) != 0;

if (opcode != CEE_CALLI && opcode != CEE_NEWOBJ)
// All calls and delegates need a security callout.
// For delegates, this is the call to the delegate constructor, not the access check on the
// LD(virt)FTN.
impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
#if 0 // DevDiv 410397 - This breaks too many obfuscated apps to do this in an in-place release

// DevDiv 291703 - we need to check for accessibility between the caller of InitializeArray
// and the field it is reading, thus it is now unverifiable to not immediately precede with
// ldtoken <field token>, and we now check accessibility
if ((callInfo.methodFlags & CORINFO_FLG_INTRINSIC) &&
    (info.compCompHnd->getIntrinsicID(callInfo.hMethod) == CORINFO_INTRINSIC_InitializeArray))
if (prevOpcode != CEE_LDTOKEN)
Verify(prevOpcode == CEE_LDTOKEN, "Need ldtoken for InitializeArray");

assert(lastLoadToken != NULL);
// Now that we know we have a token, verify that it is accessible for loading
CORINFO_RESOLVED_TOKEN resolvedLoadField;
impResolveToken(lastLoadToken, &resolvedLoadField, CORINFO_TOKENKIND_Field);
eeGetFieldInfo(&resolvedLoadField, CORINFO_ACCESS_INIT_ARRAY, &fieldInfo);
impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);

#endif // DevDiv 410397
if (tiVerificationNeeded)
verVerifyCall(opcode, &resolvedToken, constraintCall ? &constrainedResolvedToken : nullptr,
              explicitTailCall, readonlyCall, delegateCreateStart, codeAddr - 1,
              &callInfo DEBUGARG(info.compFullName));

// Insert delegate callout here.
if (opcode == CEE_NEWOBJ && (mflags & CORINFO_FLG_CONSTRUCTOR) && (clsFlags & CORINFO_FLG_DELEGATE))
// We should do this only if verification is enabled.
// If verification is disabled, delegateCreateStart will not be initialized correctly.
if (tiVerificationNeeded)
mdMemberRef delegateMethodRef = mdMemberRefNil;
// We should get here only for well formed delegate creation.
assert(verCheckDelegateCreation(delegateCreateStart, codeAddr - 1, delegateMethodRef));

#ifdef FEATURE_CORECLR
// In coreclr the delegate transparency rule needs to be enforced even if verification is disabled
typeInfo              tiActualFtn          = impStackTop(0).seTypeInfo;
CORINFO_METHOD_HANDLE delegateMethodHandle = tiActualFtn.GetMethod2();

impInsertCalloutForDelegate(info.compMethodHnd, delegateMethodHandle, resolvedToken.hClass);
#endif // FEATURE_CORECLR
callTyp = impImportCall(opcode, &resolvedToken, constraintCall ? &constrainedResolvedToken : nullptr,
                        newObjThisPtr, prefixFlags, &callInfo, opcodeOffs);
if (compDonotInline())

if (explicitTailCall || newBBcreatedForTailcallStress) // If newBBcreatedForTailcallStress is true, we
                                                       // have created a new BB after the "call"
// instruction in fgMakeBasicBlocks(). So we need to jump to RET regardless.
assert(!compIsForInlining());
BOOL isLoadAddress = (opcode == CEE_LDFLDA || opcode == CEE_LDSFLDA);
BOOL isLoadStatic  = (opcode == CEE_LDSFLD || opcode == CEE_LDSFLDA);

/* Get the CP_Fieldref index */
assertImp(sz == sizeof(unsigned));

_impResolveToken(CORINFO_TOKENKIND_Field);

JITDUMP(" %08X", resolvedToken.token);

int aflags = isLoadAddress ? CORINFO_ACCESS_ADDRESS : CORINFO_ACCESS_GET;

GenTreePtr           obj     = nullptr;
typeInfo*            tiObj   = nullptr;
CORINFO_CLASS_HANDLE objType = nullptr; // used for fields

if (opcode == CEE_LDFLD || opcode == CEE_LDFLDA)
tiObj = &impStackTop().seTypeInfo;
obj   = impPopStack(objType).val;

if (impIsThis(obj))
aflags |= CORINFO_ACCESS_THIS;

// An optimization for Contextful classes:
// we unwrap the proxy when we have a 'this reference'

if (info.compUnwrapContextful)
aflags |= CORINFO_ACCESS_UNWRAP;

eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo);

// Figure out the type of the member. We always call canAccessField, so you always need this
// information
CorInfoType ciType = fieldInfo.fieldType;
clsHnd             = fieldInfo.structType;

lclTyp = JITtype2varType(ciType);

#ifdef _TARGET_AMD64_
noway_assert(varTypeIsIntegralOrI(lclTyp) || varTypeIsFloating(lclTyp) || lclTyp == TYP_STRUCT);
#endif // _TARGET_AMD64_
if (compIsForInlining())
switch (fieldInfo.fieldAccessor)
case CORINFO_FIELD_INSTANCE_HELPER:
case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
case CORINFO_FIELD_STATIC_ADDR_HELPER:
case CORINFO_FIELD_STATIC_TLS:

compInlineResult->NoteFatal(InlineObservation::CALLEE_LDFLD_NEEDS_HELPER);

case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
#if COR_JIT_EE_VERSION > 460
case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
#endif

/* We may be able to inline the field accessors in specific instantiations of generic
   methods */
compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDFLD_NEEDS_HELPER);

if (!isLoadAddress && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && lclTyp == TYP_STRUCT &&

if ((info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd) == CORINFO_TYPE_UNDEF) &&
    !(info.compFlags & CORINFO_FLG_FORCEINLINE))
// Loading a static valuetype field usually will cause a JitHelper to be called
// for the static base. This will bloat the code.
compInlineResult->Note(InlineObservation::CALLEE_LDFLD_STATIC_VALUECLASS);

if (compInlineResult->IsFailure())

tiRetVal = verMakeTypeInfo(ciType, clsHnd);
if (isLoadAddress)
tiRetVal.MakeByRef();
else
tiRetVal.NormaliseForStack();

// Perform this check always to ensure that we get field access exceptions even with
// SkipVerification.
impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
if (tiVerificationNeeded)
// You can also pass the unboxed struct to LDFLD
BOOL bAllowPlainValueTypeAsThis = FALSE;
if (opcode == CEE_LDFLD && impIsValueType(tiObj))
bAllowPlainValueTypeAsThis = TRUE;

verVerifyField(&resolvedToken, fieldInfo, tiObj, isLoadAddress, bAllowPlainValueTypeAsThis);

// If we're doing this on a heap object or from a 'safe' byref
// then the result is a safe byref too
if (isLoadAddress) // load address
if (fieldInfo.fieldFlags &
    CORINFO_FLG_FIELD_STATIC) // statics marked as safe will have permanent home
if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_SAFESTATIC_BYREF_RETURN)
tiRetVal.SetIsPermanentHomeByRef();
else if (tiObj->IsObjRef() || tiObj->IsPermanentHomeByRef())
// ldflda of a byref is safe if done on a gc object or on a
// safe byref
tiRetVal.SetIsPermanentHomeByRef();
else
// tiVerificationNeeded is false.
// Raise InvalidProgramException if static load accesses non-static field
if (isLoadStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0))
BADCODE("static access on an instance field");

// We are using ldfld/a on a static field. We allow it, but need to get side-effect from obj.
if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr)
if (obj->gtFlags & GTF_SIDE_EFFECT)
obj = gtUnusedValNode(obj);
impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);

/* Preserve 'small' int types */
if (lclTyp > TYP_INT)
lclTyp = genActualType(lclTyp);
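// Note: types below TYP_INT are deliberately left small here so the field
// access uses the correct width; only types above TYP_INT are normalized to
// their actual (stack) type.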
bool usesHelper = false;

switch (fieldInfo.fieldAccessor)
case CORINFO_FIELD_INSTANCE:
#ifdef FEATURE_READYTORUN_COMPILER
case CORINFO_FIELD_INSTANCE_WITH_BASE:
#endif

bool nullcheckNeeded = false;

obj = impCheckForNullPointer(obj);

if (isLoadAddress && (obj->gtType == TYP_BYREF) && fgAddrCouldBeNull(obj))
nullcheckNeeded = true;

// If the object is a struct, what we really want is
// for the field to operate on the address of the struct.
if (!varTypeGCtype(obj->TypeGet()) && impIsValueType(tiObj))
assert(opcode == CEE_LDFLD && objType != nullptr);

obj = impGetStructAddr(obj, objType, (unsigned)CHECK_SPILL_ALL, true);

/* Create the data member node */
op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset, nullcheckNeeded);

#ifdef FEATURE_READYTORUN_COMPILER
if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE)
op1->gtField.gtFieldLookup = fieldInfo.fieldLookup;
#endif

op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT);

if (fgAddrCouldBeNull(obj))
op1->gtFlags |= GTF_EXCEPT;

// If gtFldObj is a BYREF then our target is a value class and
// it could point anywhere, for example a boxed class static int
if (obj->gtType == TYP_BYREF)
op1->gtFlags |= GTF_IND_TGTANYWHERE;

DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
if (StructHasOverlappingFields(typeFlags))
op1->gtField.gtFldMayOverlap = true;

// wrap it in an address-of operator if necessary
op1 = gtNewOperNode(GT_ADDR,
                    (var_types)(varTypeIsGC(obj->TypeGet()) ? TYP_BYREF : TYP_I_IMPL), op1);

if (compIsForInlining() &&
    impInlineIsGuaranteedThisDerefBeforeAnySideEffects(nullptr, obj,
                                                       impInlineInfo->inlArgInfo))
impInlineInfo->thisDereferencedFirst = true;
case CORINFO_FIELD_STATIC_TLS:
#ifdef _TARGET_X86_
// Legacy TLS access is implemented as intrinsic on x86 only

/* Create the data member node */
op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset);
op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation

op1 = gtNewOperNode(GT_ADDR, (var_types)TYP_I_IMPL, op1);
#else
fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER;
#endif

case CORINFO_FIELD_STATIC_ADDR_HELPER:
case CORINFO_FIELD_INSTANCE_HELPER:
case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp,

case CORINFO_FIELD_STATIC_ADDRESS:
// Replace static read-only fields with constant if possible
if ((aflags & CORINFO_ACCESS_GET) && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_FINAL) &&
    !(fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP) &&
    (varTypeIsIntegral(lclTyp) || varTypeIsFloating(lclTyp)))
CorInfoInitClassResult initClassResult =
    info.compCompHnd->initClass(resolvedToken.hField, info.compMethodHnd,
                                impTokenLookupContextHandle);

if (initClassResult & CORINFO_INITCLASS_INITIALIZED)
void** pFldAddr = nullptr;
void*  fldAddr  = info.compCompHnd->getFieldAddress(resolvedToken.hField, (void**)&pFldAddr);

// We should always be able to access this static's address directly
assert(pFldAddr == nullptr);

op1 = impImportStaticReadOnlyField(fldAddr, lclTyp);
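// In other words, once the enclosing class is known to be initialized, a
// readonly primitive static can never change again, so the load can be
// folded to the constant value fetched from the EE at jit time.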
case CORINFO_FIELD_STATIC_RVA_ADDRESS:
case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
#if COR_JIT_EE_VERSION > 460
case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
#endif
op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo,
                                 lclTyp);

case CORINFO_FIELD_INTRINSIC_ZERO:
assert(aflags & CORINFO_ACCESS_GET);
op1 = gtNewIconNode(0, lclTyp);

case CORINFO_FIELD_INTRINSIC_EMPTY_STRING:
assert(aflags & CORINFO_ACCESS_GET);

void*          pValue;
InfoAccessType iat = info.compCompHnd->emptyStringLiteral(&pValue);
op1                = gtNewStringLiteralNode(iat, pValue);

default:
assert(!"Unexpected fieldAccessor");

if (!isLoadAddress)
if (prefixFlags & PREFIX_VOLATILE)
op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered

assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) ||
       (op1->OperGet() == GT_OBJ));
op1->gtFlags |= GTF_IND_VOLATILE;

if (prefixFlags & PREFIX_UNALIGNED)
assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) ||
       (op1->OperGet() == GT_OBJ));
op1->gtFlags |= GTF_IND_UNALIGNED;
/* Check if the class needs explicit initialization */

if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
GenTreePtr helperNode = impInitClass(&resolvedToken);
if (compDonotInline())
if (helperNode != nullptr)
op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1);

impPushOnStack(op1, tiRetVal);
BOOL isStoreStatic = (opcode == CEE_STSFLD);

CORINFO_CLASS_HANDLE fieldClsHnd; // class of the field (if it's a ref type)

/* Get the CP_Fieldref index */

assertImp(sz == sizeof(unsigned));

_impResolveToken(CORINFO_TOKENKIND_Field);

JITDUMP(" %08X", resolvedToken.token);

int        aflags = CORINFO_ACCESS_SET;
GenTreePtr obj    = nullptr;
typeInfo*  tiObj  = nullptr;
typeInfo   tiVal;

/* Pull the value from the stack */
op2    = impPopStack(tiVal);
clsHnd = tiVal.GetClassHandle();

if (opcode == CEE_STFLD)
tiObj = &impStackTop().seTypeInfo;
obj   = impPopStack().val;

if (impIsThis(obj))
aflags |= CORINFO_ACCESS_THIS;

// An optimization for Contextful classes:
// we unwrap the proxy when we have a 'this reference'

if (info.compUnwrapContextful)
aflags |= CORINFO_ACCESS_UNWRAP;

eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo);

// Figure out the type of the member. We always call canAccessField, so you always need this
// information
CorInfoType ciType = fieldInfo.fieldType;
fieldClsHnd        = fieldInfo.structType;

lclTyp = JITtype2varType(ciType);
if (compIsForInlining())
/* Is this a 'special' (COM) field? Or a TLS ref static field? A field stored in the GC heap?
 * Or a per-inst static? */

switch (fieldInfo.fieldAccessor)
case CORINFO_FIELD_INSTANCE_HELPER:
case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
case CORINFO_FIELD_STATIC_ADDR_HELPER:
case CORINFO_FIELD_STATIC_TLS:

compInlineResult->NoteFatal(InlineObservation::CALLEE_STFLD_NEEDS_HELPER);

case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
#if COR_JIT_EE_VERSION > 460
case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
#endif

/* We may be able to inline the field accessors in specific instantiations of generic
   methods */
compInlineResult->NoteFatal(InlineObservation::CALLSITE_STFLD_NEEDS_HELPER);
impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);

if (tiVerificationNeeded)
verVerifyField(&resolvedToken, fieldInfo, tiObj, TRUE);
typeInfo fieldType = verMakeTypeInfo(ciType, fieldClsHnd);
Verify(tiCompatibleWith(tiVal, fieldType.NormaliseForStack(), true), "type mismatch");
else
// tiVerificationNeeded is false.
// Raise InvalidProgramException if static store accesses non-static field
if (isStoreStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0))
BADCODE("static access on an instance field");

// We are using stfld on a static field.
// We allow it, but need to eval any side-effects for obj
if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr)
if (obj->gtFlags & GTF_SIDE_EFFECT)
obj = gtUnusedValNode(obj);
impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);

/* Preserve 'small' int types */
if (lclTyp > TYP_INT)
lclTyp = genActualType(lclTyp);
switch (fieldInfo.fieldAccessor)
case CORINFO_FIELD_INSTANCE:
#ifdef FEATURE_READYTORUN_COMPILER
case CORINFO_FIELD_INSTANCE_WITH_BASE:
#endif

obj = impCheckForNullPointer(obj);

/* Create the data member node */
op1             = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset);
DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
if (StructHasOverlappingFields(typeFlags))
op1->gtField.gtFldMayOverlap = true;

#ifdef FEATURE_READYTORUN_COMPILER
if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE)
op1->gtField.gtFieldLookup = fieldInfo.fieldLookup;
#endif

op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT);

if (fgAddrCouldBeNull(obj))
op1->gtFlags |= GTF_EXCEPT;

// If gtFldObj is a BYREF then our target is a value class and
// it could point anywhere, for example a boxed class static int
if (obj->gtType == TYP_BYREF)
op1->gtFlags |= GTF_IND_TGTANYWHERE;

if (compIsForInlining() &&
    impInlineIsGuaranteedThisDerefBeforeAnySideEffects(op2, obj, impInlineInfo->inlArgInfo))
impInlineInfo->thisDereferencedFirst = true;
case CORINFO_FIELD_STATIC_TLS:
#ifdef _TARGET_X86_
// Legacy TLS access is implemented as intrinsic on x86 only

/* Create the data member node */
op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset);
op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation
#else
fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER;
#endif

case CORINFO_FIELD_STATIC_ADDR_HELPER:
case CORINFO_FIELD_INSTANCE_HELPER:
case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp,

case CORINFO_FIELD_STATIC_ADDRESS:
case CORINFO_FIELD_STATIC_RVA_ADDRESS:
case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
#if COR_JIT_EE_VERSION > 460
case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
#endif
op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo,
                                 lclTyp);

default:
assert(!"Unexpected fieldAccessor");
// Create the member assignment, unless we have a struct.
// TODO-1stClassStructs: This could be limited to TYP_STRUCT, to avoid extra copies.
bool deferStructAssign = varTypeIsStruct(lclTyp);

if (!deferStructAssign)
if (prefixFlags & PREFIX_VOLATILE)
assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND));
op1->gtFlags |= GTF_DONT_CSE;      // Can't CSE a volatile
op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
op1->gtFlags |= GTF_IND_VOLATILE;

if (prefixFlags & PREFIX_UNALIGNED)
assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND));
op1->gtFlags |= GTF_IND_UNALIGNED;

/* V4.0 allows assignment of i4 constant values to i8 type vars when the IL verifier is bypassed
   (full trust apps). The reason this works is that the JIT stores an i4 constant in the GenTree
   union during importation and reads from the union as if it were a long during code generation.
   Though this can potentially read garbage, one can get lucky to have this working correctly.

   This code pattern is generated by the Dev10 MC++ compiler while storing to fields when compiled
   with the /O2 switch (default when compiling retail configs in Dev10), and a customer app has
   taken a dependency on it. To be backward compatible, we will explicitly add an upward cast here
   so that it works correctly always.

   Note that this is limited to x86 alone as there is no back compat to be addressed for Arm JIT
   for V4.0. */
CLANG_FORMAT_COMMENT_ANCHOR;

#ifdef _TARGET_X86_
if (op1->TypeGet() != op2->TypeGet() && op2->OperIsConst() && varTypeIsIntOrI(op2->TypeGet()) &&
    varTypeIsLong(op1->TypeGet()))
op2 = gtNewCastNode(op1->TypeGet(), op2, op1->TypeGet());
#endif

#ifdef _TARGET_64BIT_
// Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType))
op2->gtType = TYP_I_IMPL;

// Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT))
op2 = gtNewCastNode(TYP_INT, op2, TYP_INT);

// Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT))
op2 = gtNewCastNode(TYP_I_IMPL, op2, TYP_I_IMPL);
#endif // _TARGET_64BIT_

#if !FEATURE_X87_DOUBLES
// We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE
// We insert a cast to the dest 'op1' type
if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) &&
    varTypeIsFloating(op2->gtType))
op2 = gtNewCastNode(op1->TypeGet(), op2, op1->TypeGet());
#endif // !FEATURE_X87_DOUBLES

op1 = gtNewAssignNode(op1, op2);

/* Mark the expression as containing an assignment */

op1->gtFlags |= GTF_ASG;
/* Check if the class needs explicit initialization */

if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
GenTreePtr helperNode = impInitClass(&resolvedToken);
if (compDonotInline())
if (helperNode != nullptr)
op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1);

/* stfld can interfere with value classes (consider the sequence
   ldloc, ldloca, ..., stfld, stloc). We will be conservative and
   spill all value class references from the stack. */

if (obj && ((obj->gtType == TYP_BYREF) || (obj->gtType == TYP_I_IMPL)))
if (impIsValueType(tiObj))
impSpillEvalStack();
else
impSpillValueClasses();

/* Spill any refs to the same member from the stack */

impSpillLclRefs((ssize_t)resolvedToken.hField);

/* stsfld also interferes with indirect accesses (for aliased
   statics) and calls. But we don't need to spill other statics
   as we have explicitly spilled this particular static field. */

impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STFLD"));

if (deferStructAssign)
op1 = impAssignStruct(op1, op2, clsHnd, (unsigned)CHECK_SPILL_ALL);
/* Get the class type index operand */

_impResolveToken(CORINFO_TOKENKIND_Newarr);

JITDUMP(" %08X", resolvedToken.token);

if (!opts.IsReadyToRun())
// Need to restore array classes before creating array objects on the heap
op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE /*mustRestoreHandle*/);
if (op1 == nullptr)
{ // compDonotInline()

if (tiVerificationNeeded)
// As per ECMA 'numElems' specified can be either int32 or native int.
Verify(impStackTop().seTypeInfo.IsIntOrNativeIntType(), "bad bound");

CORINFO_CLASS_HANDLE elemTypeHnd;
info.compCompHnd->getChildType(resolvedToken.hClass, &elemTypeHnd);
Verify(elemTypeHnd == nullptr ||
           !(info.compCompHnd->getClassAttribs(elemTypeHnd) & CORINFO_FLG_CONTAINS_STACK_PTR),
       "array of byref-like type");
tiRetVal = verMakeTypeInfo(resolvedToken.hClass);

accessAllowedResult =
    info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
impHandleAccessAllowed(accessAllowedResult, &calloutHelper);

/* Form the arglist: array class handle, size */
op2 = impPopStack().val;
assertImp(genActualTypeIsIntOrI(op2->gtType));

#ifdef FEATURE_READYTORUN_COMPILER
if (opts.IsReadyToRun())
op1 = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_NEWARR_1, TYP_REF,
                                gtNewArgList(op2));
usingReadyToRunHelper = (op1 != nullptr);

if (!usingReadyToRunHelper)
// TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
// and the newarr call with a single call to a dynamic R2R cell that will:
//   1) Load the context
//   2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
//   3) Allocate the new array
// Reason: performance (today, we'll always use the slow helper for the R2R generics case)

// Need to restore array classes before creating array objects on the heap
op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE /*mustRestoreHandle*/);
if (op1 == nullptr)
{ // compDonotInline()
#endif // FEATURE_READYTORUN_COMPILER

if (!usingReadyToRunHelper)
args = gtNewArgList(op1, op2);

/* Create a call to 'new' */

// Note that this only works for shared generic code because the same helper is used for all
// reference array types
op1 =
    gtNewHelperCallNode(info.compCompHnd->getNewArrHelper(resolvedToken.hClass), TYP_REF, 0, args);

op1->gtCall.compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)resolvedToken.hClass;

/* Remember that this basic block contains 'new' of an sd array */

block->bbFlags |= BBF_HAS_NEWARRAY;
optMethodFlags |= OMF_HAS_NEWARRAY;

/* Push the result of the call on the stack */

impPushOnStack(op1, tiRetVal);
assert(!compIsForInlining());

if (tiVerificationNeeded)
Verify(false, "bad opcode");

// We don't allow locallocs inside handlers
if (block->hasHndIndex())
BADCODE("Localloc can't be inside handler");

/* The FP register may not be back to the original value at the end
   of the method, even if the frame size is 0, as localloc may
   have modified it. So we will HAVE to reset it */

compLocallocUsed = true;
setNeedsGSSecurityCookie();

// Get the size to allocate

op2 = impPopStack().val;
assertImp(genActualTypeIsIntOrI(op2->gtType));

if (verCurrentState.esStackDepth != 0)
BADCODE("Localloc can only be used when the stack is empty");

op1 = gtNewOperNode(GT_LCLHEAP, TYP_I_IMPL, op2);

// May throw a stack overflow exception. Obviously, we don't want locallocs to be CSE'd.

op1->gtFlags |= (GTF_EXCEPT | GTF_DONT_CSE);

impPushOnStack(op1, tiRetVal);
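// ECMA-335 requires the evaluation stack to be empty (apart from the size
// operand, popped above) when localloc executes; the depth check enforces
// exactly that.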
/* Get the type token */
assertImp(sz == sizeof(unsigned));

_impResolveToken(CORINFO_TOKENKIND_Casting);

JITDUMP(" %08X", resolvedToken.token);

if (!opts.IsReadyToRun())
op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
if (op2 == nullptr)
{ // compDonotInline()

if (tiVerificationNeeded)
Verify(impStackTop().seTypeInfo.IsObjRef(), "obj reference needed");
// Even if this is a value class, we know it is boxed.
tiRetVal = typeInfo(TI_REF, resolvedToken.hClass);

accessAllowedResult =
    info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
impHandleAccessAllowed(accessAllowedResult, &calloutHelper);

op1 = impPopStack().val;

#ifdef FEATURE_READYTORUN_COMPILER
if (opts.IsReadyToRun())
GenTreePtr opLookup =
    impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_ISINSTANCEOF, TYP_REF,
                              gtNewArgList(op1));
usingReadyToRunHelper = (opLookup != nullptr);
op1                   = (usingReadyToRunHelper ? opLookup : op1);

if (!usingReadyToRunHelper)
// TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
// and the isinstanceof_any call with a single call to a dynamic R2R cell that will:
//   1) Load the context
//   2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
//   3) Perform the 'is instance' check on the input object
// Reason: performance (today, we'll always use the slow helper for the R2R generics case)

op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
if (op2 == nullptr)
{ // compDonotInline()
#endif // FEATURE_READYTORUN_COMPILER

if (!usingReadyToRunHelper)
op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, false);

if (compDonotInline())

impPushOnStack(op1, tiRetVal);
case CEE_REFANYVAL:

// get the class handle and make an ICON node out of it

_impResolveToken(CORINFO_TOKENKIND_Class);

JITDUMP(" %08X", resolvedToken.token);

op2 = impTokenToHandle(&resolvedToken);
if (op2 == nullptr)
{ // compDonotInline()

if (tiVerificationNeeded)
Verify(typeInfo::AreEquivalent(impStackTop().seTypeInfo, verMakeTypeInfo(impGetRefAnyClass())),

tiRetVal = verMakeTypeInfo(resolvedToken.hClass).MakeByRef();

op1 = impPopStack().val;
// make certain it is normalized
op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL);

// Call helper GETREFANY(classHandle, op1);
args = gtNewArgList(op2, op1);
op1  = gtNewHelperCallNode(CORINFO_HELP_GETREFANY, TYP_BYREF, 0, args);

impPushOnStack(op1, tiRetVal);
case CEE_REFANYTYPE:

if (tiVerificationNeeded)
Verify(typeInfo::AreEquivalent(impStackTop().seTypeInfo, verMakeTypeInfo(impGetRefAnyClass())),

op1 = impPopStack().val;

// make certain it is normalized
op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL);

if (op1->gtOper == GT_OBJ)
// Get the address of the refany
op1 = op1->gtOp.gtOp1;

// Fetch the type from the correct slot
op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
                    gtNewIconNode(offsetof(CORINFO_RefAny, type), TYP_I_IMPL));
op1 = gtNewOperNode(GT_IND, TYP_BYREF, op1);
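// A refany (TypedReference) is a two-slot struct: a byref to the data and
// its type handle. The GT_ADD above offsets to the 'type' slot and the
// GT_IND loads the handle from it.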
else
assertImp(op1->gtOper == GT_MKREFANY);

// The pointer may have side-effects
if (op1->gtOp.gtOp1->gtFlags & GTF_SIDE_EFFECT)
impAppendTree(op1->gtOp.gtOp1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
impNoteLastILoffs();

// We already have the class handle
op1 = op1->gtOp.gtOp2;

// convert native TypeHandle to RuntimeTypeHandle
GenTreeArgList* helperArgs = gtNewArgList(op1);

op1 = gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE_MAYBENULL, TYP_STRUCT, GTF_EXCEPT,
                          helperArgs);

// The handle struct is returned in register
op1->gtCall.gtReturnType = TYP_REF;

tiRetVal = typeInfo(TI_STRUCT, impGetTypeHandleClass());

impPushOnStack(op1, tiRetVal);
/* Get the Class index */
assertImp(sz == sizeof(unsigned));
lastLoadToken = codeAddr;
_impResolveToken(CORINFO_TOKENKIND_Ldtoken);

tokenType = info.compCompHnd->getTokenTypeAsHandle(&resolvedToken);

op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE);
if (op1 == nullptr)
{ // compDonotInline()

helper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE;
assert(resolvedToken.hClass != nullptr);

if (resolvedToken.hMethod != nullptr)
helper = CORINFO_HELP_METHODDESC_TO_STUBRUNTIMEMETHOD;
else if (resolvedToken.hField != nullptr)
helper = CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD;

GenTreeArgList* helperArgs = gtNewArgList(op1);

op1 = gtNewHelperCallNode(helper, TYP_STRUCT, GTF_EXCEPT, helperArgs);

// The handle struct is returned in register
op1->gtCall.gtReturnType = TYP_REF;

tiRetVal = verMakeTypeInfo(tokenType);
impPushOnStack(op1, tiRetVal);
case CEE_UNBOX_ANY:

/* Get the Class index */
assertImp(sz == sizeof(unsigned));

_impResolveToken(CORINFO_TOKENKIND_Class);

JITDUMP(" %08X", resolvedToken.token);

BOOL runtimeLookup;
op2 = impTokenToHandle(&resolvedToken, &runtimeLookup);
if (op2 == nullptr)
{ // compDonotInline()

// Run this always so we can get access exceptions even with SkipVerification.
accessAllowedResult =
    info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
impHandleAccessAllowed(accessAllowedResult, &calloutHelper);

if (opcode == CEE_UNBOX_ANY && !eeIsValueClass(resolvedToken.hClass))
if (tiVerificationNeeded)
typeInfo tiUnbox = impStackTop().seTypeInfo;
Verify(tiUnbox.IsObjRef(), "bad unbox.any arg");
tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
tiRetVal.NormaliseForStack();

op1 = impPopStack().val;
goto CASTCLASS;

/* Pop the object and create the unbox helper call */
/* You might think that for UNBOX_ANY we need to push a different */
/* (non-byref) type, but here we're making the tiRetVal that is used */
/* for the intermediate pointer which we then transfer onto the OBJ */
/* instruction. OBJ then creates the appropriate tiRetVal. */

if (tiVerificationNeeded)
typeInfo tiUnbox = impStackTop().seTypeInfo;
Verify(tiUnbox.IsObjRef(), "Bad unbox arg");

tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
Verify(tiRetVal.IsValueClass(), "not value class");
tiRetVal.MakeByRef();

// We always come from an objref, so this is a safe byref
tiRetVal.SetIsPermanentHomeByRef();
tiRetVal.SetIsReadonlyByRef();

op1 = impPopStack().val;
assertImp(op1->gtType == TYP_REF);

helper = info.compCompHnd->getUnBoxHelper(resolvedToken.hClass);
assert(helper == CORINFO_HELP_UNBOX || helper == CORINFO_HELP_UNBOX_NULLABLE);

// We only want to expand inline the normal UNBOX helper
expandInline = (helper == CORINFO_HELP_UNBOX);
if (compCurBB->isRunRarely())
expandInline = false; // not worth the code expansion

if (expandInline)
// we are doing normal unboxing
// inline the common case of the unbox helper
// UNBOX(exp) morphs into
// clone = pop(exp);
// ((*clone == typeToken) ? nop : helper(clone, typeToken));
// push(clone + sizeof(void*))
GenTreePtr cloneOperand;
op1 = impCloneExpr(op1, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
                   nullptr DEBUGARG("inline UNBOX clone1"));
op1 = gtNewOperNode(GT_IND, TYP_I_IMPL, op1);

GenTreePtr condBox = gtNewOperNode(GT_EQ, TYP_INT, op1, op2);

op1 = impCloneExpr(cloneOperand, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
                   nullptr DEBUGARG("inline UNBOX clone2"));
op2 = impTokenToHandle(&resolvedToken);
if (op2 == nullptr)
{ // compDonotInline()

args = gtNewArgList(op2, op1);
op1  = gtNewHelperCallNode(helper, TYP_VOID, 0, args);

op1 = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), op1);
op1 = gtNewQmarkNode(TYP_VOID, condBox, op1);
condBox->gtFlags |= GTF_RELOP_QMARK;

// QMARK nodes cannot reside on the evaluation stack. Because there
// may be other trees on the evaluation stack that side-effect the
// sources of the UNBOX operation we must spill the stack.

impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);

// Create the address-expression to reference past the object header
// to the beginning of the value-type. Today this means adjusting
// past the base of the object's vtable field which is pointer sized.

op2 = gtNewIconNode(sizeof(void*), TYP_I_IMPL);
op1 = gtNewOperNode(GT_ADD, TYP_BYREF, cloneOperand, op2);
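// op1 now points just past the pointer-sized method table slot at the start
// of the object, i.e. at the beginning of the unboxed payload.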
else
unsigned callFlags = (helper == CORINFO_HELP_UNBOX) ? 0 : GTF_EXCEPT;

// Don't optimize, just call the helper and be done with it
args = gtNewArgList(op2, op1);
op1  = gtNewHelperCallNode(helper,
                           (var_types)((helper == CORINFO_HELP_UNBOX) ? TYP_BYREF : TYP_STRUCT),
                           callFlags, args);

assert(helper == CORINFO_HELP_UNBOX && op1->gtType == TYP_BYREF || // Unbox helper returns a byref.
       helper == CORINFO_HELP_UNBOX_NULLABLE &&
           varTypeIsStruct(op1) // UnboxNullable helper returns a struct.
       );
/*
  ---------------------------------------------------------------------
  | \ helper  | CORINFO_HELP_UNBOX      | CORINFO_HELP_UNBOX_NULLABLE  |
  | opcode  \ | (which returns a BYREF) | (which returns a STRUCT)     |
  |---------------------------------------------------------------------
  | UNBOX     | push the BYREF          | spill the STRUCT to a local, |
  |           |                         | push the BYREF to this local |
  |---------------------------------------------------------------------
  | UNBOX_ANY | push a GT_OBJ of        | push the STRUCT              |
  |           | the BYREF               | For Linux when the           |
  |           |                         | struct is returned in two    |
  |           |                         | registers create a temp      |
  |           |                         | whose address is passed to   |
  |           |                         | the unbox_nullable helper.   |
  |---------------------------------------------------------------------
*/
14135 if (opcode == CEE_UNBOX)
14137 if (helper == CORINFO_HELP_UNBOX_NULLABLE)
14139 // Unbox nullable helper returns a struct type.
14140 // We need to spill it to a temp so than can take the address of it.
14141 // Here we need unsafe value cls check, since the address of struct is taken to be used
14142 // further along and potetially be exploitable.
14144 unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a nullable"));
14145 lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */);
14147 op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14148 op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
14149 assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp.
14151 op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14152 op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2);
14153 op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2);
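// The resulting tree is COMMA(ASG(tmp, helper-call), ADDR(tmp)): evaluating it
// performs the assignment and then yields the BYREF of the spilled struct.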
14156 assert(op1->gtType == TYP_BYREF);
14157 assert(!tiVerificationNeeded || tiRetVal.IsByRef());
14161 assert(opcode == CEE_UNBOX_ANY);
14163 if (helper == CORINFO_HELP_UNBOX)
14165 // Normal unbox helper returns a TYP_BYREF.
14166 impPushOnStack(op1, tiRetVal);
14171 assert(helper == CORINFO_HELP_UNBOX_NULLABLE && "Make sure the helper is nullable!");
14173 #if FEATURE_MULTIREG_RET
14175 if (varTypeIsStruct(op1) && IsMultiRegReturnedType(resolvedToken.hClass))
14177 // Unbox nullable helper returns a TYP_STRUCT.
14178 // For the multi-reg case we need to spill it to a temp so that
14179 // we can pass the address to the unbox_nullable jit helper.
14181 unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a register returnable nullable"));
14182 lvaTable[tmp].lvIsMultiRegArg = true;
14183 lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */);
14185 op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14186 op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
14187 assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp.
14189 op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14190 op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2);
14191 op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2);
14193 // In this case the return value of the unbox helper is TYP_BYREF.
14194 // Make sure the right type is placed on the operand type stack.
14195 impPushOnStack(op1, tiRetVal);
14197 // Load the struct.
14200 assert(op1->gtType == TYP_BYREF);
14201 assert(!tiVerificationNeeded || tiRetVal.IsByRef());
14207 #endif // FEATURE_MULTIREG_RET
14210 // If the struct is not register passable, we have it materialized in the RetBuf.
14211 assert(op1->gtType == TYP_STRUCT);
14212 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
14213 assert(tiRetVal.IsValueClass());
14217 impPushOnStack(op1, tiRetVal);
14223 /* Get the Class index */
14224 assertImp(sz == sizeof(unsigned));
14226 _impResolveToken(CORINFO_TOKENKIND_Box);
14228 JITDUMP(" %08X", resolvedToken.token);
14230 if (tiVerificationNeeded)
14232 typeInfo tiActual = impStackTop().seTypeInfo;
14233 typeInfo tiBox = verMakeTypeInfo(resolvedToken.hClass);
14235 Verify(verIsBoxable(tiBox), "boxable type expected");
14237 // check the class constraints of the boxed type in case we are boxing an uninitialized value
14238 Verify(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
14239 "boxed type has unsatisfied class constraints");
14241 Verify(tiCompatibleWith(tiActual, tiBox.NormaliseForStack(), true), "type mismatch");
14243 // Observation: the following code introduces a boxed value class on the stack, but,
14244 // according to the ECMA spec, one would simply expect: tiRetVal =
14245 // typeInfo(TI_REF,impGetObjectClass());
14247 // Push the result back on the stack;
14248 // even if clsHnd is a value class we want the TI_REF.
14249 // We call back to the EE to find out what type we should push (for Nullable<T> we push T).
14250 tiRetVal = typeInfo(TI_REF, info.compCompHnd->getTypeForBox(resolvedToken.hClass));
14253 accessAllowedResult =
14254 info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14255 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14257 // Note BOX can be used on things that are not value classes, in which
14258 // case we get a NOP. However the verifier's view of the type on the
14259 // stack changes (in generic code a 'T' becomes a 'boxed T')
14260 if (!eeIsValueClass(resolvedToken.hClass))
14262 verCurrentState.esStack[verCurrentState.esStackDepth - 1].seTypeInfo = tiRetVal;
14266 // Look ahead for unbox.any
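// A 'box' immediately followed by an 'unbox.any' of the same (non-shared) type
// is a no-op pair; illustrative IL:
//      box       SomeValueType
//      unbox.any SomeValueType
// In that case we leave the value on the stack and skip both instructions.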
14267 if (codeAddr + (sz + 1 + sizeof(mdToken)) <= codeEndp && codeAddr[sz] == CEE_UNBOX_ANY)
14269 DWORD classAttribs = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
14270 if (!(classAttribs & CORINFO_FLG_SHAREDINST))
14272 CORINFO_RESOLVED_TOKEN unboxResolvedToken;
14274 impResolveToken(codeAddr + (sz + 1), &unboxResolvedToken, CORINFO_TOKENKIND_Class);
14276 if (unboxResolvedToken.hClass == resolvedToken.hClass)
14278 // Skip the next unbox.any instruction
14279 sz += sizeof(mdToken) + 1;
14285 impImportAndPushBox(&resolvedToken);
14286 if (compDonotInline())
14295 /* Get the Class index */
14296 assertImp(sz == sizeof(unsigned));
14298 _impResolveToken(CORINFO_TOKENKIND_Class);
14300 JITDUMP(" %08X", resolvedToken.token);
14302 if (tiVerificationNeeded)
14304 tiRetVal = typeInfo(TI_INT);
14307 op1 = gtNewIconNode(info.compCompHnd->getClassSize(resolvedToken.hClass));
14308 impPushOnStack(op1, tiRetVal);
14311 case CEE_CASTCLASS:
14313 /* Get the Class index */
14315 assertImp(sz == sizeof(unsigned));
14317 _impResolveToken(CORINFO_TOKENKIND_Casting);
14319 JITDUMP(" %08X", resolvedToken.token);
14321 if (!opts.IsReadyToRun())
14323 op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
14324 if (op2 == nullptr)
14325 { // compDonotInline()
14330 if (tiVerificationNeeded)
14332 Verify(impStackTop().seTypeInfo.IsObjRef(), "object ref expected");
14334 tiRetVal = typeInfo(TI_REF, resolvedToken.hClass);
14337 accessAllowedResult =
14338 info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14339 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14341 op1 = impPopStack().val;
14343 /* Pop the address and create the 'checked cast' helper call */
14345 // At this point we expect typeRef to contain the token, op1 to contain the value being cast,
14346 // and op2 to contain code that creates the type handle corresponding to typeRef
14349 #ifdef FEATURE_READYTORUN_COMPILER
14350 if (opts.IsReadyToRun())
14352 GenTreePtr opLookup = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_CHKCAST,
14353 TYP_REF, gtNewArgList(op1));
14354 usingReadyToRunHelper = (opLookup != nullptr);
14355 op1 = (usingReadyToRunHelper ? opLookup : op1);
14357 if (!usingReadyToRunHelper)
14359 // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
14360 // and the chkcastany call with a single call to a dynamic R2R cell that will:
14361 // 1) Load the context
14362 // 2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
14363 // 3) Check the object on the stack for the type-cast
14364 // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
14366 op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
14367 if (op2 == nullptr)
14368 { // compDonotInline()
14374 if (!usingReadyToRunHelper)
14377 op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, true);
14379 if (compDonotInline())
14384 /* Push the result back on the stack */
14385 impPushOnStack(op1, tiRetVal);
14390 if (compIsForInlining())
14392 // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
14393 // TODO: Will this be too strict, given that we will inline many basic blocks?
14394 // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
14396 /* Do we have just the exception on the stack ?*/
14398 if (verCurrentState.esStackDepth != 1)
14400 /* if not, just don't inline the method */
14402 compInlineResult->NoteFatal(InlineObservation::CALLEE_THROW_WITH_INVALID_STACK);
14407 if (tiVerificationNeeded)
14409 tiRetVal = impStackTop().seTypeInfo;
14410 Verify(tiRetVal.IsObjRef(), "object ref expected");
14411 if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
14413 Verify(!tiRetVal.IsThisPtr(), "throw uninitialized this");
14417 block->bbSetRunRarely(); // any block with a throw is rare
14418 /* Pop the exception object and create the 'throw' helper call */
14420 op1 = gtNewHelperCallNode(CORINFO_HELP_THROW, TYP_VOID, GTF_EXCEPT, gtNewArgList(impPopStack().val));
14423 if (verCurrentState.esStackDepth > 0)
14425 impEvalSideEffects();
14428 assert(verCurrentState.esStackDepth == 0);
14434 assert(!compIsForInlining());
14436 if (info.compXcptnsCount == 0)
14438 BADCODE("rethrow outside catch");
14441 if (tiVerificationNeeded)
14443 Verify(block->hasHndIndex(), "rethrow outside catch");
14444 if (block->hasHndIndex())
14446 EHblkDsc* HBtab = ehGetDsc(block->getHndIndex());
14447 Verify(!HBtab->HasFinallyOrFaultHandler(), "rethrow in finally or fault");
14448 if (HBtab->HasFilter())
14450 // we better be in the handler clause part, not the filter part
14451 Verify(jitIsBetween(compCurBB->bbCodeOffs, HBtab->ebdHndBegOffs(), HBtab->ebdHndEndOffs()),
14452 "rethrow in filter");
14457 /* Create the 'rethrow' helper call */
14459 op1 = gtNewHelperCallNode(CORINFO_HELP_RETHROW, TYP_VOID, GTF_EXCEPT);
14465 assertImp(sz == sizeof(unsigned));
14467 _impResolveToken(CORINFO_TOKENKIND_Class);
14469 JITDUMP(" %08X", resolvedToken.token);
14471 if (tiVerificationNeeded)
14473 typeInfo tiTo = impStackTop().seTypeInfo;
14474 typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
14476 Verify(tiTo.IsByRef(), "byref expected");
14477 Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
14479 Verify(tiCompatibleWith(tiInstr, tiTo.DereferenceByRef(), false),
14480 "type operand incompatible with type of address");
14483 size = info.compCompHnd->getClassSize(resolvedToken.hClass); // Size
14484 op2 = gtNewIconNode(0); // Value
14485 op1 = impPopStack().val; // Dest
14486 op1 = gtNewBlockVal(op1, size);
14487 op1 = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, false);
14492 if (tiVerificationNeeded)
14494 Verify(false, "bad opcode");
14497 op3 = impPopStack().val; // Size
14498 op2 = impPopStack().val; // Value
14499 op1 = impPopStack().val; // Dest
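// A constant size lets us build a GT_BLK node directly; a runtime-computed
// size requires a GT_DYN_BLK node instead.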
14501 if (op3->IsCnsIntOrI())
14503 size = (unsigned)op3->AsIntConCommon()->IconValue();
14504 op1 = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, size);
14508 op1 = new (this, GT_DYN_BLK) GenTreeDynBlk(op1, op3);
14511 op1 = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, false);
14517 if (tiVerificationNeeded)
14519 Verify(false, "bad opcode");
14521 op3 = impPopStack().val; // Size
14522 op2 = impPopStack().val; // Src
14523 op1 = impPopStack().val; // Dest
14525 if (op3->IsCnsIntOrI())
14527 size = (unsigned)op3->AsIntConCommon()->IconValue();
14528 op1 = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, size);
14532 op1 = new (this, GT_DYN_BLK) GenTreeDynBlk(op1, op3);
14535 if (op2->OperGet() == GT_ADDR)
14537 op2 = op2->gtOp.gtOp1;
14541 op2 = gtNewOperNode(GT_IND, TYP_STRUCT, op2);
14544 op1 = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, true);
14549 assertImp(sz == sizeof(unsigned));
14551 _impResolveToken(CORINFO_TOKENKIND_Class);
14553 JITDUMP(" %08X", resolvedToken.token);
14555 if (tiVerificationNeeded)
14557 typeInfo tiFrom = impStackTop().seTypeInfo;
14558 typeInfo tiTo = impStackTop(1).seTypeInfo;
14559 typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
14561 Verify(tiFrom.IsByRef(), "expected byref source");
14562 Verify(tiTo.IsByRef(), "expected byref destination");
14564 Verify(tiCompatibleWith(tiFrom.DereferenceByRef(), tiInstr, false),
14565 "type of source address incompatible with type operand");
14566 Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
14567 Verify(tiCompatibleWith(tiInstr, tiTo.DereferenceByRef(), false),
14568 "type operand incompatible with type of destination address");
14571 if (!eeIsValueClass(resolvedToken.hClass))
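// cpobj on a reference type degenerates to a load through the source address
// followed by an ordinary stind.ref to the destination.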
14573 op1 = impPopStack().val; // address to load from
14575 impBashVarAddrsToI(op1);
14577 assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
14579 op1 = gtNewOperNode(GT_IND, TYP_REF, op1);
14580 op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
14582 impPushOnStackNoType(op1);
14583 opcode = CEE_STIND_REF;
14585 goto STIND_POST_VERIFY;
14588 op2 = impPopStack().val; // Src
14589 op1 = impPopStack().val; // Dest
14590 op1 = gtNewCpObjNode(op1, op2, resolvedToken.hClass, ((prefixFlags & PREFIX_VOLATILE) != 0));
14595 assertImp(sz == sizeof(unsigned));
14597 _impResolveToken(CORINFO_TOKENKIND_Class);
14599 JITDUMP(" %08X", resolvedToken.token);
14601 if (eeIsValueClass(resolvedToken.hClass))
14603 lclTyp = TYP_STRUCT;
14610 if (tiVerificationNeeded)
14613 typeInfo tiPtr = impStackTop(1).seTypeInfo;
14615 // Make sure we have a good looking byref
14616 Verify(tiPtr.IsByRef(), "pointer not byref");
14617 Verify(!tiPtr.IsReadonlyByRef(), "write to readonly byref");
14618 if (!tiPtr.IsByRef() || tiPtr.IsReadonlyByRef())
14620 compUnsafeCastUsed = true;
14623 typeInfo ptrVal = DereferenceByRef(tiPtr);
14624 typeInfo argVal = verMakeTypeInfo(resolvedToken.hClass);
14626 if (!tiCompatibleWith(impStackTop(0).seTypeInfo, NormaliseForStack(argVal), true))
14628 Verify(false, "type of value incompatible with type operand");
14629 compUnsafeCastUsed = true;
14632 if (!tiCompatibleWith(argVal, ptrVal, false))
14634 Verify(false, "type operand incompatible with type of address");
14635 compUnsafeCastUsed = true;
14640 compUnsafeCastUsed = true;
14643 if (lclTyp == TYP_REF)
14645 opcode = CEE_STIND_REF;
14646 goto STIND_POST_VERIFY;
14649 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
14650 if (impIsPrimitive(jitTyp))
14652 lclTyp = JITtype2varType(jitTyp);
14653 goto STIND_POST_VERIFY;
14656 op2 = impPopStack().val; // Value
14657 op1 = impPopStack().val; // Ptr
14659 assertImp(varTypeIsStruct(op2));
14661 op1 = impAssignStructPtr(op1, op2, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
14667 assert(!compIsForInlining());
14669 // Being lazy here. Refanys are tricky in terms of gc tracking.
14670 // Since it is uncommon, just don't perform struct promotion in any method that contains mkrefany.
14672 JITDUMP("disabling struct promotion because of mkrefany\n");
14673 fgNoStructPromotion = true;
14675 oper = GT_MKREFANY;
14676 assertImp(sz == sizeof(unsigned));
14678 _impResolveToken(CORINFO_TOKENKIND_Class);
14680 JITDUMP(" %08X", resolvedToken.token);
14682 op2 = impTokenToHandle(&resolvedToken, nullptr, TRUE);
14683 if (op2 == nullptr)
14684 { // compDonotInline()
14688 if (tiVerificationNeeded)
14690 typeInfo tiPtr = impStackTop().seTypeInfo;
14691 typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
14693 Verify(!verIsByRefLike(tiInstr), "mkrefany of byref-like class");
14694 Verify(!tiPtr.IsReadonlyByRef(), "readonly byref used with mkrefany");
14695 Verify(typeInfo::AreEquivalent(tiPtr.DereferenceByRef(), tiInstr), "type mismatch");
14698 accessAllowedResult =
14699 info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14700 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14702 op1 = impPopStack().val;
14704 // @SPECVIOLATION: TYP_INT should not be allowed here by a strict reading of the spec.
14705 // But JIT32 allowed it, so we continue to allow it.
14706 assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL || op1->TypeGet() == TYP_INT);
14708 // MKREFANY returns a struct. op2 is the class token.
14709 op1 = gtNewOperNode(oper, TYP_STRUCT, op1, op2);
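// The struct is shaped like System.TypedReference: it carries the address and
// the type handle as its two fields.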
14711 impPushOnStack(op1, verMakeTypeInfo(impGetRefAnyClass()));
14717 assertImp(sz == sizeof(unsigned));
14719 _impResolveToken(CORINFO_TOKENKIND_Class);
14721 JITDUMP(" %08X", resolvedToken.token);
14725 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
14727 if (tiVerificationNeeded)
14729 typeInfo tiPtr = impStackTop().seTypeInfo;
14731 // Make sure we have a byref
14732 if (!tiPtr.IsByRef())
14734 Verify(false, "pointer not byref");
14735 compUnsafeCastUsed = true;
14737 typeInfo tiPtrVal = DereferenceByRef(tiPtr);
14739 if (!tiCompatibleWith(tiPtrVal, tiRetVal, false))
14741 Verify(false, "type of address incompatible with type operand");
14742 compUnsafeCastUsed = true;
14744 tiRetVal.NormaliseForStack();
14748 compUnsafeCastUsed = true;
14751 if (eeIsValueClass(resolvedToken.hClass))
14753 lclTyp = TYP_STRUCT;
14758 opcode = CEE_LDIND_REF;
14759 goto LDIND_POST_VERIFY;
14762 op1 = impPopStack().val;
14764 assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL);
14766 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
14767 if (impIsPrimitive(jitTyp))
14769 op1 = gtNewOperNode(GT_IND, JITtype2varType(jitTyp), op1);
14771 // Could point anywhere, for example a boxed class static int
14772 op1->gtFlags |= GTF_IND_TGTANYWHERE | GTF_GLOB_REF;
14773 assertImp(varTypeIsArithmetic(op1->gtType));
14777 // OBJ returns a struct
14778 // and an inline argument which is the class token of the loaded obj
14779 op1 = gtNewObjNode(resolvedToken.hClass, op1);
14781 op1->gtFlags |= GTF_EXCEPT;
14783 impPushOnStack(op1, tiRetVal);
14788 if (tiVerificationNeeded)
14790 typeInfo tiArray = impStackTop().seTypeInfo;
14791 Verify(verIsSDArray(tiArray), "bad array");
14792 tiRetVal = typeInfo(TI_INT);
14795 op1 = impPopStack().val;
14796 if (!opts.MinOpts() && !opts.compDbgCode)
14798 /* Use GT_ARR_LENGTH operator so rng check opts see this */
14799 GenTreeArrLen* arrLen =
14800 new (this, GT_ARR_LENGTH) GenTreeArrLen(TYP_INT, op1, offsetof(CORINFO_Array, length));
14802 /* Mark the block as containing a length expression */
14804 if (op1->gtOper == GT_LCL_VAR)
14806 block->bbFlags |= BBF_HAS_IDX_LEN;
14813 /* Create the expression "*(array_addr + ArrLenOffs)" */
14814 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
14815 gtNewIconNode(offsetof(CORINFO_Array, length), TYP_I_IMPL));
14816 op1 = gtNewOperNode(GT_IND, TYP_INT, op1);
14817 op1->gtFlags |= GTF_IND_ARR_LEN;
14820 /* An indirection will cause a GPF if the address is null */
14821 op1->gtFlags |= GTF_EXCEPT;
14823 /* Push the result back on the stack */
14824 impPushOnStack(op1, tiRetVal);
14828 op1 = gtNewHelperCallNode(CORINFO_HELP_USER_BREAKPOINT, TYP_VOID);
14832 if (opts.compDbgCode)
14834 op1 = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
14839 /******************************** NYI *******************************/
14842 OutputDebugStringA("CLR: Invalid x86 breakpoint in IL stream\n");
14845 case CEE_MACRO_END:
14848 BADCODE3("unknown opcode", ": %02X", (int)opcode);
14852 prevOpcode = opcode;
14855 assert(!insertLdloc || opcode == CEE_DUP);
14858 assert(!insertLdloc);
14861 #undef _impResolveToken
14864 #pragma warning(pop)
14867 // Push a local/argument tree on the operand stack
14868 void Compiler::impPushVar(GenTree* op, typeInfo tiRetVal)
14870 tiRetVal.NormaliseForStack();
14872 if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init) && tiRetVal.IsThisPtr())
14874 tiRetVal.SetUninitialisedObjRef();
14877 impPushOnStack(op, tiRetVal);
14880 // Load a local/argument on the operand stack
14881 // lclNum is an index into lvaTable *NOT* the arg/lcl index in the IL
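// For "normalize on load" locals we use the real (small) type, so the load
// itself normalizes the value; all other locals are pushed with their
// stack-normalized (actual) type.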
14882 void Compiler::impLoadVar(unsigned lclNum, IL_OFFSET offset, typeInfo tiRetVal)
14886 if (lvaTable[lclNum].lvNormalizeOnLoad())
14888 lclTyp = lvaGetRealType(lclNum);
14892 lclTyp = lvaGetActualType(lclNum);
14895 impPushVar(gtNewLclvNode(lclNum, lclTyp, offset), tiRetVal);
14898 // Load an argument on the operand stack
14899 // Shared by the various CEE_LDARG opcodes
14900 // ilArgNum is the argument index as specified in IL.
14901 // It will be mapped to the correct lvaTable index
14902 void Compiler::impLoadArg(unsigned ilArgNum, IL_OFFSET offset)
14904 Verify(ilArgNum < info.compILargsCount, "bad arg num");
14906 if (compIsForInlining())
14908 if (ilArgNum >= info.compArgsCount)
14910 compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_ARGUMENT_NUMBER);
14914 impPushVar(impInlineFetchArg(ilArgNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo),
14915 impInlineInfo->lclVarInfo[ilArgNum].lclVerTypeInfo);
14919 if (ilArgNum >= info.compArgsCount)
14924 unsigned lclNum = compMapILargNum(ilArgNum); // account for possible hidden param
14926 if (lclNum == info.compThisArg)
14928 lclNum = lvaArg0Var;
14931 impLoadVar(lclNum, offset);
14935 // Load a local on the operand stack
14936 // Shared by the various CEE_LDLOC opcodes
14937 // ilLclNum is the local index as specified in IL.
14938 // It will be mapped to the correct lvaTable index
14939 void Compiler::impLoadLoc(unsigned ilLclNum, IL_OFFSET offset)
14941 if (tiVerificationNeeded)
14943 Verify(ilLclNum < info.compMethodInfo->locals.numArgs, "bad loc num");
14944 Verify(info.compInitMem, "initLocals not set");
14947 if (compIsForInlining())
14949 if (ilLclNum >= info.compMethodInfo->locals.numArgs)
14951 compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_LOCAL_NUMBER);
14955 // Get the local type
14956 var_types lclTyp = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclTypeInfo;
14958 typeInfo tiRetVal = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclVerTypeInfo;
14960 /* Have we allocated a temp for this local? */
14962 unsigned lclNum = impInlineFetchLocal(ilLclNum DEBUGARG("Inline ldloc first use temp"));
14964 // All vars of inlined methods should be !lvNormalizeOnLoad()
14966 assert(!lvaTable[lclNum].lvNormalizeOnLoad());
14967 lclTyp = genActualType(lclTyp);
14969 impPushVar(gtNewLclvNode(lclNum, lclTyp), tiRetVal);
14973 if (ilLclNum >= info.compMethodInfo->locals.numArgs)
14978 unsigned lclNum = info.compArgsCount + ilLclNum;
14980 impLoadVar(lclNum, offset);
14984 #ifdef _TARGET_ARM_
14985 /**************************************************************************************
14987 * When assigning a vararg call src to a HFA lcl dest, mark that we cannot promote the
14988 * dst struct, because struct promotion will turn it into a float/double variable while
14989 * the rhs will be an int/long variable. We don't generate code that assigns an int into
14990 * a float, but there is nothing that would prevent us from doing so. The tree, however,
14991 * would look like: (=, (typ_float, typ_int)) or (GT_TRANSFER, (typ_float, typ_int))
14993 * tmpNum - the lcl dst variable num that is a struct.
14994 * src - the src tree assigned to the dest that is a struct/int (when varargs call.)
14995 * hClass - the type handle for the struct variable.
14997 * TODO-ARM-CQ: [301608] This is a rare scenario with varargs and struct promotion coming into play,
14998 * however, we could generate code to transfer from int to float registers
14999 * (transfer, not a cast.)
15002 void Compiler::impMarkLclDstNotPromotable(unsigned tmpNum, GenTreePtr src, CORINFO_CLASS_HANDLE hClass)
15004 if (src->gtOper == GT_CALL && src->gtCall.IsVarargs() && IsHfa(hClass))
15006 int hfaSlots = GetHfaCount(hClass);
15007 var_types hfaType = GetHfaType(hClass);
15009 // If we have varargs, we morph the method's return type to be "int" at the importer, irrespective of
15010 // its original type (struct/float), because the ABI calls for the return in integer registers.
15011 // We don't want struct promotion to replace an expression like this:
15012 // lclFld_int = callvar_int() into lclFld_float = callvar_int();
15013 // This means an int is getting assigned to a float without a cast. Prevent the promotion.
15014 if ((hfaType == TYP_DOUBLE && hfaSlots == sizeof(double) / REGSIZE_BYTES) ||
15015 (hfaType == TYP_FLOAT && hfaSlots == sizeof(float) / REGSIZE_BYTES))
15017 // Make sure this struct type stays as struct so we can receive the call in a struct.
15018 lvaTable[tmpNum].lvIsMultiRegRet = true;
15022 #endif // _TARGET_ARM_
15024 #if FEATURE_MULTIREG_RET
15025 GenTreePtr Compiler::impAssignMultiRegTypeToVar(GenTreePtr op, CORINFO_CLASS_HANDLE hClass)
15027 unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Return value temp for multireg return."));
15028 impAssignTempGen(tmpNum, op, hClass, (unsigned)CHECK_SPILL_NONE);
15029 GenTreePtr ret = gtNewLclvNode(tmpNum, op->gtType);
15031 // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
15032 ret->gtFlags |= GTF_DONT_CSE;
15034 assert(IsMultiRegReturnedType(hClass));
15036 // Mark the var so that fields are not promoted and stay together.
15037 lvaTable[tmpNum].lvIsMultiRegRet = true;
15041 #endif // FEATURE_MULTIREG_RET
15043 // do import for a return
15044 // returns false if inlining was aborted
15045 // opcode can be ret or call in the case of a tail.call
15046 bool Compiler::impReturnInstruction(BasicBlock* block, int prefixFlags, OPCODE& opcode)
15048 if (tiVerificationNeeded)
15050 verVerifyThisPtrInitialised();
15052 unsigned expectedStack = 0;
15053 if (info.compRetType != TYP_VOID)
15055 typeInfo tiVal = impStackTop().seTypeInfo;
15056 typeInfo tiDeclared =
15057 verMakeTypeInfo(info.compMethodInfo->args.retType, info.compMethodInfo->args.retTypeClass);
15059 Verify(!verIsByRefLike(tiDeclared) || verIsSafeToReturnByRef(tiVal), "byref return");
15061 Verify(tiCompatibleWith(tiVal, tiDeclared.NormaliseForStack(), true), "type mismatch");
15064 Verify(verCurrentState.esStackDepth == expectedStack, "stack non-empty on return");
15067 GenTree* op2 = nullptr;
15068 GenTree* op1 = nullptr;
15069 CORINFO_CLASS_HANDLE retClsHnd = nullptr;
15071 if (info.compRetType != TYP_VOID)
15073 StackEntry se = impPopStack(retClsHnd);
15076 if (!compIsForInlining())
15078 impBashVarAddrsToI(op2);
15079 op2 = impImplicitIorI4Cast(op2, info.compRetType);
15080 op2 = impImplicitR4orR8Cast(op2, info.compRetType);
15081 assertImp((genActualType(op2->TypeGet()) == genActualType(info.compRetType)) ||
15082 ((op2->TypeGet() == TYP_I_IMPL) && (info.compRetType == TYP_BYREF)) ||
15083 ((op2->TypeGet() == TYP_BYREF) && (info.compRetType == TYP_I_IMPL)) ||
15084 (varTypeIsFloating(op2->gtType) && varTypeIsFloating(info.compRetType)) ||
15085 (varTypeIsStruct(op2) && varTypeIsStruct(info.compRetType)));
15088 if (opts.compGcChecks && info.compRetType == TYP_REF)
15090 // DDB 3483 : JIT Stress: early termination of GC ref's life time in exception code path
15091 // VSW 440513: Incorrect gcinfo on the return value under COMPlus_JitGCChecks=1 for methods with
15094 assert(op2->gtType == TYP_REF);
15096 // confirm that the argument is a GC pointer (for debugging (GC stress))
15097 GenTreeArgList* args = gtNewArgList(op2);
15098 op2 = gtNewHelperCallNode(CORINFO_HELP_CHECK_OBJ, TYP_REF, 0, args);
15102 printf("\ncompGcChecks tree:\n");
15110 // inlinee's stack should be empty now.
15111 assert(verCurrentState.esStackDepth == 0);
15116 printf("\n\n Inlinee Return expression (before normalization) =>\n");
15121 // Make sure the type matches the original call.
15123 var_types returnType = genActualType(op2->gtType);
15124 var_types originalCallType = impInlineInfo->inlineCandidateInfo->fncRetType;
15125 if ((returnType != originalCallType) && (originalCallType == TYP_STRUCT))
15127 originalCallType = impNormStructType(impInlineInfo->inlineCandidateInfo->methInfo.args.retTypeClass);
15130 if (returnType != originalCallType)
15132 compInlineResult->NoteFatal(InlineObservation::CALLSITE_RETURN_TYPE_MISMATCH);
15136 // Below, we are going to set impInlineInfo->retExpr to the tree with the return
15137 // expression. At this point, retExpr could already be set if there are multiple
15138 // return blocks (meaning lvaInlineeReturnSpillTemp != BAD_VAR_NUM) and one of
15139 // the other blocks already set it. If there is only a single return block,
15140 // retExpr shouldn't be set. However, this is not true if we reimport a block
15141 // with a return. In that case, retExpr will be set, then the block will be
15142 // reimported, but retExpr won't get cleared as part of setting the block to
15143 // be reimported. The reimported retExpr value should be the same, so even if
15144 // we don't unconditionally overwrite it, it shouldn't matter.
15145 if (info.compRetNativeType != TYP_STRUCT)
15147 // compRetNativeType is not TYP_STRUCT.
15148 // This implies it could be either a scalar type or SIMD vector type or
15149 // a struct type that can be normalized to a scalar type.
15151 if (varTypeIsStruct(info.compRetType))
15153 noway_assert(info.compRetBuffArg == BAD_VAR_NUM);
15154 // adjust the type away from struct to integral
15155 // and no normalizing
15156 op2 = impFixupStructReturnType(op2, retClsHnd);
15160 // Do we have to normalize?
15161 var_types fncRealRetType = JITtype2varType(info.compMethodInfo->args.retType);
15162 if ((varTypeIsSmall(op2->TypeGet()) || varTypeIsSmall(fncRealRetType)) &&
15163 fgCastNeeded(op2, fncRealRetType))
15165 // Small-typed return values are normalized by the callee
15166 op2 = gtNewCastNode(TYP_INT, op2, fncRealRetType);
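// e.g. for a method declared to return int8, the cast re-extends the low bits
// of the TYP_INT value here in the callee, so callers can rely on a
// normalized return value.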
15170 if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15172 assert(info.compRetNativeType != TYP_VOID &&
15173 (fgMoreThanOneReturnBlock() || impInlineInfo->hasPinnedLocals));
15175 // This is a bit of a workaround...
15176 // If we are inlining a call that returns a struct, where the actual "native" return type is
15177 // not a struct (for example, the struct is composed of exactly one int, and the native
15178 // return type is thus an int), and the inlinee has multiple return blocks (thus,
15179 // lvaInlineeReturnSpillTemp is != BAD_VAR_NUM, and is the index of a local var that is set
15180 // to the *native* return type), and at least one of the return blocks is the result of
15181 // a call, then we have a problem. The situation is like this (from a failed test case):
15184 // // Note: valuetype plinq_devtests.LazyTests/LIX is a struct with only a single int
15185 // call !!0 [mscorlib]System.Threading.LazyInitializer::EnsureInitialized<valuetype
15186 // plinq_devtests.LazyTests/LIX>(!!0&, bool&, object&, class [mscorlib]System.Func`1<!!0>)
15190 // ldobj !!T // this gets bashed to a GT_LCL_FLD, type TYP_INT
15193 // call !!0 System.Threading.LazyInitializer::EnsureInitializedCore<!!0>(!!0&, bool&,
15194 // object&, class System.Func`1<!!0>)
15197 // In the code above, when we call impFixupStructReturnType(), we will change the op2 return type
15198 // of the inlinee return node, but we don't do that for GT_CALL nodes, which we delay until
15199 // morphing when we call fgFixupStructReturn(). We do this, apparently, to handle nested
15200 // inlining properly by leaving the correct type on the GT_CALL node through importing.
15202 // To fix this, for this case, we temporarily change the GT_CALL node type to the
15203 // native return type, which is what it will be set to eventually. We generate the
15204 // assignment to the return temp, using the correct type, and then restore the GT_CALL
15205 // node type. During morphing, the GT_CALL will get the correct, final, native return type.
15207 bool restoreType = false;
15208 if ((op2->OperGet() == GT_CALL) && (info.compRetType == TYP_STRUCT))
15210 noway_assert(op2->TypeGet() == TYP_STRUCT);
15211 op2->gtType = info.compRetNativeType;
15212 restoreType = true;
15215 impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(),
15216 (unsigned)CHECK_SPILL_ALL);
15218 GenTreePtr tmpOp2 = gtNewLclvNode(lvaInlineeReturnSpillTemp, op2->TypeGet());
15222 op2->gtType = TYP_STRUCT; // restore it to what it was
15228 if (impInlineInfo->retExpr)
15230 // Some other block(s) have seen the CEE_RET first.
15231 // Better they spilled to the same temp.
15232 assert(impInlineInfo->retExpr->gtOper == GT_LCL_VAR);
15233 assert(impInlineInfo->retExpr->gtLclVarCommon.gtLclNum == op2->gtLclVarCommon.gtLclNum);
15241 printf("\n\n Inlinee Return expression (after normalization) =>\n");
15246 // Report the return expression
15247 impInlineInfo->retExpr = op2;
15251 // compRetNativeType is TYP_STRUCT.
15252 // This implies that struct return via RetBuf arg or multi-reg struct return
15254 GenTreePtr iciCall = impInlineInfo->iciCall;
15255 assert(iciCall->gtOper == GT_CALL);
15257 // Assign the inlinee return into a spill temp.
15258 // spill temp only exists if there are multiple return points
15259 if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15261 // in this case we have to insert multiple struct copies to the temp
15262 // and the retexpr is just the temp.
15263 assert(info.compRetNativeType != TYP_VOID);
15264 assert(fgMoreThanOneReturnBlock() || impInlineInfo->hasPinnedLocals);
15266 impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(),
15267 (unsigned)CHECK_SPILL_ALL);
15270 #if defined(_TARGET_ARM_) || defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15271 #if defined(_TARGET_ARM_)
15272 // TODO-ARM64-NYI: HFA
15273 // TODO-AMD64-Unix and TODO-ARM: once the ARM64 functionality is implemented, the
15274 // next ifdefs could be refactored into a single method with the ifdef inside.
15275 if (IsHfa(retClsHnd))
15277 // Same as !IsHfa but just don't bother with impAssignStructPtr.
15278 #else // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15279 ReturnTypeDesc retTypeDesc;
15280 retTypeDesc.InitializeStructReturnType(this, retClsHnd);
15281 unsigned retRegCount = retTypeDesc.GetReturnRegCount();
15283 if (retRegCount != 0)
15285 // If single eightbyte, the return type would have been normalized and there won't be a temp var.
15286 // This code will be called only if the struct return has not been normalized (i.e. 2 eightbytes -
15287 // max allowed.)
15288 assert(retRegCount == MAX_RET_REG_COUNT);
15289 // Same as !structDesc.passedInRegisters but just don't bother with impAssignStructPtr.
15290 CLANG_FORMAT_COMMENT_ANCHOR;
15291 #endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15293 if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15295 if (!impInlineInfo->retExpr)
15297 #if defined(_TARGET_ARM_)
15298 impInlineInfo->retExpr = gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType);
15299 #else // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15300 // The inlinee compiler has figured out the type of the temp already. Use it here.
15301 impInlineInfo->retExpr =
15302 gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType);
15303 #endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15308 impInlineInfo->retExpr = op2;
15312 #elif defined(_TARGET_ARM64_)
15313 ReturnTypeDesc retTypeDesc;
15314 retTypeDesc.InitializeStructReturnType(this, retClsHnd);
15315 unsigned retRegCount = retTypeDesc.GetReturnRegCount();
15317 if (retRegCount != 0)
15319 assert(!iciCall->AsCall()->HasRetBufArg());
15320 assert(retRegCount >= 2);
15321 if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15323 if (!impInlineInfo->retExpr)
15325 // The inlinee compiler has figured out the type of the temp already. Use it here.
15326 impInlineInfo->retExpr =
15327 gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType);
15332 impInlineInfo->retExpr = op2;
15336 #endif // defined(_TARGET_ARM64_)
15338 assert(iciCall->AsCall()->HasRetBufArg());
15339 GenTreePtr dest = gtCloneExpr(iciCall->gtCall.gtCallArgs->gtOp.gtOp1);
15340 // spill temp only exists if there are multiple return points
15341 if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15343 // if this is the first return we have seen set the retExpr
15344 if (!impInlineInfo->retExpr)
15346 impInlineInfo->retExpr =
15347 impAssignStructPtr(dest, gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType),
15348 retClsHnd, (unsigned)CHECK_SPILL_ALL);
15353 impInlineInfo->retExpr = impAssignStructPtr(dest, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL);
15360 if (compIsForInlining())
15365 if (info.compRetType == TYP_VOID)
15368 op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID);
15370 else if (info.compRetBuffArg != BAD_VAR_NUM)
15372 // Assign value to return buff (first param)
15373 GenTreePtr retBuffAddr = gtNewLclvNode(info.compRetBuffArg, TYP_BYREF, impCurStmtOffs);
15375 op2 = impAssignStructPtr(retBuffAddr, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL);
15376 impAppendTree(op2, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
15378 // There are cases where the address of the implicit RetBuf should be returned explicitly (in RAX).
15379 CLANG_FORMAT_COMMENT_ANCHOR;
15381 #if defined(_TARGET_AMD64_)
15383 // The x64 (System V and Win64) calling convention requires us to
15384 // return the implicit return buffer explicitly (in RAX).
15385 // Change the return type to be BYREF.
15386 op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF));
15387 #else // !defined(_TARGET_AMD64_)
15388 // On non-AMD64 targets the profiler hook requires us to return the implicit RetBuf explicitly (in RAX).
15389 // In such a case the return value of the function is changed to BYREF.
15390 // If profiler hook is not needed the return type of the function is TYP_VOID.
15391 if (compIsProfilerHookNeeded())
15393 op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF));
15398 op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID);
15400 #endif // !defined(_TARGET_AMD64_)
15402 else if (varTypeIsStruct(info.compRetType))
15404 #if !FEATURE_MULTIREG_RET
15405 // For both ARM architectures the HFA native types are maintained as structs.
15406 // Also on System V AMD64, multireg struct returns are left as structs.
15407 noway_assert(info.compRetNativeType != TYP_STRUCT);
15409 op2 = impFixupStructReturnType(op2, retClsHnd);
15411 op1 = gtNewOperNode(GT_RETURN, genActualType(info.compRetNativeType), op2);
15416 op1 = gtNewOperNode(GT_RETURN, genActualType(info.compRetType), op2);
15419 // We must have imported a tailcall and jumped to RET
15420 if (prefixFlags & PREFIX_TAILCALL)
15422 #ifndef _TARGET_AMD64_
15424 // This cannot be asserted on Amd64 since we permit the following IL pattern:
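//      tail.call
//      pop
//      ret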
15428 assert(verCurrentState.esStackDepth == 0 && impOpcodeIsCallOpcode(opcode));
15431 opcode = CEE_RET; // To prevent trying to spill if CALL_SITE_BOUNDARIES
15433 // impImportCall() would have already appended TYP_VOID calls
15434 if (info.compRetType == TYP_VOID)
15440 impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
15442 // Remember at which BC offset the tree was finished
15443 impNoteLastILoffs();
15448 /*****************************************************************************
15449 * Mark the block as unimported.
15450 * Note that the caller is responsible for calling impImportBlockPending(),
15451 * with the appropriate stack-state
15454 inline void Compiler::impReimportMarkBlock(BasicBlock* block)
15457 if (verbose && (block->bbFlags & BBF_IMPORTED))
15459 printf("\nBB%02u will be reimported\n", block->bbNum);
15463 block->bbFlags &= ~BBF_IMPORTED;
15466 /*****************************************************************************
15467 * Mark the successors of the given block as unimported.
15468 * Note that the caller is responsible for calling impImportBlockPending()
15469 * for all the successors, with the appropriate stack-state.
15472 void Compiler::impReimportMarkSuccessors(BasicBlock* block)
15474 for (unsigned i = 0; i < block->NumSucc(); i++)
15476 impReimportMarkBlock(block->GetSucc(i));
15480 /*****************************************************************************
15482 * Filter wrapper to handle only the passed-in exception code
15486 LONG FilterVerificationExceptions(PEXCEPTION_POINTERS pExceptionPointers, LPVOID lpvParam)
15488 if (pExceptionPointers->ExceptionRecord->ExceptionCode == SEH_VERIFICATION_EXCEPTION)
15490 return EXCEPTION_EXECUTE_HANDLER;
15493 return EXCEPTION_CONTINUE_SEARCH;
15496 void Compiler::impVerifyEHBlock(BasicBlock* block, bool isTryStart)
15498 assert(block->hasTryIndex());
15499 assert(!compIsForInlining());
15501 unsigned tryIndex = block->getTryIndex();
15502 EHblkDsc* HBtab = ehGetDsc(tryIndex);
15506 assert(block->bbFlags & BBF_TRY_BEG);
15508 // The Stack must be empty
15510 if (block->bbStkDepth != 0)
15512 BADCODE("Evaluation stack must be empty on entry into a try block");
15516 // Save the stack contents, we'll need to restore it later
15518 SavedStack blockState;
15519 impSaveStackState(&blockState, false);
15521 while (HBtab != nullptr)
15525 // Are we verifying that an instance constructor properly initializes its 'this' pointer once?
15526 // We do not allow the 'this' pointer to be uninitialized when entering most kinds of try regions
15528 if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
15530 // We trigger an invalid program exception here unless we have a try/fault region.
15532 if (HBtab->HasCatchHandler() || HBtab->HasFinallyHandler() || HBtab->HasFilter())
15535 "The 'this' pointer of an instance constructor is not intialized upon entry to a try region");
15539 // Allow a try/fault region to proceed.
15540 assert(HBtab->HasFaultHandler());
15544 /* Recursively process the handler block */
15545 BasicBlock* hndBegBB = HBtab->ebdHndBeg;
15547 // Construct the proper verification stack state
15548 // either empty or one that contains just
15549 // the Exception Object that we are dealing with
15551 verCurrentState.esStackDepth = 0;
15553 if (handlerGetsXcptnObj(hndBegBB->bbCatchTyp))
15555 CORINFO_CLASS_HANDLE clsHnd;
15557 if (HBtab->HasFilter())
15559 clsHnd = impGetObjectClass();
15563 CORINFO_RESOLVED_TOKEN resolvedToken;
15565 resolvedToken.tokenContext = impTokenLookupContextHandle;
15566 resolvedToken.tokenScope = info.compScopeHnd;
15567 resolvedToken.token = HBtab->ebdTyp;
15568 resolvedToken.tokenType = CORINFO_TOKENKIND_Class;
15569 info.compCompHnd->resolveToken(&resolvedToken);
15571 clsHnd = resolvedToken.hClass;
15574 // push the catch arg on the stack, spill to a temp if necessary
15575 // Note: can update HBtab->ebdHndBeg!
15576 hndBegBB = impPushCatchArgOnStack(hndBegBB, clsHnd);
15579 // Queue up the handler for importing
15581 impImportBlockPending(hndBegBB);
15583 if (HBtab->HasFilter())
15585 /* @VERIFICATION : Ideally the end of filter state should get
15586 propagated to the catch handler, this is an incompleteness,
15587 but is not a security/compliance issue, since the only
15588 interesting state is the 'thisInit' state.
15591 verCurrentState.esStackDepth = 0;
15593 BasicBlock* filterBB = HBtab->ebdFilter;
15595 // push the catch arg on the stack, spill to a temp if necessary
15596 // Note: can update HBtab->ebdFilter!
15597 filterBB = impPushCatchArgOnStack(filterBB, impGetObjectClass());
15599 impImportBlockPending(filterBB);
15602 else if (verTrackObjCtorInitState && HBtab->HasFaultHandler())
15604 /* Recursively process the handler block */
15606 verCurrentState.esStackDepth = 0;
15608 // Queue up the fault handler for importing
15610 impImportBlockPending(HBtab->ebdHndBeg);
15613 // Now process our enclosing try index (if any)
15615 tryIndex = HBtab->ebdEnclosingTryIndex;
15616 if (tryIndex == EHblkDsc::NO_ENCLOSING_INDEX)
15622 HBtab = ehGetDsc(tryIndex);
15626 // Restore the stack contents
15627 impRestoreStackState(&blockState);
15630 //***************************************************************
15631 // Import the instructions for the given basic block. Perform
15632 // verification, throwing an exception on failure. Push any successor blocks that are enabled for the first
15633 // time, or whose verification pre-state is changed.
15636 #pragma warning(push)
15637 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
15639 void Compiler::impImportBlock(BasicBlock* block)
15641 // BBF_INTERNAL blocks only exist during importation due to EH canonicalization. We need to
15642 // handle them specially. In particular, there is no IL to import for them, but we do need
15643 // to mark them as imported and put their successors on the pending import list.
15644 if (block->bbFlags & BBF_INTERNAL)
15646 JITDUMP("Marking BBF_INTERNAL block BB%02u as BBF_IMPORTED\n", block->bbNum);
15647 block->bbFlags |= BBF_IMPORTED;
15649 for (unsigned i = 0; i < block->NumSucc(); i++)
15651 impImportBlockPending(block->GetSucc(i));
15661 /* Make the block globally available */
15666 /* Initialize the debug variables */
15667 impCurOpcName = "unknown";
15668 impCurOpcOffs = block->bbCodeOffs;
15671 /* Set the current stack state to the merged result */
15672 verResetCurrentState(block, &verCurrentState);
15674 /* Now walk the code and import the IL into GenTrees */
15676 struct FilterVerificationExceptionsParam
15681 FilterVerificationExceptionsParam param;
15683 param.pThis = this;
15684 param.block = block;
15686 PAL_TRY(FilterVerificationExceptionsParam*, pParam, ¶m)
15688 /* @VERIFICATION : For now, the only state propagation from try
15689 to its handler is the "thisInit" state (stack is empty at start of try).
15690 In general, for state that we track in verification, we need to
15691 model the possibility that an exception might happen at any IL
15692 instruction, so we really need to merge all states that obtain
15693 between IL instructions in a try block into the start states of
15696 However, we do not allow the 'this' pointer to be uninitialized when
15697 entering most kinds of try regions (only try/fault are allowed to have
15698 an uninitialized this pointer on entry to the try)
15700 Fortunately, the stack is thrown away when an exception
15701 leads to a handler, so we don't have to worry about that.
15702 We DO, however, have to worry about the "thisInit" state.
15703 But only for the try/fault case.
15705 The only allowed transition is from TIS_Uninit to TIS_Init.
15707 So for a try/fault region for the fault handler block
15708 we will merge the start state of the try begin
15709 and the post-state of each block that is part of this try region
15712 // merge the start state of the try begin
15714 if (pParam->block->bbFlags & BBF_TRY_BEG)
15716 pParam->pThis->impVerifyEHBlock(pParam->block, true);
15719 pParam->pThis->impImportBlockCode(pParam->block);
15721 // As discussed above:
15722 // merge the post-state of each block that is part of this try region
15724 if (pParam->block->hasTryIndex())
15726 pParam->pThis->impVerifyEHBlock(pParam->block, false);
15729 PAL_EXCEPT_FILTER(FilterVerificationExceptions)
15731 verHandleVerificationFailure(block DEBUGARG(false));
15735 if (compDonotInline())
15740 assert(!compDonotInline());
15742 markImport = false;
15746 unsigned baseTmp = NO_BASE_TMP; // input temps assigned to successor blocks
15747 bool reimportSpillClique = false;
15748 BasicBlock* tgtBlock = nullptr;
15750 /* If the stack is non-empty, we might have to spill its contents */
15752 if (verCurrentState.esStackDepth != 0)
15754 impBoxTemp = BAD_VAR_NUM; // if a box temp is used in a block that leaves something
15755 // on the stack, its lifetime is hard to determine, simply
15756 // don't reuse such temps.
15758 GenTreePtr addStmt = nullptr;
15760 /* Do the successors of 'block' have any other predecessors ?
15761 We do not want to do some of the optimizations related to multiRef
15762 if we can reimport blocks */
15764 unsigned multRef = impCanReimport ? unsigned(~0) : 0;
15766 switch (block->bbJumpKind)
15770 /* Temporarily remove the 'jtrue' from the end of the tree list */
15772 assert(impTreeLast);
15773 assert(impTreeLast->gtOper == GT_STMT);
15774 assert(impTreeLast->gtStmt.gtStmtExpr->gtOper == GT_JTRUE);
15776 addStmt = impTreeLast;
15777 impTreeLast = impTreeLast->gtPrev;
15779 /* Note if the next block has more than one ancestor */
15781 multRef |= block->bbNext->bbRefs;
15783 /* Does the next block have temps assigned? */
15785 baseTmp = block->bbNext->bbStkTempsIn;
15786 tgtBlock = block->bbNext;
15788 if (baseTmp != NO_BASE_TMP)
15793 /* Try the target of the jump then */
15795 multRef |= block->bbJumpDest->bbRefs;
15796 baseTmp = block->bbJumpDest->bbStkTempsIn;
15797 tgtBlock = block->bbJumpDest;
15801 multRef |= block->bbJumpDest->bbRefs;
15802 baseTmp = block->bbJumpDest->bbStkTempsIn;
15803 tgtBlock = block->bbJumpDest;
15807 multRef |= block->bbNext->bbRefs;
15808 baseTmp = block->bbNext->bbStkTempsIn;
15809 tgtBlock = block->bbNext;
15814 BasicBlock** jmpTab;
15817 /* Temporarily remove the GT_SWITCH from the end of the tree list */
15819 assert(impTreeLast);
15820 assert(impTreeLast->gtOper == GT_STMT);
15821 assert(impTreeLast->gtStmt.gtStmtExpr->gtOper == GT_SWITCH);
15823 addStmt = impTreeLast;
15824 impTreeLast = impTreeLast->gtPrev;
15826 jmpCnt = block->bbJumpSwt->bbsCount;
15827 jmpTab = block->bbJumpSwt->bbsDstTab;
15831 tgtBlock = (*jmpTab);
15833 multRef |= tgtBlock->bbRefs;
15835 // Thanks to spill cliques, we should have assigned all or none
15836 assert((baseTmp == NO_BASE_TMP) || (baseTmp == tgtBlock->bbStkTempsIn));
15837 baseTmp = tgtBlock->bbStkTempsIn;
15842 } while (++jmpTab, --jmpCnt);
15846 case BBJ_CALLFINALLY:
15847 case BBJ_EHCATCHRET:
15849 case BBJ_EHFINALLYRET:
15850 case BBJ_EHFILTERRET:
15852 NO_WAY("can't have 'unreached' end of BB with non-empty stack");
15856 noway_assert(!"Unexpected bbJumpKind");
15860 assert(multRef >= 1);
15862 /* Do we have a base temp number? */
15864 bool newTemps = (baseTmp == NO_BASE_TMP);
15868 /* Grab enough temps for the whole stack */
15869 baseTmp = impGetSpillTmpBase(block);
15872 /* Spill all stack entries into temps */
15873 unsigned level, tempNum;
15875 JITDUMP("\nSpilling stack entries into temps\n");
15876 for (level = 0, tempNum = baseTmp; level < verCurrentState.esStackDepth; level++, tempNum++)
15878 GenTreePtr tree = verCurrentState.esStack[level].val;
15880 /* VC generates code where it pushes a byref from one branch, and an int (ldc.i4 0) from
15881 the other. This should merge to a byref in unverifiable code.
15882 However, if the branch which leaves the TYP_I_IMPL on the stack is imported first, the
15883 successor would be imported assuming there was a TYP_I_IMPL on
15884 the stack. Thus the value would not get GC-tracked. Hence,
15885 change the temp to TYP_BYREF and reimport the successors.
15886 Note: We should only allow this in unverifiable code.
15888 if (tree->gtType == TYP_BYREF && lvaTable[tempNum].lvType == TYP_I_IMPL && !verNeedsVerification())
15890 lvaTable[tempNum].lvType = TYP_BYREF;
15891 impReimportMarkSuccessors(block);
15895 #ifdef _TARGET_64BIT_
15896 if (genActualType(tree->gtType) == TYP_I_IMPL && lvaTable[tempNum].lvType == TYP_INT)
15898 if (tiVerificationNeeded && tgtBlock->bbEntryState != nullptr &&
15899 (tgtBlock->bbFlags & BBF_FAILED_VERIFICATION) == 0)
15901 // Merge the current state into the entry state of block;
15902 // the call to verMergeEntryStates must have changed
15903 // the entry state of the block by merging the int local var
15904 // and the native-int stack entry.
15905 bool changed = false;
15906 if (verMergeEntryStates(tgtBlock, &changed))
15908 impRetypeEntryStateTemps(tgtBlock);
15909 impReimportBlockPending(tgtBlock);
15914 tgtBlock->bbFlags |= BBF_FAILED_VERIFICATION;
15919 // Some other block in the spill clique set this to "int", but now we have "native int".
15920 // Change the type and go back to re-import any blocks that used the wrong type.
15921 lvaTable[tempNum].lvType = TYP_I_IMPL;
15922 reimportSpillClique = true;
15924 else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_I_IMPL)
15926 // Spill clique has decided this should be "native int", but this block only pushes an "int".
15927 // Insert a sign-extension to "native int" so we match the clique.
15928 verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, TYP_I_IMPL);
15931 // Consider the case where one branch left a 'byref' on the stack and the other leaves
15932 // an 'int'. On 32-bit, this is allowed (in non-verifiable code) since they are the same
15933 // size. JIT64 managed to make this work on 64-bit. For compatibility, we support JIT64
15934 // behavior instead of asserting and then generating bad code (where we save/restore the
15935 // low 32 bits of a byref pointer to an 'int' sized local). If the 'int' side has been
15936 // imported already, we need to change the type of the local and reimport the spill clique.
15937 // If the 'byref' side has imported, we insert a cast from int to 'native int' to match
15938 // the 'byref' size.
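// Illustrative IL for the mismatch, with both paths meeting at one merge point:
//      ldc.i4.0     // one predecessor leaves an int on the stack
//      ...
//      ldloca.s 0   // the other predecessor leaves a byref
// The shared spill temp must end up TYP_BYREF so the value stays GC-tracked.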
15939 if (!tiVerificationNeeded)
15941 if (genActualType(tree->gtType) == TYP_BYREF && lvaTable[tempNum].lvType == TYP_INT)
15943 // Some other block in the spill clique set this to "int", but now we have "byref".
15944 // Change the type and go back to re-import any blocks that used the wrong type.
15945 lvaTable[tempNum].lvType = TYP_BYREF;
15946 reimportSpillClique = true;
15948 else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_BYREF)
15950 // Spill clique has decided this should be "byref", but this block only pushes an "int".
15951 // Insert a sign-extension to "native int" so we match the clique size.
15952 verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, TYP_I_IMPL);
15955 #endif // _TARGET_64BIT_
15957 #if FEATURE_X87_DOUBLES
15958 // X87 stack doesn't differentiate between float/double
15959 // so promoting is no big deal.
15960 // For everybody else keep it as float until we have a collision and then promote
15961 // Just like for x64's TYP_INT<->TYP_I_IMPL
15963 if (multRef > 1 && tree->gtType == TYP_FLOAT)
15965 verCurrentState.esStack[level].val = gtNewCastNode(TYP_DOUBLE, tree, TYP_DOUBLE);
15968 #else // !FEATURE_X87_DOUBLES
15970 if (tree->gtType == TYP_DOUBLE && lvaTable[tempNum].lvType == TYP_FLOAT)
15972 // Some other block in the spill clique set this to "float", but now we have "double".
15973 // Change the type and go back to re-import any blocks that used the wrong type.
15974 lvaTable[tempNum].lvType = TYP_DOUBLE;
15975 reimportSpillClique = true;
15977 else if (tree->gtType == TYP_FLOAT && lvaTable[tempNum].lvType == TYP_DOUBLE)
15979 // Spill clique has decided this should be "double", but this block only pushes a "float".
15980 // Insert a cast to "double" so we match the clique.
15981 verCurrentState.esStack[level].val = gtNewCastNode(TYP_DOUBLE, tree, TYP_DOUBLE);
15984 #endif // FEATURE_X87_DOUBLES
15986 /* If addStmt has a reference to tempNum (can only happen if we
15987 are spilling to the temps already used by a previous block),
15988 we need to spill addStmt */
15990 if (addStmt && !newTemps && gtHasRef(addStmt->gtStmt.gtStmtExpr, tempNum, false))
15992 GenTreePtr addTree = addStmt->gtStmt.gtStmtExpr;
15994 if (addTree->gtOper == GT_JTRUE)
15996 GenTreePtr relOp = addTree->gtOp.gtOp1;
15997 assert(relOp->OperIsCompare());
15999 var_types type = genActualType(relOp->gtOp.gtOp1->TypeGet());
16001 if (gtHasRef(relOp->gtOp.gtOp1, tempNum, false))
16003 unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op1"));
16004 impAssignTempGen(temp, relOp->gtOp.gtOp1, level);
16005 type = genActualType(lvaTable[temp].TypeGet());
16006 relOp->gtOp.gtOp1 = gtNewLclvNode(temp, type);
16009 if (gtHasRef(relOp->gtOp.gtOp2, tempNum, false))
16011 unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op2"));
16012 impAssignTempGen(temp, relOp->gtOp.gtOp2, level);
16013 type = genActualType(lvaTable[temp].TypeGet());
16014 relOp->gtOp.gtOp2 = gtNewLclvNode(temp, type);
16019 assert(addTree->gtOper == GT_SWITCH && genActualType(addTree->gtOp.gtOp1->gtType) == TYP_I_IMPL);
16021 unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt SWITCH"));
16022 impAssignTempGen(temp, addTree->gtOp.gtOp1, level);
16023 addTree->gtOp.gtOp1 = gtNewLclvNode(temp, TYP_I_IMPL);
16027 /* Spill the stack entry, and replace with the temp */
16029 if (!impSpillStackEntry(level, tempNum
16032 true, "Spill Stack Entry"
16038 BADCODE("bad stack state");
16041 // Oops. Something went wrong when spilling. Bad code.
16042 verHandleVerificationFailure(block DEBUGARG(true));
16048 /* Put back the 'jtrue'/'switch' if we removed it earlier */
16052 impAppendStmt(addStmt, (unsigned)CHECK_SPILL_NONE);
16056 // Some of the append/spill logic works on compCurBB
16058 assert(compCurBB == block);
16060 /* Save the tree list in the block */
16061 impEndTreeList(block);
16063 // impEndTreeList sets BBF_IMPORTED on the block
16064 // We do *NOT* want to set it later than this because
16065 // impReimportSpillClique might clear it if this block is both a
16066 // predecessor and successor in the current spill clique
16067 assert(block->bbFlags & BBF_IMPORTED);
16069 // If we had a int/native int, or float/double collision, we need to re-import
16070 if (reimportSpillClique)
16072 // This will re-import all the successors of block (as well as each of their predecessors)
16073 impReimportSpillClique(block);
16075 // For blocks that haven't been imported yet, we still need to mark them as pending import.
16076 for (unsigned i = 0; i < block->NumSucc(); i++)
16078 BasicBlock* succ = block->GetSucc(i);
16079 if ((succ->bbFlags & BBF_IMPORTED) == 0)
16081 impImportBlockPending(succ);
16085 else // the normal case
16087 // otherwise just import the successors of block
16089 /* Does this block jump to any other blocks? */
16090 for (unsigned i = 0; i < block->NumSucc(); i++)
16092 impImportBlockPending(block->GetSucc(i));
16097 #pragma warning(pop)
16100 /*****************************************************************************/
16102 // Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if
16103 // necessary (and ensures that it is a member of the set of BB's on the list, by setting its byte in
16104 // impPendingBlockMembers). Merges the current verification state into the verification state of "block"
16105 // (its "pre-state").
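// Illustrative sketch (hypothetical flowgraph, for exposition only):
//
//     B1 ---> B3 <--- B2
//
// Importing B1 calls impImportBlockPending(B3): B3 has no pre-state yet, so
// the current state is copied in (verInitBBEntryState) and B3 goes on the
// pending list. Importing B2 calls it again: the current state is now merged
// into B3's existing pre-state, and if the merge changes anything (e.g. int
// vs. native int in a stack slot), B3 is re-added so it is (re)imported with
// the merged state.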
16107 void Compiler::impImportBlockPending(BasicBlock* block)
16112 printf("\nimpImportBlockPending for BB%02u\n", block->bbNum);
16116 // We will add a block to the pending set if it has not already been imported (or needs to be re-imported),
16117 // or if it has, but merging in a predecessor's post-state changes the block's pre-state.
16118 // (When we're doing verification, we always attempt the merge to detect verification errors.)
16120 // If the block has not been imported, add to pending set.
16121 bool addToPending = ((block->bbFlags & BBF_IMPORTED) == 0);
16123 // Initialize bbEntryState just the first time we try to add this block to the pending list
16124 // Just because bbEntryState is NULL doesn't mean the pre-state wasn't previously set.
16125 // We use NULL to indicate the 'common' state to avoid memory allocation
16126 if ((block->bbEntryState == nullptr) && ((block->bbFlags & (BBF_IMPORTED | BBF_FAILED_VERIFICATION)) == 0) &&
16127 (impGetPendingBlockMember(block) == 0))
16129 verInitBBEntryState(block, &verCurrentState);
16130 assert(block->bbStkDepth == 0);
16131 block->bbStkDepth = static_cast<unsigned short>(verCurrentState.esStackDepth);
16132 assert(addToPending);
16133 assert(impGetPendingBlockMember(block) == 0);
16137 // The stack should have the same height on entry to the block from all its predecessors.
16138 if (block->bbStkDepth != verCurrentState.esStackDepth)
16142 sprintf_s(buffer, sizeof(buffer),
16143 "Block at offset %4.4x to %4.4x in %s entered with different stack depths.\n"
16144 "Previous depth was %d, current depth is %d",
16145 block->bbCodeOffs, block->bbCodeOffsEnd, info.compFullName, block->bbStkDepth,
16146 verCurrentState.esStackDepth);
16147 buffer[400 - 1] = 0;
16150 NO_WAY("Block entered with different stack depths");
16154 // Additionally, if we need to verify, merge the verification state.
16155 if (tiVerificationNeeded)
16157 // Merge the current state into the entry state of block; if this does not change the entry state
16158 // by merging, do not add the block to the pending-list.
16159 bool changed = false;
16160 if (!verMergeEntryStates(block, &changed))
16162 block->bbFlags |= BBF_FAILED_VERIFICATION;
16163 addToPending = true; // We will pop it off, and check the flag set above.
16167 addToPending = true;
16169 JITDUMP("Adding BB%02u to pending set due to new merge result\n", block->bbNum);
16178 if (block->bbStkDepth > 0)
16180 // We need to fix the types of any spill temps that might have changed:
16181 // int->native int, float->double, int->byref, etc.
16182 impRetypeEntryStateTemps(block);
16185 // OK, we must add to the pending list, if it's not already in it.
16186 if (impGetPendingBlockMember(block) != 0)
16192 // Get an entry to add to the pending list
16196 if (impPendingFree)
16198 // We can reuse one of the freed up dscs.
16199 dsc = impPendingFree;
16200 impPendingFree = dsc->pdNext;
16204 // We have to create a new dsc
16205 dsc = new (this, CMK_Unknown) PendingDsc;
16209 dsc->pdSavedStack.ssDepth = verCurrentState.esStackDepth;
16210 dsc->pdThisPtrInit = verCurrentState.thisInitialized;
16212 // Save the stack trees for later
16214 if (verCurrentState.esStackDepth)
16216 impSaveStackState(&dsc->pdSavedStack, false);
16219 // Add the entry to the pending list
16221 dsc->pdNext = impPendingList;
16222 impPendingList = dsc;
16223 impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set.
16225 // Various assertions require us to now consider the block as not imported (at least for
16226 // the final time...)
16227 block->bbFlags &= ~BBF_IMPORTED;
16232 printf("Added PendingDsc - %08p for BB%02u\n", dspPtr(dsc), block->bbNum);
16237 /*****************************************************************************/
16239 // Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if
16240 // necessary (and ensures that it is a member of the set of BB's on the list, by setting its byte in
16241 // impPendingBlockMembers). Does *NOT* change the existing "pre-state" of the block.
16243 void Compiler::impReimportBlockPending(BasicBlock* block)
16245 JITDUMP("\nimpReimportBlockPending for BB%02u", block->bbNum);
16247 assert(block->bbFlags & BBF_IMPORTED);
16249 // OK, we must add to the pending list, if it's not already in it.
16250 if (impGetPendingBlockMember(block) != 0)
16255 // Get an entry to add to the pending list
16259 if (impPendingFree)
16261 // We can reuse one of the freed up dscs.
16262 dsc = impPendingFree;
16263 impPendingFree = dsc->pdNext;
16267 // We have to create a new dsc
16268 dsc = new (this, CMK_ImpStack) PendingDsc;
16273 if (block->bbEntryState)
16275 dsc->pdThisPtrInit = block->bbEntryState->thisInitialized;
16276 dsc->pdSavedStack.ssDepth = block->bbEntryState->esStackDepth;
16277 dsc->pdSavedStack.ssTrees = block->bbEntryState->esStack;
16281 dsc->pdThisPtrInit = TIS_Bottom;
16282 dsc->pdSavedStack.ssDepth = 0;
16283 dsc->pdSavedStack.ssTrees = nullptr;
16286 // Add the entry to the pending list
16288 dsc->pdNext = impPendingList;
16289 impPendingList = dsc;
16290 impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set.
16292 // Various assertions require us to now consider the block as not imported (at least for
16293 // the final time...)
16294 block->bbFlags &= ~BBF_IMPORTED;
16299 printf("Added PendingDsc - %08p for BB%02u\n", dspPtr(dsc), block->bbNum);
16304 void* Compiler::BlockListNode::operator new(size_t sz, Compiler* comp)
16306 if (comp->impBlockListNodeFreeList == nullptr)
16308 return (BlockListNode*)comp->compGetMem(sizeof(BlockListNode), CMK_BasicBlock);
16312 BlockListNode* res = comp->impBlockListNodeFreeList;
16313 comp->impBlockListNodeFreeList = res->m_next;
16318 void Compiler::FreeBlockListNode(Compiler::BlockListNode* node)
16320 node->m_next = impBlockListNodeFreeList;
16321 impBlockListNodeFreeList = node;
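// Illustrative sketch (hypothetical flowgraph, for exposition only): if B1
// and B2 both branch to B3 with one value left on the stack,
//
//     B1 --+
//          +--> B3
//     B2 --+
//
// then starting from pred B1 the walk below adds succ B3, and from succ B3 it
// adds pred B2; the clique is preds {B1, B2} and succs {B3}, and all members
// must agree on the same base spill temp for that stack slot.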
16324 void Compiler::impWalkSpillCliqueFromPred(BasicBlock* block, SpillCliqueWalker* callback)
16328 noway_assert(!fgComputePredsDone);
16329 if (!fgCheapPredsValid)
16331 fgComputeCheapPreds();
16334 BlockListNode* succCliqueToDo = nullptr;
16335 BlockListNode* predCliqueToDo = new (this) BlockListNode(block);
16339 // Look at the successors of every member of the predecessor to-do list.
16340 while (predCliqueToDo != nullptr)
16342 BlockListNode* node = predCliqueToDo;
16343 predCliqueToDo = node->m_next;
16344 BasicBlock* blk = node->m_blk;
16345 FreeBlockListNode(node);
16347 for (unsigned succNum = 0; succNum < blk->NumSucc(); succNum++)
16349 BasicBlock* succ = blk->GetSucc(succNum);
16350 // If it's not already in the clique, add it, and also add it
16351 // as a member of the successor "toDo" set.
16352 if (impSpillCliqueGetMember(SpillCliqueSucc, succ) == 0)
16354 callback->Visit(SpillCliqueSucc, succ);
16355 impSpillCliqueSetMember(SpillCliqueSucc, succ, 1);
16356 succCliqueToDo = new (this) BlockListNode(succ, succCliqueToDo);
16361 // Look at the predecessors of every member of the successor to-do list.
16362 while (succCliqueToDo != nullptr)
16364 BlockListNode* node = succCliqueToDo;
16365 succCliqueToDo = node->m_next;
16366 BasicBlock* blk = node->m_blk;
16367 FreeBlockListNode(node);
16369 for (BasicBlockList* pred = blk->bbCheapPreds; pred != nullptr; pred = pred->next)
16371 BasicBlock* predBlock = pred->block;
16372 // If it's not already in the clique, add it, and also add it
16373 // as a member of the predecessor "toDo" set.
16374 if (impSpillCliqueGetMember(SpillCliquePred, predBlock) == 0)
16376 callback->Visit(SpillCliquePred, predBlock);
16377 impSpillCliqueSetMember(SpillCliquePred, predBlock, 1);
16378 predCliqueToDo = new (this) BlockListNode(predBlock, predCliqueToDo);
16385 // If this fails, it means we didn't walk the spill clique properly and somehow managed
16386 // to miss walking back to include the predecessor we started from.
16387 // The most likely cause: missing or out-of-date bbPreds.
16388 assert(impSpillCliqueGetMember(SpillCliquePred, block) != 0);
16391 void Compiler::SetSpillTempsBase::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk)
16393 if (predOrSucc == SpillCliqueSucc)
16395 assert(blk->bbStkTempsIn == NO_BASE_TMP); // Should not already be a member of a clique as a successor.
16396 blk->bbStkTempsIn = m_baseTmp;
16400 assert(predOrSucc == SpillCliquePred);
16401 assert(blk->bbStkTempsOut == NO_BASE_TMP); // Should not already be a member of a clique as a predecessor.
16402 blk->bbStkTempsOut = m_baseTmp;
16406 void Compiler::ReimportSpillClique::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk)
16408 // For Preds we could be a little smarter and just find the existing store
16409 // and re-type it/add a cast, but that is complicated and hopefully very rare, so
16410 // just re-import the whole block (just like we do for successors)
16412 if (((blk->bbFlags & BBF_IMPORTED) == 0) && (m_pComp->impGetPendingBlockMember(blk) == 0))
16414 // If we haven't imported this block and we're not going to (because it isn't on
16415 // the pending list) then just ignore it for now.
16417 // This block has either never been imported (EntryState == NULL) or it failed
16418 // verification. Neither state requires us to force it to be imported now.
16419 assert((blk->bbEntryState == nullptr) || (blk->bbFlags & BBF_FAILED_VERIFICATION));
16423 // For successors we have a valid verCurrentState, so just mark them for reimport
16424 // the 'normal' way
16425 // Unlike predecessors, we *DO* need to reimport the current block because the
16426 // initial import had the wrong entry state types.
16427 // Similarly, blocks that are currently on the pending list still need to call
16428 // impImportBlockPending to fix up their entry state.
16429 if (predOrSucc == SpillCliqueSucc)
16431 m_pComp->impReimportMarkBlock(blk);
16433 // Set the current stack state to that of the blk->bbEntryState
16434 m_pComp->verResetCurrentState(blk, &m_pComp->verCurrentState);
16435 assert(m_pComp->verCurrentState.thisInitialized == blk->bbThisOnEntry());
16437 m_pComp->impImportBlockPending(blk);
16439 else if ((blk != m_pComp->compCurBB) && ((blk->bbFlags & BBF_IMPORTED) != 0))
16441 // As described above, we are only visiting predecessors so they can
16442 // add the appropriate casts. Since we have already done that for the current
16443 // block, it does not need to be reimported.
16444 // Nor do we need to reimport blocks that are still pending, but not yet imported.
16447 // For predecessors, we have no state to seed the EntryState, so we just have
16448 // to assume the existing one is correct.
16449 // If the block is also a successor, it will get the EntryState properly
16450 // updated when it is visited as a successor in the above "if" block.
16451 assert(predOrSucc == SpillCliquePred);
16452 m_pComp->impReimportBlockPending(blk);
16456 // Re-type the incoming lclVar nodes to match the varDsc.
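// Illustrative sketch (hypothetical, for exposition only): suppose spill temp
// V05 was created as TYP_INT, but a later-imported predecessor stored a
// native int into it and widened lvType to TYP_I_IMPL. The GT_LCL_VAR nodes
// saved in a successor's entry state still say TYP_INT, so they are re-typed
// below to match the variable's current type.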
16457 void Compiler::impRetypeEntryStateTemps(BasicBlock* blk)
16459 if (blk->bbEntryState != nullptr)
16461 EntryState* es = blk->bbEntryState;
16462 for (unsigned level = 0; level < es->esStackDepth; level++)
16464 GenTreePtr tree = es->esStack[level].val;
16465 if ((tree->gtOper == GT_LCL_VAR) || (tree->gtOper == GT_LCL_FLD))
16467 unsigned lclNum = tree->gtLclVarCommon.gtLclNum;
16468 noway_assert(lclNum < lvaCount);
16469 LclVarDsc* varDsc = lvaTable + lclNum;
16470 es->esStack[level].val->gtType = varDsc->TypeGet();
16476 unsigned Compiler::impGetSpillTmpBase(BasicBlock* block)
16478 if (block->bbStkTempsOut != NO_BASE_TMP)
16480 return block->bbStkTempsOut;
16486 printf("\n*************** In impGetSpillTmpBase(BB%02u)\n", block->bbNum);
16490 // Otherwise, choose one, and propagate to all members of the spill clique.
16491 // Grab enough temps for the whole stack.
16492 unsigned baseTmp = lvaGrabTemps(verCurrentState.esStackDepth DEBUGARG("IL Stack Entries"));
16493 SetSpillTempsBase callback(baseTmp);
16495 // We do *NOT* need to reset the SpillClique*Members because a block can only be the predecessor
16496 // to one spill clique, and similarly can only be the successor to one spill clique.
16497 impWalkSpillCliqueFromPred(block, &callback);
16502 void Compiler::impReimportSpillClique(BasicBlock* block)
16507 printf("\n*************** In impReimportSpillClique(BB%02u)\n", block->bbNum);
16511 // If we get here, it is because this block is already part of a spill clique
16512 // and one predecessor had an outgoing live stack slot of type int, and this
16513 // block has an outgoing live stack slot of type native int.
16514 // We need to reset these before traversal because they have already been set
16515 // by the previous walk to determine all the members of the spill clique.
16516 impInlineRoot()->impSpillCliquePredMembers.Reset();
16517 impInlineRoot()->impSpillCliqueSuccMembers.Reset();
16519 ReimportSpillClique callback(this);
16521 impWalkSpillCliqueFromPred(block, &callback);
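// Illustrative sketch (hypothetical IL, for exposition only): a collision
// that forces this reimport. Suppose two predecessors leave different integer
// widths on the stack for the same successor:
//
//     pred1:  ldc.i4.0          // stack: int32
//             br     merge
//     pred2:  ldarg.0
//             conv.i            // stack: native int
//             br     merge
//
// Whichever predecessor is imported first types the spill temp; when the
// other is imported the types clash, the temp is widened to native int, and
// the whole clique is re-imported so every member sees the widened type.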
16524 // Set the pre-state of "block" (which should not have a pre-state allocated) to
16525 // a copy of "srcState", cloning tree pointers as required.
16526 void Compiler::verInitBBEntryState(BasicBlock* block, EntryState* srcState)
16528 if (srcState->esStackDepth == 0 && srcState->thisInitialized == TIS_Bottom)
16530 block->bbEntryState = nullptr;
16534 block->bbEntryState = (EntryState*)compGetMemA(sizeof(EntryState));
16536 // block->bbEntryState.esRefcount = 1;
16538 block->bbEntryState->esStackDepth = srcState->esStackDepth;
16539 block->bbEntryState->thisInitialized = TIS_Bottom;
16541 if (srcState->esStackDepth > 0)
16543 block->bbSetStack(new (this, CMK_Unknown) StackEntry[srcState->esStackDepth]);
16544 unsigned stackSize = srcState->esStackDepth * sizeof(StackEntry);
16546 memcpy(block->bbEntryState->esStack, srcState->esStack, stackSize);
16547 for (unsigned level = 0; level < srcState->esStackDepth; level++)
16549 GenTreePtr tree = srcState->esStack[level].val;
16550 block->bbEntryState->esStack[level].val = gtCloneExpr(tree);
16554 if (verTrackObjCtorInitState)
16556 verSetThisInit(block, srcState->thisInitialized);
16562 void Compiler::verSetThisInit(BasicBlock* block, ThisInitState tis)
16564 assert(tis != TIS_Bottom); // Precondition.
16565 if (block->bbEntryState == nullptr)
16567 block->bbEntryState = new (this, CMK_Unknown) EntryState();
16570 block->bbEntryState->thisInitialized = tis;
16574 * Resets the current state to the state at the start of the basic block
16576 void Compiler::verResetCurrentState(BasicBlock* block, EntryState* destState)
16579 if (block->bbEntryState == nullptr)
16581 destState->esStackDepth = 0;
16582 destState->thisInitialized = TIS_Bottom;
16586 destState->esStackDepth = block->bbEntryState->esStackDepth;
16588 if (destState->esStackDepth > 0)
16590 unsigned stackSize = destState->esStackDepth * sizeof(StackEntry);
16592 memcpy(destState->esStack, block->bbStackOnEntry(), stackSize);
16595 destState->thisInitialized = block->bbThisOnEntry();
16600 ThisInitState BasicBlock::bbThisOnEntry()
16602 return bbEntryState ? bbEntryState->thisInitialized : TIS_Bottom;
16605 unsigned BasicBlock::bbStackDepthOnEntry()
16607 return (bbEntryState ? bbEntryState->esStackDepth : 0);
16610 void BasicBlock::bbSetStack(void* stackBuffer)
16612 assert(bbEntryState);
16613 assert(stackBuffer);
16614 bbEntryState->esStack = (StackEntry*)stackBuffer;
16617 StackEntry* BasicBlock::bbStackOnEntry()
16619 assert(bbEntryState);
16620 return bbEntryState->esStack;
16623 void Compiler::verInitCurrentState()
16625 verTrackObjCtorInitState = FALSE;
16626 verCurrentState.thisInitialized = TIS_Bottom;
16628 if (tiVerificationNeeded)
16630 // Track this ptr initialization
16631 if (!info.compIsStatic && (info.compFlags & CORINFO_FLG_CONSTRUCTOR) && lvaTable[0].lvVerTypeInfo.IsObjRef())
16633 verTrackObjCtorInitState = TRUE;
16634 verCurrentState.thisInitialized = TIS_Uninit;
16638 // initialize stack info
16640 verCurrentState.esStackDepth = 0;
16641 assert(verCurrentState.esStack != nullptr);
16643 // copy current state to entry state of first BB
16644 verInitBBEntryState(fgFirstBB, &verCurrentState);
16647 Compiler* Compiler::impInlineRoot()
16649 if (impInlineInfo == nullptr)
16655 return impInlineInfo->InlineRoot;
16659 BYTE Compiler::impSpillCliqueGetMember(SpillCliqueDir predOrSucc, BasicBlock* blk)
16661 if (predOrSucc == SpillCliquePred)
16663 return impInlineRoot()->impSpillCliquePredMembers.Get(blk->bbInd());
16667 assert(predOrSucc == SpillCliqueSucc);
16668 return impInlineRoot()->impSpillCliqueSuccMembers.Get(blk->bbInd());
16672 void Compiler::impSpillCliqueSetMember(SpillCliqueDir predOrSucc, BasicBlock* blk, BYTE val)
16674 if (predOrSucc == SpillCliquePred)
16676 impInlineRoot()->impSpillCliquePredMembers.Set(blk->bbInd(), val);
16680 assert(predOrSucc == SpillCliqueSucc);
16681 impInlineRoot()->impSpillCliqueSuccMembers.Set(blk->bbInd(), val);
16685 /*****************************************************************************
16687 * Convert the instrs ("import") into our internal format (trees). The
16688 * basic flowgraph has already been constructed and is passed in.
16691 void Compiler::impImport(BasicBlock* method)
16696 printf("*************** In impImport() for %s\n", info.compFullName);
16700 /* Allocate the stack contents */
16702 if (info.compMaxStack <= sizeof(impSmallStack) / sizeof(impSmallStack[0]))
16704 /* Use local variable, don't waste time allocating on the heap */
16706 impStkSize = sizeof(impSmallStack) / sizeof(impSmallStack[0]);
16707 verCurrentState.esStack = impSmallStack;
16711 impStkSize = info.compMaxStack;
16712 verCurrentState.esStack = new (this, CMK_ImpStack) StackEntry[impStkSize];
16715 // initialize the entry state at start of method
16716 verInitCurrentState();
16718 // Initialize stuff related to figuring "spill cliques" (see spec comment for impGetSpillTmpBase).
16719 Compiler* inlineRoot = impInlineRoot();
16720 if (this == inlineRoot) // These are only used on the root of the inlining tree.
16722 // We have initialized these previously, but to size 0. Make them larger.
16723 impPendingBlockMembers.Init(getAllocator(), fgBBNumMax * 2);
16724 impSpillCliquePredMembers.Init(getAllocator(), fgBBNumMax * 2);
16725 impSpillCliqueSuccMembers.Init(getAllocator(), fgBBNumMax * 2);
16727 inlineRoot->impPendingBlockMembers.Reset(fgBBNumMax * 2);
16728 inlineRoot->impSpillCliquePredMembers.Reset(fgBBNumMax * 2);
16729 inlineRoot->impSpillCliqueSuccMembers.Reset(fgBBNumMax * 2);
16730 impBlockListNodeFreeList = nullptr;
16733 impLastILoffsStmt = nullptr;
16734 impNestedStackSpill = false;
16736 impBoxTemp = BAD_VAR_NUM;
16738 impPendingList = impPendingFree = nullptr;
16740 /* Add the entry-point to the worker-list */
16742 // Skip leading internal blocks. There can be one as a leading scratch BB, and more
16743 // from EH normalization.
16744 // NOTE: It might be possible to always just put fgFirstBB on the pending list, and let everything else just fall out from that.
16746 for (; method->bbFlags & BBF_INTERNAL; method = method->bbNext)
16748 // Treat these as imported.
16749 assert(method->bbJumpKind == BBJ_NONE); // We assume all the leading ones are fallthrough.
16750 JITDUMP("Marking leading BBF_INTERNAL block BB%02u as BBF_IMPORTED\n", method->bbNum);
16751 method->bbFlags |= BBF_IMPORTED;
16754 impImportBlockPending(method);
16756 /* Import blocks in the worker-list until there are no more */
16758 while (impPendingList)
16760 /* Remove the entry at the front of the list */
16762 PendingDsc* dsc = impPendingList;
16763 impPendingList = impPendingList->pdNext;
16764 impSetPendingBlockMember(dsc->pdBB, 0);
16766 /* Restore the stack state */
16768 verCurrentState.thisInitialized = dsc->pdThisPtrInit;
16769 verCurrentState.esStackDepth = dsc->pdSavedStack.ssDepth;
16770 if (verCurrentState.esStackDepth)
16772 impRestoreStackState(&dsc->pdSavedStack);
16775 /* Add the entry to the free list for reuse */
16777 dsc->pdNext = impPendingFree;
16778 impPendingFree = dsc;
16780 /* Now import the block */
16782 if (dsc->pdBB->bbFlags & BBF_FAILED_VERIFICATION)
16785 #ifdef _TARGET_64BIT_
16786 // On AMD64, during verification we have to match JIT64 behavior since the VM is very tightly
16787 // coupled with the JIT64 IL Verification logic. Look inside verHandleVerificationFailure
16788 // method for further explanation on why we raise this exception instead of making the jitted
16789 // code throw the verification exception during execution.
16790 if (tiVerificationNeeded && opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IMPORT_ONLY))
16792 BADCODE("Basic block marked as not verifiable");
16795 #endif // _TARGET_64BIT_
16797 verConvertBBToThrowVerificationException(dsc->pdBB DEBUGARG(true));
16798 impEndTreeList(dsc->pdBB);
16803 impImportBlock(dsc->pdBB);
16805 if (compDonotInline())
16809 if (compIsForImportOnly() && !tiVerificationNeeded)
16817 if (verbose && info.compXcptnsCount)
16819 printf("\nAfter impImport() added block for try,catch,finally");
16820 fgDispBasicBlocks();
16824 // Used in impImportBlockPending() for STRESS_CHK_REIMPORT
16825 for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
16827 block->bbFlags &= ~BBF_VISITED;
16831 assert(!compIsForInlining() || !tiVerificationNeeded);
16834 // Checks if a typeinfo (usually stored in the type stack) is a struct.
16835 // The invariant here is that if it's not a ref or a method and has a class handle,
16836 // it's a valuetype.
16837 bool Compiler::impIsValueType(typeInfo* pTypeInfo)
16839 if (pTypeInfo && pTypeInfo->IsValueClassWithClsHnd())
16849 /*****************************************************************************
16850 * Check to see if the tree is the address of a local or
16851 the address of a field in a local.
16853 *lclVarTreeOut will contain the GT_LCL_VAR tree when it returns TRUE.
16857 BOOL Compiler::impIsAddressInLocal(GenTreePtr tree, GenTreePtr* lclVarTreeOut)
16859 if (tree->gtOper != GT_ADDR)
16864 GenTreePtr op = tree->gtOp.gtOp1;
16865 while (op->gtOper == GT_FIELD)
16867 op = op->gtField.gtFldObj;
16868 if (op && op->gtOper == GT_ADDR) // Skip static fields where op will be NULL.
16870 op = op->gtOp.gtOp1;
16878 if (op->gtOper == GT_LCL_VAR)
16880 *lclVarTreeOut = op;
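// Illustrative sketch (hypothetical trees, for exposition only):
//
//     ADDR(LCL_VAR V02)               -> TRUE,  *lclVarTreeOut = LCL_VAR V02
//     ADDR(FIELD(ADDR(LCL_VAR V02)))  -> TRUE   (address of a field in V02)
//     ADDR(FIELD(<null objPtr>))      -> FALSE  (static field; no local)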
16889 //------------------------------------------------------------------------
16890 // impMakeDiscretionaryInlineObservations: make observations that help
16891 // determine the profitability of a discretionary inline
16894 // pInlineInfo -- InlineInfo for the inline, or null for the prejit root
16895 // inlineResult -- InlineResult accumulating information about this inline
16898 // If inlining or prejitting the root, this method also makes
16899 // various observations about the method that factor into inline
16900 // decisions. It sets `compNativeSizeEstimate` as a side effect.
16902 void Compiler::impMakeDiscretionaryInlineObservations(InlineInfo* pInlineInfo, InlineResult* inlineResult)
16904 assert(pInlineInfo != nullptr && compIsForInlining() || // Perform the actual inlining.
16905 pInlineInfo == nullptr && !compIsForInlining() // Calculate the static inlining hint for ngen.
16908 // If we're really inlining, we should just have one result in play.
16909 assert((pInlineInfo == nullptr) || (inlineResult == pInlineInfo->inlineResult));
16911 // If this is a "forceinline" method, the JIT probably shouldn't have gone
16912 // to the trouble of estimating the native code size. Even if it did, it
16913 // shouldn't be relying on the result of this method.
16914 assert(inlineResult->GetObservation() == InlineObservation::CALLEE_IS_DISCRETIONARY_INLINE);
16916 // Note if the caller contains NEWOBJ or NEWARR.
16917 Compiler* rootCompiler = impInlineRoot();
16919 if ((rootCompiler->optMethodFlags & OMF_HAS_NEWARRAY) != 0)
16921 inlineResult->Note(InlineObservation::CALLER_HAS_NEWARRAY);
16924 if ((rootCompiler->optMethodFlags & OMF_HAS_NEWOBJ) != 0)
16926 inlineResult->Note(InlineObservation::CALLER_HAS_NEWOBJ);
16929 bool calleeIsStatic = (info.compFlags & CORINFO_FLG_STATIC) != 0;
16930 bool isSpecialMethod = (info.compFlags & CORINFO_FLG_CONSTRUCTOR) != 0;
16932 if (isSpecialMethod)
16934 if (calleeIsStatic)
16936 inlineResult->Note(InlineObservation::CALLEE_IS_CLASS_CTOR);
16940 inlineResult->Note(InlineObservation::CALLEE_IS_INSTANCE_CTOR);
16943 else if (!calleeIsStatic)
16945 // Callee is an instance method.
16947 // Check if the callee has the same 'this' as the root.
16948 if (pInlineInfo != nullptr)
16950 GenTreePtr thisArg = pInlineInfo->iciCall->gtCall.gtCallObjp;
16952 bool isSameThis = impIsThis(thisArg);
16953 inlineResult->NoteBool(InlineObservation::CALLSITE_IS_SAME_THIS, isSameThis);
16957 // Note if the callee's class is a promotable struct
16958 if ((info.compClassAttr & CORINFO_FLG_VALUECLASS) != 0)
16960 lvaStructPromotionInfo structPromotionInfo;
16961 lvaCanPromoteStructType(info.compClassHnd, &structPromotionInfo, false);
16962 if (structPromotionInfo.canPromote)
16964 inlineResult->Note(InlineObservation::CALLEE_CLASS_PROMOTABLE);
16968 #ifdef FEATURE_SIMD
16970 // Note if this method has SIMD args or a SIMD return value
16971 if (pInlineInfo != nullptr && pInlineInfo->hasSIMDTypeArgLocalOrReturn)
16973 inlineResult->Note(InlineObservation::CALLEE_HAS_SIMD);
16976 #endif // FEATURE_SIMD
16978 // Roughly classify callsite frequency.
16979 InlineCallsiteFrequency frequency = InlineCallsiteFrequency::UNUSED;
16981 // If this is a prejit root, or a maximally hot block...
16982 if ((pInlineInfo == nullptr) || (pInlineInfo->iciBlock->bbWeight >= BB_MAX_WEIGHT))
16984 frequency = InlineCallsiteFrequency::HOT;
16986 // No training data. Look for loop-like things.
16987 // We consider a recursive call loop-like. Do not give the inlining boost to the method itself.
16988 // However, give it to things nearby.
16989 else if ((pInlineInfo->iciBlock->bbFlags & BBF_BACKWARD_JUMP) &&
16990 (pInlineInfo->fncHandle != pInlineInfo->inlineCandidateInfo->ilCallerHandle))
16992 frequency = InlineCallsiteFrequency::LOOP;
16994 else if ((pInlineInfo->iciBlock->bbFlags & BBF_PROF_WEIGHT) && (pInlineInfo->iciBlock->bbWeight > BB_ZERO_WEIGHT))
16996 frequency = InlineCallsiteFrequency::WARM;
16998 // Now modify the multiplier based on where we're called from.
16999 else if (pInlineInfo->iciBlock->isRunRarely() || ((info.compFlags & FLG_CCTOR) == FLG_CCTOR))
17001 frequency = InlineCallsiteFrequency::RARE;
17005 frequency = InlineCallsiteFrequency::BORING;
17008 // Also capture the block weight of the call site. In the prejit
17009 // root case, assume there's some hot call site for this method.
17010 unsigned weight = 0;
17012 if (pInlineInfo != nullptr)
17014 weight = pInlineInfo->iciBlock->bbWeight;
17018 weight = BB_MAX_WEIGHT;
17021 inlineResult->NoteInt(InlineObservation::CALLSITE_FREQUENCY, static_cast<int>(frequency));
17022 inlineResult->NoteInt(InlineObservation::CALLSITE_WEIGHT, static_cast<int>(weight));
17025 /*****************************************************************************
17026 This method makes STATIC inlining decision based on the IL code.
17027 It should not make any inlining decision based on the context.
17028 If forceInline is true, then the inlining decision should not depend on
17029 performance heuristics (code size, etc.).
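// For example (hypothetical IL, for exposition only), a callee containing
//
//     .try { ... } catch [mscorlib]System.Object { ... }
//
// has methInfo->EHcount != 0 and is rejected below with CALLEE_HAS_EH no
// matter where it is called from; every check in this method is a static
// property of the IL (EH, a body, varargs, locals/args limits, code size,
// maxstack).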
17032 void Compiler::impCanInlineIL(CORINFO_METHOD_HANDLE fncHandle,
17033 CORINFO_METHOD_INFO* methInfo,
17035 InlineResult* inlineResult)
17037 unsigned codeSize = methInfo->ILCodeSize;
17039 // We shouldn't have made up our minds yet...
17040 assert(!inlineResult->IsDecided());
17042 if (methInfo->EHcount)
17044 inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_EH);
17048 if ((methInfo->ILCode == nullptr) || (codeSize == 0))
17050 inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NO_BODY);
17054 // For now we don't inline varargs (import code can't handle it)
17056 if (methInfo->args.isVarArg())
17058 inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS);
17062 // Reject if it has too many locals.
17063 // This is currently an implementation limit due to fixed-size arrays in the
17064 // inline info, rather than a performance heuristic.
17066 inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_LOCALS, methInfo->locals.numArgs);
17068 if (methInfo->locals.numArgs > MAX_INL_LCLS)
17070 inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_LOCALS);
17074 // Make sure there aren't too many arguments.
17075 // This is currently an implementation limit due to fixed-size arrays in the
17076 // inline info, rather than a performance heuristic.
17078 inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_ARGUMENTS, methInfo->args.numArgs);
17080 if (methInfo->args.numArgs > MAX_INL_ARGS)
17082 inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_ARGUMENTS);
17086 // Note force inline state
17088 inlineResult->NoteBool(InlineObservation::CALLEE_IS_FORCE_INLINE, forceInline);
17090 // Note IL code size
17092 inlineResult->NoteInt(InlineObservation::CALLEE_IL_CODE_SIZE, codeSize);
17094 if (inlineResult->IsFailure())
17099 // Make sure maxstack is not too big
17101 inlineResult->NoteInt(InlineObservation::CALLEE_MAXSTACK, methInfo->maxStack);
17103 if (inlineResult->IsFailure())
17109 /*****************************************************************************
17112 void Compiler::impCheckCanInline(GenTreePtr call,
17113 CORINFO_METHOD_HANDLE fncHandle,
17115 CORINFO_CONTEXT_HANDLE exactContextHnd,
17116 InlineCandidateInfo** ppInlineCandidateInfo,
17117 InlineResult* inlineResult)
17119 // Either EE or JIT might throw exceptions below.
17120 // If that happens, just don't inline the method.
17126 CORINFO_METHOD_HANDLE fncHandle;
17128 CORINFO_CONTEXT_HANDLE exactContextHnd;
17129 InlineResult* result;
17130 InlineCandidateInfo** ppInlineCandidateInfo;
17131 } param = {nullptr};
17133 param.pThis = this;
17135 param.fncHandle = fncHandle;
17136 param.methAttr = methAttr;
17137 param.exactContextHnd = (exactContextHnd != nullptr) ? exactContextHnd : MAKE_METHODCONTEXT(fncHandle);
17138 param.result = inlineResult;
17139 param.ppInlineCandidateInfo = ppInlineCandidateInfo;
17141 bool success = eeRunWithErrorTrap<Param>(
17142 [](Param* pParam) {
17143 DWORD dwRestrictions = 0;
17144 CorInfoInitClassResult initClassResult;
17147 const char* methodName;
17148 const char* className;
17149 methodName = pParam->pThis->eeGetMethodName(pParam->fncHandle, &className);
17151 if (JitConfig.JitNoInline())
17153 pParam->result->NoteFatal(InlineObservation::CALLEE_IS_JIT_NOINLINE);
17158 /* Try to get the code address/size for the method */
17160 CORINFO_METHOD_INFO methInfo;
17161 if (!pParam->pThis->info.compCompHnd->getMethodInfo(pParam->fncHandle, &methInfo))
17163 pParam->result->NoteFatal(InlineObservation::CALLEE_NO_METHOD_INFO);
17168 forceInline = !!(pParam->methAttr & CORINFO_FLG_FORCEINLINE);
17170 pParam->pThis->impCanInlineIL(pParam->fncHandle, &methInfo, forceInline, pParam->result);
17172 if (pParam->result->IsFailure())
17174 assert(pParam->result->IsNever());
17178 // Speculatively check if initClass() can be done.
17179 // If it can be done, we will try to inline the method. If inlining
17180 // succeeds, then we will do the non-speculative initClass() and commit it.
17181 // If this speculative call to initClass() fails, there is no point
17182 // trying to inline this method.
17184 pParam->pThis->info.compCompHnd->initClass(nullptr /* field */, pParam->fncHandle /* method */,
17185 pParam->exactContextHnd /* context */,
17186 TRUE /* speculative */);
17188 if (initClassResult & CORINFO_INITCLASS_DONT_INLINE)
17190 pParam->result->NoteFatal(InlineObservation::CALLSITE_CLASS_INIT_FAILURE_SPEC);
17194 // Give the EE the final say in whether to inline or not.
17195 // This should be last since, for verifiable code, this can be expensive.
17197 /* VM Inline check also ensures that the method is verifiable if needed */
17198 CorInfoInline vmResult;
17199 vmResult = pParam->pThis->info.compCompHnd->canInline(pParam->pThis->info.compMethodHnd, pParam->fncHandle,
17202 if (vmResult == INLINE_FAIL)
17204 pParam->result->NoteFatal(InlineObservation::CALLSITE_IS_VM_NOINLINE);
17206 else if (vmResult == INLINE_NEVER)
17208 pParam->result->NoteFatal(InlineObservation::CALLEE_IS_VM_NOINLINE);
17211 if (pParam->result->IsFailure())
17213 // Make sure not to report this one. It was already reported by the VM.
17214 pParam->result->SetReported();
17218 // check for unsupported inlining restrictions
17219 assert((dwRestrictions & ~(INLINE_RESPECT_BOUNDARY | INLINE_NO_CALLEE_LDSTR | INLINE_SAME_THIS)) == 0);
17221 if (dwRestrictions & INLINE_SAME_THIS)
17223 GenTreePtr thisArg = pParam->call->gtCall.gtCallObjp;
17226 if (!pParam->pThis->impIsThis(thisArg))
17228 pParam->result->NoteFatal(InlineObservation::CALLSITE_REQUIRES_SAME_THIS);
17233 /* Get the method properties */
17235 CORINFO_CLASS_HANDLE clsHandle;
17236 clsHandle = pParam->pThis->info.compCompHnd->getMethodClass(pParam->fncHandle);
17238 clsAttr = pParam->pThis->info.compCompHnd->getClassAttribs(clsHandle);
17240 /* Get the return type */
17242 var_types fncRetType;
17243 fncRetType = pParam->call->TypeGet();
17246 var_types fncRealRetType;
17247 fncRealRetType = JITtype2varType(methInfo.args.retType);
17249 assert((genActualType(fncRealRetType) == genActualType(fncRetType)) ||
17250 // <BUGNUM> VSW 288602 </BUGNUM>
17251 // In case of IJW, we allow to assign a native pointer to a BYREF.
17252 (fncRetType == TYP_BYREF && methInfo.args.retType == CORINFO_TYPE_PTR) ||
17253 (varTypeIsStruct(fncRetType) && (fncRealRetType == TYP_STRUCT)));
17257 // Allocate an InlineCandidateInfo structure
17259 InlineCandidateInfo* pInfo;
17260 pInfo = new (pParam->pThis, CMK_Inlining) InlineCandidateInfo;
17262 pInfo->dwRestrictions = dwRestrictions;
17263 pInfo->methInfo = methInfo;
17264 pInfo->methAttr = pParam->methAttr;
17265 pInfo->clsHandle = clsHandle;
17266 pInfo->clsAttr = clsAttr;
17267 pInfo->fncRetType = fncRetType;
17268 pInfo->exactContextHnd = pParam->exactContextHnd;
17269 pInfo->ilCallerHandle = pParam->pThis->info.compMethodHnd;
17270 pInfo->initClassResult = initClassResult;
17272 *(pParam->ppInlineCandidateInfo) = pInfo;
17279 param.result->NoteFatal(InlineObservation::CALLSITE_COMPILATION_ERROR);
17283 void Compiler::impInlineRecordArgInfo(InlineInfo* pInlineInfo,
17284 GenTreePtr curArgVal,
17286 InlineResult* inlineResult)
17288 InlArgInfo* inlCurArgInfo = &pInlineInfo->inlArgInfo[argNum];
17290 if (curArgVal->gtOper == GT_MKREFANY)
17292 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_IS_MKREFANY);
17296 inlCurArgInfo->argNode = curArgVal;
17298 GenTreePtr lclVarTree;
17299 if (impIsAddressInLocal(curArgVal, &lclVarTree) && varTypeIsStruct(lclVarTree))
17301 inlCurArgInfo->argIsByRefToStructLocal = true;
17302 #ifdef FEATURE_SIMD
17303 if (lvaTable[lclVarTree->AsLclVarCommon()->gtLclNum].lvSIMDType)
17305 pInlineInfo->hasSIMDTypeArgLocalOrReturn = true;
17307 #endif // FEATURE_SIMD
17310 if (curArgVal->gtFlags & GTF_ALL_EFFECT)
17312 inlCurArgInfo->argHasGlobRef = (curArgVal->gtFlags & GTF_GLOB_REF) != 0;
17313 inlCurArgInfo->argHasSideEff = (curArgVal->gtFlags & (GTF_ALL_EFFECT & ~GTF_GLOB_REF)) != 0;
17316 if (curArgVal->gtOper == GT_LCL_VAR)
17318 inlCurArgInfo->argIsLclVar = true;
17320 /* Remember the "original" argument number */
17321 curArgVal->gtLclVar.gtLclILoffs = argNum;
17324 if ((curArgVal->OperKind() & GTK_CONST) ||
17325 ((curArgVal->gtOper == GT_ADDR) && (curArgVal->gtOp.gtOp1->gtOper == GT_LCL_VAR)))
17327 inlCurArgInfo->argIsInvariant = true;
17328 if (inlCurArgInfo->argIsThis && (curArgVal->gtOper == GT_CNS_INT) && (curArgVal->gtIntCon.gtIconVal == 0))
17330 /* Abort, but do not mark as not inlinable */
17331 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_HAS_NULL_THIS);
17336 if (!inlCurArgInfo->argIsInvariant && gtHasLocalsWithAddrOp(curArgVal))
17338 inlCurArgInfo->argHasLdargaOp = true;
17344 if (inlCurArgInfo->argIsThis)
17346 printf("thisArg:");
17350 printf("\nArgument #%u:", argNum);
17352 if (inlCurArgInfo->argIsLclVar)
17354 printf(" is a local var");
17356 if (inlCurArgInfo->argIsInvariant)
17358 printf(" is a constant");
17360 if (inlCurArgInfo->argHasGlobRef)
17362 printf(" has global refs");
17364 if (inlCurArgInfo->argHasSideEff)
17366 printf(" has side effects");
17368 if (inlCurArgInfo->argHasLdargaOp)
17370 printf(" has ldarga effect");
17372 if (inlCurArgInfo->argHasStargOp)
17374 printf(" has starg effect");
17376 if (inlCurArgInfo->argIsByRefToStructLocal)
17378 printf(" is byref to a struct local");
17382 gtDispTree(curArgVal);
17388 /*****************************************************************************
17392 void Compiler::impInlineInitVars(InlineInfo* pInlineInfo)
17394 assert(!compIsForInlining());
17396 GenTreePtr call = pInlineInfo->iciCall;
17397 CORINFO_METHOD_INFO* methInfo = &pInlineInfo->inlineCandidateInfo->methInfo;
17398 unsigned clsAttr = pInlineInfo->inlineCandidateInfo->clsAttr;
17399 InlArgInfo* inlArgInfo = pInlineInfo->inlArgInfo;
17400 InlLclVarInfo* lclVarInfo = pInlineInfo->lclVarInfo;
17401 InlineResult* inlineResult = pInlineInfo->inlineResult;
17403 const bool hasRetBuffArg = impMethodInfo_hasRetBuffArg(methInfo);
17405 /* init the argument struct */
17407 memset(inlArgInfo, 0, (MAX_INL_ARGS + 1) * sizeof(inlArgInfo[0]));
17409 /* Get hold of the 'this' pointer and the argument list proper */
17411 GenTreePtr thisArg = call->gtCall.gtCallObjp;
17412 GenTreePtr argList = call->gtCall.gtCallArgs;
17413 unsigned argCnt = 0; // Count of the arguments
17415 assert((methInfo->args.hasThis()) == (thisArg != nullptr));
17419 inlArgInfo[0].argIsThis = true;
17421 impInlineRecordArgInfo(pInlineInfo, thisArg, argCnt, inlineResult);
17423 if (inlineResult->IsFailure())
17428 /* Increment the argument count */
17432 /* Record some information about each of the arguments */
17433 bool hasTypeCtxtArg = (methInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) != 0;
17435 #if USER_ARGS_COME_LAST
17436 unsigned typeCtxtArg = thisArg ? 1 : 0;
17437 #else // USER_ARGS_COME_LAST
17438 unsigned typeCtxtArg = methInfo->args.totalILArgs();
17439 #endif // USER_ARGS_COME_LAST
17441 for (GenTreePtr argTmp = argList; argTmp; argTmp = argTmp->gtOp.gtOp2)
17443 if (argTmp == argList && hasRetBuffArg)
17448 // Ignore the type context argument
17449 if (hasTypeCtxtArg && (argCnt == typeCtxtArg))
17451 typeCtxtArg = 0xFFFFFFFF;
17455 assert(argTmp->gtOper == GT_LIST);
17456 GenTreePtr argVal = argTmp->gtOp.gtOp1;
17458 impInlineRecordArgInfo(pInlineInfo, argVal, argCnt, inlineResult);
17460 if (inlineResult->IsFailure())
17465 /* Increment the argument count */
17469 /* Make sure we got the arg number right */
17470 assert(argCnt == methInfo->args.totalILArgs());
17472 #ifdef FEATURE_SIMD
17473 bool foundSIMDType = pInlineInfo->hasSIMDTypeArgLocalOrReturn;
17474 #endif // FEATURE_SIMD
17476 /* We have typeless opcodes, get type information from the signature */
17482 if (clsAttr & CORINFO_FLG_VALUECLASS)
17484 sigType = TYP_BYREF;
17491 lclVarInfo[0].lclVerTypeInfo = verMakeTypeInfo(pInlineInfo->inlineCandidateInfo->clsHandle);
17492 lclVarInfo[0].lclHasLdlocaOp = false;
17494 #ifdef FEATURE_SIMD
17495 // We always want to check isSIMDClass, since we want to set foundSIMDType (to increase
17496 // the inlining multiplier) for anything in that assembly.
17497 // But we only need to normalize it if it is a TYP_STRUCT
17498 // (which we need to do even if we have already set foundSIMDType).
17499 if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDClass(&(lclVarInfo[0].lclVerTypeInfo)))
17501 if (sigType == TYP_STRUCT)
17503 sigType = impNormStructType(lclVarInfo[0].lclVerTypeInfo.GetClassHandle());
17505 foundSIMDType = true;
17507 #endif // FEATURE_SIMD
17508 lclVarInfo[0].lclTypeInfo = sigType;
17510 assert(varTypeIsGC(thisArg->gtType) || // "this" is managed
17511 (thisArg->gtType == TYP_I_IMPL && // "this" is unmanaged but the method's class doesn't care
17512 (clsAttr & CORINFO_FLG_VALUECLASS)));
17514 if (genActualType(thisArg->gtType) != genActualType(sigType))
17516 if (sigType == TYP_REF)
17518 /* The argument cannot be bashed into a ref (see bug 750871) */
17519 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_REF);
17523 /* This can only happen with byrefs <-> ints/shorts */
17525 assert(genActualType(sigType) == TYP_I_IMPL || sigType == TYP_BYREF);
17526 assert(genActualType(thisArg->gtType) == TYP_I_IMPL || thisArg->gtType == TYP_BYREF);
17528 if (sigType == TYP_BYREF)
17530 lclVarInfo[0].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
17532 else if (thisArg->gtType == TYP_BYREF)
17534 assert(sigType == TYP_I_IMPL);
17536 /* If possible change the BYREF to an int */
17537 if (thisArg->IsVarAddr())
17539 thisArg->gtType = TYP_I_IMPL;
17540 lclVarInfo[0].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
17544 /* Arguments 'int <- byref' cannot be bashed */
17545 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_INT);
17552 /* Init the types of the arguments and make sure the types
17553 * from the trees match the types in the signature */
17555 CORINFO_ARG_LIST_HANDLE argLst;
17556 argLst = methInfo->args.args;
17559 for (i = (thisArg ? 1 : 0); i < argCnt; i++, argLst = info.compCompHnd->getArgNext(argLst))
17561 var_types sigType = (var_types)eeGetArgType(argLst, &methInfo->args);
17563 lclVarInfo[i].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->args, argLst);
17565 #ifdef FEATURE_SIMD
17566 if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDClass(&(lclVarInfo[i].lclVerTypeInfo)))
17568 // If this is a SIMD class (i.e. in the SIMD assembly), then we will consider that we've
17569 // found a SIMD type, even if this may not be a type we recognize (the assumption is that
17570 // it is likely to use a SIMD type, and therefore we want to increase the inlining multiplier).
17571 foundSIMDType = true;
17572 if (sigType == TYP_STRUCT)
17574 var_types structType = impNormStructType(lclVarInfo[i].lclVerTypeInfo.GetClassHandle());
17575 sigType = structType;
17578 #endif // FEATURE_SIMD
17580 lclVarInfo[i].lclTypeInfo = sigType;
17581 lclVarInfo[i].lclHasLdlocaOp = false;
17583 /* Does the tree type match the signature type? */
17585 GenTreePtr inlArgNode = inlArgInfo[i].argNode;
17587 if (sigType != inlArgNode->gtType)
17589 /* In valid IL, this can only happen for short integer types or byrefs <-> [native] ints,
17590 but in bad IL cases with caller-callee signature mismatches we can see other types.
17591 Intentionally reject cases with mismatches so the jit is more flexible when
17592 encountering bad IL. */
17594 bool isPlausibleTypeMatch = (genActualType(sigType) == genActualType(inlArgNode->gtType)) ||
17595 (genActualTypeIsIntOrI(sigType) && inlArgNode->gtType == TYP_BYREF) ||
17596 (sigType == TYP_BYREF && genActualTypeIsIntOrI(inlArgNode->gtType));
17598 if (!isPlausibleTypeMatch)
17600 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_TYPES_INCOMPATIBLE);
17604 /* Is it a narrowing or widening cast?
17605 * Widening casts are ok since the value computed is already
17606 * normalized to an int (on the IL stack) */
17608 if (genTypeSize(inlArgNode->gtType) >= genTypeSize(sigType))
17610 if (sigType == TYP_BYREF)
17612 lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
17614 else if (inlArgNode->gtType == TYP_BYREF)
17616 assert(varTypeIsIntOrI(sigType));
17618 /* If possible bash the BYREF to an int */
17619 if (inlArgNode->IsVarAddr())
17621 inlArgNode->gtType = TYP_I_IMPL;
17622 lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
17626 /* Arguments 'int <- byref' cannot be changed */
17627 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_INT);
17631 else if (genTypeSize(sigType) < EA_PTRSIZE)
17633 /* Narrowing cast */
17635 if (inlArgNode->gtOper == GT_LCL_VAR &&
17636 !lvaTable[inlArgNode->gtLclVarCommon.gtLclNum].lvNormalizeOnLoad() &&
17637 sigType == lvaGetRealType(inlArgNode->gtLclVarCommon.gtLclNum))
17639 /* We don't need to insert a cast here as the variable
17640 was assigned a normalized value of the right type */
17645 inlArgNode = inlArgInfo[i].argNode = gtNewCastNode(TYP_INT, inlArgNode, sigType);
17647 inlArgInfo[i].argIsLclVar = false;
17649 /* Try to fold the node in case we have constant arguments */
17651 if (inlArgInfo[i].argIsInvariant)
17653 inlArgNode = gtFoldExprConst(inlArgNode);
17654 inlArgInfo[i].argNode = inlArgNode;
17655 assert(inlArgNode->OperIsConst());
17658 #ifdef _TARGET_64BIT_
17659 else if (genTypeSize(genActualType(inlArgNode->gtType)) < genTypeSize(sigType))
17661 // This should only happen for int -> native int widening
17662 inlArgNode = inlArgInfo[i].argNode = gtNewCastNode(genActualType(sigType), inlArgNode, sigType);
17664 inlArgInfo[i].argIsLclVar = false;
17666 /* Try to fold the node in case we have constant arguments */
17668 if (inlArgInfo[i].argIsInvariant)
17670 inlArgNode = gtFoldExprConst(inlArgNode);
17671 inlArgInfo[i].argNode = inlArgNode;
17672 assert(inlArgNode->OperIsConst());
17675 #endif // _TARGET_64BIT_
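// Illustrative sketch (hypothetical, for exposition only): on 64-bit targets,
// if the callee signature says "native int" but the call site passes an int32
// node, the argument is wrapped in a GT_CAST to TYP_I_IMPL above; if the
// argument is an invariant constant, gtFoldExprConst then folds the cast
// back into a constant of the wider type.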
17680 /* Init the types of the local variables */
17682 CORINFO_ARG_LIST_HANDLE localsSig;
17683 localsSig = methInfo->locals.args;
17685 for (i = 0; i < methInfo->locals.numArgs; i++)
17688 var_types type = (var_types)eeGetArgType(localsSig, &methInfo->locals, &isPinned);
17690 lclVarInfo[i + argCnt].lclHasLdlocaOp = false;
17691 lclVarInfo[i + argCnt].lclIsPinned = isPinned;
17692 lclVarInfo[i + argCnt].lclTypeInfo = type;
17696 // Pinned locals may cause inlines to fail.
17697 inlineResult->Note(InlineObservation::CALLEE_HAS_PINNED_LOCALS);
17698 if (inlineResult->IsFailure())
17704 lclVarInfo[i + argCnt].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->locals, localsSig);
17706 // If this local is a struct type with GC fields, inform the inliner. It may choose to bail
17707 // out on the inline.
17708 if (type == TYP_STRUCT)
17710 CORINFO_CLASS_HANDLE lclHandle = lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle();
17711 DWORD typeFlags = info.compCompHnd->getClassAttribs(lclHandle);
17712 if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0)
17714 inlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT);
17715 if (inlineResult->IsFailure())
17720 // Do further notification in the case where the call site is rare; some policies do
17721 // not track the relative hotness of call sites for "always" inline cases.
17722 if (pInlineInfo->iciBlock->isRunRarely())
17724 inlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT);
17725 if (inlineResult->IsFailure())
17734 localsSig = info.compCompHnd->getArgNext(localsSig);
17736 #ifdef FEATURE_SIMD
17737 if ((!foundSIMDType || (type == TYP_STRUCT)) && isSIMDClass(&(lclVarInfo[i + argCnt].lclVerTypeInfo)))
17739 foundSIMDType = true;
17740 if (featureSIMD && type == TYP_STRUCT)
17742 var_types structType = impNormStructType(lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle());
17743 lclVarInfo[i + argCnt].lclTypeInfo = structType;
17746 #endif // FEATURE_SIMD
17749 #ifdef FEATURE_SIMD
17750 if (!foundSIMDType && (call->AsCall()->gtRetClsHnd != nullptr) && isSIMDClass(call->AsCall()->gtRetClsHnd))
17752 foundSIMDType = true;
17754 pInlineInfo->hasSIMDTypeArgLocalOrReturn = foundSIMDType;
17755 #endif // FEATURE_SIMD
17758 unsigned Compiler::impInlineFetchLocal(unsigned lclNum DEBUGARG(const char* reason))
17760 assert(compIsForInlining());
17762 unsigned tmpNum = impInlineInfo->lclTmpNum[lclNum];
17764 if (tmpNum == BAD_VAR_NUM)
17766 var_types lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
17768 // The lifetime of this local might span multiple BBs.
17769 // So it is a long lifetime local.
17770 impInlineInfo->lclTmpNum[lclNum] = tmpNum = lvaGrabTemp(false DEBUGARG(reason));
17772 lvaTable[tmpNum].lvType = lclTyp;
17773 if (impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclHasLdlocaOp)
17775 lvaTable[tmpNum].lvHasLdAddrOp = 1;
17778 if (impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclIsPinned)
17780 lvaTable[tmpNum].lvPinned = 1;
17782 if (!impInlineInfo->hasPinnedLocals)
17784 // If the inlinee returns a value, use a spill temp
17785 // for the return value to ensure that, even in the case
17786 // where the return expression refers to one of the
17787 // pinned locals, we can unpin the local right after
17788 // the inlined method body.
17789 if ((info.compRetNativeType != TYP_VOID) && (lvaInlineeReturnSpillTemp == BAD_VAR_NUM))
17791 lvaInlineeReturnSpillTemp =
17792 lvaGrabTemp(false DEBUGARG("Inline candidate pinned local return spill temp"));
17793 lvaTable[lvaInlineeReturnSpillTemp].lvType = info.compRetNativeType;
17797 impInlineInfo->hasPinnedLocals = true;
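// Illustrative sketch (hypothetical IL, for exposition only): an inlinee with
//
//     .locals init (int32& pinned V_0)
//
// may compute its return value from V_0. Spilling the return value to
// lvaInlineeReturnSpillTemp lets the JIT unpin V_0 right after the inlined
// body instead of keeping it pinned until the return value is consumed.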
17800 if (impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclVerTypeInfo.IsStruct())
17802 if (varTypeIsStruct(lclTyp))
17804 lvaSetStruct(tmpNum,
17805 impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclVerTypeInfo.GetClassHandle(),
17806 true /* unsafe value cls check */);
17810 // This is a wrapped primitive. Make sure the verstate knows that
17811 lvaTable[tmpNum].lvVerTypeInfo =
17812 impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclVerTypeInfo;
17820 // A method used to return the GenTree (usually a GT_LCL_VAR) representing the arguments of the inlined method.
17821 // Only use this method for the arguments of the inlinee method.
17822 // !!! Do not use it for the locals of the inlinee method. !!!!
17824 GenTreePtr Compiler::impInlineFetchArg(unsigned lclNum, InlArgInfo* inlArgInfo, InlLclVarInfo* lclVarInfo)
17826 /* Get the argument type */
17827 var_types lclTyp = lclVarInfo[lclNum].lclTypeInfo;
17829 GenTreePtr op1 = nullptr;
17831 // constant or address of local
17832 if (inlArgInfo[lclNum].argIsInvariant && !inlArgInfo[lclNum].argHasLdargaOp && !inlArgInfo[lclNum].argHasStargOp)
17834 /* Clone the constant. Note that we cannot directly use argNode
17835 in the trees even if inlArgInfo[lclNum].argIsUsed==false as this
17836 would introduce aliasing between inlArgInfo[].argNode and
17837 impInlineExpr. Then gtFoldExpr() could change it, causing further
17838 references to the argument to work off of the bashed copy. */
17840 op1 = gtCloneExpr(inlArgInfo[lclNum].argNode);
17841 PREFIX_ASSUME(op1 != nullptr);
17842 inlArgInfo[lclNum].argTmpNum = (unsigned)-1; // illegal temp
17844 else if (inlArgInfo[lclNum].argIsLclVar && !inlArgInfo[lclNum].argHasLdargaOp && !inlArgInfo[lclNum].argHasStargOp)
17846 /* Argument is a local variable (of the caller)
17847 * Can we re-use the passed argument node? */
17849 op1 = inlArgInfo[lclNum].argNode;
17850 inlArgInfo[lclNum].argTmpNum = op1->gtLclVarCommon.gtLclNum;
17852 if (inlArgInfo[lclNum].argIsUsed)
17854 assert(op1->gtOper == GT_LCL_VAR);
17855 assert(lclNum == op1->gtLclVar.gtLclILoffs);
17857 if (!lvaTable[op1->gtLclVarCommon.gtLclNum].lvNormalizeOnLoad())
17859 lclTyp = genActualType(lclTyp);
17862 /* Create a new lcl var node - remember the argument lclNum */
17863 op1 = gtNewLclvNode(op1->gtLclVarCommon.gtLclNum, lclTyp, op1->gtLclVar.gtLclILoffs);
17866 else if (inlArgInfo[lclNum].argIsByRefToStructLocal && !inlArgInfo[lclNum].argHasStargOp)
17868 /* Argument is a by-ref address to a struct, a normed struct, or its field.
17869 In these cases, don't spill the byref to a local, simply clone the tree and use it.
17870 This way we will increase the chance for this byref to be optimized away by
17871 a subsequent "dereference" operation.
17873 From Dev11 bug #139955: Argument node can also be TYP_I_IMPL if we've bashed the tree
17874 (in impInlineInitVars()), if the arg has argHasLdargaOp as well as argIsByRefToStructLocal.
17875 For example, if the caller is:
17876 ldloca.s V_1 // V_1 is a local struct
17877 call void Test.ILPart::RunLdargaOnPointerArg(int32*)
17878 and the callee being inlined has:
17879 .method public static void RunLdargaOnPointerArg(int32* ptrToInts) cil managed
17881 call void Test.FourInts::NotInlined_SetExpectedValuesThroughPointerToPointer(int32**)
17882 then we change the argument tree (of "ldloca.s V_1") to TYP_I_IMPL to match the callee signature. We'll
17883 soon afterwards reject the inlining anyway, since the tree we return isn't a GT_LCL_VAR.
17885 assert(inlArgInfo[lclNum].argNode->TypeGet() == TYP_BYREF ||
17886 inlArgInfo[lclNum].argNode->TypeGet() == TYP_I_IMPL);
17887 op1 = gtCloneExpr(inlArgInfo[lclNum].argNode);
17891 /* Argument is a complex expression - it must be evaluated into a temp */
17893 if (inlArgInfo[lclNum].argHasTmp)
17895 assert(inlArgInfo[lclNum].argIsUsed);
17896 assert(inlArgInfo[lclNum].argTmpNum < lvaCount);
17898 /* Create a new lcl var node - remember the argument lclNum */
17899 op1 = gtNewLclvNode(inlArgInfo[lclNum].argTmpNum, genActualType(lclTyp));
17901 /* This is the second or later use of this argument,
17902 so we have to use the temp (instead of the actual arg) */
17903 inlArgInfo[lclNum].argBashTmpNode = nullptr;
17907 /* First time use */
17908 assert(inlArgInfo[lclNum].argIsUsed == false);
17910 /* Reserve a temp for the expression.
17911 * Use a large size node as we may change it later */
17913 unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Inlining Arg"));
17915 lvaTable[tmpNum].lvType = lclTyp;
17916 assert(lvaTable[tmpNum].lvAddrExposed == 0);
17917 if (inlArgInfo[lclNum].argHasLdargaOp)
17919 lvaTable[tmpNum].lvHasLdAddrOp = 1;
17922 if (lclVarInfo[lclNum].lclVerTypeInfo.IsStruct())
17924 if (varTypeIsStruct(lclTyp))
17926 lvaSetStruct(tmpNum, impInlineInfo->lclVarInfo[lclNum].lclVerTypeInfo.GetClassHandle(),
17927 true /* unsafe value cls check */);
17931 // This is a wrapped primitive. Make sure the verstate knows that
17932 lvaTable[tmpNum].lvVerTypeInfo = impInlineInfo->lclVarInfo[lclNum].lclVerTypeInfo;
17936 inlArgInfo[lclNum].argHasTmp = true;
17937 inlArgInfo[lclNum].argTmpNum = tmpNum;
17939 // If we require strict exception order, then arguments must
17940 // be evaluated in sequence before the body of the inlined method.
17941 // So we need to evaluate them to a temp.
17942 // Also, if arguments have global references, we need to
17943 // evaluate them to a temp before the inlined body as the
17944 // inlined body may be modifying the global ref.
17945 // TODO-1stClassStructs: We currently do not reuse an existing lclVar
17946 // if it is a struct, because it requires some additional handling.
17948 if (!varTypeIsStruct(lclTyp) && (!inlArgInfo[lclNum].argHasSideEff) && (!inlArgInfo[lclNum].argHasGlobRef))
17950 /* Get a *LARGE* LCL_VAR node */
17951 op1 = gtNewLclLNode(tmpNum, genActualType(lclTyp), lclNum);
17953 /* Record op1 as the very first use of this argument.
17954 If there are no further uses of the arg, we may be
17955 able to use the actual arg node instead of the temp.
17956 If we do see any further uses, we will clear this. */
17957 inlArgInfo[lclNum].argBashTmpNode = op1;
17961 /* Get a small LCL_VAR node */
17962 op1 = gtNewLclvNode(tmpNum, genActualType(lclTyp));
17963 /* No bashing of this argument */
17964 inlArgInfo[lclNum].argBashTmpNode = nullptr;
17969 /* Mark the argument as used */
17971 inlArgInfo[lclNum].argIsUsed = true;
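// Illustrative sketch (hypothetical, for exposition only): the first fetch of
// a complex argument such as "x + y" produces
//
//     tmpN = x + y;   ...use site: LCL_VAR tmpN   (recorded in argBashTmpNode)
//
// If no further use is ever seen, later inliner cleanup can substitute the
// original "x + y" tree for that lone LCL_VAR and drop the temp assignment;
// a second fetch clears argBashTmpNode so the temp is kept.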
17976 /******************************************************************************
17977 Is this the original "this" argument to the call being inlined?
17979 Note that we do not inline methods with "starg 0", and so we do not need to worry about it.
17983 BOOL Compiler::impInlineIsThis(GenTreePtr tree, InlArgInfo* inlArgInfo)
17985 assert(compIsForInlining());
17986 return (tree->gtOper == GT_LCL_VAR && tree->gtLclVarCommon.gtLclNum == inlArgInfo[0].argTmpNum);
17989 //-----------------------------------------------------------------------------
17990 // This function checks if a dereference in the inlinee can guarantee that
17991 // the "this" is non-NULL.
17992 // If we haven't hit a branch or a side effect, and we are dereferencing
17993 // from 'this' to access a field or make a GTF_CALL_NULLCHECK call,
17994 // then we can avoid a separate null pointer check.
17996 // "additionalTreesToBeEvaluatedBefore"
17997 // is the set of pending trees that have not yet been added to the statement list,
17998 // and which have been removed from verCurrentState.esStack[]
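// For example (hypothetical IL, for exposition only), an inlinee whose body is
//
//     ldarg.0              // "this"
//     ldfld  int32 C::x
//     ret
//
// dereferences "this" before any branch or other visible side effect, so the
// ldfld itself faults on null and the inliner can omit the explicit null
// check; the scans below confirm nothing with a visible side effect runs
// before the dereference.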
BOOL Compiler::impInlineIsGuaranteedThisDerefBeforeAnySideEffects(GenTreePtr  additionalTreesToBeEvaluatedBefore,
                                                                  GenTreePtr  variableBeingDereferenced,
                                                                  InlArgInfo* inlArgInfo)
{
    assert(compIsForInlining());
    assert(opts.OptEnabled(CLFLG_INLINING));

    BasicBlock* block = compCurBB;

    GenTreePtr stmt;
    GenTreePtr expr;

    if (block != fgFirstBB)
    {
        return FALSE;
    }

    if (!impInlineIsThis(variableBeingDereferenced, inlArgInfo))
    {
        return FALSE;
    }

    if (additionalTreesToBeEvaluatedBefore &&
        GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(additionalTreesToBeEvaluatedBefore->gtFlags))
    {
        return FALSE;
    }

    for (stmt = impTreeList->gtNext; stmt; stmt = stmt->gtNext)
    {
        expr = stmt->gtStmt.gtStmtExpr;

        if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(expr->gtFlags))
        {
            return FALSE;
        }
    }

    for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
    {
        unsigned stackTreeFlags = verCurrentState.esStack[level].val->gtFlags;
        if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(stackTreeFlags))
        {
            return FALSE;
        }
    }

    return TRUE;
}
/******************************************************************************/
// Check the inlining eligibility of this GT_CALL node.
// Mark GTF_CALL_INLINE_CANDIDATE on the GT_CALL node.

// Todo: find a way to record the failure reasons in the IR (or
// otherwise build tree context) so when we do the inlining pass we
// can capture these reasons.
void Compiler::impMarkInlineCandidate(GenTreePtr             callNode,
                                      CORINFO_CONTEXT_HANDLE exactContextHnd,
                                      CORINFO_CALL_INFO*     callInfo)
{
    // Let the strategy know there's another call
    impInlineRoot()->m_inlineStrategy->NoteCall();

    if (!opts.OptEnabled(CLFLG_INLINING))
    {
        /* XXX Mon 8/18/2008
         * This assert is misleading. The caller does not ensure that we have CLFLG_INLINING set before
         * calling impMarkInlineCandidate. However, if this assert trips it means that we're an inlinee and
         * CLFLG_MINOPT is set. That doesn't make a lot of sense. If you hit this assert, work back and
         * figure out why we did not set MAXOPT for this compile.
         */
        assert(!compIsForInlining());
        return;
    }

    if (compIsForImportOnly())
    {
        // Don't bother creating the inline candidate during verification.
        // Otherwise the call to info.compCompHnd->canInline will trigger a recursive verification
        // that leads to the creation of multiple instances of Compiler.
        return;
    }
    GenTreeCall* call = callNode->AsCall();
    InlineResult inlineResult(this, call, nullptr, "impMarkInlineCandidate");

    // Don't inline if we are not optimizing the root method.
    if (opts.compDbgCode)
    {
        inlineResult.NoteFatal(InlineObservation::CALLER_DEBUG_CODEGEN);
        return;
    }

    // Don't inline if inlining into the root method is disabled.
    if (InlineStrategy::IsNoInline(info.compCompHnd, info.compMethodHnd))
    {
        inlineResult.NoteFatal(InlineObservation::CALLER_IS_JIT_NOINLINE);
        return;
    }

    // Inlining candidate determination needs to honor only the IL tail prefix.
    // Inlining takes precedence over implicit tail call optimization (if the call is not directly recursive).
    if (call->IsTailPrefixedCall())
    {
        inlineResult.NoteFatal(InlineObservation::CALLSITE_EXPLICIT_TAIL_PREFIX);
        return;
    }
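
    // Illustration (IL, not from this codebase): an explicitly prefixed tail
    // call such as
    //
    //     tail. call int32 C::M(int32)
    //     ret
    //
    // carries the "tail." prefix, so the check above rejects it as an inline
    // candidate and the prefix is honored instead.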
    // Tail recursion elimination takes precedence over inlining.
    // TODO: We may want to do some of the additional checks from fgMorphCall
    // here to reduce the chance we don't inline a call that won't be optimized
    // as a fast tail call or turned into a loop.
    if (gtIsRecursiveCall(call) && call->IsImplicitTailCall())
    {
        inlineResult.NoteFatal(InlineObservation::CALLSITE_IMPLICIT_REC_TAIL_CALL);
        return;
    }
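
    // Illustration (hypothetical C#, not from this codebase): a directly
    // recursive call in tail position, e.g.
    //
    //     static int Count(int n) { return (n == 0) ? 0 : Count(n - 1); }
    //
    // may later be turned into a loop by tail recursion elimination, so it is
    // not marked as an inline candidate here.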
    if ((call->gtFlags & GTF_CALL_VIRT_KIND_MASK) != GTF_CALL_NONVIRT)
    {
        inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT);
        return;
    }

    /* Ignore helper calls */

    if (call->gtCallType == CT_HELPER)
    {
        inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_CALL_TO_HELPER);
        return;
    }

    /* Ignore indirect calls */
    if (call->gtCallType == CT_INDIRECT)
    {
        inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT_MANAGED);
        return;
    }

    /* I removed the check for BBJ_THROW. BBJ_THROW is usually marked as rarely run. This more or less
     * restricts the inliner to non-expanding inlines. I removed the check to allow for non-expanding
     * inlining in throw blocks. I should consider the same thing for catch and filter regions. */
    CORINFO_METHOD_HANDLE fncHandle = call->gtCallMethHnd;
    unsigned              methAttr;

    // Reuse method flags from the original callInfo if possible.
    if (fncHandle == callInfo->hMethod)
    {
        methAttr = callInfo->methodFlags;
    }
    else
    {
        methAttr = info.compCompHnd->getMethodAttribs(fncHandle);
    }

#ifdef DEBUG
    if (compStressCompile(STRESS_FORCE_INLINE, 0))
    {
        methAttr |= CORINFO_FLG_FORCEINLINE;
    }
#endif

    // Check for COMPlus_AggressiveInlining
    if (compDoAggressiveInlining)
    {
        methAttr |= CORINFO_FLG_FORCEINLINE;
    }
    if (!(methAttr & CORINFO_FLG_FORCEINLINE))
    {
        /* Don't bother inlining blocks that are in the catch handler region */
        if (bbInCatchHandlerILRange(compCurBB))
        {
#ifdef DEBUG
            if (verbose)
            {
                printf("\nWill not inline blocks that are in the catch handler region\n");
            }
#endif

            inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_CATCH);
            return;
        }

        /* Don't bother inlining blocks that are in the filter region */
        if (bbInFilterILRange(compCurBB))
        {
#ifdef DEBUG
            if (verbose)
            {
                printf("\nWill not inline blocks that are in the filter region\n");
            }
#endif

            inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_FILTER);
            return;
        }
    }
    /* If the caller's stack frame is marked, then we can't do any inlining. Period. */

    if (opts.compNeedSecurityCheck)
    {
        inlineResult.NoteFatal(InlineObservation::CALLER_NEEDS_SECURITY_CHECK);
        return;
    }

    /* Check if we tried to inline this method before */

    if (methAttr & CORINFO_FLG_DONT_INLINE)
    {
        inlineResult.NoteFatal(InlineObservation::CALLEE_IS_NOINLINE);
        return;
    }

    /* Cannot inline synchronized methods */

    if (methAttr & CORINFO_FLG_SYNCH)
    {
        inlineResult.NoteFatal(InlineObservation::CALLEE_IS_SYNCHRONIZED);
        return;
    }

    /* Do not inline if the callee needs security checks (since they would then mark the wrong frame) */

    if (methAttr & CORINFO_FLG_SECURITYCHECK)
    {
        inlineResult.NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
        return;
    }
    InlineCandidateInfo* inlineCandidateInfo = nullptr;
    impCheckCanInline(call, fncHandle, methAttr, exactContextHnd, &inlineCandidateInfo, &inlineResult);

    if (inlineResult.IsFailure())
    {
        return;
    }

    // The old value should be NULL.
    assert(call->gtInlineCandidateInfo == nullptr);

    call->gtInlineCandidateInfo = inlineCandidateInfo;

    // Mark the call node as an inline candidate.
    call->gtFlags |= GTF_CALL_INLINE_CANDIDATE;

    // Let the strategy know there's another candidate.
    impInlineRoot()->m_inlineStrategy->NoteCandidate();

    // Since we're not actually inlining yet, and this call site is
    // still just an inline candidate, there's nothing to report.
    inlineResult.SetReported();
}
/******************************************************************************/
// Returns true if the given intrinsic will be implemented by target-specific
// instructions.

bool Compiler::IsTargetIntrinsic(CorInfoIntrinsics intrinsicId)
{
#if defined(_TARGET_AMD64_) || (defined(_TARGET_X86_) && !defined(LEGACY_BACKEND))
    switch (intrinsicId)
    {
        // AMD64 only has SSE2 instructions to directly compute sqrt/abs.
        //
        // TODO: Because the x86 backend only targets SSE for floating-point code,
        //       it does not treat Sine, Cosine, or Round as intrinsics (JIT32
        //       implemented those intrinsics as x87 instructions). If this poses
        //       a CQ problem, it may be necessary to change the implementation of
        //       the helper calls to decrease call overhead or switch back to the
        //       x87 instructions. This is tracked by #7097.
        case CORINFO_INTRINSIC_Sqrt:
        case CORINFO_INTRINSIC_Abs:
            return true;

        default:
            return false;
    }
#elif defined(_TARGET_ARM64_)
    switch (intrinsicId)
    {
        case CORINFO_INTRINSIC_Sqrt:
        case CORINFO_INTRINSIC_Abs:
        case CORINFO_INTRINSIC_Round:
            return true;

        default:
            return false;
    }
#elif defined(_TARGET_ARM_)
    switch (intrinsicId)
    {
        case CORINFO_INTRINSIC_Sqrt:
        case CORINFO_INTRINSIC_Abs:
        case CORINFO_INTRINSIC_Round:
            return true;

        default:
            return false;
    }
#elif defined(_TARGET_X86_)
    switch (intrinsicId)
    {
        case CORINFO_INTRINSIC_Sin:
        case CORINFO_INTRINSIC_Cos:
        case CORINFO_INTRINSIC_Sqrt:
        case CORINFO_INTRINSIC_Abs:
        case CORINFO_INTRINSIC_Round:
            return true;

        default:
            return false;
    }
#else
    // TODO: This portion of the logic is not implemented for other arches.
    // The reason for returning true is that on all other arches the only
    // intrinsics enabled are target intrinsics.
    return true;
#endif //_TARGET_AMD64_
}
/******************************************************************************/
// Returns true if the given intrinsic will be implemented by calling
// System.Math methods.

bool Compiler::IsIntrinsicImplementedByUserCall(CorInfoIntrinsics intrinsicId)
{
    // Currently, if a math intrinsic is not implemented by target-specific
    // instructions, it will be implemented by a System.Math call. In the
    // future, if we turn to implementing some of them with helper calls,
    // this predicate needs to be revisited.
    return !IsTargetIntrinsic(intrinsicId);
}
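
// For illustration (the behavior follows from the predicates above): on
// ARM64, CORINFO_INTRINSIC_Sqrt is a target intrinsic, so it expands to
// instructions and IsIntrinsicImplementedByUserCall returns false, whereas
// CORINFO_INTRINSIC_Tan is not a target intrinsic there, so it remains a
// call to the System.Math implementation.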
bool Compiler::IsMathIntrinsic(CorInfoIntrinsics intrinsicId)
{
    switch (intrinsicId)
    {
        case CORINFO_INTRINSIC_Sin:
        case CORINFO_INTRINSIC_Sqrt:
        case CORINFO_INTRINSIC_Abs:
        case CORINFO_INTRINSIC_Cos:
        case CORINFO_INTRINSIC_Round:
        case CORINFO_INTRINSIC_Cosh:
        case CORINFO_INTRINSIC_Sinh:
        case CORINFO_INTRINSIC_Tan:
        case CORINFO_INTRINSIC_Tanh:
        case CORINFO_INTRINSIC_Asin:
        case CORINFO_INTRINSIC_Acos:
        case CORINFO_INTRINSIC_Atan:
        case CORINFO_INTRINSIC_Atan2:
        case CORINFO_INTRINSIC_Log10:
        case CORINFO_INTRINSIC_Pow:
        case CORINFO_INTRINSIC_Exp:
        case CORINFO_INTRINSIC_Ceiling:
        case CORINFO_INTRINSIC_Floor:
            return true;

        default:
            return false;
    }
}

bool Compiler::IsMathIntrinsic(GenTreePtr tree)
{
    return (tree->OperGet() == GT_INTRINSIC) && IsMathIntrinsic(tree->gtIntrinsic.gtIntrinsicId);
}

/*****************************************************************************/