// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX                                                                           XX
XX                           Importer                                        XX
XX                                                                           XX
XX   Imports the given method and converts it to semantic trees              XX
XX                                                                           XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/

#include "jitpch.h"
#ifdef _MSC_VER
#pragma hdrstop
#endif

#include "corexcep.h"
// Verification helpers: check a condition and, on failure, record/raise the
// verification error (optionally returning from the calling function).

#define Verify(cond, msg)                                                                                              \
    if (!(cond))                                                                                                       \
    {                                                                                                                  \
        verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__));                           \
    }

#define VerifyOrReturn(cond, msg)                                                                                      \
    if (!(cond))                                                                                                       \
    {                                                                                                                  \
        verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__));                           \
        return;                                                                                                        \
    }

#define VerifyOrReturnSpeculative(cond, msg, speculative)                                                              \
    if (!(cond))                                                                                                       \
    {                                                                                                                  \
        if (!(speculative))                                                                                            \
            verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__));                       \
        return false;                                                                                                  \
    }
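// For illustration only: a hypothetical use of these macros while importing an
// opcode ('sig' stands in for a CORINFO_SIG_INFO the caller already fetched):
//
//     VerifyOrReturn(sig.numArgs == 1, "setter must have a single argument");
//
// On failure this records the unverifiable-code condition (and may raise a
// verification exception) instead of silently importing bad IL.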
/*****************************************************************************/

void Compiler::impInit()
{
#ifdef DEBUG
    impTreeList        = nullptr;
    impTreeLast        = nullptr;
    impInlinedCodeSize = 0;
#endif
}
/*****************************************************************************
 *
 *  Pushes the given tree on the stack.
 */

void Compiler::impPushOnStack(GenTreePtr tree, typeInfo ti)
{
    /* Check for overflow. If inlining, we may be using a bigger stack */

    if ((verCurrentState.esStackDepth >= info.compMaxStack) &&
        (verCurrentState.esStackDepth >= impStkSize || ((compCurBB->bbFlags & BBF_IMPORTED) == 0)))
    {
        BADCODE("stack overflow");
    }
#ifdef DEBUG
    // If we are pushing a struct, make certain we know the precise type!
    if (tree->TypeGet() == TYP_STRUCT)
    {
        assert(ti.IsType(TI_STRUCT));
        CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandle();
        assert(clsHnd != NO_CLASS_HANDLE);
    }
    if (tiVerificationNeeded && !ti.IsDead())
    {
        assert(typeInfo::AreEquivalent(NormaliseForStack(ti), ti)); // types are normalized

        // The ti type is consistent with the tree type.
        //

        // On 64-bit systems, nodes whose "proper" type is "native int" get labeled TYP_LONG.
        // In the verification type system, we always transform "native int" to "TI_INT".
        // Ideally, we would keep track of which nodes labeled "TYP_LONG" are really "native int", but
        // attempts to do that have proved too difficult. Instead, we'll assume that in checks like this,
        // when there's a mismatch, it's because of this reason -- the typeInfo::AreEquivalentModuloNativeInt
        // method used in the last disjunct allows exactly this mismatch.
        assert(ti.IsDead() || ti.IsByRef() && (tree->TypeGet() == TYP_I_IMPL || tree->TypeGet() == TYP_BYREF) ||
               ti.IsUnboxedGenericTypeVar() && tree->TypeGet() == TYP_REF ||
               ti.IsObjRef() && tree->TypeGet() == TYP_REF || ti.IsMethod() && tree->TypeGet() == TYP_I_IMPL ||
               ti.IsType(TI_STRUCT) && tree->TypeGet() != TYP_REF ||
               typeInfo::AreEquivalentModuloNativeInt(NormaliseForStack(ti),
                                                      NormaliseForStack(typeInfo(tree->TypeGet()))));

        // If it is a struct type, make certain we normalized the primitive types
        assert(!ti.IsType(TI_STRUCT) ||
               info.compCompHnd->getTypeForPrimitiveValueClass(ti.GetClassHandle()) == CORINFO_TYPE_UNDEF);
    }
#if VERBOSE_VERIFY
    if (VERBOSE && tiVerificationNeeded)
    {
        printf("\n");
        printf(TI_DUMP_PADDING);
        printf("About to push to stack: ");
        ti.Dump();
    }
#endif // VERBOSE_VERIFY

#endif // DEBUG
    verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo = ti;
    verCurrentState.esStack[verCurrentState.esStackDepth++].val      = tree;

    if ((tree->gtType == TYP_LONG) && (compLongUsed == false))
    {
        compLongUsed = true;
    }
    else if (((tree->gtType == TYP_FLOAT) || (tree->gtType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
    {
        compFloatingPointUsed = true;
    }
}
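// For illustration only: a hypothetical call site for impPushOnStack. When the
// importer materializes an int32 constant (e.g. for CEE_LDC_I4), it pushes the
// new node together with its verification type, roughly:
//
//     impPushOnStack(gtNewIconNode(value), typeInfo(TI_INT));
//
// where 'value' stands in for the decoded IL operand.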
/******************************************************************************/
// used in the inliner, where we can assume typesafe code. please don't use in the importer!!
inline void Compiler::impPushOnStackNoType(GenTreePtr tree)
{
    assert(verCurrentState.esStackDepth < impStkSize);
    INDEBUG(verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo = typeInfo());
    verCurrentState.esStack[verCurrentState.esStackDepth++].val = tree;

    if ((tree->gtType == TYP_LONG) && (compLongUsed == false))
    {
        compLongUsed = true;
    }
    else if (((tree->gtType == TYP_FLOAT) || (tree->gtType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
    {
        compFloatingPointUsed = true;
    }
}
inline void Compiler::impPushNullObjRefOnStack()
{
    impPushOnStack(gtNewIconNode(0, TYP_REF), typeInfo(TI_NULL));
}
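// For example, this is the natural way to import CEE_LDNULL: the null reference
// is just an integer constant zero typed as TYP_REF, carrying the TI_NULL
// verification type so later merges treat it as compatible with any objref.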
// This method gets called when we run into unverifiable code
// (and we are verifying the method)

inline void Compiler::verRaiseVerifyExceptionIfNeeded(INDEBUG(const char* msg) DEBUGARG(const char* file)
                                                          DEBUGARG(unsigned line))
{
    // Remember that the code is not verifiable
    // Note that the method may yet pass canSkipMethodVerification(),
    // and so the presence of unverifiable code may not be an issue.
    tiIsVerifiableCode = FALSE;

#ifdef DEBUG
    const char* tail = strrchr(file, '\\');
    if (tail)
    {
        file = tail + 1;
    }

    if (JitConfig.JitBreakOnUnsafeCode())
    {
        assert(!"Unsafe code detected");
    }
#endif // DEBUG

    JITLOG((LL_INFO10000, "Detected unsafe code: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
            msg, info.compFullName, impCurOpcName, impCurOpcOffs));

    if (verNeedsVerification() || compIsForImportOnly())
    {
        JITLOG((LL_ERROR, "Verification failure: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
                msg, info.compFullName, impCurOpcName, impCurOpcOffs));
        verRaiseVerifyException(INDEBUG(msg) DEBUGARG(file) DEBUGARG(line));
    }
}
inline void DECLSPEC_NORETURN Compiler::verRaiseVerifyException(INDEBUG(const char* msg) DEBUGARG(const char* file)
                                                                    DEBUGARG(unsigned line))
{
    JITLOG((LL_ERROR, "Verification failure: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
            msg, info.compFullName, impCurOpcName, impCurOpcOffs));

#ifdef DEBUG
    //    BreakIfDebuggerPresent();
    if (getBreakOnBadCode())
    {
        assert(!"Typechecking error");
    }
#endif // DEBUG

    RaiseException(SEH_VERIFICATION_EXCEPTION, EXCEPTION_NONCONTINUABLE, 0, nullptr);
    UNREACHABLE();
}
/******************************************************************************/
// helper function that will tell us if the IL instruction at the addr passed
// by param consumes an address at the top of the stack. We use it to avoid
// marking a local as address-taken when its address is consumed immediately
// by the following instruction.
bool Compiler::impILConsumesAddr(const BYTE* codeAddr, CORINFO_METHOD_HANDLE fncHandle, CORINFO_MODULE_HANDLE scpHandle)
{
    assert(!compIsForInlining());

    OPCODE opcode;

    opcode = (OPCODE)getU1LittleEndian(codeAddr);

    switch (opcode)
    {
        // case CEE_LDFLDA: We're taking this one out as if you have a sequence
        // like
        //
        //          ldloca.0
        //          ldflda whatever
        //
        // of a primitive-like struct, you end up after morphing with addr of a local
        // that's not marked as addrtaken, which is wrong. Also ldflda is usually used
        // for structs that contain other structs, which isn't a case we handle very
        // well now for other reasons.

        case CEE_LDFLD:
        {
            // We won't collapse small fields. This is probably not the right place to have this
            // check, but we're only using the function for this purpose, and it is easy to factor
            // out if we need to do so.

            CORINFO_RESOLVED_TOKEN resolvedToken;
            impResolveToken(codeAddr + sizeof(__int8), &resolvedToken, CORINFO_TOKENKIND_Field);

            CORINFO_CLASS_HANDLE clsHnd;
            var_types lclTyp = JITtype2varType(info.compCompHnd->getFieldType(resolvedToken.hField, &clsHnd));

            // Preserve 'small' int types
            if (lclTyp > TYP_INT)
            {
                lclTyp = genActualType(lclTyp);
            }

            if (varTypeIsSmall(lclTyp))
            {
                return false;
            }

            return true;
        }
        default:
            break;
    }

    return false;
}
void Compiler::impResolveToken(const BYTE* addr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoTokenKind kind)
{
    pResolvedToken->tokenContext = impTokenLookupContextHandle;
    pResolvedToken->tokenScope   = info.compScopeHnd;
    pResolvedToken->token        = getU4LittleEndian(addr);
    pResolvedToken->tokenType    = kind;

    if (!tiVerificationNeeded)
    {
        info.compCompHnd->resolveToken(pResolvedToken);
    }
    else
    {
        Verify(eeTryResolveToken(pResolvedToken), "Token resolution failed");
    }
}
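// For illustration only: a hypothetical use while decoding an opcode whose
// 4-byte operand is a class token (the pattern used throughout the importer):
//
//     CORINFO_RESOLVED_TOKEN resolvedToken;
//     impResolveToken(codeAddr, &resolvedToken, CORINFO_TOKENKIND_Class);
//
// Under verification, failures surface through Verify() rather than an EE
// exception, so unverifiable code is reported uniformly.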
/*****************************************************************************
 *
 *  Pop one tree from the stack.
 */

StackEntry Compiler::impPopStack()
{
    if (verCurrentState.esStackDepth == 0)
    {
        BADCODE("stack underflow");
    }

#ifdef DEBUG
#if VERBOSE_VERIFY
    if (VERBOSE && tiVerificationNeeded)
    {
        printf("\n");
        printf(TI_DUMP_PADDING);
        printf("About to pop from the stack: ");
        const typeInfo& ti = verCurrentState.esStack[verCurrentState.esStackDepth - 1].seTypeInfo;
        ti.Dump();
    }
#endif // VERBOSE_VERIFY
#endif // DEBUG

    return verCurrentState.esStack[--verCurrentState.esStackDepth];
}
StackEntry Compiler::impPopStack(CORINFO_CLASS_HANDLE& structType)
{
    StackEntry ret = impPopStack();
    structType     = verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo.GetClassHandle();
    return ret;
}

GenTreePtr Compiler::impPopStack(typeInfo& ti)
{
    StackEntry ret = impPopStack();
    ti             = ret.seTypeInfo;
    return ret.val;
}
/*****************************************************************************
 *
 *  Peek at n'th (0-based) tree on the top of the stack.
 */

StackEntry& Compiler::impStackTop(unsigned n)
{
    if (verCurrentState.esStackDepth <= n)
    {
        BADCODE("stack underflow");
    }

    return verCurrentState.esStack[verCurrentState.esStackDepth - n - 1];
}
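// For illustration only: impStackTop(0) peeks at the top of the stack without
// popping it. A hypothetical use is inspecting the 'this' argument of a call
// before any arguments are actually popped:
//
//     GenTreePtr thisPtr = impStackTop(count - 1).val; // deepest of 'count' args
//
// ('count' stands in for the number of call arguments still on the stack.)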
/*****************************************************************************
 *  Some of the trees are spilled specially. While unspilling them, or
 *  making a copy, these need to be handled specially. The function
 *  enumerates the operators possible after spilling.
 */

#ifdef DEBUG // only used in asserts
static bool impValidSpilledStackEntry(GenTreePtr tree)
{
    if (tree->gtOper == GT_LCL_VAR)
    {
        return true;
    }

    if (tree->OperIsConst())
    {
        return true;
    }

    return false;
}
#endif
/*****************************************************************************
 *
 *  The following logic is used to save/restore stack contents.
 *  If 'copy' is true, then we make a copy of the trees on the stack. These
 *  have to all be cloneable/spilled values.
 */

void Compiler::impSaveStackState(SavedStack* savePtr, bool copy)
{
    savePtr->ssDepth = verCurrentState.esStackDepth;

    if (verCurrentState.esStackDepth)
    {
        savePtr->ssTrees = new (this, CMK_ImpStack) StackEntry[verCurrentState.esStackDepth];
        size_t saveSize  = verCurrentState.esStackDepth * sizeof(*savePtr->ssTrees);

        if (copy)
        {
            StackEntry* table = savePtr->ssTrees;

            /* Make a fresh copy of all the stack entries */

            for (unsigned level = 0; level < verCurrentState.esStackDepth; level++, table++)
            {
                table->seTypeInfo = verCurrentState.esStack[level].seTypeInfo;
                GenTreePtr tree   = verCurrentState.esStack[level].val;

                assert(impValidSpilledStackEntry(tree));

                switch (tree->gtOper)
                {
                    case GT_CNS_INT:
                    case GT_CNS_LNG:
                    case GT_CNS_DBL:
                    case GT_CNS_STR:
                    case GT_LCL_VAR:
                        table->val = gtCloneExpr(tree);
                        break;

                    default:
                        assert(!"Bad oper - Not covered by impValidSpilledStackEntry()");
                        break;
                }
            }
        }
        else
        {
            memcpy(savePtr->ssTrees, verCurrentState.esStack, saveSize);
        }
    }
}
void Compiler::impRestoreStackState(SavedStack* savePtr)
{
    verCurrentState.esStackDepth = savePtr->ssDepth;

    if (verCurrentState.esStackDepth)
    {
        memcpy(verCurrentState.esStack, savePtr->ssTrees,
               verCurrentState.esStackDepth * sizeof(*verCurrentState.esStack));
    }
}
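// For illustration only: the save/restore pair is used when the importer must
// speculatively import a region and then rewind. A hypothetical sketch:
//
//     SavedStack blockState;
//     impSaveStackState(&blockState, true);  // clone the evaluation stack
//     // ... speculative work that may push/pop entries ...
//     impRestoreStackState(&blockState);     // put the stack back as it was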
/*****************************************************************************
 *
 *  Get the tree list started for a new basic block.
 */
inline void Compiler::impBeginTreeList()
{
    assert(impTreeList == nullptr && impTreeLast == nullptr);

    impTreeList = impTreeLast = new (this, GT_BEG_STMTS) GenTree(GT_BEG_STMTS, TYP_VOID);
}
/*****************************************************************************
 *
 *  Store the given start and end stmt in the given basic block. This is
 *  mostly called by impEndTreeList(BasicBlock *block). It is called
 *  directly only for handling CEE_LEAVEs out of finally-protected trys.
 */

inline void Compiler::impEndTreeList(BasicBlock* block, GenTreePtr firstStmt, GenTreePtr lastStmt)
{
    assert(firstStmt->gtOper == GT_STMT);
    assert(lastStmt->gtOper == GT_STMT);

    /* Make the list circular, so that we can easily walk it backwards */

    firstStmt->gtPrev = lastStmt;

    /* Store the tree list in the basic block */

    block->bbTreeList = firstStmt;

    /* The block should not already be marked as imported */
    assert((block->bbFlags & BBF_IMPORTED) == 0);

    block->bbFlags |= BBF_IMPORTED;
}
/*****************************************************************************
 *
 *  Store the current tree list in the given basic block.
 */

inline void Compiler::impEndTreeList(BasicBlock* block)
{
    assert(impTreeList->gtOper == GT_BEG_STMTS);

    GenTreePtr firstTree = impTreeList->gtNext;

    if (!firstTree)
    {
        /* The block should not already be marked as imported */
        assert((block->bbFlags & BBF_IMPORTED) == 0);

        // Empty block. Just mark it as imported
        block->bbFlags |= BBF_IMPORTED;
    }
    else
    {
        // Ignore the GT_BEG_STMTS
        assert(firstTree->gtPrev == impTreeList);

        impEndTreeList(block, firstTree, impTreeLast);
    }

#ifdef DEBUG
    if (impLastILoffsStmt != nullptr)
    {
        impLastILoffsStmt->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
        impLastILoffsStmt                          = nullptr;
    }

    impTreeList = impTreeLast = nullptr;
#endif
}
/*****************************************************************************
 *
 *  Check that storing the given tree doesn't mess up the semantic order. Note
 *  that this has only limited value as we can only check [0..chkLevel).
 */

inline void Compiler::impAppendStmtCheck(GenTreePtr stmt, unsigned chkLevel)
{
#ifndef DEBUG
    return;
#else
    assert(stmt->gtOper == GT_STMT);

    if (chkLevel == (unsigned)CHECK_SPILL_ALL)
    {
        chkLevel = verCurrentState.esStackDepth;
    }

    if (verCurrentState.esStackDepth == 0 || chkLevel == 0 || chkLevel == (unsigned)CHECK_SPILL_NONE)
    {
        return;
    }

    GenTreePtr tree = stmt->gtStmt.gtStmtExpr;

    // Calls can only be appended if there are no GTF_GLOB_EFFECT trees on the stack

    if (tree->gtFlags & GTF_CALL)
    {
        for (unsigned level = 0; level < chkLevel; level++)
        {
            assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_EFFECT) == 0);
        }
    }

    if (tree->gtOper == GT_ASG)
    {
        // For an assignment to a local variable, all references of that
        // variable have to be spilled. If it is aliased, all calls and
        // indirect accesses have to be spilled

        if (tree->gtOp.gtOp1->gtOper == GT_LCL_VAR)
        {
            unsigned lclNum = tree->gtOp.gtOp1->gtLclVarCommon.gtLclNum;
            for (unsigned level = 0; level < chkLevel; level++)
            {
                assert(!gtHasRef(verCurrentState.esStack[level].val, lclNum, false));
                assert(!lvaTable[lclNum].lvAddrExposed ||
                       (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT) == 0);
            }
        }

        // If the access may be to global memory, all side effects have to be spilled.

        else if (tree->gtOp.gtOp1->gtFlags & GTF_GLOB_REF)
        {
            for (unsigned level = 0; level < chkLevel; level++)
            {
                assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_REF) == 0);
            }
        }
    }
#endif
}
/*****************************************************************************
 *
 *  Append the given GT_STMT node to the current block's tree list.
 *  [0..chkLevel) is the portion of the stack which we will check for
 *  interference with stmt and spill if needed.
 */

inline void Compiler::impAppendStmt(GenTreePtr stmt, unsigned chkLevel)
{
    assert(stmt->gtOper == GT_STMT);
    noway_assert(impTreeLast != nullptr);

    /* If the statement being appended has any side-effects, check the stack
       to see if anything needs to be spilled to preserve correct ordering. */

    GenTreePtr expr  = stmt->gtStmt.gtStmtExpr;
    unsigned   flags = expr->gtFlags & GTF_GLOB_EFFECT;

    // Assignments to (unaliased) locals don't count as a side-effect as
    // we handle them specially using impSpillLclRefs(). Temp locals should
    // be fine too.

    if ((expr->gtOper == GT_ASG) && (expr->gtOp.gtOp1->gtOper == GT_LCL_VAR) &&
        !(expr->gtOp.gtOp1->gtFlags & GTF_GLOB_REF) && !gtHasLocalsWithAddrOp(expr->gtOp.gtOp2))
    {
        unsigned op2Flags = expr->gtOp.gtOp2->gtFlags & GTF_GLOB_EFFECT;
        assert(flags == (op2Flags | GTF_ASG));
        flags = op2Flags;
    }

    if (chkLevel == (unsigned)CHECK_SPILL_ALL)
    {
        chkLevel = verCurrentState.esStackDepth;
    }

    if (chkLevel && chkLevel != (unsigned)CHECK_SPILL_NONE)
    {
        assert(chkLevel <= verCurrentState.esStackDepth);

        if (flags)
        {
            // If there is a call, we have to spill global refs
            bool spillGlobEffects = (flags & GTF_CALL) ? true : false;

            if (expr->gtOper == GT_ASG)
            {
                GenTree* lhs = expr->gtGetOp1();
                // If we are assigning to a global ref, we have to spill global refs on stack.
                // TODO-1stClassStructs: Previously, spillGlobEffects was set to true for
                // GT_INITBLK and GT_COPYBLK, but this is overly conservative, and should be
                // revisited. (Note that it was NOT set to true for GT_COPYOBJ.)
                if (!expr->OperIsBlkOp())
                {
                    // If we are assigning to a global ref, we have to spill global refs on stack
                    if ((lhs->gtFlags & GTF_GLOB_REF) != 0)
                    {
                        spillGlobEffects = true;
                    }
                }
                else if ((lhs->OperIsBlk() && !lhs->AsBlk()->HasGCPtr()) ||
                         ((lhs->OperGet() == GT_LCL_VAR) &&
                          (lvaTable[lhs->AsLclVarCommon()->gtLclNum].lvStructGcCount == 0)))
                {
                    spillGlobEffects = true;
                }
            }

            impSpillSideEffects(spillGlobEffects, chkLevel DEBUGARG("impAppendStmt"));
        }
        else
        {
            impSpillSpecialSideEff();
        }
    }
    impAppendStmtCheck(stmt, chkLevel);

    /* Point 'prev' at the previous node, so that we can walk backwards */

    stmt->gtPrev = impTreeLast;

    /* Append the expression statement to the list */

    impTreeLast->gtNext = stmt;
    impTreeLast         = stmt;

#ifdef FEATURE_SIMD
    impMarkContiguousSIMDFieldAssignments(stmt);
#endif

    /* Once we set impCurStmtOffs in an appended tree, we are ready to
       report the following offsets. So reset impCurStmtOffs */

    if (impTreeLast->gtStmt.gtStmtILoffsx == impCurStmtOffs)
    {
        impCurStmtOffsSet(BAD_IL_OFFSET);
    }

#ifdef DEBUG
    if (impLastILoffsStmt == nullptr)
    {
        impLastILoffsStmt = stmt;
    }

    if (verbose)
    {
        printf("\n\n");
        gtDispTree(stmt);
    }
#endif
}
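// For illustration only: why the spilling above matters. Suppose the evaluation
// stack holds an indirection of a static field and the statement being appended
// is a call that may write that field:
//
//     IL:  ldsfld  int32 C::s     // pushes GT_IND(s) on the stack
//          call    void C::M()    // M() may assign C::s
//
// Appending the call without first spilling GT_IND(s) to a temp would let the
// later use of the stack entry observe the post-call value, reordering the
// program's side effects.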
/*****************************************************************************
 *
 *  Insert the given GT_STMT "stmt" before GT_STMT "stmtBefore"
 */

inline void Compiler::impInsertStmtBefore(GenTreePtr stmt, GenTreePtr stmtBefore)
{
    assert(stmt->gtOper == GT_STMT);
    assert(stmtBefore->gtOper == GT_STMT);

    GenTreePtr stmtPrev = stmtBefore->gtPrev;
    stmt->gtPrev        = stmtPrev;
    stmt->gtNext        = stmtBefore;
    stmtPrev->gtNext    = stmt;
    stmtBefore->gtPrev  = stmt;
}
/*****************************************************************************
 *
 *  Append the given expression tree to the current block's tree list.
 *  Return the newly created statement.
 */

GenTreePtr Compiler::impAppendTree(GenTreePtr tree, unsigned chkLevel, IL_OFFSETX offset)
{
    assert(tree);

    /* Allocate an 'expression statement' node */

    GenTreePtr expr = gtNewStmt(tree, offset);

    /* Append the statement to the current block's stmt list */

    impAppendStmt(expr, chkLevel);

    return expr;
}
/*****************************************************************************
 *
 *  Insert the given expression tree before GT_STMT "stmtBefore"
 */

void Compiler::impInsertTreeBefore(GenTreePtr tree, IL_OFFSETX offset, GenTreePtr stmtBefore)
{
    assert(stmtBefore->gtOper == GT_STMT);

    /* Allocate an 'expression statement' node */

    GenTreePtr expr = gtNewStmt(tree, offset);

    /* Insert the statement before 'stmtBefore' */

    impInsertStmtBefore(expr, stmtBefore);
}
/*****************************************************************************
 *
 *  Append an assignment of the given value to a temp to the current tree list.
 *  curLevel is the stack level for which the spill to the temp is being done.
 */

void Compiler::impAssignTempGen(unsigned    tmp,
                                GenTreePtr  val,
                                unsigned    curLevel,
                                GenTreePtr* pAfterStmt, /* = NULL */
                                IL_OFFSETX  ilOffset,   /* = BAD_IL_OFFSET */
                                BasicBlock* block       /* = NULL */
                                )
{
    GenTreePtr asg = gtNewTempAssign(tmp, val);

    if (!asg->IsNothingNode())
    {
        if (pAfterStmt)
        {
            GenTreePtr asgStmt = gtNewStmt(asg, ilOffset);
            *pAfterStmt        = fgInsertStmtAfter(block, *pAfterStmt, asgStmt);
        }
        else
        {
            impAppendTree(asg, curLevel, impCurStmtOffs);
        }
    }
}
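// For illustration only: the canonical "spill a stack value to a fresh temp"
// pattern built from these helpers (a hypothetical sketch):
//
//     unsigned tmpNum = lvaGrabTemp(true DEBUGARG("stack spill"));
//     impAssignTempGen(tmpNum, impPopStack().val, (unsigned)CHECK_SPILL_ALL);
//     GenTreePtr tempUse = gtNewLclvNode(tmpNum, lvaTable[tmpNum].TypeGet());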
/*****************************************************************************
 * same as above, but handle the valueclass case too
 */

void Compiler::impAssignTempGen(unsigned             tmpNum,
                                GenTreePtr           val,
                                CORINFO_CLASS_HANDLE structType,
                                unsigned             curLevel,
                                GenTreePtr*          pAfterStmt, /* = NULL */
                                IL_OFFSETX           ilOffset,   /* = BAD_IL_OFFSET */
                                BasicBlock*          block       /* = NULL */
                                )
{
    GenTreePtr asg;

    if (varTypeIsStruct(val))
    {
        assert(tmpNum < lvaCount);
        assert(structType != NO_CLASS_HANDLE);

        // if the method is non-verifiable the assert is not true
        // so at least ignore it in the case when verification is turned on
        // since any block that tries to use the temp would have failed verification.
        var_types varType = lvaTable[tmpNum].lvType;
        assert(tiVerificationNeeded || varType == TYP_UNDEF || varTypeIsStruct(varType));
        lvaSetStruct(tmpNum, structType, false);

        // Now, set the type of the struct value. Note that lvaSetStruct may modify the type
        // of the lclVar to a specialized type (e.g. TYP_SIMD), based on the handle (structType)
        // that has been passed in for the value being assigned to the temp, in which case we
        // need to set 'val' to that same type.
        // Note also that if we always normalized the types of any node that might be a struct
        // type, this would not be necessary - but that requires additional JIT/EE interface
        // calls that may not actually be required - e.g. if we only access a field of a struct.

        val->gtType = lvaTable[tmpNum].lvType;

        GenTreePtr dst = gtNewLclvNode(tmpNum, val->gtType);
        asg            = impAssignStruct(dst, val, structType, curLevel, pAfterStmt, block);
    }
    else
    {
        asg = gtNewTempAssign(tmpNum, val);
    }

    if (!asg->IsNothingNode())
    {
        if (pAfterStmt)
        {
            GenTreePtr asgStmt = gtNewStmt(asg, ilOffset);
            *pAfterStmt        = fgInsertStmtAfter(block, *pAfterStmt, asgStmt);
        }
        else
        {
            impAppendTree(asg, curLevel, impCurStmtOffs);
        }
    }
}
/*****************************************************************************
 *
 *  Pop the given number of values from the stack and return a list node with
 *  their values.
 *  The 'prefixTree' argument may optionally contain an argument
 *  list that is prepended to the list returned from this function.
 *
 *  The notion of prepended is a bit misleading in that the list is backwards
 *  from the way I would expect: The first element popped is at the end of
 *  the returned list, and prefixTree is 'before' that, meaning closer to
 *  the end of the list. To get to prefixTree, you have to walk to the
 *  end of the list.
 *
 *  For ARG_ORDER_R2L prefixTree is only used to insert extra arguments, as
 *  such we reverse its meaning such that returnValue has a reversed
 *  prefixTree at the head of the list.
 */
GenTreeArgList* Compiler::impPopList(unsigned          count,
                                     unsigned*         flagsPtr,
                                     CORINFO_SIG_INFO* sig,
                                     GenTreeArgList*   prefixTree)
{
    assert(sig == nullptr || count == sig->numArgs);

    unsigned             flags = 0;
    CORINFO_CLASS_HANDLE structType;
    GenTreeArgList*      treeList;

    if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L)
    {
        treeList = nullptr;
    }
    else
    { // ARG_ORDER_L2R
        treeList = prefixTree;
    }

    while (count--)
    {
        StackEntry se   = impPopStack();
        typeInfo   ti   = se.seTypeInfo;
        GenTreePtr temp = se.val;

        if (varTypeIsStruct(temp))
        {
            // Morph trees that aren't already OBJs or MKREFANY to be OBJs
            assert(ti.IsType(TI_STRUCT));
            structType = ti.GetClassHandleForValueClass();
            temp       = impNormStructVal(temp, structType, (unsigned)CHECK_SPILL_ALL);
        }

        /* NOTE: we defer bashing the type for I_IMPL to fgMorphArgs */
        flags |= temp->gtFlags;
        treeList = gtNewListNode(temp, treeList);
    }

    *flagsPtr = flags;
    if (sig != nullptr)
    {
        if (sig->retTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS &&
            sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR && sig->retType != CORINFO_TYPE_VAR)
        {
            // Make sure that all valuetypes (including enums) that we push are loaded.
            // This is to guarantee that if a GC is triggered from the prestub of this method,
            // all valuetypes in the method signature are already loaded.
            // We need to be able to find the size of the valuetypes, but we cannot
            // do a class-load from within GC.
            info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(sig->retTypeSigClass);
        }

        CORINFO_ARG_LIST_HANDLE argLst = sig->args;
        CORINFO_CLASS_HANDLE    argClass;
        CORINFO_CLASS_HANDLE    argRealClass;
        GenTreeArgList*         args;
        for (args = treeList, count = sig->numArgs; count > 0; args = args->Rest(), count--)
        {
            PREFIX_ASSUME(args != nullptr);

            CorInfoType corType = strip(info.compCompHnd->getArgType(sig, argLst, &argClass));

            // insert implied casts (from float to double or double to float)

            if (corType == CORINFO_TYPE_DOUBLE && args->Current()->TypeGet() == TYP_FLOAT)
            {
                args->Current() = gtNewCastNode(TYP_DOUBLE, args->Current(), TYP_DOUBLE);
            }
            else if (corType == CORINFO_TYPE_FLOAT && args->Current()->TypeGet() == TYP_DOUBLE)
            {
                args->Current() = gtNewCastNode(TYP_FLOAT, args->Current(), TYP_FLOAT);
            }

            // insert any widening or narrowing casts for backwards compatibility

            args->Current() = impImplicitIorI4Cast(args->Current(), JITtype2varType(corType));

            if (corType != CORINFO_TYPE_CLASS && corType != CORINFO_TYPE_BYREF && corType != CORINFO_TYPE_PTR &&
                corType != CORINFO_TYPE_VAR && (argRealClass = info.compCompHnd->getArgClass(sig, argLst)) != nullptr)
            {
                // Everett MC++ could generate IL with mismatched valuetypes. It used to work with the Everett JIT,
                // but it stopped working in Whidbey when we started passing simple valuetypes as underlying
                // primitive types.
                //
                // We will try to adjust for this case here to avoid breaking customers' code (see VSW 485789 for
                // details).
                if (corType == CORINFO_TYPE_VALUECLASS && !varTypeIsStruct(args->Current()))
                {
                    args->Current() = impNormStructVal(args->Current(), argRealClass, (unsigned)CHECK_SPILL_ALL, true);
                }

                // Make sure that all valuetypes (including enums) that we push are loaded.
                // This is to guarantee that if a GC is triggered from the prestub of this method,
                // all valuetypes in the method signature are already loaded.
                // We need to be able to find the size of the valuetypes, but we cannot
                // do a class-load from within GC.
                info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(argRealClass);
            }

            argLst = info.compCompHnd->getArgNext(argLst);
        }
    }
    if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L)
    {
        // Prepend the prefixTree

        // Simple in-place reversal to place treeList
        // at the end of a reversed prefixTree
        while (prefixTree != nullptr)
        {
            GenTreeArgList* next = prefixTree->Rest();
            prefixTree->Rest()   = treeList;
            treeList             = prefixTree;
            prefixTree           = next;
        }
    }

    return treeList;
}
/*****************************************************************************
 *
 *  Pop the given number of values from the stack in reverse order (STDCALL/CDECL etc.)
 *  The first "skipReverseCount" items are not reversed.
 */

GenTreeArgList* Compiler::impPopRevList(unsigned          count,
                                        unsigned*         flagsPtr,
                                        CORINFO_SIG_INFO* sig,
                                        unsigned          skipReverseCount)
{
    assert(skipReverseCount <= count);

    GenTreeArgList* list = impPopList(count, flagsPtr, sig);

    // reverse the list
    if (list == nullptr || skipReverseCount == count)
    {
        return list;
    }

    GenTreeArgList* ptr          = nullptr; // Initialized to the first node that needs to be reversed
    GenTreeArgList* lastSkipNode = nullptr; // Will be set to the last node that does not need to be reversed

    if (skipReverseCount == 0)
    {
        ptr = list;
    }
    else
    {
        lastSkipNode = list;
        // Get to the first node that needs to be reversed
        for (unsigned i = 0; i < skipReverseCount - 1; i++)
        {
            lastSkipNode = lastSkipNode->Rest();
        }

        PREFIX_ASSUME(lastSkipNode != nullptr);
        ptr = lastSkipNode->Rest();
    }

    GenTreeArgList* reversedList = nullptr;

    do
    {
        GenTreeArgList* tmp = ptr->Rest();
        ptr->Rest()         = reversedList;
        reversedList        = ptr;
        ptr                 = tmp;
    } while (ptr != nullptr);

    if (skipReverseCount)
    {
        lastSkipNode->Rest() = reversedList;
        return list;
    }
    else
    {
        return reversedList;
    }
}
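// For illustration only: with count == 4 and skipReverseCount == 2, a popped
// list [a1, a2, a3, a4] keeps its first two nodes in place and reverses the
// tail, yielding [a1, a2, a4, a3]. With skipReverseCount == 0 the entire list
// is reversed in place.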
/*****************************************************************************
 *  Assign (copy) the structure from 'src' to 'dest'. The structure is a value
 *  class of type 'clsHnd'. It returns the tree that should be appended to the
 *  statement list that represents the assignment.
 *  Temp assignments may be appended to impTreeList if spilling is necessary.
 *  curLevel is the stack level for which a spill may be being done.
 */

GenTreePtr Compiler::impAssignStruct(GenTreePtr           dest,
                                     GenTreePtr           src,
                                     CORINFO_CLASS_HANDLE structHnd,
                                     unsigned             curLevel,
                                     GenTreePtr*          pAfterStmt, /* = NULL */
                                     BasicBlock*          block       /* = NULL */
                                     )
{
    assert(varTypeIsStruct(dest));

    while (dest->gtOper == GT_COMMA)
    {
        assert(varTypeIsStruct(dest->gtOp.gtOp2)); // Second thing is the struct

        // Append all the op1 of GT_COMMA trees before we evaluate op2 of the GT_COMMA tree.
        if (pAfterStmt)
        {
            *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(dest->gtOp.gtOp1, impCurStmtOffs));
        }
        else
        {
            impAppendTree(dest->gtOp.gtOp1, curLevel, impCurStmtOffs); // do the side effect
        }

        // set dest to the second thing
        dest = dest->gtOp.gtOp2;
    }

    assert(dest->gtOper == GT_LCL_VAR || dest->gtOper == GT_RETURN || dest->gtOper == GT_FIELD ||
           dest->gtOper == GT_IND || dest->gtOper == GT_OBJ || dest->gtOper == GT_INDEX);

    if (dest->OperGet() == GT_LCL_VAR && src->OperGet() == GT_LCL_VAR &&
        src->gtLclVarCommon.gtLclNum == dest->gtLclVarCommon.gtLclNum)
    {
        // Self-assignment is a NOP.
        return gtNewNothingNode();
    }

    // TODO-1stClassStructs: Avoid creating an address if it is not needed,
    // or re-creating a Blk node if it is.
    GenTreePtr destAddr;

    if (dest->gtOper == GT_IND || dest->OperIsBlk())
    {
        destAddr = dest->gtOp.gtOp1;
    }
    else
    {
        destAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, dest);
    }

    return (impAssignStructPtr(destAddr, src, structHnd, curLevel, pAfterStmt, block));
}
/*****************************************************************************/

GenTreePtr Compiler::impAssignStructPtr(GenTreePtr           destAddr,
                                        GenTreePtr           src,
                                        CORINFO_CLASS_HANDLE structHnd,
                                        unsigned             curLevel,
                                        GenTreePtr*          pAfterStmt, /* = NULL */
                                        BasicBlock*          block       /* = NULL */
                                        )
{
    var_types  destType;
    GenTreePtr dest      = nullptr;
    unsigned   destFlags = 0;
#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
    assert(varTypeIsStruct(src) || (src->gtOper == GT_ADDR && src->TypeGet() == TYP_BYREF));
    // TODO-ARM-BUG: Does ARM need this?
    // TODO-ARM64-BUG: Does ARM64 need this?
    assert(src->gtOper == GT_LCL_VAR || src->gtOper == GT_FIELD || src->gtOper == GT_IND || src->gtOper == GT_OBJ ||
           src->gtOper == GT_CALL || src->gtOper == GT_MKREFANY || src->gtOper == GT_RET_EXPR ||
           src->gtOper == GT_COMMA || src->gtOper == GT_ADDR ||
           (src->TypeGet() != TYP_STRUCT && (GenTree::OperIsSIMD(src->gtOper) || src->gtOper == GT_LCL_FLD)));
#else  // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
    assert(varTypeIsStruct(src));

    assert(src->gtOper == GT_LCL_VAR || src->gtOper == GT_FIELD || src->gtOper == GT_IND || src->gtOper == GT_OBJ ||
           src->gtOper == GT_CALL || src->gtOper == GT_MKREFANY || src->gtOper == GT_RET_EXPR ||
           src->gtOper == GT_COMMA ||
           (src->TypeGet() != TYP_STRUCT && (GenTree::OperIsSIMD(src->gtOper) || src->gtOper == GT_LCL_FLD)));
#endif // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
    if (destAddr->OperGet() == GT_ADDR)
    {
        GenTree* destNode = destAddr->gtGetOp1();
        // If the actual destination is a local (for non-LEGACY_BACKEND), or already a block node, or is a node that
        // will be morphed, don't insert an OBJ(ADDR).
        if (destNode->gtOper == GT_INDEX || destNode->OperIsBlk()
#ifndef LEGACY_BACKEND
            || ((destNode->OperGet() == GT_LCL_VAR) && (destNode->TypeGet() == src->TypeGet()))
#endif // !LEGACY_BACKEND
            )
        {
            dest = destNode;
        }
        destType = destNode->TypeGet();
    }
    else
    {
        destType = src->TypeGet();
    }

    var_types asgType = src->TypeGet();
    if (src->gtOper == GT_CALL)
    {
        if (src->AsCall()->TreatAsHasRetBufArg(this))
        {
            // Case of call returning a struct via hidden retbuf arg

            // insert the return value buffer into the argument list as first byref parameter
            src->gtCall.gtCallArgs = gtNewListNode(destAddr, src->gtCall.gtCallArgs);

            // now returns void, not a struct
            src->gtType = TYP_VOID;

            // return the morphed call node
            return src;
        }
        else
        {
            // Case of call returning a struct in one or more registers.

            var_types returnType = (var_types)src->gtCall.gtReturnType;

            // We won't use a return buffer, so change the type of src->gtType to 'returnType'
            src->gtType = genActualType(returnType);

            // First we try to change this to "LclVar/LclFld = call"
            //
            if ((destAddr->gtOper == GT_ADDR) && (destAddr->gtOp.gtOp1->gtOper == GT_LCL_VAR))
            {
                // If it is a multi-reg struct return, don't change the oper to GT_LCL_FLD.
                // That is, the IR will be of the form lclVar = call for multi-reg return
                //
                GenTreePtr lcl = destAddr->gtOp.gtOp1;
                if (src->AsCall()->HasMultiRegRetVal())
                {
                    // Mark the struct LclVar as used in a MultiReg return context
                    // which currently makes it non promotable.
                    // TODO-1stClassStructs: Eliminate this pessimization when we can more generally
                    // handle multireg returns.
                    lcl->gtFlags |= GTF_DONT_CSE;
                    lvaTable[lcl->gtLclVarCommon.gtLclNum].lvIsMultiRegRet = true;
                }
                else // The call result is not a multireg return
                {
                    // We change this to a GT_LCL_FLD (from a GT_ADDR of a GT_LCL_VAR)
                    lcl->ChangeOper(GT_LCL_FLD);
                    fgLclFldAssign(lcl->gtLclVarCommon.gtLclNum);
                }

                lcl->gtType = src->gtType;
                asgType     = src->gtType;
                dest        = lcl;

#if defined(_TARGET_ARM_)
                // TODO-Cleanup: This should have been taken care of in the above HasMultiRegRetVal() case,
                // but that method has not been updated to include ARM.
                impMarkLclDstNotPromotable(lcl->gtLclVarCommon.gtLclNum, src, structHnd);
                lcl->gtFlags |= GTF_DONT_CSE;
#elif defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
                // Not allowed for FEATURE_CORCLR which is the only SKU available for System V OSs.
                assert(!src->gtCall.IsVarargs() && "varargs not allowed for System V OSs.");

                // Make the struct non promotable. The eightbytes could contain multiple fields.
                // TODO-1stClassStructs: Eliminate this pessimization when we can more generally
                // handle multireg returns.
                // TODO-Cleanup: Why is this needed here? This seems that it will set this even for
                // non-multireg returns.
                lcl->gtFlags |= GTF_DONT_CSE;
                lvaTable[lcl->gtLclVarCommon.gtLclNum].lvIsMultiRegRet = true;
#endif
            }
            else // we don't have a GT_ADDR of a GT_LCL_VAR
            {
                // !!! The destination could be on stack. !!!
                // This flag will let us choose the correct write barrier.
                asgType   = returnType;
                destFlags = GTF_IND_TGTANYWHERE;
            }
        }
    }
    else if (src->gtOper == GT_RET_EXPR)
    {
        GenTreePtr call = src->gtRetExpr.gtInlineCandidate;
        noway_assert(call->gtOper == GT_CALL);

        if (call->AsCall()->HasRetBufArg())
        {
            // insert the return value buffer into the argument list as first byref parameter
            call->gtCall.gtCallArgs = gtNewListNode(destAddr, call->gtCall.gtCallArgs);

            // now returns void, not a struct
            src->gtType  = TYP_VOID;
            call->gtType = TYP_VOID;

            // We have already appended the write to 'dest' via the GT_CALL's args,
            // so now we just return an empty node (pruning the GT_RET_EXPR)
            return src;
        }
        else
        {
            // Case of inline method returning a struct in one or more registers.
            //
            var_types returnType = (var_types)call->gtCall.gtReturnType;

            // We won't need a return buffer
            asgType      = returnType;
            src->gtType  = genActualType(returnType);
            call->gtType = src->gtType;

            // If we've changed the type, and it no longer matches a local destination,
            // we must use an indirection.
            if ((dest != nullptr) && (dest->OperGet() == GT_LCL_VAR) && (dest->TypeGet() != asgType))
            {
                dest = nullptr;
            }

            // !!! The destination could be on stack. !!!
            // This flag will let us choose the correct write barrier.
            destFlags = GTF_IND_TGTANYWHERE;
        }
    }
    else if (src->OperIsBlk())
    {
        asgType = impNormStructType(structHnd);
        if (src->gtOper == GT_OBJ)
        {
            assert(src->gtObj.gtClass == structHnd);
        }
    }
    else if (src->gtOper == GT_INDEX)
    {
        asgType = impNormStructType(structHnd);
        assert(src->gtIndex.gtStructElemClass == structHnd);
    }
    else if (src->gtOper == GT_MKREFANY)
    {
        // Since we are assigning the result of a GT_MKREFANY,
        // "destAddr" must point to a refany.

        GenTreePtr destAddrClone;
        destAddr =
            impCloneExpr(destAddr, &destAddrClone, structHnd, curLevel, pAfterStmt DEBUGARG("MKREFANY assignment"));

        assert(offsetof(CORINFO_RefAny, dataPtr) == 0);
        assert(destAddr->gtType == TYP_I_IMPL || destAddr->gtType == TYP_BYREF);
        GetZeroOffsetFieldMap()->Set(destAddr, GetFieldSeqStore()->CreateSingleton(GetRefanyDataField()));
        GenTreePtr     ptrSlot         = gtNewOperNode(GT_IND, TYP_I_IMPL, destAddr);
        GenTreeIntCon* typeFieldOffset = gtNewIconNode(offsetof(CORINFO_RefAny, type), TYP_I_IMPL);
        typeFieldOffset->gtFieldSeq    = GetFieldSeqStore()->CreateSingleton(GetRefanyTypeField());
        GenTreePtr typeSlot =
            gtNewOperNode(GT_IND, TYP_I_IMPL, gtNewOperNode(GT_ADD, destAddr->gtType, destAddrClone, typeFieldOffset));

        // append the assign of the pointer value
        GenTreePtr asg = gtNewAssignNode(ptrSlot, src->gtOp.gtOp1);
        if (pAfterStmt)
        {
            *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(asg, impCurStmtOffs));
        }
        else
        {
            impAppendTree(asg, curLevel, impCurStmtOffs);
        }

        // return the assign of the type value, to be appended
        return gtNewAssignNode(typeSlot, src->gtOp.gtOp2);
    }
    else if (src->gtOper == GT_COMMA)
    {
        // The second thing is the struct or its address.
        assert(varTypeIsStruct(src->gtOp.gtOp2) || src->gtOp.gtOp2->gtType == TYP_BYREF);
        if (pAfterStmt)
        {
            *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(src->gtOp.gtOp1, impCurStmtOffs));
        }
        else
        {
            impAppendTree(src->gtOp.gtOp1, curLevel, impCurStmtOffs); // do the side effect
        }

        // Evaluate the second thing using recursion.
        return impAssignStructPtr(destAddr, src->gtOp.gtOp2, structHnd, curLevel, pAfterStmt, block);
    }
    else if (src->IsLocal())
    {
        asgType = src->TypeGet();
    }
    else if (asgType == TYP_STRUCT)
    {
        asgType     = impNormStructType(structHnd);
        src->gtType = asgType;
#ifdef LEGACY_BACKEND
        if (asgType == TYP_STRUCT)
        {
            GenTree* srcAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, src);
            src              = gtNewOperNode(GT_IND, TYP_STRUCT, srcAddr);
        }
#endif
    }
    if (dest == nullptr)
    {
        // TODO-1stClassStructs: We shouldn't really need a block node as the destination
        // if this is a known struct type.
        if (asgType == TYP_STRUCT)
        {
            dest = gtNewObjNode(structHnd, destAddr);
            gtSetObjGcInfo(dest->AsObj());
            // Although an obj as a call argument was always assumed to be a globRef
            // (which is itself overly conservative), that is not true of the operands
            // of a block assignment.
            dest->gtFlags &= ~GTF_GLOB_REF;
            dest->gtFlags |= (destAddr->gtFlags & GTF_GLOB_REF);
        }
        else if (varTypeIsStruct(asgType))
        {
            dest = new (this, GT_BLK) GenTreeBlk(GT_BLK, asgType, destAddr, genTypeSize(asgType));
        }
        else
        {
            dest = gtNewOperNode(GT_IND, asgType, destAddr);
        }
    }
    else
    {
        dest->gtType = asgType;
    }

    dest->gtFlags |= destFlags;
    destFlags = dest->gtFlags;

    // return an assignment node, to be appended
    GenTree* asgNode = gtNewAssignNode(dest, src);
    gtBlockOpInit(asgNode, dest, src, false);

    // TODO-1stClassStructs: Clean up the settings of GTF_DONT_CSE on the lhs
    // of assignments.
    if ((destFlags & GTF_DONT_CSE) == 0)
    {
        dest->gtFlags &= ~(GTF_DONT_CSE);
    }

    return asgNode;
}
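// For illustration only: the hidden return buffer rewrite above turns
//
//     dst = call<struct>(args)
//
// into a void call whose first (byref) argument is the destination address:
//
//     call<void>(&dst, args)
//
// which is why those paths return the call itself rather than an assignment.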
/*****************************************************************************
 *  Given a struct value, and the class handle for that structure, return
 *  the expression for the address for that structure value.
 *
 *  willDeref - does the caller guarantee to dereference the pointer.
 */

GenTreePtr Compiler::impGetStructAddr(GenTreePtr           structVal,
                                      CORINFO_CLASS_HANDLE structHnd,
                                      unsigned             curLevel,
                                      bool                 willDeref)
{
    assert(varTypeIsStruct(structVal) || eeIsValueClass(structHnd));

    var_types type = structVal->TypeGet();

    genTreeOps oper = structVal->gtOper;

    if (oper == GT_OBJ && willDeref)
    {
        assert(structVal->gtObj.gtClass == structHnd);
        return (structVal->gtObj.Addr());
    }
    else if (oper == GT_CALL || oper == GT_RET_EXPR || oper == GT_OBJ || oper == GT_MKREFANY)
    {
        unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));

        impAssignTempGen(tmpNum, structVal, structHnd, curLevel);

        // The 'return value' is now the temp itself

        type            = genActualType(lvaTable[tmpNum].TypeGet());
        GenTreePtr temp = gtNewLclvNode(tmpNum, type);
        temp            = gtNewOperNode(GT_ADDR, TYP_BYREF, temp);
        return temp;
    }
    else if (oper == GT_COMMA)
    {
        assert(structVal->gtOp.gtOp2->gtType == type); // Second thing is the struct

        GenTreePtr oldTreeLast = impTreeLast;
        structVal->gtOp.gtOp2  = impGetStructAddr(structVal->gtOp.gtOp2, structHnd, curLevel, willDeref);
        structVal->gtType      = TYP_BYREF;

        if (oldTreeLast != impTreeLast)
        {
            // Some temp assignment statement was placed on the statement list
            // for Op2, but that would be out of order with op1, so we need to
            // spill op1 onto the statement list after whatever was last
            // before we recursed on Op2 (i.e. before whatever Op2 appended).
            impInsertTreeBefore(structVal->gtOp.gtOp1, impCurStmtOffs, oldTreeLast->gtNext);
            structVal->gtOp.gtOp1 = gtNewNothingNode();
        }

        return (structVal);
    }

    return (gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
}
//------------------------------------------------------------------------
// impNormStructType: Given a (known to be) struct class handle structHnd, normalize its type,
//                    and optionally determine the GC layout of the struct.
//
// Arguments:
//    structHnd     - The class handle for the struct type of interest.
//    gcLayout      - (optional, default nullptr) - a BYTE pointer, allocated by the caller,
//                    into which the gcLayout will be written.
//    pNumGCVars    - (optional, default nullptr) - if non-null, a pointer to an unsigned,
//                    which will be set to the number of GC fields in the struct.
//    pSimdBaseType - (optional, default nullptr) - if non-null, and the struct is a SIMD
//                    type, set to the SIMD base type.
//
// Return Value:
//    The JIT type for the struct (e.g. TYP_STRUCT, or TYP_SIMD*).
//    The gcLayout will be returned using the pointers provided by the caller, if non-null.
//    It may also modify the compFloatingPointUsed flag if the type is a SIMD type.
//
// Assumptions:
//    The caller must set gcLayout to nullptr OR ensure that it is large enough
//    (see ICorStaticInfo::getClassGClayout in corinfo.h).
//
// Notes:
//    Normalizing the type involves examining the struct type to determine if it should
//    be modified to one that is handled specially by the JIT, possibly being a candidate
//    for full enregistration, e.g. TYP_SIMD16.
var_types Compiler::impNormStructType(CORINFO_CLASS_HANDLE structHnd,
                                      BYTE*                gcLayout,
                                      unsigned*            pNumGCVars,
                                      var_types*           pSimdBaseType)
{
    assert(structHnd != NO_CLASS_HANDLE);

    const DWORD structFlags = info.compCompHnd->getClassAttribs(structHnd);
    var_types   structType  = TYP_STRUCT;

#ifdef FEATURE_CORECLR
    const bool hasGCPtrs = (structFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0;
#else
    // Desktop CLR won't report FLG_CONTAINS_GC_PTR for RefAnyClass - need to check explicitly.
    const bool isRefAny  = (structHnd == impGetRefAnyClass());
    const bool hasGCPtrs = isRefAny || ((structFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0);
#endif
#ifdef FEATURE_SIMD
    // Check to see if this is a SIMD type.
    if (featureSIMD && !hasGCPtrs)
    {
        unsigned originalSize = info.compCompHnd->getClassSize(structHnd);

        if ((originalSize >= minSIMDStructBytes()) && (originalSize <= maxSIMDStructBytes()))
        {
            unsigned int sizeBytes;
            var_types    simdBaseType = getBaseTypeAndSizeOfSIMDType(structHnd, &sizeBytes);
            if (simdBaseType != TYP_UNKNOWN)
            {
                assert(sizeBytes == originalSize);
                structType = getSIMDTypeForSize(sizeBytes);
                if (pSimdBaseType != nullptr)
                {
                    *pSimdBaseType = simdBaseType;
                }
#ifdef _TARGET_AMD64_
                // Amd64: also indicate that we use floating point registers
                compFloatingPointUsed = true;
#endif
            }
        }
    }
#endif // FEATURE_SIMD
    // Fetch GC layout info if requested
    if (gcLayout != nullptr)
    {
        unsigned numGCVars = info.compCompHnd->getClassGClayout(structHnd, gcLayout);

        // Verify that the quick test up above via the class attributes gave a
        // safe view of the type's GCness.
        //
        // Note there are cases where hasGCPtrs is true but getClassGClayout
        // does not report any gc fields.
        assert(hasGCPtrs || (numGCVars == 0));

        if (pNumGCVars != nullptr)
        {
            *pNumGCVars = numGCVars;
        }
    }
    else
    {
        // Can't safely ask for number of GC pointers without also
        // asking for layout.
        assert(pNumGCVars == nullptr);
    }

    return structType;
}
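// For illustration only: a hypothetical caller that wants both the normalized
// type and the GC layout of a struct (the buffer size is an assumption, per
// the function's contract the caller must make it large enough):
//
//     BYTE     gcPtrs[128];
//     unsigned numGCVars;
//     var_types type = impNormStructType(structHnd, gcPtrs, &numGCVars);
//
// For a 16-byte struct of floats with SIMD enabled, 'type' could come back as
// TYP_SIMD16 rather than TYP_STRUCT.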
//****************************************************************************
//  Given TYP_STRUCT value 'structVal', make sure it is 'canonical', that is
//  it is either an OBJ or a MKREFANY node, or a node (e.g. GT_INDEX) that will be morphed.
//
GenTreePtr Compiler::impNormStructVal(GenTreePtr           structVal,
                                      CORINFO_CLASS_HANDLE structHnd,
                                      unsigned             curLevel,
                                      bool                 forceNormalization /*=false*/)
{
    assert(forceNormalization || varTypeIsStruct(structVal));
    assert(structHnd != NO_CLASS_HANDLE);
    var_types structType = structVal->TypeGet();
    bool      makeTemp   = false;
    if (structType == TYP_STRUCT)
    {
        structType = impNormStructType(structHnd);
    }
    bool                 alreadyNormalized = false;
    GenTreeLclVarCommon* structLcl         = nullptr;

    genTreeOps oper = structVal->OperGet();
    switch (oper)
    {
        // GT_RETURN and GT_MKREFANY don't capture the handle.
        case GT_RETURN:
            break;
        case GT_MKREFANY:
            alreadyNormalized = true;
            break;

        case GT_CALL:
            structVal->gtCall.gtRetClsHnd = structHnd;
            makeTemp                      = true;
            break;

        case GT_RET_EXPR:
            structVal->gtRetExpr.gtRetClsHnd = structHnd;
            makeTemp                         = true;
            break;

        case GT_ARGPLACE:
            structVal->gtArgPlace.gtArgPlaceClsHnd = structHnd;
            break;
        case GT_INDEX:
            // This will be transformed to an OBJ later.
            alreadyNormalized                    = true;
            structVal->gtIndex.gtStructElemClass = structHnd;
            structVal->gtIndex.gtIndElemSize     = info.compCompHnd->getClassSize(structHnd);
            break;

        case GT_FIELD:
            // Wrap it in a GT_OBJ.
            structVal->gtType = structType;
            structVal         = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
            break;

        case GT_LCL_VAR:
        case GT_LCL_FLD:
            structLcl = structVal->AsLclVarCommon();
            // Wrap it in a GT_OBJ.
            structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
            __fallthrough;
        case GT_OBJ:
        case GT_BLK:
        case GT_DYN_BLK:
        case GT_ASG:
            // These should already have the appropriate type.
            assert(structVal->gtType == structType);
            alreadyNormalized = true;
            break;

        case GT_IND:
            assert(structVal->gtType == structType);
            structVal         = gtNewObjNode(structHnd, structVal->gtGetOp1());
            alreadyNormalized = true;
            break;

#ifdef FEATURE_SIMD
        case GT_SIMD:
            assert(varTypeIsSIMD(structVal) && (structVal->gtType == structType));
            break;
#endif // FEATURE_SIMD
        case GT_COMMA:
        {
            // The second thing is the block node.
            GenTree* blockNode = structVal->gtOp.gtOp2;
            assert(blockNode->gtType == structType);
            // It had better be a block node - any others should not occur here.
            assert(blockNode->OperIsBlk());

            // Sink the GT_COMMA below the blockNode addr.
            GenTree* blockNodeAddr = blockNode->gtOp.gtOp1;
            assert(blockNodeAddr->gtType == TYP_BYREF);
            GenTree* commaNode    = structVal;
            commaNode->gtType     = TYP_BYREF;
            commaNode->gtOp.gtOp2 = blockNodeAddr;
            blockNode->gtOp.gtOp1 = commaNode;
            structVal             = blockNode;
            alreadyNormalized     = true;
        }
        break;
        default:
            assert(!"Unexpected node in impNormStructVal()");
            break;
    }

    structVal->gtType  = structType;
    GenTree* structObj = structVal;

    if (!alreadyNormalized || forceNormalization)
    {
        if (makeTemp)
        {
            unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));

            impAssignTempGen(tmpNum, structVal, structHnd, curLevel);

            // The structVal is now the temp itself

            structLcl = gtNewLclvNode(tmpNum, structType)->AsLclVarCommon();
            // TODO-1stClassStructs: Avoid always wrapping in GT_OBJ.
            structObj = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structLcl));
        }
        else if (varTypeIsStruct(structType) && !structVal->OperIsBlk())
        {
            // Wrap it in a GT_OBJ
            structObj = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
        }
    }

    if (structLcl != nullptr)
    {
        // A OBJ on a ADDR(LCL_VAR) can never raise an exception
        // so we don't set GTF_EXCEPT here.
        if (!lvaIsImplicitByRefLocal(structLcl->gtLclNum))
        {
            structObj->gtFlags &= ~GTF_GLOB_REF;
        }
    }
    else
    {
        // In general a OBJ is an indirection and could raise an exception.
        structObj->gtFlags |= GTF_EXCEPT;
    }

    return structObj;
}
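// For illustration only: normalizing a struct-typed field load. An IL 'ldfld'
// of a struct value initially imports as GT_FIELD; the code above rewrites it
// to
//
//     OBJ<structHnd>(ADDR(FIELD))
//
// so that downstream phases see one canonical shape for struct values.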
/******************************************************************************/
// Given a type token, generate code that will evaluate to the correct
// handle representation of that token (type handle, field handle, or method handle)
//
// For most cases, the handle is determined at compile-time, and the code
// generated is simply an embedded handle.
//
// Run-time lookup is required if the enclosing method is shared between instantiations
// and the token refers to formal type parameters whose instantiation is not known
// at compile-time.
//
GenTreePtr Compiler::impTokenToHandle(CORINFO_RESOLVED_TOKEN* pResolvedToken,
                                      BOOL*                   pRuntimeLookup /* = NULL */,
                                      BOOL                    mustRestoreHandle /* = FALSE */,
                                      BOOL                    importParent /* = FALSE */)
{
    assert(!fgGlobalMorph);

    CORINFO_GENERICHANDLE_RESULT embedInfo;
    info.compCompHnd->embedGenericHandle(pResolvedToken, importParent, &embedInfo);

    if (pRuntimeLookup)
    {
        *pRuntimeLookup = embedInfo.lookup.lookupKind.needsRuntimeLookup;
    }

    if (mustRestoreHandle && !embedInfo.lookup.lookupKind.needsRuntimeLookup)
    {
        switch (embedInfo.handleType)
        {
            case CORINFO_HANDLETYPE_CLASS:
                info.compCompHnd->classMustBeLoadedBeforeCodeIsRun((CORINFO_CLASS_HANDLE)embedInfo.compileTimeHandle);
                break;

            case CORINFO_HANDLETYPE_METHOD:
                info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun((CORINFO_METHOD_HANDLE)embedInfo.compileTimeHandle);
                break;

            case CORINFO_HANDLETYPE_FIELD:
                info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(
                    info.compCompHnd->getFieldClass((CORINFO_FIELD_HANDLE)embedInfo.compileTimeHandle));
                break;

            default:
                break;
        }
    }

    return impLookupToTree(pResolvedToken, &embedInfo.lookup, gtTokenToIconFlags(pResolvedToken->token),
                           embedInfo.compileTimeHandle);
}
GenTreePtr Compiler::impLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
                                     CORINFO_LOOKUP*         pLookup,
                                     unsigned                handleFlags,
                                     void*                   compileTimeHandle)
{
    if (!pLookup->lookupKind.needsRuntimeLookup)
    {
        // No runtime lookup is required.
        // Access is direct or memory-indirect (of a fixed address) reference

        CORINFO_GENERIC_HANDLE handle       = nullptr;
        void*                  pIndirection = nullptr;
        assert(pLookup->constLookup.accessType != IAT_PPVALUE);

        if (pLookup->constLookup.accessType == IAT_VALUE)
        {
            handle = pLookup->constLookup.handle;
        }
        else if (pLookup->constLookup.accessType == IAT_PVALUE)
        {
            pIndirection = pLookup->constLookup.addr;
        }
        return gtNewIconEmbHndNode(handle, pIndirection, handleFlags, 0, nullptr, compileTimeHandle);
    }
    else if (compIsForInlining())
    {
        // Don't import runtime lookups when inlining
        // Inlining has to be aborted in such a case
        compInlineResult->NoteFatal(InlineObservation::CALLSITE_GENERIC_DICTIONARY_LOOKUP);
        return nullptr;
    }
    else
    {
        // Need to use dictionary-based access which depends on the typeContext
        // which is only available at runtime, not at compile-time.

        return impRuntimeLookupToTree(pResolvedToken, pLookup, compileTimeHandle);
    }
}
#ifdef FEATURE_READYTORUN_COMPILER
GenTreePtr Compiler::impReadyToRunLookupToTree(CORINFO_CONST_LOOKUP* pLookup,
                                               unsigned              handleFlags,
                                               void*                 compileTimeHandle)
{
    CORINFO_GENERIC_HANDLE handle       = nullptr;
    void*                  pIndirection = nullptr;
    assert(pLookup->accessType != IAT_PPVALUE);

    if (pLookup->accessType == IAT_VALUE)
    {
        handle = pLookup->handle;
    }
    else if (pLookup->accessType == IAT_PVALUE)
    {
        pIndirection = pLookup->addr;
    }
    return gtNewIconEmbHndNode(handle, pIndirection, handleFlags, 0, nullptr, compileTimeHandle);
}

GenTreePtr Compiler::impReadyToRunHelperToTree(
    CORINFO_RESOLVED_TOKEN* pResolvedToken,
    CorInfoHelpFunc         helper,
    var_types               type,
    GenTreeArgList*         args /* =NULL*/,
    CORINFO_LOOKUP_KIND*    pGenericLookupKind /* =NULL. Only used with generics */)
{
    CORINFO_CONST_LOOKUP lookup;
#if COR_JIT_EE_VERSION > 460
    if (!info.compCompHnd->getReadyToRunHelper(pResolvedToken, pGenericLookupKind, helper, &lookup))
    {
        return nullptr;
    }
#else
    info.compCompHnd->getReadyToRunHelper(pResolvedToken, helper, &lookup);
#endif

    GenTreePtr op1 = gtNewHelperCallNode(helper, type, GTF_EXCEPT, args);

    op1->gtCall.setEntryPoint(lookup);

    return op1;
}
#endif // FEATURE_READYTORUN_COMPILER
GenTreePtr Compiler::impMethodPointer(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo)
{
    GenTreePtr op1 = nullptr;

    switch (pCallInfo->kind)
    {
        case CORINFO_CALL:
            op1 = new (this, GT_FTN_ADDR) GenTreeFptrVal(TYP_I_IMPL, pCallInfo->hMethod);

#ifdef FEATURE_READYTORUN_COMPILER
            if (opts.IsReadyToRun())
            {
                op1->gtFptrVal.gtEntryPoint          = pCallInfo->codePointerLookup.constLookup;
                op1->gtFptrVal.gtLdftnResolvedToken  = new (this, CMK_Unknown) CORINFO_RESOLVED_TOKEN;
                *op1->gtFptrVal.gtLdftnResolvedToken = *pResolvedToken;
            }
            else
            {
                op1->gtFptrVal.gtEntryPoint.addr = nullptr;
            }
#endif
            break;

        case CORINFO_CALL_CODE_POINTER:
            if (compIsForInlining())
            {
                // Don't import runtime lookups when inlining
                // Inlining has to be aborted in such a case
                compInlineResult->NoteFatal(InlineObservation::CALLSITE_GENERIC_DICTIONARY_LOOKUP);
                return nullptr;
            }

            op1 = impLookupToTree(pResolvedToken, &pCallInfo->codePointerLookup, GTF_ICON_FTN_ADDR, pCallInfo->hMethod);
            break;

        default:
            noway_assert(!"unknown call kind");
            break;
    }

    return op1;
}
//------------------------------------------------------------------------
// getRuntimeContextTree: find pointer to context for runtime lookup.
//
// Arguments:
//    kind - lookup kind.
//
// Return Value:
//    Return GenTree pointer to generic shared context.
//
// Notes:
//    Reports that the generic context is used.

GenTreePtr Compiler::getRuntimeContextTree(CORINFO_RUNTIME_LOOKUP_KIND kind)
{
    GenTreePtr ctxTree = nullptr;

    // Collectible types require that for shared generic code, if we use the generic context parameter,
    // we report it. (This is a conservative approach; we could detect some cases, particularly when the
    // context parameter is 'this', where we don't need the eager reporting logic.)
    lvaGenericsContextUsed = true;

    if (kind == CORINFO_LOOKUP_THISOBJ)
    {
        // this Object
        ctxTree = gtNewLclvNode(info.compThisArg, TYP_REF);

        // Vtable pointer of this object
        ctxTree = gtNewOperNode(GT_IND, TYP_I_IMPL, ctxTree);
        ctxTree->gtFlags |= GTF_EXCEPT; // Null-pointer exception
        ctxTree->gtFlags |= GTF_IND_INVARIANT;
    }
    else
    {
        assert(kind == CORINFO_LOOKUP_METHODPARAM || kind == CORINFO_LOOKUP_CLASSPARAM);

        ctxTree = gtNewLclvNode(info.compTypeCtxtArg, TYP_I_IMPL); // Exact method descriptor as passed in as last arg
    }
    return ctxTree;
}
/*****************************************************************************/
/* Import a dictionary lookup to access a handle in code shared between
   generic instantiations.
   The lookup depends on the typeContext which is only available at
   runtime, and not at compile-time.
   pLookup->token1 and pLookup->token2 specify the handle that is needed.
   The cases are:

   1. pLookup->indirections == CORINFO_USEHELPER : Call a helper passing it the
      instantiation-specific handle, and the tokens to lookup the handle.
   2. pLookup->indirections != CORINFO_USEHELPER :
      2a. pLookup->testForNull == false : Dereference the instantiation-specific handle
          to get the handle.
      2b. pLookup->testForNull == true : Dereference the instantiation-specific handle.
          If it is non-NULL, it is the handle required. Else, call a helper
          to lookup the handle.
 */
GenTreePtr Compiler::impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
                                            CORINFO_LOOKUP*         pLookup,
                                            void*                   compileTimeHandle)
{
    // This method can only be called from the importer instance of the Compiler.
    // In other words, it cannot be called by the instance of the Compiler for the inlinee.
    assert(!compIsForInlining());

    GenTreePtr ctxTree = getRuntimeContextTree(pLookup->lookupKind.runtimeLookupKind);

#ifdef FEATURE_READYTORUN_COMPILER
    if (opts.IsReadyToRun())
    {
        return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL,
                                         gtNewArgList(ctxTree), &pLookup->lookupKind);
    }
#endif

    CORINFO_RUNTIME_LOOKUP* pRuntimeLookup = &pLookup->runtimeLookup;

    // It's available only via the run-time helper function
    if (pRuntimeLookup->indirections == CORINFO_USEHELPER)
    {
        GenTreeArgList* helperArgs =
            gtNewArgList(ctxTree, gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_TOKEN_HDL, 0,
                                                      nullptr, compileTimeHandle));

        return gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, GTF_EXCEPT, helperArgs);
    }
    GenTreePtr slotPtrTree = ctxTree;

    if (pRuntimeLookup->testForNull)
    {
        slotPtrTree = impCloneExpr(ctxTree, &ctxTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
                                   nullptr DEBUGARG("impRuntimeLookup slot"));
    }

    // Apply repeated indirections
    for (WORD i = 0; i < pRuntimeLookup->indirections; i++)
    {
        if (i != 0)
        {
            slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
            slotPtrTree->gtFlags |= GTF_IND_NONFAULTING;
            slotPtrTree->gtFlags |= GTF_IND_INVARIANT;
        }
        if (pRuntimeLookup->offsets[i] != 0)
        {
            slotPtrTree =
                gtNewOperNode(GT_ADD, TYP_I_IMPL, slotPtrTree, gtNewIconNode(pRuntimeLookup->offsets[i], TYP_I_IMPL));
        }
    }
1997 // No null test required
1998 if (!pRuntimeLookup->testForNull)
2000 if (pRuntimeLookup->indirections == 0)
2005 slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
2006 slotPtrTree->gtFlags |= GTF_IND_NONFAULTING;
2008 if (!pRuntimeLookup->testForFixup)
2013 impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark0"));
2015 GenTreePtr op1 = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2016 nullptr DEBUGARG("impRuntimeLookup test"));
2017 op1 = impImplicitIorI4Cast(op1, TYP_INT); // downcast the pointer to a TYP_INT on 64-bit targets
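// The qmark built below amounts to (illustrative sketch):
//     result = ((slot & 1) == 0) ? slot : *(slot - 1);
// i.e. a slot value with its low bit set is an unresolved fixup and must be
// indirected through after clearing that bit (hence the -1 below).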
2019 // Use a GT_AND to check for the lowest bit and indirect if it is set
2020 GenTreePtr testTree = gtNewOperNode(GT_AND, TYP_INT, op1, gtNewIconNode(1));
2021 GenTreePtr relop = gtNewOperNode(GT_EQ, TYP_INT, testTree, gtNewIconNode(0));
2022 relop->gtFlags |= GTF_RELOP_QMARK;
2024 op1 = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2025 nullptr DEBUGARG("impRuntimeLookup indir"));
2026 op1 = gtNewOperNode(GT_ADD, TYP_I_IMPL, op1, gtNewIconNode(-1, TYP_I_IMPL)); // subtract 1 from the pointer
2027 GenTreePtr indirTree = gtNewOperNode(GT_IND, TYP_I_IMPL, op1);
2028 GenTreePtr colon = new (this, GT_COLON) GenTreeColon(TYP_I_IMPL, slotPtrTree, indirTree);
2030 GenTreePtr qmark = gtNewQmarkNode(TYP_I_IMPL, relop, colon);
2032 unsigned tmp = lvaGrabTemp(true DEBUGARG("spilling QMark0"));
2033 impAssignTempGen(tmp, qmark, (unsigned)CHECK_SPILL_NONE);
2034 return gtNewLclvNode(tmp, TYP_I_IMPL);
2037 assert(pRuntimeLookup->indirections != 0);
2039 impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark1"));
2041 // Extract the handle
2042 GenTreePtr handle = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
2043 handle->gtFlags |= GTF_IND_NONFAULTING;
2045 GenTreePtr handleCopy = impCloneExpr(handle, &handle, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2046 nullptr DEBUGARG("impRuntimeLookup typehandle"));
2049 GenTreeArgList* helperArgs =
2050 gtNewArgList(ctxTree, gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_TOKEN_HDL, 0, nullptr,
2051 compileTimeHandle));
2052 GenTreePtr helperCall = gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, GTF_EXCEPT, helperArgs);
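// Overall shape of what follows (sketch):
//     result = (handle != nullptr) ? handle : helperCall(ctx, signature);
// with the result kept in a temp so the qmark can be appended safely.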
2054 // Check for null and possibly call helper
2055 GenTreePtr relop = gtNewOperNode(GT_NE, TYP_INT, handle, gtNewIconNode(0, TYP_I_IMPL));
2056 relop->gtFlags |= GTF_RELOP_QMARK;
2058 GenTreePtr colon = new (this, GT_COLON) GenTreeColon(TYP_I_IMPL,
2059 gtNewNothingNode(), // do nothing if nonnull
2062 GenTreePtr qmark = gtNewQmarkNode(TYP_I_IMPL, relop, colon);
2065 if (handleCopy->IsLocal())
2067 tmp = handleCopy->gtLclVarCommon.gtLclNum;
2071 tmp = lvaGrabTemp(true DEBUGARG("spilling QMark1"));
2074 impAssignTempGen(tmp, qmark, (unsigned)CHECK_SPILL_NONE);
2075 return gtNewLclvNode(tmp, TYP_I_IMPL);
2078 /******************************************************************************
2079 * Spills the stack at verCurrentState.esStack[level] and replaces it with a temp.
2080 * If tnum!=BAD_VAR_NUM, the temp var used to replace the tree is tnum,
2081 * else, grab a new temp.
2082 * For structs (which can be pushed on the stack using obj, etc),
2083 * special handling is needed
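 * In effect (sketch): given stack entry "tree", we append "tmpN = tree" to the
 * statement list and replace the entry with a use of tmpN (a GT_LCL_VAR), so
 * the entry's value is computed exactly once, at the spill point.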
2086 struct RecursiveGuard
2091 m_pAddress = nullptr;
2098 *m_pAddress = false;
2102 void Init(bool* pAddress, bool bInitialize)
2104 assert(pAddress && *pAddress == false && "Recursive guard violation");
2105 m_pAddress = pAddress;
2117 bool Compiler::impSpillStackEntry(unsigned level,
2121 bool bAssertOnRecursion,
2128 RecursiveGuard guard;
2129 guard.Init(&impNestedStackSpill, bAssertOnRecursion);
2132 GenTreePtr tree = verCurrentState.esStack[level].val;
2134 /* Allocate a temp if we haven't been asked to use a particular one */
2136 if (tiVerificationNeeded)
2138 // Ignore bad temp requests (they will happen with bad code and will be
2139 // caught when importing the destblock)
2140 if ((tnum != BAD_VAR_NUM && tnum >= lvaCount) && verNeedsVerification())
2147 if (tnum != BAD_VAR_NUM && (tnum >= lvaCount))
2153 if (tnum == BAD_VAR_NUM)
2155 tnum = lvaGrabTemp(true DEBUGARG(reason));
2157 else if (tiVerificationNeeded && lvaTable[tnum].TypeGet() != TYP_UNDEF)
2159 // If verification is needed and tnum's type is incompatible with
2160 // the type on the stack, we grab a new temp. This is safe since
2161 // we will throw a verification exception in the dest block.
2163 var_types valTyp = tree->TypeGet();
2164 var_types dstTyp = lvaTable[tnum].TypeGet();
2166 // If the two types are different, we return. This will only happen with bad code and will
2167 // be caught when importing the destblock. We still allow int/byref and float/double differences.
2168 if ((genActualType(valTyp) != genActualType(dstTyp)) &&
2170 #ifndef _TARGET_64BIT_
2171 (valTyp == TYP_I_IMPL && dstTyp == TYP_BYREF) || (valTyp == TYP_BYREF && dstTyp == TYP_I_IMPL) ||
2172 #endif // !_TARGET_64BIT_
2173 (varTypeIsFloating(dstTyp) && varTypeIsFloating(valTyp))))
2175 if (verNeedsVerification())
2182 /* Assign the spilled entry to the temp */
2183 impAssignTempGen(tnum, tree, verCurrentState.esStack[level].seTypeInfo.GetClassHandle(), level);
2185 // The tree type may be modified by impAssignTempGen, so use the type of the lclVar.
2186 var_types type = genActualType(lvaTable[tnum].TypeGet());
2187 GenTreePtr temp = gtNewLclvNode(tnum, type);
2188 verCurrentState.esStack[level].val = temp;
2193 /*****************************************************************************
2195 * Ensure that the stack has only spilled values
2198 void Compiler::impSpillStackEnsure(bool spillLeaves)
2200 assert(!spillLeaves || opts.compDbgCode);
2202 for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2204 GenTreePtr tree = verCurrentState.esStack[level].val;
2206 if (!spillLeaves && tree->OperIsLeaf())
2211 // Temps introduced by the importer itself don't need to be spilled
2213 bool isTempLcl = (tree->OperGet() == GT_LCL_VAR) && (tree->gtLclVarCommon.gtLclNum >= info.compLocalsCount);
2220 impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillStackEnsure"));
2224 void Compiler::impSpillEvalStack()
2226 for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2228 impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillEvalStack"));
2232 /*****************************************************************************
2234 * If the stack contains any trees with side effects in them, assign those
2235 * trees to temps and append the assignments to the statement list.
2236 * On return the stack is guaranteed to be empty.
2239 inline void Compiler::impEvalSideEffects()
2241 impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("impEvalSideEffects"));
2242 verCurrentState.esStackDepth = 0;
2245 /*****************************************************************************
2247 * If the stack contains any trees with side effects in them, assign those
2248 * trees to temps and replace them on the stack with refs to their temps.
2249 * [0..chkLevel) is the portion of the stack which will be checked and spilled.
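 * For example (sketch): if the stack holds a call such as "CALL foo()", we
 * append "tmp = CALL foo()" and leave "tmp" on the stack, so foo's side
 * effects are sequenced before whatever gets appended next.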
2252 inline void Compiler::impSpillSideEffects(bool spillGlobEffects, unsigned chkLevel DEBUGARG(const char* reason))
2254 assert(chkLevel != (unsigned)CHECK_SPILL_NONE);
2256 /* Before we make any appends to the tree list we must spill the
2257 * "special" side effects (GTF_ORDER_SIDEEFF on a GT_CATCH_ARG) */
2259 impSpillSpecialSideEff();
2261 if (chkLevel == (unsigned)CHECK_SPILL_ALL)
2263 chkLevel = verCurrentState.esStackDepth;
2266 assert(chkLevel <= verCurrentState.esStackDepth);
2268 unsigned spillFlags = spillGlobEffects ? GTF_GLOB_EFFECT : GTF_SIDE_EFFECT;
2270 for (unsigned i = 0; i < chkLevel; i++)
2272 GenTreePtr tree = verCurrentState.esStack[i].val;
2274 GenTreePtr lclVarTree;
2276 if ((tree->gtFlags & spillFlags) != 0 ||
2277 (spillGlobEffects && // Only consider the following when spillGlobEffects == TRUE
2278 !impIsAddressInLocal(tree, &lclVarTree) && // No need to spill the GT_ADDR node on a local.
2279 gtHasLocalsWithAddrOp(tree))) // Spill if we still see GT_LCL_VAR that contains lvHasLdAddrOp or
2280 // lvAddrTaken flag.
2282 impSpillStackEntry(i, BAD_VAR_NUM DEBUGARG(false) DEBUGARG(reason));
2287 /*****************************************************************************
2289 * If the stack contains any trees with special side effects in them, assign
2290 * those trees to temps and replace them on the stack with refs to their temps.
2293 inline void Compiler::impSpillSpecialSideEff()
2295 // Only exception objects need to be carefully handled
2297 if (!compCurBB->bbCatchTyp)
2302 for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2304 GenTreePtr tree = verCurrentState.esStack[level].val;
2305 // Make sure that if we have an exception object in the sub tree, we spill it.
2306 if (gtHasCatchArg(tree))
2308 impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillSpecialSideEff"));
2313 /*****************************************************************************
2315 * Spill all stack references to value classes (TYP_STRUCT nodes)
2318 void Compiler::impSpillValueClasses()
2320 for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2322 GenTreePtr tree = verCurrentState.esStack[level].val;
2324 if (fgWalkTreePre(&tree, impFindValueClasses) == WALK_ABORT)
2326 // Tree walk was aborted, which means that we found a
2327 // value class on the stack. We need to spill that entry.
2330 impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillValueClasses"));
2335 /*****************************************************************************
2337 * Callback that checks if a tree node is TYP_STRUCT
2340 Compiler::fgWalkResult Compiler::impFindValueClasses(GenTreePtr* pTree, fgWalkData* data)
2342 fgWalkResult walkResult = WALK_CONTINUE;
2344 if ((*pTree)->gtType == TYP_STRUCT)
2346 // Abort the walk and indicate that we found a value class
2348 walkResult = WALK_ABORT;
2354 /*****************************************************************************
2356 * If the stack contains any trees with references to local #lclNum, assign
2357 * those trees to temps and replace their place on the stack with refs to their temps.
2361 void Compiler::impSpillLclRefs(ssize_t lclNum)
2363 /* Before we make any appends to the tree list we must spill the
2364 * "special" side effects (GTF_ORDER_SIDEEFF) - GT_CATCH_ARG */
2366 impSpillSpecialSideEff();
2368 for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2370 GenTreePtr tree = verCurrentState.esStack[level].val;
2372 /* If the tree may throw an exception, and the block has a handler,
2373 then we need to spill assignments to the local if the local is
2374 live on entry to the handler.
2375 Just spill 'em all without considering the liveness */
2377 bool xcptnCaught = ehBlockHasExnFlowDsc(compCurBB) && (tree->gtFlags & (GTF_CALL | GTF_EXCEPT));
2379 /* Skip the tree if it doesn't have an affected reference,
2380 unless xcptnCaught */
2382 if (xcptnCaught || gtHasRef(tree, lclNum, false))
2384 impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillLclRefs"));
2389 /*****************************************************************************
2391 * Push catch arg onto the stack.
2392 * If there are jumps to the beginning of the handler, insert basic block
2393 * and spill catch arg to a temp. Update the handler block if necessary.
2395 * Returns the basic block of the actual handler.
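 * Sketch of the multi-predecessor case handled below:
 *     newBlk:  tempNum = GT_CATCH_ARG     (BBJ_NONE, falls through into hndBlk)
 *     hndBlk:  entry stack = { tempNum }
 * so the handler itself sees an ordinary local rather than GT_CATCH_ARG.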
2398 BasicBlock* Compiler::impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_HANDLE clsHnd)
2400 // Do not inject the basic block twice on reimport. This should be
2401 // hit only under JIT stress. See if the block is the one we injected.
2402 // Note that EH canonicalization can inject internal blocks here. We might
2403 // be able to re-use such a block (but we don't, right now).
2404 if ((hndBlk->bbFlags & (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET)) ==
2405 (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET))
2407 GenTreePtr tree = hndBlk->bbTreeList;
2409 if (tree != nullptr && tree->gtOper == GT_STMT)
2411 tree = tree->gtStmt.gtStmtExpr;
2412 assert(tree != nullptr);
2414 if ((tree->gtOper == GT_ASG) && (tree->gtOp.gtOp1->gtOper == GT_LCL_VAR) &&
2415 (tree->gtOp.gtOp2->gtOper == GT_CATCH_ARG))
2417 tree = gtNewLclvNode(tree->gtOp.gtOp1->gtLclVarCommon.gtLclNum, TYP_REF);
2419 impPushOnStack(tree, typeInfo(TI_REF, clsHnd));
2421 return hndBlk->bbNext;
2425 // If we get here, it must have been some other kind of internal block. It's possible that
2426 // someone prepended something to our injected block, but that's unlikely.
2429 /* Push the exception address value on the stack */
2430 GenTreePtr arg = new (this, GT_CATCH_ARG) GenTree(GT_CATCH_ARG, TYP_REF);
2432 /* Mark the node as having a side-effect - i.e. cannot be
2433 * moved around since it is tied to a fixed location (EAX) */
2434 arg->gtFlags |= GTF_ORDER_SIDEEFF;
2436 /* Spill GT_CATCH_ARG to a temp if there are jumps to the beginning of the handler */
2437 if (hndBlk->bbRefs > 1 || compStressCompile(STRESS_CATCH_ARG, 5))
2439 if (hndBlk->bbRefs == 1)
2444 /* Create extra basic block for the spill */
2445 BasicBlock* newBlk = fgNewBBbefore(BBJ_NONE, hndBlk, /* extendRegion */ true);
2446 newBlk->bbFlags |= BBF_IMPORTED | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET;
2447 newBlk->setBBWeight(hndBlk->bbWeight);
2448 newBlk->bbCodeOffs = hndBlk->bbCodeOffs;
2450 /* Account for the new link we are about to create */
2453 /* Spill into a temp */
2454 unsigned tempNum = lvaGrabTemp(false DEBUGARG("SpillCatchArg"));
2455 lvaTable[tempNum].lvType = TYP_REF;
2456 arg = gtNewTempAssign(tempNum, arg);
2458 hndBlk->bbStkTempsIn = tempNum;
2460 /* Report the debug info. impImportBlockCode won't treat
2461 * the actual handler as an exception block and thus won't do it for us. */
2462 if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES)
2464 impCurStmtOffs = newBlk->bbCodeOffs | IL_OFFSETX_STKBIT;
2465 arg = gtNewStmt(arg, impCurStmtOffs);
2468 fgInsertStmtAtEnd(newBlk, arg);
2470 arg = gtNewLclvNode(tempNum, TYP_REF);
2473 impPushOnStack(arg, typeInfo(TI_REF, clsHnd));
2478 /*****************************************************************************
2480 * Given a tree, clone it. *pClone is set to the cloned tree.
2481 * Returns the original tree if the cloning was easy,
2482 * else returns the temp to which the tree had to be spilled.
2483 * If the tree has side-effects, it will be spilled to a temp.
2486 GenTreePtr Compiler::impCloneExpr(GenTreePtr tree,
2488 CORINFO_CLASS_HANDLE structHnd,
2490 GenTreePtr* pAfterStmt DEBUGARG(const char* reason))
2492 if (!(tree->gtFlags & GTF_GLOB_EFFECT))
2494 GenTreePtr clone = gtClone(tree, true);
2503 /* Store the operand in a temp and return the temp */
2505 unsigned temp = lvaGrabTemp(true DEBUGARG(reason));
2507 // impAssignTempGen() may change tree->gtType to TYP_VOID for calls which
2508 // return a struct type. It also may modify the struct type to a more
2509 // specialized type (e.g. a SIMD type). So we will get the type from
2510 // the lclVar AFTER calling impAssignTempGen().
2512 impAssignTempGen(temp, tree, structHnd, curLevel, pAfterStmt, impCurStmtOffs);
2513 var_types type = genActualType(lvaTable[temp].TypeGet());
2515 *pClone = gtNewLclvNode(temp, type);
2516 return gtNewLclvNode(temp, type);
2519 /*****************************************************************************
2520 * Remember the IL offset (including stack-empty info) for the trees we will generate now.
2524 inline void Compiler::impCurStmtOffsSet(IL_OFFSET offs)
2526 if (compIsForInlining())
2528 GenTreePtr callStmt = impInlineInfo->iciStmt;
2529 assert(callStmt->gtOper == GT_STMT);
2530 impCurStmtOffs = callStmt->gtStmt.gtStmtILoffsx;
2534 assert(offs == BAD_IL_OFFSET || (offs & IL_OFFSETX_BITS) == 0);
2535 IL_OFFSETX stkBit = (verCurrentState.esStackDepth > 0) ? IL_OFFSETX_STKBIT : 0;
2536 impCurStmtOffs = offs | stkBit;
2540 /*****************************************************************************
2541 * Returns current IL offset with stack-empty and call-instruction info incorporated
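 * For example (illustrative): impCurILOffset(0x30, true) with a non-empty stack
 * returns 0x30 | IL_OFFSETX_STKBIT | IL_OFFSETX_CALLINSTRUCTIONBIT; the raw
 * offset can be recovered by masking off IL_OFFSETX_BITS again.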
2543 inline IL_OFFSETX Compiler::impCurILOffset(IL_OFFSET offs, bool callInstruction)
2545 if (compIsForInlining())
2547 return BAD_IL_OFFSET;
2551 assert(offs == BAD_IL_OFFSET || (offs & IL_OFFSETX_BITS) == 0);
2552 IL_OFFSETX stkBit = (verCurrentState.esStackDepth > 0) ? IL_OFFSETX_STKBIT : 0;
2553 IL_OFFSETX callInstructionBit = callInstruction ? IL_OFFSETX_CALLINSTRUCTIONBIT : 0;
2554 return offs | stkBit | callInstructionBit;
2558 /*****************************************************************************
2560 * Remember the instr offset for the statements
2562 * When we do impAppendTree(tree), we can't set tree->gtStmtLastILoffs to
2563 * impCurOpcOffs, if the append was done because of a partial stack spill,
2564 * as some of the trees corresponding to code up to impCurOpcOffs might
2565 * still be sitting on the stack.
2566 * So we delay marking of gtStmtLastILoffs until impNoteLastILoffs().
2567 * This should be called when an opcode finally/explicitly causes
2568 * impAppendTree(tree) to be called (as opposed to being called because of
2569 * a spill caused by the opcode)
2574 void Compiler::impNoteLastILoffs()
2576 if (impLastILoffsStmt == nullptr)
2578 // We should have added a statement for the current basic block
2579 // Is this assert correct ?
2581 assert(impTreeLast);
2582 assert(impTreeLast->gtOper == GT_STMT);
2584 impTreeLast->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
2588 impLastILoffsStmt->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
2589 impLastILoffsStmt = nullptr;
2595 /*****************************************************************************
2596 * We don't create any GenTree (excluding spills) for a branch.
2597 * For debugging info, we need a placeholder so that we can note
2598 * the IL offset in gtStmt.gtStmtOffs. So append an empty statement.
2601 void Compiler::impNoteBranchOffs()
2603 if (opts.compDbgCode)
2605 impAppendTree(gtNewNothingNode(), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
2609 /*****************************************************************************
2610 * Locate the next stmt boundary for which we need to record info.
2611 * We will have to spill the stack at such boundaries if it is not already empty.
2613 * Returns the next stmt boundary (after the start of the block)
2616 unsigned Compiler::impInitBlockLineInfo()
2618 /* Assume the block does not correspond with any IL offset. This prevents
2619 us from reporting extra offsets. Extra mappings can cause confusing
2620 stepping, especially if the extra mapping is a jump-target, and the
2621 debugger does not ignore extra mappings, but instead rewinds to the
2622 nearest known offset */
2624 impCurStmtOffsSet(BAD_IL_OFFSET);
2626 if (compIsForInlining())
2631 IL_OFFSET blockOffs = compCurBB->bbCodeOffs;
2633 if ((verCurrentState.esStackDepth == 0) && (info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES))
2635 impCurStmtOffsSet(blockOffs);
2638 if (false && (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES))
2640 impCurStmtOffsSet(blockOffs);
2643 /* Always report IL offset 0 or some tests get confused.
2644 Probably a good idea anyway */
2648 impCurStmtOffsSet(blockOffs);
2651 if (!info.compStmtOffsetsCount)
2656 /* Find the lowest explicit stmt boundary within the block */
2658 /* Start looking at an entry that is based on our instr offset */
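/* E.g. with compStmtOffsetsCount == 10 boundaries over 100 bytes of IL, a block
   at IL offset 40 starts searching at index (10 * 40) / 100 == 4; the loops
   below then correct the guess in either direction */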
2660 unsigned index = (info.compStmtOffsetsCount * blockOffs) / info.compILCodeSize;
2662 if (index >= info.compStmtOffsetsCount)
2664 index = info.compStmtOffsetsCount - 1;
2667 /* If we've guessed too far, back up */
2669 while (index > 0 && info.compStmtOffsets[index - 1] >= blockOffs)
2674 /* If we guessed short, advance ahead */
2676 while (info.compStmtOffsets[index] < blockOffs)
2680 if (index == info.compStmtOffsetsCount)
2682 return info.compStmtOffsetsCount;
2686 assert(index < info.compStmtOffsetsCount);
2688 if (info.compStmtOffsets[index] == blockOffs)
2690 /* There is an explicit boundary for the start of this basic block.
2691 So we will start with bbCodeOffs. Else we will wait until we
2692 get to the next explicit boundary */
2694 impCurStmtOffsSet(blockOffs);
2702 /*****************************************************************************/
2704 static inline bool impOpcodeIsCallOpcode(OPCODE opcode)
2718 /*****************************************************************************/
2720 static inline bool impOpcodeIsCallSiteBoundary(OPCODE opcode)
2737 /*****************************************************************************/
2739 // One might think it is worth caching these values, but results indicate
2740 // that it isn't.
2741 // In addition, caching them causes SuperPMI to be unable to completely
2742 // encapsulate an individual method context.
2743 CORINFO_CLASS_HANDLE Compiler::impGetRefAnyClass()
2745 CORINFO_CLASS_HANDLE refAnyClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPED_BYREF);
2746 assert(refAnyClass != (CORINFO_CLASS_HANDLE) nullptr);
2750 CORINFO_CLASS_HANDLE Compiler::impGetTypeHandleClass()
2752 CORINFO_CLASS_HANDLE typeHandleClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPE_HANDLE);
2753 assert(typeHandleClass != (CORINFO_CLASS_HANDLE) nullptr);
2754 return typeHandleClass;
2757 CORINFO_CLASS_HANDLE Compiler::impGetRuntimeArgumentHandle()
2759 CORINFO_CLASS_HANDLE argIteratorClass = info.compCompHnd->getBuiltinClass(CLASSID_ARGUMENT_HANDLE);
2760 assert(argIteratorClass != (CORINFO_CLASS_HANDLE) nullptr);
2761 return argIteratorClass;
2764 CORINFO_CLASS_HANDLE Compiler::impGetStringClass()
2766 CORINFO_CLASS_HANDLE stringClass = info.compCompHnd->getBuiltinClass(CLASSID_STRING);
2767 assert(stringClass != (CORINFO_CLASS_HANDLE) nullptr);
2771 CORINFO_CLASS_HANDLE Compiler::impGetObjectClass()
2773 CORINFO_CLASS_HANDLE objectClass = info.compCompHnd->getBuiltinClass(CLASSID_SYSTEM_OBJECT);
2774 assert(objectClass != (CORINFO_CLASS_HANDLE) nullptr);
2778 /*****************************************************************************
2779 * "&var" can be used either as TYP_BYREF or TYP_I_IMPL, but we
2780 * set its type to TYP_BYREF when we create it. We know if it can be
2781 * changed to TYP_I_IMPL only at the point where we use it
2785 void Compiler::impBashVarAddrsToI(GenTreePtr tree1, GenTreePtr tree2)
2787 if (tree1->IsVarAddr())
2789 tree1->gtType = TYP_I_IMPL;
2792 if (tree2 && tree2->IsVarAddr())
2794 tree2->gtType = TYP_I_IMPL;
2798 /*****************************************************************************
2799 * TYP_INT and TYP_I_IMPL can be used almost interchangeably, but we want
2800 * to make that an explicit cast in our trees, so any implicit casts that
2801 * exist in the IL (at least on 64-bit where TYP_I_IMPL != TYP_INT) are
2802 * turned into explicit casts here.
2803 * We also allow an implicit conversion of a ldnull into a TYP_I_IMPL(0)
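 * For example (illustrative), on a 64-bit target an int32 value used where a
 * native int is wanted becomes GT_CAST<TYP_I_IMPL>(value); the reverse
 * direction gets an explicit cast down to TYP_INT instead.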
2806 GenTreePtr Compiler::impImplicitIorI4Cast(GenTreePtr tree, var_types dstTyp)
2808 var_types currType = genActualType(tree->gtType);
2809 var_types wantedType = genActualType(dstTyp);
2811 if (wantedType != currType)
2813 // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
2814 if ((tree->OperGet() == GT_CNS_INT) && varTypeIsI(dstTyp))
2816 if (!varTypeIsI(tree->gtType) || ((tree->gtType == TYP_REF) && (tree->gtIntCon.gtIconVal == 0)))
2818 tree->gtType = TYP_I_IMPL;
2821 #ifdef _TARGET_64BIT_
2822 else if (varTypeIsI(wantedType) && (currType == TYP_INT))
2824 // Note that this allows TYP_INT to be cast to a TYP_I_IMPL when wantedType is a TYP_BYREF or TYP_REF
2825 tree = gtNewCastNode(TYP_I_IMPL, tree, TYP_I_IMPL);
2827 else if ((wantedType == TYP_INT) && varTypeIsI(currType))
2829 // Note that this allows TYP_BYREF or TYP_REF to be cast to a TYP_INT
2830 tree = gtNewCastNode(TYP_INT, tree, TYP_INT);
2832 #endif // _TARGET_64BIT_
2838 /*****************************************************************************
2839 * TYP_FLOAT and TYP_DOUBLE can be used almost interchangeably in some cases,
2840 * but we want to make that an explicit cast in our trees, so any implicit casts
2841 * that exist in the IL are turned into explicit casts here.
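 * For example (sketch): a TYP_FLOAT value used where a TYP_DOUBLE is wanted
 * becomes GT_CAST<TYP_DOUBLE>(value) rather than relying on an implicit widening.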
2844 GenTreePtr Compiler::impImplicitR4orR8Cast(GenTreePtr tree, var_types dstTyp)
2846 #ifndef LEGACY_BACKEND
2847 if (varTypeIsFloating(tree) && varTypeIsFloating(dstTyp) && (dstTyp != tree->gtType))
2849 tree = gtNewCastNode(dstTyp, tree, dstTyp);
2851 #endif // !LEGACY_BACKEND
2856 //------------------------------------------------------------------------
2857 // impInitializeArrayIntrinsic: Attempts to replace a call to InitializeArray
2858 // with a GT_COPYBLK node.
2861 // sig - The InitializeArray signature.
2864 // A pointer to the newly created GT_COPYBLK node if the replacement succeeds or
2865 // nullptr otherwise.
2868 // The function recognizes the following IL pattern:
2869 // ldc <length> or a list of ldc <lower bound>/<length>
2870 // newarr or newobj
2871 // dup
2872 // ldtoken <field handle>
2873 // call InitializeArray
2874 // The lower bounds need not be constant except when the array rank is 1.
2875 // The function recognizes all kinds of arrays thus enabling a small runtime
2876 // such as CoreRT to skip providing an implementation for InitializeArray.
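// A typical source of this pattern is a C# array initializer (illustrative):
//     static readonly int[] Primes = { 2, 3, 5, 7 };
// which the C# compiler typically emits as: ldc.i4.4; newarr int32; dup;
// ldtoken <PrivateImplementationDetails field>; call InitializeArray.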
2878 GenTreePtr Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig)
2880 assert(sig->numArgs == 2);
2882 GenTreePtr fieldTokenNode = impStackTop(0).val;
2883 GenTreePtr arrayLocalNode = impStackTop(1).val;
2886 // Verify that the field token is known and valid. Note that it's also
2887 // possible for the token to come from reflection, in which case we cannot do
2888 // the optimization and must therefore revert to calling the helper. You can
2889 // see an example of this in bvt\DynIL\initarray2.exe (in Main).
2892 // Check to see if the ldtoken helper call is what we see here.
2893 if (fieldTokenNode->gtOper != GT_CALL || (fieldTokenNode->gtCall.gtCallType != CT_HELPER) ||
2894 (fieldTokenNode->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD)))
2899 // Strip helper call away
2900 fieldTokenNode = fieldTokenNode->gtCall.gtCallArgs->Current();
2902 if (fieldTokenNode->gtOper == GT_IND)
2904 fieldTokenNode = fieldTokenNode->gtOp.gtOp1;
2907 // Check for constant
2908 if (fieldTokenNode->gtOper != GT_CNS_INT)
2913 CORINFO_FIELD_HANDLE fieldToken = (CORINFO_FIELD_HANDLE)fieldTokenNode->gtIntCon.gtCompileTimeHandle;
2914 if (!fieldTokenNode->IsIconHandle(GTF_ICON_FIELD_HDL) || (fieldToken == nullptr))
2920 // We need to get the number of elements in the array and the size of each element.
2921 // We verify that the newarr statement is exactly what we expect it to be.
2922 // If it's not then we just return NULL and we don't optimize this call
2926 // It is possible that we don't have any statements in the block yet
2928 if (impTreeLast->gtOper != GT_STMT)
2930 assert(impTreeLast->gtOper == GT_BEG_STMTS);
2935 // We start by looking at the last statement, making sure it's an assignment, and
2936 // that the target of the assignment is the array passed to InitializeArray.
2938 GenTreePtr arrayAssignment = impTreeLast->gtStmt.gtStmtExpr;
2939 if ((arrayAssignment->gtOper != GT_ASG) || (arrayAssignment->gtOp.gtOp1->gtOper != GT_LCL_VAR) ||
2940 (arrayLocalNode->gtOper != GT_LCL_VAR) ||
2941 (arrayAssignment->gtOp.gtOp1->gtLclVarCommon.gtLclNum != arrayLocalNode->gtLclVarCommon.gtLclNum))
2947 // Make sure that the object being assigned is a helper call.
2950 GenTreePtr newArrayCall = arrayAssignment->gtOp.gtOp2;
2951 if ((newArrayCall->gtOper != GT_CALL) || (newArrayCall->gtCall.gtCallType != CT_HELPER))
2957 // Verify that it is one of the new array helpers.
2960 bool isMDArray = false;
2962 if (newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_DIRECT) &&
2963 newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_OBJ) &&
2964 newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_VC) &&
2965 newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_ALIGN8)
2966 #ifdef FEATURE_READYTORUN_COMPILER
2967 && newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1)
2971 #if COR_JIT_EE_VERSION > 460
2972 if (newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEW_MDARR_NONVARARG))
2981 CORINFO_CLASS_HANDLE arrayClsHnd = (CORINFO_CLASS_HANDLE)newArrayCall->gtCall.compileTimeHelperArgumentHandle;
2984 // Make sure we found a compile-time handle to the array
2993 S_UINT32 numElements;
2997 rank = info.compCompHnd->getArrayRank(arrayClsHnd);
3004 GenTreeArgList* tokenArg = newArrayCall->gtCall.gtCallArgs;
3005 assert(tokenArg != nullptr);
3006 GenTreeArgList* numArgsArg = tokenArg->Rest();
3007 assert(numArgsArg != nullptr);
3008 GenTreeArgList* argsArg = numArgsArg->Rest();
3009 assert(argsArg != nullptr);
3012 // The number of arguments should be a constant between 1 and 64. The rank can't be 0
3013 // so at least one length must be present and the rank can't exceed 32 so there can
3014 // be at most 64 arguments - 32 lengths and 32 lower bounds.
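// E.g. (illustrative) a rank-3 array created with lower bounds passes
// numArgs == 6 -- lb0, len0, lb1, len1, lb2, len2 -- matching the
// numArgs == rank * 2 case tested below.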
3017 if ((!numArgsArg->Current()->IsCnsIntOrI()) || (numArgsArg->Current()->AsIntCon()->IconValue() < 1) ||
3018 (numArgsArg->Current()->AsIntCon()->IconValue() > 64))
3023 unsigned numArgs = static_cast<unsigned>(numArgsArg->Current()->AsIntCon()->IconValue());
3024 bool lowerBoundsSpecified;
3026 if (numArgs == rank * 2)
3028 lowerBoundsSpecified = true;
3030 else if (numArgs == rank)
3032 lowerBoundsSpecified = false;
3035 // If the rank is 1 and a lower bound isn't specified then the runtime creates
3036 // an SDArray. Note that even if a lower bound is specified it can be 0 and then
3037 // we get an SDArray as well, see the for loop below.
3051 // The rank is known to be at least 1 so we can start with numElements being 1
3052 // to avoid the need to special case the first dimension.
3055 numElements = S_UINT32(1);
3059 static bool IsArgsFieldInit(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs)
3061 return (tree->OperGet() == GT_ASG) && IsArgsFieldIndir(tree->gtGetOp1(), index, lvaNewObjArrayArgs) &&
3062 IsArgsAddr(tree->gtGetOp1()->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs);
3065 static bool IsArgsFieldIndir(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs)
3067 return (tree->OperGet() == GT_IND) && (tree->gtGetOp1()->OperGet() == GT_ADD) &&
3068 (tree->gtGetOp1()->gtGetOp2()->IsIntegralConst(sizeof(INT32) * index)) &&
3069 IsArgsAddr(tree->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs);
3072 static bool IsArgsAddr(GenTree* tree, unsigned lvaNewObjArrayArgs)
3074 return (tree->OperGet() == GT_ADDR) && (tree->gtGetOp1()->OperGet() == GT_LCL_VAR) &&
3075 (tree->gtGetOp1()->AsLclVar()->GetLclNum() == lvaNewObjArrayArgs);
3078 static bool IsComma(GenTree* tree)
3080 return (tree != nullptr) && (tree->OperGet() == GT_COMMA);
3084 unsigned argIndex = 0;
3087 for (comma = argsArg->Current(); Match::IsComma(comma); comma = comma->gtGetOp2())
3089 if (lowerBoundsSpecified)
3092 // In general lower bounds can be ignored because they're not needed to
3093 // calculate the total number of elements. But for single dimensional arrays
3094 // we need to know if the lower bound is 0 because in this case the runtime
3095 // creates an SDArray and this affects the way the array data offset is calculated.
3100 GenTree* lowerBoundAssign = comma->gtGetOp1();
3101 assert(Match::IsArgsFieldInit(lowerBoundAssign, argIndex, lvaNewObjArrayArgs));
3102 GenTree* lowerBoundNode = lowerBoundAssign->gtGetOp2();
3104 if (lowerBoundNode->IsIntegralConst(0))
3110 comma = comma->gtGetOp2();
3114 GenTree* lengthNodeAssign = comma->gtGetOp1();
3115 assert(Match::IsArgsFieldInit(lengthNodeAssign, argIndex, lvaNewObjArrayArgs));
3116 GenTree* lengthNode = lengthNodeAssign->gtGetOp2();
3118 if (!lengthNode->IsCnsIntOrI())
3123 numElements *= S_SIZE_T(lengthNode->AsIntCon()->IconValue());
3127 assert((comma != nullptr) && Match::IsArgsAddr(comma, lvaNewObjArrayArgs));
3129 if (argIndex != numArgs)
3137 // Make sure there are exactly two arguments: the array class and
3138 // the number of elements.
3141 GenTreePtr arrayLengthNode;
3143 GenTreeArgList* args = newArrayCall->gtCall.gtCallArgs;
3144 #ifdef FEATURE_READYTORUN_COMPILER
3145 if (newArrayCall->gtCall.gtCallMethHnd == eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1))
3147 // Array length is 1st argument for readytorun helper
3148 arrayLengthNode = args->Current();
3153 // Array length is 2nd argument for regular helper
3154 arrayLengthNode = args->Rest()->Current();
3158 // Make sure that the number of elements looks valid.
3160 if (arrayLengthNode->gtOper != GT_CNS_INT)
3165 numElements = S_SIZE_T(arrayLengthNode->gtIntCon.gtIconVal);
3167 if (!info.compCompHnd->isSDArray(arrayClsHnd))
3173 CORINFO_CLASS_HANDLE elemClsHnd;
3174 var_types elementType = JITtype2varType(info.compCompHnd->getChildType(arrayClsHnd, &elemClsHnd));
3177 // Note that genTypeSize will return zero for non-primitive types, which is exactly
3178 // what we want (size will then be 0, and we will catch this in the conditional below).
3179 // Note that we don't expect this to fail for valid binaries, so we assert in the
3180 // non-verification case (the verification case should not assert but rather correctly
3181 // handle bad binaries). This assert is not guarding any specific invariant, but rather
3182 // saying that we don't expect this to happen, and if it is hit, we need to investigate why.
3186 S_UINT32 elemSize(genTypeSize(elementType));
3187 S_UINT32 size = elemSize * S_UINT32(numElements);
3189 if (size.IsOverflow())
3194 if ((size.Value() == 0) || (varTypeIsGC(elementType)))
3196 assert(verNeedsVerification());
3200 void* initData = info.compCompHnd->getArrayInitializationData(fieldToken, size.Value());
3207 // At this point we are ready to commit to implementing the InitializeArray
3208 // intrinsic using a struct assignment. Pop the arguments from the stack and
3209 // return the struct assignment node.
3215 const unsigned blkSize = size.Value();
3220 unsigned dataOffset = eeGetMDArrayDataOffset(elementType, rank);
3222 dst = gtNewOperNode(GT_ADD, TYP_BYREF, arrayLocalNode, gtNewIconNode(dataOffset, TYP_I_IMPL));
3226 dst = gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewIndexRef(elementType, arrayLocalNode, gtNewIconNode(0)));
3228 GenTreePtr blk = gtNewBlockVal(dst, blkSize);
3229 GenTreePtr srcAddr = gtNewIconHandleNode((size_t)initData, GTF_ICON_STATIC_HDL);
3230 GenTreePtr src = gtNewOperNode(GT_IND, TYP_STRUCT, srcAddr);
3232 return gtNewBlkOpNode(blk, // dst
3239 /*****************************************************************************/
3240 // Returns the GenTree that should be used to do the intrinsic instead of the call.
3241 // Returns NULL if an intrinsic cannot be used
3243 GenTreePtr Compiler::impIntrinsic(CORINFO_CLASS_HANDLE clsHnd,
3244 CORINFO_METHOD_HANDLE method,
3245 CORINFO_SIG_INFO* sig,
3249 CorInfoIntrinsics* pIntrinsicID)
3251 bool mustExpand = false;
3252 #if COR_JIT_EE_VERSION > 460
3253 CorInfoIntrinsics intrinsicID = info.compCompHnd->getIntrinsicID(method, &mustExpand);
3255 CorInfoIntrinsics intrinsicID = info.compCompHnd->getIntrinsicID(method);
3257 *pIntrinsicID = intrinsicID;
3259 #ifndef _TARGET_ARM_
3260 genTreeOps interlockedOperator;
3263 if (intrinsicID == CORINFO_INTRINSIC_StubHelpers_GetStubContext)
3265 // must be done regardless of DbgCode and MinOpts
3266 return gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL);
3268 #ifdef _TARGET_64BIT_
3269 if (intrinsicID == CORINFO_INTRINSIC_StubHelpers_GetStubContextAddr)
3271 // must be done regardless of DbgCode and MinOpts
3272 return gtNewOperNode(GT_ADDR, TYP_I_IMPL, gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL));
3275 assert(intrinsicID != CORINFO_INTRINSIC_StubHelpers_GetStubContextAddr);
3278 GenTreePtr retNode = nullptr;
3281 // We disable the inlining of intrinsics for MinOpts.
3283 if (!mustExpand && (opts.compDbgCode || opts.MinOpts()))
3285 *pIntrinsicID = CORINFO_INTRINSIC_Illegal;
3289 // Currently we don't have CORINFO_INTRINSIC_Exp because it does not
3290 // seem to work properly for Infinity values; we don't do
3291 // CORINFO_INTRINSIC_Pow because it needs a helper which we currently don't have.
3293 var_types callType = JITtype2varType(sig->retType);
3295 /* First do the intrinsics which are always smaller than a call */
3297 switch (intrinsicID)
3299 GenTreePtr op1, op2;
3301 case CORINFO_INTRINSIC_Sin:
3302 case CORINFO_INTRINSIC_Sqrt:
3303 case CORINFO_INTRINSIC_Abs:
3304 case CORINFO_INTRINSIC_Cos:
3305 case CORINFO_INTRINSIC_Round:
3306 case CORINFO_INTRINSIC_Cosh:
3307 case CORINFO_INTRINSIC_Sinh:
3308 case CORINFO_INTRINSIC_Tan:
3309 case CORINFO_INTRINSIC_Tanh:
3310 case CORINFO_INTRINSIC_Asin:
3311 case CORINFO_INTRINSIC_Acos:
3312 case CORINFO_INTRINSIC_Atan:
3313 case CORINFO_INTRINSIC_Atan2:
3314 case CORINFO_INTRINSIC_Log10:
3315 case CORINFO_INTRINSIC_Pow:
3316 case CORINFO_INTRINSIC_Exp:
3317 case CORINFO_INTRINSIC_Ceiling:
3318 case CORINFO_INTRINSIC_Floor:
3320 // These are math intrinsics
3322 assert(callType != TYP_STRUCT);
3326 #if defined(LEGACY_BACKEND)
3327 if (IsTargetIntrinsic(intrinsicID))
3328 #elif !defined(_TARGET_X86_)
3329 // Intrinsics that are not implemented directly by target instructions will
3330 // be re-materialized as user calls in the rationalizer. For prefixed tail calls,
3331 // don't do this optimization, because
3332 // a) For back-compatibility reasons on desktop .NET 4.6 / 4.6.1
3333 // b) It would be a non-trivial task, or too late, to re-materialize a surviving
3334 // tail-prefixed GT_INTRINSIC as a tail call in the rationalizer.
3335 if (!IsIntrinsicImplementedByUserCall(intrinsicID) || !tailCall)
3337 // On x86 RyuJIT, importing intrinsics that are implemented as user calls can cause incorrect calculation
3338 // of the depth of the stack if these intrinsics are used as arguments to another call. This causes bad
3339 // code generation for certain EH constructs.
3340 if (!IsIntrinsicImplementedByUserCall(intrinsicID))
3343 switch (sig->numArgs)
3346 op1 = impPopStack().val;
3348 #if FEATURE_X87_DOUBLES
3350 // The X87 stack doesn't differentiate between float/double,
3351 // so it doesn't need a cast, but everybody else does.
3352 // Just double-check that it is at least an FP type.
3353 noway_assert(varTypeIsFloating(op1));
3355 #else // FEATURE_X87_DOUBLES
3357 if (op1->TypeGet() != callType)
3359 op1 = gtNewCastNode(callType, op1, callType);
3362 #endif // FEATURE_X87_DOUBLES
3364 op1 = new (this, GT_INTRINSIC)
3365 GenTreeIntrinsic(genActualType(callType), op1, intrinsicID, method);
3369 op2 = impPopStack().val;
3370 op1 = impPopStack().val;
3372 #if FEATURE_X87_DOUBLES
3374 // The X87 stack doesn't differentiate between float/double,
3375 // so it doesn't need a cast, but everybody else does.
3376 // Just double-check that they are at least FP types.
3377 noway_assert(varTypeIsFloating(op2));
3378 noway_assert(varTypeIsFloating(op1));
3380 #else // FEATURE_X87_DOUBLES
3382 if (op2->TypeGet() != callType)
3384 op2 = gtNewCastNode(callType, op2, callType);
3386 if (op1->TypeGet() != callType)
3388 op1 = gtNewCastNode(callType, op1, callType);
3391 #endif // FEATURE_X87_DOUBLES
3393 op1 = new (this, GT_INTRINSIC)
3394 GenTreeIntrinsic(genActualType(callType), op1, op2, intrinsicID, method);
3398 NO_WAY("Unsupported number of args for Math Intrinsic");
3401 #ifndef LEGACY_BACKEND
3402 if (IsIntrinsicImplementedByUserCall(intrinsicID))
3404 op1->gtFlags |= GTF_CALL;
3412 #ifdef _TARGET_XARCH_
3413 // TODO-ARM-CQ: reenable treating Interlocked operation as intrinsic
3414 case CORINFO_INTRINSIC_InterlockedAdd32:
3415 interlockedOperator = GT_LOCKADD;
3416 goto InterlockedBinOpCommon;
3417 case CORINFO_INTRINSIC_InterlockedXAdd32:
3418 interlockedOperator = GT_XADD;
3419 goto InterlockedBinOpCommon;
3420 case CORINFO_INTRINSIC_InterlockedXchg32:
3421 interlockedOperator = GT_XCHG;
3422 goto InterlockedBinOpCommon;
3424 #ifdef _TARGET_AMD64_
3425 case CORINFO_INTRINSIC_InterlockedAdd64:
3426 interlockedOperator = GT_LOCKADD;
3427 goto InterlockedBinOpCommon;
3428 case CORINFO_INTRINSIC_InterlockedXAdd64:
3429 interlockedOperator = GT_XADD;
3430 goto InterlockedBinOpCommon;
3431 case CORINFO_INTRINSIC_InterlockedXchg64:
3432 interlockedOperator = GT_XCHG;
3433 goto InterlockedBinOpCommon;
3434 #endif // _TARGET_AMD64_
3436 InterlockedBinOpCommon:
3437 assert(callType != TYP_STRUCT);
3438 assert(sig->numArgs == 2);
3440 op2 = impPopStack().val;
3441 op1 = impPopStack().val;
3447 // (The first argument is an address -- the address of a field, for example.)
3449 // In the case where the first argument is the address of a local, we might
3450 // want to make this *not* make the var address-taken -- but atomic instructions
3451 // on a local are probably pretty useless anyway, so we probably don't care.
3453 op1 = gtNewOperNode(interlockedOperator, genActualType(callType), op1, op2);
3454 op1->gtFlags |= GTF_GLOB_EFFECT;
3457 #endif // _TARGET_XARCH_
3459 case CORINFO_INTRINSIC_MemoryBarrier:
3461 assert(sig->numArgs == 0);
3463 op1 = new (this, GT_MEMORYBARRIER) GenTree(GT_MEMORYBARRIER, TYP_VOID);
3464 op1->gtFlags |= GTF_GLOB_EFFECT;
3468 #ifdef _TARGET_XARCH_
3469 // TODO-ARM-CQ: reenable treating InterlockedCmpXchg32 operation as intrinsic
3470 case CORINFO_INTRINSIC_InterlockedCmpXchg32:
3471 #ifdef _TARGET_AMD64_
3472 case CORINFO_INTRINSIC_InterlockedCmpXchg64:
3475 assert(callType != TYP_STRUCT);
3476 assert(sig->numArgs == 3);
3479 op3 = impPopStack().val; // comparand
3480 op2 = impPopStack().val; // value
3481 op1 = impPopStack().val; // location
3483 GenTreePtr node = new (this, GT_CMPXCHG) GenTreeCmpXchg(genActualType(callType), op1, op2, op3);
3485 node->gtCmpXchg.gtOpLocation->gtFlags |= GTF_DONT_CSE;
3491 case CORINFO_INTRINSIC_StringLength:
3492 op1 = impPopStack().val;
3493 if (!opts.MinOpts() && !opts.compDbgCode)
3495 GenTreeArrLen* arrLen =
3496 new (this, GT_ARR_LENGTH) GenTreeArrLen(TYP_INT, op1, offsetof(CORINFO_String, stringLen));
3501 /* Create the expression "*(str_addr + stringLengthOffset)" */
3502 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
3503 gtNewIconNode(offsetof(CORINFO_String, stringLen), TYP_I_IMPL));
3504 op1 = gtNewOperNode(GT_IND, TYP_INT, op1);
3509 case CORINFO_INTRINSIC_StringGetChar:
3510 op2 = impPopStack().val;
3511 op1 = impPopStack().val;
3512 op1 = gtNewIndexRef(TYP_CHAR, op1, op2);
3513 op1->gtFlags |= GTF_INX_STRING_LAYOUT;
3517 case CORINFO_INTRINSIC_InitializeArray:
3518 retNode = impInitializeArrayIntrinsic(sig);
3521 case CORINFO_INTRINSIC_Array_Address:
3522 case CORINFO_INTRINSIC_Array_Get:
3523 case CORINFO_INTRINSIC_Array_Set:
3524 retNode = impArrayAccessIntrinsic(clsHnd, sig, memberRef, readonlyCall, intrinsicID);
3527 case CORINFO_INTRINSIC_GetTypeFromHandle:
3528 op1 = impStackTop(0).val;
3529 if (op1->gtOper == GT_CALL && (op1->gtCall.gtCallType == CT_HELPER) &&
3530 gtIsTypeHandleToRuntimeTypeHelper(op1))
3532 op1 = impPopStack().val;
3533 // Change call to return RuntimeType directly.
3534 op1->gtType = TYP_REF;
3537 // Call the regular function.
3540 case CORINFO_INTRINSIC_RTH_GetValueInternal:
3541 op1 = impStackTop(0).val;
3542 if (op1->gtOper == GT_CALL && (op1->gtCall.gtCallType == CT_HELPER) &&
3543 gtIsTypeHandleToRuntimeTypeHelper(op1))
3546 // Old tree:  Helper-RuntimeTypeHandle -> TreeToGetNativeTypeHandle
3549 // New tree:  TreeToGetNativeTypeHandle
3551 // Remove call to helper and return the native TypeHandle pointer that was the parameter
3554 op1 = impPopStack().val;
3556 // Get native TypeHandle argument to old helper
3557 op1 = op1->gtCall.gtCallArgs;
3558 assert(op1->OperIsList());
3559 assert(op1->gtOp.gtOp2 == nullptr);
3560 op1 = op1->gtOp.gtOp1;
3563 // Call the regular function.
3566 #ifndef LEGACY_BACKEND
3567 case CORINFO_INTRINSIC_Object_GetType:
3569 op1 = impPopStack().val;
3570 op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, intrinsicID, method);
3572 // Set the CALL flag to indicate that the operator is implemented by a call.
3573 // Also set the EXCEPTION flag because the native implementation of the
3574 // CORINFO_INTRINSIC_Object_GetType intrinsic can throw NullReferenceException.
3575 op1->gtFlags |= (GTF_CALL | GTF_EXCEPT);
3581 /* Unknown intrinsic */
3587 if (retNode == nullptr)
3589 NO_WAY("JIT must expand the intrinsic!");
3596 /*****************************************************************************/
3598 GenTreePtr Compiler::impArrayAccessIntrinsic(
3599 CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, int memberRef, bool readonlyCall, CorInfoIntrinsics intrinsicID)
3601 /* If we are generating SMALL_CODE, we don't want to use intrinsics for
3602 the following, as it generates fatter code.
3605 if (compCodeOpt() == SMALL_CODE)
3610 /* These intrinsics generate fatter (but faster) code and are only
3611 done if we don't need SMALL_CODE */
3613 unsigned rank = (intrinsicID == CORINFO_INTRINSIC_Array_Set) ? (sig->numArgs - 1) : sig->numArgs;
3615 // The rank 1 case is special because it has to handle two array formats;
3616 // we simply don't handle that case.
3617 if (rank > GT_ARR_MAX_RANK || rank <= 1)
3622 CORINFO_CLASS_HANDLE arrElemClsHnd = nullptr;
3623 var_types elemType = JITtype2varType(info.compCompHnd->getChildType(clsHnd, &arrElemClsHnd));
3625 // For the ref case, we will only be able to inline if the types match
3626 // (the verifier checks for this; we don't care about the nonverified case) and the
3627 // type is final (so we don't need to do the cast).
3628 if ((intrinsicID != CORINFO_INTRINSIC_Array_Get) && !readonlyCall && varTypeIsGC(elemType))
3630 // Get the call site signature
3631 CORINFO_SIG_INFO LocalSig;
3632 eeGetCallSiteSig(memberRef, info.compScopeHnd, impTokenLookupContextHandle, &LocalSig);
3633 assert(LocalSig.hasThis());
3635 CORINFO_CLASS_HANDLE actualElemClsHnd;
3637 if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
3639 // Fetch the last argument, the one that indicates the type we are setting.
3640 CORINFO_ARG_LIST_HANDLE argType = LocalSig.args;
3641 for (unsigned r = 0; r < rank; r++)
3643 argType = info.compCompHnd->getArgNext(argType);
3646 typeInfo argInfo = verParseArgSigToTypeInfo(&LocalSig, argType);
3647 actualElemClsHnd = argInfo.GetClassHandle();
3651 assert(intrinsicID == CORINFO_INTRINSIC_Array_Address);
3653 // Fetch the return type
3654 typeInfo retInfo = verMakeTypeInfo(LocalSig.retType, LocalSig.retTypeClass);
3655 assert(retInfo.IsByRef());
3656 actualElemClsHnd = retInfo.GetClassHandle();
3659 // if it's not final, we can't do the optimization
3660 if (!(info.compCompHnd->getClassAttribs(actualElemClsHnd) & CORINFO_FLG_FINAL))
3666 unsigned arrayElemSize;
3667 if (elemType == TYP_STRUCT)
3669 assert(arrElemClsHnd);
3671 arrayElemSize = info.compCompHnd->getClassSize(arrElemClsHnd);
3675 arrayElemSize = genTypeSize(elemType);
3678 if ((unsigned char)arrayElemSize != arrayElemSize)
3680 // arrayElemSize would be truncated as an unsigned char.
3681 // This means the array element is too large. Don't do the optimization.
3685 GenTreePtr val = nullptr;
3687 if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
3689 // Assignment of a struct is more work, and there are more gets than sets.
3690 if (elemType == TYP_STRUCT)
3695 val = impPopStack().val;
3696 assert(genActualType(elemType) == genActualType(val->gtType) ||
3697 (elemType == TYP_FLOAT && val->gtType == TYP_DOUBLE) ||
3698 (elemType == TYP_INT && val->gtType == TYP_BYREF) ||
3699 (elemType == TYP_DOUBLE && val->gtType == TYP_FLOAT));
3702 noway_assert((unsigned char)GT_ARR_MAX_RANK == GT_ARR_MAX_RANK);
3704 GenTreePtr inds[GT_ARR_MAX_RANK];
3705 for (unsigned k = rank; k > 0; k--)
3707 inds[k - 1] = impPopStack().val;
3710 GenTreePtr arr = impPopStack().val;
3711 assert(arr->gtType == TYP_REF);
3713 GenTreePtr arrElem =
3714 new (this, GT_ARR_ELEM) GenTreeArrElem(TYP_BYREF, arr, static_cast<unsigned char>(rank),
3715 static_cast<unsigned char>(arrayElemSize), elemType, &inds[0]);
3717 if (intrinsicID != CORINFO_INTRINSIC_Array_Address)
3719 arrElem = gtNewOperNode(GT_IND, elemType, arrElem);
3722 if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
3724 assert(val != nullptr);
3725 return gtNewAssignNode(arrElem, val);
3733 BOOL Compiler::verMergeEntryStates(BasicBlock* block, bool* changed)
3737 // do some basic checks first
3738 if (block->bbStackDepthOnEntry() != verCurrentState.esStackDepth)
3743 if (verCurrentState.esStackDepth > 0)
3745 // merge stack types
3746 StackEntry* parentStack = block->bbStackOnEntry();
3747 StackEntry* childStack = verCurrentState.esStack;
3749 for (i = 0; i < verCurrentState.esStackDepth; i++, parentStack++, childStack++)
3751 if (tiMergeToCommonParent(&parentStack->seTypeInfo, &childStack->seTypeInfo, changed) == FALSE)
3758 // merge initialization status of this ptr
3760 if (verTrackObjCtorInitState)
3762 // If we're tracking the CtorInitState, then it must not be unknown in the current state.
3763 assert(verCurrentState.thisInitialized != TIS_Bottom);
3765 // If the successor block's thisInit state is unknown, copy it from the current state.
3766 if (block->bbThisOnEntry() == TIS_Bottom)
3769 verSetThisInit(block, verCurrentState.thisInitialized);
3771 else if (verCurrentState.thisInitialized != block->bbThisOnEntry())
3773 if (block->bbThisOnEntry() != TIS_Top)
3776 verSetThisInit(block, TIS_Top);
3778 if (block->bbFlags & BBF_FAILED_VERIFICATION)
3780 // The block is bad. Control can flow through the block to any handler that catches the
3781 // verification exception, but the importer ignores bad blocks and therefore won't model
3782 // this flow in the normal way. To complete the merge into the bad block, the new state
3783 // needs to be manually pushed to the handlers that may be reached after the verification
3784 // exception occurs.
3786 // Usually, the new state was already propagated to the relevant handlers while processing
3787 // the predecessors of the bad block. The exception is when the bad block is at the start
3788 // of a try region, meaning it is protected by additional handlers that do not protect its
3789 // predecessors.
3791 if (block->hasTryIndex() && ((block->bbFlags & BBF_TRY_BEG) != 0))
3793 // Push TIS_Top to the handlers that protect the bad block. Note that this can cause
3794 // recursive calls back into this code path (if successors of the current bad block are
3795 // also bad blocks).
3797 ThisInitState origTIS = verCurrentState.thisInitialized;
3798 verCurrentState.thisInitialized = TIS_Top;
3799 impVerifyEHBlock(block, true);
3800 verCurrentState.thisInitialized = origTIS;
3808 assert(verCurrentState.thisInitialized == TIS_Bottom && block->bbThisOnEntry() == TIS_Bottom);
3814 /*****************************************************************************
3815 * 'logMsg' is true if a log message needs to be logged. false if the caller has
3816 * already logged it (presumably in a more detailed fashion than done here)
3817 * 'bVerificationException' is true for a verification exception, false for a
3818 * "call unauthorized by host" exception.
3821 void Compiler::verConvertBBToThrowVerificationException(BasicBlock* block DEBUGARG(bool logMsg))
3823 block->bbJumpKind = BBJ_THROW;
3824 block->bbFlags |= BBF_FAILED_VERIFICATION;
3826 impCurStmtOffsSet(block->bbCodeOffs);
3829 // we need this since BeginTreeList asserts otherwise
3830 impTreeList = impTreeLast = nullptr;
3831 block->bbFlags &= ~BBF_IMPORTED;
3835 JITLOG((LL_ERROR, "Verification failure: while compiling %s near IL offset %x..%xh \n", info.compFullName,
3836 block->bbCodeOffs, block->bbCodeOffsEnd));
3839 printf("\n\nVerification failure: %s near IL %xh \n", info.compFullName, block->bbCodeOffs);
3843 if (JitConfig.DebugBreakOnVerificationFailure())
3851 // If the stack is non-empty, evaluate all the side effects.
3852 if (verCurrentState.esStackDepth > 0)
3854 impEvalSideEffects();
3856 assert(verCurrentState.esStackDepth == 0);
3858 GenTreePtr op1 = gtNewHelperCallNode(CORINFO_HELP_VERIFICATION, TYP_VOID, GTF_EXCEPT,
3859 gtNewArgList(gtNewIconNode(block->bbCodeOffs)));
3860 // verCurrentState.esStackDepth = 0;
3861 impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
3863 // The inliner is not able to handle methods that require a throw block, so
3864 // make sure this method never gets inlined.
3865 info.compCompHnd->setMethodAttribs(info.compMethodHnd, CORINFO_FLG_BAD_INLINEE);
3868 /*****************************************************************************
3871 void Compiler::verHandleVerificationFailure(BasicBlock* block DEBUGARG(bool logMsg))
3874 // In AMD64, for historical reasons involving design limitations of JIT64, the VM has a
3875 // slightly different mechanism in which it calls the JIT to perform IL verification:
3876 // in the case of transparent methods the VM calls for a predicate IsVerifiable()
3877 // that consists of calling the JIT with the IMPORT_ONLY flag and with the IL verify flag on.
3878 // If the JIT determines the method is not verifiable, it should raise the exception to the VM and let
3879 // it bubble up until reported by the runtime. Currently in RyuJIT, this method doesn't bubble
3880 // up the exception; instead it embeds a throw inside the offending basic block and lets it
3881 // fail at run time when the jitted method executes.
3883 // For AMD64 we don't want this behavior when the JIT has been called only for verification (i.e.
3884 // with the IMPORT_ONLY and IL Verification flag set) because this won't actually generate code,
3885 // just try to find out whether to fail this method before even actually jitting it. So, in case
3886 // we detect these two conditions, instead of generating a throw statement inside the offending
3887 // basic block, we immediately fail to JIT and notify the VM, making the IsVerifiable() predicate
3888 // return false and RyuJIT behave the same way JIT64 does.
3890 // The rationale behind this workaround is to avoid modifying the VM and maintain compatibility between JIT64 and
3891 // RyuJIT for the time being until we completely replace JIT64.
3892 // TODO-ARM64-Cleanup: We probably want to actually modify the VM in the future to avoid the unnecessary two passes.
3894 // In AMD64 we must make sure we're behaving the same way as JIT64, meaning we should only raise the verification
3895 // exception if we are only importing and verifying. The method verNeedsVerification() can also modify the
3896 // tiVerificationNeeded flag in the case it determines it can 'skip verification' during importation and defer it
3897 // to a runtime check. That's why we must assert one or the other (since the flag tiVerificationNeeded can
3898 // be turned off during importation).
3899 CLANG_FORMAT_COMMENT_ANCHOR;
3901 #ifdef _TARGET_64BIT_
3904 bool canSkipVerificationResult =
3905 info.compCompHnd->canSkipMethodVerification(info.compMethodHnd) != CORINFO_VERIFICATION_CANNOT_SKIP;
3906 assert(tiVerificationNeeded || canSkipVerificationResult);
3909 // Add the non-verifiable flag to the compiler
3910 if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IMPORT_ONLY))
3912 tiIsVerifiableCode = FALSE;
3914 #endif //_TARGET_64BIT_
3915 verResetCurrentState(block, &verCurrentState);
3916 verConvertBBToThrowVerificationException(block DEBUGARG(logMsg));
3919 impNoteLastILoffs(); // Remember at which BC offset the tree was finished
3923 /******************************************************************************/
3924 typeInfo Compiler::verMakeTypeInfo(CorInfoType ciType, CORINFO_CLASS_HANDLE clsHnd)
3926 assert(ciType < CORINFO_TYPE_COUNT);
3931 case CORINFO_TYPE_STRING:
3932 case CORINFO_TYPE_CLASS:
3933 tiResult = verMakeTypeInfo(clsHnd);
3934 if (!tiResult.IsType(TI_REF))
3935 { // type must be consistent with element type
3940 #ifdef _TARGET_64BIT_
3941 case CORINFO_TYPE_NATIVEINT:
3942 case CORINFO_TYPE_NATIVEUINT:
3945 // If we have more precise information, use it
3946 return verMakeTypeInfo(clsHnd);
3950 return typeInfo::nativeInt();
3953 #endif // _TARGET_64BIT_
3955 case CORINFO_TYPE_VALUECLASS:
3956 case CORINFO_TYPE_REFANY:
3957 tiResult = verMakeTypeInfo(clsHnd);
3958 // type must be consistent with element type;
3959 if (!tiResult.IsValueClass())
3964 case CORINFO_TYPE_VAR:
3965 return verMakeTypeInfo(clsHnd);
3967 case CORINFO_TYPE_PTR: // for now, pointers are treated as an error
3968 case CORINFO_TYPE_VOID:
3972 case CORINFO_TYPE_BYREF:
3974 CORINFO_CLASS_HANDLE childClassHandle;
3975 CorInfoType childType = info.compCompHnd->getChildType(clsHnd, &childClassHandle);
3976 return ByRef(verMakeTypeInfo(childType, childClassHandle));
3982 { // If we have more precise information, use it
3983 return typeInfo(TI_STRUCT, clsHnd);
3987 return typeInfo(JITtype2tiType(ciType));
3993 /******************************************************************************/
3995 typeInfo Compiler::verMakeTypeInfo(CORINFO_CLASS_HANDLE clsHnd, bool bashStructToRef /* = false */)
3997 if (clsHnd == nullptr)
4002 // Byrefs should only occur in method and local signatures, which are accessed
4003 // using ICorClassInfo and ICorClassInfo.getChildType.
4004 // So findClass() and getClassAttribs() should not be called for byrefs
4006 if (JITtype2varType(info.compCompHnd->asCorInfoType(clsHnd)) == TYP_BYREF)
4008 assert(!"Did findClass() return a Byref?");
4012 unsigned attribs = info.compCompHnd->getClassAttribs(clsHnd);
4014 if (attribs & CORINFO_FLG_VALUECLASS)
4016 CorInfoType t = info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd);
4018 // Meta-data validation should ensure that CORINFO_TYPE_BYREF does not
4019 // occur here, so we may want to change this to an assert instead.
4020 if (t == CORINFO_TYPE_VOID || t == CORINFO_TYPE_BYREF || t == CORINFO_TYPE_PTR)
4025 #ifdef _TARGET_64BIT_
4026 if (t == CORINFO_TYPE_NATIVEINT || t == CORINFO_TYPE_NATIVEUINT)
4028 return typeInfo::nativeInt();
4030 #endif // _TARGET_64BIT_
4032 if (t != CORINFO_TYPE_UNDEF)
4034 return (typeInfo(JITtype2tiType(t)));
4036 else if (bashStructToRef)
4038 return (typeInfo(TI_REF, clsHnd));
4042 return (typeInfo(TI_STRUCT, clsHnd));
4045 else if (attribs & CORINFO_FLG_GENERIC_TYPE_VARIABLE)
4047 // See comment in _typeInfo.h for why we do it this way.
4048 return (typeInfo(TI_REF, clsHnd, true));
4052 return (typeInfo(TI_REF, clsHnd));
4056 /******************************************************************************/
4057 BOOL Compiler::verIsSDArray(typeInfo ti)
4059 if (ti.IsNullObjRef())
4060 { // nulls are SD arrays
4064 if (!ti.IsType(TI_REF))
4069 if (!info.compCompHnd->isSDArray(ti.GetClassHandleForObjRef()))
4076 /******************************************************************************/
4077 /* Given 'arrayObjectType' which is an array type, fetch the element type. */
4078 /* Returns an error type if anything goes wrong */
4080 typeInfo Compiler::verGetArrayElemType(typeInfo arrayObjectType)
4082 assert(!arrayObjectType.IsNullObjRef()); // you need to check for null explicitly since that is a success case
4084 if (!verIsSDArray(arrayObjectType))
4089 CORINFO_CLASS_HANDLE childClassHandle = nullptr;
4090 CorInfoType ciType = info.compCompHnd->getChildType(arrayObjectType.GetClassHandleForObjRef(), &childClassHandle);
4092 return verMakeTypeInfo(ciType, childClassHandle);
4095 /*****************************************************************************
4097 typeInfo Compiler::verParseArgSigToTypeInfo(CORINFO_SIG_INFO* sig, CORINFO_ARG_LIST_HANDLE args)
4099 CORINFO_CLASS_HANDLE classHandle;
4100 CorInfoType ciType = strip(info.compCompHnd->getArgType(sig, args, &classHandle));
4102 var_types type = JITtype2varType(ciType);
4103 if (varTypeIsGC(type))
4105 // For efficiency, getArgType only returns something in classHandle for
4106 // value types. For other types that have additional type info, you
4107 // have to call back explicitly
4108 classHandle = info.compCompHnd->getArgClass(sig, args);
4111 NO_WAY("Could not figure out Class specified in argument or local signature");
4115 return verMakeTypeInfo(ciType, classHandle);
4118 /*****************************************************************************/
4120 // This does the expensive check to figure out whether the method
4121 // needs to be verified. It is called only when we fail verification,
4122 // just before throwing the verification exception.
4124 BOOL Compiler::verNeedsVerification()
4126 // If we have previously determined that verification is NOT needed
4127 // (for example in Compiler::compCompile), that means verification is really not needed.
4128 // Return the same decision we made before.
4129 // (Note: This literally means that tiVerificationNeeded can never go from 0 to 1.)
4131 if (!tiVerificationNeeded)
4133 return tiVerificationNeeded;
4136 assert(tiVerificationNeeded);
4138 // Ok, we haven't concluded that verification is NOT needed. Consult the EE now to
4139 // obtain the answer.
4140 CorInfoCanSkipVerificationResult canSkipVerificationResult =
4141 info.compCompHnd->canSkipMethodVerification(info.compMethodHnd);
4143 // canSkipVerification will return one of the following three values:
4144 // CORINFO_VERIFICATION_CANNOT_SKIP = 0, // Cannot skip verification during jit time.
4145 // CORINFO_VERIFICATION_CAN_SKIP = 1, // Can skip verification during jit time.
4146 // CORINFO_VERIFICATION_RUNTIME_CHECK = 2, // Skip verification during jit time,
4147 // but need to insert a callout to the VM to ask during runtime
4148 // whether to skip verification or not.
4150 // Set tiRuntimeCalloutNeeded if canSkipVerification() instructs us to insert a callout for runtime check
4151 if (canSkipVerificationResult == CORINFO_VERIFICATION_RUNTIME_CHECK)
4153 tiRuntimeCalloutNeeded = true;
4156 if (canSkipVerificationResult == CORINFO_VERIFICATION_DONT_JIT)
4158 // Dev10 706080 - Testers don't like the assert, so just silence it
4159 // by not using the macros that invoke debugAssert.
4163 // When tiVerificationNeeded is true, JIT will do the verification during JIT time.
4164 // The following line means we will NOT do jit time verification if canSkipVerification
4165 // returns CORINFO_VERIFICATION_CAN_SKIP or CORINFO_VERIFICATION_RUNTIME_CHECK.
4166 tiVerificationNeeded = (canSkipVerificationResult == CORINFO_VERIFICATION_CANNOT_SKIP);
4167 return tiVerificationNeeded;
4170 BOOL Compiler::verIsByRefLike(const typeInfo& ti)
4176 if (!ti.IsType(TI_STRUCT))
4180 return info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_CONTAINS_STACK_PTR;
4183 BOOL Compiler::verIsSafeToReturnByRef(const typeInfo& ti)
4185 if (ti.IsPermanentHomeByRef())
4195 BOOL Compiler::verIsBoxable(const typeInfo& ti)
4197 return (ti.IsPrimitiveType() || ti.IsObjRef() // includes boxed generic type variables
4198 || ti.IsUnboxedGenericTypeVar() ||
4199 (ti.IsType(TI_STRUCT) &&
4200 // exclude byreflike structs
4201 !(info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_CONTAINS_STACK_PTR)));
4204 // Is it a boxed value type?
4205 bool Compiler::verIsBoxedValueType(typeInfo ti)
4207 if (ti.GetType() == TI_REF)
4209 CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandleForObjRef();
4210 return !!eeIsValueClass(clsHnd);
4218 /*****************************************************************************
4220 * Check if a TailCall is legal.
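 *
 *  Illustrative IL shape being validated below (a sketch only; 'Foo' and
 *  'Bar' are hypothetical names, not taken from this code):
 *
 *      ldarg.1
 *      tail.
 *      call   int32 Foo::Bar(int32)
 *      ret
 *
 *  The checks below reject byref-like arguments and 'this' pointers,
 *  constrained calls, caller/callee return type mismatches, and any extra
 *  items left on the evaluation stack besides the call's own arguments.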
4223 bool Compiler::verCheckTailCallConstraint(
4225 CORINFO_RESOLVED_TOKEN* pResolvedToken,
4226 CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, // Is this a "constrained." call on a type parameter?
4227 bool speculative // If true, won't throw if verification fails. Instead it will
4228 // return false to the caller.
4229 // If false, it will throw.
4233 CORINFO_SIG_INFO sig;
4234 unsigned int popCount = 0; // we can't pop the stack since impImportCall needs it, so
4235 // this counter is used to keep track of how many items have been
4236 // virtually popped
4238 CORINFO_METHOD_HANDLE methodHnd = nullptr;
4239 CORINFO_CLASS_HANDLE methodClassHnd = nullptr;
4240 unsigned methodClassFlgs = 0;
4242 assert(impOpcodeIsCallOpcode(opcode));
4244 if (compIsForInlining())
4249 // for calli, VerifyOrReturn that this is not a virtual method
4250 if (opcode == CEE_CALLI)
4252 /* Get the call sig */
4253 eeGetSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &sig);
4255 // We don't know the target method, so we have to infer the flags, or
4256 // assume the worst-case.
4257 mflags = (sig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC;
4261 methodHnd = pResolvedToken->hMethod;
4263 mflags = info.compCompHnd->getMethodAttribs(methodHnd);
4265 // When verifying generic code we pair the method handle with its
4266 // owning class to get the exact method signature.
4267 methodClassHnd = pResolvedToken->hClass;
4268 assert(methodClassHnd);
4270 eeGetMethodSig(methodHnd, &sig, methodClassHnd);
4272 // opcode specific check
4273 methodClassFlgs = info.compCompHnd->getClassAttribs(methodClassHnd);
4276 // We must have obtained methodClassHnd if opcode is not CEE_CALLI
4277 assert((methodHnd != nullptr && methodClassHnd != nullptr) || opcode == CEE_CALLI);
4279 if ((sig.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
4281 eeGetCallSiteSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &sig);
4284 // check compatibility of the arguments
4285 unsigned int argCount;
4286 argCount = sig.numArgs;
4287 CORINFO_ARG_LIST_HANDLE args;
4291 typeInfo tiDeclared = verParseArgSigToTypeInfo(&sig, args).NormaliseForStack();
4293 // check that the argument is not a byref for tailcalls
4294 VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclared), "tailcall on byrefs", speculative);
4296 // For unsafe code, we might have parameters containing pointers to stack locations.
4297 // Disallow the tailcall in this case.
4298 CORINFO_CLASS_HANDLE classHandle;
4299 CorInfoType ciType = strip(info.compCompHnd->getArgType(&sig, args, &classHandle));
4300 VerifyOrReturnSpeculative(ciType != CORINFO_TYPE_PTR, "tailcall on CORINFO_TYPE_PTR", speculative);
4302 args = info.compCompHnd->getArgNext(args);
4306 popCount += sig.numArgs;
4308 // check for 'this', which is present on non-static methods not called via NEWOBJ
4309 if (!(mflags & CORINFO_FLG_STATIC))
4311 // Always update the popCount.
4312 // This is crucial for the stack calculation to be correct.
4313 typeInfo tiThis = impStackTop(popCount).seTypeInfo;
4316 if (opcode == CEE_CALLI)
4318 // For CALLI, we don't know the methodClassHnd. Therefore, let's check the "this" object
4320 if (tiThis.IsValueClass())
4324 VerifyOrReturnSpeculative(!verIsByRefLike(tiThis), "byref in tailcall", speculative);
4328 // Check type compatibility of the this argument
4329 typeInfo tiDeclaredThis = verMakeTypeInfo(methodClassHnd);
4330 if (tiDeclaredThis.IsValueClass())
4332 tiDeclaredThis.MakeByRef();
4335 VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclaredThis), "byref in tailcall", speculative);
4339 // Tail calls on constrained calls should be illegal too:
4340 // when instantiated at a value type, a constrained call may pass the address of a stack allocated value
4341 VerifyOrReturnSpeculative(!pConstrainedResolvedToken, "byref in constrained tailcall", speculative);
4343 // Get the exact view of the signature for an array method
4344 if (sig.retType != CORINFO_TYPE_VOID)
4346 if (methodClassFlgs & CORINFO_FLG_ARRAY)
4348 assert(opcode != CEE_CALLI);
4349 eeGetCallSiteSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &sig);
4353 typeInfo tiCalleeRetType = verMakeTypeInfo(sig.retType, sig.retTypeClass);
4354 typeInfo tiCallerRetType =
4355 verMakeTypeInfo(info.compMethodInfo->args.retType, info.compMethodInfo->args.retTypeClass);
4357 // void return type gets morphed into the error type, so we have to treat it specially here
4358 if (sig.retType == CORINFO_TYPE_VOID)
4360 VerifyOrReturnSpeculative(info.compMethodInfo->args.retType == CORINFO_TYPE_VOID, "tailcall return mismatch",
4365 VerifyOrReturnSpeculative(tiCompatibleWith(NormaliseForStack(tiCalleeRetType),
4366 NormaliseForStack(tiCallerRetType), true),
4367 "tailcall return mismatch", speculative);
4370 // for tailcall, stack must be empty
4371 VerifyOrReturnSpeculative(verCurrentState.esStackDepth == popCount, "stack non-empty on tailcall", speculative);
4373 return true; // Yes, tailcall is legal
4376 /*****************************************************************************
4378 * Checks the IL verification rules for the call
4381 void Compiler::verVerifyCall(OPCODE opcode,
4382 CORINFO_RESOLVED_TOKEN* pResolvedToken,
4383 CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
4386 const BYTE* delegateCreateStart,
4387 const BYTE* codeAddr,
4388 CORINFO_CALL_INFO* callInfo DEBUGARG(const char* methodName))
4391 CORINFO_SIG_INFO* sig = nullptr;
4392 unsigned int popCount = 0; // we can't pop the stack since impImportCall needs it, so
4393 // this counter is used to keep track of how many items have been
4394 // virtually popped
4396 // for calli, VerifyOrReturn that this is not a virtual method
4397 if (opcode == CEE_CALLI)
4399 Verify(false, "Calli not verifiable");
4403 //<NICE> It would be nice to cache the rest of it, but eeFindMethod is the big ticket item.
4404 mflags = callInfo->verMethodFlags;
4406 sig = &callInfo->verSig;
4408 if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
4410 eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
4413 // opcode specific check
4414 unsigned methodClassFlgs = callInfo->classFlags;
4418 // cannot do callvirt on valuetypes
4419 VerifyOrReturn(!(methodClassFlgs & CORINFO_FLG_VALUECLASS), "callVirt on value class");
4420 VerifyOrReturn(sig->hasThis(), "CallVirt on static method");
4425 assert(!tailCall); // Importer should not allow this
4426 VerifyOrReturn((mflags & CORINFO_FLG_CONSTRUCTOR) && !(mflags & CORINFO_FLG_STATIC),
4427 "newobj must be on instance");
4429 if (methodClassFlgs & CORINFO_FLG_DELEGATE)
4431 VerifyOrReturn(sig->numArgs == 2, "wrong number args to delegate ctor");
4432 typeInfo tiDeclaredObj = verParseArgSigToTypeInfo(sig, sig->args).NormaliseForStack();
4433 typeInfo tiDeclaredFtn =
4434 verParseArgSigToTypeInfo(sig, info.compCompHnd->getArgNext(sig->args)).NormaliseForStack();
4435 VerifyOrReturn(tiDeclaredFtn.IsNativeIntType(), "ftn arg needs to be a native int type");
4437 assert(popCount == 0);
4438 typeInfo tiActualObj = impStackTop(1).seTypeInfo;
4439 typeInfo tiActualFtn = impStackTop(0).seTypeInfo;
4441 VerifyOrReturn(tiActualFtn.IsMethod(), "delegate needs method as first arg");
4442 VerifyOrReturn(tiCompatibleWith(tiActualObj, tiDeclaredObj, true), "delegate object type mismatch");
4443 VerifyOrReturn(tiActualObj.IsNullObjRef() || tiActualObj.IsType(TI_REF),
4444 "delegate object type mismatch");
4446 CORINFO_CLASS_HANDLE objTypeHandle =
4447 tiActualObj.IsNullObjRef() ? nullptr : tiActualObj.GetClassHandleForObjRef();
4449 // the method signature must be compatible with the delegate's invoke method
4451 // check that for virtual functions, the type of the object used to get the
4452 // ftn ptr is the same as the type of the object passed to the delegate ctor.
4453 // since this is a bit of work to determine in general, we pattern match stylized
4454 // code sequences
4456 // the delegate creation code check, which used to be done later, is now done here
4457 // so we can read delegateMethodRef directly from
4458 // the preceding LDFTN or CEE_LDVIRTFN instruction sequence;
4459 // we then use it in our call to isCompatibleDelegate().
4461 mdMemberRef delegateMethodRef = mdMemberRefNil;
4462 VerifyOrReturn(verCheckDelegateCreation(delegateCreateStart, codeAddr, delegateMethodRef),
4463 "must create delegates with certain IL");
4465 CORINFO_RESOLVED_TOKEN delegateResolvedToken;
4466 delegateResolvedToken.tokenContext = impTokenLookupContextHandle;
4467 delegateResolvedToken.tokenScope = info.compScopeHnd;
4468 delegateResolvedToken.token = delegateMethodRef;
4469 delegateResolvedToken.tokenType = CORINFO_TOKENKIND_Method;
4470 info.compCompHnd->resolveToken(&delegateResolvedToken);
4472 CORINFO_CALL_INFO delegateCallInfo;
4473 eeGetCallInfo(&delegateResolvedToken, nullptr /* constraint typeRef */,
4474 addVerifyFlag(CORINFO_CALLINFO_SECURITYCHECKS), &delegateCallInfo);
4476 BOOL isOpenDelegate = FALSE;
4477 VerifyOrReturn(info.compCompHnd->isCompatibleDelegate(objTypeHandle, delegateResolvedToken.hClass,
4478 tiActualFtn.GetMethod(), pResolvedToken->hClass,
4480 "function incompatible with delegate");
4482 // check the constraints on the target method
4483 VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(delegateResolvedToken.hClass),
4484 "delegate target has unsatisfied class constraints");
4485 VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(delegateResolvedToken.hClass,
4486 tiActualFtn.GetMethod()),
4487 "delegate target has unsatisfied method constraints");
4489 // See ECMA spec section 1.8.1.5.2 (Delegating via instance dispatch)
4490 // for additional verification rules for delegates
4491 CORINFO_METHOD_HANDLE actualMethodHandle = tiActualFtn.GetMethod();
4492 DWORD actualMethodAttribs = info.compCompHnd->getMethodAttribs(actualMethodHandle);
4493 if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr))
4496 if ((actualMethodAttribs & CORINFO_FLG_VIRTUAL) && ((actualMethodAttribs & CORINFO_FLG_FINAL) == 0)
4498 && StrictCheckForNonVirtualCallToVirtualMethod()
4502 if (info.compCompHnd->shouldEnforceCallvirtRestriction(info.compScopeHnd))
4504 VerifyOrReturn(tiActualObj.IsThisPtr() && lvaIsOriginalThisReadOnly() ||
4505 verIsBoxedValueType(tiActualObj),
4506 "The 'this' parameter to the call must be either the calling method's "
4507 "'this' parameter or "
4508 "a boxed value type.");
4513 if (actualMethodAttribs & CORINFO_FLG_PROTECTED)
4515 BOOL targetIsStatic = actualMethodAttribs & CORINFO_FLG_STATIC;
4517 Verify(targetIsStatic || !isOpenDelegate,
4518 "Unverifiable creation of an open instance delegate for a protected member.");
4520 CORINFO_CLASS_HANDLE instanceClassHnd = (tiActualObj.IsNullObjRef() || targetIsStatic)
4522 : tiActualObj.GetClassHandleForObjRef();
4524 // In the case of protected methods, it is a requirement that the 'this'
4525 // pointer be a subclass of the current context. Perform this check.
4526 Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
4527 "Accessing protected method through wrong type.");
4532 // fall thru to default checks
4534 VerifyOrReturn(!(mflags & CORINFO_FLG_ABSTRACT), "method abstract");
4536 VerifyOrReturn(!((mflags & CORINFO_FLG_CONSTRUCTOR) && (methodClassFlgs & CORINFO_FLG_DELEGATE)),
4537 "can only newobj a delegate constructor");
4539 // check compatibility of the arguments
4540 unsigned int argCount;
4541 argCount = sig->numArgs;
4542 CORINFO_ARG_LIST_HANDLE args;
4546 typeInfo tiActual = impStackTop(popCount + argCount).seTypeInfo;
4548 typeInfo tiDeclared = verParseArgSigToTypeInfo(sig, args).NormaliseForStack();
4549 VerifyOrReturn(tiCompatibleWith(tiActual, tiDeclared, true), "type mismatch");
4551 args = info.compCompHnd->getArgNext(args);
4557 popCount += sig->numArgs;
4559 // check for 'this', which is present on non-static methods not called via NEWOBJ
4560 CORINFO_CLASS_HANDLE instanceClassHnd = info.compClassHnd;
4561 if (!(mflags & CORINFO_FLG_STATIC) && (opcode != CEE_NEWOBJ))
4563 typeInfo tiThis = impStackTop(popCount).seTypeInfo;
4566 // If it is null, we assume we can access it (since it will AV shortly)
4567 // If it is anything but a reference class, there is no hierarchy, so
4568 // again, we don't need the precise instance class to compute 'protected' access
4569 if (tiThis.IsType(TI_REF))
4571 instanceClassHnd = tiThis.GetClassHandleForObjRef();
4574 // Check type compatibility of the this argument
4575 typeInfo tiDeclaredThis = verMakeTypeInfo(pResolvedToken->hClass);
4576 if (tiDeclaredThis.IsValueClass())
4578 tiDeclaredThis.MakeByRef();
4581 // If this is a call to the base class .ctor, set thisPtr Init for
4582 // this block.
4583 if (mflags & CORINFO_FLG_CONSTRUCTOR)
4585 if (verTrackObjCtorInitState && tiThis.IsThisPtr() &&
4586 verIsCallToInitThisPtr(info.compClassHnd, pResolvedToken->hClass))
4588 assert(verCurrentState.thisInitialized !=
4589 TIS_Bottom); // This should never be the case just from the logic of the verifier.
4590 VerifyOrReturn(verCurrentState.thisInitialized == TIS_Uninit,
4591 "Call to base class constructor when 'this' is possibly initialized");
4592 // Otherwise, 'this' is now initialized.
4593 verCurrentState.thisInitialized = TIS_Init;
4594 tiThis.SetInitialisedObjRef();
4598 // We allow direct calls to value type constructors
4599 // NB: we have to check that the contents of tiThis is a value type, otherwise we could use a
4600 // constrained callvirt to illegally re-enter a .ctor on a value of reference type.
4601 VerifyOrReturn(tiThis.IsByRef() && DereferenceByRef(tiThis).IsValueClass(),
4602 "Bad call to a constructor");
4606 if (pConstrainedResolvedToken != nullptr)
4608 VerifyOrReturn(tiThis.IsByRef(), "non-byref this type in constrained call");
4610 typeInfo tiConstraint = verMakeTypeInfo(pConstrainedResolvedToken->hClass);
4612 // We just dereference this and test for equality
4613 tiThis.DereferenceByRef();
4614 VerifyOrReturn(typeInfo::AreEquivalent(tiThis, tiConstraint),
4615 "this type mismatch with constrained type operand");
4617 // Now pretend the this type is the boxed constrained type, for the sake of subsequent checks
4618 tiThis = typeInfo(TI_REF, pConstrainedResolvedToken->hClass);
4621 // To support direct calls on readonly byrefs, just pretend tiDeclaredThis is readonly too
4622 if (tiDeclaredThis.IsByRef() && tiThis.IsReadonlyByRef())
4624 tiDeclaredThis.SetIsReadonlyByRef();
4627 VerifyOrReturn(tiCompatibleWith(tiThis, tiDeclaredThis, true), "this type mismatch");
4629 if (tiThis.IsByRef())
4631 // Find the actual type where the method exists (as opposed to what is declared
4632 // in the metadata). This is to prevent passing a byref as the "this" argument
4633 // while calling methods like System.ValueType.GetHashCode() which expect boxed objects.
4635 CORINFO_CLASS_HANDLE actualClassHnd = info.compCompHnd->getMethodClass(pResolvedToken->hMethod);
4636 VerifyOrReturn(eeIsValueClass(actualClassHnd),
4637 "Call to base type of valuetype (which is never a valuetype)");
4640 // Rules for non-virtual call to a non-final virtual method:
4643 // The "this" pointer is considered to be "possibly written" if
4644 // 1. Its address has been taken (LDARGA 0) anywhere in the method.
4646 // 2. It has been stored to (STARG.0) anywhere in the method.
4648 // A non-virtual call to a non-final virtual method is only allowed if
4649 // 1. The this pointer passed to the callee is an instance of a boxed value type.
4651 // 2. The this pointer passed to the callee is the current method's this pointer.
4652 // (and) The current method's this pointer is not "possibly written".
4654 // Thus the rule is that if you assign to this ANYWHERE you can't make "base" calls to
4655 // virtual methods. (Luckily this does not affect .ctors, since they are not virtual).
4656 // This is stronger than is strictly needed, but implementing a laxer rule is significantly
4657 // harder and more error prone.
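
// For illustration (a hypothetical C# sketch, not from this code):
//
//     class Derived : Base
//     {
//         public override string ToString()
//         {
//             return base.ToString(); // non-virtual CEE_CALL to the virtual Base::ToString
//         }
//     }
//
// The 'call' above is verifiable only because the 'this' passed along is the
// caller's own this pointer and the method never takes its address (LDARGA 0)
// nor stores to it (STARG 0).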
4659 if (opcode == CEE_CALL && (mflags & CORINFO_FLG_VIRTUAL) && ((mflags & CORINFO_FLG_FINAL) == 0)
4661 && StrictCheckForNonVirtualCallToVirtualMethod()
4665 if (info.compCompHnd->shouldEnforceCallvirtRestriction(info.compScopeHnd))
4668 tiThis.IsThisPtr() && lvaIsOriginalThisReadOnly() || verIsBoxedValueType(tiThis),
4669 "The 'this' parameter to the call must be either the calling method's 'this' parameter or "
4670 "a boxed value type.");
4675 // check any constraints on the callee's class and type parameters
4676 VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(pResolvedToken->hClass),
4677 "method has unsatisfied class constraints");
4678 VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(pResolvedToken->hClass, pResolvedToken->hMethod),
4679 "method has unsatisfied method constraints");
4681 if (mflags & CORINFO_FLG_PROTECTED)
4683 VerifyOrReturn(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
4684 "Can't access protected method");
4687 // Get the exact view of the signature for an array method
4688 if (sig->retType != CORINFO_TYPE_VOID)
4690 eeGetMethodSig(pResolvedToken->hMethod, sig, pResolvedToken->hClass);
4693 // "readonly." prefixed calls only allowed for the Address operation on arrays.
4694 // The methods supported by array types are under the control of the EE
4695 // so we can trust that only the Address operation returns a byref.
4698 typeInfo tiCalleeRetType = verMakeTypeInfo(sig->retType, sig->retTypeClass);
4699 VerifyOrReturn((methodClassFlgs & CORINFO_FLG_ARRAY) && tiCalleeRetType.IsByRef(),
4700 "unexpected use of readonly prefix");
4703 // Verify the tailcall
4706 verCheckTailCallConstraint(opcode, pResolvedToken, pConstrainedResolvedToken, false);
4710 /*****************************************************************************
4711 * Checks that a delegate creation is done using the following pattern:
4712 * dup
4713 * ldvirtftn targetMemberRef
4714 * or
4715 * ldftn targetMemberRef
4717 * 'delegateCreateStart' points at the last dup or ldftn in this basic block (null if
4718 * not in this basic block)
4720 * targetMemberRef is read from the code sequence.
4721 * targetMemberRef is validated iff verificationNeeded.
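 *
 * For illustration, the accepted sequences look roughly like this (a sketch
 * with hypothetical names; D is the delegate type, C the target's type):
 *
 *     ldftn      instance void C::M()      // non-virtual target
 * or
 *     dup
 *     ldvirtftn  instance void C::M()      // virtual target
 * followed by
 *     newobj     instance void D::.ctor(object, native int)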
4724 BOOL Compiler::verCheckDelegateCreation(const BYTE* delegateCreateStart,
4725 const BYTE* codeAddr,
4726 mdMemberRef& targetMemberRef)
4728 if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr))
4730 targetMemberRef = getU4LittleEndian(&delegateCreateStart[2]);
4733 else if (impIsDUP_LDVIRTFTN_TOKEN(delegateCreateStart, codeAddr))
4735 targetMemberRef = getU4LittleEndian(&delegateCreateStart[3]);
4742 typeInfo Compiler::verVerifySTIND(const typeInfo& tiTo, const typeInfo& value, const typeInfo& instrType)
4744 Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
4745 typeInfo ptrVal = verVerifyLDIND(tiTo, instrType);
4746 typeInfo normPtrVal = typeInfo(ptrVal).NormaliseForStack();
4747 if (!tiCompatibleWith(value, normPtrVal, true))
4749 Verify(tiCompatibleWith(value, normPtrVal, true), "type mismatch");
4750 compUnsafeCastUsed = true;
4755 typeInfo Compiler::verVerifyLDIND(const typeInfo& ptr, const typeInfo& instrType)
4757 assert(!instrType.IsStruct());
4762 ptrVal = DereferenceByRef(ptr);
4763 if (instrType.IsObjRef() && !ptrVal.IsObjRef())
4765 Verify(false, "bad pointer");
4766 compUnsafeCastUsed = true;
4768 else if (!instrType.IsObjRef() && !typeInfo::AreEquivalent(instrType, ptrVal))
4770 Verify(false, "pointer not consistent with instr");
4771 compUnsafeCastUsed = true;
4776 Verify(false, "pointer not byref");
4777 compUnsafeCastUsed = true;
4783 // Verify that the field is used properly. 'tiThis' is NULL for statics,
4784 // 'fieldFlags' is the field's attributes, and mutator is TRUE if it is a
4785 // ld*flda or a st*fld.
4786 // 'enclosingClass' is given if we are accessing a field in some specific type.
4788 void Compiler::verVerifyField(CORINFO_RESOLVED_TOKEN* pResolvedToken,
4789 const CORINFO_FIELD_INFO& fieldInfo,
4790 const typeInfo* tiThis,
4792 BOOL allowPlainStructAsThis)
4794 CORINFO_CLASS_HANDLE enclosingClass = pResolvedToken->hClass;
4795 unsigned fieldFlags = fieldInfo.fieldFlags;
4796 CORINFO_CLASS_HANDLE instanceClass =
4797 info.compClassHnd; // for statics, we imagine the instance is the current class.
4799 bool isStaticField = ((fieldFlags & CORINFO_FLG_FIELD_STATIC) != 0);
4802 Verify(!(fieldFlags & CORINFO_FLG_FIELD_UNMANAGED), "mutating an RVA based static");
4803 if ((fieldFlags & CORINFO_FLG_FIELD_FINAL))
4805 Verify((info.compFlags & CORINFO_FLG_CONSTRUCTOR) && enclosingClass == info.compClassHnd &&
4806 info.compIsStatic == isStaticField,
4807 "bad use of initonly field (set or address taken)");
4811 if (tiThis == nullptr)
4813 Verify(isStaticField, "used static opcode with non-static field");
4817 typeInfo tThis = *tiThis;
4819 if (allowPlainStructAsThis && tThis.IsValueClass())
4824 // If it is null, we assume we can access it (since it will AV shortly)
4825 // If it is anything but a reference class, there is no hierarchy, so
4826 // again, we don't need the precise instance class to compute 'protected' access
4827 if (tiThis->IsType(TI_REF))
4829 instanceClass = tiThis->GetClassHandleForObjRef();
4832 // Note that even if the field is static, we require that the this pointer
4833 // satisfy the same constraints as a non-static field. This happens to
4834 // be simpler and seems reasonable
4835 typeInfo tiDeclaredThis = verMakeTypeInfo(enclosingClass);
4836 if (tiDeclaredThis.IsValueClass())
4838 tiDeclaredThis.MakeByRef();
4840 // we allow read-only tThis, on any field access (even stores!), because if the
4841 // class implementor wants to prohibit stores they should make the field private.
4842 // we do this by setting the read-only bit on the type we compare tThis to.
4843 tiDeclaredThis.SetIsReadonlyByRef();
4845 else if (verTrackObjCtorInitState && tThis.IsThisPtr())
4847 // Any field access is legal on "uninitialized" this pointers.
4848 // The easiest way to implement this is to simply set the
4849 // initialized bit for the duration of the type check on the
4850 // field access only. It does not change the state of the "this"
4851 // for the function as a whole. Note that the "tThis" is a copy
4852 // of the original "this" type (*tiThis) passed in.
4853 tThis.SetInitialisedObjRef();
4856 Verify(tiCompatibleWith(tThis, tiDeclaredThis, true), "this type mismatch");
4859 // Presently the JIT does not check that we don't store or take the address of init-only fields
4860 // since we cannot guarantee their immutability and it is not a security issue.
4862 // check any constraints on the field's class --- accessing the field might cause a class constructor to run.
4863 VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(enclosingClass),
4864 "field has unsatisfied class constraints");
4865 if (fieldFlags & CORINFO_FLG_FIELD_PROTECTED)
4867 Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClass),
4868 "Accessing protected method through wrong type.");
4872 void Compiler::verVerifyCond(const typeInfo& tiOp1, const typeInfo& tiOp2, unsigned opcode)
4874 if (tiOp1.IsNumberType())
4876 #ifdef _TARGET_64BIT_
4877 Verify(tiCompatibleWith(tiOp1, tiOp2, true), "Cond type mismatch");
4878 #else // !_TARGET_64BIT_
4879 // [10/17/2013] Consider changing this: to put on my verification lawyer hat,
4880 // this is non-conforming to the ECMA Spec: types don't have to be equivalent,
4881 // but compatible, since we can coalesce native int with int32 (see section III.1.5).
4882 Verify(typeInfo::AreEquivalent(tiOp1, tiOp2), "Cond type mismatch");
4883 #endif // !_TARGET_64BIT_
4885 else if (tiOp1.IsObjRef())
4897 Verify(FALSE, "Cond not allowed on object types");
4899 Verify(tiOp2.IsObjRef(), "Cond type mismatch");
4901 else if (tiOp1.IsByRef())
4903 Verify(tiOp2.IsByRef(), "Cond type mismatch");
4907 Verify(tiOp1.IsMethod() && tiOp2.IsMethod(), "Cond type mismatch");
4911 void Compiler::verVerifyThisPtrInitialised()
4913 if (verTrackObjCtorInitState)
4915 Verify(verCurrentState.thisInitialized == TIS_Init, "this ptr is not initialized");
4919 BOOL Compiler::verIsCallToInitThisPtr(CORINFO_CLASS_HANDLE context, CORINFO_CLASS_HANDLE target)
4921 // Either target == context, in this case calling an alternate .ctor
4922 // Or target is the immediate parent of context
4924 return ((target == context) || (target == info.compCompHnd->getParentType(context)));
4927 GenTreePtr Compiler::impImportLdvirtftn(GenTreePtr thisPtr,
4928 CORINFO_RESOLVED_TOKEN* pResolvedToken,
4929 CORINFO_CALL_INFO* pCallInfo)
4931 if ((pCallInfo->methodFlags & CORINFO_FLG_EnC) && !(pCallInfo->classFlags & CORINFO_FLG_INTERFACE))
4933 NO_WAY("Virtual call to a function added via EnC is not supported");
4936 #ifdef FEATURE_READYTORUN_COMPILER
4937 if (opts.IsReadyToRun() && !pCallInfo->exactContextNeedsRuntimeLookup)
4939 GenTreeCall* call = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_VIRTUAL_FUNC_PTR, TYP_I_IMPL, GTF_EXCEPT,
4940 gtNewArgList(thisPtr));
4942 call->setEntryPoint(pCallInfo->codePointerLookup.constLookup);
4948 // Get the exact descriptor for the static callsite
4949 GenTreePtr exactTypeDesc = impParentClassTokenToHandle(pResolvedToken);
4950 if (exactTypeDesc == nullptr)
4951 { // compDonotInline()
4955 GenTreePtr exactMethodDesc = impTokenToHandle(pResolvedToken);
4956 if (exactMethodDesc == nullptr)
4957 { // compDonotInline()
4961 GenTreeArgList* helpArgs = gtNewArgList(exactMethodDesc);
4963 helpArgs = gtNewListNode(exactTypeDesc, helpArgs);
4965 helpArgs = gtNewListNode(thisPtr, helpArgs);
4967 // Call helper function. This gets the target address of the final destination callsite.
4969 return gtNewHelperCallNode(CORINFO_HELP_VIRTUAL_FUNC_PTR, TYP_I_IMPL, GTF_EXCEPT, helpArgs);
4972 /*****************************************************************************
4974 * Build and import a box node
4977 void Compiler::impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken)
4979 // Get the tree for the type handle for the boxed object. In the case
4980 // of shared generic code or ngen'd code this might be an embedded
4982 // Note we can only do it if the class constructor has been called
4983 // We can always do it on primitive types
4985 GenTreePtr op1 = nullptr;
4986 GenTreePtr op2 = nullptr;
4989 impSpillSpecialSideEff();
4991 // Now get the expression to box from the stack.
4992 CORINFO_CLASS_HANDLE operCls;
4993 GenTreePtr exprToBox = impPopStack(operCls).val;
4995 CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pResolvedToken->hClass);
4996 if (boxHelper == CORINFO_HELP_BOX)
4998 // we are doing 'normal' boxing. This means that we can inline the box operation
4999 // Box(expr) gets morphed into
5000 // temp = new(clsHnd)
5001 // cpobj(temp+4, expr, clsHnd)
5003 // The code paths differ slightly below for structs and primitives because
5004 // "cpobj" differs in these cases. In one case you get
5005 // impAssignStructPtr(temp+4, expr, clsHnd)
5006 // and in the other you get
5007 // *(temp+4) = expr
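//
// For illustration (a sketch): a C# statement like 'object o = 42;' compiles
// to 'ldc.i4.s 42; box System.Int32' and takes the primitive path below,
// while boxing a user-defined struct takes the impAssignStructPtr path.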
5009 if (impBoxTempInUse || impBoxTemp == BAD_VAR_NUM)
5011 impBoxTemp = lvaGrabTemp(true DEBUGARG("Box Helper"));
5014 // needs to stay in use until this box expression is appended to
5015 // some other node. We approximate this by keeping it alive until
5016 // the opcode stack becomes empty
5017 impBoxTempInUse = true;
5019 #ifdef FEATURE_READYTORUN_COMPILER
5020 bool usingReadyToRunHelper = false;
5022 if (opts.IsReadyToRun())
5024 op1 = impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_NEW, TYP_REF);
5025 usingReadyToRunHelper = (op1 != nullptr);
5028 if (!usingReadyToRunHelper)
5031 // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
5032 // and the newfast call with a single call to a dynamic R2R cell that will:
5033 // 1) Load the context
5034 // 2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
5035 // 3) Allocate and return the new object for boxing
5036 // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
5038 // Ensure that the value class is restored
5039 op2 = impTokenToHandle(pResolvedToken, nullptr, TRUE /* mustRestoreHandle */);
5041 { // compDonotInline()
5045 op1 = gtNewHelperCallNode(info.compCompHnd->getNewHelper(pResolvedToken, info.compMethodHnd), TYP_REF, 0,
5049 /* Remember that this basic block contains 'new' of an object */
5050 compCurBB->bbFlags |= BBF_HAS_NEWOBJ;
5052 GenTreePtr asg = gtNewTempAssign(impBoxTemp, op1);
5054 GenTreePtr asgStmt = impAppendTree(asg, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
5056 op1 = gtNewLclvNode(impBoxTemp, TYP_REF);
5057 op2 = gtNewIconNode(sizeof(void*), TYP_I_IMPL);
5058 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, op2);
5060 if (varTypeIsStruct(exprToBox))
5062 assert(info.compCompHnd->getClassSize(pResolvedToken->hClass) == info.compCompHnd->getClassSize(operCls));
5063 op1 = impAssignStructPtr(op1, exprToBox, operCls, (unsigned)CHECK_SPILL_ALL);
5067 lclTyp = exprToBox->TypeGet();
5068 if (lclTyp == TYP_BYREF)
5070 lclTyp = TYP_I_IMPL;
5072 CorInfoType jitType = info.compCompHnd->asCorInfoType(pResolvedToken->hClass);
5073 if (impIsPrimitive(jitType))
5075 lclTyp = JITtype2varType(jitType);
5077 assert(genActualType(exprToBox->TypeGet()) == genActualType(lclTyp) ||
5078 varTypeIsFloating(lclTyp) == varTypeIsFloating(exprToBox->TypeGet()));
5079 var_types srcTyp = exprToBox->TypeGet();
5080 var_types dstTyp = lclTyp;
5082 if (srcTyp != dstTyp)
5084 assert((varTypeIsFloating(srcTyp) && varTypeIsFloating(dstTyp)) ||
5085 (varTypeIsIntegral(srcTyp) && varTypeIsIntegral(dstTyp)));
5086 exprToBox = gtNewCastNode(dstTyp, exprToBox, dstTyp);
5088 op1 = gtNewAssignNode(gtNewOperNode(GT_IND, lclTyp, op1), exprToBox);
5091 op2 = gtNewLclvNode(impBoxTemp, TYP_REF);
5092 op1 = gtNewOperNode(GT_COMMA, TYP_REF, op1, op2);
5094 // Record that this is a "box" node.
5095 op1 = new (this, GT_BOX) GenTreeBox(TYP_REF, op1, asgStmt);
5097 // If it is a value class, mark the "box" node. We can use this information
5098 // to optimise several cases:
5099 // "box(x) == null" --> false
5100 // "(box(x)).CallAnInterfaceMethod(...)" --> "(&x).CallAValueTypeMethod"
5101 // "(box(x)).CallAnObjectMethod(...)" --> "(&x).CallAValueTypeMethod"
5103 op1->gtFlags |= GTF_BOX_VALUE;
5104 assert(op1->IsBoxedValue());
5105 assert(asg->gtOper == GT_ASG);
5109 // Don't optimize, just call the helper and be done with it
5111 // Ensure that the value class is restored
5112 op2 = impTokenToHandle(pResolvedToken, nullptr, TRUE /* mustRestoreHandle */);
5114 { // compDonotInline()
5118 GenTreeArgList* args = gtNewArgList(op2, impGetStructAddr(exprToBox, operCls, (unsigned)CHECK_SPILL_ALL, true));
5119 op1 = gtNewHelperCallNode(boxHelper, TYP_REF, GTF_EXCEPT, args);
5122 /* Push the result back on the stack, */
5123 /* even if clsHnd is a value class we want the TI_REF */
5124 typeInfo tiRetVal = typeInfo(TI_REF, info.compCompHnd->getTypeForBox(pResolvedToken->hClass));
5125 impPushOnStack(op1, tiRetVal);
5128 //------------------------------------------------------------------------
5129 // impImportNewObjArray: Build and import `new` of multi-dimensional array
5132 // pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized
5133 // by a call to CEEInfo::resolveToken().
5134 // pCallInfo - The CORINFO_CALL_INFO that has been initialized
5135 // by a call to CEEInfo::getCallInfo().
5138 // The multi-dimensional array constructor arguments (array dimensions) are
5139 // pushed on the IL stack on entry to this method.
5142 // Multi-dimensional array constructors are imported as calls to a JIT
5143 // helper, not as regular calls.
5145 void Compiler::impImportNewObjArray(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo)
5147 GenTreePtr classHandle = impParentClassTokenToHandle(pResolvedToken);
5148 if (classHandle == nullptr)
5149 { // compDonotInline()
5153 assert(pCallInfo->sig.numArgs);
5156 GenTreeArgList* args;
5159 // There are two different JIT helpers that can be used to allocate
5160 // multi-dimensional arrays:
5162 // - CORINFO_HELP_NEW_MDARR - takes the array dimensions as varargs.
5163 // This variant is deprecated. It should be eventually removed.
5165 // - CORINFO_HELP_NEW_MDARR_NONVARARG - takes the array dimensions as
5166 // pointer to block of int32s. This variant is more portable.
5168 // The non-varargs helper is enabled for CoreRT only for now. Enabling this
5169 // unconditionally would require a ReadyToRun version bump.
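//
// As a sketch (hypothetical source): for 'new int[2,3]' the non-varargs path
// below builds roughly
//     CORINFO_HELP_NEW_MDARR_NONVARARG(arrayClsHnd, 2, &dimsTemp)
// where dimsTemp is the shared lvaNewObjArrayArgs block holding the two
// dimension arguments as consecutive int32s.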
5171 CLANG_FORMAT_COMMENT_ANCHOR;
5173 #if COR_JIT_EE_VERSION > 460
5174 if (!opts.IsReadyToRun() || IsTargetAbi(CORINFO_CORERT_ABI))
5176 LclVarDsc* newObjArrayArgsVar;
5178 // Reuse the temp used to pass the array dimensions to avoid bloating
5179 // the stack frame in case there are multiple calls to multi-dim array
5180 // constructors within a single method.
5181 if (lvaNewObjArrayArgs == BAD_VAR_NUM)
5183 lvaNewObjArrayArgs = lvaGrabTemp(false DEBUGARG("NewObjArrayArgs"));
5184 lvaTable[lvaNewObjArrayArgs].lvType = TYP_BLK;
5185 lvaTable[lvaNewObjArrayArgs].lvExactSize = 0;
5188 // Increase size of lvaNewObjArrayArgs to be the largest size needed to hold 'numArgs' integers
5189 // for our call to CORINFO_HELP_NEW_MDARR_NONVARARG.
5190 lvaTable[lvaNewObjArrayArgs].lvExactSize =
5191 max(lvaTable[lvaNewObjArrayArgs].lvExactSize, pCallInfo->sig.numArgs * sizeof(INT32));
5193 // The side-effects may include allocation of more multi-dimensional arrays. Spill all side-effects
5194 // to ensure that the shared lvaNewObjArrayArgs local variable is only ever used to pass arguments
5195 // to one allocation at a time.
5196 impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportNewObjArray"));
5199 // The arguments of the CORINFO_HELP_NEW_MDARR_NONVARARG helper are:
5200 // - Array class handle
5201 // - Number of dimension arguments
5202 // - Pointer to block of int32 dimensions - address of lvaNewObjArrayArgs temp.
5205 node = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK);
5206 node = gtNewOperNode(GT_ADDR, TYP_I_IMPL, node);
5208 // Pop dimension arguments from the stack one at a time and store them
5209 // into the lvaNewObjArrayArgs temp.
5210 for (int i = pCallInfo->sig.numArgs - 1; i >= 0; i--)
5212 GenTreePtr arg = impImplicitIorI4Cast(impPopStack().val, TYP_INT);
5214 GenTreePtr dest = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK);
5215 dest = gtNewOperNode(GT_ADDR, TYP_I_IMPL, dest);
5216 dest = gtNewOperNode(GT_ADD, TYP_I_IMPL, dest,
5217 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(INT32) * i));
5218 dest = gtNewOperNode(GT_IND, TYP_INT, dest);
5220 node = gtNewOperNode(GT_COMMA, node->TypeGet(), gtNewAssignNode(dest, arg), node);
5223 args = gtNewArgList(node);
5225 // pass number of arguments to the helper
5226 args = gtNewListNode(gtNewIconNode(pCallInfo->sig.numArgs), args);
5228 args = gtNewListNode(classHandle, args);
5230 node = gtNewHelperCallNode(CORINFO_HELP_NEW_MDARR_NONVARARG, TYP_REF, 0, args);
5236 // The varargs helper needs the type and method handles as last
5237 // and last-1 param (this is a cdecl call, so args will be
5238 // pushed in reverse order on the CPU stack)
5241 args = gtNewArgList(classHandle);
5243 // pass number of arguments to the helper
5244 args = gtNewListNode(gtNewIconNode(pCallInfo->sig.numArgs), args);
5246 unsigned argFlags = 0;
5247 args = impPopList(pCallInfo->sig.numArgs, &argFlags, &pCallInfo->sig, args);
5249 node = gtNewHelperCallNode(CORINFO_HELP_NEW_MDARR, TYP_REF, 0, args);
5251 // varargs, so we pop the arguments
5252 node->gtFlags |= GTF_CALL_POP_ARGS;
5255 // At the present time we don't track Caller pop arguments
5256 // that have GC references in them
5257 for (GenTreeArgList* temp = args; temp; temp = temp->Rest())
5259 assert(temp->Current()->gtType != TYP_REF);
5264 node->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
5265 node->gtCall.compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)pResolvedToken->hClass;
5267 // Remember that this basic block contains 'new' of a md array
5268 compCurBB->bbFlags |= BBF_HAS_NEWARRAY;
5270 impPushOnStack(node, typeInfo(TI_REF, pResolvedToken->hClass));
5273 GenTreePtr Compiler::impTransformThis(GenTreePtr thisPtr,
5274 CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
5275 CORINFO_THIS_TRANSFORM transform)
5279 case CORINFO_DEREF_THIS:
5281 GenTreePtr obj = thisPtr;
5283 // This does a LDIND on the obj, which should be a byref, pointing to a ref
5284 impBashVarAddrsToI(obj);
5285 assert(genActualType(obj->gtType) == TYP_I_IMPL || obj->gtType == TYP_BYREF);
5286 CorInfoType constraintTyp = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass);
5288 obj = gtNewOperNode(GT_IND, JITtype2varType(constraintTyp), obj);
5289 // ldind could point anywhere, for example a boxed class static int
5290 obj->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
5295 case CORINFO_BOX_THIS:
5297 // Constraint calls where there might be no
5298 // unboxed entry point require us to implement the call via helper.
5299 // These only occur when a possible target of the call
5300 // may have inherited an implementation of an interface
5301 // method from System.Object or System.ValueType. The EE does not provide us with
5302 // "unboxed" versions of these methods.
5304 GenTreePtr obj = thisPtr;
5306 assert(obj->TypeGet() == TYP_BYREF || obj->TypeGet() == TYP_I_IMPL);
5307 obj = gtNewObjNode(pConstrainedResolvedToken->hClass, obj);
5308 obj->gtFlags |= GTF_EXCEPT;
5310 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass);
5311 var_types objType = JITtype2varType(jitTyp);
5312 if (impIsPrimitive(jitTyp))
5314 if (obj->OperIsBlk())
5316 obj->ChangeOperUnchecked(GT_IND);
5318 // Obj could point anywhere, for example a boxed class static int
5319 obj->gtFlags |= GTF_IND_TGTANYWHERE;
5320 obj->gtOp.gtOp2 = nullptr; // must be zero for tree walkers
5323 obj->gtType = JITtype2varType(jitTyp);
5324 assert(varTypeIsArithmetic(obj->gtType));
5327 // This pushes on the dereferenced byref
5328 // This is then used immediately to box.
5329 impPushOnStack(obj, verMakeTypeInfo(pConstrainedResolvedToken->hClass).NormaliseForStack());
5331 // This pops off the byref-to-a-value-type remaining on the stack and
5332 // replaces it with a boxed object.
5333 // This is then used as the object to the virtual call immediately below.
5334 impImportAndPushBox(pConstrainedResolvedToken);
5335 if (compDonotInline())
5340 obj = impPopStack().val;
5343 case CORINFO_NO_THIS_TRANSFORM:
5349 //------------------------------------------------------------------------
5350 // impCanPInvokeInline: examine information from a call to see if the call
5351 // qualifies as an inline pinvoke.
5354 // block - block containing the call, or for inlinees, block
5355 // containing the call being inlined
5358 // true if this call qualifies as an inline pinvoke, false otherwise
5361 // Checks basic legality and then a number of ambient conditions
5362 // where we could pinvoke but choose not to
5364 bool Compiler::impCanPInvokeInline(BasicBlock* block)
5366 return impCanPInvokeInlineCallSite(block) && getInlinePInvokeEnabled() && (!opts.compDbgCode) &&
5367 (compCodeOpt() != SMALL_CODE) && (!opts.compNoPInvokeInlineCB) // profiler is preventing inline pinvoke
5371 //------------------------------------------------------------------------
5372 // impCanPInvokeInlineCallSite: basic legality checks using information
5373 // from a call to see if the call qualifies as an inline pinvoke.
5376 // block - block containing the call, or for inlinees, block
5377 // containing the call being inlined
5380 // true if this call can legally qualify as an inline pinvoke, false otherwise
5383 // For runtimes that support exception handling interop there are
5384 // restrictions on using inline pinvoke in handler regions.
5386 // * We have to disable pinvoke inlining inside of filters because
5387 // in case the main execution (i.e. in the try block) is inside
5388 // unmanaged code, we cannot reuse the inlined stub (we still need
5389 // the original state until we are in the catch handler)
5391 // * We disable pinvoke inlining inside handlers since the GSCookie
5392 // is in the inlined Frame (see
5393 // CORINFO_EE_INFO::InlinedCallFrameInfo::offsetOfGSCookie), but
5394 // this would not protect framelets/return-address of handlers.
5396 // These restrictions are currently also in place for CoreCLR but
5397 // can be relaxed when coreclr/#8459 is addressed.
5399 bool Compiler::impCanPInvokeInlineCallSite(BasicBlock* block)
5401 #ifdef _TARGET_AMD64_
5402 // On x64, we disable pinvoke inlining inside of try regions.
5403 // Here is the comment from JIT64 explaining why:
5405 // [VSWhidbey: 611015] - because the jitted code links in the
5406 // Frame (instead of the stub) we rely on the Frame not being
5407 // 'active' until inside the stub. This normally happens by the
5408 // stub setting the return address pointer in the Frame object
5409 // inside the stub. On a normal return, the return address
5410 // pointer is zeroed out so the Frame can be safely re-used, but
5411 // if an exception occurs, nobody zeros out the return address
5412 // pointer. Thus if we re-used the Frame object, it would go
5413 // 'active' as soon as we link it into the Frame chain.
5415 // Technically we only need to disable PInvoke inlining if we're
5416 // in a handler or if we're in a try body with a catch or
5417 // filter/except where other non-handler code in this method
5418 // might run and try to re-use the dirty Frame object.
5420 // A desktop test case where this seems to matter is
5421 // jit\jit64\ebvts\mcpp\sources2\ijw\__clrcall\vector_ctor_dtor.02\deldtor_clr.exe
5422 const bool inX64Try = block->hasTryIndex();
5424 const bool inX64Try = false;
5425 #endif // _TARGET_AMD64_
5427 return !inX64Try && !block->hasHndIndex();
5430 //------------------------------------------------------------------------
5431 // impCheckForPInvokeCall: examine a call to see if it is a pinvoke and if so
5432 // if it can be expressed as an inline pinvoke.
5435 // call - tree for the call
5436 // methHnd - handle for the method being called (may be null)
5437 // sig - signature of the method being called
5438 // mflags - method flags for the method being called
5439 // block - block containing the call, or for inlinees, block
5440 // containing the call being inlined
5443 // Sets GTF_CALL_M_PINVOKE on the call for pinvokes.
5445 // Also sets GTF_CALL_UNMANAGED on call for inline pinvokes if the
5446 // call passes a combination of legality and profitability checks.
5448 // If GTF_CALL_UNMANAGED is set, increments info.compCallUnmanaged
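//
// For illustration, a call to a pinvoke declared like this (hypothetical C#
// interop signature) arrives here with CORINFO_FLG_PINVOKE set in mflags:
//
//     [DllImport("kernel32.dll")]
//     static extern uint GetTickCount();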
5450 void Compiler::impCheckForPInvokeCall(
5451 GenTreePtr call, CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* sig, unsigned mflags, BasicBlock* block)
5453 CorInfoUnmanagedCallConv unmanagedCallConv;
5455 // If VM flagged it as Pinvoke, flag the call node accordingly
5456 if ((mflags & CORINFO_FLG_PINVOKE) != 0)
5458 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_PINVOKE;
5463 if ((mflags & CORINFO_FLG_PINVOKE) == 0 || (mflags & CORINFO_FLG_NOSECURITYWRAP) == 0)
5468 unmanagedCallConv = info.compCompHnd->getUnmanagedCallConv(methHnd);
5472 CorInfoCallConv callConv = CorInfoCallConv(sig->callConv & CORINFO_CALLCONV_MASK);
5473 if (callConv == CORINFO_CALLCONV_NATIVEVARARG)
5475 // Used by the IL Stubs.
5476 callConv = CORINFO_CALLCONV_C;
5478 static_assert_no_msg((unsigned)CORINFO_CALLCONV_C == (unsigned)CORINFO_UNMANAGED_CALLCONV_C);
5479 static_assert_no_msg((unsigned)CORINFO_CALLCONV_STDCALL == (unsigned)CORINFO_UNMANAGED_CALLCONV_STDCALL);
5480 static_assert_no_msg((unsigned)CORINFO_CALLCONV_THISCALL == (unsigned)CORINFO_UNMANAGED_CALLCONV_THISCALL);
5481 unmanagedCallConv = CorInfoUnmanagedCallConv(callConv);
5483 assert(!call->gtCall.gtCallCookie);
5486 if (unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_C && unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_STDCALL &&
5487 unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_THISCALL)
5491 optNativeCallCount++;
5493 if (opts.compMustInlinePInvokeCalli && methHnd == nullptr)
5495 // Always inline pinvoke.
5499 // Check legality and profitability.
5500 if (!impCanPInvokeInline(block))
5505 if (info.compCompHnd->pInvokeMarshalingRequired(methHnd, sig))
5510 // Size-speed tradeoff: don't use inline pinvoke at rarely
5511 // executed call sites. The non-inline version is more
5512 // compact.
5513 if (block->isRunRarely())
5519 JITLOG((LL_INFO1000000, "\nInline a CALLI PINVOKE call from method %s", info.compFullName));
5521 call->gtFlags |= GTF_CALL_UNMANAGED;
5522 info.compCallUnmanaged++;
5524 // AMD64 convention is the same for native and managed
5525 if (unmanagedCallConv == CORINFO_UNMANAGED_CALLCONV_C)
5527 call->gtFlags |= GTF_CALL_POP_ARGS;
5530 if (unmanagedCallConv == CORINFO_UNMANAGED_CALLCONV_THISCALL)
5532 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_UNMGD_THISCALL;
5536 GenTreePtr Compiler::impImportIndirectCall(CORINFO_SIG_INFO* sig, IL_OFFSETX ilOffset)
5538 var_types callRetTyp = JITtype2varType(sig->retType);
5540 /* The function pointer is on top of the stack - It may be a
5541 * complex expression. As it is evaluated after the args,
5542 * it may cause registered args to be spilled. Simply spill it.
5545 // Ignore this trivial case.
5546 if (impStackTop().val->gtOper != GT_LCL_VAR)
5548 impSpillStackEntry(verCurrentState.esStackDepth - 1,
5549 BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impImportIndirectCall"));
5552 /* Get the function pointer */
5554 GenTreePtr fptr = impPopStack().val;
5555 assert(genActualType(fptr->gtType) == TYP_I_IMPL);
5558 // This temporary must never be converted to a double in stress mode,
5559 // because that can introduce a call to the cast helper after the
5560 // arguments have already been evaluated.
5562 if (fptr->OperGet() == GT_LCL_VAR)
5564 lvaTable[fptr->gtLclVarCommon.gtLclNum].lvKeepType = 1;
5568 /* Create the call node */
5570 GenTreePtr call = gtNewIndCallNode(fptr, callRetTyp, nullptr, ilOffset);
5572 call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
5577 /*****************************************************************************/
5579 void Compiler::impPopArgsForUnmanagedCall(GenTreePtr call, CORINFO_SIG_INFO* sig)
5581 assert(call->gtFlags & GTF_CALL_UNMANAGED);
5583 /* Since we push the arguments in reverse order (i.e. right -> left)
5584 * spill any side effects from the stack
5586 * OBS: If there is only one side effect we do not need to spill it
5587 * thus we have to spill all side-effects except the last one
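*
* For illustration (a sketch, not from this code): with stacked args A, B, C
* where both A and C have side effects, A is spilled to a temp when C is
* encountered, preserving the original left-to-right evaluation order; the
* topmost side effect (C) can stay on the stack, so only all-but-the-last
* side effects are spilled.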
5590 unsigned lastLevelWithSideEffects = UINT_MAX;
5592 unsigned argsToReverse = sig->numArgs;
5594 // For "thiscall", the first argument goes in a register. Since its
5595 // order does not need to be changed, we do not need to spill it
5597 if (call->gtCall.gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
5599 assert(argsToReverse);
5603 #ifndef _TARGET_X86_
5604 // Don't reverse args on ARM or x64 - first four args always placed in regs in order
5608 for (unsigned level = verCurrentState.esStackDepth - argsToReverse; level < verCurrentState.esStackDepth; level++)
5610 if (verCurrentState.esStack[level].val->gtFlags & GTF_ORDER_SIDEEFF)
5612 assert(lastLevelWithSideEffects == UINT_MAX);
5614 impSpillStackEntry(level,
5615 BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - other side effect"));
5617 else if (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT)
5619 if (lastLevelWithSideEffects != UINT_MAX)
5621 /* We had a previous side effect - must spill it */
5622 impSpillStackEntry(lastLevelWithSideEffects,
5623 BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - side effect"));
5625 /* Record the level for the current side effect in case we will spill it */
5626 lastLevelWithSideEffects = level;
5630 /* This is the first side effect encountered - record its level */
5632 lastLevelWithSideEffects = level;
5637 /* The argument list is now "clean" - no out-of-order side effects
5638 * Pop the argument list in reverse order */
5640 unsigned argFlags = 0;
5641 GenTreePtr args = call->gtCall.gtCallArgs =
5642 impPopRevList(sig->numArgs, &argFlags, sig, sig->numArgs - argsToReverse);
5644 if (call->gtCall.gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
5646 GenTreePtr thisPtr = args->Current();
5647 impBashVarAddrsToI(thisPtr);
5648 assert(thisPtr->TypeGet() == TYP_I_IMPL || thisPtr->TypeGet() == TYP_BYREF);
5653 call->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
5657 //------------------------------------------------------------------------
5658 // impInitClass: Build a node to initialize the class before accessing the
5659 // field if necessary
5662 // pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized
5663 // by a call to CEEInfo::resolveToken().
5665 // Return Value: If needed, a pointer to the node that will perform the class
5666 // initialization. Otherwise, nullptr.
5669 GenTreePtr Compiler::impInitClass(CORINFO_RESOLVED_TOKEN* pResolvedToken)
5671 CorInfoInitClassResult initClassResult =
5672 info.compCompHnd->initClass(pResolvedToken->hField, info.compMethodHnd, impTokenLookupContextHandle);
5674 if ((initClassResult & CORINFO_INITCLASS_USE_HELPER) == 0)
5680 GenTreePtr node = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup);
5682 if (node == nullptr)
5684 assert(compDonotInline());
5690 node = gtNewHelperCallNode(CORINFO_HELP_INITCLASS, TYP_VOID, 0, gtNewArgList(node));
5694 // Call the shared non-gc static helper, as it's the fastest
5695 node = fgGetSharedCCtor(pResolvedToken->hClass);
5701 GenTreePtr Compiler::impImportStaticReadOnlyField(void* fldAddr, var_types lclTyp)
5703 GenTreePtr op1 = nullptr;
5712 ival = *((bool*)fldAddr);
5716 ival = *((signed char*)fldAddr);
5720 ival = *((unsigned char*)fldAddr);
5724 ival = *((short*)fldAddr);
5729 ival = *((unsigned short*)fldAddr);
5734 ival = *((int*)fldAddr);
5736 op1 = gtNewIconNode(ival);
5741 lval = *((__int64*)fldAddr);
5742 op1 = gtNewLconNode(lval);
5746 dval = *((float*)fldAddr);
5747 op1 = gtNewDconNode(dval);
5748 #if !FEATURE_X87_DOUBLES
5749 // X87 stack doesn't differentiate between float/double
5750 // so R4 is treated as R8, but everybody else does
5751 op1->gtType = TYP_FLOAT;
5752 #endif // FEATURE_X87_DOUBLES
5756 dval = *((double*)fldAddr);
5757 op1 = gtNewDconNode(dval);
5761 assert(!"Unexpected lclTyp");
5768 GenTreePtr Compiler::impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolvedToken,
5769 CORINFO_ACCESS_FLAGS access,
5770 CORINFO_FIELD_INFO* pFieldInfo,
5775 switch (pFieldInfo->fieldAccessor)
5777 case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
5779 assert(!compIsForInlining());
5781 // We first call a special helper to get the statics base pointer
5782 op1 = impParentClassTokenToHandle(pResolvedToken);
// compIsForInlining() is false, so we should never get NULL here
5785 assert(op1 != nullptr);
5787 var_types type = TYP_BYREF;
5789 switch (pFieldInfo->helper)
5791 case CORINFO_HELP_GETGENERICS_NONGCTHREADSTATIC_BASE:
5794 case CORINFO_HELP_GETGENERICS_GCSTATIC_BASE:
5795 case CORINFO_HELP_GETGENERICS_NONGCSTATIC_BASE:
5796 case CORINFO_HELP_GETGENERICS_GCTHREADSTATIC_BASE:
5799 assert(!"unknown generic statics helper");
5803 op1 = gtNewHelperCallNode(pFieldInfo->helper, type, 0, gtNewArgList(op1));
5805 FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
5806 op1 = gtNewOperNode(GT_ADD, type, op1,
5807 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, pFieldInfo->offset, fs));
5811 case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
5813 #ifdef FEATURE_READYTORUN_COMPILER
5814 if (opts.IsReadyToRun())
5816 unsigned callFlags = 0;
5818 if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT)
5820 callFlags |= GTF_CALL_HOISTABLE;
5823 op1 = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_STATIC_BASE, TYP_BYREF, callFlags);
5825 op1->gtCall.setEntryPoint(pFieldInfo->fieldLookup);
5830 op1 = fgGetStaticsCCtorHelper(pResolvedToken->hClass, pFieldInfo->helper);
5834 FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
5835 op1 = gtNewOperNode(GT_ADD, op1->TypeGet(), op1,
5836 new (this, GT_CNS_INT) GenTreeIntCon(TYP_INT, pFieldInfo->offset, fs));
5840 #if COR_JIT_EE_VERSION > 460
5841 case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
5843 #ifdef FEATURE_READYTORUN_COMPILER
5844 noway_assert(opts.IsReadyToRun());
5845 CORINFO_LOOKUP_KIND kind = info.compCompHnd->getLocationOfThisType(info.compMethodHnd);
5846 assert(kind.needsRuntimeLookup);
5848 GenTreePtr ctxTree = getRuntimeContextTree(kind.runtimeLookupKind);
5849 GenTreeArgList* args = gtNewArgList(ctxTree);
5851 unsigned callFlags = 0;
5853 if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT)
5855 callFlags |= GTF_CALL_HOISTABLE;
5857 var_types type = TYP_BYREF;
5858 op1 = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE, type, callFlags, args);
5860 op1->gtCall.setEntryPoint(pFieldInfo->fieldLookup);
5861 FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
5862 op1 = gtNewOperNode(GT_ADD, type, op1,
5863 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, pFieldInfo->offset, fs));
5866 #endif // FEATURE_READYTORUN_COMPILER
5869 #endif // COR_JIT_EE_VERSION > 460
5872 if (!(access & CORINFO_ACCESS_ADDRESS))
// In the future, it may be better to just create the right tree here instead of folding it later.
5875 op1 = gtNewFieldRef(lclTyp, pResolvedToken->hField);
5877 if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP)
5879 op1->gtType = TYP_REF; // points at boxed object
5880 FieldSeqNode* firstElemFldSeq =
5881 GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField);
5883 gtNewOperNode(GT_ADD, TYP_BYREF, op1,
5884 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(void*), firstElemFldSeq));
5886 if (varTypeIsStruct(lclTyp))
5888 // Constructor adds GTF_GLOB_REF. Note that this is *not* GTF_EXCEPT.
5889 op1 = gtNewObjNode(pFieldInfo->structType, op1);
5893 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
5894 op1->gtFlags |= GTF_GLOB_REF | GTF_IND_NONFAULTING;
5902 void** pFldAddr = nullptr;
5903 void* fldAddr = info.compCompHnd->getFieldAddress(pResolvedToken->hField, (void**)&pFldAddr);
5905 FieldSeqNode* fldSeq = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
5907 /* Create the data member node */
5908 if (pFldAddr == nullptr)
5910 op1 = gtNewIconHandleNode((size_t)fldAddr, GTF_ICON_STATIC_HDL, fldSeq);
5914 op1 = gtNewIconHandleNode((size_t)pFldAddr, GTF_ICON_STATIC_HDL, fldSeq);
// There are two cases here: either the static is RVA-based,
// in which case the type of the FIELD node is not a GC type
// and the handle to the RVA is a TYP_I_IMPL; or the FIELD node is
// a GC type and the handle to it is a TYP_BYREF in the GC heap,
// because handles to statics now go into the large object heap.
5922 var_types handleTyp = (var_types)(varTypeIsGC(lclTyp) ? TYP_BYREF : TYP_I_IMPL);
5923 op1 = gtNewOperNode(GT_IND, handleTyp, op1);
5924 op1->gtFlags |= GTF_IND_INVARIANT | GTF_IND_NONFAULTING;
5931 if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP)
5933 op1 = gtNewOperNode(GT_IND, TYP_REF, op1);
5935 FieldSeqNode* fldSeq = GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField);
5937 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
5938 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(void*), fldSeq));
5941 if (!(access & CORINFO_ACCESS_ADDRESS))
5943 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
5944 op1->gtFlags |= GTF_GLOB_REF;
// In general, try to call this before doing most of the verification work. Most
// people expect access exceptions before verification exceptions. If you do this
// after, that usually doesn't happen. It turns out that if you can't access
// something, we also consider the code unverifiable for other reasons.
5953 void Compiler::impHandleAccessAllowed(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall)
5955 if (result != CORINFO_ACCESS_ALLOWED)
5957 impHandleAccessAllowedInternal(result, helperCall);
5961 void Compiler::impHandleAccessAllowedInternal(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall)
5965 case CORINFO_ACCESS_ALLOWED:
5967 case CORINFO_ACCESS_ILLEGAL:
5968 // if we're verifying, then we need to reject the illegal access to ensure that we don't think the
5969 // method is verifiable. Otherwise, delay the exception to runtime.
5970 if (compIsForImportOnly())
5972 info.compCompHnd->ThrowExceptionForHelper(helperCall);
5976 impInsertHelperCall(helperCall);
5979 case CORINFO_ACCESS_RUNTIME_CHECK:
5980 impInsertHelperCall(helperCall);
5985 void Compiler::impInsertHelperCall(CORINFO_HELPER_DESC* helperInfo)
5987 // Construct the argument list
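// Note: the loop below walks the helper args from last to first and
// prepends each one, so the resulting arg list ends up in the original
// declared order.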
5988 GenTreeArgList* args = nullptr;
5989 assert(helperInfo->helperNum != CORINFO_HELP_UNDEF);
5990 for (unsigned i = helperInfo->numArgs; i > 0; --i)
5992 const CORINFO_HELPER_ARG& helperArg = helperInfo->args[i - 1];
5993 GenTreePtr currentArg = nullptr;
5994 switch (helperArg.argType)
5996 case CORINFO_HELPER_ARG_TYPE_Field:
5997 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(
5998 info.compCompHnd->getFieldClass(helperArg.fieldHandle));
5999 currentArg = gtNewIconEmbFldHndNode(helperArg.fieldHandle);
6001 case CORINFO_HELPER_ARG_TYPE_Method:
6002 info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(helperArg.methodHandle);
6003 currentArg = gtNewIconEmbMethHndNode(helperArg.methodHandle);
6005 case CORINFO_HELPER_ARG_TYPE_Class:
6006 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(helperArg.classHandle);
6007 currentArg = gtNewIconEmbClsHndNode(helperArg.classHandle);
6009 case CORINFO_HELPER_ARG_TYPE_Module:
6010 currentArg = gtNewIconEmbScpHndNode(helperArg.moduleHandle);
6012 case CORINFO_HELPER_ARG_TYPE_Const:
6013 currentArg = gtNewIconNode(helperArg.constant);
6016 NO_WAY("Illegal helper arg type");
args = (args == nullptr) ? gtNewArgList(currentArg) : gtNewListNode(currentArg, args);
6022 * Mark as CSE'able, and hoistable. Consider marking hoistable unless you're in the inlinee.
6023 * Also, consider sticking this in the first basic block.
6025 GenTreePtr callout = gtNewHelperCallNode(helperInfo->helperNum, TYP_VOID, GTF_EXCEPT, args);
6026 impAppendTree(callout, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
6029 void Compiler::impInsertCalloutForDelegate(CORINFO_METHOD_HANDLE callerMethodHnd,
6030 CORINFO_METHOD_HANDLE calleeMethodHnd,
6031 CORINFO_CLASS_HANDLE delegateTypeHnd)
6033 #ifdef FEATURE_CORECLR
6034 if (!info.compCompHnd->isDelegateCreationAllowed(delegateTypeHnd, calleeMethodHnd))
6036 // Call the JIT_DelegateSecurityCheck helper before calling the actual function.
6037 // This helper throws an exception if the CLR host disallows the call.
6039 GenTreePtr helper = gtNewHelperCallNode(CORINFO_HELP_DELEGATE_SECURITY_CHECK, TYP_VOID, GTF_EXCEPT,
6040 gtNewArgList(gtNewIconEmbClsHndNode(delegateTypeHnd),
6041 gtNewIconEmbMethHndNode(calleeMethodHnd)));
6042 // Append the callout statement
6043 impAppendTree(helper, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
6045 #endif // FEATURE_CORECLR
// Checks whether the return types of caller and callee are compatible
// so that the callee can be tail called. Note that here we don't check
// compatibility in the IL Verifier sense, but along the lines of: the return
// type sizes are equal and the values get returned in the same return register.
6052 bool Compiler::impTailCallRetTypeCompatible(var_types callerRetType,
6053 CORINFO_CLASS_HANDLE callerRetTypeClass,
6054 var_types calleeRetType,
6055 CORINFO_CLASS_HANDLE calleeRetTypeClass)
6057 // Note that we can not relax this condition with genActualType() as the
6058 // calling convention dictates that the caller of a function with a small
6059 // typed return value is responsible for normalizing the return val.
6060 if (callerRetType == calleeRetType)
6065 #if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)
6067 if (callerRetType == TYP_VOID)
6069 // This needs to be allowed to support the following IL pattern that Jit64 allows:
6074 // Note that the above IL pattern is not valid as per IL verification rules.
6075 // Therefore, only full trust code can take advantage of this pattern.
6079 // These checks return true if the return value type sizes are the same and
6080 // get returned in the same return register i.e. caller doesn't need to normalize
6081 // return value. Some of the tail calls permitted by below checks would have
6082 // been rejected by IL Verifier before we reached here. Therefore, only full
6083 // trust code can make those tail calls.
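// For example (illustrative): on AMD64, a caller returning TYP_LONG may
// tail call a callee returning an 8-byte enregisterable struct, since both
// values are 8 bytes wide and come back in the same return register (RAX).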
6084 unsigned callerRetTypeSize = 0;
6085 unsigned calleeRetTypeSize = 0;
6086 bool isCallerRetTypMBEnreg =
6087 VarTypeIsMultiByteAndCanEnreg(callerRetType, callerRetTypeClass, &callerRetTypeSize, true);
6088 bool isCalleeRetTypMBEnreg =
6089 VarTypeIsMultiByteAndCanEnreg(calleeRetType, calleeRetTypeClass, &calleeRetTypeSize, true);
6091 if (varTypeIsIntegral(callerRetType) || isCallerRetTypMBEnreg)
6093 return (varTypeIsIntegral(calleeRetType) || isCalleeRetTypMBEnreg) && (callerRetTypeSize == calleeRetTypeSize);
6095 #endif // _TARGET_AMD64_ || _TARGET_ARM64_
6103 PREFIX_TAILCALL_EXPLICIT = 0x00000001, // call has "tail" IL prefix
6104 PREFIX_TAILCALL_IMPLICIT =
6105 0x00000010, // call is treated as having "tail" prefix even though there is no "tail" IL prefix
6106 PREFIX_TAILCALL = (PREFIX_TAILCALL_EXPLICIT | PREFIX_TAILCALL_IMPLICIT),
6107 PREFIX_VOLATILE = 0x00000100,
6108 PREFIX_UNALIGNED = 0x00001000,
6109 PREFIX_CONSTRAINED = 0x00010000,
6110 PREFIX_READONLY = 0x00100000
6113 /********************************************************************************
* Returns true if the current opcode and the opcodes following it correspond
6116 * to a supported tail call IL pattern.
6119 bool Compiler::impIsTailCallILPattern(bool tailPrefixed,
6121 const BYTE* codeAddrOfNextOpcode,
6122 const BYTE* codeEnd,
6124 bool* isCallPopAndRet /* = nullptr */)
6126 // Bail out if the current opcode is not a call.
6127 if (!impOpcodeIsCallOpcode(curOpcode))
6132 #if !FEATURE_TAILCALL_OPT_SHARED_RETURN
6133 // If shared ret tail opt is not enabled, we will enable
6134 // it for recursive methods.
// We can actually handle the case where the ret is in a fallthrough block, as long as that is the
// only part of the sequence. Make sure we don't go past the end of the IL, however.
6140 codeEnd = min(codeEnd + 1, info.compCode + info.compILCodeSize);
6143 // Bail out if there is no next opcode after call
6144 if (codeAddrOfNextOpcode >= codeEnd)
6149 // Scan the opcodes to look for the following IL patterns if either
6150 // i) the call is not tail prefixed (i.e. implicit tail call) or
6151 // ii) if tail prefixed, IL verification is not needed for the method.
// Only in the above two cases can we allow the tail call patterns below,
// which violate the ECMA spec.
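// For example (illustrative), in a method returning void the sequence
//
//     call   int32 C::M()
//     pop
//     ret
//
// is accepted as a tail call candidate even though the ECMA-335 'tail.'
// rules would not permit the intervening pop.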
6170 #ifdef _TARGET_AMD64_
6173 nextOpcode = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode);
6174 codeAddrOfNextOpcode += sizeof(__int8);
6175 } while ((codeAddrOfNextOpcode < codeEnd) && // Haven't reached end of method
6176 (!tailPrefixed || !tiVerificationNeeded) && // Not ".tail" prefixed or method requires no IL verification
6177 ((nextOpcode == CEE_NOP) || ((nextOpcode == CEE_POP) && (++cntPop == 1)))); // Next opcode = nop or exactly
6178 // one pop seen so far.
6180 nextOpcode = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode);
6183 if (isCallPopAndRet)
6185 // Allow call+pop+ret to be tail call optimized if caller ret type is void
6186 *isCallPopAndRet = (nextOpcode == CEE_RET) && (cntPop == 1);
6189 #ifdef _TARGET_AMD64_
6191 // Tail call IL pattern could be either of the following
6192 // 1) call/callvirt/calli + ret
6193 // 2) call/callvirt/calli + pop + ret in a method returning void.
6194 return (nextOpcode == CEE_RET) && ((cntPop == 0) || ((cntPop == 1) && (info.compRetType == TYP_VOID)));
6195 #else //!_TARGET_AMD64_
6196 return (nextOpcode == CEE_RET) && (cntPop == 0);
6200 /*****************************************************************************
6202 * Determine whether the call could be converted to an implicit tail call
6205 bool Compiler::impIsImplicitTailCallCandidate(
6206 OPCODE opcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, int prefixFlags, bool isRecursive)
6209 #if FEATURE_TAILCALL_OPT
6210 if (!opts.compTailCallOpt)
6215 if (opts.compDbgCode || opts.MinOpts())
6220 // must not be tail prefixed
6221 if (prefixFlags & PREFIX_TAILCALL_EXPLICIT)
6226 #if !FEATURE_TAILCALL_OPT_SHARED_RETURN
6227 // the block containing call is marked as BBJ_RETURN
6228 // We allow shared ret tail call optimization on recursive calls even under
6229 // !FEATURE_TAILCALL_OPT_SHARED_RETURN.
6230 if (!isRecursive && (compCurBB->bbJumpKind != BBJ_RETURN))
6232 #endif // !FEATURE_TAILCALL_OPT_SHARED_RETURN
6234 // must be call+ret or call+pop+ret
6235 if (!impIsTailCallILPattern(false, opcode, codeAddrOfNextOpcode, codeEnd, isRecursive))
6243 #endif // FEATURE_TAILCALL_OPT
6246 //------------------------------------------------------------------------
6247 // impImportCall: import a call-inspiring opcode
6250 // opcode - opcode that inspires the call
6251 // pResolvedToken - resolved token for the call target
6252 // pConstrainedResolvedToken - resolved constraint token (or nullptr)
//    newObjThis - tree for this pointer or uninitialized newobj temp (or nullptr)
6254 // prefixFlags - IL prefix flags for the call
6255 // callInfo - EE supplied info for the call
6256 // rawILOffset - IL offset of the opcode
6259 // Type of the call's return value.
6262 // opcode can be CEE_CALL, CEE_CALLI, CEE_CALLVIRT, or CEE_NEWOBJ.
//    For CEE_NEWOBJ, newobjThis should be the temp grabbed for the allocated
//    uninitialized object.
6268 #pragma warning(push)
6269 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
6272 var_types Compiler::impImportCall(OPCODE opcode,
6273 CORINFO_RESOLVED_TOKEN* pResolvedToken,
6274 CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
6275 GenTreePtr newobjThis,
int prefixFlags,
CORINFO_CALL_INFO* callInfo,
6278 IL_OFFSET rawILOffset)
6280 assert(opcode == CEE_CALL || opcode == CEE_CALLVIRT || opcode == CEE_NEWOBJ || opcode == CEE_CALLI);
6282 IL_OFFSETX ilOffset = impCurILOffset(rawILOffset, true);
6283 var_types callRetTyp = TYP_COUNT;
6284 CORINFO_SIG_INFO* sig = nullptr;
6285 CORINFO_METHOD_HANDLE methHnd = nullptr;
6286 CORINFO_CLASS_HANDLE clsHnd = nullptr;
6287 unsigned clsFlags = 0;
6288 unsigned mflags = 0;
6289 unsigned argFlags = 0;
6290 GenTreePtr call = nullptr;
6291 GenTreeArgList* args = nullptr;
6292 CORINFO_THIS_TRANSFORM constraintCallThisTransform = CORINFO_NO_THIS_TRANSFORM;
6293 CORINFO_CONTEXT_HANDLE exactContextHnd = nullptr;
6294 BOOL exactContextNeedsRuntimeLookup = FALSE;
6295 bool canTailCall = true;
6296 const char* szCanTailCallFailReason = nullptr;
6297 int tailCall = prefixFlags & PREFIX_TAILCALL;
6298 bool readonlyCall = (prefixFlags & PREFIX_READONLY) != 0;
6300 // Synchronized methods need to call CORINFO_HELP_MON_EXIT at the end. We could
6301 // do that before tailcalls, but that is probably not the intended
6302 // semantic. So just disallow tailcalls from synchronized methods.
6303 // Also, popping arguments in a varargs function is more work and NYI
6304 // If we have a security object, we have to keep our frame around for callers
6305 // to see any imperative security.
6306 if (info.compFlags & CORINFO_FLG_SYNCH)
6308 canTailCall = false;
6309 szCanTailCallFailReason = "Caller is synchronized";
6311 #if !FEATURE_FIXED_OUT_ARGS
6312 else if (info.compIsVarArgs)
6314 canTailCall = false;
6315 szCanTailCallFailReason = "Caller is varargs";
6317 #endif // FEATURE_FIXED_OUT_ARGS
6318 else if (opts.compNeedSecurityCheck)
6320 canTailCall = false;
6321 szCanTailCallFailReason = "Caller requires a security check.";
6324 // We only need to cast the return value of pinvoke inlined calls that return small types
6326 // TODO-AMD64-Cleanup: Remove this when we stop interoperating with JIT64, or if we decide to stop
6327 // widening everything! CoreCLR does not support JIT64 interoperation so no need to widen there.
6328 // The existing x64 JIT doesn't bother widening all types to int, so we have to assume for
6329 // the time being that the callee might be compiled by the other JIT and thus the return
6330 // value will need to be widened by us (or not widened at all...)
6332 // ReadyToRun code sticks with default calling convention that does not widen small return types.
6334 bool checkForSmallType = opts.IsJit64Compat() || opts.IsReadyToRun();
6335 bool bIntrinsicImported = false;
6337 CORINFO_SIG_INFO calliSig;
6338 GenTreeArgList* extraArg = nullptr;
6340 /*-------------------------------------------------------------------------
6341 * First create the call node
6344 if (opcode == CEE_CALLI)
6346 /* Get the call site sig */
6347 eeGetSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &calliSig);
6349 callRetTyp = JITtype2varType(calliSig.retType);
6351 call = impImportIndirectCall(&calliSig, ilOffset);
6353 // We don't know the target method, so we have to infer the flags, or
6354 // assume the worst-case.
6355 mflags = (calliSig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC;
6360 unsigned structSize =
6361 (callRetTyp == TYP_STRUCT) ? info.compCompHnd->getClassSize(calliSig.retTypeSigClass) : 0;
6362 printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n",
6363 opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize);
6366 // This should be checked in impImportBlockCode.
6367 assert(!compIsForInlining() || !(impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY));
6372 // We cannot lazily obtain the signature of a CALLI call because it has no method
6373 // handle that we can use, so we need to save its full call signature here.
6374 assert(call->gtCall.callSig == nullptr);
6375 call->gtCall.callSig = new (this, CMK_CorSig) CORINFO_SIG_INFO;
6376 *call->gtCall.callSig = calliSig;
6379 else // (opcode != CEE_CALLI)
6381 CorInfoIntrinsics intrinsicID = CORINFO_INTRINSIC_Count;
6383 // Passing CORINFO_CALLINFO_ALLOWINSTPARAM indicates that this JIT is prepared to
6384 // supply the instantiation parameters necessary to make direct calls to underlying
6385 // shared generic code, rather than calling through instantiating stubs. If the
6386 // returned signature has CORINFO_CALLCONV_PARAMTYPE then this indicates that the JIT
6387 // must indeed pass an instantiation parameter.
6389 methHnd = callInfo->hMethod;
6391 sig = &(callInfo->sig);
6392 callRetTyp = JITtype2varType(sig->retType);
6394 mflags = callInfo->methodFlags;
6399 unsigned structSize = (callRetTyp == TYP_STRUCT) ? info.compCompHnd->getClassSize(sig->retTypeSigClass) : 0;
6400 printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n",
6401 opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize);
6404 if (compIsForInlining())
6406 /* Does this call site have security boundary restrictions? */
6408 if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
6410 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_SECURITY);
6414 /* Does the inlinee need a security check token on the frame */
6416 if (mflags & CORINFO_FLG_SECURITYCHECK)
6418 compInlineResult->NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
6422 /* Does the inlinee use StackCrawlMark */
6424 if (mflags & CORINFO_FLG_DONT_INLINE_CALLER)
6426 compInlineResult->NoteFatal(InlineObservation::CALLEE_STACK_CRAWL_MARK);
6430 /* For now ignore delegate invoke */
6432 if (mflags & CORINFO_FLG_DELEGATE_INVOKE)
6434 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_DELEGATE_INVOKE);
6438 /* For now ignore varargs */
6439 if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
6441 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NATIVE_VARARGS);
6445 if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
6447 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS);
6451 if ((mflags & CORINFO_FLG_VIRTUAL) && (sig->sigInst.methInstCount != 0) && (opcode == CEE_CALLVIRT))
6453 compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_GENERIC_VIRTUAL);
6458 clsHnd = pResolvedToken->hClass;
6460 clsFlags = callInfo->classFlags;
6463 // If this is a call to JitTestLabel.Mark, do "early inlining", and record the test attribute.
6465 // This recognition should really be done by knowing the methHnd of the relevant Mark method(s).
6466 // These should be in mscorlib.h, and available through a JIT/EE interface call.
6467 const char* modName;
6468 const char* className;
6469 const char* methodName;
6470 if ((className = eeGetClassName(clsHnd)) != nullptr &&
6471 strcmp(className, "System.Runtime.CompilerServices.JitTestLabel") == 0 &&
6472 (methodName = eeGetMethodName(methHnd, &modName)) != nullptr && strcmp(methodName, "Mark") == 0)
6474 return impImportJitTestLabelMark(sig->numArgs);
6478 // <NICE> Factor this into getCallInfo </NICE>
6479 if ((mflags & CORINFO_FLG_INTRINSIC) && !pConstrainedResolvedToken)
6481 call = impIntrinsic(clsHnd, methHnd, sig, pResolvedToken->token, readonlyCall,
6482 (canTailCall && (tailCall != 0)), &intrinsicID);
6484 if (call != nullptr)
6486 assert(!(mflags & CORINFO_FLG_VIRTUAL) || (mflags & CORINFO_FLG_FINAL) ||
6487 (clsFlags & CORINFO_FLG_FINAL));
6489 #ifdef FEATURE_READYTORUN_COMPILER
6490 if (call->OperGet() == GT_INTRINSIC)
6492 if (opts.IsReadyToRun())
6494 noway_assert(callInfo->kind == CORINFO_CALL);
6495 call->gtIntrinsic.gtEntryPoint = callInfo->codePointerLookup.constLookup;
6499 call->gtIntrinsic.gtEntryPoint.addr = nullptr;
6504 bIntrinsicImported = true;
6512 call = impSIMDIntrinsic(opcode, newobjThis, clsHnd, methHnd, sig, pResolvedToken->token);
6513 if (call != nullptr)
6515 bIntrinsicImported = true;
6519 #endif // FEATURE_SIMD
6521 if ((mflags & CORINFO_FLG_VIRTUAL) && (mflags & CORINFO_FLG_EnC) && (opcode == CEE_CALLVIRT))
6523 NO_WAY("Virtual call to a function added via EnC is not supported");
6527 if ((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_DEFAULT &&
6528 (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
6529 (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG)
6531 BADCODE("Bad calling convention");
6534 //-------------------------------------------------------------------------
6535 // Construct the call node
6537 // Work out what sort of call we're making.
6538 // Dispense with virtual calls implemented via LDVIRTFTN immediately.
6540 constraintCallThisTransform = callInfo->thisTransform;
6542 exactContextHnd = callInfo->contextHandle;
6543 exactContextNeedsRuntimeLookup = callInfo->exactContextNeedsRuntimeLookup;
// A recursive call is treated as a loop back to the beginning of the method.
6546 if (methHnd == info.compMethodHnd)
6551 JITDUMP("\nFound recursive call in the method. Mark BB%02u to BB%02u as having a backward branch.\n",
6552 fgFirstBB->bbNum, compCurBB->bbNum);
6555 fgMarkBackwardJump(fgFirstBB, compCurBB);
6558 switch (callInfo->kind)
6561 case CORINFO_VIRTUALCALL_STUB:
6563 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
6564 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
6565 if (callInfo->stubLookup.lookupKind.needsRuntimeLookup)
6568 if (compIsForInlining())
6570 // Don't import runtime lookups when inlining
6571 // Inlining has to be aborted in such a case
6572 /* XXX Fri 3/20/2009
6573 * By the way, this would never succeed. If the handle lookup is into the generic
6574 * dictionary for a candidate, you'll generate different dictionary offsets and the
6575 * inlined code will crash.
6577 * To anyone code reviewing this, when could this ever succeed in the future? It'll
6578 * always have a handle lookup. These lookups are safe intra-module, but we're just
6581 compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_COMPLEX_HANDLE);
6585 GenTreePtr stubAddr = impRuntimeLookupToTree(pResolvedToken, &callInfo->stubLookup, methHnd);
6586 assert(!compDonotInline());
6588 // This is the rough code to set up an indirect stub call
6589 assert(stubAddr != nullptr);
6591 // The stubAddr may be a
6592 // complex expression. As it is evaluated after the args,
6593 // it may cause registered args to be spilled. Simply spill it.
6595 unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall with runtime lookup"));
6596 impAssignTempGen(lclNum, stubAddr, (unsigned)CHECK_SPILL_ALL);
6597 stubAddr = gtNewLclvNode(lclNum, TYP_I_IMPL);
6599 // Create the actual call node
6601 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
6602 (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
6604 call = gtNewIndCallNode(stubAddr, callRetTyp, nullptr);
6606 call->gtFlags |= GTF_EXCEPT | (stubAddr->gtFlags & GTF_GLOB_EFFECT);
6607 call->gtFlags |= GTF_CALL_VIRT_STUB;
6610 // No tailcalls allowed for these yet...
6611 canTailCall = false;
6612 szCanTailCallFailReason = "VirtualCall with runtime lookup";
// OK, the stub is available at compile time.
6619 call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
6620 call->gtCall.gtStubCallStubAddr = callInfo->stubLookup.constLookup.addr;
6621 call->gtFlags |= GTF_CALL_VIRT_STUB;
6622 assert(callInfo->stubLookup.constLookup.accessType != IAT_PPVALUE);
6623 if (callInfo->stubLookup.constLookup.accessType == IAT_PVALUE)
6625 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_VIRTSTUB_REL_INDIRECT;
6629 #ifdef FEATURE_READYTORUN_COMPILER
6630 if (opts.IsReadyToRun())
6632 // Null check is sometimes needed for ready to run to handle
6633 // non-virtual <-> virtual changes between versions
6634 if (callInfo->nullInstanceCheck)
6636 call->gtFlags |= GTF_CALL_NULLCHECK;
6644 case CORINFO_VIRTUALCALL_VTABLE:
6646 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
6647 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
6648 call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
6649 call->gtFlags |= GTF_CALL_VIRT_VTABLE;
6653 case CORINFO_VIRTUALCALL_LDVIRTFTN:
6655 if (compIsForInlining())
6657 compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_CALL_VIA_LDVIRTFTN);
6661 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
6662 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
// OK, we've been told to call via LDVIRTFTN, so just
// take the call now....
6666 args = impPopList(sig->numArgs, &argFlags, sig);
6668 GenTreePtr thisPtr = impPopStack().val;
6669 thisPtr = impTransformThis(thisPtr, pConstrainedResolvedToken, callInfo->thisTransform);
6670 if (compDonotInline())
6675 // Clone the (possibly transformed) "this" pointer
6676 GenTreePtr thisPtrCopy;
6677 thisPtr = impCloneExpr(thisPtr, &thisPtrCopy, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
6678 nullptr DEBUGARG("LDVIRTFTN this pointer"));
6680 GenTreePtr fptr = impImportLdvirtftn(thisPtr, pResolvedToken, callInfo);
6681 if (compDonotInline())
6686 thisPtr = nullptr; // can't reuse it
6688 // Now make an indirect call through the function pointer
6690 unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall through function pointer"));
6691 impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL);
6692 fptr = gtNewLclvNode(lclNum, TYP_I_IMPL);
6694 // Create the actual call node
6696 call = gtNewIndCallNode(fptr, callRetTyp, args, ilOffset);
6697 call->gtCall.gtCallObjp = thisPtrCopy;
6698 call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
6700 #ifdef FEATURE_READYTORUN_COMPILER
6701 if (opts.IsReadyToRun())
6703 // Null check is needed for ready to run to handle
6704 // non-virtual <-> virtual changes between versions
6705 call->gtFlags |= GTF_CALL_NULLCHECK;
// Since we are jumping over some code, check that it's OK to skip that code
6710 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
6711 (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
6717 // This is for a non-virtual, non-interface etc. call
6718 call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
// We remove the nullcheck for the GetType call intrinsic.
6721 // TODO-CQ: JIT64 does not introduce the null check for many more helper calls
6723 if (callInfo->nullInstanceCheck &&
6724 !((mflags & CORINFO_FLG_INTRINSIC) != 0 && (intrinsicID == CORINFO_INTRINSIC_Object_GetType)))
6726 call->gtFlags |= GTF_CALL_NULLCHECK;
6729 #ifdef FEATURE_READYTORUN_COMPILER
6730 if (opts.IsReadyToRun())
6732 call->gtCall.setEntryPoint(callInfo->codePointerLookup.constLookup);
6738 case CORINFO_CALL_CODE_POINTER:
6740 // The EE has asked us to call by computing a code pointer and then doing an
6741 // indirect call. This is because a runtime lookup is required to get the code entry point.
6743 // These calls always follow a uniform calling convention, i.e. no extra hidden params
6744 assert((sig->callConv & CORINFO_CALLCONV_PARAMTYPE) == 0);
6746 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG);
6747 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
6750 impLookupToTree(pResolvedToken, &callInfo->codePointerLookup, GTF_ICON_FTN_ADDR, callInfo->hMethod);
6752 if (compDonotInline())
6757 // Now make an indirect call through the function pointer
6759 unsigned lclNum = lvaGrabTemp(true DEBUGARG("Indirect call through function pointer"));
6760 impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL);
6761 fptr = gtNewLclvNode(lclNum, TYP_I_IMPL);
6763 call = gtNewIndCallNode(fptr, callRetTyp, nullptr, ilOffset);
6764 call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
6765 if (callInfo->nullInstanceCheck)
6767 call->gtFlags |= GTF_CALL_NULLCHECK;
6774 assert(!"unknown call kind");
6778 //-------------------------------------------------------------------------
6781 PREFIX_ASSUME(call != nullptr);
6783 if (mflags & CORINFO_FLG_NOGCCHECK)
6785 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_NOGCCHECK;
6788 // Mark call if it's one of the ones we will maybe treat as an intrinsic
6789 if (intrinsicID == CORINFO_INTRINSIC_Object_GetType || intrinsicID == CORINFO_INTRINSIC_TypeEQ ||
6790 intrinsicID == CORINFO_INTRINSIC_TypeNEQ || intrinsicID == CORINFO_INTRINSIC_GetCurrentManagedThread ||
6791 intrinsicID == CORINFO_INTRINSIC_GetManagedThreadId)
6793 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_SPECIAL_INTRINSIC;
6797 assert(clsHnd || (opcode == CEE_CALLI)); // We're never verifying for CALLI, so this is not set.
6799 /* Some sanity checks */
6801 // CALL_VIRT and NEWOBJ must have a THIS pointer
6802 assert((opcode != CEE_CALLVIRT && opcode != CEE_NEWOBJ) || (sig->callConv & CORINFO_CALLCONV_HASTHIS));
6803 // static bit and hasThis are negations of one another
6804 assert(((mflags & CORINFO_FLG_STATIC) != 0) == ((sig->callConv & CORINFO_CALLCONV_HASTHIS) == 0));
6805 assert(call != nullptr);
6807 /*-------------------------------------------------------------------------
6808 * Check special-cases etc
6811 /* Special case - Check if it is a call to Delegate.Invoke(). */
6813 if (mflags & CORINFO_FLG_DELEGATE_INVOKE)
6815 assert(!compIsForInlining());
6816 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
6817 assert(mflags & CORINFO_FLG_FINAL);
6819 /* Set the delegate flag */
6820 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_DELEGATE_INV;
6822 if (callInfo->secureDelegateInvoke)
6824 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_SECURE_DELEGATE_INV;
6827 if (opcode == CEE_CALLVIRT)
6829 assert(mflags & CORINFO_FLG_FINAL);
6831 /* It should have the GTF_CALL_NULLCHECK flag set. Reset it */
6832 assert(call->gtFlags & GTF_CALL_NULLCHECK);
6833 call->gtFlags &= ~GTF_CALL_NULLCHECK;
6837 CORINFO_CLASS_HANDLE actualMethodRetTypeSigClass;
6838 actualMethodRetTypeSigClass = sig->retTypeSigClass;
6839 if (varTypeIsStruct(callRetTyp))
6841 callRetTyp = impNormStructType(actualMethodRetTypeSigClass);
6842 call->gtType = callRetTyp;
6846 /* Check for varargs */
6847 if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG ||
6848 (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
6850 BADCODE("Varargs not supported.");
6852 #endif // !FEATURE_VARARG
6854 if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG ||
6855 (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
6857 assert(!compIsForInlining());
6859 /* Set the right flags */
6861 call->gtFlags |= GTF_CALL_POP_ARGS;
6862 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_VARARGS;
6864 /* Can't allow tailcall for varargs as it is caller-pop. The caller
6865 will be expecting to pop a certain number of arguments, but if we
6866 tailcall to a function with a different number of arguments, we
6867 are hosed. There are ways around this (caller remembers esp value,
6868 varargs is not caller-pop, etc), but not worth it. */
6869 CLANG_FORMAT_COMMENT_ANCHOR;
6874 canTailCall = false;
6875 szCanTailCallFailReason = "Callee is varargs";
6879 /* Get the total number of arguments - this is already correct
6880 * for CALLI - for methods we have to get it from the call site */
6882 if (opcode != CEE_CALLI)
6885 unsigned numArgsDef = sig->numArgs;
6887 eeGetCallSiteSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, sig);
6890 // We cannot lazily obtain the signature of a vararg call because using its method
6891 // handle will give us only the declared argument list, not the full argument list.
6892 assert(call->gtCall.callSig == nullptr);
6893 call->gtCall.callSig = new (this, CMK_CorSig) CORINFO_SIG_INFO;
6894 *call->gtCall.callSig = *sig;
// For vararg calls we must be sure to load the return type of the
// method actually being called, as well as the return types
// specified in the vararg signature. With type equivalency, these types
// may not be the same.
6901 if (sig->retTypeSigClass != actualMethodRetTypeSigClass)
6903 if (actualMethodRetTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS &&
6904 sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR &&
6905 sig->retType != CORINFO_TYPE_VAR)
// Make sure that all valuetypes (including enums) that we push are loaded.
// This is to guarantee that if a GC is triggered from the prestub of this method,
// all valuetypes in the method signature are already loaded.
// We need to be able to find the size of the valuetypes, but we cannot
// do a class-load from within GC.
6912 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(actualMethodRetTypeSigClass);
6916 assert(numArgsDef <= sig->numArgs);
/* We will have "cookie" as the last argument but we cannot push
 * it on the operand stack because we may overflow, so we append it
 * to the arg list after we pop the other arguments */
6924 if (mflags & CORINFO_FLG_SECURITYCHECK)
6926 assert(!compIsForInlining());
6928 // Need security prolog/epilog callouts when there is
6929 // imperative security in the method. This is to give security a
6930 // chance to do any setup in the prolog and cleanup in the epilog if needed.
6932 if (compIsForInlining())
// Cannot handle this if the method being imported is itself an inlinee,
// because an inlinee method does not have its own frame.
6937 compInlineResult->NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
6942 tiSecurityCalloutNeeded = true;
6944 // If the current method calls a method which needs a security check,
6945 // (i.e. the method being compiled has imperative security)
6946 // we need to reserve a slot for the security object in
6947 // the current method's stack frame
6948 opts.compNeedSecurityCheck = true;
6952 //--------------------------- Inline NDirect ------------------------------
6954 // For inline cases we technically should look at both the current
6955 // block and the call site block (or just the latter if we've
6956 // fused the EH trees). However the block-related checks pertain to
6957 // EH and we currently won't inline a method with EH. So for
6958 // inlinees, just checking the call site block is sufficient.
6960 // New lexical block here to avoid compilation errors because of GOTOs.
6961 BasicBlock* block = compIsForInlining() ? impInlineInfo->iciBlock : compCurBB;
6962 impCheckForPInvokeCall(call, methHnd, sig, mflags, block);
6965 if (call->gtFlags & GTF_CALL_UNMANAGED)
6967 // We set up the unmanaged call by linking the frame, disabling GC, etc
6968 // This needs to be cleaned up on return
6971 canTailCall = false;
6972 szCanTailCallFailReason = "Callee is native";
6975 checkForSmallType = true;
6977 impPopArgsForUnmanagedCall(call, sig);
6981 else if ((opcode == CEE_CALLI) && (((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_STDCALL) ||
6982 ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_C) ||
6983 ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_THISCALL) ||
6984 ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_FASTCALL)))
6986 if (!info.compCompHnd->canGetCookieForPInvokeCalliSig(sig))
// Normally this only happens with inlining.
// However, a generic method (or type) being NGENd into another module
// can run into this issue as well. There's no easy fall-back for NGEN,
// so instead we fall back to the JIT.
6992 if (compIsForInlining())
6994 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_PINVOKE_COOKIE);
6998 IMPL_LIMITATION("Can't get PInvoke cookie (cross module generics)");
7004 GenTreePtr cookie = eeGetPInvokeCookie(sig);
7006 // This cookie is required to be either a simple GT_CNS_INT or
7007 // an indirection of a GT_CNS_INT
7009 GenTreePtr cookieConst = cookie;
7010 if (cookie->gtOper == GT_IND)
7012 cookieConst = cookie->gtOp.gtOp1;
7014 assert(cookieConst->gtOper == GT_CNS_INT);
7016 // Setting GTF_DONT_CSE on the GT_CNS_INT as well as on the GT_IND (if it exists) will ensure that
7017 // we won't allow this tree to participate in any CSE logic
7019 cookie->gtFlags |= GTF_DONT_CSE;
7020 cookieConst->gtFlags |= GTF_DONT_CSE;
7022 call->gtCall.gtCallCookie = cookie;
7026 canTailCall = false;
7027 szCanTailCallFailReason = "PInvoke calli";
7031 /*-------------------------------------------------------------------------
7032 * Create the argument list
7035 //-------------------------------------------------------------------------
7036 // Special case - for varargs we have an implicit last argument
7038 if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
7040 assert(!compIsForInlining());
7042 void *varCookie, *pVarCookie;
7043 if (!info.compCompHnd->canGetVarArgsHandle(sig))
7045 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_VARARGS_COOKIE);
7049 varCookie = info.compCompHnd->getVarArgsHandle(sig, &pVarCookie);
7050 assert((!varCookie) != (!pVarCookie));
7051 GenTreePtr cookie = gtNewIconEmbHndNode(varCookie, pVarCookie, GTF_ICON_VARG_HDL);
7053 assert(extraArg == nullptr);
7054 extraArg = gtNewArgList(cookie);
7057 //-------------------------------------------------------------------------
7058 // Extra arg for shared generic code and array methods
7060 // Extra argument containing instantiation information is passed in the
7061 // following circumstances:
7062 // (a) To the "Address" method on array classes; the extra parameter is
7063 // the array's type handle (a TypeDesc)
7064 // (b) To shared-code instance methods in generic structs; the extra parameter
7065 // is the struct's type handle (a vtable ptr)
7066 // (c) To shared-code per-instantiation non-generic static methods in generic
7067 // classes and structs; the extra parameter is the type handle
7068 // (d) To shared-code generic methods; the extra parameter is an
7069 // exact-instantiation MethodDesc
7071 // We also set the exact type context associated with the call so we can
7072 // inline the call correctly later on.
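// For example (illustrative): a call to a shared-code static method of a
// generic class, Gen<T>.M(), made on behalf of Gen<string> passes the
// exact type handle for Gen<string> as the hidden instantiation argument
// (case (c) above).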
7074 if (sig->callConv & CORINFO_CALLCONV_PARAMTYPE)
7076 assert(call->gtCall.gtCallType == CT_USER_FUNC);
7077 if (clsHnd == nullptr)
7079 NO_WAY("CALLI on parameterized type");
7082 assert(opcode != CEE_CALLI);
7084 GenTreePtr instParam;
7087 // Instantiated generic method
7088 if (((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_METHOD)
7090 CORINFO_METHOD_HANDLE exactMethodHandle =
7091 (CORINFO_METHOD_HANDLE)((SIZE_T)exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK);
7093 if (!exactContextNeedsRuntimeLookup)
7095 #ifdef FEATURE_READYTORUN_COMPILER
7096 if (opts.IsReadyToRun())
7099 impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_METHOD_HDL, exactMethodHandle);
7100 if (instParam == nullptr)
7108 instParam = gtNewIconEmbMethHndNode(exactMethodHandle);
7109 info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(exactMethodHandle);
7114 instParam = impTokenToHandle(pResolvedToken, &runtimeLookup, TRUE /*mustRestoreHandle*/);
7115 if (instParam == nullptr)
7122 // otherwise must be an instance method in a generic struct,
7123 // a static method in a generic type, or a runtime-generated array method
7126 assert(((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_CLASS);
7127 CORINFO_CLASS_HANDLE exactClassHandle =
7128 (CORINFO_CLASS_HANDLE)((SIZE_T)exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK);
7130 if (compIsForInlining() && (clsFlags & CORINFO_FLG_ARRAY) != 0)
7132 compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_ARRAY_METHOD);
7136 if ((clsFlags & CORINFO_FLG_ARRAY) && readonlyCall)
// We indicate "readonly" to the Address operation by using a null
// instParam.
7140 instParam = gtNewIconNode(0, TYP_REF);
7143 if (!exactContextNeedsRuntimeLookup)
7145 #ifdef FEATURE_READYTORUN_COMPILER
7146 if (opts.IsReadyToRun())
7149 impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_CLASS_HDL, exactClassHandle);
7150 if (instParam == nullptr)
7158 instParam = gtNewIconEmbClsHndNode(exactClassHandle);
7159 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(exactClassHandle);
7164 instParam = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup, TRUE /*mustRestoreHandle*/);
7165 if (instParam == nullptr)
7172 assert(extraArg == nullptr);
7173 extraArg = gtNewArgList(instParam);
7176 // Inlining may need the exact type context (exactContextHnd) if we're inlining shared generic code, in particular
7177 // to inline 'polytypic' operations such as static field accesses, type tests and method calls which
7178 // rely on the exact context. The exactContextHnd is passed back to the JitInterface at appropriate points.
7179 // exactContextHnd is not currently required when inlining shared generic code into shared
7180 // generic code, since the inliner aborts whenever shared code polytypic operations are encountered
7181 // (e.g. anything marked needsRuntimeLookup)
7182 if (exactContextNeedsRuntimeLookup)
7184 exactContextHnd = nullptr;
7187 //-------------------------------------------------------------------------
7188 // The main group of arguments
7190 args = call->gtCall.gtCallArgs = impPopList(sig->numArgs, &argFlags, sig, extraArg);
7194 call->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
7197 //-------------------------------------------------------------------------
7198 // The "this" pointer
7200 if (!(mflags & CORINFO_FLG_STATIC) && !((opcode == CEE_NEWOBJ) && (newobjThis == nullptr)))
7204 if (opcode == CEE_NEWOBJ)
7210 obj = impPopStack().val;
7211 obj = impTransformThis(obj, pConstrainedResolvedToken, constraintCallThisTransform);
7212 if (compDonotInline())
7218 /* Is this a virtual or interface call? */
7220 if ((call->gtFlags & GTF_CALL_VIRT_KIND_MASK) != GTF_CALL_NONVIRT)
7222 /* only true object pointers can be virtual */
7224 assert(obj->gtType == TYP_REF);
7230 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_NONVIRT_SAME_THIS;
7234 /* Store the "this" value in the call */
7236 call->gtFlags |= obj->gtFlags & GTF_GLOB_EFFECT;
7237 call->gtCall.gtCallObjp = obj;
7240 //-------------------------------------------------------------------------
7241 // The "this" pointer for "newobj"
7243 if (opcode == CEE_NEWOBJ)
7245 if (clsFlags & CORINFO_FLG_VAROBJSIZE)
7247 assert(!(clsFlags & CORINFO_FLG_ARRAY)); // arrays handled separately
// This is a 'new' of a variable sized object, where
// the constructor is to return the object. In this case
// the constructor claims to return VOID but we know it
// actually returns the new object.
7252 assert(callRetTyp == TYP_VOID);
7253 callRetTyp = TYP_REF;
7254 call->gtType = TYP_REF;
7255 impSpillSpecialSideEff();
7257 impPushOnStack(call, typeInfo(TI_REF, clsHnd));
7261 if (clsFlags & CORINFO_FLG_DELEGATE)
// The new inliner morphs it here in impImportCall.
// This will allow us to inline the call to the delegate constructor.
7265 call = fgOptimizeDelegateConstructor(call, &exactContextHnd);
7268 if (!bIntrinsicImported)
7271 #if defined(DEBUG) || defined(INLINE_DATA)
7273 // Keep track of the raw IL offset of the call
7274 call->gtCall.gtRawILOffset = rawILOffset;
7276 #endif // defined(DEBUG) || defined(INLINE_DATA)
7278 // Is it an inline candidate?
7279 impMarkInlineCandidate(call, exactContextHnd, callInfo);
7282 // append the call node.
7283 impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
// Now push the value of the 'new' onto the stack
7287 // This is a 'new' of a non-variable sized object.
7288 // Append the new node (op1) to the statement list,
7289 // and then push the local holding the value of this
7290 // new instruction on the stack.
7292 if (clsFlags & CORINFO_FLG_VALUECLASS)
7294 assert(newobjThis->gtOper == GT_ADDR && newobjThis->gtOp.gtOp1->gtOper == GT_LCL_VAR);
7296 unsigned tmp = newobjThis->gtOp.gtOp1->gtLclVarCommon.gtLclNum;
7297 impPushOnStack(gtNewLclvNode(tmp, lvaGetRealType(tmp)), verMakeTypeInfo(clsHnd).NormaliseForStack());
7301 if (newobjThis->gtOper == GT_COMMA)
7303 // In coreclr the callout can be inserted even if verification is disabled
7304 // so we cannot rely on tiVerificationNeeded alone
7306 // We must have inserted the callout. Get the real newobj.
7307 newobjThis = newobjThis->gtOp.gtOp2;
7310 assert(newobjThis->gtOper == GT_LCL_VAR);
7311 impPushOnStack(gtNewLclvNode(newobjThis->gtLclVarCommon.gtLclNum, TYP_REF), typeInfo(TI_REF, clsHnd));
7321 // This check cannot be performed for implicit tail calls for the reason
7322 // that impIsImplicitTailCallCandidate() is not checking whether return
7323 // types are compatible before marking a call node with PREFIX_TAILCALL_IMPLICIT.
7324 // As a result it is possible that in the following case, we find that
// the type stack is non-empty if Callee() is considered for implicit tail calling:
7327 // int Caller(..) { .... void Callee(); ret val; ... }
// Note that we cannot check return type compatibility before impImportCall(),
// as we don't have the required info, nor do we want to duplicate some of the
// logic of impImportCall() here.
7333 // For implicit tail calls, we perform this check after return types are
7334 // known to be compatible.
7335 if ((tailCall & PREFIX_TAILCALL_EXPLICIT) && (verCurrentState.esStackDepth != 0))
7337 BADCODE("Stack should be empty after tailcall");
7340 // Note that we can not relax this condition with genActualType() as
7341 // the calling convention dictates that the caller of a function with
7342 // a small-typed return value is responsible for normalizing the return val
7345 !impTailCallRetTypeCompatible(info.compRetType, info.compMethodInfo->args.retTypeClass, callRetTyp,
7346 callInfo->sig.retTypeClass))
7348 canTailCall = false;
7349 szCanTailCallFailReason = "Return types are not tail call compatible";
7352 // Stack empty check for implicit tail calls.
7353 if (canTailCall && (tailCall & PREFIX_TAILCALL_IMPLICIT) && (verCurrentState.esStackDepth != 0))
7355 #ifdef _TARGET_AMD64_
7356 // JIT64 Compatibility: Opportunistic tail call stack mismatch throws a VerificationException
7357 // in JIT64, not an InvalidProgramException.
7358 Verify(false, "Stack should be empty after tailcall");
7359 #else // _TARGET_64BIT_
7360 BADCODE("Stack should be empty after tailcall");
7361 #endif //!_TARGET_64BIT_
7364 // assert(compCurBB is not a catch, finally or filter block);
7365 // assert(compCurBB is not a try block protected by a finally block);
7367 // Check for permission to tailcall
7368 bool explicitTailCall = (tailCall & PREFIX_TAILCALL_EXPLICIT) != 0;
7370 assert(!explicitTailCall || compCurBB->bbJumpKind == BBJ_RETURN);
// True virtual or indirect calls shouldn't pass in a callee handle.
CORINFO_METHOD_HANDLE exactCalleeHnd = ((call->gtCall.gtCallType != CT_USER_FUNC) ||
((call->gtFlags & GTF_CALL_VIRT_KIND_MASK) != GTF_CALL_NONVIRT))
? nullptr
: methHnd;
7379 GenTreePtr thisArg = call->gtCall.gtCallObjp;
7381 if (info.compCompHnd->canTailCall(info.compMethodHnd, methHnd, exactCalleeHnd, explicitTailCall))
7384 if (explicitTailCall)
7386 // In case of explicit tail calls, mark it so that it is not considered
7388 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_EXPLICIT_TAILCALL;
7392 printf("\nGTF_CALL_M_EXPLICIT_TAILCALL bit set for call ");
7400 #if FEATURE_TAILCALL_OPT
7401 // Must be an implicit tail call.
7402 assert((tailCall & PREFIX_TAILCALL_IMPLICIT) != 0);
7404 // It is possible that a call node is both an inline candidate and marked
7405 // for opportunistic tail calling. In-lining happens before morhphing of
7406 // trees. If in-lining of an in-line candidate gets aborted for whatever
7407 // reason, it will survive to the morphing stage at which point it will be
7408 // transformed into a tail call after performing additional checks.
7410 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_IMPLICIT_TAILCALL;
7414 printf("\nGTF_CALL_M_IMPLICIT_TAILCALL bit set for call ");
7420 #else //! FEATURE_TAILCALL_OPT
7421 NYI("Implicit tail call prefix on a target which doesn't support opportunistic tail calls");
7423 #endif // FEATURE_TAILCALL_OPT
7426 // we can't report success just yet...
7430 canTailCall = false;
7431 // canTailCall reported its reasons already
7435 printf("\ninfo.compCompHnd->canTailCall returned false for call ");
7444 // If this assert fires it means that canTailCall was set to false without setting a reason!
7445 assert(szCanTailCallFailReason != nullptr);
7450 printf("\nRejecting %splicit tail call for call ", explicitTailCall ? "ex" : "im");
7452 printf(": %s\n", szCanTailCallFailReason);
7455 info.compCompHnd->reportTailCallDecision(info.compMethodHnd, methHnd, explicitTailCall, TAILCALL_FAIL,
7456 szCanTailCallFailReason);
7460 // Note: we assume that small return types are already normalized by the managed callee
7461 // or by the pinvoke stub for calls to unmanaged code.
7465 if (!bIntrinsicImported)
// Things that need to be checked when bIntrinsicImported is false.
7471 assert(call->gtOper == GT_CALL);
7472 assert(sig != nullptr);
7474 // Tail calls require us to save the call site's sig info so we can obtain an argument
7475 // copying thunk from the EE later on.
7476 if (call->gtCall.callSig == nullptr)
7478 call->gtCall.callSig = new (this, CMK_CorSig) CORINFO_SIG_INFO;
7479 *call->gtCall.callSig = *sig;
7482 if (compIsForInlining() && opcode == CEE_CALLVIRT)
7484 GenTreePtr callObj = call->gtCall.gtCallObjp;
7485 assert(callObj != nullptr);
7487 unsigned callKind = call->gtFlags & GTF_CALL_VIRT_KIND_MASK;
7489 if (((callKind != GTF_CALL_NONVIRT) || (call->gtFlags & GTF_CALL_NULLCHECK)) &&
7490 impInlineIsGuaranteedThisDerefBeforeAnySideEffects(call->gtCall.gtCallArgs, callObj,
7491 impInlineInfo->inlArgInfo))
7493 impInlineInfo->thisDereferencedFirst = true;
7497 #if defined(DEBUG) || defined(INLINE_DATA)
7499 // Keep track of the raw IL offset of the call
7500 call->gtCall.gtRawILOffset = rawILOffset;
7502 #endif // defined(DEBUG) || defined(INLINE_DATA)
7504 // Is it an inline candidate?
7505 impMarkInlineCandidate(call, exactContextHnd, callInfo);
7508 // Push or append the result of the call
7509 if (callRetTyp == TYP_VOID)
7511 if (opcode == CEE_NEWOBJ)
7513 // we actually did push something, so don't spill the thing we just pushed.
7514 assert(verCurrentState.esStackDepth > 0);
7515 impAppendTree(call, verCurrentState.esStackDepth - 1, impCurStmtOffs);
7519 impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
7524 impSpillSpecialSideEff();
7526 if (clsFlags & CORINFO_FLG_ARRAY)
7528 eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
7531 // Find the return type used for verification by interpreting the method signature.
7532 // NB: we are clobbering the already established sig.
7533 if (tiVerificationNeeded)
7535 // Actually, we never get the sig for the original method.
7536 sig = &(callInfo->verSig);
7539 typeInfo tiRetVal = verMakeTypeInfo(sig->retType, sig->retTypeClass);
7540 tiRetVal.NormaliseForStack();
7542 // The CEE_READONLY prefix modifies the verification semantics of an Address
7543 // operation on an array type.
7544 if ((clsFlags & CORINFO_FLG_ARRAY) && readonlyCall && tiRetVal.IsByRef())
7546 tiRetVal.SetIsReadonlyByRef();
7549 if (tiVerificationNeeded)
7551 // We assume all calls return permanent home byrefs. If they
7552 // didn't they wouldn't be verifiable. This is also covering
7553 // the Address() helper for multidimensional arrays.
7554 if (tiRetVal.IsByRef())
7556 tiRetVal.SetIsPermanentHomeByRef();
7560 if (call->gtOper == GT_CALL)
7562 // Sometimes "call" is not a GT_CALL (if we imported an intrinsic that didn't turn into a call)
7563 if (varTypeIsStruct(callRetTyp))
7565 call = impFixupCallStructReturn(call, sig->retTypeClass);
7568 if ((call->gtFlags & GTF_CALL_INLINE_CANDIDATE) != 0)
7570 assert(opts.OptEnabled(CLFLG_INLINING));
7572 // Make the call its own tree (spill the stack if needed).
7573 impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
7575 // TODO: Still using the widened type.
7576 call = gtNewInlineCandidateReturnExpr(call, genActualType(callRetTyp));
7580 // For non-candidates we must also spill, since we
// might have locals live on the eval stack that this
// call could modify.
7583 impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("non-inline candidate call"));
7587 if (!bIntrinsicImported)
7589 //-------------------------------------------------------------------------
7591 /* If the call is of a small type and the callee is managed, the callee will normalize the result
7593 However, we need to normalize small type values returned by unmanaged
7594 functions (pinvoke). The pinvoke stub does the normalization, but we need to do it here
7595 if we use the shorter inlined pinvoke stub. */
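// For example (illustrative): an inlined pinvoke whose native callee
// returns an 8-bit value may leave the upper bits of the return register
// undefined, so we insert a downcast here to re-normalize the small
// return type ourselves.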
7597 if (checkForSmallType && varTypeIsIntegral(callRetTyp) && genTypeSize(callRetTyp) < genTypeSize(TYP_INT))
7599 call = gtNewCastNode(genActualType(callRetTyp), call, callRetTyp);
7603 impPushOnStack(call, tiRetVal);
7606 // VSD functions get a new call target each time we getCallInfo, so clear the cache.
7607 // Also, the call info cache for CALLI instructions is largely incomplete, so clear it out.
7608 // if ( (opcode == CEE_CALLI) || (callInfoCache.fetchCallInfo().kind == CORINFO_VIRTUALCALL_STUB))
7609 // callInfoCache.uncacheCallInfo();
7614 #pragma warning(pop)
7617 bool Compiler::impMethodInfo_hasRetBuffArg(CORINFO_METHOD_INFO* methInfo)
7619 CorInfoType corType = methInfo->args.retType;
7621 if ((corType == CORINFO_TYPE_VALUECLASS) || (corType == CORINFO_TYPE_REFANY))
7623 // We have some kind of STRUCT being returned
7625 structPassingKind howToReturnStruct = SPK_Unknown;
7627 var_types returnType = getReturnTypeForStruct(methInfo->args.retTypeClass, &howToReturnStruct);
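// For example (illustrative): a struct too large for the target's struct return
// registers is classified as SPK_ByReference; such a method carries a hidden
// return buffer argument, and this routine reports true for it.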
7629 if (howToReturnStruct == SPK_ByReference)
7640 var_types Compiler::impImportJitTestLabelMark(int numArgs)
7642 TestLabelAndNum tlAndN;
7646 StackEntry se = impPopStack();
7647 assert(se.seTypeInfo.GetType() == TI_INT);
7648 GenTreePtr val = se.val;
7649 assert(val->IsCnsIntOrI());
7650 tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue();
7652 else if (numArgs == 3)
7654 StackEntry se = impPopStack();
7655 assert(se.seTypeInfo.GetType() == TI_INT);
7656 GenTreePtr val = se.val;
7657 assert(val->IsCnsIntOrI());
7658 tlAndN.m_num = val->AsIntConCommon()->IconValue();
7660 assert(se.seTypeInfo.GetType() == TI_INT);
7662 assert(val->IsCnsIntOrI());
7663 tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue();
7670 StackEntry expSe = impPopStack();
7671 GenTreePtr node = expSe.val;
// There are a small number of special cases where we actually put the annotation on a subnode.
7674 if (tlAndN.m_tl == TL_LoopHoist && tlAndN.m_num >= 100)
7676 // A loop hoist annotation with value >= 100 means that the expression should be a static field access,
7677 // a GT_IND of a static field address, which should be the sum of a (hoistable) helper call and possibly some
// offset within the static field block whose address is returned by the helper call.
7679 // The annotation is saying that this address calculation, but not the entire access, should be hoisted.
7680 GenTreePtr helperCall = nullptr;
7681 assert(node->OperGet() == GT_IND);
7682 tlAndN.m_num -= 100;
7683 GetNodeTestData()->Set(node->gtOp.gtOp1, tlAndN);
7684 GetNodeTestData()->Remove(node);
7688 GetNodeTestData()->Set(node, tlAndN);
7691 impPushOnStack(node, expSe.seTypeInfo);
7692 return node->TypeGet();
7696 //-----------------------------------------------------------------------------------
// impFixupCallStructReturn: For a call node that returns a struct type, either
// adjust the return type to an enregisterable type or set the flag to indicate
// a struct return via a retbuf arg.
7702 // call - GT_CALL GenTree node
7703 // retClsHnd - Class handle of return type of the call
// Returns the new GenTree node after fixing the struct return of the call node
7708 GenTreePtr Compiler::impFixupCallStructReturn(GenTreePtr call, CORINFO_CLASS_HANDLE retClsHnd)
7710 assert(call->gtOper == GT_CALL);
7712 if (!varTypeIsStruct(call))
7717 call->gtCall.gtRetClsHnd = retClsHnd;
7719 GenTreeCall* callNode = call->AsCall();
7721 #if FEATURE_MULTIREG_RET
7722 // Initialize Return type descriptor of call node
7723 ReturnTypeDesc* retTypeDesc = callNode->GetReturnTypeDesc();
7724 retTypeDesc->InitializeStructReturnType(this, retClsHnd);
7725 #endif // FEATURE_MULTIREG_RET
7727 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
// Not allowed for FEATURE_CORECLR, which is the only SKU available for System V OSs.
7730 assert(!callNode->IsVarargs() && "varargs not allowed for System V OSs.");
7732 // The return type will remain as the incoming struct type unless normalized to a
7733 // single eightbyte return type below.
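// For example (an illustrative sketch of the SysV AMD64 classification): a
// struct { int a; int b; } forms a single INTEGER eightbyte and is returned in
// RAX, so such a call is re-typed to TYP_LONG below, while a
// struct { double d; long l; } needs two return registers (XMM0 and RAX) and
// keeps its struct type.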
7734 callNode->gtReturnType = call->gtType;
7736 unsigned retRegCount = retTypeDesc->GetReturnRegCount();
7737 if (retRegCount != 0)
7739 if (retRegCount == 1)
7741 // struct returned in a single register
7742 callNode->gtReturnType = retTypeDesc->GetReturnRegType(0);
7746 // must be a struct returned in two registers
7747 assert(retRegCount == 2);
7749 if ((!callNode->CanTailCall()) && (!callNode->IsInlineCandidate()))
// Force a call returning a multi-reg struct to be always of the IR form
//   tmp = call
7754 // No need to assign a multi-reg struct to a local var if:
7755 // - It is a tail call or
// - The call is marked for inlining later
7757 return impAssignMultiRegTypeToVar(call, retClsHnd);
// struct not returned in registers, i.e. returned via a hidden retbuf arg.
7764 callNode->gtCallMoreFlags |= GTF_CALL_M_RETBUFFARG;
7767 #else // not FEATURE_UNIX_AMD64_STRUCT_PASSING
7769 #if FEATURE_MULTIREG_RET && defined(_TARGET_ARM_)
// There is no fixup necessary if the return type is an HFA struct.
// HFA structs are returned in registers for ARM32 and ARM64.
7773 if (!call->gtCall.IsVarargs() && IsHfa(retClsHnd))
7775 if (call->gtCall.CanTailCall())
7777 if (info.compIsVarArgs)
// We cannot tail call because control needs to return to fix up the calling
// convention for result return.
7781 call->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
// If we can tail call returning an HFA, then don't assign it to
// a variable back and forth.
7791 if (call->gtFlags & GTF_CALL_INLINE_CANDIDATE)
7796 unsigned retRegCount = retTypeDesc->GetReturnRegCount();
7797 if (retRegCount >= 2)
7799 return impAssignMultiRegTypeToVar(call, retClsHnd);
7802 #endif // _TARGET_ARM_
7804 // Check for TYP_STRUCT type that wraps a primitive type
7805 // Such structs are returned using a single register
7806 // and we change the return type on those calls here.
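// For example (illustrative): struct S { int x; } is returned just like an int,
// so getReturnTypeForStruct reports SPK_PrimitiveType with TYP_INT and the call
// is re-typed below instead of being given a retbuf arg.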
7808 structPassingKind howToReturnStruct;
7809 var_types returnType = getReturnTypeForStruct(retClsHnd, &howToReturnStruct);
7811 if (howToReturnStruct == SPK_ByReference)
7813 assert(returnType == TYP_UNKNOWN);
7814 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_RETBUFFARG;
7818 assert(returnType != TYP_UNKNOWN);
7819 call->gtCall.gtReturnType = returnType;
7821 // ToDo: Refactor this common code sequence into its own method as it is used 4+ times
7822 if ((returnType == TYP_LONG) && (compLongUsed == false))
7824 compLongUsed = true;
7826 else if (((returnType == TYP_FLOAT) || (returnType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
7828 compFloatingPointUsed = true;
7831 #if FEATURE_MULTIREG_RET
7832 unsigned retRegCount = retTypeDesc->GetReturnRegCount();
7833 assert(retRegCount != 0);
7835 if (retRegCount >= 2)
7837 if ((!callNode->CanTailCall()) && (!callNode->IsInlineCandidate()))
// Force a call returning a multi-reg struct to be always of the IR form
//   tmp = call
7842 // No need to assign a multi-reg struct to a local var if:
7843 // - It is a tail call or
// - The call is marked for inlining later
7845 return impAssignMultiRegTypeToVar(call, retClsHnd);
7848 #endif // FEATURE_MULTIREG_RET
7851 #endif // not FEATURE_UNIX_AMD64_STRUCT_PASSING
7856 /*****************************************************************************
7857 For struct return values, re-type the operand in the case where the ABI
does not use a struct return buffer.
Note that this method is only called for !_TARGET_X86_.
7862 GenTreePtr Compiler::impFixupStructReturnType(GenTreePtr op, CORINFO_CLASS_HANDLE retClsHnd)
7864 assert(varTypeIsStruct(info.compRetType));
7865 assert(info.compRetBuffArg == BAD_VAR_NUM);
7867 #if defined(_TARGET_XARCH_)
7869 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
7870 // No VarArgs for CoreCLR on x64 Unix
7871 assert(!info.compIsVarArgs);
7873 // Is method returning a multi-reg struct?
7874 if (varTypeIsStruct(info.compRetNativeType) && IsMultiRegReturnedType(retClsHnd))
7876 // In case of multi-reg struct return, we force IR to be one of the following:
7877 // GT_RETURN(lclvar) or GT_RETURN(call). If op is anything other than a
// lclvar or call, it is assigned to a temp to create: tmp = op and GT_RETURN(tmp).
7880 if (op->gtOper == GT_LCL_VAR)
7882 // Make sure that this struct stays in memory and doesn't get promoted.
7883 unsigned lclNum = op->gtLclVarCommon.gtLclNum;
7884 lvaTable[lclNum].lvIsMultiRegRet = true;
7886 // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
7887 op->gtFlags |= GTF_DONT_CSE;
7892 if (op->gtOper == GT_CALL)
7897 return impAssignMultiRegTypeToVar(op, retClsHnd);
7899 #else // !FEATURE_UNIX_AMD64_STRUCT_PASSING
7900 assert(info.compRetNativeType != TYP_STRUCT);
7901 #endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING
7903 #elif FEATURE_MULTIREG_RET && defined(_TARGET_ARM_)
7905 if (varTypeIsStruct(info.compRetNativeType) && !info.compIsVarArgs && IsHfa(retClsHnd))
7907 if (op->gtOper == GT_LCL_VAR)
// This LCL_VAR is an HFA return value; it stays as a TYP_STRUCT
7910 unsigned lclNum = op->gtLclVarCommon.gtLclNum;
7911 // Make sure this struct type stays as struct so that we can return it as an HFA
7912 lvaTable[lclNum].lvIsMultiRegRet = true;
7914 // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
7915 op->gtFlags |= GTF_DONT_CSE;
7920 if (op->gtOper == GT_CALL)
7922 if (op->gtCall.IsVarargs())
// We cannot tail call because control needs to return to fix up the calling
// convention for result return.
7926 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL;
7927 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
7934 return impAssignMultiRegTypeToVar(op, retClsHnd);
7937 #elif FEATURE_MULTIREG_RET && defined(_TARGET_ARM64_)
7939 // Is method returning a multi-reg struct?
7940 if (IsMultiRegReturnedType(retClsHnd))
7942 if (op->gtOper == GT_LCL_VAR)
7944 // This LCL_VAR stays as a TYP_STRUCT
7945 unsigned lclNum = op->gtLclVarCommon.gtLclNum;
7947 // Make sure this struct type is not struct promoted
7948 lvaTable[lclNum].lvIsMultiRegRet = true;
7950 // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
7951 op->gtFlags |= GTF_DONT_CSE;
7956 if (op->gtOper == GT_CALL)
7958 if (op->gtCall.IsVarargs())
// We cannot tail call because control needs to return to fix up the calling
// convention for result return.
7962 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL;
7963 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
7970 return impAssignMultiRegTypeToVar(op, retClsHnd);
7973 #endif // FEATURE_MULTIREG_RET && FEATURE_HFA
// Adjust the type away from struct to integral;
// no normalization is needed.
7978 if (op->gtOper == GT_LCL_VAR)
7980 op->ChangeOper(GT_LCL_FLD);
7982 else if (op->gtOper == GT_OBJ)
7984 GenTreePtr op1 = op->AsObj()->Addr();
7986 // We will fold away OBJ/ADDR
7987 // except for OBJ/ADDR/INDEX
7988 // as the array type influences the array element's offset
7989 // Later in this method we change op->gtType to info.compRetNativeType
7990 // This is not correct when op is a GT_INDEX as the starting offset
7991 // for the array elements 'elemOffs' is different for an array of
7992 // TYP_REF than an array of TYP_STRUCT (which simply wraps a TYP_REF)
7993 // Also refer to the GTF_INX_REFARR_LAYOUT flag
7995 if ((op1->gtOper == GT_ADDR) && (op1->gtOp.gtOp1->gtOper != GT_INDEX))
7997 // Change '*(&X)' to 'X' and see if we can do better
7998 op = op1->gtOp.gtOp1;
7999 goto REDO_RETURN_NODE;
8001 op->gtObj.gtClass = NO_CLASS_HANDLE;
8002 op->ChangeOperUnchecked(GT_IND);
8003 op->gtFlags |= GTF_IND_TGTANYWHERE;
8005 else if (op->gtOper == GT_CALL)
8007 if (op->AsCall()->TreatAsHasRetBufArg(this))
8009 // This must be one of those 'special' helpers that don't
8010 // really have a return buffer, but instead use it as a way
8011 // to keep the trees cleaner with fewer address-taken temps.
// Well now we have to materialize the return buffer as
8014 // an address-taken temp. Then we can return the temp.
8016 // NOTE: this code assumes that since the call directly
8017 // feeds the return, then the call must be returning the
8018 // same structure/class/type.
8020 unsigned tmpNum = lvaGrabTemp(true DEBUGARG("pseudo return buffer"));
8022 // No need to spill anything as we're about to return.
8023 impAssignTempGen(tmpNum, op, info.compMethodInfo->args.retTypeClass, (unsigned)CHECK_SPILL_NONE);
8025 // Don't create both a GT_ADDR & GT_OBJ just to undo all of that; instead,
8026 // jump directly to a GT_LCL_FLD.
8027 op = gtNewLclvNode(tmpNum, info.compRetNativeType);
8028 op->ChangeOper(GT_LCL_FLD);
8032 assert(info.compRetNativeType == op->gtCall.gtReturnType);
8034 // Don't change the gtType of the node just yet, it will get changed later.
8038 else if (op->gtOper == GT_COMMA)
8040 op->gtOp.gtOp2 = impFixupStructReturnType(op->gtOp.gtOp2, retClsHnd);
8043 op->gtType = info.compRetNativeType;
8048 /*****************************************************************************
CEE_LEAVE may be jumping out of a protected block, viz., a catch or a
8050 finally-protected try. We find the finally blocks protecting the current
8051 offset (in order) by walking over the complete exception table and
8052 finding enclosing clauses. This assumes that the table is sorted.
8053 This will create a series of BBJ_CALLFINALLY -> BBJ_CALLFINALLY ... -> BBJ_ALWAYS.
8055 If we are leaving a catch handler, we need to attach the
8056 CPX_ENDCATCHes to the correct BBJ_CALLFINALLY blocks.
8058 After this function, the BBJ_LEAVE block has been converted to a different type.
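For example (an illustrative sketch): a leave crossing two nested
finally-protected trys,

    try { try { ... leave L; } finally { F1 } } finally { F2 }   L: ...

becomes the chain

    BBJ_CALLFINALLY(F1) -> BBJ_ALWAYS -> BBJ_CALLFINALLY(F2) -> BBJ_ALWAYS -> L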
8061 #if !FEATURE_EH_FUNCLETS
8063 void Compiler::impImportLeave(BasicBlock* block)
8068 printf("\nBefore import CEE_LEAVE:\n");
8069 fgDispBasicBlocks();
8074 bool invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created)
8075 unsigned blkAddr = block->bbCodeOffs;
8076 BasicBlock* leaveTarget = block->bbJumpDest;
8077 unsigned jmpAddr = leaveTarget->bbCodeOffs;
// LEAVE clears the stack: spill any side effects and set the stack depth to 0
8081 impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave"));
8082 verCurrentState.esStackDepth = 0;
8084 assert(block->bbJumpKind == BBJ_LEAVE);
8085 assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != NULL); // should be a BB boundary
8087 BasicBlock* step = DUMMY_INIT(NULL);
8088 unsigned encFinallies = 0; // Number of enclosing finallies.
8089 GenTreePtr endCatches = NULL;
8090 GenTreePtr endLFin = NULL; // The statement tree to indicate the end of locally-invoked finally.
8095 for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
8097 // Grab the handler offsets
8099 IL_OFFSET tryBeg = HBtab->ebdTryBegOffs();
8100 IL_OFFSET tryEnd = HBtab->ebdTryEndOffs();
8101 IL_OFFSET hndBeg = HBtab->ebdHndBegOffs();
8102 IL_OFFSET hndEnd = HBtab->ebdHndEndOffs();
8104 /* Is this a catch-handler we are CEE_LEAVEing out of?
8105 * If so, we need to call CORINFO_HELP_ENDCATCH.
8108 if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd))
8110 // Can't CEE_LEAVE out of a finally/fault handler
8111 if (HBtab->HasFinallyOrFaultHandler())
8112 BADCODE("leave out of fault/finally block");
8114 // Create the call to CORINFO_HELP_ENDCATCH
8115 GenTreePtr endCatch = gtNewHelperCallNode(CORINFO_HELP_ENDCATCH, TYP_VOID);
8117 // Make a list of all the currently pending endCatches
8119 endCatches = gtNewOperNode(GT_COMMA, TYP_VOID, endCatches, endCatch);
8121 endCatches = endCatch;
8126 printf("impImportLeave - BB%02u jumping out of catch handler EH#%u, adding call to "
8127 "CORINFO_HELP_ENDCATCH\n",
8128 block->bbNum, XTnum);
8132 else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
8133 !jitIsBetween(jmpAddr, tryBeg, tryEnd))
8135 /* This is a finally-protected try we are jumping out of */
8137 /* If there are any pending endCatches, and we have already
8138 jumped out of a finally-protected try, then the endCatches
8139 have to be put in a block in an outer try for async
8140 exceptions to work correctly.
Else, just append them to the original block */
8143 BasicBlock* callBlock;
8145 assert(!encFinallies == !endLFin); // if we have finallies, we better have an endLFin tree, and vice-versa
8147 if (encFinallies == 0)
8149 assert(step == DUMMY_INIT(NULL));
8151 callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY
8154 impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
8159 printf("impImportLeave - jumping out of a finally-protected try, convert block to BBJ_CALLFINALLY "
8160 "block BB%02u [%08p]\n",
8161 callBlock->bbNum, dspPtr(callBlock));
8167 assert(step != DUMMY_INIT(NULL));
8169 /* Calling the finally block */
8170 callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, XTnum + 1, 0, step);
8171 assert(step->bbJumpKind == BBJ_ALWAYS);
8172 step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next
8173 // finally in the chain)
8174 step->bbJumpDest->bbRefs++;
8176 /* The new block will inherit this block's weight */
8177 callBlock->setBBWeight(block->bbWeight);
8178 callBlock->bbFlags |= block->bbFlags & BBF_RUN_RARELY;
8183 printf("impImportLeave - jumping out of a finally-protected try, new BBJ_CALLFINALLY block BB%02u "
8185 callBlock->bbNum, dspPtr(callBlock));
8189 GenTreePtr lastStmt;
8193 lastStmt = gtNewStmt(endCatches);
8194 endLFin->gtNext = lastStmt;
8195 lastStmt->gtPrev = endLFin;
8202 // note that this sets BBF_IMPORTED on the block
8203 impEndTreeList(callBlock, endLFin, lastStmt);
8206 step = fgNewBBafter(BBJ_ALWAYS, callBlock, true);
8207 /* The new block will inherit this block's weight */
8208 step->setBBWeight(block->bbWeight);
8209 step->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS;
8214 printf("impImportLeave - jumping out of a finally-protected try, created step (BBJ_ALWAYS) block "
8216 step->bbNum, dspPtr(step));
8220 unsigned finallyNesting = compHndBBtab[XTnum].ebdHandlerNestingLevel;
8221 assert(finallyNesting <= compHndBBtabCount);
8223 callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler.
8224 endLFin = new (this, GT_END_LFIN) GenTreeVal(GT_END_LFIN, TYP_VOID, finallyNesting);
8225 endLFin = gtNewStmt(endLFin);
8230 invalidatePreds = true;
/* Append the remaining endCatches, if any */
8236 assert(!encFinallies == !endLFin);
8238 if (encFinallies == 0)
8240 assert(step == DUMMY_INIT(NULL));
8241 block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS
8244 impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
8249 printf("impImportLeave - no enclosing finally-protected try blocks; convert CEE_LEAVE block to BBJ_ALWAYS "
8250 "block BB%02u [%08p]\n",
8251 block->bbNum, dspPtr(block));
8257 // If leaveTarget is the start of another try block, we want to make sure that
// we do not insert finalStep into that try block. Hence, we find the enclosing
// try block.
8260 unsigned tryIndex = bbFindInnermostCommonTryRegion(step, leaveTarget);
8262 // Insert a new BB either in the try region indicated by tryIndex or
8263 // the handler region indicated by leaveTarget->bbHndIndex,
8264 // depending on which is the inner region.
8265 BasicBlock* finalStep = fgNewBBinRegion(BBJ_ALWAYS, tryIndex, leaveTarget->bbHndIndex, step);
8266 finalStep->bbFlags |= BBF_KEEP_BBJ_ALWAYS;
8267 step->bbJumpDest = finalStep;
8269 /* The new block will inherit this block's weight */
8270 finalStep->setBBWeight(block->bbWeight);
8271 finalStep->bbFlags |= block->bbFlags & BBF_RUN_RARELY;
8276 printf("impImportLeave - finalStep block required (encFinallies(%d) > 0), new block BB%02u [%08p]\n",
8277 encFinallies, finalStep->bbNum, dspPtr(finalStep));
8281 GenTreePtr lastStmt;
8285 lastStmt = gtNewStmt(endCatches);
8286 endLFin->gtNext = lastStmt;
8287 lastStmt->gtPrev = endLFin;
8294 impEndTreeList(finalStep, endLFin, lastStmt);
8296 finalStep->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE
8298 // Queue up the jump target for importing
8300 impImportBlockPending(leaveTarget);
8302 invalidatePreds = true;
8305 if (invalidatePreds && fgComputePredsDone)
8307 JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n");
8312 fgVerifyHandlerTab();
8316 printf("\nAfter import CEE_LEAVE:\n");
8317 fgDispBasicBlocks();
8323 #else // FEATURE_EH_FUNCLETS
8325 void Compiler::impImportLeave(BasicBlock* block)
printf("\nBefore import CEE_LEAVE in BB%02u (targeting BB%02u):\n", block->bbNum, block->bbJumpDest->bbNum);
8331 fgDispBasicBlocks();
8336 bool invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created)
8337 unsigned blkAddr = block->bbCodeOffs;
8338 BasicBlock* leaveTarget = block->bbJumpDest;
8339 unsigned jmpAddr = leaveTarget->bbCodeOffs;
// LEAVE clears the stack: spill any side effects and set the stack depth to 0
8343 impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave"));
8344 verCurrentState.esStackDepth = 0;
8346 assert(block->bbJumpKind == BBJ_LEAVE);
8347 assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != nullptr); // should be a BB boundary
8349 BasicBlock* step = nullptr;
// No step type; step == nullptr.
8356 // Is the step block the BBJ_ALWAYS block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair?
8357 // That is, is step->bbJumpDest where a finally will return to?
8360 // The step block is a catch return.
8363 // The step block is in a "try", created as the target for a finally return or the target for a catch return.
8366 StepType stepType = ST_None;
8371 for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
8373 // Grab the handler offsets
8375 IL_OFFSET tryBeg = HBtab->ebdTryBegOffs();
8376 IL_OFFSET tryEnd = HBtab->ebdTryEndOffs();
8377 IL_OFFSET hndBeg = HBtab->ebdHndBegOffs();
8378 IL_OFFSET hndEnd = HBtab->ebdHndEndOffs();
8380 /* Is this a catch-handler we are CEE_LEAVEing out of?
8383 if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd))
8385 // Can't CEE_LEAVE out of a finally/fault handler
8386 if (HBtab->HasFinallyOrFaultHandler())
8388 BADCODE("leave out of fault/finally block");
8391 /* We are jumping out of a catch */
8393 if (step == nullptr)
8396 step->bbJumpKind = BBJ_EHCATCHRET; // convert the BBJ_LEAVE to BBJ_EHCATCHRET
8397 stepType = ST_Catch;
8402 printf("impImportLeave - jumping out of a catch (EH#%u), convert block BB%02u to BBJ_EHCATCHRET "
8404 XTnum, step->bbNum);
8410 BasicBlock* exitBlock;
/* Create a new catch exit block in the catch region for the existing step block to jump to in this
scope */
8414 exitBlock = fgNewBBinRegion(BBJ_EHCATCHRET, 0, XTnum + 1, step);
8416 assert(step->bbJumpKind == BBJ_ALWAYS || step->bbJumpKind == BBJ_EHCATCHRET);
8417 step->bbJumpDest = exitBlock; // the previous step (maybe a call to a nested finally, or a nested catch
8418 // exit) returns to this block
8419 step->bbJumpDest->bbRefs++;
8421 #if defined(_TARGET_ARM_)
8422 if (stepType == ST_FinallyReturn)
8424 assert(step->bbJumpKind == BBJ_ALWAYS);
8425 // Mark the target of a finally return
8426 step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
8428 #endif // defined(_TARGET_ARM_)
8430 /* The new block will inherit this block's weight */
8431 exitBlock->setBBWeight(block->bbWeight);
8432 exitBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8434 /* This exit block is the new step */
8436 stepType = ST_Catch;
8438 invalidatePreds = true;
8443 printf("impImportLeave - jumping out of a catch (EH#%u), new BBJ_EHCATCHRET block BB%02u\n", XTnum,
8449 else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
8450 !jitIsBetween(jmpAddr, tryBeg, tryEnd))
8452 /* We are jumping out of a finally-protected try */
8454 BasicBlock* callBlock;
8456 if (step == nullptr)
8458 #if FEATURE_EH_CALLFINALLY_THUNKS
8460 // Put the call to the finally in the enclosing region.
8461 unsigned callFinallyTryIndex =
8462 (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1;
8463 unsigned callFinallyHndIndex =
8464 (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1;
8465 callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, block);
8467 // Convert the BBJ_LEAVE to BBJ_ALWAYS, jumping to the new BBJ_CALLFINALLY. This is because
8468 // the new BBJ_CALLFINALLY is in a different EH region, thus it can't just replace the BBJ_LEAVE,
8469 // which might be in the middle of the "try". In most cases, the BBJ_ALWAYS will jump to the
8470 // next block, and flow optimizations will remove it.
8471 block->bbJumpKind = BBJ_ALWAYS;
8472 block->bbJumpDest = callBlock;
8473 block->bbJumpDest->bbRefs++;
8475 /* The new block will inherit this block's weight */
8476 callBlock->setBBWeight(block->bbWeight);
8477 callBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8482 printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block BB%02u to "
8483 "BBJ_ALWAYS, add BBJ_CALLFINALLY block BB%02u\n",
8484 XTnum, block->bbNum, callBlock->bbNum);
8488 #else // !FEATURE_EH_CALLFINALLY_THUNKS
8491 callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY
8496 printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block BB%02u to "
8497 "BBJ_CALLFINALLY block\n",
8498 XTnum, callBlock->bbNum);
8502 #endif // !FEATURE_EH_CALLFINALLY_THUNKS
8506 // Calling the finally block. We already have a step block that is either the call-to-finally from a
8507 // more nested try/finally (thus we are jumping out of multiple nested 'try' blocks, each protected by
8508 // a 'finally'), or the step block is the return from a catch.
8510 // Due to ThreadAbortException, we can't have the catch return target the call-to-finally block
8511 // directly. Note that if a 'catch' ends without resetting the ThreadAbortException, the VM will
8512 // automatically re-raise the exception, using the return address of the catch (that is, the target
8513 // block of the BBJ_EHCATCHRET) as the re-raise address. If this address is in a finally, the VM will
8514 // refuse to do the re-raise, and the ThreadAbortException will get eaten (and lost). On AMD64/ARM64,
8515 // we put the call-to-finally thunk in a special "cloned finally" EH region that does look like a
8516 // finally clause to the VM. Thus, on these platforms, we can't have BBJ_EHCATCHRET target a
8517 // BBJ_CALLFINALLY directly. (Note that on ARM32, we don't mark the thunk specially -- it lives directly
8518 // within the 'try' region protected by the finally, since we generate code in such a way that execution
// never returns to the call-to-finally call, and the finally-protected 'try' region doesn't appear on
// stack walks.)
8522 assert(step->bbJumpKind == BBJ_ALWAYS || step->bbJumpKind == BBJ_EHCATCHRET);
8524 #if FEATURE_EH_CALLFINALLY_THUNKS
8525 if (step->bbJumpKind == BBJ_EHCATCHRET)
8527 // Need to create another step block in the 'try' region that will actually branch to the
8528 // call-to-finally thunk.
8529 BasicBlock* step2 = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step);
8530 step->bbJumpDest = step2;
8531 step->bbJumpDest->bbRefs++;
8532 step2->setBBWeight(block->bbWeight);
8533 step2->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8538 printf("impImportLeave - jumping out of a finally-protected try (EH#%u), step block is "
8539 "BBJ_EHCATCHRET (BB%02u), new BBJ_ALWAYS step-step block BB%02u\n",
8540 XTnum, step->bbNum, step2->bbNum);
8545 assert(stepType == ST_Catch); // Leave it as catch type for now.
8547 #endif // FEATURE_EH_CALLFINALLY_THUNKS
8549 #if FEATURE_EH_CALLFINALLY_THUNKS
8550 unsigned callFinallyTryIndex =
8551 (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1;
8552 unsigned callFinallyHndIndex =
8553 (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1;
8554 #else // !FEATURE_EH_CALLFINALLY_THUNKS
8555 unsigned callFinallyTryIndex = XTnum + 1;
8556 unsigned callFinallyHndIndex = 0; // don't care
8557 #endif // !FEATURE_EH_CALLFINALLY_THUNKS
8559 callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, step);
8560 step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next
8561 // finally in the chain)
8562 step->bbJumpDest->bbRefs++;
8564 #if defined(_TARGET_ARM_)
8565 if (stepType == ST_FinallyReturn)
8567 assert(step->bbJumpKind == BBJ_ALWAYS);
8568 // Mark the target of a finally return
8569 step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
8571 #endif // defined(_TARGET_ARM_)
8573 /* The new block will inherit this block's weight */
8574 callBlock->setBBWeight(block->bbWeight);
8575 callBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8580 printf("impImportLeave - jumping out of a finally-protected try (EH#%u), new BBJ_CALLFINALLY block "
8582 XTnum, callBlock->bbNum);
8587 step = fgNewBBafter(BBJ_ALWAYS, callBlock, true);
8588 stepType = ST_FinallyReturn;
8590 /* The new block will inherit this block's weight */
8591 step->setBBWeight(block->bbWeight);
8592 step->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS;
8597 printf("impImportLeave - jumping out of a finally-protected try (EH#%u), created step (BBJ_ALWAYS) "
8599 XTnum, step->bbNum);
8603 callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler.
8605 invalidatePreds = true;
8607 else if (HBtab->HasCatchHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
8608 !jitIsBetween(jmpAddr, tryBeg, tryEnd))
8610 // We are jumping out of a catch-protected try.
8612 // If we are returning from a call to a finally, then we must have a step block within a try
// that is protected by a catch. This is so that, when unwinding from that finally (e.g., if code within the
8614 // finally raises an exception), the VM will find this step block, notice that it is in a protected region,
8615 // and invoke the appropriate catch.
8617 // We also need to handle a special case with the handling of ThreadAbortException. If a try/catch
8618 // catches a ThreadAbortException (which might be because it catches a parent, e.g. System.Exception),
8619 // and the catch doesn't call System.Threading.Thread::ResetAbort(), then when the catch returns to the VM,
8620 // the VM will automatically re-raise the ThreadAbortException. When it does this, it uses the target
8621 // address of the catch return as the new exception address. That is, the re-raised exception appears to
8622 // occur at the catch return address. If this exception return address skips an enclosing try/catch that
8623 // catches ThreadAbortException, then the enclosing try/catch will not catch the exception, as it should.
8628 // // something here raises ThreadAbortException
8629 // LEAVE LABEL_1; // no need to stop at LABEL_2
8630 // } catch (Exception) {
8631 // // This catches ThreadAbortException, but doesn't call System.Threading.Thread::ResetAbort(), so
8632 // // ThreadAbortException is re-raised by the VM at the address specified by the LEAVE opcode.
8633 // // This is bad, since it means the outer try/catch won't get a chance to catch the re-raised
8634 // // ThreadAbortException. So, instead, create step block LABEL_2 and LEAVE to that. We only
8635 // // need to do this transformation if the current EH block is a try/catch that catches
8636 // // ThreadAbortException (or one of its parents), however we might not be able to find that
8637 // // information, so currently we do it for all catch types.
8638 // LEAVE LABEL_1; // Convert this to LEAVE LABEL2;
8640 // LABEL_2: LEAVE LABEL_1; // inserted by this step creation code
8641 // } catch (ThreadAbortException) {
// Note that this pattern isn't theoretical: it occurs in ASP.NET, in IL code generated by the Roslyn C#
// compiler.
8648 if ((stepType == ST_FinallyReturn) || (stepType == ST_Catch))
8650 BasicBlock* catchStep;
8654 if (stepType == ST_FinallyReturn)
8656 assert(step->bbJumpKind == BBJ_ALWAYS);
8660 assert(stepType == ST_Catch);
8661 assert(step->bbJumpKind == BBJ_EHCATCHRET);
8664 /* Create a new exit block in the try region for the existing step block to jump to in this scope */
8665 catchStep = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step);
8666 step->bbJumpDest = catchStep;
8667 step->bbJumpDest->bbRefs++;
8669 #if defined(_TARGET_ARM_)
8670 if (stepType == ST_FinallyReturn)
8672 // Mark the target of a finally return
8673 step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
8675 #endif // defined(_TARGET_ARM_)
8677 /* The new block will inherit this block's weight */
8678 catchStep->setBBWeight(block->bbWeight);
8679 catchStep->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8684 if (stepType == ST_FinallyReturn)
8686 printf("impImportLeave - return from finally jumping out of a catch-protected try (EH#%u), new "
8687 "BBJ_ALWAYS block BB%02u\n",
8688 XTnum, catchStep->bbNum);
8692 assert(stepType == ST_Catch);
8693 printf("impImportLeave - return from catch jumping out of a catch-protected try (EH#%u), new "
8694 "BBJ_ALWAYS block BB%02u\n",
8695 XTnum, catchStep->bbNum);
8700 /* This block is the new step */
8704 invalidatePreds = true;
8709 if (step == nullptr)
8711 block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS
8716 printf("impImportLeave - no enclosing finally-protected try blocks or catch handlers; convert CEE_LEAVE "
8717 "block BB%02u to BBJ_ALWAYS\n",
8724 step->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE
8726 #if defined(_TARGET_ARM_)
8727 if (stepType == ST_FinallyReturn)
8729 assert(step->bbJumpKind == BBJ_ALWAYS);
8730 // Mark the target of a finally return
8731 step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
8733 #endif // defined(_TARGET_ARM_)
8738 printf("impImportLeave - final destination of step blocks set to BB%02u\n", leaveTarget->bbNum);
8742 // Queue up the jump target for importing
8744 impImportBlockPending(leaveTarget);
8747 if (invalidatePreds && fgComputePredsDone)
8749 JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n");
8754 fgVerifyHandlerTab();
8758 printf("\nAfter import CEE_LEAVE:\n");
8759 fgDispBasicBlocks();
8765 #endif // FEATURE_EH_FUNCLETS
8767 /*****************************************************************************/
8768 // This is called when reimporting a leave block. It resets the JumpKind,
8769 // JumpDest, and bbNext to the original values
8771 void Compiler::impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr)
8773 #if FEATURE_EH_FUNCLETS
// With EH funclets, while importing a leave opcode we create another block ending with BBJ_ALWAYS (call it B1),
// and the block containing the leave (say B0) is marked as BBJ_CALLFINALLY. Say for some reason we reimport B0;
// it is reset (in this routine) by marking it as ending with BBJ_LEAVE, and further down, when B0 is reimported, we
// create another BBJ_ALWAYS (call it B2). In this process B1 gets orphaned, and any blocks for which B1 is the
// only predecessor are also considered orphans and are attempted to be deleted.
8785 // leave OUTSIDE; // B0 is the block containing this leave, following this would be B1
// In the above nested try-finally example, we create a step block (call it Bstep) which branches to a block
// that a finally would branch to (and such a block is marked as a finally target). Block B1 branches to the step
// block. Because of the re-import of B0, Bstep is also orphaned. Since Bstep is a finally target, it cannot be
// removed. To work around this we duplicate B0 (call it B0Dup) before resetting. B0Dup is marked as
// BBJ_CALLFINALLY and only serves to pair up with B1 (BBJ_ALWAYS), which got orphaned. Now, during orphan block
// deletion, B0Dup and B1 will be treated as a pair and handled correctly.
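// An illustrative sketch of the workaround (not from the original comment):
//
//   before reset: B0 (BBJ_CALLFINALLY) -> finally, followed by B1 (BBJ_ALWAYS) -> Bstep
//   after reset:  B0 (BBJ_LEAVE), then B0Dup (BBJ_CALLFINALLY, bbRefs == 0) placed
//                 right before B1 so that the pair can be deleted together.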
8796 if (block->bbJumpKind == BBJ_CALLFINALLY)
8798 BasicBlock* dupBlock = bbNewBasicBlock(block->bbJumpKind);
8799 dupBlock->bbFlags = block->bbFlags;
8800 dupBlock->bbJumpDest = block->bbJumpDest;
8801 dupBlock->copyEHRegion(block);
8802 dupBlock->bbCatchTyp = block->bbCatchTyp;
8804 // Mark this block as
8805 // a) not referenced by any other block to make sure that it gets deleted
// c) prevented from being imported
8810 dupBlock->bbRefs = 0;
8811 dupBlock->bbWeight = 0;
8812 dupBlock->bbFlags |= BBF_IMPORTED | BBF_INTERNAL | BBF_RUN_RARELY;
8814 // Insert the block right after the block which is getting reset so that BBJ_CALLFINALLY and BBJ_ALWAYS
8815 // will be next to each other.
8816 fgInsertBBafter(block, dupBlock);
8821 printf("New Basic Block BB%02u duplicate of BB%02u created.\n", dupBlock->bbNum, block->bbNum);
8825 #endif // FEATURE_EH_FUNCLETS
8827 block->bbJumpKind = BBJ_LEAVE;
8829 block->bbJumpDest = fgLookupBB(jmpAddr);
// We will leave the BBJ_ALWAYS block we introduced. When it's reimported
// the BBJ_ALWAYS block will be unreachable, and will be removed afterwards. The
// reason we don't want to remove the block at this point is that if we call
// fgInitBBLookup() again we will do it wrong, as the BBJ_ALWAYS block won't be
// added and the linked list length will be different from fgBBcount.
8838 /*****************************************************************************/
8839 // Get the first non-prefix opcode. Used for verification of valid combinations
8840 // of prefixes and actual opcodes.
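// For example (illustrative): given the IL sequence "volatile. unaligned. 1 ldind.i4",
// this walks past both prefixes (and the unaligned. operand) and returns CEE_LDIND_I4.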
8842 static OPCODE impGetNonPrefixOpcode(const BYTE* codeAddr, const BYTE* codeEndp)
8844 while (codeAddr < codeEndp)
8846 OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr);
8847 codeAddr += sizeof(__int8);
8849 if (opcode == CEE_PREFIX1)
8851 if (codeAddr >= codeEndp)
8855 opcode = (OPCODE)(getU1LittleEndian(codeAddr) + 256);
8856 codeAddr += sizeof(__int8);
8864 case CEE_CONSTRAINED:
8871 codeAddr += opcodeSizes[opcode];
8877 /*****************************************************************************/
8878 // Checks whether the opcode is a valid opcode for volatile. and unaligned. prefixes
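// For example (illustrative): "volatile. ldsfld" is accepted below, while an
// unaligned. prefix on ldsfld is rejected, since unaligned. does not apply to
// static field accesses.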
8880 static void impValidateMemoryAccessOpcode(const BYTE* codeAddr, const BYTE* codeEndp, bool volatilePrefix)
8882 OPCODE opcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
// The opcodes for all the ldind and stind instructions happen to be contiguous, except for stind.i.
8886 ((CEE_LDIND_I1 <= opcode) && (opcode <= CEE_STIND_R8)) || (opcode == CEE_STIND_I) ||
8887 (opcode == CEE_LDFLD) || (opcode == CEE_STFLD) || (opcode == CEE_LDOBJ) || (opcode == CEE_STOBJ) ||
8888 (opcode == CEE_INITBLK) || (opcode == CEE_CPBLK) ||
// The volatile. prefix is also allowed with ldsfld and stsfld
8890 (volatilePrefix && ((opcode == CEE_LDSFLD) || (opcode == CEE_STSFLD)))))
8892 BADCODE("Invalid opcode for unaligned. or volatile. prefix");
8896 /*****************************************************************************/
8900 #undef RETURN // undef contracts RETURN macro
8915 const static controlFlow_t controlFlow[] = {
8916 #define OPDEF(c, s, pop, push, args, type, l, s1, s2, flow) flow,
8917 #include "opcode.def"
8923 /*****************************************************************************
* Determine the result type of an arithmetic operation
* On 64-bit targets, inserts upcasts when native int is mixed with int32
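*
* For example (an illustrative summary of the cases handled below):
*   byref - byref                => native int
*   [native] int - byref         => native int
*   byref +/- [native] int       => byref
*   int32 mixed with native int  => the int32 operand is upcast (64-bit only)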
8927 var_types Compiler::impGetByRefResultType(genTreeOps oper, bool fUnsigned, GenTreePtr* pOp1, GenTreePtr* pOp2)
8929 var_types type = TYP_UNDEF;
8930 GenTreePtr op1 = *pOp1, op2 = *pOp2;
// Arithmetic operations are generally only allowed with
8933 // primitive types, but certain operations are allowed
8936 if ((oper == GT_SUB) && (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF))
8938 if ((genActualType(op1->TypeGet()) == TYP_BYREF) && (genActualType(op2->TypeGet()) == TYP_BYREF))
// byref1 - byref2 => gives a native int
8943 else if (genActualTypeIsIntOrI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_BYREF))
8945 // [native] int - byref => gives a native int
8948 // The reason is that it is possible, in managed C++,
8949 // to have a tree like this:
//
//                   -
//                  / \
//                 /   \
//                /     \
//      const(h) int    addr byref
//
8958 // <BUGNUM> VSW 318822 </BUGNUM>
// So here we decide to make the resulting type a native int.
8961 CLANG_FORMAT_COMMENT_ANCHOR;
8963 #ifdef _TARGET_64BIT_
8964 if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
8966 // insert an explicit upcast
8967 op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
8969 #endif // _TARGET_64BIT_
8975 // byref - [native] int => gives a byref
8976 assert(genActualType(op1->TypeGet()) == TYP_BYREF && genActualTypeIsIntOrI(op2->TypeGet()));
8978 #ifdef _TARGET_64BIT_
8979 if ((genActualType(op2->TypeGet()) != TYP_I_IMPL))
8981 // insert an explicit upcast
8982 op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
8984 #endif // _TARGET_64BIT_
8989 else if ((oper == GT_ADD) &&
8990 (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF))
8992 // byref + [native] int => gives a byref
8994 // [native] int + byref => gives a byref
8996 // only one can be a byref : byref op byref not allowed
8997 assert(genActualType(op1->TypeGet()) != TYP_BYREF || genActualType(op2->TypeGet()) != TYP_BYREF);
8998 assert(genActualTypeIsIntOrI(op1->TypeGet()) || genActualTypeIsIntOrI(op2->TypeGet()));
9000 #ifdef _TARGET_64BIT_
9001 if (genActualType(op2->TypeGet()) == TYP_BYREF)
9003 if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
9005 // insert an explicit upcast
9006 op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9009 else if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
9011 // insert an explicit upcast
9012 op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9014 #endif // _TARGET_64BIT_
9018 #ifdef _TARGET_64BIT_
9019 else if (genActualType(op1->TypeGet()) == TYP_I_IMPL || genActualType(op2->TypeGet()) == TYP_I_IMPL)
9021 assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType));
// int + long => gives long
// long + int => gives long
// We get this because in the IL the "long" here isn't necessarily an Int64; it may just
// be a native int (IntPtr), which on 64-bit targets is typed TYP_I_IMPL (== TYP_LONG).
9027 if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
9029 // insert an explicit upcast
9030 op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9032 else if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
9034 // insert an explicit upcast
9035 op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9040 #else // 32-bit TARGET
9041 else if (genActualType(op1->TypeGet()) == TYP_LONG || genActualType(op2->TypeGet()) == TYP_LONG)
9043 assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType));
9045 // int + long => gives long
9046 // long + int => gives long
9050 #endif // _TARGET_64BIT_
9053 // int + int => gives an int
9054 assert(genActualType(op1->TypeGet()) != TYP_BYREF && genActualType(op2->TypeGet()) != TYP_BYREF);
9056 assert(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
9057 varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
9059 type = genActualType(op1->gtType);
9061 #if FEATURE_X87_DOUBLES
// For x87, since we only have one size of floating point register, prefer double.
// Everybody else can be more precise.
9065 if (type == TYP_FLOAT)
9068 #else // !FEATURE_X87_DOUBLES
9070 // If both operands are TYP_FLOAT, then leave it as TYP_FLOAT.
9071 // Otherwise, turn floats into doubles
9072 if ((type == TYP_FLOAT) && (genActualType(op2->gtType) != TYP_FLOAT))
9074 assert(genActualType(op2->gtType) == TYP_DOUBLE);
9078 #endif // FEATURE_X87_DOUBLES
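// For example (illustrative): on non-x87 targets float + float stays TYP_FLOAT,
// while float + double becomes TYP_DOUBLE via the widening above.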
9081 #if FEATURE_X87_DOUBLES
9082 assert(type == TYP_BYREF || type == TYP_DOUBLE || type == TYP_LONG || type == TYP_INT);
9083 #else // FEATURE_X87_DOUBLES
9084 assert(type == TYP_BYREF || type == TYP_DOUBLE || type == TYP_FLOAT || type == TYP_LONG || type == TYP_INT);
9085 #endif // FEATURE_X87_DOUBLES
9090 /*****************************************************************************
9091 * Casting Helper Function to service both CEE_CASTCLASS and CEE_ISINST
* typeRef contains the token, op1 contains the value being cast,
* and op2 contains code that creates the type handle corresponding to typeRef.
9095 * isCastClass = true means CEE_CASTCLASS, false means CEE_ISINST
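*
* For the inline-expandable case the importer builds, roughly (an illustrative
* sketch, not the exact construction order):
*
*     tmp = op1;
*     (tmp == null) ? tmp
*                   : (*tmp == typeHandle) ? tmp
*                                          : (isCastClass ? helper(op2, tmp) : null)
*
* i.e. a null-check QMARK wrapping the method-table-match QMARK built below.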
9097 GenTreePtr Compiler::impCastClassOrIsInstToTree(GenTreePtr op1,
9099 CORINFO_RESOLVED_TOKEN* pResolvedToken,
9104 assert(op1->TypeGet() == TYP_REF);
9106 CorInfoHelpFunc helper = info.compCompHnd->getCastingHelper(pResolvedToken, isCastClass);
9110 // We only want to expand inline the normal CHKCASTCLASS helper;
9111 expandInline = (helper == CORINFO_HELP_CHKCASTCLASS);
9115 if (helper == CORINFO_HELP_ISINSTANCEOFCLASS)
// Get the class handle and class attributes for the type we are casting to
9119 DWORD flags = info.compCompHnd->getClassAttribs(pResolvedToken->hClass);
9122 // If the class handle is marked as final we can also expand the IsInst check inline
9124 expandInline = ((flags & CORINFO_FLG_FINAL) != 0);
9127 // But don't expand inline these two cases
9129 if (flags & CORINFO_FLG_MARSHAL_BYREF)
9131 expandInline = false;
9133 else if (flags & CORINFO_FLG_CONTEXTFUL)
9135 expandInline = false;
9141 // We can't expand inline any other helpers
9143 expandInline = false;
9149 if (compCurBB->isRunRarely())
9151 expandInline = false; // not worth the code expansion in a rarely run block
9154 if ((op1->gtFlags & GTF_GLOB_EFFECT) && lvaHaveManyLocals())
9156 expandInline = false; // not worth creating an untracked local variable
// If we CSE this class handle, we prevent assertionProp from making SubType assertions,
// so instead we force the CSE logic to not consider CSE-ing this class handle.
9165 op2->gtFlags |= GTF_DONT_CSE;
9167 return gtNewHelperCallNode(helper, TYP_REF, 0, gtNewArgList(op2, op1));
9170 impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark2"));
9175 // expand the methodtable match:
//  condMT ==>   GT_NE
//               /    \
//           GT_IND   op2 (typically CNS_INT)
//              |
//           op1Copy
9184 // This can replace op1 with a GT_COMMA that evaluates op1 into a local
9186 op1 = impCloneExpr(op1, &temp, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("CASTCLASS eval op1"));
9188 // op1 is now known to be a non-complex tree
9189 // thus we can use gtClone(op1) from now on
9192 GenTreePtr op2Var = op2;
9195 op2Var = fgInsertCommaFormTemp(&op2);
9196 lvaTable[op2Var->AsLclVarCommon()->GetLclNum()].lvIsCSE = true;
9198 temp = gtNewOperNode(GT_IND, TYP_I_IMPL, temp);
9199 temp->gtFlags |= GTF_EXCEPT;
9200 condMT = gtNewOperNode(GT_NE, TYP_INT, temp, op2);
9202 GenTreePtr condNull;
9204 // expand the null check:
//  condNull ==>   GT_EQ
//                 /    \
//             op1Copy CNS_INT
//                       null
9211 condNull = gtNewOperNode(GT_EQ, TYP_INT, gtClone(op1), gtNewIconNode(0, TYP_REF));
9214 // expand the true and false trees for the condMT
9216 GenTreePtr condFalse = gtClone(op1);
9217 GenTreePtr condTrue;
9221 // use the special helper that skips the cases checked by our inlined cast
9223 helper = CORINFO_HELP_CHKCASTCLASS_SPECIAL;
9225 condTrue = gtNewHelperCallNode(helper, TYP_REF, 0, gtNewArgList(op2Var, gtClone(op1)));
9229 condTrue = gtNewIconNode(0, TYP_REF);
9232 #define USE_QMARK_TREES
9234 #ifdef USE_QMARK_TREES
9237 // Generate first QMARK - COLON tree
//  qmarkMT ==>   GT_QMARK
//                 /     \
//            condMT   GT_COLON
//                      /     \
//                condFalse  condTrue
9245 temp = new (this, GT_COLON) GenTreeColon(TYP_REF, condTrue, condFalse);
9246 qmarkMT = gtNewQmarkNode(TYP_REF, condMT, temp);
9247 condMT->gtFlags |= GTF_RELOP_QMARK;
9249 GenTreePtr qmarkNull;
9251 // Generate second QMARK - COLON tree
//  qmarkNull ==>  GT_QMARK
//                 /     \
//           condNull   GT_COLON
//                      /     \
//                 qmarkMT   op1Copy
9259 temp = new (this, GT_COLON) GenTreeColon(TYP_REF, gtClone(op1), qmarkMT);
9260 qmarkNull = gtNewQmarkNode(TYP_REF, condNull, temp);
9261 qmarkNull->gtFlags |= GTF_QMARK_CAST_INSTOF;
9262 condNull->gtFlags |= GTF_RELOP_QMARK;
9264 // Make QMark node a top level node by spilling it.
9265 unsigned tmp = lvaGrabTemp(true DEBUGARG("spilling QMark2"));
9266 impAssignTempGen(tmp, qmarkNull, (unsigned)CHECK_SPILL_NONE);
9267 return gtNewLclvNode(tmp, TYP_REF);
9272 #define assertImp(cond) ((void)0)
9274 #define assertImp(cond) \
9279 const int cchAssertImpBuf = 600; \
9280 char* assertImpBuf = (char*)alloca(cchAssertImpBuf); \
9281 _snprintf_s(assertImpBuf, cchAssertImpBuf, cchAssertImpBuf - 1, \
9282 "%s : Possibly bad IL with CEE_%s at offset %04Xh (op1=%s op2=%s stkDepth=%d)", #cond, \
9283 impCurOpcName, impCurOpcOffs, op1 ? varTypeName(op1->TypeGet()) : "NULL", \
9284 op2 ? varTypeName(op2->TypeGet()) : "NULL", verCurrentState.esStackDepth); \
9285 assertAbort(assertImpBuf, __FILE__, __LINE__); \
9291 #pragma warning(push)
9292 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
9294 /*****************************************************************************
9295 * Import the instr for the given basic block
9297 void Compiler::impImportBlockCode(BasicBlock* block)
9299 #define _impResolveToken(kind) impResolveToken(codeAddr, &resolvedToken, kind)
9305 printf("\nImporting BB%02u (PC=%03u) of '%s'", block->bbNum, block->bbCodeOffs, info.compFullName);
9309 unsigned nxtStmtIndex = impInitBlockLineInfo();
9310 IL_OFFSET nxtStmtOffs;
9312 GenTreePtr arrayNodeFrom, arrayNodeTo, arrayNodeToIndex;
9314 CorInfoHelpFunc helper;
9315 CorInfoIsAccessAllowedResult accessAllowedResult;
9316 CORINFO_HELPER_DESC calloutHelper;
9317 const BYTE* lastLoadToken = nullptr;
9319 // reject cyclic constraints
9320 if (tiVerificationNeeded)
9322 Verify(!info.hasCircularClassConstraints, "Method parent has circular class type parameter constraints.");
9323 Verify(!info.hasCircularMethodConstraints, "Method has circular method type parameter constraints.");
9326 /* Get the tree list started */
9330 /* Walk the opcodes that comprise the basic block */
9332 const BYTE* codeAddr = info.compCode + block->bbCodeOffs;
9333 const BYTE* codeEndp = info.compCode + block->bbCodeOffsEnd;
9335 IL_OFFSET opcodeOffs = block->bbCodeOffs;
9336 IL_OFFSET lastSpillOffs = opcodeOffs;
9340 /* remember the start of the delegate creation sequence (used for verification) */
9341 const BYTE* delegateCreateStart = nullptr;
9343 int prefixFlags = 0;
9344 bool explicitTailCall, constraintCall, readonlyCall;
9346 bool insertLdloc = false; // set by CEE_DUP and cleared by following store
9349 unsigned numArgs = info.compArgsCount;
9351 /* Now process all the opcodes in the block */
9353 var_types callTyp = TYP_COUNT;
9354 OPCODE prevOpcode = CEE_ILLEGAL;
9356 if (block->bbCatchTyp)
9358 if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES)
9360 impCurStmtOffsSet(block->bbCodeOffs);
9363 // We will spill the GT_CATCH_ARG and the input of the BB_QMARK block
9364 // to a temp. This is a trade off for code simplicity
9365 impSpillSpecialSideEff();
9368 while (codeAddr < codeEndp)
9370 bool usingReadyToRunHelper = false;
9371 CORINFO_RESOLVED_TOKEN resolvedToken;
9372 CORINFO_RESOLVED_TOKEN constrainedResolvedToken;
9373 CORINFO_CALL_INFO callInfo;
9374 CORINFO_FIELD_INFO fieldInfo;
9376 tiRetVal = typeInfo(); // Default type info
9378 //---------------------------------------------------------------------
/* We need to restrict the max tree depth, as many of the Compiler
9381 functions are recursive. We do this by spilling the stack */
9383 if (verCurrentState.esStackDepth)
/* Has it been a while since we last saw a non-empty stack (which
guarantees that the tree depth isn't accumulating)? */
9388 if ((opcodeOffs - lastSpillOffs) > 200)
9390 impSpillStackEnsure();
9391 lastSpillOffs = opcodeOffs;
9396 lastSpillOffs = opcodeOffs;
9397 impBoxTempInUse = false; // nothing on the stack, box temp OK to use again
9400 /* Compute the current instr offset */
9402 opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
9405 if (opts.compDbgInfo)
9408 if (!compIsForInlining())
9411 (nxtStmtIndex < info.compStmtOffsetsCount) ? info.compStmtOffsets[nxtStmtIndex] : BAD_IL_OFFSET;
9413 /* Have we reached the next stmt boundary ? */
9415 if (nxtStmtOffs != BAD_IL_OFFSET && opcodeOffs >= nxtStmtOffs)
9417 assert(nxtStmtOffs == info.compStmtOffsets[nxtStmtIndex]);
9419 if (verCurrentState.esStackDepth != 0 && opts.compDbgCode)
9421 /* We need to provide accurate IP-mapping at this point.
9422 So spill anything on the stack so that it will form
9423 gtStmts with the correct stmt offset noted */
9425 impSpillStackEnsure(true);
9428 // Has impCurStmtOffs been reported in any tree?
9430 if (impCurStmtOffs != BAD_IL_OFFSET && opts.compDbgCode)
9432 GenTreePtr placeHolder = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
9433 impAppendTree(placeHolder, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
9435 assert(impCurStmtOffs == BAD_IL_OFFSET);
9438 if (impCurStmtOffs == BAD_IL_OFFSET)
9440 /* Make sure that nxtStmtIndex is in sync with opcodeOffs.
9441 If opcodeOffs has gone past nxtStmtIndex, catch up */
9443 while ((nxtStmtIndex + 1) < info.compStmtOffsetsCount &&
9444 info.compStmtOffsets[nxtStmtIndex + 1] <= opcodeOffs)
9449 /* Go to the new stmt */
9451 impCurStmtOffsSet(info.compStmtOffsets[nxtStmtIndex]);
9453 /* Update the stmt boundary index */
9456 assert(nxtStmtIndex <= info.compStmtOffsetsCount);
9458 /* Are there any more line# entries after this one? */
9460 if (nxtStmtIndex < info.compStmtOffsetsCount)
9462 /* Remember where the next line# starts */
9464 nxtStmtOffs = info.compStmtOffsets[nxtStmtIndex];
9468 /* No more line# entries */
9470 nxtStmtOffs = BAD_IL_OFFSET;
9474 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES) &&
9475 (verCurrentState.esStackDepth == 0))
9477 /* At stack-empty locations, we have already added the tree to
the stmt list with the last offset. We just need to update
impCurStmtOffs */
9482 impCurStmtOffsSet(opcodeOffs);
9484 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES) &&
9485 impOpcodeIsCallSiteBoundary(prevOpcode))
9487 /* Make sure we have a type cached */
9488 assert(callTyp != TYP_COUNT);
9490 if (callTyp == TYP_VOID)
9492 impCurStmtOffsSet(opcodeOffs);
9494 else if (opts.compDbgCode)
9496 impSpillStackEnsure(true);
9497 impCurStmtOffsSet(opcodeOffs);
9500 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::NOP_BOUNDARIES) && (prevOpcode == CEE_NOP))
9502 if (opts.compDbgCode)
9504 impSpillStackEnsure(true);
9507 impCurStmtOffsSet(opcodeOffs);
9510 assert(impCurStmtOffs == BAD_IL_OFFSET || nxtStmtOffs == BAD_IL_OFFSET ||
9511 jitGetILoffs(impCurStmtOffs) <= nxtStmtOffs);
9515 CORINFO_CLASS_HANDLE clsHnd = DUMMY_INIT(NULL);
9516 CORINFO_CLASS_HANDLE ldelemClsHnd = DUMMY_INIT(NULL);
9517 CORINFO_CLASS_HANDLE stelemClsHnd = DUMMY_INIT(NULL);
9519 var_types lclTyp, ovflType = TYP_UNKNOWN;
9520 GenTreePtr op1 = DUMMY_INIT(NULL);
9521 GenTreePtr op2 = DUMMY_INIT(NULL);
9522 GenTreeArgList* args = nullptr; // What good do these "DUMMY_INIT"s do?
9523 GenTreePtr newObjThisPtr = DUMMY_INIT(NULL);
9524 bool uns = DUMMY_INIT(false);
9526 /* Get the next opcode and the size of its parameters */
9528 OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr);
9529 codeAddr += sizeof(__int8);
9532 impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1);
9533 JITDUMP("\n [%2u] %3u (0x%03x) ", verCurrentState.esStackDepth, impCurOpcOffs, impCurOpcOffs);
9538 // Return if any previous code has caused inline to fail.
9539 if (compDonotInline())
9544 /* Get the size of additional parameters */
9546 signed int sz = opcodeSizes[opcode];
9549 clsHnd = NO_CLASS_HANDLE;
9551 callTyp = TYP_COUNT;
9553 impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1);
9554 impCurOpcName = opcodeNames[opcode];
9556 if (verbose && (opcode != CEE_PREFIX1))
9558 printf("%s", impCurOpcName);
9561 /* Use assertImp() to display the opcode */
9563 op1 = op2 = nullptr;
9566 /* See what kind of an opcode we have, then */
9568 unsigned mflags = 0;
9569 unsigned clsFlags = 0;
9582 CORINFO_SIG_INFO sig;
9585 bool ovfl, unordered, callNode;
9587 CORINFO_CLASS_HANDLE tokenType;
9597 opcode = (OPCODE)(getU1LittleEndian(codeAddr) + 256);
9598 codeAddr += sizeof(__int8);
9599 opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
9604 // We need to call impSpillLclRefs() for a struct type lclVar.
9605 // This is done for non-block assignments in the handling of stloc.
9606 if ((op1->OperGet() == GT_ASG) && varTypeIsStruct(op1->gtOp.gtOp1) &&
9607 (op1->gtOp.gtOp1->gtOper == GT_LCL_VAR))
9609 impSpillLclRefs(op1->gtOp.gtOp1->AsLclVarCommon()->gtLclNum);
9612 /* Append 'op1' to the list of statements */
9613 impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
9618 /* Append 'op1' to the list of statements */
9620 impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
9626 // Remember at which BC offset the tree was finished
9627 impNoteLastILoffs();
9632 impPushNullObjRefOnStack();
9645 cval.intVal = (opcode - CEE_LDC_I4_0);
9646 assert(-1 <= cval.intVal && cval.intVal <= 8);
9650 cval.intVal = getI1LittleEndian(codeAddr);
9653 cval.intVal = getI4LittleEndian(codeAddr);
9656 JITDUMP(" %d", cval.intVal);
9657 impPushOnStack(gtNewIconNode(cval.intVal), typeInfo(TI_INT));
9661 cval.lngVal = getI8LittleEndian(codeAddr);
9662 JITDUMP(" 0x%016llx", cval.lngVal);
9663 impPushOnStack(gtNewLconNode(cval.lngVal), typeInfo(TI_LONG));
9667 cval.dblVal = getR8LittleEndian(codeAddr);
9668 JITDUMP(" %#.17g", cval.dblVal);
9669 impPushOnStack(gtNewDconNode(cval.dblVal), typeInfo(TI_DOUBLE));
9673 cval.dblVal = getR4LittleEndian(codeAddr);
9674 JITDUMP(" %#.17g", cval.dblVal);
9676 GenTreePtr cnsOp = gtNewDconNode(cval.dblVal);
9677 #if !FEATURE_X87_DOUBLES
9678 // X87 stack doesn't differentiate between float/double
9679 // so R4 is treated as R8, but everybody else does
9680 cnsOp->gtType = TYP_FLOAT;
9681 #endif // FEATURE_X87_DOUBLES
9682 impPushOnStack(cnsOp, typeInfo(TI_DOUBLE));
9688 if (compIsForInlining())
9690 if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_NO_CALLEE_LDSTR)
9692 compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_LDSTR_RESTRICTION);
9697 val = getU4LittleEndian(codeAddr);
9698 JITDUMP(" %08X", val);
9699 if (tiVerificationNeeded)
9701 Verify(info.compCompHnd->isValidStringRef(info.compScopeHnd, val), "bad string");
9702 tiRetVal = typeInfo(TI_REF, impGetStringClass());
9704 impPushOnStack(gtNewSconNode(val, info.compScopeHnd), tiRetVal);
9709 lclNum = getU2LittleEndian(codeAddr);
9710 JITDUMP(" %u", lclNum);
9711 impLoadArg(lclNum, opcodeOffs + sz + 1);
9715 lclNum = getU1LittleEndian(codeAddr);
9716 JITDUMP(" %u", lclNum);
9717 impLoadArg(lclNum, opcodeOffs + sz + 1);
9724 lclNum = (opcode - CEE_LDARG_0);
9725 assert(lclNum >= 0 && lclNum < 4);
9726 impLoadArg(lclNum, opcodeOffs + sz + 1);
9730 lclNum = getU2LittleEndian(codeAddr);
9731 JITDUMP(" %u", lclNum);
9732 impLoadLoc(lclNum, opcodeOffs + sz + 1);
9736 lclNum = getU1LittleEndian(codeAddr);
9737 JITDUMP(" %u", lclNum);
9738 impLoadLoc(lclNum, opcodeOffs + sz + 1);
9745 lclNum = (opcode - CEE_LDLOC_0);
9746 assert(lclNum >= 0 && lclNum < 4);
9747 impLoadLoc(lclNum, opcodeOffs + sz + 1);
9751 lclNum = getU2LittleEndian(codeAddr);
9755 lclNum = getU1LittleEndian(codeAddr);
9757 JITDUMP(" %u", lclNum);
9759 if (tiVerificationNeeded)
9761 Verify(lclNum < info.compILargsCount, "bad arg num");
9764 if (compIsForInlining())
9766 op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo);
9767 noway_assert(op1->gtOper == GT_LCL_VAR);
9768 lclNum = op1->AsLclVar()->gtLclNum;
9773 lclNum = compMapILargNum(lclNum); // account for possible hidden param
9774 assertImp(lclNum < numArgs);
9776 if (lclNum == info.compThisArg)
9778 lclNum = lvaArg0Var;
9780 lvaTable[lclNum].lvArgWrite = 1;
9782 if (tiVerificationNeeded)
9784 typeInfo& tiLclVar = lvaTable[lclNum].lvVerTypeInfo;
9785 Verify(tiCompatibleWith(impStackTop().seTypeInfo, NormaliseForStack(tiLclVar), true),
9788 if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
9790 Verify(!tiLclVar.IsThisPtr(), "storing to uninit this ptr");
9797 lclNum = getU2LittleEndian(codeAddr);
9798 JITDUMP(" %u", lclNum);
9802 lclNum = getU1LittleEndian(codeAddr);
9803 JITDUMP(" %u", lclNum);
9810 lclNum = (opcode - CEE_STLOC_0);
9811 assert(lclNum >= 0 && lclNum < 4);
9814 if (tiVerificationNeeded)
9816 Verify(lclNum < info.compMethodInfo->locals.numArgs, "bad local num");
9817 Verify(tiCompatibleWith(impStackTop().seTypeInfo,
9818 NormaliseForStack(lvaTable[lclNum + numArgs].lvVerTypeInfo), true),
9822 if (compIsForInlining())
9824 lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
9826 /* Have we allocated a temp for this local? */
9828 lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline stloc first use temp"));
9837 if (lclNum >= info.compLocalsCount && lclNum != lvaArg0Var)
9839 assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
9845 /* if it is a struct assignment, make certain we don't overflow the buffer */
9846 assert(lclTyp != TYP_STRUCT || lvaLclSize(lclNum) >= info.compCompHnd->getClassSize(clsHnd));
9848 if (lvaTable[lclNum].lvNormalizeOnLoad())
9850 lclTyp = lvaGetRealType(lclNum);
9854 lclTyp = lvaGetActualType(lclNum);
9858 /* Pop the value being assigned */
9861 StackEntry se = impPopStack(clsHnd);
9863 tiRetVal = se.seTypeInfo;
9867 if (varTypeIsSIMD(lclTyp) && (lclTyp != op1->TypeGet()))
9869 assert(op1->TypeGet() == TYP_STRUCT);
9870 op1->gtType = lclTyp;
9872 #endif // FEATURE_SIMD
9874 op1 = impImplicitIorI4Cast(op1, lclTyp);
9876 #ifdef _TARGET_64BIT_
9877 // Downcast the TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
9878 if (varTypeIsI(op1->TypeGet()) && (genActualType(lclTyp) == TYP_INT))
9880 assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
9881 op1 = gtNewCastNode(TYP_INT, op1, TYP_INT);
9883 #endif // _TARGET_64BIT_
9885 // We had better assign it a value of the correct type
9887 assertImp(genActualType(lclTyp) == genActualType(op1->gtType) ||
9888 genActualType(lclTyp) == TYP_I_IMPL && op1->IsVarAddr() ||
9889 (genActualType(lclTyp) == TYP_I_IMPL && (op1->gtType == TYP_BYREF || op1->gtType == TYP_REF)) ||
9890 (genActualType(op1->gtType) == TYP_I_IMPL && lclTyp == TYP_BYREF) ||
9891 (varTypeIsFloating(lclTyp) && varTypeIsFloating(op1->TypeGet())) ||
9892 ((genActualType(lclTyp) == TYP_BYREF) && genActualType(op1->TypeGet()) == TYP_REF));
9894 /* If op1 is "&var" then its type is the transient "*" and it can
9895 be used either as TYP_BYREF or TYP_I_IMPL */
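// (Illustrative, hypothetical IL: 'ldloca.s V_0' pushes &V_0 with the
// transient "*" type; if it is then stored to a native int local, the
// retyping below avoids reporting a byref in the GC info.)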
9897 if (op1->IsVarAddr())
9899 assertImp(genActualType(lclTyp) == TYP_I_IMPL || lclTyp == TYP_BYREF);
9901 /* When "&var" is created, we assume it is a byref. If it is
9902 being assigned to a TYP_I_IMPL var, change the type to
9903 prevent unnecessary GC info */
9905 if (genActualType(lclTyp) == TYP_I_IMPL)
9907 op1->gtType = TYP_I_IMPL;
9911 /* Filter out simple assignments to itself */
9913 if (op1->gtOper == GT_LCL_VAR && lclNum == op1->gtLclVarCommon.gtLclNum)
9917 // This is a sequence of (ldloc, dup, stloc) on the same local: the
9918 // store is a self-assignment, so it reduces to a lone ldloc. Goto LDVAR to reconstruct the ldloc node.
9919 CLANG_FORMAT_COMMENT_ANCHOR;
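// Shape being matched here (illustrative IL):
//     ldloc.0
//     dup
//     stloc.0   <-- stores V_0 back into itself
// The store has no effect, so only the loaded value needs to survive.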
9922 if (tiVerificationNeeded)
9925 typeInfo::AreEquivalent(tiRetVal, NormaliseForStack(lvaTable[lclNum].lvVerTypeInfo)));
9930 insertLdloc = false;
9932 impLoadVar(lclNum, opcodeOffs + sz + 1);
9935 else if (opts.compDbgCode)
9937 op1 = gtNewNothingNode();
9946 /* Create the assignment node */
9948 op2 = gtNewLclvNode(lclNum, lclTyp, opcodeOffs + sz + 1);
9950 /* If the local is aliased, we need to spill calls and
9951 indirections from the stack. */
9953 if ((lvaTable[lclNum].lvAddrExposed || lvaTable[lclNum].lvHasLdAddrOp) &&
9954 verCurrentState.esStackDepth > 0)
9956 impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("Local could be aliased"));
9959 /* Spill any refs to the local from the stack */
9961 impSpillLclRefs(lclNum);
9963 #if !FEATURE_X87_DOUBLES
9964 // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE
9965 // We insert a cast to the dest 'op2' type
9967 if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) &&
9968 varTypeIsFloating(op2->gtType))
9970 op1 = gtNewCastNode(op2->TypeGet(), op1, op2->TypeGet());
9972 #endif // !FEATURE_X87_DOUBLES
9974 if (varTypeIsStruct(lclTyp))
9976 op1 = impAssignStruct(op2, op1, clsHnd, (unsigned)CHECK_SPILL_ALL);
9980 // The code generator generates GC tracking information
9981 // based on the RHS of the assignment. Later the LHS (which
9982 // is a BYREF) gets used and the emitter checks that that variable
9983 // is being tracked. It is not (since the RHS was an int and did
9984 // not need tracking). To keep this assert happy, we change the RHS
9985 if (lclTyp == TYP_BYREF && !varTypeIsGC(op1->gtType))
9987 op1->gtType = TYP_BYREF;
9989 op1 = gtNewAssignNode(op2, op1);
9992 /* If insertLdloc is true, then we need to insert a ldloc following the
9993 stloc. This is done when converting a (dup, stloc) sequence into
9994 a (stloc, ldloc) sequence. */
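// Illustrative IL for that rewrite (hypothetical local V_1):
//     <expr>              <expr>
//     dup         ==>     stloc.1
//     stloc.1             ldloc.1
// Both forms leave the value on the stack and in V_1, but the second
// gives CSE a named local to recognize at both use sites.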
9998 // From SPILL_APPEND
9999 impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
10002 // From DONE_APPEND
10003 impNoteLastILoffs();
10006 insertLdloc = false;
10008 impLoadVar(lclNum, opcodeOffs + sz + 1, tiRetVal);
10015 lclNum = getU2LittleEndian(codeAddr);
10019 lclNum = getU1LittleEndian(codeAddr);
10021 JITDUMP(" %u", lclNum);
10022 if (tiVerificationNeeded)
10024 Verify(lclNum < info.compMethodInfo->locals.numArgs, "bad local num");
10025 Verify(info.compInitMem, "initLocals not set");
10028 if (compIsForInlining())
10030 // Get the local type
10031 lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
10033 /* Have we allocated a temp for this local? */
10035 lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline ldloca(s) first use temp"));
10037 op1 = gtNewLclvNode(lclNum, lvaGetActualType(lclNum));
10043 assertImp(lclNum < info.compLocalsCount);
10047 lclNum = getU2LittleEndian(codeAddr);
10051 lclNum = getU1LittleEndian(codeAddr);
10053 JITDUMP(" %u", lclNum);
10054 Verify(lclNum < info.compILargsCount, "bad arg num");
10056 if (compIsForInlining())
10058 // In IL, LDARGA(_S) is used to load the byref managed pointer of a struct argument,
10059 // followed by a ldfld to load the field.
10061 op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo);
10062 if (op1->gtOper != GT_LCL_VAR)
10064 compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDARGA_NOT_LOCAL_VAR);
10068 assert(op1->gtOper == GT_LCL_VAR);
10073 lclNum = compMapILargNum(lclNum); // account for possible hidden param
10074 assertImp(lclNum < numArgs);
10076 if (lclNum == info.compThisArg)
10078 lclNum = lvaArg0Var;
10085 op1 = gtNewLclvNode(lclNum, lvaGetActualType(lclNum), opcodeOffs + sz + 1);
10088 assert(op1->gtOper == GT_LCL_VAR);
10090 /* Note that this is supposed to create the transient type "*"
10091 which may be used as a TYP_I_IMPL. However we catch places
10092 where it is used as a TYP_I_IMPL and change the node if needed.
10093 Thus we are pessimistic and may report byrefs in the GC info
10094 where it was not absolutely needed, but it is safer this way. */
10096 op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, op1);
10098 // &aliasedVar doesn't need GTF_GLOB_REF, though aliasedVar does
10099 assert((op1->gtFlags & GTF_GLOB_REF) == 0);
10101 tiRetVal = lvaTable[lclNum].lvVerTypeInfo;
10102 if (tiVerificationNeeded)
10104 // Don't allow taking address of uninit this ptr.
10105 if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
10107 Verify(!tiRetVal.IsThisPtr(), "address of uninit this ptr");
10110 if (!tiRetVal.IsByRef())
10112 tiRetVal.MakeByRef();
10116 Verify(false, "byref to byref");
10120 impPushOnStack(op1, tiRetVal);
10125 if (!info.compIsVarArgs)
10127 BADCODE("arglist in non-vararg method");
10130 if (tiVerificationNeeded)
10132 tiRetVal = typeInfo(TI_STRUCT, impGetRuntimeArgumentHandle());
10134 assertImp((info.compMethodInfo->args.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG);
10136 /* The ARGLIST cookie is a hidden 'last' parameter; we have already
10137 adjusted the arg count because this is like fetching the last param */
10138 assertImp(0 < numArgs);
10139 assert(lvaTable[lvaVarargsHandleArg].lvAddrExposed);
10140 lclNum = lvaVarargsHandleArg;
10141 op1 = gtNewLclvNode(lclNum, TYP_I_IMPL, opcodeOffs + sz + 1);
10142 op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, op1);
10143 impPushOnStack(op1, tiRetVal);
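// (For context: this is the opcode behind C#'s undocumented __arglist
// support; e.g., illustratively, 'void M(__arglist)' uses 'arglist' to
// obtain the RuntimeArgumentHandle that ArgIterator then consumes.)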
10146 case CEE_ENDFINALLY:
10148 if (compIsForInlining())
10150 assert(!"Shouldn't have exception handlers in the inliner!");
10151 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFINALLY);
10155 if (verCurrentState.esStackDepth > 0)
10157 impEvalSideEffects();
10160 if (info.compXcptnsCount == 0)
10162 BADCODE("endfinally outside finally");
10165 assert(verCurrentState.esStackDepth == 0);
10167 op1 = gtNewOperNode(GT_RETFILT, TYP_VOID, nullptr);
10170 case CEE_ENDFILTER:
10172 if (compIsForInlining())
10174 assert(!"Shouldn't have exception handlers in the inliner!");
10175 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFILTER);
10179 block->bbSetRunRarely(); // filters are rare
10181 if (info.compXcptnsCount == 0)
10183 BADCODE("endfilter outside filter");
10186 if (tiVerificationNeeded)
10188 Verify(impStackTop().seTypeInfo.IsType(TI_INT), "bad endfilt arg");
10191 op1 = impPopStack().val;
10192 assertImp(op1->gtType == TYP_INT);
10193 if (!bbInFilterILRange(block))
10195 BADCODE("EndFilter outside a filter handler");
10198 /* Mark current bb as end of filter */
10200 assert(compCurBB->bbFlags & BBF_DONT_REMOVE);
10201 assert(compCurBB->bbJumpKind == BBJ_EHFILTERRET);
10203 /* Mark catch handler as successor */
10205 op1 = gtNewOperNode(GT_RETFILT, op1->TypeGet(), op1);
10206 if (verCurrentState.esStackDepth != 0)
10208 verRaiseVerifyException(INDEBUG("stack must be 1 on end of filter") DEBUGARG(__FILE__)
10209 DEBUGARG(__LINE__));
10214 prefixFlags &= ~PREFIX_TAILCALL; // ret without call before it
10216 if (!impReturnInstruction(block, prefixFlags, opcode))
10227 assert(!compIsForInlining());
10229 if (tiVerificationNeeded)
10231 Verify(false, "Invalid opcode: CEE_JMP");
10234 if ((info.compFlags & CORINFO_FLG_SYNCH) || block->hasTryIndex() || block->hasHndIndex())
10236 /* CEE_JMP does not make sense in some "protected" regions. */
10238 BADCODE("Jmp not allowed in protected region");
10241 if (verCurrentState.esStackDepth != 0)
10243 BADCODE("Stack must be empty after CEE_JMPs");
10246 _impResolveToken(CORINFO_TOKENKIND_Method);
10248 JITDUMP(" %08X", resolvedToken.token);
10250 /* The signature of the target has to be identical to ours.
10251 At least check that argCnt and returnType match */
10253 eeGetMethodSig(resolvedToken.hMethod, &sig);
10254 if (sig.numArgs != info.compMethodInfo->args.numArgs ||
10255 sig.retType != info.compMethodInfo->args.retType ||
10256 sig.callConv != info.compMethodInfo->args.callConv)
10258 BADCODE("Incompatible target for CEE_JMPs");
10261 #if defined(_TARGET_XARCH_) || defined(_TARGET_ARMARCH_)
10263 op1 = new (this, GT_JMP) GenTreeVal(GT_JMP, TYP_VOID, (size_t)resolvedToken.hMethod);
10265 /* Mark the basic block as being a JUMP instead of RETURN */
10267 block->bbFlags |= BBF_HAS_JMP;
10269 /* Set this flag to make sure register arguments have a location assigned
10270 * even if we don't use them inside the method */
10272 compJmpOpUsed = true;
10274 fgNoStructPromotion = true;
10278 #else // !_TARGET_XARCH_ && !_TARGET_ARMARCH_
10280 // Import this just like a series of LDARGs + tail. + call + ret
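// That is, 'jmp <token>' is imported as if it were (illustrative IL):
//     ldarg.0 ... ldarg.N-1
//     tail. call <token>
//     ret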
10282 if (info.compIsVarArgs)
10284 // For now we don't implement true tail calls, so this breaks varargs.
10285 // So warn the user instead of generating bad code.
10286 // This is a semi-temporary workaround for DevDiv 173860, until we can properly
10287 // implement true tail calls.
10288 IMPL_LIMITATION("varargs + CEE_JMP doesn't work yet");
10291 // First load up the arguments (0 - N)
10292 for (unsigned argNum = 0; argNum < info.compILargsCount; argNum++)
10294 impLoadArg(argNum, opcodeOffs + sz + 1);
10297 // Now generate the tail call
10298 noway_assert(prefixFlags == 0);
10299 prefixFlags = PREFIX_TAILCALL_EXPLICIT;
10302 eeGetCallInfo(&resolvedToken, NULL,
10303 combine(CORINFO_CALLINFO_ALLOWINSTPARAM, CORINFO_CALLINFO_SECURITYCHECKS), &callInfo);
10305 // All calls and delegates need a security callout.
10306 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
10308 callTyp = impImportCall(CEE_CALL, &resolvedToken, NULL, NULL, PREFIX_TAILCALL_EXPLICIT, &callInfo,
10311 // And finish with the ret
10314 #endif // _TARGET_XARCH_ || _TARGET_ARMARCH_
10317 assertImp(sz == sizeof(unsigned));
10319 _impResolveToken(CORINFO_TOKENKIND_Class);
10321 JITDUMP(" %08X", resolvedToken.token);
10323 ldelemClsHnd = resolvedToken.hClass;
10325 if (tiVerificationNeeded)
10327 typeInfo tiArray = impStackTop(1).seTypeInfo;
10328 typeInfo tiIndex = impStackTop().seTypeInfo;
10330 // As per ECMA, the 'index' specified can be either int32 or native int.
10331 Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10333 typeInfo arrayElemType = verMakeTypeInfo(ldelemClsHnd);
10334 Verify(tiArray.IsNullObjRef() ||
10335 typeInfo::AreEquivalent(verGetArrayElemType(tiArray), arrayElemType),
10338 tiRetVal = arrayElemType;
10339 tiRetVal.MakeByRef();
10340 if (prefixFlags & PREFIX_READONLY)
10342 tiRetVal.SetIsReadonlyByRef();
10345 // an array interior pointer is always in the heap
10346 tiRetVal.SetIsPermanentHomeByRef();
10349 // If it's a value class array we just do a simple address-of
10350 if (eeIsValueClass(ldelemClsHnd))
10352 CorInfoType cit = info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd);
10353 if (cit == CORINFO_TYPE_UNDEF)
10355 lclTyp = TYP_STRUCT;
10359 lclTyp = JITtype2varType(cit);
10361 goto ARR_LD_POST_VERIFY;
10364 // Similarly, if it's a readonly access, we can do a simple address-of
10365 // without doing a runtime type-check
10366 if (prefixFlags & PREFIX_READONLY)
10369 goto ARR_LD_POST_VERIFY;
10372 // Otherwise we need the full helper function with run-time type check
10373 op1 = impTokenToHandle(&resolvedToken);
10374 if (op1 == nullptr)
10375 { // compDonotInline()
10379 args = gtNewArgList(op1); // Type
10380 args = gtNewListNode(impPopStack().val, args); // index
10381 args = gtNewListNode(impPopStack().val, args); // array
10382 op1 = gtNewHelperCallNode(CORINFO_HELP_LDELEMA_REF, TYP_BYREF, GTF_EXCEPT, args);
10384 impPushOnStack(op1, tiRetVal);
10387 // ldelem for reference and value types
10389 assertImp(sz == sizeof(unsigned));
10391 _impResolveToken(CORINFO_TOKENKIND_Class);
10393 JITDUMP(" %08X", resolvedToken.token);
10395 ldelemClsHnd = resolvedToken.hClass;
10397 if (tiVerificationNeeded)
10399 typeInfo tiArray = impStackTop(1).seTypeInfo;
10400 typeInfo tiIndex = impStackTop().seTypeInfo;
10402 // As per ECMA, the 'index' specified can be either int32 or native int.
10403 Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10404 tiRetVal = verMakeTypeInfo(ldelemClsHnd);
10406 Verify(tiArray.IsNullObjRef() || tiCompatibleWith(verGetArrayElemType(tiArray), tiRetVal, false),
10407 "type of array incompatible with type operand");
10408 tiRetVal.NormaliseForStack();
10411 // If it's a reference type or generic variable type
10412 // then just generate code as though it's a ldelem.ref instruction
10413 if (!eeIsValueClass(ldelemClsHnd))
10416 opcode = CEE_LDELEM_REF;
10420 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(ldelemClsHnd);
10421 lclTyp = JITtype2varType(jitTyp);
10422 tiRetVal = verMakeTypeInfo(ldelemClsHnd); // precise type always needed for struct
10423 tiRetVal.NormaliseForStack();
10425 goto ARR_LD_POST_VERIFY;
10427 case CEE_LDELEM_I1:
10430 case CEE_LDELEM_I2:
10431 lclTyp = TYP_SHORT;
10434 lclTyp = TYP_I_IMPL;
10437 // Should be UINT, but since no platform widens 4->8 bytes it doesn't matter
10438 // and treating it as TYP_INT avoids other asserts.
10439 case CEE_LDELEM_U4:
10443 case CEE_LDELEM_I4:
10446 case CEE_LDELEM_I8:
10449 case CEE_LDELEM_REF:
10452 case CEE_LDELEM_R4:
10453 lclTyp = TYP_FLOAT;
10455 case CEE_LDELEM_R8:
10456 lclTyp = TYP_DOUBLE;
10458 case CEE_LDELEM_U1:
10459 lclTyp = TYP_UBYTE;
10461 case CEE_LDELEM_U2:
10467 if (tiVerificationNeeded)
10469 typeInfo tiArray = impStackTop(1).seTypeInfo;
10470 typeInfo tiIndex = impStackTop().seTypeInfo;
10472 // As per ECMA, the 'index' specified can be either int32 or native int.
10473 Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10474 if (tiArray.IsNullObjRef())
10476 if (lclTyp == TYP_REF)
10477 { // we will say a deref of a null array yields a null ref
10478 tiRetVal = typeInfo(TI_NULL);
10482 tiRetVal = typeInfo(lclTyp);
10487 tiRetVal = verGetArrayElemType(tiArray);
10488 typeInfo arrayElemTi = typeInfo(lclTyp);
10489 #ifdef _TARGET_64BIT_
10490 if (opcode == CEE_LDELEM_I)
10492 arrayElemTi = typeInfo::nativeInt();
10495 if (lclTyp != TYP_REF && lclTyp != TYP_STRUCT)
10497 Verify(typeInfo::AreEquivalent(tiRetVal, arrayElemTi), "bad array");
10500 #endif // _TARGET_64BIT_
10502 Verify(tiRetVal.IsType(arrayElemTi.GetType()), "bad array");
10505 tiRetVal.NormaliseForStack();
10507 ARR_LD_POST_VERIFY:
10509 /* Pull the index value and array address */
10510 op2 = impPopStack().val;
10511 op1 = impPopStack().val;
10512 assertImp(op1->gtType == TYP_REF);
10514 /* Check for null pointer - in the inliner case we simply abort */
10516 if (compIsForInlining())
10518 if (op1->gtOper == GT_CNS_INT)
10520 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NULL_FOR_LDELEM);
10525 op1 = impCheckForNullPointer(op1);
10527 /* Mark the block as containing an index expression */
10529 if (op1->gtOper == GT_LCL_VAR)
10531 if (op2->gtOper == GT_LCL_VAR || op2->gtOper == GT_CNS_INT || op2->gtOper == GT_ADD)
10533 block->bbFlags |= BBF_HAS_IDX_LEN;
10534 optMethodFlags |= OMF_HAS_ARRAYREF;
10538 /* Create the index node and push it on the stack */
10540 op1 = gtNewIndexRef(lclTyp, op1, op2);
10542 ldstruct = (opcode == CEE_LDELEM && lclTyp == TYP_STRUCT);
10544 if ((opcode == CEE_LDELEMA) || ldstruct ||
10545 (ldelemClsHnd != DUMMY_INIT(NULL) && eeIsValueClass(ldelemClsHnd)))
10547 assert(ldelemClsHnd != DUMMY_INIT(NULL));
10549 // remember the element size
10550 if (lclTyp == TYP_REF)
10552 op1->gtIndex.gtIndElemSize = sizeof(void*);
10556 // If ldelemClsHnd is precisely a primitive type, use that; otherwise, preserve the struct type.
10557 if (info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd) == CORINFO_TYPE_UNDEF)
10559 op1->gtIndex.gtStructElemClass = ldelemClsHnd;
10561 assert(lclTyp != TYP_STRUCT || op1->gtIndex.gtStructElemClass != nullptr);
10562 if (lclTyp == TYP_STRUCT)
10564 size = info.compCompHnd->getClassSize(ldelemClsHnd);
10565 op1->gtIndex.gtIndElemSize = size;
10566 op1->gtType = lclTyp;
10570 if ((opcode == CEE_LDELEMA) || ldstruct)
10573 lclTyp = TYP_BYREF;
10575 op1 = gtNewOperNode(GT_ADDR, lclTyp, op1);
10579 assert(lclTyp != TYP_STRUCT);
10585 // Create an OBJ for the result
10586 op1 = gtNewObjNode(ldelemClsHnd, op1);
10587 op1->gtFlags |= GTF_EXCEPT;
10589 impPushOnStack(op1, tiRetVal);
10592 // stelem for reference and value types
10595 assertImp(sz == sizeof(unsigned));
10597 _impResolveToken(CORINFO_TOKENKIND_Class);
10599 JITDUMP(" %08X", resolvedToken.token);
10601 stelemClsHnd = resolvedToken.hClass;
10603 if (tiVerificationNeeded)
10605 typeInfo tiArray = impStackTop(2).seTypeInfo;
10606 typeInfo tiIndex = impStackTop(1).seTypeInfo;
10607 typeInfo tiValue = impStackTop().seTypeInfo;
10609 // As per ECMA, the 'index' specified can be either int32 or native int.
10610 Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10611 typeInfo arrayElem = verMakeTypeInfo(stelemClsHnd);
10613 Verify(tiArray.IsNullObjRef() || tiCompatibleWith(arrayElem, verGetArrayElemType(tiArray), false),
10614 "type operand incompatible with array element type");
10615 arrayElem.NormaliseForStack();
10616 Verify(tiCompatibleWith(tiValue, arrayElem, true), "value incompatible with type operand");
10619 // If it's a reference type just behave as though it's a stelem.ref instruction
10620 if (!eeIsValueClass(stelemClsHnd))
10622 goto STELEM_REF_POST_VERIFY;
10625 // Otherwise extract the type
10627 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(stelemClsHnd);
10628 lclTyp = JITtype2varType(jitTyp);
10629 goto ARR_ST_POST_VERIFY;
10632 case CEE_STELEM_REF:
10634 if (tiVerificationNeeded)
10636 typeInfo tiArray = impStackTop(2).seTypeInfo;
10637 typeInfo tiIndex = impStackTop(1).seTypeInfo;
10638 typeInfo tiValue = impStackTop().seTypeInfo;
10640 // As per ECMA, the 'index' specified can be either int32 or native int.
10641 Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10642 Verify(tiValue.IsObjRef(), "bad value");
10644 // We only check that it is an object reference; the helper does additional checks.
10645 Verify(tiArray.IsNullObjRef() || verGetArrayElemType(tiArray).IsType(TI_REF), "bad array");
10648 arrayNodeTo = impStackTop(2).val;
10649 arrayNodeToIndex = impStackTop(1).val;
10650 arrayNodeFrom = impStackTop().val;
10653 // Note that it is not legal to optimize away CORINFO_HELP_ARRADDR_ST in a
10654 // lot of cases because of covariance, i.e. foo[] can be cast to object[].
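// For example (illustrative C#): given 'object[] oa = new string[1];',
// the store 'oa[0] = new object();' must throw ArrayTypeMismatchException
// at run time, and CORINFO_HELP_ARRADDR_ST performs exactly that check.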
10657 // Check for assignment to same array, i.e. arrLcl[i] = arrLcl[j]
10658 // This does not need CORINFO_HELP_ARRADDR_ST
10660 if (arrayNodeFrom->OperGet() == GT_INDEX && arrayNodeFrom->gtOp.gtOp1->gtOper == GT_LCL_VAR &&
10661 arrayNodeTo->gtOper == GT_LCL_VAR &&
10662 arrayNodeTo->gtLclVarCommon.gtLclNum == arrayNodeFrom->gtOp.gtOp1->gtLclVarCommon.gtLclNum &&
10663 !lvaTable[arrayNodeTo->gtLclVarCommon.gtLclNum].lvAddrExposed)
10666 goto ARR_ST_POST_VERIFY;
10669 // Check for assignment of NULL. This does not need CORINFO_HELP_ARRADDR_ST
10671 if (arrayNodeFrom->OperGet() == GT_CNS_INT)
10673 assert(arrayNodeFrom->gtType == TYP_REF && arrayNodeFrom->gtIntCon.gtIconVal == 0);
10676 goto ARR_ST_POST_VERIFY;
10679 STELEM_REF_POST_VERIFY:
10681 /* Call a helper function to do the assignment */
10682 op1 = gtNewHelperCallNode(CORINFO_HELP_ARRADDR_ST, TYP_VOID, 0, impPopList(3, &flags, nullptr));
10686 case CEE_STELEM_I1:
10689 case CEE_STELEM_I2:
10690 lclTyp = TYP_SHORT;
10693 lclTyp = TYP_I_IMPL;
10695 case CEE_STELEM_I4:
10698 case CEE_STELEM_I8:
10701 case CEE_STELEM_R4:
10702 lclTyp = TYP_FLOAT;
10704 case CEE_STELEM_R8:
10705 lclTyp = TYP_DOUBLE;
10710 if (tiVerificationNeeded)
10712 typeInfo tiArray = impStackTop(2).seTypeInfo;
10713 typeInfo tiIndex = impStackTop(1).seTypeInfo;
10714 typeInfo tiValue = impStackTop().seTypeInfo;
10716 // As per ECMA, the 'index' specified can be either int32 or native int.
10717 Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10718 typeInfo arrayElem = typeInfo(lclTyp);
10719 #ifdef _TARGET_64BIT_
10720 if (opcode == CEE_STELEM_I)
10722 arrayElem = typeInfo::nativeInt();
10724 #endif // _TARGET_64BIT_
10725 Verify(tiArray.IsNullObjRef() || typeInfo::AreEquivalent(verGetArrayElemType(tiArray), arrayElem),
10728 Verify(tiCompatibleWith(NormaliseForStack(tiValue), arrayElem.NormaliseForStack(), true),
10732 ARR_ST_POST_VERIFY:
10733 /* The strict order of evaluation is LHS-operands, RHS-operands,
10734 range-check, and then assignment. However, codegen currently
10735 does the range-check before evaluating the RHS-operands. So to
10736 maintain strict ordering, we spill the stack. */
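// E.g. (illustrative C#): in 'a[i] = M();' the call to M() must happen
// before any IndexOutOfRangeException, so the side effects already on
// the stack are spilled into statements that run ahead of the range check.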
10738 if (impStackTop().val->gtFlags & GTF_SIDE_EFFECT)
10740 impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
10741 "Strict ordering of exceptions for Array store"));
10744 /* Pull the new value from the stack */
10745 op2 = impPopStack().val;
10747 /* Pull the index value */
10748 op1 = impPopStack().val;
10750 /* Pull the array address */
10751 op3 = impPopStack().val;
10753 assertImp(op3->gtType == TYP_REF);
10754 if (op2->IsVarAddr())
10756 op2->gtType = TYP_I_IMPL;
10759 op3 = impCheckForNullPointer(op3);
10761 // Mark the block as containing an index expression
10763 if (op3->gtOper == GT_LCL_VAR)
10765 if (op1->gtOper == GT_LCL_VAR || op1->gtOper == GT_CNS_INT || op1->gtOper == GT_ADD)
10767 block->bbFlags |= BBF_HAS_IDX_LEN;
10768 optMethodFlags |= OMF_HAS_ARRAYREF;
10772 /* Create the index node */
10774 op1 = gtNewIndexRef(lclTyp, op3, op1);
10776 /* Create the assignment node and append it */
10778 if (lclTyp == TYP_STRUCT)
10780 assert(stelemClsHnd != DUMMY_INIT(NULL));
10782 op1->gtIndex.gtStructElemClass = stelemClsHnd;
10783 op1->gtIndex.gtIndElemSize = info.compCompHnd->getClassSize(stelemClsHnd);
10785 if (varTypeIsStruct(op1))
10787 op1 = impAssignStruct(op1, op2, stelemClsHnd, (unsigned)CHECK_SPILL_ALL);
10791 op2 = impImplicitR4orR8Cast(op2, op1->TypeGet());
10792 op1 = gtNewAssignNode(op1, op2);
10795 /* Mark the expression as containing an assignment */
10797 op1->gtFlags |= GTF_ASG;
10808 case CEE_ADD_OVF_UN:
10816 goto MATH_OP2_FLAGS;
10825 case CEE_SUB_OVF_UN:
10833 goto MATH_OP2_FLAGS;
10837 goto MATH_MAYBE_CALL_NO_OVF;
10842 case CEE_MUL_OVF_UN:
10849 goto MATH_MAYBE_CALL_OVF;
10851 // Other binary math operations
10855 goto MATH_MAYBE_CALL_NO_OVF;
10859 goto MATH_MAYBE_CALL_NO_OVF;
10863 goto MATH_MAYBE_CALL_NO_OVF;
10867 goto MATH_MAYBE_CALL_NO_OVF;
10869 MATH_MAYBE_CALL_NO_OVF:
10871 MATH_MAYBE_CALL_OVF:
10872 // The morpher has some complex logic about when to turn differently-
10873 // typed nodes on different platforms into helper calls. We need to
10874 // either duplicate that logic here, or just pessimistically make all
10875 // the nodes large enough to become call nodes. Since call nodes aren't
10876 // that much larger, and these opcodes are infrequent enough, we choose
10877 // the latter.
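// For instance, on 32-bit targets a TYP_LONG divide (GT_DIV) is later
// morphed into a helper call such as CORINFO_HELP_LDIV, so the node
// allocated here must be large enough to be turned into a GT_CALL in place.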
10879 goto MATH_OP2_FLAGS;
10891 MATH_OP2: // For default values of 'ovfl' and 'callNode'
10896 MATH_OP2_FLAGS: // If 'ovfl' and 'callNode' have already been set
10898 /* Pull two values and push back the result */
10900 if (tiVerificationNeeded)
10902 const typeInfo& tiOp1 = impStackTop(1).seTypeInfo;
10903 const typeInfo& tiOp2 = impStackTop().seTypeInfo;
10905 Verify(tiCompatibleWith(tiOp1, tiOp2, true), "different arg type");
10906 if (oper == GT_ADD || oper == GT_DIV || oper == GT_SUB || oper == GT_MUL || oper == GT_MOD)
10908 Verify(tiOp1.IsNumberType(), "not number");
10912 Verify(tiOp1.IsIntegerType(), "not integer");
10915 Verify(!ovfl || tiOp1.IsIntegerType(), "not integer");
10919 #ifdef _TARGET_64BIT_
10920 if (tiOp2.IsNativeIntType())
10924 #endif // _TARGET_64BIT_
10927 op2 = impPopStack().val;
10928 op1 = impPopStack().val;
10930 #if !CPU_HAS_FP_SUPPORT
10931 if (varTypeIsFloating(op1->gtType))
10936 /* Can't do arithmetic with references */
10937 assertImp(genActualType(op1->TypeGet()) != TYP_REF && genActualType(op2->TypeGet()) != TYP_REF);
10939 // Change both to TYP_I_IMPL (impBashVarAddrsToI won't change it if it's a true byref; only
10940 // if it is an address into the stack)
10941 impBashVarAddrsToI(op1, op2);
10943 type = impGetByRefResultType(oper, uns, &op1, &op2);
10945 assert(!ovfl || !varTypeIsFloating(op1->gtType));
10947 /* Special case: "int+0", "int-0", "int*1", "int/1" */
10949 if (op2->gtOper == GT_CNS_INT)
10951 if ((op2->IsIntegralConst(0) && (oper == GT_ADD || oper == GT_SUB)) ||
10952 (op2->IsIntegralConst(1) && (oper == GT_MUL || oper == GT_DIV)))
10955 impPushOnStack(op1, tiRetVal);
10960 #if !FEATURE_X87_DOUBLES
10961 // We can generate a TYP_FLOAT operation that has a TYP_DOUBLE operand
10963 if (varTypeIsFloating(type) && varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType))
10965 if (op1->TypeGet() != type)
10967 // We insert a cast of op1 to 'type'
10968 op1 = gtNewCastNode(type, op1, type);
10970 if (op2->TypeGet() != type)
10972 // We insert a cast of op2 to 'type'
10973 op2 = gtNewCastNode(type, op2, type);
10976 #endif // !FEATURE_X87_DOUBLES
10978 #if SMALL_TREE_NODES
10981 /* These operators can later be transformed into 'GT_CALL' */
10983 assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MUL]);
10984 #ifndef _TARGET_ARM_
10985 assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_DIV]);
10986 assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UDIV]);
10987 assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MOD]);
10988 assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UMOD]);
10990 // It's tempting to use LargeOpOpcode() here, but this logic is *not* saying
10991 // that we'll need to transform into a general large node, but rather specifically
10992 // to a call: by doing it this way, things keep working if there are multiple sizes,
10993 // and a CALL is no longer the largest.
10994 // That said, as of now it *is* a large node, so we'll do this with an assert rather than a condition.
10996 assert(GenTree::s_gtNodeSizes[GT_CALL] == TREE_NODE_SZ_LARGE);
10997 op1 = new (this, GT_CALL) GenTreeOp(oper, type, op1, op2 DEBUGARG(/*largeNode*/ true));
11000 #endif // SMALL_TREE_NODES
11002 op1 = gtNewOperNode(oper, type, op1, op2);
11005 /* Special case: integer/long division may throw an exception */
11007 if (varTypeIsIntegral(op1->TypeGet()) && op1->OperMayThrow())
11009 op1->gtFlags |= GTF_EXCEPT;
11014 assert(oper == GT_ADD || oper == GT_SUB || oper == GT_MUL);
11015 if (ovflType != TYP_UNKNOWN)
11017 op1->gtType = ovflType;
11019 op1->gtFlags |= (GTF_EXCEPT | GTF_OVERFLOW);
11022 op1->gtFlags |= GTF_UNSIGNED;
11026 impPushOnStack(op1, tiRetVal);
11041 if (tiVerificationNeeded)
11043 const typeInfo& tiVal = impStackTop(1).seTypeInfo;
11044 const typeInfo& tiShift = impStackTop(0).seTypeInfo;
11045 Verify(tiVal.IsIntegerType() && tiShift.IsType(TI_INT), "Bad shift args");
11048 op2 = impPopStack().val;
11049 op1 = impPopStack().val; // operand to be shifted
11050 impBashVarAddrsToI(op1, op2);
11052 type = genActualType(op1->TypeGet());
11053 op1 = gtNewOperNode(oper, type, op1, op2);
11055 impPushOnStack(op1, tiRetVal);
11059 if (tiVerificationNeeded)
11061 tiRetVal = impStackTop().seTypeInfo;
11062 Verify(tiRetVal.IsIntegerType(), "bad int value");
11065 op1 = impPopStack().val;
11066 impBashVarAddrsToI(op1, nullptr);
11067 type = genActualType(op1->TypeGet());
11068 impPushOnStack(gtNewOperNode(GT_NOT, type, op1), tiRetVal);
11072 if (tiVerificationNeeded)
11074 tiRetVal = impStackTop().seTypeInfo;
11075 Verify(tiRetVal.IsType(TI_DOUBLE), "bad R value");
11077 op1 = impPopStack().val;
11078 type = op1->TypeGet();
11079 op1 = gtNewOperNode(GT_CKFINITE, type, op1);
11080 op1->gtFlags |= GTF_EXCEPT;
11082 impPushOnStack(op1, tiRetVal);
11087 val = getI4LittleEndian(codeAddr); // jump distance
11088 jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int32)) + val);
11092 val = getI1LittleEndian(codeAddr); // jump distance
11093 jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int8)) + val);
11097 if (compIsForInlining())
11099 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_LEAVE);
11103 JITDUMP(" %04X", jmpAddr);
11104 if (block->bbJumpKind != BBJ_LEAVE)
11106 impResetLeaveBlock(block, jmpAddr);
11109 assert(jmpAddr == block->bbJumpDest->bbCodeOffs);
11110 impImportLeave(block);
11111 impNoteBranchOffs();
11117 jmpDist = (sz == 1) ? getI1LittleEndian(codeAddr) : getI4LittleEndian(codeAddr);
11119 if (compIsForInlining() && jmpDist == 0)
11124 impNoteBranchOffs();
11130 case CEE_BRFALSE_S:
11132 /* Pop the comparand (now there's a neat term) from the stack */
11133 if (tiVerificationNeeded)
11135 typeInfo& tiVal = impStackTop().seTypeInfo;
11136 Verify(tiVal.IsObjRef() || tiVal.IsByRef() || tiVal.IsIntegerType() || tiVal.IsMethod(),
11140 op1 = impPopStack().val;
11141 type = op1->TypeGet();
11143 // brfalse and brtrue are only allowed on I4, refs, and byrefs.
11144 if (!opts.MinOpts() && !opts.compDbgCode && block->bbJumpDest == block->bbNext)
11146 block->bbJumpKind = BBJ_NONE;
11148 if (op1->gtFlags & GTF_GLOB_EFFECT)
11150 op1 = gtUnusedValNode(op1);
11159 if (op1->OperIsCompare())
11161 if (opcode == CEE_BRFALSE || opcode == CEE_BRFALSE_S)
11163 // Flip the sense of the compare
11165 op1 = gtReverseCond(op1);
11170 /* We'll compare against an equally-sized integer 0 */
11171 /* For small types, we always compare against int */
11172 op2 = gtNewZeroConNode(genActualType(op1->gtType));
11174 /* Create the comparison operator and try to fold it */
11176 oper = (opcode == CEE_BRTRUE || opcode == CEE_BRTRUE_S) ? GT_NE : GT_EQ;
11177 op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
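// E.g. (illustrative): IL 'ldloc.0; brtrue L' becomes
// GT_JTRUE(GT_NE(V_0, 0)); brfalse uses GT_EQ instead.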
11184 /* Fold comparison if we can */
11186 op1 = gtFoldExpr(op1);
11188 /* Try to fold the really simple cases like 'iconst *, ifne/ifeq' */
11189 /* Don't make any blocks unreachable in import only mode */
11191 if ((op1->gtOper == GT_CNS_INT) && !compIsForImportOnly())
11193 /* gtFoldExpr() should prevent this as we don't want to make any blocks
11194 unreachable under compDbgCode */
11195 assert(!opts.compDbgCode);
11197 BBjumpKinds foldedJumpKind = (BBjumpKinds)(op1->gtIntCon.gtIconVal ? BBJ_ALWAYS : BBJ_NONE);
11198 assertImp((block->bbJumpKind == BBJ_COND) // normal case
11199 || (block->bbJumpKind == foldedJumpKind)); // this can happen if we are reimporting the
11200 // block for the second time
11202 block->bbJumpKind = foldedJumpKind;
11206 if (op1->gtIntCon.gtIconVal)
11208 printf("\nThe conditional jump becomes an unconditional jump to BB%02u\n",
11209 block->bbJumpDest->bbNum);
11213 printf("\nThe block falls through into the next BB%02u\n", block->bbNext->bbNum);
11220 op1 = gtNewOperNode(GT_JTRUE, TYP_VOID, op1);
11222 /* GT_JTRUE is handled specially for non-empty stacks. See 'addStmt'
11223 in impImportBlock(block). For correct line numbers, spill stack. */
11225 if (opts.compDbgCode && impCurStmtOffs != BAD_IL_OFFSET)
11227 impSpillStackEnsure(true);
11254 if (tiVerificationNeeded)
11256 verVerifyCond(impStackTop(1).seTypeInfo, impStackTop().seTypeInfo, opcode);
11257 tiRetVal = typeInfo(TI_INT);
11260 op2 = impPopStack().val;
11261 op1 = impPopStack().val;
11263 #ifdef _TARGET_64BIT_
11264 if (varTypeIsI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_INT))
11266 op2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
11268 else if (varTypeIsI(op2->TypeGet()) && (genActualType(op1->TypeGet()) == TYP_INT))
11270 op1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
11272 #endif // _TARGET_64BIT_
11274 assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
11275 varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet()) ||
11276 varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
11278 /* Create the comparison node */
11280 op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
11282 /* TODO: we set both flags even when only one is appropriate */
11283 if (opcode == CEE_CGT_UN || opcode == CEE_CLT_UN)
11285 op1->gtFlags |= GTF_RELOP_NAN_UN | GTF_UNSIGNED;
11288 impPushOnStack(op1, tiRetVal);
11294 goto CMP_2_OPs_AND_BR;
11299 goto CMP_2_OPs_AND_BR;
11304 goto CMP_2_OPs_AND_BR_UN;
11309 goto CMP_2_OPs_AND_BR;
11314 goto CMP_2_OPs_AND_BR_UN;
11319 goto CMP_2_OPs_AND_BR;
11324 goto CMP_2_OPs_AND_BR_UN;
11329 goto CMP_2_OPs_AND_BR;
11334 goto CMP_2_OPs_AND_BR_UN;
11339 goto CMP_2_OPs_AND_BR_UN;
11341 CMP_2_OPs_AND_BR_UN:
11344 goto CMP_2_OPs_AND_BR_ALL;
11348 goto CMP_2_OPs_AND_BR_ALL;
11349 CMP_2_OPs_AND_BR_ALL:
11351 if (tiVerificationNeeded)
11353 verVerifyCond(impStackTop(1).seTypeInfo, impStackTop().seTypeInfo, opcode);
11356 /* Pull two values */
11357 op2 = impPopStack().val;
11358 op1 = impPopStack().val;
11360 #ifdef _TARGET_64BIT_
11361 if ((op1->TypeGet() == TYP_I_IMPL) && (genActualType(op2->TypeGet()) == TYP_INT))
11363 op2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
11365 else if ((op2->TypeGet() == TYP_I_IMPL) && (genActualType(op1->TypeGet()) == TYP_INT))
11367 op1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
11369 #endif // _TARGET_64BIT_
11371 assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
11372 varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet()) ||
11373 varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
11375 if (!opts.MinOpts() && !opts.compDbgCode && block->bbJumpDest == block->bbNext)
11377 block->bbJumpKind = BBJ_NONE;
11379 if (op1->gtFlags & GTF_GLOB_EFFECT)
11381 impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
11382 "Branch to next Optimization, op1 side effect"));
11383 impAppendTree(gtUnusedValNode(op1), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
11385 if (op2->gtFlags & GTF_GLOB_EFFECT)
11387 impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
11388 "Branch to next Optimization, op2 side effect"));
11389 impAppendTree(gtUnusedValNode(op2), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
11393 if ((op1->gtFlags | op2->gtFlags) & GTF_GLOB_EFFECT)
11395 impNoteLastILoffs();
11400 #if !FEATURE_X87_DOUBLES
11401 // We can generate a compare of differently sized floating point op1 and op2,
11402 // so we insert a cast.
11404 if (varTypeIsFloating(op1->TypeGet()))
11406 if (op1->TypeGet() != op2->TypeGet())
11408 assert(varTypeIsFloating(op2->TypeGet()));
11410 // say op1=double, op2=float. To avoid loss of precision
11411 // while comparing, op2 is converted to double and double
11412 // comparison is done.
11413 if (op1->TypeGet() == TYP_DOUBLE)
11415 // We insert a cast of op2 to TYP_DOUBLE
11416 op2 = gtNewCastNode(TYP_DOUBLE, op2, TYP_DOUBLE);
11418 else if (op2->TypeGet() == TYP_DOUBLE)
11420 // We insert a cast of op1 to TYP_DOUBLE
11421 op1 = gtNewCastNode(TYP_DOUBLE, op1, TYP_DOUBLE);
11425 #endif // !FEATURE_X87_DOUBLES
11427 /* Create and append the operator */
11429 op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
11433 op1->gtFlags |= GTF_UNSIGNED;
11438 op1->gtFlags |= GTF_RELOP_NAN_UN;
11444 assert(!compIsForInlining());
11446 if (tiVerificationNeeded)
11448 Verify(impStackTop().seTypeInfo.IsType(TI_INT), "Bad switch val");
11450 /* Pop the switch value off the stack */
11451 op1 = impPopStack().val;
11452 assertImp(genActualTypeIsIntOrI(op1->TypeGet()));
11454 #ifdef _TARGET_64BIT_
11455 // Widen 'op1' on 64-bit targets
11456 if (op1->TypeGet() != TYP_I_IMPL)
11458 if (op1->OperGet() == GT_CNS_INT)
11460 op1->gtType = TYP_I_IMPL;
11464 op1 = gtNewCastNode(TYP_I_IMPL, op1, TYP_I_IMPL);
11467 #endif // _TARGET_64BIT_
11468 assert(genActualType(op1->TypeGet()) == TYP_I_IMPL);
11470 /* We can create a switch node */
11472 op1 = gtNewOperNode(GT_SWITCH, TYP_VOID, op1);
11474 val = (int)getU4LittleEndian(codeAddr);
11475 codeAddr += 4 + val * 4; // skip over the switch-table
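// The IL 'switch' operand is a 4-byte case count followed by that many
// 4-byte relative branch offsets; the jump targets were already recorded
// when the basic blocks were created, so we simply step over the table here.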
11479 /************************** Casting OPCODES ***************************/
11481 case CEE_CONV_OVF_I1:
11484 case CEE_CONV_OVF_I2:
11485 lclTyp = TYP_SHORT;
11487 case CEE_CONV_OVF_I:
11488 lclTyp = TYP_I_IMPL;
11490 case CEE_CONV_OVF_I4:
11493 case CEE_CONV_OVF_I8:
11497 case CEE_CONV_OVF_U1:
11498 lclTyp = TYP_UBYTE;
11500 case CEE_CONV_OVF_U2:
11503 case CEE_CONV_OVF_U:
11504 lclTyp = TYP_U_IMPL;
11506 case CEE_CONV_OVF_U4:
11509 case CEE_CONV_OVF_U8:
11510 lclTyp = TYP_ULONG;
11513 case CEE_CONV_OVF_I1_UN:
11516 case CEE_CONV_OVF_I2_UN:
11517 lclTyp = TYP_SHORT;
11519 case CEE_CONV_OVF_I_UN:
11520 lclTyp = TYP_I_IMPL;
11522 case CEE_CONV_OVF_I4_UN:
11525 case CEE_CONV_OVF_I8_UN:
11529 case CEE_CONV_OVF_U1_UN:
11530 lclTyp = TYP_UBYTE;
11532 case CEE_CONV_OVF_U2_UN:
11535 case CEE_CONV_OVF_U_UN:
11536 lclTyp = TYP_U_IMPL;
11538 case CEE_CONV_OVF_U4_UN:
11541 case CEE_CONV_OVF_U8_UN:
11542 lclTyp = TYP_ULONG;
11547 goto CONV_OVF_COMMON;
11550 goto CONV_OVF_COMMON;
11560 lclTyp = TYP_SHORT;
11563 lclTyp = TYP_I_IMPL;
11573 lclTyp = TYP_UBYTE;
11578 #if (REGSIZE_BYTES == 8)
11580 lclTyp = TYP_U_IMPL;
11584 lclTyp = TYP_U_IMPL;
11591 lclTyp = TYP_ULONG;
11595 lclTyp = TYP_FLOAT;
11598 lclTyp = TYP_DOUBLE;
11601 case CEE_CONV_R_UN:
11602 lclTyp = TYP_DOUBLE;
11616 // just check that we have a number on the stack
11617 if (tiVerificationNeeded)
11619 const typeInfo& tiVal = impStackTop().seTypeInfo;
11620 Verify(tiVal.IsNumberType(), "bad arg");
11622 #ifdef _TARGET_64BIT_
11623 bool isNative = false;
11627 case CEE_CONV_OVF_I:
11628 case CEE_CONV_OVF_I_UN:
11630 case CEE_CONV_OVF_U:
11631 case CEE_CONV_OVF_U_UN:
11635 // leave 'isNative' = false;
11640 tiRetVal = typeInfo::nativeInt();
11643 #endif // _TARGET_64BIT_
11645 tiRetVal = typeInfo(lclTyp).NormaliseForStack();
11649 // only converts from FLOAT or DOUBLE to an integer type
11650 // and converts from ULONG (or LONG on ARM) to DOUBLE are morphed to calls
11652 if (varTypeIsFloating(lclTyp))
11654 callNode = varTypeIsLong(impStackTop().val) || uns // uint->dbl gets turned into uint->long->dbl
11655 #ifdef _TARGET_64BIT_
11656 // TODO-ARM64-Bug?: This was AMD64; I enabled it for ARM64 also. OK?
11657 // TYP_BYREF could be used as TYP_I_IMPL which is long.
11658 // TODO-CQ: remove this when we lower casts long/ulong --> float/double
11659 // and generate SSE2 code instead of going through helper calls.
11660 || (impStackTop().val->TypeGet() == TYP_BYREF)
11666 callNode = varTypeIsFloating(impStackTop().val->TypeGet());
11669 // At this point uns, ovfl and callNode are all set
11671 op1 = impPopStack().val;
11672 impBashVarAddrsToI(op1);
11674 if (varTypeIsSmall(lclTyp) && !ovfl && op1->gtType == TYP_INT && op1->gtOper == GT_AND)
11676 op2 = op1->gtOp.gtOp2;
11678 if (op2->gtOper == GT_CNS_INT)
11680 ssize_t ival = op2->gtIntCon.gtIconVal;
11681 ssize_t mask, umask;
11697 assert(!"unexpected type");
11701 if (((ival & umask) == ival) || ((ival & mask) == ival && uns))
11703 /* Toss the cast, it's a waste of time */
11705 impPushOnStack(op1, tiRetVal);
11708 else if (ival == mask)
11710 /* Toss the masking, it's a waste of time, since
11711 we sign-extend from the small value anyway */
11713 op1 = op1->gtOp.gtOp1;
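// Illustrative examples, assuming lclTyp is TYP_BYTE (mask = 0xFF,
// umask = 0x7F, per the elided switch above):
//     conv.i1 of (x & 0x7F): the AND already confines the value to the
//     signed byte range, so the cast is dropped.
//     conv.i1 of (x & 0xFF): the AND keeps exactly the bits the cast
//     would keep, so the AND is dropped instead.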
11718 /* The 'op2' sub-operand of a cast is the 'real' type number,
11719 since the result of a cast to one of the 'small' integer
11720 types is an integer. */
11723 type = genActualType(lclTyp);
11725 #if SMALL_TREE_NODES
11728 op1 = gtNewCastNodeL(type, op1, lclTyp);
11731 #endif // SMALL_TREE_NODES
11733 op1 = gtNewCastNode(type, op1, lclTyp);
11738 op1->gtFlags |= (GTF_OVERFLOW | GTF_EXCEPT);
11742 op1->gtFlags |= GTF_UNSIGNED;
11744 impPushOnStack(op1, tiRetVal);
11748 if (tiVerificationNeeded)
11750 tiRetVal = impStackTop().seTypeInfo;
11751 Verify(tiRetVal.IsNumberType(), "Bad arg");
11754 op1 = impPopStack().val;
11755 impBashVarAddrsToI(op1, nullptr);
11756 impPushOnStack(gtNewOperNode(GT_NEG, genActualType(op1->gtType), op1), tiRetVal);
11760 if (tiVerificationNeeded)
11765 /* Pull the top value from the stack */
11767 op1 = impPopStack(clsHnd).val;
11769 /* Get hold of the type of the value being duplicated */
11771 lclTyp = genActualType(op1->gtType);
11773 /* Does the value have any side effects? */
11775 if ((op1->gtFlags & GTF_SIDE_EFFECT) || opts.compDbgCode)
11777 // Since we are throwing away the value, just normalize
11778 // it to its address. This is more efficient.
11780 if (varTypeIsStruct(op1))
11782 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
11783 // Non-calls, such as obj or ret_expr, have to go through this.
11784 // Calls with large struct return value have to go through this.
11785 // Helper calls with small struct return value also have to go
11786 // through this since they do not follow Unix calling convention.
11787 if (op1->gtOper != GT_CALL || !IsMultiRegReturnedType(clsHnd) ||
11788 op1->AsCall()->gtCallType == CT_HELPER)
11789 #endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
11791 op1 = impGetStructAddr(op1, clsHnd, (unsigned)CHECK_SPILL_ALL, false);
11795 // If op1 is non-overflow cast, throw it away since it is useless.
11796 // Another reason for throwing away the useless cast is in the context of
11797 // implicit tail calls when the operand of pop is GT_CAST(GT_CALL(..)).
11798 // The cast gets added as part of importing GT_CALL, which gets in the way
11799 // of fgMorphCall() on the forms of tail call nodes that we assert.
11800 if ((op1->gtOper == GT_CAST) && !op1->gtOverflow())
11802 op1 = op1->gtOp.gtOp1;
11805 // If 'op1' is an expression, create an assignment node.
11806 // Helps analyses (like CSE) to work fine.
11808 if (op1->gtOper != GT_CALL)
11810 op1 = gtUnusedValNode(op1);
11813 /* Append the value to the tree list */
11817 /* No side effects - just throw the <BEEP> thing away */
11822 if (tiVerificationNeeded)
11824 // Dup could start the beginning of a delegate creation sequence; remember that.
11825 delegateCreateStart = codeAddr - 1;
11829 // Convert a (dup, stloc) sequence into a (stloc, ldloc) sequence in the following cases:
11830 // - If this is non-debug code - so that CSE will recognize the two as equal.
11831 // This helps eliminate a redundant bounds check in cases such as:
11832 // ariba[i+3] += some_value;
11833 // - If the top of the stack is a non-leaf that may be expensive to clone.
11835 if (codeAddr < codeEndp)
11837 OPCODE nextOpcode = (OPCODE)getU1LittleEndian(codeAddr);
11838 if (impIsAnySTLOC(nextOpcode))
11840 if (!opts.compDbgCode)
11842 insertLdloc = true;
11845 GenTree* stackTop = impStackTop().val;
11846 if (!stackTop->IsIntegralConst(0) && !stackTop->IsFPZero() && !stackTop->IsLocal())
11848 insertLdloc = true;
11854 /* Pull the top value from the stack */
11855 op1 = impPopStack(tiRetVal);
11857 /* Clone the value */
11858 op1 = impCloneExpr(op1, &op2, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_ALL,
11859 nullptr DEBUGARG("DUP instruction"));
11861 /* Either the tree started with no global effects, or impCloneExpr
11862 evaluated the tree to a temp and returned two copies of that
11863 temp. Either way, neither op1 nor op2 should have side effects. */
11865 assert(!(op1->gtFlags & GTF_GLOB_EFFECT) && !(op2->gtFlags & GTF_GLOB_EFFECT));
11867 /* Push the tree/temp back on the stack */
11868 impPushOnStack(op1, tiRetVal);
11870 /* Push the copy on the stack */
11871 impPushOnStack(op2, tiRetVal);
11879 lclTyp = TYP_SHORT;
11888 lclTyp = TYP_I_IMPL;
11890 case CEE_STIND_REF:
11894 lclTyp = TYP_FLOAT;
11897 lclTyp = TYP_DOUBLE;
11901 if (tiVerificationNeeded)
11903 typeInfo instrType(lclTyp);
11904 #ifdef _TARGET_64BIT_
11905 if (opcode == CEE_STIND_I)
11907 instrType = typeInfo::nativeInt();
11909 #endif // _TARGET_64BIT_
11910 verVerifySTIND(impStackTop(1).seTypeInfo, impStackTop(0).seTypeInfo, instrType);
11914 compUnsafeCastUsed = true; // Have to go conservative
11919 op2 = impPopStack().val; // value to store
11920 op1 = impPopStack().val; // address to store to
11922 // you can indirect off of a TYP_I_IMPL (if we are in C) or a BYREF
11923 assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
11925 impBashVarAddrsToI(op1, op2);
11927 op2 = impImplicitR4orR8Cast(op2, lclTyp);
11929 #ifdef _TARGET_64BIT_
11930 // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
11931 if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType))
11933 op2->gtType = TYP_I_IMPL;
11937 // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
11939 if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT))
11941 assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
11942 op2 = gtNewCastNode(TYP_INT, op2, TYP_INT);
11944 // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
11946 if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT))
11948 assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
11949 op2 = gtNewCastNode(TYP_I_IMPL, op2, TYP_I_IMPL);
11952 #endif // _TARGET_64BIT_
11954 if (opcode == CEE_STIND_REF)
11956 // STIND_REF can be used to store TYP_INT, TYP_I_IMPL, TYP_REF, or TYP_BYREF
11957 assertImp(varTypeIsIntOrI(op2->gtType) || varTypeIsGC(op2->gtType));
11958 lclTyp = genActualType(op2->TypeGet());
11961 // Check target type.
11963 if (op2->gtType == TYP_BYREF || lclTyp == TYP_BYREF)
11965 if (op2->gtType == TYP_BYREF)
11967 assertImp(lclTyp == TYP_BYREF || lclTyp == TYP_I_IMPL);
11969 else if (lclTyp == TYP_BYREF)
11971 assertImp(op2->gtType == TYP_BYREF || varTypeIsIntOrI(op2->gtType));
11976 assertImp(genActualType(op2->gtType) == genActualType(lclTyp) ||
11977 ((lclTyp == TYP_I_IMPL) && (genActualType(op2->gtType) == TYP_INT)) ||
11978 (varTypeIsFloating(op2->gtType) && varTypeIsFloating(lclTyp)));
11982 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
11984 // stind could point anywhere, for example a boxed class static int
11985 op1->gtFlags |= GTF_IND_TGTANYWHERE;
11987 if (prefixFlags & PREFIX_VOLATILE)
11989 assert(op1->OperGet() == GT_IND);
11990 op1->gtFlags |= GTF_DONT_CSE; // Can't CSE a volatile
11991 op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
11992 op1->gtFlags |= GTF_IND_VOLATILE;
11995 if (prefixFlags & PREFIX_UNALIGNED)
11997 assert(op1->OperGet() == GT_IND);
11998 op1->gtFlags |= GTF_IND_UNALIGNED;
12001 op1 = gtNewAssignNode(op1, op2);
12002 op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
12004 // Spill side-effects AND global-data-accesses
12005 if (verCurrentState.esStackDepth > 0)
12007 impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STIND"));
12016 lclTyp = TYP_SHORT;
12025 case CEE_LDIND_REF:
12029 lclTyp = TYP_I_IMPL;
12032 lclTyp = TYP_FLOAT;
12035 lclTyp = TYP_DOUBLE;
12038 lclTyp = TYP_UBYTE;
12045 if (tiVerificationNeeded)
12047 typeInfo lclTiType(lclTyp);
12048 #ifdef _TARGET_64BIT_
12049 if (opcode == CEE_LDIND_I)
12051 lclTiType = typeInfo::nativeInt();
12053 #endif // _TARGET_64BIT_
12054 tiRetVal = verVerifyLDIND(impStackTop().seTypeInfo, lclTiType);
12055 tiRetVal.NormaliseForStack();
12059 compUnsafeCastUsed = true; // Have to go conservative
12064 op1 = impPopStack().val; // address to load from
12065 impBashVarAddrsToI(op1);
12067 #ifdef _TARGET_64BIT_
12068 // Allow an upcast of op1 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
12070 if (genActualType(op1->gtType) == TYP_INT)
12072 assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
12073 op1 = gtNewCastNode(TYP_I_IMPL, op1, TYP_I_IMPL);
12077 assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
12079 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
12081 // ldind could point anywhere, for example a boxed class static int
12082 op1->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
12084 if (prefixFlags & PREFIX_VOLATILE)
12086 assert(op1->OperGet() == GT_IND);
12087 op1->gtFlags |= GTF_DONT_CSE; // Can't CSE a volatile
12088 op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
12089 op1->gtFlags |= GTF_IND_VOLATILE;
12092 if (prefixFlags & PREFIX_UNALIGNED)
12094 assert(op1->OperGet() == GT_IND);
12095 op1->gtFlags |= GTF_IND_UNALIGNED;
12098 impPushOnStack(op1, tiRetVal);
12102 case CEE_UNALIGNED:
12105 val = getU1LittleEndian(codeAddr);
12107 JITDUMP(" %u", val);
12108 if ((val != 1) && (val != 2) && (val != 4))
12110 BADCODE("Alignment for unaligned. prefix must be 1, 2, or 4");
12113 Verify(!(prefixFlags & PREFIX_UNALIGNED), "Multiple unaligned. prefixes");
12114 prefixFlags |= PREFIX_UNALIGNED;
12116 impValidateMemoryAccessOpcode(codeAddr, codeEndp, false);
12119 opcode = (OPCODE)getU1LittleEndian(codeAddr);
12120 codeAddr += sizeof(__int8);
12121 opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
12122 goto DECODE_OPCODE;
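// Example use (illustrative IL): 'unaligned. 1 ldind.i4' reads an int32
// that may have only byte alignment, e.g. out of a packed struct; the
// prefix merely annotates the memory access that follows.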
12126 Verify(!(prefixFlags & PREFIX_VOLATILE), "Multiple volatile. prefixes");
12127 prefixFlags |= PREFIX_VOLATILE;
12129 impValidateMemoryAccessOpcode(codeAddr, codeEndp, true);
12136 // Need to do a lookup here so that we perform an access check
12137 // and do a NOWAY if protections are violated
12138 _impResolveToken(CORINFO_TOKENKIND_Method);
12140 JITDUMP(" %08X", resolvedToken.token);
12142 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef*/,
12143 addVerifyFlag(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN)),
12146 // This check really only applies to intrinsic Array.Address methods
12147 if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE)
12149 NO_WAY("Currently do not support LDFTN of Parameterized functions");
12152 // Do this before DO_LDFTN since CEE_LDVIRTFN does it on its own.
12153 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12155 if (tiVerificationNeeded)
12157 // LDFTN could start the beginning of a delegate creation sequence; remember that.
12158 delegateCreateStart = codeAddr - 2;
12160 // check any constraints on the callee's class and type parameters
12161 VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
12162 "method has unsatisfied class constraints");
12163 VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(resolvedToken.hClass,
12164 resolvedToken.hMethod),
12165 "method has unsatisfied method constraints");
12167 mflags = callInfo.verMethodFlags;
12168 Verify(!(mflags & CORINFO_FLG_CONSTRUCTOR), "LDFTN on a constructor");
12172 op1 = impMethodPointer(&resolvedToken, &callInfo);
12173 if (compDonotInline())
12178 impPushOnStack(op1, typeInfo(resolvedToken.hMethod));
12183 case CEE_LDVIRTFTN:
12185 /* Get the method token */
12187 _impResolveToken(CORINFO_TOKENKIND_Method);
12189 JITDUMP(" %08X", resolvedToken.token);
12191 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef */,
12192 addVerifyFlag(combine(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN),
12193 CORINFO_CALLINFO_CALLVIRT)),
12196 // This check really only applies to intrinsic Array.Address methods
12197 if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE)
12199 NO_WAY("Currently do not support LDFTN of Parameterized functions");
12202 mflags = callInfo.methodFlags;
12204 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12206 if (compIsForInlining())
12208 if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL))
12210 compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDVIRTFN_ON_NON_VIRTUAL);
12215 CORINFO_SIG_INFO& ftnSig = callInfo.sig;
12217 if (tiVerificationNeeded)
12220 Verify(ftnSig.hasThis(), "ldvirtftn on a static method");
12221 Verify(!(mflags & CORINFO_FLG_CONSTRUCTOR), "LDVIRTFTN on a constructor");
12223 // JIT32 verifier rejects verifiable ldvirtftn pattern
12224 typeInfo declType =
12225 verMakeTypeInfo(resolvedToken.hClass, true); // Change TI_STRUCT to TI_REF when necessary
12227 typeInfo arg = impStackTop().seTypeInfo;
Verify((arg.IsType(TI_REF) || arg.IsType(TI_NULL)) && tiCompatibleWith(arg, declType, true),
       "bad ldvirtftn");
12231 CORINFO_CLASS_HANDLE instanceClassHnd = info.compClassHnd;
12232 if (!(arg.IsType(TI_NULL) || (mflags & CORINFO_FLG_STATIC)))
12234 instanceClassHnd = arg.GetClassHandleForObjRef();
12237 // check any constraints on the method's class and type parameters
12238 VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
12239 "method has unsatisfied class constraints");
12240 VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(resolvedToken.hClass,
12241 resolvedToken.hMethod),
12242 "method has unsatisfied method constraints");
12244 if (mflags & CORINFO_FLG_PROTECTED)
12246 Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
12247 "Accessing protected method through wrong type.");
12251 /* Get the object-ref */
12252 op1 = impPopStack().val;
12253 assertImp(op1->gtType == TYP_REF);
12255 if (opts.IsReadyToRun())
12257 if (callInfo.kind != CORINFO_VIRTUALCALL_LDVIRTFTN)
12259 if (op1->gtFlags & GTF_SIDE_EFFECT)
12261 op1 = gtUnusedValNode(op1);
12262 impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
12267 else if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL))
12269 if (op1->gtFlags & GTF_SIDE_EFFECT)
12271 op1 = gtUnusedValNode(op1);
12272 impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
12277 GenTreePtr fptr = impImportLdvirtftn(op1, &resolvedToken, &callInfo);
12278 if (compDonotInline())
12283 impPushOnStack(fptr, typeInfo(resolvedToken.hMethod));
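// For illustration (a sketch): the verifiable virtual-delegate pattern is
//     dup
//     ldvirtftn instance void C::M()
//     newobj    instance void D::.ctor(object, native int)
// which is why an object reference (TYP_REF) is popped above to drive the
// virtual lookup.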
12288 case CEE_CONSTRAINED:
12290 assertImp(sz == sizeof(unsigned));
12291 impResolveToken(codeAddr, &constrainedResolvedToken, CORINFO_TOKENKIND_Constrained);
12292 codeAddr += sizeof(unsigned); // prefix instructions must increment codeAddr manually
12293 JITDUMP(" (%08X) ", constrainedResolvedToken.token);
12295 Verify(!(prefixFlags & PREFIX_CONSTRAINED), "Multiple constrained. prefixes");
12296 prefixFlags |= PREFIX_CONSTRAINED;
12299 OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
12300 if (actualOpcode != CEE_CALLVIRT)
12302 BADCODE("constrained. has to be followed by callvirt");
12309 JITDUMP(" readonly.");
12311 Verify(!(prefixFlags & PREFIX_READONLY), "Multiple readonly. prefixes");
12312 prefixFlags |= PREFIX_READONLY;
12315 OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
12316 if (actualOpcode != CEE_LDELEMA && !impOpcodeIsCallOpcode(actualOpcode))
12318 BADCODE("readonly. has to be followed by ldelema or call");
12328 Verify(!(prefixFlags & PREFIX_TAILCALL_EXPLICIT), "Multiple tailcall. prefixes");
12329 prefixFlags |= PREFIX_TAILCALL_EXPLICIT;
12332 OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
12333 if (!impOpcodeIsCallOpcode(actualOpcode))
12335 BADCODE("tailcall. has to be followed by call, callvirt or calli");
12343 /* Since we will implicitly insert newObjThisPtr at the start of the
12344 argument list, spill any GTF_ORDER_SIDEEFF */
12345 impSpillSpecialSideEff();
12347 /* NEWOBJ does not respond to TAIL */
12348 prefixFlags &= ~PREFIX_TAILCALL_EXPLICIT;
12350 /* NEWOBJ does not respond to CONSTRAINED */
12351 prefixFlags &= ~PREFIX_CONSTRAINED;
12353 #if COR_JIT_EE_VERSION > 460
12354 _impResolveToken(CORINFO_TOKENKIND_NewObj);
12356 _impResolveToken(CORINFO_TOKENKIND_Method);
12359 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef*/,
12360 addVerifyFlag(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_ALLOWINSTPARAM)),
12363 if (compIsForInlining())
12365 if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
12367 // Check to see if this call violates the boundary.
12368 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_SECURITY);
12373 mflags = callInfo.methodFlags;
12375 if ((mflags & (CORINFO_FLG_STATIC | CORINFO_FLG_ABSTRACT)) != 0)
12377 BADCODE("newobj on static or abstract method");
12380 // Insert the security callout before any actual code is generated
12381 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12383 // There are three different cases for new
12384 // Object size is variable (depends on arguments)
12385 // 1) Object is an array (arrays treated specially by the EE)
12386 // 2) Object is some other variable sized object (e.g. String)
12387 // 3) Class Size can be determined beforehand (normal case)
12388 // In the first case, we need to call a NEWOBJ helper (multinewarray)
12389 // in the second case we call the constructor with a '0' this pointer
// In the third case we alloc the memory, then call the constructor
12392 clsFlags = callInfo.classFlags;
12393 if (clsFlags & CORINFO_FLG_ARRAY)
12395 if (tiVerificationNeeded)
12397 CORINFO_CLASS_HANDLE elemTypeHnd;
12398 INDEBUG(CorInfoType corType =)
12399 info.compCompHnd->getChildType(resolvedToken.hClass, &elemTypeHnd);
12400 assert(!(elemTypeHnd == nullptr && corType == CORINFO_TYPE_VALUECLASS));
12401 Verify(elemTypeHnd == nullptr ||
12402 !(info.compCompHnd->getClassAttribs(elemTypeHnd) & CORINFO_FLG_CONTAINS_STACK_PTR),
12403 "newarr of byref-like objects");
12404 verVerifyCall(opcode, &resolvedToken, nullptr, ((prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0),
12405 ((prefixFlags & PREFIX_READONLY) != 0), delegateCreateStart, codeAddr - 1,
12406 &callInfo DEBUGARG(info.compFullName));
12408 // Arrays need to call the NEWOBJ helper.
12409 assertImp(clsFlags & CORINFO_FLG_VAROBJSIZE);
12411 impImportNewObjArray(&resolvedToken, &callInfo);
12412 if (compDonotInline())
12420 // At present this can only be String
12421 else if (clsFlags & CORINFO_FLG_VAROBJSIZE)
12423 if (IsTargetAbi(CORINFO_CORERT_ABI))
12425 // The dummy argument does not exist in CoreRT
12426 newObjThisPtr = nullptr;
12430 // This is the case for variable-sized objects that are not
12431 // arrays. In this case, call the constructor with a null 'this'
12433 newObjThisPtr = gtNewIconNode(0, TYP_REF);
12436 /* Remember that this basic block contains 'new' of an object */
12437 block->bbFlags |= BBF_HAS_NEWOBJ;
12438 optMethodFlags |= OMF_HAS_NEWOBJ;
12442 // This is the normal case where the size of the object is
12443 // fixed. Allocate the memory and call the constructor.
12445 // Note: We cannot add a peep to avoid use of temp here
// because we don't have enough interference info to detect when
12447 // sources and destination interfere, example: s = new S(ref);
// TODO: Find the correct place to introduce a general
12450 // reverse copy prop for struct return values from newobj or
12451 // any function returning structs.
12453 /* get a temporary for the new object */
12454 lclNum = lvaGrabTemp(true DEBUGARG("NewObj constructor temp"));
12456 // In the value class case we only need clsHnd for size calcs.
12458 // The lookup of the code pointer will be handled by CALL in this case
12459 if (clsFlags & CORINFO_FLG_VALUECLASS)
12461 if (compIsForInlining())
12463 // If value class has GC fields, inform the inliner. It may choose to
12464 // bail out on the inline.
12465 DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
12466 if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0)
12468 compInlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT);
12469 if (compInlineResult->IsFailure())
12474 // Do further notification in the case where the call site is rare;
12475 // some policies do not track the relative hotness of call sites for
12476 // "always" inline cases.
12477 if (impInlineInfo->iciBlock->isRunRarely())
12479 compInlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT);
12480 if (compInlineResult->IsFailure())
12488 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
12489 unsigned size = info.compCompHnd->getClassSize(resolvedToken.hClass);
12491 if (impIsPrimitive(jitTyp))
12493 lvaTable[lclNum].lvType = JITtype2varType(jitTyp);
12497 // The local variable itself is the allocated space.
// Here we need the unsafe value cls check, since the address of the struct is taken
// for further use and is potentially exploitable.
12500 lvaSetStruct(lclNum, resolvedToken.hClass, true /* unsafe value cls check */);
12503 // Append a tree to zero-out the temp
12504 newObjThisPtr = gtNewLclvNode(lclNum, lvaTable[lclNum].TypeGet());
12506 newObjThisPtr = gtNewBlkOpNode(newObjThisPtr, // Dest
12507 gtNewIconNode(0), // Value
12509 false, // isVolatile
12510 false); // not copyBlock
12511 impAppendTree(newObjThisPtr, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
12513 // Obtain the address of the temp
12515 gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewLclvNode(lclNum, lvaTable[lclNum].TypeGet()));
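// Net effect for the value-class case (a sketch): the statements appended
// above amount to
//     tmp = 0;                // zero-init of the NewObj constructor temp
//     newObjThisPtr = &tmp;   // byref 'this' for the upcoming ctor call
// so the constructor writes directly into the stack-allocated temp.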
12519 #ifdef FEATURE_READYTORUN_COMPILER
12520 if (opts.IsReadyToRun())
12522 op1 = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_NEW, TYP_REF);
12523 usingReadyToRunHelper = (op1 != nullptr);
12526 if (!usingReadyToRunHelper)
12529 op1 = impParentClassTokenToHandle(&resolvedToken, nullptr, TRUE);
12530 if (op1 == nullptr)
12531 { // compDonotInline()
12535 // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
12536 // and the newfast call with a single call to a dynamic R2R cell that will:
12537 // 1) Load the context
12538 // 2) Perform the generic dictionary lookup and caching, and generate the appropriate
12540 // 3) Allocate and return the new object
12541 // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
12543 op1 = gtNewAllocObjNode(info.compCompHnd->getNewHelper(&resolvedToken, info.compMethodHnd),
12544 resolvedToken.hClass, TYP_REF, op1);
12547 // Remember that this basic block contains 'new' of an object
12548 block->bbFlags |= BBF_HAS_NEWOBJ;
12549 optMethodFlags |= OMF_HAS_NEWOBJ;
// Append the assignment to the temp/local. Don't need to spill
// at all as we are just calling an EE-Jit helper which can only
// cause an (async) OutOfMemoryException.

// We assign the newly allocated object (by a GT_ALLOCOBJ node)
// to a temp. Note that the pattern "temp = allocObj" is required
// by the ObjectAllocator phase to be able to determine GT_ALLOCOBJ nodes
// without an exhaustive walk over all expressions.
12560 impAssignTempGen(lclNum, op1, (unsigned)CHECK_SPILL_NONE);
12562 newObjThisPtr = gtNewLclvNode(lclNum, TYP_REF);
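// Net effect for the fixed-size reference-class case (a sketch):
//     tmp = GT_ALLOCOBJ(newHelper, clsHnd);   // allocate the object
//     newObjThisPtr = tmp;                    // 'this' for the ctor call
// preserving the "temp = allocObj" pattern that ObjectAllocator scans for.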
12569 /* CALLI does not respond to CONSTRAINED */
12570 prefixFlags &= ~PREFIX_CONSTRAINED;
12572 if (compIsForInlining())
12574 // CALLI doesn't have a method handle, so assume the worst.
12575 if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
12577 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_CALLI);
12587 // We can't call getCallInfo on the token from a CALLI, but we need it in
12588 // many other places. We unfortunately embed that knowledge here.
12589 if (opcode != CEE_CALLI)
12591 _impResolveToken(CORINFO_TOKENKIND_Method);
12593 eeGetCallInfo(&resolvedToken,
12594 (prefixFlags & PREFIX_CONSTRAINED) ? &constrainedResolvedToken : nullptr,
12595 // this is how impImportCall invokes getCallInfo
12597 combine(combine(CORINFO_CALLINFO_ALLOWINSTPARAM, CORINFO_CALLINFO_SECURITYCHECKS),
12598 (opcode == CEE_CALLVIRT) ? CORINFO_CALLINFO_CALLVIRT
12599 : CORINFO_CALLINFO_NONE)),
12604 // Suppress uninitialized use warning.
12605 memset(&resolvedToken, 0, sizeof(resolvedToken));
12606 memset(&callInfo, 0, sizeof(callInfo));
12608 resolvedToken.token = getU4LittleEndian(codeAddr);
12611 CALL: // memberRef should be set.
12612 // newObjThisPtr should be set for CEE_NEWOBJ
12614 JITDUMP(" %08X", resolvedToken.token);
12615 constraintCall = (prefixFlags & PREFIX_CONSTRAINED) != 0;
12617 bool newBBcreatedForTailcallStress;
12619 newBBcreatedForTailcallStress = false;
12621 if (compIsForInlining())
12623 if (compDonotInline())
12627 // We rule out inlinees with explicit tail calls in fgMakeBasicBlocks.
12628 assert((prefixFlags & PREFIX_TAILCALL_EXPLICIT) == 0);
12632 if (compTailCallStress())
12634 // Have we created a new BB after the "call" instruction in fgMakeBasicBlocks()?
12635 // Tail call stress only recognizes call+ret patterns and forces them to be
12636 // explicit tail prefixed calls. Also fgMakeBasicBlocks() under tail call stress
// doesn't import the 'ret' opcode following the call into the basic block containing
// the call; instead it imports it into a new basic block. Note that fgMakeBasicBlocks()
// already checks that there is an opcode following the call, so it is
// safe here to read the next opcode without a bounds check.
12641 newBBcreatedForTailcallStress =
12642 impOpcodeIsCallOpcode(opcode) && // Current opcode is a CALL, (not a CEE_NEWOBJ). So, don't
12643 // make it jump to RET.
12644 (OPCODE)getU1LittleEndian(codeAddr + sz) == CEE_RET; // Next opcode is a CEE_RET
12646 if (newBBcreatedForTailcallStress &&
12647 !(prefixFlags & PREFIX_TAILCALL_EXPLICIT) && // User hasn't set "tail." prefix yet.
12648 verCheckTailCallConstraint(opcode, &resolvedToken,
12649 constraintCall ? &constrainedResolvedToken : nullptr,
true) // Is it legal to do a tailcall?
12653 // Stress the tailcall.
12654 JITDUMP(" (Tailcall stress: prefixFlags |= PREFIX_TAILCALL_EXPLICIT)");
12655 prefixFlags |= PREFIX_TAILCALL_EXPLICIT;
// Note that when running under tail call stress, a call will be marked as explicit tail
// prefixed and hence will not be considered for implicit tail calling.
12661 bool isRecursive = (callInfo.hMethod == info.compMethodHnd);
12662 if (impIsImplicitTailCallCandidate(opcode, codeAddr + sz, codeEndp, prefixFlags, isRecursive))
12664 JITDUMP(" (Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)");
12665 prefixFlags |= PREFIX_TAILCALL_IMPLICIT;
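// For illustration (a sketch): an implicit tail-call candidate is a plain
//     call SomeMethod
//     ret
// sequence with no "tail." prefix, e.g. IL generated for
// "return SomeMethod();"; the candidate check above inspects the trailing
// opcodes and the prefix flags to decide this.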
12669 // Treat this call as tail call for verification only if "tail" prefixed (i.e. explicit tail call).
12670 explicitTailCall = (prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0;
12671 readonlyCall = (prefixFlags & PREFIX_READONLY) != 0;
12673 if (opcode != CEE_CALLI && opcode != CEE_NEWOBJ)
12675 // All calls and delegates need a security callout.
12676 // For delegates, this is the call to the delegate constructor, not the access check on the
12678 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12680 #if 0 // DevDiv 410397 - This breaks too many obfuscated apps to do this in an in-place release
12682 // DevDiv 291703 - we need to check for accessibility between the caller of InitializeArray
12683 // and the field it is reading, thus it is now unverifiable to not immediately precede with
// ldtoken <field token>, and we now check accessibility
12685 if ((callInfo.methodFlags & CORINFO_FLG_INTRINSIC) &&
12686 (info.compCompHnd->getIntrinsicID(callInfo.hMethod) == CORINFO_INTRINSIC_InitializeArray))
12688 if (prevOpcode != CEE_LDTOKEN)
12690 Verify(prevOpcode == CEE_LDTOKEN, "Need ldtoken for InitializeArray");
12694 assert(lastLoadToken != NULL);
12695 // Now that we know we have a token, verify that it is accessible for loading
12696 CORINFO_RESOLVED_TOKEN resolvedLoadField;
12697 impResolveToken(lastLoadToken, &resolvedLoadField, CORINFO_TOKENKIND_Field);
12698 eeGetFieldInfo(&resolvedLoadField, CORINFO_ACCESS_INIT_ARRAY, &fieldInfo);
12699 impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
12703 #endif // DevDiv 410397
12706 if (tiVerificationNeeded)
12708 verVerifyCall(opcode, &resolvedToken, constraintCall ? &constrainedResolvedToken : nullptr,
12709 explicitTailCall, readonlyCall, delegateCreateStart, codeAddr - 1,
12710 &callInfo DEBUGARG(info.compFullName));
12713 // Insert delegate callout here.
12714 if (opcode == CEE_NEWOBJ && (mflags & CORINFO_FLG_CONSTRUCTOR) && (clsFlags & CORINFO_FLG_DELEGATE))
12717 // We should do this only if verification is enabled
12718 // If verification is disabled, delegateCreateStart will not be initialized correctly
12719 if (tiVerificationNeeded)
12721 mdMemberRef delegateMethodRef = mdMemberRefNil;
12722 // We should get here only for well formed delegate creation.
12723 assert(verCheckDelegateCreation(delegateCreateStart, codeAddr - 1, delegateMethodRef));
12727 #ifdef FEATURE_CORECLR
12728 // In coreclr the delegate transparency rule needs to be enforced even if verification is disabled
12729 typeInfo tiActualFtn = impStackTop(0).seTypeInfo;
12730 CORINFO_METHOD_HANDLE delegateMethodHandle = tiActualFtn.GetMethod2();
12732 impInsertCalloutForDelegate(info.compMethodHnd, delegateMethodHandle, resolvedToken.hClass);
12733 #endif // FEATURE_CORECLR
12736 callTyp = impImportCall(opcode, &resolvedToken, constraintCall ? &constrainedResolvedToken : nullptr,
12737 newObjThisPtr, prefixFlags, &callInfo, opcodeOffs);
12738 if (compDonotInline())
12743 if (explicitTailCall || newBBcreatedForTailcallStress) // If newBBcreatedForTailcallStress is true, we
12744 // have created a new BB after the "call"
12745 // instruction in fgMakeBasicBlocks(). So we need to jump to RET regardless.
12747 assert(!compIsForInlining());
12759 BOOL isLoadAddress = (opcode == CEE_LDFLDA || opcode == CEE_LDSFLDA);
12760 BOOL isLoadStatic = (opcode == CEE_LDSFLD || opcode == CEE_LDSFLDA);
12762 /* Get the CP_Fieldref index */
12763 assertImp(sz == sizeof(unsigned));
12765 _impResolveToken(CORINFO_TOKENKIND_Field);
12767 JITDUMP(" %08X", resolvedToken.token);
12769 int aflags = isLoadAddress ? CORINFO_ACCESS_ADDRESS : CORINFO_ACCESS_GET;
12771 GenTreePtr obj = nullptr;
12772 typeInfo* tiObj = nullptr;
12773 CORINFO_CLASS_HANDLE objType = nullptr; // used for fields
12775 if (opcode == CEE_LDFLD || opcode == CEE_LDFLDA)
12777 tiObj = &impStackTop().seTypeInfo;
12778 obj = impPopStack(objType).val;
12780 if (impIsThis(obj))
12782 aflags |= CORINFO_ACCESS_THIS;
12784 // An optimization for Contextful classes:
12785 // we unwrap the proxy when we have a 'this reference'
12787 if (info.compUnwrapContextful)
12789 aflags |= CORINFO_ACCESS_UNWRAP;
12794 eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo);
12796 // Figure out the type of the member. We always call canAccessField, so you always need this
12798 CorInfoType ciType = fieldInfo.fieldType;
12799 clsHnd = fieldInfo.structType;
12801 lclTyp = JITtype2varType(ciType);
12803 #ifdef _TARGET_AMD64
12804 noway_assert(varTypeIsIntegralOrI(lclTyp) || varTypeIsFloating(lclTyp) || lclTyp == TYP_STRUCT);
12805 #endif // _TARGET_AMD64
12807 if (compIsForInlining())
12809 switch (fieldInfo.fieldAccessor)
12811 case CORINFO_FIELD_INSTANCE_HELPER:
12812 case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
12813 case CORINFO_FIELD_STATIC_ADDR_HELPER:
12814 case CORINFO_FIELD_STATIC_TLS:
12816 compInlineResult->NoteFatal(InlineObservation::CALLEE_LDFLD_NEEDS_HELPER);
12819 case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
12820 #if COR_JIT_EE_VERSION > 460
12821 case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
/* We may be able to inline the field accessors in specific instantiations of generic methods */
12825 compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDFLD_NEEDS_HELPER);
12832 if (!isLoadAddress && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && lclTyp == TYP_STRUCT &&
12835 if ((info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd) == CORINFO_TYPE_UNDEF) &&
12836 !(info.compFlags & CORINFO_FLG_FORCEINLINE))
12838 // Loading a static valuetype field usually will cause a JitHelper to be called
12839 // for the static base. This will bloat the code.
12840 compInlineResult->Note(InlineObservation::CALLEE_LDFLD_STATIC_VALUECLASS);
12842 if (compInlineResult->IsFailure())
12850 tiRetVal = verMakeTypeInfo(ciType, clsHnd);
12853 tiRetVal.MakeByRef();
12857 tiRetVal.NormaliseForStack();
12860 // Perform this check always to ensure that we get field access exceptions even with
12861 // SkipVerification.
12862 impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
12864 if (tiVerificationNeeded)
12866 // You can also pass the unboxed struct to LDFLD
12867 BOOL bAllowPlainValueTypeAsThis = FALSE;
12868 if (opcode == CEE_LDFLD && impIsValueType(tiObj))
12870 bAllowPlainValueTypeAsThis = TRUE;
12873 verVerifyField(&resolvedToken, fieldInfo, tiObj, isLoadAddress, bAllowPlainValueTypeAsThis);
12875 // If we're doing this on a heap object or from a 'safe' byref
12876 // then the result is a safe byref too
12877 if (isLoadAddress) // load address
12879 if (fieldInfo.fieldFlags &
12880 CORINFO_FLG_FIELD_STATIC) // statics marked as safe will have permanent home
12882 if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_SAFESTATIC_BYREF_RETURN)
12884 tiRetVal.SetIsPermanentHomeByRef();
12887 else if (tiObj->IsObjRef() || tiObj->IsPermanentHomeByRef())
// ldflda of byref is safe if done on a gc object or on a
// byref with a permanent home
12891 tiRetVal.SetIsPermanentHomeByRef();
12897 // tiVerificationNeeded is false.
12898 // Raise InvalidProgramException if static load accesses non-static field
12899 if (isLoadStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0))
12901 BADCODE("static access on an instance field");
// We are using ldfld/a on a static field. We allow it, but need to evaluate any side effects of obj.
12906 if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr)
12908 if (obj->gtFlags & GTF_SIDE_EFFECT)
12910 obj = gtUnusedValNode(obj);
12911 impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
12916 /* Preserve 'small' int types */
12917 if (lclTyp > TYP_INT)
12919 lclTyp = genActualType(lclTyp);
12922 bool usesHelper = false;
12924 switch (fieldInfo.fieldAccessor)
12926 case CORINFO_FIELD_INSTANCE:
12927 #ifdef FEATURE_READYTORUN_COMPILER
12928 case CORINFO_FIELD_INSTANCE_WITH_BASE:
12931 bool nullcheckNeeded = false;
12933 obj = impCheckForNullPointer(obj);
12935 if (isLoadAddress && (obj->gtType == TYP_BYREF) && fgAddrCouldBeNull(obj))
12937 nullcheckNeeded = true;
12940 // If the object is a struct, what we really want is
12941 // for the field to operate on the address of the struct.
12942 if (!varTypeGCtype(obj->TypeGet()) && impIsValueType(tiObj))
12944 assert(opcode == CEE_LDFLD && objType != nullptr);
12946 obj = impGetStructAddr(obj, objType, (unsigned)CHECK_SPILL_ALL, true);
12949 /* Create the data member node */
12950 op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset, nullcheckNeeded);
12952 #ifdef FEATURE_READYTORUN_COMPILER
12953 if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE)
12955 op1->gtField.gtFieldLookup = fieldInfo.fieldLookup;
12959 op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT);
12961 if (fgAddrCouldBeNull(obj))
12963 op1->gtFlags |= GTF_EXCEPT;
12966 // If gtFldObj is a BYREF then our target is a value class and
// it could point anywhere, for example a boxed class static int
12968 if (obj->gtType == TYP_BYREF)
12970 op1->gtFlags |= GTF_IND_TGTANYWHERE;
12973 DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
12974 if (StructHasOverlappingFields(typeFlags))
12976 op1->gtField.gtFldMayOverlap = true;
// wrap it in an address-of operator if necessary
12982 op1 = gtNewOperNode(GT_ADDR,
12983 (var_types)(varTypeIsGC(obj->TypeGet()) ? TYP_BYREF : TYP_I_IMPL), op1);
12987 if (compIsForInlining() &&
12988 impInlineIsGuaranteedThisDerefBeforeAnySideEffects(nullptr, obj,
12989 impInlineInfo->inlArgInfo))
12991 impInlineInfo->thisDereferencedFirst = true;
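// Resulting tree shapes for this case (a sketch):
//     ldfld  obj.F  ->  GT_FIELD(obj), typed as the field
//     ldflda obj.F  ->  GT_ADDR(GT_FIELD(obj)), typed TYP_BYREF
//                       (TYP_I_IMPL when obj is not a GC pointer)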
12997 case CORINFO_FIELD_STATIC_TLS:
12998 #ifdef _TARGET_X86_
12999 // Legacy TLS access is implemented as intrinsic on x86 only
13001 /* Create the data member node */
13002 op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset);
13003 op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation
13007 op1 = gtNewOperNode(GT_ADDR, (var_types)TYP_I_IMPL, op1);
13011 fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER;
13016 case CORINFO_FIELD_STATIC_ADDR_HELPER:
13017 case CORINFO_FIELD_INSTANCE_HELPER:
13018 case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
13019 op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp,
13024 case CORINFO_FIELD_STATIC_ADDRESS:
13025 // Replace static read-only fields with constant if possible
13026 if ((aflags & CORINFO_ACCESS_GET) && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_FINAL) &&
13027 !(fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP) &&
13028 (varTypeIsIntegral(lclTyp) || varTypeIsFloating(lclTyp)))
13030 CorInfoInitClassResult initClassResult =
13031 info.compCompHnd->initClass(resolvedToken.hField, info.compMethodHnd,
13032 impTokenLookupContextHandle);
13034 if (initClassResult & CORINFO_INITCLASS_INITIALIZED)
13036 void** pFldAddr = nullptr;
13038 info.compCompHnd->getFieldAddress(resolvedToken.hField, (void**)&pFldAddr);
13040 // We should always be able to access this static's address directly
13041 assert(pFldAddr == nullptr);
13043 op1 = impImportStaticReadOnlyField(fldAddr, lclTyp);
13050 case CORINFO_FIELD_STATIC_RVA_ADDRESS:
13051 case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
13052 case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
13053 #if COR_JIT_EE_VERSION > 460
13054 case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
13056 op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo,
13060 case CORINFO_FIELD_INTRINSIC_ZERO:
13062 assert(aflags & CORINFO_ACCESS_GET);
13063 op1 = gtNewIconNode(0, lclTyp);
13068 case CORINFO_FIELD_INTRINSIC_EMPTY_STRING:
13070 assert(aflags & CORINFO_ACCESS_GET);
13073 InfoAccessType iat = info.compCompHnd->emptyStringLiteral(&pValue);
13074 op1 = gtNewStringLiteralNode(iat, pValue);
13080 assert(!"Unexpected fieldAccessor");
13083 if (!isLoadAddress)
13086 if (prefixFlags & PREFIX_VOLATILE)
13088 op1->gtFlags |= GTF_DONT_CSE; // Can't CSE a volatile
13089 op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
13093 assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) ||
13094 (op1->OperGet() == GT_OBJ));
13095 op1->gtFlags |= GTF_IND_VOLATILE;
13099 if (prefixFlags & PREFIX_UNALIGNED)
13103 assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) ||
13104 (op1->OperGet() == GT_OBJ));
13105 op1->gtFlags |= GTF_IND_UNALIGNED;
13110 /* Check if the class needs explicit initialization */
13112 if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
13114 GenTreePtr helperNode = impInitClass(&resolvedToken);
13115 if (compDonotInline())
13119 if (helperNode != nullptr)
13121 op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1);
13126 impPushOnStack(op1, tiRetVal);
13134 BOOL isStoreStatic = (opcode == CEE_STSFLD);
13136 CORINFO_CLASS_HANDLE fieldClsHnd; // class of the field (if it's a ref type)
13138 /* Get the CP_Fieldref index */
13140 assertImp(sz == sizeof(unsigned));
13142 _impResolveToken(CORINFO_TOKENKIND_Field);
13144 JITDUMP(" %08X", resolvedToken.token);
13146 int aflags = CORINFO_ACCESS_SET;
13147 GenTreePtr obj = nullptr;
13148 typeInfo* tiObj = nullptr;
13151 /* Pull the value from the stack */
13152 op2 = impPopStack(tiVal);
13153 clsHnd = tiVal.GetClassHandle();
13155 if (opcode == CEE_STFLD)
13157 tiObj = &impStackTop().seTypeInfo;
13158 obj = impPopStack().val;
13160 if (impIsThis(obj))
13162 aflags |= CORINFO_ACCESS_THIS;
13164 // An optimization for Contextful classes:
13165 // we unwrap the proxy when we have a 'this reference'
13167 if (info.compUnwrapContextful)
13169 aflags |= CORINFO_ACCESS_UNWRAP;
13174 eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo);
13176 // Figure out the type of the member. We always call canAccessField, so you always need this
13178 CorInfoType ciType = fieldInfo.fieldType;
13179 fieldClsHnd = fieldInfo.structType;
13181 lclTyp = JITtype2varType(ciType);
13183 if (compIsForInlining())
/* Is this a 'special' (COM) field, a TLS ref static field, a field stored in the
 * GC heap, or a per-inst static? */
13188 switch (fieldInfo.fieldAccessor)
13190 case CORINFO_FIELD_INSTANCE_HELPER:
13191 case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
13192 case CORINFO_FIELD_STATIC_ADDR_HELPER:
13193 case CORINFO_FIELD_STATIC_TLS:
13195 compInlineResult->NoteFatal(InlineObservation::CALLEE_STFLD_NEEDS_HELPER);
13198 case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
13199 #if COR_JIT_EE_VERSION > 460
13200 case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
/* We may be able to inline the field accessors in specific instantiations of generic methods */
13205 compInlineResult->NoteFatal(InlineObservation::CALLSITE_STFLD_NEEDS_HELPER);
13213 impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
13215 if (tiVerificationNeeded)
13217 verVerifyField(&resolvedToken, fieldInfo, tiObj, TRUE);
13218 typeInfo fieldType = verMakeTypeInfo(ciType, fieldClsHnd);
13219 Verify(tiCompatibleWith(tiVal, fieldType.NormaliseForStack(), true), "type mismatch");
// tiVerificationNeeded is false.
13224 // Raise InvalidProgramException if static store accesses non-static field
13225 if (isStoreStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0))
13227 BADCODE("static access on an instance field");
13231 // We are using stfld on a static field.
13232 // We allow it, but need to eval any side-effects for obj
13233 if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr)
13235 if (obj->gtFlags & GTF_SIDE_EFFECT)
13237 obj = gtUnusedValNode(obj);
13238 impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13243 /* Preserve 'small' int types */
13244 if (lclTyp > TYP_INT)
13246 lclTyp = genActualType(lclTyp);
13249 switch (fieldInfo.fieldAccessor)
13251 case CORINFO_FIELD_INSTANCE:
13252 #ifdef FEATURE_READYTORUN_COMPILER
13253 case CORINFO_FIELD_INSTANCE_WITH_BASE:
13256 obj = impCheckForNullPointer(obj);
13258 /* Create the data member node */
13259 op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset);
13260 DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
13261 if (StructHasOverlappingFields(typeFlags))
13263 op1->gtField.gtFldMayOverlap = true;
13266 #ifdef FEATURE_READYTORUN_COMPILER
13267 if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE)
13269 op1->gtField.gtFieldLookup = fieldInfo.fieldLookup;
13273 op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT);
13275 if (fgAddrCouldBeNull(obj))
13277 op1->gtFlags |= GTF_EXCEPT;
13280 // If gtFldObj is a BYREF then our target is a value class and
// it could point anywhere, for example a boxed class static int
13282 if (obj->gtType == TYP_BYREF)
13284 op1->gtFlags |= GTF_IND_TGTANYWHERE;
13287 if (compIsForInlining() &&
13288 impInlineIsGuaranteedThisDerefBeforeAnySideEffects(op2, obj, impInlineInfo->inlArgInfo))
13290 impInlineInfo->thisDereferencedFirst = true;
13295 case CORINFO_FIELD_STATIC_TLS:
13296 #ifdef _TARGET_X86_
13297 // Legacy TLS access is implemented as intrinsic on x86 only
13299 /* Create the data member node */
13300 op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset);
13301 op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation
13305 fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER;
13310 case CORINFO_FIELD_STATIC_ADDR_HELPER:
13311 case CORINFO_FIELD_INSTANCE_HELPER:
13312 case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
13313 op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp,
13317 case CORINFO_FIELD_STATIC_ADDRESS:
13318 case CORINFO_FIELD_STATIC_RVA_ADDRESS:
13319 case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
13320 case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
13321 #if COR_JIT_EE_VERSION > 460
13322 case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
13324 op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo,
13329 assert(!"Unexpected fieldAccessor");
13332 // Create the member assignment, unless we have a struct.
13333 // TODO-1stClassStructs: This could be limited to TYP_STRUCT, to avoid extra copies.
13334 bool deferStructAssign = varTypeIsStruct(lclTyp);
13336 if (!deferStructAssign)
13338 if (prefixFlags & PREFIX_VOLATILE)
13340 assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND));
13341 op1->gtFlags |= GTF_DONT_CSE; // Can't CSE a volatile
13342 op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
13343 op1->gtFlags |= GTF_IND_VOLATILE;
13345 if (prefixFlags & PREFIX_UNALIGNED)
13347 assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND));
13348 op1->gtFlags |= GTF_IND_UNALIGNED;
/* V4.0 allows assignment of i4 constant values to i8 type vars when the IL verifier is bypassed (full
   trust apps). The reason this works is that the JIT stores an i4 constant in the GenTree union during
   importation and reads from the union as if it were a long during code generation. Though this can
   potentially read garbage, one can get lucky and have it work correctly.

   This code pattern is generated by the Dev10 MC++ compiler while storing to fields when compiled with
   the /O2 switch (the default when compiling retail configs in Dev10), and a customer app has taken a
   dependency on it. To be backward compatible, we explicitly add an upward cast here so that it
   always works correctly.

   Note that this is limited to x86 alone as there is no back-compat to be addressed for the ARM JIT
   for V4.0. */
13369 CLANG_FORMAT_COMMENT_ANCHOR;
13371 #ifdef _TARGET_X86_
13372 if (op1->TypeGet() != op2->TypeGet() && op2->OperIsConst() && varTypeIsIntOrI(op2->TypeGet()) &&
13373 varTypeIsLong(op1->TypeGet()))
13375 op2 = gtNewCastNode(op1->TypeGet(), op2, op1->TypeGet());
13379 #ifdef _TARGET_64BIT_
13380 // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
13381 if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType))
13383 op2->gtType = TYP_I_IMPL;
// Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
13389 if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT))
13391 op2 = gtNewCastNode(TYP_INT, op2, TYP_INT);
// Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
13395 if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT))
13397 op2 = gtNewCastNode(TYP_I_IMPL, op2, TYP_I_IMPL);
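// For illustration (a sketch): on a 64-bit target, storing the constant 1
// into a native-int field simply retypes the GT_CNS_INT to TYP_I_IMPL,
// while storing a 32-bit int local into that field inserts
// GT_CAST(TYP_I_IMPL, op2) so the assignment's operand types match.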
13402 #if !FEATURE_X87_DOUBLES
13403 // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE
13404 // We insert a cast to the dest 'op1' type
13406 if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) &&
13407 varTypeIsFloating(op2->gtType))
13409 op2 = gtNewCastNode(op1->TypeGet(), op2, op1->TypeGet());
13411 #endif // !FEATURE_X87_DOUBLES
13413 op1 = gtNewAssignNode(op1, op2);
13415 /* Mark the expression as containing an assignment */
13417 op1->gtFlags |= GTF_ASG;
13420 /* Check if the class needs explicit initialization */
13422 if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
13424 GenTreePtr helperNode = impInitClass(&resolvedToken);
13425 if (compDonotInline())
13429 if (helperNode != nullptr)
13431 op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1);
13435 /* stfld can interfere with value classes (consider the sequence
13436 ldloc, ldloca, ..., stfld, stloc). We will be conservative and
13437 spill all value class references from the stack. */
13439 if (obj && ((obj->gtType == TYP_BYREF) || (obj->gtType == TYP_I_IMPL)))
13443 if (impIsValueType(tiObj))
13445 impSpillEvalStack();
13449 impSpillValueClasses();
13453 /* Spill any refs to the same member from the stack */
13455 impSpillLclRefs((ssize_t)resolvedToken.hField);
13457 /* stsfld also interferes with indirect accesses (for aliased
13458 statics) and calls. But don't need to spill other statics
13459 as we have explicitly spilled this particular static field. */
13461 impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STFLD"));
13463 if (deferStructAssign)
13465 op1 = impAssignStruct(op1, op2, clsHnd, (unsigned)CHECK_SPILL_ALL);
13473 /* Get the class type index operand */
13475 _impResolveToken(CORINFO_TOKENKIND_Newarr);
13477 JITDUMP(" %08X", resolvedToken.token);
13479 if (!opts.IsReadyToRun())
13481 // Need to restore array classes before creating array objects on the heap
13482 op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE /*mustRestoreHandle*/);
13483 if (op1 == nullptr)
13484 { // compDonotInline()
13489 if (tiVerificationNeeded)
// As per ECMA, 'numElems' can be either int32 or native int.
13492 Verify(impStackTop().seTypeInfo.IsIntOrNativeIntType(), "bad bound");
13494 CORINFO_CLASS_HANDLE elemTypeHnd;
13495 info.compCompHnd->getChildType(resolvedToken.hClass, &elemTypeHnd);
13496 Verify(elemTypeHnd == nullptr ||
13497 !(info.compCompHnd->getClassAttribs(elemTypeHnd) & CORINFO_FLG_CONTAINS_STACK_PTR),
13498 "array of byref-like type");
13499 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
13502 accessAllowedResult =
13503 info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
13504 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
13506 /* Form the arglist: array class handle, size */
13507 op2 = impPopStack().val;
13508 assertImp(genActualTypeIsIntOrI(op2->gtType));
13510 #ifdef FEATURE_READYTORUN_COMPILER
13511 if (opts.IsReadyToRun())
13513 op1 = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_NEWARR_1, TYP_REF,
13514 gtNewArgList(op2));
13515 usingReadyToRunHelper = (op1 != nullptr);
13517 if (!usingReadyToRunHelper)
13519 // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
13520 // and the newarr call with a single call to a dynamic R2R cell that will:
13521 // 1) Load the context
13522 // 2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
13523 // 3) Allocate the new array
13524 // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
13526 // Need to restore array classes before creating array objects on the heap
13527 op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE /*mustRestoreHandle*/);
13528 if (op1 == nullptr)
13529 { // compDonotInline()
13535 if (!usingReadyToRunHelper)
13538 args = gtNewArgList(op1, op2);
13540 /* Create a call to 'new' */
13542 // Note that this only works for shared generic code because the same helper is used for all
13543 // reference array types
13545 gtNewHelperCallNode(info.compCompHnd->getNewArrHelper(resolvedToken.hClass), TYP_REF, 0, args);
13548 op1->gtCall.compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)resolvedToken.hClass;
13550 /* Remember that this basic block contains 'new' of an sd array */
13552 block->bbFlags |= BBF_HAS_NEWARRAY;
13553 optMethodFlags |= OMF_HAS_NEWARRAY;
13555 /* Push the result of the call on the stack */
13557 impPushOnStack(op1, tiRetVal);
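// Net result (a sketch): IL such as "ldc.i4.s 10; newarr int32" imports as
//     GT_CALL newArrHelper(arrayClsHnd, 10)   returning TYP_REF
// with the array class handle restored above and the popped element count
// as the second argument.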
13564 assert(!compIsForInlining());
13566 if (tiVerificationNeeded)
13568 Verify(false, "bad opcode");
13571 // We don't allow locallocs inside handlers
13572 if (block->hasHndIndex())
13574 BADCODE("Localloc can't be inside handler");
13577 /* The FP register may not be back to the original value at the end
13578 of the method, even if the frame size is 0, as localloc may
13579 have modified it. So we will HAVE to reset it */
13581 compLocallocUsed = true;
13582 setNeedsGSSecurityCookie();
13584 // Get the size to allocate
13586 op2 = impPopStack().val;
13587 assertImp(genActualTypeIsIntOrI(op2->gtType));
13589 if (verCurrentState.esStackDepth != 0)
13591 BADCODE("Localloc can only be used when the stack is empty");
13594 op1 = gtNewOperNode(GT_LCLHEAP, TYP_I_IMPL, op2);
13596 // May throw a stack overflow exception. Obviously, we don't want locallocs to be CSE'd.
13598 op1->gtFlags |= (GTF_EXCEPT | GTF_DONT_CSE);
13600 impPushOnStack(op1, tiRetVal);
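// For illustration (a sketch): C# "stackalloc byte[n]" compiles to roughly
//     ldarg n; conv.u; localloc
// and imports as GT_LCLHEAP(TYP_I_IMPL, size), which is why the method is
// marked compLocallocUsed and gets a GS security cookie above.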
13605 /* Get the type token */
13606 assertImp(sz == sizeof(unsigned));
13608 _impResolveToken(CORINFO_TOKENKIND_Casting);
13610 JITDUMP(" %08X", resolvedToken.token);
13612 if (!opts.IsReadyToRun())
13614 op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
13615 if (op2 == nullptr)
13616 { // compDonotInline()
13621 if (tiVerificationNeeded)
13623 Verify(impStackTop().seTypeInfo.IsObjRef(), "obj reference needed");
13624 // Even if this is a value class, we know it is boxed.
13625 tiRetVal = typeInfo(TI_REF, resolvedToken.hClass);
13627 accessAllowedResult =
13628 info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
13629 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
13631 op1 = impPopStack().val;
13633 #ifdef FEATURE_READYTORUN_COMPILER
13634 if (opts.IsReadyToRun())
13636 GenTreePtr opLookup =
13637 impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_ISINSTANCEOF, TYP_REF,
13638 gtNewArgList(op1));
13639 usingReadyToRunHelper = (opLookup != nullptr);
13640 op1 = (usingReadyToRunHelper ? opLookup : op1);
13642 if (!usingReadyToRunHelper)
13644 // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
13645 // and the isinstanceof_any call with a single call to a dynamic R2R cell that will:
13646 // 1) Load the context
13647 // 2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
13648 // 3) Perform the 'is instance' check on the input object
13649 // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
13651 op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
13652 if (op2 == nullptr)
13653 { // compDonotInline()
13659 if (!usingReadyToRunHelper)
13662 op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, false);
13664 if (compDonotInline())
13669 impPushOnStack(op1, tiRetVal);
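// Net result (a sketch): "isinst C" pops the object and pushes either the
// same reference or null; impCastClassOrIsInstToTree produces a type-check
// helper call (a CORINFO_HELP_ISINSTANCEOF* flavor) or its inline
// expansion.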
13673 case CEE_REFANYVAL:
13675 // get the class handle and make a ICON node out of it
13677 _impResolveToken(CORINFO_TOKENKIND_Class);
13679 JITDUMP(" %08X", resolvedToken.token);
13681 op2 = impTokenToHandle(&resolvedToken);
13682 if (op2 == nullptr)
13683 { // compDonotInline()
13687 if (tiVerificationNeeded)
Verify(typeInfo::AreEquivalent(impStackTop().seTypeInfo, verMakeTypeInfo(impGetRefAnyClass())),
       "need refany");
13691 tiRetVal = verMakeTypeInfo(resolvedToken.hClass).MakeByRef();
13694 op1 = impPopStack().val;
13695 // make certain it is normalized;
13696 op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL);
13698 // Call helper GETREFANY(classHandle, op1);
13699 args = gtNewArgList(op2, op1);
13700 op1 = gtNewHelperCallNode(CORINFO_HELP_GETREFANY, TYP_BYREF, 0, args);
13702 impPushOnStack(op1, tiRetVal);
13705 case CEE_REFANYTYPE:
13707 if (tiVerificationNeeded)
Verify(typeInfo::AreEquivalent(impStackTop().seTypeInfo, verMakeTypeInfo(impGetRefAnyClass())),
       "need refany");
13713 op1 = impPopStack().val;
13715 // make certain it is normalized;
13716 op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL);
13718 if (op1->gtOper == GT_OBJ)
13720 // Get the address of the refany
13721 op1 = op1->gtOp.gtOp1;
13723 // Fetch the type from the correct slot
13724 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
13725 gtNewIconNode(offsetof(CORINFO_RefAny, type), TYP_I_IMPL));
13726 op1 = gtNewOperNode(GT_IND, TYP_BYREF, op1);
13730 assertImp(op1->gtOper == GT_MKREFANY);
13732 // The pointer may have side-effects
13733 if (op1->gtOp.gtOp1->gtFlags & GTF_SIDE_EFFECT)
13735 impAppendTree(op1->gtOp.gtOp1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13737 impNoteLastILoffs();
13741 // We already have the class handle
13742 op1 = op1->gtOp.gtOp2;
13745 // convert native TypeHandle to RuntimeTypeHandle
13747 GenTreeArgList* helperArgs = gtNewArgList(op1);
13749 op1 = gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE_MAYBENULL, TYP_STRUCT, GTF_EXCEPT,
13752 // The handle struct is returned in register
13753 op1->gtCall.gtReturnType = TYP_REF;
13755 tiRetVal = typeInfo(TI_STRUCT, impGetTypeHandleClass());
13758 impPushOnStack(op1, tiRetVal);
13763 /* Get the Class index */
13764 assertImp(sz == sizeof(unsigned));
13765 lastLoadToken = codeAddr;
13766 _impResolveToken(CORINFO_TOKENKIND_Ldtoken);
13768 tokenType = info.compCompHnd->getTokenTypeAsHandle(&resolvedToken);
13770 op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE);
13771 if (op1 == nullptr)
13772 { // compDonotInline()
13776 helper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE;
13777 assert(resolvedToken.hClass != nullptr);
13779 if (resolvedToken.hMethod != nullptr)
13781 helper = CORINFO_HELP_METHODDESC_TO_STUBRUNTIMEMETHOD;
13783 else if (resolvedToken.hField != nullptr)
13785 helper = CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD;
13788 GenTreeArgList* helperArgs = gtNewArgList(op1);
13790 op1 = gtNewHelperCallNode(helper, TYP_STRUCT, GTF_EXCEPT, helperArgs);
13792 // The handle struct is returned in register
13793 op1->gtCall.gtReturnType = TYP_REF;
13795 tiRetVal = verMakeTypeInfo(tokenType);
13796 impPushOnStack(op1, tiRetVal);
13801 case CEE_UNBOX_ANY:
13803 /* Get the Class index */
13804 assertImp(sz == sizeof(unsigned));
13806 _impResolveToken(CORINFO_TOKENKIND_Class);
13808 JITDUMP(" %08X", resolvedToken.token);
13810 BOOL runtimeLookup;
13811 op2 = impTokenToHandle(&resolvedToken, &runtimeLookup);
13812 if (op2 == nullptr)
13813 { // compDonotInline()
13817 // Run this always so we can get access exceptions even with SkipVerification.
13818 accessAllowedResult =
13819 info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
13820 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
13822 if (opcode == CEE_UNBOX_ANY && !eeIsValueClass(resolvedToken.hClass))
13824 if (tiVerificationNeeded)
13826 typeInfo tiUnbox = impStackTop().seTypeInfo;
13827 Verify(tiUnbox.IsObjRef(), "bad unbox.any arg");
13828 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
13829 tiRetVal.NormaliseForStack();
13831 op1 = impPopStack().val;
13835 /* Pop the object and create the unbox helper call */
13836 /* You might think that for UNBOX_ANY we need to push a different */
13837 /* (non-byref) type, but here we're making the tiRetVal that is used */
13838 /* for the intermediate pointer which we then transfer onto the OBJ */
13839 /* instruction. OBJ then creates the appropriate tiRetVal. */
13840 if (tiVerificationNeeded)
13842 typeInfo tiUnbox = impStackTop().seTypeInfo;
13843 Verify(tiUnbox.IsObjRef(), "Bad unbox arg");
13845 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
13846 Verify(tiRetVal.IsValueClass(), "not value class");
13847 tiRetVal.MakeByRef();
13849 // We always come from an objref, so this is safe byref
13850 tiRetVal.SetIsPermanentHomeByRef();
13851 tiRetVal.SetIsReadonlyByRef();
13854 op1 = impPopStack().val;
13855 assertImp(op1->gtType == TYP_REF);
13857 helper = info.compCompHnd->getUnBoxHelper(resolvedToken.hClass);
13858 assert(helper == CORINFO_HELP_UNBOX || helper == CORINFO_HELP_UNBOX_NULLABLE);
13860 // We only want to expand inline the normal UNBOX helper;
13861 expandInline = (helper == CORINFO_HELP_UNBOX);
13865 if (compCurBB->isRunRarely())
13867 expandInline = false; // not worth the code expansion
13873 // we are doing normal unboxing
13874 // inline the common case of the unbox helper
13875 // UNBOX(exp) morphs into
13876 // clone = pop(exp);
13877 // ((*clone == typeToken) ? nop : helper(clone, typeToken));
13878 // push(clone + sizeof(void*))
13880 GenTreePtr cloneOperand;
13881 op1 = impCloneExpr(op1, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
13882 nullptr DEBUGARG("inline UNBOX clone1"));
13883 op1 = gtNewOperNode(GT_IND, TYP_I_IMPL, op1);
13885 GenTreePtr condBox = gtNewOperNode(GT_EQ, TYP_INT, op1, op2);
13887 op1 = impCloneExpr(cloneOperand, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
13888 nullptr DEBUGARG("inline UNBOX clone2"));
13889 op2 = impTokenToHandle(&resolvedToken);
13890 if (op2 == nullptr)
13891 { // compDonotInline()
13894 args = gtNewArgList(op2, op1);
13895 op1 = gtNewHelperCallNode(helper, TYP_VOID, 0, args);
13897 op1 = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), op1);
13898 op1 = gtNewQmarkNode(TYP_VOID, condBox, op1);
13899 condBox->gtFlags |= GTF_RELOP_QMARK;
13901 // QMARK nodes cannot reside on the evaluation stack. Because there
13902 // may be other trees on the evaluation stack that side-effect the
13903 // sources of the UNBOX operation we must spill the stack.
13905 impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13907 // Create the address-expression to reference past the object header
13908 // to the beginning of the value-type. Today this means adjusting
// past the base of the object's vtable field, which is pointer sized.
13911 op2 = gtNewIconNode(sizeof(void*), TYP_I_IMPL);
13912 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, cloneOperand, op2);
13916 unsigned callFlags = (helper == CORINFO_HELP_UNBOX) ? 0 : GTF_EXCEPT;
13918 // Don't optimize, just call the helper and be done with it
13919 args = gtNewArgList(op2, op1);
13920 op1 = gtNewHelperCallNode(helper,
13921 (var_types)((helper == CORINFO_HELP_UNBOX) ? TYP_BYREF : TYP_STRUCT),
13925 assert(helper == CORINFO_HELP_UNBOX && op1->gtType == TYP_BYREF || // Unbox helper returns a byref.
13926 helper == CORINFO_HELP_UNBOX_NULLABLE &&
varTypeIsStruct(op1)); // UnboxNullable helper returns a struct.
/*
   ---------------------------------------------------------------------
   |           | CORINFO_HELP_UNBOX      | CORINFO_HELP_UNBOX_NULLABLE  |
   | opcode    | (which returns a BYREF) | (which returns a STRUCT)     |
   |-------------------------------------------------------------------|
   | UNBOX     | push the BYREF          | spill the STRUCT to a local, |
   |           |                         | push the BYREF to this local |
   |-------------------------------------------------------------------|
   | UNBOX_ANY | push a GT_OBJ of        | push the STRUCT; on Linux,   |
   |           | the BYREF               | when the struct is returned  |
   |           |                         | in two registers, create a   |
   |           |                         | temp whose address is passed |
   |           |                         | to the unbox_nullable helper |
   ---------------------------------------------------------------------
*/
13950 if (opcode == CEE_UNBOX)
13952 if (helper == CORINFO_HELP_UNBOX_NULLABLE)
13954 // Unbox nullable helper returns a struct type.
// We need to spill it to a temp so that we can take the address of it.
// Here we need the unsafe value cls check, since the address of the struct is taken to be used
// further along and is potentially exploitable.
13959 unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a nullable"));
13960 lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */);
13962 op2 = gtNewLclvNode(tmp, TYP_STRUCT);
13963 op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
13964 assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp.
13966 op2 = gtNewLclvNode(tmp, TYP_STRUCT);
13967 op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2);
13968 op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2);
13971 assert(op1->gtType == TYP_BYREF);
13972 assert(!tiVerificationNeeded || tiRetVal.IsByRef());
13976 assert(opcode == CEE_UNBOX_ANY);
13978 if (helper == CORINFO_HELP_UNBOX)
13980 // Normal unbox helper returns a TYP_BYREF.
13981 impPushOnStack(op1, tiRetVal);
13986 assert(helper == CORINFO_HELP_UNBOX_NULLABLE && "Make sure the helper is nullable!");
13988 #if FEATURE_MULTIREG_RET
13990 if (varTypeIsStruct(op1) && IsMultiRegReturnedType(resolvedToken.hClass))
13992 // Unbox nullable helper returns a TYP_STRUCT.
13993 // For the multi-reg case we need to spill it to a temp so that
13994 // we can pass the address to the unbox_nullable jit helper.
13996 unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a register returnable nullable"));
13997 lvaTable[tmp].lvIsMultiRegArg = true;
13998 lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */);
14000 op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14001 op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
14002 assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp.
14004 op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14005 op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2);
14006 op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2);
14008 // In this case the return value of the unbox helper is TYP_BYREF.
14009 // Make sure the right type is placed on the operand type stack.
14010 impPushOnStack(op1, tiRetVal);
14012 // Load the struct.
14015 assert(op1->gtType == TYP_BYREF);
14016 assert(!tiVerificationNeeded || tiRetVal.IsByRef());
14022 #endif // !FEATURE_MULTIREG_RET
// If the struct is not returnable in registers, we have it materialized in the RetBuf.
14026 assert(op1->gtType == TYP_STRUCT);
14027 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
14028 assert(tiRetVal.IsValueClass());
14032 impPushOnStack(op1, tiRetVal);
14038 /* Get the Class index */
14039 assertImp(sz == sizeof(unsigned));
14041 _impResolveToken(CORINFO_TOKENKIND_Box);
14043 JITDUMP(" %08X", resolvedToken.token);
14045 if (tiVerificationNeeded)
14047 typeInfo tiActual = impStackTop().seTypeInfo;
14048 typeInfo tiBox = verMakeTypeInfo(resolvedToken.hClass);
14050 Verify(verIsBoxable(tiBox), "boxable type expected");
14052 // check the class constraints of the boxed type in case we are boxing an uninitialized value
14053 Verify(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
14054 "boxed type has unsatisfied class constraints");
14056 Verify(tiCompatibleWith(tiActual, tiBox.NormaliseForStack(), true), "type mismatch");
14058 // Observation: the following code introduces a boxed value class on the stack, but,
14059 // according to the ECMA spec, one would simply expect: tiRetVal =
14060 // typeInfo(TI_REF,impGetObjectClass());
14062 // Push the result back on the stack,
14063 // even if clsHnd is a value class we want the TI_REF
// we call back to the EE to find out what type we should push (for Nullable<T> we push T)
14065 tiRetVal = typeInfo(TI_REF, info.compCompHnd->getTypeForBox(resolvedToken.hClass));
14068 accessAllowedResult =
14069 info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14070 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14072 // Note BOX can be used on things that are not value classes, in which
14073 // case we get a NOP. However the verifier's view of the type on the
14074 // stack changes (in generic code a 'T' becomes a 'boxed T')
14075 if (!eeIsValueClass(resolvedToken.hClass))
14077 verCurrentState.esStack[verCurrentState.esStackDepth - 1].seTypeInfo = tiRetVal;
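// For illustration (a sketch): generic code such as "box !!T; unbox.any !!T"
// round-trips a value through object with no observable effect, so the
// look-ahead below skips the paired unbox.any when both tokens resolve to
// the same (non-shared) class.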
14081 // Look ahead for unbox.any
14082 if (codeAddr + (sz + 1 + sizeof(mdToken)) <= codeEndp && codeAddr[sz] == CEE_UNBOX_ANY)
14084 DWORD classAttribs = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
14085 if (!(classAttribs & CORINFO_FLG_SHAREDINST))
14087 CORINFO_RESOLVED_TOKEN unboxResolvedToken;
14089 impResolveToken(codeAddr + (sz + 1), &unboxResolvedToken, CORINFO_TOKENKIND_Class);
14091 if (unboxResolvedToken.hClass == resolvedToken.hClass)
14093 // Skip the next unbox.any instruction
14094 sz += sizeof(mdToken) + 1;
14100 impImportAndPushBox(&resolvedToken);
14101 if (compDonotInline())
14110 /* Get the Class index */
14111 assertImp(sz == sizeof(unsigned));
14113 _impResolveToken(CORINFO_TOKENKIND_Class);
14115 JITDUMP(" %08X", resolvedToken.token);
14117 if (tiVerificationNeeded)
14119 tiRetVal = typeInfo(TI_INT);
14122 op1 = gtNewIconNode(info.compCompHnd->getClassSize(resolvedToken.hClass));
14123 impPushOnStack(op1, tiRetVal);
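// Net result (a sketch): "sizeof MyStruct" imports as a constant,
//     GT_CNS_INT(getClassSize(hClass))
// e.g. sizeof on int32 pushes 4; nothing is evaluated at run time.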
14126 case CEE_CASTCLASS:
14128 /* Get the Class index */
14130 assertImp(sz == sizeof(unsigned));
14132 _impResolveToken(CORINFO_TOKENKIND_Casting);
14134 JITDUMP(" %08X", resolvedToken.token);
14136 if (!opts.IsReadyToRun())
14138 op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
14139 if (op2 == nullptr)
14140 { // compDonotInline()
14145 if (tiVerificationNeeded)
14147 Verify(impStackTop().seTypeInfo.IsObjRef(), "object ref expected");
14149 tiRetVal = typeInfo(TI_REF, resolvedToken.hClass);
14152 accessAllowedResult =
14153 info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14154 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14156 op1 = impPopStack().val;
14158 /* Pop the address and create the 'checked cast' helper call */
14160 // At this point we expect typeRef to contain the token, op1 to contain the value being cast,
14161 // and op2 to contain code that creates the type handle corresponding to typeRef
14164 #ifdef FEATURE_READYTORUN_COMPILER
14165 if (opts.IsReadyToRun())
14167 GenTreePtr opLookup = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_CHKCAST,
14168 TYP_REF, gtNewArgList(op1));
14169 usingReadyToRunHelper = (opLookup != nullptr);
14170 op1 = (usingReadyToRunHelper ? opLookup : op1);
14172 if (!usingReadyToRunHelper)
14174 // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
14175 // and the chkcastany call with a single call to a dynamic R2R cell that will:
14176 // 1) Load the context
14177 // 2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
14178 // 3) Check the object on the stack for the type-cast
14179 // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
14181 op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
14182 if (op2 == nullptr)
14183 { // compDonotInline()
14189 if (!usingReadyToRunHelper)
14192 op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, true);
14194 if (compDonotInline())
14199 /* Push the result back on the stack */
14200 impPushOnStack(op1, tiRetVal);
14205 if (compIsForInlining())
14207 // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
14208 // TODO: Will this be too strict, given that we will inline many basic blocks?
14209 // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
14211 /* Do we have just the exception on the stack? */
14213 if (verCurrentState.esStackDepth != 1)
14215 /* if not, just don't inline the method */
14217 compInlineResult->NoteFatal(InlineObservation::CALLEE_THROW_WITH_INVALID_STACK);
14222 if (tiVerificationNeeded)
14224 tiRetVal = impStackTop().seTypeInfo;
14225 Verify(tiRetVal.IsObjRef(), "object ref expected");
14226 if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
14228 Verify(!tiRetVal.IsThisPtr(), "throw uninitialized this");
14232 block->bbSetRunRarely(); // any block with a throw is rare
14233 /* Pop the exception object and create the 'throw' helper call */
14235 op1 = gtNewHelperCallNode(CORINFO_HELP_THROW, TYP_VOID, GTF_EXCEPT, gtNewArgList(impPopStack().val));
14238 if (verCurrentState.esStackDepth > 0)
14240 impEvalSideEffects();
14243 assert(verCurrentState.esStackDepth == 0);
14249 assert(!compIsForInlining());
14251 if (info.compXcptnsCount == 0)
14253 BADCODE("rethrow outside catch");
14256 if (tiVerificationNeeded)
14258 Verify(block->hasHndIndex(), "rethrow outside catch");
14259 if (block->hasHndIndex())
14261 EHblkDsc* HBtab = ehGetDsc(block->getHndIndex());
14262 Verify(!HBtab->HasFinallyOrFaultHandler(), "rethrow in finally or fault");
14263 if (HBtab->HasFilter())
14265 // we better be in the handler clause part, not the filter part
14266 Verify(jitIsBetween(compCurBB->bbCodeOffs, HBtab->ebdHndBegOffs(), HBtab->ebdHndEndOffs()),
14267 "rethrow in filter");
14272 /* Create the 'rethrow' helper call */
14274 op1 = gtNewHelperCallNode(CORINFO_HELP_RETHROW, TYP_VOID, GTF_EXCEPT);
14280 assertImp(sz == sizeof(unsigned));
14282 _impResolveToken(CORINFO_TOKENKIND_Class);
14284 JITDUMP(" %08X", resolvedToken.token);
14286 if (tiVerificationNeeded)
14288 typeInfo tiTo = impStackTop().seTypeInfo;
14289 typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
14291 Verify(tiTo.IsByRef(), "byref expected");
14292 Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
14294 Verify(tiCompatibleWith(tiInstr, tiTo.DereferenceByRef(), false),
14295 "type operand incompatible with type of address");
14298 size = info.compCompHnd->getClassSize(resolvedToken.hClass); // Size
14299 op2 = gtNewIconNode(0); // Value
14300 op1 = impPopStack().val; // Dest
14301 op1 = gtNewBlockVal(op1, size);
14302 op1 = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, false);
14307 if (tiVerificationNeeded)
14309 Verify(false, "bad opcode");
14312 op3 = impPopStack().val; // Size
14313 op2 = impPopStack().val; // Value
14314 op1 = impPopStack().val; // Dest
14316 if (op3->IsCnsIntOrI())
14318 size = (unsigned)op3->AsIntConCommon()->IconValue();
14319 op1 = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, size);
14323 op1 = new (this, GT_DYN_BLK) GenTreeDynBlk(op1, op3);
14326 op1 = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, false);
14332 if (tiVerificationNeeded)
14334 Verify(false, "bad opcode");
14336 op3 = impPopStack().val; // Size
14337 op2 = impPopStack().val; // Src
14338 op1 = impPopStack().val; // Dest
14340 if (op3->IsCnsIntOrI())
14342 size = (unsigned)op3->AsIntConCommon()->IconValue();
14343 op1 = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, size);
14347 op1 = new (this, GT_DYN_BLK) GenTreeDynBlk(op1, op3);
14350 if (op2->OperGet() == GT_ADDR)
14352 op2 = op2->gtOp.gtOp1;
14356 op2 = gtNewOperNode(GT_IND, TYP_STRUCT, op2);
14359 op1 = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, true);
14364 assertImp(sz == sizeof(unsigned));
14366 _impResolveToken(CORINFO_TOKENKIND_Class);
14368 JITDUMP(" %08X", resolvedToken.token);
14370 if (tiVerificationNeeded)
14372 typeInfo tiFrom = impStackTop().seTypeInfo;
14373 typeInfo tiTo = impStackTop(1).seTypeInfo;
14374 typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
14376 Verify(tiFrom.IsByRef(), "expected byref source");
14377 Verify(tiTo.IsByRef(), "expected byref destination");
14379 Verify(tiCompatibleWith(tiFrom.DereferenceByRef(), tiInstr, false),
14380 "type of source address incompatible with type operand");
14381 Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
14382 Verify(tiCompatibleWith(tiInstr, tiTo.DereferenceByRef(), false),
14383 "type operand incompatible with type of destination address");
14386 if (!eeIsValueClass(resolvedToken.hClass))
14388 op1 = impPopStack().val; // address to load from
14390 impBashVarAddrsToI(op1);
14392 assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
14394 op1 = gtNewOperNode(GT_IND, TYP_REF, op1);
14395 op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
14397 impPushOnStackNoType(op1);
14398 opcode = CEE_STIND_REF;
14400 goto STIND_POST_VERIFY;
14403 op2 = impPopStack().val; // Src
14404 op1 = impPopStack().val; // Dest
14405 op1 = gtNewCpObjNode(op1, op2, resolvedToken.hClass, ((prefixFlags & PREFIX_VOLATILE) != 0));
14410 assertImp(sz == sizeof(unsigned));
14412 _impResolveToken(CORINFO_TOKENKIND_Class);
14414 JITDUMP(" %08X", resolvedToken.token);
14416 if (eeIsValueClass(resolvedToken.hClass))
14418 lclTyp = TYP_STRUCT;
14425 if (tiVerificationNeeded)
14428 typeInfo tiPtr = impStackTop(1).seTypeInfo;
14430 // Make sure we have a good looking byref
14431 Verify(tiPtr.IsByRef(), "pointer not byref");
14432 Verify(!tiPtr.IsReadonlyByRef(), "write to readonly byref");
14433 if (!tiPtr.IsByRef() || tiPtr.IsReadonlyByRef())
14435 compUnsafeCastUsed = true;
14438 typeInfo ptrVal = DereferenceByRef(tiPtr);
14439 typeInfo argVal = verMakeTypeInfo(resolvedToken.hClass);
14441 if (!tiCompatibleWith(impStackTop(0).seTypeInfo, NormaliseForStack(argVal), true))
14443 Verify(false, "type of value incompatible with type operand");
14444 compUnsafeCastUsed = true;
14447 if (!tiCompatibleWith(argVal, ptrVal, false))
14449 Verify(false, "type operand incompatible with type of address");
14450 compUnsafeCastUsed = true;
14455 compUnsafeCastUsed = true;
14458 if (lclTyp == TYP_REF)
14460 opcode = CEE_STIND_REF;
14461 goto STIND_POST_VERIFY;
14464 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
14465 if (impIsPrimitive(jitTyp))
14467 lclTyp = JITtype2varType(jitTyp);
14468 goto STIND_POST_VERIFY;
14471 op2 = impPopStack().val; // Value
14472 op1 = impPopStack().val; // Ptr
14474 assertImp(varTypeIsStruct(op2));
14476 op1 = impAssignStructPtr(op1, op2, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
14482 assert(!compIsForInlining());
14484 // Being lazy here. Refanys are tricky in terms of GC tracking.
14485 // Since it is uncommon, just don't perform struct promotion in any method that contains mkrefany.
14487 JITDUMP("disabling struct promotion because of mkrefany\n");
14488 fgNoStructPromotion = true;
14490 oper = GT_MKREFANY;
14491 assertImp(sz == sizeof(unsigned));
14493 _impResolveToken(CORINFO_TOKENKIND_Class);
14495 JITDUMP(" %08X", resolvedToken.token);
14497 op2 = impTokenToHandle(&resolvedToken, nullptr, TRUE);
14498 if (op2 == nullptr)
14499 { // compDonotInline()
14503 if (tiVerificationNeeded)
14505 typeInfo tiPtr = impStackTop().seTypeInfo;
14506 typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
14508 Verify(!verIsByRefLike(tiInstr), "mkrefany of byref-like class");
14509 Verify(!tiPtr.IsReadonlyByRef(), "readonly byref used with mkrefany");
14510 Verify(typeInfo::AreEquivalent(tiPtr.DereferenceByRef(), tiInstr), "type mismatch");
14513 accessAllowedResult =
14514 info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14515 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14517 op1 = impPopStack().val;
14519 // @SPECVIOLATION: TYP_INT should not be allowed here by a strict reading of the spec.
14520 // But JIT32 allowed it, so we continue to allow it.
14521 assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL || op1->TypeGet() == TYP_INT);
14523 // MKREFANY returns a struct. op2 is the class token.
14524 op1 = gtNewOperNode(oper, TYP_STRUCT, op1, op2);
14526 impPushOnStack(op1, verMakeTypeInfo(impGetRefAnyClass()));
14532 assertImp(sz == sizeof(unsigned));
14534 _impResolveToken(CORINFO_TOKENKIND_Class);
14536 JITDUMP(" %08X", resolvedToken.token);
14540 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
14542 if (tiVerificationNeeded)
14544 typeInfo tiPtr = impStackTop().seTypeInfo;
14546 // Make sure we have a byref
14547 if (!tiPtr.IsByRef())
14549 Verify(false, "pointer not byref");
14550 compUnsafeCastUsed = true;
14552 typeInfo tiPtrVal = DereferenceByRef(tiPtr);
14554 if (!tiCompatibleWith(tiPtrVal, tiRetVal, false))
14556 Verify(false, "type of address incompatible with type operand");
14557 compUnsafeCastUsed = true;
14559 tiRetVal.NormaliseForStack();
14563 compUnsafeCastUsed = true;
14566 if (eeIsValueClass(resolvedToken.hClass))
14568 lclTyp = TYP_STRUCT;
14573 opcode = CEE_LDIND_REF;
14574 goto LDIND_POST_VERIFY;
14577 op1 = impPopStack().val;
14579 assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL);
14581 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
14582 if (impIsPrimitive(jitTyp))
14584 op1 = gtNewOperNode(GT_IND, JITtype2varType(jitTyp), op1);
14586 // Could point anywhere; for example, a boxed class static int.
14587 op1->gtFlags |= GTF_IND_TGTANYWHERE | GTF_GLOB_REF;
14588 assertImp(varTypeIsArithmetic(op1->gtType));
14592 // OBJ returns a struct and takes an inline argument,
14593 // which is the class token of the loaded obj
14594 op1 = gtNewObjNode(resolvedToken.hClass, op1);
14596 op1->gtFlags |= GTF_EXCEPT;
14598 impPushOnStack(op1, tiRetVal);
14603 if (tiVerificationNeeded)
14605 typeInfo tiArray = impStackTop().seTypeInfo;
14606 Verify(verIsSDArray(tiArray), "bad array");
14607 tiRetVal = typeInfo(TI_INT);
14610 op1 = impPopStack().val;
14611 if (!opts.MinOpts() && !opts.compDbgCode)
14613 /* Use the GT_ARR_LENGTH operator so range-check opts see this */
14614 GenTreeArrLen* arrLen =
14615 new (this, GT_ARR_LENGTH) GenTreeArrLen(TYP_INT, op1, offsetof(CORINFO_Array, length));
14617 /* Mark the block as containing a length expression */
14619 if (op1->gtOper == GT_LCL_VAR)
14621 block->bbFlags |= BBF_HAS_IDX_LEN;
14628 /* Create the expression "*(array_addr + ArrLenOffs)" */
14629 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
14630 gtNewIconNode(offsetof(CORINFO_Array, length), TYP_I_IMPL));
14631 op1 = gtNewOperNode(GT_IND, TYP_INT, op1);
14632 op1->gtFlags |= GTF_IND_ARR_LEN;
14635 /* An indirection will cause a GPF if the address is null */
14636 op1->gtFlags |= GTF_EXCEPT;
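// A sketch of the minopts/debug expansion built above:
//
//     GT_IND(TYP_INT)                        // load of the length field
//         GT_ADD(TYP_BYREF)
//             <array ref>                    // op1 popped from the stack
//             GT_CNS_INT(offsetof(CORINFO_Array, length))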
14638 /* Push the result back on the stack */
14639 impPushOnStack(op1, tiRetVal);
14643 op1 = gtNewHelperCallNode(CORINFO_HELP_USER_BREAKPOINT, TYP_VOID);
14647 if (opts.compDbgCode)
14649 op1 = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
14654 /******************************** NYI *******************************/
14657 OutputDebugStringA("CLR: Invalid x86 breakpoint in IL stream\n");
14660 case CEE_MACRO_END:
14663 BADCODE3("unknown opcode", ": %02X", (int)opcode);
14667 prevOpcode = opcode;
14670 assert(!insertLdloc || opcode == CEE_DUP);
14673 assert(!insertLdloc);
14676 #undef _impResolveToken
14679 #pragma warning(pop)
14682 // Push a local/argument tree on the operand stack
14683 void Compiler::impPushVar(GenTree* op, typeInfo tiRetVal)
14685 tiRetVal.NormaliseForStack();
14687 if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init) && tiRetVal.IsThisPtr())
14689 tiRetVal.SetUninitialisedObjRef();
14692 impPushOnStack(op, tiRetVal);
14695 // Load a local/argument on the operand stack
14696 // lclNum is an index into lvaTable *NOT* the arg/lcl index in the IL
14697 void Compiler::impLoadVar(unsigned lclNum, IL_OFFSET offset, typeInfo tiRetVal)
14701 if (lvaTable[lclNum].lvNormalizeOnLoad())
14703 lclTyp = lvaGetRealType(lclNum);
14707 lclTyp = lvaGetActualType(lclNum);
14710 impPushVar(gtNewLclvNode(lclNum, lclTyp, offset), tiRetVal);
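// For example: a parameter declared as int8 that is lvNormalizeOnLoad() is
// loaded with its real small type (TYP_BYTE) so that normalizing casts can
// be applied, while other locals are loaded with their widened stack type
// (TYP_INT).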
14713 // Load an argument on the operand stack
14714 // Shared by the various CEE_LDARG opcodes
14715 // ilArgNum is the argument index as specified in IL.
14716 // It will be mapped to the correct lvaTable index
14717 void Compiler::impLoadArg(unsigned ilArgNum, IL_OFFSET offset)
14719 Verify(ilArgNum < info.compILargsCount, "bad arg num");
14721 if (compIsForInlining())
14723 if (ilArgNum >= info.compArgsCount)
14725 compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_ARGUMENT_NUMBER);
14729 impPushVar(impInlineFetchArg(ilArgNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo),
14730 impInlineInfo->lclVarInfo[ilArgNum].lclVerTypeInfo);
14734 if (ilArgNum >= info.compArgsCount)
14739 unsigned lclNum = compMapILargNum(ilArgNum); // account for possible hidden param
14741 if (lclNum == info.compThisArg)
14743 lclNum = lvaArg0Var;
14746 impLoadVar(lclNum, offset);
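// A hypothetical example of the mapping above: in an instance method
// "int Foo(int a)", IL "ldarg.1" arrives here with ilArgNum == 1;
// compMapILargNum accounts for any hidden param (e.g. a return buffer) to
// produce the lvaTable index, and a reference to the 'this' arg is
// redirected to lvaArg0Var.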
14750 // Load a local on the operand stack
14751 // Shared by the various CEE_LDLOC opcodes
14752 // ilLclNum is the local index as specified in IL.
14753 // It will be mapped to the correct lvaTable index
14754 void Compiler::impLoadLoc(unsigned ilLclNum, IL_OFFSET offset)
14756 if (tiVerificationNeeded)
14758 Verify(ilLclNum < info.compMethodInfo->locals.numArgs, "bad loc num");
14759 Verify(info.compInitMem, "initLocals not set");
14762 if (compIsForInlining())
14764 if (ilLclNum >= info.compMethodInfo->locals.numArgs)
14766 compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_LOCAL_NUMBER);
14770 // Get the local type
14771 var_types lclTyp = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclTypeInfo;
14773 typeInfo tiRetVal = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclVerTypeInfo;
14775 /* Have we allocated a temp for this local? */
14777 unsigned lclNum = impInlineFetchLocal(ilLclNum DEBUGARG("Inline ldloc first use temp"));
14779 // All vars of inlined methods should be !lvNormalizeOnLoad()
14781 assert(!lvaTable[lclNum].lvNormalizeOnLoad());
14782 lclTyp = genActualType(lclTyp);
14784 impPushVar(gtNewLclvNode(lclNum, lclTyp), tiRetVal);
14788 if (ilLclNum >= info.compMethodInfo->locals.numArgs)
14793 unsigned lclNum = info.compArgsCount + ilLclNum;
14795 impLoadVar(lclNum, offset);
14799 #ifdef _TARGET_ARM_
14800 /**************************************************************************************
14802 * When assigning a vararg call src to a HFA lcl dest, mark that we cannot promote the
14803 * dst struct, because struct promotion will turn it into a float/double variable while
14804 * the rhs will be an int/long variable. We do not generate code that assigns an int into
14805 * a float, but nothing prevents such a tree from being formed. The tree
14806 * would look like: (=, (typ_float, typ_int)) or (GT_TRANSFER, (typ_float, typ_int))
14808 * tmpNum - the lcl dst variable num that is a struct.
14809 * src - the src tree assigned to the dest that is a struct/int (for a varargs call).
14810 * hClass - the type handle for the struct variable.
14812 * TODO-ARM-CQ: [301608] This is a rare scenario with varargs and struct promotion coming into play,
14813 * however, we could do a codegen of transferring from int to float registers
14814 * (transfer, not a cast.)
14817 void Compiler::impMarkLclDstNotPromotable(unsigned tmpNum, GenTreePtr src, CORINFO_CLASS_HANDLE hClass)
14819 if (src->gtOper == GT_CALL && src->gtCall.IsVarargs() && IsHfa(hClass))
14821 int hfaSlots = GetHfaCount(hClass);
14822 var_types hfaType = GetHfaType(hClass);
14824 // If we have varargs, the importer morphs the method's return type to "int" irrespective
14825 // of its original type (struct/float), because the ABI specifies that the return is made
14826 // in integer registers. We don't want struct promotion to turn an expression like this:
14827 // lclFld_int = callvar_int() into lclFld_float = callvar_int();
14828 // This means an int is getting assigned to a float without a cast. Prevent the promotion.
14829 if ((hfaType == TYP_DOUBLE && hfaSlots == sizeof(double) / REGSIZE_BYTES) ||
14830 (hfaType == TYP_FLOAT && hfaSlots == sizeof(float) / REGSIZE_BYTES))
14832 // Make sure this struct type stays as struct so we can receive the call in a struct.
14833 lvaTable[tmpNum].lvIsMultiRegRet = true;
14837 #endif // _TARGET_ARM_
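// An illustrative (hypothetical) scenario for the ARM helper above:
//
//     struct D { double d; };                 // an HFA of one double, two slots
//     D r = VarArgMethod(__arglist(...));
//
// The varargs ABI returns the value in integer registers, so the call is
// typed as an integer; marking the temp lvIsMultiRegRet keeps struct
// promotion from turning the destination into a float register variable
// that the integer call result could not feed.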
14839 #if FEATURE_MULTIREG_RET
14840 GenTreePtr Compiler::impAssignMultiRegTypeToVar(GenTreePtr op, CORINFO_CLASS_HANDLE hClass)
14842 unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Return value temp for multireg return."));
14843 impAssignTempGen(tmpNum, op, hClass, (unsigned)CHECK_SPILL_NONE);
14844 GenTreePtr ret = gtNewLclvNode(tmpNum, op->gtType);
14846 // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
14847 ret->gtFlags |= GTF_DONT_CSE;
14849 assert(IsMultiRegReturnedType(hClass));
14851 // Mark the var so that fields are not promoted and stay together.
14852 lvaTable[tmpNum].lvIsMultiRegRet = true;
14856 #endif // FEATURE_MULTIREG_RET
14858 // do import for a return
14859 // returns false if inlining was aborted
14860 // opcode can be ret or call in the case of a tail.call
14861 bool Compiler::impReturnInstruction(BasicBlock* block, int prefixFlags, OPCODE& opcode)
14863 if (tiVerificationNeeded)
14865 verVerifyThisPtrInitialised();
14867 unsigned expectedStack = 0;
14868 if (info.compRetType != TYP_VOID)
14870 typeInfo tiVal = impStackTop().seTypeInfo;
14871 typeInfo tiDeclared =
14872 verMakeTypeInfo(info.compMethodInfo->args.retType, info.compMethodInfo->args.retTypeClass);
14874 Verify(!verIsByRefLike(tiDeclared) || verIsSafeToReturnByRef(tiVal), "byref return");
14876 Verify(tiCompatibleWith(tiVal, tiDeclared.NormaliseForStack(), true), "type mismatch");
14879 Verify(verCurrentState.esStackDepth == expectedStack, "stack non-empty on return");
14882 GenTree* op2 = nullptr;
14883 GenTree* op1 = nullptr;
14884 CORINFO_CLASS_HANDLE retClsHnd = nullptr;
14886 if (info.compRetType != TYP_VOID)
14888 StackEntry se = impPopStack(retClsHnd);
14891 if (!compIsForInlining())
14893 impBashVarAddrsToI(op2);
14894 op2 = impImplicitIorI4Cast(op2, info.compRetType);
14895 op2 = impImplicitR4orR8Cast(op2, info.compRetType);
14896 assertImp((genActualType(op2->TypeGet()) == genActualType(info.compRetType)) ||
14897 ((op2->TypeGet() == TYP_I_IMPL) && (info.compRetType == TYP_BYREF)) ||
14898 ((op2->TypeGet() == TYP_BYREF) && (info.compRetType == TYP_I_IMPL)) ||
14899 (varTypeIsFloating(op2->gtType) && varTypeIsFloating(info.compRetType)) ||
14900 (varTypeIsStruct(op2) && varTypeIsStruct(info.compRetType)));
14903 if (opts.compGcChecks && info.compRetType == TYP_REF)
14905 // DDB 3483 : JIT Stress: early termination of GC ref's life time in exception code path
14906 // VSW 440513: Incorrect gcinfo on the return value under COMPlus_JitGCChecks=1 for methods with
14909 assert(op2->gtType == TYP_REF);
14911 // confirm that the argument is a GC pointer (for debugging (GC stress))
14912 GenTreeArgList* args = gtNewArgList(op2);
14913 op2 = gtNewHelperCallNode(CORINFO_HELP_CHECK_OBJ, TYP_REF, 0, args);
14917 printf("\ncompGcChecks tree:\n");
14925 // inlinee's stack should be empty now.
14926 assert(verCurrentState.esStackDepth == 0);
14931 printf("\n\n Inlinee Return expression (before normalization) =>\n");
14936 // Make sure the type matches the original call.
14938 var_types returnType = genActualType(op2->gtType);
14939 var_types originalCallType = impInlineInfo->inlineCandidateInfo->fncRetType;
14940 if ((returnType != originalCallType) && (originalCallType == TYP_STRUCT))
14942 originalCallType = impNormStructType(impInlineInfo->inlineCandidateInfo->methInfo.args.retTypeClass);
14945 if (returnType != originalCallType)
14947 compInlineResult->NoteFatal(InlineObservation::CALLSITE_RETURN_TYPE_MISMATCH);
14951 // Below, we are going to set impInlineInfo->retExpr to the tree with the return
14952 // expression. At this point, retExpr could already be set if there are multiple
14953 // return blocks (meaning lvaInlineeReturnSpillTemp != BAD_VAR_NUM) and one of
14954 // the other blocks already set it. If there is only a single return block,
14955 // retExpr shouldn't be set. However, this is not true if we reimport a block
14956 // with a return. In that case, retExpr will be set, then the block will be
14957 // reimported, but retExpr won't get cleared as part of setting the block to
14958 // be reimported. The reimported retExpr value should be the same, so even if
14959 // we don't unconditionally overwrite it, it shouldn't matter.
14960 if (info.compRetNativeType != TYP_STRUCT)
14962 // compRetNativeType is not TYP_STRUCT.
14963 // This implies it could be either a scalar type or SIMD vector type or
14964 // a struct type that can be normalized to a scalar type.
14966 if (varTypeIsStruct(info.compRetType))
14968 noway_assert(info.compRetBuffArg == BAD_VAR_NUM);
14969 // adjust the type away from struct to integral
14970 // and no normalizing
14971 op2 = impFixupStructReturnType(op2, retClsHnd);
14975 // Do we have to normalize?
14976 var_types fncRealRetType = JITtype2varType(info.compMethodInfo->args.retType);
14977 if ((varTypeIsSmall(op2->TypeGet()) || varTypeIsSmall(fncRealRetType)) &&
14978 fgCastNeeded(op2, fncRealRetType))
14980 // Small-typed return values are normalized by the callee
14981 op2 = gtNewCastNode(TYP_INT, op2, fncRealRetType);
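// A sketch of the normalization above: if the inlinee is declared to return
// a small type, say "byte", and op2 is a full-width TYP_INT expression, we
// wrap op2 in a cast down to the declared small type so that the callee,
// not the caller, truncates the return value.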
14985 if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
14987 assert(info.compRetNativeType != TYP_VOID &&
14988 (fgMoreThanOneReturnBlock() || impInlineInfo->hasPinnedLocals));
14990 // This is a bit of a workaround...
14991 // If we are inlining a call that returns a struct, where the actual "native" return type is
14992 // not a struct (for example, the struct is composed of exactly one int, and the native
14993 // return type is thus an int), and the inlinee has multiple return blocks (thus,
14994 // lvaInlineeReturnSpillTemp is != BAD_VAR_NUM, and is the index of a local var that is set
14995 // to the *native* return type), and at least one of the return blocks is the result of
14996 // a call, then we have a problem. The situation is like this (from a failed test case):
14999 // // Note: valuetype plinq_devtests.LazyTests/LIX is a struct with only a single int
15000 // call !!0 [mscorlib]System.Threading.LazyInitializer::EnsureInitialized<valuetype
15001 // plinq_devtests.LazyTests/LIX>(!!0&, bool&, object&, class [mscorlib]System.Func`1<!!0>)
15005 // ldobj !!T // this gets bashed to a GT_LCL_FLD, type TYP_INT
15008 // call !!0 System.Threading.LazyInitializer::EnsureInitializedCore<!!0>(!!0&, bool&,
15009 // object&, class System.Func`1<!!0>)
15012 // In the code above, when we call impFixupStructReturnType(), we will change the op2 return type
15013 // of the inlinee return node, but we don't do that for GT_CALL nodes, which we delay until
15014 // morphing when we call fgFixupStructReturn(). We do this, apparently, to handle nested
15015 // inlining properly by leaving the correct type on the GT_CALL node through importing.
15017 // To fix this, for this case, we temporarily change the GT_CALL node type to the
15018 // native return type, which is what it will be set to eventually. We generate the
15019 // assignment to the return temp, using the correct type, and then restore the GT_CALL
15020 // node type. During morphing, the GT_CALL will get the correct, final, native return type.
15022 bool restoreType = false;
15023 if ((op2->OperGet() == GT_CALL) && (info.compRetType == TYP_STRUCT))
15025 noway_assert(op2->TypeGet() == TYP_STRUCT);
15026 op2->gtType = info.compRetNativeType;
15027 restoreType = true;
15030 impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(),
15031 (unsigned)CHECK_SPILL_ALL);
15033 GenTreePtr tmpOp2 = gtNewLclvNode(lvaInlineeReturnSpillTemp, op2->TypeGet());
15037 op2->gtType = TYP_STRUCT; // restore it to what it was
15043 if (impInlineInfo->retExpr)
15045 // Some other block(s) have seen the CEE_RET first.
15046 // Better they spilled to the same temp.
15047 assert(impInlineInfo->retExpr->gtOper == GT_LCL_VAR);
15048 assert(impInlineInfo->retExpr->gtLclVarCommon.gtLclNum == op2->gtLclVarCommon.gtLclNum);
15056 printf("\n\n Inlinee Return expression (after normalization) =>\n");
15061 // Report the return expression
15062 impInlineInfo->retExpr = op2;
15066 // compRetNativeType is TYP_STRUCT.
15067 // This implies a struct return via a RetBuf arg or a multi-reg struct return.
15069 GenTreePtr iciCall = impInlineInfo->iciCall;
15070 assert(iciCall->gtOper == GT_CALL);
15072 // Assign the inlinee return into a spill temp.
15073 // spill temp only exists if there are multiple return points
15074 if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15076 // in this case we have to insert multiple struct copies to the temp
15077 // and the retexpr is just the temp.
15078 assert(info.compRetNativeType != TYP_VOID);
15079 assert(fgMoreThanOneReturnBlock() || impInlineInfo->hasPinnedLocals);
15081 impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(),
15082 (unsigned)CHECK_SPILL_ALL);
15085 #if defined(_TARGET_ARM_) || defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15086 #if defined(_TARGET_ARM_)
15087 // TODO-ARM64-NYI: HFA
15088 // TODO-AMD64-Unix and TODO-ARM: once the ARM64 functionality is implemented the
15089 // next ifdefs could be refactored into a single method with the ifdef inside.
15090 if (IsHfa(retClsHnd))
15092 // Same as !IsHfa but just don't bother with impAssignStructPtr.
15093 #else // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15094 ReturnTypeDesc retTypeDesc;
15095 retTypeDesc.InitializeStructReturnType(this, retClsHnd);
15096 unsigned retRegCount = retTypeDesc.GetReturnRegCount();
15098 if (retRegCount != 0)
15100 // If single eightbyte, the return type would have been normalized and there won't be a temp var.
15101 // This code will be called only if the struct return has not been normalized (i.e. 2 eightbytes, the max allowed).
15103 assert(retRegCount == MAX_RET_REG_COUNT);
15104 // Same as !structDesc.passedInRegisters but just don't bother with impAssignStructPtr.
15105 CLANG_FORMAT_COMMENT_ANCHOR;
15106 #endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15108 if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15110 if (!impInlineInfo->retExpr)
15112 #if defined(_TARGET_ARM_)
15113 impInlineInfo->retExpr = gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType);
15114 #else // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15115 // The inlinee compiler has figured out the type of the temp already. Use it here.
15116 impInlineInfo->retExpr =
15117 gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType);
15118 #endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15123 impInlineInfo->retExpr = op2;
15127 #elif defined(_TARGET_ARM64_)
15128 ReturnTypeDesc retTypeDesc;
15129 retTypeDesc.InitializeStructReturnType(this, retClsHnd);
15130 unsigned retRegCount = retTypeDesc.GetReturnRegCount();
15132 if (retRegCount != 0)
15134 assert(!iciCall->AsCall()->HasRetBufArg());
15135 assert(retRegCount >= 2);
15136 if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15138 if (!impInlineInfo->retExpr)
15140 // The inlinee compiler has figured out the type of the temp already. Use it here.
15141 impInlineInfo->retExpr =
15142 gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType);
15147 impInlineInfo->retExpr = op2;
15151 #endif // defined(_TARGET_ARM64_)
15153 assert(iciCall->AsCall()->HasRetBufArg());
15154 GenTreePtr dest = gtCloneExpr(iciCall->gtCall.gtCallArgs->gtOp.gtOp1);
15155 // spill temp only exists if there are multiple return points
15156 if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15159 // if this is the first return we have seen, set the retExpr
15159 if (!impInlineInfo->retExpr)
15161 impInlineInfo->retExpr =
15162 impAssignStructPtr(dest, gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType),
15163 retClsHnd, (unsigned)CHECK_SPILL_ALL);
15168 impInlineInfo->retExpr = impAssignStructPtr(dest, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL);
15175 if (compIsForInlining())
15180 if (info.compRetType == TYP_VOID)
15183 op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID);
15185 else if (info.compRetBuffArg != BAD_VAR_NUM)
15187 // Assign value to return buff (first param)
15188 GenTreePtr retBuffAddr = gtNewLclvNode(info.compRetBuffArg, TYP_BYREF, impCurStmtOffs);
15190 op2 = impAssignStructPtr(retBuffAddr, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL);
15191 impAppendTree(op2, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
15193 // There are cases where the address of the implicit RetBuf should be returned explicitly (in RAX).
15194 CLANG_FORMAT_COMMENT_ANCHOR;
15196 #if defined(_TARGET_AMD64_)
15198 // The x64 (System V and Win64) calling convention requires us to
15199 // return the implicit return buffer explicitly (in RAX).
15200 // Change the return type to be BYREF.
15201 op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF));
15202 #else // !defined(_TARGET_AMD64_)
15203 // On non-AMD64 targets, the profiler hook requires us to return the implicit RetBuf explicitly (in RAX).
15204 // In that case the return type of the function is changed to BYREF.
15205 // If profiler hook is not needed the return type of the function is TYP_VOID.
15206 if (compIsProfilerHookNeeded())
15208 op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF));
15213 op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID);
15215 #endif // !defined(_TARGET_AMD64_)
15217 else if (varTypeIsStruct(info.compRetType))
15219 #if !FEATURE_MULTIREG_RET
15220 // For both ARM architectures the HFA native types are maintained as structs.
15221 // On System V AMD64, multireg struct returns are likewise left as structs.
15222 noway_assert(info.compRetNativeType != TYP_STRUCT);
15224 op2 = impFixupStructReturnType(op2, retClsHnd);
15226 op1 = gtNewOperNode(GT_RETURN, genActualType(info.compRetNativeType), op2);
15231 op1 = gtNewOperNode(GT_RETURN, genActualType(info.compRetType), op2);
15234 // We must have imported a tailcall and jumped to RET
15235 if (prefixFlags & PREFIX_TAILCALL)
15237 #ifndef _TARGET_AMD64_
15239 // This cannot be asserted on Amd64 since we permit the following IL pattern:
15243 assert(verCurrentState.esStackDepth == 0 && impOpcodeIsCallOpcode(opcode));
15246 opcode = CEE_RET; // To prevent trying to spill if CALL_SITE_BOUNDARIES
15248 // impImportCall() would have already appended TYP_VOID calls
15249 if (info.compRetType == TYP_VOID)
15255 impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
15257 // Remember at which BC offset the tree was finished
15258 impNoteLastILoffs();
15263 /*****************************************************************************
15264 * Mark the block as unimported.
15265 * Note that the caller is responsible for calling impImportBlockPending(),
15266 * with the appropriate stack-state
15269 inline void Compiler::impReimportMarkBlock(BasicBlock* block)
15272 if (verbose && (block->bbFlags & BBF_IMPORTED))
15274 printf("\nBB%02u will be reimported\n", block->bbNum);
15278 block->bbFlags &= ~BBF_IMPORTED;
15281 /*****************************************************************************
15282 * Mark the successors of the given block as unimported.
15283 * Note that the caller is responsible for calling impImportBlockPending()
15284 * for all the successors, with the appropriate stack-state.
15287 void Compiler::impReimportMarkSuccessors(BasicBlock* block)
15289 for (unsigned i = 0; i < block->NumSucc(); i++)
15291 impReimportMarkBlock(block->GetSucc(i));
15295 /*****************************************************************************
15297 * Filter wrapper to handle only the passed-in exception code
15301 LONG FilterVerificationExceptions(PEXCEPTION_POINTERS pExceptionPointers, LPVOID lpvParam)
15303 if (pExceptionPointers->ExceptionRecord->ExceptionCode == SEH_VERIFICATION_EXCEPTION)
15305 return EXCEPTION_EXECUTE_HANDLER;
15308 return EXCEPTION_CONTINUE_SEARCH;
15311 void Compiler::impVerifyEHBlock(BasicBlock* block, bool isTryStart)
15313 assert(block->hasTryIndex());
15314 assert(!compIsForInlining());
15316 unsigned tryIndex = block->getTryIndex();
15317 EHblkDsc* HBtab = ehGetDsc(tryIndex);
15321 assert(block->bbFlags & BBF_TRY_BEG);
15323 // The Stack must be empty
15325 if (block->bbStkDepth != 0)
15327 BADCODE("Evaluation stack must be empty on entry into a try block");
15331 // Save the stack contents, we'll need to restore it later
15333 SavedStack blockState;
15334 impSaveStackState(&blockState, false);
15336 while (HBtab != nullptr)
15340 // Are we verifying that an instance constructor properly initializes its 'this' pointer once?
15341 // We do not allow the 'this' pointer to be uninitialized when entering most kinds of try regions
15343 if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
15345 // We trigger an invalid program exception here unless we have a try/fault region.
15347 if (HBtab->HasCatchHandler() || HBtab->HasFinallyHandler() || HBtab->HasFilter())
15350 "The 'this' pointer of an instance constructor is not intialized upon entry to a try region");
15354 // Allow a try/fault region to proceed.
15355 assert(HBtab->HasFaultHandler());
15359 /* Recursively process the handler block */
15360 BasicBlock* hndBegBB = HBtab->ebdHndBeg;
15362 // Construct the proper verification stack state
15363 // either empty or one that contains just
15364 // the Exception Object that we are dealing with
15366 verCurrentState.esStackDepth = 0;
15368 if (handlerGetsXcptnObj(hndBegBB->bbCatchTyp))
15370 CORINFO_CLASS_HANDLE clsHnd;
15372 if (HBtab->HasFilter())
15374 clsHnd = impGetObjectClass();
15378 CORINFO_RESOLVED_TOKEN resolvedToken;
15380 resolvedToken.tokenContext = impTokenLookupContextHandle;
15381 resolvedToken.tokenScope = info.compScopeHnd;
15382 resolvedToken.token = HBtab->ebdTyp;
15383 resolvedToken.tokenType = CORINFO_TOKENKIND_Class;
15384 info.compCompHnd->resolveToken(&resolvedToken);
15386 clsHnd = resolvedToken.hClass;
15389 // push the catch arg on the stack, spilling to a temp if necessary
15390 // Note: can update HBtab->ebdHndBeg!
15391 hndBegBB = impPushCatchArgOnStack(hndBegBB, clsHnd);
15394 // Queue up the handler for importing
15396 impImportBlockPending(hndBegBB);
15398 if (HBtab->HasFilter())
15400 /* @VERIFICATION : Ideally the end-of-filter state should get
15401 propagated to the catch handler. This is an incompleteness,
15402 but is not a security/compliance issue, since the only
15403 interesting state is the 'thisInit' state.
15406 verCurrentState.esStackDepth = 0;
15408 BasicBlock* filterBB = HBtab->ebdFilter;
15410 // push the catch arg on the stack, spilling to a temp if necessary
15411 // Note: can update HBtab->ebdFilter!
15412 filterBB = impPushCatchArgOnStack(filterBB, impGetObjectClass());
15414 impImportBlockPending(filterBB);
15417 else if (verTrackObjCtorInitState && HBtab->HasFaultHandler())
15419 /* Recursively process the handler block */
15421 verCurrentState.esStackDepth = 0;
15423 // Queue up the fault handler for importing
15425 impImportBlockPending(HBtab->ebdHndBeg);
15428 // Now process our enclosing try index (if any)
15430 tryIndex = HBtab->ebdEnclosingTryIndex;
15431 if (tryIndex == EHblkDsc::NO_ENCLOSING_INDEX)
15437 HBtab = ehGetDsc(tryIndex);
15441 // Restore the stack contents
15442 impRestoreStackState(&blockState);
15445 //***************************************************************
15446 // Import the instructions for the given basic block. Perform
15447 // verification, throwing an exception on failure. Push any successor blocks that are enabled for the first
15448 // time, or whose verification pre-state is changed.
15451 #pragma warning(push)
15452 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
15454 void Compiler::impImportBlock(BasicBlock* block)
15456 // BBF_INTERNAL blocks only exist during importation due to EH canonicalization. We need to
15457 // handle them specially. In particular, there is no IL to import for them, but we do need
15458 // to mark them as imported and put their successors on the pending import list.
15459 if (block->bbFlags & BBF_INTERNAL)
15461 JITDUMP("Marking BBF_INTERNAL block BB%02u as BBF_IMPORTED\n", block->bbNum);
15462 block->bbFlags |= BBF_IMPORTED;
15464 for (unsigned i = 0; i < block->NumSucc(); i++)
15466 impImportBlockPending(block->GetSucc(i));
15476 /* Make the block globally available */
15481 /* Initialize the debug variables */
15482 impCurOpcName = "unknown";
15483 impCurOpcOffs = block->bbCodeOffs;
15486 /* Set the current stack state to the merged result */
15487 verResetCurrentState(block, &verCurrentState);
15489 /* Now walk the code and import the IL into GenTrees */
15491 struct FilterVerificationExceptionsParam
15496 FilterVerificationExceptionsParam param;
15498 param.pThis = this;
15499 param.block = block;
15501 PAL_TRY(FilterVerificationExceptionsParam*, pParam, ¶m)
15503 /* @VERIFICATION : For now, the only state propagation from a try
15504 to its handler is the "thisInit" state (the stack is empty at the start of a try).
15505 In general, for state that we track in verification, we need to
15506 model the possibility that an exception might happen at any IL
15507 instruction, so we really need to merge all states that obtain
15508 between IL instructions in a try block into the start states of all handlers.
15511 However we do not allow the 'this' pointer to be uninitialized when
15512 entering most kinds of try regions (only try/fault are allowed to have
15513 an uninitialized this pointer on entry to the try)
15515 Fortunately, the stack is thrown away when an exception
15516 leads to a handler, so we don't have to worry about that.
15517 We DO, however, have to worry about the "thisInit" state.
15518 But only for the try/fault case.
15520 The only allowed transition is from TIS_Uninit to TIS_Init.
15522 So, for the fault handler block of a try/fault region,
15523 we will merge the start state of the try begin
15524 and the post-state of each block that is part of this try region.
15527 // merge the start state of the try begin
15529 if (pParam->block->bbFlags & BBF_TRY_BEG)
15531 pParam->pThis->impVerifyEHBlock(pParam->block, true);
15534 pParam->pThis->impImportBlockCode(pParam->block);
15536 // As discussed above:
15537 // merge the post-state of each block that is part of this try region
15539 if (pParam->block->hasTryIndex())
15541 pParam->pThis->impVerifyEHBlock(pParam->block, false);
15544 PAL_EXCEPT_FILTER(FilterVerificationExceptions)
15546 verHandleVerificationFailure(block DEBUGARG(false));
15550 if (compDonotInline())
15555 assert(!compDonotInline());
15557 markImport = false;
15561 unsigned baseTmp = NO_BASE_TMP; // input temps assigned to successor blocks
15562 bool reimportSpillClique = false;
15563 BasicBlock* tgtBlock = nullptr;
15565 /* If the stack is non-empty, we might have to spill its contents */
15567 if (verCurrentState.esStackDepth != 0)
15569 impBoxTemp = BAD_VAR_NUM; // if a box temp is used in a block that leaves something
15570 // on the stack, its lifetime is hard to determine, simply
15571 // don't reuse such temps.
15573 GenTreePtr addStmt = nullptr;
15575 /* Do the successors of 'block' have any other predecessors?
15576 We do not want to do some of the optimizations related to multiRef
15577 if we can reimport blocks */
15579 unsigned multRef = impCanReimport ? unsigned(~0) : 0;
15581 switch (block->bbJumpKind)
15585 /* Temporarily remove the 'jtrue' from the end of the tree list */
15587 assert(impTreeLast);
15588 assert(impTreeLast->gtOper == GT_STMT);
15589 assert(impTreeLast->gtStmt.gtStmtExpr->gtOper == GT_JTRUE);
15591 addStmt = impTreeLast;
15592 impTreeLast = impTreeLast->gtPrev;
15594 /* Note if the next block has more than one ancestor */
15596 multRef |= block->bbNext->bbRefs;
15598 /* Does the next block have temps assigned? */
15600 baseTmp = block->bbNext->bbStkTempsIn;
15601 tgtBlock = block->bbNext;
15603 if (baseTmp != NO_BASE_TMP)
15608 /* Try the target of the jump then */
15610 multRef |= block->bbJumpDest->bbRefs;
15611 baseTmp = block->bbJumpDest->bbStkTempsIn;
15612 tgtBlock = block->bbJumpDest;
15616 multRef |= block->bbJumpDest->bbRefs;
15617 baseTmp = block->bbJumpDest->bbStkTempsIn;
15618 tgtBlock = block->bbJumpDest;
15622 multRef |= block->bbNext->bbRefs;
15623 baseTmp = block->bbNext->bbStkTempsIn;
15624 tgtBlock = block->bbNext;
15629 BasicBlock** jmpTab;
15632 /* Temporarily remove the GT_SWITCH from the end of the tree list */
15634 assert(impTreeLast);
15635 assert(impTreeLast->gtOper == GT_STMT);
15636 assert(impTreeLast->gtStmt.gtStmtExpr->gtOper == GT_SWITCH);
15638 addStmt = impTreeLast;
15639 impTreeLast = impTreeLast->gtPrev;
15641 jmpCnt = block->bbJumpSwt->bbsCount;
15642 jmpTab = block->bbJumpSwt->bbsDstTab;
15646 tgtBlock = (*jmpTab);
15648 multRef |= tgtBlock->bbRefs;
15650 // Thanks to spill cliques, we should have assigned all or none
15651 assert((baseTmp == NO_BASE_TMP) || (baseTmp == tgtBlock->bbStkTempsIn));
15652 baseTmp = tgtBlock->bbStkTempsIn;
15657 } while (++jmpTab, --jmpCnt);
15661 case BBJ_CALLFINALLY:
15662 case BBJ_EHCATCHRET:
15664 case BBJ_EHFINALLYRET:
15665 case BBJ_EHFILTERRET:
15667 NO_WAY("can't have 'unreached' end of BB with non-empty stack");
15671 noway_assert(!"Unexpected bbJumpKind");
15675 assert(multRef >= 1);
15677 /* Do we have a base temp number? */
15679 bool newTemps = (baseTmp == NO_BASE_TMP);
15683 /* Grab enough temps for the whole stack */
15684 baseTmp = impGetSpillTmpBase(block);
15687 /* Spill all stack entries into temps */
15688 unsigned level, tempNum;
15690 JITDUMP("\nSpilling stack entries into temps\n");
15691 for (level = 0, tempNum = baseTmp; level < verCurrentState.esStackDepth; level++, tempNum++)
15693 GenTreePtr tree = verCurrentState.esStack[level].val;
15695 /* VC generates code where it pushes a byref from one branch, and an int (ldc.i4 0) from
15696 the other. This should merge to a byref in unverifiable code.
15697 However, if the branch which leaves the TYP_I_IMPL on the stack is imported first, the
15698 successor would be imported assuming there was a TYP_I_IMPL on
15699 the stack. Thus the value would not get GC-tracked. Hence,
15700 change the temp to TYP_BYREF and reimport the successors.
15701 Note: We should only allow this in unverifiable code.
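A hedged IL sketch of that pattern:

    ldarg.0
    brtrue.s BYREF_PATH
    ldc.i4.0             // this path pushes an int
    br.s     MERGE
  BYREF_PATH:
    ldloca.s 0           // this path pushes a byref
  MERGE:                 // the shared spill temp must end up TYP_BYREF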
15703 if (tree->gtType == TYP_BYREF && lvaTable[tempNum].lvType == TYP_I_IMPL && !verNeedsVerification())
15705 lvaTable[tempNum].lvType = TYP_BYREF;
15706 impReimportMarkSuccessors(block);
15710 #ifdef _TARGET_64BIT_
15711 if (genActualType(tree->gtType) == TYP_I_IMPL && lvaTable[tempNum].lvType == TYP_INT)
15713 if (tiVerificationNeeded && tgtBlock->bbEntryState != nullptr &&
15714 (tgtBlock->bbFlags & BBF_FAILED_VERIFICATION) == 0)
15716 // Merge the current state into the entry state of block;
15717 // the call to verMergeEntryStates must have changed
15718 // the entry state of the block by merging the int local var
15719 // and the native-int stack entry.
15720 bool changed = false;
15721 if (verMergeEntryStates(tgtBlock, &changed))
15723 impRetypeEntryStateTemps(tgtBlock);
15724 impReimportBlockPending(tgtBlock);
15729 tgtBlock->bbFlags |= BBF_FAILED_VERIFICATION;
15734 // Some other block in the spill clique set this to "int", but now we have "native int".
15735 // Change the type and go back to re-import any blocks that used the wrong type.
15736 lvaTable[tempNum].lvType = TYP_I_IMPL;
15737 reimportSpillClique = true;
15739 else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_I_IMPL)
15741 // Spill clique has decided this should be "native int", but this block only pushes an "int".
15742 // Insert a sign-extension to "native int" so we match the clique.
15743 verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, TYP_I_IMPL);
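// (A sketch: a block that pushes "ldc.i4 0" into a clique whose temp was
// already widened to native int gets a GT_CAST to TYP_I_IMPL here, so every
// predecessor stores the full width into the shared temp.)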
15746 // Consider the case where one branch left a 'byref' on the stack and the other leaves
15747 // an 'int'. On 32-bit, this is allowed (in non-verifiable code) since they are the same
15748 // size. JIT64 managed to make this work on 64-bit. For compatibility, we support JIT64
15749 // behavior instead of asserting and then generating bad code (where we save/restore the
15750 // low 32 bits of a byref pointer to an 'int' sized local). If the 'int' side has been
15751 // imported already, we need to change the type of the local and reimport the spill clique.
15752 // If the 'byref' side has imported, we insert a cast from int to 'native int' to match
15753 // the 'byref' size.
15754 if (!tiVerificationNeeded)
15756 if (genActualType(tree->gtType) == TYP_BYREF && lvaTable[tempNum].lvType == TYP_INT)
15758 // Some other block in the spill clique set this to "int", but now we have "byref".
15759 // Change the type and go back to re-import any blocks that used the wrong type.
15760 lvaTable[tempNum].lvType = TYP_BYREF;
15761 reimportSpillClique = true;
15763 else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_BYREF)
15765 // Spill clique has decided this should be "byref", but this block only pushes an "int".
15766 // Insert a sign-extension to "native int" so we match the clique size.
15767 verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, TYP_I_IMPL);
15770 #endif // _TARGET_64BIT_
15772 #if FEATURE_X87_DOUBLES
15773 // X87 stack doesn't differentiate between float/double
15774 // so promoting is no big deal.
15775 // For everybody else, keep it as float until we have a collision and then promote,
15776 // just like for x64's TYP_INT<->TYP_I_IMPL.
15778 if (multRef > 1 && tree->gtType == TYP_FLOAT)
15780 verCurrentState.esStack[level].val = gtNewCastNode(TYP_DOUBLE, tree, TYP_DOUBLE);
15783 #else // !FEATURE_X87_DOUBLES
15785 if (tree->gtType == TYP_DOUBLE && lvaTable[tempNum].lvType == TYP_FLOAT)
15787 // Some other block in the spill clique set this to "float", but now we have "double".
15788 // Change the type and go back to re-import any blocks that used the wrong type.
15789 lvaTable[tempNum].lvType = TYP_DOUBLE;
15790 reimportSpillClique = true;
15792 else if (tree->gtType == TYP_FLOAT && lvaTable[tempNum].lvType == TYP_DOUBLE)
15794 // Spill clique has decided this should be "double", but this block only pushes a "float".
15795 // Insert a cast to "double" so we match the clique.
15796 verCurrentState.esStack[level].val = gtNewCastNode(TYP_DOUBLE, tree, TYP_DOUBLE);
15799 #endif // FEATURE_X87_DOUBLES
15801 /* If addStmt has a reference to tempNum (can only happen if we
15802 are spilling to the temps already used by a previous block),
15803 we need to spill addStmt */
15805 if (addStmt && !newTemps && gtHasRef(addStmt->gtStmt.gtStmtExpr, tempNum, false))
15807 GenTreePtr addTree = addStmt->gtStmt.gtStmtExpr;
15809 if (addTree->gtOper == GT_JTRUE)
15811 GenTreePtr relOp = addTree->gtOp.gtOp1;
15812 assert(relOp->OperIsCompare());
15814 var_types type = genActualType(relOp->gtOp.gtOp1->TypeGet());
15816 if (gtHasRef(relOp->gtOp.gtOp1, tempNum, false))
15818 unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op1"));
15819 impAssignTempGen(temp, relOp->gtOp.gtOp1, level);
15820 type = genActualType(lvaTable[temp].TypeGet());
15821 relOp->gtOp.gtOp1 = gtNewLclvNode(temp, type);
15824 if (gtHasRef(relOp->gtOp.gtOp2, tempNum, false))
15826 unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op2"));
15827 impAssignTempGen(temp, relOp->gtOp.gtOp2, level);
15828 type = genActualType(lvaTable[temp].TypeGet());
15829 relOp->gtOp.gtOp2 = gtNewLclvNode(temp, type);
15834 assert(addTree->gtOper == GT_SWITCH && genActualType(addTree->gtOp.gtOp1->gtType) == TYP_I_IMPL);
15836 unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt SWITCH"));
15837 impAssignTempGen(temp, addTree->gtOp.gtOp1, level);
15838 addTree->gtOp.gtOp1 = gtNewLclvNode(temp, TYP_I_IMPL);
15842 /* Spill the stack entry, and replace with the temp */
15844 if (!impSpillStackEntry(level, tempNum
15847 true, "Spill Stack Entry"
15853 BADCODE("bad stack state");
15856 // Oops. Something went wrong when spilling. Bad code.
15857 verHandleVerificationFailure(block DEBUGARG(true));
15863 /* Put back the 'jtrue'/'switch' if we removed it earlier */
15867 impAppendStmt(addStmt, (unsigned)CHECK_SPILL_NONE);
15871 // Some of the append/spill logic works on compCurBB
15873 assert(compCurBB == block);
15875 /* Save the tree list in the block */
15876 impEndTreeList(block);
15878 // impEndTreeList sets BBF_IMPORTED on the block
15879 // We do *NOT* want to set it later than this because
15880 // impReimportSpillClique might clear it if this block is both a
15881 // predecessor and successor in the current spill clique
15882 assert(block->bbFlags & BBF_IMPORTED);
15884 // If we had a int/native int, or float/double collision, we need to re-import
15885 if (reimportSpillClique)
15887 // This will re-import all the successors of block (as well as each of their predecessors)
15888 impReimportSpillClique(block);
15890 // For blocks that haven't been imported yet, we still need to mark them as pending import.
15891 for (unsigned i = 0; i < block->NumSucc(); i++)
15893 BasicBlock* succ = block->GetSucc(i);
15894 if ((succ->bbFlags & BBF_IMPORTED) == 0)
15896 impImportBlockPending(succ);
15900 else // the normal case
15902 // otherwise just import the successors of block
15904 /* Does this block jump to any other blocks? */
15905 for (unsigned i = 0; i < block->NumSucc(); i++)
15907 impImportBlockPending(block->GetSucc(i));
15912 #pragma warning(pop)
15915 /*****************************************************************************/
15917 // Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if
15918 // necessary (and ensures that it is a member of the set of BB's on the list, by setting its byte in
15919 // impPendingBlockMembers). Merges the current verification state into the verification state of "block"
15920 // (its "pre-state").
15922 void Compiler::impImportBlockPending(BasicBlock* block)
15927 printf("\nimpImportBlockPending for BB%02u\n", block->bbNum);
15931 // We will add a block to the pending set if it has not already been imported (or needs to be re-imported),
15932 // or if it has, but merging in a predecessor's post-state changes the block's pre-state.
15933 // (When we're doing verification, we always attempt the merge to detect verification errors.)
15935 // If the block has not been imported, add to pending set.
15936 bool addToPending = ((block->bbFlags & BBF_IMPORTED) == 0);
15938 // Initialize bbEntryState just the first time we try to add this block to the pending list
15939 // Just because bbEntryState is NULL doesn't mean the pre-state wasn't previously set.
15940 // We use NULL to indicate the 'common' state to avoid memory allocation.
15941 if ((block->bbEntryState == nullptr) && ((block->bbFlags & (BBF_IMPORTED | BBF_FAILED_VERIFICATION)) == 0) &&
15942 (impGetPendingBlockMember(block) == 0))
15944 verInitBBEntryState(block, &verCurrentState);
15945 assert(block->bbStkDepth == 0);
15946 block->bbStkDepth = static_cast<unsigned short>(verCurrentState.esStackDepth);
15947 assert(addToPending);
15948 assert(impGetPendingBlockMember(block) == 0);
15952 // The stack should have the same height on entry to the block from all its predecessors.
15953 if (block->bbStkDepth != verCurrentState.esStackDepth)
15957 sprintf_s(buffer, sizeof(buffer),
15958 "Block at offset %4.4x to %4.4x in %s entered with different stack depths.\n"
15959 "Previous depth was %d, current depth is %d",
15960 block->bbCodeOffs, block->bbCodeOffsEnd, info.compFullName, block->bbStkDepth,
15961 verCurrentState.esStackDepth);
15962 buffer[400 - 1] = 0;
15965 NO_WAY("Block entered with different stack depths");
15969 // Additionally, if we need to verify, merge the verification state.
15970 if (tiVerificationNeeded)
15972 // Merge the current state into the entry state of block; if this does not change the entry state
15973 // by merging, do not add the block to the pending-list.
15974 bool changed = false;
15975 if (!verMergeEntryStates(block, &changed))
15977 block->bbFlags |= BBF_FAILED_VERIFICATION;
15978 addToPending = true; // We will pop it off, and check the flag set above.
15982 addToPending = true;
15984 JITDUMP("Adding BB%02u to pending set due to new merge result\n", block->bbNum);
15993 if (block->bbStkDepth > 0)
15995 // We need to fix the types of any spill temps that might have changed:
15996 // int->native int, float->double, int->byref, etc.
15997 impRetypeEntryStateTemps(block);
16000 // OK, we must add to the pending list, if it's not already in it.
16001 if (impGetPendingBlockMember(block) != 0)
16007 // Get an entry to add to the pending list
16011 if (impPendingFree)
16013 // We can reuse one of the freed up dscs.
16014 dsc = impPendingFree;
16015 impPendingFree = dsc->pdNext;
16019 // We have to create a new dsc
16020 dsc = new (this, CMK_Unknown) PendingDsc;
16024 dsc->pdSavedStack.ssDepth = verCurrentState.esStackDepth;
16025 dsc->pdThisPtrInit = verCurrentState.thisInitialized;
16027 // Save the stack trees for later
16029 if (verCurrentState.esStackDepth)
16031 impSaveStackState(&dsc->pdSavedStack, false);
16034 // Add the entry to the pending list
16036 dsc->pdNext = impPendingList;
16037 impPendingList = dsc;
16038 impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set.
16040 // Various assertions require us to now consider the block as not imported (at least for
16041 // the final time...)
16042 block->bbFlags &= ~BBF_IMPORTED;
16047 printf("Added PendingDsc - %08p for BB%02u\n", dspPtr(dsc), block->bbNum);
16052 /*****************************************************************************/
16054 // Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if
16055 // necessary (and ensures that it is a member of the set of BB's on the list, by setting its byte in
16056 // impPendingBlockMembers). Does *NOT* change the existing "pre-state" of the block.
16058 void Compiler::impReimportBlockPending(BasicBlock* block)
16060 JITDUMP("\nimpReimportBlockPending for BB%02u", block->bbNum);
16062 assert(block->bbFlags & BBF_IMPORTED);
16064 // OK, we must add to the pending list, if it's not already in it.
16065 if (impGetPendingBlockMember(block) != 0)
16070 // Get an entry to add to the pending list
16074 if (impPendingFree)
16076 // We can reuse one of the freed up dscs.
16077 dsc = impPendingFree;
16078 impPendingFree = dsc->pdNext;
16082 // We have to create a new dsc
16083 dsc = new (this, CMK_ImpStack) PendingDsc;
16088 if (block->bbEntryState)
16090 dsc->pdThisPtrInit = block->bbEntryState->thisInitialized;
16091 dsc->pdSavedStack.ssDepth = block->bbEntryState->esStackDepth;
16092 dsc->pdSavedStack.ssTrees = block->bbEntryState->esStack;
16096 dsc->pdThisPtrInit = TIS_Bottom;
16097 dsc->pdSavedStack.ssDepth = 0;
16098 dsc->pdSavedStack.ssTrees = nullptr;
16101 // Add the entry to the pending list
16103 dsc->pdNext = impPendingList;
16104 impPendingList = dsc;
16105 impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set.
16107 // Various assertions require us to now consider the block as not imported (at least for
16108 // the final time...)
16109 block->bbFlags &= ~BBF_IMPORTED;
16114 printf("Added PendingDsc - %08p for BB%02u\n", dspPtr(dsc), block->bbNum);
16119 void* Compiler::BlockListNode::operator new(size_t sz, Compiler* comp)
16121 if (comp->impBlockListNodeFreeList == nullptr)
16123 return (BlockListNode*)comp->compGetMem(sizeof(BlockListNode), CMK_BasicBlock);
16127 BlockListNode* res = comp->impBlockListNodeFreeList;
16128 comp->impBlockListNodeFreeList = res->m_next;
16133 void Compiler::FreeBlockListNode(Compiler::BlockListNode* node)
16135 node->m_next = impBlockListNodeFreeList;
16136 impBlockListNodeFreeList = node;
16139 void Compiler::impWalkSpillCliqueFromPred(BasicBlock* block, SpillCliqueWalker* callback)
16143 noway_assert(!fgComputePredsDone);
16144 if (!fgCheapPredsValid)
16146 fgComputeCheapPreds();
16149 BlockListNode* succCliqueToDo = nullptr;
16150 BlockListNode* predCliqueToDo = new (this) BlockListNode(block);
16154 // Look at the successors of every member of the predecessor to-do list.
16155 while (predCliqueToDo != nullptr)
16157 BlockListNode* node = predCliqueToDo;
16158 predCliqueToDo = node->m_next;
16159 BasicBlock* blk = node->m_blk;
16160 FreeBlockListNode(node);
16162 for (unsigned succNum = 0; succNum < blk->NumSucc(); succNum++)
16164 BasicBlock* succ = blk->GetSucc(succNum);
16165 // If it's not already in the clique, add it, and also add it
16166 // as a member of the successor "toDo" set.
16167 if (impSpillCliqueGetMember(SpillCliqueSucc, succ) == 0)
16169 callback->Visit(SpillCliqueSucc, succ);
16170 impSpillCliqueSetMember(SpillCliqueSucc, succ, 1);
16171 succCliqueToDo = new (this) BlockListNode(succ, succCliqueToDo);
16176 // Look at the predecessors of every member of the successor to-do list.
16177 while (succCliqueToDo != nullptr)
16179 BlockListNode* node = succCliqueToDo;
16180 succCliqueToDo = node->m_next;
16181 BasicBlock* blk = node->m_blk;
16182 FreeBlockListNode(node);
16184 for (BasicBlockList* pred = blk->bbCheapPreds; pred != nullptr; pred = pred->next)
16186 BasicBlock* predBlock = pred->block;
16187 // If it's not already in the clique, add it, and also add it
16188 // as a member of the predecessor "toDo" set.
16189 if (impSpillCliqueGetMember(SpillCliquePred, predBlock) == 0)
16191 callback->Visit(SpillCliquePred, predBlock);
16192 impSpillCliqueSetMember(SpillCliquePred, predBlock, 1);
16193 predCliqueToDo = new (this) BlockListNode(predBlock, predCliqueToDo);
16200 // If this fails, it means we didn't walk the spill clique properly and somehow managed
16201 // to miss walking back to include the predecessor we started from.
16202 // The most likely cause: missing or out-of-date bbPreds.
16203 assert(impSpillCliqueGetMember(SpillCliquePred, block) != 0);
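// Illustrative example (hypothetical flowgraph, not from any particular test):
// suppose B1 and B2 both branch to B3 with a non-empty stack, and B2 also
// branches to B4. Walking from pred B1 finds succ B3; B3's preds pull in B2;
// B2's succs pull in B4. The resulting clique has {B1, B2} as predecessors and
// {B3, B4} as successors, and all of them must agree on the temps used for the
// live stack entries crossing those edges.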
16206 void Compiler::SetSpillTempsBase::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk)
16208 if (predOrSucc == SpillCliqueSucc)
16210 assert(blk->bbStkTempsIn == NO_BASE_TMP); // Should not already be a member of a clique as a successor.
16211 blk->bbStkTempsIn = m_baseTmp;
16215 assert(predOrSucc == SpillCliquePred);
16216 assert(blk->bbStkTempsOut == NO_BASE_TMP); // Should not already be a member of a clique as a predecessor.
16217 blk->bbStkTempsOut = m_baseTmp;
16221 void Compiler::ReimportSpillClique::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk)
16223 // For Preds we could be a little smarter and just find the existing store
16224 // and re-type it/add a cast, but that is complicated and hopefully very rare, so
16225 // just re-import the whole block (just like we do for successors)
16227 if (((blk->bbFlags & BBF_IMPORTED) == 0) && (m_pComp->impGetPendingBlockMember(blk) == 0))
16229 // If we haven't imported this block and we're not going to (because it isn't on
16230 // the pending list) then just ignore it for now.
16232 // This block has either never been imported (EntryState == NULL) or it failed
16233 // verification. Neither state requires us to force it to be imported now.
16234 assert((blk->bbEntryState == nullptr) || (blk->bbFlags & BBF_FAILED_VERIFICATION));
16238 // For successors we have a valid verCurrentState, so just mark them for reimport
16239 // the 'normal' way
16240 // Unlike predecessors, we *DO* need to reimport the current block because the
16241 // initial import had the wrong entry state types.
16242 // Similarly, blocks that are currently on the pending list, still need to call
16243 // impImportBlockPending to fixup their entry state.
16244 if (predOrSucc == SpillCliqueSucc)
16246 m_pComp->impReimportMarkBlock(blk);
16248 // Set the current stack state to that of the blk->bbEntryState
16249 m_pComp->verResetCurrentState(blk, &m_pComp->verCurrentState);
16250 assert(m_pComp->verCurrentState.thisInitialized == blk->bbThisOnEntry());
16252 m_pComp->impImportBlockPending(blk);
16254 else if ((blk != m_pComp->compCurBB) && ((blk->bbFlags & BBF_IMPORTED) != 0))
16256 // As described above, we are only visiting predecessors so they can
17257 // add the appropriate casts; since we have already done that for the current
16258 // block, it does not need to be reimported.
16259 // Nor do we need to reimport blocks that are still pending, but not yet imported.
16262 // For predecessors, we have no state to seed the EntryState, so we just have
16263 // to assume the existing one is correct.
16264 // If the block is also a successor, it will get the EntryState properly
16265 // updated when it is visited as a successor in the above "if" block.
16266 assert(predOrSucc == SpillCliquePred);
16267 m_pComp->impReimportBlockPending(blk);
16271 // Re-type the incoming lclVar nodes to match the varDsc.
16272 void Compiler::impRetypeEntryStateTemps(BasicBlock* blk)
16274 if (blk->bbEntryState != nullptr)
16276 EntryState* es = blk->bbEntryState;
16277 for (unsigned level = 0; level < es->esStackDepth; level++)
16279 GenTreePtr tree = es->esStack[level].val;
16280 if ((tree->gtOper == GT_LCL_VAR) || (tree->gtOper == GT_LCL_FLD))
16282 unsigned lclNum = tree->gtLclVarCommon.gtLclNum;
16283 noway_assert(lclNum < lvaCount);
16284 LclVarDsc* varDsc = lvaTable + lclNum;
16285 es->esStack[level].val->gtType = varDsc->TypeGet();
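// For example (a sketch): if spill temp V05 was first typed TYP_INT but was
// later widened to TYP_LONG (int -> native int) when the spill clique was
// re-typed, a GT_LCL_VAR(V05) node already recorded in some block's entry
// stack would still claim TYP_INT; the loop above rewrites such nodes so the
// saved entry state agrees with the local's current type.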
16291 unsigned Compiler::impGetSpillTmpBase(BasicBlock* block)
16293 if (block->bbStkTempsOut != NO_BASE_TMP)
16295 return block->bbStkTempsOut;
16301 printf("\n*************** In impGetSpillTmpBase(BB%02u)\n", block->bbNum);
16305 // Otherwise, choose one, and propagate to all members of the spill clique.
16306 // Grab enough temps for the whole stack.
16307 unsigned baseTmp = lvaGrabTemps(verCurrentState.esStackDepth DEBUGARG("IL Stack Entries"));
16308 SetSpillTempsBase callback(baseTmp);
16310 // We do *NOT* need to reset the SpillClique*Members because a block can only be the predecessor
16311 // to one spill clique, and similarly can only be the successor to one spill clique
16312 impWalkSpillCliqueFromPred(block, &callback);
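// A sketch of what the shared base temp buys us (illustrative temp numbers):
// with baseTmp == T and two live stack slots, every predecessor in the clique
// spills
//     T+0 = <slot 0>; T+1 = <slot 1>;
// and every successor reloads
//     push T+0; push T+1;
// so the stack contents survive the block boundary no matter which
// predecessor edge was taken at run time.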
16317 void Compiler::impReimportSpillClique(BasicBlock* block)
16322 printf("\n*************** In impReimportSpillClique(BB%02u)\n", block->bbNum);
16326 // If we get here, it is because this block is already part of a spill clique
16327 // and one predecessor had an outgoing live stack slot of type int, and this
16328 // block has an outgoing live stack slot of type native int.
16329 // We need to reset these before traversal because they have already been set
16330 // by the previous walk to determine all the members of the spill clique.
16331 impInlineRoot()->impSpillCliquePredMembers.Reset();
16332 impInlineRoot()->impSpillCliqueSuccMembers.Reset();
16334 ReimportSpillClique callback(this);
16336 impWalkSpillCliqueFromPred(block, &callback);
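// Concrete scenario (hypothetical IL): predecessor P1 leaves a 32-bit int in
// a live stack slot while predecessor P2 leaves a native int in the same
// slot. The spill temp was typed from whichever block was imported first;
// once the wider type is discovered, the temp is widened and the clique is
// re-walked here so every member spills and reloads with the widened type.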
16339 // Set the pre-state of "block" (which should not have a pre-state allocated) to
16340 // a copy of "srcState", cloning tree pointers as required.
16341 void Compiler::verInitBBEntryState(BasicBlock* block, EntryState* srcState)
16343 if (srcState->esStackDepth == 0 && srcState->thisInitialized == TIS_Bottom)
16345 block->bbEntryState = nullptr;
16349 block->bbEntryState = (EntryState*)compGetMemA(sizeof(EntryState));
16351 // block->bbEntryState.esRefcount = 1;
16353 block->bbEntryState->esStackDepth = srcState->esStackDepth;
16354 block->bbEntryState->thisInitialized = TIS_Bottom;
16356 if (srcState->esStackDepth > 0)
16358 block->bbSetStack(new (this, CMK_Unknown) StackEntry[srcState->esStackDepth]);
16359 unsigned stackSize = srcState->esStackDepth * sizeof(StackEntry);
16361 memcpy(block->bbEntryState->esStack, srcState->esStack, stackSize);
16362 for (unsigned level = 0; level < srcState->esStackDepth; level++)
16364 GenTreePtr tree = srcState->esStack[level].val;
16365 block->bbEntryState->esStack[level].val = gtCloneExpr(tree);
16369 if (verTrackObjCtorInitState)
16371 verSetThisInit(block, srcState->thisInitialized);
16377 void Compiler::verSetThisInit(BasicBlock* block, ThisInitState tis)
16379 assert(tis != TIS_Bottom); // Precondition.
16380 if (block->bbEntryState == nullptr)
16382 block->bbEntryState = new (this, CMK_Unknown) EntryState();
16385 block->bbEntryState->thisInitialized = tis;
16389 * Resets the current state to the state at the start of the basic block
16391 void Compiler::verResetCurrentState(BasicBlock* block, EntryState* destState)
16394 if (block->bbEntryState == nullptr)
16396 destState->esStackDepth = 0;
16397 destState->thisInitialized = TIS_Bottom;
16401 destState->esStackDepth = block->bbEntryState->esStackDepth;
16403 if (destState->esStackDepth > 0)
16405 unsigned stackSize = destState->esStackDepth * sizeof(StackEntry);
16407 memcpy(destState->esStack, block->bbStackOnEntry(), stackSize);
16410 destState->thisInitialized = block->bbThisOnEntry();
16415 ThisInitState BasicBlock::bbThisOnEntry()
16417 return bbEntryState ? bbEntryState->thisInitialized : TIS_Bottom;
16420 unsigned BasicBlock::bbStackDepthOnEntry()
16422 return (bbEntryState ? bbEntryState->esStackDepth : 0);
16425 void BasicBlock::bbSetStack(void* stackBuffer)
16427 assert(bbEntryState);
16428 assert(stackBuffer);
16429 bbEntryState->esStack = (StackEntry*)stackBuffer;
16432 StackEntry* BasicBlock::bbStackOnEntry()
16434 assert(bbEntryState);
16435 return bbEntryState->esStack;
16438 void Compiler::verInitCurrentState()
16440 verTrackObjCtorInitState = FALSE;
16441 verCurrentState.thisInitialized = TIS_Bottom;
16443 if (tiVerificationNeeded)
16445 // Track this ptr initialization
16446 if (!info.compIsStatic && (info.compFlags & CORINFO_FLG_CONSTRUCTOR) && lvaTable[0].lvVerTypeInfo.IsObjRef())
16448 verTrackObjCtorInitState = TRUE;
16449 verCurrentState.thisInitialized = TIS_Uninit;
16453 // initialize stack info
16455 verCurrentState.esStackDepth = 0;
16456 assert(verCurrentState.esStack != nullptr);
16458 // copy current state to entry state of first BB
16459 verInitBBEntryState(fgFirstBB, &verCurrentState);
16462 Compiler* Compiler::impInlineRoot()
16464 if (impInlineInfo == nullptr)
16470 return impInlineInfo->InlineRoot;
16474 BYTE Compiler::impSpillCliqueGetMember(SpillCliqueDir predOrSucc, BasicBlock* blk)
16476 if (predOrSucc == SpillCliquePred)
16478 return impInlineRoot()->impSpillCliquePredMembers.Get(blk->bbInd());
16482 assert(predOrSucc == SpillCliqueSucc);
16483 return impInlineRoot()->impSpillCliqueSuccMembers.Get(blk->bbInd());
16487 void Compiler::impSpillCliqueSetMember(SpillCliqueDir predOrSucc, BasicBlock* blk, BYTE val)
16489 if (predOrSucc == SpillCliquePred)
16491 impInlineRoot()->impSpillCliquePredMembers.Set(blk->bbInd(), val);
16495 assert(predOrSucc == SpillCliqueSucc);
16496 impInlineRoot()->impSpillCliqueSuccMembers.Set(blk->bbInd(), val);
16500 /*****************************************************************************
16502 * Convert the instrs ("import") into our internal format (trees). The
16503 * basic flowgraph has already been constructed and is passed in.
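 *
 *  A minimal sketch of the worklist scheme implemented below (illustrative
 *  pseudocode, not the literal control flow):
 *
 *      impImportBlockPending(entryBlock);
 *      while (impPendingList != nullptr)
 *      {
 *          PendingDsc* dsc = <pop head of impPendingList>;
 *          <restore dsc's saved stack state>;
 *          impImportBlock(dsc->pdBB);   // may push successors back on the list
 *      }
 *
 *  Blocks can be queued more than once (e.g. when spill temp types change),
 *  so progress relies on the per-block entry states reaching a fixed point.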
16506 void Compiler::impImport(BasicBlock* method)
16511 printf("*************** In impImport() for %s\n", info.compFullName);
16515 /* Allocate the stack contents */
16517 if (info.compMaxStack <= sizeof(impSmallStack) / sizeof(impSmallStack[0]))
16519 /* Use local variable, don't waste time allocating on the heap */
16521 impStkSize = sizeof(impSmallStack) / sizeof(impSmallStack[0]);
16522 verCurrentState.esStack = impSmallStack;
16526 impStkSize = info.compMaxStack;
16527 verCurrentState.esStack = new (this, CMK_ImpStack) StackEntry[impStkSize];
16530 // initialize the entry state at start of method
16531 verInitCurrentState();
16533 // Initialize stuff related to figuring "spill cliques" (see spec comment for impGetSpillTmpBase).
16534 Compiler* inlineRoot = impInlineRoot();
16535 if (this == inlineRoot) // These are only used on the root of the inlining tree.
16537 // We have initialized these previously, but to size 0. Make them larger.
16538 impPendingBlockMembers.Init(getAllocator(), fgBBNumMax * 2);
16539 impSpillCliquePredMembers.Init(getAllocator(), fgBBNumMax * 2);
16540 impSpillCliqueSuccMembers.Init(getAllocator(), fgBBNumMax * 2);
16542 inlineRoot->impPendingBlockMembers.Reset(fgBBNumMax * 2);
16543 inlineRoot->impSpillCliquePredMembers.Reset(fgBBNumMax * 2);
16544 inlineRoot->impSpillCliqueSuccMembers.Reset(fgBBNumMax * 2);
16545 impBlockListNodeFreeList = nullptr;
16548 impLastILoffsStmt = nullptr;
16549 impNestedStackSpill = false;
16551 impBoxTemp = BAD_VAR_NUM;
16553 impPendingList = impPendingFree = nullptr;
16555 /* Add the entry-point to the worker-list */
16557 // Skip leading internal blocks. There can be one as a leading scratch BB, and more
16558 // from EH normalization.
16559 // NOTE: It might be possible to always just put fgFirstBB on the pending list, and let everything else just fall out.
16561 for (; method->bbFlags & BBF_INTERNAL; method = method->bbNext)
16563 // Treat these as imported.
16564 assert(method->bbJumpKind == BBJ_NONE); // We assume all the leading ones are fallthrough.
16565 JITDUMP("Marking leading BBF_INTERNAL block BB%02u as BBF_IMPORTED\n", method->bbNum);
16566 method->bbFlags |= BBF_IMPORTED;
16569 impImportBlockPending(method);
16571 /* Import blocks in the worker-list until there are no more */
16573 while (impPendingList)
16575 /* Remove the entry at the front of the list */
16577 PendingDsc* dsc = impPendingList;
16578 impPendingList = impPendingList->pdNext;
16579 impSetPendingBlockMember(dsc->pdBB, 0);
16581 /* Restore the stack state */
16583 verCurrentState.thisInitialized = dsc->pdThisPtrInit;
16584 verCurrentState.esStackDepth = dsc->pdSavedStack.ssDepth;
16585 if (verCurrentState.esStackDepth)
16587 impRestoreStackState(&dsc->pdSavedStack);
16590 /* Add the entry to the free list for reuse */
16592 dsc->pdNext = impPendingFree;
16593 impPendingFree = dsc;
16595 /* Now import the block */
16597 if (dsc->pdBB->bbFlags & BBF_FAILED_VERIFICATION)
16600 #ifdef _TARGET_64BIT_
16601 // On AMD64, during verification we have to match JIT64 behavior since the VM is very tightly
16602 // coupled with the JIT64 IL Verification logic. Look inside verHandleVerificationFailure
16603 // method for further explanation on why we raise this exception instead of making the jitted
16604 // code throw the verification exception during execution.
16605 if (tiVerificationNeeded && opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IMPORT_ONLY))
16607 BADCODE("Basic block marked as not verifiable");
16610 #endif // _TARGET_64BIT_
16612 verConvertBBToThrowVerificationException(dsc->pdBB DEBUGARG(true));
16613 impEndTreeList(dsc->pdBB);
16618 impImportBlock(dsc->pdBB);
16620 if (compDonotInline())
16624 if (compIsForImportOnly() && !tiVerificationNeeded)
16632 if (verbose && info.compXcptnsCount)
16634 printf("\nAfter impImport() added block for try,catch,finally");
16635 fgDispBasicBlocks();
16639 // Used in impImportBlockPending() for STRESS_CHK_REIMPORT
16640 for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
16642 block->bbFlags &= ~BBF_VISITED;
16646 assert(!compIsForInlining() || !tiVerificationNeeded);
16649 // Checks if a typeinfo (usually stored in the type stack) is a struct.
16650 // The invariant here is that if it's not a ref or a method and has a class handle,
16651 // it's a value type.
16652 bool Compiler::impIsValueType(typeInfo* pTypeInfo)
16654 if (pTypeInfo && pTypeInfo->IsValueClassWithClsHnd())
16664 /*****************************************************************************
16665 * Check to see if the tree is the address of a local or
16666 the address of a field in a local.
16668 *lclVarTreeOut will contain the GT_LCL_VAR tree when it returns TRUE.
16672 BOOL Compiler::impIsAddressInLocal(GenTreePtr tree, GenTreePtr* lclVarTreeOut)
16674 if (tree->gtOper != GT_ADDR)
16679 GenTreePtr op = tree->gtOp.gtOp1;
16680 while (op->gtOper == GT_FIELD)
16682 op = op->gtField.gtFldObj;
16683 if (op && op->gtOper == GT_ADDR) // Skip static fields where op will be NULL.
16685 op = op->gtOp.gtOp1;
16693 if (op->gtOper == GT_LCL_VAR)
16695 *lclVarTreeOut = op;
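// Shapes accepted by this function (illustrative):
//     GT_ADDR(GT_LCL_VAR)                          e.g. &local
//     GT_ADDR(GT_FIELD(GT_ADDR(GT_LCL_VAR)))       e.g. &local.field
// with any depth of GT_FIELD nesting. A null gtFldObj (a static field) or a
// non-GT_ADDR field object ends the walk and the function returns FALSE.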
16704 //------------------------------------------------------------------------
16705 // impMakeDiscretionaryInlineObservations: make observations that help
16706 // determine the profitability of a discretionary inline
16709 // pInlineInfo -- InlineInfo for the inline, or null for the prejit root
16710 // inlineResult -- InlineResult accumulating information about this inline
16713 // If inlining or prejitting the root, this method also makes
16714 // various observations about the method that factor into inline
16715 // decisions. It sets `compNativeSizeEstimate` as a side effect.
16717 void Compiler::impMakeDiscretionaryInlineObservations(InlineInfo* pInlineInfo, InlineResult* inlineResult)
16719 assert(pInlineInfo != nullptr && compIsForInlining() || // Perform the actual inlining.
16720 pInlineInfo == nullptr && !compIsForInlining() // Calculate the static inlining hint for ngen.
16723 // If we're really inlining, we should just have one result in play.
16724 assert((pInlineInfo == nullptr) || (inlineResult == pInlineInfo->inlineResult));
16726 // If this is a "forceinline" method, the JIT probably shouldn't have gone
16727 // to the trouble of estimating the native code size. Even if it did, it
16728 // shouldn't be relying on the result of this method.
16729 assert(inlineResult->GetObservation() == InlineObservation::CALLEE_IS_DISCRETIONARY_INLINE);
16731 // Note if the caller contains NEWOBJ or NEWARR.
16732 Compiler* rootCompiler = impInlineRoot();
16734 if ((rootCompiler->optMethodFlags & OMF_HAS_NEWARRAY) != 0)
16736 inlineResult->Note(InlineObservation::CALLER_HAS_NEWARRAY);
16739 if ((rootCompiler->optMethodFlags & OMF_HAS_NEWOBJ) != 0)
16741 inlineResult->Note(InlineObservation::CALLER_HAS_NEWOBJ);
16744 bool calleeIsStatic = (info.compFlags & CORINFO_FLG_STATIC) != 0;
16745 bool isSpecialMethod = (info.compFlags & CORINFO_FLG_CONSTRUCTOR) != 0;
16747 if (isSpecialMethod)
16749 if (calleeIsStatic)
16751 inlineResult->Note(InlineObservation::CALLEE_IS_CLASS_CTOR);
16755 inlineResult->Note(InlineObservation::CALLEE_IS_INSTANCE_CTOR);
16758 else if (!calleeIsStatic)
16760 // Callee is an instance method.
16762 // Check if the callee has the same 'this' as the root.
16763 if (pInlineInfo != nullptr)
16765 GenTreePtr thisArg = pInlineInfo->iciCall->gtCall.gtCallObjp;
16767 bool isSameThis = impIsThis(thisArg);
16768 inlineResult->NoteBool(InlineObservation::CALLSITE_IS_SAME_THIS, isSameThis);
16772 // Note if the callee's class is a promotable struct
16773 if ((info.compClassAttr & CORINFO_FLG_VALUECLASS) != 0)
16775 lvaStructPromotionInfo structPromotionInfo;
16776 lvaCanPromoteStructType(info.compClassHnd, &structPromotionInfo, false);
16777 if (structPromotionInfo.canPromote)
16779 inlineResult->Note(InlineObservation::CALLEE_CLASS_PROMOTABLE);
16783 #ifdef FEATURE_SIMD
16785 // Note if this method has SIMD args or a SIMD return value
16786 if (pInlineInfo != nullptr && pInlineInfo->hasSIMDTypeArgLocalOrReturn)
16788 inlineResult->Note(InlineObservation::CALLEE_HAS_SIMD);
16791 #endif // FEATURE_SIMD
16793 // Roughly classify callsite frequency.
16794 InlineCallsiteFrequency frequency = InlineCallsiteFrequency::UNUSED;
16796 // If this is a prejit root, or a maximally hot block...
16797 if ((pInlineInfo == nullptr) || (pInlineInfo->iciBlock->bbWeight >= BB_MAX_WEIGHT))
16799 frequency = InlineCallsiteFrequency::HOT;
16801 // No training data. Look for loop-like things.
16802 // We consider a recursive call loop-like. Do not give the inlining boost to the method itself.
16803 // However, give it to things nearby.
16804 else if ((pInlineInfo->iciBlock->bbFlags & BBF_BACKWARD_JUMP) &&
16805 (pInlineInfo->fncHandle != pInlineInfo->inlineCandidateInfo->ilCallerHandle))
16807 frequency = InlineCallsiteFrequency::LOOP;
16809 else if ((pInlineInfo->iciBlock->bbFlags & BBF_PROF_WEIGHT) && (pInlineInfo->iciBlock->bbWeight > BB_ZERO_WEIGHT))
16811 frequency = InlineCallsiteFrequency::WARM;
16813 // Now modify the multiplier based on where we're called from.
16814 else if (pInlineInfo->iciBlock->isRunRarely() || ((info.compFlags & FLG_CCTOR) == FLG_CCTOR))
16816 frequency = InlineCallsiteFrequency::RARE;
16820 frequency = InlineCallsiteFrequency::BORING;
16823 // Also capture the block weight of the call site. In the prejit
16824 // root case, assume there's some hot call site for this method.
16825 unsigned weight = 0;
16827 if (pInlineInfo != nullptr)
16829 weight = pInlineInfo->iciBlock->bbWeight;
16833 weight = BB_MAX_WEIGHT;
16836 inlineResult->NoteInt(InlineObservation::CALLSITE_FREQUENCY, static_cast<int>(frequency));
16837 inlineResult->NoteInt(InlineObservation::CALLSITE_WEIGHT, static_cast<int>(weight));
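// Summary of the classification above (restating the code, not new policy):
//   prejit root, or block weight >= BB_MAX_WEIGHT            -> HOT
//   backward-jump (loop-like) block, not directly recursive  -> LOOP
//   profile-weighted block with nonzero weight               -> WARM
//   run-rarely block, or the caller is a class constructor   -> RARE
//   otherwise                                                -> BORING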
16840 /*****************************************************************************
16841 This method makes STATIC inlining decision based on the IL code.
16842 It should not make any inlining decision based on the context.
16843 If forceInline is true, then the inlining decision should not depend on
16844 performance heuristics (code size, etc.).
16847 void Compiler::impCanInlineIL(CORINFO_METHOD_HANDLE fncHandle,
16848 CORINFO_METHOD_INFO* methInfo,
16850 InlineResult* inlineResult)
16852 unsigned codeSize = methInfo->ILCodeSize;
16854 // We shouldn't have made up our minds yet...
16855 assert(!inlineResult->IsDecided());
16857 if (methInfo->EHcount)
16859 inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_EH);
16863 if ((methInfo->ILCode == nullptr) || (codeSize == 0))
16865 inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NO_BODY);
16869 // For now we don't inline varargs (import code can't handle it)
16871 if (methInfo->args.isVarArg())
16873 inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS);
16877 // Reject if it has too many locals.
16878 // This is currently an implementation limit due to fixed-size arrays in the
16879 // inline info, rather than a performance heuristic.
16881 inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_LOCALS, methInfo->locals.numArgs);
16883 if (methInfo->locals.numArgs > MAX_INL_LCLS)
16885 inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_LOCALS);
16889 // Make sure there aren't too many arguments.
16890 // This is currently an implementation limit due to fixed-size arrays in the
16891 // inline info, rather than a performance heuristic.
16893 inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_ARGUMENTS, methInfo->args.numArgs);
16895 if (methInfo->args.numArgs > MAX_INL_ARGS)
16897 inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_ARGUMENTS);
16901 // Note force inline state
16903 inlineResult->NoteBool(InlineObservation::CALLEE_IS_FORCE_INLINE, forceInline);
16905 // Note IL code size
16907 inlineResult->NoteInt(InlineObservation::CALLEE_IL_CODE_SIZE, codeSize);
16909 if (inlineResult->IsFailure())
16914 // Make sure maxstack is not too big
16916 inlineResult->NoteInt(InlineObservation::CALLEE_MAXSTACK, methInfo->maxStack);
16918 if (inlineResult->IsFailure())
16924 /*****************************************************************************
16927 void Compiler::impCheckCanInline(GenTreePtr call,
16928 CORINFO_METHOD_HANDLE fncHandle,
16930 CORINFO_CONTEXT_HANDLE exactContextHnd,
16931 InlineCandidateInfo** ppInlineCandidateInfo,
16932 InlineResult* inlineResult)
16934 // Either EE or JIT might throw exceptions below.
16935 // If that happens, just don't inline the method.
16941 CORINFO_METHOD_HANDLE fncHandle;
16943 CORINFO_CONTEXT_HANDLE exactContextHnd;
16944 InlineResult* result;
16945 InlineCandidateInfo** ppInlineCandidateInfo;
16946 } param = {nullptr};
16948 param.pThis = this;
16950 param.fncHandle = fncHandle;
16951 param.methAttr = methAttr;
16952 param.exactContextHnd = (exactContextHnd != nullptr) ? exactContextHnd : MAKE_METHODCONTEXT(fncHandle);
16953 param.result = inlineResult;
16954 param.ppInlineCandidateInfo = ppInlineCandidateInfo;
16956 bool success = eeRunWithErrorTrap<Param>(
16957 [](Param* pParam) {
16958 DWORD dwRestrictions = 0;
16959 CorInfoInitClassResult initClassResult;
16962 const char* methodName;
16963 const char* className;
16964 methodName = pParam->pThis->eeGetMethodName(pParam->fncHandle, &className);
16966 if (JitConfig.JitNoInline())
16968 pParam->result->NoteFatal(InlineObservation::CALLEE_IS_JIT_NOINLINE);
16973 /* Try to get the code address/size for the method */
16975 CORINFO_METHOD_INFO methInfo;
16976 if (!pParam->pThis->info.compCompHnd->getMethodInfo(pParam->fncHandle, &methInfo))
16978 pParam->result->NoteFatal(InlineObservation::CALLEE_NO_METHOD_INFO);
16983 forceInline = !!(pParam->methAttr & CORINFO_FLG_FORCEINLINE);
16985 pParam->pThis->impCanInlineIL(pParam->fncHandle, &methInfo, forceInline, pParam->result);
16987 if (pParam->result->IsFailure())
16989 assert(pParam->result->IsNever());
16993 // Speculatively check if initClass() can be done.
16994 // If it can be done, we will try to inline the method. If inlining
16995 // succeeds, then we will do the non-speculative initClass() and commit it.
16996 // If this speculative call to initClass() fails, there is no point
16997 // trying to inline this method.
16999 pParam->pThis->info.compCompHnd->initClass(nullptr /* field */, pParam->fncHandle /* method */,
17000 pParam->exactContextHnd /* context */,
17001 TRUE /* speculative */);
17003 if (initClassResult & CORINFO_INITCLASS_DONT_INLINE)
17005 pParam->result->NoteFatal(InlineObservation::CALLSITE_CLASS_INIT_FAILURE_SPEC);
17009 // Give the EE the final say in whether to inline or not.
17010 // This should be last since for verifiable code, this can be expensive
17012 /* VM Inline check also ensures that the method is verifiable if needed */
17013 CorInfoInline vmResult;
17014 vmResult = pParam->pThis->info.compCompHnd->canInline(pParam->pThis->info.compMethodHnd, pParam->fncHandle,
17017 if (vmResult == INLINE_FAIL)
17019 pParam->result->NoteFatal(InlineObservation::CALLSITE_IS_VM_NOINLINE);
17021 else if (vmResult == INLINE_NEVER)
17023 pParam->result->NoteFatal(InlineObservation::CALLEE_IS_VM_NOINLINE);
17026 if (pParam->result->IsFailure())
17028 // Make sure not to report this one. It was already reported by the VM.
17029 pParam->result->SetReported();
17033 // check for unsupported inlining restrictions
17034 assert((dwRestrictions & ~(INLINE_RESPECT_BOUNDARY | INLINE_NO_CALLEE_LDSTR | INLINE_SAME_THIS)) == 0);
17036 if (dwRestrictions & INLINE_SAME_THIS)
17038 GenTreePtr thisArg = pParam->call->gtCall.gtCallObjp;
17041 if (!pParam->pThis->impIsThis(thisArg))
17043 pParam->result->NoteFatal(InlineObservation::CALLSITE_REQUIRES_SAME_THIS);
17048 /* Get the method properties */
17050 CORINFO_CLASS_HANDLE clsHandle;
17051 clsHandle = pParam->pThis->info.compCompHnd->getMethodClass(pParam->fncHandle);
17053 clsAttr = pParam->pThis->info.compCompHnd->getClassAttribs(clsHandle);
17055 /* Get the return type */
17057 var_types fncRetType;
17058 fncRetType = pParam->call->TypeGet();
17061 var_types fncRealRetType;
17062 fncRealRetType = JITtype2varType(methInfo.args.retType);
17064 assert((genActualType(fncRealRetType) == genActualType(fncRetType)) ||
17065 // <BUGNUM> VSW 288602 </BUGNUM>
17066 // In case of IJW, we allow to assign a native pointer to a BYREF.
17067 (fncRetType == TYP_BYREF && methInfo.args.retType == CORINFO_TYPE_PTR) ||
17068 (varTypeIsStruct(fncRetType) && (fncRealRetType == TYP_STRUCT)));
17072 // Allocate an InlineCandidateInfo structure
17074 InlineCandidateInfo* pInfo;
17075 pInfo = new (pParam->pThis, CMK_Inlining) InlineCandidateInfo;
17077 pInfo->dwRestrictions = dwRestrictions;
17078 pInfo->methInfo = methInfo;
17079 pInfo->methAttr = pParam->methAttr;
17080 pInfo->clsHandle = clsHandle;
17081 pInfo->clsAttr = clsAttr;
17082 pInfo->fncRetType = fncRetType;
17083 pInfo->exactContextHnd = pParam->exactContextHnd;
17084 pInfo->ilCallerHandle = pParam->pThis->info.compMethodHnd;
17085 pInfo->initClassResult = initClassResult;
17087 *(pParam->ppInlineCandidateInfo) = pInfo;
17094 param.result->NoteFatal(InlineObservation::CALLSITE_COMPILATION_ERROR);
17098 void Compiler::impInlineRecordArgInfo(InlineInfo* pInlineInfo,
17099 GenTreePtr curArgVal,
17101 InlineResult* inlineResult)
17103 InlArgInfo* inlCurArgInfo = &pInlineInfo->inlArgInfo[argNum];
17105 if (curArgVal->gtOper == GT_MKREFANY)
17107 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_IS_MKREFANY);
17111 inlCurArgInfo->argNode = curArgVal;
17113 GenTreePtr lclVarTree;
17114 if (impIsAddressInLocal(curArgVal, &lclVarTree) && varTypeIsStruct(lclVarTree))
17116 inlCurArgInfo->argIsByRefToStructLocal = true;
17117 #ifdef FEATURE_SIMD
17118 if (lvaTable[lclVarTree->AsLclVarCommon()->gtLclNum].lvSIMDType)
17120 pInlineInfo->hasSIMDTypeArgLocalOrReturn = true;
17122 #endif // FEATURE_SIMD
17125 if (curArgVal->gtFlags & GTF_ALL_EFFECT)
17127 inlCurArgInfo->argHasGlobRef = (curArgVal->gtFlags & GTF_GLOB_REF) != 0;
17128 inlCurArgInfo->argHasSideEff = (curArgVal->gtFlags & (GTF_ALL_EFFECT & ~GTF_GLOB_REF)) != 0;
17131 if (curArgVal->gtOper == GT_LCL_VAR)
17133 inlCurArgInfo->argIsLclVar = true;
17135 /* Remember the "original" argument number */
17136 curArgVal->gtLclVar.gtLclILoffs = argNum;
17139 if ((curArgVal->OperKind() & GTK_CONST) ||
17140 ((curArgVal->gtOper == GT_ADDR) && (curArgVal->gtOp.gtOp1->gtOper == GT_LCL_VAR)))
17142 inlCurArgInfo->argIsInvariant = true;
17143 if (inlCurArgInfo->argIsThis && (curArgVal->gtOper == GT_CNS_INT) && (curArgVal->gtIntCon.gtIconVal == 0))
17145 /* Abort, but do not mark as not inlinable */
17146 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_HAS_NULL_THIS);
17151 if (!inlCurArgInfo->argIsInvariant && gtHasLocalsWithAddrOp(curArgVal))
17153 inlCurArgInfo->argHasLdargaOp = true;
17159 if (inlCurArgInfo->argIsThis)
17161 printf("thisArg:");
17165 printf("\nArgument #%u:", argNum);
17167 if (inlCurArgInfo->argIsLclVar)
17169 printf(" is a local var");
17171 if (inlCurArgInfo->argIsInvariant)
17173 printf(" is a constant");
17175 if (inlCurArgInfo->argHasGlobRef)
17177 printf(" has global refs");
17179 if (inlCurArgInfo->argHasSideEff)
17181 printf(" has side effects");
17183 if (inlCurArgInfo->argHasLdargaOp)
17185 printf(" has ldarga effect");
17187 if (inlCurArgInfo->argHasStargOp)
17189 printf(" has starg effect");
17191 if (inlCurArgInfo->argIsByRefToStructLocal)
17193 printf(" is byref to a struct local");
17197 gtDispTree(curArgVal);
17203 /*****************************************************************************
17207 void Compiler::impInlineInitVars(InlineInfo* pInlineInfo)
17209 assert(!compIsForInlining());
17211 GenTreePtr call = pInlineInfo->iciCall;
17212 CORINFO_METHOD_INFO* methInfo = &pInlineInfo->inlineCandidateInfo->methInfo;
17213 unsigned clsAttr = pInlineInfo->inlineCandidateInfo->clsAttr;
17214 InlArgInfo* inlArgInfo = pInlineInfo->inlArgInfo;
17215 InlLclVarInfo* lclVarInfo = pInlineInfo->lclVarInfo;
17216 InlineResult* inlineResult = pInlineInfo->inlineResult;
17218 const bool hasRetBuffArg = impMethodInfo_hasRetBuffArg(methInfo);
17220 /* init the argument struct */
17222 memset(inlArgInfo, 0, (MAX_INL_ARGS + 1) * sizeof(inlArgInfo[0]));
17224 /* Get hold of the 'this' pointer and the argument list proper */
17226 GenTreePtr thisArg = call->gtCall.gtCallObjp;
17227 GenTreePtr argList = call->gtCall.gtCallArgs;
17228 unsigned argCnt = 0; // Count of the arguments
17230 assert((methInfo->args.hasThis()) == (thisArg != nullptr));
17234 inlArgInfo[0].argIsThis = true;
17236 impInlineRecordArgInfo(pInlineInfo, thisArg, argCnt, inlineResult);
17238 if (inlineResult->IsFailure())
17243 /* Increment the argument count */
17247 /* Record some information about each of the arguments */
17248 bool hasTypeCtxtArg = (methInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) != 0;
17250 #if USER_ARGS_COME_LAST
17251 unsigned typeCtxtArg = thisArg ? 1 : 0;
17252 #else // USER_ARGS_COME_LAST
17253 unsigned typeCtxtArg = methInfo->args.totalILArgs();
17254 #endif // USER_ARGS_COME_LAST
17256 for (GenTreePtr argTmp = argList; argTmp; argTmp = argTmp->gtOp.gtOp2)
17258 if (argTmp == argList && hasRetBuffArg)
17263 // Ignore the type context argument
17264 if (hasTypeCtxtArg && (argCnt == typeCtxtArg))
17266 typeCtxtArg = 0xFFFFFFFF;
17270 assert(argTmp->gtOper == GT_LIST);
17271 GenTreePtr argVal = argTmp->gtOp.gtOp1;
17273 impInlineRecordArgInfo(pInlineInfo, argVal, argCnt, inlineResult);
17275 if (inlineResult->IsFailure())
17280 /* Increment the argument count */
17284 /* Make sure we got the arg number right */
17285 assert(argCnt == methInfo->args.totalILArgs());
17287 #ifdef FEATURE_SIMD
17288 bool foundSIMDType = pInlineInfo->hasSIMDTypeArgLocalOrReturn;
17289 #endif // FEATURE_SIMD
17291 /* We have typeless opcodes, get type information from the signature */
17297 if (clsAttr & CORINFO_FLG_VALUECLASS)
17299 sigType = TYP_BYREF;
17306 lclVarInfo[0].lclVerTypeInfo = verMakeTypeInfo(pInlineInfo->inlineCandidateInfo->clsHandle);
17307 lclVarInfo[0].lclHasLdlocaOp = false;
17309 #ifdef FEATURE_SIMD
17310 // We always want to check isSIMDClass, since we want to set foundSIMDType (to increase
17311 // the inlining multiplier) for anything in that assembly.
17312 // But we only need to normalize it if it is a TYP_STRUCT
17313 // (which we need to do even if we have already set foundSIMDType).
17314 if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDClass(&(lclVarInfo[0].lclVerTypeInfo)))
17316 if (sigType == TYP_STRUCT)
17318 sigType = impNormStructType(lclVarInfo[0].lclVerTypeInfo.GetClassHandle());
17320 foundSIMDType = true;
17322 #endif // FEATURE_SIMD
17323 lclVarInfo[0].lclTypeInfo = sigType;
17325 assert(varTypeIsGC(thisArg->gtType) || // "this" is managed
17326 (thisArg->gtType == TYP_I_IMPL && // "this" is unmgd but the method's class doesn't care
17327 (clsAttr & CORINFO_FLG_VALUECLASS)));
17329 if (genActualType(thisArg->gtType) != genActualType(sigType))
17331 if (sigType == TYP_REF)
17333 /* The argument cannot be bashed into a ref (see bug 750871) */
17334 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_REF);
17338 /* This can only happen with byrefs <-> ints/shorts */
17340 assert(genActualType(sigType) == TYP_I_IMPL || sigType == TYP_BYREF);
17341 assert(genActualType(thisArg->gtType) == TYP_I_IMPL || thisArg->gtType == TYP_BYREF);
17343 if (sigType == TYP_BYREF)
17345 lclVarInfo[0].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
17347 else if (thisArg->gtType == TYP_BYREF)
17349 assert(sigType == TYP_I_IMPL);
17351 /* If possible change the BYREF to an int */
17352 if (thisArg->IsVarAddr())
17354 thisArg->gtType = TYP_I_IMPL;
17355 lclVarInfo[0].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
17359 /* Arguments 'int <- byref' cannot be bashed */
17360 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_INT);
17367 /* Init the types of the arguments and make sure the types
17368 * from the trees match the types in the signature */
17370 CORINFO_ARG_LIST_HANDLE argLst;
17371 argLst = methInfo->args.args;
17374 for (i = (thisArg ? 1 : 0); i < argCnt; i++, argLst = info.compCompHnd->getArgNext(argLst))
17376 var_types sigType = (var_types)eeGetArgType(argLst, &methInfo->args);
17378 lclVarInfo[i].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->args, argLst);
17380 #ifdef FEATURE_SIMD
17381 if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDClass(&(lclVarInfo[i].lclVerTypeInfo)))
17383 // If this is a SIMD class (i.e. in the SIMD assembly), then we will consider that we've
17384 // found a SIMD type, even if this may not be a type we recognize (the assumption is that
17385 // it is likely to use a SIMD type, and therefore we want to increase the inlining multiplier).
17386 foundSIMDType = true;
17387 if (sigType == TYP_STRUCT)
17389 var_types structType = impNormStructType(lclVarInfo[i].lclVerTypeInfo.GetClassHandle());
17390 sigType = structType;
17393 #endif // FEATURE_SIMD
17395 lclVarInfo[i].lclTypeInfo = sigType;
17396 lclVarInfo[i].lclHasLdlocaOp = false;
17398 /* Does the tree type match the signature type? */
17400 GenTreePtr inlArgNode = inlArgInfo[i].argNode;
17402 if (sigType != inlArgNode->gtType)
17404 /* In valid IL, this can only happen for short integer types or byrefs <-> [native] ints,
17405 but in bad IL cases with caller-callee signature mismatches we can see other types.
17406 Intentionally reject cases with mismatches so the jit is more flexible when
17407 encountering bad IL. */
17409 bool isPlausibleTypeMatch = (genActualType(sigType) == genActualType(inlArgNode->gtType)) ||
17410 (genActualTypeIsIntOrI(sigType) && inlArgNode->gtType == TYP_BYREF) ||
17411 (sigType == TYP_BYREF && genActualTypeIsIntOrI(inlArgNode->gtType));
17413 if (!isPlausibleTypeMatch)
17415 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_TYPES_INCOMPATIBLE);
17419 /* Is it a narrowing or widening cast?
17420 * Widening casts are ok since the value computed is already
17421 * normalized to an int (on the IL stack) */
17423 if (genTypeSize(inlArgNode->gtType) >= genTypeSize(sigType))
17425 if (sigType == TYP_BYREF)
17427 lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
17429 else if (inlArgNode->gtType == TYP_BYREF)
17431 assert(varTypeIsIntOrI(sigType));
17433 /* If possible bash the BYREF to an int */
17434 if (inlArgNode->IsVarAddr())
17436 inlArgNode->gtType = TYP_I_IMPL;
17437 lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
17441 /* Arguments 'int <- byref' cannot be changed */
17442 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_INT);
17446 else if (genTypeSize(sigType) < EA_PTRSIZE)
17448 /* Narrowing cast */
17450 if (inlArgNode->gtOper == GT_LCL_VAR &&
17451 !lvaTable[inlArgNode->gtLclVarCommon.gtLclNum].lvNormalizeOnLoad() &&
17452 sigType == lvaGetRealType(inlArgNode->gtLclVarCommon.gtLclNum))
17454 /* We don't need to insert a cast here as the variable
17455 was assigned a normalized value of the right type */
17460 inlArgNode = inlArgInfo[i].argNode = gtNewCastNode(TYP_INT, inlArgNode, sigType);
17462 inlArgInfo[i].argIsLclVar = false;
17464 /* Try to fold the node in case we have constant arguments */
17466 if (inlArgInfo[i].argIsInvariant)
17468 inlArgNode = gtFoldExprConst(inlArgNode);
17469 inlArgInfo[i].argNode = inlArgNode;
17470 assert(inlArgNode->OperIsConst());
17473 #ifdef _TARGET_64BIT_
17474 else if (genTypeSize(genActualType(inlArgNode->gtType)) < genTypeSize(sigType))
17476 // This should only happen for int -> native int widening
17477 inlArgNode = inlArgInfo[i].argNode = gtNewCastNode(genActualType(sigType), inlArgNode, sigType);
17479 inlArgInfo[i].argIsLclVar = false;
17481 /* Try to fold the node in case we have constant arguments */
17483 if (inlArgInfo[i].argIsInvariant)
17485 inlArgNode = gtFoldExprConst(inlArgNode);
17486 inlArgInfo[i].argNode = inlArgNode;
17487 assert(inlArgNode->OperIsConst());
17490 #endif // _TARGET_64BIT_
17495 /* Init the types of the local variables */
17497 CORINFO_ARG_LIST_HANDLE localsSig;
17498 localsSig = methInfo->locals.args;
17500 for (i = 0; i < methInfo->locals.numArgs; i++)
17503 var_types type = (var_types)eeGetArgType(localsSig, &methInfo->locals, &isPinned);
17505 lclVarInfo[i + argCnt].lclHasLdlocaOp = false;
17506 lclVarInfo[i + argCnt].lclIsPinned = isPinned;
17507 lclVarInfo[i + argCnt].lclTypeInfo = type;
17511 // Pinned locals may cause inlines to fail.
17512 inlineResult->Note(InlineObservation::CALLEE_HAS_PINNED_LOCALS);
17513 if (inlineResult->IsFailure())
17519 lclVarInfo[i + argCnt].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->locals, localsSig);
17521 // If this local is a struct type with GC fields, inform the inliner. It may choose to bail
17522 // out on the inline.
17523 if (type == TYP_STRUCT)
17525 CORINFO_CLASS_HANDLE lclHandle = lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle();
17526 DWORD typeFlags = info.compCompHnd->getClassAttribs(lclHandle);
17527 if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0)
17529 inlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT);
17530 if (inlineResult->IsFailure())
17535 // Do further notification in the case where the call site is rare; some policies do
17536 // not track the relative hotness of call sites for "always" inline cases.
17537 if (pInlineInfo->iciBlock->isRunRarely())
17539 inlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT);
17540 if (inlineResult->IsFailure())
17549 localsSig = info.compCompHnd->getArgNext(localsSig);
17551 #ifdef FEATURE_SIMD
17552 if ((!foundSIMDType || (type == TYP_STRUCT)) && isSIMDClass(&(lclVarInfo[i + argCnt].lclVerTypeInfo)))
17554 foundSIMDType = true;
17555 if (featureSIMD && type == TYP_STRUCT)
17557 var_types structType = impNormStructType(lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle());
17558 lclVarInfo[i + argCnt].lclTypeInfo = structType;
17561 #endif // FEATURE_SIMD
17564 #ifdef FEATURE_SIMD
17565 if (!foundSIMDType && (call->AsCall()->gtRetClsHnd != nullptr) && isSIMDClass(call->AsCall()->gtRetClsHnd))
17567 foundSIMDType = true;
17569 pInlineInfo->hasSIMDTypeArgLocalOrReturn = foundSIMDType;
17570 #endif // FEATURE_SIMD
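// On return, lclVarInfo is laid out as the code above built it: entries
// [0 .. argCnt) describe the arguments (slot 0 is the "this" pointer when
// present) and entries [argCnt .. argCnt + methInfo->locals.numArgs) describe
// the inlinee's locals, which is why the loops index with "i + argCnt".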
17573 unsigned Compiler::impInlineFetchLocal(unsigned lclNum DEBUGARG(const char* reason))
17575 assert(compIsForInlining());
17577 unsigned tmpNum = impInlineInfo->lclTmpNum[lclNum];
17579 if (tmpNum == BAD_VAR_NUM)
17581 var_types lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
17583 // The lifetime of this local might span multiple BBs.
17584 // So it is a long lifetime local.
17585 impInlineInfo->lclTmpNum[lclNum] = tmpNum = lvaGrabTemp(false DEBUGARG(reason));
17587 lvaTable[tmpNum].lvType = lclTyp;
17588 if (impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclHasLdlocaOp)
17590 lvaTable[tmpNum].lvHasLdAddrOp = 1;
17593 if (impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclIsPinned)
17595 lvaTable[tmpNum].lvPinned = 1;
17597 if (!impInlineInfo->hasPinnedLocals)
17599 // If the inlinee returns a value, use a spill temp
17600 // for the return value to ensure that even in case
17601 // where the return expression refers to one of the
17602 // pinned locals, we can unpin the local right after
17603 // the inlined method body.
17604 if ((info.compRetNativeType != TYP_VOID) && (lvaInlineeReturnSpillTemp == BAD_VAR_NUM))
17606 lvaInlineeReturnSpillTemp =
17607 lvaGrabTemp(false DEBUGARG("Inline candidate pinned local return spill temp"));
17608 lvaTable[lvaInlineeReturnSpillTemp].lvType = info.compRetNativeType;
17612 impInlineInfo->hasPinnedLocals = true;
17615 if (impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclVerTypeInfo.IsStruct())
17617 if (varTypeIsStruct(lclTyp))
17619 lvaSetStruct(tmpNum,
17620 impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclVerTypeInfo.GetClassHandle(),
17621 true /* unsafe value cls check */);
17625 // This is a wrapped primitive. Make sure the verstate knows that
17626 lvaTable[tmpNum].lvVerTypeInfo =
17627 impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclVerTypeInfo;
17635 // Returns the GenTree (usually a GT_LCL_VAR) representing an argument of the inlined method.
17636 // Only use this method for the arguments of the inlinee method.
17637 // !!! Do not use it for the locals of the inlinee method. !!!!
17639 GenTreePtr Compiler::impInlineFetchArg(unsigned lclNum, InlArgInfo* inlArgInfo, InlLclVarInfo* lclVarInfo)
17641 /* Get the argument type */
17642 var_types lclTyp = lclVarInfo[lclNum].lclTypeInfo;
17644 GenTreePtr op1 = nullptr;
17646 // constant or address of local
17647 if (inlArgInfo[lclNum].argIsInvariant && !inlArgInfo[lclNum].argHasLdargaOp && !inlArgInfo[lclNum].argHasStargOp)
17649 /* Clone the constant. Note that we cannot directly use argNode
17650 in the trees even if inlArgInfo[lclNum].argIsUsed==false as this
17651 would introduce aliasing between inlArgInfo[].argNode and
17652 impInlineExpr. Then gtFoldExpr() could change it, causing further
17653 references to the argument working off of the bashed copy. */
17655 op1 = gtCloneExpr(inlArgInfo[lclNum].argNode);
17656 PREFIX_ASSUME(op1 != nullptr);
17657 inlArgInfo[lclNum].argTmpNum = (unsigned)-1; // illegal temp
17659 else if (inlArgInfo[lclNum].argIsLclVar && !inlArgInfo[lclNum].argHasLdargaOp && !inlArgInfo[lclNum].argHasStargOp)
17661 /* Argument is a local variable (of the caller)
17662 * Can we re-use the passed argument node? */
17664 op1 = inlArgInfo[lclNum].argNode;
17665 inlArgInfo[lclNum].argTmpNum = op1->gtLclVarCommon.gtLclNum;
17667 if (inlArgInfo[lclNum].argIsUsed)
17669 assert(op1->gtOper == GT_LCL_VAR);
17670 assert(lclNum == op1->gtLclVar.gtLclILoffs);
17672 if (!lvaTable[op1->gtLclVarCommon.gtLclNum].lvNormalizeOnLoad())
17674 lclTyp = genActualType(lclTyp);
17677 /* Create a new lcl var node - remember the argument lclNum */
17678 op1 = gtNewLclvNode(op1->gtLclVarCommon.gtLclNum, lclTyp, op1->gtLclVar.gtLclILoffs);
17681 else if (inlArgInfo[lclNum].argIsByRefToStructLocal && !inlArgInfo[lclNum].argHasStargOp)
17683 /* Argument is a by-ref address to a struct, a normed struct, or its field.
17684 In these cases, don't spill the byref to a local, simply clone the tree and use it.
17685 This way we will increase the chance for this byref to be optimized away by
17686 a subsequent "dereference" operation.
17688 From Dev11 bug #139955: Argument node can also be TYP_I_IMPL if we've bashed the tree
17689 (in impInlineInitVars()), if the arg has argHasLdargaOp as well as argIsByRefToStructLocal.
17690 For example, if the caller is:
17691 ldloca.s V_1 // V_1 is a local struct
17692 call void Test.ILPart::RunLdargaOnPointerArg(int32*)
17693 and the callee being inlined has:
17694 .method public static void RunLdargaOnPointerArg(int32* ptrToInts) cil managed
17696 call void Test.FourInts::NotInlined_SetExpectedValuesThroughPointerToPointer(int32**)
17697 then we change the argument tree (of "ldloca.s V_1") to TYP_I_IMPL to match the callee signature. We'll
17698 soon afterwards reject the inlining anyway, since the tree we return isn't a GT_LCL_VAR.
17700 assert(inlArgInfo[lclNum].argNode->TypeGet() == TYP_BYREF ||
17701 inlArgInfo[lclNum].argNode->TypeGet() == TYP_I_IMPL);
17702 op1 = gtCloneExpr(inlArgInfo[lclNum].argNode);
17706 /* Argument is a complex expression - it must be evaluated into a temp */
17708 if (inlArgInfo[lclNum].argHasTmp)
17710 assert(inlArgInfo[lclNum].argIsUsed);
17711 assert(inlArgInfo[lclNum].argTmpNum < lvaCount);
17713 /* Create a new lcl var node - remember the argument lclNum */
17714 op1 = gtNewLclvNode(inlArgInfo[lclNum].argTmpNum, genActualType(lclTyp));
17716 /* This is the second or later use of this argument,
17717 so we have to use the temp (instead of the actual arg) */
17718 inlArgInfo[lclNum].argBashTmpNode = nullptr;
17722 /* First time use */
17723 assert(inlArgInfo[lclNum].argIsUsed == false);
17725 /* Reserve a temp for the expression.
17726 * Use a large size node as we may change it later */
17728 unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Inlining Arg"));
17730 lvaTable[tmpNum].lvType = lclTyp;
17731 assert(lvaTable[tmpNum].lvAddrExposed == 0);
17732 if (inlArgInfo[lclNum].argHasLdargaOp)
17734 lvaTable[tmpNum].lvHasLdAddrOp = 1;
17737 if (lclVarInfo[lclNum].lclVerTypeInfo.IsStruct())
17739 if (varTypeIsStruct(lclTyp))
17741 lvaSetStruct(tmpNum, impInlineInfo->lclVarInfo[lclNum].lclVerTypeInfo.GetClassHandle(),
17742 true /* unsafe value cls check */);
17746 // This is a wrapped primitive. Make sure the verstate knows that
17747 lvaTable[tmpNum].lvVerTypeInfo = impInlineInfo->lclVarInfo[lclNum].lclVerTypeInfo;
17751 inlArgInfo[lclNum].argHasTmp = true;
17752 inlArgInfo[lclNum].argTmpNum = tmpNum;
17754 // If we require strict exception order, then arguments must
17755 // be evaluated in sequence before the body of the inlined method.
17756 // So we need to evaluate them to a temp.
17757 // Also, if arguments have global references, we need to
17758 // evaluate them to a temp before the inlined body as the
17759 // inlined body may be modifying the global ref.
17760 // TODO-1stClassStructs: We currently do not reuse an existing lclVar
17761 // if it is a struct, because it requires some additional handling.
17763 if (!varTypeIsStruct(lclTyp) && (!inlArgInfo[lclNum].argHasSideEff) && (!inlArgInfo[lclNum].argHasGlobRef))
17765 /* Get a *LARGE* LCL_VAR node */
17766 op1 = gtNewLclLNode(tmpNum, genActualType(lclTyp), lclNum);
17768 /* Record op1 as the very first use of this argument.
17769 If there are no further uses of the arg, we may be
17770 able to use the actual arg node instead of the temp.
17771 If we do see any further uses, we will clear this. */
17772 inlArgInfo[lclNum].argBashTmpNode = op1;
17776 /* Get a small LCL_VAR node */
17777 op1 = gtNewLclvNode(tmpNum, genActualType(lclTyp));
17778 /* No bashing of this argument */
17779 inlArgInfo[lclNum].argBashTmpNode = nullptr;
17784 /* Mark the argument as used */
17786 inlArgInfo[lclNum].argIsUsed = true;
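// Worked example (hypothetical inlinee call site): an argument passed as
// "x + y" is neither invariant nor a bare lclVar, so the code above reserves
// a temp T, sets argHasTmp/argTmpNum, and returns GT_LCL_VAR(T); a constant
// argument such as 42 takes the first path instead and returns a clone of
// the constant, with no temp reserved at all.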
17791 /******************************************************************************
17792 Is this the original "this" argument to the call being inlined?
17794 Note that we do not inline methods with "starg 0", and so we do not need to worry about it.
17798 BOOL Compiler::impInlineIsThis(GenTreePtr tree, InlArgInfo* inlArgInfo)
17800 assert(compIsForInlining());
17801 return (tree->gtOper == GT_LCL_VAR && tree->gtLclVarCommon.gtLclNum == inlArgInfo[0].argTmpNum);
17804 //-----------------------------------------------------------------------------
17805 // This function checks if a dereference in the inlinee can guarantee that
17806 // the "this" is non-NULL.
17807 // If we haven't hit a branch or a side effect, and we are dereferencing
17808 // from 'this' to access a field or make a GTF_CALL_NULLCHECK call,
17809 // then we can avoid a separate null pointer check.
17811 // "additionalTreesToBeEvaluatedBefore"
17812 // is the set of pending trees that have not yet been added to the statement list,
17813 // and which have been removed from verCurrentState.esStack[]
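// Example of the case this enables (hypothetical C# inlinee):
//     int Get() { return this.field; }
// When inlined, the field access through 'this' is the first possible side
// effect, so the implicit null check it performs is sufficient and no
// separate explicit null check needs to be generated for the call.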
17815 BOOL Compiler::impInlineIsGuaranteedThisDerefBeforeAnySideEffects(GenTreePtr additionalTreesToBeEvaluatedBefore,
17816 GenTreePtr variableBeingDereferenced,
17817 InlArgInfo* inlArgInfo)
17819 assert(compIsForInlining());
17820 assert(opts.OptEnabled(CLFLG_INLINING));
17822 BasicBlock* block = compCurBB;
17827 if (block != fgFirstBB)
17832 if (!impInlineIsThis(variableBeingDereferenced, inlArgInfo))
17837 if (additionalTreesToBeEvaluatedBefore &&
17838 GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(additionalTreesToBeEvaluatedBefore->gtFlags))
17843 for (stmt = impTreeList->gtNext; stmt; stmt = stmt->gtNext)
17845 expr = stmt->gtStmt.gtStmtExpr;
17847 if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(expr->gtFlags))
17853 for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
17855 unsigned stackTreeFlags = verCurrentState.esStack[level].val->gtFlags;
17856 if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(stackTreeFlags))
17865 /******************************************************************************/
17866 // Check the inlining eligibility of this GT_CALL node.
17867 // Mark GTF_CALL_INLINE_CANDIDATE on the GT_CALL node
17869 // Todo: find a way to record the failure reasons in the IR (or
17870 // otherwise build tree context) so when we do the inlining pass we
17871 // can capture these reasons
17873 void Compiler::impMarkInlineCandidate(GenTreePtr callNode,
17874 CORINFO_CONTEXT_HANDLE exactContextHnd,
17875 CORINFO_CALL_INFO* callInfo)
17877 // Let the strategy know there's another call
17878 impInlineRoot()->m_inlineStrategy->NoteCall();
17880 if (!opts.OptEnabled(CLFLG_INLINING))
17882 /* XXX Mon 8/18/2008
17883 * This assert is misleading. The caller does not ensure that we have CLFLG_INLINING set before
17884 * calling impMarkInlineCandidate. However, if this assert trips it means that we're an inlinee and
17885 * CLFLG_MINOPT is set. That doesn't make a lot of sense. If you hit this assert, work back and
17886 * figure out why we did not set MAXOPT for this compile.
17888 assert(!compIsForInlining());
17892 if (compIsForImportOnly())
17894 // Don't bother creating the inline candidate during verification.
17895 // Otherwise the call to info.compCompHnd->canInline will trigger a recursive verification
17896 // that leads to the creation of multiple instances of Compiler.
17900 GenTreeCall* call = callNode->AsCall();
17901 InlineResult inlineResult(this, call, nullptr, "impMarkInlineCandidate");
17903 // Don't inline if not optimizing root method
17904 if (opts.compDbgCode)
17906 inlineResult.NoteFatal(InlineObservation::CALLER_DEBUG_CODEGEN);
17910 // Don't inline if inlining into root method is disabled.
17911 if (InlineStrategy::IsNoInline(info.compCompHnd, info.compMethodHnd))
17913 inlineResult.NoteFatal(InlineObservation::CALLER_IS_JIT_NOINLINE);
17917 // Inlining candidate determination needs to honor only IL tail prefix.
17918 // Inlining takes precedence over implicit tail call optimization (if the call is not directly recursive).
17919 if (call->IsTailPrefixedCall())
17921 inlineResult.NoteFatal(InlineObservation::CALLSITE_EXPLICIT_TAIL_PREFIX);
17925 // Tail recursion elimination takes precedence over inlining.
17926 // TODO: We may want to do some of the additional checks from fgMorphCall
17927 // here to reduce the chance we don't inline a call that won't be optimized
17928 // as a fast tail call or turned into a loop.
17929 if (gtIsRecursiveCall(call) && call->IsImplicitTailCall())
17931 inlineResult.NoteFatal(InlineObservation::CALLSITE_IMPLICIT_REC_TAIL_CALL);
17935 if ((call->gtFlags & GTF_CALL_VIRT_KIND_MASK) != GTF_CALL_NONVIRT)
17937 inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT);
17941 /* Ignore helper calls */
17943 if (call->gtCallType == CT_HELPER)
17945 inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_CALL_TO_HELPER);
17949 /* Ignore indirect calls */
17950 if (call->gtCallType == CT_INDIRECT)
17952 inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT_MANAGED);
17956 /* I removed the check for BBJ_THROW. BBJ_THROW is usually marked as rarely run. This more or less
17957 * restricts the inliner to non-expanding inlines. I removed the check to allow for non-expanding
17958 * inlining in throw blocks. I should consider the same thing for catch and filter regions. */
17960 CORINFO_METHOD_HANDLE fncHandle = call->gtCallMethHnd;
17963 // Reuse method flags from the original callInfo if possible
17964 if (fncHandle == callInfo->hMethod)
17966 methAttr = callInfo->methodFlags;
17970 methAttr = info.compCompHnd->getMethodAttribs(fncHandle);
17974 if (compStressCompile(STRESS_FORCE_INLINE, 0))
17976 methAttr |= CORINFO_FLG_FORCEINLINE;
17980 // Check for COMPlus_AggressiveInlining
17981 if (compDoAggressiveInlining)
17983 methAttr |= CORINFO_FLG_FORCEINLINE;
17986 if (!(methAttr & CORINFO_FLG_FORCEINLINE))
17988 /* Don't bother inlining blocks that are in the catch handler region */
17989 if (bbInCatchHandlerILRange(compCurBB))
17994 printf("\nWill not inline blocks that are in the catch handler region\n");
17999 inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_CATCH);
18003 if (bbInFilterILRange(compCurBB))
18008 printf("\nWill not inline blocks that are in the filter region\n");
18012 inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_FILTER);
    /* If the caller's stack frame is marked, then we can't do any inlining. Period. */

    if (opts.compNeedSecurityCheck)
    {
        inlineResult.NoteFatal(InlineObservation::CALLER_NEEDS_SECURITY_CHECK);
        return;
    }

    /* Check if we tried to inline this method before */

    if (methAttr & CORINFO_FLG_DONT_INLINE)
    {
        inlineResult.NoteFatal(InlineObservation::CALLEE_IS_NOINLINE);
        return;
    }

    /* Cannot inline synchronized methods */

    if (methAttr & CORINFO_FLG_SYNCH)
    {
        inlineResult.NoteFatal(InlineObservation::CALLEE_IS_SYNCHRONIZED);
        return;
    }

    /* Do not inline if callee needs security checks (since they would then mark the wrong frame) */

    if (methAttr & CORINFO_FLG_SECURITYCHECK)
    {
        inlineResult.NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
        return;
    }
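    // All call-site screens have passed; now check whether the callee itself
    // can be inlined, and build the candidate info if so.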
    InlineCandidateInfo* inlineCandidateInfo = nullptr;
    impCheckCanInline(call, fncHandle, methAttr, exactContextHnd, &inlineCandidateInfo, &inlineResult);

    if (inlineResult.IsFailure())
    {
        return;
    }
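    // The candidate survived all screens; record the candidate info on the
    // call node so the inlining phase can find it later.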
    // The old value should be NULL
    assert(call->gtInlineCandidateInfo == nullptr);

    call->gtInlineCandidateInfo = inlineCandidateInfo;

    // Mark the call node as an inline candidate.
    call->gtFlags |= GTF_CALL_INLINE_CANDIDATE;

    // Let the strategy know there's another candidate.
    impInlineRoot()->m_inlineStrategy->NoteCandidate();

    // Since we're not actually inlining yet, and this call site is
    // still just an inline candidate, there's nothing to report.
    inlineResult.SetReported();
}
/******************************************************************************/
// Returns true if the given intrinsic will be implemented by target-specific
// instructions

bool Compiler::IsTargetIntrinsic(CorInfoIntrinsics intrinsicId)
{
#if defined(_TARGET_AMD64_) || (defined(_TARGET_X86_) && !defined(LEGACY_BACKEND))
    switch (intrinsicId)
    {
        // Amd64 only has SSE2 instructions to directly compute sqrt/abs.
        //
        // TODO: Because the x86 backend only targets SSE for floating-point code,
        //       it does not treat Sine, Cosine, or Round as intrinsics (JIT32
        //       implemented those intrinsics as x87 instructions). If this poses
        //       a CQ problem, it may be necessary to change the implementation of
        //       the helper calls to decrease call overhead or switch back to the
        //       x87 instructions. This is tracked by #7097.
        case CORINFO_INTRINSIC_Sqrt:
        case CORINFO_INTRINSIC_Abs:
            return true;

        default:
            return false;
    }
#elif defined(_TARGET_ARM64_)
    switch (intrinsicId)
    {
        case CORINFO_INTRINSIC_Sqrt:
        case CORINFO_INTRINSIC_Abs:
        case CORINFO_INTRINSIC_Round:
            return true;

        default:
            return false;
    }
#elif defined(_TARGET_ARM_)
    switch (intrinsicId)
    {
        case CORINFO_INTRINSIC_Sqrt:
        case CORINFO_INTRINSIC_Abs:
        case CORINFO_INTRINSIC_Round:
            return true;

        default:
            return false;
    }
#elif defined(_TARGET_X86_)
    switch (intrinsicId)
    {
        case CORINFO_INTRINSIC_Sin:
        case CORINFO_INTRINSIC_Cos:
        case CORINFO_INTRINSIC_Sqrt:
        case CORINFO_INTRINSIC_Abs:
        case CORINFO_INTRINSIC_Round:
            return true;

        default:
            return false;
    }
#else
    // TODO: This portion of the logic is not implemented for other architectures.
    // We return true because, on all other architectures, the only intrinsics
    // enabled are target intrinsics.
    return true;
#endif //_TARGET_AMD64_
}
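// For example, on Amd64 IsTargetIntrinsic(CORINFO_INTRINSIC_Sqrt) returns true
// (sqrt maps directly to an SSE2 instruction), while CORINFO_INTRINSIC_Sin
// returns false and is instead implemented via a System.Math call (see
// IsIntrinsicImplementedByUserCall below).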
/******************************************************************************/
// Returns true if the given intrinsic will be implemented by calling
// System.Math methods.

bool Compiler::IsIntrinsicImplementedByUserCall(CorInfoIntrinsics intrinsicId)
{
    // Currently, if a math intrinsic is not implemented by target-specific
    // instructions, it will be implemented by a System.Math call. In the
    // future, if we turn to implementing some of them with helper calls,
    // this predicate needs to be revisited.
    return !IsTargetIntrinsic(intrinsicId);
}
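/******************************************************************************/
// Returns true if the given intrinsic is one of the math intrinsics the JIT
// recognizes from System.Math.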
bool Compiler::IsMathIntrinsic(CorInfoIntrinsics intrinsicId)
{
    switch (intrinsicId)
    {
        case CORINFO_INTRINSIC_Sin:
        case CORINFO_INTRINSIC_Sqrt:
        case CORINFO_INTRINSIC_Abs:
        case CORINFO_INTRINSIC_Cos:
        case CORINFO_INTRINSIC_Round:
        case CORINFO_INTRINSIC_Cosh:
        case CORINFO_INTRINSIC_Sinh:
        case CORINFO_INTRINSIC_Tan:
        case CORINFO_INTRINSIC_Tanh:
        case CORINFO_INTRINSIC_Asin:
        case CORINFO_INTRINSIC_Acos:
        case CORINFO_INTRINSIC_Atan:
        case CORINFO_INTRINSIC_Atan2:
        case CORINFO_INTRINSIC_Log10:
        case CORINFO_INTRINSIC_Pow:
        case CORINFO_INTRINSIC_Exp:
        case CORINFO_INTRINSIC_Ceiling:
        case CORINFO_INTRINSIC_Floor:
            return true;

        default:
            return false;
    }
}
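// Returns true if the given tree is a GT_INTRINSIC node for one of the math
// intrinsics listed above.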
bool Compiler::IsMathIntrinsic(GenTreePtr tree)
{
    return (tree->OperGet() == GT_INTRINSIC) && IsMathIntrinsic(tree->gtIntrinsic.gtIntrinsicId);
}
/*****************************************************************************/