1 // Licensed to the .NET Foundation under one or more agreements.
2 // The .NET Foundation licenses this file to you under the MIT license.
3 // See the LICENSE file in the project root for more information.
5 /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
6 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
10 XX Imports the given method and converts it to semantic trees XX
12 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
13 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
23 #define Verify(cond, msg) \
28 verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__)); \
32 #define VerifyOrReturn(cond, msg) \
37 verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__)); \
42 #define VerifyOrReturnSpeculative(cond, msg, speculative) \
56 verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__)); \
62 /*****************************************************************************/
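// A usage sketch for the verification macros above: each one checks a condition and, on
// failure, reports the unverifiable code via verRaiseVerifyExceptionIfNeeded. For example
// (a real call site appears in impResolveToken below):
//
//     Verify(eeTryResolveToken(pResolvedToken), "Token resolution failed");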
64 void Compiler::impInit()
68 impTreeList = nullptr;
69 impTreeLast = nullptr;
70 impInlinedCodeSize = 0;
74 /*****************************************************************************
76 * Pushes the given tree on the stack.
79 void Compiler::impPushOnStack(GenTreePtr tree, typeInfo ti)
81 /* Check for overflow. If inlining, we may be using a bigger stack */
83 if ((verCurrentState.esStackDepth >= info.compMaxStack) &&
84 (verCurrentState.esStackDepth >= impStkSize || ((compCurBB->bbFlags & BBF_IMPORTED) == 0)))
86 BADCODE("stack overflow");
90 // If we are pushing a struct, make certain we know the precise type!
91 if (tree->TypeGet() == TYP_STRUCT)
93 assert(ti.IsType(TI_STRUCT));
94 CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandle();
95 assert(clsHnd != NO_CLASS_HANDLE);
98 if (tiVerificationNeeded && !ti.IsDead())
100 assert(typeInfo::AreEquivalent(NormaliseForStack(ti), ti)); // types are normalized
102 // The ti type is consistent with the tree type.
105 // On 64-bit systems, nodes whose "proper" type is "native int" get labeled TYP_LONG.
106 // In the verification type system, we always transform "native int" to "TI_INT".
107 // Ideally, we would keep track of which nodes labeled "TYP_LONG" are really "native int", but
108 // attempts to do that have proved too difficult. Instead, we'll assume that in checks like this,
109 // when there's a mismatch, it's because of this reason -- the typeInfo::AreEquivalentModuloNativeInt
110 // method used in the last disjunct allows exactly this mismatch.
111 assert(ti.IsDead() || ti.IsByRef() && (tree->TypeGet() == TYP_I_IMPL || tree->TypeGet() == TYP_BYREF) ||
112 ti.IsUnboxedGenericTypeVar() && tree->TypeGet() == TYP_REF ||
113 ti.IsObjRef() && tree->TypeGet() == TYP_REF || ti.IsMethod() && tree->TypeGet() == TYP_I_IMPL ||
114 ti.IsType(TI_STRUCT) && tree->TypeGet() != TYP_REF ||
115 typeInfo::AreEquivalentModuloNativeInt(NormaliseForStack(ti),
116 NormaliseForStack(typeInfo(tree->TypeGet()))));
118 // If it is a struct type, make certain we normalized the primitive types
119 assert(!ti.IsType(TI_STRUCT) ||
120 info.compCompHnd->getTypeForPrimitiveValueClass(ti.GetClassHandle()) == CORINFO_TYPE_UNDEF);
124 if (VERBOSE && tiVerificationNeeded)
127 printf(TI_DUMP_PADDING);
128 printf("About to push to stack: ");
131 #endif // VERBOSE_VERIFY
135 verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo = ti;
136 verCurrentState.esStack[verCurrentState.esStackDepth++].val = tree;
138 if ((tree->gtType == TYP_LONG) && (compLongUsed == false))
142 else if (((tree->gtType == TYP_FLOAT) || (tree->gtType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
144 compFloatingPointUsed = true;
148 /******************************************************************************/
149 // Used in the inliner, where we can assume typesafe code. Please don't use in the importer!!
150 inline void Compiler::impPushOnStackNoType(GenTreePtr tree)
152 assert(verCurrentState.esStackDepth < impStkSize);
153 INDEBUG(verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo = typeInfo());
154 verCurrentState.esStack[verCurrentState.esStackDepth++].val = tree;
156 if ((tree->gtType == TYP_LONG) && (compLongUsed == false))
160 else if (((tree->gtType == TYP_FLOAT) || (tree->gtType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
162 compFloatingPointUsed = true;
166 inline void Compiler::impPushNullObjRefOnStack()
168 impPushOnStack(gtNewIconNode(0, TYP_REF), typeInfo(TI_NULL));
171 // This method gets called when we run into unverifiable code
172 // (and we are verifying the method)
174 inline void Compiler::verRaiseVerifyExceptionIfNeeded(INDEBUG(const char* msg) DEBUGARG(const char* file)
175 DEBUGARG(unsigned line))
177 // Remember that the code is not verifiable
178 // Note that the method may yet pass canSkipMethodVerification(),
179 // and so the presence of unverifiable code may not be an issue.
180 tiIsVerifiableCode = FALSE;
183 const char* tail = strrchr(file, '\\');
189 if (JitConfig.JitBreakOnUnsafeCode())
191 assert(!"Unsafe code detected");
195 JITLOG((LL_INFO10000, "Detected unsafe code: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
196 msg, info.compFullName, impCurOpcName, impCurOpcOffs));
198 if (verNeedsVerification() || compIsForImportOnly())
200 JITLOG((LL_ERROR, "Verification failure: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
201 msg, info.compFullName, impCurOpcName, impCurOpcOffs));
202 verRaiseVerifyException(INDEBUG(msg) DEBUGARG(file) DEBUGARG(line));
206 inline void DECLSPEC_NORETURN Compiler::verRaiseVerifyException(INDEBUG(const char* msg) DEBUGARG(const char* file)
207 DEBUGARG(unsigned line))
209 JITLOG((LL_ERROR, "Verification failure: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
210 msg, info.compFullName, impCurOpcName, impCurOpcOffs));
213 // BreakIfDebuggerPresent();
214 if (getBreakOnBadCode())
216 assert(!"Typechecking error");
220 RaiseException(SEH_VERIFICATION_EXCEPTION, EXCEPTION_NONCONTINUABLE, 0, nullptr);
224 // Helper function that tells us whether the IL instruction at the given address consumes an
225 // address from the top of the stack. We use it to avoid marking locals as address-taken unnecessarily.
227 bool Compiler::impILConsumesAddr(const BYTE* codeAddr, CORINFO_METHOD_HANDLE fncHandle, CORINFO_MODULE_HANDLE scpHandle)
229 assert(!compIsForInlining());
233 opcode = (OPCODE)getU1LittleEndian(codeAddr);
237 // case CEE_LDFLDA: We're taking this one out because if you have a sequence like
243 // ldloca followed by ldflda on a primitive-like struct, you end up after morphing with the
244 // address of a local that's not marked as address-taken, which is wrong. Also, ldflda is usually
245 // used for structs that contain other structs, which isn't a case we handle very
246 // well now for other reasons.
250 // We won't collapse small fields. This is probably not the right place to have this
251 // check, but we're only using the function for this purpose, and it is easy to factor
252 // out if we need to do so.
254 CORINFO_RESOLVED_TOKEN resolvedToken;
255 impResolveToken(codeAddr + sizeof(__int8), &resolvedToken, CORINFO_TOKENKIND_Field);
257 CORINFO_CLASS_HANDLE clsHnd;
258 var_types lclTyp = JITtype2varType(info.compCompHnd->getFieldType(resolvedToken.hField, &clsHnd));
260 // Preserve 'small' int types
261 if (lclTyp > TYP_INT)
263 lclTyp = genActualType(lclTyp);
266 if (varTypeIsSmall(lclTyp))
280 void Compiler::impResolveToken(const BYTE* addr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoTokenKind kind)
282 pResolvedToken->tokenContext = impTokenLookupContextHandle;
283 pResolvedToken->tokenScope = info.compScopeHnd;
284 pResolvedToken->token = getU4LittleEndian(addr);
285 pResolvedToken->tokenType = kind;
287 if (!tiVerificationNeeded)
289 info.compCompHnd->resolveToken(pResolvedToken);
293 Verify(eeTryResolveToken(pResolvedToken), "Token resolution failed");
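// A typical call site (see impILConsumesAddr above): resolve the field token that follows
// the current opcode before asking the EE about the field's type:
//
//     CORINFO_RESOLVED_TOKEN resolvedToken;
//     impResolveToken(codeAddr + sizeof(__int8), &resolvedToken, CORINFO_TOKENKIND_Field);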
297 /*****************************************************************************
299 * Pop one tree from the stack.
302 StackEntry Compiler::impPopStack()
304 if (verCurrentState.esStackDepth == 0)
306 BADCODE("stack underflow");
311 if (VERBOSE && tiVerificationNeeded)
314 printf(TI_DUMP_PADDING);
315 printf("About to pop from the stack: ");
316 const typeInfo& ti = verCurrentState.esStack[verCurrentState.esStackDepth - 1].seTypeInfo;
319 #endif // VERBOSE_VERIFY
322 return verCurrentState.esStack[--verCurrentState.esStackDepth];
325 StackEntry Compiler::impPopStack(CORINFO_CLASS_HANDLE& structType)
327 StackEntry ret = impPopStack();
328 structType = verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo.GetClassHandle();
332 GenTreePtr Compiler::impPopStack(typeInfo& ti)
334 StackEntry ret = impPopStack();
339 /*****************************************************************************
341 * Peek at the n'th (0-based) tree on the top of the stack.
344 StackEntry& Compiler::impStackTop(unsigned n)
346 if (verCurrentState.esStackDepth <= n)
348 BADCODE("stack underflow");
351 return verCurrentState.esStack[verCurrentState.esStackDepth - n - 1];
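// Note that impStackTop(0) is the tree on the very top of the stack, impStackTop(1) the one
// just below it, and so on.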
353 /*****************************************************************************
354 * Some of the trees are spilled in a special way. While unspilling them, or
355 * making a copy, these need special handling. The function below enumerates
356 * the operators that are possible after spilling.
359 #ifdef DEBUG // only used in asserts
360 static bool impValidSpilledStackEntry(GenTreePtr tree)
362 if (tree->gtOper == GT_LCL_VAR)
367 if (tree->OperIsConst())
376 /*****************************************************************************
378 * The following logic is used to save/restore stack contents.
379 * If 'copy' is true, then we make a copy of the trees on the stack. These
380 * have to all be cloneable/spilled values.
383 void Compiler::impSaveStackState(SavedStack* savePtr, bool copy)
385 savePtr->ssDepth = verCurrentState.esStackDepth;
387 if (verCurrentState.esStackDepth)
389 savePtr->ssTrees = new (this, CMK_ImpStack) StackEntry[verCurrentState.esStackDepth];
390 size_t saveSize = verCurrentState.esStackDepth * sizeof(*savePtr->ssTrees);
394 StackEntry* table = savePtr->ssTrees;
396 /* Make a fresh copy of all the stack entries */
398 for (unsigned level = 0; level < verCurrentState.esStackDepth; level++, table++)
400 table->seTypeInfo = verCurrentState.esStack[level].seTypeInfo;
401 GenTreePtr tree = verCurrentState.esStack[level].val;
403 assert(impValidSpilledStackEntry(tree));
405 switch (tree->gtOper)
412 table->val = gtCloneExpr(tree);
416 assert(!"Bad oper - Not covered by impValidSpilledStackEntry()");
423 memcpy(savePtr->ssTrees, verCurrentState.esStack, saveSize);
428 void Compiler::impRestoreStackState(SavedStack* savePtr)
430 verCurrentState.esStackDepth = savePtr->ssDepth;
432 if (verCurrentState.esStackDepth)
434 memcpy(verCurrentState.esStack, savePtr->ssTrees,
435 verCurrentState.esStackDepth * sizeof(*verCurrentState.esStack));
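// A minimal save/restore pairing sketch (the caller and temp name are hypothetical):
//
//     SavedStack blockState;
//     impSaveStackState(&blockState, /* copy */ true);
//     // ... import something that may clobber verCurrentState.esStack ...
//     impRestoreStackState(&blockState);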
439 /*****************************************************************************
441 * Get the tree list started for a new basic block.
443 inline void Compiler::impBeginTreeList()
445 assert(impTreeList == nullptr && impTreeLast == nullptr);
447 impTreeList = impTreeLast = new (this, GT_BEG_STMTS) GenTree(GT_BEG_STMTS, TYP_VOID);
450 /*****************************************************************************
452 * Store the given start and end stmt in the given basic block. This is
453 * mostly called by impEndTreeList(BasicBlock *block). It is called
454 * directly only for handling CEE_LEAVEs out of finally-protected try regions.
457 inline void Compiler::impEndTreeList(BasicBlock* block, GenTreePtr firstStmt, GenTreePtr lastStmt)
459 assert(firstStmt->gtOper == GT_STMT);
460 assert(lastStmt->gtOper == GT_STMT);
462 /* Make the list circular, so that we can easily walk it backwards */
464 firstStmt->gtPrev = lastStmt;
466 /* Store the tree list in the basic block */
468 block->bbTreeList = firstStmt;
470 /* The block should not already be marked as imported */
471 assert((block->bbFlags & BBF_IMPORTED) == 0);
473 block->bbFlags |= BBF_IMPORTED;
476 /*****************************************************************************
478 * Store the current tree list in the given basic block.
481 inline void Compiler::impEndTreeList(BasicBlock* block)
483 assert(impTreeList->gtOper == GT_BEG_STMTS);
485 GenTreePtr firstTree = impTreeList->gtNext;
489 /* The block should not already be marked as imported */
490 assert((block->bbFlags & BBF_IMPORTED) == 0);
492 // Empty block. Just mark it as imported
493 block->bbFlags |= BBF_IMPORTED;
497 // Ignore the GT_BEG_STMTS
498 assert(firstTree->gtPrev == impTreeList);
500 impEndTreeList(block, firstTree, impTreeLast);
504 if (impLastILoffsStmt != nullptr)
506 impLastILoffsStmt->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
507 impLastILoffsStmt = nullptr;
510 impTreeList = impTreeLast = nullptr;
514 /*****************************************************************************
516 * Check that storing the given tree doesn't mess up the semantic order. Note
517 * that this has only limited value as we can only check [0..chkLevel).
520 inline void Compiler::impAppendStmtCheck(GenTreePtr stmt, unsigned chkLevel)
525 assert(stmt->gtOper == GT_STMT);
527 if (chkLevel == (unsigned)CHECK_SPILL_ALL)
529 chkLevel = verCurrentState.esStackDepth;
532 if (verCurrentState.esStackDepth == 0 || chkLevel == 0 || chkLevel == (unsigned)CHECK_SPILL_NONE)
537 GenTreePtr tree = stmt->gtStmt.gtStmtExpr;
539 // Calls can only be appended if there are no GTF_GLOB_EFFECT on the stack
541 if (tree->gtFlags & GTF_CALL)
543 for (unsigned level = 0; level < chkLevel; level++)
545 assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_EFFECT) == 0);
549 if (tree->gtOper == GT_ASG)
551 // For an assignment to a local variable, all references of that
552 // variable have to be spilled. If it is aliased, all calls and
553 // indirect accesses have to be spilled
555 if (tree->gtOp.gtOp1->gtOper == GT_LCL_VAR)
557 unsigned lclNum = tree->gtOp.gtOp1->gtLclVarCommon.gtLclNum;
558 for (unsigned level = 0; level < chkLevel; level++)
560 assert(!gtHasRef(verCurrentState.esStack[level].val, lclNum, false));
561 assert(!lvaTable[lclNum].lvAddrExposed ||
562 (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT) == 0);
566 // If the access may be to global memory, all side effects have to be spilled.
568 else if (tree->gtOp.gtOp1->gtFlags & GTF_GLOB_REF)
570 for (unsigned level = 0; level < chkLevel; level++)
572 assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_REF) == 0);
579 /*****************************************************************************
581 * Append the given GT_STMT node to the current block's tree list.
582 * [0..chkLevel) is the portion of the stack which we will check for
583 * interference with stmt and spill if needed.
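// For example, a chkLevel of CHECK_SPILL_ALL means the entire evaluation stack is checked
// (it is expanded to verCurrentState.esStackDepth below), while CHECK_SPILL_NONE skips the
// interference check and spilling altogether.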
586 inline void Compiler::impAppendStmt(GenTreePtr stmt, unsigned chkLevel)
588 assert(stmt->gtOper == GT_STMT);
589 noway_assert(impTreeLast != nullptr);
591 /* If the statement being appended has any side-effects, check the stack
592 to see if anything needs to be spilled to preserve correct ordering. */
594 GenTreePtr expr = stmt->gtStmt.gtStmtExpr;
595 unsigned flags = expr->gtFlags & GTF_GLOB_EFFECT;
597 // Assignments to (unaliased) locals don't count as a side-effect as
598 // we handle them specially using impSpillLclRefs(). Temp locals should be fine too.
601 if ((expr->gtOper == GT_ASG) && (expr->gtOp.gtOp1->gtOper == GT_LCL_VAR) &&
602 !(expr->gtOp.gtOp1->gtFlags & GTF_GLOB_REF) && !gtHasLocalsWithAddrOp(expr->gtOp.gtOp2))
604 unsigned op2Flags = expr->gtOp.gtOp2->gtFlags & GTF_GLOB_EFFECT;
605 assert(flags == (op2Flags | GTF_ASG));
609 if (chkLevel == (unsigned)CHECK_SPILL_ALL)
611 chkLevel = verCurrentState.esStackDepth;
614 if (chkLevel && chkLevel != (unsigned)CHECK_SPILL_NONE)
616 assert(chkLevel <= verCurrentState.esStackDepth);
620 // If there is a call, we have to spill global refs
621 bool spillGlobEffects = (flags & GTF_CALL) ? true : false;
623 if (expr->gtOper == GT_ASG)
625 GenTree* lhs = expr->gtGetOp1();
626 // If we are assigning to a global ref, we have to spill global refs on stack.
627 // TODO-1stClassStructs: Previously, spillGlobEffects was set to true for
628 // GT_INITBLK and GT_COPYBLK, but this is overly conservative, and should be
629 // revisited. (Note that it was NOT set to true for GT_COPYOBJ.)
630 if (!expr->OperIsBlkOp())
632 // If we are assigning to a global ref, we have to spill global refs on stack
633 if ((lhs->gtFlags & GTF_GLOB_REF) != 0)
635 spillGlobEffects = true;
638 else if ((lhs->OperIsBlk() && !lhs->AsBlk()->HasGCPtr()) ||
639 ((lhs->OperGet() == GT_LCL_VAR) &&
640 (lvaTable[lhs->AsLclVarCommon()->gtLclNum].lvStructGcCount == 0)))
642 spillGlobEffects = true;
646 impSpillSideEffects(spillGlobEffects, chkLevel DEBUGARG("impAppendStmt"));
650 impSpillSpecialSideEff();
654 impAppendStmtCheck(stmt, chkLevel);
656 /* Point 'prev' at the previous node, so that we can walk backwards */
658 stmt->gtPrev = impTreeLast;
660 /* Append the expression statement to the list */
662 impTreeLast->gtNext = stmt;
666 impMarkContiguousSIMDFieldAssignments(stmt);
669 /* Once we set impCurStmtOffs in an appended tree, we are ready to
670 report the following offsets. So reset impCurStmtOffs */
672 if (impTreeLast->gtStmt.gtStmtILoffsx == impCurStmtOffs)
674 impCurStmtOffsSet(BAD_IL_OFFSET);
678 if (impLastILoffsStmt == nullptr)
680 impLastILoffsStmt = stmt;
691 /*****************************************************************************
693 * Insert the given GT_STMT "stmt" before GT_STMT "stmtBefore"
696 inline void Compiler::impInsertStmtBefore(GenTreePtr stmt, GenTreePtr stmtBefore)
698 assert(stmt->gtOper == GT_STMT);
699 assert(stmtBefore->gtOper == GT_STMT);
701 GenTreePtr stmtPrev = stmtBefore->gtPrev;
702 stmt->gtPrev = stmtPrev;
703 stmt->gtNext = stmtBefore;
704 stmtPrev->gtNext = stmt;
705 stmtBefore->gtPrev = stmt;
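// Pictorially, the statement links become:
//
//     before:   stmtPrev <-> stmtBefore
//     after:    stmtPrev <-> stmt <-> stmtBefore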
708 /*****************************************************************************
710 * Append the given expression tree to the current block's tree list.
711 * Return the newly created statement.
714 GenTreePtr Compiler::impAppendTree(GenTreePtr tree, unsigned chkLevel, IL_OFFSETX offset)
718 /* Allocate an 'expression statement' node */
720 GenTreePtr expr = gtNewStmt(tree, offset);
722 /* Append the statement to the current block's stmt list */
724 impAppendStmt(expr, chkLevel);
729 /*****************************************************************************
731 * Insert the given expression tree before GT_STMT "stmtBefore"
734 void Compiler::impInsertTreeBefore(GenTreePtr tree, IL_OFFSETX offset, GenTreePtr stmtBefore)
736 assert(stmtBefore->gtOper == GT_STMT);
738 /* Allocate an 'expression statement' node */
740 GenTreePtr expr = gtNewStmt(tree, offset);
742 /* Insert the statement into the current block's stmt list, before 'stmtBefore' */
744 impInsertStmtBefore(expr, stmtBefore);
747 /*****************************************************************************
749 * Append an assignment of the given value to a temp to the current tree list.
750 * curLevel is the stack level for which the spill to the temp is being done.
753 void Compiler::impAssignTempGen(unsigned tmp,
756 GenTreePtr* pAfterStmt, /* = NULL */
757 IL_OFFSETX ilOffset, /* = BAD_IL_OFFSET */
758 BasicBlock* block /* = NULL */
761 GenTreePtr asg = gtNewTempAssign(tmp, val);
763 if (!asg->IsNothingNode())
767 GenTreePtr asgStmt = gtNewStmt(asg, ilOffset);
768 *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, asgStmt);
772 impAppendTree(asg, curLevel, impCurStmtOffs);
777 /*****************************************************************************
778 * same as above, but handle the valueclass case too
781 void Compiler::impAssignTempGen(unsigned tmpNum,
783 CORINFO_CLASS_HANDLE structType,
785 GenTreePtr* pAfterStmt, /* = NULL */
786 IL_OFFSETX ilOffset, /* = BAD_IL_OFFSET */
787 BasicBlock* block /* = NULL */
792 if (varTypeIsStruct(val))
794 assert(tmpNum < lvaCount);
795 assert(structType != NO_CLASS_HANDLE);
797 // If the method is non-verifiable, the assert is not true,
798 // so at least ignore it when verification is turned on,
799 // since any block that tries to use the temp would have failed verification.
800 var_types varType = lvaTable[tmpNum].lvType;
801 assert(tiVerificationNeeded || varType == TYP_UNDEF || varTypeIsStruct(varType));
802 lvaSetStruct(tmpNum, structType, false);
804 // Now, set the type of the struct value. Note that lvaSetStruct may modify the type
805 // of the lclVar to a specialized type (e.g. TYP_SIMD), based on the handle (structType)
806 // that has been passed in for the value being assigned to the temp, in which case we
807 // need to set 'val' to that same type.
808 // Note also that if we always normalized the types of any node that might be a struct
809 // type, this would not be necessary - but that requires additional JIT/EE interface
810 // calls that may not actually be required - e.g. if we only access a field of a struct.
812 val->gtType = lvaTable[tmpNum].lvType;
814 GenTreePtr dst = gtNewLclvNode(tmpNum, val->gtType);
815 asg = impAssignStruct(dst, val, structType, curLevel, pAfterStmt, block);
819 asg = gtNewTempAssign(tmpNum, val);
822 if (!asg->IsNothingNode())
826 GenTreePtr asgStmt = gtNewStmt(asg, ilOffset);
827 *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, asgStmt);
831 impAppendTree(asg, curLevel, impCurStmtOffs);
836 /*****************************************************************************
838 * Pop the given number of values from the stack and return a list node with their values.
840 * The 'prefixTree' argument may optionally contain an argument
841 * list that is prepended to the list returned from this function.
843 * The notion of prepended is a bit misleading in that the list is backwards
844 * from the way I would expect: The first element popped is at the end of
845 * the returned list, and prefixTree is 'before' that, meaning closer to
846 * the end of the list. To get to prefixTree, you have to walk to the end of the list.
849 * For ARG_ORDER_R2L prefixTree is only used to insert extra arguments, as
850 * such we reverse its meaning such that returnValue has a reversed
851 * prefixTree at the head of the list.
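// For example (a sketch for the ARG_ORDER_L2R case): popping t1 first, then t2, then t3,
// with a non-null prefixTree P, yields the list t3 -> t2 -> t1 -> P when walked via Rest().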
854 GenTreeArgList* Compiler::impPopList(unsigned count,
856 CORINFO_SIG_INFO* sig,
857 GenTreeArgList* prefixTree)
859 assert(sig == nullptr || count == sig->numArgs);
862 CORINFO_CLASS_HANDLE structType;
863 GenTreeArgList* treeList;
865 if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L)
871 treeList = prefixTree;
876 StackEntry se = impPopStack();
877 typeInfo ti = se.seTypeInfo;
878 GenTreePtr temp = se.val;
880 if (varTypeIsStruct(temp))
882 // Morph trees that aren't already OBJs or MKREFANY to be OBJs
883 assert(ti.IsType(TI_STRUCT));
884 structType = ti.GetClassHandleForValueClass();
885 temp = impNormStructVal(temp, structType, (unsigned)CHECK_SPILL_ALL);
888 /* NOTE: we defer bashing the type for I_IMPL to fgMorphArgs */
889 flags |= temp->gtFlags;
890 treeList = gtNewListNode(temp, treeList);
897 if (sig->retTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS &&
898 sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR && sig->retType != CORINFO_TYPE_VAR)
900 // Make sure that all valuetypes (including enums) that we push are loaded.
901 // This is to guarantee that if a GC is triggered from the prestub of this method,
902 // all valuetypes in the method signature are already loaded.
903 // We need to be able to find the size of the valuetypes, but we cannot
904 // do a class-load from within GC.
905 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(sig->retTypeSigClass);
908 CORINFO_ARG_LIST_HANDLE argLst = sig->args;
909 CORINFO_CLASS_HANDLE argClass;
910 CORINFO_CLASS_HANDLE argRealClass;
911 GenTreeArgList* args;
914 for (args = treeList, count = sig->numArgs; count > 0; args = args->Rest(), count--)
916 PREFIX_ASSUME(args != nullptr);
918 CorInfoType corType = strip(info.compCompHnd->getArgType(sig, argLst, &argClass));
920 // insert implied casts (from float to double or double to float)
922 if (corType == CORINFO_TYPE_DOUBLE && args->Current()->TypeGet() == TYP_FLOAT)
924 args->Current() = gtNewCastNode(TYP_DOUBLE, args->Current(), TYP_DOUBLE);
926 else if (corType == CORINFO_TYPE_FLOAT && args->Current()->TypeGet() == TYP_DOUBLE)
928 args->Current() = gtNewCastNode(TYP_FLOAT, args->Current(), TYP_FLOAT);
931 // insert any widening or narrowing casts for backwards compatibility
933 args->Current() = impImplicitIorI4Cast(args->Current(), JITtype2varType(corType));
935 if (corType != CORINFO_TYPE_CLASS && corType != CORINFO_TYPE_BYREF && corType != CORINFO_TYPE_PTR &&
936 corType != CORINFO_TYPE_VAR && (argRealClass = info.compCompHnd->getArgClass(sig, argLst)) != nullptr)
938 // Everett MC++ could generate IL with mismatched valuetypes. It used to work with the Everett JIT, but it
939 // stopped working in Whidbey when we started passing simple valuetypes as their underlying primitive types.
941 // We will try to adjust for this case here to avoid breaking customers' code (see VSW 485789 for details).
943 if (corType == CORINFO_TYPE_VALUECLASS && !varTypeIsStruct(args->Current()))
945 args->Current() = impNormStructVal(args->Current(), argRealClass, (unsigned)CHECK_SPILL_ALL, true);
948 // Make sure that all valuetypes (including enums) that we push are loaded.
949 // This is to guarantee that if a GC is triggered from the prestub of this method,
950 // all valuetypes in the method signature are already loaded.
951 // We need to be able to find the size of the valuetypes, but we cannot
952 // do a class-load from within GC.
953 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(argRealClass);
956 argLst = info.compCompHnd->getArgNext(argLst);
960 if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L)
962 // Prepend the prefixTree
964 // Simple in-place reversal to place treeList
965 // at the end of a reversed prefixTree
966 while (prefixTree != nullptr)
968 GenTreeArgList* next = prefixTree->Rest();
969 prefixTree->Rest() = treeList;
970 treeList = prefixTree;
977 /*****************************************************************************
979 * Pop the given number of values from the stack in reverse order (STDCALL/CDECL etc.)
980 * The first "skipReverseCount" items are not reversed.
983 GenTreeArgList* Compiler::impPopRevList(unsigned count,
985 CORINFO_SIG_INFO* sig,
986 unsigned skipReverseCount)
989 assert(skipReverseCount <= count);
991 GenTreeArgList* list = impPopList(count, flagsPtr, sig);
994 if (list == nullptr || skipReverseCount == count)
999 GenTreeArgList* ptr = nullptr; // Initialized to the first node that needs to be reversed
1000 GenTreeArgList* lastSkipNode = nullptr; // Will be set to the last node that does not need to be reversed
1002 if (skipReverseCount == 0)
1008 lastSkipNode = list;
1009 // Get to the first node that needs to be reversed
1010 for (unsigned i = 0; i < skipReverseCount - 1; i++)
1012 lastSkipNode = lastSkipNode->Rest();
1015 PREFIX_ASSUME(lastSkipNode != nullptr);
1016 ptr = lastSkipNode->Rest();
1019 GenTreeArgList* reversedList = nullptr;
1023 GenTreeArgList* tmp = ptr->Rest();
1024 ptr->Rest() = reversedList;
1027 } while (ptr != nullptr);
1029 if (skipReverseCount)
1031 lastSkipNode->Rest() = reversedList;
1036 return reversedList;
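// For example (sketch): with count == 4 and skipReverseCount == 2, an input list
// a -> b -> c -> d comes back as a -> b -> d -> c: the first two nodes keep their order
// and the remainder is reversed in place.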
1040 /*****************************************************************************
1041 Assign (copy) the structure from 'src' to 'dest'. The structure is a value
1042 class of type 'structHnd'. It returns the tree that should be appended to the
1043 statement list that represents the assignment.
1044 Temp assignments may be appended to impTreeList if spilling is necessary.
1045 curLevel is the stack level for which a spill may be done.
1048 GenTreePtr Compiler::impAssignStruct(GenTreePtr dest,
1050 CORINFO_CLASS_HANDLE structHnd,
1052 GenTreePtr* pAfterStmt, /* = NULL */
1053 BasicBlock* block /* = NULL */
1056 assert(varTypeIsStruct(dest));
1058 while (dest->gtOper == GT_COMMA)
1060 assert(varTypeIsStruct(dest->gtOp.gtOp2)); // Second thing is the struct
1062 // Append all the op1 of GT_COMMA trees before we evaluate op2 of the GT_COMMA tree.
1065 *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(dest->gtOp.gtOp1, impCurStmtOffs));
1069 impAppendTree(dest->gtOp.gtOp1, curLevel, impCurStmtOffs); // do the side effect
1072 // set dest to the second thing
1073 dest = dest->gtOp.gtOp2;
1076 assert(dest->gtOper == GT_LCL_VAR || dest->gtOper == GT_RETURN || dest->gtOper == GT_FIELD ||
1077 dest->gtOper == GT_IND || dest->gtOper == GT_OBJ || dest->gtOper == GT_INDEX);
1079 if (dest->OperGet() == GT_LCL_VAR && src->OperGet() == GT_LCL_VAR &&
1080 src->gtLclVarCommon.gtLclNum == dest->gtLclVarCommon.gtLclNum)
1083 return gtNewNothingNode();
1086 // TODO-1stClassStructs: Avoid creating an address if it is not needed,
1087 // or re-creating a Blk node if it is.
1088 GenTreePtr destAddr;
1090 if (dest->gtOper == GT_IND || dest->OperIsBlk())
1092 destAddr = dest->gtOp.gtOp1;
1096 destAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, dest);
1099 return (impAssignStructPtr(destAddr, src, structHnd, curLevel, pAfterStmt, block));
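// Note that all struct assignments are funneled through the destination address: 'dest' is
// reduced to 'destAddr' above, and impAssignStructPtr builds the actual store tree.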
1102 /*****************************************************************************/
1104 GenTreePtr Compiler::impAssignStructPtr(GenTreePtr destAddr,
1106 CORINFO_CLASS_HANDLE structHnd,
1108 GenTreePtr* pAfterStmt, /* = NULL */
1109 BasicBlock* block /* = NULL */
1113 GenTreePtr dest = nullptr;
1114 unsigned destFlags = 0;
1116 #if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1117 assert(varTypeIsStruct(src) || (src->gtOper == GT_ADDR && src->TypeGet() == TYP_BYREF));
1118 // TODO-ARM-BUG: Does ARM need this?
1119 // TODO-ARM64-BUG: Does ARM64 need this?
1120 assert(src->gtOper == GT_LCL_VAR || src->gtOper == GT_FIELD || src->gtOper == GT_IND || src->gtOper == GT_OBJ ||
1121 src->gtOper == GT_CALL || src->gtOper == GT_MKREFANY || src->gtOper == GT_RET_EXPR ||
1122 src->gtOper == GT_COMMA || src->gtOper == GT_ADDR ||
1123 (src->TypeGet() != TYP_STRUCT && (GenTree::OperIsSIMD(src->gtOper) || src->gtOper == GT_LCL_FLD)));
1124 #else // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1125 assert(varTypeIsStruct(src));
1127 assert(src->gtOper == GT_LCL_VAR || src->gtOper == GT_FIELD || src->gtOper == GT_IND || src->gtOper == GT_OBJ ||
1128 src->gtOper == GT_CALL || src->gtOper == GT_MKREFANY || src->gtOper == GT_RET_EXPR ||
1129 src->gtOper == GT_COMMA ||
1130 (src->TypeGet() != TYP_STRUCT && (GenTree::OperIsSIMD(src->gtOper) || src->gtOper == GT_LCL_FLD)));
1131 #endif // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1132 if (destAddr->OperGet() == GT_ADDR)
1134 GenTree* destNode = destAddr->gtGetOp1();
1135 // If the actual destination is a local (for non-LEGACY_BACKEND), or already a block node, or is a node that
1136 // will be morphed, don't insert an OBJ(ADDR).
1137 if (destNode->gtOper == GT_INDEX || destNode->OperIsBlk()
1138 #ifndef LEGACY_BACKEND
1139 || ((destNode->OperGet() == GT_LCL_VAR) && (destNode->TypeGet() == src->TypeGet()))
1140 #endif // !LEGACY_BACKEND
1145 destType = destNode->TypeGet();
1149 destType = src->TypeGet();
1152 var_types asgType = src->TypeGet();
1154 if (src->gtOper == GT_CALL)
1156 if (src->AsCall()->TreatAsHasRetBufArg(this))
1158 // Case of call returning a struct via hidden retbuf arg
1160 // insert the return value buffer into the argument list as first byref parameter
1161 src->gtCall.gtCallArgs = gtNewListNode(destAddr, src->gtCall.gtCallArgs);
1163 // now returns void, not a struct
1164 src->gtType = TYP_VOID;
1166 // return the morphed call node
1171 // Case of call returning a struct in one or more registers.
1173 var_types returnType = (var_types)src->gtCall.gtReturnType;
1175 // We won't use a return buffer, so change the type of src->gtType to 'returnType'
1176 src->gtType = genActualType(returnType);
1178 // First we try to change this to "LclVar/LclFld = call"
1180 if ((destAddr->gtOper == GT_ADDR) && (destAddr->gtOp.gtOp1->gtOper == GT_LCL_VAR))
1182 // If it is a multi-reg struct return, don't change the oper to GT_LCL_FLD.
1183 // That is, the IR will be of the form lclVar = call for multi-reg return
1185 GenTreePtr lcl = destAddr->gtOp.gtOp1;
1186 if (src->AsCall()->HasMultiRegRetVal())
1188 // Mark the struct LclVar as used in a MultiReg return context
1189 // which currently makes it non promotable.
1190 // TODO-1stClassStructs: Eliminate this pessimization when we can more generally
1191 // handle multireg returns.
1192 lcl->gtFlags |= GTF_DONT_CSE;
1193 lvaTable[lcl->gtLclVarCommon.gtLclNum].lvIsMultiRegRet = true;
1195 else // The call result is not a multireg return
1197 // We change this to a GT_LCL_FLD (from a GT_ADDR of a GT_LCL_VAR)
1198 lcl->ChangeOper(GT_LCL_FLD);
1199 fgLclFldAssign(lcl->gtLclVarCommon.gtLclNum);
1202 lcl->gtType = src->gtType;
1203 asgType = src->gtType;
1206 #if defined(_TARGET_ARM_)
1207 // TODO-Cleanup: This should have been taken care of in the above HasMultiRegRetVal() case,
1208 // but that method has not been updated to include ARM.
1209 impMarkLclDstNotPromotable(lcl->gtLclVarCommon.gtLclNum, src, structHnd);
1210 lcl->gtFlags |= GTF_DONT_CSE;
1211 #elif defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1212 // Not allowed for FEATURE_CORECLR which is the only SKU available for System V OSs.
1213 assert(!src->gtCall.IsVarargs() && "varargs not allowed for System V OSs.");
1215 // Make the struct non promotable. The eightbytes could contain multiple fields.
1216 // TODO-1stClassStructs: Eliminate this pessimization when we can more generally
1217 // handle multireg returns.
1218 // TODO-Cleanup: Why is this needed here? This seems that it will set this even for
1219 // non-multireg returns.
1220 lcl->gtFlags |= GTF_DONT_CSE;
1221 lvaTable[lcl->gtLclVarCommon.gtLclNum].lvIsMultiRegRet = true;
1224 else // we don't have a GT_ADDR of a GT_LCL_VAR
1226 // !!! The destination could be on stack. !!!
1227 // This flag will let us choose the correct write barrier.
1228 asgType = returnType;
1229 destFlags = GTF_IND_TGTANYWHERE;
1233 else if (src->gtOper == GT_RET_EXPR)
1235 GenTreePtr call = src->gtRetExpr.gtInlineCandidate;
1236 noway_assert(call->gtOper == GT_CALL);
1238 if (call->AsCall()->HasRetBufArg())
1240 // insert the return value buffer into the argument list as first byref parameter
1241 call->gtCall.gtCallArgs = gtNewListNode(destAddr, call->gtCall.gtCallArgs);
1243 // now returns void, not a struct
1244 src->gtType = TYP_VOID;
1245 call->gtType = TYP_VOID;
1247 // We have already appended the write to 'dest' via the GT_CALL's args,
1248 // so now we just return an empty node (pruning the GT_RET_EXPR)
1253 // Case of inline method returning a struct in one or more registers.
1255 var_types returnType = (var_types)call->gtCall.gtReturnType;
1257 // We won't need a return buffer
1258 asgType = returnType;
1259 src->gtType = genActualType(returnType);
1260 call->gtType = src->gtType;
1262 // If we've changed the type, and it no longer matches a local destination,
1263 // we must use an indirection.
1264 if ((dest != nullptr) && (dest->OperGet() == GT_LCL_VAR) && (dest->TypeGet() != asgType))
1269 // !!! The destination could be on stack. !!!
1270 // This flag will let us choose the correct write barrier.
1271 destFlags = GTF_IND_TGTANYWHERE;
1274 else if (src->OperIsBlk())
1276 asgType = impNormStructType(structHnd);
1277 if (src->gtOper == GT_OBJ)
1279 assert(src->gtObj.gtClass == structHnd);
1282 else if (src->gtOper == GT_INDEX)
1284 asgType = impNormStructType(structHnd);
1285 assert(src->gtIndex.gtStructElemClass == structHnd);
1287 else if (src->gtOper == GT_MKREFANY)
1289 // Since we are assigning the result of a GT_MKREFANY,
1290 // "destAddr" must point to a refany.
1292 GenTreePtr destAddrClone;
1294 impCloneExpr(destAddr, &destAddrClone, structHnd, curLevel, pAfterStmt DEBUGARG("MKREFANY assignment"));
1296 assert(offsetof(CORINFO_RefAny, dataPtr) == 0);
1297 assert(destAddr->gtType == TYP_I_IMPL || destAddr->gtType == TYP_BYREF);
1298 GetZeroOffsetFieldMap()->Set(destAddr, GetFieldSeqStore()->CreateSingleton(GetRefanyDataField()));
1299 GenTreePtr ptrSlot = gtNewOperNode(GT_IND, TYP_I_IMPL, destAddr);
1300 GenTreeIntCon* typeFieldOffset = gtNewIconNode(offsetof(CORINFO_RefAny, type), TYP_I_IMPL);
1301 typeFieldOffset->gtFieldSeq = GetFieldSeqStore()->CreateSingleton(GetRefanyTypeField());
1302 GenTreePtr typeSlot =
1303 gtNewOperNode(GT_IND, TYP_I_IMPL, gtNewOperNode(GT_ADD, destAddr->gtType, destAddrClone, typeFieldOffset));
1305 // append the assign of the pointer value
1306 GenTreePtr asg = gtNewAssignNode(ptrSlot, src->gtOp.gtOp1);
1309 *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(asg, impCurStmtOffs));
1313 impAppendTree(asg, curLevel, impCurStmtOffs);
1316 // return the assign of the type value, to be appended
1317 return gtNewAssignNode(typeSlot, src->gtOp.gtOp2);
1319 else if (src->gtOper == GT_COMMA)
1321 // The second thing is the struct or its address.
1322 assert(varTypeIsStruct(src->gtOp.gtOp2) || src->gtOp.gtOp2->gtType == TYP_BYREF);
1325 *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(src->gtOp.gtOp1, impCurStmtOffs));
1329 impAppendTree(src->gtOp.gtOp1, curLevel, impCurStmtOffs); // do the side effect
1332 // Evaluate the second thing using recursion.
1333 return impAssignStructPtr(destAddr, src->gtOp.gtOp2, structHnd, curLevel, pAfterStmt, block);
1335 else if (src->IsLocal())
1337 asgType = src->TypeGet();
1339 else if (asgType == TYP_STRUCT)
1341 asgType = impNormStructType(structHnd);
1342 src->gtType = asgType;
1343 #ifdef LEGACY_BACKEND
1344 if (asgType == TYP_STRUCT)
1346 GenTree* srcAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, src);
1347 src = gtNewOperNode(GT_IND, TYP_STRUCT, srcAddr);
1351 if (dest == nullptr)
1353 // TODO-1stClassStructs: We shouldn't really need a block node as the destination
1354 // if this is a known struct type.
1355 if (asgType == TYP_STRUCT)
1357 dest = gtNewObjNode(structHnd, destAddr);
1358 gtSetObjGcInfo(dest->AsObj());
1359 // Although an obj as a call argument was always assumed to be a globRef
1360 // (which is itself overly conservative), that is not true of the operands
1361 // of a block assignment.
1362 dest->gtFlags &= ~GTF_GLOB_REF;
1363 dest->gtFlags |= (destAddr->gtFlags & GTF_GLOB_REF);
1365 else if (varTypeIsStruct(asgType))
1367 dest = new (this, GT_BLK) GenTreeBlk(GT_BLK, asgType, destAddr, genTypeSize(asgType));
1371 dest = gtNewOperNode(GT_IND, asgType, destAddr);
1376 dest->gtType = asgType;
1379 dest->gtFlags |= destFlags;
1380 destFlags = dest->gtFlags;
1382 // return an assignment node, to be appended
1383 GenTree* asgNode = gtNewAssignNode(dest, src);
1384 gtBlockOpInit(asgNode, dest, src, false);
1386 // TODO-1stClassStructs: Clean up the settings of GTF_DONT_CSE on the lhs
1388 if ((destFlags & GTF_DONT_CSE) == 0)
1390 dest->gtFlags &= ~(GTF_DONT_CSE);
1395 /*****************************************************************************
1396 Given a struct value, and the class handle for that structure, return
1397 the expression for the address for that structure value.
1399 willDeref - does the caller guarantee to dereference the pointer.
1402 GenTreePtr Compiler::impGetStructAddr(GenTreePtr structVal,
1403 CORINFO_CLASS_HANDLE structHnd,
1407 assert(varTypeIsStruct(structVal) || eeIsValueClass(structHnd));
1409 var_types type = structVal->TypeGet();
1411 genTreeOps oper = structVal->gtOper;
1413 if (oper == GT_OBJ && willDeref)
1415 assert(structVal->gtObj.gtClass == structHnd);
1416 return (structVal->gtObj.Addr());
1418 else if (oper == GT_CALL || oper == GT_RET_EXPR || oper == GT_OBJ || oper == GT_MKREFANY)
1420 unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));
1422 impAssignTempGen(tmpNum, structVal, structHnd, curLevel);
1424 // The 'return value' is now the temp itself
1426 type = genActualType(lvaTable[tmpNum].TypeGet());
1427 GenTreePtr temp = gtNewLclvNode(tmpNum, type);
1428 temp = gtNewOperNode(GT_ADDR, TYP_BYREF, temp);
1431 else if (oper == GT_COMMA)
1433 assert(structVal->gtOp.gtOp2->gtType == type); // Second thing is the struct
1435 GenTreePtr oldTreeLast = impTreeLast;
1436 structVal->gtOp.gtOp2 = impGetStructAddr(structVal->gtOp.gtOp2, structHnd, curLevel, willDeref);
1437 structVal->gtType = TYP_BYREF;
1439 if (oldTreeLast != impTreeLast)
1441 // Some temp assignment statement was placed on the statement list
1442 // for Op2, but that would be out of order with op1, so we need to
1443 // spill op1 onto the statement list after whatever was last
1444 // before we recursed on Op2 (i.e. before whatever Op2 appended).
1445 impInsertTreeBefore(structVal->gtOp.gtOp1, impCurStmtOffs, oldTreeLast->gtNext);
1446 structVal->gtOp.gtOp1 = gtNewNothingNode();
1452 return (gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1455 //------------------------------------------------------------------------
1456 // impNormStructType: Given a (known to be) struct class handle structHnd, normalize its type,
1457 // and optionally determine the GC layout of the struct.
1460 // structHnd - The class handle for the struct type of interest.
1461 // gcLayout - (optional, default nullptr) - a BYTE pointer, allocated by the caller,
1462 // into which the gcLayout will be written.
1463 // pNumGCVars - (optional, default nullptr) - if non-null, a pointer to an unsigned,
1464 // which will be set to the number of GC fields in the struct.
1465 // pSimdBaseType - (optional, default nullptr) - if non-null, and the struct is a SIMD
1466 // type, set to the SIMD base type
1469 // The JIT type for the struct (e.g. TYP_STRUCT, or TYP_SIMD*).
1470 // The gcLayout will be returned using the pointers provided by the caller, if non-null.
1471 // It may also modify the compFloatingPointUsed flag if the type is a SIMD type.
1474 // The caller must set gcLayout to nullptr OR ensure that it is large enough
1475 // (see ICorStaticInfo::getClassGClayout in corinfo.h).
1478 // Normalizing the type involves examining the struct type to determine if it should
1479 // be modified to one that is handled specially by the JIT, possibly being a candidate
1480 // for full enregistration, e.g. TYP_SIMD16.
1482 var_types Compiler::impNormStructType(CORINFO_CLASS_HANDLE structHnd,
1484 unsigned* pNumGCVars,
1485 var_types* pSimdBaseType)
1487 assert(structHnd != NO_CLASS_HANDLE);
1489 const DWORD structFlags = info.compCompHnd->getClassAttribs(structHnd);
1490 var_types structType = TYP_STRUCT;
1492 // On coreclr the check for GC includes a "may" to account for the special
1493 // ByRef-like span structs. The added check for "CONTAINS_STACK_PTR" is the particular bit.
1494 // When this is set, the struct will contain a ByRef that could be a GC pointer or a native pointer.
1496 const bool mayContainGCPtrs =
1497 ((structFlags & CORINFO_FLG_CONTAINS_STACK_PTR) != 0 || ((structFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0));
1500 // Check to see if this is a SIMD type.
1501 if (featureSIMD && !mayContainGCPtrs)
1503 unsigned originalSize = info.compCompHnd->getClassSize(structHnd);
1505 if ((originalSize >= minSIMDStructBytes()) && (originalSize <= maxSIMDStructBytes()))
1507 unsigned int sizeBytes;
1508 var_types simdBaseType = getBaseTypeAndSizeOfSIMDType(structHnd, &sizeBytes);
1509 if (simdBaseType != TYP_UNKNOWN)
1511 assert(sizeBytes == originalSize);
1512 structType = getSIMDTypeForSize(sizeBytes);
1513 if (pSimdBaseType != nullptr)
1515 *pSimdBaseType = simdBaseType;
1517 #ifdef _TARGET_AMD64_
1518 // Amd64: also indicate that we use floating point registers
1519 compFloatingPointUsed = true;
1524 #endif // FEATURE_SIMD
1526 // Fetch GC layout info if requested
1527 if (gcLayout != nullptr)
1529 unsigned numGCVars = info.compCompHnd->getClassGClayout(structHnd, gcLayout);
1531 // Verify that the quick test up above via the class attributes gave a
1532 // safe view of the type's GCness.
1534 // Note there are cases where mayContainGCPtrs is true but getClassGClayout
1535 // does not report any gc fields.
1537 assert(mayContainGCPtrs || (numGCVars == 0));
1539 if (pNumGCVars != nullptr)
1541 *pNumGCVars = numGCVars;
1546 // Can't safely ask for number of GC pointers without also
1547 // asking for layout.
1548 assert(pNumGCVars == nullptr);
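// Usage sketch: impNormStructVal below calls this to pick the representation of a struct value,
// e.g. (hypothetical handle)
//
//     var_types structType = impNormStructType(structHnd);  // TYP_STRUCT, or e.g. TYP_SIMD16
//                                                           // for a 16-byte SIMD struct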
1554 //****************************************************************************
1555 // Given TYP_STRUCT value 'structVal', make sure it is 'canonical', that is
1556 // it is either an OBJ or a MKREFANY node, or a node (e.g. GT_INDEX) that will be morphed.
1558 GenTreePtr Compiler::impNormStructVal(GenTreePtr structVal,
1559 CORINFO_CLASS_HANDLE structHnd,
1561 bool forceNormalization /*=false*/)
1563 assert(forceNormalization || varTypeIsStruct(structVal));
1564 assert(structHnd != NO_CLASS_HANDLE);
1565 var_types structType = structVal->TypeGet();
1566 bool makeTemp = false;
1567 if (structType == TYP_STRUCT)
1569 structType = impNormStructType(structHnd);
1571 bool alreadyNormalized = false;
1572 GenTreeLclVarCommon* structLcl = nullptr;
1574 genTreeOps oper = structVal->OperGet();
1577 // GT_RETURN and GT_MKREFANY don't capture the handle.
1581 alreadyNormalized = true;
1585 structVal->gtCall.gtRetClsHnd = structHnd;
1590 structVal->gtRetExpr.gtRetClsHnd = structHnd;
1595 structVal->gtArgPlace.gtArgPlaceClsHnd = structHnd;
1599 // This will be transformed to an OBJ later.
1600 alreadyNormalized = true;
1601 structVal->gtIndex.gtStructElemClass = structHnd;
1602 structVal->gtIndex.gtIndElemSize = info.compCompHnd->getClassSize(structHnd);
1606 // Wrap it in a GT_OBJ.
1607 structVal->gtType = structType;
1608 structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1613 structLcl = structVal->AsLclVarCommon();
1614 // Wrap it in a GT_OBJ.
1615 structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1622 // These should already have the appropriate type.
1623 assert(structVal->gtType == structType);
1624 alreadyNormalized = true;
1628 assert(structVal->gtType == structType);
1629 structVal = gtNewObjNode(structHnd, structVal->gtGetOp1());
1630 alreadyNormalized = true;
1635 assert(varTypeIsSIMD(structVal) && (structVal->gtType == structType));
1637 #endif // FEATURE_SIMD
1641 // The second thing could either be a block node or a GT_SIMD or a GT_COMMA node.
1642 GenTree* blockNode = structVal->gtOp.gtOp2;
1643 assert(blockNode->gtType == structType);
1645 // Is this GT_COMMA(op1, GT_COMMA())?
1646 GenTree* parent = structVal;
1647 if (blockNode->OperGet() == GT_COMMA)
1649 // Find the last node in the comma chain.
1652 assert(blockNode->gtType == structType);
1654 blockNode = blockNode->gtOp.gtOp2;
1655 } while (blockNode->OperGet() == GT_COMMA);
1659 if (blockNode->OperGet() == GT_SIMD)
1661 parent->gtOp.gtOp2 = impNormStructVal(blockNode, structHnd, curLevel, forceNormalization);
1662 alreadyNormalized = true;
1667 assert(blockNode->OperIsBlk());
1669 // Sink the GT_COMMA below the blockNode addr.
1670 // That is, GT_COMMA(op1, op2=blockNode) is transformed into
1671 // blockNode(GT_COMMA(TYP_BYREF, op1, op2's op1)).
1673 // In the case of a chained GT_COMMA, we sink the last
1674 // GT_COMMA below the blockNode addr.
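// For example (sketch):  COMMA(sideEffect, OBJ(addr))  becomes  OBJ(COMMA(sideEffect, addr)),
// with the COMMA retyped to TYP_BYREF.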
1675 GenTree* blockNodeAddr = blockNode->gtOp.gtOp1;
1676 assert(blockNodeAddr->gtType == TYP_BYREF);
1677 GenTree* commaNode = parent;
1678 commaNode->gtType = TYP_BYREF;
1679 commaNode->gtOp.gtOp2 = blockNodeAddr;
1680 blockNode->gtOp.gtOp1 = commaNode;
1681 if (parent == structVal)
1683 structVal = blockNode;
1685 alreadyNormalized = true;
1691 assert(!"Unexpected node in impNormStructVal()");
1694 structVal->gtType = structType;
1695 GenTree* structObj = structVal;
1697 if (!alreadyNormalized || forceNormalization)
1701 unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));
1703 impAssignTempGen(tmpNum, structVal, structHnd, curLevel);
1705 // The structVal is now the temp itself
1707 structLcl = gtNewLclvNode(tmpNum, structType)->AsLclVarCommon();
1708 // TODO-1stClassStructs: Avoid always wrapping in GT_OBJ.
1709 structObj = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structLcl));
1711 else if (varTypeIsStruct(structType) && !structVal->OperIsBlk())
1713 // Wrap it in a GT_OBJ
1714 structObj = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1718 if (structLcl != nullptr)
1720 // An OBJ on an ADDR(LCL_VAR) can never raise an exception
1721 // so we don't set GTF_EXCEPT here.
1722 if (!lvaIsImplicitByRefLocal(structLcl->gtLclNum))
1724 structObj->gtFlags &= ~GTF_GLOB_REF;
1729 // In general an OBJ is an indirection and could raise an exception.
1730 structObj->gtFlags |= GTF_EXCEPT;
1735 /******************************************************************************/
1736 // Given a type token, generate code that will evaluate to the correct
1737 // handle representation of that token (type handle, field handle, or method handle)
1739 // For most cases, the handle is determined at compile-time, and the code
1740 // generated is simply an embedded handle.
1742 // Run-time lookup is required if the enclosing method is shared between instantiations
1743 // and the token refers to formal type parameters whose instantiation is not known at compile-time.
1746 GenTreePtr Compiler::impTokenToHandle(CORINFO_RESOLVED_TOKEN* pResolvedToken,
1747 BOOL* pRuntimeLookup /* = NULL */,
1748 BOOL mustRestoreHandle /* = FALSE */,
1749 BOOL importParent /* = FALSE */)
1751 assert(!fgGlobalMorph);
1753 CORINFO_GENERICHANDLE_RESULT embedInfo;
1754 info.compCompHnd->embedGenericHandle(pResolvedToken, importParent, &embedInfo);
1758 *pRuntimeLookup = embedInfo.lookup.lookupKind.needsRuntimeLookup;
1761 if (mustRestoreHandle && !embedInfo.lookup.lookupKind.needsRuntimeLookup)
1763 switch (embedInfo.handleType)
1765 case CORINFO_HANDLETYPE_CLASS:
1766 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun((CORINFO_CLASS_HANDLE)embedInfo.compileTimeHandle);
1769 case CORINFO_HANDLETYPE_METHOD:
1770 info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun((CORINFO_METHOD_HANDLE)embedInfo.compileTimeHandle);
1773 case CORINFO_HANDLETYPE_FIELD:
1774 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(
1775 info.compCompHnd->getFieldClass((CORINFO_FIELD_HANDLE)embedInfo.compileTimeHandle));
1783 return impLookupToTree(pResolvedToken, &embedInfo.lookup, gtTokenToIconFlags(pResolvedToken->token),
1784 embedInfo.compileTimeHandle);
1787 GenTreePtr Compiler::impLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
1788 CORINFO_LOOKUP* pLookup,
1789 unsigned handleFlags,
1790 void* compileTimeHandle)
1792 if (!pLookup->lookupKind.needsRuntimeLookup)
1794 // No runtime lookup is required.
1795 // Access is direct or memory-indirect (of a fixed address) reference
1797 CORINFO_GENERIC_HANDLE handle = nullptr;
1798 void* pIndirection = nullptr;
1799 assert(pLookup->constLookup.accessType != IAT_PPVALUE);
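// IAT_VALUE means the handle itself is known now; IAT_PVALUE means only the address of the
// cell that will hold it is known, so we record an indirection instead of the handle.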
1801 if (pLookup->constLookup.accessType == IAT_VALUE)
1803 handle = pLookup->constLookup.handle;
1805 else if (pLookup->constLookup.accessType == IAT_PVALUE)
1807 pIndirection = pLookup->constLookup.addr;
1809 return gtNewIconEmbHndNode(handle, pIndirection, handleFlags, 0, nullptr, compileTimeHandle);
1811 else if (compIsForInlining())
1813 // Don't import runtime lookups when inlining
1814 // Inlining has to be aborted in such a case
1815 compInlineResult->NoteFatal(InlineObservation::CALLSITE_GENERIC_DICTIONARY_LOOKUP);
1820 // Need to use dictionary-based access which depends on the typeContext
1821 // which is only available at runtime, not at compile-time.
1823 return impRuntimeLookupToTree(pResolvedToken, pLookup, compileTimeHandle);
1827 #ifdef FEATURE_READYTORUN_COMPILER
1828 GenTreePtr Compiler::impReadyToRunLookupToTree(CORINFO_CONST_LOOKUP* pLookup,
1829 unsigned handleFlags,
1830 void* compileTimeHandle)
1832 CORINFO_GENERIC_HANDLE handle = nullptr;
1833 void* pIndirection = nullptr;
1834 assert(pLookup->accessType != IAT_PPVALUE);
1836 if (pLookup->accessType == IAT_VALUE)
1838 handle = pLookup->handle;
1840 else if (pLookup->accessType == IAT_PVALUE)
1842 pIndirection = pLookup->addr;
1844 return gtNewIconEmbHndNode(handle, pIndirection, handleFlags, 0, nullptr, compileTimeHandle);
1847 GenTreePtr Compiler::impReadyToRunHelperToTree(
1848 CORINFO_RESOLVED_TOKEN* pResolvedToken,
1849 CorInfoHelpFunc helper,
1851 GenTreeArgList* args /* =NULL*/,
1852 CORINFO_LOOKUP_KIND* pGenericLookupKind /* =NULL. Only used with generics */)
1854 CORINFO_CONST_LOOKUP lookup;
1855 #if COR_JIT_EE_VERSION > 460
1856 if (!info.compCompHnd->getReadyToRunHelper(pResolvedToken, pGenericLookupKind, helper, &lookup))
1861 info.compCompHnd->getReadyToRunHelper(pResolvedToken, helper, &lookup);
1864 GenTreePtr op1 = gtNewHelperCallNode(helper, type, GTF_EXCEPT, args);
1866 op1->gtCall.setEntryPoint(lookup);
1872 GenTreePtr Compiler::impMethodPointer(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo)
1874 GenTreePtr op1 = nullptr;
1876 switch (pCallInfo->kind)
1879 op1 = new (this, GT_FTN_ADDR) GenTreeFptrVal(TYP_I_IMPL, pCallInfo->hMethod);
1881 #ifdef FEATURE_READYTORUN_COMPILER
1882 if (opts.IsReadyToRun())
1884 op1->gtFptrVal.gtEntryPoint = pCallInfo->codePointerLookup.constLookup;
1885 op1->gtFptrVal.gtLdftnResolvedToken = new (this, CMK_Unknown) CORINFO_RESOLVED_TOKEN;
1886 *op1->gtFptrVal.gtLdftnResolvedToken = *pResolvedToken;
1890 op1->gtFptrVal.gtEntryPoint.addr = nullptr;
1895 case CORINFO_CALL_CODE_POINTER:
1896 if (compIsForInlining())
1898 // Don't import runtime lookups when inlining
1899 // Inlining has to be aborted in such a case
1900 compInlineResult->NoteFatal(InlineObservation::CALLSITE_GENERIC_DICTIONARY_LOOKUP);
1904 op1 = impLookupToTree(pResolvedToken, &pCallInfo->codePointerLookup, GTF_ICON_FTN_ADDR, pCallInfo->hMethod);
1908 noway_assert(!"unknown call kind");
1915 //------------------------------------------------------------------------
1916 // getRuntimeContextTree: find pointer to context for runtime lookup.
1919 // kind - lookup kind.
1922 // Return GenTree pointer to generic shared context.
1925 //    Reports that the generic context is used (by setting lvaGenericsContextUsed).
1927 GenTreePtr Compiler::getRuntimeContextTree(CORINFO_RUNTIME_LOOKUP_KIND kind)
1929 GenTreePtr ctxTree = nullptr;
1931 // Collectible types require that for shared generic code, if we use the generic context parameter,
1932 // we report it. (This is a conservative approach; we could detect some cases, particularly when the
1933 // context parameter is 'this', where we don't need the eager reporting logic.)
1934 lvaGenericsContextUsed = true;
1936 if (kind == CORINFO_LOOKUP_THISOBJ)
1939 ctxTree = gtNewLclvNode(info.compThisArg, TYP_REF);
1941 // Vtable pointer of this object
1942 ctxTree = gtNewOperNode(GT_IND, TYP_I_IMPL, ctxTree);
1943 ctxTree->gtFlags |= GTF_EXCEPT; // Null-pointer exception
1944 ctxTree->gtFlags |= GTF_IND_INVARIANT;
1948 assert(kind == CORINFO_LOOKUP_METHODPARAM || kind == CORINFO_LOOKUP_CLASSPARAM);
1950 ctxTree = gtNewLclvNode(info.compTypeCtxtArg, TYP_I_IMPL); // Exact method descriptor as passed in as last arg
1955 /*****************************************************************************/
1956 /* Import a dictionary lookup to access a handle in code shared between
1957 generic instantiations.
1958 The lookup depends on the typeContext which is only available at
1959 runtime, and not at compile-time.
1960 pLookup->token1 and pLookup->token2 specify the handle that is needed.
1963 1. pLookup->indirections == CORINFO_USEHELPER : Call a helper passing it the
1964 instantiation-specific handle, and the tokens to lookup the handle.
1965 2. pLookup->indirections != CORINFO_USEHELPER :
1966 2a. pLookup->testForNull == false : Dereference the instantiation-specific handle to get the handle.
1968 2b. pLookup->testForNull == true : Dereference the instantiation-specific handle.
1969 If it is non-NULL, it is the handle required. Else, call a helper
1970 to lookup the handle.
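// For example (sketch): in case 1 the result is simply a helper call taking the generic
// context and the signature token; in case 2a it is a chain of IND/ADD(offset) nodes over
// the context pointer; in case 2b that chain is wrapped in a null test (a QMARK) that falls
// back to the helper call.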
1973 GenTreePtr Compiler::impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
1974 CORINFO_LOOKUP* pLookup,
1975 void* compileTimeHandle)
1978 // This method can only be called from the importer instance of the Compiler.
1979 // In other word, it cannot be called by the instance of the Compiler for the inlinee.
1980 assert(!compIsForInlining());
1982 GenTreePtr ctxTree = getRuntimeContextTree(pLookup->lookupKind.runtimeLookupKind);
1984 #ifdef FEATURE_READYTORUN_COMPILER
1985 if (opts.IsReadyToRun())
1987 return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL,
1988 gtNewArgList(ctxTree), &pLookup->lookupKind);
1992 CORINFO_RUNTIME_LOOKUP* pRuntimeLookup = &pLookup->runtimeLookup;
1993 // It's available only via the run-time helper function
1994 if (pRuntimeLookup->indirections == CORINFO_USEHELPER)
1996 GenTreeArgList* helperArgs =
1997 gtNewArgList(ctxTree, gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_TOKEN_HDL, 0,
1998 nullptr, compileTimeHandle));
2000 return gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, GTF_EXCEPT, helperArgs);
2004 GenTreePtr slotPtrTree = ctxTree;
2006 if (pRuntimeLookup->testForNull)
2008 slotPtrTree = impCloneExpr(ctxTree, &ctxTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2009 nullptr DEBUGARG("impRuntimeLookup slot"));
2012 // Apply repeated indirections
2013 for (WORD i = 0; i < pRuntimeLookup->indirections; i++)
2017 slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
2018 slotPtrTree->gtFlags |= GTF_IND_NONFAULTING;
2019 slotPtrTree->gtFlags |= GTF_IND_INVARIANT;
2021 if (pRuntimeLookup->offsets[i] != 0)
2024 gtNewOperNode(GT_ADD, TYP_I_IMPL, slotPtrTree, gtNewIconNode(pRuntimeLookup->offsets[i], TYP_I_IMPL));
2028 // No null test required
2029 if (!pRuntimeLookup->testForNull)
2031 if (pRuntimeLookup->indirections == 0)
2036 slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
2037 slotPtrTree->gtFlags |= GTF_IND_NONFAULTING;
2039 if (!pRuntimeLookup->testForFixup)
2044 impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark0"));
2046 GenTreePtr op1 = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2047 nullptr DEBUGARG("impRuntimeLookup test"));
2048 op1 = impImplicitIorI4Cast(op1, TYP_INT); // downcast the pointer to a TYP_INT on 64-bit targets
2050 // Use a GT_AND to check for the lowest bit and indirect if it is set
2051 GenTreePtr testTree = gtNewOperNode(GT_AND, TYP_INT, op1, gtNewIconNode(1));
2052 GenTreePtr relop = gtNewOperNode(GT_EQ, TYP_INT, testTree, gtNewIconNode(0));
2053 relop->gtFlags |= GTF_RELOP_QMARK;
2055 op1 = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2056 nullptr DEBUGARG("impRuntimeLookup indir"));
2057 op1 = gtNewOperNode(GT_ADD, TYP_I_IMPL, op1, gtNewIconNode(-1, TYP_I_IMPL)); // subtract 1 from the pointer
2058 GenTreePtr indirTree = gtNewOperNode(GT_IND, TYP_I_IMPL, op1);
2059 GenTreePtr colon = new (this, GT_COLON) GenTreeColon(TYP_I_IMPL, slotPtrTree, indirTree);
2061 GenTreePtr qmark = gtNewQmarkNode(TYP_I_IMPL, relop, colon);
2063 unsigned tmp = lvaGrabTemp(true DEBUGARG("spilling QMark0"));
2064 impAssignTempGen(tmp, qmark, (unsigned)CHECK_SPILL_NONE);
2065 return gtNewLclvNode(tmp, TYP_I_IMPL);
2068 assert(pRuntimeLookup->indirections != 0);
2070 impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark1"));
2072 // Extract the handle
2073 GenTreePtr handle = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
2074 handle->gtFlags |= GTF_IND_NONFAULTING;
2076 GenTreePtr handleCopy = impCloneExpr(handle, &handle, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2077 nullptr DEBUGARG("impRuntimeLookup typehandle"));
2080 GenTreeArgList* helperArgs =
2081 gtNewArgList(ctxTree, gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_TOKEN_HDL, 0, nullptr,
2082 compileTimeHandle));
2083 GenTreePtr helperCall = gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, GTF_EXCEPT, helperArgs);
2085 // Check for null and possibly call helper
2086 GenTreePtr relop = gtNewOperNode(GT_NE, TYP_INT, handle, gtNewIconNode(0, TYP_I_IMPL));
2087 relop->gtFlags |= GTF_RELOP_QMARK;
2089 GenTreePtr colon = new (this, GT_COLON) GenTreeColon(TYP_I_IMPL,
2090 gtNewNothingNode(), // do nothing if nonnull
2093 GenTreePtr qmark = gtNewQmarkNode(TYP_I_IMPL, relop, colon);
2096 if (handleCopy->IsLocal())
2098 tmp = handleCopy->gtLclVarCommon.gtLclNum;
2102 tmp = lvaGrabTemp(true DEBUGARG("spilling QMark1"));
2105 impAssignTempGen(tmp, qmark, (unsigned)CHECK_SPILL_NONE);
2106 return gtNewLclvNode(tmp, TYP_I_IMPL);
2109 /******************************************************************************
2110 * Spills the stack at verCurrentState.esStack[level] and replaces it with a temp.
2111 * If tnum!=BAD_VAR_NUM, the temp var used to replace the tree is tnum,
2112 * else, grab a new temp.
2113 * For structs (which can be pushed on the stack using obj, etc),
2114 * special handling is needed
2117 struct RecursiveGuard
2122 m_pAddress = nullptr;
2129 *m_pAddress = false;
2133 void Init(bool* pAddress, bool bInitialize)
2135 assert(pAddress && *pAddress == false && "Recursive guard violation");
2136 m_pAddress = pAddress;
2148 bool Compiler::impSpillStackEntry(unsigned level,
2152 bool bAssertOnRecursion,
2159 RecursiveGuard guard;
2160 guard.Init(&impNestedStackSpill, bAssertOnRecursion);
2163 GenTreePtr tree = verCurrentState.esStack[level].val;
2165 /* Allocate a temp if we haven't been asked to use a particular one */
2167 if (tiVerificationNeeded)
2169 // Ignore bad temp requests (they will happen with bad code and will be
2170 // caught when importing the dest block)
2171 if ((tnum != BAD_VAR_NUM && tnum >= lvaCount) && verNeedsVerification())
2178 if (tnum != BAD_VAR_NUM && (tnum >= lvaCount))
2184 if (tnum == BAD_VAR_NUM)
2186 tnum = lvaGrabTemp(true DEBUGARG(reason));
2188 else if (tiVerificationNeeded && lvaTable[tnum].TypeGet() != TYP_UNDEF)
2190 // if verification is needed and tnum's type is incompatible with
2191 // the type on the stack, we grab a new temp. This is safe since
2192 // we will throw a verification exception in the dest block.
2194 var_types valTyp = tree->TypeGet();
2195 var_types dstTyp = lvaTable[tnum].TypeGet();
2197 // if the two types are different, we return. This will only happen with bad code and will
2198 // be caught when importing the dest block. We still allow int/byref and float/double differences.
2199 if ((genActualType(valTyp) != genActualType(dstTyp)) &&
2201 #ifndef _TARGET_64BIT_
2202 (valTyp == TYP_I_IMPL && dstTyp == TYP_BYREF) || (valTyp == TYP_BYREF && dstTyp == TYP_I_IMPL) ||
2203 #endif // !_TARGET_64BIT_
2204 (varTypeIsFloating(dstTyp) && varTypeIsFloating(valTyp))))
2206 if (verNeedsVerification())
2213 /* Assign the spilled entry to the temp */
2214 impAssignTempGen(tnum, tree, verCurrentState.esStack[level].seTypeInfo.GetClassHandle(), level);
2216 // The tree type may be modified by impAssignTempGen, so use the type of the lclVar.
2217 var_types type = genActualType(lvaTable[tnum].TypeGet());
2218 GenTreePtr temp = gtNewLclvNode(tnum, type);
2219 verCurrentState.esStack[level].val = temp;
2224 /*****************************************************************************
2226 * Ensure that the stack has only spilled values
2229 void Compiler::impSpillStackEnsure(bool spillLeaves)
2231 assert(!spillLeaves || opts.compDbgCode);
2233 for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2235 GenTreePtr tree = verCurrentState.esStack[level].val;
2237 if (!spillLeaves && tree->OperIsLeaf())
2242 // Temps introduced by the importer itself don't need to be spilled
2244 bool isTempLcl = (tree->OperGet() == GT_LCL_VAR) && (tree->gtLclVarCommon.gtLclNum >= info.compLocalsCount);
2251 impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillStackEnsure"));
2255 void Compiler::impSpillEvalStack()
2257 for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2259 impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillEvalStack"));
2263 /*****************************************************************************
2265 * If the stack contains any trees with side effects in them, assign those
2266 * trees to temps and append the assignments to the statement list.
2267 * On return the stack is guaranteed to be empty.
2270 inline void Compiler::impEvalSideEffects()
2272 impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("impEvalSideEffects"));
2273 verCurrentState.esStackDepth = 0;
2276 /*****************************************************************************
2278 * If the stack contains any trees with side effects in them, assign those
2279 * trees to temps and replace them on the stack with refs to their temps.
2280 * [0..chkLevel) is the portion of the stack which will be checked and spilled.
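 * For illustration (a sketch only): callers in this file typically do
 *
 *     impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("reason"));
 *
 * which spills every stack entry with global effects; passing a smaller chkLevel
 * instead limits the check to the bottom chkLevel entries, leaving the operands
 * above that level untouched.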
2283 inline void Compiler::impSpillSideEffects(bool spillGlobEffects, unsigned chkLevel DEBUGARG(const char* reason))
2285 assert(chkLevel != (unsigned)CHECK_SPILL_NONE);
2287 /* Before we make any appends to the tree list we must spill the
2288 * "special" side effects (GTF_ORDER_SIDEEFF on a GT_CATCH_ARG) */
2290 impSpillSpecialSideEff();
2292 if (chkLevel == (unsigned)CHECK_SPILL_ALL)
2294 chkLevel = verCurrentState.esStackDepth;
2297 assert(chkLevel <= verCurrentState.esStackDepth);
2299 unsigned spillFlags = spillGlobEffects ? GTF_GLOB_EFFECT : GTF_SIDE_EFFECT;
2301 for (unsigned i = 0; i < chkLevel; i++)
2303 GenTreePtr tree = verCurrentState.esStack[i].val;
2305 GenTreePtr lclVarTree;
2307 if ((tree->gtFlags & spillFlags) != 0 ||
2308 (spillGlobEffects && // Only consider the following when spillGlobEffects == TRUE
2309 !impIsAddressInLocal(tree, &lclVarTree) && // No need to spill the GT_ADDR node on a local.
2310 gtHasLocalsWithAddrOp(tree))) // Spill if we still see GT_LCL_VAR that contains lvHasLdAddrOp or
2311 // lvAddrTaken flag.
2313 impSpillStackEntry(i, BAD_VAR_NUM DEBUGARG(false) DEBUGARG(reason));
2318 /*****************************************************************************
2320 * If the stack contains any trees with special side effects in them, assign
2321 * those trees to temps and replace them on the stack with refs to their temps.
2324 inline void Compiler::impSpillSpecialSideEff()
2326 // Only exception objects need to be carefully handled
2328 if (!compCurBB->bbCatchTyp)
2333 for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2335 GenTreePtr tree = verCurrentState.esStack[level].val;
2336 // Make sure we spill if there is an exception object in the subtree.
2337 if (gtHasCatchArg(tree))
2339 impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillSpecialSideEff"));
2344 /*****************************************************************************
2346 * Spill all stack references to value classes (TYP_STRUCT nodes)
2349 void Compiler::impSpillValueClasses()
2351 for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2353 GenTreePtr tree = verCurrentState.esStack[level].val;
2355 if (fgWalkTreePre(&tree, impFindValueClasses) == WALK_ABORT)
2357 // Tree walk was aborted, which means that we found a
2358 // value class on the stack. Need to spill that
2361 impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillValueClasses"));
2366 /*****************************************************************************
2368 * Callback that checks if a tree node is TYP_STRUCT
2371 Compiler::fgWalkResult Compiler::impFindValueClasses(GenTreePtr* pTree, fgWalkData* data)
2373 fgWalkResult walkResult = WALK_CONTINUE;
2375 if ((*pTree)->gtType == TYP_STRUCT)
2377 // Abort the walk and indicate that we found a value class
2379 walkResult = WALK_ABORT;
2385 /*****************************************************************************
2387 * If the stack contains any trees with references to local #lclNum, assign
2388 * those trees to temps and replace them on the stack with refs to their temps.
2392 void Compiler::impSpillLclRefs(ssize_t lclNum)
2394 /* Before we make any appends to the tree list we must spill the
2395 * "special" side effects (GTF_ORDER_SIDEEFF) - GT_CATCH_ARG */
2397 impSpillSpecialSideEff();
2399 for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2401 GenTreePtr tree = verCurrentState.esStack[level].val;
2403 /* If the tree may throw an exception, and the block has a handler,
2404 then we need to spill assignments to the local if the local is
2405 live on entry to the handler.
2406 Just spill 'em all without considering the liveness */
2408 bool xcptnCaught = ehBlockHasExnFlowDsc(compCurBB) && (tree->gtFlags & (GTF_CALL | GTF_EXCEPT));
2410 /* Skip the tree if it doesn't have an affected reference,
2411 unless xcptnCaught */
2413 if (xcptnCaught || gtHasRef(tree, lclNum, false))
2415 impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillLclRefs"));
2420 /*****************************************************************************
2422 * Push catch arg onto the stack.
2423 * If there are jumps to the beginning of the handler, insert basic block
2424 * and spill catch arg to a temp. Update the handler block if necessary.
2426 * Returns the basic block of the actual handler.
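 * As a rough sketch (not the exact flowgraph operations): when several flow edges
 * target the handler, the code below injects a spill block in front of it,
 *
 *     newBlk:   tempN = GT_CATCH_ARG      // falls through into the handler
 *     handler:  ...                       // importer pushes lclVar tempN instead
 *
 * so the exception object lives in a temp rather than staying tied to its fixed
 * register across the extra control flow.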
2429 BasicBlock* Compiler::impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_HANDLE clsHnd)
2431 // Do not inject the basic block twice on reimport. This should be
2432 // hit only under JIT stress. See if the block is the one we injected.
2433 // Note that EH canonicalization can inject internal blocks here. We might
2434 // be able to re-use such a block (but we don't, right now).
2435 if ((hndBlk->bbFlags & (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET)) ==
2436 (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET))
2438 GenTreePtr tree = hndBlk->bbTreeList;
2440 if (tree != nullptr && tree->gtOper == GT_STMT)
2442 tree = tree->gtStmt.gtStmtExpr;
2443 assert(tree != nullptr);
2445 if ((tree->gtOper == GT_ASG) && (tree->gtOp.gtOp1->gtOper == GT_LCL_VAR) &&
2446 (tree->gtOp.gtOp2->gtOper == GT_CATCH_ARG))
2448 tree = gtNewLclvNode(tree->gtOp.gtOp1->gtLclVarCommon.gtLclNum, TYP_REF);
2450 impPushOnStack(tree, typeInfo(TI_REF, clsHnd));
2452 return hndBlk->bbNext;
2456 // If we get here, it must have been some other kind of internal block. It's possible that
2457 // someone prepended something to our injected block, but that's unlikely.
2460 /* Push the exception address value on the stack */
2461 GenTreePtr arg = new (this, GT_CATCH_ARG) GenTree(GT_CATCH_ARG, TYP_REF);
2463 /* Mark the node as having a side-effect - i.e. cannot be
2464 * moved around since it is tied to a fixed location (EAX) */
2465 arg->gtFlags |= GTF_ORDER_SIDEEFF;
2467 /* Spill GT_CATCH_ARG to a temp if there are jumps to the beginning of the handler */
2468 if (hndBlk->bbRefs > 1 || compStressCompile(STRESS_CATCH_ARG, 5))
2470 if (hndBlk->bbRefs == 1)
2475 /* Create extra basic block for the spill */
2476 BasicBlock* newBlk = fgNewBBbefore(BBJ_NONE, hndBlk, /* extendRegion */ true);
2477 newBlk->bbFlags |= BBF_IMPORTED | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET;
2478 newBlk->setBBWeight(hndBlk->bbWeight);
2479 newBlk->bbCodeOffs = hndBlk->bbCodeOffs;
2481 /* Account for the new link we are about to create */
2484 /* Spill into a temp */
2485 unsigned tempNum = lvaGrabTemp(false DEBUGARG("SpillCatchArg"));
2486 lvaTable[tempNum].lvType = TYP_REF;
2487 arg = gtNewTempAssign(tempNum, arg);
2489 hndBlk->bbStkTempsIn = tempNum;
2491 /* Report the debug info. impImportBlockCode won't treat
2492 * the actual handler as an exception block and thus won't do it for us. */
2493 if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES)
2495 impCurStmtOffs = newBlk->bbCodeOffs | IL_OFFSETX_STKBIT;
2496 arg = gtNewStmt(arg, impCurStmtOffs);
2499 fgInsertStmtAtEnd(newBlk, arg);
2501 arg = gtNewLclvNode(tempNum, TYP_REF);
2504 impPushOnStack(arg, typeInfo(TI_REF, clsHnd));
2509 /*****************************************************************************
2511 * Given a tree, clone it. *pClone is set to the cloned tree.
2512 * Returns the original tree if the cloning was easy,
2513 * else returns the temp to which the tree had to be spilled.
2514 * If the tree has side-effects, it will be spilled to a temp.
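 * A sketch of typical use (names are illustrative only):
 *
 *     GenTreePtr op1Copy = impCloneExpr(op1, &op1, clsHnd, curLevel,
 *                                       nullptr DEBUGARG("example clone"));
 *
 * Afterwards 'op1' and 'op1Copy' may each be consumed once; if the original tree
 * had global effects, both are simply loads of the same freshly assigned temp.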
2517 GenTreePtr Compiler::impCloneExpr(GenTreePtr tree,
2519 CORINFO_CLASS_HANDLE structHnd,
2521 GenTreePtr* pAfterStmt DEBUGARG(const char* reason))
2523 if (!(tree->gtFlags & GTF_GLOB_EFFECT))
2525 GenTreePtr clone = gtClone(tree, true);
2534 /* Store the operand in a temp and return the temp */
2536 unsigned temp = lvaGrabTemp(true DEBUGARG(reason));
2538 // impAssignTempGen() may change tree->gtType to TYP_VOID for calls which
2539 // return a struct type. It also may modify the struct type to a more
2540 // specialized type (e.g. a SIMD type). So we will get the type from
2541 // the lclVar AFTER calling impAssignTempGen().
2543 impAssignTempGen(temp, tree, structHnd, curLevel, pAfterStmt, impCurStmtOffs);
2544 var_types type = genActualType(lvaTable[temp].TypeGet());
2546 *pClone = gtNewLclvNode(temp, type);
2547 return gtNewLclvNode(temp, type);
2550 /*****************************************************************************
2551 * Remember the IL offset (including stack-empty info) for the trees we will generate now.
2555 inline void Compiler::impCurStmtOffsSet(IL_OFFSET offs)
2557 if (compIsForInlining())
2559 GenTreePtr callStmt = impInlineInfo->iciStmt;
2560 assert(callStmt->gtOper == GT_STMT);
2561 impCurStmtOffs = callStmt->gtStmt.gtStmtILoffsx;
2565 assert(offs == BAD_IL_OFFSET || (offs & IL_OFFSETX_BITS) == 0);
2566 IL_OFFSETX stkBit = (verCurrentState.esStackDepth > 0) ? IL_OFFSETX_STKBIT : 0;
2567 impCurStmtOffs = offs | stkBit;
2571 /*****************************************************************************
2572 * Returns current IL offset with stack-empty and call-instruction info incorporated
2574 inline IL_OFFSETX Compiler::impCurILOffset(IL_OFFSET offs, bool callInstruction)
2576 if (compIsForInlining())
2578 return BAD_IL_OFFSET;
2582 assert(offs == BAD_IL_OFFSET || (offs & IL_OFFSETX_BITS) == 0);
2583 IL_OFFSETX stkBit = (verCurrentState.esStackDepth > 0) ? IL_OFFSETX_STKBIT : 0;
2584 IL_OFFSETX callInstructionBit = callInstruction ? IL_OFFSETX_CALLINSTRUCTIONBIT : 0;
2585 return offs | stkBit | callInstructionBit;
2589 /*****************************************************************************
2591 * Remember the instr offset for the statements
2593 * When we do impAppendTree(tree), we can't set tree->gtStmtLastILoffs to
2594 * impCurOpcOffs, if the append was done because of a partial stack spill,
2595 * as some of the trees corresponding to code up to impCurOpcOffs might
2596 * still be sitting on the stack.
2597 * So we delay marking of gtStmtLastILoffs until impNoteLastILoffs().
2598 * This should be called when an opcode finally/explicitly causes
2599 * impAppendTree(tree) to be called (as opposed to being called because of
2600 * a spill caused by the opcode)
2605 void Compiler::impNoteLastILoffs()
2607 if (impLastILoffsStmt == nullptr)
2609 // We should have added a statement for the current basic block
2610 // Is this assert correct ?
2612 assert(impTreeLast);
2613 assert(impTreeLast->gtOper == GT_STMT);
2615 impTreeLast->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
2619 impLastILoffsStmt->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
2620 impLastILoffsStmt = nullptr;
2626 /*****************************************************************************
2627 * We don't create any GenTree (excluding spills) for a branch.
2628 * For debugging info, we need a placeholder so that we can note
2629 * the IL offset in gtStmt.gtStmtOffs. So append an empty statement.
2632 void Compiler::impNoteBranchOffs()
2634 if (opts.compDbgCode)
2636 impAppendTree(gtNewNothingNode(), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
2640 /*****************************************************************************
2641 * Locate the next stmt boundary for which we need to record info.
2642 * We will have to spill the stack at such boundaries if it is not already empty.
2644 * Returns the next stmt boundary (after the start of the block)
2647 unsigned Compiler::impInitBlockLineInfo()
2649 /* Assume the block does not correspond with any IL offset. This prevents
2650 us from reporting extra offsets. Extra mappings can cause confusing
2651 stepping, especially if the extra mapping is a jump-target, and the
2652 debugger does not ignore extra mappings, but instead rewinds to the
2653 nearest known offset */
2655 impCurStmtOffsSet(BAD_IL_OFFSET);
2657 if (compIsForInlining())
2662 IL_OFFSET blockOffs = compCurBB->bbCodeOffs;
2664 if ((verCurrentState.esStackDepth == 0) && (info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES))
2666 impCurStmtOffsSet(blockOffs);
2669 if (false && (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES))
2671 impCurStmtOffsSet(blockOffs);
2674 /* Always report IL offset 0 or some tests get confused.
2675 Probably a good idea anyway */
2679 impCurStmtOffsSet(blockOffs);
2682 if (!info.compStmtOffsetsCount)
2687 /* Find the lowest explicit stmt boundary within the block */
2689 /* Start looking at an entry that is based on our instr offset */
2691 unsigned index = (info.compStmtOffsetsCount * blockOffs) / info.compILCodeSize;
2693 if (index >= info.compStmtOffsetsCount)
2695 index = info.compStmtOffsetsCount - 1;
2698 /* If we've guessed too far, back up */
2700 while (index > 0 && info.compStmtOffsets[index - 1] >= blockOffs)
2705 /* If we guessed short, advance ahead */
2707 while (info.compStmtOffsets[index] < blockOffs)
2711 if (index == info.compStmtOffsetsCount)
2713 return info.compStmtOffsetsCount;
2717 assert(index < info.compStmtOffsetsCount);
2719 if (info.compStmtOffsets[index] == blockOffs)
2721 /* There is an explicit boundary for the start of this basic block.
2722 So we will start with bbCodeOffs. Else we will wait until we
2723 get to the next explicit boundary */
2725 impCurStmtOffsSet(blockOffs);
2733 /*****************************************************************************/
2735 static inline bool impOpcodeIsCallOpcode(OPCODE opcode)
2749 /*****************************************************************************/
2751 static inline bool impOpcodeIsCallSiteBoundary(OPCODE opcode)
2768 /*****************************************************************************/
2770 // One might think it is worth caching these values, but results indicate that it isn't.
2772 // In addition, caching them causes SuperPMI to be unable to completely
2773 // encapsulate an individual method context.
2774 CORINFO_CLASS_HANDLE Compiler::impGetRefAnyClass()
2776 CORINFO_CLASS_HANDLE refAnyClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPED_BYREF);
2777 assert(refAnyClass != (CORINFO_CLASS_HANDLE) nullptr);
2781 CORINFO_CLASS_HANDLE Compiler::impGetTypeHandleClass()
2783 CORINFO_CLASS_HANDLE typeHandleClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPE_HANDLE);
2784 assert(typeHandleClass != (CORINFO_CLASS_HANDLE) nullptr);
2785 return typeHandleClass;
2788 CORINFO_CLASS_HANDLE Compiler::impGetRuntimeArgumentHandle()
2790 CORINFO_CLASS_HANDLE argIteratorClass = info.compCompHnd->getBuiltinClass(CLASSID_ARGUMENT_HANDLE);
2791 assert(argIteratorClass != (CORINFO_CLASS_HANDLE) nullptr);
2792 return argIteratorClass;
2795 CORINFO_CLASS_HANDLE Compiler::impGetStringClass()
2797 CORINFO_CLASS_HANDLE stringClass = info.compCompHnd->getBuiltinClass(CLASSID_STRING);
2798 assert(stringClass != (CORINFO_CLASS_HANDLE) nullptr);
2802 CORINFO_CLASS_HANDLE Compiler::impGetObjectClass()
2804 CORINFO_CLASS_HANDLE objectClass = info.compCompHnd->getBuiltinClass(CLASSID_SYSTEM_OBJECT);
2805 assert(objectClass != (CORINFO_CLASS_HANDLE) nullptr);
2809 /*****************************************************************************
2810 * "&var" can be used either as TYP_BYREF or TYP_I_IMPL, but we
2811 * set its type to TYP_BYREF when we create it. We know if it can be
2812 * changed to TYP_I_IMPL only at the point where we use it
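 * For example (illustrative only): an address produced by
 *
 *     ldloca.s V02
 *
 * is created as a TYP_BYREF GT_ADDR node; if it later ends up as an operand of
 * native-int arithmetic or a comparison, this routine simply retypes that node
 * to TYP_I_IMPL at the point of use.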
2816 void Compiler::impBashVarAddrsToI(GenTreePtr tree1, GenTreePtr tree2)
2818 if (tree1->IsVarAddr())
2820 tree1->gtType = TYP_I_IMPL;
2823 if (tree2 && tree2->IsVarAddr())
2825 tree2->gtType = TYP_I_IMPL;
2829 /*****************************************************************************
2830 * TYP_INT and TYP_I_IMPL can be used almost interchangeably, but we want
2831 * to make that an explicit cast in our trees, so any implicit casts that
2832 * exist in the IL (at least on 64-bit where TYP_I_IMPL != TYP_INT) are
2833 * turned into explicit casts here.
2834 * We also allow an implicit conversion of a ldnull into a TYP_I_IMPL(0)
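 * A sketch of the effect on 64-bit targets (illustrative, mirroring the code below):
 *
 *     int32 value used where native int is expected  ->  GT_CAST(TYP_I_IMPL, value)
 *     native int used where int32 is expected        ->  GT_CAST(TYP_INT, value)
 *     ldnull / constant 0 used as native int         ->  the constant is retyped
 *                                                        to TYP_I_IMPL in place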
2837 GenTreePtr Compiler::impImplicitIorI4Cast(GenTreePtr tree, var_types dstTyp)
2839 var_types currType = genActualType(tree->gtType);
2840 var_types wantedType = genActualType(dstTyp);
2842 if (wantedType != currType)
2844 // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
2845 if ((tree->OperGet() == GT_CNS_INT) && varTypeIsI(dstTyp))
2847 if (!varTypeIsI(tree->gtType) || ((tree->gtType == TYP_REF) && (tree->gtIntCon.gtIconVal == 0)))
2849 tree->gtType = TYP_I_IMPL;
2852 #ifdef _TARGET_64BIT_
2853 else if (varTypeIsI(wantedType) && (currType == TYP_INT))
2855 // Note that this allows TYP_INT to be cast to a TYP_I_IMPL when wantedType is a TYP_BYREF or TYP_REF
2856 tree = gtNewCastNode(TYP_I_IMPL, tree, TYP_I_IMPL);
2858 else if ((wantedType == TYP_INT) && varTypeIsI(currType))
2860 // Note that this allows TYP_BYREF or TYP_REF to be cast to a TYP_INT
2861 tree = gtNewCastNode(TYP_INT, tree, TYP_INT);
2863 #endif // _TARGET_64BIT_
2869 /*****************************************************************************
2870 * TYP_FLOAT and TYP_DOUBLE can be used almost interchangeably in some cases,
2871 * but we want to make that an explicit cast in our trees, so any implicit casts
2872 * that exist in the IL are turned into explicit casts here.
2875 GenTreePtr Compiler::impImplicitR4orR8Cast(GenTreePtr tree, var_types dstTyp)
2877 #ifndef LEGACY_BACKEND
2878 if (varTypeIsFloating(tree) && varTypeIsFloating(dstTyp) && (dstTyp != tree->gtType))
2880 tree = gtNewCastNode(dstTyp, tree, dstTyp);
2882 #endif // !LEGACY_BACKEND
2887 //------------------------------------------------------------------------
2888 // impInitializeArrayIntrinsic: Attempts to replace a call to InitializeArray
2889 // with a GT_COPYBLK node.
2892 // sig - The InitializeArray signature.
2895 // A pointer to the newly created GT_COPYBLK node if the replacement succeeds or
2896 // nullptr otherwise.
2899 // The function recognizes the following IL pattern:
2900 //     ldc <length> or a list of ldc <lower bound>/<length>
//     newarr or newobj
//     dup
2903 // ldtoken <field handle>
2904 // call InitializeArray
2905 // The lower bounds need not be constant except when the array rank is 1.
2906 // The function recognizes all kinds of arrays thus enabling a small runtime
2907 // such as CoreRT to skip providing an implementation for InitializeArray.
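// For illustration, a typical C# array initializer such as
//
//     static readonly int[] s_data = new int[] { 1, 2, 3, 4 };
//
// compiles to IL of roughly this shape (exact tokens omitted):
//
//     ldc.i4.4
//     newarr    int32
//     dup
//     ldtoken   <field holding the raw initializer bytes>
//     call      RuntimeHelpers::InitializeArray
//
// which this function rewrites into a single block copy from the field's data.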
2909 GenTreePtr Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig)
2911 assert(sig->numArgs == 2);
2913 GenTreePtr fieldTokenNode = impStackTop(0).val;
2914 GenTreePtr arrayLocalNode = impStackTop(1).val;
2917 // Verify that the field token is known and valid. Note that it's also
2918 // possible for the token to come from reflection, in which case we cannot do
2919 // the optimization and must therefore revert to calling the helper. You can
2920 // see an example of this in bvt\DynIL\initarray2.exe (in Main).
2923 // Check to see if the ldtoken helper call is what we see here.
2924 if (fieldTokenNode->gtOper != GT_CALL || (fieldTokenNode->gtCall.gtCallType != CT_HELPER) ||
2925 (fieldTokenNode->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD)))
2930 // Strip helper call away
2931 fieldTokenNode = fieldTokenNode->gtCall.gtCallArgs->Current();
2933 if (fieldTokenNode->gtOper == GT_IND)
2935 fieldTokenNode = fieldTokenNode->gtOp.gtOp1;
2938 // Check for constant
2939 if (fieldTokenNode->gtOper != GT_CNS_INT)
2944 CORINFO_FIELD_HANDLE fieldToken = (CORINFO_FIELD_HANDLE)fieldTokenNode->gtIntCon.gtCompileTimeHandle;
2945 if (!fieldTokenNode->IsIconHandle(GTF_ICON_FIELD_HDL) || (fieldToken == nullptr))
2951 // We need to get the number of elements in the array and the size of each element.
2952 // We verify that the newarr statement is exactly what we expect it to be.
2953 // If it's not then we just return NULL and we don't optimize this call
2957 // It is possible that we don't have any statements in the block yet
2959 if (impTreeLast->gtOper != GT_STMT)
2961 assert(impTreeLast->gtOper == GT_BEG_STMTS);
2966 // We start by looking at the last statement, making sure it's an assignment, and
2967 // that the target of the assignment is the array passed to InitializeArray.
2969 GenTreePtr arrayAssignment = impTreeLast->gtStmt.gtStmtExpr;
2970 if ((arrayAssignment->gtOper != GT_ASG) || (arrayAssignment->gtOp.gtOp1->gtOper != GT_LCL_VAR) ||
2971 (arrayLocalNode->gtOper != GT_LCL_VAR) ||
2972 (arrayAssignment->gtOp.gtOp1->gtLclVarCommon.gtLclNum != arrayLocalNode->gtLclVarCommon.gtLclNum))
2978 // Make sure that the object being assigned is a helper call.
2981 GenTreePtr newArrayCall = arrayAssignment->gtOp.gtOp2;
2982 if ((newArrayCall->gtOper != GT_CALL) || (newArrayCall->gtCall.gtCallType != CT_HELPER))
2988 // Verify that it is one of the new array helpers.
2991 bool isMDArray = false;
2993 if (newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_DIRECT) &&
2994 newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_OBJ) &&
2995 newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_VC) &&
2996 newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_ALIGN8)
2997 #ifdef FEATURE_READYTORUN_COMPILER
2998 && newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1)
3002 #if COR_JIT_EE_VERSION > 460
3003 if (newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEW_MDARR_NONVARARG))
3012 CORINFO_CLASS_HANDLE arrayClsHnd = (CORINFO_CLASS_HANDLE)newArrayCall->gtCall.compileTimeHelperArgumentHandle;
3015 // Make sure we found a compile time handle to the array
3024 S_UINT32 numElements;
3028 rank = info.compCompHnd->getArrayRank(arrayClsHnd);
3035 GenTreeArgList* tokenArg = newArrayCall->gtCall.gtCallArgs;
3036 assert(tokenArg != nullptr);
3037 GenTreeArgList* numArgsArg = tokenArg->Rest();
3038 assert(numArgsArg != nullptr);
3039 GenTreeArgList* argsArg = numArgsArg->Rest();
3040 assert(argsArg != nullptr);
3043 // The number of arguments should be a constant between 1 and 64. The rank can't be 0
3044 // so at least one length must be present and the rank can't exceed 32 so there can
3045 // be at most 64 arguments - 32 lengths and 32 lower bounds.
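// (For illustration: a rank-3 array created with explicit lower bounds passes
// numArgs == 6, i.e. a lower-bound/length pair per dimension; without lower
// bounds it passes numArgs == 3.)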
3048 if ((!numArgsArg->Current()->IsCnsIntOrI()) || (numArgsArg->Current()->AsIntCon()->IconValue() < 1) ||
3049 (numArgsArg->Current()->AsIntCon()->IconValue() > 64))
3054 unsigned numArgs = static_cast<unsigned>(numArgsArg->Current()->AsIntCon()->IconValue());
3055 bool lowerBoundsSpecified;
3057 if (numArgs == rank * 2)
3059 lowerBoundsSpecified = true;
3061 else if (numArgs == rank)
3063 lowerBoundsSpecified = false;
3066 // If the rank is 1 and a lower bound isn't specified then the runtime creates
3067 // a SDArray. Note that even if a lower bound is specified it can be 0 and then
3068 // we get a SDArray as well, see the for loop below.
3082 // The rank is known to be at least 1 so we can start with numElements being 1
3083 // to avoid the need to special case the first dimension.
3086 numElements = S_UINT32(1);
3090 static bool IsArgsFieldInit(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs)
3092 return (tree->OperGet() == GT_ASG) && IsArgsFieldIndir(tree->gtGetOp1(), index, lvaNewObjArrayArgs) &&
3093 IsArgsAddr(tree->gtGetOp1()->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs);
3096 static bool IsArgsFieldIndir(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs)
3098 return (tree->OperGet() == GT_IND) && (tree->gtGetOp1()->OperGet() == GT_ADD) &&
3099 (tree->gtGetOp1()->gtGetOp2()->IsIntegralConst(sizeof(INT32) * index)) &&
3100 IsArgsAddr(tree->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs);
3103 static bool IsArgsAddr(GenTree* tree, unsigned lvaNewObjArrayArgs)
3105 return (tree->OperGet() == GT_ADDR) && (tree->gtGetOp1()->OperGet() == GT_LCL_VAR) &&
3106 (tree->gtGetOp1()->AsLclVar()->GetLclNum() == lvaNewObjArrayArgs);
3109 static bool IsComma(GenTree* tree)
3111 return (tree != nullptr) && (tree->OperGet() == GT_COMMA);
3115 unsigned argIndex = 0;
3118 for (comma = argsArg->Current(); Match::IsComma(comma); comma = comma->gtGetOp2())
3120 if (lowerBoundsSpecified)
3123 // In general lower bounds can be ignored because they're not needed to
3124 // calculate the total number of elements. But for single dimensional arrays
3125 // we need to know if the lower bound is 0 because in this case the runtime
3126 // creates a SDArray and this affects the way the array data offset is calculated.
3131 GenTree* lowerBoundAssign = comma->gtGetOp1();
3132 assert(Match::IsArgsFieldInit(lowerBoundAssign, argIndex, lvaNewObjArrayArgs));
3133 GenTree* lowerBoundNode = lowerBoundAssign->gtGetOp2();
3135 if (lowerBoundNode->IsIntegralConst(0))
3141 comma = comma->gtGetOp2();
3145 GenTree* lengthNodeAssign = comma->gtGetOp1();
3146 assert(Match::IsArgsFieldInit(lengthNodeAssign, argIndex, lvaNewObjArrayArgs));
3147 GenTree* lengthNode = lengthNodeAssign->gtGetOp2();
3149 if (!lengthNode->IsCnsIntOrI())
3154 numElements *= S_SIZE_T(lengthNode->AsIntCon()->IconValue());
3158 assert((comma != nullptr) && Match::IsArgsAddr(comma, lvaNewObjArrayArgs));
3160 if (argIndex != numArgs)
3168 // Make sure there are exactly two arguments: the array class and
3169 // the number of elements.
3172 GenTreePtr arrayLengthNode;
3174 GenTreeArgList* args = newArrayCall->gtCall.gtCallArgs;
3175 #ifdef FEATURE_READYTORUN_COMPILER
3176 if (newArrayCall->gtCall.gtCallMethHnd == eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1))
3178 // Array length is 1st argument for readytorun helper
3179 arrayLengthNode = args->Current();
3184 // Array length is 2nd argument for regular helper
3185 arrayLengthNode = args->Rest()->Current();
3189 // Make sure that the number of elements looks valid.
3191 if (arrayLengthNode->gtOper != GT_CNS_INT)
3196 numElements = S_SIZE_T(arrayLengthNode->gtIntCon.gtIconVal);
3198 if (!info.compCompHnd->isSDArray(arrayClsHnd))
3204 CORINFO_CLASS_HANDLE elemClsHnd;
3205 var_types elementType = JITtype2varType(info.compCompHnd->getChildType(arrayClsHnd, &elemClsHnd));
3208 // Note that genTypeSize will return zero for non primitive types, which is exactly
3209 // what we want (size will then be 0, and we will catch this in the conditional below).
3210 // Note that we don't expect this to fail for valid binaries, so we assert in the
3211 // non-verification case (the verification case should not assert but rather correctly
3212 // handle bad binaries). This assert is not guarding any specific invariant, but rather
3213 // saying that we don't expect this to happen, and if it is hit, we need to investigate
3217 S_UINT32 elemSize(genTypeSize(elementType));
3218 S_UINT32 size = elemSize * S_UINT32(numElements);
3220 if (size.IsOverflow())
3225 if ((size.Value() == 0) || (varTypeIsGC(elementType)))
3227 assert(verNeedsVerification());
3231 void* initData = info.compCompHnd->getArrayInitializationData(fieldToken, size.Value());
3238 // At this point we are ready to commit to implementing the InitializeArray
3239 // intrinsic using a struct assignment. Pop the arguments from the stack and
3240 // return the struct assignment node.
3246 const unsigned blkSize = size.Value();
3251 unsigned dataOffset = eeGetMDArrayDataOffset(elementType, rank);
3253 dst = gtNewOperNode(GT_ADD, TYP_BYREF, arrayLocalNode, gtNewIconNode(dataOffset, TYP_I_IMPL));
3257 dst = gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewIndexRef(elementType, arrayLocalNode, gtNewIconNode(0)));
3259 GenTreePtr blk = gtNewBlockVal(dst, blkSize);
3260 GenTreePtr srcAddr = gtNewIconHandleNode((size_t)initData, GTF_ICON_STATIC_HDL);
3261 GenTreePtr src = gtNewOperNode(GT_IND, TYP_STRUCT, srcAddr);
3263 return gtNewBlkOpNode(blk, // dst
3270 /*****************************************************************************/
3271 // Returns the GenTree that should be used to do the intrinsic instead of the call.
3272 // Returns NULL if an intrinsic cannot be used
3274 GenTreePtr Compiler::impIntrinsic(GenTreePtr newobjThis,
3275 CORINFO_CLASS_HANDLE clsHnd,
3276 CORINFO_METHOD_HANDLE method,
3277 CORINFO_SIG_INFO* sig,
3281 CorInfoIntrinsics* pIntrinsicID)
3283 bool mustExpand = false;
3284 #if COR_JIT_EE_VERSION > 460
3285 CorInfoIntrinsics intrinsicID = info.compCompHnd->getIntrinsicID(method, &mustExpand);
3287 CorInfoIntrinsics intrinsicID = info.compCompHnd->getIntrinsicID(method);
3289 *pIntrinsicID = intrinsicID;
3291 #ifndef _TARGET_ARM_
3292 genTreeOps interlockedOperator;
3295 if (intrinsicID == CORINFO_INTRINSIC_StubHelpers_GetStubContext)
3297 // must be done regardless of DbgCode and MinOpts
3298 return gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL);
3300 #ifdef _TARGET_64BIT_
3301 if (intrinsicID == CORINFO_INTRINSIC_StubHelpers_GetStubContextAddr)
3303 // must be done regardless of DbgCode and MinOpts
3304 return gtNewOperNode(GT_ADDR, TYP_I_IMPL, gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL));
3307 assert(intrinsicID != CORINFO_INTRINSIC_StubHelpers_GetStubContextAddr);
3310 GenTreePtr retNode = nullptr;
3313 // We disable the inlining of intrinsics for MinOpts.
3315 if (!mustExpand && (opts.compDbgCode || opts.MinOpts()))
3317 *pIntrinsicID = CORINFO_INTRINSIC_Illegal;
3321 // Currently we don't have CORINFO_INTRINSIC_Exp because it does not
3322 // seem to work properly for Infinity values, and we don't do
3323 // CORINFO_INTRINSIC_Pow because it needs a helper which we currently don't have
3325 var_types callType = JITtype2varType(sig->retType);
3327 /* First do the intrinsics which are always smaller than a call */
3329 switch (intrinsicID)
3331 GenTreePtr op1, op2;
3333 case CORINFO_INTRINSIC_Sin:
3334 case CORINFO_INTRINSIC_Sqrt:
3335 case CORINFO_INTRINSIC_Abs:
3336 case CORINFO_INTRINSIC_Cos:
3337 case CORINFO_INTRINSIC_Round:
3338 case CORINFO_INTRINSIC_Cosh:
3339 case CORINFO_INTRINSIC_Sinh:
3340 case CORINFO_INTRINSIC_Tan:
3341 case CORINFO_INTRINSIC_Tanh:
3342 case CORINFO_INTRINSIC_Asin:
3343 case CORINFO_INTRINSIC_Acos:
3344 case CORINFO_INTRINSIC_Atan:
3345 case CORINFO_INTRINSIC_Atan2:
3346 case CORINFO_INTRINSIC_Log10:
3347 case CORINFO_INTRINSIC_Pow:
3348 case CORINFO_INTRINSIC_Exp:
3349 case CORINFO_INTRINSIC_Ceiling:
3350 case CORINFO_INTRINSIC_Floor:
3352 // These are math intrinsics
3354 assert(callType != TYP_STRUCT);
3358 #if defined(LEGACY_BACKEND)
3359 if (IsTargetIntrinsic(intrinsicID))
3360 #elif !defined(_TARGET_X86_)
3361 // Intrinsics that are not implemented directly by target instructions will
3362 // be re-materialized as user calls in the rationalizer. For prefixed tail calls,
3363 // don't do this optimization, because:
3364 // a) For back-compatibility reasons on desktop .NET 4.6 / 4.6.1
3365 // b) It would be a non-trivial task, or too late, to re-materialize a surviving
3366 // tail-prefixed GT_INTRINSIC as a tail call in the rationalizer.
3367 if (!IsIntrinsicImplementedByUserCall(intrinsicID) || !tailCall)
3369 // On x86 RyuJIT, importing intrinsics that are implemented as user calls can cause incorrect calculation
3370 // of the depth of the stack if these intrinsics are used as arguments to another call. This causes bad
3371 // code generation for certain EH constructs.
3372 if (!IsIntrinsicImplementedByUserCall(intrinsicID))
3375 switch (sig->numArgs)
3378 op1 = impPopStack().val;
3380 #if FEATURE_X87_DOUBLES
3382 // X87 stack doesn't differentiate between float/double
3383 // so it doesn't need a cast, but everybody else does
3384 // Just double-check it is at least an FP type
3385 noway_assert(varTypeIsFloating(op1));
3387 #else // FEATURE_X87_DOUBLES
3389 if (op1->TypeGet() != callType)
3391 op1 = gtNewCastNode(callType, op1, callType);
3394 #endif // FEATURE_X87_DOUBLES
3396 op1 = new (this, GT_INTRINSIC)
3397 GenTreeIntrinsic(genActualType(callType), op1, intrinsicID, method);
3401 op2 = impPopStack().val;
3402 op1 = impPopStack().val;
3404 #if FEATURE_X87_DOUBLES
3406 // X87 stack doesn't differentiate between float/double
3407 // so it doesn't need a cast, but everybody else does
3408 // Just double-check it is at least an FP type
3409 noway_assert(varTypeIsFloating(op2));
3410 noway_assert(varTypeIsFloating(op1));
3412 #else // FEATURE_X87_DOUBLES
3414 if (op2->TypeGet() != callType)
3416 op2 = gtNewCastNode(callType, op2, callType);
3418 if (op1->TypeGet() != callType)
3420 op1 = gtNewCastNode(callType, op1, callType);
3423 #endif // FEATURE_X87_DOUBLES
3425 op1 = new (this, GT_INTRINSIC)
3426 GenTreeIntrinsic(genActualType(callType), op1, op2, intrinsicID, method);
3430 NO_WAY("Unsupported number of args for Math Intrinsic");
3433 #ifndef LEGACY_BACKEND
3434 if (IsIntrinsicImplementedByUserCall(intrinsicID))
3436 op1->gtFlags |= GTF_CALL;
3444 #ifdef _TARGET_XARCH_
3445 // TODO-ARM-CQ: reenable treating Interlocked operation as intrinsic
3446 case CORINFO_INTRINSIC_InterlockedAdd32:
3447 interlockedOperator = GT_LOCKADD;
3448 goto InterlockedBinOpCommon;
3449 case CORINFO_INTRINSIC_InterlockedXAdd32:
3450 interlockedOperator = GT_XADD;
3451 goto InterlockedBinOpCommon;
3452 case CORINFO_INTRINSIC_InterlockedXchg32:
3453 interlockedOperator = GT_XCHG;
3454 goto InterlockedBinOpCommon;
3456 #ifdef _TARGET_AMD64_
3457 case CORINFO_INTRINSIC_InterlockedAdd64:
3458 interlockedOperator = GT_LOCKADD;
3459 goto InterlockedBinOpCommon;
3460 case CORINFO_INTRINSIC_InterlockedXAdd64:
3461 interlockedOperator = GT_XADD;
3462 goto InterlockedBinOpCommon;
3463 case CORINFO_INTRINSIC_InterlockedXchg64:
3464 interlockedOperator = GT_XCHG;
3465 goto InterlockedBinOpCommon;
3466 #endif // _TARGET_AMD64_
3468 InterlockedBinOpCommon:
3469 assert(callType != TYP_STRUCT);
3470 assert(sig->numArgs == 2);
3472 op2 = impPopStack().val;
3473 op1 = impPopStack().val;
3479 // field (for example)
3481 // In the case where the first argument is the address of a local, we might
3482 // want to make this *not* make the var address-taken -- but atomic instructions
3483 // on a local are probably pretty useless anyway, so we probably don't care.
3485 op1 = gtNewOperNode(interlockedOperator, genActualType(callType), op1, op2);
3486 op1->gtFlags |= GTF_GLOB_EFFECT;
3489 #endif // _TARGET_XARCH_
3491 case CORINFO_INTRINSIC_MemoryBarrier:
3493 assert(sig->numArgs == 0);
3495 op1 = new (this, GT_MEMORYBARRIER) GenTree(GT_MEMORYBARRIER, TYP_VOID);
3496 op1->gtFlags |= GTF_GLOB_EFFECT;
3500 #ifdef _TARGET_XARCH_
3501 // TODO-ARM-CQ: reenable treating InterlockedCmpXchg32 operation as intrinsic
3502 case CORINFO_INTRINSIC_InterlockedCmpXchg32:
3503 #ifdef _TARGET_AMD64_
3504 case CORINFO_INTRINSIC_InterlockedCmpXchg64:
3507 assert(callType != TYP_STRUCT);
3508 assert(sig->numArgs == 3);
3511 op3 = impPopStack().val; // comparand
3512 op2 = impPopStack().val; // value
3513 op1 = impPopStack().val; // location
3515 GenTreePtr node = new (this, GT_CMPXCHG) GenTreeCmpXchg(genActualType(callType), op1, op2, op3);
3517 node->gtCmpXchg.gtOpLocation->gtFlags |= GTF_DONT_CSE;
3523 case CORINFO_INTRINSIC_StringLength:
3524 op1 = impPopStack().val;
3525 if (!opts.MinOpts() && !opts.compDbgCode)
3527 GenTreeArrLen* arrLen =
3528 new (this, GT_ARR_LENGTH) GenTreeArrLen(TYP_INT, op1, offsetof(CORINFO_String, stringLen));
3533 /* Create the expression "*(str_addr + stringLengthOffset)" */
3534 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
3535 gtNewIconNode(offsetof(CORINFO_String, stringLen), TYP_I_IMPL));
3536 op1 = gtNewOperNode(GT_IND, TYP_INT, op1);
3541 case CORINFO_INTRINSIC_StringGetChar:
3542 op2 = impPopStack().val;
3543 op1 = impPopStack().val;
3544 op1 = gtNewIndexRef(TYP_CHAR, op1, op2);
3545 op1->gtFlags |= GTF_INX_STRING_LAYOUT;
3549 case CORINFO_INTRINSIC_InitializeArray:
3550 retNode = impInitializeArrayIntrinsic(sig);
3553 case CORINFO_INTRINSIC_Array_Address:
3554 case CORINFO_INTRINSIC_Array_Get:
3555 case CORINFO_INTRINSIC_Array_Set:
3556 retNode = impArrayAccessIntrinsic(clsHnd, sig, memberRef, readonlyCall, intrinsicID);
3559 case CORINFO_INTRINSIC_GetTypeFromHandle:
3560 op1 = impStackTop(0).val;
3561 if (op1->gtOper == GT_CALL && (op1->gtCall.gtCallType == CT_HELPER) &&
3562 gtIsTypeHandleToRuntimeTypeHelper(op1))
3564 op1 = impPopStack().val;
3565 // Change call to return RuntimeType directly.
3566 op1->gtType = TYP_REF;
3569 // Call the regular function.
3572 case CORINFO_INTRINSIC_RTH_GetValueInternal:
3573 op1 = impStackTop(0).val;
3574 if (op1->gtOper == GT_CALL && (op1->gtCall.gtCallType == CT_HELPER) &&
3575 gtIsTypeHandleToRuntimeTypeHelper(op1))
3578 // Old tree:  Helper-RuntimeTypeHandle -> TreeToGetNativeTypeHandle
3581 // New tree:  TreeToGetNativeTypeHandle
3583 // Remove call to helper and return the native TypeHandle pointer that was the parameter
3586 op1 = impPopStack().val;
3588 // Get native TypeHandle argument to old helper
3589 op1 = op1->gtCall.gtCallArgs;
3590 assert(op1->OperIsList());
3591 assert(op1->gtOp.gtOp2 == nullptr);
3592 op1 = op1->gtOp.gtOp1;
3595 // Call the regular function.
3598 #ifndef LEGACY_BACKEND
3599 case CORINFO_INTRINSIC_Object_GetType:
3601 op1 = impPopStack().val;
3602 op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, intrinsicID, method);
3604 // Set the CALL flag to indicate that the operator is implemented by a call.
3605 // Set also the EXCEPTION flag because the native implementation of
3606 // CORINFO_INTRINSIC_Object_GetType intrinsic can throw NullReferenceException.
3607 op1->gtFlags |= (GTF_CALL | GTF_EXCEPT);
3611 // Implement ByReference Ctor. This wraps the assignment of the ref into a byref-like field
3612 // in a value type. The canonical example of this is Span<T>. In effect this is just a
3613 // substitution. The parameter byref will be assigned into the newly allocated object.
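// As a rough sketch of the expansion below: the constructor call is replaced by an
// ordinary assignment of the incoming byref into field 0 of the already-allocated
// value, conceptually
//
//     GT_ASG( GT_FIELD(<field 0>, thisptr), byrefArg )
//
// and the resulting struct value is pushed back on the stack.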
3614 case CORINFO_INTRINSIC_ByReference_Ctor:
3616 // Remove call to constructor and directly assign the byref passed
3617 // to the call to the first slot of the ByReference struct.
3618 op1 = impPopStack().val;
3619 GenTreePtr thisptr = newobjThis;
3620 CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(clsHnd, 0);
3621 GenTreePtr field = gtNewFieldRef(TYP_BYREF, fldHnd, thisptr, 0, false);
3622 GenTreePtr assign = gtNewAssignNode(field, op1);
3623 GenTreePtr byReferenceStruct = gtCloneExpr(thisptr->gtGetOp1());
3624 assert(byReferenceStruct != nullptr);
3625 impPushOnStack(byReferenceStruct, typeInfo(TI_STRUCT, clsHnd));
3629 // Implement ptr value getter for ByReference struct.
3630 case CORINFO_INTRINSIC_ByReference_Value:
3632 op1 = impPopStack().val;
3633 CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(clsHnd, 0);
3634 GenTreePtr field = gtNewFieldRef(TYP_BYREF, fldHnd, op1, 0, false);
3639 /* Unknown intrinsic */
3645 if (retNode == nullptr)
3647 NO_WAY("JIT must expand the intrinsic!");
3654 /*****************************************************************************/
3656 GenTreePtr Compiler::impArrayAccessIntrinsic(
3657 CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, int memberRef, bool readonlyCall, CorInfoIntrinsics intrinsicID)
3659 /* If we are generating SMALL_CODE, we don't want to use intrinsics for
3660 the following, as it generates fatter code.
3663 if (compCodeOpt() == SMALL_CODE)
3668 /* These intrinsics generate fatter (but faster) code and are only
3669 done if we don't need SMALL_CODE */
3671 unsigned rank = (intrinsicID == CORINFO_INTRINSIC_Array_Set) ? (sig->numArgs - 1) : sig->numArgs;
3673 // The rank 1 case is special because it has to handle two array formats;
3674 // we will simply not handle that case
3675 if (rank > GT_ARR_MAX_RANK || rank <= 1)
3680 CORINFO_CLASS_HANDLE arrElemClsHnd = nullptr;
3681 var_types elemType = JITtype2varType(info.compCompHnd->getChildType(clsHnd, &arrElemClsHnd));
3683 // For the ref case, we will only be able to inline if the types match (the
3684 // verifier checks for this; we don't care about the non-verified case) and the
3685 // type is final (so we don't need to do the cast)
3686 if ((intrinsicID != CORINFO_INTRINSIC_Array_Get) && !readonlyCall && varTypeIsGC(elemType))
3688 // Get the call site signature
3689 CORINFO_SIG_INFO LocalSig;
3690 eeGetCallSiteSig(memberRef, info.compScopeHnd, impTokenLookupContextHandle, &LocalSig);
3691 assert(LocalSig.hasThis());
3693 CORINFO_CLASS_HANDLE actualElemClsHnd;
3695 if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
3697 // Fetch the last argument, the one that indicates the type we are setting.
3698 CORINFO_ARG_LIST_HANDLE argType = LocalSig.args;
3699 for (unsigned r = 0; r < rank; r++)
3701 argType = info.compCompHnd->getArgNext(argType);
3704 typeInfo argInfo = verParseArgSigToTypeInfo(&LocalSig, argType);
3705 actualElemClsHnd = argInfo.GetClassHandle();
3709 assert(intrinsicID == CORINFO_INTRINSIC_Array_Address);
3711 // Fetch the return type
3712 typeInfo retInfo = verMakeTypeInfo(LocalSig.retType, LocalSig.retTypeClass);
3713 assert(retInfo.IsByRef());
3714 actualElemClsHnd = retInfo.GetClassHandle();
3717 // if it's not final, we can't do the optimization
3718 if (!(info.compCompHnd->getClassAttribs(actualElemClsHnd) & CORINFO_FLG_FINAL))
3724 unsigned arrayElemSize;
3725 if (elemType == TYP_STRUCT)
3727 assert(arrElemClsHnd);
3729 arrayElemSize = info.compCompHnd->getClassSize(arrElemClsHnd);
3733 arrayElemSize = genTypeSize(elemType);
3736 if ((unsigned char)arrayElemSize != arrayElemSize)
3738 // arrayElemSize would be truncated as an unsigned char.
3739 // This means the array element is too large. Don't do the optimization.
3743 GenTreePtr val = nullptr;
3745 if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
3747 // Assignment of a struct is more work, and there are more gets than sets.
3748 if (elemType == TYP_STRUCT)
3753 val = impPopStack().val;
3754 assert(genActualType(elemType) == genActualType(val->gtType) ||
3755 (elemType == TYP_FLOAT && val->gtType == TYP_DOUBLE) ||
3756 (elemType == TYP_INT && val->gtType == TYP_BYREF) ||
3757 (elemType == TYP_DOUBLE && val->gtType == TYP_FLOAT));
3760 noway_assert((unsigned char)GT_ARR_MAX_RANK == GT_ARR_MAX_RANK);
3762 GenTreePtr inds[GT_ARR_MAX_RANK];
3763 for (unsigned k = rank; k > 0; k--)
3765 inds[k - 1] = impPopStack().val;
3768 GenTreePtr arr = impPopStack().val;
3769 assert(arr->gtType == TYP_REF);
3771 GenTreePtr arrElem =
3772 new (this, GT_ARR_ELEM) GenTreeArrElem(TYP_BYREF, arr, static_cast<unsigned char>(rank),
3773 static_cast<unsigned char>(arrayElemSize), elemType, &inds[0]);
3775 if (intrinsicID != CORINFO_INTRINSIC_Array_Address)
3777 arrElem = gtNewOperNode(GT_IND, elemType, arrElem);
3780 if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
3782 assert(val != nullptr);
3783 return gtNewAssignNode(arrElem, val);
3791 BOOL Compiler::verMergeEntryStates(BasicBlock* block, bool* changed)
3795 // do some basic checks first
3796 if (block->bbStackDepthOnEntry() != verCurrentState.esStackDepth)
3801 if (verCurrentState.esStackDepth > 0)
3803 // merge stack types
3804 StackEntry* parentStack = block->bbStackOnEntry();
3805 StackEntry* childStack = verCurrentState.esStack;
3807 for (i = 0; i < verCurrentState.esStackDepth; i++, parentStack++, childStack++)
3809 if (tiMergeToCommonParent(&parentStack->seTypeInfo, &childStack->seTypeInfo, changed) == FALSE)
3816 // merge initialization status of this ptr
3818 if (verTrackObjCtorInitState)
3820 // If we're tracking the CtorInitState, then it must not be unknown in the current state.
3821 assert(verCurrentState.thisInitialized != TIS_Bottom);
3823 // If the successor block's thisInit state is unknown, copy it from the current state.
3824 if (block->bbThisOnEntry() == TIS_Bottom)
3827 verSetThisInit(block, verCurrentState.thisInitialized);
3829 else if (verCurrentState.thisInitialized != block->bbThisOnEntry())
3831 if (block->bbThisOnEntry() != TIS_Top)
3834 verSetThisInit(block, TIS_Top);
3836 if (block->bbFlags & BBF_FAILED_VERIFICATION)
3838 // The block is bad. Control can flow through the block to any handler that catches the
3839 // verification exception, but the importer ignores bad blocks and therefore won't model
3840 // this flow in the normal way. To complete the merge into the bad block, the new state
3841 // needs to be manually pushed to the handlers that may be reached after the verification
3842 // exception occurs.
3844 // Usually, the new state was already propagated to the relevant handlers while processing
3845 // the predecessors of the bad block. The exception is when the bad block is at the start
3846 // of a try region, meaning it is protected by additional handlers that do not protect its predecessors.
3849 if (block->hasTryIndex() && ((block->bbFlags & BBF_TRY_BEG) != 0))
3851 // Push TIS_Top to the handlers that protect the bad block. Note that this can cause
3852 // recursive calls back into this code path (if successors of the current bad block are
3853 // also bad blocks).
3855 ThisInitState origTIS = verCurrentState.thisInitialized;
3856 verCurrentState.thisInitialized = TIS_Top;
3857 impVerifyEHBlock(block, true);
3858 verCurrentState.thisInitialized = origTIS;
3866 assert(verCurrentState.thisInitialized == TIS_Bottom && block->bbThisOnEntry() == TIS_Bottom);
3872 /*****************************************************************************
3873 * 'logMsg' is true if a log message needs to be logged. false if the caller has
3874 * already logged it (presumably in a more detailed fashion than done here)
3875 * 'bVerificationException' is true for a verification exception, false for a
3876 * "call unauthorized by host" exception.
3879 void Compiler::verConvertBBToThrowVerificationException(BasicBlock* block DEBUGARG(bool logMsg))
3881 block->bbJumpKind = BBJ_THROW;
3882 block->bbFlags |= BBF_FAILED_VERIFICATION;
3884 impCurStmtOffsSet(block->bbCodeOffs);
3887 // we need this since BeginTreeList asserts otherwise
3888 impTreeList = impTreeLast = nullptr;
3889 block->bbFlags &= ~BBF_IMPORTED;
3893 JITLOG((LL_ERROR, "Verification failure: while compiling %s near IL offset %x..%xh \n", info.compFullName,
3894 block->bbCodeOffs, block->bbCodeOffsEnd));
3897 printf("\n\nVerification failure: %s near IL %xh \n", info.compFullName, block->bbCodeOffs);
3901 if (JitConfig.DebugBreakOnVerificationFailure())
3909 // if the stack is non-empty evaluate all the side-effects
3910 if (verCurrentState.esStackDepth > 0)
3912 impEvalSideEffects();
3914 assert(verCurrentState.esStackDepth == 0);
3916 GenTreePtr op1 = gtNewHelperCallNode(CORINFO_HELP_VERIFICATION, TYP_VOID, GTF_EXCEPT,
3917 gtNewArgList(gtNewIconNode(block->bbCodeOffs)));
3918 // verCurrentState.esStackDepth = 0;
3919 impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
3921 // The inliner is not able to handle methods that require a throw block, so
3922 // make sure this method never gets inlined.
3923 info.compCompHnd->setMethodAttribs(info.compMethodHnd, CORINFO_FLG_BAD_INLINEE);
3926 /*****************************************************************************
3929 void Compiler::verHandleVerificationFailure(BasicBlock* block DEBUGARG(bool logMsg))
3932 // In AMD64, for historical reasons involving design limitations of JIT64, the VM has a
3933 // slightly different mechanism in which it calls the JIT to perform IL verification:
3934 // in the case of transparent methods the VM calls for a predicate IsVerifiable()
3935 // that consists of calling the JIT with the IMPORT_ONLY flag and with the IL verify flag on.
3936 // If the JIT determines the method is not verifiable, it should raise the exception to the VM and let
3937 // it bubble up until reported by the runtime. Currently in RyuJIT, this method doesn't bubble
3938 // up the exception; instead it embeds a throw inside the offending basic block and lets it
3939 // fail at run time in the jitted method.
3941 // For AMD64 we don't want this behavior when the JIT has been called only for verification (i.e.
3942 // with the IMPORT_ONLY and IL Verification flag set) because this won't actually generate code,
3943 // just try to find out whether to fail this method before even actually jitting it. So, in case
3944 // we detect these two conditions, instead of generating a throw statement inside the offending
3945 // basic block, we immediately fail to JIT and notify the VM so that the IsVerifiable() predicate
3946 // returns false, making RyuJIT behave the same way JIT64 does.
3948 // The rationale behind this workaround is to avoid modifying the VM and maintain compatibility between JIT64 and
3949 // RyuJIT for the time being until we completely replace JIT64.
3950 // TODO-ARM64-Cleanup: We probably want to actually modify the VM in the future to avoid the unnecessary two passes.
3952 // In AMD64 we must make sure we're behaving the same way as JIT64, meaning we should only raise the verification
3953 // exception if we are only importing and verifying. The method verNeedsVerification() can also modify the
3954 // tiVerificationNeeded flag in the case it determines it can 'skip verification' during importation and defer it
3955 // to a runtime check. That's why we must assert one or the other (since the flag tiVerificationNeeded can
3956 // be turned off during importation).
3957 CLANG_FORMAT_COMMENT_ANCHOR;
3959 #ifdef _TARGET_64BIT_
3962 bool canSkipVerificationResult =
3963 info.compCompHnd->canSkipMethodVerification(info.compMethodHnd) != CORINFO_VERIFICATION_CANNOT_SKIP;
3964 assert(tiVerificationNeeded || canSkipVerificationResult);
3967 // Add the non verifiable flag to the compiler
3968 if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IMPORT_ONLY))
3970 tiIsVerifiableCode = FALSE;
3972 #endif //_TARGET_64BIT_
3973 verResetCurrentState(block, &verCurrentState);
3974 verConvertBBToThrowVerificationException(block DEBUGARG(logMsg));
3977 impNoteLastILoffs(); // Remember at which BC offset the tree was finished
3981 /******************************************************************************/
3982 typeInfo Compiler::verMakeTypeInfo(CorInfoType ciType, CORINFO_CLASS_HANDLE clsHnd)
3984 assert(ciType < CORINFO_TYPE_COUNT);
3989 case CORINFO_TYPE_STRING:
3990 case CORINFO_TYPE_CLASS:
3991 tiResult = verMakeTypeInfo(clsHnd);
3992 if (!tiResult.IsType(TI_REF))
3993 { // type must be consistent with element type
3998 #ifdef _TARGET_64BIT_
3999 case CORINFO_TYPE_NATIVEINT:
4000 case CORINFO_TYPE_NATIVEUINT:
4003 // If we have more precise information, use it
4004 return verMakeTypeInfo(clsHnd);
4008 return typeInfo::nativeInt();
4011 #endif // _TARGET_64BIT_
4013 case CORINFO_TYPE_VALUECLASS:
4014 case CORINFO_TYPE_REFANY:
4015 tiResult = verMakeTypeInfo(clsHnd);
4016 // type must be consistent with element type;
4017 if (!tiResult.IsValueClass())
4022 case CORINFO_TYPE_VAR:
4023 return verMakeTypeInfo(clsHnd);
4025 case CORINFO_TYPE_PTR: // for now, pointers are treated as an error
4026 case CORINFO_TYPE_VOID:
4030 case CORINFO_TYPE_BYREF:
4032 CORINFO_CLASS_HANDLE childClassHandle;
4033 CorInfoType childType = info.compCompHnd->getChildType(clsHnd, &childClassHandle);
4034 return ByRef(verMakeTypeInfo(childType, childClassHandle));
4040 { // If we have more precise information, use it
4041 return typeInfo(TI_STRUCT, clsHnd);
4045 return typeInfo(JITtype2tiType(ciType));
4051 /******************************************************************************/
4053 typeInfo Compiler::verMakeTypeInfo(CORINFO_CLASS_HANDLE clsHnd, bool bashStructToRef /* = false */)
4055 if (clsHnd == nullptr)
4060 // Byrefs should only occur in method and local signatures, which are accessed
4061 // using ICorClassInfo and ICorClassInfo.getChildType.
4062 // So findClass() and getClassAttribs() should not be called for byrefs
4064 if (JITtype2varType(info.compCompHnd->asCorInfoType(clsHnd)) == TYP_BYREF)
4066 assert(!"Did findClass() return a Byref?");
4070 unsigned attribs = info.compCompHnd->getClassAttribs(clsHnd);
4072 if (attribs & CORINFO_FLG_VALUECLASS)
4074 CorInfoType t = info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd);
4076 // Meta-data validation should ensure that CORINFO_TYPE_BYREF does
4077 // not occur here, so we may want to change this to an assert instead.
4078 if (t == CORINFO_TYPE_VOID || t == CORINFO_TYPE_BYREF || t == CORINFO_TYPE_PTR)
4083 #ifdef _TARGET_64BIT_
4084 if (t == CORINFO_TYPE_NATIVEINT || t == CORINFO_TYPE_NATIVEUINT)
4086 return typeInfo::nativeInt();
4088 #endif // _TARGET_64BIT_
4090 if (t != CORINFO_TYPE_UNDEF)
4092 return (typeInfo(JITtype2tiType(t)));
4094 else if (bashStructToRef)
4096 return (typeInfo(TI_REF, clsHnd));
4100 return (typeInfo(TI_STRUCT, clsHnd));
4103 else if (attribs & CORINFO_FLG_GENERIC_TYPE_VARIABLE)
4105 // See comment in _typeInfo.h for why we do it this way.
4106 return (typeInfo(TI_REF, clsHnd, true));
4110 return (typeInfo(TI_REF, clsHnd));
4114 /******************************************************************************/
4115 BOOL Compiler::verIsSDArray(typeInfo ti)
4117 if (ti.IsNullObjRef())
4118 { // nulls are SD arrays
4122 if (!ti.IsType(TI_REF))
4127 if (!info.compCompHnd->isSDArray(ti.GetClassHandleForObjRef()))
4134 /******************************************************************************/
4135 /* Given 'arrayObjectType' which is an array type, fetch the element type. */
4136 /* Returns an error type if anything goes wrong */
4138 typeInfo Compiler::verGetArrayElemType(typeInfo arrayObjectType)
4140 assert(!arrayObjectType.IsNullObjRef()); // you need to check for null explicitly since that is a success case
4142 if (!verIsSDArray(arrayObjectType))
4147 CORINFO_CLASS_HANDLE childClassHandle = nullptr;
4148 CorInfoType ciType = info.compCompHnd->getChildType(arrayObjectType.GetClassHandleForObjRef(), &childClassHandle);
4150 return verMakeTypeInfo(ciType, childClassHandle);
4153 /*****************************************************************************
4155 typeInfo Compiler::verParseArgSigToTypeInfo(CORINFO_SIG_INFO* sig, CORINFO_ARG_LIST_HANDLE args)
4157 CORINFO_CLASS_HANDLE classHandle;
4158 CorInfoType ciType = strip(info.compCompHnd->getArgType(sig, args, &classHandle));
4160 var_types type = JITtype2varType(ciType);
4161 if (varTypeIsGC(type))
4163 // For efficiency, getArgType only returns something in classHandle for
4164 // value types. For other types that have additional type info, you
4165 // have to call back explicitly
4166 classHandle = info.compCompHnd->getArgClass(sig, args);
4169 NO_WAY("Could not figure out Class specified in argument or local signature");
4173 return verMakeTypeInfo(ciType, classHandle);
4176 /*****************************************************************************/
4178 // This does the expensive check to figure out whether the method
4179 // needs to be verified. It is called only when we fail verification,
4180 // just before throwing the verification exception.
4182 BOOL Compiler::verNeedsVerification()
4184 // If we have previously determined that verification is NOT needed
4185 // (for example in Compiler::compCompile), that means verification is really not needed.
4186 // Return the same decision we made before.
4187 // (Note: This literally means that tiVerificationNeeded can never go from 0 to 1.)
4189 if (!tiVerificationNeeded)
4191 return tiVerificationNeeded;
4194 assert(tiVerificationNeeded);
4196 // Ok, we haven't concluded that verification is NOT needed. Consult the EE now to
4197 // obtain the answer.
4198 CorInfoCanSkipVerificationResult canSkipVerificationResult =
4199 info.compCompHnd->canSkipMethodVerification(info.compMethodHnd);
4201 // canSkipVerification will return one of the following three values:
4202 // CORINFO_VERIFICATION_CANNOT_SKIP = 0, // Cannot skip verification during jit time.
4203 // CORINFO_VERIFICATION_CAN_SKIP = 1, // Can skip verification during jit time.
4204 // CORINFO_VERIFICATION_RUNTIME_CHECK = 2, // Skip verification during jit time,
4205 // but need to insert a callout to the VM to ask during runtime
4206 // whether to skip verification or not.
4208 // Set tiRuntimeCalloutNeeded if canSkipVerification() instructs us to insert a callout for runtime check
4209 if (canSkipVerificationResult == CORINFO_VERIFICATION_RUNTIME_CHECK)
4211 tiRuntimeCalloutNeeded = true;
4214 if (canSkipVerificationResult == CORINFO_VERIFICATION_DONT_JIT)
4216 // Dev10 706080 - Testers don't like the assert, so just silence it
4217 // by not using the macros that invoke debugAssert.
4221 // When tiVerificationNeeded is true, JIT will do the verification during JIT time.
4222 // The following line means we will NOT do jit time verification if canSkipVerification
4223 // returns CORINFO_VERIFICATION_CAN_SKIP or CORINFO_VERIFICATION_RUNTIME_CHECK.
4224 tiVerificationNeeded = (canSkipVerificationResult == CORINFO_VERIFICATION_CANNOT_SKIP);
4225 return tiVerificationNeeded;
4228 BOOL Compiler::verIsByRefLike(const typeInfo& ti)
4234 if (!ti.IsType(TI_STRUCT))
4238 return info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_CONTAINS_STACK_PTR;
4241 BOOL Compiler::verIsSafeToReturnByRef(const typeInfo& ti)
4243 if (ti.IsPermanentHomeByRef())
4253 BOOL Compiler::verIsBoxable(const typeInfo& ti)
4255 return (ti.IsPrimitiveType() || ti.IsObjRef() // includes boxed generic type variables
4256 || ti.IsUnboxedGenericTypeVar() ||
4257 (ti.IsType(TI_STRUCT) &&
4258 // exclude byreflike structs
4259 !(info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_CONTAINS_STACK_PTR)));
4262 // Is it a boxed value type?
4263 bool Compiler::verIsBoxedValueType(typeInfo ti)
4265 if (ti.GetType() == TI_REF)
4267 CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandleForObjRef();
4268 return !!eeIsValueClass(clsHnd);
4276 /*****************************************************************************
4278 * Check if a TailCall is legal.
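 *
 *  Illustrative sketch (hedged) of the IL shape this check applies to -- a call
 *  immediately followed by ret, with nothing else left on the evaluation stack:
 *
 *      tail. call   int32 SomeClass::SomeMethod(int32)   // hypothetical target
 *      ret
 *
 *  The checks below additionally reject byref-like arguments, a byref 'this',
 *  constrained calls, and a non-empty stack at the call site.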
4281 bool Compiler::verCheckTailCallConstraint(
4283 CORINFO_RESOLVED_TOKEN* pResolvedToken,
4284 CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, // Is this a "constrained." call on a type parameter?
4285 bool speculative // If true, won't throw if verification fails. Instead it will
4286 // return false to the caller.
4287 // If false, it will throw.
4291 CORINFO_SIG_INFO sig;
4292 unsigned int popCount = 0; // we can't pop the stack since impImportCall needs it, so
4293 // this counter is used to keep track of how many items have been
4296 CORINFO_METHOD_HANDLE methodHnd = nullptr;
4297 CORINFO_CLASS_HANDLE methodClassHnd = nullptr;
4298 unsigned methodClassFlgs = 0;
4300 assert(impOpcodeIsCallOpcode(opcode));
4302 if (compIsForInlining())
4307 // for calli, VerifyOrReturn that this is not a virtual method
4308 if (opcode == CEE_CALLI)
4310 /* Get the call sig */
4311 eeGetSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &sig);
4313 // We don't know the target method, so we have to infer the flags, or
4314 // assume the worst-case.
4315 mflags = (sig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC;
4319 methodHnd = pResolvedToken->hMethod;
4321 mflags = info.compCompHnd->getMethodAttribs(methodHnd);
4323 // When verifying generic code we pair the method handle with its
4324 // owning class to get the exact method signature.
4325 methodClassHnd = pResolvedToken->hClass;
4326 assert(methodClassHnd);
4328 eeGetMethodSig(methodHnd, &sig, methodClassHnd);
4330 // opcode specific check
4331 methodClassFlgs = info.compCompHnd->getClassAttribs(methodClassHnd);
4334 // We must have got the methodClassHnd if opcode is not CEE_CALLI
4335 assert((methodHnd != nullptr && methodClassHnd != nullptr) || opcode == CEE_CALLI);
4337 if ((sig.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
4339 eeGetCallSiteSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &sig);
4342 // check compatibility of the arguments
4343 unsigned int argCount;
4344 argCount = sig.numArgs;
4345 CORINFO_ARG_LIST_HANDLE args;
4349 typeInfo tiDeclared = verParseArgSigToTypeInfo(&sig, args).NormaliseForStack();
4351 // check that the argument is not a byref for tailcalls
4352 VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclared), "tailcall on byrefs", speculative);
4354 // For unsafe code, we might have parameters containing a pointer to a stack location.
4355 // Disallow the tailcall for this kind.
4356 CORINFO_CLASS_HANDLE classHandle;
4357 CorInfoType ciType = strip(info.compCompHnd->getArgType(&sig, args, &classHandle));
4358 VerifyOrReturnSpeculative(ciType != CORINFO_TYPE_PTR, "tailcall on CORINFO_TYPE_PTR", speculative);
4360 args = info.compCompHnd->getArgNext(args);
4364 popCount += sig.numArgs;
4366 // check for 'this' which is on non-static methods, not called via NEWOBJ
4367 if (!(mflags & CORINFO_FLG_STATIC))
4369 // Always update the popCount.
4370 // This is crucial for the stack calculation to be correct.
4371 typeInfo tiThis = impStackTop(popCount).seTypeInfo;
4374 if (opcode == CEE_CALLI)
4376 // For CALLI, we don't know the methodClassHnd. Therefore, let's check the "this" object
4378 if (tiThis.IsValueClass())
4382 VerifyOrReturnSpeculative(!verIsByRefLike(tiThis), "byref in tailcall", speculative);
4386 // Check type compatibility of the this argument
4387 typeInfo tiDeclaredThis = verMakeTypeInfo(methodClassHnd);
4388 if (tiDeclaredThis.IsValueClass())
4390 tiDeclaredThis.MakeByRef();
4393 VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclaredThis), "byref in tailcall", speculative);
4397 // Tail calls on constrained calls should be illegal too:
4398 // when instantiated at a value type, a constrained call may pass the address of a stack allocated value
4399 VerifyOrReturnSpeculative(!pConstrainedResolvedToken, "byref in constrained tailcall", speculative);
4401 // Get the exact view of the signature for an array method
4402 if (sig.retType != CORINFO_TYPE_VOID)
4404 if (methodClassFlgs & CORINFO_FLG_ARRAY)
4406 assert(opcode != CEE_CALLI);
4407 eeGetCallSiteSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &sig);
4411 typeInfo tiCalleeRetType = verMakeTypeInfo(sig.retType, sig.retTypeClass);
4412 typeInfo tiCallerRetType =
4413 verMakeTypeInfo(info.compMethodInfo->args.retType, info.compMethodInfo->args.retTypeClass);
4415 // a void return type gets morphed into the error type, so we have to treat it specially here
4416 if (sig.retType == CORINFO_TYPE_VOID)
4418 VerifyOrReturnSpeculative(info.compMethodInfo->args.retType == CORINFO_TYPE_VOID, "tailcall return mismatch",
4423 VerifyOrReturnSpeculative(tiCompatibleWith(NormaliseForStack(tiCalleeRetType),
4424 NormaliseForStack(tiCallerRetType), true),
4425 "tailcall return mismatch", speculative);
4428 // for tailcall, stack must be empty
4429 VerifyOrReturnSpeculative(verCurrentState.esStackDepth == popCount, "stack non-empty on tailcall", speculative);
4431 return true; // Yes, tailcall is legal
4434 /*****************************************************************************
4436 * Checks the IL verification rules for the call
4439 void Compiler::verVerifyCall(OPCODE opcode,
4440 CORINFO_RESOLVED_TOKEN* pResolvedToken,
4441 CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
4444 const BYTE* delegateCreateStart,
4445 const BYTE* codeAddr,
4446 CORINFO_CALL_INFO* callInfo DEBUGARG(const char* methodName))
4449 CORINFO_SIG_INFO* sig = nullptr;
4450 unsigned int popCount = 0; // we can't pop the stack since impImportCall needs it, so
4451 // this counter is used to keep track of how many items have been virtually popped
4454 // for calli, VerifyOrReturn that this is not a virtual method
4455 if (opcode == CEE_CALLI)
4457 Verify(false, "Calli not verifiable");
4461 //<NICE> It would be nice to cache the rest of it, but eeFindMethod is the big ticket item.
4462 mflags = callInfo->verMethodFlags;
4464 sig = &callInfo->verSig;
4466 if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
4468 eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
4471 // opcode specific check
4472 unsigned methodClassFlgs = callInfo->classFlags;
4476 // cannot do callvirt on valuetypes
4477 VerifyOrReturn(!(methodClassFlgs & CORINFO_FLG_VALUECLASS), "callVirt on value class");
4478 VerifyOrReturn(sig->hasThis(), "CallVirt on static method");
4483 assert(!tailCall); // Importer should not allow this
4484 VerifyOrReturn((mflags & CORINFO_FLG_CONSTRUCTOR) && !(mflags & CORINFO_FLG_STATIC),
4485 "newobj must be on instance");
4487 if (methodClassFlgs & CORINFO_FLG_DELEGATE)
4489 VerifyOrReturn(sig->numArgs == 2, "wrong number args to delegate ctor");
4490 typeInfo tiDeclaredObj = verParseArgSigToTypeInfo(sig, sig->args).NormaliseForStack();
4491 typeInfo tiDeclaredFtn =
4492 verParseArgSigToTypeInfo(sig, info.compCompHnd->getArgNext(sig->args)).NormaliseForStack();
4493 VerifyOrReturn(tiDeclaredFtn.IsNativeIntType(), "ftn arg needs to be a native int type");
4495 assert(popCount == 0);
4496 typeInfo tiActualObj = impStackTop(1).seTypeInfo;
4497 typeInfo tiActualFtn = impStackTop(0).seTypeInfo;
4499 VerifyOrReturn(tiActualFtn.IsMethod(), "delegate needs method as first arg");
4500 VerifyOrReturn(tiCompatibleWith(tiActualObj, tiDeclaredObj, true), "delegate object type mismatch");
4501 VerifyOrReturn(tiActualObj.IsNullObjRef() || tiActualObj.IsType(TI_REF),
4502 "delegate object type mismatch");
4504 CORINFO_CLASS_HANDLE objTypeHandle =
4505 tiActualObj.IsNullObjRef() ? nullptr : tiActualObj.GetClassHandleForObjRef();
4507 // the method signature must be compatible with the delegate's invoke method
4509 // check that for virtual functions, the type of the object used to get the
4510 // ftn ptr is the same as the type of the object passed to the delegate ctor.
4511 // since this is a bit of work to determine in general, we pattern match stylized code sequences
4514 // the delegate creation code check, which used to be done later, is now done here
4515 // so we can read delegateMethodRef directly
4516 // from the preceding LDFTN or CEE_LDVIRTFN instruction sequence;
4517 // we then use it in our call to isCompatibleDelegate().
4519 mdMemberRef delegateMethodRef = mdMemberRefNil;
4520 VerifyOrReturn(verCheckDelegateCreation(delegateCreateStart, codeAddr, delegateMethodRef),
4521 "must create delegates with certain IL");
4523 CORINFO_RESOLVED_TOKEN delegateResolvedToken;
4524 delegateResolvedToken.tokenContext = impTokenLookupContextHandle;
4525 delegateResolvedToken.tokenScope = info.compScopeHnd;
4526 delegateResolvedToken.token = delegateMethodRef;
4527 delegateResolvedToken.tokenType = CORINFO_TOKENKIND_Method;
4528 info.compCompHnd->resolveToken(&delegateResolvedToken);
4530 CORINFO_CALL_INFO delegateCallInfo;
4531 eeGetCallInfo(&delegateResolvedToken, nullptr /* constraint typeRef */,
4532 addVerifyFlag(CORINFO_CALLINFO_SECURITYCHECKS), &delegateCallInfo);
4534 BOOL isOpenDelegate = FALSE;
4535 VerifyOrReturn(info.compCompHnd->isCompatibleDelegate(objTypeHandle, delegateResolvedToken.hClass,
4536 tiActualFtn.GetMethod(), pResolvedToken->hClass,
4538 "function incompatible with delegate");
4540 // check the constraints on the target method
4541 VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(delegateResolvedToken.hClass),
4542 "delegate target has unsatisfied class constraints");
4543 VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(delegateResolvedToken.hClass,
4544 tiActualFtn.GetMethod()),
4545 "delegate target has unsatisfied method constraints");
4547 // See ECMA spec section 1.8.1.5.2 (Delegating via instance dispatch)
4548 // for additional verification rules for delegates
4549 CORINFO_METHOD_HANDLE actualMethodHandle = tiActualFtn.GetMethod();
4550 DWORD actualMethodAttribs = info.compCompHnd->getMethodAttribs(actualMethodHandle);
4551 if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr))
4554 if ((actualMethodAttribs & CORINFO_FLG_VIRTUAL) && ((actualMethodAttribs & CORINFO_FLG_FINAL) == 0)
4556 && StrictCheckForNonVirtualCallToVirtualMethod()
4560 if (info.compCompHnd->shouldEnforceCallvirtRestriction(info.compScopeHnd))
4562 VerifyOrReturn(tiActualObj.IsThisPtr() && lvaIsOriginalThisReadOnly() ||
4563 verIsBoxedValueType(tiActualObj),
4564 "The 'this' parameter to the call must be either the calling method's "
4565 "'this' parameter or "
4566 "a boxed value type.");
4571 if (actualMethodAttribs & CORINFO_FLG_PROTECTED)
4573 BOOL targetIsStatic = actualMethodAttribs & CORINFO_FLG_STATIC;
4575 Verify(targetIsStatic || !isOpenDelegate,
4576 "Unverifiable creation of an open instance delegate for a protected member.");
4578 CORINFO_CLASS_HANDLE instanceClassHnd = (tiActualObj.IsNullObjRef() || targetIsStatic)
4580 : tiActualObj.GetClassHandleForObjRef();
4582 // In the case of protected methods, it is a requirement that the 'this'
4583 // pointer be a subclass of the current context. Perform this check.
4584 Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
4585 "Accessing protected method through wrong type.");
4590 // fall thru to default checks
4592 VerifyOrReturn(!(mflags & CORINFO_FLG_ABSTRACT), "method abstract");
4594 VerifyOrReturn(!((mflags & CORINFO_FLG_CONSTRUCTOR) && (methodClassFlgs & CORINFO_FLG_DELEGATE)),
4595 "can only newobj a delegate constructor");
4597 // check compatibility of the arguments
4598 unsigned int argCount;
4599 argCount = sig->numArgs;
4600 CORINFO_ARG_LIST_HANDLE args;
4604 typeInfo tiActual = impStackTop(popCount + argCount).seTypeInfo;
4606 typeInfo tiDeclared = verParseArgSigToTypeInfo(sig, args).NormaliseForStack();
4607 VerifyOrReturn(tiCompatibleWith(tiActual, tiDeclared, true), "type mismatch");
4609 args = info.compCompHnd->getArgNext(args);
4615 popCount += sig->numArgs;
4617 // check for 'this' which is on non-static methods, not called via NEWOBJ
4618 CORINFO_CLASS_HANDLE instanceClassHnd = info.compClassHnd;
4619 if (!(mflags & CORINFO_FLG_STATIC) && (opcode != CEE_NEWOBJ))
4621 typeInfo tiThis = impStackTop(popCount).seTypeInfo;
4624 // If it is null, we assume we can access it (since it will AV shortly)
4625 // If it is anything but a reference class, there is no hierarchy, so
4626 // again, we don't need the precise instance class to compute 'protected' access
4627 if (tiThis.IsType(TI_REF))
4629 instanceClassHnd = tiThis.GetClassHandleForObjRef();
4632 // Check type compatibility of the this argument
4633 typeInfo tiDeclaredThis = verMakeTypeInfo(pResolvedToken->hClass);
4634 if (tiDeclaredThis.IsValueClass())
4636 tiDeclaredThis.MakeByRef();
4639 // If this is a call to the base class .ctor, set thisPtr Init for this block
4641 if (mflags & CORINFO_FLG_CONSTRUCTOR)
4643 if (verTrackObjCtorInitState && tiThis.IsThisPtr() &&
4644 verIsCallToInitThisPtr(info.compClassHnd, pResolvedToken->hClass))
4646 assert(verCurrentState.thisInitialized !=
4647 TIS_Bottom); // This should never be the case just from the logic of the verifier.
4648 VerifyOrReturn(verCurrentState.thisInitialized == TIS_Uninit,
4649 "Call to base class constructor when 'this' is possibly initialized");
4650 // Otherwise, 'this' is now initialized.
4651 verCurrentState.thisInitialized = TIS_Init;
4652 tiThis.SetInitialisedObjRef();
4656 // We allow direct calls to value type constructors
4657 // NB: we have to check that the contents of tiThis is a value type, otherwise we could use a
4658 // constrained callvirt to illegally re-enter a .ctor on a value of reference type.
4659 VerifyOrReturn(tiThis.IsByRef() && DereferenceByRef(tiThis).IsValueClass(),
4660 "Bad call to a constructor");
4664 if (pConstrainedResolvedToken != nullptr)
4666 VerifyOrReturn(tiThis.IsByRef(), "non-byref this type in constrained call");
4668 typeInfo tiConstraint = verMakeTypeInfo(pConstrainedResolvedToken->hClass);
4670 // We just dereference this and test for equality
4671 tiThis.DereferenceByRef();
4672 VerifyOrReturn(typeInfo::AreEquivalent(tiThis, tiConstraint),
4673 "this type mismatch with constrained type operand");
4675 // Now pretend the this type is the boxed constrained type, for the sake of subsequent checks
4676 tiThis = typeInfo(TI_REF, pConstrainedResolvedToken->hClass);
4679 // To support direct calls on readonly byrefs, just pretend tiDeclaredThis is readonly too
4680 if (tiDeclaredThis.IsByRef() && tiThis.IsReadonlyByRef())
4682 tiDeclaredThis.SetIsReadonlyByRef();
4685 VerifyOrReturn(tiCompatibleWith(tiThis, tiDeclaredThis, true), "this type mismatch");
4687 if (tiThis.IsByRef())
4689 // Find the actual type where the method exists (as opposed to what is declared
4690 // in the metadata). This is to prevent passing a byref as the "this" argument
4691 // while calling methods like System.ValueType.GetHashCode() which expect boxed objects.
4693 CORINFO_CLASS_HANDLE actualClassHnd = info.compCompHnd->getMethodClass(pResolvedToken->hMethod);
4694 VerifyOrReturn(eeIsValueClass(actualClassHnd),
4695 "Call to base type of valuetype (which is never a valuetype)");
4698 // Rules for non-virtual call to a non-final virtual method:
4701 // The "this" pointer is considered to be "possibly written" if
4702 // 1. Its address has been taken (LDARGA 0) anywhere in the method.
4704 // 2. It has been stored to (STARG.0) anywhere in the method.
4706 // A non-virtual call to a non-final virtual method is only allowed if
4707 // 1. The this pointer passed to the callee is an instance of a boxed value type.
4709 // 2. The this pointer passed to the callee is the current method's this pointer.
4710 // (and) The current method's this pointer is not "possibly written".
4712 // Thus the rule is that if you assign to this ANYWHERE you can't make "base" calls to
4713 // virtual methods. (Luckily this does not affect .ctors, since they are not virtual).
4714 // This is stronger than is strictly needed, but implementing a laxer rule is significantly
4715 // harder and more error prone.
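// Illustrative example (hedged, C# shape): 'base.Virtual()' inside an instance method
// emits a CALL (not CALLVIRT) to a virtual, non-final method. Under the rule above this
// is verifiable only when the 'this' being passed is the caller's own 'this' pointer
// (never stored to and never address-taken), or a boxed value type.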
4717 if (opcode == CEE_CALL && (mflags & CORINFO_FLG_VIRTUAL) && ((mflags & CORINFO_FLG_FINAL) == 0)
4719 && StrictCheckForNonVirtualCallToVirtualMethod()
4723 if (info.compCompHnd->shouldEnforceCallvirtRestriction(info.compScopeHnd))
4726 tiThis.IsThisPtr() && lvaIsOriginalThisReadOnly() || verIsBoxedValueType(tiThis),
4727 "The 'this' parameter to the call must be either the calling method's 'this' parameter or "
4728 "a boxed value type.");
4733 // check any constraints on the callee's class and type parameters
4734 VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(pResolvedToken->hClass),
4735 "method has unsatisfied class constraints");
4736 VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(pResolvedToken->hClass, pResolvedToken->hMethod),
4737 "method has unsatisfied method constraints");
4739 if (mflags & CORINFO_FLG_PROTECTED)
4741 VerifyOrReturn(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
4742 "Can't access protected method");
4745 // Get the exact view of the signature for an array method
4746 if (sig->retType != CORINFO_TYPE_VOID)
4748 eeGetMethodSig(pResolvedToken->hMethod, sig, pResolvedToken->hClass);
4751 // "readonly." prefixed calls only allowed for the Address operation on arrays.
4752 // The methods supported by array types are under the control of the EE
4753 // so we can trust that only the Address operation returns a byref.
4756 typeInfo tiCalleeRetType = verMakeTypeInfo(sig->retType, sig->retTypeClass);
4757 VerifyOrReturn((methodClassFlgs & CORINFO_FLG_ARRAY) && tiCalleeRetType.IsByRef(),
4758 "unexpected use of readonly prefix");
4761 // Verify the tailcall
4764 verCheckTailCallConstraint(opcode, pResolvedToken, pConstrainedResolvedToken, false);
4768 /*****************************************************************************
4769 * Checks that a delegate creation is done using the following pattern:
4771 *     dup / ldvirtftn targetMemberRef
4773 *  or ldftn targetMemberRef
4775 * 'delegateCreateStart' points at the last dup or ldftn in this basic block (null if
4776 * not in this basic block)
4778 * targetMemberRef is read from the code sequence.
4779 * targetMemberRef is validated iff verificationNeeded.
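 *
 *  Illustrative (hedged) IL sequences that this check accepts:
 *
 *      ldftn      instance void C::M()                       // non-virtual target
 *      newobj     instance void D::.ctor(object, native int)
 *
 *  or, for a virtual target:
 *
 *      dup
 *      ldvirtftn  instance void C::M()
 *      newobj     instance void D::.ctor(object, native int)
 *
 *  (C, M and D are hypothetical names; the code below only pattern matches the
 *  ldftn / dup+ldvirtftn prefix and reads targetMemberRef from it.)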
4782 BOOL Compiler::verCheckDelegateCreation(const BYTE* delegateCreateStart,
4783 const BYTE* codeAddr,
4784 mdMemberRef& targetMemberRef)
4786 if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr))
4788 targetMemberRef = getU4LittleEndian(&delegateCreateStart[2]);
4791 else if (impIsDUP_LDVIRTFTN_TOKEN(delegateCreateStart, codeAddr))
4793 targetMemberRef = getU4LittleEndian(&delegateCreateStart[3]);
4800 typeInfo Compiler::verVerifySTIND(const typeInfo& tiTo, const typeInfo& value, const typeInfo& instrType)
4802 Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
4803 typeInfo ptrVal = verVerifyLDIND(tiTo, instrType);
4804 typeInfo normPtrVal = typeInfo(ptrVal).NormaliseForStack();
4805 if (!tiCompatibleWith(value, normPtrVal, true))
4807 Verify(tiCompatibleWith(value, normPtrVal, true), "type mismatch");
4808 compUnsafeCastUsed = true;
4813 typeInfo Compiler::verVerifyLDIND(const typeInfo& ptr, const typeInfo& instrType)
4815 assert(!instrType.IsStruct());
4820 ptrVal = DereferenceByRef(ptr);
4821 if (instrType.IsObjRef() && !ptrVal.IsObjRef())
4823 Verify(false, "bad pointer");
4824 compUnsafeCastUsed = true;
4826 else if (!instrType.IsObjRef() && !typeInfo::AreEquivalent(instrType, ptrVal))
4828 Verify(false, "pointer not consistent with instr");
4829 compUnsafeCastUsed = true;
4834 Verify(false, "pointer not byref");
4835 compUnsafeCastUsed = true;
4841 // Verify that the field is used properly. 'tiThis' is NULL for statics,
4842 // 'fieldFlags' is the field's attributes, and mutator is TRUE if it is a
4843 // ld*flda or a st*fld.
4844 // 'enclosingClass' is given if we are accessing a field in some specific type.
4846 void Compiler::verVerifyField(CORINFO_RESOLVED_TOKEN* pResolvedToken,
4847 const CORINFO_FIELD_INFO& fieldInfo,
4848 const typeInfo* tiThis,
4850 BOOL allowPlainStructAsThis)
4852 CORINFO_CLASS_HANDLE enclosingClass = pResolvedToken->hClass;
4853 unsigned fieldFlags = fieldInfo.fieldFlags;
4854 CORINFO_CLASS_HANDLE instanceClass =
4855 info.compClassHnd; // for statics, we imagine the instance is the current class.
4857 bool isStaticField = ((fieldFlags & CORINFO_FLG_FIELD_STATIC) != 0);
4860 Verify(!(fieldFlags & CORINFO_FLG_FIELD_UNMANAGED), "mutating an RVA based static");
4861 if ((fieldFlags & CORINFO_FLG_FIELD_FINAL))
4863 Verify((info.compFlags & CORINFO_FLG_CONSTRUCTOR) && enclosingClass == info.compClassHnd &&
4864 info.compIsStatic == isStaticField,
4865 "bad use of initonly field (set or address taken)");
4869 if (tiThis == nullptr)
4871 Verify(isStaticField, "used static opcode with non-static field");
4875 typeInfo tThis = *tiThis;
4877 if (allowPlainStructAsThis && tThis.IsValueClass())
4882 // If it is null, we assume we can access it (since it will AV shortly)
4883 // If it is anything but a reference class, there is no hierarchy, so
4884 // again, we don't need the precise instance class to compute 'protected' access
4885 if (tiThis->IsType(TI_REF))
4887 instanceClass = tiThis->GetClassHandleForObjRef();
4890 // Note that even if the field is static, we require that the this pointer
4891 // satisfy the same constraints as a non-static field. This happens to
4892 // be simpler and seems reasonable
4893 typeInfo tiDeclaredThis = verMakeTypeInfo(enclosingClass);
4894 if (tiDeclaredThis.IsValueClass())
4896 tiDeclaredThis.MakeByRef();
4898 // we allow read-only tThis, on any field access (even stores!), because if the
4899 // class implementor wants to prohibit stores he should make the field private.
4900 // we do this by setting the read-only bit on the type we compare tThis to.
4901 tiDeclaredThis.SetIsReadonlyByRef();
4903 else if (verTrackObjCtorInitState && tThis.IsThisPtr())
4905 // Any field access is legal on "uninitialized" this pointers.
4906 // The easiest way to implement this is to simply set the
4907 // initialized bit for the duration of the type check on the
4908 // field access only. It does not change the state of the "this"
4909 // for the function as a whole. Note that the "tThis" is a copy
4910 // of the original "this" type (*tiThis) passed in.
4911 tThis.SetInitialisedObjRef();
4914 Verify(tiCompatibleWith(tThis, tiDeclaredThis, true), "this type mismatch");
4917 // Presently the JIT does not check that we don't store or take the address of init-only fields
4918 // since we cannot guarantee their immutability and it is not a security issue.
4920 // check any constraints on the field's class --- accessing the field might cause a class constructor to run.
4921 VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(enclosingClass),
4922 "field has unsatisfied class constraints");
4923 if (fieldFlags & CORINFO_FLG_FIELD_PROTECTED)
4925 Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClass),
4926 "Accessing protected method through wrong type.");
4930 void Compiler::verVerifyCond(const typeInfo& tiOp1, const typeInfo& tiOp2, unsigned opcode)
4932 if (tiOp1.IsNumberType())
4934 #ifdef _TARGET_64BIT_
4935 Verify(tiCompatibleWith(tiOp1, tiOp2, true), "Cond type mismatch");
4936 #else // _TARGET_64BIT
4937 // [10/17/2013] Consider changing this: to put on my verification lawyer hat,
4938 // this is non-conforming to the ECMA Spec: types don't have to be equivalent,
4939 // but compatible, since we can coalesce native int with int32 (see section III.1.5).
4940 Verify(typeInfo::AreEquivalent(tiOp1, tiOp2), "Cond type mismatch");
4941 #endif // !_TARGET_64BIT_
4943 else if (tiOp1.IsObjRef())
4955 Verify(FALSE, "Cond not allowed on object types");
4957 Verify(tiOp2.IsObjRef(), "Cond type mismatch");
4959 else if (tiOp1.IsByRef())
4961 Verify(tiOp2.IsByRef(), "Cond type mismatch");
4965 Verify(tiOp1.IsMethod() && tiOp2.IsMethod(), "Cond type mismatch");
4969 void Compiler::verVerifyThisPtrInitialised()
4971 if (verTrackObjCtorInitState)
4973 Verify(verCurrentState.thisInitialized == TIS_Init, "this ptr is not initialized");
4977 BOOL Compiler::verIsCallToInitThisPtr(CORINFO_CLASS_HANDLE context, CORINFO_CLASS_HANDLE target)
4979 // Either target == context, in this case calling an alternate .ctor
4980 // Or target is the immediate parent of context
4982 return ((target == context) || (target == info.compCompHnd->getParentType(context)));
4985 GenTreePtr Compiler::impImportLdvirtftn(GenTreePtr thisPtr,
4986 CORINFO_RESOLVED_TOKEN* pResolvedToken,
4987 CORINFO_CALL_INFO* pCallInfo)
4989 if ((pCallInfo->methodFlags & CORINFO_FLG_EnC) && !(pCallInfo->classFlags & CORINFO_FLG_INTERFACE))
4991 NO_WAY("Virtual call to a function added via EnC is not supported");
4994 #ifdef FEATURE_READYTORUN_COMPILER
4995 if (opts.IsReadyToRun())
4997 if (!pCallInfo->exactContextNeedsRuntimeLookup)
4999 GenTreeCall* call = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_VIRTUAL_FUNC_PTR, TYP_I_IMPL, GTF_EXCEPT,
5000 gtNewArgList(thisPtr));
5002 call->setEntryPoint(pCallInfo->codePointerLookup.constLookup);
5007 // We need a runtime lookup. CoreRT has a ReadyToRun helper for that too.
5008 if (IsTargetAbi(CORINFO_CORERT_ABI))
5010 GenTreePtr ctxTree = getRuntimeContextTree(pCallInfo->codePointerLookup.lookupKind.runtimeLookupKind);
5012 return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL,
5013 gtNewArgList(ctxTree), &pCallInfo->codePointerLookup.lookupKind);
5018 // Get the exact descriptor for the static callsite
5019 GenTreePtr exactTypeDesc = impParentClassTokenToHandle(pResolvedToken);
5020 if (exactTypeDesc == nullptr)
5021 { // compDonotInline()
5025 GenTreePtr exactMethodDesc = impTokenToHandle(pResolvedToken);
5026 if (exactMethodDesc == nullptr)
5027 { // compDonotInline()
5031 GenTreeArgList* helpArgs = gtNewArgList(exactMethodDesc);
5033 helpArgs = gtNewListNode(exactTypeDesc, helpArgs);
5035 helpArgs = gtNewListNode(thisPtr, helpArgs);
5037 // Call helper function. This gets the target address of the final destination callsite.
5039 return gtNewHelperCallNode(CORINFO_HELP_VIRTUAL_FUNC_PTR, TYP_I_IMPL, GTF_EXCEPT, helpArgs);
5042 /*****************************************************************************
5044 * Build and import a box node
5047 void Compiler::impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken)
5049 // Get the tree for the type handle for the boxed object. In the case
5050 // of shared generic code or ngen'd code this might be an embedded type handle.
5052 // Note we can only do it if the class constructor has been called.
5053 // We can always do it on primitive types
5055 GenTreePtr op1 = nullptr;
5056 GenTreePtr op2 = nullptr;
5059 impSpillSpecialSideEff();
5061 // Now get the expression to box from the stack.
5062 CORINFO_CLASS_HANDLE operCls;
5063 GenTreePtr exprToBox = impPopStack(operCls).val;
5065 CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pResolvedToken->hClass);
5066 if (boxHelper == CORINFO_HELP_BOX)
5068 // we are doing 'normal' boxing. This means that we can inline the box operation
5069 // Box(expr) gets morphed into
5070 // temp = new(clsHnd)
5071 // cpobj(temp+4, expr, clsHnd)
5073 // The code paths differ slightly below for structs and primitives because
5074 // "cpobj" differs in these cases. In one case you get
5075 // impAssignStructPtr(temp+4, expr, clsHnd)
5076 // and in the other you get a direct store: *(temp+4) = expr
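// Roughly (hedged sketch of the trees built below): the allocation is appended as a
// separate statement assigning the box temp, and the value produced by the import is
//     GT_BOX( COMMA( store into (boxTemp + sizeof(void*)), boxTemp ) )
// so later phases can recognize and optimize the boxed value.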
5079 if (impBoxTempInUse || impBoxTemp == BAD_VAR_NUM)
5081 impBoxTemp = lvaGrabTemp(true DEBUGARG("Box Helper"));
5084 // needs to stay in use until this box expression is appended to
5085 // some other node. We approximate this by keeping it alive until
5086 // the opcode stack becomes empty
5087 impBoxTempInUse = true;
5089 #ifdef FEATURE_READYTORUN_COMPILER
5090 bool usingReadyToRunHelper = false;
5092 if (opts.IsReadyToRun())
5094 op1 = impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_NEW, TYP_REF);
5095 usingReadyToRunHelper = (op1 != nullptr);
5098 if (!usingReadyToRunHelper)
5101 // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
5102 // and the newfast call with a single call to a dynamic R2R cell that will:
5103 // 1) Load the context
5104 // 2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
5105 // 3) Allocate and return the new object for boxing
5106 // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
5108 // Ensure that the value class is restored
5109 op2 = impTokenToHandle(pResolvedToken, nullptr, TRUE /* mustRestoreHandle */);
5111 { // compDonotInline()
5115 op1 = gtNewHelperCallNode(info.compCompHnd->getNewHelper(pResolvedToken, info.compMethodHnd), TYP_REF, 0,
5119 /* Remember that this basic block contains 'new' of an array */
5120 compCurBB->bbFlags |= BBF_HAS_NEWOBJ;
5122 GenTreePtr asg = gtNewTempAssign(impBoxTemp, op1);
5124 GenTreePtr asgStmt = impAppendTree(asg, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
5126 op1 = gtNewLclvNode(impBoxTemp, TYP_REF);
5127 op2 = gtNewIconNode(sizeof(void*), TYP_I_IMPL);
5128 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, op2);
5130 if (varTypeIsStruct(exprToBox))
5132 assert(info.compCompHnd->getClassSize(pResolvedToken->hClass) == info.compCompHnd->getClassSize(operCls));
5133 op1 = impAssignStructPtr(op1, exprToBox, operCls, (unsigned)CHECK_SPILL_ALL);
5137 lclTyp = exprToBox->TypeGet();
5138 if (lclTyp == TYP_BYREF)
5140 lclTyp = TYP_I_IMPL;
5142 CorInfoType jitType = info.compCompHnd->asCorInfoType(pResolvedToken->hClass);
5143 if (impIsPrimitive(jitType))
5145 lclTyp = JITtype2varType(jitType);
5147 assert(genActualType(exprToBox->TypeGet()) == genActualType(lclTyp) ||
5148 varTypeIsFloating(lclTyp) == varTypeIsFloating(exprToBox->TypeGet()));
5149 var_types srcTyp = exprToBox->TypeGet();
5150 var_types dstTyp = lclTyp;
5152 if (srcTyp != dstTyp)
5154 assert((varTypeIsFloating(srcTyp) && varTypeIsFloating(dstTyp)) ||
5155 (varTypeIsIntegral(srcTyp) && varTypeIsIntegral(dstTyp)));
5156 exprToBox = gtNewCastNode(dstTyp, exprToBox, dstTyp);
5158 op1 = gtNewAssignNode(gtNewOperNode(GT_IND, lclTyp, op1), exprToBox);
5161 op2 = gtNewLclvNode(impBoxTemp, TYP_REF);
5162 op1 = gtNewOperNode(GT_COMMA, TYP_REF, op1, op2);
5164 // Record that this is a "box" node.
5165 op1 = new (this, GT_BOX) GenTreeBox(TYP_REF, op1, asgStmt);
5167 // If it is a value class, mark the "box" node. We can use this information
5168 // to optimise several cases:
5169 // "box(x) == null" --> false
5170 // "(box(x)).CallAnInterfaceMethod(...)" --> "(&x).CallAValueTypeMethod"
5171 // "(box(x)).CallAnObjectMethod(...)" --> "(&x).CallAValueTypeMethod"
5173 op1->gtFlags |= GTF_BOX_VALUE;
5174 assert(op1->IsBoxedValue());
5175 assert(asg->gtOper == GT_ASG);
5179 // Don't optimize, just call the helper and be done with it
5181 // Ensure that the value class is restored
5182 op2 = impTokenToHandle(pResolvedToken, nullptr, TRUE /* mustRestoreHandle */);
5184 { // compDonotInline()
5188 GenTreeArgList* args = gtNewArgList(op2, impGetStructAddr(exprToBox, operCls, (unsigned)CHECK_SPILL_ALL, true));
5189 op1 = gtNewHelperCallNode(boxHelper, TYP_REF, GTF_EXCEPT, args);
5192 /* Push the result back on the stack, */
5193 /* even if clsHnd is a value class we want the TI_REF */
5194 typeInfo tiRetVal = typeInfo(TI_REF, info.compCompHnd->getTypeForBox(pResolvedToken->hClass));
5195 impPushOnStack(op1, tiRetVal);
5198 //------------------------------------------------------------------------
5199 // impImportNewObjArray: Build and import `new` of multi-dimensional array
5202 // pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized
5203 // by a call to CEEInfo::resolveToken().
5204 // pCallInfo - The CORINFO_CALL_INFO that has been initialized
5205 // by a call to CEEInfo::getCallInfo().
5208 // The multi-dimensional array constructor arguments (array dimensions) are
5209 // pushed on the IL stack on entry to this method.
5212 // Multi-dimensional array constructors are imported as calls to a JIT
5213 // helper, not as regular calls.
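// Illustrative example (hedged): in C#, 'new int[2, 3]' reaches this point with the two
// dimension values on the IL stack; the import below replaces the newobj with a call to
// one of the CORINFO_HELP_NEW_MDARR* JIT helpers.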
5215 void Compiler::impImportNewObjArray(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo)
5217 GenTreePtr classHandle = impParentClassTokenToHandle(pResolvedToken);
5218 if (classHandle == nullptr)
5219 { // compDonotInline()
5223 assert(pCallInfo->sig.numArgs);
5226 GenTreeArgList* args;
5229 // There are two different JIT helpers that can be used to allocate
5230 // multi-dimensional arrays:
5232 // - CORINFO_HELP_NEW_MDARR - takes the array dimensions as varargs.
5233 // This variant is deprecated. It should be eventually removed.
5235 // - CORINFO_HELP_NEW_MDARR_NONVARARG - takes the array dimensions as
5236 // pointer to block of int32s. This variant is more portable.
5238 // The non-varargs helper is enabled for CoreRT only for now. Enabling this
5239 // unconditionally would require ReadyToRun version bump.
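//
// Illustrative call shapes (hedged sketch, rank-2 array):
//   non-varargs: CORINFO_HELP_NEW_MDARR_NONVARARG(arrayClsHnd, 2, &dimBlock)
//                where dimBlock is the lvaNewObjArrayArgs temp filled with the int32 dims
//   varargs:     conceptually CORINFO_HELP_NEW_MDARR(arrayClsHnd, 2, dim0, dim1), with the
//                handle and count placed last in the cdecl argument list (see below)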
5241 CLANG_FORMAT_COMMENT_ANCHOR;
5243 #if COR_JIT_EE_VERSION > 460
5244 if (!opts.IsReadyToRun() || IsTargetAbi(CORINFO_CORERT_ABI))
5246 LclVarDsc* newObjArrayArgsVar;
5248 // Reuse the temp used to pass the array dimensions to avoid bloating
5249 // the stack frame in case there are multiple calls to multi-dim array
5250 // constructors within a single method.
5251 if (lvaNewObjArrayArgs == BAD_VAR_NUM)
5253 lvaNewObjArrayArgs = lvaGrabTemp(false DEBUGARG("NewObjArrayArgs"));
5254 lvaTable[lvaNewObjArrayArgs].lvType = TYP_BLK;
5255 lvaTable[lvaNewObjArrayArgs].lvExactSize = 0;
5258 // Increase size of lvaNewObjArrayArgs to be the largest size needed to hold 'numArgs' integers
5259 // for our call to CORINFO_HELP_NEW_MDARR_NONVARARG.
5260 lvaTable[lvaNewObjArrayArgs].lvExactSize =
5261 max(lvaTable[lvaNewObjArrayArgs].lvExactSize, pCallInfo->sig.numArgs * sizeof(INT32));
5263 // The side-effects may include allocation of more multi-dimensional arrays. Spill all side-effects
5264 // to ensure that the shared lvaNewObjArrayArgs local variable is only ever used to pass arguments
5265 // to one allocation at a time.
5266 impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportNewObjArray"));
5269 // The arguments of the CORINFO_HELP_NEW_MDARR_NONVARARG helper are:
5270 // - Array class handle
5271 // - Number of dimension arguments
5272 // - Pointer to block of int32 dimensions - address of lvaNewObjArrayArgs temp.
5275 node = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK);
5276 node = gtNewOperNode(GT_ADDR, TYP_I_IMPL, node);
5278 // Pop dimension arguments from the stack one at a time and store them
5279 // into the lvaNewObjArrayArgs temp.
5280 for (int i = pCallInfo->sig.numArgs - 1; i >= 0; i--)
5282 GenTreePtr arg = impImplicitIorI4Cast(impPopStack().val, TYP_INT);
5284 GenTreePtr dest = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK);
5285 dest = gtNewOperNode(GT_ADDR, TYP_I_IMPL, dest);
5286 dest = gtNewOperNode(GT_ADD, TYP_I_IMPL, dest,
5287 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(INT32) * i));
5288 dest = gtNewOperNode(GT_IND, TYP_INT, dest);
5290 node = gtNewOperNode(GT_COMMA, node->TypeGet(), gtNewAssignNode(dest, arg), node);
5293 args = gtNewArgList(node);
5295 // pass number of arguments to the helper
5296 args = gtNewListNode(gtNewIconNode(pCallInfo->sig.numArgs), args);
5298 args = gtNewListNode(classHandle, args);
5300 node = gtNewHelperCallNode(CORINFO_HELP_NEW_MDARR_NONVARARG, TYP_REF, 0, args);
5306 // The varargs helper needs the type and method handles as last
5307 // and last-1 param (this is a cdecl call, so args will be
5308 // pushed in reverse order on the CPU stack)
5311 args = gtNewArgList(classHandle);
5313 // pass number of arguments to the helper
5314 args = gtNewListNode(gtNewIconNode(pCallInfo->sig.numArgs), args);
5316 unsigned argFlags = 0;
5317 args = impPopList(pCallInfo->sig.numArgs, &argFlags, &pCallInfo->sig, args);
5319 node = gtNewHelperCallNode(CORINFO_HELP_NEW_MDARR, TYP_REF, 0, args);
5321 // varargs, so we pop the arguments
5322 node->gtFlags |= GTF_CALL_POP_ARGS;
5325 // At the present time we don't track Caller pop arguments
5326 // that have GC references in them
5327 for (GenTreeArgList* temp = args; temp; temp = temp->Rest())
5329 assert(temp->Current()->gtType != TYP_REF);
5334 node->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
5335 node->gtCall.compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)pResolvedToken->hClass;
5337 // Remember that this basic block contains 'new' of a md array
5338 compCurBB->bbFlags |= BBF_HAS_NEWARRAY;
5340 impPushOnStack(node, typeInfo(TI_REF, pResolvedToken->hClass));
5343 GenTreePtr Compiler::impTransformThis(GenTreePtr thisPtr,
5344 CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
5345 CORINFO_THIS_TRANSFORM transform)
5349 case CORINFO_DEREF_THIS:
5351 GenTreePtr obj = thisPtr;
5353 // This does a LDIND on the obj, which should be a byref pointing to a ref
5354 impBashVarAddrsToI(obj);
5355 assert(genActualType(obj->gtType) == TYP_I_IMPL || obj->gtType == TYP_BYREF);
5356 CorInfoType constraintTyp = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass);
5358 obj = gtNewOperNode(GT_IND, JITtype2varType(constraintTyp), obj);
5359 // ldind could point anywhere, for example a boxed class static int
5360 obj->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
5365 case CORINFO_BOX_THIS:
5367 // Constraint calls where there might be no
5368 // unboxed entry point require us to implement the call via helper.
5369 // These only occur when a possible target of the call
5370 // may have inherited an implementation of an interface
5371 // method from System.Object or System.ValueType. The EE does not provide us with
5372 // "unboxed" versions of these methods.
5374 GenTreePtr obj = thisPtr;
5376 assert(obj->TypeGet() == TYP_BYREF || obj->TypeGet() == TYP_I_IMPL);
5377 obj = gtNewObjNode(pConstrainedResolvedToken->hClass, obj);
5378 obj->gtFlags |= GTF_EXCEPT;
5380 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass);
5381 var_types objType = JITtype2varType(jitTyp);
5382 if (impIsPrimitive(jitTyp))
5384 if (obj->OperIsBlk())
5386 obj->ChangeOperUnchecked(GT_IND);
5388 // Obj could point anywhere, for example a boxed class static int
5389 obj->gtFlags |= GTF_IND_TGTANYWHERE;
5390 obj->gtOp.gtOp2 = nullptr; // must be zero for tree walkers
5393 obj->gtType = JITtype2varType(jitTyp);
5394 assert(varTypeIsArithmetic(obj->gtType));
5397 // This pushes on the dereferenced byref
5398 // This is then used immediately to box.
5399 impPushOnStack(obj, verMakeTypeInfo(pConstrainedResolvedToken->hClass).NormaliseForStack());
5401 // This pops off the byref-to-a-value-type remaining on the stack and
5402 // replaces it with a boxed object.
5403 // This is then used as the object to the virtual call immediately below.
5404 impImportAndPushBox(pConstrainedResolvedToken);
5405 if (compDonotInline())
5410 obj = impPopStack().val;
5413 case CORINFO_NO_THIS_TRANSFORM:
5419 //------------------------------------------------------------------------
5420 // impCanPInvokeInline: check whether PInvoke inlining should be enabled in the current method.
5423 // true if PInvoke inlining should be enabled in the current method, false otherwise
5426 // Checks a number of ambient conditions where we could pinvoke but choose not to
5428 bool Compiler::impCanPInvokeInline()
5430 return getInlinePInvokeEnabled() && (!opts.compDbgCode) && (compCodeOpt() != SMALL_CODE) &&
5431 (!opts.compNoPInvokeInlineCB) // profiler is preventing inline pinvoke
5435 //------------------------------------------------------------------------
5436 // impCanPInvokeInlineCallSite: basic legality checks using information
5437 // from a call to see if the call qualifies as an inline pinvoke.
5440 // block - block containing the call, or for inlinees, block
5441 // containing the call being inlined
5444 // true if this call can legally qualify as an inline pinvoke, false otherwise
5447 // For runtimes that support exception handling interop there are
5448 // restrictions on using inline pinvoke in handler regions.
5450 // * We have to disable pinvoke inlining inside of filters because
5451 // in case the main execution (i.e. in the try block) is inside
5452 // unmanaged code, we cannot reuse the inlined stub (we still need
5453 // the original state until we are in the catch handler)
5455 // * We disable pinvoke inlining inside handlers since the GSCookie
5456 // is in the inlined Frame (see
5457 // CORINFO_EE_INFO::InlinedCallFrameInfo::offsetOfGSCookie), but
5458 // this would not protect framelets/return-address of handlers.
5460 // These restrictions are currently also in place for CoreCLR but
5461 // can be relaxed when coreclr/#8459 is addressed.
5463 bool Compiler::impCanPInvokeInlineCallSite(BasicBlock* block)
5465 if (block->hasHndIndex())
5470 // The remaining limitations do not apply to CoreRT
5471 if (IsTargetAbi(CORINFO_CORERT_ABI))
5476 #ifdef _TARGET_AMD64_
5477 // On x64, we disable pinvoke inlining inside of try regions.
5478 // Here is the comment from JIT64 explaining why:
5480 // [VSWhidbey: 611015] - because the jitted code links in the
5481 // Frame (instead of the stub) we rely on the Frame not being
5482 // 'active' until inside the stub. This normally happens by the
5483 // stub setting the return address pointer in the Frame object
5484 // inside the stub. On a normal return, the return address
5485 // pointer is zeroed out so the Frame can be safely re-used, but
5486 // if an exception occurs, nobody zeros out the return address
5487 // pointer. Thus if we re-used the Frame object, it would go
5488 // 'active' as soon as we link it into the Frame chain.
5490 // Technically we only need to disable PInvoke inlining if we're
5491 // in a handler or if we're in a try body with a catch or
5492 // filter/except where other non-handler code in this method
5493 // might run and try to re-use the dirty Frame object.
5495 // A desktop test case where this seems to matter is
5496 // jit\jit64\ebvts\mcpp\sources2\ijw\__clrcall\vector_ctor_dtor.02\deldtor_clr.exe
5497 if (block->hasTryIndex())
5501 #endif // _TARGET_AMD64_
5506 //------------------------------------------------------------------------
5507 // impCheckForPInvokeCall: examine the call to see if it is a pinvoke and, if so,
5508 // whether it can be expressed as an inline pinvoke.
5511 // call - tree for the call
5512 // methHnd - handle for the method being called (may be null)
5513 // sig - signature of the method being called
5514 // mflags - method flags for the method being called
5515 // block - block containing the call, or for inlinees, block
5516 // containing the call being inlined
5519 // Sets GTF_CALL_M_PINVOKE on the call for pinvokes.
5521 // Also sets GTF_CALL_UNMANAGED on call for inline pinvokes if the
5522 // call passes a combination of legality and profitability checks.
5524 // If GTF_CALL_UNMANAGED is set, increments info.compCallUnmanaged
5526 void Compiler::impCheckForPInvokeCall(
5527 GenTreePtr call, CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* sig, unsigned mflags, BasicBlock* block)
5529 CorInfoUnmanagedCallConv unmanagedCallConv;
5531 // If VM flagged it as Pinvoke, flag the call node accordingly
5532 if ((mflags & CORINFO_FLG_PINVOKE) != 0)
5534 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_PINVOKE;
5539 if ((mflags & CORINFO_FLG_PINVOKE) == 0 || (mflags & CORINFO_FLG_NOSECURITYWRAP) == 0)
5544 unmanagedCallConv = info.compCompHnd->getUnmanagedCallConv(methHnd);
5548 CorInfoCallConv callConv = CorInfoCallConv(sig->callConv & CORINFO_CALLCONV_MASK);
5549 if (callConv == CORINFO_CALLCONV_NATIVEVARARG)
5551 // Used by the IL Stubs.
5552 callConv = CORINFO_CALLCONV_C;
5554 static_assert_no_msg((unsigned)CORINFO_CALLCONV_C == (unsigned)CORINFO_UNMANAGED_CALLCONV_C);
5555 static_assert_no_msg((unsigned)CORINFO_CALLCONV_STDCALL == (unsigned)CORINFO_UNMANAGED_CALLCONV_STDCALL);
5556 static_assert_no_msg((unsigned)CORINFO_CALLCONV_THISCALL == (unsigned)CORINFO_UNMANAGED_CALLCONV_THISCALL);
5557 unmanagedCallConv = CorInfoUnmanagedCallConv(callConv);
5559 assert(!call->gtCall.gtCallCookie);
5562 if (unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_C && unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_STDCALL &&
5563 unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_THISCALL)
5567 optNativeCallCount++;
5569 if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && methHnd == nullptr)
5571 // PInvoke CALLI in IL stubs must be inlined
5576 if (!impCanPInvokeInlineCallSite(block))
5581 // PInvoke CALL in IL stubs must be inlined on CoreRT. Skip the ambient conditions checks and
5582 // profitability checks
5583 if (!(opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && IsTargetAbi(CORINFO_CORERT_ABI)))
5585 if (!impCanPInvokeInline())
5590 // Size-speed tradeoff: don't use inline pinvoke at rarely
5591 // executed call sites. The non-inline version is more compact.
5593 if (block->isRunRarely())
5599 // The expensive check should be last
5600 if (info.compCompHnd->pInvokeMarshalingRequired(methHnd, sig))
5606 JITLOG((LL_INFO1000000, "\nInline a CALLI PINVOKE call from method %s", info.compFullName));
5608 call->gtFlags |= GTF_CALL_UNMANAGED;
5609 info.compCallUnmanaged++;
5611 // AMD64 convention is same for native and managed
5612 if (unmanagedCallConv == CORINFO_UNMANAGED_CALLCONV_C)
5614 call->gtFlags |= GTF_CALL_POP_ARGS;
5617 if (unmanagedCallConv == CORINFO_UNMANAGED_CALLCONV_THISCALL)
5619 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_UNMGD_THISCALL;
5623 GenTreePtr Compiler::impImportIndirectCall(CORINFO_SIG_INFO* sig, IL_OFFSETX ilOffset)
5625 var_types callRetTyp = JITtype2varType(sig->retType);
5627 /* The function pointer is on top of the stack - It may be a
5628 * complex expression. As it is evaluated after the args,
5629 * it may cause registered args to be spilled. Simply spill it.
5632 // Ignore this trivial case.
5633 if (impStackTop().val->gtOper != GT_LCL_VAR)
5635 impSpillStackEntry(verCurrentState.esStackDepth - 1,
5636 BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impImportIndirectCall"));
5639 /* Get the function pointer */
5641 GenTreePtr fptr = impPopStack().val;
5642 assert(genActualType(fptr->gtType) == TYP_I_IMPL);
5645 // This temporary must never be converted to a double in stress mode,
5646 // because that can introduce a call to the cast helper after the
5647 // arguments have already been evaluated.
5649 if (fptr->OperGet() == GT_LCL_VAR)
5651 lvaTable[fptr->gtLclVarCommon.gtLclNum].lvKeepType = 1;
5655 /* Create the call node */
5657 GenTreePtr call = gtNewIndCallNode(fptr, callRetTyp, nullptr, ilOffset);
5659 call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
5664 /*****************************************************************************/
5666 void Compiler::impPopArgsForUnmanagedCall(GenTreePtr call, CORINFO_SIG_INFO* sig)
5668 assert(call->gtFlags & GTF_CALL_UNMANAGED);
5670 /* Since we push the arguments in reverse order (i.e. right -> left)
5671 * spill any side effects from the stack
5673 * OBS: If there is only one side effect we do not need to spill it;
5674 * thus we have to spill all side effects except the last one
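 *
 * E.g. (hedged sketch): for f(a, b) where both a and b have side effects and the
 * argument list is later popped in reverse, a is spilled to a temp first -- so its
 * side effect still happens before b's -- while b, the last side effect, can remain
 * on the stack.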
5677 unsigned lastLevelWithSideEffects = UINT_MAX;
5679 unsigned argsToReverse = sig->numArgs;
5681 // For "thiscall", the first argument goes in a register. Since its
5682 // order does not need to be changed, we do not need to spill it
5684 if (call->gtCall.gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
5686 assert(argsToReverse);
5690 #ifndef _TARGET_X86_
5691 // Don't reverse args on ARM or x64 - first four args always placed in regs in order
5695 for (unsigned level = verCurrentState.esStackDepth - argsToReverse; level < verCurrentState.esStackDepth; level++)
5697 if (verCurrentState.esStack[level].val->gtFlags & GTF_ORDER_SIDEEFF)
5699 assert(lastLevelWithSideEffects == UINT_MAX);
5701 impSpillStackEntry(level,
5702 BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - other side effect"));
5704 else if (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT)
5706 if (lastLevelWithSideEffects != UINT_MAX)
5708 /* We had a previous side effect - must spill it */
5709 impSpillStackEntry(lastLevelWithSideEffects,
5710 BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - side effect"));
5712 /* Record the level for the current side effect in case we will spill it */
5713 lastLevelWithSideEffects = level;
5717 /* This is the first side effect encountered - record its level */
5719 lastLevelWithSideEffects = level;
5724 /* The argument list is now "clean" - no out-of-order side effects
5725 * Pop the argument list in reverse order */
5727 unsigned argFlags = 0;
5728 GenTreePtr args = call->gtCall.gtCallArgs =
5729 impPopRevList(sig->numArgs, &argFlags, sig, sig->numArgs - argsToReverse);
5731 if (call->gtCall.gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
5733 GenTreePtr thisPtr = args->Current();
5734 impBashVarAddrsToI(thisPtr);
5735 assert(thisPtr->TypeGet() == TYP_I_IMPL || thisPtr->TypeGet() == TYP_BYREF);
5740 call->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
5744 //------------------------------------------------------------------------
5745 // impInitClass: Build a node to initialize the class before accessing the
5746 // field if necessary
5749 // pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized
5750 // by a call to CEEInfo::resolveToken().
5752 // Return Value: If needed, a pointer to the node that will perform the class
5753 // initialization. Otherwise, nullptr.
5756 GenTreePtr Compiler::impInitClass(CORINFO_RESOLVED_TOKEN* pResolvedToken)
5758 CorInfoInitClassResult initClassResult =
5759 info.compCompHnd->initClass(pResolvedToken->hField, info.compMethodHnd, impTokenLookupContextHandle);
5761 if ((initClassResult & CORINFO_INITCLASS_USE_HELPER) == 0)
5767 GenTreePtr node = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup);
5769 if (node == nullptr)
5771 assert(compDonotInline());
5777 node = gtNewHelperCallNode(CORINFO_HELP_INITCLASS, TYP_VOID, 0, gtNewArgList(node));
5781 // Call the shared non-GC static helper, as it's the fastest
5782 node = fgGetSharedCCtor(pResolvedToken->hClass);
5788 GenTreePtr Compiler::impImportStaticReadOnlyField(void* fldAddr, var_types lclTyp)
5790 GenTreePtr op1 = nullptr;
5799 ival = *((bool*)fldAddr);
5803 ival = *((signed char*)fldAddr);
5807 ival = *((unsigned char*)fldAddr);
5811 ival = *((short*)fldAddr);
5816 ival = *((unsigned short*)fldAddr);
5821 ival = *((int*)fldAddr);
5823 op1 = gtNewIconNode(ival);
5828 lval = *((__int64*)fldAddr);
5829 op1 = gtNewLconNode(lval);
5833 dval = *((float*)fldAddr);
5834 op1 = gtNewDconNode(dval);
5835 #if !FEATURE_X87_DOUBLES
5836 // X87 stack doesn't differentiate between float/double
5837 // so R4 is treated as R8, but everybody else does
5838 op1->gtType = TYP_FLOAT;
5839 #endif // FEATURE_X87_DOUBLES
5843 dval = *((double*)fldAddr);
5844 op1 = gtNewDconNode(dval);
5848 assert(!"Unexpected lclTyp");
5855 GenTreePtr Compiler::impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolvedToken,
5856 CORINFO_ACCESS_FLAGS access,
5857 CORINFO_FIELD_INFO* pFieldInfo,
5862 switch (pFieldInfo->fieldAccessor)
5864 case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
5866 assert(!compIsForInlining());
5868 // We first call a special helper to get the statics base pointer
5869 op1 = impParentClassTokenToHandle(pResolvedToken);
5871 // compIsForInlining() is false so we should never get NULL here
5872 assert(op1 != nullptr);
5874 var_types type = TYP_BYREF;
5876 switch (pFieldInfo->helper)
5878 case CORINFO_HELP_GETGENERICS_NONGCTHREADSTATIC_BASE:
5881 case CORINFO_HELP_GETGENERICS_GCSTATIC_BASE:
5882 case CORINFO_HELP_GETGENERICS_NONGCSTATIC_BASE:
5883 case CORINFO_HELP_GETGENERICS_GCTHREADSTATIC_BASE:
5886 assert(!"unknown generic statics helper");
5890 op1 = gtNewHelperCallNode(pFieldInfo->helper, type, 0, gtNewArgList(op1));
5892 FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
5893 op1 = gtNewOperNode(GT_ADD, type, op1,
5894 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, pFieldInfo->offset, fs));
5898 case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
5900 #ifdef FEATURE_READYTORUN_COMPILER
5901 if (opts.IsReadyToRun())
5903 unsigned callFlags = 0;
5905 if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT)
5907 callFlags |= GTF_CALL_HOISTABLE;
5910 op1 = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_STATIC_BASE, TYP_BYREF, callFlags);
5912 op1->gtCall.setEntryPoint(pFieldInfo->fieldLookup);
5917 op1 = fgGetStaticsCCtorHelper(pResolvedToken->hClass, pFieldInfo->helper);
5921 FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
5922 op1 = gtNewOperNode(GT_ADD, op1->TypeGet(), op1,
5923 new (this, GT_CNS_INT) GenTreeIntCon(TYP_INT, pFieldInfo->offset, fs));
5927 #if COR_JIT_EE_VERSION > 460
5928 case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
5930 #ifdef FEATURE_READYTORUN_COMPILER
5931 noway_assert(opts.IsReadyToRun());
5932 CORINFO_LOOKUP_KIND kind = info.compCompHnd->getLocationOfThisType(info.compMethodHnd);
5933 assert(kind.needsRuntimeLookup);
5935 GenTreePtr ctxTree = getRuntimeContextTree(kind.runtimeLookupKind);
5936 GenTreeArgList* args = gtNewArgList(ctxTree);
5938 unsigned callFlags = 0;
5940 if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT)
5942 callFlags |= GTF_CALL_HOISTABLE;
5944 var_types type = TYP_BYREF;
5945 op1 = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE, type, callFlags, args);
5947 op1->gtCall.setEntryPoint(pFieldInfo->fieldLookup);
5948 FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
5949 op1 = gtNewOperNode(GT_ADD, type, op1,
5950 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, pFieldInfo->offset, fs));
5953 #endif // FEATURE_READYTORUN_COMPILER
5956 #endif // COR_JIT_EE_VERSION > 460
5959 if (!(access & CORINFO_ACCESS_ADDRESS))
5961 // In future, it may be better to just create the right tree here instead of folding it later.
5962 op1 = gtNewFieldRef(lclTyp, pResolvedToken->hField);
5964 if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP)
5966 op1->gtType = TYP_REF; // points at boxed object
5967 FieldSeqNode* firstElemFldSeq =
5968 GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField);
5970 gtNewOperNode(GT_ADD, TYP_BYREF, op1,
5971 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(void*), firstElemFldSeq));
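// Layout sketch (an assumption spelled out for clarity): a boxed static of value type S
// is laid out in the heap as [method table ptr][S payload]. The TYP_REF produced above
// points at the method table pointer, so adding sizeof(void*) together with the
// "first element" pseudo field sequence yields the address of the S payload itself.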
5973 if (varTypeIsStruct(lclTyp))
5975 // Constructor adds GTF_GLOB_REF. Note that this is *not* GTF_EXCEPT.
5976 op1 = gtNewObjNode(pFieldInfo->structType, op1);
5980 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
5981 op1->gtFlags |= GTF_GLOB_REF | GTF_IND_NONFAULTING;
5989 void** pFldAddr = nullptr;
5990 void* fldAddr = info.compCompHnd->getFieldAddress(pResolvedToken->hField, (void**)&pFldAddr);
5992 FieldSeqNode* fldSeq = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
5994 /* Create the data member node */
5995 if (pFldAddr == nullptr)
5997 op1 = gtNewIconHandleNode((size_t)fldAddr, GTF_ICON_STATIC_HDL, fldSeq);
6001 op1 = gtNewIconHandleNode((size_t)pFldAddr, GTF_ICON_STATIC_HDL, fldSeq);
6003 // There are two cases here: either the static is RVA-based,
6004 // in which case the type of the FIELD node is not a GC type
6005 // and the handle to the RVA is a TYP_I_IMPL; or the FIELD node is
6006 // a GC type and the handle to it is a TYP_BYREF into the GC heap,
6007 // because handles to statics now go into the large object heap.
6009 var_types handleTyp = (var_types)(varTypeIsGC(lclTyp) ? TYP_BYREF : TYP_I_IMPL);
6010 op1 = gtNewOperNode(GT_IND, handleTyp, op1);
6011 op1->gtFlags |= GTF_IND_INVARIANT | GTF_IND_NONFAULTING;
6018 if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP)
6020 op1 = gtNewOperNode(GT_IND, TYP_REF, op1);
6022 FieldSeqNode* fldSeq = GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField);
6024 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
6025 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(void*), fldSeq));
6028 if (!(access & CORINFO_ACCESS_ADDRESS))
6030 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
6031 op1->gtFlags |= GTF_GLOB_REF;
6037 // In general, call this before most of the verification work, since callers expect the access
6038 // exceptions to be raised before the verification exceptions. If this is done afterwards, that ordering
6039 // usually breaks down: it turns out that when something is inaccessible, we also tend to consider it unverifiable for other reasons.
6040 void Compiler::impHandleAccessAllowed(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall)
6042 if (result != CORINFO_ACCESS_ALLOWED)
6044 impHandleAccessAllowedInternal(result, helperCall);
6048 void Compiler::impHandleAccessAllowedInternal(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall)
6052 case CORINFO_ACCESS_ALLOWED:
6054 case CORINFO_ACCESS_ILLEGAL:
6055 // if we're verifying, then we need to reject the illegal access to ensure that we don't think the
6056 // method is verifiable. Otherwise, delay the exception to runtime.
6057 if (compIsForImportOnly())
6059 info.compCompHnd->ThrowExceptionForHelper(helperCall);
6063 impInsertHelperCall(helperCall);
6066 case CORINFO_ACCESS_RUNTIME_CHECK:
6067 impInsertHelperCall(helperCall);
6072 void Compiler::impInsertHelperCall(CORINFO_HELPER_DESC* helperInfo)
6074 // Construct the argument list
6075 GenTreeArgList* args = nullptr;
6076 assert(helperInfo->helperNum != CORINFO_HELP_UNDEF);
6077 for (unsigned i = helperInfo->numArgs; i > 0; --i)
6079 const CORINFO_HELPER_ARG& helperArg = helperInfo->args[i - 1];
6080 GenTreePtr currentArg = nullptr;
6081 switch (helperArg.argType)
6083 case CORINFO_HELPER_ARG_TYPE_Field:
6084 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(
6085 info.compCompHnd->getFieldClass(helperArg.fieldHandle));
6086 currentArg = gtNewIconEmbFldHndNode(helperArg.fieldHandle);
6088 case CORINFO_HELPER_ARG_TYPE_Method:
6089 info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(helperArg.methodHandle);
6090 currentArg = gtNewIconEmbMethHndNode(helperArg.methodHandle);
6092 case CORINFO_HELPER_ARG_TYPE_Class:
6093 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(helperArg.classHandle);
6094 currentArg = gtNewIconEmbClsHndNode(helperArg.classHandle);
6096 case CORINFO_HELPER_ARG_TYPE_Module:
6097 currentArg = gtNewIconEmbScpHndNode(helperArg.moduleHandle);
6099 case CORINFO_HELPER_ARG_TYPE_Const:
6100 currentArg = gtNewIconNode(helperArg.constant);
6103 NO_WAY("Illegal helper arg type");
6105 args = (currentArg == nullptr) ? gtNewArgList(currentArg) : gtNewListNode(currentArg, args);
6109 * Mark as CSE'able, and hoistable. Consider marking hoistable unless you're in the inlinee.
6110 * Also, consider sticking this in the first basic block.
6112 GenTreePtr callout = gtNewHelperCallNode(helperInfo->helperNum, TYP_VOID, GTF_EXCEPT, args);
6113 impAppendTree(callout, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
6116 void Compiler::impInsertCalloutForDelegate(CORINFO_METHOD_HANDLE callerMethodHnd,
6117 CORINFO_METHOD_HANDLE calleeMethodHnd,
6118 CORINFO_CLASS_HANDLE delegateTypeHnd)
6120 #ifdef FEATURE_CORECLR
6121 if (!info.compCompHnd->isDelegateCreationAllowed(delegateTypeHnd, calleeMethodHnd))
6123 // Call the JIT_DelegateSecurityCheck helper before calling the actual function.
6124 // This helper throws an exception if the CLR host disallows the call.
6126 GenTreePtr helper = gtNewHelperCallNode(CORINFO_HELP_DELEGATE_SECURITY_CHECK, TYP_VOID, GTF_EXCEPT,
6127 gtNewArgList(gtNewIconEmbClsHndNode(delegateTypeHnd),
6128 gtNewIconEmbMethHndNode(calleeMethodHnd)));
6129 // Append the callout statement
6130 impAppendTree(helper, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
6132 #endif // FEATURE_CORECLR
6135 // Checks whether the return types of caller and callee are compatible
6136 // so that the callee can be tail called. Note that we don't check
6137 // compatibility in the IL Verifier sense here, but rather whether the return type
6138 // sizes are equal and the values are returned in the same return register.
6139 bool Compiler::impTailCallRetTypeCompatible(var_types callerRetType,
6140 CORINFO_CLASS_HANDLE callerRetTypeClass,
6141 var_types calleeRetType,
6142 CORINFO_CLASS_HANDLE calleeRetTypeClass)
6144 // Note that we can not relax this condition with genActualType() as the
6145 // calling convention dictates that the caller of a function with a small
6146 // typed return value is responsible for normalizing the return val.
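// For example (an illustrative case, not from the original comment): a caller declared
// to return int must not tail call a callee declared to return short, since the callee's
// result may come back unnormalized and the caller's caller, expecting a full int,
// will not widen it on our behalf.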
6147 if (callerRetType == calleeRetType)
6152 #if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)
6154 if (callerRetType == TYP_VOID)
6156 // This needs to be allowed to support the following IL pattern that Jit64 allows:
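// A sketch of the pattern in question (reconstructed here for illustration; the caller
// returns void while the callee does not):
//     tail. call int32 SomeClass::Callee(...)
//     pop        // the callee's return value is discarded
//     ret        // the caller itself returns void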
6161 // Note that the above IL pattern is not valid as per IL verification rules.
6162 // Therefore, only full trust code can take advantage of this pattern.
6166 // These checks return true if the return value type sizes are the same and
6167 // get returned in the same return register i.e. caller doesn't need to normalize
6168 // return value. Some of the tail calls permitted by below checks would have
6169 // been rejected by IL Verifier before we reached here. Therefore, only full
6170 // trust code can make those tail calls.
6171 unsigned callerRetTypeSize = 0;
6172 unsigned calleeRetTypeSize = 0;
6173 bool isCallerRetTypMBEnreg =
6174 VarTypeIsMultiByteAndCanEnreg(callerRetType, callerRetTypeClass, &callerRetTypeSize, true);
6175 bool isCalleeRetTypMBEnreg =
6176 VarTypeIsMultiByteAndCanEnreg(calleeRetType, calleeRetTypeClass, &calleeRetTypeSize, true);
6178 if (varTypeIsIntegral(callerRetType) || isCallerRetTypMBEnreg)
6180 return (varTypeIsIntegral(calleeRetType) || isCalleeRetTypMBEnreg) && (callerRetTypeSize == calleeRetTypeSize);
6182 #endif // _TARGET_AMD64_ || _TARGET_ARM64_
6190 PREFIX_TAILCALL_EXPLICIT = 0x00000001, // call has "tail" IL prefix
6191 PREFIX_TAILCALL_IMPLICIT =
6192 0x00000010, // call is treated as having "tail" prefix even though there is no "tail" IL prefix
6193 PREFIX_TAILCALL = (PREFIX_TAILCALL_EXPLICIT | PREFIX_TAILCALL_IMPLICIT),
6194 PREFIX_VOLATILE = 0x00000100,
6195 PREFIX_UNALIGNED = 0x00001000,
6196 PREFIX_CONSTRAINED = 0x00010000,
6197 PREFIX_READONLY = 0x00100000
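// A minimal usage sketch (illustrative only; the importer sets these bits while decoding
// IL prefixes). Bits are OR'ed into prefixFlags as prefixes are seen, and PREFIX_TAILCALL
// matches either flavor of tail call:
//
//     int prefixFlags = 0;
//     prefixFlags |= PREFIX_TAILCALL_EXPLICIT;              // saw a "tail." IL prefix
//     if ((prefixFlags & PREFIX_TAILCALL) != 0) { /*...*/ } // true for explicit or implicit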
6200 /********************************************************************************
6202 * Returns true if the current opcode and the opcodes following it correspond
6203 * to a supported tail call IL pattern.
6206 bool Compiler::impIsTailCallILPattern(bool tailPrefixed,
6208 const BYTE* codeAddrOfNextOpcode,
6209 const BYTE* codeEnd,
6211 bool* isCallPopAndRet /* = nullptr */)
6213 // Bail out if the current opcode is not a call.
6214 if (!impOpcodeIsCallOpcode(curOpcode))
6219 #if !FEATURE_TAILCALL_OPT_SHARED_RETURN
6220 // If shared ret tail opt is not enabled, we will enable
6221 // it for recursive methods.
6225 // we can actually handle if the ret is in a fallthrough block, as long as that is the only part of the
6226 // sequence. Make sure we don't go past the end of the IL however.
6227 codeEnd = min(codeEnd + 1, info.compCode + info.compILCodeSize);
6230 // Bail out if there is no next opcode after call
6231 if (codeAddrOfNextOpcode >= codeEnd)
6236 // Scan the opcodes to look for the following IL patterns if either
6237 // i) the call is not tail prefixed (i.e. implicit tail call) or
6238 // ii) if tail prefixed, IL verification is not needed for the method.
6240 // Only in the above two cases we can allow the below tail call patterns
6241 // violating ECMA spec.
6257 #ifdef _TARGET_AMD64_
6260 nextOpcode = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode);
6261 codeAddrOfNextOpcode += sizeof(__int8);
6262 } while ((codeAddrOfNextOpcode < codeEnd) && // Haven't reached end of method
6263 (!tailPrefixed || !tiVerificationNeeded) && // Not ".tail" prefixed or method requires no IL verification
6264 ((nextOpcode == CEE_NOP) || ((nextOpcode == CEE_POP) && (++cntPop == 1)))); // Next opcode = nop or exactly
6265 // one pop seen so far.
6267 nextOpcode = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode);
6270 if (isCallPopAndRet)
6272 // Allow call+pop+ret to be tail call optimized if caller ret type is void
6273 *isCallPopAndRet = (nextOpcode == CEE_RET) && (cntPop == 1);
6276 #ifdef _TARGET_AMD64_
6278 // Tail call IL pattern could be either of the following
6279 // 1) call/callvirt/calli + ret
6280 // 2) call/callvirt/calli + pop + ret in a method returning void.
6281 return (nextOpcode == CEE_RET) && ((cntPop == 0) || ((cntPop == 1) && (info.compRetType == TYP_VOID)));
6282 #else //!_TARGET_AMD64_
6283 return (nextOpcode == CEE_RET) && (cntPop == 0);
6287 /*****************************************************************************
6289 * Determine whether the call could be converted to an implicit tail call
6292 bool Compiler::impIsImplicitTailCallCandidate(
6293 OPCODE opcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, int prefixFlags, bool isRecursive)
6296 #if FEATURE_TAILCALL_OPT
6297 if (!opts.compTailCallOpt)
6302 if (opts.compDbgCode || opts.MinOpts())
6307 // must not be tail prefixed
6308 if (prefixFlags & PREFIX_TAILCALL_EXPLICIT)
6313 #if !FEATURE_TAILCALL_OPT_SHARED_RETURN
6314 // the block containing call is marked as BBJ_RETURN
6315 // We allow shared ret tail call optimization on recursive calls even under
6316 // !FEATURE_TAILCALL_OPT_SHARED_RETURN.
6317 if (!isRecursive && (compCurBB->bbJumpKind != BBJ_RETURN))
6319 #endif // !FEATURE_TAILCALL_OPT_SHARED_RETURN
6321 // must be call+ret or call+pop+ret
6322 if (!impIsTailCallILPattern(false, opcode, codeAddrOfNextOpcode, codeEnd, isRecursive))
6330 #endif // FEATURE_TAILCALL_OPT
6333 //------------------------------------------------------------------------
6334 // impImportCall: import a call-inspiring opcode
6337 // opcode - opcode that inspires the call
6338 // pResolvedToken - resolved token for the call target
6339 // pConstrainedResolvedToken - resolved constraint token (or nullptr)
6340 // newObjThis - tree for this pointer or uninitialized newobj temp (or nullptr)
6341 // prefixFlags - IL prefix flags for the call
6342 // callInfo - EE supplied info for the call
6343 // rawILOffset - IL offset of the opcode
6346 // Type of the call's return value.
6349 // opcode can be CEE_CALL, CEE_CALLI, CEE_CALLVIRT, or CEE_NEWOBJ.
6351 // For CEE_NEWOBJ, newobjThis should be the temp grabbed for the allocated
6352 // uninitialized object.
6355 #pragma warning(push)
6356 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
6359 var_types Compiler::impImportCall(OPCODE opcode,
6360 CORINFO_RESOLVED_TOKEN* pResolvedToken,
6361 CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
6362 GenTreePtr newobjThis,
6364 CORINFO_CALL_INFO* callInfo,
6365 IL_OFFSET rawILOffset)
6367 assert(opcode == CEE_CALL || opcode == CEE_CALLVIRT || opcode == CEE_NEWOBJ || opcode == CEE_CALLI);
6369 IL_OFFSETX ilOffset = impCurILOffset(rawILOffset, true);
6370 var_types callRetTyp = TYP_COUNT;
6371 CORINFO_SIG_INFO* sig = nullptr;
6372 CORINFO_METHOD_HANDLE methHnd = nullptr;
6373 CORINFO_CLASS_HANDLE clsHnd = nullptr;
6374 unsigned clsFlags = 0;
6375 unsigned mflags = 0;
6376 unsigned argFlags = 0;
6377 GenTreePtr call = nullptr;
6378 GenTreeArgList* args = nullptr;
6379 CORINFO_THIS_TRANSFORM constraintCallThisTransform = CORINFO_NO_THIS_TRANSFORM;
6380 CORINFO_CONTEXT_HANDLE exactContextHnd = nullptr;
6381 BOOL exactContextNeedsRuntimeLookup = FALSE;
6382 bool canTailCall = true;
6383 const char* szCanTailCallFailReason = nullptr;
6384 int tailCall = prefixFlags & PREFIX_TAILCALL;
6385 bool readonlyCall = (prefixFlags & PREFIX_READONLY) != 0;
6387 // Synchronized methods need to call CORINFO_HELP_MON_EXIT at the end. We could
6388 // do that before tailcalls, but that is probably not the intended
6389 // semantic. So just disallow tailcalls from synchronized methods.
6390 // Also, popping arguments in a varargs function is more work and NYI
6391 // If we have a security object, we have to keep our frame around for callers
6392 // to see any imperative security.
6393 if (info.compFlags & CORINFO_FLG_SYNCH)
6395 canTailCall = false;
6396 szCanTailCallFailReason = "Caller is synchronized";
6398 #if !FEATURE_FIXED_OUT_ARGS
6399 else if (info.compIsVarArgs)
6401 canTailCall = false;
6402 szCanTailCallFailReason = "Caller is varargs";
6404 #endif // FEATURE_FIXED_OUT_ARGS
6405 else if (opts.compNeedSecurityCheck)
6407 canTailCall = false;
6408 szCanTailCallFailReason = "Caller requires a security check.";
6411 // We only need to cast the return value of pinvoke inlined calls that return small types
6413 // TODO-AMD64-Cleanup: Remove this when we stop interoperating with JIT64, or if we decide to stop
6414 // widening everything! CoreCLR does not support JIT64 interoperation so no need to widen there.
6415 // The existing x64 JIT doesn't bother widening all types to int, so we have to assume for
6416 // the time being that the callee might be compiled by the other JIT and thus the return
6417 // value will need to be widened by us (or not widened at all...)
6419 // ReadyToRun code sticks with default calling convention that does not widen small return types.
6421 bool checkForSmallType = opts.IsJit64Compat() || opts.IsReadyToRun();
6422 bool bIntrinsicImported = false;
6424 CORINFO_SIG_INFO calliSig;
6425 GenTreeArgList* extraArg = nullptr;
6427 /*-------------------------------------------------------------------------
6428 * First create the call node
6431 if (opcode == CEE_CALLI)
6433 /* Get the call site sig */
6434 eeGetSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &calliSig);
6436 callRetTyp = JITtype2varType(calliSig.retType);
6437 clsHnd = calliSig.retTypeClass;
6439 call = impImportIndirectCall(&calliSig, ilOffset);
6441 // We don't know the target method, so we have to infer the flags, or
6442 // assume the worst-case.
6443 mflags = (calliSig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC;
6448 unsigned structSize =
6449 (callRetTyp == TYP_STRUCT) ? info.compCompHnd->getClassSize(calliSig.retTypeSigClass) : 0;
6450 printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n",
6451 opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize);
6454 // This should be checked in impImportBlockCode.
6455 assert(!compIsForInlining() || !(impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY));
6460 // We cannot lazily obtain the signature of a CALLI call because it has no method
6461 // handle that we can use, so we need to save its full call signature here.
6462 assert(call->gtCall.callSig == nullptr);
6463 call->gtCall.callSig = new (this, CMK_CorSig) CORINFO_SIG_INFO;
6464 *call->gtCall.callSig = calliSig;
6467 if (IsTargetAbi(CORINFO_CORERT_ABI))
6469 bool managedCall = (calliSig.callConv & GTF_CALL_UNMANAGED) == 0;
6472 call->AsCall()->SetFatPointerCandidate();
6473 setMethodHasFatPointer();
6477 else // (opcode != CEE_CALLI)
6479 CorInfoIntrinsics intrinsicID = CORINFO_INTRINSIC_Count;
6481 // Passing CORINFO_CALLINFO_ALLOWINSTPARAM indicates that this JIT is prepared to
6482 // supply the instantiation parameters necessary to make direct calls to underlying
6483 // shared generic code, rather than calling through instantiating stubs. If the
6484 // returned signature has CORINFO_CALLCONV_PARAMTYPE then this indicates that the JIT
6485 // must indeed pass an instantiation parameter.
6487 methHnd = callInfo->hMethod;
6489 sig = &(callInfo->sig);
6490 callRetTyp = JITtype2varType(sig->retType);
6492 mflags = callInfo->methodFlags;
6497 unsigned structSize = (callRetTyp == TYP_STRUCT) ? info.compCompHnd->getClassSize(sig->retTypeSigClass) : 0;
6498 printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n",
6499 opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize);
6502 if (compIsForInlining())
6504 /* Does this call site have security boundary restrictions? */
6506 if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
6508 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_SECURITY);
6512 /* Does the inlinee need a security check token on the frame */
6514 if (mflags & CORINFO_FLG_SECURITYCHECK)
6516 compInlineResult->NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
6520 /* Does the inlinee use StackCrawlMark */
6522 if (mflags & CORINFO_FLG_DONT_INLINE_CALLER)
6524 compInlineResult->NoteFatal(InlineObservation::CALLEE_STACK_CRAWL_MARK);
6528 /* For now ignore delegate invoke */
6530 if (mflags & CORINFO_FLG_DELEGATE_INVOKE)
6532 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_DELEGATE_INVOKE);
6536 /* For now ignore varargs */
6537 if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
6539 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NATIVE_VARARGS);
6543 if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
6545 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS);
6549 if ((mflags & CORINFO_FLG_VIRTUAL) && (sig->sigInst.methInstCount != 0) && (opcode == CEE_CALLVIRT))
6551 compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_GENERIC_VIRTUAL);
6556 clsHnd = pResolvedToken->hClass;
6558 clsFlags = callInfo->classFlags;
6561 // If this is a call to JitTestLabel.Mark, do "early inlining", and record the test attribute.
6563 // This recognition should really be done by knowing the methHnd of the relevant Mark method(s).
6564 // These should be in mscorlib.h, and available through a JIT/EE interface call.
6565 const char* modName;
6566 const char* className;
6567 const char* methodName;
6568 if ((className = eeGetClassName(clsHnd)) != nullptr &&
6569 strcmp(className, "System.Runtime.CompilerServices.JitTestLabel") == 0 &&
6570 (methodName = eeGetMethodName(methHnd, &modName)) != nullptr && strcmp(methodName, "Mark") == 0)
6572 return impImportJitTestLabelMark(sig->numArgs);
6576 // <NICE> Factor this into getCallInfo </NICE>
6577 if ((mflags & CORINFO_FLG_INTRINSIC) && !pConstrainedResolvedToken)
6579 call = impIntrinsic(newobjThis, clsHnd, methHnd, sig, pResolvedToken->token, readonlyCall,
6580 (canTailCall && (tailCall != 0)), &intrinsicID);
6582 if (call != nullptr)
6584 assert(!(mflags & CORINFO_FLG_VIRTUAL) || (mflags & CORINFO_FLG_FINAL) ||
6585 (clsFlags & CORINFO_FLG_FINAL));
6587 #ifdef FEATURE_READYTORUN_COMPILER
6588 if (call->OperGet() == GT_INTRINSIC)
6590 if (opts.IsReadyToRun())
6592 noway_assert(callInfo->kind == CORINFO_CALL);
6593 call->gtIntrinsic.gtEntryPoint = callInfo->codePointerLookup.constLookup;
6597 call->gtIntrinsic.gtEntryPoint.addr = nullptr;
6602 bIntrinsicImported = true;
6610 call = impSIMDIntrinsic(opcode, newobjThis, clsHnd, methHnd, sig, pResolvedToken->token);
6611 if (call != nullptr)
6613 bIntrinsicImported = true;
6617 #endif // FEATURE_SIMD
6619 if ((mflags & CORINFO_FLG_VIRTUAL) && (mflags & CORINFO_FLG_EnC) && (opcode == CEE_CALLVIRT))
6621 NO_WAY("Virtual call to a function added via EnC is not supported");
6624 if ((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_DEFAULT &&
6625 (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
6626 (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG)
6628 BADCODE("Bad calling convention");
6631 //-------------------------------------------------------------------------
6632 // Construct the call node
6634 // Work out what sort of call we're making.
6635 // Dispense with virtual calls implemented via LDVIRTFTN immediately.
6637 constraintCallThisTransform = callInfo->thisTransform;
6639 exactContextHnd = callInfo->contextHandle;
6640 exactContextNeedsRuntimeLookup = callInfo->exactContextNeedsRuntimeLookup;
6642 // A recursive call is treated as a loop back to the beginning of the method.
6643 if (methHnd == info.compMethodHnd)
6648 JITDUMP("\nFound recursive call in the method. Mark BB%02u to BB%02u as having a backward branch.\n",
6649 fgFirstBB->bbNum, compCurBB->bbNum);
6652 fgMarkBackwardJump(fgFirstBB, compCurBB);
6655 switch (callInfo->kind)
6658 case CORINFO_VIRTUALCALL_STUB:
6660 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
6661 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
6662 if (callInfo->stubLookup.lookupKind.needsRuntimeLookup)
6665 if (compIsForInlining())
6667 // Don't import runtime lookups when inlining
6668 // Inlining has to be aborted in such a case
6669 /* XXX Fri 3/20/2009
6670 * By the way, this would never succeed. If the handle lookup is into the generic
6671 * dictionary for a candidate, you'll generate different dictionary offsets and the
6672 * inlined code will crash.
6674 * To anyone reviewing this code: when could this ever succeed in the future? It'll
6675 * always have a handle lookup. These lookups are safe intra-module, but we're just
6678 compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_COMPLEX_HANDLE);
6682 GenTreePtr stubAddr = impRuntimeLookupToTree(pResolvedToken, &callInfo->stubLookup, methHnd);
6683 assert(!compDonotInline());
6685 // This is the rough code to set up an indirect stub call
6686 assert(stubAddr != nullptr);
6688 // The stubAddr may be a
6689 // complex expression. As it is evaluated after the args,
6690 // it may cause registered args to be spilled. Simply spill it.
6692 unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall with runtime lookup"));
6693 impAssignTempGen(lclNum, stubAddr, (unsigned)CHECK_SPILL_ALL);
6694 stubAddr = gtNewLclvNode(lclNum, TYP_I_IMPL);
6696 // Create the actual call node
6698 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
6699 (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
6701 call = gtNewIndCallNode(stubAddr, callRetTyp, nullptr);
6703 call->gtFlags |= GTF_EXCEPT | (stubAddr->gtFlags & GTF_GLOB_EFFECT);
6704 call->gtFlags |= GTF_CALL_VIRT_STUB;
6707 // No tailcalls allowed for these yet...
6708 canTailCall = false;
6709 szCanTailCallFailReason = "VirtualCall with runtime lookup";
6714 // OK, the stub is available at compile time.
6716 call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
6717 call->gtCall.gtStubCallStubAddr = callInfo->stubLookup.constLookup.addr;
6718 call->gtFlags |= GTF_CALL_VIRT_STUB;
6719 assert(callInfo->stubLookup.constLookup.accessType != IAT_PPVALUE);
6720 if (callInfo->stubLookup.constLookup.accessType == IAT_PVALUE)
6722 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_VIRTSTUB_REL_INDIRECT;
6726 #ifdef FEATURE_READYTORUN_COMPILER
6727 if (opts.IsReadyToRun())
6729 // Null check is sometimes needed for ready to run to handle
6730 // non-virtual <-> virtual changes between versions
6731 if (callInfo->nullInstanceCheck)
6733 call->gtFlags |= GTF_CALL_NULLCHECK;
6741 case CORINFO_VIRTUALCALL_VTABLE:
6743 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
6744 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
6745 call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
6746 call->gtFlags |= GTF_CALL_VIRT_VTABLE;
6750 case CORINFO_VIRTUALCALL_LDVIRTFTN:
6752 if (compIsForInlining())
6754 compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_CALL_VIA_LDVIRTFTN);
6758 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
6759 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
6760 // OK, We've been told to call via LDVIRTFTN, so just
6761 // take the call now....
6763 args = impPopList(sig->numArgs, &argFlags, sig);
6765 GenTreePtr thisPtr = impPopStack().val;
6766 thisPtr = impTransformThis(thisPtr, pConstrainedResolvedToken, callInfo->thisTransform);
6767 if (compDonotInline())
6772 // Clone the (possibly transformed) "this" pointer
6773 GenTreePtr thisPtrCopy;
6774 thisPtr = impCloneExpr(thisPtr, &thisPtrCopy, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
6775 nullptr DEBUGARG("LDVIRTFTN this pointer"));
6777 GenTreePtr fptr = impImportLdvirtftn(thisPtr, pResolvedToken, callInfo);
6778 if (compDonotInline())
6783 thisPtr = nullptr; // can't reuse it
6785 // Now make an indirect call through the function pointer
6787 unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall through function pointer"));
6788 impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL);
6789 fptr = gtNewLclvNode(lclNum, TYP_I_IMPL);
6791 // Create the actual call node
6793 call = gtNewIndCallNode(fptr, callRetTyp, args, ilOffset);
6794 call->gtCall.gtCallObjp = thisPtrCopy;
6795 call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
6797 #ifdef FEATURE_READYTORUN_COMPILER
6798 if (opts.IsReadyToRun())
6800 // Null check is needed for ready to run to handle
6801 // non-virtual <-> virtual changes between versions
6802 call->gtFlags |= GTF_CALL_NULLCHECK;
6806 // Since we are jumping over some code, check that it's OK to skip that code
6807 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
6808 (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
6814 // This is for a non-virtual, non-interface etc. call
6815 call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
6817 // We remove the nullcheck for the GetType call intrinsic.
6818 // TODO-CQ: JIT64 does not introduce the null check for many more helper calls
6820 if (callInfo->nullInstanceCheck &&
6821 !((mflags & CORINFO_FLG_INTRINSIC) != 0 && (intrinsicID == CORINFO_INTRINSIC_Object_GetType)))
6823 call->gtFlags |= GTF_CALL_NULLCHECK;
6826 #ifdef FEATURE_READYTORUN_COMPILER
6827 if (opts.IsReadyToRun())
6829 call->gtCall.setEntryPoint(callInfo->codePointerLookup.constLookup);
6835 case CORINFO_CALL_CODE_POINTER:
6837 // The EE has asked us to call by computing a code pointer and then doing an
6838 // indirect call. This is because a runtime lookup is required to get the code entry point.
6840 // These calls always follow a uniform calling convention, i.e. no extra hidden params
6841 assert((sig->callConv & CORINFO_CALLCONV_PARAMTYPE) == 0);
6843 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG);
6844 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
6847 impLookupToTree(pResolvedToken, &callInfo->codePointerLookup, GTF_ICON_FTN_ADDR, callInfo->hMethod);
6849 if (compDonotInline())
6854 // Now make an indirect call through the function pointer
6856 unsigned lclNum = lvaGrabTemp(true DEBUGARG("Indirect call through function pointer"));
6857 impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL);
6858 fptr = gtNewLclvNode(lclNum, TYP_I_IMPL);
6860 call = gtNewIndCallNode(fptr, callRetTyp, nullptr, ilOffset);
6861 call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
6862 if (callInfo->nullInstanceCheck)
6864 call->gtFlags |= GTF_CALL_NULLCHECK;
6871 assert(!"unknown call kind");
6875 //-------------------------------------------------------------------------
6878 PREFIX_ASSUME(call != nullptr);
6880 if (mflags & CORINFO_FLG_NOGCCHECK)
6882 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_NOGCCHECK;
6885 // Mark call if it's one of the ones we will maybe treat as an intrinsic
6886 if (intrinsicID == CORINFO_INTRINSIC_Object_GetType || intrinsicID == CORINFO_INTRINSIC_TypeEQ ||
6887 intrinsicID == CORINFO_INTRINSIC_TypeNEQ || intrinsicID == CORINFO_INTRINSIC_GetCurrentManagedThread ||
6888 intrinsicID == CORINFO_INTRINSIC_GetManagedThreadId)
6890 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_SPECIAL_INTRINSIC;
6894 assert(clsHnd || (opcode == CEE_CALLI)); // We're never verifying for CALLI, so this is not set.
6896 /* Some sanity checks */
6898 // CALL_VIRT and NEWOBJ must have a THIS pointer
6899 assert((opcode != CEE_CALLVIRT && opcode != CEE_NEWOBJ) || (sig->callConv & CORINFO_CALLCONV_HASTHIS));
6900 // static bit and hasThis are negations of one another
6901 assert(((mflags & CORINFO_FLG_STATIC) != 0) == ((sig->callConv & CORINFO_CALLCONV_HASTHIS) == 0));
6902 assert(call != nullptr);
6904 /*-------------------------------------------------------------------------
6905 * Check special-cases etc
6908 /* Special case - Check if it is a call to Delegate.Invoke(). */
6910 if (mflags & CORINFO_FLG_DELEGATE_INVOKE)
6912 assert(!compIsForInlining());
6913 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
6914 assert(mflags & CORINFO_FLG_FINAL);
6916 /* Set the delegate flag */
6917 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_DELEGATE_INV;
6919 if (callInfo->secureDelegateInvoke)
6921 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_SECURE_DELEGATE_INV;
6924 if (opcode == CEE_CALLVIRT)
6926 assert(mflags & CORINFO_FLG_FINAL);
6928 /* It should have the GTF_CALL_NULLCHECK flag set. Reset it */
6929 assert(call->gtFlags & GTF_CALL_NULLCHECK);
6930 call->gtFlags &= ~GTF_CALL_NULLCHECK;
6934 CORINFO_CLASS_HANDLE actualMethodRetTypeSigClass;
6935 actualMethodRetTypeSigClass = sig->retTypeSigClass;
6936 if (varTypeIsStruct(callRetTyp))
6938 callRetTyp = impNormStructType(actualMethodRetTypeSigClass);
6939 call->gtType = callRetTyp;
6943 /* Check for varargs */
6944 if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG ||
6945 (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
6947 BADCODE("Varargs not supported.");
6949 #endif // !FEATURE_VARARG
6951 if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG ||
6952 (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
6954 assert(!compIsForInlining());
6956 /* Set the right flags */
6958 call->gtFlags |= GTF_CALL_POP_ARGS;
6959 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_VARARGS;
6961 /* Can't allow tailcall for varargs as it is caller-pop. The caller
6962 will be expecting to pop a certain number of arguments, but if we
6963 tailcall to a function with a different number of arguments, we
6964 are hosed. There are ways around this (caller remembers esp value,
6965 varargs is not caller-pop, etc), but not worth it. */
6966 CLANG_FORMAT_COMMENT_ANCHOR;
6971 canTailCall = false;
6972 szCanTailCallFailReason = "Callee is varargs";
6976 /* Get the total number of arguments - this is already correct
6977 * for CALLI - for methods we have to get it from the call site */
6979 if (opcode != CEE_CALLI)
6982 unsigned numArgsDef = sig->numArgs;
6984 eeGetCallSiteSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, sig);
6987 // We cannot lazily obtain the signature of a vararg call because using its method
6988 // handle will give us only the declared argument list, not the full argument list.
6989 assert(call->gtCall.callSig == nullptr);
6990 call->gtCall.callSig = new (this, CMK_CorSig) CORINFO_SIG_INFO;
6991 *call->gtCall.callSig = *sig;
6994 // For vararg calls we must be sure to load the return type of the
6995 // method actually being called, as well as the return types
6996 // specified in the vararg signature. With type equivalency, these types
6997 // may not be the same.
6998 if (sig->retTypeSigClass != actualMethodRetTypeSigClass)
7000 if (actualMethodRetTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS &&
7001 sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR &&
7002 sig->retType != CORINFO_TYPE_VAR)
7004 // Make sure that all valuetypes (including enums) that we push are loaded.
7005 // This is to guarantee that if a GC is triggered from the prestub of this method,
7006 // all valuetypes in the method signature are already loaded.
7007 // We need to be able to find the size of the valuetypes, but we cannot
7008 // do a class-load from within GC.
7009 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(actualMethodRetTypeSigClass);
7013 assert(numArgsDef <= sig->numArgs);
7016 /* We will have "cookie" as the last argument but we cannot push
7017 * it on the operand stack because we may overflow, so we append it
7018 * to the arg list next after we pop them */
7021 if (mflags & CORINFO_FLG_SECURITYCHECK)
7023 assert(!compIsForInlining());
7025 // Need security prolog/epilog callouts when there is
7026 // imperative security in the method. This is to give security a
7027 // chance to do any setup in the prolog and cleanup in the epilog if needed.
7029 if (compIsForInlining())
7031 // Cannot handle this if the method being imported is an inlinee by itself.
7032 // Because inlinee method does not have its own frame.
7034 compInlineResult->NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
7039 tiSecurityCalloutNeeded = true;
7041 // If the current method calls a method which needs a security check,
7042 // (i.e. the method being compiled has imperative security)
7043 // we need to reserve a slot for the security object in
7044 // the current method's stack frame
7045 opts.compNeedSecurityCheck = true;
7049 //--------------------------- Inline NDirect ------------------------------
7051 // For inline cases we technically should look at both the current
7052 // block and the call site block (or just the latter if we've
7053 // fused the EH trees). However the block-related checks pertain to
7054 // EH and we currently won't inline a method with EH. So for
7055 // inlinees, just checking the call site block is sufficient.
7057 // New lexical block here to avoid compilation errors because of GOTOs.
7058 BasicBlock* block = compIsForInlining() ? impInlineInfo->iciBlock : compCurBB;
7059 impCheckForPInvokeCall(call, methHnd, sig, mflags, block);
7062 if (call->gtFlags & GTF_CALL_UNMANAGED)
7064 // We set up the unmanaged call by linking the frame, disabling GC, etc
7065 // This needs to be cleaned up on return
7068 canTailCall = false;
7069 szCanTailCallFailReason = "Callee is native";
7072 checkForSmallType = true;
7074 impPopArgsForUnmanagedCall(call, sig);
7078 else if ((opcode == CEE_CALLI) && (((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_STDCALL) ||
7079 ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_C) ||
7080 ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_THISCALL) ||
7081 ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_FASTCALL)))
7083 if (!info.compCompHnd->canGetCookieForPInvokeCalliSig(sig))
7085 // Normally this only happens with inlining.
7086 // However, a generic method (or type) being NGENd into another module
7087 // can run into this issue as well. There's not an easy fall-back for NGEN
7088 // so instead we fall back to JIT.
7089 if (compIsForInlining())
7091 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_PINVOKE_COOKIE);
7095 IMPL_LIMITATION("Can't get PInvoke cookie (cross module generics)");
7101 GenTreePtr cookie = eeGetPInvokeCookie(sig);
7103 // This cookie is required to be either a simple GT_CNS_INT or
7104 // an indirection of a GT_CNS_INT
7106 GenTreePtr cookieConst = cookie;
7107 if (cookie->gtOper == GT_IND)
7109 cookieConst = cookie->gtOp.gtOp1;
7111 assert(cookieConst->gtOper == GT_CNS_INT);
7113 // Setting GTF_DONT_CSE on the GT_CNS_INT as well as on the GT_IND (if it exists) will ensure that
7114 // we won't allow this tree to participate in any CSE logic
7116 cookie->gtFlags |= GTF_DONT_CSE;
7117 cookieConst->gtFlags |= GTF_DONT_CSE;
7119 call->gtCall.gtCallCookie = cookie;
7123 canTailCall = false;
7124 szCanTailCallFailReason = "PInvoke calli";
7128 /*-------------------------------------------------------------------------
7129 * Create the argument list
7132 //-------------------------------------------------------------------------
7133 // Special case - for varargs we have an implicit last argument
7135 if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
7137 assert(!compIsForInlining());
7139 void *varCookie, *pVarCookie;
7140 if (!info.compCompHnd->canGetVarArgsHandle(sig))
7142 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_VARARGS_COOKIE);
7146 varCookie = info.compCompHnd->getVarArgsHandle(sig, &pVarCookie);
7147 assert((!varCookie) != (!pVarCookie));
7148 GenTreePtr cookie = gtNewIconEmbHndNode(varCookie, pVarCookie, GTF_ICON_VARG_HDL);
7150 assert(extraArg == nullptr);
7151 extraArg = gtNewArgList(cookie);
7154 //-------------------------------------------------------------------------
7155 // Extra arg for shared generic code and array methods
7157 // Extra argument containing instantiation information is passed in the
7158 // following circumstances:
7159 // (a) To the "Address" method on array classes; the extra parameter is
7160 // the array's type handle (a TypeDesc)
7161 // (b) To shared-code instance methods in generic structs; the extra parameter
7162 // is the struct's type handle (a vtable ptr)
7163 // (c) To shared-code per-instantiation non-generic static methods in generic
7164 // classes and structs; the extra parameter is the type handle
7165 // (d) To shared-code generic methods; the extra parameter is an
7166 // exact-instantiation MethodDesc
7168 // We also set the exact type context associated with the call so we can
7169 // inline the call correctly later on.
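// An illustrative instance of case (c) above (hypothetical C# shape): calling a static
// method on a shared generic type, say Cache<string>.Clear() where Cache<T> is compiled
// as code shared over reference types, passes the exact type handle for Cache<string>
// as the hidden extra argument so the callee can locate the correct per-instantiation
// statics.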
7171 if (sig->callConv & CORINFO_CALLCONV_PARAMTYPE)
7173 assert(call->gtCall.gtCallType == CT_USER_FUNC);
7174 if (clsHnd == nullptr)
7176 NO_WAY("CALLI on parameterized type");
7179 assert(opcode != CEE_CALLI);
7181 GenTreePtr instParam;
7184 // Instantiated generic method
7185 if (((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_METHOD)
7187 CORINFO_METHOD_HANDLE exactMethodHandle =
7188 (CORINFO_METHOD_HANDLE)((SIZE_T)exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK);
7190 if (!exactContextNeedsRuntimeLookup)
7192 #ifdef FEATURE_READYTORUN_COMPILER
7193 if (opts.IsReadyToRun())
7196 impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_METHOD_HDL, exactMethodHandle);
7197 if (instParam == nullptr)
7205 instParam = gtNewIconEmbMethHndNode(exactMethodHandle);
7206 info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(exactMethodHandle);
7211 instParam = impTokenToHandle(pResolvedToken, &runtimeLookup, TRUE /*mustRestoreHandle*/);
7212 if (instParam == nullptr)
7219 // otherwise must be an instance method in a generic struct,
7220 // a static method in a generic type, or a runtime-generated array method
7223 assert(((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_CLASS);
7224 CORINFO_CLASS_HANDLE exactClassHandle =
7225 (CORINFO_CLASS_HANDLE)((SIZE_T)exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK);
7227 if (compIsForInlining() && (clsFlags & CORINFO_FLG_ARRAY) != 0)
7229 compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_ARRAY_METHOD);
7233 if ((clsFlags & CORINFO_FLG_ARRAY) && readonlyCall)
7235 // We indicate "readonly" to the Address operation by using a null instParam.
7237 instParam = gtNewIconNode(0, TYP_REF);
7240 if (!exactContextNeedsRuntimeLookup)
7242 #ifdef FEATURE_READYTORUN_COMPILER
7243 if (opts.IsReadyToRun())
7246 impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_CLASS_HDL, exactClassHandle);
7247 if (instParam == nullptr)
7255 instParam = gtNewIconEmbClsHndNode(exactClassHandle);
7256 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(exactClassHandle);
7261 instParam = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup, TRUE /*mustRestoreHandle*/);
7262 if (instParam == nullptr)
7269 assert(extraArg == nullptr);
7270 extraArg = gtNewArgList(instParam);
7273 // Inlining may need the exact type context (exactContextHnd) if we're inlining shared generic code, in particular
7274 // to inline 'polytypic' operations such as static field accesses, type tests and method calls which
7275 // rely on the exact context. The exactContextHnd is passed back to the JitInterface at appropriate points.
7276 // exactContextHnd is not currently required when inlining shared generic code into shared
7277 // generic code, since the inliner aborts whenever shared code polytypic operations are encountered
7278 // (e.g. anything marked needsRuntimeLookup)
7279 if (exactContextNeedsRuntimeLookup)
7281 exactContextHnd = nullptr;
7284 //-------------------------------------------------------------------------
7285 // The main group of arguments
7287 args = call->gtCall.gtCallArgs = impPopList(sig->numArgs, &argFlags, sig, extraArg);
7291 call->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
7294 //-------------------------------------------------------------------------
7295 // The "this" pointer
7297 if (!(mflags & CORINFO_FLG_STATIC) && !((opcode == CEE_NEWOBJ) && (newobjThis == nullptr)))
7301 if (opcode == CEE_NEWOBJ)
7307 obj = impPopStack().val;
7308 obj = impTransformThis(obj, pConstrainedResolvedToken, constraintCallThisTransform);
7309 if (compDonotInline())
7315 /* Is this a virtual or interface call? */
7317 if ((call->gtFlags & GTF_CALL_VIRT_KIND_MASK) != GTF_CALL_NONVIRT)
7319 /* only true object pointers can be virtual */
7321 assert(obj->gtType == TYP_REF);
7327 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_NONVIRT_SAME_THIS;
7331 /* Store the "this" value in the call */
7333 call->gtFlags |= obj->gtFlags & GTF_GLOB_EFFECT;
7334 call->gtCall.gtCallObjp = obj;
7337 //-------------------------------------------------------------------------
7338 // The "this" pointer for "newobj"
7340 if (opcode == CEE_NEWOBJ)
7342 if (clsFlags & CORINFO_FLG_VAROBJSIZE)
7344 assert(!(clsFlags & CORINFO_FLG_ARRAY)); // arrays handled separately
7345 // This is a 'new' of a variable sized object, where
7346 // the constructor is to return the object. In this case
7347 // the constructor claims to return VOID but we know it
7348 // actually returns the new object
7349 assert(callRetTyp == TYP_VOID);
7350 callRetTyp = TYP_REF;
7351 call->gtType = TYP_REF;
7352 impSpillSpecialSideEff();
7354 impPushOnStack(call, typeInfo(TI_REF, clsHnd));
7358 if (clsFlags & CORINFO_FLG_DELEGATE)
7360 // The new inliner morphs it in impImportCall.
7361 // This will allow us to inline the call to the delegate constructor.
7362 call = fgOptimizeDelegateConstructor(call, &exactContextHnd);
7365 if (!bIntrinsicImported)
7368 #if defined(DEBUG) || defined(INLINE_DATA)
7370 // Keep track of the raw IL offset of the call
7371 call->gtCall.gtRawILOffset = rawILOffset;
7373 #endif // defined(DEBUG) || defined(INLINE_DATA)
7375 // Is it an inline candidate?
7376 impMarkInlineCandidate(call, exactContextHnd, callInfo);
7379 // append the call node.
7380 impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
7382 // Now push the value of the 'new' onto the stack
7384 // This is a 'new' of a non-variable sized object.
7385 // Append the new node (op1) to the statement list,
7386 // and then push the local holding the value of this
7387 // new instruction on the stack.
7389 if (clsFlags & CORINFO_FLG_VALUECLASS)
7391 assert(newobjThis->gtOper == GT_ADDR && newobjThis->gtOp.gtOp1->gtOper == GT_LCL_VAR);
7393 unsigned tmp = newobjThis->gtOp.gtOp1->gtLclVarCommon.gtLclNum;
7394 impPushOnStack(gtNewLclvNode(tmp, lvaGetRealType(tmp)), verMakeTypeInfo(clsHnd).NormaliseForStack());
7398 if (newobjThis->gtOper == GT_COMMA)
7400 // In coreclr the callout can be inserted even if verification is disabled
7401 // so we cannot rely on tiVerificationNeeded alone
7403 // We must have inserted the callout. Get the real newobj.
7404 newobjThis = newobjThis->gtOp.gtOp2;
7407 assert(newobjThis->gtOper == GT_LCL_VAR);
7408 impPushOnStack(gtNewLclvNode(newobjThis->gtLclVarCommon.gtLclNum, TYP_REF), typeInfo(TI_REF, clsHnd));
7418 // This check cannot be performed for implicit tail calls for the reason
7419 // that impIsImplicitTailCallCandidate() is not checking whether return
7420 // types are compatible before marking a call node with PREFIX_TAILCALL_IMPLICIT.
7421 // As a result it is possible that in the following case, we find that
7422 // the type stack is non-empty if Callee() is considered for implicit
7424 // int Caller(..) { .... void Callee(); ret val; ... }
7426 // Note that we cannot check return type compatibility before ImpImportCall()
7427 // as we don't have required info or need to duplicate some of the logic of
7430 // For implicit tail calls, we perform this check after return types are
7431 // known to be compatible.
7432 if ((tailCall & PREFIX_TAILCALL_EXPLICIT) && (verCurrentState.esStackDepth != 0))
7434 BADCODE("Stack should be empty after tailcall");
7437 // Note that we can not relax this condition with genActualType() as
7438 // the calling convention dictates that the caller of a function with
7439 // a small-typed return value is responsible for normalizing the return val
7442 !impTailCallRetTypeCompatible(info.compRetType, info.compMethodInfo->args.retTypeClass, callRetTyp,
7443 callInfo->sig.retTypeClass))
7445 canTailCall = false;
7446 szCanTailCallFailReason = "Return types are not tail call compatible";
7449 // Stack empty check for implicit tail calls.
7450 if (canTailCall && (tailCall & PREFIX_TAILCALL_IMPLICIT) && (verCurrentState.esStackDepth != 0))
7452 #ifdef _TARGET_AMD64_
7453 // JIT64 Compatibility: Opportunistic tail call stack mismatch throws a VerificationException
7454 // in JIT64, not an InvalidProgramException.
7455 Verify(false, "Stack should be empty after tailcall");
7456 #else // _TARGET_64BIT_
7457 BADCODE("Stack should be empty after tailcall");
7458 #endif //!_TARGET_64BIT_
7461 // assert(compCurBB is not a catch, finally or filter block);
7462 // assert(compCurBB is not a try block protected by a finally block);
7464 // Check for permission to tailcall
7465 bool explicitTailCall = (tailCall & PREFIX_TAILCALL_EXPLICIT) != 0;
7467 assert(!explicitTailCall || compCurBB->bbJumpKind == BBJ_RETURN);
7471 // True virtual or indirect calls shouldn't pass in a callee handle.
7472 CORINFO_METHOD_HANDLE exactCalleeHnd = ((call->gtCall.gtCallType != CT_USER_FUNC) ||
7473 ((call->gtFlags & GTF_CALL_VIRT_KIND_MASK) != GTF_CALL_NONVIRT))
7476 GenTreePtr thisArg = call->gtCall.gtCallObjp;
7478 if (info.compCompHnd->canTailCall(info.compMethodHnd, methHnd, exactCalleeHnd, explicitTailCall))
7481 if (explicitTailCall)
7483 // In case of explicit tail calls, mark it so that it is not considered
7485 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_EXPLICIT_TAILCALL;
7489 printf("\nGTF_CALL_M_EXPLICIT_TAILCALL bit set for call ");
7497 #if FEATURE_TAILCALL_OPT
7498 // Must be an implicit tail call.
7499 assert((tailCall & PREFIX_TAILCALL_IMPLICIT) != 0);
7501 // It is possible that a call node is both an inline candidate and marked
7502 // for opportunistic tail calling. In-lining happens before morhphing of
7503 // trees. If in-lining of an in-line candidate gets aborted for whatever
7504 // reason, it will survive to the morphing stage at which point it will be
7505 // transformed into a tail call after performing additional checks.
7507 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_IMPLICIT_TAILCALL;
7511 printf("\nGTF_CALL_M_IMPLICIT_TAILCALL bit set for call ");
7517 #else //! FEATURE_TAILCALL_OPT
7518 NYI("Implicit tail call prefix on a target which doesn't support opportunistic tail calls");
7520 #endif // FEATURE_TAILCALL_OPT
7523 // we can't report success just yet...
7527 canTailCall = false;
7528 // canTailCall reported its reasons already
7532 printf("\ninfo.compCompHnd->canTailCall returned false for call ");
7541 // If this assert fires it means that canTailCall was set to false without setting a reason!
7542 assert(szCanTailCallFailReason != nullptr);
7547 printf("\nRejecting %splicit tail call for call ", explicitTailCall ? "ex" : "im");
7549 printf(": %s\n", szCanTailCallFailReason);
7552 info.compCompHnd->reportTailCallDecision(info.compMethodHnd, methHnd, explicitTailCall, TAILCALL_FAIL,
7553 szCanTailCallFailReason);
7557 // Note: we assume that small return types are already normalized by the managed callee
7558 // or by the pinvoke stub for calls to unmanaged code.
7560 if (!bIntrinsicImported)
7563 // Things that need to be checked when bIntrinsicImported is false.
7566 assert(call->gtOper == GT_CALL);
7567 assert(sig != nullptr);
7569 // Tail calls require us to save the call site's sig info so we can obtain an argument
7570 // copying thunk from the EE later on.
7571 if (call->gtCall.callSig == nullptr)
7573 call->gtCall.callSig = new (this, CMK_CorSig) CORINFO_SIG_INFO;
7574 *call->gtCall.callSig = *sig;
7577 if (compIsForInlining() && opcode == CEE_CALLVIRT)
7579 GenTreePtr callObj = call->gtCall.gtCallObjp;
7580 assert(callObj != nullptr);
7582 unsigned callKind = call->gtFlags & GTF_CALL_VIRT_KIND_MASK;
7584 if (((callKind != GTF_CALL_NONVIRT) || (call->gtFlags & GTF_CALL_NULLCHECK)) &&
7585 impInlineIsGuaranteedThisDerefBeforeAnySideEffects(call->gtCall.gtCallArgs, callObj,
7586 impInlineInfo->inlArgInfo))
7588 impInlineInfo->thisDereferencedFirst = true;
7592 #if defined(DEBUG) || defined(INLINE_DATA)
7594 // Keep track of the raw IL offset of the call
7595 call->gtCall.gtRawILOffset = rawILOffset;
7597 #endif // defined(DEBUG) || defined(INLINE_DATA)
7599 // Is it an inline candidate?
7600 impMarkInlineCandidate(call, exactContextHnd, callInfo);
7604 // Push or append the result of the call
7605 if (callRetTyp == TYP_VOID)
7607 if (opcode == CEE_NEWOBJ)
7609 // we actually did push something, so don't spill the thing we just pushed.
7610 assert(verCurrentState.esStackDepth > 0);
7611 impAppendTree(call, verCurrentState.esStackDepth - 1, impCurStmtOffs);
7615 impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
7620 impSpillSpecialSideEff();
7622 if (clsFlags & CORINFO_FLG_ARRAY)
7624 eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
7627 // Find the return type used for verification by interpreting the method signature.
7628 // NB: we are clobbering the already established sig.
7629 if (tiVerificationNeeded)
7631 // Actually, we never get the sig for the original method.
7632 sig = &(callInfo->verSig);
7635 typeInfo tiRetVal = verMakeTypeInfo(sig->retType, sig->retTypeClass);
7636 tiRetVal.NormaliseForStack();
7638 // The CEE_READONLY prefix modifies the verification semantics of an Address
7639 // operation on an array type.
7640 if ((clsFlags & CORINFO_FLG_ARRAY) && readonlyCall && tiRetVal.IsByRef())
7642 tiRetVal.SetIsReadonlyByRef();
7645 if (tiVerificationNeeded)
7647 // We assume all calls return permanent home byrefs. If they
7648 // didn't, they wouldn't be verifiable. This is also covering
7649 // the Address() helper for multidimensional arrays.
7650 if (tiRetVal.IsByRef())
7652 tiRetVal.SetIsPermanentHomeByRef();
7658 // Sometimes "call" is not a GT_CALL (if we imported an intrinsic that didn't turn into a call)
7660 bool fatPointerCandidate = call->AsCall()->IsFatPointerCandidate();
7661 if (varTypeIsStruct(callRetTyp))
7663 call = impFixupCallStructReturn(call, sig->retTypeClass);
7666 if ((call->gtFlags & GTF_CALL_INLINE_CANDIDATE) != 0)
7668 assert(opts.OptEnabled(CLFLG_INLINING));
7669 assert(!fatPointerCandidate); // We should not try to inline calli.
7671 // Make the call its own tree (spill the stack if needed).
7672 impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
7674 // TODO: Still using the widened type.
7675 call = gtNewInlineCandidateReturnExpr(call, genActualType(callRetTyp));
7679 if (fatPointerCandidate)
7681 // fatPointer candidates should be in statements of the form call() or var = call().
7682 // Such a form lets us find statements containing fat calls without walking entire trees
7683 // and avoids the problems that come with cutting trees apart.
7684 assert(!bIntrinsicImported);
7685 assert(IsTargetAbi(CORINFO_CORERT_ABI));
7686 if (call->OperGet() != GT_LCL_VAR) // can be already converted by impFixupCallStructReturn.
7688 unsigned calliSlot = lvaGrabTemp(true DEBUGARG("calli"));
7689 LclVarDsc* varDsc = &lvaTable[calliSlot];
7690 varDsc->lvVerTypeInfo = tiRetVal;
7691 impAssignTempGen(calliSlot, call, clsHnd, (unsigned)CHECK_SPILL_NONE);
7692 // impAssignTempGen can change src arg list and return type for call that returns struct.
7693 var_types type = genActualType(lvaTable[calliSlot].TypeGet());
7694 call = gtNewLclvNode(calliSlot, type);
7697 // For non-candidates we must also spill, since we
7698 // might have locals live on the eval stack that this
7700 impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("non-inline candidate call"));
7704 if (!bIntrinsicImported)
7706 //-------------------------------------------------------------------------
7708 /* If the call is of a small type and the callee is managed, the callee will normalize the result
7710 However, we need to normalize small type values returned by unmanaged
7711 functions (pinvoke). The pinvoke stub does the normalization, but we need to do it here
7712 if we use the shorter inlined pinvoke stub. */
7714 if (checkForSmallType && varTypeIsIntegral(callRetTyp) && genTypeSize(callRetTyp) < genTypeSize(TYP_INT))
7716 call = gtNewCastNode(genActualType(callRetTyp), call, callRetTyp);
7720 impPushOnStack(call, tiRetVal);
7723 // VSD functions get a new call target each time we getCallInfo, so clear the cache.
7724 // Also, the call info cache for CALLI instructions is largely incomplete, so clear it out.
7725 // if ( (opcode == CEE_CALLI) || (callInfoCache.fetchCallInfo().kind == CORINFO_VIRTUALCALL_STUB))
7726 // callInfoCache.uncacheCallInfo();
7731 #pragma warning(pop)
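//------------------------------------------------------------------------
// impMethodInfo_hasRetBuffArg: Does the method return its value through a
//    hidden return buffer (retbuf) argument?
//
// Arguments:
//    methInfo - method info of the callee
//
// Return Value:
//    true when the callee returns a struct (or refany) for which
//    getReturnTypeForStruct reports SPK_ByReference, i.e. the value is
//    returned via a retbuf arg.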
7734 bool Compiler::impMethodInfo_hasRetBuffArg(CORINFO_METHOD_INFO* methInfo)
7736 CorInfoType corType = methInfo->args.retType;
7738 if ((corType == CORINFO_TYPE_VALUECLASS) || (corType == CORINFO_TYPE_REFANY))
7740 // We have some kind of STRUCT being returned
7742 structPassingKind howToReturnStruct = SPK_Unknown;
7744 var_types returnType = getReturnTypeForStruct(methInfo->args.retTypeClass, &howToReturnStruct);
7746 if (howToReturnStruct == SPK_ByReference)
7757 var_types Compiler::impImportJitTestLabelMark(int numArgs)
7759 TestLabelAndNum tlAndN;
7763 StackEntry se = impPopStack();
7764 assert(se.seTypeInfo.GetType() == TI_INT);
7765 GenTreePtr val = se.val;
7766 assert(val->IsCnsIntOrI());
7767 tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue();
7769 else if (numArgs == 3)
7771 StackEntry se = impPopStack();
7772 assert(se.seTypeInfo.GetType() == TI_INT);
7773 GenTreePtr val = se.val;
7774 assert(val->IsCnsIntOrI());
7775 tlAndN.m_num = val->AsIntConCommon()->IconValue();
7777 assert(se.seTypeInfo.GetType() == TI_INT);
7779 assert(val->IsCnsIntOrI());
7780 tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue();
7787 StackEntry expSe = impPopStack();
7788 GenTreePtr node = expSe.val;
7790 // There are a small number of special cases, where we actually put the annotation on a subnode.
7791 if (tlAndN.m_tl == TL_LoopHoist && tlAndN.m_num >= 100)
7793 // A loop hoist annotation with value >= 100 means that the expression should be a static field access,
7794 // a GT_IND of a static field address, which should be the sum of a (hoistable) helper call and possibly some
7795 // offset within the static field block whose address is returned by the helper call.
7796 // The annotation is saying that this address calculation, but not the entire access, should be hoisted.
7797 GenTreePtr helperCall = nullptr;
7798 assert(node->OperGet() == GT_IND);
7799 tlAndN.m_num -= 100;
7800 GetNodeTestData()->Set(node->gtOp.gtOp1, tlAndN);
7801 GetNodeTestData()->Remove(node);
7805 GetNodeTestData()->Set(node, tlAndN);
7808 impPushOnStack(node, expSe.seTypeInfo);
7809 return node->TypeGet();
7813 //-----------------------------------------------------------------------------------
7814 // impFixupCallStructReturn: For a call node that returns a struct type either
7815 // adjust the return type to an enregisterable type, or set the flag to indicate
7816 // struct return via retbuf arg.
7819 // call - GT_CALL GenTree node
7820 // retClsHnd - Class handle of return type of the call
7823 // Returns new GenTree node after fixing struct return of call node
7825 GenTreePtr Compiler::impFixupCallStructReturn(GenTreePtr call, CORINFO_CLASS_HANDLE retClsHnd)
7827 assert(call->gtOper == GT_CALL);
7829 if (!varTypeIsStruct(call))
7834 call->gtCall.gtRetClsHnd = retClsHnd;
7836 GenTreeCall* callNode = call->AsCall();
7838 #if FEATURE_MULTIREG_RET
7839 // Initialize Return type descriptor of call node
7840 ReturnTypeDesc* retTypeDesc = callNode->GetReturnTypeDesc();
7841 retTypeDesc->InitializeStructReturnType(this, retClsHnd);
7842 #endif // FEATURE_MULTIREG_RET
7844 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
7846 // Not allowed for FEATURE_CORCLR which is the only SKU available for System V OSs.
7847 assert(!callNode->IsVarargs() && "varargs not allowed for System V OSs.");
7849 // The return type will remain as the incoming struct type unless normalized to a
7850 // single eightbyte return type below.
7851 callNode->gtReturnType = call->gtType;
7853 unsigned retRegCount = retTypeDesc->GetReturnRegCount();
7854 if (retRegCount != 0)
7856 if (retRegCount == 1)
7858 // struct returned in a single register
7859 callNode->gtReturnType = retTypeDesc->GetReturnRegType(0);
7863 // must be a struct returned in two registers
7864 assert(retRegCount == 2);
7866 if ((!callNode->CanTailCall()) && (!callNode->IsInlineCandidate()))
7868 // Force a call returning multi-reg struct to be always of the IR form
7871 // No need to assign a multi-reg struct to a local var if:
7872 // - It is a tail call or
7873 // - The call is marked for in-lining later
7874 return impAssignMultiRegTypeToVar(call, retClsHnd);
7880 // struct not returned in registers, i.e. returned via a hidden retbuf arg.
7881 callNode->gtCallMoreFlags |= GTF_CALL_M_RETBUFFARG;
7884 #else // not FEATURE_UNIX_AMD64_STRUCT_PASSING
7886 #if FEATURE_MULTIREG_RET && defined(_TARGET_ARM_)
7887 // There is no fixup necessary if the return type is an HFA struct.
7888 // HFA structs are returned in registers for ARM32 and ARM64
7890 if (!call->gtCall.IsVarargs() && IsHfa(retClsHnd))
7892 if (call->gtCall.CanTailCall())
7894 if (info.compIsVarArgs)
7896 // We cannot tail call because control needs to return to fixup the calling
7897 // convention for result return.
7898 call->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
7902 // If we can tail call returning an HFA, then don't assign it to
7903 // a variable back and forth.
7908 if (call->gtFlags & GTF_CALL_INLINE_CANDIDATE)
7913 unsigned retRegCount = retTypeDesc->GetReturnRegCount();
7914 if (retRegCount >= 2)
7916 return impAssignMultiRegTypeToVar(call, retClsHnd);
7919 #endif // _TARGET_ARM_
7921 // Check for TYP_STRUCT type that wraps a primitive type
7922 // Such structs are returned using a single register
7923 // and we change the return type on those calls here.
7925 structPassingKind howToReturnStruct;
7926 var_types returnType = getReturnTypeForStruct(retClsHnd, &howToReturnStruct);
7928 if (howToReturnStruct == SPK_ByReference)
7930 assert(returnType == TYP_UNKNOWN);
7931 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_RETBUFFARG;
7935 assert(returnType != TYP_UNKNOWN);
7936 call->gtCall.gtReturnType = returnType;
7938 // ToDo: Refactor this common code sequence into its own method as it is used 4+ times
7939 if ((returnType == TYP_LONG) && (compLongUsed == false))
7941 compLongUsed = true;
7943 else if (((returnType == TYP_FLOAT) || (returnType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
7945 compFloatingPointUsed = true;
7948 #if FEATURE_MULTIREG_RET
7949 unsigned retRegCount = retTypeDesc->GetReturnRegCount();
7950 assert(retRegCount != 0);
7952 if (retRegCount >= 2)
7954 if ((!callNode->CanTailCall()) && (!callNode->IsInlineCandidate()))
7956 // Force a call returning multi-reg struct to be always of the IR form
7959 // No need to assign a multi-reg struct to a local var if:
7960 // - It is a tail call or
7961 // - The call is marked for in-lining later
7962 return impAssignMultiRegTypeToVar(call, retClsHnd);
7965 #endif // FEATURE_MULTIREG_RET
7968 #endif // not FEATURE_UNIX_AMD64_STRUCT_PASSING
7973 /*****************************************************************************
7974 For struct return values, re-type the operand in the case where the ABI
7975 does not use a struct return buffer
7976 Note that this method is only called for !_TARGET_X86_
7979 GenTreePtr Compiler::impFixupStructReturnType(GenTreePtr op, CORINFO_CLASS_HANDLE retClsHnd)
7981 assert(varTypeIsStruct(info.compRetType));
7982 assert(info.compRetBuffArg == BAD_VAR_NUM);
7984 #if defined(_TARGET_XARCH_)
7986 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
7987 // No VarArgs for CoreCLR on x64 Unix
7988 assert(!info.compIsVarArgs);
7990 // Is method returning a multi-reg struct?
7991 if (varTypeIsStruct(info.compRetNativeType) && IsMultiRegReturnedType(retClsHnd))
7993 // In case of multi-reg struct return, we force IR to be one of the following:
7994 // GT_RETURN(lclvar) or GT_RETURN(call). If op is anything other than a
7995 // lclvar or call, it is assigned to a temp to create: temp = op and GT_RETURN(tmp).
7997 if (op->gtOper == GT_LCL_VAR)
7999 // Make sure that this struct stays in memory and doesn't get promoted.
8000 unsigned lclNum = op->gtLclVarCommon.gtLclNum;
8001 lvaTable[lclNum].lvIsMultiRegRet = true;
8003 // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
8004 op->gtFlags |= GTF_DONT_CSE;
8009 if (op->gtOper == GT_CALL)
8014 return impAssignMultiRegTypeToVar(op, retClsHnd);
8016 #else // !FEATURE_UNIX_AMD64_STRUCT_PASSING
8017 assert(info.compRetNativeType != TYP_STRUCT);
8018 #endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING
8020 #elif FEATURE_MULTIREG_RET && defined(_TARGET_ARM_)
8022 if (varTypeIsStruct(info.compRetNativeType) && !info.compIsVarArgs && IsHfa(retClsHnd))
8024 if (op->gtOper == GT_LCL_VAR)
8026 // This LCL_VAR is an HFA return value, it stays as a TYP_STRUCT
8027 unsigned lclNum = op->gtLclVarCommon.gtLclNum;
8028 // Make sure this struct type stays as struct so that we can return it as an HFA
8029 lvaTable[lclNum].lvIsMultiRegRet = true;
8031 // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
8032 op->gtFlags |= GTF_DONT_CSE;
8037 if (op->gtOper == GT_CALL)
8039 if (op->gtCall.IsVarargs())
8041 // We cannot tail call because control needs to return to fixup the calling
8042 // convention for result return.
8043 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL;
8044 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
8051 return impAssignMultiRegTypeToVar(op, retClsHnd);
8054 #elif FEATURE_MULTIREG_RET && defined(_TARGET_ARM64_)
8056 // Is method returning a multi-reg struct?
8057 if (IsMultiRegReturnedType(retClsHnd))
8059 if (op->gtOper == GT_LCL_VAR)
8061 // This LCL_VAR stays as a TYP_STRUCT
8062 unsigned lclNum = op->gtLclVarCommon.gtLclNum;
8064 // Make sure this struct type is not struct promoted
8065 lvaTable[lclNum].lvIsMultiRegRet = true;
8067 // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
8068 op->gtFlags |= GTF_DONT_CSE;
8073 if (op->gtOper == GT_CALL)
8075 if (op->gtCall.IsVarargs())
8077 // We cannot tail call because control needs to return to fixup the calling
8078 // convention for result return.
8079 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL;
8080 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
8087 return impAssignMultiRegTypeToVar(op, retClsHnd);
8090 #endif // FEATURE_MULTIREG_RET && FEATURE_HFA
8093 // adjust the type away from struct to integral
8094 // and no normalizing
8095 if (op->gtOper == GT_LCL_VAR)
8097 op->ChangeOper(GT_LCL_FLD);
8099 else if (op->gtOper == GT_OBJ)
8101 GenTreePtr op1 = op->AsObj()->Addr();
8103 // We will fold away OBJ/ADDR
8104 // except for OBJ/ADDR/INDEX
8105 // as the array type influences the array element's offset
8106 // Later in this method we change op->gtType to info.compRetNativeType
8107 // This is not correct when op is a GT_INDEX as the starting offset
8108 // for the array elements 'elemOffs' is different for an array of
8109 // TYP_REF than an array of TYP_STRUCT (which simply wraps a TYP_REF)
8110 // Also refer to the GTF_INX_REFARR_LAYOUT flag
8112 if ((op1->gtOper == GT_ADDR) && (op1->gtOp.gtOp1->gtOper != GT_INDEX))
8114 // Change '*(&X)' to 'X' and see if we can do better
8115 op = op1->gtOp.gtOp1;
8116 goto REDO_RETURN_NODE;
8118 op->gtObj.gtClass = NO_CLASS_HANDLE;
8119 op->ChangeOperUnchecked(GT_IND);
8120 op->gtFlags |= GTF_IND_TGTANYWHERE;
8122 else if (op->gtOper == GT_CALL)
8124 if (op->AsCall()->TreatAsHasRetBufArg(this))
8126 // This must be one of those 'special' helpers that don't
8127 // really have a return buffer, but instead use it as a way
8128 // to keep the trees cleaner with fewer address-taken temps.
8130 // Well now we have to materialize the return buffer as
8131 // an address-taken temp. Then we can return the temp.
8133 // NOTE: this code assumes that since the call directly
8134 // feeds the return, then the call must be returning the
8135 // same structure/class/type.
8137 unsigned tmpNum = lvaGrabTemp(true DEBUGARG("pseudo return buffer"));
8139 // No need to spill anything as we're about to return.
8140 impAssignTempGen(tmpNum, op, info.compMethodInfo->args.retTypeClass, (unsigned)CHECK_SPILL_NONE);
8142 // Don't create both a GT_ADDR & GT_OBJ just to undo all of that; instead,
8143 // jump directly to a GT_LCL_FLD.
8144 op = gtNewLclvNode(tmpNum, info.compRetNativeType);
8145 op->ChangeOper(GT_LCL_FLD);
8149 assert(info.compRetNativeType == op->gtCall.gtReturnType);
8151 // Don't change the gtType of the node just yet, it will get changed later.
8155 else if (op->gtOper == GT_COMMA)
8157 op->gtOp.gtOp2 = impFixupStructReturnType(op->gtOp.gtOp2, retClsHnd);
8160 op->gtType = info.compRetNativeType;
8165 /*****************************************************************************
8166 CEE_LEAVE may be jumping out of a protected block, viz, a catch or a
8167 finally-protected try. We find the finally blocks protecting the current
8168 offset (in order) by walking over the complete exception table and
8169 finding enclosing clauses. This assumes that the table is sorted.
8170 This will create a series of BBJ_CALLFINALLY -> BBJ_CALLFINALLY ... -> BBJ_ALWAYS.
8172 If we are leaving a catch handler, we need to attach the
8173 CPX_ENDCATCHes to the correct BBJ_CALLFINALLY blocks.
8175 After this function, the BBJ_LEAVE block has been converted to a different type.
8178 #if !FEATURE_EH_FUNCLETS
8180 void Compiler::impImportLeave(BasicBlock* block)
8185 printf("\nBefore import CEE_LEAVE:\n");
8186 fgDispBasicBlocks();
8191 bool invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created)
8192 unsigned blkAddr = block->bbCodeOffs;
8193 BasicBlock* leaveTarget = block->bbJumpDest;
8194 unsigned jmpAddr = leaveTarget->bbCodeOffs;
8196 // LEAVE clears the stack: spill side effects and set the stack depth to 0
8198 impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave"));
8199 verCurrentState.esStackDepth = 0;
8201 assert(block->bbJumpKind == BBJ_LEAVE);
8202 assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != NULL); // should be a BB boundary
8204 BasicBlock* step = DUMMY_INIT(NULL);
8205 unsigned encFinallies = 0; // Number of enclosing finallies.
8206 GenTreePtr endCatches = NULL;
8207 GenTreePtr endLFin = NULL; // The statement tree to indicate the end of locally-invoked finally.
8212 for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
8214 // Grab the handler offsets
8216 IL_OFFSET tryBeg = HBtab->ebdTryBegOffs();
8217 IL_OFFSET tryEnd = HBtab->ebdTryEndOffs();
8218 IL_OFFSET hndBeg = HBtab->ebdHndBegOffs();
8219 IL_OFFSET hndEnd = HBtab->ebdHndEndOffs();
8221 /* Is this a catch-handler we are CEE_LEAVEing out of?
8222 * If so, we need to call CORINFO_HELP_ENDCATCH.
8225 if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd))
8227 // Can't CEE_LEAVE out of a finally/fault handler
8228 if (HBtab->HasFinallyOrFaultHandler())
8229 BADCODE("leave out of fault/finally block");
8231 // Create the call to CORINFO_HELP_ENDCATCH
8232 GenTreePtr endCatch = gtNewHelperCallNode(CORINFO_HELP_ENDCATCH, TYP_VOID);
8234 // Make a list of all the currently pending endCatches
8236 endCatches = gtNewOperNode(GT_COMMA, TYP_VOID, endCatches, endCatch);
8238 endCatches = endCatch;
8243 printf("impImportLeave - BB%02u jumping out of catch handler EH#%u, adding call to "
8244 "CORINFO_HELP_ENDCATCH\n",
8245 block->bbNum, XTnum);
8249 else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
8250 !jitIsBetween(jmpAddr, tryBeg, tryEnd))
8252 /* This is a finally-protected try we are jumping out of */
8254 /* If there are any pending endCatches, and we have already
8255 jumped out of a finally-protected try, then the endCatches
8256 have to be put in a block in an outer try for async
8257 exceptions to work correctly.
8258 Else, just append to the original block */
8260 BasicBlock* callBlock;
8262 assert(!encFinallies == !endLFin); // if we have finallies, we better have an endLFin tree, and vice-versa
8264 if (encFinallies == 0)
8266 assert(step == DUMMY_INIT(NULL));
8268 callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY
8271 impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
8276 printf("impImportLeave - jumping out of a finally-protected try, convert block to BBJ_CALLFINALLY "
8277 "block BB%02u [%08p]\n",
8278 callBlock->bbNum, dspPtr(callBlock));
8284 assert(step != DUMMY_INIT(NULL));
8286 /* Calling the finally block */
8287 callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, XTnum + 1, 0, step);
8288 assert(step->bbJumpKind == BBJ_ALWAYS);
8289 step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next
8290 // finally in the chain)
8291 step->bbJumpDest->bbRefs++;
8293 /* The new block will inherit this block's weight */
8294 callBlock->setBBWeight(block->bbWeight);
8295 callBlock->bbFlags |= block->bbFlags & BBF_RUN_RARELY;
8300 printf("impImportLeave - jumping out of a finally-protected try, new BBJ_CALLFINALLY block BB%02u "
8302 callBlock->bbNum, dspPtr(callBlock));
8306 GenTreePtr lastStmt;
8310 lastStmt = gtNewStmt(endCatches);
8311 endLFin->gtNext = lastStmt;
8312 lastStmt->gtPrev = endLFin;
8319 // note that this sets BBF_IMPORTED on the block
8320 impEndTreeList(callBlock, endLFin, lastStmt);
8323 step = fgNewBBafter(BBJ_ALWAYS, callBlock, true);
8324 /* The new block will inherit this block's weight */
8325 step->setBBWeight(block->bbWeight);
8326 step->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS;
8331 printf("impImportLeave - jumping out of a finally-protected try, created step (BBJ_ALWAYS) block "
8333 step->bbNum, dspPtr(step));
8337 unsigned finallyNesting = compHndBBtab[XTnum].ebdHandlerNestingLevel;
8338 assert(finallyNesting <= compHndBBtabCount);
8340 callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler.
8341 endLFin = new (this, GT_END_LFIN) GenTreeVal(GT_END_LFIN, TYP_VOID, finallyNesting);
8342 endLFin = gtNewStmt(endLFin);
8347 invalidatePreds = true;
8351 /* Append any remaining endCatches, if any */
8353 assert(!encFinallies == !endLFin);
8355 if (encFinallies == 0)
8357 assert(step == DUMMY_INIT(NULL));
8358 block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS
8361 impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
8366 printf("impImportLeave - no enclosing finally-protected try blocks; convert CEE_LEAVE block to BBJ_ALWAYS "
8367 "block BB%02u [%08p]\n",
8368 block->bbNum, dspPtr(block));
8374 // If leaveTarget is the start of another try block, we want to make sure that
8375 // we do not insert finalStep into that try block. Hence, we find the enclosing
8377 unsigned tryIndex = bbFindInnermostCommonTryRegion(step, leaveTarget);
8379 // Insert a new BB either in the try region indicated by tryIndex or
8380 // the handler region indicated by leaveTarget->bbHndIndex,
8381 // depending on which is the inner region.
8382 BasicBlock* finalStep = fgNewBBinRegion(BBJ_ALWAYS, tryIndex, leaveTarget->bbHndIndex, step);
8383 finalStep->bbFlags |= BBF_KEEP_BBJ_ALWAYS;
8384 step->bbJumpDest = finalStep;
8386 /* The new block will inherit this block's weight */
8387 finalStep->setBBWeight(block->bbWeight);
8388 finalStep->bbFlags |= block->bbFlags & BBF_RUN_RARELY;
8393 printf("impImportLeave - finalStep block required (encFinallies(%d) > 0), new block BB%02u [%08p]\n",
8394 encFinallies, finalStep->bbNum, dspPtr(finalStep));
8398 GenTreePtr lastStmt;
8402 lastStmt = gtNewStmt(endCatches);
8403 endLFin->gtNext = lastStmt;
8404 lastStmt->gtPrev = endLFin;
8411 impEndTreeList(finalStep, endLFin, lastStmt);
8413 finalStep->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE
8415 // Queue up the jump target for importing
8417 impImportBlockPending(leaveTarget);
8419 invalidatePreds = true;
8422 if (invalidatePreds && fgComputePredsDone)
8424 JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n");
8429 fgVerifyHandlerTab();
8433 printf("\nAfter import CEE_LEAVE:\n");
8434 fgDispBasicBlocks();
8440 #else // FEATURE_EH_FUNCLETS
8442 void Compiler::impImportLeave(BasicBlock* block)
8447 printf("\nBefore import CEE_LEAVE in BB%02u (targeting BB%02u):\n", block->bbNum, block->bbJumpDest->bbNum);
8448 fgDispBasicBlocks();
8453 bool invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created)
8454 unsigned blkAddr = block->bbCodeOffs;
8455 BasicBlock* leaveTarget = block->bbJumpDest;
8456 unsigned jmpAddr = leaveTarget->bbCodeOffs;
8458 // LEAVE clears the stack: spill side effects and set the stack depth to 0
8460 impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave"));
8461 verCurrentState.esStackDepth = 0;
8463 assert(block->bbJumpKind == BBJ_LEAVE);
8464 assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != nullptr); // should be a BB boundary
8466 BasicBlock* step = nullptr;
8470 // No step type; step == NULL.
8473 // Is the step block the BBJ_ALWAYS block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair?
8474 // That is, is step->bbJumpDest where a finally will return to?
8477 // The step block is a catch return.
8480 // The step block is in a "try", created as the target for a finally return or the target for a catch return.
8483 StepType stepType = ST_None;
8488 for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
8490 // Grab the handler offsets
8492 IL_OFFSET tryBeg = HBtab->ebdTryBegOffs();
8493 IL_OFFSET tryEnd = HBtab->ebdTryEndOffs();
8494 IL_OFFSET hndBeg = HBtab->ebdHndBegOffs();
8495 IL_OFFSET hndEnd = HBtab->ebdHndEndOffs();
8497 /* Is this a catch-handler we are CEE_LEAVEing out of?
8500 if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd))
8502 // Can't CEE_LEAVE out of a finally/fault handler
8503 if (HBtab->HasFinallyOrFaultHandler())
8505 BADCODE("leave out of fault/finally block");
8508 /* We are jumping out of a catch */
8510 if (step == nullptr)
8513 step->bbJumpKind = BBJ_EHCATCHRET; // convert the BBJ_LEAVE to BBJ_EHCATCHRET
8514 stepType = ST_Catch;
8519 printf("impImportLeave - jumping out of a catch (EH#%u), convert block BB%02u to BBJ_EHCATCHRET "
8521 XTnum, step->bbNum);
8527 BasicBlock* exitBlock;
8529 /* Create a new catch exit block in the catch region for the existing step block to jump to in this
8531 exitBlock = fgNewBBinRegion(BBJ_EHCATCHRET, 0, XTnum + 1, step);
8533 assert(step->bbJumpKind == BBJ_ALWAYS || step->bbJumpKind == BBJ_EHCATCHRET);
8534 step->bbJumpDest = exitBlock; // the previous step (maybe a call to a nested finally, or a nested catch
8535 // exit) returns to this block
8536 step->bbJumpDest->bbRefs++;
8538 #if defined(_TARGET_ARM_)
8539 if (stepType == ST_FinallyReturn)
8541 assert(step->bbJumpKind == BBJ_ALWAYS);
8542 // Mark the target of a finally return
8543 step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
8545 #endif // defined(_TARGET_ARM_)
8547 /* The new block will inherit this block's weight */
8548 exitBlock->setBBWeight(block->bbWeight);
8549 exitBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8551 /* This exit block is the new step */
8553 stepType = ST_Catch;
8555 invalidatePreds = true;
8560 printf("impImportLeave - jumping out of a catch (EH#%u), new BBJ_EHCATCHRET block BB%02u\n", XTnum,
8566 else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
8567 !jitIsBetween(jmpAddr, tryBeg, tryEnd))
8569 /* We are jumping out of a finally-protected try */
8571 BasicBlock* callBlock;
8573 if (step == nullptr)
8575 #if FEATURE_EH_CALLFINALLY_THUNKS
8577 // Put the call to the finally in the enclosing region.
8578 unsigned callFinallyTryIndex =
8579 (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1;
8580 unsigned callFinallyHndIndex =
8581 (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1;
8582 callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, block);
8584 // Convert the BBJ_LEAVE to BBJ_ALWAYS, jumping to the new BBJ_CALLFINALLY. This is because
8585 // the new BBJ_CALLFINALLY is in a different EH region, thus it can't just replace the BBJ_LEAVE,
8586 // which might be in the middle of the "try". In most cases, the BBJ_ALWAYS will jump to the
8587 // next block, and flow optimizations will remove it.
8588 block->bbJumpKind = BBJ_ALWAYS;
8589 block->bbJumpDest = callBlock;
8590 block->bbJumpDest->bbRefs++;
8592 /* The new block will inherit this block's weight */
8593 callBlock->setBBWeight(block->bbWeight);
8594 callBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8599 printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block BB%02u to "
8600 "BBJ_ALWAYS, add BBJ_CALLFINALLY block BB%02u\n",
8601 XTnum, block->bbNum, callBlock->bbNum);
8605 #else // !FEATURE_EH_CALLFINALLY_THUNKS
8608 callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY
8613 printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block BB%02u to "
8614 "BBJ_CALLFINALLY block\n",
8615 XTnum, callBlock->bbNum);
8619 #endif // !FEATURE_EH_CALLFINALLY_THUNKS
8623 // Calling the finally block. We already have a step block that is either the call-to-finally from a
8624 // more nested try/finally (thus we are jumping out of multiple nested 'try' blocks, each protected by
8625 // a 'finally'), or the step block is the return from a catch.
8627 // Due to ThreadAbortException, we can't have the catch return target the call-to-finally block
8628 // directly. Note that if a 'catch' ends without resetting the ThreadAbortException, the VM will
8629 // automatically re-raise the exception, using the return address of the catch (that is, the target
8630 // block of the BBJ_EHCATCHRET) as the re-raise address. If this address is in a finally, the VM will
8631 // refuse to do the re-raise, and the ThreadAbortException will get eaten (and lost). On AMD64/ARM64,
8632 // we put the call-to-finally thunk in a special "cloned finally" EH region that does look like a
8633 // finally clause to the VM. Thus, on these platforms, we can't have BBJ_EHCATCHRET target a
8634 // BBJ_CALLFINALLY directly. (Note that on ARM32, we don't mark the thunk specially -- it lives directly
8635 // within the 'try' region protected by the finally, since we generate code in such a way that execution
8636 // never returns to the call-to-finally call, and the finally-protected 'try' region doesn't appear on
8639 assert(step->bbJumpKind == BBJ_ALWAYS || step->bbJumpKind == BBJ_EHCATCHRET);
8641 #if FEATURE_EH_CALLFINALLY_THUNKS
8642 if (step->bbJumpKind == BBJ_EHCATCHRET)
8644 // Need to create another step block in the 'try' region that will actually branch to the
8645 // call-to-finally thunk.
8646 BasicBlock* step2 = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step);
8647 step->bbJumpDest = step2;
8648 step->bbJumpDest->bbRefs++;
8649 step2->setBBWeight(block->bbWeight);
8650 step2->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8655 printf("impImportLeave - jumping out of a finally-protected try (EH#%u), step block is "
8656 "BBJ_EHCATCHRET (BB%02u), new BBJ_ALWAYS step-step block BB%02u\n",
8657 XTnum, step->bbNum, step2->bbNum);
8662 assert(stepType == ST_Catch); // Leave it as catch type for now.
8664 #endif // FEATURE_EH_CALLFINALLY_THUNKS
8666 #if FEATURE_EH_CALLFINALLY_THUNKS
8667 unsigned callFinallyTryIndex =
8668 (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1;
8669 unsigned callFinallyHndIndex =
8670 (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1;
8671 #else // !FEATURE_EH_CALLFINALLY_THUNKS
8672 unsigned callFinallyTryIndex = XTnum + 1;
8673 unsigned callFinallyHndIndex = 0; // don't care
8674 #endif // !FEATURE_EH_CALLFINALLY_THUNKS
8676 callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, step);
8677 step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next
8678 // finally in the chain)
8679 step->bbJumpDest->bbRefs++;
8681 #if defined(_TARGET_ARM_)
8682 if (stepType == ST_FinallyReturn)
8684 assert(step->bbJumpKind == BBJ_ALWAYS);
8685 // Mark the target of a finally return
8686 step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
8688 #endif // defined(_TARGET_ARM_)
8690 /* The new block will inherit this block's weight */
8691 callBlock->setBBWeight(block->bbWeight);
8692 callBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8697 printf("impImportLeave - jumping out of a finally-protected try (EH#%u), new BBJ_CALLFINALLY block "
8699 XTnum, callBlock->bbNum);
8704 step = fgNewBBafter(BBJ_ALWAYS, callBlock, true);
8705 stepType = ST_FinallyReturn;
8707 /* The new block will inherit this block's weight */
8708 step->setBBWeight(block->bbWeight);
8709 step->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS;
8714 printf("impImportLeave - jumping out of a finally-protected try (EH#%u), created step (BBJ_ALWAYS) "
8716 XTnum, step->bbNum);
8720 callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler.
8722 invalidatePreds = true;
8724 else if (HBtab->HasCatchHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
8725 !jitIsBetween(jmpAddr, tryBeg, tryEnd))
8727 // We are jumping out of a catch-protected try.
8729 // If we are returning from a call to a finally, then we must have a step block within a try
8730 // that is protected by a catch. This is so when unwinding from that finally (e.g., if code within the
8731 // finally raises an exception), the VM will find this step block, notice that it is in a protected region,
8732 // and invoke the appropriate catch.
8734 // We also need to handle a special case with the handling of ThreadAbortException. If a try/catch
8735 // catches a ThreadAbortException (which might be because it catches a parent, e.g. System.Exception),
8736 // and the catch doesn't call System.Threading.Thread::ResetAbort(), then when the catch returns to the VM,
8737 // the VM will automatically re-raise the ThreadAbortException. When it does this, it uses the target
8738 // address of the catch return as the new exception address. That is, the re-raised exception appears to
8739 // occur at the catch return address. If this exception return address skips an enclosing try/catch that
8740 // catches ThreadAbortException, then the enclosing try/catch will not catch the exception, as it should.
8745 // // something here raises ThreadAbortException
8746 // LEAVE LABEL_1; // no need to stop at LABEL_2
8747 // } catch (Exception) {
8748 // // This catches ThreadAbortException, but doesn't call System.Threading.Thread::ResetAbort(), so
8749 // // ThreadAbortException is re-raised by the VM at the address specified by the LEAVE opcode.
8750 // // This is bad, since it means the outer try/catch won't get a chance to catch the re-raised
8751 // // ThreadAbortException. So, instead, create step block LABEL_2 and LEAVE to that. We only
8752 // // need to do this transformation if the current EH block is a try/catch that catches
8753 // // ThreadAbortException (or one of its parents), however we might not be able to find that
8754 // // information, so currently we do it for all catch types.
8755 // LEAVE LABEL_1; // Convert this to LEAVE LABEL2;
8757 // LABEL_2: LEAVE LABEL_1; // inserted by this step creation code
8758 // } catch (ThreadAbortException) {
8762 // Note that this pattern isn't theoretical: it occurs in ASP.NET, in IL code generated by the Roslyn C#
8765 if ((stepType == ST_FinallyReturn) || (stepType == ST_Catch))
8767 BasicBlock* catchStep;
8771 if (stepType == ST_FinallyReturn)
8773 assert(step->bbJumpKind == BBJ_ALWAYS);
8777 assert(stepType == ST_Catch);
8778 assert(step->bbJumpKind == BBJ_EHCATCHRET);
8781 /* Create a new exit block in the try region for the existing step block to jump to in this scope */
8782 catchStep = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step);
8783 step->bbJumpDest = catchStep;
8784 step->bbJumpDest->bbRefs++;
8786 #if defined(_TARGET_ARM_)
8787 if (stepType == ST_FinallyReturn)
8789 // Mark the target of a finally return
8790 step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
8792 #endif // defined(_TARGET_ARM_)
8794 /* The new block will inherit this block's weight */
8795 catchStep->setBBWeight(block->bbWeight);
8796 catchStep->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8801 if (stepType == ST_FinallyReturn)
8803 printf("impImportLeave - return from finally jumping out of a catch-protected try (EH#%u), new "
8804 "BBJ_ALWAYS block BB%02u\n",
8805 XTnum, catchStep->bbNum);
8809 assert(stepType == ST_Catch);
8810 printf("impImportLeave - return from catch jumping out of a catch-protected try (EH#%u), new "
8811 "BBJ_ALWAYS block BB%02u\n",
8812 XTnum, catchStep->bbNum);
8817 /* This block is the new step */
8821 invalidatePreds = true;
8826 if (step == nullptr)
8828 block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS
8833 printf("impImportLeave - no enclosing finally-protected try blocks or catch handlers; convert CEE_LEAVE "
8834 "block BB%02u to BBJ_ALWAYS\n",
8841 step->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE
8843 #if defined(_TARGET_ARM_)
8844 if (stepType == ST_FinallyReturn)
8846 assert(step->bbJumpKind == BBJ_ALWAYS);
8847 // Mark the target of a finally return
8848 step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
8850 #endif // defined(_TARGET_ARM_)
8855 printf("impImportLeave - final destination of step blocks set to BB%02u\n", leaveTarget->bbNum);
8859 // Queue up the jump target for importing
8861 impImportBlockPending(leaveTarget);
8864 if (invalidatePreds && fgComputePredsDone)
8866 JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n");
8871 fgVerifyHandlerTab();
8875 printf("\nAfter import CEE_LEAVE:\n");
8876 fgDispBasicBlocks();
8882 #endif // FEATURE_EH_FUNCLETS
8884 /*****************************************************************************/
8885 // This is called when reimporting a leave block. It resets the JumpKind,
8886 // JumpDest, and bbNext to the original values
8888 void Compiler::impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr)
8890 #if FEATURE_EH_FUNCLETS
8891 // With EH Funclets, while importing the leave opcode we create another block ending with BBJ_ALWAYS (call it B1)
8892 // and the block containing leave (say B0) is marked as BBJ_CALLFINALLY. Say for some reason we reimport B0,
8893 // it is reset (in this routine) by marking as ending with BBJ_LEAVE and further down when B0 is reimported, we
8894 // create another BBJ_ALWAYS (call it B2). In this process B1 gets orphaned and any blocks to which B1 is the
8895 // only predecessor are also considered orphans and attempted to be deleted.
8902 // leave OUTSIDE; // B0 is the block containing this leave, following this would be B1
8907 // In the above nested try-finally example, we create a step block (call it Bstep) which in turn branches to a block
8908 // where a finally would branch to (and such block is marked as finally target). Block B1 branches to step block.
8909 // Because of re-import of B0, Bstep is also orphaned. Since Bstep is a finally target it cannot be removed. To
8910 // work around this we will duplicate B0 (call it B0Dup) before resetting. B0Dup is marked as BBJ_CALLFINALLY and
8911 // only serves to pair up with B1 (BBJ_ALWAYS) that got orphaned. Now during orphan block deletion B0Dup and B1
8912 // will be treated as a pair and handled correctly.
8913 if (block->bbJumpKind == BBJ_CALLFINALLY)
8915 BasicBlock* dupBlock = bbNewBasicBlock(block->bbJumpKind);
8916 dupBlock->bbFlags = block->bbFlags;
8917 dupBlock->bbJumpDest = block->bbJumpDest;
8918 dupBlock->copyEHRegion(block);
8919 dupBlock->bbCatchTyp = block->bbCatchTyp;
8921 // Mark this block as
8922 // a) not referenced by any other block to make sure that it gets deleted
8924 // c) prevent from being imported
8927 dupBlock->bbRefs = 0;
8928 dupBlock->bbWeight = 0;
8929 dupBlock->bbFlags |= BBF_IMPORTED | BBF_INTERNAL | BBF_RUN_RARELY;
8931 // Insert the block right after the block which is getting reset so that BBJ_CALLFINALLY and BBJ_ALWAYS
8932 // will be next to each other.
8933 fgInsertBBafter(block, dupBlock);
8938 printf("New Basic Block BB%02u duplicate of BB%02u created.\n", dupBlock->bbNum, block->bbNum);
8942 #endif // FEATURE_EH_FUNCLETS
8944 block->bbJumpKind = BBJ_LEAVE;
8946 block->bbJumpDest = fgLookupBB(jmpAddr);
8948 // We will leave the BBJ_ALWAYS block we introduced. When it's reimported
8949 // the BBJ_ALWAYS block will be unreachable, and will be removed after. The
8950 // reason we don't want to remove the block at this point is that if we call
8951 // fgInitBBLookup() again we will do it wrong as the BBJ_ALWAYS block won't be
8952 // added and the linked list length will be different than fgBBcount.
8955 /*****************************************************************************/
8956 // Get the first non-prefix opcode. Used for verification of valid combinations
8957 // of prefixes and actual opcodes.
8959 static OPCODE impGetNonPrefixOpcode(const BYTE* codeAddr, const BYTE* codeEndp)
8961 while (codeAddr < codeEndp)
8963 OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr);
8964 codeAddr += sizeof(__int8);
8966 if (opcode == CEE_PREFIX1)
8968 if (codeAddr >= codeEndp)
8972 opcode = (OPCODE)(getU1LittleEndian(codeAddr) + 256);
8973 codeAddr += sizeof(__int8);
8981 case CEE_CONSTRAINED:
8988 codeAddr += opcodeSizes[opcode];
8994 /*****************************************************************************/
8995 // Checks whether the opcode is a valid opcode for volatile. and unaligned. prefixes
8997 static void impValidateMemoryAccessOpcode(const BYTE* codeAddr, const BYTE* codeEndp, bool volatilePrefix)
8999 OPCODE opcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
9002 // The opcodes of all ldind and stind instructions happen to be contiguous, except stind.i.
9003 ((CEE_LDIND_I1 <= opcode) && (opcode <= CEE_STIND_R8)) || (opcode == CEE_STIND_I) ||
9004 (opcode == CEE_LDFLD) || (opcode == CEE_STFLD) || (opcode == CEE_LDOBJ) || (opcode == CEE_STOBJ) ||
9005 (opcode == CEE_INITBLK) || (opcode == CEE_CPBLK) ||
9006 // volatile. prefix is allowed with the ldsfld and stsfld
9007 (volatilePrefix && ((opcode == CEE_LDSFLD) || (opcode == CEE_STSFLD)))))
9009 BADCODE("Invalid opcode for unaligned. or volatile. prefix");
9013 /*****************************************************************************/
9017 #undef RETURN // undef contracts RETURN macro
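// Build a per-opcode table of control-flow kinds by expanding opcode.def:
// the OPDEF macro below keeps only the 'flow' column, so controlFlow[opcode]
// gives the control-flow classification for each IL opcode.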
9032 const static controlFlow_t controlFlow[] = {
9033 #define OPDEF(c, s, pop, push, args, type, l, s1, s2, flow) flow,
9034 #include "opcode.def"
9040 /*****************************************************************************
9041 * Determine the result type of an arithmetic operation
9042 * On 64-bit targets, insert upcasts when native int is mixed with int32
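 *
 * A quick summary of the typing rules implemented below:
 *   byref - byref             => native int
 *   byref +/- [native] int    => byref
 *   [native] int - byref      => native int
 *   int/long/float arithmetic => the common actual type (float op double => double)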
9044 var_types Compiler::impGetByRefResultType(genTreeOps oper, bool fUnsigned, GenTreePtr* pOp1, GenTreePtr* pOp2)
9046 var_types type = TYP_UNDEF;
9047 GenTreePtr op1 = *pOp1, op2 = *pOp2;
9049 // Arithmetic operations are generally only allowed with
9050 // primitive types, but certain operations are allowed
9053 if ((oper == GT_SUB) && (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF))
9055 if ((genActualType(op1->TypeGet()) == TYP_BYREF) && (genActualType(op2->TypeGet()) == TYP_BYREF))
9057 // byref1-byref2 => gives a native int
9060 else if (genActualTypeIsIntOrI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_BYREF))
9062 // [native] int - byref => gives a native int
9065 // The reason is that it is possible, in managed C++,
9066 // to have a tree like this:
9073 // const(h) int addr byref
9075 // <BUGNUM> VSW 318822 </BUGNUM>
9077 // So here we decide to make the resulting type to be a native int.
9078 CLANG_FORMAT_COMMENT_ANCHOR;
9080 #ifdef _TARGET_64BIT_
9081 if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
9083 // insert an explicit upcast
9084 op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9086 #endif // _TARGET_64BIT_
9092 // byref - [native] int => gives a byref
9093 assert(genActualType(op1->TypeGet()) == TYP_BYREF && genActualTypeIsIntOrI(op2->TypeGet()));
9095 #ifdef _TARGET_64BIT_
9096 if ((genActualType(op2->TypeGet()) != TYP_I_IMPL))
9098 // insert an explicit upcast
9099 op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9101 #endif // _TARGET_64BIT_
9106 else if ((oper == GT_ADD) &&
9107 (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF))
9109 // byref + [native] int => gives a byref
9111 // [native] int + byref => gives a byref
9113 // only one can be a byref : byref op byref not allowed
9114 assert(genActualType(op1->TypeGet()) != TYP_BYREF || genActualType(op2->TypeGet()) != TYP_BYREF);
9115 assert(genActualTypeIsIntOrI(op1->TypeGet()) || genActualTypeIsIntOrI(op2->TypeGet()));
9117 #ifdef _TARGET_64BIT_
9118 if (genActualType(op2->TypeGet()) == TYP_BYREF)
9120 if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
9122 // insert an explicit upcast
9123 op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9126 else if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
9128 // insert an explicit upcast
9129 op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9131 #endif // _TARGET_64BIT_
9135 #ifdef _TARGET_64BIT_
9136 else if (genActualType(op1->TypeGet()) == TYP_I_IMPL || genActualType(op2->TypeGet()) == TYP_I_IMPL)
9138 assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType));
9140 // int + long => gives long
9141 // long + int => gives long
9142 // we get this because in the IL the long isn't Int64, it's just IntPtr
9144 if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
9146 // insert an explicit upcast
9147 op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9149 else if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
9151 // insert an explicit upcast
9152 op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9157 #else // 32-bit TARGET
9158 else if (genActualType(op1->TypeGet()) == TYP_LONG || genActualType(op2->TypeGet()) == TYP_LONG)
9160 assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType));
9162 // int + long => gives long
9163 // long + int => gives long
9167 #endif // _TARGET_64BIT_
9170 // int + int => gives an int
9171 assert(genActualType(op1->TypeGet()) != TYP_BYREF && genActualType(op2->TypeGet()) != TYP_BYREF);
9173 assert(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
9174 varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
9176 type = genActualType(op1->gtType);
9178 #if FEATURE_X87_DOUBLES
9180 // For x87, since we only have 1 size of registers, prefer double
9181 // For everybody else, be more precise
9182 if (type == TYP_FLOAT)
9185 #else // !FEATURE_X87_DOUBLES
9187 // If both operands are TYP_FLOAT, then leave it as TYP_FLOAT.
9188 // Otherwise, turn floats into doubles
9189 if ((type == TYP_FLOAT) && (genActualType(op2->gtType) != TYP_FLOAT))
9191 assert(genActualType(op2->gtType) == TYP_DOUBLE);
9195 #endif // FEATURE_X87_DOUBLES
9198 #if FEATURE_X87_DOUBLES
9199 assert(type == TYP_BYREF || type == TYP_DOUBLE || type == TYP_LONG || type == TYP_INT);
9200 #else // FEATURE_X87_DOUBLES
9201 assert(type == TYP_BYREF || type == TYP_DOUBLE || type == TYP_FLOAT || type == TYP_LONG || type == TYP_INT);
9202 #endif // FEATURE_X87_DOUBLES
9207 /*****************************************************************************
9208 * Casting Helper Function to service both CEE_CASTCLASS and CEE_ISINST
9210 * typeRef contains the token, op1 to contain the value being cast,
9211 * and op2 to contain code that creates the type handle corresponding to typeRef
9212 * isCastClass = true means CEE_CASTCLASS, false means CEE_ISINST
9214 GenTreePtr Compiler::impCastClassOrIsInstToTree(GenTreePtr op1,
9216 CORINFO_RESOLVED_TOKEN* pResolvedToken,
9221 assert(op1->TypeGet() == TYP_REF);
9223 CorInfoHelpFunc helper = info.compCompHnd->getCastingHelper(pResolvedToken, isCastClass);
9227 // We only want to expand inline the normal CHKCASTCLASS helper;
9228 expandInline = (helper == CORINFO_HELP_CHKCASTCLASS);
9232 if (helper == CORINFO_HELP_ISINSTANCEOFCLASS)
9234 // Get the class handle and class attributes for the type we are casting to
9236 DWORD flags = info.compCompHnd->getClassAttribs(pResolvedToken->hClass);
9239 // If the class handle is marked as final we can also expand the IsInst check inline
9241 expandInline = ((flags & CORINFO_FLG_FINAL) != 0);
9244 // But don't expand inline these two cases
9246 if (flags & CORINFO_FLG_MARSHAL_BYREF)
9248 expandInline = false;
9250 else if (flags & CORINFO_FLG_CONTEXTFUL)
9252 expandInline = false;
9258 // We can't expand inline any other helpers
9260 expandInline = false;
9266 if (compCurBB->isRunRarely())
9268 expandInline = false; // not worth the code expansion in a rarely run block
9271 if ((op1->gtFlags & GTF_GLOB_EFFECT) && lvaHaveManyLocals())
9273 expandInline = false; // not worth creating an untracked local variable
9279 // If we CSE this class handle we prevent assertionProp from making SubType assertions
9280 // so instead we force the CSE logic to not consider CSE-ing this class handle.
9282 op2->gtFlags |= GTF_DONT_CSE;
9284 return gtNewHelperCallNode(helper, TYP_REF, 0, gtNewArgList(op2, op1));
9287 impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark2"));
9292 // expand the methodtable match:
9296 // GT_IND op2 (typically CNS_INT)
9301 // This can replace op1 with a GT_COMMA that evaluates op1 into a local
9303 op1 = impCloneExpr(op1, &temp, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("CASTCLASS eval op1"));
9305 // op1 is now known to be a non-complex tree
9306 // thus we can use gtClone(op1) from now on
9309 GenTreePtr op2Var = op2;
9312 op2Var = fgInsertCommaFormTemp(&op2);
9313 lvaTable[op2Var->AsLclVarCommon()->GetLclNum()].lvIsCSE = true;
9315 temp = gtNewOperNode(GT_IND, TYP_I_IMPL, temp);
9316 temp->gtFlags |= GTF_EXCEPT;
9317 condMT = gtNewOperNode(GT_NE, TYP_INT, temp, op2);
9319 GenTreePtr condNull;
9321 // expand the null check:
9323 // condNull ==> GT_EQ
9328 condNull = gtNewOperNode(GT_EQ, TYP_INT, gtClone(op1), gtNewIconNode(0, TYP_REF));
9331 // expand the true and false trees for the condMT
9333 GenTreePtr condFalse = gtClone(op1);
9334 GenTreePtr condTrue;
9338 // use the special helper that skips the cases checked by our inlined cast
9340 helper = CORINFO_HELP_CHKCASTCLASS_SPECIAL;
9342 condTrue = gtNewHelperCallNode(helper, TYP_REF, 0, gtNewArgList(op2Var, gtClone(op1)));
9346 condTrue = gtNewIconNode(0, TYP_REF);
9349 #define USE_QMARK_TREES
9351 #ifdef USE_QMARK_TREES
9354 // Generate first QMARK - COLON tree
9356 // qmarkMT ==> GT_QMARK
9360 // condFalse condTrue
9362 temp = new (this, GT_COLON) GenTreeColon(TYP_REF, condTrue, condFalse);
9363 qmarkMT = gtNewQmarkNode(TYP_REF, condMT, temp);
9364 condMT->gtFlags |= GTF_RELOP_QMARK;
9366 GenTreePtr qmarkNull;
9368 // Generate second QMARK - COLON tree
9370 // qmarkNull ==> GT_QMARK
9372 // condNull GT_COLON
9376 temp = new (this, GT_COLON) GenTreeColon(TYP_REF, gtClone(op1), qmarkMT);
9377 qmarkNull = gtNewQmarkNode(TYP_REF, condNull, temp);
9378 qmarkNull->gtFlags |= GTF_QMARK_CAST_INSTOF;
9379 condNull->gtFlags |= GTF_RELOP_QMARK;
9381 // Make QMark node a top level node by spilling it.
9382 unsigned tmp = lvaGrabTemp(true DEBUGARG("spilling QMark2"));
9383 impAssignTempGen(tmp, qmarkNull, (unsigned)CHECK_SPILL_NONE);
9384 return gtNewLclvNode(tmp, TYP_REF);
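// assertImp: importer-specific assert. In DEBUG builds it reports the failing
// condition together with the current opcode, IL offset, operand types and
// stack depth before aborting; in non-DEBUG builds it expands to nothing.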
9389 #define assertImp(cond) ((void)0)
9391 #define assertImp(cond) \
9396 const int cchAssertImpBuf = 600; \
9397 char* assertImpBuf = (char*)alloca(cchAssertImpBuf); \
9398 _snprintf_s(assertImpBuf, cchAssertImpBuf, cchAssertImpBuf - 1, \
9399 "%s : Possibly bad IL with CEE_%s at offset %04Xh (op1=%s op2=%s stkDepth=%d)", #cond, \
9400 impCurOpcName, impCurOpcOffs, op1 ? varTypeName(op1->TypeGet()) : "NULL", \
9401 op2 ? varTypeName(op2->TypeGet()) : "NULL", verCurrentState.esStackDepth); \
9402 assertAbort(assertImpBuf, __FILE__, __LINE__); \
9408 #pragma warning(push)
9409 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
9411 /*****************************************************************************
9412 * Import the instr for the given basic block
9414 void Compiler::impImportBlockCode(BasicBlock* block)
9416 #define _impResolveToken(kind) impResolveToken(codeAddr, &resolvedToken, kind)
9422 printf("\nImporting BB%02u (PC=%03u) of '%s'", block->bbNum, block->bbCodeOffs, info.compFullName);
9426 unsigned nxtStmtIndex = impInitBlockLineInfo();
9427 IL_OFFSET nxtStmtOffs;
9429 GenTreePtr arrayNodeFrom, arrayNodeTo, arrayNodeToIndex;
9431 CorInfoHelpFunc helper;
9432 CorInfoIsAccessAllowedResult accessAllowedResult;
9433 CORINFO_HELPER_DESC calloutHelper;
9434 const BYTE* lastLoadToken = nullptr;
9436 // reject cyclic constraints
9437 if (tiVerificationNeeded)
9439 Verify(!info.hasCircularClassConstraints, "Method parent has circular class type parameter constraints.");
9440 Verify(!info.hasCircularMethodConstraints, "Method has circular method type parameter constraints.");
9443 /* Get the tree list started */
9447 /* Walk the opcodes that comprise the basic block */
9449 const BYTE* codeAddr = info.compCode + block->bbCodeOffs;
9450 const BYTE* codeEndp = info.compCode + block->bbCodeOffsEnd;
9452 IL_OFFSET opcodeOffs = block->bbCodeOffs;
9453 IL_OFFSET lastSpillOffs = opcodeOffs;
9457 /* remember the start of the delegate creation sequence (used for verification) */
9458 const BYTE* delegateCreateStart = nullptr;
9460 int prefixFlags = 0;
9461 bool explicitTailCall, constraintCall, readonlyCall;
9463 bool insertLdloc = false; // set by CEE_DUP and cleared by following store
9466 unsigned numArgs = info.compArgsCount;
9468 /* Now process all the opcodes in the block */
9470 var_types callTyp = TYP_COUNT;
9471 OPCODE prevOpcode = CEE_ILLEGAL;
9473 if (block->bbCatchTyp)
9475 if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES)
9477 impCurStmtOffsSet(block->bbCodeOffs);
9480 // We will spill the GT_CATCH_ARG and the input of the BB_QMARK block
9481 // to a temp. This is a trade off for code simplicity
9482 impSpillSpecialSideEff();
9485 while (codeAddr < codeEndp)
9487 bool usingReadyToRunHelper = false;
9488 CORINFO_RESOLVED_TOKEN resolvedToken;
9489 CORINFO_RESOLVED_TOKEN constrainedResolvedToken;
9490 CORINFO_CALL_INFO callInfo;
9491 CORINFO_FIELD_INFO fieldInfo;
9493 tiRetVal = typeInfo(); // Default type info
9495 //---------------------------------------------------------------------
9497 /* We need to restrict the max tree depth as many of the Compiler
9498 functions are recursive. We do this by spilling the stack */
9500 if (verCurrentState.esStackDepth)
9502 /* Has it been a while since we last saw a non-empty stack (which
9503 guarantees that the tree depth isn't accumulating.
9505 if ((opcodeOffs - lastSpillOffs) > 200)
9507 impSpillStackEnsure();
9508 lastSpillOffs = opcodeOffs;
9513 lastSpillOffs = opcodeOffs;
9514 impBoxTempInUse = false; // nothing on the stack, box temp OK to use again
9517 /* Compute the current instr offset */
9519 opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
9522 if (opts.compDbgInfo)
9525 if (!compIsForInlining())
9528 (nxtStmtIndex < info.compStmtOffsetsCount) ? info.compStmtOffsets[nxtStmtIndex] : BAD_IL_OFFSET;
9530 /* Have we reached the next stmt boundary ? */
9532 if (nxtStmtOffs != BAD_IL_OFFSET && opcodeOffs >= nxtStmtOffs)
9534 assert(nxtStmtOffs == info.compStmtOffsets[nxtStmtIndex]);
9536 if (verCurrentState.esStackDepth != 0 && opts.compDbgCode)
9538 /* We need to provide accurate IP-mapping at this point.
9539 So spill anything on the stack so that it will form
9540 gtStmts with the correct stmt offset noted */
9542 impSpillStackEnsure(true);
9545 // Has impCurStmtOffs been reported in any tree?
9547 if (impCurStmtOffs != BAD_IL_OFFSET && opts.compDbgCode)
9549 GenTreePtr placeHolder = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
9550 impAppendTree(placeHolder, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
9552 assert(impCurStmtOffs == BAD_IL_OFFSET);
9555 if (impCurStmtOffs == BAD_IL_OFFSET)
9557 /* Make sure that nxtStmtIndex is in sync with opcodeOffs.
9558 If opcodeOffs has gone past nxtStmtIndex, catch up */
9560 while ((nxtStmtIndex + 1) < info.compStmtOffsetsCount &&
9561 info.compStmtOffsets[nxtStmtIndex + 1] <= opcodeOffs)
9566 /* Go to the new stmt */
9568 impCurStmtOffsSet(info.compStmtOffsets[nxtStmtIndex]);
9570 /* Update the stmt boundary index */
9573 assert(nxtStmtIndex <= info.compStmtOffsetsCount);
9575 /* Are there any more line# entries after this one? */
9577 if (nxtStmtIndex < info.compStmtOffsetsCount)
9579 /* Remember where the next line# starts */
9581 nxtStmtOffs = info.compStmtOffsets[nxtStmtIndex];
9585 /* No more line# entries */
9587 nxtStmtOffs = BAD_IL_OFFSET;
9591 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES) &&
9592 (verCurrentState.esStackDepth == 0))
9594 /* At stack-empty locations, we have already added the tree to
9595 the stmt list with the last offset. We just need to update the current stmt offset. */
9599 impCurStmtOffsSet(opcodeOffs);
9601 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES) &&
9602 impOpcodeIsCallSiteBoundary(prevOpcode))
9604 /* Make sure we have a type cached */
9605 assert(callTyp != TYP_COUNT);
9607 if (callTyp == TYP_VOID)
9609 impCurStmtOffsSet(opcodeOffs);
9611 else if (opts.compDbgCode)
9613 impSpillStackEnsure(true);
9614 impCurStmtOffsSet(opcodeOffs);
9617 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::NOP_BOUNDARIES) && (prevOpcode == CEE_NOP))
9619 if (opts.compDbgCode)
9621 impSpillStackEnsure(true);
9624 impCurStmtOffsSet(opcodeOffs);
9627 assert(impCurStmtOffs == BAD_IL_OFFSET || nxtStmtOffs == BAD_IL_OFFSET ||
9628 jitGetILoffs(impCurStmtOffs) <= nxtStmtOffs);
9632 CORINFO_CLASS_HANDLE clsHnd = DUMMY_INIT(NULL);
9633 CORINFO_CLASS_HANDLE ldelemClsHnd = DUMMY_INIT(NULL);
9634 CORINFO_CLASS_HANDLE stelemClsHnd = DUMMY_INIT(NULL);
9636 var_types lclTyp, ovflType = TYP_UNKNOWN;
9637 GenTreePtr op1 = DUMMY_INIT(NULL);
9638 GenTreePtr op2 = DUMMY_INIT(NULL);
9639 GenTreeArgList* args = nullptr; // What good do these "DUMMY_INIT"s do?
9640 GenTreePtr newObjThisPtr = DUMMY_INIT(NULL);
9641 bool uns = DUMMY_INIT(false);
9643 /* Get the next opcode and the size of its parameters */
9645 OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr);
9646 codeAddr += sizeof(__int8);
9649 impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1);
9650 JITDUMP("\n [%2u] %3u (0x%03x) ", verCurrentState.esStackDepth, impCurOpcOffs, impCurOpcOffs);
9655 // Return if any previous code has caused inline to fail.
9656 if (compDonotInline())
9661 /* Get the size of additional parameters */
9663 signed int sz = opcodeSizes[opcode];
9666 clsHnd = NO_CLASS_HANDLE;
9668 callTyp = TYP_COUNT;
9670 impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1);
9671 impCurOpcName = opcodeNames[opcode];
9673 if (verbose && (opcode != CEE_PREFIX1))
9675 printf("%s", impCurOpcName);
9678 /* Use assertImp() to display the opcode */
9680 op1 = op2 = nullptr;
9683 /* See what kind of an opcode we have, then */
9685 unsigned mflags = 0;
9686 unsigned clsFlags = 0;
9699 CORINFO_SIG_INFO sig;
9702 bool ovfl, unordered, callNode;
9704 CORINFO_CLASS_HANDLE tokenType;
9714 opcode = (OPCODE)(getU1LittleEndian(codeAddr) + 256);
9715 codeAddr += sizeof(__int8);
9716 opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
9721 // We need to call impSpillLclRefs() for a struct type lclVar.
9722 // This is done for non-block assignments in the handling of stloc.
9723 if ((op1->OperGet() == GT_ASG) && varTypeIsStruct(op1->gtOp.gtOp1) &&
9724 (op1->gtOp.gtOp1->gtOper == GT_LCL_VAR))
9726 impSpillLclRefs(op1->gtOp.gtOp1->AsLclVarCommon()->gtLclNum);
9729 /* Append 'op1' to the list of statements */
9730 impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
9735 /* Append 'op1' to the list of statements */
9737 impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
9743 // Remember at which BC offset the tree was finished
9744 impNoteLastILoffs();
9749 impPushNullObjRefOnStack();
9762 cval.intVal = (opcode - CEE_LDC_I4_0);
9763 assert(-1 <= cval.intVal && cval.intVal <= 8);
9767 cval.intVal = getI1LittleEndian(codeAddr);
9770 cval.intVal = getI4LittleEndian(codeAddr);
9773 JITDUMP(" %d", cval.intVal);
9774 impPushOnStack(gtNewIconNode(cval.intVal), typeInfo(TI_INT));
9778 cval.lngVal = getI8LittleEndian(codeAddr);
9779 JITDUMP(" 0x%016llx", cval.lngVal);
9780 impPushOnStack(gtNewLconNode(cval.lngVal), typeInfo(TI_LONG));
9784 cval.dblVal = getR8LittleEndian(codeAddr);
9785 JITDUMP(" %#.17g", cval.dblVal);
9786 impPushOnStack(gtNewDconNode(cval.dblVal), typeInfo(TI_DOUBLE));
9790 cval.dblVal = getR4LittleEndian(codeAddr);
9791 JITDUMP(" %#.17g", cval.dblVal);
9793 GenTreePtr cnsOp = gtNewDconNode(cval.dblVal);
9794 #if !FEATURE_X87_DOUBLES
9795 // X87 stack doesn't differentiate between float/double
9796 // so R4 is treated as R8, but everybody else does
9797 cnsOp->gtType = TYP_FLOAT;
9798 #endif // FEATURE_X87_DOUBLES
9799 impPushOnStack(cnsOp, typeInfo(TI_DOUBLE));
9805 if (compIsForInlining())
9807 if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_NO_CALLEE_LDSTR)
9809 compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_LDSTR_RESTRICTION);
9814 val = getU4LittleEndian(codeAddr);
9815 JITDUMP(" %08X", val);
9816 if (tiVerificationNeeded)
9818 Verify(info.compCompHnd->isValidStringRef(info.compScopeHnd, val), "bad string");
9819 tiRetVal = typeInfo(TI_REF, impGetStringClass());
9821 impPushOnStack(gtNewSconNode(val, info.compScopeHnd), tiRetVal);
9826 lclNum = getU2LittleEndian(codeAddr);
9827 JITDUMP(" %u", lclNum);
9828 impLoadArg(lclNum, opcodeOffs + sz + 1);
9832 lclNum = getU1LittleEndian(codeAddr);
9833 JITDUMP(" %u", lclNum);
9834 impLoadArg(lclNum, opcodeOffs + sz + 1);
9841 lclNum = (opcode - CEE_LDARG_0);
9842 assert(lclNum >= 0 && lclNum < 4);
9843 impLoadArg(lclNum, opcodeOffs + sz + 1);
9847 lclNum = getU2LittleEndian(codeAddr);
9848 JITDUMP(" %u", lclNum);
9849 impLoadLoc(lclNum, opcodeOffs + sz + 1);
9853 lclNum = getU1LittleEndian(codeAddr);
9854 JITDUMP(" %u", lclNum);
9855 impLoadLoc(lclNum, opcodeOffs + sz + 1);
9862 lclNum = (opcode - CEE_LDLOC_0);
9863 assert(lclNum >= 0 && lclNum < 4);
9864 impLoadLoc(lclNum, opcodeOffs + sz + 1);
9868 lclNum = getU2LittleEndian(codeAddr);
9872 lclNum = getU1LittleEndian(codeAddr);
9874 JITDUMP(" %u", lclNum);
9876 if (tiVerificationNeeded)
9878 Verify(lclNum < info.compILargsCount, "bad arg num");
9881 if (compIsForInlining())
9883 op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo);
9884 noway_assert(op1->gtOper == GT_LCL_VAR);
9885 lclNum = op1->AsLclVar()->gtLclNum;
9890 lclNum = compMapILargNum(lclNum); // account for possible hidden param
9891 assertImp(lclNum < numArgs);
9893 if (lclNum == info.compThisArg)
9895 lclNum = lvaArg0Var;
9897 lvaTable[lclNum].lvArgWrite = 1;
9899 if (tiVerificationNeeded)
9901 typeInfo& tiLclVar = lvaTable[lclNum].lvVerTypeInfo;
9902 Verify(tiCompatibleWith(impStackTop().seTypeInfo, NormaliseForStack(tiLclVar), true),
9905 if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
9907 Verify(!tiLclVar.IsThisPtr(), "storing to uninit this ptr");
9914 lclNum = getU2LittleEndian(codeAddr);
9915 JITDUMP(" %u", lclNum);
9919 lclNum = getU1LittleEndian(codeAddr);
9920 JITDUMP(" %u", lclNum);
9927 lclNum = (opcode - CEE_STLOC_0);
9928 assert(lclNum >= 0 && lclNum < 4);
9931 if (tiVerificationNeeded)
9933 Verify(lclNum < info.compMethodInfo->locals.numArgs, "bad local num");
9934 Verify(tiCompatibleWith(impStackTop().seTypeInfo,
9935 NormaliseForStack(lvaTable[lclNum + numArgs].lvVerTypeInfo), true),
9939 if (compIsForInlining())
9941 lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
9943 /* Have we allocated a temp for this local? */
9945 lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline stloc first use temp"));
9954 if (lclNum >= info.compLocalsCount && lclNum != lvaArg0Var)
9956 assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
9962 /* if it is a struct assignment, make certain we don't overflow the buffer */
9963 assert(lclTyp != TYP_STRUCT || lvaLclSize(lclNum) >= info.compCompHnd->getClassSize(clsHnd));
9965 if (lvaTable[lclNum].lvNormalizeOnLoad())
9967 lclTyp = lvaGetRealType(lclNum);
9971 lclTyp = lvaGetActualType(lclNum);
9975 /* Pop the value being assigned */
9978 StackEntry se = impPopStack(clsHnd);
9980 tiRetVal = se.seTypeInfo;
9984 if (varTypeIsSIMD(lclTyp) && (lclTyp != op1->TypeGet()))
9986 assert(op1->TypeGet() == TYP_STRUCT);
9987 op1->gtType = lclTyp;
9989 #endif // FEATURE_SIMD
9991 op1 = impImplicitIorI4Cast(op1, lclTyp);
9993 #ifdef _TARGET_64BIT_
9994 // Downcast the TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
9995 if (varTypeIsI(op1->TypeGet()) && (genActualType(lclTyp) == TYP_INT))
9997 assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
9998 op1 = gtNewCastNode(TYP_INT, op1, TYP_INT);
10000 #endif // _TARGET_64BIT_
10002 // We had better assign it a value of the correct type
10004 genActualType(lclTyp) == genActualType(op1->gtType) ||
10005 genActualType(lclTyp) == TYP_I_IMPL && op1->IsVarAddr() ||
10006 (genActualType(lclTyp) == TYP_I_IMPL && (op1->gtType == TYP_BYREF || op1->gtType == TYP_REF)) ||
10007 (genActualType(op1->gtType) == TYP_I_IMPL && lclTyp == TYP_BYREF) ||
10008 (varTypeIsFloating(lclTyp) && varTypeIsFloating(op1->TypeGet())) ||
10009 ((genActualType(lclTyp) == TYP_BYREF) && genActualType(op1->TypeGet()) == TYP_REF));
10011 /* If op1 is "&var" then its type is the transient "*" and it can
10012 be used either as TYP_BYREF or TYP_I_IMPL */
10014 if (op1->IsVarAddr())
10016 assertImp(genActualType(lclTyp) == TYP_I_IMPL || lclTyp == TYP_BYREF);
10018 /* When "&var" is created, we assume it is a byref. If it is
10019 being assigned to a TYP_I_IMPL var, change the type to
10020 prevent unnecessary GC info */
10022 if (genActualType(lclTyp) == TYP_I_IMPL)
10024 op1->gtType = TYP_I_IMPL;
10028 /* Filter out simple assignments to itself */
10030 if (op1->gtOper == GT_LCL_VAR && lclNum == op1->gtLclVarCommon.gtLclNum)
10034 // This is a sequence of (ldloc, dup, stloc). Can simplify
10035 // to (ldloc, stloc). Goto LDVAR to reconstruct the ldloc node.
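// E.g. "ldloc.0; dup; stloc.0" stores V_0's own value back into V_0, so the assignment can
// be dropped and only the reload of the local needs to remain on the stack.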
10036 CLANG_FORMAT_COMMENT_ANCHOR;
10039 if (tiVerificationNeeded)
10042 typeInfo::AreEquivalent(tiRetVal, NormaliseForStack(lvaTable[lclNum].lvVerTypeInfo)));
10047 insertLdloc = false;
10049 impLoadVar(lclNum, opcodeOffs + sz + 1);
10052 else if (opts.compDbgCode)
10054 op1 = gtNewNothingNode();
10063 /* Create the assignment node */
10065 op2 = gtNewLclvNode(lclNum, lclTyp, opcodeOffs + sz + 1);
10067 /* If the local is aliased, we need to spill calls and
10068 indirections from the stack. */
10070 if ((lvaTable[lclNum].lvAddrExposed || lvaTable[lclNum].lvHasLdAddrOp) &&
10071 verCurrentState.esStackDepth > 0)
10073 impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("Local could be aliased"));
10076 /* Spill any refs to the local from the stack */
10078 impSpillLclRefs(lclNum);
10080 #if !FEATURE_X87_DOUBLES
10081 // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE
10082 // We insert a cast to the dest 'op2' type
10084 if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) &&
10085 varTypeIsFloating(op2->gtType))
10087 op1 = gtNewCastNode(op2->TypeGet(), op1, op2->TypeGet());
10089 #endif // !FEATURE_X87_DOUBLES
10091 if (varTypeIsStruct(lclTyp))
10093 op1 = impAssignStruct(op2, op1, clsHnd, (unsigned)CHECK_SPILL_ALL);
10097 // The code generator generates GC tracking information
10098 // based on the RHS of the assignment. Later the LHS (which is
10099 // a BYREF) gets used and the emitter checks that that variable
10100 // is being tracked. It is not (since the RHS was an int and did
10101 // not need tracking). To keep this assert happy, we change the RHS
10102 if (lclTyp == TYP_BYREF && !varTypeIsGC(op1->gtType))
10104 op1->gtType = TYP_BYREF;
10106 op1 = gtNewAssignNode(op2, op1);
10109 /* If insertLdloc is true, then we need to insert a ldloc following the
10110 stloc. This is done when converting a (dup, stloc) sequence into
10111 a (stloc, ldloc) sequence. */
10115 // From SPILL_APPEND
10116 impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
10119 // From DONE_APPEND
10120 impNoteLastILoffs();
10123 insertLdloc = false;
10125 impLoadVar(lclNum, opcodeOffs + sz + 1, tiRetVal);
10132 lclNum = getU2LittleEndian(codeAddr);
10136 lclNum = getU1LittleEndian(codeAddr);
10138 JITDUMP(" %u", lclNum);
10139 if (tiVerificationNeeded)
10141 Verify(lclNum < info.compMethodInfo->locals.numArgs, "bad local num");
10142 Verify(info.compInitMem, "initLocals not set");
10145 if (compIsForInlining())
10147 // Get the local type
10148 lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
10150 /* Have we allocated a temp for this local? */
10152 lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline ldloca(s) first use temp"));
10154 op1 = gtNewLclvNode(lclNum, lvaGetActualType(lclNum));
10160 assertImp(lclNum < info.compLocalsCount);
10164 lclNum = getU2LittleEndian(codeAddr);
10168 lclNum = getU1LittleEndian(codeAddr);
10170 JITDUMP(" %u", lclNum);
10171 Verify(lclNum < info.compILargsCount, "bad arg num");
10173 if (compIsForInlining())
10175 // In IL, LDARGA(_S) is used to load the byref managed pointer of a struct argument,
10176 // followed by a ldfld to load the field.
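// A typical sequence is "ldarga.s 1; ldfld int32 SomeStruct::someField" (names here are
// illustrative only).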
10178 op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo);
10179 if (op1->gtOper != GT_LCL_VAR)
10181 compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDARGA_NOT_LOCAL_VAR);
10185 assert(op1->gtOper == GT_LCL_VAR);
10190 lclNum = compMapILargNum(lclNum); // account for possible hidden param
10191 assertImp(lclNum < numArgs);
10193 if (lclNum == info.compThisArg)
10195 lclNum = lvaArg0Var;
10202 op1 = gtNewLclvNode(lclNum, lvaGetActualType(lclNum), opcodeOffs + sz + 1);
10205 assert(op1->gtOper == GT_LCL_VAR);
10207 /* Note that this is supposed to create the transient type "*"
10208 which may be used as a TYP_I_IMPL. However we catch places
10209 where it is used as a TYP_I_IMPL and change the node if needed.
10210 Thus we are pessimistic and may report byrefs in the GC info
10211 where it was not absolutely needed, but it is safer this way.
10213 op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, op1);
10215 // &aliasedVar doesn't need GTF_GLOB_REF, though aliasedVar does
10216 assert((op1->gtFlags & GTF_GLOB_REF) == 0);
10218 tiRetVal = lvaTable[lclNum].lvVerTypeInfo;
10219 if (tiVerificationNeeded)
10221 // Don't allow taking address of uninit this ptr.
10222 if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
10224 Verify(!tiRetVal.IsThisPtr(), "address of uninit this ptr");
10227 if (!tiRetVal.IsByRef())
10229 tiRetVal.MakeByRef();
10233 Verify(false, "byref to byref");
10237 impPushOnStack(op1, tiRetVal);
10242 if (!info.compIsVarArgs)
10244 BADCODE("arglist in non-vararg method");
10247 if (tiVerificationNeeded)
10249 tiRetVal = typeInfo(TI_STRUCT, impGetRuntimeArgumentHandle());
10251 assertImp((info.compMethodInfo->args.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG);
10253 /* The ARGLIST cookie is a hidden 'last' parameter; we have already
10254 adjusted the arg count because this is like fetching the last param */
10255 assertImp(0 < numArgs);
10256 assert(lvaTable[lvaVarargsHandleArg].lvAddrExposed);
10257 lclNum = lvaVarargsHandleArg;
10258 op1 = gtNewLclvNode(lclNum, TYP_I_IMPL, opcodeOffs + sz + 1);
10259 op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, op1);
10260 impPushOnStack(op1, tiRetVal);
10263 case CEE_ENDFINALLY:
10265 if (compIsForInlining())
10267 assert(!"Shouldn't have exception handlers in the inliner!");
10268 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFINALLY);
10272 if (verCurrentState.esStackDepth > 0)
10274 impEvalSideEffects();
10277 if (info.compXcptnsCount == 0)
10279 BADCODE("endfinally outside finally");
10282 assert(verCurrentState.esStackDepth == 0);
10284 op1 = gtNewOperNode(GT_RETFILT, TYP_VOID, nullptr);
10287 case CEE_ENDFILTER:
10289 if (compIsForInlining())
10291 assert(!"Shouldn't have exception handlers in the inliner!");
10292 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFILTER);
10296 block->bbSetRunRarely(); // filters are rare
10298 if (info.compXcptnsCount == 0)
10300 BADCODE("endfilter outside filter");
10303 if (tiVerificationNeeded)
10305 Verify(impStackTop().seTypeInfo.IsType(TI_INT), "bad endfilt arg");
10308 op1 = impPopStack().val;
10309 assertImp(op1->gtType == TYP_INT);
10310 if (!bbInFilterILRange(block))
10312 BADCODE("EndFilter outside a filter handler");
10315 /* Mark current bb as end of filter */
10317 assert(compCurBB->bbFlags & BBF_DONT_REMOVE);
10318 assert(compCurBB->bbJumpKind == BBJ_EHFILTERRET);
10320 /* Mark catch handler as successor */
10322 op1 = gtNewOperNode(GT_RETFILT, op1->TypeGet(), op1);
10323 if (verCurrentState.esStackDepth != 0)
10325 verRaiseVerifyException(INDEBUG("stack must be 1 on end of filter") DEBUGARG(__FILE__)
10326 DEBUGARG(__LINE__));
10331 prefixFlags &= ~PREFIX_TAILCALL; // ret without call before it
10333 if (!impReturnInstruction(block, prefixFlags, opcode))
10344 assert(!compIsForInlining());
10346 if (tiVerificationNeeded)
10348 Verify(false, "Invalid opcode: CEE_JMP");
10351 if ((info.compFlags & CORINFO_FLG_SYNCH) || block->hasTryIndex() || block->hasHndIndex())
10353 /* CEE_JMP does not make sense in some "protected" regions. */
10355 BADCODE("Jmp not allowed in protected region");
10358 if (verCurrentState.esStackDepth != 0)
10360 BADCODE("Stack must be empty after CEE_JMPs");
10363 _impResolveToken(CORINFO_TOKENKIND_Method);
10365 JITDUMP(" %08X", resolvedToken.token);
10367 /* The signature of the target has to be identical to ours.
10368 At least check that argCnt and returnType match */
10370 eeGetMethodSig(resolvedToken.hMethod, &sig);
10371 if (sig.numArgs != info.compMethodInfo->args.numArgs ||
10372 sig.retType != info.compMethodInfo->args.retType ||
10373 sig.callConv != info.compMethodInfo->args.callConv)
10375 BADCODE("Incompatible target for CEE_JMPs");
10378 #if defined(_TARGET_XARCH_) || defined(_TARGET_ARMARCH_)
10380 op1 = new (this, GT_JMP) GenTreeVal(GT_JMP, TYP_VOID, (size_t)resolvedToken.hMethod);
10382 /* Mark the basic block as being a JUMP instead of RETURN */
10384 block->bbFlags |= BBF_HAS_JMP;
10386 /* Set this flag to make sure register arguments have a location assigned
10387 * even if we don't use them inside the method */
10389 compJmpOpUsed = true;
10391 fgNoStructPromotion = true;
10395 #else // !_TARGET_XARCH_ && !_TARGET_ARMARCH_
10397 // Import this just like a series of LDARGs + tail. + call + ret
10399 if (info.compIsVarArgs)
10401 // For now we don't implement true tail calls, so this breaks varargs.
10402 // So warn the user instead of generating bad code.
10403 // This is a semi-temporary workaround for DevDiv 173860, until we can properly
10404 // implement true tail calls.
10405 IMPL_LIMITATION("varags + CEE_JMP doesn't work yet");
10408 // First load up the arguments (0 - N)
10409 for (unsigned argNum = 0; argNum < info.compILargsCount; argNum++)
10411 impLoadArg(argNum, opcodeOffs + sz + 1);
10414 // Now generate the tail call
10415 noway_assert(prefixFlags == 0);
10416 prefixFlags = PREFIX_TAILCALL_EXPLICIT;
10419 eeGetCallInfo(&resolvedToken, NULL,
10420 combine(CORINFO_CALLINFO_ALLOWINSTPARAM, CORINFO_CALLINFO_SECURITYCHECKS), &callInfo);
10422 // All calls and delegates need a security callout.
10423 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
10425 callTyp = impImportCall(CEE_CALL, &resolvedToken, NULL, NULL, PREFIX_TAILCALL_EXPLICIT, &callInfo,
10428 // And finish with the ret
10431 #endif // _TARGET_XARCH_ || _TARGET_ARMARCH_
10434 assertImp(sz == sizeof(unsigned));
10436 _impResolveToken(CORINFO_TOKENKIND_Class);
10438 JITDUMP(" %08X", resolvedToken.token);
10440 ldelemClsHnd = resolvedToken.hClass;
10442 if (tiVerificationNeeded)
10444 typeInfo tiArray = impStackTop(1).seTypeInfo;
10445 typeInfo tiIndex = impStackTop().seTypeInfo;
10447 // As per ECMA, the 'index' specified can be either int32 or native int.
10448 Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10450 typeInfo arrayElemType = verMakeTypeInfo(ldelemClsHnd);
10451 Verify(tiArray.IsNullObjRef() ||
10452 typeInfo::AreEquivalent(verGetArrayElemType(tiArray), arrayElemType),
10455 tiRetVal = arrayElemType;
10456 tiRetVal.MakeByRef();
10457 if (prefixFlags & PREFIX_READONLY)
10459 tiRetVal.SetIsReadonlyByRef();
10462 // an array interior pointer is always in the heap
10463 tiRetVal.SetIsPermanentHomeByRef();
10466 // If it's a value class array we just do a simple address-of
10467 if (eeIsValueClass(ldelemClsHnd))
10469 CorInfoType cit = info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd);
10470 if (cit == CORINFO_TYPE_UNDEF)
10472 lclTyp = TYP_STRUCT;
10476 lclTyp = JITtype2varType(cit);
10478 goto ARR_LD_POST_VERIFY;
10481 // Similarly, if it's a readonly access, we can do a simple address-of
10482 // without doing a runtime type-check
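// (The readonly. prefix guarantees the resulting byref will only be read, never written
// through, so the covariance check done by the CORINFO_HELP_LDELEMA_REF helper is unnecessary.)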
10483 if (prefixFlags & PREFIX_READONLY)
10486 goto ARR_LD_POST_VERIFY;
10489 // Otherwise we need the full helper function with run-time type check
10490 op1 = impTokenToHandle(&resolvedToken);
10491 if (op1 == nullptr)
10492 { // compDonotInline()
10496 args = gtNewArgList(op1); // Type
10497 args = gtNewListNode(impPopStack().val, args); // index
10498 args = gtNewListNode(impPopStack().val, args); // array
10499 op1 = gtNewHelperCallNode(CORINFO_HELP_LDELEMA_REF, TYP_BYREF, GTF_EXCEPT, args);
10501 impPushOnStack(op1, tiRetVal);
10504 // ldelem for reference and value types
10506 assertImp(sz == sizeof(unsigned));
10508 _impResolveToken(CORINFO_TOKENKIND_Class);
10510 JITDUMP(" %08X", resolvedToken.token);
10512 ldelemClsHnd = resolvedToken.hClass;
10514 if (tiVerificationNeeded)
10516 typeInfo tiArray = impStackTop(1).seTypeInfo;
10517 typeInfo tiIndex = impStackTop().seTypeInfo;
10519 // As per ECMA, the 'index' specified can be either int32 or native int.
10520 Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10521 tiRetVal = verMakeTypeInfo(ldelemClsHnd);
10523 Verify(tiArray.IsNullObjRef() || tiCompatibleWith(verGetArrayElemType(tiArray), tiRetVal, false),
10524 "type of array incompatible with type operand");
10525 tiRetVal.NormaliseForStack();
10528 // If it's a reference type or generic variable type
10529 // then just generate code as though it's a ldelem.ref instruction
10530 if (!eeIsValueClass(ldelemClsHnd))
10533 opcode = CEE_LDELEM_REF;
10537 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(ldelemClsHnd);
10538 lclTyp = JITtype2varType(jitTyp);
10539 tiRetVal = verMakeTypeInfo(ldelemClsHnd); // precise type always needed for struct
10540 tiRetVal.NormaliseForStack();
10542 goto ARR_LD_POST_VERIFY;
10544 case CEE_LDELEM_I1:
10547 case CEE_LDELEM_I2:
10548 lclTyp = TYP_SHORT;
10551 lclTyp = TYP_I_IMPL;
10554 // Should be UINT, but since no platform widens 4->8 bytes it doesn't matter
10555 // and treating it as TYP_INT avoids other asserts.
10556 case CEE_LDELEM_U4:
10560 case CEE_LDELEM_I4:
10563 case CEE_LDELEM_I8:
10566 case CEE_LDELEM_REF:
10569 case CEE_LDELEM_R4:
10570 lclTyp = TYP_FLOAT;
10572 case CEE_LDELEM_R8:
10573 lclTyp = TYP_DOUBLE;
10575 case CEE_LDELEM_U1:
10576 lclTyp = TYP_UBYTE;
10578 case CEE_LDELEM_U2:
10584 if (tiVerificationNeeded)
10586 typeInfo tiArray = impStackTop(1).seTypeInfo;
10587 typeInfo tiIndex = impStackTop().seTypeInfo;
10589 // As per ECMA, the 'index' specified can be either int32 or native int.
10590 Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10591 if (tiArray.IsNullObjRef())
10593 if (lclTyp == TYP_REF)
10594 { // we will say a deref of a null array yields a null ref
10595 tiRetVal = typeInfo(TI_NULL);
10599 tiRetVal = typeInfo(lclTyp);
10604 tiRetVal = verGetArrayElemType(tiArray);
10605 typeInfo arrayElemTi = typeInfo(lclTyp);
10606 #ifdef _TARGET_64BIT_
10607 if (opcode == CEE_LDELEM_I)
10609 arrayElemTi = typeInfo::nativeInt();
10612 if (lclTyp != TYP_REF && lclTyp != TYP_STRUCT)
10614 Verify(typeInfo::AreEquivalent(tiRetVal, arrayElemTi), "bad array");
10617 #endif // _TARGET_64BIT_
10619 Verify(tiRetVal.IsType(arrayElemTi.GetType()), "bad array");
10622 tiRetVal.NormaliseForStack();
10624 ARR_LD_POST_VERIFY:
10626 /* Pull the index value and array address */
10627 op2 = impPopStack().val;
10628 op1 = impPopStack().val;
10629 assertImp(op1->gtType == TYP_REF);
10631 /* Check for null pointer - in the inliner case we simply abort */
10633 if (compIsForInlining())
10635 if (op1->gtOper == GT_CNS_INT)
10637 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NULL_FOR_LDELEM);
10642 op1 = impCheckForNullPointer(op1);
10644 /* Mark the block as containing an index expression */
10646 if (op1->gtOper == GT_LCL_VAR)
10648 if (op2->gtOper == GT_LCL_VAR || op2->gtOper == GT_CNS_INT || op2->gtOper == GT_ADD)
10650 block->bbFlags |= BBF_HAS_IDX_LEN;
10651 optMethodFlags |= OMF_HAS_ARRAYREF;
10655 /* Create the index node and push it on the stack */
10657 op1 = gtNewIndexRef(lclTyp, op1, op2);
10659 ldstruct = (opcode == CEE_LDELEM && lclTyp == TYP_STRUCT);
10661 if ((opcode == CEE_LDELEMA) || ldstruct ||
10662 (ldelemClsHnd != DUMMY_INIT(NULL) && eeIsValueClass(ldelemClsHnd)))
10664 assert(ldelemClsHnd != DUMMY_INIT(NULL));
10666 // remember the element size
10667 if (lclTyp == TYP_REF)
10669 op1->gtIndex.gtIndElemSize = sizeof(void*);
10673 // If ldelemClsHnd is precisely a primitive type, use that; otherwise, preserve the struct type.
10674 if (info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd) == CORINFO_TYPE_UNDEF)
10676 op1->gtIndex.gtStructElemClass = ldelemClsHnd;
10678 assert(lclTyp != TYP_STRUCT || op1->gtIndex.gtStructElemClass != nullptr);
10679 if (lclTyp == TYP_STRUCT)
10681 size = info.compCompHnd->getClassSize(ldelemClsHnd);
10682 op1->gtIndex.gtIndElemSize = size;
10683 op1->gtType = lclTyp;
10687 if ((opcode == CEE_LDELEMA) || ldstruct)
10690 lclTyp = TYP_BYREF;
10692 op1 = gtNewOperNode(GT_ADDR, lclTyp, op1);
10696 assert(lclTyp != TYP_STRUCT);
10702 // Create an OBJ for the result
10703 op1 = gtNewObjNode(ldelemClsHnd, op1);
10704 op1->gtFlags |= GTF_EXCEPT;
10706 impPushOnStack(op1, tiRetVal);
10709 // stelem for reference and value types
10712 assertImp(sz == sizeof(unsigned));
10714 _impResolveToken(CORINFO_TOKENKIND_Class);
10716 JITDUMP(" %08X", resolvedToken.token);
10718 stelemClsHnd = resolvedToken.hClass;
10720 if (tiVerificationNeeded)
10722 typeInfo tiArray = impStackTop(2).seTypeInfo;
10723 typeInfo tiIndex = impStackTop(1).seTypeInfo;
10724 typeInfo tiValue = impStackTop().seTypeInfo;
10726 // As per ECMA, the 'index' specified can be either int32 or native int.
10727 Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10728 typeInfo arrayElem = verMakeTypeInfo(stelemClsHnd);
10730 Verify(tiArray.IsNullObjRef() || tiCompatibleWith(arrayElem, verGetArrayElemType(tiArray), false),
10731 "type operand incompatible with array element type");
10732 arrayElem.NormaliseForStack();
10733 Verify(tiCompatibleWith(tiValue, arrayElem, true), "value incompatible with type operand");
10736 // If it's a reference type just behave as though it's a stelem.ref instruction
10737 if (!eeIsValueClass(stelemClsHnd))
10739 goto STELEM_REF_POST_VERIFY;
10742 // Otherwise extract the type
10744 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(stelemClsHnd);
10745 lclTyp = JITtype2varType(jitTyp);
10746 goto ARR_ST_POST_VERIFY;
10749 case CEE_STELEM_REF:
10751 if (tiVerificationNeeded)
10753 typeInfo tiArray = impStackTop(2).seTypeInfo;
10754 typeInfo tiIndex = impStackTop(1).seTypeInfo;
10755 typeInfo tiValue = impStackTop().seTypeInfo;
10757 // As per ECMA, the 'index' specified can be either int32 or native int.
10758 Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10759 Verify(tiValue.IsObjRef(), "bad value");
10761 // We only check that it is an object reference; the helper does additional checks
10762 Verify(tiArray.IsNullObjRef() || verGetArrayElemType(tiArray).IsType(TI_REF), "bad array");
10765 arrayNodeTo = impStackTop(2).val;
10766 arrayNodeToIndex = impStackTop(1).val;
10767 arrayNodeFrom = impStackTop().val;
10770 // Note that it is not legal to optimize away CORINFO_HELP_ARRADDR_ST in a
10771 // lot of cases because of covariance, i.e. foo[] can be cast to object[].
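// For example (C#): "object[] arr = new string[1]; arr[0] = new object();" must throw
// ArrayTypeMismatchException at run time, so the store normally has to go through the helper.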
10774 // Check for assignment to same array, i.e. arrLcl[i] = arrLcl[j]
10775 // This does not need CORINFO_HELP_ARRADDR_ST
10777 if (arrayNodeFrom->OperGet() == GT_INDEX && arrayNodeFrom->gtOp.gtOp1->gtOper == GT_LCL_VAR &&
10778 arrayNodeTo->gtOper == GT_LCL_VAR &&
10779 arrayNodeTo->gtLclVarCommon.gtLclNum == arrayNodeFrom->gtOp.gtOp1->gtLclVarCommon.gtLclNum &&
10780 !lvaTable[arrayNodeTo->gtLclVarCommon.gtLclNum].lvAddrExposed)
10783 goto ARR_ST_POST_VERIFY;
10786 // Check for assignment of NULL. This does not need CORINFO_HELP_ARRADDR_ST
10788 if (arrayNodeFrom->OperGet() == GT_CNS_INT)
10790 assert(arrayNodeFrom->gtType == TYP_REF && arrayNodeFrom->gtIntCon.gtIconVal == 0);
10793 goto ARR_ST_POST_VERIFY;
10796 STELEM_REF_POST_VERIFY:
10798 /* Call a helper function to do the assignment */
10799 op1 = gtNewHelperCallNode(CORINFO_HELP_ARRADDR_ST, TYP_VOID, 0, impPopList(3, &flags, nullptr));
10803 case CEE_STELEM_I1:
10806 case CEE_STELEM_I2:
10807 lclTyp = TYP_SHORT;
10810 lclTyp = TYP_I_IMPL;
10812 case CEE_STELEM_I4:
10815 case CEE_STELEM_I8:
10818 case CEE_STELEM_R4:
10819 lclTyp = TYP_FLOAT;
10821 case CEE_STELEM_R8:
10822 lclTyp = TYP_DOUBLE;
10827 if (tiVerificationNeeded)
10829 typeInfo tiArray = impStackTop(2).seTypeInfo;
10830 typeInfo tiIndex = impStackTop(1).seTypeInfo;
10831 typeInfo tiValue = impStackTop().seTypeInfo;
10833 // As per ECMA, the 'index' specified can be either int32 or native int.
10834 Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10835 typeInfo arrayElem = typeInfo(lclTyp);
10836 #ifdef _TARGET_64BIT_
10837 if (opcode == CEE_STELEM_I)
10839 arrayElem = typeInfo::nativeInt();
10841 #endif // _TARGET_64BIT_
10842 Verify(tiArray.IsNullObjRef() || typeInfo::AreEquivalent(verGetArrayElemType(tiArray), arrayElem),
10845 Verify(tiCompatibleWith(NormaliseForStack(tiValue), arrayElem.NormaliseForStack(), true),
10849 ARR_ST_POST_VERIFY:
10850 /* The strict order of evaluation is LHS-operands, RHS-operands,
10851 range-check, and then assignment. However, codegen currently
10852 does the range-check before evaluating the RHS-operands. So to
10853 maintain strict ordering, we spill the stack. */
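// E.g. in "arr[i] = SomethingThatThrows()" the call must be evaluated (and may throw)
// before any IndexOutOfRangeException from the range check can be raised.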
10855 if (impStackTop().val->gtFlags & GTF_SIDE_EFFECT)
10857 impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
10858 "Strict ordering of exceptions for Array store"));
10861 /* Pull the new value from the stack */
10862 op2 = impPopStack().val;
10864 /* Pull the index value */
10865 op1 = impPopStack().val;
10867 /* Pull the array address */
10868 op3 = impPopStack().val;
10870 assertImp(op3->gtType == TYP_REF);
10871 if (op2->IsVarAddr())
10873 op2->gtType = TYP_I_IMPL;
10876 op3 = impCheckForNullPointer(op3);
10878 // Mark the block as containing an index expression
10880 if (op3->gtOper == GT_LCL_VAR)
10882 if (op1->gtOper == GT_LCL_VAR || op1->gtOper == GT_CNS_INT || op1->gtOper == GT_ADD)
10884 block->bbFlags |= BBF_HAS_IDX_LEN;
10885 optMethodFlags |= OMF_HAS_ARRAYREF;
10889 /* Create the index node */
10891 op1 = gtNewIndexRef(lclTyp, op3, op1);
10893 /* Create the assignment node and append it */
10895 if (lclTyp == TYP_STRUCT)
10897 assert(stelemClsHnd != DUMMY_INIT(NULL));
10899 op1->gtIndex.gtStructElemClass = stelemClsHnd;
10900 op1->gtIndex.gtIndElemSize = info.compCompHnd->getClassSize(stelemClsHnd);
10902 if (varTypeIsStruct(op1))
10904 op1 = impAssignStruct(op1, op2, stelemClsHnd, (unsigned)CHECK_SPILL_ALL);
10908 op2 = impImplicitR4orR8Cast(op2, op1->TypeGet());
10909 op1 = gtNewAssignNode(op1, op2);
10912 /* Mark the expression as containing an assignment */
10914 op1->gtFlags |= GTF_ASG;
10925 case CEE_ADD_OVF_UN:
10933 goto MATH_OP2_FLAGS;
10942 case CEE_SUB_OVF_UN:
10950 goto MATH_OP2_FLAGS;
10954 goto MATH_MAYBE_CALL_NO_OVF;
10959 case CEE_MUL_OVF_UN:
10966 goto MATH_MAYBE_CALL_OVF;
10968 // Other binary math operations
10972 goto MATH_MAYBE_CALL_NO_OVF;
10976 goto MATH_MAYBE_CALL_NO_OVF;
10980 goto MATH_MAYBE_CALL_NO_OVF;
10984 goto MATH_MAYBE_CALL_NO_OVF;
10986 MATH_MAYBE_CALL_NO_OVF:
10988 MATH_MAYBE_CALL_OVF:
10989 // Morpher has some complex logic about when to turn different
10990 // typed nodes on different platforms into helper calls. We
10991 // need to either duplicate that logic here, or just
10992 // pessimistically make all the nodes large enough to become
10993 // call nodes. Since call nodes aren't that much larger and
10994 // these opcodes are infrequent enough I chose the latter.
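// For example, a 64-bit divide on a 32-bit target is typically expanded into a helper call
// (e.g. CORINFO_HELP_LDIV), and overflow-checked long multiply can become a helper as well.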
10996 goto MATH_OP2_FLAGS;
11008 MATH_OP2: // For default values of 'ovfl' and 'callNode'
11013 MATH_OP2_FLAGS: // If 'ovfl' and 'callNode' have already been set
11015 /* Pull two values and push back the result */
11017 if (tiVerificationNeeded)
11019 const typeInfo& tiOp1 = impStackTop(1).seTypeInfo;
11020 const typeInfo& tiOp2 = impStackTop().seTypeInfo;
11022 Verify(tiCompatibleWith(tiOp1, tiOp2, true), "different arg type");
11023 if (oper == GT_ADD || oper == GT_DIV || oper == GT_SUB || oper == GT_MUL || oper == GT_MOD)
11025 Verify(tiOp1.IsNumberType(), "not number");
11029 Verify(tiOp1.IsIntegerType(), "not integer");
11032 Verify(!ovfl || tiOp1.IsIntegerType(), "not integer");
11036 #ifdef _TARGET_64BIT_
11037 if (tiOp2.IsNativeIntType())
11041 #endif // _TARGET_64BIT_
11044 op2 = impPopStack().val;
11045 op1 = impPopStack().val;
11047 #if !CPU_HAS_FP_SUPPORT
11048 if (varTypeIsFloating(op1->gtType))
11053 /* Can't do arithmetic with references */
11054 assertImp(genActualType(op1->TypeGet()) != TYP_REF && genActualType(op2->TypeGet()) != TYP_REF);
11056 // Change both to TYP_I_IMPL (impBashVarAddrsToI won't change if it's a true byref, only
11057 // if it is in the stack)
11058 impBashVarAddrsToI(op1, op2);
11060 type = impGetByRefResultType(oper, uns, &op1, &op2);
11062 assert(!ovfl || !varTypeIsFloating(op1->gtType));
11064 /* Special case: "int+0", "int-0", "int*1", "int/1" */
11066 if (op2->gtOper == GT_CNS_INT)
11068 if ((op2->IsIntegralConst(0) && (oper == GT_ADD || oper == GT_SUB)) ||
11069 (op2->IsIntegralConst(1) && (oper == GT_MUL || oper == GT_DIV)))
11072 impPushOnStack(op1, tiRetVal);
11077 #if !FEATURE_X87_DOUBLES
11078 // We can generate a TYP_FLOAT operation that has a TYP_DOUBLE operand
11080 if (varTypeIsFloating(type) && varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType))
11082 if (op1->TypeGet() != type)
11084 // We insert a cast of op1 to 'type'
11085 op1 = gtNewCastNode(type, op1, type);
11087 if (op2->TypeGet() != type)
11089 // We insert a cast of op2 to 'type'
11090 op2 = gtNewCastNode(type, op2, type);
11093 #endif // !FEATURE_X87_DOUBLES
11095 #if SMALL_TREE_NODES
11098 /* These operators can later be transformed into 'GT_CALL' */
11100 assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MUL]);
11101 #ifndef _TARGET_ARM_
11102 assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_DIV]);
11103 assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UDIV]);
11104 assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MOD]);
11105 assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UMOD]);
11107 // It's tempting to use LargeOpOpcode() here, but this logic is *not* saying
11108 // that we'll need to transform into a general large node, but rather specifically
11109 // to a call: by doing it this way, things keep working if there are multiple sizes,
11110 // and a CALL is no longer the largest.
11111 // That said, as of now it *is* a large node, so we'll do this with an assert rather
11113 assert(GenTree::s_gtNodeSizes[GT_CALL] == TREE_NODE_SZ_LARGE);
11114 op1 = new (this, GT_CALL) GenTreeOp(oper, type, op1, op2 DEBUGARG(/*largeNode*/ true));
11117 #endif // SMALL_TREE_NODES
11119 op1 = gtNewOperNode(oper, type, op1, op2);
11122 /* Special case: integer/long division may throw an exception */
11124 if (varTypeIsIntegral(op1->TypeGet()) && op1->OperMayThrow())
11126 op1->gtFlags |= GTF_EXCEPT;
11131 assert(oper == GT_ADD || oper == GT_SUB || oper == GT_MUL);
11132 if (ovflType != TYP_UNKNOWN)
11134 op1->gtType = ovflType;
11136 op1->gtFlags |= (GTF_EXCEPT | GTF_OVERFLOW);
11139 op1->gtFlags |= GTF_UNSIGNED;
11143 impPushOnStack(op1, tiRetVal);
11158 if (tiVerificationNeeded)
11160 const typeInfo& tiVal = impStackTop(1).seTypeInfo;
11161 const typeInfo& tiShift = impStackTop(0).seTypeInfo;
11162 Verify(tiVal.IsIntegerType() && tiShift.IsType(TI_INT), "Bad shift args");
11165 op2 = impPopStack().val;
11166 op1 = impPopStack().val; // operand to be shifted
11167 impBashVarAddrsToI(op1, op2);
11169 type = genActualType(op1->TypeGet());
11170 op1 = gtNewOperNode(oper, type, op1, op2);
11172 impPushOnStack(op1, tiRetVal);
11176 if (tiVerificationNeeded)
11178 tiRetVal = impStackTop().seTypeInfo;
11179 Verify(tiRetVal.IsIntegerType(), "bad int value");
11182 op1 = impPopStack().val;
11183 impBashVarAddrsToI(op1, nullptr);
11184 type = genActualType(op1->TypeGet());
11185 impPushOnStack(gtNewOperNode(GT_NOT, type, op1), tiRetVal);
11189 if (tiVerificationNeeded)
11191 tiRetVal = impStackTop().seTypeInfo;
11192 Verify(tiRetVal.IsType(TI_DOUBLE), "bad R value");
11194 op1 = impPopStack().val;
11195 type = op1->TypeGet();
11196 op1 = gtNewOperNode(GT_CKFINITE, type, op1);
11197 op1->gtFlags |= GTF_EXCEPT;
11199 impPushOnStack(op1, tiRetVal);
11204 val = getI4LittleEndian(codeAddr); // jump distance
11205 jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int32)) + val);
11209 val = getI1LittleEndian(codeAddr); // jump distance
11210 jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int8)) + val);
11214 if (compIsForInlining())
11216 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_LEAVE);
11220 JITDUMP(" %04X", jmpAddr);
11221 if (block->bbJumpKind != BBJ_LEAVE)
11223 impResetLeaveBlock(block, jmpAddr);
11226 assert(jmpAddr == block->bbJumpDest->bbCodeOffs);
11227 impImportLeave(block);
11228 impNoteBranchOffs();
11234 jmpDist = (sz == 1) ? getI1LittleEndian(codeAddr) : getI4LittleEndian(codeAddr);
11236 if (compIsForInlining() && jmpDist == 0)
11241 impNoteBranchOffs();
11247 case CEE_BRFALSE_S:
11249 /* Pop the comparand (now there's a neat term) from the stack */
11250 if (tiVerificationNeeded)
11252 typeInfo& tiVal = impStackTop().seTypeInfo;
11253 Verify(tiVal.IsObjRef() || tiVal.IsByRef() || tiVal.IsIntegerType() || tiVal.IsMethod(),
11257 op1 = impPopStack().val;
11258 type = op1->TypeGet();
11260 // brfalse and brtrue are only allowed on I4, refs, and byrefs.
11261 if (!opts.MinOpts() && !opts.compDbgCode && block->bbJumpDest == block->bbNext)
11263 block->bbJumpKind = BBJ_NONE;
11265 if (op1->gtFlags & GTF_GLOB_EFFECT)
11267 op1 = gtUnusedValNode(op1);
11276 if (op1->OperIsCompare())
11278 if (opcode == CEE_BRFALSE || opcode == CEE_BRFALSE_S)
11280 // Flip the sense of the compare
11282 op1 = gtReverseCond(op1);
11287 /* We'll compare against an equally-sized integer 0 */
11288 /* For small types, we always compare against int */
11289 op2 = gtNewZeroConNode(genActualType(op1->gtType));
11291 /* Create the comparison operator and try to fold it */
11293 oper = (opcode == CEE_BRTRUE || opcode == CEE_BRTRUE_S) ? GT_NE : GT_EQ;
11294 op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
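// E.g. "brtrue target" yields GT_JTRUE(GT_NE(value, 0)) and "brfalse target" yields
// GT_JTRUE(GT_EQ(value, 0)); the GT_JTRUE node itself is created further below.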
11301 /* Fold comparison if we can */
11303 op1 = gtFoldExpr(op1);
11305 /* Try to fold the really simple cases like 'iconst *, ifne/ifeq'*/
11306 /* Don't make any blocks unreachable in import only mode */
11308 if ((op1->gtOper == GT_CNS_INT) && !compIsForImportOnly())
11310 /* gtFoldExpr() should prevent this as we don't want to make any blocks
11311 unreachable under compDbgCode */
11312 assert(!opts.compDbgCode);
11314 BBjumpKinds foldedJumpKind = (BBjumpKinds)(op1->gtIntCon.gtIconVal ? BBJ_ALWAYS : BBJ_NONE);
11315 assertImp((block->bbJumpKind == BBJ_COND) // normal case
11316 || (block->bbJumpKind == foldedJumpKind)); // this can happen if we are reimporting the
11317 // block for the second time
11319 block->bbJumpKind = foldedJumpKind;
11323 if (op1->gtIntCon.gtIconVal)
11325 printf("\nThe conditional jump becomes an unconditional jump to BB%02u\n",
11326 block->bbJumpDest->bbNum);
11330 printf("\nThe block falls through into the next BB%02u\n", block->bbNext->bbNum);
11337 op1 = gtNewOperNode(GT_JTRUE, TYP_VOID, op1);
11339 /* GT_JTRUE is handled specially for non-empty stacks. See 'addStmt'
11340 in impImportBlock(block). For correct line numbers, spill stack. */
11342 if (opts.compDbgCode && impCurStmtOffs != BAD_IL_OFFSET)
11344 impSpillStackEnsure(true);
11371 if (tiVerificationNeeded)
11373 verVerifyCond(impStackTop(1).seTypeInfo, impStackTop().seTypeInfo, opcode);
11374 tiRetVal = typeInfo(TI_INT);
11377 op2 = impPopStack().val;
11378 op1 = impPopStack().val;
11380 #ifdef _TARGET_64BIT_
11381 if (varTypeIsI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_INT))
11383 op2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
11385 else if (varTypeIsI(op2->TypeGet()) && (genActualType(op1->TypeGet()) == TYP_INT))
11387 op1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
11389 #endif // _TARGET_64BIT_
11391 assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
11392 varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet()) ||
11393 varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
11395 /* Create the comparison node */
11397 op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
11399 /* TODO: setting both flags when only one is appropriate */
11400 if (opcode == CEE_CGT_UN || opcode == CEE_CLT_UN)
11402 op1->gtFlags |= GTF_RELOP_NAN_UN | GTF_UNSIGNED;
11405 impPushOnStack(op1, tiRetVal);
11411 goto CMP_2_OPs_AND_BR;
11416 goto CMP_2_OPs_AND_BR;
11421 goto CMP_2_OPs_AND_BR_UN;
11426 goto CMP_2_OPs_AND_BR;
11431 goto CMP_2_OPs_AND_BR_UN;
11436 goto CMP_2_OPs_AND_BR;
11441 goto CMP_2_OPs_AND_BR_UN;
11446 goto CMP_2_OPs_AND_BR;
11451 goto CMP_2_OPs_AND_BR_UN;
11456 goto CMP_2_OPs_AND_BR_UN;
11458 CMP_2_OPs_AND_BR_UN:
11461 goto CMP_2_OPs_AND_BR_ALL;
11465 goto CMP_2_OPs_AND_BR_ALL;
11466 CMP_2_OPs_AND_BR_ALL:
11468 if (tiVerificationNeeded)
11470 verVerifyCond(impStackTop(1).seTypeInfo, impStackTop().seTypeInfo, opcode);
11473 /* Pull two values */
11474 op2 = impPopStack().val;
11475 op1 = impPopStack().val;
11477 #ifdef _TARGET_64BIT_
11478 if ((op1->TypeGet() == TYP_I_IMPL) && (genActualType(op2->TypeGet()) == TYP_INT))
11480 op2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
11482 else if ((op2->TypeGet() == TYP_I_IMPL) && (genActualType(op1->TypeGet()) == TYP_INT))
11484 op1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
11486 #endif // _TARGET_64BIT_
11488 assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
11489 varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet()) ||
11490 varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
11492 if (!opts.MinOpts() && !opts.compDbgCode && block->bbJumpDest == block->bbNext)
11494 block->bbJumpKind = BBJ_NONE;
11496 if (op1->gtFlags & GTF_GLOB_EFFECT)
11498 impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
11499 "Branch to next Optimization, op1 side effect"));
11500 impAppendTree(gtUnusedValNode(op1), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
11502 if (op2->gtFlags & GTF_GLOB_EFFECT)
11504 impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
11505 "Branch to next Optimization, op2 side effect"));
11506 impAppendTree(gtUnusedValNode(op2), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
11510 if ((op1->gtFlags | op2->gtFlags) & GTF_GLOB_EFFECT)
11512 impNoteLastILoffs();
11517 #if !FEATURE_X87_DOUBLES
11518 // We can generate a compare of differently sized floating point op1 and op2
11519 // We insert a cast
11521 if (varTypeIsFloating(op1->TypeGet()))
11523 if (op1->TypeGet() != op2->TypeGet())
11525 assert(varTypeIsFloating(op2->TypeGet()));
11527 // say op1=double, op2=float. To avoid loss of precision
11528 // while comparing, op2 is converted to double and double
11529 // comparison is done.
11530 if (op1->TypeGet() == TYP_DOUBLE)
11532 // We insert a cast of op2 to TYP_DOUBLE
11533 op2 = gtNewCastNode(TYP_DOUBLE, op2, TYP_DOUBLE);
11535 else if (op2->TypeGet() == TYP_DOUBLE)
11537 // We insert a cast of op1 to TYP_DOUBLE
11538 op1 = gtNewCastNode(TYP_DOUBLE, op1, TYP_DOUBLE);
11542 #endif // !FEATURE_X87_DOUBLES
11544 /* Create and append the operator */
11546 op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
11550 op1->gtFlags |= GTF_UNSIGNED;
11555 op1->gtFlags |= GTF_RELOP_NAN_UN;
11561 assert(!compIsForInlining());
11563 if (tiVerificationNeeded)
11565 Verify(impStackTop().seTypeInfo.IsType(TI_INT), "Bad switch val");
11567 /* Pop the switch value off the stack */
11568 op1 = impPopStack().val;
11569 assertImp(genActualTypeIsIntOrI(op1->TypeGet()));
11571 #ifdef _TARGET_64BIT_
11572 // Widen 'op1' on 64-bit targets
11573 if (op1->TypeGet() != TYP_I_IMPL)
11575 if (op1->OperGet() == GT_CNS_INT)
11577 op1->gtType = TYP_I_IMPL;
11581 op1 = gtNewCastNode(TYP_I_IMPL, op1, TYP_I_IMPL);
11584 #endif // _TARGET_64BIT_
11585 assert(genActualType(op1->TypeGet()) == TYP_I_IMPL);
11587 /* We can create a switch node */
11589 op1 = gtNewOperNode(GT_SWITCH, TYP_VOID, op1);
11591 val = (int)getU4LittleEndian(codeAddr);
11592 codeAddr += 4 + val * 4; // skip over the switch-table
11596 /************************** Casting OPCODES ***************************/
11598 case CEE_CONV_OVF_I1:
11601 case CEE_CONV_OVF_I2:
11602 lclTyp = TYP_SHORT;
11604 case CEE_CONV_OVF_I:
11605 lclTyp = TYP_I_IMPL;
11607 case CEE_CONV_OVF_I4:
11610 case CEE_CONV_OVF_I8:
11614 case CEE_CONV_OVF_U1:
11615 lclTyp = TYP_UBYTE;
11617 case CEE_CONV_OVF_U2:
11620 case CEE_CONV_OVF_U:
11621 lclTyp = TYP_U_IMPL;
11623 case CEE_CONV_OVF_U4:
11626 case CEE_CONV_OVF_U8:
11627 lclTyp = TYP_ULONG;
11630 case CEE_CONV_OVF_I1_UN:
11633 case CEE_CONV_OVF_I2_UN:
11634 lclTyp = TYP_SHORT;
11636 case CEE_CONV_OVF_I_UN:
11637 lclTyp = TYP_I_IMPL;
11639 case CEE_CONV_OVF_I4_UN:
11642 case CEE_CONV_OVF_I8_UN:
11646 case CEE_CONV_OVF_U1_UN:
11647 lclTyp = TYP_UBYTE;
11649 case CEE_CONV_OVF_U2_UN:
11652 case CEE_CONV_OVF_U_UN:
11653 lclTyp = TYP_U_IMPL;
11655 case CEE_CONV_OVF_U4_UN:
11658 case CEE_CONV_OVF_U8_UN:
11659 lclTyp = TYP_ULONG;
11664 goto CONV_OVF_COMMON;
11667 goto CONV_OVF_COMMON;
11677 lclTyp = TYP_SHORT;
11680 lclTyp = TYP_I_IMPL;
11690 lclTyp = TYP_UBYTE;
11695 #if (REGSIZE_BYTES == 8)
11697 lclTyp = TYP_U_IMPL;
11701 lclTyp = TYP_U_IMPL;
11708 lclTyp = TYP_ULONG;
11712 lclTyp = TYP_FLOAT;
11715 lclTyp = TYP_DOUBLE;
11718 case CEE_CONV_R_UN:
11719 lclTyp = TYP_DOUBLE;
11733 // just check that we have a number on the stack
11734 if (tiVerificationNeeded)
11736 const typeInfo& tiVal = impStackTop().seTypeInfo;
11737 Verify(tiVal.IsNumberType(), "bad arg");
11739 #ifdef _TARGET_64BIT_
11740 bool isNative = false;
11744 case CEE_CONV_OVF_I:
11745 case CEE_CONV_OVF_I_UN:
11747 case CEE_CONV_OVF_U:
11748 case CEE_CONV_OVF_U_UN:
11752 // leave 'isNative' = false;
11757 tiRetVal = typeInfo::nativeInt();
11760 #endif // _TARGET_64BIT_
11762 tiRetVal = typeInfo(lclTyp).NormaliseForStack();
11766 // Only conversions from FLOAT or DOUBLE to an integer type,
11767 // and conversions from ULONG (or LONG on ARM) to DOUBLE, are morphed to calls
11769 if (varTypeIsFloating(lclTyp))
11771 callNode = varTypeIsLong(impStackTop().val) || uns // uint->dbl gets turned into uint->long->dbl
11772 #ifdef _TARGET_64BIT_
11773 // TODO-ARM64-Bug?: This was AMD64; I enabled it for ARM64 also. OK?
11774 // TYP_BYREF could be used as TYP_I_IMPL which is long.
11775 // TODO-CQ: remove this when we lower casts long/ulong --> float/double
11776 // and generate SSE2 code instead of going through helper calls.
11777 || (impStackTop().val->TypeGet() == TYP_BYREF)
11783 callNode = varTypeIsFloating(impStackTop().val->TypeGet());
11786 // At this point uns, ovf, callNode all set
11788 op1 = impPopStack().val;
11789 impBashVarAddrsToI(op1);
11791 if (varTypeIsSmall(lclTyp) && !ovfl && op1->gtType == TYP_INT && op1->gtOper == GT_AND)
11793 op2 = op1->gtOp.gtOp2;
11795 if (op2->gtOper == GT_CNS_INT)
11797 ssize_t ival = op2->gtIntCon.gtIconVal;
11798 ssize_t mask, umask;
11814 assert(!"unexpected type");
11818 if (((ival & umask) == ival) || ((ival & mask) == ival && uns))
11820 /* Toss the cast, it's a waste of time */
11822 impPushOnStack(op1, tiRetVal);
11825 else if (ival == mask)
11827 /* Toss the masking, it's a waste of time, since
11828 we sign-extend from the small value anyway */
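// E.g. for conv.i1 applied to "x & 0xFF": the narrowing cast already looks only at the
// low 8 bits, so the explicit AND adds nothing and can be stripped.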
11830 op1 = op1->gtOp.gtOp1;
11835 /* The 'op2' sub-operand of a cast is the 'real' type number,
11836 since the result of a cast to one of the 'small' integer
11837 types is an integer.
11840 type = genActualType(lclTyp);
11842 #if SMALL_TREE_NODES
11845 op1 = gtNewCastNodeL(type, op1, lclTyp);
11848 #endif // SMALL_TREE_NODES
11850 op1 = gtNewCastNode(type, op1, lclTyp);
11855 op1->gtFlags |= (GTF_OVERFLOW | GTF_EXCEPT);
11859 op1->gtFlags |= GTF_UNSIGNED;
11861 impPushOnStack(op1, tiRetVal);
11865 if (tiVerificationNeeded)
11867 tiRetVal = impStackTop().seTypeInfo;
11868 Verify(tiRetVal.IsNumberType(), "Bad arg");
11871 op1 = impPopStack().val;
11872 impBashVarAddrsToI(op1, nullptr);
11873 impPushOnStack(gtNewOperNode(GT_NEG, genActualType(op1->gtType), op1), tiRetVal);
11877 if (tiVerificationNeeded)
11882 /* Pull the top value from the stack */
11884 op1 = impPopStack(clsHnd).val;
11886 /* Get hold of the type of the value being duplicated */
11888 lclTyp = genActualType(op1->gtType);
11890 /* Does the value have any side effects? */
11892 if ((op1->gtFlags & GTF_SIDE_EFFECT) || opts.compDbgCode)
11894 // Since we are throwing away the value, just normalize
11895 // it to its address. This is more efficient.
11897 if (varTypeIsStruct(op1))
11899 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
11900 // Non-calls, such as obj or ret_expr, have to go through this.
11901 // Calls with large struct return value have to go through this.
11902 // Helper calls with small struct return value also have to go
11903 // through this since they do not follow Unix calling convention.
11904 if (op1->gtOper != GT_CALL || !IsMultiRegReturnedType(clsHnd) ||
11905 op1->AsCall()->gtCallType == CT_HELPER)
11906 #endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
11908 op1 = impGetStructAddr(op1, clsHnd, (unsigned)CHECK_SPILL_ALL, false);
11912 // If op1 is non-overflow cast, throw it away since it is useless.
11913 // Another reason for throwing away the useless cast is in the context of
11914 // implicit tail calls when the operand of pop is GT_CAST(GT_CALL(..)).
11915 // The cast gets added as part of importing GT_CALL, which gets in the way
11916 // of fgMorphCall() on the forms of tail call nodes that we assert.
11917 if ((op1->gtOper == GT_CAST) && !op1->gtOverflow())
11919 op1 = op1->gtOp.gtOp1;
11922 // If 'op1' is an expression, create an assignment node.
11923 // Helps analyses (like CSE) to work fine.
11925 if (op1->gtOper != GT_CALL)
11927 op1 = gtUnusedValNode(op1);
11930 /* Append the value to the tree list */
11934 /* No side effects - just throw the <BEEP> thing away */
11939 if (tiVerificationNeeded)
11941 // Dup could start the beginning of a delegate creation sequence; remember that
11942 delegateCreateStart = codeAddr - 1;
11946 // Convert a (dup, stloc) sequence into a (stloc, ldloc) sequence in the following cases:
11947 // - If this is non-debug code - so that CSE will recognize the two as equal.
11948 // This helps eliminate a redundant bounds check in cases such as:
11949 // ariba[i+3] += some_value;
11950 // - If the top of the stack is a non-leaf that may be expensive to clone.
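// For example, "dup; stloc.s V_2" becomes "stloc.s V_2; ldloc.s V_2": rather than cloning the
// value's tree, the second use is just a load of the temp, which later phases (CSE, range check
// elimination) can reason about more easily.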
11952 if (codeAddr < codeEndp)
11954 OPCODE nextOpcode = (OPCODE)getU1LittleEndian(codeAddr);
11955 if (impIsAnySTLOC(nextOpcode))
11957 if (!opts.compDbgCode)
11959 insertLdloc = true;
11962 GenTree* stackTop = impStackTop().val;
11963 if (!stackTop->IsIntegralConst(0) && !stackTop->IsFPZero() && !stackTop->IsLocal())
11965 insertLdloc = true;
11971 /* Pull the top value from the stack */
11972 op1 = impPopStack(tiRetVal);
11974 /* Clone the value */
11975 op1 = impCloneExpr(op1, &op2, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_ALL,
11976 nullptr DEBUGARG("DUP instruction"));
11978 /* Either the tree started with no global effects, or impCloneExpr
11979 evaluated the tree to a temp and returned two copies of that
11980 temp. Either way, neither op1 nor op2 should have side effects.
11982 assert(!(op1->gtFlags & GTF_GLOB_EFFECT) && !(op2->gtFlags & GTF_GLOB_EFFECT));
11984 /* Push the tree/temp back on the stack */
11985 impPushOnStack(op1, tiRetVal);
11987 /* Push the copy on the stack */
11988 impPushOnStack(op2, tiRetVal);
11996 lclTyp = TYP_SHORT;
12005 lclTyp = TYP_I_IMPL;
12007 case CEE_STIND_REF:
12011 lclTyp = TYP_FLOAT;
12014 lclTyp = TYP_DOUBLE;
12018 if (tiVerificationNeeded)
12020 typeInfo instrType(lclTyp);
12021 #ifdef _TARGET_64BIT_
12022 if (opcode == CEE_STIND_I)
12024 instrType = typeInfo::nativeInt();
12026 #endif // _TARGET_64BIT_
12027 verVerifySTIND(impStackTop(1).seTypeInfo, impStackTop(0).seTypeInfo, instrType);
12031 compUnsafeCastUsed = true; // Have to go conservative
12036 op2 = impPopStack().val; // value to store
12037 op1 = impPopStack().val; // address to store to
12039 // you can indirect off of a TYP_I_IMPL (if we are in C) or a BYREF
12040 assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
12042 impBashVarAddrsToI(op1, op2);
12044 op2 = impImplicitR4orR8Cast(op2, lclTyp);
12046 #ifdef _TARGET_64BIT_
12047 // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
12048 if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType))
12050 op2->gtType = TYP_I_IMPL;
12054 // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
12056 if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT))
12058 assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
12059 op2 = gtNewCastNode(TYP_INT, op2, TYP_INT);
12061 // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
12063 if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT))
12065 assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
12066 op2 = gtNewCastNode(TYP_I_IMPL, op2, TYP_I_IMPL);
12069 #endif // _TARGET_64BIT_
12071 if (opcode == CEE_STIND_REF)
12073 // STIND_REF can be used to store TYP_INT, TYP_I_IMPL, TYP_REF, or TYP_BYREF
12074 assertImp(varTypeIsIntOrI(op2->gtType) || varTypeIsGC(op2->gtType));
12075 lclTyp = genActualType(op2->TypeGet());
12078 // Check target type.
12080 if (op2->gtType == TYP_BYREF || lclTyp == TYP_BYREF)
12082 if (op2->gtType == TYP_BYREF)
12084 assertImp(lclTyp == TYP_BYREF || lclTyp == TYP_I_IMPL);
12086 else if (lclTyp == TYP_BYREF)
12088 assertImp(op2->gtType == TYP_BYREF || varTypeIsIntOrI(op2->gtType));
12093 assertImp(genActualType(op2->gtType) == genActualType(lclTyp) ||
12094 ((lclTyp == TYP_I_IMPL) && (genActualType(op2->gtType) == TYP_INT)) ||
12095 (varTypeIsFloating(op2->gtType) && varTypeIsFloating(lclTyp)));
12099 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
12101 // stind could point anywhere, for example a boxed class static int
12102 op1->gtFlags |= GTF_IND_TGTANYWHERE;
12104 if (prefixFlags & PREFIX_VOLATILE)
12106 assert(op1->OperGet() == GT_IND);
12107 op1->gtFlags |= GTF_DONT_CSE; // Can't CSE a volatile
12108 op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
12109 op1->gtFlags |= GTF_IND_VOLATILE;
12112 if (prefixFlags & PREFIX_UNALIGNED)
12114 assert(op1->OperGet() == GT_IND);
12115 op1->gtFlags |= GTF_IND_UNALIGNED;
12118 op1 = gtNewAssignNode(op1, op2);
12119 op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
12121 // Spill side-effects AND global-data-accesses
12122 if (verCurrentState.esStackDepth > 0)
12124 impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STIND"));
12133 lclTyp = TYP_SHORT;
12142 case CEE_LDIND_REF:
12146 lclTyp = TYP_I_IMPL;
12149 lclTyp = TYP_FLOAT;
12152 lclTyp = TYP_DOUBLE;
12155 lclTyp = TYP_UBYTE;
12162 if (tiVerificationNeeded)
12164 typeInfo lclTiType(lclTyp);
12165 #ifdef _TARGET_64BIT_
12166 if (opcode == CEE_LDIND_I)
12168 lclTiType = typeInfo::nativeInt();
12170 #endif // _TARGET_64BIT_
12171 tiRetVal = verVerifyLDIND(impStackTop().seTypeInfo, lclTiType);
12172 tiRetVal.NormaliseForStack();
12176 compUnsafeCastUsed = true; // Have to go conservative
12181 op1 = impPopStack().val; // address to load from
12182 impBashVarAddrsToI(op1);
12184 #ifdef _TARGET_64BIT_
12185 // Allow an upcast of op1 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
12187 if (genActualType(op1->gtType) == TYP_INT)
12189 assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
12190 op1 = gtNewCastNode(TYP_I_IMPL, op1, TYP_I_IMPL);
12194 assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
12196 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
12198 // ldind could point anywhere, for example a boxed class static int
12199 op1->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
12201 if (prefixFlags & PREFIX_VOLATILE)
12203 assert(op1->OperGet() == GT_IND);
12204 op1->gtFlags |= GTF_DONT_CSE; // Can't CSE a volatile
12205 op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
12206 op1->gtFlags |= GTF_IND_VOLATILE;
12209 if (prefixFlags & PREFIX_UNALIGNED)
12211 assert(op1->OperGet() == GT_IND);
12212 op1->gtFlags |= GTF_IND_UNALIGNED;
12215 impPushOnStack(op1, tiRetVal);
12219 case CEE_UNALIGNED:
12222 val = getU1LittleEndian(codeAddr);
12224 JITDUMP(" %u", val);
12225 if ((val != 1) && (val != 2) && (val != 4))
12227 BADCODE("Alignment unaligned. must be 1, 2, or 4");
12230 Verify(!(prefixFlags & PREFIX_UNALIGNED), "Multiple unaligned. prefixes");
12231 prefixFlags |= PREFIX_UNALIGNED;
12233 impValidateMemoryAccessOpcode(codeAddr, codeEndp, false);
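// The prefix itself only records PREFIX_UNALIGNED; fall through and re-decode the
// memory-access opcode that follows so it is imported with the prefix flag applied.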
12236 opcode = (OPCODE)getU1LittleEndian(codeAddr);
12237 codeAddr += sizeof(__int8);
12238 opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
12239 goto DECODE_OPCODE;
12243 Verify(!(prefixFlags & PREFIX_VOLATILE), "Multiple volatile. prefixes");
12244 prefixFlags |= PREFIX_VOLATILE;
12246 impValidateMemoryAccessOpcode(codeAddr, codeEndp, true);
12253 // Need to do a lookup here so that we perform an access check
12254 // and do a NOWAY if protections are violated
12255 _impResolveToken(CORINFO_TOKENKIND_Method);
12257 JITDUMP(" %08X", resolvedToken.token);
12259 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef*/,
12260 addVerifyFlag(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN)),
12263 // This check really only applies to intrinsic Array.Address methods
12264 if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE)
12266 NO_WAY("Currently do not support LDFTN of Parameterized functions");
12269 // Do this before DO_LDFTN since CEE_LDVIRTFTN does it on its own.
12270 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12272 if (tiVerificationNeeded)
12274 // LDFTN could start the beginning of a delegate creation sequence, so remember that
12275 delegateCreateStart = codeAddr - 2;
12277 // check any constraints on the callee's class and type parameters
12278 VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
12279 "method has unsatisfied class constraints");
12280 VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(resolvedToken.hClass,
12281 resolvedToken.hMethod),
12282 "method has unsatisfied method constraints");
12284 mflags = callInfo.verMethodFlags;
12285 Verify(!(mflags & CORINFO_FLG_CONSTRUCTOR), "LDFTN on a constructor");
12289 op1 = impMethodPointer(&resolvedToken, &callInfo);
12290 if (compDonotInline())
12295 impPushOnStack(op1, typeInfo(resolvedToken.hMethod));
12300 case CEE_LDVIRTFTN:
12302 /* Get the method token */
12304 _impResolveToken(CORINFO_TOKENKIND_Method);
12306 JITDUMP(" %08X", resolvedToken.token);
12308 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef */,
12309 addVerifyFlag(combine(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN),
12310 CORINFO_CALLINFO_CALLVIRT)),
12313 // This check really only applies to intrinsic Array.Address methods
12314 if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE)
12316 NO_WAY("Currently do not support LDFTN of Parameterized functions");
12319 mflags = callInfo.methodFlags;
12321 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12323 if (compIsForInlining())
12325 if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL))
12327 compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDVIRTFN_ON_NON_VIRTUAL);
12332 CORINFO_SIG_INFO& ftnSig = callInfo.sig;
12334 if (tiVerificationNeeded)
12337 Verify(ftnSig.hasThis(), "ldvirtftn on a static method");
12338 Verify(!(mflags & CORINFO_FLG_CONSTRUCTOR), "LDVIRTFTN on a constructor");
12340 // JIT32 verifier rejects verifiable ldvirtftn pattern
12341 typeInfo declType =
12342 verMakeTypeInfo(resolvedToken.hClass, true); // Change TI_STRUCT to TI_REF when necessary
12344 typeInfo arg = impStackTop().seTypeInfo;
12345 Verify((arg.IsType(TI_REF) || arg.IsType(TI_NULL)) && tiCompatibleWith(arg, declType, true),
12348 CORINFO_CLASS_HANDLE instanceClassHnd = info.compClassHnd;
12349 if (!(arg.IsType(TI_NULL) || (mflags & CORINFO_FLG_STATIC)))
12351 instanceClassHnd = arg.GetClassHandleForObjRef();
12354 // check any constraints on the method's class and type parameters
12355 VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
12356 "method has unsatisfied class constraints");
12357 VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(resolvedToken.hClass,
12358 resolvedToken.hMethod),
12359 "method has unsatisfied method constraints");
12361 if (mflags & CORINFO_FLG_PROTECTED)
12363 Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
12364 "Accessing protected method through wrong type.");
12368 /* Get the object-ref */
12369 op1 = impPopStack().val;
12370 assertImp(op1->gtType == TYP_REF);
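// If the target turns out not to need a runtime lookup (it is final, static, or otherwise
// non-virtual), the object reference is not needed to form the function pointer, but it must
// still be evaluated for its side effects before being discarded.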
12372 if (opts.IsReadyToRun())
12374 if (callInfo.kind != CORINFO_VIRTUALCALL_LDVIRTFTN)
12376 if (op1->gtFlags & GTF_SIDE_EFFECT)
12378 op1 = gtUnusedValNode(op1);
12379 impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
12384 else if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL))
12386 if (op1->gtFlags & GTF_SIDE_EFFECT)
12388 op1 = gtUnusedValNode(op1);
12389 impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
12394 GenTreePtr fptr = impImportLdvirtftn(op1, &resolvedToken, &callInfo);
12395 if (compDonotInline())
12400 impPushOnStack(fptr, typeInfo(resolvedToken.hMethod));
12405 case CEE_CONSTRAINED:
12407 assertImp(sz == sizeof(unsigned));
12408 impResolveToken(codeAddr, &constrainedResolvedToken, CORINFO_TOKENKIND_Constrained);
12409 codeAddr += sizeof(unsigned); // prefix instructions must increment codeAddr manually
12410 JITDUMP(" (%08X) ", constrainedResolvedToken.token);
12412 Verify(!(prefixFlags & PREFIX_CONSTRAINED), "Multiple constrained. prefixes");
12413 prefixFlags |= PREFIX_CONSTRAINED;
12416 OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
12417 if (actualOpcode != CEE_CALLVIRT)
12419 BADCODE("constrained. has to be followed by callvirt");
12426 JITDUMP(" readonly.");
12428 Verify(!(prefixFlags & PREFIX_READONLY), "Multiple readonly. prefixes");
12429 prefixFlags |= PREFIX_READONLY;
12432 OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
12433 if (actualOpcode != CEE_LDELEMA && !impOpcodeIsCallOpcode(actualOpcode))
12435 BADCODE("readonly. has to be followed by ldelema or call");
12445 Verify(!(prefixFlags & PREFIX_TAILCALL_EXPLICIT), "Multiple tailcall. prefixes");
12446 prefixFlags |= PREFIX_TAILCALL_EXPLICIT;
12449 OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
12450 if (!impOpcodeIsCallOpcode(actualOpcode))
12452 BADCODE("tailcall. has to be followed by call, callvirt or calli");
12460 /* Since we will implicitly insert newObjThisPtr at the start of the
12461 argument list, spill any GTF_ORDER_SIDEEFF */
12462 impSpillSpecialSideEff();
12464 /* NEWOBJ does not respond to TAIL */
12465 prefixFlags &= ~PREFIX_TAILCALL_EXPLICIT;
12467 /* NEWOBJ does not respond to CONSTRAINED */
12468 prefixFlags &= ~PREFIX_CONSTRAINED;
12470 #if COR_JIT_EE_VERSION > 460
12471 _impResolveToken(CORINFO_TOKENKIND_NewObj);
12473 _impResolveToken(CORINFO_TOKENKIND_Method);
12476 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef*/,
12477 addVerifyFlag(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_ALLOWINSTPARAM)),
12480 if (compIsForInlining())
12482 if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
12484 // Check to see if this call violates the boundary.
12485 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_SECURITY);
12490 mflags = callInfo.methodFlags;
12492 if ((mflags & (CORINFO_FLG_STATIC | CORINFO_FLG_ABSTRACT)) != 0)
12494 BADCODE("newobj on static or abstract method");
12497 // Insert the security callout before any actual code is generated
12498 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12500 // There are three different cases for new
12501 // Object size is variable (depends on arguments)
12502 // 1) Object is an array (arrays treated specially by the EE)
12503 // 2) Object is some other variable sized object (e.g. String)
12504 // 3) Class Size can be determined beforehand (normal case)
12505 // In the first case, we need to call a NEWOBJ helper (multinewarray)
12506 // in the second case we call the constructor with a '0' this pointer
12507 // In the third case we allocate the memory, then call the constructor
12509 clsFlags = callInfo.classFlags;
12510 if (clsFlags & CORINFO_FLG_ARRAY)
12512 if (tiVerificationNeeded)
12514 CORINFO_CLASS_HANDLE elemTypeHnd;
12515 INDEBUG(CorInfoType corType =)
12516 info.compCompHnd->getChildType(resolvedToken.hClass, &elemTypeHnd);
12517 assert(!(elemTypeHnd == nullptr && corType == CORINFO_TYPE_VALUECLASS));
12518 Verify(elemTypeHnd == nullptr ||
12519 !(info.compCompHnd->getClassAttribs(elemTypeHnd) & CORINFO_FLG_CONTAINS_STACK_PTR),
12520 "newarr of byref-like objects");
12521 verVerifyCall(opcode, &resolvedToken, nullptr, ((prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0),
12522 ((prefixFlags & PREFIX_READONLY) != 0), delegateCreateStart, codeAddr - 1,
12523 &callInfo DEBUGARG(info.compFullName));
12525 // Arrays need to call the NEWOBJ helper.
12526 assertImp(clsFlags & CORINFO_FLG_VAROBJSIZE);
12528 impImportNewObjArray(&resolvedToken, &callInfo);
12529 if (compDonotInline())
12537 // At present this can only be String
12538 else if (clsFlags & CORINFO_FLG_VAROBJSIZE)
12540 if (IsTargetAbi(CORINFO_CORERT_ABI))
12542 // The dummy argument does not exist in CoreRT
12543 newObjThisPtr = nullptr;
12547 // This is the case for variable-sized objects that are not
12548 // arrays. In this case, call the constructor with a null 'this'
12550 newObjThisPtr = gtNewIconNode(0, TYP_REF);
12553 /* Remember that this basic block contains 'new' of an object */
12554 block->bbFlags |= BBF_HAS_NEWOBJ;
12555 optMethodFlags |= OMF_HAS_NEWOBJ;
12559 // This is the normal case where the size of the object is
12560 // fixed. Allocate the memory and call the constructor.
12562 // Note: We cannot add a peep to avoid use of temp here
12563 // because we don't have enough interference info to detect when
12564 // sources and destination interfere, for example: s = new S(ref);
12566 // TODO: Find the correct place to introduce a general
12567 // reverse copy prop for struct return values from newobj or
12568 // any function returning structs.
12570 /* get a temporary for the new object */
12571 lclNum = lvaGrabTemp(true DEBUGARG("NewObj constructor temp"));
12573 // In the value class case we only need clsHnd for size calcs.
12575 // The lookup of the code pointer will be handled by CALL in this case
12576 if (clsFlags & CORINFO_FLG_VALUECLASS)
12578 if (compIsForInlining())
12580 // If value class has GC fields, inform the inliner. It may choose to
12581 // bail out on the inline.
12582 DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
12583 if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0)
12585 compInlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT);
12586 if (compInlineResult->IsFailure())
12591 // Do further notification in the case where the call site is rare;
12592 // some policies do not track the relative hotness of call sites for
12593 // "always" inline cases.
12594 if (impInlineInfo->iciBlock->isRunRarely())
12596 compInlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT);
12597 if (compInlineResult->IsFailure())
12605 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
12606 unsigned size = info.compCompHnd->getClassSize(resolvedToken.hClass);
12608 if (impIsPrimitive(jitTyp))
12610 lvaTable[lclNum].lvType = JITtype2varType(jitTyp);
12614 // The local variable itself is the allocated space.
12615 // Here we need the unsafe value cls check, since the address of the struct is taken for further use
12616 // and is potentially exploitable.
12617 lvaSetStruct(lclNum, resolvedToken.hClass, true /* unsafe value cls check */);
12620 // Append a tree to zero-out the temp
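// (Zero-initializing the temp before calling the constructor keeps any GC-tracked fields of
// the struct in a well-defined state while the constructor runs.)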
12621 newObjThisPtr = gtNewLclvNode(lclNum, lvaTable[lclNum].TypeGet());
12623 newObjThisPtr = gtNewBlkOpNode(newObjThisPtr, // Dest
12624 gtNewIconNode(0), // Value
12626 false, // isVolatile
12627 false); // not copyBlock
12628 impAppendTree(newObjThisPtr, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
12630 // Obtain the address of the temp
12632 gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewLclvNode(lclNum, lvaTable[lclNum].TypeGet()));
12636 #ifdef FEATURE_READYTORUN_COMPILER
12637 if (opts.IsReadyToRun())
12639 op1 = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_NEW, TYP_REF);
12640 usingReadyToRunHelper = (op1 != nullptr);
12643 if (!usingReadyToRunHelper)
12646 op1 = impParentClassTokenToHandle(&resolvedToken, nullptr, TRUE);
12647 if (op1 == nullptr)
12648 { // compDonotInline()
12652 // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
12653 // and the newfast call with a single call to a dynamic R2R cell that will:
12654 // 1) Load the context
12655 // 2) Perform the generic dictionary lookup and caching, and generate the appropriate
12657 // 3) Allocate and return the new object
12658 // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
12660 op1 = gtNewAllocObjNode(info.compCompHnd->getNewHelper(&resolvedToken, info.compMethodHnd),
12661 resolvedToken.hClass, TYP_REF, op1);
12664 // Remember that this basic block contains 'new' of an object
12665 block->bbFlags |= BBF_HAS_NEWOBJ;
12666 optMethodFlags |= OMF_HAS_NEWOBJ;
12668 // Append the assignment to the temp/local. Don't need to spill
12669 // at all as we are just calling an EE-Jit helper which can only
12670 // cause an (async) OutOfMemoryException.
12672 // We assign the newly allocated object (by a GT_ALLOCOBJ node)
12673 // to a temp. Note that the pattern "temp = allocObj" is required
12674 // by ObjectAllocator phase to be able to determine GT_ALLOCOBJ nodes
12675 // without exhaustive walk over all expressions.
12677 impAssignTempGen(lclNum, op1, (unsigned)CHECK_SPILL_NONE);
12679 newObjThisPtr = gtNewLclvNode(lclNum, TYP_REF);
12686 /* CALLI does not respond to CONSTRAINED */
12687 prefixFlags &= ~PREFIX_CONSTRAINED;
12689 if (compIsForInlining())
12691 // CALLI doesn't have a method handle, so assume the worst.
12692 if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
12694 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_CALLI);
12704 // We can't call getCallInfo on the token from a CALLI, but we need it in
12705 // many other places. We unfortunately embed that knowledge here.
12706 if (opcode != CEE_CALLI)
12708 _impResolveToken(CORINFO_TOKENKIND_Method);
12710 eeGetCallInfo(&resolvedToken,
12711 (prefixFlags & PREFIX_CONSTRAINED) ? &constrainedResolvedToken : nullptr,
12712 // this is how impImportCall invokes getCallInfo
12714 combine(combine(CORINFO_CALLINFO_ALLOWINSTPARAM, CORINFO_CALLINFO_SECURITYCHECKS),
12715 (opcode == CEE_CALLVIRT) ? CORINFO_CALLINFO_CALLVIRT
12716 : CORINFO_CALLINFO_NONE)),
12721 // Suppress uninitialized use warning.
12722 memset(&resolvedToken, 0, sizeof(resolvedToken));
12723 memset(&callInfo, 0, sizeof(callInfo));
12725 resolvedToken.token = getU4LittleEndian(codeAddr);
12728 CALL: // memberRef should be set.
12729 // newObjThisPtr should be set for CEE_NEWOBJ
12731 JITDUMP(" %08X", resolvedToken.token);
12732 constraintCall = (prefixFlags & PREFIX_CONSTRAINED) != 0;
12734 bool newBBcreatedForTailcallStress;
12736 newBBcreatedForTailcallStress = false;
12738 if (compIsForInlining())
12740 if (compDonotInline())
12744 // We rule out inlinees with explicit tail calls in fgMakeBasicBlocks.
12745 assert((prefixFlags & PREFIX_TAILCALL_EXPLICIT) == 0);
12749 if (compTailCallStress())
12751 // Have we created a new BB after the "call" instruction in fgMakeBasicBlocks()?
12752 // Tail call stress only recognizes call+ret patterns and forces them to be
12753 // explicit tail prefixed calls. Also fgMakeBasicBlocks() under tail call stress
12754 // doesn't import the 'ret' opcode following the call into the basic block containing
12755 // the call; instead it imports it into a new basic block. Note that fgMakeBasicBlocks()
12756 // has already checked that there is an opcode following the call, so it is
12757 // safe here to read the next opcode without a bounds check.
12758 newBBcreatedForTailcallStress =
12759 impOpcodeIsCallOpcode(opcode) && // Current opcode is a CALL, (not a CEE_NEWOBJ). So, don't
12760 // make it jump to RET.
12761 (OPCODE)getU1LittleEndian(codeAddr + sz) == CEE_RET; // Next opcode is a CEE_RET
12763 if (newBBcreatedForTailcallStress &&
12764 !(prefixFlags & PREFIX_TAILCALL_EXPLICIT) && // User hasn't set "tail." prefix yet.
12765 verCheckTailCallConstraint(opcode, &resolvedToken,
12766 constraintCall ? &constrainedResolvedToken : nullptr,
12767 true) // Is it legal to do a tailcall?
12770 // Stress the tailcall.
12771 JITDUMP(" (Tailcall stress: prefixFlags |= PREFIX_TAILCALL_EXPLICIT)");
12772 prefixFlags |= PREFIX_TAILCALL_EXPLICIT;
12776 // Note that when running under tail call stress, a call will be marked as explicit tail prefixed,
12777 // and hence will not be considered for implicit tail calling.
12778 bool isRecursive = (callInfo.hMethod == info.compMethodHnd);
12779 if (impIsImplicitTailCallCandidate(opcode, codeAddr + sz, codeEndp, prefixFlags, isRecursive))
12781 JITDUMP(" (Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)");
12782 prefixFlags |= PREFIX_TAILCALL_IMPLICIT;
12786 // Treat this call as tail call for verification only if "tail" prefixed (i.e. explicit tail call).
12787 explicitTailCall = (prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0;
12788 readonlyCall = (prefixFlags & PREFIX_READONLY) != 0;
12790 if (opcode != CEE_CALLI && opcode != CEE_NEWOBJ)
12792 // All calls and delegates need a security callout.
12793 // For delegates, this is the call to the delegate constructor, not the access check on the
12795 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12797 #if 0 // DevDiv 410397 - This breaks too many obfuscated apps to do this in an in-place release
12799 // DevDiv 291703 - we need to check for accessibility between the caller of InitializeArray
12800 // and the field it is reading, so it is now unverifiable if it is not immediately preceded by
12801 // ldtoken <field token>, and we now check accessibility
12802 if ((callInfo.methodFlags & CORINFO_FLG_INTRINSIC) &&
12803 (info.compCompHnd->getIntrinsicID(callInfo.hMethod) == CORINFO_INTRINSIC_InitializeArray))
12805 if (prevOpcode != CEE_LDTOKEN)
12807 Verify(prevOpcode == CEE_LDTOKEN, "Need ldtoken for InitializeArray");
12811 assert(lastLoadToken != NULL);
12812 // Now that we know we have a token, verify that it is accessible for loading
12813 CORINFO_RESOLVED_TOKEN resolvedLoadField;
12814 impResolveToken(lastLoadToken, &resolvedLoadField, CORINFO_TOKENKIND_Field);
12815 eeGetFieldInfo(&resolvedLoadField, CORINFO_ACCESS_INIT_ARRAY, &fieldInfo);
12816 impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
12820 #endif // DevDiv 410397
12823 if (tiVerificationNeeded)
12825 verVerifyCall(opcode, &resolvedToken, constraintCall ? &constrainedResolvedToken : nullptr,
12826 explicitTailCall, readonlyCall, delegateCreateStart, codeAddr - 1,
12827 &callInfo DEBUGARG(info.compFullName));
12830 // Insert delegate callout here.
12831 if (opcode == CEE_NEWOBJ && (mflags & CORINFO_FLG_CONSTRUCTOR) && (clsFlags & CORINFO_FLG_DELEGATE))
12834 // We should do this only if verification is enabled
12835 // If verification is disabled, delegateCreateStart will not be initialized correctly
12836 if (tiVerificationNeeded)
12838 mdMemberRef delegateMethodRef = mdMemberRefNil;
12839 // We should get here only for well formed delegate creation.
12840 assert(verCheckDelegateCreation(delegateCreateStart, codeAddr - 1, delegateMethodRef));
12844 #ifdef FEATURE_CORECLR
12845 // In coreclr the delegate transparency rule needs to be enforced even if verification is disabled
12846 typeInfo tiActualFtn = impStackTop(0).seTypeInfo;
12847 CORINFO_METHOD_HANDLE delegateMethodHandle = tiActualFtn.GetMethod2();
12849 impInsertCalloutForDelegate(info.compMethodHnd, delegateMethodHandle, resolvedToken.hClass);
12850 #endif // FEATURE_CORECLR
12853 callTyp = impImportCall(opcode, &resolvedToken, constraintCall ? &constrainedResolvedToken : nullptr,
12854 newObjThisPtr, prefixFlags, &callInfo, opcodeOffs);
12855 if (compDonotInline())
12860 if (explicitTailCall || newBBcreatedForTailcallStress) // If newBBcreatedForTailcallStress is true, we
12861 // have created a new BB after the "call"
12862 // instruction in fgMakeBasicBlocks(). So we need to jump to RET regardless.
12864 assert(!compIsForInlining());
12876 BOOL isLoadAddress = (opcode == CEE_LDFLDA || opcode == CEE_LDSFLDA);
12877 BOOL isLoadStatic = (opcode == CEE_LDSFLD || opcode == CEE_LDSFLDA);
12879 /* Get the CP_Fieldref index */
12880 assertImp(sz == sizeof(unsigned));
12882 _impResolveToken(CORINFO_TOKENKIND_Field);
12884 JITDUMP(" %08X", resolvedToken.token);
12886 int aflags = isLoadAddress ? CORINFO_ACCESS_ADDRESS : CORINFO_ACCESS_GET;
12888 GenTreePtr obj = nullptr;
12889 typeInfo* tiObj = nullptr;
12890 CORINFO_CLASS_HANDLE objType = nullptr; // used for fields
12892 if (opcode == CEE_LDFLD || opcode == CEE_LDFLDA)
12894 tiObj = &impStackTop().seTypeInfo;
12895 obj = impPopStack(objType).val;
12897 if (impIsThis(obj))
12899 aflags |= CORINFO_ACCESS_THIS;
12901 // An optimization for Contextful classes:
12902 // we unwrap the proxy when we have a 'this reference'
12904 if (info.compUnwrapContextful)
12906 aflags |= CORINFO_ACCESS_UNWRAP;
12911 eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo);
12913 // Figure out the type of the member. We always call canAccessField, so you always need this
12915 CorInfoType ciType = fieldInfo.fieldType;
12916 clsHnd = fieldInfo.structType;
12918 lclTyp = JITtype2varType(ciType);
12920 #ifdef _TARGET_AMD64_
12921 noway_assert(varTypeIsIntegralOrI(lclTyp) || varTypeIsFloating(lclTyp) || lclTyp == TYP_STRUCT);
12922 #endif // _TARGET_AMD64_
12924 if (compIsForInlining())
12926 switch (fieldInfo.fieldAccessor)
12928 case CORINFO_FIELD_INSTANCE_HELPER:
12929 case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
12930 case CORINFO_FIELD_STATIC_ADDR_HELPER:
12931 case CORINFO_FIELD_STATIC_TLS:
12933 compInlineResult->NoteFatal(InlineObservation::CALLEE_LDFLD_NEEDS_HELPER);
12936 case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
12937 #if COR_JIT_EE_VERSION > 460
12938 case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
12940 /* We may be able to inline the field accessors in specific instantiations of generic methods */
12942 compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDFLD_NEEDS_HELPER);
12949 if (!isLoadAddress && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && lclTyp == TYP_STRUCT &&
12952 if ((info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd) == CORINFO_TYPE_UNDEF) &&
12953 !(info.compFlags & CORINFO_FLG_FORCEINLINE))
12955 // Loading a static valuetype field usually will cause a JitHelper to be called
12956 // for the static base. This will bloat the code.
12957 compInlineResult->Note(InlineObservation::CALLEE_LDFLD_STATIC_VALUECLASS);
12959 if (compInlineResult->IsFailure())
12967 tiRetVal = verMakeTypeInfo(ciType, clsHnd);
12970 tiRetVal.MakeByRef();
12974 tiRetVal.NormaliseForStack();
12977 // Perform this check always to ensure that we get field access exceptions even with
12978 // SkipVerification.
12979 impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
12981 if (tiVerificationNeeded)
12983 // You can also pass the unboxed struct to LDFLD
12984 BOOL bAllowPlainValueTypeAsThis = FALSE;
12985 if (opcode == CEE_LDFLD && impIsValueType(tiObj))
12987 bAllowPlainValueTypeAsThis = TRUE;
12990 verVerifyField(&resolvedToken, fieldInfo, tiObj, isLoadAddress, bAllowPlainValueTypeAsThis);
12992 // If we're doing this on a heap object or from a 'safe' byref
12993 // then the result is a safe byref too
12994 if (isLoadAddress) // load address
12996 if (fieldInfo.fieldFlags &
12997 CORINFO_FLG_FIELD_STATIC) // statics marked as safe will have permanent home
12999 if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_SAFESTATIC_BYREF_RETURN)
13001 tiRetVal.SetIsPermanentHomeByRef();
13004 else if (tiObj->IsObjRef() || tiObj->IsPermanentHomeByRef())
13006 // ldflda of byref is safe if done on a gc object or on a
13008 tiRetVal.SetIsPermanentHomeByRef();
13014 // tiVerificationNeeded is false.
13015 // Raise InvalidProgramException if static load accesses non-static field
13016 if (isLoadStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0))
13018 BADCODE("static access on an instance field");
13022 // We are using ldfld/a on a static field. We allow it, but need to get side-effect from obj.
13023 if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr)
13025 if (obj->gtFlags & GTF_SIDE_EFFECT)
13027 obj = gtUnusedValNode(obj);
13028 impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13033 /* Preserve 'small' int types */
13034 if (lclTyp > TYP_INT)
13036 lclTyp = genActualType(lclTyp);
13039 bool usesHelper = false;
13041 switch (fieldInfo.fieldAccessor)
13043 case CORINFO_FIELD_INSTANCE:
13044 #ifdef FEATURE_READYTORUN_COMPILER
13045 case CORINFO_FIELD_INSTANCE_WITH_BASE:
13048 bool nullcheckNeeded = false;
13050 obj = impCheckForNullPointer(obj);
13052 if (isLoadAddress && (obj->gtType == TYP_BYREF) && fgAddrCouldBeNull(obj))
13054 nullcheckNeeded = true;
13057 // If the object is a struct, what we really want is
13058 // for the field to operate on the address of the struct.
13059 if (!varTypeGCtype(obj->TypeGet()) && impIsValueType(tiObj))
13061 assert(opcode == CEE_LDFLD && objType != nullptr);
13063 obj = impGetStructAddr(obj, objType, (unsigned)CHECK_SPILL_ALL, true);
13066 /* Create the data member node */
13067 op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset, nullcheckNeeded);
13069 #ifdef FEATURE_READYTORUN_COMPILER
13070 if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE)
13072 op1->gtField.gtFieldLookup = fieldInfo.fieldLookup;
13076 op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT);
13078 if (fgAddrCouldBeNull(obj))
13080 op1->gtFlags |= GTF_EXCEPT;
13083 // If gtFldObj is a BYREF then our target is a value class and
13084 // it could point anywhere, for example a boxed class static int
13085 if (obj->gtType == TYP_BYREF)
13087 op1->gtFlags |= GTF_IND_TGTANYWHERE;
13090 DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
13091 if (StructHasOverlappingFields(typeFlags))
13093 op1->gtField.gtFldMayOverlap = true;
13096 // wrap it in an address-of operator if necessary
13099 op1 = gtNewOperNode(GT_ADDR,
13100 (var_types)(varTypeIsGC(obj->TypeGet()) ? TYP_BYREF : TYP_I_IMPL), op1);
13104 if (compIsForInlining() &&
13105 impInlineIsGuaranteedThisDerefBeforeAnySideEffects(nullptr, obj,
13106 impInlineInfo->inlArgInfo))
13108 impInlineInfo->thisDereferencedFirst = true;
13114 case CORINFO_FIELD_STATIC_TLS:
13115 #ifdef _TARGET_X86_
13116 // Legacy TLS access is implemented as intrinsic on x86 only
13118 /* Create the data member node */
13119 op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset);
13120 op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation
13124 op1 = gtNewOperNode(GT_ADDR, (var_types)TYP_I_IMPL, op1);
13128 fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER;
13133 case CORINFO_FIELD_STATIC_ADDR_HELPER:
13134 case CORINFO_FIELD_INSTANCE_HELPER:
13135 case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
13136 op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp,
13141 case CORINFO_FIELD_STATIC_ADDRESS:
13142 // Replace static read-only fields with constant if possible
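// (Once the declaring class has finished running its class constructor, a 'final' static of
// primitive type can never change again, so its current value can be folded into the code
// stream as a constant.)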
13143 if ((aflags & CORINFO_ACCESS_GET) && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_FINAL) &&
13144 !(fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP) &&
13145 (varTypeIsIntegral(lclTyp) || varTypeIsFloating(lclTyp)))
13147 CorInfoInitClassResult initClassResult =
13148 info.compCompHnd->initClass(resolvedToken.hField, info.compMethodHnd,
13149 impTokenLookupContextHandle);
13151 if (initClassResult & CORINFO_INITCLASS_INITIALIZED)
13153 void** pFldAddr = nullptr;
13155 info.compCompHnd->getFieldAddress(resolvedToken.hField, (void**)&pFldAddr);
13157 // We should always be able to access this static's address directly
13158 assert(pFldAddr == nullptr);
13160 op1 = impImportStaticReadOnlyField(fldAddr, lclTyp);
13167 case CORINFO_FIELD_STATIC_RVA_ADDRESS:
13168 case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
13169 case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
13170 #if COR_JIT_EE_VERSION > 460
13171 case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
13173 op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo,
13177 case CORINFO_FIELD_INTRINSIC_ZERO:
13179 assert(aflags & CORINFO_ACCESS_GET);
13180 op1 = gtNewIconNode(0, lclTyp);
13185 case CORINFO_FIELD_INTRINSIC_EMPTY_STRING:
13187 assert(aflags & CORINFO_ACCESS_GET);
13190 InfoAccessType iat = info.compCompHnd->emptyStringLiteral(&pValue);
13191 op1 = gtNewStringLiteralNode(iat, pValue);
13197 assert(!"Unexpected fieldAccessor");
13200 if (!isLoadAddress)
13203 if (prefixFlags & PREFIX_VOLATILE)
13205 op1->gtFlags |= GTF_DONT_CSE; // Can't CSE a volatile
13206 op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
13210 assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) ||
13211 (op1->OperGet() == GT_OBJ));
13212 op1->gtFlags |= GTF_IND_VOLATILE;
13216 if (prefixFlags & PREFIX_UNALIGNED)
13220 assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) ||
13221 (op1->OperGet() == GT_OBJ));
13222 op1->gtFlags |= GTF_IND_UNALIGNED;
13227 /* Check if the class needs explicit initialization */
13229 if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
13231 GenTreePtr helperNode = impInitClass(&resolvedToken);
13232 if (compDonotInline())
13236 if (helperNode != nullptr)
13238 op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1);
13243 impPushOnStack(op1, tiRetVal);
13251 BOOL isStoreStatic = (opcode == CEE_STSFLD);
13253 CORINFO_CLASS_HANDLE fieldClsHnd; // class of the field (if it's a ref type)
13255 /* Get the CP_Fieldref index */
13257 assertImp(sz == sizeof(unsigned));
13259 _impResolveToken(CORINFO_TOKENKIND_Field);
13261 JITDUMP(" %08X", resolvedToken.token);
13263 int aflags = CORINFO_ACCESS_SET;
13264 GenTreePtr obj = nullptr;
13265 typeInfo* tiObj = nullptr;
13268 /* Pull the value from the stack */
13269 op2 = impPopStack(tiVal);
13270 clsHnd = tiVal.GetClassHandle();
13272 if (opcode == CEE_STFLD)
13274 tiObj = &impStackTop().seTypeInfo;
13275 obj = impPopStack().val;
13277 if (impIsThis(obj))
13279 aflags |= CORINFO_ACCESS_THIS;
13281 // An optimization for Contextful classes:
13282 // we unwrap the proxy when we have a 'this reference'
13284 if (info.compUnwrapContextful)
13286 aflags |= CORINFO_ACCESS_UNWRAP;
13291 eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo);
13293 // Figure out the type of the member. We always call canAccessField, so you always need this
13295 CorInfoType ciType = fieldInfo.fieldType;
13296 fieldClsHnd = fieldInfo.structType;
13298 lclTyp = JITtype2varType(ciType);
13300 if (compIsForInlining())
13302 /* Is this a 'special' (COM) field? a TLS ref static field? a field stored in the GC heap? or a
13303 * per-inst static? */
13305 switch (fieldInfo.fieldAccessor)
13307 case CORINFO_FIELD_INSTANCE_HELPER:
13308 case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
13309 case CORINFO_FIELD_STATIC_ADDR_HELPER:
13310 case CORINFO_FIELD_STATIC_TLS:
13312 compInlineResult->NoteFatal(InlineObservation::CALLEE_STFLD_NEEDS_HELPER);
13315 case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
13316 #if COR_JIT_EE_VERSION > 460
13317 case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
13320 /* We may be able to inline the field accessors in specific instantiations of generic methods */
13322 compInlineResult->NoteFatal(InlineObservation::CALLSITE_STFLD_NEEDS_HELPER);
13330 impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
13332 if (tiVerificationNeeded)
13334 verVerifyField(&resolvedToken, fieldInfo, tiObj, TRUE);
13335 typeInfo fieldType = verMakeTypeInfo(ciType, fieldClsHnd);
13336 Verify(tiCompatibleWith(tiVal, fieldType.NormaliseForStack(), true), "type mismatch");
13340 // tiVerificationNeeded is false.
13341 // Raise InvalidProgramException if static store accesses non-static field
13342 if (isStoreStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0))
13344 BADCODE("static access on an instance field");
13348 // We are using stfld on a static field.
13349 // We allow it, but need to eval any side-effects for obj
13350 if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr)
13352 if (obj->gtFlags & GTF_SIDE_EFFECT)
13354 obj = gtUnusedValNode(obj);
13355 impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13360 /* Preserve 'small' int types */
13361 if (lclTyp > TYP_INT)
13363 lclTyp = genActualType(lclTyp);
13366 switch (fieldInfo.fieldAccessor)
13368 case CORINFO_FIELD_INSTANCE:
13369 #ifdef FEATURE_READYTORUN_COMPILER
13370 case CORINFO_FIELD_INSTANCE_WITH_BASE:
13373 obj = impCheckForNullPointer(obj);
13375 /* Create the data member node */
13376 op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset);
13377 DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
13378 if (StructHasOverlappingFields(typeFlags))
13380 op1->gtField.gtFldMayOverlap = true;
13383 #ifdef FEATURE_READYTORUN_COMPILER
13384 if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE)
13386 op1->gtField.gtFieldLookup = fieldInfo.fieldLookup;
13390 op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT);
13392 if (fgAddrCouldBeNull(obj))
13394 op1->gtFlags |= GTF_EXCEPT;
13397 // If gtFldObj is a BYREF then our target is a value class and
13398 // it could point anywhere, for example a boxed class static int
13399 if (obj->gtType == TYP_BYREF)
13401 op1->gtFlags |= GTF_IND_TGTANYWHERE;
13404 if (compIsForInlining() &&
13405 impInlineIsGuaranteedThisDerefBeforeAnySideEffects(op2, obj, impInlineInfo->inlArgInfo))
13407 impInlineInfo->thisDereferencedFirst = true;
13412 case CORINFO_FIELD_STATIC_TLS:
13413 #ifdef _TARGET_X86_
13414 // Legacy TLS access is implemented as intrinsic on x86 only
13416 /* Create the data member node */
13417 op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset);
13418 op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation
13422 fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER;
13427 case CORINFO_FIELD_STATIC_ADDR_HELPER:
13428 case CORINFO_FIELD_INSTANCE_HELPER:
13429 case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
13430 op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp,
13434 case CORINFO_FIELD_STATIC_ADDRESS:
13435 case CORINFO_FIELD_STATIC_RVA_ADDRESS:
13436 case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
13437 case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
13438 #if COR_JIT_EE_VERSION > 460
13439 case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
13441 op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo,
13446 assert(!"Unexpected fieldAccessor");
13449 // Create the member assignment, unless we have a struct.
13450 // TODO-1stClassStructs: This could be limited to TYP_STRUCT, to avoid extra copies.
13451 bool deferStructAssign = varTypeIsStruct(lclTyp);
13453 if (!deferStructAssign)
13455 if (prefixFlags & PREFIX_VOLATILE)
13457 assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND));
13458 op1->gtFlags |= GTF_DONT_CSE; // Can't CSE a volatile
13459 op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
13460 op1->gtFlags |= GTF_IND_VOLATILE;
13462 if (prefixFlags & PREFIX_UNALIGNED)
13464 assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND));
13465 op1->gtFlags |= GTF_IND_UNALIGNED;
13468 /* V4.0 allows assignment of i4 constant values to i8 type vars when IL verifier is bypassed (full trust
13470 apps). The reason this works is that the JIT stores an i4 constant in the GenTree union during importation
13472 and reads from the union as if it were a long during code generation. Though this can potentially
13473 read garbage, one can get lucky to have this working correctly.
13475 This code pattern is generated by the Dev10 MC++ compiler while storing to fields when compiled with the
13477 optimization switch that is the default for retail configs in Dev10, and a customer app has taken a dependency on
13479 it. To be backward compatible, we will explicitly add an upward cast here so that it works correctly.
13483 Note that this is limited to x86 alone as there is no back compat to be addressed for Arm JIT for V4.0. */
13486 CLANG_FORMAT_COMMENT_ANCHOR;
13488 #ifdef _TARGET_X86_
13489 if (op1->TypeGet() != op2->TypeGet() && op2->OperIsConst() && varTypeIsIntOrI(op2->TypeGet()) &&
13490 varTypeIsLong(op1->TypeGet()))
13492 op2 = gtNewCastNode(op1->TypeGet(), op2, op1->TypeGet());
13496 #ifdef _TARGET_64BIT_
13497 // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
13498 if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType))
13500 op2->gtType = TYP_I_IMPL;
13504 // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatiblity
13506 if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT))
13508 op2 = gtNewCastNode(TYP_INT, op2, TYP_INT);
13510 // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatiblity
13512 if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT))
13514 op2 = gtNewCastNode(TYP_I_IMPL, op2, TYP_I_IMPL);
13519 #if !FEATURE_X87_DOUBLES
13520 // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE
13521 // We insert a cast to the dest 'op1' type
13523 if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) &&
13524 varTypeIsFloating(op2->gtType))
13526 op2 = gtNewCastNode(op1->TypeGet(), op2, op1->TypeGet());
13528 #endif // !FEATURE_X87_DOUBLES
13530 op1 = gtNewAssignNode(op1, op2);
13532 /* Mark the expression as containing an assignment */
13534 op1->gtFlags |= GTF_ASG;
13537 /* Check if the class needs explicit initialization */
13539 if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
13541 GenTreePtr helperNode = impInitClass(&resolvedToken);
13542 if (compDonotInline())
13546 if (helperNode != nullptr)
13548 op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1);
13552 /* stfld can interfere with value classes (consider the sequence
13553 ldloc, ldloca, ..., stfld, stloc). We will be conservative and
13554 spill all value class references from the stack. */
13556 if (obj && ((obj->gtType == TYP_BYREF) || (obj->gtType == TYP_I_IMPL)))
13560 if (impIsValueType(tiObj))
13562 impSpillEvalStack();
13566 impSpillValueClasses();
13570 /* Spill any refs to the same member from the stack */
13572 impSpillLclRefs((ssize_t)resolvedToken.hField);
13574 /* stsfld also interferes with indirect accesses (for aliased
13575 statics) and calls. But we don't need to spill other statics
13576 as we have explicitly spilled this particular static field. */
13578 impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STFLD"));
13580 if (deferStructAssign)
13582 op1 = impAssignStruct(op1, op2, clsHnd, (unsigned)CHECK_SPILL_ALL);
13590 /* Get the class type index operand */
13592 _impResolveToken(CORINFO_TOKENKIND_Newarr);
13594 JITDUMP(" %08X", resolvedToken.token);
13596 if (!opts.IsReadyToRun())
13598 // Need to restore array classes before creating array objects on the heap
13599 op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE /*mustRestoreHandle*/);
13600 if (op1 == nullptr)
13601 { // compDonotInline()
13606 if (tiVerificationNeeded)
13608 // As per ECMA, the 'numElems' operand can be either an int32 or a native int.
13609 Verify(impStackTop().seTypeInfo.IsIntOrNativeIntType(), "bad bound");
13611 CORINFO_CLASS_HANDLE elemTypeHnd;
13612 info.compCompHnd->getChildType(resolvedToken.hClass, &elemTypeHnd);
13613 Verify(elemTypeHnd == nullptr ||
13614 !(info.compCompHnd->getClassAttribs(elemTypeHnd) & CORINFO_FLG_CONTAINS_STACK_PTR),
13615 "array of byref-like type");
13616 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
13619 accessAllowedResult =
13620 info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
13621 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
13623 /* Form the arglist: array class handle, size */
13624 op2 = impPopStack().val;
13625 assertImp(genActualTypeIsIntOrI(op2->gtType));
13627 #ifdef FEATURE_READYTORUN_COMPILER
13628 if (opts.IsReadyToRun())
13630 op1 = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_NEWARR_1, TYP_REF,
13631 gtNewArgList(op2));
13632 usingReadyToRunHelper = (op1 != nullptr);
13634 if (!usingReadyToRunHelper)
13636 // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
13637 // and the newarr call with a single call to a dynamic R2R cell that will:
13638 // 1) Load the context
13639 // 2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
13640 // 3) Allocate the new array
13641 // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
13643 // Need to restore array classes before creating array objects on the heap
13644 op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE /*mustRestoreHandle*/);
13645 if (op1 == nullptr)
13646 { // compDonotInline()
13652 if (!usingReadyToRunHelper)
13655 args = gtNewArgList(op1, op2);
13657 /* Create a call to 'new' */
13659 // Note that this only works for shared generic code because the same helper is used for all
13660 // reference array types
13662 gtNewHelperCallNode(info.compCompHnd->getNewArrHelper(resolvedToken.hClass), TYP_REF, 0, args);
13665 op1->gtCall.compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)resolvedToken.hClass;
13667 /* Remember that this basic block contains 'new' of a single-dimension array */
13669 block->bbFlags |= BBF_HAS_NEWARRAY;
13670 optMethodFlags |= OMF_HAS_NEWARRAY;
13672 /* Push the result of the call on the stack */
13674 impPushOnStack(op1, tiRetVal);
13681 assert(!compIsForInlining());
13683 if (tiVerificationNeeded)
13685 Verify(false, "bad opcode");
13688 // We don't allow locallocs inside handlers
13689 if (block->hasHndIndex())
13691 BADCODE("Localloc can't be inside handler");
13694 /* The FP register may not be back to the original value at the end
13695 of the method, even if the frame size is 0, as localloc may
13696 have modified it. So we will HAVE to reset it */
13698 compLocallocUsed = true;
13699 setNeedsGSSecurityCookie();
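// Dynamically sized stack allocations are a prime target for buffer overruns, so methods
// that use localloc are opted into a GS security cookie.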
13701 // Get the size to allocate
13703 op2 = impPopStack().val;
13704 assertImp(genActualTypeIsIntOrI(op2->gtType));
13706 if (verCurrentState.esStackDepth != 0)
13708 BADCODE("Localloc can only be used when the stack is empty");
13711 op1 = gtNewOperNode(GT_LCLHEAP, TYP_I_IMPL, op2);
13713 // May throw a stack overflow exception. Obviously, we don't want locallocs to be CSE'd.
13715 op1->gtFlags |= (GTF_EXCEPT | GTF_DONT_CSE);
13717 impPushOnStack(op1, tiRetVal);
13722 /* Get the type token */
13723 assertImp(sz == sizeof(unsigned));
13725 _impResolveToken(CORINFO_TOKENKIND_Casting);
13727 JITDUMP(" %08X", resolvedToken.token);
13729 if (!opts.IsReadyToRun())
13731 op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
13732 if (op2 == nullptr)
13733 { // compDonotInline()
13738 if (tiVerificationNeeded)
13740 Verify(impStackTop().seTypeInfo.IsObjRef(), "obj reference needed");
13741 // Even if this is a value class, we know it is boxed.
13742 tiRetVal = typeInfo(TI_REF, resolvedToken.hClass);
13744 accessAllowedResult =
13745 info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
13746 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
13748 op1 = impPopStack().val;
13750 #ifdef FEATURE_READYTORUN_COMPILER
13751 if (opts.IsReadyToRun())
13753 GenTreePtr opLookup =
13754 impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_ISINSTANCEOF, TYP_REF,
13755 gtNewArgList(op1));
13756 usingReadyToRunHelper = (opLookup != nullptr);
13757 op1 = (usingReadyToRunHelper ? opLookup : op1);
13759 if (!usingReadyToRunHelper)
13761 // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
13762 // and the isinstanceof_any call with a single call to a dynamic R2R cell that will:
13763 // 1) Load the context
13764 // 2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
13765 // 3) Perform the 'is instance' check on the input object
13766 // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
13768 op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
13769 if (op2 == nullptr)
13770 { // compDonotInline()
13776 if (!usingReadyToRunHelper)
13779 op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, false);
13781 if (compDonotInline())
13786 impPushOnStack(op1, tiRetVal);
13790 case CEE_REFANYVAL:
13792 // get the class handle and make an ICON node out of it
13794 _impResolveToken(CORINFO_TOKENKIND_Class);
13796 JITDUMP(" %08X", resolvedToken.token);
13798 op2 = impTokenToHandle(&resolvedToken);
13799 if (op2 == nullptr)
13800 { // compDonotInline()
13804 if (tiVerificationNeeded)
13806 Verify(typeInfo::AreEquivalent(impStackTop().seTypeInfo, verMakeTypeInfo(impGetRefAnyClass())),
13808 tiRetVal = verMakeTypeInfo(resolvedToken.hClass).MakeByRef();
13811 op1 = impPopStack().val;
13812 // make certain it is normalized;
13813 op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL);
13815 // Call helper GETREFANY(classHandle, op1);
13816 args = gtNewArgList(op2, op1);
13817 op1 = gtNewHelperCallNode(CORINFO_HELP_GETREFANY, TYP_BYREF, 0, args);
13819 impPushOnStack(op1, tiRetVal);
13822 case CEE_REFANYTYPE:
13824 if (tiVerificationNeeded)
13826 Verify(typeInfo::AreEquivalent(impStackTop().seTypeInfo, verMakeTypeInfo(impGetRefAnyClass())),
13830 op1 = impPopStack().val;
13832 // make certain it is normalized;
13833 op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL);
13835 if (op1->gtOper == GT_OBJ)
13837 // Get the address of the refany
13838 op1 = op1->gtOp.gtOp1;
13840 // Fetch the type from the correct slot
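// (CORINFO_RefAny is laid out as a { data pointer, type handle } pair; the
// offsetof(CORINFO_RefAny, type) adjustment below selects the type-handle slot.)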
13841 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
13842 gtNewIconNode(offsetof(CORINFO_RefAny, type), TYP_I_IMPL));
13843 op1 = gtNewOperNode(GT_IND, TYP_BYREF, op1);
13847 assertImp(op1->gtOper == GT_MKREFANY);
13849 // The pointer may have side-effects
13850 if (op1->gtOp.gtOp1->gtFlags & GTF_SIDE_EFFECT)
13852 impAppendTree(op1->gtOp.gtOp1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13854 impNoteLastILoffs();
13858 // We already have the class handle
13859 op1 = op1->gtOp.gtOp2;
13862 // convert native TypeHandle to RuntimeTypeHandle
13864 GenTreeArgList* helperArgs = gtNewArgList(op1);
13866 op1 = gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE_MAYBENULL, TYP_STRUCT, GTF_EXCEPT,
13869 // The handle struct is returned in a register
13870 op1->gtCall.gtReturnType = TYP_REF;
13872 tiRetVal = typeInfo(TI_STRUCT, impGetTypeHandleClass());
13875 impPushOnStack(op1, tiRetVal);
13880 /* Get the Class index */
13881 assertImp(sz == sizeof(unsigned));
13882 lastLoadToken = codeAddr;
13883 _impResolveToken(CORINFO_TOKENKIND_Ldtoken);
13885 tokenType = info.compCompHnd->getTokenTypeAsHandle(&resolvedToken);
13887 op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE);
13888 if (op1 == nullptr)
13889 { // compDonotInline()
13893 helper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE;
13894 assert(resolvedToken.hClass != nullptr);
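// A ldtoken may name a type, a method, or a field; default to the type-handle conversion
// helper and switch to the method/field helper if the token resolved to one of those.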
13896 if (resolvedToken.hMethod != nullptr)
13898 helper = CORINFO_HELP_METHODDESC_TO_STUBRUNTIMEMETHOD;
13900 else if (resolvedToken.hField != nullptr)
13902 helper = CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD;
13905 GenTreeArgList* helperArgs = gtNewArgList(op1);
13907 op1 = gtNewHelperCallNode(helper, TYP_STRUCT, GTF_EXCEPT, helperArgs);
13909 // The handle struct is returned in a register
13910 op1->gtCall.gtReturnType = TYP_REF;
13912 tiRetVal = verMakeTypeInfo(tokenType);
13913 impPushOnStack(op1, tiRetVal);
13918 case CEE_UNBOX_ANY:
13920 /* Get the Class index */
13921 assertImp(sz == sizeof(unsigned));
13923 _impResolveToken(CORINFO_TOKENKIND_Class);
13925 JITDUMP(" %08X", resolvedToken.token);
13927 BOOL runtimeLookup;
13928 op2 = impTokenToHandle(&resolvedToken, &runtimeLookup);
13929 if (op2 == nullptr)
13930 { // compDonotInline()
13934 // Run this always so we can get access exceptions even with SkipVerification.
13935 accessAllowedResult =
13936 info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
13937 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
13939 if (opcode == CEE_UNBOX_ANY && !eeIsValueClass(resolvedToken.hClass))
13941 if (tiVerificationNeeded)
13943 typeInfo tiUnbox = impStackTop().seTypeInfo;
13944 Verify(tiUnbox.IsObjRef(), "bad unbox.any arg");
13945 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
13946 tiRetVal.NormaliseForStack();
13948 op1 = impPopStack().val;
13952 /* Pop the object and create the unbox helper call */
13953 /* You might think that for UNBOX_ANY we need to push a different */
13954 /* (non-byref) type, but here we're making the tiRetVal that is used */
13955 /* for the intermediate pointer which we then transfer onto the OBJ */
13956 /* instruction. OBJ then creates the appropriate tiRetVal. */
13957 if (tiVerificationNeeded)
13959 typeInfo tiUnbox = impStackTop().seTypeInfo;
13960 Verify(tiUnbox.IsObjRef(), "Bad unbox arg");
13962 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
13963 Verify(tiRetVal.IsValueClass(), "not value class");
13964 tiRetVal.MakeByRef();
13966 // We always come from an objref, so this is safe byref
13967 tiRetVal.SetIsPermanentHomeByRef();
13968 tiRetVal.SetIsReadonlyByRef();
13971 op1 = impPopStack().val;
13972 assertImp(op1->gtType == TYP_REF);
13974 helper = info.compCompHnd->getUnBoxHelper(resolvedToken.hClass);
13975 assert(helper == CORINFO_HELP_UNBOX || helper == CORINFO_HELP_UNBOX_NULLABLE);
13977 // We only want to expand inline the normal UNBOX helper;
13978 expandInline = (helper == CORINFO_HELP_UNBOX);
13982 if (compCurBB->isRunRarely())
13984 expandInline = false; // not worth the code expansion
13990 // we are doing normal unboxing
13991 // inline the common case of the unbox helper
13992 // UNBOX(exp) morphs into
13993 // clone = pop(exp);
13994 // ((*clone == typeToken) ? nop : helper(clone, typeToken));
13995 // push(clone + sizeof(void*))
13997 GenTreePtr cloneOperand;
13998 op1 = impCloneExpr(op1, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
13999 nullptr DEBUGARG("inline UNBOX clone1"));
14000 op1 = gtNewOperNode(GT_IND, TYP_I_IMPL, op1);
14002 GenTreePtr condBox = gtNewOperNode(GT_EQ, TYP_INT, op1, op2);
14004 op1 = impCloneExpr(cloneOperand, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
14005 nullptr DEBUGARG("inline UNBOX clone2"));
14006 op2 = impTokenToHandle(&resolvedToken);
14007 if (op2 == nullptr)
14008 { // compDonotInline()
14011 args = gtNewArgList(op2, op1);
14012 op1 = gtNewHelperCallNode(helper, TYP_VOID, 0, args);
14014 op1 = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), op1);
14015 op1 = gtNewQmarkNode(TYP_VOID, condBox, op1);
14016 condBox->gtFlags |= GTF_RELOP_QMARK;
14018 // QMARK nodes cannot reside on the evaluation stack. Because there
14019 // may be other trees on the evaluation stack that side-effect the
14020 // sources of the UNBOX operation we must spill the stack.
14022 impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
14024 // Create the address-expression to reference past the object header
14025 // to the beginning of the value-type. Today this means adjusting
14026 // past the base of the object's vtable field, which is pointer sized.
14028 op2 = gtNewIconNode(sizeof(void*), TYP_I_IMPL);
14029 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, cloneOperand, op2);
14033 unsigned callFlags = (helper == CORINFO_HELP_UNBOX) ? 0 : GTF_EXCEPT;
14035 // Don't optimize, just call the helper and be done with it
14036 args = gtNewArgList(op2, op1);
14037 op1 = gtNewHelperCallNode(helper,
14038 (var_types)((helper == CORINFO_HELP_UNBOX) ? TYP_BYREF : TYP_STRUCT),
14042 assert(helper == CORINFO_HELP_UNBOX && op1->gtType == TYP_BYREF || // Unbox helper returns a byref.
14043 helper == CORINFO_HELP_UNBOX_NULLABLE &&
14044 varTypeIsStruct(op1) // UnboxNullable helper returns a struct.
14048 ----------------------------------------------------------------------
14051 |  \ helper   | CORINFO_HELP_UNBOX            | CORINFO_HELP_UNBOX_NULLABLE    |
14052 | opcode  \   | (which returns a BYREF)       | (which returns a STRUCT)       |
14054 |----------------------------------------------------------------------
14055 | UNBOX       | push the BYREF                | spill the STRUCT to a local,   |
14056 |             |                               | push the BYREF to this local   |
14057 |----------------------------------------------------------------------
14058 | UNBOX_ANY   | push a GT_OBJ of              | push the STRUCT;               |
14059 |             | the BYREF                     | for Linux, when the            |
14060 |             |                               | struct is returned in two      |
14061 |             |                               | registers, create a temp       |
14062 |             |                               | whose address is passed to     |
14063 |             |                               | the unbox_nullable helper.     |
14064 |----------------------------------------------------------------------
14067 if (opcode == CEE_UNBOX)
14069 if (helper == CORINFO_HELP_UNBOX_NULLABLE)
14071 // Unbox nullable helper returns a struct type.
14072 // We need to spill it to a temp so that we can take its address.
14073 // Here we need the unsafe value cls check, since the address of the struct is taken to be used
14074 // further along and is potentially exploitable.
14076 unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a nullable"));
14077 lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */);
14079 op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14080 op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
14081 assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp.
14083 op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14084 op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2);
14085 op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2);
14088 assert(op1->gtType == TYP_BYREF);
14089 assert(!tiVerificationNeeded || tiRetVal.IsByRef());
14093 assert(opcode == CEE_UNBOX_ANY);
14095 if (helper == CORINFO_HELP_UNBOX)
14097 // Normal unbox helper returns a TYP_BYREF.
14098 impPushOnStack(op1, tiRetVal);
14103 assert(helper == CORINFO_HELP_UNBOX_NULLABLE && "Make sure the helper is nullable!");
14105 #if FEATURE_MULTIREG_RET
14107 if (varTypeIsStruct(op1) && IsMultiRegReturnedType(resolvedToken.hClass))
14109 // Unbox nullable helper returns a TYP_STRUCT.
14110 // For the multi-reg case we need to spill it to a temp so that
14111 // we can pass the address to the unbox_nullable jit helper.
14113 unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a register returnable nullable"));
14114 lvaTable[tmp].lvIsMultiRegArg = true;
14115 lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */);
14117 op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14118 op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
14119 assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp.
14121 op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14122 op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2);
14123 op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2);
14125 // In this case the return value of the unbox helper is TYP_BYREF.
14126 // Make sure the right type is placed on the operand type stack.
14127 impPushOnStack(op1, tiRetVal);
14129 // Load the struct.
14132 assert(op1->gtType == TYP_BYREF);
14133 assert(!tiVerificationNeeded || tiRetVal.IsByRef());
14139 #endif // !FEATURE_MULTIREG_RET
14142 // If the struct is not register-passable, it has been materialized in the RetBuf.
14143 assert(op1->gtType == TYP_STRUCT);
14144 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
14145 assert(tiRetVal.IsValueClass());
14149 impPushOnStack(op1, tiRetVal);
14155 /* Get the Class index */
14156 assertImp(sz == sizeof(unsigned));
14158 _impResolveToken(CORINFO_TOKENKIND_Box);
14160 JITDUMP(" %08X", resolvedToken.token);
14162 if (tiVerificationNeeded)
14164 typeInfo tiActual = impStackTop().seTypeInfo;
14165 typeInfo tiBox = verMakeTypeInfo(resolvedToken.hClass);
14167 Verify(verIsBoxable(tiBox), "boxable type expected");
14169 // check the class constraints of the boxed type in case we are boxing an uninitialized value
14170 Verify(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
14171 "boxed type has unsatisfied class constraints");
14173 Verify(tiCompatibleWith(tiActual, tiBox.NormaliseForStack(), true), "type mismatch");
14175 // Observation: the following code introduces a boxed value class on the stack, but,
14176 // according to the ECMA spec, one would simply expect: tiRetVal =
14177 // typeInfo(TI_REF,impGetObjectClass());
14179 // Push the result back on the stack,
14180 // even if clsHnd is a value class we want the TI_REF
14181 // we call back to the EE to find out what type we should push (for Nullable<T> we push T)
14182 tiRetVal = typeInfo(TI_REF, info.compCompHnd->getTypeForBox(resolvedToken.hClass));
14185 accessAllowedResult =
14186 info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14187 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14189 // Note BOX can be used on things that are not value classes, in which
14190 // case we get a NOP. However the verifier's view of the type on the
14191 // stack changes (in generic code a 'T' becomes a 'boxed T')
14192 if (!eeIsValueClass(resolvedToken.hClass))
14194 verCurrentState.esStack[verCurrentState.esStackDepth - 1].seTypeInfo = tiRetVal;
14198 // Look ahead for unbox.any
14199 if (codeAddr + (sz + 1 + sizeof(mdToken)) <= codeEndp && codeAddr[sz] == CEE_UNBOX_ANY)
14201 DWORD classAttribs = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
14202 if (!(classAttribs & CORINFO_FLG_SHAREDINST))
14204 CORINFO_RESOLVED_TOKEN unboxResolvedToken;
14206 impResolveToken(codeAddr + (sz + 1), &unboxResolvedToken, CORINFO_TOKENKIND_Class);
14208 if (unboxResolvedToken.hClass == resolvedToken.hClass)
14210 // Skip the next unbox.any instruction
14211 sz += sizeof(mdToken) + 1;
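// Illustrative IL pattern just folded away (same, non-shared-instantiation
// token on both instructions; "SomeType" is a placeholder):
//
//     box       SomeType
//     unbox.any SomeType
//
// The pair leaves the original value unchanged on the stack, so we skip past
// the following unbox.any token rather than importing the box.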
14217 impImportAndPushBox(&resolvedToken);
14218 if (compDonotInline())
14227 /* Get the Class index */
14228 assertImp(sz == sizeof(unsigned));
14230 _impResolveToken(CORINFO_TOKENKIND_Class);
14232 JITDUMP(" %08X", resolvedToken.token);
14234 if (tiVerificationNeeded)
14236 tiRetVal = typeInfo(TI_INT);
14239 op1 = gtNewIconNode(info.compCompHnd->getClassSize(resolvedToken.hClass));
14240 impPushOnStack(op1, tiRetVal);
14243 case CEE_CASTCLASS:
14245 /* Get the Class index */
14247 assertImp(sz == sizeof(unsigned));
14249 _impResolveToken(CORINFO_TOKENKIND_Casting);
14251 JITDUMP(" %08X", resolvedToken.token);
14253 if (!opts.IsReadyToRun())
14255 op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
14256 if (op2 == nullptr)
14257 { // compDonotInline()
14262 if (tiVerificationNeeded)
14264 Verify(impStackTop().seTypeInfo.IsObjRef(), "object ref expected");
14266 tiRetVal = typeInfo(TI_REF, resolvedToken.hClass);
14269 accessAllowedResult =
14270 info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14271 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14273 op1 = impPopStack().val;
14275 /* Pop the address and create the 'checked cast' helper call */
14277 // At this point we expect typeRef to contain the token, op1 to contain the value being cast,
14278 // and op2 to contain code that creates the type handle corresponding to typeRef
14281 #ifdef FEATURE_READYTORUN_COMPILER
14282 if (opts.IsReadyToRun())
14284 GenTreePtr opLookup = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_CHKCAST,
14285 TYP_REF, gtNewArgList(op1));
14286 usingReadyToRunHelper = (opLookup != nullptr);
14287 op1 = (usingReadyToRunHelper ? opLookup : op1);
14289 if (!usingReadyToRunHelper)
14291 // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
14292 // and the chkcastany call with a single call to a dynamic R2R cell that will:
14293 // 1) Load the context
14294 // 2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
14295 // 3) Check the object on the stack for the type-cast
14296 // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
14298 op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
14299 if (op2 == nullptr)
14300 { // compDonotInline()
14306 if (!usingReadyToRunHelper)
14309 op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, true);
14311 if (compDonotInline())
14316 /* Push the result back on the stack */
14317 impPushOnStack(op1, tiRetVal);
14322 if (compIsForInlining())
14324 // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
14325 // TODO: Will this be too strict, given that we will inline many basic blocks?
14326 // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
14328 /* Do we have just the exception on the stack? */
14330 if (verCurrentState.esStackDepth != 1)
14332 /* if not, just don't inline the method */
14334 compInlineResult->NoteFatal(InlineObservation::CALLEE_THROW_WITH_INVALID_STACK);
14339 if (tiVerificationNeeded)
14341 tiRetVal = impStackTop().seTypeInfo;
14342 Verify(tiRetVal.IsObjRef(), "object ref expected");
14343 if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
14345 Verify(!tiRetVal.IsThisPtr(), "throw uninitialized this");
14349 block->bbSetRunRarely(); // any block with a throw is rare
14350 /* Pop the exception object and create the 'throw' helper call */
14352 op1 = gtNewHelperCallNode(CORINFO_HELP_THROW, TYP_VOID, GTF_EXCEPT, gtNewArgList(impPopStack().val));
14355 if (verCurrentState.esStackDepth > 0)
14357 impEvalSideEffects();
14360 assert(verCurrentState.esStackDepth == 0);
14366 assert(!compIsForInlining());
14368 if (info.compXcptnsCount == 0)
14370 BADCODE("rethrow outside catch");
14373 if (tiVerificationNeeded)
14375 Verify(block->hasHndIndex(), "rethrow outside catch");
14376 if (block->hasHndIndex())
14378 EHblkDsc* HBtab = ehGetDsc(block->getHndIndex());
14379 Verify(!HBtab->HasFinallyOrFaultHandler(), "rethrow in finally or fault");
14380 if (HBtab->HasFilter())
14382 // we better be in the handler clause part, not the filter part
14383 Verify(jitIsBetween(compCurBB->bbCodeOffs, HBtab->ebdHndBegOffs(), HBtab->ebdHndEndOffs()),
14384 "rethrow in filter");
14389 /* Create the 'rethrow' helper call */
14391 op1 = gtNewHelperCallNode(CORINFO_HELP_RETHROW, TYP_VOID, GTF_EXCEPT);
14397 assertImp(sz == sizeof(unsigned));
14399 _impResolveToken(CORINFO_TOKENKIND_Class);
14401 JITDUMP(" %08X", resolvedToken.token);
14403 if (tiVerificationNeeded)
14405 typeInfo tiTo = impStackTop().seTypeInfo;
14406 typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
14408 Verify(tiTo.IsByRef(), "byref expected");
14409 Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
14411 Verify(tiCompatibleWith(tiInstr, tiTo.DereferenceByRef(), false),
14412 "type operand incompatible with type of address");
14415 size = info.compCompHnd->getClassSize(resolvedToken.hClass); // Size
14416 op2 = gtNewIconNode(0); // Value
14417 op1 = impPopStack().val; // Dest
14418 op1 = gtNewBlockVal(op1, size);
14419 op1 = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, false);
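// Sketch of the result (illustrative): for "initobj S" on a byref dest this
// builds an assignment of the constant 0 into a block of size getClassSize(S),
// i.e. a zero-initializing block store (volatile if the prefix was present).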
14424 if (tiVerificationNeeded)
14426 Verify(false, "bad opcode");
14429 op3 = impPopStack().val; // Size
14430 op2 = impPopStack().val; // Value
14431 op1 = impPopStack().val; // Dest
14433 if (op3->IsCnsIntOrI())
14435 size = (unsigned)op3->AsIntConCommon()->IconValue();
14436 op1 = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, size);
14440 op1 = new (this, GT_DYN_BLK) GenTreeDynBlk(op1, op3);
14443 op1 = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, false);
14449 if (tiVerificationNeeded)
14451 Verify(false, "bad opcode");
14453 op3 = impPopStack().val; // Size
14454 op2 = impPopStack().val; // Src
14455 op1 = impPopStack().val; // Dest
14457 if (op3->IsCnsIntOrI())
14459 size = (unsigned)op3->AsIntConCommon()->IconValue();
14460 op1 = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, size);
14464 op1 = new (this, GT_DYN_BLK) GenTreeDynBlk(op1, op3);
14467 if (op2->OperGet() == GT_ADDR)
14469 op2 = op2->gtOp.gtOp1;
14473 op2 = gtNewOperNode(GT_IND, TYP_STRUCT, op2);
14476 op1 = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, true);
14481 assertImp(sz == sizeof(unsigned));
14483 _impResolveToken(CORINFO_TOKENKIND_Class);
14485 JITDUMP(" %08X", resolvedToken.token);
14487 if (tiVerificationNeeded)
14489 typeInfo tiFrom = impStackTop().seTypeInfo;
14490 typeInfo tiTo = impStackTop(1).seTypeInfo;
14491 typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
14493 Verify(tiFrom.IsByRef(), "expected byref source");
14494 Verify(tiTo.IsByRef(), "expected byref destination");
14496 Verify(tiCompatibleWith(tiFrom.DereferenceByRef(), tiInstr, false),
14497 "type of source address incompatible with type operand");
14498 Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
14499 Verify(tiCompatibleWith(tiInstr, tiTo.DereferenceByRef(), false),
14500 "type operand incompatible with type of destination address");
14503 if (!eeIsValueClass(resolvedToken.hClass))
14505 op1 = impPopStack().val; // address to load from
14507 impBashVarAddrsToI(op1);
14509 assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
14511 op1 = gtNewOperNode(GT_IND, TYP_REF, op1);
14512 op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
14514 impPushOnStackNoType(op1);
14515 opcode = CEE_STIND_REF;
14517 goto STIND_POST_VERIFY;
14520 op2 = impPopStack().val; // Src
14521 op1 = impPopStack().val; // Dest
14522 op1 = gtNewCpObjNode(op1, op2, resolvedToken.hClass, ((prefixFlags & PREFIX_VOLATILE) != 0));
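// Illustrative shape: "cpobj T" pops the source and destination addresses and
// imports as a GC-aware struct copy of T's layout from *src to *dest;
// gtNewCpObjNode keeps the class handle so GC pointer fields stay reported.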
14527 assertImp(sz == sizeof(unsigned));
14529 _impResolveToken(CORINFO_TOKENKIND_Class);
14531 JITDUMP(" %08X", resolvedToken.token);
14533 if (eeIsValueClass(resolvedToken.hClass))
14535 lclTyp = TYP_STRUCT;
14542 if (tiVerificationNeeded)
14545 typeInfo tiPtr = impStackTop(1).seTypeInfo;
14547 // Make sure we have a good looking byref
14548 Verify(tiPtr.IsByRef(), "pointer not byref");
14549 Verify(!tiPtr.IsReadonlyByRef(), "write to readonly byref");
14550 if (!tiPtr.IsByRef() || tiPtr.IsReadonlyByRef())
14552 compUnsafeCastUsed = true;
14555 typeInfo ptrVal = DereferenceByRef(tiPtr);
14556 typeInfo argVal = verMakeTypeInfo(resolvedToken.hClass);
14558 if (!tiCompatibleWith(impStackTop(0).seTypeInfo, NormaliseForStack(argVal), true))
14560 Verify(false, "type of value incompatible with type operand");
14561 compUnsafeCastUsed = true;
14564 if (!tiCompatibleWith(argVal, ptrVal, false))
14566 Verify(false, "type operand incompatible with type of address");
14567 compUnsafeCastUsed = true;
14572 compUnsafeCastUsed = true;
14575 if (lclTyp == TYP_REF)
14577 opcode = CEE_STIND_REF;
14578 goto STIND_POST_VERIFY;
14581 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
14582 if (impIsPrimitive(jitTyp))
14584 lclTyp = JITtype2varType(jitTyp);
14585 goto STIND_POST_VERIFY;
14588 op2 = impPopStack().val; // Value
14589 op1 = impPopStack().val; // Ptr
14591 assertImp(varTypeIsStruct(op2));
14593 op1 = impAssignStructPtr(op1, op2, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
14599 assert(!compIsForInlining());
14601 // Being lazy here. Refanys are tricky in terms of gc tracking.
14602 // Since it is uncommon, just don't perform struct promotion in any method that contains mkrefany.
14604 JITDUMP("disabling struct promotion because of mkrefany\n");
14605 fgNoStructPromotion = true;
14607 oper = GT_MKREFANY;
14608 assertImp(sz == sizeof(unsigned));
14610 _impResolveToken(CORINFO_TOKENKIND_Class);
14612 JITDUMP(" %08X", resolvedToken.token);
14614 op2 = impTokenToHandle(&resolvedToken, nullptr, TRUE);
14615 if (op2 == nullptr)
14616 { // compDonotInline()
14620 if (tiVerificationNeeded)
14622 typeInfo tiPtr = impStackTop().seTypeInfo;
14623 typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
14625 Verify(!verIsByRefLike(tiInstr), "mkrefany of byref-like class");
14626 Verify(!tiPtr.IsReadonlyByRef(), "readonly byref used with mkrefany");
14627 Verify(typeInfo::AreEquivalent(tiPtr.DereferenceByRef(), tiInstr), "type mismatch");
14630 accessAllowedResult =
14631 info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14632 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14634 op1 = impPopStack().val;
14636 // @SPECVIOLATION: TYP_INT should not be allowed here by a strict reading of the spec.
14637 // But JIT32 allowed it, so we continue to allow it.
14638 assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL || op1->TypeGet() == TYP_INT);
14640 // MKREFANY returns a struct. op2 is the class token.
14641 op1 = gtNewOperNode(oper, TYP_STRUCT, op1, op2);
14643 impPushOnStack(op1, verMakeTypeInfo(impGetRefAnyClass()));
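// What ends up on the stack is the TypedReference-shaped struct produced by
// GT_MKREFANY: conceptually a (pointer, type handle) pair. impGetRefAnyClass()
// supplies the class handle used for its stack type.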
14649 assertImp(sz == sizeof(unsigned));
14651 _impResolveToken(CORINFO_TOKENKIND_Class);
14653 JITDUMP(" %08X", resolvedToken.token);
14657 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
14659 if (tiVerificationNeeded)
14661 typeInfo tiPtr = impStackTop().seTypeInfo;
14663 // Make sure we have a byref
14664 if (!tiPtr.IsByRef())
14666 Verify(false, "pointer not byref");
14667 compUnsafeCastUsed = true;
14669 typeInfo tiPtrVal = DereferenceByRef(tiPtr);
14671 if (!tiCompatibleWith(tiPtrVal, tiRetVal, false))
14673 Verify(false, "type of address incompatible with type operand");
14674 compUnsafeCastUsed = true;
14676 tiRetVal.NormaliseForStack();
14680 compUnsafeCastUsed = true;
14683 if (eeIsValueClass(resolvedToken.hClass))
14685 lclTyp = TYP_STRUCT;
14690 opcode = CEE_LDIND_REF;
14691 goto LDIND_POST_VERIFY;
14694 op1 = impPopStack().val;
14696 assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL);
14698 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
14699 if (impIsPrimitive(jitTyp))
14701 op1 = gtNewOperNode(GT_IND, JITtype2varType(jitTyp), op1);
14703 // Could point anywhere, e.g. a boxed class static int
14704 op1->gtFlags |= GTF_IND_TGTANYWHERE | GTF_GLOB_REF;
14705 assertImp(varTypeIsArithmetic(op1->gtType));
14709 // OBJ returns a struct
14710 // and carries an inline argument which is the class token of the loaded obj
14711 op1 = gtNewObjNode(resolvedToken.hClass, op1);
14713 op1->gtFlags |= GTF_EXCEPT;
14715 impPushOnStack(op1, tiRetVal);
14720 if (tiVerificationNeeded)
14722 typeInfo tiArray = impStackTop().seTypeInfo;
14723 Verify(verIsSDArray(tiArray), "bad array");
14724 tiRetVal = typeInfo(TI_INT);
14727 op1 = impPopStack().val;
14728 if (!opts.MinOpts() && !opts.compDbgCode)
14730 /* Use GT_ARR_LENGTH operator so rng check opts see this */
14731 GenTreeArrLen* arrLen =
14732 new (this, GT_ARR_LENGTH) GenTreeArrLen(TYP_INT, op1, offsetof(CORINFO_Array, length));
14734 /* Mark the block as containing a length expression */
14736 if (op1->gtOper == GT_LCL_VAR)
14738 block->bbFlags |= BBF_HAS_IDX_LEN;
14745 /* Create the expression "*(array_addr + ArrLenOffs)" */
14746 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
14747 gtNewIconNode(offsetof(CORINFO_Array, length), TYP_I_IMPL));
14748 op1 = gtNewOperNode(GT_IND, TYP_INT, op1);
14749 op1->gtFlags |= GTF_IND_ARR_LEN;
14752 /* An indirection will cause a GPF if the address is null */
14753 op1->gtFlags |= GTF_EXCEPT;
14755 /* Push the result back on the stack */
14756 impPushOnStack(op1, tiRetVal);
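// Summary of the two shapes built above (illustrative): with optimizations
// enabled, "ldlen" imports as GT_ARR_LENGTH(arr) so range-check opts can see
// it; under MinOpts/debuggable code it is the explicit
// *(arr + offsetof(CORINFO_Array, length)) indirection.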
14760 op1 = gtNewHelperCallNode(CORINFO_HELP_USER_BREAKPOINT, TYP_VOID);
14764 if (opts.compDbgCode)
14766 op1 = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
14771 /******************************** NYI *******************************/
14774 OutputDebugStringA("CLR: Invalid x86 breakpoint in IL stream\n");
14777 case CEE_MACRO_END:
14780 BADCODE3("unknown opcode", ": %02X", (int)opcode);
14784 prevOpcode = opcode;
14787 assert(!insertLdloc || opcode == CEE_DUP);
14790 assert(!insertLdloc);
14793 #undef _impResolveToken
14796 #pragma warning(pop)
14799 // Push a local/argument tree on the operand stack
14800 void Compiler::impPushVar(GenTree* op, typeInfo tiRetVal)
14802 tiRetVal.NormaliseForStack();
14804 if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init) && tiRetVal.IsThisPtr())
14806 tiRetVal.SetUninitialisedObjRef();
14809 impPushOnStack(op, tiRetVal);
14812 // Load a local/argument on the operand stack
14813 // lclNum is an index into lvaTable *NOT* the arg/lcl index in the IL
14814 void Compiler::impLoadVar(unsigned lclNum, IL_OFFSET offset, typeInfo tiRetVal)
14818 if (lvaTable[lclNum].lvNormalizeOnLoad())
14820 lclTyp = lvaGetRealType(lclNum);
14824 lclTyp = lvaGetActualType(lclNum);
14827 impPushVar(gtNewLclvNode(lclNum, lclTyp, offset), tiRetVal);
14830 // Load an argument on the operand stack
14831 // Shared by the various CEE_LDARG opcodes
14832 // ilArgNum is the argument index as specified in IL.
14833 // It will be mapped to the correct lvaTable index
14834 void Compiler::impLoadArg(unsigned ilArgNum, IL_OFFSET offset)
14836 Verify(ilArgNum < info.compILargsCount, "bad arg num");
14838 if (compIsForInlining())
14840 if (ilArgNum >= info.compArgsCount)
14842 compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_ARGUMENT_NUMBER);
14846 impPushVar(impInlineFetchArg(ilArgNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo),
14847 impInlineInfo->lclVarInfo[ilArgNum].lclVerTypeInfo);
14851 if (ilArgNum >= info.compArgsCount)
14856 unsigned lclNum = compMapILargNum(ilArgNum); // account for possible hidden param
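// Note: lvaArg0Var can differ from info.compThisArg when the jit has made a
// shadow copy of 'this' (for example, if the IL writes to it); loads of 'this'
// must then come from that copy.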
14858 if (lclNum == info.compThisArg)
14860 lclNum = lvaArg0Var;
14863 impLoadVar(lclNum, offset);
14867 // Load a local on the operand stack
14868 // Shared by the various CEE_LDLOC opcodes
14869 // ilLclNum is the local index as specified in IL.
14870 // It will be mapped to the correct lvaTable index
14871 void Compiler::impLoadLoc(unsigned ilLclNum, IL_OFFSET offset)
14873 if (tiVerificationNeeded)
14875 Verify(ilLclNum < info.compMethodInfo->locals.numArgs, "bad loc num");
14876 Verify(info.compInitMem, "initLocals not set");
14879 if (compIsForInlining())
14881 if (ilLclNum >= info.compMethodInfo->locals.numArgs)
14883 compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_LOCAL_NUMBER);
14887 // Get the local type
14888 var_types lclTyp = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclTypeInfo;
14890 typeInfo tiRetVal = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclVerTypeInfo;
14892 /* Have we allocated a temp for this local? */
14894 unsigned lclNum = impInlineFetchLocal(ilLclNum DEBUGARG("Inline ldloc first use temp"));
14896 // All vars of inlined methods should be !lvNormalizeOnLoad()
14898 assert(!lvaTable[lclNum].lvNormalizeOnLoad());
14899 lclTyp = genActualType(lclTyp);
14901 impPushVar(gtNewLclvNode(lclNum, lclTyp), tiRetVal);
14905 if (ilLclNum >= info.compMethodInfo->locals.numArgs)
14910 unsigned lclNum = info.compArgsCount + ilLclNum;
14912 impLoadVar(lclNum, offset);
14916 #ifdef _TARGET_ARM_
14917 /**************************************************************************************
14919 * When assigning a vararg call src to a HFA lcl dest, mark that we cannot promote the
14920 * dst struct, because struct promotion will turn it into a float/double variable while
14921 * the rhs will be an int/long variable. We don't generate code for assigning an int into
14922 * a float, but there is nothing that would prevent such a tree from being created. The tree
14923 * would look like: (=, (typ_float, typ_int)) or (GT_TRANSFER, (typ_float, typ_int))
14925 * tmpNum - the lcl dst variable num that is a struct.
14926 * src - the src tree assigned to the dest that is a struct/int (when varargs call.)
14927 * hClass - the type handle for the struct variable.
14929 * TODO-ARM-CQ: [301608] This is a rare scenario with varargs and struct promotion coming into play,
14930 * however, we could do a codegen of transferring from int to float registers
14931 * (transfer, not a cast.)
14934 void Compiler::impMarkLclDstNotPromotable(unsigned tmpNum, GenTreePtr src, CORINFO_CLASS_HANDLE hClass)
14936 if (src->gtOper == GT_CALL && src->gtCall.IsVarargs() && IsHfa(hClass))
14938 int hfaSlots = GetHfaCount(hClass);
14939 var_types hfaType = GetHfaType(hClass);
14941 // If we have varargs we morph the method's return type to be "int" irrespective of its original
14942 // type (struct/float) at the importer, because the ABI specifies that the return is in integer registers.
14943 // We don't want struct promotion to rewrite an expression like
14944 // lclFld_int = callvar_int() into lclFld_float = callvar_int();
14945 // This means an int is getting assigned to a float without a cast. Prevent the promotion.
14946 if ((hfaType == TYP_DOUBLE && hfaSlots == sizeof(double) / REGSIZE_BYTES) ||
14947 (hfaType == TYP_FLOAT && hfaSlots == sizeof(float) / REGSIZE_BYTES))
14949 // Make sure this struct type stays a struct so we can receive the call's return value in a struct.
14950 lvaTable[tmpNum].lvIsMultiRegRet = true;
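// For reference (illustrative; ARM32, REGSIZE_BYTES == 4): the slot counts
// compared above evaluate to sizeof(double)/4 == 2 and sizeof(float)/4 == 1.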
14954 #endif // _TARGET_ARM_
14956 #if FEATURE_MULTIREG_RET
14957 GenTreePtr Compiler::impAssignMultiRegTypeToVar(GenTreePtr op, CORINFO_CLASS_HANDLE hClass)
14959 unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Return value temp for multireg return."));
14960 impAssignTempGen(tmpNum, op, hClass, (unsigned)CHECK_SPILL_NONE);
14961 GenTreePtr ret = gtNewLclvNode(tmpNum, op->gtType);
14963 // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
14964 ret->gtFlags |= GTF_DONT_CSE;
14966 assert(IsMultiRegReturnedType(hClass));
14968 // Mark the var so that fields are not promoted and stay together.
14969 lvaTable[tmpNum].lvIsMultiRegRet = true;
14973 #endif // FEATURE_MULTIREG_RET
14975 // do import for a return
14976 // returns false if inlining was aborted
14977 // opcode can be ret or call in the case of a tail.call
14978 bool Compiler::impReturnInstruction(BasicBlock* block, int prefixFlags, OPCODE& opcode)
14980 if (tiVerificationNeeded)
14982 verVerifyThisPtrInitialised();
14984 unsigned expectedStack = 0;
14985 if (info.compRetType != TYP_VOID)
14987 typeInfo tiVal = impStackTop().seTypeInfo;
14988 typeInfo tiDeclared =
14989 verMakeTypeInfo(info.compMethodInfo->args.retType, info.compMethodInfo->args.retTypeClass);
14991 Verify(!verIsByRefLike(tiDeclared) || verIsSafeToReturnByRef(tiVal), "byref return");
14993 Verify(tiCompatibleWith(tiVal, tiDeclared.NormaliseForStack(), true), "type mismatch");
14996 Verify(verCurrentState.esStackDepth == expectedStack, "stack non-empty on return");
14999 GenTree* op2 = nullptr;
15000 GenTree* op1 = nullptr;
15001 CORINFO_CLASS_HANDLE retClsHnd = nullptr;
15003 if (info.compRetType != TYP_VOID)
15005 StackEntry se = impPopStack(retClsHnd);
15008 if (!compIsForInlining())
15010 impBashVarAddrsToI(op2);
15011 op2 = impImplicitIorI4Cast(op2, info.compRetType);
15012 op2 = impImplicitR4orR8Cast(op2, info.compRetType);
15013 assertImp((genActualType(op2->TypeGet()) == genActualType(info.compRetType)) ||
15014 ((op2->TypeGet() == TYP_I_IMPL) && (info.compRetType == TYP_BYREF)) ||
15015 ((op2->TypeGet() == TYP_BYREF) && (info.compRetType == TYP_I_IMPL)) ||
15016 (varTypeIsFloating(op2->gtType) && varTypeIsFloating(info.compRetType)) ||
15017 (varTypeIsStruct(op2) && varTypeIsStruct(info.compRetType)));
15020 if (opts.compGcChecks && info.compRetType == TYP_REF)
15022 // DDB 3483 : JIT Stress: early termination of GC ref's life time in exception code path
15023 // VSW 440513: Incorrect gcinfo on the return value under COMPlus_JitGCChecks=1 for methods with
15026 assert(op2->gtType == TYP_REF);
15028 // confirm that the argument is a GC pointer (for debugging (GC stress))
15029 GenTreeArgList* args = gtNewArgList(op2);
15030 op2 = gtNewHelperCallNode(CORINFO_HELP_CHECK_OBJ, TYP_REF, 0, args);
15034 printf("\ncompGcChecks tree:\n");
15042 // inlinee's stack should be empty now.
15043 assert(verCurrentState.esStackDepth == 0);
15048 printf("\n\n Inlinee Return expression (before normalization) =>\n");
15053 // Make sure the type matches the original call.
15055 var_types returnType = genActualType(op2->gtType);
15056 var_types originalCallType = impInlineInfo->inlineCandidateInfo->fncRetType;
15057 if ((returnType != originalCallType) && (originalCallType == TYP_STRUCT))
15059 originalCallType = impNormStructType(impInlineInfo->inlineCandidateInfo->methInfo.args.retTypeClass);
15062 if (returnType != originalCallType)
15064 compInlineResult->NoteFatal(InlineObservation::CALLSITE_RETURN_TYPE_MISMATCH);
15068 // Below, we are going to set impInlineInfo->retExpr to the tree with the return
15069 // expression. At this point, retExpr could already be set if there are multiple
15070 // return blocks (meaning lvaInlineeReturnSpillTemp != BAD_VAR_NUM) and one of
15071 // the other blocks already set it. If there is only a single return block,
15072 // retExpr shouldn't be set. However, this is not true if we reimport a block
15073 // with a return. In that case, retExpr will be set, then the block will be
15074 // reimported, but retExpr won't get cleared as part of setting the block to
15075 // be reimported. The reimported retExpr value should be the same, so even if
15076 // we don't unconditionally overwrite it, it shouldn't matter.
15077 if (info.compRetNativeType != TYP_STRUCT)
15079 // compRetNativeType is not TYP_STRUCT.
15080 // This implies it could be either a scalar type or SIMD vector type or
15081 // a struct type that can be normalized to a scalar type.
15083 if (varTypeIsStruct(info.compRetType))
15085 noway_assert(info.compRetBuffArg == BAD_VAR_NUM);
15086 // adjust the type away from struct to integral
15087 // and no normalizing
15088 op2 = impFixupStructReturnType(op2, retClsHnd);
15092 // Do we have to normalize?
15093 var_types fncRealRetType = JITtype2varType(info.compMethodInfo->args.retType);
15094 if ((varTypeIsSmall(op2->TypeGet()) || varTypeIsSmall(fncRealRetType)) &&
15095 fgCastNeeded(op2, fncRealRetType))
15097 // Small-typed return values are normalized by the callee
15098 op2 = gtNewCastNode(TYP_INT, op2, fncRealRetType);
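// Example (illustrative): an inlinee declared to return 'short' whose return
// expression is a full-width TYP_INT value gets a cast to the small type here
// (result type still TYP_INT), matching the callee-side normalization that the
// caller expects.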
15102 if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15104 assert(info.compRetNativeType != TYP_VOID &&
15105 (fgMoreThanOneReturnBlock() || impInlineInfo->hasPinnedLocals));
15107 // This is a bit of a workaround...
15108 // If we are inlining a call that returns a struct, where the actual "native" return type is
15109 // not a struct (for example, the struct is composed of exactly one int, and the native
15110 // return type is thus an int), and the inlinee has multiple return blocks (thus,
15111 // lvaInlineeReturnSpillTemp is != BAD_VAR_NUM, and is the index of a local var that is set
15112 // to the *native* return type), and at least one of the return blocks is the result of
15113 // a call, then we have a problem. The situation is like this (from a failed test case):
15116 // // Note: valuetype plinq_devtests.LazyTests/LIX is a struct with only a single int
15117 // call !!0 [mscorlib]System.Threading.LazyInitializer::EnsureInitialized<valuetype
15118 // plinq_devtests.LazyTests/LIX>(!!0&, bool&, object&, class [mscorlib]System.Func`1<!!0>)
15122 // ldobj !!T // this gets bashed to a GT_LCL_FLD, type TYP_INT
15125 // call !!0 System.Threading.LazyInitializer::EnsureInitializedCore<!!0>(!!0&, bool&,
15126 // object&, class System.Func`1<!!0>)
15129 // In the code above, when we call impFixupStructReturnType(), we will change the op2 return type
15130 // of the inlinee return node, but we don't do that for GT_CALL nodes, which we delay until
15131 // morphing when we call fgFixupStructReturn(). We do this, apparently, to handle nested
15132 // inlining properly by leaving the correct type on the GT_CALL node through importing.
15134 // To fix this, for this case, we temporarily change the GT_CALL node type to the
15135 // native return type, which is what it will be set to eventually. We generate the
15136 // assignment to the return temp, using the correct type, and then restore the GT_CALL
15137 // node type. During morphing, the GT_CALL will get the correct, final, native return type.
15139 bool restoreType = false;
15140 if ((op2->OperGet() == GT_CALL) && (info.compRetType == TYP_STRUCT))
15142 noway_assert(op2->TypeGet() == TYP_STRUCT);
15143 op2->gtType = info.compRetNativeType;
15144 restoreType = true;
15147 impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(),
15148 (unsigned)CHECK_SPILL_ALL);
15150 GenTreePtr tmpOp2 = gtNewLclvNode(lvaInlineeReturnSpillTemp, op2->TypeGet());
15154 op2->gtType = TYP_STRUCT; // restore it to what it was
15160 if (impInlineInfo->retExpr)
15162 // Some other block(s) have seen the CEE_RET first.
15163 // Better they spilled to the same temp.
15164 assert(impInlineInfo->retExpr->gtOper == GT_LCL_VAR);
15165 assert(impInlineInfo->retExpr->gtLclVarCommon.gtLclNum == op2->gtLclVarCommon.gtLclNum);
15173 printf("\n\n Inlinee Return expression (after normalization) =>\n");
15178 // Report the return expression
15179 impInlineInfo->retExpr = op2;
15183 // compRetNativeType is TYP_STRUCT.
15184 // This implies a struct return via a RetBuf arg or a multi-reg struct return.
15186 GenTreePtr iciCall = impInlineInfo->iciCall;
15187 assert(iciCall->gtOper == GT_CALL);
15189 // Assign the inlinee return into a spill temp.
15190 // spill temp only exists if there are multiple return points
15191 if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15193 // in this case we have to insert multiple struct copies to the temp
15194 // and the retexpr is just the temp.
15195 assert(info.compRetNativeType != TYP_VOID);
15196 assert(fgMoreThanOneReturnBlock() || impInlineInfo->hasPinnedLocals);
15198 impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(),
15199 (unsigned)CHECK_SPILL_ALL);
15202 #if defined(_TARGET_ARM_) || defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15203 #if defined(_TARGET_ARM_)
15204 // TODO-ARM64-NYI: HFA
15205 // TODO-AMD64-Unix and TODO-ARM: once the ARM64 functionality is implemented, the
15206 // next ifdefs could be refactored into a single method with the ifdef inside.
15207 if (IsHfa(retClsHnd))
15209 // Same as !IsHfa but just don't bother with impAssignStructPtr.
15210 #else // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15211 ReturnTypeDesc retTypeDesc;
15212 retTypeDesc.InitializeStructReturnType(this, retClsHnd);
15213 unsigned retRegCount = retTypeDesc.GetReturnRegCount();
15215 if (retRegCount != 0)
15217 // If single eightbyte, the return type would have been normalized and there won't be a temp var.
15218 // This code will be called only if the struct return has not been normalized (i.e. 2 eightbytes -
15220 assert(retRegCount == MAX_RET_REG_COUNT);
15221 // Same as !structDesc.passedInRegisters but just don't bother with impAssignStructPtr.
15222 CLANG_FORMAT_COMMENT_ANCHOR;
15223 #endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15225 if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15227 if (!impInlineInfo->retExpr)
15229 #if defined(_TARGET_ARM_)
15230 impInlineInfo->retExpr = gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType);
15231 #else // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15232 // The inlinee compiler has figured out the type of the temp already. Use it here.
15233 impInlineInfo->retExpr =
15234 gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType);
15235 #endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15240 impInlineInfo->retExpr = op2;
15244 #elif defined(_TARGET_ARM64_)
15245 ReturnTypeDesc retTypeDesc;
15246 retTypeDesc.InitializeStructReturnType(this, retClsHnd);
15247 unsigned retRegCount = retTypeDesc.GetReturnRegCount();
15249 if (retRegCount != 0)
15251 assert(!iciCall->AsCall()->HasRetBufArg());
15252 assert(retRegCount >= 2);
15253 if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15255 if (!impInlineInfo->retExpr)
15257 // The inlinee compiler has figured out the type of the temp already. Use it here.
15258 impInlineInfo->retExpr =
15259 gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType);
15264 impInlineInfo->retExpr = op2;
15268 #endif // defined(_TARGET_ARM64_)
15270 assert(iciCall->AsCall()->HasRetBufArg());
15271 GenTreePtr dest = gtCloneExpr(iciCall->gtCall.gtCallArgs->gtOp.gtOp1);
15272 // spill temp only exists if there are multiple return points
15273 if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15275 // if this is the first return we have seen set the retExpr
15276 if (!impInlineInfo->retExpr)
15278 impInlineInfo->retExpr =
15279 impAssignStructPtr(dest, gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType),
15280 retClsHnd, (unsigned)CHECK_SPILL_ALL);
15285 impInlineInfo->retExpr = impAssignStructPtr(dest, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL);
15292 if (compIsForInlining())
15297 if (info.compRetType == TYP_VOID)
15300 op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID);
15302 else if (info.compRetBuffArg != BAD_VAR_NUM)
15304 // Assign value to return buff (first param)
15305 GenTreePtr retBuffAddr = gtNewLclvNode(info.compRetBuffArg, TYP_BYREF, impCurStmtOffs);
15307 op2 = impAssignStructPtr(retBuffAddr, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL);
15308 impAppendTree(op2, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
15310 // There are cases where the address of the implicit RetBuf should be returned explicitly (in RAX).
15311 CLANG_FORMAT_COMMENT_ANCHOR;
15313 #if defined(_TARGET_AMD64_)
15315 // The x64 (System V and Win64) calling convention requires us to
15316 // return the implicit return buffer explicitly (in RAX).
15317 // Change the return type to be BYREF.
15318 op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF));
15319 #else // !defined(_TARGET_AMD64_)
15320 // On non-AMD64 targets the profiler hook requires us to return the implicit RetBuf explicitly (in RAX).
15321 // In that case the return value of the function is changed to BYREF.
15322 // If the profiler hook is not needed, the return type of the function is TYP_VOID.
15323 if (compIsProfilerHookNeeded())
15325 op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF));
15330 op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID);
15332 #endif // !defined(_TARGET_AMD64_)
15334 else if (varTypeIsStruct(info.compRetType))
15336 #if !FEATURE_MULTIREG_RET
15337 // For both ARM architectures the HFA native types are maintained as structs.
15338 // On System V AMD64, multi-reg struct returns are also left as structs.
15339 noway_assert(info.compRetNativeType != TYP_STRUCT);
15341 op2 = impFixupStructReturnType(op2, retClsHnd);
15343 op1 = gtNewOperNode(GT_RETURN, genActualType(info.compRetNativeType), op2);
15348 op1 = gtNewOperNode(GT_RETURN, genActualType(info.compRetType), op2);
15351 // We must have imported a tailcall and jumped to RET
15352 if (prefixFlags & PREFIX_TAILCALL)
15354 #ifndef _TARGET_AMD64_
15356 // This cannot be asserted on Amd64 since we permit the following IL pattern:
15360 assert(verCurrentState.esStackDepth == 0 && impOpcodeIsCallOpcode(opcode));
15363 opcode = CEE_RET; // To prevent trying to spill if CALL_SITE_BOUNDARIES
15365 // impImportCall() would have already appended TYP_VOID calls
15366 if (info.compRetType == TYP_VOID)
15372 impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
15374 // Remember at which BC offset the tree was finished
15375 impNoteLastILoffs();
15380 /*****************************************************************************
15381 * Mark the block as unimported.
15382 * Note that the caller is responsible for calling impImportBlockPending(),
15383 * with the appropriate stack-state
15386 inline void Compiler::impReimportMarkBlock(BasicBlock* block)
15389 if (verbose && (block->bbFlags & BBF_IMPORTED))
15391 printf("\nBB%02u will be reimported\n", block->bbNum);
15395 block->bbFlags &= ~BBF_IMPORTED;
15398 /*****************************************************************************
15399 * Mark the successors of the given block as unimported.
15400 * Note that the caller is responsible for calling impImportBlockPending()
15401 * for all the successors, with the appropriate stack-state.
15404 void Compiler::impReimportMarkSuccessors(BasicBlock* block)
15406 for (unsigned i = 0; i < block->NumSucc(); i++)
15408 impReimportMarkBlock(block->GetSucc(i));
15412 /*****************************************************************************
15414 * Filter wrapper to handle only the passed-in exception code
15418 LONG FilterVerificationExceptions(PEXCEPTION_POINTERS pExceptionPointers, LPVOID lpvParam)
15420 if (pExceptionPointers->ExceptionRecord->ExceptionCode == SEH_VERIFICATION_EXCEPTION)
15422 return EXCEPTION_EXECUTE_HANDLER;
15425 return EXCEPTION_CONTINUE_SEARCH;
15428 void Compiler::impVerifyEHBlock(BasicBlock* block, bool isTryStart)
15430 assert(block->hasTryIndex());
15431 assert(!compIsForInlining());
15433 unsigned tryIndex = block->getTryIndex();
15434 EHblkDsc* HBtab = ehGetDsc(tryIndex);
15438 assert(block->bbFlags & BBF_TRY_BEG);
15440 // The Stack must be empty
15442 if (block->bbStkDepth != 0)
15444 BADCODE("Evaluation stack must be empty on entry into a try block");
15448 // Save the stack contents, we'll need to restore it later
15450 SavedStack blockState;
15451 impSaveStackState(&blockState, false);
15453 while (HBtab != nullptr)
15457 // Are we verifying that an instance constructor properly initializes its 'this' pointer once?
15458 // We do not allow the 'this' pointer to be uninitialized when entering most kinds of try regions
15460 if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
15462 // We trigger an invalid program exception here unless we have a try/fault region.
15464 if (HBtab->HasCatchHandler() || HBtab->HasFinallyHandler() || HBtab->HasFilter())
15467 "The 'this' pointer of an instance constructor is not intialized upon entry to a try region");
15471 // Allow a try/fault region to proceed.
15472 assert(HBtab->HasFaultHandler());
15476 /* Recursively process the handler block */
15477 BasicBlock* hndBegBB = HBtab->ebdHndBeg;
15479 // Construct the proper verification stack state
15480 // either empty or one that contains just
15481 // the Exception Object that we are dealing with
15483 verCurrentState.esStackDepth = 0;
15485 if (handlerGetsXcptnObj(hndBegBB->bbCatchTyp))
15487 CORINFO_CLASS_HANDLE clsHnd;
15489 if (HBtab->HasFilter())
15491 clsHnd = impGetObjectClass();
15495 CORINFO_RESOLVED_TOKEN resolvedToken;
15497 resolvedToken.tokenContext = impTokenLookupContextHandle;
15498 resolvedToken.tokenScope = info.compScopeHnd;
15499 resolvedToken.token = HBtab->ebdTyp;
15500 resolvedToken.tokenType = CORINFO_TOKENKIND_Class;
15501 info.compCompHnd->resolveToken(&resolvedToken);
15503 clsHnd = resolvedToken.hClass;
15506 // push the catch arg on the stack, spill to a temp if necessary
15507 // Note: can update HBtab->ebdHndBeg!
15508 hndBegBB = impPushCatchArgOnStack(hndBegBB, clsHnd);
15511 // Queue up the handler for importing
15513 impImportBlockPending(hndBegBB);
15515 if (HBtab->HasFilter())
15517 /* @VERIFICATION : Ideally the end-of-filter state should get
15518 propagated to the catch handler; this is an incompleteness,
15519 but not a security/compliance issue, since the only
15520 interesting state is the 'thisInit' state.
15523 verCurrentState.esStackDepth = 0;
15525 BasicBlock* filterBB = HBtab->ebdFilter;
15527 // push the catch arg on the stack, spill to a temp if necessary
15528 // Note: can update HBtab->ebdFilter!
15529 filterBB = impPushCatchArgOnStack(filterBB, impGetObjectClass());
15531 impImportBlockPending(filterBB);
15534 else if (verTrackObjCtorInitState && HBtab->HasFaultHandler())
15536 /* Recursively process the handler block */
15538 verCurrentState.esStackDepth = 0;
15540 // Queue up the fault handler for importing
15542 impImportBlockPending(HBtab->ebdHndBeg);
15545 // Now process our enclosing try index (if any)
15547 tryIndex = HBtab->ebdEnclosingTryIndex;
15548 if (tryIndex == EHblkDsc::NO_ENCLOSING_INDEX)
15554 HBtab = ehGetDsc(tryIndex);
15558 // Restore the stack contents
15559 impRestoreStackState(&blockState);
15562 //***************************************************************
15563 // Import the instructions for the given basic block. Perform
15564 // verification, throwing an exception on failure. Push any successor blocks that are enabled for the first
15565 // time, or whose verification pre-state is changed.
15568 #pragma warning(push)
15569 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
15571 void Compiler::impImportBlock(BasicBlock* block)
15573 // BBF_INTERNAL blocks only exist during importation due to EH canonicalization. We need to
15574 // handle them specially. In particular, there is no IL to import for them, but we do need
15575 // to mark them as imported and put their successors on the pending import list.
15576 if (block->bbFlags & BBF_INTERNAL)
15578 JITDUMP("Marking BBF_INTERNAL block BB%02u as BBF_IMPORTED\n", block->bbNum);
15579 block->bbFlags |= BBF_IMPORTED;
15581 for (unsigned i = 0; i < block->NumSucc(); i++)
15583 impImportBlockPending(block->GetSucc(i));
15593 /* Make the block globally available */
15598 /* Initialize the debug variables */
15599 impCurOpcName = "unknown";
15600 impCurOpcOffs = block->bbCodeOffs;
15603 /* Set the current stack state to the merged result */
15604 verResetCurrentState(block, &verCurrentState);
15606 /* Now walk the code and import the IL into GenTrees */
15608 struct FilterVerificationExceptionsParam
15613 FilterVerificationExceptionsParam param;
15615 param.pThis = this;
15616 param.block = block;
15618 PAL_TRY(FilterVerificationExceptionsParam*, pParam, ¶m)
15620 /* @VERIFICATION : For now, the only state propagation from try
15621 to its handler is the "thisInit" state (stack is empty at start of try).
15622 In general, for state that we track in verification, we need to
15623 model the possibility that an exception might happen at any IL
15624 instruction, so we really need to merge all states that obtain
15625 between IL instructions in a try block into the start states of
15628 However we do not allow the 'this' pointer to be uninitialized when
15629 entering most kinds of try regions (only try/fault are allowed to have
15630 an uninitialized this pointer on entry to the try)
15632 Fortunately, the stack is thrown away when an exception
15633 leads to a handler, so we don't have to worry about that.
15634 We DO, however, have to worry about the "thisInit" state.
15635 But only for the try/fault case.
15637 The only allowed transition is from TIS_Uninit to TIS_Init.
15639 So for a try/fault region for the fault handler block
15640 we will merge the start state of the try begin
15641 and the post-state of each block that is part of this try region
15644 // merge the start state of the try begin
15646 if (pParam->block->bbFlags & BBF_TRY_BEG)
15648 pParam->pThis->impVerifyEHBlock(pParam->block, true);
15651 pParam->pThis->impImportBlockCode(pParam->block);
15653 // As discussed above:
15654 // merge the post-state of each block that is part of this try region
15656 if (pParam->block->hasTryIndex())
15658 pParam->pThis->impVerifyEHBlock(pParam->block, false);
15661 PAL_EXCEPT_FILTER(FilterVerificationExceptions)
15663 verHandleVerificationFailure(block DEBUGARG(false));
15667 if (compDonotInline())
15672 assert(!compDonotInline());
15674 markImport = false;
15678 unsigned baseTmp = NO_BASE_TMP; // input temps assigned to successor blocks
15679 bool reimportSpillClique = false;
15680 BasicBlock* tgtBlock = nullptr;
15682 /* If the stack is non-empty, we might have to spill its contents */
15684 if (verCurrentState.esStackDepth != 0)
15686 impBoxTemp = BAD_VAR_NUM; // if a box temp is used in a block that leaves something
15687 // on the stack, its lifetime is hard to determine, simply
15688 // don't reuse such temps.
15690 GenTreePtr addStmt = nullptr;
15692 /* Do the successors of 'block' have any other predecessors ?
15693 We do not want to do some of the optimizations related to multiRef
15694 if we can reimport blocks */
15696 unsigned multRef = impCanReimport ? unsigned(~0) : 0;
15698 switch (block->bbJumpKind)
15702 /* Temporarily remove the 'jtrue' from the end of the tree list */
15704 assert(impTreeLast);
15705 assert(impTreeLast->gtOper == GT_STMT);
15706 assert(impTreeLast->gtStmt.gtStmtExpr->gtOper == GT_JTRUE);
15708 addStmt = impTreeLast;
15709 impTreeLast = impTreeLast->gtPrev;
15711 /* Note if the next block has more than one ancestor */
15713 multRef |= block->bbNext->bbRefs;
15715 /* Does the next block have temps assigned? */
15717 baseTmp = block->bbNext->bbStkTempsIn;
15718 tgtBlock = block->bbNext;
15720 if (baseTmp != NO_BASE_TMP)
15725 /* Try the target of the jump then */
15727 multRef |= block->bbJumpDest->bbRefs;
15728 baseTmp = block->bbJumpDest->bbStkTempsIn;
15729 tgtBlock = block->bbJumpDest;
15733 multRef |= block->bbJumpDest->bbRefs;
15734 baseTmp = block->bbJumpDest->bbStkTempsIn;
15735 tgtBlock = block->bbJumpDest;
15739 multRef |= block->bbNext->bbRefs;
15740 baseTmp = block->bbNext->bbStkTempsIn;
15741 tgtBlock = block->bbNext;
15746 BasicBlock** jmpTab;
15749 /* Temporarily remove the GT_SWITCH from the end of the tree list */
15751 assert(impTreeLast);
15752 assert(impTreeLast->gtOper == GT_STMT);
15753 assert(impTreeLast->gtStmt.gtStmtExpr->gtOper == GT_SWITCH);
15755 addStmt = impTreeLast;
15756 impTreeLast = impTreeLast->gtPrev;
15758 jmpCnt = block->bbJumpSwt->bbsCount;
15759 jmpTab = block->bbJumpSwt->bbsDstTab;
15763 tgtBlock = (*jmpTab);
15765 multRef |= tgtBlock->bbRefs;
15767 // Thanks to spill cliques, we should have assigned all or none
15768 assert((baseTmp == NO_BASE_TMP) || (baseTmp == tgtBlock->bbStkTempsIn));
15769 baseTmp = tgtBlock->bbStkTempsIn;
15774 } while (++jmpTab, --jmpCnt);
15778 case BBJ_CALLFINALLY:
15779 case BBJ_EHCATCHRET:
15781 case BBJ_EHFINALLYRET:
15782 case BBJ_EHFILTERRET:
15784 NO_WAY("can't have 'unreached' end of BB with non-empty stack");
15788 noway_assert(!"Unexpected bbJumpKind");
15792 assert(multRef >= 1);
15794 /* Do we have a base temp number? */
15796 bool newTemps = (baseTmp == NO_BASE_TMP);
15800 /* Grab enough temps for the whole stack */
15801 baseTmp = impGetSpillTmpBase(block);
15804 /* Spill all stack entries into temps */
15805 unsigned level, tempNum;
15807 JITDUMP("\nSpilling stack entries into temps\n");
15808 for (level = 0, tempNum = baseTmp; level < verCurrentState.esStackDepth; level++, tempNum++)
15810 GenTreePtr tree = verCurrentState.esStack[level].val;
15812 /* VC generates code where it pushes a byref from one branch, and an int (ldc.i4 0) from
15813 the other. This should merge to a byref in unverifiable code.
15814 However, if the branch which leaves the TYP_I_IMPL on the stack is imported first, the
15815 successor would be imported assuming there was a TYP_I_IMPL on
15816 the stack. Thus the value would not get GC-tracked. Hence,
15817 change the temp to TYP_BYREF and reimport the successors.
15818 Note: We should only allow this in unverifiable code.
15820 if (tree->gtType == TYP_BYREF && lvaTable[tempNum].lvType == TYP_I_IMPL && !verNeedsVerification())
15822 lvaTable[tempNum].lvType = TYP_BYREF;
15823 impReimportMarkSuccessors(block);
15827 #ifdef _TARGET_64BIT_
15828 if (genActualType(tree->gtType) == TYP_I_IMPL && lvaTable[tempNum].lvType == TYP_INT)
15830 if (tiVerificationNeeded && tgtBlock->bbEntryState != nullptr &&
15831 (tgtBlock->bbFlags & BBF_FAILED_VERIFICATION) == 0)
15833 // Merge the current state into the entry state of block;
15834 // the call to verMergeEntryStates must have changed
15835 // the entry state of the block by merging the int local var
15836 // and the native-int stack entry.
15837 bool changed = false;
15838 if (verMergeEntryStates(tgtBlock, &changed))
15840 impRetypeEntryStateTemps(tgtBlock);
15841 impReimportBlockPending(tgtBlock);
15846 tgtBlock->bbFlags |= BBF_FAILED_VERIFICATION;
15851 // Some other block in the spill clique set this to "int", but now we have "native int".
15852 // Change the type and go back to re-import any blocks that used the wrong type.
15853 lvaTable[tempNum].lvType = TYP_I_IMPL;
15854 reimportSpillClique = true;
15856 else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_I_IMPL)
15858 // Spill clique has decided this should be "native int", but this block only pushes an "int".
15859 // Insert a sign-extension to "native int" so we match the clique.
15860 verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, TYP_I_IMPL);
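// Example (illustrative): one predecessor pushes "ldc.i4 0" (TYP_INT) while
// another pushes a native int; the spill temp is TYP_I_IMPL, so the int-valued
// side is widened here to keep the whole spill clique at the same type.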
15863 // Consider the case where one branch left a 'byref' on the stack and the other leaves
15864 // an 'int'. On 32-bit, this is allowed (in non-verifiable code) since they are the same
15865 // size. JIT64 managed to make this work on 64-bit. For compatibility, we support JIT64
15866 // behavior instead of asserting and then generating bad code (where we save/restore the
15867 // low 32 bits of a byref pointer to an 'int' sized local). If the 'int' side has been
15868 // imported already, we need to change the type of the local and reimport the spill clique.
15869 // If the 'byref' side has imported, we insert a cast from int to 'native int' to match
15870 // the 'byref' size.
15871 if (!tiVerificationNeeded)
15873 if (genActualType(tree->gtType) == TYP_BYREF && lvaTable[tempNum].lvType == TYP_INT)
15875 // Some other block in the spill clique set this to "int", but now we have "byref".
15876 // Change the type and go back to re-import any blocks that used the wrong type.
15877 lvaTable[tempNum].lvType = TYP_BYREF;
15878 reimportSpillClique = true;
15880 else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_BYREF)
15882 // Spill clique has decided this should be "byref", but this block only pushes an "int".
15883 // Insert a sign-extension to "native int" so we match the clique size.
15884 verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, TYP_I_IMPL);
15887 #endif // _TARGET_64BIT_
15889 #if FEATURE_X87_DOUBLES
15890 // X87 stack doesn't differentiate between float/double
15891 // so promoting is no big deal.
15892 // For everybody else keep it as float until we have a collision and then promote
15893 // Just like for x64's TYP_INT<->TYP_I_IMPL
15895 if (multRef > 1 && tree->gtType == TYP_FLOAT)
15897 verCurrentState.esStack[level].val = gtNewCastNode(TYP_DOUBLE, tree, TYP_DOUBLE);
15900 #else // !FEATURE_X87_DOUBLES
15902 if (tree->gtType == TYP_DOUBLE && lvaTable[tempNum].lvType == TYP_FLOAT)
15904 // Some other block in the spill clique set this to "float", but now we have "double".
15905 // Change the type and go back to re-import any blocks that used the wrong type.
15906 lvaTable[tempNum].lvType = TYP_DOUBLE;
15907 reimportSpillClique = true;
15909 else if (tree->gtType == TYP_FLOAT && lvaTable[tempNum].lvType == TYP_DOUBLE)
15911 // Spill clique has decided this should be "double", but this block only pushes a "float".
15912 // Insert a cast to "double" so we match the clique.
15913 verCurrentState.esStack[level].val = gtNewCastNode(TYP_DOUBLE, tree, TYP_DOUBLE);
15916 #endif // FEATURE_X87_DOUBLES
15918 /* If addStmt has a reference to tempNum (can only happen if we
15919 are spilling to the temps already used by a previous block),
15920 we need to spill addStmt */
15922 if (addStmt && !newTemps && gtHasRef(addStmt->gtStmt.gtStmtExpr, tempNum, false))
15924 GenTreePtr addTree = addStmt->gtStmt.gtStmtExpr;
15926 if (addTree->gtOper == GT_JTRUE)
15928 GenTreePtr relOp = addTree->gtOp.gtOp1;
15929 assert(relOp->OperIsCompare());
15931 var_types type = genActualType(relOp->gtOp.gtOp1->TypeGet());
15933 if (gtHasRef(relOp->gtOp.gtOp1, tempNum, false))
15935 unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op1"));
15936 impAssignTempGen(temp, relOp->gtOp.gtOp1, level);
15937 type = genActualType(lvaTable[temp].TypeGet());
15938 relOp->gtOp.gtOp1 = gtNewLclvNode(temp, type);
15941 if (gtHasRef(relOp->gtOp.gtOp2, tempNum, false))
15943 unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op2"));
15944 impAssignTempGen(temp, relOp->gtOp.gtOp2, level);
15945 type = genActualType(lvaTable[temp].TypeGet());
15946 relOp->gtOp.gtOp2 = gtNewLclvNode(temp, type);
15951 assert(addTree->gtOper == GT_SWITCH && genActualType(addTree->gtOp.gtOp1->gtType) == TYP_I_IMPL);
15953 unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt SWITCH"));
15954 impAssignTempGen(temp, addTree->gtOp.gtOp1, level);
15955 addTree->gtOp.gtOp1 = gtNewLclvNode(temp, TYP_I_IMPL);
15959 /* Spill the stack entry, and replace with the temp */
15961 if (!impSpillStackEntry(level, tempNum
15964 true, "Spill Stack Entry"
15970 BADCODE("bad stack state");
15973 // Oops. Something went wrong when spilling. Bad code.
15974 verHandleVerificationFailure(block DEBUGARG(true));
15980 /* Put back the 'jtrue'/'switch' if we removed it earlier */
15984 impAppendStmt(addStmt, (unsigned)CHECK_SPILL_NONE);
15988 // Some of the append/spill logic works on compCurBB
15990 assert(compCurBB == block);
15992 /* Save the tree list in the block */
15993 impEndTreeList(block);
15995 // impEndTreeList sets BBF_IMPORTED on the block
15996 // We do *NOT* want to set it later than this because
15997 // impReimportSpillClique might clear it if this block is both a
15998 // predecessor and successor in the current spill clique
15999 assert(block->bbFlags & BBF_IMPORTED);
16001 // If we had a int/native int, or float/double collision, we need to re-import
16002 if (reimportSpillClique)
16004 // This will re-import all the successors of block (as well as each of their predecessors)
16005 impReimportSpillClique(block);
16007 // For blocks that haven't been imported yet, we still need to mark them as pending import.
16008 for (unsigned i = 0; i < block->NumSucc(); i++)
16010 BasicBlock* succ = block->GetSucc(i);
16011 if ((succ->bbFlags & BBF_IMPORTED) == 0)
16013 impImportBlockPending(succ);
16017 else // the normal case
16019 // otherwise just import the successors of block
16021 /* Does this block jump to any other blocks? */
16022 for (unsigned i = 0; i < block->NumSucc(); i++)
16024 impImportBlockPending(block->GetSucc(i));
16029 #pragma warning(pop)
16032 /*****************************************************************************/
16034 // Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if
16035 // necessary (and ensures that it is a member of the set of BB's on the list, by setting its byte in
16036 // impPendingBlockMembers). Merges the current verification state into the verification state of "block"
16037 // (its "pre-state").
16039 void Compiler::impImportBlockPending(BasicBlock* block)
16044 printf("\nimpImportBlockPending for BB%02u\n", block->bbNum);
16048 // We will add a block to the pending set if it has not already been imported (or needs to be re-imported),
16049 // or if it has, but merging in a predecessor's post-state changes the block's pre-state.
16050 // (When we're doing verification, we always attempt the merge to detect verification errors.)
16052 // If the block has not been imported, add to pending set.
16053 bool addToPending = ((block->bbFlags & BBF_IMPORTED) == 0);
16055 // Initialize bbEntryState just the first time we try to add this block to the pending list
16056 // Just because bbEntryState is NULL, doesn't mean the pre-state wasn't previously set
16057 // We use NULL to indicate the 'common' state to avoid memory allocation
16058 if ((block->bbEntryState == nullptr) && ((block->bbFlags & (BBF_IMPORTED | BBF_FAILED_VERIFICATION)) == 0) &&
16059 (impGetPendingBlockMember(block) == 0))
16061 verInitBBEntryState(block, &verCurrentState);
16062 assert(block->bbStkDepth == 0);
16063 block->bbStkDepth = static_cast<unsigned short>(verCurrentState.esStackDepth);
16064 assert(addToPending);
16065 assert(impGetPendingBlockMember(block) == 0);
16069 // The stack should have the same height on entry to the block from all its predecessors.
16070 if (block->bbStkDepth != verCurrentState.esStackDepth)
16074 sprintf_s(buffer, sizeof(buffer),
16075 "Block at offset %4.4x to %4.4x in %s entered with different stack depths.\n"
16076 "Previous depth was %d, current depth is %d",
16077 block->bbCodeOffs, block->bbCodeOffsEnd, info.compFullName, block->bbStkDepth,
16078 verCurrentState.esStackDepth);
16079 buffer[400 - 1] = 0;
16082 NO_WAY("Block entered with different stack depths");
16086 // Additionally, if we need to verify, merge the verification state.
16087 if (tiVerificationNeeded)
16089 // Merge the current state into the entry state of block; if this does not change the entry state
16090 // by merging, do not add the block to the pending-list.
16091 bool changed = false;
16092 if (!verMergeEntryStates(block, &changed))
16094 block->bbFlags |= BBF_FAILED_VERIFICATION;
16095 addToPending = true; // We will pop it off, and check the flag set above.
16099 addToPending = true;
16101 JITDUMP("Adding BB%02u to pending set due to new merge result\n", block->bbNum);
16110 if (block->bbStkDepth > 0)
16112 // We need to fix the types of any spill temps that might have changed:
16113 // int->native int, float->double, int->byref, etc.
16114 impRetypeEntryStateTemps(block);
16117 // OK, we must add to the pending list, if it's not already in it.
16118 if (impGetPendingBlockMember(block) != 0)
16124 // Get an entry to add to the pending list
16128 if (impPendingFree)
16130 // We can reuse one of the freed up dscs.
16131 dsc = impPendingFree;
16132 impPendingFree = dsc->pdNext;
16136 // We have to create a new dsc
16137 dsc = new (this, CMK_Unknown) PendingDsc;
16141 dsc->pdSavedStack.ssDepth = verCurrentState.esStackDepth;
16142 dsc->pdThisPtrInit = verCurrentState.thisInitialized;
16144 // Save the stack trees for later
16146 if (verCurrentState.esStackDepth)
16148 impSaveStackState(&dsc->pdSavedStack, false);
16151 // Add the entry to the pending list
16153 dsc->pdNext = impPendingList;
16154 impPendingList = dsc;
16155 impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set.
16157 // Various assertions require us to now consider the block as not imported (at least for
16158 // the final time...)
16159 block->bbFlags &= ~BBF_IMPORTED;
16164 printf("Added PendingDsc - %08p for BB%02u\n", dspPtr(dsc), block->bbNum);
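// Editor's note: the pending-list management above combines a LIFO worklist, a per-block
// membership byte (impPendingBlockMembers), and a free list of descriptors so that
// PendingDsc allocations can be reused. The standalone sketch below (hypothetical types,
// not compiled into the JIT) shows the same allocate-from-free-list / retire-to-free-list
// pattern in isolation.
#if 0 // illustrative sketch only
struct WorkItem
{
    int       payload;
    WorkItem* next;
};

struct Worklist
{
    WorkItem* pending  = nullptr; // items waiting to be processed
    WorkItem* freeList = nullptr; // retired descriptors, reused to avoid reallocation

    void Push(int payload)
    {
        WorkItem* item;
        if (freeList != nullptr)
        {
            item     = freeList; // reuse a retired descriptor
            freeList = item->next;
        }
        else
        {
            item = new WorkItem; // otherwise allocate a fresh one
        }
        item->payload = payload;
        item->next    = pending;
        pending       = item;
    }

    bool Pop(int* payload)
    {
        if (pending == nullptr)
        {
            return false;
        }
        WorkItem* item = pending;
        pending        = item->next;
        *payload       = item->payload;
        item->next     = freeList; // retire the descriptor for later reuse
        freeList       = item;
        return true;
    }
};
#endif // illustrative sketch only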
16169 /*****************************************************************************/
16171 // Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if
16172 // necessary (and ensures that it is a member of the set of BB's on the list, by setting its byte in
16173 // impPendingBlockMembers). Does *NOT* change the existing "pre-state" of the block.
16175 void Compiler::impReimportBlockPending(BasicBlock* block)
16177 JITDUMP("\nimpReimportBlockPending for BB%02u", block->bbNum);
16179 assert(block->bbFlags & BBF_IMPORTED);
16181 // OK, we must add to the pending list, if it's not already in it.
16182 if (impGetPendingBlockMember(block) != 0)
16187 // Get an entry to add to the pending list
16191 if (impPendingFree)
16193 // We can reuse one of the freed up dscs.
16194 dsc = impPendingFree;
16195 impPendingFree = dsc->pdNext;
16199 // We have to create a new dsc
16200 dsc = new (this, CMK_ImpStack) PendingDsc;
16205 if (block->bbEntryState)
16207 dsc->pdThisPtrInit = block->bbEntryState->thisInitialized;
16208 dsc->pdSavedStack.ssDepth = block->bbEntryState->esStackDepth;
16209 dsc->pdSavedStack.ssTrees = block->bbEntryState->esStack;
16213 dsc->pdThisPtrInit = TIS_Bottom;
16214 dsc->pdSavedStack.ssDepth = 0;
16215 dsc->pdSavedStack.ssTrees = nullptr;
16218 // Add the entry to the pending list
16220 dsc->pdNext = impPendingList;
16221 impPendingList = dsc;
16222 impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set.
16224 // Various assertions require us to now consider the block as not imported (at least for
16225 // the final time...)
16226 block->bbFlags &= ~BBF_IMPORTED;
16231 printf("Added PendingDsc - %08p for BB%02u\n", dspPtr(dsc), block->bbNum);
16236 void* Compiler::BlockListNode::operator new(size_t sz, Compiler* comp)
16238 if (comp->impBlockListNodeFreeList == nullptr)
16240 return (BlockListNode*)comp->compGetMem(sizeof(BlockListNode), CMK_BasicBlock);
16244 BlockListNode* res = comp->impBlockListNodeFreeList;
16245 comp->impBlockListNodeFreeList = res->m_next;
16250 void Compiler::FreeBlockListNode(Compiler::BlockListNode* node)
16252 node->m_next = impBlockListNodeFreeList;
16253 impBlockListNodeFreeList = node;
16256 void Compiler::impWalkSpillCliqueFromPred(BasicBlock* block, SpillCliqueWalker* callback)
16260 noway_assert(!fgComputePredsDone);
16261 if (!fgCheapPredsValid)
16263 fgComputeCheapPreds();
16266 BlockListNode* succCliqueToDo = nullptr;
16267 BlockListNode* predCliqueToDo = new (this) BlockListNode(block);
16271 // Look at the successors of every member of the predecessor to-do list.
16272 while (predCliqueToDo != nullptr)
16274 BlockListNode* node = predCliqueToDo;
16275 predCliqueToDo = node->m_next;
16276 BasicBlock* blk = node->m_blk;
16277 FreeBlockListNode(node);
16279 for (unsigned succNum = 0; succNum < blk->NumSucc(); succNum++)
16281 BasicBlock* succ = blk->GetSucc(succNum);
16282 // If it's not already in the clique, add it, and also add it
16283 // as a member of the successor "toDo" set.
16284 if (impSpillCliqueGetMember(SpillCliqueSucc, succ) == 0)
16286 callback->Visit(SpillCliqueSucc, succ);
16287 impSpillCliqueSetMember(SpillCliqueSucc, succ, 1);
16288 succCliqueToDo = new (this) BlockListNode(succ, succCliqueToDo);
16293 // Look at the predecessors of every member of the successor to-do list.
16294 while (succCliqueToDo != nullptr)
16296 BlockListNode* node = succCliqueToDo;
16297 succCliqueToDo = node->m_next;
16298 BasicBlock* blk = node->m_blk;
16299 FreeBlockListNode(node);
16301 for (BasicBlockList* pred = blk->bbCheapPreds; pred != nullptr; pred = pred->next)
16303 BasicBlock* predBlock = pred->block;
16304 // If it's not already in the clique, add it, and also add it
16305 // as a member of the predecessor "toDo" set.
16306 if (impSpillCliqueGetMember(SpillCliquePred, predBlock) == 0)
16308 callback->Visit(SpillCliquePred, predBlock);
16309 impSpillCliqueSetMember(SpillCliquePred, predBlock, 1);
16310 predCliqueToDo = new (this) BlockListNode(predBlock, predCliqueToDo);
16317 // If this fails, it means we didn't walk the spill clique properly and somehow managed to
16318 // miss walking back to include the predecessor we started from.
16319 // The most likely cause: missing or out-of-date bbPreds.
16320 assert(impSpillCliqueGetMember(SpillCliquePred, block) != 0);
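// Editor's note: the walk above computes a fixed point by alternately expanding through
// the successors of known predecessor-members and the predecessors of known
// successor-members. A standalone model of that closure (hypothetical types and
// vector-based adjacency lists, not compiled into the JIT) is sketched below.
#if 0 // illustrative sketch only
#include <set>
#include <vector>

struct Graph
{
    std::vector<std::vector<int>> succs; // succs[b] = successor block ids of block b
    std::vector<std::vector<int>> preds; // preds[b] = predecessor block ids of block b
};

// Starting from 'start' as a predecessor-member, collect every block reachable by
// alternating successor-of-predecessor and predecessor-of-successor steps.
void WalkSpillClique(const Graph& g, int start, std::set<int>& predClique, std::set<int>& succClique)
{
    std::vector<int> predToDo{start};
    std::vector<int> succToDo;
    predClique.insert(start);

    while (!predToDo.empty() || !succToDo.empty())
    {
        while (!predToDo.empty())
        {
            int blk = predToDo.back();
            predToDo.pop_back();
            for (int succ : g.succs[blk])
            {
                if (succClique.insert(succ).second) // newly discovered successor-member
                {
                    succToDo.push_back(succ);
                }
            }
        }
        while (!succToDo.empty())
        {
            int blk = succToDo.back();
            succToDo.pop_back();
            for (int pred : g.preds[blk])
            {
                if (predClique.insert(pred).second) // newly discovered predecessor-member
                {
                    predToDo.push_back(pred);
                }
            }
        }
    }
}
#endif // illustrative sketch only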
16323 void Compiler::SetSpillTempsBase::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk)
16325 if (predOrSucc == SpillCliqueSucc)
16327 assert(blk->bbStkTempsIn == NO_BASE_TMP); // Should not already be a member of a clique as a successor.
16328 blk->bbStkTempsIn = m_baseTmp;
16332 assert(predOrSucc == SpillCliquePred);
16333 assert(blk->bbStkTempsOut == NO_BASE_TMP); // Should not already be a member of a clique as a predecessor.
16334 blk->bbStkTempsOut = m_baseTmp;
16338 void Compiler::ReimportSpillClique::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk)
16340 // For Preds we could be a little smarter and just find the existing store
16341 // and re-type it/add a cast, but that is complicated and hopefully very rare, so
16342 // just re-import the whole block (just like we do for successors)
16344 if (((blk->bbFlags & BBF_IMPORTED) == 0) && (m_pComp->impGetPendingBlockMember(blk) == 0))
16346 // If we haven't imported this block and we're not going to (because it isn't on
16347 // the pending list) then just ignore it for now.
16349 // This block has either never been imported (EntryState == NULL) or it failed
16350 // verification. Neither state requires us to force it to be imported now.
16351 assert((blk->bbEntryState == nullptr) || (blk->bbFlags & BBF_FAILED_VERIFICATION));
16355 // For successors we have a valid verCurrentState, so just mark them for reimport
16356 // the 'normal' way
16357 // Unlike predecessors, we *DO* need to reimport the current block because the
16358 // initial import had the wrong entry state types.
16359 // Similarly, blocks that are currently on the pending list still need to call
16360 // impImportBlockPending to fix up their entry state.
16361 if (predOrSucc == SpillCliqueSucc)
16363 m_pComp->impReimportMarkBlock(blk);
16365 // Set the current stack state to that of the blk->bbEntryState
16366 m_pComp->verResetCurrentState(blk, &m_pComp->verCurrentState);
16367 assert(m_pComp->verCurrentState.thisInitialized == blk->bbThisOnEntry());
16369 m_pComp->impImportBlockPending(blk);
16371 else if ((blk != m_pComp->compCurBB) && ((blk->bbFlags & BBF_IMPORTED) != 0))
16373 // As described above, we are only visiting predecessors so they can
16374 // add the appropriate casts. Since we have already done that for the current
16375 // block, it does not need to be reimported.
16376 // Nor do we need to reimport blocks that are still pending but not yet imported.
16379 // For predecessors, we have no state to seed the EntryState, so we just have
16380 // to assume the existing one is correct.
16381 // If the block is also a successor, it will get the EntryState properly
16382 // updated when it is visited as a successor in the above "if" block.
16383 assert(predOrSucc == SpillCliquePred);
16384 m_pComp->impReimportBlockPending(blk);
16388 // Re-type the incoming lclVar nodes to match the varDsc.
16389 void Compiler::impRetypeEntryStateTemps(BasicBlock* blk)
16391 if (blk->bbEntryState != nullptr)
16393 EntryState* es = blk->bbEntryState;
16394 for (unsigned level = 0; level < es->esStackDepth; level++)
16396 GenTreePtr tree = es->esStack[level].val;
16397 if ((tree->gtOper == GT_LCL_VAR) || (tree->gtOper == GT_LCL_FLD))
16399 unsigned lclNum = tree->gtLclVarCommon.gtLclNum;
16400 noway_assert(lclNum < lvaCount);
16401 LclVarDsc* varDsc = lvaTable + lclNum;
16402 es->esStack[level].val->gtType = varDsc->TypeGet();
16408 unsigned Compiler::impGetSpillTmpBase(BasicBlock* block)
16410 if (block->bbStkTempsOut != NO_BASE_TMP)
16412 return block->bbStkTempsOut;
16418 printf("\n*************** In impGetSpillTmpBase(BB%02u)\n", block->bbNum);
16422 // Otherwise, choose one, and propagate to all members of the spill clique.
16423 // Grab enough temps for the whole stack.
16424 unsigned baseTmp = lvaGrabTemps(verCurrentState.esStackDepth DEBUGARG("IL Stack Entries"));
16425 SetSpillTempsBase callback(baseTmp);
16427 // We do *NOT* need to reset the SpillClique*Members because a block can only be the predecessor
16428 // to one spill clique, and similarly can only be the successor to one spill clique
16429 impWalkSpillCliqueFromPred(block, &callback);
16434 void Compiler::impReimportSpillClique(BasicBlock* block)
16439 printf("\n*************** In impReimportSpillClique(BB%02u)\n", block->bbNum);
16443 // If we get here, it is because this block is already part of a spill clique
16444 // and one predecessor had an outgoing live stack slot of type int, and this
16445 // block has an outgoing live stack slot of type native int.
16446 // We need to reset these before traversal because they have already been set
16447 // by the previous walk to determine all the members of the spill clique.
16448 impInlineRoot()->impSpillCliquePredMembers.Reset();
16449 impInlineRoot()->impSpillCliqueSuccMembers.Reset();
16451 ReimportSpillClique callback(this);
16453 impWalkSpillCliqueFromPred(block, &callback);
16456 // Set the pre-state of "block" (which should not have a pre-state allocated) to
16457 // a copy of "srcState", cloning tree pointers as required.
16458 void Compiler::verInitBBEntryState(BasicBlock* block, EntryState* srcState)
16460 if (srcState->esStackDepth == 0 && srcState->thisInitialized == TIS_Bottom)
16462 block->bbEntryState = nullptr;
16466 block->bbEntryState = (EntryState*)compGetMemA(sizeof(EntryState));
16468 // block->bbEntryState.esRefcount = 1;
16470 block->bbEntryState->esStackDepth = srcState->esStackDepth;
16471 block->bbEntryState->thisInitialized = TIS_Bottom;
16473 if (srcState->esStackDepth > 0)
16475 block->bbSetStack(new (this, CMK_Unknown) StackEntry[srcState->esStackDepth]);
16476 unsigned stackSize = srcState->esStackDepth * sizeof(StackEntry);
16478 memcpy(block->bbEntryState->esStack, srcState->esStack, stackSize);
16479 for (unsigned level = 0; level < srcState->esStackDepth; level++)
16481 GenTreePtr tree = srcState->esStack[level].val;
16482 block->bbEntryState->esStack[level].val = gtCloneExpr(tree);
16486 if (verTrackObjCtorInitState)
16488 verSetThisInit(block, srcState->thisInitialized);
16494 void Compiler::verSetThisInit(BasicBlock* block, ThisInitState tis)
16496 assert(tis != TIS_Bottom); // Precondition.
16497 if (block->bbEntryState == nullptr)
16499 block->bbEntryState = new (this, CMK_Unknown) EntryState();
16502 block->bbEntryState->thisInitialized = tis;
16506 * Resets the current state to the state at the start of the basic block
16508 void Compiler::verResetCurrentState(BasicBlock* block, EntryState* destState)
16511 if (block->bbEntryState == nullptr)
16513 destState->esStackDepth = 0;
16514 destState->thisInitialized = TIS_Bottom;
16518 destState->esStackDepth = block->bbEntryState->esStackDepth;
16520 if (destState->esStackDepth > 0)
16522 unsigned stackSize = destState->esStackDepth * sizeof(StackEntry);
16524 memcpy(destState->esStack, block->bbStackOnEntry(), stackSize);
16527 destState->thisInitialized = block->bbThisOnEntry();
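// Editor's note: verInitBBEntryState and verResetCurrentState act as a save/restore pair:
// the current importer state is recorded as a block's pre-state the first time the block
// is reached, and copied back into the current state whenever the block is (re)imported.
// A minimal standalone model of that pairing (hypothetical types, not compiled into the
// JIT) is sketched below.
#if 0 // illustrative sketch only
#include <vector>

struct EvalState
{
    std::vector<int> stack; // stand-in for the importer's evaluation stack
};

struct BlockState
{
    bool      hasEntryState = false;
    EvalState entryState;
};

// Record the current state as the block's pre-state (first visit only).
void SaveEntryState(BlockState& block, const EvalState& current)
{
    if (!block.hasEntryState)
    {
        block.entryState    = current; // deep copy of the stack contents
        block.hasEntryState = true;
    }
}

// Reset the current state to the block's recorded pre-state (empty if none was recorded).
void RestoreEntryState(const BlockState& block, EvalState& current)
{
    current = block.hasEntryState ? block.entryState : EvalState{};
}
#endif // illustrative sketch only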
16532 ThisInitState BasicBlock::bbThisOnEntry()
16534 return bbEntryState ? bbEntryState->thisInitialized : TIS_Bottom;
16537 unsigned BasicBlock::bbStackDepthOnEntry()
16539 return (bbEntryState ? bbEntryState->esStackDepth : 0);
16542 void BasicBlock::bbSetStack(void* stackBuffer)
16544 assert(bbEntryState);
16545 assert(stackBuffer);
16546 bbEntryState->esStack = (StackEntry*)stackBuffer;
16549 StackEntry* BasicBlock::bbStackOnEntry()
16551 assert(bbEntryState);
16552 return bbEntryState->esStack;
16555 void Compiler::verInitCurrentState()
16557 verTrackObjCtorInitState = FALSE;
16558 verCurrentState.thisInitialized = TIS_Bottom;
16560 if (tiVerificationNeeded)
16562 // Track this ptr initialization
16563 if (!info.compIsStatic && (info.compFlags & CORINFO_FLG_CONSTRUCTOR) && lvaTable[0].lvVerTypeInfo.IsObjRef())
16565 verTrackObjCtorInitState = TRUE;
16566 verCurrentState.thisInitialized = TIS_Uninit;
16570 // initialize stack info
16572 verCurrentState.esStackDepth = 0;
16573 assert(verCurrentState.esStack != nullptr);
16575 // copy current state to entry state of first BB
16576 verInitBBEntryState(fgFirstBB, &verCurrentState);
16579 Compiler* Compiler::impInlineRoot()
16581 if (impInlineInfo == nullptr)
16587 return impInlineInfo->InlineRoot;
16591 BYTE Compiler::impSpillCliqueGetMember(SpillCliqueDir predOrSucc, BasicBlock* blk)
16593 if (predOrSucc == SpillCliquePred)
16595 return impInlineRoot()->impSpillCliquePredMembers.Get(blk->bbInd());
16599 assert(predOrSucc == SpillCliqueSucc);
16600 return impInlineRoot()->impSpillCliqueSuccMembers.Get(blk->bbInd());
16604 void Compiler::impSpillCliqueSetMember(SpillCliqueDir predOrSucc, BasicBlock* blk, BYTE val)
16606 if (predOrSucc == SpillCliquePred)
16608 impInlineRoot()->impSpillCliquePredMembers.Set(blk->bbInd(), val);
16612 assert(predOrSucc == SpillCliqueSucc);
16613 impInlineRoot()->impSpillCliqueSuccMembers.Set(blk->bbInd(), val);
16617 /*****************************************************************************
16619 * Convert the instrs ("import") into our internal format (trees). The
16620 * basic flowgraph has already been constructed and is passed in.
16623 void Compiler::impImport(BasicBlock* method)
16628 printf("*************** In impImport() for %s\n", info.compFullName);
16632 /* Allocate the stack contents */
16634 if (info.compMaxStack <= sizeof(impSmallStack) / sizeof(impSmallStack[0]))
16636 /* Use local variable, don't waste time allocating on the heap */
16638 impStkSize = sizeof(impSmallStack) / sizeof(impSmallStack[0]);
16639 verCurrentState.esStack = impSmallStack;
16643 impStkSize = info.compMaxStack;
16644 verCurrentState.esStack = new (this, CMK_ImpStack) StackEntry[impStkSize];
16647 // initialize the entry state at start of method
16648 verInitCurrentState();
16650 // Initialize stuff related to figuring "spill cliques" (see spec comment for impGetSpillTmpBase).
16651 Compiler* inlineRoot = impInlineRoot();
16652 if (this == inlineRoot) // These are only used on the root of the inlining tree.
16654 // We have initialized these previously, but to size 0. Make them larger.
16655 impPendingBlockMembers.Init(getAllocator(), fgBBNumMax * 2);
16656 impSpillCliquePredMembers.Init(getAllocator(), fgBBNumMax * 2);
16657 impSpillCliqueSuccMembers.Init(getAllocator(), fgBBNumMax * 2);
16659 inlineRoot->impPendingBlockMembers.Reset(fgBBNumMax * 2);
16660 inlineRoot->impSpillCliquePredMembers.Reset(fgBBNumMax * 2);
16661 inlineRoot->impSpillCliqueSuccMembers.Reset(fgBBNumMax * 2);
16662 impBlockListNodeFreeList = nullptr;
16665 impLastILoffsStmt = nullptr;
16666 impNestedStackSpill = false;
16668 impBoxTemp = BAD_VAR_NUM;
16670 impPendingList = impPendingFree = nullptr;
16672 /* Add the entry-point to the worker-list */
16674 // Skip leading internal blocks. There can be one as a leading scratch BB, and more
16675 // from EH normalization.
16676 // NOTE: It might be possible to always just put fgFirstBB on the pending list, and let everything else just fall out.
16678 for (; method->bbFlags & BBF_INTERNAL; method = method->bbNext)
16680 // Treat these as imported.
16681 assert(method->bbJumpKind == BBJ_NONE); // We assume all the leading ones are fallthrough.
16682 JITDUMP("Marking leading BBF_INTERNAL block BB%02u as BBF_IMPORTED\n", method->bbNum);
16683 method->bbFlags |= BBF_IMPORTED;
16686 impImportBlockPending(method);
16688 /* Import blocks in the worker-list until there are no more */
16690 while (impPendingList)
16692 /* Remove the entry at the front of the list */
16694 PendingDsc* dsc = impPendingList;
16695 impPendingList = impPendingList->pdNext;
16696 impSetPendingBlockMember(dsc->pdBB, 0);
16698 /* Restore the stack state */
16700 verCurrentState.thisInitialized = dsc->pdThisPtrInit;
16701 verCurrentState.esStackDepth = dsc->pdSavedStack.ssDepth;
16702 if (verCurrentState.esStackDepth)
16704 impRestoreStackState(&dsc->pdSavedStack);
16707 /* Add the entry to the free list for reuse */
16709 dsc->pdNext = impPendingFree;
16710 impPendingFree = dsc;
16712 /* Now import the block */
16714 if (dsc->pdBB->bbFlags & BBF_FAILED_VERIFICATION)
16717 #ifdef _TARGET_64BIT_
16718 // On AMD64, during verification we have to match JIT64 behavior since the VM is very tightly
16719 // coupled with the JIT64 IL Verification logic. Look inside verHandleVerificationFailure
16720 // method for further explanation on why we raise this exception instead of making the jitted
16721 // code throw the verification exception during execution.
16722 if (tiVerificationNeeded && opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IMPORT_ONLY))
16724 BADCODE("Basic block marked as not verifiable");
16727 #endif // _TARGET_64BIT_
16729 verConvertBBToThrowVerificationException(dsc->pdBB DEBUGARG(true));
16730 impEndTreeList(dsc->pdBB);
16735 impImportBlock(dsc->pdBB);
16737 if (compDonotInline())
16741 if (compIsForImportOnly() && !tiVerificationNeeded)
16749 if (verbose && info.compXcptnsCount)
16751 printf("\nAfter impImport() added block for try,catch,finally");
16752 fgDispBasicBlocks();
16756 // Used in impImportBlockPending() for STRESS_CHK_REIMPORT
16757 for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
16759 block->bbFlags &= ~BBF_VISITED;
16763 assert(!compIsForInlining() || !tiVerificationNeeded);
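// Editor's note: impImport is a classic worklist driver: seed the entry block, then
// repeatedly pop a pending block, restore its saved entry state, and import it, letting
// the per-block import schedule further work. A minimal standalone model of that driver
// loop (hypothetical types and names, not compiled into the JIT) is sketched below.
#if 0 // illustrative sketch only
#include <deque>
#include <vector>

struct Block
{
    bool             imported = false;
    bool             pending  = false;
    std::vector<int> succs; // indices of successor blocks
};

// Import every block reachable from 'entry'; a block is queued again only when a caller
// re-marks it (analogous to impImportBlockPending after an entry-state change).
void ImportAll(std::vector<Block>& blocks, int entry)
{
    std::deque<int> worklist;
    worklist.push_back(entry);
    blocks[entry].pending = true;

    while (!worklist.empty())
    {
        int blk = worklist.front();
        worklist.pop_front();
        blocks[blk].pending = false;

        // "Import" the block, then schedule any successors not yet imported or pending.
        blocks[blk].imported = true;
        for (int succ : blocks[blk].succs)
        {
            if (!blocks[succ].imported && !blocks[succ].pending)
            {
                blocks[succ].pending = true;
                worklist.push_back(succ);
            }
        }
    }
}
#endif // illustrative sketch only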
16766 // Checks if a typeinfo (usually stored in the type stack) is a struct.
16767 // The invariant here is that if it's not a ref or a method and has a class handle,
16768 // it's a value type.
16769 bool Compiler::impIsValueType(typeInfo* pTypeInfo)
16771 if (pTypeInfo && pTypeInfo->IsValueClassWithClsHnd())
16781 /*****************************************************************************
16782 * Check to see if the tree is the address of a local or
16783 the address of a field in a local.
16785 *lclVarTreeOut will contain the GT_LCL_VAR tree when it returns TRUE.
16789 BOOL Compiler::impIsAddressInLocal(GenTreePtr tree, GenTreePtr* lclVarTreeOut)
16791 if (tree->gtOper != GT_ADDR)
16796 GenTreePtr op = tree->gtOp.gtOp1;
16797 while (op->gtOper == GT_FIELD)
16799 op = op->gtField.gtFldObj;
16800 if (op && op->gtOper == GT_ADDR) // Skip static fields where op will be NULL.
16802 op = op->gtOp.gtOp1;
16810 if (op->gtOper == GT_LCL_VAR)
16812 *lclVarTreeOut = op;
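// Editor's note: impIsAddressInLocal peels an ADDR node and then walks down through any
// number of FIELD nodes (skipping the intervening ADDR of each field's object) looking
// for a LCL_VAR at the bottom. A standalone analogue over a hypothetical tagged node type
// (not compiled into the JIT) is sketched below.
#if 0 // illustrative sketch only
enum class NodeKind
{
    Addr,
    Field,
    LclVar,
    Other
};

struct Node
{
    NodeKind kind;
    Node*    child;  // operand for Addr/Field nodes
    unsigned lclNum; // valid only for LclVar nodes
};

// Returns the local variable node if 'tree' is ADDR(FIELD*(LCL_VAR)), else nullptr.
const Node* AddressedLocal(const Node* tree)
{
    if (tree == nullptr || tree->kind != NodeKind::Addr)
    {
        return nullptr;
    }
    const Node* op = tree->child;
    while (op != nullptr && op->kind == NodeKind::Field)
    {
        op = op->child; // step into the field's object
        if (op != nullptr && op->kind == NodeKind::Addr)
        {
            op = op->child; // fields of a local are reached through an ADDR node
        }
    }
    return ((op != nullptr) && (op->kind == NodeKind::LclVar)) ? op : nullptr;
}
#endif // illustrative sketch only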
16821 //------------------------------------------------------------------------
16822 // impMakeDiscretionaryInlineObservations: make observations that help
16823 // determine the profitability of a discretionary inline
16826 // pInlineInfo -- InlineInfo for the inline, or null for the prejit root
16827 // inlineResult -- InlineResult accumulating information about this inline
16830 // If inlining or prejitting the root, this method also makes
16831 // various observations about the method that factor into inline
16832 // decisions. It sets `compNativeSizeEstimate` as a side effect.
16834 void Compiler::impMakeDiscretionaryInlineObservations(InlineInfo* pInlineInfo, InlineResult* inlineResult)
16836 assert(pInlineInfo != nullptr && compIsForInlining() || // Perform the actual inlining.
16837 pInlineInfo == nullptr && !compIsForInlining() // Calculate the static inlining hint for ngen.
16840 // If we're really inlining, we should just have one result in play.
16841 assert((pInlineInfo == nullptr) || (inlineResult == pInlineInfo->inlineResult));
16843 // If this is a "forceinline" method, the JIT probably shouldn't have gone
16844 // to the trouble of estimating the native code size. Even if it did, it
16845 // shouldn't be relying on the result of this method.
16846 assert(inlineResult->GetObservation() == InlineObservation::CALLEE_IS_DISCRETIONARY_INLINE);
16848 // Note if the caller contains NEWOBJ or NEWARR.
16849 Compiler* rootCompiler = impInlineRoot();
16851 if ((rootCompiler->optMethodFlags & OMF_HAS_NEWARRAY) != 0)
16853 inlineResult->Note(InlineObservation::CALLER_HAS_NEWARRAY);
16856 if ((rootCompiler->optMethodFlags & OMF_HAS_NEWOBJ) != 0)
16858 inlineResult->Note(InlineObservation::CALLER_HAS_NEWOBJ);
16861 bool calleeIsStatic = (info.compFlags & CORINFO_FLG_STATIC) != 0;
16862 bool isSpecialMethod = (info.compFlags & CORINFO_FLG_CONSTRUCTOR) != 0;
16864 if (isSpecialMethod)
16866 if (calleeIsStatic)
16868 inlineResult->Note(InlineObservation::CALLEE_IS_CLASS_CTOR);
16872 inlineResult->Note(InlineObservation::CALLEE_IS_INSTANCE_CTOR);
16875 else if (!calleeIsStatic)
16877 // Callee is an instance method.
16879 // Check if the callee has the same 'this' as the root.
16880 if (pInlineInfo != nullptr)
16882 GenTreePtr thisArg = pInlineInfo->iciCall->gtCall.gtCallObjp;
16884 bool isSameThis = impIsThis(thisArg);
16885 inlineResult->NoteBool(InlineObservation::CALLSITE_IS_SAME_THIS, isSameThis);
16889 // Note if the callee's class is a promotable struct
16890 if ((info.compClassAttr & CORINFO_FLG_VALUECLASS) != 0)
16892 lvaStructPromotionInfo structPromotionInfo;
16893 lvaCanPromoteStructType(info.compClassHnd, &structPromotionInfo, false);
16894 if (structPromotionInfo.canPromote)
16896 inlineResult->Note(InlineObservation::CALLEE_CLASS_PROMOTABLE);
16900 #ifdef FEATURE_SIMD
16902 // Note if this method has SIMD args or a SIMD return value
16903 if (pInlineInfo != nullptr && pInlineInfo->hasSIMDTypeArgLocalOrReturn)
16905 inlineResult->Note(InlineObservation::CALLEE_HAS_SIMD);
16908 #endif // FEATURE_SIMD
16910 // Roughly classify callsite frequency.
16911 InlineCallsiteFrequency frequency = InlineCallsiteFrequency::UNUSED;
16913 // If this is a prejit root, or a maximally hot block...
16914 if ((pInlineInfo == nullptr) || (pInlineInfo->iciBlock->bbWeight >= BB_MAX_WEIGHT))
16916 frequency = InlineCallsiteFrequency::HOT;
16918 // No training data. Look for loop-like things.
16919 // We consider a recursive call loop-like. Do not give the inlining boost to the method itself.
16920 // However, give it to things nearby.
16921 else if ((pInlineInfo->iciBlock->bbFlags & BBF_BACKWARD_JUMP) &&
16922 (pInlineInfo->fncHandle != pInlineInfo->inlineCandidateInfo->ilCallerHandle))
16924 frequency = InlineCallsiteFrequency::LOOP;
16926 else if ((pInlineInfo->iciBlock->bbFlags & BBF_PROF_WEIGHT) && (pInlineInfo->iciBlock->bbWeight > BB_ZERO_WEIGHT))
16928 frequency = InlineCallsiteFrequency::WARM;
16930 // Now modify the multiplier based on where we're called from.
16931 else if (pInlineInfo->iciBlock->isRunRarely() || ((info.compFlags & FLG_CCTOR) == FLG_CCTOR))
16933 frequency = InlineCallsiteFrequency::RARE;
16937 frequency = InlineCallsiteFrequency::BORING;
16940 // Also capture the block weight of the call site. In the prejit
16941 // root case, assume there's some hot call site for this method.
16942 unsigned weight = 0;
16944 if (pInlineInfo != nullptr)
16946 weight = pInlineInfo->iciBlock->bbWeight;
16950 weight = BB_MAX_WEIGHT;
16953 inlineResult->NoteInt(InlineObservation::CALLSITE_FREQUENCY, static_cast<int>(frequency));
16954 inlineResult->NoteInt(InlineObservation::CALLSITE_WEIGHT, static_cast<int>(weight));
16957 /*****************************************************************************
16958 This method makes a STATIC inlining decision based on the IL code.
16959 It should not make any inlining decision based on the context.
16960 If forceInline is true, then the inlining decision should not depend on
16961 performance heuristics (code size, etc.).
16964 void Compiler::impCanInlineIL(CORINFO_METHOD_HANDLE fncHandle,
16965 CORINFO_METHOD_INFO* methInfo,
16967 InlineResult* inlineResult)
16969 unsigned codeSize = methInfo->ILCodeSize;
16971 // We shouldn't have made up our minds yet...
16972 assert(!inlineResult->IsDecided());
16974 if (methInfo->EHcount)
16976 inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_EH);
16980 if ((methInfo->ILCode == nullptr) || (codeSize == 0))
16982 inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NO_BODY);
16986 // For now we don't inline varargs (import code can't handle it)
16988 if (methInfo->args.isVarArg())
16990 inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS);
16994 // Reject if it has too many locals.
16995 // This is currently an implementation limit due to fixed-size arrays in the
16996 // inline info, rather than a performance heuristic.
16998 inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_LOCALS, methInfo->locals.numArgs);
17000 if (methInfo->locals.numArgs > MAX_INL_LCLS)
17002 inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_LOCALS);
17006 // Make sure there aren't too many arguments.
17007 // This is currently an implementation limit due to fixed-size arrays in the
17008 // inline info, rather than a performance heuristic.
17010 inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_ARGUMENTS, methInfo->args.numArgs);
17012 if (methInfo->args.numArgs > MAX_INL_ARGS)
17014 inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_ARGUMENTS);
17018 // Note force inline state
17020 inlineResult->NoteBool(InlineObservation::CALLEE_IS_FORCE_INLINE, forceInline);
17022 // Note IL code size
17024 inlineResult->NoteInt(InlineObservation::CALLEE_IL_CODE_SIZE, codeSize);
17026 if (inlineResult->IsFailure())
17031 // Make sure maxstack is not too big
17033 inlineResult->NoteInt(InlineObservation::CALLEE_MAXSTACK, methInfo->maxStack);
17035 if (inlineResult->IsFailure())
17041 /*****************************************************************************
17044 void Compiler::impCheckCanInline(GenTreePtr call,
17045 CORINFO_METHOD_HANDLE fncHandle,
17047 CORINFO_CONTEXT_HANDLE exactContextHnd,
17048 InlineCandidateInfo** ppInlineCandidateInfo,
17049 InlineResult* inlineResult)
17051 // Either EE or JIT might throw exceptions below.
17052 // If that happens, just don't inline the method.
17058 CORINFO_METHOD_HANDLE fncHandle;
17060 CORINFO_CONTEXT_HANDLE exactContextHnd;
17061 InlineResult* result;
17062 InlineCandidateInfo** ppInlineCandidateInfo;
17063 } param = {nullptr};
17065 param.pThis = this;
17067 param.fncHandle = fncHandle;
17068 param.methAttr = methAttr;
17069 param.exactContextHnd = (exactContextHnd != nullptr) ? exactContextHnd : MAKE_METHODCONTEXT(fncHandle);
17070 param.result = inlineResult;
17071 param.ppInlineCandidateInfo = ppInlineCandidateInfo;
17073 bool success = eeRunWithErrorTrap<Param>(
17074 [](Param* pParam) {
17075 DWORD dwRestrictions = 0;
17076 CorInfoInitClassResult initClassResult;
17079 const char* methodName;
17080 const char* className;
17081 methodName = pParam->pThis->eeGetMethodName(pParam->fncHandle, &className);
17083 if (JitConfig.JitNoInline())
17085 pParam->result->NoteFatal(InlineObservation::CALLEE_IS_JIT_NOINLINE);
17090 /* Try to get the code address/size for the method */
17092 CORINFO_METHOD_INFO methInfo;
17093 if (!pParam->pThis->info.compCompHnd->getMethodInfo(pParam->fncHandle, &methInfo))
17095 pParam->result->NoteFatal(InlineObservation::CALLEE_NO_METHOD_INFO);
17100 forceInline = !!(pParam->methAttr & CORINFO_FLG_FORCEINLINE);
17102 pParam->pThis->impCanInlineIL(pParam->fncHandle, &methInfo, forceInline, pParam->result);
17104 if (pParam->result->IsFailure())
17106 assert(pParam->result->IsNever());
17110 // Speculatively check if initClass() can be done.
17111 // If it can be done, we will try to inline the method. If inlining
17112 // succeeds, then we will do the non-speculative initClass() and commit it.
17113 // If this speculative call to initClass() fails, there is no point
17114 // trying to inline this method.
17116 pParam->pThis->info.compCompHnd->initClass(nullptr /* field */, pParam->fncHandle /* method */,
17117 pParam->exactContextHnd /* context */,
17118 TRUE /* speculative */);
17120 if (initClassResult & CORINFO_INITCLASS_DONT_INLINE)
17122 pParam->result->NoteFatal(InlineObservation::CALLSITE_CLASS_INIT_FAILURE_SPEC);
17126 // Give the EE the final say in whether to inline or not.
17127 // This should be last since for verifiable code, this can be expensive
17129 /* VM Inline check also ensures that the method is verifiable if needed */
17130 CorInfoInline vmResult;
17131 vmResult = pParam->pThis->info.compCompHnd->canInline(pParam->pThis->info.compMethodHnd, pParam->fncHandle,
17134 if (vmResult == INLINE_FAIL)
17136 pParam->result->NoteFatal(InlineObservation::CALLSITE_IS_VM_NOINLINE);
17138 else if (vmResult == INLINE_NEVER)
17140 pParam->result->NoteFatal(InlineObservation::CALLEE_IS_VM_NOINLINE);
17143 if (pParam->result->IsFailure())
17145 // Make sure not to report this one. It was already reported by the VM.
17146 pParam->result->SetReported();
17150 // check for unsupported inlining restrictions
17151 assert((dwRestrictions & ~(INLINE_RESPECT_BOUNDARY | INLINE_NO_CALLEE_LDSTR | INLINE_SAME_THIS)) == 0);
17153 if (dwRestrictions & INLINE_SAME_THIS)
17155 GenTreePtr thisArg = pParam->call->gtCall.gtCallObjp;
17158 if (!pParam->pThis->impIsThis(thisArg))
17160 pParam->result->NoteFatal(InlineObservation::CALLSITE_REQUIRES_SAME_THIS);
17165 /* Get the method properties */
17167 CORINFO_CLASS_HANDLE clsHandle;
17168 clsHandle = pParam->pThis->info.compCompHnd->getMethodClass(pParam->fncHandle);
17170 clsAttr = pParam->pThis->info.compCompHnd->getClassAttribs(clsHandle);
17172 /* Get the return type */
17174 var_types fncRetType;
17175 fncRetType = pParam->call->TypeGet();
17178 var_types fncRealRetType;
17179 fncRealRetType = JITtype2varType(methInfo.args.retType);
17181 assert((genActualType(fncRealRetType) == genActualType(fncRetType)) ||
17182 // <BUGNUM> VSW 288602 </BUGNUM>
17183 // In the case of IJW, we allow assigning a native pointer to a BYREF.
17184 (fncRetType == TYP_BYREF && methInfo.args.retType == CORINFO_TYPE_PTR) ||
17185 (varTypeIsStruct(fncRetType) && (fncRealRetType == TYP_STRUCT)));
17189 // Allocate an InlineCandidateInfo structure
17191 InlineCandidateInfo* pInfo;
17192 pInfo = new (pParam->pThis, CMK_Inlining) InlineCandidateInfo;
17194 pInfo->dwRestrictions = dwRestrictions;
17195 pInfo->methInfo = methInfo;
17196 pInfo->methAttr = pParam->methAttr;
17197 pInfo->clsHandle = clsHandle;
17198 pInfo->clsAttr = clsAttr;
17199 pInfo->fncRetType = fncRetType;
17200 pInfo->exactContextHnd = pParam->exactContextHnd;
17201 pInfo->ilCallerHandle = pParam->pThis->info.compMethodHnd;
17202 pInfo->initClassResult = initClassResult;
17204 *(pParam->ppInlineCandidateInfo) = pInfo;
17211 param.result->NoteFatal(InlineObservation::CALLSITE_COMPILATION_ERROR);
17215 void Compiler::impInlineRecordArgInfo(InlineInfo* pInlineInfo,
17216 GenTreePtr curArgVal,
17218 InlineResult* inlineResult)
17220 InlArgInfo* inlCurArgInfo = &pInlineInfo->inlArgInfo[argNum];
17222 if (curArgVal->gtOper == GT_MKREFANY)
17224 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_IS_MKREFANY);
17228 inlCurArgInfo->argNode = curArgVal;
17230 GenTreePtr lclVarTree;
17231 if (impIsAddressInLocal(curArgVal, &lclVarTree) && varTypeIsStruct(lclVarTree))
17233 inlCurArgInfo->argIsByRefToStructLocal = true;
17234 #ifdef FEATURE_SIMD
17235 if (lvaTable[lclVarTree->AsLclVarCommon()->gtLclNum].lvSIMDType)
17237 pInlineInfo->hasSIMDTypeArgLocalOrReturn = true;
17239 #endif // FEATURE_SIMD
17242 if (curArgVal->gtFlags & GTF_ALL_EFFECT)
17244 inlCurArgInfo->argHasGlobRef = (curArgVal->gtFlags & GTF_GLOB_REF) != 0;
17245 inlCurArgInfo->argHasSideEff = (curArgVal->gtFlags & (GTF_ALL_EFFECT & ~GTF_GLOB_REF)) != 0;
17248 if (curArgVal->gtOper == GT_LCL_VAR)
17250 inlCurArgInfo->argIsLclVar = true;
17252 /* Remember the "original" argument number */
17253 curArgVal->gtLclVar.gtLclILoffs = argNum;
17256 if ((curArgVal->OperKind() & GTK_CONST) ||
17257 ((curArgVal->gtOper == GT_ADDR) && (curArgVal->gtOp.gtOp1->gtOper == GT_LCL_VAR)))
17259 inlCurArgInfo->argIsInvariant = true;
17260 if (inlCurArgInfo->argIsThis && (curArgVal->gtOper == GT_CNS_INT) && (curArgVal->gtIntCon.gtIconVal == 0))
17262 /* Abort, but do not mark as not inlinable */
17263 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_HAS_NULL_THIS);
17268 if (!inlCurArgInfo->argIsInvariant && gtHasLocalsWithAddrOp(curArgVal))
17270 inlCurArgInfo->argHasLdargaOp = true;
17276 if (inlCurArgInfo->argIsThis)
17278 printf("thisArg:");
17282 printf("\nArgument #%u:", argNum);
17284 if (inlCurArgInfo->argIsLclVar)
17286 printf(" is a local var");
17288 if (inlCurArgInfo->argIsInvariant)
17290 printf(" is a constant");
17292 if (inlCurArgInfo->argHasGlobRef)
17294 printf(" has global refs");
17296 if (inlCurArgInfo->argHasSideEff)
17298 printf(" has side effects");
17300 if (inlCurArgInfo->argHasLdargaOp)
17302 printf(" has ldarga effect");
17304 if (inlCurArgInfo->argHasStargOp)
17306 printf(" has starg effect");
17308 if (inlCurArgInfo->argIsByRefToStructLocal)
17310 printf(" is byref to a struct local");
17314 gtDispTree(curArgVal);
17320 /*****************************************************************************
17324 void Compiler::impInlineInitVars(InlineInfo* pInlineInfo)
17326 assert(!compIsForInlining());
17328 GenTreePtr call = pInlineInfo->iciCall;
17329 CORINFO_METHOD_INFO* methInfo = &pInlineInfo->inlineCandidateInfo->methInfo;
17330 unsigned clsAttr = pInlineInfo->inlineCandidateInfo->clsAttr;
17331 InlArgInfo* inlArgInfo = pInlineInfo->inlArgInfo;
17332 InlLclVarInfo* lclVarInfo = pInlineInfo->lclVarInfo;
17333 InlineResult* inlineResult = pInlineInfo->inlineResult;
17335 const bool hasRetBuffArg = impMethodInfo_hasRetBuffArg(methInfo);
17337 /* Init the argument struct */
17339 memset(inlArgInfo, 0, (MAX_INL_ARGS + 1) * sizeof(inlArgInfo[0]));
17341 /* Get hold of the 'this' pointer and the argument list proper */
17343 GenTreePtr thisArg = call->gtCall.gtCallObjp;
17344 GenTreePtr argList = call->gtCall.gtCallArgs;
17345 unsigned argCnt = 0; // Count of the arguments
17347 assert((methInfo->args.hasThis()) == (thisArg != nullptr));
17351 inlArgInfo[0].argIsThis = true;
17353 impInlineRecordArgInfo(pInlineInfo, thisArg, argCnt, inlineResult);
17355 if (inlineResult->IsFailure())
17360 /* Increment the argument count */
17364 /* Record some information about each of the arguments */
17365 bool hasTypeCtxtArg = (methInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) != 0;
17367 #if USER_ARGS_COME_LAST
17368 unsigned typeCtxtArg = thisArg ? 1 : 0;
17369 #else // USER_ARGS_COME_LAST
17370 unsigned typeCtxtArg = methInfo->args.totalILArgs();
17371 #endif // USER_ARGS_COME_LAST
17373 for (GenTreePtr argTmp = argList; argTmp; argTmp = argTmp->gtOp.gtOp2)
17375 if (argTmp == argList && hasRetBuffArg)
17380 // Ignore the type context argument
17381 if (hasTypeCtxtArg && (argCnt == typeCtxtArg))
17383 typeCtxtArg = 0xFFFFFFFF;
17387 assert(argTmp->gtOper == GT_LIST);
17388 GenTreePtr argVal = argTmp->gtOp.gtOp1;
17390 impInlineRecordArgInfo(pInlineInfo, argVal, argCnt, inlineResult);
17392 if (inlineResult->IsFailure())
17397 /* Increment the argument count */
17401 /* Make sure we got the arg number right */
17402 assert(argCnt == methInfo->args.totalILArgs());
17404 #ifdef FEATURE_SIMD
17405 bool foundSIMDType = pInlineInfo->hasSIMDTypeArgLocalOrReturn;
17406 #endif // FEATURE_SIMD
17408 /* We have typeless opcodes, get type information from the signature */
17414 if (clsAttr & CORINFO_FLG_VALUECLASS)
17416 sigType = TYP_BYREF;
17423 lclVarInfo[0].lclVerTypeInfo = verMakeTypeInfo(pInlineInfo->inlineCandidateInfo->clsHandle);
17424 lclVarInfo[0].lclHasLdlocaOp = false;
17426 #ifdef FEATURE_SIMD
17427 // We always want to check isSIMDClass, since we want to set foundSIMDType (to increase
17428 // the inlining multiplier) for anything in that assembly.
17429 // But we only need to normalize it if it is a TYP_STRUCT
17430 // (which we need to do even if we have already set foundSIMDType).
17431 if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDClass(&(lclVarInfo[0].lclVerTypeInfo)))
17433 if (sigType == TYP_STRUCT)
17435 sigType = impNormStructType(lclVarInfo[0].lclVerTypeInfo.GetClassHandle());
17437 foundSIMDType = true;
17439 #endif // FEATURE_SIMD
17440 lclVarInfo[0].lclTypeInfo = sigType;
17442 assert(varTypeIsGC(thisArg->gtType) || // "this" is managed
17443 (thisArg->gtType == TYP_I_IMPL && // "this" is unmanaged but the method's class doesn't care
17444 (clsAttr & CORINFO_FLG_VALUECLASS)));
17446 if (genActualType(thisArg->gtType) != genActualType(sigType))
17448 if (sigType == TYP_REF)
17450 /* The argument cannot be bashed into a ref (see bug 750871) */
17451 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_REF);
17455 /* This can only happen with byrefs <-> ints/shorts */
17457 assert(genActualType(sigType) == TYP_I_IMPL || sigType == TYP_BYREF);
17458 assert(genActualType(thisArg->gtType) == TYP_I_IMPL || thisArg->gtType == TYP_BYREF);
17460 if (sigType == TYP_BYREF)
17462 lclVarInfo[0].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
17464 else if (thisArg->gtType == TYP_BYREF)
17466 assert(sigType == TYP_I_IMPL);
17468 /* If possible change the BYREF to an int */
17469 if (thisArg->IsVarAddr())
17471 thisArg->gtType = TYP_I_IMPL;
17472 lclVarInfo[0].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
17476 /* Arguments 'int <- byref' cannot be bashed */
17477 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_INT);
17484 /* Init the types of the arguments and make sure the types
17485 * from the trees match the types in the signature */
17487 CORINFO_ARG_LIST_HANDLE argLst;
17488 argLst = methInfo->args.args;
17491 for (i = (thisArg ? 1 : 0); i < argCnt; i++, argLst = info.compCompHnd->getArgNext(argLst))
17493 var_types sigType = (var_types)eeGetArgType(argLst, &methInfo->args);
17495 lclVarInfo[i].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->args, argLst);
17497 #ifdef FEATURE_SIMD
17498 if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDClass(&(lclVarInfo[i].lclVerTypeInfo)))
17500 // If this is a SIMD class (i.e. in the SIMD assembly), then we will consider that we've
17501 // found a SIMD type, even if this may not be a type we recognize (the assumption is that
17502 // it is likely to use a SIMD type, and therefore we want to increase the inlining multiplier).
17503 foundSIMDType = true;
17504 if (sigType == TYP_STRUCT)
17506 var_types structType = impNormStructType(lclVarInfo[i].lclVerTypeInfo.GetClassHandle());
17507 sigType = structType;
17510 #endif // FEATURE_SIMD
17512 lclVarInfo[i].lclTypeInfo = sigType;
17513 lclVarInfo[i].lclHasLdlocaOp = false;
17515 /* Does the tree type match the signature type? */
17517 GenTreePtr inlArgNode = inlArgInfo[i].argNode;
17519 if (sigType != inlArgNode->gtType)
17521 /* In valid IL, this can only happen for short integer types or byrefs <-> [native] ints,
17522 but in bad IL cases with caller-callee signature mismatches we can see other types.
17523 Intentionally reject cases with mismatches so the jit is more flexible when
17524 encountering bad IL. */
17526 bool isPlausibleTypeMatch = (genActualType(sigType) == genActualType(inlArgNode->gtType)) ||
17527 (genActualTypeIsIntOrI(sigType) && inlArgNode->gtType == TYP_BYREF) ||
17528 (sigType == TYP_BYREF && genActualTypeIsIntOrI(inlArgNode->gtType));
17530 if (!isPlausibleTypeMatch)
17532 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_TYPES_INCOMPATIBLE);
17536 /* Is it a narrowing or widening cast?
17537 * Widening casts are ok since the value computed is already
17538 * normalized to an int (on the IL stack) */
17540 if (genTypeSize(inlArgNode->gtType) >= genTypeSize(sigType))
17542 if (sigType == TYP_BYREF)
17544 lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
17546 else if (inlArgNode->gtType == TYP_BYREF)
17548 assert(varTypeIsIntOrI(sigType));
17550 /* If possible bash the BYREF to an int */
17551 if (inlArgNode->IsVarAddr())
17553 inlArgNode->gtType = TYP_I_IMPL;
17554 lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
17558 /* Arguments 'int <- byref' cannot be changed */
17559 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_INT);
17563 else if (genTypeSize(sigType) < EA_PTRSIZE)
17565 /* Narrowing cast */
17567 if (inlArgNode->gtOper == GT_LCL_VAR &&
17568 !lvaTable[inlArgNode->gtLclVarCommon.gtLclNum].lvNormalizeOnLoad() &&
17569 sigType == lvaGetRealType(inlArgNode->gtLclVarCommon.gtLclNum))
17571 /* We don't need to insert a cast here as the variable
17572 was assigned a normalized value of the right type */
17577 inlArgNode = inlArgInfo[i].argNode = gtNewCastNode(TYP_INT, inlArgNode, sigType);
17579 inlArgInfo[i].argIsLclVar = false;
17581 /* Try to fold the node in case we have constant arguments */
17583 if (inlArgInfo[i].argIsInvariant)
17585 inlArgNode = gtFoldExprConst(inlArgNode);
17586 inlArgInfo[i].argNode = inlArgNode;
17587 assert(inlArgNode->OperIsConst());
17590 #ifdef _TARGET_64BIT_
17591 else if (genTypeSize(genActualType(inlArgNode->gtType)) < genTypeSize(sigType))
17593 // This should only happen for int -> native int widening
17594 inlArgNode = inlArgInfo[i].argNode = gtNewCastNode(genActualType(sigType), inlArgNode, sigType);
17596 inlArgInfo[i].argIsLclVar = false;
17598 /* Try to fold the node in case we have constant arguments */
17600 if (inlArgInfo[i].argIsInvariant)
17602 inlArgNode = gtFoldExprConst(inlArgNode);
17603 inlArgInfo[i].argNode = inlArgNode;
17604 assert(inlArgNode->OperIsConst());
17607 #endif // _TARGET_64BIT_
17612 /* Init the types of the local variables */
17614 CORINFO_ARG_LIST_HANDLE localsSig;
17615 localsSig = methInfo->locals.args;
17617 for (i = 0; i < methInfo->locals.numArgs; i++)
17620 var_types type = (var_types)eeGetArgType(localsSig, &methInfo->locals, &isPinned);
17622 lclVarInfo[i + argCnt].lclHasLdlocaOp = false;
17623 lclVarInfo[i + argCnt].lclIsPinned = isPinned;
17624 lclVarInfo[i + argCnt].lclTypeInfo = type;
17628 // Pinned locals may cause inlines to fail.
17629 inlineResult->Note(InlineObservation::CALLEE_HAS_PINNED_LOCALS);
17630 if (inlineResult->IsFailure())
17636 lclVarInfo[i + argCnt].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->locals, localsSig);
17638 // If this local is a struct type with GC fields, inform the inliner. It may choose to bail
17639 // out on the inline.
17640 if (type == TYP_STRUCT)
17642 CORINFO_CLASS_HANDLE lclHandle = lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle();
17643 DWORD typeFlags = info.compCompHnd->getClassAttribs(lclHandle);
17644 if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0)
17646 inlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT);
17647 if (inlineResult->IsFailure())
17652 // Do further notification in the case where the call site is rare; some policies do
17653 // not track the relative hotness of call sites for "always" inline cases.
17654 if (pInlineInfo->iciBlock->isRunRarely())
17656 inlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT);
17657 if (inlineResult->IsFailure())
17666 localsSig = info.compCompHnd->getArgNext(localsSig);
17668 #ifdef FEATURE_SIMD
17669 if ((!foundSIMDType || (type == TYP_STRUCT)) && isSIMDClass(&(lclVarInfo[i + argCnt].lclVerTypeInfo)))
17671 foundSIMDType = true;
17672 if (featureSIMD && type == TYP_STRUCT)
17674 var_types structType = impNormStructType(lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle());
17675 lclVarInfo[i + argCnt].lclTypeInfo = structType;
17678 #endif // FEATURE_SIMD
17681 #ifdef FEATURE_SIMD
17682 if (!foundSIMDType && (call->AsCall()->gtRetClsHnd != nullptr) && isSIMDClass(call->AsCall()->gtRetClsHnd))
17684 foundSIMDType = true;
17686 pInlineInfo->hasSIMDTypeArgLocalOrReturn = foundSIMDType;
17687 #endif // FEATURE_SIMD
17690 unsigned Compiler::impInlineFetchLocal(unsigned lclNum DEBUGARG(const char* reason))
17692 assert(compIsForInlining());
17694 unsigned tmpNum = impInlineInfo->lclTmpNum[lclNum];
17696 if (tmpNum == BAD_VAR_NUM)
17698 var_types lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
17700 // The lifetime of this local might span multiple BBs.
17701 // So it is a long lifetime local.
17702 impInlineInfo->lclTmpNum[lclNum] = tmpNum = lvaGrabTemp(false DEBUGARG(reason));
17704 lvaTable[tmpNum].lvType = lclTyp;
17705 if (impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclHasLdlocaOp)
17707 lvaTable[tmpNum].lvHasLdAddrOp = 1;
17710 if (impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclIsPinned)
17712 lvaTable[tmpNum].lvPinned = 1;
17714 if (!impInlineInfo->hasPinnedLocals)
17716 // If the inlinee returns a value, use a spill temp
17717 // for the return value to ensure that even in the case
17718 // where the return expression refers to one of the
17719 // pinned locals, we can unpin the local right after
17720 // the inlined method body.
17721 if ((info.compRetNativeType != TYP_VOID) && (lvaInlineeReturnSpillTemp == BAD_VAR_NUM))
17723 lvaInlineeReturnSpillTemp =
17724 lvaGrabTemp(false DEBUGARG("Inline candidate pinned local return spill temp"));
17725 lvaTable[lvaInlineeReturnSpillTemp].lvType = info.compRetNativeType;
17729 impInlineInfo->hasPinnedLocals = true;
17732 if (impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclVerTypeInfo.IsStruct())
17734 if (varTypeIsStruct(lclTyp))
17736 lvaSetStruct(tmpNum,
17737 impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclVerTypeInfo.GetClassHandle(),
17738 true /* unsafe value cls check */);
17742 // This is a wrapped primitive. Make sure the verstate knows that
17743 lvaTable[tmpNum].lvVerTypeInfo =
17744 impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclVerTypeInfo;
17752 // A method used to return the GenTree (usually a GT_LCL_VAR) representing an argument of the inlined method.
17753 // Only use this method for the arguments of the inlinee method.
17754 // !!! Do not use it for the locals of the inlinee method. !!!!
17756 GenTreePtr Compiler::impInlineFetchArg(unsigned lclNum, InlArgInfo* inlArgInfo, InlLclVarInfo* lclVarInfo)
17758 /* Get the argument type */
17759 var_types lclTyp = lclVarInfo[lclNum].lclTypeInfo;
17761 GenTreePtr op1 = nullptr;
17763 // constant or address of local
17764 if (inlArgInfo[lclNum].argIsInvariant && !inlArgInfo[lclNum].argHasLdargaOp && !inlArgInfo[lclNum].argHasStargOp)
17766 /* Clone the constant. Note that we cannot directly use argNode
17767 in the trees even if inlArgInfo[lclNum].argIsUsed==false as this
17768 would introduce aliasing between inlArgInfo[].argNode and
17769 impInlineExpr. Then gtFoldExpr() could change it, causing further
17770 references to the argument working off of the bashed copy. */
17772 op1 = gtCloneExpr(inlArgInfo[lclNum].argNode);
17773 PREFIX_ASSUME(op1 != nullptr);
17774 inlArgInfo[lclNum].argTmpNum = (unsigned)-1; // illegal temp
17776 else if (inlArgInfo[lclNum].argIsLclVar && !inlArgInfo[lclNum].argHasLdargaOp && !inlArgInfo[lclNum].argHasStargOp)
17778 /* Argument is a local variable (of the caller)
17779 * Can we re-use the passed argument node? */
17781 op1 = inlArgInfo[lclNum].argNode;
17782 inlArgInfo[lclNum].argTmpNum = op1->gtLclVarCommon.gtLclNum;
17784 if (inlArgInfo[lclNum].argIsUsed)
17786 assert(op1->gtOper == GT_LCL_VAR);
17787 assert(lclNum == op1->gtLclVar.gtLclILoffs);
17789 if (!lvaTable[op1->gtLclVarCommon.gtLclNum].lvNormalizeOnLoad())
17791 lclTyp = genActualType(lclTyp);
17794 /* Create a new lcl var node - remember the argument lclNum */
17795 op1 = gtNewLclvNode(op1->gtLclVarCommon.gtLclNum, lclTyp, op1->gtLclVar.gtLclILoffs);
17798 else if (inlArgInfo[lclNum].argIsByRefToStructLocal && !inlArgInfo[lclNum].argHasStargOp)
17800 /* Argument is a by-ref address to a struct, a normed struct, or its field.
17801 In these cases, don't spill the byref to a local, simply clone the tree and use it.
17802 This way we will increase the chance for this byref to be optimized away by
17803 a subsequent "dereference" operation.
17805 From Dev11 bug #139955: Argument node can also be TYP_I_IMPL if we've bashed the tree
17806 (in impInlineInitVars()), if the arg has argHasLdargaOp as well as argIsByRefToStructLocal.
17807 For example, if the caller is:
17808 ldloca.s V_1 // V_1 is a local struct
17809 call void Test.ILPart::RunLdargaOnPointerArg(int32*)
17810 and the callee being inlined has:
17811 .method public static void RunLdargaOnPointerArg(int32* ptrToInts) cil managed
17813 call void Test.FourInts::NotInlined_SetExpectedValuesThroughPointerToPointer(int32**)
17814 then we change the argument tree (of "ldloca.s V_1") to TYP_I_IMPL to match the callee signature. We'll
17815 soon afterwards reject the inlining anyway, since the tree we return isn't a GT_LCL_VAR.
17817 assert(inlArgInfo[lclNum].argNode->TypeGet() == TYP_BYREF ||
17818 inlArgInfo[lclNum].argNode->TypeGet() == TYP_I_IMPL);
17819 op1 = gtCloneExpr(inlArgInfo[lclNum].argNode);
17823 /* Argument is a complex expression - it must be evaluated into a temp */
17825 if (inlArgInfo[lclNum].argHasTmp)
17827 assert(inlArgInfo[lclNum].argIsUsed);
17828 assert(inlArgInfo[lclNum].argTmpNum < lvaCount);
17830 /* Create a new lcl var node - remember the argument lclNum */
17831 op1 = gtNewLclvNode(inlArgInfo[lclNum].argTmpNum, genActualType(lclTyp));
17833 /* This is the second or later use of the this argument,
17834 so we have to use the temp (instead of the actual arg) */
17835 inlArgInfo[lclNum].argBashTmpNode = nullptr;
17839 /* First time use */
17840 assert(inlArgInfo[lclNum].argIsUsed == false);
17842 /* Reserve a temp for the expression.
17843 * Use a large size node as we may change it later */
17845 unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Inlining Arg"));
17847 lvaTable[tmpNum].lvType = lclTyp;
17848 assert(lvaTable[tmpNum].lvAddrExposed == 0);
17849 if (inlArgInfo[lclNum].argHasLdargaOp)
17851 lvaTable[tmpNum].lvHasLdAddrOp = 1;
17854 if (lclVarInfo[lclNum].lclVerTypeInfo.IsStruct())
17856 if (varTypeIsStruct(lclTyp))
17858 lvaSetStruct(tmpNum, impInlineInfo->lclVarInfo[lclNum].lclVerTypeInfo.GetClassHandle(),
17859 true /* unsafe value cls check */);
17863 // This is a wrapped primitive. Make sure the verstate knows that
17864 lvaTable[tmpNum].lvVerTypeInfo = impInlineInfo->lclVarInfo[lclNum].lclVerTypeInfo;
17868 inlArgInfo[lclNum].argHasTmp = true;
17869 inlArgInfo[lclNum].argTmpNum = tmpNum;
17871 // If we require strict exception order, then arguments must
17872 // be evaluated in sequence before the body of the inlined method.
17873 // So we need to evaluate them to a temp.
17874 // Also, if arguments have global references, we need to
17875 // evaluate them to a temp before the inlined body as the
17876 // inlined body may be modifying the global ref.
17877 // TODO-1stClassStructs: We currently do not reuse an existing lclVar
17878 // if it is a struct, because it requires some additional handling.
17880 if (!varTypeIsStruct(lclTyp) && (!inlArgInfo[lclNum].argHasSideEff) && (!inlArgInfo[lclNum].argHasGlobRef))
17882 /* Get a *LARGE* LCL_VAR node */
17883 op1 = gtNewLclLNode(tmpNum, genActualType(lclTyp), lclNum);
17885 /* Record op1 as the very first use of this argument.
17886 If there are no further uses of the arg, we may be
17887 able to use the actual arg node instead of the temp.
17888 If we do see any further uses, we will clear this. */
17889 inlArgInfo[lclNum].argBashTmpNode = op1;
17893 /* Get a small LCL_VAR node */
17894 op1 = gtNewLclvNode(tmpNum, genActualType(lclTyp));
17895 /* No bashing of this argument */
17896 inlArgInfo[lclNum].argBashTmpNode = nullptr;
17901 /* Mark the argument as used */
17903 inlArgInfo[lclNum].argIsUsed = true;
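// Editor's note: impInlineFetchArg effectively chooses among a few substitution
// strategies per argument: clone invariant arguments, reuse caller locals directly,
// clone byrefs to struct locals, or evaluate anything else into a temp and refer to
// the temp thereafter. A simplified standalone sketch of that decision (hypothetical
// types, byref-to-struct case folded into the temp case, not compiled into the JIT)
// is below.
#if 0 // illustrative sketch only
enum class ArgSubst
{
    CloneConstant, // invariant: safe to duplicate at each use
    ReuseLocal,    // caller local: refer to the same variable
    UseTemp        // anything else: evaluate once into a temp, then use the temp
};

struct ArgInfo
{
    bool isInvariant;
    bool isLclVar;
    bool hasLdargaOp;
    bool hasStargOp;
};

// Mirrors the ordering of the checks above: arguments whose address is taken or that
// are reassigned in the callee always go through a temp.
ArgSubst ChooseSubstitution(const ArgInfo& arg)
{
    if (arg.isInvariant && !arg.hasLdargaOp && !arg.hasStargOp)
    {
        return ArgSubst::CloneConstant;
    }
    if (arg.isLclVar && !arg.hasLdargaOp && !arg.hasStargOp)
    {
        return ArgSubst::ReuseLocal;
    }
    return ArgSubst::UseTemp;
}
#endif // illustrative sketch only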
17908 /******************************************************************************
17909 Is this the original "this" argument to the call being inlined?
17911 Note that we do not inline methods with "starg 0", and so we do not need to worry about it.
17915 BOOL Compiler::impInlineIsThis(GenTreePtr tree, InlArgInfo* inlArgInfo)
17917 assert(compIsForInlining());
17918 return (tree->gtOper == GT_LCL_VAR && tree->gtLclVarCommon.gtLclNum == inlArgInfo[0].argTmpNum);
17921 //-----------------------------------------------------------------------------
17922 // This function checks if a dereference in the inlinee can guarantee that
17923 // the "this" is non-NULL.
17924 // If we haven't hit a branch or a side effect, and we are dereferencing
17925 // from 'this' to access a field or make a GTF_CALL_NULLCHECK call,
17926 // then we can avoid a separate null pointer check.
17928 // "additionalTreesToBeEvaluatedBefore"
17929 // is the set of pending trees that have not yet been added to the statement list,
17930 // and which have been removed from verCurrentState.esStack[]
17932 BOOL Compiler::impInlineIsGuaranteedThisDerefBeforeAnySideEffects(GenTreePtr additionalTreesToBeEvaluatedBefore,
17933 GenTreePtr variableBeingDereferenced,
17934 InlArgInfo* inlArgInfo)
17936 assert(compIsForInlining());
17937 assert(opts.OptEnabled(CLFLG_INLINING));
17939 BasicBlock* block = compCurBB;
17944 if (block != fgFirstBB)
17949 if (!impInlineIsThis(variableBeingDereferenced, inlArgInfo))
17954 if (additionalTreesToBeEvaluatedBefore &&
17955 GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(additionalTreesToBeEvaluatedBefore->gtFlags))
17960 for (stmt = impTreeList->gtNext; stmt; stmt = stmt->gtNext)
17962 expr = stmt->gtStmt.gtStmtExpr;
17964 if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(expr->gtFlags))
17970 for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
17972 unsigned stackTreeFlags = verCurrentState.esStack[level].val->gtFlags;
17973 if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(stackTreeFlags))
17982 /******************************************************************************/
17983 // Check the inlining eligibility of this GT_CALL node.
17984 // Mark GTF_CALL_INLINE_CANDIDATE on the GT_CALL node
17986 // Todo: find a way to record the failure reasons in the IR (or
17987 // otherwise build tree context) so when we do the inlining pass we
17988 // can capture these reasons
17990 void Compiler::impMarkInlineCandidate(GenTreePtr callNode,
17991 CORINFO_CONTEXT_HANDLE exactContextHnd,
17992 CORINFO_CALL_INFO* callInfo)
17994 // Let the strategy know there's another call
17995 impInlineRoot()->m_inlineStrategy->NoteCall();
17997 if (!opts.OptEnabled(CLFLG_INLINING))
17999 /* XXX Mon 8/18/2008
18000 * This assert is misleading. The caller does not ensure that we have CLFLG_INLINING set before
18001 * calling impMarkInlineCandidate. However, if this assert trips it means that we're an inlinee and
18002 * CLFLG_MINOPT is set. That doesn't make a lot of sense. If you hit this assert, work back and
18003 * figure out why we did not set MAXOPT for this compile.
18005 assert(!compIsForInlining());
18009 if (compIsForImportOnly())
18011 // Don't bother creating the inline candidate during verification.
18012 // Otherwise the call to info.compCompHnd->canInline will trigger a recursive verification
18013 // that leads to the creation of multiple instances of Compiler.
18017 GenTreeCall* call = callNode->AsCall();
18018 InlineResult inlineResult(this, call, nullptr, "impMarkInlineCandidate");
18020 // Don't inline if not optimizing root method
18021 if (opts.compDbgCode)
18023 inlineResult.NoteFatal(InlineObservation::CALLER_DEBUG_CODEGEN);
18027 // Don't inline if inlining into root method is disabled.
18028 if (InlineStrategy::IsNoInline(info.compCompHnd, info.compMethodHnd))
18030 inlineResult.NoteFatal(InlineObservation::CALLER_IS_JIT_NOINLINE);
18034 // Inlining candidate determination needs to honor only the IL tail prefix.
18035 // Inlining takes precedence over implicit tail call optimization (if the call is not directly recursive).
18036 if (call->IsTailPrefixedCall())
18038 inlineResult.NoteFatal(InlineObservation::CALLSITE_EXPLICIT_TAIL_PREFIX);
18042 // Tail recursion elimination takes precedence over inlining.
18043 // TODO: We may want to do some of the additional checks from fgMorphCall
18044 // here to reduce the chance that we suppress inlining for a call that then
18045 // is not optimized as a fast tail call or turned into a loop anyway.
18046 if (gtIsRecursiveCall(call) && call->IsImplicitTailCall())
18048 inlineResult.NoteFatal(InlineObservation::CALLSITE_IMPLICIT_REC_TAIL_CALL);
18052 if ((call->gtFlags & GTF_CALL_VIRT_KIND_MASK) != GTF_CALL_NONVIRT)
18054 inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT);
18058 /* Ignore helper calls */
18060 if (call->gtCallType == CT_HELPER)
18062 inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_CALL_TO_HELPER);
18066 /* Ignore indirect calls */
18067 if (call->gtCallType == CT_INDIRECT)
18069 inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT_MANAGED);
18073 /* I removed the check for BBJ_THROW. BBJ_THROW is usually marked as rarely run. This more or less
18074 * restricts the inliner to non-expanding inlines. I removed the check to allow for non-expanding
18075 * inlining in throw blocks. I should consider the same thing for catch and filter regions. */
18077 CORINFO_METHOD_HANDLE fncHandle = call->gtCallMethHnd;
18080 // Reuse method flags from the original callInfo if possible
18081 if (fncHandle == callInfo->hMethod)
18083 methAttr = callInfo->methodFlags;
18087 methAttr = info.compCompHnd->getMethodAttribs(fncHandle);
18091 if (compStressCompile(STRESS_FORCE_INLINE, 0))
18093 methAttr |= CORINFO_FLG_FORCEINLINE;
18097 // Check for COMPlus_AggressiveInlining
18098 if (compDoAggressiveInlining)
18100 methAttr |= CORINFO_FLG_FORCEINLINE;
18103 if (!(methAttr & CORINFO_FLG_FORCEINLINE))
18105 /* Don't bother inlining call sites that are in the catch handler region */
18106 if (bbInCatchHandlerILRange(compCurBB))
18111 printf("\nWill not inline blocks that are in the catch handler region\n");
18116 inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_CATCH);
18120 if (bbInFilterILRange(compCurBB))
18125 printf("\nWill not inline blocks that are in the filter region\n");
18129 inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_FILTER);
18134 /* If the caller's stack frame is marked, then we can't do any inlining. Period. */
18136 if (opts.compNeedSecurityCheck)
18138 inlineResult.NoteFatal(InlineObservation::CALLER_NEEDS_SECURITY_CHECK);
18142 /* Check if we tried to inline this method before */
18144 if (methAttr & CORINFO_FLG_DONT_INLINE)
18146 inlineResult.NoteFatal(InlineObservation::CALLEE_IS_NOINLINE);
18150 /* Cannot inline synchronized methods */
18152 if (methAttr & CORINFO_FLG_SYNCH)
18154 inlineResult.NoteFatal(InlineObservation::CALLEE_IS_SYNCHRONIZED);
18158 /* Do not inline if callee needs security checks (since they would then mark the wrong frame) */
18160 if (methAttr & CORINFO_FLG_SECURITYCHECK)
18162 inlineResult.NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
18166 InlineCandidateInfo* inlineCandidateInfo = nullptr;
18167 impCheckCanInline(call, fncHandle, methAttr, exactContextHnd, &inlineCandidateInfo, &inlineResult);
18169 if (inlineResult.IsFailure())
18174 // The old value should be NULL
18175 assert(call->gtInlineCandidateInfo == nullptr);
18177 call->gtInlineCandidateInfo = inlineCandidateInfo;
18179 // Mark the call node as inline candidate.
18180 call->gtFlags |= GTF_CALL_INLINE_CANDIDATE;
18182 // Let the strategy know there's another candidate.
18183 impInlineRoot()->m_inlineStrategy->NoteCandidate();
18185 // Since we're not actually inlining yet, and this call site is
18186 // still just an inline candidate, there's nothing to report.
18187 inlineResult.SetReported();
18190 /******************************************************************************/
18191 // Returns true if the given intrinsic will be implemented by target-specific instructions.
18194 bool Compiler::IsTargetIntrinsic(CorInfoIntrinsics intrinsicId)
18196 #if defined(_TARGET_AMD64_) || (defined(_TARGET_X86_) && !defined(LEGACY_BACKEND))
18197 switch (intrinsicId)
18199 // AMD64 only has SSE2 instructions to directly compute sqrt/abs.
18201 // TODO: Because the x86 backend only targets SSE for floating-point code,
18202 // it does not treat Sine, Cosine, or Round as intrinsics (JIT32
18203 // implemented those intrinsics as x87 instructions). If this poses
18204 // a CQ problem, it may be necessary to change the implementation of
18205 // the helper calls to decrease call overhead or switch back to the
18206 // x87 instructions. This is tracked by #7097.
18207 case CORINFO_INTRINSIC_Sqrt:
18208 case CORINFO_INTRINSIC_Abs:
18214 #elif defined(_TARGET_ARM64_)
18215 switch (intrinsicId)
18217 case CORINFO_INTRINSIC_Sqrt:
18218 case CORINFO_INTRINSIC_Abs:
18219 case CORINFO_INTRINSIC_Round:
18225 #elif defined(_TARGET_ARM_)
18226 switch (intrinsicId)
18228 case CORINFO_INTRINSIC_Sqrt:
18229 case CORINFO_INTRINSIC_Abs:
18230 case CORINFO_INTRINSIC_Round:
18236 #elif defined(_TARGET_X86_)
18237 switch (intrinsicId)
18239 case CORINFO_INTRINSIC_Sin:
18240 case CORINFO_INTRINSIC_Cos:
18241 case CORINFO_INTRINSIC_Sqrt:
18242 case CORINFO_INTRINSIC_Abs:
18243 case CORINFO_INTRINSIC_Round:
18250 // TODO: This portion of the logic is not implemented for other architectures.
18251 // The reason for returning true is that, on all other architectures, the only
18252 // intrinsics enabled are target intrinsics.
18254 #endif //_TARGET_AMD64_
18257 /******************************************************************************/
18258 // Returns true if the given intrinsic will be implemented by calling a System.Math method.
18261 bool Compiler::IsIntrinsicImplementedByUserCall(CorInfoIntrinsics intrinsicId)
18263 // Currently, if a math intrinsic is not implemented by target-specific
18264 // instructions, it will be implemented by a call to System.Math. In the
18265 // future, if we switch to implementing some of them with helper calls,
18266 // this predicate will need to be revisited.
18267 return !IsTargetIntrinsic(intrinsicId);
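// (Illustrative example derived from the switch above: on _TARGET_ARM_, CORINFO_INTRINSIC_Sin
//  is not a target intrinsic, so this predicate returns true and the call is left to the
//  managed System.Math implementation.)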
18270 bool Compiler::IsMathIntrinsic(CorInfoIntrinsics intrinsicId)
18272 switch (intrinsicId)
18274 case CORINFO_INTRINSIC_Sin:
18275 case CORINFO_INTRINSIC_Sqrt:
18276 case CORINFO_INTRINSIC_Abs:
18277 case CORINFO_INTRINSIC_Cos:
18278 case CORINFO_INTRINSIC_Round:
18279 case CORINFO_INTRINSIC_Cosh:
18280 case CORINFO_INTRINSIC_Sinh:
18281 case CORINFO_INTRINSIC_Tan:
18282 case CORINFO_INTRINSIC_Tanh:
18283 case CORINFO_INTRINSIC_Asin:
18284 case CORINFO_INTRINSIC_Acos:
18285 case CORINFO_INTRINSIC_Atan:
18286 case CORINFO_INTRINSIC_Atan2:
18287 case CORINFO_INTRINSIC_Log10:
18288 case CORINFO_INTRINSIC_Pow:
18289 case CORINFO_INTRINSIC_Exp:
18290 case CORINFO_INTRINSIC_Ceiling:
18291 case CORINFO_INTRINSIC_Floor:
18298 bool Compiler::IsMathIntrinsic(GenTreePtr tree)
18300 return (tree->OperGet() == GT_INTRINSIC) && IsMathIntrinsic(tree->gtIntrinsic.gtIntrinsicId);
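// (Illustrative note: a GT_INTRINSIC node built by the importer for, say, Math.Sqrt carries
//  gtIntrinsicId == CORINFO_INTRINSIC_Sqrt and therefore satisfies this predicate.)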
18302 /*****************************************************************************/