1 // Licensed to the .NET Foundation under one or more agreements.
2 // The .NET Foundation licenses this file to you under the MIT license.
3 // See the LICENSE file in the project root for more information.
5 /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
6 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
10 XX Imports the given method and converts it to semantic trees XX
12 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
13 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
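// Verification helpers: the macros below funnel "unverifiable code" conditions into
// verRaiseVerifyExceptionIfNeeded (defined later in this file). A typical call site looks
// roughly like:
//     Verify(eeTryResolveToken(pResolvedToken), "Token resolution failed");
// A false condition marks the method as unverifiable and, if the method actually has to
// be verified, raises a verification exception.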
23 #define Verify(cond, msg) \
28 verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__)); \
32 #define VerifyOrReturn(cond, msg) \
37 verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__)); \
42 #define VerifyOrReturnSpeculative(cond, msg, speculative) \
56 verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__)); \
62 /*****************************************************************************/
64 void Compiler::impInit()
68 impTreeList = nullptr;
69 impTreeLast = nullptr;
70 impInlinedCodeSize = 0;
74 /*****************************************************************************
76 * Pushes the given tree on the stack.
79 void Compiler::impPushOnStack(GenTreePtr tree, typeInfo ti)
81 /* Check for overflow. If inlining, we may be using a bigger stack */
83 if ((verCurrentState.esStackDepth >= info.compMaxStack) &&
84 (verCurrentState.esStackDepth >= impStkSize || ((compCurBB->bbFlags & BBF_IMPORTED) == 0)))
86 BADCODE("stack overflow");
90 // If we are pushing a struct, make certain we know the precise type!
91 if (tree->TypeGet() == TYP_STRUCT)
93 assert(ti.IsType(TI_STRUCT));
94 CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandle();
95 assert(clsHnd != NO_CLASS_HANDLE);
98 if (tiVerificationNeeded && !ti.IsDead())
100 assert(typeInfo::AreEquivalent(NormaliseForStack(ti), ti)); // types are normalized
102 // The ti type is consistent with the tree type.
105 // On 64-bit systems, nodes whose "proper" type is "native int" get labeled TYP_LONG.
106 // In the verification type system, we always transform "native int" to "TI_INT".
107 // Ideally, we would keep track of which nodes labeled "TYP_LONG" are really "native int", but
108 // attempts to do that have proved too difficult. Instead, we'll assume that in checks like this,
109 // when there's a mismatch, it's because of this reason -- the typeInfo::AreEquivalentModuloNativeInt
110 // method used in the last disjunct allows exactly this mismatch.
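// (For example, a "conv.i" on a 64-bit target yields a tree typed TYP_LONG/TYP_I_IMPL,
// while the verification type tracked for that value is TI_INT.)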
111 assert(ti.IsDead() || ti.IsByRef() && (tree->TypeGet() == TYP_I_IMPL || tree->TypeGet() == TYP_BYREF) ||
112 ti.IsUnboxedGenericTypeVar() && tree->TypeGet() == TYP_REF ||
113 ti.IsObjRef() && tree->TypeGet() == TYP_REF || ti.IsMethod() && tree->TypeGet() == TYP_I_IMPL ||
114 ti.IsType(TI_STRUCT) && tree->TypeGet() != TYP_REF ||
115 typeInfo::AreEquivalentModuloNativeInt(NormaliseForStack(ti),
116 NormaliseForStack(typeInfo(tree->TypeGet()))));
118 // If it is a struct type, make certain we normalized the primitive types
119 assert(!ti.IsType(TI_STRUCT) ||
120 info.compCompHnd->getTypeForPrimitiveValueClass(ti.GetClassHandle()) == CORINFO_TYPE_UNDEF);
124 if (VERBOSE && tiVerificationNeeded)
127 printf(TI_DUMP_PADDING);
128 printf("About to push to stack: ");
131 #endif // VERBOSE_VERIFY
135 verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo = ti;
136 verCurrentState.esStack[verCurrentState.esStackDepth++].val = tree;
138 if ((tree->gtType == TYP_LONG) && (compLongUsed == false))
142 else if (((tree->gtType == TYP_FLOAT) || (tree->gtType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
144 compFloatingPointUsed = true;
148 /******************************************************************************/
149 // Used in the inliner, where we can assume type-safe code. Please don't use in the importer!!
150 inline void Compiler::impPushOnStackNoType(GenTreePtr tree)
152 assert(verCurrentState.esStackDepth < impStkSize);
153 INDEBUG(verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo = typeInfo());
154 verCurrentState.esStack[verCurrentState.esStackDepth++].val = tree;
156 if ((tree->gtType == TYP_LONG) && (compLongUsed == false))
160 else if (((tree->gtType == TYP_FLOAT) || (tree->gtType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
162 compFloatingPointUsed = true;
166 inline void Compiler::impPushNullObjRefOnStack()
168 impPushOnStack(gtNewIconNode(0, TYP_REF), typeInfo(TI_NULL));
171 // This method gets called when we run into unverifiable code
172 // (and we are verifying the method)
174 inline void Compiler::verRaiseVerifyExceptionIfNeeded(INDEBUG(const char* msg) DEBUGARG(const char* file)
175 DEBUGARG(unsigned line))
177 // Remember that the code is not verifiable
178 // Note that the method may yet pass canSkipMethodVerification(),
179 // and so the presence of unverifiable code may not be an issue.
180 tiIsVerifiableCode = FALSE;
183 const char* tail = strrchr(file, '\\');
189 if (JitConfig.JitBreakOnUnsafeCode())
191 assert(!"Unsafe code detected");
195 JITLOG((LL_INFO10000, "Detected unsafe code: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
196 msg, info.compFullName, impCurOpcName, impCurOpcOffs));
198 if (verNeedsVerification() || compIsForImportOnly())
200 JITLOG((LL_ERROR, "Verification failure: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
201 msg, info.compFullName, impCurOpcName, impCurOpcOffs));
202 verRaiseVerifyException(INDEBUG(msg) DEBUGARG(file) DEBUGARG(line));
206 inline void DECLSPEC_NORETURN Compiler::verRaiseVerifyException(INDEBUG(const char* msg) DEBUGARG(const char* file)
207 DEBUGARG(unsigned line))
209 JITLOG((LL_ERROR, "Verification failure: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
210 msg, info.compFullName, impCurOpcName, impCurOpcOffs));
213 // BreakIfDebuggerPresent();
214 if (getBreakOnBadCode())
216 assert(!"Typechecking error");
220 RaiseException(SEH_VERIFICATION_EXCEPTION, EXCEPTION_NONCONTINUABLE, 0, nullptr);
224 // Helper function that tells us whether the IL instruction at the given code address
225 // consumes an address at the top of the stack. We use it to avoid needlessly marking a local as address-taken.
227 bool Compiler::impILConsumesAddr(const BYTE* codeAddr, CORINFO_METHOD_HANDLE fncHandle, CORINFO_MODULE_HANDLE scpHandle)
229 assert(!compIsForInlining());
233 opcode = (OPCODE)getU1LittleEndian(codeAddr);
237 // case CEE_LDFLDA: We're taking this one out as if you have a sequence
243 // of a primitive-like struct, you end up after morphing with the address of a local
244 // that's not marked as address-taken, which is wrong. Also, ldflda is usually used
245 // for structs that contain other structs, which isn't a case we handle very
246 // well right now for other reasons.
250 // We won't collapse small fields. This is probably not the right place to have this
251 // check, but we're only using the function for this purpose, and it is easy to factor
252 // out if we need to do so.
254 CORINFO_RESOLVED_TOKEN resolvedToken;
255 impResolveToken(codeAddr + sizeof(__int8), &resolvedToken, CORINFO_TOKENKIND_Field);
257 CORINFO_CLASS_HANDLE clsHnd;
258 var_types lclTyp = JITtype2varType(info.compCompHnd->getFieldType(resolvedToken.hField, &clsHnd));
260 // Preserve 'small' int types
261 if (lclTyp > TYP_INT)
263 lclTyp = genActualType(lclTyp);
266 if (varTypeIsSmall(lclTyp))
280 void Compiler::impResolveToken(const BYTE* addr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoTokenKind kind)
282 pResolvedToken->tokenContext = impTokenLookupContextHandle;
283 pResolvedToken->tokenScope = info.compScopeHnd;
284 pResolvedToken->token = getU4LittleEndian(addr);
285 pResolvedToken->tokenType = kind;
287 if (!tiVerificationNeeded)
289 info.compCompHnd->resolveToken(pResolvedToken);
293 Verify(eeTryResolveToken(pResolvedToken), "Token resolution failed");
297 /*****************************************************************************
299 * Pop one tree from the stack.
302 StackEntry Compiler::impPopStack()
304 if (verCurrentState.esStackDepth == 0)
306 BADCODE("stack underflow");
311 if (VERBOSE && tiVerificationNeeded)
314 printf(TI_DUMP_PADDING);
315 printf("About to pop from the stack: ");
316 const typeInfo& ti = verCurrentState.esStack[verCurrentState.esStackDepth - 1].seTypeInfo;
319 #endif // VERBOSE_VERIFY
322 return verCurrentState.esStack[--verCurrentState.esStackDepth];
325 StackEntry Compiler::impPopStack(CORINFO_CLASS_HANDLE& structType)
327 StackEntry ret = impPopStack();
328 structType = verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo.GetClassHandle();
332 GenTreePtr Compiler::impPopStack(typeInfo& ti)
334 StackEntry ret = impPopStack();
339 /*****************************************************************************
344 * Peek at the n'th (0-based) tree from the top of the stack.
344 StackEntry& Compiler::impStackTop(unsigned n)
346 if (verCurrentState.esStackDepth <= n)
348 BADCODE("stack underflow");
351 return verCurrentState.esStack[verCurrentState.esStackDepth - n - 1];
353 /*****************************************************************************
354 * Some of the trees are spilled in a special way. While unspilling them, or
355 * making a copy, these need special handling. The function
356 * enumerates the operators that are possible after spilling.
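 * In practice, only GT_LCL_VAR nodes and constants are considered valid spilled entries.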
359 #ifdef DEBUG // only used in asserts
360 static bool impValidSpilledStackEntry(GenTreePtr tree)
362 if (tree->gtOper == GT_LCL_VAR)
367 if (tree->OperIsConst())
376 /*****************************************************************************
378 * The following logic is used to save/restore stack contents.
379 * If 'copy' is true, then we make a copy of the trees on the stack. These
380 * have to all be cloneable/spilled values.
383 void Compiler::impSaveStackState(SavedStack* savePtr, bool copy)
385 savePtr->ssDepth = verCurrentState.esStackDepth;
387 if (verCurrentState.esStackDepth)
389 savePtr->ssTrees = new (this, CMK_ImpStack) StackEntry[verCurrentState.esStackDepth];
390 size_t saveSize = verCurrentState.esStackDepth * sizeof(*savePtr->ssTrees);
394 StackEntry* table = savePtr->ssTrees;
396 /* Make a fresh copy of all the stack entries */
398 for (unsigned level = 0; level < verCurrentState.esStackDepth; level++, table++)
400 table->seTypeInfo = verCurrentState.esStack[level].seTypeInfo;
401 GenTreePtr tree = verCurrentState.esStack[level].val;
403 assert(impValidSpilledStackEntry(tree));
405 switch (tree->gtOper)
412 table->val = gtCloneExpr(tree);
416 assert(!"Bad oper - Not covered by impValidSpilledStackEntry()");
423 memcpy(savePtr->ssTrees, verCurrentState.esStack, saveSize);
428 void Compiler::impRestoreStackState(SavedStack* savePtr)
430 verCurrentState.esStackDepth = savePtr->ssDepth;
432 if (verCurrentState.esStackDepth)
434 memcpy(verCurrentState.esStack, savePtr->ssTrees,
435 verCurrentState.esStackDepth * sizeof(*verCurrentState.esStack));
439 /*****************************************************************************
441 * Get the tree list started for a new basic block.
443 inline void Compiler::impBeginTreeList()
445 assert(impTreeList == nullptr && impTreeLast == nullptr);
447 impTreeList = impTreeLast = new (this, GT_BEG_STMTS) GenTree(GT_BEG_STMTS, TYP_VOID);
450 /*****************************************************************************
452 * Store the given start and end stmt in the given basic block. This is
453 * mostly called by impEndTreeList(BasicBlock *block). It is called
454 * directly only for handling CEE_LEAVEs out of finally-protected try's.
457 inline void Compiler::impEndTreeList(BasicBlock* block, GenTreePtr firstStmt, GenTreePtr lastStmt)
459 assert(firstStmt->gtOper == GT_STMT);
460 assert(lastStmt->gtOper == GT_STMT);
462 /* Make the list circular, so that we can easily walk it backwards */
464 firstStmt->gtPrev = lastStmt;
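// Note that only the head's gtPrev wraps around to the last statement, so a backwards
// walk can start from bbTreeList->gtPrev.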
466 /* Store the tree list in the basic block */
468 block->bbTreeList = firstStmt;
470 /* The block should not already be marked as imported */
471 assert((block->bbFlags & BBF_IMPORTED) == 0);
473 block->bbFlags |= BBF_IMPORTED;
476 /*****************************************************************************
478 * Store the current tree list in the given basic block.
481 inline void Compiler::impEndTreeList(BasicBlock* block)
483 assert(impTreeList->gtOper == GT_BEG_STMTS);
485 GenTreePtr firstTree = impTreeList->gtNext;
489 /* The block should not already be marked as imported */
490 assert((block->bbFlags & BBF_IMPORTED) == 0);
492 // Empty block. Just mark it as imported
493 block->bbFlags |= BBF_IMPORTED;
497 // Ignore the GT_BEG_STMTS
498 assert(firstTree->gtPrev == impTreeList);
500 impEndTreeList(block, firstTree, impTreeLast);
504 if (impLastILoffsStmt != nullptr)
506 impLastILoffsStmt->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
507 impLastILoffsStmt = nullptr;
510 impTreeList = impTreeLast = nullptr;
514 /*****************************************************************************
516 * Check that storing the given tree doesn't mess up the semantic order. Note
517 * that this has only limited value as we can only check [0..chkLevel).
520 inline void Compiler::impAppendStmtCheck(GenTreePtr stmt, unsigned chkLevel)
525 assert(stmt->gtOper == GT_STMT);
527 if (chkLevel == (unsigned)CHECK_SPILL_ALL)
529 chkLevel = verCurrentState.esStackDepth;
532 if (verCurrentState.esStackDepth == 0 || chkLevel == 0 || chkLevel == (unsigned)CHECK_SPILL_NONE)
537 GenTreePtr tree = stmt->gtStmt.gtStmtExpr;
539 // Calls can only be appended if there are no GTF_GLOB_EFFECT trees on the stack
541 if (tree->gtFlags & GTF_CALL)
543 for (unsigned level = 0; level < chkLevel; level++)
545 assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_EFFECT) == 0);
549 if (tree->gtOper == GT_ASG)
551 // For an assignment to a local variable, all references to that
552 // variable have to be spilled. If it is aliased, all calls and
553 // indirect accesses have to be spilled.
555 if (tree->gtOp.gtOp1->gtOper == GT_LCL_VAR)
557 unsigned lclNum = tree->gtOp.gtOp1->gtLclVarCommon.gtLclNum;
558 for (unsigned level = 0; level < chkLevel; level++)
560 assert(!gtHasRef(verCurrentState.esStack[level].val, lclNum, false));
561 assert(!lvaTable[lclNum].lvAddrExposed ||
562 (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT) == 0);
566 // If the access may be to global memory, all side effects have to be spilled.
568 else if (tree->gtOp.gtOp1->gtFlags & GTF_GLOB_REF)
570 for (unsigned level = 0; level < chkLevel; level++)
572 assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_REF) == 0);
579 /*****************************************************************************
581 * Append the given GT_STMT node to the current block's tree list.
582 * [0..chkLevel) is the portion of the stack which we will check for
583 * interference with stmt and spill if needed.
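 * For example, if the statement being appended contains a call, any stack entries that
 * read global state (GTF_GLOB_REF) are spilled to temps first, so the call's side
 * effects cannot change the values those entries would have produced.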
586 inline void Compiler::impAppendStmt(GenTreePtr stmt, unsigned chkLevel)
588 assert(stmt->gtOper == GT_STMT);
589 noway_assert(impTreeLast != nullptr);
591 /* If the statement being appended has any side-effects, check the stack
592 to see if anything needs to be spilled to preserve correct ordering. */
594 GenTreePtr expr = stmt->gtStmt.gtStmtExpr;
595 unsigned flags = expr->gtFlags & GTF_GLOB_EFFECT;
597 // Assignments to (unaliased) locals don't count as a side effect, as
598 // we handle them specially using impSpillLclRefs(). Temp locals should
601 if ((expr->gtOper == GT_ASG) && (expr->gtOp.gtOp1->gtOper == GT_LCL_VAR) &&
602 !(expr->gtOp.gtOp1->gtFlags & GTF_GLOB_REF) && !gtHasLocalsWithAddrOp(expr->gtOp.gtOp2))
604 unsigned op2Flags = expr->gtOp.gtOp2->gtFlags & GTF_GLOB_EFFECT;
605 assert(flags == (op2Flags | GTF_ASG));
609 if (chkLevel == (unsigned)CHECK_SPILL_ALL)
611 chkLevel = verCurrentState.esStackDepth;
614 if (chkLevel && chkLevel != (unsigned)CHECK_SPILL_NONE)
616 assert(chkLevel <= verCurrentState.esStackDepth);
620 // If there is a call, we have to spill global refs
621 bool spillGlobEffects = (flags & GTF_CALL) ? true : false;
623 if (expr->gtOper == GT_ASG)
625 GenTree* lhs = expr->gtGetOp1();
626 // If we are assigning to a global ref, we have to spill global refs on stack.
627 // TODO-1stClassStructs: Previously, spillGlobEffects was set to true for
628 // GT_INITBLK and GT_COPYBLK, but this is overly conservative, and should be
629 // revisited. (Note that it was NOT set to true for GT_COPYOBJ.)
630 if (!expr->OperIsBlkOp())
632 // If we are assigning to a global ref, we have to spill global refs on stack
633 if ((lhs->gtFlags & GTF_GLOB_REF) != 0)
635 spillGlobEffects = true;
638 else if ((lhs->OperIsBlk() && !lhs->AsBlk()->HasGCPtr()) ||
639 ((lhs->OperGet() == GT_LCL_VAR) &&
640 (lvaTable[lhs->AsLclVarCommon()->gtLclNum].lvStructGcCount == 0)))
642 spillGlobEffects = true;
646 impSpillSideEffects(spillGlobEffects, chkLevel DEBUGARG("impAppendStmt"));
650 impSpillSpecialSideEff();
654 impAppendStmtCheck(stmt, chkLevel);
656 /* Point 'prev' at the previous node, so that we can walk backwards */
658 stmt->gtPrev = impTreeLast;
660 /* Append the expression statement to the list */
662 impTreeLast->gtNext = stmt;
666 impMarkContiguousSIMDFieldAssignments(stmt);
669 /* Once we set impCurStmtOffs in an appended tree, we are ready to
670 report the following offsets. So reset impCurStmtOffs */
672 if (impTreeLast->gtStmt.gtStmtILoffsx == impCurStmtOffs)
674 impCurStmtOffsSet(BAD_IL_OFFSET);
678 if (impLastILoffsStmt == nullptr)
680 impLastILoffsStmt = stmt;
691 /*****************************************************************************
693 * Insert the given GT_STMT "stmt" before GT_STMT "stmtBefore"
696 inline void Compiler::impInsertStmtBefore(GenTreePtr stmt, GenTreePtr stmtBefore)
698 assert(stmt->gtOper == GT_STMT);
699 assert(stmtBefore->gtOper == GT_STMT);
701 GenTreePtr stmtPrev = stmtBefore->gtPrev;
702 stmt->gtPrev = stmtPrev;
703 stmt->gtNext = stmtBefore;
704 stmtPrev->gtNext = stmt;
705 stmtBefore->gtPrev = stmt;
708 /*****************************************************************************
710 * Append the given expression tree to the current block's tree list.
711 * Return the newly created statement.
714 GenTreePtr Compiler::impAppendTree(GenTreePtr tree, unsigned chkLevel, IL_OFFSETX offset)
718 /* Allocate an 'expression statement' node */
720 GenTreePtr expr = gtNewStmt(tree, offset);
722 /* Append the statement to the current block's stmt list */
724 impAppendStmt(expr, chkLevel);
729 /*****************************************************************************
731 * Insert the given expression tree before GT_STMT "stmtBefore"
734 void Compiler::impInsertTreeBefore(GenTreePtr tree, IL_OFFSETX offset, GenTreePtr stmtBefore)
736 assert(stmtBefore->gtOper == GT_STMT);
738 /* Allocate an 'expression statement' node */
740 GenTreePtr expr = gtNewStmt(tree, offset);
742 /* Append the statement to the current block's stmt list */
744 impInsertStmtBefore(expr, stmtBefore);
747 /*****************************************************************************
749 * Append an assignment of the given value to a temp to the current tree list.
750 * curLevel is the stack level for which the spill to the temp is being done.
753 void Compiler::impAssignTempGen(unsigned tmp,
756 GenTreePtr* pAfterStmt, /* = NULL */
757 IL_OFFSETX ilOffset, /* = BAD_IL_OFFSET */
758 BasicBlock* block /* = NULL */
761 GenTreePtr asg = gtNewTempAssign(tmp, val);
763 if (!asg->IsNothingNode())
767 GenTreePtr asgStmt = gtNewStmt(asg, ilOffset);
768 *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, asgStmt);
772 impAppendTree(asg, curLevel, impCurStmtOffs);
777 /*****************************************************************************
778 * Same as above, but handles the value class case too.
781 void Compiler::impAssignTempGen(unsigned tmpNum,
783 CORINFO_CLASS_HANDLE structType,
785 GenTreePtr* pAfterStmt, /* = NULL */
786 IL_OFFSETX ilOffset, /* = BAD_IL_OFFSET */
787 BasicBlock* block /* = NULL */
792 if (varTypeIsStruct(val))
794 assert(tmpNum < lvaCount);
795 assert(structType != NO_CLASS_HANDLE);
797 // If the method is not verifiable, the assert is not true,
798 // so at least ignore it when verification is turned on,
799 // since any block that tries to use the temp would have failed verification.
800 var_types varType = lvaTable[tmpNum].lvType;
801 assert(tiVerificationNeeded || varType == TYP_UNDEF || varTypeIsStruct(varType));
802 lvaSetStruct(tmpNum, structType, false);
804 // Now, set the type of the struct value. Note that lvaSetStruct may modify the type
805 // of the lclVar to a specialized type (e.g. TYP_SIMD), based on the handle (structType)
806 // that has been passed in for the value being assigned to the temp, in which case we
807 // need to set 'val' to that same type.
808 // Note also that if we always normalized the types of any node that might be a struct
809 // type, this would not be necessary - but that requires additional JIT/EE interface
810 // calls that may not actually be required - e.g. if we only access a field of a struct.
812 val->gtType = lvaTable[tmpNum].lvType;
814 GenTreePtr dst = gtNewLclvNode(tmpNum, val->gtType);
815 asg = impAssignStruct(dst, val, structType, curLevel, pAfterStmt, block);
819 asg = gtNewTempAssign(tmpNum, val);
822 if (!asg->IsNothingNode())
826 GenTreePtr asgStmt = gtNewStmt(asg, ilOffset);
827 *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, asgStmt);
831 impAppendTree(asg, curLevel, impCurStmtOffs);
836 /*****************************************************************************
838 * Pop the given number of values from the stack and return a list node with their values.
840 * The 'prefixTree' argument may optionally contain an argument
841 * list that is prepended to the list returned from this function.
843 * The notion of prepended is a bit misleading in that the list is backwards
844 * from the way I would expect: The first element popped is at the end of
845 * the returned list, and prefixTree is 'before' that, meaning closer to
846 * the end of the list. To get to prefixTree, you have to walk to the bottom of the list.
849 * For ARG_ORDER_R2L prefixTree is only used to insert extra arguments, as
850 * such we reverse its meaning such that returnValue has a reversed
851 * prefixTree at the head of the list.
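 * For example (leaving aside the ARG_ORDER_R2L special case), popping three values with
 * a non-null prefixTree P yields roughly:
 *     val3 -> val2 -> val1 -> P
 * where val1 was the first value popped.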
854 GenTreeArgList* Compiler::impPopList(unsigned count,
856 CORINFO_SIG_INFO* sig,
857 GenTreeArgList* prefixTree)
859 assert(sig == nullptr || count == sig->numArgs);
862 CORINFO_CLASS_HANDLE structType;
863 GenTreeArgList* treeList;
865 if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L)
871 treeList = prefixTree;
876 StackEntry se = impPopStack();
877 typeInfo ti = se.seTypeInfo;
878 GenTreePtr temp = se.val;
880 if (varTypeIsStruct(temp))
882 // Morph trees that aren't already OBJs or MKREFANY to be OBJs
883 assert(ti.IsType(TI_STRUCT));
884 structType = ti.GetClassHandleForValueClass();
885 temp = impNormStructVal(temp, structType, (unsigned)CHECK_SPILL_ALL);
888 /* NOTE: we defer bashing the type for I_IMPL to fgMorphArgs */
889 flags |= temp->gtFlags;
890 treeList = gtNewListNode(temp, treeList);
897 if (sig->retTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS &&
898 sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR && sig->retType != CORINFO_TYPE_VAR)
900 // Make sure that all valuetypes (including enums) that we push are loaded.
901 // This is to guarantee that if a GC is triggered from the prestub of this method,
902 // all valuetypes in the method signature are already loaded.
903 // We need to be able to find the size of the valuetypes, but we cannot
904 // do a class-load from within GC.
905 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(sig->retTypeSigClass);
908 CORINFO_ARG_LIST_HANDLE argLst = sig->args;
909 CORINFO_CLASS_HANDLE argClass;
910 CORINFO_CLASS_HANDLE argRealClass;
911 GenTreeArgList* args;
914 for (args = treeList, count = sig->numArgs; count > 0; args = args->Rest(), count--)
916 PREFIX_ASSUME(args != nullptr);
918 CorInfoType corType = strip(info.compCompHnd->getArgType(sig, argLst, &argClass));
920 // insert implied casts (from float to double or double to float)
922 if (corType == CORINFO_TYPE_DOUBLE && args->Current()->TypeGet() == TYP_FLOAT)
924 args->Current() = gtNewCastNode(TYP_DOUBLE, args->Current(), TYP_DOUBLE);
926 else if (corType == CORINFO_TYPE_FLOAT && args->Current()->TypeGet() == TYP_DOUBLE)
928 args->Current() = gtNewCastNode(TYP_FLOAT, args->Current(), TYP_FLOAT);
931 // insert any widening or narrowing casts for backwards compatibility
933 args->Current() = impImplicitIorI4Cast(args->Current(), JITtype2varType(corType));
935 if (corType != CORINFO_TYPE_CLASS && corType != CORINFO_TYPE_BYREF && corType != CORINFO_TYPE_PTR &&
936 corType != CORINFO_TYPE_VAR && (argRealClass = info.compCompHnd->getArgClass(sig, argLst)) != nullptr)
938 // Everett MC++ could generate IL with mismatched valuetypes. It used to work with the Everett JIT,
939 // but it stopped working in Whidbey when we started passing simple valuetypes as underlying
941 // We will try to adjust for this case here to avoid breaking customers' code (see VSW 485789 for
943 if (corType == CORINFO_TYPE_VALUECLASS && !varTypeIsStruct(args->Current()))
945 args->Current() = impNormStructVal(args->Current(), argRealClass, (unsigned)CHECK_SPILL_ALL, true);
948 // Make sure that all valuetypes (including enums) that we push are loaded.
949 // This is to guarantee that if a GC is triggered from the prestub of this method,
950 // all valuetypes in the method signature are already loaded.
951 // We need to be able to find the size of the valuetypes, but we cannot
952 // do a class-load from within GC.
953 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(argRealClass);
956 argLst = info.compCompHnd->getArgNext(argLst);
960 if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L)
962 // Prepend the prefixTree
964 // Simple in-place reversal to place treeList
965 // at the end of a reversed prefixTree
966 while (prefixTree != nullptr)
968 GenTreeArgList* next = prefixTree->Rest();
969 prefixTree->Rest() = treeList;
970 treeList = prefixTree;
977 /*****************************************************************************
979 * Pop the given number of values from the stack in reverse order (STDCALL/CDECL etc.)
980 * The first "skipReverseCount" items are not reversed.
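 * For example, with skipReverseCount == 0 the list that impPopList returns as
 * "last popped -> ... -> first popped" comes back fully reversed, i.e. with the first
 * value popped at its head.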
983 GenTreeArgList* Compiler::impPopRevList(unsigned count,
985 CORINFO_SIG_INFO* sig,
986 unsigned skipReverseCount)
989 assert(skipReverseCount <= count);
991 GenTreeArgList* list = impPopList(count, flagsPtr, sig);
994 if (list == nullptr || skipReverseCount == count)
999 GenTreeArgList* ptr = nullptr; // Initialized to the first node that needs to be reversed
1000 GenTreeArgList* lastSkipNode = nullptr; // Will be set to the last node that does not need to be reversed
1002 if (skipReverseCount == 0)
1008 lastSkipNode = list;
1009 // Get to the first node that needs to be reversed
1010 for (unsigned i = 0; i < skipReverseCount - 1; i++)
1012 lastSkipNode = lastSkipNode->Rest();
1015 PREFIX_ASSUME(lastSkipNode != nullptr);
1016 ptr = lastSkipNode->Rest();
1019 GenTreeArgList* reversedList = nullptr;
1023 GenTreeArgList* tmp = ptr->Rest();
1024 ptr->Rest() = reversedList;
1027 } while (ptr != nullptr);
1029 if (skipReverseCount)
1031 lastSkipNode->Rest() = reversedList;
1036 return reversedList;
1040 /*****************************************************************************
1041 Assign (copy) the structure from 'src' to 'dest'. The structure is a value
1042 class of type 'structHnd'. It returns the tree that should be appended to the
1043 statement list that represents the assignment.
1044 Temp assignments may be appended to impTreeList if spilling is necessary.
1045 curLevel is the stack level for which a spill may be being done.
1048 GenTreePtr Compiler::impAssignStruct(GenTreePtr dest,
1050 CORINFO_CLASS_HANDLE structHnd,
1052 GenTreePtr* pAfterStmt, /* = NULL */
1053 BasicBlock* block /* = NULL */
1056 assert(varTypeIsStruct(dest));
1058 while (dest->gtOper == GT_COMMA)
1060 assert(varTypeIsStruct(dest->gtOp.gtOp2)); // Second thing is the struct
1062 // Append all the op1 of GT_COMMA trees before we evaluate op2 of the GT_COMMA tree.
1065 *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(dest->gtOp.gtOp1, impCurStmtOffs));
1069 impAppendTree(dest->gtOp.gtOp1, curLevel, impCurStmtOffs); // do the side effect
1072 // set dest to the second thing
1073 dest = dest->gtOp.gtOp2;
1076 assert(dest->gtOper == GT_LCL_VAR || dest->gtOper == GT_RETURN || dest->gtOper == GT_FIELD ||
1077 dest->gtOper == GT_IND || dest->gtOper == GT_OBJ || dest->gtOper == GT_INDEX);
1079 if (dest->OperGet() == GT_LCL_VAR && src->OperGet() == GT_LCL_VAR &&
1080 src->gtLclVarCommon.gtLclNum == dest->gtLclVarCommon.gtLclNum)
1083 return gtNewNothingNode();
1086 // TODO-1stClassStructs: Avoid creating an address if it is not needed,
1087 // or re-creating a Blk node if it is.
1088 GenTreePtr destAddr;
1090 if (dest->gtOper == GT_IND || dest->OperIsBlk())
1092 destAddr = dest->gtOp.gtOp1;
1096 destAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, dest);
1099 return (impAssignStructPtr(destAddr, src, structHnd, curLevel, pAfterStmt, block));
1102 /*****************************************************************************/
1104 GenTreePtr Compiler::impAssignStructPtr(GenTreePtr destAddr,
1106 CORINFO_CLASS_HANDLE structHnd,
1108 GenTreePtr* pAfterStmt, /* = NULL */
1109 BasicBlock* block /* = NULL */
1113 GenTreePtr dest = nullptr;
1114 unsigned destFlags = 0;
1116 #if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1117 assert(varTypeIsStruct(src) || (src->gtOper == GT_ADDR && src->TypeGet() == TYP_BYREF));
1118 // TODO-ARM-BUG: Does ARM need this?
1119 // TODO-ARM64-BUG: Does ARM64 need this?
1120 assert(src->gtOper == GT_LCL_VAR || src->gtOper == GT_FIELD || src->gtOper == GT_IND || src->gtOper == GT_OBJ ||
1121 src->gtOper == GT_CALL || src->gtOper == GT_MKREFANY || src->gtOper == GT_RET_EXPR ||
1122 src->gtOper == GT_COMMA || src->gtOper == GT_ADDR ||
1123 (src->TypeGet() != TYP_STRUCT && (GenTree::OperIsSIMD(src->gtOper) || src->gtOper == GT_LCL_FLD)));
1124 #else // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1125 assert(varTypeIsStruct(src));
1127 assert(src->gtOper == GT_LCL_VAR || src->gtOper == GT_FIELD || src->gtOper == GT_IND || src->gtOper == GT_OBJ ||
1128 src->gtOper == GT_CALL || src->gtOper == GT_MKREFANY || src->gtOper == GT_RET_EXPR ||
1129 src->gtOper == GT_COMMA ||
1130 (src->TypeGet() != TYP_STRUCT && (GenTree::OperIsSIMD(src->gtOper) || src->gtOper == GT_LCL_FLD)));
1131 #endif // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1132 if (destAddr->OperGet() == GT_ADDR)
1134 GenTree* destNode = destAddr->gtGetOp1();
1135 // If the actual destination is a local (for non-LEGACY_BACKEND), or already a block node, or is a node that
1136 // will be morphed, don't insert an OBJ(ADDR).
1137 if (destNode->gtOper == GT_INDEX || destNode->OperIsBlk()
1138 #ifndef LEGACY_BACKEND
1139 || ((destNode->OperGet() == GT_LCL_VAR) && (destNode->TypeGet() == src->TypeGet()))
1140 #endif // !LEGACY_BACKEND
1145 destType = destNode->TypeGet();
1149 destType = src->TypeGet();
1152 var_types asgType = src->TypeGet();
1154 if (src->gtOper == GT_CALL)
1156 if (src->AsCall()->TreatAsHasRetBufArg(this))
1158 // Case of call returning a struct via hidden retbuf arg
1160 // insert the return value buffer into the argument list as first byref parameter
1161 src->gtCall.gtCallArgs = gtNewListNode(destAddr, src->gtCall.gtCallArgs);
1163 // now returns void, not a struct
1164 src->gtType = TYP_VOID;
1166 // return the morphed call node
1171 // Case of call returning a struct in one or more registers.
1173 var_types returnType = (var_types)src->gtCall.gtReturnType;
1175 // We won't use a return buffer, so change the type of src->gtType to 'returnType'
1176 src->gtType = genActualType(returnType);
1178 // First we try to change this to "LclVar/LclFld = call"
1180 if ((destAddr->gtOper == GT_ADDR) && (destAddr->gtOp.gtOp1->gtOper == GT_LCL_VAR))
1182 // If it is a multi-reg struct return, don't change the oper to GT_LCL_FLD.
1183 // That is, the IR will be of the form lclVar = call for multi-reg return
1185 GenTreePtr lcl = destAddr->gtOp.gtOp1;
1186 if (src->AsCall()->HasMultiRegRetVal())
1188 // Mark the struct LclVar as used in a MultiReg return context
1189 // which currently makes it non promotable.
1190 // TODO-1stClassStructs: Eliminate this pessimization when we can more generally
1191 // handle multireg returns.
1192 lcl->gtFlags |= GTF_DONT_CSE;
1193 lvaTable[lcl->gtLclVarCommon.gtLclNum].lvIsMultiRegRet = true;
1195 else // The call result is not a multireg return
1197 // We change this to a GT_LCL_FLD (from a GT_ADDR of a GT_LCL_VAR)
1198 lcl->ChangeOper(GT_LCL_FLD);
1199 fgLclFldAssign(lcl->gtLclVarCommon.gtLclNum);
1202 lcl->gtType = src->gtType;
1203 asgType = src->gtType;
1206 #if defined(_TARGET_ARM_)
1207 // TODO-Cleanup: This should have been taken care of in the above HasMultiRegRetVal() case,
1208 // but that method has not been updated to include ARM.
1209 impMarkLclDstNotPromotable(lcl->gtLclVarCommon.gtLclNum, src, structHnd);
1210 lcl->gtFlags |= GTF_DONT_CSE;
1211 #elif defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1212 // Not allowed for FEATURE_CORCLR which is the only SKU available for System V OSs.
1213 assert(!src->gtCall.IsVarargs() && "varargs not allowed for System V OSs.");
1215 // Make the struct non promotable. The eightbytes could contain multiple fields.
1216 // TODO-1stClassStructs: Eliminate this pessimization when we can more generally
1217 // handle multireg returns.
1218 // TODO-Cleanup: Why is this needed here? It seems that this will be set even for
1219 // non-multireg returns.
1220 lcl->gtFlags |= GTF_DONT_CSE;
1221 lvaTable[lcl->gtLclVarCommon.gtLclNum].lvIsMultiRegRet = true;
1224 else // we don't have a GT_ADDR of a GT_LCL_VAR
1226 // !!! The destination could be on stack. !!!
1227 // This flag will let us choose the correct write barrier.
1228 asgType = returnType;
1229 destFlags = GTF_IND_TGTANYWHERE;
1233 else if (src->gtOper == GT_RET_EXPR)
1235 GenTreePtr call = src->gtRetExpr.gtInlineCandidate;
1236 noway_assert(call->gtOper == GT_CALL);
1238 if (call->AsCall()->HasRetBufArg())
1240 // insert the return value buffer into the argument list as first byref parameter
1241 call->gtCall.gtCallArgs = gtNewListNode(destAddr, call->gtCall.gtCallArgs);
1243 // now returns void, not a struct
1244 src->gtType = TYP_VOID;
1245 call->gtType = TYP_VOID;
1247 // We already have appended the write to 'dest' GT_CALL's args
1248 // So now we just return an empty node (pruning the GT_RET_EXPR)
1253 // Case of inline method returning a struct in one or more registers.
1255 var_types returnType = (var_types)call->gtCall.gtReturnType;
1257 // We won't need a return buffer
1258 asgType = returnType;
1259 src->gtType = genActualType(returnType);
1260 call->gtType = src->gtType;
1262 // If we've changed the type, and it no longer matches a local destination,
1263 // we must use an indirection.
1264 if ((dest != nullptr) && (dest->OperGet() == GT_LCL_VAR) && (dest->TypeGet() != asgType))
1269 // !!! The destination could be on stack. !!!
1270 // This flag will let us choose the correct write barrier.
1271 destFlags = GTF_IND_TGTANYWHERE;
1274 else if (src->OperIsBlk())
1276 asgType = impNormStructType(structHnd);
1277 if (src->gtOper == GT_OBJ)
1279 assert(src->gtObj.gtClass == structHnd);
1282 else if (src->gtOper == GT_INDEX)
1284 asgType = impNormStructType(structHnd);
1285 assert(src->gtIndex.gtStructElemClass == structHnd);
1287 else if (src->gtOper == GT_MKREFANY)
1289 // Since we are assigning the result of a GT_MKREFANY,
1290 // "destAddr" must point to a refany.
1292 GenTreePtr destAddrClone;
1294 impCloneExpr(destAddr, &destAddrClone, structHnd, curLevel, pAfterStmt DEBUGARG("MKREFANY assignment"));
1296 assert(offsetof(CORINFO_RefAny, dataPtr) == 0);
1297 assert(destAddr->gtType == TYP_I_IMPL || destAddr->gtType == TYP_BYREF);
1298 GetZeroOffsetFieldMap()->Set(destAddr, GetFieldSeqStore()->CreateSingleton(GetRefanyDataField()));
1299 GenTreePtr ptrSlot = gtNewOperNode(GT_IND, TYP_I_IMPL, destAddr);
1300 GenTreeIntCon* typeFieldOffset = gtNewIconNode(offsetof(CORINFO_RefAny, type), TYP_I_IMPL);
1301 typeFieldOffset->gtFieldSeq = GetFieldSeqStore()->CreateSingleton(GetRefanyTypeField());
1302 GenTreePtr typeSlot =
1303 gtNewOperNode(GT_IND, TYP_I_IMPL, gtNewOperNode(GT_ADD, destAddr->gtType, destAddrClone, typeFieldOffset));
1305 // append the assign of the pointer value
1306 GenTreePtr asg = gtNewAssignNode(ptrSlot, src->gtOp.gtOp1);
1309 *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(asg, impCurStmtOffs));
1313 impAppendTree(asg, curLevel, impCurStmtOffs);
1316 // return the assign of the type value, to be appended
1317 return gtNewAssignNode(typeSlot, src->gtOp.gtOp2);
1319 else if (src->gtOper == GT_COMMA)
1321 // The second thing is the struct or its address.
1322 assert(varTypeIsStruct(src->gtOp.gtOp2) || src->gtOp.gtOp2->gtType == TYP_BYREF);
1325 *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(src->gtOp.gtOp1, impCurStmtOffs));
1329 impAppendTree(src->gtOp.gtOp1, curLevel, impCurStmtOffs); // do the side effect
1332 // Evaluate the second thing using recursion.
1333 return impAssignStructPtr(destAddr, src->gtOp.gtOp2, structHnd, curLevel, pAfterStmt, block);
1335 else if (src->IsLocal())
1337 asgType = src->TypeGet();
1339 else if (asgType == TYP_STRUCT)
1341 asgType = impNormStructType(structHnd);
1342 src->gtType = asgType;
1343 #ifdef LEGACY_BACKEND
1344 if (asgType == TYP_STRUCT)
1346 GenTree* srcAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, src);
1347 src = gtNewOperNode(GT_IND, TYP_STRUCT, srcAddr);
1351 if (dest == nullptr)
1353 // TODO-1stClassStructs: We shouldn't really need a block node as the destination
1354 // if this is a known struct type.
1355 if (asgType == TYP_STRUCT)
1357 dest = gtNewObjNode(structHnd, destAddr);
1358 gtSetObjGcInfo(dest->AsObj());
1359 // Although an obj as a call argument was always assumed to be a globRef
1360 // (which is itself overly conservative), that is not true of the operands
1361 // of a block assignment.
1362 dest->gtFlags &= ~GTF_GLOB_REF;
1363 dest->gtFlags |= (destAddr->gtFlags & GTF_GLOB_REF);
1365 else if (varTypeIsStruct(asgType))
1367 dest = new (this, GT_BLK) GenTreeBlk(GT_BLK, asgType, destAddr, genTypeSize(asgType));
1371 dest = gtNewOperNode(GT_IND, asgType, destAddr);
1376 dest->gtType = asgType;
1379 dest->gtFlags |= destFlags;
1380 destFlags = dest->gtFlags;
1382 // return an assignment node, to be appended
1383 GenTree* asgNode = gtNewAssignNode(dest, src);
1384 gtBlockOpInit(asgNode, dest, src, false);
1386 // TODO-1stClassStructs: Clean up the settings of GTF_DONT_CSE on the lhs
1388 if ((destFlags & GTF_DONT_CSE) == 0)
1390 dest->gtFlags &= ~(GTF_DONT_CSE);
1395 /*****************************************************************************
1396 Given a struct value, and the class handle for that structure, return
1397 the expression for the address for that structure value.
1399 willDeref - whether the caller guarantees to dereference the returned pointer.
1402 GenTreePtr Compiler::impGetStructAddr(GenTreePtr structVal,
1403 CORINFO_CLASS_HANDLE structHnd,
1407 assert(varTypeIsStruct(structVal) || eeIsValueClass(structHnd));
1409 var_types type = structVal->TypeGet();
1411 genTreeOps oper = structVal->gtOper;
1413 if (oper == GT_OBJ && willDeref)
1415 assert(structVal->gtObj.gtClass == structHnd);
1416 return (structVal->gtObj.Addr());
1418 else if (oper == GT_CALL || oper == GT_RET_EXPR || oper == GT_OBJ || oper == GT_MKREFANY)
1420 unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));
1422 impAssignTempGen(tmpNum, structVal, structHnd, curLevel);
1424 // The 'return value' is now the temp itself
1426 type = genActualType(lvaTable[tmpNum].TypeGet());
1427 GenTreePtr temp = gtNewLclvNode(tmpNum, type);
1428 temp = gtNewOperNode(GT_ADDR, TYP_BYREF, temp);
1431 else if (oper == GT_COMMA)
1433 assert(structVal->gtOp.gtOp2->gtType == type); // Second thing is the struct
1435 GenTreePtr oldTreeLast = impTreeLast;
1436 structVal->gtOp.gtOp2 = impGetStructAddr(structVal->gtOp.gtOp2, structHnd, curLevel, willDeref);
1437 structVal->gtType = TYP_BYREF;
1439 if (oldTreeLast != impTreeLast)
1441 // Some temp assignment statement was placed on the statement list
1442 // for Op2, but that would be out of order with op1, so we need to
1443 // spill op1 onto the statement list after whatever was last
1444 // before we recursed on Op2 (i.e. before whatever Op2 appended).
1445 impInsertTreeBefore(structVal->gtOp.gtOp1, impCurStmtOffs, oldTreeLast->gtNext);
1446 structVal->gtOp.gtOp1 = gtNewNothingNode();
1452 return (gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1455 //------------------------------------------------------------------------
1456 // impNormStructType: Given a (known to be) struct class handle structHnd, normalize its type,
1457 // and optionally determine the GC layout of the struct.
1460 // structHnd - The class handle for the struct type of interest.
1461 // gcLayout - (optional, default nullptr) - a BYTE pointer, allocated by the caller,
1462 // into which the gcLayout will be written.
1463 // pNumGCVars - (optional, default nullptr) - if non-null, a pointer to an unsigned,
1464 // which will be set to the number of GC fields in the struct.
1465 // pSimdBaseType - (optional, default nullptr) - if non-null, and the struct is a SIMD
1466 // type, set to the SIMD base type
1469 // The JIT type for the struct (e.g. TYP_STRUCT, or TYP_SIMD*).
1470 // The gcLayout will be returned using the pointers provided by the caller, if non-null.
1471 // It may also modify the compFloatingPointUsed flag if the type is a SIMD type.
1474 // The caller must set gcLayout to nullptr OR ensure that it is large enough
1475 // (see ICorStaticInfo::getClassGClayout in corinfo.h).
1478 // Normalizing the type involves examining the struct type to determine if it should
1479 // be modified to one that is handled specially by the JIT, possibly being a candidate
1480 // for full enregistration, e.g. TYP_SIMD16.
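// For example, a 16-byte struct of four floats (e.g. System.Numerics.Vector4) normalizes
// to TYP_SIMD16 when featureSIMD is enabled, while a struct containing GC references
// always stays TYP_STRUCT.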
1482 var_types Compiler::impNormStructType(CORINFO_CLASS_HANDLE structHnd,
1484 unsigned* pNumGCVars,
1485 var_types* pSimdBaseType)
1487 assert(structHnd != NO_CLASS_HANDLE);
1489 const DWORD structFlags = info.compCompHnd->getClassAttribs(structHnd);
1490 var_types structType = TYP_STRUCT;
1492 // On CoreCLR the check for GC includes a "may" to account for the special
1493 // ByRef-like span structs. The added check for "CONTAINS_STACK_PTR" is the particular bit.
1494 // When this is set, the struct will contain a ByRef that could be a GC pointer or a native pointer.
1496 const bool mayContainGCPtrs =
1497 ((structFlags & CORINFO_FLG_CONTAINS_STACK_PTR) != 0 || ((structFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0));
1500 // Check to see if this is a SIMD type.
1501 if (featureSIMD && !mayContainGCPtrs)
1503 unsigned originalSize = info.compCompHnd->getClassSize(structHnd);
1505 if ((originalSize >= minSIMDStructBytes()) && (originalSize <= maxSIMDStructBytes()))
1507 unsigned int sizeBytes;
1508 var_types simdBaseType = getBaseTypeAndSizeOfSIMDType(structHnd, &sizeBytes);
1509 if (simdBaseType != TYP_UNKNOWN)
1511 assert(sizeBytes == originalSize);
1512 structType = getSIMDTypeForSize(sizeBytes);
1513 if (pSimdBaseType != nullptr)
1515 *pSimdBaseType = simdBaseType;
1517 #ifdef _TARGET_AMD64_
1518 // Amd64: also indicate that we use floating point registers
1519 compFloatingPointUsed = true;
1524 #endif // FEATURE_SIMD
1526 // Fetch GC layout info if requested
1527 if (gcLayout != nullptr)
1529 unsigned numGCVars = info.compCompHnd->getClassGClayout(structHnd, gcLayout);
1531 // Verify that the quick test up above via the class attributes gave a
1532 // safe view of the type's GCness.
1534 // Note there are cases where mayContainGCPtrs is true but getClassGClayout
1535 // does not report any gc fields.
1537 assert(mayContainGCPtrs || (numGCVars == 0));
1539 if (pNumGCVars != nullptr)
1541 *pNumGCVars = numGCVars;
1546 // Can't safely ask for number of GC pointers without also
1547 // asking for layout.
1548 assert(pNumGCVars == nullptr);
1554 //****************************************************************************
1555 // Given TYP_STRUCT value 'structVal', make sure it is 'canonical', that is
1556 // it is either an OBJ or a MKREFANY node, or a node (e.g. GT_INDEX) that will be morphed.
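// For example, a struct-typed GT_LCL_VAR is wrapped as OBJ(ADDR(LCL_VAR)), while a
// GT_CALL that returns a struct simply has its return class handle recorded.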
1558 GenTreePtr Compiler::impNormStructVal(GenTreePtr structVal,
1559 CORINFO_CLASS_HANDLE structHnd,
1561 bool forceNormalization /*=false*/)
1563 assert(forceNormalization || varTypeIsStruct(structVal));
1564 assert(structHnd != NO_CLASS_HANDLE);
1565 var_types structType = structVal->TypeGet();
1566 bool makeTemp = false;
1567 if (structType == TYP_STRUCT)
1569 structType = impNormStructType(structHnd);
1571 bool alreadyNormalized = false;
1572 GenTreeLclVarCommon* structLcl = nullptr;
1574 genTreeOps oper = structVal->OperGet();
1577 // GT_RETURN and GT_MKREFANY don't capture the handle.
1581 alreadyNormalized = true;
1585 structVal->gtCall.gtRetClsHnd = structHnd;
1590 structVal->gtRetExpr.gtRetClsHnd = structHnd;
1595 structVal->gtArgPlace.gtArgPlaceClsHnd = structHnd;
1599 // This will be transformed to an OBJ later.
1600 alreadyNormalized = true;
1601 structVal->gtIndex.gtStructElemClass = structHnd;
1602 structVal->gtIndex.gtIndElemSize = info.compCompHnd->getClassSize(structHnd);
1606 // Wrap it in a GT_OBJ.
1607 structVal->gtType = structType;
1608 structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1613 structLcl = structVal->AsLclVarCommon();
1614 // Wrap it in a GT_OBJ.
1615 structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1622 // These should already have the appropriate type.
1623 assert(structVal->gtType == structType);
1624 alreadyNormalized = true;
1628 assert(structVal->gtType == structType);
1629 structVal = gtNewObjNode(structHnd, structVal->gtGetOp1());
1630 alreadyNormalized = true;
1635 assert(varTypeIsSIMD(structVal) && (structVal->gtType == structType));
1637 #endif // FEATURE_SIMD
1641 // The second thing could either be a block node or a GT_SIMD or a GT_COMMA node.
1642 GenTree* blockNode = structVal->gtOp.gtOp2;
1643 assert(blockNode->gtType == structType);
1645 // Is this GT_COMMA(op1, GT_COMMA())?
1646 GenTree* parent = structVal;
1647 if (blockNode->OperGet() == GT_COMMA)
1649 // Find the last node in the comma chain.
1652 assert(blockNode->gtType == structType);
1654 blockNode = blockNode->gtOp.gtOp2;
1655 } while (blockNode->OperGet() == GT_COMMA);
1659 if (blockNode->OperGet() == GT_SIMD)
1661 parent->gtOp.gtOp2 = impNormStructVal(blockNode, structHnd, curLevel, forceNormalization);
1662 alreadyNormalized = true;
1667 assert(blockNode->OperIsBlk());
1669 // Sink the GT_COMMA below the blockNode addr.
1670 // That is, GT_COMMA(op1, op2=blockNode) is transformed into
1671 // blockNode(GT_COMMA(TYP_BYREF, op1, op2's op1)).
1673 // In the case of a chained GT_COMMA, we sink the last
1674 // GT_COMMA below the blockNode addr.
1675 GenTree* blockNodeAddr = blockNode->gtOp.gtOp1;
1676 assert(blockNodeAddr->gtType == TYP_BYREF);
1677 GenTree* commaNode = parent;
1678 commaNode->gtType = TYP_BYREF;
1679 commaNode->gtOp.gtOp2 = blockNodeAddr;
1680 blockNode->gtOp.gtOp1 = commaNode;
1681 if (parent == structVal)
1683 structVal = blockNode;
1685 alreadyNormalized = true;
1691 assert(!"Unexpected node in impNormStructVal()");
1694 structVal->gtType = structType;
1695 GenTree* structObj = structVal;
1697 if (!alreadyNormalized || forceNormalization)
1701 unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));
1703 impAssignTempGen(tmpNum, structVal, structHnd, curLevel);
1705 // The structVal is now the temp itself
1707 structLcl = gtNewLclvNode(tmpNum, structType)->AsLclVarCommon();
1708 // TODO-1stClassStructs: Avoid always wrapping in GT_OBJ.
1709 structObj = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structLcl));
1711 else if (varTypeIsStruct(structType) && !structVal->OperIsBlk())
1713 // Wrap it in a GT_OBJ
1714 structObj = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1718 if (structLcl != nullptr)
1720 // An OBJ on an ADDR(LCL_VAR) can never raise an exception,
1721 // so we don't set GTF_EXCEPT here.
1722 if (!lvaIsImplicitByRefLocal(structLcl->gtLclNum))
1724 structObj->gtFlags &= ~GTF_GLOB_REF;
1729 // In general an OBJ is an indirection and could raise an exception.
1730 structObj->gtFlags |= GTF_EXCEPT;
1735 /******************************************************************************/
1736 // Given a type token, generate code that will evaluate to the correct
1737 // handle representation of that token (type handle, field handle, or method handle)
1739 // For most cases, the handle is determined at compile-time, and the code
1740 // generated is simply an embedded handle.
1742 // Run-time lookup is required if the enclosing method is shared between instantiations
1743 // and the token refers to formal type parameters whose instantiation is not known at compile-time.
1746 GenTreePtr Compiler::impTokenToHandle(CORINFO_RESOLVED_TOKEN* pResolvedToken,
1747 BOOL* pRuntimeLookup /* = NULL */,
1748 BOOL mustRestoreHandle /* = FALSE */,
1749 BOOL importParent /* = FALSE */)
1751 assert(!fgGlobalMorph);
1753 CORINFO_GENERICHANDLE_RESULT embedInfo;
1754 info.compCompHnd->embedGenericHandle(pResolvedToken, importParent, &embedInfo);
1758 *pRuntimeLookup = embedInfo.lookup.lookupKind.needsRuntimeLookup;
1761 if (mustRestoreHandle && !embedInfo.lookup.lookupKind.needsRuntimeLookup)
1763 switch (embedInfo.handleType)
1765 case CORINFO_HANDLETYPE_CLASS:
1766 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun((CORINFO_CLASS_HANDLE)embedInfo.compileTimeHandle);
1769 case CORINFO_HANDLETYPE_METHOD:
1770 info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun((CORINFO_METHOD_HANDLE)embedInfo.compileTimeHandle);
1773 case CORINFO_HANDLETYPE_FIELD:
1774 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(
1775 info.compCompHnd->getFieldClass((CORINFO_FIELD_HANDLE)embedInfo.compileTimeHandle));
1783 return impLookupToTree(pResolvedToken, &embedInfo.lookup, gtTokenToIconFlags(pResolvedToken->token),
1784 embedInfo.compileTimeHandle);
1787 GenTreePtr Compiler::impLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
1788 CORINFO_LOOKUP* pLookup,
1789 unsigned handleFlags,
1790 void* compileTimeHandle)
1792 if (!pLookup->lookupKind.needsRuntimeLookup)
1794 // No runtime lookup is required.
1795 // Access is a direct or memory-indirect (of a fixed address) reference.
1797 CORINFO_GENERIC_HANDLE handle = nullptr;
1798 void* pIndirection = nullptr;
1799 assert(pLookup->constLookup.accessType != IAT_PPVALUE);
1801 if (pLookup->constLookup.accessType == IAT_VALUE)
1803 handle = pLookup->constLookup.handle;
1805 else if (pLookup->constLookup.accessType == IAT_PVALUE)
1807 pIndirection = pLookup->constLookup.addr;
1809 return gtNewIconEmbHndNode(handle, pIndirection, handleFlags, 0, nullptr, compileTimeHandle);
1811 else if (compIsForInlining())
1813 // Don't import runtime lookups when inlining
1814 // Inlining has to be aborted in such a case
1815 compInlineResult->NoteFatal(InlineObservation::CALLSITE_GENERIC_DICTIONARY_LOOKUP);
1820 // Need to use dictionary-based access which depends on the typeContext
1821 // which is only available at runtime, not at compile-time.
1823 return impRuntimeLookupToTree(pResolvedToken, pLookup, compileTimeHandle);
1827 #ifdef FEATURE_READYTORUN_COMPILER
1828 GenTreePtr Compiler::impReadyToRunLookupToTree(CORINFO_CONST_LOOKUP* pLookup,
1829 unsigned handleFlags,
1830 void* compileTimeHandle)
1832 CORINFO_GENERIC_HANDLE handle = nullptr;
1833 void* pIndirection = nullptr;
1834 assert(pLookup->accessType != IAT_PPVALUE);
1836 if (pLookup->accessType == IAT_VALUE)
1838 handle = pLookup->handle;
1840 else if (pLookup->accessType == IAT_PVALUE)
1842 pIndirection = pLookup->addr;
1844 return gtNewIconEmbHndNode(handle, pIndirection, handleFlags, 0, nullptr, compileTimeHandle);
1847 GenTreePtr Compiler::impReadyToRunHelperToTree(
1848 CORINFO_RESOLVED_TOKEN* pResolvedToken,
1849 CorInfoHelpFunc helper,
1851 GenTreeArgList* args /* =NULL*/,
1852 CORINFO_LOOKUP_KIND* pGenericLookupKind /* =NULL. Only used with generics */)
1854 CORINFO_CONST_LOOKUP lookup;
1855 #if COR_JIT_EE_VERSION > 460
1856 if (!info.compCompHnd->getReadyToRunHelper(pResolvedToken, pGenericLookupKind, helper, &lookup))
1861 info.compCompHnd->getReadyToRunHelper(pResolvedToken, helper, &lookup);
1864 GenTreePtr op1 = gtNewHelperCallNode(helper, type, GTF_EXCEPT, args);
1866 op1->gtCall.setEntryPoint(lookup);
1872 GenTreePtr Compiler::impMethodPointer(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo)
1874 GenTreePtr op1 = nullptr;
1876 switch (pCallInfo->kind)
1879 op1 = new (this, GT_FTN_ADDR) GenTreeFptrVal(TYP_I_IMPL, pCallInfo->hMethod);
1881 #ifdef FEATURE_READYTORUN_COMPILER
1882 if (opts.IsReadyToRun())
1884 op1->gtFptrVal.gtEntryPoint = pCallInfo->codePointerLookup.constLookup;
1885 op1->gtFptrVal.gtLdftnResolvedToken = new (this, CMK_Unknown) CORINFO_RESOLVED_TOKEN;
1886 *op1->gtFptrVal.gtLdftnResolvedToken = *pResolvedToken;
1890 op1->gtFptrVal.gtEntryPoint.addr = nullptr;
1895 case CORINFO_CALL_CODE_POINTER:
1896 if (compIsForInlining())
1898 // Don't import runtime lookups when inlining
1899 // Inlining has to be aborted in such a case
1900 compInlineResult->NoteFatal(InlineObservation::CALLSITE_GENERIC_DICTIONARY_LOOKUP);
1904 op1 = impLookupToTree(pResolvedToken, &pCallInfo->codePointerLookup, GTF_ICON_FTN_ADDR, pCallInfo->hMethod);
1908 noway_assert(!"unknown call kind");
1915 //------------------------------------------------------------------------
1916 // getRuntimeContextTree: find pointer to context for runtime lookup.
1919 // kind - lookup kind.
1922 // Return a GenTree pointer to the shared generic context.
1925 // Reports use of the generic context.
1927 GenTreePtr Compiler::getRuntimeContextTree(CORINFO_RUNTIME_LOOKUP_KIND kind)
1929 GenTreePtr ctxTree = nullptr;
1931 // Collectible types require that, for shared generic code, if we use the generic context parameter,
1932 // we report it. (This is a conservative approach; we could detect some cases, particularly when the
1933 // context parameter is 'this', where we don't need the eager reporting logic.)
1934 lvaGenericsContextUsed = true;
1936 if (kind == CORINFO_LOOKUP_THISOBJ)
1939 ctxTree = gtNewLclvNode(info.compThisArg, TYP_REF);
1941 // Vtable pointer of this object
1942 ctxTree = gtNewOperNode(GT_IND, TYP_I_IMPL, ctxTree);
1943 ctxTree->gtFlags |= GTF_EXCEPT; // Null-pointer exception
1944 ctxTree->gtFlags |= GTF_IND_INVARIANT;
1948 assert(kind == CORINFO_LOOKUP_METHODPARAM || kind == CORINFO_LOOKUP_CLASSPARAM);
1950 ctxTree = gtNewLclvNode(info.compTypeCtxtArg, TYP_I_IMPL); // Exact method descriptor as passed in as last arg
1955 /*****************************************************************************/
1956 /* Import a dictionary lookup to access a handle in code shared between
1957 generic instantiations.
1958 The lookup depends on the typeContext which is only available at
1959 runtime, and not at compile-time.
1960 pLookup->token1 and pLookup->token2 specify the handle that is needed.
1963 1. pLookup->indirections == CORINFO_USEHELPER : Call a helper passing it the
1964 instantiation-specific handle, and the tokens to lookup the handle.
1965 2. pLookup->indirections != CORINFO_USEHELPER :
1966 2a. pLookup->testForNull == false : Dereference the instantiation-specific handle
1968 2b. pLookup->testForNull == true : Dereference the instantiation-specific handle.
1969 If it is non-NULL, it is the handle required. Else, call a helper
1970 to lookup the handle.
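   For case 1 this boils down to a single helper call, roughly:
       CALL pRuntimeLookup->helper(ctxTree, signature)
   where ctxTree is the shared generic context obtained from getRuntimeContextTree().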
1973 GenTreePtr Compiler::impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
1974 CORINFO_LOOKUP* pLookup,
1975 void* compileTimeHandle)
1978 // This method can only be called from the importer instance of the Compiler.
1979 // In other words, it cannot be called by the Compiler instance created for the inlinee.
1980 assert(!compIsForInlining());
1982 GenTreePtr ctxTree = getRuntimeContextTree(pLookup->lookupKind.runtimeLookupKind);
1984 #ifdef FEATURE_READYTORUN_COMPILER
1985 if (opts.IsReadyToRun())
1987 return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL,
1988 gtNewArgList(ctxTree), &pLookup->lookupKind);
1992 CORINFO_RUNTIME_LOOKUP* pRuntimeLookup = &pLookup->runtimeLookup;
1993 // It's available only via the run-time helper function
1994 if (pRuntimeLookup->indirections == CORINFO_USEHELPER)
1996 GenTreeArgList* helperArgs =
1997 gtNewArgList(ctxTree, gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_TOKEN_HDL, 0,
1998 nullptr, compileTimeHandle));
2000 return gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, GTF_EXCEPT, helperArgs);
2004 GenTreePtr slotPtrTree = ctxTree;
2006 if (pRuntimeLookup->testForNull)
2008 slotPtrTree = impCloneExpr(ctxTree, &ctxTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2009 nullptr DEBUGARG("impRuntimeLookup slot"));
2012 // Apply repeated indirections
2013 for (WORD i = 0; i < pRuntimeLookup->indirections; i++)
2017 slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
2018 slotPtrTree->gtFlags |= GTF_IND_NONFAULTING;
2019 slotPtrTree->gtFlags |= GTF_IND_INVARIANT;
2021 if (pRuntimeLookup->offsets[i] != 0)
2024 gtNewOperNode(GT_ADD, TYP_I_IMPL, slotPtrTree, gtNewIconNode(pRuntimeLookup->offsets[i], TYP_I_IMPL));
2028 // No null test required
2029 if (!pRuntimeLookup->testForNull)
2031 if (pRuntimeLookup->indirections == 0)
2036 slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
2037 slotPtrTree->gtFlags |= GTF_IND_NONFAULTING;
2039 if (!pRuntimeLookup->testForFixup)
2044 impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark0"));
2046 GenTreePtr op1 = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2047 nullptr DEBUGARG("impRuntimeLookup test"));
2048 op1 = impImplicitIorI4Cast(op1, TYP_INT); // downcast the pointer to a TYP_INT on 64-bit targets
2050 // Use a GT_AND to check for the lowest bit and indirect if it is set
2051 GenTreePtr testTree = gtNewOperNode(GT_AND, TYP_INT, op1, gtNewIconNode(1));
2052 GenTreePtr relop = gtNewOperNode(GT_EQ, TYP_INT, testTree, gtNewIconNode(0));
2053 relop->gtFlags |= GTF_RELOP_QMARK;
2055 op1 = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2056 nullptr DEBUGARG("impRuntimeLookup indir"));
2057 op1 = gtNewOperNode(GT_ADD, TYP_I_IMPL, op1, gtNewIconNode(-1, TYP_I_IMPL)); // subtract 1 from the pointer
2058 GenTreePtr indirTree = gtNewOperNode(GT_IND, TYP_I_IMPL, op1);
2059 GenTreePtr colon = new (this, GT_COLON) GenTreeColon(TYP_I_IMPL, slotPtrTree, indirTree);
2061 GenTreePtr qmark = gtNewQmarkNode(TYP_I_IMPL, relop, colon);
2063 unsigned tmp = lvaGrabTemp(true DEBUGARG("spilling QMark0"));
2064 impAssignTempGen(tmp, qmark, (unsigned)CHECK_SPILL_NONE);
2065 return gtNewLclvNode(tmp, TYP_I_IMPL);
2068 assert(pRuntimeLookup->indirections != 0);
2070 impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark1"));
2072 // Extract the handle
2073 GenTreePtr handle = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
2074 handle->gtFlags |= GTF_IND_NONFAULTING;
2076 GenTreePtr handleCopy = impCloneExpr(handle, &handle, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2077 nullptr DEBUGARG("impRuntimeLookup typehandle"));
2080 GenTreeArgList* helperArgs =
2081 gtNewArgList(ctxTree, gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_TOKEN_HDL, 0, nullptr,
2082 compileTimeHandle));
2083 GenTreePtr helperCall = gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, GTF_EXCEPT, helperArgs);
2085 // Check for null and possibly call helper
2086 GenTreePtr relop = gtNewOperNode(GT_NE, TYP_INT, handle, gtNewIconNode(0, TYP_I_IMPL));
2087 relop->gtFlags |= GTF_RELOP_QMARK;
2089 GenTreePtr colon = new (this, GT_COLON) GenTreeColon(TYP_I_IMPL,
2090 gtNewNothingNode(), // do nothing if nonnull
2093 GenTreePtr qmark = gtNewQmarkNode(TYP_I_IMPL, relop, colon);
2096 if (handleCopy->IsLocal())
2098 tmp = handleCopy->gtLclVarCommon.gtLclNum;
2102 tmp = lvaGrabTemp(true DEBUGARG("spilling QMark1"));
2105 impAssignTempGen(tmp, qmark, (unsigned)CHECK_SPILL_NONE);
2106 return gtNewLclvNode(tmp, TYP_I_IMPL);
2109 /******************************************************************************
2110 * Spills the stack at verCurrentState.esStack[level] and replaces it with a temp.
2111 * If tnum!=BAD_VAR_NUM, the temp var used to replace the tree is tnum,
2112 * else, grab a new temp.
2113 * For structs (which can be pushed on the stack using obj, etc),
2114 * special handling is needed
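 * An illustrative sketch (assumed, simplified): after spilling, the entry
 *     esStack[level].val == <tree>
 * has the statement "tmpN = <tree>" appended to the statement list, and the
 * stack slot is replaced by LCL_VAR<tmpN>.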
2117 struct RecursiveGuard
2122 m_pAddress = nullptr;
2129 *m_pAddress = false;
2133 void Init(bool* pAddress, bool bInitialize)
2135 assert(pAddress && *pAddress == false && "Recursive guard violation");
2136 m_pAddress = pAddress;
2148 bool Compiler::impSpillStackEntry(unsigned level,
2152 bool bAssertOnRecursion,
2159 RecursiveGuard guard;
2160 guard.Init(&impNestedStackSpill, bAssertOnRecursion);
2163 GenTreePtr tree = verCurrentState.esStack[level].val;
2165 /* Allocate a temp if we haven't been asked to use a particular one */
2167 if (tiVerificationNeeded)
2169 // Ignore bad temp requests (they will happen with bad code and will be
2170 // caught when importing the destblock)
2171 if ((tnum != BAD_VAR_NUM && tnum >= lvaCount) && verNeedsVerification())
2178 if (tnum != BAD_VAR_NUM && (tnum >= lvaCount))
2184 if (tnum == BAD_VAR_NUM)
2186 tnum = lvaGrabTemp(true DEBUGARG(reason));
2188 else if (tiVerificationNeeded && lvaTable[tnum].TypeGet() != TYP_UNDEF)
2190 // if verification is needed and tnum's type is incompatible with
2191 // type on that stack, we grab a new temp. This is safe since
2192 // we will throw a verification exception in the dest block.
2194 var_types valTyp = tree->TypeGet();
2195 var_types dstTyp = lvaTable[tnum].TypeGet();
2197 // if the two types are different, we return. This will only happen with bad code and will
2198 // be caught when importing the destblock. We still allow int/byref and float/double differences.
2199 if ((genActualType(valTyp) != genActualType(dstTyp)) &&
2201 #ifndef _TARGET_64BIT_
2202 (valTyp == TYP_I_IMPL && dstTyp == TYP_BYREF) || (valTyp == TYP_BYREF && dstTyp == TYP_I_IMPL) ||
2203 #endif // !_TARGET_64BIT_
2204 (varTypeIsFloating(dstTyp) && varTypeIsFloating(valTyp))))
2206 if (verNeedsVerification())
2213 /* Assign the spilled entry to the temp */
2214 impAssignTempGen(tnum, tree, verCurrentState.esStack[level].seTypeInfo.GetClassHandle(), level);
2216 // The tree type may be modified by impAssignTempGen, so use the type of the lclVar.
2217 var_types type = genActualType(lvaTable[tnum].TypeGet());
2218 GenTreePtr temp = gtNewLclvNode(tnum, type);
2219 verCurrentState.esStack[level].val = temp;
2224 /*****************************************************************************
2226 * Ensure that the stack has only spilled values
2229 void Compiler::impSpillStackEnsure(bool spillLeaves)
2231 assert(!spillLeaves || opts.compDbgCode);
2233 for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2235 GenTreePtr tree = verCurrentState.esStack[level].val;
2237 if (!spillLeaves && tree->OperIsLeaf())
2242 // Temps introduced by the importer itself don't need to be spilled
2244 bool isTempLcl = (tree->OperGet() == GT_LCL_VAR) && (tree->gtLclVarCommon.gtLclNum >= info.compLocalsCount);
2251 impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillStackEnsure"));
2255 void Compiler::impSpillEvalStack()
2257 for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2259 impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillEvalStack"));
2263 /*****************************************************************************
2265 * If the stack contains any trees with side effects in them, assign those
2266 * trees to temps and append the assignments to the statement list.
2267 * On return the stack is guaranteed to be empty.
2270 inline void Compiler::impEvalSideEffects()
2272 impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("impEvalSideEffects"));
2273 verCurrentState.esStackDepth = 0;
2276 /*****************************************************************************
2278 * If the stack contains any trees with side effects in them, assign those
2279 * trees to temps and replace them on the stack with refs to their temps.
2280 * [0..chkLevel) is the portion of the stack which will be checked and spilled.
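 * Illustrative usage (hedged; the reason string is just an example): a caller about to
 * append a tree that writes to memory typically does
 *     impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("example"));
 * so that stack entries whose values could be affected are evaluated into temps first.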
2283 inline void Compiler::impSpillSideEffects(bool spillGlobEffects, unsigned chkLevel DEBUGARG(const char* reason))
2285 assert(chkLevel != (unsigned)CHECK_SPILL_NONE);
2287 /* Before we make any appends to the tree list we must spill the
2288 * "special" side effects (GTF_ORDER_SIDEEFF on a GT_CATCH_ARG) */
2290 impSpillSpecialSideEff();
2292 if (chkLevel == (unsigned)CHECK_SPILL_ALL)
2294 chkLevel = verCurrentState.esStackDepth;
2297 assert(chkLevel <= verCurrentState.esStackDepth);
2299 unsigned spillFlags = spillGlobEffects ? GTF_GLOB_EFFECT : GTF_SIDE_EFFECT;
2301 for (unsigned i = 0; i < chkLevel; i++)
2303 GenTreePtr tree = verCurrentState.esStack[i].val;
2305 GenTreePtr lclVarTree;
2307 if ((tree->gtFlags & spillFlags) != 0 ||
2308 (spillGlobEffects && // Only consider the following when spillGlobEffects == TRUE
2309 !impIsAddressInLocal(tree, &lclVarTree) && // No need to spill the GT_ADDR node on a local.
2310 gtHasLocalsWithAddrOp(tree))) // Spill if we still see GT_LCL_VAR that contains lvHasLdAddrOp or
2311 // lvAddrTaken flag.
2313 impSpillStackEntry(i, BAD_VAR_NUM DEBUGARG(false) DEBUGARG(reason));
2318 /*****************************************************************************
2320 * If the stack contains any trees with special side effects in them, assign
2321 * those trees to temps and replace them on the stack with refs to their temps.
2324 inline void Compiler::impSpillSpecialSideEff()
2326 // Only exception objects need to be carefully handled
2328 if (!compCurBB->bbCatchTyp)
2333 for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2335 GenTreePtr tree = verCurrentState.esStack[level].val;
2336 // Make sure that if we have an exception object in the subtree, we spill it.
2337 if (gtHasCatchArg(tree))
2339 impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillSpecialSideEff"));
2344 /*****************************************************************************
2346 * Spill all stack references to value classes (TYP_STRUCT nodes)
2349 void Compiler::impSpillValueClasses()
2351 for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2353 GenTreePtr tree = verCurrentState.esStack[level].val;
2355 if (fgWalkTreePre(&tree, impFindValueClasses) == WALK_ABORT)
2357 // Tree walk was aborted, which means that we found a
2358 // value class on the stack. We need to spill it.
2361 impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillValueClasses"));
2366 /*****************************************************************************
2368 * Callback that checks if a tree node is TYP_STRUCT
2371 Compiler::fgWalkResult Compiler::impFindValueClasses(GenTreePtr* pTree, fgWalkData* data)
2373 fgWalkResult walkResult = WALK_CONTINUE;
2375 if ((*pTree)->gtType == TYP_STRUCT)
2377 // Abort the walk and indicate that we found a value class
2379 walkResult = WALK_ABORT;
2385 /*****************************************************************************
2387 * If the stack contains any trees with references to local #lclNum, assign
2388 * those trees to temps and replace their place on the stack with refs to their temps.
2392 void Compiler::impSpillLclRefs(ssize_t lclNum)
2394 /* Before we make any appends to the tree list we must spill the
2395 * "special" side effects (GTF_ORDER_SIDEEFF) - GT_CATCH_ARG */
2397 impSpillSpecialSideEff();
2399 for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2401 GenTreePtr tree = verCurrentState.esStack[level].val;
2403 /* If the tree may throw an exception, and the block has a handler,
2404 then we need to spill assignments to the local if the local is
2405 live on entry to the handler.
2406 Just spill 'em all without considering the liveness */
2408 bool xcptnCaught = ehBlockHasExnFlowDsc(compCurBB) && (tree->gtFlags & (GTF_CALL | GTF_EXCEPT));
2410 /* Skip the tree if it doesn't have an affected reference,
2411 unless xcptnCaught */
2413 if (xcptnCaught || gtHasRef(tree, lclNum, false))
2415 impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillLclRefs"));
2420 /*****************************************************************************
2422 * Push catch arg onto the stack.
2423 * If there are jumps to the beginning of the handler, insert basic block
2424 * and spill catch arg to a temp. Update the handler block if necessary.
2426 * Returns the basic block of the actual handler.
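 * Illustrative sketch (assumed shape): when the handler entry has multiple predecessors,
 * a new block is injected in front of it that does roughly
 *     tmpN = CATCH_ARG;   // BBJ_NONE, falls through into the real handler
 * and LCL_VAR<tmpN> (rather than the raw GT_CATCH_ARG node) is pushed on the stack.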
2429 BasicBlock* Compiler::impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_HANDLE clsHnd)
2431 // Do not inject the basic block twice on reimport. This should be
2432 // hit only under JIT stress. See if the block is the one we injected.
2433 // Note that EH canonicalization can inject internal blocks here. We might
2434 // be able to re-use such a block (but we don't, right now).
2435 if ((hndBlk->bbFlags & (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET)) ==
2436 (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET))
2438 GenTreePtr tree = hndBlk->bbTreeList;
2440 if (tree != nullptr && tree->gtOper == GT_STMT)
2442 tree = tree->gtStmt.gtStmtExpr;
2443 assert(tree != nullptr);
2445 if ((tree->gtOper == GT_ASG) && (tree->gtOp.gtOp1->gtOper == GT_LCL_VAR) &&
2446 (tree->gtOp.gtOp2->gtOper == GT_CATCH_ARG))
2448 tree = gtNewLclvNode(tree->gtOp.gtOp1->gtLclVarCommon.gtLclNum, TYP_REF);
2450 impPushOnStack(tree, typeInfo(TI_REF, clsHnd));
2452 return hndBlk->bbNext;
2456 // If we get here, it must have been some other kind of internal block. It's possible that
2457 // someone prepended something to our injected block, but that's unlikely.
2460 /* Push the exception address value on the stack */
2461 GenTreePtr arg = new (this, GT_CATCH_ARG) GenTree(GT_CATCH_ARG, TYP_REF);
2463 /* Mark the node as having a side-effect - i.e. cannot be
2464 * moved around since it is tied to a fixed location (EAX) */
2465 arg->gtFlags |= GTF_ORDER_SIDEEFF;
2467 /* Spill GT_CATCH_ARG to a temp if there are jumps to the beginning of the handler */
2468 if (hndBlk->bbRefs > 1 || compStressCompile(STRESS_CATCH_ARG, 5))
2470 if (hndBlk->bbRefs == 1)
2475 /* Create extra basic block for the spill */
2476 BasicBlock* newBlk = fgNewBBbefore(BBJ_NONE, hndBlk, /* extendRegion */ true);
2477 newBlk->bbFlags |= BBF_IMPORTED | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET;
2478 newBlk->setBBWeight(hndBlk->bbWeight);
2479 newBlk->bbCodeOffs = hndBlk->bbCodeOffs;
2481 /* Account for the new link we are about to create */
2484 /* Spill into a temp */
2485 unsigned tempNum = lvaGrabTemp(false DEBUGARG("SpillCatchArg"));
2486 lvaTable[tempNum].lvType = TYP_REF;
2487 arg = gtNewTempAssign(tempNum, arg);
2489 hndBlk->bbStkTempsIn = tempNum;
2491 /* Report the debug info. impImportBlockCode won't treat
2492 * the actual handler as an exception block and thus won't do it for us. */
2493 if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES)
2495 impCurStmtOffs = newBlk->bbCodeOffs | IL_OFFSETX_STKBIT;
2496 arg = gtNewStmt(arg, impCurStmtOffs);
2499 fgInsertStmtAtEnd(newBlk, arg);
2501 arg = gtNewLclvNode(tempNum, TYP_REF);
2504 impPushOnStack(arg, typeInfo(TI_REF, clsHnd));
2509 /*****************************************************************************
2511 * Given a tree, clone it. *pClone is set to the cloned tree.
2512 * Returns the original tree if the cloning was easy,
2513 * else returns the temp to which the tree had to be spilled.
2514 * If the tree has side-effects, it will be spilled to a temp.
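 * Illustrative usage (hedged; the reason string is just an example):
 *     op1 = impCloneExpr(op1, &op1Copy, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
 *                        nullptr DEBUGARG("example"));
 * afterwards 'op1' and 'op1Copy' denote the same value and can each be used once
 * without evaluating the original expression twice.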
2517 GenTreePtr Compiler::impCloneExpr(GenTreePtr tree,
2519 CORINFO_CLASS_HANDLE structHnd,
2521 GenTreePtr* pAfterStmt DEBUGARG(const char* reason))
2523 if (!(tree->gtFlags & GTF_GLOB_EFFECT))
2525 GenTreePtr clone = gtClone(tree, true);
2534 /* Store the operand in a temp and return the temp */
2536 unsigned temp = lvaGrabTemp(true DEBUGARG(reason));
2538 // impAssignTempGen() may change tree->gtType to TYP_VOID for calls which
2539 // return a struct type. It also may modify the struct type to a more
2540 // specialized type (e.g. a SIMD type). So we will get the type from
2541 // the lclVar AFTER calling impAssignTempGen().
2543 impAssignTempGen(temp, tree, structHnd, curLevel, pAfterStmt, impCurStmtOffs);
2544 var_types type = genActualType(lvaTable[temp].TypeGet());
2546 *pClone = gtNewLclvNode(temp, type);
2547 return gtNewLclvNode(temp, type);
2550 /*****************************************************************************
2551 * Remember the IL offset (including stack-empty info) for the trees we will generate now.
2555 inline void Compiler::impCurStmtOffsSet(IL_OFFSET offs)
2557 if (compIsForInlining())
2559 GenTreePtr callStmt = impInlineInfo->iciStmt;
2560 assert(callStmt->gtOper == GT_STMT);
2561 impCurStmtOffs = callStmt->gtStmt.gtStmtILoffsx;
2565 assert(offs == BAD_IL_OFFSET || (offs & IL_OFFSETX_BITS) == 0);
2566 IL_OFFSETX stkBit = (verCurrentState.esStackDepth > 0) ? IL_OFFSETX_STKBIT : 0;
2567 impCurStmtOffs = offs | stkBit;
2571 /*****************************************************************************
2572 * Returns current IL offset with stack-empty and call-instruction info incorporated
2574 inline IL_OFFSETX Compiler::impCurILOffset(IL_OFFSET offs, bool callInstruction)
2576 if (compIsForInlining())
2578 return BAD_IL_OFFSET;
2582 assert(offs == BAD_IL_OFFSET || (offs & IL_OFFSETX_BITS) == 0);
2583 IL_OFFSETX stkBit = (verCurrentState.esStackDepth > 0) ? IL_OFFSETX_STKBIT : 0;
2584 IL_OFFSETX callInstructionBit = callInstruction ? IL_OFFSETX_CALLINSTRUCTIONBIT : 0;
2585 return offs | stkBit | callInstructionBit;
2589 /*****************************************************************************
2591 * Remember the instr offset for the statements
2593 * When we do impAppendTree(tree), we can't set tree->gtStmtLastILoffs to
2594 * impCurOpcOffs, if the append was done because of a partial stack spill,
2595 * as some of the trees corresponding to code up to impCurOpcOffs might
2596 * still be sitting on the stack.
2597 * So we delay marking of gtStmtLastILoffs until impNoteLastILoffs().
2598 * This should be called when an opcode finally/explicitly causes
2599 * impAppendTree(tree) to be called (as opposed to being called because of
2600 * a spill caused by the opcode)
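 * Illustrative timeline (hedged): while importing "ldloc.0; call Foo()", the call may
 * force a partial spill that appends a statement early; only when the call opcode itself
 * appends its tree do we invoke impNoteLastILoffs(), so that gtStmtLastILoffs refers to
 * the call's IL offset rather than the spill's.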
2605 void Compiler::impNoteLastILoffs()
2607 if (impLastILoffsStmt == nullptr)
2609 // We should have added a statement for the current basic block
2610 // Is this assert correct ?
2612 assert(impTreeLast);
2613 assert(impTreeLast->gtOper == GT_STMT);
2615 impTreeLast->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
2619 impLastILoffsStmt->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
2620 impLastILoffsStmt = nullptr;
2626 /*****************************************************************************
2627 * We don't create any GenTree (excluding spills) for a branch.
2628 * For debugging info, we need a placeholder so that we can note
2629 * the IL offset in gtStmt.gtStmtOffs. So append an empty statement.
2632 void Compiler::impNoteBranchOffs()
2634 if (opts.compDbgCode)
2636 impAppendTree(gtNewNothingNode(), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
2640 /*****************************************************************************
2641 * Locate the next stmt boundary for which we need to record info.
2642 * We will have to spill the stack at such boundaries if it is not already empty.
2644 * Returns the next stmt boundary (after the start of the block)
2647 unsigned Compiler::impInitBlockLineInfo()
2649 /* Assume the block does not correspond with any IL offset. This prevents
2650 us from reporting extra offsets. Extra mappings can cause confusing
2651 stepping, especially if the extra mapping is a jump-target, and the
2652 debugger does not ignore extra mappings, but instead rewinds to the
2653 nearest known offset */
2655 impCurStmtOffsSet(BAD_IL_OFFSET);
2657 if (compIsForInlining())
2662 IL_OFFSET blockOffs = compCurBB->bbCodeOffs;
2664 if ((verCurrentState.esStackDepth == 0) && (info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES))
2666 impCurStmtOffsSet(blockOffs);
2669 if (false && (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES))
2671 impCurStmtOffsSet(blockOffs);
2674 /* Always report IL offset 0 or some tests get confused.
2675 Probably a good idea anyway */
2679 impCurStmtOffsSet(blockOffs);
2682 if (!info.compStmtOffsetsCount)
2687 /* Find the lowest explicit stmt boundary within the block */
2689 /* Start looking at an entry that is based on our instr offset */
2691 unsigned index = (info.compStmtOffsetsCount * blockOffs) / info.compILCodeSize;
2693 if (index >= info.compStmtOffsetsCount)
2695 index = info.compStmtOffsetsCount - 1;
2698 /* If we've guessed too far, back up */
2700 while (index > 0 && info.compStmtOffsets[index - 1] >= blockOffs)
2705 /* If we guessed short, advance ahead */
2707 while (info.compStmtOffsets[index] < blockOffs)
2711 if (index == info.compStmtOffsetsCount)
2713 return info.compStmtOffsetsCount;
2717 assert(index < info.compStmtOffsetsCount);
2719 if (info.compStmtOffsets[index] == blockOffs)
2721 /* There is an explicit boundary for the start of this basic block.
2722 So we will start with bbCodeOffs. Else we will wait until we
2723 get to the next explicit boundary */
2725 impCurStmtOffsSet(blockOffs);
2733 /*****************************************************************************/
2735 static inline bool impOpcodeIsCallOpcode(OPCODE opcode)
2749 /*****************************************************************************/
2751 static inline bool impOpcodeIsCallSiteBoundary(OPCODE opcode)
2768 /*****************************************************************************/
2770 // One might think it is worth caching these values, but results indicate that it is not worthwhile.
2772 // In addition, caching them causes SuperPMI to be unable to completely
2773 // encapsulate an individual method context.
2774 CORINFO_CLASS_HANDLE Compiler::impGetRefAnyClass()
2776 CORINFO_CLASS_HANDLE refAnyClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPED_BYREF);
2777 assert(refAnyClass != (CORINFO_CLASS_HANDLE) nullptr);
2781 CORINFO_CLASS_HANDLE Compiler::impGetTypeHandleClass()
2783 CORINFO_CLASS_HANDLE typeHandleClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPE_HANDLE);
2784 assert(typeHandleClass != (CORINFO_CLASS_HANDLE) nullptr);
2785 return typeHandleClass;
2788 CORINFO_CLASS_HANDLE Compiler::impGetRuntimeArgumentHandle()
2790 CORINFO_CLASS_HANDLE argIteratorClass = info.compCompHnd->getBuiltinClass(CLASSID_ARGUMENT_HANDLE);
2791 assert(argIteratorClass != (CORINFO_CLASS_HANDLE) nullptr);
2792 return argIteratorClass;
2795 CORINFO_CLASS_HANDLE Compiler::impGetStringClass()
2797 CORINFO_CLASS_HANDLE stringClass = info.compCompHnd->getBuiltinClass(CLASSID_STRING);
2798 assert(stringClass != (CORINFO_CLASS_HANDLE) nullptr);
2802 CORINFO_CLASS_HANDLE Compiler::impGetObjectClass()
2804 CORINFO_CLASS_HANDLE objectClass = info.compCompHnd->getBuiltinClass(CLASSID_SYSTEM_OBJECT);
2805 assert(objectClass != (CORINFO_CLASS_HANDLE) nullptr);
2809 /*****************************************************************************
2810 * "&var" can be used either as TYP_BYREF or TYP_I_IMPL, but we
2811 * set its type to TYP_BYREF when we create it. We know if it can be
2812 * changed to TYP_I_IMPL only at the point where we use it
2816 void Compiler::impBashVarAddrsToI(GenTreePtr tree1, GenTreePtr tree2)
2818 if (tree1->IsVarAddr())
2820 tree1->gtType = TYP_I_IMPL;
2823 if (tree2 && tree2->IsVarAddr())
2825 tree2->gtType = TYP_I_IMPL;
2829 /*****************************************************************************
2830 * TYP_INT and TYP_I_IMPL can be used almost interchangeably, but we want
2831 * to make that an explicit cast in our trees, so any implicit casts that
2832 * exist in the IL (at least on 64-bit where TYP_I_IMPL != TYP_INT) are
2833 * turned into explicit casts here.
2834 * We also allow an implicit conversion of a ldnull into a TYP_I_IMPL(0)
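 * Illustrative examples (hedged, 64-bit target assumed):
 *     ADD(nativeIntTree, intTree)  becomes  ADD(nativeIntTree, CAST<long>(intTree))
 *     a GT_CNS_INT(0) pushed by ldnull and used as TYP_I_IMPL simply has its type
 *     changed to TYP_I_IMPL, with no cast node created.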
2837 GenTreePtr Compiler::impImplicitIorI4Cast(GenTreePtr tree, var_types dstTyp)
2839 var_types currType = genActualType(tree->gtType);
2840 var_types wantedType = genActualType(dstTyp);
2842 if (wantedType != currType)
2844 // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
2845 if ((tree->OperGet() == GT_CNS_INT) && varTypeIsI(dstTyp))
2847 if (!varTypeIsI(tree->gtType) || ((tree->gtType == TYP_REF) && (tree->gtIntCon.gtIconVal == 0)))
2849 tree->gtType = TYP_I_IMPL;
2852 #ifdef _TARGET_64BIT_
2853 else if (varTypeIsI(wantedType) && (currType == TYP_INT))
2855 // Note that this allows TYP_INT to be cast to a TYP_I_IMPL when wantedType is a TYP_BYREF or TYP_REF
2856 tree = gtNewCastNode(TYP_I_IMPL, tree, TYP_I_IMPL);
2858 else if ((wantedType == TYP_INT) && varTypeIsI(currType))
2860 // Note that this allows TYP_BYREF or TYP_REF to be cast to a TYP_INT
2861 tree = gtNewCastNode(TYP_INT, tree, TYP_INT);
2863 #endif // _TARGET_64BIT_
2869 /*****************************************************************************
2870 * TYP_FLOAT and TYP_DOUBLE can be used almost interchangeably in some cases,
2871 * but we want to make that an explicit cast in our trees, so any implicit casts
2872 * that exist in the IL are turned into explicit casts here.
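 * Illustrative example (hedged): storing a TYP_FLOAT value into a TYP_DOUBLE destination
 * yields CAST<double>(floatTree) rather than relying on an implicit conversion.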
2875 GenTreePtr Compiler::impImplicitR4orR8Cast(GenTreePtr tree, var_types dstTyp)
2877 #ifndef LEGACY_BACKEND
2878 if (varTypeIsFloating(tree) && varTypeIsFloating(dstTyp) && (dstTyp != tree->gtType))
2880 tree = gtNewCastNode(dstTyp, tree, dstTyp);
2882 #endif // !LEGACY_BACKEND
2887 //------------------------------------------------------------------------
2888 // impInitializeArrayIntrinsic: Attempts to replace a call to InitializeArray
2889 // with a GT_COPYBLK node.
2892 // sig - The InitializeArray signature.
2895 // A pointer to the newly created GT_COPYBLK node if the replacement succeeds or
2896 // nullptr otherwise.
2899 // The function recognizes the following IL pattern:
2900 // ldc <length> or a list of ldc <lower bound>/<length>
2903 // ldtoken <field handle>
2904 // call InitializeArray
2905 // The lower bounds need not be constant except when the array rank is 1.
2906 // The function recognizes all kinds of arrays thus enabling a small runtime
2907 // such as CoreRT to skip providing an implementation for InitializeArray.
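// Illustrative example (hedged): a C# initializer such as
//     static readonly int[] Data = { 1, 2, 3, 4 };
// typically compiles to "newarr; dup; ldtoken <initialization-data field>; call InitializeArray",
// which this function replaces with a single block copy from the field's initialization
// data into the freshly allocated array.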
2909 GenTreePtr Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig)
2911 assert(sig->numArgs == 2);
2913 GenTreePtr fieldTokenNode = impStackTop(0).val;
2914 GenTreePtr arrayLocalNode = impStackTop(1).val;
2918 // Verify that the field token is known and valid. Note that it's also
2918 // possible for the token to come from reflection, in which case we cannot do
2919 // the optimization and must therefore revert to calling the helper. You can
2920 // see an example of this in bvt\DynIL\initarray2.exe (in Main).
2923 // Check to see if the ldtoken helper call is what we see here.
2924 if (fieldTokenNode->gtOper != GT_CALL || (fieldTokenNode->gtCall.gtCallType != CT_HELPER) ||
2925 (fieldTokenNode->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD)))
2930 // Strip helper call away
2931 fieldTokenNode = fieldTokenNode->gtCall.gtCallArgs->Current();
2933 if (fieldTokenNode->gtOper == GT_IND)
2935 fieldTokenNode = fieldTokenNode->gtOp.gtOp1;
2938 // Check for constant
2939 if (fieldTokenNode->gtOper != GT_CNS_INT)
2944 CORINFO_FIELD_HANDLE fieldToken = (CORINFO_FIELD_HANDLE)fieldTokenNode->gtIntCon.gtCompileTimeHandle;
2945 if (!fieldTokenNode->IsIconHandle(GTF_ICON_FIELD_HDL) || (fieldToken == nullptr))
2951 // We need to get the number of elements in the array and the size of each element.
2952 // We verify that the newarr statement is exactly what we expect it to be.
2953 // If it's not, then we just return NULL and don't optimize this call
2957 // It is possible that we don't have any statements in the block yet
2959 if (impTreeLast->gtOper != GT_STMT)
2961 assert(impTreeLast->gtOper == GT_BEG_STMTS);
2966 // We start by looking at the last statement, making sure it's an assignment, and
2967 // that the target of the assignment is the array passed to InitializeArray.
2969 GenTreePtr arrayAssignment = impTreeLast->gtStmt.gtStmtExpr;
2970 if ((arrayAssignment->gtOper != GT_ASG) || (arrayAssignment->gtOp.gtOp1->gtOper != GT_LCL_VAR) ||
2971 (arrayLocalNode->gtOper != GT_LCL_VAR) ||
2972 (arrayAssignment->gtOp.gtOp1->gtLclVarCommon.gtLclNum != arrayLocalNode->gtLclVarCommon.gtLclNum))
2978 // Make sure that the object being assigned is a helper call.
2981 GenTreePtr newArrayCall = arrayAssignment->gtOp.gtOp2;
2982 if ((newArrayCall->gtOper != GT_CALL) || (newArrayCall->gtCall.gtCallType != CT_HELPER))
2988 // Verify that it is one of the new array helpers.
2991 bool isMDArray = false;
2993 if (newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_DIRECT) &&
2994 newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_OBJ) &&
2995 newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_VC) &&
2996 newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_ALIGN8)
2997 #ifdef FEATURE_READYTORUN_COMPILER
2998 && newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1)
3002 #if COR_JIT_EE_VERSION > 460
3003 if (newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEW_MDARR_NONVARARG))
3012 CORINFO_CLASS_HANDLE arrayClsHnd = (CORINFO_CLASS_HANDLE)newArrayCall->gtCall.compileTimeHelperArgumentHandle;
3015 // Make sure we found a compile time handle to the array
3024 S_UINT32 numElements;
3028 rank = info.compCompHnd->getArrayRank(arrayClsHnd);
3035 GenTreeArgList* tokenArg = newArrayCall->gtCall.gtCallArgs;
3036 assert(tokenArg != nullptr);
3037 GenTreeArgList* numArgsArg = tokenArg->Rest();
3038 assert(numArgsArg != nullptr);
3039 GenTreeArgList* argsArg = numArgsArg->Rest();
3040 assert(argsArg != nullptr);
3043 // The number of arguments should be a constant between 1 and 64. The rank can't be 0
3044 // so at least one length must be present and the rank can't exceed 32 so there can
3045 // be at most 64 arguments - 32 lengths and 32 lower bounds.
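// For example (illustrative): a rank-2 array created with explicit lower bounds passes
// 4 arguments -- lb0, len0, lb1, len1 -- so numArgs == rank * 2.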
3048 if ((!numArgsArg->Current()->IsCnsIntOrI()) || (numArgsArg->Current()->AsIntCon()->IconValue() < 1) ||
3049 (numArgsArg->Current()->AsIntCon()->IconValue() > 64))
3054 unsigned numArgs = static_cast<unsigned>(numArgsArg->Current()->AsIntCon()->IconValue());
3055 bool lowerBoundsSpecified;
3057 if (numArgs == rank * 2)
3059 lowerBoundsSpecified = true;
3061 else if (numArgs == rank)
3063 lowerBoundsSpecified = false;
3066 // If the rank is 1 and a lower bound isn't specified then the runtime creates
3067 // a SDArray. Note that even if a lower bound is specified it can be 0 and then
3068 // we get a SDArray as well, see the for loop below.
3082 // The rank is known to be at least 1 so we can start with numElements being 1
3083 // to avoid the need to special case the first dimension.
3086 numElements = S_UINT32(1);
3090 static bool IsArgsFieldInit(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs)
3092 return (tree->OperGet() == GT_ASG) && IsArgsFieldIndir(tree->gtGetOp1(), index, lvaNewObjArrayArgs) &&
3093 IsArgsAddr(tree->gtGetOp1()->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs);
3096 static bool IsArgsFieldIndir(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs)
3098 return (tree->OperGet() == GT_IND) && (tree->gtGetOp1()->OperGet() == GT_ADD) &&
3099 (tree->gtGetOp1()->gtGetOp2()->IsIntegralConst(sizeof(INT32) * index)) &&
3100 IsArgsAddr(tree->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs);
3103 static bool IsArgsAddr(GenTree* tree, unsigned lvaNewObjArrayArgs)
3105 return (tree->OperGet() == GT_ADDR) && (tree->gtGetOp1()->OperGet() == GT_LCL_VAR) &&
3106 (tree->gtGetOp1()->AsLclVar()->GetLclNum() == lvaNewObjArrayArgs);
3109 static bool IsComma(GenTree* tree)
3111 return (tree != nullptr) && (tree->OperGet() == GT_COMMA);
3115 unsigned argIndex = 0;
3118 for (comma = argsArg->Current(); Match::IsComma(comma); comma = comma->gtGetOp2())
3120 if (lowerBoundsSpecified)
3123 // In general lower bounds can be ignored because they're not needed to
3124 // calculate the total number of elements. But for single dimensional arrays
3125 // we need to know if the lower bound is 0 because in this case the runtime
3126 // creates a SDArray and this affects the way the array data offset is calculated.
3131 GenTree* lowerBoundAssign = comma->gtGetOp1();
3132 assert(Match::IsArgsFieldInit(lowerBoundAssign, argIndex, lvaNewObjArrayArgs));
3133 GenTree* lowerBoundNode = lowerBoundAssign->gtGetOp2();
3135 if (lowerBoundNode->IsIntegralConst(0))
3141 comma = comma->gtGetOp2();
3145 GenTree* lengthNodeAssign = comma->gtGetOp1();
3146 assert(Match::IsArgsFieldInit(lengthNodeAssign, argIndex, lvaNewObjArrayArgs));
3147 GenTree* lengthNode = lengthNodeAssign->gtGetOp2();
3149 if (!lengthNode->IsCnsIntOrI())
3154 numElements *= S_SIZE_T(lengthNode->AsIntCon()->IconValue());
3158 assert((comma != nullptr) && Match::IsArgsAddr(comma, lvaNewObjArrayArgs));
3160 if (argIndex != numArgs)
3168 // Make sure there are exactly two arguments: the array class and
3169 // the number of elements.
3172 GenTreePtr arrayLengthNode;
3174 GenTreeArgList* args = newArrayCall->gtCall.gtCallArgs;
3175 #ifdef FEATURE_READYTORUN_COMPILER
3176 if (newArrayCall->gtCall.gtCallMethHnd == eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1))
3178 // Array length is 1st argument for readytorun helper
3179 arrayLengthNode = args->Current();
3184 // Array length is 2nd argument for regular helper
3185 arrayLengthNode = args->Rest()->Current();
3189 // Make sure that the number of elements looks valid.
3191 if (arrayLengthNode->gtOper != GT_CNS_INT)
3196 numElements = S_SIZE_T(arrayLengthNode->gtIntCon.gtIconVal);
3198 if (!info.compCompHnd->isSDArray(arrayClsHnd))
3204 CORINFO_CLASS_HANDLE elemClsHnd;
3205 var_types elementType = JITtype2varType(info.compCompHnd->getChildType(arrayClsHnd, &elemClsHnd));
3208 // Note that genTypeSize will return zero for non-primitive types, which is exactly
3209 // what we want (size will then be 0, and we will catch this in the conditional below).
3210 // Note that we don't expect this to fail for valid binaries, so we assert in the
3211 // non-verification case (the verification case should not assert but rather correctly
3212 // handle bad binaries). This assert is not guarding any specific invariant, but rather
3213 // saying that we don't expect this to happen, and if it is hit, we need to investigate why.
3217 S_UINT32 elemSize(genTypeSize(elementType));
3218 S_UINT32 size = elemSize * S_UINT32(numElements);
3220 if (size.IsOverflow())
3225 if ((size.Value() == 0) || (varTypeIsGC(elementType)))
3227 assert(verNeedsVerification());
3231 void* initData = info.compCompHnd->getArrayInitializationData(fieldToken, size.Value());
3238 // At this point we are ready to commit to implementing the InitializeArray
3239 // intrinsic using a struct assignment. Pop the arguments from the stack and
3240 // return the struct assignment node.
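// Illustrative shape of the result (hedged, SDArray case): a block-copy node that copies
// blkSize bytes from the static initialization data (initData) to ADDR(INDEX(array, 0)),
// i.e. the address of the first element of the newly allocated array.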
3246 const unsigned blkSize = size.Value();
3251 unsigned dataOffset = eeGetMDArrayDataOffset(elementType, rank);
3253 dst = gtNewOperNode(GT_ADD, TYP_BYREF, arrayLocalNode, gtNewIconNode(dataOffset, TYP_I_IMPL));
3257 dst = gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewIndexRef(elementType, arrayLocalNode, gtNewIconNode(0)));
3259 GenTreePtr blk = gtNewBlockVal(dst, blkSize);
3260 GenTreePtr srcAddr = gtNewIconHandleNode((size_t)initData, GTF_ICON_STATIC_HDL);
3261 GenTreePtr src = gtNewOperNode(GT_IND, TYP_STRUCT, srcAddr);
3263 return gtNewBlkOpNode(blk, // dst
3270 /*****************************************************************************/
3271 // Returns the GenTree that should be used to do the intrinsic instead of the call.
3272 // Returns NULL if an intrinsic cannot be used
3274 GenTreePtr Compiler::impIntrinsic(GenTreePtr newobjThis,
3275 CORINFO_CLASS_HANDLE clsHnd,
3276 CORINFO_METHOD_HANDLE method,
3277 CORINFO_SIG_INFO* sig,
3281 CorInfoIntrinsics* pIntrinsicID)
3283 bool mustExpand = false;
3284 #if COR_JIT_EE_VERSION > 460
3285 CorInfoIntrinsics intrinsicID = info.compCompHnd->getIntrinsicID(method, &mustExpand);
3287 CorInfoIntrinsics intrinsicID = info.compCompHnd->getIntrinsicID(method);
3289 *pIntrinsicID = intrinsicID;
3291 #ifndef _TARGET_ARM_
3292 genTreeOps interlockedOperator;
3295 if (intrinsicID == CORINFO_INTRINSIC_StubHelpers_GetStubContext)
3297 // must be done regardless of DbgCode and MinOpts
3298 return gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL);
3300 #ifdef _TARGET_64BIT_
3301 if (intrinsicID == CORINFO_INTRINSIC_StubHelpers_GetStubContextAddr)
3303 // must be done regardless of DbgCode and MinOpts
3304 return gtNewOperNode(GT_ADDR, TYP_I_IMPL, gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL));
3307 assert(intrinsicID != CORINFO_INTRINSIC_StubHelpers_GetStubContextAddr);
3310 GenTreePtr retNode = nullptr;
3313 // We disable the inlining of intrinsics for MinOpts.
3315 if (!mustExpand && (opts.compDbgCode || opts.MinOpts()))
3317 *pIntrinsicID = CORINFO_INTRINSIC_Illegal;
3321 // Currently we don't have CORINFO_INTRINSIC_Exp because it does not
3322 // seem to work properly for Infinity values, and we don't do
3323 // CORINFO_INTRINSIC_Pow because it needs a Helper which we currently don't have
3325 var_types callType = JITtype2varType(sig->retType);
3327 /* First do the intrinsics which are always smaller than a call */
3329 switch (intrinsicID)
3331 GenTreePtr op1, op2;
3333 case CORINFO_INTRINSIC_Sin:
3334 case CORINFO_INTRINSIC_Sqrt:
3335 case CORINFO_INTRINSIC_Abs:
3336 case CORINFO_INTRINSIC_Cos:
3337 case CORINFO_INTRINSIC_Round:
3338 case CORINFO_INTRINSIC_Cosh:
3339 case CORINFO_INTRINSIC_Sinh:
3340 case CORINFO_INTRINSIC_Tan:
3341 case CORINFO_INTRINSIC_Tanh:
3342 case CORINFO_INTRINSIC_Asin:
3343 case CORINFO_INTRINSIC_Acos:
3344 case CORINFO_INTRINSIC_Atan:
3345 case CORINFO_INTRINSIC_Atan2:
3346 case CORINFO_INTRINSIC_Log10:
3347 case CORINFO_INTRINSIC_Pow:
3348 case CORINFO_INTRINSIC_Exp:
3349 case CORINFO_INTRINSIC_Ceiling:
3350 case CORINFO_INTRINSIC_Floor:
3352 // These are math intrinsics
3354 assert(callType != TYP_STRUCT);
3358 #if defined(LEGACY_BACKEND)
3359 if (IsTargetIntrinsic(intrinsicID))
3360 #elif !defined(_TARGET_X86_)
3361 // Intrinsics that are not implemented directly by target instructions will
3362 // be re-materialized as user calls in the rationalizer. For prefixed tail calls,
3363 // don't do this optimization, because
3364 // a) For back-compatibility reasons on desktop .NET 4.6 / 4.6.1
3365 // b) It will be a non-trivial task or too late to re-materialize a surviving
3366 // tail-prefixed GT_INTRINSIC as a tail call in the rationalizer.
3367 if (!IsIntrinsicImplementedByUserCall(intrinsicID) || !tailCall)
3369 // On x86 RyuJIT, importing intrinsics that are implemented as user calls can cause incorrect calculation
3370 // of the depth of the stack if these intrinsics are used as arguments to another call. This causes bad
3371 // code generation for certain EH constructs.
3372 if (!IsIntrinsicImplementedByUserCall(intrinsicID))
3375 switch (sig->numArgs)
3378 op1 = impPopStack().val;
3380 #if FEATURE_X87_DOUBLES
3382 // X87 stack doesn't differentiate between float/double
3383 // so it doesn't need a cast, but everybody else does
3384 // Just double check it is at least a FP type
3385 noway_assert(varTypeIsFloating(op1));
3387 #else // FEATURE_X87_DOUBLES
3389 if (op1->TypeGet() != callType)
3391 op1 = gtNewCastNode(callType, op1, callType);
3394 #endif // FEATURE_X87_DOUBLES
3396 op1 = new (this, GT_INTRINSIC)
3397 GenTreeIntrinsic(genActualType(callType), op1, intrinsicID, method);
3401 op2 = impPopStack().val;
3402 op1 = impPopStack().val;
3404 #if FEATURE_X87_DOUBLES
3406 // X87 stack doesn't differentiate between float/double
3407 // so it doesn't need a cast, but everybody else does
3408 // Just double check it is at least a FP type
3409 noway_assert(varTypeIsFloating(op2));
3410 noway_assert(varTypeIsFloating(op1));
3412 #else // FEATURE_X87_DOUBLES
3414 if (op2->TypeGet() != callType)
3416 op2 = gtNewCastNode(callType, op2, callType);
3418 if (op1->TypeGet() != callType)
3420 op1 = gtNewCastNode(callType, op1, callType);
3423 #endif // FEATURE_X87_DOUBLES
3425 op1 = new (this, GT_INTRINSIC)
3426 GenTreeIntrinsic(genActualType(callType), op1, op2, intrinsicID, method);
3430 NO_WAY("Unsupported number of args for Math Intrinsic");
3433 #ifndef LEGACY_BACKEND
3434 if (IsIntrinsicImplementedByUserCall(intrinsicID))
3436 op1->gtFlags |= GTF_CALL;
3444 #ifdef _TARGET_XARCH_
3445 // TODO-ARM-CQ: reenable treating Interlocked operation as intrinsic
3446 case CORINFO_INTRINSIC_InterlockedAdd32:
3447 interlockedOperator = GT_LOCKADD;
3448 goto InterlockedBinOpCommon;
3449 case CORINFO_INTRINSIC_InterlockedXAdd32:
3450 interlockedOperator = GT_XADD;
3451 goto InterlockedBinOpCommon;
3452 case CORINFO_INTRINSIC_InterlockedXchg32:
3453 interlockedOperator = GT_XCHG;
3454 goto InterlockedBinOpCommon;
3456 #ifdef _TARGET_AMD64_
3457 case CORINFO_INTRINSIC_InterlockedAdd64:
3458 interlockedOperator = GT_LOCKADD;
3459 goto InterlockedBinOpCommon;
3460 case CORINFO_INTRINSIC_InterlockedXAdd64:
3461 interlockedOperator = GT_XADD;
3462 goto InterlockedBinOpCommon;
3463 case CORINFO_INTRINSIC_InterlockedXchg64:
3464 interlockedOperator = GT_XCHG;
3465 goto InterlockedBinOpCommon;
3466 #endif // _TARGET_AMD64_
3468 InterlockedBinOpCommon:
3469 assert(callType != TYP_STRUCT);
3470 assert(sig->numArgs == 2);
3472 op2 = impPopStack().val;
3473 op1 = impPopStack().val;
3479 // The address operand is typically the address of a field (for example).
3481 // In the case where the first argument is the address of a local, we might
3482 // want to make this *not* make the var address-taken -- but atomic instructions
3483 // on a local are probably pretty useless anyway, so we probably don't care.
3485 op1 = gtNewOperNode(interlockedOperator, genActualType(callType), op1, op2);
3486 op1->gtFlags |= GTF_GLOB_EFFECT;
3489 #endif // _TARGET_XARCH_
3491 case CORINFO_INTRINSIC_MemoryBarrier:
3493 assert(sig->numArgs == 0);
3495 op1 = new (this, GT_MEMORYBARRIER) GenTree(GT_MEMORYBARRIER, TYP_VOID);
3496 op1->gtFlags |= GTF_GLOB_EFFECT;
3500 #ifdef _TARGET_XARCH_
3501 // TODO-ARM-CQ: reenable treating InterlockedCmpXchg32 operation as intrinsic
3502 case CORINFO_INTRINSIC_InterlockedCmpXchg32:
3503 #ifdef _TARGET_AMD64_
3504 case CORINFO_INTRINSIC_InterlockedCmpXchg64:
3507 assert(callType != TYP_STRUCT);
3508 assert(sig->numArgs == 3);
3511 op3 = impPopStack().val; // comparand
3512 op2 = impPopStack().val; // value
3513 op1 = impPopStack().val; // location
3515 GenTreePtr node = new (this, GT_CMPXCHG) GenTreeCmpXchg(genActualType(callType), op1, op2, op3);
3517 node->gtCmpXchg.gtOpLocation->gtFlags |= GTF_DONT_CSE;
3523 case CORINFO_INTRINSIC_StringLength:
3524 op1 = impPopStack().val;
3525 if (!opts.MinOpts() && !opts.compDbgCode)
3527 GenTreeArrLen* arrLen =
3528 new (this, GT_ARR_LENGTH) GenTreeArrLen(TYP_INT, op1, offsetof(CORINFO_String, stringLen));
3533 /* Create the expression "*(str_addr + stringLengthOffset)" */
3534 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
3535 gtNewIconNode(offsetof(CORINFO_String, stringLen), TYP_I_IMPL));
3536 op1 = gtNewOperNode(GT_IND, TYP_INT, op1);
3541 case CORINFO_INTRINSIC_StringGetChar:
3542 op2 = impPopStack().val;
3543 op1 = impPopStack().val;
3544 op1 = gtNewIndexRef(TYP_CHAR, op1, op2);
3545 op1->gtFlags |= GTF_INX_STRING_LAYOUT;
3549 case CORINFO_INTRINSIC_InitializeArray:
3550 retNode = impInitializeArrayIntrinsic(sig);
3553 case CORINFO_INTRINSIC_Array_Address:
3554 case CORINFO_INTRINSIC_Array_Get:
3555 case CORINFO_INTRINSIC_Array_Set:
3556 retNode = impArrayAccessIntrinsic(clsHnd, sig, memberRef, readonlyCall, intrinsicID);
3559 case CORINFO_INTRINSIC_GetTypeFromHandle:
3560 op1 = impStackTop(0).val;
3561 if (op1->gtOper == GT_CALL && (op1->gtCall.gtCallType == CT_HELPER) &&
3562 gtIsTypeHandleToRuntimeTypeHelper(op1))
3564 op1 = impPopStack().val;
3565 // Change call to return RuntimeType directly.
3566 op1->gtType = TYP_REF;
3569 // Call the regular function.
3572 case CORINFO_INTRINSIC_RTH_GetValueInternal:
3573 op1 = impStackTop(0).val;
3574 if (op1->gtOper == GT_CALL && (op1->gtCall.gtCallType == CT_HELPER) &&
3575 gtIsTypeHandleToRuntimeTypeHelper(op1))
3578 // Old tree: Helper-RuntimeTypeHandle -> TreeToGetNativeTypeHandle
3581 // New tree: TreeToGetNativeTypeHandle
3583 // That is, remove the call to the helper and return the native TypeHandle pointer that was its parameter
3586 op1 = impPopStack().val;
3588 // Get native TypeHandle argument to old helper
3589 op1 = op1->gtCall.gtCallArgs;
3590 assert(op1->OperIsList());
3591 assert(op1->gtOp.gtOp2 == nullptr);
3592 op1 = op1->gtOp.gtOp1;
3595 // Call the regular function.
3598 #ifndef LEGACY_BACKEND
3599 case CORINFO_INTRINSIC_Object_GetType:
3601 op1 = impPopStack().val;
3602 op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, intrinsicID, method);
3604 // Set the CALL flag to indicate that the operator is implemented by a call.
3605 // Set also the EXCEPTION flag because the native implementation of
3606 // CORINFO_INTRINSIC_Object_GetType intrinsic can throw NullReferenceException.
3607 op1->gtFlags |= (GTF_CALL | GTF_EXCEPT);
3611 // Implement ByReference Ctor. This wraps the assignment of the ref into a byref-like field
3612 // in a value type. The canonical example of this is Span<T>. In effect this is just a
3613 // substitution. The parameter byref will be assigned into the newly allocated object.
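// Illustrative sketch (hedged; the field name is assumed): for "new ByReference<T>(ref x)"
// the constructor call is replaced by roughly
//     this-><first field> = &x;
// and the (cloned) ByReference struct value is pushed back onto the stack.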
3614 case CORINFO_INTRINSIC_ByReference_Ctor:
3616 // Remove call to constructor and directly assign the byref passed
3617 // to the call to the first slot of the ByReference struct.
3618 op1 = impPopStack().val;
3619 GenTreePtr thisptr = newobjThis;
3620 CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(clsHnd, 0);
3621 GenTreePtr field = gtNewFieldRef(TYP_BYREF, fldHnd, thisptr, 0, false);
3622 GenTreePtr assign = gtNewAssignNode(field, op1);
3623 GenTreePtr byReferenceStruct = gtCloneExpr(thisptr->gtGetOp1());
3624 assert(byReferenceStruct != nullptr);
3625 impPushOnStack(byReferenceStruct, typeInfo(TI_STRUCT, clsHnd));
3629 // Implement ptr value getter for ByReference struct.
3630 case CORINFO_INTRINSIC_ByReference_Value:
3632 op1 = impPopStack().val;
3633 CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(clsHnd, 0);
3634 GenTreePtr field = gtNewFieldRef(TYP_BYREF, fldHnd, op1, 0, false);
3639 /* Unknown intrinsic */
3645 if (retNode == nullptr)
3647 NO_WAY("JIT must expand the intrinsic!");
3654 /*****************************************************************************/
3656 GenTreePtr Compiler::impArrayAccessIntrinsic(
3657 CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, int memberRef, bool readonlyCall, CorInfoIntrinsics intrinsicID)
3659 /* If we are generating SMALL_CODE, we don't want to use intrinsics for
3660 the following, as it generates fatter code.
3663 if (compCodeOpt() == SMALL_CODE)
3668 /* These intrinsics generate fatter (but faster) code and are only
3669 done if we don't need SMALL_CODE */
3671 unsigned rank = (intrinsicID == CORINFO_INTRINSIC_Array_Set) ? (sig->numArgs - 1) : sig->numArgs;
3673 // The rank 1 case is special because it has to handle two array formats;
3674 // we will simply not handle that case
3675 if (rank > GT_ARR_MAX_RANK || rank <= 1)
3680 CORINFO_CLASS_HANDLE arrElemClsHnd = nullptr;
3681 var_types elemType = JITtype2varType(info.compCompHnd->getChildType(clsHnd, &arrElemClsHnd));
3683 // For the ref case, we will only be able to inline if the types match
3684 // (the verifier checks for this; we don't care for the nonverified case) and the
3685 // type is final (so we don't need to do the cast)
3686 if ((intrinsicID != CORINFO_INTRINSIC_Array_Get) && !readonlyCall && varTypeIsGC(elemType))
3688 // Get the call site signature
3689 CORINFO_SIG_INFO LocalSig;
3690 eeGetCallSiteSig(memberRef, info.compScopeHnd, impTokenLookupContextHandle, &LocalSig);
3691 assert(LocalSig.hasThis());
3693 CORINFO_CLASS_HANDLE actualElemClsHnd;
3695 if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
3697 // Fetch the last argument, the one that indicates the type we are setting.
3698 CORINFO_ARG_LIST_HANDLE argType = LocalSig.args;
3699 for (unsigned r = 0; r < rank; r++)
3701 argType = info.compCompHnd->getArgNext(argType);
3704 typeInfo argInfo = verParseArgSigToTypeInfo(&LocalSig, argType);
3705 actualElemClsHnd = argInfo.GetClassHandle();
3709 assert(intrinsicID == CORINFO_INTRINSIC_Array_Address);
3711 // Fetch the return type
3712 typeInfo retInfo = verMakeTypeInfo(LocalSig.retType, LocalSig.retTypeClass);
3713 assert(retInfo.IsByRef());
3714 actualElemClsHnd = retInfo.GetClassHandle();
3717 // if it's not final, we can't do the optimization
3718 if (!(info.compCompHnd->getClassAttribs(actualElemClsHnd) & CORINFO_FLG_FINAL))
3724 unsigned arrayElemSize;
3725 if (elemType == TYP_STRUCT)
3727 assert(arrElemClsHnd);
3729 arrayElemSize = info.compCompHnd->getClassSize(arrElemClsHnd);
3733 arrayElemSize = genTypeSize(elemType);
3736 if ((unsigned char)arrayElemSize != arrayElemSize)
3738 // arrayElemSize would be truncated as an unsigned char.
3739 // This means the array element is too large. Don't do the optimization.
3743 GenTreePtr val = nullptr;
3745 if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
3747 // Assignment of a struct is more work, and there are more gets than sets.
3748 if (elemType == TYP_STRUCT)
3753 val = impPopStack().val;
3754 assert(genActualType(elemType) == genActualType(val->gtType) ||
3755 (elemType == TYP_FLOAT && val->gtType == TYP_DOUBLE) ||
3756 (elemType == TYP_INT && val->gtType == TYP_BYREF) ||
3757 (elemType == TYP_DOUBLE && val->gtType == TYP_FLOAT));
3760 noway_assert((unsigned char)GT_ARR_MAX_RANK == GT_ARR_MAX_RANK);
3762 GenTreePtr inds[GT_ARR_MAX_RANK];
3763 for (unsigned k = rank; k > 0; k--)
3765 inds[k - 1] = impPopStack().val;
3768 GenTreePtr arr = impPopStack().val;
3769 assert(arr->gtType == TYP_REF);
3771 GenTreePtr arrElem =
3772 new (this, GT_ARR_ELEM) GenTreeArrElem(TYP_BYREF, arr, static_cast<unsigned char>(rank),
3773 static_cast<unsigned char>(arrayElemSize), elemType, &inds[0]);
3775 if (intrinsicID != CORINFO_INTRINSIC_Array_Address)
3777 arrElem = gtNewOperNode(GT_IND, elemType, arrElem);
3780 if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
3782 assert(val != nullptr);
3783 return gtNewAssignNode(arrElem, val);
3791 BOOL Compiler::verMergeEntryStates(BasicBlock* block, bool* changed)
3795 // do some basic checks first
3796 if (block->bbStackDepthOnEntry() != verCurrentState.esStackDepth)
3801 if (verCurrentState.esStackDepth > 0)
3803 // merge stack types
3804 StackEntry* parentStack = block->bbStackOnEntry();
3805 StackEntry* childStack = verCurrentState.esStack;
3807 for (i = 0; i < verCurrentState.esStackDepth; i++, parentStack++, childStack++)
3809 if (tiMergeToCommonParent(&parentStack->seTypeInfo, &childStack->seTypeInfo, changed) == FALSE)
3816 // merge initialization status of this ptr
3818 if (verTrackObjCtorInitState)
3820 // If we're tracking the CtorInitState, then it must not be unknown in the current state.
3821 assert(verCurrentState.thisInitialized != TIS_Bottom);
3823 // If the successor block's thisInit state is unknown, copy it from the current state.
3824 if (block->bbThisOnEntry() == TIS_Bottom)
3827 verSetThisInit(block, verCurrentState.thisInitialized);
3829 else if (verCurrentState.thisInitialized != block->bbThisOnEntry())
3831 if (block->bbThisOnEntry() != TIS_Top)
3834 verSetThisInit(block, TIS_Top);
3836 if (block->bbFlags & BBF_FAILED_VERIFICATION)
3838 // The block is bad. Control can flow through the block to any handler that catches the
3839 // verification exception, but the importer ignores bad blocks and therefore won't model
3840 // this flow in the normal way. To complete the merge into the bad block, the new state
3841 // needs to be manually pushed to the handlers that may be reached after the verification
3842 // exception occurs.
3844 // Usually, the new state was already propagated to the relevant handlers while processing
3845 // the predecessors of the bad block. The exception is when the bad block is at the start
3846 // of a try region, meaning it is protected by additional handlers that do not protect its predecessors.
3849 if (block->hasTryIndex() && ((block->bbFlags & BBF_TRY_BEG) != 0))
3851 // Push TIS_Top to the handlers that protect the bad block. Note that this can cause
3852 // recursive calls back into this code path (if successors of the current bad block are
3853 // also bad blocks).
3855 ThisInitState origTIS = verCurrentState.thisInitialized;
3856 verCurrentState.thisInitialized = TIS_Top;
3857 impVerifyEHBlock(block, true);
3858 verCurrentState.thisInitialized = origTIS;
3866 assert(verCurrentState.thisInitialized == TIS_Bottom && block->bbThisOnEntry() == TIS_Bottom);
3872 /*****************************************************************************
3873 * 'logMsg' is true if a log message needs to be logged. false if the caller has
3874 * already logged it (presumably in a more detailed fashion than done here)
3875 * 'bVerificationException' is true for a verification exception, false for a
3876 * "call unauthorized by host" exception.
3879 void Compiler::verConvertBBToThrowVerificationException(BasicBlock* block DEBUGARG(bool logMsg))
3881 block->bbJumpKind = BBJ_THROW;
3882 block->bbFlags |= BBF_FAILED_VERIFICATION;
3884 impCurStmtOffsSet(block->bbCodeOffs);
3887 // we need this since BeginTreeList asserts otherwise
3888 impTreeList = impTreeLast = nullptr;
3889 block->bbFlags &= ~BBF_IMPORTED;
3893 JITLOG((LL_ERROR, "Verification failure: while compiling %s near IL offset %x..%xh \n", info.compFullName,
3894 block->bbCodeOffs, block->bbCodeOffsEnd));
3897 printf("\n\nVerification failure: %s near IL %xh \n", info.compFullName, block->bbCodeOffs);
3901 if (JitConfig.DebugBreakOnVerificationFailure())
3909 // if the stack is non-empty evaluate all the side-effects
3910 if (verCurrentState.esStackDepth > 0)
3912 impEvalSideEffects();
3914 assert(verCurrentState.esStackDepth == 0);
3916 GenTreePtr op1 = gtNewHelperCallNode(CORINFO_HELP_VERIFICATION, TYP_VOID, GTF_EXCEPT,
3917 gtNewArgList(gtNewIconNode(block->bbCodeOffs)));
3918 // verCurrentState.esStackDepth = 0;
3919 impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
3921 // The inliner is not able to handle methods that require a throw block, so
3922 // make sure this method never gets inlined.
3923 info.compCompHnd->setMethodAttribs(info.compMethodHnd, CORINFO_FLG_BAD_INLINEE);
3926 /*****************************************************************************
3929 void Compiler::verHandleVerificationFailure(BasicBlock* block DEBUGARG(bool logMsg))
3932 // In AMD64, for historical reasons involving design limitations of JIT64, the VM has a
3933 // slightly different mechanism in which it calls the JIT to perform IL verification:
3934 // in the case of transparent methods the VM calls for a predicate IsVerifiable()
3935 // that consists of calling the JIT with the IMPORT_ONLY flag and with the IL verify flag on.
3936 // If the JIT determines the method is not verifiable, it should raise the exception to the VM and let
3937 // it bubble up until reported by the runtime. Currently in RyuJIT, this method doesn't bubble
3938 // up the exception; instead it embeds a throw inside the offending basic block and lets it
3939 // fail at run time when the jitted method executes.
3941 // For AMD64 we don't want this behavior when the JIT has been called only for verification (i.e.
3942 // with the IMPORT_ONLY and IL Verification flag set) because this won't actually generate code,
3943 // just try to find out whether to fail this method before even actually jitting it. So, in case
3944 // we detect these two conditions, instead of generating a throw statement inside the offending
3945 // basic block, we immediately fail to JIT and notify the VM to make the IsVerifiable() predicate
3946 // to return false and make RyuJIT behave the same way JIT64 does.
3948 // The rationale behind this workaround is to avoid modifying the VM and maintain compatibility between JIT64 and
3949 // RyuJIT for the time being until we completely replace JIT64.
3950 // TODO-ARM64-Cleanup: We probably want to actually modify the VM in the future to avoid the unnecessary two passes.
3952 // In AMD64 we must make sure we're behaving the same way as JIT64, meaning we should only raise the verification
3953 // exception if we are only importing and verifying. The method verNeedsVerification() can also modify the
3954 // tiVerificationNeeded flag in the case it determines it can 'skip verification' during importation and defer it
3955 // to a runtime check. That's why we must assert one or the other (since the flag tiVerificationNeeded can
3956 // be turned off during importation).
3957 CLANG_FORMAT_COMMENT_ANCHOR;
3959 #ifdef _TARGET_64BIT_
3962 bool canSkipVerificationResult =
3963 info.compCompHnd->canSkipMethodVerification(info.compMethodHnd) != CORINFO_VERIFICATION_CANNOT_SKIP;
3964 assert(tiVerificationNeeded || canSkipVerificationResult);
3967 // Add the non-verifiable flag to the compiler
3968 if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IMPORT_ONLY))
3970 tiIsVerifiableCode = FALSE;
3972 #endif //_TARGET_64BIT_
3973 verResetCurrentState(block, &verCurrentState);
3974 verConvertBBToThrowVerificationException(block DEBUGARG(logMsg));
3977 impNoteLastILoffs(); // Remember at which BC offset the tree was finished
3981 /******************************************************************************/
3982 typeInfo Compiler::verMakeTypeInfo(CorInfoType ciType, CORINFO_CLASS_HANDLE clsHnd)
3984 assert(ciType < CORINFO_TYPE_COUNT);
3989 case CORINFO_TYPE_STRING:
3990 case CORINFO_TYPE_CLASS:
3991 tiResult = verMakeTypeInfo(clsHnd);
3992 if (!tiResult.IsType(TI_REF))
3993 { // type must be consistent with element type
3998 #ifdef _TARGET_64BIT_
3999 case CORINFO_TYPE_NATIVEINT:
4000 case CORINFO_TYPE_NATIVEUINT:
4003 // If we have more precise information, use it
4004 return verMakeTypeInfo(clsHnd);
4008 return typeInfo::nativeInt();
4011 #endif // _TARGET_64BIT_
4013 case CORINFO_TYPE_VALUECLASS:
4014 case CORINFO_TYPE_REFANY:
4015 tiResult = verMakeTypeInfo(clsHnd);
4016 // type must be consistent with element type;
4017 if (!tiResult.IsValueClass())
4022 case CORINFO_TYPE_VAR:
4023 return verMakeTypeInfo(clsHnd);
4025 case CORINFO_TYPE_PTR: // for now, pointers are treated as an error
4026 case CORINFO_TYPE_VOID:
4030 case CORINFO_TYPE_BYREF:
4032 CORINFO_CLASS_HANDLE childClassHandle;
4033 CorInfoType childType = info.compCompHnd->getChildType(clsHnd, &childClassHandle);
4034 return ByRef(verMakeTypeInfo(childType, childClassHandle));
4040 { // If we have more precise information, use it
4041 return typeInfo(TI_STRUCT, clsHnd);
4045 return typeInfo(JITtype2tiType(ciType));
4051 /******************************************************************************/
4053 typeInfo Compiler::verMakeTypeInfo(CORINFO_CLASS_HANDLE clsHnd, bool bashStructToRef /* = false */)
4055 if (clsHnd == nullptr)
4060 // Byrefs should only occur in method and local signatures, which are accessed
4061 // using ICorClassInfo and ICorClassInfo.getChildType.
4062 // So findClass() and getClassAttribs() should not be called for byrefs
4064 if (JITtype2varType(info.compCompHnd->asCorInfoType(clsHnd)) == TYP_BYREF)
4066 assert(!"Did findClass() return a Byref?");
4070 unsigned attribs = info.compCompHnd->getClassAttribs(clsHnd);
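// Value classes map to a primitive typeInfo, a TI_STRUCT, or (when bashStructToRef is requested) a TI_REF;
// generic type variables and all other classes map to TI_REF.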
4072 if (attribs & CORINFO_FLG_VALUECLASS)
4074 CorInfoType t = info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd);
4076 // Meta-data validation should ensure that CORINFO_TYPE_BYREF should
4077 // not occur here, so we may want to change this to an assert instead.
4078 if (t == CORINFO_TYPE_VOID || t == CORINFO_TYPE_BYREF || t == CORINFO_TYPE_PTR)
4083 #ifdef _TARGET_64BIT_
4084 if (t == CORINFO_TYPE_NATIVEINT || t == CORINFO_TYPE_NATIVEUINT)
4086 return typeInfo::nativeInt();
4088 #endif // _TARGET_64BIT_
4090 if (t != CORINFO_TYPE_UNDEF)
4092 return (typeInfo(JITtype2tiType(t)));
4094 else if (bashStructToRef)
4096 return (typeInfo(TI_REF, clsHnd));
4100 return (typeInfo(TI_STRUCT, clsHnd));
4103 else if (attribs & CORINFO_FLG_GENERIC_TYPE_VARIABLE)
4105 // See comment in _typeInfo.h for why we do it this way.
4106 return (typeInfo(TI_REF, clsHnd, true));
4110 return (typeInfo(TI_REF, clsHnd));
4114 /******************************************************************************/
4115 BOOL Compiler::verIsSDArray(typeInfo ti)
4117 if (ti.IsNullObjRef())
4118 { // nulls are SD arrays
4122 if (!ti.IsType(TI_REF))
4127 if (!info.compCompHnd->isSDArray(ti.GetClassHandleForObjRef()))
4134 /******************************************************************************/
4135 /* Given 'arrayObjectType' which is an array type, fetch the element type. */
4136 /* Returns an error type if anything goes wrong */
4138 typeInfo Compiler::verGetArrayElemType(typeInfo arrayObjectType)
4140 assert(!arrayObjectType.IsNullObjRef()); // you need to check for null explicitly since that is a success case
4142 if (!verIsSDArray(arrayObjectType))
4147 CORINFO_CLASS_HANDLE childClassHandle = nullptr;
4148 CorInfoType ciType = info.compCompHnd->getChildType(arrayObjectType.GetClassHandleForObjRef(), &childClassHandle);
4150 return verMakeTypeInfo(ciType, childClassHandle);
4153 /*****************************************************************************
4155 typeInfo Compiler::verParseArgSigToTypeInfo(CORINFO_SIG_INFO* sig, CORINFO_ARG_LIST_HANDLE args)
4157 CORINFO_CLASS_HANDLE classHandle;
4158 CorInfoType ciType = strip(info.compCompHnd->getArgType(sig, args, &classHandle));
4160 var_types type = JITtype2varType(ciType);
4161 if (varTypeIsGC(type))
4163 // For efficiency, getArgType only returns something in classHandle for
4164 // value types. For other types that have additional type info, you
4165 // have to call back explicitly
4166 classHandle = info.compCompHnd->getArgClass(sig, args);
4169 NO_WAY("Could not figure out Class specified in argument or local signature");
4173 return verMakeTypeInfo(ciType, classHandle);
4176 /*****************************************************************************/
4178 // This does the expensive check to figure out whether the method
4179 // needs to be verified. It is called only when we fail verification,
4180 // just before throwing the verification exception.
4182 BOOL Compiler::verNeedsVerification()
4184 // If we have previously determined that verification is NOT needed
4185 // (for example in Compiler::compCompile), that means verification is really not needed.
4186 // Return the same decision we made before.
4187 // (Note: This literally means that tiVerificationNeeded can never go from 0 to 1.)
4189 if (!tiVerificationNeeded)
4191 return tiVerificationNeeded;
4194 assert(tiVerificationNeeded);
4196 // Ok, we haven't concluded that verification is NOT needed. Consult the EE now to
4197 // obtain the answer.
4198 CorInfoCanSkipVerificationResult canSkipVerificationResult =
4199 info.compCompHnd->canSkipMethodVerification(info.compMethodHnd);
4201 // canSkipVerification will return one of the following three values:
4202 // CORINFO_VERIFICATION_CANNOT_SKIP = 0, // Cannot skip verification during jit time.
4203 // CORINFO_VERIFICATION_CAN_SKIP = 1, // Can skip verification during jit time.
4204 // CORINFO_VERIFICATION_RUNTIME_CHECK = 2, // Skip verification during jit time,
4205 // but need to insert a callout to the VM to ask during runtime
4206 // whether to skip verification or not.
4208 // Set tiRuntimeCalloutNeeded if canSkipVerification() instructs us to insert a callout for runtime check
4209 if (canSkipVerificationResult == CORINFO_VERIFICATION_RUNTIME_CHECK)
4211 tiRuntimeCalloutNeeded = true;
4214 if (canSkipVerificationResult == CORINFO_VERIFICATION_DONT_JIT)
4216 // Dev10 706080 - Testers don't like the assert, so just silence it
4217 // by not using the macros that invoke debugAssert.
4221 // When tiVerificationNeeded is true, JIT will do the verification during JIT time.
4222 // The following line means we will NOT do jit time verification if canSkipVerification
4223 // returns CORINFO_VERIFICATION_CAN_SKIP or CORINFO_VERIFICATION_RUNTIME_CHECK.
4224 tiVerificationNeeded = (canSkipVerificationResult == CORINFO_VERIFICATION_CANNOT_SKIP);
4225 return tiVerificationNeeded;
4228 BOOL Compiler::verIsByRefLike(const typeInfo& ti)
4234 if (!ti.IsType(TI_STRUCT))
4238 return info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_CONTAINS_STACK_PTR;
4241 BOOL Compiler::verIsSafeToReturnByRef(const typeInfo& ti)
4243 if (ti.IsPermanentHomeByRef())
4253 BOOL Compiler::verIsBoxable(const typeInfo& ti)
4255 return (ti.IsPrimitiveType() || ti.IsObjRef() // includes boxed generic type variables
4256 || ti.IsUnboxedGenericTypeVar() ||
4257 (ti.IsType(TI_STRUCT) &&
4258 // exclude byreflike structs
4259 !(info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_CONTAINS_STACK_PTR)));
4262 // Is it a boxed value type?
4263 bool Compiler::verIsBoxedValueType(typeInfo ti)
4265 if (ti.GetType() == TI_REF)
4267 CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandleForObjRef();
4268 return !!eeIsValueClass(clsHnd);
4276 /*****************************************************************************
4278 * Check if a TailCall is legal.
4281 bool Compiler::verCheckTailCallConstraint(
4283 CORINFO_RESOLVED_TOKEN* pResolvedToken,
4284 CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, // Is this a "constrained." call on a type parameter?
4285 bool speculative // If true, won't throw if verification fails. Instead it will
4286 // return false to the caller.
4287 // If false, it will throw.
4291 CORINFO_SIG_INFO sig;
4292 unsigned int popCount = 0; // we can't pop the stack since impImportCall needs it, so
4293 // this counter is used to keep track of how many items have been
4296 CORINFO_METHOD_HANDLE methodHnd = nullptr;
4297 CORINFO_CLASS_HANDLE methodClassHnd = nullptr;
4298 unsigned methodClassFlgs = 0;
4300 assert(impOpcodeIsCallOpcode(opcode));
4302 if (compIsForInlining())
4307 // for calli, VerifyOrReturn that this is not a virtual method
4308 if (opcode == CEE_CALLI)
4310 /* Get the call sig */
4311 eeGetSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &sig);
4313 // We don't know the target method, so we have to infer the flags, or
4314 // assume the worst-case.
4315 mflags = (sig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC;
4319 methodHnd = pResolvedToken->hMethod;
4321 mflags = info.compCompHnd->getMethodAttribs(methodHnd);
4323 // When verifying generic code we pair the method handle with its
4324 // owning class to get the exact method signature.
4325 methodClassHnd = pResolvedToken->hClass;
4326 assert(methodClassHnd);
4328 eeGetMethodSig(methodHnd, &sig, methodClassHnd);
4330 // opcode specific check
4331 methodClassFlgs = info.compCompHnd->getClassAttribs(methodClassHnd);
4334 // We must have got the methodClassHnd if opcode is not CEE_CALLI
4335 assert((methodHnd != nullptr && methodClassHnd != nullptr) || opcode == CEE_CALLI);
4337 if ((sig.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
4339 eeGetCallSiteSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &sig);
4342 // check compatibility of the arguments
4343 unsigned int argCount;
4344 argCount = sig.numArgs;
4345 CORINFO_ARG_LIST_HANDLE args;
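// Walk the declared argument list: a tail call may not pass byref-like values or unmanaged pointers,
// since they could refer to the caller's stack frame, which is gone by the time the callee returns.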
4349 typeInfo tiDeclared = verParseArgSigToTypeInfo(&sig, args).NormaliseForStack();
4351 // check that the argument is not a byref for tailcalls
4352 VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclared), "tailcall on byrefs", speculative);
4354 // For unsafe code, we might have parameters containing pointer to the stack location.
4355 // Disallow the tailcall for this kind.
4356 CORINFO_CLASS_HANDLE classHandle;
4357 CorInfoType ciType = strip(info.compCompHnd->getArgType(&sig, args, &classHandle));
4358 VerifyOrReturnSpeculative(ciType != CORINFO_TYPE_PTR, "tailcall on CORINFO_TYPE_PTR", speculative);
4360 args = info.compCompHnd->getArgNext(args);
4364 popCount += sig.numArgs;
4366 // check for 'this' which is on non-static methods, not called via NEWOBJ
4367 if (!(mflags & CORINFO_FLG_STATIC))
4369 // Always update the popCount.
4370 // This is crucial for the stack calculation to be correct.
4371 typeInfo tiThis = impStackTop(popCount).seTypeInfo;
4374 if (opcode == CEE_CALLI)
4376 // For CALLI, we don't know the methodClassHnd. Therefore, let's check the "this" object
4378 if (tiThis.IsValueClass())
4382 VerifyOrReturnSpeculative(!verIsByRefLike(tiThis), "byref in tailcall", speculative);
4386 // Check type compatibility of the this argument
4387 typeInfo tiDeclaredThis = verMakeTypeInfo(methodClassHnd);
4388 if (tiDeclaredThis.IsValueClass())
4390 tiDeclaredThis.MakeByRef();
4393 VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclaredThis), "byref in tailcall", speculative);
4397 // Tail calls on constrained calls should be illegal too:
4398 // when instantiated at a value type, a constrained call may pass the address of a stack allocated value
4399 VerifyOrReturnSpeculative(!pConstrainedResolvedToken, "byref in constrained tailcall", speculative);
4401 // Get the exact view of the signature for an array method
4402 if (sig.retType != CORINFO_TYPE_VOID)
4404 if (methodClassFlgs & CORINFO_FLG_ARRAY)
4406 assert(opcode != CEE_CALLI);
4407 eeGetCallSiteSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &sig);
4411 typeInfo tiCalleeRetType = verMakeTypeInfo(sig.retType, sig.retTypeClass);
4412 typeInfo tiCallerRetType =
4413 verMakeTypeInfo(info.compMethodInfo->args.retType, info.compMethodInfo->args.retTypeClass);
4415 // void return type gets morphed into the error type, so we have to treat it specially here
4416 if (sig.retType == CORINFO_TYPE_VOID)
4418 VerifyOrReturnSpeculative(info.compMethodInfo->args.retType == CORINFO_TYPE_VOID, "tailcall return mismatch",
4423 VerifyOrReturnSpeculative(tiCompatibleWith(NormaliseForStack(tiCalleeRetType),
4424 NormaliseForStack(tiCallerRetType), true),
4425 "tailcall return mismatch", speculative);
4428 // for tailcall, stack must be empty
4429 VerifyOrReturnSpeculative(verCurrentState.esStackDepth == popCount, "stack non-empty on tailcall", speculative);
4431 return true; // Yes, tailcall is legal
4434 /*****************************************************************************
4436 * Checks the IL verification rules for the call
4439 void Compiler::verVerifyCall(OPCODE opcode,
4440 CORINFO_RESOLVED_TOKEN* pResolvedToken,
4441 CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
4444 const BYTE* delegateCreateStart,
4445 const BYTE* codeAddr,
4446 CORINFO_CALL_INFO* callInfo DEBUGARG(const char* methodName))
4449 CORINFO_SIG_INFO* sig = nullptr;
4450 unsigned int popCount = 0; // we can't pop the stack since impImportCall needs it, so
4451 // this counter is used to keep track of how many items have been
4454 // for calli, VerifyOrReturn that this is not a virtual method
4455 if (opcode == CEE_CALLI)
4457 Verify(false, "Calli not verifiable");
4461 //<NICE> It would be nice to cache the rest of it, but eeFindMethod is the big ticket item.
4462 mflags = callInfo->verMethodFlags;
4464 sig = &callInfo->verSig;
4466 if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
4468 eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
4471 // opcode specific check
4472 unsigned methodClassFlgs = callInfo->classFlags;
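// Opcode-specific checks: callvirt requires a non-static method on a reference type; newobj must target
// an instance constructor (delegate constructors get the extra pattern checks below).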
4476 // cannot do callvirt on valuetypes
4477 VerifyOrReturn(!(methodClassFlgs & CORINFO_FLG_VALUECLASS), "callVirt on value class");
4478 VerifyOrReturn(sig->hasThis(), "CallVirt on static method");
4483 assert(!tailCall); // Importer should not allow this
4484 VerifyOrReturn((mflags & CORINFO_FLG_CONSTRUCTOR) && !(mflags & CORINFO_FLG_STATIC),
4485 "newobj must be on instance");
4487 if (methodClassFlgs & CORINFO_FLG_DELEGATE)
4489 VerifyOrReturn(sig->numArgs == 2, "wrong number args to delegate ctor");
4490 typeInfo tiDeclaredObj = verParseArgSigToTypeInfo(sig, sig->args).NormaliseForStack();
4491 typeInfo tiDeclaredFtn =
4492 verParseArgSigToTypeInfo(sig, info.compCompHnd->getArgNext(sig->args)).NormaliseForStack();
4493 VerifyOrReturn(tiDeclaredFtn.IsNativeIntType(), "ftn arg needs to be a native int type");
4495 assert(popCount == 0);
4496 typeInfo tiActualObj = impStackTop(1).seTypeInfo;
4497 typeInfo tiActualFtn = impStackTop(0).seTypeInfo;
4499 VerifyOrReturn(tiActualFtn.IsMethod(), "delegate needs method as first arg");
4500 VerifyOrReturn(tiCompatibleWith(tiActualObj, tiDeclaredObj, true), "delegate object type mismatch");
4501 VerifyOrReturn(tiActualObj.IsNullObjRef() || tiActualObj.IsType(TI_REF),
4502 "delegate object type mismatch");
4504 CORINFO_CLASS_HANDLE objTypeHandle =
4505 tiActualObj.IsNullObjRef() ? nullptr : tiActualObj.GetClassHandleForObjRef();
4507 // the method signature must be compatible with the delegate's invoke method
4509 // check that for virtual functions, the type of the object used to get the
4510 // ftn ptr is the same as the type of the object passed to the delegate ctor.
4511 // since this is a bit of work to determine in general, we pattern match stylized
4514 // the delegate creation code check, which used to be done later, is now done here
4515 // so we can read delegateMethodRef directly
4516 // from the preceding LDFTN or CEE_LDVIRTFN instruction sequence;
4517 // we then use it in our call to isCompatibleDelegate().
4519 mdMemberRef delegateMethodRef = mdMemberRefNil;
4520 VerifyOrReturn(verCheckDelegateCreation(delegateCreateStart, codeAddr, delegateMethodRef),
4521 "must create delegates with certain IL");
4523 CORINFO_RESOLVED_TOKEN delegateResolvedToken;
4524 delegateResolvedToken.tokenContext = impTokenLookupContextHandle;
4525 delegateResolvedToken.tokenScope = info.compScopeHnd;
4526 delegateResolvedToken.token = delegateMethodRef;
4527 delegateResolvedToken.tokenType = CORINFO_TOKENKIND_Method;
4528 info.compCompHnd->resolveToken(&delegateResolvedToken);
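// Ask the EE for call info on the resolved target and let isCompatibleDelegate() decide whether the
// target method matches the delegate's Invoke signature (it also reports whether the delegate is open).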
4530 CORINFO_CALL_INFO delegateCallInfo;
4531 eeGetCallInfo(&delegateResolvedToken, nullptr /* constraint typeRef */,
4532 addVerifyFlag(CORINFO_CALLINFO_SECURITYCHECKS), &delegateCallInfo);
4534 BOOL isOpenDelegate = FALSE;
4535 VerifyOrReturn(info.compCompHnd->isCompatibleDelegate(objTypeHandle, delegateResolvedToken.hClass,
4536 tiActualFtn.GetMethod(), pResolvedToken->hClass,
4538 "function incompatible with delegate");
4540 // check the constraints on the target method
4541 VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(delegateResolvedToken.hClass),
4542 "delegate target has unsatisfied class constraints");
4543 VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(delegateResolvedToken.hClass,
4544 tiActualFtn.GetMethod()),
4545 "delegate target has unsatisfied method constraints");
4547 // See ECMA spec section 1.8.1.5.2 (Delegating via instance dispatch)
4548 // for additional verification rules for delegates
4549 CORINFO_METHOD_HANDLE actualMethodHandle = tiActualFtn.GetMethod();
4550 DWORD actualMethodAttribs = info.compCompHnd->getMethodAttribs(actualMethodHandle);
4551 if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr))
4554 if ((actualMethodAttribs & CORINFO_FLG_VIRTUAL) && ((actualMethodAttribs & CORINFO_FLG_FINAL) == 0)
4556 && StrictCheckForNonVirtualCallToVirtualMethod()
4560 if (info.compCompHnd->shouldEnforceCallvirtRestriction(info.compScopeHnd))
4562 VerifyOrReturn(tiActualObj.IsThisPtr() && lvaIsOriginalThisReadOnly() ||
4563 verIsBoxedValueType(tiActualObj),
4564 "The 'this' parameter to the call must be either the calling method's "
4565 "'this' parameter or "
4566 "a boxed value type.");
4571 if (actualMethodAttribs & CORINFO_FLG_PROTECTED)
4573 BOOL targetIsStatic = actualMethodAttribs & CORINFO_FLG_STATIC;
4575 Verify(targetIsStatic || !isOpenDelegate,
4576 "Unverifiable creation of an open instance delegate for a protected member.");
4578 CORINFO_CLASS_HANDLE instanceClassHnd = (tiActualObj.IsNullObjRef() || targetIsStatic)
4580 : tiActualObj.GetClassHandleForObjRef();
4582 // In the case of protected methods, it is a requirement that the 'this'
4583 // pointer be a subclass of the current context. Perform this check.
4584 Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
4585 "Accessing protected method through wrong type.");
4590 // fall thru to default checks
4592 VerifyOrReturn(!(mflags & CORINFO_FLG_ABSTRACT), "method abstract");
4594 VerifyOrReturn(!((mflags & CORINFO_FLG_CONSTRUCTOR) && (methodClassFlgs & CORINFO_FLG_DELEGATE)),
4595 "can only newobj a delegate constructor");
4597 // check compatibility of the arguments
4598 unsigned int argCount;
4599 argCount = sig->numArgs;
4600 CORINFO_ARG_LIST_HANDLE args;
4604 typeInfo tiActual = impStackTop(popCount + argCount).seTypeInfo;
4606 typeInfo tiDeclared = verParseArgSigToTypeInfo(sig, args).NormaliseForStack();
4607 VerifyOrReturn(tiCompatibleWith(tiActual, tiDeclared, true), "type mismatch");
4609 args = info.compCompHnd->getArgNext(args);
4615 popCount += sig->numArgs;
4617 // check for 'this', which is on non-static methods not called via NEWOBJ
4618 CORINFO_CLASS_HANDLE instanceClassHnd = info.compClassHnd;
4619 if (!(mflags & CORINFO_FLG_STATIC) && (opcode != CEE_NEWOBJ))
4621 typeInfo tiThis = impStackTop(popCount).seTypeInfo;
4624 // If it is null, we assume we can access it (since it will AV shortly)
4625 // If it is anything but a reference class, there is no hierarchy, so
4626 // again, we don't need the precise instance class to compute 'protected' access
4627 if (tiThis.IsType(TI_REF))
4629 instanceClassHnd = tiThis.GetClassHandleForObjRef();
4632 // Check type compatibility of the this argument
4633 typeInfo tiDeclaredThis = verMakeTypeInfo(pResolvedToken->hClass);
4634 if (tiDeclaredThis.IsValueClass())
4636 tiDeclaredThis.MakeByRef();
4639 // If this is a call to the base class .ctor, set thisPtr Init for
4641 if (mflags & CORINFO_FLG_CONSTRUCTOR)
4643 if (verTrackObjCtorInitState && tiThis.IsThisPtr() &&
4644 verIsCallToInitThisPtr(info.compClassHnd, pResolvedToken->hClass))
4646 assert(verCurrentState.thisInitialized !=
4647 TIS_Bottom); // This should never be the case just from the logic of the verifier.
4648 VerifyOrReturn(verCurrentState.thisInitialized == TIS_Uninit,
4649 "Call to base class constructor when 'this' is possibly initialized");
4650 // Otherwise, 'this' is now initialized.
4651 verCurrentState.thisInitialized = TIS_Init;
4652 tiThis.SetInitialisedObjRef();
4656 // We allow direct calls to value type constructors
4657 // NB: we have to check that the contents of tiThis is a value type, otherwise we could use a
4658 // constrained callvirt to illegally re-enter a .ctor on a value of reference type.
4659 VerifyOrReturn(tiThis.IsByRef() && DereferenceByRef(tiThis).IsValueClass(),
4660 "Bad call to a constructor");
4664 if (pConstrainedResolvedToken != nullptr)
4666 VerifyOrReturn(tiThis.IsByRef(), "non-byref this type in constrained call");
4668 typeInfo tiConstraint = verMakeTypeInfo(pConstrainedResolvedToken->hClass);
4670 // We just dereference this and test for equality
4671 tiThis.DereferenceByRef();
4672 VerifyOrReturn(typeInfo::AreEquivalent(tiThis, tiConstraint),
4673 "this type mismatch with constrained type operand");
4675 // Now pretend the this type is the boxed constrained type, for the sake of subsequent checks
4676 tiThis = typeInfo(TI_REF, pConstrainedResolvedToken->hClass);
4679 // To support direct calls on readonly byrefs, just pretend tiDeclaredThis is readonly too
4680 if (tiDeclaredThis.IsByRef() && tiThis.IsReadonlyByRef())
4682 tiDeclaredThis.SetIsReadonlyByRef();
4685 VerifyOrReturn(tiCompatibleWith(tiThis, tiDeclaredThis, true), "this type mismatch");
4687 if (tiThis.IsByRef())
4689 // Find the actual type where the method exists (as opposed to what is declared
4690 // in the metadata). This is to prevent passing a byref as the "this" argument
4691 // while calling methods like System.ValueType.GetHashCode() which expect boxed objects.
4693 CORINFO_CLASS_HANDLE actualClassHnd = info.compCompHnd->getMethodClass(pResolvedToken->hMethod);
4694 VerifyOrReturn(eeIsValueClass(actualClassHnd),
4695 "Call to base type of valuetype (which is never a valuetype)");
4698 // Rules for non-virtual call to a non-final virtual method:
4701 // The "this" pointer is considered to be "possibly written" if
4702 // 1. Its address has been taken (LDARGA 0) anywhere in the method.
4704 // 2. It has been stored to (STARG.0) anywhere in the method.
4706 // A non-virtual call to a non-final virtual method is only allowed if
4707 // 1. The this pointer passed to the callee is an instance of a boxed value type.
4709 // 2. The this pointer passed to the callee is the current method's this pointer.
4710 // (and) The current method's this pointer is not "possibly written".
4712 // Thus the rule is that if you assign to this ANYWHERE you can't make "base" calls to
4713 // virtual methods. (Luckily this does not affect .ctors, since they are not virtual).
4714 // This is stronger than is strictly needed, but implementing a laxer rule is significantly
4715 // harder and more error prone.
4717 if (opcode == CEE_CALL && (mflags & CORINFO_FLG_VIRTUAL) && ((mflags & CORINFO_FLG_FINAL) == 0)
4719 && StrictCheckForNonVirtualCallToVirtualMethod()
4723 if (info.compCompHnd->shouldEnforceCallvirtRestriction(info.compScopeHnd))
4726 tiThis.IsThisPtr() && lvaIsOriginalThisReadOnly() || verIsBoxedValueType(tiThis),
4727 "The 'this' parameter to the call must be either the calling method's 'this' parameter or "
4728 "a boxed value type.");
4733 // check any constraints on the callee's class and type parameters
4734 VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(pResolvedToken->hClass),
4735 "method has unsatisfied class constraints");
4736 VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(pResolvedToken->hClass, pResolvedToken->hMethod),
4737 "method has unsatisfied method constraints");
4739 if (mflags & CORINFO_FLG_PROTECTED)
4741 VerifyOrReturn(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
4742 "Can't access protected method");
4745 // Get the exact view of the signature for an array method
4746 if (sig->retType != CORINFO_TYPE_VOID)
4748 eeGetMethodSig(pResolvedToken->hMethod, sig, pResolvedToken->hClass);
4751 // "readonly." prefixed calls only allowed for the Address operation on arrays.
4752 // The methods supported by array types are under the control of the EE
4753 // so we can trust that only the Address operation returns a byref.
4756 typeInfo tiCalleeRetType = verMakeTypeInfo(sig->retType, sig->retTypeClass);
4757 VerifyOrReturn((methodClassFlgs & CORINFO_FLG_ARRAY) && tiCalleeRetType.IsByRef(),
4758 "unexpected use of readonly prefix");
4761 // Verify the tailcall
4764 verCheckTailCallConstraint(opcode, pResolvedToken, pConstrainedResolvedToken, false);
4768 /*****************************************************************************
4769 * Checks that a delegate creation is done using the following pattern:
4771 * ldvirtftn targetMemberRef
4773 * ldftn targetMemberRef
4775 * 'delegateCreateStart' points at the last dup or ldftn in this basic block (null if
4776 * not in this basic block)
4778 * targetMemberRef is read from the code sequence.
4779 * targetMemberRef is validated iff verificationNeeded.
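 * (The 4-byte token is read at offset 2 past ldftn, or offset 3 past dup + ldvirtftn, matching the
 * two-byte encodings of the ldftn/ldvirtftn opcodes.)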
4782 BOOL Compiler::verCheckDelegateCreation(const BYTE* delegateCreateStart,
4783 const BYTE* codeAddr,
4784 mdMemberRef& targetMemberRef)
4786 if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr))
4788 targetMemberRef = getU4LittleEndian(&delegateCreateStart[2]);
4791 else if (impIsDUP_LDVIRTFTN_TOKEN(delegateCreateStart, codeAddr))
4793 targetMemberRef = getU4LittleEndian(&delegateCreateStart[3]);
4800 typeInfo Compiler::verVerifySTIND(const typeInfo& tiTo, const typeInfo& value, const typeInfo& instrType)
4802 Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
4803 typeInfo ptrVal = verVerifyLDIND(tiTo, instrType);
4804 typeInfo normPtrVal = typeInfo(ptrVal).NormaliseForStack();
4805 if (!tiCompatibleWith(value, normPtrVal, true))
4807 Verify(tiCompatibleWith(value, normPtrVal, true), "type mismatch");
4808 compUnsafeCastUsed = true;
4813 typeInfo Compiler::verVerifyLDIND(const typeInfo& ptr, const typeInfo& instrType)
4815 assert(!instrType.IsStruct());
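// The pointer must be a byref; dereference it and check that the pointed-to type agrees with the type
// the ldind/stind instruction expects. Any mismatch is reported and also noted via compUnsafeCastUsed.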
4820 ptrVal = DereferenceByRef(ptr);
4821 if (instrType.IsObjRef() && !ptrVal.IsObjRef())
4823 Verify(false, "bad pointer");
4824 compUnsafeCastUsed = true;
4826 else if (!instrType.IsObjRef() && !typeInfo::AreEquivalent(instrType, ptrVal))
4828 Verify(false, "pointer not consistent with instr");
4829 compUnsafeCastUsed = true;
4834 Verify(false, "pointer not byref");
4835 compUnsafeCastUsed = true;
4841 // Verify that the field is used properly. 'tiThis' is NULL for statics,
4842 // 'fieldFlags' is the field's attributes, and mutator is TRUE if it is a
4843 // ld*flda or a st*fld.
4844 // 'enclosingClass' is given if we are accessing a field in some specific type.
4846 void Compiler::verVerifyField(CORINFO_RESOLVED_TOKEN* pResolvedToken,
4847 const CORINFO_FIELD_INFO& fieldInfo,
4848 const typeInfo* tiThis,
4850 BOOL allowPlainStructAsThis)
4852 CORINFO_CLASS_HANDLE enclosingClass = pResolvedToken->hClass;
4853 unsigned fieldFlags = fieldInfo.fieldFlags;
4854 CORINFO_CLASS_HANDLE instanceClass =
4855 info.compClassHnd; // for statics, we imagine the instance is the current class.
4857 bool isStaticField = ((fieldFlags & CORINFO_FLG_FIELD_STATIC) != 0);
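// Stores and address-taking accesses ('mutator') get extra checks: no mutation of RVA-based statics,
// and initonly fields may only be mutated from the declaring type's matching (static or instance) constructor.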
4860 Verify(!(fieldFlags & CORINFO_FLG_FIELD_UNMANAGED), "mutating an RVA based static");
4861 if ((fieldFlags & CORINFO_FLG_FIELD_FINAL))
4863 Verify((info.compFlags & CORINFO_FLG_CONSTRUCTOR) && enclosingClass == info.compClassHnd &&
4864 info.compIsStatic == isStaticField,
4865 "bad use of initonly field (set or address taken)");
4869 if (tiThis == nullptr)
4871 Verify(isStaticField, "used static opcode with non-static field");
4875 typeInfo tThis = *tiThis;
4877 if (allowPlainStructAsThis && tThis.IsValueClass())
4882 // If it is null, we assume we can access it (since it will AV shortly)
4883 // If it is anything but a reference class, there is no hierarchy, so
4884 // again, we don't need the precise instance class to compute 'protected' access
4885 if (tiThis->IsType(TI_REF))
4887 instanceClass = tiThis->GetClassHandleForObjRef();
4890 // Note that even if the field is static, we require that the this pointer
4891 // satisfy the same constraints as a non-static field. This happens to
4892 // be simpler and seems reasonable
4893 typeInfo tiDeclaredThis = verMakeTypeInfo(enclosingClass);
4894 if (tiDeclaredThis.IsValueClass())
4896 tiDeclaredThis.MakeByRef();
4898 // we allow read-only tThis, on any field access (even stores!), because if the
4899 // class implementor wants to prohibit stores he should make the field private.
4900 // we do this by setting the read-only bit on the type we compare tThis to.
4901 tiDeclaredThis.SetIsReadonlyByRef();
4903 else if (verTrackObjCtorInitState && tThis.IsThisPtr())
4905 // Any field access is legal on "uninitialized" this pointers.
4906 // The easiest way to implement this is to simply set the
4907 // initialized bit for the duration of the type check on the
4908 // field access only. It does not change the state of the "this"
4909 // for the function as a whole. Note that the "tThis" is a copy
4910 // of the original "this" type (*tiThis) passed in.
4911 tThis.SetInitialisedObjRef();
4914 Verify(tiCompatibleWith(tThis, tiDeclaredThis, true), "this type mismatch");
4917 // Presently the JIT does not check that we don't store or take the address of init-only fields
4918 // since we cannot guarantee their immutability and it is not a security issue.
4920 // check any constraints on the field's class --- accessing the field might cause a class constructor to run.
4921 VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(enclosingClass),
4922 "field has unsatisfied class constraints");
4923 if (fieldFlags & CORINFO_FLG_FIELD_PROTECTED)
4925 Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClass),
4926 "Accessing protected method through wrong type.");
4930 void Compiler::verVerifyCond(const typeInfo& tiOp1, const typeInfo& tiOp2, unsigned opcode)
4932 if (tiOp1.IsNumberType())
4934 #ifdef _TARGET_64BIT_
4935 Verify(tiCompatibleWith(tiOp1, tiOp2, true), "Cond type mismatch");
4936 #else // _TARGET_64BIT
4937 // [10/17/2013] Consider changing this: to put on my verification lawyer hat,
4938 // this is non-conforming to the ECMA Spec: types don't have to be equivalent,
4939 // but compatible, since we can coalesce native int with int32 (see section III.1.5).
4940 Verify(typeInfo::AreEquivalent(tiOp1, tiOp2), "Cond type mismatch");
4941 #endif // !_TARGET_64BIT_
4943 else if (tiOp1.IsObjRef())
4955 Verify(FALSE, "Cond not allowed on object types");
4957 Verify(tiOp2.IsObjRef(), "Cond type mismatch");
4959 else if (tiOp1.IsByRef())
4961 Verify(tiOp2.IsByRef(), "Cond type mismatch");
4965 Verify(tiOp1.IsMethod() && tiOp2.IsMethod(), "Cond type mismatch");
4969 void Compiler::verVerifyThisPtrInitialised()
4971 if (verTrackObjCtorInitState)
4973 Verify(verCurrentState.thisInitialized == TIS_Init, "this ptr is not initialized");
4977 BOOL Compiler::verIsCallToInitThisPtr(CORINFO_CLASS_HANDLE context, CORINFO_CLASS_HANDLE target)
4979 // Either target == context, in which case we are calling an alternate .ctor,
4980 // or target is the immediate parent of context.
4982 return ((target == context) || (target == info.compCompHnd->getParentType(context)));
4985 GenTreePtr Compiler::impImportLdvirtftn(GenTreePtr thisPtr,
4986 CORINFO_RESOLVED_TOKEN* pResolvedToken,
4987 CORINFO_CALL_INFO* pCallInfo)
4989 if ((pCallInfo->methodFlags & CORINFO_FLG_EnC) && !(pCallInfo->classFlags & CORINFO_FLG_INTERFACE))
4991 NO_WAY("Virtual call to a function added via EnC is not supported");
4994 #ifdef FEATURE_READYTORUN_COMPILER
4995 if (opts.IsReadyToRun())
4997 if (!pCallInfo->exactContextNeedsRuntimeLookup)
4999 GenTreeCall* call = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_VIRTUAL_FUNC_PTR, TYP_I_IMPL, GTF_EXCEPT,
5000 gtNewArgList(thisPtr));
5002 call->setEntryPoint(pCallInfo->codePointerLookup.constLookup);
5007 // We need a runtime lookup. CoreRT has a ReadyToRun helper for that too.
5008 if (IsTargetAbi(CORINFO_CORERT_ABI))
5010 GenTreePtr ctxTree = getRuntimeContextTree(pCallInfo->codePointerLookup.lookupKind.runtimeLookupKind);
5012 return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL,
5013 gtNewArgList(ctxTree), &pCallInfo->codePointerLookup.lookupKind);
5018 // Get the exact descriptor for the static callsite
5019 GenTreePtr exactTypeDesc = impParentClassTokenToHandle(pResolvedToken);
5020 if (exactTypeDesc == nullptr)
5021 { // compDonotInline()
5025 GenTreePtr exactMethodDesc = impTokenToHandle(pResolvedToken);
5026 if (exactMethodDesc == nullptr)
5027 { // compDonotInline()
5031 GenTreeArgList* helpArgs = gtNewArgList(exactMethodDesc);
5033 helpArgs = gtNewListNode(exactTypeDesc, helpArgs);
5035 helpArgs = gtNewListNode(thisPtr, helpArgs);
5037 // Call helper function. This gets the target address of the final destination callsite.
5039 return gtNewHelperCallNode(CORINFO_HELP_VIRTUAL_FUNC_PTR, TYP_I_IMPL, GTF_EXCEPT, helpArgs);
5042 /*****************************************************************************
5044 * Build and import a box node
5047 void Compiler::impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken)
5049 // Get the tree for the type handle for the boxed object. In the case
5050 // of shared generic code or ngen'd code this might be an embedded
5052 // Note we can only box it if the class constructor has been called;
5053 // we can always do it on primitive types.
5055 GenTreePtr op1 = nullptr;
5056 GenTreePtr op2 = nullptr;
5059 impSpillSpecialSideEff();
5061 // Now get the expression to box from the stack.
5062 CORINFO_CLASS_HANDLE operCls;
5063 GenTreePtr exprToBox = impPopStack(operCls).val;
5065 CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pResolvedToken->hClass);
5066 if (boxHelper == CORINFO_HELP_BOX)
5068 // we are doing 'normal' boxing. This means that we can inline the box operation
5069 // Box(expr) gets morphed into
5070 // temp = new(clsHnd)
5071 // cpobj(temp+4, expr, clsHnd)
5073 // The code paths differ slightly below for structs and primitives because
5074 // "cpobj" differs in these cases. In one case you get
5075 // impAssignStructPtr(temp+4, expr, clsHnd)
5076 // and the other you get
5079 if (impBoxTempInUse || impBoxTemp == BAD_VAR_NUM)
5081 impBoxTemp = lvaGrabTemp(true DEBUGARG("Box Helper"));
5084 // needs to stay in use until this box expression is appended to
5085 // some other node. We approximate this by keeping it alive until
5086 // the opcode stack becomes empty
5087 impBoxTempInUse = true;
5089 #ifdef FEATURE_READYTORUN_COMPILER
5090 bool usingReadyToRunHelper = false;
5092 if (opts.IsReadyToRun())
5094 op1 = impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_NEW, TYP_REF);
5095 usingReadyToRunHelper = (op1 != nullptr);
5098 if (!usingReadyToRunHelper)
5101 // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
5102 // and the newfast call with a single call to a dynamic R2R cell that will:
5103 // 1) Load the context
5104 // 2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
5105 // 3) Allocate and return the new object for boxing
5106 // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
5108 // Ensure that the value class is restored
5109 op2 = impTokenToHandle(pResolvedToken, nullptr, TRUE /* mustRestoreHandle */);
5111 { // compDonotInline()
5115 op1 = gtNewHelperCallNode(info.compCompHnd->getNewHelper(pResolvedToken, info.compMethodHnd), TYP_REF, 0,
5119 /* Remember that this basic block contains 'new' of an object */
5120 compCurBB->bbFlags |= BBF_HAS_NEWOBJ;
5122 GenTreePtr asg = gtNewTempAssign(impBoxTemp, op1);
5124 GenTreePtr asgStmt = impAppendTree(asg, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
5126 op1 = gtNewLclvNode(impBoxTemp, TYP_REF);
5127 op2 = gtNewIconNode(sizeof(void*), TYP_I_IMPL);
5128 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, op2);
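// op1 now points sizeof(void*) past the start of the newly allocated box, i.e. just after the method
// table pointer, which is where the boxed payload is copied below.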
5130 if (varTypeIsStruct(exprToBox))
5132 assert(info.compCompHnd->getClassSize(pResolvedToken->hClass) == info.compCompHnd->getClassSize(operCls));
5133 op1 = impAssignStructPtr(op1, exprToBox, operCls, (unsigned)CHECK_SPILL_ALL);
5137 lclTyp = exprToBox->TypeGet();
5138 if (lclTyp == TYP_BYREF)
5140 lclTyp = TYP_I_IMPL;
5142 CorInfoType jitType = info.compCompHnd->asCorInfoType(pResolvedToken->hClass);
5143 if (impIsPrimitive(jitType))
5145 lclTyp = JITtype2varType(jitType);
5147 assert(genActualType(exprToBox->TypeGet()) == genActualType(lclTyp) ||
5148 varTypeIsFloating(lclTyp) == varTypeIsFloating(exprToBox->TypeGet()));
5149 var_types srcTyp = exprToBox->TypeGet();
5150 var_types dstTyp = lclTyp;
5152 if (srcTyp != dstTyp)
5154 assert((varTypeIsFloating(srcTyp) && varTypeIsFloating(dstTyp)) ||
5155 (varTypeIsIntegral(srcTyp) && varTypeIsIntegral(dstTyp)));
5156 exprToBox = gtNewCastNode(dstTyp, exprToBox, dstTyp);
5158 op1 = gtNewAssignNode(gtNewOperNode(GT_IND, lclTyp, op1), exprToBox);
5161 op2 = gtNewLclvNode(impBoxTemp, TYP_REF);
5162 op1 = gtNewOperNode(GT_COMMA, TYP_REF, op1, op2);
5164 // Record that this is a "box" node.
5165 op1 = new (this, GT_BOX) GenTreeBox(TYP_REF, op1, asgStmt);
5167 // If it is a value class, mark the "box" node. We can use this information
5168 // to optimise several cases:
5169 // "box(x) == null" --> false
5170 // "(box(x)).CallAnInterfaceMethod(...)" --> "(&x).CallAValueTypeMethod"
5171 // "(box(x)).CallAnObjectMethod(...)" --> "(&x).CallAValueTypeMethod"
5173 op1->gtFlags |= GTF_BOX_VALUE;
5174 assert(op1->IsBoxedValue());
5175 assert(asg->gtOper == GT_ASG);
5179 // Don't optimize, just call the helper and be done with it
5181 // Ensure that the value class is restored
5182 op2 = impTokenToHandle(pResolvedToken, nullptr, TRUE /* mustRestoreHandle */);
5184 { // compDonotInline()
5188 GenTreeArgList* args = gtNewArgList(op2, impGetStructAddr(exprToBox, operCls, (unsigned)CHECK_SPILL_ALL, true));
5189 op1 = gtNewHelperCallNode(boxHelper, TYP_REF, GTF_EXCEPT, args);
5192 /* Push the result back on the stack, */
5193 /* even if clsHnd is a value class we want the TI_REF */
5194 typeInfo tiRetVal = typeInfo(TI_REF, info.compCompHnd->getTypeForBox(pResolvedToken->hClass));
5195 impPushOnStack(op1, tiRetVal);
5198 //------------------------------------------------------------------------
5199 // impImportNewObjArray: Build and import `new` of a multi-dimensional array
5202 // pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized
5203 // by a call to CEEInfo::resolveToken().
5204 // pCallInfo - The CORINFO_CALL_INFO that has been initialized
5205 // by a call to CEEInfo::getCallInfo().
5208 // The multi-dimensional array constructor arguments (array dimensions) are
5209 // pushed on the IL stack on entry to this method.
5212 // Multi-dimensional array constructors are imported as calls to a JIT
5213 // helper, not as regular calls.
5215 void Compiler::impImportNewObjArray(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo)
5217 GenTreePtr classHandle = impParentClassTokenToHandle(pResolvedToken);
5218 if (classHandle == nullptr)
5219 { // compDonotInline()
5223 assert(pCallInfo->sig.numArgs);
5226 GenTreeArgList* args;
5229 // There are two different JIT helpers that can be used to allocate
5230 // multi-dimensional arrays:
5232 // - CORINFO_HELP_NEW_MDARR - takes the array dimensions as varargs.
5233 // This variant is deprecated. It should be eventually removed.
5235 // - CORINFO_HELP_NEW_MDARR_NONVARARG - takes the array dimensions as
5236 // pointer to block of int32s. This variant is more portable.
5238 // The non-varargs helper is enabled for CoreRT only for now. Enabling this
5239 // unconditionally would require ReadyToRun version bump.
5241 CLANG_FORMAT_COMMENT_ANCHOR;
5243 #if COR_JIT_EE_VERSION > 460
5244 if (!opts.IsReadyToRun() || IsTargetAbi(CORINFO_CORERT_ABI))
5246 LclVarDsc* newObjArrayArgsVar;
5248 // Reuse the temp used to pass the array dimensions to avoid bloating
5249 // the stack frame in case there are multiple calls to multi-dim array
5250 // constructors within a single method.
5251 if (lvaNewObjArrayArgs == BAD_VAR_NUM)
5253 lvaNewObjArrayArgs = lvaGrabTemp(false DEBUGARG("NewObjArrayArgs"));
5254 lvaTable[lvaNewObjArrayArgs].lvType = TYP_BLK;
5255 lvaTable[lvaNewObjArrayArgs].lvExactSize = 0;
5258 // Increase size of lvaNewObjArrayArgs to be the largest size needed to hold 'numArgs' integers
5259 // for our call to CORINFO_HELP_NEW_MDARR_NONVARARG.
5260 lvaTable[lvaNewObjArrayArgs].lvExactSize =
5261 max(lvaTable[lvaNewObjArrayArgs].lvExactSize, pCallInfo->sig.numArgs * sizeof(INT32));
5263 // The side-effects may include allocation of more multi-dimensional arrays. Spill all side-effects
5264 // to ensure that the shared lvaNewObjArrayArgs local variable is only ever used to pass arguments
5265 // to one allocation at a time.
5266 impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportNewObjArray"));
5269 // The arguments of the CORINFO_HELP_NEW_MDARR_NONVARARG helper are:
5270 // - Array class handle
5271 // - Number of dimension arguments
5272 // - Pointer to block of int32 dimensions - address of lvaNewObjArrayArgs temp.
5275 node = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK);
5276 node = gtNewOperNode(GT_ADDR, TYP_I_IMPL, node);
5278 // Pop dimension arguments from the stack one at a time and store them
5279 // into the lvaNewObjArrayArgs temp.
5280 for (int i = pCallInfo->sig.numArgs - 1; i >= 0; i--)
5282 GenTreePtr arg = impImplicitIorI4Cast(impPopStack().val, TYP_INT);
5284 GenTreePtr dest = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK);
5285 dest = gtNewOperNode(GT_ADDR, TYP_I_IMPL, dest);
5286 dest = gtNewOperNode(GT_ADD, TYP_I_IMPL, dest,
5287 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(INT32) * i));
5288 dest = gtNewOperNode(GT_IND, TYP_INT, dest);
5290 node = gtNewOperNode(GT_COMMA, node->TypeGet(), gtNewAssignNode(dest, arg), node);
5293 args = gtNewArgList(node);
5295 // pass number of arguments to the helper
5296 args = gtNewListNode(gtNewIconNode(pCallInfo->sig.numArgs), args);
5298 args = gtNewListNode(classHandle, args);
5300 node = gtNewHelperCallNode(CORINFO_HELP_NEW_MDARR_NONVARARG, TYP_REF, 0, args);
5306 // The varargs helper needs the type and method handles as last
5307 // and last-1 param (this is a cdecl call, so args will be
5308 // pushed in reverse order on the CPU stack)
5311 args = gtNewArgList(classHandle);
5313 // pass number of arguments to the helper
5314 args = gtNewListNode(gtNewIconNode(pCallInfo->sig.numArgs), args);
5316 unsigned argFlags = 0;
5317 args = impPopList(pCallInfo->sig.numArgs, &argFlags, &pCallInfo->sig, args);
5319 node = gtNewHelperCallNode(CORINFO_HELP_NEW_MDARR, TYP_REF, 0, args);
5321 // varargs, so we pop the arguments
5322 node->gtFlags |= GTF_CALL_POP_ARGS;
5325 // At the present time we don't track Caller pop arguments
5326 // that have GC references in them
5327 for (GenTreeArgList* temp = args; temp; temp = temp->Rest())
5329 assert(temp->Current()->gtType != TYP_REF);
5334 node->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
5335 node->gtCall.compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)pResolvedToken->hClass;
5337 // Remember that this basic block contains 'new' of a md array
5338 compCurBB->bbFlags |= BBF_HAS_NEWARRAY;
5340 impPushOnStack(node, typeInfo(TI_REF, pResolvedToken->hClass));
5343 GenTreePtr Compiler::impTransformThis(GenTreePtr thisPtr,
5344 CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
5345 CORINFO_THIS_TRANSFORM transform)
5349 case CORINFO_DEREF_THIS:
5351 GenTreePtr obj = thisPtr;
5353 // This does a LDIND on the obj, which should be a byref pointing to a ref
5354 impBashVarAddrsToI(obj);
5355 assert(genActualType(obj->gtType) == TYP_I_IMPL || obj->gtType == TYP_BYREF);
5356 CorInfoType constraintTyp = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass);
5358 obj = gtNewOperNode(GT_IND, JITtype2varType(constraintTyp), obj);
5359 // ldind could point anywhere, for example a boxed class static int
5360 obj->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
5365 case CORINFO_BOX_THIS:
5367 // Constraint calls where there might be no
5368 // unboxed entry point require us to implement the call via helper.
5369 // These only occur when a possible target of the call
5370 // may have inherited an implementation of an interface
5371 // method from System.Object or System.ValueType. The EE does not provide us with
5372 // "unboxed" versions of these methods.
5374 GenTreePtr obj = thisPtr;
5376 assert(obj->TypeGet() == TYP_BYREF || obj->TypeGet() == TYP_I_IMPL);
5377 obj = gtNewObjNode(pConstrainedResolvedToken->hClass, obj);
5378 obj->gtFlags |= GTF_EXCEPT;
5380 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass);
5381 var_types objType = JITtype2varType(jitTyp);
5382 if (impIsPrimitive(jitTyp))
5384 if (obj->OperIsBlk())
5386 obj->ChangeOperUnchecked(GT_IND);
5388 // Obj could point anywhere, for example a boxed class static int
5389 obj->gtFlags |= GTF_IND_TGTANYWHERE;
5390 obj->gtOp.gtOp2 = nullptr; // must be zero for tree walkers
5393 obj->gtType = JITtype2varType(jitTyp);
5394 assert(varTypeIsArithmetic(obj->gtType));
5397 // This pushes on the dereferenced byref
5398 // This is then used immediately to box.
5399 impPushOnStack(obj, verMakeTypeInfo(pConstrainedResolvedToken->hClass).NormaliseForStack());
5401 // This pops off the byref-to-a-value-type remaining on the stack and
5402 // replaces it with a boxed object.
5403 // This is then used as the object to the virtual call immediately below.
5404 impImportAndPushBox(pConstrainedResolvedToken);
5405 if (compDonotInline())
5410 obj = impPopStack().val;
5413 case CORINFO_NO_THIS_TRANSFORM:
5419 //------------------------------------------------------------------------
5420 // impCanPInvokeInline: check whether PInvoke inlining should be enabled in the current method.
5423 // true if PInvoke inlining should be enabled in current method, false otherwise
5426 // Checks a number of ambient conditions where we could pinvoke but choose not to
5428 bool Compiler::impCanPInvokeInline()
5430 return getInlinePInvokeEnabled() && (!opts.compDbgCode) && (compCodeOpt() != SMALL_CODE) &&
5431 (!opts.compNoPInvokeInlineCB) // profiler is preventing inline pinvoke
5435 //------------------------------------------------------------------------
5436 // impCanPInvokeInlineCallSite: basic legality checks using information
5437 // from a call to see if the call qualifies as an inline pinvoke.
5440 // block - block containing the call, or for inlinees, block
5441 // containing the call being inlined
5444 // true if this call can legally qualify as an inline pinvoke, false otherwise
5447 // For runtimes that support exception handling interop there are
5448 // restrictions on using inline pinvoke in handler regions.
5450 // * We have to disable pinvoke inlining inside of filters because
5451 // in case the main execution (i.e. in the try block) is inside
5452 // unmanaged code, we cannot reuse the inlined stub (we still need
5453 // the original state until we are in the catch handler)
5455 // * We disable pinvoke inlining inside handlers since the GSCookie
5456 // is in the inlined Frame (see
5457 // CORINFO_EE_INFO::InlinedCallFrameInfo::offsetOfGSCookie), but
5458 // this would not protect framelets/return-address of handlers.
5460 // These restrictions are currently also in place for CoreCLR but
5461 // can be relaxed when coreclr/#8459 is addressed.
5463 bool Compiler::impCanPInvokeInlineCallSite(BasicBlock* block)
5465 if (block->hasHndIndex())
5470 // The remaining limitations do not apply to CoreRT
5471 if (IsTargetAbi(CORINFO_CORERT_ABI))
5476 #ifdef _TARGET_AMD64_
5477 // On x64, we disable pinvoke inlining inside of try regions.
5478 // Here is the comment from JIT64 explaining why:
5480 // [VSWhidbey: 611015] - because the jitted code links in the
5481 // Frame (instead of the stub) we rely on the Frame not being
5482 // 'active' until inside the stub. This normally happens by the
5483 // stub setting the return address pointer in the Frame object
5484 // inside the stub. On a normal return, the return address
5485 // pointer is zeroed out so the Frame can be safely re-used, but
5486 // if an exception occurs, nobody zeros out the return address
5487 // pointer. Thus if we re-used the Frame object, it would go
5488 // 'active' as soon as we link it into the Frame chain.
5490 // Technically we only need to disable PInvoke inlining if we're
5491 // in a handler or if we're in a try body with a catch or
5492 // filter/except where other non-handler code in this method
5493 // might run and try to re-use the dirty Frame object.
5495 // A desktop test case where this seems to matter is
5496 // jit\jit64\ebvts\mcpp\sources2\ijw\__clrcall\vector_ctor_dtor.02\deldtor_clr.exe
5497 if (block->hasTryIndex())
5501 #endif // _TARGET_AMD64_
5506 //------------------------------------------------------------------------
5507 // impCheckForPInvokeCall: examine the call to see if it is a pinvoke and, if so,
5508 // whether it can be expressed as an inline pinvoke.
5511 // call - tree for the call
5512 // methHnd - handle for the method being called (may be null)
5513 // sig - signature of the method being called
5514 // mflags - method flags for the method being called
5515 // block - block containing the call, or for inlinees, block
5516 // containing the call being inlined
5519 // Sets GTF_CALL_M_PINVOKE on the call for pinvokes.
5521 // Also sets GTF_CALL_UNMANAGED on call for inline pinvokes if the
5522 // call passes a combination of legality and profitability checks.
5524 // If GTF_CALL_UNMANAGED is set, increments info.compCallUnmanaged
5526 void Compiler::impCheckForPInvokeCall(
5527 GenTreePtr call, CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* sig, unsigned mflags, BasicBlock* block)
5529 CorInfoUnmanagedCallConv unmanagedCallConv;
5531 // If VM flagged it as Pinvoke, flag the call node accordingly
5532 if ((mflags & CORINFO_FLG_PINVOKE) != 0)
5534 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_PINVOKE;
5539 if ((mflags & CORINFO_FLG_PINVOKE) == 0 || (mflags & CORINFO_FLG_NOSECURITYWRAP) == 0)
5544 unmanagedCallConv = info.compCompHnd->getUnmanagedCallConv(methHnd);
5548 CorInfoCallConv callConv = CorInfoCallConv(sig->callConv & CORINFO_CALLCONV_MASK);
5549 if (callConv == CORINFO_CALLCONV_NATIVEVARARG)
5551 // Used by the IL Stubs.
5552 callConv = CORINFO_CALLCONV_C;
5554 static_assert_no_msg((unsigned)CORINFO_CALLCONV_C == (unsigned)CORINFO_UNMANAGED_CALLCONV_C);
5555 static_assert_no_msg((unsigned)CORINFO_CALLCONV_STDCALL == (unsigned)CORINFO_UNMANAGED_CALLCONV_STDCALL);
5556 static_assert_no_msg((unsigned)CORINFO_CALLCONV_THISCALL == (unsigned)CORINFO_UNMANAGED_CALLCONV_THISCALL);
5557 unmanagedCallConv = CorInfoUnmanagedCallConv(callConv);
5559 assert(!call->gtCall.gtCallCookie);
5562 if (unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_C && unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_STDCALL &&
5563 unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_THISCALL)
5567 optNativeCallCount++;
5569 if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && methHnd == nullptr)
5571 // PInvoke CALLI in IL stubs must be inlined
5576 if (!impCanPInvokeInlineCallSite(block))
5581 // PInvoke CALL in IL stubs must be inlined on CoreRT. Skip the ambient conditions checks and
5582 // profitability checks
5583 if (!(opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && IsTargetAbi(CORINFO_CORERT_ABI)))
5585 if (!impCanPInvokeInline())
5590 // Size-speed tradeoff: don't use inline pinvoke at rarely
5591 // executed call sites. The non-inline version is more
5593 if (block->isRunRarely())
5599 // The expensive check should be last
5600 if (info.compCompHnd->pInvokeMarshalingRequired(methHnd, sig))
5606 JITLOG((LL_INFO1000000, "\nInline a CALLI PINVOKE call from method %s", info.compFullName));
5608 call->gtFlags |= GTF_CALL_UNMANAGED;
5609 info.compCallUnmanaged++;
5611 // AMD64 convention is the same for native and managed
5612 if (unmanagedCallConv == CORINFO_UNMANAGED_CALLCONV_C)
5614 call->gtFlags |= GTF_CALL_POP_ARGS;
5617 if (unmanagedCallConv == CORINFO_UNMANAGED_CALLCONV_THISCALL)
5619 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_UNMGD_THISCALL;
5623 GenTreePtr Compiler::impImportIndirectCall(CORINFO_SIG_INFO* sig, IL_OFFSETX ilOffset)
5625 var_types callRetTyp = JITtype2varType(sig->retType);
5627 /* The function pointer is on top of the stack - It may be a
5628 * complex expression. As it is evaluated after the args,
5629 * it may cause registered args to be spilled. Simply spill it.
5632 // Ignore this trivial case.
5633 if (impStackTop().val->gtOper != GT_LCL_VAR)
5635 impSpillStackEntry(verCurrentState.esStackDepth - 1,
5636 BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impImportIndirectCall"));
5639 /* Get the function pointer */
5641 GenTreePtr fptr = impPopStack().val;
5642 assert(genActualType(fptr->gtType) == TYP_I_IMPL);
5645 // This temporary must never be converted to a double in stress mode,
5646 // because that can introduce a call to the cast helper after the
5647 // arguments have already been evaluated.
5649 if (fptr->OperGet() == GT_LCL_VAR)
5651 lvaTable[fptr->gtLclVarCommon.gtLclNum].lvKeepType = 1;
5655 /* Create the call node */
5657 GenTreePtr call = gtNewIndCallNode(fptr, callRetTyp, nullptr, ilOffset);
5659 call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
5664 /*****************************************************************************/
5666 void Compiler::impPopArgsForUnmanagedCall(GenTreePtr call, CORINFO_SIG_INFO* sig)
5668 assert(call->gtFlags & GTF_CALL_UNMANAGED);
5670 /* Since we push the arguments in reverse order (i.e. right -> left)
5671 * spill any side effects from the stack
5673 * OBS: If there is only one side effect we do not need to spill it;
5674 * thus we have to spill all side-effects except the last one
5677 unsigned lastLevelWithSideEffects = UINT_MAX;
5679 unsigned argsToReverse = sig->numArgs;
5681 // For "thiscall", the first argument goes in a register. Since its
5682 // order does not need to be changed, we do not need to spill it
5684 if (call->gtCall.gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
5686 assert(argsToReverse);
5690 #ifndef _TARGET_X86_
5691 // Don't reverse args on ARM or x64 - first four args always placed in regs in order
5695 for (unsigned level = verCurrentState.esStackDepth - argsToReverse; level < verCurrentState.esStackDepth; level++)
5697 if (verCurrentState.esStack[level].val->gtFlags & GTF_ORDER_SIDEEFF)
5699 assert(lastLevelWithSideEffects == UINT_MAX);
5701 impSpillStackEntry(level,
5702 BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - other side effect"));
5704 else if (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT)
5706 if (lastLevelWithSideEffects != UINT_MAX)
5708 /* We had a previous side effect - must spill it */
5709 impSpillStackEntry(lastLevelWithSideEffects,
5710 BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - side effect"));
5712 /* Record the level for the current side effect in case we will spill it */
5713 lastLevelWithSideEffects = level;
5717 /* This is the first side effect encountered - record its level */
5719 lastLevelWithSideEffects = level;
5724 /* The argument list is now "clean" - no out-of-order side effects
5725 * Pop the argument list in reverse order */
5727 unsigned argFlags = 0;
5728 GenTreePtr args = call->gtCall.gtCallArgs =
5729 impPopRevList(sig->numArgs, &argFlags, sig, sig->numArgs - argsToReverse);
5731 if (call->gtCall.gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
5733 GenTreePtr thisPtr = args->Current();
5734 impBashVarAddrsToI(thisPtr);
5735 assert(thisPtr->TypeGet() == TYP_I_IMPL || thisPtr->TypeGet() == TYP_BYREF);
5740 call->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
5744 //------------------------------------------------------------------------
5745 // impInitClass: Build a node to initialize the class before accessing the
5746 // field if necessary
5749 // pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized
5750 // by a call to CEEInfo::resolveToken().
5752 // Return Value: If needed, a pointer to the node that will perform the class
5753 // initialization. Otherwise, nullptr.
5756 GenTreePtr Compiler::impInitClass(CORINFO_RESOLVED_TOKEN* pResolvedToken)
5758 CorInfoInitClassResult initClassResult =
5759 info.compCompHnd->initClass(pResolvedToken->hField, info.compMethodHnd, impTokenLookupContextHandle);
5761 if ((initClassResult & CORINFO_INITCLASS_USE_HELPER) == 0)
5767 GenTreePtr node = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup);
5769 if (node == nullptr)
5771 assert(compDonotInline());
5777 node = gtNewHelperCallNode(CORINFO_HELP_INITCLASS, TYP_VOID, 0, gtNewArgList(node));
5781 // Call the shared non-GC static helper, as it's the fastest.
5782 node = fgGetSharedCCtor(pResolvedToken->hClass);
5788 GenTreePtr Compiler::impImportStaticReadOnlyField(void* fldAddr, var_types lclTyp)
5790 GenTreePtr op1 = nullptr;
5799 ival = *((bool*)fldAddr);
5803 ival = *((signed char*)fldAddr);
5807 ival = *((unsigned char*)fldAddr);
5811 ival = *((short*)fldAddr);
5816 ival = *((unsigned short*)fldAddr);
5821 ival = *((int*)fldAddr);
5823 op1 = gtNewIconNode(ival);
5828 lval = *((__int64*)fldAddr);
5829 op1 = gtNewLconNode(lval);
5833 dval = *((float*)fldAddr);
5834 op1 = gtNewDconNode(dval);
5835 #if !FEATURE_X87_DOUBLES
5836 // The X87 stack doesn't differentiate between float/double,
5837 // so R4 is treated as R8; every other target does differentiate them.
5838 op1->gtType = TYP_FLOAT;
5839 #endif // FEATURE_X87_DOUBLES
5843 dval = *((double*)fldAddr);
5844 op1 = gtNewDconNode(dval);
5848 assert(!"Unexpected lclTyp");
5855 GenTreePtr Compiler::impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolvedToken,
5856 CORINFO_ACCESS_FLAGS access,
5857 CORINFO_FIELD_INFO* pFieldInfo,
5862 switch (pFieldInfo->fieldAccessor)
5864 case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
5866 assert(!compIsForInlining());
5868 // We first call a special helper to get the statics base pointer
5869 op1 = impParentClassTokenToHandle(pResolvedToken);
5871 // compIsForInlining() is false, so we should never get NULL here
5872 assert(op1 != nullptr);
5874 var_types type = TYP_BYREF;
5876 switch (pFieldInfo->helper)
5878 case CORINFO_HELP_GETGENERICS_NONGCTHREADSTATIC_BASE:
5881 case CORINFO_HELP_GETGENERICS_GCSTATIC_BASE:
5882 case CORINFO_HELP_GETGENERICS_NONGCSTATIC_BASE:
5883 case CORINFO_HELP_GETGENERICS_GCTHREADSTATIC_BASE:
5886 assert(!"unknown generic statics helper");
5890 op1 = gtNewHelperCallNode(pFieldInfo->helper, type, 0, gtNewArgList(op1));
5892 FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
5893 op1 = gtNewOperNode(GT_ADD, type, op1,
5894 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, pFieldInfo->offset, fs));
5898 case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
5900 #ifdef FEATURE_READYTORUN_COMPILER
5901 if (opts.IsReadyToRun())
5903 unsigned callFlags = 0;
5905 if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT)
5907 callFlags |= GTF_CALL_HOISTABLE;
5910 op1 = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_STATIC_BASE, TYP_BYREF, callFlags);
5912 op1->gtCall.setEntryPoint(pFieldInfo->fieldLookup);
5917 op1 = fgGetStaticsCCtorHelper(pResolvedToken->hClass, pFieldInfo->helper);
5921 FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
5922 op1 = gtNewOperNode(GT_ADD, op1->TypeGet(), op1,
5923 new (this, GT_CNS_INT) GenTreeIntCon(TYP_INT, pFieldInfo->offset, fs));
5927 #if COR_JIT_EE_VERSION > 460
5928 case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
5930 #ifdef FEATURE_READYTORUN_COMPILER
5931 noway_assert(opts.IsReadyToRun());
5932 CORINFO_LOOKUP_KIND kind = info.compCompHnd->getLocationOfThisType(info.compMethodHnd);
5933 assert(kind.needsRuntimeLookup);
5935 GenTreePtr ctxTree = getRuntimeContextTree(kind.runtimeLookupKind);
5936 GenTreeArgList* args = gtNewArgList(ctxTree);
5938 unsigned callFlags = 0;
5940 if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT)
5942 callFlags |= GTF_CALL_HOISTABLE;
5944 var_types type = TYP_BYREF;
5945 op1 = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE, type, callFlags, args);
5947 op1->gtCall.setEntryPoint(pFieldInfo->fieldLookup);
5948 FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
5949 op1 = gtNewOperNode(GT_ADD, type, op1,
5950 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, pFieldInfo->offset, fs));
5953 #endif // FEATURE_READYTORUN_COMPILER
5956 #endif // COR_JIT_EE_VERSION > 460
5959 if (!(access & CORINFO_ACCESS_ADDRESS))
5961 // In the future, it may be better to just create the right tree here instead of folding it later.
5962 op1 = gtNewFieldRef(lclTyp, pResolvedToken->hField);
5964 if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP)
5966 op1->gtType = TYP_REF; // points at boxed object
5967 FieldSeqNode* firstElemFldSeq =
5968 GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField);
5970 gtNewOperNode(GT_ADD, TYP_BYREF, op1,
5971 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(void*), firstElemFldSeq));
5973 if (varTypeIsStruct(lclTyp))
5975 // Constructor adds GTF_GLOB_REF. Note that this is *not* GTF_EXCEPT.
5976 op1 = gtNewObjNode(pFieldInfo->structType, op1);
5980 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
5981 op1->gtFlags |= GTF_GLOB_REF | GTF_IND_NONFAULTING;
5989 void** pFldAddr = nullptr;
5990 void* fldAddr = info.compCompHnd->getFieldAddress(pResolvedToken->hField, (void**)&pFldAddr);
5992 FieldSeqNode* fldSeq = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
5994 /* Create the data member node */
5995 if (pFldAddr == nullptr)
5997 op1 = gtNewIconHandleNode((size_t)fldAddr, GTF_ICON_STATIC_HDL, fldSeq);
6001 op1 = gtNewIconHandleNode((size_t)pFldAddr, GTF_ICON_STATIC_HDL, fldSeq);
6003 // There are two cases here: either the static is RVA-based,
6004 // in which case the type of the FIELD node is not a GC type
6005 // and the handle to the RVA is a TYP_I_IMPL; or the FIELD node is
6006 // a GC type and the handle to it is a TYP_BYREF into the GC heap,
6007 // because handles to statics now go into the large object heap.
6009 var_types handleTyp = (var_types)(varTypeIsGC(lclTyp) ? TYP_BYREF : TYP_I_IMPL);
6010 op1 = gtNewOperNode(GT_IND, handleTyp, op1);
6011 op1->gtFlags |= GTF_IND_INVARIANT | GTF_IND_NONFAULTING;
6018 if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP)
6020 op1 = gtNewOperNode(GT_IND, TYP_REF, op1);
6022 FieldSeqNode* fldSeq = GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField);
6024 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
6025 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(void*), fldSeq));
6028 if (!(access & CORINFO_ACCESS_ADDRESS))
6030 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
6031 op1->gtFlags |= GTF_GLOB_REF;
6037 // In general, try to call this before most of the verification work. Most people expect the access
6038 // exceptions before the verification exceptions. If you call this afterwards, that ordering usually doesn't
6039 // happen. It turns out that if you can't access something, we also consider you unverifiable for other reasons.
6040 void Compiler::impHandleAccessAllowed(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall)
6042 if (result != CORINFO_ACCESS_ALLOWED)
6044 impHandleAccessAllowedInternal(result, helperCall);
6048 void Compiler::impHandleAccessAllowedInternal(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall)
6052 case CORINFO_ACCESS_ALLOWED:
6054 case CORINFO_ACCESS_ILLEGAL:
6055 // if we're verifying, then we need to reject the illegal access to ensure that we don't think the
6056 // method is verifiable. Otherwise, delay the exception to runtime.
6057 if (compIsForImportOnly())
6059 info.compCompHnd->ThrowExceptionForHelper(helperCall);
6063 impInsertHelperCall(helperCall);
6066 case CORINFO_ACCESS_RUNTIME_CHECK:
6067 impInsertHelperCall(helperCall);
6072 void Compiler::impInsertHelperCall(CORINFO_HELPER_DESC* helperInfo)
6074 // Construct the argument list
6075 GenTreeArgList* args = nullptr;
6076 assert(helperInfo->helperNum != CORINFO_HELP_UNDEF);
6077 for (unsigned i = helperInfo->numArgs; i > 0; --i)
6079 const CORINFO_HELPER_ARG& helperArg = helperInfo->args[i - 1];
6080 GenTreePtr currentArg = nullptr;
6081 switch (helperArg.argType)
6083 case CORINFO_HELPER_ARG_TYPE_Field:
6084 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(
6085 info.compCompHnd->getFieldClass(helperArg.fieldHandle));
6086 currentArg = gtNewIconEmbFldHndNode(helperArg.fieldHandle);
6088 case CORINFO_HELPER_ARG_TYPE_Method:
6089 info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(helperArg.methodHandle);
6090 currentArg = gtNewIconEmbMethHndNode(helperArg.methodHandle);
6092 case CORINFO_HELPER_ARG_TYPE_Class:
6093 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(helperArg.classHandle);
6094 currentArg = gtNewIconEmbClsHndNode(helperArg.classHandle);
6096 case CORINFO_HELPER_ARG_TYPE_Module:
6097 currentArg = gtNewIconEmbScpHndNode(helperArg.moduleHandle);
6099 case CORINFO_HELPER_ARG_TYPE_Const:
6100 currentArg = gtNewIconNode(helperArg.constant);
6103 NO_WAY("Illegal helper arg type");
6105 args = (currentArg == nullptr) ? gtNewArgList(currentArg) : gtNewListNode(currentArg, args);
6109 * Mark as CSE'able, and hoistable. Consider marking hoistable unless you're in the inlinee.
6110 * Also, consider sticking this in the first basic block.
6112 GenTreePtr callout = gtNewHelperCallNode(helperInfo->helperNum, TYP_VOID, GTF_EXCEPT, args);
6113 impAppendTree(callout, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
6116 void Compiler::impInsertCalloutForDelegate(CORINFO_METHOD_HANDLE callerMethodHnd,
6117 CORINFO_METHOD_HANDLE calleeMethodHnd,
6118 CORINFO_CLASS_HANDLE delegateTypeHnd)
6120 #ifdef FEATURE_CORECLR
6121 if (!info.compCompHnd->isDelegateCreationAllowed(delegateTypeHnd, calleeMethodHnd))
6123 // Call the JIT_DelegateSecurityCheck helper before calling the actual function.
6124 // This helper throws an exception if the CLR host disallows the call.
6126 GenTreePtr helper = gtNewHelperCallNode(CORINFO_HELP_DELEGATE_SECURITY_CHECK, TYP_VOID, GTF_EXCEPT,
6127 gtNewArgList(gtNewIconEmbClsHndNode(delegateTypeHnd),
6128 gtNewIconEmbMethHndNode(calleeMethodHnd)));
6129 // Append the callout statement
6130 impAppendTree(helper, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
6132 #endif // FEATURE_CORECLR
6135 // Checks whether the return types of caller and callee are compatible
6136 // so that the callee can be tail called. Note that here we don't check
6137 // compatibility in the IL Verifier sense, but rather whether the return type
6138 // sizes are equal and the values get returned in the same return register.
6139 bool Compiler::impTailCallRetTypeCompatible(var_types callerRetType,
6140 CORINFO_CLASS_HANDLE callerRetTypeClass,
6141 var_types calleeRetType,
6142 CORINFO_CLASS_HANDLE calleeRetTypeClass)
6144 // Note that we cannot relax this condition with genActualType() as the
6145 // calling convention dictates that the caller of a function with a small
6146 // typed return value is responsible for normalizing the return value.
6147 if (callerRetType == calleeRetType)
6152 #if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)
6154 if (callerRetType == TYP_VOID)
6156 // This needs to be allowed to support the following IL pattern that Jit64 allows:
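// A sketch of that pattern (reconstructed from the call+pop+ret handling below):
//     tail. call <method returning a value>
//     pop
//     ret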
6161 // Note that the above IL pattern is not valid as per IL verification rules.
6162 // Therefore, only full trust code can take advantage of this pattern.
6166 // These checks return true if the return value type sizes are the same and the
6167 // values get returned in the same return register, i.e. the caller doesn't need to
6168 // normalize the return value. Some of the tail calls permitted by the checks below
6169 // would have been rejected by the IL Verifier before we reached here. Therefore,
6170 // only full trust code can make those tail calls.
6171 unsigned callerRetTypeSize = 0;
6172 unsigned calleeRetTypeSize = 0;
6173 bool isCallerRetTypMBEnreg =
6174 VarTypeIsMultiByteAndCanEnreg(callerRetType, callerRetTypeClass, &callerRetTypeSize, true);
6175 bool isCalleeRetTypMBEnreg =
6176 VarTypeIsMultiByteAndCanEnreg(calleeRetType, calleeRetTypeClass, &calleeRetTypeSize, true);
6178 if (varTypeIsIntegral(callerRetType) || isCallerRetTypMBEnreg)
6180 return (varTypeIsIntegral(calleeRetType) || isCalleeRetTypMBEnreg) && (callerRetTypeSize == calleeRetTypeSize);
6182 #endif // _TARGET_AMD64_ || _TARGET_ARM64_
6190 PREFIX_TAILCALL_EXPLICIT = 0x00000001, // call has "tail" IL prefix
6191 PREFIX_TAILCALL_IMPLICIT =
6192 0x00000010, // call is treated as having "tail" prefix even though there is no "tail" IL prefix
6193 PREFIX_TAILCALL = (PREFIX_TAILCALL_EXPLICIT | PREFIX_TAILCALL_IMPLICIT),
6194 PREFIX_VOLATILE = 0x00000100,
6195 PREFIX_UNALIGNED = 0x00001000,
6196 PREFIX_CONSTRAINED = 0x00010000,
6197 PREFIX_READONLY = 0x00100000
6200 /********************************************************************************
6202 * Returns true if the current opcode and the opcodes following it correspond
6203 * to a supported tail call IL pattern.
6206 bool Compiler::impIsTailCallILPattern(bool tailPrefixed,
6208 const BYTE* codeAddrOfNextOpcode,
6209 const BYTE* codeEnd,
6211 bool* isCallPopAndRet /* = nullptr */)
6213 // Bail out if the current opcode is not a call.
6214 if (!impOpcodeIsCallOpcode(curOpcode))
6219 #if !FEATURE_TAILCALL_OPT_SHARED_RETURN
6220 // If shared ret tail opt is not enabled, we will enable
6221 // it for recursive methods.
6225 // We can actually handle the case where the ret is in a fallthrough block, as long as that is the only part of
6226 // the sequence. Make sure we don't go past the end of the IL, however.
6227 codeEnd = min(codeEnd + 1, info.compCode + info.compILCodeSize);
6230 // Bail out if there is no next opcode after call
6231 if (codeAddrOfNextOpcode >= codeEnd)
6236 // Scan the opcodes to look for the following IL patterns if either
6237 // i) the call is not tail prefixed (i.e. implicit tail call) or
6238 // ii) if tail prefixed, IL verification is not needed for the method.
6240 // Only in the above two cases can we allow the tail call patterns below,
6241 // which violate the ECMA spec.
6257 #ifdef _TARGET_AMD64_
6260 nextOpcode = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode);
6261 codeAddrOfNextOpcode += sizeof(__int8);
6262 } while ((codeAddrOfNextOpcode < codeEnd) && // Haven't reached end of method
6263 (!tailPrefixed || !tiVerificationNeeded) && // Not ".tail" prefixed or method requires no IL verification
6264 ((nextOpcode == CEE_NOP) || ((nextOpcode == CEE_POP) && (++cntPop == 1)))); // Next opcode = nop or exactly
6265 // one pop seen so far.
6267 nextOpcode = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode);
6270 if (isCallPopAndRet)
6272 // Allow call+pop+ret to be tail call optimized if caller ret type is void
6273 *isCallPopAndRet = (nextOpcode == CEE_RET) && (cntPop == 1);
6276 #ifdef _TARGET_AMD64_
6278 // Tail call IL pattern could be either of the following
6279 // 1) call/callvirt/calli + ret
6280 // 2) call/callvirt/calli + pop + ret in a method returning void.
6281 return (nextOpcode == CEE_RET) && ((cntPop == 0) || ((cntPop == 1) && (info.compRetType == TYP_VOID)));
6282 #else //!_TARGET_AMD64_
6283 return (nextOpcode == CEE_RET) && (cntPop == 0);
6287 /*****************************************************************************
6289 * Determine whether the call could be converted to an implicit tail call
6292 bool Compiler::impIsImplicitTailCallCandidate(
6293 OPCODE opcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, int prefixFlags, bool isRecursive)
6296 #if FEATURE_TAILCALL_OPT
6297 if (!opts.compTailCallOpt)
6302 if (opts.compDbgCode || opts.MinOpts())
6307 // must not be tail prefixed
6308 if (prefixFlags & PREFIX_TAILCALL_EXPLICIT)
6313 #if !FEATURE_TAILCALL_OPT_SHARED_RETURN
6314 // the block containing the call must be marked as BBJ_RETURN
6315 // We allow shared ret tail call optimization on recursive calls even under
6316 // !FEATURE_TAILCALL_OPT_SHARED_RETURN.
6317 if (!isRecursive && (compCurBB->bbJumpKind != BBJ_RETURN))
6319 #endif // !FEATURE_TAILCALL_OPT_SHARED_RETURN
6321 // must be call+ret or call+pop+ret
6322 if (!impIsTailCallILPattern(false, opcode, codeAddrOfNextOpcode, codeEnd, isRecursive))
6330 #endif // FEATURE_TAILCALL_OPT
6333 //------------------------------------------------------------------------
6334 // impImportCall: import a call-inspiring opcode
6337 // opcode - opcode that inspires the call
6338 // pResolvedToken - resolved token for the call target
6339 // pConstrainedResolvedToken - resolved constraint token (or nullptr)
6340 // newObjThis - tree for this pointer or uninitialized newobj temp (or nullptr)
6341 // prefixFlags - IL prefix flags for the call
6342 // callInfo - EE supplied info for the call
6343 // rawILOffset - IL offset of the opcode
6346 // Type of the call's return value.
6349 // opcode can be CEE_CALL, CEE_CALLI, CEE_CALLVIRT, or CEE_NEWOBJ.
6351 // For CEE_NEWOBJ, newobjThis should be the temp grabbed for the allocated
6352 // uninitialized object.
6355 #pragma warning(push)
6356 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
6359 var_types Compiler::impImportCall(OPCODE opcode,
6360 CORINFO_RESOLVED_TOKEN* pResolvedToken,
6361 CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
6362 GenTreePtr newobjThis,
6364 CORINFO_CALL_INFO* callInfo,
6365 IL_OFFSET rawILOffset)
6367 assert(opcode == CEE_CALL || opcode == CEE_CALLVIRT || opcode == CEE_NEWOBJ || opcode == CEE_CALLI);
6369 IL_OFFSETX ilOffset = impCurILOffset(rawILOffset, true);
6370 var_types callRetTyp = TYP_COUNT;
6371 CORINFO_SIG_INFO* sig = nullptr;
6372 CORINFO_METHOD_HANDLE methHnd = nullptr;
6373 CORINFO_CLASS_HANDLE clsHnd = nullptr;
6374 unsigned clsFlags = 0;
6375 unsigned mflags = 0;
6376 unsigned argFlags = 0;
6377 GenTreePtr call = nullptr;
6378 GenTreeArgList* args = nullptr;
6379 CORINFO_THIS_TRANSFORM constraintCallThisTransform = CORINFO_NO_THIS_TRANSFORM;
6380 CORINFO_CONTEXT_HANDLE exactContextHnd = nullptr;
6381 BOOL exactContextNeedsRuntimeLookup = FALSE;
6382 bool canTailCall = true;
6383 const char* szCanTailCallFailReason = nullptr;
6384 int tailCall = prefixFlags & PREFIX_TAILCALL;
6385 bool readonlyCall = (prefixFlags & PREFIX_READONLY) != 0;
6387 // Synchronized methods need to call CORINFO_HELP_MON_EXIT at the end. We could
6388 // do that before tailcalls, but that is probably not the intended
6389 // semantic. So just disallow tailcalls from synchronized methods.
6390 // Also, popping arguments in a varargs function is more work and NYI.
6391 // If we have a security object, we have to keep our frame around for callers
6392 // to see any imperative security.
6393 if (info.compFlags & CORINFO_FLG_SYNCH)
6395 canTailCall = false;
6396 szCanTailCallFailReason = "Caller is synchronized";
6398 #if !FEATURE_FIXED_OUT_ARGS
6399 else if (info.compIsVarArgs)
6401 canTailCall = false;
6402 szCanTailCallFailReason = "Caller is varargs";
6404 #endif // FEATURE_FIXED_OUT_ARGS
6405 else if (opts.compNeedSecurityCheck)
6407 canTailCall = false;
6408 szCanTailCallFailReason = "Caller requires a security check.";
6411 // We only need to cast the return value of pinvoke inlined calls that return small types
6413 // TODO-AMD64-Cleanup: Remove this when we stop interoperating with JIT64, or if we decide to stop
6414 // widening everything! CoreCLR does not support JIT64 interoperation so no need to widen there.
6415 // The existing x64 JIT doesn't bother widening all types to int, so we have to assume for
6416 // the time being that the callee might be compiled by the other JIT and thus the return
6417 // value will need to be widened by us (or not widened at all...)
6419 // ReadyToRun code sticks with default calling convention that does not widen small return types.
6421 bool checkForSmallType = opts.IsJit64Compat() || opts.IsReadyToRun();
6422 bool bIntrinsicImported = false;
6424 CORINFO_SIG_INFO calliSig;
6425 GenTreeArgList* extraArg = nullptr;
6427 /*-------------------------------------------------------------------------
6428 * First create the call node
6431 if (opcode == CEE_CALLI)
6433 /* Get the call site sig */
6434 eeGetSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &calliSig);
6436 callRetTyp = JITtype2varType(calliSig.retType);
6438 call = impImportIndirectCall(&calliSig, ilOffset);
6440 // We don't know the target method, so we have to infer the flags, or
6441 // assume the worst-case.
6442 mflags = (calliSig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC;
6447 unsigned structSize =
6448 (callRetTyp == TYP_STRUCT) ? info.compCompHnd->getClassSize(calliSig.retTypeSigClass) : 0;
6449 printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n",
6450 opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize);
6453 // This should be checked in impImportBlockCode.
6454 assert(!compIsForInlining() || !(impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY));
6459 // We cannot lazily obtain the signature of a CALLI call because it has no method
6460 // handle that we can use, so we need to save its full call signature here.
6461 assert(call->gtCall.callSig == nullptr);
6462 call->gtCall.callSig = new (this, CMK_CorSig) CORINFO_SIG_INFO;
6463 *call->gtCall.callSig = calliSig;
6466 else // (opcode != CEE_CALLI)
6468 CorInfoIntrinsics intrinsicID = CORINFO_INTRINSIC_Count;
6470 // Passing CORINFO_CALLINFO_ALLOWINSTPARAM indicates that this JIT is prepared to
6471 // supply the instantiation parameters necessary to make direct calls to underlying
6472 // shared generic code, rather than calling through instantiating stubs. If the
6473 // returned signature has CORINFO_CALLCONV_PARAMTYPE then this indicates that the JIT
6474 // must indeed pass an instantiation parameter.
6476 methHnd = callInfo->hMethod;
6478 sig = &(callInfo->sig);
6479 callRetTyp = JITtype2varType(sig->retType);
6481 mflags = callInfo->methodFlags;
6486 unsigned structSize = (callRetTyp == TYP_STRUCT) ? info.compCompHnd->getClassSize(sig->retTypeSigClass) : 0;
6487 printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n",
6488 opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize);
6491 if (compIsForInlining())
6493 /* Does this call site have security boundary restrictions? */
6495 if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
6497 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_SECURITY);
6501 /* Does the inlinee need a security check token on the frame */
6503 if (mflags & CORINFO_FLG_SECURITYCHECK)
6505 compInlineResult->NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
6509 /* Does the inlinee use StackCrawlMark */
6511 if (mflags & CORINFO_FLG_DONT_INLINE_CALLER)
6513 compInlineResult->NoteFatal(InlineObservation::CALLEE_STACK_CRAWL_MARK);
6517 /* For now ignore delegate invoke */
6519 if (mflags & CORINFO_FLG_DELEGATE_INVOKE)
6521 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_DELEGATE_INVOKE);
6525 /* For now ignore varargs */
6526 if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
6528 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NATIVE_VARARGS);
6532 if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
6534 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS);
6538 if ((mflags & CORINFO_FLG_VIRTUAL) && (sig->sigInst.methInstCount != 0) && (opcode == CEE_CALLVIRT))
6540 compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_GENERIC_VIRTUAL);
6545 clsHnd = pResolvedToken->hClass;
6547 clsFlags = callInfo->classFlags;
6550 // If this is a call to JitTestLabel.Mark, do "early inlining", and record the test attribute.
6552 // This recognition should really be done by knowing the methHnd of the relevant Mark method(s).
6553 // These should be in mscorlib.h, and available through a JIT/EE interface call.
6554 const char* modName;
6555 const char* className;
6556 const char* methodName;
6557 if ((className = eeGetClassName(clsHnd)) != nullptr &&
6558 strcmp(className, "System.Runtime.CompilerServices.JitTestLabel") == 0 &&
6559 (methodName = eeGetMethodName(methHnd, &modName)) != nullptr && strcmp(methodName, "Mark") == 0)
6561 return impImportJitTestLabelMark(sig->numArgs);
6565 // <NICE> Factor this into getCallInfo </NICE>
6566 if ((mflags & CORINFO_FLG_INTRINSIC) && !pConstrainedResolvedToken)
6568 call = impIntrinsic(newobjThis, clsHnd, methHnd, sig, pResolvedToken->token, readonlyCall,
6569 (canTailCall && (tailCall != 0)), &intrinsicID);
6571 if (call != nullptr)
6573 assert(!(mflags & CORINFO_FLG_VIRTUAL) || (mflags & CORINFO_FLG_FINAL) ||
6574 (clsFlags & CORINFO_FLG_FINAL));
6576 #ifdef FEATURE_READYTORUN_COMPILER
6577 if (call->OperGet() == GT_INTRINSIC)
6579 if (opts.IsReadyToRun())
6581 noway_assert(callInfo->kind == CORINFO_CALL);
6582 call->gtIntrinsic.gtEntryPoint = callInfo->codePointerLookup.constLookup;
6586 call->gtIntrinsic.gtEntryPoint.addr = nullptr;
6591 bIntrinsicImported = true;
6599 call = impSIMDIntrinsic(opcode, newobjThis, clsHnd, methHnd, sig, pResolvedToken->token);
6600 if (call != nullptr)
6602 bIntrinsicImported = true;
6606 #endif // FEATURE_SIMD
6608 if ((mflags & CORINFO_FLG_VIRTUAL) && (mflags & CORINFO_FLG_EnC) && (opcode == CEE_CALLVIRT))
6610 NO_WAY("Virtual call to a function added via EnC is not supported");
6614 if ((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_DEFAULT &&
6615 (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
6616 (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG)
6618 BADCODE("Bad calling convention");
6621 //-------------------------------------------------------------------------
6622 // Construct the call node
6624 // Work out what sort of call we're making.
6625 // Dispense with virtual calls implemented via LDVIRTFTN immediately.
6627 constraintCallThisTransform = callInfo->thisTransform;
6629 exactContextHnd = callInfo->contextHandle;
6630 exactContextNeedsRuntimeLookup = callInfo->exactContextNeedsRuntimeLookup;
6632 // A recursive call is treated as a loop back to the beginning of the method.
6633 if (methHnd == info.compMethodHnd)
6638 JITDUMP("\nFound recursive call in the method. Mark BB%02u to BB%02u as having a backward branch.\n",
6639 fgFirstBB->bbNum, compCurBB->bbNum);
6642 fgMarkBackwardJump(fgFirstBB, compCurBB);
6645 switch (callInfo->kind)
6648 case CORINFO_VIRTUALCALL_STUB:
6650 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
6651 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
6652 if (callInfo->stubLookup.lookupKind.needsRuntimeLookup)
6655 if (compIsForInlining())
6657 // Don't import runtime lookups when inlining
6658 // Inlining has to be aborted in such a case
6659 /* XXX Fri 3/20/2009
6660 * By the way, this would never succeed. If the handle lookup is into the generic
6661 * dictionary for a candidate, you'll generate different dictionary offsets and the
6662 * inlined code will crash.
6664 * To anyone code reviewing this, when could this ever succeed in the future? It'll
6665 * always have a handle lookup. These lookups are safe intra-module, but we're just failing here.
6668 compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_COMPLEX_HANDLE);
6672 GenTreePtr stubAddr = impRuntimeLookupToTree(pResolvedToken, &callInfo->stubLookup, methHnd);
6673 assert(!compDonotInline());
6675 // This is the rough code to set up an indirect stub call
6676 assert(stubAddr != nullptr);
6678 // The stubAddr may be a
6679 // complex expression. As it is evaluated after the args,
6680 // it may cause registered args to be spilled. Simply spill it.
6682 unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall with runtime lookup"));
6683 impAssignTempGen(lclNum, stubAddr, (unsigned)CHECK_SPILL_ALL);
6684 stubAddr = gtNewLclvNode(lclNum, TYP_I_IMPL);
6686 // Create the actual call node
6688 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
6689 (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
6691 call = gtNewIndCallNode(stubAddr, callRetTyp, nullptr);
6693 call->gtFlags |= GTF_EXCEPT | (stubAddr->gtFlags & GTF_GLOB_EFFECT);
6694 call->gtFlags |= GTF_CALL_VIRT_STUB;
6697 // No tailcalls allowed for these yet...
6698 canTailCall = false;
6699 szCanTailCallFailReason = "VirtualCall with runtime lookup";
6704 // OK, the stub is available at compile time.
6706 call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
6707 call->gtCall.gtStubCallStubAddr = callInfo->stubLookup.constLookup.addr;
6708 call->gtFlags |= GTF_CALL_VIRT_STUB;
6709 assert(callInfo->stubLookup.constLookup.accessType != IAT_PPVALUE);
6710 if (callInfo->stubLookup.constLookup.accessType == IAT_PVALUE)
6712 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_VIRTSTUB_REL_INDIRECT;
6716 #ifdef FEATURE_READYTORUN_COMPILER
6717 if (opts.IsReadyToRun())
6719 // Null check is sometimes needed for ready to run to handle
6720 // non-virtual <-> virtual changes between versions
6721 if (callInfo->nullInstanceCheck)
6723 call->gtFlags |= GTF_CALL_NULLCHECK;
6731 case CORINFO_VIRTUALCALL_VTABLE:
6733 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
6734 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
6735 call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
6736 call->gtFlags |= GTF_CALL_VIRT_VTABLE;
6740 case CORINFO_VIRTUALCALL_LDVIRTFTN:
6742 if (compIsForInlining())
6744 compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_CALL_VIA_LDVIRTFTN);
6748 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
6749 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
6750 // OK, we've been told to call via LDVIRTFTN, so just
6751 // take the call now....
6753 args = impPopList(sig->numArgs, &argFlags, sig);
6755 GenTreePtr thisPtr = impPopStack().val;
6756 thisPtr = impTransformThis(thisPtr, pConstrainedResolvedToken, callInfo->thisTransform);
6757 if (compDonotInline())
6762 // Clone the (possibly transformed) "this" pointer
6763 GenTreePtr thisPtrCopy;
6764 thisPtr = impCloneExpr(thisPtr, &thisPtrCopy, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
6765 nullptr DEBUGARG("LDVIRTFTN this pointer"));
6767 GenTreePtr fptr = impImportLdvirtftn(thisPtr, pResolvedToken, callInfo);
6768 if (compDonotInline())
6773 thisPtr = nullptr; // can't reuse it
6775 // Now make an indirect call through the function pointer
6777 unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall through function pointer"));
6778 impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL);
6779 fptr = gtNewLclvNode(lclNum, TYP_I_IMPL);
6781 // Create the actual call node
6783 call = gtNewIndCallNode(fptr, callRetTyp, args, ilOffset);
6784 call->gtCall.gtCallObjp = thisPtrCopy;
6785 call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
6787 #ifdef FEATURE_READYTORUN_COMPILER
6788 if (opts.IsReadyToRun())
6790 // Null check is needed for ready to run to handle
6791 // non-virtual <-> virtual changes between versions
6792 call->gtFlags |= GTF_CALL_NULLCHECK;
6796 // Since we are jumping over some code, check that it's OK to skip that code
6797 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
6798 (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
6804 // This is for a non-virtual, non-interface etc. call
6805 call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
6807 // We remove the nullcheck for the GetType call intrinsic.
6808 // TODO-CQ: JIT64 does not introduce the null check for many more helper calls
6810 if (callInfo->nullInstanceCheck &&
6811 !((mflags & CORINFO_FLG_INTRINSIC) != 0 && (intrinsicID == CORINFO_INTRINSIC_Object_GetType)))
6813 call->gtFlags |= GTF_CALL_NULLCHECK;
6816 #ifdef FEATURE_READYTORUN_COMPILER
6817 if (opts.IsReadyToRun())
6819 call->gtCall.setEntryPoint(callInfo->codePointerLookup.constLookup);
6825 case CORINFO_CALL_CODE_POINTER:
6827 // The EE has asked us to call by computing a code pointer and then doing an
6828 // indirect call. This is because a runtime lookup is required to get the code entry point.
6830 // These calls always follow a uniform calling convention, i.e. no extra hidden params
6831 assert((sig->callConv & CORINFO_CALLCONV_PARAMTYPE) == 0);
6833 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG);
6834 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
6837 impLookupToTree(pResolvedToken, &callInfo->codePointerLookup, GTF_ICON_FTN_ADDR, callInfo->hMethod);
6839 if (compDonotInline())
6844 // Now make an indirect call through the function pointer
6846 unsigned lclNum = lvaGrabTemp(true DEBUGARG("Indirect call through function pointer"));
6847 impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL);
6848 fptr = gtNewLclvNode(lclNum, TYP_I_IMPL);
6850 call = gtNewIndCallNode(fptr, callRetTyp, nullptr, ilOffset);
6851 call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
6852 if (callInfo->nullInstanceCheck)
6854 call->gtFlags |= GTF_CALL_NULLCHECK;
6861 assert(!"unknown call kind");
6865 //-------------------------------------------------------------------------
6868 PREFIX_ASSUME(call != nullptr);
6870 if (mflags & CORINFO_FLG_NOGCCHECK)
6872 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_NOGCCHECK;
6875 // Mark call if it's one of the ones we will maybe treat as an intrinsic
6876 if (intrinsicID == CORINFO_INTRINSIC_Object_GetType || intrinsicID == CORINFO_INTRINSIC_TypeEQ ||
6877 intrinsicID == CORINFO_INTRINSIC_TypeNEQ || intrinsicID == CORINFO_INTRINSIC_GetCurrentManagedThread ||
6878 intrinsicID == CORINFO_INTRINSIC_GetManagedThreadId)
6880 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_SPECIAL_INTRINSIC;
6884 assert(clsHnd || (opcode == CEE_CALLI)); // We're never verifying for CALLI, so this is not set.
6886 /* Some sanity checks */
6888 // CALL_VIRT and NEWOBJ must have a THIS pointer
6889 assert((opcode != CEE_CALLVIRT && opcode != CEE_NEWOBJ) || (sig->callConv & CORINFO_CALLCONV_HASTHIS));
6890 // static bit and hasThis are negations of one another
6891 assert(((mflags & CORINFO_FLG_STATIC) != 0) == ((sig->callConv & CORINFO_CALLCONV_HASTHIS) == 0));
6892 assert(call != nullptr);
6894 /*-------------------------------------------------------------------------
6895 * Check special-cases etc
6898 /* Special case - Check if it is a call to Delegate.Invoke(). */
6900 if (mflags & CORINFO_FLG_DELEGATE_INVOKE)
6902 assert(!compIsForInlining());
6903 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
6904 assert(mflags & CORINFO_FLG_FINAL);
6906 /* Set the delegate flag */
6907 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_DELEGATE_INV;
6909 if (callInfo->secureDelegateInvoke)
6911 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_SECURE_DELEGATE_INV;
6914 if (opcode == CEE_CALLVIRT)
6916 assert(mflags & CORINFO_FLG_FINAL);
6918 /* It should have the GTF_CALL_NULLCHECK flag set. Reset it */
6919 assert(call->gtFlags & GTF_CALL_NULLCHECK);
6920 call->gtFlags &= ~GTF_CALL_NULLCHECK;
6924 CORINFO_CLASS_HANDLE actualMethodRetTypeSigClass;
6925 actualMethodRetTypeSigClass = sig->retTypeSigClass;
6926 if (varTypeIsStruct(callRetTyp))
6928 callRetTyp = impNormStructType(actualMethodRetTypeSigClass);
6929 call->gtType = callRetTyp;
6933 /* Check for varargs */
6934 if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG ||
6935 (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
6937 BADCODE("Varargs not supported.");
6939 #endif // !FEATURE_VARARG
6941 if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG ||
6942 (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
6944 assert(!compIsForInlining());
6946 /* Set the right flags */
6948 call->gtFlags |= GTF_CALL_POP_ARGS;
6949 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_VARARGS;
6951 /* Can't allow tailcall for varargs as it is caller-pop. The caller
6952 will be expecting to pop a certain number of arguments, but if we
6953 tailcall to a function with a different number of arguments, we
6954 are hosed. There are ways around this (caller remembers esp value,
6955 varargs is not caller-pop, etc), but not worth it. */
6956 CLANG_FORMAT_COMMENT_ANCHOR;
6961 canTailCall = false;
6962 szCanTailCallFailReason = "Callee is varargs";
6966 /* Get the total number of arguments - this is already correct
6967 * for CALLI - for methods we have to get it from the call site */
6969 if (opcode != CEE_CALLI)
6972 unsigned numArgsDef = sig->numArgs;
6974 eeGetCallSiteSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, sig);
6977 // We cannot lazily obtain the signature of a vararg call because using its method
6978 // handle will give us only the declared argument list, not the full argument list.
6979 assert(call->gtCall.callSig == nullptr);
6980 call->gtCall.callSig = new (this, CMK_CorSig) CORINFO_SIG_INFO;
6981 *call->gtCall.callSig = *sig;
6984 // For vararg calls we must be sure to load the return type of the
6985 // method actually being called, as well as the return type
6986 // specified in the vararg signature. With type equivalency, these types
6987 // may not be the same.
6988 if (sig->retTypeSigClass != actualMethodRetTypeSigClass)
6990 if (actualMethodRetTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS &&
6991 sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR &&
6992 sig->retType != CORINFO_TYPE_VAR)
6994 // Make sure that all valuetypes (including enums) that we push are loaded.
6995 // This is to guarantee that if a GC is triggered from the prestub of this method,
6996 // all valuetypes in the method signature are already loaded.
6997 // We need to be able to find the size of the valuetypes, but we cannot
6998 // do a class-load from within GC.
6999 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(actualMethodRetTypeSigClass);
7003 assert(numArgsDef <= sig->numArgs);
7006 /* We will have "cookie" as the last argument but we cannot push
7007 * it on the operand stack because we may overflow, so we append it
7008 * to the arg list next after we pop them */
7011 if (mflags & CORINFO_FLG_SECURITYCHECK)
7013 assert(!compIsForInlining());
7015 // Need security prolog/epilog callouts when there is
7016 // imperative security in the method. This is to give security a
7017 // chance to do any setup in the prolog and cleanup in the epilog if needed.
7019 if (compIsForInlining())
7021 // Cannot handle this if the method being imported is itself an inlinee,
7022 // because the inlinee method does not have its own frame.
7024 compInlineResult->NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
7029 tiSecurityCalloutNeeded = true;
7031 // If the current method calls a method which needs a security check,
7032 // (i.e. the method being compiled has imperative security)
7033 // we need to reserve a slot for the security object in
7034 // the current method's stack frame
7035 opts.compNeedSecurityCheck = true;
7039 //--------------------------- Inline NDirect ------------------------------
7041 // For inline cases we technically should look at both the current
7042 // block and the call site block (or just the latter if we've
7043 // fused the EH trees). However the block-related checks pertain to
7044 // EH and we currently won't inline a method with EH. So for
7045 // inlinees, just checking the call site block is sufficient.
7047 // New lexical block here to avoid compilation errors because of GOTOs.
7048 BasicBlock* block = compIsForInlining() ? impInlineInfo->iciBlock : compCurBB;
7049 impCheckForPInvokeCall(call, methHnd, sig, mflags, block);
7052 if (call->gtFlags & GTF_CALL_UNMANAGED)
7054 // We set up the unmanaged call by linking the frame, disabling GC, etc
7055 // This needs to be cleaned up on return
7058 canTailCall = false;
7059 szCanTailCallFailReason = "Callee is native";
7062 checkForSmallType = true;
7064 impPopArgsForUnmanagedCall(call, sig);
7068 else if ((opcode == CEE_CALLI) && (((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_STDCALL) ||
7069 ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_C) ||
7070 ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_THISCALL) ||
7071 ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_FASTCALL)))
7073 if (!info.compCompHnd->canGetCookieForPInvokeCalliSig(sig))
7075 // Normally this only happens with inlining.
7076 // However, a generic method (or type) being NGENd into another module
7077 // can run into this issue as well. There's no easy fall-back for NGEN,
7078 // so instead we fall back to the JIT.
7079 if (compIsForInlining())
7081 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_PINVOKE_COOKIE);
7085 IMPL_LIMITATION("Can't get PInvoke cookie (cross module generics)");
7091 GenTreePtr cookie = eeGetPInvokeCookie(sig);
7093 // This cookie is required to be either a simple GT_CNS_INT or
7094 // an indirection of a GT_CNS_INT
7096 GenTreePtr cookieConst = cookie;
7097 if (cookie->gtOper == GT_IND)
7099 cookieConst = cookie->gtOp.gtOp1;
7101 assert(cookieConst->gtOper == GT_CNS_INT);
7103 // Setting GTF_DONT_CSE on the GT_CNS_INT as well as on the GT_IND (if it exists) will ensure that
7104 // we won't allow this tree to participate in any CSE logic
7106 cookie->gtFlags |= GTF_DONT_CSE;
7107 cookieConst->gtFlags |= GTF_DONT_CSE;
7109 call->gtCall.gtCallCookie = cookie;
7113 canTailCall = false;
7114 szCanTailCallFailReason = "PInvoke calli";
7118 /*-------------------------------------------------------------------------
7119 * Create the argument list
7122 //-------------------------------------------------------------------------
7123 // Special case - for varargs we have an implicit last argument
7125 if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
7127 assert(!compIsForInlining());
7129 void *varCookie, *pVarCookie;
7130 if (!info.compCompHnd->canGetVarArgsHandle(sig))
7132 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_VARARGS_COOKIE);
7136 varCookie = info.compCompHnd->getVarArgsHandle(sig, &pVarCookie);
7137 assert((!varCookie) != (!pVarCookie));
7138 GenTreePtr cookie = gtNewIconEmbHndNode(varCookie, pVarCookie, GTF_ICON_VARG_HDL);
7140 assert(extraArg == nullptr);
7141 extraArg = gtNewArgList(cookie);
7144 //-------------------------------------------------------------------------
7145 // Extra arg for shared generic code and array methods
7147 // Extra argument containing instantiation information is passed in the
7148 // following circumstances:
7149 // (a) To the "Address" method on array classes; the extra parameter is
7150 // the array's type handle (a TypeDesc)
7151 // (b) To shared-code instance methods in generic structs; the extra parameter
7152 // is the struct's type handle (a vtable ptr)
7153 // (c) To shared-code per-instantiation non-generic static methods in generic
7154 // classes and structs; the extra parameter is the type handle
7155 // (d) To shared-code generic methods; the extra parameter is an
7156 // exact-instantiation MethodDesc
7158 // We also set the exact type context associated with the call so we can
7159 // inline the call correctly later on.
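// For example (illustrative, case (d)): a call to a shared generic method M<T>()
// instantiated over a reference type passes the exact MethodDesc for, say, M<string>
// as the hidden instantiation argument, so the shared code can recover the exact T.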
7161 if (sig->callConv & CORINFO_CALLCONV_PARAMTYPE)
7163 assert(call->gtCall.gtCallType == CT_USER_FUNC);
7164 if (clsHnd == nullptr)
7166 NO_WAY("CALLI on parameterized type");
7169 assert(opcode != CEE_CALLI);
7171 GenTreePtr instParam;
7174 // Instantiated generic method
7175 if (((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_METHOD)
7177 CORINFO_METHOD_HANDLE exactMethodHandle =
7178 (CORINFO_METHOD_HANDLE)((SIZE_T)exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK);
7180 if (!exactContextNeedsRuntimeLookup)
7182 #ifdef FEATURE_READYTORUN_COMPILER
7183 if (opts.IsReadyToRun())
7186 impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_METHOD_HDL, exactMethodHandle);
7187 if (instParam == nullptr)
7195 instParam = gtNewIconEmbMethHndNode(exactMethodHandle);
7196 info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(exactMethodHandle);
7201 instParam = impTokenToHandle(pResolvedToken, &runtimeLookup, TRUE /*mustRestoreHandle*/);
7202 if (instParam == nullptr)
7209 // otherwise must be an instance method in a generic struct,
7210 // a static method in a generic type, or a runtime-generated array method
7213 assert(((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_CLASS);
7214 CORINFO_CLASS_HANDLE exactClassHandle =
7215 (CORINFO_CLASS_HANDLE)((SIZE_T)exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK);
7217 if (compIsForInlining() && (clsFlags & CORINFO_FLG_ARRAY) != 0)
7219 compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_ARRAY_METHOD);
7223 if ((clsFlags & CORINFO_FLG_ARRAY) && readonlyCall)
7225 // We indicate "readonly" to the Address operation by using a null instParam.
7227 instParam = gtNewIconNode(0, TYP_REF);
7230 if (!exactContextNeedsRuntimeLookup)
7232 #ifdef FEATURE_READYTORUN_COMPILER
7233 if (opts.IsReadyToRun())
7236 impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_CLASS_HDL, exactClassHandle);
7237 if (instParam == nullptr)
7245 instParam = gtNewIconEmbClsHndNode(exactClassHandle);
7246 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(exactClassHandle);
7251 instParam = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup, TRUE /*mustRestoreHandle*/);
7252 if (instParam == nullptr)
7259 assert(extraArg == nullptr);
7260 extraArg = gtNewArgList(instParam);
7263 // Inlining may need the exact type context (exactContextHnd) if we're inlining shared generic code, in particular
7264 // to inline 'polytypic' operations such as static field accesses, type tests and method calls which
7265 // rely on the exact context. The exactContextHnd is passed back to the JitInterface at appropriate points.
7266 // exactContextHnd is not currently required when inlining shared generic code into shared
7267 // generic code, since the inliner aborts whenever shared code polytypic operations are encountered
7268 // (e.g. anything marked needsRuntimeLookup)
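// For example (illustrative): inlining shared generic code that performs a type test
// against T requires the exact context handle to resolve what T actually is at this
// call site; without it the inlinee's "polytypic" operations cannot be expanded.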
7269 if (exactContextNeedsRuntimeLookup)
7271 exactContextHnd = nullptr;
7274 //-------------------------------------------------------------------------
7275 // The main group of arguments
7277 args = call->gtCall.gtCallArgs = impPopList(sig->numArgs, &argFlags, sig, extraArg);
7281 call->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
7284 //-------------------------------------------------------------------------
7285 // The "this" pointer
7287 if (!(mflags & CORINFO_FLG_STATIC) && !((opcode == CEE_NEWOBJ) && (newobjThis == nullptr)))
7291 if (opcode == CEE_NEWOBJ)
7297 obj = impPopStack().val;
7298 obj = impTransformThis(obj, pConstrainedResolvedToken, constraintCallThisTransform);
7299 if (compDonotInline())
7305 /* Is this a virtual or interface call? */
7307 if ((call->gtFlags & GTF_CALL_VIRT_KIND_MASK) != GTF_CALL_NONVIRT)
7309 /* only true object pointers can be virtual */
7311 assert(obj->gtType == TYP_REF);
7317 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_NONVIRT_SAME_THIS;
7321 /* Store the "this" value in the call */
7323 call->gtFlags |= obj->gtFlags & GTF_GLOB_EFFECT;
7324 call->gtCall.gtCallObjp = obj;
7327 //-------------------------------------------------------------------------
7328 // The "this" pointer for "newobj"
7330 if (opcode == CEE_NEWOBJ)
7332 if (clsFlags & CORINFO_FLG_VAROBJSIZE)
7334 assert(!(clsFlags & CORINFO_FLG_ARRAY)); // arrays handled separately
7335 // This is a 'new' of a variable-sized object, where
7336 // the constructor is to return the object. In this case
7337 // the constructor claims to return VOID but we know it
7338 // actually returns the new object.
7339 assert(callRetTyp == TYP_VOID);
7340 callRetTyp = TYP_REF;
7341 call->gtType = TYP_REF;
7342 impSpillSpecialSideEff();
7344 impPushOnStack(call, typeInfo(TI_REF, clsHnd));
7348 if (clsFlags & CORINFO_FLG_DELEGATE)
7350 // The new inliner morphs it in impImportCall.
7351 // This will allow us to inline the call to the delegate constructor.
7352 call = fgOptimizeDelegateConstructor(call, &exactContextHnd);
7355 if (!bIntrinsicImported)
7358 #if defined(DEBUG) || defined(INLINE_DATA)
7360 // Keep track of the raw IL offset of the call
7361 call->gtCall.gtRawILOffset = rawILOffset;
7363 #endif // defined(DEBUG) || defined(INLINE_DATA)
7365 // Is it an inline candidate?
7366 impMarkInlineCandidate(call, exactContextHnd, callInfo);
7369 // append the call node.
7370 impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
7372 // Now push the value of the 'new' onto the stack
7374 // This is a 'new' of a non-variable sized object.
7375 // Append the new node (op1) to the statement list,
7376 // and then push the local holding the value of this
7377 // new instruction on the stack.
7379 if (clsFlags & CORINFO_FLG_VALUECLASS)
7381 assert(newobjThis->gtOper == GT_ADDR && newobjThis->gtOp.gtOp1->gtOper == GT_LCL_VAR);
7383 unsigned tmp = newobjThis->gtOp.gtOp1->gtLclVarCommon.gtLclNum;
7384 impPushOnStack(gtNewLclvNode(tmp, lvaGetRealType(tmp)), verMakeTypeInfo(clsHnd).NormaliseForStack());
7388 if (newobjThis->gtOper == GT_COMMA)
7390 // In coreclr the callout can be inserted even if verification is disabled
7391 // so we cannot rely on tiVerificationNeeded alone
7393 // We must have inserted the callout. Get the real newobj.
7394 newobjThis = newobjThis->gtOp.gtOp2;
7397 assert(newobjThis->gtOper == GT_LCL_VAR);
7398 impPushOnStack(gtNewLclvNode(newobjThis->gtLclVarCommon.gtLclNum, TYP_REF), typeInfo(TI_REF, clsHnd));
7408 // This check cannot be performed for implicit tail calls for the reason
7409 // that impIsImplicitTailCallCandidate() is not checking whether return
7410 // types are compatible before marking a call node with PREFIX_TAILCALL_IMPLICIT.
7411 // As a result it is possible that in the following case, we find that
7412 // the type stack is non-empty if Callee() is considered for implicit tail calling:
7414 // int Caller(..) { .... void Callee(); ret val; ... }
7416 // Note that we cannot check return type compatibility before ImpImportCall()
7417 // as we don't have the required info, or we would need to duplicate some of the logic of ImpImportCall() itself.
7420 // For implicit tail calls, we perform this check after return types are
7421 // known to be compatible.
7422 if ((tailCall & PREFIX_TAILCALL_EXPLICIT) && (verCurrentState.esStackDepth != 0))
7424 BADCODE("Stack should be empty after tailcall");
7427 // Note that we cannot relax this condition with genActualType() as
7428 // the calling convention dictates that the caller of a function with
7429 // a small-typed return value is responsible for normalizing the return value.
7432 !impTailCallRetTypeCompatible(info.compRetType, info.compMethodInfo->args.retTypeClass, callRetTyp,
7433 callInfo->sig.retTypeClass))
7435 canTailCall = false;
7436 szCanTailCallFailReason = "Return types are not tail call compatible";
7439 // Stack empty check for implicit tail calls.
7440 if (canTailCall && (tailCall & PREFIX_TAILCALL_IMPLICIT) && (verCurrentState.esStackDepth != 0))
7442 #ifdef _TARGET_AMD64_
7443 // JIT64 Compatibility: Opportunistic tail call stack mismatch throws a VerificationException
7444 // in JIT64, not an InvalidProgramException.
7445 Verify(false, "Stack should be empty after tailcall");
7446 #else // _TARGET_64BIT_
7447 BADCODE("Stack should be empty after tailcall");
7448 #endif //!_TARGET_64BIT_
7451 // assert(compCurBB is not a catch, finally or filter block);
7452 // assert(compCurBB is not a try block protected by a finally block);
7454 // Check for permission to tailcall
7455 bool explicitTailCall = (tailCall & PREFIX_TAILCALL_EXPLICIT) != 0;
7457 assert(!explicitTailCall || compCurBB->bbJumpKind == BBJ_RETURN);
7461 // True virtual or indirect calls shouldn't pass in a callee handle.
7462 CORINFO_METHOD_HANDLE exactCalleeHnd = ((call->gtCall.gtCallType != CT_USER_FUNC) ||
7463 ((call->gtFlags & GTF_CALL_VIRT_KIND_MASK) != GTF_CALL_NONVIRT))
7466 GenTreePtr thisArg = call->gtCall.gtCallObjp;
7468 if (info.compCompHnd->canTailCall(info.compMethodHnd, methHnd, exactCalleeHnd, explicitTailCall))
7471 if (explicitTailCall)
7473 // In case of explicit tail calls, mark it so that it is not considered
7475 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_EXPLICIT_TAILCALL;
7479 printf("\nGTF_CALL_M_EXPLICIT_TAILCALL bit set for call ");
7487 #if FEATURE_TAILCALL_OPT
7488 // Must be an implicit tail call.
7489 assert((tailCall & PREFIX_TAILCALL_IMPLICIT) != 0);
7491 // It is possible that a call node is both an inline candidate and marked
7492 // for opportunistic tail calling. Inlining happens before morphing of
7493 // trees. If inlining of an inline candidate gets aborted for whatever
7494 // reason, it will survive to the morphing stage, at which point it will be
7495 // transformed into a tail call after performing additional checks.
7497 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_IMPLICIT_TAILCALL;
7501 printf("\nGTF_CALL_M_IMPLICIT_TAILCALL bit set for call ");
7507 #else //! FEATURE_TAILCALL_OPT
7508 NYI("Implicit tail call prefix on a target which doesn't support opportunistic tail calls");
7510 #endif // FEATURE_TAILCALL_OPT
7513 // we can't report success just yet...
7517 canTailCall = false;
7518 // canTailCall reported its reasons already
7522 printf("\ninfo.compCompHnd->canTailCall returned false for call ");
7531 // If this assert fires it means that canTailCall was set to false without setting a reason!
7532 assert(szCanTailCallFailReason != nullptr);
7537 printf("\nRejecting %splicit tail call for call ", explicitTailCall ? "ex" : "im");
7539 printf(": %s\n", szCanTailCallFailReason);
7542 info.compCompHnd->reportTailCallDecision(info.compMethodHnd, methHnd, explicitTailCall, TAILCALL_FAIL,
7543 szCanTailCallFailReason);
7547 // Note: we assume that small return types are already normalized by the managed callee
7548 // or by the pinvoke stub for calls to unmanaged code.
7552 if (!bIntrinsicImported)
7555 // Things needed to be checked when bIntrinsicImported is false.
7558 assert(call->gtOper == GT_CALL);
7559 assert(sig != nullptr);
7561 // Tail calls require us to save the call site's sig info so we can obtain an argument
7562 // copying thunk from the EE later on.
7563 if (call->gtCall.callSig == nullptr)
7565 call->gtCall.callSig = new (this, CMK_CorSig) CORINFO_SIG_INFO;
7566 *call->gtCall.callSig = *sig;
7569 if (compIsForInlining() && opcode == CEE_CALLVIRT)
7571 GenTreePtr callObj = call->gtCall.gtCallObjp;
7572 assert(callObj != nullptr);
7574 unsigned callKind = call->gtFlags & GTF_CALL_VIRT_KIND_MASK;
7576 if (((callKind != GTF_CALL_NONVIRT) || (call->gtFlags & GTF_CALL_NULLCHECK)) &&
7577 impInlineIsGuaranteedThisDerefBeforeAnySideEffects(call->gtCall.gtCallArgs, callObj,
7578 impInlineInfo->inlArgInfo))
7580 impInlineInfo->thisDereferencedFirst = true;
7584 #if defined(DEBUG) || defined(INLINE_DATA)
7586 // Keep track of the raw IL offset of the call
7587 call->gtCall.gtRawILOffset = rawILOffset;
7589 #endif // defined(DEBUG) || defined(INLINE_DATA)
7591 // Is it an inline candidate?
7592 impMarkInlineCandidate(call, exactContextHnd, callInfo);
7595 // Push or append the result of the call
7596 if (callRetTyp == TYP_VOID)
7598 if (opcode == CEE_NEWOBJ)
7600 // we actually did push something, so don't spill the thing we just pushed.
7601 assert(verCurrentState.esStackDepth > 0);
7602 impAppendTree(call, verCurrentState.esStackDepth - 1, impCurStmtOffs);
7606 impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
7611 impSpillSpecialSideEff();
7613 if (clsFlags & CORINFO_FLG_ARRAY)
7615 eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
7618 // Find the return type used for verification by interpreting the method signature.
7619 // NB: we are clobbering the already established sig.
7620 if (tiVerificationNeeded)
7622 // Actually, we never get the sig for the original method.
7623 sig = &(callInfo->verSig);
7626 typeInfo tiRetVal = verMakeTypeInfo(sig->retType, sig->retTypeClass);
7627 tiRetVal.NormaliseForStack();
7629 // The CEE_READONLY prefix modifies the verification semantics of an Address
7630 // operation on an array type.
7631 if ((clsFlags & CORINFO_FLG_ARRAY) && readonlyCall && tiRetVal.IsByRef())
7633 tiRetVal.SetIsReadonlyByRef();
7636 if (tiVerificationNeeded)
7638 // We assume all calls return permanent home byrefs. If they
7639 // didn't they wouldn't be verifiable. This is also covering
7640 // the Address() helper for multidimensional arrays.
7641 if (tiRetVal.IsByRef())
7643 tiRetVal.SetIsPermanentHomeByRef();
7647 if (call->gtOper == GT_CALL)
7649 // Sometimes "call" is not a GT_CALL (if we imported an intrinsic that didn't turn into a call)
7650 if (varTypeIsStruct(callRetTyp))
7652 call = impFixupCallStructReturn(call, sig->retTypeClass);
7655 if ((call->gtFlags & GTF_CALL_INLINE_CANDIDATE) != 0)
7657 assert(opts.OptEnabled(CLFLG_INLINING));
7659 // Make the call its own tree (spill the stack if needed).
7660 impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
7662 // TODO: Still using the widened type.
7663 call = gtNewInlineCandidateReturnExpr(call, genActualType(callRetTyp));
7667 // For non-candidates we must also spill, since we
7668 // might have locals live on the eval stack that this call can modify.
7670 impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("non-inline candidate call"));
7674 if (!bIntrinsicImported)
7676 //-------------------------------------------------------------------------
7678 /* If the call is of a small type and the callee is managed, the callee will normalize the result before returning.
7680 However, we need to normalize small type values returned by unmanaged
7681 functions (pinvoke). The pinvoke stub does the normalization, but we need to do it here
7682 if we use the shorter inlined pinvoke stub. */
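// Illustrative sketch (assumption): for an inlined pinvoke returning, say, a 16-bit value, the cast
// below produces CAST<short>(call) with an int actual type, so the small value on the IL stack is
// properly sign-extended.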
7684 if (checkForSmallType && varTypeIsIntegral(callRetTyp) && genTypeSize(callRetTyp) < genTypeSize(TYP_INT))
7686 call = gtNewCastNode(genActualType(callRetTyp), call, callRetTyp);
7690 impPushOnStack(call, tiRetVal);
7693 // VSD functions get a new call target each time we getCallInfo, so clear the cache.
7694 // Also, the call info cache for CALLI instructions is largely incomplete, so clear it out.
7695 // if ( (opcode == CEE_CALLI) || (callInfoCache.fetchCallInfo().kind == CORINFO_VIRTUALCALL_STUB))
7696 // callInfoCache.uncacheCallInfo();
7701 #pragma warning(pop)
7704 bool Compiler::impMethodInfo_hasRetBuffArg(CORINFO_METHOD_INFO* methInfo)
7706 CorInfoType corType = methInfo->args.retType;
7708 if ((corType == CORINFO_TYPE_VALUECLASS) || (corType == CORINFO_TYPE_REFANY))
7710 // We have some kind of STRUCT being returned
7712 structPassingKind howToReturnStruct = SPK_Unknown;
7714 var_types returnType = getReturnTypeForStruct(methInfo->args.retTypeClass, &howToReturnStruct);
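// Illustrative example (assumption): on typical 64-bit ABIs a struct too large for the return
// registers (e.g. a 32-byte struct on SysV AMD64) yields SPK_ByReference here, i.e. the callee
// writes the result through a hidden return buffer argument, while smaller structs come back
// in registers.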
7716 if (howToReturnStruct == SPK_ByReference)
7727 var_types Compiler::impImportJitTestLabelMark(int numArgs)
7729 TestLabelAndNum tlAndN;
7733 StackEntry se = impPopStack();
7734 assert(se.seTypeInfo.GetType() == TI_INT);
7735 GenTreePtr val = se.val;
7736 assert(val->IsCnsIntOrI());
7737 tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue();
7739 else if (numArgs == 3)
7741 StackEntry se = impPopStack();
7742 assert(se.seTypeInfo.GetType() == TI_INT);
7743 GenTreePtr val = se.val;
7744 assert(val->IsCnsIntOrI());
7745 tlAndN.m_num = val->AsIntConCommon()->IconValue();
7747 assert(se.seTypeInfo.GetType() == TI_INT);
7749 assert(val->IsCnsIntOrI());
7750 tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue();
7757 StackEntry expSe = impPopStack();
7758 GenTreePtr node = expSe.val;
7760 // There are a small number of special cases, where we actually put the annotation on a subnode.
7761 if (tlAndN.m_tl == TL_LoopHoist && tlAndN.m_num >= 100)
7763 // A loop hoist annotation with value >= 100 means that the expression should be a static field access,
7764 // a GT_IND of a static field address, which should be the sum of a (hoistable) helper call and possibly some
7765 // offset within the static field block whose address is returned by the helper call.
7766 // The annotation is saying that this address calculation, but not the entire access, should be hoisted.
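// Illustrative: an annotation value of 105 thus becomes m_num == 5 below and is attached to the
// address computation (node->gtOp.gtOp1) rather than to the GT_IND itself.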
7767 GenTreePtr helperCall = nullptr;
7768 assert(node->OperGet() == GT_IND);
7769 tlAndN.m_num -= 100;
7770 GetNodeTestData()->Set(node->gtOp.gtOp1, tlAndN);
7771 GetNodeTestData()->Remove(node);
7775 GetNodeTestData()->Set(node, tlAndN);
7778 impPushOnStack(node, expSe.seTypeInfo);
7779 return node->TypeGet();
7783 //-----------------------------------------------------------------------------------
7784 // impFixupCallStructReturn: For a call node that returns a struct type either
7785 // adjust the return type to an enregisterable type, or set the flag to indicate
7786 // struct return via retbuf arg.
7789 // call - GT_CALL GenTree node
7790 // retClsHnd - Class handle of return type of the call
7793 // Returns new GenTree node after fixing struct return of call node
7795 GenTreePtr Compiler::impFixupCallStructReturn(GenTreePtr call, CORINFO_CLASS_HANDLE retClsHnd)
7797 assert(call->gtOper == GT_CALL);
7799 if (!varTypeIsStruct(call))
7804 call->gtCall.gtRetClsHnd = retClsHnd;
7806 GenTreeCall* callNode = call->AsCall();
7808 #if FEATURE_MULTIREG_RET
7809 // Initialize Return type descriptor of call node
7810 ReturnTypeDesc* retTypeDesc = callNode->GetReturnTypeDesc();
7811 retTypeDesc->InitializeStructReturnType(this, retClsHnd);
7812 #endif // FEATURE_MULTIREG_RET
7814 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
7816 // Not allowed for FEATURE_CORCLR which is the only SKU available for System V OSs.
7817 assert(!callNode->IsVarargs() && "varargs not allowed for System V OSs.");
7819 // The return type will remain as the incoming struct type unless normalized to a
7820 // single eightbyte return type below.
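// Illustrative (assumption): under the SysV AMD64 classification a struct such as
//     struct { double x; double y; }
// is returned in two registers, giving retRegCount == 2 below, whereas
//     struct { int x; }
// is a single eightbyte and is returned in one register (retRegCount == 1).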
7821 callNode->gtReturnType = call->gtType;
7823 unsigned retRegCount = retTypeDesc->GetReturnRegCount();
7824 if (retRegCount != 0)
7826 if (retRegCount == 1)
7828 // struct returned in a single register
7829 callNode->gtReturnType = retTypeDesc->GetReturnRegType(0);
7833 // must be a struct returned in two registers
7834 assert(retRegCount == 2);
7836 if ((!callNode->CanTailCall()) && (!callNode->IsInlineCandidate()))
7838 // Force a call returning multi-reg struct to be always of the IR form
7841 // No need to assign a multi-reg struct to a local var if:
7842 // - It is a tail call or
7843 // - The call is marked for in-lining later
7844 return impAssignMultiRegTypeToVar(call, retClsHnd);
7850 // struct not returned in registers, i.e. returned via a hidden retbuf arg.
7851 callNode->gtCallMoreFlags |= GTF_CALL_M_RETBUFFARG;
7854 #else // not FEATURE_UNIX_AMD64_STRUCT_PASSING
7856 #if FEATURE_MULTIREG_RET && defined(_TARGET_ARM_)
7857 // There is no fixup necessary if the return type is a HFA struct.
7858 // HFA structs are returned in registers for ARM32 and ARM64
7860 if (!call->gtCall.IsVarargs() && IsHfa(retClsHnd))
7862 if (call->gtCall.CanTailCall())
7864 if (info.compIsVarArgs)
7866 // We cannot tail call because control needs to return to fixup the calling
7867 // convention for result return.
7868 call->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
7872 // If we can tail call returning HFA, then don't assign it to
7873 // a variable back and forth.
7878 if (call->gtFlags & GTF_CALL_INLINE_CANDIDATE)
7883 unsigned retRegCount = retTypeDesc->GetReturnRegCount();
7884 if (retRegCount >= 2)
7886 return impAssignMultiRegTypeToVar(call, retClsHnd);
7889 #endif // _TARGET_ARM_
7891 // Check for TYP_STRUCT type that wraps a primitive type
7892 // Such structs are returned using a single register
7893 // and we change the return type on those calls here.
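// Illustrative (assumption): a struct such as
//     struct Wrapper { int value; };
// is classified as a single-register return, so the call's return type is changed from
// TYP_STRUCT to the corresponding primitive type (here TYP_INT).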
7895 structPassingKind howToReturnStruct;
7896 var_types returnType = getReturnTypeForStruct(retClsHnd, &howToReturnStruct);
7898 if (howToReturnStruct == SPK_ByReference)
7900 assert(returnType == TYP_UNKNOWN);
7901 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_RETBUFFARG;
7905 assert(returnType != TYP_UNKNOWN);
7906 call->gtCall.gtReturnType = returnType;
7908 // ToDo: Refactor this common code sequence into its own method as it is used 4+ times
7909 if ((returnType == TYP_LONG) && (compLongUsed == false))
7911 compLongUsed = true;
7913 else if (((returnType == TYP_FLOAT) || (returnType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
7915 compFloatingPointUsed = true;
7918 #if FEATURE_MULTIREG_RET
7919 unsigned retRegCount = retTypeDesc->GetReturnRegCount();
7920 assert(retRegCount != 0);
7922 if (retRegCount >= 2)
7924 if ((!callNode->CanTailCall()) && (!callNode->IsInlineCandidate()))
7926 // Force a call returning multi-reg struct to be always of the IR form
7929 // No need to assign a multi-reg struct to a local var if:
7930 // - It is a tail call or
7931 // - The call is marked for in-lining later
7932 return impAssignMultiRegTypeToVar(call, retClsHnd);
7935 #endif // FEATURE_MULTIREG_RET
7938 #endif // not FEATURE_UNIX_AMD64_STRUCT_PASSING
7943 /*****************************************************************************
7944 For struct return values, re-type the operand in the case where the ABI
7945 does not use a struct return buffer
7946 Note that this method is only called for !_TARGET_X86_
7949 GenTreePtr Compiler::impFixupStructReturnType(GenTreePtr op, CORINFO_CLASS_HANDLE retClsHnd)
7951 assert(varTypeIsStruct(info.compRetType));
7952 assert(info.compRetBuffArg == BAD_VAR_NUM);
7954 #if defined(_TARGET_XARCH_)
7956 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
7957 // No VarArgs for CoreCLR on x64 Unix
7958 assert(!info.compIsVarArgs);
7960 // Is method returning a multi-reg struct?
7961 if (varTypeIsStruct(info.compRetNativeType) && IsMultiRegReturnedType(retClsHnd))
7963 // In case of multi-reg struct return, we force IR to be one of the following:
7964 // GT_RETURN(lclvar) or GT_RETURN(call). If op is anything other than a
7965 // lclvar or call, it is assigned to a temp to create: temp = op and GT_RETURN(tmp).
7967 if (op->gtOper == GT_LCL_VAR)
7969 // Make sure that this struct stays in memory and doesn't get promoted.
7970 unsigned lclNum = op->gtLclVarCommon.gtLclNum;
7971 lvaTable[lclNum].lvIsMultiRegRet = true;
7973 // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
7974 op->gtFlags |= GTF_DONT_CSE;
7979 if (op->gtOper == GT_CALL)
7984 return impAssignMultiRegTypeToVar(op, retClsHnd);
7986 #else // !FEATURE_UNIX_AMD64_STRUCT_PASSING
7987 assert(info.compRetNativeType != TYP_STRUCT);
7988 #endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING
7990 #elif FEATURE_MULTIREG_RET && defined(_TARGET_ARM_)
7992 if (varTypeIsStruct(info.compRetNativeType) && !info.compIsVarArgs && IsHfa(retClsHnd))
7994 if (op->gtOper == GT_LCL_VAR)
7996 // This LCL_VAR is an HFA return value, it stays as a TYP_STRUCT
7997 unsigned lclNum = op->gtLclVarCommon.gtLclNum;
7998 // Make sure this struct type stays as struct so that we can return it as an HFA
7999 lvaTable[lclNum].lvIsMultiRegRet = true;
8001 // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
8002 op->gtFlags |= GTF_DONT_CSE;
8007 if (op->gtOper == GT_CALL)
8009 if (op->gtCall.IsVarargs())
8011 // We cannot tail call because control needs to return to fixup the calling
8012 // convention for result return.
8013 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL;
8014 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
8021 return impAssignMultiRegTypeToVar(op, retClsHnd);
8024 #elif FEATURE_MULTIREG_RET && defined(_TARGET_ARM64_)
8026 // Is method returning a multi-reg struct?
8027 if (IsMultiRegReturnedType(retClsHnd))
8029 if (op->gtOper == GT_LCL_VAR)
8031 // This LCL_VAR stays as a TYP_STRUCT
8032 unsigned lclNum = op->gtLclVarCommon.gtLclNum;
8034 // Make sure this struct type is not struct promoted
8035 lvaTable[lclNum].lvIsMultiRegRet = true;
8037 // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
8038 op->gtFlags |= GTF_DONT_CSE;
8043 if (op->gtOper == GT_CALL)
8045 if (op->gtCall.IsVarargs())
8047 // We cannot tail call because control needs to return to fixup the calling
8048 // convention for result return.
8049 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL;
8050 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
8057 return impAssignMultiRegTypeToVar(op, retClsHnd);
8060 #endif // FEATURE_MULTIREG_RET && FEATURE_HFA
8063 // adjust the type away from struct to integral
8064 // and no normalizing
8065 if (op->gtOper == GT_LCL_VAR)
8067 op->ChangeOper(GT_LCL_FLD);
8069 else if (op->gtOper == GT_OBJ)
8071 GenTreePtr op1 = op->AsObj()->Addr();
8073 // We will fold away OBJ/ADDR
8074 // except for OBJ/ADDR/INDEX
8075 // as the array type influences the array element's offset
8076 // Later in this method we change op->gtType to info.compRetNativeType
8077 // This is not correct when op is a GT_INDEX as the starting offset
8078 // for the array elements 'elemOffs' is different for an array of
8079 // TYP_REF than an array of TYP_STRUCT (which simply wraps a TYP_REF)
8080 // Also refer to the GTF_INX_REFARR_LAYOUT flag
8082 if ((op1->gtOper == GT_ADDR) && (op1->gtOp.gtOp1->gtOper != GT_INDEX))
8084 // Change '*(&X)' to 'X' and see if we can do better
8085 op = op1->gtOp.gtOp1;
8086 goto REDO_RETURN_NODE;
8088 op->gtObj.gtClass = NO_CLASS_HANDLE;
8089 op->ChangeOperUnchecked(GT_IND);
8090 op->gtFlags |= GTF_IND_TGTANYWHERE;
8092 else if (op->gtOper == GT_CALL)
8094 if (op->AsCall()->TreatAsHasRetBufArg(this))
8096 // This must be one of those 'special' helpers that don't
8097 // really have a return buffer, but instead use it as a way
8098 // to keep the trees cleaner with fewer address-taken temps.
8100 // Well now we have to materialize the return buffer as
8101 // an address-taken temp. Then we can return the temp.
8103 // NOTE: this code assumes that since the call directly
8104 // feeds the return, then the call must be returning the
8105 // same structure/class/type.
8107 unsigned tmpNum = lvaGrabTemp(true DEBUGARG("pseudo return buffer"));
8109 // No need to spill anything as we're about to return.
8110 impAssignTempGen(tmpNum, op, info.compMethodInfo->args.retTypeClass, (unsigned)CHECK_SPILL_NONE);
8112 // Don't create both a GT_ADDR & GT_OBJ just to undo all of that; instead,
8113 // jump directly to a GT_LCL_FLD.
8114 op = gtNewLclvNode(tmpNum, info.compRetNativeType);
8115 op->ChangeOper(GT_LCL_FLD);
8119 assert(info.compRetNativeType == op->gtCall.gtReturnType);
8121 // Don't change the gtType of the node just yet, it will get changed later.
8125 else if (op->gtOper == GT_COMMA)
8127 op->gtOp.gtOp2 = impFixupStructReturnType(op->gtOp.gtOp2, retClsHnd);
8130 op->gtType = info.compRetNativeType;
8135 /*****************************************************************************
8136 CEE_LEAVE may be jumping out of a protected block, viz, a catch or a
8137 finally-protected try. We find the finally blocks protecting the current
8138 offset (in order) by walking over the complete exception table and
8139 finding enclosing clauses. This assumes that the table is sorted.
8140 This will create a series of BBJ_CALLFINALLY -> BBJ_CALLFINALLY ... -> BBJ_ALWAYS.
8142 If we are leaving a catch handler, we need to attach the
8143 CPX_ENDCATCHes to the correct BBJ_CALLFINALLY blocks.
8145 After this function, the BBJ_LEAVE block has been converted to a different type.
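Illustrative sketch (an assumed typical shape, not taken from the code below) for a leave that
exits two nested finally-protected trys:

    BBJ_CALLFINALLY (inner finally) -> BBJ_CALLFINALLY (outer finally) -> BBJ_ALWAYS -> leaveTarget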
8148 #if !FEATURE_EH_FUNCLETS
8150 void Compiler::impImportLeave(BasicBlock* block)
8155 printf("\nBefore import CEE_LEAVE:\n");
8156 fgDispBasicBlocks();
8161 bool invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created)
8162 unsigned blkAddr = block->bbCodeOffs;
8163 BasicBlock* leaveTarget = block->bbJumpDest;
8164 unsigned jmpAddr = leaveTarget->bbCodeOffs;
8166 // LEAVE clears the stack: spill any side effects and set the stack depth to 0
8168 impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave"));
8169 verCurrentState.esStackDepth = 0;
8171 assert(block->bbJumpKind == BBJ_LEAVE);
8172 assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != NULL); // should be a BB boundary
8174 BasicBlock* step = DUMMY_INIT(NULL);
8175 unsigned encFinallies = 0; // Number of enclosing finallies.
8176 GenTreePtr endCatches = NULL;
8177 GenTreePtr endLFin = NULL; // The statement tree to indicate the end of locally-invoked finally.
8182 for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
8184 // Grab the handler offsets
8186 IL_OFFSET tryBeg = HBtab->ebdTryBegOffs();
8187 IL_OFFSET tryEnd = HBtab->ebdTryEndOffs();
8188 IL_OFFSET hndBeg = HBtab->ebdHndBegOffs();
8189 IL_OFFSET hndEnd = HBtab->ebdHndEndOffs();
8191 /* Is this a catch-handler we are CEE_LEAVEing out of?
8192 * If so, we need to call CORINFO_HELP_ENDCATCH.
8195 if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd))
8197 // Can't CEE_LEAVE out of a finally/fault handler
8198 if (HBtab->HasFinallyOrFaultHandler())
8199 BADCODE("leave out of fault/finally block");
8201 // Create the call to CORINFO_HELP_ENDCATCH
8202 GenTreePtr endCatch = gtNewHelperCallNode(CORINFO_HELP_ENDCATCH, TYP_VOID);
8204 // Make a list of all the currently pending endCatches
8206 endCatches = gtNewOperNode(GT_COMMA, TYP_VOID, endCatches, endCatch);
8208 endCatches = endCatch;
8213 printf("impImportLeave - BB%02u jumping out of catch handler EH#%u, adding call to "
8214 "CORINFO_HELP_ENDCATCH\n",
8215 block->bbNum, XTnum);
8219 else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
8220 !jitIsBetween(jmpAddr, tryBeg, tryEnd))
8222 /* This is a finally-protected try we are jumping out of */
8224 /* If there are any pending endCatches, and we have already
8225 jumped out of a finally-protected try, then the endCatches
8226 have to be put in a block in an outer try for async
8227 exceptions to work correctly.
8228 Else, just append them to the original block */
8230 BasicBlock* callBlock;
8232 assert(!encFinallies == !endLFin); // if we have finallies, we better have an endLFin tree, and vice-versa
8234 if (encFinallies == 0)
8236 assert(step == DUMMY_INIT(NULL));
8238 callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY
8241 impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
8246 printf("impImportLeave - jumping out of a finally-protected try, convert block to BBJ_CALLFINALLY "
8247 "block BB%02u [%08p]\n",
8248 callBlock->bbNum, dspPtr(callBlock));
8254 assert(step != DUMMY_INIT(NULL));
8256 /* Calling the finally block */
8257 callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, XTnum + 1, 0, step);
8258 assert(step->bbJumpKind == BBJ_ALWAYS);
8259 step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next
8260 // finally in the chain)
8261 step->bbJumpDest->bbRefs++;
8263 /* The new block will inherit this block's weight */
8264 callBlock->setBBWeight(block->bbWeight);
8265 callBlock->bbFlags |= block->bbFlags & BBF_RUN_RARELY;
8270 printf("impImportLeave - jumping out of a finally-protected try, new BBJ_CALLFINALLY block BB%02u "
8272 callBlock->bbNum, dspPtr(callBlock));
8276 GenTreePtr lastStmt;
8280 lastStmt = gtNewStmt(endCatches);
8281 endLFin->gtNext = lastStmt;
8282 lastStmt->gtPrev = endLFin;
8289 // note that this sets BBF_IMPORTED on the block
8290 impEndTreeList(callBlock, endLFin, lastStmt);
8293 step = fgNewBBafter(BBJ_ALWAYS, callBlock, true);
8294 /* The new block will inherit this block's weight */
8295 step->setBBWeight(block->bbWeight);
8296 step->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS;
8301 printf("impImportLeave - jumping out of a finally-protected try, created step (BBJ_ALWAYS) block "
8303 step->bbNum, dspPtr(step));
8307 unsigned finallyNesting = compHndBBtab[XTnum].ebdHandlerNestingLevel;
8308 assert(finallyNesting <= compHndBBtabCount);
8310 callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler.
8311 endLFin = new (this, GT_END_LFIN) GenTreeVal(GT_END_LFIN, TYP_VOID, finallyNesting);
8312 endLFin = gtNewStmt(endLFin);
8317 invalidatePreds = true;
8321 /* Append any remaining endCatches, if any */
8323 assert(!encFinallies == !endLFin);
8325 if (encFinallies == 0)
8327 assert(step == DUMMY_INIT(NULL));
8328 block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS
8331 impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
8336 printf("impImportLeave - no enclosing finally-protected try blocks; convert CEE_LEAVE block to BBJ_ALWAYS "
8337 "block BB%02u [%08p]\n",
8338 block->bbNum, dspPtr(block));
8344 // If leaveTarget is the start of another try block, we want to make sure that
8345 // we do not insert finalStep into that try block. Hence, we find the enclosing try block.
8347 unsigned tryIndex = bbFindInnermostCommonTryRegion(step, leaveTarget);
8349 // Insert a new BB either in the try region indicated by tryIndex or
8350 // the handler region indicated by leaveTarget->bbHndIndex,
8351 // depending on which is the inner region.
8352 BasicBlock* finalStep = fgNewBBinRegion(BBJ_ALWAYS, tryIndex, leaveTarget->bbHndIndex, step);
8353 finalStep->bbFlags |= BBF_KEEP_BBJ_ALWAYS;
8354 step->bbJumpDest = finalStep;
8356 /* The new block will inherit this block's weight */
8357 finalStep->setBBWeight(block->bbWeight);
8358 finalStep->bbFlags |= block->bbFlags & BBF_RUN_RARELY;
8363 printf("impImportLeave - finalStep block required (encFinallies(%d) > 0), new block BB%02u [%08p]\n",
8364 encFinallies, finalStep->bbNum, dspPtr(finalStep));
8368 GenTreePtr lastStmt;
8372 lastStmt = gtNewStmt(endCatches);
8373 endLFin->gtNext = lastStmt;
8374 lastStmt->gtPrev = endLFin;
8381 impEndTreeList(finalStep, endLFin, lastStmt);
8383 finalStep->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE
8385 // Queue up the jump target for importing
8387 impImportBlockPending(leaveTarget);
8389 invalidatePreds = true;
8392 if (invalidatePreds && fgComputePredsDone)
8394 JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n");
8399 fgVerifyHandlerTab();
8403 printf("\nAfter import CEE_LEAVE:\n");
8404 fgDispBasicBlocks();
8410 #else // FEATURE_EH_FUNCLETS
8412 void Compiler::impImportLeave(BasicBlock* block)
8417 printf("\nBefore import CEE_LEAVE in BB%02u (targeting BB%02u):\n", block->bbNum, block->bbJumpDest->bbNum);
8418 fgDispBasicBlocks();
8423 bool invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created)
8424 unsigned blkAddr = block->bbCodeOffs;
8425 BasicBlock* leaveTarget = block->bbJumpDest;
8426 unsigned jmpAddr = leaveTarget->bbCodeOffs;
8428 // LEAVE clears the stack: spill any side effects and set the stack depth to 0
8430 impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave"));
8431 verCurrentState.esStackDepth = 0;
8433 assert(block->bbJumpKind == BBJ_LEAVE);
8434 assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != nullptr); // should be a BB boundary
8436 BasicBlock* step = nullptr;
8440 // No step type; step == NULL.
8443 // Is the step block the BBJ_ALWAYS block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair?
8444 // That is, is step->bbJumpDest where a finally will return to?
8447 // The step block is a catch return.
8450 // The step block is in a "try", created as the target for a finally return or the target for a catch return.
8453 StepType stepType = ST_None;
8458 for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
8460 // Grab the handler offsets
8462 IL_OFFSET tryBeg = HBtab->ebdTryBegOffs();
8463 IL_OFFSET tryEnd = HBtab->ebdTryEndOffs();
8464 IL_OFFSET hndBeg = HBtab->ebdHndBegOffs();
8465 IL_OFFSET hndEnd = HBtab->ebdHndEndOffs();
8467 /* Is this a catch-handler we are CEE_LEAVEing out of?
8470 if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd))
8472 // Can't CEE_LEAVE out of a finally/fault handler
8473 if (HBtab->HasFinallyOrFaultHandler())
8475 BADCODE("leave out of fault/finally block");
8478 /* We are jumping out of a catch */
8480 if (step == nullptr)
8483 step->bbJumpKind = BBJ_EHCATCHRET; // convert the BBJ_LEAVE to BBJ_EHCATCHRET
8484 stepType = ST_Catch;
8489 printf("impImportLeave - jumping out of a catch (EH#%u), convert block BB%02u to BBJ_EHCATCHRET "
8491 XTnum, step->bbNum);
8497 BasicBlock* exitBlock;
8499 /* Create a new catch exit block in the catch region for the existing step block to jump to in this
8501 exitBlock = fgNewBBinRegion(BBJ_EHCATCHRET, 0, XTnum + 1, step);
8503 assert(step->bbJumpKind == BBJ_ALWAYS || step->bbJumpKind == BBJ_EHCATCHRET);
8504 step->bbJumpDest = exitBlock; // the previous step (maybe a call to a nested finally, or a nested catch
8505 // exit) returns to this block
8506 step->bbJumpDest->bbRefs++;
8508 #if defined(_TARGET_ARM_)
8509 if (stepType == ST_FinallyReturn)
8511 assert(step->bbJumpKind == BBJ_ALWAYS);
8512 // Mark the target of a finally return
8513 step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
8515 #endif // defined(_TARGET_ARM_)
8517 /* The new block will inherit this block's weight */
8518 exitBlock->setBBWeight(block->bbWeight);
8519 exitBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8521 /* This exit block is the new step */
8523 stepType = ST_Catch;
8525 invalidatePreds = true;
8530 printf("impImportLeave - jumping out of a catch (EH#%u), new BBJ_EHCATCHRET block BB%02u\n", XTnum,
8536 else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
8537 !jitIsBetween(jmpAddr, tryBeg, tryEnd))
8539 /* We are jumping out of a finally-protected try */
8541 BasicBlock* callBlock;
8543 if (step == nullptr)
8545 #if FEATURE_EH_CALLFINALLY_THUNKS
8547 // Put the call to the finally in the enclosing region.
8548 unsigned callFinallyTryIndex =
8549 (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1;
8550 unsigned callFinallyHndIndex =
8551 (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1;
8552 callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, block);
8554 // Convert the BBJ_LEAVE to BBJ_ALWAYS, jumping to the new BBJ_CALLFINALLY. This is because
8555 // the new BBJ_CALLFINALLY is in a different EH region, thus it can't just replace the BBJ_LEAVE,
8556 // which might be in the middle of the "try". In most cases, the BBJ_ALWAYS will jump to the
8557 // next block, and flow optimizations will remove it.
8558 block->bbJumpKind = BBJ_ALWAYS;
8559 block->bbJumpDest = callBlock;
8560 block->bbJumpDest->bbRefs++;
8562 /* The new block will inherit this block's weight */
8563 callBlock->setBBWeight(block->bbWeight);
8564 callBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8569 printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block BB%02u to "
8570 "BBJ_ALWAYS, add BBJ_CALLFINALLY block BB%02u\n",
8571 XTnum, block->bbNum, callBlock->bbNum);
8575 #else // !FEATURE_EH_CALLFINALLY_THUNKS
8578 callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY
8583 printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block BB%02u to "
8584 "BBJ_CALLFINALLY block\n",
8585 XTnum, callBlock->bbNum);
8589 #endif // !FEATURE_EH_CALLFINALLY_THUNKS
8593 // Calling the finally block. We already have a step block that is either the call-to-finally from a
8594 // more nested try/finally (thus we are jumping out of multiple nested 'try' blocks, each protected by
8595 // a 'finally'), or the step block is the return from a catch.
8597 // Due to ThreadAbortException, we can't have the catch return target the call-to-finally block
8598 // directly. Note that if a 'catch' ends without resetting the ThreadAbortException, the VM will
8599 // automatically re-raise the exception, using the return address of the catch (that is, the target
8600 // block of the BBJ_EHCATCHRET) as the re-raise address. If this address is in a finally, the VM will
8601 // refuse to do the re-raise, and the ThreadAbortException will get eaten (and lost). On AMD64/ARM64,
8602 // we put the call-to-finally thunk in a special "cloned finally" EH region that does look like a
8603 // finally clause to the VM. Thus, on these platforms, we can't have BBJ_EHCATCHRET target a
8604 // BBJ_CALLFINALLY directly. (Note that on ARM32, we don't mark the thunk specially -- it lives directly
8605 // within the 'try' region protected by the finally, since we generate code in such a way that execution
8606 // never returns to the call-to-finally call, and the finally-protected 'try' region doesn't appear on stack walks.)
8609 assert(step->bbJumpKind == BBJ_ALWAYS || step->bbJumpKind == BBJ_EHCATCHRET);
8611 #if FEATURE_EH_CALLFINALLY_THUNKS
8612 if (step->bbJumpKind == BBJ_EHCATCHRET)
8614 // Need to create another step block in the 'try' region that will actually branch to the
8615 // call-to-finally thunk.
8616 BasicBlock* step2 = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step);
8617 step->bbJumpDest = step2;
8618 step->bbJumpDest->bbRefs++;
8619 step2->setBBWeight(block->bbWeight);
8620 step2->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8625 printf("impImportLeave - jumping out of a finally-protected try (EH#%u), step block is "
8626 "BBJ_EHCATCHRET (BB%02u), new BBJ_ALWAYS step-step block BB%02u\n",
8627 XTnum, step->bbNum, step2->bbNum);
8632 assert(stepType == ST_Catch); // Leave it as catch type for now.
8634 #endif // FEATURE_EH_CALLFINALLY_THUNKS
8636 #if FEATURE_EH_CALLFINALLY_THUNKS
8637 unsigned callFinallyTryIndex =
8638 (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1;
8639 unsigned callFinallyHndIndex =
8640 (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1;
8641 #else // !FEATURE_EH_CALLFINALLY_THUNKS
8642 unsigned callFinallyTryIndex = XTnum + 1;
8643 unsigned callFinallyHndIndex = 0; // don't care
8644 #endif // !FEATURE_EH_CALLFINALLY_THUNKS
8646 callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, step);
8647 step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next
8648 // finally in the chain)
8649 step->bbJumpDest->bbRefs++;
8651 #if defined(_TARGET_ARM_)
8652 if (stepType == ST_FinallyReturn)
8654 assert(step->bbJumpKind == BBJ_ALWAYS);
8655 // Mark the target of a finally return
8656 step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
8658 #endif // defined(_TARGET_ARM_)
8660 /* The new block will inherit this block's weight */
8661 callBlock->setBBWeight(block->bbWeight);
8662 callBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8667 printf("impImportLeave - jumping out of a finally-protected try (EH#%u), new BBJ_CALLFINALLY block "
8669 XTnum, callBlock->bbNum);
8674 step = fgNewBBafter(BBJ_ALWAYS, callBlock, true);
8675 stepType = ST_FinallyReturn;
8677 /* The new block will inherit this block's weight */
8678 step->setBBWeight(block->bbWeight);
8679 step->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS;
8684 printf("impImportLeave - jumping out of a finally-protected try (EH#%u), created step (BBJ_ALWAYS) "
8686 XTnum, step->bbNum);
8690 callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler.
8692 invalidatePreds = true;
8694 else if (HBtab->HasCatchHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
8695 !jitIsBetween(jmpAddr, tryBeg, tryEnd))
8697 // We are jumping out of a catch-protected try.
8699 // If we are returning from a call to a finally, then we must have a step block within a try
8700 // that is protected by a catch. This is so when unwinding from that finally (e.g., if code within the
8701 // finally raises an exception), the VM will find this step block, notice that it is in a protected region,
8702 // and invoke the appropriate catch.
8704 // We also need to handle a special case with the handling of ThreadAbortException. If a try/catch
8705 // catches a ThreadAbortException (which might be because it catches a parent, e.g. System.Exception),
8706 // and the catch doesn't call System.Threading.Thread::ResetAbort(), then when the catch returns to the VM,
8707 // the VM will automatically re-raise the ThreadAbortException. When it does this, it uses the target
8708 // address of the catch return as the new exception address. That is, the re-raised exception appears to
8709 // occur at the catch return address. If this exception return address skips an enclosing try/catch that
8710 // catches ThreadAbortException, then the enclosing try/catch will not catch the exception, as it should.
8715 // // something here raises ThreadAbortException
8716 // LEAVE LABEL_1; // no need to stop at LABEL_2
8717 // } catch (Exception) {
8718 // // This catches ThreadAbortException, but doesn't call System.Threading.Thread::ResetAbort(), so
8719 // // ThreadAbortException is re-raised by the VM at the address specified by the LEAVE opcode.
8720 // // This is bad, since it means the outer try/catch won't get a chance to catch the re-raised
8721 // // ThreadAbortException. So, instead, create step block LABEL_2 and LEAVE to that. We only
8722 // // need to do this transformation if the current EH block is a try/catch that catches
8723 // // ThreadAbortException (or one of its parents), however we might not be able to find that
8724 // // information, so currently we do it for all catch types.
8725 // LEAVE LABEL_1; // Convert this to LEAVE LABEL2;
8727 // LABEL_2: LEAVE LABEL_1; // inserted by this step creation code
8728 // } catch (ThreadAbortException) {
8732 // Note that this pattern isn't theoretical: it occurs in ASP.NET, in IL code generated by the Roslyn C# compiler.
8735 if ((stepType == ST_FinallyReturn) || (stepType == ST_Catch))
8737 BasicBlock* catchStep;
8741 if (stepType == ST_FinallyReturn)
8743 assert(step->bbJumpKind == BBJ_ALWAYS);
8747 assert(stepType == ST_Catch);
8748 assert(step->bbJumpKind == BBJ_EHCATCHRET);
8751 /* Create a new exit block in the try region for the existing step block to jump to in this scope */
8752 catchStep = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step);
8753 step->bbJumpDest = catchStep;
8754 step->bbJumpDest->bbRefs++;
8756 #if defined(_TARGET_ARM_)
8757 if (stepType == ST_FinallyReturn)
8759 // Mark the target of a finally return
8760 step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
8762 #endif // defined(_TARGET_ARM_)
8764 /* The new block will inherit this block's weight */
8765 catchStep->setBBWeight(block->bbWeight);
8766 catchStep->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8771 if (stepType == ST_FinallyReturn)
8773 printf("impImportLeave - return from finally jumping out of a catch-protected try (EH#%u), new "
8774 "BBJ_ALWAYS block BB%02u\n",
8775 XTnum, catchStep->bbNum);
8779 assert(stepType == ST_Catch);
8780 printf("impImportLeave - return from catch jumping out of a catch-protected try (EH#%u), new "
8781 "BBJ_ALWAYS block BB%02u\n",
8782 XTnum, catchStep->bbNum);
8787 /* This block is the new step */
8791 invalidatePreds = true;
8796 if (step == nullptr)
8798 block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS
8803 printf("impImportLeave - no enclosing finally-protected try blocks or catch handlers; convert CEE_LEAVE "
8804 "block BB%02u to BBJ_ALWAYS\n",
8811 step->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE
8813 #if defined(_TARGET_ARM_)
8814 if (stepType == ST_FinallyReturn)
8816 assert(step->bbJumpKind == BBJ_ALWAYS);
8817 // Mark the target of a finally return
8818 step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
8820 #endif // defined(_TARGET_ARM_)
8825 printf("impImportLeave - final destination of step blocks set to BB%02u\n", leaveTarget->bbNum);
8829 // Queue up the jump target for importing
8831 impImportBlockPending(leaveTarget);
8834 if (invalidatePreds && fgComputePredsDone)
8836 JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n");
8841 fgVerifyHandlerTab();
8845 printf("\nAfter import CEE_LEAVE:\n");
8846 fgDispBasicBlocks();
8852 #endif // FEATURE_EH_FUNCLETS
8854 /*****************************************************************************/
8855 // This is called when reimporting a leave block. It resets the JumpKind,
8856 // JumpDest, and bbNext to the original values
8858 void Compiler::impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr)
8860 #if FEATURE_EH_FUNCLETS
8861 // With EH Funclets, while importing leave opcode we create another block ending with BBJ_ALWAYS (call it B1)
8862 // and the block containing leave (say B0) is marked as BBJ_CALLFINALLY. Say for some reason we reimport B0,
8863 // it is reset (in this routine) by marking as ending with BBJ_LEAVE and further down when B0 is reimported, we
8864 // create another BBJ_ALWAYS (call it B2). In this process B1 gets orphaned and any blocks to which B1 is the
8865 // only predecessor are also considered orphans and attempted to be deleted.
8872 // leave OUTSIDE; // B0 is the block containing this leave, following this would be B1
8877 // In the above nested try-finally example, we create a step block (call it Bstep) which in turn branches to a block
8878 // where a finally would branch to (and such block is marked as finally target). Block B1 branches to step block.
8879 // Because of re-import of B0, Bstep is also orphaned. Since Bstep is a finally target it cannot be removed. To
8880 // work around this we will duplicate B0 (call it B0Dup) before resetting. B0Dup is marked as BBJ_CALLFINALLY and
8881 // only serves to pair up with B1 (BBJ_ALWAYS) that got orphaned. Now during orphan block deletion B0Dup and B1
8882 // will be treated as pair and handled correctly.
8883 if (block->bbJumpKind == BBJ_CALLFINALLY)
8885 BasicBlock* dupBlock = bbNewBasicBlock(block->bbJumpKind);
8886 dupBlock->bbFlags = block->bbFlags;
8887 dupBlock->bbJumpDest = block->bbJumpDest;
8888 dupBlock->copyEHRegion(block);
8889 dupBlock->bbCatchTyp = block->bbCatchTyp;
8891 // Mark this block as
8892 // a) not referenced by any other block to make sure that it gets deleted
8894 // c) prevented from being imported
8897 dupBlock->bbRefs = 0;
8898 dupBlock->bbWeight = 0;
8899 dupBlock->bbFlags |= BBF_IMPORTED | BBF_INTERNAL | BBF_RUN_RARELY;
8901 // Insert the block right after the block which is getting reset so that BBJ_CALLFINALLY and BBJ_ALWAYS
8902 // will be next to each other.
8903 fgInsertBBafter(block, dupBlock);
8908 printf("New Basic Block BB%02u duplicate of BB%02u created.\n", dupBlock->bbNum, block->bbNum);
8912 #endif // FEATURE_EH_FUNCLETS
8914 block->bbJumpKind = BBJ_LEAVE;
8916 block->bbJumpDest = fgLookupBB(jmpAddr);
8918 // We will leave the BBJ_ALWAYS block we introduced. When it's reimported
8919 // the BBJ_ALWAYS block will be unreachable, and will be removed after. The
8920 // reason we don't want to remove the block at this point is that if we call
8921 // fgInitBBLookup() again we will do it wrong as the BBJ_ALWAYS block won't be
8922 // added and the linked list length will be different than fgBBcount.
8925 /*****************************************************************************/
8926 // Get the first non-prefix opcode. Used for verification of valid combinations
8927 // of prefixes and actual opcodes.
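// For example (illustrative): given the IL byte sequence "volatile. ldind.i4", this walks past
// the prefix and returns CEE_LDIND_I4.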
8929 static OPCODE impGetNonPrefixOpcode(const BYTE* codeAddr, const BYTE* codeEndp)
8931 while (codeAddr < codeEndp)
8933 OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr);
8934 codeAddr += sizeof(__int8);
8936 if (opcode == CEE_PREFIX1)
8938 if (codeAddr >= codeEndp)
8942 opcode = (OPCODE)(getU1LittleEndian(codeAddr) + 256);
8943 codeAddr += sizeof(__int8);
8951 case CEE_CONSTRAINED:
8958 codeAddr += opcodeSizes[opcode];
8964 /*****************************************************************************/
8965 // Checks whether the opcode is a valid opcode for volatile. and unaligned. prefixes
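// For example (illustrative): "volatile. ldsfld" is accepted (when volatilePrefix is true), while a
// volatile. or unaligned. prefix in front of an opcode such as CEE_ADD is rejected with BADCODE.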
8967 static void impValidateMemoryAccessOpcode(const BYTE* codeAddr, const BYTE* codeEndp, bool volatilePrefix)
8969 OPCODE opcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
8972 // The opcodes of all the ldind and stind instructions happen to be contiguous, except stind.i.
8973 ((CEE_LDIND_I1 <= opcode) && (opcode <= CEE_STIND_R8)) || (opcode == CEE_STIND_I) ||
8974 (opcode == CEE_LDFLD) || (opcode == CEE_STFLD) || (opcode == CEE_LDOBJ) || (opcode == CEE_STOBJ) ||
8975 (opcode == CEE_INITBLK) || (opcode == CEE_CPBLK) ||
8976 // volatile. prefix is allowed with the ldsfld and stsfld
8977 (volatilePrefix && ((opcode == CEE_LDSFLD) || (opcode == CEE_STSFLD)))))
8979 BADCODE("Invalid opcode for unaligned. or volatile. prefix");
8983 /*****************************************************************************/
8987 #undef RETURN // undef contracts RETURN macro
9002 const static controlFlow_t controlFlow[] = {
9003 #define OPDEF(c, s, pop, push, args, type, l, s1, s2, flow) flow,
9004 #include "opcode.def"
9010 /*****************************************************************************
9011 * Determine the result type of an arithmetic operation
9012 * On 64-bit inserts upcasts when native int is mixed with int32
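 *
 * Illustrative summary (derived from the cases handled below):
 *     byref - byref          => native int
 *     [native] int - byref   => native int
 *     byref +/- [native] int => byref
 *     int32 op native int    => native int (on 64-bit the int32 operand gets an explicit upcast)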
9014 var_types Compiler::impGetByRefResultType(genTreeOps oper, bool fUnsigned, GenTreePtr* pOp1, GenTreePtr* pOp2)
9016 var_types type = TYP_UNDEF;
9017 GenTreePtr op1 = *pOp1, op2 = *pOp2;
9019 // Arithmetic operations are generally only allowed with
9020 // primitive types, but certain operations are allowed
9023 if ((oper == GT_SUB) && (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF))
9025 if ((genActualType(op1->TypeGet()) == TYP_BYREF) && (genActualType(op2->TypeGet()) == TYP_BYREF))
9027 // byref1-byref2 => gives a native int
9030 else if (genActualTypeIsIntOrI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_BYREF))
9032 // [native] int - byref => gives a native int
9035 // The reason is that it is possible, in managed C++,
9036 // to have a tree like this:
9043 //     SUB:  const(h) int  -  addr byref
9045 // <BUGNUM> VSW 318822 </BUGNUM>
9047 // So here we decide to make the resulting type to be a native int.
9048 CLANG_FORMAT_COMMENT_ANCHOR;
9050 #ifdef _TARGET_64BIT_
9051 if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
9053 // insert an explicit upcast
9054 op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9056 #endif // _TARGET_64BIT_
9062 // byref - [native] int => gives a byref
9063 assert(genActualType(op1->TypeGet()) == TYP_BYREF && genActualTypeIsIntOrI(op2->TypeGet()));
9065 #ifdef _TARGET_64BIT_
9066 if ((genActualType(op2->TypeGet()) != TYP_I_IMPL))
9068 // insert an explicit upcast
9069 op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9071 #endif // _TARGET_64BIT_
9076 else if ((oper == GT_ADD) &&
9077 (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF))
9079 // byref + [native] int => gives a byref
9081 // [native] int + byref => gives a byref
9083 // only one can be a byref : byref op byref not allowed
9084 assert(genActualType(op1->TypeGet()) != TYP_BYREF || genActualType(op2->TypeGet()) != TYP_BYREF);
9085 assert(genActualTypeIsIntOrI(op1->TypeGet()) || genActualTypeIsIntOrI(op2->TypeGet()));
9087 #ifdef _TARGET_64BIT_
9088 if (genActualType(op2->TypeGet()) == TYP_BYREF)
9090 if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
9092 // insert an explicit upcast
9093 op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9096 else if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
9098 // insert an explicit upcast
9099 op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9101 #endif // _TARGET_64BIT_
9105 #ifdef _TARGET_64BIT_
9106 else if (genActualType(op1->TypeGet()) == TYP_I_IMPL || genActualType(op2->TypeGet()) == TYP_I_IMPL)
9108 assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType));
9110 // int + long => gives long
9111 // long + int => gives long
9112 // we get this because in the IL the long isn't Int64, it's just IntPtr
9114 if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
9116 // insert an explicit upcast
9117 op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9119 else if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
9121 // insert an explicit upcast
9122 op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9127 #else // 32-bit TARGET
9128 else if (genActualType(op1->TypeGet()) == TYP_LONG || genActualType(op2->TypeGet()) == TYP_LONG)
9130 assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType));
9132 // int + long => gives long
9133 // long + int => gives long
9137 #endif // _TARGET_64BIT_
9140 // int + int => gives an int
9141 assert(genActualType(op1->TypeGet()) != TYP_BYREF && genActualType(op2->TypeGet()) != TYP_BYREF);
9143 assert(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
9144 varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
9146 type = genActualType(op1->gtType);
9148 #if FEATURE_X87_DOUBLES
9150 // For x87, since we only have 1 size of registers, prefer double
9151 // For everybody else, be more precise
9152 if (type == TYP_FLOAT)
9155 #else // !FEATURE_X87_DOUBLES
9157 // If both operands are TYP_FLOAT, then leave it as TYP_FLOAT.
9158 // Otherwise, turn floats into doubles
9159 if ((type == TYP_FLOAT) && (genActualType(op2->gtType) != TYP_FLOAT))
9161 assert(genActualType(op2->gtType) == TYP_DOUBLE);
9165 #endif // FEATURE_X87_DOUBLES
9168 #if FEATURE_X87_DOUBLES
9169 assert(type == TYP_BYREF || type == TYP_DOUBLE || type == TYP_LONG || type == TYP_INT);
9170 #else // FEATURE_X87_DOUBLES
9171 assert(type == TYP_BYREF || type == TYP_DOUBLE || type == TYP_FLOAT || type == TYP_LONG || type == TYP_INT);
9172 #endif // FEATURE_X87_DOUBLES
9177 /*****************************************************************************
9178 * Casting Helper Function to service both CEE_CASTCLASS and CEE_ISINST
9180 * typeRef contains the token, op1 to contain the value being cast,
9181 * and op2 to contain code that creates the type handle corresponding to typeRef
9182 * isCastClass = true means CEE_CASTCLASS, false means CEE_ISINST
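 *
 * Illustrative sketch (assumption) of the inline expansion built below for the expandable cases:
 *
 *     tmp = op1;
 *     result = (tmp == null)           ? tmp
 *            : (*(handle*)tmp == op2)  ? tmp                          // method table matches
 *            : isCastClass ? CHKCASTCLASS_SPECIAL(op2, tmp)           // castclass: checked helper
 *                          : null;                                    // isinst: mismatch yields null
 *
 * expressed as nested GT_QMARK/GT_COLON trees.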
9184 GenTreePtr Compiler::impCastClassOrIsInstToTree(GenTreePtr op1,
9186 CORINFO_RESOLVED_TOKEN* pResolvedToken,
9191 assert(op1->TypeGet() == TYP_REF);
9193 CorInfoHelpFunc helper = info.compCompHnd->getCastingHelper(pResolvedToken, isCastClass);
9197 // We only want to expand inline the normal CHKCASTCLASS helper;
9198 expandInline = (helper == CORINFO_HELP_CHKCASTCLASS);
9202 if (helper == CORINFO_HELP_ISINSTANCEOFCLASS)
9204 // Get the Class Handle and class attributes for the type we are casting to
9206 DWORD flags = info.compCompHnd->getClassAttribs(pResolvedToken->hClass);
9209 // If the class handle is marked as final we can also expand the IsInst check inline
9211 expandInline = ((flags & CORINFO_FLG_FINAL) != 0);
9214 // But don't expand inline these two cases
9216 if (flags & CORINFO_FLG_MARSHAL_BYREF)
9218 expandInline = false;
9220 else if (flags & CORINFO_FLG_CONTEXTFUL)
9222 expandInline = false;
9228 // We can't expand inline any other helpers
9230 expandInline = false;
9236 if (compCurBB->isRunRarely())
9238 expandInline = false; // not worth the code expansion in a rarely run block
9241 if ((op1->gtFlags & GTF_GLOB_EFFECT) && lvaHaveManyLocals())
9243 expandInline = false; // not worth creating an untracked local variable
9249 // If we CSE this class handle we prevent assertionProp from making SubType assertions
9250 // so instead we force the CSE logic to not consider CSE-ing this class handle.
9252 op2->gtFlags |= GTF_DONT_CSE;
9254 return gtNewHelperCallNode(helper, TYP_REF, 0, gtNewArgList(op2, op1));
9257 impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark2"));
9262 // expand the methodtable match:
9266 // GT_IND op2 (typically CNS_INT)
9271 // This can replace op1 with a GT_COMMA that evaluates op1 into a local
9273 op1 = impCloneExpr(op1, &temp, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("CASTCLASS eval op1"));
9275 // op1 is now known to be a non-complex tree
9276 // thus we can use gtClone(op1) from now on
9279 GenTreePtr op2Var = op2;
9282 op2Var = fgInsertCommaFormTemp(&op2);
9283 lvaTable[op2Var->AsLclVarCommon()->GetLclNum()].lvIsCSE = true;
9285 temp = gtNewOperNode(GT_IND, TYP_I_IMPL, temp);
9286 temp->gtFlags |= GTF_EXCEPT;
9287 condMT = gtNewOperNode(GT_NE, TYP_INT, temp, op2);
9289 GenTreePtr condNull;
9291 // expand the null check:
9293 // condNull ==> GT_EQ
9298 condNull = gtNewOperNode(GT_EQ, TYP_INT, gtClone(op1), gtNewIconNode(0, TYP_REF));
9301 // expand the true and false trees for the condMT
9303 GenTreePtr condFalse = gtClone(op1);
9304 GenTreePtr condTrue;
9308 // use the special helper that skips the cases checked by our inlined cast
9310 helper = CORINFO_HELP_CHKCASTCLASS_SPECIAL;
9312 condTrue = gtNewHelperCallNode(helper, TYP_REF, 0, gtNewArgList(op2Var, gtClone(op1)));
9316 condTrue = gtNewIconNode(0, TYP_REF);
9319 #define USE_QMARK_TREES
9321 #ifdef USE_QMARK_TREES
9324 // Generate first QMARK - COLON tree
9326 // qmarkMT ==> GT_QMARK
9330 // condFalse condTrue
9332 temp = new (this, GT_COLON) GenTreeColon(TYP_REF, condTrue, condFalse);
9333 qmarkMT = gtNewQmarkNode(TYP_REF, condMT, temp);
9334 condMT->gtFlags |= GTF_RELOP_QMARK;
9336 GenTreePtr qmarkNull;
9338 // Generate second QMARK - COLON tree
9340 // qmarkNull ==> GT_QMARK
9342 // condNull GT_COLON
9346 temp = new (this, GT_COLON) GenTreeColon(TYP_REF, gtClone(op1), qmarkMT);
9347 qmarkNull = gtNewQmarkNode(TYP_REF, condNull, temp);
9348 qmarkNull->gtFlags |= GTF_QMARK_CAST_INSTOF;
9349 condNull->gtFlags |= GTF_RELOP_QMARK;
9351 // Make QMark node a top level node by spilling it.
9352 unsigned tmp = lvaGrabTemp(true DEBUGARG("spilling QMark2"));
9353 impAssignTempGen(tmp, qmarkNull, (unsigned)CHECK_SPILL_NONE);
9354 return gtNewLclvNode(tmp, TYP_REF);
9359 #define assertImp(cond) ((void)0)
9361 #define assertImp(cond) \
9366 const int cchAssertImpBuf = 600; \
9367 char* assertImpBuf = (char*)alloca(cchAssertImpBuf); \
9368 _snprintf_s(assertImpBuf, cchAssertImpBuf, cchAssertImpBuf - 1, \
9369 "%s : Possibly bad IL with CEE_%s at offset %04Xh (op1=%s op2=%s stkDepth=%d)", #cond, \
9370 impCurOpcName, impCurOpcOffs, op1 ? varTypeName(op1->TypeGet()) : "NULL", \
9371 op2 ? varTypeName(op2->TypeGet()) : "NULL", verCurrentState.esStackDepth); \
9372 assertAbort(assertImpBuf, __FILE__, __LINE__); \
9378 #pragma warning(push)
9379 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
9381 /*****************************************************************************
9382 * Import the instr for the given basic block
9384 void Compiler::impImportBlockCode(BasicBlock* block)
9386 #define _impResolveToken(kind) impResolveToken(codeAddr, &resolvedToken, kind)
9392 printf("\nImporting BB%02u (PC=%03u) of '%s'", block->bbNum, block->bbCodeOffs, info.compFullName);
9396 unsigned nxtStmtIndex = impInitBlockLineInfo();
9397 IL_OFFSET nxtStmtOffs;
9399 GenTreePtr arrayNodeFrom, arrayNodeTo, arrayNodeToIndex;
9401 CorInfoHelpFunc helper;
9402 CorInfoIsAccessAllowedResult accessAllowedResult;
9403 CORINFO_HELPER_DESC calloutHelper;
9404 const BYTE* lastLoadToken = nullptr;
9406 // reject cyclic constraints
9407 if (tiVerificationNeeded)
9409 Verify(!info.hasCircularClassConstraints, "Method parent has circular class type parameter constraints.");
9410 Verify(!info.hasCircularMethodConstraints, "Method has circular method type parameter constraints.");
9413 /* Get the tree list started */
9417 /* Walk the opcodes that comprise the basic block */
9419 const BYTE* codeAddr = info.compCode + block->bbCodeOffs;
9420 const BYTE* codeEndp = info.compCode + block->bbCodeOffsEnd;
9422 IL_OFFSET opcodeOffs = block->bbCodeOffs;
9423 IL_OFFSET lastSpillOffs = opcodeOffs;
9427 /* remember the start of the delegate creation sequence (used for verification) */
9428 const BYTE* delegateCreateStart = nullptr;
9430 int prefixFlags = 0;
9431 bool explicitTailCall, constraintCall, readonlyCall;
9433 bool insertLdloc = false; // set by CEE_DUP and cleared by following store
9436 unsigned numArgs = info.compArgsCount;
9438 /* Now process all the opcodes in the block */
9440 var_types callTyp = TYP_COUNT;
9441 OPCODE prevOpcode = CEE_ILLEGAL;
9443 if (block->bbCatchTyp)
9445 if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES)
9447 impCurStmtOffsSet(block->bbCodeOffs);
9450 // We will spill the GT_CATCH_ARG and the input of the BB_QMARK block
9451 // to a temp. This is a trade-off for code simplicity.
9452 impSpillSpecialSideEff();
9455 while (codeAddr < codeEndp)
9457 bool usingReadyToRunHelper = false;
9458 CORINFO_RESOLVED_TOKEN resolvedToken;
9459 CORINFO_RESOLVED_TOKEN constrainedResolvedToken;
9460 CORINFO_CALL_INFO callInfo;
9461 CORINFO_FIELD_INFO fieldInfo;
9463 tiRetVal = typeInfo(); // Default type info
9465 //---------------------------------------------------------------------
9467 /* We need to restrict the max tree depth as many of the Compiler
9468 functions are recursive. We do this by spilling the stack */
9470 if (verCurrentState.esStackDepth)
9472 /* Has it been a while since we last saw a non-empty stack (which
9473 guarantees that the tree depth isn't accumulating). */
9475 if ((opcodeOffs - lastSpillOffs) > 200)
9477 impSpillStackEnsure();
9478 lastSpillOffs = opcodeOffs;
9483 lastSpillOffs = opcodeOffs;
9484 impBoxTempInUse = false; // nothing on the stack, box temp OK to use again
9487 /* Compute the current instr offset */
9489 opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
9492 if (opts.compDbgInfo)
9495 if (!compIsForInlining())
9498 (nxtStmtIndex < info.compStmtOffsetsCount) ? info.compStmtOffsets[nxtStmtIndex] : BAD_IL_OFFSET;
9500 /* Have we reached the next stmt boundary ? */
9502 if (nxtStmtOffs != BAD_IL_OFFSET && opcodeOffs >= nxtStmtOffs)
9504 assert(nxtStmtOffs == info.compStmtOffsets[nxtStmtIndex]);
9506 if (verCurrentState.esStackDepth != 0 && opts.compDbgCode)
9508 /* We need to provide accurate IP-mapping at this point.
9509 So spill anything on the stack so that it will form
9510 gtStmts with the correct stmt offset noted */
9512 impSpillStackEnsure(true);
9515 // Has impCurStmtOffs been reported in any tree?
9517 if (impCurStmtOffs != BAD_IL_OFFSET && opts.compDbgCode)
9519 GenTreePtr placeHolder = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
9520 impAppendTree(placeHolder, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
9522 assert(impCurStmtOffs == BAD_IL_OFFSET);
9525 if (impCurStmtOffs == BAD_IL_OFFSET)
9527 /* Make sure that nxtStmtIndex is in sync with opcodeOffs.
9528 If opcodeOffs has gone past nxtStmtIndex, catch up */
9530 while ((nxtStmtIndex + 1) < info.compStmtOffsetsCount &&
9531 info.compStmtOffsets[nxtStmtIndex + 1] <= opcodeOffs)
9536 /* Go to the new stmt */
9538 impCurStmtOffsSet(info.compStmtOffsets[nxtStmtIndex]);
9540 /* Update the stmt boundary index */
9543 assert(nxtStmtIndex <= info.compStmtOffsetsCount);
9545 /* Are there any more line# entries after this one? */
9547 if (nxtStmtIndex < info.compStmtOffsetsCount)
9549 /* Remember where the next line# starts */
9551 nxtStmtOffs = info.compStmtOffsets[nxtStmtIndex];
9555 /* No more line# entries */
9557 nxtStmtOffs = BAD_IL_OFFSET;
9561 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES) &&
9562 (verCurrentState.esStackDepth == 0))
9564 /* At stack-empty locations, we have already added the tree to
9565 the stmt list with the last offset. We just need to update
9569 impCurStmtOffsSet(opcodeOffs);
9571 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES) &&
9572 impOpcodeIsCallSiteBoundary(prevOpcode))
9574 /* Make sure we have a type cached */
9575 assert(callTyp != TYP_COUNT);
9577 if (callTyp == TYP_VOID)
9579 impCurStmtOffsSet(opcodeOffs);
9581 else if (opts.compDbgCode)
9583 impSpillStackEnsure(true);
9584 impCurStmtOffsSet(opcodeOffs);
9587 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::NOP_BOUNDARIES) && (prevOpcode == CEE_NOP))
9589 if (opts.compDbgCode)
9591 impSpillStackEnsure(true);
9594 impCurStmtOffsSet(opcodeOffs);
9597 assert(impCurStmtOffs == BAD_IL_OFFSET || nxtStmtOffs == BAD_IL_OFFSET ||
9598 jitGetILoffs(impCurStmtOffs) <= nxtStmtOffs);
9602 CORINFO_CLASS_HANDLE clsHnd = DUMMY_INIT(NULL);
9603 CORINFO_CLASS_HANDLE ldelemClsHnd = DUMMY_INIT(NULL);
9604 CORINFO_CLASS_HANDLE stelemClsHnd = DUMMY_INIT(NULL);
9606 var_types lclTyp, ovflType = TYP_UNKNOWN;
9607 GenTreePtr op1 = DUMMY_INIT(NULL);
9608 GenTreePtr op2 = DUMMY_INIT(NULL);
9609 GenTreeArgList* args = nullptr; // What good do these "DUMMY_INIT"s do?
9610 GenTreePtr newObjThisPtr = DUMMY_INIT(NULL);
9611 bool uns = DUMMY_INIT(false);
9613 /* Get the next opcode and the size of its parameters */
9615 OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr);
9616 codeAddr += sizeof(__int8);
9619 impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1);
9620 JITDUMP("\n [%2u] %3u (0x%03x) ", verCurrentState.esStackDepth, impCurOpcOffs, impCurOpcOffs);
9625 // Return if any previous code has caused inline to fail.
9626 if (compDonotInline())
9631 /* Get the size of additional parameters */
9633 signed int sz = opcodeSizes[opcode];
9636 clsHnd = NO_CLASS_HANDLE;
9638 callTyp = TYP_COUNT;
9640 impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1);
9641 impCurOpcName = opcodeNames[opcode];
9643 if (verbose && (opcode != CEE_PREFIX1))
9645 printf("%s", impCurOpcName);
9648 /* Use assertImp() to display the opcode */
9650 op1 = op2 = nullptr;
9653 /* See what kind of an opcode we have, then */
9655 unsigned mflags = 0;
9656 unsigned clsFlags = 0;
9669 CORINFO_SIG_INFO sig;
9672 bool ovfl, unordered, callNode;
9674 CORINFO_CLASS_HANDLE tokenType;
9684 opcode = (OPCODE)(getU1LittleEndian(codeAddr) + 256);
9685 codeAddr += sizeof(__int8);
9686 opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
9691 // We need to call impSpillLclRefs() for a struct type lclVar.
9692 // This is done for non-block assignments in the handling of stloc.
9693 if ((op1->OperGet() == GT_ASG) && varTypeIsStruct(op1->gtOp.gtOp1) &&
9694 (op1->gtOp.gtOp1->gtOper == GT_LCL_VAR))
9696 impSpillLclRefs(op1->gtOp.gtOp1->AsLclVarCommon()->gtLclNum);
9699 /* Append 'op1' to the list of statements */
9700 impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
9705 /* Append 'op1' to the list of statements */
9707 impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
9713 // Remember at which BC offset the tree was finished
9714 impNoteLastILoffs();
9719 impPushNullObjRefOnStack();
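// The short forms ldc.i4.m1 through ldc.i4.8 encode the constant in the opcode itself,
// so the value is recovered below by subtracting CEE_LDC_I4_0 (giving -1 through 8).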
9732 cval.intVal = (opcode - CEE_LDC_I4_0);
9733 assert(-1 <= cval.intVal && cval.intVal <= 8);
9737 cval.intVal = getI1LittleEndian(codeAddr);
9740 cval.intVal = getI4LittleEndian(codeAddr);
9743 JITDUMP(" %d", cval.intVal);
9744 impPushOnStack(gtNewIconNode(cval.intVal), typeInfo(TI_INT));
9748 cval.lngVal = getI8LittleEndian(codeAddr);
9749 JITDUMP(" 0x%016llx", cval.lngVal);
9750 impPushOnStack(gtNewLconNode(cval.lngVal), typeInfo(TI_LONG));
9754 cval.dblVal = getR8LittleEndian(codeAddr);
9755 JITDUMP(" %#.17g", cval.dblVal);
9756 impPushOnStack(gtNewDconNode(cval.dblVal), typeInfo(TI_DOUBLE));
9760 cval.dblVal = getR4LittleEndian(codeAddr);
9761 JITDUMP(" %#.17g", cval.dblVal);
9763 GenTreePtr cnsOp = gtNewDconNode(cval.dblVal);
9764 #if !FEATURE_X87_DOUBLES
9765 // X87 stack doesn't differentiate between float/double
9766 // so R4 is treated as R8, but everybody else does
9767 cnsOp->gtType = TYP_FLOAT;
9768 #endif // FEATURE_X87_DOUBLES
9769 impPushOnStack(cnsOp, typeInfo(TI_DOUBLE));
9775 if (compIsForInlining())
9777 if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_NO_CALLEE_LDSTR)
9779 compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_LDSTR_RESTRICTION);
9784 val = getU4LittleEndian(codeAddr);
9785 JITDUMP(" %08X", val);
9786 if (tiVerificationNeeded)
9788 Verify(info.compCompHnd->isValidStringRef(info.compScopeHnd, val), "bad string");
9789 tiRetVal = typeInfo(TI_REF, impGetStringClass());
9791 impPushOnStack(gtNewSconNode(val, info.compScopeHnd), tiRetVal);
9796 lclNum = getU2LittleEndian(codeAddr);
9797 JITDUMP(" %u", lclNum);
9798 impLoadArg(lclNum, opcodeOffs + sz + 1);
9802 lclNum = getU1LittleEndian(codeAddr);
9803 JITDUMP(" %u", lclNum);
9804 impLoadArg(lclNum, opcodeOffs + sz + 1);
9811 lclNum = (opcode - CEE_LDARG_0);
9812 assert(lclNum >= 0 && lclNum < 4);
9813 impLoadArg(lclNum, opcodeOffs + sz + 1);
9817 lclNum = getU2LittleEndian(codeAddr);
9818 JITDUMP(" %u", lclNum);
9819 impLoadLoc(lclNum, opcodeOffs + sz + 1);
9823 lclNum = getU1LittleEndian(codeAddr);
9824 JITDUMP(" %u", lclNum);
9825 impLoadLoc(lclNum, opcodeOffs + sz + 1);
9832 lclNum = (opcode - CEE_LDLOC_0);
9833 assert(lclNum >= 0 && lclNum < 4);
9834 impLoadLoc(lclNum, opcodeOffs + sz + 1);
9838 lclNum = getU2LittleEndian(codeAddr);
9842 lclNum = getU1LittleEndian(codeAddr);
9844 JITDUMP(" %u", lclNum);
9846 if (tiVerificationNeeded)
9848 Verify(lclNum < info.compILargsCount, "bad arg num");
9851 if (compIsForInlining())
9853 op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo);
9854 noway_assert(op1->gtOper == GT_LCL_VAR);
9855 lclNum = op1->AsLclVar()->gtLclNum;
9860 lclNum = compMapILargNum(lclNum); // account for possible hidden param
9861 assertImp(lclNum < numArgs);
9863 if (lclNum == info.compThisArg)
9865 lclNum = lvaArg0Var;
9867 lvaTable[lclNum].lvArgWrite = 1;
9869 if (tiVerificationNeeded)
9871 typeInfo& tiLclVar = lvaTable[lclNum].lvVerTypeInfo;
9872 Verify(tiCompatibleWith(impStackTop().seTypeInfo, NormaliseForStack(tiLclVar), true),
9875 if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
9877 Verify(!tiLclVar.IsThisPtr(), "storing to uninit this ptr");
9884 lclNum = getU2LittleEndian(codeAddr);
9885 JITDUMP(" %u", lclNum);
9889 lclNum = getU1LittleEndian(codeAddr);
9890 JITDUMP(" %u", lclNum);
9897 lclNum = (opcode - CEE_STLOC_0);
9898 assert(lclNum >= 0 && lclNum < 4);
9901 if (tiVerificationNeeded)
9903 Verify(lclNum < info.compMethodInfo->locals.numArgs, "bad local num");
9904 Verify(tiCompatibleWith(impStackTop().seTypeInfo,
9905 NormaliseForStack(lvaTable[lclNum + numArgs].lvVerTypeInfo), true),
9909 if (compIsForInlining())
9911 lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
9913 /* Have we allocated a temp for this local? */
9915 lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline stloc first use temp"));
9924 if (lclNum >= info.compLocalsCount && lclNum != lvaArg0Var)
9926 assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
9932 /* if it is a struct assignment, make certain we don't overflow the buffer */
9933 assert(lclTyp != TYP_STRUCT || lvaLclSize(lclNum) >= info.compCompHnd->getClassSize(clsHnd));
9935 if (lvaTable[lclNum].lvNormalizeOnLoad())
9937 lclTyp = lvaGetRealType(lclNum);
9941 lclTyp = lvaGetActualType(lclNum);
9945 /* Pop the value being assigned */
9948 StackEntry se = impPopStack(clsHnd);
9950 tiRetVal = se.seTypeInfo;
9954 if (varTypeIsSIMD(lclTyp) && (lclTyp != op1->TypeGet()))
9956 assert(op1->TypeGet() == TYP_STRUCT);
9957 op1->gtType = lclTyp;
9959 #endif // FEATURE_SIMD
9961 op1 = impImplicitIorI4Cast(op1, lclTyp);
9963 #ifdef _TARGET_64BIT_
9964 // Downcast the TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
9965 if (varTypeIsI(op1->TypeGet()) && (genActualType(lclTyp) == TYP_INT))
9967 assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
9968 op1 = gtNewCastNode(TYP_INT, op1, TYP_INT);
9970 #endif // _TARGET_64BIT_
9972 // We had better assign it a value of the correct type
9974 genActualType(lclTyp) == genActualType(op1->gtType) ||
9975 genActualType(lclTyp) == TYP_I_IMPL && op1->IsVarAddr() ||
9976 (genActualType(lclTyp) == TYP_I_IMPL && (op1->gtType == TYP_BYREF || op1->gtType == TYP_REF)) ||
9977 (genActualType(op1->gtType) == TYP_I_IMPL && lclTyp == TYP_BYREF) ||
9978 (varTypeIsFloating(lclTyp) && varTypeIsFloating(op1->TypeGet())) ||
9979 ((genActualType(lclTyp) == TYP_BYREF) && genActualType(op1->TypeGet()) == TYP_REF));
9981 /* If op1 is "&var" then its type is the transient "*" and it can
9982 be used either as TYP_BYREF or TYP_I_IMPL */
9984 if (op1->IsVarAddr())
9986 assertImp(genActualType(lclTyp) == TYP_I_IMPL || lclTyp == TYP_BYREF);
9988 /* When "&var" is created, we assume it is a byref. If it is
9989 being assigned to a TYP_I_IMPL var, change the type to
9990 prevent unnecessary GC info */
9992 if (genActualType(lclTyp) == TYP_I_IMPL)
9994 op1->gtType = TYP_I_IMPL;
9998 /* Filter out simple assignments to itself */
10000 if (op1->gtOper == GT_LCL_VAR && lclNum == op1->gtLclVarCommon.gtLclNum)
10004 // This is a sequence of (ldloc, dup, stloc). Can simplify
10005 // to (ldloc, stloc). Goto LDVAR to reconstruct the ldloc node.
10006 CLANG_FORMAT_COMMENT_ANCHOR;
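// For illustration (hypothetical IL):
//     ldloc.0
//     dup
//     stloc.0
// The dup left the original value on the stack and the store writes the local back to
// itself, so the store is a no-op; we simply reload the local (impLoadVar below) instead.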
10009 if (tiVerificationNeeded)
10012 typeInfo::AreEquivalent(tiRetVal, NormaliseForStack(lvaTable[lclNum].lvVerTypeInfo)));
10017 insertLdloc = false;
10019 impLoadVar(lclNum, opcodeOffs + sz + 1);
10022 else if (opts.compDbgCode)
10024 op1 = gtNewNothingNode();
10033 /* Create the assignment node */
10035 op2 = gtNewLclvNode(lclNum, lclTyp, opcodeOffs + sz + 1);
10037 /* If the local is aliased, we need to spill calls and
10038 indirections from the stack. */
10040 if ((lvaTable[lclNum].lvAddrExposed || lvaTable[lclNum].lvHasLdAddrOp) &&
10041 verCurrentState.esStackDepth > 0)
10043 impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("Local could be aliased"));
10046 /* Spill any refs to the local from the stack */
10048 impSpillLclRefs(lclNum);
10050 #if !FEATURE_X87_DOUBLES
10051 // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE
10052 // We insert a cast to the dest 'op2' type
10054 if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) &&
10055 varTypeIsFloating(op2->gtType))
10057 op1 = gtNewCastNode(op2->TypeGet(), op1, op2->TypeGet());
10059 #endif // !FEATURE_X87_DOUBLES
10061 if (varTypeIsStruct(lclTyp))
10063 op1 = impAssignStruct(op2, op1, clsHnd, (unsigned)CHECK_SPILL_ALL);
10067 // The code generator generates GC tracking information
10068 // based on the RHS of the assignment. Later the LHS (which
10069 // is a BYREF) gets used and the emitter checks that that variable
10070 // is being tracked. It is not (since the RHS was an int and did
10071 // not need tracking). To keep this assert happy, we change the RHS
10072 if (lclTyp == TYP_BYREF && !varTypeIsGC(op1->gtType))
10074 op1->gtType = TYP_BYREF;
10076 op1 = gtNewAssignNode(op2, op1);
10079 /* If insertLdloc is true, then we need to insert a ldloc following the
10080 stloc. This is done when converting a (dup, stloc) sequence into
10081 a (stloc, ldloc) sequence. */
10085 // From SPILL_APPEND
10086 impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
10089 // From DONE_APPEND
10090 impNoteLastILoffs();
10093 insertLdloc = false;
10095 impLoadVar(lclNum, opcodeOffs + sz + 1, tiRetVal);
10102 lclNum = getU2LittleEndian(codeAddr);
10106 lclNum = getU1LittleEndian(codeAddr);
10108 JITDUMP(" %u", lclNum);
10109 if (tiVerificationNeeded)
10111 Verify(lclNum < info.compMethodInfo->locals.numArgs, "bad local num");
10112 Verify(info.compInitMem, "initLocals not set");
10115 if (compIsForInlining())
10117 // Get the local type
10118 lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
10120 /* Have we allocated a temp for this local? */
10122 lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline ldloca(s) first use temp"));
10124 op1 = gtNewLclvNode(lclNum, lvaGetActualType(lclNum));
10130 assertImp(lclNum < info.compLocalsCount);
10134 lclNum = getU2LittleEndian(codeAddr);
10138 lclNum = getU1LittleEndian(codeAddr);
10140 JITDUMP(" %u", lclNum);
10141 Verify(lclNum < info.compILargsCount, "bad arg num");
10143 if (compIsForInlining())
10145 // In IL, LDARGA(_S) is used to load the byref managed pointer of a struct argument,
10146 // followed by a ldfld to load the field.
10148 op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo);
10149 if (op1->gtOper != GT_LCL_VAR)
10151 compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDARGA_NOT_LOCAL_VAR);
10155 assert(op1->gtOper == GT_LCL_VAR);
10160 lclNum = compMapILargNum(lclNum); // account for possible hidden param
10161 assertImp(lclNum < numArgs);
10163 if (lclNum == info.compThisArg)
10165 lclNum = lvaArg0Var;
10172 op1 = gtNewLclvNode(lclNum, lvaGetActualType(lclNum), opcodeOffs + sz + 1);
10175 assert(op1->gtOper == GT_LCL_VAR);
10177 /* Note that this is supposed to create the transient type "*"
10178 which may be used as a TYP_I_IMPL. However we catch places
10179 where it is used as a TYP_I_IMPL and change the node if needed.
10180 Thus we are pessimistic and may report byrefs in the GC info
10181 where it was not absolutely needed, but it is safer this way. */
10183 op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, op1);
10185 // &aliasedVar doesn't need GTF_GLOB_REF, though aliasedVar does
10186 assert((op1->gtFlags & GTF_GLOB_REF) == 0);
10188 tiRetVal = lvaTable[lclNum].lvVerTypeInfo;
10189 if (tiVerificationNeeded)
10191 // Don't allow taking address of uninit this ptr.
10192 if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
10194 Verify(!tiRetVal.IsThisPtr(), "address of uninit this ptr");
10197 if (!tiRetVal.IsByRef())
10199 tiRetVal.MakeByRef();
10203 Verify(false, "byref to byref");
10207 impPushOnStack(op1, tiRetVal);
10212 if (!info.compIsVarArgs)
10214 BADCODE("arglist in non-vararg method");
10217 if (tiVerificationNeeded)
10219 tiRetVal = typeInfo(TI_STRUCT, impGetRuntimeArgumentHandle());
10221 assertImp((info.compMethodInfo->args.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG);
10223 /* The ARGLIST cookie is a hidden 'last' parameter; we have already
10224 adjusted the arg count because this is like fetching the last param */
10225 assertImp(0 < numArgs);
10226 assert(lvaTable[lvaVarargsHandleArg].lvAddrExposed);
10227 lclNum = lvaVarargsHandleArg;
10228 op1 = gtNewLclvNode(lclNum, TYP_I_IMPL, opcodeOffs + sz + 1);
10229 op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, op1);
10230 impPushOnStack(op1, tiRetVal);
10233 case CEE_ENDFINALLY:
10235 if (compIsForInlining())
10237 assert(!"Shouldn't have exception handlers in the inliner!");
10238 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFINALLY);
10242 if (verCurrentState.esStackDepth > 0)
10244 impEvalSideEffects();
10247 if (info.compXcptnsCount == 0)
10249 BADCODE("endfinally outside finally");
10252 assert(verCurrentState.esStackDepth == 0);
10254 op1 = gtNewOperNode(GT_RETFILT, TYP_VOID, nullptr);
10257 case CEE_ENDFILTER:
10259 if (compIsForInlining())
10261 assert(!"Shouldn't have exception handlers in the inliner!");
10262 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFILTER);
10266 block->bbSetRunRarely(); // filters are rare
10268 if (info.compXcptnsCount == 0)
10270 BADCODE("endfilter outside filter");
10273 if (tiVerificationNeeded)
10275 Verify(impStackTop().seTypeInfo.IsType(TI_INT), "bad endfilt arg");
10278 op1 = impPopStack().val;
10279 assertImp(op1->gtType == TYP_INT);
10280 if (!bbInFilterILRange(block))
10282 BADCODE("EndFilter outside a filter handler");
10285 /* Mark current bb as end of filter */
10287 assert(compCurBB->bbFlags & BBF_DONT_REMOVE);
10288 assert(compCurBB->bbJumpKind == BBJ_EHFILTERRET);
10290 /* Mark catch handler as successor */
10292 op1 = gtNewOperNode(GT_RETFILT, op1->TypeGet(), op1);
10293 if (verCurrentState.esStackDepth != 0)
10295 verRaiseVerifyException(INDEBUG("stack must be 1 on end of filter") DEBUGARG(__FILE__)
10296 DEBUGARG(__LINE__));
10301 prefixFlags &= ~PREFIX_TAILCALL; // ret without call before it
10303 if (!impReturnInstruction(block, prefixFlags, opcode))
10314 assert(!compIsForInlining());
10316 if (tiVerificationNeeded)
10318 Verify(false, "Invalid opcode: CEE_JMP");
10321 if ((info.compFlags & CORINFO_FLG_SYNCH) || block->hasTryIndex() || block->hasHndIndex())
10323 /* CEE_JMP does not make sense in some "protected" regions. */
10325 BADCODE("Jmp not allowed in protected region");
10328 if (verCurrentState.esStackDepth != 0)
10330 BADCODE("Stack must be empty after CEE_JMPs");
10333 _impResolveToken(CORINFO_TOKENKIND_Method);
10335 JITDUMP(" %08X", resolvedToken.token);
10337 /* The signature of the target has to be identical to ours.
10338 At least check that argCnt and returnType match */
10340 eeGetMethodSig(resolvedToken.hMethod, &sig);
10341 if (sig.numArgs != info.compMethodInfo->args.numArgs ||
10342 sig.retType != info.compMethodInfo->args.retType ||
10343 sig.callConv != info.compMethodInfo->args.callConv)
10345 BADCODE("Incompatible target for CEE_JMPs");
10348 #if defined(_TARGET_XARCH_) || defined(_TARGET_ARMARCH_)
10350 op1 = new (this, GT_JMP) GenTreeVal(GT_JMP, TYP_VOID, (size_t)resolvedToken.hMethod);
10352 /* Mark the basic block as being a JUMP instead of RETURN */
10354 block->bbFlags |= BBF_HAS_JMP;
10356 /* Set this flag to make sure register arguments have a location assigned
10357 * even if we don't use them inside the method */
10359 compJmpOpUsed = true;
10361 fgNoStructPromotion = true;
10365 #else // !_TARGET_XARCH_ && !_TARGET_ARMARCH_
10367 // Import this just like a series of LDARGs + tail. + call + ret
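// i.e. a 'jmp Target' with two incoming args is imported roughly as if the IL were
// (hypothetical):
//     ldarg.0
//     ldarg.1
//     tail. call Target
//     ret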
10369 if (info.compIsVarArgs)
10371 // For now we don't implement true tail calls, so this breaks varargs.
10372 // So warn the user instead of generating bad code.
10373 // This is a semi-temporary workaround for DevDiv 173860, until we can properly
10374 // implement true tail calls.
10375 IMPL_LIMITATION("varargs + CEE_JMP doesn't work yet");
10378 // First load up the arguments (0 - N)
10379 for (unsigned argNum = 0; argNum < info.compILargsCount; argNum++)
10381 impLoadArg(argNum, opcodeOffs + sz + 1);
10384 // Now generate the tail call
10385 noway_assert(prefixFlags == 0);
10386 prefixFlags = PREFIX_TAILCALL_EXPLICIT;
10389 eeGetCallInfo(&resolvedToken, NULL,
10390 combine(CORINFO_CALLINFO_ALLOWINSTPARAM, CORINFO_CALLINFO_SECURITYCHECKS), &callInfo);
10392 // All calls and delegates need a security callout.
10393 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
10395 callTyp = impImportCall(CEE_CALL, &resolvedToken, NULL, NULL, PREFIX_TAILCALL_EXPLICIT, &callInfo,
10398 // And finish with the ret
10401 #endif // _TARGET_XARCH_ || _TARGET_ARMARCH_
10404 assertImp(sz == sizeof(unsigned));
10406 _impResolveToken(CORINFO_TOKENKIND_Class);
10408 JITDUMP(" %08X", resolvedToken.token);
10410 ldelemClsHnd = resolvedToken.hClass;
10412 if (tiVerificationNeeded)
10414 typeInfo tiArray = impStackTop(1).seTypeInfo;
10415 typeInfo tiIndex = impStackTop().seTypeInfo;
10417 // As per ECMA, the 'index' specified can be either int32 or native int.
10418 Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10420 typeInfo arrayElemType = verMakeTypeInfo(ldelemClsHnd);
10421 Verify(tiArray.IsNullObjRef() ||
10422 typeInfo::AreEquivalent(verGetArrayElemType(tiArray), arrayElemType),
10425 tiRetVal = arrayElemType;
10426 tiRetVal.MakeByRef();
10427 if (prefixFlags & PREFIX_READONLY)
10429 tiRetVal.SetIsReadonlyByRef();
10432 // an array interior pointer is always in the heap
10433 tiRetVal.SetIsPermanentHomeByRef();
10436 // If it's a value class array we just do a simple address-of
10437 if (eeIsValueClass(ldelemClsHnd))
10439 CorInfoType cit = info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd);
10440 if (cit == CORINFO_TYPE_UNDEF)
10442 lclTyp = TYP_STRUCT;
10446 lclTyp = JITtype2varType(cit);
10448 goto ARR_LD_POST_VERIFY;
10451 // Similarly, if it's a readonly access, we can do a simple address-of
10452 // without doing a runtime type-check
10453 if (prefixFlags & PREFIX_READONLY)
10456 goto ARR_LD_POST_VERIFY;
10459 // Otherwise we need the full helper function with run-time type check
10460 op1 = impTokenToHandle(&resolvedToken);
10461 if (op1 == nullptr)
10462 { // compDonotInline()
10466 args = gtNewArgList(op1); // Type
10467 args = gtNewListNode(impPopStack().val, args); // index
10468 args = gtNewListNode(impPopStack().val, args); // array
10469 op1 = gtNewHelperCallNode(CORINFO_HELP_LDELEMA_REF, TYP_BYREF, GTF_EXCEPT, args);
10471 impPushOnStack(op1, tiRetVal);
10474 // ldelem for reference and value types
10476 assertImp(sz == sizeof(unsigned));
10478 _impResolveToken(CORINFO_TOKENKIND_Class);
10480 JITDUMP(" %08X", resolvedToken.token);
10482 ldelemClsHnd = resolvedToken.hClass;
10484 if (tiVerificationNeeded)
10486 typeInfo tiArray = impStackTop(1).seTypeInfo;
10487 typeInfo tiIndex = impStackTop().seTypeInfo;
10489 // As per ECMA, the 'index' specified can be either int32 or native int.
10490 Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10491 tiRetVal = verMakeTypeInfo(ldelemClsHnd);
10493 Verify(tiArray.IsNullObjRef() || tiCompatibleWith(verGetArrayElemType(tiArray), tiRetVal, false),
10494 "type of array incompatible with type operand");
10495 tiRetVal.NormaliseForStack();
10498 // If it's a reference type or generic variable type
10499 // then just generate code as though it's a ldelem.ref instruction
10500 if (!eeIsValueClass(ldelemClsHnd))
10503 opcode = CEE_LDELEM_REF;
10507 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(ldelemClsHnd);
10508 lclTyp = JITtype2varType(jitTyp);
10509 tiRetVal = verMakeTypeInfo(ldelemClsHnd); // precise type always needed for struct
10510 tiRetVal.NormaliseForStack();
10512 goto ARR_LD_POST_VERIFY;
10514 case CEE_LDELEM_I1:
10517 case CEE_LDELEM_I2:
10518 lclTyp = TYP_SHORT;
10521 lclTyp = TYP_I_IMPL;
10524 // Should be UINT, but since no platform widens 4->8 bytes it doesn't matter
10525 // and treating it as TYP_INT avoids other asserts.
10526 case CEE_LDELEM_U4:
10530 case CEE_LDELEM_I4:
10533 case CEE_LDELEM_I8:
10536 case CEE_LDELEM_REF:
10539 case CEE_LDELEM_R4:
10540 lclTyp = TYP_FLOAT;
10542 case CEE_LDELEM_R8:
10543 lclTyp = TYP_DOUBLE;
10545 case CEE_LDELEM_U1:
10546 lclTyp = TYP_UBYTE;
10548 case CEE_LDELEM_U2:
10554 if (tiVerificationNeeded)
10556 typeInfo tiArray = impStackTop(1).seTypeInfo;
10557 typeInfo tiIndex = impStackTop().seTypeInfo;
10559 // As per ECMA, the 'index' specified can be either int32 or native int.
10560 Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10561 if (tiArray.IsNullObjRef())
10563 if (lclTyp == TYP_REF)
10564 { // we will say a deref of a null array yields a null ref
10565 tiRetVal = typeInfo(TI_NULL);
10569 tiRetVal = typeInfo(lclTyp);
10574 tiRetVal = verGetArrayElemType(tiArray);
10575 typeInfo arrayElemTi = typeInfo(lclTyp);
10576 #ifdef _TARGET_64BIT_
10577 if (opcode == CEE_LDELEM_I)
10579 arrayElemTi = typeInfo::nativeInt();
10582 if (lclTyp != TYP_REF && lclTyp != TYP_STRUCT)
10584 Verify(typeInfo::AreEquivalent(tiRetVal, arrayElemTi), "bad array");
10587 #endif // _TARGET_64BIT_
10589 Verify(tiRetVal.IsType(arrayElemTi.GetType()), "bad array");
10592 tiRetVal.NormaliseForStack();
10594 ARR_LD_POST_VERIFY:
10596 /* Pull the index value and array address */
10597 op2 = impPopStack().val;
10598 op1 = impPopStack().val;
10599 assertImp(op1->gtType == TYP_REF);
10601 /* Check for null pointer - in the inliner case we simply abort */
10603 if (compIsForInlining())
10605 if (op1->gtOper == GT_CNS_INT)
10607 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NULL_FOR_LDELEM);
10612 op1 = impCheckForNullPointer(op1);
10614 /* Mark the block as containing an index expression */
10616 if (op1->gtOper == GT_LCL_VAR)
10618 if (op2->gtOper == GT_LCL_VAR || op2->gtOper == GT_CNS_INT || op2->gtOper == GT_ADD)
10620 block->bbFlags |= BBF_HAS_IDX_LEN;
10621 optMethodFlags |= OMF_HAS_ARRAYREF;
10625 /* Create the index node and push it on the stack */
10627 op1 = gtNewIndexRef(lclTyp, op1, op2);
10629 ldstruct = (opcode == CEE_LDELEM && lclTyp == TYP_STRUCT);
10631 if ((opcode == CEE_LDELEMA) || ldstruct ||
10632 (ldelemClsHnd != DUMMY_INIT(NULL) && eeIsValueClass(ldelemClsHnd)))
10634 assert(ldelemClsHnd != DUMMY_INIT(NULL));
10636 // remember the element size
10637 if (lclTyp == TYP_REF)
10639 op1->gtIndex.gtIndElemSize = sizeof(void*);
10643 // If ldElemClass is precisely a primitive type, use that; otherwise, preserve the struct type.
10644 if (info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd) == CORINFO_TYPE_UNDEF)
10646 op1->gtIndex.gtStructElemClass = ldelemClsHnd;
10648 assert(lclTyp != TYP_STRUCT || op1->gtIndex.gtStructElemClass != nullptr);
10649 if (lclTyp == TYP_STRUCT)
10651 size = info.compCompHnd->getClassSize(ldelemClsHnd);
10652 op1->gtIndex.gtIndElemSize = size;
10653 op1->gtType = lclTyp;
10657 if ((opcode == CEE_LDELEMA) || ldstruct)
10660 lclTyp = TYP_BYREF;
10662 op1 = gtNewOperNode(GT_ADDR, lclTyp, op1);
10666 assert(lclTyp != TYP_STRUCT);
10672 // Create an OBJ for the result
10673 op1 = gtNewObjNode(ldelemClsHnd, op1);
10674 op1->gtFlags |= GTF_EXCEPT;
10676 impPushOnStack(op1, tiRetVal);
10679 // stelem for reference and value types
10682 assertImp(sz == sizeof(unsigned));
10684 _impResolveToken(CORINFO_TOKENKIND_Class);
10686 JITDUMP(" %08X", resolvedToken.token);
10688 stelemClsHnd = resolvedToken.hClass;
10690 if (tiVerificationNeeded)
10692 typeInfo tiArray = impStackTop(2).seTypeInfo;
10693 typeInfo tiIndex = impStackTop(1).seTypeInfo;
10694 typeInfo tiValue = impStackTop().seTypeInfo;
10696 // As per ECMA, the 'index' specified can be either int32 or native int.
10697 Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10698 typeInfo arrayElem = verMakeTypeInfo(stelemClsHnd);
10700 Verify(tiArray.IsNullObjRef() || tiCompatibleWith(arrayElem, verGetArrayElemType(tiArray), false),
10701 "type operand incompatible with array element type");
10702 arrayElem.NormaliseForStack();
10703 Verify(tiCompatibleWith(tiValue, arrayElem, true), "value incompatible with type operand");
10706 // If it's a reference type just behave as though it's a stelem.ref instruction
10707 if (!eeIsValueClass(stelemClsHnd))
10709 goto STELEM_REF_POST_VERIFY;
10712 // Otherwise extract the type
10714 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(stelemClsHnd);
10715 lclTyp = JITtype2varType(jitTyp);
10716 goto ARR_ST_POST_VERIFY;
10719 case CEE_STELEM_REF:
10721 if (tiVerificationNeeded)
10723 typeInfo tiArray = impStackTop(2).seTypeInfo;
10724 typeInfo tiIndex = impStackTop(1).seTypeInfo;
10725 typeInfo tiValue = impStackTop().seTypeInfo;
10728 // As per ECMA, the 'index' specified can be either int32 or native int.
10728 Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10729 Verify(tiValue.IsObjRef(), "bad value");
10731 // We only check that it is an object reference; the helper does additional checks
10732 Verify(tiArray.IsNullObjRef() || verGetArrayElemType(tiArray).IsType(TI_REF), "bad array");
10735 arrayNodeTo = impStackTop(2).val;
10736 arrayNodeToIndex = impStackTop(1).val;
10737 arrayNodeFrom = impStackTop().val;
10740 // Note that it is not legal to optimize away CORINFO_HELP_ARRADDR_ST in a
10741 // lot of cases because of covariance; e.g. foo[] can be cast to object[].
10744 // Check for assignment to the same array, e.g. arrLcl[i] = arrLcl[j].
10745 // This does not need CORINFO_HELP_ARRADDR_ST
10747 if (arrayNodeFrom->OperGet() == GT_INDEX && arrayNodeFrom->gtOp.gtOp1->gtOper == GT_LCL_VAR &&
10748 arrayNodeTo->gtOper == GT_LCL_VAR &&
10749 arrayNodeTo->gtLclVarCommon.gtLclNum == arrayNodeFrom->gtOp.gtOp1->gtLclVarCommon.gtLclNum &&
10750 !lvaTable[arrayNodeTo->gtLclVarCommon.gtLclNum].lvAddrExposed)
10753 goto ARR_ST_POST_VERIFY;
10756 // Check for assignment of NULL. This does not need CORINFO_HELP_ARRADDR_ST
10758 if (arrayNodeFrom->OperGet() == GT_CNS_INT)
10760 assert(arrayNodeFrom->gtType == TYP_REF && arrayNodeFrom->gtIntCon.gtIconVal == 0);
10763 goto ARR_ST_POST_VERIFY;
10766 STELEM_REF_POST_VERIFY:
10768 /* Call a helper function to do the assignment */
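// CORINFO_HELP_ARRADDR_ST performs the run-time array covariance check (throwing
// ArrayTypeMismatchException when the element type doesn't match) in addition to the
// store itself, which is why it can't simply be elided in the general case.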
10769 op1 = gtNewHelperCallNode(CORINFO_HELP_ARRADDR_ST, TYP_VOID, 0, impPopList(3, &flags, nullptr));
10773 case CEE_STELEM_I1:
10776 case CEE_STELEM_I2:
10777 lclTyp = TYP_SHORT;
10780 lclTyp = TYP_I_IMPL;
10782 case CEE_STELEM_I4:
10785 case CEE_STELEM_I8:
10788 case CEE_STELEM_R4:
10789 lclTyp = TYP_FLOAT;
10791 case CEE_STELEM_R8:
10792 lclTyp = TYP_DOUBLE;
10797 if (tiVerificationNeeded)
10799 typeInfo tiArray = impStackTop(2).seTypeInfo;
10800 typeInfo tiIndex = impStackTop(1).seTypeInfo;
10801 typeInfo tiValue = impStackTop().seTypeInfo;
10804 // As per ECMA, the 'index' specified can be either int32 or native int.
10804 Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10805 typeInfo arrayElem = typeInfo(lclTyp);
10806 #ifdef _TARGET_64BIT_
10807 if (opcode == CEE_STELEM_I)
10809 arrayElem = typeInfo::nativeInt();
10811 #endif // _TARGET_64BIT_
10812 Verify(tiArray.IsNullObjRef() || typeInfo::AreEquivalent(verGetArrayElemType(tiArray), arrayElem),
10815 Verify(tiCompatibleWith(NormaliseForStack(tiValue), arrayElem.NormaliseForStack(), true),
10819 ARR_ST_POST_VERIFY:
10820 /* The strict order of evaluation is LHS-operands, RHS-operands,
10821 range-check, and then assignment. However, codegen currently
10822 does the range-check before evaluating the RHS-operands. So to
10823 maintain strict ordering, we spill the stack. */
10825 if (impStackTop().val->gtFlags & GTF_SIDE_EFFECT)
10827 impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
10828 "Strict ordering of exceptions for Array store"));
10831 /* Pull the new value from the stack */
10832 op2 = impPopStack().val;
10834 /* Pull the index value */
10835 op1 = impPopStack().val;
10837 /* Pull the array address */
10838 op3 = impPopStack().val;
10840 assertImp(op3->gtType == TYP_REF);
10841 if (op2->IsVarAddr())
10843 op2->gtType = TYP_I_IMPL;
10846 op3 = impCheckForNullPointer(op3);
10848 // Mark the block as containing an index expression
10850 if (op3->gtOper == GT_LCL_VAR)
10852 if (op1->gtOper == GT_LCL_VAR || op1->gtOper == GT_CNS_INT || op1->gtOper == GT_ADD)
10854 block->bbFlags |= BBF_HAS_IDX_LEN;
10855 optMethodFlags |= OMF_HAS_ARRAYREF;
10859 /* Create the index node */
10861 op1 = gtNewIndexRef(lclTyp, op3, op1);
10863 /* Create the assignment node and append it */
10865 if (lclTyp == TYP_STRUCT)
10867 assert(stelemClsHnd != DUMMY_INIT(NULL));
10869 op1->gtIndex.gtStructElemClass = stelemClsHnd;
10870 op1->gtIndex.gtIndElemSize = info.compCompHnd->getClassSize(stelemClsHnd);
10872 if (varTypeIsStruct(op1))
10874 op1 = impAssignStruct(op1, op2, stelemClsHnd, (unsigned)CHECK_SPILL_ALL);
10878 op2 = impImplicitR4orR8Cast(op2, op1->TypeGet());
10879 op1 = gtNewAssignNode(op1, op2);
10882 /* Mark the expression as containing an assignment */
10884 op1->gtFlags |= GTF_ASG;
10895 case CEE_ADD_OVF_UN:
10903 goto MATH_OP2_FLAGS;
10912 case CEE_SUB_OVF_UN:
10920 goto MATH_OP2_FLAGS;
10924 goto MATH_MAYBE_CALL_NO_OVF;
10929 case CEE_MUL_OVF_UN:
10936 goto MATH_MAYBE_CALL_OVF;
10938 // Other binary math operations
10942 goto MATH_MAYBE_CALL_NO_OVF;
10946 goto MATH_MAYBE_CALL_NO_OVF;
10950 goto MATH_MAYBE_CALL_NO_OVF;
10954 goto MATH_MAYBE_CALL_NO_OVF;
10956 MATH_MAYBE_CALL_NO_OVF:
10958 MATH_MAYBE_CALL_OVF:
10959 // Morpher has some complex logic about when to turn different
10960 // typed nodes on different platforms into helper calls. We
10961 // need to either duplicate that logic here, or just
10962 // pessimistically make all the nodes large enough to become
10963 // call nodes. Since call nodes aren't that much larger and
10964 // these opcodes are infrequent enough I chose the latter.
10966 goto MATH_OP2_FLAGS;
10978 MATH_OP2: // For default values of 'ovfl' and 'callNode'
10983 MATH_OP2_FLAGS: // If 'ovfl' and 'callNode' have already been set
10985 /* Pull two values and push back the result */
10987 if (tiVerificationNeeded)
10989 const typeInfo& tiOp1 = impStackTop(1).seTypeInfo;
10990 const typeInfo& tiOp2 = impStackTop().seTypeInfo;
10992 Verify(tiCompatibleWith(tiOp1, tiOp2, true), "different arg type");
10993 if (oper == GT_ADD || oper == GT_DIV || oper == GT_SUB || oper == GT_MUL || oper == GT_MOD)
10995 Verify(tiOp1.IsNumberType(), "not number");
10999 Verify(tiOp1.IsIntegerType(), "not integer");
11002 Verify(!ovfl || tiOp1.IsIntegerType(), "not integer");
11006 #ifdef _TARGET_64BIT_
11007 if (tiOp2.IsNativeIntType())
11011 #endif // _TARGET_64BIT_
11014 op2 = impPopStack().val;
11015 op1 = impPopStack().val;
11017 #if !CPU_HAS_FP_SUPPORT
11018 if (varTypeIsFloating(op1->gtType))
11023 /* Can't do arithmetic with references */
11024 assertImp(genActualType(op1->TypeGet()) != TYP_REF && genActualType(op2->TypeGet()) != TYP_REF);
11026 // Change both to TYP_I_IMPL (impBashVarAddrsToI won't change it if it's a true byref, only
11027 // if it is in the stack)
11028 impBashVarAddrsToI(op1, op2);
11030 type = impGetByRefResultType(oper, uns, &op1, &op2);
11032 assert(!ovfl || !varTypeIsFloating(op1->gtType));
11034 /* Special case: "int+0", "int-0", "int*1", "int/1" */
11036 if (op2->gtOper == GT_CNS_INT)
11038 if ((op2->IsIntegralConst(0) && (oper == GT_ADD || oper == GT_SUB)) ||
11039 (op2->IsIntegralConst(1) && (oper == GT_MUL || oper == GT_DIV)))
11042 impPushOnStack(op1, tiRetVal);
11047 #if !FEATURE_X87_DOUBLES
11048 // We can generate a TYP_FLOAT operation that has a TYP_DOUBLE operand
11050 if (varTypeIsFloating(type) && varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType))
11052 if (op1->TypeGet() != type)
11054 // We insert a cast of op1 to 'type'
11055 op1 = gtNewCastNode(type, op1, type);
11057 if (op2->TypeGet() != type)
11059 // We insert a cast of op2 to 'type'
11060 op2 = gtNewCastNode(type, op2, type);
11063 #endif // !FEATURE_X87_DOUBLES
11065 #if SMALL_TREE_NODES
11068 /* These operators can later be transformed into 'GT_CALL' */
11070 assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MUL]);
11071 #ifndef _TARGET_ARM_
11072 assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_DIV]);
11073 assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UDIV]);
11074 assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MOD]);
11075 assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UMOD]);
11077 // It's tempting to use LargeOpOpcode() here, but this logic is *not* saying
11078 // that we'll need to transform into a general large node, but rather specifically
11079 // to a call: by doing it this way, things keep working if there are multiple sizes,
11080 // and a CALL is no longer the largest.
11081 // That said, as of now it *is* a large node, so we'll do this with an assert rather than a runtime check.
11083 assert(GenTree::s_gtNodeSizes[GT_CALL] == TREE_NODE_SZ_LARGE);
11084 op1 = new (this, GT_CALL) GenTreeOp(oper, type, op1, op2 DEBUGARG(/*largeNode*/ true));
11087 #endif // SMALL_TREE_NODES
11089 op1 = gtNewOperNode(oper, type, op1, op2);
11092 /* Special case: integer/long division may throw an exception */
11094 if (varTypeIsIntegral(op1->TypeGet()) && op1->OperMayThrow())
11096 op1->gtFlags |= GTF_EXCEPT;
11101 assert(oper == GT_ADD || oper == GT_SUB || oper == GT_MUL);
11102 if (ovflType != TYP_UNKNOWN)
11104 op1->gtType = ovflType;
11106 op1->gtFlags |= (GTF_EXCEPT | GTF_OVERFLOW);
11109 op1->gtFlags |= GTF_UNSIGNED;
11113 impPushOnStack(op1, tiRetVal);
11128 if (tiVerificationNeeded)
11130 const typeInfo& tiVal = impStackTop(1).seTypeInfo;
11131 const typeInfo& tiShift = impStackTop(0).seTypeInfo;
11132 Verify(tiVal.IsIntegerType() && tiShift.IsType(TI_INT), "Bad shift args");
11135 op2 = impPopStack().val;
11136 op1 = impPopStack().val; // operand to be shifted
11137 impBashVarAddrsToI(op1, op2);
11139 type = genActualType(op1->TypeGet());
11140 op1 = gtNewOperNode(oper, type, op1, op2);
11142 impPushOnStack(op1, tiRetVal);
11146 if (tiVerificationNeeded)
11148 tiRetVal = impStackTop().seTypeInfo;
11149 Verify(tiRetVal.IsIntegerType(), "bad int value");
11152 op1 = impPopStack().val;
11153 impBashVarAddrsToI(op1, nullptr);
11154 type = genActualType(op1->TypeGet());
11155 impPushOnStack(gtNewOperNode(GT_NOT, type, op1), tiRetVal);
11159 if (tiVerificationNeeded)
11161 tiRetVal = impStackTop().seTypeInfo;
11162 Verify(tiRetVal.IsType(TI_DOUBLE), "bad R value");
11164 op1 = impPopStack().val;
11165 type = op1->TypeGet();
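// Per ECMA-335, ckfinite throws an ArithmeticException at run time if the value is NaN
// or an infinity; hence the GTF_EXCEPT flag set on the node below.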
11166 op1 = gtNewOperNode(GT_CKFINITE, type, op1);
11167 op1->gtFlags |= GTF_EXCEPT;
11169 impPushOnStack(op1, tiRetVal);
11174 val = getI4LittleEndian(codeAddr); // jump distance
11175 jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int32)) + val);
11179 val = getI1LittleEndian(codeAddr); // jump distance
11180 jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int8)) + val);
11184 if (compIsForInlining())
11186 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_LEAVE);
11190 JITDUMP(" %04X", jmpAddr);
11191 if (block->bbJumpKind != BBJ_LEAVE)
11193 impResetLeaveBlock(block, jmpAddr);
11196 assert(jmpAddr == block->bbJumpDest->bbCodeOffs);
11197 impImportLeave(block);
11198 impNoteBranchOffs();
11204 jmpDist = (sz == 1) ? getI1LittleEndian(codeAddr) : getI4LittleEndian(codeAddr);
11206 if (compIsForInlining() && jmpDist == 0)
11211 impNoteBranchOffs();
11217 case CEE_BRFALSE_S:
11219 /* Pop the comparand (now there's a neat term) from the stack */
11220 if (tiVerificationNeeded)
11222 typeInfo& tiVal = impStackTop().seTypeInfo;
11223 Verify(tiVal.IsObjRef() || tiVal.IsByRef() || tiVal.IsIntegerType() || tiVal.IsMethod(),
11227 op1 = impPopStack().val;
11228 type = op1->TypeGet();
11230 // brfalse and brtrue are only allowed on I4, refs, and byrefs.
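// If this conditional branch would just fall into the next block anyway, turn it into a
// plain fall-through and keep only the side effects of the popped condition.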
11231 if (!opts.MinOpts() && !opts.compDbgCode && block->bbJumpDest == block->bbNext)
11233 block->bbJumpKind = BBJ_NONE;
11235 if (op1->gtFlags & GTF_GLOB_EFFECT)
11237 op1 = gtUnusedValNode(op1);
11246 if (op1->OperIsCompare())
11248 if (opcode == CEE_BRFALSE || opcode == CEE_BRFALSE_S)
11250 // Flip the sense of the compare
11252 op1 = gtReverseCond(op1);
11257 /* We'll compare against an equally-sized integer 0 */
11258 /* For small types, we always compare against int */
11259 op2 = gtNewZeroConNode(genActualType(op1->gtType));
11261 /* Create the comparison operator and try to fold it */
11263 oper = (opcode == CEE_BRTRUE || opcode == CEE_BRTRUE_S) ? GT_NE : GT_EQ;
11264 op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
11271 /* Fold comparison if we can */
11273 op1 = gtFoldExpr(op1);
11275 /* Try to fold the really simple cases like 'iconst *, ifne/ifeq'*/
11276 /* Don't make any blocks unreachable in import only mode */
11278 if ((op1->gtOper == GT_CNS_INT) && !compIsForImportOnly())
11280 /* gtFoldExpr() should prevent this as we don't want to make any blocks
11281 unreachable under compDbgCode */
11282 assert(!opts.compDbgCode);
11284 BBjumpKinds foldedJumpKind = (BBjumpKinds)(op1->gtIntCon.gtIconVal ? BBJ_ALWAYS : BBJ_NONE);
11285 assertImp((block->bbJumpKind == BBJ_COND) // normal case
11286 || (block->bbJumpKind == foldedJumpKind)); // this can happen if we are reimporting the
11287 // block for the second time
11289 block->bbJumpKind = foldedJumpKind;
11293 if (op1->gtIntCon.gtIconVal)
11295 printf("\nThe conditional jump becomes an unconditional jump to BB%02u\n",
11296 block->bbJumpDest->bbNum);
11300 printf("\nThe block falls through into the next BB%02u\n", block->bbNext->bbNum);
11307 op1 = gtNewOperNode(GT_JTRUE, TYP_VOID, op1);
11309 /* GT_JTRUE is handled specially for non-empty stacks. See 'addStmt'
11310 in impImportBlock(block). For correct line numbers, spill stack. */
11312 if (opts.compDbgCode && impCurStmtOffs != BAD_IL_OFFSET)
11314 impSpillStackEnsure(true);
11341 if (tiVerificationNeeded)
11343 verVerifyCond(impStackTop(1).seTypeInfo, impStackTop().seTypeInfo, opcode);
11344 tiRetVal = typeInfo(TI_INT);
11347 op2 = impPopStack().val;
11348 op1 = impPopStack().val;
11350 #ifdef _TARGET_64BIT_
11351 if (varTypeIsI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_INT))
11353 op2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
11355 else if (varTypeIsI(op2->TypeGet()) && (genActualType(op1->TypeGet()) == TYP_INT))
11357 op1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
11359 #endif // _TARGET_64BIT_
11361 assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
11362 varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet()) ||
11363 varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
11365 /* Create the comparison node */
11367 op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
11369 /* TODO: setting both flags when only one is appropriate */
11370 if (opcode == CEE_CGT_UN || opcode == CEE_CLT_UN)
11372 op1->gtFlags |= GTF_RELOP_NAN_UN | GTF_UNSIGNED;
11375 impPushOnStack(op1, tiRetVal);
11381 goto CMP_2_OPs_AND_BR;
11386 goto CMP_2_OPs_AND_BR;
11391 goto CMP_2_OPs_AND_BR_UN;
11396 goto CMP_2_OPs_AND_BR;
11401 goto CMP_2_OPs_AND_BR_UN;
11406 goto CMP_2_OPs_AND_BR;
11411 goto CMP_2_OPs_AND_BR_UN;
11416 goto CMP_2_OPs_AND_BR;
11421 goto CMP_2_OPs_AND_BR_UN;
11426 goto CMP_2_OPs_AND_BR_UN;
11428 CMP_2_OPs_AND_BR_UN:
11431 goto CMP_2_OPs_AND_BR_ALL;
11435 goto CMP_2_OPs_AND_BR_ALL;
11436 CMP_2_OPs_AND_BR_ALL:
11438 if (tiVerificationNeeded)
11440 verVerifyCond(impStackTop(1).seTypeInfo, impStackTop().seTypeInfo, opcode);
11443 /* Pull two values */
11444 op2 = impPopStack().val;
11445 op1 = impPopStack().val;
11447 #ifdef _TARGET_64BIT_
11448 if ((op1->TypeGet() == TYP_I_IMPL) && (genActualType(op2->TypeGet()) == TYP_INT))
11450 op2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
11452 else if ((op2->TypeGet() == TYP_I_IMPL) && (genActualType(op1->TypeGet()) == TYP_INT))
11454 op1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
11456 #endif // _TARGET_64BIT_
11458 assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
11459 varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet()) ||
11460 varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
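// As with brtrue/brfalse above, a conditional branch whose target is simply the next
// block degenerates to fall-through; only the operands' side effects are kept.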
11462 if (!opts.MinOpts() && !opts.compDbgCode && block->bbJumpDest == block->bbNext)
11464 block->bbJumpKind = BBJ_NONE;
11466 if (op1->gtFlags & GTF_GLOB_EFFECT)
11468 impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
11469 "Branch to next Optimization, op1 side effect"));
11470 impAppendTree(gtUnusedValNode(op1), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
11472 if (op2->gtFlags & GTF_GLOB_EFFECT)
11474 impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
11475 "Branch to next Optimization, op2 side effect"));
11476 impAppendTree(gtUnusedValNode(op2), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
11480 if ((op1->gtFlags | op2->gtFlags) & GTF_GLOB_EFFECT)
11482 impNoteLastILoffs();
11487 #if !FEATURE_X87_DOUBLES
11488 // We can generate a compare of different-sized floating point op1 and op2
11489 // We insert a cast
11491 if (varTypeIsFloating(op1->TypeGet()))
11493 if (op1->TypeGet() != op2->TypeGet())
11495 assert(varTypeIsFloating(op2->TypeGet()));
11497 // say op1=double, op2=float. To avoid loss of precision
11498 // while comparing, op2 is converted to double and double
11499 // comparison is done.
11500 if (op1->TypeGet() == TYP_DOUBLE)
11502 // We insert a cast of op2 to TYP_DOUBLE
11503 op2 = gtNewCastNode(TYP_DOUBLE, op2, TYP_DOUBLE);
11505 else if (op2->TypeGet() == TYP_DOUBLE)
11507 // We insert a cast of op1 to TYP_DOUBLE
11508 op1 = gtNewCastNode(TYP_DOUBLE, op1, TYP_DOUBLE);
11512 #endif // !FEATURE_X87_DOUBLES
11514 /* Create and append the operator */
11516 op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
11520 op1->gtFlags |= GTF_UNSIGNED;
11525 op1->gtFlags |= GTF_RELOP_NAN_UN;
11531 assert(!compIsForInlining());
11533 if (tiVerificationNeeded)
11535 Verify(impStackTop().seTypeInfo.IsType(TI_INT), "Bad switch val");
11537 /* Pop the switch value off the stack */
11538 op1 = impPopStack().val;
11539 assertImp(genActualTypeIsIntOrI(op1->TypeGet()));
11541 #ifdef _TARGET_64BIT_
11542 // Widen 'op1' on 64-bit targets
11543 if (op1->TypeGet() != TYP_I_IMPL)
11545 if (op1->OperGet() == GT_CNS_INT)
11547 op1->gtType = TYP_I_IMPL;
11551 op1 = gtNewCastNode(TYP_I_IMPL, op1, TYP_I_IMPL);
11554 #endif // _TARGET_64BIT_
11555 assert(genActualType(op1->TypeGet()) == TYP_I_IMPL);
11557 /* We can create a switch node */
11559 op1 = gtNewOperNode(GT_SWITCH, TYP_VOID, op1);
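// Per ECMA-335, the 'switch' operand is a 4-byte case count followed by that many
// 4-byte relative jump offsets, so advance codeAddr past the entire jump table.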
11561 val = (int)getU4LittleEndian(codeAddr);
11562 codeAddr += 4 + val * 4; // skip over the switch-table
11566 /************************** Casting OPCODES ***************************/
11568 case CEE_CONV_OVF_I1:
11571 case CEE_CONV_OVF_I2:
11572 lclTyp = TYP_SHORT;
11574 case CEE_CONV_OVF_I:
11575 lclTyp = TYP_I_IMPL;
11577 case CEE_CONV_OVF_I4:
11580 case CEE_CONV_OVF_I8:
11584 case CEE_CONV_OVF_U1:
11585 lclTyp = TYP_UBYTE;
11587 case CEE_CONV_OVF_U2:
11590 case CEE_CONV_OVF_U:
11591 lclTyp = TYP_U_IMPL;
11593 case CEE_CONV_OVF_U4:
11596 case CEE_CONV_OVF_U8:
11597 lclTyp = TYP_ULONG;
11600 case CEE_CONV_OVF_I1_UN:
11603 case CEE_CONV_OVF_I2_UN:
11604 lclTyp = TYP_SHORT;
11606 case CEE_CONV_OVF_I_UN:
11607 lclTyp = TYP_I_IMPL;
11609 case CEE_CONV_OVF_I4_UN:
11612 case CEE_CONV_OVF_I8_UN:
11616 case CEE_CONV_OVF_U1_UN:
11617 lclTyp = TYP_UBYTE;
11619 case CEE_CONV_OVF_U2_UN:
11622 case CEE_CONV_OVF_U_UN:
11623 lclTyp = TYP_U_IMPL;
11625 case CEE_CONV_OVF_U4_UN:
11628 case CEE_CONV_OVF_U8_UN:
11629 lclTyp = TYP_ULONG;
11634 goto CONV_OVF_COMMON;
11637 goto CONV_OVF_COMMON;
11647 lclTyp = TYP_SHORT;
11650 lclTyp = TYP_I_IMPL;
11660 lclTyp = TYP_UBYTE;
11665 #if (REGSIZE_BYTES == 8)
11667 lclTyp = TYP_U_IMPL;
11671 lclTyp = TYP_U_IMPL;
11678 lclTyp = TYP_ULONG;
11682 lclTyp = TYP_FLOAT;
11685 lclTyp = TYP_DOUBLE;
11688 case CEE_CONV_R_UN:
11689 lclTyp = TYP_DOUBLE;
11703 // just check that we have a number on the stack
11704 if (tiVerificationNeeded)
11706 const typeInfo& tiVal = impStackTop().seTypeInfo;
11707 Verify(tiVal.IsNumberType(), "bad arg");
11709 #ifdef _TARGET_64BIT_
11710 bool isNative = false;
11714 case CEE_CONV_OVF_I:
11715 case CEE_CONV_OVF_I_UN:
11717 case CEE_CONV_OVF_U:
11718 case CEE_CONV_OVF_U_UN:
11722 // leave 'isNative' = false;
11727 tiRetVal = typeInfo::nativeInt();
11730 #endif // _TARGET_64BIT_
11732 tiRetVal = typeInfo(lclTyp).NormaliseForStack();
11736 // Only conversions from FLOAT or DOUBLE to an integer type,
11737 // and conversions from ULONG (or LONG on ARM) to DOUBLE, are morphed to calls
11739 if (varTypeIsFloating(lclTyp))
11741 callNode = varTypeIsLong(impStackTop().val) || uns // uint->dbl gets turned into uint->long->dbl
11742 #ifdef _TARGET_64BIT_
11743 // TODO-ARM64-Bug?: This was AMD64; I enabled it for ARM64 also. OK?
11744 // TYP_BYREF could be used as TYP_I_IMPL which is long.
11745 // TODO-CQ: remove this when we lower casts long/ulong --> float/double
11746 // and generate SSE2 code instead of going through helper calls.
11747 || (impStackTop().val->TypeGet() == TYP_BYREF)
11753 callNode = varTypeIsFloating(impStackTop().val->TypeGet());
11756 // At this point uns, ovf, callNode all set
11758 op1 = impPopStack().val;
11759 impBashVarAddrsToI(op1);
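// If the value was already masked down to fit the small target type, the cast is
// redundant. For example (hypothetical), 'conv.u1' applied to '(x & 0xFF)' needs no
// cast node at all, since the AND already constrains the value.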
11761 if (varTypeIsSmall(lclTyp) && !ovfl && op1->gtType == TYP_INT && op1->gtOper == GT_AND)
11763 op2 = op1->gtOp.gtOp2;
11765 if (op2->gtOper == GT_CNS_INT)
11767 ssize_t ival = op2->gtIntCon.gtIconVal;
11768 ssize_t mask, umask;
11784 assert(!"unexpected type");
11788 if (((ival & umask) == ival) || ((ival & mask) == ival && uns))
11790 /* Toss the cast, it's a waste of time */
11792 impPushOnStack(op1, tiRetVal);
11795 else if (ival == mask)
11797 /* Toss the masking, it's a waste of time, since
11798 we sign-extend from the small value anyway */
11800 op1 = op1->gtOp.gtOp1;
11805 /* The 'op2' sub-operand of a cast is the 'real' type number,
11806 since the result of a cast to one of the 'small' integer
11807 types is an integer. */
11810 type = genActualType(lclTyp);
11812 #if SMALL_TREE_NODES
11815 op1 = gtNewCastNodeL(type, op1, lclTyp);
11818 #endif // SMALL_TREE_NODES
11820 op1 = gtNewCastNode(type, op1, lclTyp);
11825 op1->gtFlags |= (GTF_OVERFLOW | GTF_EXCEPT);
11829 op1->gtFlags |= GTF_UNSIGNED;
11831 impPushOnStack(op1, tiRetVal);
11835 if (tiVerificationNeeded)
11837 tiRetVal = impStackTop().seTypeInfo;
11838 Verify(tiRetVal.IsNumberType(), "Bad arg");
11841 op1 = impPopStack().val;
11842 impBashVarAddrsToI(op1, nullptr);
11843 impPushOnStack(gtNewOperNode(GT_NEG, genActualType(op1->gtType), op1), tiRetVal);
11847 if (tiVerificationNeeded)
11852 /* Pull the top value from the stack */
11854 op1 = impPopStack(clsHnd).val;
11856 /* Get hold of the type of the value being duplicated */
11858 lclTyp = genActualType(op1->gtType);
11860 /* Does the value have any side effects? */
11862 if ((op1->gtFlags & GTF_SIDE_EFFECT) || opts.compDbgCode)
11864 // Since we are throwing away the value, just normalize
11865 // it to its address. This is more efficient.
11867 if (varTypeIsStruct(op1))
11869 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
11870 // Non-calls, such as obj or ret_expr, have to go through this.
11871 // Calls with large struct return value have to go through this.
11872 // Helper calls with small struct return value also have to go
11873 // through this since they do not follow Unix calling convention.
11874 if (op1->gtOper != GT_CALL || !IsMultiRegReturnedType(clsHnd) ||
11875 op1->AsCall()->gtCallType == CT_HELPER)
11876 #endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
11878 op1 = impGetStructAddr(op1, clsHnd, (unsigned)CHECK_SPILL_ALL, false);
11882 // If op1 is non-overflow cast, throw it away since it is useless.
11883 // Another reason for throwing away the useless cast is in the context of
11884 // implicit tail calls when the operand of pop is GT_CAST(GT_CALL(..)).
11885 // The cast gets added as part of importing GT_CALL, which gets in the way
11886 // of fgMorphCall() on the forms of tail call nodes that we assert.
11887 if ((op1->gtOper == GT_CAST) && !op1->gtOverflow())
11889 op1 = op1->gtOp.gtOp1;
11892 // If 'op1' is an expression, create an assignment node.
11893 // This helps analyses (like CSE) work correctly.
11895 if (op1->gtOper != GT_CALL)
11897 op1 = gtUnusedValNode(op1);
11900 /* Append the value to the tree list */
11904 /* No side effects - just throw the <BEEP> thing away */
11909 if (tiVerificationNeeded)
11912 // Dup could start the beginning of a delegate creation sequence; remember that
11912 delegateCreateStart = codeAddr - 1;
11916 // Convert a (dup, stloc) sequence into a (stloc, ldloc) sequence in the following cases:
11917 // - If this is non-debug code - so that CSE will recognize the two as equal.
11918 // This helps eliminate a redundant bounds check in cases such as:
11919 // ariba[i+3] += some_value;
11920 // - If the top of the stack is a non-leaf that may be expensive to clone.
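// For example (hypothetical IL), the pair
//     dup
//     stloc.1
// is imported as if it were 'stloc.1; ldloc.1', which leaves the same value on the
// stack but lets CSE see both uses through the local.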
11922 if (codeAddr < codeEndp)
11924 OPCODE nextOpcode = (OPCODE)getU1LittleEndian(codeAddr);
11925 if (impIsAnySTLOC(nextOpcode))
11927 if (!opts.compDbgCode)
11929 insertLdloc = true;
11932 GenTree* stackTop = impStackTop().val;
11933 if (!stackTop->IsIntegralConst(0) && !stackTop->IsFPZero() && !stackTop->IsLocal())
11935 insertLdloc = true;
11941 /* Pull the top value from the stack */
11942 op1 = impPopStack(tiRetVal);
11944 /* Clone the value */
11945 op1 = impCloneExpr(op1, &op2, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_ALL,
11946 nullptr DEBUGARG("DUP instruction"));
11948 /* Either the tree started with no global effects, or impCloneExpr
11949 evaluated the tree to a temp and returned two copies of that
11950 temp. Either way, neither op1 nor op2 should have side effects. */
11952 assert(!(op1->gtFlags & GTF_GLOB_EFFECT) && !(op2->gtFlags & GTF_GLOB_EFFECT));
11954 /* Push the tree/temp back on the stack */
11955 impPushOnStack(op1, tiRetVal);
11957 /* Push the copy on the stack */
11958 impPushOnStack(op2, tiRetVal);
11966 lclTyp = TYP_SHORT;
11975 lclTyp = TYP_I_IMPL;
11977 case CEE_STIND_REF:
11981 lclTyp = TYP_FLOAT;
11984 lclTyp = TYP_DOUBLE;
11988 if (tiVerificationNeeded)
11990 typeInfo instrType(lclTyp);
11991 #ifdef _TARGET_64BIT_
11992 if (opcode == CEE_STIND_I)
11994 instrType = typeInfo::nativeInt();
11996 #endif // _TARGET_64BIT_
11997 verVerifySTIND(impStackTop(1).seTypeInfo, impStackTop(0).seTypeInfo, instrType);
12001 compUnsafeCastUsed = true; // Have to go conservative
12006 op2 = impPopStack().val; // value to store
12007 op1 = impPopStack().val; // address to store to
12009 // you can indirect off of a TYP_I_IMPL (if we are in C) or a BYREF
12010 assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
12012 impBashVarAddrsToI(op1, op2);
12014 op2 = impImplicitR4orR8Cast(op2, lclTyp);
12016 #ifdef _TARGET_64BIT_
12017 // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
12018 if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType))
12020 op2->gtType = TYP_I_IMPL;
12024 // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
12026 if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT))
12028 assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
12029 op2 = gtNewCastNode(TYP_INT, op2, TYP_INT);
12031 // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
12033 if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT))
12035 assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
12036 op2 = gtNewCastNode(TYP_I_IMPL, op2, TYP_I_IMPL);
12039 #endif // _TARGET_64BIT_
12041 if (opcode == CEE_STIND_REF)
12043 // STIND_REF can be used to store TYP_INT, TYP_I_IMPL, TYP_REF, or TYP_BYREF
12044 assertImp(varTypeIsIntOrI(op2->gtType) || varTypeIsGC(op2->gtType));
12045 lclTyp = genActualType(op2->TypeGet());
12048 // Check target type.
12050 if (op2->gtType == TYP_BYREF || lclTyp == TYP_BYREF)
12052 if (op2->gtType == TYP_BYREF)
12054 assertImp(lclTyp == TYP_BYREF || lclTyp == TYP_I_IMPL);
12056 else if (lclTyp == TYP_BYREF)
12058 assertImp(op2->gtType == TYP_BYREF || varTypeIsIntOrI(op2->gtType));
12063 assertImp(genActualType(op2->gtType) == genActualType(lclTyp) ||
12064 ((lclTyp == TYP_I_IMPL) && (genActualType(op2->gtType) == TYP_INT)) ||
12065 (varTypeIsFloating(op2->gtType) && varTypeIsFloating(lclTyp)));
12069 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
12071 // stind could point anywhere, for example a boxed class static int
12072 op1->gtFlags |= GTF_IND_TGTANYWHERE;
12074 if (prefixFlags & PREFIX_VOLATILE)
12076 assert(op1->OperGet() == GT_IND);
12077 op1->gtFlags |= GTF_DONT_CSE; // Can't CSE a volatile
12078 op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
12079 op1->gtFlags |= GTF_IND_VOLATILE;
12082 if (prefixFlags & PREFIX_UNALIGNED)
12084 assert(op1->OperGet() == GT_IND);
12085 op1->gtFlags |= GTF_IND_UNALIGNED;
12088 op1 = gtNewAssignNode(op1, op2);
12089 op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
12091 // Spill side-effects AND global-data-accesses
12092 if (verCurrentState.esStackDepth > 0)
12094 impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STIND"));
12103 lclTyp = TYP_SHORT;
12112 case CEE_LDIND_REF:
12116 lclTyp = TYP_I_IMPL;
12119 lclTyp = TYP_FLOAT;
12122 lclTyp = TYP_DOUBLE;
12125 lclTyp = TYP_UBYTE;
12132 if (tiVerificationNeeded)
12134 typeInfo lclTiType(lclTyp);
12135 #ifdef _TARGET_64BIT_
12136 if (opcode == CEE_LDIND_I)
12138 lclTiType = typeInfo::nativeInt();
12140 #endif // _TARGET_64BIT_
12141 tiRetVal = verVerifyLDIND(impStackTop().seTypeInfo, lclTiType);
12142 tiRetVal.NormaliseForStack();
12146 compUnsafeCastUsed = true; // Have to go conservative
12151 op1 = impPopStack().val; // address to load from
12152 impBashVarAddrsToI(op1);
12154 #ifdef _TARGET_64BIT_
12155 // Allow an upcast of op1 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
12157 if (genActualType(op1->gtType) == TYP_INT)
12159 assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
12160 op1 = gtNewCastNode(TYP_I_IMPL, op1, TYP_I_IMPL);
12164 assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
12166 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
12168 // ldind could point anywhere, for example a boxed class static int
12169 op1->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
12171 if (prefixFlags & PREFIX_VOLATILE)
12173 assert(op1->OperGet() == GT_IND);
12174 op1->gtFlags |= GTF_DONT_CSE; // Can't CSE a volatile
12175 op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
12176 op1->gtFlags |= GTF_IND_VOLATILE;
12179 if (prefixFlags & PREFIX_UNALIGNED)
12181 assert(op1->OperGet() == GT_IND);
12182 op1->gtFlags |= GTF_IND_UNALIGNED;
12185 impPushOnStack(op1, tiRetVal);
12189 case CEE_UNALIGNED:
12192 val = getU1LittleEndian(codeAddr);
12194 JITDUMP(" %u", val);
12195 if ((val != 1) && (val != 2) && (val != 4))
12197 BADCODE("Alignment unaligned. must be 1, 2, or 4");
12200 Verify(!(prefixFlags & PREFIX_UNALIGNED), "Multiple unaligned. prefixes");
12201 prefixFlags |= PREFIX_UNALIGNED;
12203 impValidateMemoryAccessOpcode(codeAddr, codeEndp, false);
12206 opcode = (OPCODE)getU1LittleEndian(codeAddr);
12207 codeAddr += sizeof(__int8);
12208 opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
12209 goto DECODE_OPCODE;
12213 Verify(!(prefixFlags & PREFIX_VOLATILE), "Multiple volatile. prefixes");
12214 prefixFlags |= PREFIX_VOLATILE;
12216 impValidateMemoryAccessOpcode(codeAddr, codeEndp, true);
12223 // Need to do a lookup here so that we perform an access check
12224 // and do a NOWAY if protections are violated
12225 _impResolveToken(CORINFO_TOKENKIND_Method);
12227 JITDUMP(" %08X", resolvedToken.token);
12229 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef*/,
12230 addVerifyFlag(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN)),
12233 // This check really only applies to intrinsic Array.Address methods
12234 if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE)
12236 NO_WAY("Currently do not support LDFTN of Parameterized functions");
12239 // Do this before DO_LDFTN since CEE_LDVIRTFN does it on its own.
12240 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12242 if (tiVerificationNeeded)
12244 // LDFTN could start the beginning of a delegate creation sequence; remember that
12245 delegateCreateStart = codeAddr - 2;
12247 // check any constraints on the callee's class and type parameters
12248 VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
12249 "method has unsatisfied class constraints");
12250 VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(resolvedToken.hClass,
12251 resolvedToken.hMethod),
12252 "method has unsatisfied method constraints");
12254 mflags = callInfo.verMethodFlags;
12255 Verify(!(mflags & CORINFO_FLG_CONSTRUCTOR), "LDFTN on a constructor");
12259 op1 = impMethodPointer(&resolvedToken, &callInfo);
12260 if (compDonotInline())
12265 impPushOnStack(op1, typeInfo(resolvedToken.hMethod));
12270 case CEE_LDVIRTFTN:
12272 /* Get the method token */
12274 _impResolveToken(CORINFO_TOKENKIND_Method);
12276 JITDUMP(" %08X", resolvedToken.token);
12278 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef */,
12279 addVerifyFlag(combine(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN),
12280 CORINFO_CALLINFO_CALLVIRT)),
12283 // This check really only applies to intrinsic Array.Address methods
12284 if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE)
12286 NO_WAY("Currently do not support LDFTN of Parameterized functions");
12289 mflags = callInfo.methodFlags;
12291 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12293 if (compIsForInlining())
12295 if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL))
12297 compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDVIRTFN_ON_NON_VIRTUAL);
12302 CORINFO_SIG_INFO& ftnSig = callInfo.sig;
12304 if (tiVerificationNeeded)
12307 Verify(ftnSig.hasThis(), "ldvirtftn on a static method");
12308 Verify(!(mflags & CORINFO_FLG_CONSTRUCTOR), "LDVIRTFTN on a constructor");
12310 // JIT32 verifier rejects verifiable ldvirtftn pattern
12311 typeInfo declType =
12312 verMakeTypeInfo(resolvedToken.hClass, true); // Change TI_STRUCT to TI_REF when necessary
12314 typeInfo arg = impStackTop().seTypeInfo;
12315 Verify((arg.IsType(TI_REF) || arg.IsType(TI_NULL)) && tiCompatibleWith(arg, declType, true),
12318 CORINFO_CLASS_HANDLE instanceClassHnd = info.compClassHnd;
12319 if (!(arg.IsType(TI_NULL) || (mflags & CORINFO_FLG_STATIC)))
12321 instanceClassHnd = arg.GetClassHandleForObjRef();
12324 // check any constraints on the method's class and type parameters
12325 VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
12326 "method has unsatisfied class constraints");
12327 VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(resolvedToken.hClass,
12328 resolvedToken.hMethod),
12329 "method has unsatisfied method constraints");
12331 if (mflags & CORINFO_FLG_PROTECTED)
12333 Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
12334 "Accessing protected method through wrong type.");
12338 /* Get the object-ref */
12339 op1 = impPopStack().val;
12340 assertImp(op1->gtType == TYP_REF);
12342 if (opts.IsReadyToRun())
12344 if (callInfo.kind != CORINFO_VIRTUALCALL_LDVIRTFTN)
12346 if (op1->gtFlags & GTF_SIDE_EFFECT)
12348 op1 = gtUnusedValNode(op1);
12349 impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
12354 else if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL))
12356 if (op1->gtFlags & GTF_SIDE_EFFECT)
12358 op1 = gtUnusedValNode(op1);
12359 impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
12364 GenTreePtr fptr = impImportLdvirtftn(op1, &resolvedToken, &callInfo);
12365 if (compDonotInline())
12370 impPushOnStack(fptr, typeInfo(resolvedToken.hMethod));
12375 case CEE_CONSTRAINED:
12377 assertImp(sz == sizeof(unsigned));
12378 impResolveToken(codeAddr, &constrainedResolvedToken, CORINFO_TOKENKIND_Constrained);
12379 codeAddr += sizeof(unsigned); // prefix instructions must increment codeAddr manually
12380 JITDUMP(" (%08X) ", constrainedResolvedToken.token);
12382 Verify(!(prefixFlags & PREFIX_CONSTRAINED), "Multiple constrained. prefixes");
12383 prefixFlags |= PREFIX_CONSTRAINED;
12386 OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
12387 if (actualOpcode != CEE_CALLVIRT)
12389 BADCODE("constrained. has to be followed by callvirt");
12396 JITDUMP(" readonly.");
12398 Verify(!(prefixFlags & PREFIX_READONLY), "Multiple readonly. prefixes");
12399 prefixFlags |= PREFIX_READONLY;
12402 OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
12403 if (actualOpcode != CEE_LDELEMA && !impOpcodeIsCallOpcode(actualOpcode))
12405 BADCODE("readonly. has to be followed by ldelema or call");
12415 Verify(!(prefixFlags & PREFIX_TAILCALL_EXPLICIT), "Multiple tailcall. prefixes");
12416 prefixFlags |= PREFIX_TAILCALL_EXPLICIT;
12419 OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
12420 if (!impOpcodeIsCallOpcode(actualOpcode))
12422 BADCODE("tailcall. has to be followed by call, callvirt or calli");
12430 /* Since we will implicitly insert newObjThisPtr at the start of the
12431 argument list, spill any GTF_ORDER_SIDEEFF */
12432 impSpillSpecialSideEff();
12434 /* NEWOBJ does not respond to TAIL */
12435 prefixFlags &= ~PREFIX_TAILCALL_EXPLICIT;
12437 /* NEWOBJ does not respond to CONSTRAINED */
12438 prefixFlags &= ~PREFIX_CONSTRAINED;
12440 #if COR_JIT_EE_VERSION > 460
12441 _impResolveToken(CORINFO_TOKENKIND_NewObj);
12443 _impResolveToken(CORINFO_TOKENKIND_Method);
12446 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef*/,
12447 addVerifyFlag(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_ALLOWINSTPARAM)),
12450 if (compIsForInlining())
12452 if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
12454 // Check to see if this call violates the boundary.
12455 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_SECURITY);
12460 mflags = callInfo.methodFlags;
12462 if ((mflags & (CORINFO_FLG_STATIC | CORINFO_FLG_ABSTRACT)) != 0)
12464 BADCODE("newobj on static or abstract method");
12467 // Insert the security callout before any actual code is generated
12468 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12470 // There are three different cases for new
12471 // Object size is variable (depends on arguments)
12472 // 1) Object is an array (arrays treated specially by the EE)
12473 // 2) Object is some other variable sized object (e.g. String)
12474 // 3) Class Size can be determined beforehand (normal case)
12475 // In the first case, we need to call a NEWOBJ helper (multinewarray)
12476 // in the second case we call the constructor with a '0' this pointer
12477 // In the third case we allocate the memory, then call the constructor
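// For example, 'newobj' on a multi-dimensional array constructor is case 1, 'newobj' on a String
// constructor is case 2 (variable size, null 'this'), and 'newobj' on an ordinary class is case 3.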
12479 clsFlags = callInfo.classFlags;
12480 if (clsFlags & CORINFO_FLG_ARRAY)
12482 if (tiVerificationNeeded)
12484 CORINFO_CLASS_HANDLE elemTypeHnd;
12485 INDEBUG(CorInfoType corType =)
12486 info.compCompHnd->getChildType(resolvedToken.hClass, &elemTypeHnd);
12487 assert(!(elemTypeHnd == nullptr && corType == CORINFO_TYPE_VALUECLASS));
12488 Verify(elemTypeHnd == nullptr ||
12489 !(info.compCompHnd->getClassAttribs(elemTypeHnd) & CORINFO_FLG_CONTAINS_STACK_PTR),
12490 "newarr of byref-like objects");
12491 verVerifyCall(opcode, &resolvedToken, nullptr, ((prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0),
12492 ((prefixFlags & PREFIX_READONLY) != 0), delegateCreateStart, codeAddr - 1,
12493 &callInfo DEBUGARG(info.compFullName));
12495 // Arrays need to call the NEWOBJ helper.
12496 assertImp(clsFlags & CORINFO_FLG_VAROBJSIZE);
12498 impImportNewObjArray(&resolvedToken, &callInfo);
12499 if (compDonotInline())
12507 // At present this can only be String
12508 else if (clsFlags & CORINFO_FLG_VAROBJSIZE)
12510 if (IsTargetAbi(CORINFO_CORERT_ABI))
12512 // The dummy argument does not exist in CoreRT
12513 newObjThisPtr = nullptr;
12517 // This is the case for variable-sized objects that are not
12518 // arrays. In this case, call the constructor with a null 'this'
12520 newObjThisPtr = gtNewIconNode(0, TYP_REF);
12523 /* Remember that this basic block contains 'new' of an object */
12524 block->bbFlags |= BBF_HAS_NEWOBJ;
12525 optMethodFlags |= OMF_HAS_NEWOBJ;
12529 // This is the normal case where the size of the object is
12530 // fixed. Allocate the memory and call the constructor.
12532 // Note: We cannot add a peep to avoid use of temp here
12533 // because we don't have enough interference info to detect when
12534 // sources and destination interfere, for example: s = new S(ref);
12536 // TODO: Find the correct place to introduce a general
12537 // reverse copy prop for struct return values from newobj or
12538 // any function returning structs.
12540 /* get a temporary for the new object */
12541 lclNum = lvaGrabTemp(true DEBUGARG("NewObj constructor temp"));
12543 // In the value class case we only need clsHnd for size calcs.
12545 // The lookup of the code pointer will be handled by CALL in this case
12546 if (clsFlags & CORINFO_FLG_VALUECLASS)
12548 if (compIsForInlining())
12550 // If value class has GC fields, inform the inliner. It may choose to
12551 // bail out on the inline.
12552 DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
12553 if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0)
12555 compInlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT);
12556 if (compInlineResult->IsFailure())
12561 // Do further notification in the case where the call site is rare;
12562 // some policies do not track the relative hotness of call sites for
12563 // "always" inline cases.
12564 if (impInlineInfo->iciBlock->isRunRarely())
12566 compInlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT);
12567 if (compInlineResult->IsFailure())
12575 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
12576 unsigned size = info.compCompHnd->getClassSize(resolvedToken.hClass);
12578 if (impIsPrimitive(jitTyp))
12580 lvaTable[lclNum].lvType = JITtype2varType(jitTyp);
12584 // The local variable itself is the allocated space.
12585 // Here we need the unsafe value cls check, since the address of the struct is taken for further use
12586 // and is potentially exploitable.
12587 lvaSetStruct(lclNum, resolvedToken.hClass, true /* unsafe value cls check */);
12590 // Append a tree to zero-out the temp
12591 newObjThisPtr = gtNewLclvNode(lclNum, lvaTable[lclNum].TypeGet());
12593 newObjThisPtr = gtNewBlkOpNode(newObjThisPtr, // Dest
12594 gtNewIconNode(0), // Value
12596 false, // isVolatile
12597 false); // not copyBlock
12598 impAppendTree(newObjThisPtr, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
12600 // Obtain the address of the temp
12602 gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewLclvNode(lclNum, lvaTable[lclNum].TypeGet()));
12606 #ifdef FEATURE_READYTORUN_COMPILER
12607 if (opts.IsReadyToRun())
12609 op1 = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_NEW, TYP_REF);
12610 usingReadyToRunHelper = (op1 != nullptr);
12613 if (!usingReadyToRunHelper)
12616 op1 = impParentClassTokenToHandle(&resolvedToken, nullptr, TRUE);
12617 if (op1 == nullptr)
12618 { // compDonotInline()
12622 // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
12623 // and the newfast call with a single call to a dynamic R2R cell that will:
12624 // 1) Load the context
12625 // 2) Perform the generic dictionary lookup and caching, and generate the appropriate
12627 // 3) Allocate and return the new object
12628 // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
12630 op1 = gtNewAllocObjNode(info.compCompHnd->getNewHelper(&resolvedToken, info.compMethodHnd),
12631 resolvedToken.hClass, TYP_REF, op1);
12634 // Remember that this basic block contains 'new' of an object
12635 block->bbFlags |= BBF_HAS_NEWOBJ;
12636 optMethodFlags |= OMF_HAS_NEWOBJ;
12638 // Append the assignment to the temp/local. Don't need to spill
12639 // at all as we are just calling an EE-Jit helper which can only
12640 // cause an (async) OutOfMemoryException.
12642 // We assign the newly allocated object (by a GT_ALLOCOBJ node)
12643 // to a temp. Note that the pattern "temp = allocObj" is required
12644 // by the ObjectAllocator phase to be able to locate GT_ALLOCOBJ nodes
12645 // without an exhaustive walk over all expressions.
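// In other words, we append 'tmp = GT_ALLOCOBJ(...)' as its own statement below and then hand a
// GT_LCL_VAR of that temp to the constructor call as the newly allocated 'this' pointer.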
12647 impAssignTempGen(lclNum, op1, (unsigned)CHECK_SPILL_NONE);
12649 newObjThisPtr = gtNewLclvNode(lclNum, TYP_REF);
12656 /* CALLI does not respond to CONSTRAINED */
12657 prefixFlags &= ~PREFIX_CONSTRAINED;
12659 if (compIsForInlining())
12661 // CALLI doesn't have a method handle, so assume the worst.
12662 if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
12664 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_CALLI);
12674 // We can't call getCallInfo on the token from a CALLI, but we need it in
12675 // many other places. We unfortunately embed that knowledge here.
12676 if (opcode != CEE_CALLI)
12678 _impResolveToken(CORINFO_TOKENKIND_Method);
12680 eeGetCallInfo(&resolvedToken,
12681 (prefixFlags & PREFIX_CONSTRAINED) ? &constrainedResolvedToken : nullptr,
12682 // this is how impImportCall invokes getCallInfo
12684 combine(combine(CORINFO_CALLINFO_ALLOWINSTPARAM, CORINFO_CALLINFO_SECURITYCHECKS),
12685 (opcode == CEE_CALLVIRT) ? CORINFO_CALLINFO_CALLVIRT
12686 : CORINFO_CALLINFO_NONE)),
12691 // Suppress uninitialized use warning.
12692 memset(&resolvedToken, 0, sizeof(resolvedToken));
12693 memset(&callInfo, 0, sizeof(callInfo));
12695 resolvedToken.token = getU4LittleEndian(codeAddr);
12698 CALL: // memberRef should be set.
12699 // newObjThisPtr should be set for CEE_NEWOBJ
12701 JITDUMP(" %08X", resolvedToken.token);
12702 constraintCall = (prefixFlags & PREFIX_CONSTRAINED) != 0;
12704 bool newBBcreatedForTailcallStress;
12706 newBBcreatedForTailcallStress = false;
12708 if (compIsForInlining())
12710 if (compDonotInline())
12714 // We rule out inlinees with explicit tail calls in fgMakeBasicBlocks.
12715 assert((prefixFlags & PREFIX_TAILCALL_EXPLICIT) == 0);
12719 if (compTailCallStress())
12721 // Have we created a new BB after the "call" instruction in fgMakeBasicBlocks()?
12722 // Tail call stress only recognizes call+ret patterns and forces them to be
12723 // explicit tail prefixed calls. Also fgMakeBasicBlocks() under tail call stress
12724 // doesn't import the 'ret' opcode following the call into the basic block containing
12725 // the call; instead it imports it into a new basic block. Note that fgMakeBasicBlocks()
12726 // is already checking that there is an opcode following the call, and hence it is
12727 // safe here to read the next opcode without a bounds check.
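// For example, under tail call stress the IL sequence 'call <target>; ret' is treated as if it had been
// written 'tail. call <target>; ret'; setting PREFIX_TAILCALL_EXPLICIT below is what simulates that.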
12728 newBBcreatedForTailcallStress =
12729 impOpcodeIsCallOpcode(opcode) && // Current opcode is a CALL, (not a CEE_NEWOBJ). So, don't
12730 // make it jump to RET.
12731 (OPCODE)getU1LittleEndian(codeAddr + sz) == CEE_RET; // Next opcode is a CEE_RET
12733 if (newBBcreatedForTailcallStress &&
12734 !(prefixFlags & PREFIX_TAILCALL_EXPLICIT) && // User hasn't set "tail." prefix yet.
12735 verCheckTailCallConstraint(opcode, &resolvedToken,
12736 constraintCall ? &constrainedResolvedToken : nullptr,
12737 true) // Is it legal to do a tailcall?
12740 // Stress the tailcall.
12741 JITDUMP(" (Tailcall stress: prefixFlags |= PREFIX_TAILCALL_EXPLICIT)");
12742 prefixFlags |= PREFIX_TAILCALL_EXPLICIT;
12746 // Note that when running under tail call stress, a call will be marked as explicitly tail prefixed and
12747 // hence will not be considered for implicit tail calling.
12748 bool isRecursive = (callInfo.hMethod == info.compMethodHnd);
12749 if (impIsImplicitTailCallCandidate(opcode, codeAddr + sz, codeEndp, prefixFlags, isRecursive))
12751 JITDUMP(" (Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)");
12752 prefixFlags |= PREFIX_TAILCALL_IMPLICIT;
12756 // Treat this call as tail call for verification only if "tail" prefixed (i.e. explicit tail call).
12757 explicitTailCall = (prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0;
12758 readonlyCall = (prefixFlags & PREFIX_READONLY) != 0;
12760 if (opcode != CEE_CALLI && opcode != CEE_NEWOBJ)
12762 // All calls and delegates need a security callout.
12763 // For delegates, this is the call to the delegate constructor, not the access check on the
12765 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12767 #if 0 // DevDiv 410397 - This breaks too many obfuscated apps to do this in an in-place release
12769 // DevDiv 291703 - we need to check for accessibility between the caller of InitializeArray
12770 // and the field it is reading; thus it is now unverifiable not to immediately precede it with
12771 // ldtoken <field token>, and we now check accessibility
12772 if ((callInfo.methodFlags & CORINFO_FLG_INTRINSIC) &&
12773 (info.compCompHnd->getIntrinsicID(callInfo.hMethod) == CORINFO_INTRINSIC_InitializeArray))
12775 if (prevOpcode != CEE_LDTOKEN)
12777 Verify(prevOpcode == CEE_LDTOKEN, "Need ldtoken for InitializeArray");
12781 assert(lastLoadToken != NULL);
12782 // Now that we know we have a token, verify that it is accessible for loading
12783 CORINFO_RESOLVED_TOKEN resolvedLoadField;
12784 impResolveToken(lastLoadToken, &resolvedLoadField, CORINFO_TOKENKIND_Field);
12785 eeGetFieldInfo(&resolvedLoadField, CORINFO_ACCESS_INIT_ARRAY, &fieldInfo);
12786 impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
12790 #endif // DevDiv 410397
12793 if (tiVerificationNeeded)
12795 verVerifyCall(opcode, &resolvedToken, constraintCall ? &constrainedResolvedToken : nullptr,
12796 explicitTailCall, readonlyCall, delegateCreateStart, codeAddr - 1,
12797 &callInfo DEBUGARG(info.compFullName));
12800 // Insert delegate callout here.
12801 if (opcode == CEE_NEWOBJ && (mflags & CORINFO_FLG_CONSTRUCTOR) && (clsFlags & CORINFO_FLG_DELEGATE))
12804 // We should do this only if verification is enabled
12805 // If verification is disabled, delegateCreateStart will not be initialized correctly
12806 if (tiVerificationNeeded)
12808 mdMemberRef delegateMethodRef = mdMemberRefNil;
12809 // We should get here only for well formed delegate creation.
12810 assert(verCheckDelegateCreation(delegateCreateStart, codeAddr - 1, delegateMethodRef));
12814 #ifdef FEATURE_CORECLR
12815 // In coreclr the delegate transparency rule needs to be enforced even if verification is disabled
12816 typeInfo tiActualFtn = impStackTop(0).seTypeInfo;
12817 CORINFO_METHOD_HANDLE delegateMethodHandle = tiActualFtn.GetMethod2();
12819 impInsertCalloutForDelegate(info.compMethodHnd, delegateMethodHandle, resolvedToken.hClass);
12820 #endif // FEATURE_CORECLR
12823 callTyp = impImportCall(opcode, &resolvedToken, constraintCall ? &constrainedResolvedToken : nullptr,
12824 newObjThisPtr, prefixFlags, &callInfo, opcodeOffs);
12825 if (compDonotInline())
12830 if (explicitTailCall || newBBcreatedForTailcallStress) // If newBBcreatedForTailcallStress is true, we
12831 // have created a new BB after the "call"
12832 // instruction in fgMakeBasicBlocks(). So we need to jump to RET regardless.
12834 assert(!compIsForInlining());
12846 BOOL isLoadAddress = (opcode == CEE_LDFLDA || opcode == CEE_LDSFLDA);
12847 BOOL isLoadStatic = (opcode == CEE_LDSFLD || opcode == CEE_LDSFLDA);
12849 /* Get the CP_Fieldref index */
12850 assertImp(sz == sizeof(unsigned));
12852 _impResolveToken(CORINFO_TOKENKIND_Field);
12854 JITDUMP(" %08X", resolvedToken.token);
12856 int aflags = isLoadAddress ? CORINFO_ACCESS_ADDRESS : CORINFO_ACCESS_GET;
12858 GenTreePtr obj = nullptr;
12859 typeInfo* tiObj = nullptr;
12860 CORINFO_CLASS_HANDLE objType = nullptr; // used for fields
12862 if (opcode == CEE_LDFLD || opcode == CEE_LDFLDA)
12864 tiObj = &impStackTop().seTypeInfo;
12865 obj = impPopStack(objType).val;
12867 if (impIsThis(obj))
12869 aflags |= CORINFO_ACCESS_THIS;
12871 // An optimization for Contextful classes:
12872 // we unwrap the proxy when we have a 'this reference'
12874 if (info.compUnwrapContextful)
12876 aflags |= CORINFO_ACCESS_UNWRAP;
12881 eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo);
12883 // Figure out the type of the member. We always call canAccessField, so you always need this
12885 CorInfoType ciType = fieldInfo.fieldType;
12886 clsHnd = fieldInfo.structType;
12888 lclTyp = JITtype2varType(ciType);
12890 #ifdef _TARGET_AMD64_
12891 noway_assert(varTypeIsIntegralOrI(lclTyp) || varTypeIsFloating(lclTyp) || lclTyp == TYP_STRUCT);
12892 #endif // _TARGET_AMD64_
12894 if (compIsForInlining())
12896 switch (fieldInfo.fieldAccessor)
12898 case CORINFO_FIELD_INSTANCE_HELPER:
12899 case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
12900 case CORINFO_FIELD_STATIC_ADDR_HELPER:
12901 case CORINFO_FIELD_STATIC_TLS:
12903 compInlineResult->NoteFatal(InlineObservation::CALLEE_LDFLD_NEEDS_HELPER);
12906 case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
12907 #if COR_JIT_EE_VERSION > 460
12908 case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
12910 /* We may be able to inline the field accessors in specific instantiations of generic
12912 compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDFLD_NEEDS_HELPER);
12919 if (!isLoadAddress && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && lclTyp == TYP_STRUCT &&
12922 if ((info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd) == CORINFO_TYPE_UNDEF) &&
12923 !(info.compFlags & CORINFO_FLG_FORCEINLINE))
12925 // Loading a static valuetype field usually will cause a JitHelper to be called
12926 // for the static base. This will bloat the code.
12927 compInlineResult->Note(InlineObservation::CALLEE_LDFLD_STATIC_VALUECLASS);
12929 if (compInlineResult->IsFailure())
12937 tiRetVal = verMakeTypeInfo(ciType, clsHnd);
12940 tiRetVal.MakeByRef();
12944 tiRetVal.NormaliseForStack();
12947 // Perform this check always to ensure that we get field access exceptions even with
12948 // SkipVerification.
12949 impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
12951 if (tiVerificationNeeded)
12953 // You can also pass the unboxed struct to LDFLD
12954 BOOL bAllowPlainValueTypeAsThis = FALSE;
12955 if (opcode == CEE_LDFLD && impIsValueType(tiObj))
12957 bAllowPlainValueTypeAsThis = TRUE;
12960 verVerifyField(&resolvedToken, fieldInfo, tiObj, isLoadAddress, bAllowPlainValueTypeAsThis);
12962 // If we're doing this on a heap object or from a 'safe' byref
12963 // then the result is a safe byref too
12964 if (isLoadAddress) // load address
12966 if (fieldInfo.fieldFlags &
12967 CORINFO_FLG_FIELD_STATIC) // statics marked as safe will have permanent home
12969 if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_SAFESTATIC_BYREF_RETURN)
12971 tiRetVal.SetIsPermanentHomeByRef();
12974 else if (tiObj->IsObjRef() || tiObj->IsPermanentHomeByRef())
12976 // ldflda of byref is safe if done on a gc object or on a
12978 tiRetVal.SetIsPermanentHomeByRef();
12984 // tiVerificationNeeded is false.
12985 // Raise InvalidProgramException if static load accesses non-static field
12986 if (isLoadStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0))
12988 BADCODE("static access on an instance field");
12992 // We are using ldfld/a on a static field. We allow it, but need to get side-effect from obj.
12993 if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr)
12995 if (obj->gtFlags & GTF_SIDE_EFFECT)
12997 obj = gtUnusedValNode(obj);
12998 impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13003 /* Preserve 'small' int types */
13004 if (lclTyp > TYP_INT)
13006 lclTyp = genActualType(lclTyp);
13009 bool usesHelper = false;
13011 switch (fieldInfo.fieldAccessor)
13013 case CORINFO_FIELD_INSTANCE:
13014 #ifdef FEATURE_READYTORUN_COMPILER
13015 case CORINFO_FIELD_INSTANCE_WITH_BASE:
13018 bool nullcheckNeeded = false;
13020 obj = impCheckForNullPointer(obj);
13022 if (isLoadAddress && (obj->gtType == TYP_BYREF) && fgAddrCouldBeNull(obj))
13024 nullcheckNeeded = true;
13027 // If the object is a struct, what we really want is
13028 // for the field to operate on the address of the struct.
13029 if (!varTypeGCtype(obj->TypeGet()) && impIsValueType(tiObj))
13031 assert(opcode == CEE_LDFLD && objType != nullptr);
13033 obj = impGetStructAddr(obj, objType, (unsigned)CHECK_SPILL_ALL, true);
13036 /* Create the data member node */
13037 op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset, nullcheckNeeded);
13039 #ifdef FEATURE_READYTORUN_COMPILER
13040 if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE)
13042 op1->gtField.gtFieldLookup = fieldInfo.fieldLookup;
13046 op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT);
13048 if (fgAddrCouldBeNull(obj))
13050 op1->gtFlags |= GTF_EXCEPT;
13053 // If gtFldObj is a BYREF then our target is a value class and
13055 // it could point anywhere, for example a boxed class static int
13055 if (obj->gtType == TYP_BYREF)
13057 op1->gtFlags |= GTF_IND_TGTANYWHERE;
13060 DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
13061 if (StructHasOverlappingFields(typeFlags))
13063 op1->gtField.gtFldMayOverlap = true;
13066 // wrap it in an address-of operator if necessary
13069 op1 = gtNewOperNode(GT_ADDR,
13070 (var_types)(varTypeIsGC(obj->TypeGet()) ? TYP_BYREF : TYP_I_IMPL), op1);
13074 if (compIsForInlining() &&
13075 impInlineIsGuaranteedThisDerefBeforeAnySideEffects(nullptr, obj,
13076 impInlineInfo->inlArgInfo))
13078 impInlineInfo->thisDereferencedFirst = true;
13084 case CORINFO_FIELD_STATIC_TLS:
13085 #ifdef _TARGET_X86_
13086 // Legacy TLS access is implemented as intrinsic on x86 only
13088 /* Create the data member node */
13089 op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset);
13090 op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation
13094 op1 = gtNewOperNode(GT_ADDR, (var_types)TYP_I_IMPL, op1);
13098 fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER;
13103 case CORINFO_FIELD_STATIC_ADDR_HELPER:
13104 case CORINFO_FIELD_INSTANCE_HELPER:
13105 case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
13106 op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp,
13111 case CORINFO_FIELD_STATIC_ADDRESS:
13112 // Replace static read-only fields with constant if possible
13113 if ((aflags & CORINFO_ACCESS_GET) && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_FINAL) &&
13114 !(fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP) &&
13115 (varTypeIsIntegral(lclTyp) || varTypeIsFloating(lclTyp)))
13117 CorInfoInitClassResult initClassResult =
13118 info.compCompHnd->initClass(resolvedToken.hField, info.compMethodHnd,
13119 impTokenLookupContextHandle);
13121 if (initClassResult & CORINFO_INITCLASS_INITIALIZED)
13123 void** pFldAddr = nullptr;
13125 info.compCompHnd->getFieldAddress(resolvedToken.hField, (void**)&pFldAddr);
13127 // We should always be able to access this static's address directly
13128 assert(pFldAddr == nullptr);
13130 op1 = impImportStaticReadOnlyField(fldAddr, lclTyp);
13137 case CORINFO_FIELD_STATIC_RVA_ADDRESS:
13138 case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
13139 case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
13140 #if COR_JIT_EE_VERSION > 460
13141 case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
13143 op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo,
13147 case CORINFO_FIELD_INTRINSIC_ZERO:
13149 assert(aflags & CORINFO_ACCESS_GET);
13150 op1 = gtNewIconNode(0, lclTyp);
13155 case CORINFO_FIELD_INTRINSIC_EMPTY_STRING:
13157 assert(aflags & CORINFO_ACCESS_GET);
13160 InfoAccessType iat = info.compCompHnd->emptyStringLiteral(&pValue);
13161 op1 = gtNewStringLiteralNode(iat, pValue);
13167 assert(!"Unexpected fieldAccessor");
13170 if (!isLoadAddress)
13173 if (prefixFlags & PREFIX_VOLATILE)
13175 op1->gtFlags |= GTF_DONT_CSE; // Can't CSE a volatile
13176 op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
13180 assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) ||
13181 (op1->OperGet() == GT_OBJ));
13182 op1->gtFlags |= GTF_IND_VOLATILE;
13186 if (prefixFlags & PREFIX_UNALIGNED)
13190 assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) ||
13191 (op1->OperGet() == GT_OBJ));
13192 op1->gtFlags |= GTF_IND_UNALIGNED;
13197 /* Check if the class needs explicit initialization */
13199 if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
13201 GenTreePtr helperNode = impInitClass(&resolvedToken);
13202 if (compDonotInline())
13206 if (helperNode != nullptr)
13208 op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1);
13213 impPushOnStack(op1, tiRetVal);
13221 BOOL isStoreStatic = (opcode == CEE_STSFLD);
13223 CORINFO_CLASS_HANDLE fieldClsHnd; // class of the field (if it's a ref type)
13225 /* Get the CP_Fieldref index */
13227 assertImp(sz == sizeof(unsigned));
13229 _impResolveToken(CORINFO_TOKENKIND_Field);
13231 JITDUMP(" %08X", resolvedToken.token);
13233 int aflags = CORINFO_ACCESS_SET;
13234 GenTreePtr obj = nullptr;
13235 typeInfo* tiObj = nullptr;
13238 /* Pull the value from the stack */
13239 op2 = impPopStack(tiVal);
13240 clsHnd = tiVal.GetClassHandle();
13242 if (opcode == CEE_STFLD)
13244 tiObj = &impStackTop().seTypeInfo;
13245 obj = impPopStack().val;
13247 if (impIsThis(obj))
13249 aflags |= CORINFO_ACCESS_THIS;
13251 // An optimization for Contextful classes:
13252 // we unwrap the proxy when we have a 'this reference'
13254 if (info.compUnwrapContextful)
13256 aflags |= CORINFO_ACCESS_UNWRAP;
13261 eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo);
13263 // Figure out the type of the member. We always call canAccessField, so you always need this
13265 CorInfoType ciType = fieldInfo.fieldType;
13266 fieldClsHnd = fieldInfo.structType;
13268 lclTyp = JITtype2varType(ciType);
13270 if (compIsForInlining())
13272 /* Is this a 'special' (COM) field, a TLS ref static field, a field stored in the GC heap, or
13273 * a per-inst static? */
13275 switch (fieldInfo.fieldAccessor)
13277 case CORINFO_FIELD_INSTANCE_HELPER:
13278 case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
13279 case CORINFO_FIELD_STATIC_ADDR_HELPER:
13280 case CORINFO_FIELD_STATIC_TLS:
13282 compInlineResult->NoteFatal(InlineObservation::CALLEE_STFLD_NEEDS_HELPER);
13285 case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
13286 #if COR_JIT_EE_VERSION > 460
13287 case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
13290 /* We may be able to inline the field accessors in specific instantiations of generic
13292 compInlineResult->NoteFatal(InlineObservation::CALLSITE_STFLD_NEEDS_HELPER);
13300 impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
13302 if (tiVerificationNeeded)
13304 verVerifyField(&resolvedToken, fieldInfo, tiObj, TRUE);
13305 typeInfo fieldType = verMakeTypeInfo(ciType, fieldClsHnd);
13306 Verify(tiCompatibleWith(tiVal, fieldType.NormaliseForStack(), true), "type mismatch");
13310 // tiVerificationNeed is false.
13311 // Raise InvalidProgramException if static store accesses non-static field
13312 if (isStoreStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0))
13314 BADCODE("static access on an instance field");
13318 // We are using stfld on a static field.
13319 // We allow it, but need to eval any side-effects for obj
13320 if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr)
13322 if (obj->gtFlags & GTF_SIDE_EFFECT)
13324 obj = gtUnusedValNode(obj);
13325 impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13330 /* Preserve 'small' int types */
13331 if (lclTyp > TYP_INT)
13333 lclTyp = genActualType(lclTyp);
13336 switch (fieldInfo.fieldAccessor)
13338 case CORINFO_FIELD_INSTANCE:
13339 #ifdef FEATURE_READYTORUN_COMPILER
13340 case CORINFO_FIELD_INSTANCE_WITH_BASE:
13343 obj = impCheckForNullPointer(obj);
13345 /* Create the data member node */
13346 op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset);
13347 DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
13348 if (StructHasOverlappingFields(typeFlags))
13350 op1->gtField.gtFldMayOverlap = true;
13353 #ifdef FEATURE_READYTORUN_COMPILER
13354 if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE)
13356 op1->gtField.gtFieldLookup = fieldInfo.fieldLookup;
13360 op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT);
13362 if (fgAddrCouldBeNull(obj))
13364 op1->gtFlags |= GTF_EXCEPT;
13367 // If gtFldObj is a BYREF then our target is a value class and
13369 // it could point anywhere, for example a boxed class static int
13369 if (obj->gtType == TYP_BYREF)
13371 op1->gtFlags |= GTF_IND_TGTANYWHERE;
13374 if (compIsForInlining() &&
13375 impInlineIsGuaranteedThisDerefBeforeAnySideEffects(op2, obj, impInlineInfo->inlArgInfo))
13377 impInlineInfo->thisDereferencedFirst = true;
13382 case CORINFO_FIELD_STATIC_TLS:
13383 #ifdef _TARGET_X86_
13384 // Legacy TLS access is implemented as intrinsic on x86 only
13386 /* Create the data member node */
13387 op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset);
13388 op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation
13392 fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER;
13397 case CORINFO_FIELD_STATIC_ADDR_HELPER:
13398 case CORINFO_FIELD_INSTANCE_HELPER:
13399 case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
13400 op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp,
13404 case CORINFO_FIELD_STATIC_ADDRESS:
13405 case CORINFO_FIELD_STATIC_RVA_ADDRESS:
13406 case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
13407 case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
13408 #if COR_JIT_EE_VERSION > 460
13409 case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
13411 op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo,
13416 assert(!"Unexpected fieldAccessor");
13419 // Create the member assignment, unless we have a struct.
13420 // TODO-1stClassStructs: This could be limited to TYP_STRUCT, to avoid extra copies.
13421 bool deferStructAssign = varTypeIsStruct(lclTyp);
13423 if (!deferStructAssign)
13425 if (prefixFlags & PREFIX_VOLATILE)
13427 assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND));
13428 op1->gtFlags |= GTF_DONT_CSE; // Can't CSE a volatile
13429 op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
13430 op1->gtFlags |= GTF_IND_VOLATILE;
13432 if (prefixFlags & PREFIX_UNALIGNED)
13434 assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND));
13435 op1->gtFlags |= GTF_IND_UNALIGNED;
13438 /* V4.0 allows assignment of i4 constant values to i8 type vars when the IL verifier is bypassed (full-
13440 trust apps). The reason this works is that the JIT stores an i4 constant in the GenTree union during
13442 importation and reads from the union as if it were a long during code generation. Though this can
13443 potentially read garbage, one can get lucky and have it work correctly.
13445 This code pattern is generated by the Dev10 MC++ compiler when storing to fields, compiled with the
13447 optimization switch on (the default for retail configs in Dev10), and a customer app has taken a dependency
13449 on it. To be backward compatible, we explicitly add an upward cast here so that it keeps working.
13453 Note that this is limited to x86 alone, as there is no back-compat to be addressed for the ARM JIT. */
13456 CLANG_FORMAT_COMMENT_ANCHOR;
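// For example, 'ldc.i4 0' followed by 'stfld' to an int64 field reaches this point with op2 as a 32-bit
// constant while op1 is TYP_LONG; on x86 the cast below widens op2 so the store is properly typed.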
13458 #ifdef _TARGET_X86_
13459 if (op1->TypeGet() != op2->TypeGet() && op2->OperIsConst() && varTypeIsIntOrI(op2->TypeGet()) &&
13460 varTypeIsLong(op1->TypeGet()))
13462 op2 = gtNewCastNode(op1->TypeGet(), op2, op1->TypeGet());
13466 #ifdef _TARGET_64BIT_
13467 // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
13468 if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType))
13470 op2->gtType = TYP_I_IMPL;
13474 // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
13476 if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT))
13478 op2 = gtNewCastNode(TYP_INT, op2, TYP_INT);
13480 // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
13482 if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT))
13484 op2 = gtNewCastNode(TYP_I_IMPL, op2, TYP_I_IMPL);
13489 #if !FEATURE_X87_DOUBLES
13490 // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE
13491 // We insert a cast to the dest 'op1' type
13493 if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) &&
13494 varTypeIsFloating(op2->gtType))
13496 op2 = gtNewCastNode(op1->TypeGet(), op2, op1->TypeGet());
13498 #endif // !FEATURE_X87_DOUBLES
13500 op1 = gtNewAssignNode(op1, op2);
13502 /* Mark the expression as containing an assignment */
13504 op1->gtFlags |= GTF_ASG;
13507 /* Check if the class needs explicit initialization */
13509 if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
13511 GenTreePtr helperNode = impInitClass(&resolvedToken);
13512 if (compDonotInline())
13516 if (helperNode != nullptr)
13518 op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1);
13522 /* stfld can interfere with value classes (consider the sequence
13523 ldloc, ldloca, ..., stfld, stloc). We will be conservative and
13524 spill all value class references from the stack. */
13526 if (obj && ((obj->gtType == TYP_BYREF) || (obj->gtType == TYP_I_IMPL)))
13530 if (impIsValueType(tiObj))
13532 impSpillEvalStack();
13536 impSpillValueClasses();
13540 /* Spill any refs to the same member from the stack */
13542 impSpillLclRefs((ssize_t)resolvedToken.hField);
13544 /* stsfld also interferes with indirect accesses (for aliased
13545 statics) and calls. But don't need to spill other statics
13546 as we have explicitly spilled this particular static field. */
13548 impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STFLD"));
13550 if (deferStructAssign)
13552 op1 = impAssignStruct(op1, op2, clsHnd, (unsigned)CHECK_SPILL_ALL);
13560 /* Get the class type index operand */
13562 _impResolveToken(CORINFO_TOKENKIND_Newarr);
13564 JITDUMP(" %08X", resolvedToken.token);
13566 if (!opts.IsReadyToRun())
13568 // Need to restore array classes before creating array objects on the heap
13569 op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE /*mustRestoreHandle*/);
13570 if (op1 == nullptr)
13571 { // compDonotInline()
13576 if (tiVerificationNeeded)
13578 // As per ECMA, the 'numElems' specified can be either int32 or native int.
13579 Verify(impStackTop().seTypeInfo.IsIntOrNativeIntType(), "bad bound");
13581 CORINFO_CLASS_HANDLE elemTypeHnd;
13582 info.compCompHnd->getChildType(resolvedToken.hClass, &elemTypeHnd);
13583 Verify(elemTypeHnd == nullptr ||
13584 !(info.compCompHnd->getClassAttribs(elemTypeHnd) & CORINFO_FLG_CONTAINS_STACK_PTR),
13585 "array of byref-like type");
13586 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
13589 accessAllowedResult =
13590 info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
13591 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
13593 /* Form the arglist: array class handle, size */
13594 op2 = impPopStack().val;
13595 assertImp(genActualTypeIsIntOrI(op2->gtType));
13597 #ifdef FEATURE_READYTORUN_COMPILER
13598 if (opts.IsReadyToRun())
13600 op1 = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_NEWARR_1, TYP_REF,
13601 gtNewArgList(op2));
13602 usingReadyToRunHelper = (op1 != nullptr);
13604 if (!usingReadyToRunHelper)
13606 // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
13607 // and the newarr call with a single call to a dynamic R2R cell that will:
13608 // 1) Load the context
13609 // 2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
13610 // 3) Allocate the new array
13611 // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
13613 // Need to restore array classes before creating array objects on the heap
13614 op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE /*mustRestoreHandle*/);
13615 if (op1 == nullptr)
13616 { // compDonotInline()
13622 if (!usingReadyToRunHelper)
13625 args = gtNewArgList(op1, op2);
13627 /* Create a call to 'new' */
13629 // Note that this only works for shared generic code because the same helper is used for all
13630 // reference array types
13632 gtNewHelperCallNode(info.compCompHnd->getNewArrHelper(resolvedToken.hClass), TYP_REF, 0, args);
13635 op1->gtCall.compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)resolvedToken.hClass;
13637 /* Remember that this basic block contains 'new' of an sd array */
13639 block->bbFlags |= BBF_HAS_NEWARRAY;
13640 optMethodFlags |= OMF_HAS_NEWARRAY;
13642 /* Push the result of the call on the stack */
13644 impPushOnStack(op1, tiRetVal);
13651 assert(!compIsForInlining());
13653 if (tiVerificationNeeded)
13655 Verify(false, "bad opcode");
13658 // We don't allow locallocs inside handlers
13659 if (block->hasHndIndex())
13661 BADCODE("Localloc can't be inside handler");
13664 /* The FP register may not be back to the original value at the end
13665 of the method, even if the frame size is 0, as localloc may
13666 have modified it. So we will HAVE to reset it */
13668 compLocallocUsed = true;
13669 setNeedsGSSecurityCookie();
13671 // Get the size to allocate
13673 op2 = impPopStack().val;
13674 assertImp(genActualTypeIsIntOrI(op2->gtType));
13676 if (verCurrentState.esStackDepth != 0)
13678 BADCODE("Localloc can only be used when the stack is empty");
13681 op1 = gtNewOperNode(GT_LCLHEAP, TYP_I_IMPL, op2);
13683 // May throw a stack overflow exception. Obviously, we don't want locallocs to be CSE'd.
13685 op1->gtFlags |= (GTF_EXCEPT | GTF_DONT_CSE);
13687 impPushOnStack(op1, tiRetVal);
13692 /* Get the type token */
13693 assertImp(sz == sizeof(unsigned));
13695 _impResolveToken(CORINFO_TOKENKIND_Casting);
13697 JITDUMP(" %08X", resolvedToken.token);
13699 if (!opts.IsReadyToRun())
13701 op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
13702 if (op2 == nullptr)
13703 { // compDonotInline()
13708 if (tiVerificationNeeded)
13710 Verify(impStackTop().seTypeInfo.IsObjRef(), "obj reference needed");
13711 // Even if this is a value class, we know it is boxed.
13712 tiRetVal = typeInfo(TI_REF, resolvedToken.hClass);
13714 accessAllowedResult =
13715 info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
13716 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
13718 op1 = impPopStack().val;
13720 #ifdef FEATURE_READYTORUN_COMPILER
13721 if (opts.IsReadyToRun())
13723 GenTreePtr opLookup =
13724 impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_ISINSTANCEOF, TYP_REF,
13725 gtNewArgList(op1));
13726 usingReadyToRunHelper = (opLookup != nullptr);
13727 op1 = (usingReadyToRunHelper ? opLookup : op1);
13729 if (!usingReadyToRunHelper)
13731 // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
13732 // and the isinstanceof_any call with a single call to a dynamic R2R cell that will:
13733 // 1) Load the context
13734 // 2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
13735 // 3) Perform the 'is instance' check on the input object
13736 // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
13738 op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
13739 if (op2 == nullptr)
13740 { // compDonotInline()
13746 if (!usingReadyToRunHelper)
13749 op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, false);
13751 if (compDonotInline())
13756 impPushOnStack(op1, tiRetVal);
13760 case CEE_REFANYVAL:
13762 // get the class handle and make a ICON node out of it
13764 _impResolveToken(CORINFO_TOKENKIND_Class);
13766 JITDUMP(" %08X", resolvedToken.token);
13768 op2 = impTokenToHandle(&resolvedToken);
13769 if (op2 == nullptr)
13770 { // compDonotInline()
13774 if (tiVerificationNeeded)
13776 Verify(typeInfo::AreEquivalent(impStackTop().seTypeInfo, verMakeTypeInfo(impGetRefAnyClass())),
13778 tiRetVal = verMakeTypeInfo(resolvedToken.hClass).MakeByRef();
13781 op1 = impPopStack().val;
13782 // make certain it is normalized;
13783 op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL);
13785 // Call helper GETREFANY(classHandle, op1);
13786 args = gtNewArgList(op2, op1);
13787 op1 = gtNewHelperCallNode(CORINFO_HELP_GETREFANY, TYP_BYREF, 0, args);
13789 impPushOnStack(op1, tiRetVal);
13792 case CEE_REFANYTYPE:
13794 if (tiVerificationNeeded)
13796 Verify(typeInfo::AreEquivalent(impStackTop().seTypeInfo, verMakeTypeInfo(impGetRefAnyClass())),
13800 op1 = impPopStack().val;
13802 // make certain it is normalized;
13803 op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL);
13805 if (op1->gtOper == GT_OBJ)
13807 // Get the address of the refany
13808 op1 = op1->gtOp.gtOp1;
13810 // Fetch the type from the correct slot
13811 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
13812 gtNewIconNode(offsetof(CORINFO_RefAny, type), TYP_I_IMPL));
13813 op1 = gtNewOperNode(GT_IND, TYP_BYREF, op1);
13817 assertImp(op1->gtOper == GT_MKREFANY);
13819 // The pointer may have side-effects
13820 if (op1->gtOp.gtOp1->gtFlags & GTF_SIDE_EFFECT)
13822 impAppendTree(op1->gtOp.gtOp1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13824 impNoteLastILoffs();
13828 // We already have the class handle
13829 op1 = op1->gtOp.gtOp2;
13832 // convert native TypeHandle to RuntimeTypeHandle
13834 GenTreeArgList* helperArgs = gtNewArgList(op1);
13836 op1 = gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE_MAYBENULL, TYP_STRUCT, GTF_EXCEPT,
13839 // The handle struct is returned in register
13840 op1->gtCall.gtReturnType = TYP_REF;
13842 tiRetVal = typeInfo(TI_STRUCT, impGetTypeHandleClass());
13845 impPushOnStack(op1, tiRetVal);
13850 /* Get the Class index */
13851 assertImp(sz == sizeof(unsigned));
13852 lastLoadToken = codeAddr;
13853 _impResolveToken(CORINFO_TOKENKIND_Ldtoken);
13855 tokenType = info.compCompHnd->getTokenTypeAsHandle(&resolvedToken);
13857 op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE);
13858 if (op1 == nullptr)
13859 { // compDonotInline()
13863 helper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE;
13864 assert(resolvedToken.hClass != nullptr);
13866 if (resolvedToken.hMethod != nullptr)
13868 helper = CORINFO_HELP_METHODDESC_TO_STUBRUNTIMEMETHOD;
13870 else if (resolvedToken.hField != nullptr)
13872 helper = CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD;
13875 GenTreeArgList* helperArgs = gtNewArgList(op1);
13877 op1 = gtNewHelperCallNode(helper, TYP_STRUCT, GTF_EXCEPT, helperArgs);
13880 // The handle struct is returned in a register
13880 op1->gtCall.gtReturnType = TYP_REF;
13882 tiRetVal = verMakeTypeInfo(tokenType);
13883 impPushOnStack(op1, tiRetVal);
13888 case CEE_UNBOX_ANY:
13890 /* Get the Class index */
13891 assertImp(sz == sizeof(unsigned));
13893 _impResolveToken(CORINFO_TOKENKIND_Class);
13895 JITDUMP(" %08X", resolvedToken.token);
13897 BOOL runtimeLookup;
13898 op2 = impTokenToHandle(&resolvedToken, &runtimeLookup);
13899 if (op2 == nullptr)
13900 { // compDonotInline()
13904 // Run this always so we can get access exceptions even with SkipVerification.
13905 accessAllowedResult =
13906 info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
13907 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
13909 if (opcode == CEE_UNBOX_ANY && !eeIsValueClass(resolvedToken.hClass))
13911 if (tiVerificationNeeded)
13913 typeInfo tiUnbox = impStackTop().seTypeInfo;
13914 Verify(tiUnbox.IsObjRef(), "bad unbox.any arg");
13915 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
13916 tiRetVal.NormaliseForStack();
13918 op1 = impPopStack().val;
13922 /* Pop the object and create the unbox helper call */
13923 /* You might think that for UNBOX_ANY we need to push a different */
13924 /* (non-byref) type, but here we're making the tiRetVal that is used */
13925 /* for the intermediate pointer which we then transfer onto the OBJ */
13926 /* instruction. OBJ then creates the appropriate tiRetVal. */
13927 if (tiVerificationNeeded)
13929 typeInfo tiUnbox = impStackTop().seTypeInfo;
13930 Verify(tiUnbox.IsObjRef(), "Bad unbox arg");
13932 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
13933 Verify(tiRetVal.IsValueClass(), "not value class");
13934 tiRetVal.MakeByRef();
13936 // We always come from an objref, so this is safe byref
13937 tiRetVal.SetIsPermanentHomeByRef();
13938 tiRetVal.SetIsReadonlyByRef();
13941 op1 = impPopStack().val;
13942 assertImp(op1->gtType == TYP_REF);
13944 helper = info.compCompHnd->getUnBoxHelper(resolvedToken.hClass);
13945 assert(helper == CORINFO_HELP_UNBOX || helper == CORINFO_HELP_UNBOX_NULLABLE);
13947 // We only want to expand inline the normal UNBOX helper;
13948 expandInline = (helper == CORINFO_HELP_UNBOX);
13952 if (compCurBB->isRunRarely())
13954 expandInline = false; // not worth the code expansion
13960 // we are doing normal unboxing
13961 // inline the common case of the unbox helper
13962 // UNBOX(exp) morphs into
13963 // clone = pop(exp);
13964 // ((*clone == typeToken) ? nop : helper(clone, typeToken));
13965 // push(clone + sizeof(void*))
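// Concretely, the check '*clone == typeToken' compares the object's method table pointer against the
// type handle, and the unbox helper is only called on the not-equal path of the GT_QMARK built below.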
13967 GenTreePtr cloneOperand;
13968 op1 = impCloneExpr(op1, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
13969 nullptr DEBUGARG("inline UNBOX clone1"));
13970 op1 = gtNewOperNode(GT_IND, TYP_I_IMPL, op1);
13972 GenTreePtr condBox = gtNewOperNode(GT_EQ, TYP_INT, op1, op2);
13974 op1 = impCloneExpr(cloneOperand, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
13975 nullptr DEBUGARG("inline UNBOX clone2"));
13976 op2 = impTokenToHandle(&resolvedToken);
13977 if (op2 == nullptr)
13978 { // compDonotInline()
13981 args = gtNewArgList(op2, op1);
13982 op1 = gtNewHelperCallNode(helper, TYP_VOID, 0, args);
13984 op1 = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), op1);
13985 op1 = gtNewQmarkNode(TYP_VOID, condBox, op1);
13986 condBox->gtFlags |= GTF_RELOP_QMARK;
13988 // QMARK nodes cannot reside on the evaluation stack. Because there
13989 // may be other trees on the evaluation stack that side-effect the
13990 // sources of the UNBOX operation we must spill the stack.
13992 impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13994 // Create the address-expression to reference past the object header
13995 // to the beginning of the value-type. Today this means adjusting
13996 // past the base of the object's vtable field, which is pointer sized.
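// A boxed value is laid out as [method table pointer][payload], so adding sizeof(void*) to the cloned
// object reference yields a TYP_BYREF that points at the unboxed payload itself.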
13998 op2 = gtNewIconNode(sizeof(void*), TYP_I_IMPL);
13999 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, cloneOperand, op2);
14003 unsigned callFlags = (helper == CORINFO_HELP_UNBOX) ? 0 : GTF_EXCEPT;
14005 // Don't optimize, just call the helper and be done with it
14006 args = gtNewArgList(op2, op1);
14007 op1 = gtNewHelperCallNode(helper,
14008 (var_types)((helper == CORINFO_HELP_UNBOX) ? TYP_BYREF : TYP_STRUCT),
14012 assert(helper == CORINFO_HELP_UNBOX && op1->gtType == TYP_BYREF || // Unbox helper returns a byref.
14013 helper == CORINFO_HELP_UNBOX_NULLABLE &&
14014 varTypeIsStruct(op1) // UnboxNullable helper returns a struct.
14018 /*---------------------------------------------------------------------
14021   |  \ helper | CORINFO_HELP_UNBOX       | CORINFO_HELP_UNBOX_NULLABLE  |
14022   | opcode  \ | (which returns a BYREF)  | (which returns a STRUCT)     |
14024   |---------------------------------------------------------------------
14025   | UNBOX     | push the BYREF           | spill the STRUCT to a local, |
14026   |           |                          | push the BYREF to this local |
14027   |---------------------------------------------------------------------
14028   | UNBOX_ANY | push a GT_OBJ of         | push the STRUCT;             |
14029   |           | the BYREF                | for Linux, when the          |
14030   |           |                          | struct is returned in two    |
14031   |           |                          | registers, create a temp     |
14032   |           |                          | whose address is passed to   |
14033   |           |                          | the unbox_nullable helper.   |
14034   |--------------------------------------------------------------------*/
14037 if (opcode == CEE_UNBOX)
14039 if (helper == CORINFO_HELP_UNBOX_NULLABLE)
14041 // Unbox nullable helper returns a struct type.
14042 // We need to spill it to a temp so that we can take the address of it.
14043 // Here we need the unsafe value cls check, since the address of the struct is taken to be used
14044 // further along and is potentially exploitable.
14046 unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a nullable"));
14047 lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */);
14049 op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14050 op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
14051 assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp.
14053 op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14054 op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2);
14055 op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2);
14058 assert(op1->gtType == TYP_BYREF);
14059 assert(!tiVerificationNeeded || tiRetVal.IsByRef());
14063 assert(opcode == CEE_UNBOX_ANY);
14065 if (helper == CORINFO_HELP_UNBOX)
14067 // Normal unbox helper returns a TYP_BYREF.
14068 impPushOnStack(op1, tiRetVal);
14073 assert(helper == CORINFO_HELP_UNBOX_NULLABLE && "Make sure the helper is nullable!");
14075 #if FEATURE_MULTIREG_RET
14077 if (varTypeIsStruct(op1) && IsMultiRegReturnedType(resolvedToken.hClass))
14079 // Unbox nullable helper returns a TYP_STRUCT.
14080 // For the multi-reg case we need to spill it to a temp so that
14081 // we can pass the address to the unbox_nullable jit helper.
14083 unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a register returnable nullable"));
14084 lvaTable[tmp].lvIsMultiRegArg = true;
14085 lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */);
14087 op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14088 op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
14089 assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp.
14091 op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14092 op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2);
14093 op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2);
14095 // In this case the return value of the unbox helper is TYP_BYREF.
14096 // Make sure the right type is placed on the operand type stack.
14097 impPushOnStack(op1, tiRetVal);
14099 // Load the struct.
14102 assert(op1->gtType == TYP_BYREF);
14103 assert(!tiVerificationNeeded || tiRetVal.IsByRef());
14109 #endif // !FEATURE_MULTIREG_RET
14112 // If the struct is not passable in registers, we have it materialized in the RetBuf.
14113 assert(op1->gtType == TYP_STRUCT);
14114 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
14115 assert(tiRetVal.IsValueClass());
14119 impPushOnStack(op1, tiRetVal);
14125 /* Get the Class index */
14126 assertImp(sz == sizeof(unsigned));
14128 _impResolveToken(CORINFO_TOKENKIND_Box);
14130 JITDUMP(" %08X", resolvedToken.token);
14132 if (tiVerificationNeeded)
14134 typeInfo tiActual = impStackTop().seTypeInfo;
14135 typeInfo tiBox = verMakeTypeInfo(resolvedToken.hClass);
14137 Verify(verIsBoxable(tiBox), "boxable type expected");
14139 // check the class constraints of the boxed type in case we are boxing an uninitialized value
14140 Verify(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
14141 "boxed type has unsatisfied class constraints");
14143 Verify(tiCompatibleWith(tiActual, tiBox.NormaliseForStack(), true), "type mismatch");
14145 // Observation: the following code introduces a boxed value class on the stack, but,
14146 // according to the ECMA spec, one would simply expect: tiRetVal =
14147 // typeInfo(TI_REF,impGetObjectClass());
14149 // Push the result back on the stack,
14150 // even if clsHnd is a value class we want the TI_REF
14151 // we call back to the EE to find out what type we should push (for Nullable<T> we push T)
14152 tiRetVal = typeInfo(TI_REF, info.compCompHnd->getTypeForBox(resolvedToken.hClass));
14155 accessAllowedResult =
14156 info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14157 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14159 // Note BOX can be used on things that are not value classes, in which
14160 // case we get a NOP. However the verifier's view of the type on the
14161 // stack changes (in generic code a 'T' becomes a 'boxed T')
14162 if (!eeIsValueClass(resolvedToken.hClass))
14164 verCurrentState.esStack[verCurrentState.esStackDepth - 1].seTypeInfo = tiRetVal;
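// Peephole: a 'box' immediately followed by 'unbox.any' of the same class is a no-op,
// e.g.   box       !!T
//        unbox.any !!T
// so try to elide both and leave the value on the stack unchanged.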
14168 // Look ahead for unbox.any
14169 if (codeAddr + (sz + 1 + sizeof(mdToken)) <= codeEndp && codeAddr[sz] == CEE_UNBOX_ANY)
14171 DWORD classAttribs = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
14172 if (!(classAttribs & CORINFO_FLG_SHAREDINST))
14174 CORINFO_RESOLVED_TOKEN unboxResolvedToken;
14176 impResolveToken(codeAddr + (sz + 1), &unboxResolvedToken, CORINFO_TOKENKIND_Class);
14178 if (unboxResolvedToken.hClass == resolvedToken.hClass)
14180 // Skip the next unbox.any instruction
14181 sz += sizeof(mdToken) + 1;
14187 impImportAndPushBox(&resolvedToken);
14188 if (compDonotInline())
14197 /* Get the Class index */
14198 assertImp(sz == sizeof(unsigned));
14200 _impResolveToken(CORINFO_TOKENKIND_Class);
14202 JITDUMP(" %08X", resolvedToken.token);
14204 if (tiVerificationNeeded)
14206 tiRetVal = typeInfo(TI_INT);
14209 op1 = gtNewIconNode(info.compCompHnd->getClassSize(resolvedToken.hClass));
14210 impPushOnStack(op1, tiRetVal);
14213 case CEE_CASTCLASS:
14215 /* Get the Class index */
14217 assertImp(sz == sizeof(unsigned));
14219 _impResolveToken(CORINFO_TOKENKIND_Casting);
14221 JITDUMP(" %08X", resolvedToken.token);
14223 if (!opts.IsReadyToRun())
14225 op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
14226 if (op2 == nullptr)
14227 { // compDonotInline()
14232 if (tiVerificationNeeded)
14234 Verify(impStackTop().seTypeInfo.IsObjRef(), "object ref expected");
14236 tiRetVal = typeInfo(TI_REF, resolvedToken.hClass);
14239 accessAllowedResult =
14240 info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14241 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14243 op1 = impPopStack().val;
14245 /* Pop the address and create the 'checked cast' helper call */
14247 // At this point we expect typeRef to contain the token, op1 to contain the value being cast,
14248 // and op2 to contain code that creates the type handle corresponding to typeRef
14251 #ifdef FEATURE_READYTORUN_COMPILER
14252 if (opts.IsReadyToRun())
14254 GenTreePtr opLookup = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_CHKCAST,
14255 TYP_REF, gtNewArgList(op1));
14256 usingReadyToRunHelper = (opLookup != nullptr);
14257 op1 = (usingReadyToRunHelper ? opLookup : op1);
14259 if (!usingReadyToRunHelper)
14261 // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
14262 // and the chkcastany call with a single call to a dynamic R2R cell that will:
14263 // 1) Load the context
14264 // 2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
14265 // 3) Check the object on the stack for the type-cast
14266 // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
14268 op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
14269 if (op2 == nullptr)
14270 { // compDonotInline()
14276 if (!usingReadyToRunHelper)
14279 op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, true);
14281 if (compDonotInline())
14286 /* Push the result back on the stack */
14287 impPushOnStack(op1, tiRetVal);
14292 if (compIsForInlining())
14294 // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
14295 // TODO: Will this be too strict, given that we will inline many basic blocks?
14296 // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
14298 /* Do we have just the exception on the stack? */
14300 if (verCurrentState.esStackDepth != 1)
14302 /* if not, just don't inline the method */
14304 compInlineResult->NoteFatal(InlineObservation::CALLEE_THROW_WITH_INVALID_STACK);
14309 if (tiVerificationNeeded)
14311 tiRetVal = impStackTop().seTypeInfo;
14312 Verify(tiRetVal.IsObjRef(), "object ref expected");
14313 if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
14315 Verify(!tiRetVal.IsThisPtr(), "throw uninitialized this");
14319 block->bbSetRunRarely(); // any block with a throw is rare
14320 /* Pop the exception object and create the 'throw' helper call */
14322 op1 = gtNewHelperCallNode(CORINFO_HELP_THROW, TYP_VOID, GTF_EXCEPT, gtNewArgList(impPopStack().val));
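// Evaluate anything left on the stack for its side effects; the evaluation stack must be
// empty before the helper call is appended.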
14325 if (verCurrentState.esStackDepth > 0)
14327 impEvalSideEffects();
14330 assert(verCurrentState.esStackDepth == 0);
14336 assert(!compIsForInlining());
14338 if (info.compXcptnsCount == 0)
14340 BADCODE("rethrow outside catch");
14343 if (tiVerificationNeeded)
14345 Verify(block->hasHndIndex(), "rethrow outside catch");
14346 if (block->hasHndIndex())
14348 EHblkDsc* HBtab = ehGetDsc(block->getHndIndex());
14349 Verify(!HBtab->HasFinallyOrFaultHandler(), "rethrow in finally or fault");
14350 if (HBtab->HasFilter())
14352 // we better be in the handler clause part, not the filter part
14353 Verify(jitIsBetween(compCurBB->bbCodeOffs, HBtab->ebdHndBegOffs(), HBtab->ebdHndEndOffs()),
14354 "rethrow in filter");
14359 /* Create the 'rethrow' helper call */
14361 op1 = gtNewHelperCallNode(CORINFO_HELP_RETHROW, TYP_VOID, GTF_EXCEPT);
14367 assertImp(sz == sizeof(unsigned));
14369 _impResolveToken(CORINFO_TOKENKIND_Class);
14371 JITDUMP(" %08X", resolvedToken.token);
14373 if (tiVerificationNeeded)
14375 typeInfo tiTo = impStackTop().seTypeInfo;
14376 typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
14378 Verify(tiTo.IsByRef(), "byref expected");
14379 Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
14381 Verify(tiCompatibleWith(tiInstr, tiTo.DereferenceByRef(), false),
14382 "type operand incompatible with type of address");
14385 size = info.compCompHnd->getClassSize(resolvedToken.hClass); // Size
14386 op2 = gtNewIconNode(0); // Value
14387 op1 = impPopStack().val; // Dest
14388 op1 = gtNewBlockVal(op1, size);
14389 op1 = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, false);
14394 if (tiVerificationNeeded)
14396 Verify(false, "bad opcode");
14399 op3 = impPopStack().val; // Size
14400 op2 = impPopStack().val; // Value
14401 op1 = impPopStack().val; // Dest
14403 if (op3->IsCnsIntOrI())
14405 size = (unsigned)op3->AsIntConCommon()->IconValue();
14406 op1 = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, size);
14410 op1 = new (this, GT_DYN_BLK) GenTreeDynBlk(op1, op3);
14413 op1 = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, false);
14419 if (tiVerificationNeeded)
14421 Verify(false, "bad opcode");
14423 op3 = impPopStack().val; // Size
14424 op2 = impPopStack().val; // Src
14425 op1 = impPopStack().val; // Dest
14427 if (op3->IsCnsIntOrI())
14429 size = (unsigned)op3->AsIntConCommon()->IconValue();
14430 op1 = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, size);
14434 op1 = new (this, GT_DYN_BLK) GenTreeDynBlk(op1, op3);
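// The block-copy source must be a location: if op2 is an ADDR node use the location under
// it directly, otherwise treat op2 as a pointer and indirect through it.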
14437 if (op2->OperGet() == GT_ADDR)
14439 op2 = op2->gtOp.gtOp1;
14443 op2 = gtNewOperNode(GT_IND, TYP_STRUCT, op2);
14446 op1 = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, true);
14451 assertImp(sz == sizeof(unsigned));
14453 _impResolveToken(CORINFO_TOKENKIND_Class);
14455 JITDUMP(" %08X", resolvedToken.token);
14457 if (tiVerificationNeeded)
14459 typeInfo tiFrom = impStackTop().seTypeInfo;
14460 typeInfo tiTo = impStackTop(1).seTypeInfo;
14461 typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
14463 Verify(tiFrom.IsByRef(), "expected byref source");
14464 Verify(tiTo.IsByRef(), "expected byref destination");
14466 Verify(tiCompatibleWith(tiFrom.DereferenceByRef(), tiInstr, false),
14467 "type of source address incompatible with type operand");
14468 Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
14469 Verify(tiCompatibleWith(tiInstr, tiTo.DereferenceByRef(), false),
14470 "type operand incompatible with type of destination address");
14473 if (!eeIsValueClass(resolvedToken.hClass))
14475 op1 = impPopStack().val; // address to load from
14477 impBashVarAddrsToI(op1);
14479 assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
14481 op1 = gtNewOperNode(GT_IND, TYP_REF, op1);
14482 op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
14484 impPushOnStackNoType(op1);
14485 opcode = CEE_STIND_REF;
14487 goto STIND_POST_VERIFY;
14490 op2 = impPopStack().val; // Src
14491 op1 = impPopStack().val; // Dest
14492 op1 = gtNewCpObjNode(op1, op2, resolvedToken.hClass, ((prefixFlags & PREFIX_VOLATILE) != 0));
14497 assertImp(sz == sizeof(unsigned));
14499 _impResolveToken(CORINFO_TOKENKIND_Class);
14501 JITDUMP(" %08X", resolvedToken.token);
14503 if (eeIsValueClass(resolvedToken.hClass))
14505 lclTyp = TYP_STRUCT;
14512 if (tiVerificationNeeded)
14515 typeInfo tiPtr = impStackTop(1).seTypeInfo;
14517 // Make sure we have a good looking byref
14518 Verify(tiPtr.IsByRef(), "pointer not byref");
14519 Verify(!tiPtr.IsReadonlyByRef(), "write to readonly byref");
14520 if (!tiPtr.IsByRef() || tiPtr.IsReadonlyByRef())
14522 compUnsafeCastUsed = true;
14525 typeInfo ptrVal = DereferenceByRef(tiPtr);
14526 typeInfo argVal = verMakeTypeInfo(resolvedToken.hClass);
14528 if (!tiCompatibleWith(impStackTop(0).seTypeInfo, NormaliseForStack(argVal), true))
14530 Verify(false, "type of value incompatible with type operand");
14531 compUnsafeCastUsed = true;
14534 if (!tiCompatibleWith(argVal, ptrVal, false))
14536 Verify(false, "type operand incompatible with type of address");
14537 compUnsafeCastUsed = true;
14542 compUnsafeCastUsed = true;
14545 if (lclTyp == TYP_REF)
14547 opcode = CEE_STIND_REF;
14548 goto STIND_POST_VERIFY;
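// For a primitive value class, stobj is just an ordinary scalar store (stind) of the
// corresponding type.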
14551 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
14552 if (impIsPrimitive(jitTyp))
14554 lclTyp = JITtype2varType(jitTyp);
14555 goto STIND_POST_VERIFY;
14558 op2 = impPopStack().val; // Value
14559 op1 = impPopStack().val; // Ptr
14561 assertImp(varTypeIsStruct(op2));
14563 op1 = impAssignStructPtr(op1, op2, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
14569 assert(!compIsForInlining());
14571 // Being lazy here. Refanys are tricky in terms of gc tracking.
14572 // Since it is uncommon, just don't perform struct promotion in any method that contains mkrefany.
14574 JITDUMP("disabling struct promotion because of mkrefany\n");
14575 fgNoStructPromotion = true;
14577 oper = GT_MKREFANY;
14578 assertImp(sz == sizeof(unsigned));
14580 _impResolveToken(CORINFO_TOKENKIND_Class);
14582 JITDUMP(" %08X", resolvedToken.token);
14584 op2 = impTokenToHandle(&resolvedToken, nullptr, TRUE);
14585 if (op2 == nullptr)
14586 { // compDonotInline()
14590 if (tiVerificationNeeded)
14592 typeInfo tiPtr = impStackTop().seTypeInfo;
14593 typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
14595 Verify(!verIsByRefLike(tiInstr), "mkrefany of byref-like class");
14596 Verify(!tiPtr.IsReadonlyByRef(), "readonly byref used with mkrefany");
14597 Verify(typeInfo::AreEquivalent(tiPtr.DereferenceByRef(), tiInstr), "type mismatch");
14600 accessAllowedResult =
14601 info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14602 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14604 op1 = impPopStack().val;
14606 // @SPECVIOLATION: TYP_INT should not be allowed here by a strict reading of the spec.
14607 // But JIT32 allowed it, so we continue to allow it.
14608 assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL || op1->TypeGet() == TYP_INT);
14610 // MKREFANY returns a struct. op2 is the class token.
14611 op1 = gtNewOperNode(oper, TYP_STRUCT, op1, op2);
14613 impPushOnStack(op1, verMakeTypeInfo(impGetRefAnyClass()));
14619 assertImp(sz == sizeof(unsigned));
14621 _impResolveToken(CORINFO_TOKENKIND_Class);
14623 JITDUMP(" %08X", resolvedToken.token);
14627 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
14629 if (tiVerificationNeeded)
14631 typeInfo tiPtr = impStackTop().seTypeInfo;
14633 // Make sure we have a byref
14634 if (!tiPtr.IsByRef())
14636 Verify(false, "pointer not byref");
14637 compUnsafeCastUsed = true;
14639 typeInfo tiPtrVal = DereferenceByRef(tiPtr);
14641 if (!tiCompatibleWith(tiPtrVal, tiRetVal, false))
14643 Verify(false, "type of address incompatible with type operand");
14644 compUnsafeCastUsed = true;
14646 tiRetVal.NormaliseForStack();
14650 compUnsafeCastUsed = true;
14653 if (eeIsValueClass(resolvedToken.hClass))
14655 lclTyp = TYP_STRUCT;
14660 opcode = CEE_LDIND_REF;
14661 goto LDIND_POST_VERIFY;
14664 op1 = impPopStack().val;
14666 assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL);
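// For a primitive value class, ldobj is just a scalar indirection; otherwise build an OBJ
// node that carries the class handle of the loaded struct.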
14668 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
14669 if (impIsPrimitive(jitTyp))
14671 op1 = gtNewOperNode(GT_IND, JITtype2varType(jitTyp), op1);
14673 // Could point anywhere, for example a boxed class static int
14674 op1->gtFlags |= GTF_IND_TGTANYWHERE | GTF_GLOB_REF;
14675 assertImp(varTypeIsArithmetic(op1->gtType));
14679 // OBJ returns a struct
14680 // and takes an inline argument which is the class token of the loaded obj
14681 op1 = gtNewObjNode(resolvedToken.hClass, op1);
14683 op1->gtFlags |= GTF_EXCEPT;
14685 impPushOnStack(op1, tiRetVal);
14690 if (tiVerificationNeeded)
14692 typeInfo tiArray = impStackTop().seTypeInfo;
14693 Verify(verIsSDArray(tiArray), "bad array");
14694 tiRetVal = typeInfo(TI_INT);
14697 op1 = impPopStack().val;
14698 if (!opts.MinOpts() && !opts.compDbgCode)
14700 /* Use GT_ARR_LENGTH operator so rng check opts see this */
14701 GenTreeArrLen* arrLen =
14702 new (this, GT_ARR_LENGTH) GenTreeArrLen(TYP_INT, op1, offsetof(CORINFO_Array, length));
14704 /* Mark the block as containing a length expression */
14706 if (op1->gtOper == GT_LCL_VAR)
14708 block->bbFlags |= BBF_HAS_IDX_LEN;
14715 /* Create the expression "*(array_addr + ArrLenOffs)" */
14716 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
14717 gtNewIconNode(offsetof(CORINFO_Array, length), TYP_I_IMPL));
14718 op1 = gtNewOperNode(GT_IND, TYP_INT, op1);
14719 op1->gtFlags |= GTF_IND_ARR_LEN;
14722 /* An indirection will cause a GPF if the address is null */
14723 op1->gtFlags |= GTF_EXCEPT;
14725 /* Push the result back on the stack */
14726 impPushOnStack(op1, tiRetVal);
14730 op1 = gtNewHelperCallNode(CORINFO_HELP_USER_BREAKPOINT, TYP_VOID);
14734 if (opts.compDbgCode)
14736 op1 = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
14741 /******************************** NYI *******************************/
14744 OutputDebugStringA("CLR: Invalid x86 breakpoint in IL stream\n");
14747 case CEE_MACRO_END:
14750 BADCODE3("unknown opcode", ": %02X", (int)opcode);
14754 prevOpcode = opcode;
14757 assert(!insertLdloc || opcode == CEE_DUP);
14760 assert(!insertLdloc);
14763 #undef _impResolveToken
14766 #pragma warning(pop)
14769 // Push a local/argument tree on the operand stack
14770 void Compiler::impPushVar(GenTree* op, typeInfo tiRetVal)
14772 tiRetVal.NormaliseForStack();
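// When tracking constructor 'this' initialization, push 'this' as an uninitialized object
// ref until the base/chained constructor call has been seen.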
14774 if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init) && tiRetVal.IsThisPtr())
14776 tiRetVal.SetUninitialisedObjRef();
14779 impPushOnStack(op, tiRetVal);
14782 // Load a local/argument on the operand stack
14783 // lclNum is an index into lvaTable *NOT* the arg/lcl index in the IL
14784 void Compiler::impLoadVar(unsigned lclNum, IL_OFFSET offset, typeInfo tiRetVal)
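// Normalize-on-load locals are loaded with their exact small type, so the widening happens
// on each load; other locals are loaded with their widened (actual) stack type.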
14788 if (lvaTable[lclNum].lvNormalizeOnLoad())
14790 lclTyp = lvaGetRealType(lclNum);
14794 lclTyp = lvaGetActualType(lclNum);
14797 impPushVar(gtNewLclvNode(lclNum, lclTyp, offset), tiRetVal);
14800 // Load an argument on the operand stack
14801 // Shared by the various CEE_LDARG opcodes
14802 // ilArgNum is the argument index as specified in IL.
14803 // It will be mapped to the correct lvaTable index
14804 void Compiler::impLoadArg(unsigned ilArgNum, IL_OFFSET offset)
14806 Verify(ilArgNum < info.compILargsCount, "bad arg num");
14808 if (compIsForInlining())
14810 if (ilArgNum >= info.compArgsCount)
14812 compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_ARGUMENT_NUMBER);
14816 impPushVar(impInlineFetchArg(ilArgNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo),
14817 impInlineInfo->lclVarInfo[ilArgNum].lclVerTypeInfo);
14821 if (ilArgNum >= info.compArgsCount)
14826 unsigned lclNum = compMapILargNum(ilArgNum); // account for possible hidden param
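// Note: lvaArg0Var is usually the same as compThisArg, but it may refer to a copy of 'this'
// (e.g., when the method writes to 'this'), so use it when loading arg 0.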
14828 if (lclNum == info.compThisArg)
14830 lclNum = lvaArg0Var;
14833 impLoadVar(lclNum, offset);
14837 // Load a local on the operand stack
14838 // Shared by the various CEE_LDLOC opcodes
14839 // ilLclNum is the local index as specified in IL.
14840 // It will be mapped to the correct lvaTable index
14841 void Compiler::impLoadLoc(unsigned ilLclNum, IL_OFFSET offset)
14843 if (tiVerificationNeeded)
14845 Verify(ilLclNum < info.compMethodInfo->locals.numArgs, "bad loc num");
14846 Verify(info.compInitMem, "initLocals not set");
14849 if (compIsForInlining())
14851 if (ilLclNum >= info.compMethodInfo->locals.numArgs)
14853 compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_LOCAL_NUMBER);
14857 // Get the local type
14858 var_types lclTyp = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclTypeInfo;
14860 typeInfo tiRetVal = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclVerTypeInfo;
14862 /* Have we allocated a temp for this local? */
14864 unsigned lclNum = impInlineFetchLocal(ilLclNum DEBUGARG("Inline ldloc first use temp"));
14866 // All vars of inlined methods should be !lvNormalizeOnLoad()
14868 assert(!lvaTable[lclNum].lvNormalizeOnLoad());
14869 lclTyp = genActualType(lclTyp);
14871 impPushVar(gtNewLclvNode(lclNum, lclTyp), tiRetVal);
14875 if (ilLclNum >= info.compMethodInfo->locals.numArgs)
14880 unsigned lclNum = info.compArgsCount + ilLclNum;
14882 impLoadVar(lclNum, offset);
14886 #ifdef _TARGET_ARM_
14887 /**************************************************************************************
14889 * When assigning a vararg call src to a HFA lcl dest, mark that we cannot promote the
14890 * dst struct, because struct promotion will turn it into a float/double variable while
14891 the rhs will be an int/long variable. We don't generate code for assigning an int into
14892 a float, but there is nothing that would prevent us from doing so. The tree however
14893 would look like: (=, (typ_float, typ_int)) or (GT_TRANSFER, (typ_float, typ_int))
14895 * tmpNum - the lcl dst variable num that is a struct.
14896 * src - the src tree assigned to the dest that is a struct/int (when varargs call.)
14897 * hClass - the type handle for the struct variable.
14899 * TODO-ARM-CQ: [301608] This is a rare scenario with varargs and struct promotion coming into play,
14900 * however, we could do a codegen of transferring from int to float registers
14901 * (transfer, not a cast.)
14904 void Compiler::impMarkLclDstNotPromotable(unsigned tmpNum, GenTreePtr src, CORINFO_CLASS_HANDLE hClass)
14906 if (src->gtOper == GT_CALL && src->gtCall.IsVarargs() && IsHfa(hClass))
14908 int hfaSlots = GetHfaCount(hClass);
14909 var_types hfaType = GetHfaType(hClass);
14911 // If we have varargs we morph the method's return type to be "int" irrespective of its original
14912 // type (struct/float) at the importer, because the ABI specifies that the return comes back in integer registers.
14913 // We don't want struct promotion to turn an expression like this:
14914 // lclFld_int = callvar_int() into lclFld_float = callvar_int();
14915 // This means an int is getting assigned to a float without a cast. Prevent the promotion.
14916 if ((hfaType == TYP_DOUBLE && hfaSlots == sizeof(double) / REGSIZE_BYTES) ||
14917 (hfaType == TYP_FLOAT && hfaSlots == sizeof(float) / REGSIZE_BYTES))
14919 // Make sure this struct type stays as struct so we can receive the call in a struct.
14920 lvaTable[tmpNum].lvIsMultiRegRet = true;
14924 #endif // _TARGET_ARM_
14926 #if FEATURE_MULTIREG_RET
14927 GenTreePtr Compiler::impAssignMultiRegTypeToVar(GenTreePtr op, CORINFO_CLASS_HANDLE hClass)
14929 unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Return value temp for multireg return."));
14930 impAssignTempGen(tmpNum, op, hClass, (unsigned)CHECK_SPILL_NONE);
14931 GenTreePtr ret = gtNewLclvNode(tmpNum, op->gtType);
14933 // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
14934 ret->gtFlags |= GTF_DONT_CSE;
14936 assert(IsMultiRegReturnedType(hClass));
14938 // Mark the var so that fields are not promoted and stay together.
14939 lvaTable[tmpNum].lvIsMultiRegRet = true;
14943 #endif // FEATURE_MULTIREG_RET
14945 // Do the import for a return instruction.
14946 // Returns false if inlining was aborted.
14947 // 'opcode' can be ret, or a call opcode in the case of a tail.call
14948 bool Compiler::impReturnInstruction(BasicBlock* block, int prefixFlags, OPCODE& opcode)
14950 if (tiVerificationNeeded)
14952 verVerifyThisPtrInitialised();
14954 unsigned expectedStack = 0;
14955 if (info.compRetType != TYP_VOID)
14957 typeInfo tiVal = impStackTop().seTypeInfo;
14958 typeInfo tiDeclared =
14959 verMakeTypeInfo(info.compMethodInfo->args.retType, info.compMethodInfo->args.retTypeClass);
14961 Verify(!verIsByRefLike(tiDeclared) || verIsSafeToReturnByRef(tiVal), "byref return");
14963 Verify(tiCompatibleWith(tiVal, tiDeclared.NormaliseForStack(), true), "type mismatch");
14966 Verify(verCurrentState.esStackDepth == expectedStack, "stack non-empty on return");
14969 GenTree* op2 = nullptr;
14970 GenTree* op1 = nullptr;
14971 CORINFO_CLASS_HANDLE retClsHnd = nullptr;
14973 if (info.compRetType != TYP_VOID)
14975 StackEntry se = impPopStack(retClsHnd);
14978 if (!compIsForInlining())
14980 impBashVarAddrsToI(op2);
14981 op2 = impImplicitIorI4Cast(op2, info.compRetType);
14982 op2 = impImplicitR4orR8Cast(op2, info.compRetType);
14983 assertImp((genActualType(op2->TypeGet()) == genActualType(info.compRetType)) ||
14984 ((op2->TypeGet() == TYP_I_IMPL) && (info.compRetType == TYP_BYREF)) ||
14985 ((op2->TypeGet() == TYP_BYREF) && (info.compRetType == TYP_I_IMPL)) ||
14986 (varTypeIsFloating(op2->gtType) && varTypeIsFloating(info.compRetType)) ||
14987 (varTypeIsStruct(op2) && varTypeIsStruct(info.compRetType)));
14990 if (opts.compGcChecks && info.compRetType == TYP_REF)
14992 // DDB 3483 : JIT Stress: early termination of GC ref's life time in exception code path
14993 // VSW 440513: Incorrect gcinfo on the return value under COMPlus_JitGCChecks=1 for methods with
14996 assert(op2->gtType == TYP_REF);
14998 // confirm that the argument is a GC pointer (for debugging (GC stress))
14999 GenTreeArgList* args = gtNewArgList(op2);
15000 op2 = gtNewHelperCallNode(CORINFO_HELP_CHECK_OBJ, TYP_REF, 0, args);
15004 printf("\ncompGcChecks tree:\n");
15012 // inlinee's stack should be empty now.
15013 assert(verCurrentState.esStackDepth == 0);
15018 printf("\n\n Inlinee Return expression (before normalization) =>\n");
15023 // Make sure the type matches the original call.
15025 var_types returnType = genActualType(op2->gtType);
15026 var_types originalCallType = impInlineInfo->inlineCandidateInfo->fncRetType;
15027 if ((returnType != originalCallType) && (originalCallType == TYP_STRUCT))
15029 originalCallType = impNormStructType(impInlineInfo->inlineCandidateInfo->methInfo.args.retTypeClass);
15032 if (returnType != originalCallType)
15034 compInlineResult->NoteFatal(InlineObservation::CALLSITE_RETURN_TYPE_MISMATCH);
15038 // Below, we are going to set impInlineInfo->retExpr to the tree with the return
15039 // expression. At this point, retExpr could already be set if there are multiple
15040 // return blocks (meaning lvaInlineeReturnSpillTemp != BAD_VAR_NUM) and one of
15041 // the other blocks already set it. If there is only a single return block,
15042 // retExpr shouldn't be set. However, this is not true if we reimport a block
15043 // with a return. In that case, retExpr will be set, then the block will be
15044 // reimported, but retExpr won't get cleared as part of setting the block to
15045 // be reimported. The reimported retExpr value should be the same, so even if
15046 // we don't unconditionally overwrite it, it shouldn't matter.
15047 if (info.compRetNativeType != TYP_STRUCT)
15049 // compRetNativeType is not TYP_STRUCT.
15050 // This implies it could be either a scalar type or SIMD vector type or
15051 // a struct type that can be normalized to a scalar type.
15053 if (varTypeIsStruct(info.compRetType))
15055 noway_assert(info.compRetBuffArg == BAD_VAR_NUM);
15056 // adjust the type away from struct to integral
15057 // and no normalizing
15058 op2 = impFixupStructReturnType(op2, retClsHnd);
15062 // Do we have to normalize?
15063 var_types fncRealRetType = JITtype2varType(info.compMethodInfo->args.retType);
15064 if ((varTypeIsSmall(op2->TypeGet()) || varTypeIsSmall(fncRealRetType)) &&
15065 fgCastNeeded(op2, fncRealRetType))
15067 // Small-typed return values are normalized by the callee
15068 op2 = gtNewCastNode(TYP_INT, op2, fncRealRetType);
15072 if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15074 assert(info.compRetNativeType != TYP_VOID &&
15075 (fgMoreThanOneReturnBlock() || impInlineInfo->hasPinnedLocals));
15077 // This is a bit of a workaround...
15078 // If we are inlining a call that returns a struct, where the actual "native" return type is
15079 // not a struct (for example, the struct is composed of exactly one int, and the native
15080 // return type is thus an int), and the inlinee has multiple return blocks (thus,
15081 // lvaInlineeReturnSpillTemp is != BAD_VAR_NUM, and is the index of a local var that is set
15082 // to the *native* return type), and at least one of the return blocks is the result of
15083 // a call, then we have a problem. The situation is like this (from a failed test case):
15086 // // Note: valuetype plinq_devtests.LazyTests/LIX is a struct with only a single int
15087 // call !!0 [mscorlib]System.Threading.LazyInitializer::EnsureInitialized<valuetype
15088 // plinq_devtests.LazyTests/LIX>(!!0&, bool&, object&, class [mscorlib]System.Func`1<!!0>)
15092 // ldobj !!T // this gets bashed to a GT_LCL_FLD, type TYP_INT
15095 // call !!0 System.Threading.LazyInitializer::EnsureInitializedCore<!!0>(!!0&, bool&,
15096 // object&, class System.Func`1<!!0>)
15099 // In the code above, when we call impFixupStructReturnType(), we will change the op2 return type
15100 // of the inlinee return node, but we don't do that for GT_CALL nodes, which we delay until
15101 // morphing when we call fgFixupStructReturn(). We do this, apparently, to handle nested
15102 // inlining properly by leaving the correct type on the GT_CALL node through importing.
15104 // To fix this, for this case, we temporarily change the GT_CALL node type to the
15105 // native return type, which is what it will be set to eventually. We generate the
15106 // assignment to the return temp, using the correct type, and then restore the GT_CALL
15107 // node type. During morphing, the GT_CALL will get the correct, final, native return type.
15109 bool restoreType = false;
15110 if ((op2->OperGet() == GT_CALL) && (info.compRetType == TYP_STRUCT))
15112 noway_assert(op2->TypeGet() == TYP_STRUCT);
15113 op2->gtType = info.compRetNativeType;
15114 restoreType = true;
15117 impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(),
15118 (unsigned)CHECK_SPILL_ALL);
15120 GenTreePtr tmpOp2 = gtNewLclvNode(lvaInlineeReturnSpillTemp, op2->TypeGet());
15124 op2->gtType = TYP_STRUCT; // restore it to what it was
15130 if (impInlineInfo->retExpr)
15132 // Some other block(s) have seen the CEE_RET first.
15133 // Better they spilled to the same temp.
15134 assert(impInlineInfo->retExpr->gtOper == GT_LCL_VAR);
15135 assert(impInlineInfo->retExpr->gtLclVarCommon.gtLclNum == op2->gtLclVarCommon.gtLclNum);
15143 printf("\n\n Inlinee Return expression (after normalization) =>\n");
15148 // Report the return expression
15149 impInlineInfo->retExpr = op2;
15153 // compRetNativeType is TYP_STRUCT.
15154 // This implies that the struct is returned either via a RetBuf arg or as a multi-reg struct return.
15156 GenTreePtr iciCall = impInlineInfo->iciCall;
15157 assert(iciCall->gtOper == GT_CALL);
15159 // Assign the inlinee return into a spill temp.
15160 // spill temp only exists if there are multiple return points
15161 if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15163 // in this case we have to insert multiple struct copies to the temp
15164 // and the retexpr is just the temp.
15165 assert(info.compRetNativeType != TYP_VOID);
15166 assert(fgMoreThanOneReturnBlock() || impInlineInfo->hasPinnedLocals);
15168 impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(),
15169 (unsigned)CHECK_SPILL_ALL);
15172 #if defined(_TARGET_ARM_) || defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15173 #if defined(_TARGET_ARM_)
15174 // TODO-ARM64-NYI: HFA
15175 // TODO-AMD64-Unix and TODO-ARM once the ARM64 functionality is implemented the
15176 // next ifdefs could be refactored in a single method with the ifdef inside.
15177 if (IsHfa(retClsHnd))
15179 // Same as !IsHfa but just don't bother with impAssignStructPtr.
15180 #else // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15181 ReturnTypeDesc retTypeDesc;
15182 retTypeDesc.InitializeStructReturnType(this, retClsHnd);
15183 unsigned retRegCount = retTypeDesc.GetReturnRegCount();
15185 if (retRegCount != 0)
15187 // If single eightbyte, the return type would have been normalized and there won't be a temp var.
15188 // This code will be called only if the struct return has not been normalized (i.e. 2 eightbytes -
15190 assert(retRegCount == MAX_RET_REG_COUNT);
15191 // Same as !structDesc.passedInRegisters but just don't bother with impAssignStructPtr.
15192 CLANG_FORMAT_COMMENT_ANCHOR;
15193 #endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15195 if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15197 if (!impInlineInfo->retExpr)
15199 #if defined(_TARGET_ARM_)
15200 impInlineInfo->retExpr = gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType);
15201 #else // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15202 // The inlinee compiler has figured out the type of the temp already. Use it here.
15203 impInlineInfo->retExpr =
15204 gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType);
15205 #endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15210 impInlineInfo->retExpr = op2;
15214 #elif defined(_TARGET_ARM64_)
15215 ReturnTypeDesc retTypeDesc;
15216 retTypeDesc.InitializeStructReturnType(this, retClsHnd);
15217 unsigned retRegCount = retTypeDesc.GetReturnRegCount();
15219 if (retRegCount != 0)
15221 assert(!iciCall->AsCall()->HasRetBufArg());
15222 assert(retRegCount >= 2);
15223 if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15225 if (!impInlineInfo->retExpr)
15227 // The inlinee compiler has figured out the type of the temp already. Use it here.
15228 impInlineInfo->retExpr =
15229 gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType);
15234 impInlineInfo->retExpr = op2;
15238 #endif // defined(_TARGET_ARM64_)
15240 assert(iciCall->AsCall()->HasRetBufArg());
15241 GenTreePtr dest = gtCloneExpr(iciCall->gtCall.gtCallArgs->gtOp.gtOp1);
15242 // spill temp only exists if there are multiple return points
15243 if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15246 // if this is the first return we have seen, set the retExpr
15246 if (!impInlineInfo->retExpr)
15248 impInlineInfo->retExpr =
15249 impAssignStructPtr(dest, gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType),
15250 retClsHnd, (unsigned)CHECK_SPILL_ALL);
15255 impInlineInfo->retExpr = impAssignStructPtr(dest, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL);
15262 if (compIsForInlining())
15267 if (info.compRetType == TYP_VOID)
15270 op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID);
15272 else if (info.compRetBuffArg != BAD_VAR_NUM)
15274 // Assign value to return buff (first param)
15275 GenTreePtr retBuffAddr = gtNewLclvNode(info.compRetBuffArg, TYP_BYREF, impCurStmtOffs);
15277 op2 = impAssignStructPtr(retBuffAddr, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL);
15278 impAppendTree(op2, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
15280 // There are cases where the address of the implicit RetBuf should be returned explicitly (in RAX).
15281 CLANG_FORMAT_COMMENT_ANCHOR;
15283 #if defined(_TARGET_AMD64_)
15285 // The x64 (System V and Win64) calling convention requires us to
15286 // return the implicit return buffer explicitly (in RAX).
15287 // Change the return type to be BYREF.
15288 op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF));
15289 #else // !defined(_TARGET_AMD64_)
15290 // On non-AMD64 targets the profiler hook requires us to return the implicit RetBuf explicitly.
15291 // In that case the return value of the function is changed to BYREF.
15292 // If the profiler hook is not needed, the return type of the function is TYP_VOID.
15293 if (compIsProfilerHookNeeded())
15295 op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF));
15300 op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID);
15302 #endif // !defined(_TARGET_AMD64_)
15304 else if (varTypeIsStruct(info.compRetType))
15306 #if !FEATURE_MULTIREG_RET
15307 // For both ARM architectures the HFA native types are maintained as structs.
15308 // Also on System V AMD64, multireg struct returns are left as structs.
15309 noway_assert(info.compRetNativeType != TYP_STRUCT);
15311 op2 = impFixupStructReturnType(op2, retClsHnd);
15313 op1 = gtNewOperNode(GT_RETURN, genActualType(info.compRetNativeType), op2);
15318 op1 = gtNewOperNode(GT_RETURN, genActualType(info.compRetType), op2);
15321 // We must have imported a tailcall and jumped to RET
15322 if (prefixFlags & PREFIX_TAILCALL)
15324 #ifndef _TARGET_AMD64_
15326 // This cannot be asserted on Amd64 since we permit the following IL pattern:
15330 assert(verCurrentState.esStackDepth == 0 && impOpcodeIsCallOpcode(opcode));
15333 opcode = CEE_RET; // To prevent trying to spill if CALL_SITE_BOUNDARIES
15335 // impImportCall() would have already appended TYP_VOID calls
15336 if (info.compRetType == TYP_VOID)
15342 impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
15344 // Remember at which BC offset the tree was finished
15345 impNoteLastILoffs();
15350 /*****************************************************************************
15351 * Mark the block as unimported.
15352 * Note that the caller is responsible for calling impImportBlockPending(),
15353 * with the appropriate stack-state
15356 inline void Compiler::impReimportMarkBlock(BasicBlock* block)
15359 if (verbose && (block->bbFlags & BBF_IMPORTED))
15361 printf("\nBB%02u will be reimported\n", block->bbNum);
15365 block->bbFlags &= ~BBF_IMPORTED;
15368 /*****************************************************************************
15369 * Mark the successors of the given block as unimported.
15370 * Note that the caller is responsible for calling impImportBlockPending()
15371 * for all the successors, with the appropriate stack-state.
15374 void Compiler::impReimportMarkSuccessors(BasicBlock* block)
15376 for (unsigned i = 0; i < block->NumSucc(); i++)
15378 impReimportMarkBlock(block->GetSucc(i));
15382 /*****************************************************************************
15384 * Filter wrapper to handle only the passed-in exception code
15388 LONG FilterVerificationExceptions(PEXCEPTION_POINTERS pExceptionPointers, LPVOID lpvParam)
15390 if (pExceptionPointers->ExceptionRecord->ExceptionCode == SEH_VERIFICATION_EXCEPTION)
15392 return EXCEPTION_EXECUTE_HANDLER;
15395 return EXCEPTION_CONTINUE_SEARCH;
15398 void Compiler::impVerifyEHBlock(BasicBlock* block, bool isTryStart)
15400 assert(block->hasTryIndex());
15401 assert(!compIsForInlining());
15403 unsigned tryIndex = block->getTryIndex();
15404 EHblkDsc* HBtab = ehGetDsc(tryIndex);
15408 assert(block->bbFlags & BBF_TRY_BEG);
15410 // The Stack must be empty
15412 if (block->bbStkDepth != 0)
15414 BADCODE("Evaluation stack must be empty on entry into a try block");
15418 // Save the stack contents, we'll need to restore it later
15420 SavedStack blockState;
15421 impSaveStackState(&blockState, false);
15423 while (HBtab != nullptr)
15427 // Are we verifying that an instance constructor properly initializes its 'this' pointer once?
15428 // We do not allow the 'this' pointer to be uninitialized when entering most kinds of try regions
15430 if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
15432 // We trigger an invalid program exception here unless we have a try/fault region.
15434 if (HBtab->HasCatchHandler() || HBtab->HasFinallyHandler() || HBtab->HasFilter())
15437 "The 'this' pointer of an instance constructor is not intialized upon entry to a try region");
15441 // Allow a try/fault region to proceed.
15442 assert(HBtab->HasFaultHandler());
15446 /* Recursively process the handler block */
15447 BasicBlock* hndBegBB = HBtab->ebdHndBeg;
15449 // Construct the proper verification stack state
15450 // either empty or one that contains just
15451 // the Exception Object that we are dealing with
15453 verCurrentState.esStackDepth = 0;
15455 if (handlerGetsXcptnObj(hndBegBB->bbCatchTyp))
15457 CORINFO_CLASS_HANDLE clsHnd;
15459 if (HBtab->HasFilter())
15461 clsHnd = impGetObjectClass();
15465 CORINFO_RESOLVED_TOKEN resolvedToken;
15467 resolvedToken.tokenContext = impTokenLookupContextHandle;
15468 resolvedToken.tokenScope = info.compScopeHnd;
15469 resolvedToken.token = HBtab->ebdTyp;
15470 resolvedToken.tokenType = CORINFO_TOKENKIND_Class;
15471 info.compCompHnd->resolveToken(&resolvedToken);
15473 clsHnd = resolvedToken.hClass;
15476 // push the catch arg on the stack, spill to a temp if necessary
15477 // Note: can update HBtab->ebdHndBeg!
15478 hndBegBB = impPushCatchArgOnStack(hndBegBB, clsHnd);
15481 // Queue up the handler for importing
15483 impImportBlockPending(hndBegBB);
15485 if (HBtab->HasFilter())
15487 /* @VERIFICATION : Ideally the end-of-filter state should get
15488 propagated to the catch handler; this is an incompleteness,
15489 but not a security/compliance issue, since the only
15490 interesting state is the 'thisInit' state.
15493 verCurrentState.esStackDepth = 0;
15495 BasicBlock* filterBB = HBtab->ebdFilter;
15497 // push the catch arg on the stack, spill to a temp if necessary
15498 // Note: can update HBtab->ebdFilter!
15499 filterBB = impPushCatchArgOnStack(filterBB, impGetObjectClass());
15501 impImportBlockPending(filterBB);
15504 else if (verTrackObjCtorInitState && HBtab->HasFaultHandler())
15506 /* Recursively process the handler block */
15508 verCurrentState.esStackDepth = 0;
15510 // Queue up the fault handler for importing
15512 impImportBlockPending(HBtab->ebdHndBeg);
15515 // Now process our enclosing try index (if any)
15517 tryIndex = HBtab->ebdEnclosingTryIndex;
15518 if (tryIndex == EHblkDsc::NO_ENCLOSING_INDEX)
15524 HBtab = ehGetDsc(tryIndex);
15528 // Restore the stack contents
15529 impRestoreStackState(&blockState);
15532 //***************************************************************
15533 // Import the instructions for the given basic block. Perform
15534 // verification, throwing an exception on failure. Push any successor blocks that are enabled for the first
15535 // time, or whose verification pre-state is changed.
15538 #pragma warning(push)
15539 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
15541 void Compiler::impImportBlock(BasicBlock* block)
15543 // BBF_INTERNAL blocks only exist during importation due to EH canonicalization. We need to
15544 // handle them specially. In particular, there is no IL to import for them, but we do need
15545 // to mark them as imported and put their successors on the pending import list.
15546 if (block->bbFlags & BBF_INTERNAL)
15548 JITDUMP("Marking BBF_INTERNAL block BB%02u as BBF_IMPORTED\n", block->bbNum);
15549 block->bbFlags |= BBF_IMPORTED;
15551 for (unsigned i = 0; i < block->NumSucc(); i++)
15553 impImportBlockPending(block->GetSucc(i));
15563 /* Make the block globally available */
15568 /* Initialize the debug variables */
15569 impCurOpcName = "unknown";
15570 impCurOpcOffs = block->bbCodeOffs;
15573 /* Set the current stack state to the merged result */
15574 verResetCurrentState(block, &verCurrentState);
15576 /* Now walk the code and import the IL into GenTrees */
15578 struct FilterVerificationExceptionsParam
15583 FilterVerificationExceptionsParam param;
15585 param.pThis = this;
15586 param.block = block;
15588 PAL_TRY(FilterVerificationExceptionsParam*, pParam, ¶m)
15590 /* @VERIFICATION : For now, the only state propagation from try
15591 to its handler is "thisInit" state (stack is empty at start of try).
15592 In general, for state that we track in verification, we need to
15593 model the possibility that an exception might happen at any IL
15594 instruction, so we really need to merge all states that obtain
15595 between IL instructions in a try block into the start states of
15598 However we do not allow the 'this' pointer to be uninitialized when
15599 entering most kinds of try regions (only try/fault are allowed to have
15600 an uninitialized this pointer on entry to the try)
15602 Fortunately, the stack is thrown away when an exception
15603 leads to a handler, so we don't have to worry about that.
15604 We DO, however, have to worry about the "thisInit" state.
15605 But only for the try/fault case.
15607 The only allowed transition is from TIS_Uninit to TIS_Init.
15609 So for a try/fault region for the fault handler block
15610 we will merge the start state of the try begin
15611 and the post-state of each block that is part of this try region
15614 // merge the start state of the try begin
15616 if (pParam->block->bbFlags & BBF_TRY_BEG)
15618 pParam->pThis->impVerifyEHBlock(pParam->block, true);
15621 pParam->pThis->impImportBlockCode(pParam->block);
15623 // As discussed above:
15624 // merge the post-state of each block that is part of this try region
15626 if (pParam->block->hasTryIndex())
15628 pParam->pThis->impVerifyEHBlock(pParam->block, false);
15631 PAL_EXCEPT_FILTER(FilterVerificationExceptions)
15633 verHandleVerificationFailure(block DEBUGARG(false));
15637 if (compDonotInline())
15642 assert(!compDonotInline());
15644 markImport = false;
15648 unsigned baseTmp = NO_BASE_TMP; // input temps assigned to successor blocks
15649 bool reimportSpillClique = false;
15650 BasicBlock* tgtBlock = nullptr;
15652 /* If the stack is non-empty, we might have to spill its contents */
15654 if (verCurrentState.esStackDepth != 0)
15656 impBoxTemp = BAD_VAR_NUM; // if a box temp is used in a block that leaves something
15657 // on the stack, its lifetime is hard to determine, simply
15658 // don't reuse such temps.
15660 GenTreePtr addStmt = nullptr;
15662 /* Do the successors of 'block' have any other predecessors ?
15663 We do not want to do some of the optimizations related to multiRef
15664 if we can reimport blocks */
15666 unsigned multRef = impCanReimport ? unsigned(~0) : 0;
15668 switch (block->bbJumpKind)
15672 /* Temporarily remove the 'jtrue' from the end of the tree list */
15674 assert(impTreeLast);
15675 assert(impTreeLast->gtOper == GT_STMT);
15676 assert(impTreeLast->gtStmt.gtStmtExpr->gtOper == GT_JTRUE);
15678 addStmt = impTreeLast;
15679 impTreeLast = impTreeLast->gtPrev;
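// 'addStmt' will be re-appended after the stack spills below, so the spilled values are
// computed before the branch.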
15681 /* Note if the next block has more than one ancestor */
15683 multRef |= block->bbNext->bbRefs;
15685 /* Does the next block have temps assigned? */
15687 baseTmp = block->bbNext->bbStkTempsIn;
15688 tgtBlock = block->bbNext;
15690 if (baseTmp != NO_BASE_TMP)
15695 /* Try the target of the jump then */
15697 multRef |= block->bbJumpDest->bbRefs;
15698 baseTmp = block->bbJumpDest->bbStkTempsIn;
15699 tgtBlock = block->bbJumpDest;
15703 multRef |= block->bbJumpDest->bbRefs;
15704 baseTmp = block->bbJumpDest->bbStkTempsIn;
15705 tgtBlock = block->bbJumpDest;
15709 multRef |= block->bbNext->bbRefs;
15710 baseTmp = block->bbNext->bbStkTempsIn;
15711 tgtBlock = block->bbNext;
15716 BasicBlock** jmpTab;
15719 /* Temporarily remove the GT_SWITCH from the end of the tree list */
15721 assert(impTreeLast);
15722 assert(impTreeLast->gtOper == GT_STMT);
15723 assert(impTreeLast->gtStmt.gtStmtExpr->gtOper == GT_SWITCH);
15725 addStmt = impTreeLast;
15726 impTreeLast = impTreeLast->gtPrev;
15728 jmpCnt = block->bbJumpSwt->bbsCount;
15729 jmpTab = block->bbJumpSwt->bbsDstTab;
15733 tgtBlock = (*jmpTab);
15735 multRef |= tgtBlock->bbRefs;
15737 // Thanks to spill cliques, we should have assigned all or none
15738 assert((baseTmp == NO_BASE_TMP) || (baseTmp == tgtBlock->bbStkTempsIn));
15739 baseTmp = tgtBlock->bbStkTempsIn;
15744 } while (++jmpTab, --jmpCnt);
15748 case BBJ_CALLFINALLY:
15749 case BBJ_EHCATCHRET:
15751 case BBJ_EHFINALLYRET:
15752 case BBJ_EHFILTERRET:
15754 NO_WAY("can't have 'unreached' end of BB with non-empty stack");
15758 noway_assert(!"Unexpected bbJumpKind");
15762 assert(multRef >= 1);
15764 /* Do we have a base temp number? */
15766 bool newTemps = (baseTmp == NO_BASE_TMP);
15770 /* Grab enough temps for the whole stack */
15771 baseTmp = impGetSpillTmpBase(block);
15774 /* Spill all stack entries into temps */
15775 unsigned level, tempNum;
15777 JITDUMP("\nSpilling stack entries into temps\n");
15778 for (level = 0, tempNum = baseTmp; level < verCurrentState.esStackDepth; level++, tempNum++)
15780 GenTreePtr tree = verCurrentState.esStack[level].val;
15782 /* VC generates code where it pushes a byref from one branch, and an int (ldc.i4 0) from
15783 the other. This should merge to a byref in unverifiable code.
15784 However, if the branch which leaves the TYP_I_IMPL on the stack is imported first, the
15785 successor would be imported assuming there was a TYP_I_IMPL on
15786 the stack. Thus the value would not get GC-tracked. Hence,
15787 change the temp to TYP_BYREF and reimport the successors.
15788 Note: We should only allow this in unverifiable code.
15790 if (tree->gtType == TYP_BYREF && lvaTable[tempNum].lvType == TYP_I_IMPL && !verNeedsVerification())
15792 lvaTable[tempNum].lvType = TYP_BYREF;
15793 impReimportMarkSuccessors(block);
15797 #ifdef _TARGET_64BIT_
15798 if (genActualType(tree->gtType) == TYP_I_IMPL && lvaTable[tempNum].lvType == TYP_INT)
15800 if (tiVerificationNeeded && tgtBlock->bbEntryState != nullptr &&
15801 (tgtBlock->bbFlags & BBF_FAILED_VERIFICATION) == 0)
15803 // Merge the current state into the entry state of block;
15804 // the call to verMergeEntryStates must have changed
15805 // the entry state of the block by merging the int local var
15806 // and the native-int stack entry.
15807 bool changed = false;
15808 if (verMergeEntryStates(tgtBlock, &changed))
15810 impRetypeEntryStateTemps(tgtBlock);
15811 impReimportBlockPending(tgtBlock);
15816 tgtBlock->bbFlags |= BBF_FAILED_VERIFICATION;
15821 // Some other block in the spill clique set this to "int", but now we have "native int".
15822 // Change the type and go back to re-import any blocks that used the wrong type.
15823 lvaTable[tempNum].lvType = TYP_I_IMPL;
15824 reimportSpillClique = true;
15826 else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_I_IMPL)
15828 // Spill clique has decided this should be "native int", but this block only pushes an "int".
15829 // Insert a sign-extension to "native int" so we match the clique.
15830 verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, TYP_I_IMPL);
15833 // Consider the case where one branch left a 'byref' on the stack and the other leaves
15834 // an 'int'. On 32-bit, this is allowed (in non-verifiable code) since they are the same
15835 // size. JIT64 managed to make this work on 64-bit. For compatibility, we support JIT64
15836 // behavior instead of asserting and then generating bad code (where we save/restore the
15837 // low 32 bits of a byref pointer to an 'int' sized local). If the 'int' side has been
15838 // imported already, we need to change the type of the local and reimport the spill clique.
15839 // If the 'byref' side has imported, we insert a cast from int to 'native int' to match
15840 // the 'byref' size.
15841 if (!tiVerificationNeeded)
15843 if (genActualType(tree->gtType) == TYP_BYREF && lvaTable[tempNum].lvType == TYP_INT)
15845 // Some other block in the spill clique set this to "int", but now we have "byref".
15846 // Change the type and go back to re-import any blocks that used the wrong type.
15847 lvaTable[tempNum].lvType = TYP_BYREF;
15848 reimportSpillClique = true;
15850 else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_BYREF)
15852 // Spill clique has decided this should be "byref", but this block only pushes an "int".
15853 // Insert a sign-extension to "native int" so we match the clique size.
15854 verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, TYP_I_IMPL);
15857 #endif // _TARGET_64BIT_
15859 #if FEATURE_X87_DOUBLES
15860 // X87 stack doesn't differentiate between float/double
15861 // so promoting is no big deal.
15862 // For everybody else keep it as float until we have a collision and then promote
15863 // Just like for x64's TYP_INT<->TYP_I_IMPL
15865 if (multRef > 1 && tree->gtType == TYP_FLOAT)
15867 verCurrentState.esStack[level].val = gtNewCastNode(TYP_DOUBLE, tree, TYP_DOUBLE);
15870 #else // !FEATURE_X87_DOUBLES
15872 if (tree->gtType == TYP_DOUBLE && lvaTable[tempNum].lvType == TYP_FLOAT)
15874 // Some other block in the spill clique set this to "float", but now we have "double".
15875 // Change the type and go back to re-import any blocks that used the wrong type.
15876 lvaTable[tempNum].lvType = TYP_DOUBLE;
15877 reimportSpillClique = true;
15879 else if (tree->gtType == TYP_FLOAT && lvaTable[tempNum].lvType == TYP_DOUBLE)
15881 // Spill clique has decided this should be "double", but this block only pushes a "float".
15882 // Insert a cast to "double" so we match the clique.
15883 verCurrentState.esStack[level].val = gtNewCastNode(TYP_DOUBLE, tree, TYP_DOUBLE);
15886 #endif // FEATURE_X87_DOUBLES
15888 /* If addStmt has a reference to tempNum (can only happen if we
15889 are spilling to the temps already used by a previous block),
15890 we need to spill addStmt */
15892 if (addStmt && !newTemps && gtHasRef(addStmt->gtStmt.gtStmtExpr, tempNum, false))
15894 GenTreePtr addTree = addStmt->gtStmt.gtStmtExpr;
15896 if (addTree->gtOper == GT_JTRUE)
15898 GenTreePtr relOp = addTree->gtOp.gtOp1;
15899 assert(relOp->OperIsCompare());
15901 var_types type = genActualType(relOp->gtOp.gtOp1->TypeGet());
15903 if (gtHasRef(relOp->gtOp.gtOp1, tempNum, false))
15905 unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op1"));
15906 impAssignTempGen(temp, relOp->gtOp.gtOp1, level);
15907 type = genActualType(lvaTable[temp].TypeGet());
15908 relOp->gtOp.gtOp1 = gtNewLclvNode(temp, type);
15911 if (gtHasRef(relOp->gtOp.gtOp2, tempNum, false))
15913 unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op2"));
15914 impAssignTempGen(temp, relOp->gtOp.gtOp2, level);
15915 type = genActualType(lvaTable[temp].TypeGet());
15916 relOp->gtOp.gtOp2 = gtNewLclvNode(temp, type);
15921 assert(addTree->gtOper == GT_SWITCH && genActualType(addTree->gtOp.gtOp1->gtType) == TYP_I_IMPL);
15923 unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt SWITCH"));
15924 impAssignTempGen(temp, addTree->gtOp.gtOp1, level);
15925 addTree->gtOp.gtOp1 = gtNewLclvNode(temp, TYP_I_IMPL);
15929 /* Spill the stack entry, and replace with the temp */
15931 if (!impSpillStackEntry(level, tempNum
15934 true, "Spill Stack Entry"
15940 BADCODE("bad stack state");
15943 // Oops. Something went wrong when spilling. Bad code.
15944 verHandleVerificationFailure(block DEBUGARG(true));
15950 /* Put back the 'jtrue'/'switch' if we removed it earlier */
15954 impAppendStmt(addStmt, (unsigned)CHECK_SPILL_NONE);
15958 // Some of the append/spill logic works on compCurBB
15960 assert(compCurBB == block);
15962 /* Save the tree list in the block */
15963 impEndTreeList(block);
15965 // impEndTreeList sets BBF_IMPORTED on the block
15966 // We do *NOT* want to set it later than this because
15967 // impReimportSpillClique might clear it if this block is both a
15968 // predecessor and successor in the current spill clique
15969 assert(block->bbFlags & BBF_IMPORTED);
15971 // If we had a int/native int, or float/double collision, we need to re-import
15972 if (reimportSpillClique)
15974 // This will re-import all the successors of block (as well as each of their predecessors)
15975 impReimportSpillClique(block);
15977 // For blocks that haven't been imported yet, we still need to mark them as pending import.
15978 for (unsigned i = 0; i < block->NumSucc(); i++)
15980 BasicBlock* succ = block->GetSucc(i);
15981 if ((succ->bbFlags & BBF_IMPORTED) == 0)
15983 impImportBlockPending(succ);
15987 else // the normal case
15989 // otherwise just import the successors of block
15991 /* Does this block jump to any other blocks? */
15992 for (unsigned i = 0; i < block->NumSucc(); i++)
15994 impImportBlockPending(block->GetSucc(i));
15999 #pragma warning(pop)
16002 /*****************************************************************************/
16004 // Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if
16005 // necessary (and ensures that it is a member of the set of BB's on the list, by setting its byte in
16006 // impPendingBlockMembers). Merges the current verification state into the verification state of "block"
16007 // (its "pre-state").
16009 void Compiler::impImportBlockPending(BasicBlock* block)
16014 printf("\nimpImportBlockPending for BB%02u\n", block->bbNum);
16018 // We will add a block to the pending set if it has not already been imported (or needs to be re-imported),
16019 // or if it has, but merging in a predecessor's post-state changes the block's pre-state.
16020 // (When we're doing verification, we always attempt the merge to detect verification errors.)
16022 // If the block has not been imported, add to pending set.
16023 bool addToPending = ((block->bbFlags & BBF_IMPORTED) == 0);
16025 // Initialize bbEntryState just the first time we try to add this block to the pending list
16026 // Just because bbEntryState is NULL, doesn't mean the pre-state wasn't previously set
16027 // We use NULL to indicate the 'common' state to avoid memory allocation
16028 if ((block->bbEntryState == nullptr) && ((block->bbFlags & (BBF_IMPORTED | BBF_FAILED_VERIFICATION)) == 0) &&
16029 (impGetPendingBlockMember(block) == 0))
16031 verInitBBEntryState(block, &verCurrentState);
16032 assert(block->bbStkDepth == 0);
16033 block->bbStkDepth = static_cast<unsigned short>(verCurrentState.esStackDepth);
16034 assert(addToPending);
16035 assert(impGetPendingBlockMember(block) == 0);
16039 // The stack should have the same height on entry to the block from all its predecessors.
16040 if (block->bbStkDepth != verCurrentState.esStackDepth)
16044 sprintf_s(buffer, sizeof(buffer),
16045 "Block at offset %4.4x to %4.4x in %s entered with different stack depths.\n"
16046 "Previous depth was %d, current depth is %d",
16047 block->bbCodeOffs, block->bbCodeOffsEnd, info.compFullName, block->bbStkDepth,
16048 verCurrentState.esStackDepth);
16049 buffer[400 - 1] = 0;
16052 NO_WAY("Block entered with different stack depths");
16056 // Additionally, if we need to verify, merge the verification state.
16057 if (tiVerificationNeeded)
16059 // Merge the current state into the entry state of block; if this does not change the entry state
16060 // by merging, do not add the block to the pending-list.
16061 bool changed = false;
16062 if (!verMergeEntryStates(block, &changed))
16064 block->bbFlags |= BBF_FAILED_VERIFICATION;
16065 addToPending = true; // We will pop it off, and check the flag set above.
16069 addToPending = true;
16071 JITDUMP("Adding BB%02u to pending set due to new merge result\n", block->bbNum);
16080 if (block->bbStkDepth > 0)
16082 // We need to fix the types of any spill temps that might have changed:
16083 // int->native int, float->double, int->byref, etc.
16084 impRetypeEntryStateTemps(block);
16087 // OK, we must add to the pending list, if it's not already in it.
16088 if (impGetPendingBlockMember(block) != 0)
16094 // Get an entry to add to the pending list
16098 if (impPendingFree)
16100 // We can reuse one of the freed up dscs.
16101 dsc = impPendingFree;
16102 impPendingFree = dsc->pdNext;
16106 // We have to create a new dsc
16107 dsc = new (this, CMK_Unknown) PendingDsc;
16111 dsc->pdSavedStack.ssDepth = verCurrentState.esStackDepth;
16112 dsc->pdThisPtrInit = verCurrentState.thisInitialized;
16114 // Save the stack trees for later
16116 if (verCurrentState.esStackDepth)
16118 impSaveStackState(&dsc->pdSavedStack, false);
16121 // Add the entry to the pending list
16123 dsc->pdNext = impPendingList;
16124 impPendingList = dsc;
16125 impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set.
16127 // Various assertions require us now to consider the block as not imported (at least for
16128 // the final time...)
16129 block->bbFlags &= ~BBF_IMPORTED;
16134 printf("Added PendingDsc - %08p for BB%02u\n", dspPtr(dsc), block->bbNum);
16139 /*****************************************************************************/
16141 // Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if
16142 // necessary (and ensures that it is a member of the set of BB's on the list, by setting its byte in
16143 // impPendingBlockMembers). Does *NOT* change the existing "pre-state" of the block.
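//
// Note (in contrast to impImportBlockPending above): this variant does not merge
// verCurrentState into the block's pre-state. It re-queues the block using whatever
// entry state (bbEntryState) the block already has, so the block is re-imported with
// its previously established stack types.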
16145 void Compiler::impReimportBlockPending(BasicBlock* block)
16147 JITDUMP("\nimpReimportBlockPending for BB%02u", block->bbNum);
16149 assert(block->bbFlags & BBF_IMPORTED);
16151 // OK, we must add to the pending list, if it's not already in it.
16152 if (impGetPendingBlockMember(block) != 0)
16157 // Get an entry to add to the pending list
16161 if (impPendingFree)
16163 // We can reuse one of the freed up dscs.
16164 dsc = impPendingFree;
16165 impPendingFree = dsc->pdNext;
16169 // We have to create a new dsc
16170 dsc = new (this, CMK_ImpStack) PendingDsc;
16175 if (block->bbEntryState)
16177 dsc->pdThisPtrInit = block->bbEntryState->thisInitialized;
16178 dsc->pdSavedStack.ssDepth = block->bbEntryState->esStackDepth;
16179 dsc->pdSavedStack.ssTrees = block->bbEntryState->esStack;
16183 dsc->pdThisPtrInit = TIS_Bottom;
16184 dsc->pdSavedStack.ssDepth = 0;
16185 dsc->pdSavedStack.ssTrees = nullptr;
16188 // Add the entry to the pending list
16190 dsc->pdNext = impPendingList;
16191 impPendingList = dsc;
16192 impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set.
16194 // Various assertions require us now to consider the block as not imported (at least for
16195 // the final time...)
16196 block->bbFlags &= ~BBF_IMPORTED;
16201 printf("Added PendingDsc - %08p for BB%02u\n", dspPtr(dsc), block->bbNum);
16206 void* Compiler::BlockListNode::operator new(size_t sz, Compiler* comp)
16208 if (comp->impBlockListNodeFreeList == nullptr)
16210 return (BlockListNode*)comp->compGetMem(sizeof(BlockListNode), CMK_BasicBlock);
16214 BlockListNode* res = comp->impBlockListNodeFreeList;
16215 comp->impBlockListNodeFreeList = res->m_next;
16220 void Compiler::FreeBlockListNode(Compiler::BlockListNode* node)
16222 node->m_next = impBlockListNodeFreeList;
16223 impBlockListNodeFreeList = node;
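// A "spill clique" is, roughly, the set of blocks connected by edges across which the
// IL stack is non-empty; all of them must agree on the spill temps used for the live
// stack slots. impWalkSpillCliqueFromPred computes the clique reachable from "block"
// (treated as a predecessor) by alternating between a successor worklist and a
// predecessor worklist until no new members are found.
//
// Illustrative example (hypothetical CFG, not taken from any particular method):
//
//     B1: ldc.i4 1      B2: ldc.i4 2
//           \              /
//            v            v
//           B3: (one value live on entry)
//
// B1 and B2 are predecessor members and B3 is a successor member; all three must use
// the same base spill temp for the live slot.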
16226 void Compiler::impWalkSpillCliqueFromPred(BasicBlock* block, SpillCliqueWalker* callback)
16230 noway_assert(!fgComputePredsDone);
16231 if (!fgCheapPredsValid)
16233 fgComputeCheapPreds();
16236 BlockListNode* succCliqueToDo = nullptr;
16237 BlockListNode* predCliqueToDo = new (this) BlockListNode(block);
16241 // Look at the successors of every member of the predecessor to-do list.
16242 while (predCliqueToDo != nullptr)
16244 BlockListNode* node = predCliqueToDo;
16245 predCliqueToDo = node->m_next;
16246 BasicBlock* blk = node->m_blk;
16247 FreeBlockListNode(node);
16249 for (unsigned succNum = 0; succNum < blk->NumSucc(); succNum++)
16251 BasicBlock* succ = blk->GetSucc(succNum);
16252 // If it's not already in the clique, add it, and also add it
16253 // as a member of the successor "toDo" set.
16254 if (impSpillCliqueGetMember(SpillCliqueSucc, succ) == 0)
16256 callback->Visit(SpillCliqueSucc, succ);
16257 impSpillCliqueSetMember(SpillCliqueSucc, succ, 1);
16258 succCliqueToDo = new (this) BlockListNode(succ, succCliqueToDo);
16263 // Look at the predecessors of every member of the successor to-do list.
16264 while (succCliqueToDo != nullptr)
16266 BlockListNode* node = succCliqueToDo;
16267 succCliqueToDo = node->m_next;
16268 BasicBlock* blk = node->m_blk;
16269 FreeBlockListNode(node);
16271 for (BasicBlockList* pred = blk->bbCheapPreds; pred != nullptr; pred = pred->next)
16273 BasicBlock* predBlock = pred->block;
16274 // If it's not already in the clique, add it, and also add it
16275 // as a member of the predecessor "toDo" set.
16276 if (impSpillCliqueGetMember(SpillCliquePred, predBlock) == 0)
16278 callback->Visit(SpillCliquePred, predBlock);
16279 impSpillCliqueSetMember(SpillCliquePred, predBlock, 1);
16280 predCliqueToDo = new (this) BlockListNode(predBlock, predCliqueToDo);
16287 // If this fails, it means we didn't walk the spill clique properly and somehow managed
16288 // to miss walking back to include the predecessor we started from.
16289 // The most likely cause is missing or out-of-date bbPreds.
16290 assert(impSpillCliqueGetMember(SpillCliquePred, block) != 0);
16293 void Compiler::SetSpillTempsBase::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk)
16295 if (predOrSucc == SpillCliqueSucc)
16297 assert(blk->bbStkTempsIn == NO_BASE_TMP); // Should not already be a member of a clique as a successor.
16298 blk->bbStkTempsIn = m_baseTmp;
16302 assert(predOrSucc == SpillCliquePred);
16303 assert(blk->bbStkTempsOut == NO_BASE_TMP); // Should not already be a member of a clique as a predecessor.
16304 blk->bbStkTempsOut = m_baseTmp;
16308 void Compiler::ReimportSpillClique::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk)
16310 // For Preds we could be a little smarter and just find the existing store
16311 // and re-type it/add a cast, but that is complicated and hopefully very rare, so
16312 // just re-import the whole block (just like we do for successors)
16314 if (((blk->bbFlags & BBF_IMPORTED) == 0) && (m_pComp->impGetPendingBlockMember(blk) == 0))
16316 // If we haven't imported this block and we're not going to (because it isn't on
16317 // the pending list) then just ignore it for now.
16319 // This block has either never been imported (EntryState == NULL) or it failed
16320 // verification. Neither state requires us to force it to be imported now.
16321 assert((blk->bbEntryState == nullptr) || (blk->bbFlags & BBF_FAILED_VERIFICATION));
16325 // For successors we have a valid verCurrentState, so just mark them for reimport
16326 // the 'normal' way
16327 // Unlike predecessors, we *DO* need to reimport the current block because the
16328 // initial import had the wrong entry state types.
16329 // Similarly, blocks that are currently on the pending list still need to call
16330 // impImportBlockPending to fixup their entry state.
16331 if (predOrSucc == SpillCliqueSucc)
16333 m_pComp->impReimportMarkBlock(blk);
16335 // Set the current stack state to that of the blk->bbEntryState
16336 m_pComp->verResetCurrentState(blk, &m_pComp->verCurrentState);
16337 assert(m_pComp->verCurrentState.thisInitialized == blk->bbThisOnEntry());
16339 m_pComp->impImportBlockPending(blk);
16341 else if ((blk != m_pComp->compCurBB) && ((blk->bbFlags & BBF_IMPORTED) != 0))
16343 // As described above, we are only visiting predecessors so they can
16344 // add the appropriate casts. Since we have already done that for the current
16345 // block, it does not need to be reimported.
16346 // Nor do we need to reimport blocks that are still pending, but not yet imported.
16349 // For predecessors, we have no state to seed the EntryState, so we just have
16350 // to assume the existing one is correct.
16351 // If the block is also a successor, it will get the EntryState properly
16352 // updated when it is visited as a successor in the above "if" block.
16353 assert(predOrSucc == SpillCliquePred);
16354 m_pComp->impReimportBlockPending(blk);
16358 // Re-type the incoming lclVar nodes to match the varDsc.
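// For example (illustrative): a spill temp first created as TYP_INT may later be
// widened to TYP_I_IMPL when the spill clique is re-imported. The GT_LCL_VAR nodes
// saved in the entry state still carry the old type, so they are re-stamped here
// from the corresponding lvaTable entry.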
16359 void Compiler::impRetypeEntryStateTemps(BasicBlock* blk)
16361 if (blk->bbEntryState != nullptr)
16363 EntryState* es = blk->bbEntryState;
16364 for (unsigned level = 0; level < es->esStackDepth; level++)
16366 GenTreePtr tree = es->esStack[level].val;
16367 if ((tree->gtOper == GT_LCL_VAR) || (tree->gtOper == GT_LCL_FLD))
16369 unsigned lclNum = tree->gtLclVarCommon.gtLclNum;
16370 noway_assert(lclNum < lvaCount);
16371 LclVarDsc* varDsc = lvaTable + lclNum;
16372 es->esStack[level].val->gtType = varDsc->TypeGet();
16378 unsigned Compiler::impGetSpillTmpBase(BasicBlock* block)
16380 if (block->bbStkTempsOut != NO_BASE_TMP)
16382 return block->bbStkTempsOut;
16388 printf("\n*************** In impGetSpillTmpBase(BB%02u)\n", block->bbNum);
16392 // Otherwise, choose one, and propagate to all members of the spill clique.
16393 // Grab enough temps for the whole stack.
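// (SetSpillTempsBase::Visit, above, records this base as bbStkTempsIn for the
// successor members of the clique and as bbStkTempsOut for the predecessor members.)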
16394 unsigned baseTmp = lvaGrabTemps(verCurrentState.esStackDepth DEBUGARG("IL Stack Entries"));
16395 SetSpillTempsBase callback(baseTmp);
16397 // We do *NOT* need to reset the SpillClique*Members because a block can only be the predecessor
16398 // to one spill clique, and similarly can only be the successor to one spill clique
16399 impWalkSpillCliqueFromPred(block, &callback);
16404 void Compiler::impReimportSpillClique(BasicBlock* block)
16409 printf("\n*************** In impReimportSpillClique(BB%02u)\n", block->bbNum);
16413 // If we get here, it is because this block is already part of a spill clique
16414 // and one predecessor had an outgoing live stack slot of type int, and this
16415 // block has an outgoing live stack slot of type native int.
16416 // We need to reset these before traversal because they have already been set
16417 // by the previous walk to determine all the members of the spill clique.
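// Illustrative shape of the mismatch (hypothetical IL, not from any particular test):
//
//     pred1:  ldc.i4.0        // pushes an int
//             br join
//     pred2:  ldloc.0
//             conv.i          // pushes a native int
//             br join
//     join:   ...             // one slot live on entry; its spill temp must be the
//                             // wider type (native int), so clique members that
//                             // spilled an int get re-imported with the new type.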
16418 impInlineRoot()->impSpillCliquePredMembers.Reset();
16419 impInlineRoot()->impSpillCliqueSuccMembers.Reset();
16421 ReimportSpillClique callback(this);
16423 impWalkSpillCliqueFromPred(block, &callback);
16426 // Set the pre-state of "block" (which should not have a pre-state allocated) to
16427 // a copy of "srcState", cloning tree pointers as required.
16428 void Compiler::verInitBBEntryState(BasicBlock* block, EntryState* srcState)
16430 if (srcState->esStackDepth == 0 && srcState->thisInitialized == TIS_Bottom)
16432 block->bbEntryState = nullptr;
16436 block->bbEntryState = (EntryState*)compGetMemA(sizeof(EntryState));
16438 // block->bbEntryState.esRefcount = 1;
16440 block->bbEntryState->esStackDepth = srcState->esStackDepth;
16441 block->bbEntryState->thisInitialized = TIS_Bottom;
16443 if (srcState->esStackDepth > 0)
16445 block->bbSetStack(new (this, CMK_Unknown) StackEntry[srcState->esStackDepth]);
16446 unsigned stackSize = srcState->esStackDepth * sizeof(StackEntry);
16448 memcpy(block->bbEntryState->esStack, srcState->esStack, stackSize);
16449 for (unsigned level = 0; level < srcState->esStackDepth; level++)
16451 GenTreePtr tree = srcState->esStack[level].val;
16452 block->bbEntryState->esStack[level].val = gtCloneExpr(tree);
16456 if (verTrackObjCtorInitState)
16458 verSetThisInit(block, srcState->thisInitialized);
16464 void Compiler::verSetThisInit(BasicBlock* block, ThisInitState tis)
16466 assert(tis != TIS_Bottom); // Precondition.
16467 if (block->bbEntryState == nullptr)
16469 block->bbEntryState = new (this, CMK_Unknown) EntryState();
16472 block->bbEntryState->thisInitialized = tis;
16476 * Resets the current state to the state at the start of the basic block
16478 void Compiler::verResetCurrentState(BasicBlock* block, EntryState* destState)
16481 if (block->bbEntryState == nullptr)
16483 destState->esStackDepth = 0;
16484 destState->thisInitialized = TIS_Bottom;
16488 destState->esStackDepth = block->bbEntryState->esStackDepth;
16490 if (destState->esStackDepth > 0)
16492 unsigned stackSize = destState->esStackDepth * sizeof(StackEntry);
16494 memcpy(destState->esStack, block->bbStackOnEntry(), stackSize);
16497 destState->thisInitialized = block->bbThisOnEntry();
16502 ThisInitState BasicBlock::bbThisOnEntry()
16504 return bbEntryState ? bbEntryState->thisInitialized : TIS_Bottom;
16507 unsigned BasicBlock::bbStackDepthOnEntry()
16509 return (bbEntryState ? bbEntryState->esStackDepth : 0);
16512 void BasicBlock::bbSetStack(void* stackBuffer)
16514 assert(bbEntryState);
16515 assert(stackBuffer);
16516 bbEntryState->esStack = (StackEntry*)stackBuffer;
16519 StackEntry* BasicBlock::bbStackOnEntry()
16521 assert(bbEntryState);
16522 return bbEntryState->esStack;
16525 void Compiler::verInitCurrentState()
16527 verTrackObjCtorInitState = FALSE;
16528 verCurrentState.thisInitialized = TIS_Bottom;
16530 if (tiVerificationNeeded)
16532 // Track this ptr initialization
16533 if (!info.compIsStatic && (info.compFlags & CORINFO_FLG_CONSTRUCTOR) && lvaTable[0].lvVerTypeInfo.IsObjRef())
16535 verTrackObjCtorInitState = TRUE;
16536 verCurrentState.thisInitialized = TIS_Uninit;
16540 // initialize stack info
16542 verCurrentState.esStackDepth = 0;
16543 assert(verCurrentState.esStack != nullptr);
16545 // copy current state to entry state of first BB
16546 verInitBBEntryState(fgFirstBB, &verCurrentState);
16549 Compiler* Compiler::impInlineRoot()
16551 if (impInlineInfo == nullptr)
16557 return impInlineInfo->InlineRoot;
16561 BYTE Compiler::impSpillCliqueGetMember(SpillCliqueDir predOrSucc, BasicBlock* blk)
16563 if (predOrSucc == SpillCliquePred)
16565 return impInlineRoot()->impSpillCliquePredMembers.Get(blk->bbInd());
16569 assert(predOrSucc == SpillCliqueSucc);
16570 return impInlineRoot()->impSpillCliqueSuccMembers.Get(blk->bbInd());
16574 void Compiler::impSpillCliqueSetMember(SpillCliqueDir predOrSucc, BasicBlock* blk, BYTE val)
16576 if (predOrSucc == SpillCliquePred)
16578 impInlineRoot()->impSpillCliquePredMembers.Set(blk->bbInd(), val);
16582 assert(predOrSucc == SpillCliqueSucc);
16583 impInlineRoot()->impSpillCliqueSuccMembers.Set(blk->bbInd(), val);
16587 /*****************************************************************************
16589 * Convert the instrs ("import") into our internal format (trees). The
16590 * basic flowgraph has already been constructed and is passed in.
16593 void Compiler::impImport(BasicBlock* method)
16598 printf("*************** In impImport() for %s\n", info.compFullName);
16602 /* Allocate the stack contents */
16604 if (info.compMaxStack <= sizeof(impSmallStack) / sizeof(impSmallStack[0]))
16606 /* Use local variable, don't waste time allocating on the heap */
16608 impStkSize = sizeof(impSmallStack) / sizeof(impSmallStack[0]);
16609 verCurrentState.esStack = impSmallStack;
16613 impStkSize = info.compMaxStack;
16614 verCurrentState.esStack = new (this, CMK_ImpStack) StackEntry[impStkSize];
16617 // initialize the entry state at start of method
16618 verInitCurrentState();
16620 // Initialize stuff related to figuring "spill cliques" (see spec comment for impGetSpillTmpBase).
16621 Compiler* inlineRoot = impInlineRoot();
16622 if (this == inlineRoot) // These are only used on the root of the inlining tree.
16624 // We have initialized these previously, but to size 0. Make them larger.
16625 impPendingBlockMembers.Init(getAllocator(), fgBBNumMax * 2);
16626 impSpillCliquePredMembers.Init(getAllocator(), fgBBNumMax * 2);
16627 impSpillCliqueSuccMembers.Init(getAllocator(), fgBBNumMax * 2);
16629 inlineRoot->impPendingBlockMembers.Reset(fgBBNumMax * 2);
16630 inlineRoot->impSpillCliquePredMembers.Reset(fgBBNumMax * 2);
16631 inlineRoot->impSpillCliqueSuccMembers.Reset(fgBBNumMax * 2);
16632 impBlockListNodeFreeList = nullptr;
16635 impLastILoffsStmt = nullptr;
16636 impNestedStackSpill = false;
16638 impBoxTemp = BAD_VAR_NUM;
16640 impPendingList = impPendingFree = nullptr;
16642 /* Add the entry-point to the worker-list */
16644 // Skip leading internal blocks. There can be one as a leading scratch BB, and more
16645 // from EH normalization.
16646 // NOTE: It might be possible to always just put fgFirstBB on the pending list, and let everything else just fall out.
16648 for (; method->bbFlags & BBF_INTERNAL; method = method->bbNext)
16650 // Treat these as imported.
16651 assert(method->bbJumpKind == BBJ_NONE); // We assume all the leading ones are fallthrough.
16652 JITDUMP("Marking leading BBF_INTERNAL block BB%02u as BBF_IMPORTED\n", method->bbNum);
16653 method->bbFlags |= BBF_IMPORTED;
16656 impImportBlockPending(method);
16658 /* Import blocks in the worker-list until there are no more */
16660 while (impPendingList)
16662 /* Remove the entry at the front of the list */
16664 PendingDsc* dsc = impPendingList;
16665 impPendingList = impPendingList->pdNext;
16666 impSetPendingBlockMember(dsc->pdBB, 0);
16668 /* Restore the stack state */
16670 verCurrentState.thisInitialized = dsc->pdThisPtrInit;
16671 verCurrentState.esStackDepth = dsc->pdSavedStack.ssDepth;
16672 if (verCurrentState.esStackDepth)
16674 impRestoreStackState(&dsc->pdSavedStack);
16677 /* Add the entry to the free list for reuse */
16679 dsc->pdNext = impPendingFree;
16680 impPendingFree = dsc;
16682 /* Now import the block */
16684 if (dsc->pdBB->bbFlags & BBF_FAILED_VERIFICATION)
16687 #ifdef _TARGET_64BIT_
16688 // On AMD64, during verification we have to match JIT64 behavior since the VM is very tightly
16689 // coupled with the JIT64 IL Verification logic. Look inside verHandleVerificationFailure
16690 // method for further explanation on why we raise this exception instead of making the jitted
16691 // code throw the verification exception during execution.
16692 if (tiVerificationNeeded && opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IMPORT_ONLY))
16694 BADCODE("Basic block marked as not verifiable");
16697 #endif // _TARGET_64BIT_
16699 verConvertBBToThrowVerificationException(dsc->pdBB DEBUGARG(true));
16700 impEndTreeList(dsc->pdBB);
16705 impImportBlock(dsc->pdBB);
16707 if (compDonotInline())
16711 if (compIsForImportOnly() && !tiVerificationNeeded)
16719 if (verbose && info.compXcptnsCount)
16721 printf("\nAfter impImport() added block for try,catch,finally");
16722 fgDispBasicBlocks();
16726 // Used in impImportBlockPending() for STRESS_CHK_REIMPORT
16727 for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
16729 block->bbFlags &= ~BBF_VISITED;
16733 assert(!compIsForInlining() || !tiVerificationNeeded);
16736 // Checks if a typeinfo (usually stored in the type stack) is a struct.
16737 // The invariant here is that if it's not a ref or a method and has a class handle,
16738 // it's a valuetype.
16739 bool Compiler::impIsValueType(typeInfo* pTypeInfo)
16741 if (pTypeInfo && pTypeInfo->IsValueClassWithClsHnd())
16751 /*****************************************************************************
16752 * Check to see if the tree is the address of a local or
16753 the address of a field in a local.
16755 *lclVarTreeOut will contain the GT_LCL_VAR tree when it returns TRUE.
16759 BOOL Compiler::impIsAddressInLocal(GenTreePtr tree, GenTreePtr* lclVarTreeOut)
16761 if (tree->gtOper != GT_ADDR)
16766 GenTreePtr op = tree->gtOp.gtOp1;
16767 while (op->gtOper == GT_FIELD)
16769 op = op->gtField.gtFldObj;
16770 if (op && op->gtOper == GT_ADDR) // Skip static fields where op will be NULL.
16772 op = op->gtOp.gtOp1;
16780 if (op->gtOper == GT_LCL_VAR)
16782 *lclVarTreeOut = op;
16791 //------------------------------------------------------------------------
16792 // impMakeDiscretionaryInlineObservations: make observations that help
16793 // determine the profitability of a discretionary inline
16796 // pInlineInfo -- InlineInfo for the inline, or null for the prejit root
16797 // inlineResult -- InlineResult accumulating information about this inline
16800 // If inlining or prejitting the root, this method also makes
16801 // various observations about the method that factor into inline
16802 // decisions. It sets `compNativeSizeEstimate` as a side effect.
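//
// As part of these observations, the callsite is roughly classified by expected
// frequency (HOT, LOOP, WARM, RARE, or BORING; see InlineCallsiteFrequency below),
// and the callsite block weight is recorded, so the inline policy can weigh code
// size growth against how often the site is expected to execute.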
16804 void Compiler::impMakeDiscretionaryInlineObservations(InlineInfo* pInlineInfo, InlineResult* inlineResult)
16806 assert(pInlineInfo != nullptr && compIsForInlining() || // Perform the actual inlining.
16807 pInlineInfo == nullptr && !compIsForInlining() // Calculate the static inlining hint for ngen.
16810 // If we're really inlining, we should just have one result in play.
16811 assert((pInlineInfo == nullptr) || (inlineResult == pInlineInfo->inlineResult));
16813 // If this is a "forceinline" method, the JIT probably shouldn't have gone
16814 // to the trouble of estimating the native code size. Even if it did, it
16815 // shouldn't be relying on the result of this method.
16816 assert(inlineResult->GetObservation() == InlineObservation::CALLEE_IS_DISCRETIONARY_INLINE);
16818 // Note if the caller contains NEWOBJ or NEWARR.
16819 Compiler* rootCompiler = impInlineRoot();
16821 if ((rootCompiler->optMethodFlags & OMF_HAS_NEWARRAY) != 0)
16823 inlineResult->Note(InlineObservation::CALLER_HAS_NEWARRAY);
16826 if ((rootCompiler->optMethodFlags & OMF_HAS_NEWOBJ) != 0)
16828 inlineResult->Note(InlineObservation::CALLER_HAS_NEWOBJ);
16831 bool calleeIsStatic = (info.compFlags & CORINFO_FLG_STATIC) != 0;
16832 bool isSpecialMethod = (info.compFlags & CORINFO_FLG_CONSTRUCTOR) != 0;
16834 if (isSpecialMethod)
16836 if (calleeIsStatic)
16838 inlineResult->Note(InlineObservation::CALLEE_IS_CLASS_CTOR);
16842 inlineResult->Note(InlineObservation::CALLEE_IS_INSTANCE_CTOR);
16845 else if (!calleeIsStatic)
16847 // Callee is an instance method.
16849 // Check if the callee has the same 'this' as the root.
16850 if (pInlineInfo != nullptr)
16852 GenTreePtr thisArg = pInlineInfo->iciCall->gtCall.gtCallObjp;
16854 bool isSameThis = impIsThis(thisArg);
16855 inlineResult->NoteBool(InlineObservation::CALLSITE_IS_SAME_THIS, isSameThis);
16859 // Note if the callee's class is a promotable struct
16860 if ((info.compClassAttr & CORINFO_FLG_VALUECLASS) != 0)
16862 lvaStructPromotionInfo structPromotionInfo;
16863 lvaCanPromoteStructType(info.compClassHnd, &structPromotionInfo, false);
16864 if (structPromotionInfo.canPromote)
16866 inlineResult->Note(InlineObservation::CALLEE_CLASS_PROMOTABLE);
16870 #ifdef FEATURE_SIMD
16872 // Note if this method has SIMD args or a SIMD return value
16873 if (pInlineInfo != nullptr && pInlineInfo->hasSIMDTypeArgLocalOrReturn)
16875 inlineResult->Note(InlineObservation::CALLEE_HAS_SIMD);
16878 #endif // FEATURE_SIMD
16880 // Roughly classify callsite frequency.
16881 InlineCallsiteFrequency frequency = InlineCallsiteFrequency::UNUSED;
16883 // If this is a prejit root, or a maximally hot block...
16884 if ((pInlineInfo == nullptr) || (pInlineInfo->iciBlock->bbWeight >= BB_MAX_WEIGHT))
16886 frequency = InlineCallsiteFrequency::HOT;
16888 // No training data. Look for loop-like things.
16889 // We consider a recursive call loop-like. Do not give the inlining boost to the method itself.
16890 // However, give it to things nearby.
16891 else if ((pInlineInfo->iciBlock->bbFlags & BBF_BACKWARD_JUMP) &&
16892 (pInlineInfo->fncHandle != pInlineInfo->inlineCandidateInfo->ilCallerHandle))
16894 frequency = InlineCallsiteFrequency::LOOP;
16896 else if ((pInlineInfo->iciBlock->bbFlags & BBF_PROF_WEIGHT) && (pInlineInfo->iciBlock->bbWeight > BB_ZERO_WEIGHT))
16898 frequency = InlineCallsiteFrequency::WARM;
16900 // Now modify the multiplier based on where we're called from.
16901 else if (pInlineInfo->iciBlock->isRunRarely() || ((info.compFlags & FLG_CCTOR) == FLG_CCTOR))
16903 frequency = InlineCallsiteFrequency::RARE;
16907 frequency = InlineCallsiteFrequency::BORING;
16910 // Also capture the block weight of the call site. In the prejit
16911 // root case, assume there's some hot call site for this method.
16912 unsigned weight = 0;
16914 if (pInlineInfo != nullptr)
16916 weight = pInlineInfo->iciBlock->bbWeight;
16920 weight = BB_MAX_WEIGHT;
16923 inlineResult->NoteInt(InlineObservation::CALLSITE_FREQUENCY, static_cast<int>(frequency));
16924 inlineResult->NoteInt(InlineObservation::CALLSITE_WEIGHT, static_cast<int>(weight));
16927 /*****************************************************************************
16928 This method makes a STATIC inlining decision based on the IL code.
16929 It should not make any inlining decision based on the context.
16930 If forceInline is true, then the inlining decision should not depend on
16931 performance heuristics (code size, etc.).
16934 void Compiler::impCanInlineIL(CORINFO_METHOD_HANDLE fncHandle,
16935 CORINFO_METHOD_INFO* methInfo,
16937 InlineResult* inlineResult)
16939 unsigned codeSize = methInfo->ILCodeSize;
16941 // We shouldn't have made up our minds yet...
16942 assert(!inlineResult->IsDecided());
16944 if (methInfo->EHcount)
16946 inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_EH);
16950 if ((methInfo->ILCode == nullptr) || (codeSize == 0))
16952 inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NO_BODY);
16956 // For now we don't inline varargs (import code can't handle it)
16958 if (methInfo->args.isVarArg())
16960 inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS);
16964 // Reject if it has too many locals.
16965 // This is currently an implementation limit due to fixed-size arrays in the
16966 // inline info, rather than a performance heuristic.
16968 inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_LOCALS, methInfo->locals.numArgs);
16970 if (methInfo->locals.numArgs > MAX_INL_LCLS)
16972 inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_LOCALS);
16976 // Make sure there aren't too many arguments.
16977 // This is currently an implementation limit due to fixed-size arrays in the
16978 // inline info, rather than a performance heuristic.
16980 inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_ARGUMENTS, methInfo->args.numArgs);
16982 if (methInfo->args.numArgs > MAX_INL_ARGS)
16984 inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_ARGUMENTS);
16988 // Note force inline state
16990 inlineResult->NoteBool(InlineObservation::CALLEE_IS_FORCE_INLINE, forceInline);
16992 // Note IL code size
16994 inlineResult->NoteInt(InlineObservation::CALLEE_IL_CODE_SIZE, codeSize);
16996 if (inlineResult->IsFailure())
17001 // Make sure maxstack is not too big
17003 inlineResult->NoteInt(InlineObservation::CALLEE_MAXSTACK, methInfo->maxStack);
17005 if (inlineResult->IsFailure())
17011 /*****************************************************************************
17014 void Compiler::impCheckCanInline(GenTreePtr call,
17015 CORINFO_METHOD_HANDLE fncHandle,
17017 CORINFO_CONTEXT_HANDLE exactContextHnd,
17018 InlineCandidateInfo** ppInlineCandidateInfo,
17019 InlineResult* inlineResult)
17021 // Either EE or JIT might throw exceptions below.
17022 // If that happens, just don't inline the method.
17028 CORINFO_METHOD_HANDLE fncHandle;
17030 CORINFO_CONTEXT_HANDLE exactContextHnd;
17031 InlineResult* result;
17032 InlineCandidateInfo** ppInlineCandidateInfo;
17033 } param = {nullptr};
17035 param.pThis = this;
17037 param.fncHandle = fncHandle;
17038 param.methAttr = methAttr;
17039 param.exactContextHnd = (exactContextHnd != nullptr) ? exactContextHnd : MAKE_METHODCONTEXT(fncHandle);
17040 param.result = inlineResult;
17041 param.ppInlineCandidateInfo = ppInlineCandidateInfo;
17043 bool success = eeRunWithErrorTrap<Param>(
17044 [](Param* pParam) {
17045 DWORD dwRestrictions = 0;
17046 CorInfoInitClassResult initClassResult;
17049 const char* methodName;
17050 const char* className;
17051 methodName = pParam->pThis->eeGetMethodName(pParam->fncHandle, &className);
17053 if (JitConfig.JitNoInline())
17055 pParam->result->NoteFatal(InlineObservation::CALLEE_IS_JIT_NOINLINE);
17060 /* Try to get the code address/size for the method */
17062 CORINFO_METHOD_INFO methInfo;
17063 if (!pParam->pThis->info.compCompHnd->getMethodInfo(pParam->fncHandle, &methInfo))
17065 pParam->result->NoteFatal(InlineObservation::CALLEE_NO_METHOD_INFO);
17070 forceInline = !!(pParam->methAttr & CORINFO_FLG_FORCEINLINE);
17072 pParam->pThis->impCanInlineIL(pParam->fncHandle, &methInfo, forceInline, pParam->result);
17074 if (pParam->result->IsFailure())
17076 assert(pParam->result->IsNever());
17080 // Speculatively check if initClass() can be done.
17081 // If it can be done, we will try to inline the method. If inlining
17082 // succeeds, then we will do the non-speculative initClass() and commit it.
17083 // If this speculative call to initClass() fails, there is no point
17084 // trying to inline this method.
17086 pParam->pThis->info.compCompHnd->initClass(nullptr /* field */, pParam->fncHandle /* method */,
17087 pParam->exactContextHnd /* context */,
17088 TRUE /* speculative */);
17090 if (initClassResult & CORINFO_INITCLASS_DONT_INLINE)
17092 pParam->result->NoteFatal(InlineObservation::CALLSITE_CLASS_INIT_FAILURE_SPEC);
17096 // Give the EE the final say in whether to inline or not.
17097 // This should be last since for verifiable code, this can be expensive
17099 /* VM Inline check also ensures that the method is verifiable if needed */
17100 CorInfoInline vmResult;
17101 vmResult = pParam->pThis->info.compCompHnd->canInline(pParam->pThis->info.compMethodHnd, pParam->fncHandle,
17104 if (vmResult == INLINE_FAIL)
17106 pParam->result->NoteFatal(InlineObservation::CALLSITE_IS_VM_NOINLINE);
17108 else if (vmResult == INLINE_NEVER)
17110 pParam->result->NoteFatal(InlineObservation::CALLEE_IS_VM_NOINLINE);
17113 if (pParam->result->IsFailure())
17115 // Make sure not to report this one. It was already reported by the VM.
17116 pParam->result->SetReported();
17120 // check for unsupported inlining restrictions
17121 assert((dwRestrictions & ~(INLINE_RESPECT_BOUNDARY | INLINE_NO_CALLEE_LDSTR | INLINE_SAME_THIS)) == 0);
17123 if (dwRestrictions & INLINE_SAME_THIS)
17125 GenTreePtr thisArg = pParam->call->gtCall.gtCallObjp;
17128 if (!pParam->pThis->impIsThis(thisArg))
17130 pParam->result->NoteFatal(InlineObservation::CALLSITE_REQUIRES_SAME_THIS);
17135 /* Get the method properties */
17137 CORINFO_CLASS_HANDLE clsHandle;
17138 clsHandle = pParam->pThis->info.compCompHnd->getMethodClass(pParam->fncHandle);
17140 clsAttr = pParam->pThis->info.compCompHnd->getClassAttribs(clsHandle);
17142 /* Get the return type */
17144 var_types fncRetType;
17145 fncRetType = pParam->call->TypeGet();
17148 var_types fncRealRetType;
17149 fncRealRetType = JITtype2varType(methInfo.args.retType);
17151 assert((genActualType(fncRealRetType) == genActualType(fncRetType)) ||
17152 // <BUGNUM> VSW 288602 </BUGNUM>
17153 // In case of IJW, we allow to assign a native pointer to a BYREF.
17154 (fncRetType == TYP_BYREF && methInfo.args.retType == CORINFO_TYPE_PTR) ||
17155 (varTypeIsStruct(fncRetType) && (fncRealRetType == TYP_STRUCT)));
17159 // Allocate an InlineCandidateInfo structure
17161 InlineCandidateInfo* pInfo;
17162 pInfo = new (pParam->pThis, CMK_Inlining) InlineCandidateInfo;
17164 pInfo->dwRestrictions = dwRestrictions;
17165 pInfo->methInfo = methInfo;
17166 pInfo->methAttr = pParam->methAttr;
17167 pInfo->clsHandle = clsHandle;
17168 pInfo->clsAttr = clsAttr;
17169 pInfo->fncRetType = fncRetType;
17170 pInfo->exactContextHnd = pParam->exactContextHnd;
17171 pInfo->ilCallerHandle = pParam->pThis->info.compMethodHnd;
17172 pInfo->initClassResult = initClassResult;
17174 *(pParam->ppInlineCandidateInfo) = pInfo;
17181 param.result->NoteFatal(InlineObservation::CALLSITE_COMPILATION_ERROR);
17185 void Compiler::impInlineRecordArgInfo(InlineInfo* pInlineInfo,
17186 GenTreePtr curArgVal,
17188 InlineResult* inlineResult)
17190 InlArgInfo* inlCurArgInfo = &pInlineInfo->inlArgInfo[argNum];
17192 if (curArgVal->gtOper == GT_MKREFANY)
17194 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_IS_MKREFANY);
17198 inlCurArgInfo->argNode = curArgVal;
17200 GenTreePtr lclVarTree;
17201 if (impIsAddressInLocal(curArgVal, &lclVarTree) && varTypeIsStruct(lclVarTree))
17203 inlCurArgInfo->argIsByRefToStructLocal = true;
17204 #ifdef FEATURE_SIMD
17205 if (lvaTable[lclVarTree->AsLclVarCommon()->gtLclNum].lvSIMDType)
17207 pInlineInfo->hasSIMDTypeArgLocalOrReturn = true;
17209 #endif // FEATURE_SIMD
17212 if (curArgVal->gtFlags & GTF_ALL_EFFECT)
17214 inlCurArgInfo->argHasGlobRef = (curArgVal->gtFlags & GTF_GLOB_REF) != 0;
17215 inlCurArgInfo->argHasSideEff = (curArgVal->gtFlags & (GTF_ALL_EFFECT & ~GTF_GLOB_REF)) != 0;
17218 if (curArgVal->gtOper == GT_LCL_VAR)
17220 inlCurArgInfo->argIsLclVar = true;
17222 /* Remember the "original" argument number */
17223 curArgVal->gtLclVar.gtLclILoffs = argNum;
17226 if ((curArgVal->OperKind() & GTK_CONST) ||
17227 ((curArgVal->gtOper == GT_ADDR) && (curArgVal->gtOp.gtOp1->gtOper == GT_LCL_VAR)))
17229 inlCurArgInfo->argIsInvariant = true;
17230 if (inlCurArgInfo->argIsThis && (curArgVal->gtOper == GT_CNS_INT) && (curArgVal->gtIntCon.gtIconVal == 0))
17232 /* Abort, but do not mark as not inlinable */
17233 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_HAS_NULL_THIS);
17238 if (!inlCurArgInfo->argIsInvariant && gtHasLocalsWithAddrOp(curArgVal))
17240 inlCurArgInfo->argHasLdargaOp = true;
17246 if (inlCurArgInfo->argIsThis)
17248 printf("thisArg:");
17252 printf("\nArgument #%u:", argNum);
17254 if (inlCurArgInfo->argIsLclVar)
17256 printf(" is a local var");
17258 if (inlCurArgInfo->argIsInvariant)
17260 printf(" is a constant");
17262 if (inlCurArgInfo->argHasGlobRef)
17264 printf(" has global refs");
17266 if (inlCurArgInfo->argHasSideEff)
17268 printf(" has side effects");
17270 if (inlCurArgInfo->argHasLdargaOp)
17272 printf(" has ldarga effect");
17274 if (inlCurArgInfo->argHasStargOp)
17276 printf(" has starg effect");
17278 if (inlCurArgInfo->argIsByRefToStructLocal)
17280 printf(" is byref to a struct local");
17284 gtDispTree(curArgVal);
17290 /*****************************************************************************
17294 void Compiler::impInlineInitVars(InlineInfo* pInlineInfo)
17296 assert(!compIsForInlining());
17298 GenTreePtr call = pInlineInfo->iciCall;
17299 CORINFO_METHOD_INFO* methInfo = &pInlineInfo->inlineCandidateInfo->methInfo;
17300 unsigned clsAttr = pInlineInfo->inlineCandidateInfo->clsAttr;
17301 InlArgInfo* inlArgInfo = pInlineInfo->inlArgInfo;
17302 InlLclVarInfo* lclVarInfo = pInlineInfo->lclVarInfo;
17303 InlineResult* inlineResult = pInlineInfo->inlineResult;
17305 const bool hasRetBuffArg = impMethodInfo_hasRetBuffArg(methInfo);
17307 /* init the argument struct */
17309 memset(inlArgInfo, 0, (MAX_INL_ARGS + 1) * sizeof(inlArgInfo[0]));
17311 /* Get hold of the 'this' pointer and the argument list proper */
17313 GenTreePtr thisArg = call->gtCall.gtCallObjp;
17314 GenTreePtr argList = call->gtCall.gtCallArgs;
17315 unsigned argCnt = 0; // Count of the arguments
17317 assert((methInfo->args.hasThis()) == (thisArg != nullptr));
17321 inlArgInfo[0].argIsThis = true;
17323 impInlineRecordArgInfo(pInlineInfo, thisArg, argCnt, inlineResult);
17325 if (inlineResult->IsFailure())
17330 /* Increment the argument count */
17334 /* Record some information about each of the arguments */
17335 bool hasTypeCtxtArg = (methInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) != 0;
17337 #if USER_ARGS_COME_LAST
17338 unsigned typeCtxtArg = thisArg ? 1 : 0;
17339 #else // USER_ARGS_COME_LAST
17340 unsigned typeCtxtArg = methInfo->args.totalILArgs();
17341 #endif // USER_ARGS_COME_LAST
17343 for (GenTreePtr argTmp = argList; argTmp; argTmp = argTmp->gtOp.gtOp2)
17345 if (argTmp == argList && hasRetBuffArg)
17350 // Ignore the type context argument
17351 if (hasTypeCtxtArg && (argCnt == typeCtxtArg))
17353 typeCtxtArg = 0xFFFFFFFF;
17357 assert(argTmp->gtOper == GT_LIST);
17358 GenTreePtr argVal = argTmp->gtOp.gtOp1;
17360 impInlineRecordArgInfo(pInlineInfo, argVal, argCnt, inlineResult);
17362 if (inlineResult->IsFailure())
17367 /* Increment the argument count */
17371 /* Make sure we got the arg number right */
17372 assert(argCnt == methInfo->args.totalILArgs());
17374 #ifdef FEATURE_SIMD
17375 bool foundSIMDType = pInlineInfo->hasSIMDTypeArgLocalOrReturn;
17376 #endif // FEATURE_SIMD
17378 /* We have typeless opcodes, get type information from the signature */
17384 if (clsAttr & CORINFO_FLG_VALUECLASS)
17386 sigType = TYP_BYREF;
17393 lclVarInfo[0].lclVerTypeInfo = verMakeTypeInfo(pInlineInfo->inlineCandidateInfo->clsHandle);
17394 lclVarInfo[0].lclHasLdlocaOp = false;
17396 #ifdef FEATURE_SIMD
17397 // We always want to check isSIMDClass, since we want to set foundSIMDType (to increase
17398 // the inlining multiplier) for anything in that assembly.
17399 // But we only need to normalize it if it is a TYP_STRUCT
17400 // (which we need to do even if we have already set foundSIMDType).
17401 if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDClass(&(lclVarInfo[0].lclVerTypeInfo)))
17403 if (sigType == TYP_STRUCT)
17405 sigType = impNormStructType(lclVarInfo[0].lclVerTypeInfo.GetClassHandle());
17407 foundSIMDType = true;
17409 #endif // FEATURE_SIMD
17410 lclVarInfo[0].lclTypeInfo = sigType;
17412 assert(varTypeIsGC(thisArg->gtType) || // "this" is managed
17413 (thisArg->gtType == TYP_I_IMPL && // "this" is unmgd but the method's class doesn't care
17414 (clsAttr & CORINFO_FLG_VALUECLASS)));
17416 if (genActualType(thisArg->gtType) != genActualType(sigType))
17418 if (sigType == TYP_REF)
17420 /* The argument cannot be bashed into a ref (see bug 750871) */
17421 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_REF);
17425 /* This can only happen with byrefs <-> ints/shorts */
17427 assert(genActualType(sigType) == TYP_I_IMPL || sigType == TYP_BYREF);
17428 assert(genActualType(thisArg->gtType) == TYP_I_IMPL || thisArg->gtType == TYP_BYREF);
17430 if (sigType == TYP_BYREF)
17432 lclVarInfo[0].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
17434 else if (thisArg->gtType == TYP_BYREF)
17436 assert(sigType == TYP_I_IMPL);
17438 /* If possible change the BYREF to an int */
17439 if (thisArg->IsVarAddr())
17441 thisArg->gtType = TYP_I_IMPL;
17442 lclVarInfo[0].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
17446 /* Arguments 'int <- byref' cannot be bashed */
17447 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_INT);
17454 /* Init the types of the arguments and make sure the types
17455 * from the trees match the types in the signature */
17457 CORINFO_ARG_LIST_HANDLE argLst;
17458 argLst = methInfo->args.args;
17461 for (i = (thisArg ? 1 : 0); i < argCnt; i++, argLst = info.compCompHnd->getArgNext(argLst))
17463 var_types sigType = (var_types)eeGetArgType(argLst, &methInfo->args);
17465 lclVarInfo[i].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->args, argLst);
17467 #ifdef FEATURE_SIMD
17468 if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDClass(&(lclVarInfo[i].lclVerTypeInfo)))
17470 // If this is a SIMD class (i.e. in the SIMD assembly), then we will consider that we've
17471 // found a SIMD type, even if this may not be a type we recognize (the assumption is that
17472 // it is likely to use a SIMD type, and therefore we want to increase the inlining multiplier).
17473 foundSIMDType = true;
17474 if (sigType == TYP_STRUCT)
17476 var_types structType = impNormStructType(lclVarInfo[i].lclVerTypeInfo.GetClassHandle());
17477 sigType = structType;
17480 #endif // FEATURE_SIMD
17482 lclVarInfo[i].lclTypeInfo = sigType;
17483 lclVarInfo[i].lclHasLdlocaOp = false;
17485 /* Does the tree type match the signature type? */
17487 GenTreePtr inlArgNode = inlArgInfo[i].argNode;
17489 if (sigType != inlArgNode->gtType)
17491 /* In valid IL, this can only happen for short integer types or byrefs <-> [native] ints,
17492 but in bad IL cases with caller-callee signature mismatches we can see other types.
17493 Intentionally reject cases with mismatches so the jit is more flexible when
17494 encountering bad IL. */
17496 bool isPlausibleTypeMatch = (genActualType(sigType) == genActualType(inlArgNode->gtType)) ||
17497 (genActualTypeIsIntOrI(sigType) && inlArgNode->gtType == TYP_BYREF) ||
17498 (sigType == TYP_BYREF && genActualTypeIsIntOrI(inlArgNode->gtType));
17500 if (!isPlausibleTypeMatch)
17502 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_TYPES_INCOMPATIBLE);
17506 /* Is it a narrowing or widening cast?
17507 * Widening casts are ok since the value computed is already
17508 * normalized to an int (on the IL stack) */
17510 if (genTypeSize(inlArgNode->gtType) >= genTypeSize(sigType))
17512 if (sigType == TYP_BYREF)
17514 lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
17516 else if (inlArgNode->gtType == TYP_BYREF)
17518 assert(varTypeIsIntOrI(sigType));
17520 /* If possible bash the BYREF to an int */
17521 if (inlArgNode->IsVarAddr())
17523 inlArgNode->gtType = TYP_I_IMPL;
17524 lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
17528 /* Arguments 'int <- byref' cannot be changed */
17529 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_INT);
17533 else if (genTypeSize(sigType) < EA_PTRSIZE)
17535 /* Narrowing cast */
17537 if (inlArgNode->gtOper == GT_LCL_VAR &&
17538 !lvaTable[inlArgNode->gtLclVarCommon.gtLclNum].lvNormalizeOnLoad() &&
17539 sigType == lvaGetRealType(inlArgNode->gtLclVarCommon.gtLclNum))
17541 /* We don't need to insert a cast here as the variable
17542 was assigned a normalized value of the right type */
17547 inlArgNode = inlArgInfo[i].argNode = gtNewCastNode(TYP_INT, inlArgNode, sigType);
17549 inlArgInfo[i].argIsLclVar = false;
17551 /* Try to fold the node in case we have constant arguments */
17553 if (inlArgInfo[i].argIsInvariant)
17555 inlArgNode = gtFoldExprConst(inlArgNode);
17556 inlArgInfo[i].argNode = inlArgNode;
17557 assert(inlArgNode->OperIsConst());
17560 #ifdef _TARGET_64BIT_
17561 else if (genTypeSize(genActualType(inlArgNode->gtType)) < genTypeSize(sigType))
17563 // This should only happen for int -> native int widening
17564 inlArgNode = inlArgInfo[i].argNode = gtNewCastNode(genActualType(sigType), inlArgNode, sigType);
17566 inlArgInfo[i].argIsLclVar = false;
17568 /* Try to fold the node in case we have constant arguments */
17570 if (inlArgInfo[i].argIsInvariant)
17572 inlArgNode = gtFoldExprConst(inlArgNode);
17573 inlArgInfo[i].argNode = inlArgNode;
17574 assert(inlArgNode->OperIsConst());
17577 #endif // _TARGET_64BIT_
17582 /* Init the types of the local variables */
17584 CORINFO_ARG_LIST_HANDLE localsSig;
17585 localsSig = methInfo->locals.args;
17587 for (i = 0; i < methInfo->locals.numArgs; i++)
17590 var_types type = (var_types)eeGetArgType(localsSig, &methInfo->locals, &isPinned);
17592 lclVarInfo[i + argCnt].lclHasLdlocaOp = false;
17593 lclVarInfo[i + argCnt].lclIsPinned = isPinned;
17594 lclVarInfo[i + argCnt].lclTypeInfo = type;
17598 // Pinned locals may cause inlines to fail.
17599 inlineResult->Note(InlineObservation::CALLEE_HAS_PINNED_LOCALS);
17600 if (inlineResult->IsFailure())
17606 lclVarInfo[i + argCnt].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->locals, localsSig);
17608 // If this local is a struct type with GC fields, inform the inliner. It may choose to bail
17609 // out on the inline.
17610 if (type == TYP_STRUCT)
17612 CORINFO_CLASS_HANDLE lclHandle = lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle();
17613 DWORD typeFlags = info.compCompHnd->getClassAttribs(lclHandle);
17614 if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0)
17616 inlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT);
17617 if (inlineResult->IsFailure())
17622 // Do further notification in the case where the call site is rare; some policies do
17623 // not track the relative hotness of call sites for "always" inline cases.
17624 if (pInlineInfo->iciBlock->isRunRarely())
17626 inlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT);
17627 if (inlineResult->IsFailure())
17636 localsSig = info.compCompHnd->getArgNext(localsSig);
17638 #ifdef FEATURE_SIMD
17639 if ((!foundSIMDType || (type == TYP_STRUCT)) && isSIMDClass(&(lclVarInfo[i + argCnt].lclVerTypeInfo)))
17641 foundSIMDType = true;
17642 if (featureSIMD && type == TYP_STRUCT)
17644 var_types structType = impNormStructType(lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle());
17645 lclVarInfo[i + argCnt].lclTypeInfo = structType;
17648 #endif // FEATURE_SIMD
17651 #ifdef FEATURE_SIMD
17652 if (!foundSIMDType && (call->AsCall()->gtRetClsHnd != nullptr) && isSIMDClass(call->AsCall()->gtRetClsHnd))
17654 foundSIMDType = true;
17656 pInlineInfo->hasSIMDTypeArgLocalOrReturn = foundSIMDType;
17657 #endif // FEATURE_SIMD
17660 unsigned Compiler::impInlineFetchLocal(unsigned lclNum DEBUGARG(const char* reason))
17662 assert(compIsForInlining());
17664 unsigned tmpNum = impInlineInfo->lclTmpNum[lclNum];
17666 if (tmpNum == BAD_VAR_NUM)
17668 var_types lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
17670 // The lifetime of this local might span multiple BBs.
17671 // So it is a long lifetime local.
17672 impInlineInfo->lclTmpNum[lclNum] = tmpNum = lvaGrabTemp(false DEBUGARG(reason));
17674 lvaTable[tmpNum].lvType = lclTyp;
17675 if (impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclHasLdlocaOp)
17677 lvaTable[tmpNum].lvHasLdAddrOp = 1;
17680 if (impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclIsPinned)
17682 lvaTable[tmpNum].lvPinned = 1;
17684 if (!impInlineInfo->hasPinnedLocals)
17686 // If the inlinee returns a value, use a spill temp
17687 // for the return value to ensure that even in case
17688 // where the return expression refers to one of the
17689 // pinned locals, we can unpin the local right after
17690 // the inlined method body.
17691 if ((info.compRetNativeType != TYP_VOID) && (lvaInlineeReturnSpillTemp == BAD_VAR_NUM))
17693 lvaInlineeReturnSpillTemp =
17694 lvaGrabTemp(false DEBUGARG("Inline candidate pinned local return spill temp"));
17695 lvaTable[lvaInlineeReturnSpillTemp].lvType = info.compRetNativeType;
17699 impInlineInfo->hasPinnedLocals = true;
17702 if (impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclVerTypeInfo.IsStruct())
17704 if (varTypeIsStruct(lclTyp))
17706 lvaSetStruct(tmpNum,
17707 impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclVerTypeInfo.GetClassHandle(),
17708 true /* unsafe value cls check */);
17712 // This is a wrapped primitive. Make sure the verstate knows that
17713 lvaTable[tmpNum].lvVerTypeInfo =
17714 impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclVerTypeInfo;
17722 // A method used to return the GenTree (usually a GT_LCL_VAR) representing the arguments of the inlined method.
17723 // Only use this method for the arguments of the inlinee method.
17724 // !!! Do not use it for the locals of the inlinee method. !!!!
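//
// In outline, the argument is materialized in one of these ways (see the body below
// for the exact conditions):
//   - invariant (a constant or the address of a local): clone the original node;
//   - a caller lclVar with no ldarga/starg: reuse the caller's local directly;
//   - a byref to a struct local: clone the address expression;
//   - anything else: evaluate it into a fresh inlinee temp and return that temp.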
17726 GenTreePtr Compiler::impInlineFetchArg(unsigned lclNum, InlArgInfo* inlArgInfo, InlLclVarInfo* lclVarInfo)
17728 /* Get the argument type */
17729 var_types lclTyp = lclVarInfo[lclNum].lclTypeInfo;
17731 GenTreePtr op1 = nullptr;
17733 // constant or address of local
17734 if (inlArgInfo[lclNum].argIsInvariant && !inlArgInfo[lclNum].argHasLdargaOp && !inlArgInfo[lclNum].argHasStargOp)
17736 /* Clone the constant. Note that we cannot directly use argNode
17737 in the trees even if inlArgInfo[lclNum].argIsUsed==false as this
17738 would introduce aliasing between inlArgInfo[].argNode and
17739 impInlineExpr. Then gtFoldExpr() could change it, causing further
17740 references to the argument working off of the bashed copy. */
17742 op1 = gtCloneExpr(inlArgInfo[lclNum].argNode);
17743 PREFIX_ASSUME(op1 != nullptr);
17744 inlArgInfo[lclNum].argTmpNum = (unsigned)-1; // illegal temp
17746 else if (inlArgInfo[lclNum].argIsLclVar && !inlArgInfo[lclNum].argHasLdargaOp && !inlArgInfo[lclNum].argHasStargOp)
17748 /* Argument is a local variable (of the caller)
17749 * Can we re-use the passed argument node? */
17751 op1 = inlArgInfo[lclNum].argNode;
17752 inlArgInfo[lclNum].argTmpNum = op1->gtLclVarCommon.gtLclNum;
17754 if (inlArgInfo[lclNum].argIsUsed)
17756 assert(op1->gtOper == GT_LCL_VAR);
17757 assert(lclNum == op1->gtLclVar.gtLclILoffs);
17759 if (!lvaTable[op1->gtLclVarCommon.gtLclNum].lvNormalizeOnLoad())
17761 lclTyp = genActualType(lclTyp);
17764 /* Create a new lcl var node - remember the argument lclNum */
17765 op1 = gtNewLclvNode(op1->gtLclVarCommon.gtLclNum, lclTyp, op1->gtLclVar.gtLclILoffs);
17768 else if (inlArgInfo[lclNum].argIsByRefToStructLocal && !inlArgInfo[lclNum].argHasStargOp)
17770 /* Argument is a by-ref address to a struct, a normed struct, or its field.
17771 In these cases, don't spill the byref to a local, simply clone the tree and use it.
17772 This way we will increase the chance for this byref to be optimized away by
17773 a subsequent "dereference" operation.
17775 From Dev11 bug #139955: Argument node can also be TYP_I_IMPL if we've bashed the tree
17776 (in impInlineInitVars()), if the arg has argHasLdargaOp as well as argIsByRefToStructLocal.
17777 For example, if the caller is:
17778 ldloca.s V_1 // V_1 is a local struct
17779 call void Test.ILPart::RunLdargaOnPointerArg(int32*)
17780 and the callee being inlined has:
17781 .method public static void RunLdargaOnPointerArg(int32* ptrToInts) cil managed
17783 call void Test.FourInts::NotInlined_SetExpectedValuesThroughPointerToPointer(int32**)
17784 then we change the argument tree (of "ldloca.s V_1") to TYP_I_IMPL to match the callee signature. We'll
17785 soon afterwards reject the inlining anyway, since the tree we return isn't a GT_LCL_VAR.
17787 assert(inlArgInfo[lclNum].argNode->TypeGet() == TYP_BYREF ||
17788 inlArgInfo[lclNum].argNode->TypeGet() == TYP_I_IMPL);
17789 op1 = gtCloneExpr(inlArgInfo[lclNum].argNode);
17793 /* Argument is a complex expression - it must be evaluated into a temp */
17795 if (inlArgInfo[lclNum].argHasTmp)
17797 assert(inlArgInfo[lclNum].argIsUsed);
17798 assert(inlArgInfo[lclNum].argTmpNum < lvaCount);
17800 /* Create a new lcl var node - remember the argument lclNum */
17801 op1 = gtNewLclvNode(inlArgInfo[lclNum].argTmpNum, genActualType(lclTyp));
17803 /* This is the second or later use of the this argument,
17804 so we have to use the temp (instead of the actual arg) */
17805 inlArgInfo[lclNum].argBashTmpNode = nullptr;
17809 /* First time use */
17810 assert(inlArgInfo[lclNum].argIsUsed == false);
17812 /* Reserve a temp for the expression.
17813 * Use a large size node as we may change it later */
17815 unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Inlining Arg"));
17817 lvaTable[tmpNum].lvType = lclTyp;
17818 assert(lvaTable[tmpNum].lvAddrExposed == 0);
17819 if (inlArgInfo[lclNum].argHasLdargaOp)
17821 lvaTable[tmpNum].lvHasLdAddrOp = 1;
17824 if (lclVarInfo[lclNum].lclVerTypeInfo.IsStruct())
17826 if (varTypeIsStruct(lclTyp))
17828 lvaSetStruct(tmpNum, impInlineInfo->lclVarInfo[lclNum].lclVerTypeInfo.GetClassHandle(),
17829 true /* unsafe value cls check */);
17833 // This is a wrapped primitive. Make sure the verstate knows that
17834 lvaTable[tmpNum].lvVerTypeInfo = impInlineInfo->lclVarInfo[lclNum].lclVerTypeInfo;
17838 inlArgInfo[lclNum].argHasTmp = true;
17839 inlArgInfo[lclNum].argTmpNum = tmpNum;
17841 // If we require strict exception order, then arguments must
17842 // be evaluated in sequence before the body of the inlined method.
17843 // So we need to evaluate them to a temp.
17844 // Also, if arguments have global references, we need to
17845 // evaluate them to a temp before the inlined body as the
17846 // inlined body may be modifying the global ref.
17847 // TODO-1stClassStructs: We currently do not reuse an existing lclVar
17848 // if it is a struct, because it requires some additional handling.
17850 if (!varTypeIsStruct(lclTyp) && (!inlArgInfo[lclNum].argHasSideEff) && (!inlArgInfo[lclNum].argHasGlobRef))
17852 /* Get a *LARGE* LCL_VAR node */
17853 op1 = gtNewLclLNode(tmpNum, genActualType(lclTyp), lclNum);
17855 /* Record op1 as the very first use of this argument.
17856 If there are no further uses of the arg, we may be
17857 able to use the actual arg node instead of the temp.
17858 If we do see any further uses, we will clear this. */
17859 inlArgInfo[lclNum].argBashTmpNode = op1;
17863 /* Get a small LCL_VAR node */
17864 op1 = gtNewLclvNode(tmpNum, genActualType(lclTyp));
17865 /* No bashing of this argument */
17866 inlArgInfo[lclNum].argBashTmpNode = nullptr;
17871 /* Mark the argument as used */
17873 inlArgInfo[lclNum].argIsUsed = true;
17878 /******************************************************************************
17879 Is this the original "this" argument to the call being inlined?
17881 Note that we do not inline methods with "starg 0", and so we do not need to worry about it.
17885 BOOL Compiler::impInlineIsThis(GenTreePtr tree, InlArgInfo* inlArgInfo)
17887 assert(compIsForInlining());
17888 return (tree->gtOper == GT_LCL_VAR && tree->gtLclVarCommon.gtLclNum == inlArgInfo[0].argTmpNum);
17891 //-----------------------------------------------------------------------------
17892 // This function checks if a dereference in the inlinee can guarantee that
17893 // the "this" is non-NULL.
17894 // If we haven't hit a branch or a side effect, and we are dereferencing
17895 // from 'this' to access a field or make a GTF_CALL_NULLCHECK call,
17896 // then we can avoid a separate null pointer check.
17898 // "additionalTreesToBeEvaluatedBefore"
17899 // is the set of pending trees that have not yet been added to the statement list,
17900 // and which have been removed from verCurrentState.esStack[]
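//
// Illustrative example (hypothetical inlinee): when inlining
//     int Get() { return this.m_value; }
// the first thing the inlined body does is dereference 'this' to load the field, and
// nothing with a visible side effect runs before that. The field access itself will
// fault on a null 'this', so the separate null check that would otherwise be inserted
// for the inlined call can be omitted.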
17902 BOOL Compiler::impInlineIsGuaranteedThisDerefBeforeAnySideEffects(GenTreePtr additionalTreesToBeEvaluatedBefore,
17903 GenTreePtr variableBeingDereferenced,
17904 InlArgInfo* inlArgInfo)
17906 assert(compIsForInlining());
17907 assert(opts.OptEnabled(CLFLG_INLINING));
17909 BasicBlock* block = compCurBB;
17914 if (block != fgFirstBB)
17919 if (!impInlineIsThis(variableBeingDereferenced, inlArgInfo))
17924 if (additionalTreesToBeEvaluatedBefore &&
17925 GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(additionalTreesToBeEvaluatedBefore->gtFlags))
17930 for (stmt = impTreeList->gtNext; stmt; stmt = stmt->gtNext)
17932 expr = stmt->gtStmt.gtStmtExpr;
17934 if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(expr->gtFlags))
17940 for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
17942 unsigned stackTreeFlags = verCurrentState.esStack[level].val->gtFlags;
17943 if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(stackTreeFlags))
17952 /******************************************************************************/
17953 // Check the inlining eligibility of this GT_CALL node.
17954 // Mark GTF_CALL_INLINE_CANDIDATE on the GT_CALL node
17956 // Todo: find a way to record the failure reasons in the IR (or
17957 // otherwise build tree context) so when we do the inlining pass we
17958 // can capture these reasons
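//
// The checks below filter out sites that can never be inlined (helper calls, indirect
// or virtual calls, synchronized or noinline callees, EH-constrained blocks, etc.);
// only calls that survive these filters and impCheckCanInline get
// GTF_CALL_INLINE_CANDIDATE set and an InlineCandidateInfo attached for the later
// inlining phase.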
17960 void Compiler::impMarkInlineCandidate(GenTreePtr callNode,
17961 CORINFO_CONTEXT_HANDLE exactContextHnd,
17962 CORINFO_CALL_INFO* callInfo)
17964 // Let the strategy know there's another call
17965 impInlineRoot()->m_inlineStrategy->NoteCall();
17967 if (!opts.OptEnabled(CLFLG_INLINING))
17969 /* XXX Mon 8/18/2008
17970 * This assert is misleading. The caller does not ensure that we have CLFLG_INLINING set before
17971 * calling impMarkInlineCandidate. However, if this assert trips it means that we're an inlinee and
17972 * CLFLG_MINOPT is set. That doesn't make a lot of sense. If you hit this assert, work back and
17973 * figure out why we did not set MAXOPT for this compile.
17975 assert(!compIsForInlining());
17979 if (compIsForImportOnly())
17981 // Don't bother creating the inline candidate during verification.
17982 // Otherwise the call to info.compCompHnd->canInline will trigger a recursive verification
17983 // that leads to the creation of multiple instances of Compiler.
    GenTreeCall* call = callNode->AsCall();
    InlineResult inlineResult(this, call, nullptr, "impMarkInlineCandidate");

    // Don't inline if not optimizing root method
    if (opts.compDbgCode)
    {
        inlineResult.NoteFatal(InlineObservation::CALLER_DEBUG_CODEGEN);
        return;
    }

    // Don't inline if inlining into root method is disabled.
    if (InlineStrategy::IsNoInline(info.compCompHnd, info.compMethodHnd))
    {
        inlineResult.NoteFatal(InlineObservation::CALLER_IS_JIT_NOINLINE);
        return;
    }

    // Inlining candidate determination needs to honor only the IL tail prefix.
    // Inlining takes precedence over implicit tail call optimization (if the call is not directly recursive).
    if (call->IsTailPrefixedCall())
    {
        inlineResult.NoteFatal(InlineObservation::CALLSITE_EXPLICIT_TAIL_PREFIX);
        return;
    }

    // Tail recursion elimination takes precedence over inlining.
    // TODO: We may want to do some of the additional checks from fgMorphCall
    // here to reduce the chance we don't inline a call that won't be optimized
    // as a fast tail call or turned into a loop.
    if (gtIsRecursiveCall(call) && call->IsImplicitTailCall())
    {
        inlineResult.NoteFatal(InlineObservation::CALLSITE_IMPLICIT_REC_TAIL_CALL);
        return;
    }

    if ((call->gtFlags & GTF_CALL_VIRT_KIND_MASK) != GTF_CALL_NONVIRT)
    {
        inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT);
        return;
    }

    /* Ignore helper calls */
    if (call->gtCallType == CT_HELPER)
    {
        inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_CALL_TO_HELPER);
        return;
    }

    /* Ignore indirect calls */
    if (call->gtCallType == CT_INDIRECT)
    {
        inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT_MANAGED);
        return;
    }

    /* I removed the check for BBJ_THROW. BBJ_THROW is usually marked as rarely run. This more or less
     * restricts the inliner to non-expanding inlines. I removed the check to allow for non-expanding
     * inlining in throw blocks. I should consider the same thing for catch and filter regions. */
    CORINFO_METHOD_HANDLE fncHandle = call->gtCallMethHnd;
    unsigned              methAttr;

    // Reuse method flags from the original callInfo if possible
    if (fncHandle == callInfo->hMethod)
    {
        methAttr = callInfo->methodFlags;
    }
    else
    {
        methAttr = info.compCompHnd->getMethodAttribs(fncHandle);
    }

#ifdef DEBUG
    if (compStressCompile(STRESS_FORCE_INLINE, 0))
    {
        methAttr |= CORINFO_FLG_FORCEINLINE;
    }
#endif

    // Check for COMPlus_AggressiveInlining
    if (compDoAggressiveInlining)
    {
        methAttr |= CORINFO_FLG_FORCEINLINE;
    }

    if (!(methAttr & CORINFO_FLG_FORCEINLINE))
    {
        /* Don't bother inlining blocks that are in the catch handler region */
        if (bbInCatchHandlerILRange(compCurBB))
        {
#ifdef DEBUG
            if (verbose)
            {
                printf("\nWill not inline blocks that are in the catch handler region\n");
            }
#endif
            inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_CATCH);
            return;
        }

        if (bbInFilterILRange(compCurBB))
        {
#ifdef DEBUG
            if (verbose)
            {
                printf("\nWill not inline blocks that are in the filter region\n");
            }
#endif
            inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_FILTER);
            return;
        }
    }
    /* If the caller's stack frame is marked, then we can't do any inlining. Period. */
    if (opts.compNeedSecurityCheck)
    {
        inlineResult.NoteFatal(InlineObservation::CALLER_NEEDS_SECURITY_CHECK);
        return;
    }

    /* Check if we tried to inline this method before */
    if (methAttr & CORINFO_FLG_DONT_INLINE)
    {
        inlineResult.NoteFatal(InlineObservation::CALLEE_IS_NOINLINE);
        return;
    }

    /* Cannot inline synchronized methods */
    if (methAttr & CORINFO_FLG_SYNCH)
    {
        inlineResult.NoteFatal(InlineObservation::CALLEE_IS_SYNCHRONIZED);
        return;
    }

    /* Do not inline if callee needs security checks (since they would then mark the wrong frame) */
    if (methAttr & CORINFO_FLG_SECURITYCHECK)
    {
        inlineResult.NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
        return;
    }
    InlineCandidateInfo* inlineCandidateInfo = nullptr;
    impCheckCanInline(call, fncHandle, methAttr, exactContextHnd, &inlineCandidateInfo, &inlineResult);

    if (inlineResult.IsFailure())
    {
        return;
    }

    // The old value should be null
    assert(call->gtInlineCandidateInfo == nullptr);

    call->gtInlineCandidateInfo = inlineCandidateInfo;

    // Mark the call node as an inline candidate.
    call->gtFlags |= GTF_CALL_INLINE_CANDIDATE;

    // Let the strategy know there's another candidate.
    impInlineRoot()->m_inlineStrategy->NoteCandidate();

    // Since we're not actually inlining yet, and this call site is
    // still just an inline candidate, there's nothing to report.
    inlineResult.SetReported();
}
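// Illustrative usage sketch (an assumption, not part of the original source): later
// phases only attempt inlining for calls that were marked above, e.g. a check of the
// form
//
//     if (call->gtFlags & GTF_CALL_INLINE_CANDIDATE)
//     {
//         // consult call->gtInlineCandidateInfo and attempt the actual inline
//     }
//
// so a fatal observation recorded here removes the call site from further consideration.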
/******************************************************************************/
// Returns true if the given intrinsic will be implemented by target-specific
// instructions.

bool Compiler::IsTargetIntrinsic(CorInfoIntrinsics intrinsicId)
{
#if defined(_TARGET_AMD64_) || (defined(_TARGET_X86_) && !defined(LEGACY_BACKEND))
    switch (intrinsicId)
    {
        // AMD64 only has SSE2 instructions to directly compute sqrt/abs.
        //
        // TODO: Because the x86 backend only targets SSE for floating-point code,
        // it does not treat Sine, Cosine, or Round as intrinsics (JIT32
        // implemented those intrinsics as x87 instructions). If this poses
        // a CQ problem, it may be necessary to change the implementation of
        // the helper calls to decrease call overhead or switch back to the
        // x87 instructions. This is tracked by #7097.
        case CORINFO_INTRINSIC_Sqrt:
        case CORINFO_INTRINSIC_Abs:
            return true;

        default:
            return false;
    }
#elif defined(_TARGET_ARM64_)
    switch (intrinsicId)
    {
        case CORINFO_INTRINSIC_Sqrt:
        case CORINFO_INTRINSIC_Abs:
        case CORINFO_INTRINSIC_Round:
            return true;

        default:
            return false;
    }
#elif defined(_TARGET_ARM_)
    switch (intrinsicId)
    {
        case CORINFO_INTRINSIC_Sqrt:
        case CORINFO_INTRINSIC_Abs:
        case CORINFO_INTRINSIC_Round:
            return true;

        default:
            return false;
    }
#elif defined(_TARGET_X86_)
    switch (intrinsicId)
    {
        case CORINFO_INTRINSIC_Sin:
        case CORINFO_INTRINSIC_Cos:
        case CORINFO_INTRINSIC_Sqrt:
        case CORINFO_INTRINSIC_Abs:
        case CORINFO_INTRINSIC_Round:
            return true;

        default:
            return false;
    }
#else
    // TODO: This portion of the logic is not implemented for other architectures.
    // The reason for returning true is that on all other architectures the only
    // intrinsics enabled are target intrinsics.
    return true;
#endif //_TARGET_AMD64_
}
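// Illustrative sketch (an assumption, not part of the original source): a new target
// would add its own arm to the #if chain above, ahead of the fallback, along the lines of
//
//     #elif defined(_TARGET_SOMEARCH_)   // hypothetical target define
//         switch (intrinsicId)
//         {
//             case CORINFO_INTRINSIC_Sqrt:
//                 return true;
//             default:
//                 return false;
//         }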
/******************************************************************************/
// Returns true if the given intrinsic will be implemented by calling System.Math
// methods.

bool Compiler::IsIntrinsicImplementedByUserCall(CorInfoIntrinsics intrinsicId)
{
    // Currently, if a math intrinsic is not implemented by target-specific
    // instructions, it will be implemented by a System.Math call. In the
    // future, if we turn to implementing some of them with helper calls,
    // this predicate needs to be revisited.
    return !IsTargetIntrinsic(intrinsicId);
}
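// Illustrative sketch (an assumption, not part of the original source): the two
// predicates above partition the math intrinsics, so a caller expanding one can
// choose its strategy with
//
//     if (IsIntrinsicImplementedByUserCall(intrinsicId))
//     {
//         // keep the call to the System.Math method (an ordinary user call)
//     }
//     else
//     {
//         // emit a GT_INTRINSIC node that codegen maps to a machine instruction
//     }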
bool Compiler::IsMathIntrinsic(CorInfoIntrinsics intrinsicId)
{
    switch (intrinsicId)
    {
        case CORINFO_INTRINSIC_Sin:
        case CORINFO_INTRINSIC_Sqrt:
        case CORINFO_INTRINSIC_Abs:
        case CORINFO_INTRINSIC_Cos:
        case CORINFO_INTRINSIC_Round:
        case CORINFO_INTRINSIC_Cosh:
        case CORINFO_INTRINSIC_Sinh:
        case CORINFO_INTRINSIC_Tan:
        case CORINFO_INTRINSIC_Tanh:
        case CORINFO_INTRINSIC_Asin:
        case CORINFO_INTRINSIC_Acos:
        case CORINFO_INTRINSIC_Atan:
        case CORINFO_INTRINSIC_Atan2:
        case CORINFO_INTRINSIC_Log10:
        case CORINFO_INTRINSIC_Pow:
        case CORINFO_INTRINSIC_Exp:
        case CORINFO_INTRINSIC_Ceiling:
        case CORINFO_INTRINSIC_Floor:
            return true;

        default:
            return false;
    }
}

bool Compiler::IsMathIntrinsic(GenTreePtr tree)
{
    return (tree->OperGet() == GT_INTRINSIC) && IsMathIntrinsic(tree->gtIntrinsic.gtIntrinsicId);
}
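// Illustrative usage (an assumption, not part of the original source): the tree
// overload lets callers test a node directly, e.g.
//
//     if (IsMathIntrinsic(expr))
//     {
//         // expr is a GT_INTRINSIC whose id is one of the System.Math operations above
//     }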
/*****************************************************************************/