1 // Licensed to the .NET Foundation under one or more agreements.
2 // The .NET Foundation licenses this file to you under the MIT license.
3 // See the LICENSE file in the project root for more information.
/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX                                                                           XX
XX   Imports the given method and converts it to semantic trees              XX
XX                                                                           XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
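// Verification helpers: 'Verify' records a verification failure for the current opcode;
// the 'VerifyOrReturn' variants additionally return (false, for the speculative variant)
// from the enclosing function when the condition does not hold.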
#define Verify(cond, msg)                                                                                              \
    if (!(cond)) { verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__)); }

#define VerifyOrReturn(cond, msg)                                                                                      \
    if (!(cond)) { verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__)); return; }

#define VerifyOrReturnSpeculative(cond, msg, speculative)                                                              \
    if (!(cond)) {                                                                                                     \
        if (!(speculative)) { verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__)); }   \
        return false;                                                                                                  \
    }
62 /*****************************************************************************/
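// impInit: reset the importer's per-method state (the statement list pointers and the
// inlined code size counter) before importing a method.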
64 void Compiler::impInit()
68 impTreeList = nullptr;
69 impTreeLast = nullptr;
70 impInlinedCodeSize = 0;
74 /*****************************************************************************
76 * Pushes the given tree on the stack.
79 void Compiler::impPushOnStack(GenTreePtr tree, typeInfo ti)
81 /* Check for overflow. If inlining, we may be using a bigger stack */
83 if ((verCurrentState.esStackDepth >= info.compMaxStack) &&
84 (verCurrentState.esStackDepth >= impStkSize || ((compCurBB->bbFlags & BBF_IMPORTED) == 0)))
86 BADCODE("stack overflow");
90 // If we are pushing a struct, make certain we know the precise type!
91 if (tree->TypeGet() == TYP_STRUCT)
93 assert(ti.IsType(TI_STRUCT));
94 CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandle();
95 assert(clsHnd != NO_CLASS_HANDLE);
98 if (tiVerificationNeeded && !ti.IsDead())
100 assert(typeInfo::AreEquivalent(NormaliseForStack(ti), ti)); // types are normalized
102 // The ti type is consistent with the tree type.
105 // On 64-bit systems, nodes whose "proper" type is "native int" get labeled TYP_LONG.
106 // In the verification type system, we always transform "native int" to "TI_INT".
107 // Ideally, we would keep track of which nodes labeled "TYP_LONG" are really "native int", but
108 // attempts to do that have proved too difficult. Instead, we'll assume that in checks like this,
109 // when there's a mismatch, it's because of this reason -- the typeInfo::AreEquivalentModuloNativeInt
110 // method used in the last disjunct allows exactly this mismatch.
111 assert(ti.IsDead() || ti.IsByRef() && (tree->TypeGet() == TYP_I_IMPL || tree->TypeGet() == TYP_BYREF) ||
112 ti.IsUnboxedGenericTypeVar() && tree->TypeGet() == TYP_REF ||
113 ti.IsObjRef() && tree->TypeGet() == TYP_REF || ti.IsMethod() && tree->TypeGet() == TYP_I_IMPL ||
114 ti.IsType(TI_STRUCT) && tree->TypeGet() != TYP_REF ||
115 typeInfo::AreEquivalentModuloNativeInt(NormaliseForStack(ti),
116 NormaliseForStack(typeInfo(tree->TypeGet()))));
118 // If it is a struct type, make certain we normalized the primitive types
119 assert(!ti.IsType(TI_STRUCT) ||
120 info.compCompHnd->getTypeForPrimitiveValueClass(ti.GetClassHandle()) == CORINFO_TYPE_UNDEF);
124 if (VERBOSE && tiVerificationNeeded)
127 printf(TI_DUMP_PADDING);
128 printf("About to push to stack: ");
131 #endif // VERBOSE_VERIFY
135 verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo = ti;
136 verCurrentState.esStack[verCurrentState.esStackDepth++].val = tree;
138 if ((tree->gtType == TYP_LONG) && (compLongUsed == false))
142 else if (((tree->gtType == TYP_FLOAT) || (tree->gtType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
144 compFloatingPointUsed = true;
148 /******************************************************************************/
// Used in the inliner, where we can assume typesafe code. Please don't use in the importer!!
150 inline void Compiler::impPushOnStackNoType(GenTreePtr tree)
152 assert(verCurrentState.esStackDepth < impStkSize);
153 INDEBUG(verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo = typeInfo());
154 verCurrentState.esStack[verCurrentState.esStackDepth++].val = tree;
156 if ((tree->gtType == TYP_LONG) && (compLongUsed == false))
160 else if (((tree->gtType == TYP_FLOAT) || (tree->gtType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
162 compFloatingPointUsed = true;
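// Push a null object reference (a zero TYP_REF constant, verification type TI_NULL)
// onto the importer's stack.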
166 inline void Compiler::impPushNullObjRefOnStack()
168 impPushOnStack(gtNewIconNode(0, TYP_REF), typeInfo(TI_NULL));
171 // This method gets called when we run into unverifiable code
172 // (and we are verifying the method)
174 inline void Compiler::verRaiseVerifyExceptionIfNeeded(INDEBUG(const char* msg) DEBUGARG(const char* file)
175 DEBUGARG(unsigned line))
177 // Remember that the code is not verifiable
178 // Note that the method may yet pass canSkipMethodVerification(),
179 // and so the presence of unverifiable code may not be an issue.
180 tiIsVerifiableCode = FALSE;
183 const char* tail = strrchr(file, '\\');
189 if (JitConfig.JitBreakOnUnsafeCode())
191 assert(!"Unsafe code detected");
195 JITLOG((LL_INFO10000, "Detected unsafe code: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
196 msg, info.compFullName, impCurOpcName, impCurOpcOffs));
198 if (verNeedsVerification() || compIsForImportOnly())
200 JITLOG((LL_ERROR, "Verification failure: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
201 msg, info.compFullName, impCurOpcName, impCurOpcOffs));
202 verRaiseVerifyException(INDEBUG(msg) DEBUGARG(file) DEBUGARG(line));
206 inline void DECLSPEC_NORETURN Compiler::verRaiseVerifyException(INDEBUG(const char* msg) DEBUGARG(const char* file)
207 DEBUGARG(unsigned line))
209 JITLOG((LL_ERROR, "Verification failure: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
210 msg, info.compFullName, impCurOpcName, impCurOpcOffs));
213 // BreakIfDebuggerPresent();
214 if (getBreakOnBadCode())
216 assert(!"Typechecking error");
220 RaiseException(SEH_VERIFICATION_EXCEPTION, EXCEPTION_NONCONTINUABLE, 0, nullptr);
// Helper function that tells us whether the IL instruction at the given address
// consumes an address from the top of the stack. We use it to avoid unnecessarily
// marking locals as address-taken.
227 bool Compiler::impILConsumesAddr(const BYTE* codeAddr, CORINFO_METHOD_HANDLE fncHandle, CORINFO_MODULE_HANDLE scpHandle)
229 assert(!compIsForInlining());
233 opcode = (OPCODE)getU1LittleEndian(codeAddr);
// case CEE_LDFLDA: We're taking this one out: if you have an ldflda sequence
// on a primitive-like struct, you end up after morphing with the address of a local
// that's not marked as address-taken, which is wrong. Also, ldflda is usually used
// for structs that contain other structs, which isn't a case we handle very
// well right now for other reasons.

// We won't collapse small fields. This is probably not the right place to have this
// check, but we're only using the function for this purpose, and it is easy to
// factor out if we need to do so.
254 CORINFO_RESOLVED_TOKEN resolvedToken;
255 impResolveToken(codeAddr + sizeof(__int8), &resolvedToken, CORINFO_TOKENKIND_Field);
257 CORINFO_CLASS_HANDLE clsHnd;
258 var_types lclTyp = JITtype2varType(info.compCompHnd->getFieldType(resolvedToken.hField, &clsHnd));
260 // Preserve 'small' int types
261 if (lclTyp > TYP_INT)
263 lclTyp = genActualType(lclTyp);
266 if (varTypeIsSmall(lclTyp))
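//------------------------------------------------------------------------
// impResolveToken: read the 4-byte metadata token at 'addr' and resolve it
// through the EE. When verifying, the non-throwing eeTryResolveToken path is
// used and a failure is reported via Verify.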
280 void Compiler::impResolveToken(const BYTE* addr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoTokenKind kind)
282 pResolvedToken->tokenContext = impTokenLookupContextHandle;
283 pResolvedToken->tokenScope = info.compScopeHnd;
284 pResolvedToken->token = getU4LittleEndian(addr);
285 pResolvedToken->tokenType = kind;
287 if (!tiVerificationNeeded)
289 info.compCompHnd->resolveToken(pResolvedToken);
293 Verify(eeTryResolveToken(pResolvedToken), "Token resolution failed");
297 /*****************************************************************************
299 * Pop one tree from the stack.
302 StackEntry Compiler::impPopStack()
304 if (verCurrentState.esStackDepth == 0)
306 BADCODE("stack underflow");
311 if (VERBOSE && tiVerificationNeeded)
314 printf(TI_DUMP_PADDING);
315 printf("About to pop from the stack: ");
316 const typeInfo& ti = verCurrentState.esStack[verCurrentState.esStackDepth - 1].seTypeInfo;
319 #endif // VERBOSE_VERIFY
322 return verCurrentState.esStack[--verCurrentState.esStackDepth];
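// Pop one tree from the stack, also returning (via 'structType') the class handle
// recorded in the popped entry's type info.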
325 StackEntry Compiler::impPopStack(CORINFO_CLASS_HANDLE& structType)
327 StackEntry ret = impPopStack();
328 structType = verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo.GetClassHandle();
332 GenTreePtr Compiler::impPopStack(typeInfo& ti)
334 StackEntry ret = impPopStack();
339 /*****************************************************************************
* Peek at the n'th (0-based) tree on the top of the stack.
*/
344 StackEntry& Compiler::impStackTop(unsigned n)
346 if (verCurrentState.esStackDepth <= n)
348 BADCODE("stack underflow");
351 return verCurrentState.esStack[verCurrentState.esStackDepth - n - 1];
353 /*****************************************************************************
354 * Some of the trees are spilled specially. While unspilling them, or
355 * making a copy, these need to be handled specially. The function
356 * enumerates the operators possible after spilling.
359 #ifdef DEBUG // only used in asserts
360 static bool impValidSpilledStackEntry(GenTreePtr tree)
362 if (tree->gtOper == GT_LCL_VAR)
367 if (tree->OperIsConst())
376 /*****************************************************************************
378 * The following logic is used to save/restore stack contents.
379 * If 'copy' is true, then we make a copy of the trees on the stack. These
380 * have to all be cloneable/spilled values.
383 void Compiler::impSaveStackState(SavedStack* savePtr, bool copy)
385 savePtr->ssDepth = verCurrentState.esStackDepth;
387 if (verCurrentState.esStackDepth)
389 savePtr->ssTrees = new (this, CMK_ImpStack) StackEntry[verCurrentState.esStackDepth];
390 size_t saveSize = verCurrentState.esStackDepth * sizeof(*savePtr->ssTrees);
394 StackEntry* table = savePtr->ssTrees;
396 /* Make a fresh copy of all the stack entries */
398 for (unsigned level = 0; level < verCurrentState.esStackDepth; level++, table++)
400 table->seTypeInfo = verCurrentState.esStack[level].seTypeInfo;
401 GenTreePtr tree = verCurrentState.esStack[level].val;
403 assert(impValidSpilledStackEntry(tree));
405 switch (tree->gtOper)
412 table->val = gtCloneExpr(tree);
416 assert(!"Bad oper - Not covered by impValidSpilledStackEntry()");
423 memcpy(savePtr->ssTrees, verCurrentState.esStack, saveSize);
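// Restore a stack state previously captured by impSaveStackState.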
428 void Compiler::impRestoreStackState(SavedStack* savePtr)
430 verCurrentState.esStackDepth = savePtr->ssDepth;
432 if (verCurrentState.esStackDepth)
434 memcpy(verCurrentState.esStack, savePtr->ssTrees,
435 verCurrentState.esStackDepth * sizeof(*verCurrentState.esStack));
439 /*****************************************************************************
441 * Get the tree list started for a new basic block.
443 inline void Compiler::impBeginTreeList()
445 assert(impTreeList == nullptr && impTreeLast == nullptr);
447 impTreeList = impTreeLast = new (this, GT_BEG_STMTS) GenTree(GT_BEG_STMTS, TYP_VOID);
450 /*****************************************************************************
452 * Store the given start and end stmt in the given basic block. This is
453 * mostly called by impEndTreeList(BasicBlock *block). It is called
454 * directly only for handling CEE_LEAVEs out of finally-protected try's.
457 inline void Compiler::impEndTreeList(BasicBlock* block, GenTreePtr firstStmt, GenTreePtr lastStmt)
459 assert(firstStmt->gtOper == GT_STMT);
460 assert(lastStmt->gtOper == GT_STMT);
462 /* Make the list circular, so that we can easily walk it backwards */
464 firstStmt->gtPrev = lastStmt;
466 /* Store the tree list in the basic block */
468 block->bbTreeList = firstStmt;
470 /* The block should not already be marked as imported */
471 assert((block->bbFlags & BBF_IMPORTED) == 0);
473 block->bbFlags |= BBF_IMPORTED;
476 /*****************************************************************************
478 * Store the current tree list in the given basic block.
481 inline void Compiler::impEndTreeList(BasicBlock* block)
483 assert(impTreeList->gtOper == GT_BEG_STMTS);
485 GenTreePtr firstTree = impTreeList->gtNext;
489 /* The block should not already be marked as imported */
490 assert((block->bbFlags & BBF_IMPORTED) == 0);
492 // Empty block. Just mark it as imported
493 block->bbFlags |= BBF_IMPORTED;
497 // Ignore the GT_BEG_STMTS
498 assert(firstTree->gtPrev == impTreeList);
500 impEndTreeList(block, firstTree, impTreeLast);
504 if (impLastILoffsStmt != nullptr)
506 impLastILoffsStmt->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
507 impLastILoffsStmt = nullptr;
510 impTreeList = impTreeLast = nullptr;
514 /*****************************************************************************
* Check that storing the given tree doesn't mess up the semantic order. Note
* that this has only limited value as we can only check [0..chkLevel).
*/
520 inline void Compiler::impAppendStmtCheck(GenTreePtr stmt, unsigned chkLevel)
525 assert(stmt->gtOper == GT_STMT);
527 if (chkLevel == (unsigned)CHECK_SPILL_ALL)
529 chkLevel = verCurrentState.esStackDepth;
532 if (verCurrentState.esStackDepth == 0 || chkLevel == 0 || chkLevel == (unsigned)CHECK_SPILL_NONE)
537 GenTreePtr tree = stmt->gtStmt.gtStmtExpr;
// Calls can only be appended if there are no trees with GTF_GLOB_EFFECT on the stack.
541 if (tree->gtFlags & GTF_CALL)
543 for (unsigned level = 0; level < chkLevel; level++)
545 assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_EFFECT) == 0);
549 if (tree->gtOper == GT_ASG)
// For an assignment to a local variable, all references to that
// variable have to be spilled. If it is aliased, all calls and
// indirect accesses have to be spilled.
555 if (tree->gtOp.gtOp1->gtOper == GT_LCL_VAR)
557 unsigned lclNum = tree->gtOp.gtOp1->gtLclVarCommon.gtLclNum;
558 for (unsigned level = 0; level < chkLevel; level++)
560 assert(!gtHasRef(verCurrentState.esStack[level].val, lclNum, false));
561 assert(!lvaTable[lclNum].lvAddrExposed ||
562 (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT) == 0);
566 // If the access may be to global memory, all side effects have to be spilled.
568 else if (tree->gtOp.gtOp1->gtFlags & GTF_GLOB_REF)
570 for (unsigned level = 0; level < chkLevel; level++)
572 assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_REF) == 0);
579 /*****************************************************************************
581 * Append the given GT_STMT node to the current block's tree list.
582 * [0..chkLevel) is the portion of the stack which we will check for
583 * interference with stmt and spill if needed.
586 inline void Compiler::impAppendStmt(GenTreePtr stmt, unsigned chkLevel)
588 assert(stmt->gtOper == GT_STMT);
589 noway_assert(impTreeLast != nullptr);
591 /* If the statement being appended has any side-effects, check the stack
592 to see if anything needs to be spilled to preserve correct ordering. */
594 GenTreePtr expr = stmt->gtStmt.gtStmtExpr;
595 unsigned flags = expr->gtFlags & GTF_GLOB_EFFECT;
// Assignments to (unaliased) locals don't count as a side-effect as
// we handle them specially using impSpillLclRefs(). Temp locals should
// be fine too.
601 if ((expr->gtOper == GT_ASG) && (expr->gtOp.gtOp1->gtOper == GT_LCL_VAR) &&
602 !(expr->gtOp.gtOp1->gtFlags & GTF_GLOB_REF) && !gtHasLocalsWithAddrOp(expr->gtOp.gtOp2))
604 unsigned op2Flags = expr->gtOp.gtOp2->gtFlags & GTF_GLOB_EFFECT;
605 assert(flags == (op2Flags | GTF_ASG));
609 if (chkLevel == (unsigned)CHECK_SPILL_ALL)
611 chkLevel = verCurrentState.esStackDepth;
614 if (chkLevel && chkLevel != (unsigned)CHECK_SPILL_NONE)
616 assert(chkLevel <= verCurrentState.esStackDepth);
620 // If there is a call, we have to spill global refs
621 bool spillGlobEffects = (flags & GTF_CALL) ? true : false;
623 if (expr->gtOper == GT_ASG)
625 GenTree* lhs = expr->gtGetOp1();
626 // If we are assigning to a global ref, we have to spill global refs on stack.
627 // TODO-1stClassStructs: Previously, spillGlobEffects was set to true for
628 // GT_INITBLK and GT_COPYBLK, but this is overly conservative, and should be
629 // revisited. (Note that it was NOT set to true for GT_COPYOBJ.)
630 if (!expr->OperIsBlkOp())
632 // If we are assigning to a global ref, we have to spill global refs on stack
633 if ((lhs->gtFlags & GTF_GLOB_REF) != 0)
635 spillGlobEffects = true;
638 else if ((lhs->OperIsBlk() && !lhs->AsBlk()->HasGCPtr()) ||
639 ((lhs->OperGet() == GT_LCL_VAR) &&
640 (lvaTable[lhs->AsLclVarCommon()->gtLclNum].lvStructGcCount == 0)))
642 spillGlobEffects = true;
646 impSpillSideEffects(spillGlobEffects, chkLevel DEBUGARG("impAppendStmt"));
650 impSpillSpecialSideEff();
654 impAppendStmtCheck(stmt, chkLevel);
656 /* Point 'prev' at the previous node, so that we can walk backwards */
658 stmt->gtPrev = impTreeLast;
660 /* Append the expression statement to the list */
662 impTreeLast->gtNext = stmt;
666 impMarkContiguousSIMDFieldAssignments(stmt);
669 /* Once we set impCurStmtOffs in an appended tree, we are ready to
670 report the following offsets. So reset impCurStmtOffs */
672 if (impTreeLast->gtStmt.gtStmtILoffsx == impCurStmtOffs)
674 impCurStmtOffsSet(BAD_IL_OFFSET);
678 if (impLastILoffsStmt == nullptr)
680 impLastILoffsStmt = stmt;
691 /*****************************************************************************
693 * Insert the given GT_STMT "stmt" before GT_STMT "stmtBefore"
696 inline void Compiler::impInsertStmtBefore(GenTreePtr stmt, GenTreePtr stmtBefore)
698 assert(stmt->gtOper == GT_STMT);
699 assert(stmtBefore->gtOper == GT_STMT);
701 GenTreePtr stmtPrev = stmtBefore->gtPrev;
702 stmt->gtPrev = stmtPrev;
703 stmt->gtNext = stmtBefore;
704 stmtPrev->gtNext = stmt;
705 stmtBefore->gtPrev = stmt;
708 /*****************************************************************************
710 * Append the given expression tree to the current block's tree list.
711 * Return the newly created statement.
714 GenTreePtr Compiler::impAppendTree(GenTreePtr tree, unsigned chkLevel, IL_OFFSETX offset)
718 /* Allocate an 'expression statement' node */
720 GenTreePtr expr = gtNewStmt(tree, offset);
722 /* Append the statement to the current block's stmt list */
724 impAppendStmt(expr, chkLevel);
729 /*****************************************************************************
* Insert the given expression tree before GT_STMT "stmtBefore".
*/
734 void Compiler::impInsertTreeBefore(GenTreePtr tree, IL_OFFSETX offset, GenTreePtr stmtBefore)
736 assert(stmtBefore->gtOper == GT_STMT);
738 /* Allocate an 'expression statement' node */
740 GenTreePtr expr = gtNewStmt(tree, offset);
/* Insert the statement into the current block's stmt list, before 'stmtBefore' */
744 impInsertStmtBefore(expr, stmtBefore);
747 /*****************************************************************************
749 * Append an assignment of the given value to a temp to the current tree list.
750 * curLevel is the stack level for which the spill to the temp is being done.
753 void Compiler::impAssignTempGen(unsigned tmp,
756 GenTreePtr* pAfterStmt, /* = NULL */
757 IL_OFFSETX ilOffset, /* = BAD_IL_OFFSET */
758 BasicBlock* block /* = NULL */
761 GenTreePtr asg = gtNewTempAssign(tmp, val);
763 if (!asg->IsNothingNode())
767 GenTreePtr asgStmt = gtNewStmt(asg, ilOffset);
768 *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, asgStmt);
772 impAppendTree(asg, curLevel, impCurStmtOffs);
777 /*****************************************************************************
* Same as above, but handles the value-class case too.
*/
781 void Compiler::impAssignTempGen(unsigned tmpNum,
783 CORINFO_CLASS_HANDLE structType,
785 GenTreePtr* pAfterStmt, /* = NULL */
786 IL_OFFSETX ilOffset, /* = BAD_IL_OFFSET */
787 BasicBlock* block /* = NULL */
792 if (varTypeIsStruct(val))
794 assert(tmpNum < lvaCount);
795 assert(structType != NO_CLASS_HANDLE);
// If the method is non-verifiable, the assert below is not guaranteed to hold,
// so ignore it when verification is turned on: any block that tries to use
// the temp would have failed verification anyway.
800 var_types varType = lvaTable[tmpNum].lvType;
801 assert(tiVerificationNeeded || varType == TYP_UNDEF || varTypeIsStruct(varType));
802 lvaSetStruct(tmpNum, structType, false);
804 // Now, set the type of the struct value. Note that lvaSetStruct may modify the type
805 // of the lclVar to a specialized type (e.g. TYP_SIMD), based on the handle (structType)
806 // that has been passed in for the value being assigned to the temp, in which case we
807 // need to set 'val' to that same type.
808 // Note also that if we always normalized the types of any node that might be a struct
809 // type, this would not be necessary - but that requires additional JIT/EE interface
810 // calls that may not actually be required - e.g. if we only access a field of a struct.
812 val->gtType = lvaTable[tmpNum].lvType;
814 GenTreePtr dst = gtNewLclvNode(tmpNum, val->gtType);
815 asg = impAssignStruct(dst, val, structType, curLevel, pAfterStmt, block);
819 asg = gtNewTempAssign(tmpNum, val);
822 if (!asg->IsNothingNode())
826 GenTreePtr asgStmt = gtNewStmt(asg, ilOffset);
827 *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, asgStmt);
831 impAppendTree(asg, curLevel, impCurStmtOffs);
836 /*****************************************************************************
* Pop the given number of values from the stack and return a list node with
* their values.
* The 'prefixTree' argument may optionally contain an argument
* list that is prepended to the list returned from this function.
*
* The notion of prepended is a bit misleading in that the list is backwards
* from the way I would expect: the first element popped is at the end of
* the returned list, and prefixTree is 'before' that, meaning closer to
* the end of the list. To get to prefixTree, you have to walk to the
* end of the list.
*
* For ARG_ORDER_R2L, prefixTree is only used to insert extra arguments; as
* such, we reverse its meaning so that the returned list has a reversed
* prefixTree at the head of the list.
*/
854 GenTreeArgList* Compiler::impPopList(unsigned count,
856 CORINFO_SIG_INFO* sig,
857 GenTreeArgList* prefixTree)
859 assert(sig == nullptr || count == sig->numArgs);
862 CORINFO_CLASS_HANDLE structType;
863 GenTreeArgList* treeList;
865 if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L)
871 treeList = prefixTree;
876 StackEntry se = impPopStack();
877 typeInfo ti = se.seTypeInfo;
878 GenTreePtr temp = se.val;
880 if (varTypeIsStruct(temp))
882 // Morph trees that aren't already OBJs or MKREFANY to be OBJs
883 assert(ti.IsType(TI_STRUCT));
884 structType = ti.GetClassHandleForValueClass();
885 temp = impNormStructVal(temp, structType, (unsigned)CHECK_SPILL_ALL);
888 /* NOTE: we defer bashing the type for I_IMPL to fgMorphArgs */
889 flags |= temp->gtFlags;
890 treeList = gtNewListNode(temp, treeList);
897 if (sig->retTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS &&
898 sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR && sig->retType != CORINFO_TYPE_VAR)
// Make sure that all valuetypes (including enums) that we push are loaded.
// This is to guarantee that if a GC is triggered from the prestub of this method,
// all valuetypes in the method signature are already loaded.
// We need to be able to find the size of the valuetypes, but we cannot
// do a class-load from within GC.
905 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(sig->retTypeSigClass);
908 CORINFO_ARG_LIST_HANDLE argLst = sig->args;
909 CORINFO_CLASS_HANDLE argClass;
910 CORINFO_CLASS_HANDLE argRealClass;
911 GenTreeArgList* args;
914 for (args = treeList, count = sig->numArgs; count > 0; args = args->Rest(), count--)
916 PREFIX_ASSUME(args != nullptr);
918 CorInfoType corType = strip(info.compCompHnd->getArgType(sig, argLst, &argClass));
920 // insert implied casts (from float to double or double to float)
922 if (corType == CORINFO_TYPE_DOUBLE && args->Current()->TypeGet() == TYP_FLOAT)
924 args->Current() = gtNewCastNode(TYP_DOUBLE, args->Current(), TYP_DOUBLE);
926 else if (corType == CORINFO_TYPE_FLOAT && args->Current()->TypeGet() == TYP_DOUBLE)
928 args->Current() = gtNewCastNode(TYP_FLOAT, args->Current(), TYP_FLOAT);
931 // insert any widening or narrowing casts for backwards compatibility
933 args->Current() = impImplicitIorI4Cast(args->Current(), JITtype2varType(corType));
935 if (corType != CORINFO_TYPE_CLASS && corType != CORINFO_TYPE_BYREF && corType != CORINFO_TYPE_PTR &&
936 corType != CORINFO_TYPE_VAR && (argRealClass = info.compCompHnd->getArgClass(sig, argLst)) != nullptr)
// Everett MC++ could generate IL with mismatched valuetypes. It used to work with the Everett JIT,
// but it stopped working in Whidbey when we started passing simple valuetypes as their underlying
// primitive types.
// We will try to adjust for this case here to avoid breaking customers' code (see VSW 485789 for
// details).
943 if (corType == CORINFO_TYPE_VALUECLASS && !varTypeIsStruct(args->Current()))
945 args->Current() = impNormStructVal(args->Current(), argRealClass, (unsigned)CHECK_SPILL_ALL, true);
948 // Make sure that all valuetypes (including enums) that we push are loaded.
// This is to guarantee that if a GC is triggered from the prestub of this method,
950 // all valuetypes in the method signature are already loaded.
951 // We need to be able to find the size of the valuetypes, but we cannot
952 // do a class-load from within GC.
953 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(argRealClass);
956 argLst = info.compCompHnd->getArgNext(argLst);
960 if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L)
962 // Prepend the prefixTree
964 // Simple in-place reversal to place treeList
965 // at the end of a reversed prefixTree
966 while (prefixTree != nullptr)
968 GenTreeArgList* next = prefixTree->Rest();
969 prefixTree->Rest() = treeList;
970 treeList = prefixTree;
977 /*****************************************************************************
979 * Pop the given number of values from the stack in reverse order (STDCALL/CDECL etc.)
980 * The first "skipReverseCount" items are not reversed.
983 GenTreeArgList* Compiler::impPopRevList(unsigned count,
985 CORINFO_SIG_INFO* sig,
986 unsigned skipReverseCount)
989 assert(skipReverseCount <= count);
991 GenTreeArgList* list = impPopList(count, flagsPtr, sig);
994 if (list == nullptr || skipReverseCount == count)
999 GenTreeArgList* ptr = nullptr; // Initialized to the first node that needs to be reversed
1000 GenTreeArgList* lastSkipNode = nullptr; // Will be set to the last node that does not need to be reversed
1002 if (skipReverseCount == 0)
1008 lastSkipNode = list;
1009 // Get to the first node that needs to be reversed
1010 for (unsigned i = 0; i < skipReverseCount - 1; i++)
1012 lastSkipNode = lastSkipNode->Rest();
1015 PREFIX_ASSUME(lastSkipNode != nullptr);
1016 ptr = lastSkipNode->Rest();
1019 GenTreeArgList* reversedList = nullptr;
1023 GenTreeArgList* tmp = ptr->Rest();
1024 ptr->Rest() = reversedList;
1027 } while (ptr != nullptr);
1029 if (skipReverseCount)
1031 lastSkipNode->Rest() = reversedList;
1036 return reversedList;
1040 /*****************************************************************************
Assign (copy) the structure from 'src' to 'dest'. The structure is a value
class of type 'structHnd'. It returns the tree that should be appended to the
statement list that represents the assignment.
Temp assignments may be appended to impTreeList if spilling is necessary.
curLevel is the stack level for which a spill may be being done.
*/
1048 GenTreePtr Compiler::impAssignStruct(GenTreePtr dest,
1050 CORINFO_CLASS_HANDLE structHnd,
1052 GenTreePtr* pAfterStmt, /* = NULL */
1053 BasicBlock* block /* = NULL */
1056 assert(varTypeIsStruct(dest));
1058 while (dest->gtOper == GT_COMMA)
1060 assert(varTypeIsStruct(dest->gtOp.gtOp2)); // Second thing is the struct
1062 // Append all the op1 of GT_COMMA trees before we evaluate op2 of the GT_COMMA tree.
1065 *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(dest->gtOp.gtOp1, impCurStmtOffs));
1069 impAppendTree(dest->gtOp.gtOp1, curLevel, impCurStmtOffs); // do the side effect
1072 // set dest to the second thing
1073 dest = dest->gtOp.gtOp2;
1076 assert(dest->gtOper == GT_LCL_VAR || dest->gtOper == GT_RETURN || dest->gtOper == GT_FIELD ||
1077 dest->gtOper == GT_IND || dest->gtOper == GT_OBJ || dest->gtOper == GT_INDEX);
1079 if (dest->OperGet() == GT_LCL_VAR && src->OperGet() == GT_LCL_VAR &&
1080 src->gtLclVarCommon.gtLclNum == dest->gtLclVarCommon.gtLclNum)
1083 return gtNewNothingNode();
1086 // TODO-1stClassStructs: Avoid creating an address if it is not needed,
1087 // or re-creating a Blk node if it is.
1088 GenTreePtr destAddr;
1090 if (dest->gtOper == GT_IND || dest->OperIsBlk())
1092 destAddr = dest->gtOp.gtOp1;
1096 destAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, dest);
1099 return (impAssignStructPtr(destAddr, src, structHnd, curLevel, pAfterStmt, block));
1102 /*****************************************************************************/
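//------------------------------------------------------------------------
// impAssignStructPtr: same as impAssignStruct, but takes the address of the
// destination ('destAddr') rather than the destination tree itself, and
// returns the assignment node to be appended by the caller.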
1104 GenTreePtr Compiler::impAssignStructPtr(GenTreePtr destAddr,
1106 CORINFO_CLASS_HANDLE structHnd,
1108 GenTreePtr* pAfterStmt, /* = NULL */
1109 BasicBlock* block /* = NULL */
1113 GenTreePtr dest = nullptr;
1114 unsigned destFlags = 0;
1116 #if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1117 assert(varTypeIsStruct(src) || (src->gtOper == GT_ADDR && src->TypeGet() == TYP_BYREF));
1118 // TODO-ARM-BUG: Does ARM need this?
1119 // TODO-ARM64-BUG: Does ARM64 need this?
1120 assert(src->gtOper == GT_LCL_VAR || src->gtOper == GT_FIELD || src->gtOper == GT_IND || src->gtOper == GT_OBJ ||
1121 src->gtOper == GT_CALL || src->gtOper == GT_MKREFANY || src->gtOper == GT_RET_EXPR ||
1122 src->gtOper == GT_COMMA || src->gtOper == GT_ADDR ||
1123 (src->TypeGet() != TYP_STRUCT && (GenTree::OperIsSIMD(src->gtOper) || src->gtOper == GT_LCL_FLD)));
1124 #else // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1125 assert(varTypeIsStruct(src));
1127 assert(src->gtOper == GT_LCL_VAR || src->gtOper == GT_FIELD || src->gtOper == GT_IND || src->gtOper == GT_OBJ ||
1128 src->gtOper == GT_CALL || src->gtOper == GT_MKREFANY || src->gtOper == GT_RET_EXPR ||
1129 src->gtOper == GT_COMMA ||
1130 (src->TypeGet() != TYP_STRUCT && (GenTree::OperIsSIMD(src->gtOper) || src->gtOper == GT_LCL_FLD)));
1131 #endif // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1132 if (destAddr->OperGet() == GT_ADDR)
1134 GenTree* destNode = destAddr->gtGetOp1();
1135 // If the actual destination is a local (for non-LEGACY_BACKEND), or already a block node, or is a node that
1136 // will be morphed, don't insert an OBJ(ADDR).
1137 if (destNode->gtOper == GT_INDEX || destNode->OperIsBlk()
1138 #ifndef LEGACY_BACKEND
1139 || ((destNode->OperGet() == GT_LCL_VAR) && (destNode->TypeGet() == src->TypeGet()))
1140 #endif // !LEGACY_BACKEND
1145 destType = destNode->TypeGet();
1149 destType = src->TypeGet();
1152 var_types asgType = src->TypeGet();
1154 if (src->gtOper == GT_CALL)
1156 if (src->AsCall()->TreatAsHasRetBufArg(this))
1158 // Case of call returning a struct via hidden retbuf arg
1160 // insert the return value buffer into the argument list as first byref parameter
1161 src->gtCall.gtCallArgs = gtNewListNode(destAddr, src->gtCall.gtCallArgs);
1163 // now returns void, not a struct
1164 src->gtType = TYP_VOID;
1166 // return the morphed call node
1171 // Case of call returning a struct in one or more registers.
1173 var_types returnType = (var_types)src->gtCall.gtReturnType;
1175 // We won't use a return buffer, so change the type of src->gtType to 'returnType'
1176 src->gtType = genActualType(returnType);
1178 // First we try to change this to "LclVar/LclFld = call"
1180 if ((destAddr->gtOper == GT_ADDR) && (destAddr->gtOp.gtOp1->gtOper == GT_LCL_VAR))
1182 // If it is a multi-reg struct return, don't change the oper to GT_LCL_FLD.
1183 // That is, the IR will be of the form lclVar = call for multi-reg return
1185 GenTreePtr lcl = destAddr->gtOp.gtOp1;
1186 if (src->AsCall()->HasMultiRegRetVal())
1188 // Mark the struct LclVar as used in a MultiReg return context
1189 // which currently makes it non promotable.
1190 // TODO-1stClassStructs: Eliminate this pessimization when we can more generally
1191 // handle multireg returns.
1192 lcl->gtFlags |= GTF_DONT_CSE;
1193 lvaTable[lcl->gtLclVarCommon.gtLclNum].lvIsMultiRegRet = true;
1195 else // The call result is not a multireg return
1197 // We change this to a GT_LCL_FLD (from a GT_ADDR of a GT_LCL_VAR)
1198 lcl->ChangeOper(GT_LCL_FLD);
1199 fgLclFldAssign(lcl->gtLclVarCommon.gtLclNum);
1202 lcl->gtType = src->gtType;
1203 asgType = src->gtType;
1206 #if defined(_TARGET_ARM_)
1207 // TODO-Cleanup: This should have been taken care of in the above HasMultiRegRetVal() case,
// but that method has not been updated to include ARM.
1209 impMarkLclDstNotPromotable(lcl->gtLclVarCommon.gtLclNum, src, structHnd);
1210 lcl->gtFlags |= GTF_DONT_CSE;
1211 #elif defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
// Not allowed for FEATURE_CORECLR, which is the only SKU available for System V OSs.
1213 assert(!src->gtCall.IsVarargs() && "varargs not allowed for System V OSs.");
1215 // Make the struct non promotable. The eightbytes could contain multiple fields.
1216 // TODO-1stClassStructs: Eliminate this pessimization when we can more generally
1217 // handle multireg returns.
1218 // TODO-Cleanup: Why is this needed here? This seems that it will set this even for
1219 // non-multireg returns.
1220 lcl->gtFlags |= GTF_DONT_CSE;
1221 lvaTable[lcl->gtLclVarCommon.gtLclNum].lvIsMultiRegRet = true;
1224 else // we don't have a GT_ADDR of a GT_LCL_VAR
1226 // !!! The destination could be on stack. !!!
1227 // This flag will let us choose the correct write barrier.
1228 asgType = returnType;
1229 destFlags = GTF_IND_TGTANYWHERE;
1233 else if (src->gtOper == GT_RET_EXPR)
1235 GenTreePtr call = src->gtRetExpr.gtInlineCandidate;
1236 noway_assert(call->gtOper == GT_CALL);
1238 if (call->AsCall()->HasRetBufArg())
1240 // insert the return value buffer into the argument list as first byref parameter
1241 call->gtCall.gtCallArgs = gtNewListNode(destAddr, call->gtCall.gtCallArgs);
1243 // now returns void, not a struct
1244 src->gtType = TYP_VOID;
1245 call->gtType = TYP_VOID;
// We have already appended the write to 'dest' via the GT_CALL's args,
// so now we just return an empty node (pruning the GT_RET_EXPR).
1253 // Case of inline method returning a struct in one or more registers.
1255 var_types returnType = (var_types)call->gtCall.gtReturnType;
1257 // We won't need a return buffer
1258 asgType = returnType;
1259 src->gtType = genActualType(returnType);
1260 call->gtType = src->gtType;
1262 // If we've changed the type, and it no longer matches a local destination,
1263 // we must use an indirection.
1264 if ((dest != nullptr) && (dest->OperGet() == GT_LCL_VAR) && (dest->TypeGet() != asgType))
1269 // !!! The destination could be on stack. !!!
1270 // This flag will let us choose the correct write barrier.
1271 destFlags = GTF_IND_TGTANYWHERE;
1274 else if (src->OperIsBlk())
1276 asgType = impNormStructType(structHnd);
1277 if (src->gtOper == GT_OBJ)
1279 assert(src->gtObj.gtClass == structHnd);
1282 else if (src->gtOper == GT_INDEX)
1284 asgType = impNormStructType(structHnd);
1285 assert(src->gtIndex.gtStructElemClass == structHnd);
1287 else if (src->gtOper == GT_MKREFANY)
1289 // Since we are assigning the result of a GT_MKREFANY,
1290 // "destAddr" must point to a refany.
1292 GenTreePtr destAddrClone;
1294 impCloneExpr(destAddr, &destAddrClone, structHnd, curLevel, pAfterStmt DEBUGARG("MKREFANY assignment"));
1296 assert(offsetof(CORINFO_RefAny, dataPtr) == 0);
1297 assert(destAddr->gtType == TYP_I_IMPL || destAddr->gtType == TYP_BYREF);
1298 GetZeroOffsetFieldMap()->Set(destAddr, GetFieldSeqStore()->CreateSingleton(GetRefanyDataField()));
1299 GenTreePtr ptrSlot = gtNewOperNode(GT_IND, TYP_I_IMPL, destAddr);
1300 GenTreeIntCon* typeFieldOffset = gtNewIconNode(offsetof(CORINFO_RefAny, type), TYP_I_IMPL);
1301 typeFieldOffset->gtFieldSeq = GetFieldSeqStore()->CreateSingleton(GetRefanyTypeField());
1302 GenTreePtr typeSlot =
1303 gtNewOperNode(GT_IND, TYP_I_IMPL, gtNewOperNode(GT_ADD, destAddr->gtType, destAddrClone, typeFieldOffset));
1305 // append the assign of the pointer value
1306 GenTreePtr asg = gtNewAssignNode(ptrSlot, src->gtOp.gtOp1);
1309 *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(asg, impCurStmtOffs));
1313 impAppendTree(asg, curLevel, impCurStmtOffs);
1316 // return the assign of the type value, to be appended
1317 return gtNewAssignNode(typeSlot, src->gtOp.gtOp2);
1319 else if (src->gtOper == GT_COMMA)
1321 // The second thing is the struct or its address.
1322 assert(varTypeIsStruct(src->gtOp.gtOp2) || src->gtOp.gtOp2->gtType == TYP_BYREF);
1325 *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(src->gtOp.gtOp1, impCurStmtOffs));
1329 impAppendTree(src->gtOp.gtOp1, curLevel, impCurStmtOffs); // do the side effect
1332 // Evaluate the second thing using recursion.
1333 return impAssignStructPtr(destAddr, src->gtOp.gtOp2, structHnd, curLevel, pAfterStmt, block);
1335 else if (src->IsLocal())
1337 asgType = src->TypeGet();
1339 else if (asgType == TYP_STRUCT)
1341 asgType = impNormStructType(structHnd);
1342 src->gtType = asgType;
1343 #ifdef LEGACY_BACKEND
1344 if (asgType == TYP_STRUCT)
1346 GenTree* srcAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, src);
1347 src = gtNewOperNode(GT_IND, TYP_STRUCT, srcAddr);
1351 if (dest == nullptr)
1353 // TODO-1stClassStructs: We shouldn't really need a block node as the destination
1354 // if this is a known struct type.
1355 if (asgType == TYP_STRUCT)
1357 dest = gtNewObjNode(structHnd, destAddr);
1358 gtSetObjGcInfo(dest->AsObj());
1359 // Although an obj as a call argument was always assumed to be a globRef
1360 // (which is itself overly conservative), that is not true of the operands
1361 // of a block assignment.
1362 dest->gtFlags &= ~GTF_GLOB_REF;
1363 dest->gtFlags |= (destAddr->gtFlags & GTF_GLOB_REF);
1365 else if (varTypeIsStruct(asgType))
1367 dest = new (this, GT_BLK) GenTreeBlk(GT_BLK, asgType, destAddr, genTypeSize(asgType));
1371 dest = gtNewOperNode(GT_IND, asgType, destAddr);
1376 dest->gtType = asgType;
1379 dest->gtFlags |= destFlags;
1380 destFlags = dest->gtFlags;
1382 // return an assignment node, to be appended
1383 GenTree* asgNode = gtNewAssignNode(dest, src);
1384 gtBlockOpInit(asgNode, dest, src, false);
1386 // TODO-1stClassStructs: Clean up the settings of GTF_DONT_CSE on the lhs
1388 if ((destFlags & GTF_DONT_CSE) == 0)
1390 dest->gtFlags &= ~(GTF_DONT_CSE);
1395 /*****************************************************************************
Given a struct value and the class handle for that structure, return
the expression for the address of that structure value.

willDeref - does the caller guarantee to dereference the pointer.
*/
1402 GenTreePtr Compiler::impGetStructAddr(GenTreePtr structVal,
1403 CORINFO_CLASS_HANDLE structHnd,
1407 assert(varTypeIsStruct(structVal) || eeIsValueClass(structHnd));
1409 var_types type = structVal->TypeGet();
1411 genTreeOps oper = structVal->gtOper;
1413 if (oper == GT_OBJ && willDeref)
1415 assert(structVal->gtObj.gtClass == structHnd);
1416 return (structVal->gtObj.Addr());
1418 else if (oper == GT_CALL || oper == GT_RET_EXPR || oper == GT_OBJ || oper == GT_MKREFANY)
1420 unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));
1422 impAssignTempGen(tmpNum, structVal, structHnd, curLevel);
1424 // The 'return value' is now the temp itself
1426 type = genActualType(lvaTable[tmpNum].TypeGet());
1427 GenTreePtr temp = gtNewLclvNode(tmpNum, type);
1428 temp = gtNewOperNode(GT_ADDR, TYP_BYREF, temp);
1431 else if (oper == GT_COMMA)
1433 assert(structVal->gtOp.gtOp2->gtType == type); // Second thing is the struct
1435 GenTreePtr oldTreeLast = impTreeLast;
1436 structVal->gtOp.gtOp2 = impGetStructAddr(structVal->gtOp.gtOp2, structHnd, curLevel, willDeref);
1437 structVal->gtType = TYP_BYREF;
1439 if (oldTreeLast != impTreeLast)
1441 // Some temp assignment statement was placed on the statement list
1442 // for Op2, but that would be out of order with op1, so we need to
1443 // spill op1 onto the statement list after whatever was last
1444 // before we recursed on Op2 (i.e. before whatever Op2 appended).
1445 impInsertTreeBefore(structVal->gtOp.gtOp1, impCurStmtOffs, oldTreeLast->gtNext);
1446 structVal->gtOp.gtOp1 = gtNewNothingNode();
1452 return (gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1455 //------------------------------------------------------------------------
1456 // impNormStructType: Given a (known to be) struct class handle structHnd, normalize its type,
1457 // and optionally determine the GC layout of the struct.
1460 // structHnd - The class handle for the struct type of interest.
1461 // gcLayout - (optional, default nullptr) - a BYTE pointer, allocated by the caller,
1462 // into which the gcLayout will be written.
1463 // pNumGCVars - (optional, default nullptr) - if non-null, a pointer to an unsigned,
1464 // which will be set to the number of GC fields in the struct.
1465 // pSimdBaseType - (optional, default nullptr) - if non-null, and the struct is a SIMD
1466 // type, set to the SIMD base type
1469 // The JIT type for the struct (e.g. TYP_STRUCT, or TYP_SIMD*).
1470 // The gcLayout will be returned using the pointers provided by the caller, if non-null.
1471 // It may also modify the compFloatingPointUsed flag if the type is a SIMD type.
1474 // The caller must set gcLayout to nullptr OR ensure that it is large enough
1475 // (see ICorStaticInfo::getClassGClayout in corinfo.h).
1478 // Normalizing the type involves examining the struct type to determine if it should
1479 // be modified to one that is handled specially by the JIT, possibly being a candidate
1480 // for full enregistration, e.g. TYP_SIMD16.
1482 var_types Compiler::impNormStructType(CORINFO_CLASS_HANDLE structHnd,
1484 unsigned* pNumGCVars,
1485 var_types* pSimdBaseType)
1487 assert(structHnd != NO_CLASS_HANDLE);
1489 const DWORD structFlags = info.compCompHnd->getClassAttribs(structHnd);
1490 var_types structType = TYP_STRUCT;
1492 #ifdef FEATURE_CORECLR
1493 const bool hasGCPtrs = (structFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0;
1495 // Desktop CLR won't report FLG_CONTAINS_GC_PTR for RefAnyClass - need to check explicitly.
1496 const bool isRefAny = (structHnd == impGetRefAnyClass());
1497 const bool hasGCPtrs = isRefAny || ((structFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0);
1501 // Check to see if this is a SIMD type.
1502 if (featureSIMD && !hasGCPtrs)
1504 unsigned originalSize = info.compCompHnd->getClassSize(structHnd);
1506 if ((originalSize >= minSIMDStructBytes()) && (originalSize <= maxSIMDStructBytes()))
1508 unsigned int sizeBytes;
1509 var_types simdBaseType = getBaseTypeAndSizeOfSIMDType(structHnd, &sizeBytes);
1510 if (simdBaseType != TYP_UNKNOWN)
1512 assert(sizeBytes == originalSize);
1513 structType = getSIMDTypeForSize(sizeBytes);
1514 if (pSimdBaseType != nullptr)
1516 *pSimdBaseType = simdBaseType;
1518 #ifdef _TARGET_AMD64_
1519 // Amd64: also indicate that we use floating point registers
1520 compFloatingPointUsed = true;
1525 #endif // FEATURE_SIMD
1527 // Fetch GC layout info if requested
1528 if (gcLayout != nullptr)
1530 unsigned numGCVars = info.compCompHnd->getClassGClayout(structHnd, gcLayout);
1532 // Verify that the quick test up above via the class attributes gave a
1533 // safe view of the type's GCness.
1535 // Note there are cases where hasGCPtrs is true but getClassGClayout
1536 // does not report any gc fields.
1537 assert(hasGCPtrs || (numGCVars == 0));
1539 if (pNumGCVars != nullptr)
1541 *pNumGCVars = numGCVars;
1546 // Can't safely ask for number of GC pointers without also
1547 // asking for layout.
1548 assert(pNumGCVars == nullptr);
1554 //****************************************************************************
1555 // Given TYP_STRUCT value 'structVal', make sure it is 'canonical', that is
1556 // it is either an OBJ or a MKREFANY node, or a node (e.g. GT_INDEX) that will be morphed.
1558 GenTreePtr Compiler::impNormStructVal(GenTreePtr structVal,
1559 CORINFO_CLASS_HANDLE structHnd,
1561 bool forceNormalization /*=false*/)
1563 assert(forceNormalization || varTypeIsStruct(structVal));
1564 assert(structHnd != NO_CLASS_HANDLE);
1565 var_types structType = structVal->TypeGet();
1566 bool makeTemp = false;
1567 if (structType == TYP_STRUCT)
1569 structType = impNormStructType(structHnd);
1571 bool alreadyNormalized = false;
1572 GenTreeLclVarCommon* structLcl = nullptr;
1574 genTreeOps oper = structVal->OperGet();
1577 // GT_RETURN and GT_MKREFANY don't capture the handle.
1581 alreadyNormalized = true;
1585 structVal->gtCall.gtRetClsHnd = structHnd;
1590 structVal->gtRetExpr.gtRetClsHnd = structHnd;
1595 structVal->gtArgPlace.gtArgPlaceClsHnd = structHnd;
1599 // This will be transformed to an OBJ later.
1600 alreadyNormalized = true;
1601 structVal->gtIndex.gtStructElemClass = structHnd;
1602 structVal->gtIndex.gtIndElemSize = info.compCompHnd->getClassSize(structHnd);
1606 // Wrap it in a GT_OBJ.
1607 structVal->gtType = structType;
1608 structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1613 structLcl = structVal->AsLclVarCommon();
1614 // Wrap it in a GT_OBJ.
1615 structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1622 // These should already have the appropriate type.
1623 assert(structVal->gtType == structType);
1624 alreadyNormalized = true;
1628 assert(structVal->gtType == structType);
1629 structVal = gtNewObjNode(structHnd, structVal->gtGetOp1());
1630 alreadyNormalized = true;
1635 assert(varTypeIsSIMD(structVal) && (structVal->gtType == structType));
1637 #endif // FEATURE_SIMD
1641 // The second thing is the block node.
1642 GenTree* blockNode = structVal->gtOp.gtOp2;
1643 assert(blockNode->gtType == structType);
1644 // It had better be a block node - any others should not occur here.
1645 assert(blockNode->OperIsBlk());
1647 // Sink the GT_COMMA below the blockNode addr.
1648 GenTree* blockNodeAddr = blockNode->gtOp.gtOp1;
1649 assert(blockNodeAddr->gtType == TYP_BYREF);
1650 GenTree* commaNode = structVal;
1651 commaNode->gtType = TYP_BYREF;
1652 commaNode->gtOp.gtOp2 = blockNodeAddr;
1653 blockNode->gtOp.gtOp1 = commaNode;
1654 structVal = blockNode;
1655 alreadyNormalized = true;
1660 assert(!"Unexpected node in impNormStructVal()");
1663 structVal->gtType = structType;
1664 GenTree* structObj = structVal;
1666 if (!alreadyNormalized || forceNormalization)
1670 unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));
1672 impAssignTempGen(tmpNum, structVal, structHnd, curLevel);
1674 // The structVal is now the temp itself
1676 structLcl = gtNewLclvNode(tmpNum, structType)->AsLclVarCommon();
1677 // TODO-1stClassStructs: Avoid always wrapping in GT_OBJ.
1678 structObj = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structLcl));
1680 else if (varTypeIsStruct(structType) && !structVal->OperIsBlk())
1682 // Wrap it in a GT_OBJ
1683 structObj = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1687 if (structLcl != nullptr)
// An OBJ on an ADDR(LCL_VAR) can never raise an exception,
// so we don't set GTF_EXCEPT here.
1691 if (!lvaIsImplicitByRefLocal(structLcl->gtLclNum))
1693 structObj->gtFlags &= ~GTF_GLOB_REF;
// In general, an OBJ is an indirection and could raise an exception.
1699 structObj->gtFlags |= GTF_EXCEPT;
1704 /******************************************************************************/
// Given a type token, generate code that will evaluate to the correct
// handle representation of that token (type handle, field handle, or method handle).
//
// For most cases, the handle is determined at compile-time, and the code
// generated is simply an embedded handle.
//
// Run-time lookup is required if the enclosing method is shared between instantiations
// and the token refers to formal type parameters whose instantiation is not known
// at compile-time.
1715 GenTreePtr Compiler::impTokenToHandle(CORINFO_RESOLVED_TOKEN* pResolvedToken,
1716 BOOL* pRuntimeLookup /* = NULL */,
1717 BOOL mustRestoreHandle /* = FALSE */,
1718 BOOL importParent /* = FALSE */)
1720 assert(!fgGlobalMorph);
1722 CORINFO_GENERICHANDLE_RESULT embedInfo;
1723 info.compCompHnd->embedGenericHandle(pResolvedToken, importParent, &embedInfo);
1727 *pRuntimeLookup = embedInfo.lookup.lookupKind.needsRuntimeLookup;
1730 if (mustRestoreHandle && !embedInfo.lookup.lookupKind.needsRuntimeLookup)
1732 switch (embedInfo.handleType)
1734 case CORINFO_HANDLETYPE_CLASS:
1735 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun((CORINFO_CLASS_HANDLE)embedInfo.compileTimeHandle);
1738 case CORINFO_HANDLETYPE_METHOD:
1739 info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun((CORINFO_METHOD_HANDLE)embedInfo.compileTimeHandle);
1742 case CORINFO_HANDLETYPE_FIELD:
1743 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(
1744 info.compCompHnd->getFieldClass((CORINFO_FIELD_HANDLE)embedInfo.compileTimeHandle));
1752 return impLookupToTree(pResolvedToken, &embedInfo.lookup, gtTokenToIconFlags(pResolvedToken->token),
1753 embedInfo.compileTimeHandle);
1756 GenTreePtr Compiler::impLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
1757 CORINFO_LOOKUP* pLookup,
1758 unsigned handleFlags,
1759 void* compileTimeHandle)
1761 if (!pLookup->lookupKind.needsRuntimeLookup)
1763 // No runtime lookup is required.
1764 // Access is direct or memory-indirect (of a fixed address) reference
1766 CORINFO_GENERIC_HANDLE handle = nullptr;
1767 void* pIndirection = nullptr;
1768 assert(pLookup->constLookup.accessType != IAT_PPVALUE);
1770 if (pLookup->constLookup.accessType == IAT_VALUE)
1772 handle = pLookup->constLookup.handle;
1774 else if (pLookup->constLookup.accessType == IAT_PVALUE)
1776 pIndirection = pLookup->constLookup.addr;
1778 return gtNewIconEmbHndNode(handle, pIndirection, handleFlags, 0, nullptr, compileTimeHandle);
1780 else if (compIsForInlining())
1782 // Don't import runtime lookups when inlining
1783 // Inlining has to be aborted in such a case
1784 compInlineResult->NoteFatal(InlineObservation::CALLSITE_GENERIC_DICTIONARY_LOOKUP);
1789 // Need to use dictionary-based access which depends on the typeContext
1790 // which is only available at runtime, not at compile-time.
1792 return impRuntimeLookupToTree(pResolvedToken, pLookup, compileTimeHandle);
1796 #ifdef FEATURE_READYTORUN_COMPILER
1797 GenTreePtr Compiler::impReadyToRunLookupToTree(CORINFO_CONST_LOOKUP* pLookup,
1798 unsigned handleFlags,
1799 void* compileTimeHandle)
1801 CORINFO_GENERIC_HANDLE handle = nullptr;
1802 void* pIndirection = nullptr;
1803 assert(pLookup->accessType != IAT_PPVALUE);
1805 if (pLookup->accessType == IAT_VALUE)
1807 handle = pLookup->handle;
1809 else if (pLookup->accessType == IAT_PVALUE)
1811 pIndirection = pLookup->addr;
1813 return gtNewIconEmbHndNode(handle, pIndirection, handleFlags, 0, nullptr, compileTimeHandle);
1816 GenTreePtr Compiler::impReadyToRunHelperToTree(
1817 CORINFO_RESOLVED_TOKEN* pResolvedToken,
1818 CorInfoHelpFunc helper,
1820 GenTreeArgList* args /* =NULL*/,
1821 CORINFO_LOOKUP_KIND* pGenericLookupKind /* =NULL. Only used with generics */)
1823 CORINFO_CONST_LOOKUP lookup;
1824 #if COR_JIT_EE_VERSION > 460
1825 if (!info.compCompHnd->getReadyToRunHelper(pResolvedToken, pGenericLookupKind, helper, &lookup))
1830 info.compCompHnd->getReadyToRunHelper(pResolvedToken, helper, &lookup);
1833 GenTreePtr op1 = gtNewHelperCallNode(helper, type, GTF_EXCEPT, args);
1835 op1->gtCall.setEntryPoint(lookup);
1841 GenTreePtr Compiler::impMethodPointer(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo)
1843 GenTreePtr op1 = nullptr;
1845 switch (pCallInfo->kind)
1848 op1 = new (this, GT_FTN_ADDR) GenTreeFptrVal(TYP_I_IMPL, pCallInfo->hMethod);
1850 #ifdef FEATURE_READYTORUN_COMPILER
1851 if (opts.IsReadyToRun())
1853 op1->gtFptrVal.gtEntryPoint = pCallInfo->codePointerLookup.constLookup;
1854 op1->gtFptrVal.gtLdftnResolvedToken = new (this, CMK_Unknown) CORINFO_RESOLVED_TOKEN;
1855 *op1->gtFptrVal.gtLdftnResolvedToken = *pResolvedToken;
1859 op1->gtFptrVal.gtEntryPoint.addr = nullptr;
1864 case CORINFO_CALL_CODE_POINTER:
1865 if (compIsForInlining())
1867 // Don't import runtime lookups when inlining
1868 // Inlining has to be aborted in such a case
1869 compInlineResult->NoteFatal(InlineObservation::CALLSITE_GENERIC_DICTIONARY_LOOKUP);
1873 op1 = impLookupToTree(pResolvedToken, &pCallInfo->codePointerLookup, GTF_ICON_FTN_ADDR, pCallInfo->hMethod);
1877 noway_assert(!"unknown call kind");
1884 //------------------------------------------------------------------------
1885 // getRuntimeContextTree: find pointer to context for runtime lookup.
1888 // kind - lookup kind.
1891 // Return GenTree pointer to generic shared context.
// Reports that the generic context is used (sets lvaGenericsContextUsed).
1896 GenTreePtr Compiler::getRuntimeContextTree(CORINFO_RUNTIME_LOOKUP_KIND kind)
1898 GenTreePtr ctxTree = nullptr;
// Collectible types require that, for shared generic code, if we use the generic context parameter,
// we report it. (This is a conservative approach; we could detect some cases, particularly when the
// context parameter is 'this', where we don't need the eager reporting logic.)
1903 lvaGenericsContextUsed = true;
1905 if (kind == CORINFO_LOOKUP_THISOBJ)
1908 ctxTree = gtNewLclvNode(info.compThisArg, TYP_REF);
1910 // Vtable pointer of this object
1911 ctxTree = gtNewOperNode(GT_IND, TYP_I_IMPL, ctxTree);
1912 ctxTree->gtFlags |= GTF_EXCEPT; // Null-pointer exception
1913 ctxTree->gtFlags |= GTF_IND_INVARIANT;
1917 assert(kind == CORINFO_LOOKUP_METHODPARAM || kind == CORINFO_LOOKUP_CLASSPARAM);
1919 ctxTree = gtNewLclvNode(info.compTypeCtxtArg, TYP_I_IMPL); // Exact method descriptor as passed in as last arg
1924 /*****************************************************************************/
/* Import a dictionary lookup to access a handle in code shared between
   generic instantiations.
   The lookup depends on the typeContext which is only available at
   runtime, and not at compile-time.
   pLookup->token1 and pLookup->token2 specify the handle that is needed.
   The cases are:

   1. pLookup->indirections == CORINFO_USEHELPER : Call a helper passing it the
      instantiation-specific handle, and the tokens to look up the handle.
   2. pLookup->indirections != CORINFO_USEHELPER :
      2a. pLookup->testForNull == false : Dereference the instantiation-specific handle
          to get the handle.
      2b. pLookup->testForNull == true : Dereference the instantiation-specific handle.
          If it is non-NULL, it is the handle required. Else, call a helper
          to look up the handle.
 */
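// In rough terms, the testForNull expansion below builds a QMARK that compares the
// dereferenced handle against null, keeps the handle when it is non-null, calls the
// runtime lookup helper otherwise, and spills the result to a temp that is returned.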
1942 GenTreePtr Compiler::impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
1943 CORINFO_LOOKUP* pLookup,
1944 void* compileTimeHandle)
// This method can only be called from the importer instance of the Compiler.
// In other words, it cannot be called by the instance of the Compiler for the inlinee.
1949 assert(!compIsForInlining());
1951 GenTreePtr ctxTree = getRuntimeContextTree(pLookup->lookupKind.runtimeLookupKind);
1953 #ifdef FEATURE_READYTORUN_COMPILER
1954 if (opts.IsReadyToRun())
1956 return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL,
1957 gtNewArgList(ctxTree), &pLookup->lookupKind);
1961 CORINFO_RUNTIME_LOOKUP* pRuntimeLookup = &pLookup->runtimeLookup;
1962 // It's available only via the run-time helper function
1963 if (pRuntimeLookup->indirections == CORINFO_USEHELPER)
1965 GenTreeArgList* helperArgs =
1966 gtNewArgList(ctxTree, gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_TOKEN_HDL, 0,
1967 nullptr, compileTimeHandle));
1969 return gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, GTF_EXCEPT, helperArgs);
1973 GenTreePtr slotPtrTree = ctxTree;
1975 if (pRuntimeLookup->testForNull)
1977 slotPtrTree = impCloneExpr(ctxTree, &ctxTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
1978 nullptr DEBUGARG("impRuntimeLookup slot"));
// Apply repeated indirections
1982 for (WORD i = 0; i < pRuntimeLookup->indirections; i++)
1986 slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
1987 slotPtrTree->gtFlags |= GTF_IND_NONFAULTING;
1988 slotPtrTree->gtFlags |= GTF_IND_INVARIANT;
1990 if (pRuntimeLookup->offsets[i] != 0)
1993 gtNewOperNode(GT_ADD, TYP_I_IMPL, slotPtrTree, gtNewIconNode(pRuntimeLookup->offsets[i], TYP_I_IMPL));
1997 // No null test required
1998 if (!pRuntimeLookup->testForNull)
2000 if (pRuntimeLookup->indirections == 0)
2005 slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
2006 slotPtrTree->gtFlags |= GTF_IND_NONFAULTING;
2008 if (!pRuntimeLookup->testForFixup)
2013 impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark0"));
2015 GenTreePtr op1 = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2016 nullptr DEBUGARG("impRuntimeLookup test"));
2017 op1 = impImplicitIorI4Cast(op1, TYP_INT); // downcast the pointer to a TYP_INT on 64-bit targets
2019 // Use a GT_AND to check for the lowest bit and indirect if it is set
2020 GenTreePtr testTree = gtNewOperNode(GT_AND, TYP_INT, op1, gtNewIconNode(1));
2021 GenTreePtr relop = gtNewOperNode(GT_EQ, TYP_INT, testTree, gtNewIconNode(0));
2022 relop->gtFlags |= GTF_RELOP_QMARK;
2024 op1 = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2025 nullptr DEBUGARG("impRuntimeLookup indir"));
2026 op1 = gtNewOperNode(GT_ADD, TYP_I_IMPL, op1, gtNewIconNode(-1, TYP_I_IMPL)); // subtract 1 from the pointer
2027 GenTreePtr indirTree = gtNewOperNode(GT_IND, TYP_I_IMPL, op1);
2028 GenTreePtr colon = new (this, GT_COLON) GenTreeColon(TYP_I_IMPL, slotPtrTree, indirTree);
2030 GenTreePtr qmark = gtNewQmarkNode(TYP_I_IMPL, relop, colon);
2032 unsigned tmp = lvaGrabTemp(true DEBUGARG("spilling QMark0"));
2033 impAssignTempGen(tmp, qmark, (unsigned)CHECK_SPILL_NONE);
2034 return gtNewLclvNode(tmp, TYP_I_IMPL);
2037 assert(pRuntimeLookup->indirections != 0);
2039 impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark1"));
2041 // Extract the handle
2042 GenTreePtr handle = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
2043 handle->gtFlags |= GTF_IND_NONFAULTING;
2045 GenTreePtr handleCopy = impCloneExpr(handle, &handle, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2046 nullptr DEBUGARG("impRuntimeLookup typehandle"));
2049 GenTreeArgList* helperArgs =
2050 gtNewArgList(ctxTree, gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_TOKEN_HDL, 0, nullptr,
2051 compileTimeHandle));
2052 GenTreePtr helperCall = gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, GTF_EXCEPT, helperArgs);
2054 // Check for null and possibly call helper
2055 GenTreePtr relop = gtNewOperNode(GT_NE, TYP_INT, handle, gtNewIconNode(0, TYP_I_IMPL));
2056 relop->gtFlags |= GTF_RELOP_QMARK;
2058 GenTreePtr colon = new (this, GT_COLON) GenTreeColon(TYP_I_IMPL,
2059 gtNewNothingNode(), // do nothing if nonnull
2062 GenTreePtr qmark = gtNewQmarkNode(TYP_I_IMPL, relop, colon);
2065 if (handleCopy->IsLocal())
2067 tmp = handleCopy->gtLclVarCommon.gtLclNum;
2071 tmp = lvaGrabTemp(true DEBUGARG("spilling QMark1"));
2074 impAssignTempGen(tmp, qmark, (unsigned)CHECK_SPILL_NONE);
2075 return gtNewLclvNode(tmp, TYP_I_IMPL);
2078 /******************************************************************************
2079 * Spills the stack at verCurrentState.esStack[level] and replaces it with a temp.
2080 * If tnum!=BAD_VAR_NUM, the temp var used to replace the tree is tnum,
2081 * else, grab a new temp.
2082 * For structs (which can be pushed on the stack using obj, etc),
2083 * special handling is needed
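* As a sketch of the effect (tN is a hypothetical temp number): before the call,
* verCurrentState.esStack[level].val holds the original tree; afterwards a
* statement "tN = tree" has been appended and the stack slot holds GT_LCL_VAR(tN).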
2086 struct RecursiveGuard
2091 m_pAddress = nullptr;
2098 *m_pAddress = false;
2102 void Init(bool* pAddress, bool bInitialize)
2104 assert(pAddress && *pAddress == false && "Recursive guard violation");
2105 m_pAddress = pAddress;
2117 bool Compiler::impSpillStackEntry(unsigned level,
2121 bool bAssertOnRecursion,
2128 RecursiveGuard guard;
2129 guard.Init(&impNestedStackSpill, bAssertOnRecursion);
2132 GenTreePtr tree = verCurrentState.esStack[level].val;
2134 /* Allocate a temp if we haven't been asked to use a particular one */
2136 if (tiVerificationNeeded)
2138 // Ignore bad temp requests (they will happen with bad code and will be
2139 // caught when importing the dest block)
2140 if ((tnum != BAD_VAR_NUM && tnum >= lvaCount) && verNeedsVerification())
2147 if (tnum != BAD_VAR_NUM && (tnum >= lvaCount))
2153 if (tnum == BAD_VAR_NUM)
2155 tnum = lvaGrabTemp(true DEBUGARG(reason));
2157 else if (tiVerificationNeeded && lvaTable[tnum].TypeGet() != TYP_UNDEF)
2159 // if verification is needed and tnum's type is incompatible with
2160 // type on that stack, we grab a new temp. This is safe since
2161 // we will throw a verification exception in the dest block.
2163 var_types valTyp = tree->TypeGet();
2164 var_types dstTyp = lvaTable[tnum].TypeGet();
2166 // if the two types are different, we return. This will only happen with bad code and will
2167 // be caught when importing the dest block. We still allow int/byref and float/double differences.
2168 if ((genActualType(valTyp) != genActualType(dstTyp)) &&
2170 #ifndef _TARGET_64BIT_
2171 (valTyp == TYP_I_IMPL && dstTyp == TYP_BYREF) || (valTyp == TYP_BYREF && dstTyp == TYP_I_IMPL) ||
2172 #endif // !_TARGET_64BIT_
2173 (varTypeIsFloating(dstTyp) && varTypeIsFloating(valTyp))))
2175 if (verNeedsVerification())
2182 /* Assign the spilled entry to the temp */
2183 impAssignTempGen(tnum, tree, verCurrentState.esStack[level].seTypeInfo.GetClassHandle(), level);
2185 // The tree type may be modified by impAssignTempGen, so use the type of the lclVar.
2186 var_types type = genActualType(lvaTable[tnum].TypeGet());
2187 GenTreePtr temp = gtNewLclvNode(tnum, type);
2188 verCurrentState.esStack[level].val = temp;
2193 /*****************************************************************************
2195 * Ensure that the stack has only spilled values
2198 void Compiler::impSpillStackEnsure(bool spillLeaves)
2200 assert(!spillLeaves || opts.compDbgCode);
2202 for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2204 GenTreePtr tree = verCurrentState.esStack[level].val;
2206 if (!spillLeaves && tree->OperIsLeaf())
2211 // Temps introduced by the importer itself don't need to be spilled
2213 bool isTempLcl = (tree->OperGet() == GT_LCL_VAR) && (tree->gtLclVarCommon.gtLclNum >= info.compLocalsCount);
2220 impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillStackEnsure"));
2224 void Compiler::impSpillEvalStack()
2226 for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2228 impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillEvalStack"));
2232 /*****************************************************************************
2234 * If the stack contains any trees with side effects in them, assign those
2235 * trees to temps and append the assignments to the statement list.
2236 * On return the stack is guaranteed to be empty.
2239 inline void Compiler::impEvalSideEffects()
2241 impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("impEvalSideEffects"));
2242 verCurrentState.esStackDepth = 0;
2245 /*****************************************************************************
2247 * If the stack contains any trees with side effects in them, assign those
2248 * trees to temps and replace them on the stack with refs to their temps.
2249 * [0..chkLevel) is the portion of the stack which will be checked and spilled.
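* For example, a call of the form
*     impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("reason"))
* spills every stack entry whose tree has global side effects (or that reads a
* local whose address has been taken) before new trees are appended.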
2252 inline void Compiler::impSpillSideEffects(bool spillGlobEffects, unsigned chkLevel DEBUGARG(const char* reason))
2254 assert(chkLevel != (unsigned)CHECK_SPILL_NONE);
2256 /* Before we make any appends to the tree list we must spill the
2257 * "special" side effects (GTF_ORDER_SIDEEFF on a GT_CATCH_ARG) */
2259 impSpillSpecialSideEff();
2261 if (chkLevel == (unsigned)CHECK_SPILL_ALL)
2263 chkLevel = verCurrentState.esStackDepth;
2266 assert(chkLevel <= verCurrentState.esStackDepth);
2268 unsigned spillFlags = spillGlobEffects ? GTF_GLOB_EFFECT : GTF_SIDE_EFFECT;
2270 for (unsigned i = 0; i < chkLevel; i++)
2272 GenTreePtr tree = verCurrentState.esStack[i].val;
2274 GenTreePtr lclVarTree;
2276 if ((tree->gtFlags & spillFlags) != 0 ||
2277 (spillGlobEffects && // Only consider the following when spillGlobEffects == TRUE
2278 !impIsAddressInLocal(tree, &lclVarTree) && // No need to spill the GT_ADDR node on a local.
2279 gtHasLocalsWithAddrOp(tree))) // Spill if we still see GT_LCL_VAR that contains lvHasLdAddrOp or
2280 // lvAddrTaken flag.
2282 impSpillStackEntry(i, BAD_VAR_NUM DEBUGARG(false) DEBUGARG(reason));
2287 /*****************************************************************************
2289 * If the stack contains any trees with special side effects in them, assign
2290 * those trees to temps and replace them on the stack with refs to their temps.
2293 inline void Compiler::impSpillSpecialSideEff()
2295 // Only exception objects need to be carefully handled
2297 if (!compCurBB->bbCatchTyp)
2302 for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2304 GenTreePtr tree = verCurrentState.esStack[level].val;
2305 // Make sure that if we have an exception object in the sub tree, we spill it.
2306 if (gtHasCatchArg(tree))
2308 impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillSpecialSideEff"));
2313 /*****************************************************************************
2315 * Spill all stack references to value classes (TYP_STRUCT nodes)
2318 void Compiler::impSpillValueClasses()
2320 for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2322 GenTreePtr tree = verCurrentState.esStack[level].val;
2324 if (fgWalkTreePre(&tree, impFindValueClasses) == WALK_ABORT)
2326 // Tree walk was aborted, which means that we found a
2327 // value class on the stack. Need to spill that
2330 impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillValueClasses"));
2335 /*****************************************************************************
2337 * Callback that checks if a tree node is TYP_STRUCT
2340 Compiler::fgWalkResult Compiler::impFindValueClasses(GenTreePtr* pTree, fgWalkData* data)
2342 fgWalkResult walkResult = WALK_CONTINUE;
2344 if ((*pTree)->gtType == TYP_STRUCT)
2346 // Abort the walk and indicate that we found a value class
2348 walkResult = WALK_ABORT;
2354 /*****************************************************************************
2356 * If the stack contains any trees with references to local #lclNum, assign
2357 * those trees to temps and replace their place on the stack with refs to their temps.
2361 void Compiler::impSpillLclRefs(ssize_t lclNum)
2363 /* Before we make any appends to the tree list we must spill the
2364 * "special" side effects (GTF_ORDER_SIDEEFF) - GT_CATCH_ARG */
2366 impSpillSpecialSideEff();
2368 for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2370 GenTreePtr tree = verCurrentState.esStack[level].val;
2372 /* If the tree may throw an exception, and the block has a handler,
2373 then we need to spill assignments to the local if the local is
2374 live on entry to the handler.
2375 Just spill 'em all without considering the liveness */
2377 bool xcptnCaught = ehBlockHasExnFlowDsc(compCurBB) && (tree->gtFlags & (GTF_CALL | GTF_EXCEPT));
2379 /* Skip the tree if it doesn't have an affected reference,
2380 unless xcptnCaught */
2382 if (xcptnCaught || gtHasRef(tree, lclNum, false))
2384 impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillLclRefs"));
2389 /*****************************************************************************
2391 * Push catch arg onto the stack.
2392 * If there are jumps to the beginning of the handler, insert basic block
2393 * and spill catch arg to a temp. Update the handler block if necessary.
2395 * Returns the basic block of the actual handler.
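* As a sketch of the multiple-predecessor case (tN is a hypothetical temp): a new
* block containing "tN = GT_CATCH_ARG" is inserted in front of the handler, and
* GT_LCL_VAR(tN) is pushed on the stack instead of the GT_CATCH_ARG node that the
* single-predecessor case pushes directly.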
2398 BasicBlock* Compiler::impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_HANDLE clsHnd)
2400 // Do not inject the basic block twice on reimport. This should be
2401 // hit only under JIT stress. See if the block is the one we injected.
2402 // Note that EH canonicalization can inject internal blocks here. We might
2403 // be able to re-use such a block (but we don't, right now).
2404 if ((hndBlk->bbFlags & (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET)) ==
2405 (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET))
2407 GenTreePtr tree = hndBlk->bbTreeList;
2409 if (tree != nullptr && tree->gtOper == GT_STMT)
2411 tree = tree->gtStmt.gtStmtExpr;
2412 assert(tree != nullptr);
2414 if ((tree->gtOper == GT_ASG) && (tree->gtOp.gtOp1->gtOper == GT_LCL_VAR) &&
2415 (tree->gtOp.gtOp2->gtOper == GT_CATCH_ARG))
2417 tree = gtNewLclvNode(tree->gtOp.gtOp1->gtLclVarCommon.gtLclNum, TYP_REF);
2419 impPushOnStack(tree, typeInfo(TI_REF, clsHnd));
2421 return hndBlk->bbNext;
2425 // If we get here, it must have been some other kind of internal block. It's possible that
2426 // someone prepended something to our injected block, but that's unlikely.
2429 /* Push the exception address value on the stack */
2430 GenTreePtr arg = new (this, GT_CATCH_ARG) GenTree(GT_CATCH_ARG, TYP_REF);
2432 /* Mark the node as having a side-effect - i.e. cannot be
2433 * moved around since it is tied to a fixed location (EAX) */
2434 arg->gtFlags |= GTF_ORDER_SIDEEFF;
2436 /* Spill GT_CATCH_ARG to a temp if there are jumps to the beginning of the handler */
2437 if (hndBlk->bbRefs > 1 || compStressCompile(STRESS_CATCH_ARG, 5))
2439 if (hndBlk->bbRefs == 1)
2444 /* Create extra basic block for the spill */
2445 BasicBlock* newBlk = fgNewBBbefore(BBJ_NONE, hndBlk, /* extendRegion */ true);
2446 newBlk->bbFlags |= BBF_IMPORTED | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET;
2447 newBlk->setBBWeight(hndBlk->bbWeight);
2448 newBlk->bbCodeOffs = hndBlk->bbCodeOffs;
2450 /* Account for the new link we are about to create */
2453 /* Spill into a temp */
2454 unsigned tempNum = lvaGrabTemp(false DEBUGARG("SpillCatchArg"));
2455 lvaTable[tempNum].lvType = TYP_REF;
2456 arg = gtNewTempAssign(tempNum, arg);
2458 hndBlk->bbStkTempsIn = tempNum;
2460 /* Report the debug info. impImportBlockCode won't treat
2461 * the actual handler as an exception block and thus won't do it for us. */
2462 if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES)
2464 impCurStmtOffs = newBlk->bbCodeOffs | IL_OFFSETX_STKBIT;
2465 arg = gtNewStmt(arg, impCurStmtOffs);
2468 fgInsertStmtAtEnd(newBlk, arg);
2470 arg = gtNewLclvNode(tempNum, TYP_REF);
2473 impPushOnStack(arg, typeInfo(TI_REF, clsHnd));
2478 /*****************************************************************************
2480 * Given a tree, clone it. *pClone is set to the cloned tree.
2481 * Returns the original tree if the cloning was easy,
2482 * else returns the temp to which the tree had to be spilled to.
2483 * If the tree has side-effects, it will be spilled to a temp.
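* Sketch of the effect (tN is a hypothetical temp): when the tree has no global
* side effects and is simple enough to clone, the original tree is returned and
* *pClone gets an independent copy; otherwise a statement "tN = tree" is appended
* and both the return value and *pClone are GT_LCL_VAR(tN) nodes.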
2486 GenTreePtr Compiler::impCloneExpr(GenTreePtr tree,
2488 CORINFO_CLASS_HANDLE structHnd,
2490 GenTreePtr* pAfterStmt DEBUGARG(const char* reason))
2492 if (!(tree->gtFlags & GTF_GLOB_EFFECT))
2494 GenTreePtr clone = gtClone(tree, true);
2503 /* Store the operand in a temp and return the temp */
2505 unsigned temp = lvaGrabTemp(true DEBUGARG(reason));
2507 // impAssignTempGen() may change tree->gtType to TYP_VOID for calls which
2508 // return a struct type. It also may modify the struct type to a more
2509 // specialized type (e.g. a SIMD type). So we will get the type from
2510 // the lclVar AFTER calling impAssignTempGen().
2512 impAssignTempGen(temp, tree, structHnd, curLevel, pAfterStmt, impCurStmtOffs);
2513 var_types type = genActualType(lvaTable[temp].TypeGet());
2515 *pClone = gtNewLclvNode(temp, type);
2516 return gtNewLclvNode(temp, type);
2519 /*****************************************************************************
2520 * Remember the IL offset (including stack-empty info) for the trees we will generate now.
2524 inline void Compiler::impCurStmtOffsSet(IL_OFFSET offs)
2526 if (compIsForInlining())
2528 GenTreePtr callStmt = impInlineInfo->iciStmt;
2529 assert(callStmt->gtOper == GT_STMT);
2530 impCurStmtOffs = callStmt->gtStmt.gtStmtILoffsx;
2534 assert(offs == BAD_IL_OFFSET || (offs & IL_OFFSETX_BITS) == 0);
2535 IL_OFFSETX stkBit = (verCurrentState.esStackDepth > 0) ? IL_OFFSETX_STKBIT : 0;
2536 impCurStmtOffs = offs | stkBit;
2540 /*****************************************************************************
2541 * Returns current IL offset with stack-empty and call-instruction info incorporated
2543 inline IL_OFFSETX Compiler::impCurILOffset(IL_OFFSET offs, bool callInstruction)
2545 if (compIsForInlining())
2547 return BAD_IL_OFFSET;
2551 assert(offs == BAD_IL_OFFSET || (offs & IL_OFFSETX_BITS) == 0);
2552 IL_OFFSETX stkBit = (verCurrentState.esStackDepth > 0) ? IL_OFFSETX_STKBIT : 0;
2553 IL_OFFSETX callInstructionBit = callInstruction ? IL_OFFSETX_CALLINSTRUCTIONBIT : 0;
2554 return offs | stkBit | callInstructionBit;
2558 /*****************************************************************************
2560 * Remember the instr offset for the statements
2562 * When we do impAppendTree(tree), we can't set tree->gtStmtLastILoffs to
2563 * impCurOpcOffs, if the append was done because of a partial stack spill,
2564 * as some of the trees corresponding to code up to impCurOpcOffs might
2565 * still be sitting on the stack.
2566 * So we delay marking of gtStmtLastILoffs until impNoteLastILoffs().
2567 * This should be called when an opcode finally/explicitly causes
2568 * impAppendTree(tree) to be called (as opposed to being called because of
2569 * a spill caused by the opcode)
2574 void Compiler::impNoteLastILoffs()
2576 if (impLastILoffsStmt == nullptr)
2578 // We should have added a statement for the current basic block
2579 // Is this assert correct ?
2581 assert(impTreeLast);
2582 assert(impTreeLast->gtOper == GT_STMT);
2584 impTreeLast->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
2588 impLastILoffsStmt->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
2589 impLastILoffsStmt = nullptr;
2595 /*****************************************************************************
2596 * We don't create any GenTree (excluding spills) for a branch.
2597 * For debugging info, we need a placeholder so that we can note
2598 * the IL offset in gtStmt.gtStmtOffs. So append an empty statement.
2601 void Compiler::impNoteBranchOffs()
2603 if (opts.compDbgCode)
2605 impAppendTree(gtNewNothingNode(), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
2609 /*****************************************************************************
2610 * Locate the next stmt boundary for which we need to record info.
2611 * We will have to spill the stack at such boundaries if it is not already empty.
2613 * Returns the next stmt boundary (after the start of the block)
2616 unsigned Compiler::impInitBlockLineInfo()
2618 /* Assume the block does not correspond with any IL offset. This prevents
2619 us from reporting extra offsets. Extra mappings can cause confusing
2620 stepping, especially if the extra mapping is a jump-target, and the
2621 debugger does not ignore extra mappings, but instead rewinds to the
2622 nearest known offset */
2624 impCurStmtOffsSet(BAD_IL_OFFSET);
2626 if (compIsForInlining())
2631 IL_OFFSET blockOffs = compCurBB->bbCodeOffs;
2633 if ((verCurrentState.esStackDepth == 0) && (info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES))
2635 impCurStmtOffsSet(blockOffs);
2638 if (false && (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES))
2640 impCurStmtOffsSet(blockOffs);
2643 /* Always report IL offset 0 or some tests get confused.
2644 Probably a good idea anyway */
2648 impCurStmtOffsSet(blockOffs);
2651 if (!info.compStmtOffsetsCount)
2656 /* Find the lowest explicit stmt boundary within the block */
2658 /* Start looking at an entry that is based on our instr offset */
2660 unsigned index = (info.compStmtOffsetsCount * blockOffs) / info.compILCodeSize;
2662 if (index >= info.compStmtOffsetsCount)
2664 index = info.compStmtOffsetsCount - 1;
2667 /* If we've guessed too far, back up */
2669 while (index > 0 && info.compStmtOffsets[index - 1] >= blockOffs)
2674 /* If we guessed short, advance ahead */
2676 while (info.compStmtOffsets[index] < blockOffs)
2680 if (index == info.compStmtOffsetsCount)
2682 return info.compStmtOffsetsCount;
2686 assert(index < info.compStmtOffsetsCount);
2688 if (info.compStmtOffsets[index] == blockOffs)
2690 /* There is an explicit boundary for the start of this basic block.
2691 So we will start with bbCodeOffs. Else we will wait until we
2692 get to the next explicit boundary */
2694 impCurStmtOffsSet(blockOffs);
2702 /*****************************************************************************/
2704 static inline bool impOpcodeIsCallOpcode(OPCODE opcode)
2718 /*****************************************************************************/
2720 static inline bool impOpcodeIsCallSiteBoundary(OPCODE opcode)
2737 /*****************************************************************************/
2739 // One might think it is worth caching these values, but results indicate that it isn't worth it.
2741 // In addition, caching them causes SuperPMI to be unable to completely
2742 // encapsulate an individual method context.
2743 CORINFO_CLASS_HANDLE Compiler::impGetRefAnyClass()
2745 CORINFO_CLASS_HANDLE refAnyClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPED_BYREF);
2746 assert(refAnyClass != (CORINFO_CLASS_HANDLE) nullptr);
2750 CORINFO_CLASS_HANDLE Compiler::impGetTypeHandleClass()
2752 CORINFO_CLASS_HANDLE typeHandleClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPE_HANDLE);
2753 assert(typeHandleClass != (CORINFO_CLASS_HANDLE) nullptr);
2754 return typeHandleClass;
2757 CORINFO_CLASS_HANDLE Compiler::impGetRuntimeArgumentHandle()
2759 CORINFO_CLASS_HANDLE argIteratorClass = info.compCompHnd->getBuiltinClass(CLASSID_ARGUMENT_HANDLE);
2760 assert(argIteratorClass != (CORINFO_CLASS_HANDLE) nullptr);
2761 return argIteratorClass;
2764 CORINFO_CLASS_HANDLE Compiler::impGetStringClass()
2766 CORINFO_CLASS_HANDLE stringClass = info.compCompHnd->getBuiltinClass(CLASSID_STRING);
2767 assert(stringClass != (CORINFO_CLASS_HANDLE) nullptr);
2771 CORINFO_CLASS_HANDLE Compiler::impGetObjectClass()
2773 CORINFO_CLASS_HANDLE objectClass = info.compCompHnd->getBuiltinClass(CLASSID_SYSTEM_OBJECT);
2774 assert(objectClass != (CORINFO_CLASS_HANDLE) nullptr);
2778 /*****************************************************************************
2779 * "&var" can be used either as TYP_BYREF or TYP_I_IMPL, but we
2780 * set its type to TYP_BYREF when we create it. We know if it can be
2781 * changed to TYP_I_IMPL only at the point where we use it
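* For example (an illustrative situation, not an exhaustive rule): "ldloca.s V_0"
* pushes an address typed TYP_BYREF; if that address is later consumed as a native
* int (e.g. by integer arithmetic or a comparison against a TYP_I_IMPL value),
* this routine retypes the address node to TYP_I_IMPL at that use.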
2785 void Compiler::impBashVarAddrsToI(GenTreePtr tree1, GenTreePtr tree2)
2787 if (tree1->IsVarAddr())
2789 tree1->gtType = TYP_I_IMPL;
2792 if (tree2 && tree2->IsVarAddr())
2794 tree2->gtType = TYP_I_IMPL;
2798 /*****************************************************************************
2799 * TYP_INT and TYP_I_IMPL can be used almost interchangeably, but we want
2800 * to make that an explicit cast in our trees, so any implicit casts that
2801 * exist in the IL (at least on 64-bit where TYP_I_IMPL != TYP_INT) are
2802 * turned into explicit casts here.
2803 * We also allow an implicit conversion of a ldnull into a TYP_I_IMPL(0)
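* A minimal sketch of the 64-bit behavior (the inputs are illustrative):
*     INT-typed tree, wanted TYP_I_IMPL             -> wrapped in GT_CAST(TYP_I_IMPL, ...)
*     BYREF/REF/I_IMPL tree, wanted TYP_INT         -> wrapped in GT_CAST(TYP_INT, ...)
*     GT_CNS_INT(0) of type TYP_REF, wanted I_IMPL  -> the constant is simply retyped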
2806 GenTreePtr Compiler::impImplicitIorI4Cast(GenTreePtr tree, var_types dstTyp)
2808 var_types currType = genActualType(tree->gtType);
2809 var_types wantedType = genActualType(dstTyp);
2811 if (wantedType != currType)
2813 // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
2814 if ((tree->OperGet() == GT_CNS_INT) && varTypeIsI(dstTyp))
2816 if (!varTypeIsI(tree->gtType) || ((tree->gtType == TYP_REF) && (tree->gtIntCon.gtIconVal == 0)))
2818 tree->gtType = TYP_I_IMPL;
2821 #ifdef _TARGET_64BIT_
2822 else if (varTypeIsI(wantedType) && (currType == TYP_INT))
2824 // Note that this allows TYP_INT to be cast to a TYP_I_IMPL when wantedType is a TYP_BYREF or TYP_REF
2825 tree = gtNewCastNode(TYP_I_IMPL, tree, TYP_I_IMPL);
2827 else if ((wantedType == TYP_INT) && varTypeIsI(currType))
2829 // Note that this allows TYP_BYREF or TYP_REF to be cast to a TYP_INT
2830 tree = gtNewCastNode(TYP_INT, tree, TYP_INT);
2832 #endif // _TARGET_64BIT_
2838 /*****************************************************************************
2839 * TYP_FLOAT and TYP_DOUBLE can be used almost interchangeably in some cases,
2840 * but we want to make that an explicit cast in our trees, so any implicit casts
2841 * that exist in the IL are turned into explicit casts here.
2844 GenTreePtr Compiler::impImplicitR4orR8Cast(GenTreePtr tree, var_types dstTyp)
2846 #ifndef LEGACY_BACKEND
2847 if (varTypeIsFloating(tree) && varTypeIsFloating(dstTyp) && (dstTyp != tree->gtType))
2849 tree = gtNewCastNode(dstTyp, tree, dstTyp);
2851 #endif // !LEGACY_BACKEND
2856 //------------------------------------------------------------------------
2857 // impInitializeArrayIntrinsic: Attempts to replace a call to InitializeArray
2858 // with a GT_COPYBLK node.
2861 // sig - The InitializeArray signature.
2864 // A pointer to the newly created GT_COPYBLK node if the replacement succeeds or
2865 // nullptr otherwise.
2868 // The function recognizes the following IL pattern:
2869 // ldc <length> or a list of ldc <lower bound>/<length>
2872 // ldtoken <field handle>
2873 // call InitializeArray
2874 // The lower bounds need not be constant except when the array rank is 1.
2875 // The function recognizes all kinds of arrays thus enabling a small runtime
2876 // such as CoreRT to skip providing an implementation for InitializeArray.
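//
//    When the pattern matches, the call is replaced by a block copy that behaves
//    roughly like the following sketch ('array' stands for the array local,
//    'dataOffset' for the offset of the first element):
//
//        memcpy((BYTE*)array + dataOffset, initData, blkSize);
//
//    expressed as a block assignment whose source is an indirection of the static
//    initialization data address.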
2878 GenTreePtr Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig)
2880 assert(sig->numArgs == 2);
2882 GenTreePtr fieldTokenNode = impStackTop(0).val;
2883 GenTreePtr arrayLocalNode = impStackTop(1).val;
2886 // Verify that the field token is known and valid. Note that it's also
2887 // possible for the token to come from reflection, in which case we cannot do
2888 // the optimization and must therefore revert to calling the helper. You can
2889 // see an example of this in bvt\DynIL\initarray2.exe (in Main).
2892 // Check to see if the ldtoken helper call is what we see here.
2893 if (fieldTokenNode->gtOper != GT_CALL || (fieldTokenNode->gtCall.gtCallType != CT_HELPER) ||
2894 (fieldTokenNode->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD)))
2899 // Strip helper call away
2900 fieldTokenNode = fieldTokenNode->gtCall.gtCallArgs->Current();
2902 if (fieldTokenNode->gtOper == GT_IND)
2904 fieldTokenNode = fieldTokenNode->gtOp.gtOp1;
2907 // Check for constant
2908 if (fieldTokenNode->gtOper != GT_CNS_INT)
2913 CORINFO_FIELD_HANDLE fieldToken = (CORINFO_FIELD_HANDLE)fieldTokenNode->gtIntCon.gtCompileTimeHandle;
2914 if (!fieldTokenNode->IsIconHandle(GTF_ICON_FIELD_HDL) || (fieldToken == nullptr))
2920 // We need to get the number of elements in the array and the size of each element.
2921 // We verify that the newarr statement is exactly what we expect it to be.
2922 // If it's not then we just return NULL and we don't optimize this call
2926 // It is possible that we don't have any statements in the block yet
2928 if (impTreeLast->gtOper != GT_STMT)
2930 assert(impTreeLast->gtOper == GT_BEG_STMTS);
2935 // We start by looking at the last statement, making sure it's an assignment, and
2936 // that the target of the assignment is the array passed to InitializeArray.
2938 GenTreePtr arrayAssignment = impTreeLast->gtStmt.gtStmtExpr;
2939 if ((arrayAssignment->gtOper != GT_ASG) || (arrayAssignment->gtOp.gtOp1->gtOper != GT_LCL_VAR) ||
2940 (arrayLocalNode->gtOper != GT_LCL_VAR) ||
2941 (arrayAssignment->gtOp.gtOp1->gtLclVarCommon.gtLclNum != arrayLocalNode->gtLclVarCommon.gtLclNum))
2947 // Make sure that the object being assigned is a helper call.
2950 GenTreePtr newArrayCall = arrayAssignment->gtOp.gtOp2;
2951 if ((newArrayCall->gtOper != GT_CALL) || (newArrayCall->gtCall.gtCallType != CT_HELPER))
2957 // Verify that it is one of the new array helpers.
2960 bool isMDArray = false;
2962 if (newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_DIRECT) &&
2963 newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_OBJ) &&
2964 newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_VC) &&
2965 newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_ALIGN8)
2966 #ifdef FEATURE_READYTORUN_COMPILER
2967 && newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1)
2971 #if COR_JIT_EE_VERSION > 460
2972 if (newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEW_MDARR_NONVARARG))
2981 CORINFO_CLASS_HANDLE arrayClsHnd = (CORINFO_CLASS_HANDLE)newArrayCall->gtCall.compileTimeHelperArgumentHandle;
2984 // Make sure we found a compile time handle to the array
2993 S_UINT32 numElements;
2997 rank = info.compCompHnd->getArrayRank(arrayClsHnd);
3004 GenTreeArgList* tokenArg = newArrayCall->gtCall.gtCallArgs;
3005 assert(tokenArg != nullptr);
3006 GenTreeArgList* numArgsArg = tokenArg->Rest();
3007 assert(numArgsArg != nullptr);
3008 GenTreeArgList* argsArg = numArgsArg->Rest();
3009 assert(argsArg != nullptr);
3012 // The number of arguments should be a constant between 1 and 64. The rank can't be 0
3013 // so at least one length must be present and the rank can't exceed 32 so there can
3014 // be at most 64 arguments - 32 lengths and 32 lower bounds.
3017 if ((!numArgsArg->Current()->IsCnsIntOrI()) || (numArgsArg->Current()->AsIntCon()->IconValue() < 1) ||
3018 (numArgsArg->Current()->AsIntCon()->IconValue() > 64))
3023 unsigned numArgs = static_cast<unsigned>(numArgsArg->Current()->AsIntCon()->IconValue());
3024 bool lowerBoundsSpecified;
3026 if (numArgs == rank * 2)
3028 lowerBoundsSpecified = true;
3030 else if (numArgs == rank)
3032 lowerBoundsSpecified = false;
3035 // If the rank is 1 and a lower bound isn't specified then the runtime creates
3036 // a SDArray. Note that even if a lower bound is specified it can be 0 and then
3037 // we get a SDArray as well, see the for loop below.
3051 // The rank is known to be at least 1 so we can start with numElements being 1
3052 // to avoid the need to special case the first dimension.
3055 numElements = S_UINT32(1);
3059 static bool IsArgsFieldInit(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs)
3061 return (tree->OperGet() == GT_ASG) && IsArgsFieldIndir(tree->gtGetOp1(), index, lvaNewObjArrayArgs) &&
3062 IsArgsAddr(tree->gtGetOp1()->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs);
3065 static bool IsArgsFieldIndir(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs)
3067 return (tree->OperGet() == GT_IND) && (tree->gtGetOp1()->OperGet() == GT_ADD) &&
3068 (tree->gtGetOp1()->gtGetOp2()->IsIntegralConst(sizeof(INT32) * index)) &&
3069 IsArgsAddr(tree->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs);
3072 static bool IsArgsAddr(GenTree* tree, unsigned lvaNewObjArrayArgs)
3074 return (tree->OperGet() == GT_ADDR) && (tree->gtGetOp1()->OperGet() == GT_LCL_VAR) &&
3075 (tree->gtGetOp1()->AsLclVar()->GetLclNum() == lvaNewObjArrayArgs);
3078 static bool IsComma(GenTree* tree)
3080 return (tree != nullptr) && (tree->OperGet() == GT_COMMA);
3084 unsigned argIndex = 0;
3087 for (comma = argsArg->Current(); Match::IsComma(comma); comma = comma->gtGetOp2())
3089 if (lowerBoundsSpecified)
3092 // In general lower bounds can be ignored because they're not needed to
3093 // calculate the total number of elements. But for single dimensional arrays
3094 // we need to know if the lower bound is 0 because in this case the runtime
3095 // creates a SDArray and this affects the way the array data offset is calculated.
3100 GenTree* lowerBoundAssign = comma->gtGetOp1();
3101 assert(Match::IsArgsFieldInit(lowerBoundAssign, argIndex, lvaNewObjArrayArgs));
3102 GenTree* lowerBoundNode = lowerBoundAssign->gtGetOp2();
3104 if (lowerBoundNode->IsIntegralConst(0))
3110 comma = comma->gtGetOp2();
3114 GenTree* lengthNodeAssign = comma->gtGetOp1();
3115 assert(Match::IsArgsFieldInit(lengthNodeAssign, argIndex, lvaNewObjArrayArgs));
3116 GenTree* lengthNode = lengthNodeAssign->gtGetOp2();
3118 if (!lengthNode->IsCnsIntOrI())
3123 numElements *= S_SIZE_T(lengthNode->AsIntCon()->IconValue());
3127 assert((comma != nullptr) && Match::IsArgsAddr(comma, lvaNewObjArrayArgs));
3129 if (argIndex != numArgs)
3137 // Make sure there are exactly two arguments: the array class and
3138 // the number of elements.
3141 GenTreePtr arrayLengthNode;
3143 GenTreeArgList* args = newArrayCall->gtCall.gtCallArgs;
3144 #ifdef FEATURE_READYTORUN_COMPILER
3145 if (newArrayCall->gtCall.gtCallMethHnd == eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1))
3147 // Array length is 1st argument for readytorun helper
3148 arrayLengthNode = args->Current();
3153 // Array length is 2nd argument for regular helper
3154 arrayLengthNode = args->Rest()->Current();
3158 // Make sure that the number of elements looks valid.
3160 if (arrayLengthNode->gtOper != GT_CNS_INT)
3165 numElements = S_SIZE_T(arrayLengthNode->gtIntCon.gtIconVal);
3167 if (!info.compCompHnd->isSDArray(arrayClsHnd))
3173 CORINFO_CLASS_HANDLE elemClsHnd;
3174 var_types elementType = JITtype2varType(info.compCompHnd->getChildType(arrayClsHnd, &elemClsHnd));
3177 // Note that genTypeSize will return zero for non primitive types, which is exactly
3178 // what we want (size will then be 0, and we will catch this in the conditional below).
3179 // Note that we don't expect this to fail for valid binaries, so we assert in the
3180 // non-verification case (the verification case should not assert but rather correctly
3181 // handle bad binaries). This assert is not guarding any specific invariant, but rather
3182 // saying that we don't expect this to happen, and if it is hit, we need to investigate
3186 S_UINT32 elemSize(genTypeSize(elementType));
3187 S_UINT32 size = elemSize * S_UINT32(numElements);
3189 if (size.IsOverflow())
3194 if ((size.Value() == 0) || (varTypeIsGC(elementType)))
3196 assert(verNeedsVerification());
3200 void* initData = info.compCompHnd->getArrayInitializationData(fieldToken, size.Value());
3207 // At this point we are ready to commit to implementing the InitializeArray
3208 // intrinsic using a struct assignment. Pop the arguments from the stack and
3209 // return the struct assignment node.
3215 const unsigned blkSize = size.Value();
3220 unsigned dataOffset = eeGetMDArrayDataOffset(elementType, rank);
3222 dst = gtNewOperNode(GT_ADD, TYP_BYREF, arrayLocalNode, gtNewIconNode(dataOffset, TYP_I_IMPL));
3226 dst = gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewIndexRef(elementType, arrayLocalNode, gtNewIconNode(0)));
3228 GenTreePtr blk = gtNewBlockVal(dst, blkSize);
3229 GenTreePtr srcAddr = gtNewIconHandleNode((size_t)initData, GTF_ICON_STATIC_HDL);
3230 GenTreePtr src = gtNewOperNode(GT_IND, TYP_STRUCT, srcAddr);
3232 return gtNewBlkOpNode(blk, // dst
3239 /*****************************************************************************/
3240 // Returns the GenTree that should be used to do the intrinsic instead of the call.
3241 // Returns NULL if an intrinsic cannot be used
3243 GenTreePtr Compiler::impIntrinsic(CORINFO_CLASS_HANDLE clsHnd,
3244 CORINFO_METHOD_HANDLE method,
3245 CORINFO_SIG_INFO* sig,
3249 CorInfoIntrinsics* pIntrinsicID)
3251 bool mustExpand = false;
3252 #if COR_JIT_EE_VERSION > 460
3253 CorInfoIntrinsics intrinsicID = info.compCompHnd->getIntrinsicID(method, &mustExpand);
3255 CorInfoIntrinsics intrinsicID = info.compCompHnd->getIntrinsicID(method);
3257 *pIntrinsicID = intrinsicID;
3259 #ifndef _TARGET_ARM_
3260 genTreeOps interlockedOperator;
3263 if (intrinsicID == CORINFO_INTRINSIC_StubHelpers_GetStubContext)
3265 // must be done regardless of DbgCode and MinOpts
3266 return gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL);
3268 #ifdef _TARGET_64BIT_
3269 if (intrinsicID == CORINFO_INTRINSIC_StubHelpers_GetStubContextAddr)
3271 // must be done regardless of DbgCode and MinOpts
3272 return gtNewOperNode(GT_ADDR, TYP_I_IMPL, gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL));
3275 assert(intrinsicID != CORINFO_INTRINSIC_StubHelpers_GetStubContextAddr);
3278 GenTreePtr retNode = nullptr;
3281 // We disable the inlining of intrinsics for MinOpts.
3283 if (!mustExpand && (opts.compDbgCode || opts.MinOpts()))
3285 *pIntrinsicID = CORINFO_INTRINSIC_Illegal;
3289 // Currently we don't have CORINFO_INTRINSIC_Exp because it does not
3290 // seem to work properly for Infinity values, and we don't do
3291 // CORINFO_INTRINSIC_Pow because it needs a helper which we currently don't have.
3293 var_types callType = JITtype2varType(sig->retType);
3295 /* First do the intrinsics which are always smaller than a call */
3297 switch (intrinsicID)
3299 GenTreePtr op1, op2;
3301 case CORINFO_INTRINSIC_Sin:
3302 case CORINFO_INTRINSIC_Sqrt:
3303 case CORINFO_INTRINSIC_Abs:
3304 case CORINFO_INTRINSIC_Cos:
3305 case CORINFO_INTRINSIC_Round:
3306 case CORINFO_INTRINSIC_Cosh:
3307 case CORINFO_INTRINSIC_Sinh:
3308 case CORINFO_INTRINSIC_Tan:
3309 case CORINFO_INTRINSIC_Tanh:
3310 case CORINFO_INTRINSIC_Asin:
3311 case CORINFO_INTRINSIC_Acos:
3312 case CORINFO_INTRINSIC_Atan:
3313 case CORINFO_INTRINSIC_Atan2:
3314 case CORINFO_INTRINSIC_Log10:
3315 case CORINFO_INTRINSIC_Pow:
3316 case CORINFO_INTRINSIC_Exp:
3317 case CORINFO_INTRINSIC_Ceiling:
3318 case CORINFO_INTRINSIC_Floor:
3320 // These are math intrinsics
3322 assert(callType != TYP_STRUCT);
3326 #if defined(LEGACY_BACKEND)
3327 if (IsTargetIntrinsic(intrinsicID))
3328 #elif !defined(_TARGET_X86_)
3329 // Intrinsics that are not implemented directly by target instructions will
3330 // be re-materialized as user calls in the rationalizer. For prefixed tail calls,
3331 // don't do this optimization, because
3332 // a) of back-compatibility reasons on desktop .NET 4.6 / 4.6.1, and
3333 // b) it would be a non-trivial task, or too late, to re-materialize a surviving
3334 // tail-prefixed GT_INTRINSIC as a tail call in the rationalizer.
3335 if (!IsIntrinsicImplementedByUserCall(intrinsicID) || !tailCall)
3337 // On x86 RyuJIT, importing intrinsics that are implemented as user calls can cause incorrect calculation
3338 // of the depth of the stack if these intrinsics are used as arguments to another call. This causes bad
3339 // code generation for certain EH constructs.
3340 if (!IsIntrinsicImplementedByUserCall(intrinsicID))
3343 switch (sig->numArgs)
3346 op1 = impPopStack().val;
3348 #if FEATURE_X87_DOUBLES
3350 // X87 stack doesn't differentiate between float/double
3351 // so it doesn't need a cast, but everybody else does
3352 // Just double check it is at least a FP type
3353 noway_assert(varTypeIsFloating(op1));
3355 #else // FEATURE_X87_DOUBLES
3357 if (op1->TypeGet() != callType)
3359 op1 = gtNewCastNode(callType, op1, callType);
3362 #endif // FEATURE_X87_DOUBLES
3364 op1 = new (this, GT_INTRINSIC)
3365 GenTreeIntrinsic(genActualType(callType), op1, intrinsicID, method);
3369 op2 = impPopStack().val;
3370 op1 = impPopStack().val;
3372 #if FEATURE_X87_DOUBLES
3374 // X87 stack doesn't differentiate between float/double
3375 // so it doesn't need a cast, but everybody else does
3376 // Just double check it is at least a FP type
3377 noway_assert(varTypeIsFloating(op2));
3378 noway_assert(varTypeIsFloating(op1));
3380 #else // FEATURE_X87_DOUBLES
3382 if (op2->TypeGet() != callType)
3384 op2 = gtNewCastNode(callType, op2, callType);
3386 if (op1->TypeGet() != callType)
3388 op1 = gtNewCastNode(callType, op1, callType);
3391 #endif // FEATURE_X87_DOUBLES
3393 op1 = new (this, GT_INTRINSIC)
3394 GenTreeIntrinsic(genActualType(callType), op1, op2, intrinsicID, method);
3398 NO_WAY("Unsupported number of args for Math Intrinsic");
3401 #ifndef LEGACY_BACKEND
3402 if (IsIntrinsicImplementedByUserCall(intrinsicID))
3404 op1->gtFlags |= GTF_CALL;
3412 #ifdef _TARGET_XARCH_
3413 // TODO-ARM-CQ: reenable treating Interlocked operation as intrinsic
3414 case CORINFO_INTRINSIC_InterlockedAdd32:
3415 interlockedOperator = GT_LOCKADD;
3416 goto InterlockedBinOpCommon;
3417 case CORINFO_INTRINSIC_InterlockedXAdd32:
3418 interlockedOperator = GT_XADD;
3419 goto InterlockedBinOpCommon;
3420 case CORINFO_INTRINSIC_InterlockedXchg32:
3421 interlockedOperator = GT_XCHG;
3422 goto InterlockedBinOpCommon;
3424 #ifdef _TARGET_AMD64_
3425 case CORINFO_INTRINSIC_InterlockedAdd64:
3426 interlockedOperator = GT_LOCKADD;
3427 goto InterlockedBinOpCommon;
3428 case CORINFO_INTRINSIC_InterlockedXAdd64:
3429 interlockedOperator = GT_XADD;
3430 goto InterlockedBinOpCommon;
3431 case CORINFO_INTRINSIC_InterlockedXchg64:
3432 interlockedOperator = GT_XCHG;
3433 goto InterlockedBinOpCommon;
3434 #endif // _TARGET_AMD64_
3436 InterlockedBinOpCommon:
3437 assert(callType != TYP_STRUCT);
3438 assert(sig->numArgs == 2);
3440 op2 = impPopStack().val;
3441 op1 = impPopStack().val;
3447 // field (for example)
3449 // In the case where the first argument is the address of a local, we might
3450 // want to make this *not* make the var address-taken -- but atomic instructions
3451 // on a local are probably pretty useless anyway, so we probably don't care.
3453 op1 = gtNewOperNode(interlockedOperator, genActualType(callType), op1, op2);
3454 op1->gtFlags |= GTF_GLOB_EFFECT;
3457 #endif // _TARGET_XARCH_
3459 case CORINFO_INTRINSIC_MemoryBarrier:
3461 assert(sig->numArgs == 0);
3463 op1 = new (this, GT_MEMORYBARRIER) GenTree(GT_MEMORYBARRIER, TYP_VOID);
3464 op1->gtFlags |= GTF_GLOB_EFFECT;
3468 #ifdef _TARGET_XARCH_
3469 // TODO-ARM-CQ: reenable treating InterlockedCmpXchg32 operation as intrinsic
3470 case CORINFO_INTRINSIC_InterlockedCmpXchg32:
3471 #ifdef _TARGET_AMD64_
3472 case CORINFO_INTRINSIC_InterlockedCmpXchg64:
3475 assert(callType != TYP_STRUCT);
3476 assert(sig->numArgs == 3);
3479 op3 = impPopStack().val; // comparand
3480 op2 = impPopStack().val; // value
3481 op1 = impPopStack().val; // location
3483 GenTreePtr node = new (this, GT_CMPXCHG) GenTreeCmpXchg(genActualType(callType), op1, op2, op3);
3485 node->gtCmpXchg.gtOpLocation->gtFlags |= GTF_DONT_CSE;
3491 case CORINFO_INTRINSIC_StringLength:
3492 op1 = impPopStack().val;
3493 if (!opts.MinOpts() && !opts.compDbgCode)
3495 GenTreeArrLen* arrLen =
3496 new (this, GT_ARR_LENGTH) GenTreeArrLen(TYP_INT, op1, offsetof(CORINFO_String, stringLen));
3501 /* Create the expression "*(str_addr + stringLengthOffset)" */
3502 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
3503 gtNewIconNode(offsetof(CORINFO_String, stringLen), TYP_I_IMPL));
3504 op1 = gtNewOperNode(GT_IND, TYP_INT, op1);
3509 case CORINFO_INTRINSIC_StringGetChar:
3510 op2 = impPopStack().val;
3511 op1 = impPopStack().val;
3512 op1 = gtNewIndexRef(TYP_CHAR, op1, op2);
3513 op1->gtFlags |= GTF_INX_STRING_LAYOUT;
3517 case CORINFO_INTRINSIC_InitializeArray:
3518 retNode = impInitializeArrayIntrinsic(sig);
3521 case CORINFO_INTRINSIC_Array_Address:
3522 case CORINFO_INTRINSIC_Array_Get:
3523 case CORINFO_INTRINSIC_Array_Set:
3524 retNode = impArrayAccessIntrinsic(clsHnd, sig, memberRef, readonlyCall, intrinsicID);
3527 case CORINFO_INTRINSIC_GetTypeFromHandle:
3528 op1 = impStackTop(0).val;
3529 if (op1->gtOper == GT_CALL && (op1->gtCall.gtCallType == CT_HELPER) &&
3530 gtIsTypeHandleToRuntimeTypeHelper(op1))
3532 op1 = impPopStack().val;
3533 // Change call to return RuntimeType directly.
3534 op1->gtType = TYP_REF;
3537 // Call the regular function.
3540 case CORINFO_INTRINSIC_RTH_GetValueInternal:
3541 op1 = impStackTop(0).val;
3542 if (op1->gtOper == GT_CALL && (op1->gtCall.gtCallType == CT_HELPER) &&
3543 gtIsTypeHandleToRuntimeTypeHelper(op1))
3546 // Helper-RuntimeTypeHandle -> TreeToGetNativeTypeHandle
3549 // TreeToGetNativeTypeHandle
3551 // Remove call to helper and return the native TypeHandle pointer that was the parameter
3554 op1 = impPopStack().val;
3556 // Get native TypeHandle argument to old helper
3557 op1 = op1->gtCall.gtCallArgs;
3558 assert(op1->OperIsList());
3559 assert(op1->gtOp.gtOp2 == nullptr);
3560 op1 = op1->gtOp.gtOp1;
3563 // Call the regular function.
3566 #ifndef LEGACY_BACKEND
3567 case CORINFO_INTRINSIC_Object_GetType:
3569 op1 = impPopStack().val;
3570 op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, intrinsicID, method);
3572 // Set the CALL flag to indicate that the operator is implemented by a call.
3573 // Set also the EXCEPTION flag because the native implementation of
3574 // CORINFO_INTRINSIC_Object_GetType intrinsic can throw NullReferenceException.
3575 op1->gtFlags |= (GTF_CALL | GTF_EXCEPT);
3581 /* Unknown intrinsic */
3587 if (retNode == nullptr)
3589 NO_WAY("JIT must expand the intrinsic!");
3596 /*****************************************************************************/
3598 GenTreePtr Compiler::impArrayAccessIntrinsic(
3599 CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, int memberRef, bool readonlyCall, CorInfoIntrinsics intrinsicID)
3601 /* If we are generating SMALL_CODE, we don't want to use intrinsics for
3602 the following, as it generates fatter code.
3605 if (compCodeOpt() == SMALL_CODE)
3610 /* These intrinsics generate fatter (but faster) code and are only
3611 done if we don't need SMALL_CODE */
3613 unsigned rank = (intrinsicID == CORINFO_INTRINSIC_Array_Set) ? (sig->numArgs - 1) : sig->numArgs;
3615 // The rank 1 case is special because it has to handle two array formats;
3616 // we will simply not do that case.
3617 if (rank > GT_ARR_MAX_RANK || rank <= 1)
3622 CORINFO_CLASS_HANDLE arrElemClsHnd = nullptr;
3623 var_types elemType = JITtype2varType(info.compCompHnd->getChildType(clsHnd, &arrElemClsHnd));
3625 // For the ref case, we will only be able to inline if the types match
3626 // (the verifier checks for this; we don't care about the nonverified case) and the
3627 // type is final (so we don't need to do the cast).
3628 if ((intrinsicID != CORINFO_INTRINSIC_Array_Get) && !readonlyCall && varTypeIsGC(elemType))
3630 // Get the call site signature
3631 CORINFO_SIG_INFO LocalSig;
3632 eeGetCallSiteSig(memberRef, info.compScopeHnd, impTokenLookupContextHandle, &LocalSig);
3633 assert(LocalSig.hasThis());
3635 CORINFO_CLASS_HANDLE actualElemClsHnd;
3637 if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
3639 // Fetch the last argument, the one that indicates the type we are setting.
3640 CORINFO_ARG_LIST_HANDLE argType = LocalSig.args;
3641 for (unsigned r = 0; r < rank; r++)
3643 argType = info.compCompHnd->getArgNext(argType);
3646 typeInfo argInfo = verParseArgSigToTypeInfo(&LocalSig, argType);
3647 actualElemClsHnd = argInfo.GetClassHandle();
3651 assert(intrinsicID == CORINFO_INTRINSIC_Array_Address);
3653 // Fetch the return type
3654 typeInfo retInfo = verMakeTypeInfo(LocalSig.retType, LocalSig.retTypeClass);
3655 assert(retInfo.IsByRef());
3656 actualElemClsHnd = retInfo.GetClassHandle();
3659 // if it's not final, we can't do the optimization
3660 if (!(info.compCompHnd->getClassAttribs(actualElemClsHnd) & CORINFO_FLG_FINAL))
3666 unsigned arrayElemSize;
3667 if (elemType == TYP_STRUCT)
3669 assert(arrElemClsHnd);
3671 arrayElemSize = info.compCompHnd->getClassSize(arrElemClsHnd);
3675 arrayElemSize = genTypeSize(elemType);
3678 if ((unsigned char)arrayElemSize != arrayElemSize)
3680 // arrayElemSize would be truncated as an unsigned char.
3681 // This means the array element is too large. Don't do the optimization.
3685 GenTreePtr val = nullptr;
3687 if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
3689 // Assignment of a struct is more work, and there are more gets than sets.
3690 if (elemType == TYP_STRUCT)
3695 val = impPopStack().val;
3696 assert(genActualType(elemType) == genActualType(val->gtType) ||
3697 (elemType == TYP_FLOAT && val->gtType == TYP_DOUBLE) ||
3698 (elemType == TYP_INT && val->gtType == TYP_BYREF) ||
3699 (elemType == TYP_DOUBLE && val->gtType == TYP_FLOAT));
3702 noway_assert((unsigned char)GT_ARR_MAX_RANK == GT_ARR_MAX_RANK);
3704 GenTreePtr inds[GT_ARR_MAX_RANK];
3705 for (unsigned k = rank; k > 0; k--)
3707 inds[k - 1] = impPopStack().val;
3710 GenTreePtr arr = impPopStack().val;
3711 assert(arr->gtType == TYP_REF);
3713 GenTreePtr arrElem =
3714 new (this, GT_ARR_ELEM) GenTreeArrElem(TYP_BYREF, arr, static_cast<unsigned char>(rank),
3715 static_cast<unsigned char>(arrayElemSize), elemType, &inds[0]);
3717 if (intrinsicID != CORINFO_INTRINSIC_Array_Address)
3719 arrElem = gtNewOperNode(GT_IND, elemType, arrElem);
3722 if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
3724 assert(val != nullptr);
3725 return gtNewAssignNode(arrElem, val);
3733 BOOL Compiler::verMergeEntryStates(BasicBlock* block, bool* changed)
3737 // do some basic checks first
3738 if (block->bbStackDepthOnEntry() != verCurrentState.esStackDepth)
3743 if (verCurrentState.esStackDepth > 0)
3745 // merge stack types
3746 StackEntry* parentStack = block->bbStackOnEntry();
3747 StackEntry* childStack = verCurrentState.esStack;
3749 for (i = 0; i < verCurrentState.esStackDepth; i++, parentStack++, childStack++)
3751 if (tiMergeToCommonParent(&parentStack->seTypeInfo, &childStack->seTypeInfo, changed) == FALSE)
3758 // merge initialization status of this ptr
3760 if (verTrackObjCtorInitState)
3762 // If we're tracking the CtorInitState, then it must not be unknown in the current state.
3763 assert(verCurrentState.thisInitialized != TIS_Bottom);
3765 // If the successor block's thisInit state is unknown, copy it from the current state.
3766 if (block->bbThisOnEntry() == TIS_Bottom)
3769 verSetThisInit(block, verCurrentState.thisInitialized);
3771 else if (verCurrentState.thisInitialized != block->bbThisOnEntry())
3773 if (block->bbThisOnEntry() != TIS_Top)
3776 verSetThisInit(block, TIS_Top);
3778 if (block->bbFlags & BBF_FAILED_VERIFICATION)
3780 // The block is bad. Control can flow through the block to any handler that catches the
3781 // verification exception, but the importer ignores bad blocks and therefore won't model
3782 // this flow in the normal way. To complete the merge into the bad block, the new state
3783 // needs to be manually pushed to the handlers that may be reached after the verification
3784 // exception occurs.
3786 // Usually, the new state was already propagated to the relevant handlers while processing
3787 // the predecessors of the bad block. The exception is when the bad block is at the start
3788 // of a try region, meaning it is protected by additional handlers that do not protect its
3791 if (block->hasTryIndex() && ((block->bbFlags & BBF_TRY_BEG) != 0))
3793 // Push TIS_Top to the handlers that protect the bad block. Note that this can cause
3794 // recursive calls back into this code path (if successors of the current bad block are
3795 // also bad blocks).
3797 ThisInitState origTIS = verCurrentState.thisInitialized;
3798 verCurrentState.thisInitialized = TIS_Top;
3799 impVerifyEHBlock(block, true);
3800 verCurrentState.thisInitialized = origTIS;
3808 assert(verCurrentState.thisInitialized == TIS_Bottom && block->bbThisOnEntry() == TIS_Bottom);
3814 /*****************************************************************************
3815 * 'logMsg' is true if a log message needs to be logged. false if the caller has
3816 * already logged it (presumably in a more detailed fashion than done here)
3817 * 'bVerificationException' is true for a verification exception, false for a
3818 * "call unauthorized by host" exception.
3821 void Compiler::verConvertBBToThrowVerificationException(BasicBlock* block DEBUGARG(bool logMsg))
3823 block->bbJumpKind = BBJ_THROW;
3824 block->bbFlags |= BBF_FAILED_VERIFICATION;
3826 impCurStmtOffsSet(block->bbCodeOffs);
3829 // we need this since BeginTreeList asserts otherwise
3830 impTreeList = impTreeLast = nullptr;
3831 block->bbFlags &= ~BBF_IMPORTED;
3835 JITLOG((LL_ERROR, "Verification failure: while compiling %s near IL offset %x..%xh \n", info.compFullName,
3836 block->bbCodeOffs, block->bbCodeOffsEnd));
3839 printf("\n\nVerification failure: %s near IL %xh \n", info.compFullName, block->bbCodeOffs);
3843 if (JitConfig.DebugBreakOnVerificationFailure())
3851 // if the stack is non-empty evaluate all the side-effects
3852 if (verCurrentState.esStackDepth > 0)
3854 impEvalSideEffects();
3856 assert(verCurrentState.esStackDepth == 0);
3858 GenTreePtr op1 = gtNewHelperCallNode(CORINFO_HELP_VERIFICATION, TYP_VOID, GTF_EXCEPT,
3859 gtNewArgList(gtNewIconNode(block->bbCodeOffs)));
3860 // verCurrentState.esStackDepth = 0;
3861 impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
3863 // The inliner is not able to handle methods that require a throw block, so
3864 // make sure this method never gets inlined.
3865 info.compCompHnd->setMethodAttribs(info.compMethodHnd, CORINFO_FLG_BAD_INLINEE);
3868 /*****************************************************************************
3871 void Compiler::verHandleVerificationFailure(BasicBlock* block DEBUGARG(bool logMsg))
3874 // In AMD64, for historical reasons involving design limitations of JIT64, the VM has a
3875 // slightly different mechanism in which it calls the JIT to perform IL verification:
3876 // in the case of transparent methods the VM calls for a predicate IsVerifiable()
3877 // that consists of calling the JIT with the IMPORT_ONLY flag and with the IL verify flag on.
3878 // If the JIT determines the method is not verifiable, it should raise the exception to the VM and let
3879 // it bubble up until reported by the runtime. Currently in RyuJIT, this method doesn't bubble
3880 // up the exception; instead it embeds a throw inside the offending basic block and lets the
3881 // jitted method fail at run time.
3883 // For AMD64 we don't want this behavior when the JIT has been called only for verification (i.e.
3884 // with the IMPORT_ONLY and IL Verification flag set) because this won't actually generate code,
3885 // just try to find out whether to fail this method before even actually jitting it. So, in case
3886 // we detect these two conditions, instead of generating a throw statement inside the offending
3887 // basic block, we immediately fail to JIT and notify the VM so that the IsVerifiable() predicate
3888 // returns false, making RyuJIT behave the same way JIT64 does.
3890 // The rationale behind this workaround is to avoid modifying the VM and maintain compatibility between JIT64 and
3891 // RyuJIT for the time being until we completely replace JIT64.
3892 // TODO-ARM64-Cleanup: We probably want to actually modify the VM in the future to avoid the unnecessary two passes.
3894 // In AMD64 we must make sure we're behaving the same way as JIT64, meaning we should only raise the verification
3895 // exception if we are only importing and verifying. The method verNeedsVerification() can also modify the
3896 // tiVerificationNeeded flag in the case it determines it can 'skip verification' during importation and defer it
3897 // to a runtime check. That's why we must assert one or the other (since the flag tiVerificationNeeded can
3898 // be turned off during importation).
3899 CLANG_FORMAT_COMMENT_ANCHOR;
3901 #ifdef _TARGET_64BIT_
3904 bool canSkipVerificationResult =
3905 info.compCompHnd->canSkipMethodVerification(info.compMethodHnd) != CORINFO_VERIFICATION_CANNOT_SKIP;
3906 assert(tiVerificationNeeded || canSkipVerificationResult);
3909 // Add the non verifiable flag to the compiler
3910 if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IMPORT_ONLY))
3912 tiIsVerifiableCode = FALSE;
3914 #endif //_TARGET_64BIT_
3915 verResetCurrentState(block, &verCurrentState);
3916 verConvertBBToThrowVerificationException(block DEBUGARG(logMsg));
3919 impNoteLastILoffs(); // Remember at which BC offset the tree was finished
3923 /******************************************************************************/
3924 typeInfo Compiler::verMakeTypeInfo(CorInfoType ciType, CORINFO_CLASS_HANDLE clsHnd)
3926 assert(ciType < CORINFO_TYPE_COUNT);
3931 case CORINFO_TYPE_STRING:
3932 case CORINFO_TYPE_CLASS:
3933 tiResult = verMakeTypeInfo(clsHnd);
3934 if (!tiResult.IsType(TI_REF))
3935 { // type must be consistent with element type
3940 #ifdef _TARGET_64BIT_
3941 case CORINFO_TYPE_NATIVEINT:
3942 case CORINFO_TYPE_NATIVEUINT:
3945 // If we have more precise information, use it
3946 return verMakeTypeInfo(clsHnd);
3950 return typeInfo::nativeInt();
3953 #endif // _TARGET_64BIT_
3955 case CORINFO_TYPE_VALUECLASS:
3956 case CORINFO_TYPE_REFANY:
3957 tiResult = verMakeTypeInfo(clsHnd);
3958 // type must be consistent with element type;
3959 if (!tiResult.IsValueClass())
3964 case CORINFO_TYPE_VAR:
3965 return verMakeTypeInfo(clsHnd);
3967 case CORINFO_TYPE_PTR: // for now, pointers are treated as an error
3968 case CORINFO_TYPE_VOID:
3972 case CORINFO_TYPE_BYREF:
3974 CORINFO_CLASS_HANDLE childClassHandle;
3975 CorInfoType childType = info.compCompHnd->getChildType(clsHnd, &childClassHandle);
3976 return ByRef(verMakeTypeInfo(childType, childClassHandle));
3982 { // If we have more precise information, use it
3983 return typeInfo(TI_STRUCT, clsHnd);
3987 return typeInfo(JITtype2tiType(ciType));
3993 /******************************************************************************/
3995 typeInfo Compiler::verMakeTypeInfo(CORINFO_CLASS_HANDLE clsHnd, bool bashStructToRef /* = false */)
3997 if (clsHnd == nullptr)
4002 // Byrefs should only occur in method and local signatures, which are accessed
4003 // using ICorClassInfo and ICorClassInfo.getChildType.
4004 // So findClass() and getClassAttribs() should not be called for byrefs
4006 if (JITtype2varType(info.compCompHnd->asCorInfoType(clsHnd)) == TYP_BYREF)
4008 assert(!"Did findClass() return a Byref?");
4012 unsigned attribs = info.compCompHnd->getClassAttribs(clsHnd);
4014 if (attribs & CORINFO_FLG_VALUECLASS)
4016 CorInfoType t = info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd);
4018 // Meta-data validation should ensure that CORINFO_TYPE_BYREF does
4019 // not occur here, so we may want to change this to an assert instead.
4020 if (t == CORINFO_TYPE_VOID || t == CORINFO_TYPE_BYREF || t == CORINFO_TYPE_PTR)
4025 #ifdef _TARGET_64BIT_
4026 if (t == CORINFO_TYPE_NATIVEINT || t == CORINFO_TYPE_NATIVEUINT)
4028 return typeInfo::nativeInt();
4030 #endif // _TARGET_64BIT_
4032 if (t != CORINFO_TYPE_UNDEF)
4034 return (typeInfo(JITtype2tiType(t)));
4036 else if (bashStructToRef)
4038 return (typeInfo(TI_REF, clsHnd));
4042 return (typeInfo(TI_STRUCT, clsHnd));
4045 else if (attribs & CORINFO_FLG_GENERIC_TYPE_VARIABLE)
4047 // See comment in _typeInfo.h for why we do it this way.
4048 return (typeInfo(TI_REF, clsHnd, true));
4052 return (typeInfo(TI_REF, clsHnd));
4056 /******************************************************************************/
4057 BOOL Compiler::verIsSDArray(typeInfo ti)
4059 if (ti.IsNullObjRef())
4060 { // nulls are SD arrays
4064 if (!ti.IsType(TI_REF))
4069 if (!info.compCompHnd->isSDArray(ti.GetClassHandleForObjRef()))
4076 /******************************************************************************/
4077 /* Given 'arrayObjectType' which is an array type, fetch the element type. */
4078 /* Returns an error type if anything goes wrong */
4080 typeInfo Compiler::verGetArrayElemType(typeInfo arrayObjectType)
4082 assert(!arrayObjectType.IsNullObjRef()); // you need to check for null explicitly since that is a success case
4084 if (!verIsSDArray(arrayObjectType))
4089 CORINFO_CLASS_HANDLE childClassHandle = nullptr;
4090 CorInfoType ciType = info.compCompHnd->getChildType(arrayObjectType.GetClassHandleForObjRef(), &childClassHandle);
4092 return verMakeTypeInfo(ciType, childClassHandle);
4095 /*****************************************************************************
4097 typeInfo Compiler::verParseArgSigToTypeInfo(CORINFO_SIG_INFO* sig, CORINFO_ARG_LIST_HANDLE args)
4099 CORINFO_CLASS_HANDLE classHandle;
4100 CorInfoType ciType = strip(info.compCompHnd->getArgType(sig, args, &classHandle));
4102 var_types type = JITtype2varType(ciType);
4103 if (varTypeIsGC(type))
4105 // For efficiency, getArgType only returns something in classHandle for
4106 // value types. For other types that have additional type info, you
4107 // have to call back explicitly.
4108 classHandle = info.compCompHnd->getArgClass(sig, args);
4111 NO_WAY("Could not figure out Class specified in argument or local signature");
4115 return verMakeTypeInfo(ciType, classHandle);
4118 /*****************************************************************************/
4120 // This does the expensive check to figure out whether the method
4121 // needs to be verified. It is called only when we fail verification,
4122 // just before throwing the verification exception.
4124 BOOL Compiler::verNeedsVerification()
4126 // If we have previously determined that verification is NOT needed
4127 // (for example in Compiler::compCompile), that means verification is really not needed.
4128 // Return the same decision we made before.
4129 // (Note: This literally means that tiVerificationNeeded can never go from 0 to 1.)
4131 if (!tiVerificationNeeded)
4133 return tiVerificationNeeded;
4136 assert(tiVerificationNeeded);
4138 // Ok, we haven't concluded that verification is NOT needed. Consult the EE now to
4139 // obtain the answer.
4140 CorInfoCanSkipVerificationResult canSkipVerificationResult =
4141 info.compCompHnd->canSkipMethodVerification(info.compMethodHnd);
4143 // canSkipVerification will return one of the following three values:
4144 // CORINFO_VERIFICATION_CANNOT_SKIP = 0, // Cannot skip verification during jit time.
4145 // CORINFO_VERIFICATION_CAN_SKIP = 1, // Can skip verification during jit time.
4146 // CORINFO_VERIFICATION_RUNTIME_CHECK = 2, // Skip verification during jit time,
4147 // but need to insert a callout to the VM to ask during runtime
4148 // whether to skip verification or not.
4150 // Set tiRuntimeCalloutNeeded if canSkipVerification() instructs us to insert a callout for runtime check
4151 if (canSkipVerificationResult == CORINFO_VERIFICATION_RUNTIME_CHECK)
4153 tiRuntimeCalloutNeeded = true;
4156 if (canSkipVerificationResult == CORINFO_VERIFICATION_DONT_JIT)
4158 // Dev10 706080 - Testers don't like the assert, so just silence it
4159 // by not using the macros that invoke debugAssert.
4163 // When tiVerificationNeeded is true, JIT will do the verification during JIT time.
4164 // The following line means we will NOT do jit time verification if canSkipVerification
4165 // returns CORINFO_VERIFICATION_CAN_SKIP or CORINFO_VERIFICATION_RUNTIME_CHECK.
4166 tiVerificationNeeded = (canSkipVerificationResult == CORINFO_VERIFICATION_CANNOT_SKIP);
4167 return tiVerificationNeeded;
4170 BOOL Compiler::verIsByRefLike(const typeInfo& ti)
4176 if (!ti.IsType(TI_STRUCT))
4180 return info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_CONTAINS_STACK_PTR;
4183 BOOL Compiler::verIsSafeToReturnByRef(const typeInfo& ti)
4185 if (ti.IsPermanentHomeByRef())
4195 BOOL Compiler::verIsBoxable(const typeInfo& ti)
4197 return (ti.IsPrimitiveType() || ti.IsObjRef() // includes boxed generic type variables
4198 || ti.IsUnboxedGenericTypeVar() ||
4199 (ti.IsType(TI_STRUCT) &&
4200 // exclude byreflike structs
4201 !(info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_CONTAINS_STACK_PTR)));
4204 // Is it a boxed value type?
4205 bool Compiler::verIsBoxedValueType(typeInfo ti)
4207 if (ti.GetType() == TI_REF)
4209 CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandleForObjRef();
4210 return !!eeIsValueClass(clsHnd);
4218 /*****************************************************************************
4220 * Check if a TailCall is legal.
4223 bool Compiler::verCheckTailCallConstraint(
4225 CORINFO_RESOLVED_TOKEN* pResolvedToken,
4226 CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, // Is this a "constrained." call on a type parameter?
4227 bool speculative // If true, won't throw if verification fails. Instead it will
4228 // return false to the caller.
4229 // If false, it will throw.
4233 CORINFO_SIG_INFO sig;
4234 unsigned int popCount = 0; // we can't pop the stack since impImportCall needs it, so
4235 // this counter is used to keep track of how many items have been virtually popped
4238 CORINFO_METHOD_HANDLE methodHnd = nullptr;
4239 CORINFO_CLASS_HANDLE methodClassHnd = nullptr;
4240 unsigned methodClassFlgs = 0;
4242 assert(impOpcodeIsCallOpcode(opcode));
4244 if (compIsForInlining())
4249 // for calli, VerifyOrReturn that this is not a virtual method
4250 if (opcode == CEE_CALLI)
4252 /* Get the call sig */
4253 eeGetSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &sig);
4255 // We don't know the target method, so we have to infer the flags, or
4256 // assume the worst-case.
4257 mflags = (sig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC;
4261 methodHnd = pResolvedToken->hMethod;
4263 mflags = info.compCompHnd->getMethodAttribs(methodHnd);
4265 // When verifying generic code we pair the method handle with its
4266 // owning class to get the exact method signature.
4267 methodClassHnd = pResolvedToken->hClass;
4268 assert(methodClassHnd);
4270 eeGetMethodSig(methodHnd, &sig, methodClassHnd);
4272 // opcode specific check
4273 methodClassFlgs = info.compCompHnd->getClassAttribs(methodClassHnd);
4276 // We must have got the methodClassHnd if opcode is not CEE_CALLI
4277 assert((methodHnd != nullptr && methodClassHnd != nullptr) || opcode == CEE_CALLI);
4279 if ((sig.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
4281 eeGetCallSiteSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &sig);
4284 // check compatibility of the arguments
4285 unsigned int argCount;
4286 argCount = sig.numArgs;
4287 CORINFO_ARG_LIST_HANDLE args;
4291 typeInfo tiDeclared = verParseArgSigToTypeInfo(&sig, args).NormaliseForStack();
4293 // check that the argument is not a byref for tailcalls
4294 VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclared), "tailcall on byrefs", speculative);
4296 // For unsafe code, we might have parameters containing pointer to the stack location.
4297 // Disallow the tailcall for this kind.
4298 CORINFO_CLASS_HANDLE classHandle;
4299 CorInfoType ciType = strip(info.compCompHnd->getArgType(&sig, args, &classHandle));
4300 VerifyOrReturnSpeculative(ciType != CORINFO_TYPE_PTR, "tailcall on CORINFO_TYPE_PTR", speculative);
4302 args = info.compCompHnd->getArgNext(args);
4306 popCount += sig.numArgs;
4308 // check for 'this' which is on non-static methods, not called via NEWOBJ
4309 if (!(mflags & CORINFO_FLG_STATIC))
4311 // Always update the popCount.
4312 // This is crucial for the stack calculation to be correct.
4313 typeInfo tiThis = impStackTop(popCount).seTypeInfo;
4316 if (opcode == CEE_CALLI)
4318 // For CALLI, we don't know the methodClassHnd. Therefore, let's check the "this" object
4320 if (tiThis.IsValueClass())
4324 VerifyOrReturnSpeculative(!verIsByRefLike(tiThis), "byref in tailcall", speculative);
4328 // Check type compatibility of the this argument
4329 typeInfo tiDeclaredThis = verMakeTypeInfo(methodClassHnd);
4330 if (tiDeclaredThis.IsValueClass())
4332 tiDeclaredThis.MakeByRef();
4335 VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclaredThis), "byref in tailcall", speculative);
4339 // Tail calls on constrained calls should be illegal too:
4340 // when instantiated at a value type, a constrained call may pass the address of a stack allocated value
4341 VerifyOrReturnSpeculative(!pConstrainedResolvedToken, "byref in constrained tailcall", speculative);
4343 // Get the exact view of the signature for an array method
4344 if (sig.retType != CORINFO_TYPE_VOID)
4346 if (methodClassFlgs & CORINFO_FLG_ARRAY)
4348 assert(opcode != CEE_CALLI);
4349 eeGetCallSiteSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &sig);
4353 typeInfo tiCalleeRetType = verMakeTypeInfo(sig.retType, sig.retTypeClass);
4354 typeInfo tiCallerRetType =
4355 verMakeTypeInfo(info.compMethodInfo->args.retType, info.compMethodInfo->args.retTypeClass);
4357 // void return type gets morphed into the error type, so we have to treat them specially here
4358 if (sig.retType == CORINFO_TYPE_VOID)
4360 VerifyOrReturnSpeculative(info.compMethodInfo->args.retType == CORINFO_TYPE_VOID, "tailcall return mismatch",
4365 VerifyOrReturnSpeculative(tiCompatibleWith(NormaliseForStack(tiCalleeRetType),
4366 NormaliseForStack(tiCallerRetType), true),
4367 "tailcall return mismatch", speculative);
4370 // for tailcall, stack must be empty
4371 VerifyOrReturnSpeculative(verCurrentState.esStackDepth == popCount, "stack non-empty on tailcall", speculative);
4373 return true; // Yes, tailcall is legal
4376 /*****************************************************************************
4378 * Checks the IL verification rules for the call
4381 void Compiler::verVerifyCall(OPCODE opcode,
4382 CORINFO_RESOLVED_TOKEN* pResolvedToken,
4383 CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
4386 const BYTE* delegateCreateStart,
4387 const BYTE* codeAddr,
4388 CORINFO_CALL_INFO* callInfo DEBUGARG(const char* methodName))
4391 CORINFO_SIG_INFO* sig = nullptr;
4392 unsigned int popCount = 0; // we can't pop the stack since impImportCall needs it, so
4393 // this counter is used to keep track of how many items have been virtually popped
4396 // for calli, VerifyOrReturn that this is not a virtual method
4397 if (opcode == CEE_CALLI)
4399 Verify(false, "Calli not verifiable");
4403 //<NICE> It would be nice to cache the rest of it, but eeFindMethod is the big ticket item.
4404 mflags = callInfo->verMethodFlags;
4406 sig = &callInfo->verSig;
4408 if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
4410 eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
4413 // opcode specific check
4414 unsigned methodClassFlgs = callInfo->classFlags;
4418 // cannot do callvirt on valuetypes
4419 VerifyOrReturn(!(methodClassFlgs & CORINFO_FLG_VALUECLASS), "callVirt on value class");
4420 VerifyOrReturn(sig->hasThis(), "CallVirt on static method");
4425 assert(!tailCall); // Importer should not allow this
4426 VerifyOrReturn((mflags & CORINFO_FLG_CONSTRUCTOR) && !(mflags & CORINFO_FLG_STATIC),
4427 "newobj must be on instance");
4429 if (methodClassFlgs & CORINFO_FLG_DELEGATE)
4431 VerifyOrReturn(sig->numArgs == 2, "wrong number args to delegate ctor");
4432 typeInfo tiDeclaredObj = verParseArgSigToTypeInfo(sig, sig->args).NormaliseForStack();
4433 typeInfo tiDeclaredFtn =
4434 verParseArgSigToTypeInfo(sig, info.compCompHnd->getArgNext(sig->args)).NormaliseForStack();
4435 VerifyOrReturn(tiDeclaredFtn.IsNativeIntType(), "ftn arg needs to be a native int type");
4437 assert(popCount == 0);
4438 typeInfo tiActualObj = impStackTop(1).seTypeInfo;
4439 typeInfo tiActualFtn = impStackTop(0).seTypeInfo;
4441 VerifyOrReturn(tiActualFtn.IsMethod(), "delegate needs method as first arg");
4442 VerifyOrReturn(tiCompatibleWith(tiActualObj, tiDeclaredObj, true), "delegate object type mismatch");
4443 VerifyOrReturn(tiActualObj.IsNullObjRef() || tiActualObj.IsType(TI_REF),
4444 "delegate object type mismatch");
4446 CORINFO_CLASS_HANDLE objTypeHandle =
4447 tiActualObj.IsNullObjRef() ? nullptr : tiActualObj.GetClassHandleForObjRef();
4449 // the method signature must be compatible with the delegate's invoke method
4451 // check that for virtual functions, the type of the object used to get the
4452 // ftn ptr is the same as the type of the object passed to the delegate ctor.
4453 // since this is a bit of work to determine in general, we pattern match stylized code sequences.
4456 // the delegate creation code check, which used to be done later, is now done here
4457 // so we can read delegateMethodRef directly
4458 // from the preceding LDFTN or CEE_LDVIRTFN instruction sequence;
4459 // we then use it in our call to isCompatibleDelegate().
4461 mdMemberRef delegateMethodRef = mdMemberRefNil;
4462 VerifyOrReturn(verCheckDelegateCreation(delegateCreateStart, codeAddr, delegateMethodRef),
4463 "must create delegates with certain IL");
4465 CORINFO_RESOLVED_TOKEN delegateResolvedToken;
4466 delegateResolvedToken.tokenContext = impTokenLookupContextHandle;
4467 delegateResolvedToken.tokenScope = info.compScopeHnd;
4468 delegateResolvedToken.token = delegateMethodRef;
4469 delegateResolvedToken.tokenType = CORINFO_TOKENKIND_Method;
4470 info.compCompHnd->resolveToken(&delegateResolvedToken);
4472 CORINFO_CALL_INFO delegateCallInfo;
4473 eeGetCallInfo(&delegateResolvedToken, nullptr /* constraint typeRef */,
4474 addVerifyFlag(CORINFO_CALLINFO_SECURITYCHECKS), &delegateCallInfo);
4476 BOOL isOpenDelegate = FALSE;
4477 VerifyOrReturn(info.compCompHnd->isCompatibleDelegate(objTypeHandle, delegateResolvedToken.hClass,
4478 tiActualFtn.GetMethod(), pResolvedToken->hClass,
4480 "function incompatible with delegate");
4482 // check the constraints on the target method
4483 VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(delegateResolvedToken.hClass),
4484 "delegate target has unsatisfied class constraints");
4485 VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(delegateResolvedToken.hClass,
4486 tiActualFtn.GetMethod()),
4487 "delegate target has unsatisfied method constraints");
4489 // See ECMA spec section 1.8.1.5.2 (Delegating via instance dispatch)
4490 // for additional verification rules for delegates
4491 CORINFO_METHOD_HANDLE actualMethodHandle = tiActualFtn.GetMethod();
4492 DWORD actualMethodAttribs = info.compCompHnd->getMethodAttribs(actualMethodHandle);
4493 if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr))
4496 if ((actualMethodAttribs & CORINFO_FLG_VIRTUAL) && ((actualMethodAttribs & CORINFO_FLG_FINAL) == 0)
4498 && StrictCheckForNonVirtualCallToVirtualMethod()
4502 if (info.compCompHnd->shouldEnforceCallvirtRestriction(info.compScopeHnd))
4504 VerifyOrReturn(tiActualObj.IsThisPtr() && lvaIsOriginalThisReadOnly() ||
4505 verIsBoxedValueType(tiActualObj),
4506 "The 'this' parameter to the call must be either the calling method's "
4507 "'this' parameter or "
4508 "a boxed value type.");
4513 if (actualMethodAttribs & CORINFO_FLG_PROTECTED)
4515 BOOL targetIsStatic = actualMethodAttribs & CORINFO_FLG_STATIC;
4517 Verify(targetIsStatic || !isOpenDelegate,
4518 "Unverifiable creation of an open instance delegate for a protected member.");
4520 CORINFO_CLASS_HANDLE instanceClassHnd = (tiActualObj.IsNullObjRef() || targetIsStatic)
4522 : tiActualObj.GetClassHandleForObjRef();
4524 // In the case of protected methods, it is a requirement that the 'this'
4525 // pointer be a subclass of the current context. Perform this check.
4526 Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
4527 "Accessing protected method through wrong type.");
4532 // fall thru to default checks
4534 VerifyOrReturn(!(mflags & CORINFO_FLG_ABSTRACT), "method abstract");
4536 VerifyOrReturn(!((mflags & CORINFO_FLG_CONSTRUCTOR) && (methodClassFlgs & CORINFO_FLG_DELEGATE)),
4537 "can only newobj a delegate constructor");
4539 // check compatibility of the arguments
4540 unsigned int argCount;
4541 argCount = sig->numArgs;
4542 CORINFO_ARG_LIST_HANDLE args;
4546 typeInfo tiActual = impStackTop(popCount + argCount).seTypeInfo;
4548 typeInfo tiDeclared = verParseArgSigToTypeInfo(sig, args).NormaliseForStack();
4549 VerifyOrReturn(tiCompatibleWith(tiActual, tiDeclared, true), "type mismatch");
4551 args = info.compCompHnd->getArgNext(args);
4557 popCount += sig->numArgs;
4559 // check for 'this', which is on non-static methods not called via NEWOBJ
4560 CORINFO_CLASS_HANDLE instanceClassHnd = info.compClassHnd;
4561 if (!(mflags & CORINFO_FLG_STATIC) && (opcode != CEE_NEWOBJ))
4563 typeInfo tiThis = impStackTop(popCount).seTypeInfo;
4566 // If it is null, we assume we can access it (since it will AV shortly)
4567 // If it is anything but a reference class, there is no hierarchy, so
4568 // again, we don't need the precise instance class to compute 'protected' access
4569 if (tiThis.IsType(TI_REF))
4571 instanceClassHnd = tiThis.GetClassHandleForObjRef();
4574 // Check type compatibility of the this argument
4575 typeInfo tiDeclaredThis = verMakeTypeInfo(pResolvedToken->hClass);
4576 if (tiDeclaredThis.IsValueClass())
4578 tiDeclaredThis.MakeByRef();
4581 // If this is a call to the base class .ctor, set thisPtr Init for this block.
4583 if (mflags & CORINFO_FLG_CONSTRUCTOR)
4585 if (verTrackObjCtorInitState && tiThis.IsThisPtr() &&
4586 verIsCallToInitThisPtr(info.compClassHnd, pResolvedToken->hClass))
4588 assert(verCurrentState.thisInitialized !=
4589 TIS_Bottom); // This should never be the case just from the logic of the verifier.
4590 VerifyOrReturn(verCurrentState.thisInitialized == TIS_Uninit,
4591 "Call to base class constructor when 'this' is possibly initialized");
4592 // Otherwise, 'this' is now initialized.
4593 verCurrentState.thisInitialized = TIS_Init;
4594 tiThis.SetInitialisedObjRef();
4598 // We allow direct calls to value type constructors
4599 // NB: we have to check that the contents of tiThis is a value type, otherwise we could use a
4600 // constrained callvirt to illegally re-enter a .ctor on a value of reference type.
4601 VerifyOrReturn(tiThis.IsByRef() && DereferenceByRef(tiThis).IsValueClass(),
4602 "Bad call to a constructor");
4606 if (pConstrainedResolvedToken != nullptr)
4608 VerifyOrReturn(tiThis.IsByRef(), "non-byref this type in constrained call");
4610 typeInfo tiConstraint = verMakeTypeInfo(pConstrainedResolvedToken->hClass);
4612 // We just dereference this and test for equality
4613 tiThis.DereferenceByRef();
4614 VerifyOrReturn(typeInfo::AreEquivalent(tiThis, tiConstraint),
4615 "this type mismatch with constrained type operand");
4617 // Now pretend the this type is the boxed constrained type, for the sake of subsequent checks
4618 tiThis = typeInfo(TI_REF, pConstrainedResolvedToken->hClass);
4621 // To support direct calls on readonly byrefs, just pretend tiDeclaredThis is readonly too
4622 if (tiDeclaredThis.IsByRef() && tiThis.IsReadonlyByRef())
4624 tiDeclaredThis.SetIsReadonlyByRef();
4627 VerifyOrReturn(tiCompatibleWith(tiThis, tiDeclaredThis, true), "this type mismatch");
4629 if (tiThis.IsByRef())
4631 // Find the actual type where the method exists (as opposed to what is declared
4632 // in the metadata). This is to prevent passing a byref as the "this" argument
4633 // while calling methods like System.ValueType.GetHashCode() which expect boxed objects.
4635 CORINFO_CLASS_HANDLE actualClassHnd = info.compCompHnd->getMethodClass(pResolvedToken->hMethod);
4636 VerifyOrReturn(eeIsValueClass(actualClassHnd),
4637 "Call to base type of valuetype (which is never a valuetype)");
4640 // Rules for non-virtual call to a non-final virtual method:
4643 // The "this" pointer is considered to be "possibly written" if
4644 // 1. Its address has been taken (LDARGA 0) anywhere in the method.
4646 // 2. It has been stored to (STARG.0) anywhere in the method.
4648 // A non-virtual call to a non-final virtual method is only allowed if
4649 // 1. The this pointer passed to the callee is an instance of a boxed value type.
4651 // 2. The this pointer passed to the callee is the current method's this pointer.
4652 // (and) The current method's this pointer is not "possibly written".
4654 // Thus the rule is that if you assign to "this" ANYWHERE, you can't make "base" calls to
4655 // virtual methods. (Luckily this does not affect .ctors, since they are not virtual.)
4656 // This is stronger than is strictly needed, but implementing a laxer rule would be significantly
4657 // harder and more error prone.
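// Illustrative example (assumed, not taken from these sources): the pattern this rule gates is a
// C# "base" call, which the compiler emits as a plain CALL to a virtual method, e.g.
//
//     ldarg.0                                                        // the caller's 'this'
//     ldarg.1
//     call instance bool [mscorlib]System.Object::Equals(object)     // CALL, not CALLVIRT
//
// Such a call is only verifiable when the 'this' passed is the caller's own, never-written
// 'this' pointer or a boxed value type.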
4659 if (opcode == CEE_CALL && (mflags & CORINFO_FLG_VIRTUAL) && ((mflags & CORINFO_FLG_FINAL) == 0)
4661 && StrictCheckForNonVirtualCallToVirtualMethod()
4665 if (info.compCompHnd->shouldEnforceCallvirtRestriction(info.compScopeHnd))
4668 tiThis.IsThisPtr() && lvaIsOriginalThisReadOnly() || verIsBoxedValueType(tiThis),
4669 "The 'this' parameter to the call must be either the calling method's 'this' parameter or "
4670 "a boxed value type.");
4675 // check any constraints on the callee's class and type parameters
4676 VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(pResolvedToken->hClass),
4677 "method has unsatisfied class constraints");
4678 VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(pResolvedToken->hClass, pResolvedToken->hMethod),
4679 "method has unsatisfied method constraints");
4681 if (mflags & CORINFO_FLG_PROTECTED)
4683 VerifyOrReturn(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
4684 "Can't access protected method");
4687 // Get the exact view of the signature for an array method
4688 if (sig->retType != CORINFO_TYPE_VOID)
4690 eeGetMethodSig(pResolvedToken->hMethod, sig, pResolvedToken->hClass);
4693 // "readonly." prefixed calls only allowed for the Address operation on arrays.
4694 // The methods supported by array types are under the control of the EE
4695 // so we can trust that only the Address operation returns a byref.
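// Illustrative example (assumed IL, not taken from these sources): readonly element access on a
// multi-dimensional array is the shape accepted here, e.g.
//
//     readonly.
//     call instance int32& int32[0...,0...]::Address(int32, int32)
//
// where the returned byref is a controlled-mutability managed pointer.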
4698 typeInfo tiCalleeRetType = verMakeTypeInfo(sig->retType, sig->retTypeClass);
4699 VerifyOrReturn((methodClassFlgs & CORINFO_FLG_ARRAY) && tiCalleeRetType.IsByRef(),
4700 "unexpected use of readonly prefix");
4703 // Verify the tailcall
4706 verCheckTailCallConstraint(opcode, pResolvedToken, pConstrainedResolvedToken, false);
4710 /*****************************************************************************
4711 * Checks that a delegate creation is done using one of the following patterns:
4713 *     dup; ldvirtftn targetMemberRef
4715 *     or: ldftn targetMemberRef
4717 * 'delegateCreateStart' points at the last dup or ldftn in this basic block (null if
4718 * not in this basic block)
4720 * targetMemberRef is read from the code sequence.
4721 * targetMemberRef is validated iff verificationNeeded.
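 *
 * As a rough illustration (assumed example, not taken from these sources), the verifiable IL
 * shape for creating an instance delegate over a virtual method is:
 *
 *     ldloc.0                                              // the target object
 *     dup
 *     ldvirtftn instance void C::M()
 *     newobj instance void D::.ctor(object, native int)
 *
 * while the non-virtual form simply uses "ldftn C::M()" with no dup.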
4724 BOOL Compiler::verCheckDelegateCreation(const BYTE* delegateCreateStart,
4725 const BYTE* codeAddr,
4726 mdMemberRef& targetMemberRef)
4728 if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr))
4730 targetMemberRef = getU4LittleEndian(&delegateCreateStart[2]);
4733 else if (impIsDUP_LDVIRTFTN_TOKEN(delegateCreateStart, codeAddr))
4735 targetMemberRef = getU4LittleEndian(&delegateCreateStart[3]);
4742 typeInfo Compiler::verVerifySTIND(const typeInfo& tiTo, const typeInfo& value, const typeInfo& instrType)
4744 Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
4745 typeInfo ptrVal = verVerifyLDIND(tiTo, instrType);
4746 typeInfo normPtrVal = typeInfo(ptrVal).NormaliseForStack();
4747 if (!tiCompatibleWith(value, normPtrVal, true))
4749 Verify(tiCompatibleWith(value, normPtrVal, true), "type mismatch");
4750 compUnsafeCastUsed = true;
4755 typeInfo Compiler::verVerifyLDIND(const typeInfo& ptr, const typeInfo& instrType)
4757 assert(!instrType.IsStruct());
4762 ptrVal = DereferenceByRef(ptr);
4763 if (instrType.IsObjRef() && !ptrVal.IsObjRef())
4765 Verify(false, "bad pointer");
4766 compUnsafeCastUsed = true;
4768 else if (!instrType.IsObjRef() && !typeInfo::AreEquivalent(instrType, ptrVal))
4770 Verify(false, "pointer not consistent with instr");
4771 compUnsafeCastUsed = true;
4776 Verify(false, "pointer not byref");
4777 compUnsafeCastUsed = true;
4783 // Verify that the field is used properly. 'tiThis' is NULL for statics,
4784 // 'fieldFlags' is the field's attributes, and mutator is TRUE if it is a
4785 // ld*flda or a st*fld.
4786 // 'enclosingClass' is given if we are accessing a field in some specific type.
4788 void Compiler::verVerifyField(CORINFO_RESOLVED_TOKEN* pResolvedToken,
4789 const CORINFO_FIELD_INFO& fieldInfo,
4790 const typeInfo* tiThis,
4792 BOOL allowPlainStructAsThis)
4794 CORINFO_CLASS_HANDLE enclosingClass = pResolvedToken->hClass;
4795 unsigned fieldFlags = fieldInfo.fieldFlags;
4796 CORINFO_CLASS_HANDLE instanceClass =
4797 info.compClassHnd; // for statics, we imagine the instance is the current class.
4799 bool isStaticField = ((fieldFlags & CORINFO_FLG_FIELD_STATIC) != 0);
4802 Verify(!(fieldFlags & CORINFO_FLG_FIELD_UNMANAGED), "mutating an RVA based static");
4803 if ((fieldFlags & CORINFO_FLG_FIELD_FINAL))
4805 Verify((info.compFlags & CORINFO_FLG_CONSTRUCTOR) && enclosingClass == info.compClassHnd &&
4806 info.compIsStatic == isStaticField,
4807 "bad use of initonly field (set or address taken)");
4811 if (tiThis == nullptr)
4813 Verify(isStaticField, "used static opcode with non-static field");
4817 typeInfo tThis = *tiThis;
4819 if (allowPlainStructAsThis && tThis.IsValueClass())
4824 // If it is null, we assume we can access it (since it will AV shortly)
4825 // If it is anything but a reference class, there is no hierarchy, so
4826 // again, we don't need the precise instance class to compute 'protected' access
4827 if (tiThis->IsType(TI_REF))
4829 instanceClass = tiThis->GetClassHandleForObjRef();
4832 // Note that even if the field is static, we require that the this pointer
4833 // satisfy the same constraints as a non-static field. This happens to
4834 // be simpler and seems reasonable.
4835 typeInfo tiDeclaredThis = verMakeTypeInfo(enclosingClass);
4836 if (tiDeclaredThis.IsValueClass())
4838 tiDeclaredThis.MakeByRef();
4840 // we allow read-only tThis, on any field access (even stores!), because if the
4841 // class implementor wants to prohibit stores he should make the field private.
4842 // we do this by setting the read-only bit on the type we compare tThis to.
4843 tiDeclaredThis.SetIsReadonlyByRef();
4845 else if (verTrackObjCtorInitState && tThis.IsThisPtr())
4847 // Any field access is legal on "uninitialized" this pointers.
4848 // The easiest way to implement this is to simply set the
4849 // initialized bit for the duration of the type check on the
4850 // field access only. It does not change the state of the "this"
4851 // for the function as a whole. Note that the "tThis" is a copy
4852 // of the original "this" type (*tiThis) passed in.
4853 tThis.SetInitialisedObjRef();
4856 Verify(tiCompatibleWith(tThis, tiDeclaredThis, true), "this type mismatch");
4859 // Presently the JIT does not check that we don't store or take the address of init-only fields
4860 // since we cannot guarantee their immutability and it is not a security issue.
4862 // check any constraints on the fields's class --- accessing the field might cause a class constructor to run.
4863 VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(enclosingClass),
4864 "field has unsatisfied class constraints");
4865 if (fieldFlags & CORINFO_FLG_FIELD_PROTECTED)
4867 Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClass),
4868 "Accessing protected method through wrong type.");
4872 void Compiler::verVerifyCond(const typeInfo& tiOp1, const typeInfo& tiOp2, unsigned opcode)
4874 if (tiOp1.IsNumberType())
4876 #ifdef _TARGET_64BIT_
4877 Verify(tiCompatibleWith(tiOp1, tiOp2, true), "Cond type mismatch");
4878 #else // !_TARGET_64BIT_
4879 // [10/17/2013] Consider changing this: to put on my verification lawyer hat,
4880 // this is non-conforming to the ECMA Spec: types don't have to be equivalent,
4881 // but compatible, since we can coalesce native int with int32 (see section III.1.5).
4882 Verify(typeInfo::AreEquivalent(tiOp1, tiOp2), "Cond type mismatch");
4883 #endif // !_TARGET_64BIT_
4885 else if (tiOp1.IsObjRef())
4897 Verify(FALSE, "Cond not allowed on object types");
4899 Verify(tiOp2.IsObjRef(), "Cond type mismatch");
4901 else if (tiOp1.IsByRef())
4903 Verify(tiOp2.IsByRef(), "Cond type mismatch");
4907 Verify(tiOp1.IsMethod() && tiOp2.IsMethod(), "Cond type mismatch");
4911 void Compiler::verVerifyThisPtrInitialised()
4913 if (verTrackObjCtorInitState)
4915 Verify(verCurrentState.thisInitialized == TIS_Init, "this ptr is not initialized");
4919 BOOL Compiler::verIsCallToInitThisPtr(CORINFO_CLASS_HANDLE context, CORINFO_CLASS_HANDLE target)
4921 // Either target == context, in this case calling an alternate .ctor
4922 // Or target is the immediate parent of context
4924 return ((target == context) || (target == info.compCompHnd->getParentType(context)));
4927 GenTreePtr Compiler::impImportLdvirtftn(GenTreePtr thisPtr,
4928 CORINFO_RESOLVED_TOKEN* pResolvedToken,
4929 CORINFO_CALL_INFO* pCallInfo)
4931 if ((pCallInfo->methodFlags & CORINFO_FLG_EnC) && !(pCallInfo->classFlags & CORINFO_FLG_INTERFACE))
4933 NO_WAY("Virtual call to a function added via EnC is not supported");
4936 #ifdef FEATURE_READYTORUN_COMPILER
4937 if (opts.IsReadyToRun())
4939 if (!pCallInfo->exactContextNeedsRuntimeLookup)
4941 GenTreeCall* call = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_VIRTUAL_FUNC_PTR, TYP_I_IMPL, GTF_EXCEPT,
4942 gtNewArgList(thisPtr));
4944 call->setEntryPoint(pCallInfo->codePointerLookup.constLookup);
4949 // We need a runtime lookup. CoreRT has a ReadyToRun helper for that too.
4950 if (IsTargetAbi(CORINFO_CORERT_ABI))
4952 GenTreePtr ctxTree = getRuntimeContextTree(pCallInfo->codePointerLookup.lookupKind.runtimeLookupKind);
4954 return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL,
4955 gtNewArgList(ctxTree), &pCallInfo->codePointerLookup.lookupKind);
4960 // Get the exact descriptor for the static callsite
4961 GenTreePtr exactTypeDesc = impParentClassTokenToHandle(pResolvedToken);
4962 if (exactTypeDesc == nullptr)
4963 { // compDonotInline()
4967 GenTreePtr exactMethodDesc = impTokenToHandle(pResolvedToken);
4968 if (exactMethodDesc == nullptr)
4969 { // compDonotInline()
4973 GenTreeArgList* helpArgs = gtNewArgList(exactMethodDesc);
4975 helpArgs = gtNewListNode(exactTypeDesc, helpArgs);
4977 helpArgs = gtNewListNode(thisPtr, helpArgs);
4979 // Call helper function. This gets the target address of the final destination callsite.
4981 return gtNewHelperCallNode(CORINFO_HELP_VIRTUAL_FUNC_PTR, TYP_I_IMPL, GTF_EXCEPT, helpArgs);
4984 /*****************************************************************************
4986 * Build and import a box node
4989 void Compiler::impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken)
4991 // Get the tree for the type handle for the boxed object. In the case
4992 // of shared generic code or ngen'd code this might be an embedded statement.
4994 // Note that we can only do the inline box expansion if the class constructor has been called.
4995 // We can always do it on primitive types.
4997 GenTreePtr op1 = nullptr;
4998 GenTreePtr op2 = nullptr;
5001 impSpillSpecialSideEff();
5003 // Now get the expression to box from the stack.
5004 CORINFO_CLASS_HANDLE operCls;
5005 GenTreePtr exprToBox = impPopStack(operCls).val;
5007 CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pResolvedToken->hClass);
5008 if (boxHelper == CORINFO_HELP_BOX)
5010 // we are doing 'normal' boxing. This means that we can inline the box operation
5011 // Box(expr) gets morphed into
5012 // temp = new(clsHnd)
5013 // cpobj(temp+4, expr, clsHnd)
5015 // The code paths differ slightly below for structs and primitives because
5016 // "cpobj" differs in these cases. In one case you get
5017 // impAssignStructPtr(temp+4, expr, clsHnd)
5018 // and in the other you get: indir(temp+4) = expr
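// As an illustrative sketch only (assumed shape, not a literal dump from this compiler),
// for a primitive the expansion built below ends up looking roughly like:
//
//     GT_BOX
//       GT_COMMA
//         GT_ASG( GT_IND(boxTemp + sizeof(void*)), exprToBox )
//         GT_LCL_VAR boxTemp              // the TYP_REF result left on the stack
//
// with the struct case using an impAssignStructPtr copy instead of the GT_ASG/GT_IND.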
5021 if (impBoxTempInUse || impBoxTemp == BAD_VAR_NUM)
5023 impBoxTemp = lvaGrabTemp(true DEBUGARG("Box Helper"));
5026 // needs to stay in use until this box expression is appended to
5027 // some other node. We approximate this by keeping it alive until
5028 // the opcode stack becomes empty
5029 impBoxTempInUse = true;
5031 #ifdef FEATURE_READYTORUN_COMPILER
5032 bool usingReadyToRunHelper = false;
5034 if (opts.IsReadyToRun())
5036 op1 = impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_NEW, TYP_REF);
5037 usingReadyToRunHelper = (op1 != nullptr);
5040 if (!usingReadyToRunHelper)
5043 // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
5044 // and the newfast call with a single call to a dynamic R2R cell that will:
5045 // 1) Load the context
5046 // 2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
5047 // 3) Allocate and return the new object for boxing
5048 // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
5050 // Ensure that the value class is restored
5051 op2 = impTokenToHandle(pResolvedToken, nullptr, TRUE /* mustRestoreHandle */);
5053 { // compDonotInline()
5057 op1 = gtNewHelperCallNode(info.compCompHnd->getNewHelper(pResolvedToken, info.compMethodHnd), TYP_REF, 0,
5061 /* Remember that this basic block contains 'new' of an object */
5062 compCurBB->bbFlags |= BBF_HAS_NEWOBJ;
5064 GenTreePtr asg = gtNewTempAssign(impBoxTemp, op1);
5066 GenTreePtr asgStmt = impAppendTree(asg, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
5068 op1 = gtNewLclvNode(impBoxTemp, TYP_REF);
5069 op2 = gtNewIconNode(sizeof(void*), TYP_I_IMPL);
5070 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, op2);
5072 if (varTypeIsStruct(exprToBox))
5074 assert(info.compCompHnd->getClassSize(pResolvedToken->hClass) == info.compCompHnd->getClassSize(operCls));
5075 op1 = impAssignStructPtr(op1, exprToBox, operCls, (unsigned)CHECK_SPILL_ALL);
5079 lclTyp = exprToBox->TypeGet();
5080 if (lclTyp == TYP_BYREF)
5082 lclTyp = TYP_I_IMPL;
5084 CorInfoType jitType = info.compCompHnd->asCorInfoType(pResolvedToken->hClass);
5085 if (impIsPrimitive(jitType))
5087 lclTyp = JITtype2varType(jitType);
5089 assert(genActualType(exprToBox->TypeGet()) == genActualType(lclTyp) ||
5090 varTypeIsFloating(lclTyp) == varTypeIsFloating(exprToBox->TypeGet()));
5091 var_types srcTyp = exprToBox->TypeGet();
5092 var_types dstTyp = lclTyp;
5094 if (srcTyp != dstTyp)
5096 assert((varTypeIsFloating(srcTyp) && varTypeIsFloating(dstTyp)) ||
5097 (varTypeIsIntegral(srcTyp) && varTypeIsIntegral(dstTyp)));
5098 exprToBox = gtNewCastNode(dstTyp, exprToBox, dstTyp);
5100 op1 = gtNewAssignNode(gtNewOperNode(GT_IND, lclTyp, op1), exprToBox);
5103 op2 = gtNewLclvNode(impBoxTemp, TYP_REF);
5104 op1 = gtNewOperNode(GT_COMMA, TYP_REF, op1, op2);
5106 // Record that this is a "box" node.
5107 op1 = new (this, GT_BOX) GenTreeBox(TYP_REF, op1, asgStmt);
5109 // If it is a value class, mark the "box" node. We can use this information
5110 // to optimise several cases:
5111 // "box(x) == null" --> false
5112 // "(box(x)).CallAnInterfaceMethod(...)" --> "(&x).CallAValueTypeMethod"
5113 // "(box(x)).CallAnObjectMethod(...)" --> "(&x).CallAValueTypeMethod"
5115 op1->gtFlags |= GTF_BOX_VALUE;
5116 assert(op1->IsBoxedValue());
5117 assert(asg->gtOper == GT_ASG);
5121 // Don't optimize, just call the helper and be done with it
5123 // Ensure that the value class is restored
5124 op2 = impTokenToHandle(pResolvedToken, nullptr, TRUE /* mustRestoreHandle */);
5126 { // compDonotInline()
5130 GenTreeArgList* args = gtNewArgList(op2, impGetStructAddr(exprToBox, operCls, (unsigned)CHECK_SPILL_ALL, true));
5131 op1 = gtNewHelperCallNode(boxHelper, TYP_REF, GTF_EXCEPT, args);
5134 /* Push the result back on the stack, */
5135 /* even if clsHnd is a value class we want the TI_REF */
5136 typeInfo tiRetVal = typeInfo(TI_REF, info.compCompHnd->getTypeForBox(pResolvedToken->hClass));
5137 impPushOnStack(op1, tiRetVal);
5140 //------------------------------------------------------------------------
5141 // impImportNewObjArray: Build and import `new` of multi-dimensional array
5144 // pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized
5145 // by a call to CEEInfo::resolveToken().
5146 // pCallInfo - The CORINFO_CALL_INFO that has been initialized
5147 // by a call to CEEInfo::getCallInfo().
5150 // The multi-dimensional array constructor arguments (array dimensions) are
5151 // pushed on the IL stack on entry to this method.
5154 // Multi-dimensional array constructors are imported as calls to a JIT
5155 // helper, not as regular calls.
5157 void Compiler::impImportNewObjArray(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo)
5159 GenTreePtr classHandle = impParentClassTokenToHandle(pResolvedToken);
5160 if (classHandle == nullptr)
5161 { // compDonotInline()
5165 assert(pCallInfo->sig.numArgs);
5168 GenTreeArgList* args;
5171 // There are two different JIT helpers that can be used to allocate
5172 // multi-dimensional arrays:
5174 // - CORINFO_HELP_NEW_MDARR - takes the array dimensions as varargs.
5175 // This variant is deprecated. It should be eventually removed.
5177 // - CORINFO_HELP_NEW_MDARR_NONVARARG - takes the array dimensions as
5178 // pointer to block of int32s. This variant is more portable.
5180 // The non-varargs helper is enabled for CoreRT only for now. Enabling this
5181 // unconditionally would require ReadyToRun version bump.
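// For example (illustrative only, not taken from these sources), C# "new int[2, 3]" reaches
// this path: the two dimension values sit on the IL stack at the newobj, and with the
// non-varargs helper they are stored into the shared lvaNewObjArrayArgs block and passed
// to CORINFO_HELP_NEW_MDARR_NONVARARG as a pointer to a block of int32s.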
5183 CLANG_FORMAT_COMMENT_ANCHOR;
5185 #if COR_JIT_EE_VERSION > 460
5186 if (!opts.IsReadyToRun() || IsTargetAbi(CORINFO_CORERT_ABI))
5188 LclVarDsc* newObjArrayArgsVar;
5190 // Reuse the temp used to pass the array dimensions to avoid bloating
5191 // the stack frame in case there are multiple calls to multi-dim array
5192 // constructors within a single method.
5193 if (lvaNewObjArrayArgs == BAD_VAR_NUM)
5195 lvaNewObjArrayArgs = lvaGrabTemp(false DEBUGARG("NewObjArrayArgs"));
5196 lvaTable[lvaNewObjArrayArgs].lvType = TYP_BLK;
5197 lvaTable[lvaNewObjArrayArgs].lvExactSize = 0;
5200 // Increase size of lvaNewObjArrayArgs to be the largest size needed to hold 'numArgs' integers
5201 // for our call to CORINFO_HELP_NEW_MDARR_NONVARARG.
5202 lvaTable[lvaNewObjArrayArgs].lvExactSize =
5203 max(lvaTable[lvaNewObjArrayArgs].lvExactSize, pCallInfo->sig.numArgs * sizeof(INT32));
5205 // The side-effects may include allocation of more multi-dimensional arrays. Spill all side-effects
5206 // to ensure that the shared lvaNewObjArrayArgs local variable is only ever used to pass arguments
5207 // to one allocation at a time.
5208 impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportNewObjArray"));
5211 // The arguments of the CORINFO_HELP_NEW_MDARR_NONVARARG helper are:
5212 // - Array class handle
5213 // - Number of dimension arguments
5214 // - Pointer to block of int32 dimensions - address of lvaNewObjArrayArgs temp.
5217 node = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK);
5218 node = gtNewOperNode(GT_ADDR, TYP_I_IMPL, node);
5220 // Pop dimension arguments from the stack one at a time and store it
5221 // into lvaNewObjArrayArgs temp.
5222 for (int i = pCallInfo->sig.numArgs - 1; i >= 0; i--)
5224 GenTreePtr arg = impImplicitIorI4Cast(impPopStack().val, TYP_INT);
5226 GenTreePtr dest = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK);
5227 dest = gtNewOperNode(GT_ADDR, TYP_I_IMPL, dest);
5228 dest = gtNewOperNode(GT_ADD, TYP_I_IMPL, dest,
5229 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(INT32) * i));
5230 dest = gtNewOperNode(GT_IND, TYP_INT, dest);
5232 node = gtNewOperNode(GT_COMMA, node->TypeGet(), gtNewAssignNode(dest, arg), node);
5235 args = gtNewArgList(node);
5237 // pass number of arguments to the helper
5238 args = gtNewListNode(gtNewIconNode(pCallInfo->sig.numArgs), args);
5240 args = gtNewListNode(classHandle, args);
5242 node = gtNewHelperCallNode(CORINFO_HELP_NEW_MDARR_NONVARARG, TYP_REF, 0, args);
5248 // The varargs helper needs the type and method handles as last
5249 // and last-1 param (this is a cdecl call, so args will be
5250 // pushed in reverse order on the CPU stack)
5253 args = gtNewArgList(classHandle);
5255 // pass number of arguments to the helper
5256 args = gtNewListNode(gtNewIconNode(pCallInfo->sig.numArgs), args);
5258 unsigned argFlags = 0;
5259 args = impPopList(pCallInfo->sig.numArgs, &argFlags, &pCallInfo->sig, args);
5261 node = gtNewHelperCallNode(CORINFO_HELP_NEW_MDARR, TYP_REF, 0, args);
5263 // varargs, so we pop the arguments
5264 node->gtFlags |= GTF_CALL_POP_ARGS;
5267 // At the present time we don't track Caller pop arguments
5268 // that have GC references in them
5269 for (GenTreeArgList* temp = args; temp; temp = temp->Rest())
5271 assert(temp->Current()->gtType != TYP_REF);
5276 node->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
5277 node->gtCall.compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)pResolvedToken->hClass;
5279 // Remember that this basic block contains 'new' of a md array
5280 compCurBB->bbFlags |= BBF_HAS_NEWARRAY;
5282 impPushOnStack(node, typeInfo(TI_REF, pResolvedToken->hClass));
5285 GenTreePtr Compiler::impTransformThis(GenTreePtr thisPtr,
5286 CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
5287 CORINFO_THIS_TRANSFORM transform)
5291 case CORINFO_DEREF_THIS:
5293 GenTreePtr obj = thisPtr;
5295 // This does a LDIND on the obj, which should be a byref. pointing to a ref
5296 impBashVarAddrsToI(obj);
5297 assert(genActualType(obj->gtType) == TYP_I_IMPL || obj->gtType == TYP_BYREF);
5298 CorInfoType constraintTyp = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass);
5300 obj = gtNewOperNode(GT_IND, JITtype2varType(constraintTyp), obj);
5301 // ldind could point anywhere, for example a boxed class static int
5302 obj->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
5307 case CORINFO_BOX_THIS:
5309 // Constraint calls where there might be no
5310 // unboxed entry point require us to implement the call via helper.
5311 // These only occur when a possible target of the call
5312 // may have inherited an implementation of an interface
5313 // method from System.Object or System.ValueType. The EE does not provide us with
5314 // "unboxed" versions of these methods.
5316 GenTreePtr obj = thisPtr;
5318 assert(obj->TypeGet() == TYP_BYREF || obj->TypeGet() == TYP_I_IMPL);
5319 obj = gtNewObjNode(pConstrainedResolvedToken->hClass, obj);
5320 obj->gtFlags |= GTF_EXCEPT;
5322 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass);
5323 var_types objType = JITtype2varType(jitTyp);
5324 if (impIsPrimitive(jitTyp))
5326 if (obj->OperIsBlk())
5328 obj->ChangeOperUnchecked(GT_IND);
5330 // Obj could point anywhere, for example a boxed class static int
5331 obj->gtFlags |= GTF_IND_TGTANYWHERE;
5332 obj->gtOp.gtOp2 = nullptr; // must be zero for tree walkers
5335 obj->gtType = JITtype2varType(jitTyp);
5336 assert(varTypeIsArithmetic(obj->gtType));
5339 // This pushes on the dereferenced byref
5340 // This is then used immediately to box.
5341 impPushOnStack(obj, verMakeTypeInfo(pConstrainedResolvedToken->hClass).NormaliseForStack());
5343 // This pops off the byref-to-a-value-type remaining on the stack and
5344 // replaces it with a boxed object.
5345 // This is then used as the object to the virtual call immediately below.
5346 impImportAndPushBox(pConstrainedResolvedToken);
5347 if (compDonotInline())
5352 obj = impPopStack().val;
5355 case CORINFO_NO_THIS_TRANSFORM:
5361 //------------------------------------------------------------------------
5362 // impCanPInvokeInline: examine information from a call to see if the call
5363 // qualifies as an inline pinvoke.
5366 // block - block containing the call, or for inlinees, block
5367 // containing the call being inlined
5370 // true if this call qualifies as an inline pinvoke, false otherwise
5373 // Checks basic legality and then a number of ambient conditions
5374 // where we could pinvoke but choose not to
5376 bool Compiler::impCanPInvokeInline(BasicBlock* block)
5378 return impCanPInvokeInlineCallSite(block) && getInlinePInvokeEnabled() && (!opts.compDbgCode) &&
5379 (compCodeOpt() != SMALL_CODE) && (!opts.compNoPInvokeInlineCB) // profiler is preventing inline pinvoke
5383 //------------------------------------------------------------------------
5384 // impCanPInvokeInlineCallSite: basic legality checks using information
5385 // from a call to see if the call qualifies as an inline pinvoke.
5388 // block - block containing the call, or for inlinees, block
5389 // containing the call being inlined
5392 // true if this call can legally qualify as an inline pinvoke, false otherwise
5395 // For runtimes that support exception handling interop there are
5396 // restrictions on using inline pinvoke in handler regions.
5398 // * We have to disable pinvoke inlining inside of filters because
5399 // in case the main execution (i.e. in the try block) is inside
5400 // unmanaged code, we cannot reuse the inlined stub (we still need
5401 // the original state until we are in the catch handler)
5403 // * We disable pinvoke inlining inside handlers since the GSCookie
5404 // is in the inlined Frame (see
5405 // CORINFO_EE_INFO::InlinedCallFrameInfo::offsetOfGSCookie), but
5406 // this would not protect framelets/return-address of handlers.
5408 // These restrictions are currently also in place for CoreCLR but
5409 // can be relaxed when coreclr/#8459 is addressed.
5411 bool Compiler::impCanPInvokeInlineCallSite(BasicBlock* block)
5413 #ifdef _TARGET_AMD64_
5414 // On x64, we disable pinvoke inlining inside of try regions.
5415 // Here is the comment from JIT64 explaining why:
5417 // [VSWhidbey: 611015] - because the jitted code links in the
5418 // Frame (instead of the stub) we rely on the Frame not being
5419 // 'active' until inside the stub. This normally happens by the
5420 // stub setting the return address pointer in the Frame object
5421 // inside the stub. On a normal return, the return address
5422 // pointer is zeroed out so the Frame can be safely re-used, but
5423 // if an exception occurs, nobody zeros out the return address
5424 // pointer. Thus if we re-used the Frame object, it would go
5425 // 'active' as soon as we link it into the Frame chain.
5427 // Technically we only need to disable PInvoke inlining if we're
5428 // in a handler or if we're in a try body with a catch or
5429 // filter/except where other non-handler code in this method
5430 // might run and try to re-use the dirty Frame object.
5432 // A desktop test case where this seems to matter is
5433 // jit\jit64\ebvts\mcpp\sources2\ijw\__clrcall\vector_ctor_dtor.02\deldtor_clr.exe
5434 const bool inX64Try = block->hasTryIndex();
5436 const bool inX64Try = false;
5437 #endif // _TARGET_AMD64_
5439 return !inX64Try && !block->hasHndIndex();
5442 //------------------------------------------------------------------------
5443 // impCheckForPInvokeCall examine call to see if it is a pinvoke and if so
5444 // if it can be expressed as an inline pinvoke.
5447 // call - tree for the call
5448 // methHnd - handle for the method being called (may be null)
5449 // sig - signature of the method being called
5450 // mflags - method flags for the method being called
5451 // block - block containing the call, or for inlinees, block
5452 // containing the call being inlined
5455 // Sets GTF_CALL_M_PINVOKE on the call for pinvokes.
5457 // Also sets GTF_CALL_UNMANAGED on call for inline pinvokes if the
5458 // call passes a combination of legality and profitability checks.
5460 // If GTF_CALL_UNMANAGED is set, increments info.compCallUnmanaged
5462 void Compiler::impCheckForPInvokeCall(
5463 GenTreePtr call, CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* sig, unsigned mflags, BasicBlock* block)
5465 CorInfoUnmanagedCallConv unmanagedCallConv;
5467 // If VM flagged it as Pinvoke, flag the call node accordingly
5468 if ((mflags & CORINFO_FLG_PINVOKE) != 0)
5470 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_PINVOKE;
5475 if ((mflags & CORINFO_FLG_PINVOKE) == 0 || (mflags & CORINFO_FLG_NOSECURITYWRAP) == 0)
5480 unmanagedCallConv = info.compCompHnd->getUnmanagedCallConv(methHnd);
5484 CorInfoCallConv callConv = CorInfoCallConv(sig->callConv & CORINFO_CALLCONV_MASK);
5485 if (callConv == CORINFO_CALLCONV_NATIVEVARARG)
5487 // Used by the IL Stubs.
5488 callConv = CORINFO_CALLCONV_C;
5490 static_assert_no_msg((unsigned)CORINFO_CALLCONV_C == (unsigned)CORINFO_UNMANAGED_CALLCONV_C);
5491 static_assert_no_msg((unsigned)CORINFO_CALLCONV_STDCALL == (unsigned)CORINFO_UNMANAGED_CALLCONV_STDCALL);
5492 static_assert_no_msg((unsigned)CORINFO_CALLCONV_THISCALL == (unsigned)CORINFO_UNMANAGED_CALLCONV_THISCALL);
5493 unmanagedCallConv = CorInfoUnmanagedCallConv(callConv);
5495 assert(!call->gtCall.gtCallCookie);
5498 if (unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_C && unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_STDCALL &&
5499 unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_THISCALL)
5503 optNativeCallCount++;
5505 if (opts.compMustInlinePInvokeCalli && methHnd == nullptr)
5507 // Always inline pinvoke.
5511 // Check legality and profitability.
5512 if (!impCanPInvokeInline(block))
5517 if (info.compCompHnd->pInvokeMarshalingRequired(methHnd, sig))
5522 // Size-speed tradeoff: don't use inline pinvoke at rarely
5523 // executed call sites. The non-inline version is more compact.
5525 if (block->isRunRarely())
5531 JITLOG((LL_INFO1000000, "\nInline a CALLI PINVOKE call from method %s", info.compFullName));
5533 call->gtFlags |= GTF_CALL_UNMANAGED;
5534 info.compCallUnmanaged++;
5536 // AMD64 convention is same for native and managed
5537 if (unmanagedCallConv == CORINFO_UNMANAGED_CALLCONV_C)
5539 call->gtFlags |= GTF_CALL_POP_ARGS;
5542 if (unmanagedCallConv == CORINFO_UNMANAGED_CALLCONV_THISCALL)
5544 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_UNMGD_THISCALL;
5548 GenTreePtr Compiler::impImportIndirectCall(CORINFO_SIG_INFO* sig, IL_OFFSETX ilOffset)
5550 var_types callRetTyp = JITtype2varType(sig->retType);
5552 /* The function pointer is on top of the stack - It may be a
5553 * complex expression. As it is evaluated after the args,
5554 * it may cause registered args to be spilled. Simply spill it.
5557 // Ignore this trivial case.
5558 if (impStackTop().val->gtOper != GT_LCL_VAR)
5560 impSpillStackEntry(verCurrentState.esStackDepth - 1,
5561 BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impImportIndirectCall"));
5564 /* Get the function pointer */
5566 GenTreePtr fptr = impPopStack().val;
5567 assert(genActualType(fptr->gtType) == TYP_I_IMPL);
5570 // This temporary must never be converted to a double in stress mode,
5571 // because that can introduce a call to the cast helper after the
5572 // arguments have already been evaluated.
5574 if (fptr->OperGet() == GT_LCL_VAR)
5576 lvaTable[fptr->gtLclVarCommon.gtLclNum].lvKeepType = 1;
5580 /* Create the call node */
5582 GenTreePtr call = gtNewIndCallNode(fptr, callRetTyp, nullptr, ilOffset);
5584 call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
5589 /*****************************************************************************/
5591 void Compiler::impPopArgsForUnmanagedCall(GenTreePtr call, CORINFO_SIG_INFO* sig)
5593 assert(call->gtFlags & GTF_CALL_UNMANAGED);
5595 /* Since we push the arguments in reverse order (i.e. right -> left)
5596 * spill any side effects from the stack
5598 * OBS: If there is only one side effect we do not need to spill it
5599 * thus we have to spill all side-effects except the last one
5602 unsigned lastLevelWithSideEffects = UINT_MAX;
5604 unsigned argsToReverse = sig->numArgs;
5606 // For "thiscall", the first argument goes in a register. Since its
5607 // order does not need to be changed, we do not need to spill it
5609 if (call->gtCall.gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
5611 assert(argsToReverse);
5615 #ifndef _TARGET_X86_
5616 // Don't reverse args on ARM or x64 - first four args always placed in regs in order
5620 for (unsigned level = verCurrentState.esStackDepth - argsToReverse; level < verCurrentState.esStackDepth; level++)
5622 if (verCurrentState.esStack[level].val->gtFlags & GTF_ORDER_SIDEEFF)
5624 assert(lastLevelWithSideEffects == UINT_MAX);
5626 impSpillStackEntry(level,
5627 BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - other side effect"));
5629 else if (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT)
5631 if (lastLevelWithSideEffects != UINT_MAX)
5633 /* We had a previous side effect - must spill it */
5634 impSpillStackEntry(lastLevelWithSideEffects,
5635 BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - side effect"));
5637 /* Record the level for the current side effect in case we will spill it */
5638 lastLevelWithSideEffects = level;
5642 /* This is the first side effect encountered - record its level */
5644 lastLevelWithSideEffects = level;
5649 /* The argument list is now "clean" - no out-of-order side effects
5650 * Pop the argument list in reverse order */
5652 unsigned argFlags = 0;
5653 GenTreePtr args = call->gtCall.gtCallArgs =
5654 impPopRevList(sig->numArgs, &argFlags, sig, sig->numArgs - argsToReverse);
5656 if (call->gtCall.gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
5658 GenTreePtr thisPtr = args->Current();
5659 impBashVarAddrsToI(thisPtr);
5660 assert(thisPtr->TypeGet() == TYP_I_IMPL || thisPtr->TypeGet() == TYP_BYREF);
5665 call->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
5669 //------------------------------------------------------------------------
5670 // impInitClass: Build a node to initialize the class before accessing the
5671 // field if necessary
5674 // pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized
5675 // by a call to CEEInfo::resolveToken().
5677 // Return Value: If needed, a pointer to the node that will perform the class
5678 // initialization. Otherwise, nullptr.
5681 GenTreePtr Compiler::impInitClass(CORINFO_RESOLVED_TOKEN* pResolvedToken)
5683 CorInfoInitClassResult initClassResult =
5684 info.compCompHnd->initClass(pResolvedToken->hField, info.compMethodHnd, impTokenLookupContextHandle);
5686 if ((initClassResult & CORINFO_INITCLASS_USE_HELPER) == 0)
5692 GenTreePtr node = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup);
5694 if (node == nullptr)
5696 assert(compDonotInline());
5702 node = gtNewHelperCallNode(CORINFO_HELP_INITCLASS, TYP_VOID, 0, gtNewArgList(node));
5706 // Call the shared non-gc static helper, as it's the fastest
5707 node = fgGetSharedCCtor(pResolvedToken->hClass);
5713 GenTreePtr Compiler::impImportStaticReadOnlyField(void* fldAddr, var_types lclTyp)
5715 GenTreePtr op1 = nullptr;
5724 ival = *((bool*)fldAddr);
5728 ival = *((signed char*)fldAddr);
5732 ival = *((unsigned char*)fldAddr);
5736 ival = *((short*)fldAddr);
5741 ival = *((unsigned short*)fldAddr);
5746 ival = *((int*)fldAddr);
5748 op1 = gtNewIconNode(ival);
5753 lval = *((__int64*)fldAddr);
5754 op1 = gtNewLconNode(lval);
5758 dval = *((float*)fldAddr);
5759 op1 = gtNewDconNode(dval);
5760 #if !FEATURE_X87_DOUBLES
5761 // X87 stack doesn't differentiate between float/double
5762 // so R4 is treated as R8, but everybody else does
5763 op1->gtType = TYP_FLOAT;
5764 #endif // FEATURE_X87_DOUBLES
5768 dval = *((double*)fldAddr);
5769 op1 = gtNewDconNode(dval);
5773 assert(!"Unexpected lclTyp");
5780 GenTreePtr Compiler::impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolvedToken,
5781 CORINFO_ACCESS_FLAGS access,
5782 CORINFO_FIELD_INFO* pFieldInfo,
5787 switch (pFieldInfo->fieldAccessor)
5789 case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
5791 assert(!compIsForInlining());
5793 // We first call a special helper to get the statics base pointer
5794 op1 = impParentClassTokenToHandle(pResolvedToken);
5796             // compIsForInlining() is false, so we should never get NULL here
5797 assert(op1 != nullptr);
5799 var_types type = TYP_BYREF;
5801 switch (pFieldInfo->helper)
5803 case CORINFO_HELP_GETGENERICS_NONGCTHREADSTATIC_BASE:
5806 case CORINFO_HELP_GETGENERICS_GCSTATIC_BASE:
5807 case CORINFO_HELP_GETGENERICS_NONGCSTATIC_BASE:
5808 case CORINFO_HELP_GETGENERICS_GCTHREADSTATIC_BASE:
5811 assert(!"unknown generic statics helper");
5815 op1 = gtNewHelperCallNode(pFieldInfo->helper, type, 0, gtNewArgList(op1));
5817 FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
5818 op1 = gtNewOperNode(GT_ADD, type, op1,
5819 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, pFieldInfo->offset, fs));
5823 case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
5825 #ifdef FEATURE_READYTORUN_COMPILER
5826 if (opts.IsReadyToRun())
5828 unsigned callFlags = 0;
5830 if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT)
5832 callFlags |= GTF_CALL_HOISTABLE;
5835 op1 = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_STATIC_BASE, TYP_BYREF, callFlags);
5837 op1->gtCall.setEntryPoint(pFieldInfo->fieldLookup);
5842 op1 = fgGetStaticsCCtorHelper(pResolvedToken->hClass, pFieldInfo->helper);
5846 FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
5847 op1 = gtNewOperNode(GT_ADD, op1->TypeGet(), op1,
5848 new (this, GT_CNS_INT) GenTreeIntCon(TYP_INT, pFieldInfo->offset, fs));
5852 #if COR_JIT_EE_VERSION > 460
5853 case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
5855 #ifdef FEATURE_READYTORUN_COMPILER
5856 noway_assert(opts.IsReadyToRun());
5857 CORINFO_LOOKUP_KIND kind = info.compCompHnd->getLocationOfThisType(info.compMethodHnd);
5858 assert(kind.needsRuntimeLookup);
5860 GenTreePtr ctxTree = getRuntimeContextTree(kind.runtimeLookupKind);
5861 GenTreeArgList* args = gtNewArgList(ctxTree);
5863 unsigned callFlags = 0;
5865 if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT)
5867 callFlags |= GTF_CALL_HOISTABLE;
5869 var_types type = TYP_BYREF;
5870 op1 = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE, type, callFlags, args);
5872 op1->gtCall.setEntryPoint(pFieldInfo->fieldLookup);
5873 FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
5874 op1 = gtNewOperNode(GT_ADD, type, op1,
5875 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, pFieldInfo->offset, fs));
5878 #endif // FEATURE_READYTORUN_COMPILER
5881 #endif // COR_JIT_EE_VERSION > 460
5884 if (!(access & CORINFO_ACCESS_ADDRESS))
5886 // In future, it may be better to just create the right tree here instead of folding it later.
5887 op1 = gtNewFieldRef(lclTyp, pResolvedToken->hField);
5889 if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP)
5891 op1->gtType = TYP_REF; // points at boxed object
5892 FieldSeqNode* firstElemFldSeq =
5893 GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField);
5895 gtNewOperNode(GT_ADD, TYP_BYREF, op1,
5896 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(void*), firstElemFldSeq));
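// The +sizeof(void*) skips the boxed object's method table pointer so that op1 now points at
// the value payload of the boxed static (this is what FirstElemPseudoField models).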
5898 if (varTypeIsStruct(lclTyp))
5900 // Constructor adds GTF_GLOB_REF. Note that this is *not* GTF_EXCEPT.
5901 op1 = gtNewObjNode(pFieldInfo->structType, op1);
5905 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
5906 op1->gtFlags |= GTF_GLOB_REF | GTF_IND_NONFAULTING;
5914 void** pFldAddr = nullptr;
5915 void* fldAddr = info.compCompHnd->getFieldAddress(pResolvedToken->hField, (void**)&pFldAddr);
5917 FieldSeqNode* fldSeq = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
5919 /* Create the data member node */
5920 if (pFldAddr == nullptr)
5922 op1 = gtNewIconHandleNode((size_t)fldAddr, GTF_ICON_STATIC_HDL, fldSeq);
5926 op1 = gtNewIconHandleNode((size_t)pFldAddr, GTF_ICON_STATIC_HDL, fldSeq);
5928                 // There are two cases here: either the static is RVA-based,
5929                 // in which case the type of the FIELD node is not a GC type
5930                 // and the handle to the RVA is a TYP_I_IMPL; or the FIELD node is
5931                 // a GC type and the handle to it is a TYP_BYREF into the GC heap,
5932                 // because handles to statics now go into the large object heap.
5934 var_types handleTyp = (var_types)(varTypeIsGC(lclTyp) ? TYP_BYREF : TYP_I_IMPL);
5935 op1 = gtNewOperNode(GT_IND, handleTyp, op1);
5936 op1->gtFlags |= GTF_IND_INVARIANT | GTF_IND_NONFAULTING;
5943 if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP)
5945 op1 = gtNewOperNode(GT_IND, TYP_REF, op1);
5947 FieldSeqNode* fldSeq = GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField);
5949 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
5950 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(void*), fldSeq));
5953 if (!(access & CORINFO_ACCESS_ADDRESS))
5955 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
5956 op1->gtFlags |= GTF_GLOB_REF;
5962 // In general, try to call this before most of the verification work: most people expect access
5963 // exceptions to be raised before verification exceptions, which usually won't happen if you call
5964 // this afterwards. It also turns out that if you can't access something, we consider you unverifiable for other reasons.
5965 void Compiler::impHandleAccessAllowed(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall)
5967 if (result != CORINFO_ACCESS_ALLOWED)
5969 impHandleAccessAllowedInternal(result, helperCall);
5973 void Compiler::impHandleAccessAllowedInternal(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall)
5977 case CORINFO_ACCESS_ALLOWED:
5979 case CORINFO_ACCESS_ILLEGAL:
5980 // if we're verifying, then we need to reject the illegal access to ensure that we don't think the
5981 // method is verifiable. Otherwise, delay the exception to runtime.
5982 if (compIsForImportOnly())
5984 info.compCompHnd->ThrowExceptionForHelper(helperCall);
5988 impInsertHelperCall(helperCall);
5991 case CORINFO_ACCESS_RUNTIME_CHECK:
5992 impInsertHelperCall(helperCall);
5997 void Compiler::impInsertHelperCall(CORINFO_HELPER_DESC* helperInfo)
5999 // Construct the argument list
6000 GenTreeArgList* args = nullptr;
6001 assert(helperInfo->helperNum != CORINFO_HELP_UNDEF);
6002 for (unsigned i = helperInfo->numArgs; i > 0; --i)
6004 const CORINFO_HELPER_ARG& helperArg = helperInfo->args[i - 1];
6005 GenTreePtr currentArg = nullptr;
6006 switch (helperArg.argType)
6008 case CORINFO_HELPER_ARG_TYPE_Field:
6009 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(
6010 info.compCompHnd->getFieldClass(helperArg.fieldHandle));
6011 currentArg = gtNewIconEmbFldHndNode(helperArg.fieldHandle);
6013 case CORINFO_HELPER_ARG_TYPE_Method:
6014 info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(helperArg.methodHandle);
6015 currentArg = gtNewIconEmbMethHndNode(helperArg.methodHandle);
6017 case CORINFO_HELPER_ARG_TYPE_Class:
6018 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(helperArg.classHandle);
6019 currentArg = gtNewIconEmbClsHndNode(helperArg.classHandle);
6021 case CORINFO_HELPER_ARG_TYPE_Module:
6022 currentArg = gtNewIconEmbScpHndNode(helperArg.moduleHandle);
6024 case CORINFO_HELPER_ARG_TYPE_Const:
6025 currentArg = gtNewIconNode(helperArg.constant);
6028 NO_WAY("Illegal helper arg type");
6030 args = (currentArg == nullptr) ? gtNewArgList(currentArg) : gtNewListNode(currentArg, args);
6034 * Mark as CSE'able, and hoistable. Consider marking hoistable unless you're in the inlinee.
6035 * Also, consider sticking this in the first basic block.
6037 GenTreePtr callout = gtNewHelperCallNode(helperInfo->helperNum, TYP_VOID, GTF_EXCEPT, args);
6038 impAppendTree(callout, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
6041 void Compiler::impInsertCalloutForDelegate(CORINFO_METHOD_HANDLE callerMethodHnd,
6042 CORINFO_METHOD_HANDLE calleeMethodHnd,
6043 CORINFO_CLASS_HANDLE delegateTypeHnd)
6045 #ifdef FEATURE_CORECLR
6046 if (!info.compCompHnd->isDelegateCreationAllowed(delegateTypeHnd, calleeMethodHnd))
6048 // Call the JIT_DelegateSecurityCheck helper before calling the actual function.
6049 // This helper throws an exception if the CLR host disallows the call.
6051 GenTreePtr helper = gtNewHelperCallNode(CORINFO_HELP_DELEGATE_SECURITY_CHECK, TYP_VOID, GTF_EXCEPT,
6052 gtNewArgList(gtNewIconEmbClsHndNode(delegateTypeHnd),
6053 gtNewIconEmbMethHndNode(calleeMethodHnd)));
6054 // Append the callout statement
6055 impAppendTree(helper, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
6057 #endif // FEATURE_CORECLR
6060 // Checks whether the return types of caller and callee are compatible
6061 // so that callee can be tail called. Note that here we don't check
6062 // compatibility in IL Verifier sense, but on the lines of return type
6063 // sizes are equal and get returned in the same return register.
6064 bool Compiler::impTailCallRetTypeCompatible(var_types callerRetType,
6065 CORINFO_CLASS_HANDLE callerRetTypeClass,
6066 var_types calleeRetType,
6067 CORINFO_CLASS_HANDLE calleeRetTypeClass)
6069 // Note that we can not relax this condition with genActualType() as the
6070 // calling convention dictates that the caller of a function with a small
6071 // typed return value is responsible for normalizing the return val.
6072 if (callerRetType == calleeRetType)
6077 #if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)
6079 if (callerRetType == TYP_VOID)
6081 // This needs to be allowed to support the following IL pattern that Jit64 allows:
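    // (The pattern elided here is essentially the call+pop+ret shape handled further below:
    //  the callee's return value is popped and the void-returning caller then returns, i.e.
    //  roughly "tail. call <non-void method>; pop; ret".)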
6086 // Note that the above IL pattern is not valid as per IL verification rules.
6087 // Therefore, only full trust code can take advantage of this pattern.
6091 // These checks return true if the return value type sizes are the same and
6092 // get returned in the same return register i.e. caller doesn't need to normalize
6093 // return value. Some of the tail calls permitted by below checks would have
6094 // been rejected by IL Verifier before we reached here. Therefore, only full
6095 // trust code can make those tail calls.
6096 unsigned callerRetTypeSize = 0;
6097 unsigned calleeRetTypeSize = 0;
6098 bool isCallerRetTypMBEnreg =
6099 VarTypeIsMultiByteAndCanEnreg(callerRetType, callerRetTypeClass, &callerRetTypeSize, true);
6100 bool isCalleeRetTypMBEnreg =
6101 VarTypeIsMultiByteAndCanEnreg(calleeRetType, calleeRetTypeClass, &calleeRetTypeSize, true);
6103 if (varTypeIsIntegral(callerRetType) || isCallerRetTypMBEnreg)
6105 return (varTypeIsIntegral(calleeRetType) || isCalleeRetTypMBEnreg) && (callerRetTypeSize == calleeRetTypeSize);
6107 #endif // _TARGET_AMD64_ || _TARGET_ARM64_
6115 PREFIX_TAILCALL_EXPLICIT = 0x00000001, // call has "tail" IL prefix
6116 PREFIX_TAILCALL_IMPLICIT =
6117 0x00000010, // call is treated as having "tail" prefix even though there is no "tail" IL prefix
6118 PREFIX_TAILCALL = (PREFIX_TAILCALL_EXPLICIT | PREFIX_TAILCALL_IMPLICIT),
6119 PREFIX_VOLATILE = 0x00000100,
6120 PREFIX_UNALIGNED = 0x00001000,
6121 PREFIX_CONSTRAINED = 0x00010000,
6122 PREFIX_READONLY = 0x00100000
6125 /********************************************************************************
6127  * Returns true if the current opcode and the opcodes following it correspond
6128 * to a supported tail call IL pattern.
6131 bool Compiler::impIsTailCallILPattern(bool tailPrefixed,
6133 const BYTE* codeAddrOfNextOpcode,
6134 const BYTE* codeEnd,
6136 bool* isCallPopAndRet /* = nullptr */)
6138 // Bail out if the current opcode is not a call.
6139 if (!impOpcodeIsCallOpcode(curOpcode))
6144 #if !FEATURE_TAILCALL_OPT_SHARED_RETURN
6145 // If shared ret tail opt is not enabled, we will enable
6146 // it for recursive methods.
6150     // we can actually handle the case where the ret is in a fallthrough block, as long as that is the only part of the
6151 // sequence. Make sure we don't go past the end of the IL however.
6152 codeEnd = min(codeEnd + 1, info.compCode + info.compILCodeSize);
6155 // Bail out if there is no next opcode after call
6156 if (codeAddrOfNextOpcode >= codeEnd)
6161 // Scan the opcodes to look for the following IL patterns if either
6162 // i) the call is not tail prefixed (i.e. implicit tail call) or
6163 // ii) if tail prefixed, IL verification is not needed for the method.
6165 // Only in the above two cases we can allow the below tail call patterns
6166 // violating ECMA spec.
6182 #ifdef _TARGET_AMD64_
6185 nextOpcode = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode);
6186 codeAddrOfNextOpcode += sizeof(__int8);
6187 } while ((codeAddrOfNextOpcode < codeEnd) && // Haven't reached end of method
6188 (!tailPrefixed || !tiVerificationNeeded) && // Not ".tail" prefixed or method requires no IL verification
6189 ((nextOpcode == CEE_NOP) || ((nextOpcode == CEE_POP) && (++cntPop == 1)))); // Next opcode = nop or exactly
6190 // one pop seen so far.
6192 nextOpcode = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode);
6195 if (isCallPopAndRet)
6197 // Allow call+pop+ret to be tail call optimized if caller ret type is void
6198 *isCallPopAndRet = (nextOpcode == CEE_RET) && (cntPop == 1);
6201 #ifdef _TARGET_AMD64_
6203 // Tail call IL pattern could be either of the following
6204 // 1) call/callvirt/calli + ret
6205 // 2) call/callvirt/calli + pop + ret in a method returning void.
6206 return (nextOpcode == CEE_RET) && ((cntPop == 0) || ((cntPop == 1) && (info.compRetType == TYP_VOID)));
6207 #else //!_TARGET_AMD64_
6208 return (nextOpcode == CEE_RET) && (cntPop == 0);
6212 /*****************************************************************************
6214 * Determine whether the call could be converted to an implicit tail call
6217 bool Compiler::impIsImplicitTailCallCandidate(
6218 OPCODE opcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, int prefixFlags, bool isRecursive)
6221 #if FEATURE_TAILCALL_OPT
6222 if (!opts.compTailCallOpt)
6227 if (opts.compDbgCode || opts.MinOpts())
6232 // must not be tail prefixed
6233 if (prefixFlags & PREFIX_TAILCALL_EXPLICIT)
6238 #if !FEATURE_TAILCALL_OPT_SHARED_RETURN
6239 // the block containing call is marked as BBJ_RETURN
6240 // We allow shared ret tail call optimization on recursive calls even under
6241 // !FEATURE_TAILCALL_OPT_SHARED_RETURN.
6242 if (!isRecursive && (compCurBB->bbJumpKind != BBJ_RETURN))
6244 #endif // !FEATURE_TAILCALL_OPT_SHARED_RETURN
6246 // must be call+ret or call+pop+ret
6247 if (!impIsTailCallILPattern(false, opcode, codeAddrOfNextOpcode, codeEnd, isRecursive))
6255 #endif // FEATURE_TAILCALL_OPT
6258 //------------------------------------------------------------------------
6259 // impImportCall: import a call-inspiring opcode
6262 // opcode - opcode that inspires the call
6263 // pResolvedToken - resolved token for the call target
6264 // pConstrainedResolvedToken - resolved constraint token (or nullptr)
6265 //    newObjThis - tree for this pointer or uninitialized newobj temp (or nullptr)
6266 // prefixFlags - IL prefix flags for the call
6267 // callInfo - EE supplied info for the call
6268 // rawILOffset - IL offset of the opcode
6271 // Type of the call's return value.
6274 // opcode can be CEE_CALL, CEE_CALLI, CEE_CALLVIRT, or CEE_NEWOBJ.
6276 // For CEE_NEWOBJ, newobjThis should be the temp grabbed for the allocated
6277 //    uninitialized object.
6280 #pragma warning(push)
6281 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
6284 var_types Compiler::impImportCall(OPCODE opcode,
6285 CORINFO_RESOLVED_TOKEN* pResolvedToken,
6286 CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
6287 GenTreePtr newobjThis,
6289 CORINFO_CALL_INFO* callInfo,
6290 IL_OFFSET rawILOffset)
6292 assert(opcode == CEE_CALL || opcode == CEE_CALLVIRT || opcode == CEE_NEWOBJ || opcode == CEE_CALLI);
6294 IL_OFFSETX ilOffset = impCurILOffset(rawILOffset, true);
6295 var_types callRetTyp = TYP_COUNT;
6296 CORINFO_SIG_INFO* sig = nullptr;
6297 CORINFO_METHOD_HANDLE methHnd = nullptr;
6298 CORINFO_CLASS_HANDLE clsHnd = nullptr;
6299 unsigned clsFlags = 0;
6300 unsigned mflags = 0;
6301 unsigned argFlags = 0;
6302 GenTreePtr call = nullptr;
6303 GenTreeArgList* args = nullptr;
6304 CORINFO_THIS_TRANSFORM constraintCallThisTransform = CORINFO_NO_THIS_TRANSFORM;
6305 CORINFO_CONTEXT_HANDLE exactContextHnd = nullptr;
6306 BOOL exactContextNeedsRuntimeLookup = FALSE;
6307 bool canTailCall = true;
6308 const char* szCanTailCallFailReason = nullptr;
6309 int tailCall = prefixFlags & PREFIX_TAILCALL;
6310 bool readonlyCall = (prefixFlags & PREFIX_READONLY) != 0;
6312 // Synchronized methods need to call CORINFO_HELP_MON_EXIT at the end. We could
6313 // do that before tailcalls, but that is probably not the intended
6314 // semantic. So just disallow tailcalls from synchronized methods.
6315 // Also, popping arguments in a varargs function is more work and NYI
6316 // If we have a security object, we have to keep our frame around for callers
6317 // to see any imperative security.
6318 if (info.compFlags & CORINFO_FLG_SYNCH)
6320 canTailCall = false;
6321 szCanTailCallFailReason = "Caller is synchronized";
6323 #if !FEATURE_FIXED_OUT_ARGS
6324 else if (info.compIsVarArgs)
6326 canTailCall = false;
6327 szCanTailCallFailReason = "Caller is varargs";
6329 #endif // FEATURE_FIXED_OUT_ARGS
6330 else if (opts.compNeedSecurityCheck)
6332 canTailCall = false;
6333 szCanTailCallFailReason = "Caller requires a security check.";
6336 // We only need to cast the return value of pinvoke inlined calls that return small types
6338 // TODO-AMD64-Cleanup: Remove this when we stop interoperating with JIT64, or if we decide to stop
6339 // widening everything! CoreCLR does not support JIT64 interoperation so no need to widen there.
6340 // The existing x64 JIT doesn't bother widening all types to int, so we have to assume for
6341 // the time being that the callee might be compiled by the other JIT and thus the return
6342 // value will need to be widened by us (or not widened at all...)
6344 // ReadyToRun code sticks with default calling convention that does not widen small return types.
6346 bool checkForSmallType = opts.IsJit64Compat() || opts.IsReadyToRun();
6347 bool bIntrinsicImported = false;
6349 CORINFO_SIG_INFO calliSig;
6350 GenTreeArgList* extraArg = nullptr;
6352 /*-------------------------------------------------------------------------
6353 * First create the call node
6356 if (opcode == CEE_CALLI)
6358 /* Get the call site sig */
6359 eeGetSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &calliSig);
6361 callRetTyp = JITtype2varType(calliSig.retType);
6363 call = impImportIndirectCall(&calliSig, ilOffset);
6365 // We don't know the target method, so we have to infer the flags, or
6366 // assume the worst-case.
6367 mflags = (calliSig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC;
6372 unsigned structSize =
6373 (callRetTyp == TYP_STRUCT) ? info.compCompHnd->getClassSize(calliSig.retTypeSigClass) : 0;
6374 printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n",
6375 opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize);
6378 // This should be checked in impImportBlockCode.
6379 assert(!compIsForInlining() || !(impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY));
6384 // We cannot lazily obtain the signature of a CALLI call because it has no method
6385 // handle that we can use, so we need to save its full call signature here.
6386 assert(call->gtCall.callSig == nullptr);
6387 call->gtCall.callSig = new (this, CMK_CorSig) CORINFO_SIG_INFO;
6388 *call->gtCall.callSig = calliSig;
6391 else // (opcode != CEE_CALLI)
6393 CorInfoIntrinsics intrinsicID = CORINFO_INTRINSIC_Count;
6395 // Passing CORINFO_CALLINFO_ALLOWINSTPARAM indicates that this JIT is prepared to
6396 // supply the instantiation parameters necessary to make direct calls to underlying
6397 // shared generic code, rather than calling through instantiating stubs. If the
6398 // returned signature has CORINFO_CALLCONV_PARAMTYPE then this indicates that the JIT
6399 // must indeed pass an instantiation parameter.
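        // In other words, when the returned signature has CORINFO_CALLCONV_PARAMTYPE set, we build
        // and pass the instantiation argument ourselves (see the "extra arg" handling further below)
        // rather than routing the call through an instantiating stub.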
6401 methHnd = callInfo->hMethod;
6403 sig = &(callInfo->sig);
6404 callRetTyp = JITtype2varType(sig->retType);
6406 mflags = callInfo->methodFlags;
6411 unsigned structSize = (callRetTyp == TYP_STRUCT) ? info.compCompHnd->getClassSize(sig->retTypeSigClass) : 0;
6412 printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n",
6413 opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize);
6416 if (compIsForInlining())
6418 /* Does this call site have security boundary restrictions? */
6420 if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
6422 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_SECURITY);
6426 /* Does the inlinee need a security check token on the frame */
6428 if (mflags & CORINFO_FLG_SECURITYCHECK)
6430 compInlineResult->NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
6434 /* Does the inlinee use StackCrawlMark */
6436 if (mflags & CORINFO_FLG_DONT_INLINE_CALLER)
6438 compInlineResult->NoteFatal(InlineObservation::CALLEE_STACK_CRAWL_MARK);
6442 /* For now ignore delegate invoke */
6444 if (mflags & CORINFO_FLG_DELEGATE_INVOKE)
6446 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_DELEGATE_INVOKE);
6450 /* For now ignore varargs */
6451 if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
6453 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NATIVE_VARARGS);
6457 if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
6459 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS);
6463 if ((mflags & CORINFO_FLG_VIRTUAL) && (sig->sigInst.methInstCount != 0) && (opcode == CEE_CALLVIRT))
6465 compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_GENERIC_VIRTUAL);
6470 clsHnd = pResolvedToken->hClass;
6472 clsFlags = callInfo->classFlags;
6475 // If this is a call to JitTestLabel.Mark, do "early inlining", and record the test attribute.
6477 // This recognition should really be done by knowing the methHnd of the relevant Mark method(s).
6478 // These should be in mscorlib.h, and available through a JIT/EE interface call.
6479 const char* modName;
6480 const char* className;
6481 const char* methodName;
6482 if ((className = eeGetClassName(clsHnd)) != nullptr &&
6483 strcmp(className, "System.Runtime.CompilerServices.JitTestLabel") == 0 &&
6484 (methodName = eeGetMethodName(methHnd, &modName)) != nullptr && strcmp(methodName, "Mark") == 0)
6486 return impImportJitTestLabelMark(sig->numArgs);
6490 // <NICE> Factor this into getCallInfo </NICE>
6491 if ((mflags & CORINFO_FLG_INTRINSIC) && !pConstrainedResolvedToken)
6493 call = impIntrinsic(clsHnd, methHnd, sig, pResolvedToken->token, readonlyCall,
6494 (canTailCall && (tailCall != 0)), &intrinsicID);
6496 if (call != nullptr)
6498 assert(!(mflags & CORINFO_FLG_VIRTUAL) || (mflags & CORINFO_FLG_FINAL) ||
6499 (clsFlags & CORINFO_FLG_FINAL));
6501 #ifdef FEATURE_READYTORUN_COMPILER
6502 if (call->OperGet() == GT_INTRINSIC)
6504 if (opts.IsReadyToRun())
6506 noway_assert(callInfo->kind == CORINFO_CALL);
6507 call->gtIntrinsic.gtEntryPoint = callInfo->codePointerLookup.constLookup;
6511 call->gtIntrinsic.gtEntryPoint.addr = nullptr;
6516 bIntrinsicImported = true;
6524 call = impSIMDIntrinsic(opcode, newobjThis, clsHnd, methHnd, sig, pResolvedToken->token);
6525 if (call != nullptr)
6527 bIntrinsicImported = true;
6531 #endif // FEATURE_SIMD
6533 if ((mflags & CORINFO_FLG_VIRTUAL) && (mflags & CORINFO_FLG_EnC) && (opcode == CEE_CALLVIRT))
6535 NO_WAY("Virtual call to a function added via EnC is not supported");
6539 if ((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_DEFAULT &&
6540 (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
6541 (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG)
6543 BADCODE("Bad calling convention");
6546 //-------------------------------------------------------------------------
6547 // Construct the call node
6549 // Work out what sort of call we're making.
6550 // Dispense with virtual calls implemented via LDVIRTFTN immediately.
6552 constraintCallThisTransform = callInfo->thisTransform;
6554 exactContextHnd = callInfo->contextHandle;
6555 exactContextNeedsRuntimeLookup = callInfo->exactContextNeedsRuntimeLookup;
6557     // A recursive call is treated as a loop back to the beginning of the method.
6558 if (methHnd == info.compMethodHnd)
6563 JITDUMP("\nFound recursive call in the method. Mark BB%02u to BB%02u as having a backward branch.\n",
6564 fgFirstBB->bbNum, compCurBB->bbNum);
6567 fgMarkBackwardJump(fgFirstBB, compCurBB);
6570 switch (callInfo->kind)
6573 case CORINFO_VIRTUALCALL_STUB:
6575 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
6576 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
6577 if (callInfo->stubLookup.lookupKind.needsRuntimeLookup)
6580 if (compIsForInlining())
6582 // Don't import runtime lookups when inlining
6583 // Inlining has to be aborted in such a case
6584 /* XXX Fri 3/20/2009
6585 * By the way, this would never succeed. If the handle lookup is into the generic
6586 * dictionary for a candidate, you'll generate different dictionary offsets and the
6587 * inlined code will crash.
6589                      * To anyone reviewing this code: when could this ever succeed in the future? It'll
6590 * always have a handle lookup. These lookups are safe intra-module, but we're just
6593 compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_COMPLEX_HANDLE);
6597 GenTreePtr stubAddr = impRuntimeLookupToTree(pResolvedToken, &callInfo->stubLookup, methHnd);
6598 assert(!compDonotInline());
6600 // This is the rough code to set up an indirect stub call
6601 assert(stubAddr != nullptr);
6603 // The stubAddr may be a
6604 // complex expression. As it is evaluated after the args,
6605 // it may cause registered args to be spilled. Simply spill it.
6607 unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall with runtime lookup"));
6608 impAssignTempGen(lclNum, stubAddr, (unsigned)CHECK_SPILL_ALL);
6609 stubAddr = gtNewLclvNode(lclNum, TYP_I_IMPL);
6611 // Create the actual call node
6613 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
6614 (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
6616 call = gtNewIndCallNode(stubAddr, callRetTyp, nullptr);
6618 call->gtFlags |= GTF_EXCEPT | (stubAddr->gtFlags & GTF_GLOB_EFFECT);
6619 call->gtFlags |= GTF_CALL_VIRT_STUB;
6622 // No tailcalls allowed for these yet...
6623 canTailCall = false;
6624 szCanTailCallFailReason = "VirtualCall with runtime lookup";
6629                 // OK, the stub is available at compile time.
6631 call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
6632 call->gtCall.gtStubCallStubAddr = callInfo->stubLookup.constLookup.addr;
6633 call->gtFlags |= GTF_CALL_VIRT_STUB;
6634 assert(callInfo->stubLookup.constLookup.accessType != IAT_PPVALUE);
6635 if (callInfo->stubLookup.constLookup.accessType == IAT_PVALUE)
6637 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_VIRTSTUB_REL_INDIRECT;
6641 #ifdef FEATURE_READYTORUN_COMPILER
6642 if (opts.IsReadyToRun())
6644 // Null check is sometimes needed for ready to run to handle
6645 // non-virtual <-> virtual changes between versions
6646 if (callInfo->nullInstanceCheck)
6648 call->gtFlags |= GTF_CALL_NULLCHECK;
6656 case CORINFO_VIRTUALCALL_VTABLE:
6658 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
6659 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
6660 call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
6661 call->gtFlags |= GTF_CALL_VIRT_VTABLE;
6665 case CORINFO_VIRTUALCALL_LDVIRTFTN:
6667 if (compIsForInlining())
6669 compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_CALL_VIA_LDVIRTFTN);
6673 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
6674 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
6675 // OK, We've been told to call via LDVIRTFTN, so just
6676 // take the call now....
6678 args = impPopList(sig->numArgs, &argFlags, sig);
6680 GenTreePtr thisPtr = impPopStack().val;
6681 thisPtr = impTransformThis(thisPtr, pConstrainedResolvedToken, callInfo->thisTransform);
6682 if (compDonotInline())
6687 // Clone the (possibly transformed) "this" pointer
6688 GenTreePtr thisPtrCopy;
6689 thisPtr = impCloneExpr(thisPtr, &thisPtrCopy, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
6690 nullptr DEBUGARG("LDVIRTFTN this pointer"));
6692 GenTreePtr fptr = impImportLdvirtftn(thisPtr, pResolvedToken, callInfo);
6693 if (compDonotInline())
6698 thisPtr = nullptr; // can't reuse it
6700 // Now make an indirect call through the function pointer
6702 unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall through function pointer"));
6703 impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL);
6704 fptr = gtNewLclvNode(lclNum, TYP_I_IMPL);
6706 // Create the actual call node
6708 call = gtNewIndCallNode(fptr, callRetTyp, args, ilOffset);
6709 call->gtCall.gtCallObjp = thisPtrCopy;
6710 call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
6712 #ifdef FEATURE_READYTORUN_COMPILER
6713 if (opts.IsReadyToRun())
6715 // Null check is needed for ready to run to handle
6716 // non-virtual <-> virtual changes between versions
6717 call->gtFlags |= GTF_CALL_NULLCHECK;
6721             // Since we are jumping over some code, check that it's OK to skip that code
6722 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
6723 (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
6729 // This is for a non-virtual, non-interface etc. call
6730 call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
6732             // We remove the nullcheck for the GetType call intrinsic.
6733 // TODO-CQ: JIT64 does not introduce the null check for many more helper calls
6735 if (callInfo->nullInstanceCheck &&
6736 !((mflags & CORINFO_FLG_INTRINSIC) != 0 && (intrinsicID == CORINFO_INTRINSIC_Object_GetType)))
6738 call->gtFlags |= GTF_CALL_NULLCHECK;
6741 #ifdef FEATURE_READYTORUN_COMPILER
6742 if (opts.IsReadyToRun())
6744 call->gtCall.setEntryPoint(callInfo->codePointerLookup.constLookup);
6750 case CORINFO_CALL_CODE_POINTER:
6752 // The EE has asked us to call by computing a code pointer and then doing an
6753 // indirect call. This is because a runtime lookup is required to get the code entry point.
6755 // These calls always follow a uniform calling convention, i.e. no extra hidden params
6756 assert((sig->callConv & CORINFO_CALLCONV_PARAMTYPE) == 0);
6758 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG);
6759 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
6762 impLookupToTree(pResolvedToken, &callInfo->codePointerLookup, GTF_ICON_FTN_ADDR, callInfo->hMethod);
6764 if (compDonotInline())
6769 // Now make an indirect call through the function pointer
6771 unsigned lclNum = lvaGrabTemp(true DEBUGARG("Indirect call through function pointer"));
6772 impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL);
6773 fptr = gtNewLclvNode(lclNum, TYP_I_IMPL);
6775 call = gtNewIndCallNode(fptr, callRetTyp, nullptr, ilOffset);
6776 call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
6777 if (callInfo->nullInstanceCheck)
6779 call->gtFlags |= GTF_CALL_NULLCHECK;
6786 assert(!"unknown call kind");
6790 //-------------------------------------------------------------------------
6793 PREFIX_ASSUME(call != nullptr);
6795 if (mflags & CORINFO_FLG_NOGCCHECK)
6797 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_NOGCCHECK;
6800 // Mark call if it's one of the ones we will maybe treat as an intrinsic
6801 if (intrinsicID == CORINFO_INTRINSIC_Object_GetType || intrinsicID == CORINFO_INTRINSIC_TypeEQ ||
6802 intrinsicID == CORINFO_INTRINSIC_TypeNEQ || intrinsicID == CORINFO_INTRINSIC_GetCurrentManagedThread ||
6803 intrinsicID == CORINFO_INTRINSIC_GetManagedThreadId)
6805 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_SPECIAL_INTRINSIC;
6809 assert(clsHnd || (opcode == CEE_CALLI)); // We're never verifying for CALLI, so this is not set.
6811 /* Some sanity checks */
6813 // CALL_VIRT and NEWOBJ must have a THIS pointer
6814 assert((opcode != CEE_CALLVIRT && opcode != CEE_NEWOBJ) || (sig->callConv & CORINFO_CALLCONV_HASTHIS));
6815 // static bit and hasThis are negations of one another
6816 assert(((mflags & CORINFO_FLG_STATIC) != 0) == ((sig->callConv & CORINFO_CALLCONV_HASTHIS) == 0));
6817 assert(call != nullptr);
6819 /*-------------------------------------------------------------------------
6820 * Check special-cases etc
6823 /* Special case - Check if it is a call to Delegate.Invoke(). */
6825 if (mflags & CORINFO_FLG_DELEGATE_INVOKE)
6827 assert(!compIsForInlining());
6828 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
6829 assert(mflags & CORINFO_FLG_FINAL);
6831 /* Set the delegate flag */
6832 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_DELEGATE_INV;
6834 if (callInfo->secureDelegateInvoke)
6836 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_SECURE_DELEGATE_INV;
6839 if (opcode == CEE_CALLVIRT)
6841 assert(mflags & CORINFO_FLG_FINAL);
6843 /* It should have the GTF_CALL_NULLCHECK flag set. Reset it */
6844 assert(call->gtFlags & GTF_CALL_NULLCHECK);
6845 call->gtFlags &= ~GTF_CALL_NULLCHECK;
6849 CORINFO_CLASS_HANDLE actualMethodRetTypeSigClass;
6850 actualMethodRetTypeSigClass = sig->retTypeSigClass;
6851 if (varTypeIsStruct(callRetTyp))
6853 callRetTyp = impNormStructType(actualMethodRetTypeSigClass);
6854 call->gtType = callRetTyp;
6858 /* Check for varargs */
6859 if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG ||
6860 (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
6862 BADCODE("Varargs not supported.");
6864 #endif // !FEATURE_VARARG
6866 if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG ||
6867 (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
6869 assert(!compIsForInlining());
6871 /* Set the right flags */
6873 call->gtFlags |= GTF_CALL_POP_ARGS;
6874 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_VARARGS;
6876 /* Can't allow tailcall for varargs as it is caller-pop. The caller
6877 will be expecting to pop a certain number of arguments, but if we
6878 tailcall to a function with a different number of arguments, we
6879 are hosed. There are ways around this (caller remembers esp value,
6880 varargs is not caller-pop, etc), but not worth it. */
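        // (Roughly: our caller pushed, and will pop, a specific number of argument bytes for this
        //  method; after a tail call the callee's argument area could be a different size, so the
        //  caller-side pop would no longer match.)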
6881 CLANG_FORMAT_COMMENT_ANCHOR;
6886 canTailCall = false;
6887 szCanTailCallFailReason = "Callee is varargs";
6891 /* Get the total number of arguments - this is already correct
6892 * for CALLI - for methods we have to get it from the call site */
6894 if (opcode != CEE_CALLI)
6897 unsigned numArgsDef = sig->numArgs;
6899 eeGetCallSiteSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, sig);
6902 // We cannot lazily obtain the signature of a vararg call because using its method
6903 // handle will give us only the declared argument list, not the full argument list.
6904 assert(call->gtCall.callSig == nullptr);
6905 call->gtCall.callSig = new (this, CMK_CorSig) CORINFO_SIG_INFO;
6906 *call->gtCall.callSig = *sig;
6909 // For vararg calls we must be sure to load the return type of the
6910 // method actually being called, as well as the return types of the
6911 // specified in the vararg signature. With type equivalency, these types
6912 // may not be the same.
6913 if (sig->retTypeSigClass != actualMethodRetTypeSigClass)
6915 if (actualMethodRetTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS &&
6916 sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR &&
6917 sig->retType != CORINFO_TYPE_VAR)
6919 // Make sure that all valuetypes (including enums) that we push are loaded.
6920                     // This is to guarantee that if a GC is triggered from the prestub of this method,
6921 // all valuetypes in the method signature are already loaded.
6922 // We need to be able to find the size of the valuetypes, but we cannot
6923 // do a class-load from within GC.
6924 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(actualMethodRetTypeSigClass);
6928 assert(numArgsDef <= sig->numArgs);
6931 /* We will have "cookie" as the last argument but we cannot push
6932 * it on the operand stack because we may overflow, so we append it
6933 * to the arg list next after we pop them */
6936 if (mflags & CORINFO_FLG_SECURITYCHECK)
6938 assert(!compIsForInlining());
6940 // Need security prolog/epilog callouts when there is
6941 // imperative security in the method. This is to give security a
6942 // chance to do any setup in the prolog and cleanup in the epilog if needed.
6944 if (compIsForInlining())
6946 // Cannot handle this if the method being imported is an inlinee by itself.
6947 // Because inlinee method does not have its own frame.
6949 compInlineResult->NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
6954 tiSecurityCalloutNeeded = true;
6956 // If the current method calls a method which needs a security check,
6957 // (i.e. the method being compiled has imperative security)
6958 // we need to reserve a slot for the security object in
6959 // the current method's stack frame
6960 opts.compNeedSecurityCheck = true;
6964 //--------------------------- Inline NDirect ------------------------------
6966 // For inline cases we technically should look at both the current
6967 // block and the call site block (or just the latter if we've
6968 // fused the EH trees). However the block-related checks pertain to
6969 // EH and we currently won't inline a method with EH. So for
6970 // inlinees, just checking the call site block is sufficient.
6972 // New lexical block here to avoid compilation errors because of GOTOs.
6973 BasicBlock* block = compIsForInlining() ? impInlineInfo->iciBlock : compCurBB;
6974 impCheckForPInvokeCall(call, methHnd, sig, mflags, block);
6977 if (call->gtFlags & GTF_CALL_UNMANAGED)
6979 // We set up the unmanaged call by linking the frame, disabling GC, etc
6980 // This needs to be cleaned up on return
6983 canTailCall = false;
6984 szCanTailCallFailReason = "Callee is native";
6987 checkForSmallType = true;
6989 impPopArgsForUnmanagedCall(call, sig);
6993 else if ((opcode == CEE_CALLI) && (((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_STDCALL) ||
6994 ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_C) ||
6995 ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_THISCALL) ||
6996 ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_FASTCALL)))
6998 if (!info.compCompHnd->canGetCookieForPInvokeCalliSig(sig))
7000 // Normally this only happens with inlining.
7001 // However, a generic method (or type) being NGENd into another module
7002 // can run into this issue as well. There's not an easy fall-back for NGEN
7003             // so instead we fall back to JIT.
7004 if (compIsForInlining())
7006 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_PINVOKE_COOKIE);
7010 IMPL_LIMITATION("Can't get PInvoke cookie (cross module generics)");
7016 GenTreePtr cookie = eeGetPInvokeCookie(sig);
7018 // This cookie is required to be either a simple GT_CNS_INT or
7019 // an indirection of a GT_CNS_INT
7021 GenTreePtr cookieConst = cookie;
7022 if (cookie->gtOper == GT_IND)
7024 cookieConst = cookie->gtOp.gtOp1;
7026 assert(cookieConst->gtOper == GT_CNS_INT);
7028 // Setting GTF_DONT_CSE on the GT_CNS_INT as well as on the GT_IND (if it exists) will ensure that
7029 // we won't allow this tree to participate in any CSE logic
7031 cookie->gtFlags |= GTF_DONT_CSE;
7032 cookieConst->gtFlags |= GTF_DONT_CSE;
7034 call->gtCall.gtCallCookie = cookie;
7038 canTailCall = false;
7039 szCanTailCallFailReason = "PInvoke calli";
7043 /*-------------------------------------------------------------------------
7044 * Create the argument list
7047 //-------------------------------------------------------------------------
7048 // Special case - for varargs we have an implicit last argument
7050 if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
7052 assert(!compIsForInlining());
7054 void *varCookie, *pVarCookie;
7055 if (!info.compCompHnd->canGetVarArgsHandle(sig))
7057 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_VARARGS_COOKIE);
7061 varCookie = info.compCompHnd->getVarArgsHandle(sig, &pVarCookie);
7062 assert((!varCookie) != (!pVarCookie));
7063 GenTreePtr cookie = gtNewIconEmbHndNode(varCookie, pVarCookie, GTF_ICON_VARG_HDL);
7065 assert(extraArg == nullptr);
7066 extraArg = gtNewArgList(cookie);
7069 //-------------------------------------------------------------------------
7070 // Extra arg for shared generic code and array methods
7072 // Extra argument containing instantiation information is passed in the
7073 // following circumstances:
7074 // (a) To the "Address" method on array classes; the extra parameter is
7075 // the array's type handle (a TypeDesc)
7076 // (b) To shared-code instance methods in generic structs; the extra parameter
7077 // is the struct's type handle (a vtable ptr)
7078 // (c) To shared-code per-instantiation non-generic static methods in generic
7079 // classes and structs; the extra parameter is the type handle
7080 // (d) To shared-code generic methods; the extra parameter is an
7081 // exact-instantiation MethodDesc
7083 // We also set the exact type context associated with the call so we can
7084 // inline the call correctly later on.
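    // For example (hypothetical): a call into shared code for a static method on SomeGeneric<string>
    // falls under case (c) and passes SomeGeneric<string>'s type handle, while a call to a shared
    // generic method falls under case (d) and passes the exact MethodDesc for that instantiation,
    // so the callee can recover its type arguments at run time.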
7086 if (sig->callConv & CORINFO_CALLCONV_PARAMTYPE)
7088 assert(call->gtCall.gtCallType == CT_USER_FUNC);
7089 if (clsHnd == nullptr)
7091 NO_WAY("CALLI on parameterized type");
7094 assert(opcode != CEE_CALLI);
7096 GenTreePtr instParam;
7099 // Instantiated generic method
7100 if (((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_METHOD)
7102 CORINFO_METHOD_HANDLE exactMethodHandle =
7103 (CORINFO_METHOD_HANDLE)((SIZE_T)exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK);
7105 if (!exactContextNeedsRuntimeLookup)
7107 #ifdef FEATURE_READYTORUN_COMPILER
7108 if (opts.IsReadyToRun())
7111 impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_METHOD_HDL, exactMethodHandle);
7112 if (instParam == nullptr)
7120 instParam = gtNewIconEmbMethHndNode(exactMethodHandle);
7121 info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(exactMethodHandle);
7126 instParam = impTokenToHandle(pResolvedToken, &runtimeLookup, TRUE /*mustRestoreHandle*/);
7127 if (instParam == nullptr)
7134 // otherwise must be an instance method in a generic struct,
7135 // a static method in a generic type, or a runtime-generated array method
7138 assert(((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_CLASS);
7139 CORINFO_CLASS_HANDLE exactClassHandle =
7140 (CORINFO_CLASS_HANDLE)((SIZE_T)exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK);
7142 if (compIsForInlining() && (clsFlags & CORINFO_FLG_ARRAY) != 0)
7144 compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_ARRAY_METHOD);
7148 if ((clsFlags & CORINFO_FLG_ARRAY) && readonlyCall)
7150 // We indicate "readonly" to the Address operation by using a null
7152 instParam = gtNewIconNode(0, TYP_REF);
7155 if (!exactContextNeedsRuntimeLookup)
7157 #ifdef FEATURE_READYTORUN_COMPILER
7158 if (opts.IsReadyToRun())
7161 impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_CLASS_HDL, exactClassHandle);
7162 if (instParam == nullptr)
7170 instParam = gtNewIconEmbClsHndNode(exactClassHandle);
7171 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(exactClassHandle);
7176 instParam = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup, TRUE /*mustRestoreHandle*/);
7177 if (instParam == nullptr)
7184 assert(extraArg == nullptr);
7185 extraArg = gtNewArgList(instParam);
7188 // Inlining may need the exact type context (exactContextHnd) if we're inlining shared generic code, in particular
7189 // to inline 'polytypic' operations such as static field accesses, type tests and method calls which
7190 // rely on the exact context. The exactContextHnd is passed back to the JitInterface at appropriate points.
7191 // exactContextHnd is not currently required when inlining shared generic code into shared
7192 // generic code, since the inliner aborts whenever shared code polytypic operations are encountered
7193 // (e.g. anything marked needsRuntimeLookup)
7194 if (exactContextNeedsRuntimeLookup)
7196 exactContextHnd = nullptr;
7199 //-------------------------------------------------------------------------
7200 // The main group of arguments
7202 args = call->gtCall.gtCallArgs = impPopList(sig->numArgs, &argFlags, sig, extraArg);
7206 call->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
7209 //-------------------------------------------------------------------------
7210 // The "this" pointer
7212 if (!(mflags & CORINFO_FLG_STATIC) && !((opcode == CEE_NEWOBJ) && (newobjThis == nullptr)))
7216 if (opcode == CEE_NEWOBJ)
7222 obj = impPopStack().val;
7223 obj = impTransformThis(obj, pConstrainedResolvedToken, constraintCallThisTransform);
7224 if (compDonotInline())
7230 /* Is this a virtual or interface call? */
7232 if ((call->gtFlags & GTF_CALL_VIRT_KIND_MASK) != GTF_CALL_NONVIRT)
7234 /* only true object pointers can be virtual */
7236 assert(obj->gtType == TYP_REF);
7242 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_NONVIRT_SAME_THIS;
7246 /* Store the "this" value in the call */
7248 call->gtFlags |= obj->gtFlags & GTF_GLOB_EFFECT;
7249 call->gtCall.gtCallObjp = obj;
7252 //-------------------------------------------------------------------------
7253 // The "this" pointer for "newobj"
7255 if (opcode == CEE_NEWOBJ)
7257 if (clsFlags & CORINFO_FLG_VAROBJSIZE)
7259 assert(!(clsFlags & CORINFO_FLG_ARRAY)); // arrays handled separately
7260             // This is a 'new' of a variable-sized object, where
7261 // the constructor is to return the object. In this case
7262 // the constructor claims to return VOID but we know it
7263 // actually returns the new object
7264 assert(callRetTyp == TYP_VOID);
7265 callRetTyp = TYP_REF;
7266 call->gtType = TYP_REF;
7267 impSpillSpecialSideEff();
7269 impPushOnStack(call, typeInfo(TI_REF, clsHnd));
7273 if (clsFlags & CORINFO_FLG_DELEGATE)
7275             // The new inliner morphs it in impImportCall.
7276 // This will allow us to inline the call to the delegate constructor.
7277 call = fgOptimizeDelegateConstructor(call, &exactContextHnd);
7280 if (!bIntrinsicImported)
7283 #if defined(DEBUG) || defined(INLINE_DATA)
7285 // Keep track of the raw IL offset of the call
7286 call->gtCall.gtRawILOffset = rawILOffset;
7288 #endif // defined(DEBUG) || defined(INLINE_DATA)
7290 // Is it an inline candidate?
7291 impMarkInlineCandidate(call, exactContextHnd, callInfo);
7294 // append the call node.
7295 impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
7297             // Now push the value of the 'new' onto the stack
7299 // This is a 'new' of a non-variable sized object.
7300 // Append the new node (op1) to the statement list,
7301 // and then push the local holding the value of this
7302 // new instruction on the stack.
7304 if (clsFlags & CORINFO_FLG_VALUECLASS)
7306 assert(newobjThis->gtOper == GT_ADDR && newobjThis->gtOp.gtOp1->gtOper == GT_LCL_VAR);
7308 unsigned tmp = newobjThis->gtOp.gtOp1->gtLclVarCommon.gtLclNum;
7309 impPushOnStack(gtNewLclvNode(tmp, lvaGetRealType(tmp)), verMakeTypeInfo(clsHnd).NormaliseForStack());
7313 if (newobjThis->gtOper == GT_COMMA)
7315 // In coreclr the callout can be inserted even if verification is disabled
7316 // so we cannot rely on tiVerificationNeeded alone
7318 // We must have inserted the callout. Get the real newobj.
7319 newobjThis = newobjThis->gtOp.gtOp2;
7322 assert(newobjThis->gtOper == GT_LCL_VAR);
7323 impPushOnStack(gtNewLclvNode(newobjThis->gtLclVarCommon.gtLclNum, TYP_REF), typeInfo(TI_REF, clsHnd));
7333     // This check cannot be performed for implicit tail calls because
7334     // impIsImplicitTailCallCandidate() does not check whether return
7335     // types are compatible before marking a call node with PREFIX_TAILCALL_IMPLICIT.
7336 // As a result it is possible that in the following case, we find that
7337 // the type stack is non-empty if Callee() is considered for implicit
7339 // int Caller(..) { .... void Callee(); ret val; ... }
7341 // Note that we cannot check return type compatibility before ImpImportCall()
7342 // as we don't have required info or need to duplicate some of the logic of
7345 // For implicit tail calls, we perform this check after return types are
7346 // known to be compatible.
7347 if ((tailCall & PREFIX_TAILCALL_EXPLICIT) && (verCurrentState.esStackDepth != 0))
7349 BADCODE("Stack should be empty after tailcall");
7352 // Note that we can not relax this condition with genActualType() as
7353 // the calling convention dictates that the caller of a function with
7354 // a small-typed return value is responsible for normalizing the return val
7357 !impTailCallRetTypeCompatible(info.compRetType, info.compMethodInfo->args.retTypeClass, callRetTyp,
7358 callInfo->sig.retTypeClass))
7360 canTailCall = false;
7361 szCanTailCallFailReason = "Return types are not tail call compatible";
7364 // Stack empty check for implicit tail calls.
7365 if (canTailCall && (tailCall & PREFIX_TAILCALL_IMPLICIT) && (verCurrentState.esStackDepth != 0))
7367 #ifdef _TARGET_AMD64_
7368 // JIT64 Compatibility: Opportunistic tail call stack mismatch throws a VerificationException
7369 // in JIT64, not an InvalidProgramException.
7370 Verify(false, "Stack should be empty after tailcall");
7371 #else // _TARGET_64BIT_
7372 BADCODE("Stack should be empty after tailcall");
7373 #endif //!_TARGET_64BIT_
7376 // assert(compCurBB is not a catch, finally or filter block);
7377 // assert(compCurBB is not a try block protected by a finally block);
7379 // Check for permission to tailcall
7380 bool explicitTailCall = (tailCall & PREFIX_TAILCALL_EXPLICIT) != 0;
7382 assert(!explicitTailCall || compCurBB->bbJumpKind == BBJ_RETURN);
7386 // True virtual or indirect calls, shouldn't pass in a callee handle.
7387 CORINFO_METHOD_HANDLE exactCalleeHnd = ((call->gtCall.gtCallType != CT_USER_FUNC) ||
7388 ((call->gtFlags & GTF_CALL_VIRT_KIND_MASK) != GTF_CALL_NONVIRT))
7391 GenTreePtr thisArg = call->gtCall.gtCallObjp;
7393 if (info.compCompHnd->canTailCall(info.compMethodHnd, methHnd, exactCalleeHnd, explicitTailCall))
7396 if (explicitTailCall)
7398 // In case of explicit tail calls, mark it so that it is not considered
7400 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_EXPLICIT_TAILCALL;
7404 printf("\nGTF_CALL_M_EXPLICIT_TAILCALL bit set for call ");
7412 #if FEATURE_TAILCALL_OPT
7413 // Must be an implicit tail call.
7414 assert((tailCall & PREFIX_TAILCALL_IMPLICIT) != 0);
7416 // It is possible that a call node is both an inline candidate and marked
7417             // for opportunistic tail calling. In-lining happens before morphing of
7418 // trees. If in-lining of an in-line candidate gets aborted for whatever
7419 // reason, it will survive to the morphing stage at which point it will be
7420 // transformed into a tail call after performing additional checks.
7422 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_IMPLICIT_TAILCALL;
7426 printf("\nGTF_CALL_M_IMPLICIT_TAILCALL bit set for call ");
7432 #else //! FEATURE_TAILCALL_OPT
7433 NYI("Implicit tail call prefix on a target which doesn't support opportunistic tail calls");
7435 #endif // FEATURE_TAILCALL_OPT
7438 // we can't report success just yet...
7442 canTailCall = false;
7443 // canTailCall reported its reasons already
7447 printf("\ninfo.compCompHnd->canTailCall returned false for call ");
7456 // If this assert fires it means that canTailCall was set to false without setting a reason!
7457 assert(szCanTailCallFailReason != nullptr);
7462 printf("\nRejecting %splicit tail call for call ", explicitTailCall ? "ex" : "im");
7464 printf(": %s\n", szCanTailCallFailReason);
7467 info.compCompHnd->reportTailCallDecision(info.compMethodHnd, methHnd, explicitTailCall, TAILCALL_FAIL,
7468 szCanTailCallFailReason);
7472 // Note: we assume that small return types are already normalized by the managed callee
7473 // or by the pinvoke stub for calls to unmanaged code.
7477 if (!bIntrinsicImported)
7480 // Things needed to be checked when bIntrinsicImported is false.
7483 assert(call->gtOper == GT_CALL);
7484 assert(sig != nullptr);
7486 // Tail calls require us to save the call site's sig info so we can obtain an argument
7487 // copying thunk from the EE later on.
7488 if (call->gtCall.callSig == nullptr)
7490 call->gtCall.callSig = new (this, CMK_CorSig) CORINFO_SIG_INFO;
7491 *call->gtCall.callSig = *sig;
7494 if (compIsForInlining() && opcode == CEE_CALLVIRT)
7496 GenTreePtr callObj = call->gtCall.gtCallObjp;
7497 assert(callObj != nullptr);
7499 unsigned callKind = call->gtFlags & GTF_CALL_VIRT_KIND_MASK;
7501 if (((callKind != GTF_CALL_NONVIRT) || (call->gtFlags & GTF_CALL_NULLCHECK)) &&
7502 impInlineIsGuaranteedThisDerefBeforeAnySideEffects(call->gtCall.gtCallArgs, callObj,
7503 impInlineInfo->inlArgInfo))
7505 impInlineInfo->thisDereferencedFirst = true;
7509 #if defined(DEBUG) || defined(INLINE_DATA)
7511 // Keep track of the raw IL offset of the call
7512 call->gtCall.gtRawILOffset = rawILOffset;
7514 #endif // defined(DEBUG) || defined(INLINE_DATA)
7516 // Is it an inline candidate?
7517 impMarkInlineCandidate(call, exactContextHnd, callInfo);
7520 // Push or append the result of the call
7521 if (callRetTyp == TYP_VOID)
7523 if (opcode == CEE_NEWOBJ)
7525 // we actually did push something, so don't spill the thing we just pushed.
7526 assert(verCurrentState.esStackDepth > 0);
7527 impAppendTree(call, verCurrentState.esStackDepth - 1, impCurStmtOffs);
7531 impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
7536 impSpillSpecialSideEff();
7538 if (clsFlags & CORINFO_FLG_ARRAY)
7540 eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
7543 // Find the return type used for verification by interpreting the method signature.
7544 // NB: we are clobbering the already established sig.
7545 if (tiVerificationNeeded)
7547 // Actually, we never get the sig for the original method.
7548 sig = &(callInfo->verSig);
7551 typeInfo tiRetVal = verMakeTypeInfo(sig->retType, sig->retTypeClass);
7552 tiRetVal.NormaliseForStack();
7554 // The CEE_READONLY prefix modifies the verification semantics of an Address
7555 // operation on an array type.
7556 if ((clsFlags & CORINFO_FLG_ARRAY) && readonlyCall && tiRetVal.IsByRef())
7558 tiRetVal.SetIsReadonlyByRef();
7561 if (tiVerificationNeeded)
7563 // We assume all calls return permanent home byrefs. If they
7564 // didn't they wouldn't be verifiable. This is also covering
7565 // the Address() helper for multidimensional arrays.
7566 if (tiRetVal.IsByRef())
7568 tiRetVal.SetIsPermanentHomeByRef();
7572 if (call->gtOper == GT_CALL)
7574 // Sometimes "call" is not a GT_CALL (if we imported an intrinsic that didn't turn into a call)
7575 if (varTypeIsStruct(callRetTyp))
7577 call = impFixupCallStructReturn(call, sig->retTypeClass);
7580 if ((call->gtFlags & GTF_CALL_INLINE_CANDIDATE) != 0)
7582 assert(opts.OptEnabled(CLFLG_INLINING));
7584 // Make the call its own tree (spill the stack if needed).
7585 impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
7587 // TODO: Still using the widened type.
7588 call = gtNewInlineCandidateReturnExpr(call, genActualType(callRetTyp));
7592 // For non-candidates we must also spill, since we
7593 // might have locals live on the eval stack that this
7595 impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("non-inline candidate call"));
7599 if (!bIntrinsicImported)
7601 //-------------------------------------------------------------------------
7603 /* If the call is of a small type and the callee is managed, the callee will normalize the result
7605 However, we need to normalize small type values returned by unmanaged
7606 functions (pinvoke). The pinvoke stub does the normalization, but we need to do it here
7607 if we use the shorter inlined pinvoke stub. */
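// Illustrative sketch (hypothetical declaration, not from this source): for a P/Invoke
// returning a small type, e.g.
//     [DllImport("native.dll")] static extern byte GetByte();
// the inlined pinvoke stub does not normalize the 1-byte result, so the importer wraps
// the call in a cast back to the small type, conceptually:
//     call = gtNewCastNode(TYP_INT, call, TYP_UBYTE);   // genActualType(TYP_UBYTE) == TYP_INT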
7609 if (checkForSmallType && varTypeIsIntegral(callRetTyp) && genTypeSize(callRetTyp) < genTypeSize(TYP_INT))
7611 call = gtNewCastNode(genActualType(callRetTyp), call, callRetTyp);
7615 impPushOnStack(call, tiRetVal);
7618 // VSD functions get a new call target each time we getCallInfo, so clear the cache.
7619 // Also, the call info cache for CALLI instructions is largely incomplete, so clear it out.
7620 // if ( (opcode == CEE_CALLI) || (callInfoCache.fetchCallInfo().kind == CORINFO_VIRTUALCALL_STUB))
7621 // callInfoCache.uncacheCallInfo();
7626 #pragma warning(pop)
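// Illustrative note (a sketch of the intent, not from this source): impMethodInfo_hasRetBuffArg
// reports whether a method's struct return must travel through a hidden return buffer. For
// example, a hypothetical method returning a 32-byte struct is typically classified as
// SPK_ByReference by getReturnTypeForStruct and so needs a retbuf arg, while a struct that
// wraps a single int fits in a register and does not.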
7629 bool Compiler::impMethodInfo_hasRetBuffArg(CORINFO_METHOD_INFO* methInfo)
7631 CorInfoType corType = methInfo->args.retType;
7633 if ((corType == CORINFO_TYPE_VALUECLASS) || (corType == CORINFO_TYPE_REFANY))
7635 // We have some kind of STRUCT being returned
7637 structPassingKind howToReturnStruct = SPK_Unknown;
7639 var_types returnType = getReturnTypeForStruct(methInfo->args.retTypeClass, &howToReturnStruct);
7641 if (howToReturnStruct == SPK_ByReference)
7652 var_types Compiler::impImportJitTestLabelMark(int numArgs)
7654 TestLabelAndNum tlAndN;
7658 StackEntry se = impPopStack();
7659 assert(se.seTypeInfo.GetType() == TI_INT);
7660 GenTreePtr val = se.val;
7661 assert(val->IsCnsIntOrI());
7662 tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue();
7664 else if (numArgs == 3)
7666 StackEntry se = impPopStack();
7667 assert(se.seTypeInfo.GetType() == TI_INT);
7668 GenTreePtr val = se.val;
7669 assert(val->IsCnsIntOrI());
7670 tlAndN.m_num = val->AsIntConCommon()->IconValue();
7672 assert(se.seTypeInfo.GetType() == TI_INT);
7674 assert(val->IsCnsIntOrI());
7675 tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue();
7682 StackEntry expSe = impPopStack();
7683 GenTreePtr node = expSe.val;
7685 // There are a small number of special cases, where we actually put the annotation on a subnode.
7686 if (tlAndN.m_tl == TL_LoopHoist && tlAndN.m_num >= 100)
7688 // A loop hoist annotation with value >= 100 means that the expression should be a static field access,
7689 // a GT_IND of a static field address, which should be the sum of a (hoistable) helper call and possibly some
7690 // offset within the static field block whose address is returned by the helper call.
7691 // The annotation is saying that this address calculation, but not the entire access, should be hoisted.
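// Illustrative sketch of the expected tree shape (hypothetical, not from this source):
//     GT_IND                        <- the full static field access (annotation removed from here)
//        \--- GT_ADD                <- address computation (annotation moved here)
//               +--- helper call    <- returns the static field block's address (hoistable)
//               \--- const offset   <- offset of the field within the block (may be absent)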
7692 GenTreePtr helperCall = nullptr;
7693 assert(node->OperGet() == GT_IND);
7694 tlAndN.m_num -= 100;
7695 GetNodeTestData()->Set(node->gtOp.gtOp1, tlAndN);
7696 GetNodeTestData()->Remove(node);
7700 GetNodeTestData()->Set(node, tlAndN);
7703 impPushOnStack(node, expSe.seTypeInfo);
7704 return node->TypeGet();
7708 //-----------------------------------------------------------------------------------
7709 // impFixupCallStructReturn: For a call node that returns a struct type, either
7710 // adjust the return type to an enregisterable type or set the flag indicating a
7711 // struct return via the retbuf arg.
7714 // call - GT_CALL GenTree node
7715 // retClsHnd - Class handle of return type of the call
7718 // Returns new GenTree node after fixing struct return of call node
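// Illustrative sketch (hypothetical examples, not from this source):
//   - a call returning  struct S { int x; }  has its gtReturnType rewritten to TYP_INT
//     (a struct wrapping a single primitive comes back in one register);
//   - a call whose struct comes back in two registers is either left for multi-reg handling
//     or assigned to a temp via impAssignMultiRegTypeToVar;
//   - a call whose struct is returned through a hidden buffer just gets GTF_CALL_M_RETBUFFARG set.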
7720 GenTreePtr Compiler::impFixupCallStructReturn(GenTreePtr call, CORINFO_CLASS_HANDLE retClsHnd)
7722 assert(call->gtOper == GT_CALL);
7724 if (!varTypeIsStruct(call))
7729 call->gtCall.gtRetClsHnd = retClsHnd;
7731 GenTreeCall* callNode = call->AsCall();
7733 #if FEATURE_MULTIREG_RET
7734 // Initialize Return type descriptor of call node
7735 ReturnTypeDesc* retTypeDesc = callNode->GetReturnTypeDesc();
7736 retTypeDesc->InitializeStructReturnType(this, retClsHnd);
7737 #endif // FEATURE_MULTIREG_RET
7739 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
7741 // Not allowed for FEATURE_CORCLR which is the only SKU available for System V OSs.
7742 assert(!callNode->IsVarargs() && "varargs not allowed for System V OSs.");
7744 // The return type will remain as the incoming struct type unless normalized to a
7745 // single eightbyte return type below.
7746 callNode->gtReturnType = call->gtType;
7748 unsigned retRegCount = retTypeDesc->GetReturnRegCount();
7749 if (retRegCount != 0)
7751 if (retRegCount == 1)
7753 // struct returned in a single register
7754 callNode->gtReturnType = retTypeDesc->GetReturnRegType(0);
7758 // must be a struct returned in two registers
7759 assert(retRegCount == 2);
7761 if ((!callNode->CanTailCall()) && (!callNode->IsInlineCandidate()))
7763 // Force a call returning a multi-reg struct to always be of the IR form tmp = call
7766 // No need to assign a multi-reg struct to a local var if:
7767 // - It is a tail call or
7768 // - The call is marked for in-lining later
7769 return impAssignMultiRegTypeToVar(call, retClsHnd);
7775 // struct not returned in registers, i.e. returned via hidden retbuf arg.
7776 callNode->gtCallMoreFlags |= GTF_CALL_M_RETBUFFARG;
7779 #else // not FEATURE_UNIX_AMD64_STRUCT_PASSING
7781 #if FEATURE_MULTIREG_RET && defined(_TARGET_ARM_)
7782 // There is no fixup necessary if the return type is an HFA struct.
7783 // HFA structs are returned in registers for ARM32 and ARM64
7785 if (!call->gtCall.IsVarargs() && IsHfa(retClsHnd))
7787 if (call->gtCall.CanTailCall())
7789 if (info.compIsVarArgs)
7791 // We cannot tail call because control needs to return to fixup the calling
7792 // convention for result return.
7793 call->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
7797 // If we can tail call returning HFA, then don't assign it to
7798 // a variable back and forth.
7803 if (call->gtFlags & GTF_CALL_INLINE_CANDIDATE)
7808 unsigned retRegCount = retTypeDesc->GetReturnRegCount();
7809 if (retRegCount >= 2)
7811 return impAssignMultiRegTypeToVar(call, retClsHnd);
7814 #endif // _TARGET_ARM_
7816 // Check for TYP_STRUCT type that wraps a primitive type
7817 // Such structs are returned using a single register
7818 // and we change the return type on those calls here.
7820 structPassingKind howToReturnStruct;
7821 var_types returnType = getReturnTypeForStruct(retClsHnd, &howToReturnStruct);
7823 if (howToReturnStruct == SPK_ByReference)
7825 assert(returnType == TYP_UNKNOWN);
7826 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_RETBUFFARG;
7830 assert(returnType != TYP_UNKNOWN);
7831 call->gtCall.gtReturnType = returnType;
7833 // ToDo: Refactor this common code sequence into its own method as it is used 4+ times
7834 if ((returnType == TYP_LONG) && (compLongUsed == false))
7836 compLongUsed = true;
7838 else if (((returnType == TYP_FLOAT) || (returnType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
7840 compFloatingPointUsed = true;
7843 #if FEATURE_MULTIREG_RET
7844 unsigned retRegCount = retTypeDesc->GetReturnRegCount();
7845 assert(retRegCount != 0);
7847 if (retRegCount >= 2)
7849 if ((!callNode->CanTailCall()) && (!callNode->IsInlineCandidate()))
7851 // Force a call returning a multi-reg struct to always be of the IR form tmp = call
7854 // No need to assign a multi-reg struct to a local var if:
7855 // - It is a tail call or
7856 // - The call is marked for in-lining later
7857 return impAssignMultiRegTypeToVar(call, retClsHnd);
7860 #endif // FEATURE_MULTIREG_RET
7863 #endif // not FEATURE_UNIX_AMD64_STRUCT_PASSING
7868 /*****************************************************************************
7869 For struct return values, re-type the operand in the case where the ABI
7870 does not use a struct return buffer
7871 Note that this method is only called for !_TARGET_X86_
7874 GenTreePtr Compiler::impFixupStructReturnType(GenTreePtr op, CORINFO_CLASS_HANDLE retClsHnd)
7876 assert(varTypeIsStruct(info.compRetType));
7877 assert(info.compRetBuffArg == BAD_VAR_NUM);
7879 #if defined(_TARGET_XARCH_)
7881 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
7882 // No VarArgs for CoreCLR on x64 Unix
7883 assert(!info.compIsVarArgs);
7885 // Is method returning a multi-reg struct?
7886 if (varTypeIsStruct(info.compRetNativeType) && IsMultiRegReturnedType(retClsHnd))
7888 // In case of multi-reg struct return, we force IR to be one of the following:
7889 // GT_RETURN(lclvar) or GT_RETURN(call). If op is anything other than a
7890 // lclvar or call, it is assigned to a temp to create: temp = op and GT_RETURN(tmp).
7892 if (op->gtOper == GT_LCL_VAR)
7894 // Make sure that this struct stays in memory and doesn't get promoted.
7895 unsigned lclNum = op->gtLclVarCommon.gtLclNum;
7896 lvaTable[lclNum].lvIsMultiRegRet = true;
7898 // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
7899 op->gtFlags |= GTF_DONT_CSE;
7904 if (op->gtOper == GT_CALL)
7909 return impAssignMultiRegTypeToVar(op, retClsHnd);
7911 #else // !FEATURE_UNIX_AMD64_STRUCT_PASSING
7912 assert(info.compRetNativeType != TYP_STRUCT);
7913 #endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING
7915 #elif FEATURE_MULTIREG_RET && defined(_TARGET_ARM_)
7917 if (varTypeIsStruct(info.compRetNativeType) && !info.compIsVarArgs && IsHfa(retClsHnd))
7919 if (op->gtOper == GT_LCL_VAR)
7921 // This LCL_VAR is an HFA return value, it stays as a TYP_STRUCT
7922 unsigned lclNum = op->gtLclVarCommon.gtLclNum;
7923 // Make sure this struct type stays as struct so that we can return it as an HFA
7924 lvaTable[lclNum].lvIsMultiRegRet = true;
7926 // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
7927 op->gtFlags |= GTF_DONT_CSE;
7932 if (op->gtOper == GT_CALL)
7934 if (op->gtCall.IsVarargs())
7936 // We cannot tail call because control needs to return to fixup the calling
7937 // convention for result return.
7938 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL;
7939 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
7946 return impAssignMultiRegTypeToVar(op, retClsHnd);
7949 #elif FEATURE_MULTIREG_RET && defined(_TARGET_ARM64_)
7951 // Is method returning a multi-reg struct?
7952 if (IsMultiRegReturnedType(retClsHnd))
7954 if (op->gtOper == GT_LCL_VAR)
7956 // This LCL_VAR stays as a TYP_STRUCT
7957 unsigned lclNum = op->gtLclVarCommon.gtLclNum;
7959 // Make sure this struct type is not struct promoted
7960 lvaTable[lclNum].lvIsMultiRegRet = true;
7962 // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
7963 op->gtFlags |= GTF_DONT_CSE;
7968 if (op->gtOper == GT_CALL)
7970 if (op->gtCall.IsVarargs())
7972 // We cannot tail call because control needs to return to fixup the calling
7973 // convention for result return.
7974 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL;
7975 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
7982 return impAssignMultiRegTypeToVar(op, retClsHnd);
7985 #endif // FEATURE_MULTIREG_RET && FEATURE_HFA
7988 // adjust the type away from struct to integral
7989 // and no normalizing
7990 if (op->gtOper == GT_LCL_VAR)
7992 op->ChangeOper(GT_LCL_FLD);
7994 else if (op->gtOper == GT_OBJ)
7996 GenTreePtr op1 = op->AsObj()->Addr();
7998 // We will fold away OBJ/ADDR
7999 // except for OBJ/ADDR/INDEX
8000 // as the array type influences the array element's offset
8001 // Later in this method we change op->gtType to info.compRetNativeType
8002 // This is not correct when op is a GT_INDEX as the starting offset
8003 // for the array elements 'elemOffs' is different for an array of
8004 // TYP_REF than an array of TYP_STRUCT (which simply wraps a TYP_REF)
8005 // Also refer to the GTF_INX_REFARR_LAYOUT flag
8007 if ((op1->gtOper == GT_ADDR) && (op1->gtOp.gtOp1->gtOper != GT_INDEX))
8009 // Change '*(&X)' to 'X' and see if we can do better
8010 op = op1->gtOp.gtOp1;
8011 goto REDO_RETURN_NODE;
8013 op->gtObj.gtClass = NO_CLASS_HANDLE;
8014 op->ChangeOperUnchecked(GT_IND);
8015 op->gtFlags |= GTF_IND_TGTANYWHERE;
8017 else if (op->gtOper == GT_CALL)
8019 if (op->AsCall()->TreatAsHasRetBufArg(this))
8021 // This must be one of those 'special' helpers that don't
8022 // really have a return buffer, but instead use it as a way
8023 // to keep the trees cleaner with fewer address-taken temps.
8025 // Well now we have to materialize the return buffer as
8026 // an address-taken temp. Then we can return the temp.
8028 // NOTE: this code assumes that since the call directly
8029 // feeds the return, then the call must be returning the
8030 // same structure/class/type.
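// Illustrative sketch (hypothetical shape, not from this source): conceptually this rewrites
//     return call(...)                  // helper that "pretends" to have a retbuf
// into
//     tmp = call(...);                  // impAssignTempGen, no spill needed
//     return LCL_FLD<tmp>               // retyped below to info.compRetNativeType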
8032 unsigned tmpNum = lvaGrabTemp(true DEBUGARG("pseudo return buffer"));
8034 // No need to spill anything as we're about to return.
8035 impAssignTempGen(tmpNum, op, info.compMethodInfo->args.retTypeClass, (unsigned)CHECK_SPILL_NONE);
8037 // Don't create both a GT_ADDR & GT_OBJ just to undo all of that; instead,
8038 // jump directly to a GT_LCL_FLD.
8039 op = gtNewLclvNode(tmpNum, info.compRetNativeType);
8040 op->ChangeOper(GT_LCL_FLD);
8044 assert(info.compRetNativeType == op->gtCall.gtReturnType);
8046 // Don't change the gtType of the node just yet, it will get changed later.
8050 else if (op->gtOper == GT_COMMA)
8052 op->gtOp.gtOp2 = impFixupStructReturnType(op->gtOp.gtOp2, retClsHnd);
8055 op->gtType = info.compRetNativeType;
8060 /*****************************************************************************
8061 CEE_LEAVE may be jumping out of a protected block, viz, a catch or a
8062 finally-protected try. We find the finally blocks protecting the current
8063 offset (in order) by walking over the complete exception table and
8064 finding enclosing clauses. This assumes that the table is sorted.
8065 This will create a series of BBJ_CALLFINALLY -> BBJ_CALLFINALLY ... -> BBJ_ALWAYS.
8067 If we are leaving a catch handler, we need to attach the
8068 CPX_ENDCATCHes to the correct BBJ_CALLFINALLY blocks.
8070 After this function, the BBJ_LEAVE block has been converted to a different type.
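For illustration (hypothetical IL shape, not from this source): leaving two nested
finally-protected trys, e.g.
    .try { .try { ... leave TARGET } finally { ... endfinally } } finally { ... endfinally }
is imported as BBJ_CALLFINALLY (inner finally) -> BBJ_CALLFINALLY (outer finally) -> BBJ_ALWAYS
to TARGET, with any pending ENDCATCH helper calls added when leaving catch handlers along the way.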
8073 #if !FEATURE_EH_FUNCLETS
8075 void Compiler::impImportLeave(BasicBlock* block)
8080 printf("\nBefore import CEE_LEAVE:\n");
8081 fgDispBasicBlocks();
8086 bool invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created)
8087 unsigned blkAddr = block->bbCodeOffs;
8088 BasicBlock* leaveTarget = block->bbJumpDest;
8089 unsigned jmpAddr = leaveTarget->bbCodeOffs;
8091 // LEAVE clears the stack, so spill side effects and set the stack depth to 0
8093 impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave"));
8094 verCurrentState.esStackDepth = 0;
8096 assert(block->bbJumpKind == BBJ_LEAVE);
8097 assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != NULL); // should be a BB boundary
8099 BasicBlock* step = DUMMY_INIT(NULL);
8100 unsigned encFinallies = 0; // Number of enclosing finallies.
8101 GenTreePtr endCatches = NULL;
8102 GenTreePtr endLFin = NULL; // The statement tree to indicate the end of locally-invoked finally.
8107 for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
8109 // Grab the handler offsets
8111 IL_OFFSET tryBeg = HBtab->ebdTryBegOffs();
8112 IL_OFFSET tryEnd = HBtab->ebdTryEndOffs();
8113 IL_OFFSET hndBeg = HBtab->ebdHndBegOffs();
8114 IL_OFFSET hndEnd = HBtab->ebdHndEndOffs();
8116 /* Is this a catch-handler we are CEE_LEAVEing out of?
8117 * If so, we need to call CORINFO_HELP_ENDCATCH.
8120 if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd))
8122 // Can't CEE_LEAVE out of a finally/fault handler
8123 if (HBtab->HasFinallyOrFaultHandler())
8124 BADCODE("leave out of fault/finally block");
8126 // Create the call to CORINFO_HELP_ENDCATCH
8127 GenTreePtr endCatch = gtNewHelperCallNode(CORINFO_HELP_ENDCATCH, TYP_VOID);
8129 // Make a list of all the currently pending endCatches
8131 endCatches = gtNewOperNode(GT_COMMA, TYP_VOID, endCatches, endCatch);
8133 endCatches = endCatch;
8138 printf("impImportLeave - BB%02u jumping out of catch handler EH#%u, adding call to "
8139 "CORINFO_HELP_ENDCATCH\n",
8140 block->bbNum, XTnum);
8144 else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
8145 !jitIsBetween(jmpAddr, tryBeg, tryEnd))
8147 /* This is a finally-protected try we are jumping out of */
8149 /* If there are any pending endCatches, and we have already
8150 jumped out of a finally-protected try, then the endCatches
8151 have to be put in a block in an outer try for async
8152 exceptions to work correctly.
8153 Else, just append to the original block */
8155 BasicBlock* callBlock;
8157 assert(!encFinallies == !endLFin); // if we have finallies, we better have an endLFin tree, and vice-versa
8159 if (encFinallies == 0)
8161 assert(step == DUMMY_INIT(NULL));
8163 callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY
8166 impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
8171 printf("impImportLeave - jumping out of a finally-protected try, convert block to BBJ_CALLFINALLY "
8172 "block BB%02u [%08p]\n",
8173 callBlock->bbNum, dspPtr(callBlock));
8179 assert(step != DUMMY_INIT(NULL));
8181 /* Calling the finally block */
8182 callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, XTnum + 1, 0, step);
8183 assert(step->bbJumpKind == BBJ_ALWAYS);
8184 step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next
8185 // finally in the chain)
8186 step->bbJumpDest->bbRefs++;
8188 /* The new block will inherit this block's weight */
8189 callBlock->setBBWeight(block->bbWeight);
8190 callBlock->bbFlags |= block->bbFlags & BBF_RUN_RARELY;
8195 printf("impImportLeave - jumping out of a finally-protected try, new BBJ_CALLFINALLY block BB%02u "
8197 callBlock->bbNum, dspPtr(callBlock));
8201 GenTreePtr lastStmt;
8205 lastStmt = gtNewStmt(endCatches);
8206 endLFin->gtNext = lastStmt;
8207 lastStmt->gtPrev = endLFin;
8214 // note that this sets BBF_IMPORTED on the block
8215 impEndTreeList(callBlock, endLFin, lastStmt);
8218 step = fgNewBBafter(BBJ_ALWAYS, callBlock, true);
8219 /* The new block will inherit this block's weight */
8220 step->setBBWeight(block->bbWeight);
8221 step->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS;
8226 printf("impImportLeave - jumping out of a finally-protected try, created step (BBJ_ALWAYS) block "
8228 step->bbNum, dspPtr(step));
8232 unsigned finallyNesting = compHndBBtab[XTnum].ebdHandlerNestingLevel;
8233 assert(finallyNesting <= compHndBBtabCount);
8235 callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler.
8236 endLFin = new (this, GT_END_LFIN) GenTreeVal(GT_END_LFIN, TYP_VOID, finallyNesting);
8237 endLFin = gtNewStmt(endLFin);
8242 invalidatePreds = true;
8246 /* Append any remaining endCatches, if any */
8248 assert(!encFinallies == !endLFin);
8250 if (encFinallies == 0)
8252 assert(step == DUMMY_INIT(NULL));
8253 block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS
8256 impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
8261 printf("impImportLeave - no enclosing finally-protected try blocks; convert CEE_LEAVE block to BBJ_ALWAYS "
8262 "block BB%02u [%08p]\n",
8263 block->bbNum, dspPtr(block));
8269 // If leaveTarget is the start of another try block, we want to make sure that
8270 // we do not insert finalStep into that try block. Hence, we find the enclosing try region
8271 // common to step and leaveTarget.
8272 unsigned tryIndex = bbFindInnermostCommonTryRegion(step, leaveTarget);
8274 // Insert a new BB either in the try region indicated by tryIndex or
8275 // the handler region indicated by leaveTarget->bbHndIndex,
8276 // depending on which is the inner region.
8277 BasicBlock* finalStep = fgNewBBinRegion(BBJ_ALWAYS, tryIndex, leaveTarget->bbHndIndex, step);
8278 finalStep->bbFlags |= BBF_KEEP_BBJ_ALWAYS;
8279 step->bbJumpDest = finalStep;
8281 /* The new block will inherit this block's weight */
8282 finalStep->setBBWeight(block->bbWeight);
8283 finalStep->bbFlags |= block->bbFlags & BBF_RUN_RARELY;
8288 printf("impImportLeave - finalStep block required (encFinallies(%d) > 0), new block BB%02u [%08p]\n",
8289 encFinallies, finalStep->bbNum, dspPtr(finalStep));
8293 GenTreePtr lastStmt;
8297 lastStmt = gtNewStmt(endCatches);
8298 endLFin->gtNext = lastStmt;
8299 lastStmt->gtPrev = endLFin;
8306 impEndTreeList(finalStep, endLFin, lastStmt);
8308 finalStep->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE
8310 // Queue up the jump target for importing
8312 impImportBlockPending(leaveTarget);
8314 invalidatePreds = true;
8317 if (invalidatePreds && fgComputePredsDone)
8319 JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n");
8324 fgVerifyHandlerTab();
8328 printf("\nAfter import CEE_LEAVE:\n");
8329 fgDispBasicBlocks();
8335 #else // FEATURE_EH_FUNCLETS
8337 void Compiler::impImportLeave(BasicBlock* block)
8342 printf("\nBefore import CEE_LEAVE in BB%02u (targeting BB%02u):\n", block->bbNum, block->bbJumpDest->bbNum);
8343 fgDispBasicBlocks();
8348 bool invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created)
8349 unsigned blkAddr = block->bbCodeOffs;
8350 BasicBlock* leaveTarget = block->bbJumpDest;
8351 unsigned jmpAddr = leaveTarget->bbCodeOffs;
8353 // LEAVE clears the stack, so spill side effects and set the stack depth to 0
8355 impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave"));
8356 verCurrentState.esStackDepth = 0;
8358 assert(block->bbJumpKind == BBJ_LEAVE);
8359 assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != nullptr); // should be a BB boundary
8361 BasicBlock* step = nullptr;
8365 // No step type; step == NULL.
8368 // Is the step block the BBJ_ALWAYS block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair?
8369 // That is, is step->bbJumpDest where a finally will return to?
8372 // The step block is a catch return.
8375 // The step block is in a "try", created as the target for a finally return or the target for a catch return.
8378 StepType stepType = ST_None;
8383 for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
8385 // Grab the handler offsets
8387 IL_OFFSET tryBeg = HBtab->ebdTryBegOffs();
8388 IL_OFFSET tryEnd = HBtab->ebdTryEndOffs();
8389 IL_OFFSET hndBeg = HBtab->ebdHndBegOffs();
8390 IL_OFFSET hndEnd = HBtab->ebdHndEndOffs();
8392 /* Is this a catch-handler we are CEE_LEAVEing out of?
8395 if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd))
8397 // Can't CEE_LEAVE out of a finally/fault handler
8398 if (HBtab->HasFinallyOrFaultHandler())
8400 BADCODE("leave out of fault/finally block");
8403 /* We are jumping out of a catch */
8405 if (step == nullptr)
8408 step->bbJumpKind = BBJ_EHCATCHRET; // convert the BBJ_LEAVE to BBJ_EHCATCHRET
8409 stepType = ST_Catch;
8414 printf("impImportLeave - jumping out of a catch (EH#%u), convert block BB%02u to BBJ_EHCATCHRET "
8416 XTnum, step->bbNum);
8422 BasicBlock* exitBlock;
8424 /* Create a new catch exit block in the catch region for the existing step block to jump to in this scope */
8426 exitBlock = fgNewBBinRegion(BBJ_EHCATCHRET, 0, XTnum + 1, step);
8428 assert(step->bbJumpKind == BBJ_ALWAYS || step->bbJumpKind == BBJ_EHCATCHRET);
8429 step->bbJumpDest = exitBlock; // the previous step (maybe a call to a nested finally, or a nested catch
8430 // exit) returns to this block
8431 step->bbJumpDest->bbRefs++;
8433 #if defined(_TARGET_ARM_)
8434 if (stepType == ST_FinallyReturn)
8436 assert(step->bbJumpKind == BBJ_ALWAYS);
8437 // Mark the target of a finally return
8438 step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
8440 #endif // defined(_TARGET_ARM_)
8442 /* The new block will inherit this block's weight */
8443 exitBlock->setBBWeight(block->bbWeight);
8444 exitBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8446 /* This exit block is the new step */
8448 stepType = ST_Catch;
8450 invalidatePreds = true;
8455 printf("impImportLeave - jumping out of a catch (EH#%u), new BBJ_EHCATCHRET block BB%02u\n", XTnum,
8461 else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
8462 !jitIsBetween(jmpAddr, tryBeg, tryEnd))
8464 /* We are jumping out of a finally-protected try */
8466 BasicBlock* callBlock;
8468 if (step == nullptr)
8470 #if FEATURE_EH_CALLFINALLY_THUNKS
8472 // Put the call to the finally in the enclosing region.
8473 unsigned callFinallyTryIndex =
8474 (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1;
8475 unsigned callFinallyHndIndex =
8476 (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1;
8477 callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, block);
8479 // Convert the BBJ_LEAVE to BBJ_ALWAYS, jumping to the new BBJ_CALLFINALLY. This is because
8480 // the new BBJ_CALLFINALLY is in a different EH region, thus it can't just replace the BBJ_LEAVE,
8481 // which might be in the middle of the "try". In most cases, the BBJ_ALWAYS will jump to the
8482 // next block, and flow optimizations will remove it.
8483 block->bbJumpKind = BBJ_ALWAYS;
8484 block->bbJumpDest = callBlock;
8485 block->bbJumpDest->bbRefs++;
8487 /* The new block will inherit this block's weight */
8488 callBlock->setBBWeight(block->bbWeight);
8489 callBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8494 printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block BB%02u to "
8495 "BBJ_ALWAYS, add BBJ_CALLFINALLY block BB%02u\n",
8496 XTnum, block->bbNum, callBlock->bbNum);
8500 #else // !FEATURE_EH_CALLFINALLY_THUNKS
8503 callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY
8508 printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block BB%02u to "
8509 "BBJ_CALLFINALLY block\n",
8510 XTnum, callBlock->bbNum);
8514 #endif // !FEATURE_EH_CALLFINALLY_THUNKS
8518 // Calling the finally block. We already have a step block that is either the call-to-finally from a
8519 // more nested try/finally (thus we are jumping out of multiple nested 'try' blocks, each protected by
8520 // a 'finally'), or the step block is the return from a catch.
8522 // Due to ThreadAbortException, we can't have the catch return target the call-to-finally block
8523 // directly. Note that if a 'catch' ends without resetting the ThreadAbortException, the VM will
8524 // automatically re-raise the exception, using the return address of the catch (that is, the target
8525 // block of the BBJ_EHCATCHRET) as the re-raise address. If this address is in a finally, the VM will
8526 // refuse to do the re-raise, and the ThreadAbortException will get eaten (and lost). On AMD64/ARM64,
8527 // we put the call-to-finally thunk in a special "cloned finally" EH region that does look like a
8528 // finally clause to the VM. Thus, on these platforms, we can't have BBJ_EHCATCHRET target a
8529 // BBJ_CALLFINALLY directly. (Note that on ARM32, we don't mark the thunk specially -- it lives directly
8530 // within the 'try' region protected by the finally, since we generate code in such a way that execution
8531 // never returns to the call-to-finally call, and the finally-protected 'try' region doesn't appear on
8534 assert(step->bbJumpKind == BBJ_ALWAYS || step->bbJumpKind == BBJ_EHCATCHRET);
8536 #if FEATURE_EH_CALLFINALLY_THUNKS
8537 if (step->bbJumpKind == BBJ_EHCATCHRET)
8539 // Need to create another step block in the 'try' region that will actually branch to the
8540 // call-to-finally thunk.
8541 BasicBlock* step2 = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step);
8542 step->bbJumpDest = step2;
8543 step->bbJumpDest->bbRefs++;
8544 step2->setBBWeight(block->bbWeight);
8545 step2->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8550 printf("impImportLeave - jumping out of a finally-protected try (EH#%u), step block is "
8551 "BBJ_EHCATCHRET (BB%02u), new BBJ_ALWAYS step-step block BB%02u\n",
8552 XTnum, step->bbNum, step2->bbNum);
8557 assert(stepType == ST_Catch); // Leave it as catch type for now.
8559 #endif // FEATURE_EH_CALLFINALLY_THUNKS
8561 #if FEATURE_EH_CALLFINALLY_THUNKS
8562 unsigned callFinallyTryIndex =
8563 (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1;
8564 unsigned callFinallyHndIndex =
8565 (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1;
8566 #else // !FEATURE_EH_CALLFINALLY_THUNKS
8567 unsigned callFinallyTryIndex = XTnum + 1;
8568 unsigned callFinallyHndIndex = 0; // don't care
8569 #endif // !FEATURE_EH_CALLFINALLY_THUNKS
8571 callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, step);
8572 step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next
8573 // finally in the chain)
8574 step->bbJumpDest->bbRefs++;
8576 #if defined(_TARGET_ARM_)
8577 if (stepType == ST_FinallyReturn)
8579 assert(step->bbJumpKind == BBJ_ALWAYS);
8580 // Mark the target of a finally return
8581 step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
8583 #endif // defined(_TARGET_ARM_)
8585 /* The new block will inherit this block's weight */
8586 callBlock->setBBWeight(block->bbWeight);
8587 callBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8592 printf("impImportLeave - jumping out of a finally-protected try (EH#%u), new BBJ_CALLFINALLY block "
8594 XTnum, callBlock->bbNum);
8599 step = fgNewBBafter(BBJ_ALWAYS, callBlock, true);
8600 stepType = ST_FinallyReturn;
8602 /* The new block will inherit this block's weight */
8603 step->setBBWeight(block->bbWeight);
8604 step->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS;
8609 printf("impImportLeave - jumping out of a finally-protected try (EH#%u), created step (BBJ_ALWAYS) "
8611 XTnum, step->bbNum);
8615 callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler.
8617 invalidatePreds = true;
8619 else if (HBtab->HasCatchHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
8620 !jitIsBetween(jmpAddr, tryBeg, tryEnd))
8622 // We are jumping out of a catch-protected try.
8624 // If we are returning from a call to a finally, then we must have a step block within a try
8625 // that is protected by a catch. This is so when unwinding from that finally (e.g., if code within the
8626 // finally raises an exception), the VM will find this step block, notice that it is in a protected region,
8627 // and invoke the appropriate catch.
8629 // We also need to handle a special case with the handling of ThreadAbortException. If a try/catch
8630 // catches a ThreadAbortException (which might be because it catches a parent, e.g. System.Exception),
8631 // and the catch doesn't call System.Threading.Thread::ResetAbort(), then when the catch returns to the VM,
8632 // the VM will automatically re-raise the ThreadAbortException. When it does this, it uses the target
8633 // address of the catch return as the new exception address. That is, the re-raised exception appears to
8634 // occur at the catch return address. If this exception return address skips an enclosing try/catch that
8635 // catches ThreadAbortException, then the enclosing try/catch will not catch the exception, as it should.
8640 // // something here raises ThreadAbortException
8641 // LEAVE LABEL_1; // no need to stop at LABEL_2
8642 // } catch (Exception) {
8643 // // This catches ThreadAbortException, but doesn't call System.Threading.Thread::ResetAbort(), so
8644 // // ThreadAbortException is re-raised by the VM at the address specified by the LEAVE opcode.
8645 // // This is bad, since it means the outer try/catch won't get a chance to catch the re-raised
8646 // // ThreadAbortException. So, instead, create step block LABEL_2 and LEAVE to that. We only
8647 // // need to do this transformation if the current EH block is a try/catch that catches
8648 // // ThreadAbortException (or one of its parents), however we might not be able to find that
8649 // // information, so currently we do it for all catch types.
8650 // LEAVE LABEL_1; // Convert this to LEAVE LABEL_2;
8652 // LABEL_2: LEAVE LABEL_1; // inserted by this step creation code
8653 // } catch (ThreadAbortException) {
8657 // Note that this pattern isn't theoretical: it occurs in ASP.NET, in IL code generated by the Roslyn C# compiler.
8660 if ((stepType == ST_FinallyReturn) || (stepType == ST_Catch))
8662 BasicBlock* catchStep;
8666 if (stepType == ST_FinallyReturn)
8668 assert(step->bbJumpKind == BBJ_ALWAYS);
8672 assert(stepType == ST_Catch);
8673 assert(step->bbJumpKind == BBJ_EHCATCHRET);
8676 /* Create a new exit block in the try region for the existing step block to jump to in this scope */
8677 catchStep = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step);
8678 step->bbJumpDest = catchStep;
8679 step->bbJumpDest->bbRefs++;
8681 #if defined(_TARGET_ARM_)
8682 if (stepType == ST_FinallyReturn)
8684 // Mark the target of a finally return
8685 step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
8687 #endif // defined(_TARGET_ARM_)
8689 /* The new block will inherit this block's weight */
8690 catchStep->setBBWeight(block->bbWeight);
8691 catchStep->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
8696 if (stepType == ST_FinallyReturn)
8698 printf("impImportLeave - return from finally jumping out of a catch-protected try (EH#%u), new "
8699 "BBJ_ALWAYS block BB%02u\n",
8700 XTnum, catchStep->bbNum);
8704 assert(stepType == ST_Catch);
8705 printf("impImportLeave - return from catch jumping out of a catch-protected try (EH#%u), new "
8706 "BBJ_ALWAYS block BB%02u\n",
8707 XTnum, catchStep->bbNum);
8712 /* This block is the new step */
8716 invalidatePreds = true;
8721 if (step == nullptr)
8723 block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS
8728 printf("impImportLeave - no enclosing finally-protected try blocks or catch handlers; convert CEE_LEAVE "
8729 "block BB%02u to BBJ_ALWAYS\n",
8736 step->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE
8738 #if defined(_TARGET_ARM_)
8739 if (stepType == ST_FinallyReturn)
8741 assert(step->bbJumpKind == BBJ_ALWAYS);
8742 // Mark the target of a finally return
8743 step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
8745 #endif // defined(_TARGET_ARM_)
8750 printf("impImportLeave - final destination of step blocks set to BB%02u\n", leaveTarget->bbNum);
8754 // Queue up the jump target for importing
8756 impImportBlockPending(leaveTarget);
8759 if (invalidatePreds && fgComputePredsDone)
8761 JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n");
8766 fgVerifyHandlerTab();
8770 printf("\nAfter import CEE_LEAVE:\n");
8771 fgDispBasicBlocks();
8777 #endif // FEATURE_EH_FUNCLETS
8779 /*****************************************************************************/
8780 // This is called when reimporting a leave block. It resets the JumpKind,
8781 // JumpDest, and bbNext to the original values
8783 void Compiler::impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr)
8785 #if FEATURE_EH_FUNCLETS
8786 // With EH funclets, while importing the leave opcode we create another block ending with BBJ_ALWAYS (call it B1),
8787 // and the block containing the leave (say B0) is marked as BBJ_CALLFINALLY. Say for some reason we reimport B0:
8788 // it is reset (in this routine) by marking it as ending with BBJ_LEAVE, and further down, when B0 is reimported, we
8789 // create another BBJ_ALWAYS (call it B2). In this process B1 gets orphaned and any blocks to which B1 is the
8790 // only predecessor are also considered orphans and attempted to be deleted.
8797 // leave OUTSIDE; // B0 is the block containing this leave, following this would be B1
8802 // In the above nested try-finally example, we create a step block (call it Bstep) which branches to a block
8803 // that a finally would branch to (such a block is marked as a finally target). Block B1 branches to the step block.
8804 // Because of re-import of B0, Bstep is also orphaned. Since Bstep is a finally target it cannot be removed. To
8805 // work around this we will duplicate B0 (call it B0Dup) before resetting. B0Dup is marked as BBJ_CALLFINALLY and
8806 // only serves to pair up with B1 (BBJ_ALWAYS) that got orphaned. Now during orphan block deletion B0Dup and B1
8807 // will be treated as a pair and handled correctly.
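// Illustrative sketch of the resulting block layout (hypothetical, not from this source):
//     B0Dup: BBJ_CALLFINALLY  (internal, rarely run, bbRefs == 0)  <- pairs up with B1
//     B1:    BBJ_ALWAYS                                            <- previously orphaned
//     B0:    reset back to BBJ_LEAVE and reimported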
8808 if (block->bbJumpKind == BBJ_CALLFINALLY)
8810 BasicBlock* dupBlock = bbNewBasicBlock(block->bbJumpKind);
8811 dupBlock->bbFlags = block->bbFlags;
8812 dupBlock->bbJumpDest = block->bbJumpDest;
8813 dupBlock->copyEHRegion(block);
8814 dupBlock->bbCatchTyp = block->bbCatchTyp;
8816 // Mark this block as
8817 // a) not referenced by any other block to make sure that it gets deleted
8819 // c) prevented from being imported
8822 dupBlock->bbRefs = 0;
8823 dupBlock->bbWeight = 0;
8824 dupBlock->bbFlags |= BBF_IMPORTED | BBF_INTERNAL | BBF_RUN_RARELY;
8826 // Insert the block right after the block which is getting reset so that BBJ_CALLFINALLY and BBJ_ALWAYS
8827 // will be next to each other.
8828 fgInsertBBafter(block, dupBlock);
8833 printf("New Basic Block BB%02u duplicate of BB%02u created.\n", dupBlock->bbNum, block->bbNum);
8837 #endif // FEATURE_EH_FUNCLETS
8839 block->bbJumpKind = BBJ_LEAVE;
8841 block->bbJumpDest = fgLookupBB(jmpAddr);
8843 // We will leave the BBJ_ALWAYS block we introduced. When it's reimported
8844 // the BBJ_ALWAYS block will be unreachable, and will be removed after. The
8845 // reason we don't want to remove the block at this point is that if we call
8846 // fgInitBBLookup() again we will do it wrong as the BBJ_ALWAYS block won't be
8847 // added and the linked list length will be different than fgBBcount.
8850 /*****************************************************************************/
8851 // Get the first non-prefix opcode. Used for verification of valid combinations
8852 // of prefixes and actual opcodes.
8854 static OPCODE impGetNonPrefixOpcode(const BYTE* codeAddr, const BYTE* codeEndp)
8856 while (codeAddr < codeEndp)
8858 OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr);
8859 codeAddr += sizeof(__int8);
8861 if (opcode == CEE_PREFIX1)
8863 if (codeAddr >= codeEndp)
8867 opcode = (OPCODE)(getU1LittleEndian(codeAddr) + 256);
8868 codeAddr += sizeof(__int8);
8876 case CEE_CONSTRAINED:
8883 codeAddr += opcodeSizes[opcode];
8889 /*****************************************************************************/
8890 // Checks whether the opcode is a valid opcode for volatile. and unaligned. prefixes
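// For illustration (hypothetical IL, not from this source):
//     volatile. ldsfld int32 C::s_x    // accepted: volatile. is allowed on ldsfld/stsfld
//     unaligned. 1 ldind.i4            // accepted: ldind/stind/ldfld/stfld/ldobj/stobj/initblk/cpblk
//     volatile. add                    // rejected with BADCODE: not a memory-access opcode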
8892 static void impValidateMemoryAccessOpcode(const BYTE* codeAddr, const BYTE* codeEndp, bool volatilePrefix)
8894 OPCODE opcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
8897 // The opcodes of all the ldind and stind instructions happen to be contiguous, except for stind.i.
8898 ((CEE_LDIND_I1 <= opcode) && (opcode <= CEE_STIND_R8)) || (opcode == CEE_STIND_I) ||
8899 (opcode == CEE_LDFLD) || (opcode == CEE_STFLD) || (opcode == CEE_LDOBJ) || (opcode == CEE_STOBJ) ||
8900 (opcode == CEE_INITBLK) || (opcode == CEE_CPBLK) ||
8901 // the volatile. prefix is also allowed with ldsfld and stsfld
8902 (volatilePrefix && ((opcode == CEE_LDSFLD) || (opcode == CEE_STSFLD)))))
8904 BADCODE("Invalid opcode for unaligned. or volatile. prefix");
8908 /*****************************************************************************/
8912 #undef RETURN // undef contracts RETURN macro
8927 const static controlFlow_t controlFlow[] = {
8928 #define OPDEF(c, s, pop, push, args, type, l, s1, s2, flow) flow,
8929 #include "opcode.def"
8935 /*****************************************************************************
8936 *  Determine the result type of an arithmetic operation
8937 *  On 64-bit, inserts upcasts when native int is mixed with int32
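 *  A rough summary of the byref cases handled below (a sketch, not exhaustive):
 *      byref - byref            => native int
 *      byref +/- [native] int   => byref
 *      [native] int - byref     => native int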
8939 var_types Compiler::impGetByRefResultType(genTreeOps oper, bool fUnsigned, GenTreePtr* pOp1, GenTreePtr* pOp2)
8941 var_types type = TYP_UNDEF;
8942 GenTreePtr op1 = *pOp1, op2 = *pOp2;
8944 // Arithmetic operations are generally only allowed with
8945 // primitive types, but certain operations are allowed
8948 if ((oper == GT_SUB) && (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF))
8950 if ((genActualType(op1->TypeGet()) == TYP_BYREF) && (genActualType(op2->TypeGet()) == TYP_BYREF))
8952 // byref1-byref2 => gives a native int
8955 else if (genActualTypeIsIntOrI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_BYREF))
8957 // [native] int - byref => gives a native int
8960 // The reason is that it is possible, in managed C++,
8961 // to have a tree like this:
8968 // const(h) int addr byref
8970 // <BUGNUM> VSW 318822 </BUGNUM>
8972 // So here we decide to make the resulting type a native int.
8973 CLANG_FORMAT_COMMENT_ANCHOR;
8975 #ifdef _TARGET_64BIT_
8976 if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
8978 // insert an explicit upcast
8979 op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
8981 #endif // _TARGET_64BIT_
8987 // byref - [native] int => gives a byref
8988 assert(genActualType(op1->TypeGet()) == TYP_BYREF && genActualTypeIsIntOrI(op2->TypeGet()));
8990 #ifdef _TARGET_64BIT_
8991 if ((genActualType(op2->TypeGet()) != TYP_I_IMPL))
8993 // insert an explicit upcast
8994 op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
8996 #endif // _TARGET_64BIT_
9001 else if ((oper == GT_ADD) &&
9002 (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF))
9004 // byref + [native] int => gives a byref
9006 // [native] int + byref => gives a byref
9008 // only one can be a byref : byref op byref not allowed
9009 assert(genActualType(op1->TypeGet()) != TYP_BYREF || genActualType(op2->TypeGet()) != TYP_BYREF);
9010 assert(genActualTypeIsIntOrI(op1->TypeGet()) || genActualTypeIsIntOrI(op2->TypeGet()));
9012 #ifdef _TARGET_64BIT_
9013 if (genActualType(op2->TypeGet()) == TYP_BYREF)
9015 if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
9017 // insert an explicit upcast
9018 op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9021 else if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
9023 // insert an explicit upcast
9024 op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9026 #endif // _TARGET_64BIT_
9030 #ifdef _TARGET_64BIT_
9031 else if (genActualType(op1->TypeGet()) == TYP_I_IMPL || genActualType(op2->TypeGet()) == TYP_I_IMPL)
9033 assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType));
9035 // int + long => gives long
9036 // long + int => gives long
9037 // we get this because in the IL the long isn't Int64, it's just IntPtr
9039 if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
9041 // insert an explicit upcast
9042 op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9044 else if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
9046 // insert an explicit upcast
9047 op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
9052 #else // 32-bit TARGET
9053 else if (genActualType(op1->TypeGet()) == TYP_LONG || genActualType(op2->TypeGet()) == TYP_LONG)
9055 assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType));
9057 // int + long => gives long
9058 // long + int => gives long
9062 #endif // _TARGET_64BIT_
9065 // int + int => gives an int
9066 assert(genActualType(op1->TypeGet()) != TYP_BYREF && genActualType(op2->TypeGet()) != TYP_BYREF);
9068 assert(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
9069 varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
9071 type = genActualType(op1->gtType);
9073 #if FEATURE_X87_DOUBLES
9075 // For x87, since we only have 1 size of registers, prefer double
9076 // For everybody else, be more precise
9077 if (type == TYP_FLOAT)
9080 #else // !FEATURE_X87_DOUBLES
9082 // If both operands are TYP_FLOAT, then leave it as TYP_FLOAT.
9083 // Otherwise, turn floats into doubles
9084 if ((type == TYP_FLOAT) && (genActualType(op2->gtType) != TYP_FLOAT))
9086 assert(genActualType(op2->gtType) == TYP_DOUBLE);
9090 #endif // FEATURE_X87_DOUBLES
9093 #if FEATURE_X87_DOUBLES
9094 assert(type == TYP_BYREF || type == TYP_DOUBLE || type == TYP_LONG || type == TYP_INT);
9095 #else // FEATURE_X87_DOUBLES
9096 assert(type == TYP_BYREF || type == TYP_DOUBLE || type == TYP_FLOAT || type == TYP_LONG || type == TYP_INT);
9097 #endif // FEATURE_X87_DOUBLES
9102 /*****************************************************************************
9103 * Casting Helper Function to service both CEE_CASTCLASS and CEE_ISINST
9105 * typeRef contains the token, op1 contains the value being cast,
9106 * and op2 contains code that creates the type handle corresponding to typeRef
9107 * isCastClass = true means CEE_CASTCLASS, false means CEE_ISINST
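 * For illustration (hypothetical C# source, not from this file): "x as Foo" compiles to
 * CEE_ISINST, which yields the object or null, while "(Foo)x" compiles to CEE_CASTCLASS,
 * which throws on failure. Both expand below into the same qmark/colon shape; they differ
 * only in what the method-table-mismatch branch produces: a slow-path helper call for
 * castclass, a null for isinst.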
9109 GenTreePtr Compiler::impCastClassOrIsInstToTree(GenTreePtr op1,
9111 CORINFO_RESOLVED_TOKEN* pResolvedToken,
9116 assert(op1->TypeGet() == TYP_REF);
9118 CorInfoHelpFunc helper = info.compCompHnd->getCastingHelper(pResolvedToken, isCastClass);
9122 // We only want to expand inline the normal CHKCASTCLASS helper;
9123 expandInline = (helper == CORINFO_HELP_CHKCASTCLASS);
9127 if (helper == CORINFO_HELP_ISINSTANCEOFCLASS)
9129 // Get the class handle and class attributes for the type we are casting to
9131 DWORD flags = info.compCompHnd->getClassAttribs(pResolvedToken->hClass);
9134 // If the class handle is marked as final we can also expand the IsInst check inline
9136 expandInline = ((flags & CORINFO_FLG_FINAL) != 0);
9139 // But don't expand inline these two cases
9141 if (flags & CORINFO_FLG_MARSHAL_BYREF)
9143 expandInline = false;
9145 else if (flags & CORINFO_FLG_CONTEXTFUL)
9147 expandInline = false;
9153 // We can't expand inline any other helpers
9155 expandInline = false;
9161 if (compCurBB->isRunRarely())
9163 expandInline = false; // not worth the code expansion in a rarely run block
9166 if ((op1->gtFlags & GTF_GLOB_EFFECT) && lvaHaveManyLocals())
9168 expandInline = false; // not worth creating an untracked local variable
9174 // If we CSE this class handle we prevent assertionProp from making SubType assertions
9175 // so instead we force the CSE logic to not consider CSE-ing this class handle.
9177 op2->gtFlags |= GTF_DONT_CSE;
9179 return gtNewHelperCallNode(helper, TYP_REF, 0, gtNewArgList(op2, op1));
9182 impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark2"));
9187 // expand the methodtable match:
9191 // GT_IND op2 (typically CNS_INT)
9196 // This can replace op1 with a GT_COMMA that evaluates op1 into a local
9198 op1 = impCloneExpr(op1, &temp, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("CASTCLASS eval op1"));
9200 // op1 is now known to be a non-complex tree
9201 // thus we can use gtClone(op1) from now on
9204 GenTreePtr op2Var = op2;
9207 op2Var = fgInsertCommaFormTemp(&op2);
9208 lvaTable[op2Var->AsLclVarCommon()->GetLclNum()].lvIsCSE = true;
9210 temp = gtNewOperNode(GT_IND, TYP_I_IMPL, temp);
9211 temp->gtFlags |= GTF_EXCEPT;
9212 condMT = gtNewOperNode(GT_NE, TYP_INT, temp, op2);
9214 GenTreePtr condNull;
9216 // expand the null check:
9218 // condNull ==> GT_EQ
9223 condNull = gtNewOperNode(GT_EQ, TYP_INT, gtClone(op1), gtNewIconNode(0, TYP_REF));
9226 // expand the true and false trees for the condMT
9228 GenTreePtr condFalse = gtClone(op1);
9229 GenTreePtr condTrue;
9233 // use the special helper that skips the cases checked by our inlined cast
9235 helper = CORINFO_HELP_CHKCASTCLASS_SPECIAL;
9237 condTrue = gtNewHelperCallNode(helper, TYP_REF, 0, gtNewArgList(op2Var, gtClone(op1)));
9241 condTrue = gtNewIconNode(0, TYP_REF);
9244 #define USE_QMARK_TREES
9246 #ifdef USE_QMARK_TREES
9249 // Generate first QMARK - COLON tree
9251 // qmarkMT ==> GT_QMARK
9255 // condFalse condTrue
9257 temp = new (this, GT_COLON) GenTreeColon(TYP_REF, condTrue, condFalse);
9258 qmarkMT = gtNewQmarkNode(TYP_REF, condMT, temp);
9259 condMT->gtFlags |= GTF_RELOP_QMARK;
9261 GenTreePtr qmarkNull;
9263 // Generate second QMARK - COLON tree
9265 // qmarkNull ==> GT_QMARK
9267 // condNull GT_COLON
9271 temp = new (this, GT_COLON) GenTreeColon(TYP_REF, gtClone(op1), qmarkMT);
9272 qmarkNull = gtNewQmarkNode(TYP_REF, condNull, temp);
9273 qmarkNull->gtFlags |= GTF_QMARK_CAST_INSTOF;
9274 condNull->gtFlags |= GTF_RELOP_QMARK;
9276 // Make QMark node a top level node by spilling it.
9277 unsigned tmp = lvaGrabTemp(true DEBUGARG("spilling QMark2"));
9278 impAssignTempGen(tmp, qmarkNull, (unsigned)CHECK_SPILL_NONE);
9279 return gtNewLclvNode(tmp, TYP_REF);
9284 #define assertImp(cond) ((void)0)
9286 #define assertImp(cond) \
9291 const int cchAssertImpBuf = 600; \
9292 char* assertImpBuf = (char*)alloca(cchAssertImpBuf); \
9293 _snprintf_s(assertImpBuf, cchAssertImpBuf, cchAssertImpBuf - 1, \
9294 "%s : Possibly bad IL with CEE_%s at offset %04Xh (op1=%s op2=%s stkDepth=%d)", #cond, \
9295 impCurOpcName, impCurOpcOffs, op1 ? varTypeName(op1->TypeGet()) : "NULL", \
9296 op2 ? varTypeName(op2->TypeGet()) : "NULL", verCurrentState.esStackDepth); \
9297 assertAbort(assertImpBuf, __FILE__, __LINE__); \
9303 #pragma warning(push)
9304 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
9306 /*****************************************************************************
9307 *  Import the instructions for the given basic block
9309 void Compiler::impImportBlockCode(BasicBlock* block)
9311 #define _impResolveToken(kind) impResolveToken(codeAddr, &resolvedToken, kind)
9317 printf("\nImporting BB%02u (PC=%03u) of '%s'", block->bbNum, block->bbCodeOffs, info.compFullName);
9321 unsigned nxtStmtIndex = impInitBlockLineInfo();
9322 IL_OFFSET nxtStmtOffs;
9324 GenTreePtr arrayNodeFrom, arrayNodeTo, arrayNodeToIndex;
9326 CorInfoHelpFunc helper;
9327 CorInfoIsAccessAllowedResult accessAllowedResult;
9328 CORINFO_HELPER_DESC calloutHelper;
9329 const BYTE* lastLoadToken = nullptr;
9331 // reject cyclic constraints
9332 if (tiVerificationNeeded)
9334 Verify(!info.hasCircularClassConstraints, "Method parent has circular class type parameter constraints.");
9335 Verify(!info.hasCircularMethodConstraints, "Method has circular method type parameter constraints.");
9338 /* Get the tree list started */
9342 /* Walk the opcodes that comprise the basic block */
9344 const BYTE* codeAddr = info.compCode + block->bbCodeOffs;
9345 const BYTE* codeEndp = info.compCode + block->bbCodeOffsEnd;
9347 IL_OFFSET opcodeOffs = block->bbCodeOffs;
9348 IL_OFFSET lastSpillOffs = opcodeOffs;
9352 /* remember the start of the delegate creation sequence (used for verification) */
9353 const BYTE* delegateCreateStart = nullptr;
9355 int prefixFlags = 0;
9356 bool explicitTailCall, constraintCall, readonlyCall;
9358 bool insertLdloc = false; // set by CEE_DUP and cleared by following store
9361 unsigned numArgs = info.compArgsCount;
9363 /* Now process all the opcodes in the block */
9365 var_types callTyp = TYP_COUNT;
9366 OPCODE prevOpcode = CEE_ILLEGAL;
9368 if (block->bbCatchTyp)
9370 if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES)
9372 impCurStmtOffsSet(block->bbCodeOffs);
9375 // We will spill the GT_CATCH_ARG and the input of the BB_QMARK block
9376 // to a temp. This is a trade off for code simplicity
9377 impSpillSpecialSideEff();
9380 while (codeAddr < codeEndp)
9382 bool usingReadyToRunHelper = false;
9383 CORINFO_RESOLVED_TOKEN resolvedToken;
9384 CORINFO_RESOLVED_TOKEN constrainedResolvedToken;
9385 CORINFO_CALL_INFO callInfo;
9386 CORINFO_FIELD_INFO fieldInfo;
9388 tiRetVal = typeInfo(); // Default type info
9390 //---------------------------------------------------------------------
9392 /* We need to restrict the max tree depth as many of the Compiler
9393 functions are recursive. We do this by spilling the stack */
9395 if (verCurrentState.esStackDepth)
9397 /* Has it been a while since we last saw a non-empty stack (which
9398    guarantees that the tree depth isn't accumulating)? */
9400 if ((opcodeOffs - lastSpillOffs) > 200)
9402 impSpillStackEnsure();
9403 lastSpillOffs = opcodeOffs;
9408 lastSpillOffs = opcodeOffs;
9409 impBoxTempInUse = false; // nothing on the stack, box temp OK to use again
9412 /* Compute the current instr offset */
9414 opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
9417 if (opts.compDbgInfo)
9420 if (!compIsForInlining())
9423 (nxtStmtIndex < info.compStmtOffsetsCount) ? info.compStmtOffsets[nxtStmtIndex] : BAD_IL_OFFSET;
9425 /* Have we reached the next stmt boundary ? */
9427 if (nxtStmtOffs != BAD_IL_OFFSET && opcodeOffs >= nxtStmtOffs)
9429 assert(nxtStmtOffs == info.compStmtOffsets[nxtStmtIndex]);
9431 if (verCurrentState.esStackDepth != 0 && opts.compDbgCode)
9433 /* We need to provide accurate IP-mapping at this point.
9434 So spill anything on the stack so that it will form
9435 gtStmts with the correct stmt offset noted */
9437 impSpillStackEnsure(true);
9440 // Has impCurStmtOffs been reported in any tree?
9442 if (impCurStmtOffs != BAD_IL_OFFSET && opts.compDbgCode)
9444 GenTreePtr placeHolder = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
9445 impAppendTree(placeHolder, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
9447 assert(impCurStmtOffs == BAD_IL_OFFSET);
9450 if (impCurStmtOffs == BAD_IL_OFFSET)
9452 /* Make sure that nxtStmtIndex is in sync with opcodeOffs.
9453 If opcodeOffs has gone past nxtStmtIndex, catch up */
9455 while ((nxtStmtIndex + 1) < info.compStmtOffsetsCount &&
9456 info.compStmtOffsets[nxtStmtIndex + 1] <= opcodeOffs)
9461 /* Go to the new stmt */
9463 impCurStmtOffsSet(info.compStmtOffsets[nxtStmtIndex]);
9465 /* Update the stmt boundary index */
9468 assert(nxtStmtIndex <= info.compStmtOffsetsCount);
9470 /* Are there any more line# entries after this one? */
9472 if (nxtStmtIndex < info.compStmtOffsetsCount)
9474 /* Remember where the next line# starts */
9476 nxtStmtOffs = info.compStmtOffsets[nxtStmtIndex];
9480 /* No more line# entries */
9482 nxtStmtOffs = BAD_IL_OFFSET;
9486 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES) &&
9487 (verCurrentState.esStackDepth == 0))
9489 /* At stack-empty locations, we have already added the tree to
9490 the stmt list with the last offset. We just need to update
9494 impCurStmtOffsSet(opcodeOffs);
9496 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES) &&
9497 impOpcodeIsCallSiteBoundary(prevOpcode))
9499 /* Make sure we have a type cached */
9500 assert(callTyp != TYP_COUNT);
9502 if (callTyp == TYP_VOID)
9504 impCurStmtOffsSet(opcodeOffs);
9506 else if (opts.compDbgCode)
9508 impSpillStackEnsure(true);
9509 impCurStmtOffsSet(opcodeOffs);
9512 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::NOP_BOUNDARIES) && (prevOpcode == CEE_NOP))
9514 if (opts.compDbgCode)
9516 impSpillStackEnsure(true);
9519 impCurStmtOffsSet(opcodeOffs);
9522 assert(impCurStmtOffs == BAD_IL_OFFSET || nxtStmtOffs == BAD_IL_OFFSET ||
9523 jitGetILoffs(impCurStmtOffs) <= nxtStmtOffs);
9527 CORINFO_CLASS_HANDLE clsHnd = DUMMY_INIT(NULL);
9528 CORINFO_CLASS_HANDLE ldelemClsHnd = DUMMY_INIT(NULL);
9529 CORINFO_CLASS_HANDLE stelemClsHnd = DUMMY_INIT(NULL);
9531 var_types lclTyp, ovflType = TYP_UNKNOWN;
9532 GenTreePtr op1 = DUMMY_INIT(NULL);
9533 GenTreePtr op2 = DUMMY_INIT(NULL);
9534 GenTreeArgList* args = nullptr; // What good do these "DUMMY_INIT"s do?
9535 GenTreePtr newObjThisPtr = DUMMY_INIT(NULL);
9536 bool uns = DUMMY_INIT(false);
9538 /* Get the next opcode and the size of its parameters */
9540 OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr);
9541 codeAddr += sizeof(__int8);
9544 impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1);
9545 JITDUMP("\n [%2u] %3u (0x%03x) ", verCurrentState.esStackDepth, impCurOpcOffs, impCurOpcOffs);
9550 // Return if any previous code has caused inline to fail.
9551 if (compDonotInline())
9556 /* Get the size of additional parameters */
9558 signed int sz = opcodeSizes[opcode];
9561 clsHnd = NO_CLASS_HANDLE;
9563 callTyp = TYP_COUNT;
9565 impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1);
9566 impCurOpcName = opcodeNames[opcode];
9568 if (verbose && (opcode != CEE_PREFIX1))
9570 printf("%s", impCurOpcName);
9573 /* Use assertImp() to display the opcode */
9575 op1 = op2 = nullptr;
9578 /* See what kind of an opcode we have, then */
9580 unsigned mflags = 0;
9581 unsigned clsFlags = 0;
9594 CORINFO_SIG_INFO sig;
9597 bool ovfl, unordered, callNode;
9599 CORINFO_CLASS_HANDLE tokenType;
9609 opcode = (OPCODE)(getU1LittleEndian(codeAddr) + 256);
9610 codeAddr += sizeof(__int8);
9611 opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
9616 // We need to call impSpillLclRefs() for a struct type lclVar.
9617 // This is done for non-block assignments in the handling of stloc.
9618 if ((op1->OperGet() == GT_ASG) && varTypeIsStruct(op1->gtOp.gtOp1) &&
9619 (op1->gtOp.gtOp1->gtOper == GT_LCL_VAR))
9621 impSpillLclRefs(op1->gtOp.gtOp1->AsLclVarCommon()->gtLclNum);
9624 /* Append 'op1' to the list of statements */
9625 impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
9630 /* Append 'op1' to the list of statements */
9632 impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
9638 // Remember at which BC offset the tree was finished
9639 impNoteLastILoffs();
9644 impPushNullObjRefOnStack();
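// The short forms ldc.i4.m1 .. ldc.i4.8 encode the constant directly in the opcode,
// hence the -1..8 range asserted below.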
9657 cval.intVal = (opcode - CEE_LDC_I4_0);
9658 assert(-1 <= cval.intVal && cval.intVal <= 8);
9662 cval.intVal = getI1LittleEndian(codeAddr);
9665 cval.intVal = getI4LittleEndian(codeAddr);
9668 JITDUMP(" %d", cval.intVal);
9669 impPushOnStack(gtNewIconNode(cval.intVal), typeInfo(TI_INT));
9673 cval.lngVal = getI8LittleEndian(codeAddr);
9674 JITDUMP(" 0x%016llx", cval.lngVal);
9675 impPushOnStack(gtNewLconNode(cval.lngVal), typeInfo(TI_LONG));
9679 cval.dblVal = getR8LittleEndian(codeAddr);
9680 JITDUMP(" %#.17g", cval.dblVal);
9681 impPushOnStack(gtNewDconNode(cval.dblVal), typeInfo(TI_DOUBLE));
9685 cval.dblVal = getR4LittleEndian(codeAddr);
9686 JITDUMP(" %#.17g", cval.dblVal);
9688 GenTreePtr cnsOp = gtNewDconNode(cval.dblVal);
9689 #if !FEATURE_X87_DOUBLES
9690 // X87 stack doesn't differentiate between float/double
9691 // so R4 is treated as R8, but everybody else does
9692 cnsOp->gtType = TYP_FLOAT;
9693 #endif // FEATURE_X87_DOUBLES
9694 impPushOnStack(cnsOp, typeInfo(TI_DOUBLE));
9700 if (compIsForInlining())
9702 if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_NO_CALLEE_LDSTR)
9704 compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_LDSTR_RESTRICTION);
9709 val = getU4LittleEndian(codeAddr);
9710 JITDUMP(" %08X", val);
9711 if (tiVerificationNeeded)
9713 Verify(info.compCompHnd->isValidStringRef(info.compScopeHnd, val), "bad string");
9714 tiRetVal = typeInfo(TI_REF, impGetStringClass());
9716 impPushOnStack(gtNewSconNode(val, info.compScopeHnd), tiRetVal);
9721 lclNum = getU2LittleEndian(codeAddr);
9722 JITDUMP(" %u", lclNum);
9723 impLoadArg(lclNum, opcodeOffs + sz + 1);
9727 lclNum = getU1LittleEndian(codeAddr);
9728 JITDUMP(" %u", lclNum);
9729 impLoadArg(lclNum, opcodeOffs + sz + 1);
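// ldarg.0 .. ldarg.3: the argument number is encoded in the opcode itself.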
9736 lclNum = (opcode - CEE_LDARG_0);
9737 assert(lclNum >= 0 && lclNum < 4);
9738 impLoadArg(lclNum, opcodeOffs + sz + 1);
9742 lclNum = getU2LittleEndian(codeAddr);
9743 JITDUMP(" %u", lclNum);
9744 impLoadLoc(lclNum, opcodeOffs + sz + 1);
9748 lclNum = getU1LittleEndian(codeAddr);
9749 JITDUMP(" %u", lclNum);
9750 impLoadLoc(lclNum, opcodeOffs + sz + 1);
9757 lclNum = (opcode - CEE_LDLOC_0);
9758 assert(lclNum >= 0 && lclNum < 4);
9759 impLoadLoc(lclNum, opcodeOffs + sz + 1);
9763 lclNum = getU2LittleEndian(codeAddr);
9767 lclNum = getU1LittleEndian(codeAddr);
9769 JITDUMP(" %u", lclNum);
9771 if (tiVerificationNeeded)
9773 Verify(lclNum < info.compILargsCount, "bad arg num");
9776 if (compIsForInlining())
9778 op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo);
9779 noway_assert(op1->gtOper == GT_LCL_VAR);
9780 lclNum = op1->AsLclVar()->gtLclNum;
9785 lclNum = compMapILargNum(lclNum); // account for possible hidden param
9786 assertImp(lclNum < numArgs);
9788 if (lclNum == info.compThisArg)
9790 lclNum = lvaArg0Var;
9792 lvaTable[lclNum].lvArgWrite = 1;
9794 if (tiVerificationNeeded)
9796 typeInfo& tiLclVar = lvaTable[lclNum].lvVerTypeInfo;
9797 Verify(tiCompatibleWith(impStackTop().seTypeInfo, NormaliseForStack(tiLclVar), true),
9800 if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
9802 Verify(!tiLclVar.IsThisPtr(), "storing to uninit this ptr");
9809 lclNum = getU2LittleEndian(codeAddr);
9810 JITDUMP(" %u", lclNum);
9814 lclNum = getU1LittleEndian(codeAddr);
9815 JITDUMP(" %u", lclNum);
9822 lclNum = (opcode - CEE_STLOC_0);
9823 assert(lclNum >= 0 && lclNum < 4);
9826 if (tiVerificationNeeded)
9828 Verify(lclNum < info.compMethodInfo->locals.numArgs, "bad local num");
9829 Verify(tiCompatibleWith(impStackTop().seTypeInfo,
9830 NormaliseForStack(lvaTable[lclNum + numArgs].lvVerTypeInfo), true),
9834 if (compIsForInlining())
9836 lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
9838 /* Have we allocated a temp for this local? */
9840 lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline stloc first use temp"));
9849 if (lclNum >= info.compLocalsCount && lclNum != lvaArg0Var)
9851 assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
9857 /* if it is a struct assignment, make certain we don't overflow the buffer */
9858 assert(lclTyp != TYP_STRUCT || lvaLclSize(lclNum) >= info.compCompHnd->getClassSize(clsHnd));
9860 if (lvaTable[lclNum].lvNormalizeOnLoad())
9862 lclTyp = lvaGetRealType(lclNum);
9866 lclTyp = lvaGetActualType(lclNum);
9870 /* Pop the value being assigned */
9873 StackEntry se = impPopStack(clsHnd);
9875 tiRetVal = se.seTypeInfo;
9879 if (varTypeIsSIMD(lclTyp) && (lclTyp != op1->TypeGet()))
9881 assert(op1->TypeGet() == TYP_STRUCT);
9882 op1->gtType = lclTyp;
9884 #endif // FEATURE_SIMD
9886 op1 = impImplicitIorI4Cast(op1, lclTyp);
9888 #ifdef _TARGET_64BIT_
9889 // Downcast the TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
9890 if (varTypeIsI(op1->TypeGet()) && (genActualType(lclTyp) == TYP_INT))
9892 assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
9893 op1 = gtNewCastNode(TYP_INT, op1, TYP_INT);
9895 #endif // _TARGET_64BIT_
9897 // We had better assign it a value of the correct type
9899 genActualType(lclTyp) == genActualType(op1->gtType) ||
9900 genActualType(lclTyp) == TYP_I_IMPL && op1->IsVarAddr() ||
9901 (genActualType(lclTyp) == TYP_I_IMPL && (op1->gtType == TYP_BYREF || op1->gtType == TYP_REF)) ||
9902 (genActualType(op1->gtType) == TYP_I_IMPL && lclTyp == TYP_BYREF) ||
9903 (varTypeIsFloating(lclTyp) && varTypeIsFloating(op1->TypeGet())) ||
9904 ((genActualType(lclTyp) == TYP_BYREF) && genActualType(op1->TypeGet()) == TYP_REF));
9906 /* If op1 is "&var" then its type is the transient "*" and it can
9907 be used either as TYP_BYREF or TYP_I_IMPL */
9909 if (op1->IsVarAddr())
9911 assertImp(genActualType(lclTyp) == TYP_I_IMPL || lclTyp == TYP_BYREF);
9913 /* When "&var" is created, we assume it is a byref. If it is
9914 being assigned to a TYP_I_IMPL var, change the type to
9915 prevent unnecessary GC info */
9917 if (genActualType(lclTyp) == TYP_I_IMPL)
9919 op1->gtType = TYP_I_IMPL;
9923 /* Filter out simple assignments to itself */
9925 if (op1->gtOper == GT_LCL_VAR && lclNum == op1->gtLclVarCommon.gtLclNum)
9929 // This is a sequence of (ldloc, dup, stloc). Can simplify
9930 // to (ldloc, stloc). Goto LDVAR to reconstruct the ldloc node.
9931 CLANG_FORMAT_COMMENT_ANCHOR;
9934 if (tiVerificationNeeded)
9937 typeInfo::AreEquivalent(tiRetVal, NormaliseForStack(lvaTable[lclNum].lvVerTypeInfo)));
9942 insertLdloc = false;
9944 impLoadVar(lclNum, opcodeOffs + sz + 1);
9947 else if (opts.compDbgCode)
9949 op1 = gtNewNothingNode();
9958 /* Create the assignment node */
9960 op2 = gtNewLclvNode(lclNum, lclTyp, opcodeOffs + sz + 1);
9962 /* If the local is aliased, we need to spill calls and
9963 indirections from the stack. */
9965 if ((lvaTable[lclNum].lvAddrExposed || lvaTable[lclNum].lvHasLdAddrOp) &&
9966 verCurrentState.esStackDepth > 0)
9968 impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("Local could be aliased"));
9971 /* Spill any refs to the local from the stack */
9973 impSpillLclRefs(lclNum);
9975 #if !FEATURE_X87_DOUBLES
9976 // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE,
9977 // so we insert a cast to the destination ('op2') type
9979 if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) &&
9980 varTypeIsFloating(op2->gtType))
9982 op1 = gtNewCastNode(op2->TypeGet(), op1, op2->TypeGet());
9984 #endif // !FEATURE_X87_DOUBLES
9986 if (varTypeIsStruct(lclTyp))
9988 op1 = impAssignStruct(op2, op1, clsHnd, (unsigned)CHECK_SPILL_ALL);
9992 // The code generator generates GC tracking information
9993 // based on the RHS of the assignment. Later the LHS (which
9994 // is a BYREF) gets used and the emitter checks that that variable
9995 // is being tracked. It is not (since the RHS was an int and did
9996 // not need tracking). To keep this assert happy, we change the RHS
9997 if (lclTyp == TYP_BYREF && !varTypeIsGC(op1->gtType))
9999 op1->gtType = TYP_BYREF;
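// gtNewAssignNode(dst, src): op2 is the destination local, op1 the value being stored.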
10001 op1 = gtNewAssignNode(op2, op1);
10004 /* If insertLdloc is true, then we need to insert a ldloc following the
10005 stloc. This is done when converting a (dup, stloc) sequence into
10006 a (stloc, ldloc) sequence. */
10010 // From SPILL_APPEND
10011 impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
10014 // From DONE_APPEND
10015 impNoteLastILoffs();
10018 insertLdloc = false;
10020 impLoadVar(lclNum, opcodeOffs + sz + 1, tiRetVal);
10027 lclNum = getU2LittleEndian(codeAddr);
10031 lclNum = getU1LittleEndian(codeAddr);
10033 JITDUMP(" %u", lclNum);
10034 if (tiVerificationNeeded)
10036 Verify(lclNum < info.compMethodInfo->locals.numArgs, "bad local num");
10037 Verify(info.compInitMem, "initLocals not set");
10040 if (compIsForInlining())
10042 // Get the local type
10043 lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
10045 /* Have we allocated a temp for this local? */
10047 lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline ldloca(s) first use temp"));
10049 op1 = gtNewLclvNode(lclNum, lvaGetActualType(lclNum));
10055 assertImp(lclNum < info.compLocalsCount);
10059 lclNum = getU2LittleEndian(codeAddr);
10063 lclNum = getU1LittleEndian(codeAddr);
10065 JITDUMP(" %u", lclNum);
10066 Verify(lclNum < info.compILargsCount, "bad arg num");
10068 if (compIsForInlining())
10070 // In IL, LDARGA(_S) is used to load the byref managed pointer of a struct argument,
10071 // followed by a ldfld to load the field.
10073 op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo);
10074 if (op1->gtOper != GT_LCL_VAR)
10076 compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDARGA_NOT_LOCAL_VAR);
10080 assert(op1->gtOper == GT_LCL_VAR);
10085 lclNum = compMapILargNum(lclNum); // account for possible hidden param
10086 assertImp(lclNum < numArgs);
10088 if (lclNum == info.compThisArg)
10090 lclNum = lvaArg0Var;
10097 op1 = gtNewLclvNode(lclNum, lvaGetActualType(lclNum), opcodeOffs + sz + 1);
10100 assert(op1->gtOper == GT_LCL_VAR);
10102 /* Note that this is supposed to create the transient type "*"
10103 which may be used as a TYP_I_IMPL. However we catch places
10104 where it is used as a TYP_I_IMPL and change the node if needed.
10105 Thus we are pessimistic and may report byrefs in the GC info
10106 where it was not absolutely needed, but it is safer this way.
10108 op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, op1);
10110 // &aliasedVar doesn't need GTF_GLOB_REF, though aliasedVar does
10111 assert((op1->gtFlags & GTF_GLOB_REF) == 0);
10113 tiRetVal = lvaTable[lclNum].lvVerTypeInfo;
10114 if (tiVerificationNeeded)
10116 // Don't allow taking address of uninit this ptr.
10117 if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
10119 Verify(!tiRetVal.IsThisPtr(), "address of uninit this ptr");
10122 if (!tiRetVal.IsByRef())
10124 tiRetVal.MakeByRef();
10128 Verify(false, "byref to byref");
10132 impPushOnStack(op1, tiRetVal);
10137 if (!info.compIsVarArgs)
10139 BADCODE("arglist in non-vararg method");
10142 if (tiVerificationNeeded)
10144 tiRetVal = typeInfo(TI_STRUCT, impGetRuntimeArgumentHandle());
10146 assertImp((info.compMethodInfo->args.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG);
10148 /* The ARGLIST cookie is a hidden 'last' parameter; we have already
10149 adjusted the arg count because this is like fetching the last param */
10150 assertImp(0 < numArgs);
10151 assert(lvaTable[lvaVarargsHandleArg].lvAddrExposed);
10152 lclNum = lvaVarargsHandleArg;
10153 op1 = gtNewLclvNode(lclNum, TYP_I_IMPL, opcodeOffs + sz + 1);
10154 op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, op1);
10155 impPushOnStack(op1, tiRetVal);
10158 case CEE_ENDFINALLY:
10160 if (compIsForInlining())
10162 assert(!"Shouldn't have exception handlers in the inliner!");
10163 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFINALLY);
10167 if (verCurrentState.esStackDepth > 0)
10169 impEvalSideEffects();
10172 if (info.compXcptnsCount == 0)
10174 BADCODE("endfinally outside finally");
10177 assert(verCurrentState.esStackDepth == 0);
10179 op1 = gtNewOperNode(GT_RETFILT, TYP_VOID, nullptr);
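// endfinally is modeled as a GT_RETFILT with no operand; endfilter (below) uses
// GT_RETFILT with the filter result as its operand.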
10182 case CEE_ENDFILTER:
10184 if (compIsForInlining())
10186 assert(!"Shouldn't have exception handlers in the inliner!");
10187 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFILTER);
10191 block->bbSetRunRarely(); // filters are rare
10193 if (info.compXcptnsCount == 0)
10195 BADCODE("endfilter outside filter");
10198 if (tiVerificationNeeded)
10200 Verify(impStackTop().seTypeInfo.IsType(TI_INT), "bad endfilt arg");
10203 op1 = impPopStack().val;
10204 assertImp(op1->gtType == TYP_INT);
10205 if (!bbInFilterILRange(block))
10207 BADCODE("EndFilter outside a filter handler");
10210 /* Mark current bb as end of filter */
10212 assert(compCurBB->bbFlags & BBF_DONT_REMOVE);
10213 assert(compCurBB->bbJumpKind == BBJ_EHFILTERRET);
10215 /* Mark catch handler as successor */
10217 op1 = gtNewOperNode(GT_RETFILT, op1->TypeGet(), op1);
10218 if (verCurrentState.esStackDepth != 0)
10220 verRaiseVerifyException(INDEBUG("stack must be 1 on end of filter") DEBUGARG(__FILE__)
10221 DEBUGARG(__LINE__));
10226 prefixFlags &= ~PREFIX_TAILCALL; // ret without call before it
10228 if (!impReturnInstruction(block, prefixFlags, opcode))
10239 assert(!compIsForInlining());
10241 if (tiVerificationNeeded)
10243 Verify(false, "Invalid opcode: CEE_JMP");
10246 if ((info.compFlags & CORINFO_FLG_SYNCH) || block->hasTryIndex() || block->hasHndIndex())
10248 /* CEE_JMP does not make sense in some "protected" regions. */
10250 BADCODE("Jmp not allowed in protected region");
10253 if (verCurrentState.esStackDepth != 0)
10255 BADCODE("Stack must be empty after CEE_JMPs");
10258 _impResolveToken(CORINFO_TOKENKIND_Method);
10260 JITDUMP(" %08X", resolvedToken.token);
10262 /* The signature of the target has to be identical to ours.
10263 At least check that argCnt and returnType match */
10265 eeGetMethodSig(resolvedToken.hMethod, &sig);
10266 if (sig.numArgs != info.compMethodInfo->args.numArgs ||
10267 sig.retType != info.compMethodInfo->args.retType ||
10268 sig.callConv != info.compMethodInfo->args.callConv)
10270 BADCODE("Incompatible target for CEE_JMPs");
10273 #if defined(_TARGET_XARCH_) || defined(_TARGET_ARMARCH_)
10275 op1 = new (this, GT_JMP) GenTreeVal(GT_JMP, TYP_VOID, (size_t)resolvedToken.hMethod);
10277 /* Mark the basic block as being a JUMP instead of RETURN */
10279 block->bbFlags |= BBF_HAS_JMP;
10281 /* Set this flag to make sure register arguments have a location assigned
10282 * even if we don't use them inside the method */
10284 compJmpOpUsed = true;
10286 fgNoStructPromotion = true;
10290 #else // !_TARGET_XARCH_ && !_TARGET_ARMARCH_
10292 // Import this just like a series of LDARGs + tail. + call + ret
10294 if (info.compIsVarArgs)
10296 // For now we don't implement true tail calls, so this breaks varargs.
10297 // So warn the user instead of generating bad code.
10298 // This is a semi-temporary workaround for DevDiv 173860, until we can properly
10299 // implement true tail calls.
10300 IMPL_LIMITATION("varargs + CEE_JMP doesn't work yet");
10303 // First load up the arguments (0 - N)
10304 for (unsigned argNum = 0; argNum < info.compILargsCount; argNum++)
10306 impLoadArg(argNum, opcodeOffs + sz + 1);
10309 // Now generate the tail call
10310 noway_assert(prefixFlags == 0);
10311 prefixFlags = PREFIX_TAILCALL_EXPLICIT;
10314 eeGetCallInfo(&resolvedToken, NULL,
10315 combine(CORINFO_CALLINFO_ALLOWINSTPARAM, CORINFO_CALLINFO_SECURITYCHECKS), &callInfo);
10317 // All calls and delegates need a security callout.
10318 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
10320 callTyp = impImportCall(CEE_CALL, &resolvedToken, NULL, NULL, PREFIX_TAILCALL_EXPLICIT, &callInfo,
10323 // And finish with the ret
10326 #endif // _TARGET_XARCH_ || _TARGET_ARMARCH_
10329 assertImp(sz == sizeof(unsigned));
10331 _impResolveToken(CORINFO_TOKENKIND_Class);
10333 JITDUMP(" %08X", resolvedToken.token);
10335 ldelemClsHnd = resolvedToken.hClass;
10337 if (tiVerificationNeeded)
10339 typeInfo tiArray = impStackTop(1).seTypeInfo;
10340 typeInfo tiIndex = impStackTop().seTypeInfo;
10342 // As per ECMA, the 'index' operand can be either int32 or native int.
10343 Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10345 typeInfo arrayElemType = verMakeTypeInfo(ldelemClsHnd);
10346 Verify(tiArray.IsNullObjRef() ||
10347 typeInfo::AreEquivalent(verGetArrayElemType(tiArray), arrayElemType),
10350 tiRetVal = arrayElemType;
10351 tiRetVal.MakeByRef();
10352 if (prefixFlags & PREFIX_READONLY)
10354 tiRetVal.SetIsReadonlyByRef();
10357 // an array interior pointer is always in the heap
10358 tiRetVal.SetIsPermanentHomeByRef();
10361 // If it's a value class array we just do a simple address-of
10362 if (eeIsValueClass(ldelemClsHnd))
10364 CorInfoType cit = info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd);
10365 if (cit == CORINFO_TYPE_UNDEF)
10367 lclTyp = TYP_STRUCT;
10371 lclTyp = JITtype2varType(cit);
10373 goto ARR_LD_POST_VERIFY;
10376 // Similarly, if it's a readonly access, we can do a simple address-of
10377 // without doing a runtime type-check
10378 if (prefixFlags & PREFIX_READONLY)
10381 goto ARR_LD_POST_VERIFY;
10384 // Otherwise we need the full helper function with run-time type check
10385 op1 = impTokenToHandle(&resolvedToken);
10386 if (op1 == nullptr)
10387 { // compDonotInline()
10391 args = gtNewArgList(op1); // Type
10392 args = gtNewListNode(impPopStack().val, args); // index
10393 args = gtNewListNode(impPopStack().val, args); // array
10394 op1 = gtNewHelperCallNode(CORINFO_HELP_LDELEMA_REF, TYP_BYREF, GTF_EXCEPT, args);
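// The LDELEMA_REF helper performs the run-time array type check and returns a byref to the element.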
10396 impPushOnStack(op1, tiRetVal);
10399 // ldelem for reference and value types
10401 assertImp(sz == sizeof(unsigned));
10403 _impResolveToken(CORINFO_TOKENKIND_Class);
10405 JITDUMP(" %08X", resolvedToken.token);
10407 ldelemClsHnd = resolvedToken.hClass;
10409 if (tiVerificationNeeded)
10411 typeInfo tiArray = impStackTop(1).seTypeInfo;
10412 typeInfo tiIndex = impStackTop().seTypeInfo;
10414 // As per ECMA, the 'index' operand can be either int32 or native int.
10415 Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10416 tiRetVal = verMakeTypeInfo(ldelemClsHnd);
10418 Verify(tiArray.IsNullObjRef() || tiCompatibleWith(verGetArrayElemType(tiArray), tiRetVal, false),
10419 "type of array incompatible with type operand");
10420 tiRetVal.NormaliseForStack();
10423 // If it's a reference type or generic variable type
10424 // then just generate code as though it's a ldelem.ref instruction
10425 if (!eeIsValueClass(ldelemClsHnd))
10428 opcode = CEE_LDELEM_REF;
10432 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(ldelemClsHnd);
10433 lclTyp = JITtype2varType(jitTyp);
10434 tiRetVal = verMakeTypeInfo(ldelemClsHnd); // precise type always needed for struct
10435 tiRetVal.NormaliseForStack();
10437 goto ARR_LD_POST_VERIFY;
10439 case CEE_LDELEM_I1:
10442 case CEE_LDELEM_I2:
10443 lclTyp = TYP_SHORT;
10446 lclTyp = TYP_I_IMPL;
10449 // Should be UINT, but since no platform widens 4->8 bytes it doesn't matter
10450 // and treating it as TYP_INT avoids other asserts.
10451 case CEE_LDELEM_U4:
10455 case CEE_LDELEM_I4:
10458 case CEE_LDELEM_I8:
10461 case CEE_LDELEM_REF:
10464 case CEE_LDELEM_R4:
10465 lclTyp = TYP_FLOAT;
10467 case CEE_LDELEM_R8:
10468 lclTyp = TYP_DOUBLE;
10470 case CEE_LDELEM_U1:
10471 lclTyp = TYP_UBYTE;
10473 case CEE_LDELEM_U2:
10479 if (tiVerificationNeeded)
10481 typeInfo tiArray = impStackTop(1).seTypeInfo;
10482 typeInfo tiIndex = impStackTop().seTypeInfo;
10484 // As per ECMA, the 'index' operand can be either int32 or native int.
10485 Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10486 if (tiArray.IsNullObjRef())
10488 if (lclTyp == TYP_REF)
10489 { // we will say a deref of a null array yields a null ref
10490 tiRetVal = typeInfo(TI_NULL);
10494 tiRetVal = typeInfo(lclTyp);
10499 tiRetVal = verGetArrayElemType(tiArray);
10500 typeInfo arrayElemTi = typeInfo(lclTyp);
10501 #ifdef _TARGET_64BIT_
10502 if (opcode == CEE_LDELEM_I)
10504 arrayElemTi = typeInfo::nativeInt();
10507 if (lclTyp != TYP_REF && lclTyp != TYP_STRUCT)
10509 Verify(typeInfo::AreEquivalent(tiRetVal, arrayElemTi), "bad array");
10512 #endif // _TARGET_64BIT_
10514 Verify(tiRetVal.IsType(arrayElemTi.GetType()), "bad array");
10517 tiRetVal.NormaliseForStack();
10519 ARR_LD_POST_VERIFY:
10521 /* Pull the index value and array address */
10522 op2 = impPopStack().val;
10523 op1 = impPopStack().val;
10524 assertImp(op1->gtType == TYP_REF);
10526 /* Check for null pointer - in the inliner case we simply abort */
10528 if (compIsForInlining())
10530 if (op1->gtOper == GT_CNS_INT)
10532 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NULL_FOR_LDELEM);
10537 op1 = impCheckForNullPointer(op1);
10539 /* Mark the block as containing an index expression */
10541 if (op1->gtOper == GT_LCL_VAR)
10543 if (op2->gtOper == GT_LCL_VAR || op2->gtOper == GT_CNS_INT || op2->gtOper == GT_ADD)
10545 block->bbFlags |= BBF_HAS_IDX_LEN;
10546 optMethodFlags |= OMF_HAS_ARRAYREF;
10550 /* Create the index node and push it on the stack */
10552 op1 = gtNewIndexRef(lclTyp, op1, op2);
10554 ldstruct = (opcode == CEE_LDELEM && lclTyp == TYP_STRUCT);
10556 if ((opcode == CEE_LDELEMA) || ldstruct ||
10557 (ldelemClsHnd != DUMMY_INIT(NULL) && eeIsValueClass(ldelemClsHnd)))
10559 assert(ldelemClsHnd != DUMMY_INIT(NULL));
10561 // remember the element size
10562 if (lclTyp == TYP_REF)
10564 op1->gtIndex.gtIndElemSize = sizeof(void*);
10568 // If ldelemClsHnd is precisely a primitive type, use that; otherwise, preserve the struct type.
10569 if (info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd) == CORINFO_TYPE_UNDEF)
10571 op1->gtIndex.gtStructElemClass = ldelemClsHnd;
10573 assert(lclTyp != TYP_STRUCT || op1->gtIndex.gtStructElemClass != nullptr);
10574 if (lclTyp == TYP_STRUCT)
10576 size = info.compCompHnd->getClassSize(ldelemClsHnd);
10577 op1->gtIndex.gtIndElemSize = size;
10578 op1->gtType = lclTyp;
10582 if ((opcode == CEE_LDELEMA) || ldstruct)
10585 lclTyp = TYP_BYREF;
10587 op1 = gtNewOperNode(GT_ADDR, lclTyp, op1);
10591 assert(lclTyp != TYP_STRUCT);
10597 // Create an OBJ for the result
10598 op1 = gtNewObjNode(ldelemClsHnd, op1);
10599 op1->gtFlags |= GTF_EXCEPT;
10601 impPushOnStack(op1, tiRetVal);
10604 // stelem for reference and value types
10607 assertImp(sz == sizeof(unsigned));
10609 _impResolveToken(CORINFO_TOKENKIND_Class);
10611 JITDUMP(" %08X", resolvedToken.token);
10613 stelemClsHnd = resolvedToken.hClass;
10615 if (tiVerificationNeeded)
10617 typeInfo tiArray = impStackTop(2).seTypeInfo;
10618 typeInfo tiIndex = impStackTop(1).seTypeInfo;
10619 typeInfo tiValue = impStackTop().seTypeInfo;
10621 // As per ECMA, the 'index' operand can be either int32 or native int.
10622 Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10623 typeInfo arrayElem = verMakeTypeInfo(stelemClsHnd);
10625 Verify(tiArray.IsNullObjRef() || tiCompatibleWith(arrayElem, verGetArrayElemType(tiArray), false),
10626 "type operand incompatible with array element type");
10627 arrayElem.NormaliseForStack();
10628 Verify(tiCompatibleWith(tiValue, arrayElem, true), "value incompatible with type operand");
10631 // If it's a reference type just behave as though it's a stelem.ref instruction
10632 if (!eeIsValueClass(stelemClsHnd))
10634 goto STELEM_REF_POST_VERIFY;
10637 // Otherwise extract the type
10639 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(stelemClsHnd);
10640 lclTyp = JITtype2varType(jitTyp);
10641 goto ARR_ST_POST_VERIFY;
10644 case CEE_STELEM_REF:
10646 if (tiVerificationNeeded)
10648 typeInfo tiArray = impStackTop(2).seTypeInfo;
10649 typeInfo tiIndex = impStackTop(1).seTypeInfo;
10650 typeInfo tiValue = impStackTop().seTypeInfo;
10652 // As per ECMA, the 'index' operand can be either int32 or native int.
10653 Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10654 Verify(tiValue.IsObjRef(), "bad value");
10656 // we only check that it is an object reference; the helper does additional checks
10657 Verify(tiArray.IsNullObjRef() || verGetArrayElemType(tiArray).IsType(TI_REF), "bad array");
10660 arrayNodeTo = impStackTop(2).val;
10661 arrayNodeToIndex = impStackTop(1).val;
10662 arrayNodeFrom = impStackTop().val;
10665 // Note that it is not legal to optimize away CORINFO_HELP_ARRADDR_ST in a
10666 // lot of cases because of covariance, i.e. foo[] can be cast to object[].
10669 // Check for assignment to the same array, i.e. arrLcl[i] = arrLcl[j].
10670 // This does not need CORINFO_HELP_ARRADDR_ST
10672 if (arrayNodeFrom->OperGet() == GT_INDEX && arrayNodeFrom->gtOp.gtOp1->gtOper == GT_LCL_VAR &&
10673 arrayNodeTo->gtOper == GT_LCL_VAR &&
10674 arrayNodeTo->gtLclVarCommon.gtLclNum == arrayNodeFrom->gtOp.gtOp1->gtLclVarCommon.gtLclNum &&
10675 !lvaTable[arrayNodeTo->gtLclVarCommon.gtLclNum].lvAddrExposed)
10678 goto ARR_ST_POST_VERIFY;
10681 // Check for assignment of NULL. This does not need CORINFO_HELP_ARRADDR_ST
10683 if (arrayNodeFrom->OperGet() == GT_CNS_INT)
10685 assert(arrayNodeFrom->gtType == TYP_REF && arrayNodeFrom->gtIntCon.gtIconVal == 0);
10688 goto ARR_ST_POST_VERIFY;
10691 STELEM_REF_POST_VERIFY:
10693 /* Call a helper function to do the assignment */
10694 op1 = gtNewHelperCallNode(CORINFO_HELP_ARRADDR_ST, TYP_VOID, 0, impPopList(3, &flags, nullptr));
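// The ARRADDR_ST helper does the covariance type check and then performs the store;
// the three popped operands (array, index, value) are passed through unchanged.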
10698 case CEE_STELEM_I1:
10701 case CEE_STELEM_I2:
10702 lclTyp = TYP_SHORT;
10705 lclTyp = TYP_I_IMPL;
10707 case CEE_STELEM_I4:
10710 case CEE_STELEM_I8:
10713 case CEE_STELEM_R4:
10714 lclTyp = TYP_FLOAT;
10716 case CEE_STELEM_R8:
10717 lclTyp = TYP_DOUBLE;
10722 if (tiVerificationNeeded)
10724 typeInfo tiArray = impStackTop(2).seTypeInfo;
10725 typeInfo tiIndex = impStackTop(1).seTypeInfo;
10726 typeInfo tiValue = impStackTop().seTypeInfo;
10728 // As per ECMA, the 'index' operand can be either int32 or native int.
10729 Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
10730 typeInfo arrayElem = typeInfo(lclTyp);
10731 #ifdef _TARGET_64BIT_
10732 if (opcode == CEE_STELEM_I)
10734 arrayElem = typeInfo::nativeInt();
10736 #endif // _TARGET_64BIT_
10737 Verify(tiArray.IsNullObjRef() || typeInfo::AreEquivalent(verGetArrayElemType(tiArray), arrayElem),
10740 Verify(tiCompatibleWith(NormaliseForStack(tiValue), arrayElem.NormaliseForStack(), true),
10744 ARR_ST_POST_VERIFY:
10745 /* The strict order of evaluation is LHS-operands, RHS-operands,
10746 range-check, and then assignment. However, codegen currently
10747 does the range-check before evaluating the RHS-operands. So to
10748 maintain strict ordering, we spill the stack. */
10750 if (impStackTop().val->gtFlags & GTF_SIDE_EFFECT)
10752 impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
10753 "Strict ordering of exceptions for Array store"));
10756 /* Pull the new value from the stack */
10757 op2 = impPopStack().val;
10759 /* Pull the index value */
10760 op1 = impPopStack().val;
10762 /* Pull the array address */
10763 op3 = impPopStack().val;
10765 assertImp(op3->gtType == TYP_REF);
10766 if (op2->IsVarAddr())
10768 op2->gtType = TYP_I_IMPL;
10771 op3 = impCheckForNullPointer(op3);
10773 // Mark the block as containing an index expression
10775 if (op3->gtOper == GT_LCL_VAR)
10777 if (op1->gtOper == GT_LCL_VAR || op1->gtOper == GT_CNS_INT || op1->gtOper == GT_ADD)
10779 block->bbFlags |= BBF_HAS_IDX_LEN;
10780 optMethodFlags |= OMF_HAS_ARRAYREF;
10784 /* Create the index node */
10786 op1 = gtNewIndexRef(lclTyp, op3, op1);
10788 /* Create the assignment node and append it */
10790 if (lclTyp == TYP_STRUCT)
10792 assert(stelemClsHnd != DUMMY_INIT(NULL));
10794 op1->gtIndex.gtStructElemClass = stelemClsHnd;
10795 op1->gtIndex.gtIndElemSize = info.compCompHnd->getClassSize(stelemClsHnd);
10797 if (varTypeIsStruct(op1))
10799 op1 = impAssignStruct(op1, op2, stelemClsHnd, (unsigned)CHECK_SPILL_ALL);
10803 op2 = impImplicitR4orR8Cast(op2, op1->TypeGet());
10804 op1 = gtNewAssignNode(op1, op2);
10807 /* Mark the expression as containing an assignment */
10809 op1->gtFlags |= GTF_ASG;
10820 case CEE_ADD_OVF_UN:
10828 goto MATH_OP2_FLAGS;
10837 case CEE_SUB_OVF_UN:
10845 goto MATH_OP2_FLAGS;
10849 goto MATH_MAYBE_CALL_NO_OVF;
10854 case CEE_MUL_OVF_UN:
10861 goto MATH_MAYBE_CALL_OVF;
10863 // Other binary math operations
10867 goto MATH_MAYBE_CALL_NO_OVF;
10871 goto MATH_MAYBE_CALL_NO_OVF;
10875 goto MATH_MAYBE_CALL_NO_OVF;
10879 goto MATH_MAYBE_CALL_NO_OVF;
10881 MATH_MAYBE_CALL_NO_OVF:
10883 MATH_MAYBE_CALL_OVF:
10884 // Morpher has some complex logic about when to turn different
10885 // typed nodes on different platforms into helper calls. We
10886 // need to either duplicate that logic here, or just
10887 // pessimistically make all the nodes large enough to become
10888 // call nodes. Since call nodes aren't that much larger and
10889 // these opcodes are infrequent enough I chose the latter.
10891 goto MATH_OP2_FLAGS;
10903 MATH_OP2: // For default values of 'ovfl' and 'callNode'
10908 MATH_OP2_FLAGS: // If 'ovfl' and 'callNode' have already been set
10910 /* Pull two values and push back the result */
10912 if (tiVerificationNeeded)
10914 const typeInfo& tiOp1 = impStackTop(1).seTypeInfo;
10915 const typeInfo& tiOp2 = impStackTop().seTypeInfo;
10917 Verify(tiCompatibleWith(tiOp1, tiOp2, true), "different arg type");
10918 if (oper == GT_ADD || oper == GT_DIV || oper == GT_SUB || oper == GT_MUL || oper == GT_MOD)
10920 Verify(tiOp1.IsNumberType(), "not number");
10924 Verify(tiOp1.IsIntegerType(), "not integer");
10927 Verify(!ovfl || tiOp1.IsIntegerType(), "not integer");
10931 #ifdef _TARGET_64BIT_
10932 if (tiOp2.IsNativeIntType())
10936 #endif // _TARGET_64BIT_
10939 op2 = impPopStack().val;
10940 op1 = impPopStack().val;
10942 #if !CPU_HAS_FP_SUPPORT
10943 if (varTypeIsFloating(op1->gtType))
10948 /* Can't do arithmetic with references */
10949 assertImp(genActualType(op1->TypeGet()) != TYP_REF && genActualType(op2->TypeGet()) != TYP_REF);
10951 // Change both to TYP_I_IMPL (impBashVarAddrsToI won't change if it's a true byref, only
10952 // if it is in the stack)
10953 impBashVarAddrsToI(op1, op2);
10955 type = impGetByRefResultType(oper, uns, &op1, &op2);
10957 assert(!ovfl || !varTypeIsFloating(op1->gtType));
10959 /* Special case: "int+0", "int-0", "int*1", "int/1" */
10961 if (op2->gtOper == GT_CNS_INT)
10963 if ((op2->IsIntegralConst(0) && (oper == GT_ADD || oper == GT_SUB)) ||
10964 (op2->IsIntegralConst(1) && (oper == GT_MUL || oper == GT_DIV)))
10967 impPushOnStack(op1, tiRetVal);
10972 #if !FEATURE_X87_DOUBLES
10973 // We can generate a TYP_FLOAT operation that has a TYP_DOUBLE operand
10975 if (varTypeIsFloating(type) && varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType))
10977 if (op1->TypeGet() != type)
10979 // We insert a cast of op1 to 'type'
10980 op1 = gtNewCastNode(type, op1, type);
10982 if (op2->TypeGet() != type)
10984 // We insert a cast of op2 to 'type'
10985 op2 = gtNewCastNode(type, op2, type);
10988 #endif // !FEATURE_X87_DOUBLES
10990 #if SMALL_TREE_NODES
10993 /* These operators can later be transformed into 'GT_CALL' */
10995 assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MUL]);
10996 #ifndef _TARGET_ARM_
10997 assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_DIV]);
10998 assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UDIV]);
10999 assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MOD]);
11000 assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UMOD]);
11002 // It's tempting to use LargeOpOpcode() here, but this logic is *not* saying
11003 // that we'll need to transform into a general large node, but rather specifically
11004 // to a call: by doing it this way, things keep working if there are multiple sizes,
11005 // and a CALL is no longer the largest.
11006 // That said, as of now it *is* a large node, so we'll do this with an assert rather than a runtime check.
11008 assert(GenTree::s_gtNodeSizes[GT_CALL] == TREE_NODE_SZ_LARGE);
11009 op1 = new (this, GT_CALL) GenTreeOp(oper, type, op1, op2 DEBUGARG(/*largeNode*/ true));
11012 #endif // SMALL_TREE_NODES
11014 op1 = gtNewOperNode(oper, type, op1, op2);
11017 /* Special case: integer/long division may throw an exception */
11019 if (varTypeIsIntegral(op1->TypeGet()) && op1->OperMayThrow())
11021 op1->gtFlags |= GTF_EXCEPT;
11026 assert(oper == GT_ADD || oper == GT_SUB || oper == GT_MUL);
11027 if (ovflType != TYP_UNKNOWN)
11029 op1->gtType = ovflType;
11031 op1->gtFlags |= (GTF_EXCEPT | GTF_OVERFLOW);
11034 op1->gtFlags |= GTF_UNSIGNED;
11038 impPushOnStack(op1, tiRetVal);
11053 if (tiVerificationNeeded)
11055 const typeInfo& tiVal = impStackTop(1).seTypeInfo;
11056 const typeInfo& tiShift = impStackTop(0).seTypeInfo;
11057 Verify(tiVal.IsIntegerType() && tiShift.IsType(TI_INT), "Bad shift args");
11060 op2 = impPopStack().val;
11061 op1 = impPopStack().val; // operand to be shifted
11062 impBashVarAddrsToI(op1, op2);
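// The result of a shift has the (actual) type of the value being shifted; the shift count
// does not affect the result type.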
11064 type = genActualType(op1->TypeGet());
11065 op1 = gtNewOperNode(oper, type, op1, op2);
11067 impPushOnStack(op1, tiRetVal);
11071 if (tiVerificationNeeded)
11073 tiRetVal = impStackTop().seTypeInfo;
11074 Verify(tiRetVal.IsIntegerType(), "bad int value");
11077 op1 = impPopStack().val;
11078 impBashVarAddrsToI(op1, nullptr);
11079 type = genActualType(op1->TypeGet());
11080 impPushOnStack(gtNewOperNode(GT_NOT, type, op1), tiRetVal);
11084 if (tiVerificationNeeded)
11086 tiRetVal = impStackTop().seTypeInfo;
11087 Verify(tiRetVal.IsType(TI_DOUBLE), "bad R value");
11089 op1 = impPopStack().val;
11090 type = op1->TypeGet();
11091 op1 = gtNewOperNode(GT_CKFINITE, type, op1);
11092 op1->gtFlags |= GTF_EXCEPT;
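// ckfinite leaves the value on the stack but throws if it is a NaN or infinity,
// hence the GTF_EXCEPT flag.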
11094 impPushOnStack(op1, tiRetVal);
11099 val = getI4LittleEndian(codeAddr); // jump distance
11100 jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int32)) + val);
11104 val = getI1LittleEndian(codeAddr); // jump distance
11105 jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int8)) + val);
11109 if (compIsForInlining())
11111 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_LEAVE);
11115 JITDUMP(" %04X", jmpAddr);
11116 if (block->bbJumpKind != BBJ_LEAVE)
11118 impResetLeaveBlock(block, jmpAddr);
11121 assert(jmpAddr == block->bbJumpDest->bbCodeOffs);
11122 impImportLeave(block);
11123 impNoteBranchOffs();
11129 jmpDist = (sz == 1) ? getI1LittleEndian(codeAddr) : getI4LittleEndian(codeAddr);
11131 if (compIsForInlining() && jmpDist == 0)
11136 impNoteBranchOffs();
11142 case CEE_BRFALSE_S:
11144 /* Pop the comparand (now there's a neat term) from the stack */
11145 if (tiVerificationNeeded)
11147 typeInfo& tiVal = impStackTop().seTypeInfo;
11148 Verify(tiVal.IsObjRef() || tiVal.IsByRef() || tiVal.IsIntegerType() || tiVal.IsMethod(),
11152 op1 = impPopStack().val;
11153 type = op1->TypeGet();
11155 // brfalse and brtrue are only allowed on I4, refs, and byrefs.
11156 if (!opts.MinOpts() && !opts.compDbgCode && block->bbJumpDest == block->bbNext)
11158 block->bbJumpKind = BBJ_NONE;
11160 if (op1->gtFlags & GTF_GLOB_EFFECT)
11162 op1 = gtUnusedValNode(op1);
11171 if (op1->OperIsCompare())
11173 if (opcode == CEE_BRFALSE || opcode == CEE_BRFALSE_S)
11175 // Flip the sense of the compare
11177 op1 = gtReverseCond(op1);
11182 /* We'll compare against an equally-sized integer 0 */
11183 /* For small types, we always compare against int */
11184 op2 = gtNewZeroConNode(genActualType(op1->gtType));
11186 /* Create the comparison operator and try to fold it */
11188 oper = (opcode == CEE_BRTRUE || opcode == CEE_BRTRUE_S) ? GT_NE : GT_EQ;
11189 op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
11196 /* Fold comparison if we can */
11198 op1 = gtFoldExpr(op1);
11200 /* Try to fold the really simple cases like 'iconst *, ifne/ifeq'*/
11201 /* Don't make any blocks unreachable in import only mode */
11203 if ((op1->gtOper == GT_CNS_INT) && !compIsForImportOnly())
11205 /* gtFoldExpr() should prevent this as we don't want to make any blocks
11206 unreachable under compDbgCode */
11207 assert(!opts.compDbgCode);
11209 BBjumpKinds foldedJumpKind = (BBjumpKinds)(op1->gtIntCon.gtIconVal ? BBJ_ALWAYS : BBJ_NONE);
11210 assertImp((block->bbJumpKind == BBJ_COND) // normal case
11211 || (block->bbJumpKind == foldedJumpKind)); // this can happen if we are reimporting the
11212 // block for the second time
11214 block->bbJumpKind = foldedJumpKind;
11218 if (op1->gtIntCon.gtIconVal)
11220 printf("\nThe conditional jump becomes an unconditional jump to BB%02u\n",
11221 block->bbJumpDest->bbNum);
11225 printf("\nThe block falls through into the next BB%02u\n", block->bbNext->bbNum);
11232 op1 = gtNewOperNode(GT_JTRUE, TYP_VOID, op1);
11234 /* GT_JTRUE is handled specially for non-empty stacks. See 'addStmt'
11235 in impImportBlock(block). For correct line numbers, spill stack. */
11237 if (opts.compDbgCode && impCurStmtOffs != BAD_IL_OFFSET)
11239 impSpillStackEnsure(true);
11266 if (tiVerificationNeeded)
11268 verVerifyCond(impStackTop(1).seTypeInfo, impStackTop().seTypeInfo, opcode);
11269 tiRetVal = typeInfo(TI_INT);
11272 op2 = impPopStack().val;
11273 op1 = impPopStack().val;
11275 #ifdef _TARGET_64BIT_
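// On 64-bit targets a 32-bit int may be compared against a native int; widen the
// 32-bit side so both operands have the same actual type.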
11276 if (varTypeIsI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_INT))
11278 op2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
11280 else if (varTypeIsI(op2->TypeGet()) && (genActualType(op1->TypeGet()) == TYP_INT))
11282 op1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
11284 #endif // _TARGET_64BIT_
11286 assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
11287 varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet()) ||
11288 varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
11290 /* Create the comparison node */
11292 op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
11294 /* TODO: setting both flags when only one is appropriate */
11295 if (opcode == CEE_CGT_UN || opcode == CEE_CLT_UN)
11297 op1->gtFlags |= GTF_RELOP_NAN_UN | GTF_UNSIGNED;
11300 impPushOnStack(op1, tiRetVal);
11306 goto CMP_2_OPs_AND_BR;
11311 goto CMP_2_OPs_AND_BR;
11316 goto CMP_2_OPs_AND_BR_UN;
11321 goto CMP_2_OPs_AND_BR;
11326 goto CMP_2_OPs_AND_BR_UN;
11331 goto CMP_2_OPs_AND_BR;
11336 goto CMP_2_OPs_AND_BR_UN;
11341 goto CMP_2_OPs_AND_BR;
11346 goto CMP_2_OPs_AND_BR_UN;
11351 goto CMP_2_OPs_AND_BR_UN;
11353 CMP_2_OPs_AND_BR_UN:
11356 goto CMP_2_OPs_AND_BR_ALL;
11360 goto CMP_2_OPs_AND_BR_ALL;
11361 CMP_2_OPs_AND_BR_ALL:
11363 if (tiVerificationNeeded)
11365 verVerifyCond(impStackTop(1).seTypeInfo, impStackTop().seTypeInfo, opcode);
11368 /* Pull two values */
11369 op2 = impPopStack().val;
11370 op1 = impPopStack().val;
11372 #ifdef _TARGET_64BIT_
11373 if ((op1->TypeGet() == TYP_I_IMPL) && (genActualType(op2->TypeGet()) == TYP_INT))
11375 op2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
11377 else if ((op2->TypeGet() == TYP_I_IMPL) && (genActualType(op1->TypeGet()) == TYP_INT))
11379 op1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
11381 #endif // _TARGET_64BIT_
11383 assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
11384 varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet()) ||
11385 varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
11387 if (!opts.MinOpts() && !opts.compDbgCode && block->bbJumpDest == block->bbNext)
11389 block->bbJumpKind = BBJ_NONE;
11391 if (op1->gtFlags & GTF_GLOB_EFFECT)
11393 impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
11394 "Branch to next Optimization, op1 side effect"));
11395 impAppendTree(gtUnusedValNode(op1), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
11397 if (op2->gtFlags & GTF_GLOB_EFFECT)
11399 impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
11400 "Branch to next Optimization, op2 side effect"));
11401 impAppendTree(gtUnusedValNode(op2), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
11405 if ((op1->gtFlags | op2->gtFlags) & GTF_GLOB_EFFECT)
11407 impNoteLastILoffs();
11412 #if !FEATURE_X87_DOUBLES
11413 // We can generate a compare of differently sized floating point op1 and op2,
11414 // so we insert a cast
11416 if (varTypeIsFloating(op1->TypeGet()))
11418 if (op1->TypeGet() != op2->TypeGet())
11420 assert(varTypeIsFloating(op2->TypeGet()));
11422 // Say op1=double, op2=float. To avoid loss of precision
11423 // while comparing, op2 is converted to double and a double
11424 // comparison is done.
11425 if (op1->TypeGet() == TYP_DOUBLE)
11427 // We insert a cast of op2 to TYP_DOUBLE
11428 op2 = gtNewCastNode(TYP_DOUBLE, op2, TYP_DOUBLE);
11430 else if (op2->TypeGet() == TYP_DOUBLE)
11432 // We insert a cast of op1 to TYP_DOUBLE
11433 op1 = gtNewCastNode(TYP_DOUBLE, op1, TYP_DOUBLE);
11437 #endif // !FEATURE_X87_DOUBLES
11439 /* Create and append the operator */
11441 op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
11445 op1->gtFlags |= GTF_UNSIGNED;
11450 op1->gtFlags |= GTF_RELOP_NAN_UN;
11456 assert(!compIsForInlining());
11458 if (tiVerificationNeeded)
11460 Verify(impStackTop().seTypeInfo.IsType(TI_INT), "Bad switch val");
11462 /* Pop the switch value off the stack */
11463 op1 = impPopStack().val;
11464 assertImp(genActualTypeIsIntOrI(op1->TypeGet()));
11466 #ifdef _TARGET_64BIT_
11467 // Widen 'op1' on 64-bit targets
11468 if (op1->TypeGet() != TYP_I_IMPL)
11470 if (op1->OperGet() == GT_CNS_INT)
11472 op1->gtType = TYP_I_IMPL;
11476 op1 = gtNewCastNode(TYP_I_IMPL, op1, TYP_I_IMPL);
11479 #endif // _TARGET_64BIT_
11480 assert(genActualType(op1->TypeGet()) == TYP_I_IMPL);
11482 /* We can create a switch node */
11484 op1 = gtNewOperNode(GT_SWITCH, TYP_VOID, op1);
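// The switch operand is a uint32 case count followed by that many int32 branch targets;
// the importer only needs to skip past the table here.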
11486 val = (int)getU4LittleEndian(codeAddr);
11487 codeAddr += 4 + val * 4; // skip over the switch-table
11491 /************************** Casting OPCODES ***************************/
11493 case CEE_CONV_OVF_I1:
11496 case CEE_CONV_OVF_I2:
11497 lclTyp = TYP_SHORT;
11499 case CEE_CONV_OVF_I:
11500 lclTyp = TYP_I_IMPL;
11502 case CEE_CONV_OVF_I4:
11505 case CEE_CONV_OVF_I8:
11509 case CEE_CONV_OVF_U1:
11510 lclTyp = TYP_UBYTE;
11512 case CEE_CONV_OVF_U2:
11515 case CEE_CONV_OVF_U:
11516 lclTyp = TYP_U_IMPL;
11518 case CEE_CONV_OVF_U4:
11521 case CEE_CONV_OVF_U8:
11522 lclTyp = TYP_ULONG;
11525 case CEE_CONV_OVF_I1_UN:
11528 case CEE_CONV_OVF_I2_UN:
11529 lclTyp = TYP_SHORT;
11531 case CEE_CONV_OVF_I_UN:
11532 lclTyp = TYP_I_IMPL;
11534 case CEE_CONV_OVF_I4_UN:
11537 case CEE_CONV_OVF_I8_UN:
11541 case CEE_CONV_OVF_U1_UN:
11542 lclTyp = TYP_UBYTE;
11544 case CEE_CONV_OVF_U2_UN:
11547 case CEE_CONV_OVF_U_UN:
11548 lclTyp = TYP_U_IMPL;
11550 case CEE_CONV_OVF_U4_UN:
11553 case CEE_CONV_OVF_U8_UN:
11554 lclTyp = TYP_ULONG;
11559 goto CONV_OVF_COMMON;
11562 goto CONV_OVF_COMMON;
11572 lclTyp = TYP_SHORT;
11575 lclTyp = TYP_I_IMPL;
11585 lclTyp = TYP_UBYTE;
11590 #if (REGSIZE_BYTES == 8)
11592 lclTyp = TYP_U_IMPL;
11596 lclTyp = TYP_U_IMPL;
11603 lclTyp = TYP_ULONG;
11607 lclTyp = TYP_FLOAT;
11610 lclTyp = TYP_DOUBLE;
11613 case CEE_CONV_R_UN:
11614 lclTyp = TYP_DOUBLE;
11628 // just check that we have a number on the stack
11629 if (tiVerificationNeeded)
11631 const typeInfo& tiVal = impStackTop().seTypeInfo;
11632 Verify(tiVal.IsNumberType(), "bad arg");
11634 #ifdef _TARGET_64BIT_
11635 bool isNative = false;
11639 case CEE_CONV_OVF_I:
11640 case CEE_CONV_OVF_I_UN:
11642 case CEE_CONV_OVF_U:
11643 case CEE_CONV_OVF_U_UN:
11647 // leave 'isNative' = false;
11652 tiRetVal = typeInfo::nativeInt();
11655 #endif // _TARGET_64BIT_
11657 tiRetVal = typeInfo(lclTyp).NormaliseForStack();
11661 // Only conversions from FLOAT or DOUBLE to an integer type,
11662 // and conversions from ULONG (or LONG on ARM) to DOUBLE, are morphed to calls
11664 if (varTypeIsFloating(lclTyp))
11666 callNode = varTypeIsLong(impStackTop().val) || uns // uint->dbl gets turned into uint->long->dbl
11667 #ifdef _TARGET_64BIT_
11668 // TODO-ARM64-Bug?: This was AMD64; I enabled it for ARM64 also. OK?
11669 // TYP_BYREF could be used as TYP_I_IMPL which is long.
11670 // TODO-CQ: remove this when we lower casts long/ulong --> float/double
11671 // and generate SSE2 code instead of going through helper calls.
11672 || (impStackTop().val->TypeGet() == TYP_BYREF)
11678 callNode = varTypeIsFloating(impStackTop().val->TypeGet());
11681 // At this point uns, ovf, callNode all set
11683 op1 = impPopStack().val;
11684 impBashVarAddrsToI(op1);
11686 if (varTypeIsSmall(lclTyp) && !ovfl && op1->gtType == TYP_INT && op1->gtOper == GT_AND)
11688 op2 = op1->gtOp.gtOp2;
11690 if (op2->gtOper == GT_CNS_INT)
11692 ssize_t ival = op2->gtIntCon.gtIconVal;
11693 ssize_t mask, umask;
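// mask/umask bound the values representable in the target small type (with and without
// the sign bit); if the AND constant already fits within them, the cast below is unnecessary.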
11709 assert(!"unexpected type");
11713 if (((ival & umask) == ival) || ((ival & mask) == ival && uns))
11715 /* Toss the cast, it's a waste of time */
11717 impPushOnStack(op1, tiRetVal);
11720 else if (ival == mask)
11722 /* Toss the masking, it's a waste of time, since
11723 we sign-extend from the small value anyway */
11725 op1 = op1->gtOp.gtOp1;
11730 /* The 'op2' sub-operand of a cast is the 'real' type number,
11731 since the result of a cast to one of the 'small' integer
11732 types is an integer.
11735 type = genActualType(lclTyp);
11737 #if SMALL_TREE_NODES
11740 op1 = gtNewCastNodeL(type, op1, lclTyp);
11743 #endif // SMALL_TREE_NODES
11745 op1 = gtNewCastNode(type, op1, lclTyp);
11750 op1->gtFlags |= (GTF_OVERFLOW | GTF_EXCEPT);
11754 op1->gtFlags |= GTF_UNSIGNED;
11756 impPushOnStack(op1, tiRetVal);
11760 if (tiVerificationNeeded)
11762 tiRetVal = impStackTop().seTypeInfo;
11763 Verify(tiRetVal.IsNumberType(), "Bad arg");
11766 op1 = impPopStack().val;
11767 impBashVarAddrsToI(op1, nullptr);
11768 impPushOnStack(gtNewOperNode(GT_NEG, genActualType(op1->gtType), op1), tiRetVal);
11772 if (tiVerificationNeeded)
11777 /* Pull the top value from the stack */
11779 op1 = impPopStack(clsHnd).val;
11781 /* Get hold of the type of the value being duplicated */
11783 lclTyp = genActualType(op1->gtType);
11785 /* Does the value have any side effects? */
11787 if ((op1->gtFlags & GTF_SIDE_EFFECT) || opts.compDbgCode)
11789 // Since we are throwing away the value, just normalize
11790 // it to its address. This is more efficient.
11792 if (varTypeIsStruct(op1))
11794 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
11795 // Non-calls, such as obj or ret_expr, have to go through this.
11796 // Calls with large struct return value have to go through this.
11797 // Helper calls with small struct return value also have to go
11798 // through this since they do not follow Unix calling convention.
11799 if (op1->gtOper != GT_CALL || !IsMultiRegReturnedType(clsHnd) ||
11800 op1->AsCall()->gtCallType == CT_HELPER)
11801 #endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
11803 op1 = impGetStructAddr(op1, clsHnd, (unsigned)CHECK_SPILL_ALL, false);
11807 // If op1 is a non-overflow cast, throw it away since it is useless.
11808 // Another reason for throwing away the useless cast is in the context of
11809 // implicit tail calls when the operand of pop is GT_CAST(GT_CALL(..)).
11810 // The cast gets added as part of importing GT_CALL, which gets in the way
11811 // of fgMorphCall() on the forms of tail call nodes that we assert.
11812 if ((op1->gtOper == GT_CAST) && !op1->gtOverflow())
11814 op1 = op1->gtOp.gtOp1;
11817 // If 'op1' is an expression, create an assignment node.
11818 // Helps analyses (like CSE) to work fine.
11820 if (op1->gtOper != GT_CALL)
11822 op1 = gtUnusedValNode(op1);
11825 /* Append the value to the tree list */
11829 /* No side effects - just throw the <BEEP> thing away */
11834 if (tiVerificationNeeded)
11836 // Dup could be the beginning of a delegate creation sequence, remember that
11837 delegateCreateStart = codeAddr - 1;
11841 // Convert a (dup, stloc) sequence into a (stloc, ldloc) sequence in the following cases:
11842 // - If this is non-debug code - so that CSE will recognize the two as equal.
11843 // This helps eliminate a redundant bounds check in cases such as:
11844 // ariba[i+3] += some_value;
11845 // - If the top of the stack is a non-leaf that may be expensive to clone.
11847 if (codeAddr < codeEndp)
11849 OPCODE nextOpcode = (OPCODE)getU1LittleEndian(codeAddr);
11850 if (impIsAnySTLOC(nextOpcode))
11852 if (!opts.compDbgCode)
11854 insertLdloc = true;
11857 GenTree* stackTop = impStackTop().val;
11858 if (!stackTop->IsIntegralConst(0) && !stackTop->IsFPZero() && !stackTop->IsLocal())
11860 insertLdloc = true;
11866 /* Pull the top value from the stack */
11867 op1 = impPopStack(tiRetVal);
11869 /* Clone the value */
11870 op1 = impCloneExpr(op1, &op2, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_ALL,
11871 nullptr DEBUGARG("DUP instruction"));
11873 /* Either the tree started with no global effects, or impCloneExpr
11874 evaluated the tree to a temp and returned two copies of that
11875 temp. Either way, neither op1 nor op2 should have side effects.
11877 assert(!(op1->gtFlags & GTF_GLOB_EFFECT) && !(op2->gtFlags & GTF_GLOB_EFFECT));
11879 /* Push the tree/temp back on the stack */
11880 impPushOnStack(op1, tiRetVal);
11882 /* Push the copy on the stack */
11883 impPushOnStack(op2, tiRetVal);
11891 lclTyp = TYP_SHORT;
11900 lclTyp = TYP_I_IMPL;
11902 case CEE_STIND_REF:
11906 lclTyp = TYP_FLOAT;
11909 lclTyp = TYP_DOUBLE;
11913 if (tiVerificationNeeded)
11915 typeInfo instrType(lclTyp);
11916 #ifdef _TARGET_64BIT_
11917 if (opcode == CEE_STIND_I)
11919 instrType = typeInfo::nativeInt();
11921 #endif // _TARGET_64BIT_
11922 verVerifySTIND(impStackTop(1).seTypeInfo, impStackTop(0).seTypeInfo, instrType);
11926 compUnsafeCastUsed = true; // Have to go conservative
11931 op2 = impPopStack().val; // value to store
11932 op1 = impPopStack().val; // address to store to
11934 // you can indirect off of a TYP_I_IMPL (if we are in C) or a BYREF
11935 assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
11937 impBashVarAddrsToI(op1, op2);
11939 op2 = impImplicitR4orR8Cast(op2, lclTyp);
11941 #ifdef _TARGET_64BIT_
11942 // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
11943 if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType))
11945 op2->gtType = TYP_I_IMPL;
11949 // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
11951 if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT))
11953 assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
11954 op2 = gtNewCastNode(TYP_INT, op2, TYP_INT);
11956 // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
11958 if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT))
11960 assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
11961 op2 = gtNewCastNode(TYP_I_IMPL, op2, TYP_I_IMPL);
11964 #endif // _TARGET_64BIT_
11966 if (opcode == CEE_STIND_REF)
11968 // STIND_REF can be used to store TYP_INT, TYP_I_IMPL, TYP_REF, or TYP_BYREF
11969 assertImp(varTypeIsIntOrI(op2->gtType) || varTypeIsGC(op2->gtType));
11970 lclTyp = genActualType(op2->TypeGet());
11973 // Check target type.
11975 if (op2->gtType == TYP_BYREF || lclTyp == TYP_BYREF)
11977 if (op2->gtType == TYP_BYREF)
11979 assertImp(lclTyp == TYP_BYREF || lclTyp == TYP_I_IMPL);
11981 else if (lclTyp == TYP_BYREF)
11983 assertImp(op2->gtType == TYP_BYREF || varTypeIsIntOrI(op2->gtType));
11988 assertImp(genActualType(op2->gtType) == genActualType(lclTyp) ||
11989 ((lclTyp == TYP_I_IMPL) && (genActualType(op2->gtType) == TYP_INT)) ||
11990 (varTypeIsFloating(op2->gtType) && varTypeIsFloating(lclTyp)));
11994 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
11996 // stind could point anywhere, for example a boxed class static int
11997 op1->gtFlags |= GTF_IND_TGTANYWHERE;
11999 if (prefixFlags & PREFIX_VOLATILE)
12001 assert(op1->OperGet() == GT_IND);
12002 op1->gtFlags |= GTF_DONT_CSE; // Can't CSE a volatile
12003 op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
12004 op1->gtFlags |= GTF_IND_VOLATILE;
12007 if (prefixFlags & PREFIX_UNALIGNED)
12009 assert(op1->OperGet() == GT_IND);
12010 op1->gtFlags |= GTF_IND_UNALIGNED;
12013 op1 = gtNewAssignNode(op1, op2);
12014 op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
12016 // Spill side-effects AND global-data-accesses
12017 if (verCurrentState.esStackDepth > 0)
12019 impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STIND"));
12028 lclTyp = TYP_SHORT;
12037 case CEE_LDIND_REF:
12041 lclTyp = TYP_I_IMPL;
12044 lclTyp = TYP_FLOAT;
12047 lclTyp = TYP_DOUBLE;
12050 lclTyp = TYP_UBYTE;
12057 if (tiVerificationNeeded)
12059 typeInfo lclTiType(lclTyp);
12060 #ifdef _TARGET_64BIT_
12061 if (opcode == CEE_LDIND_I)
12063 lclTiType = typeInfo::nativeInt();
12065 #endif // _TARGET_64BIT_
12066 tiRetVal = verVerifyLDIND(impStackTop().seTypeInfo, lclTiType);
12067 tiRetVal.NormaliseForStack();
12071 compUnsafeCastUsed = true; // Have to go conservative
12076 op1 = impPopStack().val; // address to load from
12077 impBashVarAddrsToI(op1);
12079 #ifdef _TARGET_64BIT_
12080 // Allow an upcast of op1 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
12082 if (genActualType(op1->gtType) == TYP_INT)
12084 assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
12085 op1 = gtNewCastNode(TYP_I_IMPL, op1, TYP_I_IMPL);
12089 assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
12091 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
12093 // ldind could point anywhere, for example a boxed class static int
12094 op1->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
12096 if (prefixFlags & PREFIX_VOLATILE)
12098 assert(op1->OperGet() == GT_IND);
12099 op1->gtFlags |= GTF_DONT_CSE; // Can't CSE a volatile
12100 op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
12101 op1->gtFlags |= GTF_IND_VOLATILE;
12104 if (prefixFlags & PREFIX_UNALIGNED)
12106 assert(op1->OperGet() == GT_IND);
12107 op1->gtFlags |= GTF_IND_UNALIGNED;
12110 impPushOnStack(op1, tiRetVal);
12114 case CEE_UNALIGNED:
12117 val = getU1LittleEndian(codeAddr);
12119 JITDUMP(" %u", val);
12120 if ((val != 1) && (val != 2) && (val != 4))
12122 BADCODE("Alignment unaligned. must be 1, 2, or 4");
12125 Verify(!(prefixFlags & PREFIX_UNALIGNED), "Multiple unaligned. prefixes");
12126 prefixFlags |= PREFIX_UNALIGNED;
12128 impValidateMemoryAccessOpcode(codeAddr, codeEndp, false);
12131 opcode = (OPCODE)getU1LittleEndian(codeAddr);
12132 codeAddr += sizeof(__int8);
12133 opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
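// Prefixes do not terminate an instruction; decode the opcode that follows with the prefix flag recorded.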
12134 goto DECODE_OPCODE;
12138 Verify(!(prefixFlags & PREFIX_VOLATILE), "Multiple volatile. prefixes");
12139 prefixFlags |= PREFIX_VOLATILE;
12141 impValidateMemoryAccessOpcode(codeAddr, codeEndp, true);
12148 // Need to do a lookup here so that we perform an access check
12149 // and do a NOWAY if protections are violated
12150 _impResolveToken(CORINFO_TOKENKIND_Method);
12152 JITDUMP(" %08X", resolvedToken.token);
12154 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef*/,
12155 addVerifyFlag(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN)),
12158 // This check really only applies to intrinsic Array.Address methods
12159 if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE)
12161 NO_WAY("Currently do not support LDFTN of Parameterized functions");
12164 // Do this before DO_LDFTN since CEE_LDVIRTFTN does it on its own.
12165 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12167 if (tiVerificationNeeded)
12169 // LDFTN could be the beginning of a delegate creation sequence, remember that
12170 delegateCreateStart = codeAddr - 2;
12172 // check any constraints on the callee's class and type parameters
12173 VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
12174 "method has unsatisfied class constraints");
12175 VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(resolvedToken.hClass,
12176 resolvedToken.hMethod),
12177 "method has unsatisfied method constraints");
12179 mflags = callInfo.verMethodFlags;
12180 Verify(!(mflags & CORINFO_FLG_CONSTRUCTOR), "LDFTN on a constructor");
12184 op1 = impMethodPointer(&resolvedToken, &callInfo);
12185 if (compDonotInline())
12190 impPushOnStack(op1, typeInfo(resolvedToken.hMethod));
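// Push the raw method pointer; the typeInfo records the method handle, which later
// verification (e.g. the delegate-creation checks above) relies on.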
12195 case CEE_LDVIRTFTN:
12197 /* Get the method token */
12199 _impResolveToken(CORINFO_TOKENKIND_Method);
12201 JITDUMP(" %08X", resolvedToken.token);
12203 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef */,
12204 addVerifyFlag(combine(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN),
12205 CORINFO_CALLINFO_CALLVIRT)),
12208 // This check really only applies to intrinsic Array.Address methods
12209 if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE)
12211 NO_WAY("Currently do not support LDFTN of Parameterized functions");
12214 mflags = callInfo.methodFlags;
12216 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12218 if (compIsForInlining())
12220 if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL))
12222 compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDVIRTFN_ON_NON_VIRTUAL);
12227 CORINFO_SIG_INFO& ftnSig = callInfo.sig;
12229 if (tiVerificationNeeded)
12232 Verify(ftnSig.hasThis(), "ldvirtftn on a static method");
12233 Verify(!(mflags & CORINFO_FLG_CONSTRUCTOR), "LDVIRTFTN on a constructor");
12235 // JIT32 verifier rejects verifiable ldvirtftn pattern
12236 typeInfo declType =
12237 verMakeTypeInfo(resolvedToken.hClass, true); // Change TI_STRUCT to TI_REF when necessary
12239 typeInfo arg = impStackTop().seTypeInfo;
12240 Verify((arg.IsType(TI_REF) || arg.IsType(TI_NULL)) && tiCompatibleWith(arg, declType, true),
12243 CORINFO_CLASS_HANDLE instanceClassHnd = info.compClassHnd;
12244 if (!(arg.IsType(TI_NULL) || (mflags & CORINFO_FLG_STATIC)))
12246 instanceClassHnd = arg.GetClassHandleForObjRef();
12249 // check any constraints on the method's class and type parameters
12250 VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
12251 "method has unsatisfied class constraints");
12252 VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(resolvedToken.hClass,
12253 resolvedToken.hMethod),
12254 "method has unsatisfied method constraints");
12256 if (mflags & CORINFO_FLG_PROTECTED)
12258 Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
12259 "Accessing protected method through wrong type.");
12263 /* Get the object-ref */
12264 op1 = impPopStack().val;
12265 assertImp(op1->gtType == TYP_REF);
12267 if (opts.IsReadyToRun())
12269 if (callInfo.kind != CORINFO_VIRTUALCALL_LDVIRTFTN)
12271 if (op1->gtFlags & GTF_SIDE_EFFECT)
12273 op1 = gtUnusedValNode(op1);
12274 impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
12279 else if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL))
12281 if (op1->gtFlags & GTF_SIDE_EFFECT)
12283 op1 = gtUnusedValNode(op1);
12284 impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
12289 GenTreePtr fptr = impImportLdvirtftn(op1, &resolvedToken, &callInfo);
12290 if (compDonotInline())
12295 impPushOnStack(fptr, typeInfo(resolvedToken.hMethod));
12300 case CEE_CONSTRAINED:
12302 assertImp(sz == sizeof(unsigned));
12303 impResolveToken(codeAddr, &constrainedResolvedToken, CORINFO_TOKENKIND_Constrained);
12304 codeAddr += sizeof(unsigned); // prefix instructions must increment codeAddr manually
12305 JITDUMP(" (%08X) ", constrainedResolvedToken.token);
12307 Verify(!(prefixFlags & PREFIX_CONSTRAINED), "Multiple constrained. prefixes");
12308 prefixFlags |= PREFIX_CONSTRAINED;
12311 OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
12312 if (actualOpcode != CEE_CALLVIRT)
12314 BADCODE("constrained. has to be followed by callvirt");
12321 JITDUMP(" readonly.");
12323 Verify(!(prefixFlags & PREFIX_READONLY), "Multiple readonly. prefixes");
12324 prefixFlags |= PREFIX_READONLY;
12327 OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
12328 if (actualOpcode != CEE_LDELEMA && !impOpcodeIsCallOpcode(actualOpcode))
12330 BADCODE("readonly. has to be followed by ldelema or call");
12340 Verify(!(prefixFlags & PREFIX_TAILCALL_EXPLICIT), "Multiple tailcall. prefixes");
12341 prefixFlags |= PREFIX_TAILCALL_EXPLICIT;
12344 OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
12345 if (!impOpcodeIsCallOpcode(actualOpcode))
12347 BADCODE("tailcall. has to be followed by call, callvirt or calli");
12355 /* Since we will implicitly insert newObjThisPtr at the start of the
12356 argument list, spill any GTF_ORDER_SIDEEFF */
12357 impSpillSpecialSideEff();
12359 /* NEWOBJ does not respond to TAIL */
12360 prefixFlags &= ~PREFIX_TAILCALL_EXPLICIT;
12362 /* NEWOBJ does not respond to CONSTRAINED */
12363 prefixFlags &= ~PREFIX_CONSTRAINED;
12365 #if COR_JIT_EE_VERSION > 460
12366 _impResolveToken(CORINFO_TOKENKIND_NewObj);
12368 _impResolveToken(CORINFO_TOKENKIND_Method);
12371 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef*/,
12372 addVerifyFlag(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_ALLOWINSTPARAM)),
12375 if (compIsForInlining())
12377 if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
12379 // Check to see if this call violates the boundary.
12380 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_SECURITY);
12385 mflags = callInfo.methodFlags;
12387 if ((mflags & (CORINFO_FLG_STATIC | CORINFO_FLG_ABSTRACT)) != 0)
12389 BADCODE("newobj on static or abstract method");
12392 // Insert the security callout before any actual code is generated
12393 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12395 // There are three different cases for new:
12396 // Object size is variable (depends on arguments) --
12397 //     1) Object is an array (arrays are treated specially by the EE)
12398 //     2) Object is some other variable-sized object (e.g. String)
12399 // 3) Class size can be determined beforehand (the normal, fixed-size case)
12400 // In the first case, we need to call a NEWOBJ helper (multinewarray);
12401 // in the second case we call the constructor with a '0' this pointer;
12402 // in the third case we alloc the memory, then call the constructor
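// Annotation (hedged examples, not part of the original source):
//   case 1: newobj instance void int32[,]::.ctor(int32, int32)        -- multi-dimensional array
//   case 2: newobj instance void System.String::.ctor(char, int32)    -- variable-sized, non-array
//   case 3: newobj instance void SomeClassOrStruct::.ctor(int32)      -- size known up front
//           (SomeClassOrStruct is a placeholder type name)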
12404 clsFlags = callInfo.classFlags;
12405 if (clsFlags & CORINFO_FLG_ARRAY)
12407 if (tiVerificationNeeded)
12409 CORINFO_CLASS_HANDLE elemTypeHnd;
12410 INDEBUG(CorInfoType corType =)
12411 info.compCompHnd->getChildType(resolvedToken.hClass, &elemTypeHnd);
12412 assert(!(elemTypeHnd == nullptr && corType == CORINFO_TYPE_VALUECLASS));
12413 Verify(elemTypeHnd == nullptr ||
12414 !(info.compCompHnd->getClassAttribs(elemTypeHnd) & CORINFO_FLG_CONTAINS_STACK_PTR),
12415 "newarr of byref-like objects");
12416 verVerifyCall(opcode, &resolvedToken, nullptr, ((prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0),
12417 ((prefixFlags & PREFIX_READONLY) != 0), delegateCreateStart, codeAddr - 1,
12418 &callInfo DEBUGARG(info.compFullName));
12420 // Arrays need to call the NEWOBJ helper.
12421 assertImp(clsFlags & CORINFO_FLG_VAROBJSIZE);
12423 impImportNewObjArray(&resolvedToken, &callInfo);
12424 if (compDonotInline())
12432 // At present this can only be String
12433 else if (clsFlags & CORINFO_FLG_VAROBJSIZE)
12435 if (IsTargetAbi(CORINFO_CORERT_ABI))
12437 // The dummy argument does not exist in CoreRT
12438 newObjThisPtr = nullptr;
12442 // This is the case for variable-sized objects that are not
12443 // arrays. In this case, call the constructor with a null 'this'
12445 newObjThisPtr = gtNewIconNode(0, TYP_REF);
12448 /* Remember that this basic block contains 'new' of an object */
12449 block->bbFlags |= BBF_HAS_NEWOBJ;
12450 optMethodFlags |= OMF_HAS_NEWOBJ;
12454 // This is the normal case where the size of the object is
12455 // fixed. Allocate the memory and call the constructor.
12457 // Note: We cannot add a peep to avoid use of a temp here
12458 // because we don't have enough interference info to detect when
12459 // sources and destination interfere, for example: s = new S(ref);
12461 // TODO: We should find the correct place to introduce a general
12462 // reverse copy prop for struct return values from newobj or
12463 // any function returning structs.
12465 /* get a temporary for the new object */
12466 lclNum = lvaGrabTemp(true DEBUGARG("NewObj constructor temp"));
12468 // In the value class case we only need clsHnd for size calcs.
12470 // The lookup of the code pointer will be handled by CALL in this case
12471 if (clsFlags & CORINFO_FLG_VALUECLASS)
12473 if (compIsForInlining())
12475 // If value class has GC fields, inform the inliner. It may choose to
12476 // bail out on the inline.
12477 DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
12478 if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0)
12480 compInlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT);
12481 if (compInlineResult->IsFailure())
12486 // Do further notification in the case where the call site is rare;
12487 // some policies do not track the relative hotness of call sites for
12488 // "always" inline cases.
12489 if (impInlineInfo->iciBlock->isRunRarely())
12491 compInlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT);
12492 if (compInlineResult->IsFailure())
12500 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
12501 unsigned size = info.compCompHnd->getClassSize(resolvedToken.hClass);
12503 if (impIsPrimitive(jitTyp))
12505 lvaTable[lclNum].lvType = JITtype2varType(jitTyp);
12509 // The local variable itself is the allocated space.
12510 // Here we need the unsafe value cls check, since the address of the struct is taken for further use
12511 // and is potentially exploitable.
12512 lvaSetStruct(lclNum, resolvedToken.hClass, true /* unsafe value cls check */);
12515 // Append a tree to zero-out the temp
12516 newObjThisPtr = gtNewLclvNode(lclNum, lvaTable[lclNum].TypeGet());
12518 newObjThisPtr = gtNewBlkOpNode(newObjThisPtr, // Dest
12519 gtNewIconNode(0), // Value
12521 false, // isVolatile
12522 false); // not copyBlock
12523 impAppendTree(newObjThisPtr, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
12525 // Obtain the address of the temp
12527 gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewLclvNode(lclNum, lvaTable[lclNum].TypeGet()));
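// Annotation (hedged sketch, not part of the original source): for the fixed-size value-class case
// the statements appended above amount to
//     tempN = 0;                  // zero-init of the temp
//     ctor(&tempN, args...)       // the constructor receives the temp's address as 'this'
// where tempN (a name used only for this illustration) afterwards holds the constructed value.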
12531 #ifdef FEATURE_READYTORUN_COMPILER
12532 if (opts.IsReadyToRun())
12534 op1 = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_NEW, TYP_REF);
12535 usingReadyToRunHelper = (op1 != nullptr);
12538 if (!usingReadyToRunHelper)
12541 op1 = impParentClassTokenToHandle(&resolvedToken, nullptr, TRUE);
12542 if (op1 == nullptr)
12543 { // compDonotInline()
12547 // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
12548 // and the newfast call with a single call to a dynamic R2R cell that will:
12549 // 1) Load the context
12550 // 2) Perform the generic dictionary lookup and caching, and generate the appropriate
12552 // 3) Allocate and return the new object
12553 // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
12555 op1 = gtNewAllocObjNode(info.compCompHnd->getNewHelper(&resolvedToken, info.compMethodHnd),
12556 resolvedToken.hClass, TYP_REF, op1);
12559 // Remember that this basic block contains 'new' of an object
12560 block->bbFlags |= BBF_HAS_NEWOBJ;
12561 optMethodFlags |= OMF_HAS_NEWOBJ;
12563 // Append the assignment to the temp/local. Don't need to spill
12564 // at all as we are just calling an EE-Jit helper which can only
12565 // cause an (async) OutOfMemoryException.
12567 // We assign the newly allocated object (by a GT_ALLOCOBJ node)
12568 // to a temp. Note that the pattern "temp = allocObj" is required
12569 // by ObjectAllocator phase to be able to determine GT_ALLOCOBJ nodes
12570 // without exhaustive walk over all expressions.
12572 impAssignTempGen(lclNum, op1, (unsigned)CHECK_SPILL_NONE);
12574 newObjThisPtr = gtNewLclvNode(lclNum, TYP_REF);
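// Annotation (hedged sketch, not part of the original source): for an ordinary reference type this
// path appends
//     tempN = GT_ALLOCOBJ(clsHnd)     // allocation only; the constructor runs via the CALL code below
// and then passes the temp (TYP_REF) as the 'this' argument of the constructor call; tempN is a name
// used only for this illustration.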
12581 /* CALLI does not respond to CONSTRAINED */
12582 prefixFlags &= ~PREFIX_CONSTRAINED;
12584 if (compIsForInlining())
12586 // CALLI doesn't have a method handle, so assume the worst.
12587 if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
12589 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_CALLI);
12599 // We can't call getCallInfo on the token from a CALLI, but we need it in
12600 // many other places. We unfortunately embed that knowledge here.
12601 if (opcode != CEE_CALLI)
12603 _impResolveToken(CORINFO_TOKENKIND_Method);
12605 eeGetCallInfo(&resolvedToken,
12606 (prefixFlags & PREFIX_CONSTRAINED) ? &constrainedResolvedToken : nullptr,
12607 // this is how impImportCall invokes getCallInfo
12609 combine(combine(CORINFO_CALLINFO_ALLOWINSTPARAM, CORINFO_CALLINFO_SECURITYCHECKS),
12610 (opcode == CEE_CALLVIRT) ? CORINFO_CALLINFO_CALLVIRT
12611 : CORINFO_CALLINFO_NONE)),
12616 // Suppress uninitialized use warning.
12617 memset(&resolvedToken, 0, sizeof(resolvedToken));
12618 memset(&callInfo, 0, sizeof(callInfo));
12620 resolvedToken.token = getU4LittleEndian(codeAddr);
12623 CALL: // memberRef should be set.
12624 // newObjThisPtr should be set for CEE_NEWOBJ
12626 JITDUMP(" %08X", resolvedToken.token);
12627 constraintCall = (prefixFlags & PREFIX_CONSTRAINED) != 0;
12629 bool newBBcreatedForTailcallStress;
12631 newBBcreatedForTailcallStress = false;
12633 if (compIsForInlining())
12635 if (compDonotInline())
12639 // We rule out inlinees with explicit tail calls in fgMakeBasicBlocks.
12640 assert((prefixFlags & PREFIX_TAILCALL_EXPLICIT) == 0);
12644 if (compTailCallStress())
12646 // Have we created a new BB after the "call" instruction in fgMakeBasicBlocks()?
12647 // Tail call stress only recognizes call+ret patterns and forces them to be
12648 // explicit tail prefixed calls. Also fgMakeBasicBlocks() under tail call stress
12649 // doesn't import the 'ret' opcode following the call into the basic block containing
12650 // the call; instead it imports it into a new basic block. Note that fgMakeBasicBlocks()
12651 // is already checking that there is an opcode following the call, and hence it is
12652 // safe here to read the next opcode without a bounds check.
12653 newBBcreatedForTailcallStress =
12654 impOpcodeIsCallOpcode(opcode) && // Current opcode is a CALL, (not a CEE_NEWOBJ). So, don't
12655 // make it jump to RET.
12656 (OPCODE)getU1LittleEndian(codeAddr + sz) == CEE_RET; // Next opcode is a CEE_RET
12658 if (newBBcreatedForTailcallStress &&
12659 !(prefixFlags & PREFIX_TAILCALL_EXPLICIT) && // User hasn't set "tail." prefix yet.
12660 verCheckTailCallConstraint(opcode, &resolvedToken,
12661 constraintCall ? &constrainedResolvedToken : nullptr,
12662 true) // Is it legal to do a tailcall?
12665 // Stress the tailcall.
12666 JITDUMP(" (Tailcall stress: prefixFlags |= PREFIX_TAILCALL_EXPLICIT)");
12667 prefixFlags |= PREFIX_TAILCALL_EXPLICIT;
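// Annotation (hedged example, not part of the original source): under tail call stress an IL
// sequence such as
//     call void C::M()
//     ret
// is treated as if it had been written "tail. call void C::M()" followed by "ret", provided
// verCheckTailCallConstraint above accepted it; C::M is a placeholder method name.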
12671 // Note that when running under tail call stress, a call will be marked as explicit tail prefixed,
12672 // and hence will not be considered for implicit tail calling.
12673 bool isRecursive = (callInfo.hMethod == info.compMethodHnd);
12674 if (impIsImplicitTailCallCandidate(opcode, codeAddr + sz, codeEndp, prefixFlags, isRecursive))
12676 JITDUMP(" (Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)");
12677 prefixFlags |= PREFIX_TAILCALL_IMPLICIT;
12681 // Treat this call as tail call for verification only if "tail" prefixed (i.e. explicit tail call).
12682 explicitTailCall = (prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0;
12683 readonlyCall = (prefixFlags & PREFIX_READONLY) != 0;
12685 if (opcode != CEE_CALLI && opcode != CEE_NEWOBJ)
12687 // All calls and delegates need a security callout.
12688 // For delegates, this is the call to the delegate constructor, not the access check on the LD(virt)FTN.
12690 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12692 #if 0 // DevDiv 410397 - This breaks too many obfuscated apps to do this in an in-place release
12694 // DevDiv 291703 - we need to check for accessibility between the caller of InitializeArray
12695 // and the field it is reading, thus it is now unverifiable to not immediately precede with
12696 // ldtoken <field token>, and we now check accessibility
12697 if ((callInfo.methodFlags & CORINFO_FLG_INTRINSIC) &&
12698 (info.compCompHnd->getIntrinsicID(callInfo.hMethod) == CORINFO_INTRINSIC_InitializeArray))
12700 if (prevOpcode != CEE_LDTOKEN)
12702 Verify(prevOpcode == CEE_LDTOKEN, "Need ldtoken for InitializeArray");
12706 assert(lastLoadToken != NULL);
12707 // Now that we know we have a token, verify that it is accessible for loading
12708 CORINFO_RESOLVED_TOKEN resolvedLoadField;
12709 impResolveToken(lastLoadToken, &resolvedLoadField, CORINFO_TOKENKIND_Field);
12710 eeGetFieldInfo(&resolvedLoadField, CORINFO_ACCESS_INIT_ARRAY, &fieldInfo);
12711 impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
12715 #endif // DevDiv 410397
12718 if (tiVerificationNeeded)
12720 verVerifyCall(opcode, &resolvedToken, constraintCall ? &constrainedResolvedToken : nullptr,
12721 explicitTailCall, readonlyCall, delegateCreateStart, codeAddr - 1,
12722 &callInfo DEBUGARG(info.compFullName));
12725 // Insert delegate callout here.
12726 if (opcode == CEE_NEWOBJ && (mflags & CORINFO_FLG_CONSTRUCTOR) && (clsFlags & CORINFO_FLG_DELEGATE))
12729 // We should do this only if verification is enabled
12730 // If verification is disabled, delegateCreateStart will not be initialized correctly
12731 if (tiVerificationNeeded)
12733 mdMemberRef delegateMethodRef = mdMemberRefNil;
12734 // We should get here only for well formed delegate creation.
12735 assert(verCheckDelegateCreation(delegateCreateStart, codeAddr - 1, delegateMethodRef));
12739 #ifdef FEATURE_CORECLR
12740 // In coreclr the delegate transparency rule needs to be enforced even if verification is disabled
12741 typeInfo tiActualFtn = impStackTop(0).seTypeInfo;
12742 CORINFO_METHOD_HANDLE delegateMethodHandle = tiActualFtn.GetMethod2();
12744 impInsertCalloutForDelegate(info.compMethodHnd, delegateMethodHandle, resolvedToken.hClass);
12745 #endif // FEATURE_CORECLR
12748 callTyp = impImportCall(opcode, &resolvedToken, constraintCall ? &constrainedResolvedToken : nullptr,
12749 newObjThisPtr, prefixFlags, &callInfo, opcodeOffs);
12750 if (compDonotInline())
12755 if (explicitTailCall || newBBcreatedForTailcallStress) // If newBBcreatedForTailcallStress is true, we
12756 // have created a new BB after the "call"
12757 // instruction in fgMakeBasicBlocks(). So we need to jump to RET regardless.
12759 assert(!compIsForInlining());
12771 BOOL isLoadAddress = (opcode == CEE_LDFLDA || opcode == CEE_LDSFLDA);
12772 BOOL isLoadStatic = (opcode == CEE_LDSFLD || opcode == CEE_LDSFLDA);
12774 /* Get the CP_Fieldref index */
12775 assertImp(sz == sizeof(unsigned));
12777 _impResolveToken(CORINFO_TOKENKIND_Field);
12779 JITDUMP(" %08X", resolvedToken.token);
12781 int aflags = isLoadAddress ? CORINFO_ACCESS_ADDRESS : CORINFO_ACCESS_GET;
12783 GenTreePtr obj = nullptr;
12784 typeInfo* tiObj = nullptr;
12785 CORINFO_CLASS_HANDLE objType = nullptr; // used for fields
12787 if (opcode == CEE_LDFLD || opcode == CEE_LDFLDA)
12789 tiObj = &impStackTop().seTypeInfo;
12790 obj = impPopStack(objType).val;
12792 if (impIsThis(obj))
12794 aflags |= CORINFO_ACCESS_THIS;
12796 // An optimization for Contextful classes:
12797 // we unwrap the proxy when we have a 'this reference'
12799 if (info.compUnwrapContextful)
12801 aflags |= CORINFO_ACCESS_UNWRAP;
12806 eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo);
12808 // Figure out the type of the member. We always call canAccessField, so you always need this
12810 CorInfoType ciType = fieldInfo.fieldType;
12811 clsHnd = fieldInfo.structType;
12813 lclTyp = JITtype2varType(ciType);
12815 #ifdef _TARGET_AMD64
12816 noway_assert(varTypeIsIntegralOrI(lclTyp) || varTypeIsFloating(lclTyp) || lclTyp == TYP_STRUCT);
12817 #endif // _TARGET_AMD64
12819 if (compIsForInlining())
12821 switch (fieldInfo.fieldAccessor)
12823 case CORINFO_FIELD_INSTANCE_HELPER:
12824 case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
12825 case CORINFO_FIELD_STATIC_ADDR_HELPER:
12826 case CORINFO_FIELD_STATIC_TLS:
12828 compInlineResult->NoteFatal(InlineObservation::CALLEE_LDFLD_NEEDS_HELPER);
12831 case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
12832 #if COR_JIT_EE_VERSION > 460
12833 case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
12835 /* We may be able to inline the field accessors in specific instantiations of generic
12837 compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDFLD_NEEDS_HELPER);
12844 if (!isLoadAddress && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && lclTyp == TYP_STRUCT &&
12847 if ((info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd) == CORINFO_TYPE_UNDEF) &&
12848 !(info.compFlags & CORINFO_FLG_FORCEINLINE))
12850 // Loading a static valuetype field usually will cause a JitHelper to be called
12851 // for the static base. This will bloat the code.
12852 compInlineResult->Note(InlineObservation::CALLEE_LDFLD_STATIC_VALUECLASS);
12854 if (compInlineResult->IsFailure())
12862 tiRetVal = verMakeTypeInfo(ciType, clsHnd);
12865 tiRetVal.MakeByRef();
12869 tiRetVal.NormaliseForStack();
12872 // Perform this check always to ensure that we get field access exceptions even with
12873 // SkipVerification.
12874 impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
12876 if (tiVerificationNeeded)
12878 // You can also pass the unboxed struct to LDFLD
12879 BOOL bAllowPlainValueTypeAsThis = FALSE;
12880 if (opcode == CEE_LDFLD && impIsValueType(tiObj))
12882 bAllowPlainValueTypeAsThis = TRUE;
12885 verVerifyField(&resolvedToken, fieldInfo, tiObj, isLoadAddress, bAllowPlainValueTypeAsThis);
12887 // If we're doing this on a heap object or from a 'safe' byref
12888 // then the result is a safe byref too
12889 if (isLoadAddress) // load address
12891 if (fieldInfo.fieldFlags &
12892 CORINFO_FLG_FIELD_STATIC) // statics marked as safe will have permanent home
12894 if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_SAFESTATIC_BYREF_RETURN)
12896 tiRetVal.SetIsPermanentHomeByRef();
12899 else if (tiObj->IsObjRef() || tiObj->IsPermanentHomeByRef())
12901 // ldflda of byref is safe if done on a gc object or on a safe byref
12903 tiRetVal.SetIsPermanentHomeByRef();
12909 // tiVerificationNeeded is false.
12910 // Raise InvalidProgramException if static load accesses non-static field
12911 if (isLoadStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0))
12913 BADCODE("static access on an instance field");
12917 // We are using ldfld/a on a static field. We allow it, but need to evaluate any side effects from obj.
12918 if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr)
12920 if (obj->gtFlags & GTF_SIDE_EFFECT)
12922 obj = gtUnusedValNode(obj);
12923 impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
12928 /* Preserve 'small' int types */
12929 if (lclTyp > TYP_INT)
12931 lclTyp = genActualType(lclTyp);
12934 bool usesHelper = false;
12936 switch (fieldInfo.fieldAccessor)
12938 case CORINFO_FIELD_INSTANCE:
12939 #ifdef FEATURE_READYTORUN_COMPILER
12940 case CORINFO_FIELD_INSTANCE_WITH_BASE:
12943 bool nullcheckNeeded = false;
12945 obj = impCheckForNullPointer(obj);
12947 if (isLoadAddress && (obj->gtType == TYP_BYREF) && fgAddrCouldBeNull(obj))
12949 nullcheckNeeded = true;
12952 // If the object is a struct, what we really want is
12953 // for the field to operate on the address of the struct.
12954 if (!varTypeGCtype(obj->TypeGet()) && impIsValueType(tiObj))
12956 assert(opcode == CEE_LDFLD && objType != nullptr);
12958 obj = impGetStructAddr(obj, objType, (unsigned)CHECK_SPILL_ALL, true);
12961 /* Create the data member node */
12962 op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset, nullcheckNeeded);
12964 #ifdef FEATURE_READYTORUN_COMPILER
12965 if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE)
12967 op1->gtField.gtFieldLookup = fieldInfo.fieldLookup;
12971 op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT);
12973 if (fgAddrCouldBeNull(obj))
12975 op1->gtFlags |= GTF_EXCEPT;
12978 // If gtFldObj is a BYREF then our target is a value class and
12979 // it could point anywhere, for example a boxed class static int
12980 if (obj->gtType == TYP_BYREF)
12982 op1->gtFlags |= GTF_IND_TGTANYWHERE;
12985 DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
12986 if (StructHasOverlappingFields(typeFlags))
12988 op1->gtField.gtFldMayOverlap = true;
12991 // wrap it in an address-of operator if necessary
12994 op1 = gtNewOperNode(GT_ADDR,
12995 (var_types)(varTypeIsGC(obj->TypeGet()) ? TYP_BYREF : TYP_I_IMPL), op1);
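// Annotation (hedged sketch, not part of the original source): so "ldflda" on an instance field
// yields roughly GT_ADDR(GT_FIELD(obj, field)), typed TYP_BYREF when obj is a GC pointer and
// TYP_I_IMPL otherwise, while a plain "ldfld" pushes the GT_FIELD node itself.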
12999 if (compIsForInlining() &&
13000 impInlineIsGuaranteedThisDerefBeforeAnySideEffects(nullptr, obj,
13001 impInlineInfo->inlArgInfo))
13003 impInlineInfo->thisDereferencedFirst = true;
13009 case CORINFO_FIELD_STATIC_TLS:
13010 #ifdef _TARGET_X86_
13011 // Legacy TLS access is implemented as an intrinsic on x86 only
13013 /* Create the data member node */
13014 op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset);
13015 op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation
13019 op1 = gtNewOperNode(GT_ADDR, (var_types)TYP_I_IMPL, op1);
13023 fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER;
13028 case CORINFO_FIELD_STATIC_ADDR_HELPER:
13029 case CORINFO_FIELD_INSTANCE_HELPER:
13030 case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
13031 op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp,
13036 case CORINFO_FIELD_STATIC_ADDRESS:
13037 // Replace static read-only fields with a constant if possible
13038 if ((aflags & CORINFO_ACCESS_GET) && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_FINAL) &&
13039 !(fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP) &&
13040 (varTypeIsIntegral(lclTyp) || varTypeIsFloating(lclTyp)))
13042 CorInfoInitClassResult initClassResult =
13043 info.compCompHnd->initClass(resolvedToken.hField, info.compMethodHnd,
13044 impTokenLookupContextHandle);
13046 if (initClassResult & CORINFO_INITCLASS_INITIALIZED)
13048 void** pFldAddr = nullptr;
13050 info.compCompHnd->getFieldAddress(resolvedToken.hField, (void**)&pFldAddr);
13052 // We should always be able to access this static's address directly
13053 assert(pFldAddr == nullptr);
13055 op1 = impImportStaticReadOnlyField(fldAddr, lclTyp);
13062 case CORINFO_FIELD_STATIC_RVA_ADDRESS:
13063 case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
13064 case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
13065 #if COR_JIT_EE_VERSION > 460
13066 case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
13068 op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo,
13072 case CORINFO_FIELD_INTRINSIC_ZERO:
13074 assert(aflags & CORINFO_ACCESS_GET);
13075 op1 = gtNewIconNode(0, lclTyp);
13080 case CORINFO_FIELD_INTRINSIC_EMPTY_STRING:
13082 assert(aflags & CORINFO_ACCESS_GET);
13085 InfoAccessType iat = info.compCompHnd->emptyStringLiteral(&pValue);
13086 op1 = gtNewStringLiteralNode(iat, pValue);
13092 assert(!"Unexpected fieldAccessor");
13095 if (!isLoadAddress)
13098 if (prefixFlags & PREFIX_VOLATILE)
13100 op1->gtFlags |= GTF_DONT_CSE; // Can't CSE a volatile
13101 op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
13105 assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) ||
13106 (op1->OperGet() == GT_OBJ));
13107 op1->gtFlags |= GTF_IND_VOLATILE;
13111 if (prefixFlags & PREFIX_UNALIGNED)
13115 assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) ||
13116 (op1->OperGet() == GT_OBJ));
13117 op1->gtFlags |= GTF_IND_UNALIGNED;
13122 /* Check if the class needs explicit initialization */
13124 if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
13126 GenTreePtr helperNode = impInitClass(&resolvedToken);
13127 if (compDonotInline())
13131 if (helperNode != nullptr)
13133 op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1);
13138 impPushOnStack(op1, tiRetVal);
13146 BOOL isStoreStatic = (opcode == CEE_STSFLD);
13148 CORINFO_CLASS_HANDLE fieldClsHnd; // class of the field (if it's a ref type)
13150 /* Get the CP_Fieldref index */
13152 assertImp(sz == sizeof(unsigned));
13154 _impResolveToken(CORINFO_TOKENKIND_Field);
13156 JITDUMP(" %08X", resolvedToken.token);
13158 int aflags = CORINFO_ACCESS_SET;
13159 GenTreePtr obj = nullptr;
13160 typeInfo* tiObj = nullptr;
13163 /* Pull the value from the stack */
13164 op2 = impPopStack(tiVal);
13165 clsHnd = tiVal.GetClassHandle();
13167 if (opcode == CEE_STFLD)
13169 tiObj = &impStackTop().seTypeInfo;
13170 obj = impPopStack().val;
13172 if (impIsThis(obj))
13174 aflags |= CORINFO_ACCESS_THIS;
13176 // An optimization for Contextful classes:
13177 // we unwrap the proxy when we have a 'this reference'
13179 if (info.compUnwrapContextful)
13181 aflags |= CORINFO_ACCESS_UNWRAP;
13186 eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo);
13188 // Figure out the type of the member. We always call canAccessField, so you always need this
13190 CorInfoType ciType = fieldInfo.fieldType;
13191 fieldClsHnd = fieldInfo.structType;
13193 lclTyp = JITtype2varType(ciType);
13195 if (compIsForInlining())
13197 /* Is this a 'special' (COM) field? or a TLS ref static field? a field stored in the GC heap? or
13198 * a per-inst static? */
13200 switch (fieldInfo.fieldAccessor)
13202 case CORINFO_FIELD_INSTANCE_HELPER:
13203 case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
13204 case CORINFO_FIELD_STATIC_ADDR_HELPER:
13205 case CORINFO_FIELD_STATIC_TLS:
13207 compInlineResult->NoteFatal(InlineObservation::CALLEE_STFLD_NEEDS_HELPER);
13210 case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
13211 #if COR_JIT_EE_VERSION > 460
13212 case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
13215 /* We may be able to inline the field accessors in specific instantiations of generic
13217 compInlineResult->NoteFatal(InlineObservation::CALLSITE_STFLD_NEEDS_HELPER);
13225 impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
13227 if (tiVerificationNeeded)
13229 verVerifyField(&resolvedToken, fieldInfo, tiObj, TRUE);
13230 typeInfo fieldType = verMakeTypeInfo(ciType, fieldClsHnd);
13231 Verify(tiCompatibleWith(tiVal, fieldType.NormaliseForStack(), true), "type mismatch");
13235 // tiVerificationNeeded is false.
13236 // Raise InvalidProgramException if static store accesses non-static field
13237 if (isStoreStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0))
13239 BADCODE("static access on an instance field");
13243 // We are using stfld on a static field.
13244 // We allow it, but need to evaluate any side effects of obj
13245 if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr)
13247 if (obj->gtFlags & GTF_SIDE_EFFECT)
13249 obj = gtUnusedValNode(obj);
13250 impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13255 /* Preserve 'small' int types */
13256 if (lclTyp > TYP_INT)
13258 lclTyp = genActualType(lclTyp);
13261 switch (fieldInfo.fieldAccessor)
13263 case CORINFO_FIELD_INSTANCE:
13264 #ifdef FEATURE_READYTORUN_COMPILER
13265 case CORINFO_FIELD_INSTANCE_WITH_BASE:
13268 obj = impCheckForNullPointer(obj);
13270 /* Create the data member node */
13271 op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset);
13272 DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
13273 if (StructHasOverlappingFields(typeFlags))
13275 op1->gtField.gtFldMayOverlap = true;
13278 #ifdef FEATURE_READYTORUN_COMPILER
13279 if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE)
13281 op1->gtField.gtFieldLookup = fieldInfo.fieldLookup;
13285 op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT);
13287 if (fgAddrCouldBeNull(obj))
13289 op1->gtFlags |= GTF_EXCEPT;
13292 // If gtFldObj is a BYREF then our target is a value class and
13293 // it could point anywhere, for example a boxed class static int
13294 if (obj->gtType == TYP_BYREF)
13296 op1->gtFlags |= GTF_IND_TGTANYWHERE;
13299 if (compIsForInlining() &&
13300 impInlineIsGuaranteedThisDerefBeforeAnySideEffects(op2, obj, impInlineInfo->inlArgInfo))
13302 impInlineInfo->thisDereferencedFirst = true;
13307 case CORINFO_FIELD_STATIC_TLS:
13308 #ifdef _TARGET_X86_
13309 // Legacy TLS access is implemented as an intrinsic on x86 only
13311 /* Create the data member node */
13312 op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset);
13313 op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation
13317 fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER;
13322 case CORINFO_FIELD_STATIC_ADDR_HELPER:
13323 case CORINFO_FIELD_INSTANCE_HELPER:
13324 case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
13325 op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp,
13329 case CORINFO_FIELD_STATIC_ADDRESS:
13330 case CORINFO_FIELD_STATIC_RVA_ADDRESS:
13331 case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
13332 case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
13333 #if COR_JIT_EE_VERSION > 460
13334 case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
13336 op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo,
13341 assert(!"Unexpected fieldAccessor");
13344 // Create the member assignment, unless we have a struct.
13345 // TODO-1stClassStructs: This could be limited to TYP_STRUCT, to avoid extra copies.
13346 bool deferStructAssign = varTypeIsStruct(lclTyp);
13348 if (!deferStructAssign)
13350 if (prefixFlags & PREFIX_VOLATILE)
13352 assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND));
13353 op1->gtFlags |= GTF_DONT_CSE; // Can't CSE a volatile
13354 op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
13355 op1->gtFlags |= GTF_IND_VOLATILE;
13357 if (prefixFlags & PREFIX_UNALIGNED)
13359 assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND));
13360 op1->gtFlags |= GTF_IND_UNALIGNED;
13363 /* V4.0 allows assignment of i4 constant values to i8 type vars when the IL verifier is bypassed (full
13365 trust apps). The reason this works is that the JIT stores an i4 constant in the GenTree union during importation
13367 and reads from the union as if it were a long during code generation. Though this can potentially
13368 read garbage, one can get lucky to have this working correctly.
13370 This code pattern is generated by the Dev10 MC++ compiler while storing to fields when compiled with the /O2
13372 switch (default when compiling retail configs in Dev10) and a customer app has taken a dependency on
13374 it. To be backward compatible, we will explicitly add an upward cast here so that it works
13378 correctly. Note that this is limited to x86 alone as there is no back compat to be addressed for the ARM JIT. */
13381 CLANG_FORMAT_COMMENT_ANCHOR;
13383 #ifdef _TARGET_X86_
13384 if (op1->TypeGet() != op2->TypeGet() && op2->OperIsConst() && varTypeIsIntOrI(op2->TypeGet()) &&
13385 varTypeIsLong(op1->TypeGet()))
13387 op2 = gtNewCastNode(op1->TypeGet(), op2, op1->TypeGet());
13391 #ifdef _TARGET_64BIT_
13392 // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
13393 if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType))
13395 op2->gtType = TYP_I_IMPL;
13399 // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
13401 if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT))
13403 op2 = gtNewCastNode(TYP_INT, op2, TYP_INT);
13407 // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
13407 if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT))
13409 op2 = gtNewCastNode(TYP_I_IMPL, op2, TYP_I_IMPL);
13414 #if !FEATURE_X87_DOUBLES
13415 // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE
13416 // We insert a cast to the dest 'op1' type
13418 if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) &&
13419 varTypeIsFloating(op2->gtType))
13421 op2 = gtNewCastNode(op1->TypeGet(), op2, op1->TypeGet());
13423 #endif // !FEATURE_X87_DOUBLES
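// Annotation (hedged example, not part of the original source): the compatibility casts above mean
// that unverifiable IL such as
//     ldc.i4.0
//     stfld int64 C::m_field
// gets an explicit widening cast on op2, so the assignment built below always sees matching types;
// C::m_field is a placeholder field name.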
13425 op1 = gtNewAssignNode(op1, op2);
13427 /* Mark the expression as containing an assignment */
13429 op1->gtFlags |= GTF_ASG;
13432 /* Check if the class needs explicit initialization */
13434 if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
13436 GenTreePtr helperNode = impInitClass(&resolvedToken);
13437 if (compDonotInline())
13441 if (helperNode != nullptr)
13443 op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1);
13447 /* stfld can interfere with value classes (consider the sequence
13448 ldloc, ldloca, ..., stfld, stloc). We will be conservative and
13449 spill all value class references from the stack. */
13451 if (obj && ((obj->gtType == TYP_BYREF) || (obj->gtType == TYP_I_IMPL)))
13455 if (impIsValueType(tiObj))
13457 impSpillEvalStack();
13461 impSpillValueClasses();
13465 /* Spill any refs to the same member from the stack */
13467 impSpillLclRefs((ssize_t)resolvedToken.hField);
13469 /* stsfld also interferes with indirect accesses (for aliased
13470 statics) and calls. But we don't need to spill other statics
13471 as we have explicitly spilled this particular static field. */
13473 impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STFLD"));
13475 if (deferStructAssign)
13477 op1 = impAssignStruct(op1, op2, clsHnd, (unsigned)CHECK_SPILL_ALL);
13485 /* Get the class type index operand */
13487 _impResolveToken(CORINFO_TOKENKIND_Newarr);
13489 JITDUMP(" %08X", resolvedToken.token);
13491 if (!opts.IsReadyToRun())
13493 // Need to restore array classes before creating array objects on the heap
13494 op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE /*mustRestoreHandle*/);
13495 if (op1 == nullptr)
13496 { // compDonotInline()
13501 if (tiVerificationNeeded)
13503 // As per ECMA, the 'numElems' operand can be either an int32 or a native int.
13504 Verify(impStackTop().seTypeInfo.IsIntOrNativeIntType(), "bad bound");
13506 CORINFO_CLASS_HANDLE elemTypeHnd;
13507 info.compCompHnd->getChildType(resolvedToken.hClass, &elemTypeHnd);
13508 Verify(elemTypeHnd == nullptr ||
13509 !(info.compCompHnd->getClassAttribs(elemTypeHnd) & CORINFO_FLG_CONTAINS_STACK_PTR),
13510 "array of byref-like type");
13511 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
13514 accessAllowedResult =
13515 info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
13516 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
13518 /* Form the arglist: array class handle, size */
13519 op2 = impPopStack().val;
13520 assertImp(genActualTypeIsIntOrI(op2->gtType));
13522 #ifdef FEATURE_READYTORUN_COMPILER
13523 if (opts.IsReadyToRun())
13525 op1 = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_NEWARR_1, TYP_REF,
13526 gtNewArgList(op2));
13527 usingReadyToRunHelper = (op1 != nullptr);
13529 if (!usingReadyToRunHelper)
13531 // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
13532 // and the newarr call with a single call to a dynamic R2R cell that will:
13533 // 1) Load the context
13534 // 2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
13535 // 3) Allocate the new array
13536 // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
13538 // Need to restore array classes before creating array objects on the heap
13539 op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE /*mustRestoreHandle*/);
13540 if (op1 == nullptr)
13541 { // compDonotInline()
13547 if (!usingReadyToRunHelper)
13550 args = gtNewArgList(op1, op2);
13552 /* Create a call to 'new' */
13554 // Note that this only works for shared generic code because the same helper is used for all
13555 // reference array types
13557 gtNewHelperCallNode(info.compCompHnd->getNewArrHelper(resolvedToken.hClass), TYP_REF, 0, args);
13560 op1->gtCall.compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)resolvedToken.hClass;
13562 /* Remember that this basic block contains 'new' of a single-dimension (SD) array */
13564 block->bbFlags |= BBF_HAS_NEWARRAY;
13565 optMethodFlags |= OMF_HAS_NEWARRAY;
13567 /* Push the result of the call on the stack */
13569 impPushOnStack(op1, tiRetVal);
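// Annotation (hedged sketch, not part of the original source): outside of ReadyToRun, "newarr ElemType"
// therefore becomes a helper call of the shape
//     helper(getNewArrHelper(arrayCls))(arrayClsHandle, numElems)
// whose TYP_REF result is the tree that was just pushed.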
13576 assert(!compIsForInlining());
13578 if (tiVerificationNeeded)
13580 Verify(false, "bad opcode");
13583 // We don't allow locallocs inside handlers
13584 if (block->hasHndIndex())
13586 BADCODE("Localloc can't be inside handler");
13589 /* The FP register may not be back to the original value at the end
13590 of the method, even if the frame size is 0, as localloc may
13591 have modified it. So we will HAVE to reset it */
13593 compLocallocUsed = true;
13594 setNeedsGSSecurityCookie();
13596 // Get the size to allocate
13598 op2 = impPopStack().val;
13599 assertImp(genActualTypeIsIntOrI(op2->gtType));
13601 if (verCurrentState.esStackDepth != 0)
13603 BADCODE("Localloc can only be used when the stack is empty");
13606 op1 = gtNewOperNode(GT_LCLHEAP, TYP_I_IMPL, op2);
13608 // May throw a stack overflow exception. Obviously, we don't want locallocs to be CSE'd.
13610 op1->gtFlags |= (GTF_EXCEPT | GTF_DONT_CSE);
13612 impPushOnStack(op1, tiRetVal);
13617 /* Get the type token */
13618 assertImp(sz == sizeof(unsigned));
13620 _impResolveToken(CORINFO_TOKENKIND_Casting);
13622 JITDUMP(" %08X", resolvedToken.token);
13624 if (!opts.IsReadyToRun())
13626 op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
13627 if (op2 == nullptr)
13628 { // compDonotInline()
13633 if (tiVerificationNeeded)
13635 Verify(impStackTop().seTypeInfo.IsObjRef(), "obj reference needed");
13636 // Even if this is a value class, we know it is boxed.
13637 tiRetVal = typeInfo(TI_REF, resolvedToken.hClass);
13639 accessAllowedResult =
13640 info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
13641 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
13643 op1 = impPopStack().val;
13645 #ifdef FEATURE_READYTORUN_COMPILER
13646 if (opts.IsReadyToRun())
13648 GenTreePtr opLookup =
13649 impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_ISINSTANCEOF, TYP_REF,
13650 gtNewArgList(op1));
13651 usingReadyToRunHelper = (opLookup != nullptr);
13652 op1 = (usingReadyToRunHelper ? opLookup : op1);
13654 if (!usingReadyToRunHelper)
13656 // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
13657 // and the isinstanceof_any call with a single call to a dynamic R2R cell that will:
13658 // 1) Load the context
13659 // 2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
13660 // 3) Perform the 'is instance' check on the input object
13661 // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
13663 op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
13664 if (op2 == nullptr)
13665 { // compDonotInline()
13671 if (!usingReadyToRunHelper)
13674 op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, false);
13676 if (compDonotInline())
13681 impPushOnStack(op1, tiRetVal);
13685 case CEE_REFANYVAL:
13687 // get the class handle and make an ICON node out of it
13689 _impResolveToken(CORINFO_TOKENKIND_Class);
13691 JITDUMP(" %08X", resolvedToken.token);
13693 op2 = impTokenToHandle(&resolvedToken);
13694 if (op2 == nullptr)
13695 { // compDonotInline()
13699 if (tiVerificationNeeded)
13701 Verify(typeInfo::AreEquivalent(impStackTop().seTypeInfo, verMakeTypeInfo(impGetRefAnyClass())),
13703 tiRetVal = verMakeTypeInfo(resolvedToken.hClass).MakeByRef();
13706 op1 = impPopStack().val;
13707 // make certain it is normalized;
13708 op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL);
13710 // Call helper GETREFANY(classHandle, op1);
13711 args = gtNewArgList(op2, op1);
13712 op1 = gtNewHelperCallNode(CORINFO_HELP_GETREFANY, TYP_BYREF, 0, args);
13714 impPushOnStack(op1, tiRetVal);
13717 case CEE_REFANYTYPE:
13719 if (tiVerificationNeeded)
13721 Verify(typeInfo::AreEquivalent(impStackTop().seTypeInfo, verMakeTypeInfo(impGetRefAnyClass())),
13725 op1 = impPopStack().val;
13727 // make certain it is normalized;
13728 op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL);
13730 if (op1->gtOper == GT_OBJ)
13732 // Get the address of the refany
13733 op1 = op1->gtOp.gtOp1;
13735 // Fetch the type from the correct slot
13736 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
13737 gtNewIconNode(offsetof(CORINFO_RefAny, type), TYP_I_IMPL));
13738 op1 = gtNewOperNode(GT_IND, TYP_BYREF, op1);
13742 assertImp(op1->gtOper == GT_MKREFANY);
13744 // The pointer may have side-effects
13745 if (op1->gtOp.gtOp1->gtFlags & GTF_SIDE_EFFECT)
13747 impAppendTree(op1->gtOp.gtOp1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13749 impNoteLastILoffs();
13753 // We already have the class handle
13754 op1 = op1->gtOp.gtOp2;
13757 // convert native TypeHandle to RuntimeTypeHandle
13759 GenTreeArgList* helperArgs = gtNewArgList(op1);
13761 op1 = gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE_MAYBENULL, TYP_STRUCT, GTF_EXCEPT,
13764 // The handle struct is returned in register
13765 op1->gtCall.gtReturnType = TYP_REF;
13767 tiRetVal = typeInfo(TI_STRUCT, impGetTypeHandleClass());
13770 impPushOnStack(op1, tiRetVal);
13775 /* Get the Class index */
13776 assertImp(sz == sizeof(unsigned));
13777 lastLoadToken = codeAddr;
13778 _impResolveToken(CORINFO_TOKENKIND_Ldtoken);
13780 tokenType = info.compCompHnd->getTokenTypeAsHandle(&resolvedToken);
13782 op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE);
13783 if (op1 == nullptr)
13784 { // compDonotInline()
13788 helper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE;
13789 assert(resolvedToken.hClass != nullptr);
13791 if (resolvedToken.hMethod != nullptr)
13793 helper = CORINFO_HELP_METHODDESC_TO_STUBRUNTIMEMETHOD;
13795 else if (resolvedToken.hField != nullptr)
13797 helper = CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD;
13800 GenTreeArgList* helperArgs = gtNewArgList(op1);
13802 op1 = gtNewHelperCallNode(helper, TYP_STRUCT, GTF_EXCEPT, helperArgs);
13804 // The handle struct is returned in register
13805 op1->gtCall.gtReturnType = TYP_REF;
13807 tiRetVal = verMakeTypeInfo(tokenType);
13808 impPushOnStack(op1, tiRetVal);
13813 case CEE_UNBOX_ANY:
13815 /* Get the Class index */
13816 assertImp(sz == sizeof(unsigned));
13818 _impResolveToken(CORINFO_TOKENKIND_Class);
13820 JITDUMP(" %08X", resolvedToken.token);
13822 BOOL runtimeLookup;
13823 op2 = impTokenToHandle(&resolvedToken, &runtimeLookup);
13824 if (op2 == nullptr)
13825 { // compDonotInline()
13829 // Run this always so we can get access exceptions even with SkipVerification.
13830 accessAllowedResult =
13831 info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
13832 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
13834 if (opcode == CEE_UNBOX_ANY && !eeIsValueClass(resolvedToken.hClass))
13836 if (tiVerificationNeeded)
13838 typeInfo tiUnbox = impStackTop().seTypeInfo;
13839 Verify(tiUnbox.IsObjRef(), "bad unbox.any arg");
13840 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
13841 tiRetVal.NormaliseForStack();
13843 op1 = impPopStack().val;
13847 /* Pop the object and create the unbox helper call */
13848 /* You might think that for UNBOX_ANY we need to push a different */
13849 /* (non-byref) type, but here we're making the tiRetVal that is used */
13850 /* for the intermediate pointer which we then transfer onto the OBJ */
13851 /* instruction. OBJ then creates the appropriate tiRetVal. */
13852 if (tiVerificationNeeded)
13854 typeInfo tiUnbox = impStackTop().seTypeInfo;
13855 Verify(tiUnbox.IsObjRef(), "Bad unbox arg");
13857 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
13858 Verify(tiRetVal.IsValueClass(), "not value class");
13859 tiRetVal.MakeByRef();
13861 // We always come from an objref, so this is safe byref
13862 tiRetVal.SetIsPermanentHomeByRef();
13863 tiRetVal.SetIsReadonlyByRef();
13866 op1 = impPopStack().val;
13867 assertImp(op1->gtType == TYP_REF);
13869 helper = info.compCompHnd->getUnBoxHelper(resolvedToken.hClass);
13870 assert(helper == CORINFO_HELP_UNBOX || helper == CORINFO_HELP_UNBOX_NULLABLE);
13872 // We only want to expand inline the normal UNBOX helper;
13873 expandInline = (helper == CORINFO_HELP_UNBOX);
13877 if (compCurBB->isRunRarely())
13879 expandInline = false; // not worth the code expansion
13885 // we are doing normal unboxing
13886 // inline the common case of the unbox helper
13887 // UNBOX(exp) morphs into
13888 // clone = pop(exp);
13889 // ((*clone == typeToken) ? nop : helper(clone, typeToken));
13890 // push(clone + sizeof(void*))
13892 GenTreePtr cloneOperand;
13893 op1 = impCloneExpr(op1, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
13894 nullptr DEBUGARG("inline UNBOX clone1"));
13895 op1 = gtNewOperNode(GT_IND, TYP_I_IMPL, op1);
13897 GenTreePtr condBox = gtNewOperNode(GT_EQ, TYP_INT, op1, op2);
13899 op1 = impCloneExpr(cloneOperand, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
13900 nullptr DEBUGARG("inline UNBOX clone2"));
13901 op2 = impTokenToHandle(&resolvedToken);
13902 if (op2 == nullptr)
13903 { // compDonotInline()
13906 args = gtNewArgList(op2, op1);
13907 op1 = gtNewHelperCallNode(helper, TYP_VOID, 0, args);
13909 op1 = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), op1);
13910 op1 = gtNewQmarkNode(TYP_VOID, condBox, op1);
13911 condBox->gtFlags |= GTF_RELOP_QMARK;
13913 // QMARK nodes cannot reside on the evaluation stack. Because there
13914 // may be other trees on the evaluation stack that side-effect the
13915 // sources of the UNBOX operation we must spill the stack.
13917 impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13919 // Create the address-expression to reference past the object header
13920 // to the beginning of the value-type. Today this means adjusting
13921 // past the base of the object's vtable field, which is pointer sized.
13923 op2 = gtNewIconNode(sizeof(void*), TYP_I_IMPL);
13924 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, cloneOperand, op2);
13928 unsigned callFlags = (helper == CORINFO_HELP_UNBOX) ? 0 : GTF_EXCEPT;
13930 // Don't optimize, just call the helper and be done with it
13931 args = gtNewArgList(op2, op1);
13932 op1 = gtNewHelperCallNode(helper,
13933 (var_types)((helper == CORINFO_HELP_UNBOX) ? TYP_BYREF : TYP_STRUCT),
13937 assert(helper == CORINFO_HELP_UNBOX && op1->gtType == TYP_BYREF || // Unbox helper returns a byref.
13938 helper == CORINFO_HELP_UNBOX_NULLABLE &&
13939 varTypeIsStruct(op1) // UnboxNullable helper returns a struct.
13943 ----------------------------------------------------------------------
13946 | opcode  \ helper | CORINFO_HELP_UNBOX      | CORINFO_HELP_UNBOX_NULLABLE  |
13947 |                  | (which returns a BYREF) | (which returns a STRUCT)     |
13949 |---------------------------------------------------------------------
13950 | UNBOX            | push the BYREF          | spill the STRUCT to a local, |
13951 |                  |                         | push the BYREF to this local |
13952 |---------------------------------------------------------------------
13953 | UNBOX_ANY        | push a GT_OBJ of        | push the STRUCT              |
13954 |                  | the BYREF               | For Linux when the           |
13955 |                  |                         | struct is returned in two    |
13956 |                  |                         | registers create a temp      |
13957 |                  |                         | whose address is passed to   |
13958 |                  |                         | the unbox_nullable helper.   |
13959 |---------------------------------------------------------------------
13962 if (opcode == CEE_UNBOX)
13964 if (helper == CORINFO_HELP_UNBOX_NULLABLE)
13966 // Unbox nullable helper returns a struct type.
13967 // We need to spill it to a temp so that we can take the address of it.
13968 // Here we need the unsafe value cls check, since the address of the struct is taken to be used
13969 // further along and is potentially exploitable.
13971 unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a nullable"));
13972 lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */);
13974 op2 = gtNewLclvNode(tmp, TYP_STRUCT);
13975 op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
13976 assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp.
13978 op2 = gtNewLclvNode(tmp, TYP_STRUCT);
13979 op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2);
13980 op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2);
13983 assert(op1->gtType == TYP_BYREF);
13984 assert(!tiVerificationNeeded || tiRetVal.IsByRef());
13988 assert(opcode == CEE_UNBOX_ANY);
13990 if (helper == CORINFO_HELP_UNBOX)
13992 // Normal unbox helper returns a TYP_BYREF.
13993 impPushOnStack(op1, tiRetVal);
13998 assert(helper == CORINFO_HELP_UNBOX_NULLABLE && "Make sure the helper is nullable!");
14000 #if FEATURE_MULTIREG_RET
14002 if (varTypeIsStruct(op1) && IsMultiRegReturnedType(resolvedToken.hClass))
14004 // Unbox nullable helper returns a TYP_STRUCT.
14005 // For the multi-reg case we need to spill it to a temp so that
14006 // we can pass the address to the unbox_nullable jit helper.
14008 unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a register returnable nullable"));
14009 lvaTable[tmp].lvIsMultiRegArg = true;
14010 lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */);
14012 op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14013 op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
14014 assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp.
14016 op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14017 op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2);
14018 op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2);
14020 // In this case the return value of the unbox helper is TYP_BYREF.
14021 // Make sure the right type is placed on the operand type stack.
14022 impPushOnStack(op1, tiRetVal);
14024 // Load the struct.
14027 assert(op1->gtType == TYP_BYREF);
14028 assert(!tiVerificationNeeded || tiRetVal.IsByRef());
14034 #endif // !FEATURE_MULTIREG_RET
14038 // If the struct is not register passable, we have it materialized in the RetBuf.
14038 assert(op1->gtType == TYP_STRUCT);
14039 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
14040 assert(tiRetVal.IsValueClass());
14044 impPushOnStack(op1, tiRetVal);
14050 /* Get the Class index */
14051 assertImp(sz == sizeof(unsigned));
14053 _impResolveToken(CORINFO_TOKENKIND_Box);
14055 JITDUMP(" %08X", resolvedToken.token);
14057 if (tiVerificationNeeded)
14059 typeInfo tiActual = impStackTop().seTypeInfo;
14060 typeInfo tiBox = verMakeTypeInfo(resolvedToken.hClass);
14062 Verify(verIsBoxable(tiBox), "boxable type expected");
14064 // check the class constraints of the boxed type in case we are boxing an uninitialized value
14065 Verify(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
14066 "boxed type has unsatisfied class constraints");
14068 Verify(tiCompatibleWith(tiActual, tiBox.NormaliseForStack(), true), "type mismatch");
14070 // Observation: the following code introduces a boxed value class on the stack, but,
14071 // according to the ECMA spec, one would simply expect: tiRetVal =
14072 // typeInfo(TI_REF,impGetObjectClass());
14074 // Push the result back on the stack,
14075 // even if clsHnd is a value class we want the TI_REF
14076 // we call back to the EE to find out what type we should push (for Nullable<T> we push T)
14077 tiRetVal = typeInfo(TI_REF, info.compCompHnd->getTypeForBox(resolvedToken.hClass));
14080 accessAllowedResult =
14081 info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14082 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14084 // Note BOX can be used on things that are not value classes, in which
14085 // case we get a NOP. However the verifier's view of the type on the
14086 // stack changes (in generic code a 'T' becomes a 'boxed T')
14087 if (!eeIsValueClass(resolvedToken.hClass))
14089 verCurrentState.esStack[verCurrentState.esStackDepth - 1].seTypeInfo = tiRetVal;
14093 // Look ahead for unbox.any
14094 if (codeAddr + (sz + 1 + sizeof(mdToken)) <= codeEndp && codeAddr[sz] == CEE_UNBOX_ANY)
14096 DWORD classAttribs = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
14097 if (!(classAttribs & CORINFO_FLG_SHAREDINST))
14099 CORINFO_RESOLVED_TOKEN unboxResolvedToken;
14101 impResolveToken(codeAddr + (sz + 1), &unboxResolvedToken, CORINFO_TOKENKIND_Class);
14103 if (unboxResolvedToken.hClass == resolvedToken.hClass)
14105 // Skip the next unbox.any instruction
14106 sz += sizeof(mdToken) + 1;
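// Annotation (hedged example, not part of the original source): this look-ahead recognizes the IL
// pair
//     box       !!T
//     unbox.any !!T
// for the same non-shared class and skips importing the redundant unbox.any, since boxing followed
// immediately by unboxing to the same type leaves the original value on the stack.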
14112 impImportAndPushBox(&resolvedToken);
14113 if (compDonotInline())
14122 /* Get the Class index */
14123 assertImp(sz == sizeof(unsigned));
14125 _impResolveToken(CORINFO_TOKENKIND_Class);
14127 JITDUMP(" %08X", resolvedToken.token);
14129 if (tiVerificationNeeded)
14131 tiRetVal = typeInfo(TI_INT);
14134 op1 = gtNewIconNode(info.compCompHnd->getClassSize(resolvedToken.hClass));
14135 impPushOnStack(op1, tiRetVal);
14138 case CEE_CASTCLASS:
14140 /* Get the Class index */
14142 assertImp(sz == sizeof(unsigned));
14144 _impResolveToken(CORINFO_TOKENKIND_Casting);
14146 JITDUMP(" %08X", resolvedToken.token);
14148 if (!opts.IsReadyToRun())
14150 op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
14151 if (op2 == nullptr)
14152 { // compDonotInline()
14157 if (tiVerificationNeeded)
14159 Verify(impStackTop().seTypeInfo.IsObjRef(), "object ref expected");
14161 tiRetVal = typeInfo(TI_REF, resolvedToken.hClass);
14164 accessAllowedResult =
14165 info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14166 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14168 op1 = impPopStack().val;
14170 /* Pop the address and create the 'checked cast' helper call */
14172 // At this point we expect typeRef to contain the token, op1 to contain the value being cast,
14173 // and op2 to contain code that creates the type handle corresponding to typeRef
14176 #ifdef FEATURE_READYTORUN_COMPILER
14177 if (opts.IsReadyToRun())
14179 GenTreePtr opLookup = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_CHKCAST,
14180 TYP_REF, gtNewArgList(op1));
14181 usingReadyToRunHelper = (opLookup != nullptr);
14182 op1 = (usingReadyToRunHelper ? opLookup : op1);
14184 if (!usingReadyToRunHelper)
14186 // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
14187 // and the chkcastany call with a single call to a dynamic R2R cell that will:
14188 // 1) Load the context
14189 // 2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
14190 // 3) Check the object on the stack for the type-cast
14191 // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
14193 op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
14194 if (op2 == nullptr)
14195 { // compDonotInline()
14201 if (!usingReadyToRunHelper)
14204 op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, true);
14206 if (compDonotInline())
14211 /* Push the result back on the stack */
14212 impPushOnStack(op1, tiRetVal);
14217 if (compIsForInlining())
14219 // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
14220 // TODO: Will this be too strict, given that we will inline many basic blocks?
14221 // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
14223 /* Do we have just the exception on the stack? */
14225 if (verCurrentState.esStackDepth != 1)
14227 /* if not, just don't inline the method */
14229 compInlineResult->NoteFatal(InlineObservation::CALLEE_THROW_WITH_INVALID_STACK);
14234 if (tiVerificationNeeded)
14236 tiRetVal = impStackTop().seTypeInfo;
14237 Verify(tiRetVal.IsObjRef(), "object ref expected");
14238 if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
14240 Verify(!tiRetVal.IsThisPtr(), "throw uninitialized this");
14244 block->bbSetRunRarely(); // any block with a throw is rare
14245 /* Pop the exception object and create the 'throw' helper call */
14247 op1 = gtNewHelperCallNode(CORINFO_HELP_THROW, TYP_VOID, GTF_EXCEPT, gtNewArgList(impPopStack().val));
14250 if (verCurrentState.esStackDepth > 0)
14252 impEvalSideEffects();
14255 assert(verCurrentState.esStackDepth == 0);
14261 assert(!compIsForInlining());
14263 if (info.compXcptnsCount == 0)
14265 BADCODE("rethrow outside catch");
14268 if (tiVerificationNeeded)
14270 Verify(block->hasHndIndex(), "rethrow outside catch");
14271 if (block->hasHndIndex())
14273 EHblkDsc* HBtab = ehGetDsc(block->getHndIndex());
14274 Verify(!HBtab->HasFinallyOrFaultHandler(), "rethrow in finally or fault");
14275 if (HBtab->HasFilter())
14277 // we better be in the handler clause part, not the filter part
14278 Verify(jitIsBetween(compCurBB->bbCodeOffs, HBtab->ebdHndBegOffs(), HBtab->ebdHndEndOffs()),
14279 "rethrow in filter");
14284 /* Create the 'rethrow' helper call */
14286 op1 = gtNewHelperCallNode(CORINFO_HELP_RETHROW, TYP_VOID, GTF_EXCEPT);
14292 assertImp(sz == sizeof(unsigned));
14294 _impResolveToken(CORINFO_TOKENKIND_Class);
14296 JITDUMP(" %08X", resolvedToken.token);
14298 if (tiVerificationNeeded)
14300 typeInfo tiTo = impStackTop().seTypeInfo;
14301 typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
14303 Verify(tiTo.IsByRef(), "byref expected");
14304 Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
14306 Verify(tiCompatibleWith(tiInstr, tiTo.DereferenceByRef(), false),
14307 "type operand incompatible with type of address");
14310 size = info.compCompHnd->getClassSize(resolvedToken.hClass); // Size
14311 op2 = gtNewIconNode(0); // Value
14312 op1 = impPopStack().val; // Dest
14313 op1 = gtNewBlockVal(op1, size);
14314 op1 = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, false);
14319 if (tiVerificationNeeded)
14321 Verify(false, "bad opcode");
14324 op3 = impPopStack().val; // Size
14325 op2 = impPopStack().val; // Value
14326 op1 = impPopStack().val; // Dest
14328 if (op3->IsCnsIntOrI())
14330 size = (unsigned)op3->AsIntConCommon()->IconValue();
14331 op1 = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, size);
14335 op1 = new (this, GT_DYN_BLK) GenTreeDynBlk(op1, op3);
14338 op1 = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, false);
14344 if (tiVerificationNeeded)
14346 Verify(false, "bad opcode");
14348 op3 = impPopStack().val; // Size
14349 op2 = impPopStack().val; // Src
14350 op1 = impPopStack().val; // Dest
14352 if (op3->IsCnsIntOrI())
14354 size = (unsigned)op3->AsIntConCommon()->IconValue();
14355 op1 = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, size);
14359 op1 = new (this, GT_DYN_BLK) GenTreeDynBlk(op1, op3);
14362 if (op2->OperGet() == GT_ADDR)
14364 op2 = op2->gtOp.gtOp1;
14368 op2 = gtNewOperNode(GT_IND, TYP_STRUCT, op2);
14371 op1 = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, true);
14376 assertImp(sz == sizeof(unsigned));
14378 _impResolveToken(CORINFO_TOKENKIND_Class);
14380 JITDUMP(" %08X", resolvedToken.token);
14382 if (tiVerificationNeeded)
14384 typeInfo tiFrom = impStackTop().seTypeInfo;
14385 typeInfo tiTo = impStackTop(1).seTypeInfo;
14386 typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
14388 Verify(tiFrom.IsByRef(), "expected byref source");
14389 Verify(tiTo.IsByRef(), "expected byref destination");
14391 Verify(tiCompatibleWith(tiFrom.DereferenceByRef(), tiInstr, false),
14392 "type of source address incompatible with type operand");
14393 Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
14394 Verify(tiCompatibleWith(tiInstr, tiTo.DereferenceByRef(), false),
14395 "type operand incompatible with type of destination address");
14398 if (!eeIsValueClass(resolvedToken.hClass))
14400 op1 = impPopStack().val; // address to load from
14402 impBashVarAddrsToI(op1);
14404 assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
14406 op1 = gtNewOperNode(GT_IND, TYP_REF, op1);
14407 op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
14409 impPushOnStackNoType(op1);
14410 opcode = CEE_STIND_REF;
14412 goto STIND_POST_VERIFY;
14415 op2 = impPopStack().val; // Src
14416 op1 = impPopStack().val; // Dest
14417 op1 = gtNewCpObjNode(op1, op2, resolvedToken.hClass, ((prefixFlags & PREFIX_VOLATILE) != 0));
14422 assertImp(sz == sizeof(unsigned));
14424 _impResolveToken(CORINFO_TOKENKIND_Class);
14426 JITDUMP(" %08X", resolvedToken.token);
14428 if (eeIsValueClass(resolvedToken.hClass))
14430 lclTyp = TYP_STRUCT;
14437 if (tiVerificationNeeded)
14440 typeInfo tiPtr = impStackTop(1).seTypeInfo;
14442 // Make sure we have a well-formed byref
14443 Verify(tiPtr.IsByRef(), "pointer not byref");
14444 Verify(!tiPtr.IsReadonlyByRef(), "write to readonly byref");
14445 if (!tiPtr.IsByRef() || tiPtr.IsReadonlyByRef())
14447 compUnsafeCastUsed = true;
14450 typeInfo ptrVal = DereferenceByRef(tiPtr);
14451 typeInfo argVal = verMakeTypeInfo(resolvedToken.hClass);
14453 if (!tiCompatibleWith(impStackTop(0).seTypeInfo, NormaliseForStack(argVal), true))
14455 Verify(false, "type of value incompatible with type operand");
14456 compUnsafeCastUsed = true;
14459 if (!tiCompatibleWith(argVal, ptrVal, false))
14461 Verify(false, "type operand incompatible with type of address");
14462 compUnsafeCastUsed = true;
14467 compUnsafeCastUsed = true;
14470 if (lclTyp == TYP_REF)
14472 opcode = CEE_STIND_REF;
14473 goto STIND_POST_VERIFY;
14476 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
14477 if (impIsPrimitive(jitTyp))
14479 lclTyp = JITtype2varType(jitTyp);
14480 goto STIND_POST_VERIFY;
14483 op2 = impPopStack().val; // Value
14484 op1 = impPopStack().val; // Ptr
14486 assertImp(varTypeIsStruct(op2));
14488 op1 = impAssignStructPtr(op1, op2, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
14494 assert(!compIsForInlining());
14496 // Being lazy here. Refanys are tricky in terms of gc tracking.
14497 // Since it is uncommon, just don't perform struct promotion in any method that contains mkrefany.
14499 JITDUMP("disabling struct promotion because of mkrefany\n");
14500 fgNoStructPromotion = true;
14502 oper = GT_MKREFANY;
14503 assertImp(sz == sizeof(unsigned));
14505 _impResolveToken(CORINFO_TOKENKIND_Class);
14507 JITDUMP(" %08X", resolvedToken.token);
14509 op2 = impTokenToHandle(&resolvedToken, nullptr, TRUE);
14510 if (op2 == nullptr)
14511 { // compDonotInline()
14515 if (tiVerificationNeeded)
14517 typeInfo tiPtr = impStackTop().seTypeInfo;
14518 typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
14520 Verify(!verIsByRefLike(tiInstr), "mkrefany of byref-like class");
14521 Verify(!tiPtr.IsReadonlyByRef(), "readonly byref used with mkrefany");
14522 Verify(typeInfo::AreEquivalent(tiPtr.DereferenceByRef(), tiInstr), "type mismatch");
14525 accessAllowedResult =
14526 info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14527 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14529 op1 = impPopStack().val;
14531 // @SPECVIOLATION: TYP_INT should not be allowed here by a strict reading of the spec.
14532 // But JIT32 allowed it, so we continue to allow it.
14533 assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL || op1->TypeGet() == TYP_INT);
14535 // MKREFANY returns a struct. op2 is the class token.
14536 op1 = gtNewOperNode(oper, TYP_STRUCT, op1, op2);
14538 impPushOnStack(op1, verMakeTypeInfo(impGetRefAnyClass()));
14544 assertImp(sz == sizeof(unsigned));
14546 _impResolveToken(CORINFO_TOKENKIND_Class);
14548 JITDUMP(" %08X", resolvedToken.token);
14552 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
14554 if (tiVerificationNeeded)
14556 typeInfo tiPtr = impStackTop().seTypeInfo;
14558 // Make sure we have a byref
14559 if (!tiPtr.IsByRef())
14561 Verify(false, "pointer not byref");
14562 compUnsafeCastUsed = true;
14564 typeInfo tiPtrVal = DereferenceByRef(tiPtr);
14566 if (!tiCompatibleWith(tiPtrVal, tiRetVal, false))
14568 Verify(false, "type of address incompatible with type operand");
14569 compUnsafeCastUsed = true;
14571 tiRetVal.NormaliseForStack();
14575 compUnsafeCastUsed = true;
14578 if (eeIsValueClass(resolvedToken.hClass))
14580 lclTyp = TYP_STRUCT;
14585 opcode = CEE_LDIND_REF;
14586 goto LDIND_POST_VERIFY;
14589 op1 = impPopStack().val;
14591 assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL);
14593 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
14594 if (impIsPrimitive(jitTyp))
14596 op1 = gtNewOperNode(GT_IND, JITtype2varType(jitTyp), op1);
14598 // Could point anywhere, e.g. a boxed class static int
14599 op1->gtFlags |= GTF_IND_TGTANYWHERE | GTF_GLOB_REF;
14600 assertImp(varTypeIsArithmetic(op1->gtType));
14604 // OBJ returns a struct
14605 // and takes an inline argument which is the class token of the loaded obj
14606 op1 = gtNewObjNode(resolvedToken.hClass, op1);
14608 op1->gtFlags |= GTF_EXCEPT;
14610 impPushOnStack(op1, tiRetVal);
14615 if (tiVerificationNeeded)
14617 typeInfo tiArray = impStackTop().seTypeInfo;
14618 Verify(verIsSDArray(tiArray), "bad array");
14619 tiRetVal = typeInfo(TI_INT);
14622 op1 = impPopStack().val;
14623 if (!opts.MinOpts() && !opts.compDbgCode)
14625 /* Use the GT_ARR_LENGTH operator so range-check opts see this */
14626 GenTreeArrLen* arrLen =
14627 new (this, GT_ARR_LENGTH) GenTreeArrLen(TYP_INT, op1, offsetof(CORINFO_Array, length));
14629 /* Mark the block as containing a length expression */
14631 if (op1->gtOper == GT_LCL_VAR)
14633 block->bbFlags |= BBF_HAS_IDX_LEN;
14640 /* Create the expression "*(array_addr + ArrLenOffs)" */
14641 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
14642 gtNewIconNode(offsetof(CORINFO_Array, length), TYP_I_IMPL));
14643 op1 = gtNewOperNode(GT_IND, TYP_INT, op1);
14644 op1->gtFlags |= GTF_IND_ARR_LEN;
14647 /* An indirection will cause a GPF if the address is null */
14648 op1->gtFlags |= GTF_EXCEPT;
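// Illustrative: either form dereferences the array reference itself, so CEE_LDLEN on a null
// array faults at this indirection and the runtime surfaces it as a NullReferenceException.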
14650 /* Push the result back on the stack */
14651 impPushOnStack(op1, tiRetVal);
14655 op1 = gtNewHelperCallNode(CORINFO_HELP_USER_BREAKPOINT, TYP_VOID);
14659 if (opts.compDbgCode)
14661 op1 = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
14666 /******************************** NYI *******************************/
14669 OutputDebugStringA("CLR: Invalid x86 breakpoint in IL stream\n");
14672 case CEE_MACRO_END:
14675 BADCODE3("unknown opcode", ": %02X", (int)opcode);
14679 prevOpcode = opcode;
14682 assert(!insertLdloc || opcode == CEE_DUP);
14685 assert(!insertLdloc);
14688 #undef _impResolveToken
14691 #pragma warning(pop)
14694 // Push a local/argument tree on the operand stack
14695 void Compiler::impPushVar(GenTree* op, typeInfo tiRetVal)
14697 tiRetVal.NormaliseForStack();
14699 if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init) && tiRetVal.IsThisPtr())
14701 tiRetVal.SetUninitialisedObjRef();
14704 impPushOnStack(op, tiRetVal);
14707 // Load a local/argument on the operand stack
14708 // lclNum is an index into lvaTable *NOT* the arg/lcl index in the IL
14709 void Compiler::impLoadVar(unsigned lclNum, IL_OFFSET offset, typeInfo tiRetVal)
14713 if (lvaTable[lclNum].lvNormalizeOnLoad())
14715 lclTyp = lvaGetRealType(lclNum);
14719 lclTyp = lvaGetActualType(lclNum);
14722 impPushVar(gtNewLclvNode(lclNum, lclTyp, offset), tiRetVal);
14725 // Load an argument on the operand stack
14726 // Shared by the various CEE_LDARG opcodes
14727 // ilArgNum is the argument index as specified in IL.
14728 // It will be mapped to the correct lvaTable index
14729 void Compiler::impLoadArg(unsigned ilArgNum, IL_OFFSET offset)
14731 Verify(ilArgNum < info.compILargsCount, "bad arg num");
14733 if (compIsForInlining())
14735 if (ilArgNum >= info.compArgsCount)
14737 compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_ARGUMENT_NUMBER);
14741 impPushVar(impInlineFetchArg(ilArgNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo),
14742 impInlineInfo->lclVarInfo[ilArgNum].lclVerTypeInfo);
14746 if (ilArgNum >= info.compArgsCount)
14751 unsigned lclNum = compMapILargNum(ilArgNum); // account for possible hidden param
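// Illustrative assumption: hidden parameters (such as a return buffer) occupy lvaTable slots that
// have no counterpart in the IL signature, so an IL argument index and its lvaTable index can differ;
// compMapILargNum performs that adjustment.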
14753 if (lclNum == info.compThisArg)
14755 lclNum = lvaArg0Var;
14758 impLoadVar(lclNum, offset);
14762 // Load a local on the operand stack
14763 // Shared by the various CEE_LDLOC opcodes
14764 // ilLclNum is the local index as specified in IL.
14765 // It will be mapped to the correct lvaTable index
14766 void Compiler::impLoadLoc(unsigned ilLclNum, IL_OFFSET offset)
14768 if (tiVerificationNeeded)
14770 Verify(ilLclNum < info.compMethodInfo->locals.numArgs, "bad loc num");
14771 Verify(info.compInitMem, "initLocals not set");
14774 if (compIsForInlining())
14776 if (ilLclNum >= info.compMethodInfo->locals.numArgs)
14778 compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_LOCAL_NUMBER);
14782 // Get the local type
14783 var_types lclTyp = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclTypeInfo;
14785 typeInfo tiRetVal = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclVerTypeInfo;
14787 /* Have we allocated a temp for this local? */
14789 unsigned lclNum = impInlineFetchLocal(ilLclNum DEBUGARG("Inline ldloc first use temp"));
14791 // All vars of inlined methods should be !lvNormalizeOnLoad()
14793 assert(!lvaTable[lclNum].lvNormalizeOnLoad());
14794 lclTyp = genActualType(lclTyp);
14796 impPushVar(gtNewLclvNode(lclNum, lclTyp), tiRetVal);
14800 if (ilLclNum >= info.compMethodInfo->locals.numArgs)
14805 unsigned lclNum = info.compArgsCount + ilLclNum;
14807 impLoadVar(lclNum, offset);
14811 #ifdef _TARGET_ARM_
14812 /**************************************************************************************
14814 * When assigning a vararg call src to a HFA lcl dest, mark that we cannot promote the
14815 * dst struct, because struct promotion will turn it into a float/double variable while
14816 * the rhs will be an int/long variable. We don't generate code to assign an int into
14817 * a float, but there is nothing that would prevent us from doing so. The tree, however,
14818 * would look like: (=, (typ_float, typ_int)) or (GT_TRANSFER, (typ_float, typ_int))
14820 * tmpNum - the lcl dst variable num that is a struct.
14821 * src - the src tree assigned to the dest that is a struct/int (when varargs call.)
14822 * hClass - the type handle for the struct variable.
14824 * TODO-ARM-CQ: [301608] This is a rare scenario with varargs and struct promotion coming into play,
14825 * however, we could do a codegen of transferring from int to float registers
14826 * (transfer, not a cast.)
14829 void Compiler::impMarkLclDstNotPromotable(unsigned tmpNum, GenTreePtr src, CORINFO_CLASS_HANDLE hClass)
14831 if (src->gtOper == GT_CALL && src->gtCall.IsVarargs() && IsHfa(hClass))
14833 int hfaSlots = GetHfaCount(hClass);
14834 var_types hfaType = GetHfaType(hClass);
14836 // If we have varargs, the importer morphs the method's return type to be "int" irrespective of
14837 // its original type (struct/float), because the ABI calls for the return in integer registers.
14838 // We don't want struct promotion to replace an expression like this:
14839 // lclFld_int = callvar_int() into lclFld_float = callvar_int();
14840 // This means an int is getting assigned to a float without a cast. Prevent the promotion.
14841 if ((hfaType == TYP_DOUBLE && hfaSlots == sizeof(double) / REGSIZE_BYTES) ||
14842 (hfaType == TYP_FLOAT && hfaSlots == sizeof(float) / REGSIZE_BYTES))
14844 // Make sure this struct type stays as struct so we can receive the call in a struct.
14845 lvaTable[tmpNum].lvIsMultiRegRet = true;
14849 #endif // _TARGET_ARM_
14851 #if FEATURE_MULTIREG_RET
14852 GenTreePtr Compiler::impAssignMultiRegTypeToVar(GenTreePtr op, CORINFO_CLASS_HANDLE hClass)
14854 unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Return value temp for multireg return."));
14855 impAssignTempGen(tmpNum, op, hClass, (unsigned)CHECK_SPILL_NONE);
14856 GenTreePtr ret = gtNewLclvNode(tmpNum, op->gtType);
14858 // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
14859 ret->gtFlags |= GTF_DONT_CSE;
14861 assert(IsMultiRegReturnedType(hClass));
14863 // Mark the var so that fields are not promoted and stay together.
14864 lvaTable[tmpNum].lvIsMultiRegRet = true;
14868 #endif // FEATURE_MULTIREG_RET
14870 // Do the import for a return.
14871 // Returns false if inlining was aborted.
14872 // opcode can be CEE_RET, or a call opcode in the case of a tail.call
14873 bool Compiler::impReturnInstruction(BasicBlock* block, int prefixFlags, OPCODE& opcode)
14875 if (tiVerificationNeeded)
14877 verVerifyThisPtrInitialised();
14879 unsigned expectedStack = 0;
14880 if (info.compRetType != TYP_VOID)
14882 typeInfo tiVal = impStackTop().seTypeInfo;
14883 typeInfo tiDeclared =
14884 verMakeTypeInfo(info.compMethodInfo->args.retType, info.compMethodInfo->args.retTypeClass);
14886 Verify(!verIsByRefLike(tiDeclared) || verIsSafeToReturnByRef(tiVal), "byref return");
14888 Verify(tiCompatibleWith(tiVal, tiDeclared.NormaliseForStack(), true), "type mismatch");
14891 Verify(verCurrentState.esStackDepth == expectedStack, "stack non-empty on return");
14894 GenTree* op2 = nullptr;
14895 GenTree* op1 = nullptr;
14896 CORINFO_CLASS_HANDLE retClsHnd = nullptr;
14898 if (info.compRetType != TYP_VOID)
14900 StackEntry se = impPopStack(retClsHnd);
14903 if (!compIsForInlining())
14905 impBashVarAddrsToI(op2);
14906 op2 = impImplicitIorI4Cast(op2, info.compRetType);
14907 op2 = impImplicitR4orR8Cast(op2, info.compRetType);
14908 assertImp((genActualType(op2->TypeGet()) == genActualType(info.compRetType)) ||
14909 ((op2->TypeGet() == TYP_I_IMPL) && (info.compRetType == TYP_BYREF)) ||
14910 ((op2->TypeGet() == TYP_BYREF) && (info.compRetType == TYP_I_IMPL)) ||
14911 (varTypeIsFloating(op2->gtType) && varTypeIsFloating(info.compRetType)) ||
14912 (varTypeIsStruct(op2) && varTypeIsStruct(info.compRetType)));
14915 if (opts.compGcChecks && info.compRetType == TYP_REF)
14917 // DDB 3483 : JIT Stress: early termination of GC ref's life time in exception code path
14918 // VSW 440513: Incorrect gcinfo on the return value under COMPlus_JitGCChecks=1 for methods with
14921 assert(op2->gtType == TYP_REF);
14923 // confirm that the argument is a GC pointer (for debugging (GC stress))
14924 GenTreeArgList* args = gtNewArgList(op2);
14925 op2 = gtNewHelperCallNode(CORINFO_HELP_CHECK_OBJ, TYP_REF, 0, args);
14929 printf("\ncompGcChecks tree:\n");
14937 // inlinee's stack should be empty now.
14938 assert(verCurrentState.esStackDepth == 0);
14943 printf("\n\n Inlinee Return expression (before normalization) =>\n");
14948 // Make sure the type matches the original call.
14950 var_types returnType = genActualType(op2->gtType);
14951 var_types originalCallType = impInlineInfo->inlineCandidateInfo->fncRetType;
14952 if ((returnType != originalCallType) && (originalCallType == TYP_STRUCT))
14954 originalCallType = impNormStructType(impInlineInfo->inlineCandidateInfo->methInfo.args.retTypeClass);
14957 if (returnType != originalCallType)
14959 compInlineResult->NoteFatal(InlineObservation::CALLSITE_RETURN_TYPE_MISMATCH);
14963 // Below, we are going to set impInlineInfo->retExpr to the tree with the return
14964 // expression. At this point, retExpr could already be set if there are multiple
14965 // return blocks (meaning lvaInlineeReturnSpillTemp != BAD_VAR_NUM) and one of
14966 // the other blocks already set it. If there is only a single return block,
14967 // retExpr shouldn't be set. However, this is not true if we reimport a block
14968 // with a return. In that case, retExpr will be set, then the block will be
14969 // reimported, but retExpr won't get cleared as part of setting the block to
14970 // be reimported. The reimported retExpr value should be the same, so even if
14971 // we don't unconditionally overwrite it, it shouldn't matter.
14972 if (info.compRetNativeType != TYP_STRUCT)
14974 // compRetNativeType is not TYP_STRUCT.
14975 // This implies it could be either a scalar type or SIMD vector type or
14976 // a struct type that can be normalized to a scalar type.
14978 if (varTypeIsStruct(info.compRetType))
14980 noway_assert(info.compRetBuffArg == BAD_VAR_NUM);
14981 // adjust the type away from struct to integral
14982 // and no normalizing
14983 op2 = impFixupStructReturnType(op2, retClsHnd);
14987 // Do we have to normalize?
14988 var_types fncRealRetType = JITtype2varType(info.compMethodInfo->args.retType);
14989 if ((varTypeIsSmall(op2->TypeGet()) || varTypeIsSmall(fncRealRetType)) &&
14990 fgCastNeeded(op2, fncRealRetType))
14992 // Small-typed return values are normalized by the callee
14993 op2 = gtNewCastNode(TYP_INT, op2, fncRealRetType);
14997 if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
14999 assert(info.compRetNativeType != TYP_VOID &&
15000 (fgMoreThanOneReturnBlock() || impInlineInfo->hasPinnedLocals));
15002 // This is a bit of a workaround...
15003 // If we are inlining a call that returns a struct, where the actual "native" return type is
15004 // not a struct (for example, the struct is composed of exactly one int, and the native
15005 // return type is thus an int), and the inlinee has multiple return blocks (thus,
15006 // lvaInlineeReturnSpillTemp is != BAD_VAR_NUM, and is the index of a local var that is set
15007 // to the *native* return type), and at least one of the return blocks is the result of
15008 // a call, then we have a problem. The situation is like this (from a failed test case):
15011 // // Note: valuetype plinq_devtests.LazyTests/LIX is a struct with only a single int
15012 // call !!0 [mscorlib]System.Threading.LazyInitializer::EnsureInitialized<valuetype
15013 // plinq_devtests.LazyTests/LIX>(!!0&, bool&, object&, class [mscorlib]System.Func`1<!!0>)
15017 // ldobj !!T // this gets bashed to a GT_LCL_FLD, type TYP_INT
15020 // call !!0 System.Threading.LazyInitializer::EnsureInitializedCore<!!0>(!!0&, bool&,
15021 // object&, class System.Func`1<!!0>)
15024 // In the code above, when we call impFixupStructReturnType(), we will change the op2 return type
15025 // of the inlinee return node, but we don't do that for GT_CALL nodes, which we delay until
15026 // morphing when we call fgFixupStructReturn(). We do this, apparently, to handle nested
15027 // inlining properly by leaving the correct type on the GT_CALL node through importing.
15029 // To fix this, for this case, we temporarily change the GT_CALL node type to the
15030 // native return type, which is what it will be set to eventually. We generate the
15031 // assignment to the return temp, using the correct type, and then restore the GT_CALL
15032 // node type. During morphing, the GT_CALL will get the correct, final, native return type.
15034 bool restoreType = false;
15035 if ((op2->OperGet() == GT_CALL) && (info.compRetType == TYP_STRUCT))
15037 noway_assert(op2->TypeGet() == TYP_STRUCT);
15038 op2->gtType = info.compRetNativeType;
15039 restoreType = true;
15042 impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(),
15043 (unsigned)CHECK_SPILL_ALL);
15045 GenTreePtr tmpOp2 = gtNewLclvNode(lvaInlineeReturnSpillTemp, op2->TypeGet());
15049 op2->gtType = TYP_STRUCT; // restore it to what it was
15055 if (impInlineInfo->retExpr)
15057 // Some other block(s) have seen the CEE_RET first.
15058 // Better they spilled to the same temp.
15059 assert(impInlineInfo->retExpr->gtOper == GT_LCL_VAR);
15060 assert(impInlineInfo->retExpr->gtLclVarCommon.gtLclNum == op2->gtLclVarCommon.gtLclNum);
15068 printf("\n\n Inlinee Return expression (after normalization) =>\n");
15073 // Report the return expression
15074 impInlineInfo->retExpr = op2;
15078 // compRetNativeType is TYP_STRUCT.
15079 // This implies that struct return via RetBuf arg or multi-reg struct return
15081 GenTreePtr iciCall = impInlineInfo->iciCall;
15082 assert(iciCall->gtOper == GT_CALL);
15084 // Assign the inlinee return into a spill temp.
15085 // spill temp only exists if there are multiple return points
15086 if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15088 // in this case we have to insert multiple struct copies to the temp
15089 // and the retexpr is just the temp.
15090 assert(info.compRetNativeType != TYP_VOID);
15091 assert(fgMoreThanOneReturnBlock() || impInlineInfo->hasPinnedLocals);
15093 impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(),
15094 (unsigned)CHECK_SPILL_ALL);
15097 #if defined(_TARGET_ARM_) || defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15098 #if defined(_TARGET_ARM_)
15099 // TODO-ARM64-NYI: HFA
15100 // TODO-AMD64-Unix and TODO-ARM: once the ARM64 functionality is implemented, the
15101 // next ifdefs could be refactored into a single method with the ifdef inside.
15102 if (IsHfa(retClsHnd))
15104 // Same as !IsHfa but just don't bother with impAssignStructPtr.
15105 #else // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15106 ReturnTypeDesc retTypeDesc;
15107 retTypeDesc.InitializeStructReturnType(this, retClsHnd);
15108 unsigned retRegCount = retTypeDesc.GetReturnRegCount();
15110 if (retRegCount != 0)
15112 // If single eightbyte, the return type would have been normalized and there won't be a temp var.
15113 // This code will be called only if the struct return has not been normalized (i.e. 2 eightbytes -
15115 assert(retRegCount == MAX_RET_REG_COUNT);
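// Illustrative (System V AMD64 assumption): a struct such as { long x; long y; } spans two
// eightbytes and is returned in two registers, so retRegCount is 2 here; a struct wrapping a
// single int would have been normalized to a scalar return and would never reach this path.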
15116 // Same as !structDesc.passedInRegisters but just don't bother with impAssignStructPtr.
15117 CLANG_FORMAT_COMMENT_ANCHOR;
15118 #endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15120 if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15122 if (!impInlineInfo->retExpr)
15124 #if defined(_TARGET_ARM_)
15125 impInlineInfo->retExpr = gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType);
15126 #else // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15127 // The inlinee compiler has figured out the type of the temp already. Use it here.
15128 impInlineInfo->retExpr =
15129 gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType);
15130 #endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15135 impInlineInfo->retExpr = op2;
15139 #elif defined(_TARGET_ARM64_)
15140 ReturnTypeDesc retTypeDesc;
15141 retTypeDesc.InitializeStructReturnType(this, retClsHnd);
15142 unsigned retRegCount = retTypeDesc.GetReturnRegCount();
15144 if (retRegCount != 0)
15146 assert(!iciCall->AsCall()->HasRetBufArg());
15147 assert(retRegCount >= 2);
15148 if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15150 if (!impInlineInfo->retExpr)
15152 // The inlinee compiler has figured out the type of the temp already. Use it here.
15153 impInlineInfo->retExpr =
15154 gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType);
15159 impInlineInfo->retExpr = op2;
15163 #endif // defined(_TARGET_ARM64_)
15165 assert(iciCall->AsCall()->HasRetBufArg());
15166 GenTreePtr dest = gtCloneExpr(iciCall->gtCall.gtCallArgs->gtOp.gtOp1);
15167 // spill temp only exists if there are multiple return points
15168 if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15170 // if this is the first return we have seen set the retExpr
15171 if (!impInlineInfo->retExpr)
15173 impInlineInfo->retExpr =
15174 impAssignStructPtr(dest, gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType),
15175 retClsHnd, (unsigned)CHECK_SPILL_ALL);
15180 impInlineInfo->retExpr = impAssignStructPtr(dest, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL);
15187 if (compIsForInlining())
15192 if (info.compRetType == TYP_VOID)
15195 op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID);
15197 else if (info.compRetBuffArg != BAD_VAR_NUM)
15199 // Assign value to return buff (first param)
15200 GenTreePtr retBuffAddr = gtNewLclvNode(info.compRetBuffArg, TYP_BYREF, impCurStmtOffs);
15202 op2 = impAssignStructPtr(retBuffAddr, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL);
15203 impAppendTree(op2, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
15205 // There are cases where the address of the implicit RetBuf should be returned explicitly (in RAX).
15206 CLANG_FORMAT_COMMENT_ANCHOR;
15208 #if defined(_TARGET_AMD64_)
15210 // The x64 (System V and Win64) calling conventions require the implicit
15211 // return buffer to be returned explicitly (in RAX).
15212 // Change the return type to be BYREF.
15213 op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF));
15214 #else // !defined(_TARGET_AMD64_)
15215 // On non-AMD64 targets, the profiler hook requires the implicit RetBuf to be returned explicitly (in RAX).
15216 // In that case the return value of the function is changed to BYREF.
15217 // If the profiler hook is not needed, the return type of the function is TYP_VOID.
15218 if (compIsProfilerHookNeeded())
15220 op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF));
15225 op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID);
15227 #endif // !defined(_TARGET_AMD64_)
15229 else if (varTypeIsStruct(info.compRetType))
15231 #if !FEATURE_MULTIREG_RET
15232 // For both ARM architectures the HFA native types are maintained as structs.
15233 // Also on System V AMD64 the multireg structs returns are also left as structs.
15234 noway_assert(info.compRetNativeType != TYP_STRUCT);
15236 op2 = impFixupStructReturnType(op2, retClsHnd);
15238 op1 = gtNewOperNode(GT_RETURN, genActualType(info.compRetNativeType), op2);
15243 op1 = gtNewOperNode(GT_RETURN, genActualType(info.compRetType), op2);
15246 // We must have imported a tailcall and jumped to RET
15247 if (prefixFlags & PREFIX_TAILCALL)
15249 #ifndef _TARGET_AMD64_
15251 // This cannot be asserted on Amd64 since we permit the following IL pattern:
15255 assert(verCurrentState.esStackDepth == 0 && impOpcodeIsCallOpcode(opcode));
15258 opcode = CEE_RET; // To prevent trying to spill if CALL_SITE_BOUNDARIES
15260 // impImportCall() would have already appended TYP_VOID calls
15261 if (info.compRetType == TYP_VOID)
15267 impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
15269 // Remember at which BC offset the tree was finished
15270 impNoteLastILoffs();
15275 /*****************************************************************************
15276 * Mark the block as unimported.
15277 * Note that the caller is responsible for calling impImportBlockPending(),
15278 * with the appropriate stack-state
15281 inline void Compiler::impReimportMarkBlock(BasicBlock* block)
15284 if (verbose && (block->bbFlags & BBF_IMPORTED))
15286 printf("\nBB%02u will be reimported\n", block->bbNum);
15290 block->bbFlags &= ~BBF_IMPORTED;
15293 /*****************************************************************************
15294 * Mark the successors of the given block as unimported.
15295 * Note that the caller is responsible for calling impImportBlockPending()
15296 * for all the successors, with the appropriate stack-state.
15299 void Compiler::impReimportMarkSuccessors(BasicBlock* block)
15301 for (unsigned i = 0; i < block->NumSucc(); i++)
15303 impReimportMarkBlock(block->GetSucc(i));
15307 /*****************************************************************************
15309 * Filter wrapper to handle only passed in exception code
15313 LONG FilterVerificationExceptions(PEXCEPTION_POINTERS pExceptionPointers, LPVOID lpvParam)
15315 if (pExceptionPointers->ExceptionRecord->ExceptionCode == SEH_VERIFICATION_EXCEPTION)
15317 return EXCEPTION_EXECUTE_HANDLER;
15320 return EXCEPTION_CONTINUE_SEARCH;
15323 void Compiler::impVerifyEHBlock(BasicBlock* block, bool isTryStart)
15325 assert(block->hasTryIndex());
15326 assert(!compIsForInlining());
15328 unsigned tryIndex = block->getTryIndex();
15329 EHblkDsc* HBtab = ehGetDsc(tryIndex);
15333 assert(block->bbFlags & BBF_TRY_BEG);
15335 // The Stack must be empty
15337 if (block->bbStkDepth != 0)
15339 BADCODE("Evaluation stack must be empty on entry into a try block");
15343 // Save the stack contents, we'll need to restore it later
15345 SavedStack blockState;
15346 impSaveStackState(&blockState, false);
15348 while (HBtab != nullptr)
15352 // Are we verifying that an instance constructor properly initializes its 'this' pointer once?
15353 // We do not allow the 'this' pointer to be uninitialized when entering most kinds of try regions
15355 if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
15357 // We trigger an invalid program exception here unless we have a try/fault region.
15359 if (HBtab->HasCatchHandler() || HBtab->HasFinallyHandler() || HBtab->HasFilter())
15362 "The 'this' pointer of an instance constructor is not intialized upon entry to a try region");
15366 // Allow a try/fault region to proceed.
15367 assert(HBtab->HasFaultHandler());
15371 /* Recursively process the handler block */
15372 BasicBlock* hndBegBB = HBtab->ebdHndBeg;
15374 // Construct the proper verification stack state
15375 // either empty or one that contains just
15376 // the Exception Object that we are dealing with
15378 verCurrentState.esStackDepth = 0;
15380 if (handlerGetsXcptnObj(hndBegBB->bbCatchTyp))
15382 CORINFO_CLASS_HANDLE clsHnd;
15384 if (HBtab->HasFilter())
15386 clsHnd = impGetObjectClass();
15390 CORINFO_RESOLVED_TOKEN resolvedToken;
15392 resolvedToken.tokenContext = impTokenLookupContextHandle;
15393 resolvedToken.tokenScope = info.compScopeHnd;
15394 resolvedToken.token = HBtab->ebdTyp;
15395 resolvedToken.tokenType = CORINFO_TOKENKIND_Class;
15396 info.compCompHnd->resolveToken(&resolvedToken);
15398 clsHnd = resolvedToken.hClass;
15401 // push the catch arg on the stack, spill to a temp if necessary
15402 // Note: can update HBtab->ebdHndBeg!
15403 hndBegBB = impPushCatchArgOnStack(hndBegBB, clsHnd);
15406 // Queue up the handler for importing
15408 impImportBlockPending(hndBegBB);
15410 if (HBtab->HasFilter())
15412 /* @VERIFICATION : Ideally the end of filter state should get
15413 propagated to the catch handler; this is an incompleteness,
15414 but it is not a security/compliance issue, since the only
15415 interesting state is the 'thisInit' state.
15418 verCurrentState.esStackDepth = 0;
15420 BasicBlock* filterBB = HBtab->ebdFilter;
15422 // push the catch arg on the stack, spill to a temp if necessary
15423 // Note: can update HBtab->ebdFilter!
15424 filterBB = impPushCatchArgOnStack(filterBB, impGetObjectClass());
15426 impImportBlockPending(filterBB);
15429 else if (verTrackObjCtorInitState && HBtab->HasFaultHandler())
15431 /* Recursively process the handler block */
15433 verCurrentState.esStackDepth = 0;
15435 // Queue up the fault handler for importing
15437 impImportBlockPending(HBtab->ebdHndBeg);
15440 // Now process our enclosing try index (if any)
15442 tryIndex = HBtab->ebdEnclosingTryIndex;
15443 if (tryIndex == EHblkDsc::NO_ENCLOSING_INDEX)
15449 HBtab = ehGetDsc(tryIndex);
15453 // Restore the stack contents
15454 impRestoreStackState(&blockState);
15457 //***************************************************************
15458 // Import the instructions for the given basic block. Perform
15459 // verification, throwing an exception on failure. Push any successor blocks that are enabled for the first
15460 // time, or whose verification pre-state is changed.
15463 #pragma warning(push)
15464 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
15466 void Compiler::impImportBlock(BasicBlock* block)
15468 // BBF_INTERNAL blocks only exist during importation due to EH canonicalization. We need to
15469 // handle them specially. In particular, there is no IL to import for them, but we do need
15470 // to mark them as imported and put their successors on the pending import list.
15471 if (block->bbFlags & BBF_INTERNAL)
15473 JITDUMP("Marking BBF_INTERNAL block BB%02u as BBF_IMPORTED\n", block->bbNum);
15474 block->bbFlags |= BBF_IMPORTED;
15476 for (unsigned i = 0; i < block->NumSucc(); i++)
15478 impImportBlockPending(block->GetSucc(i));
15488 /* Make the block globally available */
15493 /* Initialize the debug variables */
15494 impCurOpcName = "unknown";
15495 impCurOpcOffs = block->bbCodeOffs;
15498 /* Set the current stack state to the merged result */
15499 verResetCurrentState(block, &verCurrentState);
15501 /* Now walk the code and import the IL into GenTrees */
15503 struct FilterVerificationExceptionsParam
15508 FilterVerificationExceptionsParam param;
15510 param.pThis = this;
15511 param.block = block;
15513 PAL_TRY(FilterVerificationExceptionsParam*, pParam, ¶m)
15515 /* @VERIFICATION : For now, the only state propagation from try
15516 to its handler is "thisInit" state (stack is empty at start of try).
15517 In general, for state that we track in verification, we need to
15518 model the possibility that an exception might happen at any IL
15519 instruction, so we really need to merge all states that obtain
15520 between IL instructions in a try block into the start states of
15523 However we do not allow the 'this' pointer to be uninitialized when
15524 entering most kinds of try regions (only try/fault are allowed to have
15525 an uninitialized this pointer on entry to the try)
15527 Fortunately, the stack is thrown away when an exception
15528 leads to a handler, so we don't have to worry about that.
15529 We DO, however, have to worry about the "thisInit" state.
15530 But only for the try/fault case.
15532 The only allowed transition is from TIS_Uninit to TIS_Init.
15534 So for a try/fault region for the fault handler block
15535 we will merge the start state of the try begin
15536 and the post-state of each block that is part of this try region
15539 // merge the start state of the try begin
15541 if (pParam->block->bbFlags & BBF_TRY_BEG)
15543 pParam->pThis->impVerifyEHBlock(pParam->block, true);
15546 pParam->pThis->impImportBlockCode(pParam->block);
15548 // As discussed above:
15549 // merge the post-state of each block that is part of this try region
15551 if (pParam->block->hasTryIndex())
15553 pParam->pThis->impVerifyEHBlock(pParam->block, false);
15556 PAL_EXCEPT_FILTER(FilterVerificationExceptions)
15558 verHandleVerificationFailure(block DEBUGARG(false));
15562 if (compDonotInline())
15567 assert(!compDonotInline());
15569 markImport = false;
15573 unsigned baseTmp = NO_BASE_TMP; // input temps assigned to successor blocks
15574 bool reimportSpillClique = false;
15575 BasicBlock* tgtBlock = nullptr;
15577 /* If the stack is non-empty, we might have to spill its contents */
15579 if (verCurrentState.esStackDepth != 0)
15581 impBoxTemp = BAD_VAR_NUM; // if a box temp is used in a block that leaves something
15582 // on the stack, its lifetime is hard to determine, simply
15583 // don't reuse such temps.
15585 GenTreePtr addStmt = nullptr;
15587 /* Do the successors of 'block' have any other predecessors ?
15588 We do not want to do some of the optimizations related to multiRef
15589 if we can reimport blocks */
15591 unsigned multRef = impCanReimport ? unsigned(~0) : 0;
15593 switch (block->bbJumpKind)
15597 /* Temporarily remove the 'jtrue' from the end of the tree list */
15599 assert(impTreeLast);
15600 assert(impTreeLast->gtOper == GT_STMT);
15601 assert(impTreeLast->gtStmt.gtStmtExpr->gtOper == GT_JTRUE);
15603 addStmt = impTreeLast;
15604 impTreeLast = impTreeLast->gtPrev;
15606 /* Note if the next block has more than one ancestor */
15608 multRef |= block->bbNext->bbRefs;
15610 /* Does the next block have temps assigned? */
15612 baseTmp = block->bbNext->bbStkTempsIn;
15613 tgtBlock = block->bbNext;
15615 if (baseTmp != NO_BASE_TMP)
15620 /* Try the target of the jump then */
15622 multRef |= block->bbJumpDest->bbRefs;
15623 baseTmp = block->bbJumpDest->bbStkTempsIn;
15624 tgtBlock = block->bbJumpDest;
15628 multRef |= block->bbJumpDest->bbRefs;
15629 baseTmp = block->bbJumpDest->bbStkTempsIn;
15630 tgtBlock = block->bbJumpDest;
15634 multRef |= block->bbNext->bbRefs;
15635 baseTmp = block->bbNext->bbStkTempsIn;
15636 tgtBlock = block->bbNext;
15641 BasicBlock** jmpTab;
15644 /* Temporarily remove the GT_SWITCH from the end of the tree list */
15646 assert(impTreeLast);
15647 assert(impTreeLast->gtOper == GT_STMT);
15648 assert(impTreeLast->gtStmt.gtStmtExpr->gtOper == GT_SWITCH);
15650 addStmt = impTreeLast;
15651 impTreeLast = impTreeLast->gtPrev;
15653 jmpCnt = block->bbJumpSwt->bbsCount;
15654 jmpTab = block->bbJumpSwt->bbsDstTab;
15658 tgtBlock = (*jmpTab);
15660 multRef |= tgtBlock->bbRefs;
15662 // Thanks to spill cliques, we should have assigned all or none
15663 assert((baseTmp == NO_BASE_TMP) || (baseTmp == tgtBlock->bbStkTempsIn));
15664 baseTmp = tgtBlock->bbStkTempsIn;
15669 } while (++jmpTab, --jmpCnt);
15673 case BBJ_CALLFINALLY:
15674 case BBJ_EHCATCHRET:
15676 case BBJ_EHFINALLYRET:
15677 case BBJ_EHFILTERRET:
15679 NO_WAY("can't have 'unreached' end of BB with non-empty stack");
15683 noway_assert(!"Unexpected bbJumpKind");
15687 assert(multRef >= 1);
15689 /* Do we have a base temp number? */
15691 bool newTemps = (baseTmp == NO_BASE_TMP);
15695 /* Grab enough temps for the whole stack */
15696 baseTmp = impGetSpillTmpBase(block);
15699 /* Spill all stack entries into temps */
15700 unsigned level, tempNum;
15702 JITDUMP("\nSpilling stack entries into temps\n");
15703 for (level = 0, tempNum = baseTmp; level < verCurrentState.esStackDepth; level++, tempNum++)
15705 GenTreePtr tree = verCurrentState.esStack[level].val;
15707 /* VC generates code where it pushes a byref from one branch, and an int (ldc.i4 0) from
15708 the other. This should merge to a byref in unverifiable code.
15709 However, if the branch which leaves the TYP_I_IMPL on the stack is imported first, the
15710 successor would be imported assuming there was a TYP_I_IMPL on
15711 the stack. Thus the value would not get GC-tracked. Hence,
15712 change the temp to TYP_BYREF and reimport the successors.
15713 Note: We should only allow this in unverifiable code.
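As an illustrative sketch, the IL shape being described is roughly:
    brfalse TAKE_INT
    ldloca.s 0     // leaves a TYP_BYREF on the stack
    br MERGE
  TAKE_INT:
    ldc.i4.0       // leaves an integer zero on the stack
  MERGE:
    ...            // both paths spill into the same clique temp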
15715 if (tree->gtType == TYP_BYREF && lvaTable[tempNum].lvType == TYP_I_IMPL && !verNeedsVerification())
15717 lvaTable[tempNum].lvType = TYP_BYREF;
15718 impReimportMarkSuccessors(block);
15722 #ifdef _TARGET_64BIT_
15723 if (genActualType(tree->gtType) == TYP_I_IMPL && lvaTable[tempNum].lvType == TYP_INT)
15725 if (tiVerificationNeeded && tgtBlock->bbEntryState != nullptr &&
15726 (tgtBlock->bbFlags & BBF_FAILED_VERIFICATION) == 0)
15728 // Merge the current state into the entry state of block;
15729 // the call to verMergeEntryStates must have changed
15730 // the entry state of the block by merging the int local var
15731 // and the native-int stack entry.
15732 bool changed = false;
15733 if (verMergeEntryStates(tgtBlock, &changed))
15735 impRetypeEntryStateTemps(tgtBlock);
15736 impReimportBlockPending(tgtBlock);
15741 tgtBlock->bbFlags |= BBF_FAILED_VERIFICATION;
15746 // Some other block in the spill clique set this to "int", but now we have "native int".
15747 // Change the type and go back to re-import any blocks that used the wrong type.
15748 lvaTable[tempNum].lvType = TYP_I_IMPL;
15749 reimportSpillClique = true;
15751 else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_I_IMPL)
15753 // Spill clique has decided this should be "native int", but this block only pushes an "int".
15754 // Insert a sign-extension to "native int" so we match the clique.
15755 verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, TYP_I_IMPL);
15758 // Consider the case where one branch left a 'byref' on the stack and the other leaves
15759 // an 'int'. On 32-bit, this is allowed (in non-verifiable code) since they are the same
15760 // size. JIT64 managed to make this work on 64-bit. For compatibility, we support JIT64
15761 // behavior instead of asserting and then generating bad code (where we save/restore the
15762 // low 32 bits of a byref pointer to an 'int' sized local). If the 'int' side has been
15763 // imported already, we need to change the type of the local and reimport the spill clique.
15764 // If the 'byref' side has imported, we insert a cast from int to 'native int' to match
15765 // the 'byref' size.
15766 if (!tiVerificationNeeded)
15768 if (genActualType(tree->gtType) == TYP_BYREF && lvaTable[tempNum].lvType == TYP_INT)
15770 // Some other block in the spill clique set this to "int", but now we have "byref".
15771 // Change the type and go back to re-import any blocks that used the wrong type.
15772 lvaTable[tempNum].lvType = TYP_BYREF;
15773 reimportSpillClique = true;
15775 else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_BYREF)
15777 // Spill clique has decided this should be "byref", but this block only pushes an "int".
15778 // Insert a sign-extension to "native int" so we match the clique size.
15779 verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, TYP_I_IMPL);
15782 #endif // _TARGET_64BIT_
15784 #if FEATURE_X87_DOUBLES
15785 // X87 stack doesn't differentiate between float/double
15786 // so promoting is no big deal.
15787 // For everybody else keep it as float until we have a collision and then promote
15788 // Just like for x64's TYP_INT<->TYP_I_IMPL
15790 if (multRef > 1 && tree->gtType == TYP_FLOAT)
15792 verCurrentState.esStack[level].val = gtNewCastNode(TYP_DOUBLE, tree, TYP_DOUBLE);
15795 #else // !FEATURE_X87_DOUBLES
15797 if (tree->gtType == TYP_DOUBLE && lvaTable[tempNum].lvType == TYP_FLOAT)
15799 // Some other block in the spill clique set this to "float", but now we have "double".
15800 // Change the type and go back to re-import any blocks that used the wrong type.
15801 lvaTable[tempNum].lvType = TYP_DOUBLE;
15802 reimportSpillClique = true;
15804 else if (tree->gtType == TYP_FLOAT && lvaTable[tempNum].lvType == TYP_DOUBLE)
15806 // Spill clique has decided this should be "double", but this block only pushes a "float".
15807 // Insert a cast to "double" so we match the clique.
15808 verCurrentState.esStack[level].val = gtNewCastNode(TYP_DOUBLE, tree, TYP_DOUBLE);
15811 #endif // FEATURE_X87_DOUBLES
15813 /* If addStmt has a reference to tempNum (can only happen if we
15814 are spilling to the temps already used by a previous block),
15815 we need to spill addStmt */
15817 if (addStmt && !newTemps && gtHasRef(addStmt->gtStmt.gtStmtExpr, tempNum, false))
15819 GenTreePtr addTree = addStmt->gtStmt.gtStmtExpr;
15821 if (addTree->gtOper == GT_JTRUE)
15823 GenTreePtr relOp = addTree->gtOp.gtOp1;
15824 assert(relOp->OperIsCompare());
15826 var_types type = genActualType(relOp->gtOp.gtOp1->TypeGet());
15828 if (gtHasRef(relOp->gtOp.gtOp1, tempNum, false))
15830 unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op1"));
15831 impAssignTempGen(temp, relOp->gtOp.gtOp1, level);
15832 type = genActualType(lvaTable[temp].TypeGet());
15833 relOp->gtOp.gtOp1 = gtNewLclvNode(temp, type);
15836 if (gtHasRef(relOp->gtOp.gtOp2, tempNum, false))
15838 unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op2"));
15839 impAssignTempGen(temp, relOp->gtOp.gtOp2, level);
15840 type = genActualType(lvaTable[temp].TypeGet());
15841 relOp->gtOp.gtOp2 = gtNewLclvNode(temp, type);
15846 assert(addTree->gtOper == GT_SWITCH && genActualType(addTree->gtOp.gtOp1->gtType) == TYP_I_IMPL);
15848 unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt SWITCH"));
15849 impAssignTempGen(temp, addTree->gtOp.gtOp1, level);
15850 addTree->gtOp.gtOp1 = gtNewLclvNode(temp, TYP_I_IMPL);
15854 /* Spill the stack entry, and replace with the temp */
15856 if (!impSpillStackEntry(level, tempNum
15859 true, "Spill Stack Entry"
15865 BADCODE("bad stack state");
15868 // Oops. Something went wrong when spilling. Bad code.
15869 verHandleVerificationFailure(block DEBUGARG(true));
15875 /* Put back the 'jtrue'/'switch' if we removed it earlier */
15879 impAppendStmt(addStmt, (unsigned)CHECK_SPILL_NONE);
15883 // Some of the append/spill logic works on compCurBB
15885 assert(compCurBB == block);
15887 /* Save the tree list in the block */
15888 impEndTreeList(block);
15890 // impEndTreeList sets BBF_IMPORTED on the block
15891 // We do *NOT* want to set it later than this because
15892 // impReimportSpillClique might clear it if this block is both a
15893 // predecessor and successor in the current spill clique
15894 assert(block->bbFlags & BBF_IMPORTED);
15896 // If we had a int/native int, or float/double collision, we need to re-import
15897 if (reimportSpillClique)
15899 // This will re-import all the successors of block (as well as each of their predecessors)
15900 impReimportSpillClique(block);
15902 // For blocks that haven't been imported yet, we still need to mark them as pending import.
15903 for (unsigned i = 0; i < block->NumSucc(); i++)
15905 BasicBlock* succ = block->GetSucc(i);
15906 if ((succ->bbFlags & BBF_IMPORTED) == 0)
15908 impImportBlockPending(succ);
15912 else // the normal case
15914 // otherwise just import the successors of block
15916 /* Does this block jump to any other blocks? */
15917 for (unsigned i = 0; i < block->NumSucc(); i++)
15919 impImportBlockPending(block->GetSucc(i));
15924 #pragma warning(pop)
15927 /*****************************************************************************/
15929 // Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if
15930 // necessary (and ensures that it is a member of the set of BB's on the list, by setting its byte in
15931 // impPendingBlockMembers). Merges the current verification state into the verification state of "block"
15932 // (its "pre-state").
15934 void Compiler::impImportBlockPending(BasicBlock* block)
15939 printf("\nimpImportBlockPending for BB%02u\n", block->bbNum);
15943 // We will add a block to the pending set if it has not already been imported (or needs to be re-imported),
15944 // or if it has, but merging in a predecessor's post-state changes the block's pre-state.
15945 // (When we're doing verification, we always attempt the merge to detect verification errors.)
15947 // If the block has not been imported, add to pending set.
15948 bool addToPending = ((block->bbFlags & BBF_IMPORTED) == 0);
15950 // Initialize bbEntryState just the first time we try to add this block to the pending list.
15951 // Just because bbEntryState is NULL doesn't mean the pre-state wasn't previously set;
15952 // we use NULL to indicate the 'common' state to avoid memory allocation.
15953 if ((block->bbEntryState == nullptr) && ((block->bbFlags & (BBF_IMPORTED | BBF_FAILED_VERIFICATION)) == 0) &&
15954 (impGetPendingBlockMember(block) == 0))
15956 verInitBBEntryState(block, &verCurrentState);
15957 assert(block->bbStkDepth == 0);
15958 block->bbStkDepth = static_cast<unsigned short>(verCurrentState.esStackDepth);
15959 assert(addToPending);
15960 assert(impGetPendingBlockMember(block) == 0);
15964 // The stack should have the same height on entry to the block from all its predecessors.
15965 if (block->bbStkDepth != verCurrentState.esStackDepth)
15969 sprintf_s(buffer, sizeof(buffer),
15970 "Block at offset %4.4x to %4.4x in %s entered with different stack depths.\n"
15971 "Previous depth was %d, current depth is %d",
15972 block->bbCodeOffs, block->bbCodeOffsEnd, info.compFullName, block->bbStkDepth,
15973 verCurrentState.esStackDepth);
15974 buffer[400 - 1] = 0;
15977 NO_WAY("Block entered with different stack depths");
15981 // Additionally, if we need to verify, merge the verification state.
15982 if (tiVerificationNeeded)
15984 // Merge the current state into the entry state of block; if this does not change the entry state
15985 // by merging, do not add the block to the pending-list.
15986 bool changed = false;
15987 if (!verMergeEntryStates(block, &changed))
15989 block->bbFlags |= BBF_FAILED_VERIFICATION;
15990 addToPending = true; // We will pop it off, and check the flag set above.
15994 addToPending = true;
15996 JITDUMP("Adding BB%02u to pending set due to new merge result\n", block->bbNum);
16005 if (block->bbStkDepth > 0)
16007 // We need to fix the types of any spill temps that might have changed:
16008 // int->native int, float->double, int->byref, etc.
16009 impRetypeEntryStateTemps(block);
16012 // OK, we must add to the pending list, if it's not already in it.
16013 if (impGetPendingBlockMember(block) != 0)
16019 // Get an entry to add to the pending list
16023 if (impPendingFree)
16025 // We can reuse one of the freed up dscs.
16026 dsc = impPendingFree;
16027 impPendingFree = dsc->pdNext;
16031 // We have to create a new dsc
16032 dsc = new (this, CMK_Unknown) PendingDsc;
16036 dsc->pdSavedStack.ssDepth = verCurrentState.esStackDepth;
16037 dsc->pdThisPtrInit = verCurrentState.thisInitialized;
16039 // Save the stack trees for later
16041 if (verCurrentState.esStackDepth)
16043 impSaveStackState(&dsc->pdSavedStack, false);
16046 // Add the entry to the pending list
16048 dsc->pdNext = impPendingList;
16049 impPendingList = dsc;
16050 impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set.
16052 // Various assertions now require us to consider the block as not imported (at least for
16053 // the final time...)
16054 block->bbFlags &= ~BBF_IMPORTED;
16059 printf("Added PendingDsc - %08p for BB%02u\n", dspPtr(dsc), block->bbNum);
16064 /*****************************************************************************/
16066 // Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if
16067 // necessary (and ensures that it is a member of the set of BB's on the list, by setting its byte in
16068 // impPendingBlockMembers). Does *NOT* change the existing "pre-state" of the block.
16070 void Compiler::impReimportBlockPending(BasicBlock* block)
16072 JITDUMP("\nimpReimportBlockPending for BB%02u", block->bbNum);
16074 assert(block->bbFlags & BBF_IMPORTED);
16076 // OK, we must add to the pending list, if it's not already in it.
16077 if (impGetPendingBlockMember(block) != 0)
16082 // Get an entry to add to the pending list
16086 if (impPendingFree)
16088 // We can reuse one of the freed up dscs.
16089 dsc = impPendingFree;
16090 impPendingFree = dsc->pdNext;
16094 // We have to create a new dsc
16095 dsc = new (this, CMK_ImpStack) PendingDsc;
16100 if (block->bbEntryState)
16102 dsc->pdThisPtrInit = block->bbEntryState->thisInitialized;
16103 dsc->pdSavedStack.ssDepth = block->bbEntryState->esStackDepth;
16104 dsc->pdSavedStack.ssTrees = block->bbEntryState->esStack;
16108 dsc->pdThisPtrInit = TIS_Bottom;
16109 dsc->pdSavedStack.ssDepth = 0;
16110 dsc->pdSavedStack.ssTrees = nullptr;
16113 // Add the entry to the pending list
16115 dsc->pdNext = impPendingList;
16116 impPendingList = dsc;
16117 impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set.
16119 // Various assertions require us now to consider the block as not imported (at least for
16120 // the final time...)
16121 block->bbFlags &= ~BBF_IMPORTED;
16126 printf("Added PendingDsc - %08p for BB%02u\n", dspPtr(dsc), block->bbNum);
16131 void* Compiler::BlockListNode::operator new(size_t sz, Compiler* comp)
16133 if (comp->impBlockListNodeFreeList == nullptr)
16135 return (BlockListNode*)comp->compGetMem(sizeof(BlockListNode), CMK_BasicBlock);
16139 BlockListNode* res = comp->impBlockListNodeFreeList;
16140 comp->impBlockListNodeFreeList = res->m_next;
16145 void Compiler::FreeBlockListNode(Compiler::BlockListNode* node)
16147 node->m_next = impBlockListNodeFreeList;
16148 impBlockListNodeFreeList = node;
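// Roughly, the walk below alternates between two work lists: starting from "block" on
// the predecessor side, it adds the successors of each predecessor-member and the
// (cheap) predecessors of each successor-member, invoking the callback the first time
// a block joins either side.
//     e.g. with edges A->C, B->C, B->D: starting from B, the successor side picks up
//     {C, D}, whose predecessors add {A, B}, after which nothing new is found.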
16151 void Compiler::impWalkSpillCliqueFromPred(BasicBlock* block, SpillCliqueWalker* callback)
16155 noway_assert(!fgComputePredsDone);
16156 if (!fgCheapPredsValid)
16158 fgComputeCheapPreds();
16161 BlockListNode* succCliqueToDo = nullptr;
16162 BlockListNode* predCliqueToDo = new (this) BlockListNode(block);
16166 // Look at the successors of every member of the predecessor to-do list.
16167 while (predCliqueToDo != nullptr)
16169 BlockListNode* node = predCliqueToDo;
16170 predCliqueToDo = node->m_next;
16171 BasicBlock* blk = node->m_blk;
16172 FreeBlockListNode(node);
16174 for (unsigned succNum = 0; succNum < blk->NumSucc(); succNum++)
16176 BasicBlock* succ = blk->GetSucc(succNum);
16177 // If it's not already in the clique, add it, and also add it
16178 // as a member of the successor "toDo" set.
16179 if (impSpillCliqueGetMember(SpillCliqueSucc, succ) == 0)
16181 callback->Visit(SpillCliqueSucc, succ);
16182 impSpillCliqueSetMember(SpillCliqueSucc, succ, 1);
16183 succCliqueToDo = new (this) BlockListNode(succ, succCliqueToDo);
16188 // Look at the predecessors of every member of the successor to-do list.
16189 while (succCliqueToDo != nullptr)
16191 BlockListNode* node = succCliqueToDo;
16192 succCliqueToDo = node->m_next;
16193 BasicBlock* blk = node->m_blk;
16194 FreeBlockListNode(node);
16196 for (BasicBlockList* pred = blk->bbCheapPreds; pred != nullptr; pred = pred->next)
16198 BasicBlock* predBlock = pred->block;
16199 // If it's not already in the clique, add it, and also add it
16200 // as a member of the predecessor "toDo" set.
16201 if (impSpillCliqueGetMember(SpillCliquePred, predBlock) == 0)
16203 callback->Visit(SpillCliquePred, predBlock);
16204 impSpillCliqueSetMember(SpillCliquePred, predBlock, 1);
16205 predCliqueToDo = new (this) BlockListNode(predBlock, predCliqueToDo);
16212 // If this fails, it means we didn't walk the spill clique properly and somehow managed
16213 // to miss walking back to include the predecessor we started from.
16214 // The most likely cause: missing or out-of-date bbPreds.
16215 assert(impSpillCliqueGetMember(SpillCliquePred, block) != 0);
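// Records the chosen spill temp base on each clique member: bbStkTempsIn when the block
// was reached as a successor, bbStkTempsOut when it was reached as a predecessor.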
16218 void Compiler::SetSpillTempsBase::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk)
16220 if (predOrSucc == SpillCliqueSucc)
16222 assert(blk->bbStkTempsIn == NO_BASE_TMP); // Should not already be a member of a clique as a successor.
16223 blk->bbStkTempsIn = m_baseTmp;
16227 assert(predOrSucc == SpillCliquePred);
16228 assert(blk->bbStkTempsOut == NO_BASE_TMP); // Should not already be a member of a clique as a predecessor.
16229 blk->bbStkTempsOut = m_baseTmp;
16233 void Compiler::ReimportSpillClique::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk)
16235 // For Preds we could be a little smarter and just find the existing store
16236 // and re-type it/add a cast, but that is complicated and hopefully very rare, so
16237 // just re-import the whole block (just like we do for successors)
16239 if (((blk->bbFlags & BBF_IMPORTED) == 0) && (m_pComp->impGetPendingBlockMember(blk) == 0))
16241 // If we haven't imported this block and we're not going to (because it isn't on
16242 // the pending list) then just ignore it for now.
16244 // This block has either never been imported (EntryState == NULL) or it failed
16245 // verification. Neither state requires us to force it to be imported now.
16246 assert((blk->bbEntryState == nullptr) || (blk->bbFlags & BBF_FAILED_VERIFICATION));
16250 // For successors we have a valid verCurrentState, so just mark them for reimport
16251 // the 'normal' way
16252 // Unlike predecessors, we *DO* need to reimport the current block because the
16253 // initial import had the wrong entry state types.
16254 // Similarly, blocks that are currently on the pending list, still need to call
16255 // impImportBlockPending to fixup their entry state.
16256 if (predOrSucc == SpillCliqueSucc)
16258 m_pComp->impReimportMarkBlock(blk);
16260 // Set the current stack state to that of the blk->bbEntryState
16261 m_pComp->verResetCurrentState(blk, &m_pComp->verCurrentState);
16262 assert(m_pComp->verCurrentState.thisInitialized == blk->bbThisOnEntry());
16264 m_pComp->impImportBlockPending(blk);
16266 else if ((blk != m_pComp->compCurBB) && ((blk->bbFlags & BBF_IMPORTED) != 0))
16268 // As described above, we are only visiting predecessors so they can
16269 // add the appropriate casts, since we have already done that for the current
16270 // block, it does not need to be reimported.
16271 // Nor do we need to reimport blocks that are still pending, but not yet
16274 // For predecessors, we have no state to seed the EntryState, so we just have
16275 // to assume the existing one is correct.
16276 // If the block is also a successor, it will get the EntryState properly
16277 // updated when it is visited as a successor in the above "if" block.
16278 assert(predOrSucc == SpillCliquePred);
16279 m_pComp->impReimportBlockPending(blk);
16283 // Re-type the incoming lclVar nodes to match the varDsc.
16284 void Compiler::impRetypeEntryStateTemps(BasicBlock* blk)
16286 if (blk->bbEntryState != nullptr)
16288 EntryState* es = blk->bbEntryState;
16289 for (unsigned level = 0; level < es->esStackDepth; level++)
16291 GenTreePtr tree = es->esStack[level].val;
16292 if ((tree->gtOper == GT_LCL_VAR) || (tree->gtOper == GT_LCL_FLD))
16294 unsigned lclNum = tree->gtLclVarCommon.gtLclNum;
16295 noway_assert(lclNum < lvaCount);
16296 LclVarDsc* varDsc = lvaTable + lclNum;
16297 es->esStack[level].val->gtType = varDsc->TypeGet();
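// Returns the base of the spill temps for this block's spill clique; on first request
// it grabs one temp per live stack slot and propagates the base to every clique member
// via impWalkSpillCliqueFromPred.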
16303 unsigned Compiler::impGetSpillTmpBase(BasicBlock* block)
16305 if (block->bbStkTempsOut != NO_BASE_TMP)
16307 return block->bbStkTempsOut;
16313 printf("\n*************** In impGetSpillTmpBase(BB%02u)\n", block->bbNum);
16317 // Otherwise, choose one, and propagate to all members of the spill clique.
16318 // Grab enough temps for the whole stack.
16319 unsigned baseTmp = lvaGrabTemps(verCurrentState.esStackDepth DEBUGARG("IL Stack Entries"));
16320 SetSpillTempsBase callback(baseTmp);
16322 // We do *NOT* need to reset the SpillClique*Members because a block can only be the predecessor
16323 // to one spill clique, and similarly can only be the successor to one spill clique.
16324 impWalkSpillCliqueFromPred(block, &callback);
16329 void Compiler::impReimportSpillClique(BasicBlock* block)
16334 printf("\n*************** In impReimportSpillClique(BB%02u)\n", block->bbNum);
16338 // If we get here, it is because this block is already part of a spill clique
16339 // and one predecessor had an outgoing live stack slot of type int, and this
16340 // block has an outgoing live stack slot of type native int.
16341 // We need to reset these before traversal because they have already been set
16342 // by the previous walk to determine all the members of the spill clique.
16343 impInlineRoot()->impSpillCliquePredMembers.Reset();
16344 impInlineRoot()->impSpillCliqueSuccMembers.Reset();
16346 ReimportSpillClique callback(this);
16348 impWalkSpillCliqueFromPred(block, &callback);
16351 // Set the pre-state of "block" (which should not have a pre-state allocated) to
16352 // a copy of "srcState", cloning tree pointers as required.
16353 void Compiler::verInitBBEntryState(BasicBlock* block, EntryState* srcState)
16355 if (srcState->esStackDepth == 0 && srcState->thisInitialized == TIS_Bottom)
16357 block->bbEntryState = nullptr;
16361 block->bbEntryState = (EntryState*)compGetMemA(sizeof(EntryState));
16363 // block->bbEntryState.esRefcount = 1;
16365 block->bbEntryState->esStackDepth = srcState->esStackDepth;
16366 block->bbEntryState->thisInitialized = TIS_Bottom;
16368 if (srcState->esStackDepth > 0)
16370 block->bbSetStack(new (this, CMK_Unknown) StackEntry[srcState->esStackDepth]);
16371 unsigned stackSize = srcState->esStackDepth * sizeof(StackEntry);
16373 memcpy(block->bbEntryState->esStack, srcState->esStack, stackSize);
16374 for (unsigned level = 0; level < srcState->esStackDepth; level++)
16376 GenTreePtr tree = srcState->esStack[level].val;
16377 block->bbEntryState->esStack[level].val = gtCloneExpr(tree);
16381 if (verTrackObjCtorInitState)
16383 verSetThisInit(block, srcState->thisInitialized);
16389 void Compiler::verSetThisInit(BasicBlock* block, ThisInitState tis)
16391 assert(tis != TIS_Bottom); // Precondition.
16392 if (block->bbEntryState == nullptr)
16394 block->bbEntryState = new (this, CMK_Unknown) EntryState();
16397 block->bbEntryState->thisInitialized = tis;
16401 * Resets the current state to the state at the start of the basic block
16403 void Compiler::verResetCurrentState(BasicBlock* block, EntryState* destState)
16406 if (block->bbEntryState == nullptr)
16408 destState->esStackDepth = 0;
16409 destState->thisInitialized = TIS_Bottom;
16413 destState->esStackDepth = block->bbEntryState->esStackDepth;
16415 if (destState->esStackDepth > 0)
16417 unsigned stackSize = destState->esStackDepth * sizeof(StackEntry);
16419 memcpy(destState->esStack, block->bbStackOnEntry(), stackSize);
16422 destState->thisInitialized = block->bbThisOnEntry();
16427 ThisInitState BasicBlock::bbThisOnEntry()
16429 return bbEntryState ? bbEntryState->thisInitialized : TIS_Bottom;
16432 unsigned BasicBlock::bbStackDepthOnEntry()
16434 return (bbEntryState ? bbEntryState->esStackDepth : 0);
16437 void BasicBlock::bbSetStack(void* stackBuffer)
16439 assert(bbEntryState);
16440 assert(stackBuffer);
16441 bbEntryState->esStack = (StackEntry*)stackBuffer;
16444 StackEntry* BasicBlock::bbStackOnEntry()
16446 assert(bbEntryState);
16447 return bbEntryState->esStack;
16450 void Compiler::verInitCurrentState()
16452 verTrackObjCtorInitState = FALSE;
16453 verCurrentState.thisInitialized = TIS_Bottom;
16455 if (tiVerificationNeeded)
16457 // Track this ptr initialization
16458 if (!info.compIsStatic && (info.compFlags & CORINFO_FLG_CONSTRUCTOR) && lvaTable[0].lvVerTypeInfo.IsObjRef())
16460 verTrackObjCtorInitState = TRUE;
16461 verCurrentState.thisInitialized = TIS_Uninit;
16465 // initialize stack info
16467 verCurrentState.esStackDepth = 0;
16468 assert(verCurrentState.esStack != nullptr);
16470 // copy current state to entry state of first BB
16471 verInitBBEntryState(fgFirstBB, &verCurrentState);
16474 Compiler* Compiler::impInlineRoot()
16476 if (impInlineInfo == nullptr)
16482 return impInlineInfo->InlineRoot;
16486 BYTE Compiler::impSpillCliqueGetMember(SpillCliqueDir predOrSucc, BasicBlock* blk)
16488 if (predOrSucc == SpillCliquePred)
16490 return impInlineRoot()->impSpillCliquePredMembers.Get(blk->bbInd());
16494 assert(predOrSucc == SpillCliqueSucc);
16495 return impInlineRoot()->impSpillCliqueSuccMembers.Get(blk->bbInd());
16499 void Compiler::impSpillCliqueSetMember(SpillCliqueDir predOrSucc, BasicBlock* blk, BYTE val)
16501 if (predOrSucc == SpillCliquePred)
16503 impInlineRoot()->impSpillCliquePredMembers.Set(blk->bbInd(), val);
16507 assert(predOrSucc == SpillCliqueSucc);
16508 impInlineRoot()->impSpillCliqueSuccMembers.Set(blk->bbInd(), val);
16512 /*****************************************************************************
16514 * Convert the instrs ("import") into our internal format (trees). The
16515 * basic flowgraph has already been constructed and is passed in.
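 *
 * Import is work-list driven: blocks are queued by impImportBlockPending, and each is
 * popped here with its saved stack state restored before being imported.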
16518 void Compiler::impImport(BasicBlock* method)
16523 printf("*************** In impImport() for %s\n", info.compFullName);
16527 /* Allocate the stack contents */
16529 if (info.compMaxStack <= sizeof(impSmallStack) / sizeof(impSmallStack[0]))
16531 /* Use local variable, don't waste time allocating on the heap */
16533 impStkSize = sizeof(impSmallStack) / sizeof(impSmallStack[0]);
16534 verCurrentState.esStack = impSmallStack;
16538 impStkSize = info.compMaxStack;
16539 verCurrentState.esStack = new (this, CMK_ImpStack) StackEntry[impStkSize];
16542 // initialize the entry state at start of method
16543 verInitCurrentState();
16545 // Initialize stuff related to figuring "spill cliques" (see spec comment for impGetSpillTmpBase).
16546 Compiler* inlineRoot = impInlineRoot();
16547 if (this == inlineRoot) // These are only used on the root of the inlining tree.
16549 // We have initialized these previously, but to size 0. Make them larger.
16550 impPendingBlockMembers.Init(getAllocator(), fgBBNumMax * 2);
16551 impSpillCliquePredMembers.Init(getAllocator(), fgBBNumMax * 2);
16552 impSpillCliqueSuccMembers.Init(getAllocator(), fgBBNumMax * 2);
16554 inlineRoot->impPendingBlockMembers.Reset(fgBBNumMax * 2);
16555 inlineRoot->impSpillCliquePredMembers.Reset(fgBBNumMax * 2);
16556 inlineRoot->impSpillCliqueSuccMembers.Reset(fgBBNumMax * 2);
16557 impBlockListNodeFreeList = nullptr;
16560 impLastILoffsStmt = nullptr;
16561 impNestedStackSpill = false;
16563 impBoxTemp = BAD_VAR_NUM;
16565 impPendingList = impPendingFree = nullptr;
16567 /* Add the entry-point to the worker-list */
16569 // Skip leading internal blocks. There can be one as a leading scratch BB, and more
16570 // from EH normalization.
16571 // NOTE: It might be possible to always just put fgFirstBB on the pending list, and let everything else just fall
16573 for (; method->bbFlags & BBF_INTERNAL; method = method->bbNext)
16575 // Treat these as imported.
16576 assert(method->bbJumpKind == BBJ_NONE); // We assume all the leading ones are fallthrough.
16577 JITDUMP("Marking leading BBF_INTERNAL block BB%02u as BBF_IMPORTED\n", method->bbNum);
16578 method->bbFlags |= BBF_IMPORTED;
16581 impImportBlockPending(method);
16583 /* Import blocks in the worker-list until there are no more */
16585 while (impPendingList)
16587 /* Remove the entry at the front of the list */
16589 PendingDsc* dsc = impPendingList;
16590 impPendingList = impPendingList->pdNext;
16591 impSetPendingBlockMember(dsc->pdBB, 0);
16593 /* Restore the stack state */
16595 verCurrentState.thisInitialized = dsc->pdThisPtrInit;
16596 verCurrentState.esStackDepth = dsc->pdSavedStack.ssDepth;
16597 if (verCurrentState.esStackDepth)
16599 impRestoreStackState(&dsc->pdSavedStack);
16602 /* Add the entry to the free list for reuse */
16604 dsc->pdNext = impPendingFree;
16605 impPendingFree = dsc;
16607 /* Now import the block */
16609 if (dsc->pdBB->bbFlags & BBF_FAILED_VERIFICATION)
16612 #ifdef _TARGET_64BIT_
16613 // On AMD64, during verification we have to match JIT64 behavior since the VM is very tightly
16614 // coupled with the JIT64 IL Verification logic. Look inside verHandleVerificationFailure
16615 // method for further explanation on why we raise this exception instead of making the jitted
16616 // code throw the verification exception during execution.
16617 if (tiVerificationNeeded && opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IMPORT_ONLY))
16619 BADCODE("Basic block marked as not verifiable");
16622 #endif // _TARGET_64BIT_
16624 verConvertBBToThrowVerificationException(dsc->pdBB DEBUGARG(true));
16625 impEndTreeList(dsc->pdBB);
16630 impImportBlock(dsc->pdBB);
16632 if (compDonotInline())
16636 if (compIsForImportOnly() && !tiVerificationNeeded)
16644 if (verbose && info.compXcptnsCount)
16646 printf("\nAfter impImport() added block for try,catch,finally");
16647 fgDispBasicBlocks();
16651 // Used in impImportBlockPending() for STRESS_CHK_REIMPORT
16652 for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
16654 block->bbFlags &= ~BBF_VISITED;
16658 assert(!compIsForInlining() || !tiVerificationNeeded);
16661 // Checks if a typeinfo (usually stored in the type stack) is a struct.
16662 // The invariant here is that if it's not a ref or a method and has a class handle,
16663 // it's a valuetype.
16664 bool Compiler::impIsValueType(typeInfo* pTypeInfo)
16666 if (pTypeInfo && pTypeInfo->IsValueClassWithClsHnd())
16676 /*****************************************************************************
16677 * Check to see if the tree is the address of a local or
16678 the address of a field in a local.
16680 *lclVarTreeOut will contain the GT_LCL_VAR tree when it returns TRUE.
16684 BOOL Compiler::impIsAddressInLocal(GenTreePtr tree, GenTreePtr* lclVarTreeOut)
16686 if (tree->gtOper != GT_ADDR)
16691 GenTreePtr op = tree->gtOp.gtOp1;
16692 while (op->gtOper == GT_FIELD)
16694 op = op->gtField.gtFldObj;
16695 if (op && op->gtOper == GT_ADDR) // Skip static fields where op will be NULL.
16697 op = op->gtOp.gtOp1;
16705 if (op->gtOper == GT_LCL_VAR)
16707 *lclVarTreeOut = op;
16716 //------------------------------------------------------------------------
16717 // impMakeDiscretionaryInlineObservations: make observations that help
16718 // determine the profitability of a discretionary inline
16721 // pInlineInfo -- InlineInfo for the inline, or null for the prejit root
16722 // inlineResult -- InlineResult accumulating information about this inline
16725 // If inlining or prejitting the root, this method also makes
16726 // various observations about the method that factor into inline
16727 // decisions. It sets `compNativeSizeEstimate` as a side effect.
16729 void Compiler::impMakeDiscretionaryInlineObservations(InlineInfo* pInlineInfo, InlineResult* inlineResult)
16731 assert(pInlineInfo != nullptr && compIsForInlining() || // Perform the actual inlining.
16732 pInlineInfo == nullptr && !compIsForInlining() // Calculate the static inlining hint for ngen.
16735 // If we're really inlining, we should just have one result in play.
16736 assert((pInlineInfo == nullptr) || (inlineResult == pInlineInfo->inlineResult));
16738 // If this is a "forceinline" method, the JIT probably shouldn't have gone
16739 // to the trouble of estimating the native code size. Even if it did, it
16740 // shouldn't be relying on the result of this method.
16741 assert(inlineResult->GetObservation() == InlineObservation::CALLEE_IS_DISCRETIONARY_INLINE);
16743 // Note if the caller contains NEWOBJ or NEWARR.
16744 Compiler* rootCompiler = impInlineRoot();
16746 if ((rootCompiler->optMethodFlags & OMF_HAS_NEWARRAY) != 0)
16748 inlineResult->Note(InlineObservation::CALLER_HAS_NEWARRAY);
16751 if ((rootCompiler->optMethodFlags & OMF_HAS_NEWOBJ) != 0)
16753 inlineResult->Note(InlineObservation::CALLER_HAS_NEWOBJ);
16756 bool calleeIsStatic = (info.compFlags & CORINFO_FLG_STATIC) != 0;
16757 bool isSpecialMethod = (info.compFlags & CORINFO_FLG_CONSTRUCTOR) != 0;
16759 if (isSpecialMethod)
16761 if (calleeIsStatic)
16763 inlineResult->Note(InlineObservation::CALLEE_IS_CLASS_CTOR);
16767 inlineResult->Note(InlineObservation::CALLEE_IS_INSTANCE_CTOR);
16770 else if (!calleeIsStatic)
16772 // Callee is an instance method.
16774 // Check if the callee has the same 'this' as the root.
16775 if (pInlineInfo != nullptr)
16777 GenTreePtr thisArg = pInlineInfo->iciCall->gtCall.gtCallObjp;
16779 bool isSameThis = impIsThis(thisArg);
16780 inlineResult->NoteBool(InlineObservation::CALLSITE_IS_SAME_THIS, isSameThis);
16784 // Note if the callee's class is a promotable struct
16785 if ((info.compClassAttr & CORINFO_FLG_VALUECLASS) != 0)
16787 lvaStructPromotionInfo structPromotionInfo;
16788 lvaCanPromoteStructType(info.compClassHnd, &structPromotionInfo, false);
16789 if (structPromotionInfo.canPromote)
16791 inlineResult->Note(InlineObservation::CALLEE_CLASS_PROMOTABLE);
16795 #ifdef FEATURE_SIMD
16797 // Note if this method has SIMD args or a SIMD return value
16798 if (pInlineInfo != nullptr && pInlineInfo->hasSIMDTypeArgLocalOrReturn)
16800 inlineResult->Note(InlineObservation::CALLEE_HAS_SIMD);
16803 #endif // FEATURE_SIMD
16805 // Roughly classify callsite frequency.
16806 InlineCallsiteFrequency frequency = InlineCallsiteFrequency::UNUSED;
16808 // If this is a prejit root, or a maximally hot block...
16809 if ((pInlineInfo == nullptr) || (pInlineInfo->iciBlock->bbWeight >= BB_MAX_WEIGHT))
16811 frequency = InlineCallsiteFrequency::HOT;
16813 // No training data. Look for loop-like things.
16814 // We consider a recursive call loop-like. Do not give the inlining boost to the method itself.
16815 // However, give it to things nearby.
16816 else if ((pInlineInfo->iciBlock->bbFlags & BBF_BACKWARD_JUMP) &&
16817 (pInlineInfo->fncHandle != pInlineInfo->inlineCandidateInfo->ilCallerHandle))
16819 frequency = InlineCallsiteFrequency::LOOP;
16821 else if ((pInlineInfo->iciBlock->bbFlags & BBF_PROF_WEIGHT) && (pInlineInfo->iciBlock->bbWeight > BB_ZERO_WEIGHT))
16823 frequency = InlineCallsiteFrequency::WARM;
16825 // Now modify the multiplier based on where we're called from.
16826 else if (pInlineInfo->iciBlock->isRunRarely() || ((info.compFlags & FLG_CCTOR) == FLG_CCTOR))
16828 frequency = InlineCallsiteFrequency::RARE;
16832 frequency = InlineCallsiteFrequency::BORING;
16835 // Also capture the block weight of the call site. In the prejit
16836 // root case, assume there's some hot call site for this method.
16837 unsigned weight = 0;
16839 if (pInlineInfo != nullptr)
16841 weight = pInlineInfo->iciBlock->bbWeight;
16845 weight = BB_MAX_WEIGHT;
16848 inlineResult->NoteInt(InlineObservation::CALLSITE_FREQUENCY, static_cast<int>(frequency));
16849 inlineResult->NoteInt(InlineObservation::CALLSITE_WEIGHT, static_cast<int>(weight));
16852 /*****************************************************************************
16853 This method makes STATIC inlining decision based on the IL code.
16854 It should not make any inlining decision based on the context.
16855 If forceInline is true, then the inlining decision should not depend on
16856 performance heuristics (code size, etc.).
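
   Each check below records an observation on the InlineResult; fatal observations
   (EH clauses, missing body, managed varargs, too many locals or arguments) reject
   the candidate outright, while code size and maxstack are noted for the heuristics.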
16859 void Compiler::impCanInlineIL(CORINFO_METHOD_HANDLE fncHandle,
16860 CORINFO_METHOD_INFO* methInfo,
16862 InlineResult* inlineResult)
16864 unsigned codeSize = methInfo->ILCodeSize;
16866 // We shouldn't have made up our minds yet...
16867 assert(!inlineResult->IsDecided());
16869 if (methInfo->EHcount)
16871 inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_EH);
16875 if ((methInfo->ILCode == nullptr) || (codeSize == 0))
16877 inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NO_BODY);
16881 // For now we don't inline varargs (import code can't handle it)
16883 if (methInfo->args.isVarArg())
16885 inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS);
16889 // Reject if it has too many locals.
16890 // This is currently an implementation limit due to fixed-size arrays in the
16891 // inline info, rather than a performance heuristic.
16893 inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_LOCALS, methInfo->locals.numArgs);
16895 if (methInfo->locals.numArgs > MAX_INL_LCLS)
16897 inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_LOCALS);
16901 // Make sure there aren't too many arguments.
16902 // This is currently an implementation limit due to fixed-size arrays in the
16903 // inline info, rather than a performance heuristic.
16905 inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_ARGUMENTS, methInfo->args.numArgs);
16907 if (methInfo->args.numArgs > MAX_INL_ARGS)
16909 inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_ARGUMENTS);
16913 // Note force inline state
16915 inlineResult->NoteBool(InlineObservation::CALLEE_IS_FORCE_INLINE, forceInline);
16917 // Note IL code size
16919 inlineResult->NoteInt(InlineObservation::CALLEE_IL_CODE_SIZE, codeSize);
16921 if (inlineResult->IsFailure())
16926 // Make sure maxstack is not too big
16928 inlineResult->NoteInt(InlineObservation::CALLEE_MAXSTACK, methInfo->maxStack);
16930 if (inlineResult->IsFailure())
16936 /*****************************************************************************
16939 void Compiler::impCheckCanInline(GenTreePtr call,
16940 CORINFO_METHOD_HANDLE fncHandle,
16942 CORINFO_CONTEXT_HANDLE exactContextHnd,
16943 InlineCandidateInfo** ppInlineCandidateInfo,
16944 InlineResult* inlineResult)
16946 // Either EE or JIT might throw exceptions below.
16947 // If that happens, just don't inline the method.
16953 CORINFO_METHOD_HANDLE fncHandle;
16955 CORINFO_CONTEXT_HANDLE exactContextHnd;
16956 InlineResult* result;
16957 InlineCandidateInfo** ppInlineCandidateInfo;
16958 } param = {nullptr};
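// The locals above are packaged into the POD 'param' struct and accessed through
// pParam inside the capture-less lambda passed to eeRunWithErrorTrap; if the EE or
// JIT throws, control resumes past the trap and the candidate is rejected below via
// CALLSITE_COMPILATION_ERROR.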
16960 param.pThis = this;
16962 param.fncHandle = fncHandle;
16963 param.methAttr = methAttr;
16964 param.exactContextHnd = (exactContextHnd != nullptr) ? exactContextHnd : MAKE_METHODCONTEXT(fncHandle);
16965 param.result = inlineResult;
16966 param.ppInlineCandidateInfo = ppInlineCandidateInfo;
16968 bool success = eeRunWithErrorTrap<Param>(
16969 [](Param* pParam) {
16970 DWORD dwRestrictions = 0;
16971 CorInfoInitClassResult initClassResult;
16974 const char* methodName;
16975 const char* className;
16976 methodName = pParam->pThis->eeGetMethodName(pParam->fncHandle, &className);
16978 if (JitConfig.JitNoInline())
16980 pParam->result->NoteFatal(InlineObservation::CALLEE_IS_JIT_NOINLINE);
16985 /* Try to get the code address/size for the method */
16987 CORINFO_METHOD_INFO methInfo;
16988 if (!pParam->pThis->info.compCompHnd->getMethodInfo(pParam->fncHandle, &methInfo))
16990 pParam->result->NoteFatal(InlineObservation::CALLEE_NO_METHOD_INFO);
16995 forceInline = !!(pParam->methAttr & CORINFO_FLG_FORCEINLINE);
16997 pParam->pThis->impCanInlineIL(pParam->fncHandle, &methInfo, forceInline, pParam->result);
16999 if (pParam->result->IsFailure())
17001 assert(pParam->result->IsNever());
17005 // Speculatively check if initClass() can be done.
17006 // If it can be done, we will try to inline the method. If inlining
17007 // succeeds, then we will do the non-speculative initClass() and commit it.
17008 // If this speculative call to initClass() fails, there is no point
17009 // trying to inline this method.
17011 pParam->pThis->info.compCompHnd->initClass(nullptr /* field */, pParam->fncHandle /* method */,
17012 pParam->exactContextHnd /* context */,
17013 TRUE /* speculative */);
17015 if (initClassResult & CORINFO_INITCLASS_DONT_INLINE)
17017 pParam->result->NoteFatal(InlineObservation::CALLSITE_CLASS_INIT_FAILURE_SPEC);
17021 // Give the EE the final say in whether to inline or not.
17022 // This should be last since for verifiable code, this can be expensive
17024 /* VM Inline check also ensures that the method is verifiable if needed */
17025 CorInfoInline vmResult;
17026 vmResult = pParam->pThis->info.compCompHnd->canInline(pParam->pThis->info.compMethodHnd, pParam->fncHandle,
17029 if (vmResult == INLINE_FAIL)
17031 pParam->result->NoteFatal(InlineObservation::CALLSITE_IS_VM_NOINLINE);
17033 else if (vmResult == INLINE_NEVER)
17035 pParam->result->NoteFatal(InlineObservation::CALLEE_IS_VM_NOINLINE);
17038 if (pParam->result->IsFailure())
17040 // Make sure not to report this one. It was already reported by the VM.
17041 pParam->result->SetReported();
17045 // check for unsupported inlining restrictions
17046 assert((dwRestrictions & ~(INLINE_RESPECT_BOUNDARY | INLINE_NO_CALLEE_LDSTR | INLINE_SAME_THIS)) == 0);
17048 if (dwRestrictions & INLINE_SAME_THIS)
17050 GenTreePtr thisArg = pParam->call->gtCall.gtCallObjp;
17053 if (!pParam->pThis->impIsThis(thisArg))
17055 pParam->result->NoteFatal(InlineObservation::CALLSITE_REQUIRES_SAME_THIS);
17060 /* Get the method properties */
17062 CORINFO_CLASS_HANDLE clsHandle;
17063 clsHandle = pParam->pThis->info.compCompHnd->getMethodClass(pParam->fncHandle);
17065 clsAttr = pParam->pThis->info.compCompHnd->getClassAttribs(clsHandle);
17067 /* Get the return type */
17069 var_types fncRetType;
17070 fncRetType = pParam->call->TypeGet();
17073 var_types fncRealRetType;
17074 fncRealRetType = JITtype2varType(methInfo.args.retType);
17076 assert((genActualType(fncRealRetType) == genActualType(fncRetType)) ||
17077 // <BUGNUM> VSW 288602 </BUGNUM>
17078 // In case of IJW, we allow to assign a native pointer to a BYREF.
17079 (fncRetType == TYP_BYREF && methInfo.args.retType == CORINFO_TYPE_PTR) ||
17080 (varTypeIsStruct(fncRetType) && (fncRealRetType == TYP_STRUCT)));
17084 // Allocate an InlineCandidateInfo structure
17086 InlineCandidateInfo* pInfo;
17087 pInfo = new (pParam->pThis, CMK_Inlining) InlineCandidateInfo;
17089 pInfo->dwRestrictions = dwRestrictions;
17090 pInfo->methInfo = methInfo;
17091 pInfo->methAttr = pParam->methAttr;
17092 pInfo->clsHandle = clsHandle;
17093 pInfo->clsAttr = clsAttr;
17094 pInfo->fncRetType = fncRetType;
17095 pInfo->exactContextHnd = pParam->exactContextHnd;
17096 pInfo->ilCallerHandle = pParam->pThis->info.compMethodHnd;
17097 pInfo->initClassResult = initClassResult;
17099 *(pParam->ppInlineCandidateInfo) = pInfo;
17106 param.result->NoteFatal(InlineObservation::CALLSITE_COMPILATION_ERROR);
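// Records per-argument facts used by the inliner: whether the argument is the 'this'
// pointer, a constant/invariant, a caller local, a byref to a struct local, and
// whether it has global references or other side effects.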
17110 void Compiler::impInlineRecordArgInfo(InlineInfo* pInlineInfo,
17111 GenTreePtr curArgVal,
17113 InlineResult* inlineResult)
17115 InlArgInfo* inlCurArgInfo = &pInlineInfo->inlArgInfo[argNum];
17117 if (curArgVal->gtOper == GT_MKREFANY)
17119 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_IS_MKREFANY);
17123 inlCurArgInfo->argNode = curArgVal;
17125 GenTreePtr lclVarTree;
17126 if (impIsAddressInLocal(curArgVal, &lclVarTree) && varTypeIsStruct(lclVarTree))
17128 inlCurArgInfo->argIsByRefToStructLocal = true;
17129 #ifdef FEATURE_SIMD
17130 if (lvaTable[lclVarTree->AsLclVarCommon()->gtLclNum].lvSIMDType)
17132 pInlineInfo->hasSIMDTypeArgLocalOrReturn = true;
17134 #endif // FEATURE_SIMD
17137 if (curArgVal->gtFlags & GTF_ALL_EFFECT)
17139 inlCurArgInfo->argHasGlobRef = (curArgVal->gtFlags & GTF_GLOB_REF) != 0;
17140 inlCurArgInfo->argHasSideEff = (curArgVal->gtFlags & (GTF_ALL_EFFECT & ~GTF_GLOB_REF)) != 0;
17143 if (curArgVal->gtOper == GT_LCL_VAR)
17145 inlCurArgInfo->argIsLclVar = true;
17147 /* Remember the "original" argument number */
17148 curArgVal->gtLclVar.gtLclILoffs = argNum;
17151 if ((curArgVal->OperKind() & GTK_CONST) ||
17152 ((curArgVal->gtOper == GT_ADDR) && (curArgVal->gtOp.gtOp1->gtOper == GT_LCL_VAR)))
17154 inlCurArgInfo->argIsInvariant = true;
17155 if (inlCurArgInfo->argIsThis && (curArgVal->gtOper == GT_CNS_INT) && (curArgVal->gtIntCon.gtIconVal == 0))
17157 /* Abort, but do not mark as not inlinable */
17158 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_HAS_NULL_THIS);
17163 if (!inlCurArgInfo->argIsInvariant && gtHasLocalsWithAddrOp(curArgVal))
17165 inlCurArgInfo->argHasLdargaOp = true;
17171 if (inlCurArgInfo->argIsThis)
17173 printf("thisArg:");
17177 printf("\nArgument #%u:", argNum);
17179 if (inlCurArgInfo->argIsLclVar)
17181 printf(" is a local var");
17183 if (inlCurArgInfo->argIsInvariant)
17185 printf(" is a constant");
17187 if (inlCurArgInfo->argHasGlobRef)
17189 printf(" has global refs");
17191 if (inlCurArgInfo->argHasSideEff)
17193 printf(" has side effects");
17195 if (inlCurArgInfo->argHasLdargaOp)
17197 printf(" has ldarga effect");
17199 if (inlCurArgInfo->argHasStargOp)
17201 printf(" has starg effect");
17203 if (inlCurArgInfo->argIsByRefToStructLocal)
17205 printf(" is byref to a struct local");
17209 gtDispTree(curArgVal);
17215 /*****************************************************************************
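 *
 * Builds the per-argument (inlArgInfo) and per-local (lclVarInfo) tables for the
 * inline candidate from the call trees and the callee signature, rejecting the
 * inline when tree types and signature types cannot be reconciled.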
17219 void Compiler::impInlineInitVars(InlineInfo* pInlineInfo)
17221 assert(!compIsForInlining());
17223 GenTreePtr call = pInlineInfo->iciCall;
17224 CORINFO_METHOD_INFO* methInfo = &pInlineInfo->inlineCandidateInfo->methInfo;
17225 unsigned clsAttr = pInlineInfo->inlineCandidateInfo->clsAttr;
17226 InlArgInfo* inlArgInfo = pInlineInfo->inlArgInfo;
17227 InlLclVarInfo* lclVarInfo = pInlineInfo->lclVarInfo;
17228 InlineResult* inlineResult = pInlineInfo->inlineResult;
17230 const bool hasRetBuffArg = impMethodInfo_hasRetBuffArg(methInfo);
17232 /* Init the argument struct */
17234 memset(inlArgInfo, 0, (MAX_INL_ARGS + 1) * sizeof(inlArgInfo[0]));
17236 /* Get hold of the 'this' pointer and the argument list proper */
17238 GenTreePtr thisArg = call->gtCall.gtCallObjp;
17239 GenTreePtr argList = call->gtCall.gtCallArgs;
17240 unsigned argCnt = 0; // Count of the arguments
17242 assert((methInfo->args.hasThis()) == (thisArg != nullptr));
17246 inlArgInfo[0].argIsThis = true;
17248 impInlineRecordArgInfo(pInlineInfo, thisArg, argCnt, inlineResult);
17250 if (inlineResult->IsFailure())
17255 /* Increment the argument count */
17259 /* Record some information about each of the arguments */
17260 bool hasTypeCtxtArg = (methInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) != 0;
17262 #if USER_ARGS_COME_LAST
17263 unsigned typeCtxtArg = thisArg ? 1 : 0;
17264 #else // USER_ARGS_COME_LAST
17265 unsigned typeCtxtArg = methInfo->args.totalILArgs();
17266 #endif // USER_ARGS_COME_LAST
17268 for (GenTreePtr argTmp = argList; argTmp; argTmp = argTmp->gtOp.gtOp2)
17270 if (argTmp == argList && hasRetBuffArg)
17275 // Ignore the type context argument
17276 if (hasTypeCtxtArg && (argCnt == typeCtxtArg))
17278 typeCtxtArg = 0xFFFFFFFF;
17282 assert(argTmp->gtOper == GT_LIST);
17283 GenTreePtr argVal = argTmp->gtOp.gtOp1;
17285 impInlineRecordArgInfo(pInlineInfo, argVal, argCnt, inlineResult);
17287 if (inlineResult->IsFailure())
17292 /* Increment the argument count */
17296 /* Make sure we got the arg number right */
17297 assert(argCnt == methInfo->args.totalILArgs());
17299 #ifdef FEATURE_SIMD
17300 bool foundSIMDType = pInlineInfo->hasSIMDTypeArgLocalOrReturn;
17301 #endif // FEATURE_SIMD
17303 /* We have typeless opcodes, get type information from the signature */
17309 if (clsAttr & CORINFO_FLG_VALUECLASS)
17311 sigType = TYP_BYREF;
17318 lclVarInfo[0].lclVerTypeInfo = verMakeTypeInfo(pInlineInfo->inlineCandidateInfo->clsHandle);
17319 lclVarInfo[0].lclHasLdlocaOp = false;
17321 #ifdef FEATURE_SIMD
17322 // We always want to check isSIMDClass, since we want to set foundSIMDType (to increase
17323 // the inlining multiplier) for anything in that assembly.
17324 // But we only need to normalize it if it is a TYP_STRUCT
17325 // (which we need to do even if we have already set foundSIMDType).
17326 if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDClass(&(lclVarInfo[0].lclVerTypeInfo)))
17328 if (sigType == TYP_STRUCT)
17330 sigType = impNormStructType(lclVarInfo[0].lclVerTypeInfo.GetClassHandle());
17332 foundSIMDType = true;
17334 #endif // FEATURE_SIMD
17335 lclVarInfo[0].lclTypeInfo = sigType;
17337 assert(varTypeIsGC(thisArg->gtType) || // "this" is managed
17338 (thisArg->gtType == TYP_I_IMPL && // "this" is unmgd but the method's class doesn't care
17339 (clsAttr & CORINFO_FLG_VALUECLASS)));
17341 if (genActualType(thisArg->gtType) != genActualType(sigType))
17343 if (sigType == TYP_REF)
17345 /* The argument cannot be bashed into a ref (see bug 750871) */
17346 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_REF);
17350 /* This can only happen with byrefs <-> ints/shorts */
17352 assert(genActualType(sigType) == TYP_I_IMPL || sigType == TYP_BYREF);
17353 assert(genActualType(thisArg->gtType) == TYP_I_IMPL || thisArg->gtType == TYP_BYREF);
17355 if (sigType == TYP_BYREF)
17357 lclVarInfo[0].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
17359 else if (thisArg->gtType == TYP_BYREF)
17361 assert(sigType == TYP_I_IMPL);
17363 /* If possible change the BYREF to an int */
17364 if (thisArg->IsVarAddr())
17366 thisArg->gtType = TYP_I_IMPL;
17367 lclVarInfo[0].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
17371 /* Arguments 'int <- byref' cannot be bashed */
17372 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_INT);
17379 /* Init the types of the arguments and make sure the types
17380 * from the trees match the types in the signature */
17382 CORINFO_ARG_LIST_HANDLE argLst;
17383 argLst = methInfo->args.args;
17386 for (i = (thisArg ? 1 : 0); i < argCnt; i++, argLst = info.compCompHnd->getArgNext(argLst))
17388 var_types sigType = (var_types)eeGetArgType(argLst, &methInfo->args);
17390 lclVarInfo[i].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->args, argLst);
17392 #ifdef FEATURE_SIMD
17393 if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDClass(&(lclVarInfo[i].lclVerTypeInfo)))
17395 // If this is a SIMD class (i.e. in the SIMD assembly), then we will consider that we've
17396 // found a SIMD type, even if this may not be a type we recognize (the assumption is that
17397 // it is likely to use a SIMD type, and therefore we want to increase the inlining multiplier).
17398 foundSIMDType = true;
17399 if (sigType == TYP_STRUCT)
17401 var_types structType = impNormStructType(lclVarInfo[i].lclVerTypeInfo.GetClassHandle());
17402 sigType = structType;
17405 #endif // FEATURE_SIMD
17407 lclVarInfo[i].lclTypeInfo = sigType;
17408 lclVarInfo[i].lclHasLdlocaOp = false;
17410 /* Does the tree type match the signature type? */
17412 GenTreePtr inlArgNode = inlArgInfo[i].argNode;
17414 if (sigType != inlArgNode->gtType)
17416 /* In valid IL, this can only happen for short integer types or byrefs <-> [native] ints,
17417 but in bad IL cases with caller-callee signature mismatches we can see other types.
17418 Intentionally reject cases with mismatches so the jit is more flexible when
17419 encountering bad IL. */
17421 bool isPlausibleTypeMatch = (genActualType(sigType) == genActualType(inlArgNode->gtType)) ||
17422 (genActualTypeIsIntOrI(sigType) && inlArgNode->gtType == TYP_BYREF) ||
17423 (sigType == TYP_BYREF && genActualTypeIsIntOrI(inlArgNode->gtType));
17425 if (!isPlausibleTypeMatch)
17427 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_TYPES_INCOMPATIBLE);
17431 /* Is it a narrowing or widening cast?
17432 * Widening casts are ok since the value computed is already
17433 * normalized to an int (on the IL stack) */
17435 if (genTypeSize(inlArgNode->gtType) >= genTypeSize(sigType))
17437 if (sigType == TYP_BYREF)
17439 lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
17441 else if (inlArgNode->gtType == TYP_BYREF)
17443 assert(varTypeIsIntOrI(sigType));
17445 /* If possible bash the BYREF to an int */
17446 if (inlArgNode->IsVarAddr())
17448 inlArgNode->gtType = TYP_I_IMPL;
17449 lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
17453 /* Arguments 'int <- byref' cannot be changed */
17454 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_INT);
17458 else if (genTypeSize(sigType) < EA_PTRSIZE)
17460 /* Narrowing cast */
17462 if (inlArgNode->gtOper == GT_LCL_VAR &&
17463 !lvaTable[inlArgNode->gtLclVarCommon.gtLclNum].lvNormalizeOnLoad() &&
17464 sigType == lvaGetRealType(inlArgNode->gtLclVarCommon.gtLclNum))
17466 /* We don't need to insert a cast here as the variable
17467 was assigned a normalized value of the right type */
17472 inlArgNode = inlArgInfo[i].argNode = gtNewCastNode(TYP_INT, inlArgNode, sigType);
17474 inlArgInfo[i].argIsLclVar = false;
17476 /* Try to fold the node in case we have constant arguments */
17478 if (inlArgInfo[i].argIsInvariant)
17480 inlArgNode = gtFoldExprConst(inlArgNode);
17481 inlArgInfo[i].argNode = inlArgNode;
17482 assert(inlArgNode->OperIsConst());
17485 #ifdef _TARGET_64BIT_
17486 else if (genTypeSize(genActualType(inlArgNode->gtType)) < genTypeSize(sigType))
17488 // This should only happen for int -> native int widening
17489 inlArgNode = inlArgInfo[i].argNode = gtNewCastNode(genActualType(sigType), inlArgNode, sigType);
17491 inlArgInfo[i].argIsLclVar = false;
17493 /* Try to fold the node in case we have constant arguments */
17495 if (inlArgInfo[i].argIsInvariant)
17497 inlArgNode = gtFoldExprConst(inlArgNode);
17498 inlArgInfo[i].argNode = inlArgNode;
17499 assert(inlArgNode->OperIsConst());
17502 #endif // _TARGET_64BIT_
17507 /* Init the types of the local variables */
17509 CORINFO_ARG_LIST_HANDLE localsSig;
17510 localsSig = methInfo->locals.args;
17512 for (i = 0; i < methInfo->locals.numArgs; i++)
17515 var_types type = (var_types)eeGetArgType(localsSig, &methInfo->locals, &isPinned);
17517 lclVarInfo[i + argCnt].lclHasLdlocaOp = false;
17518 lclVarInfo[i + argCnt].lclIsPinned = isPinned;
17519 lclVarInfo[i + argCnt].lclTypeInfo = type;
17523 // Pinned locals may cause inlines to fail.
17524 inlineResult->Note(InlineObservation::CALLEE_HAS_PINNED_LOCALS);
17525 if (inlineResult->IsFailure())
17531 lclVarInfo[i + argCnt].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->locals, localsSig);
17533 // If this local is a struct type with GC fields, inform the inliner. It may choose to bail
17534 // out on the inline.
17535 if (type == TYP_STRUCT)
17537 CORINFO_CLASS_HANDLE lclHandle = lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle();
17538 DWORD typeFlags = info.compCompHnd->getClassAttribs(lclHandle);
17539 if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0)
17541 inlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT);
17542 if (inlineResult->IsFailure())
17547 // Do further notification in the case where the call site is rare; some policies do
17548 // not track the relative hotness of call sites for "always" inline cases.
17549 if (pInlineInfo->iciBlock->isRunRarely())
17551 inlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT);
17552 if (inlineResult->IsFailure())
17561 localsSig = info.compCompHnd->getArgNext(localsSig);
17563 #ifdef FEATURE_SIMD
17564 if ((!foundSIMDType || (type == TYP_STRUCT)) && isSIMDClass(&(lclVarInfo[i + argCnt].lclVerTypeInfo)))
17566 foundSIMDType = true;
17567 if (featureSIMD && type == TYP_STRUCT)
17569 var_types structType = impNormStructType(lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle());
17570 lclVarInfo[i + argCnt].lclTypeInfo = structType;
17573 #endif // FEATURE_SIMD
17576 #ifdef FEATURE_SIMD
17577 if (!foundSIMDType && (call->AsCall()->gtRetClsHnd != nullptr) && isSIMDClass(call->AsCall()->gtRetClsHnd))
17579 foundSIMDType = true;
17581 pInlineInfo->hasSIMDTypeArgLocalOrReturn = foundSIMDType;
17582 #endif // FEATURE_SIMD
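// Maps an inlinee local to a temp in the caller's frame, grabbing the temp lazily on
// first use and copying over its type, ldloca/pinned flags, and struct handle info.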
17585 unsigned Compiler::impInlineFetchLocal(unsigned lclNum DEBUGARG(const char* reason))
17587 assert(compIsForInlining());
17589 unsigned tmpNum = impInlineInfo->lclTmpNum[lclNum];
17591 if (tmpNum == BAD_VAR_NUM)
17593 var_types lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
17595 // The lifetime of this local might span multiple BBs.
17596 // So it is a long lifetime local.
17597 impInlineInfo->lclTmpNum[lclNum] = tmpNum = lvaGrabTemp(false DEBUGARG(reason));
17599 lvaTable[tmpNum].lvType = lclTyp;
17600 if (impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclHasLdlocaOp)
17602 lvaTable[tmpNum].lvHasLdAddrOp = 1;
17605 if (impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclIsPinned)
17607 lvaTable[tmpNum].lvPinned = 1;
17609 if (!impInlineInfo->hasPinnedLocals)
17611 // If the inlinee returns a value, use a spill temp
17612 // for the return value to ensure that even in case
17613 // where the return expression refers to one of the
17614 // pinned locals, we can unpin the local right after
17615 // the inlined method body.
17616 if ((info.compRetNativeType != TYP_VOID) && (lvaInlineeReturnSpillTemp == BAD_VAR_NUM))
17618 lvaInlineeReturnSpillTemp =
17619 lvaGrabTemp(false DEBUGARG("Inline candidate pinned local return spill temp"));
17620 lvaTable[lvaInlineeReturnSpillTemp].lvType = info.compRetNativeType;
17624 impInlineInfo->hasPinnedLocals = true;
17627 if (impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclVerTypeInfo.IsStruct())
17629 if (varTypeIsStruct(lclTyp))
17631 lvaSetStruct(tmpNum,
17632 impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclVerTypeInfo.GetClassHandle(),
17633 true /* unsafe value cls check */);
17637 // This is a wrapped primitive. Make sure the verstate knows that
17638 lvaTable[tmpNum].lvVerTypeInfo =
17639 impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclVerTypeInfo;
17647 // A method used to return the GenTree (usually a GT_LCL_VAR) representing the arguments of the inlined method.
17648 // Only use this method for the arguments of the inlinee method.
17649 // !!! Do not use it for the locals of the inlinee method. !!!!
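// Roughly four cases below: invariant args are cloned in place, caller locals are
// reused directly, byrefs to struct locals are cloned, and anything else is spilled
// to (and then read from) a temp allocated on first use.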
17651 GenTreePtr Compiler::impInlineFetchArg(unsigned lclNum, InlArgInfo* inlArgInfo, InlLclVarInfo* lclVarInfo)
17653 /* Get the argument type */
17654 var_types lclTyp = lclVarInfo[lclNum].lclTypeInfo;
17656 GenTreePtr op1 = nullptr;
17658 // constant or address of local
17659 if (inlArgInfo[lclNum].argIsInvariant && !inlArgInfo[lclNum].argHasLdargaOp && !inlArgInfo[lclNum].argHasStargOp)
17661 /* Clone the constant. Note that we cannot directly use argNode
17662 in the trees even if inlArgInfo[lclNum].argIsUsed==false as this
17663 would introduce aliasing between inlArgInfo[].argNode and
17664 impInlineExpr. Then gtFoldExpr() could change it, causing further
17665 references to the argument working off of the bashed copy. */
17667 op1 = gtCloneExpr(inlArgInfo[lclNum].argNode);
17668 PREFIX_ASSUME(op1 != nullptr);
17669 inlArgInfo[lclNum].argTmpNum = (unsigned)-1; // illegal temp
17671 else if (inlArgInfo[lclNum].argIsLclVar && !inlArgInfo[lclNum].argHasLdargaOp && !inlArgInfo[lclNum].argHasStargOp)
17673 /* Argument is a local variable (of the caller)
17674 * Can we re-use the passed argument node? */
17676 op1 = inlArgInfo[lclNum].argNode;
17677 inlArgInfo[lclNum].argTmpNum = op1->gtLclVarCommon.gtLclNum;
17679 if (inlArgInfo[lclNum].argIsUsed)
17681 assert(op1->gtOper == GT_LCL_VAR);
17682 assert(lclNum == op1->gtLclVar.gtLclILoffs);
17684 if (!lvaTable[op1->gtLclVarCommon.gtLclNum].lvNormalizeOnLoad())
17686 lclTyp = genActualType(lclTyp);
17689 /* Create a new lcl var node - remember the argument lclNum */
17690 op1 = gtNewLclvNode(op1->gtLclVarCommon.gtLclNum, lclTyp, op1->gtLclVar.gtLclILoffs);
17693 else if (inlArgInfo[lclNum].argIsByRefToStructLocal && !inlArgInfo[lclNum].argHasStargOp)
17695 /* Argument is a by-ref address to a struct, a normed struct, or its field.
17696 In these cases, don't spill the byref to a local, simply clone the tree and use it.
17697 This way we will increase the chance for this byref to be optimized away by
17698 a subsequent "dereference" operation.
17700 From Dev11 bug #139955: Argument node can also be TYP_I_IMPL if we've bashed the tree
17701 (in impInlineInitVars()), if the arg has argHasLdargaOp as well as argIsByRefToStructLocal.
17702 For example, if the caller is:
17703 ldloca.s V_1 // V_1 is a local struct
17704 call void Test.ILPart::RunLdargaOnPointerArg(int32*)
17705 and the callee being inlined has:
17706 .method public static void RunLdargaOnPointerArg(int32* ptrToInts) cil managed
17708 call void Test.FourInts::NotInlined_SetExpectedValuesThroughPointerToPointer(int32**)
17709 then we change the argument tree (of "ldloca.s V_1") to TYP_I_IMPL to match the callee signature. We'll
17710 soon afterwards reject the inlining anyway, since the tree we return isn't a GT_LCL_VAR.
17712 assert(inlArgInfo[lclNum].argNode->TypeGet() == TYP_BYREF ||
17713 inlArgInfo[lclNum].argNode->TypeGet() == TYP_I_IMPL);
17714 op1 = gtCloneExpr(inlArgInfo[lclNum].argNode);
17718 /* Argument is a complex expression - it must be evaluated into a temp */
17720 if (inlArgInfo[lclNum].argHasTmp)
17722 assert(inlArgInfo[lclNum].argIsUsed);
17723 assert(inlArgInfo[lclNum].argTmpNum < lvaCount);
17725 /* Create a new lcl var node - remember the argument lclNum */
17726 op1 = gtNewLclvNode(inlArgInfo[lclNum].argTmpNum, genActualType(lclTyp));
17728 /* This is the second or later use of the this argument,
17729 so we have to use the temp (instead of the actual arg) */
17730 inlArgInfo[lclNum].argBashTmpNode = nullptr;
17734 /* First time use */
17735 assert(inlArgInfo[lclNum].argIsUsed == false);
17737 /* Reserve a temp for the expression.
17738 * Use a large size node as we may change it later */
17740 unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Inlining Arg"));
17742 lvaTable[tmpNum].lvType = lclTyp;
17743 assert(lvaTable[tmpNum].lvAddrExposed == 0);
17744 if (inlArgInfo[lclNum].argHasLdargaOp)
17746 lvaTable[tmpNum].lvHasLdAddrOp = 1;
17749 if (lclVarInfo[lclNum].lclVerTypeInfo.IsStruct())
17751 if (varTypeIsStruct(lclTyp))
17753 lvaSetStruct(tmpNum, impInlineInfo->lclVarInfo[lclNum].lclVerTypeInfo.GetClassHandle(),
17754 true /* unsafe value cls check */);
17758 // This is a wrapped primitive. Make sure the verstate knows that
17759 lvaTable[tmpNum].lvVerTypeInfo = impInlineInfo->lclVarInfo[lclNum].lclVerTypeInfo;
17763 inlArgInfo[lclNum].argHasTmp = true;
17764 inlArgInfo[lclNum].argTmpNum = tmpNum;
17766 // If we require strict exception order, then arguments must
17767 // be evaluated in sequence before the body of the inlined method.
17768 // So we need to evaluate them to a temp.
17769 // Also, if arguments have global references, we need to
17770 // evaluate them to a temp before the inlined body as the
17771 // inlined body may be modifying the global ref.
17772 // TODO-1stClassStructs: We currently do not reuse an existing lclVar
17773 // if it is a struct, because it requires some additional handling.
17775 if (!varTypeIsStruct(lclTyp) && (!inlArgInfo[lclNum].argHasSideEff) && (!inlArgInfo[lclNum].argHasGlobRef))
17777 /* Get a *LARGE* LCL_VAR node */
17778 op1 = gtNewLclLNode(tmpNum, genActualType(lclTyp), lclNum);
17780 /* Record op1 as the very first use of this argument.
17781 If there are no further uses of the arg, we may be
17782 able to use the actual arg node instead of the temp.
17783 If we do see any further uses, we will clear this. */
17784 inlArgInfo[lclNum].argBashTmpNode = op1;
17788 /* Get a small LCL_VAR node */
17789 op1 = gtNewLclvNode(tmpNum, genActualType(lclTyp));
17790 /* No bashing of this argument */
17791 inlArgInfo[lclNum].argBashTmpNode = nullptr;
17796 /* Mark the argument as used */
17798 inlArgInfo[lclNum].argIsUsed = true;
17803 /******************************************************************************
17804 Is this the original "this" argument to the call being inlined?
17806 Note that we do not inline methods with "starg 0", and so we do not need to
17810 BOOL Compiler::impInlineIsThis(GenTreePtr tree, InlArgInfo* inlArgInfo)
17812 assert(compIsForInlining());
17813 return (tree->gtOper == GT_LCL_VAR && tree->gtLclVarCommon.gtLclNum == inlArgInfo[0].argTmpNum);
17816 //-----------------------------------------------------------------------------
17817 // This function checks if a dereference in the inlinee can guarantee that
17818 // the "this" is non-NULL.
17819 // If we haven't hit a branch or a side effect, and we are dereferencing
17820 // from 'this' to access a field or make GTF_CALL_NULLCHECK call,
17821 // then we can avoid a separate null pointer check.
17823 // "additionalTreesToBeEvaluatedBefore"
17824 // is the set of pending trees that have not yet been added to the statement list,
17825 // and which have been removed from verCurrentState.esStack[]
17827 BOOL Compiler::impInlineIsGuaranteedThisDerefBeforeAnySideEffects(GenTreePtr additionalTreesToBeEvaluatedBefore,
17828 GenTreePtr variableBeingDereferenced,
17829 InlArgInfo* inlArgInfo)
17831 assert(compIsForInlining());
17832 assert(opts.OptEnabled(CLFLG_INLINING));
17834 BasicBlock* block = compCurBB;
17839 if (block != fgFirstBB)
17844 if (!impInlineIsThis(variableBeingDereferenced, inlArgInfo))
17849 if (additionalTreesToBeEvaluatedBefore &&
17850 GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(additionalTreesToBeEvaluatedBefore->gtFlags))
17855 for (stmt = impTreeList->gtNext; stmt; stmt = stmt->gtNext)
17857 expr = stmt->gtStmt.gtStmtExpr;
17859 if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(expr->gtFlags))
17865 for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
17867 unsigned stackTreeFlags = verCurrentState.esStack[level].val->gtFlags;
17868 if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(stackTreeFlags))
17877 /******************************************************************************/
17878 // Check the inlining eligibility of this GT_CALL node.
17879 // Mark GTF_CALL_INLINE_CANDIDATE on the GT_CALL node
17881 // Todo: find a way to record the failure reasons in the IR (or
17882 // otherwise build tree context) so when we do the inlining pass we
17883 // can capture these reasons
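//
// This routine screens out obvious non-candidates (tail-prefixed, virtual, indirect,
// and helper calls; synchronized or security-checked callees; call sites inside catch
// or filter regions) before handing off to impCheckCanInline for the detailed checks.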
17885 void Compiler::impMarkInlineCandidate(GenTreePtr callNode,
17886 CORINFO_CONTEXT_HANDLE exactContextHnd,
17887 CORINFO_CALL_INFO* callInfo)
17889 // Let the strategy know there's another call
17890 impInlineRoot()->m_inlineStrategy->NoteCall();
17892 if (!opts.OptEnabled(CLFLG_INLINING))
17894 /* XXX Mon 8/18/2008
17895 * This assert is misleading. The caller does not ensure that we have CLFLG_INLINING set before
17896 * calling impMarkInlineCandidate. However, if this assert trips it means that we're an inlinee and
17897 * CLFLG_MINOPT is set. That doesn't make a lot of sense. If you hit this assert, work back and
17898 * figure out why we did not set MAXOPT for this compile.
17900 assert(!compIsForInlining());
17904 if (compIsForImportOnly())
17906 // Don't bother creating the inline candidate during verification.
17907 // Otherwise the call to info.compCompHnd->canInline will trigger a recursive verification
17908 // that leads to the creation of multiple instances of Compiler.
17912 GenTreeCall* call = callNode->AsCall();
17913 InlineResult inlineResult(this, call, nullptr, "impMarkInlineCandidate");
17915 // Don't inline if not optimizing root method
17916 if (opts.compDbgCode)
17918 inlineResult.NoteFatal(InlineObservation::CALLER_DEBUG_CODEGEN);
17922 // Don't inline if inlining into root method is disabled.
17923 if (InlineStrategy::IsNoInline(info.compCompHnd, info.compMethodHnd))
17925 inlineResult.NoteFatal(InlineObservation::CALLER_IS_JIT_NOINLINE);
17929 // Inlining candidate determination needs to honor only IL tail prefix.
17930 // Inlining takes precedence over implicit tail call optimization (if the call is not directly recursive).
17931 if (call->IsTailPrefixedCall())
17933 inlineResult.NoteFatal(InlineObservation::CALLSITE_EXPLICIT_TAIL_PREFIX);
17937 // Tail recursion elimination takes precedence over inlining.
17938 // TODO: We may want to do some of the additional checks from fgMorphCall
17939 // here to reduce the chance we don't inline a call that won't be optimized
17940 // as a fast tail call or turned into a loop.
17941 if (gtIsRecursiveCall(call) && call->IsImplicitTailCall())
17943 inlineResult.NoteFatal(InlineObservation::CALLSITE_IMPLICIT_REC_TAIL_CALL);
17947 if ((call->gtFlags & GTF_CALL_VIRT_KIND_MASK) != GTF_CALL_NONVIRT)
17949 inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT);
17953 /* Ignore helper calls */
17955 if (call->gtCallType == CT_HELPER)
17957 inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_CALL_TO_HELPER);
17961 /* Ignore indirect calls */
17962 if (call->gtCallType == CT_INDIRECT)
17964 inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT_MANAGED);
17968 /* I removed the check for BBJ_THROW. BBJ_THROW is usually marked as rarely run. This more or less
17969 * restricts the inliner to non-expanding inlines. I removed the check to allow for non-expanding
17970 * inlining in throw blocks. I should consider the same thing for catch and filter regions. */
17972 CORINFO_METHOD_HANDLE fncHandle = call->gtCallMethHnd;
17975 // Reuse method flags from the original callInfo if possible
17976 if (fncHandle == callInfo->hMethod)
17978 methAttr = callInfo->methodFlags;
17982 methAttr = info.compCompHnd->getMethodAttribs(fncHandle);
17986 if (compStressCompile(STRESS_FORCE_INLINE, 0))
17988 methAttr |= CORINFO_FLG_FORCEINLINE;
17992 // Check for COMPlus_AggressiveInlining
17993 if (compDoAggressiveInlining)
17995 methAttr |= CORINFO_FLG_FORCEINLINE;
17998 if (!(methAttr & CORINFO_FLG_FORCEINLINE))
18000 /* Don't bother inlining blocks that are in the catch handler region */
18001 if (bbInCatchHandlerILRange(compCurBB))
18006 printf("\nWill not inline blocks that are in the catch handler region\n");
18011 inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_CATCH);
18015 if (bbInFilterILRange(compCurBB))
18020 printf("\nWill not inline blocks that are in the filter region\n");
18024 inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_FILTER);

    /* If the caller's stack frame is marked, then we can't do any inlining. Period. */

    if (opts.compNeedSecurityCheck)
    {
        inlineResult.NoteFatal(InlineObservation::CALLER_NEEDS_SECURITY_CHECK);
        return;
    }

    /* Check if we tried to inline this method before */

    if (methAttr & CORINFO_FLG_DONT_INLINE)
    {
        inlineResult.NoteFatal(InlineObservation::CALLEE_IS_NOINLINE);
        return;
    }

    /* Cannot inline synchronized methods */

    if (methAttr & CORINFO_FLG_SYNCH)
    {
        inlineResult.NoteFatal(InlineObservation::CALLEE_IS_SYNCHRONIZED);
        return;
    }

    /* Do not inline if callee needs security checks (since they would then mark the wrong frame) */

    if (methAttr & CORINFO_FLG_SECURITYCHECK)
    {
        inlineResult.NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
        return;
    }

    InlineCandidateInfo* inlineCandidateInfo = nullptr;
    impCheckCanInline(call, fncHandle, methAttr, exactContextHnd, &inlineCandidateInfo, &inlineResult);

    if (inlineResult.IsFailure())
    {
        return;
    }
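
    // impCheckCanInline performs the callee-side legality checks (including asking the
    // EE whether inlining is permitted) and, on success, fills in inlineCandidateInfo.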

    // The old value should be NULL.
    assert(call->gtInlineCandidateInfo == nullptr);

    call->gtInlineCandidateInfo = inlineCandidateInfo;

    // Mark the call node as an inline candidate.
    call->gtFlags |= GTF_CALL_INLINE_CANDIDATE;

    // Let the strategy know there's another candidate.
    impInlineRoot()->m_inlineStrategy->NoteCandidate();
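
    // The actual inline attempt happens later, in the inlining phase; only then is the
    // final InlineResult for this candidate determined and reported.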

    // Since we're not actually inlining yet, and this call site is
    // still just an inline candidate, there's nothing to report.
    inlineResult.SetReported();
}

/******************************************************************************/
// Returns true if the given intrinsic will be implemented by target-specific
// instructions.

bool Compiler::IsTargetIntrinsic(CorInfoIntrinsics intrinsicId)
{
#if defined(_TARGET_AMD64_) || (defined(_TARGET_X86_) && !defined(LEGACY_BACKEND))
    switch (intrinsicId)
    {
        // Amd64 only has SSE2 instructions to directly compute sqrt/abs.
        //
        // TODO: Because the x86 backend only targets SSE for floating-point code,
        //       it does not treat Sine, Cosine, or Round as intrinsics (JIT32
        //       implemented those intrinsics as x87 instructions). If this poses
        //       a CQ problem, it may be necessary to change the implementation of
        //       the helper calls to decrease call overhead or switch back to the
        //       x87 instructions. This is tracked by #7097.
        case CORINFO_INTRINSIC_Sqrt:
        case CORINFO_INTRINSIC_Abs:
            return true;

        default:
            return false;
    }
#elif defined(_TARGET_ARM64_)
    switch (intrinsicId)
    {
        case CORINFO_INTRINSIC_Sqrt:
        case CORINFO_INTRINSIC_Abs:
        case CORINFO_INTRINSIC_Round:
            return true;

        default:
            return false;
    }
#elif defined(_TARGET_ARM_)
    switch (intrinsicId)
    {
        case CORINFO_INTRINSIC_Sqrt:
        case CORINFO_INTRINSIC_Abs:
        case CORINFO_INTRINSIC_Round:
            return true;

        default:
            return false;
    }
#elif defined(_TARGET_X86_)
    switch (intrinsicId)
    {
        case CORINFO_INTRINSIC_Sin:
        case CORINFO_INTRINSIC_Cos:
        case CORINFO_INTRINSIC_Sqrt:
        case CORINFO_INTRINSIC_Abs:
        case CORINFO_INTRINSIC_Round:
            return true;

        default:
            return false;
    }
#else
    // TODO: This portion of the logic is not implemented for other architectures.
    // The reason for returning true is that, on all other architectures, the only
    // intrinsics enabled are target intrinsics.
    return true;
#endif //_TARGET_AMD64_
}

/******************************************************************************/
// Returns true if the given intrinsic will be implemented by calling a System.Math
// method.

bool Compiler::IsIntrinsicImplementedByUserCall(CorInfoIntrinsics intrinsicId)
{
    // Currently, if a math intrinsic is not implemented by target-specific
    // instructions, it will be implemented by a System.Math call. In the
    // future, if we switch to implementing some of them with helper calls,
    // this predicate needs to be revisited.
    return !IsTargetIntrinsic(intrinsicId);
}
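
// Note: IsIntrinsicImplementedByUserCall is by construction the complement of
// IsTargetIntrinsic, so introducing a third implementation strategy (e.g. the helper
// calls mentioned in the comment above) would require revisiting both predicates.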

bool Compiler::IsMathIntrinsic(CorInfoIntrinsics intrinsicId)
{
    switch (intrinsicId)
    {
        case CORINFO_INTRINSIC_Sin:
        case CORINFO_INTRINSIC_Sqrt:
        case CORINFO_INTRINSIC_Abs:
        case CORINFO_INTRINSIC_Cos:
        case CORINFO_INTRINSIC_Round:
        case CORINFO_INTRINSIC_Cosh:
        case CORINFO_INTRINSIC_Sinh:
        case CORINFO_INTRINSIC_Tan:
        case CORINFO_INTRINSIC_Tanh:
        case CORINFO_INTRINSIC_Asin:
        case CORINFO_INTRINSIC_Acos:
        case CORINFO_INTRINSIC_Atan:
        case CORINFO_INTRINSIC_Atan2:
        case CORINFO_INTRINSIC_Log10:
        case CORINFO_INTRINSIC_Pow:
        case CORINFO_INTRINSIC_Exp:
        case CORINFO_INTRINSIC_Ceiling:
        case CORINFO_INTRINSIC_Floor:
            return true;

        default:
            return false;
    }
}

bool Compiler::IsMathIntrinsic(GenTreePtr tree)
{
    return (tree->OperGet() == GT_INTRINSIC) && IsMathIntrinsic(tree->gtIntrinsic.gtIntrinsicId);
}

/*****************************************************************************/