1 // Licensed to the .NET Foundation under one or more agreements.
2 // The .NET Foundation licenses this file to you under the MIT license.
3 // See the LICENSE file in the project root for more information.
5 /*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
6 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
10 XX Imports the given method and converts it to semantic trees XX
12 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
13 XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
23 #define Verify(cond, msg) \
28 verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__)); \
32 #define VerifyOrReturn(cond, msg) \
37 verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__)); \
42 #define VerifyOrReturnSpeculative(cond, msg, speculative) \
56 verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__)); \
62 /*****************************************************************************/
64 void Compiler::impInit()
68 impTreeList = nullptr;
69 impTreeLast = nullptr;
70 impInlinedCodeSize = 0;
74 /*****************************************************************************
76 * Pushes the given tree on the stack.
79 void Compiler::impPushOnStack(GenTree* tree, typeInfo ti)
81 /* Check for overflow. If inlining, we may be using a bigger stack */
83 if ((verCurrentState.esStackDepth >= info.compMaxStack) &&
84 (verCurrentState.esStackDepth >= impStkSize || ((compCurBB->bbFlags & BBF_IMPORTED) == 0)))
86 BADCODE("stack overflow");
90 // If we are pushing a struct, make certain we know the precise type!
91 if (tree->TypeGet() == TYP_STRUCT)
93 assert(ti.IsType(TI_STRUCT));
94 CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandle();
95 assert(clsHnd != NO_CLASS_HANDLE);
98 if (tiVerificationNeeded && !ti.IsDead())
100 assert(typeInfo::AreEquivalent(NormaliseForStack(ti), ti)); // types are normalized
102 // The ti type is consistent with the tree type.
105 // On 64-bit systems, nodes whose "proper" type is "native int" get labeled TYP_LONG.
106 // In the verification type system, we always transform "native int" to "TI_INT".
107 // Ideally, we would keep track of which nodes labeled "TYP_LONG" are really "native int", but
108 // attempts to do that have proved too difficult. Instead, we'll assume that in checks like this,
109 // when there's a mismatch, it's because of this reason -- the typeInfo::AreEquivalentModuloNativeInt
110 // method used in the last disjunct allows exactly this mismatch.
111 assert(ti.IsDead() || ti.IsByRef() && (tree->TypeGet() == TYP_I_IMPL || tree->TypeGet() == TYP_BYREF) ||
112 ti.IsUnboxedGenericTypeVar() && tree->TypeGet() == TYP_REF ||
113 ti.IsObjRef() && tree->TypeGet() == TYP_REF || ti.IsMethod() && tree->TypeGet() == TYP_I_IMPL ||
114 ti.IsType(TI_STRUCT) && tree->TypeGet() != TYP_REF ||
115 typeInfo::AreEquivalentModuloNativeInt(NormaliseForStack(ti),
116 NormaliseForStack(typeInfo(tree->TypeGet()))));
118 // If it is a struct type, make certain we normalized the primitive types
119 assert(!ti.IsType(TI_STRUCT) ||
120 info.compCompHnd->getTypeForPrimitiveValueClass(ti.GetClassHandle()) == CORINFO_TYPE_UNDEF);
124 if (VERBOSE && tiVerificationNeeded)
127 printf(TI_DUMP_PADDING);
128 printf("About to push to stack: ");
131 #endif // VERBOSE_VERIFY
135 verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo = ti;
136 verCurrentState.esStack[verCurrentState.esStackDepth++].val = tree;
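// Track whether the method uses long or floating-point values so later phases can
// cheaply tell when such types never appear.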
138 if ((tree->gtType == TYP_LONG) && (compLongUsed == false))
142 else if (((tree->gtType == TYP_FLOAT) || (tree->gtType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
144 compFloatingPointUsed = true;
148 inline void Compiler::impPushNullObjRefOnStack()
150 impPushOnStack(gtNewIconNode(0, TYP_REF), typeInfo(TI_NULL));
153 // This method gets called when we run into unverifiable code
154 // (and we are verifying the method)
156 inline void Compiler::verRaiseVerifyExceptionIfNeeded(INDEBUG(const char* msg) DEBUGARG(const char* file)
157 DEBUGARG(unsigned line))
159 // Remember that the code is not verifiable
160 // Note that the method may yet pass canSkipMethodVerification(),
161 // and so the presence of unverifiable code may not be an issue.
162 tiIsVerifiableCode = FALSE;
165 const char* tail = strrchr(file, '\\');
171 if (JitConfig.JitBreakOnUnsafeCode())
173 assert(!"Unsafe code detected");
177 JITLOG((LL_INFO10000, "Detected unsafe code: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
178 msg, info.compFullName, impCurOpcName, impCurOpcOffs));
180 if (verNeedsVerification() || compIsForImportOnly())
182 JITLOG((LL_ERROR, "Verification failure: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
183 msg, info.compFullName, impCurOpcName, impCurOpcOffs));
184 verRaiseVerifyException(INDEBUG(msg) DEBUGARG(file) DEBUGARG(line));
188 inline void DECLSPEC_NORETURN Compiler::verRaiseVerifyException(INDEBUG(const char* msg) DEBUGARG(const char* file)
189 DEBUGARG(unsigned line))
191 JITLOG((LL_ERROR, "Verification failure: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
192 msg, info.compFullName, impCurOpcName, impCurOpcOffs));
195 // BreakIfDebuggerPresent();
196 if (getBreakOnBadCode())
198 assert(!"Typechecking error");
202 RaiseException(SEH_VERIFICATION_EXCEPTION, EXCEPTION_NONCONTINUABLE, 0, nullptr);
206 // Helper function that tells us whether the IL instruction at the given address
207 // consumes an address at the top of the stack. We use it to avoid marking locals as address-taken unnecessarily.
209 bool Compiler::impILConsumesAddr(const BYTE* codeAddr, CORINFO_METHOD_HANDLE fncHandle, CORINFO_MODULE_HANDLE scpHandle)
211 assert(!compIsForInlining());
215 opcode = (OPCODE)getU1LittleEndian(codeAddr);
219 // case CEE_LDFLDA: We're taking this one out because if you have a sequence
225 // of a primitive-like struct, you end up after morphing with the address of a local
226 // that's not marked as address-taken, which is wrong. Also, ldflda is usually used
227 // for structs that contain other structs, which isn't a case we handle very
228 // well now for other reasons.
232 // We won't collapse small fields. This is probably not the right place to have this
233 // check, but we're only using the function for this purpose, and it is easy to factor
234 // out if we need to do so.
236 CORINFO_RESOLVED_TOKEN resolvedToken;
237 impResolveToken(codeAddr + sizeof(__int8), &resolvedToken, CORINFO_TOKENKIND_Field);
239 CORINFO_CLASS_HANDLE clsHnd;
240 var_types lclTyp = JITtype2varType(info.compCompHnd->getFieldType(resolvedToken.hField, &clsHnd));
242 // Preserve 'small' int types
243 if (!varTypeIsSmall(lclTyp))
245 lclTyp = genActualType(lclTyp);
248 if (varTypeIsSmall(lclTyp))
262 void Compiler::impResolveToken(const BYTE* addr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoTokenKind kind)
264 pResolvedToken->tokenContext = impTokenLookupContextHandle;
265 pResolvedToken->tokenScope = info.compScopeHnd;
266 pResolvedToken->token = getU4LittleEndian(addr);
267 pResolvedToken->tokenType = kind;
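// When verifying, resolve the token with the non-throwing eeTryResolveToken so that a bad
// token surfaces as a verification failure (via Verify) rather than a hard failure.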
269 if (!tiVerificationNeeded)
271 info.compCompHnd->resolveToken(pResolvedToken);
275 Verify(eeTryResolveToken(pResolvedToken), "Token resolution failed");
279 /*****************************************************************************
281 * Pop one tree from the stack.
284 StackEntry Compiler::impPopStack()
286 if (verCurrentState.esStackDepth == 0)
288 BADCODE("stack underflow");
293 if (VERBOSE && tiVerificationNeeded)
296 printf(TI_DUMP_PADDING);
297 printf("About to pop from the stack: ");
298 const typeInfo& ti = verCurrentState.esStack[verCurrentState.esStackDepth - 1].seTypeInfo;
301 #endif // VERBOSE_VERIFY
304 return verCurrentState.esStack[--verCurrentState.esStackDepth];
307 /*****************************************************************************
309 * Peek at the n'th (0-based) tree from the top of the stack.
312 StackEntry& Compiler::impStackTop(unsigned n)
314 if (verCurrentState.esStackDepth <= n)
316 BADCODE("stack underflow");
319 return verCurrentState.esStack[verCurrentState.esStackDepth - n - 1];
322 unsigned Compiler::impStackHeight()
324 return verCurrentState.esStackDepth;
327 /*****************************************************************************
328 * Some of the trees are spilled specially. While unspilling them, or
329 * making a copy, these need to be handled specially. The function
330 * enumerates the operators possible after spilling.
333 #ifdef DEBUG // only used in asserts
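// Returns true only for the node kinds that impSaveStackState knows how to clone:
// local variable references and constants.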
334 static bool impValidSpilledStackEntry(GenTree* tree)
336 if (tree->gtOper == GT_LCL_VAR)
341 if (tree->OperIsConst())
350 /*****************************************************************************
352 * The following logic is used to save/restore stack contents.
353 * If 'copy' is true, then we make a copy of the trees on the stack. These
354 * all have to be cloneable/spilled values.
357 void Compiler::impSaveStackState(SavedStack* savePtr, bool copy)
359 savePtr->ssDepth = verCurrentState.esStackDepth;
361 if (verCurrentState.esStackDepth)
363 savePtr->ssTrees = new (this, CMK_ImpStack) StackEntry[verCurrentState.esStackDepth];
364 size_t saveSize = verCurrentState.esStackDepth * sizeof(*savePtr->ssTrees);
368 StackEntry* table = savePtr->ssTrees;
370 /* Make a fresh copy of all the stack entries */
372 for (unsigned level = 0; level < verCurrentState.esStackDepth; level++, table++)
374 table->seTypeInfo = verCurrentState.esStack[level].seTypeInfo;
375 GenTree* tree = verCurrentState.esStack[level].val;
377 assert(impValidSpilledStackEntry(tree));
379 switch (tree->gtOper)
386 table->val = gtCloneExpr(tree);
390 assert(!"Bad oper - Not covered by impValidSpilledStackEntry()");
397 memcpy(savePtr->ssTrees, verCurrentState.esStack, saveSize);
402 void Compiler::impRestoreStackState(SavedStack* savePtr)
404 verCurrentState.esStackDepth = savePtr->ssDepth;
406 if (verCurrentState.esStackDepth)
408 memcpy(verCurrentState.esStack, savePtr->ssTrees,
409 verCurrentState.esStackDepth * sizeof(*verCurrentState.esStack));
413 /*****************************************************************************
415 * Get the tree list started for a new basic block.
417 inline void Compiler::impBeginTreeList()
419 assert(impTreeList == nullptr && impTreeLast == nullptr);
421 impTreeList = impTreeLast = new (this, GT_BEG_STMTS) GenTree(GT_BEG_STMTS, TYP_VOID);
424 /*****************************************************************************
426 * Store the given start and end stmt in the given basic block. This is
427 * mostly called by impEndTreeList(BasicBlock *block). It is called
428 * directly only for handling CEE_LEAVEs out of finally-protected 'try' blocks.
431 inline void Compiler::impEndTreeList(BasicBlock* block, GenTree* firstStmt, GenTree* lastStmt)
433 assert(firstStmt->gtOper == GT_STMT);
434 assert(lastStmt->gtOper == GT_STMT);
436 /* Make the list circular, so that we can easily walk it backwards */
438 firstStmt->gtPrev = lastStmt;
440 /* Store the tree list in the basic block */
442 block->bbTreeList = firstStmt;
444 /* The block should not already be marked as imported */
445 assert((block->bbFlags & BBF_IMPORTED) == 0);
447 block->bbFlags |= BBF_IMPORTED;
450 /*****************************************************************************
452 * Store the current tree list in the given basic block.
455 inline void Compiler::impEndTreeList(BasicBlock* block)
457 assert(impTreeList->gtOper == GT_BEG_STMTS);
459 GenTree* firstTree = impTreeList->gtNext;
463 /* The block should not already be marked as imported */
464 assert((block->bbFlags & BBF_IMPORTED) == 0);
466 // Empty block. Just mark it as imported
467 block->bbFlags |= BBF_IMPORTED;
471 // Ignore the GT_BEG_STMTS
472 assert(firstTree->gtPrev == impTreeList);
474 impEndTreeList(block, firstTree, impTreeLast);
478 if (impLastILoffsStmt != nullptr)
480 impLastILoffsStmt->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
481 impLastILoffsStmt = nullptr;
484 impTreeList = impTreeLast = nullptr;
488 /*****************************************************************************
490 * Check that storing the given tree doesn't mess up the semantic order. Note
491 * that this has only limited value as we can only check [0..chkLevel).
494 inline void Compiler::impAppendStmtCheck(GenTree* stmt, unsigned chkLevel)
499 assert(stmt->gtOper == GT_STMT);
501 if (chkLevel == (unsigned)CHECK_SPILL_ALL)
503 chkLevel = verCurrentState.esStackDepth;
506 if (verCurrentState.esStackDepth == 0 || chkLevel == 0 || chkLevel == (unsigned)CHECK_SPILL_NONE)
511 GenTree* tree = stmt->gtStmt.gtStmtExpr;
513 // Calls can only be appended if there are no GTF_GLOB_EFFECT on the stack
515 if (tree->gtFlags & GTF_CALL)
517 for (unsigned level = 0; level < chkLevel; level++)
519 assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_EFFECT) == 0);
523 if (tree->gtOper == GT_ASG)
525 // For an assignment to a local variable, all references of that
526 // variable have to be spilled. If it is aliased, all calls and
527 // indirect accesses have to be spilled
529 if (tree->gtOp.gtOp1->gtOper == GT_LCL_VAR)
531 unsigned lclNum = tree->gtOp.gtOp1->gtLclVarCommon.gtLclNum;
532 for (unsigned level = 0; level < chkLevel; level++)
534 assert(!gtHasRef(verCurrentState.esStack[level].val, lclNum, false));
535 assert(!lvaTable[lclNum].lvAddrExposed ||
536 (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT) == 0);
540 // If the access may be to global memory, all side effects have to be spilled.
542 else if (tree->gtOp.gtOp1->gtFlags & GTF_GLOB_REF)
544 for (unsigned level = 0; level < chkLevel; level++)
546 assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_REF) == 0);
553 /*****************************************************************************
555 * Append the given GT_STMT node to the current block's tree list.
556 * [0..chkLevel) is the portion of the stack which we will check for
557 * interference with stmt and spill if needed.
560 inline void Compiler::impAppendStmt(GenTree* stmt, unsigned chkLevel)
562 assert(stmt->gtOper == GT_STMT);
563 noway_assert(impTreeLast != nullptr);
565 /* If the statement being appended has any side-effects, check the stack
566 to see if anything needs to be spilled to preserve correct ordering. */
568 GenTree* expr = stmt->gtStmt.gtStmtExpr;
569 unsigned flags = expr->gtFlags & GTF_GLOB_EFFECT;
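// 'flags' captures the global-effect bits of the statement being appended; they drive the
// spilling decisions below.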
571 // Assignments to (unaliased) locals don't count as side-effects, as
572 // we handle them specially using impSpillLclRefs(). Temp locals should be fine too.
575 if ((expr->gtOper == GT_ASG) && (expr->gtOp.gtOp1->gtOper == GT_LCL_VAR) &&
576 !(expr->gtOp.gtOp1->gtFlags & GTF_GLOB_REF) && !gtHasLocalsWithAddrOp(expr->gtOp.gtOp2))
578 unsigned op2Flags = expr->gtOp.gtOp2->gtFlags & GTF_GLOB_EFFECT;
579 assert(flags == (op2Flags | GTF_ASG));
583 if (chkLevel == (unsigned)CHECK_SPILL_ALL)
585 chkLevel = verCurrentState.esStackDepth;
588 if (chkLevel && chkLevel != (unsigned)CHECK_SPILL_NONE)
590 assert(chkLevel <= verCurrentState.esStackDepth);
594 // If there is a call, we have to spill global refs
595 bool spillGlobEffects = (flags & GTF_CALL) ? true : false;
597 if (expr->gtOper == GT_ASG)
599 GenTree* lhs = expr->gtGetOp1();
600 // If we are assigning to a global ref, we have to spill global refs on stack.
601 // TODO-1stClassStructs: Previously, spillGlobEffects was set to true for
602 // GT_INITBLK and GT_COPYBLK, but this is overly conservative, and should be
603 // revisited. (Note that it was NOT set to true for GT_COPYOBJ.)
604 if (!expr->OperIsBlkOp())
606 // If we are assigning to a global ref, we have to spill global refs on stack
607 if ((lhs->gtFlags & GTF_GLOB_REF) != 0)
609 spillGlobEffects = true;
612 else if ((lhs->OperIsBlk() && !lhs->AsBlk()->HasGCPtr()) ||
613 ((lhs->OperGet() == GT_LCL_VAR) &&
614 (lvaTable[lhs->AsLclVarCommon()->gtLclNum].lvStructGcCount == 0)))
616 spillGlobEffects = true;
620 impSpillSideEffects(spillGlobEffects, chkLevel DEBUGARG("impAppendStmt"));
624 impSpillSpecialSideEff();
628 impAppendStmtCheck(stmt, chkLevel);
630 /* Point 'prev' at the previous node, so that we can walk backwards */
632 stmt->gtPrev = impTreeLast;
634 /* Append the expression statement to the list */
636 impTreeLast->gtNext = stmt;
640 impMarkContiguousSIMDFieldAssignments(stmt);
643 /* Once we set impCurStmtOffs in an appended tree, we are ready to
644 report the following offsets. So reset impCurStmtOffs */
646 if (impTreeLast->gtStmt.gtStmtILoffsx == impCurStmtOffs)
648 impCurStmtOffsSet(BAD_IL_OFFSET);
652 if (impLastILoffsStmt == nullptr)
654 impLastILoffsStmt = stmt;
665 /*****************************************************************************
667 * Insert the given GT_STMT "stmt" before GT_STMT "stmtBefore"
670 inline void Compiler::impInsertStmtBefore(GenTree* stmt, GenTree* stmtBefore)
672 assert(stmt->gtOper == GT_STMT);
673 assert(stmtBefore->gtOper == GT_STMT);
675 GenTree* stmtPrev = stmtBefore->gtPrev;
676 stmt->gtPrev = stmtPrev;
677 stmt->gtNext = stmtBefore;
678 stmtPrev->gtNext = stmt;
679 stmtBefore->gtPrev = stmt;
682 /*****************************************************************************
684 * Append the given expression tree to the current block's tree list.
685 * Return the newly created statement.
688 GenTree* Compiler::impAppendTree(GenTree* tree, unsigned chkLevel, IL_OFFSETX offset)
692 /* Allocate an 'expression statement' node */
694 GenTree* expr = gtNewStmt(tree, offset);
696 /* Append the statement to the current block's stmt list */
698 impAppendStmt(expr, chkLevel);
703 /*****************************************************************************
705 * Insert the given expression tree before GT_STMT "stmtBefore"
708 void Compiler::impInsertTreeBefore(GenTree* tree, IL_OFFSETX offset, GenTree* stmtBefore)
710 assert(stmtBefore->gtOper == GT_STMT);
712 /* Allocate an 'expression statement' node */
714 GenTree* expr = gtNewStmt(tree, offset);
716 /* Append the statement to the current block's stmt list */
718 impInsertStmtBefore(expr, stmtBefore);
721 /*****************************************************************************
723 * Append an assignment of the given value to a temp to the current tree list.
724 * curLevel is the stack level for which the spill to the temp is being done.
727 void Compiler::impAssignTempGen(unsigned tmp,
730 GenTree** pAfterStmt, /* = NULL */
731 IL_OFFSETX ilOffset, /* = BAD_IL_OFFSET */
732 BasicBlock* block /* = NULL */
735 GenTree* asg = gtNewTempAssign(tmp, val);
737 if (!asg->IsNothingNode())
741 GenTree* asgStmt = gtNewStmt(asg, ilOffset);
742 *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, asgStmt);
746 impAppendTree(asg, curLevel, impCurStmtOffs);
751 /*****************************************************************************
752 * Same as above, but handles the value-class case too
755 void Compiler::impAssignTempGen(unsigned tmpNum,
757 CORINFO_CLASS_HANDLE structType,
759 GenTree** pAfterStmt, /* = NULL */
760 IL_OFFSETX ilOffset, /* = BAD_IL_OFFSET */
761 BasicBlock* block /* = NULL */
766 if (varTypeIsStruct(val))
768 assert(tmpNum < lvaCount);
769 assert(structType != NO_CLASS_HANDLE);
771 // If the method is unverifiable, the assert may not hold;
772 // at least skip the check when verification is turned on,
773 // since any block that tries to use the temp would have failed verification anyway.
774 var_types varType = lvaTable[tmpNum].lvType;
775 assert(tiVerificationNeeded || varType == TYP_UNDEF || varTypeIsStruct(varType));
776 lvaSetStruct(tmpNum, structType, false);
778 // Now, set the type of the struct value. Note that lvaSetStruct may modify the type
779 // of the lclVar to a specialized type (e.g. TYP_SIMD), based on the handle (structType)
780 // that has been passed in for the value being assigned to the temp, in which case we
781 // need to set 'val' to that same type.
782 // Note also that if we always normalized the types of any node that might be a struct
783 // type, this would not be necessary - but that requires additional JIT/EE interface
784 // calls that may not actually be required - e.g. if we only access a field of a struct.
786 val->gtType = lvaTable[tmpNum].lvType;
788 GenTree* dst = gtNewLclvNode(tmpNum, val->gtType);
789 asg = impAssignStruct(dst, val, structType, curLevel, pAfterStmt, block);
793 asg = gtNewTempAssign(tmpNum, val);
796 if (!asg->IsNothingNode())
800 GenTree* asgStmt = gtNewStmt(asg, ilOffset);
801 *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, asgStmt);
805 impAppendTree(asg, curLevel, impCurStmtOffs);
810 /*****************************************************************************
812 * Pop the given number of values from the stack and return a list node with their values.
814 * The 'prefixTree' argument may optionally contain an argument
815 * list that is prepended to the list returned from this function.
817 * The notion of prepended is a bit misleading in that the list is backwards
818 * from the way I would expect: The first element popped is at the end of
819 * the returned list, and prefixTree is 'before' that, meaning closer to
820 * the end of the list. To get to prefixTree, you have to walk to the end of the list.
823 * For ARG_ORDER_R2L, prefixTree is only used to insert extra arguments; as
824 * such, we reverse its meaning so that returnValue has a reversed
825 * prefixTree at the head of the list.
828 GenTreeArgList* Compiler::impPopList(unsigned count, CORINFO_SIG_INFO* sig, GenTreeArgList* prefixTree)
830 assert(sig == nullptr || count == sig->numArgs);
832 CORINFO_CLASS_HANDLE structType;
833 GenTreeArgList* treeList;
835 if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L)
841 treeList = prefixTree;
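// Pop each argument off the IL stack; struct values are normalized to OBJ/MKREFANY form
// before being linked onto the argument list.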
846 StackEntry se = impPopStack();
847 typeInfo ti = se.seTypeInfo;
848 GenTree* temp = se.val;
850 if (varTypeIsStruct(temp))
852 // Morph trees that aren't already OBJs or MKREFANY to be OBJs
853 assert(ti.IsType(TI_STRUCT));
854 structType = ti.GetClassHandleForValueClass();
858 printf("Calling impNormStructVal on:\n");
862 temp = impNormStructVal(temp, structType, (unsigned)CHECK_SPILL_ALL);
866 printf("resulting tree:\n");
872 /* NOTE: we defer bashing the type for I_IMPL to fgMorphArgs */
873 treeList = gtNewListNode(temp, treeList);
878 if (sig->retTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS &&
879 sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR && sig->retType != CORINFO_TYPE_VAR)
881 // Make sure that all valuetypes (including enums) that we push are loaded.
882 // This is to guarantee that if a GC is triggered from the prestub of this method,
883 // all valuetypes in the method signature are already loaded.
884 // We need to be able to find the size of the valuetypes, but we cannot
885 // do a class-load from within GC.
886 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(sig->retTypeSigClass);
889 CORINFO_ARG_LIST_HANDLE argLst = sig->args;
890 CORINFO_CLASS_HANDLE argClass;
891 CORINFO_CLASS_HANDLE argRealClass;
892 GenTreeArgList* args;
894 for (args = treeList, count = sig->numArgs; count > 0; args = args->Rest(), count--)
896 PREFIX_ASSUME(args != nullptr);
898 CorInfoType corType = strip(info.compCompHnd->getArgType(sig, argLst, &argClass));
900 // insert implied casts (from float to double or double to float)
902 if (corType == CORINFO_TYPE_DOUBLE && args->Current()->TypeGet() == TYP_FLOAT)
904 args->Current() = gtNewCastNode(TYP_DOUBLE, args->Current(), false, TYP_DOUBLE);
906 else if (corType == CORINFO_TYPE_FLOAT && args->Current()->TypeGet() == TYP_DOUBLE)
908 args->Current() = gtNewCastNode(TYP_FLOAT, args->Current(), false, TYP_FLOAT);
911 // insert any widening or narrowing casts for backwards compatibility
913 args->Current() = impImplicitIorI4Cast(args->Current(), JITtype2varType(corType));
915 if (corType != CORINFO_TYPE_CLASS && corType != CORINFO_TYPE_BYREF && corType != CORINFO_TYPE_PTR &&
916 corType != CORINFO_TYPE_VAR && (argRealClass = info.compCompHnd->getArgClass(sig, argLst)) != nullptr)
918 // Everett MC++ could generate IL with mismatched valuetypes. It used to work with the Everett JIT,
919 // but it stopped working in Whidbey when we started passing simple valuetypes as underlying primitive types.
921 // We will try to adjust for this case here to avoid breaking customers' code (see VSW 485789 for details).
923 if (corType == CORINFO_TYPE_VALUECLASS && !varTypeIsStruct(args->Current()))
925 args->Current() = impNormStructVal(args->Current(), argRealClass, (unsigned)CHECK_SPILL_ALL, true);
928 // Make sure that all valuetypes (including enums) that we push are loaded.
929 // This is to guarantee that if a GC is triggered from the prestub of this method,
930 // all valuetypes in the method signature are already loaded.
931 // We need to be able to find the size of the valuetypes, but we cannot
932 // do a class-load from within GC.
933 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(argRealClass);
936 argLst = info.compCompHnd->getArgNext(argLst);
940 if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L)
942 // Prepend the prefixTree
944 // Simple in-place reversal to place treeList
945 // at the end of a reversed prefixTree
946 while (prefixTree != nullptr)
948 GenTreeArgList* next = prefixTree->Rest();
949 prefixTree->Rest() = treeList;
950 treeList = prefixTree;
957 /*****************************************************************************
959 * Pop the given number of values from the stack in reverse order (STDCALL/CDECL etc.)
960 * The first "skipReverseCount" items are not reversed.
963 GenTreeArgList* Compiler::impPopRevList(unsigned count, CORINFO_SIG_INFO* sig, unsigned skipReverseCount)
966 assert(skipReverseCount <= count);
968 GenTreeArgList* list = impPopList(count, sig);
971 if (list == nullptr || skipReverseCount == count)
976 GenTreeArgList* ptr = nullptr; // Initialized to the first node that needs to be reversed
977 GenTreeArgList* lastSkipNode = nullptr; // Will be set to the last node that does not need to be reversed
979 if (skipReverseCount == 0)
986 // Get to the first node that needs to be reversed
987 for (unsigned i = 0; i < skipReverseCount - 1; i++)
989 lastSkipNode = lastSkipNode->Rest();
992 PREFIX_ASSUME(lastSkipNode != nullptr);
993 ptr = lastSkipNode->Rest();
996 GenTreeArgList* reversedList = nullptr;
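// Reverse the remaining nodes in place: repeatedly unlink the head and push it onto reversedList.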
1000 GenTreeArgList* tmp = ptr->Rest();
1001 ptr->Rest() = reversedList;
1004 } while (ptr != nullptr);
1006 if (skipReverseCount)
1008 lastSkipNode->Rest() = reversedList;
1013 return reversedList;
1017 /*****************************************************************************
1018 Assign (copy) the structure from 'src' to 'dest'. The structure is a value
1019 class of type 'clsHnd'. It returns the tree that should be appended to the
1020 statement list that represents the assignment.
1021 Temp assignments may be appended to impTreeList if spilling is necessary.
1022 curLevel is the stack level for which a spill may be being done.
1025 GenTree* Compiler::impAssignStruct(GenTree* dest,
1027 CORINFO_CLASS_HANDLE structHnd,
1029 GenTree** pAfterStmt, /* = NULL */
1030 BasicBlock* block /* = NULL */
1033 assert(varTypeIsStruct(dest));
1035 while (dest->gtOper == GT_COMMA)
1037 assert(varTypeIsStruct(dest->gtOp.gtOp2)); // Second thing is the struct
1039 // Append all the op1 of GT_COMMA trees before we evaluate op2 of the GT_COMMA tree.
1042 *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(dest->gtOp.gtOp1, impCurStmtOffs));
1046 impAppendTree(dest->gtOp.gtOp1, curLevel, impCurStmtOffs); // do the side effect
1049 // set dest to the second thing
1050 dest = dest->gtOp.gtOp2;
1053 assert(dest->gtOper == GT_LCL_VAR || dest->gtOper == GT_RETURN || dest->gtOper == GT_FIELD ||
1054 dest->gtOper == GT_IND || dest->gtOper == GT_OBJ || dest->gtOper == GT_INDEX);
1056 if (dest->OperGet() == GT_LCL_VAR && src->OperGet() == GT_LCL_VAR &&
1057 src->gtLclVarCommon.gtLclNum == dest->gtLclVarCommon.gtLclNum)
1060 return gtNewNothingNode();
1063 // TODO-1stClassStructs: Avoid creating an address if it is not needed,
1064 // or re-creating a Blk node if it is.
1067 if (dest->gtOper == GT_IND || dest->OperIsBlk())
1069 destAddr = dest->gtOp.gtOp1;
1073 destAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, dest);
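// Delegate to impAssignStructPtr, which builds the actual assignment through the destination address.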
1076 return (impAssignStructPtr(destAddr, src, structHnd, curLevel, pAfterStmt, block));
1079 /*****************************************************************************/
1081 GenTree* Compiler::impAssignStructPtr(GenTree* destAddr,
1083 CORINFO_CLASS_HANDLE structHnd,
1085 GenTree** pAfterStmt, /* = NULL */
1086 BasicBlock* block /* = NULL */
1090 GenTree* dest = nullptr;
1091 unsigned destFlags = 0;
1093 #if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1094 assert(varTypeIsStruct(src) || (src->gtOper == GT_ADDR && src->TypeGet() == TYP_BYREF));
1095 // TODO-ARM-BUG: Does ARM need this?
1096 // TODO-ARM64-BUG: Does ARM64 need this?
1097 assert(src->gtOper == GT_LCL_VAR || src->gtOper == GT_FIELD || src->gtOper == GT_IND || src->gtOper == GT_OBJ ||
1098 src->gtOper == GT_CALL || src->gtOper == GT_MKREFANY || src->gtOper == GT_RET_EXPR ||
1099 src->gtOper == GT_COMMA || src->gtOper == GT_ADDR ||
1100 (src->TypeGet() != TYP_STRUCT &&
1101 (GenTree::OperIsSIMD(src->gtOper) || src->OperIsSimdHWIntrinsic() || src->gtOper == GT_LCL_FLD)));
1102 #else // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1103 assert(varTypeIsStruct(src));
1105 assert(src->gtOper == GT_LCL_VAR || src->gtOper == GT_FIELD || src->gtOper == GT_IND || src->gtOper == GT_OBJ ||
1106 src->gtOper == GT_CALL || src->gtOper == GT_MKREFANY || src->gtOper == GT_RET_EXPR ||
1107 src->gtOper == GT_COMMA ||
1108 (src->TypeGet() != TYP_STRUCT &&
1109 (GenTree::OperIsSIMD(src->gtOper) || src->OperIsSimdHWIntrinsic() || src->gtOper == GT_LCL_FLD)));
1110 #endif // !defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1111 if (destAddr->OperGet() == GT_ADDR)
1113 GenTree* destNode = destAddr->gtGetOp1();
1114 // If the actual destination is a local (for non-LEGACY_BACKEND), or already a block node, or is a node that
1115 // will be morphed, don't insert an OBJ(ADDR).
1116 if (destNode->gtOper == GT_INDEX || destNode->OperIsBlk()
1117 #ifndef LEGACY_BACKEND
1118 || ((destNode->OperGet() == GT_LCL_VAR) && (destNode->TypeGet() == src->TypeGet()))
1119 #endif // !LEGACY_BACKEND
1124 destType = destNode->TypeGet();
1128 destType = src->TypeGet();
1131 var_types asgType = src->TypeGet();
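// 'asgType' is the type we will use for the assignment node; the cases below may refine it
// (along with 'dest' and 'destFlags') based on the shape of the source tree.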
1133 if (src->gtOper == GT_CALL)
1135 if (src->AsCall()->TreatAsHasRetBufArg(this))
1137 // Case of call returning a struct via hidden retbuf arg
1139 // insert the return value buffer into the argument list as the first byref parameter
1140 src->gtCall.gtCallArgs = gtNewListNode(destAddr, src->gtCall.gtCallArgs);
1142 // now returns void, not a struct
1143 src->gtType = TYP_VOID;
1145 // return the morphed call node
1150 // Case of call returning a struct in one or more registers.
1152 var_types returnType = (var_types)src->gtCall.gtReturnType;
1154 // We won't use a return buffer, so change the type of src->gtType to 'returnType'
1155 src->gtType = genActualType(returnType);
1157 // First we try to change this to "LclVar/LclFld = call"
1159 if ((destAddr->gtOper == GT_ADDR) && (destAddr->gtOp.gtOp1->gtOper == GT_LCL_VAR))
1161 // If it is a multi-reg struct return, don't change the oper to GT_LCL_FLD.
1162 // That is, the IR will be of the form lclVar = call for multi-reg return
1164 GenTree* lcl = destAddr->gtOp.gtOp1;
1165 if (src->AsCall()->HasMultiRegRetVal())
1167 // Mark the struct LclVar as used in a MultiReg return context
1168 // which currently makes it non promotable.
1169 // TODO-1stClassStructs: Eliminate this pessimization when we can more generally
1170 // handle multireg returns.
1171 lcl->gtFlags |= GTF_DONT_CSE;
1172 lvaTable[lcl->gtLclVarCommon.gtLclNum].lvIsMultiRegRet = true;
1174 else // The call result is not a multireg return
1176 // We change this to a GT_LCL_FLD (from a GT_ADDR of a GT_LCL_VAR)
1177 lcl->ChangeOper(GT_LCL_FLD);
1178 fgLclFldAssign(lcl->gtLclVarCommon.gtLclNum);
1179 lcl->gtType = src->gtType;
1180 asgType = src->gtType;
1185 #if defined(_TARGET_ARM_)
1186 // TODO-Cleanup: This should have been taken care of in the above HasMultiRegRetVal() case,
1187 // but that method has not been updated to include ARM.
1188 impMarkLclDstNotPromotable(lcl->gtLclVarCommon.gtLclNum, src, structHnd);
1189 lcl->gtFlags |= GTF_DONT_CSE;
1190 #elif defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
1191 // Not allowed for FEATURE_CORECLR, which is the only SKU available for System V OSs.
1192 assert(!src->gtCall.IsVarargs() && "varargs not allowed for System V OSs.");
1194 // Make the struct non promotable. The eightbytes could contain multiple fields.
1195 // TODO-1stClassStructs: Eliminate this pessimization when we can more generally
1196 // handle multireg returns.
1197 // TODO-Cleanup: Why is this needed here? This seems that it will set this even for
1198 // non-multireg returns.
1199 lcl->gtFlags |= GTF_DONT_CSE;
1200 lvaTable[lcl->gtLclVarCommon.gtLclNum].lvIsMultiRegRet = true;
1203 else // we don't have a GT_ADDR of a GT_LCL_VAR
1205 // !!! The destination could be on the stack. !!!
1206 // This flag will let us choose the correct write barrier.
1207 asgType = returnType;
1208 destFlags = GTF_IND_TGTANYWHERE;
1212 else if (src->gtOper == GT_RET_EXPR)
1214 GenTreeCall* call = src->gtRetExpr.gtInlineCandidate->AsCall();
1215 noway_assert(call->gtOper == GT_CALL);
1217 if (call->HasRetBufArg())
1219 // insert the return value buffer into the argument list as the first byref parameter
1220 call->gtCallArgs = gtNewListNode(destAddr, call->gtCallArgs);
1222 // now returns void, not a struct
1223 src->gtType = TYP_VOID;
1224 call->gtType = TYP_VOID;
1226 // The write to 'dest' now happens via the GT_CALL's retbuf argument appended above,
1227 // so we just return an empty node here (pruning the GT_RET_EXPR).
1232 // Case of inline method returning a struct in one or more registers.
1234 var_types returnType = (var_types)call->gtReturnType;
1236 // We won't need a return buffer
1237 asgType = returnType;
1238 src->gtType = genActualType(returnType);
1239 call->gtType = src->gtType;
1241 // If we've changed the type, and it no longer matches a local destination,
1242 // we must use an indirection.
1243 if ((dest != nullptr) && (dest->OperGet() == GT_LCL_VAR) && (dest->TypeGet() != asgType))
1248 // !!! The destination could be on the stack. !!!
1249 // This flag will let us choose the correct write barrier.
1250 destFlags = GTF_IND_TGTANYWHERE;
1253 else if (src->OperIsBlk())
1255 asgType = impNormStructType(structHnd);
1256 if (src->gtOper == GT_OBJ)
1258 assert(src->gtObj.gtClass == structHnd);
1261 else if (src->gtOper == GT_INDEX)
1263 asgType = impNormStructType(structHnd);
1264 assert(src->gtIndex.gtStructElemClass == structHnd);
1266 else if (src->gtOper == GT_MKREFANY)
1268 // Since we are assigning the result of a GT_MKREFANY,
1269 // "destAddr" must point to a refany.
1271 GenTree* destAddrClone;
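// Clone the destination address: 'destAddr' is used to store the refany's data pointer
// (at offset 0) and 'destAddrClone' to store its type field.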
1273 impCloneExpr(destAddr, &destAddrClone, structHnd, curLevel, pAfterStmt DEBUGARG("MKREFANY assignment"));
1275 assert(offsetof(CORINFO_RefAny, dataPtr) == 0);
1276 assert(destAddr->gtType == TYP_I_IMPL || destAddr->gtType == TYP_BYREF);
1277 GetZeroOffsetFieldMap()->Set(destAddr, GetFieldSeqStore()->CreateSingleton(GetRefanyDataField()));
1278 GenTree* ptrSlot = gtNewOperNode(GT_IND, TYP_I_IMPL, destAddr);
1279 GenTreeIntCon* typeFieldOffset = gtNewIconNode(offsetof(CORINFO_RefAny, type), TYP_I_IMPL);
1280 typeFieldOffset->gtFieldSeq = GetFieldSeqStore()->CreateSingleton(GetRefanyTypeField());
1282 gtNewOperNode(GT_IND, TYP_I_IMPL, gtNewOperNode(GT_ADD, destAddr->gtType, destAddrClone, typeFieldOffset));
1284 // append the assign of the pointer value
1285 GenTree* asg = gtNewAssignNode(ptrSlot, src->gtOp.gtOp1);
1288 *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(asg, impCurStmtOffs));
1292 impAppendTree(asg, curLevel, impCurStmtOffs);
1295 // return the assign of the type value, to be appended
1296 return gtNewAssignNode(typeSlot, src->gtOp.gtOp2);
1298 else if (src->gtOper == GT_COMMA)
1300 // The second thing is the struct or its address.
1301 assert(varTypeIsStruct(src->gtOp.gtOp2) || src->gtOp.gtOp2->gtType == TYP_BYREF);
1304 *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(src->gtOp.gtOp1, impCurStmtOffs));
1308 impAppendTree(src->gtOp.gtOp1, curLevel, impCurStmtOffs); // do the side effect
1311 // Evaluate the second thing using recursion.
1312 return impAssignStructPtr(destAddr, src->gtOp.gtOp2, structHnd, curLevel, pAfterStmt, block);
1314 else if (src->IsLocal())
1316 asgType = src->TypeGet();
1318 else if (asgType == TYP_STRUCT)
1320 asgType = impNormStructType(structHnd);
1321 src->gtType = asgType;
1322 #ifdef LEGACY_BACKEND
1323 if (asgType == TYP_STRUCT)
1325 GenTree* srcAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, src);
1326 src = gtNewOperNode(GT_IND, TYP_STRUCT, srcAddr);
1330 if (dest == nullptr)
1332 // TODO-1stClassStructs: We shouldn't really need a block node as the destination
1333 // if this is a known struct type.
1334 if (asgType == TYP_STRUCT)
1336 dest = gtNewObjNode(structHnd, destAddr);
1337 gtSetObjGcInfo(dest->AsObj());
1338 // Although an obj as a call argument was always assumed to be a globRef
1339 // (which is itself overly conservative), that is not true of the operands
1340 // of a block assignment.
1341 dest->gtFlags &= ~GTF_GLOB_REF;
1342 dest->gtFlags |= (destAddr->gtFlags & GTF_GLOB_REF);
1344 else if (varTypeIsStruct(asgType))
1346 dest = new (this, GT_BLK) GenTreeBlk(GT_BLK, asgType, destAddr, genTypeSize(asgType));
1350 dest = gtNewOperNode(GT_IND, asgType, destAddr);
1355 dest->gtType = asgType;
1358 dest->gtFlags |= destFlags;
1359 destFlags = dest->gtFlags;
1361 // return an assignment node, to be appended
1362 GenTree* asgNode = gtNewAssignNode(dest, src);
1363 gtBlockOpInit(asgNode, dest, src, false);
1365 // TODO-1stClassStructs: Clean up the settings of GTF_DONT_CSE on the lhs
1367 if ((destFlags & GTF_DONT_CSE) == 0)
1369 dest->gtFlags &= ~(GTF_DONT_CSE);
1374 /*****************************************************************************
1375 Given a struct value, and the class handle for that structure, return
1376 the expression for the address of that structure value.
1378 willDeref - whether the caller guarantees to dereference the returned pointer.
1381 GenTree* Compiler::impGetStructAddr(GenTree* structVal,
1382 CORINFO_CLASS_HANDLE structHnd,
1386 assert(varTypeIsStruct(structVal) || eeIsValueClass(structHnd));
1388 var_types type = structVal->TypeGet();
1390 genTreeOps oper = structVal->gtOper;
1392 if (oper == GT_OBJ && willDeref)
1394 assert(structVal->gtObj.gtClass == structHnd);
1395 return (structVal->gtObj.Addr());
1397 else if (oper == GT_CALL || oper == GT_RET_EXPR || oper == GT_OBJ || oper == GT_MKREFANY ||
1398 structVal->OperIsSimdHWIntrinsic())
1400 unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));
1402 impAssignTempGen(tmpNum, structVal, structHnd, curLevel);
1404 // The 'return value' is now the temp itself
1406 type = genActualType(lvaTable[tmpNum].TypeGet());
1407 GenTree* temp = gtNewLclvNode(tmpNum, type);
1408 temp = gtNewOperNode(GT_ADDR, TYP_BYREF, temp);
1411 else if (oper == GT_COMMA)
1413 assert(structVal->gtOp.gtOp2->gtType == type); // Second thing is the struct
1415 GenTree* oldTreeLast = impTreeLast;
1416 structVal->gtOp.gtOp2 = impGetStructAddr(structVal->gtOp.gtOp2, structHnd, curLevel, willDeref);
1417 structVal->gtType = TYP_BYREF;
1419 if (oldTreeLast != impTreeLast)
1421 // Some temp assignment statement was placed on the statement list
1422 // for Op2, but that would be out of order with op1, so we need to
1423 // spill op1 onto the statement list after whatever was last
1424 // before we recursed on Op2 (i.e. before whatever Op2 appended).
1425 impInsertTreeBefore(structVal->gtOp.gtOp1, impCurStmtOffs, oldTreeLast->gtNext);
1426 structVal->gtOp.gtOp1 = gtNewNothingNode();
1432 return (gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1435 //------------------------------------------------------------------------
1436 // impNormStructType: Given a (known to be) struct class handle structHnd, normalize its type,
1437 // and optionally determine the GC layout of the struct.
1440 // structHnd - The class handle for the struct type of interest.
1441 // gcLayout - (optional, default nullptr) - a BYTE pointer, allocated by the caller,
1442 // into which the gcLayout will be written.
1443 // pNumGCVars - (optional, default nullptr) - if non-null, a pointer to an unsigned,
1444 // which will be set to the number of GC fields in the struct.
1445 // pSimdBaseType - (optional, default nullptr) - if non-null, and the struct is a SIMD
1446 // type, set to the SIMD base type
1449 // The JIT type for the struct (e.g. TYP_STRUCT, or TYP_SIMD*).
1450 // The gcLayout will be returned using the pointers provided by the caller, if non-null.
1451 // It may also modify the compFloatingPointUsed flag if the type is a SIMD type.
1454 // The caller must set gcLayout to nullptr OR ensure that it is large enough
1455 // (see ICorStaticInfo::getClassGClayout in corinfo.h).
1458 // Normalizing the type involves examining the struct type to determine if it should
1459 // be modified to one that is handled specially by the JIT, possibly being a candidate
1460 // for full enregistration, e.g. TYP_SIMD16.
1462 var_types Compiler::impNormStructType(CORINFO_CLASS_HANDLE structHnd,
1464 unsigned* pNumGCVars,
1465 var_types* pSimdBaseType)
1467 assert(structHnd != NO_CLASS_HANDLE);
1469 const DWORD structFlags = info.compCompHnd->getClassAttribs(structHnd);
1470 var_types structType = TYP_STRUCT;
1472 // On coreclr the check for GC includes a "may" to account for the special
1473 // ByRef-like span structs. The added check for "CONTAINS_STACK_PTR" is the relevant bit.
1474 // When this is set, the struct will contain a ByRef that could be either a GC pointer or a native pointer.
1476 const bool mayContainGCPtrs =
1477 ((structFlags & CORINFO_FLG_CONTAINS_STACK_PTR) != 0 || ((structFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0));
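// SIMD vector types never contain GC pointers, so only structs without (possible) GC
// references are considered for retyping as TYP_SIMD*.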
1480 // Check to see if this is a SIMD type.
1481 if (featureSIMD && !mayContainGCPtrs)
1483 unsigned originalSize = info.compCompHnd->getClassSize(structHnd);
1485 if ((originalSize >= minSIMDStructBytes()) && (originalSize <= maxSIMDStructBytes()))
1487 unsigned int sizeBytes;
1488 var_types simdBaseType = getBaseTypeAndSizeOfSIMDType(structHnd, &sizeBytes);
1489 if (simdBaseType != TYP_UNKNOWN)
1491 assert(sizeBytes == originalSize);
1492 structType = getSIMDTypeForSize(sizeBytes);
1493 if (pSimdBaseType != nullptr)
1495 *pSimdBaseType = simdBaseType;
1497 // Also indicate that we use floating point registers.
1498 compFloatingPointUsed = true;
1502 #endif // FEATURE_SIMD
1504 // Fetch GC layout info if requested
1505 if (gcLayout != nullptr)
1507 unsigned numGCVars = info.compCompHnd->getClassGClayout(structHnd, gcLayout);
1509 // Verify that the quick test up above via the class attributes gave a
1510 // safe view of the type's GCness.
1512 // Note there are cases where mayContainGCPtrs is true but getClassGClayout
1513 // does not report any gc fields.
1515 assert(mayContainGCPtrs || (numGCVars == 0));
1517 if (pNumGCVars != nullptr)
1519 *pNumGCVars = numGCVars;
1524 // Can't safely ask for number of GC pointers without also
1525 // asking for layout.
1526 assert(pNumGCVars == nullptr);
1532 //****************************************************************************
1533 // Given TYP_STRUCT value 'structVal', make sure it is 'canonical', that is
1534 // it is either an OBJ or a MKREFANY node, or a node (e.g. GT_INDEX) that will be morphed.
1536 GenTree* Compiler::impNormStructVal(GenTree* structVal,
1537 CORINFO_CLASS_HANDLE structHnd,
1539 bool forceNormalization /*=false*/)
1541 assert(forceNormalization || varTypeIsStruct(structVal));
1542 assert(structHnd != NO_CLASS_HANDLE);
1543 var_types structType = structVal->TypeGet();
1544 bool makeTemp = false;
1545 if (structType == TYP_STRUCT)
1547 structType = impNormStructType(structHnd);
1549 bool alreadyNormalized = false;
1550 GenTreeLclVarCommon* structLcl = nullptr;
1552 genTreeOps oper = structVal->OperGet();
1555 // GT_RETURN and GT_MKREFANY don't capture the handle.
1559 alreadyNormalized = true;
1563 structVal->gtCall.gtRetClsHnd = structHnd;
1568 structVal->gtRetExpr.gtRetClsHnd = structHnd;
1573 structVal->gtArgPlace.gtArgPlaceClsHnd = structHnd;
1577 // This will be transformed to an OBJ later.
1578 alreadyNormalized = true;
1579 structVal->gtIndex.gtStructElemClass = structHnd;
1580 structVal->gtIndex.gtIndElemSize = info.compCompHnd->getClassSize(structHnd);
1584 // Wrap it in a GT_OBJ.
1585 structVal->gtType = structType;
1586 structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1591 structLcl = structVal->AsLclVarCommon();
1592 // Wrap it in a GT_OBJ.
1593 structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1600 // These should already have the appropriate type.
1601 assert(structVal->gtType == structType);
1602 alreadyNormalized = true;
1606 assert(structVal->gtType == structType);
1607 structVal = gtNewObjNode(structHnd, structVal->gtGetOp1());
1608 alreadyNormalized = true;
1613 assert(varTypeIsSIMD(structVal) && (structVal->gtType == structType));
1615 #endif // FEATURE_SIMD
1616 #ifdef FEATURE_HW_INTRINSICS
1617 case GT_HWIntrinsic:
1618 assert(varTypeIsSIMD(structVal) && (structVal->gtType == structType));
1624 // The second thing could either be a block node or a GT_FIELD or a GT_SIMD or a GT_COMMA node.
1625 GenTree* blockNode = structVal->gtOp.gtOp2;
1626 assert(blockNode->gtType == structType);
1628 // Is this GT_COMMA(op1, GT_COMMA())?
1629 GenTree* parent = structVal;
1630 if (blockNode->OperGet() == GT_COMMA)
1632 // Find the last node in the comma chain.
1635 assert(blockNode->gtType == structType);
1637 blockNode = blockNode->gtOp.gtOp2;
1638 } while (blockNode->OperGet() == GT_COMMA);
1641 if (blockNode->OperGet() == GT_FIELD)
1643 // If we have a GT_FIELD then wrap it in a GT_OBJ.
1644 blockNode = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, blockNode));
1648 if (blockNode->OperIsSIMDorSimdHWintrinsic())
1650 parent->gtOp.gtOp2 = impNormStructVal(blockNode, structHnd, curLevel, forceNormalization);
1651 alreadyNormalized = true;
1656 noway_assert(blockNode->OperIsBlk());
1658 // Sink the GT_COMMA below the blockNode addr.
1659 // That is, GT_COMMA(op1, op2=blockNode) is transformed into
1660 // blockNode(GT_COMMA(TYP_BYREF, op1, op2's op1)).
1662 // In the case of a chained GT_COMMA, we sink the last
1663 // GT_COMMA below the blockNode addr.
1664 GenTree* blockNodeAddr = blockNode->gtOp.gtOp1;
1665 assert(blockNodeAddr->gtType == TYP_BYREF);
1666 GenTree* commaNode = parent;
1667 commaNode->gtType = TYP_BYREF;
1668 commaNode->gtOp.gtOp2 = blockNodeAddr;
1669 blockNode->gtOp.gtOp1 = commaNode;
1670 if (parent == structVal)
1672 structVal = blockNode;
1674 alreadyNormalized = true;
1680 noway_assert(!"Unexpected node in impNormStructVal()");
1683 structVal->gtType = structType;
1684 GenTree* structObj = structVal;
1686 if (!alreadyNormalized || forceNormalization)
1690 unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));
1692 impAssignTempGen(tmpNum, structVal, structHnd, curLevel);
1694 // The structVal is now the temp itself
1696 structLcl = gtNewLclvNode(tmpNum, structType)->AsLclVarCommon();
1697 // TODO-1stClassStructs: Avoid always wrapping in GT_OBJ.
1698 structObj = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structLcl));
1700 else if (varTypeIsStruct(structType) && !structVal->OperIsBlk())
1702 // Wrap it in a GT_OBJ
1703 structObj = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
1707 if (structLcl != nullptr)
1709 // An OBJ on an ADDR(LCL_VAR) can never raise an exception
1710 // so we don't set GTF_EXCEPT here.
1711 if (!lvaIsImplicitByRefLocal(structLcl->gtLclNum))
1713 structObj->gtFlags &= ~GTF_GLOB_REF;
1718 // In general an OBJ is an indirection and could raise an exception.
1719 structObj->gtFlags |= GTF_EXCEPT;
1724 /******************************************************************************/
1725 // Given a type token, generate code that will evaluate to the correct
1726 // handle representation of that token (type handle, field handle, or method handle)
1728 // For most cases, the handle is determined at compile-time, and the code
1729 // generated is simply an embedded handle.
1731 // Run-time lookup is required if the enclosing method is shared between instantiations
1732 // and the token refers to formal type parameters whose instantiation is not known at compile-time.
1735 GenTree* Compiler::impTokenToHandle(CORINFO_RESOLVED_TOKEN* pResolvedToken,
1736 BOOL* pRuntimeLookup /* = NULL */,
1737 BOOL mustRestoreHandle /* = FALSE */,
1738 BOOL importParent /* = FALSE */)
1740 assert(!fgGlobalMorph);
1742 CORINFO_GENERICHANDLE_RESULT embedInfo;
1743 info.compCompHnd->embedGenericHandle(pResolvedToken, importParent, &embedInfo);
1747 *pRuntimeLookup = embedInfo.lookup.lookupKind.needsRuntimeLookup;
1750 if (mustRestoreHandle && !embedInfo.lookup.lookupKind.needsRuntimeLookup)
1752 switch (embedInfo.handleType)
1754 case CORINFO_HANDLETYPE_CLASS:
1755 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun((CORINFO_CLASS_HANDLE)embedInfo.compileTimeHandle);
1758 case CORINFO_HANDLETYPE_METHOD:
1759 info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun((CORINFO_METHOD_HANDLE)embedInfo.compileTimeHandle);
1762 case CORINFO_HANDLETYPE_FIELD:
1763 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(
1764 info.compCompHnd->getFieldClass((CORINFO_FIELD_HANDLE)embedInfo.compileTimeHandle));
1772 // Generate the full lookup tree. May be null if we're abandoning an inline attempt.
1773 GenTree* result = impLookupToTree(pResolvedToken, &embedInfo.lookup, gtTokenToIconFlags(pResolvedToken->token),
1774 embedInfo.compileTimeHandle);
1776 // If we have a result and it requires runtime lookup, wrap it in a runtime lookup node.
1777 if ((result != nullptr) && embedInfo.lookup.lookupKind.needsRuntimeLookup)
1779 result = gtNewRuntimeLookup(embedInfo.compileTimeHandle, embedInfo.handleType, result);
1785 GenTree* Compiler::impLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
1786 CORINFO_LOOKUP* pLookup,
1787 unsigned handleFlags,
1788 void* compileTimeHandle)
1790 if (!pLookup->lookupKind.needsRuntimeLookup)
1792 // No runtime lookup is required.
1793 // Access is a direct or memory-indirect (at a fixed address) reference
1795 CORINFO_GENERIC_HANDLE handle = nullptr;
1796 void* pIndirection = nullptr;
1797 assert(pLookup->constLookup.accessType != IAT_PPVALUE && pLookup->constLookup.accessType != IAT_RELPVALUE);
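// Only IAT_VALUE (a direct handle) and IAT_PVALUE (a handle reached through one indirection)
// are expected here; the icon node records whichever one we have.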
1799 if (pLookup->constLookup.accessType == IAT_VALUE)
1801 handle = pLookup->constLookup.handle;
1803 else if (pLookup->constLookup.accessType == IAT_PVALUE)
1805 pIndirection = pLookup->constLookup.addr;
1807 return gtNewIconEmbHndNode(handle, pIndirection, handleFlags, compileTimeHandle);
1809 else if (compIsForInlining())
1811 // Don't import runtime lookups when inlining
1812 // Inlining has to be aborted in such a case
1813 compInlineResult->NoteFatal(InlineObservation::CALLSITE_GENERIC_DICTIONARY_LOOKUP);
1818 // Need to use dictionary-based access which depends on the typeContext
1819 // which is only available at runtime, not at compile-time.
1821 return impRuntimeLookupToTree(pResolvedToken, pLookup, compileTimeHandle);
1825 #ifdef FEATURE_READYTORUN_COMPILER
1826 GenTree* Compiler::impReadyToRunLookupToTree(CORINFO_CONST_LOOKUP* pLookup,
1827 unsigned handleFlags,
1828 void* compileTimeHandle)
1830 CORINFO_GENERIC_HANDLE handle = nullptr;
1831 void* pIndirection = nullptr;
1832 assert(pLookup->accessType != IAT_PPVALUE && pLookup->accessType != IAT_RELPVALUE);
1834 if (pLookup->accessType == IAT_VALUE)
1836 handle = pLookup->handle;
1838 else if (pLookup->accessType == IAT_PVALUE)
1840 pIndirection = pLookup->addr;
1842 return gtNewIconEmbHndNode(handle, pIndirection, handleFlags, compileTimeHandle);
1845 GenTreeCall* Compiler::impReadyToRunHelperToTree(
1846 CORINFO_RESOLVED_TOKEN* pResolvedToken,
1847 CorInfoHelpFunc helper,
1849 GenTreeArgList* args /* =NULL*/,
1850 CORINFO_LOOKUP_KIND* pGenericLookupKind /* =NULL. Only used with generics */)
1852 CORINFO_CONST_LOOKUP lookup;
1853 if (!info.compCompHnd->getReadyToRunHelper(pResolvedToken, pGenericLookupKind, helper, &lookup))
1858 GenTreeCall* op1 = gtNewHelperCallNode(helper, type, args);
1860 op1->setEntryPoint(lookup);
1866 GenTree* Compiler::impMethodPointer(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo)
1868 GenTree* op1 = nullptr;
1870 switch (pCallInfo->kind)
1873 op1 = new (this, GT_FTN_ADDR) GenTreeFptrVal(TYP_I_IMPL, pCallInfo->hMethod);
1875 #ifdef FEATURE_READYTORUN_COMPILER
1876 if (opts.IsReadyToRun())
1878 op1->gtFptrVal.gtEntryPoint = pCallInfo->codePointerLookup.constLookup;
1882 op1->gtFptrVal.gtEntryPoint.addr = nullptr;
1883 op1->gtFptrVal.gtEntryPoint.accessType = IAT_VALUE;
1888 case CORINFO_CALL_CODE_POINTER:
1889 if (compIsForInlining())
1891 // Don't import runtime lookups when inlining
1892 // Inlining has to be aborted in such a case
1893 compInlineResult->NoteFatal(InlineObservation::CALLSITE_GENERIC_DICTIONARY_LOOKUP);
1897 op1 = impLookupToTree(pResolvedToken, &pCallInfo->codePointerLookup, GTF_ICON_FTN_ADDR, pCallInfo->hMethod);
1901 noway_assert(!"unknown call kind");
1908 //------------------------------------------------------------------------
1909 // getRuntimeContextTree: find pointer to context for runtime lookup.
1912 // kind - lookup kind.
1915 // Return GenTree pointer to generic shared context.
1918 // Reports use of the generic context.
1920 GenTree* Compiler::getRuntimeContextTree(CORINFO_RUNTIME_LOOKUP_KIND kind)
1922 GenTree* ctxTree = nullptr;
1924 // Collectible types require that, for shared generic code, if we use the generic context parameter,
1925 // we report it. (This is a conservative approach; we could detect some cases, particularly when the
1926 // context parameter is 'this', where we don't need the eager reporting logic.)
1927 lvaGenericsContextUseCount++;
1929 if (kind == CORINFO_LOOKUP_THISOBJ)
1932 ctxTree = gtNewLclvNode(info.compThisArg, TYP_REF);
1934 // Vtable pointer of this object
1935 ctxTree = gtNewOperNode(GT_IND, TYP_I_IMPL, ctxTree);
1936 ctxTree->gtFlags |= GTF_EXCEPT; // Null-pointer exception
1937 ctxTree->gtFlags |= GTF_IND_INVARIANT;
1941 assert(kind == CORINFO_LOOKUP_METHODPARAM || kind == CORINFO_LOOKUP_CLASSPARAM);
1943 ctxTree = gtNewLclvNode(info.compTypeCtxtArg, TYP_I_IMPL); // Exact method descriptor as passed in as last arg
1948 /*****************************************************************************/
1949 /* Import a dictionary lookup to access a handle in code shared between
1950 generic instantiations.
1951 The lookup depends on the typeContext which is only available at
1952 runtime, and not at compile-time.
1953 pLookup->token1 and pLookup->token2 specify the handle that is needed.
1956 1. pLookup->indirections == CORINFO_USEHELPER : Call a helper passing it the
1957 instantiation-specific handle, and the tokens to lookup the handle.
1958 2. pLookup->indirections != CORINFO_USEHELPER :
1959 2a. pLookup->testForNull == false : Dereference the instantiation-specific handle
1961 2b. pLookup->testForNull == true : Dereference the instantiation-specific handle.
1962 If it is non-NULL, it is the handle required. Else, call a helper
1963 to lookup the handle.
1966 GenTree* Compiler::impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
1967 CORINFO_LOOKUP* pLookup,
1968 void* compileTimeHandle)
1971 // This method can only be called from the importer instance of the Compiler.
1972 // In other words, it cannot be called by the Compiler instance created for the inlinee.
1973 assert(!compIsForInlining());
1975 GenTree* ctxTree = getRuntimeContextTree(pLookup->lookupKind.runtimeLookupKind);
1977 CORINFO_RUNTIME_LOOKUP* pRuntimeLookup = &pLookup->runtimeLookup;
1978 // It's available only via the run-time helper function
1979 if (pRuntimeLookup->indirections == CORINFO_USEHELPER)
1981 #ifdef FEATURE_READYTORUN_COMPILER
1982 if (opts.IsReadyToRun())
1984 return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL,
1985 gtNewArgList(ctxTree), &pLookup->lookupKind);
1989 gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_TOKEN_HDL, compileTimeHandle);
1990 GenTreeArgList* helperArgs = gtNewArgList(ctxTree, argNode);
1992 return gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, helperArgs);
1996 GenTree* slotPtrTree = ctxTree;
1998 if (pRuntimeLookup->testForNull)
2000 slotPtrTree = impCloneExpr(ctxTree, &ctxTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2001 nullptr DEBUGARG("impRuntimeLookup slot"));
2004 GenTree* indOffTree = nullptr;
2006 // Apply repeated indirections
2007 for (WORD i = 0; i < pRuntimeLookup->indirections; i++)
2009 if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset))
2011 indOffTree = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2012 nullptr DEBUGARG("impRuntimeLookup indirectOffset"));
2017 slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
2018 slotPtrTree->gtFlags |= GTF_IND_NONFAULTING;
2019 slotPtrTree->gtFlags |= GTF_IND_INVARIANT;
2022 if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset))
2024 slotPtrTree = gtNewOperNode(GT_ADD, TYP_I_IMPL, indOffTree, slotPtrTree);
2027 if (pRuntimeLookup->offsets[i] != 0)
2030 gtNewOperNode(GT_ADD, TYP_I_IMPL, slotPtrTree, gtNewIconNode(pRuntimeLookup->offsets[i], TYP_I_IMPL));
2034 // No null test required
2035 if (!pRuntimeLookup->testForNull)
2037 if (pRuntimeLookup->indirections == 0)
2042 slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
2043 slotPtrTree->gtFlags |= GTF_IND_NONFAULTING;
2045 if (!pRuntimeLookup->testForFixup)
2050 impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark0"));
2052 unsigned slotLclNum = lvaGrabTemp(true DEBUGARG("impRuntimeLookup test"));
2053 impAssignTempGen(slotLclNum, slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr, impCurStmtOffs);
2055 GenTree* slot = gtNewLclvNode(slotLclNum, TYP_I_IMPL);
2056 // downcast the pointer to a TYP_INT on 64-bit targets
2057 slot = impImplicitIorI4Cast(slot, TYP_INT);
2058 // Use a GT_AND to check for the lowest bit and indirect if it is set
2059 GenTree* test = gtNewOperNode(GT_AND, TYP_INT, slot, gtNewIconNode(1));
2060 GenTree* relop = gtNewOperNode(GT_EQ, TYP_INT, test, gtNewIconNode(0));
2061 relop->gtFlags |= GTF_RELOP_QMARK;
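// Roughly (illustrative): if the low bit of 'slot' is set, the slot holds an unresolved fixup, so load slot = *(slot - 1).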
2063 // slot = GT_IND(slot - 1)
2064 slot = gtNewLclvNode(slotLclNum, TYP_I_IMPL);
2065 GenTree* add = gtNewOperNode(GT_ADD, TYP_I_IMPL, slot, gtNewIconNode(-1, TYP_I_IMPL));
2066 GenTree* indir = gtNewOperNode(GT_IND, TYP_I_IMPL, add);
2067 indir->gtFlags |= GTF_IND_NONFAULTING;
2068 indir->gtFlags |= GTF_IND_INVARIANT;
2070 slot = gtNewLclvNode(slotLclNum, TYP_I_IMPL);
2071 GenTree* asg = gtNewAssignNode(slot, indir);
2072 GenTree* colon = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), asg);
2073 GenTree* qmark = gtNewQmarkNode(TYP_VOID, relop, colon);
2074 impAppendTree(qmark, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
2076 return gtNewLclvNode(slotLclNum, TYP_I_IMPL);
2079 assert(pRuntimeLookup->indirections != 0);
2081 impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark1"));
2083 // Extract the handle
2084 GenTree* handle = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
2085 handle->gtFlags |= GTF_IND_NONFAULTING;
2087 GenTree* handleCopy = impCloneExpr(handle, &handle, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2088 nullptr DEBUGARG("impRuntimeLookup typehandle"));
2091 GenTree* argNode = gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_TOKEN_HDL, compileTimeHandle);
2093 GenTreeArgList* helperArgs = gtNewArgList(ctxTree, argNode);
2094 GenTree* helperCall = gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, helperArgs);
2096 // Check for null and possibly call helper
2097 GenTree* relop = gtNewOperNode(GT_NE, TYP_INT, handle, gtNewIconNode(0, TYP_I_IMPL));
2098 relop->gtFlags |= GTF_RELOP_QMARK;
2100 GenTree* colon = new (this, GT_COLON) GenTreeColon(TYP_I_IMPL,
2101 gtNewNothingNode(), // do nothing if nonnull
2104 GenTree* qmark = gtNewQmarkNode(TYP_I_IMPL, relop, colon);
2107 if (handleCopy->IsLocal())
2109 tmp = handleCopy->gtLclVarCommon.gtLclNum;
2113 tmp = lvaGrabTemp(true DEBUGARG("spilling QMark1"));
2116 impAssignTempGen(tmp, qmark, (unsigned)CHECK_SPILL_NONE);
2117 return gtNewLclvNode(tmp, TYP_I_IMPL);
2120 /******************************************************************************
2121 * Spills the stack at verCurrentState.esStack[level] and replaces it with a temp.
2122 * If tnum!=BAD_VAR_NUM, the temp var used to replace the tree is tnum,
2123 * else, grab a new temp.
2124 * For structs (which can be pushed on the stack using obj, etc),
2125 * special handling is needed
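 *
 * For example (illustrative): if esStack[level] holds the tree "x + y", the spill
 * appends the statement "tmpN = x + y" and replaces esStack[level] with "LCL_VAR tmpN".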
2128 struct RecursiveGuard
2133 m_pAddress = nullptr;
2140 *m_pAddress = false;
2144 void Init(bool* pAddress, bool bInitialize)
2146 assert(pAddress && *pAddress == false && "Recursive guard violation");
2147 m_pAddress = pAddress;
2159 bool Compiler::impSpillStackEntry(unsigned level,
2163 bool bAssertOnRecursion,
2170 RecursiveGuard guard;
2171 guard.Init(&impNestedStackSpill, bAssertOnRecursion);
2174 GenTree* tree = verCurrentState.esStack[level].val;
2176 /* Allocate a temp if we haven't been asked to use a particular one */
2178 if (tiVerificationNeeded)
2180 // Ignore bad temp requests (they will happen with bad code and will be
2181 // caught when importing the dest block)
2182 if ((tnum != BAD_VAR_NUM && tnum >= lvaCount) && verNeedsVerification())
2189 if (tnum != BAD_VAR_NUM && (tnum >= lvaCount))
2195 bool isNewTemp = false;
2197 if (tnum == BAD_VAR_NUM)
2199 tnum = lvaGrabTemp(true DEBUGARG(reason));
2202 else if (tiVerificationNeeded && lvaTable[tnum].TypeGet() != TYP_UNDEF)
2204 // if verification is needed and tnum's type is incompatible with the
2205 // type on the stack, we grab a new temp. This is safe since
2206 // we will throw a verification exception in the dest block.
2208 var_types valTyp = tree->TypeGet();
2209 var_types dstTyp = lvaTable[tnum].TypeGet();
2211 // if the two types are different, we return. This will only happen with bad code and will
2212 // be caught when importing the dest block. We still allow int/byref and float/double differences.
2213 if ((genActualType(valTyp) != genActualType(dstTyp)) &&
2215 #ifndef _TARGET_64BIT_
2216 (valTyp == TYP_I_IMPL && dstTyp == TYP_BYREF) || (valTyp == TYP_BYREF && dstTyp == TYP_I_IMPL) ||
2217 #endif // !_TARGET_64BIT_
2218 (varTypeIsFloating(dstTyp) && varTypeIsFloating(valTyp))))
2220 if (verNeedsVerification())
2227 /* Assign the spilled entry to the temp */
2228 impAssignTempGen(tnum, tree, verCurrentState.esStack[level].seTypeInfo.GetClassHandle(), level);
2230 // If temp is newly introduced and a ref type, grab what type info we can.
2231 if (isNewTemp && (lvaTable[tnum].lvType == TYP_REF))
2233 CORINFO_CLASS_HANDLE stkHnd = verCurrentState.esStack[level].seTypeInfo.GetClassHandle();
2234 lvaSetClass(tnum, tree, stkHnd);
2237 // The tree type may be modified by impAssignTempGen, so use the type of the lclVar.
2238 var_types type = genActualType(lvaTable[tnum].TypeGet());
2239 GenTree* temp = gtNewLclvNode(tnum, type);
2240 verCurrentState.esStack[level].val = temp;
2245 /*****************************************************************************
2247 * Ensure that the stack has only spilled values
2250 void Compiler::impSpillStackEnsure(bool spillLeaves)
2252 assert(!spillLeaves || opts.compDbgCode);
2254 for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2256 GenTree* tree = verCurrentState.esStack[level].val;
2258 if (!spillLeaves && tree->OperIsLeaf())
2263 // Temps introduced by the importer itself don't need to be spilled
2265 bool isTempLcl = (tree->OperGet() == GT_LCL_VAR) && (tree->gtLclVarCommon.gtLclNum >= info.compLocalsCount);
2272 impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillStackEnsure"));
2276 void Compiler::impSpillEvalStack()
2278 for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2280 impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillEvalStack"));
2284 /*****************************************************************************
2286 * If the stack contains any trees with side effects in them, assign those
2287 * trees to temps and append the assignments to the statement list.
2288 * On return the stack is guaranteed to be empty.
2291 inline void Compiler::impEvalSideEffects()
2293 impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("impEvalSideEffects"));
2294 verCurrentState.esStackDepth = 0;
2297 /*****************************************************************************
2299 * If the stack contains any trees with side effects in them, assign those
2300 * trees to temps and replace them on the stack with refs to their temps.
2301 * [0..chkLevel) is the portion of the stack which will be checked and spilled.
2304 inline void Compiler::impSpillSideEffects(bool spillGlobEffects, unsigned chkLevel DEBUGARG(const char* reason))
2306 assert(chkLevel != (unsigned)CHECK_SPILL_NONE);
2308 /* Before we make any appends to the tree list we must spill the
2309 * "special" side effects (GTF_ORDER_SIDEEFF on a GT_CATCH_ARG) */
2311 impSpillSpecialSideEff();
2313 if (chkLevel == (unsigned)CHECK_SPILL_ALL)
2315 chkLevel = verCurrentState.esStackDepth;
2318 assert(chkLevel <= verCurrentState.esStackDepth);
2320 unsigned spillFlags = spillGlobEffects ? GTF_GLOB_EFFECT : GTF_SIDE_EFFECT;
2322 for (unsigned i = 0; i < chkLevel; i++)
2324 GenTree* tree = verCurrentState.esStack[i].val;
2326 GenTree* lclVarTree;
2328 if ((tree->gtFlags & spillFlags) != 0 ||
2329 (spillGlobEffects && // Only consider the following when spillGlobEffects == TRUE
2330 !impIsAddressInLocal(tree, &lclVarTree) && // No need to spill the GT_ADDR node on a local.
2331 gtHasLocalsWithAddrOp(tree))) // Spill if we still see GT_LCL_VAR that contains lvHasLdAddrOp or
2332 // lvAddrTaken flag.
2334 impSpillStackEntry(i, BAD_VAR_NUM DEBUGARG(false) DEBUGARG(reason));
2339 /*****************************************************************************
2341 * If the stack contains any trees with special side effects in them, assign
2342 * those trees to temps and replace them on the stack with refs to their temps.
2345 inline void Compiler::impSpillSpecialSideEff()
2347 // Only exception objects need to be carefully handled
2349 if (!compCurBB->bbCatchTyp)
2354 for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2356 GenTree* tree = verCurrentState.esStack[level].val;
2357 // Make sure that if there is an exception object (GT_CATCH_ARG) anywhere in the sub tree, we spill this stack entry.
2358 if (gtHasCatchArg(tree))
2360 impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillSpecialSideEff"));
2365 /*****************************************************************************
2367 * Spill all stack references to value classes (TYP_STRUCT nodes)
2370 void Compiler::impSpillValueClasses()
2372 for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2374 GenTree* tree = verCurrentState.esStack[level].val;
2376 if (fgWalkTreePre(&tree, impFindValueClasses) == WALK_ABORT)
2378 // Tree walk was aborted, which means that we found a
2379 // value class on the stack. Need to spill that
2382 impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillValueClasses"));
2387 /*****************************************************************************
2389 * Callback that checks if a tree node is TYP_STRUCT
2392 Compiler::fgWalkResult Compiler::impFindValueClasses(GenTree** pTree, fgWalkData* data)
2394 fgWalkResult walkResult = WALK_CONTINUE;
2396 if ((*pTree)->gtType == TYP_STRUCT)
2398 // Abort the walk and indicate that we found a value class
2400 walkResult = WALK_ABORT;
2406 /*****************************************************************************
2408 * If the stack contains any trees with references to local #lclNum, assign
2409 * those trees to temps and replace their place on the stack with refs to their temps.
2413 void Compiler::impSpillLclRefs(ssize_t lclNum)
2415 /* Before we make any appends to the tree list we must spill the
2416 * "special" side effects (GTF_ORDER_SIDEEFF) - GT_CATCH_ARG */
2418 impSpillSpecialSideEff();
2420 for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2422 GenTree* tree = verCurrentState.esStack[level].val;
2424 /* If the tree may throw an exception, and the block has a handler,
2425 then we need to spill assignments to the local if the local is
2426 live on entry to the handler.
2427 Just spill 'em all without considering the liveness */
2429 bool xcptnCaught = ehBlockHasExnFlowDsc(compCurBB) && (tree->gtFlags & (GTF_CALL | GTF_EXCEPT));
2431 /* Skip the tree if it doesn't have an affected reference,
2432 unless xcptnCaught */
2434 if (xcptnCaught || gtHasRef(tree, lclNum, false))
2436 impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillLclRefs"));
2441 /*****************************************************************************
2443 * Push catch arg onto the stack.
2444 * If there are jumps to the beginning of the handler, insert a basic block
2445 * and spill catch arg to a temp. Update the handler block if necessary.
2447 * Returns the basic block of the actual handler.
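 *
 * For example (illustrative): if several flow edges target the handler, a new block
 * is inserted before it containing "tmpN = CATCH_ARG", and "LCL_VAR tmpN" is what
 * gets pushed on the stack for the handler code that follows.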
2450 BasicBlock* Compiler::impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_HANDLE clsHnd, bool isSingleBlockFilter)
2452 // Do not inject the basic block twice on reimport. This should be
2453 // hit only under JIT stress. See if the block is the one we injected.
2454 // Note that EH canonicalization can inject internal blocks here. We might
2455 // be able to re-use such a block (but we don't, right now).
2456 if ((hndBlk->bbFlags & (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET)) ==
2457 (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET))
2459 GenTree* tree = hndBlk->bbTreeList;
2461 if (tree != nullptr && tree->gtOper == GT_STMT)
2463 tree = tree->gtStmt.gtStmtExpr;
2464 assert(tree != nullptr);
2466 if ((tree->gtOper == GT_ASG) && (tree->gtOp.gtOp1->gtOper == GT_LCL_VAR) &&
2467 (tree->gtOp.gtOp2->gtOper == GT_CATCH_ARG))
2469 tree = gtNewLclvNode(tree->gtOp.gtOp1->gtLclVarCommon.gtLclNum, TYP_REF);
2471 impPushOnStack(tree, typeInfo(TI_REF, clsHnd));
2473 return hndBlk->bbNext;
2477 // If we get here, it must have been some other kind of internal block. It's possible that
2478 // someone prepended something to our injected block, but that's unlikely.
2481 /* Push the exception address value on the stack */
2482 GenTree* arg = new (this, GT_CATCH_ARG) GenTree(GT_CATCH_ARG, TYP_REF);
2484 /* Mark the node as having a side-effect - i.e. cannot be
2485 * moved around since it is tied to a fixed location (EAX) */
2486 arg->gtFlags |= GTF_ORDER_SIDEEFF;
2488 #if defined(JIT32_GCENCODER)
2489 const bool forceInsertNewBlock = isSingleBlockFilter || compStressCompile(STRESS_CATCH_ARG, 5);
2491 const bool forceInsertNewBlock = compStressCompile(STRESS_CATCH_ARG, 5);
2492 #endif // defined(JIT32_GCENCODER)
2494 /* Spill GT_CATCH_ARG to a temp if there are jumps to the beginning of the handler */
2495 if (hndBlk->bbRefs > 1 || forceInsertNewBlock)
2497 if (hndBlk->bbRefs == 1)
2502 /* Create extra basic block for the spill */
2503 BasicBlock* newBlk = fgNewBBbefore(BBJ_NONE, hndBlk, /* extendRegion */ true);
2504 newBlk->bbFlags |= BBF_IMPORTED | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET;
2505 newBlk->setBBWeight(hndBlk->bbWeight);
2506 newBlk->bbCodeOffs = hndBlk->bbCodeOffs;
2508 /* Account for the new link we are about to create */
2511 /* Spill into a temp */
2512 unsigned tempNum = lvaGrabTemp(false DEBUGARG("SpillCatchArg"));
2513 lvaTable[tempNum].lvType = TYP_REF;
2514 arg = gtNewTempAssign(tempNum, arg);
2516 hndBlk->bbStkTempsIn = tempNum;
2518 /* Report the debug info. impImportBlockCode won't treat
2519 * the actual handler as an exception block and thus won't do it for us. */
2520 if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES)
2522 impCurStmtOffs = newBlk->bbCodeOffs | IL_OFFSETX_STKBIT;
2523 arg = gtNewStmt(arg, impCurStmtOffs);
2526 fgInsertStmtAtEnd(newBlk, arg);
2528 arg = gtNewLclvNode(tempNum, TYP_REF);
2531 impPushOnStack(arg, typeInfo(TI_REF, clsHnd));
2536 /*****************************************************************************
2538 * Given a tree, clone it. *pClone is set to the cloned tree.
2539 * Returns the original tree if the cloning was easy,
2540 * else returns the temp to which the tree had to be spilled.
2541 * If the tree has side-effects, it will be spilled to a temp.
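 *
 * A minimal usage sketch (illustrative, mirroring calls elsewhere in this file):
 *
 *   GenTree* opClone;
 *   op = impCloneExpr(op, &opClone, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
 *                     nullptr DEBUGARG("clone op for reuse"));
 *   // 'op' is used once; 'opClone' is either a cheap copy or a reference to the
 *   // temp that 'op' was spilled to, and can safely be used a second time.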
2544 GenTree* Compiler::impCloneExpr(GenTree* tree,
2546 CORINFO_CLASS_HANDLE structHnd,
2548 GenTree** pAfterStmt DEBUGARG(const char* reason))
2550 if (!(tree->gtFlags & GTF_GLOB_EFFECT))
2552 GenTree* clone = gtClone(tree, true);
2561 /* Store the operand in a temp and return the temp */
2563 unsigned temp = lvaGrabTemp(true DEBUGARG(reason));
2565 // impAssignTempGen() may change tree->gtType to TYP_VOID for calls which
2566 // return a struct type. It also may modify the struct type to a more
2567 // specialized type (e.g. a SIMD type). So we will get the type from
2568 // the lclVar AFTER calling impAssignTempGen().
2570 impAssignTempGen(temp, tree, structHnd, curLevel, pAfterStmt, impCurStmtOffs);
2571 var_types type = genActualType(lvaTable[temp].TypeGet());
2573 *pClone = gtNewLclvNode(temp, type);
2574 return gtNewLclvNode(temp, type);
2577 /*****************************************************************************
2578 * Remember the IL offset (including stack-empty info) for the trees we will generate now.
2582 inline void Compiler::impCurStmtOffsSet(IL_OFFSET offs)
2584 if (compIsForInlining())
2586 GenTree* callStmt = impInlineInfo->iciStmt;
2587 assert(callStmt->gtOper == GT_STMT);
2588 impCurStmtOffs = callStmt->gtStmt.gtStmtILoffsx;
2592 assert(offs == BAD_IL_OFFSET || (offs & IL_OFFSETX_BITS) == 0);
2593 IL_OFFSETX stkBit = (verCurrentState.esStackDepth > 0) ? IL_OFFSETX_STKBIT : 0;
2594 impCurStmtOffs = offs | stkBit;
2598 /*****************************************************************************
2599 * Returns current IL offset with stack-empty and call-instruction info incorporated
2601 inline IL_OFFSETX Compiler::impCurILOffset(IL_OFFSET offs, bool callInstruction)
2603 if (compIsForInlining())
2605 return BAD_IL_OFFSET;
2609 assert(offs == BAD_IL_OFFSET || (offs & IL_OFFSETX_BITS) == 0);
2610 IL_OFFSETX stkBit = (verCurrentState.esStackDepth > 0) ? IL_OFFSETX_STKBIT : 0;
2611 IL_OFFSETX callInstructionBit = callInstruction ? IL_OFFSETX_CALLINSTRUCTIONBIT : 0;
2612 return offs | stkBit | callInstructionBit;
2616 //------------------------------------------------------------------------
2617 // impCanSpillNow: check whether it is possible to spill all values from the evaluation stack to local variables.
2620 // prevOpcode - last importer opcode
2623 // true if it is legal, false if spilling here could break up a sequence that we do not want to divide.
2624 bool Compiler::impCanSpillNow(OPCODE prevOpcode)
2626 // Don't spill after ldtoken, newarr or newobj, because they could be part of the InitializeArray sequence.
2627 // Avoid breaking it up to guarantee that impInitializeArrayIntrinsic can succeed.
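// The protected sequence is roughly (illustrative IL):
//     ldc.i4 <length> / newarr <elemType> / dup / ldtoken <field> / call ...::InitializeArray
// Spilling between these opcodes would hide the pattern from impInitializeArrayIntrinsic.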
2628 return (prevOpcode != CEE_LDTOKEN) && (prevOpcode != CEE_NEWARR) && (prevOpcode != CEE_NEWOBJ);
2631 /*****************************************************************************
2633 * Remember the instr offset for the statements
2635 * When we do impAppendTree(tree), we can't set tree->gtStmtLastILoffs to
2636 * impCurOpcOffs, if the append was done because of a partial stack spill,
2637 * as some of the trees corresponding to code up to impCurOpcOffs might
2638 * still be sitting on the stack.
2639 * So we delay marking of gtStmtLastILoffs until impNoteLastILoffs().
2640 * This should be called when an opcode finally/explicitly causes
2641 * impAppendTree(tree) to be called (as opposed to being called because of
2642 * a spill caused by the opcode)
2647 void Compiler::impNoteLastILoffs()
2649 if (impLastILoffsStmt == nullptr)
2651 // We should have added a statement for the current basic block
2652 // Is this assert correct ?
2654 assert(impTreeLast);
2655 assert(impTreeLast->gtOper == GT_STMT);
2657 impTreeLast->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
2661 impLastILoffsStmt->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
2662 impLastILoffsStmt = nullptr;
2668 /*****************************************************************************
2669 * We don't create any GenTree (excluding spills) for a branch.
2670 * For debugging info, we need a placeholder so that we can note
2671 * the IL offset in gtStmt.gtStmtOffs. So append an empty statement.
2674 void Compiler::impNoteBranchOffs()
2676 if (opts.compDbgCode)
2678 impAppendTree(gtNewNothingNode(), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
2682 /*****************************************************************************
2683 * Locate the next stmt boundary for which we need to record info.
2684 * We will have to spill the stack at such boundaries if it is not already empty.
2686 * Returns the next stmt boundary (after the start of the block)
2689 unsigned Compiler::impInitBlockLineInfo()
2691 /* Assume the block does not correspond with any IL offset. This prevents
2692 us from reporting extra offsets. Extra mappings can cause confusing
2693 stepping, especially if the extra mapping is a jump-target, and the
2694 debugger does not ignore extra mappings, but instead rewinds to the
2695 nearest known offset */
2697 impCurStmtOffsSet(BAD_IL_OFFSET);
2699 if (compIsForInlining())
2704 IL_OFFSET blockOffs = compCurBB->bbCodeOffs;
2706 if ((verCurrentState.esStackDepth == 0) && (info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES))
2708 impCurStmtOffsSet(blockOffs);
2711 if (false && (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES))
2713 impCurStmtOffsSet(blockOffs);
2716 /* Always report IL offset 0 or some tests get confused.
2717 Probably a good idea anyway. */
2721 impCurStmtOffsSet(blockOffs);
2724 if (!info.compStmtOffsetsCount)
2729 /* Find the lowest explicit stmt boundary within the block */
2731 /* Start looking at an entry that is based on our instr offset */
2733 unsigned index = (info.compStmtOffsetsCount * blockOffs) / info.compILCodeSize;
2735 if (index >= info.compStmtOffsetsCount)
2737 index = info.compStmtOffsetsCount - 1;
2740 /* If we've guessed too far, back up */
2742 while (index > 0 && info.compStmtOffsets[index - 1] >= blockOffs)
2747 /* If we guessed short, advance ahead */
2749 while (info.compStmtOffsets[index] < blockOffs)
2753 if (index == info.compStmtOffsetsCount)
2755 return info.compStmtOffsetsCount;
2759 assert(index < info.compStmtOffsetsCount);
2761 if (info.compStmtOffsets[index] == blockOffs)
2763 /* There is an explicit boundary for the start of this basic block.
2764 So we will start with bbCodeOffs. Else we will wait until we
2765 get to the next explicit boundary */
2767 impCurStmtOffsSet(blockOffs);
2775 /*****************************************************************************/
2777 static inline bool impOpcodeIsCallOpcode(OPCODE opcode)
2791 /*****************************************************************************/
2793 static inline bool impOpcodeIsCallSiteBoundary(OPCODE opcode)
2810 /*****************************************************************************/
2812 // One might think it is worth caching these values, but results indicate otherwise.
2814 // In addition, caching them causes SuperPMI to be unable to completely
2815 // encapsulate an individual method context.
2816 CORINFO_CLASS_HANDLE Compiler::impGetRefAnyClass()
2818 CORINFO_CLASS_HANDLE refAnyClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPED_BYREF);
2819 assert(refAnyClass != (CORINFO_CLASS_HANDLE) nullptr);
2823 CORINFO_CLASS_HANDLE Compiler::impGetTypeHandleClass()
2825 CORINFO_CLASS_HANDLE typeHandleClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPE_HANDLE);
2826 assert(typeHandleClass != (CORINFO_CLASS_HANDLE) nullptr);
2827 return typeHandleClass;
2830 CORINFO_CLASS_HANDLE Compiler::impGetRuntimeArgumentHandle()
2832 CORINFO_CLASS_HANDLE argIteratorClass = info.compCompHnd->getBuiltinClass(CLASSID_ARGUMENT_HANDLE);
2833 assert(argIteratorClass != (CORINFO_CLASS_HANDLE) nullptr);
2834 return argIteratorClass;
2837 CORINFO_CLASS_HANDLE Compiler::impGetStringClass()
2839 CORINFO_CLASS_HANDLE stringClass = info.compCompHnd->getBuiltinClass(CLASSID_STRING);
2840 assert(stringClass != (CORINFO_CLASS_HANDLE) nullptr);
2844 CORINFO_CLASS_HANDLE Compiler::impGetObjectClass()
2846 CORINFO_CLASS_HANDLE objectClass = info.compCompHnd->getBuiltinClass(CLASSID_SYSTEM_OBJECT);
2847 assert(objectClass != (CORINFO_CLASS_HANDLE) nullptr);
2851 /*****************************************************************************
2852 * "&var" can be used either as TYP_BYREF or TYP_I_IMPL, but we
2853 * set its type to TYP_BYREF when we create it. We know if it can be
2854 * changed to TYP_I_IMPL only at the point where we use it
2858 void Compiler::impBashVarAddrsToI(GenTree* tree1, GenTree* tree2)
2860 if (tree1->IsVarAddr())
2862 tree1->gtType = TYP_I_IMPL;
2865 if (tree2 && tree2->IsVarAddr())
2867 tree2->gtType = TYP_I_IMPL;
2871 /*****************************************************************************
2872 * TYP_INT and TYP_I_IMPL can be used almost interchangeably, but we want
2873 * to make that an explicit cast in our trees, so any implicit casts that
2874 * exist in the IL (at least on 64-bit where TYP_I_IMPL != TYP_INT) are
2875 * turned into explicit casts here.
2876 * We also allow an implicit conversion of a ldnull into a TYP_I_IMPL(0).
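 *
 * For example (illustrative): on a 64-bit target, using a 32-bit int as an index that
 * is added to a native int pointer gets an explicit CAST(long <- int) inserted here,
 * while a "ldnull" constant consumed as a native int is simply retyped to TYP_I_IMPL.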
2879 GenTree* Compiler::impImplicitIorI4Cast(GenTree* tree, var_types dstTyp)
2881 var_types currType = genActualType(tree->gtType);
2882 var_types wantedType = genActualType(dstTyp);
2884 if (wantedType != currType)
2886 // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
2887 if ((tree->OperGet() == GT_CNS_INT) && varTypeIsI(dstTyp))
2889 if (!varTypeIsI(tree->gtType) || ((tree->gtType == TYP_REF) && (tree->gtIntCon.gtIconVal == 0)))
2891 tree->gtType = TYP_I_IMPL;
2894 #ifdef _TARGET_64BIT_
2895 else if (varTypeIsI(wantedType) && (currType == TYP_INT))
2897 // Note that this allows TYP_INT to be cast to a TYP_I_IMPL when wantedType is a TYP_BYREF or TYP_REF
2898 tree = gtNewCastNode(TYP_I_IMPL, tree, false, TYP_I_IMPL);
2900 else if ((wantedType == TYP_INT) && varTypeIsI(currType))
2902 // Note that this allows TYP_BYREF or TYP_REF to be cast to a TYP_INT
2903 tree = gtNewCastNode(TYP_INT, tree, false, TYP_INT);
2905 #endif // _TARGET_64BIT_
2911 /*****************************************************************************
2912 * TYP_FLOAT and TYP_DOUBLE can be used almost interchangeably in some cases,
2913 * but we want to make that an explicit cast in our trees, so any implicit casts
2914 * that exist in the IL are turned into explicit casts here.
2917 GenTree* Compiler::impImplicitR4orR8Cast(GenTree* tree, var_types dstTyp)
2919 #ifndef LEGACY_BACKEND
2920 if (varTypeIsFloating(tree) && varTypeIsFloating(dstTyp) && (dstTyp != tree->gtType))
2922 tree = gtNewCastNode(dstTyp, tree, false, dstTyp);
2924 #endif // !LEGACY_BACKEND
2929 //------------------------------------------------------------------------
2930 // impInitializeArrayIntrinsic: Attempts to replace a call to InitializeArray
2931 // with a GT_COPYBLK node.
2934 // sig - The InitializeArray signature.
2937 // A pointer to the newly created GT_COPYBLK node if the replacement succeeds or
2938 // nullptr otherwise.
2941 // The function recognizes the following IL pattern:
2942 // ldc <length> or a list of ldc <lower bound>/<length>
2945 // ldtoken <field handle>
2946 // call InitializeArray
2947 // The lower bounds need not be constant except when the array rank is 1.
2948 // The function recognizes all kinds of arrays thus enabling a small runtime
2949 // such as CoreRT to skip providing an implementation for InitializeArray.
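// For example, a C# static array initializer such as (illustrative)
//     static readonly int[] Primes = { 2, 3, 5, 7 };
// typically compiles to IL of this shape, with the raw data living in a compiler-generated field:
//     ldc.i4.4
//     newarr    [System.Runtime]System.Int32
//     dup
//     ldtoken   <compiler-generated data field>
//     call      void System.Runtime.CompilerServices.RuntimeHelpers::InitializeArray(...)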
2951 GenTree* Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig)
2953 assert(sig->numArgs == 2);
2955 GenTree* fieldTokenNode = impStackTop(0).val;
2956 GenTree* arrayLocalNode = impStackTop(1).val;
2959 // Verify that the field token is known and valid. Note that it's also
2960 // possible for the token to come from reflection, in which case we cannot do
2961 // the optimization and must therefore revert to calling the helper. You can
2962 // see an example of this in bvt\DynIL\initarray2.exe (in Main).
2965 // Check to see if the ldtoken helper call is what we see here.
2966 if (fieldTokenNode->gtOper != GT_CALL || (fieldTokenNode->gtCall.gtCallType != CT_HELPER) ||
2967 (fieldTokenNode->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD)))
2972 // Strip helper call away
2973 fieldTokenNode = fieldTokenNode->gtCall.gtCallArgs->Current();
2975 if (fieldTokenNode->gtOper == GT_IND)
2977 fieldTokenNode = fieldTokenNode->gtOp.gtOp1;
2980 // Check for constant
2981 if (fieldTokenNode->gtOper != GT_CNS_INT)
2986 CORINFO_FIELD_HANDLE fieldToken = (CORINFO_FIELD_HANDLE)fieldTokenNode->gtIntCon.gtCompileTimeHandle;
2987 if (!fieldTokenNode->IsIconHandle(GTF_ICON_FIELD_HDL) || (fieldToken == nullptr))
2993 // We need to get the number of elements in the array and the size of each element.
2994 // We verify that the newarr statement is exactly what we expect it to be.
2995 // If it's not then we just return NULL and we don't optimize this call
2999 // It is possible that we don't have any statements in the block yet
3001 if (impTreeLast->gtOper != GT_STMT)
3003 assert(impTreeLast->gtOper == GT_BEG_STMTS);
3008 // We start by looking at the last statement, making sure it's an assignment, and
3009 // that the target of the assignment is the array passed to InitializeArray.
3011 GenTree* arrayAssignment = impTreeLast->gtStmt.gtStmtExpr;
3012 if ((arrayAssignment->gtOper != GT_ASG) || (arrayAssignment->gtOp.gtOp1->gtOper != GT_LCL_VAR) ||
3013 (arrayLocalNode->gtOper != GT_LCL_VAR) ||
3014 (arrayAssignment->gtOp.gtOp1->gtLclVarCommon.gtLclNum != arrayLocalNode->gtLclVarCommon.gtLclNum))
3020 // Make sure that the object being assigned is a helper call.
3023 GenTree* newArrayCall = arrayAssignment->gtOp.gtOp2;
3024 if ((newArrayCall->gtOper != GT_CALL) || (newArrayCall->gtCall.gtCallType != CT_HELPER))
3030 // Verify that it is one of the new array helpers.
3033 bool isMDArray = false;
3035 if (newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_DIRECT) &&
3036 newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_OBJ) &&
3037 newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_VC) &&
3038 newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_ALIGN8)
3039 #ifdef FEATURE_READYTORUN_COMPILER
3040 && newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_R2R_DIRECT) &&
3041 newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1)
3045 if (newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEW_MDARR_NONVARARG))
3053 CORINFO_CLASS_HANDLE arrayClsHnd = (CORINFO_CLASS_HANDLE)newArrayCall->gtCall.compileTimeHelperArgumentHandle;
3056 // Make sure we found a compile time handle to the array
3065 S_UINT32 numElements;
3069 rank = info.compCompHnd->getArrayRank(arrayClsHnd);
3076 GenTreeArgList* tokenArg = newArrayCall->gtCall.gtCallArgs;
3077 assert(tokenArg != nullptr);
3078 GenTreeArgList* numArgsArg = tokenArg->Rest();
3079 assert(numArgsArg != nullptr);
3080 GenTreeArgList* argsArg = numArgsArg->Rest();
3081 assert(argsArg != nullptr);
3084 // The number of arguments should be a constant between 1 and 64. The rank can't be 0
3085 // so at least one length must be present and the rank can't exceed 32 so there can
3086 // be at most 64 arguments - 32 lengths and 32 lower bounds.
3089 if ((!numArgsArg->Current()->IsCnsIntOrI()) || (numArgsArg->Current()->AsIntCon()->IconValue() < 1) ||
3090 (numArgsArg->Current()->AsIntCon()->IconValue() > 64))
3095 unsigned numArgs = static_cast<unsigned>(numArgsArg->Current()->AsIntCon()->IconValue());
3096 bool lowerBoundsSpecified;
3098 if (numArgs == rank * 2)
3100 lowerBoundsSpecified = true;
3102 else if (numArgs == rank)
3104 lowerBoundsSpecified = false;
3107 // If the rank is 1 and a lower bound isn't specified then the runtime creates
3108 // a SDArray. Note that even if a lower bound is specified it can be 0 and then
3109 // we get a SDArray as well, see the for loop below.
3123 // The rank is known to be at least 1 so we can start with numElements being 1
3124 // to avoid the need to special case the first dimension.
3127 numElements = S_UINT32(1);
3131 static bool IsArgsFieldInit(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs)
3133 return (tree->OperGet() == GT_ASG) && IsArgsFieldIndir(tree->gtGetOp1(), index, lvaNewObjArrayArgs) &&
3134 IsArgsAddr(tree->gtGetOp1()->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs);
3137 static bool IsArgsFieldIndir(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs)
3139 return (tree->OperGet() == GT_IND) && (tree->gtGetOp1()->OperGet() == GT_ADD) &&
3140 (tree->gtGetOp1()->gtGetOp2()->IsIntegralConst(sizeof(INT32) * index)) &&
3141 IsArgsAddr(tree->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs);
3144 static bool IsArgsAddr(GenTree* tree, unsigned lvaNewObjArrayArgs)
3146 return (tree->OperGet() == GT_ADDR) && (tree->gtGetOp1()->OperGet() == GT_LCL_VAR) &&
3147 (tree->gtGetOp1()->AsLclVar()->GetLclNum() == lvaNewObjArrayArgs);
3150 static bool IsComma(GenTree* tree)
3152 return (tree != nullptr) && (tree->OperGet() == GT_COMMA);
3156 unsigned argIndex = 0;
3159 for (comma = argsArg->Current(); Match::IsComma(comma); comma = comma->gtGetOp2())
3161 if (lowerBoundsSpecified)
3164 // In general lower bounds can be ignored because they're not needed to
3165 // calculate the total number of elements. But for single dimensional arrays
3166 // we need to know if the lower bound is 0 because in this case the runtime
3167 // creates a SDArray and this affects the way the array data offset is calculated.
3172 GenTree* lowerBoundAssign = comma->gtGetOp1();
3173 assert(Match::IsArgsFieldInit(lowerBoundAssign, argIndex, lvaNewObjArrayArgs));
3174 GenTree* lowerBoundNode = lowerBoundAssign->gtGetOp2();
3176 if (lowerBoundNode->IsIntegralConst(0))
3182 comma = comma->gtGetOp2();
3186 GenTree* lengthNodeAssign = comma->gtGetOp1();
3187 assert(Match::IsArgsFieldInit(lengthNodeAssign, argIndex, lvaNewObjArrayArgs));
3188 GenTree* lengthNode = lengthNodeAssign->gtGetOp2();
3190 if (!lengthNode->IsCnsIntOrI())
3195 numElements *= S_SIZE_T(lengthNode->AsIntCon()->IconValue());
3199 assert((comma != nullptr) && Match::IsArgsAddr(comma, lvaNewObjArrayArgs));
3201 if (argIndex != numArgs)
3209 // Make sure there are exactly two arguments: the array class and
3210 // the number of elements.
3213 GenTree* arrayLengthNode;
3215 GenTreeArgList* args = newArrayCall->gtCall.gtCallArgs;
3216 #ifdef FEATURE_READYTORUN_COMPILER
3217 if (newArrayCall->gtCall.gtCallMethHnd == eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1))
3219 // Array length is 1st argument for readytorun helper
3220 arrayLengthNode = args->Current();
3225 // Array length is 2nd argument for regular helper
3226 arrayLengthNode = args->Rest()->Current();
3230 // Make sure that the number of elements looks valid.
3232 if (arrayLengthNode->gtOper != GT_CNS_INT)
3237 numElements = S_SIZE_T(arrayLengthNode->gtIntCon.gtIconVal);
3239 if (!info.compCompHnd->isSDArray(arrayClsHnd))
3245 CORINFO_CLASS_HANDLE elemClsHnd;
3246 var_types elementType = JITtype2varType(info.compCompHnd->getChildType(arrayClsHnd, &elemClsHnd));
3249 // Note that genTypeSize will return zero for non primitive types, which is exactly
3250 // what we want (size will then be 0, and we will catch this in the conditional below).
3251 // Note that we don't expect this to fail for valid binaries, so we assert in the
3252 // non-verification case (the verification case should not assert but rather correctly
3253 // handle bad binaries). This assert is not guarding any specific invariant, but rather
3254 // saying that we don't expect this to happen, and if it is hit, we need to investigate.
3258 S_UINT32 elemSize(genTypeSize(elementType));
3259 S_UINT32 size = elemSize * S_UINT32(numElements);
3261 if (size.IsOverflow())
3266 if ((size.Value() == 0) || (varTypeIsGC(elementType)))
3268 assert(verNeedsVerification());
3272 void* initData = info.compCompHnd->getArrayInitializationData(fieldToken, size.Value());
3279 // At this point we are ready to commit to implementing the InitializeArray
3280 // intrinsic using a struct assignment. Pop the arguments from the stack and
3281 // return the struct assignment node.
3287 const unsigned blkSize = size.Value();
3288 unsigned dataOffset;
3292 dataOffset = eeGetMDArrayDataOffset(elementType, rank);
3296 dataOffset = eeGetArrayDataOffset(elementType);
3299 GenTree* dst = gtNewOperNode(GT_ADD, TYP_BYREF, arrayLocalNode, gtNewIconNode(dataOffset, TYP_I_IMPL));
3300 GenTree* blk = gtNewBlockVal(dst, blkSize);
3301 GenTree* src = gtNewIndOfIconHandleNode(TYP_STRUCT, (size_t)initData, GTF_ICON_STATIC_HDL, false);
3303 return gtNewBlkOpNode(blk, // dst
3310 //------------------------------------------------------------------------
3311 // impIntrinsic: possibly expand intrinsic call into alternate IR sequence
3314 // newobjThis - for constructor calls, the tree for the newly allocated object
3315 // clsHnd - handle for the intrinsic method's class
3316 // method - handle for the intrinsic method
3317 // sig - signature of the intrinsic method
3318 // methodFlags - CORINFO_FLG_XXX flags of the intrinsic method
3319 // memberRef - the token for the intrinsic method
3320 // readonlyCall - true if call has a readonly prefix
3321 // tailCall - true if call is in tail position
3322 // pConstrainedResolvedToken -- resolved token for constrained call, or nullptr
3323 // if call is not constrained
3324 // constraintCallThisTransform -- this transform to apply for a constrained call
3325 // pIntrinsicID [OUT] -- intrinsic ID (see enumeration in corinfo.h)
3326 // for "traditional" jit intrinsics
3327 // isSpecialIntrinsic [OUT] -- set true if intrinsic expansion is a call
3328 // that is amenable to special downstream optimization opportunities
3331 // IR tree to use in place of the call, or nullptr if the jit should treat
3332 // the intrinsic call like a normal call.
3334 // pIntrinsicID set to non-illegal value if the call is recognized as a
3335 // traditional jit intrinsic, even if the intrinsic is not expanded.
3337 // isSpecial set true if the expansion is subject to special
3338 // optimizations later in the jit processing
3341 // On success the IR tree may be a call to a different method or an inline
3342 // sequence. If it is a call, then the intrinsic processing here is responsible
3343 // for handling all the special cases, as upon return to impImportCall
3344 // expanded intrinsics bypass most of the normal call processing.
3346 // Intrinsics are generally not recognized in minopts and debug codegen.
3348 // However, certain traditional intrinsics are identified as "must expand"
3349 // if there is no fallback implementation to invoke; these must be handled
3350 // in all codegen modes.
3352 // New style intrinsics (where the fallback implementation is in IL) are
3353 // identified as "must expand" if they are invoked from within their
3354 // own method bodies.
3357 GenTree* Compiler::impIntrinsic(GenTree* newobjThis,
3358 CORINFO_CLASS_HANDLE clsHnd,
3359 CORINFO_METHOD_HANDLE method,
3360 CORINFO_SIG_INFO* sig,
3361 unsigned methodFlags,
3365 CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
3366 CORINFO_THIS_TRANSFORM constraintCallThisTransform,
3367 CorInfoIntrinsics* pIntrinsicID,
3368 bool* isSpecialIntrinsic)
3370 assert((methodFlags & (CORINFO_FLG_INTRINSIC | CORINFO_FLG_JIT_INTRINSIC)) != 0);
3372 bool mustExpand = false;
3373 bool isSpecial = false;
3374 CorInfoIntrinsics intrinsicID = CORINFO_INTRINSIC_Illegal;
3375 NamedIntrinsic ni = NI_Illegal;
3377 if ((methodFlags & CORINFO_FLG_INTRINSIC) != 0)
3379 intrinsicID = info.compCompHnd->getIntrinsicID(method, &mustExpand);
3382 if ((methodFlags & CORINFO_FLG_JIT_INTRINSIC) != 0)
3384 // The recursive calls to Jit intrinsics are must-expand by convention.
3385 mustExpand = mustExpand || gtIsRecursiveCall(method);
3387 if (intrinsicID == CORINFO_INTRINSIC_Illegal)
3389 ni = lookupNamedIntrinsic(method);
3391 #ifdef FEATURE_HW_INTRINSICS
3392 if (ni > NI_HW_INTRINSIC_START && ni < NI_HW_INTRINSIC_END)
3394 return impHWIntrinsic(ni, method, sig, mustExpand);
3396 #endif // FEATURE_HW_INTRINSICS
3400 *pIntrinsicID = intrinsicID;
3402 #ifndef _TARGET_ARM_
3403 genTreeOps interlockedOperator;
3406 if (intrinsicID == CORINFO_INTRINSIC_StubHelpers_GetStubContext)
3408 // must be done regardless of DbgCode and MinOpts
3409 return gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL);
3411 #ifdef _TARGET_64BIT_
3412 if (intrinsicID == CORINFO_INTRINSIC_StubHelpers_GetStubContextAddr)
3414 // must be done regardless of DbgCode and MinOpts
3415 return gtNewOperNode(GT_ADDR, TYP_I_IMPL, gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL));
3418 assert(intrinsicID != CORINFO_INTRINSIC_StubHelpers_GetStubContextAddr);
3421 GenTree* retNode = nullptr;
3423 // Under debug and minopts, only expand what is required.
3424 if (!mustExpand && (opts.compDbgCode || opts.MinOpts()))
3426 *pIntrinsicID = CORINFO_INTRINSIC_Illegal;
3430 var_types callType = JITtype2varType(sig->retType);
3432 /* First do the intrinsics which are always smaller than a call */
3434 switch (intrinsicID)
3439 case CORINFO_INTRINSIC_Sin:
3440 case CORINFO_INTRINSIC_Cbrt:
3441 case CORINFO_INTRINSIC_Sqrt:
3442 case CORINFO_INTRINSIC_Abs:
3443 case CORINFO_INTRINSIC_Cos:
3444 case CORINFO_INTRINSIC_Round:
3445 case CORINFO_INTRINSIC_Cosh:
3446 case CORINFO_INTRINSIC_Sinh:
3447 case CORINFO_INTRINSIC_Tan:
3448 case CORINFO_INTRINSIC_Tanh:
3449 case CORINFO_INTRINSIC_Asin:
3450 case CORINFO_INTRINSIC_Asinh:
3451 case CORINFO_INTRINSIC_Acos:
3452 case CORINFO_INTRINSIC_Acosh:
3453 case CORINFO_INTRINSIC_Atan:
3454 case CORINFO_INTRINSIC_Atan2:
3455 case CORINFO_INTRINSIC_Atanh:
3456 case CORINFO_INTRINSIC_Log10:
3457 case CORINFO_INTRINSIC_Pow:
3458 case CORINFO_INTRINSIC_Exp:
3459 case CORINFO_INTRINSIC_Ceiling:
3460 case CORINFO_INTRINSIC_Floor:
3461 retNode = impMathIntrinsic(method, sig, callType, intrinsicID, tailCall);
3464 #if defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_)
3465 // TODO-ARM-CQ: reenable treating Interlocked operation as intrinsic
3466 case CORINFO_INTRINSIC_InterlockedAdd32:
3467 interlockedOperator = GT_LOCKADD;
3468 goto InterlockedBinOpCommon;
3469 case CORINFO_INTRINSIC_InterlockedXAdd32:
3470 interlockedOperator = GT_XADD;
3471 goto InterlockedBinOpCommon;
3472 case CORINFO_INTRINSIC_InterlockedXchg32:
3473 interlockedOperator = GT_XCHG;
3474 goto InterlockedBinOpCommon;
3476 #ifdef _TARGET_64BIT_
3477 case CORINFO_INTRINSIC_InterlockedAdd64:
3478 interlockedOperator = GT_LOCKADD;
3479 goto InterlockedBinOpCommon;
3480 case CORINFO_INTRINSIC_InterlockedXAdd64:
3481 interlockedOperator = GT_XADD;
3482 goto InterlockedBinOpCommon;
3483 case CORINFO_INTRINSIC_InterlockedXchg64:
3484 interlockedOperator = GT_XCHG;
3485 goto InterlockedBinOpCommon;
3486 #endif // _TARGET_64BIT_
3488 InterlockedBinOpCommon:
3489 assert(callType != TYP_STRUCT);
3490 assert(sig->numArgs == 2);
3492 op2 = impPopStack().val;
3493 op1 = impPopStack().val;
3499 // The address operand will typically be the address of a field, for example.
3501 // In the case where the first argument is the address of a local, we might
3502 // want to make this *not* make the var address-taken -- but atomic instructions
3503 // on a local are probably pretty useless anyway, so we probably don't care.
3505 op1 = gtNewOperNode(interlockedOperator, genActualType(callType), op1, op2);
3506 op1->gtFlags |= GTF_GLOB_REF | GTF_ASG;
3509 #endif // defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_)
3511 case CORINFO_INTRINSIC_MemoryBarrier:
3513 assert(sig->numArgs == 0);
3515 op1 = new (this, GT_MEMORYBARRIER) GenTree(GT_MEMORYBARRIER, TYP_VOID);
3516 op1->gtFlags |= GTF_GLOB_REF | GTF_ASG;
3520 #if defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_)
3521 // TODO-ARM-CQ: reenable treating InterlockedCmpXchg32 operation as intrinsic
3522 case CORINFO_INTRINSIC_InterlockedCmpXchg32:
3523 #ifdef _TARGET_64BIT_
3524 case CORINFO_INTRINSIC_InterlockedCmpXchg64:
3527 assert(callType != TYP_STRUCT);
3528 assert(sig->numArgs == 3);
3531 op3 = impPopStack().val; // comparand
3532 op2 = impPopStack().val; // value
3533 op1 = impPopStack().val; // location
3535 GenTree* node = new (this, GT_CMPXCHG) GenTreeCmpXchg(genActualType(callType), op1, op2, op3);
3537 node->gtCmpXchg.gtOpLocation->gtFlags |= GTF_DONT_CSE;
3541 #endif // defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_)
3543 case CORINFO_INTRINSIC_StringLength:
3544 op1 = impPopStack().val;
3545 if (!opts.MinOpts() && !opts.compDbgCode)
3547 GenTreeArrLen* arrLen = gtNewArrLen(TYP_INT, op1, offsetof(CORINFO_String, stringLen));
3552 /* Create the expression "*(str_addr + stringLengthOffset)" */
3553 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
3554 gtNewIconNode(offsetof(CORINFO_String, stringLen), TYP_I_IMPL));
3555 op1 = gtNewOperNode(GT_IND, TYP_INT, op1);
3558 // Getting the length of a null string should throw
3559 op1->gtFlags |= GTF_EXCEPT;
3564 case CORINFO_INTRINSIC_StringGetChar:
3565 op2 = impPopStack().val;
3566 op1 = impPopStack().val;
3567 op1 = gtNewIndexRef(TYP_USHORT, op1, op2);
3568 op1->gtFlags |= GTF_INX_STRING_LAYOUT;
3572 case CORINFO_INTRINSIC_InitializeArray:
3573 retNode = impInitializeArrayIntrinsic(sig);
3576 case CORINFO_INTRINSIC_Array_Address:
3577 case CORINFO_INTRINSIC_Array_Get:
3578 case CORINFO_INTRINSIC_Array_Set:
3579 retNode = impArrayAccessIntrinsic(clsHnd, sig, memberRef, readonlyCall, intrinsicID);
3582 case CORINFO_INTRINSIC_GetTypeFromHandle:
3583 op1 = impStackTop(0).val;
3584 if (op1->gtOper == GT_CALL && (op1->gtCall.gtCallType == CT_HELPER) &&
3585 gtIsTypeHandleToRuntimeTypeHelper(op1->AsCall()))
3587 op1 = impPopStack().val;
3588 // Change call to return RuntimeType directly.
3589 op1->gtType = TYP_REF;
3592 // Call the regular function.
3595 case CORINFO_INTRINSIC_RTH_GetValueInternal:
3596 op1 = impStackTop(0).val;
3597 if (op1->gtOper == GT_CALL && (op1->gtCall.gtCallType == CT_HELPER) &&
3598 gtIsTypeHandleToRuntimeTypeHelper(op1->AsCall()))
3601 // Old tree: Helper-RuntimeTypeHandle -> TreeToGetNativeTypeHandle
3604 // New tree: TreeToGetNativeTypeHandle
3606 // Remove the call to the helper and return the native TypeHandle pointer that was its parameter
3609 op1 = impPopStack().val;
3611 // Get native TypeHandle argument to old helper
3612 op1 = op1->gtCall.gtCallArgs;
3613 assert(op1->OperIsList());
3614 assert(op1->gtOp.gtOp2 == nullptr);
3615 op1 = op1->gtOp.gtOp1;
3618 // Call the regular function.
3621 #ifndef LEGACY_BACKEND
3622 case CORINFO_INTRINSIC_Object_GetType:
3624 JITDUMP("\n impIntrinsic: call to Object.GetType\n");
3625 op1 = impStackTop(0).val;
3627 // If we're calling GetType on a boxed value, just get the type directly.
3628 if (op1->IsBoxedValue())
3630 JITDUMP("Attempting to optimize box(...).getType() to direct type construction\n");
3632 // Try and clean up the box. Obtain the handle we
3633 // were going to pass to the newobj.
3634 GenTree* boxTypeHandle = gtTryRemoveBoxUpstreamEffects(op1, BR_REMOVE_AND_NARROW_WANT_TYPE_HANDLE);
3636 if (boxTypeHandle != nullptr)
3638 // Note we don't need to play the TYP_STRUCT games here like
3639 // we do for LDTOKEN since the return value of this operator is Type,
3640 // not RuntimeTypeHandle.
3642 GenTreeArgList* helperArgs = gtNewArgList(boxTypeHandle);
3643 GenTree* runtimeType =
3644 gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE, TYP_REF, helperArgs);
3645 retNode = runtimeType;
3649 // If we have a constrained callvirt with a "box this" transform
3650 // we know we have a value class and hence an exact type.
3652 // If so, instead of boxing and then extracting the type, just
3653 // construct the type directly.
3654 if ((retNode == nullptr) && (pConstrainedResolvedToken != nullptr) &&
3655 (constraintCallThisTransform == CORINFO_BOX_THIS))
3657 // Ensure this is one of the simple box cases (in particular, rule out nullables).
3658 const CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pConstrainedResolvedToken->hClass);
3659 const bool isSafeToOptimize = (boxHelper == CORINFO_HELP_BOX);
3661 if (isSafeToOptimize)
3663 JITDUMP("Optimizing constrained box-this obj.getType() to direct type construction\n");
3665 GenTree* typeHandleOp =
3666 impTokenToHandle(pConstrainedResolvedToken, nullptr, TRUE /* mustRestoreHandle */);
3667 if (typeHandleOp == nullptr)
3669 assert(compDonotInline());
3672 GenTreeArgList* helperArgs = gtNewArgList(typeHandleOp);
3673 GenTree* runtimeType =
3674 gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE, TYP_REF, helperArgs);
3675 retNode = runtimeType;
3680 if (retNode != nullptr)
3682 JITDUMP("Optimized result for call to GetType is\n");
3685 gtDispTree(retNode);
3690 // Else expand as an intrinsic, unless the call is constrained,
3691 // in which case we defer expansion to allow impImportCall to do the
3692 // special constraint processing.
3693 if ((retNode == nullptr) && (pConstrainedResolvedToken == nullptr))
3695 JITDUMP("Expanding as special intrinsic\n");
3697 op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, intrinsicID, method);
3699 // Set the CALL flag to indicate that the operator is implemented by a call.
3700 // Also set the EXCEPTION flag because the native implementation of the
3701 // CORINFO_INTRINSIC_Object_GetType intrinsic can throw a NullReferenceException.
3702 op1->gtFlags |= (GTF_CALL | GTF_EXCEPT);
3704 // Might be further optimizable, so arrange to leave a mark behind
3708 if (retNode == nullptr)
3710 JITDUMP("Leaving as normal call\n");
3711 // Might be further optimizable, so arrange to leave a mark behind
3719 // Implement ByReference Ctor. This wraps the assignment of the ref into a byref-like field
3720 // in a value type. The canonical example of this is Span<T>. In effect this is just a
3721 // substitution. The parameter byref will be assigned into the newly allocated object.
3722 case CORINFO_INTRINSIC_ByReference_Ctor:
3724 // Remove call to constructor and directly assign the byref passed
3725 // to the call to the first slot of the ByReference struct.
3726 op1 = impPopStack().val;
3727 GenTree* thisptr = newobjThis;
3728 CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(clsHnd, 0);
3729 GenTree* field = gtNewFieldRef(TYP_BYREF, fldHnd, thisptr, 0, false);
3730 GenTree* assign = gtNewAssignNode(field, op1);
3731 GenTree* byReferenceStruct = gtCloneExpr(thisptr->gtGetOp1());
3732 assert(byReferenceStruct != nullptr);
3733 impPushOnStack(byReferenceStruct, typeInfo(TI_STRUCT, clsHnd));
3737 // Implement ptr value getter for ByReference struct.
3738 case CORINFO_INTRINSIC_ByReference_Value:
3740 op1 = impPopStack().val;
3741 CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(clsHnd, 0);
3742 GenTree* field = gtNewFieldRef(TYP_BYREF, fldHnd, op1, 0, false);
3746 case CORINFO_INTRINSIC_Span_GetItem:
3747 case CORINFO_INTRINSIC_ReadOnlySpan_GetItem:
3749 // We have the index and a pointer to Span<T> 's' on the stack. Expand to:
3753 // BoundsCheck(index, s->_length)
3754 // s->_pointer + index * sizeof(T)
3756 // For ReadOnlySpan<T> -- same expansion, as it now returns a readonly ref
3758 // Signature should show one class type parameter, which
3759 // we need to examine.
3760 assert(sig->sigInst.classInstCount == 1);
3761 CORINFO_CLASS_HANDLE spanElemHnd = sig->sigInst.classInst[0];
3762 const unsigned elemSize = info.compCompHnd->getClassSize(spanElemHnd);
3763 assert(elemSize > 0);
3765 const bool isReadOnly = (intrinsicID == CORINFO_INTRINSIC_ReadOnlySpan_GetItem);
3767 JITDUMP("\nimpIntrinsic: Expanding %sSpan<T>.get_Item, T=%s, sizeof(T)=%u\n", isReadOnly ? "ReadOnly" : "",
3768 info.compCompHnd->getClassName(spanElemHnd), elemSize);
3770 GenTree* index = impPopStack().val;
3771 GenTree* ptrToSpan = impPopStack().val;
3772 GenTree* indexClone = nullptr;
3773 GenTree* ptrToSpanClone = nullptr;
3778 printf("with ptr-to-span\n");
3779 gtDispTree(ptrToSpan);
3780 printf("and index\n");
3783 #endif // defined(DEBUG)
3785 // We need to use both index and ptr-to-span twice, so clone or spill.
3786 index = impCloneExpr(index, &indexClone, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
3787 nullptr DEBUGARG("Span.get_Item index"));
3788 ptrToSpan = impCloneExpr(ptrToSpan, &ptrToSpanClone, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
3789 nullptr DEBUGARG("Span.get_Item ptrToSpan"));
3792 CORINFO_FIELD_HANDLE lengthHnd = info.compCompHnd->getFieldInClass(clsHnd, 1);
3793 const unsigned lengthOffset = info.compCompHnd->getFieldOffset(lengthHnd);
3794 GenTree* length = gtNewFieldRef(TYP_INT, lengthHnd, ptrToSpan, lengthOffset, false);
3795 GenTree* boundsCheck = new (this, GT_ARR_BOUNDS_CHECK)
3796 GenTreeBoundsChk(GT_ARR_BOUNDS_CHECK, TYP_VOID, index, length, SCK_RNGCHK_FAIL);
3799 GenTree* indexIntPtr = impImplicitIorI4Cast(indexClone, TYP_I_IMPL);
3800 GenTree* sizeofNode = gtNewIconNode(elemSize);
3801 GenTree* mulNode = gtNewOperNode(GT_MUL, TYP_I_IMPL, indexIntPtr, sizeofNode);
3802 CORINFO_FIELD_HANDLE ptrHnd = info.compCompHnd->getFieldInClass(clsHnd, 0);
3803 const unsigned ptrOffset = info.compCompHnd->getFieldOffset(ptrHnd);
3804 GenTree* data = gtNewFieldRef(TYP_BYREF, ptrHnd, ptrToSpanClone, ptrOffset, false);
3805 GenTree* result = gtNewOperNode(GT_ADD, TYP_BYREF, data, mulNode);
3808 var_types resultType = JITtype2varType(sig->retType);
3809 assert(resultType == result->TypeGet());
3810 retNode = gtNewOperNode(GT_COMMA, resultType, boundsCheck, result);
3815 case CORINFO_INTRINSIC_GetRawHandle:
3817 noway_assert(IsTargetAbi(CORINFO_CORERT_ABI)); // Only CoreRT supports it.
3818 CORINFO_RESOLVED_TOKEN resolvedToken;
3819 resolvedToken.tokenContext = MAKE_METHODCONTEXT(info.compMethodHnd);
3820 resolvedToken.tokenScope = info.compScopeHnd;
3821 resolvedToken.token = memberRef;
3822 resolvedToken.tokenType = CORINFO_TOKENKIND_Method;
3824 CORINFO_GENERICHANDLE_RESULT embedInfo;
3825 info.compCompHnd->expandRawHandleIntrinsic(&resolvedToken, &embedInfo);
3827 GenTree* rawHandle = impLookupToTree(&resolvedToken, &embedInfo.lookup, gtTokenToIconFlags(memberRef),
3828 embedInfo.compileTimeHandle);
3829 if (rawHandle == nullptr)
3834 noway_assert(genTypeSize(rawHandle->TypeGet()) == genTypeSize(TYP_I_IMPL));
3836 unsigned rawHandleSlot = lvaGrabTemp(true DEBUGARG("rawHandle"));
3837 impAssignTempGen(rawHandleSlot, rawHandle, clsHnd, (unsigned)CHECK_SPILL_NONE);
3839 GenTree* lclVar = gtNewLclvNode(rawHandleSlot, TYP_I_IMPL);
3840 GenTree* lclVarAddr = gtNewOperNode(GT_ADDR, TYP_I_IMPL, lclVar);
3841 var_types resultType = JITtype2varType(sig->retType);
3842 retNode = gtNewOperNode(GT_IND, resultType, lclVarAddr);
3847 case CORINFO_INTRINSIC_TypeEQ:
3848 case CORINFO_INTRINSIC_TypeNEQ:
3850 JITDUMP("Importing Type.op_*Equality intrinsic\n");
3851 op1 = impStackTop(1).val;
3852 op2 = impStackTop(0).val;
3853 GenTree* optTree = gtFoldTypeEqualityCall(intrinsicID, op1, op2);
3854 if (optTree != nullptr)
3856 // Success, clean up the evaluation stack.
3860 // See if we can optimize even further, to a handle compare.
3861 optTree = gtFoldTypeCompare(optTree);
3863 // See if we can now fold a handle compare to a constant.
3864 optTree = gtFoldExpr(optTree);
3870 // Retry optimizing these later
3876 case CORINFO_INTRINSIC_GetCurrentManagedThread:
3877 case CORINFO_INTRINSIC_GetManagedThreadId:
3879 // Retry optimizing these during morph
3885 /* Unknown intrinsic */
3886 intrinsicID = CORINFO_INTRINSIC_Illegal;
3890 // Look for new-style jit intrinsics by name
3891 if (ni != NI_Illegal)
3893 assert(retNode == nullptr);
3896 case NI_System_Enum_HasFlag:
3898 GenTree* thisOp = impStackTop(1).val;
3899 GenTree* flagOp = impStackTop(0).val;
3900 GenTree* optTree = gtOptimizeEnumHasFlag(thisOp, flagOp);
3902 if (optTree != nullptr)
3904 // Optimization successful. Pop the stack for real.
3911 // Retry optimizing this during morph.
3918 case NI_MathF_Round:
3921 // Math.Round and MathF.Round used to be traditional JIT intrinsics. In order
3922 // to simplify the transition, we will just treat them as if they were still the
3923 // old intrinsic, CORINFO_INTRINSIC_Round; this should flow properly through the rest of the JIT.
3926 retNode = impMathIntrinsic(method, sig, callType, CORINFO_INTRINSIC_Round, tailCall);
3930 case NI_System_Collections_Generic_EqualityComparer_get_Default:
3932 // Flag for later handling during devirtualization.
3944 if (retNode == nullptr)
3946 NO_WAY("JIT must expand the intrinsic!");
3950 // Optionally report if this intrinsic is special
3951 // (that is, potentially re-optimizable during morph).
3952 if (isSpecialIntrinsic != nullptr)
3954 *isSpecialIntrinsic = isSpecial;
3960 GenTree* Compiler::impMathIntrinsic(CORINFO_METHOD_HANDLE method,
3961 CORINFO_SIG_INFO* sig,
3963 CorInfoIntrinsics intrinsicID,
3969 assert(callType != TYP_STRUCT);
3970 assert((intrinsicID == CORINFO_INTRINSIC_Sin) || intrinsicID == CORINFO_INTRINSIC_Cbrt ||
3971 (intrinsicID == CORINFO_INTRINSIC_Sqrt) || (intrinsicID == CORINFO_INTRINSIC_Abs) ||
3972 (intrinsicID == CORINFO_INTRINSIC_Cos) || (intrinsicID == CORINFO_INTRINSIC_Round) ||
3973 (intrinsicID == CORINFO_INTRINSIC_Cosh) || (intrinsicID == CORINFO_INTRINSIC_Sinh) ||
3974 (intrinsicID == CORINFO_INTRINSIC_Tan) || (intrinsicID == CORINFO_INTRINSIC_Tanh) ||
3975 (intrinsicID == CORINFO_INTRINSIC_Asin) || (intrinsicID == CORINFO_INTRINSIC_Asinh) ||
3976 (intrinsicID == CORINFO_INTRINSIC_Acos) || (intrinsicID == CORINFO_INTRINSIC_Acosh) ||
3977 (intrinsicID == CORINFO_INTRINSIC_Atan) || (intrinsicID == CORINFO_INTRINSIC_Atan2) ||
3978 (intrinsicID == CORINFO_INTRINSIC_Atanh) || (intrinsicID == CORINFO_INTRINSIC_Log10) ||
3979 (intrinsicID == CORINFO_INTRINSIC_Pow) || (intrinsicID == CORINFO_INTRINSIC_Exp) ||
3980 (intrinsicID == CORINFO_INTRINSIC_Ceiling) || (intrinsicID == CORINFO_INTRINSIC_Floor));
3984 #if defined(LEGACY_BACKEND)
3985 if (IsTargetIntrinsic(intrinsicID))
3986 #elif !defined(_TARGET_X86_)
3987     // Intrinsics that are not implemented directly by target instructions will
3988     // be re-materialized as user calls in the rationalizer. For tail-prefixed calls,
3989     // don't do this optimization, because:
3990     // a) we need to keep backward compatibility with desktop .NET 4.6 / 4.6.1, and
3991     // b) it would be a non-trivial task, or too late, to re-materialize a surviving
3992     //    tail-prefixed GT_INTRINSIC as a tail call in the rationalizer.
3993 if (!IsIntrinsicImplementedByUserCall(intrinsicID) || !tailCall)
3995 // On x86 RyuJIT, importing intrinsics that are implemented as user calls can cause incorrect calculation
3996 // of the depth of the stack if these intrinsics are used as arguments to another call. This causes bad
3997 // code generation for certain EH constructs.
3998 if (!IsIntrinsicImplementedByUserCall(intrinsicID))
4001 switch (sig->numArgs)
4004 op1 = impPopStack().val;
4006 #if FEATURE_X87_DOUBLES
4008 // X87 stack doesn't differentiate between float/double
4009 // so it doesn't need a cast, but everybody else does
4010 // Just double check it is at least a FP type
4011 noway_assert(varTypeIsFloating(op1));
4013 #else // FEATURE_X87_DOUBLES
4014 assert(varTypeIsFloating(op1));
4016 if (op1->TypeGet() != callType)
4018 op1 = gtNewCastNode(callType, op1, false, callType);
4021 #endif // FEATURE_X87_DOUBLES
4023 op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, intrinsicID, method);
4027 op2 = impPopStack().val;
4028 op1 = impPopStack().val;
4030 #if FEATURE_X87_DOUBLES
4032 // X87 stack doesn't differentiate between float/double
4033 // so it doesn't need a cast, but everybody else does
4034 // Just double check it is at least a FP type
4035 noway_assert(varTypeIsFloating(op2));
4036 noway_assert(varTypeIsFloating(op1));
4038 #else // FEATURE_X87_DOUBLES
4039 assert(varTypeIsFloating(op1));
4040 assert(varTypeIsFloating(op2));
4042 if (op2->TypeGet() != callType)
4044 op2 = gtNewCastNode(callType, op2, false, callType);
4046 if (op1->TypeGet() != callType)
4048 op1 = gtNewCastNode(callType, op1, false, callType);
4051 #endif // FEATURE_X87_DOUBLES
4053 op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, op2, intrinsicID, method);
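                // Note: two-operand math intrinsics (e.g. Atan2, Pow) carry both operands
                // directly on the GT_INTRINSIC node built above.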
4057                 NO_WAY("Unsupported number of args for Math Intrinsic");
4060 #ifndef LEGACY_BACKEND
4061 if (IsIntrinsicImplementedByUserCall(intrinsicID))
4063 op1->gtFlags |= GTF_CALL;
4071 //------------------------------------------------------------------------
4072 // lookupNamedIntrinsic: map method to jit named intrinsic value
4075 // method -- method handle for method
4078 // Id for the named intrinsic, or Illegal if none.
4081 // method should have CORINFO_FLG_JIT_INTRINSIC set in its attributes,
4082 // otherwise it is not a named jit intrinsic.
4085 NamedIntrinsic Compiler::lookupNamedIntrinsic(CORINFO_METHOD_HANDLE method)
4087 NamedIntrinsic result = NI_Illegal;
4089 const char* className = nullptr;
4090 const char* namespaceName = nullptr;
4091 const char* methodName = info.compCompHnd->getMethodNameFromMetadata(method, &className, &namespaceName);
4093 if ((namespaceName == nullptr) || (className == nullptr) || (methodName == nullptr))
4098 if (strcmp(namespaceName, "System") == 0)
4100 if ((strcmp(className, "Enum") == 0) && (strcmp(methodName, "HasFlag") == 0))
4102 result = NI_System_Enum_HasFlag;
4104 else if ((strcmp(className, "MathF") == 0) && (strcmp(methodName, "Round") == 0))
4106 result = NI_MathF_Round;
4108 else if ((strcmp(className, "Math") == 0) && (strcmp(methodName, "Round") == 0))
4110 result = NI_Math_Round;
4113 else if (strcmp(namespaceName, "System.Collections.Generic") == 0)
4115 if ((strcmp(className, "EqualityComparer`1") == 0) && (strcmp(methodName, "get_Default") == 0))
4117 result = NI_System_Collections_Generic_EqualityComparer_get_Default;
4121 #ifdef FEATURE_HW_INTRINSICS
4122 #if defined(_TARGET_XARCH_)
4123 if ((namespaceName != nullptr) && strcmp(namespaceName, "System.Runtime.Intrinsics.X86") == 0)
4125 InstructionSet isa = lookupHWIntrinsicISA(className);
4126 result = lookupHWIntrinsic(methodName, isa);
4128 #elif defined(_TARGET_ARM64_)
4129 if ((namespaceName != nullptr) && strcmp(namespaceName, "System.Runtime.Intrinsics.Arm.Arm64") == 0)
4131 result = lookupHWIntrinsic(className, methodName);
4133 #else // !defined(_TARGET_XARCH_) && !defined(_TARGET_ARM64_)
4134 #error Unsupported platform
4135 #endif // !defined(_TARGET_XARCH_) && !defined(_TARGET_ARM64_)
4136 #endif // FEATURE_HW_INTRINSICS
4140 /*****************************************************************************/
4142 GenTree* Compiler::impArrayAccessIntrinsic(
4143 CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, int memberRef, bool readonlyCall, CorInfoIntrinsics intrinsicID)
4145 /* If we are generating SMALL_CODE, we don't want to use intrinsics for
4146 the following, as it generates fatter code.
4149 if (compCodeOpt() == SMALL_CODE)
4154 /* These intrinsics generate fatter (but faster) code and are only
4155 done if we don't need SMALL_CODE */
4157 unsigned rank = (intrinsicID == CORINFO_INTRINSIC_Array_Set) ? (sig->numArgs - 1) : sig->numArgs;
4159     // The rank 1 case is special because it has to handle two array formats,
4160     // so we simply do not handle that case here
4161 if (rank > GT_ARR_MAX_RANK || rank <= 1)
4166 CORINFO_CLASS_HANDLE arrElemClsHnd = nullptr;
4167 var_types elemType = JITtype2varType(info.compCompHnd->getChildType(clsHnd, &arrElemClsHnd));
4169     // For the ref case, we will only be able to inline if the types match
4170     // (the verifier checks for this; we don't care for the non-verified case) and the
4171     // type is final (so we don't need to do the cast).
4172 if ((intrinsicID != CORINFO_INTRINSIC_Array_Get) && !readonlyCall && varTypeIsGC(elemType))
4174 // Get the call site signature
4175 CORINFO_SIG_INFO LocalSig;
4176 eeGetCallSiteSig(memberRef, info.compScopeHnd, impTokenLookupContextHandle, &LocalSig);
4177 assert(LocalSig.hasThis());
4179 CORINFO_CLASS_HANDLE actualElemClsHnd;
4181 if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
4183 // Fetch the last argument, the one that indicates the type we are setting.
4184 CORINFO_ARG_LIST_HANDLE argType = LocalSig.args;
4185 for (unsigned r = 0; r < rank; r++)
4187 argType = info.compCompHnd->getArgNext(argType);
4190 typeInfo argInfo = verParseArgSigToTypeInfo(&LocalSig, argType);
4191 actualElemClsHnd = argInfo.GetClassHandle();
4195 assert(intrinsicID == CORINFO_INTRINSIC_Array_Address);
4197 // Fetch the return type
4198 typeInfo retInfo = verMakeTypeInfo(LocalSig.retType, LocalSig.retTypeClass);
4199 assert(retInfo.IsByRef());
4200 actualElemClsHnd = retInfo.GetClassHandle();
4203 // if it's not final, we can't do the optimization
4204 if (!(info.compCompHnd->getClassAttribs(actualElemClsHnd) & CORINFO_FLG_FINAL))
4210 unsigned arrayElemSize;
4211 if (elemType == TYP_STRUCT)
4213 assert(arrElemClsHnd);
4215 arrayElemSize = info.compCompHnd->getClassSize(arrElemClsHnd);
4219 arrayElemSize = genTypeSize(elemType);
4222 if ((unsigned char)arrayElemSize != arrayElemSize)
4224 // arrayElemSize would be truncated as an unsigned char.
4225 // This means the array element is too large. Don't do the optimization.
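        // (For example, a 300-byte element would compare as (unsigned char)300 == 44 != 300,
        // so we give up here and leave the access to the ordinary, non-expanded path.)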
4229 GenTree* val = nullptr;
4231 if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
4233 // Assignment of a struct is more work, and there are more gets than sets.
4234 if (elemType == TYP_STRUCT)
4239 val = impPopStack().val;
4240 assert(genActualType(elemType) == genActualType(val->gtType) ||
4241 (elemType == TYP_FLOAT && val->gtType == TYP_DOUBLE) ||
4242 (elemType == TYP_INT && val->gtType == TYP_BYREF) ||
4243 (elemType == TYP_DOUBLE && val->gtType == TYP_FLOAT));
4246 noway_assert((unsigned char)GT_ARR_MAX_RANK == GT_ARR_MAX_RANK);
4248 GenTree* inds[GT_ARR_MAX_RANK];
4249 for (unsigned k = rank; k > 0; k--)
4251 inds[k - 1] = impPopStack().val;
4254 GenTree* arr = impPopStack().val;
4255 assert(arr->gtType == TYP_REF);
4258 new (this, GT_ARR_ELEM) GenTreeArrElem(TYP_BYREF, arr, static_cast<unsigned char>(rank),
4259 static_cast<unsigned char>(arrayElemSize), elemType, &inds[0]);
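    // arrElem is now a GT_ARR_ELEM node computing the address of the element; for Get/Set
    // (but not Address) it is wrapped in a GT_IND below to load or store the element value.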
4261 if (intrinsicID != CORINFO_INTRINSIC_Array_Address)
4263 arrElem = gtNewOperNode(GT_IND, elemType, arrElem);
4266 if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
4268 assert(val != nullptr);
4269 return gtNewAssignNode(arrElem, val);
4277 BOOL Compiler::verMergeEntryStates(BasicBlock* block, bool* changed)
4281 // do some basic checks first
4282 if (block->bbStackDepthOnEntry() != verCurrentState.esStackDepth)
4287 if (verCurrentState.esStackDepth > 0)
4289 // merge stack types
4290 StackEntry* parentStack = block->bbStackOnEntry();
4291 StackEntry* childStack = verCurrentState.esStack;
4293 for (i = 0; i < verCurrentState.esStackDepth; i++, parentStack++, childStack++)
4295 if (tiMergeToCommonParent(&parentStack->seTypeInfo, &childStack->seTypeInfo, changed) == FALSE)
4302 // merge initialization status of this ptr
4304 if (verTrackObjCtorInitState)
4306 // If we're tracking the CtorInitState, then it must not be unknown in the current state.
4307 assert(verCurrentState.thisInitialized != TIS_Bottom);
4309 // If the successor block's thisInit state is unknown, copy it from the current state.
4310 if (block->bbThisOnEntry() == TIS_Bottom)
4313 verSetThisInit(block, verCurrentState.thisInitialized);
4315 else if (verCurrentState.thisInitialized != block->bbThisOnEntry())
4317 if (block->bbThisOnEntry() != TIS_Top)
4320 verSetThisInit(block, TIS_Top);
4322 if (block->bbFlags & BBF_FAILED_VERIFICATION)
4324 // The block is bad. Control can flow through the block to any handler that catches the
4325 // verification exception, but the importer ignores bad blocks and therefore won't model
4326 // this flow in the normal way. To complete the merge into the bad block, the new state
4327 // needs to be manually pushed to the handlers that may be reached after the verification
4328 // exception occurs.
4330 // Usually, the new state was already propagated to the relevant handlers while processing
4331 // the predecessors of the bad block. The exception is when the bad block is at the start
4332 // of a try region, meaning it is protected by additional handlers that do not protect its
4335 if (block->hasTryIndex() && ((block->bbFlags & BBF_TRY_BEG) != 0))
4337 // Push TIS_Top to the handlers that protect the bad block. Note that this can cause
4338 // recursive calls back into this code path (if successors of the current bad block are
4339 // also bad blocks).
4341 ThisInitState origTIS = verCurrentState.thisInitialized;
4342 verCurrentState.thisInitialized = TIS_Top;
4343 impVerifyEHBlock(block, true);
4344 verCurrentState.thisInitialized = origTIS;
4352 assert(verCurrentState.thisInitialized == TIS_Bottom && block->bbThisOnEntry() == TIS_Bottom);
4358 /*****************************************************************************
4359 * 'logMsg' is true if a log message needs to be logged. false if the caller has
4360 * already logged it (presumably in a more detailed fashion than done here)
4361 * 'bVerificationException' is true for a verification exception, false for a
4362 * "call unauthorized by host" exception.
4365 void Compiler::verConvertBBToThrowVerificationException(BasicBlock* block DEBUGARG(bool logMsg))
4367 block->bbJumpKind = BBJ_THROW;
4368 block->bbFlags |= BBF_FAILED_VERIFICATION;
4370 impCurStmtOffsSet(block->bbCodeOffs);
4373 // we need this since BeginTreeList asserts otherwise
4374 impTreeList = impTreeLast = nullptr;
4375 block->bbFlags &= ~BBF_IMPORTED;
4379 JITLOG((LL_ERROR, "Verification failure: while compiling %s near IL offset %x..%xh \n", info.compFullName,
4380 block->bbCodeOffs, block->bbCodeOffsEnd));
4383 printf("\n\nVerification failure: %s near IL %xh \n", info.compFullName, block->bbCodeOffs);
4387 if (JitConfig.DebugBreakOnVerificationFailure())
4395 // if the stack is non-empty evaluate all the side-effects
4396 if (verCurrentState.esStackDepth > 0)
4398 impEvalSideEffects();
4400 assert(verCurrentState.esStackDepth == 0);
4403 gtNewHelperCallNode(CORINFO_HELP_VERIFICATION, TYP_VOID, gtNewArgList(gtNewIconNode(block->bbCodeOffs)));
4404 // verCurrentState.esStackDepth = 0;
4405 impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
4407     // The inliner is not able to handle methods that require a throw block, so
4408     // make sure this method never gets inlined.
4409 info.compCompHnd->setMethodAttribs(info.compMethodHnd, CORINFO_FLG_BAD_INLINEE);
4412 /*****************************************************************************
4415 void Compiler::verHandleVerificationFailure(BasicBlock* block DEBUGARG(bool logMsg))
4418     // On AMD64, for historical reasons involving design limitations of JIT64, the VM has a
4419 // slightly different mechanism in which it calls the JIT to perform IL verification:
4420 // in the case of transparent methods the VM calls for a predicate IsVerifiable()
4421 // that consists of calling the JIT with the IMPORT_ONLY flag and with the IL verify flag on.
4422     // If the JIT determines the method is not verifiable, it should raise the exception to the VM and let
4423     // it bubble up until reported by the runtime. Currently in RyuJIT, this method doesn't bubble
4424     // up the exception; instead it embeds a throw inside the offending basic block and lets it
4425     // fail at run time in the jitted method.
4427 // For AMD64 we don't want this behavior when the JIT has been called only for verification (i.e.
4428 // with the IMPORT_ONLY and IL Verification flag set) because this won't actually generate code,
4429 // just try to find out whether to fail this method before even actually jitting it. So, in case
4430 // we detect these two conditions, instead of generating a throw statement inside the offending
4431 // basic block, we immediately fail to JIT and notify the VM to make the IsVerifiable() predicate
4432 // to return false and make RyuJIT behave the same way JIT64 does.
4434 // The rationale behind this workaround is to avoid modifying the VM and maintain compatibility between JIT64 and
4435 // RyuJIT for the time being until we completely replace JIT64.
4436     // TODO-ARM64-Cleanup: We probably want to actually modify the VM in the future to avoid the unnecessary two passes.
4438     // On AMD64 we must make sure we're behaving the same way as JIT64, meaning we should only raise the verification
4439 // exception if we are only importing and verifying. The method verNeedsVerification() can also modify the
4440 // tiVerificationNeeded flag in the case it determines it can 'skip verification' during importation and defer it
4441 // to a runtime check. That's why we must assert one or the other (since the flag tiVerificationNeeded can
4442 // be turned off during importation).
4443 CLANG_FORMAT_COMMENT_ANCHOR;
4445 #ifdef _TARGET_64BIT_
4448 bool canSkipVerificationResult =
4449 info.compCompHnd->canSkipMethodVerification(info.compMethodHnd) != CORINFO_VERIFICATION_CANNOT_SKIP;
4450 assert(tiVerificationNeeded || canSkipVerificationResult);
4453 // Add the non verifiable flag to the compiler
4454 if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IMPORT_ONLY))
4456 tiIsVerifiableCode = FALSE;
4458 #endif //_TARGET_64BIT_
4459 verResetCurrentState(block, &verCurrentState);
4460 verConvertBBToThrowVerificationException(block DEBUGARG(logMsg));
4463 impNoteLastILoffs(); // Remember at which BC offset the tree was finished
4467 /******************************************************************************/
4468 typeInfo Compiler::verMakeTypeInfo(CorInfoType ciType, CORINFO_CLASS_HANDLE clsHnd)
4470 assert(ciType < CORINFO_TYPE_COUNT);
4475 case CORINFO_TYPE_STRING:
4476 case CORINFO_TYPE_CLASS:
4477 tiResult = verMakeTypeInfo(clsHnd);
4478 if (!tiResult.IsType(TI_REF))
4479 { // type must be consistent with element type
4484 #ifdef _TARGET_64BIT_
4485 case CORINFO_TYPE_NATIVEINT:
4486 case CORINFO_TYPE_NATIVEUINT:
4489 // If we have more precise information, use it
4490 return verMakeTypeInfo(clsHnd);
4494 return typeInfo::nativeInt();
4497 #endif // _TARGET_64BIT_
4499 case CORINFO_TYPE_VALUECLASS:
4500 case CORINFO_TYPE_REFANY:
4501 tiResult = verMakeTypeInfo(clsHnd);
4502             // type must be consistent with element type;
4503 if (!tiResult.IsValueClass())
4508 case CORINFO_TYPE_VAR:
4509 return verMakeTypeInfo(clsHnd);
4511 case CORINFO_TYPE_PTR: // for now, pointers are treated as an error
4512 case CORINFO_TYPE_VOID:
4516 case CORINFO_TYPE_BYREF:
4518 CORINFO_CLASS_HANDLE childClassHandle;
4519 CorInfoType childType = info.compCompHnd->getChildType(clsHnd, &childClassHandle);
4520 return ByRef(verMakeTypeInfo(childType, childClassHandle));
4526 { // If we have more precise information, use it
4527 return typeInfo(TI_STRUCT, clsHnd);
4531 return typeInfo(JITtype2tiType(ciType));
4537 /******************************************************************************/
4539 typeInfo Compiler::verMakeTypeInfo(CORINFO_CLASS_HANDLE clsHnd, bool bashStructToRef /* = false */)
4541 if (clsHnd == nullptr)
4546 // Byrefs should only occur in method and local signatures, which are accessed
4547 // using ICorClassInfo and ICorClassInfo.getChildType.
4548 // So findClass() and getClassAttribs() should not be called for byrefs
4550 if (JITtype2varType(info.compCompHnd->asCorInfoType(clsHnd)) == TYP_BYREF)
4552 assert(!"Did findClass() return a Byref?");
4556 unsigned attribs = info.compCompHnd->getClassAttribs(clsHnd);
4558 if (attribs & CORINFO_FLG_VALUECLASS)
4560 CorInfoType t = info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd);
4562         // Meta-data validation should ensure that CORINFO_TYPE_BYREF does
4563         // not occur here, so we may want to change this to an assert instead.
4564 if (t == CORINFO_TYPE_VOID || t == CORINFO_TYPE_BYREF || t == CORINFO_TYPE_PTR)
4569 #ifdef _TARGET_64BIT_
4570 if (t == CORINFO_TYPE_NATIVEINT || t == CORINFO_TYPE_NATIVEUINT)
4572 return typeInfo::nativeInt();
4574 #endif // _TARGET_64BIT_
4576 if (t != CORINFO_TYPE_UNDEF)
4578 return (typeInfo(JITtype2tiType(t)));
4580 else if (bashStructToRef)
4582 return (typeInfo(TI_REF, clsHnd));
4586 return (typeInfo(TI_STRUCT, clsHnd));
4589 else if (attribs & CORINFO_FLG_GENERIC_TYPE_VARIABLE)
4591 // See comment in _typeInfo.h for why we do it this way.
4592 return (typeInfo(TI_REF, clsHnd, true));
4596 return (typeInfo(TI_REF, clsHnd));
4600 /******************************************************************************/
4601 BOOL Compiler::verIsSDArray(typeInfo ti)
4603 if (ti.IsNullObjRef())
4604 { // nulls are SD arrays
4608 if (!ti.IsType(TI_REF))
4613 if (!info.compCompHnd->isSDArray(ti.GetClassHandleForObjRef()))
4620 /******************************************************************************/
4621 /* Given 'arrayObjectType' which is an array type, fetch the element type. */
4622 /* Returns an error type if anything goes wrong */
4624 typeInfo Compiler::verGetArrayElemType(typeInfo arrayObjectType)
4626     assert(!arrayObjectType.IsNullObjRef()); // you need to check for null explicitly since that is a success case
4628 if (!verIsSDArray(arrayObjectType))
4633 CORINFO_CLASS_HANDLE childClassHandle = nullptr;
4634 CorInfoType ciType = info.compCompHnd->getChildType(arrayObjectType.GetClassHandleForObjRef(), &childClassHandle);
4636 return verMakeTypeInfo(ciType, childClassHandle);
4639 /*****************************************************************************
4641 typeInfo Compiler::verParseArgSigToTypeInfo(CORINFO_SIG_INFO* sig, CORINFO_ARG_LIST_HANDLE args)
4643 CORINFO_CLASS_HANDLE classHandle;
4644 CorInfoType ciType = strip(info.compCompHnd->getArgType(sig, args, &classHandle));
4646 var_types type = JITtype2varType(ciType);
4647 if (varTypeIsGC(type))
4649 // For efficiency, getArgType only returns something in classHandle for
4650         // value types. For other types that have additional type info, you
4651         // have to call back explicitly.
4652 classHandle = info.compCompHnd->getArgClass(sig, args);
4655 NO_WAY("Could not figure out Class specified in argument or local signature");
4659 return verMakeTypeInfo(ciType, classHandle);
4662 /*****************************************************************************/
4664 // This does the expensive check to figure out whether the method
4665 // needs to be verified. It is called only when we fail verification,
4666 // just before throwing the verification exception.
4668 BOOL Compiler::verNeedsVerification()
4670 // If we have previously determined that verification is NOT needed
4671 // (for example in Compiler::compCompile), that means verification is really not needed.
4672 // Return the same decision we made before.
4673 // (Note: This literally means that tiVerificationNeeded can never go from 0 to 1.)
4675 if (!tiVerificationNeeded)
4677 return tiVerificationNeeded;
4680 assert(tiVerificationNeeded);
4682 // Ok, we haven't concluded that verification is NOT needed. Consult the EE now to
4683 // obtain the answer.
4684 CorInfoCanSkipVerificationResult canSkipVerificationResult =
4685 info.compCompHnd->canSkipMethodVerification(info.compMethodHnd);
4687 // canSkipVerification will return one of the following three values:
4688 // CORINFO_VERIFICATION_CANNOT_SKIP = 0, // Cannot skip verification during jit time.
4689 // CORINFO_VERIFICATION_CAN_SKIP = 1, // Can skip verification during jit time.
4690 // CORINFO_VERIFICATION_RUNTIME_CHECK = 2, // Skip verification during jit time,
4691 // but need to insert a callout to the VM to ask during runtime
4692 // whether to skip verification or not.
4694 // Set tiRuntimeCalloutNeeded if canSkipVerification() instructs us to insert a callout for runtime check
4695 if (canSkipVerificationResult == CORINFO_VERIFICATION_RUNTIME_CHECK)
4697 tiRuntimeCalloutNeeded = true;
4700 if (canSkipVerificationResult == CORINFO_VERIFICATION_DONT_JIT)
4702 // Dev10 706080 - Testers don't like the assert, so just silence it
4703 // by not using the macros that invoke debugAssert.
4707 // When tiVerificationNeeded is true, JIT will do the verification during JIT time.
4708 // The following line means we will NOT do jit time verification if canSkipVerification
4709 // returns CORINFO_VERIFICATION_CAN_SKIP or CORINFO_VERIFICATION_RUNTIME_CHECK.
4710 tiVerificationNeeded = (canSkipVerificationResult == CORINFO_VERIFICATION_CANNOT_SKIP);
4711 return tiVerificationNeeded;
4714 BOOL Compiler::verIsByRefLike(const typeInfo& ti)
4720 if (!ti.IsType(TI_STRUCT))
4724 return info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_CONTAINS_STACK_PTR;
4727 BOOL Compiler::verIsSafeToReturnByRef(const typeInfo& ti)
4729 if (ti.IsPermanentHomeByRef())
4739 BOOL Compiler::verIsBoxable(const typeInfo& ti)
4741 return (ti.IsPrimitiveType() || ti.IsObjRef() // includes boxed generic type variables
4742 || ti.IsUnboxedGenericTypeVar() ||
4743 (ti.IsType(TI_STRUCT) &&
4744 // exclude byreflike structs
4745 !(info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_CONTAINS_STACK_PTR)));
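    // For example, primitives, object references, and ordinary structs are boxable, while
    // byref-like structs such as System.Span<T> (CORINFO_FLG_CONTAINS_STACK_PTR) are not.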
4748 // Is it a boxed value type?
4749 bool Compiler::verIsBoxedValueType(typeInfo ti)
4751 if (ti.GetType() == TI_REF)
4753 CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandleForObjRef();
4754 return !!eeIsValueClass(clsHnd);
4762 /*****************************************************************************
4764 * Check if a TailCall is legal.
4767 bool Compiler::verCheckTailCallConstraint(
4769 CORINFO_RESOLVED_TOKEN* pResolvedToken,
4770 CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, // Is this a "constrained." call on a type parameter?
4771     bool                    speculative // If true, won't throw if verification fails. Instead it will
4772 // return false to the caller.
4773 // If false, it will throw.
4777 CORINFO_SIG_INFO sig;
4778 unsigned int popCount = 0; // we can't pop the stack since impImportCall needs it, so
4779 // this counter is used to keep track of how many items have been
4782 CORINFO_METHOD_HANDLE methodHnd = nullptr;
4783 CORINFO_CLASS_HANDLE methodClassHnd = nullptr;
4784 unsigned methodClassFlgs = 0;
4786 assert(impOpcodeIsCallOpcode(opcode));
4788 if (compIsForInlining())
4793 // for calli, VerifyOrReturn that this is not a virtual method
4794 if (opcode == CEE_CALLI)
4796 /* Get the call sig */
4797 eeGetSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &sig);
4799 // We don't know the target method, so we have to infer the flags, or
4800 // assume the worst-case.
4801 mflags = (sig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC;
4805 methodHnd = pResolvedToken->hMethod;
4807 mflags = info.compCompHnd->getMethodAttribs(methodHnd);
4809 // When verifying generic code we pair the method handle with its
4810 // owning class to get the exact method signature.
4811 methodClassHnd = pResolvedToken->hClass;
4812 assert(methodClassHnd);
4814 eeGetMethodSig(methodHnd, &sig, methodClassHnd);
4816 // opcode specific check
4817 methodClassFlgs = info.compCompHnd->getClassAttribs(methodClassHnd);
4820 // We must have got the methodClassHnd if opcode is not CEE_CALLI
4821 assert((methodHnd != nullptr && methodClassHnd != nullptr) || opcode == CEE_CALLI);
4823 if ((sig.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
4825 eeGetCallSiteSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &sig);
4828 // check compatibility of the arguments
4829 unsigned int argCount;
4830 argCount = sig.numArgs;
4831 CORINFO_ARG_LIST_HANDLE args;
4835 typeInfo tiDeclared = verParseArgSigToTypeInfo(&sig, args).NormaliseForStack();
4837 // check that the argument is not a byref for tailcalls
4838 VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclared), "tailcall on byrefs", speculative);
4840         // For unsafe code, we might have parameters containing a pointer to a stack location.
4841         // Disallow the tailcall in that case.
4842 CORINFO_CLASS_HANDLE classHandle;
4843 CorInfoType ciType = strip(info.compCompHnd->getArgType(&sig, args, &classHandle));
4844 VerifyOrReturnSpeculative(ciType != CORINFO_TYPE_PTR, "tailcall on CORINFO_TYPE_PTR", speculative);
4846 args = info.compCompHnd->getArgNext(args);
4850 popCount += sig.numArgs;
4852 // check for 'this' which is on non-static methods, not called via NEWOBJ
4853 if (!(mflags & CORINFO_FLG_STATIC))
4855 // Always update the popCount.
4856 // This is crucial for the stack calculation to be correct.
4857 typeInfo tiThis = impStackTop(popCount).seTypeInfo;
4860 if (opcode == CEE_CALLI)
4862 // For CALLI, we don't know the methodClassHnd. Therefore, let's check the "this" object
4864 if (tiThis.IsValueClass())
4868 VerifyOrReturnSpeculative(!verIsByRefLike(tiThis), "byref in tailcall", speculative);
4872 // Check type compatibility of the this argument
4873 typeInfo tiDeclaredThis = verMakeTypeInfo(methodClassHnd);
4874 if (tiDeclaredThis.IsValueClass())
4876 tiDeclaredThis.MakeByRef();
4879 VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclaredThis), "byref in tailcall", speculative);
4883 // Tail calls on constrained calls should be illegal too:
4884 // when instantiated at a value type, a constrained call may pass the address of a stack allocated value
4885 VerifyOrReturnSpeculative(!pConstrainedResolvedToken, "byref in constrained tailcall", speculative);
4887 // Get the exact view of the signature for an array method
4888 if (sig.retType != CORINFO_TYPE_VOID)
4890 if (methodClassFlgs & CORINFO_FLG_ARRAY)
4892 assert(opcode != CEE_CALLI);
4893 eeGetCallSiteSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &sig);
4897 typeInfo tiCalleeRetType = verMakeTypeInfo(sig.retType, sig.retTypeClass);
4898 typeInfo tiCallerRetType =
4899 verMakeTypeInfo(info.compMethodInfo->args.retType, info.compMethodInfo->args.retTypeClass);
4901     // a void return type gets morphed into the error type, so we have to treat it specially here
4902 if (sig.retType == CORINFO_TYPE_VOID)
4904 VerifyOrReturnSpeculative(info.compMethodInfo->args.retType == CORINFO_TYPE_VOID, "tailcall return mismatch",
4909 VerifyOrReturnSpeculative(tiCompatibleWith(NormaliseForStack(tiCalleeRetType),
4910 NormaliseForStack(tiCallerRetType), true),
4911 "tailcall return mismatch", speculative);
4914 // for tailcall, stack must be empty
4915 VerifyOrReturnSpeculative(verCurrentState.esStackDepth == popCount, "stack non-empty on tailcall", speculative);
4917 return true; // Yes, tailcall is legal
4920 /*****************************************************************************
4922 * Checks the IL verification rules for the call
4925 void Compiler::verVerifyCall(OPCODE opcode,
4926 CORINFO_RESOLVED_TOKEN* pResolvedToken,
4927 CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
4930 const BYTE* delegateCreateStart,
4931 const BYTE* codeAddr,
4932 CORINFO_CALL_INFO* callInfo DEBUGARG(const char* methodName))
4935 CORINFO_SIG_INFO* sig = nullptr;
4936 unsigned int popCount = 0; // we can't pop the stack since impImportCall needs it, so
4937 // this counter is used to keep track of how many items have been
4940 // for calli, VerifyOrReturn that this is not a virtual method
4941 if (opcode == CEE_CALLI)
4943 Verify(false, "Calli not verifiable");
4947 //<NICE> It would be nice to cache the rest of it, but eeFindMethod is the big ticket item.
4948 mflags = callInfo->verMethodFlags;
4950 sig = &callInfo->verSig;
4952 if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
4954 eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
4957 // opcode specific check
4958 unsigned methodClassFlgs = callInfo->classFlags;
4962 // cannot do callvirt on valuetypes
4963 VerifyOrReturn(!(methodClassFlgs & CORINFO_FLG_VALUECLASS), "callVirt on value class");
4964 VerifyOrReturn(sig->hasThis(), "CallVirt on static method");
4969 assert(!tailCall); // Importer should not allow this
4970 VerifyOrReturn((mflags & CORINFO_FLG_CONSTRUCTOR) && !(mflags & CORINFO_FLG_STATIC),
4971 "newobj must be on instance");
4973 if (methodClassFlgs & CORINFO_FLG_DELEGATE)
4975 VerifyOrReturn(sig->numArgs == 2, "wrong number args to delegate ctor");
4976 typeInfo tiDeclaredObj = verParseArgSigToTypeInfo(sig, sig->args).NormaliseForStack();
4977 typeInfo tiDeclaredFtn =
4978 verParseArgSigToTypeInfo(sig, info.compCompHnd->getArgNext(sig->args)).NormaliseForStack();
4979 VerifyOrReturn(tiDeclaredFtn.IsNativeIntType(), "ftn arg needs to be a native int type");
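                // (The canonical delegate constructor signature is .ctor(object target, native int ftnPtr);
                // the two checks above enforce exactly that shape.)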
4981 assert(popCount == 0);
4982 typeInfo tiActualObj = impStackTop(1).seTypeInfo;
4983 typeInfo tiActualFtn = impStackTop(0).seTypeInfo;
4985 VerifyOrReturn(tiActualFtn.IsMethod(), "delegate needs method as first arg");
4986 VerifyOrReturn(tiCompatibleWith(tiActualObj, tiDeclaredObj, true), "delegate object type mismatch");
4987 VerifyOrReturn(tiActualObj.IsNullObjRef() || tiActualObj.IsType(TI_REF),
4988 "delegate object type mismatch");
4990 CORINFO_CLASS_HANDLE objTypeHandle =
4991 tiActualObj.IsNullObjRef() ? nullptr : tiActualObj.GetClassHandleForObjRef();
4993 // the method signature must be compatible with the delegate's invoke method
4995 // check that for virtual functions, the type of the object used to get the
4996 // ftn ptr is the same as the type of the object passed to the delegate ctor.
4997 // since this is a bit of work to determine in general, we pattern match stylized
5000 // the delegate creation code check, which used to be done later, is now done here
5001                 // so we can read delegateMethodRef directly from the preceding
5002                 // LDFTN or CEE_LDVIRTFN instruction sequence;
5003 // we then use it in our call to isCompatibleDelegate().
5005 mdMemberRef delegateMethodRef = mdMemberRefNil;
5006 VerifyOrReturn(verCheckDelegateCreation(delegateCreateStart, codeAddr, delegateMethodRef),
5007 "must create delegates with certain IL");
5009 CORINFO_RESOLVED_TOKEN delegateResolvedToken;
5010 delegateResolvedToken.tokenContext = impTokenLookupContextHandle;
5011 delegateResolvedToken.tokenScope = info.compScopeHnd;
5012 delegateResolvedToken.token = delegateMethodRef;
5013 delegateResolvedToken.tokenType = CORINFO_TOKENKIND_Method;
5014 info.compCompHnd->resolveToken(&delegateResolvedToken);
5016 CORINFO_CALL_INFO delegateCallInfo;
5017 eeGetCallInfo(&delegateResolvedToken, nullptr /* constraint typeRef */,
5018 addVerifyFlag(CORINFO_CALLINFO_SECURITYCHECKS), &delegateCallInfo);
5020 BOOL isOpenDelegate = FALSE;
5021 VerifyOrReturn(info.compCompHnd->isCompatibleDelegate(objTypeHandle, delegateResolvedToken.hClass,
5022 tiActualFtn.GetMethod(), pResolvedToken->hClass,
5024 "function incompatible with delegate");
5026 // check the constraints on the target method
5027 VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(delegateResolvedToken.hClass),
5028 "delegate target has unsatisfied class constraints");
5029 VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(delegateResolvedToken.hClass,
5030 tiActualFtn.GetMethod()),
5031 "delegate target has unsatisfied method constraints");
5033 // See ECMA spec section 1.8.1.5.2 (Delegating via instance dispatch)
5034 // for additional verification rules for delegates
5035 CORINFO_METHOD_HANDLE actualMethodHandle = tiActualFtn.GetMethod();
5036 DWORD actualMethodAttribs = info.compCompHnd->getMethodAttribs(actualMethodHandle);
5037 if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr))
5040 if ((actualMethodAttribs & CORINFO_FLG_VIRTUAL) && ((actualMethodAttribs & CORINFO_FLG_FINAL) == 0)
5042 && StrictCheckForNonVirtualCallToVirtualMethod()
5046 if (info.compCompHnd->shouldEnforceCallvirtRestriction(info.compScopeHnd))
5048 VerifyOrReturn(tiActualObj.IsThisPtr() && lvaIsOriginalThisReadOnly() ||
5049 verIsBoxedValueType(tiActualObj),
5050 "The 'this' parameter to the call must be either the calling method's "
5051 "'this' parameter or "
5052 "a boxed value type.");
5057 if (actualMethodAttribs & CORINFO_FLG_PROTECTED)
5059 BOOL targetIsStatic = actualMethodAttribs & CORINFO_FLG_STATIC;
5061 Verify(targetIsStatic || !isOpenDelegate,
5062 "Unverifiable creation of an open instance delegate for a protected member.");
5064 CORINFO_CLASS_HANDLE instanceClassHnd = (tiActualObj.IsNullObjRef() || targetIsStatic)
5066 : tiActualObj.GetClassHandleForObjRef();
5068 // In the case of protected methods, it is a requirement that the 'this'
5069 // pointer be a subclass of the current context. Perform this check.
5070 Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
5071 "Accessing protected method through wrong type.");
5076 // fall thru to default checks
5078 VerifyOrReturn(!(mflags & CORINFO_FLG_ABSTRACT), "method abstract");
5080 VerifyOrReturn(!((mflags & CORINFO_FLG_CONSTRUCTOR) && (methodClassFlgs & CORINFO_FLG_DELEGATE)),
5081 "can only newobj a delegate constructor");
5083 // check compatibility of the arguments
5084 unsigned int argCount;
5085 argCount = sig->numArgs;
5086 CORINFO_ARG_LIST_HANDLE args;
5090 typeInfo tiActual = impStackTop(popCount + argCount).seTypeInfo;
5092 typeInfo tiDeclared = verParseArgSigToTypeInfo(sig, args).NormaliseForStack();
5093 VerifyOrReturn(tiCompatibleWith(tiActual, tiDeclared, true), "type mismatch");
5095 args = info.compCompHnd->getArgNext(args);
5101 popCount += sig->numArgs;
5103     // check for 'this', which is on non-static methods not called via NEWOBJ
5104 CORINFO_CLASS_HANDLE instanceClassHnd = info.compClassHnd;
5105 if (!(mflags & CORINFO_FLG_STATIC) && (opcode != CEE_NEWOBJ))
5107 typeInfo tiThis = impStackTop(popCount).seTypeInfo;
5110 // If it is null, we assume we can access it (since it will AV shortly)
5111 // If it is anything but a reference class, there is no hierarchy, so
5112 // again, we don't need the precise instance class to compute 'protected' access
5113 if (tiThis.IsType(TI_REF))
5115 instanceClassHnd = tiThis.GetClassHandleForObjRef();
5118 // Check type compatibility of the this argument
5119 typeInfo tiDeclaredThis = verMakeTypeInfo(pResolvedToken->hClass);
5120 if (tiDeclaredThis.IsValueClass())
5122 tiDeclaredThis.MakeByRef();
5125 // If this is a call to the base class .ctor, set thisPtr Init for
5127 if (mflags & CORINFO_FLG_CONSTRUCTOR)
5129 if (verTrackObjCtorInitState && tiThis.IsThisPtr() &&
5130 verIsCallToInitThisPtr(info.compClassHnd, pResolvedToken->hClass))
5132 assert(verCurrentState.thisInitialized !=
5133 TIS_Bottom); // This should never be the case just from the logic of the verifier.
5134 VerifyOrReturn(verCurrentState.thisInitialized == TIS_Uninit,
5135 "Call to base class constructor when 'this' is possibly initialized");
5136 // Otherwise, 'this' is now initialized.
5137 verCurrentState.thisInitialized = TIS_Init;
5138 tiThis.SetInitialisedObjRef();
5142 // We allow direct calls to value type constructors
5143 // NB: we have to check that the contents of tiThis is a value type, otherwise we could use a
5144 // constrained callvirt to illegally re-enter a .ctor on a value of reference type.
5145 VerifyOrReturn(tiThis.IsByRef() && DereferenceByRef(tiThis).IsValueClass(),
5146 "Bad call to a constructor");
5150 if (pConstrainedResolvedToken != nullptr)
5152 VerifyOrReturn(tiThis.IsByRef(), "non-byref this type in constrained call");
5154 typeInfo tiConstraint = verMakeTypeInfo(pConstrainedResolvedToken->hClass);
5156 // We just dereference this and test for equality
5157 tiThis.DereferenceByRef();
5158 VerifyOrReturn(typeInfo::AreEquivalent(tiThis, tiConstraint),
5159 "this type mismatch with constrained type operand");
5161 // Now pretend the this type is the boxed constrained type, for the sake of subsequent checks
5162 tiThis = typeInfo(TI_REF, pConstrainedResolvedToken->hClass);
5165 // To support direct calls on readonly byrefs, just pretend tiDeclaredThis is readonly too
5166 if (tiDeclaredThis.IsByRef() && tiThis.IsReadonlyByRef())
5168 tiDeclaredThis.SetIsReadonlyByRef();
5171 VerifyOrReturn(tiCompatibleWith(tiThis, tiDeclaredThis, true), "this type mismatch");
5173 if (tiThis.IsByRef())
5175 // Find the actual type where the method exists (as opposed to what is declared
5176 // in the metadata). This is to prevent passing a byref as the "this" argument
5177 // while calling methods like System.ValueType.GetHashCode() which expect boxed objects.
5179 CORINFO_CLASS_HANDLE actualClassHnd = info.compCompHnd->getMethodClass(pResolvedToken->hMethod);
5180 VerifyOrReturn(eeIsValueClass(actualClassHnd),
5181 "Call to base type of valuetype (which is never a valuetype)");
5184 // Rules for non-virtual call to a non-final virtual method:
5187 // The "this" pointer is considered to be "possibly written" if
5188         //   1. Its address has been taken (LDARGA 0) anywhere in the method.
5190 // 2. It has been stored to (STARG.0) anywhere in the method.
5192 // A non-virtual call to a non-final virtual method is only allowed if
5193 // 1. The this pointer passed to the callee is an instance of a boxed value type.
5195 // 2. The this pointer passed to the callee is the current method's this pointer.
5196 // (and) The current method's this pointer is not "possibly written".
5198         // Thus the rule is that if you assign to "this" ANYWHERE, you can't make "base" calls to
5199         // virtual methods. (Luckily this does not affect .ctors, since they are not virtual.)
5200         // This is stronger than is strictly needed, but implementing a laxer rule is significantly
5201         // harder and more error prone.
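        //
        // For illustration only, a hypothetical IL shape that this rule rejects:
        //     starg.0                 // 'this' has been stored to -> "possibly written"
        //     ...
        //     ldarg.0
        //     call instance string [mscorlib]System.Object::ToString()  // non-virtual call to a virtual, non-final method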
5203 if (opcode == CEE_CALL && (mflags & CORINFO_FLG_VIRTUAL) && ((mflags & CORINFO_FLG_FINAL) == 0)
5205 && StrictCheckForNonVirtualCallToVirtualMethod()
5209 if (info.compCompHnd->shouldEnforceCallvirtRestriction(info.compScopeHnd))
5212 tiThis.IsThisPtr() && lvaIsOriginalThisReadOnly() || verIsBoxedValueType(tiThis),
5213 "The 'this' parameter to the call must be either the calling method's 'this' parameter or "
5214 "a boxed value type.");
5219 // check any constraints on the callee's class and type parameters
5220 VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(pResolvedToken->hClass),
5221 "method has unsatisfied class constraints");
5222 VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(pResolvedToken->hClass, pResolvedToken->hMethod),
5223 "method has unsatisfied method constraints");
5225 if (mflags & CORINFO_FLG_PROTECTED)
5227 VerifyOrReturn(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
5228 "Can't access protected method");
5231 // Get the exact view of the signature for an array method
5232 if (sig->retType != CORINFO_TYPE_VOID)
5234 eeGetMethodSig(pResolvedToken->hMethod, sig, pResolvedToken->hClass);
5237 // "readonly." prefixed calls only allowed for the Address operation on arrays.
5238 // The methods supported by array types are under the control of the EE
5239 // so we can trust that only the Address operation returns a byref.
5242 typeInfo tiCalleeRetType = verMakeTypeInfo(sig->retType, sig->retTypeClass);
5243 VerifyOrReturn((methodClassFlgs & CORINFO_FLG_ARRAY) && tiCalleeRetType.IsByRef(),
5244 "unexpected use of readonly prefix");
5247 // Verify the tailcall
5250 verCheckTailCallConstraint(opcode, pResolvedToken, pConstrainedResolvedToken, false);
5254 /*****************************************************************************
5255  *  Checks that a delegate creation is done using one of the following patterns:
5257  *     dup; ldvirtftn targetMemberRef   (for virtual targets), or
5259  *     ldftn targetMemberRef
5261 * 'delegateCreateStart' points at the last dup or ldftn in this basic block (null if
5262 * not in this basic block)
5264 * targetMemberRef is read from the code sequence.
5265 * targetMemberRef is validated iff verificationNeeded.
5268 BOOL Compiler::verCheckDelegateCreation(const BYTE* delegateCreateStart,
5269 const BYTE* codeAddr,
5270 mdMemberRef& targetMemberRef)
5272 if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr))
5274 targetMemberRef = getU4LittleEndian(&delegateCreateStart[2]);
5277 else if (impIsDUP_LDVIRTFTN_TOKEN(delegateCreateStart, codeAddr))
5279 targetMemberRef = getU4LittleEndian(&delegateCreateStart[3]);
5286 typeInfo Compiler::verVerifySTIND(const typeInfo& tiTo, const typeInfo& value, const typeInfo& instrType)
5288 Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
5289 typeInfo ptrVal = verVerifyLDIND(tiTo, instrType);
5290 typeInfo normPtrVal = typeInfo(ptrVal).NormaliseForStack();
5291 if (!tiCompatibleWith(value, normPtrVal, true))
5293 Verify(tiCompatibleWith(value, normPtrVal, true), "type mismatch");
5294 compUnsafeCastUsed = true;
5299 typeInfo Compiler::verVerifyLDIND(const typeInfo& ptr, const typeInfo& instrType)
5301 assert(!instrType.IsStruct());
5306 ptrVal = DereferenceByRef(ptr);
5307 if (instrType.IsObjRef() && !ptrVal.IsObjRef())
5309 Verify(false, "bad pointer");
5310 compUnsafeCastUsed = true;
5312 else if (!instrType.IsObjRef() && !typeInfo::AreEquivalent(instrType, ptrVal))
5314 Verify(false, "pointer not consistent with instr");
5315 compUnsafeCastUsed = true;
5320 Verify(false, "pointer not byref");
5321 compUnsafeCastUsed = true;
5327 // Verify that the field is used properly. 'tiThis' is NULL for statics,
5328 // 'fieldFlags' is the field's attributes, and mutator is TRUE if it is a
5329 // ld*flda or a st*fld.
5330 // 'enclosingClass' is given if we are accessing a field in some specific type.
5332 void Compiler::verVerifyField(CORINFO_RESOLVED_TOKEN* pResolvedToken,
5333 const CORINFO_FIELD_INFO& fieldInfo,
5334 const typeInfo* tiThis,
5336 BOOL allowPlainStructAsThis)
5338 CORINFO_CLASS_HANDLE enclosingClass = pResolvedToken->hClass;
5339 unsigned fieldFlags = fieldInfo.fieldFlags;
5340 CORINFO_CLASS_HANDLE instanceClass =
5341 info.compClassHnd; // for statics, we imagine the instance is the current class.
5343 bool isStaticField = ((fieldFlags & CORINFO_FLG_FIELD_STATIC) != 0);
5346         Verify(!(fieldFlags & CORINFO_FLG_FIELD_UNMANAGED), "mutating an RVA based static");
5347 if ((fieldFlags & CORINFO_FLG_FIELD_FINAL))
5349 Verify((info.compFlags & CORINFO_FLG_CONSTRUCTOR) && enclosingClass == info.compClassHnd &&
5350 info.compIsStatic == isStaticField,
5351 "bad use of initonly field (set or address taken)");
5355 if (tiThis == nullptr)
5357 Verify(isStaticField, "used static opcode with non-static field");
5361 typeInfo tThis = *tiThis;
5363 if (allowPlainStructAsThis && tThis.IsValueClass())
5368 // If it is null, we assume we can access it (since it will AV shortly)
5369         // If it is anything but a reference class, there is no hierarchy, so
5370 // again, we don't need the precise instance class to compute 'protected' access
5371 if (tiThis->IsType(TI_REF))
5373 instanceClass = tiThis->GetClassHandleForObjRef();
5376 // Note that even if the field is static, we require that the this pointer
5377         // satisfy the same constraints as a non-static field. This happens to
5378         // be simpler and seems reasonable.
5379 typeInfo tiDeclaredThis = verMakeTypeInfo(enclosingClass);
5380 if (tiDeclaredThis.IsValueClass())
5382 tiDeclaredThis.MakeByRef();
5384             // We allow a read-only tThis on any field access (even stores!), because if the
5385             // class implementor wants to prohibit stores, they should make the field private.
5386 // we do this by setting the read-only bit on the type we compare tThis to.
5387 tiDeclaredThis.SetIsReadonlyByRef();
5389 else if (verTrackObjCtorInitState && tThis.IsThisPtr())
5391 // Any field access is legal on "uninitialized" this pointers.
5392 // The easiest way to implement this is to simply set the
5393 // initialized bit for the duration of the type check on the
5394 // field access only. It does not change the state of the "this"
5395 // for the function as a whole. Note that the "tThis" is a copy
5396 // of the original "this" type (*tiThis) passed in.
5397 tThis.SetInitialisedObjRef();
5400 Verify(tiCompatibleWith(tThis, tiDeclaredThis, true), "this type mismatch");
5403 // Presently the JIT does not check that we don't store or take the address of init-only fields
5404 // since we cannot guarantee their immutability and it is not a security issue.
5406     // check any constraints on the field's class --- accessing the field might cause a class constructor to run.
5407 VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(enclosingClass),
5408 "field has unsatisfied class constraints");
5409 if (fieldFlags & CORINFO_FLG_FIELD_PROTECTED)
5411 Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClass),
5412 "Accessing protected method through wrong type.");
5416 void Compiler::verVerifyCond(const typeInfo& tiOp1, const typeInfo& tiOp2, unsigned opcode)
5418 if (tiOp1.IsNumberType())
5420 #ifdef _TARGET_64BIT_
5421 Verify(tiCompatibleWith(tiOp1, tiOp2, true), "Cond type mismatch");
5422 #else // _TARGET_64BIT
5423 // [10/17/2013] Consider changing this: to put on my verification lawyer hat,
5424 // this is non-conforming to the ECMA Spec: types don't have to be equivalent,
5425 // but compatible, since we can coalesce native int with int32 (see section III.1.5).
5426 Verify(typeInfo::AreEquivalent(tiOp1, tiOp2), "Cond type mismatch");
5427 #endif // !_TARGET_64BIT_
5429 else if (tiOp1.IsObjRef())
5441 Verify(FALSE, "Cond not allowed on object types");
5443 Verify(tiOp2.IsObjRef(), "Cond type mismatch");
5445 else if (tiOp1.IsByRef())
5447 Verify(tiOp2.IsByRef(), "Cond type mismatch");
5451 Verify(tiOp1.IsMethod() && tiOp2.IsMethod(), "Cond type mismatch");
5455 void Compiler::verVerifyThisPtrInitialised()
5457 if (verTrackObjCtorInitState)
5459 Verify(verCurrentState.thisInitialized == TIS_Init, "this ptr is not initialized");
5463 BOOL Compiler::verIsCallToInitThisPtr(CORINFO_CLASS_HANDLE context, CORINFO_CLASS_HANDLE target)
5465     // Either target == context, in which case we are calling an alternate .ctor,
5466     // or target is the immediate parent of context.
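    // For example, inside Derived's constructor, a call to another Derived constructor
    // ("this(...)" in C#, target == context) or to Base's constructor ("base(...)",
    // target == the immediate parent) both count as initializing the 'this' pointer.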
5468 return ((target == context) || (target == info.compCompHnd->getParentType(context)));
5471 GenTree* Compiler::impImportLdvirtftn(GenTree* thisPtr,
5472 CORINFO_RESOLVED_TOKEN* pResolvedToken,
5473 CORINFO_CALL_INFO* pCallInfo)
5475 if ((pCallInfo->methodFlags & CORINFO_FLG_EnC) && !(pCallInfo->classFlags & CORINFO_FLG_INTERFACE))
5477 NO_WAY("Virtual call to a function added via EnC is not supported");
5480 // CoreRT generic virtual method
5481 if ((pCallInfo->sig.sigInst.methInstCount != 0) && IsTargetAbi(CORINFO_CORERT_ABI))
5483 GenTree* runtimeMethodHandle = nullptr;
5484 if (pCallInfo->exactContextNeedsRuntimeLookup)
5486 runtimeMethodHandle =
5487 impRuntimeLookupToTree(pResolvedToken, &pCallInfo->codePointerLookup, pCallInfo->hMethod);
5491 runtimeMethodHandle = gtNewIconEmbMethHndNode(pResolvedToken->hMethod);
5493 return gtNewHelperCallNode(CORINFO_HELP_GVMLOOKUP_FOR_SLOT, TYP_I_IMPL,
5494 gtNewArgList(thisPtr, runtimeMethodHandle));
5497 #ifdef FEATURE_READYTORUN_COMPILER
5498 if (opts.IsReadyToRun())
5500 if (!pCallInfo->exactContextNeedsRuntimeLookup)
5503 gtNewHelperCallNode(CORINFO_HELP_READYTORUN_VIRTUAL_FUNC_PTR, TYP_I_IMPL, gtNewArgList(thisPtr));
5505 call->setEntryPoint(pCallInfo->codePointerLookup.constLookup);
5510 // We need a runtime lookup. CoreRT has a ReadyToRun helper for that too.
5511 if (IsTargetAbi(CORINFO_CORERT_ABI))
5513 GenTree* ctxTree = getRuntimeContextTree(pCallInfo->codePointerLookup.lookupKind.runtimeLookupKind);
5515 return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL,
5516 gtNewArgList(ctxTree), &pCallInfo->codePointerLookup.lookupKind);
5521 // Get the exact descriptor for the static callsite
5522 GenTree* exactTypeDesc = impParentClassTokenToHandle(pResolvedToken);
5523 if (exactTypeDesc == nullptr)
5524 { // compDonotInline()
5528 GenTree* exactMethodDesc = impTokenToHandle(pResolvedToken);
5529 if (exactMethodDesc == nullptr)
5530 { // compDonotInline()
5534 GenTreeArgList* helpArgs = gtNewArgList(exactMethodDesc);
5536 helpArgs = gtNewListNode(exactTypeDesc, helpArgs);
5538 helpArgs = gtNewListNode(thisPtr, helpArgs);
5540 // Call helper function. This gets the target address of the final destination callsite.
5542 return gtNewHelperCallNode(CORINFO_HELP_VIRTUAL_FUNC_PTR, TYP_I_IMPL, helpArgs);
5545 //------------------------------------------------------------------------
5546 // impImportAndPushBox: build and import a value-type box
5549 // pResolvedToken - resolved token from the box operation
5555 // The value to be boxed is popped from the stack, and a tree for
5556 // the boxed value is pushed. This method may create upstream
5557 // statements, spill side effecting trees, and create new temps.
5559 // If importing an inlinee, we may also discover the inline must
5560 // fail. If so there is no new value pushed on the stack. Callers
5561 //   should use compDonotInline() after calling this method to see if
5562 // ongoing importation should be aborted.
5565 // Boxing of ref classes results in the same value as the value on
5566 // the top of the stack, so is handled inline in impImportBlockCode
5567 // for the CEE_BOX case. Only value or primitive type boxes make it
5570 // Boxing for nullable types is done via a helper call; boxing
5571 // of other value types is expanded inline or handled via helper
5572 // call, depending on the jit's codegen mode.
5574 // When the jit is operating in size and time constrained modes,
5575 // using a helper call here can save jit time and code size. But it
5576 //   also may inhibit cleanup optimizations that could have had an
5577 //   even greater effect on code size and jit time. An optimal
5578 // strategy may need to peek ahead and see if it is easy to tell how
5579 // the box is being used. For now, we defer.
5581 void Compiler::impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken)
5583 // Spill any special side effects
5584 impSpillSpecialSideEff();
5586     // Get the expression to box from the stack.
5587 GenTree* op1 = nullptr;
5588 GenTree* op2 = nullptr;
5589 StackEntry se = impPopStack();
5590 CORINFO_CLASS_HANDLE operCls = se.seTypeInfo.GetClassHandle();
5591 GenTree* exprToBox = se.val;
5593 // Look at what helper we should use.
5594 CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pResolvedToken->hClass);
5596 // Determine what expansion to prefer.
5598 // In size/time/debuggable constrained modes, the helper call
5599 // expansion for box is generally smaller and is preferred, unless
5600 // the value to box is a struct that comes from a call. In that
5601 // case the call can construct its return value directly into the
5602 // box payload, saving possibly some up-front zeroing.
5604 // Currently primitive type boxes always get inline expanded. We may
5605 // want to do the same for small structs if they don't come from
5606 // calls and don't have GC pointers, since explicitly copying such
5607 // structs is cheap.
5608 JITDUMP("\nCompiler::impImportAndPushBox -- handling BOX(value class) via");
5609 bool canExpandInline = (boxHelper == CORINFO_HELP_BOX);
5610 bool optForSize = !exprToBox->IsCall() && (operCls != nullptr) && (opts.compDbgCode || opts.MinOpts());
5611 bool expandInline = canExpandInline && !optForSize;
5615 JITDUMP(" inline allocate/copy sequence\n");
5617 // we are doing 'normal' boxing. This means that we can inline the box operation
5618 // Box(expr) gets morphed into
5619 // temp = new(clsHnd)
5620 // cpobj(temp+4, expr, clsHnd)
5622         // The code paths differ slightly below for structs and primitives because
5623         // "cpobj" differs in these cases. In the struct case you get
5624         //    impAssignStructPtr(temp+4, expr, clsHnd)
5625         // and in the primitive case you get a simple indirect store, *(temp+4) = expr.
5628 if (opts.MinOpts() || opts.compDbgCode)
5630 // For minopts/debug code, try and minimize the total number
5631 // of box temps by reusing an existing temp when possible.
5632 if (impBoxTempInUse || impBoxTemp == BAD_VAR_NUM)
5634 impBoxTemp = lvaGrabTemp(true DEBUGARG("Reusable Box Helper"));
5639 // When optimizing, use a new temp for each box operation
5640 // since we then know the exact class of the box temp.
5641 impBoxTemp = lvaGrabTemp(true DEBUGARG("Single-def Box Helper"));
5642 lvaTable[impBoxTemp].lvType = TYP_REF;
5643 const bool isExact = true;
5644 lvaSetClass(impBoxTemp, pResolvedToken->hClass, isExact);
5647         // needs to stay in use until this box expression is appended to
5648         // some other node. We approximate this by keeping it alive until
5649 // the opcode stack becomes empty
5650 impBoxTempInUse = true;
5652 #ifdef FEATURE_READYTORUN_COMPILER
5653 bool usingReadyToRunHelper = false;
5655 if (opts.IsReadyToRun())
5657 op1 = impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_NEW, TYP_REF);
5658 usingReadyToRunHelper = (op1 != nullptr);
5661 if (!usingReadyToRunHelper)
5664 // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
5665 // and the newfast call with a single call to a dynamic R2R cell that will:
5666 // 1) Load the context
5667 // 2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
5668 // 3) Allocate and return the new object for boxing
5669 // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
5671 // Ensure that the value class is restored
5672 op2 = impTokenToHandle(pResolvedToken, nullptr, TRUE /* mustRestoreHandle */);
5675 // We must be backing out of an inline.
5676 assert(compDonotInline());
5680 op1 = gtNewAllocObjNode(info.compCompHnd->getNewHelper(pResolvedToken, info.compMethodHnd),
5681 pResolvedToken->hClass, TYP_REF, op2);
5684 /* Remember that this basic block contains 'new' of an object, and so does this method */
5685 compCurBB->bbFlags |= BBF_HAS_NEWOBJ;
5686 optMethodFlags |= OMF_HAS_NEWOBJ;
5688 GenTree* asg = gtNewTempAssign(impBoxTemp, op1);
5690 GenTree* asgStmt = impAppendTree(asg, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
5692 op1 = gtNewLclvNode(impBoxTemp, TYP_REF);
5693 op2 = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL);
5694 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, op2);
5696 if (varTypeIsStruct(exprToBox))
5698 assert(info.compCompHnd->getClassSize(pResolvedToken->hClass) == info.compCompHnd->getClassSize(operCls));
5699 op1 = impAssignStructPtr(op1, exprToBox, operCls, (unsigned)CHECK_SPILL_ALL);
5703 var_types lclTyp = exprToBox->TypeGet();
5704 if (lclTyp == TYP_BYREF)
5706 lclTyp = TYP_I_IMPL;
5708 CorInfoType jitType = info.compCompHnd->asCorInfoType(pResolvedToken->hClass);
5709 if (impIsPrimitive(jitType))
5711 lclTyp = JITtype2varType(jitType);
5713 assert(genActualType(exprToBox->TypeGet()) == genActualType(lclTyp) ||
5714 varTypeIsFloating(lclTyp) == varTypeIsFloating(exprToBox->TypeGet()));
5715 var_types srcTyp = exprToBox->TypeGet();
5716 var_types dstTyp = lclTyp;
5718 if (srcTyp != dstTyp)
5720 assert((varTypeIsFloating(srcTyp) && varTypeIsFloating(dstTyp)) ||
5721 (varTypeIsIntegral(srcTyp) && varTypeIsIntegral(dstTyp)));
5722 exprToBox = gtNewCastNode(dstTyp, exprToBox, false, dstTyp);
5724 op1 = gtNewAssignNode(gtNewOperNode(GT_IND, lclTyp, op1), exprToBox);
5727 // Spill eval stack to flush out any pending side effects.
5728 impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportAndPushBox"));
5730 // Set up this copy as a second assignment.
5731 GenTree* copyStmt = impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
5733 op1 = gtNewLclvNode(impBoxTemp, TYP_REF);
5735 // Record that this is a "box" node and keep track of the matching parts.
5736 op1 = new (this, GT_BOX) GenTreeBox(TYP_REF, op1, asgStmt, copyStmt);
5738 // If it is a value class, mark the "box" node. We can use this information
5739 // to optimise several cases:
5740 // "box(x) == null" --> false
5741 // "(box(x)).CallAnInterfaceMethod(...)" --> "(&x).CallAValueTypeMethod"
5742 // "(box(x)).CallAnObjectMethod(...)" --> "(&x).CallAValueTypeMethod"
5744 op1->gtFlags |= GTF_BOX_VALUE;
5745 assert(op1->IsBoxedValue());
5746 assert(asg->gtOper == GT_ASG);
5750 // Don't optimize, just call the helper and be done with it.
5751 JITDUMP(" helper call because: %s\n", canExpandInline ? "optimizing for size" : "nullable");
5752 assert(operCls != nullptr);
5754 // Ensure that the value class is restored
5755 op2 = impTokenToHandle(pResolvedToken, nullptr, TRUE /* mustRestoreHandle */);
5758 // We must be backing out of an inline.
5759 assert(compDonotInline());
5763 GenTreeArgList* args = gtNewArgList(op2, impGetStructAddr(exprToBox, operCls, (unsigned)CHECK_SPILL_ALL, true));
5764 op1 = gtNewHelperCallNode(boxHelper, TYP_REF, args);
5767 /* Push the result back on the stack, */
5768 /* even if clsHnd is a value class we want the TI_REF */
5769 typeInfo tiRetVal = typeInfo(TI_REF, info.compCompHnd->getTypeForBox(pResolvedToken->hClass));
5770 impPushOnStack(op1, tiRetVal);
5773 //------------------------------------------------------------------------
5774 // impImportNewObjArray: Build and import `new` of multi-dimensional array
5777 // pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized
5778 // by a call to CEEInfo::resolveToken().
5779 // pCallInfo - The CORINFO_CALL_INFO that has been initialized
5780 // by a call to CEEInfo::getCallInfo().
5783 // The multi-dimensional array constructor arguments (array dimensions) are
5784 // pushed on the IL stack on entry to this method.
5787 // Multi-dimensional array constructors are imported as calls to a JIT
5788 // helper, not as regular calls.
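//
// For illustration only (an added sketch, not from the original comments): a C#
// "new int[2,3]" reaches here with the two dimension values on the IL stack and,
// when the non-varargs helper is used, is imported roughly as
//   CORINFO_HELP_NEW_MDARR_NONVARARG(arrayClsHnd, 2, &dimsBlock)
// where 'dimsBlock' is a small temp holding the popped int32 dimensions.
//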
5790 void Compiler::impImportNewObjArray(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo)
5792 GenTree* classHandle = impParentClassTokenToHandle(pResolvedToken);
5793 if (classHandle == nullptr)
5794 { // compDonotInline()
5798 assert(pCallInfo->sig.numArgs);
5801 GenTreeArgList* args;
5804 // There are two different JIT helpers that can be used to allocate
5805 // multi-dimensional arrays:
5807 // - CORINFO_HELP_NEW_MDARR - takes the array dimensions as varargs.
5808 // This variant is deprecated. It should be eventually removed.
5810 // - CORINFO_HELP_NEW_MDARR_NONVARARG - takes the array dimensions as
5811 // pointer to block of int32s. This variant is more portable.
5813 // The non-varargs helper is enabled for CoreRT only for now. Enabling this
5814 // unconditionally would require ReadyToRun version bump.
5816 CLANG_FORMAT_COMMENT_ANCHOR;
5818 if (!opts.IsReadyToRun() || IsTargetAbi(CORINFO_CORERT_ABI))
5821 // Reuse the temp used to pass the array dimensions to avoid bloating
5822 // the stack frame in case there are multiple calls to multi-dim array
5823 // constructors within a single method.
5824 if (lvaNewObjArrayArgs == BAD_VAR_NUM)
5826 lvaNewObjArrayArgs = lvaGrabTemp(false DEBUGARG("NewObjArrayArgs"));
5827 lvaTable[lvaNewObjArrayArgs].lvType = TYP_BLK;
5828 lvaTable[lvaNewObjArrayArgs].lvExactSize = 0;
5831 // Increase size of lvaNewObjArrayArgs to be the largest size needed to hold 'numArgs' integers
5832 // for our call to CORINFO_HELP_NEW_MDARR_NONVARARG.
5833 lvaTable[lvaNewObjArrayArgs].lvExactSize =
5834 max(lvaTable[lvaNewObjArrayArgs].lvExactSize, pCallInfo->sig.numArgs * sizeof(INT32));
5836 // The side-effects may include allocation of more multi-dimensional arrays. Spill all side-effects
5837 // to ensure that the shared lvaNewObjArrayArgs local variable is only ever used to pass arguments
5838 // to one allocation at a time.
5839 impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportNewObjArray"));
5842 // The arguments of the CORINFO_HELP_NEW_MDARR_NONVARARG helper are:
5843 // - Array class handle
5844 // - Number of dimension arguments
5845 // - Pointer to block of int32 dimensions - address of lvaNewObjArrayArgs temp.
5848 node = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK);
5849 node = gtNewOperNode(GT_ADDR, TYP_I_IMPL, node);
5851 // Pop the dimension arguments from the stack one at a time and store them
5852 // into lvaNewObjArrayArgs temp.
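// A sketch of the tree the loop below builds (added for clarity): starting from
// node = ADDR(lvaNewObjArrayArgs), each popped dimension is prepended as
//   node = COMMA(IND(int)(ADDR(lvaNewObjArrayArgs) + sizeof(INT32) * i) = dim[i], node)
// so the value of the whole comma chain is the address of the int32 block that is
// passed to the helper.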
5853 for (int i = pCallInfo->sig.numArgs - 1; i >= 0; i--)
5855 GenTree* arg = impImplicitIorI4Cast(impPopStack().val, TYP_INT);
5857 GenTree* dest = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK);
5858 dest = gtNewOperNode(GT_ADDR, TYP_I_IMPL, dest);
5859 dest = gtNewOperNode(GT_ADD, TYP_I_IMPL, dest,
5860 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(INT32) * i));
5861 dest = gtNewOperNode(GT_IND, TYP_INT, dest);
5863 node = gtNewOperNode(GT_COMMA, node->TypeGet(), gtNewAssignNode(dest, arg), node);
5866 args = gtNewArgList(node);
5868 // pass number of arguments to the helper
5869 args = gtNewListNode(gtNewIconNode(pCallInfo->sig.numArgs), args);
5871 args = gtNewListNode(classHandle, args);
5873 node = gtNewHelperCallNode(CORINFO_HELP_NEW_MDARR_NONVARARG, TYP_REF, args);
5878 // The varargs helper needs the type and method handles as last
5879 // and last-1 param (this is a cdecl call, so args will be
5880 // pushed in reverse order on the CPU stack)
5883 args = gtNewArgList(classHandle);
5885 // pass number of arguments to the helper
5886 args = gtNewListNode(gtNewIconNode(pCallInfo->sig.numArgs), args);
5888 unsigned argFlags = 0;
5889 args = impPopList(pCallInfo->sig.numArgs, &pCallInfo->sig, args);
5891 node = gtNewHelperCallNode(CORINFO_HELP_NEW_MDARR, TYP_REF, args);
5893 // varargs, so we pop the arguments
5894 node->gtFlags |= GTF_CALL_POP_ARGS;
5897 // At the present time we don't track Caller pop arguments
5898 // that have GC references in them
5899 for (GenTreeArgList* temp = args; temp; temp = temp->Rest())
5901 assert(temp->Current()->gtType != TYP_REF);
5906 node->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
5907 node->gtCall.compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)pResolvedToken->hClass;
5909 // Remember that this basic block contains 'new' of a md array
5910 compCurBB->bbFlags |= BBF_HAS_NEWARRAY;
5912 impPushOnStack(node, typeInfo(TI_REF, pResolvedToken->hClass));
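//------------------------------------------------------------------------
// impTransformThis: transform the "this" pointer for a constrained call
//    (summary added for clarity; see the switch below for details)
//
// Depending on the transform requested by the EE this either dereferences the
// byref 'this' (CORINFO_DEREF_THIS), boxes the pointed-to value so it can be
// used as an object reference (CORINFO_BOX_THIS), or leaves the pointer
// unchanged (CORINFO_NO_THIS_TRANSFORM).
//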
5915 GenTree* Compiler::impTransformThis(GenTree* thisPtr,
5916 CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
5917 CORINFO_THIS_TRANSFORM transform)
5921 case CORINFO_DEREF_THIS:
5923 GenTree* obj = thisPtr;
5925 // This does an LDIND on the obj, which should be a byref pointing to a ref
5926 impBashVarAddrsToI(obj);
5927 assert(genActualType(obj->gtType) == TYP_I_IMPL || obj->gtType == TYP_BYREF);
5928 CorInfoType constraintTyp = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass);
5930 obj = gtNewOperNode(GT_IND, JITtype2varType(constraintTyp), obj);
5931 // The ldind could point anywhere, for example at a boxed class static int
5932 obj->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
5937 case CORINFO_BOX_THIS:
5939 // Constraint calls where there might be no
5940 // unboxed entry point require us to implement the call via helper.
5941 // These only occur when a possible target of the call
5942 // may have inherited an implementation of an interface
5943 // method from System.Object or System.ValueType. The EE does not provide us with
5944 // "unboxed" versions of these methods.
5946 GenTree* obj = thisPtr;
5948 assert(obj->TypeGet() == TYP_BYREF || obj->TypeGet() == TYP_I_IMPL);
5949 obj = gtNewObjNode(pConstrainedResolvedToken->hClass, obj);
5950 obj->gtFlags |= GTF_EXCEPT;
5952 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass);
5953 var_types objType = JITtype2varType(jitTyp);
5954 if (impIsPrimitive(jitTyp))
5956 if (obj->OperIsBlk())
5958 obj->ChangeOperUnchecked(GT_IND);
5960 // Obj could point anywhere, for example at a boxed class static int
5961 obj->gtFlags |= GTF_IND_TGTANYWHERE;
5962 obj->gtOp.gtOp2 = nullptr; // must be zero for tree walkers
5965 obj->gtType = JITtype2varType(jitTyp);
5966 assert(varTypeIsArithmetic(obj->gtType));
5969 // This pushes on the dereferenced byref
5970 // This is then used immediately to box.
5971 impPushOnStack(obj, verMakeTypeInfo(pConstrainedResolvedToken->hClass).NormaliseForStack());
5973 // This pops off the byref-to-a-value-type remaining on the stack and
5974 // replaces it with a boxed object.
5975 // This is then used as the object to the virtual call immediately below.
5976 impImportAndPushBox(pConstrainedResolvedToken);
5977 if (compDonotInline())
5982 obj = impPopStack().val;
5985 case CORINFO_NO_THIS_TRANSFORM:
5991 //------------------------------------------------------------------------
5992 // impCanPInvokeInline: check whether PInvoke inlining should be enabled in the current method.
5995 // true if PInvoke inlining should be enabled in the current method, false otherwise
5998 // Checks a number of ambient conditions where we could pinvoke but choose not to
6000 bool Compiler::impCanPInvokeInline()
6002 return getInlinePInvokeEnabled() && (!opts.compDbgCode) && (compCodeOpt() != SMALL_CODE) &&
6003 (!opts.compNoPInvokeInlineCB) // profiler is preventing inline pinvoke
6007 //------------------------------------------------------------------------
6008 // impCanPInvokeInlineCallSite: basic legality checks using information
6009 // from a call to see if the call qualifies as an inline pinvoke.
6012 // block - block containing the call, or for inlinees, block
6013 // containing the call being inlined
6016 // true if this call can legally qualify as an inline pinvoke, false otherwise
6019 // For runtimes that support exception handling interop there are
6020 // restrictions on using inline pinvoke in handler regions.
6022 // * We have to disable pinvoke inlining inside of filters because
6023 // in case the main execution (i.e. in the try block) is inside
6024 // unmanaged code, we cannot reuse the inlined stub (we still need
6025 // the original state until we are in the catch handler)
6027 // * We disable pinvoke inlining inside handlers since the GSCookie
6028 // is in the inlined Frame (see
6029 // CORINFO_EE_INFO::InlinedCallFrameInfo::offsetOfGSCookie), but
6030 // this would not protect framelets/return-address of handlers.
6032 // These restrictions are currently also in place for CoreCLR but
6033 // can be relaxed when coreclr/#8459 is addressed.
6035 bool Compiler::impCanPInvokeInlineCallSite(BasicBlock* block)
6037 if (block->hasHndIndex())
6042 // The remaining limitations do not apply to CoreRT
6043 if (IsTargetAbi(CORINFO_CORERT_ABI))
6048 #ifdef _TARGET_AMD64_
6049 // On x64, we disable pinvoke inlining inside of try regions.
6050 // Here is the comment from JIT64 explaining why:
6052 // [VSWhidbey: 611015] - because the jitted code links in the
6053 // Frame (instead of the stub) we rely on the Frame not being
6054 // 'active' until inside the stub. This normally happens by the
6055 // stub setting the return address pointer in the Frame object
6056 // inside the stub. On a normal return, the return address
6057 // pointer is zeroed out so the Frame can be safely re-used, but
6058 // if an exception occurs, nobody zeros out the return address
6059 // pointer. Thus if we re-used the Frame object, it would go
6060 // 'active' as soon as we link it into the Frame chain.
6062 // Technically we only need to disable PInvoke inlining if we're
6063 // in a handler or if we're in a try body with a catch or
6064 // filter/except where other non-handler code in this method
6065 // might run and try to re-use the dirty Frame object.
6067 // A desktop test case where this seems to matter is
6068 // jit\jit64\ebvts\mcpp\sources2\ijw\__clrcall\vector_ctor_dtor.02\deldtor_clr.exe
6069 if (block->hasTryIndex())
6073 #endif // _TARGET_AMD64_
6078 //------------------------------------------------------------------------
6079 // impCheckForPInvokeCall: examine a call to see if it is a pinvoke and, if so,
6080 // whether it can be expressed as an inline pinvoke.
6083 // call - tree for the call
6084 // methHnd - handle for the method being called (may be null)
6085 // sig - signature of the method being called
6086 // mflags - method flags for the method being called
6087 // block - block containing the call, or for inlinees, block
6088 // containing the call being inlined
6091 // Sets GTF_CALL_M_PINVOKE on the call for pinvokes.
6093 // Also sets GTF_CALL_UNMANAGED on call for inline pinvokes if the
6094 // call passes a combination of legality and profitability checks.
6096 // If GTF_CALL_UNMANAGED is set, increments info.compCallUnmanaged
6098 void Compiler::impCheckForPInvokeCall(
6099 GenTreeCall* call, CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* sig, unsigned mflags, BasicBlock* block)
6101 CorInfoUnmanagedCallConv unmanagedCallConv;
6103 // If VM flagged it as Pinvoke, flag the call node accordingly
6104 if ((mflags & CORINFO_FLG_PINVOKE) != 0)
6106 call->gtCallMoreFlags |= GTF_CALL_M_PINVOKE;
6111 if ((mflags & CORINFO_FLG_PINVOKE) == 0 || (mflags & CORINFO_FLG_NOSECURITYWRAP) == 0)
6116 unmanagedCallConv = info.compCompHnd->getUnmanagedCallConv(methHnd);
6120 CorInfoCallConv callConv = CorInfoCallConv(sig->callConv & CORINFO_CALLCONV_MASK);
6121 if (callConv == CORINFO_CALLCONV_NATIVEVARARG)
6123 // Used by the IL Stubs.
6124 callConv = CORINFO_CALLCONV_C;
6126 static_assert_no_msg((unsigned)CORINFO_CALLCONV_C == (unsigned)CORINFO_UNMANAGED_CALLCONV_C);
6127 static_assert_no_msg((unsigned)CORINFO_CALLCONV_STDCALL == (unsigned)CORINFO_UNMANAGED_CALLCONV_STDCALL);
6128 static_assert_no_msg((unsigned)CORINFO_CALLCONV_THISCALL == (unsigned)CORINFO_UNMANAGED_CALLCONV_THISCALL);
6129 unmanagedCallConv = CorInfoUnmanagedCallConv(callConv);
6131 assert(!call->gtCallCookie);
6134 if (unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_C && unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_STDCALL &&
6135 unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_THISCALL)
6139 optNativeCallCount++;
6141 if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && methHnd == nullptr)
6143 // PInvoke CALLI in IL stubs must be inlined
6148 if (!impCanPInvokeInlineCallSite(block))
6153 // PInvoke CALL in IL stubs must be inlined on CoreRT. Skip the ambient conditions checks and
6154 // profitability checks
6155 if (!(opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) && IsTargetAbi(CORINFO_CORERT_ABI)))
6157 if (!impCanPInvokeInline())
6162 // Size-speed tradeoff: don't use inline pinvoke at rarely
6163 // executed call sites. The non-inline version is more compact.
6165 if (block->isRunRarely())
6171 // The expensive check should be last
6172 if (info.compCompHnd->pInvokeMarshalingRequired(methHnd, sig))
6178 JITLOG((LL_INFO1000000, "\nInline a CALLI PINVOKE call from method %s", info.compFullName));
6180 call->gtFlags |= GTF_CALL_UNMANAGED;
6181 info.compCallUnmanaged++;
6183 // AMD64 convention is same for native and managed
6184 if (unmanagedCallConv == CORINFO_UNMANAGED_CALLCONV_C)
6186 call->gtFlags |= GTF_CALL_POP_ARGS;
6189 if (unmanagedCallConv == CORINFO_UNMANAGED_CALLCONV_THISCALL)
6191 call->gtCallMoreFlags |= GTF_CALL_M_UNMGD_THISCALL;
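//------------------------------------------------------------------------
// impImportIndirectCall: create the call node for a CALLI call site
//    (summary added for clarity)
//
// The function pointer is popped from the evaluation stack (spilling it first if
// it is anything more complex than a local) and an indirect call node of the
// signature's return type is returned.
//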
6195 GenTreeCall* Compiler::impImportIndirectCall(CORINFO_SIG_INFO* sig, IL_OFFSETX ilOffset)
6197 var_types callRetTyp = JITtype2varType(sig->retType);
6199 /* The function pointer is on top of the stack - It may be a
6200 * complex expression. As it is evaluated after the args,
6201 * it may cause registered args to be spilled. Simply spill it.
6204 // Ignore this trivial case.
6205 if (impStackTop().val->gtOper != GT_LCL_VAR)
6207 impSpillStackEntry(verCurrentState.esStackDepth - 1,
6208 BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impImportIndirectCall"));
6211 /* Get the function pointer */
6213 GenTree* fptr = impPopStack().val;
6215 // The function pointer is typically sized to match the target pointer size
6216 // However, stubgen IL optimization can change LDC.I8 to LDC.I4
6217 // See ILCodeStream::LowerOpcode
6218 assert(genActualType(fptr->gtType) == TYP_I_IMPL || genActualType(fptr->gtType) == TYP_INT);
6221 // This temporary must never be converted to a double in stress mode,
6222 // because that can introduce a call to the cast helper after the
6223 // arguments have already been evaluated.
6225 if (fptr->OperGet() == GT_LCL_VAR)
6227 lvaTable[fptr->gtLclVarCommon.gtLclNum].lvKeepType = 1;
6231 /* Create the call node */
6233 GenTreeCall* call = gtNewIndCallNode(fptr, callRetTyp, nullptr, ilOffset);
6235 call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
6240 /*****************************************************************************/
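//------------------------------------------------------------------------
// impPopArgsForUnmanagedCall: pop the arguments for an inline pinvoke call
//    (summary added for clarity)
//
// Out-of-order side effects are spilled so the arguments can be popped safely;
// on x86 the argument list is popped in reverse order, and for "thiscall" the
// first argument stays in place because it is passed in a register.
//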
6242 void Compiler::impPopArgsForUnmanagedCall(GenTree* call, CORINFO_SIG_INFO* sig)
6244 assert(call->gtFlags & GTF_CALL_UNMANAGED);
6246 /* Since we push the arguments in reverse order (i.e. right -> left)
6247 * spill any side effects from the stack
6249 * OBS: If there is only one side effect we do not need to spill it
6250 * thus we have to spill all side-effects except the last one
6253 unsigned lastLevelWithSideEffects = UINT_MAX;
6255 unsigned argsToReverse = sig->numArgs;
6257 // For "thiscall", the first argument goes in a register. Since its
6258 // order does not need to be changed, we do not need to spill it
6260 if (call->gtCall.gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
6262 assert(argsToReverse);
6266 #ifndef _TARGET_X86_
6267 // Don't reverse args on ARM or x64 - first four args always placed in regs in order
6271 for (unsigned level = verCurrentState.esStackDepth - argsToReverse; level < verCurrentState.esStackDepth; level++)
6273 if (verCurrentState.esStack[level].val->gtFlags & GTF_ORDER_SIDEEFF)
6275 assert(lastLevelWithSideEffects == UINT_MAX);
6277 impSpillStackEntry(level,
6278 BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - other side effect"));
6280 else if (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT)
6282 if (lastLevelWithSideEffects != UINT_MAX)
6284 /* We had a previous side effect - must spill it */
6285 impSpillStackEntry(lastLevelWithSideEffects,
6286 BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - side effect"));
6288 /* Record the level for the current side effect in case we will spill it */
6289 lastLevelWithSideEffects = level;
6293 /* This is the first side effect encountered - record its level */
6295 lastLevelWithSideEffects = level;
6300 /* The argument list is now "clean" - no out-of-order side effects
6301 * Pop the argument list in reverse order */
6303 GenTree* args = call->gtCall.gtCallArgs = impPopRevList(sig->numArgs, sig, sig->numArgs - argsToReverse);
6305 if (call->gtCall.gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
6307 GenTree* thisPtr = args->Current();
6308 impBashVarAddrsToI(thisPtr);
6309 assert(thisPtr->TypeGet() == TYP_I_IMPL || thisPtr->TypeGet() == TYP_BYREF);
6314 call->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
6318 //------------------------------------------------------------------------
6319 // impInitClass: Build a node to initialize the class before accessing the
6320 // field if necessary
6323 // pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized
6324 // by a call to CEEInfo::resolveToken().
6326 // Return Value: If needed, a pointer to the node that will perform the class
6327 // initialization. Otherwise, nullptr.
6330 GenTree* Compiler::impInitClass(CORINFO_RESOLVED_TOKEN* pResolvedToken)
6332 CorInfoInitClassResult initClassResult =
6333 info.compCompHnd->initClass(pResolvedToken->hField, info.compMethodHnd, impTokenLookupContextHandle);
6335 if ((initClassResult & CORINFO_INITCLASS_USE_HELPER) == 0)
6341 GenTree* node = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup);
6343 if (node == nullptr)
6345 assert(compDonotInline());
6351 node = gtNewHelperCallNode(CORINFO_HELP_INITCLASS, TYP_VOID, gtNewArgList(node));
6355 // Call the shared non-GC static helper, as it's the fastest
6356 node = fgGetSharedCCtor(pResolvedToken->hClass);
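//------------------------------------------------------------------------
// impImportStaticReadOnlyField: import a static readonly field as a constant
//    (summary added for clarity)
//
// Reads the field's current value from 'fldAddr' and returns an icon/lcon/dcon
// node of the given primitive type; this is presumably only used when the value
// is known not to change after jit time.
//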
6362 GenTree* Compiler::impImportStaticReadOnlyField(void* fldAddr, var_types lclTyp)
6364 GenTree* op1 = nullptr;
6373 ival = *((bool*)fldAddr);
6377 ival = *((signed char*)fldAddr);
6381 ival = *((unsigned char*)fldAddr);
6385 ival = *((short*)fldAddr);
6389 ival = *((unsigned short*)fldAddr);
6394 ival = *((int*)fldAddr);
6396 op1 = gtNewIconNode(ival);
6401 lval = *((__int64*)fldAddr);
6402 op1 = gtNewLconNode(lval);
6406 dval = *((float*)fldAddr);
6407 op1 = gtNewDconNode(dval);
6408 #if !FEATURE_X87_DOUBLES
6409 // X87 stack doesn't differentiate between float/double
6410 // so R4 is treated as R8, but everybody else does
6411 op1->gtType = TYP_FLOAT;
6412 #endif // FEATURE_X87_DOUBLES
6416 dval = *((double*)fldAddr);
6417 op1 = gtNewDconNode(dval);
6421 assert(!"Unexpected lclTyp");
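//------------------------------------------------------------------------
// impImportStaticFieldAccess: build the tree for a static field access
//    (summary added for clarity)
//
// Depending on pFieldInfo->fieldAccessor this goes through the generics statics
// helper, the shared statics helper, a ReadyToRun helper, or a direct field
// address; when CORINFO_ACCESS_ADDRESS is not requested the address is wrapped
// in an indirection (or an OBJ node for struct fields).
//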
6428 GenTree* Compiler::impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolvedToken,
6429 CORINFO_ACCESS_FLAGS access,
6430 CORINFO_FIELD_INFO* pFieldInfo,
6435 switch (pFieldInfo->fieldAccessor)
6437 case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
6439 assert(!compIsForInlining());
6441 // We first call a special helper to get the statics base pointer
6442 op1 = impParentClassTokenToHandle(pResolvedToken);
6444 // compIsForInlining() is false so we should never get NULL here
6445 assert(op1 != nullptr);
6447 var_types type = TYP_BYREF;
6449 switch (pFieldInfo->helper)
6451 case CORINFO_HELP_GETGENERICS_NONGCTHREADSTATIC_BASE:
6454 case CORINFO_HELP_GETGENERICS_GCSTATIC_BASE:
6455 case CORINFO_HELP_GETGENERICS_NONGCSTATIC_BASE:
6456 case CORINFO_HELP_GETGENERICS_GCTHREADSTATIC_BASE:
6459 assert(!"unknown generic statics helper");
6463 op1 = gtNewHelperCallNode(pFieldInfo->helper, type, gtNewArgList(op1));
6465 FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
6466 op1 = gtNewOperNode(GT_ADD, type, op1,
6467 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, pFieldInfo->offset, fs));
6471 case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
6473 #ifdef FEATURE_READYTORUN_COMPILER
6474 if (opts.IsReadyToRun())
6476 unsigned callFlags = 0;
6478 if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT)
6480 callFlags |= GTF_CALL_HOISTABLE;
6483 op1 = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_STATIC_BASE, TYP_BYREF);
6484 op1->gtFlags |= callFlags;
6486 op1->gtCall.setEntryPoint(pFieldInfo->fieldLookup);
6491 op1 = fgGetStaticsCCtorHelper(pResolvedToken->hClass, pFieldInfo->helper);
6495 FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
6496 op1 = gtNewOperNode(GT_ADD, op1->TypeGet(), op1,
6497 new (this, GT_CNS_INT) GenTreeIntCon(TYP_INT, pFieldInfo->offset, fs));
6502 case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
6504 #ifdef FEATURE_READYTORUN_COMPILER
6505 noway_assert(opts.IsReadyToRun());
6506 CORINFO_LOOKUP_KIND kind = info.compCompHnd->getLocationOfThisType(info.compMethodHnd);
6507 assert(kind.needsRuntimeLookup);
6509 GenTree* ctxTree = getRuntimeContextTree(kind.runtimeLookupKind);
6510 GenTreeArgList* args = gtNewArgList(ctxTree);
6512 unsigned callFlags = 0;
6514 if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT)
6516 callFlags |= GTF_CALL_HOISTABLE;
6518 var_types type = TYP_BYREF;
6519 op1 = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE, type, args);
6520 op1->gtFlags |= callFlags;
6522 op1->gtCall.setEntryPoint(pFieldInfo->fieldLookup);
6523 FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
6524 op1 = gtNewOperNode(GT_ADD, type, op1,
6525 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, pFieldInfo->offset, fs));
6528 #endif // FEATURE_READYTORUN_COMPILER
6534 if (!(access & CORINFO_ACCESS_ADDRESS))
6536 // In the future, it may be better to just create the right tree here instead of folding it later.
6537 op1 = gtNewFieldRef(lclTyp, pResolvedToken->hField);
6539 if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
6541 op1->gtFlags |= GTF_FLD_INITCLASS;
6544 if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP)
6546 op1->gtType = TYP_REF; // points at boxed object
6547 FieldSeqNode* firstElemFldSeq =
6548 GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField);
6549 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
6550 new (this, GT_CNS_INT)
6551 GenTreeIntCon(TYP_I_IMPL, TARGET_POINTER_SIZE, firstElemFldSeq));
6553 if (varTypeIsStruct(lclTyp))
6555 // Constructor adds GTF_GLOB_REF. Note that this is *not* GTF_EXCEPT.
6556 op1 = gtNewObjNode(pFieldInfo->structType, op1);
6560 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
6561 op1->gtFlags |= GTF_GLOB_REF | GTF_IND_NONFAULTING;
6569 void** pFldAddr = nullptr;
6570 void* fldAddr = info.compCompHnd->getFieldAddress(pResolvedToken->hField, (void**)&pFldAddr);
6572 FieldSeqNode* fldSeq = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
6574 /* Create the data member node */
6575 op1 = gtNewIconHandleNode(pFldAddr == nullptr ? (size_t)fldAddr : (size_t)pFldAddr, GTF_ICON_STATIC_HDL,
6578 if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
6580 op1->gtFlags |= GTF_ICON_INITCLASS;
6583 if (pFldAddr != nullptr)
6585 // There are two cases here, either the static is RVA based,
6586 // in which case the type of the FIELD node is not a GC type
6587 // and the handle to the RVA is a TYP_I_IMPL. Or the FIELD node is
6588 // a GC type and the handle to it is a TYP_BYREF in the GC heap
6589 // because handles to statics now go into the large object heap
6591 var_types handleTyp = (var_types)(varTypeIsGC(lclTyp) ? TYP_BYREF : TYP_I_IMPL);
6592 op1 = gtNewOperNode(GT_IND, handleTyp, op1);
6593 op1->gtFlags |= GTF_IND_INVARIANT | GTF_IND_NONFAULTING;
6600 if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP)
6602 op1 = gtNewOperNode(GT_IND, TYP_REF, op1);
6604 FieldSeqNode* fldSeq = GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField);
6606 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
6607 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, TARGET_POINTER_SIZE, fldSeq));
6610 if (!(access & CORINFO_ACCESS_ADDRESS))
6612 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
6613 op1->gtFlags |= GTF_GLOB_REF;
6619 // In general try to call this before most of the verification work. Most people expect the access
6620 // exceptions before the verification exceptions. If you do this afterwards, that usually doesn't happen. It turns
6621 // out that if you can't access something, we also consider you unverifiable for other reasons.
6622 void Compiler::impHandleAccessAllowed(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall)
6624 if (result != CORINFO_ACCESS_ALLOWED)
6626 impHandleAccessAllowedInternal(result, helperCall);
6630 void Compiler::impHandleAccessAllowedInternal(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall)
6634 case CORINFO_ACCESS_ALLOWED:
6636 case CORINFO_ACCESS_ILLEGAL:
6637 // if we're verifying, then we need to reject the illegal access to ensure that we don't think the
6638 // method is verifiable. Otherwise, delay the exception to runtime.
6639 if (compIsForImportOnly())
6641 info.compCompHnd->ThrowExceptionForHelper(helperCall);
6645 impInsertHelperCall(helperCall);
6648 case CORINFO_ACCESS_RUNTIME_CHECK:
6649 impInsertHelperCall(helperCall);
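//------------------------------------------------------------------------
// impInsertHelperCall: append a call to the access-check helper described by helperInfo
//    (summary added for clarity)
//
// Each helper argument is materialized as the appropriate handle or constant node
// (forcing any referenced classes/methods to be loadable before the code runs),
// and the resulting helper call is appended to the current statement list.
//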
6654 void Compiler::impInsertHelperCall(CORINFO_HELPER_DESC* helperInfo)
6656 // Construct the argument list
6657 GenTreeArgList* args = nullptr;
6658 assert(helperInfo->helperNum != CORINFO_HELP_UNDEF);
6659 for (unsigned i = helperInfo->numArgs; i > 0; --i)
6661 const CORINFO_HELPER_ARG& helperArg = helperInfo->args[i - 1];
6662 GenTree* currentArg = nullptr;
6663 switch (helperArg.argType)
6665 case CORINFO_HELPER_ARG_TYPE_Field:
6666 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(
6667 info.compCompHnd->getFieldClass(helperArg.fieldHandle));
6668 currentArg = gtNewIconEmbFldHndNode(helperArg.fieldHandle);
6670 case CORINFO_HELPER_ARG_TYPE_Method:
6671 info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(helperArg.methodHandle);
6672 currentArg = gtNewIconEmbMethHndNode(helperArg.methodHandle);
6674 case CORINFO_HELPER_ARG_TYPE_Class:
6675 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(helperArg.classHandle);
6676 currentArg = gtNewIconEmbClsHndNode(helperArg.classHandle);
6678 case CORINFO_HELPER_ARG_TYPE_Module:
6679 currentArg = gtNewIconEmbScpHndNode(helperArg.moduleHandle);
6681 case CORINFO_HELPER_ARG_TYPE_Const:
6682 currentArg = gtNewIconNode(helperArg.constant);
6685 NO_WAY("Illegal helper arg type");
6687 args = (currentArg == nullptr) ? gtNewArgList(currentArg) : gtNewListNode(currentArg, args);
6691 * Mark as CSE'able, and hoistable. Consider marking hoistable unless you're in the inlinee.
6692 * Also, consider sticking this in the first basic block.
6694 GenTree* callout = gtNewHelperCallNode(helperInfo->helperNum, TYP_VOID, args);
6695 impAppendTree(callout, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
6698 // Checks whether the return types of caller and callee are compatible
6699 // so that callee can be tail called. Note that here we don't check
6700 // compatibility in the IL Verifier sense, but along the lines of the return type
6701 // sizes being equal and the values being returned in the same return register.
6702 bool Compiler::impTailCallRetTypeCompatible(var_types callerRetType,
6703 CORINFO_CLASS_HANDLE callerRetTypeClass,
6704 var_types calleeRetType,
6705 CORINFO_CLASS_HANDLE calleeRetTypeClass)
6707 // Note that we can not relax this condition with genActualType() as the
6708 // calling convention dictates that the caller of a function with a small
6709 // typed return value is responsible for normalizing the return val.
6710 if (callerRetType == calleeRetType)
6715 // If the class handles are the same and not null, the return types are compatible.
6716 if ((callerRetTypeClass != nullptr) && (callerRetTypeClass == calleeRetTypeClass))
6721 #if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)
6723 if (callerRetType == TYP_VOID)
6725 // This needs to be allowed to support the following IL pattern that Jit64 allows:
//     tail.call
//     pop
//     ret
6730 // Note that the above IL pattern is not valid as per IL verification rules.
6731 // Therefore, only full trust code can take advantage of this pattern.
6735 // These checks return true if the return value type sizes are the same and
6736 // get returned in the same return register, i.e. the caller doesn't need to normalize the
6737 // return value. Some of the tail calls permitted by the checks below would have
6738 // been rejected by IL Verifier before we reached here. Therefore, only full
6739 // trust code can make those tail calls.
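// For illustration only (an assumed example, not from the original comment): on AMD64
// a caller returning an 8-byte struct in RAX could tail call a callee returning
// TYP_LONG, since both return values are the same size and come back in the same
// register, so no caller-side normalization is needed.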
6740 unsigned callerRetTypeSize = 0;
6741 unsigned calleeRetTypeSize = 0;
6742 bool isCallerRetTypMBEnreg =
6743 VarTypeIsMultiByteAndCanEnreg(callerRetType, callerRetTypeClass, &callerRetTypeSize, true);
6744 bool isCalleeRetTypMBEnreg =
6745 VarTypeIsMultiByteAndCanEnreg(calleeRetType, calleeRetTypeClass, &calleeRetTypeSize, true);
6747 if (varTypeIsIntegral(callerRetType) || isCallerRetTypMBEnreg)
6749 return (varTypeIsIntegral(calleeRetType) || isCalleeRetTypMBEnreg) && (callerRetTypeSize == calleeRetTypeSize);
6751 #endif // _TARGET_AMD64_ || _TARGET_ARM64_
6759 PREFIX_TAILCALL_EXPLICIT = 0x00000001, // call has "tail" IL prefix
6760 PREFIX_TAILCALL_IMPLICIT =
6761 0x00000010, // call is treated as having "tail" prefix even though there is no "tail" IL prefix
6762 PREFIX_TAILCALL = (PREFIX_TAILCALL_EXPLICIT | PREFIX_TAILCALL_IMPLICIT),
6763 PREFIX_VOLATILE = 0x00000100,
6764 PREFIX_UNALIGNED = 0x00001000,
6765 PREFIX_CONSTRAINED = 0x00010000,
6766 PREFIX_READONLY = 0x00100000
6769 /********************************************************************************
6771 * Returns true if the current opcode and the opcodes following it correspond
6772 * to a supported tail call IL pattern.
6775 bool Compiler::impIsTailCallILPattern(bool tailPrefixed,
6777 const BYTE* codeAddrOfNextOpcode,
6778 const BYTE* codeEnd,
6780 bool* isCallPopAndRet /* = nullptr */)
6782 // Bail out if the current opcode is not a call.
6783 if (!impOpcodeIsCallOpcode(curOpcode))
6788 #if !FEATURE_TAILCALL_OPT_SHARED_RETURN
6789 // If shared ret tail opt is not enabled, we will enable
6790 // it for recursive methods.
6794 // we can actually handle if the ret is in a fallthrough block, as long as that is the only part of the
6795 // sequence. Make sure we don't go past the end of the IL however.
6796 codeEnd = min(codeEnd + 1, info.compCode + info.compILCodeSize);
6799 // Bail out if there is no next opcode after call
6800 if (codeAddrOfNextOpcode >= codeEnd)
6805 // Scan the opcodes to look for the following IL patterns if either
6806 // i) the call is not tail prefixed (i.e. implicit tail call) or
6807 // ii) if tail prefixed, IL verification is not needed for the method.
6809 // Only in the above two cases we can allow the below tail call patterns
6810 // violating ECMA spec.
6826 #if !defined(FEATURE_CORECLR) && defined(_TARGET_AMD64_)
6829 nextOpcode = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode);
6830 codeAddrOfNextOpcode += sizeof(__int8);
6831 } while ((codeAddrOfNextOpcode < codeEnd) && // Haven't reached end of method
6832 (!tailPrefixed || !tiVerificationNeeded) && // Not ".tail" prefixed or method requires no IL verification
6833 ((nextOpcode == CEE_NOP) || ((nextOpcode == CEE_POP) && (++cntPop == 1)))); // Next opcode = nop or exactly
6834 // one pop seen so far.
6836 nextOpcode = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode);
6837 #endif // !FEATURE_CORECLR && _TARGET_AMD64_
6839 if (isCallPopAndRet)
6841 // Allow call+pop+ret to be tail call optimized if caller ret type is void
6842 *isCallPopAndRet = (nextOpcode == CEE_RET) && (cntPop == 1);
6845 #if !defined(FEATURE_CORECLR) && defined(_TARGET_AMD64_)
6847 // Tail call IL pattern could be either of the following
6848 // 1) call/callvirt/calli + ret
6849 // 2) call/callvirt/calli + pop + ret in a method returning void.
6850 return (nextOpcode == CEE_RET) && ((cntPop == 0) || ((cntPop == 1) && (info.compRetType == TYP_VOID)));
6852 return (nextOpcode == CEE_RET) && (cntPop == 0);
6853 #endif // !FEATURE_CORECLR && _TARGET_AMD64_
6856 /*****************************************************************************
6858 * Determine whether the call could be converted to an implicit tail call
6861 bool Compiler::impIsImplicitTailCallCandidate(
6862 OPCODE opcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, int prefixFlags, bool isRecursive)
6865 #if FEATURE_TAILCALL_OPT
6866 if (!opts.compTailCallOpt)
6871 if (opts.compDbgCode || opts.MinOpts())
6876 // must not be tail prefixed
6877 if (prefixFlags & PREFIX_TAILCALL_EXPLICIT)
6882 #if !FEATURE_TAILCALL_OPT_SHARED_RETURN
6883 // the block containing call is marked as BBJ_RETURN
6884 // We allow shared ret tail call optimization on recursive calls even under
6885 // !FEATURE_TAILCALL_OPT_SHARED_RETURN.
6886 if (!isRecursive && (compCurBB->bbJumpKind != BBJ_RETURN))
6888 #endif // !FEATURE_TAILCALL_OPT_SHARED_RETURN
6890 // must be call+ret or call+pop+ret
6891 if (!impIsTailCallILPattern(false, opcode, codeAddrOfNextOpcode, codeEnd, isRecursive))
6899 #endif // FEATURE_TAILCALL_OPT
6902 //------------------------------------------------------------------------
6903 // impImportCall: import a call-inspiring opcode
6906 // opcode - opcode that inspires the call
6907 // pResolvedToken - resolved token for the call target
6908 // pConstrainedResolvedToken - resolved constraint token (or nullptr)
6909 // newObjThis - tree for this pointer or uninitialized newobj temp (or nullptr)
6910 // prefixFlags - IL prefix flags for the call
6911 // callInfo - EE supplied info for the call
6912 // rawILOffset - IL offset of the opcode
6915 // Type of the call's return value.
6916 // If we're importing an inlinee and have realized the inline must fail, the call return type should be TYP_UNDEF.
6917 // However we can't assert for this here yet because there are cases we miss. See issue #13272.
6921 // opcode can be CEE_CALL, CEE_CALLI, CEE_CALLVIRT, or CEE_NEWOBJ.
6923 // For CEE_NEWOBJ, newobjThis should be the temp grabbed for the allocated
6924 // uninitialized object.
6927 #pragma warning(push)
6928 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
6931 var_types Compiler::impImportCall(OPCODE opcode,
6932 CORINFO_RESOLVED_TOKEN* pResolvedToken,
6933 CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
6934 GenTree* newobjThis,
6936 CORINFO_CALL_INFO* callInfo,
6937 IL_OFFSET rawILOffset)
6939 assert(opcode == CEE_CALL || opcode == CEE_CALLVIRT || opcode == CEE_NEWOBJ || opcode == CEE_CALLI);
6941 IL_OFFSETX ilOffset = impCurILOffset(rawILOffset, true);
6942 var_types callRetTyp = TYP_COUNT;
6943 CORINFO_SIG_INFO* sig = nullptr;
6944 CORINFO_METHOD_HANDLE methHnd = nullptr;
6945 CORINFO_CLASS_HANDLE clsHnd = nullptr;
6946 unsigned clsFlags = 0;
6947 unsigned mflags = 0;
6948 unsigned argFlags = 0;
6949 GenTree* call = nullptr;
6950 GenTreeArgList* args = nullptr;
6951 CORINFO_THIS_TRANSFORM constraintCallThisTransform = CORINFO_NO_THIS_TRANSFORM;
6952 CORINFO_CONTEXT_HANDLE exactContextHnd = nullptr;
6953 bool exactContextNeedsRuntimeLookup = false;
6954 bool canTailCall = true;
6955 const char* szCanTailCallFailReason = nullptr;
6956 int tailCall = prefixFlags & PREFIX_TAILCALL;
6957 bool readonlyCall = (prefixFlags & PREFIX_READONLY) != 0;
6959 CORINFO_RESOLVED_TOKEN* ldftnToken = nullptr;
6961 // Synchronized methods need to call CORINFO_HELP_MON_EXIT at the end. We could
6962 // do that before tailcalls, but that is probably not the intended
6963 // semantic. So just disallow tailcalls from synchronized methods.
6964 // Also, popping arguments in a varargs function is more work and NYI
6965 // If we have a security object, we have to keep our frame around for callers
6966 // to see any imperative security.
6967 if (info.compFlags & CORINFO_FLG_SYNCH)
6969 canTailCall = false;
6970 szCanTailCallFailReason = "Caller is synchronized";
6972 #if !FEATURE_FIXED_OUT_ARGS
6973 else if (info.compIsVarArgs)
6975 canTailCall = false;
6976 szCanTailCallFailReason = "Caller is varargs";
6978 #endif // FEATURE_FIXED_OUT_ARGS
6979 else if (opts.compNeedSecurityCheck)
6981 canTailCall = false;
6982 szCanTailCallFailReason = "Caller requires a security check.";
6985 // We only need to cast the return value of pinvoke inlined calls that return small types
6987 // TODO-AMD64-Cleanup: Remove this when we stop interoperating with JIT64, or if we decide to stop
6988 // widening everything! CoreCLR does not support JIT64 interoperation so no need to widen there.
6989 // The existing x64 JIT doesn't bother widening all types to int, so we have to assume for
6990 // the time being that the callee might be compiled by the other JIT and thus the return
6991 // value will need to be widened by us (or not widened at all...)
6993 // ReadyToRun code sticks with default calling convention that does not widen small return types.
6995 bool checkForSmallType = opts.IsJit64Compat() || opts.IsReadyToRun();
6996 bool bIntrinsicImported = false;
6998 CORINFO_SIG_INFO calliSig;
6999 GenTreeArgList* extraArg = nullptr;
7001 /*-------------------------------------------------------------------------
7002 * First create the call node
7005 if (opcode == CEE_CALLI)
7007 /* Get the call site sig */
7008 eeGetSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &calliSig);
7010 callRetTyp = JITtype2varType(calliSig.retType);
7012 call = impImportIndirectCall(&calliSig, ilOffset);
7014 // We don't know the target method, so we have to infer the flags, or
7015 // assume the worst-case.
7016 mflags = (calliSig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC;
7021 unsigned structSize =
7022 (callRetTyp == TYP_STRUCT) ? info.compCompHnd->getClassSize(calliSig.retTypeSigClass) : 0;
7023 printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n",
7024 opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize);
7027 // This should be checked in impImportBlockCode.
7028 assert(!compIsForInlining() || !(impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY));
7033 // We cannot lazily obtain the signature of a CALLI call because it has no method
7034 // handle that we can use, so we need to save its full call signature here.
7035 assert(call->gtCall.callSig == nullptr);
7036 call->gtCall.callSig = new (this, CMK_CorSig) CORINFO_SIG_INFO;
7037 *call->gtCall.callSig = calliSig;
7040 if (IsTargetAbi(CORINFO_CORERT_ABI))
7042 bool managedCall = (((calliSig.callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_STDCALL) &&
7043 ((calliSig.callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_C) &&
7044 ((calliSig.callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_THISCALL) &&
7045 ((calliSig.callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_FASTCALL));
7048 addFatPointerCandidate(call->AsCall());
7052 else // (opcode != CEE_CALLI)
7054 CorInfoIntrinsics intrinsicID = CORINFO_INTRINSIC_Count;
7056 // Passing CORINFO_CALLINFO_ALLOWINSTPARAM indicates that this JIT is prepared to
7057 // supply the instantiation parameters necessary to make direct calls to underlying
7058 // shared generic code, rather than calling through instantiating stubs. If the
7059 // returned signature has CORINFO_CALLCONV_PARAMTYPE then this indicates that the JIT
7060 // must indeed pass an instantiation parameter.
7062 methHnd = callInfo->hMethod;
7064 sig = &(callInfo->sig);
7065 callRetTyp = JITtype2varType(sig->retType);
7067 mflags = callInfo->methodFlags;
7072 unsigned structSize = (callRetTyp == TYP_STRUCT) ? info.compCompHnd->getClassSize(sig->retTypeSigClass) : 0;
7073 printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n",
7074 opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize);
7077 if (compIsForInlining())
7079 /* Does this call site have security boundary restrictions? */
7081 if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
7083 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_SECURITY);
7087 /* Does the inlinee need a security check token on the frame */
7089 if (mflags & CORINFO_FLG_SECURITYCHECK)
7091 compInlineResult->NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
7095 /* Does the inlinee use StackCrawlMark */
7097 if (mflags & CORINFO_FLG_DONT_INLINE_CALLER)
7099 compInlineResult->NoteFatal(InlineObservation::CALLEE_STACK_CRAWL_MARK);
7103 /* For now ignore delegate invoke */
7105 if (mflags & CORINFO_FLG_DELEGATE_INVOKE)
7107 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_DELEGATE_INVOKE);
7111 /* For now ignore varargs */
7112 if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
7114 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NATIVE_VARARGS);
7118 if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
7120 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS);
7124 if ((mflags & CORINFO_FLG_VIRTUAL) && (sig->sigInst.methInstCount != 0) && (opcode == CEE_CALLVIRT))
7126 compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_GENERIC_VIRTUAL);
7131 clsHnd = pResolvedToken->hClass;
7133 clsFlags = callInfo->classFlags;
7136 // If this is a call to JitTestLabel.Mark, do "early inlining", and record the test attribute.
7138 // This recognition should really be done by knowing the methHnd of the relevant Mark method(s).
7139 // These should be in mscorlib.h, and available through a JIT/EE interface call.
7140 const char* modName;
7141 const char* className;
7142 const char* methodName;
7143 if ((className = eeGetClassName(clsHnd)) != nullptr &&
7144 strcmp(className, "System.Runtime.CompilerServices.JitTestLabel") == 0 &&
7145 (methodName = eeGetMethodName(methHnd, &modName)) != nullptr && strcmp(methodName, "Mark") == 0)
7147 return impImportJitTestLabelMark(sig->numArgs);
7151 // <NICE> Factor this into getCallInfo </NICE>
7152 bool isSpecialIntrinsic = false;
7153 if ((mflags & (CORINFO_FLG_INTRINSIC | CORINFO_FLG_JIT_INTRINSIC)) != 0)
7155 const bool isTail = canTailCall && (tailCall != 0);
7157 call = impIntrinsic(newobjThis, clsHnd, methHnd, sig, mflags, pResolvedToken->token, readonlyCall, isTail,
7158 pConstrainedResolvedToken, callInfo->thisTransform, &intrinsicID, &isSpecialIntrinsic);
7160 if (compDonotInline())
7165 if (call != nullptr)
7167 assert(!(mflags & CORINFO_FLG_VIRTUAL) || (mflags & CORINFO_FLG_FINAL) ||
7168 (clsFlags & CORINFO_FLG_FINAL));
7170 #ifdef FEATURE_READYTORUN_COMPILER
7171 if (call->OperGet() == GT_INTRINSIC)
7173 if (opts.IsReadyToRun())
7175 noway_assert(callInfo->kind == CORINFO_CALL);
7176 call->gtIntrinsic.gtEntryPoint = callInfo->codePointerLookup.constLookup;
7180 call->gtIntrinsic.gtEntryPoint.addr = nullptr;
7181 call->gtIntrinsic.gtEntryPoint.accessType = IAT_VALUE;
7186 bIntrinsicImported = true;
7194 call = impSIMDIntrinsic(opcode, newobjThis, clsHnd, methHnd, sig, pResolvedToken->token);
7195 if (call != nullptr)
7197 bIntrinsicImported = true;
7201 #endif // FEATURE_SIMD
7203 if ((mflags & CORINFO_FLG_VIRTUAL) && (mflags & CORINFO_FLG_EnC) && (opcode == CEE_CALLVIRT))
7205 NO_WAY("Virtual call to a function added via EnC is not supported");
7208 if ((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_DEFAULT &&
7209 (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
7210 (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG)
7212 BADCODE("Bad calling convention");
7215 //-------------------------------------------------------------------------
7216 // Construct the call node
7218 // Work out what sort of call we're making.
7219 // Dispense with virtual calls implemented via LDVIRTFTN immediately.
7221 constraintCallThisTransform = callInfo->thisTransform;
7222 exactContextHnd = callInfo->contextHandle;
7223 exactContextNeedsRuntimeLookup = callInfo->exactContextNeedsRuntimeLookup == TRUE;
7225 // A recursive call is treated as a loop back to the beginning of the method.
7226 if (gtIsRecursiveCall(methHnd))
7231 JITDUMP("\nFound recursive call in the method. Mark BB%02u to BB%02u as having a backward branch.\n",
7232 fgFirstBB->bbNum, compCurBB->bbNum);
7235 fgMarkBackwardJump(fgFirstBB, compCurBB);
7238 switch (callInfo->kind)
7241 case CORINFO_VIRTUALCALL_STUB:
7243 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
7244 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
7245 if (callInfo->stubLookup.lookupKind.needsRuntimeLookup)
7248 if (compIsForInlining())
7250 // Don't import runtime lookups when inlining
7251 // Inlining has to be aborted in such a case
7252 /* XXX Fri 3/20/2009
7253 * By the way, this would never succeed. If the handle lookup is into the generic
7254 * dictionary for a candidate, you'll generate different dictionary offsets and the
7255 * inlined code will crash.
7257 * To anyone reviewing this code, when could this ever succeed in the future? It'll
7258 * always have a handle lookup. These lookups are safe intra-module, but we're just
7261 compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_COMPLEX_HANDLE);
7265 GenTree* stubAddr = impRuntimeLookupToTree(pResolvedToken, &callInfo->stubLookup, methHnd);
7266 assert(!compDonotInline());
7268 // This is the rough code to set up an indirect stub call
7269 assert(stubAddr != nullptr);
7271 // The stubAddr may be a
7272 // complex expression. As it is evaluated after the args,
7273 // it may cause registered args to be spilled. Simply spill it.
7275 unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall with runtime lookup"));
7276 impAssignTempGen(lclNum, stubAddr, (unsigned)CHECK_SPILL_ALL);
7277 stubAddr = gtNewLclvNode(lclNum, TYP_I_IMPL);
7279 // Create the actual call node
7281 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
7282 (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
7284 call = gtNewIndCallNode(stubAddr, callRetTyp, nullptr);
7286 call->gtFlags |= GTF_EXCEPT | (stubAddr->gtFlags & GTF_GLOB_EFFECT);
7287 call->gtFlags |= GTF_CALL_VIRT_STUB;
7290 // No tailcalls allowed for these yet...
7291 canTailCall = false;
7292 szCanTailCallFailReason = "VirtualCall with runtime lookup";
7297 // OK, the stub is available at compile time.
7299 call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
7300 call->gtCall.gtStubCallStubAddr = callInfo->stubLookup.constLookup.addr;
7301 call->gtFlags |= GTF_CALL_VIRT_STUB;
7302 assert(callInfo->stubLookup.constLookup.accessType != IAT_PPVALUE &&
7303 callInfo->stubLookup.constLookup.accessType != IAT_RELPVALUE);
7304 if (callInfo->stubLookup.constLookup.accessType == IAT_PVALUE)
7306 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_VIRTSTUB_REL_INDIRECT;
7310 #ifdef FEATURE_READYTORUN_COMPILER
7311 if (opts.IsReadyToRun())
7313 // Null check is sometimes needed for ready to run to handle
7314 // non-virtual <-> virtual changes between versions
7315 if (callInfo->nullInstanceCheck)
7317 call->gtFlags |= GTF_CALL_NULLCHECK;
7325 case CORINFO_VIRTUALCALL_VTABLE:
7327 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
7328 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
7329 call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
7330 call->gtFlags |= GTF_CALL_VIRT_VTABLE;
7334 case CORINFO_VIRTUALCALL_LDVIRTFTN:
7336 if (compIsForInlining())
7338 compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_CALL_VIA_LDVIRTFTN);
7342 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
7343 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
7344 // OK, We've been told to call via LDVIRTFTN, so just
7345 // take the call now....
7347 args = impPopList(sig->numArgs, sig);
7349 GenTree* thisPtr = impPopStack().val;
7350 thisPtr = impTransformThis(thisPtr, pConstrainedResolvedToken, callInfo->thisTransform);
7351 assert(thisPtr != nullptr);
7353 // Clone the (possibly transformed) "this" pointer
7354 GenTree* thisPtrCopy;
7355 thisPtr = impCloneExpr(thisPtr, &thisPtrCopy, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
7356 nullptr DEBUGARG("LDVIRTFTN this pointer"));
7358 GenTree* fptr = impImportLdvirtftn(thisPtr, pResolvedToken, callInfo);
7359 assert(fptr != nullptr);
7361 thisPtr = nullptr; // can't reuse it
7363 // Now make an indirect call through the function pointer
7365 unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall through function pointer"));
7366 impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL);
7367 fptr = gtNewLclvNode(lclNum, TYP_I_IMPL);
7369 // Create the actual call node
7371 call = gtNewIndCallNode(fptr, callRetTyp, args, ilOffset);
7372 call->gtCall.gtCallObjp = thisPtrCopy;
7373 call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
7375 if ((sig->sigInst.methInstCount != 0) && IsTargetAbi(CORINFO_CORERT_ABI))
7377 // CoreRT generic virtual method: need to handle potential fat function pointers
7378 addFatPointerCandidate(call->AsCall());
7380 #ifdef FEATURE_READYTORUN_COMPILER
7381 if (opts.IsReadyToRun())
7383 // Null check is needed for ready to run to handle
7384 // non-virtual <-> virtual changes between versions
7385 call->gtFlags |= GTF_CALL_NULLCHECK;
7389 // Since we are jumping over some code, check that it's OK to skip that code
7390 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
7391 (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
7397 // This is for a non-virtual, non-interface etc. call
7398 call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
7400 // We remove the nullcheck for the GetType call intrinsic.
7401 // TODO-CQ: JIT64 does not introduce the null check for many more helper calls
7403 if (callInfo->nullInstanceCheck &&
7404 !((mflags & CORINFO_FLG_INTRINSIC) != 0 && (intrinsicID == CORINFO_INTRINSIC_Object_GetType)))
7406 call->gtFlags |= GTF_CALL_NULLCHECK;
7409 #ifdef FEATURE_READYTORUN_COMPILER
7410 if (opts.IsReadyToRun())
7412 call->gtCall.setEntryPoint(callInfo->codePointerLookup.constLookup);
7418 case CORINFO_CALL_CODE_POINTER:
7420 // The EE has asked us to call by computing a code pointer and then doing an
7421 // indirect call. This is because a runtime lookup is required to get the code entry point.
7423 // These calls always follow a uniform calling convention, i.e. no extra hidden params
7424 assert((sig->callConv & CORINFO_CALLCONV_PARAMTYPE) == 0);
7426 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG);
7427 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
7430 impLookupToTree(pResolvedToken, &callInfo->codePointerLookup, GTF_ICON_FTN_ADDR, callInfo->hMethod);
7432 if (compDonotInline())
7437 // Now make an indirect call through the function pointer
7439 unsigned lclNum = lvaGrabTemp(true DEBUGARG("Indirect call through function pointer"));
7440 impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL);
7441 fptr = gtNewLclvNode(lclNum, TYP_I_IMPL);
7443 call = gtNewIndCallNode(fptr, callRetTyp, nullptr, ilOffset);
7444 call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
7445 if (callInfo->nullInstanceCheck)
7447 call->gtFlags |= GTF_CALL_NULLCHECK;
7454 assert(!"unknown call kind");
7458 //-------------------------------------------------------------------------
7461 PREFIX_ASSUME(call != nullptr);
7463 if (mflags & CORINFO_FLG_NOGCCHECK)
7465 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_NOGCCHECK;
7468 // Mark call if it's one of the ones we will maybe treat as an intrinsic
7469 if (isSpecialIntrinsic)
7471 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_SPECIAL_INTRINSIC;
7475 assert(clsHnd || (opcode == CEE_CALLI)); // We're never verifying for CALLI, so this is not set.
7477 /* Some sanity checks */
7479 // CALL_VIRT and NEWOBJ must have a THIS pointer
7480 assert((opcode != CEE_CALLVIRT && opcode != CEE_NEWOBJ) || (sig->callConv & CORINFO_CALLCONV_HASTHIS));
7481 // static bit and hasThis are negations of one another
7482 assert(((mflags & CORINFO_FLG_STATIC) != 0) == ((sig->callConv & CORINFO_CALLCONV_HASTHIS) == 0));
7483 assert(call != nullptr);
7485 /*-------------------------------------------------------------------------
7486 * Check special-cases etc
7489 /* Special case - Check if it is a call to Delegate.Invoke(). */
7491 if (mflags & CORINFO_FLG_DELEGATE_INVOKE)
7493 assert(!compIsForInlining());
7494 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
7495 assert(mflags & CORINFO_FLG_FINAL);
7497 /* Set the delegate flag */
7498 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_DELEGATE_INV;
7500 if (callInfo->secureDelegateInvoke)
7502 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_SECURE_DELEGATE_INV;
7505 if (opcode == CEE_CALLVIRT)
7507 assert(mflags & CORINFO_FLG_FINAL);
7509 /* It should have the GTF_CALL_NULLCHECK flag set. Reset it */
7510 assert(call->gtFlags & GTF_CALL_NULLCHECK);
7511 call->gtFlags &= ~GTF_CALL_NULLCHECK;
7515 CORINFO_CLASS_HANDLE actualMethodRetTypeSigClass;
7516 actualMethodRetTypeSigClass = sig->retTypeSigClass;
7517 if (varTypeIsStruct(callRetTyp))
7519 callRetTyp = impNormStructType(actualMethodRetTypeSigClass);
7520 call->gtType = callRetTyp;
7524 /* Check for varargs */
7525 if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG ||
7526 (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
7528 BADCODE("Varargs not supported.");
7530 #endif // !FEATURE_VARARG
7533 if (call->gtCall.callSig == nullptr)
7535 call->gtCall.callSig = new (this, CMK_CorSig) CORINFO_SIG_INFO;
7536 *call->gtCall.callSig = *sig;
7538 #endif // UNIX_X86_ABI
7540 if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG ||
7541 (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
7543 assert(!compIsForInlining());
7545 /* Set the right flags */
7547 call->gtFlags |= GTF_CALL_POP_ARGS;
7548 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_VARARGS;
7550 /* Can't allow tailcall for varargs as it is caller-pop. The caller
7551 will be expecting to pop a certain number of arguments, but if we
7552 tailcall to a function with a different number of arguments, we
7553 are hosed. There are ways around this (caller remembers esp value,
7554 varargs is not caller-pop, etc), but not worth it. */
7555 CLANG_FORMAT_COMMENT_ANCHOR;
7560 canTailCall = false;
7561 szCanTailCallFailReason = "Callee is varargs";
7565 /* Get the total number of arguments - this is already correct
7566 * for CALLI - for methods we have to get it from the call site */
7568 if (opcode != CEE_CALLI)
7571 unsigned numArgsDef = sig->numArgs;
7573 eeGetCallSiteSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, sig);
7576 // We cannot lazily obtain the signature of a vararg call because using its method
7577 // handle will give us only the declared argument list, not the full argument list.
7578 assert(call->gtCall.callSig == nullptr);
7579 call->gtCall.callSig = new (this, CMK_CorSig) CORINFO_SIG_INFO;
7580 *call->gtCall.callSig = *sig;
7583 // For vararg calls we must be sure to load the return type of the
7584 // method actually being called, as well as the return type
7585 // specified in the vararg signature. With type equivalency, these types
7586 // may not be the same.
7587 if (sig->retTypeSigClass != actualMethodRetTypeSigClass)
7589 if (actualMethodRetTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS &&
7590 sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR &&
7591 sig->retType != CORINFO_TYPE_VAR)
7593 // Make sure that all valuetypes (including enums) that we push are loaded.
7594 // This is to guarantee that if a GC is triggered from the prestub of this method,
7595 // all valuetypes in the method signature are already loaded.
7596 // We need to be able to find the size of the valuetypes, but we cannot
7597 // do a class-load from within GC.
7598 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(actualMethodRetTypeSigClass);
7602 assert(numArgsDef <= sig->numArgs);
7605 /* We will have "cookie" as the last argument but we cannot push
7606 * it on the operand stack because we may overflow, so we append it
7607 * to the arg list after we pop the arguments */
7610 if (mflags & CORINFO_FLG_SECURITYCHECK)
7612 assert(!compIsForInlining());
7614 // Need security prolog/epilog callouts when there is
7615 // imperative security in the method. This is to give security a
7616 // chance to do any setup in the prolog and cleanup in the epilog if needed.
7618 if (compIsForInlining())
7620 // Cannot handle this if the method being imported is itself an inlinee,
7621 // because an inlinee method does not have its own frame.
7623 compInlineResult->NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
7628 tiSecurityCalloutNeeded = true;
7630 // If the current method calls a method which needs a security check,
7631 // (i.e. the method being compiled has imperative security)
7632 // we need to reserve a slot for the security object in
7633 // the current method's stack frame
7634 opts.compNeedSecurityCheck = true;
7638 //--------------------------- Inline NDirect ------------------------------
7640 // For inline cases we technically should look at both the current
7641 // block and the call site block (or just the latter if we've
7642 // fused the EH trees). However the block-related checks pertain to
7643 // EH and we currently won't inline a method with EH. So for
7644 // inlinees, just checking the call site block is sufficient.
7646 // New lexical block here to avoid compilation errors because of GOTOs.
7647 BasicBlock* block = compIsForInlining() ? impInlineInfo->iciBlock : compCurBB;
7648 impCheckForPInvokeCall(call->AsCall(), methHnd, sig, mflags, block);
7651 if (call->gtFlags & GTF_CALL_UNMANAGED)
7653 // We set up the unmanaged call by linking the frame, disabling GC, etc
7654 // This needs to be cleaned up on return
7657 canTailCall = false;
7658 szCanTailCallFailReason = "Callee is native";
7661 checkForSmallType = true;
7663 impPopArgsForUnmanagedCall(call, sig);
7667 else if ((opcode == CEE_CALLI) && (((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_STDCALL) ||
7668 ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_C) ||
7669 ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_THISCALL) ||
7670 ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_FASTCALL)))
7672 if (!info.compCompHnd->canGetCookieForPInvokeCalliSig(sig))
7674 // Normally this only happens with inlining.
7675 // However, a generic method (or type) being NGENd into another module
7676 // can run into this issue as well. There's not an easy fall-back for NGEN
7677 // so instead we fall back to JIT.
7678 if (compIsForInlining())
7680 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_PINVOKE_COOKIE);
7684 IMPL_LIMITATION("Can't get PInvoke cookie (cross module generics)");
7690 GenTree* cookie = eeGetPInvokeCookie(sig);
7692 // This cookie is required to be either a simple GT_CNS_INT or
7693 // an indirection of a GT_CNS_INT
7695 GenTree* cookieConst = cookie;
7696 if (cookie->gtOper == GT_IND)
7698 cookieConst = cookie->gtOp.gtOp1;
7700 assert(cookieConst->gtOper == GT_CNS_INT);
7702 // Setting GTF_DONT_CSE on the GT_CNS_INT as well as on the GT_IND (if it exists) will ensure that
7703 // we won't allow this tree to participate in any CSE logic
7705 cookie->gtFlags |= GTF_DONT_CSE;
7706 cookieConst->gtFlags |= GTF_DONT_CSE;
7708 call->gtCall.gtCallCookie = cookie;
7712 canTailCall = false;
7713 szCanTailCallFailReason = "PInvoke calli";
7717 /*-------------------------------------------------------------------------
7718 * Create the argument list
7721 //-------------------------------------------------------------------------
7722 // Special case - for varargs we have an implicit last argument
7724 if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
7726 assert(!compIsForInlining());
7728 void *varCookie, *pVarCookie;
7729 if (!info.compCompHnd->canGetVarArgsHandle(sig))
7731 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_VARARGS_COOKIE);
7735 varCookie = info.compCompHnd->getVarArgsHandle(sig, &pVarCookie);
7736 assert((!varCookie) != (!pVarCookie));
7737 GenTree* cookie = gtNewIconEmbHndNode(varCookie, pVarCookie, GTF_ICON_VARG_HDL, sig);
7739 assert(extraArg == nullptr);
7740 extraArg = gtNewArgList(cookie);
7743 //-------------------------------------------------------------------------
7744 // Extra arg for shared generic code and array methods
7746 // Extra argument containing instantiation information is passed in the
7747 // following circumstances:
7748 // (a) To the "Address" method on array classes; the extra parameter is
7749 // the array's type handle (a TypeDesc)
7750 // (b) To shared-code instance methods in generic structs; the extra parameter
7751 // is the struct's type handle (a vtable ptr)
7752 // (c) To shared-code per-instantiation non-generic static methods in generic
7753 // classes and structs; the extra parameter is the type handle
7754 // (d) To shared-code generic methods; the extra parameter is an
7755 // exact-instantiation MethodDesc
7757 // We also set the exact type context associated with the call so we can
7758 // inline the call correctly later on.
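// Illustrative example (an assumption added for clarity, not from the original comments): given C# such as
//     class C<T> { public static void M() { /* uses typeof(T) */ } }
// a call to C<string>.M() made from shared generic code falls under case (c) above: the exact type handle
// for C<string> is appended as the hidden extra argument so the shared body of M can recover T at run time.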
7760 if (sig->callConv & CORINFO_CALLCONV_PARAMTYPE)
7762 assert(call->gtCall.gtCallType == CT_USER_FUNC);
7763 if (clsHnd == nullptr)
7765 NO_WAY("CALLI on parameterized type");
7768 assert(opcode != CEE_CALLI);
7773 // Instantiated generic method
7774 if (((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_METHOD)
7776 CORINFO_METHOD_HANDLE exactMethodHandle =
7777 (CORINFO_METHOD_HANDLE)((SIZE_T)exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK);
7779 if (!exactContextNeedsRuntimeLookup)
7781 #ifdef FEATURE_READYTORUN_COMPILER
7782 if (opts.IsReadyToRun())
7785 impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_METHOD_HDL, exactMethodHandle);
7786 if (instParam == nullptr)
7788 assert(compDonotInline());
7795 instParam = gtNewIconEmbMethHndNode(exactMethodHandle);
7796 info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(exactMethodHandle);
7801 instParam = impTokenToHandle(pResolvedToken, &runtimeLookup, TRUE /*mustRestoreHandle*/);
7802 if (instParam == nullptr)
7804 assert(compDonotInline());
7810 // otherwise must be an instance method in a generic struct,
7811 // a static method in a generic type, or a runtime-generated array method
7814 assert(((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_CLASS);
7815 CORINFO_CLASS_HANDLE exactClassHandle =
7816 (CORINFO_CLASS_HANDLE)((SIZE_T)exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK);
7818 if (compIsForInlining() && (clsFlags & CORINFO_FLG_ARRAY) != 0)
7820 compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_ARRAY_METHOD);
7824 if ((clsFlags & CORINFO_FLG_ARRAY) && readonlyCall)
7826 // We indicate "readonly" to the Address operation by using a null instParam.
7828 instParam = gtNewIconNode(0, TYP_REF);
7830 else if (!exactContextNeedsRuntimeLookup)
7832 #ifdef FEATURE_READYTORUN_COMPILER
7833 if (opts.IsReadyToRun())
7836 impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_CLASS_HDL, exactClassHandle);
7837 if (instParam == nullptr)
7839 assert(compDonotInline());
7846 instParam = gtNewIconEmbClsHndNode(exactClassHandle);
7847 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(exactClassHandle);
7852 // If the EE was able to resolve a constrained call, the instantiating parameter to use is the type
7853 // with which the call was constrained. We embed pConstrainedResolvedToken as the extra argument
7854 // because pResolvedToken is an interface method and interface types make a poor generic context.
7855 if (pConstrainedResolvedToken)
7857 instParam = impTokenToHandle(pConstrainedResolvedToken, &runtimeLookup, TRUE /*mustRestoreHandle*/,
7858 FALSE /* importParent */);
7862 instParam = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup, TRUE /*mustRestoreHandle*/);
7865 if (instParam == nullptr)
7867 assert(compDonotInline());
7873 assert(extraArg == nullptr);
7874 extraArg = gtNewArgList(instParam);
7877 // Inlining may need the exact type context (exactContextHnd) if we're inlining shared generic code, in particular
7878 // to inline 'polytypic' operations such as static field accesses, type tests and method calls which
7879 // rely on the exact context. The exactContextHnd is passed back to the JitInterface at appropriate points.
7880 // exactContextHnd is not currently required when inlining shared generic code into shared
7881 // generic code, since the inliner aborts whenever shared code polytypic operations are encountered
7882 // (e.g. anything marked needsRuntimeLookup)
7883 if (exactContextNeedsRuntimeLookup)
7885 exactContextHnd = nullptr;
7888 if ((opcode == CEE_NEWOBJ) && ((clsFlags & CORINFO_FLG_DELEGATE) != 0))
7890 // Only verifiable cases are supported.
7891 // dup; ldvirtftn; newobj; or ldftn; newobj.
7892 // An IL test could contain an unverifiable sequence; in that case the optimization should not be done.
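// Illustrative IL for the two verifiable patterns mentioned above (a sketch; the delegate type D and the
// target method C::Target are assumed for illustration):
//     ldftn      void C::Target()                        // non-virtual form
//     newobj     instance void D::.ctor(object, native int)
// or
//     dup
//     ldvirtftn  instance void C::Target()               // virtual form
//     newobj     instance void D::.ctor(object, native int)
// If the top of the type stack still carries the ldftn/ldvirtftn token here, we record it in ldftnToken
// so that fgOptimizeDelegateConstructor can consume it below.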
7893 if (impStackHeight() > 0)
7895 typeInfo delegateTypeInfo = impStackTop().seTypeInfo;
7896 if (delegateTypeInfo.IsToken())
7898 ldftnToken = delegateTypeInfo.GetToken();
7903 //-------------------------------------------------------------------------
7904 // The main group of arguments
7906 args = call->gtCall.gtCallArgs = impPopList(sig->numArgs, sig, extraArg);
7910 call->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
7913 //-------------------------------------------------------------------------
7914 // The "this" pointer
7916 if (!(mflags & CORINFO_FLG_STATIC) && !((opcode == CEE_NEWOBJ) && (newobjThis == nullptr)))
7920 if (opcode == CEE_NEWOBJ)
7926 obj = impPopStack().val;
7927 obj = impTransformThis(obj, pConstrainedResolvedToken, constraintCallThisTransform);
7928 if (compDonotInline())
7934 // Store the "this" value in the call
7935 call->gtFlags |= obj->gtFlags & GTF_GLOB_EFFECT;
7936 call->gtCall.gtCallObjp = obj;
7938 // Is this a virtual or interface call?
7939 if (call->gtCall.IsVirtual())
7941 // only true object pointers can be virtual
7942 assert(obj->gtType == TYP_REF);
7944 // See if we can devirtualize.
7945 impDevirtualizeCall(call->AsCall(), &callInfo->hMethod, &callInfo->methodFlags, &callInfo->contextHandle,
7951 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_NONVIRT_SAME_THIS;
7955 //-------------------------------------------------------------------------
7956 // The "this" pointer for "newobj"
7958 if (opcode == CEE_NEWOBJ)
7960 if (clsFlags & CORINFO_FLG_VAROBJSIZE)
7962 assert(!(clsFlags & CORINFO_FLG_ARRAY)); // arrays handled separately
7963 // This is a 'new' of a variable sized object, where
7964 // the constructor is to return the object. In this case
7965 // the constructor claims to return VOID but we know it
7966 // actually returns the new object.
7967 assert(callRetTyp == TYP_VOID);
7968 callRetTyp = TYP_REF;
7969 call->gtType = TYP_REF;
7970 impSpillSpecialSideEff();
7972 impPushOnStack(call, typeInfo(TI_REF, clsHnd));
7976 if (clsFlags & CORINFO_FLG_DELEGATE)
7978 // The new inliner morphs it in impImportCall.
7979 // This will allow us to inline the call to the delegate constructor.
7980 call = fgOptimizeDelegateConstructor(call->AsCall(), &exactContextHnd, ldftnToken);
7983 if (!bIntrinsicImported)
7986 #if defined(DEBUG) || defined(INLINE_DATA)
7988 // Keep track of the raw IL offset of the call
7989 call->gtCall.gtRawILOffset = rawILOffset;
7991 #endif // defined(DEBUG) || defined(INLINE_DATA)
7993 // Is it an inline candidate?
7994 impMarkInlineCandidate(call, exactContextHnd, exactContextNeedsRuntimeLookup, callInfo);
7997 // append the call node.
7998 impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
8000 // Now push the value of the 'new' onto the stack.
8002 // This is a 'new' of a non-variable sized object.
8003 // Append the new node (op1) to the statement list,
8004 // and then push the local holding the value of this
8005 // new instruction on the stack.
8007 if (clsFlags & CORINFO_FLG_VALUECLASS)
8009 assert(newobjThis->gtOper == GT_ADDR && newobjThis->gtOp.gtOp1->gtOper == GT_LCL_VAR);
8011 unsigned tmp = newobjThis->gtOp.gtOp1->gtLclVarCommon.gtLclNum;
8012 impPushOnStack(gtNewLclvNode(tmp, lvaGetRealType(tmp)), verMakeTypeInfo(clsHnd).NormaliseForStack());
8016 if (newobjThis->gtOper == GT_COMMA)
8018 // In coreclr the callout can be inserted even if verification is disabled
8019 // so we cannot rely on tiVerificationNeeded alone
8021 // We must have inserted the callout. Get the real newobj.
8022 newobjThis = newobjThis->gtOp.gtOp2;
8025 assert(newobjThis->gtOper == GT_LCL_VAR);
8026 impPushOnStack(gtNewLclvNode(newobjThis->gtLclVarCommon.gtLclNum, TYP_REF), typeInfo(TI_REF, clsHnd));
8036 // This check cannot be performed for implicit tail calls for the reason
8037 // that impIsImplicitTailCallCandidate() is not checking whether return
8038 // types are compatible before marking a call node with PREFIX_TAILCALL_IMPLICIT.
8039 // As a result it is possible that in the following case, we find that
8040 // the type stack is non-empty if Callee() is considered for implicit tail calling.
8042 // int Caller(..) { .... void Callee(); ret val; ... }
8044 // Note that we cannot check return type compatibility before ImpImportCall()
8045 // as we don't have the required info, or we would need to duplicate some of the logic of impImportCall().
8048 // For implicit tail calls, we perform this check after return types are
8049 // known to be compatible.
8050 if ((tailCall & PREFIX_TAILCALL_EXPLICIT) && (verCurrentState.esStackDepth != 0))
8052 BADCODE("Stack should be empty after tailcall");
8055 // Note that we cannot relax this condition with genActualType() as
8056 // the calling convention dictates that the caller of a function with
8057 // a small-typed return value is responsible for normalizing the return value.
8060 !impTailCallRetTypeCompatible(info.compRetType, info.compMethodInfo->args.retTypeClass, callRetTyp,
8061 callInfo->sig.retTypeClass))
8063 canTailCall = false;
8064 szCanTailCallFailReason = "Return types are not tail call compatible";
8067 // Stack empty check for implicit tail calls.
8068 if (canTailCall && (tailCall & PREFIX_TAILCALL_IMPLICIT) && (verCurrentState.esStackDepth != 0))
8070 #ifdef _TARGET_AMD64_
8071 // JIT64 Compatibility: Opportunistic tail call stack mismatch throws a VerificationException
8072 // in JIT64, not an InvalidProgramException.
8073 Verify(false, "Stack should be empty after tailcall");
8074 #else // _TARGET_64BIT_
8075 BADCODE("Stack should be empty after tailcall");
8076 #endif //!_TARGET_64BIT_
8079 // assert(compCurBB is not a catch, finally or filter block);
8080 // assert(compCurBB is not a try block protected by a finally block);
8082 // Check for permission to tailcall
8083 bool explicitTailCall = (tailCall & PREFIX_TAILCALL_EXPLICIT) != 0;
8085 assert(!explicitTailCall || compCurBB->bbJumpKind == BBJ_RETURN);
8089 // True virtual or indirect calls shouldn't pass in a callee handle.
8090 CORINFO_METHOD_HANDLE exactCalleeHnd =
8091 ((call->gtCall.gtCallType != CT_USER_FUNC) || call->gtCall.IsVirtual()) ? nullptr : methHnd;
8092 GenTree* thisArg = call->gtCall.gtCallObjp;
8094 if (info.compCompHnd->canTailCall(info.compMethodHnd, methHnd, exactCalleeHnd, explicitTailCall))
8097 if (explicitTailCall)
8099 // In case of explicit tail calls, mark it so that it is not considered for in-lining.
8101 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_EXPLICIT_TAILCALL;
8105 printf("\nGTF_CALL_M_EXPLICIT_TAILCALL bit set for call ");
8113 #if FEATURE_TAILCALL_OPT
8114 // Must be an implicit tail call.
8115 assert((tailCall & PREFIX_TAILCALL_IMPLICIT) != 0);
8117 // It is possible that a call node is both an inline candidate and marked
8118 // for opportunistic tail calling. In-lining happens before morphing of
8119 // trees. If in-lining of an in-line candidate gets aborted for whatever
8120 // reason, it will survive to the morphing stage at which point it will be
8121 // transformed into a tail call after performing additional checks.
8123 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_IMPLICIT_TAILCALL;
8127 printf("\nGTF_CALL_M_IMPLICIT_TAILCALL bit set for call ");
8133 #else //! FEATURE_TAILCALL_OPT
8134 NYI("Implicit tail call prefix on a target which doesn't support opportunistic tail calls");
8136 #endif // FEATURE_TAILCALL_OPT
8139 // we can't report success just yet...
8143 canTailCall = false;
8144 // canTailCall reported its reasons already
8148 printf("\ninfo.compCompHnd->canTailCall returned false for call ");
8157 // If this assert fires it means that canTailCall was set to false without setting a reason!
8158 assert(szCanTailCallFailReason != nullptr);
8163 printf("\nRejecting %splicit tail call for call ", explicitTailCall ? "ex" : "im");
8165 printf(": %s\n", szCanTailCallFailReason);
8168 info.compCompHnd->reportTailCallDecision(info.compMethodHnd, methHnd, explicitTailCall, TAILCALL_FAIL,
8169 szCanTailCallFailReason);
8173 // Note: we assume that small return types are already normalized by the managed callee
8174 // or by the pinvoke stub for calls to unmanaged code.
8176 if (!bIntrinsicImported)
8179 // Things that need to be checked when bIntrinsicImported is false.
8182 assert(call->gtOper == GT_CALL);
8183 assert(sig != nullptr);
8185 // Tail calls require us to save the call site's sig info so we can obtain an argument
8186 // copying thunk from the EE later on.
8187 if (call->gtCall.callSig == nullptr)
8189 call->gtCall.callSig = new (this, CMK_CorSig) CORINFO_SIG_INFO;
8190 *call->gtCall.callSig = *sig;
8193 if (compIsForInlining() && opcode == CEE_CALLVIRT)
8195 GenTree* callObj = call->gtCall.gtCallObjp;
8196 assert(callObj != nullptr);
8198 if ((call->gtCall.IsVirtual() || (call->gtFlags & GTF_CALL_NULLCHECK)) &&
8199 impInlineIsGuaranteedThisDerefBeforeAnySideEffects(call->gtCall.gtCallArgs, callObj,
8200 impInlineInfo->inlArgInfo))
8202 impInlineInfo->thisDereferencedFirst = true;
8206 #if defined(DEBUG) || defined(INLINE_DATA)
8208 // Keep track of the raw IL offset of the call
8209 call->gtCall.gtRawILOffset = rawILOffset;
8211 #endif // defined(DEBUG) || defined(INLINE_DATA)
8213 // Is it an inline candidate?
8214 impMarkInlineCandidate(call, exactContextHnd, exactContextNeedsRuntimeLookup, callInfo);
8218 // Push or append the result of the call
8219 if (callRetTyp == TYP_VOID)
8221 if (opcode == CEE_NEWOBJ)
8223 // we actually did push something, so don't spill the thing we just pushed.
8224 assert(verCurrentState.esStackDepth > 0);
8225 impAppendTree(call, verCurrentState.esStackDepth - 1, impCurStmtOffs);
8229 impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
8234 impSpillSpecialSideEff();
8236 if (clsFlags & CORINFO_FLG_ARRAY)
8238 eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
8241 // Find the return type used for verification by interpreting the method signature.
8242 // NB: we are clobbering the already established sig.
8243 if (tiVerificationNeeded)
8245 // Actually, we never get the sig for the original method.
8246 sig = &(callInfo->verSig);
8249 typeInfo tiRetVal = verMakeTypeInfo(sig->retType, sig->retTypeClass);
8250 tiRetVal.NormaliseForStack();
8252 // The CEE_READONLY prefix modifies the verification semantics of an Address
8253 // operation on an array type.
8254 if ((clsFlags & CORINFO_FLG_ARRAY) && readonlyCall && tiRetVal.IsByRef())
8256 tiRetVal.SetIsReadonlyByRef();
8259 if (tiVerificationNeeded)
8261 // We assume all calls return permanent home byrefs. If they
8262 // didn't they wouldn't be verifiable. This is also covering
8263 // the Address() helper for multidimensional arrays.
8264 if (tiRetVal.IsByRef())
8266 tiRetVal.SetIsPermanentHomeByRef();
8272 // Sometimes "call" is not a GT_CALL (if we imported an intrinsic that didn't turn into a call)
8274 bool fatPointerCandidate = call->AsCall()->IsFatPointerCandidate();
8275 if (varTypeIsStruct(callRetTyp))
8277 call = impFixupCallStructReturn(call->AsCall(), sig->retTypeClass);
8280 if ((call->gtFlags & GTF_CALL_INLINE_CANDIDATE) != 0)
8282 assert(opts.OptEnabled(CLFLG_INLINING));
8283 assert(!fatPointerCandidate); // We should not try to inline calli.
8285 // Make the call its own tree (spill the stack if needed).
8286 impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
8288 // TODO: Still using the widened type.
8289 call = gtNewInlineCandidateReturnExpr(call, genActualType(callRetTyp));
8293 if (fatPointerCandidate)
8295 // fatPointer candidates should be in statements of the form call() or var = call().
8296 // Such a form allows us to find statements with fat calls without walking through whole trees,
8297 // and avoids problems with cutting trees.
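// Illustrative sketch (an assumption added for clarity): a fat-pointer calli whose result is not already a
// plain local gets spilled below, turning something like
//     use(calli <ftn>(args))
// into
//     tmpN = calli <ftn>(args);
//     use(tmpN);
// so that later phases can find fat calls at statement granularity.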
8298 assert(!bIntrinsicImported);
8299 assert(IsTargetAbi(CORINFO_CORERT_ABI));
8300 if (call->OperGet() != GT_LCL_VAR) // can be already converted by impFixupCallStructReturn.
8302 unsigned calliSlot = lvaGrabTemp(true DEBUGARG("calli"));
8303 LclVarDsc* varDsc = &lvaTable[calliSlot];
8304 varDsc->lvVerTypeInfo = tiRetVal;
8305 impAssignTempGen(calliSlot, call, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_NONE);
8306 // impAssignTempGen can change src arg list and return type for call that returns struct.
8307 var_types type = genActualType(lvaTable[calliSlot].TypeGet());
8308 call = gtNewLclvNode(calliSlot, type);
8312 // For non-candidates we must also spill, since we
8313 // might have locals live on the eval stack that this call can modify.
8316 // Suppress this for certain well-known call targets
8317 // that we know won't modify locals, eg calls that are
8318 // recognized in gtCanOptimizeTypeEquality. Otherwise
8319 // we may break key fragile pattern matches later on.
8320 bool spillStack = true;
8323 GenTreeCall* callNode = call->AsCall();
8324 if ((callNode->gtCallType == CT_HELPER) && gtIsTypeHandleToRuntimeTypeHelper(callNode))
8328 else if ((callNode->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) != 0)
8336 impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("non-inline candidate call"));
8341 if (!bIntrinsicImported)
8343 //-------------------------------------------------------------------------
8345 /* If the call is of a small type and the callee is managed, the callee will normalize the result upon return.
8347 However, we need to normalize small type values returned by unmanaged
8348 functions (pinvoke). The pinvoke stub does the normalization, but we need to do it here
8349 if we use the shorter inlined pinvoke stub. */
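// Illustrative sketch (assumption): for an inlined pinvoke stub whose target returns, say, an 8-bit value,
// the cast below re-normalizes the returned value, roughly
//     call(TYP_BYTE)  ==>  cast<int <- byte>(call)
// mirroring the normalization the regular (non-inlined) pinvoke stub would have performed.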
8351 if (checkForSmallType && varTypeIsIntegral(callRetTyp) && genTypeSize(callRetTyp) < genTypeSize(TYP_INT))
8353 call = gtNewCastNode(genActualType(callRetTyp), call, false, callRetTyp);
8357 impPushOnStack(call, tiRetVal);
8360 // VSD functions get a new call target each time we getCallInfo, so clear the cache.
8361 // Also, the call info cache for CALLI instructions is largely incomplete, so clear it out.
8362 // if ( (opcode == CEE_CALLI) || (callInfoCache.fetchCallInfo().kind == CORINFO_VIRTUALCALL_STUB))
8363 // callInfoCache.uncacheCallInfo();
8368 #pragma warning(pop)
8371 bool Compiler::impMethodInfo_hasRetBuffArg(CORINFO_METHOD_INFO* methInfo)
8373 CorInfoType corType = methInfo->args.retType;
8375 if ((corType == CORINFO_TYPE_VALUECLASS) || (corType == CORINFO_TYPE_REFANY))
8377 // We have some kind of STRUCT being returned
8379 structPassingKind howToReturnStruct = SPK_Unknown;
8381 var_types returnType = getReturnTypeForStruct(methInfo->args.retTypeClass, &howToReturnStruct);
8383 if (howToReturnStruct == SPK_ByReference)
8394 var_types Compiler::impImportJitTestLabelMark(int numArgs)
8396 TestLabelAndNum tlAndN;
8400 StackEntry se = impPopStack();
8401 assert(se.seTypeInfo.GetType() == TI_INT);
8402 GenTree* val = se.val;
8403 assert(val->IsCnsIntOrI());
8404 tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue();
8406 else if (numArgs == 3)
8408 StackEntry se = impPopStack();
8409 assert(se.seTypeInfo.GetType() == TI_INT);
8410 GenTree* val = se.val;
8411 assert(val->IsCnsIntOrI());
8412 tlAndN.m_num = val->AsIntConCommon()->IconValue();
se = impPopStack();
8414 assert(se.seTypeInfo.GetType() == TI_INT);
val = se.val;
8416 assert(val->IsCnsIntOrI());
8417 tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue();
8424 StackEntry expSe = impPopStack();
8425 GenTree* node = expSe.val;
8427 // There are a small number of special cases, where we actually put the annotation on a subnode.
8428 if (tlAndN.m_tl == TL_LoopHoist && tlAndN.m_num >= 100)
8430 // A loop hoist annotation with value >= 100 means that the expression should be a static field access,
8431 // a GT_IND of a static field address, which should be the sum of a (hoistable) helper call and possibly some
8432 // offset within the static field block whose address is returned by the helper call.
8433 // The annotation is saying that this address calculation, but not the entire access, should be hoisted.
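// Illustrative tree shape (assumption, for reference): the annotated static field access is expected to
// look roughly like
//     GT_IND
//       \-- GT_ADD
//             +-- CALL help <static base helper>        (the hoistable part)
//             \-- GT_CNS_INT <offset within the static block>
// and the code below moves the annotation from the GT_IND down to that address computation.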
8434 GenTree* helperCall = nullptr;
8435 assert(node->OperGet() == GT_IND);
8436 tlAndN.m_num -= 100;
8437 GetNodeTestData()->Set(node->gtOp.gtOp1, tlAndN);
8438 GetNodeTestData()->Remove(node);
8442 GetNodeTestData()->Set(node, tlAndN);
8445 impPushOnStack(node, expSe.seTypeInfo);
8446 return node->TypeGet();
8450 //-----------------------------------------------------------------------------------
8451 // impFixupCallStructReturn: For a call node that returns a struct type either
8452 // adjust the return type to an enregisterable type, or set the flag to indicate
8453 // struct return via retbuf arg.
8456 // call - GT_CALL GenTree node
8457 // retClsHnd - Class handle of return type of the call
8460 // Returns new GenTree node after fixing struct return of call node
8462 GenTree* Compiler::impFixupCallStructReturn(GenTreeCall* call, CORINFO_CLASS_HANDLE retClsHnd)
8464 if (!varTypeIsStruct(call))
8469 call->gtRetClsHnd = retClsHnd;
8471 #if FEATURE_MULTIREG_RET
8472 // Initialize Return type descriptor of call node
8473 ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
8474 retTypeDesc->InitializeStructReturnType(this, retClsHnd);
8475 #endif // FEATURE_MULTIREG_RET
8477 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
8479 // Not allowed for FEATURE_CORCLR which is the only SKU available for System V OSs.
8480 assert(!call->IsVarargs() && "varargs not allowed for System V OSs.");
8482 // The return type will remain as the incoming struct type unless normalized to a
8483 // single eightbyte return type below.
8484 call->gtReturnType = call->gtType;
8486 unsigned retRegCount = retTypeDesc->GetReturnRegCount();
8487 if (retRegCount != 0)
8489 if (retRegCount == 1)
8491 // struct returned in a single register
8492 call->gtReturnType = retTypeDesc->GetReturnRegType(0);
8496 // must be a struct returned in two registers
8497 assert(retRegCount == 2);
8499 if ((!call->CanTailCall()) && (!call->IsInlineCandidate()))
8501 // Force a call returning multi-reg struct to be always of the IR form
//   tmp = call
8504 // No need to assign a multi-reg struct to a local var if:
8505 // - It is a tail call or
8506 // - The call is marked for in-lining later
8507 return impAssignMultiRegTypeToVar(call, retClsHnd);
8513 // struct not returned in registers, i.e. returned via a hidden retbuf arg.
8514 call->gtCallMoreFlags |= GTF_CALL_M_RETBUFFARG;
8517 #else // not FEATURE_UNIX_AMD64_STRUCT_PASSING
8519 // Check for a TYP_STRUCT type that wraps a primitive type.
8520 // Such structs are returned using a single register,
8521 // and we change the return type on those calls here.
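// Illustrative example (assumption): a struct that simply wraps a primitive, e.g.
//     struct Wrapper { int Value; }     // C#
// is returned in a single register; getReturnTypeForStruct is expected to report a primitive passing kind
// with TYP_INT, and the call's gtReturnType is rewritten below accordingly.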
8523 structPassingKind howToReturnStruct;
8524 var_types returnType = getReturnTypeForStruct(retClsHnd, &howToReturnStruct);
8526 if (howToReturnStruct == SPK_ByReference)
8528 assert(returnType == TYP_UNKNOWN);
8529 call->gtCallMoreFlags |= GTF_CALL_M_RETBUFFARG;
8533 assert(returnType != TYP_UNKNOWN);
8534 call->gtReturnType = returnType;
8536 // ToDo: Refactor this common code sequence into its own method as it is used 4+ times
8537 if ((returnType == TYP_LONG) && (compLongUsed == false))
8539 compLongUsed = true;
8541 else if (((returnType == TYP_FLOAT) || (returnType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
8543 compFloatingPointUsed = true;
8546 #if FEATURE_MULTIREG_RET
8547 unsigned retRegCount = retTypeDesc->GetReturnRegCount();
8548 assert(retRegCount != 0);
8550 if (retRegCount >= 2)
8552 if ((!call->CanTailCall()) && (!call->IsInlineCandidate()))
8554 // Force a call returning multi-reg struct to be always of the IR form
//   tmp = call
8557 // No need to assign a multi-reg struct to a local var if:
8558 // - It is a tail call or
8559 // - The call is marked for in-lining later
8560 return impAssignMultiRegTypeToVar(call, retClsHnd);
8563 #endif // FEATURE_MULTIREG_RET
8566 #endif // not FEATURE_UNIX_AMD64_STRUCT_PASSING
8571 /*****************************************************************************
8572 For struct return values, re-type the operand in the case where the ABI
8573 does not use a struct return buffer.
8574 Note that this method is only called for !_TARGET_X86_
8577 GenTree* Compiler::impFixupStructReturnType(GenTree* op, CORINFO_CLASS_HANDLE retClsHnd)
8579 assert(varTypeIsStruct(info.compRetType));
8580 assert(info.compRetBuffArg == BAD_VAR_NUM);
8582 #if defined(_TARGET_XARCH_)
8584 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
8585 // No VarArgs for CoreCLR on x64 Unix
8586 assert(!info.compIsVarArgs);
8588 // Is method returning a multi-reg struct?
8589 if (varTypeIsStruct(info.compRetNativeType) && IsMultiRegReturnedType(retClsHnd))
8591 // In case of multi-reg struct return, we force IR to be one of the following:
8592 // GT_RETURN(lclvar) or GT_RETURN(call). If op is anything other than a
8593 // lclvar or call, it is assigned to a temp to create: temp = op and GT_RETURN(tmp).
8595 if (op->gtOper == GT_LCL_VAR)
8597 // Make sure that this struct stays in memory and doesn't get promoted.
8598 unsigned lclNum = op->gtLclVarCommon.gtLclNum;
8599 lvaTable[lclNum].lvIsMultiRegRet = true;
8601 // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
8602 op->gtFlags |= GTF_DONT_CSE;
8607 if (op->gtOper == GT_CALL)
8612 return impAssignMultiRegTypeToVar(op, retClsHnd);
8614 #else // !FEATURE_UNIX_AMD64_STRUCT_PASSING
8615 assert(info.compRetNativeType != TYP_STRUCT);
8616 #endif // !FEATURE_UNIX_AMD64_STRUCT_PASSING
8618 #elif FEATURE_MULTIREG_RET && defined(_TARGET_ARM_)
8620 if (varTypeIsStruct(info.compRetNativeType) && !info.compIsVarArgs && IsHfa(retClsHnd))
8622 if (op->gtOper == GT_LCL_VAR)
8624 // This LCL_VAR is an HFA return value, it stays as a TYP_STRUCT
8625 unsigned lclNum = op->gtLclVarCommon.gtLclNum;
8626 // Make sure this struct type stays as struct so that we can return it as an HFA
8627 lvaTable[lclNum].lvIsMultiRegRet = true;
8629 // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
8630 op->gtFlags |= GTF_DONT_CSE;
8635 if (op->gtOper == GT_CALL)
8637 if (op->gtCall.IsVarargs())
8639 // We cannot tail call because control needs to return to fixup the calling
8640 // convention for result return.
8641 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL;
8642 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
8649 return impAssignMultiRegTypeToVar(op, retClsHnd);
8652 #elif FEATURE_MULTIREG_RET && defined(_TARGET_ARM64_)
8654 // Is method returning a multi-reg struct?
8655 if (IsMultiRegReturnedType(retClsHnd))
8657 if (op->gtOper == GT_LCL_VAR)
8659 // This LCL_VAR stays as a TYP_STRUCT
8660 unsigned lclNum = op->gtLclVarCommon.gtLclNum;
8662 // Make sure this struct type is not struct promoted
8663 lvaTable[lclNum].lvIsMultiRegRet = true;
8665 // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
8666 op->gtFlags |= GTF_DONT_CSE;
8671 if (op->gtOper == GT_CALL)
8673 if (op->gtCall.IsVarargs())
8675 // We cannot tail call because control needs to return to fixup the calling
8676 // convention for result return.
8677 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL;
8678 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
8685 return impAssignMultiRegTypeToVar(op, retClsHnd);
8688 #endif // FEATURE_MULTIREG_RET && FEATURE_HFA
8691 // adjust the type away from struct to integral
8692 // and no normalizing
8693 if (op->gtOper == GT_LCL_VAR)
8695 op->ChangeOper(GT_LCL_FLD);
8697 else if (op->gtOper == GT_OBJ)
8699 GenTree* op1 = op->AsObj()->Addr();
8701 // We will fold away OBJ/ADDR
8702 // except for OBJ/ADDR/INDEX
8703 // as the array type influences the array element's offset
8704 // Later in this method we change op->gtType to info.compRetNativeType
8705 // This is not correct when op is a GT_INDEX as the starting offset
8706 // for the array elements 'elemOffs' is different for an array of
8707 // TYP_REF than an array of TYP_STRUCT (which simply wraps a TYP_REF)
8708 // Also refer to the GTF_INX_REFARR_LAYOUT flag
8710 if ((op1->gtOper == GT_ADDR) && (op1->gtOp.gtOp1->gtOper != GT_INDEX))
8712 // Change '*(&X)' to 'X' and see if we can do better
8713 op = op1->gtOp.gtOp1;
8714 goto REDO_RETURN_NODE;
8716 op->gtObj.gtClass = NO_CLASS_HANDLE;
8717 op->ChangeOperUnchecked(GT_IND);
8718 op->gtFlags |= GTF_IND_TGTANYWHERE;
8720 else if (op->gtOper == GT_CALL)
8722 if (op->AsCall()->TreatAsHasRetBufArg(this))
8724 // This must be one of those 'special' helpers that don't
8725 // really have a return buffer, but instead use it as a way
8726 // to keep the trees cleaner with fewer address-taken temps.
8728 // Well now we have to materialize the return buffer as
8729 // an address-taken temp. Then we can return the temp.
8731 // NOTE: this code assumes that since the call directly
8732 // feeds the return, then the call must be returning the
8733 // same structure/class/type.
8735 unsigned tmpNum = lvaGrabTemp(true DEBUGARG("pseudo return buffer"));
8737 // No need to spill anything as we're about to return.
8738 impAssignTempGen(tmpNum, op, info.compMethodInfo->args.retTypeClass, (unsigned)CHECK_SPILL_NONE);
8740 // Don't create both a GT_ADDR & GT_OBJ just to undo all of that; instead,
8741 // jump directly to a GT_LCL_FLD.
8742 op = gtNewLclvNode(tmpNum, info.compRetNativeType);
8743 op->ChangeOper(GT_LCL_FLD);
8747 assert(info.compRetNativeType == op->gtCall.gtReturnType);
8749 // Don't change the gtType of the node just yet, it will get changed later.
8753 #if defined(FEATURE_HW_INTRINSICS) && defined(_TARGET_ARM64_)
8754 else if ((op->gtOper == GT_HWIntrinsic) && varTypeIsSIMD(op->gtType))
8756 // TODO-ARM64-FIXME Implement ARM64 ABI for Short Vectors properly
8757 // assert(op->gtType == info.compRetNativeType)
8758 if (op->gtType != info.compRetNativeType)
8760 // Insert a register move to keep target type of SIMD intrinsic intact
8761 op = gtNewScalarHWIntrinsicNode(info.compRetNativeType, op, NI_ARM64_NONE_MOV);
8765 else if (op->gtOper == GT_COMMA)
8767 op->gtOp.gtOp2 = impFixupStructReturnType(op->gtOp.gtOp2, retClsHnd);
8770 op->gtType = info.compRetNativeType;
8775 /*****************************************************************************
8776 CEE_LEAVE may be jumping out of a protected block, viz, a catch or a
8777 finally-protected try. We find the finally blocks protecting the current
8778 offset (in order) by walking over the complete exception table and
8779 finding enclosing clauses. This assumes that the table is sorted.
8780 This will create a series of BBJ_CALLFINALLY -> BBJ_CALLFINALLY ... -> BBJ_ALWAYS.
8782 If we are leaving a catch handler, we need to attach the
8783 CPX_ENDCATCHes to the correct BBJ_CALLFINALLY blocks.
8785 After this function, the BBJ_LEAVE block has been converted to a different type.
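// Illustrative example (a hedged sketch, not from the original comments): for IL shaped like
//     .try { .try { ... leave L } finally { ... endfinally } } finally { ... endfinally }
// a leave that exits both finally-protected trys becomes a chain
//     BBJ_CALLFINALLY (inner finally) -> BBJ_CALLFINALLY (outer finally) -> BBJ_ALWAYS -> L
// with a GT_END_LFIN marker appended for each locally-invoked finally in the non-funclet model below.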
8788 #if !FEATURE_EH_FUNCLETS
8790 void Compiler::impImportLeave(BasicBlock* block)
8795 printf("\nBefore import CEE_LEAVE:\n");
8796 fgDispBasicBlocks();
8801 bool invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created)
8802 unsigned blkAddr = block->bbCodeOffs;
8803 BasicBlock* leaveTarget = block->bbJumpDest;
8804 unsigned jmpAddr = leaveTarget->bbCodeOffs;
8806 // LEAVE clears the stack: spill side effects and set the stack depth to 0
8808 impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave"));
8809 verCurrentState.esStackDepth = 0;
8811 assert(block->bbJumpKind == BBJ_LEAVE);
8812 assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != NULL); // should be a BB boundary
8814 BasicBlock* step = DUMMY_INIT(NULL);
8815 unsigned encFinallies = 0; // Number of enclosing finallies.
8816 GenTree* endCatches = NULL;
8817 GenTree* endLFin = NULL; // The statement tree to indicate the end of locally-invoked finally.
8822 for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
8824 // Grab the handler offsets
8826 IL_OFFSET tryBeg = HBtab->ebdTryBegOffs();
8827 IL_OFFSET tryEnd = HBtab->ebdTryEndOffs();
8828 IL_OFFSET hndBeg = HBtab->ebdHndBegOffs();
8829 IL_OFFSET hndEnd = HBtab->ebdHndEndOffs();
8831 /* Is this a catch-handler we are CEE_LEAVEing out of?
8832 * If so, we need to call CORINFO_HELP_ENDCATCH.
8835 if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd))
8837 // Can't CEE_LEAVE out of a finally/fault handler
8838 if (HBtab->HasFinallyOrFaultHandler())
8839 BADCODE("leave out of fault/finally block");
8841 // Create the call to CORINFO_HELP_ENDCATCH
8842 GenTree* endCatch = gtNewHelperCallNode(CORINFO_HELP_ENDCATCH, TYP_VOID);
8844 // Make a list of all the currently pending endCatches
8846 endCatches = gtNewOperNode(GT_COMMA, TYP_VOID, endCatches, endCatch);
8848 endCatches = endCatch;
8853 printf("impImportLeave - BB%02u jumping out of catch handler EH#%u, adding call to "
8854 "CORINFO_HELP_ENDCATCH\n",
8855 block->bbNum, XTnum);
8859 else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
8860 !jitIsBetween(jmpAddr, tryBeg, tryEnd))
8862 /* This is a finally-protected try we are jumping out of */
8864 /* If there are any pending endCatches, and we have already
8865 jumped out of a finally-protected try, then the endCatches
8866 have to be put in a block in an outer try for async
8867 exceptions to work correctly.
8868 Else, just append to the original block */
8870 BasicBlock* callBlock;
8872 assert(!encFinallies == !endLFin); // if we have finallies, we better have an endLFin tree, and vice-versa
8874 if (encFinallies == 0)
8876 assert(step == DUMMY_INIT(NULL));
8878 callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY
8881 impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
8886 printf("impImportLeave - jumping out of a finally-protected try, convert block to BBJ_CALLFINALLY "
8888 callBlock->dspToString());
8894 assert(step != DUMMY_INIT(NULL));
8896 /* Calling the finally block */
8897 callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, XTnum + 1, 0, step);
8898 assert(step->bbJumpKind == BBJ_ALWAYS);
8899 step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next
8900 // finally in the chain)
8901 step->bbJumpDest->bbRefs++;
8903 /* The new block will inherit this block's weight */
8904 callBlock->setBBWeight(block->bbWeight);
8905 callBlock->bbFlags |= block->bbFlags & BBF_RUN_RARELY;
8910 printf("impImportLeave - jumping out of a finally-protected try, new BBJ_CALLFINALLY block %s\n",
8911 callBlock->dspToString());
8919 lastStmt = gtNewStmt(endCatches);
8920 endLFin->gtNext = lastStmt;
8921 lastStmt->gtPrev = endLFin;
8928 // note that this sets BBF_IMPORTED on the block
8929 impEndTreeList(callBlock, endLFin, lastStmt);
8932 step = fgNewBBafter(BBJ_ALWAYS, callBlock, true);
8933 /* The new block will inherit this block's weight */
8934 step->setBBWeight(block->bbWeight);
8935 step->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS;
8940 printf("impImportLeave - jumping out of a finally-protected try, created step (BBJ_ALWAYS) block %s\n",
8941 step->dspToString());
8945 unsigned finallyNesting = compHndBBtab[XTnum].ebdHandlerNestingLevel;
8946 assert(finallyNesting <= compHndBBtabCount);
8948 callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler.
8949 endLFin = new (this, GT_END_LFIN) GenTreeVal(GT_END_LFIN, TYP_VOID, finallyNesting);
8950 endLFin = gtNewStmt(endLFin);
8955 invalidatePreds = true;
8959 /* Append any remaining endCatches, if any */
8961 assert(!encFinallies == !endLFin);
8963 if (encFinallies == 0)
8965 assert(step == DUMMY_INIT(NULL));
8966 block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS
8969 impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
8974 printf("impImportLeave - no enclosing finally-protected try blocks; convert CEE_LEAVE block to BBJ_ALWAYS "
8976 block->dspToString());
8982 // If leaveTarget is the start of another try block, we want to make sure that
8983 // we do not insert finalStep into that try block. Hence, we find the enclosing try block.
8985 unsigned tryIndex = bbFindInnermostCommonTryRegion(step, leaveTarget);
8987 // Insert a new BB either in the try region indicated by tryIndex or
8988 // the handler region indicated by leaveTarget->bbHndIndex,
8989 // depending on which is the inner region.
8990 BasicBlock* finalStep = fgNewBBinRegion(BBJ_ALWAYS, tryIndex, leaveTarget->bbHndIndex, step);
8991 finalStep->bbFlags |= BBF_KEEP_BBJ_ALWAYS;
8992 step->bbJumpDest = finalStep;
8994 /* The new block will inherit this block's weight */
8995 finalStep->setBBWeight(block->bbWeight);
8996 finalStep->bbFlags |= block->bbFlags & BBF_RUN_RARELY;
9001 printf("impImportLeave - finalStep block required (encFinallies(%d) > 0), new block %s\n", encFinallies,
9002 finalStep->dspToString());
9010 lastStmt = gtNewStmt(endCatches);
9011 endLFin->gtNext = lastStmt;
9012 lastStmt->gtPrev = endLFin;
9019 impEndTreeList(finalStep, endLFin, lastStmt);
9021 finalStep->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE
9023 // Queue up the jump target for importing
9025 impImportBlockPending(leaveTarget);
9027 invalidatePreds = true;
9030 if (invalidatePreds && fgComputePredsDone)
9032 JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n");
9037 fgVerifyHandlerTab();
9041 printf("\nAfter import CEE_LEAVE:\n");
9042 fgDispBasicBlocks();
9048 #else // FEATURE_EH_FUNCLETS
9050 void Compiler::impImportLeave(BasicBlock* block)
9055 printf("\nBefore import CEE_LEAVE in BB%02u (targeting BB%02u):\n", block->bbNum, block->bbJumpDest->bbNum);
9056 fgDispBasicBlocks();
9061 bool invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created)
9062 unsigned blkAddr = block->bbCodeOffs;
9063 BasicBlock* leaveTarget = block->bbJumpDest;
9064 unsigned jmpAddr = leaveTarget->bbCodeOffs;
9066 // LEAVE clears the stack: spill side effects and set the stack depth to 0
9068 impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave"));
9069 verCurrentState.esStackDepth = 0;
9071 assert(block->bbJumpKind == BBJ_LEAVE);
9072 assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != nullptr); // should be a BB boundary
9074 BasicBlock* step = nullptr;
9078 // No step type; step == NULL.
9081 // Is the step block the BBJ_ALWAYS block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair?
9082 // That is, is step->bbJumpDest where a finally will return to?
9085 // The step block is a catch return.
9088 // The step block is in a "try", created as the target for a finally return or the target for a catch return.
9091 StepType stepType = ST_None;
9096 for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
9098 // Grab the handler offsets
9100 IL_OFFSET tryBeg = HBtab->ebdTryBegOffs();
9101 IL_OFFSET tryEnd = HBtab->ebdTryEndOffs();
9102 IL_OFFSET hndBeg = HBtab->ebdHndBegOffs();
9103 IL_OFFSET hndEnd = HBtab->ebdHndEndOffs();
9105 /* Is this a catch-handler we are CEE_LEAVEing out of?
9108 if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd))
9110 // Can't CEE_LEAVE out of a finally/fault handler
9111 if (HBtab->HasFinallyOrFaultHandler())
9113 BADCODE("leave out of fault/finally block");
9116 /* We are jumping out of a catch */
9118 if (step == nullptr)
9121 step->bbJumpKind = BBJ_EHCATCHRET; // convert the BBJ_LEAVE to BBJ_EHCATCHRET
9122 stepType = ST_Catch;
9127 printf("impImportLeave - jumping out of a catch (EH#%u), convert block BB%02u to BBJ_EHCATCHRET "
9129 XTnum, step->bbNum);
9135 BasicBlock* exitBlock;
9137 /* Create a new catch exit block in the catch region for the existing step block to jump to in this scope */
9139 exitBlock = fgNewBBinRegion(BBJ_EHCATCHRET, 0, XTnum + 1, step);
9141 assert(step->bbJumpKind == BBJ_ALWAYS || step->bbJumpKind == BBJ_EHCATCHRET);
9142 step->bbJumpDest = exitBlock; // the previous step (maybe a call to a nested finally, or a nested catch
9143 // exit) returns to this block
9144 step->bbJumpDest->bbRefs++;
9146 #if defined(_TARGET_ARM_)
9147 if (stepType == ST_FinallyReturn)
9149 assert(step->bbJumpKind == BBJ_ALWAYS);
9150 // Mark the target of a finally return
9151 step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
9153 #endif // defined(_TARGET_ARM_)
9155 /* The new block will inherit this block's weight */
9156 exitBlock->setBBWeight(block->bbWeight);
9157 exitBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
9159 /* This exit block is the new step */
9161 stepType = ST_Catch;
9163 invalidatePreds = true;
9168 printf("impImportLeave - jumping out of a catch (EH#%u), new BBJ_EHCATCHRET block BB%02u\n", XTnum,
9174 else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
9175 !jitIsBetween(jmpAddr, tryBeg, tryEnd))
9177 /* We are jumping out of a finally-protected try */
9179 BasicBlock* callBlock;
9181 if (step == nullptr)
9183 #if FEATURE_EH_CALLFINALLY_THUNKS
9185 // Put the call to the finally in the enclosing region.
9186 unsigned callFinallyTryIndex =
9187 (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1;
9188 unsigned callFinallyHndIndex =
9189 (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1;
9190 callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, block);
9192 // Convert the BBJ_LEAVE to BBJ_ALWAYS, jumping to the new BBJ_CALLFINALLY. This is because
9193 // the new BBJ_CALLFINALLY is in a different EH region, thus it can't just replace the BBJ_LEAVE,
9194 // which might be in the middle of the "try". In most cases, the BBJ_ALWAYS will jump to the
9195 // next block, and flow optimizations will remove it.
9196 block->bbJumpKind = BBJ_ALWAYS;
9197 block->bbJumpDest = callBlock;
9198 block->bbJumpDest->bbRefs++;
9200 /* The new block will inherit this block's weight */
9201 callBlock->setBBWeight(block->bbWeight);
9202 callBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
9207 printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block BB%02u to "
9208 "BBJ_ALWAYS, add BBJ_CALLFINALLY block BB%02u\n",
9209 XTnum, block->bbNum, callBlock->bbNum);
9213 #else // !FEATURE_EH_CALLFINALLY_THUNKS
9216 callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY
9221 printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block BB%02u to "
9222 "BBJ_CALLFINALLY block\n",
9223 XTnum, callBlock->bbNum);
9227 #endif // !FEATURE_EH_CALLFINALLY_THUNKS
9231 // Calling the finally block. We already have a step block that is either the call-to-finally from a
9232 // more nested try/finally (thus we are jumping out of multiple nested 'try' blocks, each protected by
9233 // a 'finally'), or the step block is the return from a catch.
9235 // Due to ThreadAbortException, we can't have the catch return target the call-to-finally block
9236 // directly. Note that if a 'catch' ends without resetting the ThreadAbortException, the VM will
9237 // automatically re-raise the exception, using the return address of the catch (that is, the target
9238 // block of the BBJ_EHCATCHRET) as the re-raise address. If this address is in a finally, the VM will
9239 // refuse to do the re-raise, and the ThreadAbortException will get eaten (and lost). On AMD64/ARM64,
9240 // we put the call-to-finally thunk in a special "cloned finally" EH region that does look like a
9241 // finally clause to the VM. Thus, on these platforms, we can't have BBJ_EHCATCHRET target a
9242 // BBJ_CALLFINALLY directly. (Note that on ARM32, we don't mark the thunk specially -- it lives directly
9243 // within the 'try' region protected by the finally, since we generate code in such a way that execution
9244 // never returns to the call-to-finally call, and the finally-protected 'try' region doesn't appear on stack walks.)
9247 assert(step->bbJumpKind == BBJ_ALWAYS || step->bbJumpKind == BBJ_EHCATCHRET);
9249 #if FEATURE_EH_CALLFINALLY_THUNKS
9250 if (step->bbJumpKind == BBJ_EHCATCHRET)
9252 // Need to create another step block in the 'try' region that will actually branch to the
9253 // call-to-finally thunk.
9254 BasicBlock* step2 = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step);
9255 step->bbJumpDest = step2;
9256 step->bbJumpDest->bbRefs++;
9257 step2->setBBWeight(block->bbWeight);
9258 step2->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
9263 printf("impImportLeave - jumping out of a finally-protected try (EH#%u), step block is "
9264 "BBJ_EHCATCHRET (BB%02u), new BBJ_ALWAYS step-step block BB%02u\n",
9265 XTnum, step->bbNum, step2->bbNum);
9270 assert(stepType == ST_Catch); // Leave it as catch type for now.
9272 #endif // FEATURE_EH_CALLFINALLY_THUNKS
9274 #if FEATURE_EH_CALLFINALLY_THUNKS
9275 unsigned callFinallyTryIndex =
9276 (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1;
9277 unsigned callFinallyHndIndex =
9278 (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1;
9279 #else // !FEATURE_EH_CALLFINALLY_THUNKS
9280 unsigned callFinallyTryIndex = XTnum + 1;
9281 unsigned callFinallyHndIndex = 0; // don't care
9282 #endif // !FEATURE_EH_CALLFINALLY_THUNKS
9284 callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, step);
9285 step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next
9286 // finally in the chain)
9287 step->bbJumpDest->bbRefs++;
9289 #if defined(_TARGET_ARM_)
9290 if (stepType == ST_FinallyReturn)
9292 assert(step->bbJumpKind == BBJ_ALWAYS);
9293 // Mark the target of a finally return
9294 step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
9296 #endif // defined(_TARGET_ARM_)
9298 /* The new block will inherit this block's weight */
9299 callBlock->setBBWeight(block->bbWeight);
9300 callBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
9305 printf("impImportLeave - jumping out of a finally-protected try (EH#%u), new BBJ_CALLFINALLY block "
9307 XTnum, callBlock->bbNum);
9312 step = fgNewBBafter(BBJ_ALWAYS, callBlock, true);
9313 stepType = ST_FinallyReturn;
9315 /* The new block will inherit this block's weight */
9316 step->setBBWeight(block->bbWeight);
9317 step->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS;
9322 printf("impImportLeave - jumping out of a finally-protected try (EH#%u), created step (BBJ_ALWAYS) "
9324 XTnum, step->bbNum);
9328 callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler.
9330 invalidatePreds = true;
9332 else if (HBtab->HasCatchHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
9333 !jitIsBetween(jmpAddr, tryBeg, tryEnd))
9335 // We are jumping out of a catch-protected try.
9337 // If we are returning from a call to a finally, then we must have a step block within a try
9338 // that is protected by a catch. This is so when unwinding from that finally (e.g., if code within the
9339 // finally raises an exception), the VM will find this step block, notice that it is in a protected region,
9340 // and invoke the appropriate catch.
9342 // We also need to handle a special case with the handling of ThreadAbortException. If a try/catch
9343 // catches a ThreadAbortException (which might be because it catches a parent, e.g. System.Exception),
9344 // and the catch doesn't call System.Threading.Thread::ResetAbort(), then when the catch returns to the VM,
9345 // the VM will automatically re-raise the ThreadAbortException. When it does this, it uses the target
9346 // address of the catch return as the new exception address. That is, the re-raised exception appears to
9347 // occur at the catch return address. If this exception return address skips an enclosing try/catch that
9348 // catches ThreadAbortException, then the enclosing try/catch will not catch the exception, as it should.
9353 // // something here raises ThreadAbortException
9354 // LEAVE LABEL_1; // no need to stop at LABEL_2
9355 // } catch (Exception) {
9356 // // This catches ThreadAbortException, but doesn't call System.Threading.Thread::ResetAbort(), so
9357 // // ThreadAbortException is re-raised by the VM at the address specified by the LEAVE opcode.
9358 // // This is bad, since it means the outer try/catch won't get a chance to catch the re-raised
9359 // // ThreadAbortException. So, instead, create step block LABEL_2 and LEAVE to that. We only
9360 // // need to do this transformation if the current EH block is a try/catch that catches
9361 // // ThreadAbortException (or one of its parents), however we might not be able to find that
9362 // // information, so currently we do it for all catch types.
9363 // LEAVE LABEL_1; // Convert this to LEAVE LABEL2;
9365 // LABEL_2: LEAVE LABEL_1; // inserted by this step creation code
9366 // } catch (ThreadAbortException) {
9370 // Note that this pattern isn't theoretical: it occurs in ASP.NET, in IL code generated by the Roslyn C# compiler.
9373 if ((stepType == ST_FinallyReturn) || (stepType == ST_Catch))
9375 BasicBlock* catchStep;
9379 if (stepType == ST_FinallyReturn)
9381 assert(step->bbJumpKind == BBJ_ALWAYS);
9385 assert(stepType == ST_Catch);
9386 assert(step->bbJumpKind == BBJ_EHCATCHRET);
9389 /* Create a new exit block in the try region for the existing step block to jump to in this scope */
9390 catchStep = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step);
9391 step->bbJumpDest = catchStep;
9392 step->bbJumpDest->bbRefs++;
9394 #if defined(_TARGET_ARM_)
9395 if (stepType == ST_FinallyReturn)
9397 // Mark the target of a finally return
9398 step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
9400 #endif // defined(_TARGET_ARM_)
9402 /* The new block will inherit this block's weight */
9403 catchStep->setBBWeight(block->bbWeight);
9404 catchStep->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
9409 if (stepType == ST_FinallyReturn)
9411 printf("impImportLeave - return from finally jumping out of a catch-protected try (EH#%u), new "
9412 "BBJ_ALWAYS block BB%02u\n",
9413 XTnum, catchStep->bbNum);
9417 assert(stepType == ST_Catch);
9418 printf("impImportLeave - return from catch jumping out of a catch-protected try (EH#%u), new "
9419 "BBJ_ALWAYS block BB%02u\n",
9420 XTnum, catchStep->bbNum);
9425 /* This block is the new step */
9429 invalidatePreds = true;
9434 if (step == nullptr)
9436 block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS
9441 printf("impImportLeave - no enclosing finally-protected try blocks or catch handlers; convert CEE_LEAVE "
9442 "block BB%02u to BBJ_ALWAYS\n",
9449 step->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE
9451 #if defined(_TARGET_ARM_)
9452 if (stepType == ST_FinallyReturn)
9454 assert(step->bbJumpKind == BBJ_ALWAYS);
9455 // Mark the target of a finally return
9456 step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
9458 #endif // defined(_TARGET_ARM_)
9463 printf("impImportLeave - final destination of step blocks set to BB%02u\n", leaveTarget->bbNum);
9467 // Queue up the jump target for importing
9469 impImportBlockPending(leaveTarget);
9472 if (invalidatePreds && fgComputePredsDone)
9474 JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n");
9479 fgVerifyHandlerTab();
9483 printf("\nAfter import CEE_LEAVE:\n");
9484 fgDispBasicBlocks();
9490 #endif // FEATURE_EH_FUNCLETS
9492 /*****************************************************************************/
9493 // This is called when reimporting a leave block. It resets the JumpKind,
9494 // JumpDest, and bbNext to the original values
9496 void Compiler::impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr)
9498 #if FEATURE_EH_FUNCLETS
9499 // With EH funclets, while importing a leave opcode we create another block ending with BBJ_ALWAYS (call it B1),
9500 // and the block containing the leave (say B0) is marked as BBJ_CALLFINALLY. If for some reason we reimport B0,
9501 // it is reset (in this routine) to end with BBJ_LEAVE, and further down, when B0 is reimported, we
9502 // create another BBJ_ALWAYS (call it B2). In this process B1 gets orphaned, and any blocks for which B1 is the
9503 // only predecessor are also considered orphans and attempted to be deleted.
9510 // leave OUTSIDE; // B0 is the block containing this leave, following this would be B1
9515 // In the above nested try-finally example, we create a step block (call it Bstep) which in turn branches to the
9516 // block that a finally would branch to (and such a block is marked as a finally target). Block B1 branches to the
9517 // step block. Because of the re-import of B0, Bstep is also orphaned. Since Bstep is a finally target it cannot be
9518 // removed. To work around this we will duplicate B0 (call it B0Dup) before resetting. B0Dup is marked as BBJ_CALLFINALLY and
9519 // only serves to pair up with B1 (BBJ_ALWAYS) that got orphaned. Now during orphan block deletion B0Dup and B1
9520 // will be treated as pair and handled correctly.
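//
// A minimal sketch of the shapes involved (illustrative only; block names follow the comment above, not real
// block numbers):
//
//   before reimport :  B0 (BBJ_CALLFINALLY) --> B1 (BBJ_ALWAYS) --> Bstep (BBF_FINALLY_TARGET)
//   reset of B0     :  B0 becomes BBJ_LEAVE again; reimporting it creates a new B2 (BBJ_ALWAYS), so B1 is orphaned
//   workaround      :  B0Dup (BBJ_CALLFINALLY, bbRefs = 0, bbWeight = 0) is inserted right after B0 purely to
//                      pair with the orphaned B1 during orphan-block deletion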
9521 if (block->bbJumpKind == BBJ_CALLFINALLY)
9523 BasicBlock* dupBlock = bbNewBasicBlock(block->bbJumpKind);
9524 dupBlock->bbFlags = block->bbFlags;
9525 dupBlock->bbJumpDest = block->bbJumpDest;
9526 dupBlock->copyEHRegion(block);
9527 dupBlock->bbCatchTyp = block->bbCatchTyp;
9529 // Mark this block as
9530 // a) not referenced by any other block, to make sure that it gets deleted
// b) having zero weight
9532 // c) prevented from being imported
9535 dupBlock->bbRefs = 0;
9536 dupBlock->bbWeight = 0;
9537 dupBlock->bbFlags |= BBF_IMPORTED | BBF_INTERNAL | BBF_RUN_RARELY;
9539 // Insert the block right after the block which is getting reset so that BBJ_CALLFINALLY and BBJ_ALWAYS
9540 // will be next to each other.
9541 fgInsertBBafter(block, dupBlock);
9546 printf("New Basic Block BB%02u duplicate of BB%02u created.\n", dupBlock->bbNum, block->bbNum);
9550 #endif // FEATURE_EH_FUNCLETS
9552 block->bbJumpKind = BBJ_LEAVE;
9554 block->bbJumpDest = fgLookupBB(jmpAddr);
9556 // We will leave the BBJ_ALWAYS block we introduced. When it's reimported
9557 // the BBJ_ALWAYS block will be unreachable, and will be removed after. The
9558 // reason we don't want to remove the block at this point is that if we call
9559 // fgInitBBLookup() again we will do it wrong as the BBJ_ALWAYS block won't be
9560 // added and the linked list length will be different than fgBBcount.
9563 /*****************************************************************************/
9564 // Get the first non-prefix opcode. Used for verification of valid combinations
9565 // of prefixes and actual opcodes.
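// For example (illustrative only), given the IL byte sequence "volatile. unaligned. 1 ldind.i4", this skips
// over both prefixes (and the unaligned. operand) and returns CEE_LDIND_I4.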
9567 static OPCODE impGetNonPrefixOpcode(const BYTE* codeAddr, const BYTE* codeEndp)
9569 while (codeAddr < codeEndp)
9571 OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr);
9572 codeAddr += sizeof(__int8);
9574 if (opcode == CEE_PREFIX1)
9576 if (codeAddr >= codeEndp)
9580 opcode = (OPCODE)(getU1LittleEndian(codeAddr) + 256);
9581 codeAddr += sizeof(__int8);
9589 case CEE_CONSTRAINED:
9596 codeAddr += opcodeSizes[opcode];
9602 /*****************************************************************************/
9603 // Checks whether the opcode is a valid opcode for volatile. and unaligned. prefixes
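// For example (illustrative only): "volatile. ldsfld" and "unaligned. 4 ldind.i4" are accepted, whereas
// something like "volatile. add" raises BADCODE, since add is not a memory-access opcode. Note that
// ldsfld/stsfld are only permitted for the volatile. prefix, not for unaligned. (see volatilePrefix below).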
9605 static void impValidateMemoryAccessOpcode(const BYTE* codeAddr, const BYTE* codeEndp, bool volatilePrefix)
9607 OPCODE opcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
9610 // The opcodes of all the ldind and stind variants happen to be contiguous, except stind.i.
9611 ((CEE_LDIND_I1 <= opcode) && (opcode <= CEE_STIND_R8)) || (opcode == CEE_STIND_I) ||
9612 (opcode == CEE_LDFLD) || (opcode == CEE_STFLD) || (opcode == CEE_LDOBJ) || (opcode == CEE_STOBJ) ||
9613 (opcode == CEE_INITBLK) || (opcode == CEE_CPBLK) ||
9614 // the volatile. prefix is also allowed with ldsfld and stsfld
9615 (volatilePrefix && ((opcode == CEE_LDSFLD) || (opcode == CEE_STSFLD)))))
9617 BADCODE("Invalid opcode for unaligned. or volatile. prefix");
9621 /*****************************************************************************/
9625 #undef RETURN // undef contracts RETURN macro
9640 const static controlFlow_t controlFlow[] = {
9641 #define OPDEF(c, s, pop, push, args, type, l, s1, s2, flow) flow,
9642 #include "opcode.def"
9648 /*****************************************************************************
9649 * Determine the result type of an arithmetic operation
9650 * On 64-bit targets, inserts upcasts when native int is mixed with int32
9652 var_types Compiler::impGetByRefResultType(genTreeOps oper, bool fUnsigned, GenTree** pOp1, GenTree** pOp2)
9654 var_types type = TYP_UNDEF;
9655 GenTree* op1 = *pOp1;
9656 GenTree* op2 = *pOp2;
9658 // Arithmetic operations are generally only allowed with
9659 // primitive types, but certain operations are also allowed with byrefs
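//
// A rough summary of the byref cases handled below (sketch only, not exhaustive):
//
//   byref - byref   => native int   (distance between two managed pointers)
//   int   - byref   => native int   (see the managed C++ note below)
//   byref - int     => byref
//   byref + int     => byref        (likewise int + byref; byref + byref is not allowed)
//
// e.g. for "p + 4" where p is TYP_BYREF and 4 is TYP_INT, the result is TYP_BYREF, and on a 64-bit target
// the int operand is first upcast to TYP_I_IMPL.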
9662 if ((oper == GT_SUB) && (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF))
9664 if ((genActualType(op1->TypeGet()) == TYP_BYREF) && (genActualType(op2->TypeGet()) == TYP_BYREF))
9666 // byref1-byref2 => gives a native int
9669 else if (genActualTypeIsIntOrI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_BYREF))
9671 // [native] int - byref => gives a native int
9674 // The reason is that it is possible, in managed C++,
9675 // to have a tree like this:
9682 //     GT_SUB( const(h) int , addr byref )
9684 // <BUGNUM> VSW 318822 </BUGNUM>
9686 // So here we decide to make the resulting type a native int.
9687 CLANG_FORMAT_COMMENT_ANCHOR;
9689 #ifdef _TARGET_64BIT_
9690 if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
9692 // insert an explicit upcast
9693 op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
9695 #endif // _TARGET_64BIT_
9701 // byref - [native] int => gives a byref
9702 assert(genActualType(op1->TypeGet()) == TYP_BYREF && genActualTypeIsIntOrI(op2->TypeGet()));
9704 #ifdef _TARGET_64BIT_
9705 if ((genActualType(op2->TypeGet()) != TYP_I_IMPL))
9707 // insert an explicit upcast
9708 op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
9710 #endif // _TARGET_64BIT_
9715 else if ((oper == GT_ADD) &&
9716 (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF))
9718 // byref + [native] int => gives a byref
9720 // [native] int + byref => gives a byref
9722 // only one can be a byref : byref op byref not allowed
9723 assert(genActualType(op1->TypeGet()) != TYP_BYREF || genActualType(op2->TypeGet()) != TYP_BYREF);
9724 assert(genActualTypeIsIntOrI(op1->TypeGet()) || genActualTypeIsIntOrI(op2->TypeGet()));
9726 #ifdef _TARGET_64BIT_
9727 if (genActualType(op2->TypeGet()) == TYP_BYREF)
9729 if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
9731 // insert an explicit upcast
9732 op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
9735 else if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
9737 // insert an explicit upcast
9738 op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
9740 #endif // _TARGET_64BIT_
9744 #ifdef _TARGET_64BIT_
9745 else if (genActualType(op1->TypeGet()) == TYP_I_IMPL || genActualType(op2->TypeGet()) == TYP_I_IMPL)
9747 assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType));
9749 // int + long => gives long
9750 // long + int => gives long
9751 // we get this because in the IL the 'long' isn't an Int64, it's just a native int (IntPtr)
9753 if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
9755 // insert an explicit upcast
9756 op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
9758 else if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
9760 // insert an explicit upcast
9761 op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
9766 #else // 32-bit TARGET
9767 else if (genActualType(op1->TypeGet()) == TYP_LONG || genActualType(op2->TypeGet()) == TYP_LONG)
9769 assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType));
9771 // int + long => gives long
9772 // long + int => gives long
9776 #endif // _TARGET_64BIT_
9779 // int + int => gives an int
9780 assert(genActualType(op1->TypeGet()) != TYP_BYREF && genActualType(op2->TypeGet()) != TYP_BYREF);
9782 assert(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
9783 varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
9785 type = genActualType(op1->gtType);
9787 #if FEATURE_X87_DOUBLES
9789 // For x87, since we only have 1 size of registers, prefer double
9790 // For everybody else, be more precise
9791 if (type == TYP_FLOAT)
9794 #else // !FEATURE_X87_DOUBLES
9796 // If both operands are TYP_FLOAT, then leave it as TYP_FLOAT.
9797 // Otherwise, turn floats into doubles
9798 if ((type == TYP_FLOAT) && (genActualType(op2->gtType) != TYP_FLOAT))
9800 assert(genActualType(op2->gtType) == TYP_DOUBLE);
9804 #endif // FEATURE_X87_DOUBLES
9807 #if FEATURE_X87_DOUBLES
9808 assert(type == TYP_BYREF || type == TYP_DOUBLE || type == TYP_LONG || type == TYP_INT);
9809 #else // FEATURE_X87_DOUBLES
9810 assert(type == TYP_BYREF || type == TYP_DOUBLE || type == TYP_FLOAT || type == TYP_LONG || type == TYP_INT);
9811 #endif // FEATURE_X87_DOUBLES
9816 //------------------------------------------------------------------------
9817 // impOptimizeCastClassOrIsInst: attempt to resolve a cast when jitting
9820 // op1 - value to cast
9821 // pResolvedToken - resolved token for type to cast to
9822 // isCastClass - true if this is a castclass, false if isinst
9825 // tree representing optimized cast, or null if no optimization possible
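//
// For example (illustrative C#; the type names are invented for this comment):
//
//   sealed class Dog { }
//   object o = new Dog();
//   ... o is Dog ...      // fromClass is exactly Dog, toClass is Dog   => Must    => just use 'o'
//   ... o is string ...   // a Dog can never be a string                => MustNot => isinst folds to null
//
// A failing castclass is currently left alone (it still has to throw at runtime); see the code below.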
9827 GenTree* Compiler::impOptimizeCastClassOrIsInst(GenTree* op1, CORINFO_RESOLVED_TOKEN* pResolvedToken, bool isCastClass)
9829 assert(op1->TypeGet() == TYP_REF);
9831 // Don't optimize for minopts or debug codegen.
9832 if (opts.compDbgCode || opts.MinOpts())
9837 // See what we know about the type of the object being cast.
9838 bool isExact = false;
9839 bool isNonNull = false;
9840 CORINFO_CLASS_HANDLE fromClass = gtGetClassHandle(op1, &isExact, &isNonNull);
9841 GenTree* optResult = nullptr;
9843 if (fromClass != nullptr)
9845 CORINFO_CLASS_HANDLE toClass = pResolvedToken->hClass;
9846 JITDUMP("\nConsidering optimization of %s from %s%p (%s) to %p (%s)\n", isCastClass ? "castclass" : "isinst",
9847 isExact ? "exact " : "", dspPtr(fromClass), info.compCompHnd->getClassName(fromClass), dspPtr(toClass),
9848 info.compCompHnd->getClassName(toClass));
9850 // Perhaps we know if the cast will succeed or fail.
9851 TypeCompareState castResult = info.compCompHnd->compareTypesForCast(fromClass, toClass);
9853 if (castResult == TypeCompareState::Must)
9855 // Cast will succeed, result is simply op1.
9856 JITDUMP("Cast will succeed, optimizing to simply return input\n");
9859 else if (castResult == TypeCompareState::MustNot)
9861 // See if we can sharpen exactness by looking for final classes
9864 DWORD flags = info.compCompHnd->getClassAttribs(fromClass);
9865 DWORD flagsMask = CORINFO_FLG_FINAL | CORINFO_FLG_MARSHAL_BYREF | CORINFO_FLG_CONTEXTFUL |
9866 CORINFO_FLG_VARIANCE | CORINFO_FLG_ARRAY;
9867 isExact = ((flags & flagsMask) == CORINFO_FLG_FINAL);
9870 // Cast to exact type will fail. Handle case where we have
9871 // an exact type (that is, fromClass is not a subtype)
9872 // and we're not going to throw on failure.
9873 if (isExact && !isCastClass)
9875 JITDUMP("Cast will fail, optimizing to return null\n");
9876 GenTree* result = gtNewIconNode(0, TYP_REF);
9878 // If the cast was fed by a box, we can remove that too.
9879 if (op1->IsBoxedValue())
9881 JITDUMP("Also removing upstream box\n");
9882 gtTryRemoveBoxUpstreamEffects(op1);
9889 JITDUMP("Not optimizing failing castclass (yet)\n");
9893 JITDUMP("Can't optimize since fromClass is inexact\n");
9898 JITDUMP("Result of cast unknown, must generate runtime test\n");
9903 JITDUMP("\nCan't optimize since fromClass is unknown\n");
9909 //------------------------------------------------------------------------
9910 // impCastClassOrIsInstToTree: build and import castclass/isinst
9913 // op1 - value to cast
9914 // op2 - type handle for type to cast to
9915 // pResolvedToken - resolved token from the cast operation
9916 // isCastClass - true if this is castclass, false means isinst
9919 // Tree representing the cast
9922 // May expand into a series of runtime checks or a helper call.
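//
// A rough sketch of the inline expansion built below (shape only, assuming the castclass case):
//
//   tmp = op1;
//   result = (tmp == null) ? tmp
//                          : (*tmp != op2) ? CORINFO_HELP_CHKCASTCLASS_SPECIAL(op2, tmp)
//                                          : tmp;
//
// For isinst the mismatch arm yields null instead of calling the special helper, and when inline expansion
// is not legal or not profitable the whole operation is emitted as a single call to the casting helper.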
9924 GenTree* Compiler::impCastClassOrIsInstToTree(GenTree* op1,
9926 CORINFO_RESOLVED_TOKEN* pResolvedToken,
9929 assert(op1->TypeGet() == TYP_REF);
9931 // Optimistically assume the jit should expand this as an inline test
9932 bool shouldExpandInline = true;
9934 // Profitability check.
9936 // Don't bother with inline expansion when jit is trying to
9937 // generate code quickly, or the cast is in code that won't run very
9938 // often, or the method already is pretty big.
9939 if (compCurBB->isRunRarely() || opts.compDbgCode || opts.MinOpts())
9941 // not worth the code expansion if jitting fast or in a rarely run block
9942 shouldExpandInline = false;
9944 else if ((op1->gtFlags & GTF_GLOB_EFFECT) && lvaHaveManyLocals())
9946 // not worth creating an untracked local variable
9947 shouldExpandInline = false;
9950 // Pessimistically assume the jit cannot expand this as an inline test
9951 bool canExpandInline = false;
9952 const CorInfoHelpFunc helper = info.compCompHnd->getCastingHelper(pResolvedToken, isCastClass);
9956 // Not all castclass/isinst operations can be inline expanded.
9957 // Check legality only if an inline expansion is desirable.
9958 if (shouldExpandInline)
9962 // Jit can only inline expand the normal CHKCASTCLASS helper.
9963 canExpandInline = (helper == CORINFO_HELP_CHKCASTCLASS);
9967 if (helper == CORINFO_HELP_ISINSTANCEOFCLASS)
9969 // Check the class attributes.
9970 DWORD flags = info.compCompHnd->getClassAttribs(pResolvedToken->hClass);
9972 // If the class is final and is not marshal byref or
9973 // contextful, the jit can expand the IsInst check inline.
9974 DWORD flagsMask = CORINFO_FLG_FINAL | CORINFO_FLG_MARSHAL_BYREF | CORINFO_FLG_CONTEXTFUL;
9975 canExpandInline = ((flags & flagsMask) == CORINFO_FLG_FINAL);
9980 const bool expandInline = canExpandInline && shouldExpandInline;
9984 JITDUMP("\nExpanding %s as call because %s\n", isCastClass ? "castclass" : "isinst",
9985 canExpandInline ? "want smaller code or faster jitting" : "inline expansion not legal");
9987 // If we CSE this class handle we prevent assertionProp from making SubType assertions
9988 // so instead we force the CSE logic to not consider CSE-ing this class handle.
9990 op2->gtFlags |= GTF_DONT_CSE;
9992 return gtNewHelperCallNode(helper, TYP_REF, gtNewArgList(op2, op1));
9995 JITDUMP("\nExpanding %s inline\n", isCastClass ? "castclass" : "isinst");
9997 impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark2"));
10002 // expand the methodtable match:
10004 // condMT ==> GT_NE
10006 // GT_IND op2 (typically CNS_INT)
10011 // This can replace op1 with a GT_COMMA that evaluates op1 into a local
10013 op1 = impCloneExpr(op1, &temp, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("CASTCLASS eval op1"));
10015 // op1 is now known to be a non-complex tree
10016 // thus we can use gtClone(op1) from now on
10019 GenTree* op2Var = op2;
10022 op2Var = fgInsertCommaFormTemp(&op2);
10023 lvaTable[op2Var->AsLclVarCommon()->GetLclNum()].lvIsCSE = true;
10025 temp = gtNewOperNode(GT_IND, TYP_I_IMPL, temp);
10026 temp->gtFlags |= GTF_EXCEPT;
10027 condMT = gtNewOperNode(GT_NE, TYP_INT, temp, op2);
10031 // expand the null check:
10033 // condNull ==> GT_EQ
10038 condNull = gtNewOperNode(GT_EQ, TYP_INT, gtClone(op1), gtNewIconNode(0, TYP_REF));
10041 // expand the true and false trees for the condMT
10043 GenTree* condFalse = gtClone(op1);
10048 // use the special helper that skips the cases checked by our inlined cast
10050 const CorInfoHelpFunc specialHelper = CORINFO_HELP_CHKCASTCLASS_SPECIAL;
10052 condTrue = gtNewHelperCallNode(specialHelper, TYP_REF, gtNewArgList(op2Var, gtClone(op1)));
10056 condTrue = gtNewIconNode(0, TYP_REF);
10059 #define USE_QMARK_TREES
10061 #ifdef USE_QMARK_TREES
10064 // Generate first QMARK - COLON tree
10066 // qmarkMT ==> GT_QMARK
10070 // condFalse condTrue
10072 temp = new (this, GT_COLON) GenTreeColon(TYP_REF, condTrue, condFalse);
10073 qmarkMT = gtNewQmarkNode(TYP_REF, condMT, temp);
10074 condMT->gtFlags |= GTF_RELOP_QMARK;
10076 GenTree* qmarkNull;
10078 // Generate second QMARK - COLON tree
10080 // qmarkNull ==> GT_QMARK
10082 // condNull GT_COLON
10086 temp = new (this, GT_COLON) GenTreeColon(TYP_REF, gtClone(op1), qmarkMT);
10087 qmarkNull = gtNewQmarkNode(TYP_REF, condNull, temp);
10088 qmarkNull->gtFlags |= GTF_QMARK_CAST_INSTOF;
10089 condNull->gtFlags |= GTF_RELOP_QMARK;
10091 // Make QMark node a top level node by spilling it.
10092 unsigned tmp = lvaGrabTemp(true DEBUGARG("spilling QMark2"));
10093 impAssignTempGen(tmp, qmarkNull, (unsigned)CHECK_SPILL_NONE);
10095 // TODO: Is it possible op1 has a better type?
10096 lvaSetClass(tmp, pResolvedToken->hClass);
10097 return gtNewLclvNode(tmp, TYP_REF);
10102 #define assertImp(cond) ((void)0)
10104 #define assertImp(cond) \
10109 const int cchAssertImpBuf = 600; \
10110 char* assertImpBuf = (char*)alloca(cchAssertImpBuf); \
10111 _snprintf_s(assertImpBuf, cchAssertImpBuf, cchAssertImpBuf - 1, \
10112 "%s : Possibly bad IL with CEE_%s at offset %04Xh (op1=%s op2=%s stkDepth=%d)", #cond, \
10113 impCurOpcName, impCurOpcOffs, op1 ? varTypeName(op1->TypeGet()) : "NULL", \
10114 op2 ? varTypeName(op2->TypeGet()) : "NULL", verCurrentState.esStackDepth); \
10115 assertAbort(assertImpBuf, __FILE__, __LINE__); \
10121 #pragma warning(push)
10122 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
10124 /*****************************************************************************
10125 * Import the instr for the given basic block
10127 void Compiler::impImportBlockCode(BasicBlock* block)
10129 #define _impResolveToken(kind) impResolveToken(codeAddr, &resolvedToken, kind)
10135 printf("\nImporting BB%02u (PC=%03u) of '%s'", block->bbNum, block->bbCodeOffs, info.compFullName);
10139 unsigned nxtStmtIndex = impInitBlockLineInfo();
10140 IL_OFFSET nxtStmtOffs;
10142 GenTree* arrayNodeFrom;
10143 GenTree* arrayNodeTo;
10144 GenTree* arrayNodeToIndex;
10145 CorInfoHelpFunc helper;
10146 CorInfoIsAccessAllowedResult accessAllowedResult;
10147 CORINFO_HELPER_DESC calloutHelper;
10148 const BYTE* lastLoadToken = nullptr;
10150 // reject cyclic constraints
10151 if (tiVerificationNeeded)
10153 Verify(!info.hasCircularClassConstraints, "Method parent has circular class type parameter constraints.");
10154 Verify(!info.hasCircularMethodConstraints, "Method has circular method type parameter constraints.");
10157 /* Get the tree list started */
10159 impBeginTreeList();
10161 /* Walk the opcodes that comprise the basic block */
10163 const BYTE* codeAddr = info.compCode + block->bbCodeOffs;
10164 const BYTE* codeEndp = info.compCode + block->bbCodeOffsEnd;
10166 IL_OFFSET opcodeOffs = block->bbCodeOffs;
10167 IL_OFFSET lastSpillOffs = opcodeOffs;
10171 /* remember the start of the delegate creation sequence (used for verification) */
10172 const BYTE* delegateCreateStart = nullptr;
10174 int prefixFlags = 0;
10175 bool explicitTailCall, constraintCall, readonlyCall;
10179 unsigned numArgs = info.compArgsCount;
10181 /* Now process all the opcodes in the block */
10183 var_types callTyp = TYP_COUNT;
10184 OPCODE prevOpcode = CEE_ILLEGAL;
10186 if (block->bbCatchTyp)
10188 if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES)
10190 impCurStmtOffsSet(block->bbCodeOffs);
10193 // We will spill the GT_CATCH_ARG and the input of the BB_QMARK block
10194 // to a temp. This is a trade off for code simplicity
10195 impSpillSpecialSideEff();
10198 while (codeAddr < codeEndp)
10200 bool usingReadyToRunHelper = false;
10201 CORINFO_RESOLVED_TOKEN resolvedToken;
10202 CORINFO_RESOLVED_TOKEN constrainedResolvedToken;
10203 CORINFO_CALL_INFO callInfo;
10204 CORINFO_FIELD_INFO fieldInfo;
10206 tiRetVal = typeInfo(); // Default type info
10208 //---------------------------------------------------------------------
10210 /* We need to restrict the max tree depth as many of the Compiler
10211 functions are recursive. We do this by spilling the stack */
10213 if (verCurrentState.esStackDepth)
10215 /* Has it been a while since we last saw a non-empty stack (which
10216 guarantees that the tree depth isn't accumulating)? */
10218 if ((opcodeOffs - lastSpillOffs) > MAX_TREE_SIZE && impCanSpillNow(prevOpcode))
10220 impSpillStackEnsure();
10221 lastSpillOffs = opcodeOffs;
10226 lastSpillOffs = opcodeOffs;
10227 impBoxTempInUse = false; // nothing on the stack, box temp OK to use again
10230 /* Compute the current instr offset */
10232 opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
10235 if (opts.compDbgInfo)
10238 if (!compIsForInlining())
10241 (nxtStmtIndex < info.compStmtOffsetsCount) ? info.compStmtOffsets[nxtStmtIndex] : BAD_IL_OFFSET;
10243 /* Have we reached the next stmt boundary ? */
10245 if (nxtStmtOffs != BAD_IL_OFFSET && opcodeOffs >= nxtStmtOffs)
10247 assert(nxtStmtOffs == info.compStmtOffsets[nxtStmtIndex]);
10249 if (verCurrentState.esStackDepth != 0 && opts.compDbgCode)
10251 /* We need to provide accurate IP-mapping at this point.
10252 So spill anything on the stack so that it will form
10253 gtStmts with the correct stmt offset noted */
10255 impSpillStackEnsure(true);
10258 // Has impCurStmtOffs been reported in any tree?
10260 if (impCurStmtOffs != BAD_IL_OFFSET && opts.compDbgCode)
10262 GenTree* placeHolder = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
10263 impAppendTree(placeHolder, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
10265 assert(impCurStmtOffs == BAD_IL_OFFSET);
10268 if (impCurStmtOffs == BAD_IL_OFFSET)
10270 /* Make sure that nxtStmtIndex is in sync with opcodeOffs.
10271 If opcodeOffs has gone past nxtStmtIndex, catch up */
10273 while ((nxtStmtIndex + 1) < info.compStmtOffsetsCount &&
10274 info.compStmtOffsets[nxtStmtIndex + 1] <= opcodeOffs)
10279 /* Go to the new stmt */
10281 impCurStmtOffsSet(info.compStmtOffsets[nxtStmtIndex]);
10283 /* Update the stmt boundary index */
10286 assert(nxtStmtIndex <= info.compStmtOffsetsCount);
10288 /* Are there any more line# entries after this one? */
10290 if (nxtStmtIndex < info.compStmtOffsetsCount)
10292 /* Remember where the next line# starts */
10294 nxtStmtOffs = info.compStmtOffsets[nxtStmtIndex];
10298 /* No more line# entries */
10300 nxtStmtOffs = BAD_IL_OFFSET;
10304 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES) &&
10305 (verCurrentState.esStackDepth == 0))
10307 /* At stack-empty locations, we have already added the tree to
10308 the stmt list with the last offset. We just need to update
10312 impCurStmtOffsSet(opcodeOffs);
10314 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES) &&
10315 impOpcodeIsCallSiteBoundary(prevOpcode))
10317 /* Make sure we have a type cached */
10318 assert(callTyp != TYP_COUNT);
10320 if (callTyp == TYP_VOID)
10322 impCurStmtOffsSet(opcodeOffs);
10324 else if (opts.compDbgCode)
10326 impSpillStackEnsure(true);
10327 impCurStmtOffsSet(opcodeOffs);
10330 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::NOP_BOUNDARIES) && (prevOpcode == CEE_NOP))
10332 if (opts.compDbgCode)
10334 impSpillStackEnsure(true);
10337 impCurStmtOffsSet(opcodeOffs);
10340 assert(impCurStmtOffs == BAD_IL_OFFSET || nxtStmtOffs == BAD_IL_OFFSET ||
10341 jitGetILoffs(impCurStmtOffs) <= nxtStmtOffs);
10345 CORINFO_CLASS_HANDLE clsHnd = DUMMY_INIT(NULL);
10346 CORINFO_CLASS_HANDLE ldelemClsHnd = DUMMY_INIT(NULL);
10347 CORINFO_CLASS_HANDLE stelemClsHnd = DUMMY_INIT(NULL);
10349 var_types lclTyp, ovflType = TYP_UNKNOWN;
10350 GenTree* op1 = DUMMY_INIT(NULL);
10351 GenTree* op2 = DUMMY_INIT(NULL);
10352 GenTreeArgList* args = nullptr; // What good do these "DUMMY_INIT"s do?
10353 GenTree* newObjThisPtr = DUMMY_INIT(NULL);
10354 bool uns = DUMMY_INIT(false);
10355 bool isLocal = false;
10357 /* Get the next opcode and the size of its parameters */
10359 OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr);
10360 codeAddr += sizeof(__int8);
10363 impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1);
10364 JITDUMP("\n [%2u] %3u (0x%03x) ", verCurrentState.esStackDepth, impCurOpcOffs, impCurOpcOffs);
10369 // Return if any previous code has caused inline to fail.
10370 if (compDonotInline())
10375 /* Get the size of additional parameters */
10377 signed int sz = opcodeSizes[opcode];
10380 clsHnd = NO_CLASS_HANDLE;
10381 lclTyp = TYP_COUNT;
10382 callTyp = TYP_COUNT;
10384 impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1);
10385 impCurOpcName = opcodeNames[opcode];
10387 if (verbose && (opcode != CEE_PREFIX1))
10389 printf("%s", impCurOpcName);
10392 /* Use assertImp() to display the opcode */
10394 op1 = op2 = nullptr;
10397 /* See what kind of an opcode we have, then */
10399 unsigned mflags = 0;
10400 unsigned clsFlags = 0;
10413 CORINFO_SIG_INFO sig;
10415 bool ovfl, unordered, callNode;
10417 CORINFO_CLASS_HANDLE tokenType;
10427 opcode = (OPCODE)(getU1LittleEndian(codeAddr) + 256);
10428 opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
10429 codeAddr += sizeof(__int8);
10430 goto DECODE_OPCODE;
10434 // We need to call impSpillLclRefs() for a struct type lclVar.
10435 // This is done for non-block assignments in the handling of stloc.
10436 if ((op1->OperGet() == GT_ASG) && varTypeIsStruct(op1->gtOp.gtOp1) &&
10437 (op1->gtOp.gtOp1->gtOper == GT_LCL_VAR))
10439 impSpillLclRefs(op1->gtOp.gtOp1->AsLclVarCommon()->gtLclNum);
10442 /* Append 'op1' to the list of statements */
10443 impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
10448 /* Append 'op1' to the list of statements */
10450 impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
10456 // Remember at which BC offset the tree was finished
10457 impNoteLastILoffs();
10462 impPushNullObjRefOnStack();
10465 case CEE_LDC_I4_M1:
10475 cval.intVal = (opcode - CEE_LDC_I4_0);
10476 assert(-1 <= cval.intVal && cval.intVal <= 8);
10480 cval.intVal = getI1LittleEndian(codeAddr);
10483 cval.intVal = getI4LittleEndian(codeAddr);
10486 JITDUMP(" %d", cval.intVal);
10487 impPushOnStack(gtNewIconNode(cval.intVal), typeInfo(TI_INT));
10491 cval.lngVal = getI8LittleEndian(codeAddr);
10492 JITDUMP(" 0x%016llx", cval.lngVal);
10493 impPushOnStack(gtNewLconNode(cval.lngVal), typeInfo(TI_LONG));
10497 cval.dblVal = getR8LittleEndian(codeAddr);
10498 JITDUMP(" %#.17g", cval.dblVal);
10499 impPushOnStack(gtNewDconNode(cval.dblVal), typeInfo(TI_DOUBLE));
10503 cval.dblVal = getR4LittleEndian(codeAddr);
10504 JITDUMP(" %#.17g", cval.dblVal);
10506 GenTree* cnsOp = gtNewDconNode(cval.dblVal);
10507 #if !FEATURE_X87_DOUBLES
10508 // X87 stack doesn't differentiate between float/double
10509 // so R4 is treated as R8, but everybody else does
10510 cnsOp->gtType = TYP_FLOAT;
10511 #endif // FEATURE_X87_DOUBLES
10512 impPushOnStack(cnsOp, typeInfo(TI_DOUBLE));
10518 if (compIsForInlining())
10520 if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_NO_CALLEE_LDSTR)
10522 compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_LDSTR_RESTRICTION);
10527 val = getU4LittleEndian(codeAddr);
10528 JITDUMP(" %08X", val);
10529 if (tiVerificationNeeded)
10531 Verify(info.compCompHnd->isValidStringRef(info.compScopeHnd, val), "bad string");
10532 tiRetVal = typeInfo(TI_REF, impGetStringClass());
10534 impPushOnStack(gtNewSconNode(val, info.compScopeHnd), tiRetVal);
10539 lclNum = getU2LittleEndian(codeAddr);
10540 JITDUMP(" %u", lclNum);
10541 impLoadArg(lclNum, opcodeOffs + sz + 1);
10545 lclNum = getU1LittleEndian(codeAddr);
10546 JITDUMP(" %u", lclNum);
10547 impLoadArg(lclNum, opcodeOffs + sz + 1);
10554 lclNum = (opcode - CEE_LDARG_0);
10555 assert(lclNum >= 0 && lclNum < 4);
10556 impLoadArg(lclNum, opcodeOffs + sz + 1);
10560 lclNum = getU2LittleEndian(codeAddr);
10561 JITDUMP(" %u", lclNum);
10562 impLoadLoc(lclNum, opcodeOffs + sz + 1);
10566 lclNum = getU1LittleEndian(codeAddr);
10567 JITDUMP(" %u", lclNum);
10568 impLoadLoc(lclNum, opcodeOffs + sz + 1);
10575 lclNum = (opcode - CEE_LDLOC_0);
10576 assert(lclNum >= 0 && lclNum < 4);
10577 impLoadLoc(lclNum, opcodeOffs + sz + 1);
10581 lclNum = getU2LittleEndian(codeAddr);
10585 lclNum = getU1LittleEndian(codeAddr);
10587 JITDUMP(" %u", lclNum);
10589 if (tiVerificationNeeded)
10591 Verify(lclNum < info.compILargsCount, "bad arg num");
10594 if (compIsForInlining())
10596 op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo);
10597 noway_assert(op1->gtOper == GT_LCL_VAR);
10598 lclNum = op1->AsLclVar()->gtLclNum;
10603 lclNum = compMapILargNum(lclNum); // account for possible hidden param
10604 assertImp(lclNum < numArgs);
10606 if (lclNum == info.compThisArg)
10608 lclNum = lvaArg0Var;
10611 // We should have seen this arg write in the prescan
10612 assert(lvaTable[lclNum].lvHasILStoreOp);
10614 if (tiVerificationNeeded)
10616 typeInfo& tiLclVar = lvaTable[lclNum].lvVerTypeInfo;
10617 Verify(tiCompatibleWith(impStackTop().seTypeInfo, NormaliseForStack(tiLclVar), true),
10620 if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
10622 Verify(!tiLclVar.IsThisPtr(), "storing to uninit this ptr");
10629 lclNum = getU2LittleEndian(codeAddr);
10631 JITDUMP(" %u", lclNum);
10635 lclNum = getU1LittleEndian(codeAddr);
10637 JITDUMP(" %u", lclNum);
10645 lclNum = (opcode - CEE_STLOC_0);
10646 assert(lclNum >= 0 && lclNum < 4);
10649 if (tiVerificationNeeded)
10651 Verify(lclNum < info.compMethodInfo->locals.numArgs, "bad local num");
10652 Verify(tiCompatibleWith(impStackTop().seTypeInfo,
10653 NormaliseForStack(lvaTable[lclNum + numArgs].lvVerTypeInfo), true),
10657 if (compIsForInlining())
10659 lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
10661 /* Have we allocated a temp for this local? */
10663 lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline stloc first use temp"));
10672 if (lclNum >= info.compLocalsCount && lclNum != lvaArg0Var)
10674 assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
10680 /* if it is a struct assignment, make certain we don't overflow the buffer */
10681 assert(lclTyp != TYP_STRUCT || lvaLclSize(lclNum) >= info.compCompHnd->getClassSize(clsHnd));
10683 if (lvaTable[lclNum].lvNormalizeOnLoad())
10685 lclTyp = lvaGetRealType(lclNum);
10689 lclTyp = lvaGetActualType(lclNum);
10693 /* Pop the value being assigned */
10696 StackEntry se = impPopStack();
10697 clsHnd = se.seTypeInfo.GetClassHandle();
10699 tiRetVal = se.seTypeInfo;
10702 #ifdef FEATURE_SIMD
10703 if (varTypeIsSIMD(lclTyp) && (lclTyp != op1->TypeGet()))
10705 assert(op1->TypeGet() == TYP_STRUCT);
10706 op1->gtType = lclTyp;
10708 #endif // FEATURE_SIMD
10710 op1 = impImplicitIorI4Cast(op1, lclTyp);
10712 #ifdef _TARGET_64BIT_
10713 // Downcast the TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
10714 if (varTypeIsI(op1->TypeGet()) && (genActualType(lclTyp) == TYP_INT))
10716 assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
10717 op1 = gtNewCastNode(TYP_INT, op1, false, TYP_INT);
10719 #endif // _TARGET_64BIT_
10721 // We had better assign it a value of the correct type
10723 genActualType(lclTyp) == genActualType(op1->gtType) ||
10724 genActualType(lclTyp) == TYP_I_IMPL && op1->IsVarAddr() ||
10725 (genActualType(lclTyp) == TYP_I_IMPL && (op1->gtType == TYP_BYREF || op1->gtType == TYP_REF)) ||
10726 (genActualType(op1->gtType) == TYP_I_IMPL && lclTyp == TYP_BYREF) ||
10727 (varTypeIsFloating(lclTyp) && varTypeIsFloating(op1->TypeGet())) ||
10728 ((genActualType(lclTyp) == TYP_BYREF) && genActualType(op1->TypeGet()) == TYP_REF));
10730 /* If op1 is "&var" then its type is the transient "*" and it can
10731 be used either as TYP_BYREF or TYP_I_IMPL */
10733 if (op1->IsVarAddr())
10735 assertImp(genActualType(lclTyp) == TYP_I_IMPL || lclTyp == TYP_BYREF);
10737 /* When "&var" is created, we assume it is a byref. If it is
10738 being assigned to a TYP_I_IMPL var, change the type to
10739 prevent unnecessary GC info */
10741 if (genActualType(lclTyp) == TYP_I_IMPL)
10743 op1->gtType = TYP_I_IMPL;
10747 // If this is a local and the local is a ref type, see
10748 // if we can improve type information based on the
10749 // value being assigned.
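// For example (illustrative C#; the local and type are hypothetical):
//
//   object o = new StringBuilder();   // single IL store to 'o', address never taken
//
// here lvaUpdateClass can record that 'o' is really a StringBuilder, which can later help devirtualize
// calls made through 'o'.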
10750 if (isLocal && (lclTyp == TYP_REF))
10752 // We should have seen a stloc in our IL prescan.
10753 assert(lvaTable[lclNum].lvHasILStoreOp);
10755 const bool isSingleILStoreLocal =
10756 !lvaTable[lclNum].lvHasMultipleILStoreOp && !lvaTable[lclNum].lvHasLdAddrOp;
10758 // Conservative check that there is just one
10759 // definition that reaches this store.
10760 const bool hasSingleReachingDef = (block->bbStackDepthOnEntry() == 0);
10762 if (isSingleILStoreLocal && hasSingleReachingDef)
10764 lvaUpdateClass(lclNum, op1, clsHnd);
10768 /* Filter out simple assignments to itself */
10770 if (op1->gtOper == GT_LCL_VAR && lclNum == op1->gtLclVarCommon.gtLclNum)
10772 if (opts.compDbgCode)
10774 op1 = gtNewNothingNode();
10783 /* Create the assignment node */
10785 op2 = gtNewLclvNode(lclNum, lclTyp, opcodeOffs + sz + 1);
10787 /* If the local is aliased or pinned, we need to spill calls and
10788 indirections from the stack. */
10790 if ((lvaTable[lclNum].lvAddrExposed || lvaTable[lclNum].lvHasLdAddrOp || lvaTable[lclNum].lvPinned) &&
10791 (verCurrentState.esStackDepth > 0))
10793 impSpillSideEffects(false,
10794 (unsigned)CHECK_SPILL_ALL DEBUGARG("Local could be aliased or is pinned"));
10797 /* Spill any refs to the local from the stack */
10799 impSpillLclRefs(lclNum);
10801 #if !FEATURE_X87_DOUBLES
10802 // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE
10803 // We insert a cast to the dest 'op2' type
10805 if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) &&
10806 varTypeIsFloating(op2->gtType))
10808 op1 = gtNewCastNode(op2->TypeGet(), op1, false, op2->TypeGet());
10810 #endif // !FEATURE_X87_DOUBLES
10812 if (varTypeIsStruct(lclTyp))
10814 op1 = impAssignStruct(op2, op1, clsHnd, (unsigned)CHECK_SPILL_ALL);
10818 // The code generator generates GC tracking information
10819 // based on the RHS of the assignment. Later the LHS (which
10820 // is a BYREF) gets used and the emitter checks that that variable
10821 // is being tracked. It is not (since the RHS was an int and did
10822 // not need tracking). To keep this assert happy, we change the RHS
10823 if (lclTyp == TYP_BYREF && !varTypeIsGC(op1->gtType))
10825 op1->gtType = TYP_BYREF;
10827 op1 = gtNewAssignNode(op2, op1);
10833 lclNum = getU2LittleEndian(codeAddr);
10837 lclNum = getU1LittleEndian(codeAddr);
10839 JITDUMP(" %u", lclNum);
10840 if (tiVerificationNeeded)
10842 Verify(lclNum < info.compMethodInfo->locals.numArgs, "bad local num");
10843 Verify(info.compInitMem, "initLocals not set");
10846 if (compIsForInlining())
10848 // Get the local type
10849 lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
10851 /* Have we allocated a temp for this local? */
10853 lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline ldloca(s) first use temp"));
10855 op1 = gtNewLclvNode(lclNum, lvaGetActualType(lclNum));
10861 assertImp(lclNum < info.compLocalsCount);
10865 lclNum = getU2LittleEndian(codeAddr);
10869 lclNum = getU1LittleEndian(codeAddr);
10871 JITDUMP(" %u", lclNum);
10872 Verify(lclNum < info.compILargsCount, "bad arg num");
10874 if (compIsForInlining())
10876 // In IL, LDARGA(_S) is used to load the byref managed pointer of struct argument,
10877 // followed by a ldfld to load the field.
10879 op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo);
10880 if (op1->gtOper != GT_LCL_VAR)
10882 compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDARGA_NOT_LOCAL_VAR);
10886 assert(op1->gtOper == GT_LCL_VAR);
10891 lclNum = compMapILargNum(lclNum); // account for possible hidden param
10892 assertImp(lclNum < numArgs);
10894 if (lclNum == info.compThisArg)
10896 lclNum = lvaArg0Var;
10903 op1 = gtNewLclvNode(lclNum, lvaGetActualType(lclNum), opcodeOffs + sz + 1);
10906 assert(op1->gtOper == GT_LCL_VAR);
10908 /* Note that this is supposed to create the transient type "*"
10909 which may be used as a TYP_I_IMPL. However we catch places
10910 where it is used as a TYP_I_IMPL and change the node if needed.
10911 Thus we are pessimistic and may report byrefs in the GC info
10912 where it was not absolutely needed, but it is safer this way.
10914 op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, op1);
10916 // &aliasedVar doesn't need GTF_GLOB_REF, though aliasedVar does
10917 assert((op1->gtFlags & GTF_GLOB_REF) == 0);
10919 tiRetVal = lvaTable[lclNum].lvVerTypeInfo;
10920 if (tiVerificationNeeded)
10922 // Don't allow taking address of uninit this ptr.
10923 if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
10925 Verify(!tiRetVal.IsThisPtr(), "address of uninit this ptr");
10928 if (!tiRetVal.IsByRef())
10930 tiRetVal.MakeByRef();
10934 Verify(false, "byref to byref");
10938 impPushOnStack(op1, tiRetVal);
10943 if (!info.compIsVarArgs)
10945 BADCODE("arglist in non-vararg method");
10948 if (tiVerificationNeeded)
10950 tiRetVal = typeInfo(TI_STRUCT, impGetRuntimeArgumentHandle());
10952 assertImp((info.compMethodInfo->args.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG);
10954 /* The ARGLIST cookie is a hidden 'last' parameter; we have already
10955 adjusted the arg count because this is like fetching the last param */
10956 assertImp(0 < numArgs);
10957 assert(lvaTable[lvaVarargsHandleArg].lvAddrExposed);
10958 lclNum = lvaVarargsHandleArg;
10959 op1 = gtNewLclvNode(lclNum, TYP_I_IMPL, opcodeOffs + sz + 1);
10960 op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, op1);
10961 impPushOnStack(op1, tiRetVal);
10964 case CEE_ENDFINALLY:
10966 if (compIsForInlining())
10968 assert(!"Shouldn't have exception handlers in the inliner!");
10969 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFINALLY);
10973 if (verCurrentState.esStackDepth > 0)
10975 impEvalSideEffects();
10978 if (info.compXcptnsCount == 0)
10980 BADCODE("endfinally outside finally");
10983 assert(verCurrentState.esStackDepth == 0);
10985 op1 = gtNewOperNode(GT_RETFILT, TYP_VOID, nullptr);
10988 case CEE_ENDFILTER:
10990 if (compIsForInlining())
10992 assert(!"Shouldn't have exception handlers in the inliner!");
10993 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFILTER);
10997 block->bbSetRunRarely(); // filters are rare
10999 if (info.compXcptnsCount == 0)
11001 BADCODE("endfilter outside filter");
11004 if (tiVerificationNeeded)
11006 Verify(impStackTop().seTypeInfo.IsType(TI_INT), "bad endfilt arg");
11009 op1 = impPopStack().val;
11010 assertImp(op1->gtType == TYP_INT);
11011 if (!bbInFilterILRange(block))
11013 BADCODE("EndFilter outside a filter handler");
11016 /* Mark current bb as end of filter */
11018 assert(compCurBB->bbFlags & BBF_DONT_REMOVE);
11019 assert(compCurBB->bbJumpKind == BBJ_EHFILTERRET);
11021 /* Mark catch handler as successor */
11023 op1 = gtNewOperNode(GT_RETFILT, op1->TypeGet(), op1);
11024 if (verCurrentState.esStackDepth != 0)
11026 verRaiseVerifyException(INDEBUG("stack must be 1 on end of filter") DEBUGARG(__FILE__)
11027 DEBUGARG(__LINE__));
11032 prefixFlags &= ~PREFIX_TAILCALL; // ret without call before it
11034 if (!impReturnInstruction(block, prefixFlags, opcode))
11045 assert(!compIsForInlining());
11047 if (tiVerificationNeeded)
11049 Verify(false, "Invalid opcode: CEE_JMP");
11052 if ((info.compFlags & CORINFO_FLG_SYNCH) || block->hasTryIndex() || block->hasHndIndex())
11054 /* CEE_JMP does not make sense in some "protected" regions. */
11056 BADCODE("Jmp not allowed in protected region");
11059 if (verCurrentState.esStackDepth != 0)
11061 BADCODE("Stack must be empty after CEE_JMPs");
11064 _impResolveToken(CORINFO_TOKENKIND_Method);
11066 JITDUMP(" %08X", resolvedToken.token);
11068 /* The signature of the target has to be identical to ours.
11069 At least check that argCnt and returnType match */
11071 eeGetMethodSig(resolvedToken.hMethod, &sig);
11072 if (sig.numArgs != info.compMethodInfo->args.numArgs ||
11073 sig.retType != info.compMethodInfo->args.retType ||
11074 sig.callConv != info.compMethodInfo->args.callConv)
11076 BADCODE("Incompatible target for CEE_JMPs");
11079 op1 = new (this, GT_JMP) GenTreeVal(GT_JMP, TYP_VOID, (size_t)resolvedToken.hMethod);
11081 /* Mark the basic block as being a JUMP instead of RETURN */
11083 block->bbFlags |= BBF_HAS_JMP;
11085 /* Set this flag to make sure register arguments have a location assigned
11086 * even if we don't use them inside the method */
11088 compJmpOpUsed = true;
11090 fgNoStructPromotion = true;
11095 assertImp(sz == sizeof(unsigned));
11097 _impResolveToken(CORINFO_TOKENKIND_Class);
11099 JITDUMP(" %08X", resolvedToken.token);
11101 ldelemClsHnd = resolvedToken.hClass;
11103 if (tiVerificationNeeded)
11105 typeInfo tiArray = impStackTop(1).seTypeInfo;
11106 typeInfo tiIndex = impStackTop().seTypeInfo;
11108 // As per ECMA 'index' specified can be either int32 or native int.
11109 Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
11111 typeInfo arrayElemType = verMakeTypeInfo(ldelemClsHnd);
11112 Verify(tiArray.IsNullObjRef() ||
11113 typeInfo::AreEquivalent(verGetArrayElemType(tiArray), arrayElemType),
11116 tiRetVal = arrayElemType;
11117 tiRetVal.MakeByRef();
11118 if (prefixFlags & PREFIX_READONLY)
11120 tiRetVal.SetIsReadonlyByRef();
11123 // an array interior pointer is always in the heap
11124 tiRetVal.SetIsPermanentHomeByRef();
11127 // If it's a value class array we just do a simple address-of
11128 if (eeIsValueClass(ldelemClsHnd))
11130 CorInfoType cit = info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd);
11131 if (cit == CORINFO_TYPE_UNDEF)
11133 lclTyp = TYP_STRUCT;
11137 lclTyp = JITtype2varType(cit);
11139 goto ARR_LD_POST_VERIFY;
11142 // Similarly, if it's a readonly access, we can do a simple address-of
11143 // without doing a runtime type-check
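// (For example, the C# compiler may emit "readonly. ldelema" for generic code like "arr[i].ToString()"
// where arr is a T[]; since the resulting byref is only read through, no array covariance check is needed
// even if arr's actual element type is more derived. Illustrative only.)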
11144 if (prefixFlags & PREFIX_READONLY)
11147 goto ARR_LD_POST_VERIFY;
11150 // Otherwise we need the full helper function with run-time type check
11151 op1 = impTokenToHandle(&resolvedToken);
11152 if (op1 == nullptr)
11153 { // compDonotInline()
11157 args = gtNewArgList(op1); // Type
11158 args = gtNewListNode(impPopStack().val, args); // index
11159 args = gtNewListNode(impPopStack().val, args); // array
11160 op1 = gtNewHelperCallNode(CORINFO_HELP_LDELEMA_REF, TYP_BYREF, args);
11162 impPushOnStack(op1, tiRetVal);
11165 // ldelem for reference and value types
11167 assertImp(sz == sizeof(unsigned));
11169 _impResolveToken(CORINFO_TOKENKIND_Class);
11171 JITDUMP(" %08X", resolvedToken.token);
11173 ldelemClsHnd = resolvedToken.hClass;
11175 if (tiVerificationNeeded)
11177 typeInfo tiArray = impStackTop(1).seTypeInfo;
11178 typeInfo tiIndex = impStackTop().seTypeInfo;
11180 // As per ECMA 'index' specified can be either int32 or native int.
11181 Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
11182 tiRetVal = verMakeTypeInfo(ldelemClsHnd);
11184 Verify(tiArray.IsNullObjRef() || tiCompatibleWith(verGetArrayElemType(tiArray), tiRetVal, false),
11185 "type of array incompatible with type operand");
11186 tiRetVal.NormaliseForStack();
11189 // If it's a reference type or generic variable type
11190 // then just generate code as though it's a ldelem.ref instruction
11191 if (!eeIsValueClass(ldelemClsHnd))
11194 opcode = CEE_LDELEM_REF;
11198 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(ldelemClsHnd);
11199 lclTyp = JITtype2varType(jitTyp);
11200 tiRetVal = verMakeTypeInfo(ldelemClsHnd); // precise type always needed for struct
11201 tiRetVal.NormaliseForStack();
11203 goto ARR_LD_POST_VERIFY;
11205 case CEE_LDELEM_I1:
11208 case CEE_LDELEM_I2:
11209 lclTyp = TYP_SHORT;
11212 lclTyp = TYP_I_IMPL;
11215 // Should be UINT, but since no platform widens 4->8 bytes it doesn't matter
11216 // and treating it as TYP_INT avoids other asserts.
11217 case CEE_LDELEM_U4:
11221 case CEE_LDELEM_I4:
11224 case CEE_LDELEM_I8:
11227 case CEE_LDELEM_REF:
11230 case CEE_LDELEM_R4:
11231 lclTyp = TYP_FLOAT;
11233 case CEE_LDELEM_R8:
11234 lclTyp = TYP_DOUBLE;
11236 case CEE_LDELEM_U1:
11237 lclTyp = TYP_UBYTE;
11239 case CEE_LDELEM_U2:
11240 lclTyp = TYP_USHORT;
11245 if (tiVerificationNeeded)
11247 typeInfo tiArray = impStackTop(1).seTypeInfo;
11248 typeInfo tiIndex = impStackTop().seTypeInfo;
11250 // As per ECMA 'index' specified can be either int32 or native int.
11251 Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
11252 if (tiArray.IsNullObjRef())
11254 if (lclTyp == TYP_REF)
11255 { // we will say a deref of a null array yields a null ref
11256 tiRetVal = typeInfo(TI_NULL);
11260 tiRetVal = typeInfo(lclTyp);
11265 tiRetVal = verGetArrayElemType(tiArray);
11266 typeInfo arrayElemTi = typeInfo(lclTyp);
11267 #ifdef _TARGET_64BIT_
11268 if (opcode == CEE_LDELEM_I)
11270 arrayElemTi = typeInfo::nativeInt();
11273 if (lclTyp != TYP_REF && lclTyp != TYP_STRUCT)
11275 Verify(typeInfo::AreEquivalent(tiRetVal, arrayElemTi), "bad array");
11278 #endif // _TARGET_64BIT_
11280 Verify(tiRetVal.IsType(arrayElemTi.GetType()), "bad array");
11283 tiRetVal.NormaliseForStack();
11285 ARR_LD_POST_VERIFY:
11287 /* Pull the index value and array address */
11288 op2 = impPopStack().val;
11289 op1 = impPopStack().val;
11290 assertImp(op1->gtType == TYP_REF);
11292 /* Check for null pointer - in the inliner case we simply abort */
11294 if (compIsForInlining())
11296 if (op1->gtOper == GT_CNS_INT)
11298 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NULL_FOR_LDELEM);
11303 op1 = impCheckForNullPointer(op1);
11305 /* Mark the block as containing an index expression */
11307 if (op1->gtOper == GT_LCL_VAR)
11309 if (op2->gtOper == GT_LCL_VAR || op2->gtOper == GT_CNS_INT || op2->gtOper == GT_ADD)
11311 block->bbFlags |= BBF_HAS_IDX_LEN;
11312 optMethodFlags |= OMF_HAS_ARRAYREF;
11316 /* Create the index node and push it on the stack */
11318 op1 = gtNewIndexRef(lclTyp, op1, op2);
11320 ldstruct = (opcode == CEE_LDELEM && lclTyp == TYP_STRUCT);
11322 if ((opcode == CEE_LDELEMA) || ldstruct ||
11323 (ldelemClsHnd != DUMMY_INIT(NULL) && eeIsValueClass(ldelemClsHnd)))
11325 assert(ldelemClsHnd != DUMMY_INIT(NULL));
11327 // remember the element size
11328 if (lclTyp == TYP_REF)
11330 op1->gtIndex.gtIndElemSize = TARGET_POINTER_SIZE;
11334 // If ldElemClass is precisely a primitive type, use that, otherwise, preserve the struct type.
11335 if (info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd) == CORINFO_TYPE_UNDEF)
11337 op1->gtIndex.gtStructElemClass = ldelemClsHnd;
11339 assert(lclTyp != TYP_STRUCT || op1->gtIndex.gtStructElemClass != nullptr);
11340 if (lclTyp == TYP_STRUCT)
11342 size = info.compCompHnd->getClassSize(ldelemClsHnd);
11343 op1->gtIndex.gtIndElemSize = size;
11344 op1->gtType = lclTyp;
11348 if ((opcode == CEE_LDELEMA) || ldstruct)
11351 lclTyp = TYP_BYREF;
11353 op1 = gtNewOperNode(GT_ADDR, lclTyp, op1);
11357 assert(lclTyp != TYP_STRUCT);
11363 // Create an OBJ for the result
11364 op1 = gtNewObjNode(ldelemClsHnd, op1);
11365 op1->gtFlags |= GTF_EXCEPT;
11367 impPushOnStack(op1, tiRetVal);
11370 // stelem for reference and value types
11373 assertImp(sz == sizeof(unsigned));
11375 _impResolveToken(CORINFO_TOKENKIND_Class);
11377 JITDUMP(" %08X", resolvedToken.token);
11379 stelemClsHnd = resolvedToken.hClass;
11381 if (tiVerificationNeeded)
11383 typeInfo tiArray = impStackTop(2).seTypeInfo;
11384 typeInfo tiIndex = impStackTop(1).seTypeInfo;
11385 typeInfo tiValue = impStackTop().seTypeInfo;
11387 // As per ECMA 'index' specified can be either int32 or native int.
11388 Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
11389 typeInfo arrayElem = verMakeTypeInfo(stelemClsHnd);
11391 Verify(tiArray.IsNullObjRef() || tiCompatibleWith(arrayElem, verGetArrayElemType(tiArray), false),
11392 "type operand incompatible with array element type");
11393 arrayElem.NormaliseForStack();
11394 Verify(tiCompatibleWith(tiValue, arrayElem, true), "value incompatible with type operand");
11397 // If it's a reference type just behave as though it's a stelem.ref instruction
11398 if (!eeIsValueClass(stelemClsHnd))
11400 goto STELEM_REF_POST_VERIFY;
11403 // Otherwise extract the type
11405 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(stelemClsHnd);
11406 lclTyp = JITtype2varType(jitTyp);
11407 goto ARR_ST_POST_VERIFY;
11410 case CEE_STELEM_REF:
11412 if (tiVerificationNeeded)
11414 typeInfo tiArray = impStackTop(2).seTypeInfo;
11415 typeInfo tiIndex = impStackTop(1).seTypeInfo;
11416 typeInfo tiValue = impStackTop().seTypeInfo;
11418 // As per ECMA 'index' specified can be either int32 or native int.
11419 Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
11420 Verify(tiValue.IsObjRef(), "bad value");
11422 // we only check that it is an object reference; the helper does additional checks
11423 Verify(tiArray.IsNullObjRef() || verGetArrayElemType(tiArray).IsType(TI_REF), "bad array");
11426 STELEM_REF_POST_VERIFY:
11428 arrayNodeTo = impStackTop(2).val;
11429 arrayNodeToIndex = impStackTop(1).val;
11430 arrayNodeFrom = impStackTop().val;
11433 // Note that it is not legal to optimize away CORINFO_HELP_ARRADDR_ST in a
11434 // lot of cases because of covariance, i.e. foo[] can be cast to object[].
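// For example (illustrative C#):
//
//   object[] arr = new string[1];
//   arr[0] = new object();           // must throw ArrayTypeMismatchException at run time
//
// so in general the store has to go through CORINFO_HELP_ARRADDR_ST, which performs that check.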
11437 // Check for assignment to same array, ie. arrLcl[i] = arrLcl[j]
11438 // This does not need CORINFO_HELP_ARRADDR_ST
11439 if (arrayNodeFrom->OperGet() == GT_INDEX && arrayNodeFrom->gtOp.gtOp1->gtOper == GT_LCL_VAR &&
11440 arrayNodeTo->gtOper == GT_LCL_VAR &&
11441 arrayNodeTo->gtLclVarCommon.gtLclNum == arrayNodeFrom->gtOp.gtOp1->gtLclVarCommon.gtLclNum &&
11442 !lvaTable[arrayNodeTo->gtLclVarCommon.gtLclNum].lvAddrExposed)
11444 JITDUMP("\nstelem of ref from same array: skipping covariant store check\n");
11446 goto ARR_ST_POST_VERIFY;
11449 // Check for assignment of NULL. This does not need CORINFO_HELP_ARRADDR_ST
11450 if (arrayNodeFrom->OperGet() == GT_CNS_INT)
11452 JITDUMP("\nstelem of null: skipping covariant store check\n");
11453 assert(arrayNodeFrom->gtType == TYP_REF && arrayNodeFrom->gtIntCon.gtIconVal == 0);
11455 goto ARR_ST_POST_VERIFY;
11458 /* Call a helper function to do the assignment */
11459 op1 = gtNewHelperCallNode(CORINFO_HELP_ARRADDR_ST, TYP_VOID, impPopList(3, nullptr));
11463 case CEE_STELEM_I1:
11466 case CEE_STELEM_I2:
11467 lclTyp = TYP_SHORT;
11470 lclTyp = TYP_I_IMPL;
11472 case CEE_STELEM_I4:
11475 case CEE_STELEM_I8:
11478 case CEE_STELEM_R4:
11479 lclTyp = TYP_FLOAT;
11481 case CEE_STELEM_R8:
11482 lclTyp = TYP_DOUBLE;
11487 if (tiVerificationNeeded)
11489 typeInfo tiArray = impStackTop(2).seTypeInfo;
11490 typeInfo tiIndex = impStackTop(1).seTypeInfo;
11491 typeInfo tiValue = impStackTop().seTypeInfo;
11493 // As per ECMA 'index' specified can be either int32 or native int.
11494 Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
11495 typeInfo arrayElem = typeInfo(lclTyp);
11496 #ifdef _TARGET_64BIT_
11497 if (opcode == CEE_STELEM_I)
11499 arrayElem = typeInfo::nativeInt();
11501 #endif // _TARGET_64BIT_
11502 Verify(tiArray.IsNullObjRef() || typeInfo::AreEquivalent(verGetArrayElemType(tiArray), arrayElem),
11505 Verify(tiCompatibleWith(NormaliseForStack(tiValue), arrayElem.NormaliseForStack(), true),
11509 ARR_ST_POST_VERIFY:
11510 /* The strict order of evaluation is LHS-operands, RHS-operands,
11511 range-check, and then assignment. However, codegen currently
11512 does the range-check before evaluating the RHS-operands. So to
11513 maintain strict ordering, we spill the stack. */
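// For example (illustrative C#): in "arr[i] = SideEffect();" the call must run (and throw, if it throws)
// before any IndexOutOfRangeException from the range check; spilling the stack preserves that order.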
11515 if (impStackTop().val->gtFlags & GTF_SIDE_EFFECT)
11517 impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
11518 "Strict ordering of exceptions for Array store"));
11521 /* Pull the new value from the stack */
11522 op2 = impPopStack().val;
11524 /* Pull the index value */
11525 op1 = impPopStack().val;
11527 /* Pull the array address */
11528 op3 = impPopStack().val;
11530 assertImp(op3->gtType == TYP_REF);
11531 if (op2->IsVarAddr())
11533 op2->gtType = TYP_I_IMPL;
11536 op3 = impCheckForNullPointer(op3);
11538 // Mark the block as containing an index expression
11540 if (op3->gtOper == GT_LCL_VAR)
11542 if (op1->gtOper == GT_LCL_VAR || op1->gtOper == GT_CNS_INT || op1->gtOper == GT_ADD)
11544 block->bbFlags |= BBF_HAS_IDX_LEN;
11545 optMethodFlags |= OMF_HAS_ARRAYREF;
11549 /* Create the index node */
11551 op1 = gtNewIndexRef(lclTyp, op3, op1);
11553 /* Create the assignment node and append it */
11555 if (lclTyp == TYP_STRUCT)
11557 assert(stelemClsHnd != DUMMY_INIT(NULL));
11559 op1->gtIndex.gtStructElemClass = stelemClsHnd;
11560 op1->gtIndex.gtIndElemSize = info.compCompHnd->getClassSize(stelemClsHnd);
11562 if (varTypeIsStruct(op1))
11564 op1 = impAssignStruct(op1, op2, stelemClsHnd, (unsigned)CHECK_SPILL_ALL);
11568 op2 = impImplicitR4orR8Cast(op2, op1->TypeGet());
11569 op1 = gtNewAssignNode(op1, op2);
11572 /* Mark the expression as containing an assignment */
11574 op1->gtFlags |= GTF_ASG;
11585 case CEE_ADD_OVF_UN:
11593 goto MATH_OP2_FLAGS;
11602 case CEE_SUB_OVF_UN:
11610 goto MATH_OP2_FLAGS;
11614 goto MATH_MAYBE_CALL_NO_OVF;
11619 case CEE_MUL_OVF_UN:
11626 goto MATH_MAYBE_CALL_OVF;
11628 // Other binary math operations
11632 goto MATH_MAYBE_CALL_NO_OVF;
11636 goto MATH_MAYBE_CALL_NO_OVF;
11640 goto MATH_MAYBE_CALL_NO_OVF;
11644 goto MATH_MAYBE_CALL_NO_OVF;
11646 MATH_MAYBE_CALL_NO_OVF:
11648 MATH_MAYBE_CALL_OVF:
11649 // Morpher has some complex logic about when to turn different
11650 // typed nodes on different platforms into helper calls. We
11651 // need to either duplicate that logic here, or just
11652 // pessimistically make all the nodes large enough to become
11653 // call nodes. Since call nodes aren't that much larger and
11654 // these opcodes are infrequent enough I chose the latter.
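// For illustration (added note): a typical case is 64-bit division or modulus on a
// 32-bit target, which morph usually rewrites into a JIT helper call; sizing these
// nodes as GT_CALL up front lets that rewrite happen in place.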
11656 goto MATH_OP2_FLAGS;
11668 MATH_OP2: // For default values of 'ovfl' and 'callNode'
11673 MATH_OP2_FLAGS: // If 'ovfl' and 'callNode' have already been set
11675 /* Pull two values and push back the result */
11677 if (tiVerificationNeeded)
11679 const typeInfo& tiOp1 = impStackTop(1).seTypeInfo;
11680 const typeInfo& tiOp2 = impStackTop().seTypeInfo;
11682 Verify(tiCompatibleWith(tiOp1, tiOp2, true), "different arg type");
11683 if (oper == GT_ADD || oper == GT_DIV || oper == GT_SUB || oper == GT_MUL || oper == GT_MOD)
11685 Verify(tiOp1.IsNumberType(), "not number");
11689 Verify(tiOp1.IsIntegerType(), "not integer");
11692 Verify(!ovfl || tiOp1.IsIntegerType(), "not integer");
11696 #ifdef _TARGET_64BIT_
11697 if (tiOp2.IsNativeIntType())
11701 #endif // _TARGET_64BIT_
11704 op2 = impPopStack().val;
11705 op1 = impPopStack().val;
11707 #if !CPU_HAS_FP_SUPPORT
11708 if (varTypeIsFloating(op1->gtType))
11713 /* Can't do arithmetic with references */
11714 assertImp(genActualType(op1->TypeGet()) != TYP_REF && genActualType(op2->TypeGet()) != TYP_REF);
11716 // Change both to TYP_I_IMPL (impBashVarAddrsToI won't change it if it's a true byref, only
11717 // if it is in the stack)
11718 impBashVarAddrsToI(op1, op2);
11720 type = impGetByRefResultType(oper, uns, &op1, &op2);
11722 assert(!ovfl || !varTypeIsFloating(op1->gtType));
11724 /* Special case: "int+0", "int-0", "int*1", "int/1" */
11726 if (op2->gtOper == GT_CNS_INT)
11728 if ((op2->IsIntegralConst(0) && (oper == GT_ADD || oper == GT_SUB)) ||
11729 (op2->IsIntegralConst(1) && (oper == GT_MUL || oper == GT_DIV)))
11732 impPushOnStack(op1, tiRetVal);
11737 #if !FEATURE_X87_DOUBLES
11738 // We can generate a TYP_FLOAT operation that has a TYP_DOUBLE operand
11740 if (varTypeIsFloating(type) && varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType))
11742 if (op1->TypeGet() != type)
11744 // We insert a cast of op1 to 'type'
11745 op1 = gtNewCastNode(type, op1, false, type);
11747 if (op2->TypeGet() != type)
11749 // We insert a cast of op2 to 'type'
11750 op2 = gtNewCastNode(type, op2, false, type);
11753 #endif // !FEATURE_X87_DOUBLES
11755 #if SMALL_TREE_NODES
11758 /* These operators can later be transformed into 'GT_CALL' */
11760 assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MUL]);
11761 #ifndef _TARGET_ARM_
11762 assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_DIV]);
11763 assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UDIV]);
11764 assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MOD]);
11765 assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UMOD]);
11767 // It's tempting to use LargeOpOpcode() here, but this logic is *not* saying
11768 // that we'll need to transform into a general large node, but rather specifically
11769 // to a call: by doing it this way, things keep working if there are multiple sizes,
11770 // and a CALL is no longer the largest.
11771 // That said, as of now it *is* a large node, so we'll do this with an assert rather than a runtime check.
11773 assert(GenTree::s_gtNodeSizes[GT_CALL] == TREE_NODE_SZ_LARGE);
11774 op1 = new (this, GT_CALL) GenTreeOp(oper, type, op1, op2 DEBUGARG(/*largeNode*/ true));
11777 #endif // SMALL_TREE_NODES
11779 op1 = gtNewOperNode(oper, type, op1, op2);
11782 /* Special case: integer/long division may throw an exception */
11784 if (varTypeIsIntegral(op1->TypeGet()) && op1->OperMayThrow(this))
11786 op1->gtFlags |= GTF_EXCEPT;
11791 assert(oper == GT_ADD || oper == GT_SUB || oper == GT_MUL);
11792 if (ovflType != TYP_UNKNOWN)
11794 op1->gtType = ovflType;
11796 op1->gtFlags |= (GTF_EXCEPT | GTF_OVERFLOW);
11799 op1->gtFlags |= GTF_UNSIGNED;
11803 impPushOnStack(op1, tiRetVal);
11818 if (tiVerificationNeeded)
11820 const typeInfo& tiVal = impStackTop(1).seTypeInfo;
11821 const typeInfo& tiShift = impStackTop(0).seTypeInfo;
11822 Verify(tiVal.IsIntegerType() && tiShift.IsType(TI_INT), "Bad shift args");
11825 op2 = impPopStack().val;
11826 op1 = impPopStack().val; // operand to be shifted
11827 impBashVarAddrsToI(op1, op2);
11829 type = genActualType(op1->TypeGet());
11830 op1 = gtNewOperNode(oper, type, op1, op2);
11832 impPushOnStack(op1, tiRetVal);
11836 if (tiVerificationNeeded)
11838 tiRetVal = impStackTop().seTypeInfo;
11839 Verify(tiRetVal.IsIntegerType(), "bad int value");
11842 op1 = impPopStack().val;
11843 impBashVarAddrsToI(op1, nullptr);
11844 type = genActualType(op1->TypeGet());
11845 impPushOnStack(gtNewOperNode(GT_NOT, type, op1), tiRetVal);
11849 if (tiVerificationNeeded)
11851 tiRetVal = impStackTop().seTypeInfo;
11852 Verify(tiRetVal.IsType(TI_DOUBLE), "bad R value");
11854 op1 = impPopStack().val;
11855 type = op1->TypeGet();
11856 op1 = gtNewOperNode(GT_CKFINITE, type, op1);
11857 op1->gtFlags |= GTF_EXCEPT;
11859 impPushOnStack(op1, tiRetVal);
11864 val = getI4LittleEndian(codeAddr); // jump distance
11865 jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int32)) + val);
11869 val = getI1LittleEndian(codeAddr); // jump distance
11870 jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int8)) + val);
11874 if (compIsForInlining())
11876 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_LEAVE);
11880 JITDUMP(" %04X", jmpAddr);
11881 if (block->bbJumpKind != BBJ_LEAVE)
11883 impResetLeaveBlock(block, jmpAddr);
11886 assert(jmpAddr == block->bbJumpDest->bbCodeOffs);
11887 impImportLeave(block);
11888 impNoteBranchOffs();
11894 jmpDist = (sz == 1) ? getI1LittleEndian(codeAddr) : getI4LittleEndian(codeAddr);
11896 if (compIsForInlining() && jmpDist == 0)
11901 impNoteBranchOffs();
11907 case CEE_BRFALSE_S:
11909 /* Pop the comparand (now there's a neat term) from the stack */
11910 if (tiVerificationNeeded)
11912 typeInfo& tiVal = impStackTop().seTypeInfo;
11913 Verify(tiVal.IsObjRef() || tiVal.IsByRef() || tiVal.IsIntegerType() || tiVal.IsMethod(),
11917 op1 = impPopStack().val;
11918 type = op1->TypeGet();
11920 // brfalse and brtrue are only allowed on I4, refs, and byrefs.
11921 if (!opts.MinOpts() && !opts.compDbgCode && block->bbJumpDest == block->bbNext)
11923 block->bbJumpKind = BBJ_NONE;
11925 if (op1->gtFlags & GTF_GLOB_EFFECT)
11927 op1 = gtUnusedValNode(op1);
11936 if (op1->OperIsCompare())
11938 if (opcode == CEE_BRFALSE || opcode == CEE_BRFALSE_S)
11940 // Flip the sense of the compare
11942 op1 = gtReverseCond(op1);
11947 /* We'll compare against an equally-sized integer 0 */
11948 /* For small types, we always compare against int */
11949 op2 = gtNewZeroConNode(genActualType(op1->gtType));
11951 /* Create the comparison operator and try to fold it */
11953 oper = (opcode == CEE_BRTRUE || opcode == CEE_BRTRUE_S) ? GT_NE : GT_EQ;
11954 op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
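// For illustration (added note): for a non-relop operand, e.g.
//
//     ldloc.0
//     brtrue L
//
// the importer builds roughly JTRUE(NE(local, 0)); brfalse uses GT_EQ instead, and an
// operand that is already a compare simply has its sense reversed above.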
11961 /* Fold comparison if we can */
11963 op1 = gtFoldExpr(op1);
11965 /* Try to fold the really simple cases like 'iconst *, ifne/ifeq'*/
11966 /* Don't make any blocks unreachable in import only mode */
11968 if ((op1->gtOper == GT_CNS_INT) && !compIsForImportOnly())
11970 /* gtFoldExpr() should prevent this as we don't want to make any blocks
11971 unreachable under compDbgCode */
11972 assert(!opts.compDbgCode);
11974 BBjumpKinds foldedJumpKind = (BBjumpKinds)(op1->gtIntCon.gtIconVal ? BBJ_ALWAYS : BBJ_NONE);
11975 assertImp((block->bbJumpKind == BBJ_COND) // normal case
11976 || (block->bbJumpKind == foldedJumpKind)); // this can happen if we are reimporting the
11977 // block for the second time
11979 block->bbJumpKind = foldedJumpKind;
11983 if (op1->gtIntCon.gtIconVal)
11985 printf("\nThe conditional jump becomes an unconditional jump to BB%02u\n",
11986 block->bbJumpDest->bbNum);
11990 printf("\nThe block falls through into the next BB%02u\n", block->bbNext->bbNum);
11997 op1 = gtNewOperNode(GT_JTRUE, TYP_VOID, op1);
11999 /* GT_JTRUE is handled specially for non-empty stacks. See 'addStmt'
12000 in impImportBlock(block). For correct line numbers, spill stack. */
12002 if (opts.compDbgCode && impCurStmtOffs != BAD_IL_OFFSET)
12004 impSpillStackEnsure(true);
12031 if (tiVerificationNeeded)
12033 verVerifyCond(impStackTop(1).seTypeInfo, impStackTop().seTypeInfo, opcode);
12034 tiRetVal = typeInfo(TI_INT);
12037 op2 = impPopStack().val;
12038 op1 = impPopStack().val;
12040 #ifdef _TARGET_64BIT_
12041 if (varTypeIsI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_INT))
12043 op2 = gtNewCastNode(TYP_I_IMPL, op2, uns, uns ? TYP_U_IMPL : TYP_I_IMPL);
12045 else if (varTypeIsI(op2->TypeGet()) && (genActualType(op1->TypeGet()) == TYP_INT))
12047 op1 = gtNewCastNode(TYP_I_IMPL, op1, uns, uns ? TYP_U_IMPL : TYP_I_IMPL);
12049 #endif // _TARGET_64BIT_
12051 assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
12052 varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet()) ||
12053 varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
12055 /* Create the comparison node */
12057 op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
12059 /* TODO: setting both flags when only one is appropriate */
12060 if (opcode == CEE_CGT_UN || opcode == CEE_CLT_UN)
12062 op1->gtFlags |= GTF_RELOP_NAN_UN | GTF_UNSIGNED;
12065 // Fold result, if possible.
12066 op1 = gtFoldExpr(op1);
12068 impPushOnStack(op1, tiRetVal);
12074 goto CMP_2_OPs_AND_BR;
12079 goto CMP_2_OPs_AND_BR;
12084 goto CMP_2_OPs_AND_BR_UN;
12089 goto CMP_2_OPs_AND_BR;
12094 goto CMP_2_OPs_AND_BR_UN;
12099 goto CMP_2_OPs_AND_BR;
12104 goto CMP_2_OPs_AND_BR_UN;
12109 goto CMP_2_OPs_AND_BR;
12114 goto CMP_2_OPs_AND_BR_UN;
12119 goto CMP_2_OPs_AND_BR_UN;
12121 CMP_2_OPs_AND_BR_UN:
12124 goto CMP_2_OPs_AND_BR_ALL;
12128 goto CMP_2_OPs_AND_BR_ALL;
12129 CMP_2_OPs_AND_BR_ALL:
12131 if (tiVerificationNeeded)
12133 verVerifyCond(impStackTop(1).seTypeInfo, impStackTop().seTypeInfo, opcode);
12136 /* Pull two values */
12137 op2 = impPopStack().val;
12138 op1 = impPopStack().val;
12140 #ifdef _TARGET_64BIT_
12141 if ((op1->TypeGet() == TYP_I_IMPL) && (genActualType(op2->TypeGet()) == TYP_INT))
12143 op2 = gtNewCastNode(TYP_I_IMPL, op2, uns, uns ? TYP_U_IMPL : TYP_I_IMPL);
12145 else if ((op2->TypeGet() == TYP_I_IMPL) && (genActualType(op1->TypeGet()) == TYP_INT))
12147 op1 = gtNewCastNode(TYP_I_IMPL, op1, uns, uns ? TYP_U_IMPL : TYP_I_IMPL);
12149 #endif // _TARGET_64BIT_
12151 assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
12152 varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet()) ||
12153 varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
12155 if (!opts.MinOpts() && !opts.compDbgCode && block->bbJumpDest == block->bbNext)
12157 block->bbJumpKind = BBJ_NONE;
12159 if (op1->gtFlags & GTF_GLOB_EFFECT)
12161 impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
12162 "Branch to next Optimization, op1 side effect"));
12163 impAppendTree(gtUnusedValNode(op1), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
12165 if (op2->gtFlags & GTF_GLOB_EFFECT)
12167 impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
12168 "Branch to next Optimization, op2 side effect"));
12169 impAppendTree(gtUnusedValNode(op2), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
12173 if ((op1->gtFlags | op2->gtFlags) & GTF_GLOB_EFFECT)
12175 impNoteLastILoffs();
12180 #if !FEATURE_X87_DOUBLES
12181 // We can generate a compare of different-sized floating point op1 and op2
12182 // We insert a cast
12184 if (varTypeIsFloating(op1->TypeGet()))
12186 if (op1->TypeGet() != op2->TypeGet())
12188 assert(varTypeIsFloating(op2->TypeGet()));
12190 // say op1=double, op2=float. To avoid loss of precision
12191 // while comparing, op2 is converted to double and double
12192 // comparison is done.
12193 if (op1->TypeGet() == TYP_DOUBLE)
12195 // We insert a cast of op2 to TYP_DOUBLE
12196 op2 = gtNewCastNode(TYP_DOUBLE, op2, false, TYP_DOUBLE);
12198 else if (op2->TypeGet() == TYP_DOUBLE)
12200 // We insert a cast of op1 to TYP_DOUBLE
12201 op1 = gtNewCastNode(TYP_DOUBLE, op1, false, TYP_DOUBLE);
12205 #endif // !FEATURE_X87_DOUBLES
12207 /* Create and append the operator */
12209 op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
12213 op1->gtFlags |= GTF_UNSIGNED;
12218 op1->gtFlags |= GTF_RELOP_NAN_UN;
12224 assert(!compIsForInlining());
12226 if (tiVerificationNeeded)
12228 Verify(impStackTop().seTypeInfo.IsType(TI_INT), "Bad switch val");
12230 /* Pop the switch value off the stack */
12231 op1 = impPopStack().val;
12232 assertImp(genActualTypeIsIntOrI(op1->TypeGet()));
12234 /* We can create a switch node */
12236 op1 = gtNewOperNode(GT_SWITCH, TYP_VOID, op1);
12238 val = (int)getU4LittleEndian(codeAddr);
12239 codeAddr += 4 + val * 4; // skip over the switch-table
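// For illustration (added note): the switch operand is a 4-byte case count followed by
// that many 4-byte relative jump offsets, hence the 4 + val * 4 bytes skipped here; the
// jump targets themselves were already handled when the basic blocks were built, so only
// the GT_SWITCH value tree is created at this point.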
12243 /************************** Casting OPCODES ***************************/
12245 case CEE_CONV_OVF_I1:
12248 case CEE_CONV_OVF_I2:
12249 lclTyp = TYP_SHORT;
12251 case CEE_CONV_OVF_I:
12252 lclTyp = TYP_I_IMPL;
12254 case CEE_CONV_OVF_I4:
12257 case CEE_CONV_OVF_I8:
12261 case CEE_CONV_OVF_U1:
12262 lclTyp = TYP_UBYTE;
12264 case CEE_CONV_OVF_U2:
12265 lclTyp = TYP_USHORT;
12267 case CEE_CONV_OVF_U:
12268 lclTyp = TYP_U_IMPL;
12270 case CEE_CONV_OVF_U4:
12273 case CEE_CONV_OVF_U8:
12274 lclTyp = TYP_ULONG;
12277 case CEE_CONV_OVF_I1_UN:
12280 case CEE_CONV_OVF_I2_UN:
12281 lclTyp = TYP_SHORT;
12283 case CEE_CONV_OVF_I_UN:
12284 lclTyp = TYP_I_IMPL;
12286 case CEE_CONV_OVF_I4_UN:
12289 case CEE_CONV_OVF_I8_UN:
12293 case CEE_CONV_OVF_U1_UN:
12294 lclTyp = TYP_UBYTE;
12296 case CEE_CONV_OVF_U2_UN:
12297 lclTyp = TYP_USHORT;
12299 case CEE_CONV_OVF_U_UN:
12300 lclTyp = TYP_U_IMPL;
12302 case CEE_CONV_OVF_U4_UN:
12305 case CEE_CONV_OVF_U8_UN:
12306 lclTyp = TYP_ULONG;
12311 goto CONV_OVF_COMMON;
12314 goto CONV_OVF_COMMON;
12324 lclTyp = TYP_SHORT;
12327 lclTyp = TYP_I_IMPL;
12337 lclTyp = TYP_UBYTE;
12340 lclTyp = TYP_USHORT;
12342 #if (REGSIZE_BYTES == 8)
12344 lclTyp = TYP_U_IMPL;
12348 lclTyp = TYP_U_IMPL;
12355 lclTyp = TYP_ULONG;
12359 lclTyp = TYP_FLOAT;
12362 lclTyp = TYP_DOUBLE;
12365 case CEE_CONV_R_UN:
12366 lclTyp = TYP_DOUBLE;
12380 // just check that we have a number on the stack
12381 if (tiVerificationNeeded)
12383 const typeInfo& tiVal = impStackTop().seTypeInfo;
12384 Verify(tiVal.IsNumberType(), "bad arg");
12386 #ifdef _TARGET_64BIT_
12387 bool isNative = false;
12391 case CEE_CONV_OVF_I:
12392 case CEE_CONV_OVF_I_UN:
12394 case CEE_CONV_OVF_U:
12395 case CEE_CONV_OVF_U_UN:
12399 // leave 'isNative' = false;
12404 tiRetVal = typeInfo::nativeInt();
12407 #endif // _TARGET_64BIT_
12409 tiRetVal = typeInfo(lclTyp).NormaliseForStack();
12413 // Only conversions from FLOAT or DOUBLE to an integer type,
12414 // and conversions from ULONG (or LONG on ARM) to DOUBLE, are morphed to calls
12416 if (varTypeIsFloating(lclTyp))
12418 callNode = varTypeIsLong(impStackTop().val) || uns // uint->dbl gets turned into uint->long->dbl
12419 #ifdef _TARGET_64BIT_
12420 // TODO-ARM64-Bug?: This was AMD64; I enabled it for ARM64 also. OK?
12421 // TYP_BYREF could be used as TYP_I_IMPL which is long.
12422 // TODO-CQ: remove this when we lower casts long/ulong --> float/double
12423 // and generate SSE2 code instead of going through helper calls.
12424 || (impStackTop().val->TypeGet() == TYP_BYREF)
12430 callNode = varTypeIsFloating(impStackTop().val->TypeGet());
12433 // At this point uns, ovfl, callNode are all set
12435 op1 = impPopStack().val;
12436 impBashVarAddrsToI(op1);
12438 if (varTypeIsSmall(lclTyp) && !ovfl && op1->gtType == TYP_INT && op1->gtOper == GT_AND)
12440 op2 = op1->gtOp.gtOp2;
12442 if (op2->gtOper == GT_CNS_INT)
12444 ssize_t ival = op2->gtIntCon.gtIconVal;
12445 ssize_t mask, umask;
12461 assert(!"unexpected type");
12465 if (((ival & umask) == ival) || ((ival & mask) == ival && uns))
12467 /* Toss the cast, it's a waste of time */
12469 impPushOnStack(op1, tiRetVal);
12472 else if (ival == mask)
12474 /* Toss the masking, it's a waste of time, since
12475 we sign-extend from the small value anyway */
12477 op1 = op1->gtOp.gtOp1;
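// For illustration (added note): e.g. "(x & 0x7F)" followed by conv.i1 drops the cast
// entirely, since the mask already guarantees the value fits in the small type; whereas
// "(x & 0xFF)" followed by conv.i1 drops the AND and keeps the cast, since the small-typed
// cast sign-extends from the same low bits anyway.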
12482 /* The 'op2' sub-operand of a cast is the 'real' type number,
12483 since the result of a cast to one of the 'small' integer
12484 types is an integer.
12487 type = genActualType(lclTyp);
12489 #if SMALL_TREE_NODES
12492 op1 = gtNewCastNodeL(type, op1, uns, lclTyp);
12495 #endif // SMALL_TREE_NODES
12497 op1 = gtNewCastNode(type, op1, uns, lclTyp);
12502 op1->gtFlags |= (GTF_OVERFLOW | GTF_EXCEPT);
12504 impPushOnStack(op1, tiRetVal);
12508 if (tiVerificationNeeded)
12510 tiRetVal = impStackTop().seTypeInfo;
12511 Verify(tiRetVal.IsNumberType(), "Bad arg");
12514 op1 = impPopStack().val;
12515 impBashVarAddrsToI(op1, nullptr);
12516 impPushOnStack(gtNewOperNode(GT_NEG, genActualType(op1->gtType), op1), tiRetVal);
12521 /* Pull the top value from the stack */
12523 StackEntry se = impPopStack();
12524 clsHnd = se.seTypeInfo.GetClassHandle();
12527 /* Get hold of the type of the value being duplicated */
12529 lclTyp = genActualType(op1->gtType);
12531 /* Does the value have any side effects? */
12533 if ((op1->gtFlags & GTF_SIDE_EFFECT) || opts.compDbgCode)
12535 // Since we are throwing away the value, just normalize
12536 // it to its address. This is more efficient.
12538 if (varTypeIsStruct(op1))
12540 #ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
12541 // Non-calls, such as obj or ret_expr, have to go through this.
12542 // Calls with large struct return value have to go through this.
12543 // Helper calls with small struct return value also have to go
12544 // through this since they do not follow Unix calling convention.
12545 if (op1->gtOper != GT_CALL || !IsMultiRegReturnedType(clsHnd) ||
12546 op1->AsCall()->gtCallType == CT_HELPER)
12547 #endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
12549 op1 = impGetStructAddr(op1, clsHnd, (unsigned)CHECK_SPILL_ALL, false);
12553 // If op1 is non-overflow cast, throw it away since it is useless.
12554 // Another reason for throwing away the useless cast is in the context of
12555 // implicit tail calls when the operand of pop is GT_CAST(GT_CALL(..)).
12556 // The cast gets added as part of importing GT_CALL, which gets in the way
12557 // of fgMorphCall() on the forms of tail call nodes that we assert.
12558 if ((op1->gtOper == GT_CAST) && !op1->gtOverflow())
12560 op1 = op1->gtOp.gtOp1;
12563 // If 'op1' is an expression, create an assignment node.
12564 // Helps analyses (like CSE) to work fine.
12566 if (op1->gtOper != GT_CALL)
12568 op1 = gtUnusedValNode(op1);
12571 /* Append the value to the tree list */
12575 /* No side effects - just throw the <BEEP> thing away */
12581 if (tiVerificationNeeded)
12584 // Dup could start the beginning of a delegate creation sequence, so remember that
12584 delegateCreateStart = codeAddr - 1;
12588 // If the expression to dup is simple, just clone it.
12589 // Otherwise spill it to a temp, and reload the temp twice.
12591 StackEntry se = impPopStack();
12592 GenTree* tree = se.val;
12593 tiRetVal = se.seTypeInfo;
12596 if (!opts.compDbgCode && !op1->IsIntegralConst(0) && !op1->IsFPZero() && !op1->IsLocal())
12598 const unsigned tmpNum = lvaGrabTemp(true DEBUGARG("dup spill"));
12599 impAssignTempGen(tmpNum, op1, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_ALL);
12600 var_types type = genActualType(lvaTable[tmpNum].TypeGet());
12601 op1 = gtNewLclvNode(tmpNum, type);
12603 // Propagate type info to the temp from the stack and the original tree
12604 if (type == TYP_REF)
12606 lvaSetClass(tmpNum, tree, tiRetVal.GetClassHandle());
12610 op1 = impCloneExpr(op1, &op2, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_ALL,
12611 nullptr DEBUGARG("DUP instruction"));
12613 assert(!(op1->gtFlags & GTF_GLOB_EFFECT) && !(op2->gtFlags & GTF_GLOB_EFFECT));
12614 impPushOnStack(op1, tiRetVal);
12615 impPushOnStack(op2, tiRetVal);
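// For illustration (added note): for a local variable or a zero constant, dup just clones
// the tree and pushes both copies; anything more complex is first assigned to a fresh temp
// so its side effects happen exactly once, and two loads of that temp are pushed instead.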
12623 lclTyp = TYP_SHORT;
12632 lclTyp = TYP_I_IMPL;
12634 case CEE_STIND_REF:
12638 lclTyp = TYP_FLOAT;
12641 lclTyp = TYP_DOUBLE;
12645 if (tiVerificationNeeded)
12647 typeInfo instrType(lclTyp);
12648 #ifdef _TARGET_64BIT_
12649 if (opcode == CEE_STIND_I)
12651 instrType = typeInfo::nativeInt();
12653 #endif // _TARGET_64BIT_
12654 verVerifySTIND(impStackTop(1).seTypeInfo, impStackTop(0).seTypeInfo, instrType);
12658 compUnsafeCastUsed = true; // Have to go conservative
12663 op2 = impPopStack().val; // value to store
12664 op1 = impPopStack().val; // address to store to
12666 // you can indirect off of a TYP_I_IMPL (if we are in C) or a BYREF
12667 assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
12669 impBashVarAddrsToI(op1, op2);
12671 op2 = impImplicitR4orR8Cast(op2, lclTyp);
12673 #ifdef _TARGET_64BIT_
12674 // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
12675 if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType))
12677 op2->gtType = TYP_I_IMPL;
12681 // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
12683 if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT))
12685 assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
12686 op2 = gtNewCastNode(TYP_INT, op2, false, TYP_INT);
12688 // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
12690 if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT))
12692 assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
12693 op2 = gtNewCastNode(TYP_I_IMPL, op2, false, TYP_I_IMPL);
12696 #endif // _TARGET_64BIT_
12698 if (opcode == CEE_STIND_REF)
12700 // STIND_REF can be used to store TYP_INT, TYP_I_IMPL, TYP_REF, or TYP_BYREF
12701 assertImp(varTypeIsIntOrI(op2->gtType) || varTypeIsGC(op2->gtType));
12702 lclTyp = genActualType(op2->TypeGet());
12705 // Check target type.
12707 if (op2->gtType == TYP_BYREF || lclTyp == TYP_BYREF)
12709 if (op2->gtType == TYP_BYREF)
12711 assertImp(lclTyp == TYP_BYREF || lclTyp == TYP_I_IMPL);
12713 else if (lclTyp == TYP_BYREF)
12715 assertImp(op2->gtType == TYP_BYREF || varTypeIsIntOrI(op2->gtType));
12720 assertImp(genActualType(op2->gtType) == genActualType(lclTyp) ||
12721 ((lclTyp == TYP_I_IMPL) && (genActualType(op2->gtType) == TYP_INT)) ||
12722 (varTypeIsFloating(op2->gtType) && varTypeIsFloating(lclTyp)));
12726 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
12728 // stind could point anywhere, for example a boxed class static int
12729 op1->gtFlags |= GTF_IND_TGTANYWHERE;
12731 if (prefixFlags & PREFIX_VOLATILE)
12733 assert(op1->OperGet() == GT_IND);
12734 op1->gtFlags |= GTF_DONT_CSE; // Can't CSE a volatile
12735 op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
12736 op1->gtFlags |= GTF_IND_VOLATILE;
12739 if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp))
12741 assert(op1->OperGet() == GT_IND);
12742 op1->gtFlags |= GTF_IND_UNALIGNED;
12745 op1 = gtNewAssignNode(op1, op2);
12746 op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
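// For illustration (added note): the net result for a stind is roughly
// ASG(IND<type>(addr), value), with the indirection marked GTF_IND_TGTANYWHERE (plus any
// volatile/unaligned flags from prefixes), so later phases treat it as a store through an
// arbitrary pointer.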
12748 // Spill side-effects AND global-data-accesses
12749 if (verCurrentState.esStackDepth > 0)
12751 impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STIND"));
12760 lclTyp = TYP_SHORT;
12769 case CEE_LDIND_REF:
12773 lclTyp = TYP_I_IMPL;
12776 lclTyp = TYP_FLOAT;
12779 lclTyp = TYP_DOUBLE;
12782 lclTyp = TYP_UBYTE;
12785 lclTyp = TYP_USHORT;
12789 if (tiVerificationNeeded)
12791 typeInfo lclTiType(lclTyp);
12792 #ifdef _TARGET_64BIT_
12793 if (opcode == CEE_LDIND_I)
12795 lclTiType = typeInfo::nativeInt();
12797 #endif // _TARGET_64BIT_
12798 tiRetVal = verVerifyLDIND(impStackTop().seTypeInfo, lclTiType);
12799 tiRetVal.NormaliseForStack();
12803 compUnsafeCastUsed = true; // Have to go conservative
12808 op1 = impPopStack().val; // address to load from
12809 impBashVarAddrsToI(op1);
12811 #ifdef _TARGET_64BIT_
12812 // Allow an upcast of op1 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
12814 if (genActualType(op1->gtType) == TYP_INT)
12816 assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
12817 op1 = gtNewCastNode(TYP_I_IMPL, op1, false, TYP_I_IMPL);
12821 assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
12823 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
12825 // ldind could point anywhere, for example a boxed class static int
12826 op1->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
12828 if (prefixFlags & PREFIX_VOLATILE)
12830 assert(op1->OperGet() == GT_IND);
12831 op1->gtFlags |= GTF_DONT_CSE; // Can't CSE a volatile
12832 op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
12833 op1->gtFlags |= GTF_IND_VOLATILE;
12836 if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp))
12838 assert(op1->OperGet() == GT_IND);
12839 op1->gtFlags |= GTF_IND_UNALIGNED;
12842 impPushOnStack(op1, tiRetVal);
12846 case CEE_UNALIGNED:
12849 val = getU1LittleEndian(codeAddr);
12851 JITDUMP(" %u", val);
12852 if ((val != 1) && (val != 2) && (val != 4))
12854 BADCODE("Alignment unaligned. must be 1, 2, or 4");
12857 Verify(!(prefixFlags & PREFIX_UNALIGNED), "Multiple unaligned. prefixes");
12858 prefixFlags |= PREFIX_UNALIGNED;
12860 impValidateMemoryAccessOpcode(codeAddr, codeEndp, false);
12863 opcode = (OPCODE)getU1LittleEndian(codeAddr);
12864 opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
12865 codeAddr += sizeof(__int8);
12866 goto DECODE_OPCODE;
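// For illustration (added note): the unaligned. prefix is encoded as the prefix byte, a
// one-byte alignment operand (1, 2, or 4), and then the actual memory opcode, e.g.
// "unaligned. 1  ldind.i4"; after recording PREFIX_UNALIGNED we loop back and decode that
// following opcode.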
12870 Verify(!(prefixFlags & PREFIX_VOLATILE), "Multiple volatile. prefixes");
12871 prefixFlags |= PREFIX_VOLATILE;
12873 impValidateMemoryAccessOpcode(codeAddr, codeEndp, true);
12880 // Need to do a lookup here so that we perform an access check
12881 // and do a NOWAY if protections are violated
12882 _impResolveToken(CORINFO_TOKENKIND_Method);
12884 JITDUMP(" %08X", resolvedToken.token);
12886 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef*/,
12887 addVerifyFlag(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN)),
12890 // This check really only applies to intrinsic Array.Address methods
12891 if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE)
12893 NO_WAY("Currently do not support LDFTN of Parameterized functions");
12896 // Do this before DO_LDFTN since CEE_LDVIRTFTN does it on its own.
12897 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12899 if (tiVerificationNeeded)
12901 // LDFTN could start the beginning of a delegate creation sequence, so remember that
12902 delegateCreateStart = codeAddr - 2;
12904 // check any constraints on the callee's class and type parameters
12905 VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
12906 "method has unsatisfied class constraints");
12907 VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(resolvedToken.hClass,
12908 resolvedToken.hMethod),
12909 "method has unsatisfied method constraints");
12911 mflags = callInfo.verMethodFlags;
12912 Verify(!(mflags & CORINFO_FLG_CONSTRUCTOR), "LDFTN on a constructor");
12916 op1 = impMethodPointer(&resolvedToken, &callInfo);
12917 if (compDonotInline())
12922 CORINFO_RESOLVED_TOKEN* heapToken = impAllocateToken(resolvedToken);
12923 impPushOnStack(op1, typeInfo(heapToken));
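// For illustration (added note): the canonical consumer of this pattern is delegate
// construction, e.g.
//
//     ldftn  instance void C::M()                                    // 'C::M' is a placeholder
//     newobj instance void SomeDelegate::.ctor(object, native int)   // placeholder delegate type
//
// which is why the start of the sequence is remembered above for the verifier.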
12928 case CEE_LDVIRTFTN:
12930 /* Get the method token */
12932 _impResolveToken(CORINFO_TOKENKIND_Method);
12934 JITDUMP(" %08X", resolvedToken.token);
12936 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef */,
12937 addVerifyFlag(combine(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN),
12938 CORINFO_CALLINFO_CALLVIRT)),
12941 // This check really only applies to intrinsic Array.Address methods
12942 if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE)
12944 NO_WAY("Currently do not support LDFTN of Parameterized functions");
12947 mflags = callInfo.methodFlags;
12949 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
12951 if (compIsForInlining())
12953 if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL))
12955 compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDVIRTFN_ON_NON_VIRTUAL);
12960 CORINFO_SIG_INFO& ftnSig = callInfo.sig;
12962 if (tiVerificationNeeded)
12965 Verify(ftnSig.hasThis(), "ldvirtftn on a static method");
12966 Verify(!(mflags & CORINFO_FLG_CONSTRUCTOR), "LDVIRTFTN on a constructor");
12968 // JIT32 verifier rejects verifiable ldvirtftn pattern
12969 typeInfo declType =
12970 verMakeTypeInfo(resolvedToken.hClass, true); // Change TI_STRUCT to TI_REF when necessary
12972 typeInfo arg = impStackTop().seTypeInfo;
12973 Verify((arg.IsType(TI_REF) || arg.IsType(TI_NULL)) && tiCompatibleWith(arg, declType, true),
12976 CORINFO_CLASS_HANDLE instanceClassHnd = info.compClassHnd;
12977 if (!(arg.IsType(TI_NULL) || (mflags & CORINFO_FLG_STATIC)))
12979 instanceClassHnd = arg.GetClassHandleForObjRef();
12982 // check any constraints on the method's class and type parameters
12983 VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
12984 "method has unsatisfied class constraints");
12985 VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(resolvedToken.hClass,
12986 resolvedToken.hMethod),
12987 "method has unsatisfied method constraints");
12989 if (mflags & CORINFO_FLG_PROTECTED)
12991 Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
12992 "Accessing protected method through wrong type.");
12996 /* Get the object-ref */
12997 op1 = impPopStack().val;
12998 assertImp(op1->gtType == TYP_REF);
13000 if (opts.IsReadyToRun())
13002 if (callInfo.kind != CORINFO_VIRTUALCALL_LDVIRTFTN)
13004 if (op1->gtFlags & GTF_SIDE_EFFECT)
13006 op1 = gtUnusedValNode(op1);
13007 impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13012 else if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL))
13014 if (op1->gtFlags & GTF_SIDE_EFFECT)
13016 op1 = gtUnusedValNode(op1);
13017 impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13022 GenTree* fptr = impImportLdvirtftn(op1, &resolvedToken, &callInfo);
13023 if (compDonotInline())
13028 CORINFO_RESOLVED_TOKEN* heapToken = impAllocateToken(resolvedToken);
13029 assert(heapToken->tokenType == CORINFO_TOKENKIND_Method);
13030 heapToken->tokenType = CORINFO_TOKENKIND_Ldvirtftn;
13031 impPushOnStack(fptr, typeInfo(heapToken));
13036 case CEE_CONSTRAINED:
13038 assertImp(sz == sizeof(unsigned));
13039 impResolveToken(codeAddr, &constrainedResolvedToken, CORINFO_TOKENKIND_Constrained);
13040 codeAddr += sizeof(unsigned); // prefix instructions must increment codeAddr manually
13041 JITDUMP(" (%08X) ", constrainedResolvedToken.token);
13043 Verify(!(prefixFlags & PREFIX_CONSTRAINED), "Multiple constrained. prefixes");
13044 prefixFlags |= PREFIX_CONSTRAINED;
13047 OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
13048 if (actualOpcode != CEE_CALLVIRT)
13050 BADCODE("constrained. has to be followed by callvirt");
13057 JITDUMP(" readonly.");
13059 Verify(!(prefixFlags & PREFIX_READONLY), "Multiple readonly. prefixes");
13060 prefixFlags |= PREFIX_READONLY;
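// For illustration (added note): readonly. on ldelema requests a controlled-mutability
// (read-only) managed pointer into the array, which lets the runtime skip the array
// covariance/type check that a normal ldelema on an array of reference types would need.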
13063 OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
13064 if (actualOpcode != CEE_LDELEMA && !impOpcodeIsCallOpcode(actualOpcode))
13066 BADCODE("readonly. has to be followed by ldelema or call");
13076 Verify(!(prefixFlags & PREFIX_TAILCALL_EXPLICIT), "Multiple tailcall. prefixes");
13077 prefixFlags |= PREFIX_TAILCALL_EXPLICIT;
13080 OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
13081 if (!impOpcodeIsCallOpcode(actualOpcode))
13083 BADCODE("tailcall. has to be followed by call, callvirt or calli");
13091 /* Since we will implicitly insert newObjThisPtr at the start of the
13092 argument list, spill any GTF_ORDER_SIDEEFF */
13093 impSpillSpecialSideEff();
13095 /* NEWOBJ does not respond to TAIL */
13096 prefixFlags &= ~PREFIX_TAILCALL_EXPLICIT;
13098 /* NEWOBJ does not respond to CONSTRAINED */
13099 prefixFlags &= ~PREFIX_CONSTRAINED;
13101 _impResolveToken(CORINFO_TOKENKIND_NewObj);
13103 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef*/,
13104 addVerifyFlag(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_ALLOWINSTPARAM)),
13107 if (compIsForInlining())
13109 if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
13111 // Check to see if this call violates the boundary.
13112 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_SECURITY);
13117 mflags = callInfo.methodFlags;
13119 if ((mflags & (CORINFO_FLG_STATIC | CORINFO_FLG_ABSTRACT)) != 0)
13121 BADCODE("newobj on static or abstract method");
13124 // Insert the security callout before any actual code is generated
13125 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
13127 // There are three different cases for 'new':
13128 // Object size is variable (depends on arguments):
13129 //   1) Object is an array (arrays are treated specially by the EE)
13130 //   2) Object is some other variable-sized object (e.g. String)
13131 // 3) Class size can be determined beforehand (normal case)
13132 // In the first case we need to call a NEWOBJ helper (multinewarray),
13133 // in the second case we call the constructor with a null 'this' pointer,
13134 // and in the third case we alloc the memory, then call the constructor
13136 clsFlags = callInfo.classFlags;
13137 if (clsFlags & CORINFO_FLG_ARRAY)
13139 if (tiVerificationNeeded)
13141 CORINFO_CLASS_HANDLE elemTypeHnd;
13142 INDEBUG(CorInfoType corType =)
13143 info.compCompHnd->getChildType(resolvedToken.hClass, &elemTypeHnd);
13144 assert(!(elemTypeHnd == nullptr && corType == CORINFO_TYPE_VALUECLASS));
13145 Verify(elemTypeHnd == nullptr ||
13146 !(info.compCompHnd->getClassAttribs(elemTypeHnd) & CORINFO_FLG_CONTAINS_STACK_PTR),
13147 "newarr of byref-like objects");
13148 verVerifyCall(opcode, &resolvedToken, nullptr, ((prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0),
13149 ((prefixFlags & PREFIX_READONLY) != 0), delegateCreateStart, codeAddr - 1,
13150 &callInfo DEBUGARG(info.compFullName));
13152 // Arrays need to call the NEWOBJ helper.
13153 assertImp(clsFlags & CORINFO_FLG_VAROBJSIZE);
13155 impImportNewObjArray(&resolvedToken, &callInfo);
13156 if (compDonotInline())
13164 // At present this can only be String
13165 else if (clsFlags & CORINFO_FLG_VAROBJSIZE)
13167 if (IsTargetAbi(CORINFO_CORERT_ABI))
13169 // The dummy argument does not exist in CoreRT
13170 newObjThisPtr = nullptr;
13174 // This is the case for variable-sized objects that are not
13175 // arrays. In this case, call the constructor with a null 'this'
13177 newObjThisPtr = gtNewIconNode(0, TYP_REF);
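// For illustration (added note): System.String is the usual example here; its
// "constructors" allocate and return the string themselves, so the importer supplies
// a null 'this' and the call's return value becomes the new object.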
13180 /* Remember that this basic block contains 'new' of an object */
13181 block->bbFlags |= BBF_HAS_NEWOBJ;
13182 optMethodFlags |= OMF_HAS_NEWOBJ;
13186 // This is the normal case where the size of the object is
13187 // fixed. Allocate the memory and call the constructor.
13189 // Note: We cannot add a peep to avoid use of temp here
13190 // because we don't have enough interference info to detect when
13191 // sources and the destination interfere, for example: s = new S(ref);
13193 // TODO: Find the correct place to introduce a general
13194 // reverse copy prop for struct return values from newobj or
13195 // any function returning structs.
13197 /* get a temporary for the new object */
13198 lclNum = lvaGrabTemp(true DEBUGARG("NewObj constructor temp"));
13199 if (compDonotInline())
13201 // Fail fast if lvaGrabTemp fails with CALLSITE_TOO_MANY_LOCALS.
13202 assert(compInlineResult->GetObservation() == InlineObservation::CALLSITE_TOO_MANY_LOCALS);
13206 // In the value class case we only need clsHnd for size calcs.
13208 // The lookup of the code pointer will be handled by CALL in this case
13209 if (clsFlags & CORINFO_FLG_VALUECLASS)
13211 if (compIsForInlining())
13213 // If value class has GC fields, inform the inliner. It may choose to
13214 // bail out on the inline.
13215 DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
13216 if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0)
13218 compInlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT);
13219 if (compInlineResult->IsFailure())
13224 // Do further notification in the case where the call site is rare;
13225 // some policies do not track the relative hotness of call sites for
13226 // "always" inline cases.
13227 if (impInlineInfo->iciBlock->isRunRarely())
13229 compInlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT);
13230 if (compInlineResult->IsFailure())
13238 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
13239 unsigned size = info.compCompHnd->getClassSize(resolvedToken.hClass);
13241 if (impIsPrimitive(jitTyp))
13243 lvaTable[lclNum].lvType = JITtype2varType(jitTyp);
13247 // The local variable itself is the allocated space.
13248 // Here we need the unsafe value cls check, since the address of the struct is taken for further use
13249 // and is potentially exploitable.
13250 lvaSetStruct(lclNum, resolvedToken.hClass, true /* unsafe value cls check */);
13252 if (compIsForInlining() || fgStructTempNeedsExplicitZeroInit(lvaTable + lclNum, block))
13254 // Append a tree to zero-out the temp
13255 newObjThisPtr = gtNewLclvNode(lclNum, lvaTable[lclNum].TypeGet());
13257 newObjThisPtr = gtNewBlkOpNode(newObjThisPtr, // Dest
13258 gtNewIconNode(0), // Value
13260 false, // isVolatile
13261 false); // not copyBlock
13262 impAppendTree(newObjThisPtr, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
13265 // Obtain the address of the temp
13267 gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewLclvNode(lclNum, lvaTable[lclNum].TypeGet()));
13271 #ifdef FEATURE_READYTORUN_COMPILER
13272 if (opts.IsReadyToRun())
13274 op1 = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_NEW, TYP_REF);
13275 usingReadyToRunHelper = (op1 != nullptr);
13278 if (!usingReadyToRunHelper)
13281 op1 = impParentClassTokenToHandle(&resolvedToken, nullptr, TRUE);
13282 if (op1 == nullptr)
13283 { // compDonotInline()
13287 // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
13288 // and the newfast call with a single call to a dynamic R2R cell that will:
13289 // 1) Load the context
13290 // 2) Perform the generic dictionary lookup and caching, and generate the appropriate
13292 // 3) Allocate and return the new object
13293 // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
13295 op1 = gtNewAllocObjNode(info.compCompHnd->getNewHelper(&resolvedToken, info.compMethodHnd),
13296 resolvedToken.hClass, TYP_REF, op1);
13299 // Remember that this basic block contains 'new' of an object
13300 block->bbFlags |= BBF_HAS_NEWOBJ;
13301 optMethodFlags |= OMF_HAS_NEWOBJ;
13303 // Append the assignment to the temp/local. Don't need to spill
13304 // at all as we are just calling an EE-Jit helper which can only
13305 // cause an (async) OutOfMemoryException.
13307 // We assign the newly allocated object (by a GT_ALLOCOBJ node)
13308 // to a temp. Note that the pattern "temp = allocObj" is required
13309 // by ObjectAllocator phase to be able to determine GT_ALLOCOBJ nodes
13310 // without exhaustive walk over all expressions.
13312 impAssignTempGen(lclNum, op1, (unsigned)CHECK_SPILL_NONE);
13313 lvaSetClass(lclNum, resolvedToken.hClass, true /* is Exact */);
13315 newObjThisPtr = gtNewLclvNode(lclNum, TYP_REF);
13322 /* CALLI does not respond to CONSTRAINED */
13323 prefixFlags &= ~PREFIX_CONSTRAINED;
13325 if (compIsForInlining())
13327 // CALLI doesn't have a method handle, so assume the worst.
13328 if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
13330 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_CALLI);
13340 // We can't call getCallInfo on the token from a CALLI, but we need it in
13341 // many other places. We unfortunately embed that knowledge here.
13342 if (opcode != CEE_CALLI)
13344 _impResolveToken(CORINFO_TOKENKIND_Method);
13346 eeGetCallInfo(&resolvedToken,
13347 (prefixFlags & PREFIX_CONSTRAINED) ? &constrainedResolvedToken : nullptr,
13348 // this is how impImportCall invokes getCallInfo
13350 combine(combine(CORINFO_CALLINFO_ALLOWINSTPARAM, CORINFO_CALLINFO_SECURITYCHECKS),
13351 (opcode == CEE_CALLVIRT) ? CORINFO_CALLINFO_CALLVIRT
13352 : CORINFO_CALLINFO_NONE)),
13357 // Suppress uninitialized use warning.
13358 memset(&resolvedToken, 0, sizeof(resolvedToken));
13359 memset(&callInfo, 0, sizeof(callInfo));
13361 resolvedToken.token = getU4LittleEndian(codeAddr);
13364 CALL: // memberRef should be set.
13365 // newObjThisPtr should be set for CEE_NEWOBJ
13367 JITDUMP(" %08X", resolvedToken.token);
13368 constraintCall = (prefixFlags & PREFIX_CONSTRAINED) != 0;
13370 bool newBBcreatedForTailcallStress;
13372 newBBcreatedForTailcallStress = false;
13374 if (compIsForInlining())
13376 if (compDonotInline())
13380 // We rule out inlinees with explicit tail calls in fgMakeBasicBlocks.
13381 assert((prefixFlags & PREFIX_TAILCALL_EXPLICIT) == 0);
13385 if (compTailCallStress())
13387 // Have we created a new BB after the "call" instruction in fgMakeBasicBlocks()?
13388 // Tail call stress only recognizes call+ret patterns and forces them to be
13389 // explicit tail prefixed calls. Also fgMakeBasicBlocks() under tail call stress
13390 // doesn't import the 'ret' opcode following the call into the basic block containing
13391 // the call; instead it imports it into a new basic block. Note that fgMakeBasicBlocks()
13392 // is already checking that there is an opcode following call and hence it is
13393 // safe here to read next opcode without bounds check.
13394 newBBcreatedForTailcallStress =
13395 impOpcodeIsCallOpcode(opcode) && // Current opcode is a CALL, (not a CEE_NEWOBJ). So, don't
13396 // make it jump to RET.
13397 (OPCODE)getU1LittleEndian(codeAddr + sz) == CEE_RET; // Next opcode is a CEE_RET
13399 if (newBBcreatedForTailcallStress &&
13400 !(prefixFlags & PREFIX_TAILCALL_EXPLICIT) && // User hasn't set "tail." prefix yet.
13401 verCheckTailCallConstraint(opcode, &resolvedToken,
13402 constraintCall ? &constrainedResolvedToken : nullptr,
13403 true) // Is it legal to do tailcall?
13406 // Stress the tailcall.
13407 JITDUMP(" (Tailcall stress: prefixFlags |= PREFIX_TAILCALL_EXPLICIT)");
13408 prefixFlags |= PREFIX_TAILCALL_EXPLICIT;
13413 // This is split up to avoid goto flow warnings.
13415 isRecursive = !compIsForInlining() && (callInfo.hMethod == info.compMethodHnd);
13417 // Note that when running under tail call stress, a call will be marked as explicit tail prefixed
13418 // hence will not be considered for implicit tail calling.
13419 if (impIsImplicitTailCallCandidate(opcode, codeAddr + sz, codeEndp, prefixFlags, isRecursive))
13421 if (compIsForInlining())
13423 #if FEATURE_TAILCALL_OPT_SHARED_RETURN
13424 // Are we inlining at an implicit tail call site? If so then we can flag
13425 // implicit tail call sites in the inline body. These call sites
13426 // often end up in non BBJ_RETURN blocks, so only flag them when
13427 // we're able to handle shared returns.
13428 if (impInlineInfo->iciCall->IsImplicitTailCall())
13430 JITDUMP(" (Inline Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)");
13431 prefixFlags |= PREFIX_TAILCALL_IMPLICIT;
13433 #endif // FEATURE_TAILCALL_OPT_SHARED_RETURN
13437 JITDUMP(" (Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)");
13438 prefixFlags |= PREFIX_TAILCALL_IMPLICIT;
13442 // Treat this call as tail call for verification only if "tail" prefixed (i.e. explicit tail call).
13443 explicitTailCall = (prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0;
13444 readonlyCall = (prefixFlags & PREFIX_READONLY) != 0;
13446 if (opcode != CEE_CALLI && opcode != CEE_NEWOBJ)
13448 // All calls and delegates need a security callout.
13449 // For delegates, this is the call to the delegate constructor, not the access check on the LD(virt)FTN.
13451 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
13453 #if 0 // DevDiv 410397 - This breaks too many obfuscated apps to do this in an in-place release
13455 // DevDiv 291703 - we need to check for accessibility between the caller of InitializeArray
13456 // and the field it is reading, thus it is now unverifiable to not immediately precede with
13457 // ldtoken <field token>, and we now check accessibility
13458 if ((callInfo.methodFlags & CORINFO_FLG_INTRINSIC) &&
13459 (info.compCompHnd->getIntrinsicID(callInfo.hMethod) == CORINFO_INTRINSIC_InitializeArray))
13461 if (prevOpcode != CEE_LDTOKEN)
13463 Verify(prevOpcode == CEE_LDTOKEN, "Need ldtoken for InitializeArray");
13467 assert(lastLoadToken != NULL);
13468 // Now that we know we have a token, verify that it is accessible for loading
13469 CORINFO_RESOLVED_TOKEN resolvedLoadField;
13470 impResolveToken(lastLoadToken, &resolvedLoadField, CORINFO_TOKENKIND_Field);
13471 eeGetFieldInfo(&resolvedLoadField, CORINFO_ACCESS_INIT_ARRAY, &fieldInfo);
13472 impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
13476 #endif // DevDiv 410397
13479 if (tiVerificationNeeded)
13481 verVerifyCall(opcode, &resolvedToken, constraintCall ? &constrainedResolvedToken : nullptr,
13482 explicitTailCall, readonlyCall, delegateCreateStart, codeAddr - 1,
13483 &callInfo DEBUGARG(info.compFullName));
13486 // Insert delegate callout here.
13487 if (opcode == CEE_NEWOBJ && (mflags & CORINFO_FLG_CONSTRUCTOR) && (clsFlags & CORINFO_FLG_DELEGATE))
13490 // We should do this only if verification is enabled
13491 // If verification is disabled, delegateCreateStart will not be initialized correctly
13492 if (tiVerificationNeeded)
13494 mdMemberRef delegateMethodRef = mdMemberRefNil;
13495 // We should get here only for well formed delegate creation.
13496 assert(verCheckDelegateCreation(delegateCreateStart, codeAddr - 1, delegateMethodRef));
13501 callTyp = impImportCall(opcode, &resolvedToken, constraintCall ? &constrainedResolvedToken : nullptr,
13502 newObjThisPtr, prefixFlags, &callInfo, opcodeOffs);
13503 if (compDonotInline())
13505 // We do not check for failures after lvaGrabTemp; that case is covered by the CoreCLR_13272 issue.
13506 assert((callTyp == TYP_UNDEF) ||
13507 (compInlineResult->GetObservation() == InlineObservation::CALLSITE_TOO_MANY_LOCALS));
13511 if (explicitTailCall || newBBcreatedForTailcallStress) // If newBBcreatedForTailcallStress is true, we
13512 // have created a new BB after the "call"
13513 // instruction in fgMakeBasicBlocks(). So we need to jump to RET regardless.
13515 assert(!compIsForInlining());
13527 BOOL isLoadAddress = (opcode == CEE_LDFLDA || opcode == CEE_LDSFLDA);
13528 BOOL isLoadStatic = (opcode == CEE_LDSFLD || opcode == CEE_LDSFLDA);
13530 /* Get the CP_Fieldref index */
13531 assertImp(sz == sizeof(unsigned));
13533 _impResolveToken(CORINFO_TOKENKIND_Field);
13535 JITDUMP(" %08X", resolvedToken.token);
13537 int aflags = isLoadAddress ? CORINFO_ACCESS_ADDRESS : CORINFO_ACCESS_GET;
13539 GenTree* obj = nullptr;
13540 typeInfo* tiObj = nullptr;
13541 CORINFO_CLASS_HANDLE objType = nullptr; // used for fields
13543 if (opcode == CEE_LDFLD || opcode == CEE_LDFLDA)
13545 tiObj = &impStackTop().seTypeInfo;
13546 StackEntry se = impPopStack();
13547 objType = se.seTypeInfo.GetClassHandle();
13550 if (impIsThis(obj))
13552 aflags |= CORINFO_ACCESS_THIS;
13554 // An optimization for Contextful classes:
13555 // we unwrap the proxy when we have a 'this reference'
13557 if (info.compUnwrapContextful)
13559 aflags |= CORINFO_ACCESS_UNWRAP;
13564 eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo);
13566 // Figure out the type of the member. We always call canAccessField, so you always need this handle.
13568 CorInfoType ciType = fieldInfo.fieldType;
13569 clsHnd = fieldInfo.structType;
13571 lclTyp = JITtype2varType(ciType);
13573 #ifdef _TARGET_AMD64_
13574 noway_assert(varTypeIsIntegralOrI(lclTyp) || varTypeIsFloating(lclTyp) || lclTyp == TYP_STRUCT);
13575 #endif // _TARGET_AMD64_
13577 if (compIsForInlining())
13579 switch (fieldInfo.fieldAccessor)
13581 case CORINFO_FIELD_INSTANCE_HELPER:
13582 case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
13583 case CORINFO_FIELD_STATIC_ADDR_HELPER:
13584 case CORINFO_FIELD_STATIC_TLS:
13586 compInlineResult->NoteFatal(InlineObservation::CALLEE_LDFLD_NEEDS_HELPER);
13589 case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
13590 case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
13591 /* We may be able to inline the field accessors in specific instantiations of generic methods */
13593 compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDFLD_NEEDS_HELPER);
13600 if (!isLoadAddress && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && lclTyp == TYP_STRUCT &&
13603 if ((info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd) == CORINFO_TYPE_UNDEF) &&
13604 !(info.compFlags & CORINFO_FLG_FORCEINLINE))
13606 // Loading a static valuetype field usually will cause a JitHelper to be called
13607 // for the static base. This will bloat the code.
13608 compInlineResult->Note(InlineObservation::CALLEE_LDFLD_STATIC_VALUECLASS);
13610 if (compInlineResult->IsFailure())
13618 tiRetVal = verMakeTypeInfo(ciType, clsHnd);
13621 tiRetVal.MakeByRef();
13625 tiRetVal.NormaliseForStack();
13628 // Perform this check always to ensure that we get field access exceptions even with
13629 // SkipVerification.
13630 impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
13632 if (tiVerificationNeeded)
13634 // You can also pass the unboxed struct to LDFLD
13635 BOOL bAllowPlainValueTypeAsThis = FALSE;
13636 if (opcode == CEE_LDFLD && impIsValueType(tiObj))
13638 bAllowPlainValueTypeAsThis = TRUE;
13641 verVerifyField(&resolvedToken, fieldInfo, tiObj, isLoadAddress, bAllowPlainValueTypeAsThis);
13643 // If we're doing this on a heap object or from a 'safe' byref
13644 // then the result is a safe byref too
13645 if (isLoadAddress) // load address
13647 if (fieldInfo.fieldFlags &
13648 CORINFO_FLG_FIELD_STATIC) // statics marked as safe will have a permanent home
13650 if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_SAFESTATIC_BYREF_RETURN)
13652 tiRetVal.SetIsPermanentHomeByRef();
13655 else if (tiObj->IsObjRef() || tiObj->IsPermanentHomeByRef())
13657 // ldflda of byref is safe if done on a gc object or on a byref with a permanent home
13659 tiRetVal.SetIsPermanentHomeByRef();
13665 // tiVerificationNeeded is false.
13666 // Raise InvalidProgramException if static load accesses non-static field
13667 if (isLoadStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0))
13669 BADCODE("static access on an instance field");
13673 // We are using ldfld/a on a static field. We allow it, but need to evaluate any side effects of obj.
13674 if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr)
13676 if (obj->gtFlags & GTF_SIDE_EFFECT)
13678 obj = gtUnusedValNode(obj);
13679 impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13684 /* Preserve 'small' int types */
13685 if (!varTypeIsSmall(lclTyp))
13687 lclTyp = genActualType(lclTyp);
13690 bool usesHelper = false;
13692 switch (fieldInfo.fieldAccessor)
13694 case CORINFO_FIELD_INSTANCE:
13695 #ifdef FEATURE_READYTORUN_COMPILER
13696 case CORINFO_FIELD_INSTANCE_WITH_BASE:
13699 bool nullcheckNeeded = false;
13701 obj = impCheckForNullPointer(obj);
13703 if (isLoadAddress && (obj->gtType == TYP_BYREF) && fgAddrCouldBeNull(obj))
13705 nullcheckNeeded = true;
13708 // If the object is a struct, what we really want is
13709 // for the field to operate on the address of the struct.
13710 if (!varTypeGCtype(obj->TypeGet()) && impIsValueType(tiObj))
13712 assert(opcode == CEE_LDFLD && objType != nullptr);
13714 obj = impGetStructAddr(obj, objType, (unsigned)CHECK_SPILL_ALL, true);
13717 /* Create the data member node */
13718 op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset, nullcheckNeeded);
13720 #ifdef FEATURE_READYTORUN_COMPILER
13721 if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE)
13723 op1->gtField.gtFieldLookup = fieldInfo.fieldLookup;
13727 op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT);
13729 if (fgAddrCouldBeNull(obj))
13731 op1->gtFlags |= GTF_EXCEPT;
13734 // If gtFldObj is a BYREF then our target is a value class and
13736 // it could point anywhere, for example a boxed class static int
13736 if (obj->gtType == TYP_BYREF)
13738 op1->gtFlags |= GTF_IND_TGTANYWHERE;
13741 DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
13742 if (StructHasOverlappingFields(typeFlags))
13744 op1->gtField.gtFldMayOverlap = true;
13747 // wrap it in an address-of operator if necessary
13750 op1 = gtNewOperNode(GT_ADDR,
13751 (var_types)(varTypeIsGC(obj->TypeGet()) ? TYP_BYREF : TYP_I_IMPL), op1);
13755 if (compIsForInlining() &&
13756 impInlineIsGuaranteedThisDerefBeforeAnySideEffects(nullptr, obj,
13757 impInlineInfo->inlArgInfo))
13759 impInlineInfo->thisDereferencedFirst = true;
13765 case CORINFO_FIELD_STATIC_TLS:
13766 #ifdef _TARGET_X86_
13767 // Legacy TLS access is implemented as an intrinsic on x86 only
13769 /* Create the data member node */
13770 op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset);
13771 op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation
13775 op1 = gtNewOperNode(GT_ADDR, (var_types)TYP_I_IMPL, op1);
13779 fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER;
13784 case CORINFO_FIELD_STATIC_ADDR_HELPER:
13785 case CORINFO_FIELD_INSTANCE_HELPER:
13786 case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
13787 op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp,
13792 case CORINFO_FIELD_STATIC_ADDRESS:
13793 // Replace static read-only fields with a constant if possible
13794 if ((aflags & CORINFO_ACCESS_GET) && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_FINAL) &&
13795 !(fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP) &&
13796 (varTypeIsIntegral(lclTyp) || varTypeIsFloating(lclTyp)))
13798 CorInfoInitClassResult initClassResult =
13799 info.compCompHnd->initClass(resolvedToken.hField, info.compMethodHnd,
13800 impTokenLookupContextHandle);
13802 if (initClassResult & CORINFO_INITCLASS_INITIALIZED)
13804 void** pFldAddr = nullptr;
13806 info.compCompHnd->getFieldAddress(resolvedToken.hField, (void**)&pFldAddr);
13808 // We should always be able to access this static's address directly
13809 assert(pFldAddr == nullptr);
13811 op1 = impImportStaticReadOnlyField(fldAddr, lclTyp);
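// For illustration (added note): e.g. a "static readonly int" whose class constructor
// has already run by the time this code is jitted can be read here and baked into the
// generated code as an integer constant instead of a memory load.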
13818 case CORINFO_FIELD_STATIC_RVA_ADDRESS:
13819 case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
13820 case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
13821 case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
13822 op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo,
13826 case CORINFO_FIELD_INTRINSIC_ZERO:
13828 assert(aflags & CORINFO_ACCESS_GET);
13829 op1 = gtNewIconNode(0, lclTyp);
13834 case CORINFO_FIELD_INTRINSIC_EMPTY_STRING:
13836 assert(aflags & CORINFO_ACCESS_GET);
13839 InfoAccessType iat = info.compCompHnd->emptyStringLiteral(&pValue);
13840 op1 = gtNewStringLiteralNode(iat, pValue);
13845 case CORINFO_FIELD_INTRINSIC_ISLITTLEENDIAN:
13847 assert(aflags & CORINFO_ACCESS_GET);
13849 op1 = gtNewIconNode(0, lclTyp);
13851 op1 = gtNewIconNode(1, lclTyp);
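// For illustration (added note): this intrinsic field (used by BitConverter.IsLittleEndian)
// is folded to a 0/1 constant chosen by the target's endianness, so the endianness check
// disappears entirely from the generated code.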
13858 assert(!"Unexpected fieldAccessor");
13861 if (!isLoadAddress)
13864 if (prefixFlags & PREFIX_VOLATILE)
13866 op1->gtFlags |= GTF_DONT_CSE; // Can't CSE a volatile
13867 op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
13871 assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) ||
13872 (op1->OperGet() == GT_OBJ));
13873 op1->gtFlags |= GTF_IND_VOLATILE;
13877 if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp))
13881 assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) ||
13882 (op1->OperGet() == GT_OBJ));
13883 op1->gtFlags |= GTF_IND_UNALIGNED;
13888 /* Check if the class needs explicit initialization */
13890 if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
13892 GenTree* helperNode = impInitClass(&resolvedToken);
13893 if (compDonotInline())
13897 if (helperNode != nullptr)
13899 op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1);
13904 impPushOnStack(op1, tiRetVal);
13912 BOOL isStoreStatic = (opcode == CEE_STSFLD);
13914 CORINFO_CLASS_HANDLE fieldClsHnd; // class of the field (if it's a ref type)
13916 /* Get the CP_Fieldref index */
13918 assertImp(sz == sizeof(unsigned));
13920 _impResolveToken(CORINFO_TOKENKIND_Field);
13922 JITDUMP(" %08X", resolvedToken.token);
13924 int aflags = CORINFO_ACCESS_SET;
13925 GenTree* obj = nullptr;
13926 typeInfo* tiObj = nullptr;
13929 /* Pull the value from the stack */
13930 StackEntry se = impPopStack();
13932 tiVal = se.seTypeInfo;
13933 clsHnd = tiVal.GetClassHandle();
13935 if (opcode == CEE_STFLD)
13937 tiObj = &impStackTop().seTypeInfo;
13938 obj = impPopStack().val;
13940 if (impIsThis(obj))
13942 aflags |= CORINFO_ACCESS_THIS;
13944 // An optimization for Contextful classes:
13945 // we unwrap the proxy when we have a 'this reference'
13947 if (info.compUnwrapContextful)
13949 aflags |= CORINFO_ACCESS_UNWRAP;
13954 eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo);
13956 // Figure out the type of the member. We always call canAccessField, so you always need this
13958 CorInfoType ciType = fieldInfo.fieldType;
13959 fieldClsHnd = fieldInfo.structType;
13961 lclTyp = JITtype2varType(ciType);
13963 if (compIsForInlining())
13965 /* Is this a 'special' (COM) field? Or a TLS ref static field? A field stored in the GC heap?
13966 * Or a per-inst static? */
13968 switch (fieldInfo.fieldAccessor)
13970 case CORINFO_FIELD_INSTANCE_HELPER:
13971 case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
13972 case CORINFO_FIELD_STATIC_ADDR_HELPER:
13973 case CORINFO_FIELD_STATIC_TLS:
13975 compInlineResult->NoteFatal(InlineObservation::CALLEE_STFLD_NEEDS_HELPER);
13978 case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
13979 case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
13980 /* We may be able to inline the field accessors in specific instantiations of generic
13982 compInlineResult->NoteFatal(InlineObservation::CALLSITE_STFLD_NEEDS_HELPER);
13990 impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
13992 if (tiVerificationNeeded)
13994 verVerifyField(&resolvedToken, fieldInfo, tiObj, TRUE);
13995 typeInfo fieldType = verMakeTypeInfo(ciType, fieldClsHnd);
13996 Verify(tiCompatibleWith(tiVal, fieldType.NormaliseForStack(), true), "type mismatch");
14000 // tiVerificationNeeded is false.
14001 // Raise InvalidProgramException if static store accesses non-static field
14002 if (isStoreStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0))
14004 BADCODE("static access on an instance field");
14008 // We are using stfld on a static field.
14009 // We allow it, but need to eval any side-effects for obj
14010 if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr)
14012 if (obj->gtFlags & GTF_SIDE_EFFECT)
14014 obj = gtUnusedValNode(obj);
14015 impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
14020 /* Preserve 'small' int types */
14021 if (!varTypeIsSmall(lclTyp))
14023 lclTyp = genActualType(lclTyp);
14026 switch (fieldInfo.fieldAccessor)
14028 case CORINFO_FIELD_INSTANCE:
14029 #ifdef FEATURE_READYTORUN_COMPILER
14030 case CORINFO_FIELD_INSTANCE_WITH_BASE:
14033 obj = impCheckForNullPointer(obj);
14035 /* Create the data member node */
14036 op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset);
14037 DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
14038 if (StructHasOverlappingFields(typeFlags))
14040 op1->gtField.gtFldMayOverlap = true;
14043 #ifdef FEATURE_READYTORUN_COMPILER
14044 if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE)
14046 op1->gtField.gtFieldLookup = fieldInfo.fieldLookup;
14050 op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT);
14052 if (fgAddrCouldBeNull(obj))
14054 op1->gtFlags |= GTF_EXCEPT;
14057 // If gtFldObj is a BYREF then our target is a value class and
14058 // it could point anywhere, for example a boxed class static int
14059 if (obj->gtType == TYP_BYREF)
14061 op1->gtFlags |= GTF_IND_TGTANYWHERE;
14064 if (compIsForInlining() &&
14065 impInlineIsGuaranteedThisDerefBeforeAnySideEffects(op2, obj, impInlineInfo->inlArgInfo))
14067 impInlineInfo->thisDereferencedFirst = true;
14072 case CORINFO_FIELD_STATIC_TLS:
14073 #ifdef _TARGET_X86_
14074 // Legacy TLS access is implemented as intrinsic on x86 only
14076 /* Create the data member node */
14077 op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset);
14078 op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation
14082 fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER;
14087 case CORINFO_FIELD_STATIC_ADDR_HELPER:
14088 case CORINFO_FIELD_INSTANCE_HELPER:
14089 case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
14090 op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp,
14094 case CORINFO_FIELD_STATIC_ADDRESS:
14095 case CORINFO_FIELD_STATIC_RVA_ADDRESS:
14096 case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
14097 case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
14098 case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
14099 op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo,
14104 assert(!"Unexpected fieldAccessor");
14107 // Create the member assignment, unless we have a struct.
14108 // TODO-1stClassStructs: This could be limited to TYP_STRUCT, to avoid extra copies.
14109 bool deferStructAssign = varTypeIsStruct(lclTyp);
14111 if (!deferStructAssign)
14113 if (prefixFlags & PREFIX_VOLATILE)
14115 assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND));
14116 op1->gtFlags |= GTF_DONT_CSE; // Can't CSE a volatile
14117 op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
14118 op1->gtFlags |= GTF_IND_VOLATILE;
14120 if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp))
14122 assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND));
14123 op1->gtFlags |= GTF_IND_UNALIGNED;
14126 /* V4.0 allows assignment of i4 constant values to i8 type vars when IL verifier is bypassed (full
14127 trust apps). The reason this works is that the JIT stores an i4 constant in the GenTree union during
14128 importation and reads from the union as if it were a long during code generation. Though this
14129 can potentially read garbage, one can get lucky and have it work correctly.
14131 This code pattern is generated by Dev10 MC++ compiler while storing to fields when compiled with
14132 /O2 switch (default when compiling retail configs in Dev10) and a customer app has taken a
14133 dependency on it. To be backward compatible, we will explicitly add an upward cast here so that
14134 it works correctly always.
14136 Note that this is limited to x86 alone as there is no back compat to be addressed for Arm JIT
14139 CLANG_FORMAT_COMMENT_ANCHOR;
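// Illustrative sketch (not from the original source; field and class names are hypothetical):
// the kind of IL this back-compat path tolerates is an i4 constant stored straight into an i8 field:
//     ldarg.0
//     ldc.i4   2                          // 32-bit constant
//     stfld    int64 SomeClass::someField // i8 destination
// Without the explicit upward cast added below, the i4 constant would be read out of the GenTree
// union as a long, possibly picking up garbage in the upper bits.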
14141 #ifndef _TARGET_64BIT_
14142 // In UWP6.0 and beyond (post-.NET Core 2.0), we decided to let this cast from int to long be
14143 // generated for ARM as well as x86, so the following IR will be accepted:
14145 // | /--* CNS_INT int 2
14147 // \--* CLS_VAR long
14149 if ((op1->TypeGet() != op2->TypeGet()) && op2->OperIsConst() && varTypeIsIntOrI(op2->TypeGet()) &&
14150 varTypeIsLong(op1->TypeGet()))
14152 op2 = gtNewCastNode(op1->TypeGet(), op2, false, op1->TypeGet());
14156 #ifdef _TARGET_64BIT_
14157 // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
14158 if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType))
14160 op2->gtType = TYP_I_IMPL;
14164 // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
14166 if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT))
14168 op2 = gtNewCastNode(TYP_INT, op2, false, TYP_INT);
14170 // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
14172 if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT))
14174 op2 = gtNewCastNode(TYP_I_IMPL, op2, false, TYP_I_IMPL);
14179 #if !FEATURE_X87_DOUBLES
14180 // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE
14181 // We insert a cast to the dest 'op1' type
14183 if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) &&
14184 varTypeIsFloating(op2->gtType))
14186 op2 = gtNewCastNode(op1->TypeGet(), op2, false, op1->TypeGet());
14188 #endif // !FEATURE_X87_DOUBLES
14190 op1 = gtNewAssignNode(op1, op2);
14192 /* Mark the expression as containing an assignment */
14194 op1->gtFlags |= GTF_ASG;
14197 /* Check if the class needs explicit initialization */
14199 if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
14201 GenTree* helperNode = impInitClass(&resolvedToken);
14202 if (compDonotInline())
14206 if (helperNode != nullptr)
14208 op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1);
14212 /* stfld can interfere with value classes (consider the sequence
14213 ldloc, ldloca, ..., stfld, stloc). We will be conservative and
14214 spill all value class references from the stack. */
14216 if (obj && ((obj->gtType == TYP_BYREF) || (obj->gtType == TYP_I_IMPL)))
14220 if (impIsValueType(tiObj))
14222 impSpillEvalStack();
14226 impSpillValueClasses();
14230 /* Spill any refs to the same member from the stack */
14232 impSpillLclRefs((ssize_t)resolvedToken.hField);
14234 /* stsfld also interferes with indirect accesses (for aliased
14235 statics) and calls. But don't need to spill other statics
14236 as we have explicitly spilled this particular static field. */
14238 impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STFLD"));
14240 if (deferStructAssign)
14242 op1 = impAssignStruct(op1, op2, clsHnd, (unsigned)CHECK_SPILL_ALL);
14250 /* Get the class type index operand */
14252 _impResolveToken(CORINFO_TOKENKIND_Newarr);
14254 JITDUMP(" %08X", resolvedToken.token);
14256 if (!opts.IsReadyToRun())
14258 // Need to restore array classes before creating array objects on the heap
14259 op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE /*mustRestoreHandle*/);
14260 if (op1 == nullptr)
14261 { // compDonotInline()
14266 if (tiVerificationNeeded)
14268 // As per ECMA 'numElems' specified can be either int32 or native int.
14269 Verify(impStackTop().seTypeInfo.IsIntOrNativeIntType(), "bad bound");
14271 CORINFO_CLASS_HANDLE elemTypeHnd;
14272 info.compCompHnd->getChildType(resolvedToken.hClass, &elemTypeHnd);
14273 Verify(elemTypeHnd == nullptr ||
14274 !(info.compCompHnd->getClassAttribs(elemTypeHnd) & CORINFO_FLG_CONTAINS_STACK_PTR),
14275 "array of byref-like type");
14278 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
14280 accessAllowedResult =
14281 info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14282 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14284 /* Form the arglist: array class handle, size */
14285 op2 = impPopStack().val;
14286 assertImp(genActualTypeIsIntOrI(op2->gtType));
14288 #ifdef FEATURE_READYTORUN_COMPILER
14289 if (opts.IsReadyToRun())
14291 op1 = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_NEWARR_1, TYP_REF,
14292 gtNewArgList(op2));
14293 usingReadyToRunHelper = (op1 != nullptr);
14295 if (!usingReadyToRunHelper)
14297 // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
14298 // and the newarr call with a single call to a dynamic R2R cell that will:
14299 // 1) Load the context
14300 // 2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
14301 // 3) Allocate the new array
14302 // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
14304 // Need to restore array classes before creating array objects on the heap
14305 op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE /*mustRestoreHandle*/);
14306 if (op1 == nullptr)
14307 { // compDonotInline()
14313 if (!usingReadyToRunHelper)
14316 args = gtNewArgList(op1, op2);
14318 /* Create a call to 'new' */
14320 // Note that this only works for shared generic code because the same helper is used for all
14321 // reference array types
14322 op1 = gtNewHelperCallNode(info.compCompHnd->getNewArrHelper(resolvedToken.hClass), TYP_REF, args);
14325 op1->gtCall.compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)resolvedToken.hClass;
14327 /* Remember that this basic block contains 'new' of a single-dimension (SD) array */
14329 block->bbFlags |= BBF_HAS_NEWARRAY;
14330 optMethodFlags |= OMF_HAS_NEWARRAY;
14332 /* Push the result of the call on the stack */
14334 impPushOnStack(op1, tiRetVal);
14341 if (tiVerificationNeeded)
14343 Verify(false, "bad opcode");
14346 // We don't allow locallocs inside handlers
14347 if (block->hasHndIndex())
14349 BADCODE("Localloc can't be inside handler");
14352 setNeedsGSSecurityCookie();
14354 // Get the size to allocate
14356 op2 = impPopStack().val;
14357 assertImp(genActualTypeIsIntOrI(op2->gtType));
14359 if (verCurrentState.esStackDepth != 0)
14361 BADCODE("Localloc can only be used when the stack is empty");
14364 // If the localloc is not in a loop and its size is a small constant,
14365 // create a new local var of TYP_BLK and return its address.
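// Illustrative sketch (assumed source pattern, not from the original comment): a fixed-size
// allocation such as
//     byte* p = stackalloc byte[32];
// in a block that is not the target of a backward branch can be converted below into a TYP_BLK
// local whose address is pushed, instead of emitting a GT_LCLHEAP node.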
14367 bool convertedToLocal = false;
14369 // Need to aggressively fold here, as even fixed-size locallocs
14370 // will have casts in the way.
14371 op2 = gtFoldExpr(op2);
14373 if (op2->IsIntegralConst())
14375 const ssize_t allocSize = op2->AsIntCon()->IconValue();
14377 if (allocSize == 0)
14379 // Result is nullptr
14380 JITDUMP("Converting stackalloc of 0 bytes to push null unmanaged pointer\n");
14381 op1 = gtNewIconNode(0, TYP_I_IMPL);
14382 convertedToLocal = true;
14384 else if ((allocSize > 0) && ((compCurBB->bbFlags & BBF_BACKWARD_JUMP) == 0))
14386 // Get the size threshold for local conversion
14387 ssize_t maxSize = DEFAULT_MAX_LOCALLOC_TO_LOCAL_SIZE;
14390 // Optionally allow this to be modified
14391 maxSize = JitConfig.JitStackAllocToLocalSize();
14394 if (allocSize <= maxSize)
14396 const unsigned stackallocAsLocal = lvaGrabTemp(false DEBUGARG("stackallocLocal"));
14397 JITDUMP("Converting stackalloc of %lld bytes to new local V%02u\n", allocSize,
14398 stackallocAsLocal);
14399 lvaTable[stackallocAsLocal].lvType = TYP_BLK;
14400 lvaTable[stackallocAsLocal].lvExactSize = (unsigned)allocSize;
14401 lvaTable[stackallocAsLocal].lvIsUnsafeBuffer = true;
14402 op1 = gtNewLclvNode(stackallocAsLocal, TYP_BLK);
14403 op1 = gtNewOperNode(GT_ADDR, TYP_I_IMPL, op1);
14404 convertedToLocal = true;
14405 compGSReorderStackLayout = true;
14410 if (!convertedToLocal)
14412 // Bail out if inlining and the localloc was not converted.
14414 // Note we might consider allowing the inline, if the call
14415 // site is not in a loop.
14416 if (compIsForInlining())
14418 InlineObservation obs = op2->IsIntegralConst()
14419 ? InlineObservation::CALLEE_LOCALLOC_TOO_LARGE
14420 : InlineObservation::CALLSITE_LOCALLOC_SIZE_UNKNOWN;
14421 compInlineResult->NoteFatal(obs);
14425 op1 = gtNewOperNode(GT_LCLHEAP, TYP_I_IMPL, op2);
14426 // May throw a stack overflow exception. Obviously, we don't want locallocs to be CSE'd.
14427 op1->gtFlags |= (GTF_EXCEPT | GTF_DONT_CSE);
14429 /* The FP register may not be back to the original value at the end
14430 of the method, even if the frame size is 0, as localloc may
14431 have modified it. So we will HAVE to reset it */
14432 compLocallocUsed = true;
14436 compLocallocOptimized = true;
14440 impPushOnStack(op1, tiRetVal);
14445 /* Get the type token */
14446 assertImp(sz == sizeof(unsigned));
14448 _impResolveToken(CORINFO_TOKENKIND_Casting);
14450 JITDUMP(" %08X", resolvedToken.token);
14452 if (!opts.IsReadyToRun())
14454 op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
14455 if (op2 == nullptr)
14456 { // compDonotInline()
14461 if (tiVerificationNeeded)
14463 Verify(impStackTop().seTypeInfo.IsObjRef(), "obj reference needed");
14464 // Even if this is a value class, we know it is boxed.
14465 tiRetVal = typeInfo(TI_REF, resolvedToken.hClass);
14467 accessAllowedResult =
14468 info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14469 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14471 op1 = impPopStack().val;
14473 GenTree* optTree = impOptimizeCastClassOrIsInst(op1, &resolvedToken, false);
14475 if (optTree != nullptr)
14477 impPushOnStack(optTree, tiRetVal);
14482 #ifdef FEATURE_READYTORUN_COMPILER
14483 if (opts.IsReadyToRun())
14485 GenTreeCall* opLookup =
14486 impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_ISINSTANCEOF, TYP_REF,
14487 gtNewArgList(op1));
14488 usingReadyToRunHelper = (opLookup != nullptr);
14489 op1 = (usingReadyToRunHelper ? opLookup : op1);
14491 if (!usingReadyToRunHelper)
14493 // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
14494 // and the isinstanceof_any call with a single call to a dynamic R2R cell that will:
14495 // 1) Load the context
14496 // 2) Perform the generic dictionary lookup and caching, and generate the appropriate
14498 // 3) Perform the 'is instance' check on the input object
14499 // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
14501 op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
14502 if (op2 == nullptr)
14503 { // compDonotInline()
14509 if (!usingReadyToRunHelper)
14512 op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, false);
14514 if (compDonotInline())
14519 impPushOnStack(op1, tiRetVal);
14524 case CEE_REFANYVAL:
14526 // get the class handle and make a ICON node out of it
14528 _impResolveToken(CORINFO_TOKENKIND_Class);
14530 JITDUMP(" %08X", resolvedToken.token);
14532 op2 = impTokenToHandle(&resolvedToken);
14533 if (op2 == nullptr)
14534 { // compDonotInline()
14538 if (tiVerificationNeeded)
14540 Verify(typeInfo::AreEquivalent(impStackTop().seTypeInfo, verMakeTypeInfo(impGetRefAnyClass())),
14542 tiRetVal = verMakeTypeInfo(resolvedToken.hClass).MakeByRef();
14545 op1 = impPopStack().val;
14546 // make certain it is normalized;
14547 op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL);
14549 // Call helper GETREFANY(classHandle, op1);
14550 args = gtNewArgList(op2, op1);
14551 op1 = gtNewHelperCallNode(CORINFO_HELP_GETREFANY, TYP_BYREF, args);
14553 impPushOnStack(op1, tiRetVal);
14556 case CEE_REFANYTYPE:
14558 if (tiVerificationNeeded)
14560 Verify(typeInfo::AreEquivalent(impStackTop().seTypeInfo, verMakeTypeInfo(impGetRefAnyClass())),
14564 op1 = impPopStack().val;
14566 // make certain it is normalized;
14567 op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL);
14569 if (op1->gtOper == GT_OBJ)
14571 // Get the address of the refany
14572 op1 = op1->gtOp.gtOp1;
14574 // Fetch the type from the correct slot
14575 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
14576 gtNewIconNode(offsetof(CORINFO_RefAny, type), TYP_I_IMPL));
14577 op1 = gtNewOperNode(GT_IND, TYP_BYREF, op1);
14581 assertImp(op1->gtOper == GT_MKREFANY);
14583 // The pointer may have side-effects
14584 if (op1->gtOp.gtOp1->gtFlags & GTF_SIDE_EFFECT)
14586 impAppendTree(op1->gtOp.gtOp1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
14588 impNoteLastILoffs();
14592 // We already have the class handle
14593 op1 = op1->gtOp.gtOp2;
14596 // convert native TypeHandle to RuntimeTypeHandle
14598 GenTreeArgList* helperArgs = gtNewArgList(op1);
14600 op1 = gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE_MAYBENULL, TYP_STRUCT, helperArgs);
14602 // The handle struct is returned in register
14603 op1->gtCall.gtReturnType = GetRuntimeHandleUnderlyingType();
14605 tiRetVal = typeInfo(TI_STRUCT, impGetTypeHandleClass());
14608 impPushOnStack(op1, tiRetVal);
14613 /* Get the Class index */
14614 assertImp(sz == sizeof(unsigned));
14615 lastLoadToken = codeAddr;
14616 _impResolveToken(CORINFO_TOKENKIND_Ldtoken);
14618 tokenType = info.compCompHnd->getTokenTypeAsHandle(&resolvedToken);
14620 op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE);
14621 if (op1 == nullptr)
14622 { // compDonotInline()
14626 helper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE;
14627 assert(resolvedToken.hClass != nullptr);
14629 if (resolvedToken.hMethod != nullptr)
14631 helper = CORINFO_HELP_METHODDESC_TO_STUBRUNTIMEMETHOD;
14633 else if (resolvedToken.hField != nullptr)
14635 helper = CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD;
14638 GenTreeArgList* helperArgs = gtNewArgList(op1);
14640 op1 = gtNewHelperCallNode(helper, TYP_STRUCT, helperArgs);
14642 // The handle struct is returned in register
14643 op1->gtCall.gtReturnType = GetRuntimeHandleUnderlyingType();
14645 tiRetVal = verMakeTypeInfo(tokenType);
14646 impPushOnStack(op1, tiRetVal);
14651 case CEE_UNBOX_ANY:
14653 /* Get the Class index */
14654 assertImp(sz == sizeof(unsigned));
14656 _impResolveToken(CORINFO_TOKENKIND_Class);
14658 JITDUMP(" %08X", resolvedToken.token);
14660 BOOL runtimeLookup;
14661 op2 = impTokenToHandle(&resolvedToken, &runtimeLookup);
14662 if (op2 == nullptr)
14664 assert(compDonotInline());
14668 // Run this always so we can get access exceptions even with SkipVerification.
14669 accessAllowedResult =
14670 info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14671 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14673 if (opcode == CEE_UNBOX_ANY && !eeIsValueClass(resolvedToken.hClass))
14675 if (tiVerificationNeeded)
14677 typeInfo tiUnbox = impStackTop().seTypeInfo;
14678 Verify(tiUnbox.IsObjRef(), "bad unbox.any arg");
14679 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
14680 tiRetVal.NormaliseForStack();
14682 JITDUMP("\n Importing UNBOX.ANY(refClass) as CASTCLASS\n");
14683 op1 = impPopStack().val;
14687 /* Pop the object and create the unbox helper call */
14688 /* You might think that for UNBOX_ANY we need to push a different */
14689 /* (non-byref) type, but here we're making the tiRetVal that is used */
14690 /* for the intermediate pointer which we then transfer onto the OBJ */
14691 /* instruction. OBJ then creates the appropriate tiRetVal. */
14692 if (tiVerificationNeeded)
14694 typeInfo tiUnbox = impStackTop().seTypeInfo;
14695 Verify(tiUnbox.IsObjRef(), "Bad unbox arg");
14697 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
14698 Verify(tiRetVal.IsValueClass(), "not value class");
14699 tiRetVal.MakeByRef();
14701 // We always come from an objref, so this is a safe byref
14702 tiRetVal.SetIsPermanentHomeByRef();
14703 tiRetVal.SetIsReadonlyByRef();
14706 op1 = impPopStack().val;
14707 assertImp(op1->gtType == TYP_REF);
14709 helper = info.compCompHnd->getUnBoxHelper(resolvedToken.hClass);
14710 assert(helper == CORINFO_HELP_UNBOX || helper == CORINFO_HELP_UNBOX_NULLABLE);
14712 // Check legality and profitability of inline expansion for unboxing.
14713 const bool canExpandInline = (helper == CORINFO_HELP_UNBOX);
14714 const bool shouldExpandInline = !(compCurBB->isRunRarely() || opts.compDbgCode || opts.MinOpts());
14716 if (canExpandInline && shouldExpandInline)
14718 JITDUMP("\n Importing %s as inline sequence\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY");
14719 // we are doing normal unboxing
14720 // inline the common case of the unbox helper
14721 // UNBOX(exp) morphs into
14722 // clone = pop(exp);
14723 // ((*clone == typeToken) ? nop : helper(clone, typeToken));
14724 // push(clone + TARGET_POINTER_SIZE)
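// Sketch of the trees built below (informal notation, a summary of this code rather than a quote):
//     stmt:  QMARK(EQ(IND(clone), typeToken), COLON(NOP, CALL helper(typeToken, clone)))
//     push:  ADD(byref clone, TARGET_POINTER_SIZE)    // step past the method table pointer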
14726 GenTree* cloneOperand;
14727 op1 = impCloneExpr(op1, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
14728 nullptr DEBUGARG("inline UNBOX clone1"));
14729 op1 = gtNewOperNode(GT_IND, TYP_I_IMPL, op1);
14731 GenTree* condBox = gtNewOperNode(GT_EQ, TYP_INT, op1, op2);
14733 op1 = impCloneExpr(cloneOperand, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
14734 nullptr DEBUGARG("inline UNBOX clone2"));
14735 op2 = impTokenToHandle(&resolvedToken);
14736 if (op2 == nullptr)
14737 { // compDonotInline()
14740 args = gtNewArgList(op2, op1);
14741 op1 = gtNewHelperCallNode(helper, TYP_VOID, args);
14743 op1 = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), op1);
14744 op1 = gtNewQmarkNode(TYP_VOID, condBox, op1);
14745 condBox->gtFlags |= GTF_RELOP_QMARK;
14747 // QMARK nodes cannot reside on the evaluation stack. Because there
14748 // may be other trees on the evaluation stack that side-effect the
14749 // sources of the UNBOX operation we must spill the stack.
14751 impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
14753 // Create the address-expression to reference past the object header
14754 // to the beginning of the value-type. Today this means adjusting
14755 // past the base of the object's vtable field, which is pointer sized.
14757 op2 = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL);
14758 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, cloneOperand, op2);
14762 JITDUMP("\n Importing %s as helper call because %s\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY",
14763 canExpandInline ? "want smaller code or faster jitting" : "inline expansion not legal");
14765 // Don't optimize, just call the helper and be done with it
14766 args = gtNewArgList(op2, op1);
14768 op1 = gtNewHelperCallNode(helper,
14769 (var_types)((helper == CORINFO_HELP_UNBOX) ? TYP_BYREF : TYP_STRUCT), args);
14772 assert(helper == CORINFO_HELP_UNBOX && op1->gtType == TYP_BYREF || // Unbox helper returns a byref.
14773 helper == CORINFO_HELP_UNBOX_NULLABLE &&
14774 varTypeIsStruct(op1) // UnboxNullable helper returns a struct.
14778 ---------------------------------------------------------------------------
14781 | \          | CORINFO_HELP_UNBOX       | CORINFO_HELP_UNBOX_NULLABLE   |
14782 |  \         | (which returns a BYREF)  | (which returns a STRUCT)      |
14784 |-------------------------------------------------------------------------|
14785 | UNBOX      | push the BYREF           | spill the STRUCT to a local,  |
14786 |            |                          | push the BYREF to this local  |
14787 |-------------------------------------------------------------------------|
14788 | UNBOX_ANY  | push a GT_OBJ of         | push the STRUCT;              |
14789 |            | the BYREF                | for Linux, when the           |
14790 |            |                          | struct is returned in two     |
14791 |            |                          | registers, create a temp      |
14792 |            |                          | whose address is passed to    |
14793 |            |                          | the unbox_nullable helper.    |
14794 |-------------------------------------------------------------------------|
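// Concrete example (illustrative, not from the original comment): when the helper-call path is
// taken (e.g. under MinOpts), "unbox int32" on a boxed Int32 uses CORINFO_HELP_UNBOX and pushes a
// BYREF into the box payload, while "unbox.any valuetype System.Nullable`1<int32>" uses
// CORINFO_HELP_UNBOX_NULLABLE and pushes the resulting Nullable<int> struct.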
14797 if (opcode == CEE_UNBOX)
14799 if (helper == CORINFO_HELP_UNBOX_NULLABLE)
14801 // Unbox nullable helper returns a struct type.
14802 // We need to spill it to a temp so that we can take its address.
14803 // Here we need the unsafe value cls check, since the address of the struct is taken to be used
14804 // further along and could potentially be exploited.
14806 unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a nullable"));
14807 lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */);
14809 op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14810 op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
14811 assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp.
14813 op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14814 op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2);
14815 op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2);
14818 assert(op1->gtType == TYP_BYREF);
14819 assert(!tiVerificationNeeded || tiRetVal.IsByRef());
14823 assert(opcode == CEE_UNBOX_ANY);
14825 if (helper == CORINFO_HELP_UNBOX)
14827 // Normal unbox helper returns a TYP_BYREF.
14828 impPushOnStack(op1, tiRetVal);
14833 assert(helper == CORINFO_HELP_UNBOX_NULLABLE && "Make sure the helper is nullable!");
14835 #if FEATURE_MULTIREG_RET
14837 if (varTypeIsStruct(op1) && IsMultiRegReturnedType(resolvedToken.hClass))
14839 // Unbox nullable helper returns a TYP_STRUCT.
14840 // For the multi-reg case we need to spill it to a temp so that
14841 // we can pass the address to the unbox_nullable jit helper.
14843 unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a register returnable nullable"));
14844 lvaTable[tmp].lvIsMultiRegArg = true;
14845 lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */);
14847 op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14848 op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
14849 assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp.
14851 op2 = gtNewLclvNode(tmp, TYP_STRUCT);
14852 op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2);
14853 op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2);
14855 // In this case the return value of the unbox helper is TYP_BYREF.
14856 // Make sure the right type is placed on the operand type stack.
14857 impPushOnStack(op1, tiRetVal);
14859 // Load the struct.
14862 assert(op1->gtType == TYP_BYREF);
14863 assert(!tiVerificationNeeded || tiRetVal.IsByRef());
14869 #endif // !FEATURE_MULTIREG_RET
14872 // If the struct is not register passable, we have it materialized in the RetBuf.
14873 assert(op1->gtType == TYP_STRUCT);
14874 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
14875 assert(tiRetVal.IsValueClass());
14879 impPushOnStack(op1, tiRetVal);
14885 /* Get the Class index */
14886 assertImp(sz == sizeof(unsigned));
14888 _impResolveToken(CORINFO_TOKENKIND_Box);
14890 JITDUMP(" %08X", resolvedToken.token);
14892 if (tiVerificationNeeded)
14894 typeInfo tiActual = impStackTop().seTypeInfo;
14895 typeInfo tiBox = verMakeTypeInfo(resolvedToken.hClass);
14897 Verify(verIsBoxable(tiBox), "boxable type expected");
14899 // check the class constraints of the boxed type in case we are boxing an uninitialized value
14900 Verify(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
14901 "boxed type has unsatisfied class constraints");
14903 Verify(tiCompatibleWith(tiActual, tiBox.NormaliseForStack(), true), "type mismatch");
14905 // Observation: the following code introduces a boxed value class on the stack, but,
14906 // according to the ECMA spec, one would simply expect: tiRetVal =
14907 // typeInfo(TI_REF,impGetObjectClass());
14909 // Push the result back on the stack,
14910 // even if clsHnd is a value class we want the TI_REF
14911 // we call back to the EE to find out what type we should push (for Nullable<T> we push T)
14912 tiRetVal = typeInfo(TI_REF, info.compCompHnd->getTypeForBox(resolvedToken.hClass));
14915 accessAllowedResult =
14916 info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14917 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14919 // Note BOX can be used on things that are not value classes, in which
14920 // case we get a NOP. However the verifier's view of the type on the
14921 // stack changes (in generic code a 'T' becomes a 'boxed T')
14922 if (!eeIsValueClass(resolvedToken.hClass))
14924 JITDUMP("\n Importing BOX(refClass) as NOP\n");
14925 verCurrentState.esStack[verCurrentState.esStackDepth - 1].seTypeInfo = tiRetVal;
14929 // Look ahead for unbox.any
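// Illustrative IL for the peephole below (assumed pattern; the tokens are hypothetical):
//     box        !!T
//     unbox.any  !!T
// When both tokens resolve to the same type, the pair leaves the stack unchanged, so the box is
// imported as a nop and the following unbox.any is consumed here as well.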
14930 if (codeAddr + (sz + 1 + sizeof(mdToken)) <= codeEndp && codeAddr[sz] == CEE_UNBOX_ANY)
14932 CORINFO_RESOLVED_TOKEN unboxResolvedToken;
14934 impResolveToken(codeAddr + (sz + 1), &unboxResolvedToken, CORINFO_TOKENKIND_Class);
14936 // See if the resolved tokens describe types that are equal.
14937 const TypeCompareState compare =
14938 info.compCompHnd->compareTypesForEquality(unboxResolvedToken.hClass, resolvedToken.hClass);
14940 // If so, box/unbox.any is a nop.
14941 if (compare == TypeCompareState::Must)
14943 JITDUMP("\n Importing BOX; UNBOX.ANY as NOP\n");
14944 // Skip the next unbox.any instruction
14945 sz += sizeof(mdToken) + 1;
14950 impImportAndPushBox(&resolvedToken);
14951 if (compDonotInline())
14960 /* Get the Class index */
14961 assertImp(sz == sizeof(unsigned));
14963 _impResolveToken(CORINFO_TOKENKIND_Class);
14965 JITDUMP(" %08X", resolvedToken.token);
14967 if (tiVerificationNeeded)
14969 tiRetVal = typeInfo(TI_INT);
14972 op1 = gtNewIconNode(info.compCompHnd->getClassSize(resolvedToken.hClass));
14973 impPushOnStack(op1, tiRetVal);
14976 case CEE_CASTCLASS:
14978 /* Get the Class index */
14980 assertImp(sz == sizeof(unsigned));
14982 _impResolveToken(CORINFO_TOKENKIND_Casting);
14984 JITDUMP(" %08X", resolvedToken.token);
14986 if (!opts.IsReadyToRun())
14988 op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
14989 if (op2 == nullptr)
14990 { // compDonotInline()
14995 if (tiVerificationNeeded)
14997 Verify(impStackTop().seTypeInfo.IsObjRef(), "object ref expected");
14999 tiRetVal = typeInfo(TI_REF, resolvedToken.hClass);
15002 accessAllowedResult =
15003 info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
15004 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
15006 op1 = impPopStack().val;
15008 /* Pop the address and create the 'checked cast' helper call */
15010 // At this point we expect typeRef to contain the token, op1 to contain the value being cast,
15011 // and op2 to contain code that creates the type handle corresponding to typeRef
15014 GenTree* optTree = impOptimizeCastClassOrIsInst(op1, &resolvedToken, true);
15016 if (optTree != nullptr)
15018 impPushOnStack(optTree, tiRetVal);
15023 #ifdef FEATURE_READYTORUN_COMPILER
15024 if (opts.IsReadyToRun())
15026 GenTreeCall* opLookup =
15027 impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_CHKCAST, TYP_REF,
15028 gtNewArgList(op1));
15029 usingReadyToRunHelper = (opLookup != nullptr);
15030 op1 = (usingReadyToRunHelper ? opLookup : op1);
15032 if (!usingReadyToRunHelper)
15034 // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
15035 // and the chkcastany call with a single call to a dynamic R2R cell that will:
15036 // 1) Load the context
15037 // 2) Perform the generic dictionary lookup and caching, and generate the appropriate
15039 // 3) Check the object on the stack for the type-cast
15040 // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
15042 op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
15043 if (op2 == nullptr)
15044 { // compDonotInline()
15050 if (!usingReadyToRunHelper)
15053 op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, true);
15055 if (compDonotInline())
15060 /* Push the result back on the stack */
15061 impPushOnStack(op1, tiRetVal);
15068 if (compIsForInlining())
15070 // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
15071 // TODO: Will this be too strict, given that we will inline many basic blocks?
15072 // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
15074 /* Do we have just the exception on the stack ?*/
15076 if (verCurrentState.esStackDepth != 1)
15078 /* if not, just don't inline the method */
15080 compInlineResult->NoteFatal(InlineObservation::CALLEE_THROW_WITH_INVALID_STACK);
15085 if (tiVerificationNeeded)
15087 tiRetVal = impStackTop().seTypeInfo;
15088 Verify(tiRetVal.IsObjRef(), "object ref expected");
15089 if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
15091 Verify(!tiRetVal.IsThisPtr(), "throw uninitialized this");
15095 block->bbSetRunRarely(); // any block with a throw is rare
15096 /* Pop the exception object and create the 'throw' helper call */
15098 op1 = gtNewHelperCallNode(CORINFO_HELP_THROW, TYP_VOID, gtNewArgList(impPopStack().val));
15101 if (verCurrentState.esStackDepth > 0)
15103 impEvalSideEffects();
15106 assert(verCurrentState.esStackDepth == 0);
15112 assert(!compIsForInlining());
15114 if (info.compXcptnsCount == 0)
15116 BADCODE("rethrow outside catch");
15119 if (tiVerificationNeeded)
15121 Verify(block->hasHndIndex(), "rethrow outside catch");
15122 if (block->hasHndIndex())
15124 EHblkDsc* HBtab = ehGetDsc(block->getHndIndex());
15125 Verify(!HBtab->HasFinallyOrFaultHandler(), "rethrow in finally or fault");
15126 if (HBtab->HasFilter())
15128 // we better be in the handler clause part, not the filter part
15129 Verify(jitIsBetween(compCurBB->bbCodeOffs, HBtab->ebdHndBegOffs(), HBtab->ebdHndEndOffs()),
15130 "rethrow in filter");
15135 /* Create the 'rethrow' helper call */
15137 op1 = gtNewHelperCallNode(CORINFO_HELP_RETHROW, TYP_VOID);
15143 assertImp(sz == sizeof(unsigned));
15145 _impResolveToken(CORINFO_TOKENKIND_Class);
15147 JITDUMP(" %08X", resolvedToken.token);
15149 if (tiVerificationNeeded)
15151 typeInfo tiTo = impStackTop().seTypeInfo;
15152 typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
15154 Verify(tiTo.IsByRef(), "byref expected");
15155 Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
15157 Verify(tiCompatibleWith(tiInstr, tiTo.DereferenceByRef(), false),
15158 "type operand incompatible with type of address");
15161 size = info.compCompHnd->getClassSize(resolvedToken.hClass); // Size
15162 op2 = gtNewIconNode(0); // Value
15163 op1 = impPopStack().val; // Dest
15164 op1 = gtNewBlockVal(op1, size);
15165 op1 = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, false);
15170 if (tiVerificationNeeded)
15172 Verify(false, "bad opcode");
15175 op3 = impPopStack().val; // Size
15176 op2 = impPopStack().val; // Value
15177 op1 = impPopStack().val; // Dest
15179 if (op3->IsCnsIntOrI())
15181 size = (unsigned)op3->AsIntConCommon()->IconValue();
15182 op1 = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, size);
15186 op1 = new (this, GT_DYN_BLK) GenTreeDynBlk(op1, op3);
15189 op1 = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, false);
15195 if (tiVerificationNeeded)
15197 Verify(false, "bad opcode");
15199 op3 = impPopStack().val; // Size
15200 op2 = impPopStack().val; // Src
15201 op1 = impPopStack().val; // Dest
15203 if (op3->IsCnsIntOrI())
15205 size = (unsigned)op3->AsIntConCommon()->IconValue();
15206 op1 = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, size);
15210 op1 = new (this, GT_DYN_BLK) GenTreeDynBlk(op1, op3);
15213 if (op2->OperGet() == GT_ADDR)
15215 op2 = op2->gtOp.gtOp1;
15219 op2 = gtNewOperNode(GT_IND, TYP_STRUCT, op2);
15222 op1 = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, true);
15227 assertImp(sz == sizeof(unsigned));
15229 _impResolveToken(CORINFO_TOKENKIND_Class);
15231 JITDUMP(" %08X", resolvedToken.token);
15233 if (tiVerificationNeeded)
15235 typeInfo tiFrom = impStackTop().seTypeInfo;
15236 typeInfo tiTo = impStackTop(1).seTypeInfo;
15237 typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
15239 Verify(tiFrom.IsByRef(), "expected byref source");
15240 Verify(tiTo.IsByRef(), "expected byref destination");
15242 Verify(tiCompatibleWith(tiFrom.DereferenceByRef(), tiInstr, false),
15243 "type of source address incompatible with type operand");
15244 Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
15245 Verify(tiCompatibleWith(tiInstr, tiTo.DereferenceByRef(), false),
15246 "type operand incompatible with type of destination address");
15249 if (!eeIsValueClass(resolvedToken.hClass))
15251 op1 = impPopStack().val; // address to load from
15253 impBashVarAddrsToI(op1);
15255 assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
15257 op1 = gtNewOperNode(GT_IND, TYP_REF, op1);
15258 op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
15260 impPushOnStack(op1, typeInfo());
15261 opcode = CEE_STIND_REF;
15263 goto STIND_POST_VERIFY;
15266 op2 = impPopStack().val; // Src
15267 op1 = impPopStack().val; // Dest
15268 op1 = gtNewCpObjNode(op1, op2, resolvedToken.hClass, ((prefixFlags & PREFIX_VOLATILE) != 0));
15273 assertImp(sz == sizeof(unsigned));
15275 _impResolveToken(CORINFO_TOKENKIND_Class);
15277 JITDUMP(" %08X", resolvedToken.token);
15279 if (eeIsValueClass(resolvedToken.hClass))
15281 lclTyp = TYP_STRUCT;
15288 if (tiVerificationNeeded)
15291 typeInfo tiPtr = impStackTop(1).seTypeInfo;
15293 // Make sure we have a good looking byref
15294 Verify(tiPtr.IsByRef(), "pointer not byref");
15295 Verify(!tiPtr.IsReadonlyByRef(), "write to readonly byref");
15296 if (!tiPtr.IsByRef() || tiPtr.IsReadonlyByRef())
15298 compUnsafeCastUsed = true;
15301 typeInfo ptrVal = DereferenceByRef(tiPtr);
15302 typeInfo argVal = verMakeTypeInfo(resolvedToken.hClass);
15304 if (!tiCompatibleWith(impStackTop(0).seTypeInfo, NormaliseForStack(argVal), true))
15306 Verify(false, "type of value incompatible with type operand");
15307 compUnsafeCastUsed = true;
15310 if (!tiCompatibleWith(argVal, ptrVal, false))
15312 Verify(false, "type operand incompatible with type of address");
15313 compUnsafeCastUsed = true;
15318 compUnsafeCastUsed = true;
15321 if (lclTyp == TYP_REF)
15323 opcode = CEE_STIND_REF;
15324 goto STIND_POST_VERIFY;
15327 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
15328 if (impIsPrimitive(jitTyp))
15330 lclTyp = JITtype2varType(jitTyp);
15331 goto STIND_POST_VERIFY;
15334 op2 = impPopStack().val; // Value
15335 op1 = impPopStack().val; // Ptr
15337 assertImp(varTypeIsStruct(op2));
15339 op1 = impAssignStructPtr(op1, op2, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
15341 if (op1->OperIsBlkOp() && (prefixFlags & PREFIX_UNALIGNED))
15343 op1->gtFlags |= GTF_BLK_UNALIGNED;
15350 assert(!compIsForInlining());
15352 // Being lazy here. Refanys are tricky in terms of gc tracking.
15353 // Since it is uncommon, just don't perform struct promotion in any method that contains mkrefany.
15355 JITDUMP("disabling struct promotion because of mkrefany\n");
15356 fgNoStructPromotion = true;
15358 oper = GT_MKREFANY;
15359 assertImp(sz == sizeof(unsigned));
15361 _impResolveToken(CORINFO_TOKENKIND_Class);
15363 JITDUMP(" %08X", resolvedToken.token);
15365 op2 = impTokenToHandle(&resolvedToken, nullptr, TRUE);
15366 if (op2 == nullptr)
15367 { // compDonotInline()
15371 if (tiVerificationNeeded)
15373 typeInfo tiPtr = impStackTop().seTypeInfo;
15374 typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
15376 Verify(!verIsByRefLike(tiInstr), "mkrefany of byref-like class");
15377 Verify(!tiPtr.IsReadonlyByRef(), "readonly byref used with mkrefany");
15378 Verify(typeInfo::AreEquivalent(tiPtr.DereferenceByRef(), tiInstr), "type mismatch");
15381 accessAllowedResult =
15382 info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
15383 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
15385 op1 = impPopStack().val;
15387 // @SPECVIOLATION: TYP_INT should not be allowed here by a strict reading of the spec.
15388 // But JIT32 allowed it, so we continue to allow it.
15389 assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL || op1->TypeGet() == TYP_INT);
15391 // MKREFANY returns a struct. op2 is the class token.
15392 op1 = gtNewOperNode(oper, TYP_STRUCT, op1, op2);
15394 impPushOnStack(op1, verMakeTypeInfo(impGetRefAnyClass()));
15400 assertImp(sz == sizeof(unsigned));
15402 _impResolveToken(CORINFO_TOKENKIND_Class);
15404 JITDUMP(" %08X", resolvedToken.token);
15408 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
15410 if (tiVerificationNeeded)
15412 typeInfo tiPtr = impStackTop().seTypeInfo;
15414 // Make sure we have a byref
15415 if (!tiPtr.IsByRef())
15417 Verify(false, "pointer not byref");
15418 compUnsafeCastUsed = true;
15420 typeInfo tiPtrVal = DereferenceByRef(tiPtr);
15422 if (!tiCompatibleWith(tiPtrVal, tiRetVal, false))
15424 Verify(false, "type of address incompatible with type operand");
15425 compUnsafeCastUsed = true;
15427 tiRetVal.NormaliseForStack();
15431 compUnsafeCastUsed = true;
15434 if (eeIsValueClass(resolvedToken.hClass))
15436 lclTyp = TYP_STRUCT;
15441 opcode = CEE_LDIND_REF;
15442 goto LDIND_POST_VERIFY;
15445 op1 = impPopStack().val;
15447 assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL);
15449 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
15450 if (impIsPrimitive(jitTyp))
15452 op1 = gtNewOperNode(GT_IND, JITtype2varType(jitTyp), op1);
15454 // Could point anywhere, for example a boxed class static int
15455 op1->gtFlags |= GTF_IND_TGTANYWHERE | GTF_GLOB_REF;
15456 assertImp(varTypeIsArithmetic(op1->gtType));
15460 // OBJ returns a struct
15461 // and an inline argument which is the class token of the loaded obj
15462 op1 = gtNewObjNode(resolvedToken.hClass, op1);
15464 op1->gtFlags |= GTF_EXCEPT;
15466 if (prefixFlags & PREFIX_UNALIGNED)
15468 op1->gtFlags |= GTF_IND_UNALIGNED;
15471 impPushOnStack(op1, tiRetVal);
15476 if (tiVerificationNeeded)
15478 typeInfo tiArray = impStackTop().seTypeInfo;
15479 Verify(verIsSDArray(tiArray), "bad array");
15480 tiRetVal = typeInfo(TI_INT);
15483 op1 = impPopStack().val;
15484 if (!opts.MinOpts() && !opts.compDbgCode)
15486 /* Use GT_ARR_LENGTH operator so rng check opts see this */
15487 GenTreeArrLen* arrLen = gtNewArrLen(TYP_INT, op1, offsetof(CORINFO_Array, length));
15489 /* Mark the block as containing a length expression */
15491 if (op1->gtOper == GT_LCL_VAR)
15493 block->bbFlags |= BBF_HAS_IDX_LEN;
15500 /* Create the expression "*(array_addr + ArrLenOffs)" */
15501 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
15502 gtNewIconNode(offsetof(CORINFO_Array, length), TYP_I_IMPL));
15503 op1 = gtNewIndir(TYP_INT, op1);
15504 op1->gtFlags |= GTF_IND_ARR_LEN;
15507 /* Push the result back on the stack */
15508 impPushOnStack(op1, tiRetVal);
15512 op1 = gtNewHelperCallNode(CORINFO_HELP_USER_BREAKPOINT, TYP_VOID);
15516 if (opts.compDbgCode)
15518 op1 = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
15523 /******************************** NYI *******************************/
15526 OutputDebugStringA("CLR: Invalid x86 breakpoint in IL stream\n");
15529 case CEE_MACRO_END:
15532 BADCODE3("unknown opcode", ": %02X", (int)opcode);
15536 prevOpcode = opcode;
15542 #undef _impResolveToken
15545 #pragma warning(pop)
15548 // Push a local/argument tree on the operand stack
15549 void Compiler::impPushVar(GenTree* op, typeInfo tiRetVal)
15551 tiRetVal.NormaliseForStack();
15553 if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init) && tiRetVal.IsThisPtr())
15555 tiRetVal.SetUninitialisedObjRef();
15558 impPushOnStack(op, tiRetVal);
15561 // Load a local/argument on the operand stack
15562 // lclNum is an index into lvaTable *NOT* the arg/lcl index in the IL
15563 void Compiler::impLoadVar(unsigned lclNum, IL_OFFSET offset, typeInfo tiRetVal)
15567 if (lvaTable[lclNum].lvNormalizeOnLoad())
15569 lclTyp = lvaGetRealType(lclNum);
15573 lclTyp = lvaGetActualType(lclNum);
15576 impPushVar(gtNewLclvNode(lclNum, lclTyp, offset), tiRetVal);
15579 // Load an argument on the operand stack
15580 // Shared by the various CEE_LDARG opcodes
15581 // ilArgNum is the argument index as specified in IL.
15582 // It will be mapped to the correct lvaTable index
15583 void Compiler::impLoadArg(unsigned ilArgNum, IL_OFFSET offset)
15585 Verify(ilArgNum < info.compILargsCount, "bad arg num");
15587 if (compIsForInlining())
15589 if (ilArgNum >= info.compArgsCount)
15591 compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_ARGUMENT_NUMBER);
15595 impPushVar(impInlineFetchArg(ilArgNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo),
15596 impInlineInfo->lclVarInfo[ilArgNum].lclVerTypeInfo);
15600 if (ilArgNum >= info.compArgsCount)
15605 unsigned lclNum = compMapILargNum(ilArgNum); // account for possible hidden param
15607 if (lclNum == info.compThisArg)
15609 lclNum = lvaArg0Var;
15612 impLoadVar(lclNum, offset);
15616 // Load a local on the operand stack
15617 // Shared by the various CEE_LDLOC opcodes
15618 // ilLclNum is the local index as specified in IL.
15619 // It will be mapped to the correct lvaTable index
15620 void Compiler::impLoadLoc(unsigned ilLclNum, IL_OFFSET offset)
15622 if (tiVerificationNeeded)
15624 Verify(ilLclNum < info.compMethodInfo->locals.numArgs, "bad loc num");
15625 Verify(info.compInitMem, "initLocals not set");
15628 if (compIsForInlining())
15630 if (ilLclNum >= info.compMethodInfo->locals.numArgs)
15632 compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_LOCAL_NUMBER);
15636 // Get the local type
15637 var_types lclTyp = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclTypeInfo;
15639 typeInfo tiRetVal = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclVerTypeInfo;
15641 /* Have we allocated a temp for this local? */
15643 unsigned lclNum = impInlineFetchLocal(ilLclNum DEBUGARG("Inline ldloc first use temp"));
15645 // All vars of inlined methods should be !lvNormalizeOnLoad()
15647 assert(!lvaTable[lclNum].lvNormalizeOnLoad());
15648 lclTyp = genActualType(lclTyp);
15650 impPushVar(gtNewLclvNode(lclNum, lclTyp), tiRetVal);
15654 if (ilLclNum >= info.compMethodInfo->locals.numArgs)
15659 unsigned lclNum = info.compArgsCount + ilLclNum;
15661 impLoadVar(lclNum, offset);
15665 #ifdef _TARGET_ARM_
15666 /**************************************************************************************
15668 * When assigning a vararg call src to a HFA lcl dest, mark that we cannot promote the
15669 * dst struct, because struct promotion will turn it into a float/double variable while
15670 * the rhs will be an int/long variable. We don't code generate assignment of int into
15671 * a float, but there is nothing that might prevent us from doing so. The tree however
15672 * would look like: (=, (typ_float, typ_int)) or (GT_TRANSFER, (typ_float, typ_int))
15674 * tmpNum - the lcl dst variable num that is a struct.
15675 * src - the src tree assigned to the dest that is a struct/int (when varargs call.)
15676 * hClass - the type handle for the struct variable.
15678 * TODO-ARM-CQ: [301608] This is a rare scenario with varargs and struct promotion coming into play,
15679 * however, we could do a codegen of transferring from int to float registers
15680 * (transfer, not a cast.)
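// Illustrative scenario (hypothetical type, not from the original comment): a one-element HFA such as
//     struct S { float f; };
// returned from a varargs call comes back in an integer register, so the S-typed destination local
// is marked lvIsMultiRegRet below to keep struct promotion from turning it into a float field that
// would then be assigned from an int.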
15683 void Compiler::impMarkLclDstNotPromotable(unsigned tmpNum, GenTree* src, CORINFO_CLASS_HANDLE hClass)
15685 if (src->gtOper == GT_CALL && src->gtCall.IsVarargs() && IsHfa(hClass))
15687 int hfaSlots = GetHfaCount(hClass);
15688 var_types hfaType = GetHfaType(hClass);
15690 // If we have varargs we morph the method's return type to be "int" irrespective of its original
15691 // type (struct/float) at import time, because the ABI requires varargs return values to come back in integer registers.
15692 // We don't want struct promotion to replace an expression like this:
15693 // lclFld_int = callvar_int() into lclFld_float = callvar_int();
15694 // This means an int is getting assigned to a float without a cast. Prevent the promotion.
15695 if ((hfaType == TYP_DOUBLE && hfaSlots == sizeof(double) / REGSIZE_BYTES) ||
15696 (hfaType == TYP_FLOAT && hfaSlots == sizeof(float) / REGSIZE_BYTES))
15698 // Make sure this struct type stays as struct so we can receive the call in a struct.
15699 lvaTable[tmpNum].lvIsMultiRegRet = true;
15703 #endif // _TARGET_ARM_
15705 #if FEATURE_MULTIREG_RET
15706 GenTree* Compiler::impAssignMultiRegTypeToVar(GenTree* op, CORINFO_CLASS_HANDLE hClass)
15708 unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Return value temp for multireg return."));
15709 impAssignTempGen(tmpNum, op, hClass, (unsigned)CHECK_SPILL_ALL);
15710 GenTree* ret = gtNewLclvNode(tmpNum, lvaTable[tmpNum].lvType);
15712 // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
15713 ret->gtFlags |= GTF_DONT_CSE;
15715 assert(IsMultiRegReturnedType(hClass));
15717 // Mark the var so that fields are not promoted and stay together.
15718 lvaTable[tmpNum].lvIsMultiRegRet = true;
15722 #endif // FEATURE_MULTIREG_RET
15724 // do import for a return
15725 // returns false if inlining was aborted
15726 // opcode can be ret or call in the case of a tail.call
15727 bool Compiler::impReturnInstruction(BasicBlock* block, int prefixFlags, OPCODE& opcode)
15729 if (tiVerificationNeeded)
15731 verVerifyThisPtrInitialised();
15733 unsigned expectedStack = 0;
15734 if (info.compRetType != TYP_VOID)
15736 typeInfo tiVal = impStackTop().seTypeInfo;
15737 typeInfo tiDeclared =
15738 verMakeTypeInfo(info.compMethodInfo->args.retType, info.compMethodInfo->args.retTypeClass);
15740 Verify(!verIsByRefLike(tiDeclared) || verIsSafeToReturnByRef(tiVal), "byref return");
15742 Verify(tiCompatibleWith(tiVal, tiDeclared.NormaliseForStack(), true), "type mismatch");
15745 Verify(verCurrentState.esStackDepth == expectedStack, "stack non-empty on return");
15749 // If we are importing an inlinee and have GC ref locals we always
15750 // need to have a spill temp for the return value. This temp
15751 // should have been set up in advance, over in fgFindBasicBlocks.
15752 if (compIsForInlining() && impInlineInfo->HasGcRefLocals() && (info.compRetType != TYP_VOID))
15754 assert(lvaInlineeReturnSpillTemp != BAD_VAR_NUM);
15758 GenTree* op2 = nullptr;
15759 GenTree* op1 = nullptr;
15760 CORINFO_CLASS_HANDLE retClsHnd = nullptr;
15762 if (info.compRetType != TYP_VOID)
15764 StackEntry se = impPopStack();
15765 retClsHnd = se.seTypeInfo.GetClassHandle();
15768 if (!compIsForInlining())
15770 impBashVarAddrsToI(op2);
15771 op2 = impImplicitIorI4Cast(op2, info.compRetType);
15772 op2 = impImplicitR4orR8Cast(op2, info.compRetType);
15773 assertImp((genActualType(op2->TypeGet()) == genActualType(info.compRetType)) ||
15774 ((op2->TypeGet() == TYP_I_IMPL) && (info.compRetType == TYP_BYREF)) ||
15775 ((op2->TypeGet() == TYP_BYREF) && (info.compRetType == TYP_I_IMPL)) ||
15776 (varTypeIsFloating(op2->gtType) && varTypeIsFloating(info.compRetType)) ||
15777 (varTypeIsStruct(op2) && varTypeIsStruct(info.compRetType)));
15780 if (opts.compGcChecks && info.compRetType == TYP_REF)
15782 // DDB 3483 : JIT Stress: early termination of GC ref's life time in exception code path
15783 // VSW 440513: Incorrect gcinfo on the return value under COMPlus_JitGCChecks=1 for methods with
15786 assert(op2->gtType == TYP_REF);
15788 // confirm that the argument is a GC pointer (for debugging (GC stress))
15789 GenTreeArgList* args = gtNewArgList(op2);
15790 op2 = gtNewHelperCallNode(CORINFO_HELP_CHECK_OBJ, TYP_REF, args);
15794 printf("\ncompGcChecks tree:\n");
15802 // inlinee's stack should be empty now.
15803 assert(verCurrentState.esStackDepth == 0);
15808 printf("\n\n Inlinee Return expression (before normalization) =>\n");
15813 // Make sure the type matches the original call.
15815 var_types returnType = genActualType(op2->gtType);
15816 var_types originalCallType = impInlineInfo->inlineCandidateInfo->fncRetType;
15817 if ((returnType != originalCallType) && (originalCallType == TYP_STRUCT))
15819 originalCallType = impNormStructType(impInlineInfo->inlineCandidateInfo->methInfo.args.retTypeClass);
15822 if (returnType != originalCallType)
15824 JITDUMP("Return type mismatch, have %s, needed %s\n", varTypeName(returnType),
15825 varTypeName(originalCallType));
15826 compInlineResult->NoteFatal(InlineObservation::CALLSITE_RETURN_TYPE_MISMATCH);
15830 // Below, we are going to set impInlineInfo->retExpr to the tree with the return
15831 // expression. At this point, retExpr could already be set if there are multiple
15832 // return blocks (meaning fgNeedReturnSpillTemp() == true) and one of
15833 // the other blocks already set it. If there is only a single return block,
15834 // retExpr shouldn't be set. However, this is not true if we reimport a block
15835 // with a return. In that case, retExpr will be set, then the block will be
15836 // reimported, but retExpr won't get cleared as part of setting the block to
15837 // be reimported. The reimported retExpr value should be the same, so even if
15838 // we don't unconditionally overwrite it, it shouldn't matter.
15839 if (info.compRetNativeType != TYP_STRUCT)
15841 // compRetNativeType is not TYP_STRUCT.
15842 // This implies it could be either a scalar type or SIMD vector type or
15843 // a struct type that can be normalized to a scalar type.
15845 if (varTypeIsStruct(info.compRetType))
15847 noway_assert(info.compRetBuffArg == BAD_VAR_NUM);
15848 // adjust the type away from struct to integral
15849 // and no normalizing
15850 op2 = impFixupStructReturnType(op2, retClsHnd);
15854 // Do we have to normalize?
15855 var_types fncRealRetType = JITtype2varType(info.compMethodInfo->args.retType);
15856 if ((varTypeIsSmall(op2->TypeGet()) || varTypeIsSmall(fncRealRetType)) &&
15857 fgCastNeeded(op2, fncRealRetType))
15859 // Small-typed return values are normalized by the callee
15860 op2 = gtNewCastNode(TYP_INT, op2, false, fncRealRetType);
15864 if (fgNeedReturnSpillTemp())
15866 assert(info.compRetNativeType != TYP_VOID &&
15867 (fgMoreThanOneReturnBlock() || impInlineInfo->HasGcRefLocals()));
15869 // If this method returns a ref type, track the actual types seen
15871 if (info.compRetType == TYP_REF)
15873 bool isExact = false;
15874 bool isNonNull = false;
15875 CORINFO_CLASS_HANDLE returnClsHnd = gtGetClassHandle(op2, &isExact, &isNonNull);
15877 if (impInlineInfo->retExpr == nullptr)
15879 // This is the first return, so best known type is the type
15880 // of this return value.
15881 impInlineInfo->retExprClassHnd = returnClsHnd;
15882 impInlineInfo->retExprClassHndIsExact = isExact;
15884 else if (impInlineInfo->retExprClassHnd != returnClsHnd)
15886 // This return site type differs from earlier seen sites,
15887 // so reset the info and we'll fall back to using the method's
15888 // declared return type for the return spill temp.
15889 impInlineInfo->retExprClassHnd = nullptr;
15890 impInlineInfo->retExprClassHndIsExact = false;
15894 // This is a bit of a workaround...
15895 // If we are inlining a call that returns a struct, where the actual "native" return type is
15896 // not a struct (for example, the struct is composed of exactly one int, and the native
15897 // return type is thus an int), and the inlinee has multiple return blocks (thus,
15898 // fgNeedReturnSpillTemp() == true, and lvaInlineeReturnSpillTemp is the index of a local var that is set
15899 // to the *native* return type), and at least one of the return blocks is the result of
15900 // a call, then we have a problem. The situation is like this (from a failed test case):
15903 // // Note: valuetype plinq_devtests.LazyTests/LIX is a struct with only a single int
15904 // call !!0 [mscorlib]System.Threading.LazyInitializer::EnsureInitialized<valuetype
15905 // plinq_devtests.LazyTests/LIX>(!!0&, bool&, object&, class [mscorlib]System.Func`1<!!0>)
15909 // ldobj !!T // this gets bashed to a GT_LCL_FLD, type TYP_INT
15912 // call !!0 System.Threading.LazyInitializer::EnsureInitializedCore<!!0>(!!0&, bool&,
15913 // object&, class System.Func`1<!!0>)
15916 // In the code above, when we call impFixupStructReturnType(), we will change the op2 return type
15917 // of the inlinee return node, but we don't do that for GT_CALL nodes, which we delay until
15918 // morphing when we call fgFixupStructReturn(). We do this, apparently, to handle nested
15919 // inlining properly by leaving the correct type on the GT_CALL node through importing.
15921 // To fix this, for this case, we temporarily change the GT_CALL node type to the
15922 // native return type, which is what it will be set to eventually. We generate the
15923 // assignment to the return temp, using the correct type, and then restore the GT_CALL
15924 // node type. During morphing, the GT_CALL will get the correct, final, native return type.
15926 bool restoreType = false;
15927 if ((op2->OperGet() == GT_CALL) && (info.compRetType == TYP_STRUCT))
15929 noway_assert(op2->TypeGet() == TYP_STRUCT);
15930 op2->gtType = info.compRetNativeType;
15931 restoreType = true;
15934 impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(),
15935 (unsigned)CHECK_SPILL_ALL);
15937 GenTree* tmpOp2 = gtNewLclvNode(lvaInlineeReturnSpillTemp, op2->TypeGet());
15941 op2->gtType = TYP_STRUCT; // restore it to what it was
15947 if (impInlineInfo->retExpr)
15949 // Some other block(s) have seen the CEE_RET first.
15950 // Better they spilled to the same temp.
15951 assert(impInlineInfo->retExpr->gtOper == GT_LCL_VAR);
15952 assert(impInlineInfo->retExpr->gtLclVarCommon.gtLclNum == op2->gtLclVarCommon.gtLclNum);
15960 printf("\n\n Inlinee Return expression (after normalization) =>\n");
15965 // Report the return expression
15966 impInlineInfo->retExpr = op2;
15970 // compRetNativeType is TYP_STRUCT.
15971 // This implies that the struct is returned either via a RetBuf arg or as a multi-reg struct return.
15973 GenTreeCall* iciCall = impInlineInfo->iciCall->AsCall();
15975 // Assign the inlinee return into a spill temp.
15976 // spill temp only exists if there are multiple return points
15977 if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
15979 // in this case we have to insert multiple struct copies to the temp
15980 // and the retexpr is just the temp.
15981 assert(info.compRetNativeType != TYP_VOID);
15982 assert(fgMoreThanOneReturnBlock() || impInlineInfo->HasGcRefLocals());
15984 impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(),
15985 (unsigned)CHECK_SPILL_ALL);
15988 #if defined(_TARGET_ARM_) || defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15989 #if defined(_TARGET_ARM_)
15990 // TODO-ARM64-NYI: HFA
15991 // TODO-AMD64-Unix and TODO-ARM: once the ARM64 functionality is implemented, the
15992 // next ifdefs could be refactored into a single method with the ifdef inside.
15993 if (IsHfa(retClsHnd))
15995 // Same as !IsHfa but just don't bother with impAssignStructPtr.
15996 #else // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
15997 ReturnTypeDesc retTypeDesc;
15998 retTypeDesc.InitializeStructReturnType(this, retClsHnd);
15999 unsigned retRegCount = retTypeDesc.GetReturnRegCount();
16001 if (retRegCount != 0)
16003 // If single eightbyte, the return type would have been normalized and there won't be a temp var.
16004 // This code will be called only if the struct return has not been normalized (i.e. 2 eightbytes - the max allowed).
16006 assert(retRegCount == MAX_RET_REG_COUNT);
16007 // Same as !structDesc.passedInRegisters but just don't bother with impAssignStructPtr.
16008 CLANG_FORMAT_COMMENT_ANCHOR;
16009 #endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
16011 if (fgNeedReturnSpillTemp())
16013 if (!impInlineInfo->retExpr)
16015 #if defined(_TARGET_ARM_)
16016 impInlineInfo->retExpr = gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType);
16017 #else // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
16018 // The inlinee compiler has figured out the type of the temp already. Use it here.
16019 impInlineInfo->retExpr =
16020 gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType);
16021 #endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
16026 impInlineInfo->retExpr = op2;
16030 #elif defined(_TARGET_ARM64_)
16031 ReturnTypeDesc retTypeDesc;
16032 retTypeDesc.InitializeStructReturnType(this, retClsHnd);
16033 unsigned retRegCount = retTypeDesc.GetReturnRegCount();
16035 if (retRegCount != 0)
16037 assert(!iciCall->HasRetBufArg());
16038 assert(retRegCount >= 2);
16039 if (fgNeedReturnSpillTemp())
16041 if (!impInlineInfo->retExpr)
16043 // The inlinee compiler has figured out the type of the temp already. Use it here.
16044 impInlineInfo->retExpr =
16045 gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType);
16050 impInlineInfo->retExpr = op2;
16054 #endif // defined(_TARGET_ARM64_)
16056 assert(iciCall->HasRetBufArg());
16057 GenTree* dest = gtCloneExpr(iciCall->gtCallArgs->gtOp.gtOp1);
16058 // spill temp only exists if there are multiple return points
16059 if (fgNeedReturnSpillTemp())
16061 // if this is the first return we have seen, set the retExpr
16062 if (!impInlineInfo->retExpr)
16064 impInlineInfo->retExpr =
16065 impAssignStructPtr(dest, gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType),
16066 retClsHnd, (unsigned)CHECK_SPILL_ALL);
16071 impInlineInfo->retExpr = impAssignStructPtr(dest, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL);
16078 if (compIsForInlining())
16083 if (info.compRetType == TYP_VOID)
16086 op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID);
16088 else if (info.compRetBuffArg != BAD_VAR_NUM)
16090 // Assign value to return buff (first param)
16091 GenTree* retBuffAddr = gtNewLclvNode(info.compRetBuffArg, TYP_BYREF, impCurStmtOffs);
16093 op2 = impAssignStructPtr(retBuffAddr, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL);
16094 impAppendTree(op2, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
16096 // There are cases where the address of the implicit RetBuf should be returned explicitly (in RAX).
16097 CLANG_FORMAT_COMMENT_ANCHOR;
16099 #if defined(_TARGET_AMD64_)
16101 // x64 (System V and Win64) calling convention requires to
16102 // return the implicit return buffer explicitly (in RAX).
16103 // Change the return type to be BYREF.
16104 op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF));
16105 #else // !defined(_TARGET_AMD64_)
16106 // In case of non-AMD64 targets the profiler hook requires to return the implicit RetBuf explicitly (in RAX).
16107 // In such case the return value of the function is changed to BYREF.
16108 // If profiler hook is not needed the return type of the function is TYP_VOID.
16109 if (compIsProfilerHookNeeded())
16111 op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF));
16116 op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID);
16118 #endif // !defined(_TARGET_AMD64_)
16120 else if (varTypeIsStruct(info.compRetType))
16122 #if !FEATURE_MULTIREG_RET
16123 // For both ARM architectures the HFA native types are maintained as structs.
16124 // Also on System V AMD64 the multireg structs returns are also left as structs.
16125 noway_assert(info.compRetNativeType != TYP_STRUCT);
16127 op2 = impFixupStructReturnType(op2, retClsHnd);
16129 op1 = gtNewOperNode(GT_RETURN, genActualType(info.compRetNativeType), op2);
16134 op1 = gtNewOperNode(GT_RETURN, genActualType(info.compRetType), op2);
16137 // We must have imported a tailcall and jumped to RET
16138 if (prefixFlags & PREFIX_TAILCALL)
16140 #if defined(FEATURE_CORECLR) || !defined(_TARGET_AMD64_)
16142 // This cannot be asserted on Amd64 since we permit an IL pattern such as: tail.call; pop; ret
16146 assert(verCurrentState.esStackDepth == 0 && impOpcodeIsCallOpcode(opcode));
16147 #endif // FEATURE_CORECLR || !_TARGET_AMD64_
16149 opcode = CEE_RET; // To prevent trying to spill if CALL_SITE_BOUNDARIES
16151 // impImportCall() would have already appended TYP_VOID calls
16152 if (info.compRetType == TYP_VOID)
16158 impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
16160 // Remember at which BC offset the tree was finished
16161 impNoteLastILoffs();
16166 /*****************************************************************************
16167 * Mark the block as unimported.
16168 * Note that the caller is responsible for calling impImportBlockPending(),
16169 * with the appropriate stack-state
16172 inline void Compiler::impReimportMarkBlock(BasicBlock* block)
16175 if (verbose && (block->bbFlags & BBF_IMPORTED))
16177 printf("\nBB%02u will be reimported\n", block->bbNum);
16181 block->bbFlags &= ~BBF_IMPORTED;
16184 /*****************************************************************************
16185 * Mark the successors of the given block as unimported.
16186 * Note that the caller is responsible for calling impImportBlockPending()
16187 * for all the successors, with the appropriate stack-state.
16190 void Compiler::impReimportMarkSuccessors(BasicBlock* block)
16192 const unsigned numSuccs = block->NumSucc();
16193 for (unsigned i = 0; i < numSuccs; i++)
16195 impReimportMarkBlock(block->GetSucc(i));
16199 /*****************************************************************************
16201 * Filter wrapper to handle only the passed-in exception code
16205 LONG FilterVerificationExceptions(PEXCEPTION_POINTERS pExceptionPointers, LPVOID lpvParam)
16207 if (pExceptionPointers->ExceptionRecord->ExceptionCode == SEH_VERIFICATION_EXCEPTION)
16209 return EXCEPTION_EXECUTE_HANDLER;
16212 return EXCEPTION_CONTINUE_SEARCH;
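// impVerifyEHBlock: for a block inside one or more 'try' regions, walk the enclosing EH
// descriptors, enforce the rules that apply on entry to a try (empty evaluation stack, and an
// initialized 'this' pointer for instance constructors), and queue the corresponding handler
// and filter begin blocks for importing with the appropriate entry stack -- either empty or
// holding just the incoming exception object.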
16215 void Compiler::impVerifyEHBlock(BasicBlock* block, bool isTryStart)
16217 assert(block->hasTryIndex());
16218 assert(!compIsForInlining());
16220 unsigned tryIndex = block->getTryIndex();
16221 EHblkDsc* HBtab = ehGetDsc(tryIndex);
16225 assert(block->bbFlags & BBF_TRY_BEG);
16227 // The Stack must be empty
16229 if (block->bbStkDepth != 0)
16231 BADCODE("Evaluation stack must be empty on entry into a try block");
16235 // Save the stack contents, we'll need to restore it later
16237 SavedStack blockState;
16238 impSaveStackState(&blockState, false);
16240 while (HBtab != nullptr)
16244 // Are we verifying that an instance constructor properly initializes its 'this' pointer once?
16245 // We do not allow the 'this' pointer to be uninitialized when entering most kinds of try regions
16247 if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
16249 // We trigger an invalid program exception here unless we have a try/fault region.
16251 if (HBtab->HasCatchHandler() || HBtab->HasFinallyHandler() || HBtab->HasFilter())
16254 "The 'this' pointer of an instance constructor is not initialized upon entry to a try region");
16258 // Allow a try/fault region to proceed.
16259 assert(HBtab->HasFaultHandler());
16263 /* Recursively process the handler block */
16264 BasicBlock* hndBegBB = HBtab->ebdHndBeg;
16266 // Construct the proper verification stack state
16267 // either empty or one that contains just
16268 // the Exception Object that we are dealing with
16270 verCurrentState.esStackDepth = 0;
16272 if (handlerGetsXcptnObj(hndBegBB->bbCatchTyp))
16274 CORINFO_CLASS_HANDLE clsHnd;
16276 if (HBtab->HasFilter())
16278 clsHnd = impGetObjectClass();
16282 CORINFO_RESOLVED_TOKEN resolvedToken;
16284 resolvedToken.tokenContext = impTokenLookupContextHandle;
16285 resolvedToken.tokenScope = info.compScopeHnd;
16286 resolvedToken.token = HBtab->ebdTyp;
16287 resolvedToken.tokenType = CORINFO_TOKENKIND_Class;
16288 info.compCompHnd->resolveToken(&resolvedToken);
16290 clsHnd = resolvedToken.hClass;
16293 // push the catch arg on the stack, spill to a temp if necessary
16294 // Note: can update HBtab->ebdHndBeg!
16295 hndBegBB = impPushCatchArgOnStack(hndBegBB, clsHnd, false);
16298 // Queue up the handler for importing
16300 impImportBlockPending(hndBegBB);
16302 if (HBtab->HasFilter())
16304 /* @VERIFICATION : Ideally the end of filter state should get
16305 propagated to the catch handler; this is an incompleteness,
16306 but is not a security/compliance issue, since the only
16307 interesting state is the 'thisInit' state.
16310 verCurrentState.esStackDepth = 0;
16312 BasicBlock* filterBB = HBtab->ebdFilter;
16314 // push the catch arg on the stack, spill to a temp if necessary
16315 // Note: can update HBtab->ebdFilter!
16316 const bool isSingleBlockFilter = (filterBB->bbNext == hndBegBB);
16317 filterBB = impPushCatchArgOnStack(filterBB, impGetObjectClass(), isSingleBlockFilter);
16319 impImportBlockPending(filterBB);
16322 else if (verTrackObjCtorInitState && HBtab->HasFaultHandler())
16324 /* Recursively process the handler block */
16326 verCurrentState.esStackDepth = 0;
16328 // Queue up the fault handler for importing
16330 impImportBlockPending(HBtab->ebdHndBeg);
16333 // Now process our enclosing try index (if any)
16335 tryIndex = HBtab->ebdEnclosingTryIndex;
16336 if (tryIndex == EHblkDsc::NO_ENCLOSING_INDEX)
16342 HBtab = ehGetDsc(tryIndex);
16346 // Restore the stack contents
16347 impRestoreStackState(&blockState);
16350 //***************************************************************
16351 // Import the instructions for the given basic block. Perform
16352 // verification, throwing an exception on failure. Push any successor blocks that are enabled for the first
16353 // time, or whose verification pre-state is changed.
16356 #pragma warning(push)
16357 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
16359 void Compiler::impImportBlock(BasicBlock* block)
16361 // BBF_INTERNAL blocks only exist during importation due to EH canonicalization. We need to
16362 // handle them specially. In particular, there is no IL to import for them, but we do need
16363 // to mark them as imported and put their successors on the pending import list.
16364 if (block->bbFlags & BBF_INTERNAL)
16366 JITDUMP("Marking BBF_INTERNAL block BB%02u as BBF_IMPORTED\n", block->bbNum);
16367 block->bbFlags |= BBF_IMPORTED;
16369 const unsigned numSuccs = block->NumSucc();
16370 for (unsigned i = 0; i < numSuccs; i++)
16372 impImportBlockPending(block->GetSucc(i));
16382 /* Make the block globally available */
16387 /* Initialize the debug variables */
16388 impCurOpcName = "unknown";
16389 impCurOpcOffs = block->bbCodeOffs;
16392 /* Set the current stack state to the merged result */
16393 verResetCurrentState(block, &verCurrentState);
16395 /* Now walk the code and import the IL into GenTrees */
16397 struct FilterVerificationExceptionsParam
16402 FilterVerificationExceptionsParam param;
16404 param.pThis = this;
16405 param.block = block;
16407 PAL_TRY(FilterVerificationExceptionsParam*, pParam, ¶m)
16409 /* @VERIFICATION : For now, the only state propagation from try
16410 to its handler is the "thisInit" state (the stack is empty at the start of a try).
16411 In general, for state that we track in verification, we need to
16412 model the possibility that an exception might happen at any IL
16413 instruction, so we really need to merge all states that obtain
16414 between IL instructions in a try block into the start states of its handlers.
16417 However, we do not allow the 'this' pointer to be uninitialized when
16418 entering most kinds of try regions (only try/fault regions are allowed to have
16419 an uninitialized 'this' pointer on entry to the try)
16421 Fortunately, the stack is thrown away when an exception
16422 leads to a handler, so we don't have to worry about that.
16423 We DO, however, have to worry about the "thisInit" state.
16424 But only for the try/fault case.
16426 The only allowed transition is from TIS_Uninit to TIS_Init.
16428 So for a try/fault region for the fault handler block
16429 we will merge the start state of the try begin
16430 and the post-state of each block that is part of this try region
16433 // merge the start state of the try begin
16435 if (pParam->block->bbFlags & BBF_TRY_BEG)
16437 pParam->pThis->impVerifyEHBlock(pParam->block, true);
16440 pParam->pThis->impImportBlockCode(pParam->block);
16442 // As discussed above:
16443 // merge the post-state of each block that is part of this try region
16445 if (pParam->block->hasTryIndex())
16447 pParam->pThis->impVerifyEHBlock(pParam->block, false);
16450 PAL_EXCEPT_FILTER(FilterVerificationExceptions)
16452 verHandleVerificationFailure(block DEBUGARG(false));
16456 if (compDonotInline())
16461 assert(!compDonotInline());
16463 markImport = false;
16467 unsigned baseTmp = NO_BASE_TMP; // input temps assigned to successor blocks
16468 bool reimportSpillClique = false;
16469 BasicBlock* tgtBlock = nullptr;
16471 /* If the stack is non-empty, we might have to spill its contents */
16473 if (verCurrentState.esStackDepth != 0)
16475 impBoxTemp = BAD_VAR_NUM; // if a box temp is used in a block that leaves something
16476 // on the stack, its lifetime is hard to determine, simply
16477 // don't reuse such temps.
16479 GenTree* addStmt = nullptr;
16481 /* Do the successors of 'block' have any other predecessors ?
16482 We do not want to do some of the optimizations related to multiRef
16483 if we can reimport blocks */
16485 unsigned multRef = impCanReimport ? unsigned(~0) : 0;
16487 switch (block->bbJumpKind)
16491 /* Temporarily remove the 'jtrue' from the end of the tree list */
16493 assert(impTreeLast);
16494 assert(impTreeLast->gtOper == GT_STMT);
16495 assert(impTreeLast->gtStmt.gtStmtExpr->gtOper == GT_JTRUE);
16497 addStmt = impTreeLast;
16498 impTreeLast = impTreeLast->gtPrev;
16500 /* Note if the next block has more than one ancestor */
16502 multRef |= block->bbNext->bbRefs;
16504 /* Does the next block have temps assigned? */
16506 baseTmp = block->bbNext->bbStkTempsIn;
16507 tgtBlock = block->bbNext;
16509 if (baseTmp != NO_BASE_TMP)
16514 /* Try the target of the jump then */
16516 multRef |= block->bbJumpDest->bbRefs;
16517 baseTmp = block->bbJumpDest->bbStkTempsIn;
16518 tgtBlock = block->bbJumpDest;
16522 multRef |= block->bbJumpDest->bbRefs;
16523 baseTmp = block->bbJumpDest->bbStkTempsIn;
16524 tgtBlock = block->bbJumpDest;
16528 multRef |= block->bbNext->bbRefs;
16529 baseTmp = block->bbNext->bbStkTempsIn;
16530 tgtBlock = block->bbNext;
16535 BasicBlock** jmpTab;
16538 /* Temporarily remove the GT_SWITCH from the end of the tree list */
16540 assert(impTreeLast);
16541 assert(impTreeLast->gtOper == GT_STMT);
16542 assert(impTreeLast->gtStmt.gtStmtExpr->gtOper == GT_SWITCH);
16544 addStmt = impTreeLast;
16545 impTreeLast = impTreeLast->gtPrev;
16547 jmpCnt = block->bbJumpSwt->bbsCount;
16548 jmpTab = block->bbJumpSwt->bbsDstTab;
16552 tgtBlock = (*jmpTab);
16554 multRef |= tgtBlock->bbRefs;
16556 // Thanks to spill cliques, we should have assigned all or none
16557 assert((baseTmp == NO_BASE_TMP) || (baseTmp == tgtBlock->bbStkTempsIn));
16558 baseTmp = tgtBlock->bbStkTempsIn;
16563 } while (++jmpTab, --jmpCnt);
16567 case BBJ_CALLFINALLY:
16568 case BBJ_EHCATCHRET:
16570 case BBJ_EHFINALLYRET:
16571 case BBJ_EHFILTERRET:
16573 NO_WAY("can't have 'unreached' end of BB with non-empty stack");
16577 noway_assert(!"Unexpected bbJumpKind");
16581 assert(multRef >= 1);
16583 /* Do we have a base temp number? */
16585 bool newTemps = (baseTmp == NO_BASE_TMP);
16589 /* Grab enough temps for the whole stack */
16590 baseTmp = impGetSpillTmpBase(block);
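// Every member of this block's spill clique shares the same run of temps starting at baseTmp,
// so a value spilled here is reloaded correctly by any successor regardless of which
// predecessor happened to be imported first.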
16593 /* Spill all stack entries into temps */
16594 unsigned level, tempNum;
16596 JITDUMP("\nSpilling stack entries into temps\n");
16597 for (level = 0, tempNum = baseTmp; level < verCurrentState.esStackDepth; level++, tempNum++)
16599 GenTree* tree = verCurrentState.esStack[level].val;
16601 /* VC generates code where it pushes a byref from one branch, and an int (ldc.i4 0) from
16602 the other. This should merge to a byref in unverifiable code.
16603 However, if the branch which leaves the TYP_I_IMPL on the stack is imported first, the
16604 successor would be imported assuming there was a TYP_I_IMPL on
16605 the stack. Thus the value would not get GC-tracked. Hence,
16606 change the temp to TYP_BYREF and reimport the successors.
16607 Note: We should only allow this in unverifiable code.
16609 if (tree->gtType == TYP_BYREF && lvaTable[tempNum].lvType == TYP_I_IMPL && !verNeedsVerification())
16611 lvaTable[tempNum].lvType = TYP_BYREF;
16612 impReimportMarkSuccessors(block);
16616 #ifdef _TARGET_64BIT_
16617 if (genActualType(tree->gtType) == TYP_I_IMPL && lvaTable[tempNum].lvType == TYP_INT)
16619 if (tiVerificationNeeded && tgtBlock->bbEntryState != nullptr &&
16620 (tgtBlock->bbFlags & BBF_FAILED_VERIFICATION) == 0)
16622 // Merge the current state into the entry state of block;
16623 // the call to verMergeEntryStates must have changed
16624 // the entry state of the block by merging the int local var
16625 // and the native-int stack entry.
16626 bool changed = false;
16627 if (verMergeEntryStates(tgtBlock, &changed))
16629 impRetypeEntryStateTemps(tgtBlock);
16630 impReimportBlockPending(tgtBlock);
16635 tgtBlock->bbFlags |= BBF_FAILED_VERIFICATION;
16640 // Some other block in the spill clique set this to "int", but now we have "native int".
16641 // Change the type and go back to re-import any blocks that used the wrong type.
16642 lvaTable[tempNum].lvType = TYP_I_IMPL;
16643 reimportSpillClique = true;
16645 else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_I_IMPL)
16647 // Spill clique has decided this should be "native int", but this block only pushes an "int".
16648 // Insert a sign-extension to "native int" so we match the clique.
16649 verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, false, TYP_I_IMPL);
16652 // Consider the case where one branch left a 'byref' on the stack and the other leaves
16653 // an 'int'. On 32-bit, this is allowed (in non-verifiable code) since they are the same
16654 // size. JIT64 managed to make this work on 64-bit. For compatibility, we support JIT64
16655 // behavior instead of asserting and then generating bad code (where we save/restore the
16656 // low 32 bits of a byref pointer to an 'int' sized local). If the 'int' side has been
16657 // imported already, we need to change the type of the local and reimport the spill clique.
16658 // If the 'byref' side has imported, we insert a cast from int to 'native int' to match
16659 // the 'byref' size.
16660 if (!tiVerificationNeeded)
16662 if (genActualType(tree->gtType) == TYP_BYREF && lvaTable[tempNum].lvType == TYP_INT)
16664 // Some other block in the spill clique set this to "int", but now we have "byref".
16665 // Change the type and go back to re-import any blocks that used the wrong type.
16666 lvaTable[tempNum].lvType = TYP_BYREF;
16667 reimportSpillClique = true;
16669 else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_BYREF)
16671 // Spill clique has decided this should be "byref", but this block only pushes an "int".
16672 // Insert a sign-extension to "native int" so we match the clique size.
16673 verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, false, TYP_I_IMPL);
16676 #endif // _TARGET_64BIT_
16678 #if FEATURE_X87_DOUBLES
16679 // X87 stack doesn't differentiate between float/double
16680 // so promoting is no big deal.
16681 // For everybody else keep it as float until we have a collision and then promote
16682 // Just like for x64's TYP_INT<->TYP_I_IMPL
16684 if (multRef > 1 && tree->gtType == TYP_FLOAT)
16686 verCurrentState.esStack[level].val = gtNewCastNode(TYP_DOUBLE, tree, TYP_DOUBLE);
16689 #else // !FEATURE_X87_DOUBLES
16691 if (tree->gtType == TYP_DOUBLE && lvaTable[tempNum].lvType == TYP_FLOAT)
16693 // Some other block in the spill clique set this to "float", but now we have "double".
16694 // Change the type and go back to re-import any blocks that used the wrong type.
16695 lvaTable[tempNum].lvType = TYP_DOUBLE;
16696 reimportSpillClique = true;
16698 else if (tree->gtType == TYP_FLOAT && lvaTable[tempNum].lvType == TYP_DOUBLE)
16700 // Spill clique has decided this should be "double", but this block only pushes a "float".
16701 // Insert a cast to "double" so we match the clique.
16702 verCurrentState.esStack[level].val = gtNewCastNode(TYP_DOUBLE, tree, false, TYP_DOUBLE);
16705 #endif // FEATURE_X87_DOUBLES
16707 /* If addStmt has a reference to tempNum (can only happen if we
16708 are spilling to the temps already used by a previous block),
16709 we need to spill addStmt */
16711 if (addStmt && !newTemps && gtHasRef(addStmt->gtStmt.gtStmtExpr, tempNum, false))
16713 GenTree* addTree = addStmt->gtStmt.gtStmtExpr;
16715 if (addTree->gtOper == GT_JTRUE)
16717 GenTree* relOp = addTree->gtOp.gtOp1;
16718 assert(relOp->OperIsCompare());
16720 var_types type = genActualType(relOp->gtOp.gtOp1->TypeGet());
16722 if (gtHasRef(relOp->gtOp.gtOp1, tempNum, false))
16724 unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op1"));
16725 impAssignTempGen(temp, relOp->gtOp.gtOp1, level);
16726 type = genActualType(lvaTable[temp].TypeGet());
16727 relOp->gtOp.gtOp1 = gtNewLclvNode(temp, type);
16730 if (gtHasRef(relOp->gtOp.gtOp2, tempNum, false))
16732 unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op2"));
16733 impAssignTempGen(temp, relOp->gtOp.gtOp2, level);
16734 type = genActualType(lvaTable[temp].TypeGet());
16735 relOp->gtOp.gtOp2 = gtNewLclvNode(temp, type);
16740 assert(addTree->gtOper == GT_SWITCH && genActualTypeIsIntOrI(addTree->gtOp.gtOp1->TypeGet()));
16742 unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt SWITCH"));
16743 impAssignTempGen(temp, addTree->gtOp.gtOp1, level);
16744 addTree->gtOp.gtOp1 = gtNewLclvNode(temp, genActualType(addTree->gtOp.gtOp1->TypeGet()));
16748 /* Spill the stack entry, and replace with the temp */
16750 if (!impSpillStackEntry(level, tempNum
16753 true, "Spill Stack Entry"
16759 BADCODE("bad stack state");
16762 // Oops. Something went wrong when spilling. Bad code.
16763 verHandleVerificationFailure(block DEBUGARG(true));
16769 /* Put back the 'jtrue'/'switch' if we removed it earlier */
16773 impAppendStmt(addStmt, (unsigned)CHECK_SPILL_NONE);
16777 // Some of the append/spill logic works on compCurBB
16779 assert(compCurBB == block);
16781 /* Save the tree list in the block */
16782 impEndTreeList(block);
16784 // impEndTreeList sets BBF_IMPORTED on the block
16785 // We do *NOT* want to set it later than this because
16786 // impReimportSpillClique might clear it if this block is both a
16787 // predecessor and successor in the current spill clique
16788 assert(block->bbFlags & BBF_IMPORTED);
16790 // If we had a int/native int, or float/double collision, we need to re-import
16791 if (reimportSpillClique)
16793 // This will re-import all the successors of block (as well as each of their predecessors)
16794 impReimportSpillClique(block);
16796 // For blocks that haven't been imported yet, we still need to mark them as pending import.
16797 const unsigned numSuccs = block->NumSucc();
16798 for (unsigned i = 0; i < numSuccs; i++)
16800 BasicBlock* succ = block->GetSucc(i);
16801 if ((succ->bbFlags & BBF_IMPORTED) == 0)
16803 impImportBlockPending(succ);
16807 else // the normal case
16809 // otherwise just import the successors of block
16811 /* Does this block jump to any other blocks? */
16812 const unsigned numSuccs = block->NumSucc();
16813 for (unsigned i = 0; i < numSuccs; i++)
16815 impImportBlockPending(block->GetSucc(i));
16820 #pragma warning(pop)
16823 /*****************************************************************************/
16825 // Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if
16826 // necessary (and ensures that it is a member of the set of BB's on the list, by setting its byte in
16827 // impPendingBlockMembers). Merges the current verification state into the verification state of "block"
16828 // (its "pre-state").
16830 void Compiler::impImportBlockPending(BasicBlock* block)
16835 printf("\nimpImportBlockPending for BB%02u\n", block->bbNum);
16839 // We will add a block to the pending set if it has not already been imported (or needs to be re-imported),
16840 // or if it has, but merging in a predecessor's post-state changes the block's pre-state.
16841 // (When we're doing verification, we always attempt the merge to detect verification errors.)
16843 // If the block has not been imported, add to pending set.
16844 bool addToPending = ((block->bbFlags & BBF_IMPORTED) == 0);
16846 // Initialize bbEntryState just the first time we try to add this block to the pending list
16847 // Just because bbEntryState is NULL doesn't mean the pre-state wasn't previously set
16848 // We use NULL to indicate the 'common' state to avoid memory allocation
16849 if ((block->bbEntryState == nullptr) && ((block->bbFlags & (BBF_IMPORTED | BBF_FAILED_VERIFICATION)) == 0) &&
16850 (impGetPendingBlockMember(block) == 0))
16852 verInitBBEntryState(block, &verCurrentState);
16853 assert(block->bbStkDepth == 0);
16854 block->bbStkDepth = static_cast<unsigned short>(verCurrentState.esStackDepth);
16855 assert(addToPending);
16856 assert(impGetPendingBlockMember(block) == 0);
16860 // The stack should have the same height on entry to the block from all its predecessors.
16861 if (block->bbStkDepth != verCurrentState.esStackDepth)
16865 sprintf_s(buffer, sizeof(buffer),
16866 "Block at offset %4.4x to %4.4x in %s entered with different stack depths.\n"
16867 "Previous depth was %d, current depth is %d",
16868 block->bbCodeOffs, block->bbCodeOffsEnd, info.compFullName, block->bbStkDepth,
16869 verCurrentState.esStackDepth);
16870 buffer[400 - 1] = 0;
16873 NO_WAY("Block entered with different stack depths");
16877 // Additionally, if we need to verify, merge the verification state.
16878 if (tiVerificationNeeded)
16880 // Merge the current state into the entry state of block; if this does not change the entry state
16881 // by merging, do not add the block to the pending-list.
16882 bool changed = false;
16883 if (!verMergeEntryStates(block, &changed))
16885 block->bbFlags |= BBF_FAILED_VERIFICATION;
16886 addToPending = true; // We will pop it off, and check the flag set above.
16890 addToPending = true;
16892 JITDUMP("Adding BB%02u to pending set due to new merge result\n", block->bbNum);
16901 if (block->bbStkDepth > 0)
16903 // We need to fix the types of any spill temps that might have changed:
16904 // int->native int, float->double, int->byref, etc.
16905 impRetypeEntryStateTemps(block);
16908 // OK, we must add to the pending list, if it's not already in it.
16909 if (impGetPendingBlockMember(block) != 0)
16915 // Get an entry to add to the pending list
16919 if (impPendingFree)
16921 // We can reuse one of the freed up dscs.
16922 dsc = impPendingFree;
16923 impPendingFree = dsc->pdNext;
16927 // We have to create a new dsc
16928 dsc = new (this, CMK_Unknown) PendingDsc;
16932 dsc->pdSavedStack.ssDepth = verCurrentState.esStackDepth;
16933 dsc->pdThisPtrInit = verCurrentState.thisInitialized;
16935 // Save the stack trees for later
16937 if (verCurrentState.esStackDepth)
16939 impSaveStackState(&dsc->pdSavedStack, false);
16942 // Add the entry to the pending list
16944 dsc->pdNext = impPendingList;
16945 impPendingList = dsc;
16946 impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set.
16948 // Various assertions require us to now consider the block as not imported (at least for
16949 // the final time...)
16950 block->bbFlags &= ~BBF_IMPORTED;
16955 printf("Added PendingDsc - %08p for BB%02u\n", dspPtr(dsc), block->bbNum);
16960 /*****************************************************************************/
16962 // Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if
16963 // necessary (and ensures that it is a member of the set of BB's on the list, by setting its byte in
16964 // impPendingBlockMembers). Does *NOT* change the existing "pre-state" of the block.
16966 void Compiler::impReimportBlockPending(BasicBlock* block)
16968 JITDUMP("\nimpReimportBlockPending for BB%02u", block->bbNum);
16970 assert(block->bbFlags & BBF_IMPORTED);
16972 // OK, we must add to the pending list, if it's not already in it.
16973 if (impGetPendingBlockMember(block) != 0)
16978 // Get an entry to add to the pending list
16982 if (impPendingFree)
16984 // We can reuse one of the freed up dscs.
16985 dsc = impPendingFree;
16986 impPendingFree = dsc->pdNext;
16990 // We have to create a new dsc
16991 dsc = new (this, CMK_ImpStack) PendingDsc;
16996 if (block->bbEntryState)
16998 dsc->pdThisPtrInit = block->bbEntryState->thisInitialized;
16999 dsc->pdSavedStack.ssDepth = block->bbEntryState->esStackDepth;
17000 dsc->pdSavedStack.ssTrees = block->bbEntryState->esStack;
17004 dsc->pdThisPtrInit = TIS_Bottom;
17005 dsc->pdSavedStack.ssDepth = 0;
17006 dsc->pdSavedStack.ssTrees = nullptr;
17009 // Add the entry to the pending list
17011 dsc->pdNext = impPendingList;
17012 impPendingList = dsc;
17013 impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set.
17015 // Various assertions require us to now consider the block as not imported (at least for
17016 // the final time...)
17017 block->bbFlags &= ~BBF_IMPORTED;
17022 printf("Added PendingDsc - %08p for BB%02u\n", dspPtr(dsc), block->bbNum);
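// BlockListNode allocation: reuse a node from the free list when one is available, otherwise
// carve a new one out of the compiler's arena. These nodes are used transiently while walking
// spill cliques (see impWalkSpillCliqueFromPred below).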
17027 void* Compiler::BlockListNode::operator new(size_t sz, Compiler* comp)
17029 if (comp->impBlockListNodeFreeList == nullptr)
17031 return (BlockListNode*)comp->compGetMem(sizeof(BlockListNode), CMK_BasicBlock);
17035 BlockListNode* res = comp->impBlockListNodeFreeList;
17036 comp->impBlockListNodeFreeList = res->m_next;
17041 void Compiler::FreeBlockListNode(Compiler::BlockListNode* node)
17043 node->m_next = impBlockListNodeFreeList;
17044 impBlockListNodeFreeList = node;
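// impWalkSpillCliqueFromPred: starting from a predecessor block, discover the full "spill
// clique" -- the set of blocks connected, transitively, through stack values that are live
// across block boundaries -- by alternately adding the successors of known predecessor members
// and the predecessors of known successor members, invoking the callback once for each newly
// discovered member.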
17047 void Compiler::impWalkSpillCliqueFromPred(BasicBlock* block, SpillCliqueWalker* callback)
17051 noway_assert(!fgComputePredsDone);
17052 if (!fgCheapPredsValid)
17054 fgComputeCheapPreds();
17057 BlockListNode* succCliqueToDo = nullptr;
17058 BlockListNode* predCliqueToDo = new (this) BlockListNode(block);
17062 // Look at the successors of every member of the predecessor to-do list.
17063 while (predCliqueToDo != nullptr)
17065 BlockListNode* node = predCliqueToDo;
17066 predCliqueToDo = node->m_next;
17067 BasicBlock* blk = node->m_blk;
17068 FreeBlockListNode(node);
17070 const unsigned numSuccs = blk->NumSucc();
17071 for (unsigned succNum = 0; succNum < numSuccs; succNum++)
17073 BasicBlock* succ = blk->GetSucc(succNum);
17074 // If it's not already in the clique, add it, and also add it
17075 // as a member of the successor "toDo" set.
17076 if (impSpillCliqueGetMember(SpillCliqueSucc, succ) == 0)
17078 callback->Visit(SpillCliqueSucc, succ);
17079 impSpillCliqueSetMember(SpillCliqueSucc, succ, 1);
17080 succCliqueToDo = new (this) BlockListNode(succ, succCliqueToDo);
17085 // Look at the predecessors of every member of the successor to-do list.
17086 while (succCliqueToDo != nullptr)
17088 BlockListNode* node = succCliqueToDo;
17089 succCliqueToDo = node->m_next;
17090 BasicBlock* blk = node->m_blk;
17091 FreeBlockListNode(node);
17093 for (BasicBlockList* pred = blk->bbCheapPreds; pred != nullptr; pred = pred->next)
17095 BasicBlock* predBlock = pred->block;
17096 // If it's not already in the clique, add it, and also add it
17097 // as a member of the predecessor "toDo" set.
17098 if (impSpillCliqueGetMember(SpillCliquePred, predBlock) == 0)
17100 callback->Visit(SpillCliquePred, predBlock);
17101 impSpillCliqueSetMember(SpillCliquePred, predBlock, 1);
17102 predCliqueToDo = new (this) BlockListNode(predBlock, predCliqueToDo);
17109 // If this fails, it means we didn't walk the spill clique properly and somehow managed
17110 // to miss walking back to include the predecessor we started from.
17111 // The most likely cause: missing or out-of-date bbPreds
17112 assert(impSpillCliqueGetMember(SpillCliquePred, block) != 0);
17115 void Compiler::SetSpillTempsBase::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk)
17117 if (predOrSucc == SpillCliqueSucc)
17119 assert(blk->bbStkTempsIn == NO_BASE_TMP); // Should not already be a member of a clique as a successor.
17120 blk->bbStkTempsIn = m_baseTmp;
17124 assert(predOrSucc == SpillCliquePred);
17125 assert(blk->bbStkTempsOut == NO_BASE_TMP); // Should not already be a member of a clique as a predecessor.
17126 blk->bbStkTempsOut = m_baseTmp;
17130 void Compiler::ReimportSpillClique::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk)
17132 // For Preds we could be a little smarter and just find the existing store
17133 // and re-type it/add a cast, but that is complicated and hopefully very rare, so
17134 // just re-import the whole block (just like we do for successors)
17136 if (((blk->bbFlags & BBF_IMPORTED) == 0) && (m_pComp->impGetPendingBlockMember(blk) == 0))
17138 // If we haven't imported this block and we're not going to (because it isn't on
17139 // the pending list) then just ignore it for now.
17141 // This block has either never been imported (EntryState == NULL) or it failed
17142 // verification. Neither state requires us to force it to be imported now.
17143 assert((blk->bbEntryState == nullptr) || (blk->bbFlags & BBF_FAILED_VERIFICATION));
17147 // For successors we have a valid verCurrentState, so just mark them for reimport
17148 // the 'normal' way
17149 // Unlike predecessors, we *DO* need to reimport the current block because the
17150 // initial import had the wrong entry state types.
17151 // Similarly, blocks that are currently on the pending list, still need to call
17152 // impImportBlockPending to fixup their entry state.
17153 if (predOrSucc == SpillCliqueSucc)
17155 m_pComp->impReimportMarkBlock(blk);
17157 // Set the current stack state to that of the blk->bbEntryState
17158 m_pComp->verResetCurrentState(blk, &m_pComp->verCurrentState);
17159 assert(m_pComp->verCurrentState.thisInitialized == blk->bbThisOnEntry());
17161 m_pComp->impImportBlockPending(blk);
17163 else if ((blk != m_pComp->compCurBB) && ((blk->bbFlags & BBF_IMPORTED) != 0))
17165 // As described above, we are only visiting predecessors so they can
17166 // add the appropriate casts, since we have already done that for the current
17167 // block, it does not need to be reimported.
17168 // Nor do we need to reimport blocks that are still pending, but not yet imported.
17171 // For predecessors, we have no state to seed the EntryState, so we just have
17172 // to assume the existing one is correct.
17173 // If the block is also a successor, it will get the EntryState properly
17174 // updated when it is visited as a successor in the above "if" block.
17175 assert(predOrSucc == SpillCliquePred);
17176 m_pComp->impReimportBlockPending(blk);
17180 // Re-type the incoming lclVar nodes to match the varDsc.
17181 void Compiler::impRetypeEntryStateTemps(BasicBlock* blk)
17183 if (blk->bbEntryState != nullptr)
17185 EntryState* es = blk->bbEntryState;
17186 for (unsigned level = 0; level < es->esStackDepth; level++)
17188 GenTree* tree = es->esStack[level].val;
17189 if ((tree->gtOper == GT_LCL_VAR) || (tree->gtOper == GT_LCL_FLD))
17191 unsigned lclNum = tree->gtLclVarCommon.gtLclNum;
17192 noway_assert(lclNum < lvaCount);
17193 LclVarDsc* varDsc = lvaTable + lclNum;
17194 es->esStack[level].val->gtType = varDsc->TypeGet();
17200 unsigned Compiler::impGetSpillTmpBase(BasicBlock* block)
17202 if (block->bbStkTempsOut != NO_BASE_TMP)
17204 return block->bbStkTempsOut;
17210 printf("\n*************** In impGetSpillTmpBase(BB%02u)\n", block->bbNum);
17214 // Otherwise, choose one, and propagate to all members of the spill clique.
17215 // Grab enough temps for the whole stack.
17216 unsigned baseTmp = lvaGrabTemps(verCurrentState.esStackDepth DEBUGARG("IL Stack Entries"));
17217 SetSpillTempsBase callback(baseTmp);
17219 // We do *NOT* need to reset the SpillClique*Members because a block can only be the predecessor
17220 // to one spill clique, and similarly can only be the successor to one spill clique
17221 impWalkSpillCliqueFromPred(block, &callback);
17226 void Compiler::impReimportSpillClique(BasicBlock* block)
17231 printf("\n*************** In impReimportSpillClique(BB%02u)\n", block->bbNum);
17235 // If we get here, it is because this block is already part of a spill clique
17236 // and one predecessor had an outgoing live stack slot of type int, and this
17237 // block has an outgoing live stack slot of type native int.
17238 // We need to reset these before traversal because they have already been set
17239 // by the previous walk to determine all the members of the spill clique.
17240 impInlineRoot()->impSpillCliquePredMembers.Reset();
17241 impInlineRoot()->impSpillCliqueSuccMembers.Reset();
17243 ReimportSpillClique callback(this);
17245 impWalkSpillCliqueFromPred(block, &callback);
17248 // Set the pre-state of "block" (which should not have a pre-state allocated) to
17249 // a copy of "srcState", cloning tree pointers as required.
17250 void Compiler::verInitBBEntryState(BasicBlock* block, EntryState* srcState)
17252 if (srcState->esStackDepth == 0 && srcState->thisInitialized == TIS_Bottom)
17254 block->bbEntryState = nullptr;
17258 block->bbEntryState = (EntryState*)compGetMem(sizeof(EntryState));
17260 // block->bbEntryState.esRefcount = 1;
17262 block->bbEntryState->esStackDepth = srcState->esStackDepth;
17263 block->bbEntryState->thisInitialized = TIS_Bottom;
17265 if (srcState->esStackDepth > 0)
17267 block->bbSetStack(new (this, CMK_Unknown) StackEntry[srcState->esStackDepth]);
17268 unsigned stackSize = srcState->esStackDepth * sizeof(StackEntry);
17270 memcpy(block->bbEntryState->esStack, srcState->esStack, stackSize);
17271 for (unsigned level = 0; level < srcState->esStackDepth; level++)
17273 GenTree* tree = srcState->esStack[level].val;
17274 block->bbEntryState->esStack[level].val = gtCloneExpr(tree);
17278 if (verTrackObjCtorInitState)
17280 verSetThisInit(block, srcState->thisInitialized);
17286 void Compiler::verSetThisInit(BasicBlock* block, ThisInitState tis)
17288 assert(tis != TIS_Bottom); // Precondition.
17289 if (block->bbEntryState == nullptr)
17291 block->bbEntryState = new (this, CMK_Unknown) EntryState();
17294 block->bbEntryState->thisInitialized = tis;
17298 * Resets the current state to the state at the start of the basic block
17300 void Compiler::verResetCurrentState(BasicBlock* block, EntryState* destState)
17303 if (block->bbEntryState == nullptr)
17305 destState->esStackDepth = 0;
17306 destState->thisInitialized = TIS_Bottom;
17310 destState->esStackDepth = block->bbEntryState->esStackDepth;
17312 if (destState->esStackDepth > 0)
17314 unsigned stackSize = destState->esStackDepth * sizeof(StackEntry);
17316 memcpy(destState->esStack, block->bbStackOnEntry(), stackSize);
17319 destState->thisInitialized = block->bbThisOnEntry();
17324 ThisInitState BasicBlock::bbThisOnEntry()
17326 return bbEntryState ? bbEntryState->thisInitialized : TIS_Bottom;
17329 unsigned BasicBlock::bbStackDepthOnEntry()
17331 return (bbEntryState ? bbEntryState->esStackDepth : 0);
17334 void BasicBlock::bbSetStack(void* stackBuffer)
17336 assert(bbEntryState);
17337 assert(stackBuffer);
17338 bbEntryState->esStack = (StackEntry*)stackBuffer;
17341 StackEntry* BasicBlock::bbStackOnEntry()
17343 assert(bbEntryState);
17344 return bbEntryState->esStack;
17347 void Compiler::verInitCurrentState()
17349 verTrackObjCtorInitState = FALSE;
17350 verCurrentState.thisInitialized = TIS_Bottom;
17352 if (tiVerificationNeeded)
17354 // Track this ptr initialization
17355 if (!info.compIsStatic && (info.compFlags & CORINFO_FLG_CONSTRUCTOR) && lvaTable[0].lvVerTypeInfo.IsObjRef())
17357 verTrackObjCtorInitState = TRUE;
17358 verCurrentState.thisInitialized = TIS_Uninit;
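// i.e. a reference-type instance constructor starts with an uninitialized 'this', and the
// verifier must see it transition to TIS_Init before a return or before entry into most
// kinds of try regions (see impVerifyEHBlock above).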
17362 // initialize stack info
17364 verCurrentState.esStackDepth = 0;
17365 assert(verCurrentState.esStack != nullptr);
17367 // copy current state to entry state of first BB
17368 verInitBBEntryState(fgFirstBB, &verCurrentState);
17371 Compiler* Compiler::impInlineRoot()
17373 if (impInlineInfo == nullptr)
17379 return impInlineInfo->InlineRoot;
17383 BYTE Compiler::impSpillCliqueGetMember(SpillCliqueDir predOrSucc, BasicBlock* blk)
17385 if (predOrSucc == SpillCliquePred)
17387 return impInlineRoot()->impSpillCliquePredMembers.Get(blk->bbInd());
17391 assert(predOrSucc == SpillCliqueSucc);
17392 return impInlineRoot()->impSpillCliqueSuccMembers.Get(blk->bbInd());
17396 void Compiler::impSpillCliqueSetMember(SpillCliqueDir predOrSucc, BasicBlock* blk, BYTE val)
17398 if (predOrSucc == SpillCliquePred)
17400 impInlineRoot()->impSpillCliquePredMembers.Set(blk->bbInd(), val);
17404 assert(predOrSucc == SpillCliqueSucc);
17405 impInlineRoot()->impSpillCliqueSuccMembers.Set(blk->bbInd(), val);
17409 /*****************************************************************************
17411 * Convert the instrs ("import") into our internal format (trees). The
17412 * basic flowgraph has already been constructed and is passed in.
17415 void Compiler::impImport(BasicBlock* method)
17420 printf("*************** In impImport() for %s\n", info.compFullName);
17424 /* Allocate the stack contents */
17426 if (info.compMaxStack <= _countof(impSmallStack))
17428 /* Use local variable, don't waste time allocating on the heap */
17430 impStkSize = _countof(impSmallStack);
17431 verCurrentState.esStack = impSmallStack;
17435 impStkSize = info.compMaxStack;
17436 verCurrentState.esStack = new (this, CMK_ImpStack) StackEntry[impStkSize];
17439 // initialize the entry state at start of method
17440 verInitCurrentState();
17442 // Initialize stuff related to figuring "spill cliques" (see spec comment for impGetSpillTmpBase).
17443 Compiler* inlineRoot = impInlineRoot();
17444 if (this == inlineRoot) // These are only used on the root of the inlining tree.
17446 // We have initialized these previously, but to size 0. Make them larger.
17447 impPendingBlockMembers.Init(getAllocator(), fgBBNumMax * 2);
17448 impSpillCliquePredMembers.Init(getAllocator(), fgBBNumMax * 2);
17449 impSpillCliqueSuccMembers.Init(getAllocator(), fgBBNumMax * 2);
17451 inlineRoot->impPendingBlockMembers.Reset(fgBBNumMax * 2);
17452 inlineRoot->impSpillCliquePredMembers.Reset(fgBBNumMax * 2);
17453 inlineRoot->impSpillCliqueSuccMembers.Reset(fgBBNumMax * 2);
17454 impBlockListNodeFreeList = nullptr;
17457 impLastILoffsStmt = nullptr;
17458 impNestedStackSpill = false;
17460 impBoxTemp = BAD_VAR_NUM;
17462 impPendingList = impPendingFree = nullptr;
17464 /* Add the entry-point to the worker-list */
17466 // Skip leading internal blocks. There can be one as a leading scratch BB, and more
17467 // from EH normalization.
17468 // NOTE: It might be possible to always just put fgFirstBB on the pending list, and let everything else just fall out from there.
17470 for (; method->bbFlags & BBF_INTERNAL; method = method->bbNext)
17472 // Treat these as imported.
17473 assert(method->bbJumpKind == BBJ_NONE); // We assume all the leading ones are fallthrough.
17474 JITDUMP("Marking leading BBF_INTERNAL block BB%02u as BBF_IMPORTED\n", method->bbNum);
17475 method->bbFlags |= BBF_IMPORTED;
17478 impImportBlockPending(method);
17480 /* Import blocks in the worker-list until there are no more */
17482 while (impPendingList)
17484 /* Remove the entry at the front of the list */
17486 PendingDsc* dsc = impPendingList;
17487 impPendingList = impPendingList->pdNext;
17488 impSetPendingBlockMember(dsc->pdBB, 0);
17490 /* Restore the stack state */
17492 verCurrentState.thisInitialized = dsc->pdThisPtrInit;
17493 verCurrentState.esStackDepth = dsc->pdSavedStack.ssDepth;
17494 if (verCurrentState.esStackDepth)
17496 impRestoreStackState(&dsc->pdSavedStack);
17499 /* Add the entry to the free list for reuse */
17501 dsc->pdNext = impPendingFree;
17502 impPendingFree = dsc;
17504 /* Now import the block */
17506 if (dsc->pdBB->bbFlags & BBF_FAILED_VERIFICATION)
17509 #ifdef _TARGET_64BIT_
17510 // On AMD64, during verification we have to match JIT64 behavior since the VM is very tightly
17511 // coupled with the JIT64 IL Verification logic. Look inside verHandleVerificationFailure
17512 // method for further explanation on why we raise this exception instead of making the jitted
17513 // code throw the verification exception during execution.
17514 if (tiVerificationNeeded && opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IMPORT_ONLY))
17516 BADCODE("Basic block marked as not verifiable");
17519 #endif // _TARGET_64BIT_
17521 verConvertBBToThrowVerificationException(dsc->pdBB DEBUGARG(true));
17522 impEndTreeList(dsc->pdBB);
17527 impImportBlock(dsc->pdBB);
17529 if (compDonotInline())
17533 if (compIsForImportOnly() && !tiVerificationNeeded)
17541 if (verbose && info.compXcptnsCount)
17543 printf("\nAfter impImport() added block for try,catch,finally");
17544 fgDispBasicBlocks();
17548 // Used in impImportBlockPending() for STRESS_CHK_REIMPORT
17549 for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
17551 block->bbFlags &= ~BBF_VISITED;
17555 assert(!compIsForInlining() || !tiVerificationNeeded);
17558 // Checks if a typeinfo (usually stored in the type stack) is a struct.
17559 // The invariant here is that if it's not a ref or a method and has a class handle,
17560 // it's a valuetype.
17561 bool Compiler::impIsValueType(typeInfo* pTypeInfo)
17563 if (pTypeInfo && pTypeInfo->IsValueClassWithClsHnd())
17573 /*****************************************************************************
17574 * Check to see if the tree is the address of a local or
17575 the address of a field in a local.
17577 *lclVarTreeOut will contain the GT_LCL_VAR tree when it returns TRUE.
17581 BOOL Compiler::impIsAddressInLocal(GenTree* tree, GenTree** lclVarTreeOut)
17583 if (tree->gtOper != GT_ADDR)
17588 GenTree* op = tree->gtOp.gtOp1;
17589 while (op->gtOper == GT_FIELD)
17591 op = op->gtField.gtFldObj;
17592 if (op && op->gtOper == GT_ADDR) // Skip static fields where op will be NULL.
17594 op = op->gtOp.gtOp1;
17602 if (op->gtOper == GT_LCL_VAR)
17604 *lclVarTreeOut = op;
17613 //------------------------------------------------------------------------
17614 // impMakeDiscretionaryInlineObservations: make observations that help
17615 // determine the profitability of a discretionary inline
17618 // pInlineInfo -- InlineInfo for the inline, or null for the prejit root
17619 // inlineResult -- InlineResult accumulating information about this inline
17622 // If inlining or prejitting the root, this method also makes
17623 // various observations about the method that factor into inline
17624 // decisions. It sets `compNativeSizeEstimate` as a side effect.
17626 void Compiler::impMakeDiscretionaryInlineObservations(InlineInfo* pInlineInfo, InlineResult* inlineResult)
17628 assert(pInlineInfo != nullptr && compIsForInlining() || // Perform the actual inlining.
17629 pInlineInfo == nullptr && !compIsForInlining() // Calculate the static inlining hint for ngen.
17632 // If we're really inlining, we should just have one result in play.
17633 assert((pInlineInfo == nullptr) || (inlineResult == pInlineInfo->inlineResult));
17635 // If this is a "forceinline" method, the JIT probably shouldn't have gone
17636 // to the trouble of estimating the native code size. Even if it did, it
17637 // shouldn't be relying on the result of this method.
17638 assert(inlineResult->GetObservation() == InlineObservation::CALLEE_IS_DISCRETIONARY_INLINE);
17640 // Note if the caller contains NEWOBJ or NEWARR.
17641 Compiler* rootCompiler = impInlineRoot();
17643 if ((rootCompiler->optMethodFlags & OMF_HAS_NEWARRAY) != 0)
17645 inlineResult->Note(InlineObservation::CALLER_HAS_NEWARRAY);
17648 if ((rootCompiler->optMethodFlags & OMF_HAS_NEWOBJ) != 0)
17650 inlineResult->Note(InlineObservation::CALLER_HAS_NEWOBJ);
17653 bool calleeIsStatic = (info.compFlags & CORINFO_FLG_STATIC) != 0;
17654 bool isSpecialMethod = (info.compFlags & CORINFO_FLG_CONSTRUCTOR) != 0;
17656 if (isSpecialMethod)
17658 if (calleeIsStatic)
17660 inlineResult->Note(InlineObservation::CALLEE_IS_CLASS_CTOR);
17664 inlineResult->Note(InlineObservation::CALLEE_IS_INSTANCE_CTOR);
17667 else if (!calleeIsStatic)
17669 // Callee is an instance method.
17671 // Check if the callee has the same 'this' as the root.
17672 if (pInlineInfo != nullptr)
17674 GenTree* thisArg = pInlineInfo->iciCall->gtCall.gtCallObjp;
17676 bool isSameThis = impIsThis(thisArg);
17677 inlineResult->NoteBool(InlineObservation::CALLSITE_IS_SAME_THIS, isSameThis);
17681 // Note if the callee's class is a promotable struct
17682 if ((info.compClassAttr & CORINFO_FLG_VALUECLASS) != 0)
17684 lvaStructPromotionInfo structPromotionInfo;
17685 lvaCanPromoteStructType(info.compClassHnd, &structPromotionInfo, false);
17686 if (structPromotionInfo.canPromote)
17688 inlineResult->Note(InlineObservation::CALLEE_CLASS_PROMOTABLE);
17692 #ifdef FEATURE_SIMD
17694 // Note if this method has SIMD args or a SIMD return value
17695 if (pInlineInfo != nullptr && pInlineInfo->hasSIMDTypeArgLocalOrReturn)
17697 inlineResult->Note(InlineObservation::CALLEE_HAS_SIMD);
17700 #endif // FEATURE_SIMD
17702 // Roughly classify callsite frequency.
17703 InlineCallsiteFrequency frequency = InlineCallsiteFrequency::UNUSED;
17705 // If this is a prejit root, or a maximally hot block...
17706 if ((pInlineInfo == nullptr) || (pInlineInfo->iciBlock->bbWeight >= BB_MAX_WEIGHT))
17708 frequency = InlineCallsiteFrequency::HOT;
17710 // No training data. Look for loop-like things.
17711 // We consider a recursive call loop-like. Do not give the inlining boost to the method itself.
17712 // However, give it to things nearby.
17713 else if ((pInlineInfo->iciBlock->bbFlags & BBF_BACKWARD_JUMP) &&
17714 (pInlineInfo->fncHandle != pInlineInfo->inlineCandidateInfo->ilCallerHandle))
17716 frequency = InlineCallsiteFrequency::LOOP;
17718 else if (pInlineInfo->iciBlock->hasProfileWeight() && (pInlineInfo->iciBlock->bbWeight > BB_ZERO_WEIGHT))
17720 frequency = InlineCallsiteFrequency::WARM;
17722 // Now modify the multiplier based on where we're called from.
17723 else if (pInlineInfo->iciBlock->isRunRarely() || ((info.compFlags & FLG_CCTOR) == FLG_CCTOR))
17725 frequency = InlineCallsiteFrequency::RARE;
17729 frequency = InlineCallsiteFrequency::BORING;
17732 // Also capture the block weight of the call site. In the prejit
17733 // root case, assume there's some hot call site for this method.
17734 unsigned weight = 0;
17736 if (pInlineInfo != nullptr)
17738 weight = pInlineInfo->iciBlock->bbWeight;
17742 weight = BB_MAX_WEIGHT;
17745 inlineResult->NoteInt(InlineObservation::CALLSITE_FREQUENCY, static_cast<int>(frequency));
17746 inlineResult->NoteInt(InlineObservation::CALLSITE_WEIGHT, static_cast<int>(weight));
17749 /*****************************************************************************
17750 This method makes a STATIC inlining decision based on the IL code alone.
17751 It should not make any inlining decision based on the context.
17752 If forceInline is true, then the inlining decision should not depend on
17753 performance heuristics (code size, etc.).
17756 void Compiler::impCanInlineIL(CORINFO_METHOD_HANDLE fncHandle,
17757 CORINFO_METHOD_INFO* methInfo,
17759 InlineResult* inlineResult)
17761 unsigned codeSize = methInfo->ILCodeSize;
17763 // We shouldn't have made up our minds yet...
17764 assert(!inlineResult->IsDecided());
17766 if (methInfo->EHcount)
17768 inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_EH);
17772 if ((methInfo->ILCode == nullptr) || (codeSize == 0))
17774 inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NO_BODY);
17778 // For now we don't inline varargs (import code can't handle it)
17780 if (methInfo->args.isVarArg())
17782 inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS);
17786 // Reject if it has too many locals.
17787 // This is currently an implementation limit due to fixed-size arrays in the
17788 // inline info, rather than a performance heuristic.
17790 inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_LOCALS, methInfo->locals.numArgs);
17792 if (methInfo->locals.numArgs > MAX_INL_LCLS)
17794 inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_LOCALS);
17798 // Make sure there aren't too many arguments.
17799 // This is currently an implementation limit due to fixed-size arrays in the
17800 // inline info, rather than a performance heuristic.
17802 inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_ARGUMENTS, methInfo->args.numArgs);
17804 if (methInfo->args.numArgs > MAX_INL_ARGS)
17806 inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_ARGUMENTS);
17810 // Note force inline state
17812 inlineResult->NoteBool(InlineObservation::CALLEE_IS_FORCE_INLINE, forceInline);
17814 // Note IL code size
17816 inlineResult->NoteInt(InlineObservation::CALLEE_IL_CODE_SIZE, codeSize);
17818 if (inlineResult->IsFailure())
17823 // Make sure maxstack is not too big
17825 inlineResult->NoteInt(InlineObservation::CALLEE_MAXSTACK, methInfo->maxStack);
17827 if (inlineResult->IsFailure())
17833 /*****************************************************************************
17836 void Compiler::impCheckCanInline(GenTree* call,
17837 CORINFO_METHOD_HANDLE fncHandle,
17839 CORINFO_CONTEXT_HANDLE exactContextHnd,
17840 InlineCandidateInfo** ppInlineCandidateInfo,
17841 InlineResult* inlineResult)
17843 // Either EE or JIT might throw exceptions below.
17844 // If that happens, just don't inline the method.
17850 CORINFO_METHOD_HANDLE fncHandle;
17852 CORINFO_CONTEXT_HANDLE exactContextHnd;
17853 InlineResult* result;
17854 InlineCandidateInfo** ppInlineCandidateInfo;
17856 memset(&param, 0, sizeof(param));
17858 param.pThis = this;
17860 param.fncHandle = fncHandle;
17861 param.methAttr = methAttr;
17862 param.exactContextHnd = (exactContextHnd != nullptr) ? exactContextHnd : MAKE_METHODCONTEXT(fncHandle);
17863 param.result = inlineResult;
17864 param.ppInlineCandidateInfo = ppInlineCandidateInfo;
17866 bool success = eeRunWithErrorTrap<Param>(
17867 [](Param* pParam) {
17868 DWORD dwRestrictions = 0;
17869 CorInfoInitClassResult initClassResult;
17872 const char* methodName;
17873 const char* className;
17874 methodName = pParam->pThis->eeGetMethodName(pParam->fncHandle, &className);
17876 if (JitConfig.JitNoInline())
17878 pParam->result->NoteFatal(InlineObservation::CALLEE_IS_JIT_NOINLINE);
17883 /* Try to get the code address/size for the method */
17885 CORINFO_METHOD_INFO methInfo;
17886 if (!pParam->pThis->info.compCompHnd->getMethodInfo(pParam->fncHandle, &methInfo))
17888 pParam->result->NoteFatal(InlineObservation::CALLEE_NO_METHOD_INFO);
17893 forceInline = !!(pParam->methAttr & CORINFO_FLG_FORCEINLINE);
17895 pParam->pThis->impCanInlineIL(pParam->fncHandle, &methInfo, forceInline, pParam->result);
17897 if (pParam->result->IsFailure())
17899 assert(pParam->result->IsNever());
17903 // Speculatively check if initClass() can be done.
17904 // If it can be done, we will try to inline the method. If inlining
17905 // succeeds, then we will do the non-speculative initClass() and commit it.
17906 // If this speculative call to initClass() fails, there is no point
17907 // trying to inline this method.
17909 pParam->pThis->info.compCompHnd->initClass(nullptr /* field */, pParam->fncHandle /* method */,
17910 pParam->exactContextHnd /* context */,
17911 TRUE /* speculative */);
17913 if (initClassResult & CORINFO_INITCLASS_DONT_INLINE)
17915 pParam->result->NoteFatal(InlineObservation::CALLSITE_CLASS_INIT_FAILURE_SPEC);
17919 // Give the EE the final say in whether to inline or not.
17920 // This should be last since for verifiable code, this can be expensive
17922 /* VM Inline check also ensures that the method is verifiable if needed */
17923 CorInfoInline vmResult;
17924 vmResult = pParam->pThis->info.compCompHnd->canInline(pParam->pThis->info.compMethodHnd, pParam->fncHandle,
17927 if (vmResult == INLINE_FAIL)
17929 pParam->result->NoteFatal(InlineObservation::CALLSITE_IS_VM_NOINLINE);
17931 else if (vmResult == INLINE_NEVER)
17933 pParam->result->NoteFatal(InlineObservation::CALLEE_IS_VM_NOINLINE);
17936 if (pParam->result->IsFailure())
17938 // Make sure not to report this one. It was already reported by the VM.
17939 pParam->result->SetReported();
17943 // check for unsupported inlining restrictions
17944 assert((dwRestrictions & ~(INLINE_RESPECT_BOUNDARY | INLINE_NO_CALLEE_LDSTR | INLINE_SAME_THIS)) == 0);
17946 if (dwRestrictions & INLINE_SAME_THIS)
17948 GenTree* thisArg = pParam->call->gtCall.gtCallObjp;
17951 if (!pParam->pThis->impIsThis(thisArg))
17953 pParam->result->NoteFatal(InlineObservation::CALLSITE_REQUIRES_SAME_THIS);
17958 /* Get the method properties */
17960 CORINFO_CLASS_HANDLE clsHandle;
17961 clsHandle = pParam->pThis->info.compCompHnd->getMethodClass(pParam->fncHandle);
17963 clsAttr = pParam->pThis->info.compCompHnd->getClassAttribs(clsHandle);
17965 /* Get the return type */
17967 var_types fncRetType;
17968 fncRetType = pParam->call->TypeGet();
17971 var_types fncRealRetType;
17972 fncRealRetType = JITtype2varType(methInfo.args.retType);
17974 assert((genActualType(fncRealRetType) == genActualType(fncRetType)) ||
17975 // <BUGNUM> VSW 288602 </BUGNUM>
17976 // In case of IJW, we allow to assign a native pointer to a BYREF.
17977 (fncRetType == TYP_BYREF && methInfo.args.retType == CORINFO_TYPE_PTR) ||
17978 (varTypeIsStruct(fncRetType) && (fncRealRetType == TYP_STRUCT)));
17982 // Allocate an InlineCandidateInfo structure
17984 InlineCandidateInfo* pInfo;
17985 pInfo = new (pParam->pThis, CMK_Inlining) InlineCandidateInfo;
17987 pInfo->dwRestrictions = dwRestrictions;
17988 pInfo->methInfo = methInfo;
17989 pInfo->methAttr = pParam->methAttr;
17990 pInfo->clsHandle = clsHandle;
17991 pInfo->clsAttr = clsAttr;
17992 pInfo->fncRetType = fncRetType;
17993 pInfo->exactContextHnd = pParam->exactContextHnd;
17994 pInfo->ilCallerHandle = pParam->pThis->info.compMethodHnd;
17995 pInfo->initClassResult = initClassResult;
17997 *(pParam->ppInlineCandidateInfo) = pInfo;
18004 param.result->NoteFatal(InlineObservation::CALLSITE_COMPILATION_ERROR);
18008 //------------------------------------------------------------------------
18009 // impInlineRecordArgInfo: record information about an inline candidate argument
18012 // pInlineInfo - inline info for the inline candidate
18013 // curArgVal - tree for the caller actual argument value
18014 // argNum - logical index of this argument
18015 // inlineResult - result of ongoing inline evaluation
18019 // Checks for various inline blocking conditions and makes notes in
18020 // the inline info arg table about the properties of the actual. These
18021 // properties are used later by impInlineFetchArg to determine how best to
18022 // pass the argument into the inlinee.
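// As a rough map of what gets recorded, the properties noted below amount to
// the following simplified sketch (illustrative only; the real fields live in
// InlArgInfo and carry a few more details):
//
//     struct ArgProps
//     {
//         bool isInvariant;          // constant, or address of a local (directly substitutable)
//         bool isCallerLocalVar;     // a plain GT_LCL_VAR supplied by the caller
//         bool hasGlobalRef;         // reads or writes globally visible state
//         bool hasSideEffect;        // other side effects; forces evaluation to a temp
//         bool isByRefToStructLocal; // address of a caller struct local (or normed struct)
//     };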
18024 void Compiler::impInlineRecordArgInfo(InlineInfo* pInlineInfo,
18025 GenTree* curArgVal,
18027 InlineResult* inlineResult)
18029 InlArgInfo* inlCurArgInfo = &pInlineInfo->inlArgInfo[argNum];
18031 if (curArgVal->gtOper == GT_MKREFANY)
18033 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_IS_MKREFANY);
18037 inlCurArgInfo->argNode = curArgVal;
18039 GenTree* lclVarTree;
18040 if (impIsAddressInLocal(curArgVal, &lclVarTree) && varTypeIsStruct(lclVarTree))
18042 inlCurArgInfo->argIsByRefToStructLocal = true;
18043 #ifdef FEATURE_SIMD
18044 if (lvaTable[lclVarTree->AsLclVarCommon()->gtLclNum].lvSIMDType)
18046 pInlineInfo->hasSIMDTypeArgLocalOrReturn = true;
18048 #endif // FEATURE_SIMD
18051 if (curArgVal->gtFlags & GTF_ALL_EFFECT)
18053 inlCurArgInfo->argHasGlobRef = (curArgVal->gtFlags & GTF_GLOB_REF) != 0;
18054 inlCurArgInfo->argHasSideEff = (curArgVal->gtFlags & (GTF_ALL_EFFECT & ~GTF_GLOB_REF)) != 0;
18057 if (curArgVal->gtOper == GT_LCL_VAR)
18059 inlCurArgInfo->argIsLclVar = true;
18061 /* Remember the "original" argument number */
18062 curArgVal->gtLclVar.gtLclILoffs = argNum;
18065 if ((curArgVal->OperKind() & GTK_CONST) ||
18066 ((curArgVal->gtOper == GT_ADDR) && (curArgVal->gtOp.gtOp1->gtOper == GT_LCL_VAR)))
18068 inlCurArgInfo->argIsInvariant = true;
18069 if (inlCurArgInfo->argIsThis && (curArgVal->gtOper == GT_CNS_INT) && (curArgVal->gtIntCon.gtIconVal == 0))
18071 // Abort inlining at this call site
18072 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_HAS_NULL_THIS);
18077 // If the arg is a local that is address-taken, we can't safely
18078 // directly substitute it into the inlinee.
18080 // Previously we'd accomplish this by setting "argHasLdargaOp" but
18081 // that has a stronger meaning: that the arg value can change in
18082 // the method body. Using that flag prevents type propagation,
18083 // which is safe in this case.
18085 // Instead mark the arg as having a caller local ref.
18086 if (!inlCurArgInfo->argIsInvariant && gtHasLocalsWithAddrOp(curArgVal))
18088 inlCurArgInfo->argHasCallerLocalRef = true;
18094 if (inlCurArgInfo->argIsThis)
18096 printf("thisArg:");
18100 printf("\nArgument #%u:", argNum);
18102 if (inlCurArgInfo->argIsLclVar)
18104 printf(" is a local var");
18106 if (inlCurArgInfo->argIsInvariant)
18108 printf(" is a constant");
18110 if (inlCurArgInfo->argHasGlobRef)
18112 printf(" has global refs");
18114 if (inlCurArgInfo->argHasCallerLocalRef)
18116 printf(" has caller local ref");
18118 if (inlCurArgInfo->argHasSideEff)
18120 printf(" has side effects");
18122 if (inlCurArgInfo->argHasLdargaOp)
18124 printf(" has ldarga effect");
18126 if (inlCurArgInfo->argHasStargOp)
18128 printf(" has starg effect");
18130 if (inlCurArgInfo->argIsByRefToStructLocal)
18132 printf(" is byref to a struct local");
18136 gtDispTree(curArgVal);
18142 //------------------------------------------------------------------------
18143 // impInlineInitVars: setup inline information for inlinee args and locals
18146 // pInlineInfo - inline info for the inline candidate
18149 // This method primarily adds caller-supplied info to the inlArgInfo
18150 // and sets up the lclVarInfo table.
18152 // For args, the inlArgInfo records properties of the actual argument
18153 // including the tree node that produces the arg value. This node is
18154 // usually the tree node present at the call, but may also differ in various ways:
18156 // - when the call arg is a GT_RET_EXPR, we search back through the ret
18157 // expr chain for the actual node. Note this will either be the original
18158 // call (which will be a failed inline by this point), or the return
18159 // expression from some set of inlines.
18160 // - when argument type casting is needed the necessary casts are added
18161 // around the argument node.
18162 // - if an argument can be simplified by folding then the node here is the folded expression.
18165 // The method may make observations that lead to marking this candidate as
18166 // a failed inline. If this happens the initialization is abandoned immediately
18167 // to try and reduce the jit time cost for a failed inline.
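// A minimal sketch of the GT_RET_EXPR walk described above. The Node type and
// field names are illustrative stand-ins for GenTree and gtRetExprVal(), not
// the real representation.
//
//     struct Node
//     {
//         bool  isRetExprPlaceholder;
//         Node* substitute; // filled in once the inline producing the value is resolved
//     };
//
//     static Node* ResolveActualArg(Node* arg)
//     {
//         // Walk back through any chain of return-expression placeholders to the
//         // tree that actually produces the argument value.
//         while (arg->isRetExprPlaceholder)
//         {
//             arg = arg->substitute;
//         }
//         return arg;
//     }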
18169 void Compiler::impInlineInitVars(InlineInfo* pInlineInfo)
18171 assert(!compIsForInlining());
18173 GenTree* call = pInlineInfo->iciCall;
18174 CORINFO_METHOD_INFO* methInfo = &pInlineInfo->inlineCandidateInfo->methInfo;
18175 unsigned clsAttr = pInlineInfo->inlineCandidateInfo->clsAttr;
18176 InlArgInfo* inlArgInfo = pInlineInfo->inlArgInfo;
18177 InlLclVarInfo* lclVarInfo = pInlineInfo->lclVarInfo;
18178 InlineResult* inlineResult = pInlineInfo->inlineResult;
18180 const bool hasRetBuffArg = impMethodInfo_hasRetBuffArg(methInfo);
18182 /* Init the argument struct */
18184 memset(inlArgInfo, 0, (MAX_INL_ARGS + 1) * sizeof(inlArgInfo[0]));
18186 /* Get hold of the 'this' pointer and the argument list proper */
18188 GenTree* thisArg = call->gtCall.gtCallObjp;
18189 GenTree* argList = call->gtCall.gtCallArgs;
18190 unsigned argCnt = 0; // Count of the arguments
18192 assert((methInfo->args.hasThis()) == (thisArg != nullptr));
18196 inlArgInfo[0].argIsThis = true;
18197 GenTree* actualThisArg = thisArg->gtRetExprVal();
18198 impInlineRecordArgInfo(pInlineInfo, actualThisArg, argCnt, inlineResult);
18200 if (inlineResult->IsFailure())
18205 /* Increment the argument count */
18209 /* Record some information about each of the arguments */
18210 bool hasTypeCtxtArg = (methInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) != 0;
18212 #if USER_ARGS_COME_LAST
18213 unsigned typeCtxtArg = thisArg ? 1 : 0;
18214 #else // USER_ARGS_COME_LAST
18215 unsigned typeCtxtArg = methInfo->args.totalILArgs();
18216 #endif // USER_ARGS_COME_LAST
18218 for (GenTree* argTmp = argList; argTmp; argTmp = argTmp->gtOp.gtOp2)
18220 if (argTmp == argList && hasRetBuffArg)
18225 // Ignore the type context argument
18226 if (hasTypeCtxtArg && (argCnt == typeCtxtArg))
18228 pInlineInfo->typeContextArg = typeCtxtArg;
18229 typeCtxtArg = 0xFFFFFFFF;
18233 assert(argTmp->gtOper == GT_LIST);
18234 GenTree* arg = argTmp->gtOp.gtOp1;
18235 GenTree* actualArg = arg->gtRetExprVal();
18236 impInlineRecordArgInfo(pInlineInfo, actualArg, argCnt, inlineResult);
18238 if (inlineResult->IsFailure())
18243 /* Increment the argument count */
18247 /* Make sure we got the arg number right */
18248 assert(argCnt == methInfo->args.totalILArgs());
18250 #ifdef FEATURE_SIMD
18251 bool foundSIMDType = pInlineInfo->hasSIMDTypeArgLocalOrReturn;
18252 #endif // FEATURE_SIMD
18254 /* We have typeless opcodes, get type information from the signature */
18260 if (clsAttr & CORINFO_FLG_VALUECLASS)
18262 sigType = TYP_BYREF;
18269 lclVarInfo[0].lclVerTypeInfo = verMakeTypeInfo(pInlineInfo->inlineCandidateInfo->clsHandle);
18270 lclVarInfo[0].lclHasLdlocaOp = false;
18272 #ifdef FEATURE_SIMD
18273 // We always want to check isSIMDClass, since we want to set foundSIMDType (to increase
18274 // the inlining multiplier) for anything in that assembly.
18275 // But we only need to normalize it if it is a TYP_STRUCT
18276 // (which we need to do even if we have already set foundSIMDType).
18277 if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDorHWSIMDClass(&(lclVarInfo[0].lclVerTypeInfo)))
18279 if (sigType == TYP_STRUCT)
18281 sigType = impNormStructType(lclVarInfo[0].lclVerTypeInfo.GetClassHandle());
18283 foundSIMDType = true;
18285 #endif // FEATURE_SIMD
18286 lclVarInfo[0].lclTypeInfo = sigType;
18288 assert(varTypeIsGC(thisArg->gtType) || // "this" is managed
18289 (thisArg->gtType == TYP_I_IMPL && // "this" is unmanaged but the method's class doesn't care
18290 (clsAttr & CORINFO_FLG_VALUECLASS)));
18292 if (genActualType(thisArg->gtType) != genActualType(sigType))
18294 if (sigType == TYP_REF)
18296 /* The argument cannot be bashed into a ref (see bug 750871) */
18297 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_REF);
18301 /* This can only happen with byrefs <-> ints/shorts */
18303 assert(genActualType(sigType) == TYP_I_IMPL || sigType == TYP_BYREF);
18304 assert(genActualType(thisArg->gtType) == TYP_I_IMPL || thisArg->gtType == TYP_BYREF);
18306 if (sigType == TYP_BYREF)
18308 lclVarInfo[0].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
18310 else if (thisArg->gtType == TYP_BYREF)
18312 assert(sigType == TYP_I_IMPL);
18314 /* If possible change the BYREF to an int */
18315 if (thisArg->IsVarAddr())
18317 thisArg->gtType = TYP_I_IMPL;
18318 lclVarInfo[0].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
18322 /* Arguments 'int <- byref' cannot be bashed */
18323 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_INT);
18330 /* Init the types of the arguments and make sure the types
18331 * from the trees match the types in the signature */
18333 CORINFO_ARG_LIST_HANDLE argLst;
18334 argLst = methInfo->args.args;
18337 for (i = (thisArg ? 1 : 0); i < argCnt; i++, argLst = info.compCompHnd->getArgNext(argLst))
18339 var_types sigType = (var_types)eeGetArgType(argLst, &methInfo->args);
18341 lclVarInfo[i].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->args, argLst);
18343 #ifdef FEATURE_SIMD
18344 if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDorHWSIMDClass(&(lclVarInfo[i].lclVerTypeInfo)))
18346 // If this is a SIMD class (i.e. in the SIMD assembly), then we will consider that we've
18347 // found a SIMD type, even if this may not be a type we recognize (the assumption is that
18348 // it is likely to use a SIMD type, and therefore we want to increase the inlining multiplier).
18349 foundSIMDType = true;
18350 if (sigType == TYP_STRUCT)
18352 var_types structType = impNormStructType(lclVarInfo[i].lclVerTypeInfo.GetClassHandle());
18353 sigType = structType;
18356 #endif // FEATURE_SIMD
18358 lclVarInfo[i].lclTypeInfo = sigType;
18359 lclVarInfo[i].lclHasLdlocaOp = false;
18361 /* Does the tree type match the signature type? */
18363 GenTree* inlArgNode = inlArgInfo[i].argNode;
18365 if (sigType != inlArgNode->gtType)
18367 /* In valid IL, this can only happen for short integer types or byrefs <-> [native] ints,
18368 but in bad IL cases with caller-callee signature mismatches we can see other types.
18369 Intentionally reject cases with mismatches so the jit is more flexible when
18370 encountering bad IL. */
18372 bool isPlausibleTypeMatch = (genActualType(sigType) == genActualType(inlArgNode->gtType)) ||
18373 (genActualTypeIsIntOrI(sigType) && inlArgNode->gtType == TYP_BYREF) ||
18374 (sigType == TYP_BYREF && genActualTypeIsIntOrI(inlArgNode->gtType));
18376 if (!isPlausibleTypeMatch)
18378 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_TYPES_INCOMPATIBLE);
18382 /* Is it a narrowing or widening cast?
18383 * Widening casts are ok since the value computed is already
18384 * normalized to an int (on the IL stack) */
18386 if (genTypeSize(inlArgNode->gtType) >= genTypeSize(sigType))
18388 if (sigType == TYP_BYREF)
18390 lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
18392 else if (inlArgNode->gtType == TYP_BYREF)
18394 assert(varTypeIsIntOrI(sigType));
18396 /* If possible bash the BYREF to an int */
18397 if (inlArgNode->IsVarAddr())
18399 inlArgNode->gtType = TYP_I_IMPL;
18400 lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
18404 /* Arguments 'int <- byref' cannot be changed */
18405 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_INT);
18409 else if (genTypeSize(sigType) < EA_PTRSIZE)
18411 /* Narrowing cast */
18413 if (inlArgNode->gtOper == GT_LCL_VAR &&
18414 !lvaTable[inlArgNode->gtLclVarCommon.gtLclNum].lvNormalizeOnLoad() &&
18415 sigType == lvaGetRealType(inlArgNode->gtLclVarCommon.gtLclNum))
18417 /* We don't need to insert a cast here as the variable
18418 was assigned a normalized value of the right type */
18423 inlArgNode = inlArgInfo[i].argNode = gtNewCastNode(TYP_INT, inlArgNode, false, sigType);
18425 inlArgInfo[i].argIsLclVar = false;
18427 /* Try to fold the node in case we have constant arguments */
18429 if (inlArgInfo[i].argIsInvariant)
18431 inlArgNode = gtFoldExprConst(inlArgNode);
18432 inlArgInfo[i].argNode = inlArgNode;
18433 assert(inlArgNode->OperIsConst());
18436 #ifdef _TARGET_64BIT_
18437 else if (genTypeSize(genActualType(inlArgNode->gtType)) < genTypeSize(sigType))
18439 // This should only happen for int -> native int widening
18440 inlArgNode = inlArgInfo[i].argNode =
18441 gtNewCastNode(genActualType(sigType), inlArgNode, false, sigType);
18443 inlArgInfo[i].argIsLclVar = false;
18445 /* Try to fold the node in case we have constant arguments */
18447 if (inlArgInfo[i].argIsInvariant)
18449 inlArgNode = gtFoldExprConst(inlArgNode);
18450 inlArgInfo[i].argNode = inlArgNode;
18451 assert(inlArgNode->OperIsConst());
18454 #endif // _TARGET_64BIT_
18459 /* Init the types of the local variables */
18461 CORINFO_ARG_LIST_HANDLE localsSig;
18462 localsSig = methInfo->locals.args;
18464 for (i = 0; i < methInfo->locals.numArgs; i++)
18467 var_types type = (var_types)eeGetArgType(localsSig, &methInfo->locals, &isPinned);
18469 lclVarInfo[i + argCnt].lclHasLdlocaOp = false;
18470 lclVarInfo[i + argCnt].lclIsPinned = isPinned;
18471 lclVarInfo[i + argCnt].lclTypeInfo = type;
18473 if (varTypeIsGC(type))
18475 pInlineInfo->numberOfGcRefLocals++;
18480 // Pinned locals may cause inlines to fail.
18481 inlineResult->Note(InlineObservation::CALLEE_HAS_PINNED_LOCALS);
18482 if (inlineResult->IsFailure())
18488 lclVarInfo[i + argCnt].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->locals, localsSig);
18490 // If this local is a struct type with GC fields, inform the inliner. It may choose to bail
18491 // out on the inline.
18492 if (type == TYP_STRUCT)
18494 CORINFO_CLASS_HANDLE lclHandle = lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle();
18495 DWORD typeFlags = info.compCompHnd->getClassAttribs(lclHandle);
18496 if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0)
18498 inlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT);
18499 if (inlineResult->IsFailure())
18504 // Do further notification in the case where the call site is rare; some policies do
18505 // not track the relative hotness of call sites for "always" inline cases.
18506 if (pInlineInfo->iciBlock->isRunRarely())
18508 inlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT);
18509 if (inlineResult->IsFailure())
18518 localsSig = info.compCompHnd->getArgNext(localsSig);
18520 #ifdef FEATURE_SIMD
18521 if ((!foundSIMDType || (type == TYP_STRUCT)) && isSIMDorHWSIMDClass(&(lclVarInfo[i + argCnt].lclVerTypeInfo)))
18523 foundSIMDType = true;
18524 if (featureSIMD && type == TYP_STRUCT)
18526 var_types structType = impNormStructType(lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle());
18527 lclVarInfo[i + argCnt].lclTypeInfo = structType;
18530 #endif // FEATURE_SIMD
18533 #ifdef FEATURE_SIMD
18534 if (!foundSIMDType && (call->AsCall()->gtRetClsHnd != nullptr) && isSIMDorHWSIMDClass(call->AsCall()->gtRetClsHnd))
18536 foundSIMDType = true;
18538 pInlineInfo->hasSIMDTypeArgLocalOrReturn = foundSIMDType;
18539 #endif // FEATURE_SIMD
18542 //------------------------------------------------------------------------
18543 // impInlineFetchLocal: get a local var that represents an inlinee local
18546 // lclNum -- number of the inlinee local
18547 // reason -- debug string describing purpose of the local var
18550 // Number of the local to use
18553 // This method is invoked only for locals actually used in the inlinee body.
18556 // Allocates a new temp if necessary, and copies key properties
18557 // over from the inlinee local var info.
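// The allocation pattern used below is "grab a temp on the first use, then
// reuse it". A simplified standalone sketch (FetchLocal, GrabTemp and the
// table argument are hypothetical stand-ins for the real members):
//
//     static const unsigned NO_TEMP = 0xFFFFFFFF;
//
//     unsigned GrabTemp(); // hypothetical allocator for a fresh caller-side temp
//
//     unsigned FetchLocal(unsigned lclNum, unsigned* lclTmpNum /* per-inlinee table */)
//     {
//         unsigned tmpNum = lclTmpNum[lclNum];
//         if (tmpNum == NO_TEMP)
//         {
//             tmpNum            = GrabTemp(); // allocate once
//             lclTmpNum[lclNum] = tmpNum;     // remember it for later uses of this local
//             // ... then copy type, pinnedness, class handle, etc. from the inlinee info
//         }
//         return tmpNum;
//     }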
18559 unsigned Compiler::impInlineFetchLocal(unsigned lclNum DEBUGARG(const char* reason))
18561 assert(compIsForInlining());
18563 unsigned tmpNum = impInlineInfo->lclTmpNum[lclNum];
18565 if (tmpNum == BAD_VAR_NUM)
18567 const InlLclVarInfo& inlineeLocal = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt];
18568 const var_types lclTyp = inlineeLocal.lclTypeInfo;
18570 // The lifetime of this local might span multiple BBs.
18571 // So it is a long lifetime local.
18572 impInlineInfo->lclTmpNum[lclNum] = tmpNum = lvaGrabTemp(false DEBUGARG(reason));
18574 // Copy over key info
18575 lvaTable[tmpNum].lvType = lclTyp;
18576 lvaTable[tmpNum].lvHasLdAddrOp = inlineeLocal.lclHasLdlocaOp;
18577 lvaTable[tmpNum].lvPinned = inlineeLocal.lclIsPinned;
18578 lvaTable[tmpNum].lvHasILStoreOp = inlineeLocal.lclHasStlocOp;
18579 lvaTable[tmpNum].lvHasMultipleILStoreOp = inlineeLocal.lclHasMultipleStlocOp;
18581 // Copy over class handle for ref types. Note this may be a
18582 // shared type -- someday perhaps we can get the exact
18583 // signature and pass in a more precise type.
18584 if (lclTyp == TYP_REF)
18586 lvaSetClass(tmpNum, inlineeLocal.lclVerTypeInfo.GetClassHandleForObjRef());
18589 if (inlineeLocal.lclVerTypeInfo.IsStruct())
18591 if (varTypeIsStruct(lclTyp))
18593 lvaSetStruct(tmpNum, inlineeLocal.lclVerTypeInfo.GetClassHandle(), true /* unsafe value cls check */);
18597 // This is a wrapped primitive. Make sure the verstate knows that
18598 lvaTable[tmpNum].lvVerTypeInfo = inlineeLocal.lclVerTypeInfo;
18603 // Sanity check that we're properly prepared for gc ref locals.
18604 if (varTypeIsGC(lclTyp))
18606 // Since there are gc locals we should have seen them earlier
18607 // and if there was a return value, set up the spill temp.
18608 assert(impInlineInfo->HasGcRefLocals());
18609 assert((info.compRetNativeType == TYP_VOID) || fgNeedReturnSpillTemp());
18613 // Make sure all pinned locals count as gc refs.
18614 assert(!inlineeLocal.lclIsPinned);
18622 //------------------------------------------------------------------------
18623 // impInlineFetchArg: return tree node for argument value in an inlinee
18626 // lclNum -- argument number in inlinee IL
18627 // inlArgInfo -- argument info for inlinee
18628 // lclVarInfo -- var info for inlinee
18631 // Tree for the argument's value. Often an inlinee-scoped temp
18632 // GT_LCL_VAR but can be other tree kinds, if the argument
18633 // expression from the caller can be directly substituted into the inlinee body.
18637 // Must be used only for arguments -- use impInlineFetchLocal for inlinee locals.
18640 // Direct substitution is performed when the formal argument cannot
18641 // change value in the inlinee body (no starg or ldarga), and the
18642 // actual argument expression's value cannot be changed if it is
18643 // substituted into the inlinee body.
18645 // Even if an inlinee-scoped temp is returned here, it may later be
18646 // "bashed" to a caller-supplied tree when arguments are actually
18647 // passed (see fgInlinePrependStatements). Bashing can happen if
18648 // the argument ends up being single use and other conditions are
18649 // met. So the contents of the tree returned here may not end up
18650 // being the ones ultimately used for the argument.
18652 // This method will side effect inlArgInfo. It should only be called
18653 // for actual uses of the argument in the inlinee.
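// The case split implemented below can be summarized by this standalone sketch
// (the flag names match InlArgInfo fields; the enum and helper are illustrative
// only):
//
//     enum class ArgFetchKind { CloneInvariant, UseCallerLocal, CloneByRef, UseTemp };
//
//     static ArgFetchKind Choose(bool argIsInvariant, bool argIsLclVar,
//                                bool argIsByRefToStructLocal, bool argHasLdargaOp,
//                                bool argHasStargOp, bool argHasCallerLocalRef)
//     {
//         const bool argCanBeModified = argHasLdargaOp || argHasStargOp;
//         if (argIsInvariant && !argCanBeModified)
//             return ArgFetchKind::CloneInvariant; // constants / addresses of locals
//         if (argIsLclVar && !argCanBeModified && !argHasCallerLocalRef)
//             return ArgFetchKind::UseCallerLocal; // unaliased caller local
//         if (argIsByRefToStructLocal && !argHasStargOp)
//             return ArgFetchKind::CloneByRef;     // keep the byref visible to later opts
//         return ArgFetchKind::UseTemp;            // evaluate into an inlinee-scoped temp
//     }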
18655 GenTree* Compiler::impInlineFetchArg(unsigned lclNum, InlArgInfo* inlArgInfo, InlLclVarInfo* lclVarInfo)
18657 // Cache the relevant arg and lcl info for this argument.
18658 // We will modify argInfo but not lclVarInfo.
18659 InlArgInfo& argInfo = inlArgInfo[lclNum];
18660 const InlLclVarInfo& lclInfo = lclVarInfo[lclNum];
18661 const bool argCanBeModified = argInfo.argHasLdargaOp || argInfo.argHasStargOp;
18662 const var_types lclTyp = lclInfo.lclTypeInfo;
18663 GenTree* op1 = nullptr;
18665 if (argInfo.argIsInvariant && !argCanBeModified)
18667 // Directly substitute constants or addresses of locals
18669 // Clone the constant. Note that we cannot directly use
18670 // argNode in the trees even if !argInfo.argIsUsed as this
18671 // would introduce aliasing between inlArgInfo[].argNode and
18672 // impInlineExpr. Then gtFoldExpr() could change it, causing
18673 // further references to the argument working off of the modified copy.
18675 op1 = gtCloneExpr(argInfo.argNode);
18676 PREFIX_ASSUME(op1 != nullptr);
18677 argInfo.argTmpNum = BAD_VAR_NUM;
18679 // We may need to retype to ensure we match the callee's view of the type.
18680 // Otherwise callee-pass throughs of arguments can create return type
18681 // mismatches that block inlining.
18683 // Note argument type mismatches that prevent inlining should
18684 // have been caught in impInlineInitVars.
18685 if (op1->TypeGet() != lclTyp)
18687 op1->gtType = genActualType(lclTyp);
18690 else if (argInfo.argIsLclVar && !argCanBeModified && !argInfo.argHasCallerLocalRef)
18692 // Directly substitute unaliased caller locals for args that cannot be modified
18694 // Use the caller-supplied node if this is the first use.
18695 op1 = argInfo.argNode;
18696 argInfo.argTmpNum = op1->gtLclVarCommon.gtLclNum;
18698 // Use an equivalent copy if this is the second or subsequent
18699 // use, or if we need to retype.
18701 // Note argument type mismatches that prevent inlining should
18702 // have been caught in impInlineInitVars.
18703 if (argInfo.argIsUsed || (op1->TypeGet() != lclTyp))
18705 assert(op1->gtOper == GT_LCL_VAR);
18706 assert(lclNum == op1->gtLclVar.gtLclILoffs);
18708 var_types newTyp = lclTyp;
18710 if (!lvaTable[op1->gtLclVarCommon.gtLclNum].lvNormalizeOnLoad())
18712 newTyp = genActualType(lclTyp);
18715 // Create a new lcl var node - remember the argument lclNum
18716 op1 = gtNewLclvNode(op1->gtLclVarCommon.gtLclNum, newTyp, op1->gtLclVar.gtLclILoffs);
18719 else if (argInfo.argIsByRefToStructLocal && !argInfo.argHasStargOp)
18721 /* Argument is a by-ref address to a struct, a normed struct, or its field.
18722 In these cases, don't spill the byref to a local, simply clone the tree and use it.
18723 This way we will increase the chance for this byref to be optimized away by
18724 a subsequent "dereference" operation.
18726 From Dev11 bug #139955: Argument node can also be TYP_I_IMPL if we've bashed the tree
18727 (in impInlineInitVars()), if the arg has argHasLdargaOp as well as argIsByRefToStructLocal.
18728 For example, if the caller is:
18729 ldloca.s V_1 // V_1 is a local struct
18730 call void Test.ILPart::RunLdargaOnPointerArg(int32*)
18731 and the callee being inlined has:
18732 .method public static void RunLdargaOnPointerArg(int32* ptrToInts) cil managed
18734 call void Test.FourInts::NotInlined_SetExpectedValuesThroughPointerToPointer(int32**)
18735 then we change the argument tree (of "ldloca.s V_1") to TYP_I_IMPL to match the callee signature. We'll
18736 soon afterwards reject the inlining anyway, since the tree we return isn't a GT_LCL_VAR.
18738 assert(argInfo.argNode->TypeGet() == TYP_BYREF || argInfo.argNode->TypeGet() == TYP_I_IMPL);
18739 op1 = gtCloneExpr(argInfo.argNode);
18743 /* Argument is a complex expression - it must be evaluated into a temp */
18745 if (argInfo.argHasTmp)
18747 assert(argInfo.argIsUsed);
18748 assert(argInfo.argTmpNum < lvaCount);
18750 /* Create a new lcl var node - remember the argument lclNum */
18751 op1 = gtNewLclvNode(argInfo.argTmpNum, genActualType(lclTyp));
18753 /* This is the second or later use of the this argument,
18754 so we have to use the temp (instead of the actual arg) */
18755 argInfo.argBashTmpNode = nullptr;
18759 /* First time use */
18760 assert(!argInfo.argIsUsed);
18762 /* Reserve a temp for the expression.
18763 * Use a large size node as we may change it later */
18765 const unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Inlining Arg"));
18767 lvaTable[tmpNum].lvType = lclTyp;
18769 // For ref types, determine the type of the temp.
18770 if (lclTyp == TYP_REF)
18772 if (!argCanBeModified)
18774 // If the arg can't be modified in the method
18775 // body, use the type of the value, if
18776 // known. Otherwise, use the declared type.
18777 lvaSetClass(tmpNum, argInfo.argNode, lclInfo.lclVerTypeInfo.GetClassHandleForObjRef());
18781 // Arg might be modified, use the declared type of the argument.
18783 lvaSetClass(tmpNum, lclInfo.lclVerTypeInfo.GetClassHandleForObjRef());
18787 assert(lvaTable[tmpNum].lvAddrExposed == 0);
18788 if (argInfo.argHasLdargaOp)
18790 lvaTable[tmpNum].lvHasLdAddrOp = 1;
18793 if (lclInfo.lclVerTypeInfo.IsStruct())
18795 if (varTypeIsStruct(lclTyp))
18797 lvaSetStruct(tmpNum, lclInfo.lclVerTypeInfo.GetClassHandle(), true /* unsafe value cls check */);
18801 // This is a wrapped primitive. Make sure the verstate knows that
18802 lvaTable[tmpNum].lvVerTypeInfo = lclInfo.lclVerTypeInfo;
18806 argInfo.argHasTmp = true;
18807 argInfo.argTmpNum = tmpNum;
18809 // If we require strict exception order, then arguments must
18810 // be evaluated in sequence before the body of the inlined method.
18811 // So we need to evaluate them to a temp.
18812 // Also, if arguments have global or local references, we need to
18813 // evaluate them to a temp before the inlined body as the
18814 // inlined body may be modifying the global ref.
18815 // TODO-1stClassStructs: We currently do not reuse an existing lclVar
18816 // if it is a struct, because it requires some additional handling.
18818 if (!varTypeIsStruct(lclTyp) && !argInfo.argHasSideEff && !argInfo.argHasGlobRef &&
18819 !argInfo.argHasCallerLocalRef)
18821 /* Get a *LARGE* LCL_VAR node */
18822 op1 = gtNewLclLNode(tmpNum, genActualType(lclTyp), lclNum);
18824 /* Record op1 as the very first use of this argument.
18825 If there are no further uses of the arg, we may be
18826 able to use the actual arg node instead of the temp.
18827 If we do see any further uses, we will clear this. */
18828 argInfo.argBashTmpNode = op1;
18832 /* Get a small LCL_VAR node */
18833 op1 = gtNewLclvNode(tmpNum, genActualType(lclTyp));
18834 /* No bashing of this argument */
18835 argInfo.argBashTmpNode = nullptr;
18840 // Mark this argument as used.
18841 argInfo.argIsUsed = true;
18846 /******************************************************************************
18847 Is this the original "this" argument to the call being inlined?
18849 Note that we do not inline methods with "starg 0", and so we do not need to worry about it.
18853 BOOL Compiler::impInlineIsThis(GenTree* tree, InlArgInfo* inlArgInfo)
18855 assert(compIsForInlining());
18856 return (tree->gtOper == GT_LCL_VAR && tree->gtLclVarCommon.gtLclNum == inlArgInfo[0].argTmpNum);
18859 //-----------------------------------------------------------------------------
18860 // This function checks if a dereference in the inlinee can guarantee that
18861 // the "this" is non-NULL.
18862 // If we haven't hit a branch or a side effect, and we are dereferencing
18863 // from 'this' to access a field or make GTF_CALL_NULLCHECK call,
18864 // then we can avoid a separate null pointer check.
18866 // "additionalTreesToBeEvaluatedBefore"
18867 // is the set of pending trees that have not yet been added to the statement list,
18868 // and which have been removed from verCurrentState.esStack[]
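// The guarantee checked below boils down to a conjunction of simple conditions.
// A sketch (parameter names are illustrative; the real checks walk compCurBB,
// the pending statement list and the evaluation stack):
//
//     static bool ThisDerefHappensBeforeAnySideEffects(bool inFirstBlock,
//                                                      bool treeIsInlinedThisArg,
//                                                      bool pendingTreesHaveVisibleSideEffects,
//                                                      bool stackTreesHaveVisibleSideEffects)
//     {
//         return inFirstBlock && treeIsInlinedThisArg &&
//                !pendingTreesHaveVisibleSideEffects && !stackTreesHaveVisibleSideEffects;
//     }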
18870 BOOL Compiler::impInlineIsGuaranteedThisDerefBeforeAnySideEffects(GenTree* additionalTreesToBeEvaluatedBefore,
18871 GenTree* variableBeingDereferenced,
18872 InlArgInfo* inlArgInfo)
18874 assert(compIsForInlining());
18875 assert(opts.OptEnabled(CLFLG_INLINING));
18877 BasicBlock* block = compCurBB;
18882 if (block != fgFirstBB)
18887 if (!impInlineIsThis(variableBeingDereferenced, inlArgInfo))
18892 if (additionalTreesToBeEvaluatedBefore &&
18893 GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(additionalTreesToBeEvaluatedBefore->gtFlags))
18898 for (stmt = impTreeList->gtNext; stmt; stmt = stmt->gtNext)
18900 expr = stmt->gtStmt.gtStmtExpr;
18902 if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(expr->gtFlags))
18908 for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
18910 unsigned stackTreeFlags = verCurrentState.esStack[level].val->gtFlags;
18911 if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(stackTreeFlags))
18920 //------------------------------------------------------------------------
18921 // impMarkInlineCandidate: determine if this call can be subsequently inlined
18924 // callNode -- call under scrutiny
18925 // exactContextHnd -- context handle for inlining
18926 // exactContextNeedsRuntimeLookup -- true if context required runtime lookup
18927 // callInfo -- call info from VM
18930 // If callNode is an inline candidate, this method sets the flag
18931 // GTF_CALL_INLINE_CANDIDATE, and ensures that helper methods have
18932 // filled in the associated InlineCandidateInfo.
18934 // If callNode is not an inline candidate, and the reason is
18935 // something that is inherent to the method being called, the
18936 // method may be marked as "noinline" to short-circuit any
18937 // future assessments of calls to this method.
18939 void Compiler::impMarkInlineCandidate(GenTree* callNode,
18940 CORINFO_CONTEXT_HANDLE exactContextHnd,
18941 bool exactContextNeedsRuntimeLookup,
18942 CORINFO_CALL_INFO* callInfo)
18944 // Let the strategy know there's another call
18945 impInlineRoot()->m_inlineStrategy->NoteCall();
18947 if (!opts.OptEnabled(CLFLG_INLINING))
18949 /* XXX Mon 8/18/2008
18950 * This assert is misleading. The caller does not ensure that we have CLFLG_INLINING set before
18951 * calling impMarkInlineCandidate. However, if this assert trips it means that we're an inlinee and
18952 * CLFLG_MINOPT is set. That doesn't make a lot of sense. If you hit this assert, work back and
18953 * figure out why we did not set MAXOPT for this compile.
18955 assert(!compIsForInlining());
18959 if (compIsForImportOnly())
18961 // Don't bother creating the inline candidate during verification.
18962 // Otherwise the call to info.compCompHnd->canInline will trigger a recursive verification
18963 // that leads to the creation of multiple instances of Compiler.
18967 GenTreeCall* call = callNode->AsCall();
18968 InlineResult inlineResult(this, call, nullptr, "impMarkInlineCandidate");
18970 // Don't inline if not optimizing root method
18971 if (opts.compDbgCode)
18973 inlineResult.NoteFatal(InlineObservation::CALLER_DEBUG_CODEGEN);
18977 // Don't inline if inlining into root method is disabled.
18978 if (InlineStrategy::IsNoInline(info.compCompHnd, info.compMethodHnd))
18980 inlineResult.NoteFatal(InlineObservation::CALLER_IS_JIT_NOINLINE);
18984 // Inlining candidate determination needs to honor only IL tail prefix.
18985 // Inlining takes precedence over implicit tail call optimization (if the call is not directly recursive).
18986 if (call->IsTailPrefixedCall())
18988 inlineResult.NoteFatal(InlineObservation::CALLSITE_EXPLICIT_TAIL_PREFIX);
18992 // Tail recursion elimination takes precedence over inlining.
18993 // TODO: We may want to do some of the additional checks from fgMorphCall
18994 // here to reduce the chance we don't inline a call that won't be optimized
18995 // as a fast tail call or turned into a loop.
18996 if (gtIsRecursiveCall(call) && call->IsImplicitTailCall())
18998 inlineResult.NoteFatal(InlineObservation::CALLSITE_IMPLICIT_REC_TAIL_CALL);
19002 if (call->IsVirtual())
19004 inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT);
19008 /* Ignore helper calls */
19010 if (call->gtCallType == CT_HELPER)
19012 inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_CALL_TO_HELPER);
19016 /* Ignore indirect calls */
19017 if (call->gtCallType == CT_INDIRECT)
19019 inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT_MANAGED);
19023 /* I removed the check for BBJ_THROW. BBJ_THROW is usually marked as rarely run. This more or less
19024 * restricts the inliner to non-expanding inlines. I removed the check to allow for non-expanding
19025 * inlining in throw blocks. I should consider the same thing for catch and filter regions. */
19027 CORINFO_METHOD_HANDLE fncHandle = call->gtCallMethHnd;
19030 // Reuse method flags from the original callInfo if possible
19031 if (fncHandle == callInfo->hMethod)
19033 methAttr = callInfo->methodFlags;
19037 methAttr = info.compCompHnd->getMethodAttribs(fncHandle);
19041 if (compStressCompile(STRESS_FORCE_INLINE, 0))
19043 methAttr |= CORINFO_FLG_FORCEINLINE;
19047 // Check for COMPlus_AggressiveInlining
19048 if (compDoAggressiveInlining)
19050 methAttr |= CORINFO_FLG_FORCEINLINE;
19053 if (!(methAttr & CORINFO_FLG_FORCEINLINE))
19055 /* Don't bother inline blocks that are in the filter region */
19056 if (bbInCatchHandlerILRange(compCurBB))
19061 printf("\nWill not inline blocks that are in the catch handler region\n");
19066 inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_CATCH);
19070 if (bbInFilterILRange(compCurBB))
19075 printf("\nWill not inline blocks that are in the filter region\n");
19079 inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_FILTER);
19084 /* If the caller's stack frame is marked, then we can't do any inlining. Period. */
19086 if (opts.compNeedSecurityCheck)
19088 inlineResult.NoteFatal(InlineObservation::CALLER_NEEDS_SECURITY_CHECK);
19092 /* Check if we tried to inline this method before */
19094 if (methAttr & CORINFO_FLG_DONT_INLINE)
19096 inlineResult.NoteFatal(InlineObservation::CALLEE_IS_NOINLINE);
19100 /* Cannot inline synchronized methods */
19102 if (methAttr & CORINFO_FLG_SYNCH)
19104 inlineResult.NoteFatal(InlineObservation::CALLEE_IS_SYNCHRONIZED);
19108 /* Do not inline if callee needs security checks (since they would then mark the wrong frame) */
19110 if (methAttr & CORINFO_FLG_SECURITYCHECK)
19112 inlineResult.NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
19116 InlineCandidateInfo* inlineCandidateInfo = nullptr;
19117 impCheckCanInline(call, fncHandle, methAttr, exactContextHnd, &inlineCandidateInfo, &inlineResult);
19119 if (inlineResult.IsFailure())
19124 // The old value should be NULL
19125 assert(call->gtInlineCandidateInfo == nullptr);
19127 // The new value should not be NULL.
19128 assert(inlineCandidateInfo != nullptr);
19129 inlineCandidateInfo->exactContextNeedsRuntimeLookup = exactContextNeedsRuntimeLookup;
19131 call->gtInlineCandidateInfo = inlineCandidateInfo;
19133 // Mark the call node as inline candidate.
19134 call->gtFlags |= GTF_CALL_INLINE_CANDIDATE;
19136 // Let the strategy know there's another candidate.
19137 impInlineRoot()->m_inlineStrategy->NoteCandidate();
19139 // Since we're not actually inlining yet, and this call site is
19140 // still just an inline candidate, there's nothing to report.
19141 inlineResult.SetReported();
19144 /******************************************************************************/
19145 // Returns true if the given intrinsic will be implemented by target-specific instructions.
19148 bool Compiler::IsTargetIntrinsic(CorInfoIntrinsics intrinsicId)
19150 #if defined(_TARGET_AMD64_) || (defined(_TARGET_X86_) && !defined(LEGACY_BACKEND))
19151 switch (intrinsicId)
19153 // AMD64/x86 has SSE2 instructions to directly compute sqrt/abs and SSE4.1
19154 // instructions to directly compute round/ceiling/floor.
19156 // TODO: Because the x86 backend only targets SSE for floating-point code,
19157 // it does not treat Sine, Cosine, or Round as intrinsics (JIT32
19158 // implemented those intrinsics as x87 instructions). If this poses
19159 // a CQ problem, it may be necessary to change the implementation of
19160 // the helper calls to decrease call overhead or switch back to the
19161 // x87 instructions. This is tracked by #7097.
19162 case CORINFO_INTRINSIC_Sqrt:
19163 case CORINFO_INTRINSIC_Abs:
19166 case CORINFO_INTRINSIC_Round:
19167 case CORINFO_INTRINSIC_Ceiling:
19168 case CORINFO_INTRINSIC_Floor:
19169 return compSupports(InstructionSet_SSE41);
19174 #elif defined(_TARGET_ARM64_)
19175 switch (intrinsicId)
19177 case CORINFO_INTRINSIC_Sqrt:
19178 case CORINFO_INTRINSIC_Abs:
19179 case CORINFO_INTRINSIC_Round:
19180 case CORINFO_INTRINSIC_Floor:
19181 case CORINFO_INTRINSIC_Ceiling:
19187 #elif defined(_TARGET_ARM_)
19188 switch (intrinsicId)
19190 case CORINFO_INTRINSIC_Sqrt:
19191 case CORINFO_INTRINSIC_Abs:
19192 case CORINFO_INTRINSIC_Round:
19198 #elif defined(_TARGET_X86_)
19199 switch (intrinsicId)
19201 case CORINFO_INTRINSIC_Sin:
19202 case CORINFO_INTRINSIC_Cos:
19203 case CORINFO_INTRINSIC_Sqrt:
19204 case CORINFO_INTRINSIC_Abs:
19205 case CORINFO_INTRINSIC_Round:
19212 // TODO: This portion of logic is not implemented for other arch.
19213 // The reason for returning true is that on all other architectures the only intrinsics
19214 // enabled are target intrinsics.
19216 #endif //_TARGET_AMD64_
19219 /******************************************************************************/
19220 // Returns true if the given intrinsic will be implemented by calling System.Math methods.
19223 bool Compiler::IsIntrinsicImplementedByUserCall(CorInfoIntrinsics intrinsicId)
19225 // Currently, if a math intrinsic is not implemented by target-specific
19226 // instructions, it will be implemented by a System.Math call. In the
19227 // future, if we turn to implementing some of them with helper calls,
19228 // this predicate needs to be revisited.
19229 return !IsTargetIntrinsic(intrinsicId);
19232 bool Compiler::IsMathIntrinsic(CorInfoIntrinsics intrinsicId)
19234 switch (intrinsicId)
19236 case CORINFO_INTRINSIC_Sin:
19237 case CORINFO_INTRINSIC_Cbrt:
19238 case CORINFO_INTRINSIC_Sqrt:
19239 case CORINFO_INTRINSIC_Abs:
19240 case CORINFO_INTRINSIC_Cos:
19241 case CORINFO_INTRINSIC_Round:
19242 case CORINFO_INTRINSIC_Cosh:
19243 case CORINFO_INTRINSIC_Sinh:
19244 case CORINFO_INTRINSIC_Tan:
19245 case CORINFO_INTRINSIC_Tanh:
19246 case CORINFO_INTRINSIC_Asin:
19247 case CORINFO_INTRINSIC_Asinh:
19248 case CORINFO_INTRINSIC_Acos:
19249 case CORINFO_INTRINSIC_Acosh:
19250 case CORINFO_INTRINSIC_Atan:
19251 case CORINFO_INTRINSIC_Atan2:
19252 case CORINFO_INTRINSIC_Atanh:
19253 case CORINFO_INTRINSIC_Log10:
19254 case CORINFO_INTRINSIC_Pow:
19255 case CORINFO_INTRINSIC_Exp:
19256 case CORINFO_INTRINSIC_Ceiling:
19257 case CORINFO_INTRINSIC_Floor:
19264 bool Compiler::IsMathIntrinsic(GenTree* tree)
19266 return (tree->OperGet() == GT_INTRINSIC) && IsMathIntrinsic(tree->gtIntrinsic.gtIntrinsicId);
19269 //------------------------------------------------------------------------
19270 // impDevirtualizeCall: Attempt to change a virtual vtable call into a
19274 // call -- the call node to examine/modify
19275 // method -- [IN/OUT] the method handle for call. Updated iff call devirtualized.
19276 // methodFlags -- [IN/OUT] flags for the method to call. Updated iff call devirtualized.
19277 // contextHandle -- [IN/OUT] context handle for the call. Updated iff call devirtualized.
19278 // exactContextHnd -- [OUT] updated context handle iff call devirtualized
19281 // Virtual calls in IL will always "invoke" the base class method.
19283 // This transformation looks for evidence that the type of 'this'
19284 // in the call is exactly known, is a final class or would invoke
19285 // a final method, and if that and other safety checks pan out,
19286 // modifies the call and the call info to create a direct call.
19288 // This transformation is initially done in the importer and not
19289 // in some subsequent optimization pass because we want it to be
19290 // upstream of inline candidate identification.
19292 // However, later phases may supply improved type information that
19293 // can enable further devirtualization. We currently reinvoke this
19294 // code after inlining, if the return value of the inlined call is
19295 // the 'this obj' of a subsequent virtual call.
19297 // If devirtualization succeeds and the call's this object is the
19298 // result of a box, the jit will ask the EE for the unboxed entry
19299 // point. If this exists, the jit will see if it can rework the box
19300 // to instead make a local copy. If that is doable, the call is
19301 // updated to invoke the unboxed entry on the local copy.
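// The core legality test applied below can be written as a small predicate.
// This sketch is illustrative only; the flag names mirror the locals computed
// later in this function.
//
//     static bool CanDevirtualize(bool isExactType, bool objClassIsFinal,
//                                 bool derivedMethodIsFinal, bool isInterfaceCall)
//     {
//         if (isInterfaceCall)
//         {
//             // Interface dispatch needs the exact type or a final class.
//             return isExactType || objClassIsFinal;
//         }
//         // Virtual dispatch: an exact type, a final class, or a final override
//         // all pin down the target method.
//         return isExactType || objClassIsFinal || derivedMethodIsFinal;
//     }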
19303 void Compiler::impDevirtualizeCall(GenTreeCall* call,
19304 CORINFO_METHOD_HANDLE* method,
19305 unsigned* methodFlags,
19306 CORINFO_CONTEXT_HANDLE* contextHandle,
19307 CORINFO_CONTEXT_HANDLE* exactContextHandle)
19309 assert(call != nullptr);
19310 assert(method != nullptr);
19311 assert(methodFlags != nullptr);
19312 assert(contextHandle != nullptr);
19314 // This should be a virtual vtable or virtual stub call.
19315 assert(call->IsVirtual());
19317 // Bail if not optimizing
19318 if (opts.MinOpts())
19323 // Bail if debuggable codegen
19324 if (opts.compDbgCode)
19330 // Bail if devirt is disabled.
19331 if (JitConfig.JitEnableDevirtualization() == 0)
19336 const bool doPrint = JitConfig.JitPrintDevirtualizedMethods() == 1;
19339 // Fetch information about the virtual method we're calling.
19340 CORINFO_METHOD_HANDLE baseMethod = *method;
19341 unsigned baseMethodAttribs = *methodFlags;
19343 if (baseMethodAttribs == 0)
19345 // For late devirt we may not have method attributes, so fetch them.
19346 baseMethodAttribs = info.compCompHnd->getMethodAttribs(baseMethod);
19351 // Validate that callInfo has up to date method flags
19352 const DWORD freshBaseMethodAttribs = info.compCompHnd->getMethodAttribs(baseMethod);
19354 // All the base method attributes should agree, save that
19355 // CORINFO_FLG_DONT_INLINE may have changed from 0 to 1
19356 // because of concurrent jitting activity.
19358 // Note we don't look at this particular flag bit below, and
19359 // later on (if we do try and inline) we will rediscover why
19360 // the method can't be inlined, so there's no danger here in
19361 // seeing this particular flag bit in different states between
19362 // the cached and fresh values.
19363 if ((freshBaseMethodAttribs & ~CORINFO_FLG_DONT_INLINE) != (baseMethodAttribs & ~CORINFO_FLG_DONT_INLINE))
19365 assert(!"mismatched method attributes");
19370 // In R2R mode, we might see virtual stub calls to
19371 // non-virtuals. For instance cases where the non-virtual method
19372 // is in a different assembly but is called via CALLVIRT. For
19373 // version resilience we must allow for the fact that the method
19374 // might become virtual in some update.
19376 // In non-R2R modes CALLVIRT <nonvirtual> will be turned into a
19377 // regular call+nullcheck upstream, so we won't reach this point.
19379 if ((baseMethodAttribs & CORINFO_FLG_VIRTUAL) == 0)
19381 assert(call->IsVirtualStub());
19382 assert(opts.IsReadyToRun());
19383 JITDUMP("\nimpDevirtualizeCall: [R2R] base method not virtual, sorry\n");
19387 // See what we know about the type of 'this' in the call.
19388 GenTree* thisObj = call->gtCallObjp->gtEffectiveVal(false);
19389 GenTree* actualThisObj = nullptr;
19390 bool isExact = false;
19391 bool objIsNonNull = false;
19392 CORINFO_CLASS_HANDLE objClass = gtGetClassHandle(thisObj, &isExact, &objIsNonNull);
19394 // See if we have special knowledge that can get us a type or a better type.
19395 if ((objClass == nullptr) || !isExact)
19397 actualThisObj = thisObj;
19399 // Walk back through any return expression placeholders
19400 while (actualThisObj->OperGet() == GT_RET_EXPR)
19402 actualThisObj = actualThisObj->gtRetExpr.gtInlineCandidate;
19405 // See if we landed on a call to a special intrinsic method
19406 if (actualThisObj->IsCall())
19408 GenTreeCall* thisObjCall = actualThisObj->AsCall();
19409 if ((thisObjCall->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) != 0)
19411 assert(thisObjCall->gtCallType == CT_USER_FUNC);
19412 CORINFO_METHOD_HANDLE specialIntrinsicHandle = thisObjCall->gtCallMethHnd;
19413 CORINFO_CLASS_HANDLE specialObjClass = impGetSpecialIntrinsicExactReturnType(specialIntrinsicHandle);
19414 if (specialObjClass != nullptr)
19416 objClass = specialObjClass;
19418 objIsNonNull = true;
19424 // Bail if we know nothing.
19425 if (objClass == nullptr)
19427 JITDUMP("\nimpDevirtualizeCall: no type available (op=%s)\n", GenTree::OpName(thisObj->OperGet()));
19431 // Fetch information about the class that introduced the virtual method.
19432 CORINFO_CLASS_HANDLE baseClass = info.compCompHnd->getMethodClass(baseMethod);
19433 const DWORD baseClassAttribs = info.compCompHnd->getClassAttribs(baseClass);
19435 #if !defined(FEATURE_CORECLR)
19436 // If base class is not beforefieldinit then devirtualizing may
19437 // cause us to miss a base class init trigger. Spec says we don't
19438 // need a trigger for ref class callvirts but desktop seems to
19439 // have one anyway. So defer.
19440 if ((baseClassAttribs & CORINFO_FLG_BEFOREFIELDINIT) == 0)
19442 JITDUMP("\nimpDevirtualizeCall: base class has precise initialization, sorry\n");
19445 #endif // FEATURE_CORECLR
19447 // Is the call an interface call?
19448 const bool isInterface = (baseClassAttribs & CORINFO_FLG_INTERFACE) != 0;
19450 // If the objClass is sealed (final), then we may be able to devirtualize.
19451 const DWORD objClassAttribs = info.compCompHnd->getClassAttribs(objClass);
19452 const bool objClassIsFinal = (objClassAttribs & CORINFO_FLG_FINAL) != 0;
19455 const char* callKind = isInterface ? "interface" : "virtual";
19456 const char* objClassNote = "[?]";
19457 const char* objClassName = "?objClass";
19458 const char* baseClassName = "?baseClass";
19459 const char* baseMethodName = "?baseMethod";
19461 if (verbose || doPrint)
19463 objClassNote = isExact ? " [exact]" : objClassIsFinal ? " [final]" : "";
19464 objClassName = info.compCompHnd->getClassName(objClass);
19465 baseClassName = info.compCompHnd->getClassName(baseClass);
19466 baseMethodName = eeGetMethodName(baseMethod, nullptr);
19470 printf("\nimpDevirtualizeCall: Trying to devirtualize %s call:\n"
19471 " class for 'this' is %s%s (attrib %08x)\n"
19472 " base method is %s::%s\n",
19473 callKind, objClassName, objClassNote, objClassAttribs, baseClassName, baseMethodName);
19476 #endif // defined(DEBUG)
19478 // Bail if obj class is an interface.
19479 // See for instance System.ValueTuple`8::GetHashCode, where lcl 0 is System.IValueTupleInternal
19480 // IL_021d: ldloc.0
19481 // IL_021e: callvirt instance int32 System.Object::GetHashCode()
19482 if ((objClassAttribs & CORINFO_FLG_INTERFACE) != 0)
19484 JITDUMP("--- obj class is interface, sorry\n");
19490 assert(call->IsVirtualStub());
19491 JITDUMP("--- base class is interface\n");
19494 // Fetch the method that would be called based on the declared type of 'this'
19495 CORINFO_CONTEXT_HANDLE ownerType = *contextHandle;
19496 CORINFO_METHOD_HANDLE derivedMethod = info.compCompHnd->resolveVirtualMethod(baseMethod, objClass, ownerType);
19498 // If we failed to get a handle, we can't devirtualize. This can
19499 // happen when prejitting, if the devirtualization crosses
19500 // servicing bubble boundaries.
19501 if (derivedMethod == nullptr)
19503 JITDUMP("--- no derived method, sorry\n");
19507 // Fetch method attributes to see if method is marked final.
19508 DWORD derivedMethodAttribs = info.compCompHnd->getMethodAttribs(derivedMethod);
19509 const bool derivedMethodIsFinal = ((derivedMethodAttribs & CORINFO_FLG_FINAL) != 0);
19512 const char* derivedClassName = "?derivedClass";
19513 const char* derivedMethodName = "?derivedMethod";
19515 const char* note = "speculative";
19520 else if (objClassIsFinal)
19522 note = "final class";
19524 else if (derivedMethodIsFinal)
19526 note = "final method";
19529 if (verbose || doPrint)
19531 derivedMethodName = eeGetMethodName(derivedMethod, &derivedClassName);
19534 printf(" devirt to %s::%s -- %s\n", derivedClassName, derivedMethodName, note);
19538 #endif // defined(DEBUG)
19540 if (!isExact && !objClassIsFinal && !derivedMethodIsFinal)
19542 // Type is not exact, and neither the class nor the method is final.
19544 // We could speculatively devirtualize, but there's no
19545 // reason to believe the derived method is the one that
19546 // is likely to be invoked.
19548 // If there's currently no further overriding (that is, at
19549 // the time of jitting, objClass has no subclasses that
19550 // override this method), then perhaps we'd be willing to
19552 JITDUMP(" Class not final or exact, method not final, no devirtualization\n");
19556 // For interface calls we must have an exact type or final class.
19557 if (isInterface && !isExact && !objClassIsFinal)
19559 JITDUMP(" Class not final or exact for interface, no devirtualization\n");
19563 JITDUMP(" %s; can devirtualize\n", note);
19565 // Make the updates.
19566 call->gtFlags &= ~GTF_CALL_VIRT_VTABLE;
19567 call->gtFlags &= ~GTF_CALL_VIRT_STUB;
19568 call->gtCallMethHnd = derivedMethod;
19569 call->gtCallType = CT_USER_FUNC;
19571 // Virtual calls include an implicit null check, which we may
19572 // now need to make explicit.
19575 call->gtFlags |= GTF_CALL_NULLCHECK;
19578 // Clear the inline candidate info (may be non-null since
19579 // it's a union field used for other things by virtual
19581 call->gtInlineCandidateInfo = nullptr;
19586 printf("... after devirt...\n");
19592 printf("Devirtualized %s call to %s:%s; now direct call to %s:%s [%s]\n", callKind, baseClassName,
19593 baseMethodName, derivedClassName, derivedMethodName, note);
19595 #endif // defined(DEBUG)
19597 // If the 'this' object is a box, see if we can find the unboxed entry point for the call.
19598 if (thisObj->IsBoxedValue())
19600 JITDUMP("Now have direct call to boxed entry point, looking for unboxed entry point\n");
19602 // Note for some shared methods the unboxed entry point requires an extra parameter.
19603 bool requiresInstMethodTableArg = false;
19604 CORINFO_METHOD_HANDLE unboxedEntryMethod =
19605 info.compCompHnd->getUnboxedEntry(derivedMethod, &requiresInstMethodTableArg);
19607 if (unboxedEntryMethod != nullptr)
19609 // Since the call is the only consumer of the box, we know the box can't escape
19610 // since it is being passed an interior pointer.
19612 // So, revise the box to simply create a local copy, use the address of that copy
19613 // as the this pointer, and update the entry point to the unboxed entry.
19615 // Ideally, we then inline the boxed method and if it turns out not to modify
19616 // the copy, we can undo the copy too.
19617 if (requiresInstMethodTableArg)
19619 // Perform a trial box removal and ask for the type handle tree.
19620 JITDUMP("Unboxed entry needs method table arg...\n");
19621 GenTree* methodTableArg = gtTryRemoveBoxUpstreamEffects(thisObj, BR_DONT_REMOVE_WANT_TYPE_HANDLE);
                if (methodTableArg != nullptr)
                {
                    // If that worked, turn the box into a copy to a local var.
                    JITDUMP("Found suitable method table arg tree [%06u]\n", dspTreeID(methodTableArg));
                    GenTree* localCopyThis = gtTryRemoveBoxUpstreamEffects(thisObj, BR_MAKE_LOCAL_COPY);

                    if (localCopyThis != nullptr)
                    {
                        // Pass the local var as this and the type handle as a new arg.
                        JITDUMP("Success! invoking unboxed entry point on local copy, and passing method table arg\n");
                        call->gtCallObjp = localCopyThis;

                        // Prepend for R2L arg passing or empty L2R passing.
                        if ((Target::g_tgtArgOrder == Target::ARG_ORDER_R2L) || (call->gtCallArgs == nullptr))
                        {
                            call->gtCallArgs = gtNewListNode(methodTableArg, call->gtCallArgs);
                        }
                        // Append for non-empty L2R.
                        else
                        {
                            GenTreeArgList* beforeArg = call->gtCallArgs;
                            while (beforeArg->Rest() != nullptr)
                            {
                                beforeArg = beforeArg->Rest();
                            }
                            beforeArg->Rest() = gtNewListNode(methodTableArg, nullptr);
                        }
                        call->gtCallMethHnd = unboxedEntryMethod;
                        derivedMethod       = unboxedEntryMethod;

                        // Method attributes will differ because the unboxed entry point is shared.
                        const DWORD unboxedMethodAttribs = info.compCompHnd->getMethodAttribs(unboxedEntryMethod);
                        JITDUMP("Updating method attribs from 0x%08x to 0x%08x\n", derivedMethodAttribs,
                                unboxedMethodAttribs);
                        derivedMethodAttribs = unboxedMethodAttribs;
                    }
                    else
                    {
                        JITDUMP("Sorry, failed to undo the box -- can't convert to local copy\n");
                    }
                }
                else
                {
                    JITDUMP("Sorry, failed to undo the box -- can't find method table arg\n");
                }
            }
            else
            {
                JITDUMP("Found unboxed entry point, trying to simplify box to a local copy\n");
                GenTree* localCopyThis = gtTryRemoveBoxUpstreamEffects(thisObj, BR_MAKE_LOCAL_COPY);
                if (localCopyThis != nullptr)
                {
                    JITDUMP("Success! invoking unboxed entry point on local copy\n");
                    call->gtCallObjp    = localCopyThis;
                    call->gtCallMethHnd = unboxedEntryMethod;
                    derivedMethod       = unboxedEntryMethod;
                }
                else
                {
                    JITDUMP("Sorry, failed to undo the box\n");
                }
            }
        }
        else
        {
            // Many of the low-level methods on value classes won't have unboxed entries,
            // as they need access to the type of the object.
            //
            // Note this may be a cue for us to stack allocate the boxed object, since
            // we probably know that these objects don't escape.
            JITDUMP("Sorry, failed to find unboxed entry point\n");
        }
    }

    // Fetch the class that introduced the derived method.
    //
    // Note this may not equal objClass, if there is a
    // final method that objClass inherits.
    CORINFO_CLASS_HANDLE derivedClass = info.compCompHnd->getMethodClass(derivedMethod);
    // Need to update call info too. This is fragile
    // but hopefully the derived method conforms to
    // the base in most other ways.
    *method        = derivedMethod;
    *methodFlags   = derivedMethodAttribs;
    *contextHandle = MAKE_METHODCONTEXT(derivedMethod);
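
    // Illustrative note: the context handles are consulted by later work (e.g. inline
    // candidate evaluation and generic lookups), so they must now describe the derived
    // method rather than the base one.
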
    // Update context handle.
    if ((exactContextHandle != nullptr) && (*exactContextHandle != nullptr))
    {
        *exactContextHandle = MAKE_METHODCONTEXT(derivedMethod);
    }
#ifdef FEATURE_READYTORUN_COMPILER
    if (opts.IsReadyToRun())
    {
        // For R2R, getCallInfo triggers bookkeeping on the zap
        // side so we need to call it here.
        //
        // First, cons up a suitable resolved token.
        CORINFO_RESOLVED_TOKEN derivedResolvedToken = {};

        derivedResolvedToken.tokenScope   = info.compScopeHnd;
        derivedResolvedToken.tokenContext = *contextHandle;
        derivedResolvedToken.token        = info.compCompHnd->getMethodDefFromMethod(derivedMethod);
        derivedResolvedToken.tokenType    = CORINFO_TOKENKIND_Method;
        derivedResolvedToken.hClass       = derivedClass;
        derivedResolvedToken.hMethod      = derivedMethod;

        // Look up the new call info.
        CORINFO_CALL_INFO derivedCallInfo;
        eeGetCallInfo(&derivedResolvedToken, nullptr, addVerifyFlag(CORINFO_CALLINFO_ALLOWINSTPARAM), &derivedCallInfo);

        // Update the call.
        call->gtCallMoreFlags &= ~GTF_CALL_M_VIRTSTUB_REL_INDIRECT;
        call->gtCallMoreFlags &= ~GTF_CALL_M_R2R_REL_INDIRECT;
        call->setEntryPoint(derivedCallInfo.codePointerLookup.constLookup);
    }
#endif // FEATURE_READYTORUN_COMPILER
}

//------------------------------------------------------------------------
// impGetSpecialIntrinsicExactReturnType: Look for special cases where a call
//   to an intrinsic returns an exact type.
//
// Arguments:
//    methodHnd -- handle for the special intrinsic method
//
// Returns:
//    Exact class handle returned by the intrinsic call, if known.
//    Nullptr if not known, or not likely to lead to beneficial optimization.
CORINFO_CLASS_HANDLE Compiler::impGetSpecialIntrinsicExactReturnType(CORINFO_METHOD_HANDLE methodHnd)
{
    JITDUMP("Special intrinsic: looking for exact type returned by %s\n", eeGetMethodFullName(methodHnd));

    CORINFO_CLASS_HANDLE result = nullptr;

    // See what intrinsic we have...
    const NamedIntrinsic ni = lookupNamedIntrinsic(methodHnd);
    switch (ni)
    {
        case NI_System_Collections_Generic_EqualityComparer_get_Default:
        {
            // Expect one class generic parameter; figure out which it is.
            CORINFO_SIG_INFO sig;
            info.compCompHnd->getMethodSig(methodHnd, &sig);
            assert(sig.sigInst.classInstCount == 1);
            CORINFO_CLASS_HANDLE typeHnd = sig.sigInst.classInst[0];
            assert(typeHnd != nullptr);
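
            // Illustrative note: this handles C# patterns like
            //
            //     EqualityComparer<int>.Default.Equals(x, y)
            //
            // For a final (sealed or primitive) T the VM can name the exact comparer class it
            // will hand back, which lets the jit devirtualize and potentially inline the
            // subsequent Equals/GetHashCode call.
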
            // Lookup can be incorrect when we have __Canon as it won't appear
            // to implement any interface types.
            // And if we do not have a final type, devirt & inlining is
            // unlikely to result in much simplification.
            // We can use CORINFO_FLG_FINAL to screen out both of these cases.
            const DWORD typeAttribs = info.compCompHnd->getClassAttribs(typeHnd);
            const bool  isFinalType = ((typeAttribs & CORINFO_FLG_FINAL) != 0);
            if (isFinalType)
            {
                result = info.compCompHnd->getDefaultEqualityComparerClass(typeHnd);
                JITDUMP("Special intrinsic for type %s: return type is %s\n", eeGetClassName(typeHnd),
                        result != nullptr ? eeGetClassName(result) : "unknown");
            }
            else
            {
                JITDUMP("Special intrinsic for type %s: type not final, so deferring opt\n", eeGetClassName(typeHnd));
            }
            break;
        }
        default:
            JITDUMP("This special intrinsic not handled, sorry...\n");
            break;
    }
    return result;
}
//------------------------------------------------------------------------
// impAllocateToken: create CORINFO_RESOLVED_TOKEN into jit-allocated memory and init it.
//
// Arguments:
//    token - init value for the allocated token.
//
// Return Value:
//    pointer to token into jit-allocated memory.
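//
// Notes:
//    Illustrative note: this is handy when a token captured during importation needs to
//    outlive the importer's locals, e.g. so it can be stashed in per-call-site info that
//    later phases consult.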
CORINFO_RESOLVED_TOKEN* Compiler::impAllocateToken(CORINFO_RESOLVED_TOKEN token)
{
    CORINFO_RESOLVED_TOKEN* memory = (CORINFO_RESOLVED_TOKEN*)compGetMem(sizeof(token));
    *memory = token;
    return memory;
}
//------------------------------------------------------------------------
// SpillRetExprHelper: iterate through the argument trees and spill ret_expr to local variables.
class SpillRetExprHelper
{
public:
    SpillRetExprHelper(Compiler* comp) : comp(comp)
    {
    }
    void StoreRetExprResultsInArgs(GenTreeCall* call)
    {
        GenTree* args = call->gtCallArgs;
        if (args != nullptr)
        {
            comp->fgWalkTreePre(&args, SpillRetExprVisitor, this);
        }
        GenTree* thisArg = call->gtCallObjp;
        if (thisArg != nullptr)
        {
            comp->fgWalkTreePre(&thisArg, SpillRetExprVisitor, this);
        }
    }
private:
    static Compiler::fgWalkResult SpillRetExprVisitor(GenTree** pTree, Compiler::fgWalkData* fgWalkPre)
    {
        assert((pTree != nullptr) && (*pTree != nullptr));
        GenTree* tree = *pTree;
        if ((tree->gtFlags & GTF_CALL) == 0)
        {
            // Trees with ret_expr are marked as GTF_CALL.
            return Compiler::WALK_SKIP_SUBTREES;
        }
        if (tree->OperGet() == GT_RET_EXPR)
        {
            SpillRetExprHelper* walker = static_cast<SpillRetExprHelper*>(fgWalkPre->pCallbackData);
            walker->StoreRetExprAsLocalVar(pTree);
        }
        return Compiler::WALK_CONTINUE;
    }
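
    // Illustrative note: a GT_RET_EXPR is a placeholder that is swapped for the inlinee's
    // actual result only after inlining completes, so it cannot safely be duplicated.
    // Evaluating it into a temp here means any later cloning of the call (see
    // addFatPointerCandidate below) only has to clone a simple local variable reference.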
    void StoreRetExprAsLocalVar(GenTree** pRetExpr)
    {
        GenTree* retExpr = *pRetExpr;
        assert(retExpr->OperGet() == GT_RET_EXPR);
        JITDUMP("Store return expression %u as a local var.\n", retExpr->gtTreeID);
        unsigned tmp = comp->lvaGrabTemp(true DEBUGARG("spilling ret_expr"));
        comp->impAssignTempGen(tmp, retExpr, (unsigned)Compiler::CHECK_SPILL_NONE);
        *pRetExpr = comp->gtNewLclvNode(tmp, retExpr->TypeGet());
    }

private:
    Compiler* comp;
};
//------------------------------------------------------------------------
// addFatPointerCandidate: mark the call and the method as having a fat pointer candidate.
//    Spill ret_expr in the call node, because it can't be cloned.
//
// Arguments:
//    call - fat calli candidate
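//
// Notes:
//    Illustrative note: a fat calli candidate is an indirect call whose target may turn out
//    to be a "fat" function pointer, i.e. one that also carries a generic instantiation
//    argument. A later phase rewrites such calls by cloning them into fat and non-fat
//    variants, which is why ret_expr nodes must be spilled out of the call first.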
void Compiler::addFatPointerCandidate(GenTreeCall* call)
{
    setMethodHasFatPointer();
    call->SetFatPointerCandidate();
    SpillRetExprHelper helper(this);
    helper.StoreRetExprResultsInArgs(call);
}