// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.

/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX                                                                           XX
XX                           Importer                                        XX
XX                                                                           XX
XX   Imports the given method and converts it to semantic trees              XX
XX                                                                           XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
#define Verify(cond, msg)                                                                        \
    if (!(cond))                                                                                 \
    {                                                                                            \
        verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__));     \
    }

#define VerifyOrReturn(cond, msg)                                                                \
    if (!(cond))                                                                                 \
    {                                                                                            \
        verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__));     \
        return;                                                                                  \
    }

#define VerifyOrReturnSpeculative(cond, msg, speculative)                                        \
    if (!(cond))                                                                                 \
    {                                                                                            \
        if (!(speculative))                                                                      \
        {                                                                                        \
            verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__)); \
        }                                                                                        \
        return false;                                                                            \
    }
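// Usage sketch (illustrative; mirrors the call site in impResolveToken further
// down). The macros expand inline, so VerifyOrReturn may only be used inside
// functions returning void, and VerifyOrReturnSpeculative inside functions
// returning bool:
//
//     Verify(eeTryResolveToken(pResolvedToken), "Token resolution failed");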
/*****************************************************************************/

void Compiler::impInit()
{
    impTreeList        = nullptr;
    impTreeLast        = nullptr;
    impInlinedCodeSize = 0;
}
/*****************************************************************************
 *
 *  Pushes the given tree on the stack.
 */

void Compiler::impPushOnStack(GenTree* tree, typeInfo ti)
{
    /* Check for overflow. If inlining, we may be using a bigger stack */

    if ((verCurrentState.esStackDepth >= info.compMaxStack) &&
        (verCurrentState.esStackDepth >= impStkSize || ((compCurBB->bbFlags & BBF_IMPORTED) == 0)))
    {
        BADCODE("stack overflow");
    }

    // If we are pushing a struct, make certain we know the precise type!
    if (tree->TypeGet() == TYP_STRUCT)
    {
        assert(ti.IsType(TI_STRUCT));
        CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandle();
        assert(clsHnd != NO_CLASS_HANDLE);
    }

    if (tiVerificationNeeded && !ti.IsDead())
    {
        assert(typeInfo::AreEquivalent(NormaliseForStack(ti), ti)); // types are normalized

        // The ti type is consistent with the tree type.

        // On 64-bit systems, nodes whose "proper" type is "native int" get labeled TYP_LONG.
        // In the verification type system, we always transform "native int" to "TI_INT".
        // Ideally, we would keep track of which nodes labeled "TYP_LONG" are really "native int", but
        // attempts to do that have proved too difficult. Instead, we'll assume that in checks like this,
        // when there's a mismatch, it's because of this reason -- the typeInfo::AreEquivalentModuloNativeInt
        // method used in the last disjunct allows exactly this mismatch.
        assert(ti.IsDead() || ti.IsByRef() && (tree->TypeGet() == TYP_I_IMPL || tree->TypeGet() == TYP_BYREF) ||
               ti.IsUnboxedGenericTypeVar() && tree->TypeGet() == TYP_REF ||
               ti.IsObjRef() && tree->TypeGet() == TYP_REF || ti.IsMethod() && tree->TypeGet() == TYP_I_IMPL ||
               ti.IsType(TI_STRUCT) && tree->TypeGet() != TYP_REF ||
               typeInfo::AreEquivalentModuloNativeInt(NormaliseForStack(ti),
                                                      NormaliseForStack(typeInfo(tree->TypeGet()))));

        // If it is a struct type, make certain we normalized the primitive types
        assert(!ti.IsType(TI_STRUCT) ||
               info.compCompHnd->getTypeForPrimitiveValueClass(ti.GetClassHandle()) == CORINFO_TYPE_UNDEF);
    }

#if VERBOSE_VERIFY
    if (VERBOSE && tiVerificationNeeded)
    {
        printf(TI_DUMP_PADDING);
        printf("About to push to stack: ");
        ti.Dump();
    }
#endif // VERBOSE_VERIFY

    verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo = ti;
    verCurrentState.esStack[verCurrentState.esStackDepth++].val      = tree;

    if ((tree->gtType == TYP_LONG) && (compLongUsed == false))
    {
        compLongUsed = true;
    }
    else if (((tree->gtType == TYP_FLOAT) || (tree->gtType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
    {
        compFloatingPointUsed = true;
    }
}
inline void Compiler::impPushNullObjRefOnStack()
{
    impPushOnStack(gtNewIconNode(0, TYP_REF), typeInfo(TI_NULL));
}
// This method gets called when we run into unverifiable code
// (and we are verifying the method)

inline void Compiler::verRaiseVerifyExceptionIfNeeded(INDEBUG(const char* msg) DEBUGARG(const char* file)
                                                          DEBUGARG(unsigned line))
{
    // Remember that the code is not verifiable
    // Note that the method may yet pass canSkipMethodVerification(),
    // and so the presence of unverifiable code may not be an issue.
    tiIsVerifiableCode = FALSE;

    const char* tail = strrchr(file, '\\');
    if (tail != nullptr)
    {
        file = tail + 1;
    }

    if (JitConfig.JitBreakOnUnsafeCode())
    {
        assert(!"Unsafe code detected");
    }

    JITLOG((LL_INFO10000, "Detected unsafe code: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
            msg, info.compFullName, impCurOpcName, impCurOpcOffs));

    if (verNeedsVerification() || compIsForImportOnly())
    {
        JITLOG((LL_ERROR, "Verification failure: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
                msg, info.compFullName, impCurOpcName, impCurOpcOffs));
        verRaiseVerifyException(INDEBUG(msg) DEBUGARG(file) DEBUGARG(line));
    }
}
inline void DECLSPEC_NORETURN Compiler::verRaiseVerifyException(INDEBUG(const char* msg) DEBUGARG(const char* file)
                                                                    DEBUGARG(unsigned line))
{
    JITLOG((LL_ERROR, "Verification failure: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
            msg, info.compFullName, impCurOpcName, impCurOpcOffs));

    // BreakIfDebuggerPresent();
    if (getBreakOnBadCode())
    {
        assert(!"Typechecking error");
    }

    RaiseException(SEH_VERIFICATION_EXCEPTION, EXCEPTION_NONCONTINUABLE, 0, nullptr);
}
/*****************************************************************************/

// Helper function that tells us if the IL instruction at the addr passed
// by param consumes an address at the top of the stack. We use it to avoid
// unnecessarily marking locals as address-taken.

bool Compiler::impILConsumesAddr(const BYTE* codeAddr, CORINFO_METHOD_HANDLE fncHandle, CORINFO_MODULE_HANDLE scpHandle)
{
    assert(!compIsForInlining());

    OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr);

    switch (opcode)
    {
        // case CEE_LDFLDA: We're taking this one out as if you have a sequence
        // like
        //
        //          ldloca.0
        //          ldflda whatever
        //
        // of a primitive-like struct, you end up after morphing with the address
        // of a local that's not marked as address-taken, which is wrong. Also ldflda
        // is usually used for structs that contain other structs, which isn't a case
        // we handle very well now for other reasons.

        case CEE_LDFLD:
        {
            // We won't collapse small fields. This is probably not the right place to have this
            // check, but we're only using the function for this purpose, and it is easy to factor
            // out if we need to do so.

            CORINFO_RESOLVED_TOKEN resolvedToken;
            impResolveToken(codeAddr + sizeof(__int8), &resolvedToken, CORINFO_TOKENKIND_Field);

            var_types lclTyp = JITtype2varType(info.compCompHnd->getFieldType(resolvedToken.hField));

            // Preserve 'small' int types
            if (!varTypeIsSmall(lclTyp))
            {
                lclTyp = genActualType(lclTyp);
            }

            if (varTypeIsSmall(lclTyp))
            {
                return false;
            }

            return true;
        }

        default:
            break;
    }

    return false;
}
void Compiler::impResolveToken(const BYTE* addr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoTokenKind kind)
{
    pResolvedToken->tokenContext = impTokenLookupContextHandle;
    pResolvedToken->tokenScope   = info.compScopeHnd;
    pResolvedToken->token        = getU4LittleEndian(addr);
    pResolvedToken->tokenType    = kind;

    if (!tiVerificationNeeded)
    {
        info.compCompHnd->resolveToken(pResolvedToken);
    }
    else
    {
        Verify(eeTryResolveToken(pResolvedToken), "Token resolution failed");
    }
}
/*****************************************************************************
 *
 *  Pop one tree from the stack.
 */

StackEntry Compiler::impPopStack()
{
    if (verCurrentState.esStackDepth == 0)
    {
        BADCODE("stack underflow");
    }

#if VERBOSE_VERIFY
    if (VERBOSE && tiVerificationNeeded)
    {
        printf(TI_DUMP_PADDING);
        printf("About to pop from the stack: ");
        const typeInfo& ti = verCurrentState.esStack[verCurrentState.esStackDepth - 1].seTypeInfo;
        ti.Dump();
    }
#endif // VERBOSE_VERIFY

    return verCurrentState.esStack[--verCurrentState.esStackDepth];
}

/*****************************************************************************
 *
 *  Peek at the n'th (0-based) tree on the top of the stack.
 */

StackEntry& Compiler::impStackTop(unsigned n)
{
    if (verCurrentState.esStackDepth <= n)
    {
        BADCODE("stack underflow");
    }

    return verCurrentState.esStack[verCurrentState.esStackDepth - n - 1];
}

unsigned Compiler::impStackHeight()
{
    return verCurrentState.esStackDepth;
}
/*****************************************************************************
 *  Some of the trees are spilled in a special way, and need matching special
 *  handling when being unspilled or copied. This function enumerates the
 *  operators that are possible after spilling.
 */

#ifdef DEBUG // only used in asserts
static bool impValidSpilledStackEntry(GenTree* tree)
{
    if (tree->gtOper == GT_LCL_VAR)
    {
        return true;
    }

    if (tree->OperIsConst())
    {
        return true;
    }

    return false;
}
#endif
/*****************************************************************************
 *
 *  The following logic is used to save/restore stack contents.
 *  If 'copy' is true, then we make a copy of the trees on the stack. These
 *  have to all be cloneable/spilled values.
 */

void Compiler::impSaveStackState(SavedStack* savePtr, bool copy)
{
    savePtr->ssDepth = verCurrentState.esStackDepth;

    if (verCurrentState.esStackDepth)
    {
        savePtr->ssTrees = new (this, CMK_ImpStack) StackEntry[verCurrentState.esStackDepth];
        size_t saveSize  = verCurrentState.esStackDepth * sizeof(*savePtr->ssTrees);

        if (copy)
        {
            StackEntry* table = savePtr->ssTrees;

            /* Make a fresh copy of all the stack entries */

            for (unsigned level = 0; level < verCurrentState.esStackDepth; level++, table++)
            {
                table->seTypeInfo = verCurrentState.esStack[level].seTypeInfo;
                GenTree* tree     = verCurrentState.esStack[level].val;

                assert(impValidSpilledStackEntry(tree));

                switch (tree->gtOper)
                {
                    case GT_LCL_VAR:
                    case GT_CNS_INT:
                    case GT_CNS_LNG:
                    case GT_CNS_DBL:
                    case GT_CNS_STR:
                        table->val = gtCloneExpr(tree);
                        break;

                    default:
                        assert(!"Bad oper - Not covered by impValidSpilledStackEntry()");
                        break;
                }
            }
        }
        else
        {
            memcpy(savePtr->ssTrees, verCurrentState.esStack, saveSize);
        }
    }
}
void Compiler::impRestoreStackState(SavedStack* savePtr)
{
    verCurrentState.esStackDepth = savePtr->ssDepth;

    if (verCurrentState.esStackDepth)
    {
        memcpy(verCurrentState.esStack, savePtr->ssTrees,
               verCurrentState.esStackDepth * sizeof(*verCurrentState.esStack));
    }
}
/*****************************************************************************
 *
 *  Get the tree list started for a new basic block.
 */
inline void Compiler::impBeginTreeList()
{
    assert(impTreeList == nullptr && impTreeLast == nullptr);

    impTreeList = impTreeLast = new (this, GT_BEG_STMTS) GenTree(GT_BEG_STMTS, TYP_VOID);
}
/*****************************************************************************
 *
 *  Store the given start and end stmt in the given basic block. This is
 *  mostly called by impEndTreeList(BasicBlock *block). It is called
 *  directly only for handling CEE_LEAVEs out of finally-protected try blocks.
 */

inline void Compiler::impEndTreeList(BasicBlock* block, GenTree* firstStmt, GenTree* lastStmt)
{
    assert(firstStmt->gtOper == GT_STMT);
    assert(lastStmt->gtOper == GT_STMT);

    /* Make the list circular, so that we can easily walk it backwards */

    firstStmt->gtPrev = lastStmt;

    /* Store the tree list in the basic block */

    block->bbTreeList = firstStmt;

    /* The block should not already be marked as imported */
    assert((block->bbFlags & BBF_IMPORTED) == 0);

    block->bbFlags |= BBF_IMPORTED;
}
/*****************************************************************************
 *
 *  Store the current tree list in the given basic block.
 */

inline void Compiler::impEndTreeList(BasicBlock* block)
{
    assert(impTreeList->gtOper == GT_BEG_STMTS);

    GenTree* firstTree = impTreeList->gtNext;

    if (!firstTree)
    {
        /* The block should not already be marked as imported */
        assert((block->bbFlags & BBF_IMPORTED) == 0);

        // Empty block. Just mark it as imported
        block->bbFlags |= BBF_IMPORTED;
    }
    else
    {
        // Ignore the GT_BEG_STMTS
        assert(firstTree->gtPrev == impTreeList);

        impEndTreeList(block, firstTree, impTreeLast);
    }

    if (impLastILoffsStmt != nullptr)
    {
        impLastILoffsStmt->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
        impLastILoffsStmt                          = nullptr;
    }

    impTreeList = impTreeLast = nullptr;
}
/*****************************************************************************
 *
 *  Check that storing the given tree doesn't mess up the semantic order. Note
 *  that this has only limited value as we can only check [0..chkLevel).
 */

inline void Compiler::impAppendStmtCheck(GenTree* stmt, unsigned chkLevel)
{
    assert(stmt->gtOper == GT_STMT);

    if (chkLevel == (unsigned)CHECK_SPILL_ALL)
    {
        chkLevel = verCurrentState.esStackDepth;
    }

    if (verCurrentState.esStackDepth == 0 || chkLevel == 0 || chkLevel == (unsigned)CHECK_SPILL_NONE)
    {
        return;
    }

    GenTree* tree = stmt->gtStmt.gtStmtExpr;

    // Calls can only be appended if there are no GTF_GLOB_EFFECT trees on the stack

    if (tree->gtFlags & GTF_CALL)
    {
        for (unsigned level = 0; level < chkLevel; level++)
        {
            assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_EFFECT) == 0);
        }
    }

    if (tree->gtOper == GT_ASG)
    {
        // For an assignment to a local variable, all references of that
        // variable have to be spilled. If it is aliased, all calls and
        // indirect accesses have to be spilled

        if (tree->gtOp.gtOp1->gtOper == GT_LCL_VAR)
        {
            unsigned lclNum = tree->gtOp.gtOp1->gtLclVarCommon.gtLclNum;
            for (unsigned level = 0; level < chkLevel; level++)
            {
                assert(!gtHasRef(verCurrentState.esStack[level].val, lclNum, false));
                assert(!lvaTable[lclNum].lvAddrExposed ||
                       (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT) == 0);
            }
        }

        // If the access may be to global memory, all side effects have to be spilled.

        else if (tree->gtOp.gtOp1->gtFlags & GTF_GLOB_REF)
        {
            for (unsigned level = 0; level < chkLevel; level++)
            {
                assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_REF) == 0);
            }
        }
    }
}
/*****************************************************************************
 *
 *  Append the given GT_STMT node to the current block's tree list.
 *  [0..chkLevel) is the portion of the stack which we will check for
 *  interference with stmt and spill if needed.
 */

inline void Compiler::impAppendStmt(GenTree* stmt, unsigned chkLevel)
{
    assert(stmt->gtOper == GT_STMT);
    noway_assert(impTreeLast != nullptr);

    /* If the statement being appended has any side-effects, check the stack
       to see if anything needs to be spilled to preserve correct ordering. */

    GenTree* expr  = stmt->gtStmt.gtStmtExpr;
    unsigned flags = expr->gtFlags & GTF_GLOB_EFFECT;

    // Assignments to (unaliased) locals don't count as a side-effect as
    // we handle them specially using impSpillLclRefs(). Temp locals should
    // be fine too.

    if ((expr->gtOper == GT_ASG) && (expr->gtOp.gtOp1->gtOper == GT_LCL_VAR) &&
        !(expr->gtOp.gtOp1->gtFlags & GTF_GLOB_REF) && !gtHasLocalsWithAddrOp(expr->gtOp.gtOp2))
    {
        unsigned op2Flags = expr->gtOp.gtOp2->gtFlags & GTF_GLOB_EFFECT;
        assert(flags == (op2Flags | GTF_ASG));
        flags = op2Flags;
    }

    if (chkLevel == (unsigned)CHECK_SPILL_ALL)
    {
        chkLevel = verCurrentState.esStackDepth;
    }

    if (chkLevel && chkLevel != (unsigned)CHECK_SPILL_NONE)
    {
        assert(chkLevel <= verCurrentState.esStackDepth);

        if (flags)
        {
            // If there is a call, we have to spill global refs
            bool spillGlobEffects = (flags & GTF_CALL) ? true : false;

            if (expr->gtOper == GT_ASG)
            {
                GenTree* lhs = expr->gtGetOp1();
                // If we are assigning to a global ref, we have to spill global refs on stack.
                // TODO-1stClassStructs: Previously, spillGlobEffects was set to true for
                // GT_INITBLK and GT_COPYBLK, but this is overly conservative, and should be
                // revisited. (Note that it was NOT set to true for GT_COPYOBJ.)
                if (!expr->OperIsBlkOp())
                {
                    // If we are assigning to a global ref, we have to spill global refs on stack
                    if ((lhs->gtFlags & GTF_GLOB_REF) != 0)
                    {
                        spillGlobEffects = true;
                    }
                }
                else if ((lhs->OperIsBlk() && !lhs->AsBlk()->HasGCPtr()) ||
                         ((lhs->OperGet() == GT_LCL_VAR) &&
                          (lvaTable[lhs->AsLclVarCommon()->gtLclNum].lvStructGcCount == 0)))
                {
                    spillGlobEffects = true;
                }
            }

            impSpillSideEffects(spillGlobEffects, chkLevel DEBUGARG("impAppendStmt"));
        }
        else
        {
            impSpillSpecialSideEff();
        }
    }

    impAppendStmtCheck(stmt, chkLevel);

    /* Point 'prev' at the previous node, so that we can walk backwards */

    stmt->gtPrev = impTreeLast;

    /* Append the expression statement to the list */

    impTreeLast->gtNext = stmt;
    impTreeLast         = stmt;

#ifdef FEATURE_SIMD
    impMarkContiguousSIMDFieldAssignments(stmt);
#endif

    /* Once we set impCurStmtOffs in an appended tree, we are ready to
       report the following offsets. So reset impCurStmtOffs */

    if (impTreeLast->gtStmt.gtStmtILoffsx == impCurStmtOffs)
    {
        impCurStmtOffsSet(BAD_IL_OFFSET);
    }

    if (impLastILoffsStmt == nullptr)
    {
        impLastILoffsStmt = stmt;
    }
}
/*****************************************************************************
 *
 *  Insert the given GT_STMT "stmt" before GT_STMT "stmtBefore".
 */

inline void Compiler::impInsertStmtBefore(GenTree* stmt, GenTree* stmtBefore)
{
    assert(stmt->gtOper == GT_STMT);
    assert(stmtBefore->gtOper == GT_STMT);

    GenTree* stmtPrev  = stmtBefore->gtPrev;
    stmt->gtPrev       = stmtPrev;
    stmt->gtNext       = stmtBefore;
    stmtPrev->gtNext   = stmt;
    stmtBefore->gtPrev = stmt;
}
/*****************************************************************************
 *
 *  Append the given expression tree to the current block's tree list.
 *  Return the newly created statement.
 */

GenTree* Compiler::impAppendTree(GenTree* tree, unsigned chkLevel, IL_OFFSETX offset)
{
    assert(tree);

    /* Allocate an 'expression statement' node */

    GenTree* expr = gtNewStmt(tree, offset);

    /* Append the statement to the current block's stmt list */

    impAppendStmt(expr, chkLevel);

    return expr;
}
/*****************************************************************************
 *
 *  Insert the given expression tree before GT_STMT "stmtBefore".
 */

void Compiler::impInsertTreeBefore(GenTree* tree, IL_OFFSETX offset, GenTree* stmtBefore)
{
    assert(stmtBefore->gtOper == GT_STMT);

    /* Allocate an 'expression statement' node */

    GenTree* expr = gtNewStmt(tree, offset);

    /* Insert the statement before "stmtBefore" */

    impInsertStmtBefore(expr, stmtBefore);
}
/*****************************************************************************
 *
 *  Append an assignment of the given value to a temp to the current tree list.
 *  curLevel is the stack level for which the spill to the temp is being done.
 */

void Compiler::impAssignTempGen(unsigned    tmp,
                                GenTree*    val,
                                unsigned    curLevel,
                                GenTree**   pAfterStmt, /* = NULL */
                                IL_OFFSETX  ilOffset,   /* = BAD_IL_OFFSET */
                                BasicBlock* block       /* = NULL */
                                )
{
    GenTree* asg = gtNewTempAssign(tmp, val);

    if (!asg->IsNothingNode())
    {
        if (pAfterStmt)
        {
            GenTree* asgStmt = gtNewStmt(asg, ilOffset);
            *pAfterStmt      = fgInsertStmtAfter(block, *pAfterStmt, asgStmt);
        }
        else
        {
            impAppendTree(asg, curLevel, impCurStmtOffs);
        }
    }
}
/*****************************************************************************
 *  Same as above, but handles the valueclass case too.
 */

void Compiler::impAssignTempGen(unsigned             tmpNum,
                                GenTree*             val,
                                CORINFO_CLASS_HANDLE structType,
                                unsigned             curLevel,
                                GenTree**            pAfterStmt, /* = NULL */
                                IL_OFFSETX           ilOffset,   /* = BAD_IL_OFFSET */
                                BasicBlock*          block       /* = NULL */
                                )
{
    GenTree* asg;

    if (varTypeIsStruct(val))
    {
        assert(tmpNum < lvaCount);
        assert(structType != NO_CLASS_HANDLE);

        // If the method is non-verifiable, the assert may not hold, so ignore
        // it when verification is turned on; any block that tries to use the
        // temp would have failed verification in that case.
        var_types varType = lvaTable[tmpNum].lvType;
        assert(tiVerificationNeeded || varType == TYP_UNDEF || varTypeIsStruct(varType));
        lvaSetStruct(tmpNum, structType, false);

        // Now, set the type of the struct value. Note that lvaSetStruct may modify the type
        // of the lclVar to a specialized type (e.g. TYP_SIMD), based on the handle (structType)
        // that has been passed in for the value being assigned to the temp, in which case we
        // need to set 'val' to that same type.
        // Note also that if we always normalized the types of any node that might be a struct
        // type, this would not be necessary - but that requires additional JIT/EE interface
        // calls that may not actually be required - e.g. if we only access a field of a struct.

        val->gtType = lvaTable[tmpNum].lvType;

        GenTree* dst = gtNewLclvNode(tmpNum, val->gtType);
        asg          = impAssignStruct(dst, val, structType, curLevel, pAfterStmt, ilOffset, block);
    }
    else
    {
        asg = gtNewTempAssign(tmpNum, val);
    }

    if (!asg->IsNothingNode())
    {
        if (pAfterStmt)
        {
            GenTree* asgStmt = gtNewStmt(asg, ilOffset);
            *pAfterStmt      = fgInsertStmtAfter(block, *pAfterStmt, asgStmt);
        }
        else
        {
            impAppendTree(asg, curLevel, impCurStmtOffs);
        }
    }
}
/*****************************************************************************
 *
 *  Pop the given number of values from the stack and return a list node with
 *  their values.
 *  The 'prefixTree' argument may optionally contain an argument
 *  list that is prepended to the list returned from this function.
 *
 *  The notion of prepended is a bit misleading in that the list is backwards
 *  from the way I would expect: The first element popped is at the end of
 *  the returned list, and prefixTree is 'before' that, meaning closer to
 *  the end of the list. To get to prefixTree, you have to walk to the
 *  end of the list.
 *
 *  For ARG_ORDER_R2L prefixTree is only used to insert extra arguments, as
 *  such we reverse its meaning such that returnValue has a reversed
 *  prefixTree at the head of the list.
 */
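// A sketch of the resulting shape (illustrative): with ARG_ORDER_L2R and IL
// that pushed values a, b, c in that order, the loop below pops c first and
// builds
//
//     a -> b -> c -> prefixTree
//
// so the first value popped (c) sits at the end of the argument list, just
// before prefixTree. With ARG_ORDER_R2L, prefixTree is instead reversed and
// placed at the head of the result.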
GenTreeArgList* Compiler::impPopList(unsigned count, CORINFO_SIG_INFO* sig, GenTreeArgList* prefixTree)
{
    assert(sig == nullptr || count == sig->numArgs);

    CORINFO_CLASS_HANDLE structType;
    GenTreeArgList*      treeList;

    if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L)
    {
        treeList = nullptr;
    }
    else
    { // ARG_ORDER_L2R
        treeList = prefixTree;
    }

    while (count--)
    {
        StackEntry se   = impPopStack();
        typeInfo   ti   = se.seTypeInfo;
        GenTree*   temp = se.val;

        if (varTypeIsStruct(temp))
        {
            // Morph trees that aren't already OBJs or MKREFANY to be OBJs
            assert(ti.IsType(TI_STRUCT));
            structType = ti.GetClassHandleForValueClass();

            if (verbose)
            {
                printf("Calling impNormStructVal on:\n");
                gtDispTree(temp);
            }

            temp = impNormStructVal(temp, structType, (unsigned)CHECK_SPILL_ALL);

            if (verbose)
            {
                printf("resulting tree:\n");
                gtDispTree(temp);
            }
        }

        /* NOTE: we defer bashing the type for I_IMPL to fgMorphArgs */
        treeList = gtNewListNode(temp, treeList);
    }

    if (sig != nullptr)
    {
        if (sig->retTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS &&
            sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR && sig->retType != CORINFO_TYPE_VAR)
        {
            // Make sure that all valuetypes (including enums) that we push are loaded.
            // This is to guarantee that if a GC is triggered from the prestub of this method,
            // all valuetypes in the method signature are already loaded.
            // We need to be able to find the size of the valuetypes, but we cannot
            // do a class-load from within GC.
            info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(sig->retTypeSigClass);
        }

        CORINFO_ARG_LIST_HANDLE argLst = sig->args;
        CORINFO_CLASS_HANDLE    argClass;
        CORINFO_CLASS_HANDLE    argRealClass;
        GenTreeArgList*         args;

        for (args = treeList, count = sig->numArgs; count > 0; args = args->Rest(), count--)
        {
            PREFIX_ASSUME(args != nullptr);

            CorInfoType corType = strip(info.compCompHnd->getArgType(sig, argLst, &argClass));

            // insert implied casts (from float to double or double to float)

            if (corType == CORINFO_TYPE_DOUBLE && args->Current()->TypeGet() == TYP_FLOAT)
            {
                args->Current() = gtNewCastNode(TYP_DOUBLE, args->Current(), false, TYP_DOUBLE);
            }
            else if (corType == CORINFO_TYPE_FLOAT && args->Current()->TypeGet() == TYP_DOUBLE)
            {
                args->Current() = gtNewCastNode(TYP_FLOAT, args->Current(), false, TYP_FLOAT);
            }

            // insert any widening or narrowing casts for backwards compatibility

            args->Current() = impImplicitIorI4Cast(args->Current(), JITtype2varType(corType));

            if (corType != CORINFO_TYPE_CLASS && corType != CORINFO_TYPE_BYREF && corType != CORINFO_TYPE_PTR &&
                corType != CORINFO_TYPE_VAR && (argRealClass = info.compCompHnd->getArgClass(sig, argLst)) != nullptr)
            {
                // Everett MC++ could generate IL with mismatched valuetypes. It used to work with the Everett JIT,
                // but it stopped working in Whidbey when we started passing simple valuetypes as underlying
                // primitive types.
                // We will try to adjust for this case here to avoid breaking customers' code (see VSW 485789 for
                // more details).
                if (corType == CORINFO_TYPE_VALUECLASS && !varTypeIsStruct(args->Current()))
                {
                    args->Current() = impNormStructVal(args->Current(), argRealClass, (unsigned)CHECK_SPILL_ALL, true);
                }

                // Make sure that all valuetypes (including enums) that we push are loaded.
                // This is to guarantee that if a GC is triggered from the prestub of this method,
                // all valuetypes in the method signature are already loaded.
                // We need to be able to find the size of the valuetypes, but we cannot
                // do a class-load from within GC.
                info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(argRealClass);
            }

            argLst = info.compCompHnd->getArgNext(argLst);
        }
    }

    if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L)
    {
        // Prepend the prefixTree

        // Simple in-place reversal to place treeList
        // at the end of a reversed prefixTree
        while (prefixTree != nullptr)
        {
            GenTreeArgList* next = prefixTree->Rest();
            prefixTree->Rest()   = treeList;
            treeList             = prefixTree;
            prefixTree           = next;
        }
    }

    return treeList;
}
/*****************************************************************************
 *
 *  Pop the given number of values from the stack in reverse order (STDCALL/CDECL etc.)
 *  The first "skipReverseCount" items are not reversed.
 */

GenTreeArgList* Compiler::impPopRevList(unsigned count, CORINFO_SIG_INFO* sig, unsigned skipReverseCount)
{
    assert(skipReverseCount <= count);

    GenTreeArgList* list = impPopList(count, sig);

    // reverse the list
    if (list == nullptr || skipReverseCount == count)
    {
        return list;
    }

    GenTreeArgList* ptr          = nullptr; // Initialized to the first node that needs to be reversed
    GenTreeArgList* lastSkipNode = nullptr; // Will be set to the last node that does not need to be reversed

    if (skipReverseCount == 0)
    {
        ptr = list;
    }
    else
    {
        lastSkipNode = list;
        // Get to the first node that needs to be reversed
        for (unsigned i = 0; i < skipReverseCount - 1; i++)
        {
            lastSkipNode = lastSkipNode->Rest();
        }

        PREFIX_ASSUME(lastSkipNode != nullptr);
        ptr = lastSkipNode->Rest();
    }

    GenTreeArgList* reversedList = nullptr;

    do
    {
        GenTreeArgList* tmp = ptr->Rest();
        ptr->Rest()         = reversedList;
        reversedList        = ptr;
        ptr                 = tmp;
    } while (ptr != nullptr);

    if (skipReverseCount)
    {
        lastSkipNode->Rest() = reversedList;
        return list;
    }

    return reversedList;
}
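// Worked example (hedged sketch): impPopRevList(4, sig, 2) on values that
// impPopList would return as the list (a b c d) leaves the first two nodes in
// place and reverses the tail, yielding (a b d c).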
//------------------------------------------------------------------------
// impAssignStruct: Assign (copy) the structure from 'src' to 'dest'.
//
// Arguments:
//    dest         - destination of the assignment
//    src          - source of the assignment
//    structHnd    - handle representing the struct type
//    curLevel     - stack level for which a spill may be being done
//    pAfterStmt   - statement to insert any additional statements after
//    ilOffset     - il offset for new statements
//    block        - block to insert any additional statements in
//
// Return Value:
//    The tree that should be appended to the statement list that represents the assignment.
//
// Notes:
//    Temp assignments may be appended to impTreeList if spilling is necessary.

GenTree* Compiler::impAssignStruct(GenTree*             dest,
                                   GenTree*             src,
                                   CORINFO_CLASS_HANDLE structHnd,
                                   unsigned             curLevel,
                                   GenTree**            pAfterStmt, /* = nullptr */
                                   IL_OFFSETX           ilOffset,   /* = BAD_IL_OFFSET */
                                   BasicBlock*          block       /* = nullptr */
                                   )
{
    assert(varTypeIsStruct(dest));

    if (ilOffset == BAD_IL_OFFSET)
    {
        ilOffset = impCurStmtOffs;
    }

    while (dest->gtOper == GT_COMMA)
    {
        assert(varTypeIsStruct(dest->gtOp.gtOp2)); // Second thing is the struct

        // Append all the op1 of GT_COMMA trees before we evaluate op2 of the GT_COMMA tree.
        if (pAfterStmt)
        {
            *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(dest->gtOp.gtOp1, ilOffset));
        }
        else
        {
            impAppendTree(dest->gtOp.gtOp1, curLevel, ilOffset); // do the side effect
        }

        // set dest to the second thing
        dest = dest->gtOp.gtOp2;
    }

    assert(dest->gtOper == GT_LCL_VAR || dest->gtOper == GT_RETURN || dest->gtOper == GT_FIELD ||
           dest->gtOper == GT_IND || dest->gtOper == GT_OBJ || dest->gtOper == GT_INDEX);

    if (dest->OperGet() == GT_LCL_VAR && src->OperGet() == GT_LCL_VAR &&
        src->gtLclVarCommon.gtLclNum == dest->gtLclVarCommon.gtLclNum)
    {
        // Self-assignment is a NOP.
        return gtNewNothingNode();
    }

    // TODO-1stClassStructs: Avoid creating an address if it is not needed,
    // or re-creating a Blk node if it is.
    GenTree* destAddr;

    if (dest->gtOper == GT_IND || dest->OperIsBlk())
    {
        destAddr = dest->gtOp.gtOp1;
    }
    else
    {
        destAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, dest);
    }

    return (impAssignStructPtr(destAddr, src, structHnd, curLevel, pAfterStmt, ilOffset, block));
}
//------------------------------------------------------------------------
// impAssignStructPtr: Assign (copy) the structure from 'src' to 'destAddr'.
//
// Arguments:
//    destAddr     - address of the destination of the assignment
//    src          - source of the assignment
//    structHnd    - handle representing the struct type
//    curLevel     - stack level for which a spill may be being done
//    pAfterStmt   - statement to insert any additional statements after
//    ilOffset     - il offset for new statements
//    block        - block to insert any additional statements in
//
// Return Value:
//    The tree that should be appended to the statement list that represents the assignment.
//
// Notes:
//    Temp assignments may be appended to impTreeList if spilling is necessary.

GenTree* Compiler::impAssignStructPtr(GenTree*             destAddr,
                                      GenTree*             src,
                                      CORINFO_CLASS_HANDLE structHnd,
                                      unsigned             curLevel,
                                      GenTree**            pAfterStmt, /* = NULL */
                                      IL_OFFSETX           ilOffset,   /* = BAD_IL_OFFSET */
                                      BasicBlock*          block       /* = NULL */
                                      )
{
    var_types destType;
    GenTree*  dest      = nullptr;
    unsigned  destFlags = 0;

    if (ilOffset == BAD_IL_OFFSET)
    {
        ilOffset = impCurStmtOffs;
    }

#if defined(UNIX_AMD64_ABI)
    assert(varTypeIsStruct(src) || (src->gtOper == GT_ADDR && src->TypeGet() == TYP_BYREF));
    // TODO-ARM-BUG: Does ARM need this?
    // TODO-ARM64-BUG: Does ARM64 need this?
    assert(src->gtOper == GT_LCL_VAR || src->gtOper == GT_FIELD || src->gtOper == GT_IND || src->gtOper == GT_OBJ ||
           src->gtOper == GT_CALL || src->gtOper == GT_MKREFANY || src->gtOper == GT_RET_EXPR ||
           src->gtOper == GT_COMMA || src->gtOper == GT_ADDR ||
           (src->TypeGet() != TYP_STRUCT &&
            (GenTree::OperIsSIMD(src->gtOper) || src->OperIsSimdHWIntrinsic() || src->gtOper == GT_LCL_FLD)));
#else  // !defined(UNIX_AMD64_ABI)
    assert(varTypeIsStruct(src));

    assert(src->gtOper == GT_LCL_VAR || src->gtOper == GT_FIELD || src->gtOper == GT_IND || src->gtOper == GT_OBJ ||
           src->gtOper == GT_CALL || src->gtOper == GT_MKREFANY || src->gtOper == GT_RET_EXPR ||
           src->gtOper == GT_COMMA ||
           (src->TypeGet() != TYP_STRUCT &&
            (GenTree::OperIsSIMD(src->gtOper) || src->OperIsSimdHWIntrinsic() || src->gtOper == GT_LCL_FLD)));
#endif // !defined(UNIX_AMD64_ABI)
    if (destAddr->OperGet() == GT_ADDR)
    {
        GenTree* destNode = destAddr->gtGetOp1();
        // If the actual destination is a local, or already a block node, or is a node that
        // will be morphed, don't insert an OBJ(ADDR).
        if (destNode->gtOper == GT_INDEX || destNode->OperIsBlk() ||
            ((destNode->OperGet() == GT_LCL_VAR) && (destNode->TypeGet() == src->TypeGet())))
        {
            dest = destNode;
        }

        destType = destNode->TypeGet();
    }
    else
    {
        destType = src->TypeGet();
    }
    var_types asgType = src->TypeGet();

    if (src->gtOper == GT_CALL)
    {
        if (src->AsCall()->TreatAsHasRetBufArg(this))
        {
            // Case of call returning a struct via hidden retbuf arg

            // insert the return value buffer into the argument list as first byref parameter
            src->gtCall.gtCallArgs = gtNewListNode(destAddr, src->gtCall.gtCallArgs);

            // now returns void, not a struct
            src->gtType = TYP_VOID;

            // return the morphed call node
            return src;
        }
        else
        {
            // Case of call returning a struct in one or more registers.

            var_types returnType = (var_types)src->gtCall.gtReturnType;

            // We won't use a return buffer, so change the type of src->gtType to 'returnType'
            src->gtType = genActualType(returnType);

            // First we try to change this to "LclVar/LclFld = call"
            //
            if ((destAddr->gtOper == GT_ADDR) && (destAddr->gtOp.gtOp1->gtOper == GT_LCL_VAR))
            {
                // If it is a multi-reg struct return, don't change the oper to GT_LCL_FLD.
                // That is, the IR will be of the form lclVar = call for multi-reg return
                //
                GenTree* lcl = destAddr->gtOp.gtOp1;
                if (src->AsCall()->HasMultiRegRetVal())
                {
                    // Mark the struct LclVar as used in a MultiReg return context
                    // which currently makes it non promotable.
                    // TODO-1stClassStructs: Eliminate this pessimization when we can more generally
                    // handle multireg returns.
                    lcl->gtFlags |= GTF_DONT_CSE;
                    lvaTable[lcl->gtLclVarCommon.gtLclNum].lvIsMultiRegRet = true;
                }
                else // The call result is not a multireg return
                {
                    // We change this to a GT_LCL_FLD (from a GT_ADDR of a GT_LCL_VAR)
                    lcl->ChangeOper(GT_LCL_FLD);
                    fgLclFldAssign(lcl->gtLclVarCommon.gtLclNum);
                    lcl->gtType = src->gtType;
                    asgType     = src->gtType;
                }

                dest = lcl;

#if defined(_TARGET_ARM_)
                // TODO-Cleanup: This should have been taken care of in the above HasMultiRegRetVal() case,
                // but that method has not been updated to include ARM.
                impMarkLclDstNotPromotable(lcl->gtLclVarCommon.gtLclNum, src, structHnd);
                lcl->gtFlags |= GTF_DONT_CSE;
#elif defined(UNIX_AMD64_ABI)
                // Not allowed for FEATURE_CORECLR, which is the only SKU available for System V OSs.
                assert(!src->gtCall.IsVarargs() && "varargs not allowed for System V OSs.");

                // Make the struct non promotable. The eightbytes could contain multiple fields.
                // TODO-1stClassStructs: Eliminate this pessimization when we can more generally
                // handle multireg returns.
                // TODO-Cleanup: Why is this needed here? This seems that it will set this even for
                // non-multireg returns.
                lcl->gtFlags |= GTF_DONT_CSE;
                lvaTable[lcl->gtLclVarCommon.gtLclNum].lvIsMultiRegRet = true;
#endif
            }
            else // we don't have a GT_ADDR of a GT_LCL_VAR
            {
                // !!! The destination could be on stack. !!!
                // This flag will let us choose the correct write barrier.
                asgType   = returnType;
                destFlags = GTF_IND_TGTANYWHERE;
            }
        }
    }
    else if (src->gtOper == GT_RET_EXPR)
    {
        GenTreeCall* call = src->gtRetExpr.gtInlineCandidate->AsCall();
        noway_assert(call->gtOper == GT_CALL);

        if (call->HasRetBufArg())
        {
            // insert the return value buffer into the argument list as first byref parameter
            call->gtCallArgs = gtNewListNode(destAddr, call->gtCallArgs);

            // now returns void, not a struct
            src->gtType  = TYP_VOID;
            call->gtType = TYP_VOID;

            // We already have appended the write to 'dest' GT_CALL's args
            // So now we just return an empty node (pruning the GT_RET_EXPR)
            return src;
        }
        else
        {
            // Case of inline method returning a struct in one or more registers.
            //
            var_types returnType = (var_types)call->gtReturnType;

            // We won't need a return buffer
            asgType      = returnType;
            src->gtType  = genActualType(returnType);
            call->gtType = src->gtType;

            // If we've changed the type, and it no longer matches a local destination,
            // we must use an indirection.
            if ((dest != nullptr) && (dest->OperGet() == GT_LCL_VAR) && (dest->TypeGet() != asgType))
            {
                dest = nullptr;

                // !!! The destination could be on stack. !!!
                // This flag will let us choose the correct write barrier.
                destFlags = GTF_IND_TGTANYWHERE;
            }
        }
    }
    else if (src->OperIsBlk())
    {
        asgType = impNormStructType(structHnd);
        if (src->gtOper == GT_OBJ)
        {
            assert(src->gtObj.gtClass == structHnd);
        }
    }
    else if (src->gtOper == GT_INDEX)
    {
        asgType = impNormStructType(structHnd);
        assert(src->gtIndex.gtStructElemClass == structHnd);
    }
    else if (src->gtOper == GT_MKREFANY)
    {
        // Since we are assigning the result of a GT_MKREFANY,
        // "destAddr" must point to a refany.
        GenTree* destAddrClone;
        destAddr =
            impCloneExpr(destAddr, &destAddrClone, structHnd, curLevel, pAfterStmt DEBUGARG("MKREFANY assignment"));

        assert(OFFSETOF__CORINFO_TypedReference__dataPtr == 0);
        assert(destAddr->gtType == TYP_I_IMPL || destAddr->gtType == TYP_BYREF);
        GetZeroOffsetFieldMap()->Set(destAddr, GetFieldSeqStore()->CreateSingleton(GetRefanyDataField()));
        GenTree*       ptrSlot         = gtNewOperNode(GT_IND, TYP_I_IMPL, destAddr);
        GenTreeIntCon* typeFieldOffset = gtNewIconNode(OFFSETOF__CORINFO_TypedReference__type, TYP_I_IMPL);
        typeFieldOffset->gtFieldSeq    = GetFieldSeqStore()->CreateSingleton(GetRefanyTypeField());
        GenTree* typeSlot =
            gtNewOperNode(GT_IND, TYP_I_IMPL, gtNewOperNode(GT_ADD, destAddr->gtType, destAddrClone, typeFieldOffset));

        // append the assign of the pointer value
        GenTree* asg = gtNewAssignNode(ptrSlot, src->gtOp.gtOp1);
        if (pAfterStmt)
        {
            *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(asg, ilOffset));
        }
        else
        {
            impAppendTree(asg, curLevel, ilOffset);
        }

        // return the assign of the type value, to be appended
        return gtNewAssignNode(typeSlot, src->gtOp.gtOp2);
    }
    else if (src->gtOper == GT_COMMA)
    {
        // The second thing is the struct or its address.
        assert(varTypeIsStruct(src->gtOp.gtOp2) || src->gtOp.gtOp2->gtType == TYP_BYREF);
        if (pAfterStmt)
        {
            *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(src->gtOp.gtOp1, ilOffset));
        }
        else
        {
            impAppendTree(src->gtOp.gtOp1, curLevel, ilOffset); // do the side effect
        }

        // Evaluate the second thing using recursion.
        return impAssignStructPtr(destAddr, src->gtOp.gtOp2, structHnd, curLevel, pAfterStmt, ilOffset, block);
    }
    else if (src->IsLocal())
    {
        asgType = src->TypeGet();
    }
    else if (asgType == TYP_STRUCT)
    {
        asgType     = impNormStructType(structHnd);
        src->gtType = asgType;
    }
    if (dest == nullptr)
    {
        // TODO-1stClassStructs: We shouldn't really need a block node as the destination
        // if this is a known struct type.
        if (asgType == TYP_STRUCT)
        {
            dest = gtNewObjNode(structHnd, destAddr);
            gtSetObjGcInfo(dest->AsObj());
            // Although an obj as a call argument was always assumed to be a globRef
            // (which is itself overly conservative), that is not true of the operands
            // of a block assignment.
            dest->gtFlags &= ~GTF_GLOB_REF;
            dest->gtFlags |= (destAddr->gtFlags & GTF_GLOB_REF);
        }
        else if (varTypeIsStruct(asgType))
        {
            dest = new (this, GT_BLK) GenTreeBlk(GT_BLK, asgType, destAddr, genTypeSize(asgType));
        }
        else
        {
            dest = gtNewOperNode(GT_IND, asgType, destAddr);
        }
    }
    else
    {
        dest->gtType = asgType;
    }

    dest->gtFlags |= destFlags;
    destFlags = dest->gtFlags;

    // return an assignment node, to be appended
    GenTree* asgNode = gtNewAssignNode(dest, src);
    gtBlockOpInit(asgNode, dest, src, false);

    // TODO-1stClassStructs: Clean up the settings of GTF_DONT_CSE on the lhs
    // of assignments.
    if ((destFlags & GTF_DONT_CSE) == 0)
    {
        dest->gtFlags &= ~(GTF_DONT_CSE);
    }

    return asgNode;
}
/*****************************************************************************
   Given a struct value, and the class handle for that structure, return
   the expression for the address of that structure value.

   willDeref - does the caller guarantee to dereference the pointer.
*/

GenTree* Compiler::impGetStructAddr(GenTree*             structVal,
                                    CORINFO_CLASS_HANDLE structHnd,
                                    unsigned             curLevel,
                                    bool                 willDeref)
{
    assert(varTypeIsStruct(structVal) || eeIsValueClass(structHnd));

    var_types type = structVal->TypeGet();

    genTreeOps oper = structVal->gtOper;

    if (oper == GT_OBJ && willDeref)
    {
        assert(structVal->gtObj.gtClass == structHnd);
        return (structVal->gtObj.Addr());
    }
    else if (oper == GT_CALL || oper == GT_RET_EXPR || oper == GT_OBJ || oper == GT_MKREFANY ||
             structVal->OperIsSimdHWIntrinsic())
    {
        unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));

        impAssignTempGen(tmpNum, structVal, structHnd, curLevel);

        // The 'return value' is now the temp itself

        type          = genActualType(lvaTable[tmpNum].TypeGet());
        GenTree* temp = gtNewLclvNode(tmpNum, type);
        temp          = gtNewOperNode(GT_ADDR, TYP_BYREF, temp);
        return temp;
    }
    else if (oper == GT_COMMA)
    {
        assert(structVal->gtOp.gtOp2->gtType == type); // Second thing is the struct

        GenTree* oldTreeLast  = impTreeLast;
        structVal->gtOp.gtOp2 = impGetStructAddr(structVal->gtOp.gtOp2, structHnd, curLevel, willDeref);
        structVal->gtType     = TYP_BYREF;

        if (oldTreeLast != impTreeLast)
        {
            // Some temp assignment statement was placed on the statement list
            // for Op2, but that would be out of order with op1, so we need to
            // spill op1 onto the statement list after whatever was last
            // before we recursed on Op2 (i.e. before whatever Op2 appended).
            impInsertTreeBefore(structVal->gtOp.gtOp1, impCurStmtOffs, oldTreeLast->gtNext);
            structVal->gtOp.gtOp1 = gtNewNothingNode();
        }

        return (structVal);
    }

    return (gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
}
//------------------------------------------------------------------------
// impNormStructType: Given a (known to be) struct class handle structHnd, normalize its type,
//                    and optionally determine the GC layout of the struct.
//
// Arguments:
//    structHnd     - The class handle for the struct type of interest.
//    gcLayout      - (optional, default nullptr) - a BYTE pointer, allocated by the caller,
//                    into which the gcLayout will be written.
//    pNumGCVars    - (optional, default nullptr) - if non-null, a pointer to an unsigned,
//                    which will be set to the number of GC fields in the struct.
//    pSimdBaseType - (optional, default nullptr) - if non-null, and the struct is a SIMD
//                    type, set to the SIMD base type
//
// Return Value:
//    The JIT type for the struct (e.g. TYP_STRUCT, or TYP_SIMD*).
//    The gcLayout will be returned using the pointers provided by the caller, if non-null.
//    It may also modify the compFloatingPointUsed flag if the type is a SIMD type.
//
// Assumptions:
//    The caller must set gcLayout to nullptr OR ensure that it is large enough
//    (see ICorStaticInfo::getClassGClayout in corinfo.h).
//
// Notes:
//    Normalizing the type involves examining the struct type to determine if it should
//    be modified to one that is handled specially by the JIT, possibly being a candidate
//    for full enregistration, e.g. TYP_SIMD16.
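//
// Illustrative behavior (a hedged sketch): for System.Numerics.Vector4
// (16 bytes of floats, no GC refs) this returns TYP_SIMD16 when featureSIMD
// is enabled; for a struct containing object references it returns TYP_STRUCT
// and, if gcLayout is non-null, fills in one byte per pointer-sized slot.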
var_types Compiler::impNormStructType(CORINFO_CLASS_HANDLE structHnd,
                                      BYTE*                gcLayout,
                                      unsigned*            pNumGCVars,
                                      var_types*           pSimdBaseType)
{
    assert(structHnd != NO_CLASS_HANDLE);

    const DWORD structFlags = info.compCompHnd->getClassAttribs(structHnd);
    var_types   structType  = TYP_STRUCT;

    // On coreclr the check for GC includes a "may" to account for the special
    // ByRef-like span structs. The added check for "CONTAINS_STACK_PTR" is the particular bit.
    // When this is set the struct will contain a ByRef that could be a GC pointer or a native
    // pointer.
    const bool mayContainGCPtrs =
        ((structFlags & CORINFO_FLG_CONTAINS_STACK_PTR) != 0 || ((structFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0));

#ifdef FEATURE_SIMD
    // Check to see if this is a SIMD type.
    if (featureSIMD && !mayContainGCPtrs)
    {
        unsigned originalSize = info.compCompHnd->getClassSize(structHnd);

        if ((originalSize >= minSIMDStructBytes()) && (originalSize <= maxSIMDStructBytes()))
        {
            unsigned int sizeBytes;
            var_types    simdBaseType = getBaseTypeAndSizeOfSIMDType(structHnd, &sizeBytes);
            if (simdBaseType != TYP_UNKNOWN)
            {
                assert(sizeBytes == originalSize);
                structType = getSIMDTypeForSize(sizeBytes);
                if (pSimdBaseType != nullptr)
                {
                    *pSimdBaseType = simdBaseType;
                }
                // Also indicate that we use floating point registers.
                compFloatingPointUsed = true;
            }
        }
    }
#endif // FEATURE_SIMD

    // Fetch GC layout info if requested
    if (gcLayout != nullptr)
    {
        unsigned numGCVars = info.compCompHnd->getClassGClayout(structHnd, gcLayout);

        // Verify that the quick test up above via the class attributes gave a
        // safe view of the type's GCness.
        //
        // Note there are cases where mayContainGCPtrs is true but getClassGClayout
        // does not report any gc fields.

        assert(mayContainGCPtrs || (numGCVars == 0));

        if (pNumGCVars != nullptr)
        {
            *pNumGCVars = numGCVars;
        }
    }
    else
    {
        // Can't safely ask for number of GC pointers without also
        // asking for layout.
        assert(pNumGCVars == nullptr);
    }

    return structType;
}
//****************************************************************************
//  Given TYP_STRUCT value 'structVal', make sure it is 'canonical', that is
//  it is either an OBJ or a MKREFANY node, or a node (e.g. GT_INDEX) that will be morphed.
//
GenTree* Compiler::impNormStructVal(GenTree*             structVal,
                                    CORINFO_CLASS_HANDLE structHnd,
                                    unsigned             curLevel,
                                    bool                 forceNormalization /*=false*/)
{
    assert(forceNormalization || varTypeIsStruct(structVal));
    assert(structHnd != NO_CLASS_HANDLE);
    var_types structType = structVal->TypeGet();
    bool      makeTemp   = false;
    if (structType == TYP_STRUCT)
    {
        structType = impNormStructType(structHnd);
    }
    bool                 alreadyNormalized = false;
    GenTreeLclVarCommon* structLcl         = nullptr;

    genTreeOps oper = structVal->OperGet();
    switch (oper)
    {
        // GT_RETURN and GT_MKREFANY don't capture the handle.
        case GT_RETURN:
            break;
        case GT_MKREFANY:
            alreadyNormalized = true;
            break;

        case GT_CALL:
            structVal->gtCall.gtRetClsHnd = structHnd;
            makeTemp                      = true;
            break;

        case GT_RET_EXPR:
            structVal->gtRetExpr.gtRetClsHnd = structHnd;
            makeTemp                         = true;
            break;

        case GT_ARGPLACE:
            structVal->gtArgPlace.gtArgPlaceClsHnd = structHnd;
            break;

        case GT_INDEX:
            // This will be transformed to an OBJ later.
            alreadyNormalized                    = true;
            structVal->gtIndex.gtStructElemClass = structHnd;
            structVal->gtIndex.gtIndElemSize     = info.compCompHnd->getClassSize(structHnd);
            break;

        case GT_FIELD:
            // Wrap it in a GT_OBJ.
            structVal->gtType = structType;
            structVal         = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
            break;

        case GT_LCL_VAR:
        case GT_LCL_FLD:
            structLcl = structVal->AsLclVarCommon();
            // Wrap it in a GT_OBJ.
            structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
            __fallthrough;

        case GT_OBJ:
        case GT_BLK:
        case GT_DYN_BLK:
        case GT_ASG:
            // These should already have the appropriate type.
            assert(structVal->gtType == structType);
            alreadyNormalized = true;
            break;

        case GT_IND:
            assert(structVal->gtType == structType);
            structVal         = gtNewObjNode(structHnd, structVal->gtGetOp1());
            alreadyNormalized = true;
            break;

#ifdef FEATURE_SIMD
        case GT_SIMD:
            assert(varTypeIsSIMD(structVal) && (structVal->gtType == structType));
            break;
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
        case GT_HWIntrinsic:
            assert(varTypeIsSIMD(structVal) && (structVal->gtType == structType));
            break;
#endif

        case GT_COMMA:
        {
            // The second thing could either be a block node or a GT_FIELD or a GT_SIMD or a GT_COMMA node.
            GenTree* blockNode = structVal->gtOp.gtOp2;
            assert(blockNode->gtType == structType);

            // Is this GT_COMMA(op1, GT_COMMA())?
            GenTree* parent = structVal;
            if (blockNode->OperGet() == GT_COMMA)
            {
                // Find the last node in the comma chain.
                do
                {
                    assert(blockNode->gtType == structType);
                    parent    = blockNode;
                    blockNode = blockNode->gtOp.gtOp2;
                } while (blockNode->OperGet() == GT_COMMA);
            }

            if (blockNode->OperGet() == GT_FIELD)
            {
                // If we have a GT_FIELD then wrap it in a GT_OBJ.
                blockNode = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, blockNode));
            }

            if (blockNode->OperIsSIMDorSimdHWintrinsic())
            {
                parent->gtOp.gtOp2 = impNormStructVal(blockNode, structHnd, curLevel, forceNormalization);
                alreadyNormalized  = true;
            }
            else
            {
                noway_assert(blockNode->OperIsBlk());

                // Sink the GT_COMMA below the blockNode addr.
                // That is GT_COMMA(op1, op2=blockNode) is transformed into
                // blockNode(GT_COMMA(TYP_BYREF, op1, op2's op1)).
                //
                // In case of a chained GT_COMMA case, we sink the last
                // GT_COMMA below the blockNode addr.
                GenTree* blockNodeAddr = blockNode->gtOp.gtOp1;
                assert(blockNodeAddr->gtType == TYP_BYREF);
                GenTree* commaNode    = parent;
                commaNode->gtType     = TYP_BYREF;
                commaNode->gtOp.gtOp2 = blockNodeAddr;
                blockNode->gtOp.gtOp1 = commaNode;
                if (parent == structVal)
                {
                    structVal = blockNode;
                }
                alreadyNormalized = true;
            }
        }
        break;

        default:
            noway_assert(!"Unexpected node in impNormStructVal()");
            break;
    }

    structVal->gtType  = structType;
    GenTree* structObj = structVal;

    if (!alreadyNormalized || forceNormalization)
    {
        if (makeTemp)
        {
            unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));

            impAssignTempGen(tmpNum, structVal, structHnd, curLevel);

            // The structVal is now the temp itself

            structLcl = gtNewLclvNode(tmpNum, structType)->AsLclVarCommon();
            // TODO-1stClassStructs: Avoid always wrapping in GT_OBJ.
            structObj = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structLcl));
        }
        else if (varTypeIsStruct(structType) && !structVal->OperIsBlk())
        {
            // Wrap it in a GT_OBJ
            structObj = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
        }
    }

    if (structLcl != nullptr)
    {
        // An OBJ on an ADDR(LCL_VAR) can never raise an exception
        // so we don't set GTF_EXCEPT here.
        if (!lvaIsImplicitByRefLocal(structLcl->gtLclNum))
        {
            structObj->gtFlags &= ~GTF_GLOB_REF;
        }
    }
    else
    {
        // In general an OBJ is an indirection and could raise an exception.
        structObj->gtFlags |= GTF_EXCEPT;
    }
    return (structObj);
}
/******************************************************************************/
// Given a type token, generate code that will evaluate to the correct
// handle representation of that token (type handle, field handle, or method handle)
//
// For most cases, the handle is determined at compile-time, and the code
// generated is simply an embedded handle.
//
// Run-time lookup is required if the enclosing method is shared between instantiations
// and the token refers to formal type parameters whose instantiation is not known
// at compile-time.
//
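// e.g. (a hedged sketch): `ldtoken List<string>` in non-shared code becomes a
// single embedded-handle constant node, while `ldtoken List<T>` inside code
// shared over T becomes a dictionary lookup tree built by impLookupToTree /
// impRuntimeLookupToTree below.
//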
GenTree* Compiler::impTokenToHandle(CORINFO_RESOLVED_TOKEN* pResolvedToken,
                                    BOOL*                   pRuntimeLookup /* = NULL */,
                                    BOOL                    mustRestoreHandle /* = FALSE */,
                                    BOOL                    importParent /* = FALSE */)
{
    assert(!fgGlobalMorph);

    CORINFO_GENERICHANDLE_RESULT embedInfo;
    info.compCompHnd->embedGenericHandle(pResolvedToken, importParent, &embedInfo);

    if (pRuntimeLookup)
    {
        *pRuntimeLookup = embedInfo.lookup.lookupKind.needsRuntimeLookup;
    }

    if (mustRestoreHandle && !embedInfo.lookup.lookupKind.needsRuntimeLookup)
    {
        switch (embedInfo.handleType)
        {
            case CORINFO_HANDLETYPE_CLASS:
                info.compCompHnd->classMustBeLoadedBeforeCodeIsRun((CORINFO_CLASS_HANDLE)embedInfo.compileTimeHandle);
                break;

            case CORINFO_HANDLETYPE_METHOD:
                info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun((CORINFO_METHOD_HANDLE)embedInfo.compileTimeHandle);
                break;

            case CORINFO_HANDLETYPE_FIELD:
                info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(
                    info.compCompHnd->getFieldClass((CORINFO_FIELD_HANDLE)embedInfo.compileTimeHandle));
                break;

            default:
                break;
        }
    }

    // Generate the full lookup tree. May be null if we're abandoning an inline attempt.
    GenTree* result = impLookupToTree(pResolvedToken, &embedInfo.lookup, gtTokenToIconFlags(pResolvedToken->token),
                                      embedInfo.compileTimeHandle);

    // If we have a result and it requires runtime lookup, wrap it in a runtime lookup node.
    if ((result != nullptr) && embedInfo.lookup.lookupKind.needsRuntimeLookup)
    {
        result = gtNewRuntimeLookup(embedInfo.compileTimeHandle, embedInfo.handleType, result);
    }

    return result;
}
GenTree* Compiler::impLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
                                   CORINFO_LOOKUP*         pLookup,
                                   unsigned                handleFlags,
                                   void*                   compileTimeHandle)
{
    if (!pLookup->lookupKind.needsRuntimeLookup)
    {
        // No runtime lookup is required.
        // Access is direct or memory-indirect (of a fixed address) reference

        CORINFO_GENERIC_HANDLE handle       = nullptr;
        void*                  pIndirection = nullptr;
        assert(pLookup->constLookup.accessType != IAT_PPVALUE && pLookup->constLookup.accessType != IAT_RELPVALUE);

        if (pLookup->constLookup.accessType == IAT_VALUE)
        {
            handle = pLookup->constLookup.handle;
        }
        else if (pLookup->constLookup.accessType == IAT_PVALUE)
        {
            pIndirection = pLookup->constLookup.addr;
        }
        return gtNewIconEmbHndNode(handle, pIndirection, handleFlags, compileTimeHandle);
    }
    else if (compIsForInlining())
    {
        // Don't import runtime lookups when inlining
        // Inlining has to be aborted in such a case
        compInlineResult->NoteFatal(InlineObservation::CALLSITE_GENERIC_DICTIONARY_LOOKUP);
        return nullptr;
    }
    else
    {
        // Need to use dictionary-based access which depends on the typeContext
        // which is only available at runtime, not at compile-time.

        return impRuntimeLookupToTree(pResolvedToken, pLookup, compileTimeHandle);
    }
}
#ifdef FEATURE_READYTORUN_COMPILER
GenTree* Compiler::impReadyToRunLookupToTree(CORINFO_CONST_LOOKUP* pLookup,
                                             unsigned              handleFlags,
                                             void*                 compileTimeHandle)
{
    CORINFO_GENERIC_HANDLE handle       = nullptr;
    void*                  pIndirection = nullptr;
    assert(pLookup->accessType != IAT_PPVALUE && pLookup->accessType != IAT_RELPVALUE);

    if (pLookup->accessType == IAT_VALUE)
    {
        handle = pLookup->handle;
    }
    else if (pLookup->accessType == IAT_PVALUE)
    {
        pIndirection = pLookup->addr;
    }
    return gtNewIconEmbHndNode(handle, pIndirection, handleFlags, compileTimeHandle);
}

GenTreeCall* Compiler::impReadyToRunHelperToTree(
    CORINFO_RESOLVED_TOKEN* pResolvedToken,
    CorInfoHelpFunc         helper,
    var_types               type,
    GenTreeArgList*         args /* =NULL*/,
    CORINFO_LOOKUP_KIND*    pGenericLookupKind /* =NULL. Only used with generics */)
{
    CORINFO_CONST_LOOKUP lookup;
    if (!info.compCompHnd->getReadyToRunHelper(pResolvedToken, pGenericLookupKind, helper, &lookup))
    {
        return nullptr;
    }

    GenTreeCall* op1 = gtNewHelperCallNode(helper, type, args);

    op1->setEntryPoint(lookup);

    return op1;
}
#endif
GenTree* Compiler::impMethodPointer(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo)
{
    GenTree* op1 = nullptr;

    switch (pCallInfo->kind)
    {
        case CORINFO_CALL:
            op1 = new (this, GT_FTN_ADDR) GenTreeFptrVal(TYP_I_IMPL, pCallInfo->hMethod);

#ifdef FEATURE_READYTORUN_COMPILER
            if (opts.IsReadyToRun())
            {
                op1->gtFptrVal.gtEntryPoint = pCallInfo->codePointerLookup.constLookup;
            }
            else
            {
                op1->gtFptrVal.gtEntryPoint.addr       = nullptr;
                op1->gtFptrVal.gtEntryPoint.accessType = IAT_VALUE;
            }
#endif
            break;

        case CORINFO_CALL_CODE_POINTER:
            if (compIsForInlining())
            {
                // Don't import runtime lookups when inlining
                // Inlining has to be aborted in such a case
                compInlineResult->NoteFatal(InlineObservation::CALLSITE_GENERIC_DICTIONARY_LOOKUP);
                return nullptr;
            }

            op1 = impLookupToTree(pResolvedToken, &pCallInfo->codePointerLookup, GTF_ICON_FTN_ADDR, pCallInfo->hMethod);
            break;

        default:
            noway_assert(!"unknown call kind");
            break;
    }

    return op1;
}
//------------------------------------------------------------------------
// getRuntimeContextTree: find pointer to context for runtime lookup.
//
// Arguments:
//    kind - lookup kind.
//
// Return Value:
//    Return GenTree pointer to generic shared context.
//
// Notes:
//    Reports a use of the generic context to the compiler.

GenTree* Compiler::getRuntimeContextTree(CORINFO_RUNTIME_LOOKUP_KIND kind)
{
    GenTree* ctxTree = nullptr;

    // Collectible types require that for shared generic code, if we use the generic context parameter
    // then we report it. (This is a conservative approach: we could detect some cases, particularly when
    // the context parameter is `this`, where we don't need the eager reporting logic.)
    lvaGenericsContextUseCount++;

    if (kind == CORINFO_LOOKUP_THISOBJ)
    {
        // this Object
        ctxTree = gtNewLclvNode(info.compThisArg, TYP_REF);

        // Vtable pointer of this object
        ctxTree = gtNewOperNode(GT_IND, TYP_I_IMPL, ctxTree);
        ctxTree->gtFlags |= GTF_EXCEPT; // Null-pointer exception
        ctxTree->gtFlags |= GTF_IND_INVARIANT;
    }
    else
    {
        assert(kind == CORINFO_LOOKUP_METHODPARAM || kind == CORINFO_LOOKUP_CLASSPARAM);

        ctxTree = gtNewLclvNode(info.compTypeCtxtArg, TYP_I_IMPL); // Exact method descriptor as passed in as last arg
    }
    return ctxTree;
}
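// Shapes of the context tree produced above (illustrative sketch):
//     CORINFO_LOOKUP_THISOBJ:      IND(LCL_VAR 'this')   -- the method table pointer
//     CORINFO_LOOKUP_METHODPARAM /
//     CORINFO_LOOKUP_CLASSPARAM:   LCL_VAR typeCtxtArg   -- the hidden instantiation argument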
/*****************************************************************************/
/* Import a dictionary lookup to access a handle in code shared between
   generic instantiations.
   The lookup depends on the typeContext which is only available at
   runtime, and not at compile-time.
   pLookup->token1 and pLookup->token2 specify the handle that is needed.
   The cases are:

   1. pLookup->indirections == CORINFO_USEHELPER : Call a helper passing it the
      instantiation-specific handle, and the tokens to look up the handle.
   2. pLookup->indirections != CORINFO_USEHELPER :
      2a. pLookup->testForNull == false : Dereference the instantiation-specific handle
          to get the handle.
      2b. pLookup->testForNull == true : Dereference the instantiation-specific handle.
          If it is non-NULL, it is the handle required. Else, call a helper
          to look up the handle.
 */
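// Rough shape of the result for case 2b (a hedged sketch): the handle slot is
// reached by pRuntimeLookup->indirections loads from ctxTree (adjusted by the
// offsets[] entries), then null-tested:
//
//     QMARK(NE(slot, null), slot, CALL helper(ctxTree, signature))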
GenTree* Compiler::impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
                                          CORINFO_LOOKUP*         pLookup,
                                          void*                   compileTimeHandle)
// This method can only be called from the importer instance of the Compiler.
// In other words, it cannot be called by the instance of the Compiler for the inlinee.
assert(!compIsForInlining());
GenTree* ctxTree = getRuntimeContextTree(pLookup->lookupKind.runtimeLookupKind);
CORINFO_RUNTIME_LOOKUP* pRuntimeLookup = &pLookup->runtimeLookup;
// It's available only via the run-time helper function
if (pRuntimeLookup->indirections == CORINFO_USEHELPER)
#ifdef FEATURE_READYTORUN_COMPILER
if (opts.IsReadyToRun())
return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL,
                                 gtNewArgList(ctxTree), &pLookup->lookupKind);
GenTree* argNode =
    gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_TOKEN_HDL, compileTimeHandle);
GenTreeArgList* helperArgs = gtNewArgList(ctxTree, argNode);
return gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, helperArgs);
GenTree* slotPtrTree = ctxTree;
if (pRuntimeLookup->testForNull)
slotPtrTree = impCloneExpr(ctxTree, &ctxTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
                           nullptr DEBUGARG("impRuntimeLookup slot"));
GenTree* indOffTree = nullptr;
// Apply repeated indirections
for (WORD i = 0; i < pRuntimeLookup->indirections; i++)
if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset))
indOffTree = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
                          nullptr DEBUGARG("impRuntimeLookup indirectOffset"));
slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
slotPtrTree->gtFlags |= GTF_IND_NONFAULTING;
slotPtrTree->gtFlags |= GTF_IND_INVARIANT;
if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset))
slotPtrTree = gtNewOperNode(GT_ADD, TYP_I_IMPL, indOffTree, slotPtrTree);
if (pRuntimeLookup->offsets[i] != 0)
slotPtrTree =
    gtNewOperNode(GT_ADD, TYP_I_IMPL, slotPtrTree, gtNewIconNode(pRuntimeLookup->offsets[i], TYP_I_IMPL));
// No null test required
if (!pRuntimeLookup->testForNull)
if (pRuntimeLookup->indirections == 0)
slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
slotPtrTree->gtFlags |= GTF_IND_NONFAULTING;
if (!pRuntimeLookup->testForFixup)
impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark0"));
unsigned slotLclNum = lvaGrabTemp(true DEBUGARG("impRuntimeLookup test"));
impAssignTempGen(slotLclNum, slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr, impCurStmtOffs);
GenTree* slot = gtNewLclvNode(slotLclNum, TYP_I_IMPL);
// downcast the pointer to a TYP_INT on 64-bit targets
slot = impImplicitIorI4Cast(slot, TYP_INT);
// Use a GT_AND to check for the lowest bit and indirect if it is set
GenTree* test  = gtNewOperNode(GT_AND, TYP_INT, slot, gtNewIconNode(1));
GenTree* relop = gtNewOperNode(GT_EQ, TYP_INT, test, gtNewIconNode(0));
// slot = GT_IND(slot - 1)
slot           = gtNewLclvNode(slotLclNum, TYP_I_IMPL);
GenTree* add   = gtNewOperNode(GT_ADD, TYP_I_IMPL, slot, gtNewIconNode(-1, TYP_I_IMPL));
GenTree* indir = gtNewOperNode(GT_IND, TYP_I_IMPL, add);
indir->gtFlags |= GTF_IND_NONFAULTING;
indir->gtFlags |= GTF_IND_INVARIANT;
slot           = gtNewLclvNode(slotLclNum, TYP_I_IMPL);
GenTree* asg   = gtNewAssignNode(slot, indir);
GenTree* colon = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), asg);
GenTree* qmark = gtNewQmarkNode(TYP_VOID, relop, colon);
impAppendTree(qmark, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
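// In C-like pseudocode, the statement appended above amounts to (a sketch):
//
//     if ((slot & 1) != 0)    // low bit set => slot still holds a fixup
//     {
//         slot = *(slot - 1); // strip the bit and indirect once
//     }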
return gtNewLclvNode(slotLclNum, TYP_I_IMPL);
assert(pRuntimeLookup->indirections != 0);
impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark1"));
// Extract the handle
GenTree* handle = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
handle->gtFlags |= GTF_IND_NONFAULTING;
GenTree* handleCopy = impCloneExpr(handle, &handle, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
                                   nullptr DEBUGARG("impRuntimeLookup typehandle"));
GenTree* argNode = gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_TOKEN_HDL, compileTimeHandle);
GenTreeArgList* helperArgs = gtNewArgList(ctxTree, argNode);
GenTree* helperCall = gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, helperArgs);
// Check for null and possibly call helper
GenTree* relop = gtNewOperNode(GT_NE, TYP_INT, handle, gtNewIconNode(0, TYP_I_IMPL));
GenTree* colon = new (this, GT_COLON) GenTreeColon(TYP_I_IMPL,
                                                   gtNewNothingNode(), // do nothing if nonnull
                                                   helperCall);
GenTree* qmark = gtNewQmarkNode(TYP_I_IMPL, relop, colon);
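// In effect (a sketch), since the temp below aliases the handle copy:
//
//     tmp = handle;
//     if (tmp == 0)
//     {
//         tmp = helper(ctx, signature); // runtime-lookup slow path
//     }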
unsigned tmp;
if (handleCopy->IsLocal())
tmp = handleCopy->gtLclVarCommon.gtLclNum;
else
tmp = lvaGrabTemp(true DEBUGARG("spilling QMark1"));
impAssignTempGen(tmp, qmark, (unsigned)CHECK_SPILL_NONE);
return gtNewLclvNode(tmp, TYP_I_IMPL);
/******************************************************************************
 * Spills the stack at verCurrentState.esStack[level] and replaces it with a temp.
 * If tnum != BAD_VAR_NUM, the temp var used to replace the tree is tnum,
 * else, grab a new temp.
 * For structs (which can be pushed on the stack using obj, etc.),
 * special handling is needed.
 */
struct RecursiveGuard
m_pAddress = nullptr;
*m_pAddress = false;
void Init(bool* pAddress, bool bInitialize)
assert(pAddress && *pAddress == false && "Recursive guard violation");
m_pAddress = pAddress;
bool Compiler::impSpillStackEntry(unsigned level,
                                  bool bAssertOnRecursion,
RecursiveGuard guard;
guard.Init(&impNestedStackSpill, bAssertOnRecursion);
GenTree* tree = verCurrentState.esStack[level].val;
/* Allocate a temp if we haven't been asked to use a particular one */
if (tiVerificationNeeded)
// Ignore bad temp requests (they will happen with bad code and will be
// caught when importing the destblock)
if ((tnum != BAD_VAR_NUM && tnum >= lvaCount) && verNeedsVerification())
if (tnum != BAD_VAR_NUM && (tnum >= lvaCount))
bool isNewTemp = false;
if (tnum == BAD_VAR_NUM)
tnum = lvaGrabTemp(true DEBUGARG(reason));
else if (tiVerificationNeeded && lvaTable[tnum].TypeGet() != TYP_UNDEF)
// If verification is needed and tnum's type is incompatible with
// the type on the stack, we grab a new temp. This is safe since
// we will throw a verification exception in the dest block.
var_types valTyp = tree->TypeGet();
var_types dstTyp = lvaTable[tnum].TypeGet();
// If the two types are different, we return. This will only happen with bad code and will
// be caught when importing the destblock. We still allow int/byref and float/double differences.
if ((genActualType(valTyp) != genActualType(dstTyp)) &&
    !(
#ifndef _TARGET_64BIT_
        (valTyp == TYP_I_IMPL && dstTyp == TYP_BYREF) || (valTyp == TYP_BYREF && dstTyp == TYP_I_IMPL) ||
#endif // !_TARGET_64BIT_
        (varTypeIsFloating(dstTyp) && varTypeIsFloating(valTyp))))
if (verNeedsVerification())
/* Assign the spilled entry to the temp */
impAssignTempGen(tnum, tree, verCurrentState.esStack[level].seTypeInfo.GetClassHandle(), level);
// If temp is newly introduced and a ref type, grab what type info we can.
if (isNewTemp && (lvaTable[tnum].lvType == TYP_REF))
assert(lvaTable[tnum].lvSingleDef == 0);
lvaTable[tnum].lvSingleDef = 1;
JITDUMP("Marked V%02u as a single def temp\n", tnum);
CORINFO_CLASS_HANDLE stkHnd = verCurrentState.esStack[level].seTypeInfo.GetClassHandle();
lvaSetClass(tnum, tree, stkHnd);
// If we're assigning a GT_RET_EXPR, note the temp over on the call,
// so the inliner can use it in case it needs a return spill temp.
if (tree->OperGet() == GT_RET_EXPR)
JITDUMP("\n*** see V%02u = GT_RET_EXPR, noting temp\n", tnum);
GenTree* call = tree->gtRetExpr.gtInlineCandidate;
InlineCandidateInfo* ici = call->gtCall.gtInlineCandidateInfo;
ici->preexistingSpillTemp = tnum;
// The tree type may be modified by impAssignTempGen, so use the type of the lclVar.
var_types type = genActualType(lvaTable[tnum].TypeGet());
GenTree* temp = gtNewLclvNode(tnum, type);
verCurrentState.esStack[level].val = temp;
/*****************************************************************************
 * Ensure that the stack has only spilled values
 */
void Compiler::impSpillStackEnsure(bool spillLeaves)
assert(!spillLeaves || opts.compDbgCode);
for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
GenTree* tree = verCurrentState.esStack[level].val;
if (!spillLeaves && tree->OperIsLeaf())
// Temps introduced by the importer itself don't need to be spilled
bool isTempLcl = (tree->OperGet() == GT_LCL_VAR) && (tree->gtLclVarCommon.gtLclNum >= info.compLocalsCount);
impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillStackEnsure"));
void Compiler::impSpillEvalStack()
for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillEvalStack"));
/*****************************************************************************
 * If the stack contains any trees with side effects in them, assign those
 * trees to temps and append the assignments to the statement list.
 * On return the stack is guaranteed to be empty.
 */
inline void Compiler::impEvalSideEffects()
impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("impEvalSideEffects"));
verCurrentState.esStackDepth = 0;
/*****************************************************************************
 * If the stack contains any trees with side effects in them, assign those
 * trees to temps and replace them on the stack with refs to their temps.
 * [0..chkLevel) is the portion of the stack which will be checked and spilled.
 */
inline void Compiler::impSpillSideEffects(bool spillGlobEffects, unsigned chkLevel DEBUGARG(const char* reason))
assert(chkLevel != (unsigned)CHECK_SPILL_NONE);
/* Before we make any appends to the tree list we must spill the
 * "special" side effects (GTF_ORDER_SIDEEFF on a GT_CATCH_ARG) */
impSpillSpecialSideEff();
if (chkLevel == (unsigned)CHECK_SPILL_ALL)
chkLevel = verCurrentState.esStackDepth;
assert(chkLevel <= verCurrentState.esStackDepth);
unsigned spillFlags = spillGlobEffects ? GTF_GLOB_EFFECT : GTF_SIDE_EFFECT;
for (unsigned i = 0; i < chkLevel; i++)
GenTree* tree = verCurrentState.esStack[i].val;
GenTree* lclVarTree;
if ((tree->gtFlags & spillFlags) != 0 ||
    (spillGlobEffects &&                        // Only consider the following when spillGlobEffects == true
     !impIsAddressInLocal(tree, &lclVarTree) && // No need to spill the GT_ADDR node on a local.
     gtHasLocalsWithAddrOp(tree)))              // Spill if we still see GT_LCL_VAR that contains lvHasLdAddrOp or
                                                // lvAddrTaken flag.
impSpillStackEntry(i, BAD_VAR_NUM DEBUGARG(false) DEBUGARG(reason));
/*****************************************************************************
 * If the stack contains any trees with special side effects in them, assign
 * those trees to temps and replace them on the stack with refs to their temps.
 */
inline void Compiler::impSpillSpecialSideEff()
// Only exception objects need to be carefully handled
if (!compCurBB->bbCatchTyp)
for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
GenTree* tree = verCurrentState.esStack[level].val;
// Make sure we spill any sub-tree that contains an exception object (GT_CATCH_ARG).
if (gtHasCatchArg(tree))
impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillSpecialSideEff"));
/*****************************************************************************
 * Spill all stack references to value classes (TYP_STRUCT nodes)
 */
void Compiler::impSpillValueClasses()
for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
GenTree* tree = verCurrentState.esStack[level].val;
if (fgWalkTreePre(&tree, impFindValueClasses) == WALK_ABORT)
// Tree walk was aborted, which means that we found a
// value class on the stack. Need to spill that stack entry.
impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillValueClasses"));
/*****************************************************************************
 * Callback that checks if a tree node is TYP_STRUCT
 */
Compiler::fgWalkResult Compiler::impFindValueClasses(GenTree** pTree, fgWalkData* data)
fgWalkResult walkResult = WALK_CONTINUE;
if ((*pTree)->gtType == TYP_STRUCT)
// Abort the walk and indicate that we found a value class
walkResult = WALK_ABORT;
/*****************************************************************************
 * If the stack contains any trees with references to local #lclNum, assign
 * those trees to temps and replace them on the stack with refs to their temps.
 */
void Compiler::impSpillLclRefs(ssize_t lclNum)
/* Before we make any appends to the tree list we must spill the
 * "special" side effects (GTF_ORDER_SIDEEFF) - GT_CATCH_ARG */
impSpillSpecialSideEff();
for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
GenTree* tree = verCurrentState.esStack[level].val;
/* If the tree may throw an exception, and the block has a handler,
   then we need to spill assignments to the local if the local is
   live on entry to the handler.
   Just spill them all without considering the liveness */
bool xcptnCaught = ehBlockHasExnFlowDsc(compCurBB) && (tree->gtFlags & (GTF_CALL | GTF_EXCEPT));
/* Skip the tree if it doesn't have an affected reference,
   unless xcptnCaught */
if (xcptnCaught || gtHasRef(tree, lclNum, false))
impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillLclRefs"));
/*****************************************************************************
 * Push catch arg onto the stack.
 * If there are jumps to the beginning of the handler, insert basic block
 * and spill catch arg to a temp. Update the handler block if necessary.
 *
 * Returns the basic block of the actual handler.
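 *
 * For illustration (a sketch, not part of the original comment): when a
 * spill block is needed, the handler
 *
 *     hndBlk:  ... uses of GT_CATCH_ARG ...
 *
 * is effectively rewritten as
 *
 *     newBlk:  tempN = GT_CATCH_ARG     (BBJ_NONE, falls into hndBlk)
 *     hndBlk:  ... uses of tempN ...
 */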
BasicBlock* Compiler::impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_HANDLE clsHnd, bool isSingleBlockFilter)
// Do not inject the basic block twice on reimport. This should be
// hit only under JIT stress. See if the block is the one we injected.
// Note that EH canonicalization can inject internal blocks here. We might
// be able to re-use such a block (but we don't, right now).
if ((hndBlk->bbFlags & (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET)) ==
    (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET))
GenTree* tree = hndBlk->bbTreeList;
if (tree != nullptr && tree->gtOper == GT_STMT)
tree = tree->gtStmt.gtStmtExpr;
assert(tree != nullptr);
if ((tree->gtOper == GT_ASG) && (tree->gtOp.gtOp1->gtOper == GT_LCL_VAR) &&
    (tree->gtOp.gtOp2->gtOper == GT_CATCH_ARG))
tree = gtNewLclvNode(tree->gtOp.gtOp1->gtLclVarCommon.gtLclNum, TYP_REF);
impPushOnStack(tree, typeInfo(TI_REF, clsHnd));
return hndBlk->bbNext;
// If we get here, it must have been some other kind of internal block. It's possible that
// someone prepended something to our injected block, but that's unlikely.
/* Push the exception address value on the stack */
GenTree* arg = new (this, GT_CATCH_ARG) GenTree(GT_CATCH_ARG, TYP_REF);
/* Mark the node as having a side-effect - i.e. cannot be
 * moved around since it is tied to a fixed location (EAX) */
arg->gtFlags |= GTF_ORDER_SIDEEFF;
#if defined(JIT32_GCENCODER)
const bool forceInsertNewBlock = isSingleBlockFilter || compStressCompile(STRESS_CATCH_ARG, 5);
#else
const bool forceInsertNewBlock = compStressCompile(STRESS_CATCH_ARG, 5);
#endif // defined(JIT32_GCENCODER)
/* Spill GT_CATCH_ARG to a temp if there are jumps to the beginning of the handler */
if (hndBlk->bbRefs > 1 || forceInsertNewBlock)
if (hndBlk->bbRefs == 1)
/* Create extra basic block for the spill */
BasicBlock* newBlk = fgNewBBbefore(BBJ_NONE, hndBlk, /* extendRegion */ true);
newBlk->bbFlags |= BBF_IMPORTED | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET;
newBlk->setBBWeight(hndBlk->bbWeight);
newBlk->bbCodeOffs = hndBlk->bbCodeOffs;
/* Account for the new link we are about to create */
/* Spill into a temp */
unsigned tempNum = lvaGrabTemp(false DEBUGARG("SpillCatchArg"));
lvaTable[tempNum].lvType = TYP_REF;
arg = gtNewTempAssign(tempNum, arg);
hndBlk->bbStkTempsIn = tempNum;
/* Report the debug info. impImportBlockCode won't treat
 * the actual handler as exception block and thus won't do it for us. */
if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES)
impCurStmtOffs = newBlk->bbCodeOffs | IL_OFFSETX_STKBIT;
arg = gtNewStmt(arg, impCurStmtOffs);
fgInsertStmtAtEnd(newBlk, arg);
arg = gtNewLclvNode(tempNum, TYP_REF);
impPushOnStack(arg, typeInfo(TI_REF, clsHnd));
/*****************************************************************************
 * Given a tree, clone it. *pClone is set to the cloned tree.
 * Returns the original tree if the cloning was easy,
 * else returns the temp to which the tree had to be spilled.
 * If the tree has side-effects, it will be spilled to a temp.
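 *
 * For example (a sketch): cloning a side-effecting tree such as a call
 * appends "tmpN = call(...)" to the statement list, sets *pClone to one
 * use of tmpN and returns another, so both consumers evaluate the call
 * exactly once.
 */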
GenTree* Compiler::impCloneExpr(GenTree*             tree,
                                GenTree**            pClone,
                                CORINFO_CLASS_HANDLE structHnd,
                                unsigned             curLevel,
                                GenTree** pAfterStmt DEBUGARG(const char* reason))
if (!(tree->gtFlags & GTF_GLOB_EFFECT))
GenTree* clone = gtClone(tree, true);
/* Store the operand in a temp and return the temp */
unsigned temp = lvaGrabTemp(true DEBUGARG(reason));
// impAssignTempGen() may change tree->gtType to TYP_VOID for calls which
// return a struct type. It also may modify the struct type to a more
// specialized type (e.g. a SIMD type). So we will get the type from
// the lclVar AFTER calling impAssignTempGen().
impAssignTempGen(temp, tree, structHnd, curLevel, pAfterStmt, impCurStmtOffs);
var_types type = genActualType(lvaTable[temp].TypeGet());
*pClone = gtNewLclvNode(temp, type);
return gtNewLclvNode(temp, type);
/*****************************************************************************
 * Remember the IL offset (including stack-empty info) for the trees we will
 * generate now.
 */
inline void Compiler::impCurStmtOffsSet(IL_OFFSET offs)
if (compIsForInlining())
GenTree* callStmt = impInlineInfo->iciStmt;
assert(callStmt->gtOper == GT_STMT);
impCurStmtOffs = callStmt->gtStmt.gtStmtILoffsx;
assert(offs == BAD_IL_OFFSET || (offs & IL_OFFSETX_BITS) == 0);
IL_OFFSETX stkBit = (verCurrentState.esStackDepth > 0) ? IL_OFFSETX_STKBIT : 0;
impCurStmtOffs = offs | stkBit;
/*****************************************************************************
 * Returns current IL offset with stack-empty and call-instruction info incorporated
 */
inline IL_OFFSETX Compiler::impCurILOffset(IL_OFFSET offs, bool callInstruction)
if (compIsForInlining())
return BAD_IL_OFFSET;
assert(offs == BAD_IL_OFFSET || (offs & IL_OFFSETX_BITS) == 0);
IL_OFFSETX stkBit             = (verCurrentState.esStackDepth > 0) ? IL_OFFSETX_STKBIT : 0;
IL_OFFSETX callInstructionBit = callInstruction ? IL_OFFSETX_CALLINSTRUCTIONBIT : 0;
return offs | stkBit | callInstructionBit;
//------------------------------------------------------------------------
// impCanSpillNow: check whether it is possible to spill all values from the
//    eval stack to local variables.
//
// Arguments:
//    prevOpcode - last importer opcode
//
// Return Value:
//    true if it is legal, false if it could be a sequence that we do not want to divide.
bool Compiler::impCanSpillNow(OPCODE prevOpcode)
// Don't spill after ldtoken, newarr and newobj, because it could be a part of the InitializeArray sequence.
// Avoid breaking it up to guarantee that impInitializeArrayIntrinsic can succeed.
return (prevOpcode != CEE_LDTOKEN) && (prevOpcode != CEE_NEWARR) && (prevOpcode != CEE_NEWOBJ);
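// For reference, the sequence being protected looks like this (a sketch):
//
//     ldc.i4   <length>
//     newarr   <elemType>       // or newobj for MD arrays
//     dup
//     ldtoken  <field handle>
//     call     InitializeArray
//
// A spill between these opcodes would break the pattern that
// impInitializeArrayIntrinsic matches against.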
/*****************************************************************************
 * Remember the instr offset for the statements
 *
 * When we do impAppendTree(tree), we can't set tree->gtStmtLastILoffs to
 * impCurOpcOffs, if the append was done because of a partial stack spill,
 * as some of the trees corresponding to code up to impCurOpcOffs might
 * still be sitting on the stack.
 * So we delay marking of gtStmtLastILoffs until impNoteLastILoffs().
 * This should be called when an opcode finally/explicitly causes
 * impAppendTree(tree) to be called (as opposed to being called because of
 * a spill caused by the opcode)
 */
void Compiler::impNoteLastILoffs()
if (impLastILoffsStmt == nullptr)
// We should have added a statement for the current basic block
// Is this assert correct?
assert(impTreeLast);
assert(impTreeLast->gtOper == GT_STMT);
impTreeLast->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
impLastILoffsStmt->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
impLastILoffsStmt = nullptr;
/*****************************************************************************
 * We don't create any GenTree (excluding spills) for a branch.
 * For debugging info, we need a placeholder so that we can note
 * the IL offset in gtStmt.gtStmtOffs. So append an empty statement.
 */
void Compiler::impNoteBranchOffs()
if (opts.compDbgCode)
impAppendTree(gtNewNothingNode(), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
/*****************************************************************************
 * Locate the next stmt boundary for which we need to record info.
 * We will have to spill the stack at such boundaries if it is not
 * already empty.
 * Returns the next stmt boundary (after the start of the block)
 */
unsigned Compiler::impInitBlockLineInfo()
/* Assume the block does not correspond with any IL offset. This prevents
   us from reporting extra offsets. Extra mappings can cause confusing
   stepping, especially if the extra mapping is a jump-target, and the
   debugger does not ignore extra mappings, but instead rewinds to the
   nearest known offset */
impCurStmtOffsSet(BAD_IL_OFFSET);
if (compIsForInlining())
IL_OFFSET blockOffs = compCurBB->bbCodeOffs;
if ((verCurrentState.esStackDepth == 0) && (info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES))
impCurStmtOffsSet(blockOffs);
if (false && (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES))
impCurStmtOffsSet(blockOffs);
/* Always report IL offset 0 or some tests get confused.
   Probably a good idea anyway */
impCurStmtOffsSet(blockOffs);
if (!info.compStmtOffsetsCount)
/* Find the lowest explicit stmt boundary within the block */
/* Start looking at an entry that is based on our instr offset */
unsigned index = (info.compStmtOffsetsCount * blockOffs) / info.compILCodeSize;
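// For example (a sketch): with 10 recorded boundaries over 200 bytes of IL,
// a block at offset 60 starts the search near index (10 * 60) / 200 = 3.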
if (index >= info.compStmtOffsetsCount)
index = info.compStmtOffsetsCount - 1;
/* If we've guessed too far, back up */
while (index > 0 && info.compStmtOffsets[index - 1] >= blockOffs)
/* If we guessed short, advance ahead */
while (info.compStmtOffsets[index] < blockOffs)
if (index == info.compStmtOffsetsCount)
return info.compStmtOffsetsCount;
assert(index < info.compStmtOffsetsCount);
if (info.compStmtOffsets[index] == blockOffs)
/* There is an explicit boundary for the start of this basic block.
   So we will start with bbCodeOffs. Else we will wait until we
   get to the next explicit boundary */
impCurStmtOffsSet(blockOffs);
/*****************************************************************************/
static inline bool impOpcodeIsCallOpcode(OPCODE opcode)
/*****************************************************************************/
static inline bool impOpcodeIsCallSiteBoundary(OPCODE opcode)
/*****************************************************************************/
// One might think it is worth caching these values, but results indicate
// that it isn't.
// In addition, caching them causes SuperPMI to be unable to completely
// encapsulate an individual method context.
CORINFO_CLASS_HANDLE Compiler::impGetRefAnyClass()
CORINFO_CLASS_HANDLE refAnyClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPED_BYREF);
assert(refAnyClass != (CORINFO_CLASS_HANDLE) nullptr);
CORINFO_CLASS_HANDLE Compiler::impGetTypeHandleClass()
CORINFO_CLASS_HANDLE typeHandleClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPE_HANDLE);
assert(typeHandleClass != (CORINFO_CLASS_HANDLE) nullptr);
return typeHandleClass;
CORINFO_CLASS_HANDLE Compiler::impGetRuntimeArgumentHandle()
CORINFO_CLASS_HANDLE argIteratorClass = info.compCompHnd->getBuiltinClass(CLASSID_ARGUMENT_HANDLE);
assert(argIteratorClass != (CORINFO_CLASS_HANDLE) nullptr);
return argIteratorClass;
CORINFO_CLASS_HANDLE Compiler::impGetStringClass()
CORINFO_CLASS_HANDLE stringClass = info.compCompHnd->getBuiltinClass(CLASSID_STRING);
assert(stringClass != (CORINFO_CLASS_HANDLE) nullptr);
CORINFO_CLASS_HANDLE Compiler::impGetObjectClass()
CORINFO_CLASS_HANDLE objectClass = info.compCompHnd->getBuiltinClass(CLASSID_SYSTEM_OBJECT);
assert(objectClass != (CORINFO_CLASS_HANDLE) nullptr);
/*****************************************************************************
 * "&var" can be used either as TYP_BYREF or TYP_I_IMPL, but we
 * set its type to TYP_BYREF when we create it. We know if it can be
 * changed to TYP_I_IMPL only at the point where we use it.
 */
void Compiler::impBashVarAddrsToI(GenTree* tree1, GenTree* tree2)
if (tree1->IsVarAddr())
tree1->gtType = TYP_I_IMPL;
if (tree2 && tree2->IsVarAddr())
tree2->gtType = TYP_I_IMPL;
/*****************************************************************************
 * TYP_INT and TYP_I_IMPL can be used almost interchangeably, but we want
 * to make that an explicit cast in our trees, so any implicit casts that
 * exist in the IL (at least on 64-bit where TYP_I_IMPL != TYP_INT) are
 * turned into explicit casts here.
 * We also allow an implicit conversion of a ldnull into a TYP_I_IMPL(0)
 */
GenTree* Compiler::impImplicitIorI4Cast(GenTree* tree, var_types dstTyp)
var_types currType   = genActualType(tree->gtType);
var_types wantedType = genActualType(dstTyp);
if (wantedType != currType)
// Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
if ((tree->OperGet() == GT_CNS_INT) && varTypeIsI(dstTyp))
if (!varTypeIsI(tree->gtType) || ((tree->gtType == TYP_REF) && (tree->gtIntCon.gtIconVal == 0)))
tree->gtType = TYP_I_IMPL;
#ifdef _TARGET_64BIT_
else if (varTypeIsI(wantedType) && (currType == TYP_INT))
// Note that this allows TYP_INT to be cast to a TYP_I_IMPL when wantedType is a TYP_BYREF or TYP_REF
tree = gtNewCastNode(TYP_I_IMPL, tree, false, TYP_I_IMPL);
else if ((wantedType == TYP_INT) && varTypeIsI(currType))
// Note that this allows TYP_BYREF or TYP_REF to be cast to a TYP_INT
tree = gtNewCastNode(TYP_INT, tree, false, TYP_INT);
#endif // _TARGET_64BIT_
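// For example (a sketch): on a 64-bit target, adding a TYP_INT index to a
// TYP_BYREF pointer routes the index through gtNewCastNode(TYP_I_IMPL, ...)
// so that the widening is explicit in the tree.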
/*****************************************************************************
 * TYP_FLOAT and TYP_DOUBLE can be used almost interchangeably in some cases,
 * but we want to make that an explicit cast in our trees, so any implicit casts
 * that exist in the IL are turned into explicit casts here.
 */
GenTree* Compiler::impImplicitR4orR8Cast(GenTree* tree, var_types dstTyp)
if (varTypeIsFloating(tree) && varTypeIsFloating(dstTyp) && (dstTyp != tree->gtType))
tree = gtNewCastNode(dstTyp, tree, false, dstTyp);
//------------------------------------------------------------------------
// impInitializeArrayIntrinsic: Attempts to replace a call to InitializeArray
//    with a GT_COPYBLK node.
//
// Arguments:
//    sig - The InitializeArray signature.
//
// Return Value:
//    A pointer to the newly created GT_COPYBLK node if the replacement succeeds or
//    nullptr otherwise.
//
// Notes:
//    The function recognizes the following IL pattern:
//      ldc <length> or a list of ldc <lower bound>/<length>
//      newarr or newobj
//      dup
//      ldtoken <field handle>
//      call InitializeArray
//    The lower bounds need not be constant except when the array rank is 1.
//    The function recognizes all kinds of arrays thus enabling a small runtime
//    such as CoreRT to skip providing an implementation for InitializeArray.
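//
//    For example (illustrative only), the C# initializer
//
//        static readonly int[] s_data = new int[] { 1, 2, 3 };
//
//    typically compiles to the IL pattern above; the code below then replaces
//    the InitializeArray call with a single block copy from the PE data
//    section into the newly allocated array.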
GenTree* Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig)
assert(sig->numArgs == 2);
GenTree* fieldTokenNode = impStackTop(0).val;
GenTree* arrayLocalNode = impStackTop(1).val;
// Verify that the field token is known and valid. Note that it's also
// possible for the token to come from reflection, in which case we cannot do
// the optimization and must therefore revert to calling the helper. You can
// see an example of this in bvt\DynIL\initarray2.exe (in Main).
// Check to see if the ldtoken helper call is what we see here.
if (fieldTokenNode->gtOper != GT_CALL || (fieldTokenNode->gtCall.gtCallType != CT_HELPER) ||
    (fieldTokenNode->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD)))
// Strip helper call away
fieldTokenNode = fieldTokenNode->gtCall.gtCallArgs->Current();
if (fieldTokenNode->gtOper == GT_IND)
fieldTokenNode = fieldTokenNode->gtOp.gtOp1;
// Check for constant
if (fieldTokenNode->gtOper != GT_CNS_INT)
CORINFO_FIELD_HANDLE fieldToken = (CORINFO_FIELD_HANDLE)fieldTokenNode->gtIntCon.gtCompileTimeHandle;
if (!fieldTokenNode->IsIconHandle(GTF_ICON_FIELD_HDL) || (fieldToken == nullptr))
// We need to get the number of elements in the array and the size of each element.
// We verify that the newarr statement is exactly what we expect it to be.
// If it's not then we just return nullptr and we don't optimize this call.
// It is possible that we don't have any statements in the block yet.
if (impTreeLast->gtOper != GT_STMT)
assert(impTreeLast->gtOper == GT_BEG_STMTS);
// We start by looking at the last statement, making sure it's an assignment, and
// that the target of the assignment is the array passed to InitializeArray.
GenTree* arrayAssignment = impTreeLast->gtStmt.gtStmtExpr;
if ((arrayAssignment->gtOper != GT_ASG) || (arrayAssignment->gtOp.gtOp1->gtOper != GT_LCL_VAR) ||
    (arrayLocalNode->gtOper != GT_LCL_VAR) ||
    (arrayAssignment->gtOp.gtOp1->gtLclVarCommon.gtLclNum != arrayLocalNode->gtLclVarCommon.gtLclNum))
// Make sure that the object being assigned is a helper call.
GenTree* newArrayCall = arrayAssignment->gtOp.gtOp2;
if ((newArrayCall->gtOper != GT_CALL) || (newArrayCall->gtCall.gtCallType != CT_HELPER))
// Verify that it is one of the new array helpers.
bool isMDArray = false;
if (newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_DIRECT) &&
    newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_OBJ) &&
    newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_VC) &&
    newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_ALIGN8)
#ifdef FEATURE_READYTORUN_COMPILER
    && newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_R2R_DIRECT) &&
    newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1)
#endif
    )
if (newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEW_MDARR_NONVARARG))
CORINFO_CLASS_HANDLE arrayClsHnd = (CORINFO_CLASS_HANDLE)newArrayCall->gtCall.compileTimeHelperArgumentHandle;
// Make sure we found a compile time handle to the array
S_UINT32 numElements;
rank = info.compCompHnd->getArrayRank(arrayClsHnd);
GenTreeArgList* tokenArg = newArrayCall->gtCall.gtCallArgs;
assert(tokenArg != nullptr);
GenTreeArgList* numArgsArg = tokenArg->Rest();
assert(numArgsArg != nullptr);
GenTreeArgList* argsArg = numArgsArg->Rest();
assert(argsArg != nullptr);
// The number of arguments should be a constant between 1 and 64. The rank can't be 0
// so at least one length must be present and the rank can't exceed 32 so there can
// be at most 64 arguments - 32 lengths and 32 lower bounds.
if ((!numArgsArg->Current()->IsCnsIntOrI()) || (numArgsArg->Current()->AsIntCon()->IconValue() < 1) ||
    (numArgsArg->Current()->AsIntCon()->IconValue() > 64))
unsigned numArgs = static_cast<unsigned>(numArgsArg->Current()->AsIntCon()->IconValue());
bool lowerBoundsSpecified;
if (numArgs == rank * 2)
lowerBoundsSpecified = true;
else if (numArgs == rank)
lowerBoundsSpecified = false;
// If the rank is 1 and a lower bound isn't specified then the runtime creates
// a SDArray. Note that even if a lower bound is specified it can be 0 and then
// we get a SDArray as well, see the for loop below.
// The rank is known to be at least 1 so we can start with numElements being 1
// to avoid the need to special case the first dimension.
numElements = S_UINT32(1);
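// For example (a sketch): for "new int[2, 3]" the loop below folds
// numElements = 1 * 2 * 3 = 6.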
static bool IsArgsFieldInit(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs)
return (tree->OperGet() == GT_ASG) && IsArgsFieldIndir(tree->gtGetOp1(), index, lvaNewObjArrayArgs) &&
       IsArgsAddr(tree->gtGetOp1()->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs);
static bool IsArgsFieldIndir(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs)
return (tree->OperGet() == GT_IND) && (tree->gtGetOp1()->OperGet() == GT_ADD) &&
       (tree->gtGetOp1()->gtGetOp2()->IsIntegralConst(sizeof(INT32) * index)) &&
       IsArgsAddr(tree->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs);
static bool IsArgsAddr(GenTree* tree, unsigned lvaNewObjArrayArgs)
return (tree->OperGet() == GT_ADDR) && (tree->gtGetOp1()->OperGet() == GT_LCL_VAR) &&
       (tree->gtGetOp1()->AsLclVar()->GetLclNum() == lvaNewObjArrayArgs);
static bool IsComma(GenTree* tree)
return (tree != nullptr) && (tree->OperGet() == GT_COMMA);
unsigned argIndex = 0;
for (comma = argsArg->Current(); Match::IsComma(comma); comma = comma->gtGetOp2())
if (lowerBoundsSpecified)
// In general lower bounds can be ignored because they're not needed to
// calculate the total number of elements. But for single dimensional arrays
// we need to know if the lower bound is 0 because in this case the runtime
// creates a SDArray and this affects the way the array data offset is calculated.
GenTree* lowerBoundAssign = comma->gtGetOp1();
assert(Match::IsArgsFieldInit(lowerBoundAssign, argIndex, lvaNewObjArrayArgs));
GenTree* lowerBoundNode = lowerBoundAssign->gtGetOp2();
if (lowerBoundNode->IsIntegralConst(0))
comma = comma->gtGetOp2();
GenTree* lengthNodeAssign = comma->gtGetOp1();
assert(Match::IsArgsFieldInit(lengthNodeAssign, argIndex, lvaNewObjArrayArgs));
GenTree* lengthNode = lengthNodeAssign->gtGetOp2();
if (!lengthNode->IsCnsIntOrI())
numElements *= S_SIZE_T(lengthNode->AsIntCon()->IconValue());
assert((comma != nullptr) && Match::IsArgsAddr(comma, lvaNewObjArrayArgs));
if (argIndex != numArgs)
// Make sure there are exactly two arguments: the array class and
// the number of elements.
GenTree* arrayLengthNode;
GenTreeArgList* args = newArrayCall->gtCall.gtCallArgs;
#ifdef FEATURE_READYTORUN_COMPILER
if (newArrayCall->gtCall.gtCallMethHnd == eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1))
// Array length is 1st argument for readytorun helper
arrayLengthNode = args->Current();
// Array length is 2nd argument for regular helper
arrayLengthNode = args->Rest()->Current();
// Make sure that the number of elements looks valid.
if (arrayLengthNode->gtOper != GT_CNS_INT)
numElements = S_SIZE_T(arrayLengthNode->gtIntCon.gtIconVal);
if (!info.compCompHnd->isSDArray(arrayClsHnd))
CORINFO_CLASS_HANDLE elemClsHnd;
var_types elementType = JITtype2varType(info.compCompHnd->getChildType(arrayClsHnd, &elemClsHnd));
// Note that genTypeSize will return zero for non primitive types, which is exactly
// what we want (size will then be 0, and we will catch this in the conditional below).
// Note that we don't expect this to fail for valid binaries, so we assert in the
// non-verification case (the verification case should not assert but rather correctly
// handle bad binaries). This assert is not guarding any specific invariant, but rather
// saying that we don't expect this to happen, and if it is hit, we need to investigate
// why.
S_UINT32 elemSize(genTypeSize(elementType));
S_UINT32 size = elemSize * S_UINT32(numElements);
if (size.IsOverflow())
if ((size.Value() == 0) || (varTypeIsGC(elementType)))
assert(verNeedsVerification());
void* initData = info.compCompHnd->getArrayInitializationData(fieldToken, size.Value());
// At this point we are ready to commit to implementing the InitializeArray
// intrinsic using a struct assignment. Pop the arguments from the stack and
// return the struct assignment node.
const unsigned blkSize = size.Value();
unsigned dataOffset;
if (isMDArray)
dataOffset = eeGetMDArrayDataOffset(elementType, rank);
else
dataOffset = eeGetArrayDataOffset(elementType);
GenTree* dst = gtNewOperNode(GT_ADD, TYP_BYREF, arrayLocalNode, gtNewIconNode(dataOffset, TYP_I_IMPL));
GenTree* blk = gtNewBlockVal(dst, blkSize);
GenTree* src = gtNewIndOfIconHandleNode(TYP_STRUCT, (size_t)initData, GTF_ICON_STATIC_HDL, false);
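// The node built below behaves like (a sketch):
//
//     memcpy((BYTE*)array + dataOffset, initData, blkSize);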
return gtNewBlkOpNode(blk,     // dst
                      src,     // src
                      blkSize, // size
                      false,   // not volatile
                      true);   // copyBlock
//------------------------------------------------------------------------
// impIntrinsic: possibly expand intrinsic call into alternate IR sequence
//
// Arguments:
//    newobjThis - for constructor calls, the tree for the newly allocated object
//    clsHnd - handle for the intrinsic method's class
//    method - handle for the intrinsic method
//    sig - signature of the intrinsic method
//    methodFlags - CORINFO_FLG_XXX flags of the intrinsic method
//    memberRef - the token for the intrinsic method
//    readonlyCall - true if call has a readonly prefix
//    tailCall - true if call is in tail position
//    pConstrainedResolvedToken -- resolved token for constrained call, or nullptr
//       if call is not constrained
//    constraintCallThisTransform -- this transform to apply for a constrained call
//    pIntrinsicID [OUT] -- intrinsic ID (see enumeration in corinfo.h)
//       for "traditional" jit intrinsics
//    isSpecialIntrinsic [OUT] -- set true if intrinsic expansion is a call
//       that is amenable to special downstream optimization opportunities
//
// Returns:
//    IR tree to use in place of the call, or nullptr if the jit should treat
//    the intrinsic call like a normal call.
//
//    pIntrinsicID set to non-illegal value if the call is recognized as a
//    traditional jit intrinsic, even if the intrinsic is not expanded.
//
//    isSpecial set true if the expansion is subject to special
//    optimizations later in the jit processing
//
// Notes:
//    On success the IR tree may be a call to a different method or an inline
//    sequence. If it is a call, then the intrinsic processing here is responsible
//    for handling all the special cases, as upon return to impImportCall
//    expanded intrinsics bypass most of the normal call processing.
//
//    Intrinsics are generally not recognized in minopts and debug codegen.
//
//    However, certain traditional intrinsics are identified as "must expand"
//    if there is no fallback implementation to invoke; these must be handled
//    in all codegen modes.
//
//    New style intrinsics (where the fallback implementation is in IL) are
//    identified as "must expand" if they are invoked from within their
//    own method bodies.
GenTree* Compiler::impIntrinsic(GenTree*                newobjThis,
                                CORINFO_CLASS_HANDLE    clsHnd,
                                CORINFO_METHOD_HANDLE   method,
                                CORINFO_SIG_INFO*       sig,
                                unsigned                methodFlags,
                                int                     memberRef,
                                bool                    readonlyCall,
                                bool                    tailCall,
                                CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
                                CORINFO_THIS_TRANSFORM  constraintCallThisTransform,
                                CorInfoIntrinsics*      pIntrinsicID,
                                bool*                   isSpecialIntrinsic)
assert((methodFlags & (CORINFO_FLG_INTRINSIC | CORINFO_FLG_JIT_INTRINSIC)) != 0);
bool              mustExpand  = false;
bool              isSpecial   = false;
CorInfoIntrinsics intrinsicID = CORINFO_INTRINSIC_Illegal;
NamedIntrinsic    ni          = NI_Illegal;
if ((methodFlags & CORINFO_FLG_INTRINSIC) != 0)
intrinsicID = info.compCompHnd->getIntrinsicID(method, &mustExpand);
if ((methodFlags & CORINFO_FLG_JIT_INTRINSIC) != 0)
// The recursive calls to Jit intrinsics are must-expand by convention.
mustExpand = mustExpand || gtIsRecursiveCall(method);
if (intrinsicID == CORINFO_INTRINSIC_Illegal)
ni = lookupNamedIntrinsic(method);
#ifdef FEATURE_HW_INTRINSICS
#if defined(_TARGET_ARM64_)
case NI_Base_Vector64_AsByte:
case NI_Base_Vector64_AsInt16:
case NI_Base_Vector64_AsInt32:
case NI_Base_Vector64_AsSByte:
case NI_Base_Vector64_AsSingle:
case NI_Base_Vector64_AsUInt16:
case NI_Base_Vector64_AsUInt32:
#endif // _TARGET_ARM64_
case NI_Base_Vector128_As:
case NI_Base_Vector128_AsByte:
case NI_Base_Vector128_AsDouble:
case NI_Base_Vector128_AsInt16:
case NI_Base_Vector128_AsInt32:
case NI_Base_Vector128_AsInt64:
case NI_Base_Vector128_AsSByte:
case NI_Base_Vector128_AsSingle:
case NI_Base_Vector128_AsUInt16:
case NI_Base_Vector128_AsUInt32:
case NI_Base_Vector128_AsUInt64:
#if defined(_TARGET_XARCH_)
case NI_Base_Vector128_Zero:
case NI_Base_Vector256_As:
case NI_Base_Vector256_AsByte:
case NI_Base_Vector256_AsDouble:
case NI_Base_Vector256_AsInt16:
case NI_Base_Vector256_AsInt32:
case NI_Base_Vector256_AsInt64:
case NI_Base_Vector256_AsSByte:
case NI_Base_Vector256_AsSingle:
case NI_Base_Vector256_AsUInt16:
case NI_Base_Vector256_AsUInt32:
case NI_Base_Vector256_AsUInt64:
case NI_Base_Vector256_Zero:
#endif // _TARGET_XARCH_
return impBaseIntrinsic(ni, method, sig);
if ((ni > NI_HW_INTRINSIC_START) && (ni < NI_HW_INTRINSIC_END))
GenTree* hwintrinsic = impHWIntrinsic(ni, method, sig, mustExpand);
if (mustExpand && (hwintrinsic == nullptr))
return impUnsupportedHWIntrinsic(CORINFO_HELP_THROW_NOT_IMPLEMENTED, method, sig, mustExpand);
#endif // FEATURE_HW_INTRINSICS
*pIntrinsicID = intrinsicID;
#ifndef _TARGET_ARM_
genTreeOps interlockedOperator;
#endif
if (intrinsicID == CORINFO_INTRINSIC_StubHelpers_GetStubContext)
// must be done regardless of DbgCode and MinOpts
return gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL);
#ifdef _TARGET_64BIT_
if (intrinsicID == CORINFO_INTRINSIC_StubHelpers_GetStubContextAddr)
// must be done regardless of DbgCode and MinOpts
return gtNewOperNode(GT_ADDR, TYP_I_IMPL, gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL));
#else
assert(intrinsicID != CORINFO_INTRINSIC_StubHelpers_GetStubContextAddr);
#endif
GenTree* retNode = nullptr;
// Under debug and minopts, only expand what is required.
if (!mustExpand && (opts.compDbgCode || opts.MinOpts()))
*pIntrinsicID = CORINFO_INTRINSIC_Illegal;
var_types callType = JITtype2varType(sig->retType);
/* First do the intrinsics which are always smaller than a call */
switch (intrinsicID)
case CORINFO_INTRINSIC_Sin:
case CORINFO_INTRINSIC_Cbrt:
case CORINFO_INTRINSIC_Sqrt:
case CORINFO_INTRINSIC_Abs:
case CORINFO_INTRINSIC_Cos:
case CORINFO_INTRINSIC_Round:
case CORINFO_INTRINSIC_Cosh:
case CORINFO_INTRINSIC_Sinh:
case CORINFO_INTRINSIC_Tan:
case CORINFO_INTRINSIC_Tanh:
case CORINFO_INTRINSIC_Asin:
case CORINFO_INTRINSIC_Asinh:
case CORINFO_INTRINSIC_Acos:
case CORINFO_INTRINSIC_Acosh:
case CORINFO_INTRINSIC_Atan:
case CORINFO_INTRINSIC_Atan2:
case CORINFO_INTRINSIC_Atanh:
case CORINFO_INTRINSIC_Log10:
case CORINFO_INTRINSIC_Pow:
case CORINFO_INTRINSIC_Exp:
case CORINFO_INTRINSIC_Ceiling:
case CORINFO_INTRINSIC_Floor:
retNode = impMathIntrinsic(method, sig, callType, intrinsicID, tailCall);
#if defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_)
// TODO-ARM-CQ: reenable treating Interlocked operation as intrinsic
// Note that CORINFO_INTRINSIC_InterlockedAdd32/64 are not actually used.
// Anyway, we can import them as XADD and leave it to lowering/codegen to perform
// whatever optimizations may arise from the fact that result value is not used.
case CORINFO_INTRINSIC_InterlockedAdd32:
case CORINFO_INTRINSIC_InterlockedXAdd32:
interlockedOperator = GT_XADD;
goto InterlockedBinOpCommon;
case CORINFO_INTRINSIC_InterlockedXchg32:
interlockedOperator = GT_XCHG;
goto InterlockedBinOpCommon;
#ifdef _TARGET_64BIT_
case CORINFO_INTRINSIC_InterlockedAdd64:
case CORINFO_INTRINSIC_InterlockedXAdd64:
interlockedOperator = GT_XADD;
goto InterlockedBinOpCommon;
case CORINFO_INTRINSIC_InterlockedXchg64:
interlockedOperator = GT_XCHG;
goto InterlockedBinOpCommon;
#endif // _TARGET_64BIT_
InterlockedBinOpCommon:
assert(callType != TYP_STRUCT);
assert(sig->numArgs == 2);
op2 = impPopStack().val;
op1 = impPopStack().val;
// This creates:
//   val
// XAdd
//   addr
//     field (for example)
//
// In the case where the first argument is the address of a local, we might
// want to make this *not* make the var address-taken -- but atomic instructions
// on a local are probably pretty useless anyway, so we probably don't care.
op1 = gtNewOperNode(interlockedOperator, genActualType(callType), op1, op2);
op1->gtFlags |= GTF_GLOB_REF | GTF_ASG;
#endif // defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_)
case CORINFO_INTRINSIC_MemoryBarrier:
assert(sig->numArgs == 0);
op1 = new (this, GT_MEMORYBARRIER) GenTree(GT_MEMORYBARRIER, TYP_VOID);
op1->gtFlags |= GTF_GLOB_REF | GTF_ASG;
#if defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_)
// TODO-ARM-CQ: reenable treating InterlockedCmpXchg32 operation as intrinsic
case CORINFO_INTRINSIC_InterlockedCmpXchg32:
#ifdef _TARGET_64BIT_
case CORINFO_INTRINSIC_InterlockedCmpXchg64:
#endif
assert(callType != TYP_STRUCT);
assert(sig->numArgs == 3);
op3 = impPopStack().val; // comparand
op2 = impPopStack().val; // value
op1 = impPopStack().val; // location
GenTree* node = new (this, GT_CMPXCHG) GenTreeCmpXchg(genActualType(callType), op1, op2, op3);
node->gtCmpXchg.gtOpLocation->gtFlags |= GTF_DONT_CSE;
#endif // defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_)
case CORINFO_INTRINSIC_StringLength:
op1 = impPopStack().val;
if (!opts.MinOpts() && !opts.compDbgCode)
GenTreeArrLen* arrLen = gtNewArrLen(TYP_INT, op1, OFFSETOF__CORINFO_String__stringLen);
/* Create the expression "*(str_addr + stringLengthOffset)" */
op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
                    gtNewIconNode(OFFSETOF__CORINFO_String__stringLen, TYP_I_IMPL));
op1 = gtNewOperNode(GT_IND, TYP_INT, op1);
// Getting the length of a null string should throw
op1->gtFlags |= GTF_EXCEPT;
case CORINFO_INTRINSIC_StringGetChar:
op2 = impPopStack().val;
op1 = impPopStack().val;
op1 = gtNewIndexRef(TYP_USHORT, op1, op2);
op1->gtFlags |= GTF_INX_STRING_LAYOUT;
case CORINFO_INTRINSIC_InitializeArray:
retNode = impInitializeArrayIntrinsic(sig);
case CORINFO_INTRINSIC_Array_Address:
case CORINFO_INTRINSIC_Array_Get:
case CORINFO_INTRINSIC_Array_Set:
retNode = impArrayAccessIntrinsic(clsHnd, sig, memberRef, readonlyCall, intrinsicID);
case CORINFO_INTRINSIC_GetTypeFromHandle:
op1 = impStackTop(0).val;
CorInfoHelpFunc typeHandleHelper;
if (op1->gtOper == GT_CALL && (op1->gtCall.gtCallType == CT_HELPER) &&
    gtIsTypeHandleToRuntimeTypeHandleHelper(op1->AsCall(), &typeHandleHelper))
op1 = impPopStack().val;
// Replace helper with a more specialized helper that returns RuntimeType
if (typeHandleHelper == CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE)
typeHandleHelper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE;
assert(typeHandleHelper == CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE_MAYBENULL);
typeHandleHelper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE_MAYBENULL;
assert(op1->gtCall.gtCallArgs->gtOp.gtOp2 == nullptr);
op1 = gtNewHelperCallNode(typeHandleHelper, TYP_REF, op1->gtCall.gtCallArgs);
op1->gtType = TYP_REF;
// Call the regular function.
case CORINFO_INTRINSIC_RTH_GetValueInternal:
op1 = impStackTop(0).val;
if (op1->gtOper == GT_CALL && (op1->gtCall.gtCallType == CT_HELPER) &&
    gtIsTypeHandleToRuntimeTypeHandleHelper(op1->AsCall()))
// Old tree:
//   Helper-RuntimeTypeHandle -> TreeToGetNativeTypeHandle
//
// New tree:
//   TreeToGetNativeTypeHandle
//
// Remove call to helper and return the native TypeHandle pointer that was the parameter
op1 = impPopStack().val;
// Get native TypeHandle argument to old helper
op1 = op1->gtCall.gtCallArgs;
assert(op1->OperIsList());
assert(op1->gtOp.gtOp2 == nullptr);
op1 = op1->gtOp.gtOp1;
// Call the regular function.
case CORINFO_INTRINSIC_Object_GetType:
JITDUMP("\n impIntrinsic: call to Object.GetType\n");
op1 = impStackTop(0).val;
// If we're calling GetType on a boxed value, just get the type directly.
if (op1->IsBoxedValue())
JITDUMP("Attempting to optimize box(...).getType() to direct type construction\n");
// Try and clean up the box. Obtain the handle we
// were going to pass to the newobj.
GenTree* boxTypeHandle = gtTryRemoveBoxUpstreamEffects(op1, BR_REMOVE_AND_NARROW_WANT_TYPE_HANDLE);
if (boxTypeHandle != nullptr)
// Note we don't need to play the TYP_STRUCT games here like
// we do for LDTOKEN, since the return value of this operator is Type,
// not RuntimeTypeHandle.
GenTreeArgList* helperArgs = gtNewArgList(boxTypeHandle);
GenTree* runtimeType =
    gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE, TYP_REF, helperArgs);
retNode = runtimeType;
// If we have a constrained callvirt with a "box this" transform
// we know we have a value class and hence an exact type.
//
// If so, instead of boxing and then extracting the type, just
// construct the type directly.
if ((retNode == nullptr) && (pConstrainedResolvedToken != nullptr) &&
    (constraintCallThisTransform == CORINFO_BOX_THIS))
// Ensure this is one of the simple box cases (in particular, rule out nullables).
const CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pConstrainedResolvedToken->hClass);
const bool isSafeToOptimize = (boxHelper == CORINFO_HELP_BOX);
if (isSafeToOptimize)
JITDUMP("Optimizing constrained box-this obj.getType() to direct type construction\n");
GenTree* typeHandleOp =
    impTokenToHandle(pConstrainedResolvedToken, nullptr, TRUE /* mustRestoreHandle */);
if (typeHandleOp == nullptr)
assert(compDonotInline());
GenTreeArgList* helperArgs = gtNewArgList(typeHandleOp);
GenTree* runtimeType =
    gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE, TYP_REF, helperArgs);
retNode = runtimeType;
if (retNode != nullptr)
JITDUMP("Optimized result for call to GetType is\n");
gtDispTree(retNode);
// Else expand as an intrinsic, unless the call is constrained,
// in which case we defer expansion to allow impImportCall do the
// special constraint processing.
if ((retNode == nullptr) && (pConstrainedResolvedToken == nullptr))
JITDUMP("Expanding as special intrinsic\n");
op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, intrinsicID, method);
// Set the CALL flag to indicate that the operator is implemented by a call.
// Set also the EXCEPTION flag because the native implementation of
// CORINFO_INTRINSIC_Object_GetType intrinsic can throw NullReferenceException.
op1->gtFlags |= (GTF_CALL | GTF_EXCEPT);
// Might be further optimizable, so arrange to leave a mark behind
if (retNode == nullptr)
JITDUMP("Leaving as normal call\n");
// Might be further optimizable, so arrange to leave a mark behind
// Implement ByReference Ctor. This wraps the assignment of the ref into a byref-like field
// in a value type. The canonical example of this is Span<T>. In effect this is just a
// substitution. The parameter byref will be assigned into the newly allocated object.
case CORINFO_INTRINSIC_ByReference_Ctor:
// Remove call to constructor and directly assign the byref passed
// to the call to the first slot of the ByReference struct.
op1 = impPopStack().val;
GenTree* thisptr = newobjThis;
CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(clsHnd, 0);
GenTree* field = gtNewFieldRef(TYP_BYREF, fldHnd, thisptr, 0);
GenTree* assign = gtNewAssignNode(field, op1);
GenTree* byReferenceStruct = gtCloneExpr(thisptr->gtGetOp1());
assert(byReferenceStruct != nullptr);
impPushOnStack(byReferenceStruct, typeInfo(TI_STRUCT, clsHnd));
// Implement ptr value getter for ByReference struct.
case CORINFO_INTRINSIC_ByReference_Value:
op1 = impPopStack().val;
CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(clsHnd, 0);
GenTree* field = gtNewFieldRef(TYP_BYREF, fldHnd, op1, 0);
3846 case CORINFO_INTRINSIC_Span_GetItem:
3847 case CORINFO_INTRINSIC_ReadOnlySpan_GetItem:
3849 // Have index, stack pointer-to Span<T> s on the stack. Expand to:
3853 // BoundsCheck(index, s->_length)
3854 // s->_pointer + index * sizeof(T)
3856 // For ReadOnlySpan<T> -- same expansion, as it now returns a readonly ref
3858 // Signature should show one class type parameter, which
3859 // we need to examine.
3860 assert(sig->sigInst.classInstCount == 1);
3861 CORINFO_CLASS_HANDLE spanElemHnd = sig->sigInst.classInst[0];
3862 const unsigned elemSize = info.compCompHnd->getClassSize(spanElemHnd);
3863 assert(elemSize > 0);
3865 const bool isReadOnly = (intrinsicID == CORINFO_INTRINSIC_ReadOnlySpan_GetItem);
3867 JITDUMP("\nimpIntrinsic: Expanding %sSpan<T>.get_Item, T=%s, sizeof(T)=%u\n", isReadOnly ? "ReadOnly" : "",
3868 info.compCompHnd->getClassName(spanElemHnd), elemSize);
3870 GenTree* index = impPopStack().val;
3871 GenTree* ptrToSpan = impPopStack().val;
3872 GenTree* indexClone = nullptr;
3873 GenTree* ptrToSpanClone = nullptr;
3874 assert(varTypeIsIntegral(index));
3875 assert(ptrToSpan->TypeGet() == TYP_BYREF);
3880 printf("with ptr-to-span\n");
3881 gtDispTree(ptrToSpan);
3882 printf("and index\n");
3885 #endif // defined(DEBUG)
3887 // We need to use both index and ptr-to-span twice, so clone or spill.
3888 index = impCloneExpr(index, &indexClone, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
3889 nullptr DEBUGARG("Span.get_Item index"));
3890 ptrToSpan = impCloneExpr(ptrToSpan, &ptrToSpanClone, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
3891 nullptr DEBUGARG("Span.get_Item ptrToSpan"));
3894 CORINFO_FIELD_HANDLE lengthHnd = info.compCompHnd->getFieldInClass(clsHnd, 1);
3895 const unsigned lengthOffset = info.compCompHnd->getFieldOffset(lengthHnd);
3896 GenTree* length = gtNewFieldRef(TYP_INT, lengthHnd, ptrToSpan, lengthOffset);
3897 GenTree* boundsCheck = new (this, GT_ARR_BOUNDS_CHECK)
3898 GenTreeBoundsChk(GT_ARR_BOUNDS_CHECK, TYP_VOID, index, length, SCK_RNGCHK_FAIL);
3901 GenTree* indexIntPtr = impImplicitIorI4Cast(indexClone, TYP_I_IMPL);
3902 GenTree* sizeofNode = gtNewIconNode(elemSize);
3903 GenTree* mulNode = gtNewOperNode(GT_MUL, TYP_I_IMPL, indexIntPtr, sizeofNode);
3904 CORINFO_FIELD_HANDLE ptrHnd = info.compCompHnd->getFieldInClass(clsHnd, 0);
3905 const unsigned ptrOffset = info.compCompHnd->getFieldOffset(ptrHnd);
3906 GenTree* data = gtNewFieldRef(TYP_BYREF, ptrHnd, ptrToSpanClone, ptrOffset);
3907 GenTree* result = gtNewOperNode(GT_ADD, TYP_BYREF, data, mulNode);
3910 var_types resultType = JITtype2varType(sig->retType);
3911 assert(resultType == result->TypeGet());
3912 retNode = gtNewOperNode(GT_COMMA, resultType, boundsCheck, result);
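                // Hedged example of the resulting tree, assuming T = int (so elemSize == 4):
                //
                //     COMMA(GT_ARR_BOUNDS_CHECK(index, s->_length),
                //           GT_ADD(byref, FIELD(s->_pointer), GT_MUL(index, 4)))
                //
                // i.e. a byref to s._pointer[index * 4], guarded by the range check.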
3917 case CORINFO_INTRINSIC_GetRawHandle:
3919 noway_assert(IsTargetAbi(CORINFO_CORERT_ABI)); // Only CoreRT supports it.
3920 CORINFO_RESOLVED_TOKEN resolvedToken;
3921 resolvedToken.tokenContext = MAKE_METHODCONTEXT(info.compMethodHnd);
3922 resolvedToken.tokenScope = info.compScopeHnd;
3923 resolvedToken.token = memberRef;
3924 resolvedToken.tokenType = CORINFO_TOKENKIND_Method;
3926 CORINFO_GENERICHANDLE_RESULT embedInfo;
3927 info.compCompHnd->expandRawHandleIntrinsic(&resolvedToken, &embedInfo);
3929 GenTree* rawHandle = impLookupToTree(&resolvedToken, &embedInfo.lookup, gtTokenToIconFlags(memberRef),
3930 embedInfo.compileTimeHandle);
3931 if (rawHandle == nullptr)
3936 noway_assert(genTypeSize(rawHandle->TypeGet()) == genTypeSize(TYP_I_IMPL));
3938 unsigned rawHandleSlot = lvaGrabTemp(true DEBUGARG("rawHandle"));
3939 impAssignTempGen(rawHandleSlot, rawHandle, clsHnd, (unsigned)CHECK_SPILL_NONE);
3941 GenTree* lclVar = gtNewLclvNode(rawHandleSlot, TYP_I_IMPL);
3942 GenTree* lclVarAddr = gtNewOperNode(GT_ADDR, TYP_I_IMPL, lclVar);
3943 var_types resultType = JITtype2varType(sig->retType);
3944 retNode = gtNewOperNode(GT_IND, resultType, lclVarAddr);
3949 case CORINFO_INTRINSIC_TypeEQ:
3950 case CORINFO_INTRINSIC_TypeNEQ:
3952 JITDUMP("Importing Type.op_*Equality intrinsic\n");
3953 op1 = impStackTop(1).val;
3954 op2 = impStackTop(0).val;
3955 GenTree* optTree = gtFoldTypeEqualityCall(intrinsicID, op1, op2);
3956 if (optTree != nullptr)
3958 // Success, clean up the evaluation stack.
3962 // See if we can optimize even further, to a handle compare.
3963 optTree = gtFoldTypeCompare(optTree);
3965 // See if we can now fold a handle compare to a constant.
3966 optTree = gtFoldExpr(optTree);
3972 // Retry optimizing these later
3978 case CORINFO_INTRINSIC_GetCurrentManagedThread:
3979 case CORINFO_INTRINSIC_GetManagedThreadId:
3981 // Retry optimizing these during morph
3987 /* Unknown intrinsic */
3988 intrinsicID = CORINFO_INTRINSIC_Illegal;
3992 // Look for new-style jit intrinsics by name
3993 if (ni != NI_Illegal)
3995 assert(retNode == nullptr);
3998 case NI_System_Enum_HasFlag:
4000 GenTree* thisOp = impStackTop(1).val;
4001 GenTree* flagOp = impStackTop(0).val;
4002 GenTree* optTree = gtOptimizeEnumHasFlag(thisOp, flagOp);
4004 if (optTree != nullptr)
4006 // Optimization successful. Pop the stack for real.
4013 // Retry optimizing this during morph.
4020 case NI_MathF_Round:
4023 // Math.Round and MathF.Round used to be traditional JIT intrinsics. In order
4024 // to simplify the transition, we will just treat them as if they were still the
4025 // old intrinsic, CORINFO_INTRINSIC_Round. This should end up flowing properly
4026 // through the math-intrinsic handling below.
4028 retNode = impMathIntrinsic(method, sig, callType, CORINFO_INTRINSIC_Round, tailCall);
4032 case NI_System_Collections_Generic_EqualityComparer_get_Default:
4034 // Flag for later handling during devirtualization.
4039 case NI_System_Buffers_Binary_BinaryPrimitives_ReverseEndianness:
4041 assert(sig->numArgs == 1);
4043 // We expect the return type of the ReverseEndianness routine to match the type of the
4044 // one and only argument to the method. We use a special instruction for 16-bit
4045 // BSWAPs since on x86 processors this is implemented as ROR <16-bit reg>, 8. Additionally,
4046 // we only emit 64-bit BSWAP instructions on 64-bit archs; if we're asked to perform a
4047 // 64-bit byte swap on a 32-bit arch, we'll fall through to the default case in the switch block below.
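                // Worked example of the intended semantics (a sketch; values in hex):
                //   32-bit: ReverseEndianness(0x11223344) == 0x44332211   -> GT_BSWAP
                //   16-bit: ReverseEndianness(0xA1B2)     == 0xB2A1, which x86 encodes
                //           as "ror reg16, 8"                             -> GT_BSWAP16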
4049 switch (sig->retType)
4051 case CorInfoType::CORINFO_TYPE_SHORT:
4052 case CorInfoType::CORINFO_TYPE_USHORT:
4053 retNode = gtNewOperNode(GT_BSWAP16, callType, impPopStack().val);
4056 case CorInfoType::CORINFO_TYPE_INT:
4057 case CorInfoType::CORINFO_TYPE_UINT:
4058 #ifdef _TARGET_64BIT_
4059 case CorInfoType::CORINFO_TYPE_LONG:
4060 case CorInfoType::CORINFO_TYPE_ULONG:
4061 #endif // _TARGET_64BIT_
4062 retNode = gtNewOperNode(GT_BSWAP, callType, impPopStack().val);
4066 // This default case gets hit on 32-bit archs when a call to a 64-bit overload
4067 // of ReverseEndianness is encountered. In that case we'll let JIT treat this as a standard
4068 // method call, where the implementation decomposes the operation into two 32-bit
4069 // bswap routines. If the input to the 64-bit function is a constant, then we rely
4070 // on inlining + constant folding of 32-bit bswaps to effectively constant fold
4071 // the 64-bit call site.
4083 if (mustExpand && (retNode == nullptr))
4085 NO_WAY("JIT must expand the intrinsic!");
4088 // Optionally report if this intrinsic is special
4089 // (that is, potentially re-optimizable during morph).
4090 if (isSpecialIntrinsic != nullptr)
4092 *isSpecialIntrinsic = isSpecial;
4098 #ifdef FEATURE_HW_INTRINSICS
4099 //------------------------------------------------------------------------
4100 // impBaseIntrinsic: dispatch intrinsics to their own implementation
4101 //
4102 // Arguments:
4103 //    intrinsic -- id of the intrinsic function.
4104 //    method    -- method handle of the intrinsic function.
4105 //    sig       -- signature of the intrinsic call
4106 //
4107 // Return Value:
4108 //    the expanded intrinsic.
4110 GenTree* Compiler::impBaseIntrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig)
4112 GenTree* retNode = nullptr;
4119 unsigned simdSize = 0;
4120 var_types baseType = getBaseTypeAndSizeOfSIMDType(sig->retTypeClass, &simdSize);
4121 var_types retType = getSIMDTypeForSize(simdSize);
4125 CORINFO_CLASS_HANDLE thisClass = info.compCompHnd->getArgClass(sig, sig->args);
4126 var_types thisType = getBaseTypeOfSIMDType(thisClass);
4128 if (!varTypeIsArithmetic(thisType))
4134 if (!varTypeIsArithmetic(baseType))
4141 #if defined(_TARGET_ARM64_)
4142 case NI_Base_Vector64_AsByte:
4143 case NI_Base_Vector64_AsInt16:
4144 case NI_Base_Vector64_AsInt32:
4145 case NI_Base_Vector64_AsSByte:
4146 case NI_Base_Vector64_AsSingle:
4147 case NI_Base_Vector64_AsUInt16:
4148 case NI_Base_Vector64_AsUInt32:
4149 #endif // _TARGET_ARM64_
4150 case NI_Base_Vector128_As:
4151 case NI_Base_Vector128_AsByte:
4152 case NI_Base_Vector128_AsDouble:
4153 case NI_Base_Vector128_AsInt16:
4154 case NI_Base_Vector128_AsInt32:
4155 case NI_Base_Vector128_AsInt64:
4156 case NI_Base_Vector128_AsSByte:
4157 case NI_Base_Vector128_AsSingle:
4158 case NI_Base_Vector128_AsUInt16:
4159 case NI_Base_Vector128_AsUInt32:
4160 case NI_Base_Vector128_AsUInt64:
4161 #if defined(_TARGET_XARCH_)
4162 case NI_Base_Vector256_As:
4163 case NI_Base_Vector256_AsByte:
4164 case NI_Base_Vector256_AsDouble:
4165 case NI_Base_Vector256_AsInt16:
4166 case NI_Base_Vector256_AsInt32:
4167 case NI_Base_Vector256_AsInt64:
4168 case NI_Base_Vector256_AsSByte:
4169 case NI_Base_Vector256_AsSingle:
4170 case NI_Base_Vector256_AsUInt16:
4171 case NI_Base_Vector256_AsUInt32:
4172 case NI_Base_Vector256_AsUInt64:
4173 #endif // _TARGET_XARCH_
4175 // We fold away the cast here, as it only exists to satisfy
4176 // the type system. It is safe to do this here since the retNode type
4177 // and the signature return type are both the same TYP_SIMD.
4179 assert(sig->numArgs == 0);
4180 assert(sig->hasThis());
4182 retNode = impSIMDPopStack(retType, true, sig->retTypeClass);
4183 SetOpLclRelatedToSIMDIntrinsic(retNode);
4184 assert(retNode->gtType == getSIMDTypeForSize(getSIMDTypeSizeInBytes(sig->retTypeSigClass)));
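            // Sketch of the effect: for C# such as "Vector128<float> v; ... v.AsInt32()",
            // no conversion node is created; the popped TYP_SIMD16 value is returned
            // unchanged, since the reinterpretation is a no-op at this representation level.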
4188 #ifdef _TARGET_XARCH_
4189 case NI_Base_Vector128_Zero:
4191 assert(sig->numArgs == 0);
4193 if (compSupports(InstructionSet_SSE))
4195 retNode = gtNewSimdHWIntrinsicNode(retType, intrinsic, baseType, simdSize);
4200 case NI_Base_Vector256_Zero:
4202 assert(sig->numArgs == 0);
4204 if (compSupports(InstructionSet_AVX))
4206 retNode = gtNewSimdHWIntrinsicNode(retType, intrinsic, baseType, simdSize);
4210 #endif // _TARGET_XARCH_
4221 #endif // FEATURE_HW_INTRINSICS
4223 GenTree* Compiler::impMathIntrinsic(CORINFO_METHOD_HANDLE method,
4224 CORINFO_SIG_INFO* sig,
4226 CorInfoIntrinsics intrinsicID,
4232 assert(callType != TYP_STRUCT);
4233 assert(IsMathIntrinsic(intrinsicID));
4237 #if !defined(_TARGET_X86_)
4238 // Intrinsics that are not implemented directly by target instructions will
4239 // be re-materialized as user calls in rationalizer. For prefixed tail calls,
4240 // don't do this optimization, because
4241 //  a) it is needed for backward compatibility with desktop .NET 4.6 / 4.6.1, and
4242 //  b) it would be a non-trivial task, or too late, to re-materialize a surviving
4243 //     tail-prefixed GT_INTRINSIC as a tail call in rationalizer.
4244 if (!IsIntrinsicImplementedByUserCall(intrinsicID) || !tailCall)
4245 #else
4246 // On x86 RyuJIT, importing intrinsics that are implemented as user calls can cause incorrect calculation
4247 // of the depth of the stack if these intrinsics are used as arguments to another call. This causes bad
4248 // code generation for certain EH constructs.
4249 if (!IsIntrinsicImplementedByUserCall(intrinsicID))
4250 #endif
4252 switch (sig->numArgs)
4255 op1 = impPopStack().val;
4257 assert(varTypeIsFloating(op1));
4259 if (op1->TypeGet() != callType)
4261 op1 = gtNewCastNode(callType, op1, false, callType);
4264 op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, intrinsicID, method);
4268 op2 = impPopStack().val;
4269 op1 = impPopStack().val;
4271 assert(varTypeIsFloating(op1));
4272 assert(varTypeIsFloating(op2));
4274 if (op2->TypeGet() != callType)
4276 op2 = gtNewCastNode(callType, op2, false, callType);
4278 if (op1->TypeGet() != callType)
4280 op1 = gtNewCastNode(callType, op1, false, callType);
4283 op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, op2, intrinsicID, method);
4287 NO_WAY("Unsupported number of args for Math Intrinsic");
4290 if (IsIntrinsicImplementedByUserCall(intrinsicID))
4292 op1->gtFlags |= GTF_CALL;
4299 //------------------------------------------------------------------------
4300 // lookupNamedIntrinsic: map method to jit named intrinsic value
4301 //
4302 // Arguments:
4303 //    method -- method handle for method
4304 //
4305 // Return Value:
4306 //    Id for the named intrinsic, or Illegal if none.
4307 //
4308 // Notes:
4309 //    method should have CORINFO_FLG_JIT_INTRINSIC set in its attributes,
4310 //    otherwise it is not a named jit intrinsic.
4313 NamedIntrinsic Compiler::lookupNamedIntrinsic(CORINFO_METHOD_HANDLE method)
4315 NamedIntrinsic result = NI_Illegal;
4317 const char* className = nullptr;
4318 const char* namespaceName = nullptr;
4319 const char* methodName = info.compCompHnd->getMethodNameFromMetadata(method, &className, &namespaceName);
4321 if ((namespaceName == nullptr) || (className == nullptr) || (methodName == nullptr))
4326 if (strcmp(namespaceName, "System") == 0)
4328 if ((strcmp(className, "Enum") == 0) && (strcmp(methodName, "HasFlag") == 0))
4330 result = NI_System_Enum_HasFlag;
4332 else if ((strcmp(className, "MathF") == 0) && (strcmp(methodName, "Round") == 0))
4334 result = NI_MathF_Round;
4336 else if ((strcmp(className, "Math") == 0) && (strcmp(methodName, "Round") == 0))
4338 result = NI_Math_Round;
4341 #if defined(_TARGET_XARCH_) // We currently only support BSWAP on x86
4342 else if (strcmp(namespaceName, "System.Buffers.Binary") == 0)
4344 if ((strcmp(className, "BinaryPrimitives") == 0) && (strcmp(methodName, "ReverseEndianness") == 0))
4346 result = NI_System_Buffers_Binary_BinaryPrimitives_ReverseEndianness;
4349 #endif // defined(_TARGET_XARCH_)
4350 else if (strcmp(namespaceName, "System.Collections.Generic") == 0)
4352 if ((strcmp(className, "EqualityComparer`1") == 0) && (strcmp(methodName, "get_Default") == 0))
4354 result = NI_System_Collections_Generic_EqualityComparer_get_Default;
4357 #ifdef FEATURE_HW_INTRINSICS
4358 else if (strncmp(namespaceName, "System.Runtime.Intrinsics", 25) == 0)
4360 namespaceName += 25;
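        // Illustrative parse: "System.Runtime.Intrinsics.X86" leaves namespaceName == ".X86",
        // while "System.Runtime.Intrinsics" itself leaves the empty string, which selects
        // the Vector64/128/256 handling below.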
4362 if (namespaceName[0] == '\0')
4364 if (strncmp(className, "Vector", 6) == 0)
4368 #if defined(_TARGET_ARM64_)
4369 if (strncmp(className, "64", 2) == 0)
4373 if (strcmp(className, "`1") == 0)
4375 if (strncmp(methodName, "As", 2) == 0)
4379 // Vector64_As, Vector64_AsDouble, Vector64_AsInt64, and Vector64_AsUInt64
4380 // are not currently supported as they require additional plumbing to be
4381 // supported by the JIT as TYP_SIMD8.
4383 if (strcmp(methodName, "Byte") == 0)
4385 result = NI_Base_Vector64_AsByte;
4387 else if (strcmp(methodName, "Int16") == 0)
4389 result = NI_Base_Vector64_AsInt16;
4391 else if (strcmp(methodName, "Int32") == 0)
4393 result = NI_Base_Vector64_AsInt32;
4395 else if (strcmp(methodName, "SByte") == 0)
4397 result = NI_Base_Vector64_AsSByte;
4399 else if (strcmp(methodName, "Single") == 0)
4401 result = NI_Base_Vector64_AsSingle;
4403 else if (strcmp(methodName, "UInt16") == 0)
4405 result = NI_Base_Vector64_AsUInt16;
4407 else if (strcmp(methodName, "UInt32") == 0)
4409 result = NI_Base_Vector64_AsUInt32;
4415 #endif // _TARGET_ARM64_
4416 if (strncmp(className, "128", 3) == 0)
4420 if (strcmp(className, "`1") == 0)
4422 if (strncmp(methodName, "As", 2) == 0)
4426 if (strcmp(methodName, "`1") == 0)
4428 result = NI_Base_Vector128_As;
4430 else if (strcmp(methodName, "Byte") == 0)
4432 result = NI_Base_Vector128_AsByte;
4434 else if (strcmp(methodName, "Double") == 0)
4436 result = NI_Base_Vector128_AsDouble;
4438 else if (strcmp(methodName, "Int16") == 0)
4440 result = NI_Base_Vector128_AsInt16;
4442 else if (strcmp(methodName, "Int32") == 0)
4444 result = NI_Base_Vector128_AsInt32;
4446 else if (strcmp(methodName, "Int64") == 0)
4448 result = NI_Base_Vector128_AsInt64;
4450 else if (strcmp(methodName, "SByte") == 0)
4452 result = NI_Base_Vector128_AsSByte;
4454 else if (strcmp(methodName, "Single") == 0)
4456 result = NI_Base_Vector128_AsSingle;
4458 else if (strcmp(methodName, "UInt16") == 0)
4460 result = NI_Base_Vector128_AsUInt16;
4462 else if (strcmp(methodName, "UInt32") == 0)
4464 result = NI_Base_Vector128_AsUInt32;
4466 else if (strcmp(methodName, "UInt64") == 0)
4468 result = NI_Base_Vector128_AsUInt64;
4471 #if defined(_TARGET_XARCH_)
4472 else if (strcmp(methodName, "get_Zero") == 0)
4474 result = NI_Base_Vector128_Zero;
4476 #endif // _TARGET_XARCH_
4479 #if defined(_TARGET_XARCH_)
4480 else if (strncmp(className, "256", 3) == 0)
4484 if (strcmp(className, "`1") == 0)
4486 if (strncmp(methodName, "As", 2) == 0)
4490 if (strcmp(methodName, "`1") == 0)
4492 result = NI_Base_Vector256_As;
4494 else if (strcmp(methodName, "Byte") == 0)
4496 result = NI_Base_Vector256_AsByte;
4498 else if (strcmp(methodName, "Double") == 0)
4500 result = NI_Base_Vector256_AsDouble;
4502 else if (strcmp(methodName, "Int16") == 0)
4504 result = NI_Base_Vector256_AsInt16;
4506 else if (strcmp(methodName, "Int32") == 0)
4508 result = NI_Base_Vector256_AsInt32;
4510 else if (strcmp(methodName, "Int64") == 0)
4512 result = NI_Base_Vector256_AsInt64;
4514 else if (strcmp(methodName, "SByte") == 0)
4516 result = NI_Base_Vector256_AsSByte;
4518 else if (strcmp(methodName, "Single") == 0)
4520 result = NI_Base_Vector256_AsSingle;
4522 else if (strcmp(methodName, "UInt16") == 0)
4524 result = NI_Base_Vector256_AsUInt16;
4526 else if (strcmp(methodName, "UInt32") == 0)
4528 result = NI_Base_Vector256_AsUInt32;
4530 else if (strcmp(methodName, "UInt64") == 0)
4532 result = NI_Base_Vector256_AsUInt64;
4535 else if (strcmp(methodName, "get_Zero") == 0)
4537 result = NI_Base_Vector256_Zero;
4541 #endif // _TARGET_XARCH_
4544 #if defined(_TARGET_XARCH_)
4545 else if (strcmp(namespaceName, ".X86") == 0)
4547 result = HWIntrinsicInfo::lookupId(className, methodName);
4549 #elif defined(_TARGET_ARM64_)
4550 else if (strcmp(namespaceName, ".Arm.Arm64") == 0)
4552 result = lookupHWIntrinsic(className, methodName);
4554 #else // !defined(_TARGET_XARCH_) && !defined(_TARGET_ARM64_)
4555 #error Unsupported platform
4556 #endif // !defined(_TARGET_XARCH_) && !defined(_TARGET_ARM64_)
4558 #endif // FEATURE_HW_INTRINSICS
4563 /*****************************************************************************/
4565 GenTree* Compiler::impArrayAccessIntrinsic(
4566 CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, int memberRef, bool readonlyCall, CorInfoIntrinsics intrinsicID)
4568 /* If we are generating SMALL_CODE, we don't want to use intrinsics for
4569    the following, as it generates fatter code.
4570 */
4572 if (compCodeOpt() == SMALL_CODE)
4577 /* These intrinsics generate fatter (but faster) code and are only
4578 done if we don't need SMALL_CODE */
4580 unsigned rank = (intrinsicID == CORINFO_INTRINSIC_Array_Set) ? (sig->numArgs - 1) : sig->numArgs;
4582 // The rank 1 case is special because it has to handle two array formats
4583 // (SZ arrays and rank-1 multi-dimensional arrays), so we simply don't expand that case.
4584 if (rank > GT_ARR_MAX_RANK || rank <= 1)
4589 CORINFO_CLASS_HANDLE arrElemClsHnd = nullptr;
4590 var_types elemType = JITtype2varType(info.compCompHnd->getChildType(clsHnd, &arrElemClsHnd));
4592 // For the ref case, we will only be able to inline if the types match
4593 // (the verifier checks for this; we don't care about the nonverified case) and the
4594 // type is final (so we don't need to do the cast).
4595 if ((intrinsicID != CORINFO_INTRINSIC_Array_Get) && !readonlyCall && varTypeIsGC(elemType))
4597 // Get the call site signature
4598 CORINFO_SIG_INFO LocalSig;
4599 eeGetCallSiteSig(memberRef, info.compScopeHnd, impTokenLookupContextHandle, &LocalSig);
4600 assert(LocalSig.hasThis());
4602 CORINFO_CLASS_HANDLE actualElemClsHnd;
4604 if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
4606 // Fetch the last argument, the one that indicates the type we are setting.
4607 CORINFO_ARG_LIST_HANDLE argType = LocalSig.args;
4608 for (unsigned r = 0; r < rank; r++)
4610 argType = info.compCompHnd->getArgNext(argType);
4613 typeInfo argInfo = verParseArgSigToTypeInfo(&LocalSig, argType);
4614 actualElemClsHnd = argInfo.GetClassHandle();
4618 assert(intrinsicID == CORINFO_INTRINSIC_Array_Address);
4620 // Fetch the return type
4621 typeInfo retInfo = verMakeTypeInfo(LocalSig.retType, LocalSig.retTypeClass);
4622 assert(retInfo.IsByRef());
4623 actualElemClsHnd = retInfo.GetClassHandle();
4626 // if it's not final, we can't do the optimization
4627 if (!(info.compCompHnd->getClassAttribs(actualElemClsHnd) & CORINFO_FLG_FINAL))
4633 unsigned arrayElemSize;
4634 if (elemType == TYP_STRUCT)
4636 assert(arrElemClsHnd);
4638 arrayElemSize = info.compCompHnd->getClassSize(arrElemClsHnd);
4642 arrayElemSize = genTypeSize(elemType);
4645 if ((unsigned char)arrayElemSize != arrayElemSize)
4647 // arrayElemSize would be truncated as an unsigned char.
4648 // This means the array element is too large. Don't do the optimization.
4652 GenTree* val = nullptr;
4654 if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
4656 // Assignment of a struct is more work, and there are more gets than sets.
4657 if (elemType == TYP_STRUCT)
4662 val = impPopStack().val;
4663 assert(genActualType(elemType) == genActualType(val->gtType) ||
4664 (elemType == TYP_FLOAT && val->gtType == TYP_DOUBLE) ||
4665 (elemType == TYP_INT && val->gtType == TYP_BYREF) ||
4666 (elemType == TYP_DOUBLE && val->gtType == TYP_FLOAT));
4669 noway_assert((unsigned char)GT_ARR_MAX_RANK == GT_ARR_MAX_RANK);
4671 GenTree* inds[GT_ARR_MAX_RANK];
4672 for (unsigned k = rank; k > 0; k--)
4674 inds[k - 1] = impPopStack().val;
4677 GenTree* arr = impPopStack().val;
4678 assert(arr->gtType == TYP_REF);
4681 new (this, GT_ARR_ELEM) GenTreeArrElem(TYP_BYREF, arr, static_cast<unsigned char>(rank),
4682 static_cast<unsigned char>(arrayElemSize), elemType, &inds[0]);
4684 if (intrinsicID != CORINFO_INTRINSIC_Array_Address)
4686 arrElem = gtNewOperNode(GT_IND, elemType, arrElem);
4689 if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
4691 assert(val != nullptr);
4692 return gtNewAssignNode(arrElem, val);
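    // Hedged sketch of the expansion for a rank-2 get, e.g. "int x = a[i, j]":
    //
    //     GT_IND(int, GT_ARR_ELEM(byref, a, rank=2, elemSize=4, i, j))
    //
    // Array_Address omits the GT_IND, and Array_Set instead assigns the popped
    // value into the GT_IND of the element address.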
4700 BOOL Compiler::verMergeEntryStates(BasicBlock* block, bool* changed)
4704 // do some basic checks first
4705 if (block->bbStackDepthOnEntry() != verCurrentState.esStackDepth)
4710 if (verCurrentState.esStackDepth > 0)
4712 // merge stack types
4713 StackEntry* parentStack = block->bbStackOnEntry();
4714 StackEntry* childStack = verCurrentState.esStack;
4716 for (i = 0; i < verCurrentState.esStackDepth; i++, parentStack++, childStack++)
4718 if (tiMergeToCommonParent(&parentStack->seTypeInfo, &childStack->seTypeInfo, changed) == FALSE)
4725 // merge initialization status of this ptr
4727 if (verTrackObjCtorInitState)
4729 // If we're tracking the CtorInitState, then it must not be unknown in the current state.
4730 assert(verCurrentState.thisInitialized != TIS_Bottom);
4732 // If the successor block's thisInit state is unknown, copy it from the current state.
4733 if (block->bbThisOnEntry() == TIS_Bottom)
4736 verSetThisInit(block, verCurrentState.thisInitialized);
4738 else if (verCurrentState.thisInitialized != block->bbThisOnEntry())
4740 if (block->bbThisOnEntry() != TIS_Top)
4743 verSetThisInit(block, TIS_Top);
4745 if (block->bbFlags & BBF_FAILED_VERIFICATION)
4747 // The block is bad. Control can flow through the block to any handler that catches the
4748 // verification exception, but the importer ignores bad blocks and therefore won't model
4749 // this flow in the normal way. To complete the merge into the bad block, the new state
4750 // needs to be manually pushed to the handlers that may be reached after the verification
4751 // exception occurs.
4753 // Usually, the new state was already propagated to the relevant handlers while processing
4754 // the predecessors of the bad block. The exception is when the bad block is at the start
4755 // of a try region, meaning it is protected by additional handlers that do not protect its
4756 // predecessors.
4758 if (block->hasTryIndex() && ((block->bbFlags & BBF_TRY_BEG) != 0))
4760 // Push TIS_Top to the handlers that protect the bad block. Note that this can cause
4761 // recursive calls back into this code path (if successors of the current bad block are
4762 // also bad blocks).
4764 ThisInitState origTIS = verCurrentState.thisInitialized;
4765 verCurrentState.thisInitialized = TIS_Top;
4766 impVerifyEHBlock(block, true);
4767 verCurrentState.thisInitialized = origTIS;
4775 assert(verCurrentState.thisInitialized == TIS_Bottom && block->bbThisOnEntry() == TIS_Bottom);
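    // For reference, the this-init merge above behaves as a small lattice (sketch):
    //
    //                TIS_Top  ("either/conflict")
    //               /       \
    //        TIS_Init     TIS_Uninit
    //               \       /
    //               TIS_Bottom  ("unknown")
    //
    // Merging two different known states widens the block's entry state to TIS_Top.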
4781 /*****************************************************************************
4782 * 'logMsg' is true if a log message needs to be logged. false if the caller has
4783 * already logged it (presumably in a more detailed fashion than done here)
4784 * 'bVerificationException' is true for a verification exception, false for a
4785 * "call unauthorized by host" exception.
4788 void Compiler::verConvertBBToThrowVerificationException(BasicBlock* block DEBUGARG(bool logMsg))
4790 block->bbJumpKind = BBJ_THROW;
4791 block->bbFlags |= BBF_FAILED_VERIFICATION;
4793 impCurStmtOffsSet(block->bbCodeOffs);
4796 // we need this since BeginTreeList asserts otherwise
4797 impTreeList = impTreeLast = nullptr;
4798 block->bbFlags &= ~BBF_IMPORTED;
4802 JITLOG((LL_ERROR, "Verification failure: while compiling %s near IL offset %x..%xh \n", info.compFullName,
4803 block->bbCodeOffs, block->bbCodeOffsEnd));
4806 printf("\n\nVerification failure: %s near IL %xh \n", info.compFullName, block->bbCodeOffs);
4810 if (JitConfig.DebugBreakOnVerificationFailure())
4818 // if the stack is non-empty evaluate all the side-effects
4819 if (verCurrentState.esStackDepth > 0)
4821 impEvalSideEffects();
4823 assert(verCurrentState.esStackDepth == 0);
4825 GenTree* op1 =
4826     gtNewHelperCallNode(CORINFO_HELP_VERIFICATION, TYP_VOID, gtNewArgList(gtNewIconNode(block->bbCodeOffs)));
4827 // verCurrentState.esStackDepth = 0;
4828 impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
4830 // The inliner is not able to handle methods that require a throw block, so
4831 // make sure this method never gets inlined.
4832 info.compCompHnd->setMethodAttribs(info.compMethodHnd, CORINFO_FLG_BAD_INLINEE);
4835 /*****************************************************************************
4838 void Compiler::verHandleVerificationFailure(BasicBlock* block DEBUGARG(bool logMsg))
4841 // On AMD64, for historical reasons involving design limitations of JIT64, the VM has a
4842 // slightly different mechanism in which it calls the JIT to perform IL verification:
4843 // in the case of transparent methods the VM calls for a predicate IsVerifiable()
4844 // that consists of calling the JIT with the IMPORT_ONLY flag and with the IL verify flag on.
4845 // If the JIT determines the method is not verifiable, it should raise the exception to the VM and let
4846 // it bubble up until reported by the runtime. Currently in RyuJIT, this method doesn't bubble
4847 // up the exception; instead it embeds a throw inside the offending basic block and lets
4848 // the jitted method fail at runtime.
4850 // For AMD64 we don't want this behavior when the JIT has been called only for verification (i.e.
4851 // with the IMPORT_ONLY and IL Verification flag set) because this won't actually generate code,
4852 // just try to find out whether to fail this method before even actually jitting it. So, in case
4853 // we detect these two conditions, instead of generating a throw statement inside the offending
4854 // basic block, we immediately fail to JIT and notify the VM to make the IsVerifiable() predicate
4855 // to return false and make RyuJIT behave the same way JIT64 does.
4857 // The rationale behind this workaround is to avoid modifying the VM and maintain compatibility between JIT64 and
4858 // RyuJIT for the time being until we completely replace JIT64.
4859 // TODO-ARM64-Cleanup: We probably want to actually modify the VM in the future to avoid the unnecessary two passes.
4861 // On AMD64 we must make sure we're behaving the same way as JIT64, meaning we should only raise the verification
4862 // exception if we are only importing and verifying. The method verNeedsVerification() can also modify the
4863 // tiVerificationNeeded flag in the case it determines it can 'skip verification' during importation and defer it
4864 // to a runtime check. That's why we must assert one or the other (since the flag tiVerificationNeeded can
4865 // be turned off during importation).
4866 CLANG_FORMAT_COMMENT_ANCHOR;
4868 #ifdef _TARGET_64BIT_
4871 bool canSkipVerificationResult =
4872 info.compCompHnd->canSkipMethodVerification(info.compMethodHnd) != CORINFO_VERIFICATION_CANNOT_SKIP;
4873 assert(tiVerificationNeeded || canSkipVerificationResult);
4876 // Add the non-verifiable flag to the compiler
4877 if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IMPORT_ONLY))
4879 tiIsVerifiableCode = FALSE;
4881 #endif //_TARGET_64BIT_
4882 verResetCurrentState(block, &verCurrentState);
4883 verConvertBBToThrowVerificationException(block DEBUGARG(logMsg));
4886 impNoteLastILoffs(); // Remember at which BC offset the tree was finished
4890 /******************************************************************************/
4891 typeInfo Compiler::verMakeTypeInfo(CorInfoType ciType, CORINFO_CLASS_HANDLE clsHnd)
4893 assert(ciType < CORINFO_TYPE_COUNT);
4898 case CORINFO_TYPE_STRING:
4899 case CORINFO_TYPE_CLASS:
4900 tiResult = verMakeTypeInfo(clsHnd);
4901 if (!tiResult.IsType(TI_REF))
4902 { // type must be consistent with element type
4907 #ifdef _TARGET_64BIT_
4908 case CORINFO_TYPE_NATIVEINT:
4909 case CORINFO_TYPE_NATIVEUINT:
4912 // If we have more precise information, use it
4913 return verMakeTypeInfo(clsHnd);
4917 return typeInfo::nativeInt();
4920 #endif // _TARGET_64BIT_
4922 case CORINFO_TYPE_VALUECLASS:
4923 case CORINFO_TYPE_REFANY:
4924 tiResult = verMakeTypeInfo(clsHnd);
4925 // type must be consistent with element type;
4926 if (!tiResult.IsValueClass())
4931 case CORINFO_TYPE_VAR:
4932 return verMakeTypeInfo(clsHnd);
4934 case CORINFO_TYPE_PTR: // for now, pointers are treated as an error
4935 case CORINFO_TYPE_VOID:
4939 case CORINFO_TYPE_BYREF:
4941 CORINFO_CLASS_HANDLE childClassHandle;
4942 CorInfoType childType = info.compCompHnd->getChildType(clsHnd, &childClassHandle);
4943 return ByRef(verMakeTypeInfo(childType, childClassHandle));
4949 { // If we have more precise information, use it
4950 return typeInfo(TI_STRUCT, clsHnd);
4954 return typeInfo(JITtype2tiType(ciType));
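    // Sketch of the default mapping for the remaining primitive cases, e.g.
    //     CORINFO_TYPE_INT    -> typeInfo(TI_INT)
    //     CORINFO_TYPE_DOUBLE -> typeInfo(TI_DOUBLE)
    // via JITtype2tiType; no class handle is needed for these.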
4960 /******************************************************************************/
4962 typeInfo Compiler::verMakeTypeInfo(CORINFO_CLASS_HANDLE clsHnd, bool bashStructToRef /* = false */)
4964 if (clsHnd == nullptr)
4969 // Byrefs should only occur in method and local signatures, which are accessed
4970 // using ICorClassInfo and ICorClassInfo.getChildType.
4971 // So findClass() and getClassAttribs() should not be called for byrefs
4973 if (JITtype2varType(info.compCompHnd->asCorInfoType(clsHnd)) == TYP_BYREF)
4975 assert(!"Did findClass() return a Byref?");
4979 unsigned attribs = info.compCompHnd->getClassAttribs(clsHnd);
4981 if (attribs & CORINFO_FLG_VALUECLASS)
4983 CorInfoType t = info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd);
4985 // Meta-data validation should ensure that CORINFO_TYPE_BYREF does
4986 // not occur here, so we may want to change this to an assert instead.
4987 if (t == CORINFO_TYPE_VOID || t == CORINFO_TYPE_BYREF || t == CORINFO_TYPE_PTR)
4992 #ifdef _TARGET_64BIT_
4993 if (t == CORINFO_TYPE_NATIVEINT || t == CORINFO_TYPE_NATIVEUINT)
4995 return typeInfo::nativeInt();
4997 #endif // _TARGET_64BIT_
4999 if (t != CORINFO_TYPE_UNDEF)
5001 return (typeInfo(JITtype2tiType(t)));
5003 else if (bashStructToRef)
5005 return (typeInfo(TI_REF, clsHnd));
5009 return (typeInfo(TI_STRUCT, clsHnd));
5012 else if (attribs & CORINFO_FLG_GENERIC_TYPE_VARIABLE)
5014 // See comment in _typeInfo.h for why we do it this way.
5015 return (typeInfo(TI_REF, clsHnd, true));
5019 return (typeInfo(TI_REF, clsHnd));
5023 /******************************************************************************/
5024 BOOL Compiler::verIsSDArray(typeInfo ti)
5026 if (ti.IsNullObjRef())
5027 { // nulls are SD arrays
5031 if (!ti.IsType(TI_REF))
5036 if (!info.compCompHnd->isSDArray(ti.GetClassHandleForObjRef()))
5043 /******************************************************************************/
5044 /* Given 'arrayObjectType' which is an array type, fetch the element type. */
5045 /* Returns an error type if anything goes wrong */
5047 typeInfo Compiler::verGetArrayElemType(typeInfo arrayObjectType)
5049 assert(!arrayObjectType.IsNullObjRef()); // you need to check for null explicitly since that is a success case
5051 if (!verIsSDArray(arrayObjectType))
5056 CORINFO_CLASS_HANDLE childClassHandle = nullptr;
5057 CorInfoType ciType = info.compCompHnd->getChildType(arrayObjectType.GetClassHandleForObjRef(), &childClassHandle);
5059 return verMakeTypeInfo(ciType, childClassHandle);
5062 /*****************************************************************************
5064 typeInfo Compiler::verParseArgSigToTypeInfo(CORINFO_SIG_INFO* sig, CORINFO_ARG_LIST_HANDLE args)
5066 CORINFO_CLASS_HANDLE classHandle;
5067 CorInfoType ciType = strip(info.compCompHnd->getArgType(sig, args, &classHandle));
5069 var_types type = JITtype2varType(ciType);
5070 if (varTypeIsGC(type))
5072 // For efficiency, getArgType only returns something in classHandle for
5073 // value types. For other types that have additional type info, you
5074 // have to call back explicitly
5075 classHandle = info.compCompHnd->getArgClass(sig, args);
5078 NO_WAY("Could not figure out Class specified in argument or local signature");
5082 return verMakeTypeInfo(ciType, classHandle);
5085 /*****************************************************************************/
5087 // This does the expensive check to figure out whether the method
5088 // needs to be verified. It is called only when we fail verification,
5089 // just before throwing the verification exception.
5091 BOOL Compiler::verNeedsVerification()
5093 // If we have previously determined that verification is NOT needed
5094 // (for example in Compiler::compCompile), that means verification is really not needed.
5095 // Return the same decision we made before.
5096 // (Note: This literally means that tiVerificationNeeded can never go from 0 to 1.)
5098 if (!tiVerificationNeeded)
5100 return tiVerificationNeeded;
5103 assert(tiVerificationNeeded);
5105 // Ok, we haven't concluded that verification is NOT needed. Consult the EE now to
5106 // obtain the answer.
5107 CorInfoCanSkipVerificationResult canSkipVerificationResult =
5108 info.compCompHnd->canSkipMethodVerification(info.compMethodHnd);
5110 // canSkipVerification will return one of the following three values:
5111 // CORINFO_VERIFICATION_CANNOT_SKIP = 0, // Cannot skip verification during jit time.
5112 // CORINFO_VERIFICATION_CAN_SKIP = 1, // Can skip verification during jit time.
5113 // CORINFO_VERIFICATION_RUNTIME_CHECK = 2, // Skip verification during jit time,
5114 // but need to insert a callout to the VM to ask during runtime
5115 // whether to skip verification or not.
5117 // Set tiRuntimeCalloutNeeded if canSkipVerification() instructs us to insert a callout for runtime check
5118 if (canSkipVerificationResult == CORINFO_VERIFICATION_RUNTIME_CHECK)
5120 tiRuntimeCalloutNeeded = true;
5123 if (canSkipVerificationResult == CORINFO_VERIFICATION_DONT_JIT)
5125 // Dev10 706080 - Testers don't like the assert, so just silence it
5126 // by not using the macros that invoke debugAssert.
5130 // When tiVerificationNeeded is true, JIT will do the verification during JIT time.
5131 // The following line means we will NOT do jit time verification if canSkipVerification
5132 // returns CORINFO_VERIFICATION_CAN_SKIP or CORINFO_VERIFICATION_RUNTIME_CHECK.
5133 tiVerificationNeeded = (canSkipVerificationResult == CORINFO_VERIFICATION_CANNOT_SKIP);
5134 return tiVerificationNeeded;
5137 BOOL Compiler::verIsByRefLike(const typeInfo& ti)
5143 if (!ti.IsType(TI_STRUCT))
5147 return info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_CONTAINS_STACK_PTR;
5150 BOOL Compiler::verIsSafeToReturnByRef(const typeInfo& ti)
5152 if (ti.IsPermanentHomeByRef())
5162 BOOL Compiler::verIsBoxable(const typeInfo& ti)
5164 return (ti.IsPrimitiveType() || ti.IsObjRef() // includes boxed generic type variables
5165 || ti.IsUnboxedGenericTypeVar() ||
5166 (ti.IsType(TI_STRUCT) &&
5167 // exclude byreflike structs
5168 !(info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_CONTAINS_STACK_PTR)));
5171 // Is it a boxed value type?
5172 bool Compiler::verIsBoxedValueType(typeInfo ti)
5174 if (ti.GetType() == TI_REF)
5176 CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandleForObjRef();
5177 return !!eeIsValueClass(clsHnd);
5185 /*****************************************************************************
5187 * Check if a TailCall is legal.
5190 bool Compiler::verCheckTailCallConstraint(
5192 CORINFO_RESOLVED_TOKEN* pResolvedToken,
5193 CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, // Is this a "constrained." call on a type parameter?
5194 bool speculative // If true, won't throw if verification fails. Instead it will
5195 // return false to the caller.
5196 // If false, it will throw.
5200 CORINFO_SIG_INFO sig;
5201 unsigned int popCount = 0; // we can't pop the stack since impImportCall needs it, so
5202 // this counter is used to keep track of how many items have been
5203 // virtually popped.
5205 CORINFO_METHOD_HANDLE methodHnd = nullptr;
5206 CORINFO_CLASS_HANDLE methodClassHnd = nullptr;
5207 unsigned methodClassFlgs = 0;
5209 assert(impOpcodeIsCallOpcode(opcode));
5211 if (compIsForInlining())
5216 // for calli, check that this is not a virtual method
5217 if (opcode == CEE_CALLI)
5219 /* Get the call sig */
5220 eeGetSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, &sig);
5222 // We don't know the target method, so we have to infer the flags, or
5223 // assume the worst-case.
5224 mflags = (sig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC;
5228 methodHnd = pResolvedToken->hMethod;
5230 mflags = info.compCompHnd->getMethodAttribs(methodHnd);
5232 // When verifying generic code we pair the method handle with its
5233 // owning class to get the exact method signature.
5234 methodClassHnd = pResolvedToken->hClass;
5235 assert(methodClassHnd);
5237 eeGetMethodSig(methodHnd, &sig, methodClassHnd);
5239 // opcode specific check
5240 methodClassFlgs = info.compCompHnd->getClassAttribs(methodClassHnd);
5243 // We must have got the methodClassHnd if opcode is not CEE_CALLI
5244 assert((methodHnd != nullptr && methodClassHnd != nullptr) || opcode == CEE_CALLI);
5246 if ((sig.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
5248 eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, &sig);
5251 // check compatibility of the arguments
5252 unsigned int argCount;
5253 argCount = sig.numArgs;
5254 CORINFO_ARG_LIST_HANDLE args;
5258 typeInfo tiDeclared = verParseArgSigToTypeInfo(&sig, args).NormaliseForStack();
5260 // check that the argument is not a byref for tailcalls
5261 VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclared), "tailcall on byrefs", speculative);
5263 // For unsafe code, we might have parameters containing pointer to the stack location.
5264 // Disallow the tailcall for this kind.
5265 CORINFO_CLASS_HANDLE classHandle;
5266 CorInfoType ciType = strip(info.compCompHnd->getArgType(&sig, args, &classHandle));
5267 VerifyOrReturnSpeculative(ciType != CORINFO_TYPE_PTR, "tailcall on CORINFO_TYPE_PTR", speculative);
5269 args = info.compCompHnd->getArgNext(args);
5273 popCount += sig.numArgs;
5275 // check the 'this' pointer, which is present on non-static methods not called via NEWOBJ
5276 if (!(mflags & CORINFO_FLG_STATIC))
5278 // Always update the popCount.
5279 // This is crucial for the stack calculation to be correct.
5280 typeInfo tiThis = impStackTop(popCount).seTypeInfo;
5283 if (opcode == CEE_CALLI)
5285 // For CALLI, we don't know the methodClassHnd. Therefore, let's check the "this" object
5287 if (tiThis.IsValueClass())
5291 VerifyOrReturnSpeculative(!verIsByRefLike(tiThis), "byref in tailcall", speculative);
5295 // Check type compatibility of the this argument
5296 typeInfo tiDeclaredThis = verMakeTypeInfo(methodClassHnd);
5297 if (tiDeclaredThis.IsValueClass())
5299 tiDeclaredThis.MakeByRef();
5302 VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclaredThis), "byref in tailcall", speculative);
5306 // Tail calls on constrained calls should be illegal too:
5307 // when instantiated at a value type, a constrained call may pass the address of a stack allocated value
5308 VerifyOrReturnSpeculative(!pConstrainedResolvedToken, "byref in constrained tailcall", speculative);
5310 // Get the exact view of the signature for an array method
5311 if (sig.retType != CORINFO_TYPE_VOID)
5313 if (methodClassFlgs & CORINFO_FLG_ARRAY)
5315 assert(opcode != CEE_CALLI);
5316 eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, &sig);
5320 typeInfo tiCalleeRetType = verMakeTypeInfo(sig.retType, sig.retTypeClass);
5321 typeInfo tiCallerRetType =
5322 verMakeTypeInfo(info.compMethodInfo->args.retType, info.compMethodInfo->args.retTypeClass);
5324 // void return type gets morphed into the error type, so we have to treat it specially here
5325 if (sig.retType == CORINFO_TYPE_VOID)
5327 VerifyOrReturnSpeculative(info.compMethodInfo->args.retType == CORINFO_TYPE_VOID, "tailcall return mismatch",
5332 VerifyOrReturnSpeculative(tiCompatibleWith(NormaliseForStack(tiCalleeRetType),
5333 NormaliseForStack(tiCallerRetType), true),
5334 "tailcall return mismatch", speculative);
5337 // for tailcall, stack must be empty
5338 VerifyOrReturnSpeculative(verCurrentState.esStackDepth == popCount, "stack non-empty on tailcall", speculative);
5340 return true; // Yes, tailcall is legal
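/* Illustrative (hypothetical) rejection: IL equivalent to

       bool M(ref int r) { ... }
       bool Caller(ref int r) { tail. call M(ref r); }

   fails the "tailcall on byrefs" check above, because the byref argument may
   point into the caller's frame, which is gone once the tail call replaces it. */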
5343 /*****************************************************************************
5345 * Checks the IL verification rules for the call
5348 void Compiler::verVerifyCall(OPCODE opcode,
5349 CORINFO_RESOLVED_TOKEN* pResolvedToken,
5350 CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
5353 const BYTE* delegateCreateStart,
5354 const BYTE* codeAddr,
5355 CORINFO_CALL_INFO* callInfo DEBUGARG(const char* methodName))
5358 CORINFO_SIG_INFO* sig = nullptr;
5359 unsigned int popCount = 0; // we can't pop the stack since impImportCall needs it, so
5360 // this counter is used to keep track of how many items have been
5361 // virtually popped.
5363 // for calli, VerifyOrReturn that this is not a virtual method
5364 if (opcode == CEE_CALLI)
5366 Verify(false, "Calli not verifiable");
5370 //<NICE> It would be nice to cache the rest of it, but eeFindMethod is the big ticket item.
5371 mflags = callInfo->verMethodFlags;
5373 sig = &callInfo->verSig;
5375 if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
5377 eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
5380 // opcode specific check
5381 unsigned methodClassFlgs = callInfo->classFlags;
5385 // cannot do callvirt on valuetypes
5386 VerifyOrReturn(!(methodClassFlgs & CORINFO_FLG_VALUECLASS), "callVirt on value class");
5387 VerifyOrReturn(sig->hasThis(), "CallVirt on static method");
5392 assert(!tailCall); // Importer should not allow this
5393 VerifyOrReturn((mflags & CORINFO_FLG_CONSTRUCTOR) && !(mflags & CORINFO_FLG_STATIC),
5394 "newobj must be on instance");
5396 if (methodClassFlgs & CORINFO_FLG_DELEGATE)
5398 VerifyOrReturn(sig->numArgs == 2, "wrong number args to delegate ctor");
5399 typeInfo tiDeclaredObj = verParseArgSigToTypeInfo(sig, sig->args).NormaliseForStack();
5400 typeInfo tiDeclaredFtn =
5401 verParseArgSigToTypeInfo(sig, info.compCompHnd->getArgNext(sig->args)).NormaliseForStack();
5402 VerifyOrReturn(tiDeclaredFtn.IsNativeIntType(), "ftn arg needs to be a native int type");
5404 assert(popCount == 0);
5405 typeInfo tiActualObj = impStackTop(1).seTypeInfo;
5406 typeInfo tiActualFtn = impStackTop(0).seTypeInfo;
5408 VerifyOrReturn(tiActualFtn.IsMethod(), "delegate needs method as first arg");
5409 VerifyOrReturn(tiCompatibleWith(tiActualObj, tiDeclaredObj, true), "delegate object type mismatch");
5410 VerifyOrReturn(tiActualObj.IsNullObjRef() || tiActualObj.IsType(TI_REF),
5411 "delegate object type mismatch");
5413 CORINFO_CLASS_HANDLE objTypeHandle =
5414 tiActualObj.IsNullObjRef() ? nullptr : tiActualObj.GetClassHandleForObjRef();
5416 // the method signature must be compatible with the delegate's invoke method
5418 // check that for virtual functions, the type of the object used to get the
5419 // ftn ptr is the same as the type of the object passed to the delegate ctor.
5420 // since this is a bit of work to determine in general, we pattern match stylized
5421 // code sequences.
5423 // the delegate creation code check, which used to be done later, is now done here
5424 // so we can read delegateMethodRef directly
5425 // from the preceding LDFTN or CEE_LDVIRTFTN instruction sequence;
5426 // we then use it in our call to isCompatibleDelegate().
5428 mdMemberRef delegateMethodRef = mdMemberRefNil;
5429 VerifyOrReturn(verCheckDelegateCreation(delegateCreateStart, codeAddr, delegateMethodRef),
5430 "must create delegates with certain IL");
5432 CORINFO_RESOLVED_TOKEN delegateResolvedToken;
5433 delegateResolvedToken.tokenContext = impTokenLookupContextHandle;
5434 delegateResolvedToken.tokenScope = info.compScopeHnd;
5435 delegateResolvedToken.token = delegateMethodRef;
5436 delegateResolvedToken.tokenType = CORINFO_TOKENKIND_Method;
5437 info.compCompHnd->resolveToken(&delegateResolvedToken);
5439 CORINFO_CALL_INFO delegateCallInfo;
5440 eeGetCallInfo(&delegateResolvedToken, nullptr /* constraint typeRef */,
5441 addVerifyFlag(CORINFO_CALLINFO_SECURITYCHECKS), &delegateCallInfo);
5443 BOOL isOpenDelegate = FALSE;
5444 VerifyOrReturn(info.compCompHnd->isCompatibleDelegate(objTypeHandle, delegateResolvedToken.hClass,
5445 tiActualFtn.GetMethod(), pResolvedToken->hClass,
5447 "function incompatible with delegate");
5449 // check the constraints on the target method
5450 VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(delegateResolvedToken.hClass),
5451 "delegate target has unsatisfied class constraints");
5452 VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(delegateResolvedToken.hClass,
5453 tiActualFtn.GetMethod()),
5454 "delegate target has unsatisfied method constraints");
5456 // See ECMA spec section 1.8.1.5.2 (Delegating via instance dispatch)
5457 // for additional verification rules for delegates
5458 CORINFO_METHOD_HANDLE actualMethodHandle = tiActualFtn.GetMethod();
5459 DWORD actualMethodAttribs = info.compCompHnd->getMethodAttribs(actualMethodHandle);
5460 if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr))
5463 if ((actualMethodAttribs & CORINFO_FLG_VIRTUAL) && ((actualMethodAttribs & CORINFO_FLG_FINAL) == 0)
5464 #ifdef DEBUG
5465     && StrictCheckForNonVirtualCallToVirtualMethod()
5466 #endif
5467         )
5468 {
5469 if (info.compCompHnd->shouldEnforceCallvirtRestriction(info.compScopeHnd))
5471 VerifyOrReturn(tiActualObj.IsThisPtr() && lvaIsOriginalThisReadOnly() ||
5472 verIsBoxedValueType(tiActualObj),
5473 "The 'this' parameter to the call must be either the calling method's "
5474 "'this' parameter or "
5475 "a boxed value type.");
5480 if (actualMethodAttribs & CORINFO_FLG_PROTECTED)
5482 BOOL targetIsStatic = actualMethodAttribs & CORINFO_FLG_STATIC;
5484 Verify(targetIsStatic || !isOpenDelegate,
5485 "Unverifiable creation of an open instance delegate for a protected member.");
5487 CORINFO_CLASS_HANDLE instanceClassHnd = (tiActualObj.IsNullObjRef() || targetIsStatic)
5489 : tiActualObj.GetClassHandleForObjRef();
5491 // In the case of protected methods, it is a requirement that the 'this'
5492 // pointer be a subclass of the current context. Perform this check.
5493 Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
5494 "Accessing protected method through wrong type.");
5499 // fall thru to default checks
5501 VerifyOrReturn(!(mflags & CORINFO_FLG_ABSTRACT), "method abstract");
5503 VerifyOrReturn(!((mflags & CORINFO_FLG_CONSTRUCTOR) && (methodClassFlgs & CORINFO_FLG_DELEGATE)),
5504 "can only newobj a delegate constructor");
5506 // check compatibility of the arguments
5507 unsigned int argCount;
5508 argCount = sig->numArgs;
5509 CORINFO_ARG_LIST_HANDLE args;
5513 typeInfo tiActual = impStackTop(popCount + argCount).seTypeInfo;
5515 typeInfo tiDeclared = verParseArgSigToTypeInfo(sig, args).NormaliseForStack();
5516 VerifyOrReturn(tiCompatibleWith(tiActual, tiDeclared, true), "type mismatch");
5518 args = info.compCompHnd->getArgNext(args);
5524 popCount += sig->numArgs;
5526 // check the 'this' pointer, which is present on non-static methods not called via NEWOBJ
5527 CORINFO_CLASS_HANDLE instanceClassHnd = info.compClassHnd;
5528 if (!(mflags & CORINFO_FLG_STATIC) && (opcode != CEE_NEWOBJ))
5530 typeInfo tiThis = impStackTop(popCount).seTypeInfo;
5533 // If it is null, we assume we can access it (since it will AV shortly)
5534 // If it is anything but a reference class, there is no hierarchy, so
5535 // again, we don't need the precise instance class to compute 'protected' access
5536 if (tiThis.IsType(TI_REF))
5538 instanceClassHnd = tiThis.GetClassHandleForObjRef();
5541 // Check type compatibility of the this argument
5542 typeInfo tiDeclaredThis = verMakeTypeInfo(pResolvedToken->hClass);
5543 if (tiDeclaredThis.IsValueClass())
5545 tiDeclaredThis.MakeByRef();
5548 // If this is a call to the base class .ctor, set thisPtr Init for
5549 // this block.
5550 if (mflags & CORINFO_FLG_CONSTRUCTOR)
5552 if (verTrackObjCtorInitState && tiThis.IsThisPtr() &&
5553 verIsCallToInitThisPtr(info.compClassHnd, pResolvedToken->hClass))
5555 assert(verCurrentState.thisInitialized !=
5556 TIS_Bottom); // This should never be the case just from the logic of the verifier.
5557 VerifyOrReturn(verCurrentState.thisInitialized == TIS_Uninit,
5558 "Call to base class constructor when 'this' is possibly initialized");
5559 // Otherwise, 'this' is now initialized.
5560 verCurrentState.thisInitialized = TIS_Init;
5561 tiThis.SetInitialisedObjRef();
5565 // We allow direct calls to value type constructors
5566 // NB: we have to check that the contents of tiThis is a value type, otherwise we could use a
5567 // constrained callvirt to illegally re-enter a .ctor on a value of reference type.
5568 VerifyOrReturn(tiThis.IsByRef() && DereferenceByRef(tiThis).IsValueClass(),
5569 "Bad call to a constructor");
5573 if (pConstrainedResolvedToken != nullptr)
5575 VerifyOrReturn(tiThis.IsByRef(), "non-byref this type in constrained call");
5577 typeInfo tiConstraint = verMakeTypeInfo(pConstrainedResolvedToken->hClass);
5579 // We just dereference this and test for equality
5580 tiThis.DereferenceByRef();
5581 VerifyOrReturn(typeInfo::AreEquivalent(tiThis, tiConstraint),
5582 "this type mismatch with constrained type operand");
5584 // Now pretend the this type is the boxed constrained type, for the sake of subsequent checks
5585 tiThis = typeInfo(TI_REF, pConstrainedResolvedToken->hClass);
5588 // To support direct calls on readonly byrefs, just pretend tiDeclaredThis is readonly too
5589 if (tiDeclaredThis.IsByRef() && tiThis.IsReadonlyByRef())
5591 tiDeclaredThis.SetIsReadonlyByRef();
5594 VerifyOrReturn(tiCompatibleWith(tiThis, tiDeclaredThis, true), "this type mismatch");
5596 if (tiThis.IsByRef())
5598 // Find the actual type where the method exists (as opposed to what is declared
5599 // in the metadata). This is to prevent passing a byref as the "this" argument
5600 // while calling methods like System.ValueType.GetHashCode() which expect boxed objects.
5602 CORINFO_CLASS_HANDLE actualClassHnd = info.compCompHnd->getMethodClass(pResolvedToken->hMethod);
5603 VerifyOrReturn(eeIsValueClass(actualClassHnd),
5604 "Call to base type of valuetype (which is never a valuetype)");
5607 // Rules for non-virtual call to a non-final virtual method:
5610 // The "this" pointer is considered to be "possibly written" if
5611 // 1. Its address have been taken (LDARGA 0) anywhere in the method.
5613 // 2. It has been stored to (STARG.0) anywhere in the method.
5615 // A non-virtual call to a non-final virtual method is only allowed if
5616 // 1. The this pointer passed to the callee is an instance of a boxed value type.
5618 // 2. The this pointer passed to the callee is the current method's this pointer.
5619 // (and) The current method's this pointer is not "possibly written".
5621 // Thus the rule is that if you assign to this ANYWHERE you can't make "base" calls to
5622 // virtual methods. (Luckily this does not affect .ctors, since they are not virtual).
5623 // This is stronger than is strictly needed, but implementing a laxer rule is significantly
5624 // harder and more error prone.
5626 if (opcode == CEE_CALL && (mflags & CORINFO_FLG_VIRTUAL) && ((mflags & CORINFO_FLG_FINAL) == 0)
5627 #ifdef DEBUG
5628     && StrictCheckForNonVirtualCallToVirtualMethod()
5629 #endif
5630         )
5631 {
5632 if (info.compCompHnd->shouldEnforceCallvirtRestriction(info.compScopeHnd))
5635 tiThis.IsThisPtr() && lvaIsOriginalThisReadOnly() || verIsBoxedValueType(tiThis),
5636 "The 'this' parameter to the call must be either the calling method's 'this' parameter or "
5637 "a boxed value type.");
5642 // check any constraints on the callee's class and type parameters
5643 VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(pResolvedToken->hClass),
5644 "method has unsatisfied class constraints");
5645 VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(pResolvedToken->hClass, pResolvedToken->hMethod),
5646 "method has unsatisfied method constraints");
5648 if (mflags & CORINFO_FLG_PROTECTED)
5650 VerifyOrReturn(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
5651 "Can't access protected method");
5654 // Get the exact view of the signature for an array method
5655 if (sig->retType != CORINFO_TYPE_VOID)
5657 eeGetMethodSig(pResolvedToken->hMethod, sig, pResolvedToken->hClass);
5660 // "readonly." prefixed calls only allowed for the Address operation on arrays.
5661 // The methods supported by array types are under the control of the EE
5662 // so we can trust that only the Address operation returns a byref.
5665 typeInfo tiCalleeRetType = verMakeTypeInfo(sig->retType, sig->retTypeClass);
5666 VerifyOrReturn((methodClassFlgs & CORINFO_FLG_ARRAY) && tiCalleeRetType.IsByRef(),
5667 "unexpected use of readonly prefix");
5670 // Verify the tailcall
5673 verCheckTailCallConstraint(opcode, pResolvedToken, pConstrainedResolvedToken, false);
5677 /*****************************************************************************
5678 * Checks that a delegate creation is done using the following pattern:
5679 *     dup
5680 *     ldvirtftn targetMemberRef
5681 *  OR
5682 *     ldftn targetMemberRef
5684 * 'delegateCreateStart' points at the last dup or ldftn in this basic block (null if
5685 * not in this basic block)
5687 * targetMemberRef is read from the code sequence.
5688 * targetMemberRef is validated iff verificationNeeded.
5691 BOOL Compiler::verCheckDelegateCreation(const BYTE* delegateCreateStart,
5692 const BYTE* codeAddr,
5693 mdMemberRef& targetMemberRef)
5695 if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr))
5697 targetMemberRef = getU4LittleEndian(&delegateCreateStart[2]);
5700 else if (impIsDUP_LDVIRTFTN_TOKEN(delegateCreateStart, codeAddr))
5702 targetMemberRef = getU4LittleEndian(&delegateCreateStart[3]);
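    // Illustrative stylized sequence accepted here (closed instance delegate over
    // a virtual method; names are hypothetical):
    //
    //     dup
    //     ldvirtftn  instance void C::M()
    //     newobj     instance void D::.ctor(object, native int)
    //
    // delegateCreateStart points at the "dup" (1 byte) followed by the two-byte
    // ldvirtftn opcode, so the token sits 3 bytes in, matching the
    // getU4LittleEndian(&delegateCreateStart[3]) read above.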
5709 typeInfo Compiler::verVerifySTIND(const typeInfo& tiTo, const typeInfo& value, const typeInfo& instrType)
5711 Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
5712 typeInfo ptrVal = verVerifyLDIND(tiTo, instrType);
5713 typeInfo normPtrVal = typeInfo(ptrVal).NormaliseForStack();
5714 if (!tiCompatibleWith(value, normPtrVal, true))
5716 Verify(tiCompatibleWith(value, normPtrVal, true), "type mismatch");
5717 compUnsafeCastUsed = true;
5722 typeInfo Compiler::verVerifyLDIND(const typeInfo& ptr, const typeInfo& instrType)
5724 assert(!instrType.IsStruct());
5729 ptrVal = DereferenceByRef(ptr);
5730 if (instrType.IsObjRef() && !ptrVal.IsObjRef())
5732 Verify(false, "bad pointer");
5733 compUnsafeCastUsed = true;
5735 else if (!instrType.IsObjRef() && !typeInfo::AreEquivalent(instrType, ptrVal))
5737 Verify(false, "pointer not consistent with instr");
5738 compUnsafeCastUsed = true;
5743 Verify(false, "pointer not byref");
5744 compUnsafeCastUsed = true;
5750 // Verify that the field is used properly. 'tiThis' is NULL for statics,
5751 // 'fieldFlags' is the field's attributes, and 'mutator' is TRUE if it is a
5752 // ld*flda or a st*fld.
5753 // 'enclosingClass' is given if we are accessing a field in some specific type.
5755 void Compiler::verVerifyField(CORINFO_RESOLVED_TOKEN* pResolvedToken,
5756 const CORINFO_FIELD_INFO& fieldInfo,
5757 const typeInfo* tiThis,
5759 BOOL allowPlainStructAsThis)
5761 CORINFO_CLASS_HANDLE enclosingClass = pResolvedToken->hClass;
5762 unsigned fieldFlags = fieldInfo.fieldFlags;
5763 CORINFO_CLASS_HANDLE instanceClass =
5764 info.compClassHnd; // for statics, we imagine the instance is the current class.
5766 bool isStaticField = ((fieldFlags & CORINFO_FLG_FIELD_STATIC) != 0);
5769 Verify(!(fieldFlags & CORINFO_FLG_FIELD_UNMANAGED), "mutating an RVA-based static");
5770 if ((fieldFlags & CORINFO_FLG_FIELD_FINAL))
5772 Verify((info.compFlags & CORINFO_FLG_CONSTRUCTOR) && enclosingClass == info.compClassHnd &&
5773 info.compIsStatic == isStaticField,
5774 "bad use of initonly field (set or address taken)");
5778 if (tiThis == nullptr)
5780 Verify(isStaticField, "used static opcode with non-static field");
5784 typeInfo tThis = *tiThis;
5786 if (allowPlainStructAsThis && tThis.IsValueClass())
5791 // If it is null, we assume we can access it (since it will AV shortly)
5792 // If it is anything but a reference class, there is no hierarchy, so
5793 // again, we don't need the precise instance class to compute 'protected' access
5794 if (tiThis->IsType(TI_REF))
5796 instanceClass = tiThis->GetClassHandleForObjRef();
5799 // Note that even if the field is static, we require that the this pointer
5800 // satisfy the same constraints as a non-static field. This happens to
5801 // be simpler and seems reasonable
5802 typeInfo tiDeclaredThis = verMakeTypeInfo(enclosingClass);
5803 if (tiDeclaredThis.IsValueClass())
5805 tiDeclaredThis.MakeByRef();
5807 // We allow a read-only tThis on any field access (even stores!), because if the
5808 // class implementor wants to prohibit stores, they should make the field private.
5809 // We do this by setting the read-only bit on the type we compare tThis to.
5810 tiDeclaredThis.SetIsReadonlyByRef();
5812 else if (verTrackObjCtorInitState && tThis.IsThisPtr())
5814 // Any field access is legal on "uninitialized" this pointers.
5815 // The easiest way to implement this is to simply set the
5816 // initialized bit for the duration of the type check on the
5817 // field access only. It does not change the state of the "this"
5818 // for the function as a whole. Note that the "tThis" is a copy
5819 // of the original "this" type (*tiThis) passed in.
5820 tThis.SetInitialisedObjRef();
5823 Verify(tiCompatibleWith(tThis, tiDeclaredThis, true), "this type mismatch");
5826 // Presently the JIT does not check that we don't store or take the address of init-only fields
5827 // since we cannot guarantee their immutability and it is not a security issue.
5829 // check any constraints on the field's class --- accessing the field might cause a class constructor to run.
5830 VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(enclosingClass),
5831 "field has unsatisfied class constraints");
5832 if (fieldFlags & CORINFO_FLG_FIELD_PROTECTED)
5834 Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClass),
5835 "Accessing protected method through wrong type.");
5839 void Compiler::verVerifyCond(const typeInfo& tiOp1, const typeInfo& tiOp2, unsigned opcode)
5841 if (tiOp1.IsNumberType())
5843 #ifdef _TARGET_64BIT_
5844 Verify(tiCompatibleWith(tiOp1, tiOp2, true), "Cond type mismatch");
5845 #else // _TARGET_64BIT
5846 // [10/17/2013] Consider changing this: to put on my verification lawyer hat,
5847 // this is non-conforming to the ECMA Spec: types don't have to be equivalent,
5848 // but compatible, since we can coalesce native int with int32 (see section III.1.5).
5849 Verify(typeInfo::AreEquivalent(tiOp1, tiOp2), "Cond type mismatch");
5850 #endif // !_TARGET_64BIT_
5852 else if (tiOp1.IsObjRef())
5864 Verify(FALSE, "Cond not allowed on object types");
5866 Verify(tiOp2.IsObjRef(), "Cond type mismatch");
5868 else if (tiOp1.IsByRef())
5870 Verify(tiOp2.IsByRef(), "Cond type mismatch");
5874 Verify(tiOp1.IsMethod() && tiOp2.IsMethod(), "Cond type mismatch");
5878 void Compiler::verVerifyThisPtrInitialised()
5880 if (verTrackObjCtorInitState)
5882 Verify(verCurrentState.thisInitialized == TIS_Init, "this ptr is not initialized");
5886 BOOL Compiler::verIsCallToInitThisPtr(CORINFO_CLASS_HANDLE context, CORINFO_CLASS_HANDLE target)
5888 // Either target == context, in which case we are calling an alternate .ctor,
5889 // or target is the immediate parent of context.
5891 return ((target == context) || (target == info.compCompHnd->getParentType(context)));
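// impImportLdvirtftn: build the tree that computes the runtime target address of
// a virtual method for LDVIRTFTN. Depending on the ABI and ReadyToRun mode this
// is a helper call or a runtime-lookup sequence; the general case calls
// CORINFO_HELP_VIRTUAL_FUNC_PTR with (thisPtr, exactTypeDesc, exactMethodDesc).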
5894 GenTree* Compiler::impImportLdvirtftn(GenTree* thisPtr,
5895 CORINFO_RESOLVED_TOKEN* pResolvedToken,
5896 CORINFO_CALL_INFO* pCallInfo)
5898 if ((pCallInfo->methodFlags & CORINFO_FLG_EnC) && !(pCallInfo->classFlags & CORINFO_FLG_INTERFACE))
5900 NO_WAY("Virtual call to a function added via EnC is not supported");
5903 // CoreRT generic virtual method
5904 if ((pCallInfo->sig.sigInst.methInstCount != 0) && IsTargetAbi(CORINFO_CORERT_ABI))
5906 GenTree* runtimeMethodHandle = nullptr;
5907 if (pCallInfo->exactContextNeedsRuntimeLookup)
5909 runtimeMethodHandle =
5910 impRuntimeLookupToTree(pResolvedToken, &pCallInfo->codePointerLookup, pCallInfo->hMethod);
5914 runtimeMethodHandle = gtNewIconEmbMethHndNode(pResolvedToken->hMethod);
5916 return gtNewHelperCallNode(CORINFO_HELP_GVMLOOKUP_FOR_SLOT, TYP_I_IMPL,
5917 gtNewArgList(thisPtr, runtimeMethodHandle));
5920 #ifdef FEATURE_READYTORUN_COMPILER
5921 if (opts.IsReadyToRun())
5923 if (!pCallInfo->exactContextNeedsRuntimeLookup)
5926 gtNewHelperCallNode(CORINFO_HELP_READYTORUN_VIRTUAL_FUNC_PTR, TYP_I_IMPL, gtNewArgList(thisPtr));
5928 call->setEntryPoint(pCallInfo->codePointerLookup.constLookup);
5933 // We need a runtime lookup. CoreRT has a ReadyToRun helper for that too.
5934 if (IsTargetAbi(CORINFO_CORERT_ABI))
5936 GenTree* ctxTree = getRuntimeContextTree(pCallInfo->codePointerLookup.lookupKind.runtimeLookupKind);
5938 return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL,
5939 gtNewArgList(ctxTree), &pCallInfo->codePointerLookup.lookupKind);
5944 // Get the exact descriptor for the static callsite
5945 GenTree* exactTypeDesc = impParentClassTokenToHandle(pResolvedToken);
5946 if (exactTypeDesc == nullptr)
5947 { // compDonotInline()
5951 GenTree* exactMethodDesc = impTokenToHandle(pResolvedToken);
5952 if (exactMethodDesc == nullptr)
5953 { // compDonotInline()
5957 GenTreeArgList* helpArgs = gtNewArgList(exactMethodDesc);
5959 helpArgs = gtNewListNode(exactTypeDesc, helpArgs);
5961 helpArgs = gtNewListNode(thisPtr, helpArgs);
5963 // Call helper function. This gets the target address of the final destination callsite.
5965 return gtNewHelperCallNode(CORINFO_HELP_VIRTUAL_FUNC_PTR, TYP_I_IMPL, helpArgs);
5968 //------------------------------------------------------------------------
5969 // impImportAndPushBox: build and import a value-type box
5972 // pResolvedToken - resolved token from the box operation
5978 // The value to be boxed is popped from the stack, and a tree for
5979 // the boxed value is pushed. This method may create upstream
5980 // statements, spill side effecting trees, and create new temps.
5982 // If importing an inlinee, we may also discover the inline must
5983 // fail. If so there is no new value pushed on the stack. Callers
5984 // should use CompDoNotInline after calling this method to see if
5985 // ongoing importation should be aborted.
5988 // Boxing of ref classes results in the same value as the value on
5989 // the top of the stack, so is handled inline in impImportBlockCode
5990 // for the CEE_BOX case. Only value or primitive type boxes make it
5993 // Boxing for nullable types is done via a helper call; boxing
5994 // of other value types is expanded inline or handled via helper
5995 // call, depending on the jit's codegen mode.
5997 // When the jit is operating in size and time constrained modes,
5998 // using a helper call here can save jit time and code size. But it
5999 // also may inhibit cleanup optimizations that could have had an even
6000 // greater benefit on code size and jit time. An optimal
6001 // strategy may need to peek ahead and see if it is easy to tell how
6002 // the box is being used. For now, we defer.
6004 void Compiler::impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken)
6006 // Spill any special side effects
6007 impSpillSpecialSideEff();
6009 // Get the expression to box from the stack.
6010 GenTree* op1 = nullptr;
6011 GenTree* op2 = nullptr;
6012 StackEntry se = impPopStack();
6013 CORINFO_CLASS_HANDLE operCls = se.seTypeInfo.GetClassHandle();
6014 GenTree* exprToBox = se.val;
6016 // Look at what helper we should use.
6017 CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pResolvedToken->hClass);
6019 // Determine what expansion to prefer.
6021 // In size/time/debuggable constrained modes, the helper call
6022 // expansion for box is generally smaller and is preferred, unless
6023 // the value to box is a struct that comes from a call. In that
6024 // case the call can construct its return value directly into the
6025 // box payload, saving possibly some up-front zeroing.
6027 // Currently primitive type boxes always get inline expanded. We may
6028 // want to do the same for small structs if they don't come from
6029 // calls and don't have GC pointers, since explicitly copying such
6030 // structs is cheap.
6031 JITDUMP("\nCompiler::impImportAndPushBox -- handling BOX(value class) via");
6032 bool canExpandInline = (boxHelper == CORINFO_HELP_BOX);
6033 bool optForSize = !exprToBox->IsCall() && (operCls != nullptr) && (opts.compDbgCode || opts.MinOpts());
6034 bool expandInline = canExpandInline && !optForSize;
6038 JITDUMP(" inline allocate/copy sequence\n");
6040 // we are doing 'normal' boxing. This means that we can inline the box operation
6041 // Box(expr) gets morphed into
6042 // temp = new(clsHnd)
6043 // cpobj(temp+4, expr, clsHnd)
6045 // The code paths differ slightly below for structs and primitives because
6046 // "cpobj" differs in these cases. In one case you get
6047 // impAssignStructPtr(temp+4, expr, clsHnd)
6048 // and the other you get
6051 if (opts.MinOpts() || opts.compDbgCode)
6053 // For minopts/debug code, try and minimize the total number
6054 // of box temps by reusing an existing temp when possible.
6055 if (impBoxTempInUse || impBoxTemp == BAD_VAR_NUM)
6057 impBoxTemp = lvaGrabTemp(true DEBUGARG("Reusable Box Helper"));
6062 // When optimizing, use a new temp for each box operation
6063 // since we then know the exact class of the box temp.
6064 impBoxTemp = lvaGrabTemp(true DEBUGARG("Single-def Box Helper"));
6065 lvaTable[impBoxTemp].lvType = TYP_REF;
6066 lvaTable[impBoxTemp].lvSingleDef = 1;
6067 JITDUMP("Marking V%02u as a single def local\n", impBoxTemp);
6068 const bool isExact = true;
6069 lvaSetClass(impBoxTemp, pResolvedToken->hClass, isExact);
6072 // impBoxTemp needs to stay in use until this box expression is appended to
6073 // some other node. We approximate this by keeping it alive until
6074 // the opcode stack becomes empty
6075 impBoxTempInUse = true;
6077 #ifdef FEATURE_READYTORUN_COMPILER
6078 bool usingReadyToRunHelper = false;
6080 if (opts.IsReadyToRun())
6082 op1 = impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_NEW, TYP_REF);
6083 usingReadyToRunHelper = (op1 != nullptr);
6086 if (!usingReadyToRunHelper)
6089 // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
6090 // and the newfast call with a single call to a dynamic R2R cell that will:
6091 // 1) Load the context
6092 // 2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
6093 // 3) Allocate and return the new object for boxing
6094 // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
6096 // Ensure that the value class is restored
6097 op2 = impTokenToHandle(pResolvedToken, nullptr, TRUE /* mustRestoreHandle */);
6100 // We must be backing out of an inline.
6101 assert(compDonotInline());
6105 op1 = gtNewAllocObjNode(info.compCompHnd->getNewHelper(pResolvedToken, info.compMethodHnd),
6106 pResolvedToken->hClass, TYP_REF, op2);
6109 /* Remember that this basic block contains 'new' of an object, and so does this method */
6110 compCurBB->bbFlags |= BBF_HAS_NEWOBJ;
6111 optMethodFlags |= OMF_HAS_NEWOBJ;
6113 GenTree* asg = gtNewTempAssign(impBoxTemp, op1);
6115 GenTree* asgStmt = impAppendTree(asg, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
6117 op1 = gtNewLclvNode(impBoxTemp, TYP_REF);
6118 op2 = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL);
6119 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, op2);
6121 if (varTypeIsStruct(exprToBox))
6123 assert(info.compCompHnd->getClassSize(pResolvedToken->hClass) == info.compCompHnd->getClassSize(operCls));
6124 op1 = impAssignStructPtr(op1, exprToBox, operCls, (unsigned)CHECK_SPILL_ALL);
6128 var_types lclTyp = exprToBox->TypeGet();
6129 if (lclTyp == TYP_BYREF)
6131 lclTyp = TYP_I_IMPL;
6133 CorInfoType jitType = info.compCompHnd->asCorInfoType(pResolvedToken->hClass);
6134 if (impIsPrimitive(jitType))
6136 lclTyp = JITtype2varType(jitType);
6138 assert(genActualType(exprToBox->TypeGet()) == genActualType(lclTyp) ||
6139 varTypeIsFloating(lclTyp) == varTypeIsFloating(exprToBox->TypeGet()));
6140 var_types srcTyp = exprToBox->TypeGet();
6141 var_types dstTyp = lclTyp;
6143 if (srcTyp != dstTyp)
6145 assert((varTypeIsFloating(srcTyp) && varTypeIsFloating(dstTyp)) ||
6146 (varTypeIsIntegral(srcTyp) && varTypeIsIntegral(dstTyp)));
6147 exprToBox = gtNewCastNode(dstTyp, exprToBox, false, dstTyp);
6149 op1 = gtNewAssignNode(gtNewOperNode(GT_IND, lclTyp, op1), exprToBox);
6152 // Spill eval stack to flush out any pending side effects.
6153 impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportAndPushBox"));
6155 // Set up this copy as a second assignment.
6156 GenTree* copyStmt = impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
6158 op1 = gtNewLclvNode(impBoxTemp, TYP_REF);
6160 // Record that this is a "box" node and keep track of the matching parts.
6161 op1 = new (this, GT_BOX) GenTreeBox(TYP_REF, op1, asgStmt, copyStmt);
6163 // If it is a value class, mark the "box" node. We can use this information
6164 // to optimise several cases:
6165 // "box(x) == null" --> false
6166 // "(box(x)).CallAnInterfaceMethod(...)" --> "(&x).CallAValueTypeMethod"
6167 // "(box(x)).CallAnObjectMethod(...)" --> "(&x).CallAValueTypeMethod"
6169 op1->gtFlags |= GTF_BOX_VALUE;
6170 assert(op1->IsBoxedValue());
6171 assert(asg->gtOper == GT_ASG);
6175 // Don't optimize, just call the helper and be done with it.
6176 JITDUMP(" helper call because: %s\n", canExpandInline ? "optimizing for size" : "nullable");
6177 assert(operCls != nullptr);
6179 // Ensure that the value class is restored
6180 op2 = impTokenToHandle(pResolvedToken, nullptr, TRUE /* mustRestoreHandle */);
6183 // We must be backing out of an inline.
6184 assert(compDonotInline());
6188 GenTreeArgList* args = gtNewArgList(op2, impGetStructAddr(exprToBox, operCls, (unsigned)CHECK_SPILL_ALL, true));
6189 op1 = gtNewHelperCallNode(boxHelper, TYP_REF, args);
6192 /* Push the result back on the stack, */
6193 /* even if clsHnd is a value class we want the TI_REF */
6194 typeInfo tiRetVal = typeInfo(TI_REF, info.compCompHnd->getTypeForBox(pResolvedToken->hClass));
6195 impPushOnStack(op1, tiRetVal);
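// For illustration (not from this file): given 'int x', the C# statement
// 'object o = x;' compiles to 'ldloc x; box int32', which imports here as either
// the inline allocate/copy sequence or a box helper call, per the policy above.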
6198 //------------------------------------------------------------------------
6199 // impImportNewObjArray: Build and import `new` of multi-dimensional array
6202 // pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized
6203 // by a call to CEEInfo::resolveToken().
6204 // pCallInfo - The CORINFO_CALL_INFO that has been initialized
6205 // by a call to CEEInfo::getCallInfo().
6208 // The multi-dimensional array constructor arguments (array dimensions) are
6209 // pushed on the IL stack on entry to this method.
6212 // Multi-dimensional array constructors are imported as calls to a JIT
6213 // helper, not as regular calls.
6215 void Compiler::impImportNewObjArray(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo)
6217 GenTree* classHandle = impParentClassTokenToHandle(pResolvedToken);
6218 if (classHandle == nullptr)
6219 { // compDonotInline()
6223 assert(pCallInfo->sig.numArgs);
6226 GenTreeArgList* args;
6229 // There are two different JIT helpers that can be used to allocate
6230 // multi-dimensional arrays:
6232 // - CORINFO_HELP_NEW_MDARR - takes the array dimensions as varargs.
6233 // This variant is deprecated. It should eventually be removed.
6235 // - CORINFO_HELP_NEW_MDARR_NONVARARG - takes the array dimensions as
6236 // pointer to block of int32s. This variant is more portable.
6238 // The non-varargs helper is enabled for CoreRT only for now. Enabling this
6239 // unconditionally would require ReadyToRun version bump.
6241 CLANG_FORMAT_COMMENT_ANCHOR;
6243 if (!opts.IsReadyToRun() || IsTargetAbi(CORINFO_CORERT_ABI))
6246 // Reuse the temp used to pass the array dimensions to avoid bloating
6247 // the stack frame in case there are multiple calls to multi-dim array
6248 // constructors within a single method.
6249 if (lvaNewObjArrayArgs == BAD_VAR_NUM)
6251 lvaNewObjArrayArgs = lvaGrabTemp(false DEBUGARG("NewObjArrayArgs"));
6252 lvaTable[lvaNewObjArrayArgs].lvType = TYP_BLK;
6253 lvaTable[lvaNewObjArrayArgs].lvExactSize = 0;
6256 // Increase size of lvaNewObjArrayArgs to be the largest size needed to hold 'numArgs' integers
6257 // for our call to CORINFO_HELP_NEW_MDARR_NONVARARG.
6258 lvaTable[lvaNewObjArrayArgs].lvExactSize =
6259 max(lvaTable[lvaNewObjArrayArgs].lvExactSize, pCallInfo->sig.numArgs * sizeof(INT32));
6261 // The side-effects may include allocation of more multi-dimensional arrays. Spill all side-effects
6262 // to ensure that the shared lvaNewObjArrayArgs local variable is only ever used to pass arguments
6263 // to one allocation at a time.
6264 impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportNewObjArray"));
6267 // The arguments of the CORINFO_HELP_NEW_MDARR_NONVARARG helper are:
6268 // - Array class handle
6269 // - Number of dimension arguments
6270 // - Pointer to block of int32 dimensions - address of lvaNewObjArrayArgs temp.
6273 node = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK);
6274 node = gtNewOperNode(GT_ADDR, TYP_I_IMPL, node);
6276 // Pop dimension arguments from the stack one at a time and store them
6277 // into the lvaNewObjArrayArgs temp.
6278 for (int i = pCallInfo->sig.numArgs - 1; i >= 0; i--)
6280 GenTree* arg = impImplicitIorI4Cast(impPopStack().val, TYP_INT);
6282 GenTree* dest = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK);
6283 dest = gtNewOperNode(GT_ADDR, TYP_I_IMPL, dest);
6284 dest = gtNewOperNode(GT_ADD, TYP_I_IMPL, dest,
6285 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(INT32) * i));
6286 dest = gtNewOperNode(GT_IND, TYP_INT, dest);
6288 node = gtNewOperNode(GT_COMMA, node->TypeGet(), gtNewAssignNode(dest, arg), node);
6291 args = gtNewArgList(node);
6293 // pass number of arguments to the helper
6294 args = gtNewListNode(gtNewIconNode(pCallInfo->sig.numArgs), args);
6296 args = gtNewListNode(classHandle, args);
6298 node = gtNewHelperCallNode(CORINFO_HELP_NEW_MDARR_NONVARARG, TYP_REF, args);
6303 // The varargs helper needs the type and method handles as last
6304 // and last-1 param (this is a cdecl call, so args will be
6305 // pushed in reverse order on the CPU stack)
6308 args = gtNewArgList(classHandle);
6310 // pass number of arguments to the helper
6311 args = gtNewListNode(gtNewIconNode(pCallInfo->sig.numArgs), args);
6313 unsigned argFlags = 0;
6314 args = impPopList(pCallInfo->sig.numArgs, &pCallInfo->sig, args);
6316 node = gtNewHelperCallNode(CORINFO_HELP_NEW_MDARR, TYP_REF, args);
6318 // varargs, so we pop the arguments
6319 node->gtFlags |= GTF_CALL_POP_ARGS;
6322 // At the present time we don't track Caller pop arguments
6323 // that have GC references in them
6324 for (GenTreeArgList* temp = args; temp; temp = temp->Rest())
6326 assert(temp->Current()->gtType != TYP_REF);
6331 node->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
6332 node->gtCall.compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)pResolvedToken->hClass;
6334 // Remember that this basic block contains 'new' of a md array
6335 compCurBB->bbFlags |= BBF_HAS_NEWARRAY;
6337 impPushOnStack(node, typeInfo(TI_REF, pResolvedToken->hClass));
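// For illustration (not from this file): C# 'new int[2, 3]' compiles to
//     ldc.i4.2
//     ldc.i4.3
//     newobj instance void int32[0...,0...]::.ctor(int32, int32)
// and is imported above as a call to one of the CORINFO_HELP_NEW_MDARR* helpers
// rather than as a regular constructor call.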
6340 GenTree* Compiler::impTransformThis(GenTree* thisPtr,
6341 CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
6342 CORINFO_THIS_TRANSFORM transform)
6346 case CORINFO_DEREF_THIS:
6348 GenTree* obj = thisPtr;
6350 // This does a LDIND on the obj, which should be a byref pointing to a ref
6351 impBashVarAddrsToI(obj);
6352 assert(genActualType(obj->gtType) == TYP_I_IMPL || obj->gtType == TYP_BYREF);
6353 CorInfoType constraintTyp = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass);
6355 obj = gtNewOperNode(GT_IND, JITtype2varType(constraintTyp), obj);
6356 // ldind could point anywhere, for example a boxed class static int
6357 obj->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
6362 case CORINFO_BOX_THIS:
6364 // Constraint calls where there might be no
6365 // unboxed entry point require us to implement the call via helper.
6366 // These only occur when a possible target of the call
6367 // may have inherited an implementation of an interface
6368 // method from System.Object or System.ValueType. The EE does not provide us with
6369 // "unboxed" versions of these methods.
6371 GenTree* obj = thisPtr;
6373 assert(obj->TypeGet() == TYP_BYREF || obj->TypeGet() == TYP_I_IMPL);
6374 obj = gtNewObjNode(pConstrainedResolvedToken->hClass, obj);
6375 obj->gtFlags |= GTF_EXCEPT;
6377 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass);
6378 var_types objType = JITtype2varType(jitTyp);
6379 if (impIsPrimitive(jitTyp))
6381 if (obj->OperIsBlk())
6383 obj->ChangeOperUnchecked(GT_IND);
6385 // Obj could point anywhere, for example a boxed class static int
6386 obj->gtFlags |= GTF_IND_TGTANYWHERE;
6387 obj->gtOp.gtOp2 = nullptr; // must be zero for tree walkers
6390 obj->gtType = JITtype2varType(jitTyp);
6391 assert(varTypeIsArithmetic(obj->gtType));
6394 // This pushes on the dereferenced byref
6395 // This is then used immediately to box.
6396 impPushOnStack(obj, verMakeTypeInfo(pConstrainedResolvedToken->hClass).NormaliseForStack());
6398 // This pops off the byref-to-a-value-type remaining on the stack and
6399 // replaces it with a boxed object.
6400 // This is then used as the object to the virtual call immediately below.
6401 impImportAndPushBox(pConstrainedResolvedToken);
6402 if (compDonotInline())
6407 obj = impPopStack().val;
6410 case CORINFO_NO_THIS_TRANSFORM:
6416 //------------------------------------------------------------------------
6417 // impCanPInvokeInline: check whether PInvoke inlining should be enabled in the current method.
6420 // true if PInvoke inlining should be enabled in the current method, false otherwise
6423 // Checks a number of ambient conditions where we could pinvoke but choose not to
6425 bool Compiler::impCanPInvokeInline()
6427 return getInlinePInvokeEnabled() && (!opts.compDbgCode) && (compCodeOpt() != SMALL_CODE) &&
6428 (!opts.compNoPInvokeInlineCB) // profiler is preventing inline pinvoke
6432 //------------------------------------------------------------------------
6433 // impCanPInvokeInlineCallSite: basic legality checks using information
6434 // from a call to see if the call qualifies as an inline pinvoke.
6437 // block - block containing the call, or for inlinees, block
6438 // containing the call being inlined
6441 // true if this call can legally qualify as an inline pinvoke, false otherwise
6444 // For runtimes that support exception handling interop there are
6445 // restrictions on using inline pinvoke in handler regions.
6447 // * We have to disable pinvoke inlining inside of filters because
6448 // in case the main execution (i.e. in the try block) is inside
6449 // unmanaged code, we cannot reuse the inlined stub (we still need
6450 // the original state until we are in the catch handler)
6452 // * We disable pinvoke inlining inside handlers since the GSCookie
6453 // is in the inlined Frame (see
6454 // CORINFO_EE_INFO::InlinedCallFrameInfo::offsetOfGSCookie), but
6455 // this would not protect framelets/return-address of handlers.
6457 // These restrictions are currently also in place for CoreCLR but
6458 // can be relaxed when coreclr/#8459 is addressed.
6460 bool Compiler::impCanPInvokeInlineCallSite(BasicBlock* block)
6462 if (block->hasHndIndex())
6467 // The remaining limitations do not apply to CoreRT
6468 if (IsTargetAbi(CORINFO_CORERT_ABI))
6473 #ifdef _TARGET_AMD64_
6474 // On x64, we disable pinvoke inlining inside of try regions.
6475 // Here is the comment from JIT64 explaining why:
6477 // [VSWhidbey: 611015] - because the jitted code links in the
6478 // Frame (instead of the stub) we rely on the Frame not being
6479 // 'active' until inside the stub. This normally happens by the
6480 // stub setting the return address pointer in the Frame object
6481 // inside the stub. On a normal return, the return address
6482 // pointer is zeroed out so the Frame can be safely re-used, but
6483 // if an exception occurs, nobody zeros out the return address
6484 // pointer. Thus if we re-used the Frame object, it would go
6485 // 'active' as soon as we link it into the Frame chain.
6487 // Technically we only need to disable PInvoke inlining if we're
6488 // in a handler or if we're in a try body with a catch or
6489 // filter/except where other non-handler code in this method
6490 // might run and try to re-use the dirty Frame object.
6492 // A desktop test case where this seems to matter is
6493 // jit\jit64\ebvts\mcpp\sources2\ijw\__clrcall\vector_ctor_dtor.02\deldtor_clr.exe
6494 if (block->hasTryIndex())
6498 #endif // _TARGET_AMD64_
6503 //------------------------------------------------------------------------
6504 // impCheckForPInvokeCall: examine a call to see if it is a pinvoke and, if so,
6505 // whether it can be expressed as an inline pinvoke.
6508 // call - tree for the call
6509 // methHnd - handle for the method being called (may be null)
6510 // sig - signature of the method being called
6511 // mflags - method flags for the method being called
6512 // block - block containing the call, or for inlinees, block
6513 // containing the call being inlined
6516 // Sets GTF_CALL_M_PINVOKE on the call for pinvokes.
6518 // Also sets GTF_CALL_UNMANAGED on call for inline pinvokes if the
6519 // call passes a combination of legality and profitability checks.
6521 // If GTF_CALL_UNMANAGED is set, increments info.compCallUnmanaged
6523 void Compiler::impCheckForPInvokeCall(
6524 GenTreeCall* call, CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* sig, unsigned mflags, BasicBlock* block)
6526 CorInfoUnmanagedCallConv unmanagedCallConv;
6528 // If VM flagged it as Pinvoke, flag the call node accordingly
6529 if ((mflags & CORINFO_FLG_PINVOKE) != 0)
6531 call->gtCallMoreFlags |= GTF_CALL_M_PINVOKE;
6536 if ((mflags & CORINFO_FLG_PINVOKE) == 0 || (mflags & CORINFO_FLG_NOSECURITYWRAP) == 0)
6541 unmanagedCallConv = info.compCompHnd->getUnmanagedCallConv(methHnd);
6545 CorInfoCallConv callConv = CorInfoCallConv(sig->callConv & CORINFO_CALLCONV_MASK);
6546 if (callConv == CORINFO_CALLCONV_NATIVEVARARG)
6548 // Used by the IL Stubs.
6549 callConv = CORINFO_CALLCONV_C;
6551 static_assert_no_msg((unsigned)CORINFO_CALLCONV_C == (unsigned)CORINFO_UNMANAGED_CALLCONV_C);
6552 static_assert_no_msg((unsigned)CORINFO_CALLCONV_STDCALL == (unsigned)CORINFO_UNMANAGED_CALLCONV_STDCALL);
6553 static_assert_no_msg((unsigned)CORINFO_CALLCONV_THISCALL == (unsigned)CORINFO_UNMANAGED_CALLCONV_THISCALL);
6554 unmanagedCallConv = CorInfoUnmanagedCallConv(callConv);
6556 assert(!call->gtCallCookie);
6559 if (unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_C && unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_STDCALL &&
6560 unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_THISCALL)
6564 optNativeCallCount++;
6566 if (methHnd == nullptr && (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) || IsTargetAbi(CORINFO_CORERT_ABI)))
6568 // PInvoke in CoreRT ABI must always be inlined. Non-inlineable CALLI cases have been
6569 // converted to regular method calls earlier using convertPInvokeCalliToCall.
6571 // PInvoke CALLI in IL stubs must be inlined
6576 if (!impCanPInvokeInlineCallSite(block))
6581 // Legal PInvoke CALL in PInvoke IL stubs must be inlined to avoid infinite recursive
6582 // inlining in CoreRT. Skip the ambient conditions checks and profitability checks.
6583 if (!IsTargetAbi(CORINFO_CORERT_ABI) || (info.compFlags & CORINFO_FLG_PINVOKE) == 0)
6585 if (!impCanPInvokeInline())
6590 // Size-speed tradeoff: don't use inline pinvoke at rarely
6591 // executed call sites. The non-inline version is more compact.
6593 if (block->isRunRarely())
6599 // The expensive check should be last
6600 if (info.compCompHnd->pInvokeMarshalingRequired(methHnd, sig))
6606 JITLOG((LL_INFO1000000, "\nInline a CALLI PINVOKE call from method %s", info.compFullName));
6608 call->gtFlags |= GTF_CALL_UNMANAGED;
6609 info.compCallUnmanaged++;
6611 // AMD64 convention is same for native and managed
6612 if (unmanagedCallConv == CORINFO_UNMANAGED_CALLCONV_C)
6614 call->gtFlags |= GTF_CALL_POP_ARGS;
6617 if (unmanagedCallConv == CORINFO_UNMANAGED_CALLCONV_THISCALL)
6619 call->gtCallMoreFlags |= GTF_CALL_M_UNMGD_THISCALL;
6623 GenTreeCall* Compiler::impImportIndirectCall(CORINFO_SIG_INFO* sig, IL_OFFSETX ilOffset)
6625 var_types callRetTyp = JITtype2varType(sig->retType);
6627 /* The function pointer is on top of the stack - It may be a
6628 * complex expression. As it is evaluated after the args,
6629 * it may cause registered args to be spilled. Simply spill it.
6632 // Ignore this trivial case.
6633 if (impStackTop().val->gtOper != GT_LCL_VAR)
6635 impSpillStackEntry(verCurrentState.esStackDepth - 1,
6636 BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impImportIndirectCall"));
6639 /* Get the function pointer */
6641 GenTree* fptr = impPopStack().val;
6643 // The function pointer is typically sized to match the target pointer size
6644 // However, stubgen IL optimization can change LDC.I8 to LDC.I4
6645 // See ILCodeStream::LowerOpcode
6646 assert(genActualType(fptr->gtType) == TYP_I_IMPL || genActualType(fptr->gtType) == TYP_INT);
6649 // This temporary must never be converted to a double in stress mode,
6650 // because that can introduce a call to the cast helper after the
6651 // arguments have already been evaluated.
6653 if (fptr->OperGet() == GT_LCL_VAR)
6655 lvaTable[fptr->gtLclVarCommon.gtLclNum].lvKeepType = 1;
6659 /* Create the call node */
6661 GenTreeCall* call = gtNewIndCallNode(fptr, callRetTyp, nullptr, ilOffset);
6663 call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
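// For illustration (not from this file): a typical CALLI sequence pushes the
// arguments and then the function pointer, e.g.
//     ldarg.0
//     ldftn   int32 C::Target(int32)
//     calli   int32(int32)
// so the pointer is popped above after any needed spilling of the stack.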
6668 /*****************************************************************************/
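// impPopArgsForUnmanagedCall: pop the arguments of an inline pinvoke off the IL
// stack in the order the native calling convention expects, spilling any
// out-of-order side effects first. For "thiscall" the first argument is passed
// in a register and keeps its position.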
6670 void Compiler::impPopArgsForUnmanagedCall(GenTree* call, CORINFO_SIG_INFO* sig)
6672 assert(call->gtFlags & GTF_CALL_UNMANAGED);
6674 /* Since we push the arguments in reverse order (i.e. right -> left)
6675 * spill any side effects from the stack
6677 * OBS: If there is only one side effect we do not need to spill it;
6678 * thus we have to spill all side effects except the last one
6681 unsigned lastLevelWithSideEffects = UINT_MAX;
6683 unsigned argsToReverse = sig->numArgs;
6685 // For "thiscall", the first argument goes in a register. Since its
6686 // order does not need to be changed, we do not need to spill it
6688 if (call->gtCall.gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
6690 assert(argsToReverse);
6694 #ifndef _TARGET_X86_
6695 // Don't reverse args on ARM or x64 - first four args always placed in regs in order
6699 for (unsigned level = verCurrentState.esStackDepth - argsToReverse; level < verCurrentState.esStackDepth; level++)
6701 if (verCurrentState.esStack[level].val->gtFlags & GTF_ORDER_SIDEEFF)
6703 assert(lastLevelWithSideEffects == UINT_MAX);
6705 impSpillStackEntry(level,
6706 BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - other side effect"));
6708 else if (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT)
6710 if (lastLevelWithSideEffects != UINT_MAX)
6712 /* We had a previous side effect - must spill it */
6713 impSpillStackEntry(lastLevelWithSideEffects,
6714 BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - side effect"));
6716 /* Record the level for the current side effect in case we will spill it */
6717 lastLevelWithSideEffects = level;
6721 /* This is the first side effect encountered - record its level */
6723 lastLevelWithSideEffects = level;
6728 /* The argument list is now "clean" - no out-of-order side effects
6729 * Pop the argument list in reverse order */
6731 GenTree* args = call->gtCall.gtCallArgs = impPopRevList(sig->numArgs, sig, sig->numArgs - argsToReverse);
6733 if (call->gtCall.gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
6735 GenTree* thisPtr = args->Current();
6736 impBashVarAddrsToI(thisPtr);
6737 assert(thisPtr->TypeGet() == TYP_I_IMPL || thisPtr->TypeGet() == TYP_BYREF);
6742 call->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
6746 //------------------------------------------------------------------------
6747 // impInitClass: Build a node to initialize the class before accessing the
6748 // field if necessary
6751 // pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized
6752 // by a call to CEEInfo::resolveToken().
6754 // Return Value: If needed, a pointer to the node that will perform the class
6755 // initialization. Otherwise, nullptr.
6758 GenTree* Compiler::impInitClass(CORINFO_RESOLVED_TOKEN* pResolvedToken)
6760 CorInfoInitClassResult initClassResult =
6761 info.compCompHnd->initClass(pResolvedToken->hField, info.compMethodHnd, impTokenLookupContextHandle);
6763 if ((initClassResult & CORINFO_INITCLASS_USE_HELPER) == 0)
6769 GenTree* node = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup);
6771 if (node == nullptr)
6773 assert(compDonotInline());
6779 node = gtNewHelperCallNode(CORINFO_HELP_INITCLASS, TYP_VOID, gtNewArgList(node));
6783 // Call the shared non-gc static helper, as it's the fastest
6784 node = fgGetSharedCCtor(pResolvedToken->hClass);
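// impImportStaticReadOnlyField: read the current value of a static read-only
// field at jit time from 'fldAddr' and import it as a constant node of type
// 'lclTyp'.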
6790 GenTree* Compiler::impImportStaticReadOnlyField(void* fldAddr, var_types lclTyp)
6792 GenTree* op1 = nullptr;
6801 ival = *((bool*)fldAddr);
6805 ival = *((signed char*)fldAddr);
6809 ival = *((unsigned char*)fldAddr);
6813 ival = *((short*)fldAddr);
6817 ival = *((unsigned short*)fldAddr);
6822 ival = *((int*)fldAddr);
6824 op1 = gtNewIconNode(ival);
6829 lval = *((__int64*)fldAddr);
6830 op1 = gtNewLconNode(lval);
6834 dval = *((float*)fldAddr);
6835 op1 = gtNewDconNode(dval);
6836 op1->gtType = TYP_FLOAT;
6840 dval = *((double*)fldAddr);
6841 op1 = gtNewDconNode(dval);
6845 assert(!"Unexpected lclTyp");
6852 GenTree* Compiler::impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolvedToken,
6853 CORINFO_ACCESS_FLAGS access,
6854 CORINFO_FIELD_INFO* pFieldInfo,
6859 switch (pFieldInfo->fieldAccessor)
6861 case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
6863 assert(!compIsForInlining());
6865 // We first call a special helper to get the statics base pointer
6866 op1 = impParentClassTokenToHandle(pResolvedToken);
6868 // compIsForInlining() is false so we should never get NULL here
6869 assert(op1 != nullptr);
6871 var_types type = TYP_BYREF;
6873 switch (pFieldInfo->helper)
6875 case CORINFO_HELP_GETGENERICS_NONGCTHREADSTATIC_BASE:
6878 case CORINFO_HELP_GETGENERICS_GCSTATIC_BASE:
6879 case CORINFO_HELP_GETGENERICS_NONGCSTATIC_BASE:
6880 case CORINFO_HELP_GETGENERICS_GCTHREADSTATIC_BASE:
6883 assert(!"unknown generic statics helper");
6887 op1 = gtNewHelperCallNode(pFieldInfo->helper, type, gtNewArgList(op1));
6889 FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
6890 op1 = gtNewOperNode(GT_ADD, type, op1,
6891 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, pFieldInfo->offset, fs));
6895 case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
6897 #ifdef FEATURE_READYTORUN_COMPILER
6898 if (opts.IsReadyToRun())
6900 unsigned callFlags = 0;
6902 if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT)
6904 callFlags |= GTF_CALL_HOISTABLE;
6907 op1 = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_STATIC_BASE, TYP_BYREF);
6908 op1->gtFlags |= callFlags;
6910 op1->gtCall.setEntryPoint(pFieldInfo->fieldLookup);
6915 op1 = fgGetStaticsCCtorHelper(pResolvedToken->hClass, pFieldInfo->helper);
6919 FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
6920 op1 = gtNewOperNode(GT_ADD, op1->TypeGet(), op1,
6921 new (this, GT_CNS_INT) GenTreeIntCon(TYP_INT, pFieldInfo->offset, fs));
6926 case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
6928 #ifdef FEATURE_READYTORUN_COMPILER
6929 noway_assert(opts.IsReadyToRun());
6930 CORINFO_LOOKUP_KIND kind = info.compCompHnd->getLocationOfThisType(info.compMethodHnd);
6931 assert(kind.needsRuntimeLookup);
6933 GenTree* ctxTree = getRuntimeContextTree(kind.runtimeLookupKind);
6934 GenTreeArgList* args = gtNewArgList(ctxTree);
6936 unsigned callFlags = 0;
6938 if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT)
6940 callFlags |= GTF_CALL_HOISTABLE;
6942 var_types type = TYP_BYREF;
6943 op1 = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE, type, args);
6944 op1->gtFlags |= callFlags;
6946 op1->gtCall.setEntryPoint(pFieldInfo->fieldLookup);
6947 FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
6948 op1 = gtNewOperNode(GT_ADD, type, op1,
6949 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, pFieldInfo->offset, fs));
6952 #endif // FEATURE_READYTORUN_COMPILER
6958 if (!(access & CORINFO_ACCESS_ADDRESS))
6960 // In the future, it may be better to just create the right tree here instead of folding it later.
6961 op1 = gtNewFieldRef(lclTyp, pResolvedToken->hField);
6963 if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
6965 op1->gtFlags |= GTF_FLD_INITCLASS;
6968 if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP)
6970 op1->gtType = TYP_REF; // points at boxed object
6971 FieldSeqNode* firstElemFldSeq =
6972 GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField);
6973 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
6974 new (this, GT_CNS_INT)
6975 GenTreeIntCon(TYP_I_IMPL, TARGET_POINTER_SIZE, firstElemFldSeq));
6977 if (varTypeIsStruct(lclTyp))
6979 // Constructor adds GTF_GLOB_REF. Note that this is *not* GTF_EXCEPT.
6980 op1 = gtNewObjNode(pFieldInfo->structType, op1);
6984 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
6985 op1->gtFlags |= GTF_GLOB_REF | GTF_IND_NONFAULTING;
6993 void** pFldAddr = nullptr;
6994 void* fldAddr = info.compCompHnd->getFieldAddress(pResolvedToken->hField, (void**)&pFldAddr);
6996 FieldSeqNode* fldSeq = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
6998 /* Create the data member node */
6999 op1 = gtNewIconHandleNode(pFldAddr == nullptr ? (size_t)fldAddr : (size_t)pFldAddr, GTF_ICON_STATIC_HDL,
7002 if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
7004 op1->gtFlags |= GTF_ICON_INITCLASS;
7007 if (pFldAddr != nullptr)
7009 // There are two cases here, either the static is RVA based,
7010 // in which case the type of the FIELD node is not a GC type
7011 // and the handle to the RVA is a TYP_I_IMPL. Or the FIELD node is
7012 // a GC type and the handle to it is a TYP_BYREF in the GC heap
7013 // because handles to statics now go into the large object heap
7015 var_types handleTyp = (var_types)(varTypeIsGC(lclTyp) ? TYP_BYREF : TYP_I_IMPL);
7016 op1 = gtNewOperNode(GT_IND, handleTyp, op1);
7017 op1->gtFlags |= GTF_IND_INVARIANT | GTF_IND_NONFAULTING;
7024 if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP)
7026 op1 = gtNewOperNode(GT_IND, TYP_REF, op1);
7028 FieldSeqNode* fldSeq = GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField);
7030 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
7031 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, TARGET_POINTER_SIZE, fldSeq));
7034 if (!(access & CORINFO_ACCESS_ADDRESS))
7036 if (varTypeIsStruct(lclTyp))
7038 // Constructor adds GTF_GLOB_REF. Note that this is *not* GTF_EXCEPT.
7039 op1 = gtNewObjNode(pFieldInfo->structType, op1);
7043 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
7044 op1->gtFlags |= GTF_GLOB_REF;
7051 // In general try to call this before most of the verification work. Most people expect the access
7052 // exceptions before the verification exceptions. If you do this after, that usually doesn't happen. It turns
7053 // out that if you can't access something, we also think that you're unverifiable for other reasons.
7054 void Compiler::impHandleAccessAllowed(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall)
7056 if (result != CORINFO_ACCESS_ALLOWED)
7058 impHandleAccessAllowedInternal(result, helperCall);
7062 void Compiler::impHandleAccessAllowedInternal(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall)
7066 case CORINFO_ACCESS_ALLOWED:
7068 case CORINFO_ACCESS_ILLEGAL:
7069 // if we're verifying, then we need to reject the illegal access to ensure that we don't think the
7070 // method is verifiable. Otherwise, delay the exception to runtime.
7071 if (compIsForImportOnly())
7073 info.compCompHnd->ThrowExceptionForHelper(helperCall);
7077 impInsertHelperCall(helperCall);
7080 case CORINFO_ACCESS_RUNTIME_CHECK:
7081 impInsertHelperCall(helperCall);
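// impInsertHelperCall: materialize the access-check helper call described by
// 'helperInfo', building its argument list from the EE-supplied descriptors and
// appending it as a statement at the current IL offset.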
7086 void Compiler::impInsertHelperCall(CORINFO_HELPER_DESC* helperInfo)
7088 // Construct the argument list
7089 GenTreeArgList* args = nullptr;
7090 assert(helperInfo->helperNum != CORINFO_HELP_UNDEF);
7091 for (unsigned i = helperInfo->numArgs; i > 0; --i)
7093 const CORINFO_HELPER_ARG& helperArg = helperInfo->args[i - 1];
7094 GenTree* currentArg = nullptr;
7095 switch (helperArg.argType)
7097 case CORINFO_HELPER_ARG_TYPE_Field:
7098 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(
7099 info.compCompHnd->getFieldClass(helperArg.fieldHandle));
7100 currentArg = gtNewIconEmbFldHndNode(helperArg.fieldHandle);
7102 case CORINFO_HELPER_ARG_TYPE_Method:
7103 info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(helperArg.methodHandle);
7104 currentArg = gtNewIconEmbMethHndNode(helperArg.methodHandle);
7106 case CORINFO_HELPER_ARG_TYPE_Class:
7107 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(helperArg.classHandle);
7108 currentArg = gtNewIconEmbClsHndNode(helperArg.classHandle);
7110 case CORINFO_HELPER_ARG_TYPE_Module:
7111 currentArg = gtNewIconEmbScpHndNode(helperArg.moduleHandle);
7113 case CORINFO_HELPER_ARG_TYPE_Const:
7114 currentArg = gtNewIconNode(helperArg.constant);
7117 NO_WAY("Illegal helper arg type");
7119 args = (args == nullptr) ? gtNewArgList(currentArg) : gtNewListNode(currentArg, args);
7123 * Mark as CSE'able, and hoistable. Consider marking hoistable unless you're in the inlinee.
7124 * Also, consider sticking this in the first basic block.
7126 GenTree* callout = gtNewHelperCallNode(helperInfo->helperNum, TYP_VOID, args);
7127 impAppendTree(callout, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
7130 // Checks whether the return types of caller and callee are compatible
7131 // so that callee can be tail called. Note that here we don't check
7132 // compatibility in IL Verifier sense, but along the lines of return type
7133 // sizes being equal and returned in the same return register.
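// For example, a TYP_INT caller can tail call a TYP_INT callee, but not a
// TYP_SHORT one: small return types must be normalized by the caller of the
// callee, so the sizes and return registers would not line up.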
7134 bool Compiler::impTailCallRetTypeCompatible(var_types callerRetType,
7135 CORINFO_CLASS_HANDLE callerRetTypeClass,
7136 var_types calleeRetType,
7137 CORINFO_CLASS_HANDLE calleeRetTypeClass)
7139 // Note that we can not relax this condition with genActualType() as the
7140 // calling convention dictates that the caller of a function with a small
7141 // typed return value is responsible for normalizing the return val.
7142 if (callerRetType == calleeRetType)
7147 // If the class handles are the same and not null, the return types are compatible.
7148 if ((callerRetTypeClass != nullptr) && (callerRetTypeClass == calleeRetTypeClass))
7153 #if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)
7155 if (callerRetType == TYP_VOID)
7157 // This needs to be allowed to support the following IL pattern that Jit64 allows:
7162 // Note that the above IL pattern is not valid as per IL verification rules.
7163 // Therefore, only full trust code can take advantage of this pattern.
7167 // These checks return true if the return value type sizes are the same and
7168 // get returned in the same return register i.e. caller doesn't need to normalize
7169 // return value. Some of the tail calls permitted by below checks would have
7170 // been rejected by IL Verifier before we reached here. Therefore, only full
7171 // trust code can make those tail calls.
7172 unsigned callerRetTypeSize = 0;
7173 unsigned calleeRetTypeSize = 0;
7174 bool isCallerRetTypMBEnreg =
7175 VarTypeIsMultiByteAndCanEnreg(callerRetType, callerRetTypeClass, &callerRetTypeSize, true, info.compIsVarArgs);
7176 bool isCalleeRetTypMBEnreg =
7177 VarTypeIsMultiByteAndCanEnreg(calleeRetType, calleeRetTypeClass, &calleeRetTypeSize, true, info.compIsVarArgs);
7179 if (varTypeIsIntegral(callerRetType) || isCallerRetTypMBEnreg)
7181 return (varTypeIsIntegral(calleeRetType) || isCalleeRetTypMBEnreg) && (callerRetTypeSize == calleeRetTypeSize);
7183 #endif // _TARGET_AMD64_ || _TARGET_ARM64_
7191 PREFIX_TAILCALL_EXPLICIT = 0x00000001, // call has "tail" IL prefix
7192 PREFIX_TAILCALL_IMPLICIT =
7193 0x00000010, // call is treated as having "tail" prefix even though there is no "tail" IL prefix
7194 PREFIX_TAILCALL = (PREFIX_TAILCALL_EXPLICIT | PREFIX_TAILCALL_IMPLICIT),
7195 PREFIX_VOLATILE = 0x00000100,
7196 PREFIX_UNALIGNED = 0x00001000,
7197 PREFIX_CONSTRAINED = 0x00010000,
7198 PREFIX_READONLY = 0x00100000
7201 /********************************************************************************
7203 * Returns true if the current opcode and the opcodes following it correspond
7204 * to a supported tail call IL pattern.
7207 bool Compiler::impIsTailCallILPattern(bool tailPrefixed,
7209 const BYTE* codeAddrOfNextOpcode,
7210 const BYTE* codeEnd,
7212 bool* isCallPopAndRet /* = nullptr */)
7214 // Bail out if the current opcode is not a call.
7215 if (!impOpcodeIsCallOpcode(curOpcode))
7220 #if !FEATURE_TAILCALL_OPT_SHARED_RETURN
7221 // If shared ret tail opt is not enabled, we will enable
7222 // it for recursive methods.
7226 // We can actually handle the case where the ret is in a fallthrough block, as long as that is the only part of the
7227 // sequence. Make sure we don't go past the end of the IL however.
7228 codeEnd = min(codeEnd + 1, info.compCode + info.compILCodeSize);
7231 // Bail out if there is no next opcode after call
7232 if (codeAddrOfNextOpcode >= codeEnd)
7237 // Scan the opcodes to look for the following IL patterns if either
7238 // i) the call is not tail prefixed (i.e. implicit tail call) or
7239 // ii) if tail prefixed, IL verification is not needed for the method.
7241 // Only in the above two cases can we allow the tail call patterns below,
7242 // which violate the ECMA spec.
7258 #if !defined(FEATURE_CORECLR) && defined(_TARGET_AMD64_)
7261 nextOpcode = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode);
7262 codeAddrOfNextOpcode += sizeof(__int8);
7263 } while ((codeAddrOfNextOpcode < codeEnd) && // Haven't reached end of method
7264 (!tailPrefixed || !tiVerificationNeeded) && // Not ".tail" prefixed or method requires no IL verification
7265 ((nextOpcode == CEE_NOP) || ((nextOpcode == CEE_POP) && (++cntPop == 1)))); // Next opcode = nop or exactly
7266 // one pop seen so far.
7268 nextOpcode = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode);
7269 #endif // !FEATURE_CORECLR && _TARGET_AMD64_
7271 if (isCallPopAndRet)
7273 // Allow call+pop+ret to be tail call optimized if caller ret type is void
7274 *isCallPopAndRet = (nextOpcode == CEE_RET) && (cntPop == 1);
7277 #if !defined(FEATURE_CORECLR) && defined(_TARGET_AMD64_)
7279 // Tail call IL pattern could be either of the following
7280 // 1) call/callvirt/calli + ret
7281 // 2) call/callvirt/calli + pop + ret in a method returning void.
7282 return (nextOpcode == CEE_RET) && ((cntPop == 0) || ((cntPop == 1) && (info.compRetType == TYP_VOID)));
7284 return (nextOpcode == CEE_RET) && (cntPop == 0);
7285 #endif // !FEATURE_CORECLR && _TARGET_AMD64_
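// For illustration (not from this file): in a method returning int, C#
// 'return Foo();' compiles to 'call Foo; ret' (pattern 1); in a void method,
// 'Foo(); return;' where Foo returns a value compiles to 'call Foo; pop; ret'
// (pattern 2).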
7288 /*****************************************************************************
7290 * Determine whether the call could be converted to an implicit tail call
7293 bool Compiler::impIsImplicitTailCallCandidate(
7294 OPCODE opcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, int prefixFlags, bool isRecursive)
7297 #if FEATURE_TAILCALL_OPT
7298 if (!opts.compTailCallOpt)
7303 if (opts.compDbgCode || opts.MinOpts())
7308 // must not be tail prefixed
7309 if (prefixFlags & PREFIX_TAILCALL_EXPLICIT)
7314 #if !FEATURE_TAILCALL_OPT_SHARED_RETURN
7315 // the block containing call is marked as BBJ_RETURN
7316 // We allow shared ret tail call optimization on recursive calls even under
7317 // !FEATURE_TAILCALL_OPT_SHARED_RETURN.
7318 if (!isRecursive && (compCurBB->bbJumpKind != BBJ_RETURN))
7320 #endif // !FEATURE_TAILCALL_OPT_SHARED_RETURN
7322 // must be call+ret or call+pop+ret
7323 if (!impIsTailCallILPattern(false, opcode, codeAddrOfNextOpcode, codeEnd, isRecursive))
7331 #endif // FEATURE_TAILCALL_OPT
7334 //------------------------------------------------------------------------
7335 // impImportCall: import a call-inspiring opcode
7338 // opcode - opcode that inspires the call
7339 // pResolvedToken - resolved token for the call target
7340 // pConstrainedResolvedToken - resolved constraint token (or nullptr)
7341 // newObjThis - tree for this pointer or uninitialized newobj temp (or nullptr)
7342 // prefixFlags - IL prefix flags for the call
7343 // callInfo - EE supplied info for the call
7344 // rawILOffset - IL offset of the opcode
7347 // Type of the call's return value.
7348 // If we're importing an inlinee and have realized the inline must fail, the call return type should be TYP_UNDEF.
7349 // However we can't assert for this here yet because there are cases we miss. See issue #13272.
7353 // opcode can be CEE_CALL, CEE_CALLI, CEE_CALLVIRT, or CEE_NEWOBJ.
7355 // For CEE_NEWOBJ, newobjThis should be the temp grabbed for the allocated
7356 // uninitialized object.
7359 #pragma warning(push)
7360 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
7363 var_types Compiler::impImportCall(OPCODE opcode,
7364 CORINFO_RESOLVED_TOKEN* pResolvedToken,
7365 CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
7366 GenTree* newobjThis,
7368 CORINFO_CALL_INFO* callInfo,
7369 IL_OFFSET rawILOffset)
7371 assert(opcode == CEE_CALL || opcode == CEE_CALLVIRT || opcode == CEE_NEWOBJ || opcode == CEE_CALLI);
7373 IL_OFFSETX ilOffset = impCurILOffset(rawILOffset, true);
7374 var_types callRetTyp = TYP_COUNT;
7375 CORINFO_SIG_INFO* sig = nullptr;
7376 CORINFO_METHOD_HANDLE methHnd = nullptr;
7377 CORINFO_CLASS_HANDLE clsHnd = nullptr;
7378 unsigned clsFlags = 0;
7379 unsigned mflags = 0;
7380 unsigned argFlags = 0;
7381 GenTree* call = nullptr;
7382 GenTreeArgList* args = nullptr;
7383 CORINFO_THIS_TRANSFORM constraintCallThisTransform = CORINFO_NO_THIS_TRANSFORM;
7384 CORINFO_CONTEXT_HANDLE exactContextHnd = nullptr;
7385 bool exactContextNeedsRuntimeLookup = false;
7386 bool canTailCall = true;
7387 const char* szCanTailCallFailReason = nullptr;
7388 int tailCall = prefixFlags & PREFIX_TAILCALL;
7389 bool readonlyCall = (prefixFlags & PREFIX_READONLY) != 0;
7391 CORINFO_RESOLVED_TOKEN* ldftnToken = nullptr;
7393 // Synchronized methods need to call CORINFO_HELP_MON_EXIT at the end. We could
7394 // do that before tailcalls, but that is probably not the intended
7395 // semantic. So just disallow tailcalls from synchronized methods.
7396 // Also, popping arguments in a varargs function is more work and NYI
7397 // If we have a security object, we have to keep our frame around for callers
7398 // to see any imperative security.
7399 if (info.compFlags & CORINFO_FLG_SYNCH)
7401 canTailCall = false;
7402 szCanTailCallFailReason = "Caller is synchronized";
7404 #if !FEATURE_FIXED_OUT_ARGS
7405 else if (info.compIsVarArgs)
7407 canTailCall = false;
7408 szCanTailCallFailReason = "Caller is varargs";
7410 #endif // FEATURE_FIXED_OUT_ARGS
7411 else if (opts.compNeedSecurityCheck)
7413 canTailCall = false;
7414 szCanTailCallFailReason = "Caller requires a security check.";
7417 // We only need to cast the return value of pinvoke inlined calls that return small types
7419 // TODO-AMD64-Cleanup: Remove this when we stop interoperating with JIT64, or if we decide to stop
7420 // widening everything! CoreCLR does not support JIT64 interoperation so no need to widen there.
7421 // The existing x64 JIT doesn't bother widening all types to int, so we have to assume for
7422 // the time being that the callee might be compiled by the other JIT and thus the return
7423 // value will need to be widened by us (or not widened at all...)
7425 // ReadyToRun code sticks with default calling convention that does not widen small return types.
7427 bool checkForSmallType = opts.IsJit64Compat() || opts.IsReadyToRun();
7428 bool bIntrinsicImported = false;
7430 CORINFO_SIG_INFO calliSig;
7431 GenTreeArgList* extraArg = nullptr;
7433 /*-------------------------------------------------------------------------
7434 * First create the call node
7437 if (opcode == CEE_CALLI)
7439 if (IsTargetAbi(CORINFO_CORERT_ABI))
7441 // See comment in impCheckForPInvokeCall
7442 BasicBlock* block = compIsForInlining() ? impInlineInfo->iciBlock : compCurBB;
7443 if (info.compCompHnd->convertPInvokeCalliToCall(pResolvedToken, !impCanPInvokeInlineCallSite(block)))
7445 eeGetCallInfo(pResolvedToken, nullptr, CORINFO_CALLINFO_ALLOWINSTPARAM, callInfo);
7446 return impImportCall(CEE_CALL, pResolvedToken, nullptr, nullptr, prefixFlags, callInfo, rawILOffset);
7450 /* Get the call site sig */
7451 eeGetSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, &calliSig);
7453 callRetTyp = JITtype2varType(calliSig.retType);
7455 call = impImportIndirectCall(&calliSig, ilOffset);
7457 // We don't know the target method, so we have to infer the flags, or
7458 // assume the worst-case.
7459 mflags = (calliSig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC;
7464 unsigned structSize =
7465 (callRetTyp == TYP_STRUCT) ? info.compCompHnd->getClassSize(calliSig.retTypeSigClass) : 0;
7466 printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n",
7467 opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize);
7470 // This should be checked in impImportBlockCode.
7471 assert(!compIsForInlining() || !(impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY));
7476 // We cannot lazily obtain the signature of a CALLI call because it has no method
7477 // handle that we can use, so we need to save its full call signature here.
7478 assert(call->gtCall.callSig == nullptr);
7479 call->gtCall.callSig = new (this, CMK_CorSig) CORINFO_SIG_INFO;
7480 *call->gtCall.callSig = calliSig;
7483 if (IsTargetAbi(CORINFO_CORERT_ABI))
7485 bool managedCall = (((calliSig.callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_STDCALL) &&
7486 ((calliSig.callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_C) &&
7487 ((calliSig.callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_THISCALL) &&
7488 ((calliSig.callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_FASTCALL));
7491 addFatPointerCandidate(call->AsCall());
7495 else // (opcode != CEE_CALLI)
7497 CorInfoIntrinsics intrinsicID = CORINFO_INTRINSIC_Count;
7499 // Passing CORINFO_CALLINFO_ALLOWINSTPARAM indicates that this JIT is prepared to
7500 // supply the instantiation parameters necessary to make direct calls to underlying
7501 // shared generic code, rather than calling through instantiating stubs. If the
7502 // returned signature has CORINFO_CALLCONV_PARAMTYPE then this indicates that the JIT
7503 // must indeed pass an instantiation parameter.
7505 methHnd = callInfo->hMethod;
7507 sig = &(callInfo->sig);
7508 callRetTyp = JITtype2varType(sig->retType);
7510 mflags = callInfo->methodFlags;
7515 unsigned structSize = (callRetTyp == TYP_STRUCT) ? info.compCompHnd->getClassSize(sig->retTypeSigClass) : 0;
7516 printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n",
7517 opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize);
7520 if (compIsForInlining())
7522 /* Does this call site have security boundary restrictions? */
7524 if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
7526 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_SECURITY);
7530 /* Does the inlinee need a security check token on the frame */
7532 if (mflags & CORINFO_FLG_SECURITYCHECK)
7534 compInlineResult->NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
7538 /* Does the inlinee use StackCrawlMark */
7540 if (mflags & CORINFO_FLG_DONT_INLINE_CALLER)
7542 compInlineResult->NoteFatal(InlineObservation::CALLEE_STACK_CRAWL_MARK);
7546 /* For now ignore delegate invoke */
7548 if (mflags & CORINFO_FLG_DELEGATE_INVOKE)
7550 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_DELEGATE_INVOKE);
7554 /* For now ignore varargs */
7555 if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
7557 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NATIVE_VARARGS);
7561 if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
7563 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS);
7567 if ((mflags & CORINFO_FLG_VIRTUAL) && (sig->sigInst.methInstCount != 0) && (opcode == CEE_CALLVIRT))
7569 compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_GENERIC_VIRTUAL);
7574 clsHnd = pResolvedToken->hClass;
7576 clsFlags = callInfo->classFlags;
7579 // If this is a call to JitTestLabel.Mark, do "early inlining", and record the test attribute.
7581 // This recognition should really be done by knowing the methHnd of the relevant Mark method(s).
7582 // These should be in mscorlib.h, and available through a JIT/EE interface call.
7583 const char* modName;
7584 const char* className;
7585 const char* methodName;
7586 if ((className = eeGetClassName(clsHnd)) != nullptr &&
7587 strcmp(className, "System.Runtime.CompilerServices.JitTestLabel") == 0 &&
7588 (methodName = eeGetMethodName(methHnd, &modName)) != nullptr && strcmp(methodName, "Mark") == 0)
7590 return impImportJitTestLabelMark(sig->numArgs);
7594 // <NICE> Factor this into getCallInfo </NICE>
7595 bool isSpecialIntrinsic = false;
7596 if ((mflags & (CORINFO_FLG_INTRINSIC | CORINFO_FLG_JIT_INTRINSIC)) != 0)
7598 const bool isTail = canTailCall && (tailCall != 0);
7600 call = impIntrinsic(newobjThis, clsHnd, methHnd, sig, mflags, pResolvedToken->token, readonlyCall, isTail,
7601 pConstrainedResolvedToken, callInfo->thisTransform, &intrinsicID, &isSpecialIntrinsic);
7603 if (compDonotInline())
7608 if (call != nullptr)
7610 assert(!(mflags & CORINFO_FLG_VIRTUAL) || (mflags & CORINFO_FLG_FINAL) ||
7611 (clsFlags & CORINFO_FLG_FINAL));
7613 #ifdef FEATURE_READYTORUN_COMPILER
7614 if (call->OperGet() == GT_INTRINSIC)
7616 if (opts.IsReadyToRun())
7618 noway_assert(callInfo->kind == CORINFO_CALL);
7619 call->gtIntrinsic.gtEntryPoint = callInfo->codePointerLookup.constLookup;
7623 call->gtIntrinsic.gtEntryPoint.addr = nullptr;
7624 call->gtIntrinsic.gtEntryPoint.accessType = IAT_VALUE;
7626 #endif // FEATURE_READYTORUN_COMPILER
7629 bIntrinsicImported = true;
7630 goto DONE_CALL;
7633 #ifdef FEATURE_SIMD
7637 call = impSIMDIntrinsic(opcode, newobjThis, clsHnd, methHnd, sig, pResolvedToken->token);
7638 if (call != nullptr)
7640 bIntrinsicImported = true;
7641 goto DONE_CALL;
7644 #endif // FEATURE_SIMD
7646 if ((mflags & CORINFO_FLG_VIRTUAL) && (mflags & CORINFO_FLG_EnC) && (opcode == CEE_CALLVIRT))
7648 NO_WAY("Virtual call to a function added via EnC is not supported");
7651 if ((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_DEFAULT &&
7652 (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
7653 (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG)
7655 BADCODE("Bad calling convention");
7658 //-------------------------------------------------------------------------
7659 // Construct the call node
7661 // Work out what sort of call we're making.
7662 // Dispense with virtual calls implemented via LDVIRTFTN immediately.
7664 constraintCallThisTransform = callInfo->thisTransform;
7665 exactContextHnd = callInfo->contextHandle;
7666 exactContextNeedsRuntimeLookup = callInfo->exactContextNeedsRuntimeLookup == TRUE;
7668 // A recursive call is treated as a loop back to the beginning of the method.
7669 if (gtIsRecursiveCall(methHnd))
7674 JITDUMP("\nFound recursive call in the method. Mark " FMT_BB " to " FMT_BB
7675 " as having a backward branch.\n",
7676 fgFirstBB->bbNum, compCurBB->bbNum);
7679 fgMarkBackwardJump(fgFirstBB, compCurBB);
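// Marking the blocks from fgFirstBB to compCurBB as containing a backward branch
// lets later phases (e.g. block weighting and loop recognition) treat the
// recursive call like a loop back edge.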
7682 switch (callInfo->kind)
7685 case CORINFO_VIRTUALCALL_STUB:
7687 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
7688 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
7689 if (callInfo->stubLookup.lookupKind.needsRuntimeLookup)
7692 if (compIsForInlining())
7694 // Don't import runtime lookups when inlining
7695 // Inlining has to be aborted in such a case
7696 /* XXX Fri 3/20/2009
7697 * By the way, this would never succeed. If the handle lookup is into the generic
7698 * dictionary for a candidate, you'll generate different dictionary offsets and the
7699 * inlined code will crash.
7701 * To anyone code reviewing this, when could this ever succeed in the future? It'll
7702 * always have a handle lookup. These lookups are safe intra-module, but we're just
7703 * failing here.
7704 */
7705 compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_COMPLEX_HANDLE);
7709 GenTree* stubAddr = impRuntimeLookupToTree(pResolvedToken, &callInfo->stubLookup, methHnd);
7710 assert(!compDonotInline());
7712 // This is the rough code to set up an indirect stub call
7713 assert(stubAddr != nullptr);
7715 // The stubAddr may be a
7716 // complex expression. As it is evaluated after the args,
7717 // it may cause registered args to be spilled. Simply spill it.
7719 unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall with runtime lookup"));
7720 impAssignTempGen(lclNum, stubAddr, (unsigned)CHECK_SPILL_ALL);
7721 stubAddr = gtNewLclvNode(lclNum, TYP_I_IMPL);
7723 // Create the actual call node
7725 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
7726 (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
7728 call = gtNewIndCallNode(stubAddr, callRetTyp, nullptr);
7730 call->gtFlags |= GTF_EXCEPT | (stubAddr->gtFlags & GTF_GLOB_EFFECT);
7731 call->gtFlags |= GTF_CALL_VIRT_STUB;
7734 // No tailcalls allowed for these yet...
7735 canTailCall = false;
7736 szCanTailCallFailReason = "VirtualCall with runtime lookup";
7741 // OK, the stub is available at compile time.
7743 call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
7744 call->gtCall.gtStubCallStubAddr = callInfo->stubLookup.constLookup.addr;
7745 call->gtFlags |= GTF_CALL_VIRT_STUB;
7746 assert(callInfo->stubLookup.constLookup.accessType != IAT_PPVALUE &&
7747 callInfo->stubLookup.constLookup.accessType != IAT_RELPVALUE);
7748 if (callInfo->stubLookup.constLookup.accessType == IAT_PVALUE)
7750 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_VIRTSTUB_REL_INDIRECT;
7754 #ifdef FEATURE_READYTORUN_COMPILER
7755 if (opts.IsReadyToRun())
7757 // Null check is sometimes needed for ready to run to handle
7758 // non-virtual <-> virtual changes between versions
7759 if (callInfo->nullInstanceCheck)
7761 call->gtFlags |= GTF_CALL_NULLCHECK;
7765 #endif // FEATURE_READYTORUN_COMPILER
7767 break;
7769 case CORINFO_VIRTUALCALL_VTABLE:
7771 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
7772 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
7773 call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
7774 call->gtFlags |= GTF_CALL_VIRT_VTABLE;
7776 break;
7778 case CORINFO_VIRTUALCALL_LDVIRTFTN:
7780 if (compIsForInlining())
7782 compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_CALL_VIA_LDVIRTFTN);
7786 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
7787 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
7788 // OK, we've been told to call via LDVIRTFTN, so just
7789 // take the call now....
7791 args = impPopList(sig->numArgs, sig);
7793 GenTree* thisPtr = impPopStack().val;
7794 thisPtr = impTransformThis(thisPtr, pConstrainedResolvedToken, callInfo->thisTransform);
7795 assert(thisPtr != nullptr);
7797 // Clone the (possibly transformed) "this" pointer
7798 GenTree* thisPtrCopy;
7799 thisPtr = impCloneExpr(thisPtr, &thisPtrCopy, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
7800 nullptr DEBUGARG("LDVIRTFTN this pointer"));
7802 GenTree* fptr = impImportLdvirtftn(thisPtr, pResolvedToken, callInfo);
7803 assert(fptr != nullptr);
7805 thisPtr = nullptr; // can't reuse it
7807 // Now make an indirect call through the function pointer
7809 unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall through function pointer"));
7810 impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL);
7811 fptr = gtNewLclvNode(lclNum, TYP_I_IMPL);
7813 // Create the actual call node
7815 call = gtNewIndCallNode(fptr, callRetTyp, args, ilOffset);
7816 call->gtCall.gtCallObjp = thisPtrCopy;
7817 call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
7819 if ((sig->sigInst.methInstCount != 0) && IsTargetAbi(CORINFO_CORERT_ABI))
7821 // CoreRT generic virtual method: need to handle potential fat function pointers
7822 addFatPointerCandidate(call->AsCall());
7824 #ifdef FEATURE_READYTORUN_COMPILER
7825 if (opts.IsReadyToRun())
7827 // Null check is needed for ready to run to handle
7828 // non-virtual <-> virtual changes between versions
7829 call->gtFlags |= GTF_CALL_NULLCHECK;
7831 #endif // FEATURE_READYTORUN_COMPILER
7833 // Since we are jumping over some code, check that it's OK to skip that code.
7834 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
7835 (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
7837 goto DONE;
7839 case CORINFO_CALL:
7841 // This is for a non-virtual, non-interface etc. call
7842 call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
7844 // We remove the null check for the GetType call intrinsic.
7845 // TODO-CQ: JIT64 does not introduce the null check for many more helper calls
7847 if (callInfo->nullInstanceCheck &&
7848 !((mflags & CORINFO_FLG_INTRINSIC) != 0 && (intrinsicID == CORINFO_INTRINSIC_Object_GetType)))
7850 call->gtFlags |= GTF_CALL_NULLCHECK;
7853 #ifdef FEATURE_READYTORUN_COMPILER
7854 if (opts.IsReadyToRun())
7856 call->gtCall.setEntryPoint(callInfo->codePointerLookup.constLookup);
7858 #endif // FEATURE_READYTORUN_COMPILER
7860 break;
7862 case CORINFO_CALL_CODE_POINTER:
7864 // The EE has asked us to call by computing a code pointer and then doing an
7865 // indirect call. This is because a runtime lookup is required to get the code entry point.
7867 // These calls always follow a uniform calling convention, i.e. no extra hidden params
7868 assert((sig->callConv & CORINFO_CALLCONV_PARAMTYPE) == 0);
7870 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG);
7871 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
7873 GenTree* fptr =
7874 impLookupToTree(pResolvedToken, &callInfo->codePointerLookup, GTF_ICON_FTN_ADDR, callInfo->hMethod);
7876 if (compDonotInline())
7881 // Now make an indirect call through the function pointer
7883 unsigned lclNum = lvaGrabTemp(true DEBUGARG("Indirect call through function pointer"));
7884 impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL);
7885 fptr = gtNewLclvNode(lclNum, TYP_I_IMPL);
7887 call = gtNewIndCallNode(fptr, callRetTyp, nullptr, ilOffset);
7888 call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
7889 if (callInfo->nullInstanceCheck)
7891 call->gtFlags |= GTF_CALL_NULLCHECK;
7898 assert(!"unknown call kind");
7902 //-------------------------------------------------------------------------
7905 PREFIX_ASSUME(call != nullptr);
7907 if (mflags & CORINFO_FLG_NOGCCHECK)
7909 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_NOGCCHECK;
7912 // Mark call if it's one of the ones we will maybe treat as an intrinsic
7913 if (isSpecialIntrinsic)
7915 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_SPECIAL_INTRINSIC;
7919 assert(clsHnd || (opcode == CEE_CALLI)); // We're never verifying for CALLI, so this is not set.
7921 /* Some sanity checks */
7923 // CALL_VIRT and NEWOBJ must have a THIS pointer
7924 assert((opcode != CEE_CALLVIRT && opcode != CEE_NEWOBJ) || (sig->callConv & CORINFO_CALLCONV_HASTHIS));
7925 // static bit and hasThis are negations of one another
7926 assert(((mflags & CORINFO_FLG_STATIC) != 0) == ((sig->callConv & CORINFO_CALLCONV_HASTHIS) == 0));
7927 assert(call != nullptr);
7929 /*-------------------------------------------------------------------------
7930 * Check special-cases etc.
7931 */
7933 /* Special case - Check if it is a call to Delegate.Invoke(). */
7935 if (mflags & CORINFO_FLG_DELEGATE_INVOKE)
7937 assert(!compIsForInlining());
7938 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
7939 assert(mflags & CORINFO_FLG_FINAL);
7941 /* Set the delegate flag */
7942 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_DELEGATE_INV;
7944 if (callInfo->secureDelegateInvoke)
7946 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_SECURE_DELEGATE_INV;
7949 if (opcode == CEE_CALLVIRT)
7951 assert(mflags & CORINFO_FLG_FINAL);
7953 /* It should have the GTF_CALL_NULLCHECK flag set. Reset it */
7954 assert(call->gtFlags & GTF_CALL_NULLCHECK);
7955 call->gtFlags &= ~GTF_CALL_NULLCHECK;
7959 CORINFO_CLASS_HANDLE actualMethodRetTypeSigClass;
7960 actualMethodRetTypeSigClass = sig->retTypeSigClass;
7961 if (varTypeIsStruct(callRetTyp))
7963 callRetTyp = impNormStructType(actualMethodRetTypeSigClass);
7964 call->gtType = callRetTyp;
7966 #if !FEATURE_VARARG
7968 /* Check for varargs */
7969 if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG ||
7970 (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
7972 BADCODE("Varargs not supported.");
7974 #endif // !FEATURE_VARARG
7976 #if defined(UNIX_X86_ABI)
7977 if (call->gtCall.callSig == nullptr)
7979 call->gtCall.callSig = new (this, CMK_CorSig) CORINFO_SIG_INFO;
7980 *call->gtCall.callSig = *sig;
7982 #endif // UNIX_X86_ABI
7984 if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG ||
7985 (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
7987 assert(!compIsForInlining());
7989 /* Set the right flags */
7991 call->gtFlags |= GTF_CALL_POP_ARGS;
7992 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_VARARGS;
7994 /* Can't allow tailcall for varargs as it is caller-pop. The caller
7995 will be expecting to pop a certain number of arguments, but if we
7996 tailcall to a function with a different number of arguments, we
7997 are hosed. There are ways around this (caller remembers esp value,
7998 varargs is not caller-pop, etc), but not worth it. */
7999 CLANG_FORMAT_COMMENT_ANCHOR;
8004 canTailCall = false;
8005 szCanTailCallFailReason = "Callee is varargs";
8009 /* Get the total number of arguments - this is already correct
8010 * for CALLI - for methods we have to get it from the call site */
8012 if (opcode != CEE_CALLI)
8015 unsigned numArgsDef = sig->numArgs;
8017 eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
8020 // We cannot lazily obtain the signature of a vararg call because using its method
8021 // handle will give us only the declared argument list, not the full argument list.
8022 assert(call->gtCall.callSig == nullptr);
8023 call->gtCall.callSig = new (this, CMK_CorSig) CORINFO_SIG_INFO;
8024 *call->gtCall.callSig = *sig;
8027 // For vararg calls we must be sure to load the return type of the
8028 // method actually being called, as well as the return types
8029 // specified in the vararg signature. With type equivalency, these types
8030 // may not be the same.
8031 if (sig->retTypeSigClass != actualMethodRetTypeSigClass)
8033 if (actualMethodRetTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS &&
8034 sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR &&
8035 sig->retType != CORINFO_TYPE_VAR)
8037 // Make sure that all valuetypes (including enums) that we push are loaded.
8038 // This is to guarantee that if a GC is triggered from the prestub of this method,
8039 // all valuetypes in the method signature are already loaded.
8040 // We need to be able to find the size of the valuetypes, but we cannot
8041 // do a class-load from within GC.
8042 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(actualMethodRetTypeSigClass);
8046 assert(numArgsDef <= sig->numArgs);
8049 /* We will have "cookie" as the last argument but we cannot push
8050 * it on the operand stack because we may overflow, so we append it
8051 * to the arg list next after we pop them */
8054 if (mflags & CORINFO_FLG_SECURITYCHECK)
8056 assert(!compIsForInlining());
8058 // Need security prolog/epilog callouts when there is
8059 // imperative security in the method. This is to give security a
8060 // chance to do any setup in the prolog and cleanup in the epilog if needed.
8062 if (compIsForInlining())
8064 // Cannot handle this if the method being imported is itself an inlinee,
8065 // because an inlinee method does not have its own frame.
8067 compInlineResult->NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
8072 tiSecurityCalloutNeeded = true;
8074 // If the current method calls a method which needs a security check,
8075 // (i.e. the method being compiled has imperative security)
8076 // we need to reserve a slot for the security object in
8077 // the current method's stack frame
8078 opts.compNeedSecurityCheck = true;
8082 //--------------------------- Inline NDirect ------------------------------
8084 // For inline cases we technically should look at both the current
8085 // block and the call site block (or just the latter if we've
8086 // fused the EH trees). However the block-related checks pertain to
8087 // EH and we currently won't inline a method with EH. So for
8088 // inlinees, just checking the call site block is sufficient.
8090 // New lexical block here to avoid compilation errors because of GOTOs.
8091 BasicBlock* block = compIsForInlining() ? impInlineInfo->iciBlock : compCurBB;
8092 impCheckForPInvokeCall(call->AsCall(), methHnd, sig, mflags, block);
8095 if (call->gtFlags & GTF_CALL_UNMANAGED)
8097 // We set up the unmanaged call by linking the frame, disabling GC, etc
8098 // This needs to be cleaned up on return
8101 canTailCall = false;
8102 szCanTailCallFailReason = "Callee is native";
8105 checkForSmallType = true;
8107 impPopArgsForUnmanagedCall(call, sig);
8111 else if ((opcode == CEE_CALLI) && (((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_STDCALL) ||
8112 ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_C) ||
8113 ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_THISCALL) ||
8114 ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_FASTCALL)))
8116 if (!info.compCompHnd->canGetCookieForPInvokeCalliSig(sig))
8118 // Normally this only happens with inlining.
8119 // However, a generic method (or type) being NGENd into another module
8120 // can run into this issue as well. There's no easy fall-back for NGEN,
8121 // so instead we fall back to JIT.
8122 if (compIsForInlining())
8124 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_PINVOKE_COOKIE);
8128 IMPL_LIMITATION("Can't get PInvoke cookie (cross module generics)");
8134 GenTree* cookie = eeGetPInvokeCookie(sig);
8136 // This cookie is required to be either a simple GT_CNS_INT or
8137 // an indirection of a GT_CNS_INT
8139 GenTree* cookieConst = cookie;
8140 if (cookie->gtOper == GT_IND)
8142 cookieConst = cookie->gtOp.gtOp1;
8144 assert(cookieConst->gtOper == GT_CNS_INT);
8146 // Setting GTF_DONT_CSE on the GT_CNS_INT as well as on the GT_IND (if it exists) will ensure that
8147 // we won't allow this tree to participate in any CSE logic
8149 cookie->gtFlags |= GTF_DONT_CSE;
8150 cookieConst->gtFlags |= GTF_DONT_CSE;
8152 call->gtCall.gtCallCookie = cookie;
8156 canTailCall = false;
8157 szCanTailCallFailReason = "PInvoke calli";
8161 /*-------------------------------------------------------------------------
8162 * Create the argument list
8163 */
8165 //-------------------------------------------------------------------------
8166 // Special case - for varargs we have an implicit last argument
8168 if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
8170 assert(!compIsForInlining());
8172 void *varCookie, *pVarCookie;
8173 if (!info.compCompHnd->canGetVarArgsHandle(sig))
8175 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_VARARGS_COOKIE);
8179 varCookie = info.compCompHnd->getVarArgsHandle(sig, &pVarCookie);
8180 assert((!varCookie) != (!pVarCookie));
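// Exactly one of the two is non-null: the EE either returns the vararg cookie
// directly (varCookie) or a cell to load it from at run time (pVarCookie);
// the assert above encodes that exclusive-or.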
8181 GenTree* cookie = gtNewIconEmbHndNode(varCookie, pVarCookie, GTF_ICON_VARG_HDL, sig);
8183 assert(extraArg == nullptr);
8184 extraArg = gtNewArgList(cookie);
8187 //-------------------------------------------------------------------------
8188 // Extra arg for shared generic code and array methods
8190 // Extra argument containing instantiation information is passed in the
8191 // following circumstances:
8192 // (a) To the "Address" method on array classes; the extra parameter is
8193 // the array's type handle (a TypeDesc)
8194 // (b) To shared-code instance methods in generic structs; the extra parameter
8195 // is the struct's type handle (a vtable ptr)
8196 // (c) To shared-code per-instantiation non-generic static methods in generic
8197 // classes and structs; the extra parameter is the type handle
8198 // (d) To shared-code generic methods; the extra parameter is an
8199 // exact-instantiation MethodDesc
8201 // We also set the exact type context associated with the call so we can
8202 // inline the call correctly later on.
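// For illustration (hypothetical C#): with 'class C<T> { static void M() { ... } }',
// a call to C<string>.M() that binds to the shared body C<__Canon>.M() passes the
// type handle for C<string> as the extra argument -- case (c) above -- so the
// shared code can recover its exact instantiation at run time.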
8204 if (sig->callConv & CORINFO_CALLCONV_PARAMTYPE)
8206 assert(call->gtCall.gtCallType == CT_USER_FUNC);
8207 if (clsHnd == nullptr)
8209 NO_WAY("CALLI on parameterized type");
8212 assert(opcode != CEE_CALLI);
8217 // Instantiated generic method
8218 if (((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_METHOD)
8220 CORINFO_METHOD_HANDLE exactMethodHandle =
8221 (CORINFO_METHOD_HANDLE)((SIZE_T)exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK);
8223 if (!exactContextNeedsRuntimeLookup)
8225 #ifdef FEATURE_READYTORUN_COMPILER
8226 if (opts.IsReadyToRun())
8228 instParam =
8229 impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_METHOD_HDL, exactMethodHandle);
8230 if (instParam == nullptr)
8232 assert(compDonotInline());
8236 else
8237 #endif // FEATURE_READYTORUN_COMPILER
8239 instParam = gtNewIconEmbMethHndNode(exactMethodHandle);
8240 info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(exactMethodHandle);
8245 instParam = impTokenToHandle(pResolvedToken, &runtimeLookup, TRUE /*mustRestoreHandle*/);
8246 if (instParam == nullptr)
8248 assert(compDonotInline());
8254 // otherwise must be an instance method in a generic struct,
8255 // a static method in a generic type, or a runtime-generated array method
8258 assert(((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_CLASS);
8259 CORINFO_CLASS_HANDLE exactClassHandle =
8260 (CORINFO_CLASS_HANDLE)((SIZE_T)exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK);
8262 if (compIsForInlining() && (clsFlags & CORINFO_FLG_ARRAY) != 0)
8264 compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_ARRAY_METHOD);
8268 if ((clsFlags & CORINFO_FLG_ARRAY) && readonlyCall)
8270 // We indicate "readonly" to the Address operation by using a null
8272 instParam = gtNewIconNode(0, TYP_REF);
8274 else if (!exactContextNeedsRuntimeLookup)
8276 #ifdef FEATURE_READYTORUN_COMPILER
8277 if (opts.IsReadyToRun())
8279 instParam =
8280 impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_CLASS_HDL, exactClassHandle);
8281 if (instParam == nullptr)
8283 assert(compDonotInline());
8287 else
8288 #endif // FEATURE_READYTORUN_COMPILER
8290 instParam = gtNewIconEmbClsHndNode(exactClassHandle);
8291 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(exactClassHandle);
8296 // If the EE was able to resolve a constrained call, the instantiating parameter to use is the type
8297 // with which the call was constrained. We embed pConstrainedResolvedToken as the extra argument
8298 // because pResolvedToken is an interface method and interface types make a poor generic context.
8299 if (pConstrainedResolvedToken)
8301 instParam = impTokenToHandle(pConstrainedResolvedToken, &runtimeLookup, TRUE /*mustRestoreHandle*/,
8302 FALSE /* importParent */);
8306 instParam = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup, TRUE /*mustRestoreHandle*/);
8309 if (instParam == nullptr)
8311 assert(compDonotInline());
8317 assert(extraArg == nullptr);
8318 extraArg = gtNewArgList(instParam);
8321 // Inlining may need the exact type context (exactContextHnd) if we're inlining shared generic code, in particular
8322 // to inline 'polytypic' operations such as static field accesses, type tests and method calls which
8323 // rely on the exact context. The exactContextHnd is passed back to the JitInterface at appropriate points.
8324 // exactContextHnd is not currently required when inlining shared generic code into shared
8325 // generic code, since the inliner aborts whenever shared code polytypic operations are encountered
8326 // (e.g. anything marked needsRuntimeLookup)
8327 if (exactContextNeedsRuntimeLookup)
8329 exactContextHnd = nullptr;
8332 if ((opcode == CEE_NEWOBJ) && ((clsFlags & CORINFO_FLG_DELEGATE) != 0))
8334 // Only verifiable cases are supported.
8335 // dup; ldvirtftn; newobj; or ldftn; newobj.
8336 // The IL may contain an unverifiable sequence; in that case this optimization should not be done.
8337 if (impStackHeight() > 0)
8339 typeInfo delegateTypeInfo = impStackTop().seTypeInfo;
8340 if (delegateTypeInfo.IsToken())
8342 ldftnToken = delegateTypeInfo.GetToken();
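// The token is recorded from the verifiable delegate-creation pattern, e.g.
// (illustrative IL):
//     dup
//     ldvirtftn instance void C::M()
//     newobj    instance void SomeDelegate::.ctor(object, native int)
// fgOptimizeDelegateConstructor (called below for CEE_NEWOBJ on a delegate
// class) can use it to streamline the construction.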
8347 //-------------------------------------------------------------------------
8348 // The main group of arguments
8350 args = call->gtCall.gtCallArgs = impPopList(sig->numArgs, sig, extraArg);
8354 call->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
8357 //-------------------------------------------------------------------------
8358 // The "this" pointer
8360 if (!(mflags & CORINFO_FLG_STATIC) && !((opcode == CEE_NEWOBJ) && (newobjThis == nullptr)))
8364 if (opcode == CEE_NEWOBJ)
8370 obj = impPopStack().val;
8371 obj = impTransformThis(obj, pConstrainedResolvedToken, constraintCallThisTransform);
8372 if (compDonotInline())
8378 // Store the "this" value in the call
8379 call->gtFlags |= obj->gtFlags & GTF_GLOB_EFFECT;
8380 call->gtCall.gtCallObjp = obj;
8382 // Is this a virtual or interface call?
8383 if (call->gtCall.IsVirtual())
8385 // only true object pointers can be virtual
8386 assert(obj->gtType == TYP_REF);
8388 // See if we can devirtualize.
8389 impDevirtualizeCall(call->AsCall(), &callInfo->hMethod, &callInfo->methodFlags, &callInfo->contextHandle,
8395 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_NONVIRT_SAME_THIS;
8399 //-------------------------------------------------------------------------
8400 // The "this" pointer for "newobj"
8402 if (opcode == CEE_NEWOBJ)
8404 if (clsFlags & CORINFO_FLG_VAROBJSIZE)
8406 assert(!(clsFlags & CORINFO_FLG_ARRAY)); // arrays handled separately
8407 // This is a 'new' of a variable-sized object, where
8408 // the constructor is to return the object. In this case
8409 // the constructor claims to return VOID but we know it
8410 // actually returns the new object
8411 assert(callRetTyp == TYP_VOID);
8412 callRetTyp = TYP_REF;
8413 call->gtType = TYP_REF;
8414 impSpillSpecialSideEff();
8416 impPushOnStack(call, typeInfo(TI_REF, clsHnd));
8420 if (clsFlags & CORINFO_FLG_DELEGATE)
8422 // The new inliner morphs it here in impImportCall.
8423 // This will allow us to inline the call to the delegate constructor.
8424 call = fgOptimizeDelegateConstructor(call->AsCall(), &exactContextHnd, ldftnToken);
8427 if (!bIntrinsicImported)
8430 #if defined(DEBUG) || defined(INLINE_DATA)
8432 // Keep track of the raw IL offset of the call
8433 call->gtCall.gtRawILOffset = rawILOffset;
8435 #endif // defined(DEBUG) || defined(INLINE_DATA)
8437 // Is it an inline candidate?
8438 impMarkInlineCandidate(call, exactContextHnd, exactContextNeedsRuntimeLookup, callInfo);
8441 // append the call node.
8442 impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
8444 // Now push the value of the 'new' onto the stack.
8446 // This is a 'new' of a non-variable sized object.
8447 // Append the new node (op1) to the statement list,
8448 // and then push the local holding the value of this
8449 // new instruction on the stack.
8451 if (clsFlags & CORINFO_FLG_VALUECLASS)
8453 assert(newobjThis->gtOper == GT_ADDR && newobjThis->gtOp.gtOp1->gtOper == GT_LCL_VAR);
8455 unsigned tmp = newobjThis->gtOp.gtOp1->gtLclVarCommon.gtLclNum;
8456 impPushOnStack(gtNewLclvNode(tmp, lvaGetRealType(tmp)), verMakeTypeInfo(clsHnd).NormaliseForStack());
8460 if (newobjThis->gtOper == GT_COMMA)
8462 // In coreclr the callout can be inserted even if verification is disabled
8463 // so we cannot rely on tiVerificationNeeded alone
8465 // We must have inserted the callout. Get the real newobj.
8466 newobjThis = newobjThis->gtOp.gtOp2;
8469 assert(newobjThis->gtOper == GT_LCL_VAR);
8470 impPushOnStack(gtNewLclvNode(newobjThis->gtLclVarCommon.gtLclNum, TYP_REF), typeInfo(TI_REF, clsHnd));
8475 DONE:
8477 if (tailCall)
8480 // This check cannot be performed for implicit tail calls because
8481 // impIsImplicitTailCallCandidate() does not check whether return
8482 // types are compatible before marking a call node with PREFIX_TAILCALL_IMPLICIT.
8483 // As a result it is possible that in the following case, we find that
8484 // the type stack is non-empty if Callee() is considered for implicit tail calling, e.g.:
8486 // int Caller(..) { .... void Callee(); ret val; ... }
8488 // Note that we cannot check return type compatibility before ImpImportCall()
8489 // as we don't have the required info, or we would need to duplicate some of the logic of impImportCall().
8492 // For implicit tail calls, we perform this check after return types are
8493 // known to be compatible.
8494 if ((tailCall & PREFIX_TAILCALL_EXPLICIT) && (verCurrentState.esStackDepth != 0))
8496 BADCODE("Stack should be empty after tailcall");
8499 // Note that we cannot relax this condition with genActualType() as
8500 // the calling convention dictates that the caller of a function with
8501 // a small-typed return value is responsible for normalizing the return value.
8504 !impTailCallRetTypeCompatible(info.compRetType, info.compMethodInfo->args.retTypeClass, callRetTyp,
8505 callInfo->sig.retTypeClass))
8507 canTailCall = false;
8508 szCanTailCallFailReason = "Return types are not tail call compatible";
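// For example (illustrative C#): 'short Callee()' tail-called from 'int Caller()'
// would return an un-widened 16-bit result to Caller's caller, which expects
// Caller to have normalized it to a full int, so such a tail call is rejected.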
8511 // Stack empty check for implicit tail calls.
8512 if (canTailCall && (tailCall & PREFIX_TAILCALL_IMPLICIT) && (verCurrentState.esStackDepth != 0))
8514 #ifdef _TARGET_AMD64_
8515 // JIT64 Compatibility: Opportunistic tail call stack mismatch throws a VerificationException
8516 // in JIT64, not an InvalidProgramException.
8517 Verify(false, "Stack should be empty after tailcall");
8518 #else // !_TARGET_AMD64_
8519 BADCODE("Stack should be empty after tailcall");
8520 #endif // _TARGET_AMD64_
8523 // assert(compCurBB is not a catch, finally or filter block);
8524 // assert(compCurBB is not a try block protected by a finally block);
8526 // Check for permission to tailcall
8527 bool explicitTailCall = (tailCall & PREFIX_TAILCALL_EXPLICIT) != 0;
8529 assert(!explicitTailCall || compCurBB->bbJumpKind == BBJ_RETURN);
8533 // True virtual or indirect calls shouldn't pass in a callee handle.
8534 CORINFO_METHOD_HANDLE exactCalleeHnd =
8535 ((call->gtCall.gtCallType != CT_USER_FUNC) || call->gtCall.IsVirtual()) ? nullptr : methHnd;
8536 GenTree* thisArg = call->gtCall.gtCallObjp;
8538 if (info.compCompHnd->canTailCall(info.compMethodHnd, methHnd, exactCalleeHnd, explicitTailCall))
8541 if (explicitTailCall)
8543 // In case of explicit tail calls, mark it so that it is not also considered an implicit tail call.
8545 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_EXPLICIT_TAILCALL;
8549 printf("\nGTF_CALL_M_EXPLICIT_TAILCALL bit set for call ");
8557 #if FEATURE_TAILCALL_OPT
8558 // Must be an implicit tail call.
8559 assert((tailCall & PREFIX_TAILCALL_IMPLICIT) != 0);
8561 // It is possible that a call node is both an inline candidate and marked
8562 // for opportunistic tail calling. Inlining happens before morphing of
8563 // trees. If inlining of an inline candidate gets aborted for whatever
8564 // reason, it will survive to the morphing stage at which point it will be
8565 // transformed into a tail call after performing additional checks.
8567 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_IMPLICIT_TAILCALL;
8571 printf("\nGTF_CALL_M_IMPLICIT_TAILCALL bit set for call ");
8577 #else // !FEATURE_TAILCALL_OPT
8578 NYI("Implicit tail call prefix on a target which doesn't support opportunistic tail calls");
8580 #endif // FEATURE_TAILCALL_OPT
8583 // we can't report success just yet...
8587 canTailCall = false;
8588 // canTailCall reported its reasons already
8592 printf("\ninfo.compCompHnd->canTailCall returned false for call ");
8601 // If this assert fires it means that canTailCall was set to false without setting a reason!
8602 assert(szCanTailCallFailReason != nullptr);
8607 printf("\nRejecting %splicit tail call for call ", explicitTailCall ? "ex" : "im");
8609 printf(": %s\n", szCanTailCallFailReason);
8612 info.compCompHnd->reportTailCallDecision(info.compMethodHnd, methHnd, explicitTailCall, TAILCALL_FAIL,
8613 szCanTailCallFailReason);
8617 // Note: we assume that small return types are already normalized by the managed callee
8618 // or by the pinvoke stub for calls to unmanaged code.
8620 if (!bIntrinsicImported)
8623 // Things that need to be checked when bIntrinsicImported is false.
8626 assert(call->gtOper == GT_CALL);
8627 assert(sig != nullptr);
8629 // Tail calls require us to save the call site's sig info so we can obtain an argument
8630 // copying thunk from the EE later on.
8631 if (call->gtCall.callSig == nullptr)
8633 call->gtCall.callSig = new (this, CMK_CorSig) CORINFO_SIG_INFO;
8634 *call->gtCall.callSig = *sig;
8637 if (compIsForInlining() && opcode == CEE_CALLVIRT)
8639 GenTree* callObj = call->gtCall.gtCallObjp;
8640 assert(callObj != nullptr);
8642 if ((call->gtCall.IsVirtual() || (call->gtFlags & GTF_CALL_NULLCHECK)) &&
8643 impInlineIsGuaranteedThisDerefBeforeAnySideEffects(call->gtCall.gtCallArgs, callObj,
8644 impInlineInfo->inlArgInfo))
8646 impInlineInfo->thisDereferencedFirst = true;
8650 #if defined(DEBUG) || defined(INLINE_DATA)
8652 // Keep track of the raw IL offset of the call
8653 call->gtCall.gtRawILOffset = rawILOffset;
8655 #endif // defined(DEBUG) || defined(INLINE_DATA)
8657 // Is it an inline candidate?
8658 impMarkInlineCandidate(call, exactContextHnd, exactContextNeedsRuntimeLookup, callInfo);
8661 DONE_CALL:
8662 // Push or append the result of the call
8663 if (callRetTyp == TYP_VOID)
8665 if (opcode == CEE_NEWOBJ)
8667 // we actually did push something, so don't spill the thing we just pushed.
8668 assert(verCurrentState.esStackDepth > 0);
8669 impAppendTree(call, verCurrentState.esStackDepth - 1, impCurStmtOffs);
8673 impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
8678 impSpillSpecialSideEff();
8680 if (clsFlags & CORINFO_FLG_ARRAY)
8682 eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
8685 // Find the return type used for verification by interpreting the method signature.
8686 // NB: we are clobbering the already established sig.
8687 if (tiVerificationNeeded)
8689 // Actually, we never get the sig for the original method.
8690 sig = &(callInfo->verSig);
8693 typeInfo tiRetVal = verMakeTypeInfo(sig->retType, sig->retTypeClass);
8694 tiRetVal.NormaliseForStack();
8696 // The CEE_READONLY prefix modifies the verification semantics of an Address
8697 // operation on an array type.
8698 if ((clsFlags & CORINFO_FLG_ARRAY) && readonlyCall && tiRetVal.IsByRef())
8700 tiRetVal.SetIsReadonlyByRef();
8703 if (tiVerificationNeeded)
8705 // We assume all calls return permanent home byrefs. If they
8706 // didn't they wouldn't be verifiable. This is also covering
8707 // the Address() helper for multidimensional arrays.
8708 if (tiRetVal.IsByRef())
8710 tiRetVal.SetIsPermanentHomeByRef();
8716 // Sometimes "call" is not a GT_CALL (if we imported an intrinsic that didn't turn into a call)
8718 bool fatPointerCandidate = call->AsCall()->IsFatPointerCandidate();
8719 if (varTypeIsStruct(callRetTyp))
8721 call = impFixupCallStructReturn(call->AsCall(), sig->retTypeClass);
8724 if ((call->gtFlags & GTF_CALL_INLINE_CANDIDATE) != 0)
8726 assert(opts.OptEnabled(CLFLG_INLINING));
8727 assert(!fatPointerCandidate); // We should not try to inline calli.
8729 // Make the call its own tree (spill the stack if needed).
8730 impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
8732 // TODO: Still using the widened type.
8733 call = gtNewInlineCandidateReturnExpr(call, genActualType(callRetTyp));
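// The call itself stays in the statement list appended above; what reaches the
// stack instead is a GT_RET_EXPR placeholder that will later be replaced by the
// inlinee's return expression (or by the original call if inlining fails).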
8737 if (fatPointerCandidate)
8739 // fatPointer candidates should be in statements of the form call() or var = call().
8740 // This form lets us find statements with fat calls without walking whole trees
8741 // and avoids problems with cutting trees.
8742 assert(!bIntrinsicImported);
8743 assert(IsTargetAbi(CORINFO_CORERT_ABI));
8744 if (call->OperGet() != GT_LCL_VAR) // may have already been converted by impFixupCallStructReturn.
8746 unsigned calliSlot = lvaGrabTemp(true DEBUGARG("calli"));
8747 LclVarDsc* varDsc = &lvaTable[calliSlot];
8748 varDsc->lvVerTypeInfo = tiRetVal;
8749 impAssignTempGen(calliSlot, call, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_NONE);
8750 // impAssignTempGen can change src arg list and return type for call that returns struct.
8751 var_types type = genActualType(lvaTable[calliSlot].TypeGet());
8752 call = gtNewLclvNode(calliSlot, type);
8756 // For non-candidates we must also spill, since we
8757 // might have locals live on the eval stack that this call can modify.
8760 // Suppress this for certain well-known call targets
8761 // that we know won't modify locals, eg calls that are
8762 // recognized in gtCanOptimizeTypeEquality. Otherwise
8763 // we may break key fragile pattern matches later on.
8764 bool spillStack = true;
8767 GenTreeCall* callNode = call->AsCall();
8768 if ((callNode->gtCallType == CT_HELPER) && (gtIsTypeHandleToRuntimeTypeHelper(callNode) ||
8769 gtIsTypeHandleToRuntimeTypeHandleHelper(callNode)))
8770 spillStack = false;
8773 else if ((callNode->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) != 0)
8775 spillStack = false;
8779 if (spillStack)
8781 impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("non-inline candidate call"));
8786 if (!bIntrinsicImported)
8788 //-------------------------------------------------------------------------
8790 /* If the call is of a small type and the callee is managed, the callee will normalize the result before returning.
8792 However, we need to normalize small type values returned by unmanaged
8793 functions (pinvoke). The pinvoke stub does the normalization, but we need to do it here
8794 if we use the shorter inlined pinvoke stub. */
8796 if (checkForSmallType && varTypeIsIntegral(callRetTyp) && genTypeSize(callRetTyp) < genTypeSize(TYP_INT))
8798 call = gtNewCastNode(genActualType(callRetTyp), call, false, callRetTyp);
8802 impPushOnStack(call, tiRetVal);
8805 // VSD functions get a new call target each time we getCallInfo, so clear the cache.
8806 // Also, the call info cache for CALLI instructions is largely incomplete, so clear it out.
8807 // if ( (opcode == CEE_CALLI) || (callInfoCache.fetchCallInfo().kind == CORINFO_VIRTUALCALL_STUB))
8808 // callInfoCache.uncacheCallInfo();
8813 #pragma warning(pop)
8816 bool Compiler::impMethodInfo_hasRetBuffArg(CORINFO_METHOD_INFO* methInfo)
8818 CorInfoType corType = methInfo->args.retType;
8820 if ((corType == CORINFO_TYPE_VALUECLASS) || (corType == CORINFO_TYPE_REFANY))
8822 // We have some kind of STRUCT being returned
8824 structPassingKind howToReturnStruct = SPK_Unknown;
8826 var_types returnType = getReturnTypeForStruct(methInfo->args.retTypeClass, &howToReturnStruct);
8828 if (howToReturnStruct == SPK_ByReference)
8830 return true;
8834 return false;
8839 var_types Compiler::impImportJitTestLabelMark(int numArgs)
8841 TestLabelAndNum tlAndN;
8843 if (numArgs == 2)
8845 StackEntry se = impPopStack();
8846 assert(se.seTypeInfo.GetType() == TI_INT);
8847 GenTree* val = se.val;
8848 assert(val->IsCnsIntOrI());
8849 tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue();
8851 else if (numArgs == 3)
8853 StackEntry se = impPopStack();
8854 assert(se.seTypeInfo.GetType() == TI_INT);
8855 GenTree* val = se.val;
8856 assert(val->IsCnsIntOrI());
8857 tlAndN.m_num = val->AsIntConCommon()->IconValue();
8858 se = impPopStack();
8859 assert(se.seTypeInfo.GetType() == TI_INT);
8860 val = se.val;
8861 assert(val->IsCnsIntOrI());
8862 tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue();
8869 StackEntry expSe = impPopStack();
8870 GenTree* node = expSe.val;
8872 // There are a small number of special cases, where we actually put the annotation on a subnode.
8873 if (tlAndN.m_tl == TL_LoopHoist && tlAndN.m_num >= 100)
8875 // A loop hoist annotation with value >= 100 means that the expression should be a static field access,
8876 // a GT_IND of a static field address, which should be the sum of a (hoistable) helper call and possibly some
8877 // offset within the static field block whose address is returned by the helper call.
8878 // The annotation is saying that this address calculation, but not the entire access, should be hoisted.
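// E.g. (illustrative tree shape): for GT_IND(ADD(helperCall, offset)), the test
// annotation is moved from the GT_IND onto its address operand just below.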
8879 GenTree* helperCall = nullptr;
8880 assert(node->OperGet() == GT_IND);
8881 tlAndN.m_num -= 100;
8882 GetNodeTestData()->Set(node->gtOp.gtOp1, tlAndN);
8883 GetNodeTestData()->Remove(node);
8887 GetNodeTestData()->Set(node, tlAndN);
8890 impPushOnStack(node, expSe.seTypeInfo);
8891 return node->TypeGet();
8895 //-----------------------------------------------------------------------------------
8896 // impFixupCallStructReturn: For a call node that returns a struct type either
8897 // adjust the return type to an enregisterable type, or set the flag to indicate
8898 // struct return via retbuf arg.
8901 // call - GT_CALL GenTree node
8902 // retClsHnd - Class handle of return type of the call
8905 // Returns new GenTree node after fixing struct return of call node
8907 GenTree* Compiler::impFixupCallStructReturn(GenTreeCall* call, CORINFO_CLASS_HANDLE retClsHnd)
8909 if (!varTypeIsStruct(call))
8911 return call;
8914 call->gtRetClsHnd = retClsHnd;
8916 #if FEATURE_MULTIREG_RET
8917 // Initialize Return type descriptor of call node
8918 ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
8919 retTypeDesc->InitializeStructReturnType(this, retClsHnd);
8920 #endif // FEATURE_MULTIREG_RET
8922 #ifdef UNIX_AMD64_ABI
8924 // Not allowed for FEATURE_CORECLR, which is the only SKU available for System V OSs.
8925 assert(!call->IsVarargs() && "varargs not allowed for System V OSs.");
8927 // The return type will remain as the incoming struct type unless normalized to a
8928 // single eightbyte return type below.
8929 call->gtReturnType = call->gtType;
8931 unsigned retRegCount = retTypeDesc->GetReturnRegCount();
8932 if (retRegCount != 0)
8934 if (retRegCount == 1)
8936 // See if the struct size is smaller than the return type size.
8938 if (retTypeDesc->IsEnclosingType())
8940 // If we know for sure this call will remain a call,
8941 // retype and return value via a suitable temp.
8942 if ((!call->CanTailCall()) && (!call->IsInlineCandidate()))
8944 call->gtReturnType = retTypeDesc->GetReturnRegType(0);
8945 return impAssignSmallStructTypeToVar(call, retClsHnd);
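// The path above handles, e.g., a 3-byte struct returned in one 4-byte register:
// the call is retyped to the register's type, and impAssignSmallStructTypeToVar
// spills it to a temp from which the struct value is then read.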
8950 // Return type is same size as struct, so we can
8951 // simply retype the call.
8952 call->gtReturnType = retTypeDesc->GetReturnRegType(0);
8957 // must be a struct returned in two registers
8958 assert(retRegCount == 2);
8960 if ((!call->CanTailCall()) && (!call->IsInlineCandidate()))
8962 // Force a call returning a multi-reg struct to always be of the IR form
8963 //     tmp = call
8965 // No need to assign a multi-reg struct to a local var if:
8966 // - It is a tail call or
8967 // - The call is marked for in-lining later
8968 return impAssignMultiRegTypeToVar(call, retClsHnd);
8974 // Struct not returned in registers, i.e. returned via a hidden retbuf arg.
8975 call->gtCallMoreFlags |= GTF_CALL_M_RETBUFFARG;
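// E.g. on SysV x64 a struct larger than 16 bytes (or otherwise classified as
// MEMORY) is not returned in RAX/RDX; the caller instead passes a hidden
// pointer to a return buffer that the callee writes through.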
8978 #else // not UNIX_AMD64_ABI
8980 // Check for TYP_STRUCT type that wraps a primitive type
8981 // Such structs are returned using a single register
8982 // and we change the return type on those calls here.
8984 structPassingKind howToReturnStruct;
8985 var_types returnType = getReturnTypeForStruct(retClsHnd, &howToReturnStruct);
8987 if (howToReturnStruct == SPK_ByReference)
8989 assert(returnType == TYP_UNKNOWN);
8990 call->gtCallMoreFlags |= GTF_CALL_M_RETBUFFARG;
8994 assert(returnType != TYP_UNKNOWN);
8996 // See if the struct size is smaller than the return type size.
8998 if (howToReturnStruct == SPK_EnclosingType)
9000 // If we know for sure this call will remain a call,
9001 // retype and return value via a suitable temp.
9002 if ((!call->CanTailCall()) && (!call->IsInlineCandidate()))
9004 call->gtReturnType = returnType;
9005 return impAssignSmallStructTypeToVar(call, retClsHnd);
9010 // Return type is same size as struct, so we can
9011 // simply retype the call.
9012 call->gtReturnType = returnType;
9015 // ToDo: Refactor this common code sequence into its own method as it is used 4+ times
9016 if ((returnType == TYP_LONG) && (compLongUsed == false))
9018 compLongUsed = true;
9020 else if (((returnType == TYP_FLOAT) || (returnType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
9022 compFloatingPointUsed = true;
9025 #if FEATURE_MULTIREG_RET
9026 unsigned retRegCount = retTypeDesc->GetReturnRegCount();
9027 assert(retRegCount != 0);
9029 if (retRegCount >= 2)
9031 if ((!call->CanTailCall()) && (!call->IsInlineCandidate()))
9033 // Force a call returning a multi-reg struct to always be of the IR form
9034 //     tmp = call
9036 // No need to assign a multi-reg struct to a local var if:
9037 // - It is a tail call or
9038 // - The call is marked for in-lining later
9039 return impAssignMultiRegTypeToVar(call, retClsHnd);
9042 #endif // FEATURE_MULTIREG_RET
9045 #endif // not UNIX_AMD64_ABI
9050 /*****************************************************************************
9051 For struct return values, re-type the operand in the case where the ABI
9052 does not use a struct return buffer
9053 Note that this method is only called for !_TARGET_X86_.
9054 */
9056 GenTree* Compiler::impFixupStructReturnType(GenTree* op, CORINFO_CLASS_HANDLE retClsHnd)
9058 assert(varTypeIsStruct(info.compRetType));
9059 assert(info.compRetBuffArg == BAD_VAR_NUM);
9061 JITDUMP("\nimpFixupStructReturnType: retyping\n");
9064 #if defined(_TARGET_XARCH_)
9066 #ifdef UNIX_AMD64_ABI
9067 // No VarArgs for CoreCLR on x64 Unix
9068 assert(!info.compIsVarArgs);
9070 // Is method returning a multi-reg struct?
9071 if (varTypeIsStruct(info.compRetNativeType) && IsMultiRegReturnedType(retClsHnd))
9073 // In case of multi-reg struct return, we force IR to be one of the following:
9074 // GT_RETURN(lclvar) or GT_RETURN(call). If op is anything other than a
9075 // lclvar or call, it is assigned to a temp to create: temp = op and GT_RETURN(tmp).
9077 if (op->gtOper == GT_LCL_VAR)
9079 // Make sure that this struct stays in memory and doesn't get promoted.
9080 unsigned lclNum = op->gtLclVarCommon.gtLclNum;
9081 lvaTable[lclNum].lvIsMultiRegRet = true;
9083 // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
9084 op->gtFlags |= GTF_DONT_CSE;
9086 return op;
9089 if (op->gtOper == GT_CALL)
9091 return op;
9094 return impAssignMultiRegTypeToVar(op, retClsHnd);
9096 #else // !UNIX_AMD64_ABI
9097 assert(info.compRetNativeType != TYP_STRUCT);
9098 #endif // !UNIX_AMD64_ABI
9100 #elif FEATURE_MULTIREG_RET && defined(_TARGET_ARM_)
9102 if (varTypeIsStruct(info.compRetNativeType) && !info.compIsVarArgs && IsHfa(retClsHnd))
9104 if (op->gtOper == GT_LCL_VAR)
9106 // This LCL_VAR is an HFA return value; it stays as a TYP_STRUCT.
9107 unsigned lclNum = op->gtLclVarCommon.gtLclNum;
9108 // Make sure this struct type stays as struct so that we can return it as an HFA
9109 lvaTable[lclNum].lvIsMultiRegRet = true;
9111 // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
9112 op->gtFlags |= GTF_DONT_CSE;
9114 return op;
9117 if (op->gtOper == GT_CALL)
9119 if (op->gtCall.IsVarargs())
9121 // We cannot tail call because control needs to return to fixup the calling
9122 // convention for result return.
9123 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL;
9124 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
9127 else
9128 return op;
9131 return impAssignMultiRegTypeToVar(op, retClsHnd);
9134 #elif FEATURE_MULTIREG_RET && defined(_TARGET_ARM64_)
9136 // Is method returning a multi-reg struct?
9137 if (IsMultiRegReturnedType(retClsHnd))
9139 if (op->gtOper == GT_LCL_VAR)
9141 // This LCL_VAR stays as a TYP_STRUCT
9142 unsigned lclNum = op->gtLclVarCommon.gtLclNum;
9144 // Make sure this struct type is not struct promoted
9145 lvaTable[lclNum].lvIsMultiRegRet = true;
9147 // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
9148 op->gtFlags |= GTF_DONT_CSE;
9150 return op;
9153 if (op->gtOper == GT_CALL)
9155 if (op->gtCall.IsVarargs())
9157 // We cannot tail call because control needs to return to fixup the calling
9158 // convention for result return.
9159 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL;
9160 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
9163 else
9164 return op;
9167 return impAssignMultiRegTypeToVar(op, retClsHnd);
9170 #endif // FEATURE_MULTIREG_RET
9172 REDO_RETURN_NODE:
9173 // Adjust the type away from struct to integral,
9174 // and do no normalizing.
9175 if (op->gtOper == GT_LCL_VAR)
9177 // It is possible that we now have a lclVar of scalar type.
9178 // If so, don't transform it to GT_LCL_FLD.
9179 if (varTypeIsStruct(lvaTable[op->AsLclVar()->gtLclNum].lvType))
9181 op->ChangeOper(GT_LCL_FLD);
9184 else if (op->gtOper == GT_OBJ)
9186 GenTree* op1 = op->AsObj()->Addr();
9188 // We will fold away OBJ/ADDR
9189 // except for OBJ/ADDR/INDEX
9190 // as the array type influences the array element's offset
9191 // Later in this method we change op->gtType to info.compRetNativeType
9192 // This is not correct when op is a GT_INDEX as the starting offset
9193 // for the array elements 'elemOffs' is different for an array of
9194 // TYP_REF than an array of TYP_STRUCT (which simply wraps a TYP_REF)
9195 // Also refer to the GTF_INX_REFARR_LAYOUT flag
9197 if ((op1->gtOper == GT_ADDR) && (op1->gtOp.gtOp1->gtOper != GT_INDEX))
9199 // Change '*(&X)' to 'X' and see if we can do better
9200 op = op1->gtOp.gtOp1;
9201 goto REDO_RETURN_NODE;
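// i.e. GT_OBJ(GT_ADDR(x)) collapses to just 'x', which is then reconsidered
// from the top of this routine.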
9203 op->gtObj.gtClass = NO_CLASS_HANDLE;
9204 op->ChangeOperUnchecked(GT_IND);
9205 op->gtFlags |= GTF_IND_TGTANYWHERE;
9207 else if (op->gtOper == GT_CALL)
9209 if (op->AsCall()->TreatAsHasRetBufArg(this))
9211 // This must be one of those 'special' helpers that don't
9212 // really have a return buffer, but instead use it as a way
9213 // to keep the trees cleaner with fewer address-taken temps.
9215 // Well, now we have to materialize the return buffer as
9216 // an address-taken temp. Then we can return the temp.
9218 // NOTE: this code assumes that since the call directly
9219 // feeds the return, then the call must be returning the
9220 // same structure/class/type.
9222 unsigned tmpNum = lvaGrabTemp(true DEBUGARG("pseudo return buffer"));
9224 // No need to spill anything as we're about to return.
9225 impAssignTempGen(tmpNum, op, info.compMethodInfo->args.retTypeClass, (unsigned)CHECK_SPILL_NONE);
9227 // Don't create both a GT_ADDR & GT_OBJ just to undo all of that; instead,
9228 // jump directly to a GT_LCL_FLD.
9229 op = gtNewLclvNode(tmpNum, info.compRetNativeType);
9230 op->ChangeOper(GT_LCL_FLD);
9234 // Don't change the gtType of the call just yet, it will get changed later.
9238 #if defined(FEATURE_HW_INTRINSICS) && defined(_TARGET_ARM64_)
9239 else if ((op->gtOper == GT_HWIntrinsic) && varTypeIsSIMD(op->gtType))
9241 // TODO-ARM64-FIXME Implement ARM64 ABI for Short Vectors properly
9242 // assert(op->gtType == info.compRetNativeType)
9243 if (op->gtType != info.compRetNativeType)
9245 // Insert a register move to keep target type of SIMD intrinsic intact
9246 op = gtNewScalarHWIntrinsicNode(info.compRetNativeType, op, NI_ARM64_NONE_MOV);
9249 #endif // defined(FEATURE_HW_INTRINSICS) && defined(_TARGET_ARM64_)
9250 else if (op->gtOper == GT_COMMA)
9252 op->gtOp.gtOp2 = impFixupStructReturnType(op->gtOp.gtOp2, retClsHnd);
9255 op->gtType = info.compRetNativeType;
9257 JITDUMP("\nimpFixupStructReturnType: result of retyping is\n");
9263 /*****************************************************************************
9264 CEE_LEAVE may be jumping out of a protected block, viz, a catch or a
9265 finally-protected try. We find the finally blocks protecting the current
9266 offset (in order) by walking over the complete exception table and
9267 finding enclosing clauses. This assumes that the table is sorted.
9268 This will create a series of BBJ_CALLFINALLY -> BBJ_CALLFINALLY ... -> BBJ_ALWAYS.
9270 If we are leaving a catch handler, we need to attach the
9271 CPX_ENDCATCHes to the correct BBJ_CALLFINALLY blocks.
9273 After this function, the BBJ_LEAVE block has been converted to a different type.
9274 */
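// E.g. a leave out of two nested finally-protected trys yields the chain
// BBJ_CALLFINALLY(inner finally) -> BBJ_CALLFINALLY(outer finally) -> BBJ_ALWAYS -> leave target.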
9276 #if !FEATURE_EH_FUNCLETS
9278 void Compiler::impImportLeave(BasicBlock* block)
9283 printf("\nBefore import CEE_LEAVE:\n");
9284 fgDispBasicBlocks();
9289 bool invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created)
9290 unsigned blkAddr = block->bbCodeOffs;
9291 BasicBlock* leaveTarget = block->bbJumpDest;
9292 unsigned jmpAddr = leaveTarget->bbCodeOffs;
9294 // LEAVE clears the stack: spill the side effects and set the stack depth to 0.
9296 impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave"));
9297 verCurrentState.esStackDepth = 0;
9299 assert(block->bbJumpKind == BBJ_LEAVE);
9300 assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != NULL); // should be a BB boundary
9302 BasicBlock* step = DUMMY_INIT(NULL);
9303 unsigned encFinallies = 0; // Number of enclosing finallies.
9304 GenTree* endCatches = NULL;
9305 GenTree* endLFin = NULL; // The statement tree to indicate the end of locally-invoked finally.
9310 for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
9312 // Grab the handler offsets
9314 IL_OFFSET tryBeg = HBtab->ebdTryBegOffs();
9315 IL_OFFSET tryEnd = HBtab->ebdTryEndOffs();
9316 IL_OFFSET hndBeg = HBtab->ebdHndBegOffs();
9317 IL_OFFSET hndEnd = HBtab->ebdHndEndOffs();
9319 /* Is this a catch-handler we are CEE_LEAVEing out of?
9320 * If so, we need to call CORINFO_HELP_ENDCATCH.
9321 */
9323 if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd))
9325 // Can't CEE_LEAVE out of a finally/fault handler
9326 if (HBtab->HasFinallyOrFaultHandler())
9327 BADCODE("leave out of fault/finally block");
9329 // Create the call to CORINFO_HELP_ENDCATCH
9330 GenTree* endCatch = gtNewHelperCallNode(CORINFO_HELP_ENDCATCH, TYP_VOID);
9332 // Make a list of all the currently pending endCatches
9334 endCatches = gtNewOperNode(GT_COMMA, TYP_VOID, endCatches, endCatch);
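// i.e. pending end-catches accumulate as a left-nested chain:
// COMMA(COMMA(endCatch1, endCatch2), endCatch3), all of TYP_VOID.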
9336 endCatches = endCatch;
9341 printf("impImportLeave - " FMT_BB " jumping out of catch handler EH#%u, adding call to "
9342 "CORINFO_HELP_ENDCATCH\n",
9343 block->bbNum, XTnum);
9347 else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
9348 !jitIsBetween(jmpAddr, tryBeg, tryEnd))
9350 /* This is a finally-protected try we are jumping out of */
9352 /* If there are any pending endCatches, and we have already
9353 jumped out of a finally-protected try, then the endCatches
9354 have to be put in a block in an outer try for async
9355 exceptions to work correctly.
9356 Else, just append to the original block. */
9358 BasicBlock* callBlock;
9360 assert(!encFinallies == !endLFin); // if we have finallies, we better have an endLFin tree, and vice-versa
9362 if (encFinallies == 0)
9364 assert(step == DUMMY_INIT(NULL));
9365 callBlock = block;
9366 callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY
9369 impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
9374 printf("impImportLeave - jumping out of a finally-protected try, convert block to BBJ_CALLFINALLY "
9376 callBlock->dspToString());
9382 assert(step != DUMMY_INIT(NULL));
9384 /* Calling the finally block */
9385 callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, XTnum + 1, 0, step);
9386 assert(step->bbJumpKind == BBJ_ALWAYS);
9387 step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next
9388 // finally in the chain)
9389 step->bbJumpDest->bbRefs++;
9391 /* The new block will inherit this block's weight */
9392 callBlock->setBBWeight(block->bbWeight);
9393 callBlock->bbFlags |= block->bbFlags & BBF_RUN_RARELY;
9398 printf("impImportLeave - jumping out of a finally-protected try, new BBJ_CALLFINALLY block %s\n",
9399 callBlock->dspToString());
9407 lastStmt = gtNewStmt(endCatches);
9408 endLFin->gtNext = lastStmt;
9409 lastStmt->gtPrev = endLFin;
9416 // note that this sets BBF_IMPORTED on the block
9417 impEndTreeList(callBlock, endLFin, lastStmt);
9420 step = fgNewBBafter(BBJ_ALWAYS, callBlock, true);
9421 /* The new block will inherit this block's weight */
9422 step->setBBWeight(block->bbWeight);
9423 step->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS;
9428 printf("impImportLeave - jumping out of a finally-protected try, created step (BBJ_ALWAYS) block %s\n",
9429 step->dspToString());
9433 unsigned finallyNesting = compHndBBtab[XTnum].ebdHandlerNestingLevel;
9434 assert(finallyNesting <= compHndBBtabCount);
9436 callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler.
9437 endLFin = new (this, GT_END_LFIN) GenTreeVal(GT_END_LFIN, TYP_VOID, finallyNesting);
9438 endLFin = gtNewStmt(endLFin);
9443 invalidatePreds = true;
9447 /* Append any remaining endCatches, if any */
9449 assert(!encFinallies == !endLFin);
9451 if (encFinallies == 0)
9453 assert(step == DUMMY_INIT(NULL));
9454 block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS
9457 impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
9462 printf("impImportLeave - no enclosing finally-protected try blocks; convert CEE_LEAVE block to BBJ_ALWAYS "
9464 block->dspToString());
9470 // If leaveTarget is the start of another try block, we want to make sure that
9471 // we do not insert finalStep into that try block. Hence, we find the enclosing try block.
9473 unsigned tryIndex = bbFindInnermostCommonTryRegion(step, leaveTarget);
9475 // Insert a new BB either in the try region indicated by tryIndex or
9476 // the handler region indicated by leaveTarget->bbHndIndex,
9477 // depending on which is the inner region.
9478 BasicBlock* finalStep = fgNewBBinRegion(BBJ_ALWAYS, tryIndex, leaveTarget->bbHndIndex, step);
9479 finalStep->bbFlags |= BBF_KEEP_BBJ_ALWAYS;
9480 step->bbJumpDest = finalStep;
9482 /* The new block will inherit this block's weight */
9483 finalStep->setBBWeight(block->bbWeight);
9484 finalStep->bbFlags |= block->bbFlags & BBF_RUN_RARELY;
9489 printf("impImportLeave - finalStep block required (encFinallies(%d) > 0), new block %s\n", encFinallies,
9490 finalStep->dspToString());
9498 lastStmt = gtNewStmt(endCatches);
9499 endLFin->gtNext = lastStmt;
9500 lastStmt->gtPrev = endLFin;
9507 impEndTreeList(finalStep, endLFin, lastStmt);
9509 finalStep->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE
9511 // Queue up the jump target for importing
9513 impImportBlockPending(leaveTarget);
9515 invalidatePreds = true;
9518 if (invalidatePreds && fgComputePredsDone)
9520 JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n");
9525 fgVerifyHandlerTab();
9529 printf("\nAfter import CEE_LEAVE:\n");
9530 fgDispBasicBlocks();
9536 #else // FEATURE_EH_FUNCLETS
9538 void Compiler::impImportLeave(BasicBlock* block)
9543 printf("\nBefore import CEE_LEAVE in " FMT_BB " (targetting " FMT_BB "):\n", block->bbNum,
9544 block->bbJumpDest->bbNum);
9545 fgDispBasicBlocks();
9550 bool invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created)
9551 unsigned blkAddr = block->bbCodeOffs;
9552 BasicBlock* leaveTarget = block->bbJumpDest;
9553 unsigned jmpAddr = leaveTarget->bbCodeOffs;
// LEAVE clears the stack: spill any side effects and set the stack depth to 0
9557 impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave"));
9558 verCurrentState.esStackDepth = 0;
9560 assert(block->bbJumpKind == BBJ_LEAVE);
9561 assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != nullptr); // should be a BB boundary
9563 BasicBlock* step = nullptr;
enum StepType
{
    // No step type; step == NULL.
    ST_None,

    // Is the step block the BBJ_ALWAYS block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair?
    // That is, is step->bbJumpDest where a finally will return to?
    ST_FinallyReturn,

    // The step block is a catch return.
    ST_Catch,

    // The step block is in a "try", created as the target for a finally return or the target for a catch return.
    ST_Try
};
StepType stepType = ST_None;
9585 for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
9587 // Grab the handler offsets
9589 IL_OFFSET tryBeg = HBtab->ebdTryBegOffs();
9590 IL_OFFSET tryEnd = HBtab->ebdTryEndOffs();
9591 IL_OFFSET hndBeg = HBtab->ebdHndBegOffs();
9592 IL_OFFSET hndEnd = HBtab->ebdHndEndOffs();
9594 /* Is this a catch-handler we are CEE_LEAVEing out of?
9597 if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd))
9599 // Can't CEE_LEAVE out of a finally/fault handler
9600 if (HBtab->HasFinallyOrFaultHandler())
9602 BADCODE("leave out of fault/finally block");
9605 /* We are jumping out of a catch */
9607 if (step == nullptr)
step = block;
step->bbJumpKind = BBJ_EHCATCHRET; // convert the BBJ_LEAVE to BBJ_EHCATCHRET
9611 stepType = ST_Catch;
9616 printf("impImportLeave - jumping out of a catch (EH#%u), convert block " FMT_BB
9617 " to BBJ_EHCATCHRET "
9619 XTnum, step->bbNum);
9625 BasicBlock* exitBlock;
/* Create a new catch exit block in the catch region for the existing step block to jump to in this scope */
9629 exitBlock = fgNewBBinRegion(BBJ_EHCATCHRET, 0, XTnum + 1, step);
9631 assert(step->bbJumpKind == BBJ_ALWAYS || step->bbJumpKind == BBJ_EHCATCHRET);
9632 step->bbJumpDest = exitBlock; // the previous step (maybe a call to a nested finally, or a nested catch
9633 // exit) returns to this block
9634 step->bbJumpDest->bbRefs++;
9636 #if defined(_TARGET_ARM_)
9637 if (stepType == ST_FinallyReturn)
9639 assert(step->bbJumpKind == BBJ_ALWAYS);
9640 // Mark the target of a finally return
9641 step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
9643 #endif // defined(_TARGET_ARM_)
9645 /* The new block will inherit this block's weight */
9646 exitBlock->setBBWeight(block->bbWeight);
9647 exitBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
9649 /* This exit block is the new step */
9651 stepType = ST_Catch;
9653 invalidatePreds = true;
9658 printf("impImportLeave - jumping out of a catch (EH#%u), new BBJ_EHCATCHRET block " FMT_BB "\n",
9659 XTnum, exitBlock->bbNum);
9664 else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
9665 !jitIsBetween(jmpAddr, tryBeg, tryEnd))
9667 /* We are jumping out of a finally-protected try */
9669 BasicBlock* callBlock;
9671 if (step == nullptr)
9673 #if FEATURE_EH_CALLFINALLY_THUNKS
9675 // Put the call to the finally in the enclosing region.
9676 unsigned callFinallyTryIndex =
9677 (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1;
9678 unsigned callFinallyHndIndex =
9679 (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1;
9680 callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, block);
9682 // Convert the BBJ_LEAVE to BBJ_ALWAYS, jumping to the new BBJ_CALLFINALLY. This is because
9683 // the new BBJ_CALLFINALLY is in a different EH region, thus it can't just replace the BBJ_LEAVE,
9684 // which might be in the middle of the "try". In most cases, the BBJ_ALWAYS will jump to the
9685 // next block, and flow optimizations will remove it.
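// As an illustrative sketch (block names are hypothetical, not literal jit output),
// the flow after this transformation looks like:
//
//   B0 (was BBJ_LEAVE, inside 'try'):  BBJ_ALWAYS --> callBlock
//   callBlock (in enclosing region):   BBJ_CALLFINALLY --> finally handler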
9686 block->bbJumpKind = BBJ_ALWAYS;
9687 block->bbJumpDest = callBlock;
9688 block->bbJumpDest->bbRefs++;
9690 /* The new block will inherit this block's weight */
9691 callBlock->setBBWeight(block->bbWeight);
9692 callBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
9697 printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block " FMT_BB
9699 "BBJ_ALWAYS, add BBJ_CALLFINALLY block " FMT_BB "\n",
9700 XTnum, block->bbNum, callBlock->bbNum);
9704 #else // !FEATURE_EH_CALLFINALLY_THUNKS
callBlock = block;
callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY
9712 printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block " FMT_BB
9714 "BBJ_CALLFINALLY block\n",
9715 XTnum, callBlock->bbNum);
9719 #endif // !FEATURE_EH_CALLFINALLY_THUNKS
9723 // Calling the finally block. We already have a step block that is either the call-to-finally from a
9724 // more nested try/finally (thus we are jumping out of multiple nested 'try' blocks, each protected by
9725 // a 'finally'), or the step block is the return from a catch.
9727 // Due to ThreadAbortException, we can't have the catch return target the call-to-finally block
9728 // directly. Note that if a 'catch' ends without resetting the ThreadAbortException, the VM will
9729 // automatically re-raise the exception, using the return address of the catch (that is, the target
9730 // block of the BBJ_EHCATCHRET) as the re-raise address. If this address is in a finally, the VM will
9731 // refuse to do the re-raise, and the ThreadAbortException will get eaten (and lost). On AMD64/ARM64,
9732 // we put the call-to-finally thunk in a special "cloned finally" EH region that does look like a
9733 // finally clause to the VM. Thus, on these platforms, we can't have BBJ_EHCATCHRET target a
9734 // BBJ_CALLFINALLY directly. (Note that on ARM32, we don't mark the thunk specially -- it lives directly
9735 // within the 'try' region protected by the finally, since we generate code in such a way that execution
// never returns to the call-to-finally call, and the finally-protected 'try' region doesn't appear on stack walks.)
9739 assert(step->bbJumpKind == BBJ_ALWAYS || step->bbJumpKind == BBJ_EHCATCHRET);
9741 #if FEATURE_EH_CALLFINALLY_THUNKS
9742 if (step->bbJumpKind == BBJ_EHCATCHRET)
9744 // Need to create another step block in the 'try' region that will actually branch to the
9745 // call-to-finally thunk.
9746 BasicBlock* step2 = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step);
9747 step->bbJumpDest = step2;
9748 step->bbJumpDest->bbRefs++;
9749 step2->setBBWeight(block->bbWeight);
9750 step2->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
9755 printf("impImportLeave - jumping out of a finally-protected try (EH#%u), step block is "
9756 "BBJ_EHCATCHRET (" FMT_BB "), new BBJ_ALWAYS step-step block " FMT_BB "\n",
9757 XTnum, step->bbNum, step2->bbNum);
9762 assert(stepType == ST_Catch); // Leave it as catch type for now.
9764 #endif // FEATURE_EH_CALLFINALLY_THUNKS
9766 #if FEATURE_EH_CALLFINALLY_THUNKS
9767 unsigned callFinallyTryIndex =
9768 (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1;
9769 unsigned callFinallyHndIndex =
9770 (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1;
9771 #else // !FEATURE_EH_CALLFINALLY_THUNKS
9772 unsigned callFinallyTryIndex = XTnum + 1;
9773 unsigned callFinallyHndIndex = 0; // don't care
9774 #endif // !FEATURE_EH_CALLFINALLY_THUNKS
9776 callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, step);
9777 step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next
9778 // finally in the chain)
9779 step->bbJumpDest->bbRefs++;
9781 #if defined(_TARGET_ARM_)
9782 if (stepType == ST_FinallyReturn)
9784 assert(step->bbJumpKind == BBJ_ALWAYS);
9785 // Mark the target of a finally return
9786 step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
9788 #endif // defined(_TARGET_ARM_)
9790 /* The new block will inherit this block's weight */
9791 callBlock->setBBWeight(block->bbWeight);
9792 callBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
9797 printf("impImportLeave - jumping out of a finally-protected try (EH#%u), new BBJ_CALLFINALLY "
9798 "block " FMT_BB "\n",
9799 XTnum, callBlock->bbNum);
9804 step = fgNewBBafter(BBJ_ALWAYS, callBlock, true);
9805 stepType = ST_FinallyReturn;
9807 /* The new block will inherit this block's weight */
9808 step->setBBWeight(block->bbWeight);
9809 step->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS;
9814 printf("impImportLeave - jumping out of a finally-protected try (EH#%u), created step (BBJ_ALWAYS) "
9815 "block " FMT_BB "\n",
9816 XTnum, step->bbNum);
9820 callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler.
9822 invalidatePreds = true;
9824 else if (HBtab->HasCatchHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
9825 !jitIsBetween(jmpAddr, tryBeg, tryEnd))
9827 // We are jumping out of a catch-protected try.
9829 // If we are returning from a call to a finally, then we must have a step block within a try
9830 // that is protected by a catch. This is so when unwinding from that finally (e.g., if code within the
9831 // finally raises an exception), the VM will find this step block, notice that it is in a protected region,
9832 // and invoke the appropriate catch.
9834 // We also need to handle a special case with the handling of ThreadAbortException. If a try/catch
9835 // catches a ThreadAbortException (which might be because it catches a parent, e.g. System.Exception),
9836 // and the catch doesn't call System.Threading.Thread::ResetAbort(), then when the catch returns to the VM,
9837 // the VM will automatically re-raise the ThreadAbortException. When it does this, it uses the target
9838 // address of the catch return as the new exception address. That is, the re-raised exception appears to
9839 // occur at the catch return address. If this exception return address skips an enclosing try/catch that
9840 // catches ThreadAbortException, then the enclosing try/catch will not catch the exception, as it should.
9845 // // something here raises ThreadAbortException
9846 // LEAVE LABEL_1; // no need to stop at LABEL_2
9847 // } catch (Exception) {
9848 // // This catches ThreadAbortException, but doesn't call System.Threading.Thread::ResetAbort(), so
9849 // // ThreadAbortException is re-raised by the VM at the address specified by the LEAVE opcode.
9850 // // This is bad, since it means the outer try/catch won't get a chance to catch the re-raised
9851 // // ThreadAbortException. So, instead, create step block LABEL_2 and LEAVE to that. We only
9852 // // need to do this transformation if the current EH block is a try/catch that catches
9853 // // ThreadAbortException (or one of its parents), however we might not be able to find that
9854 // // information, so currently we do it for all catch types.
9855 // LEAVE LABEL_1; // Convert this to LEAVE LABEL2;
9857 // LABEL_2: LEAVE LABEL_1; // inserted by this step creation code
9858 // } catch (ThreadAbortException) {
9862 // Note that this pattern isn't theoretical: it occurs in ASP.NET, in IL code generated by the Roslyn C#
9865 if ((stepType == ST_FinallyReturn) || (stepType == ST_Catch))
9867 BasicBlock* catchStep;
9871 if (stepType == ST_FinallyReturn)
9873 assert(step->bbJumpKind == BBJ_ALWAYS);
9877 assert(stepType == ST_Catch);
9878 assert(step->bbJumpKind == BBJ_EHCATCHRET);
9881 /* Create a new exit block in the try region for the existing step block to jump to in this scope */
9882 catchStep = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step);
9883 step->bbJumpDest = catchStep;
9884 step->bbJumpDest->bbRefs++;
9886 #if defined(_TARGET_ARM_)
9887 if (stepType == ST_FinallyReturn)
9889 // Mark the target of a finally return
9890 step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
9892 #endif // defined(_TARGET_ARM_)
9894 /* The new block will inherit this block's weight */
9895 catchStep->setBBWeight(block->bbWeight);
9896 catchStep->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
9901 if (stepType == ST_FinallyReturn)
9903 printf("impImportLeave - return from finally jumping out of a catch-protected try (EH#%u), new "
9904 "BBJ_ALWAYS block " FMT_BB "\n",
9905 XTnum, catchStep->bbNum);
9909 assert(stepType == ST_Catch);
9910 printf("impImportLeave - return from catch jumping out of a catch-protected try (EH#%u), new "
9911 "BBJ_ALWAYS block " FMT_BB "\n",
9912 XTnum, catchStep->bbNum);
9917 /* This block is the new step */
9921 invalidatePreds = true;
9926 if (step == nullptr)
9928 block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS
9933 printf("impImportLeave - no enclosing finally-protected try blocks or catch handlers; convert CEE_LEAVE "
9934 "block " FMT_BB " to BBJ_ALWAYS\n",
9941 step->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE
9943 #if defined(_TARGET_ARM_)
9944 if (stepType == ST_FinallyReturn)
9946 assert(step->bbJumpKind == BBJ_ALWAYS);
9947 // Mark the target of a finally return
9948 step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
9950 #endif // defined(_TARGET_ARM_)
9955 printf("impImportLeave - final destination of step blocks set to " FMT_BB "\n", leaveTarget->bbNum);
9959 // Queue up the jump target for importing
9961 impImportBlockPending(leaveTarget);
9964 if (invalidatePreds && fgComputePredsDone)
9966 JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n");
9971 fgVerifyHandlerTab();
9975 printf("\nAfter import CEE_LEAVE:\n");
9976 fgDispBasicBlocks();
9982 #endif // FEATURE_EH_FUNCLETS
9984 /*****************************************************************************/
9985 // This is called when reimporting a leave block. It resets the JumpKind,
9986 // JumpDest, and bbNext to the original values
9988 void Compiler::impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr)
9990 #if FEATURE_EH_FUNCLETS
9991 // With EH Funclets, while importing leave opcode we create another block ending with BBJ_ALWAYS (call it B1)
9992 // and the block containing leave (say B0) is marked as BBJ_CALLFINALLY. Say for some reason we reimport B0,
9993 // it is reset (in this routine) by marking as ending with BBJ_LEAVE and further down when B0 is reimported, we
// create another BBJ_ALWAYS (call it B2). In this process B1 gets orphaned, and any blocks for which B1 is the
// only predecessor are also considered orphans, and we attempt to delete them as well.
10002 // leave OUTSIDE; // B0 is the block containing this leave, following this would be B1
// In the above nested try-finally example, we create a step block (call it Bstep) which in turn branches to a block
10008 // where a finally would branch to (and such block is marked as finally target). Block B1 branches to step block.
10009 // Because of re-import of B0, Bstep is also orphaned. Since Bstep is a finally target it cannot be removed. To
// work around this we will duplicate B0 (call it B0Dup) before resetting it. B0Dup is marked as BBJ_CALLFINALLY and
10011 // only serves to pair up with B1 (BBJ_ALWAYS) that got orphaned. Now during orphan block deletion B0Dup and B1
// will be treated as a pair and handled correctly.
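// An illustrative summary of the fix (block names follow the comment above; this is a
// sketch, not literal compiler output):
//
//   before reset:  B0 (BBJ_CALLFINALLY) + B1 (BBJ_ALWAYS) form a pair
//   after reset:   B0 reverts to BBJ_LEAVE (and will be reimported), while
//                  B0Dup (BBJ_CALLFINALLY, internal, unreferenced) pairs with B1,
//                  so orphan-block deletion sees a well-formed pair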
10013 if (block->bbJumpKind == BBJ_CALLFINALLY)
10015 BasicBlock* dupBlock = bbNewBasicBlock(block->bbJumpKind);
10016 dupBlock->bbFlags = block->bbFlags;
10017 dupBlock->bbJumpDest = block->bbJumpDest;
10018 dupBlock->copyEHRegion(block);
10019 dupBlock->bbCatchTyp = block->bbCatchTyp;
// Mark this block as
// a) not referenced by any other block to make sure that it gets deleted
// b) weight zero
// c) prevented from being imported
// d) as internal
// e) as rarely run
10027 dupBlock->bbRefs = 0;
10028 dupBlock->bbWeight = 0;
10029 dupBlock->bbFlags |= BBF_IMPORTED | BBF_INTERNAL | BBF_RUN_RARELY;
10031 // Insert the block right after the block which is getting reset so that BBJ_CALLFINALLY and BBJ_ALWAYS
10032 // will be next to each other.
10033 fgInsertBBafter(block, dupBlock);
10038 printf("New Basic Block " FMT_BB " duplicate of " FMT_BB " created.\n", dupBlock->bbNum, block->bbNum);
10042 #endif // FEATURE_EH_FUNCLETS
10044 block->bbJumpKind = BBJ_LEAVE;
10046 block->bbJumpDest = fgLookupBB(jmpAddr);
// We will leave the BBJ_ALWAYS block we introduced. When it's reimported
// the BBJ_ALWAYS block will be unreachable, and will be removed afterwards. The
10050 // reason we don't want to remove the block at this point is that if we call
10051 // fgInitBBLookup() again we will do it wrong as the BBJ_ALWAYS block won't be
10052 // added and the linked list length will be different than fgBBcount.
10055 /*****************************************************************************/
10056 // Get the first non-prefix opcode. Used for verification of valid combinations
10057 // of prefixes and actual opcodes.
10059 static OPCODE impGetNonPrefixOpcode(const BYTE* codeAddr, const BYTE* codeEndp)
10061 while (codeAddr < codeEndp)
10063 OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr);
10064 codeAddr += sizeof(__int8);
10066 if (opcode == CEE_PREFIX1)
10068 if (codeAddr >= codeEndp)
10072 opcode = (OPCODE)(getU1LittleEndian(codeAddr) + 256);
10073 codeAddr += sizeof(__int8);
10078 case CEE_UNALIGNED:
10081 case CEE_CONSTRAINED:
10088 codeAddr += opcodeSizes[opcode];
10091 return CEE_ILLEGAL;
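// For example (illustrative, not from the original source): for the IL sequence
// "volatile. unaligned. 1 ldind.i4", calling this at the first prefix skips both
// prefixes (and the alignment operand) and returns CEE_LDIND_I4; if the stream ends
// before a real opcode is found, CEE_ILLEGAL is returned instead.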
10094 /*****************************************************************************/
10095 // Checks whether the opcode is a valid opcode for volatile. and unaligned. prefixes
10097 static void impValidateMemoryAccessOpcode(const BYTE* codeAddr, const BYTE* codeEndp, bool volatilePrefix)
10099 OPCODE opcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
// The opcodes of all the ldind and stind variants happen to be contiguous, except for stind.i.
10103 ((CEE_LDIND_I1 <= opcode) && (opcode <= CEE_STIND_R8)) || (opcode == CEE_STIND_I) ||
10104 (opcode == CEE_LDFLD) || (opcode == CEE_STFLD) || (opcode == CEE_LDOBJ) || (opcode == CEE_STOBJ) ||
10105 (opcode == CEE_INITBLK) || (opcode == CEE_CPBLK) ||
// the volatile. prefix is also allowed with ldsfld and stsfld
10107 (volatilePrefix && ((opcode == CEE_LDSFLD) || (opcode == CEE_STSFLD)))))
10109 BADCODE("Invalid opcode for unaligned. or volatile. prefix");
10113 /*****************************************************************************/
10117 #undef RETURN // undef contracts RETURN macro
10132 const static controlFlow_t controlFlow[] = {
10133 #define OPDEF(c, s, pop, push, args, type, l, s1, s2, flow) flow,
10134 #include "opcode.def"
10140 /*****************************************************************************
 * Determine the result type of an arithmetic operation
 * On 64-bit, inserts upcasts when native int is mixed with int32
10144 var_types Compiler::impGetByRefResultType(genTreeOps oper, bool fUnsigned, GenTree** pOp1, GenTree** pOp2)
10146 var_types type = TYP_UNDEF;
10147 GenTree* op1 = *pOp1;
10148 GenTree* op2 = *pOp2;
// Arithmetic operations are generally only allowed with
// primitive types, but certain operations are allowed
// with byrefs
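// A summary sketch of the result-type rules implemented below:
//
//   byref  - byref            => native int
//   [native] int - byref      => native int (see the VSW 318822 note below)
//   byref  - [native] int     => byref
//   byref  + [native] int     => byref (and symmetrically int + byref)
//   int mixed with native int => native int (upcasts inserted on 64-bit targets)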
10154 if ((oper == GT_SUB) && (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF))
10156 if ((genActualType(op1->TypeGet()) == TYP_BYREF) && (genActualType(op2->TypeGet()) == TYP_BYREF))
10158 // byref1-byref2 => gives a native int
10161 else if (genActualTypeIsIntOrI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_BYREF))
10163 // [native] int - byref => gives a native int
// The reason is that it is possible, in managed C++,
// to have a tree like this:
//
//               -
//              / \
//             /   \
//            /     \
//           /       \
// const(h) int     addr byref
//
// <BUGNUM> VSW 318822 </BUGNUM>
//
// So here we decide to make the resulting type a native int.
10179 CLANG_FORMAT_COMMENT_ANCHOR;
10181 #ifdef _TARGET_64BIT_
10182 if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
10184 // insert an explicit upcast
10185 op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
10187 #endif // _TARGET_64BIT_
10193 // byref - [native] int => gives a byref
10194 assert(genActualType(op1->TypeGet()) == TYP_BYREF && genActualTypeIsIntOrI(op2->TypeGet()));
10196 #ifdef _TARGET_64BIT_
10197 if ((genActualType(op2->TypeGet()) != TYP_I_IMPL))
10199 // insert an explicit upcast
10200 op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
10202 #endif // _TARGET_64BIT_
10207 else if ((oper == GT_ADD) &&
10208 (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF))
10210 // byref + [native] int => gives a byref
10212 // [native] int + byref => gives a byref
10214 // only one can be a byref : byref op byref not allowed
10215 assert(genActualType(op1->TypeGet()) != TYP_BYREF || genActualType(op2->TypeGet()) != TYP_BYREF);
10216 assert(genActualTypeIsIntOrI(op1->TypeGet()) || genActualTypeIsIntOrI(op2->TypeGet()));
10218 #ifdef _TARGET_64BIT_
10219 if (genActualType(op2->TypeGet()) == TYP_BYREF)
10221 if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
10223 // insert an explicit upcast
10224 op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
10227 else if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
10229 // insert an explicit upcast
10230 op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
10232 #endif // _TARGET_64BIT_
10236 #ifdef _TARGET_64BIT_
10237 else if (genActualType(op1->TypeGet()) == TYP_I_IMPL || genActualType(op2->TypeGet()) == TYP_I_IMPL)
10239 assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType));
10241 // int + long => gives long
10242 // long + int => gives long
10243 // we get this because in the IL the long isn't Int64, it's just IntPtr
10245 if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
10247 // insert an explicit upcast
10248 op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
10250 else if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
10252 // insert an explicit upcast
10253 op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
10258 #else // 32-bit TARGET
10259 else if (genActualType(op1->TypeGet()) == TYP_LONG || genActualType(op2->TypeGet()) == TYP_LONG)
10261 assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType));
10263 // int + long => gives long
10264 // long + int => gives long
10268 #endif // _TARGET_64BIT_
10271 // int + int => gives an int
10272 assert(genActualType(op1->TypeGet()) != TYP_BYREF && genActualType(op2->TypeGet()) != TYP_BYREF);
10274 assert(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
10275 varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
10277 type = genActualType(op1->gtType);
10279 // If both operands are TYP_FLOAT, then leave it as TYP_FLOAT.
10280 // Otherwise, turn floats into doubles
10281 if ((type == TYP_FLOAT) && (genActualType(op2->gtType) != TYP_FLOAT))
10283 assert(genActualType(op2->gtType) == TYP_DOUBLE);
10288 assert(type == TYP_BYREF || type == TYP_DOUBLE || type == TYP_FLOAT || type == TYP_LONG || type == TYP_INT);
10292 //------------------------------------------------------------------------
10293 // impOptimizeCastClassOrIsInst: attempt to resolve a cast when jitting
10296 // op1 - value to cast
10297 // pResolvedToken - resolved token for type to cast to
10298 // isCastClass - true if this is a castclass, false if isinst
10301 // tree representing optimized cast, or null if no optimization possible
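// Illustrative C# example (hypothetical, not from the original source):
//
//   object o = "hello";       // the jit can see that o is exactly System.String
//   bool b = o is Exception;  // compareTypesForCast reports TypeCompareState::MustNot
//
// Because the source type is exact and this is an isinst rather than a castclass,
// the code below folds the result to null instead of emitting a runtime check.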
10303 GenTree* Compiler::impOptimizeCastClassOrIsInst(GenTree* op1, CORINFO_RESOLVED_TOKEN* pResolvedToken, bool isCastClass)
10305 assert(op1->TypeGet() == TYP_REF);
10307 // Don't optimize for minopts or debug codegen.
10308 if (opts.compDbgCode || opts.MinOpts())
10313 // See what we know about the type of the object being cast.
10314 bool isExact = false;
10315 bool isNonNull = false;
10316 CORINFO_CLASS_HANDLE fromClass = gtGetClassHandle(op1, &isExact, &isNonNull);
10317 GenTree* optResult = nullptr;
10319 if (fromClass != nullptr)
10321 CORINFO_CLASS_HANDLE toClass = pResolvedToken->hClass;
10322 JITDUMP("\nConsidering optimization of %s from %s%p (%s) to %p (%s)\n", isCastClass ? "castclass" : "isinst",
10323 isExact ? "exact " : "", dspPtr(fromClass), info.compCompHnd->getClassName(fromClass), dspPtr(toClass),
10324 info.compCompHnd->getClassName(toClass));
10326 // Perhaps we know if the cast will succeed or fail.
10327 TypeCompareState castResult = info.compCompHnd->compareTypesForCast(fromClass, toClass);
10329 if (castResult == TypeCompareState::Must)
10331 // Cast will succeed, result is simply op1.
10332 JITDUMP("Cast will succeed, optimizing to simply return input\n");
10335 else if (castResult == TypeCompareState::MustNot)
10337 // See if we can sharpen exactness by looking for final classes
10340 DWORD flags = info.compCompHnd->getClassAttribs(fromClass);
10341 DWORD flagsMask = CORINFO_FLG_FINAL | CORINFO_FLG_MARSHAL_BYREF | CORINFO_FLG_CONTEXTFUL |
10342 CORINFO_FLG_VARIANCE | CORINFO_FLG_ARRAY;
10343 isExact = ((flags & flagsMask) == CORINFO_FLG_FINAL);
10346 // Cast to exact type will fail. Handle case where we have
10347 // an exact type (that is, fromClass is not a subtype)
10348 // and we're not going to throw on failure.
10349 if (isExact && !isCastClass)
10351 JITDUMP("Cast will fail, optimizing to return null\n");
10352 GenTree* result = gtNewIconNode(0, TYP_REF);
10354 // If the cast was fed by a box, we can remove that too.
10355 if (op1->IsBoxedValue())
10357 JITDUMP("Also removing upstream box\n");
10358 gtTryRemoveBoxUpstreamEffects(op1);
10365 JITDUMP("Not optimizing failing castclass (yet)\n");
10369 JITDUMP("Can't optimize since fromClass is inexact\n");
10374 JITDUMP("Result of cast unknown, must generate runtime test\n");
10379 JITDUMP("\nCan't optimize since fromClass is unknown\n");
10385 //------------------------------------------------------------------------
10386 // impCastClassOrIsInstToTree: build and import castclass/isinst
10389 // op1 - value to cast
10390 // op2 - type handle for type to cast to
10391 // pResolvedToken - resolved token from the cast operation
10392 // isCastClass - true if this is castclass, false means isinst
10395 // Tree representing the cast
10398 // May expand into a series of runtime checks or a helper call.
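//    When expanded inline, the overall shape is roughly the following (a sketch of
//    the QMARK trees built below, not literal jit output):
//
//      tmp = (op1 == null) ? op1
//                          : (op1->methodTable != op2) ? helperResult : op1;
//
//    where helperResult is a CORINFO_HELP_CHKCASTCLASS_SPECIAL call for castclass,
//    or simply null for isinst.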
10400 GenTree* Compiler::impCastClassOrIsInstToTree(GenTree* op1,
10402 CORINFO_RESOLVED_TOKEN* pResolvedToken,
10405 assert(op1->TypeGet() == TYP_REF);
10407 // Optimistically assume the jit should expand this as an inline test
10408 bool shouldExpandInline = true;
10410 // Profitability check.
10412 // Don't bother with inline expansion when jit is trying to
10413 // generate code quickly, or the cast is in code that won't run very
10414 // often, or the method already is pretty big.
10415 if (compCurBB->isRunRarely() || opts.compDbgCode || opts.MinOpts())
10417 // not worth the code expansion if jitting fast or in a rarely run block
10418 shouldExpandInline = false;
10420 else if ((op1->gtFlags & GTF_GLOB_EFFECT) && lvaHaveManyLocals())
10422 // not worth creating an untracked local variable
10423 shouldExpandInline = false;
10426 // Pessimistically assume the jit cannot expand this as an inline test
10427 bool canExpandInline = false;
10428 const CorInfoHelpFunc helper = info.compCompHnd->getCastingHelper(pResolvedToken, isCastClass);
// Not all castclass/isinst operations can be inline expanded.
10433 // Check legality only if an inline expansion is desirable.
10434 if (shouldExpandInline)
10438 // Jit can only inline expand the normal CHKCASTCLASS helper.
10439 canExpandInline = (helper == CORINFO_HELP_CHKCASTCLASS);
10443 if (helper == CORINFO_HELP_ISINSTANCEOFCLASS)
10445 // Check the class attributes.
10446 DWORD flags = info.compCompHnd->getClassAttribs(pResolvedToken->hClass);
10448 // If the class is final and is not marshal byref or
10449 // contextful, the jit can expand the IsInst check inline.
10450 DWORD flagsMask = CORINFO_FLG_FINAL | CORINFO_FLG_MARSHAL_BYREF | CORINFO_FLG_CONTEXTFUL;
10451 canExpandInline = ((flags & flagsMask) == CORINFO_FLG_FINAL);
10456 const bool expandInline = canExpandInline && shouldExpandInline;
10460 JITDUMP("\nExpanding %s as call because %s\n", isCastClass ? "castclass" : "isinst",
10461 canExpandInline ? "want smaller code or faster jitting" : "inline expansion not legal");
10463 // If we CSE this class handle we prevent assertionProp from making SubType assertions
10464 // so instead we force the CSE logic to not consider CSE-ing this class handle.
10466 op2->gtFlags |= GTF_DONT_CSE;
10468 return gtNewHelperCallNode(helper, TYP_REF, gtNewArgList(op2, op1));
10471 JITDUMP("\nExpanding %s inline\n", isCastClass ? "castclass" : "isinst");
10473 impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark2"));
10478 // expand the methodtable match:
// condMT ==> GT_NE
//            /    \
//         GT_IND   op2 (typically CNS_INT)
//            |
//         op1Copy
10487 // This can replace op1 with a GT_COMMA that evaluates op1 into a local
10489 op1 = impCloneExpr(op1, &temp, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("CASTCLASS eval op1"));
10491 // op1 is now known to be a non-complex tree
10492 // thus we can use gtClone(op1) from now on
10495 GenTree* op2Var = op2;
10498 op2Var = fgInsertCommaFormTemp(&op2);
10499 lvaTable[op2Var->AsLclVarCommon()->GetLclNum()].lvIsCSE = true;
10501 temp = gtNewOperNode(GT_IND, TYP_I_IMPL, temp);
10502 temp->gtFlags |= GTF_EXCEPT;
10503 condMT = gtNewOperNode(GT_NE, TYP_INT, temp, op2);
10507 // expand the null check:
// condNull ==> GT_EQ
//              /    \
//         op1Copy   CNS_INT
//                     null
10514 condNull = gtNewOperNode(GT_EQ, TYP_INT, gtClone(op1), gtNewIconNode(0, TYP_REF));
10517 // expand the true and false trees for the condMT
10519 GenTree* condFalse = gtClone(op1);
10524 // use the special helper that skips the cases checked by our inlined cast
10526 const CorInfoHelpFunc specialHelper = CORINFO_HELP_CHKCASTCLASS_SPECIAL;
10528 condTrue = gtNewHelperCallNode(specialHelper, TYP_REF, gtNewArgList(op2Var, gtClone(op1)));
10532 condTrue = gtNewIconNode(0, TYP_REF);
10535 #define USE_QMARK_TREES
10537 #ifdef USE_QMARK_TREES
10540 // Generate first QMARK - COLON tree
// qmarkMT ==> GT_QMARK
//              /     \
//         condMT   GT_COLON
//                   /     \
//             condFalse  condTrue
10548 temp = new (this, GT_COLON) GenTreeColon(TYP_REF, condTrue, condFalse);
10549 qmarkMT = gtNewQmarkNode(TYP_REF, condMT, temp);
10551 GenTree* qmarkNull;
10553 // Generate second QMARK - COLON tree
// qmarkNull ==> GT_QMARK
//                /     \
//          condNull   GT_COLON
//                      /     \
//                qmarkMT   op1Copy
10561 temp = new (this, GT_COLON) GenTreeColon(TYP_REF, gtClone(op1), qmarkMT);
10562 qmarkNull = gtNewQmarkNode(TYP_REF, condNull, temp);
10563 qmarkNull->gtFlags |= GTF_QMARK_CAST_INSTOF;
10565 // Make QMark node a top level node by spilling it.
10566 unsigned tmp = lvaGrabTemp(true DEBUGARG("spilling QMark2"));
10567 impAssignTempGen(tmp, qmarkNull, (unsigned)CHECK_SPILL_NONE);
10569 // TODO-CQ: Is it possible op1 has a better type?
10571 // See also gtGetHelperCallClassHandle where we make the same
10572 // determination for the helper call variants.
10573 LclVarDsc* lclDsc = lvaGetDesc(tmp);
10574 assert(lclDsc->lvSingleDef == 0);
10575 lclDsc->lvSingleDef = 1;
10576 JITDUMP("Marked V%02u as a single def temp\n", tmp);
10577 lvaSetClass(tmp, pResolvedToken->hClass);
10578 return gtNewLclvNode(tmp, TYP_REF);
10583 #define assertImp(cond) ((void)0)
10585 #define assertImp(cond) \
10590 const int cchAssertImpBuf = 600; \
10591 char* assertImpBuf = (char*)alloca(cchAssertImpBuf); \
10592 _snprintf_s(assertImpBuf, cchAssertImpBuf, cchAssertImpBuf - 1, \
10593 "%s : Possibly bad IL with CEE_%s at offset %04Xh (op1=%s op2=%s stkDepth=%d)", #cond, \
10594 impCurOpcName, impCurOpcOffs, op1 ? varTypeName(op1->TypeGet()) : "NULL", \
10595 op2 ? varTypeName(op2->TypeGet()) : "NULL", verCurrentState.esStackDepth); \
10596 assertAbort(assertImpBuf, __FILE__, __LINE__); \
10602 #pragma warning(push)
10603 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
10605 /*****************************************************************************
10606 * Import the instr for the given basic block
10608 void Compiler::impImportBlockCode(BasicBlock* block)
10610 #define _impResolveToken(kind) impResolveToken(codeAddr, &resolvedToken, kind)
10616 printf("\nImporting " FMT_BB " (PC=%03u) of '%s'", block->bbNum, block->bbCodeOffs, info.compFullName);
10620 unsigned nxtStmtIndex = impInitBlockLineInfo();
10621 IL_OFFSET nxtStmtOffs;
10623 GenTree* arrayNodeFrom;
10624 GenTree* arrayNodeTo;
10625 GenTree* arrayNodeToIndex;
10626 CorInfoHelpFunc helper;
10627 CorInfoIsAccessAllowedResult accessAllowedResult;
10628 CORINFO_HELPER_DESC calloutHelper;
10629 const BYTE* lastLoadToken = nullptr;
10631 // reject cyclic constraints
10632 if (tiVerificationNeeded)
10634 Verify(!info.hasCircularClassConstraints, "Method parent has circular class type parameter constraints.");
10635 Verify(!info.hasCircularMethodConstraints, "Method has circular method type parameter constraints.");
10638 /* Get the tree list started */
10640 impBeginTreeList();
10642 /* Walk the opcodes that comprise the basic block */
10644 const BYTE* codeAddr = info.compCode + block->bbCodeOffs;
10645 const BYTE* codeEndp = info.compCode + block->bbCodeOffsEnd;
10647 IL_OFFSET opcodeOffs = block->bbCodeOffs;
10648 IL_OFFSET lastSpillOffs = opcodeOffs;
10652 /* remember the start of the delegate creation sequence (used for verification) */
10653 const BYTE* delegateCreateStart = nullptr;
10655 int prefixFlags = 0;
10656 bool explicitTailCall, constraintCall, readonlyCall;
10660 unsigned numArgs = info.compArgsCount;
10662 /* Now process all the opcodes in the block */
10664 var_types callTyp = TYP_COUNT;
10665 OPCODE prevOpcode = CEE_ILLEGAL;
10667 if (block->bbCatchTyp)
10669 if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES)
10671 impCurStmtOffsSet(block->bbCodeOffs);
10674 // We will spill the GT_CATCH_ARG and the input of the BB_QMARK block
// to a temp. This is a trade-off for code simplicity
10676 impSpillSpecialSideEff();
10679 while (codeAddr < codeEndp)
10681 bool usingReadyToRunHelper = false;
10682 CORINFO_RESOLVED_TOKEN resolvedToken;
10683 CORINFO_RESOLVED_TOKEN constrainedResolvedToken;
10684 CORINFO_CALL_INFO callInfo;
10685 CORINFO_FIELD_INFO fieldInfo;
10687 tiRetVal = typeInfo(); // Default type info
10689 //---------------------------------------------------------------------
10691 /* We need to restrict the max tree depth as many of the Compiler
10692 functions are recursive. We do this by spilling the stack */
10694 if (verCurrentState.esStackDepth)
/* Has it been a while since we last saw a non-empty stack (which
   guarantees that the tree depth isn't accumulating)? */
10699 if ((opcodeOffs - lastSpillOffs) > MAX_TREE_SIZE && impCanSpillNow(prevOpcode))
10701 impSpillStackEnsure();
10702 lastSpillOffs = opcodeOffs;
10707 lastSpillOffs = opcodeOffs;
10708 impBoxTempInUse = false; // nothing on the stack, box temp OK to use again
10711 /* Compute the current instr offset */
10713 opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
10716 if (opts.compDbgInfo)
10719 if (!compIsForInlining())
10722 (nxtStmtIndex < info.compStmtOffsetsCount) ? info.compStmtOffsets[nxtStmtIndex] : BAD_IL_OFFSET;
10724 /* Have we reached the next stmt boundary ? */
10726 if (nxtStmtOffs != BAD_IL_OFFSET && opcodeOffs >= nxtStmtOffs)
10728 assert(nxtStmtOffs == info.compStmtOffsets[nxtStmtIndex]);
10730 if (verCurrentState.esStackDepth != 0 && opts.compDbgCode)
10732 /* We need to provide accurate IP-mapping at this point.
10733 So spill anything on the stack so that it will form
10734 gtStmts with the correct stmt offset noted */
10736 impSpillStackEnsure(true);
10739 // Has impCurStmtOffs been reported in any tree?
10741 if (impCurStmtOffs != BAD_IL_OFFSET && opts.compDbgCode)
10743 GenTree* placeHolder = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
10744 impAppendTree(placeHolder, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
10746 assert(impCurStmtOffs == BAD_IL_OFFSET);
10749 if (impCurStmtOffs == BAD_IL_OFFSET)
10751 /* Make sure that nxtStmtIndex is in sync with opcodeOffs.
10752 If opcodeOffs has gone past nxtStmtIndex, catch up */
10754 while ((nxtStmtIndex + 1) < info.compStmtOffsetsCount &&
10755 info.compStmtOffsets[nxtStmtIndex + 1] <= opcodeOffs)
10760 /* Go to the new stmt */
10762 impCurStmtOffsSet(info.compStmtOffsets[nxtStmtIndex]);
10764 /* Update the stmt boundary index */
10767 assert(nxtStmtIndex <= info.compStmtOffsetsCount);
10769 /* Are there any more line# entries after this one? */
10771 if (nxtStmtIndex < info.compStmtOffsetsCount)
10773 /* Remember where the next line# starts */
10775 nxtStmtOffs = info.compStmtOffsets[nxtStmtIndex];
10779 /* No more line# entries */
10781 nxtStmtOffs = BAD_IL_OFFSET;
10785 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES) &&
10786 (verCurrentState.esStackDepth == 0))
10788 /* At stack-empty locations, we have already added the tree to
10789 the stmt list with the last offset. We just need to update
10793 impCurStmtOffsSet(opcodeOffs);
10795 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES) &&
10796 impOpcodeIsCallSiteBoundary(prevOpcode))
10798 /* Make sure we have a type cached */
10799 assert(callTyp != TYP_COUNT);
10801 if (callTyp == TYP_VOID)
10803 impCurStmtOffsSet(opcodeOffs);
10805 else if (opts.compDbgCode)
10807 impSpillStackEnsure(true);
10808 impCurStmtOffsSet(opcodeOffs);
10811 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::NOP_BOUNDARIES) && (prevOpcode == CEE_NOP))
10813 if (opts.compDbgCode)
10815 impSpillStackEnsure(true);
10818 impCurStmtOffsSet(opcodeOffs);
10821 assert(impCurStmtOffs == BAD_IL_OFFSET || nxtStmtOffs == BAD_IL_OFFSET ||
10822 jitGetILoffs(impCurStmtOffs) <= nxtStmtOffs);
10826 CORINFO_CLASS_HANDLE clsHnd = DUMMY_INIT(NULL);
10827 CORINFO_CLASS_HANDLE ldelemClsHnd = DUMMY_INIT(NULL);
10828 CORINFO_CLASS_HANDLE stelemClsHnd = DUMMY_INIT(NULL);
10830 var_types lclTyp, ovflType = TYP_UNKNOWN;
10831 GenTree* op1 = DUMMY_INIT(NULL);
10832 GenTree* op2 = DUMMY_INIT(NULL);
10833 GenTreeArgList* args = nullptr; // What good do these "DUMMY_INIT"s do?
10834 GenTree* newObjThisPtr = DUMMY_INIT(NULL);
10835 bool uns = DUMMY_INIT(false);
10836 bool isLocal = false;
10838 /* Get the next opcode and the size of its parameters */
10840 OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr);
10841 codeAddr += sizeof(__int8);
10844 impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1);
10845 JITDUMP("\n [%2u] %3u (0x%03x) ", verCurrentState.esStackDepth, impCurOpcOffs, impCurOpcOffs);
10850 // Return if any previous code has caused inline to fail.
10851 if (compDonotInline())
10856 /* Get the size of additional parameters */
10858 signed int sz = opcodeSizes[opcode];
10861 clsHnd = NO_CLASS_HANDLE;
10862 lclTyp = TYP_COUNT;
10863 callTyp = TYP_COUNT;
10865 impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1);
10866 impCurOpcName = opcodeNames[opcode];
10868 if (verbose && (opcode != CEE_PREFIX1))
10870 printf("%s", impCurOpcName);
10873 /* Use assertImp() to display the opcode */
10875 op1 = op2 = nullptr;
10878 /* See what kind of an opcode we have, then */
10880 unsigned mflags = 0;
10881 unsigned clsFlags = 0;
10894 CORINFO_SIG_INFO sig;
10896 bool ovfl, unordered, callNode;
10898 CORINFO_CLASS_HANDLE tokenType;
10908 opcode = (OPCODE)(getU1LittleEndian(codeAddr) + 256);
10909 opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
10910 codeAddr += sizeof(__int8);
10911 goto DECODE_OPCODE;
10915 // We need to call impSpillLclRefs() for a struct type lclVar.
10916 // This is done for non-block assignments in the handling of stloc.
10917 if ((op1->OperGet() == GT_ASG) && varTypeIsStruct(op1->gtOp.gtOp1) &&
10918 (op1->gtOp.gtOp1->gtOper == GT_LCL_VAR))
10920 impSpillLclRefs(op1->gtOp.gtOp1->AsLclVarCommon()->gtLclNum);
10923 /* Append 'op1' to the list of statements */
10924 impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
10929 /* Append 'op1' to the list of statements */
10931 impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
10937 // Remember at which BC offset the tree was finished
10938 impNoteLastILoffs();
10943 impPushNullObjRefOnStack();
10946 case CEE_LDC_I4_M1:
10956 cval.intVal = (opcode - CEE_LDC_I4_0);
10957 assert(-1 <= cval.intVal && cval.intVal <= 8);
10961 cval.intVal = getI1LittleEndian(codeAddr);
10964 cval.intVal = getI4LittleEndian(codeAddr);
10967 JITDUMP(" %d", cval.intVal);
10968 impPushOnStack(gtNewIconNode(cval.intVal), typeInfo(TI_INT));
10972 cval.lngVal = getI8LittleEndian(codeAddr);
10973 JITDUMP(" 0x%016llx", cval.lngVal);
10974 impPushOnStack(gtNewLconNode(cval.lngVal), typeInfo(TI_LONG));
10978 cval.dblVal = getR8LittleEndian(codeAddr);
10979 JITDUMP(" %#.17g", cval.dblVal);
10980 impPushOnStack(gtNewDconNode(cval.dblVal), typeInfo(TI_DOUBLE));
10984 cval.dblVal = getR4LittleEndian(codeAddr);
10985 JITDUMP(" %#.17g", cval.dblVal);
10987 GenTree* cnsOp = gtNewDconNode(cval.dblVal);
10988 cnsOp->gtType = TYP_FLOAT;
10989 impPushOnStack(cnsOp, typeInfo(TI_DOUBLE));
10995 if (compIsForInlining())
10997 if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_NO_CALLEE_LDSTR)
10999 compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_LDSTR_RESTRICTION);
11004 val = getU4LittleEndian(codeAddr);
11005 JITDUMP(" %08X", val);
11006 if (tiVerificationNeeded)
11008 Verify(info.compCompHnd->isValidStringRef(info.compScopeHnd, val), "bad string");
11009 tiRetVal = typeInfo(TI_REF, impGetStringClass());
11011 impPushOnStack(gtNewSconNode(val, info.compScopeHnd), tiRetVal);
11016 lclNum = getU2LittleEndian(codeAddr);
11017 JITDUMP(" %u", lclNum);
11018 impLoadArg(lclNum, opcodeOffs + sz + 1);
11022 lclNum = getU1LittleEndian(codeAddr);
11023 JITDUMP(" %u", lclNum);
11024 impLoadArg(lclNum, opcodeOffs + sz + 1);
11031 lclNum = (opcode - CEE_LDARG_0);
11032 assert(lclNum >= 0 && lclNum < 4);
11033 impLoadArg(lclNum, opcodeOffs + sz + 1);
11037 lclNum = getU2LittleEndian(codeAddr);
11038 JITDUMP(" %u", lclNum);
11039 impLoadLoc(lclNum, opcodeOffs + sz + 1);
11043 lclNum = getU1LittleEndian(codeAddr);
11044 JITDUMP(" %u", lclNum);
11045 impLoadLoc(lclNum, opcodeOffs + sz + 1);
11052 lclNum = (opcode - CEE_LDLOC_0);
11053 assert(lclNum >= 0 && lclNum < 4);
11054 impLoadLoc(lclNum, opcodeOffs + sz + 1);
11058 lclNum = getU2LittleEndian(codeAddr);
11062 lclNum = getU1LittleEndian(codeAddr);
11064 JITDUMP(" %u", lclNum);
11066 if (tiVerificationNeeded)
11068 Verify(lclNum < info.compILargsCount, "bad arg num");
11071 if (compIsForInlining())
11073 op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo);
11074 noway_assert(op1->gtOper == GT_LCL_VAR);
11075 lclNum = op1->AsLclVar()->gtLclNum;
11080 lclNum = compMapILargNum(lclNum); // account for possible hidden param
11081 assertImp(lclNum < numArgs);
11083 if (lclNum == info.compThisArg)
11085 lclNum = lvaArg0Var;
11088 // We should have seen this arg write in the prescan
11089 assert(lvaTable[lclNum].lvHasILStoreOp);
11091 if (tiVerificationNeeded)
11093 typeInfo& tiLclVar = lvaTable[lclNum].lvVerTypeInfo;
11094 Verify(tiCompatibleWith(impStackTop().seTypeInfo, NormaliseForStack(tiLclVar), true),
11097 if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
11099 Verify(!tiLclVar.IsThisPtr(), "storing to uninit this ptr");
11106 lclNum = getU2LittleEndian(codeAddr);
11108 JITDUMP(" %u", lclNum);
11112 lclNum = getU1LittleEndian(codeAddr);
11114 JITDUMP(" %u", lclNum);
11122 lclNum = (opcode - CEE_STLOC_0);
11123 assert(lclNum >= 0 && lclNum < 4);
11126 if (tiVerificationNeeded)
11128 Verify(lclNum < info.compMethodInfo->locals.numArgs, "bad local num");
11129 Verify(tiCompatibleWith(impStackTop().seTypeInfo,
11130 NormaliseForStack(lvaTable[lclNum + numArgs].lvVerTypeInfo), true),
11134 if (compIsForInlining())
11136 lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
11138 /* Have we allocated a temp for this local? */
11140 lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline stloc first use temp"));
11149 if (lclNum >= info.compLocalsCount && lclNum != lvaArg0Var)
11151 assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
11157 /* if it is a struct assignment, make certain we don't overflow the buffer */
11158 assert(lclTyp != TYP_STRUCT || lvaLclSize(lclNum) >= info.compCompHnd->getClassSize(clsHnd));
11160 if (lvaTable[lclNum].lvNormalizeOnLoad())
11162 lclTyp = lvaGetRealType(lclNum);
11166 lclTyp = lvaGetActualType(lclNum);
11170 /* Pop the value being assigned */
11173 StackEntry se = impPopStack();
11174 clsHnd = se.seTypeInfo.GetClassHandle();
11176 tiRetVal = se.seTypeInfo;
11179 #ifdef FEATURE_SIMD
11180 if (varTypeIsSIMD(lclTyp) && (lclTyp != op1->TypeGet()))
11182 assert(op1->TypeGet() == TYP_STRUCT);
11183 op1->gtType = lclTyp;
11185 #endif // FEATURE_SIMD
11187 op1 = impImplicitIorI4Cast(op1, lclTyp);
11189 #ifdef _TARGET_64BIT_
// Downcast the TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
11191 if (varTypeIsI(op1->TypeGet()) && (genActualType(lclTyp) == TYP_INT))
11193 assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
11194 op1 = gtNewCastNode(TYP_INT, op1, false, TYP_INT);
11196 #endif // _TARGET_64BIT_
11198 // We had better assign it a value of the correct type
11200 genActualType(lclTyp) == genActualType(op1->gtType) ||
11201 genActualType(lclTyp) == TYP_I_IMPL && op1->IsVarAddr() ||
11202 (genActualType(lclTyp) == TYP_I_IMPL && (op1->gtType == TYP_BYREF || op1->gtType == TYP_REF)) ||
11203 (genActualType(op1->gtType) == TYP_I_IMPL && lclTyp == TYP_BYREF) ||
11204 (varTypeIsFloating(lclTyp) && varTypeIsFloating(op1->TypeGet())) ||
11205 ((genActualType(lclTyp) == TYP_BYREF) && genActualType(op1->TypeGet()) == TYP_REF));
11207 /* If op1 is "&var" then its type is the transient "*" and it can
11208 be used either as TYP_BYREF or TYP_I_IMPL */
11210 if (op1->IsVarAddr())
11212 assertImp(genActualType(lclTyp) == TYP_I_IMPL || lclTyp == TYP_BYREF);
11214 /* When "&var" is created, we assume it is a byref. If it is
11215 being assigned to a TYP_I_IMPL var, change the type to
11216 prevent unnecessary GC info */
11218 if (genActualType(lclTyp) == TYP_I_IMPL)
11220 op1->gtType = TYP_I_IMPL;
11224 // If this is a local and the local is a ref type, see
11225 // if we can improve type information based on the
11226 // value being assigned.
11227 if (isLocal && (lclTyp == TYP_REF))
11229 // We should have seen a stloc in our IL prescan.
11230 assert(lvaTable[lclNum].lvHasILStoreOp);
11232 // Is there just one place this local is defined?
11233 const bool isSingleDefLocal = lvaTable[lclNum].lvSingleDef;
11235 // Conservative check that there is just one
11236 // definition that reaches this store.
11237 const bool hasSingleReachingDef = (block->bbStackDepthOnEntry() == 0);
11239 if (isSingleDefLocal && hasSingleReachingDef)
11241 lvaUpdateClass(lclNum, op1, clsHnd);
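// Illustrative C# example (hypothetical): for "object o = new List<int>();", a
// single-def 'o' with a single reaching def can have its class sharpened here from
// Object to List`1, which can enable devirtualization of later calls through 'o'.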
11245 /* Filter out simple assignments to itself */
11247 if (op1->gtOper == GT_LCL_VAR && lclNum == op1->gtLclVarCommon.gtLclNum)
11249 if (opts.compDbgCode)
11251 op1 = gtNewNothingNode();
11260 /* Create the assignment node */
11262 op2 = gtNewLclvNode(lclNum, lclTyp, opcodeOffs + sz + 1);
11264 /* If the local is aliased or pinned, we need to spill calls and
11265 indirections from the stack. */
11267 if ((lvaTable[lclNum].lvAddrExposed || lvaTable[lclNum].lvHasLdAddrOp || lvaTable[lclNum].lvPinned) &&
11268 (verCurrentState.esStackDepth > 0))
11270 impSpillSideEffects(false,
11271 (unsigned)CHECK_SPILL_ALL DEBUGARG("Local could be aliased or is pinned"));
11274 /* Spill any refs to the local from the stack */
11276 impSpillLclRefs(lclNum);
11278 // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE
11279 // We insert a cast to the dest 'op2' type
11281 if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) &&
11282 varTypeIsFloating(op2->gtType))
11284 op1 = gtNewCastNode(op2->TypeGet(), op1, false, op2->TypeGet());
11287 if (varTypeIsStruct(lclTyp))
11289 op1 = impAssignStruct(op2, op1, clsHnd, (unsigned)CHECK_SPILL_ALL);
11293 // The code generator generates GC tracking information
// based on the RHS of the assignment. Later the LHS (which
// is a BYREF) gets used and the emitter checks that the variable
11296 // is being tracked. It is not (since the RHS was an int and did
11297 // not need tracking). To keep this assert happy, we change the RHS
11298 if (lclTyp == TYP_BYREF && !varTypeIsGC(op1->gtType))
11300 op1->gtType = TYP_BYREF;
11302 op1 = gtNewAssignNode(op2, op1);
11308 lclNum = getU2LittleEndian(codeAddr);
11312 lclNum = getU1LittleEndian(codeAddr);
11314 JITDUMP(" %u", lclNum);
11315 if (tiVerificationNeeded)
11317 Verify(lclNum < info.compMethodInfo->locals.numArgs, "bad local num");
11318 Verify(info.compInitMem, "initLocals not set");
11321 if (compIsForInlining())
11323 // Get the local type
11324 lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
11326 /* Have we allocated a temp for this local? */
11328 lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline ldloca(s) first use temp"));
11330 op1 = gtNewLclvNode(lclNum, lvaGetActualType(lclNum));
11336 assertImp(lclNum < info.compLocalsCount);
11340 lclNum = getU2LittleEndian(codeAddr);
11344 lclNum = getU1LittleEndian(codeAddr);
11346 JITDUMP(" %u", lclNum);
11347 Verify(lclNum < info.compILargsCount, "bad arg num");
11349 if (compIsForInlining())
11351 // In IL, LDARGA(_S) is used to load the byref managed pointer of struct argument,
11352 // followed by a ldfld to load the field.
11354 op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo);
11355 if (op1->gtOper != GT_LCL_VAR)
11357 compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDARGA_NOT_LOCAL_VAR);
11361 assert(op1->gtOper == GT_LCL_VAR);
11366 lclNum = compMapILargNum(lclNum); // account for possible hidden param
11367 assertImp(lclNum < numArgs);
11369 if (lclNum == info.compThisArg)
11371 lclNum = lvaArg0Var;
11378 op1 = gtNewLclvNode(lclNum, lvaGetActualType(lclNum), opcodeOffs + sz + 1);
11381 assert(op1->gtOper == GT_LCL_VAR);
11383 /* Note that this is supposed to create the transient type "*"
11384 which may be used as a TYP_I_IMPL. However we catch places
11385 where it is used as a TYP_I_IMPL and change the node if needed.
11386 Thus we are pessimistic and may report byrefs in the GC info
11387 where it was not absolutely needed, but it is safer this way.
11389 op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, op1);
11391 // &aliasedVar doesnt need GTF_GLOB_REF, though alisasedVar does
11392 assert((op1->gtFlags & GTF_GLOB_REF) == 0);
11394 tiRetVal = lvaTable[lclNum].lvVerTypeInfo;
11395 if (tiVerificationNeeded)
11397 // Don't allow taking address of uninit this ptr.
11398 if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
11400 Verify(!tiRetVal.IsThisPtr(), "address of uninit this ptr");
11403 if (!tiRetVal.IsByRef())
11405 tiRetVal.MakeByRef();
11409 Verify(false, "byref to byref");
11413 impPushOnStack(op1, tiRetVal);
11418 if (!info.compIsVarArgs)
11420 BADCODE("arglist in non-vararg method");
11423 if (tiVerificationNeeded)
11425 tiRetVal = typeInfo(TI_STRUCT, impGetRuntimeArgumentHandle());
11427 assertImp((info.compMethodInfo->args.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG);
/* The ARGLIST cookie is a hidden 'last' parameter; we have already
   adjusted the arg count because this is like fetching the last param */
11431 assertImp(0 < numArgs);
11432 assert(lvaTable[lvaVarargsHandleArg].lvAddrExposed);
11433 lclNum = lvaVarargsHandleArg;
11434 op1 = gtNewLclvNode(lclNum, TYP_I_IMPL, opcodeOffs + sz + 1);
11435 op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, op1);
11436 impPushOnStack(op1, tiRetVal);
11439 case CEE_ENDFINALLY:
11441 if (compIsForInlining())
11443 assert(!"Shouldn't have exception handlers in the inliner!");
11444 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFINALLY);
11448 if (verCurrentState.esStackDepth > 0)
11450 impEvalSideEffects();
11453 if (info.compXcptnsCount == 0)
11455 BADCODE("endfinally outside finally");
11458 assert(verCurrentState.esStackDepth == 0);
11460 op1 = gtNewOperNode(GT_RETFILT, TYP_VOID, nullptr);
11463 case CEE_ENDFILTER:
11465 if (compIsForInlining())
11467 assert(!"Shouldn't have exception handlers in the inliner!");
11468 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFILTER);
11472 block->bbSetRunRarely(); // filters are rare
11474 if (info.compXcptnsCount == 0)
11476 BADCODE("endfilter outside filter");
11479 if (tiVerificationNeeded)
11481 Verify(impStackTop().seTypeInfo.IsType(TI_INT), "bad endfilt arg");
11484 op1 = impPopStack().val;
11485 assertImp(op1->gtType == TYP_INT);
11486 if (!bbInFilterILRange(block))
11488 BADCODE("EndFilter outside a filter handler");
11491 /* Mark current bb as end of filter */
11493 assert(compCurBB->bbFlags & BBF_DONT_REMOVE);
11494 assert(compCurBB->bbJumpKind == BBJ_EHFILTERRET);
11496 /* Mark catch handler as successor */
11498 op1 = gtNewOperNode(GT_RETFILT, op1->TypeGet(), op1);
11499 if (verCurrentState.esStackDepth != 0)
11501 verRaiseVerifyException(INDEBUG("stack must be 1 on end of filter") DEBUGARG(__FILE__)
11502 DEBUGARG(__LINE__));
11507 prefixFlags &= ~PREFIX_TAILCALL; // ret without call before it
11509 if (!impReturnInstruction(block, prefixFlags, opcode))
11520 assert(!compIsForInlining());
11522 if (tiVerificationNeeded)
11524 Verify(false, "Invalid opcode: CEE_JMP");
11527 if ((info.compFlags & CORINFO_FLG_SYNCH) || block->hasTryIndex() || block->hasHndIndex())
11529 /* CEE_JMP does not make sense in some "protected" regions. */
11531 BADCODE("Jmp not allowed in protected region");
11534 if (verCurrentState.esStackDepth != 0)
11536 BADCODE("Stack must be empty after CEE_JMPs");
11539 _impResolveToken(CORINFO_TOKENKIND_Method);
11541 JITDUMP(" %08X", resolvedToken.token);
11543 /* The signature of the target has to be identical to ours.
11544 At least check that argCnt and returnType match */
11546 eeGetMethodSig(resolvedToken.hMethod, &sig);
11547 if (sig.numArgs != info.compMethodInfo->args.numArgs ||
11548 sig.retType != info.compMethodInfo->args.retType ||
11549 sig.callConv != info.compMethodInfo->args.callConv)
11551 BADCODE("Incompatible target for CEE_JMPs");
11554 op1 = new (this, GT_JMP) GenTreeVal(GT_JMP, TYP_VOID, (size_t)resolvedToken.hMethod);
11556 /* Mark the basic block as being a JUMP instead of RETURN */
11558 block->bbFlags |= BBF_HAS_JMP;
11560 /* Set this flag to make sure register arguments have a location assigned
11561 * even if we don't use them inside the method */
11563 compJmpOpUsed = true;
11565 fgNoStructPromotion = true;
11570 assertImp(sz == sizeof(unsigned));
11572 _impResolveToken(CORINFO_TOKENKIND_Class);
11574 JITDUMP(" %08X", resolvedToken.token);
11576 ldelemClsHnd = resolvedToken.hClass;
11578 if (tiVerificationNeeded)
11580 typeInfo tiArray = impStackTop(1).seTypeInfo;
11581 typeInfo tiIndex = impStackTop().seTypeInfo;
// As per ECMA, the 'index' specified can be either int32 or native int.
11584 Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
11586 typeInfo arrayElemType = verMakeTypeInfo(ldelemClsHnd);
11587 Verify(tiArray.IsNullObjRef() ||
11588 typeInfo::AreEquivalent(verGetArrayElemType(tiArray), arrayElemType),
11591 tiRetVal = arrayElemType;
11592 tiRetVal.MakeByRef();
11593 if (prefixFlags & PREFIX_READONLY)
11595 tiRetVal.SetIsReadonlyByRef();
11598 // an array interior pointer is always in the heap
11599 tiRetVal.SetIsPermanentHomeByRef();
11602 // If it's a value class array we just do a simple address-of
11603 if (eeIsValueClass(ldelemClsHnd))
11605 CorInfoType cit = info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd);
11606 if (cit == CORINFO_TYPE_UNDEF)
11608 lclTyp = TYP_STRUCT;
11612 lclTyp = JITtype2varType(cit);
11614 goto ARR_LD_POST_VERIFY;
11617 // Similarly, if it's a readonly access, we can do a simple address-of
11618 // without doing a runtime type-check
11619 if (prefixFlags & PREFIX_READONLY)
11622 goto ARR_LD_POST_VERIFY;
11625 // Otherwise we need the full helper function with run-time type check
11626 op1 = impTokenToHandle(&resolvedToken);
11627 if (op1 == nullptr)
11628 { // compDonotInline()
11632 args = gtNewArgList(op1); // Type
11633 args = gtNewListNode(impPopStack().val, args); // index
11634 args = gtNewListNode(impPopStack().val, args); // array
11635 op1 = gtNewHelperCallNode(CORINFO_HELP_LDELEMA_REF, TYP_BYREF, args);
11637 impPushOnStack(op1, tiRetVal);
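// Sketch of the tree built just above (illustrative, not additional compiler
// code): for "ldelema T" on an array whose element type must be checked at
// run time, the result is roughly
//   GT_CALL CORINFO_HELP_LDELEMA_REF(array, index, typeHandle) -> TYP_BYREF
// so the bounds check and the exact element-type check both happen inside
// the helper rather than in jitted code.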
11640 // ldelem for reference and value types
11642 assertImp(sz == sizeof(unsigned));
11644 _impResolveToken(CORINFO_TOKENKIND_Class);
11646 JITDUMP(" %08X", resolvedToken.token);
11648 ldelemClsHnd = resolvedToken.hClass;
11650 if (tiVerificationNeeded)
11652 typeInfo tiArray = impStackTop(1).seTypeInfo;
11653 typeInfo tiIndex = impStackTop().seTypeInfo;
11655 // As per ECMA, the 'index' operand can be either int32 or native int.
11656 Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
11657 tiRetVal = verMakeTypeInfo(ldelemClsHnd);
11659 Verify(tiArray.IsNullObjRef() || tiCompatibleWith(verGetArrayElemType(tiArray), tiRetVal, false),
11660 "type of array incompatible with type operand");
11661 tiRetVal.NormaliseForStack();
11664 // If it's a reference type or generic variable type
11665 // then just generate code as though it's a ldelem.ref instruction
11666 if (!eeIsValueClass(ldelemClsHnd))
11669 opcode = CEE_LDELEM_REF;
11673 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(ldelemClsHnd);
11674 lclTyp = JITtype2varType(jitTyp);
11675 tiRetVal = verMakeTypeInfo(ldelemClsHnd); // precise type always needed for struct
11676 tiRetVal.NormaliseForStack();
11678 goto ARR_LD_POST_VERIFY;
11680 case CEE_LDELEM_I1:
11683 case CEE_LDELEM_I2:
11684 lclTyp = TYP_SHORT;
11687 lclTyp = TYP_I_IMPL;
11690 // Should be UINT, but since no platform widens 4->8 bytes it doesn't matter,
11691 // and treating it as TYP_INT avoids other asserts.
11692 case CEE_LDELEM_U4:
11696 case CEE_LDELEM_I4:
11699 case CEE_LDELEM_I8:
11702 case CEE_LDELEM_REF:
11705 case CEE_LDELEM_R4:
11706 lclTyp = TYP_FLOAT;
11708 case CEE_LDELEM_R8:
11709 lclTyp = TYP_DOUBLE;
11711 case CEE_LDELEM_U1:
11712 lclTyp = TYP_UBYTE;
11714 case CEE_LDELEM_U2:
11715 lclTyp = TYP_USHORT;
11720 if (tiVerificationNeeded)
11722 typeInfo tiArray = impStackTop(1).seTypeInfo;
11723 typeInfo tiIndex = impStackTop().seTypeInfo;
11725 // As per ECMA, the 'index' operand can be either int32 or native int.
11726 Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
11727 if (tiArray.IsNullObjRef())
11729 if (lclTyp == TYP_REF)
11730 { // we will say a deref of a null array yields a null ref
11731 tiRetVal = typeInfo(TI_NULL);
11735 tiRetVal = typeInfo(lclTyp);
11740 tiRetVal = verGetArrayElemType(tiArray);
11741 typeInfo arrayElemTi = typeInfo(lclTyp);
11742 #ifdef _TARGET_64BIT_
11743 if (opcode == CEE_LDELEM_I)
11745 arrayElemTi = typeInfo::nativeInt();
11748 if (lclTyp != TYP_REF && lclTyp != TYP_STRUCT)
11750 Verify(typeInfo::AreEquivalent(tiRetVal, arrayElemTi), "bad array");
11753 #endif // _TARGET_64BIT_
11755 Verify(tiRetVal.IsType(arrayElemTi.GetType()), "bad array");
11758 tiRetVal.NormaliseForStack();
11760 ARR_LD_POST_VERIFY:
11762 /* Pull the index value and array address */
11763 op2 = impPopStack().val;
11764 op1 = impPopStack().val;
11765 assertImp(op1->gtType == TYP_REF);
11767 /* Check for null pointer - in the inliner case we simply abort */
11769 if (compIsForInlining())
11771 if (op1->gtOper == GT_CNS_INT)
11773 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NULL_FOR_LDELEM);
11778 op1 = impCheckForNullPointer(op1);
11780 /* Mark the block as containing an index expression */
11782 if (op1->gtOper == GT_LCL_VAR)
11784 if (op2->gtOper == GT_LCL_VAR || op2->gtOper == GT_CNS_INT || op2->gtOper == GT_ADD)
11786 block->bbFlags |= BBF_HAS_IDX_LEN;
11787 optMethodFlags |= OMF_HAS_ARRAYREF;
11791 /* Create the index node and push it on the stack */
11793 op1 = gtNewIndexRef(lclTyp, op1, op2);
11795 ldstruct = (opcode == CEE_LDELEM && lclTyp == TYP_STRUCT);
11797 if ((opcode == CEE_LDELEMA) || ldstruct ||
11798 (ldelemClsHnd != DUMMY_INIT(NULL) && eeIsValueClass(ldelemClsHnd)))
11800 assert(ldelemClsHnd != DUMMY_INIT(NULL));
11802 // remember the element size
11803 if (lclTyp == TYP_REF)
11805 op1->gtIndex.gtIndElemSize = TARGET_POINTER_SIZE;
11809 // If ldelemClsHnd is precisely a primitive type, use that; otherwise, preserve the struct type.
11810 if (info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd) == CORINFO_TYPE_UNDEF)
11812 op1->gtIndex.gtStructElemClass = ldelemClsHnd;
11814 assert(lclTyp != TYP_STRUCT || op1->gtIndex.gtStructElemClass != nullptr);
11815 if (lclTyp == TYP_STRUCT)
11817 size = info.compCompHnd->getClassSize(ldelemClsHnd);
11818 op1->gtIndex.gtIndElemSize = size;
11819 op1->gtType = lclTyp;
11823 if ((opcode == CEE_LDELEMA) || ldstruct)
11826 lclTyp = TYP_BYREF;
11828 op1 = gtNewOperNode(GT_ADDR, lclTyp, op1);
11832 assert(lclTyp != TYP_STRUCT);
11838 // Create an OBJ for the result
11839 op1 = gtNewObjNode(ldelemClsHnd, op1);
11840 op1->gtFlags |= GTF_EXCEPT;
11842 impPushOnStack(op1, tiRetVal);
11845 // stelem for reference and value types
11848 assertImp(sz == sizeof(unsigned));
11850 _impResolveToken(CORINFO_TOKENKIND_Class);
11852 JITDUMP(" %08X", resolvedToken.token);
11854 stelemClsHnd = resolvedToken.hClass;
11856 if (tiVerificationNeeded)
11858 typeInfo tiArray = impStackTop(2).seTypeInfo;
11859 typeInfo tiIndex = impStackTop(1).seTypeInfo;
11860 typeInfo tiValue = impStackTop().seTypeInfo;
11862 // As per ECMA, the 'index' operand can be either int32 or native int.
11863 Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
11864 typeInfo arrayElem = verMakeTypeInfo(stelemClsHnd);
11866 Verify(tiArray.IsNullObjRef() || tiCompatibleWith(arrayElem, verGetArrayElemType(tiArray), false),
11867 "type operand incompatible with array element type");
11868 arrayElem.NormaliseForStack();
11869 Verify(tiCompatibleWith(tiValue, arrayElem, true), "value incompatible with type operand");
11872 // If it's a reference type just behave as though it's a stelem.ref instruction
11873 if (!eeIsValueClass(stelemClsHnd))
11875 goto STELEM_REF_POST_VERIFY;
11878 // Otherwise extract the type
11880 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(stelemClsHnd);
11881 lclTyp = JITtype2varType(jitTyp);
11882 goto ARR_ST_POST_VERIFY;
11885 case CEE_STELEM_REF:
11887 if (tiVerificationNeeded)
11889 typeInfo tiArray = impStackTop(2).seTypeInfo;
11890 typeInfo tiIndex = impStackTop(1).seTypeInfo;
11891 typeInfo tiValue = impStackTop().seTypeInfo;
11893 // As per ECMA, the 'index' operand can be either int32 or native int.
11894 Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
11895 Verify(tiValue.IsObjRef(), "bad value");
11897 // We only check that it is an object reference; the helper does additional checks
11898 Verify(tiArray.IsNullObjRef() || verGetArrayElemType(tiArray).IsType(TI_REF), "bad array");
11901 STELEM_REF_POST_VERIFY:
11903 arrayNodeTo = impStackTop(2).val;
11904 arrayNodeToIndex = impStackTop(1).val;
11905 arrayNodeFrom = impStackTop().val;
11908 // Note that it is not legal to optimize away CORINFO_HELP_ARRADDR_ST in a
11909 // lot of cases because of covariance, i.e. foo[] can be cast to object[].
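// Illustrative example of that covariance hazard (C# semantics, not compiler
// code): "object[] arr = new string[1];" is legal, but "arr[0] = new object();"
// must then throw ArrayTypeMismatchException -- exactly the run-time check
// the helper performs.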
11912 // Check for assignment to same array, i.e. arrLcl[i] = arrLcl[j]
11913 // This does not need CORINFO_HELP_ARRADDR_ST
11914 if (arrayNodeFrom->OperGet() == GT_INDEX && arrayNodeFrom->gtOp.gtOp1->gtOper == GT_LCL_VAR &&
11915 arrayNodeTo->gtOper == GT_LCL_VAR &&
11916 arrayNodeTo->gtLclVarCommon.gtLclNum == arrayNodeFrom->gtOp.gtOp1->gtLclVarCommon.gtLclNum &&
11917 !lvaTable[arrayNodeTo->gtLclVarCommon.gtLclNum].lvAddrExposed)
11919 JITDUMP("\nstelem of ref from same array: skipping covariant store check\n");
11921 goto ARR_ST_POST_VERIFY;
11924 // Check for assignment of NULL. This does not need CORINFO_HELP_ARRADDR_ST
11925 if (arrayNodeFrom->OperGet() == GT_CNS_INT)
11927 JITDUMP("\nstelem of null: skipping covariant store check\n");
11928 assert(arrayNodeFrom->gtType == TYP_REF && arrayNodeFrom->gtIntCon.gtIconVal == 0);
11930 goto ARR_ST_POST_VERIFY;
11933 /* Call a helper function to do the assignment */
11934 op1 = gtNewHelperCallNode(CORINFO_HELP_ARRADDR_ST, TYP_VOID, impPopList(3, nullptr));
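// Sketch (illustrative only): the three popped stack entries become the
// helper's arguments, roughly
//   GT_CALL CORINFO_HELP_ARRADDR_ST(array, index, value) -> TYP_VOID
// with the bounds check, the covariant store check, and the GC write
// barrier all handled inside the helper.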
11938 case CEE_STELEM_I1:
11941 case CEE_STELEM_I2:
11942 lclTyp = TYP_SHORT;
11945 lclTyp = TYP_I_IMPL;
11947 case CEE_STELEM_I4:
11950 case CEE_STELEM_I8:
11953 case CEE_STELEM_R4:
11954 lclTyp = TYP_FLOAT;
11956 case CEE_STELEM_R8:
11957 lclTyp = TYP_DOUBLE;
11962 if (tiVerificationNeeded)
11964 typeInfo tiArray = impStackTop(2).seTypeInfo;
11965 typeInfo tiIndex = impStackTop(1).seTypeInfo;
11966 typeInfo tiValue = impStackTop().seTypeInfo;
11968 // As per ECMA, the 'index' operand can be either int32 or native int.
11969 Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
11970 typeInfo arrayElem = typeInfo(lclTyp);
11971 #ifdef _TARGET_64BIT_
11972 if (opcode == CEE_STELEM_I)
11974 arrayElem = typeInfo::nativeInt();
11976 #endif // _TARGET_64BIT_
11977 Verify(tiArray.IsNullObjRef() || typeInfo::AreEquivalent(verGetArrayElemType(tiArray), arrayElem),
11980 Verify(tiCompatibleWith(NormaliseForStack(tiValue), arrayElem.NormaliseForStack(), true),
11984 ARR_ST_POST_VERIFY:
11985 /* The strict order of evaluation is LHS-operands, RHS-operands,
11986 range-check, and then assignment. However, codegen currently
11987 does the range-check before evaluating the RHS-operands. So to
11988 maintain strict ordering, we spill the stack. */
11990 if (impStackTop().val->gtFlags & GTF_SIDE_EFFECT)
11992 impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
11993 "Strict ordering of exceptions for Array store"));
11996 /* Pull the new value from the stack */
11997 op2 = impPopStack().val;
11999 /* Pull the index value */
12000 op1 = impPopStack().val;
12002 /* Pull the array address */
12003 op3 = impPopStack().val;
12005 assertImp(op3->gtType == TYP_REF);
12006 if (op2->IsVarAddr())
12008 op2->gtType = TYP_I_IMPL;
12011 op3 = impCheckForNullPointer(op3);
12013 // Mark the block as containing an index expression
12015 if (op3->gtOper == GT_LCL_VAR)
12017 if (op1->gtOper == GT_LCL_VAR || op1->gtOper == GT_CNS_INT || op1->gtOper == GT_ADD)
12019 block->bbFlags |= BBF_HAS_IDX_LEN;
12020 optMethodFlags |= OMF_HAS_ARRAYREF;
12024 /* Create the index node */
12026 op1 = gtNewIndexRef(lclTyp, op3, op1);
12028 /* Create the assignment node and append it */
12030 if (lclTyp == TYP_STRUCT)
12032 assert(stelemClsHnd != DUMMY_INIT(NULL));
12034 op1->gtIndex.gtStructElemClass = stelemClsHnd;
12035 op1->gtIndex.gtIndElemSize = info.compCompHnd->getClassSize(stelemClsHnd);
12037 if (varTypeIsStruct(op1))
12039 op1 = impAssignStruct(op1, op2, stelemClsHnd, (unsigned)CHECK_SPILL_ALL);
12043 op2 = impImplicitR4orR8Cast(op2, op1->TypeGet());
12044 op1 = gtNewAssignNode(op1, op2);
12047 /* Mark the expression as containing an assignment */
12049 op1->gtFlags |= GTF_ASG;
12060 case CEE_ADD_OVF_UN:
12068 goto MATH_OP2_FLAGS;
12077 case CEE_SUB_OVF_UN:
12085 goto MATH_OP2_FLAGS;
12089 goto MATH_MAYBE_CALL_NO_OVF;
12094 case CEE_MUL_OVF_UN:
12101 goto MATH_MAYBE_CALL_OVF;
12103 // Other binary math operations
12107 goto MATH_MAYBE_CALL_NO_OVF;
12111 goto MATH_MAYBE_CALL_NO_OVF;
12115 goto MATH_MAYBE_CALL_NO_OVF;
12119 goto MATH_MAYBE_CALL_NO_OVF;
12121 MATH_MAYBE_CALL_NO_OVF:
12123 MATH_MAYBE_CALL_OVF:
12124 // Morpher has some complex logic about when to turn different
12125 // typed nodes on different platforms into helper calls. We
12126 // need to either duplicate that logic here, or just
12127 // pessimistically make all the nodes large enough to become
12128 // call nodes. Since call nodes aren't that much larger and
12129 // these opcodes are infrequent enough that I chose the latter.
12131 goto MATH_OP2_FLAGS;
12143 MATH_OP2: // For default values of 'ovfl' and 'callNode'
12148 MATH_OP2_FLAGS: // If 'ovfl' and 'callNode' have already been set
12150 /* Pull two values and push back the result */
12152 if (tiVerificationNeeded)
12154 const typeInfo& tiOp1 = impStackTop(1).seTypeInfo;
12155 const typeInfo& tiOp2 = impStackTop().seTypeInfo;
12157 Verify(tiCompatibleWith(tiOp1, tiOp2, true), "different arg type");
12158 if (oper == GT_ADD || oper == GT_DIV || oper == GT_SUB || oper == GT_MUL || oper == GT_MOD)
12160 Verify(tiOp1.IsNumberType(), "not number");
12164 Verify(tiOp1.IsIntegerType(), "not integer");
12167 Verify(!ovfl || tiOp1.IsIntegerType(), "not integer");
12171 #ifdef _TARGET_64BIT_
12172 if (tiOp2.IsNativeIntType())
12176 #endif // _TARGET_64BIT_
12179 op2 = impPopStack().val;
12180 op1 = impPopStack().val;
12182 #if !CPU_HAS_FP_SUPPORT
12183 if (varTypeIsFloating(op1->gtType))
12188 /* Can't do arithmetic with references */
12189 assertImp(genActualType(op1->TypeGet()) != TYP_REF && genActualType(op2->TypeGet()) != TYP_REF);
12191 // Change both to TYP_I_IMPL (impBashVarAddrsToI won't change if it's a true byref, only
12192 // if it is in the stack)
12193 impBashVarAddrsToI(op1, op2);
12195 type = impGetByRefResultType(oper, uns, &op1, &op2);
12197 assert(!ovfl || !varTypeIsFloating(op1->gtType));
12199 /* Special case: "int+0", "int-0", "int*1", "int/1" */
12201 if (op2->gtOper == GT_CNS_INT)
12203 if ((op2->IsIntegralConst(0) && (oper == GT_ADD || oper == GT_SUB)) ||
12204 (op2->IsIntegralConst(1) && (oper == GT_MUL || oper == GT_DIV)))
12207 impPushOnStack(op1, tiRetVal);
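// Worked example: for IL such as "ldloc.0; ldc.i4.0; add" the original
// operand is pushed back unchanged, so no GT_ADD node is ever created for
// x + 0 (and likewise for x - 0, x * 1 and x / 1).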
12212 // We can generate a TYP_FLOAT operation that has a TYP_DOUBLE operand
12214 if (varTypeIsFloating(type) && varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType))
12216 if (op1->TypeGet() != type)
12218 // We insert a cast of op1 to 'type'
12219 op1 = gtNewCastNode(type, op1, false, type);
12221 if (op2->TypeGet() != type)
12223 // We insert a cast of op2 to 'type'
12224 op2 = gtNewCastNode(type, op2, false, type);
12228 #if SMALL_TREE_NODES
12231 /* These operators can later be transformed into 'GT_CALL' */
12233 assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MUL]);
12234 #ifndef _TARGET_ARM_
12235 assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_DIV]);
12236 assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UDIV]);
12237 assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MOD]);
12238 assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UMOD]);
12240 // It's tempting to use LargeOpOpcode() here, but this logic is *not* saying
12241 // that we'll need to transform into a general large node, but rather specifically
12242 // to a call: by doing it this way, things keep working if there are multiple sizes,
12243 // and a CALL is no longer the largest.
12244 // That said, as of now it *is* a large node, so we'll do this with an assert rather
12246 assert(GenTree::s_gtNodeSizes[GT_CALL] == TREE_NODE_SZ_LARGE);
12247 op1 = new (this, GT_CALL) GenTreeOp(oper, type, op1, op2 DEBUGARG(/*largeNode*/ true));
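// A note on the size trick above: fgMorph may later change this arithmetic
// node into a helper GT_CALL in place (e.g. long multiply or divide on some
// targets), which is only safe because the node was allocated with a
// call-sized footprint to begin with.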
12250 #endif // SMALL_TREE_NODES
12252 op1 = gtNewOperNode(oper, type, op1, op2);
12255 /* Special case: integer/long division may throw an exception */
12257 if (varTypeIsIntegral(op1->TypeGet()) && op1->OperMayThrow(this))
12259 op1->gtFlags |= GTF_EXCEPT;
12264 assert(oper == GT_ADD || oper == GT_SUB || oper == GT_MUL);
12265 if (ovflType != TYP_UNKNOWN)
12267 op1->gtType = ovflType;
12269 op1->gtFlags |= (GTF_EXCEPT | GTF_OVERFLOW);
12272 op1->gtFlags |= GTF_UNSIGNED;
12276 impPushOnStack(op1, tiRetVal);
12291 if (tiVerificationNeeded)
12293 const typeInfo& tiVal = impStackTop(1).seTypeInfo;
12294 const typeInfo& tiShift = impStackTop(0).seTypeInfo;
12295 Verify(tiVal.IsIntegerType() && tiShift.IsType(TI_INT), "Bad shift args");
12298 op2 = impPopStack().val;
12299 op1 = impPopStack().val; // operand to be shifted
12300 impBashVarAddrsToI(op1, op2);
12302 type = genActualType(op1->TypeGet());
12303 op1 = gtNewOperNode(oper, type, op1, op2);
12305 impPushOnStack(op1, tiRetVal);
12309 if (tiVerificationNeeded)
12311 tiRetVal = impStackTop().seTypeInfo;
12312 Verify(tiRetVal.IsIntegerType(), "bad int value");
12315 op1 = impPopStack().val;
12316 impBashVarAddrsToI(op1, nullptr);
12317 type = genActualType(op1->TypeGet());
12318 impPushOnStack(gtNewOperNode(GT_NOT, type, op1), tiRetVal);
12322 if (tiVerificationNeeded)
12324 tiRetVal = impStackTop().seTypeInfo;
12325 Verify(tiRetVal.IsType(TI_DOUBLE), "bad R value");
12327 op1 = impPopStack().val;
12328 type = op1->TypeGet();
12329 op1 = gtNewOperNode(GT_CKFINITE, type, op1);
12330 op1->gtFlags |= GTF_EXCEPT;
12332 impPushOnStack(op1, tiRetVal);
12337 val = getI4LittleEndian(codeAddr); // jump distance
12338 jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int32)) + val);
12342 val = getI1LittleEndian(codeAddr); // jump distance
12343 jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int8)) + val);
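// In both the long and short forms the target is computed the same way: the
// IL offset of the byte following the displacement, plus the sign-extended
// displacement itself.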
12347 if (compIsForInlining())
12349 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_LEAVE);
12353 JITDUMP(" %04X", jmpAddr);
12354 if (block->bbJumpKind != BBJ_LEAVE)
12356 impResetLeaveBlock(block, jmpAddr);
12359 assert(jmpAddr == block->bbJumpDest->bbCodeOffs);
12360 impImportLeave(block);
12361 impNoteBranchOffs();
12367 jmpDist = (sz == 1) ? getI1LittleEndian(codeAddr) : getI4LittleEndian(codeAddr);
12369 if (compIsForInlining() && jmpDist == 0)
12374 impNoteBranchOffs();
12380 case CEE_BRFALSE_S:
12382 /* Pop the comparand (now there's a neat term) from the stack */
12383 if (tiVerificationNeeded)
12385 typeInfo& tiVal = impStackTop().seTypeInfo;
12386 Verify(tiVal.IsObjRef() || tiVal.IsByRef() || tiVal.IsIntegerType() || tiVal.IsMethod(),
12390 op1 = impPopStack().val;
12391 type = op1->TypeGet();
12393 // brfalse and brtrue are only allowed on I4, refs, and byrefs.
12394 if (!opts.MinOpts() && !opts.compDbgCode && block->bbJumpDest == block->bbNext)
12396 block->bbJumpKind = BBJ_NONE;
12398 if (op1->gtFlags & GTF_GLOB_EFFECT)
12400 op1 = gtUnusedValNode(op1);
12409 if (op1->OperIsCompare())
12411 if (opcode == CEE_BRFALSE || opcode == CEE_BRFALSE_S)
12413 // Flip the sense of the compare
12415 op1 = gtReverseCond(op1);
12420 /* We'll compare against an equally-sized integer 0 */
12421 /* For small types, we always compare against int */
12422 op2 = gtNewZeroConNode(genActualType(op1->gtType));
12424 /* Create the comparison operator and try to fold it */
12426 oper = (opcode == CEE_BRTRUE || opcode == CEE_BRTRUE_S) ? GT_NE : GT_EQ;
12427 op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
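// Example: "brtrue" on an object reference builds GT_NE(obj, 0/null) here,
// while "brfalse" builds GT_EQ; the GT_JTRUE wrapper is added below once
// folding has had a chance to run.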
12434 /* Fold comparison if we can */
12436 op1 = gtFoldExpr(op1);
12438 /* Try to fold the really simple cases like 'iconst *, ifne/ifeq'*/
12439 /* Don't make any blocks unreachable in import only mode */
12441 if ((op1->gtOper == GT_CNS_INT) && !compIsForImportOnly())
12443 /* gtFoldExpr() should prevent this as we don't want to make any blocks
12444 unreachable under compDbgCode */
12445 assert(!opts.compDbgCode);
12447 BBjumpKinds foldedJumpKind = (BBjumpKinds)(op1->gtIntCon.gtIconVal ? BBJ_ALWAYS : BBJ_NONE);
12448 assertImp((block->bbJumpKind == BBJ_COND) // normal case
12449 || (block->bbJumpKind == foldedJumpKind)); // this can happen if we are reimporting the
12450 // block for the second time
12452 block->bbJumpKind = foldedJumpKind;
12456 if (op1->gtIntCon.gtIconVal)
12458 printf("\nThe conditional jump becomes an unconditional jump to " FMT_BB "\n",
12459 block->bbJumpDest->bbNum);
12463 printf("\nThe block falls through into the next " FMT_BB "\n", block->bbNext->bbNum);
12470 op1 = gtNewOperNode(GT_JTRUE, TYP_VOID, op1);
12472 /* GT_JTRUE is handled specially for non-empty stacks. See 'addStmt'
12473 in impImportBlock(block). For correct line numbers, spill stack. */
12475 if (opts.compDbgCode && impCurStmtOffs != BAD_IL_OFFSET)
12477 impSpillStackEnsure(true);
12504 if (tiVerificationNeeded)
12506 verVerifyCond(impStackTop(1).seTypeInfo, impStackTop().seTypeInfo, opcode);
12507 tiRetVal = typeInfo(TI_INT);
12510 op2 = impPopStack().val;
12511 op1 = impPopStack().val;
12513 #ifdef _TARGET_64BIT_
12514 if (varTypeIsI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_INT))
12516 op2 = gtNewCastNode(TYP_I_IMPL, op2, uns, uns ? TYP_U_IMPL : TYP_I_IMPL);
12518 else if (varTypeIsI(op2->TypeGet()) && (genActualType(op1->TypeGet()) == TYP_INT))
12520 op1 = gtNewCastNode(TYP_I_IMPL, op1, uns, uns ? TYP_U_IMPL : TYP_I_IMPL);
12522 #endif // _TARGET_64BIT_
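// On 64-bit targets the casts above widen a 32-bit operand so that, for
// example, an int local can be compared against a native int value with
// both operands the same size, satisfying the assert that follows.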
12524 assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
12525 varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet()) ||
12526 varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
12528 /* Create the comparison node */
12530 op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
12532 /* TODO: setting both flags when only one is appropriate */
12533 if (opcode == CEE_CGT_UN || opcode == CEE_CLT_UN)
12535 op1->gtFlags |= GTF_RELOP_NAN_UN | GTF_UNSIGNED;
12538 // Fold result, if possible.
12539 op1 = gtFoldExpr(op1);
12541 impPushOnStack(op1, tiRetVal);
12547 goto CMP_2_OPs_AND_BR;
12552 goto CMP_2_OPs_AND_BR;
12557 goto CMP_2_OPs_AND_BR_UN;
12562 goto CMP_2_OPs_AND_BR;
12567 goto CMP_2_OPs_AND_BR_UN;
12572 goto CMP_2_OPs_AND_BR;
12577 goto CMP_2_OPs_AND_BR_UN;
12582 goto CMP_2_OPs_AND_BR;
12587 goto CMP_2_OPs_AND_BR_UN;
12592 goto CMP_2_OPs_AND_BR_UN;
12594 CMP_2_OPs_AND_BR_UN:
12597 goto CMP_2_OPs_AND_BR_ALL;
12601 goto CMP_2_OPs_AND_BR_ALL;
12602 CMP_2_OPs_AND_BR_ALL:
12604 if (tiVerificationNeeded)
12606 verVerifyCond(impStackTop(1).seTypeInfo, impStackTop().seTypeInfo, opcode);
12609 /* Pull two values */
12610 op2 = impPopStack().val;
12611 op1 = impPopStack().val;
12613 #ifdef _TARGET_64BIT_
12614 if ((op1->TypeGet() == TYP_I_IMPL) && (genActualType(op2->TypeGet()) == TYP_INT))
12616 op2 = gtNewCastNode(TYP_I_IMPL, op2, uns, uns ? TYP_U_IMPL : TYP_I_IMPL);
12618 else if ((op2->TypeGet() == TYP_I_IMPL) && (genActualType(op1->TypeGet()) == TYP_INT))
12620 op1 = gtNewCastNode(TYP_I_IMPL, op1, uns, uns ? TYP_U_IMPL : TYP_I_IMPL);
12622 #endif // _TARGET_64BIT_
12624 assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
12625 varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet()) ||
12626 varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
12628 if (!opts.MinOpts() && !opts.compDbgCode && block->bbJumpDest == block->bbNext)
12630 block->bbJumpKind = BBJ_NONE;
12632 if (op1->gtFlags & GTF_GLOB_EFFECT)
12634 impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
12635 "Branch to next Optimization, op1 side effect"));
12636 impAppendTree(gtUnusedValNode(op1), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
12638 if (op2->gtFlags & GTF_GLOB_EFFECT)
12640 impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
12641 "Branch to next Optimization, op2 side effect"));
12642 impAppendTree(gtUnusedValNode(op2), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
12646 if ((op1->gtFlags | op2->gtFlags) & GTF_GLOB_EFFECT)
12648 impNoteLastILoffs();
12654 // We can generate a compare of different-sized floating point op1 and op2
12655 // We insert a cast
12657 if (varTypeIsFloating(op1->TypeGet()))
12659 if (op1->TypeGet() != op2->TypeGet())
12661 assert(varTypeIsFloating(op2->TypeGet()));
12663 // say op1=double, op2=float. To avoid loss of precision
12664 // while comparing, op2 is converted to double and a double
12665 // comparison is done.
12666 if (op1->TypeGet() == TYP_DOUBLE)
12668 // We insert a cast of op2 to TYP_DOUBLE
12669 op2 = gtNewCastNode(TYP_DOUBLE, op2, false, TYP_DOUBLE);
12671 else if (op2->TypeGet() == TYP_DOUBLE)
12673 // We insert a cast of op1 to TYP_DOUBLE
12674 op1 = gtNewCastNode(TYP_DOUBLE, op1, false, TYP_DOUBLE);
12679 /* Create and append the operator */
12681 op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
12685 op1->gtFlags |= GTF_UNSIGNED;
12690 op1->gtFlags |= GTF_RELOP_NAN_UN;
12696 assert(!compIsForInlining());
12698 if (tiVerificationNeeded)
12700 Verify(impStackTop().seTypeInfo.IsType(TI_INT), "Bad switch val");
12702 /* Pop the switch value off the stack */
12703 op1 = impPopStack().val;
12704 assertImp(genActualTypeIsIntOrI(op1->TypeGet()));
12706 /* We can create a switch node */
12708 op1 = gtNewOperNode(GT_SWITCH, TYP_VOID, op1);
12710 val = (int)getU4LittleEndian(codeAddr);
12711 codeAddr += 4 + val * 4; // skip over the switch-table
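// The IL "switch" operand is a uint32 case count N followed by N int32
// relative branch targets, hence the 4 + N*4 bytes skipped here; the
// targets themselves were already recorded when the basic blocks were built.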
12715 /************************** Casting OPCODES ***************************/
12717 case CEE_CONV_OVF_I1:
12720 case CEE_CONV_OVF_I2:
12721 lclTyp = TYP_SHORT;
12723 case CEE_CONV_OVF_I:
12724 lclTyp = TYP_I_IMPL;
12726 case CEE_CONV_OVF_I4:
12729 case CEE_CONV_OVF_I8:
12733 case CEE_CONV_OVF_U1:
12734 lclTyp = TYP_UBYTE;
12736 case CEE_CONV_OVF_U2:
12737 lclTyp = TYP_USHORT;
12739 case CEE_CONV_OVF_U:
12740 lclTyp = TYP_U_IMPL;
12742 case CEE_CONV_OVF_U4:
12745 case CEE_CONV_OVF_U8:
12746 lclTyp = TYP_ULONG;
12749 case CEE_CONV_OVF_I1_UN:
12752 case CEE_CONV_OVF_I2_UN:
12753 lclTyp = TYP_SHORT;
12755 case CEE_CONV_OVF_I_UN:
12756 lclTyp = TYP_I_IMPL;
12758 case CEE_CONV_OVF_I4_UN:
12761 case CEE_CONV_OVF_I8_UN:
12765 case CEE_CONV_OVF_U1_UN:
12766 lclTyp = TYP_UBYTE;
12768 case CEE_CONV_OVF_U2_UN:
12769 lclTyp = TYP_USHORT;
12771 case CEE_CONV_OVF_U_UN:
12772 lclTyp = TYP_U_IMPL;
12774 case CEE_CONV_OVF_U4_UN:
12777 case CEE_CONV_OVF_U8_UN:
12778 lclTyp = TYP_ULONG;
12783 goto CONV_OVF_COMMON;
12786 goto CONV_OVF_COMMON;
12796 lclTyp = TYP_SHORT;
12799 lclTyp = TYP_I_IMPL;
12809 lclTyp = TYP_UBYTE;
12812 lclTyp = TYP_USHORT;
12814 #if (REGSIZE_BYTES == 8)
12816 lclTyp = TYP_U_IMPL;
12820 lclTyp = TYP_U_IMPL;
12827 lclTyp = TYP_ULONG;
12831 lclTyp = TYP_FLOAT;
12834 lclTyp = TYP_DOUBLE;
12837 case CEE_CONV_R_UN:
12838 lclTyp = TYP_DOUBLE;
12852 // just check that we have a number on the stack
12853 if (tiVerificationNeeded)
12855 const typeInfo& tiVal = impStackTop().seTypeInfo;
12856 Verify(tiVal.IsNumberType(), "bad arg");
12858 #ifdef _TARGET_64BIT_
12859 bool isNative = false;
12863 case CEE_CONV_OVF_I:
12864 case CEE_CONV_OVF_I_UN:
12866 case CEE_CONV_OVF_U:
12867 case CEE_CONV_OVF_U_UN:
12871 // leave 'isNative' = false;
12876 tiRetVal = typeInfo::nativeInt();
12879 #endif // _TARGET_64BIT_
12881 tiRetVal = typeInfo(lclTyp).NormaliseForStack();
12885 // Only conversions from FLOAT or DOUBLE to an integer type,
12886 // and conversions from ULONG (or LONG on ARM) to DOUBLE, are morphed to calls
12888 if (varTypeIsFloating(lclTyp))
12890 callNode = varTypeIsLong(impStackTop().val) || uns // uint->dbl gets turned into uint->long->dbl
12891 #ifdef _TARGET_64BIT_
12892 // TODO-ARM64-Bug?: This was AMD64; I enabled it for ARM64 also. OK?
12893 // TYP_BYREF could be used as TYP_I_IMPL which is long.
12894 // TODO-CQ: remove this when we lower casts long/ulong --> float/double
12895 // and generate SSE2 code instead of going through helper calls.
12896 || (impStackTop().val->TypeGet() == TYP_BYREF)
12902 callNode = varTypeIsFloating(impStackTop().val->TypeGet());
12905 // At this point uns, ovfl, and callNode are all set
12907 op1 = impPopStack().val;
12908 impBashVarAddrsToI(op1);
12910 if (varTypeIsSmall(lclTyp) && !ovfl && op1->gtType == TYP_INT && op1->gtOper == GT_AND)
12912 op2 = op1->gtOp.gtOp2;
12914 if (op2->gtOper == GT_CNS_INT)
12916 ssize_t ival = op2->gtIntCon.gtIconVal;
12917 ssize_t mask, umask;
12933 assert(!"unexpected type");
12937 if (((ival & umask) == ival) || ((ival & mask) == ival && uns))
12939 /* Toss the cast, it's a waste of time */
12941 impPushOnStack(op1, tiRetVal);
12944 else if (ival == mask)
12946 /* Toss the masking, it's a waste of time, since
12947 we sign-extend from the small value anyway */
12949 op1 = op1->gtOp.gtOp1;
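// Worked examples (masks per the table above): for "conv.i1" of (x & 0x7F)
// the mask already guarantees a value that fits in a signed byte, so the
// cast itself is dropped (first branch); for "conv.i1" of (x & 0xFF) the
// mask is the redundant part, since the narrowing cast sign-extends from
// the low byte anyway, so the AND is removed and the cast kept.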
12954 /* The 'op2' sub-operand of a cast is the 'real' type number,
12955 since the result of a cast to one of the 'small' integer
12956 types is an integer.
12959 type = genActualType(lclTyp);
12961 // If this is a no-op cast, just use op1.
12962 if (!ovfl && (type == op1->TypeGet()) && (genTypeSize(type) == genTypeSize(lclTyp)))
12964 // Nothing needs to change
12966 // Work is evidently required, add cast node
12969 #if SMALL_TREE_NODES
12972 op1 = gtNewCastNodeL(type, op1, uns, lclTyp);
12975 #endif // SMALL_TREE_NODES
12977 op1 = gtNewCastNode(type, op1, uns, lclTyp);
12982 op1->gtFlags |= (GTF_OVERFLOW | GTF_EXCEPT);
12986 impPushOnStack(op1, tiRetVal);
12990 if (tiVerificationNeeded)
12992 tiRetVal = impStackTop().seTypeInfo;
12993 Verify(tiRetVal.IsNumberType(), "Bad arg");
12996 op1 = impPopStack().val;
12997 impBashVarAddrsToI(op1, nullptr);
12998 impPushOnStack(gtNewOperNode(GT_NEG, genActualType(op1->gtType), op1), tiRetVal);
13003 /* Pull the top value from the stack */
13005 StackEntry se = impPopStack();
13006 clsHnd = se.seTypeInfo.GetClassHandle();
13009 /* Get hold of the type of the value being popped */
13011 lclTyp = genActualType(op1->gtType);
13013 /* Does the value have any side effects? */
13015 if ((op1->gtFlags & GTF_SIDE_EFFECT) || opts.compDbgCode)
13017 // Since we are throwing away the value, just normalize
13018 // it to its address. This is more efficient.
13020 if (varTypeIsStruct(op1))
13022 JITDUMP("\n ... CEE_POP struct ...\n");
13024 #ifdef UNIX_AMD64_ABI
13025 // Non-calls, such as obj or ret_expr, have to go through this.
13026 // Calls with large struct return value have to go through this.
13027 // Helper calls with small struct return value also have to go
13028 // through this since they do not follow Unix calling convention.
13029 if (op1->gtOper != GT_CALL || !IsMultiRegReturnedType(clsHnd) ||
13030 op1->AsCall()->gtCallType == CT_HELPER)
13031 #endif // UNIX_AMD64_ABI
13033 // If the value being produced comes from loading
13034 // via an underlying address, just null check the address.
13035 if (op1->OperIs(GT_FIELD, GT_IND, GT_OBJ))
13037 op1->ChangeOper(GT_NULLCHECK);
13038 op1->gtType = TYP_BYTE;
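// Example: for a struct-typed sequence like "ldfld <structFld>; pop" only
// the null check on the object address survives; the load is retyped to
// GT_NULLCHECK, and TYP_BYTE suffices because touching a single byte of
// the location is enough to fault.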
13042 op1 = impGetStructAddr(op1, clsHnd, (unsigned)CHECK_SPILL_ALL, false);
13045 JITDUMP("\n ... optimized to ...\n");
13050 // If op1 is a non-overflow cast, throw it away since it is useless.
13051 // Another reason for throwing away the useless cast is in the context of
13052 // implicit tail calls when the operand of pop is GT_CAST(GT_CALL(..)).
13053 // The cast gets added as part of importing GT_CALL, which gets in the way
13054 // of fgMorphCall() on the forms of tail call nodes that we assert.
13055 if ((op1->gtOper == GT_CAST) && !op1->gtOverflow())
13057 op1 = op1->gtOp.gtOp1;
13060 // If 'op1' is an expression, create an assignment node.
13061 // This helps analyses (like CSE) work correctly.
13063 if (op1->gtOper != GT_CALL)
13065 op1 = gtUnusedValNode(op1);
13068 /* Append the value to the tree list */
13072 /* No side effects - just throw the <BEEP> thing away */
13078 if (tiVerificationNeeded)
13080 // Dup could mark the beginning of a delegate creation sequence, so remember this point
13081 delegateCreateStart = codeAddr - 1;
13085 // If the expression to dup is simple, just clone it.
13086 // Otherwise spill it to a temp, and reload the temp
13088 StackEntry se = impPopStack();
13089 GenTree* tree = se.val;
13090 tiRetVal = se.seTypeInfo;
13093 if (!opts.compDbgCode && !op1->IsIntegralConst(0) && !op1->IsFPZero() && !op1->IsLocal())
13095 const unsigned tmpNum = lvaGrabTemp(true DEBUGARG("dup spill"));
13096 impAssignTempGen(tmpNum, op1, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_ALL);
13097 var_types type = genActualType(lvaTable[tmpNum].TypeGet());
13098 op1 = gtNewLclvNode(tmpNum, type);
13100 // Propagate type info to the temp from the stack and the original tree
13101 if (type == TYP_REF)
13103 assert(lvaTable[tmpNum].lvSingleDef == 0);
13104 lvaTable[tmpNum].lvSingleDef = 1;
13105 JITDUMP("Marked V%02u as a single def local\n", tmpNum);
13106 lvaSetClass(tmpNum, tree, tiRetVal.GetClassHandle());
13110 op1 = impCloneExpr(op1, &op2, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_ALL,
13111 nullptr DEBUGARG("DUP instruction"));
13113 assert(!(op1->gtFlags & GTF_GLOB_EFFECT) && !(op2->gtFlags & GTF_GLOB_EFFECT));
13114 impPushOnStack(op1, tiRetVal);
13115 impPushOnStack(op2, tiRetVal);
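// Sketch of the two strategies above: cheap trees (zero constants, locals)
// -- and everything under debuggable codegen -- are simply cloned by
// impCloneExpr, while anything more complex is spilled first, so "dup" of
// an expression with side effects behaves like
//   tmp = <expr>;  push tmp;  push tmp;
// guaranteeing those side effects run exactly once.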
13123 lclTyp = TYP_SHORT;
13132 lclTyp = TYP_I_IMPL;
13134 case CEE_STIND_REF:
13138 lclTyp = TYP_FLOAT;
13141 lclTyp = TYP_DOUBLE;
13145 if (tiVerificationNeeded)
13147 typeInfo instrType(lclTyp);
13148 #ifdef _TARGET_64BIT_
13149 if (opcode == CEE_STIND_I)
13151 instrType = typeInfo::nativeInt();
13153 #endif // _TARGET_64BIT_
13154 verVerifySTIND(impStackTop(1).seTypeInfo, impStackTop(0).seTypeInfo, instrType);
13158 compUnsafeCastUsed = true; // Have to go conservative
13163 op2 = impPopStack().val; // value to store
13164 op1 = impPopStack().val; // address to store to
13166 // you can indirect off of a TYP_I_IMPL (if we are in C) or a BYREF
13167 assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
13169 impBashVarAddrsToI(op1, op2);
13171 op2 = impImplicitR4orR8Cast(op2, lclTyp);
13173 #ifdef _TARGET_64BIT_
13174 // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
13175 if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType))
13177 op2->gtType = TYP_I_IMPL;
13181 // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
13183 if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT))
13185 assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
13186 op2 = gtNewCastNode(TYP_INT, op2, false, TYP_INT);
13188 // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
13190 if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT))
13192 assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
13193 op2 = gtNewCastNode(TYP_I_IMPL, op2, false, TYP_I_IMPL);
13196 #endif // _TARGET_64BIT_
13198 if (opcode == CEE_STIND_REF)
13200 // STIND_REF can be used to store TYP_INT, TYP_I_IMPL, TYP_REF, or TYP_BYREF
13201 assertImp(varTypeIsIntOrI(op2->gtType) || varTypeIsGC(op2->gtType));
13202 lclTyp = genActualType(op2->TypeGet());
13205 // Check target type.
13207 if (op2->gtType == TYP_BYREF || lclTyp == TYP_BYREF)
13209 if (op2->gtType == TYP_BYREF)
13211 assertImp(lclTyp == TYP_BYREF || lclTyp == TYP_I_IMPL);
13213 else if (lclTyp == TYP_BYREF)
13215 assertImp(op2->gtType == TYP_BYREF || varTypeIsIntOrI(op2->gtType));
13220 assertImp(genActualType(op2->gtType) == genActualType(lclTyp) ||
13221 ((lclTyp == TYP_I_IMPL) && (genActualType(op2->gtType) == TYP_INT)) ||
13222 (varTypeIsFloating(op2->gtType) && varTypeIsFloating(lclTyp)));
13226 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
13228 // stind could point anywhere; for example, a boxed class static int
13229 op1->gtFlags |= GTF_IND_TGTANYWHERE;
13231 if (prefixFlags & PREFIX_VOLATILE)
13233 assert(op1->OperGet() == GT_IND);
13234 op1->gtFlags |= GTF_DONT_CSE; // Can't CSE a volatile
13235 op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
13236 op1->gtFlags |= GTF_IND_VOLATILE;
13239 if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp))
13241 assert(op1->OperGet() == GT_IND);
13242 op1->gtFlags |= GTF_IND_UNALIGNED;
13245 op1 = gtNewAssignNode(op1, op2);
13246 op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
13248 // Spill side-effects AND global-data-accesses
13249 if (verCurrentState.esStackDepth > 0)
13251 impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STIND"));
13260 lclTyp = TYP_SHORT;
13269 case CEE_LDIND_REF:
13273 lclTyp = TYP_I_IMPL;
13276 lclTyp = TYP_FLOAT;
13279 lclTyp = TYP_DOUBLE;
13282 lclTyp = TYP_UBYTE;
13285 lclTyp = TYP_USHORT;
13289 if (tiVerificationNeeded)
13291 typeInfo lclTiType(lclTyp);
13292 #ifdef _TARGET_64BIT_
13293 if (opcode == CEE_LDIND_I)
13295 lclTiType = typeInfo::nativeInt();
13297 #endif // _TARGET_64BIT_
13298 tiRetVal = verVerifyLDIND(impStackTop().seTypeInfo, lclTiType);
13299 tiRetVal.NormaliseForStack();
13303 compUnsafeCastUsed = true; // Have to go conservative
13308 op1 = impPopStack().val; // address to load from
13309 impBashVarAddrsToI(op1);
13311 #ifdef _TARGET_64BIT_
13312 // Allow an upcast of op1 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
13314 if (genActualType(op1->gtType) == TYP_INT)
13316 assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
13317 op1 = gtNewCastNode(TYP_I_IMPL, op1, false, TYP_I_IMPL);
13321 assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
13323 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
13325 // ldind could point anywhere; for example, a boxed class static int
13326 op1->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
13328 if (prefixFlags & PREFIX_VOLATILE)
13330 assert(op1->OperGet() == GT_IND);
13331 op1->gtFlags |= GTF_DONT_CSE; // Can't CSE a volatile
13332 op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
13333 op1->gtFlags |= GTF_IND_VOLATILE;
13336 if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp))
13338 assert(op1->OperGet() == GT_IND);
13339 op1->gtFlags |= GTF_IND_UNALIGNED;
13342 impPushOnStack(op1, tiRetVal);
13346 case CEE_UNALIGNED:
13349 val = getU1LittleEndian(codeAddr);
13351 JITDUMP(" %u", val);
13352 if ((val != 1) && (val != 2) && (val != 4))
13354 BADCODE("Alignment unaligned. must be 1, 2, or 4");
13357 Verify(!(prefixFlags & PREFIX_UNALIGNED), "Multiple unaligned. prefixes");
13358 prefixFlags |= PREFIX_UNALIGNED;
13360 impValidateMemoryAccessOpcode(codeAddr, codeEndp, false);
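// Example: the IL sequence "unaligned. 1  ldind.i4" tells the JIT that the
// pointer about to be dereferenced may be only byte-aligned, so the
// subsequent load must not assume natural 4-byte alignment.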
13363 opcode = (OPCODE)getU1LittleEndian(codeAddr);
13364 opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
13365 codeAddr += sizeof(__int8);
13366 goto DECODE_OPCODE;
13370 Verify(!(prefixFlags & PREFIX_VOLATILE), "Multiple volatile. prefixes");
13371 prefixFlags |= PREFIX_VOLATILE;
13373 impValidateMemoryAccessOpcode(codeAddr, codeEndp, true);
13380 // Need to do a lookup here so that we perform an access check
13381 // and do a NOWAY if protections are violated
13382 _impResolveToken(CORINFO_TOKENKIND_Method);
13384 JITDUMP(" %08X", resolvedToken.token);
13386 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef*/,
13387 addVerifyFlag(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN)),
13390 // This check really only applies to intrinsic Array.Address methods
13391 if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE)
13393 NO_WAY("Currently do not support LDFTN of Parameterized functions");
13396 // Do this before DO_LDFTN since CEE_LDVIRTFN does it on its own.
13397 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
13399 if (tiVerificationNeeded)
13401 // LDFTN could mark the beginning of a delegate creation sequence, so remember this point
13402 delegateCreateStart = codeAddr - 2;
13404 // check any constraints on the callee's class and type parameters
13405 VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
13406 "method has unsatisfied class constraints");
13407 VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(resolvedToken.hClass,
13408 resolvedToken.hMethod),
13409 "method has unsatisfied method constraints");
13411 mflags = callInfo.verMethodFlags;
13412 Verify(!(mflags & CORINFO_FLG_CONSTRUCTOR), "LDFTN on a constructor");
13416 op1 = impMethodPointer(&resolvedToken, &callInfo);
13418 if (compDonotInline())
13423 // Call info may have more precise information about the function than
13424 // the resolved token.
13425 CORINFO_RESOLVED_TOKEN* heapToken = impAllocateToken(resolvedToken);
13426 assert(callInfo.hMethod != nullptr);
13427 heapToken->hMethod = callInfo.hMethod;
13428 impPushOnStack(op1, typeInfo(heapToken));
13433 case CEE_LDVIRTFTN:
13435 /* Get the method token */
13437 _impResolveToken(CORINFO_TOKENKIND_Method);
13439 JITDUMP(" %08X", resolvedToken.token);
13441 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef */,
13442 addVerifyFlag(combine(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN),
13443 CORINFO_CALLINFO_CALLVIRT)),
13446 // This check really only applies to intrinsic Array.Address methods
13447 if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE)
13449 NO_WAY("Currently do not support LDFTN of Parameterized functions");
13452 mflags = callInfo.methodFlags;
13454 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
13456 if (compIsForInlining())
13458 if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL))
13460 compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDVIRTFN_ON_NON_VIRTUAL);
13465 CORINFO_SIG_INFO& ftnSig = callInfo.sig;
13467 if (tiVerificationNeeded)
13470 Verify(ftnSig.hasThis(), "ldvirtftn on a static method");
13471 Verify(!(mflags & CORINFO_FLG_CONSTRUCTOR), "LDVIRTFTN on a constructor");
13473 // JIT32 verifier rejects verifiable ldvirtftn pattern
13474 typeInfo declType =
13475 verMakeTypeInfo(resolvedToken.hClass, true); // Change TI_STRUCT to TI_REF when necessary
13477 typeInfo arg = impStackTop().seTypeInfo;
13478 Verify((arg.IsType(TI_REF) || arg.IsType(TI_NULL)) && tiCompatibleWith(arg, declType, true),
13481 CORINFO_CLASS_HANDLE instanceClassHnd = info.compClassHnd;
13482 if (!(arg.IsType(TI_NULL) || (mflags & CORINFO_FLG_STATIC)))
13484 instanceClassHnd = arg.GetClassHandleForObjRef();
13487 // check any constraints on the method's class and type parameters
13488 VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
13489 "method has unsatisfied class constraints");
13490 VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(resolvedToken.hClass,
13491 resolvedToken.hMethod),
13492 "method has unsatisfied method constraints");
13494 if (mflags & CORINFO_FLG_PROTECTED)
13496 Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
13497 "Accessing protected method through wrong type.");
13501 /* Get the object-ref */
13502 op1 = impPopStack().val;
13503 assertImp(op1->gtType == TYP_REF);
13505 if (opts.IsReadyToRun())
13507 if (callInfo.kind != CORINFO_VIRTUALCALL_LDVIRTFTN)
13509 if (op1->gtFlags & GTF_SIDE_EFFECT)
13511 op1 = gtUnusedValNode(op1);
13512 impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13517 else if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL))
13519 if (op1->gtFlags & GTF_SIDE_EFFECT)
13521 op1 = gtUnusedValNode(op1);
13522 impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13527 GenTree* fptr = impImportLdvirtftn(op1, &resolvedToken, &callInfo);
13528 if (compDonotInline())
13533 CORINFO_RESOLVED_TOKEN* heapToken = impAllocateToken(resolvedToken);
13535 assert(heapToken->tokenType == CORINFO_TOKENKIND_Method);
13536 assert(callInfo.hMethod != nullptr);
13538 heapToken->tokenType = CORINFO_TOKENKIND_Ldvirtftn;
13539 heapToken->hMethod = callInfo.hMethod;
13540 impPushOnStack(fptr, typeInfo(heapToken));
13545 case CEE_CONSTRAINED:
13547 assertImp(sz == sizeof(unsigned));
13548 impResolveToken(codeAddr, &constrainedResolvedToken, CORINFO_TOKENKIND_Constrained);
13549 codeAddr += sizeof(unsigned); // prefix instructions must increment codeAddr manually
13550 JITDUMP(" (%08X) ", constrainedResolvedToken.token);
13552 Verify(!(prefixFlags & PREFIX_CONSTRAINED), "Multiple constrained. prefixes");
13553 prefixFlags |= PREFIX_CONSTRAINED;
13556 OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
13557 if (actualOpcode != CEE_CALLVIRT)
13559 BADCODE("constrained. has to be followed by callvirt");
13566 JITDUMP(" readonly.");
13568 Verify(!(prefixFlags & PREFIX_READONLY), "Multiple readonly. prefixes");
13569 prefixFlags |= PREFIX_READONLY;
13572 OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
13573 if (actualOpcode != CEE_LDELEMA && !impOpcodeIsCallOpcode(actualOpcode))
13575 BADCODE("readonly. has to be followed by ldelema or call");
13585 Verify(!(prefixFlags & PREFIX_TAILCALL_EXPLICIT), "Multiple tailcall. prefixes");
13586 prefixFlags |= PREFIX_TAILCALL_EXPLICIT;
13589 OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
13590 if (!impOpcodeIsCallOpcode(actualOpcode))
13592 BADCODE("tailcall. has to be followed by call, callvirt or calli");
13600 /* Since we will implicitly insert newObjThisPtr at the start of the
13601 argument list, spill any GTF_ORDER_SIDEEFF */
13602 impSpillSpecialSideEff();
13604 /* NEWOBJ does not respond to TAIL */
13605 prefixFlags &= ~PREFIX_TAILCALL_EXPLICIT;
13607 /* NEWOBJ does not respond to CONSTRAINED */
13608 prefixFlags &= ~PREFIX_CONSTRAINED;
13610 _impResolveToken(CORINFO_TOKENKIND_NewObj);
13612 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef*/,
13613 addVerifyFlag(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_ALLOWINSTPARAM)),
13616 if (compIsForInlining())
13618 if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
13620 // Check to see if this call violates the boundary.
13621 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_SECURITY);
13626 mflags = callInfo.methodFlags;
13628 if ((mflags & (CORINFO_FLG_STATIC | CORINFO_FLG_ABSTRACT)) != 0)
13630 BADCODE("newobj on static or abstract method");
13633 // Insert the security callout before any actual code is generated
13634 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
13636 // There are three different cases for new
13637 // Object size is variable (depends on arguments)
13638 // 1) Object is an array (arrays treated specially by the EE)
13639 // 2) Object is some other variable sized object (e.g. String)
13640 // 3) Class Size can be determined beforehand (normal case)
13641 // In the first case, we need to call a NEWOBJ helper (multinewarray)
13642 // in the second case we call the constructor with a '0' this pointer
13643 // In the third case we alloc the memory, then call the constructor
13645 clsFlags = callInfo.classFlags;
13646 if (clsFlags & CORINFO_FLG_ARRAY)
13648 if (tiVerificationNeeded)
13650 CORINFO_CLASS_HANDLE elemTypeHnd;
13651 INDEBUG(CorInfoType corType =)
13652 info.compCompHnd->getChildType(resolvedToken.hClass, &elemTypeHnd);
13653 assert(!(elemTypeHnd == nullptr && corType == CORINFO_TYPE_VALUECLASS));
13654 Verify(elemTypeHnd == nullptr ||
13655 !(info.compCompHnd->getClassAttribs(elemTypeHnd) & CORINFO_FLG_CONTAINS_STACK_PTR),
13656 "newarr of byref-like objects");
13657 verVerifyCall(opcode, &resolvedToken, nullptr, ((prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0),
13658 ((prefixFlags & PREFIX_READONLY) != 0), delegateCreateStart, codeAddr - 1,
13659 &callInfo DEBUGARG(info.compFullName));
13661 // Arrays need to call the NEWOBJ helper.
13662 assertImp(clsFlags & CORINFO_FLG_VAROBJSIZE);
13664 impImportNewObjArray(&resolvedToken, &callInfo);
13665 if (compDonotInline())
13673 // At present this can only be String
13674 else if (clsFlags & CORINFO_FLG_VAROBJSIZE)
13676 if (IsTargetAbi(CORINFO_CORERT_ABI))
13678 // The dummy argument does not exist in CoreRT
13679 newObjThisPtr = nullptr;
13683 // This is the case for variable-sized objects that are not
13684 // arrays. In this case, call the constructor with a null 'this'
13686 newObjThisPtr = gtNewIconNode(0, TYP_REF);
13689 /* Remember that this basic block contains 'new' of an object */
13690 block->bbFlags |= BBF_HAS_NEWOBJ;
13691 optMethodFlags |= OMF_HAS_NEWOBJ;
13695 // This is the normal case where the size of the object is
13696 // fixed. Allocate the memory and call the constructor.
13698 // Note: We cannot add a peep to avoid use of temp here
13699 // because we don't have enough interference info to detect when
13700 // sources and destination interfere, for example: s = new S(ref);
13702 // TODO: We should find the correct place to introduce a general
13703 // reverse copy prop for struct return values from newobj or
13704 // any function returning structs.
13706 /* get a temporary for the new object */
13707 lclNum = lvaGrabTemp(true DEBUGARG("NewObj constructor temp"));
13708 if (compDonotInline())
13710 // Fail fast if lvaGrabTemp fails with CALLSITE_TOO_MANY_LOCALS.
13711 assert(compInlineResult->GetObservation() == InlineObservation::CALLSITE_TOO_MANY_LOCALS);
13715 // In the value class case we only need clsHnd for size calcs.
13717 // The lookup of the code pointer will be handled by CALL in this case
13718 if (clsFlags & CORINFO_FLG_VALUECLASS)
13720 if (compIsForInlining())
13722 // If value class has GC fields, inform the inliner. It may choose to
13723 // bail out on the inline.
13724 DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
13725 if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0)
13727 compInlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT);
13728 if (compInlineResult->IsFailure())
13733 // Do further notification in the case where the call site is rare;
13734 // some policies do not track the relative hotness of call sites for
13735 // "always" inline cases.
13736 if (impInlineInfo->iciBlock->isRunRarely())
13738 compInlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT);
13739 if (compInlineResult->IsFailure())
13747 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
13748 unsigned size = info.compCompHnd->getClassSize(resolvedToken.hClass);
13750 if (impIsPrimitive(jitTyp))
13752 lvaTable[lclNum].lvType = JITtype2varType(jitTyp);
13756 // The local variable itself is the allocated space.
13757 // Here we need unsafe value cls check, since the address of struct is taken for further use
13758 // and potentially exploitable.
13759 lvaSetStruct(lclNum, resolvedToken.hClass, true /* unsafe value cls check */);
13761 if (compIsForInlining() || fgStructTempNeedsExplicitZeroInit(lvaTable + lclNum, block))
13763 // Append a tree to zero-out the temp
13764 newObjThisPtr = gtNewLclvNode(lclNum, lvaTable[lclNum].TypeGet());
13766 newObjThisPtr = gtNewBlkOpNode(newObjThisPtr, // Dest
13767 gtNewIconNode(0), // Value
13769 false, // isVolatile
13770 false); // not copyBlock
13771 impAppendTree(newObjThisPtr, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
13774 // Obtain the address of the temp
13776 gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewLclvNode(lclNum, lvaTable[lclNum].TypeGet()));
13780 #ifdef FEATURE_READYTORUN_COMPILER
13781 if (opts.IsReadyToRun())
13783 op1 = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_NEW, TYP_REF);
13784 usingReadyToRunHelper = (op1 != nullptr);
13787 if (!usingReadyToRunHelper)
13790 op1 = impParentClassTokenToHandle(&resolvedToken, nullptr, TRUE);
13791 if (op1 == nullptr)
13792 { // compDonotInline()
13796 // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
13797 // and the newfast call with a single call to a dynamic R2R cell that will:
13798 // 1) Load the context
13799 // 2) Perform the generic dictionary lookup and caching, and generate the appropriate
13801 // 3) Allocate and return the new object
13802 // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
13804 op1 = gtNewAllocObjNode(info.compCompHnd->getNewHelper(&resolvedToken, info.compMethodHnd),
13805 resolvedToken.hClass, TYP_REF, op1);
13808 // Remember that this basic block contains 'new' of an object
13809 block->bbFlags |= BBF_HAS_NEWOBJ;
13810 optMethodFlags |= OMF_HAS_NEWOBJ;
13812 // Append the assignment to the temp/local. Don't need to spill
13813 // at all as we are just calling an EE-Jit helper which can only
13814 // cause an (async) OutOfMemoryException.
13816 // We assign the newly allocated object (by a GT_ALLOCOBJ node)
13817 // to a temp. Note that the pattern "temp = allocObj" is required
13818 // by ObjectAllocator phase to be able to determine GT_ALLOCOBJ nodes
13819 // without exhaustive walk over all expressions.
13821 impAssignTempGen(lclNum, op1, (unsigned)CHECK_SPILL_NONE);
13823 assert(lvaTable[lclNum].lvSingleDef == 0);
13824 lvaTable[lclNum].lvSingleDef = 1;
13825 JITDUMP("Marked V%02u as a single def local\n", lclNum);
13826 lvaSetClass(lclNum, resolvedToken.hClass, true /* is Exact */);
13828 newObjThisPtr = gtNewLclvNode(lclNum, TYP_REF);
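// Net effect of the fixed-size case (sketch): the IL "newobj .ctor" has
// been rewritten as roughly
//   tmp = GT_ALLOCOBJ(clsHnd);   // or the ReadyToRun NEW helper
// with the constructor then invoked on 'tmp' (newObjThisPtr) by the shared
// call importing below, and 'tmp' left as the result of the newobj.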
13835 /* CALLI does not respond to CONSTRAINED */
13836 prefixFlags &= ~PREFIX_CONSTRAINED;
13838 if (compIsForInlining())
13840 // CALLI doesn't have a method handle, so assume the worst.
13841 if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
13843 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_CALLI);
13853 // We can't call getCallInfo on the token from a CALLI, but we need it in
13854 // many other places. We unfortunately embed that knowledge here.
13855 if (opcode != CEE_CALLI)
13857 _impResolveToken(CORINFO_TOKENKIND_Method);
13859 eeGetCallInfo(&resolvedToken,
13860 (prefixFlags & PREFIX_CONSTRAINED) ? &constrainedResolvedToken : nullptr,
13861 // this is how impImportCall invokes getCallInfo
13863 combine(combine(CORINFO_CALLINFO_ALLOWINSTPARAM, CORINFO_CALLINFO_SECURITYCHECKS),
13864 (opcode == CEE_CALLVIRT) ? CORINFO_CALLINFO_CALLVIRT
13865 : CORINFO_CALLINFO_NONE)),
13870 // Suppress uninitialized use warning.
13871 memset(&resolvedToken, 0, sizeof(resolvedToken));
13872 memset(&callInfo, 0, sizeof(callInfo));
13874 resolvedToken.token = getU4LittleEndian(codeAddr);
13875 resolvedToken.tokenContext = impTokenLookupContextHandle;
13876 resolvedToken.tokenScope = info.compScopeHnd;
13879 CALL: // memberRef should be set.
13880 // newObjThisPtr should be set for CEE_NEWOBJ
13882 JITDUMP(" %08X", resolvedToken.token);
13883 constraintCall = (prefixFlags & PREFIX_CONSTRAINED) != 0;
13885 bool newBBcreatedForTailcallStress;
13887 newBBcreatedForTailcallStress = false;
13889 if (compIsForInlining())
13891 if (compDonotInline())
13895 // We rule out inlinees with explicit tail calls in fgMakeBasicBlocks.
13896 assert((prefixFlags & PREFIX_TAILCALL_EXPLICIT) == 0);
13900 if (compTailCallStress())
13902 // Have we created a new BB after the "call" instruction in fgMakeBasicBlocks()?
13903 // Tail call stress only recognizes call+ret patterns and forces them to be
13904 // explicit tail prefixed calls. Also fgMakeBasicBlocks() under tail call stress
13905 // doesn't import the 'ret' opcode following the call into the basic block containing
13906 // the call, but instead imports it into a new basic block. Note that fgMakeBasicBlocks()
13907 // is already checking that there is an opcode following call and hence it is
13908 // safe here to read next opcode without bounds check.
13909 newBBcreatedForTailcallStress =
13910 impOpcodeIsCallOpcode(opcode) && // Current opcode is a CALL (not a CEE_NEWOBJ), so don't
13911 // make it jump to RET.
13912 (OPCODE)getU1LittleEndian(codeAddr + sz) == CEE_RET; // Next opcode is a CEE_RET
13914 bool hasTailPrefix = (prefixFlags & PREFIX_TAILCALL_EXPLICIT);
13915 if (newBBcreatedForTailcallStress && !hasTailPrefix && // User hasn't set "tail." prefix yet.
13916 verCheckTailCallConstraint(opcode, &resolvedToken,
13917 constraintCall ? &constrainedResolvedToken : nullptr,
13918 true) // Is it legal to do tailcall?
13921 CORINFO_METHOD_HANDLE declaredCalleeHnd = callInfo.hMethod;
13922 bool isVirtual = (callInfo.kind == CORINFO_VIRTUALCALL_STUB) ||
13923 (callInfo.kind == CORINFO_VIRTUALCALL_VTABLE);
13924 CORINFO_METHOD_HANDLE exactCalleeHnd = isVirtual ? nullptr : declaredCalleeHnd;
13925 if (info.compCompHnd->canTailCall(info.compMethodHnd, declaredCalleeHnd, exactCalleeHnd,
13926 hasTailPrefix)) // Is it legal to do tailcall?
13928 // Stress the tailcall.
13929 JITDUMP(" (Tailcall stress: prefixFlags |= PREFIX_TAILCALL_EXPLICIT)");
13930 prefixFlags |= PREFIX_TAILCALL_EXPLICIT;
13936 // This is split up to avoid goto flow warnings.
13938 isRecursive = !compIsForInlining() && (callInfo.hMethod == info.compMethodHnd);
13940 // Note that when running under tail call stress, a call will be marked as explicit tail prefixed
13941 // hence will not be considered for implicit tail calling.
13942 if (impIsImplicitTailCallCandidate(opcode, codeAddr + sz, codeEndp, prefixFlags, isRecursive))
13944 if (compIsForInlining())
13946 #if FEATURE_TAILCALL_OPT_SHARED_RETURN
13947 // Are we inlining at an implicit tail call site? If so then we can flag
13948 // implicit tail call sites in the inline body. These call sites
13949 // often end up in non BBJ_RETURN blocks, so only flag them when
13950 // we're able to handle shared returns.
13951 if (impInlineInfo->iciCall->IsImplicitTailCall())
13953 JITDUMP(" (Inline Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)");
13954 prefixFlags |= PREFIX_TAILCALL_IMPLICIT;
13956 #endif // FEATURE_TAILCALL_OPT_SHARED_RETURN
13960 JITDUMP(" (Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)");
13961 prefixFlags |= PREFIX_TAILCALL_IMPLICIT;
13965 // Treat this call as tail call for verification only if "tail" prefixed (i.e. explicit tail call).
13966 explicitTailCall = (prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0;
13967 readonlyCall = (prefixFlags & PREFIX_READONLY) != 0;
13969 if (opcode != CEE_CALLI && opcode != CEE_NEWOBJ)
13971 // All calls and delegates need a security callout.
13972 // For delegates, this is the call to the delegate constructor, not the access check on the
13973 // LD(virt)FTN.
13974 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
13977 if (tiVerificationNeeded)
13979 verVerifyCall(opcode, &resolvedToken, constraintCall ? &constrainedResolvedToken : nullptr,
13980 explicitTailCall, readonlyCall, delegateCreateStart, codeAddr - 1,
13981 &callInfo DEBUGARG(info.compFullName));
13984 callTyp = impImportCall(opcode, &resolvedToken, constraintCall ? &constrainedResolvedToken : nullptr,
13985 newObjThisPtr, prefixFlags, &callInfo, opcodeOffs);
13986 if (compDonotInline())
13988 // We do not check for failures after lvaGrabTemp; this is tracked by the CoreCLR_13272 issue.
13989 assert((callTyp == TYP_UNDEF) ||
13990 (compInlineResult->GetObservation() == InlineObservation::CALLSITE_TOO_MANY_LOCALS));
13994 if (explicitTailCall || newBBcreatedForTailcallStress) // If newBBcreatedForTailcallStress is true, we
13995 // have created a new BB after the "call"
13996 // instruction in fgMakeBasicBlocks(). So we need to jump to RET regardless.
13998 assert(!compIsForInlining());
14010 BOOL isLoadAddress = (opcode == CEE_LDFLDA || opcode == CEE_LDSFLDA);
14011 BOOL isLoadStatic = (opcode == CEE_LDSFLD || opcode == CEE_LDSFLDA);
14013 /* Get the CP_Fieldref index */
14014 assertImp(sz == sizeof(unsigned));
14016 _impResolveToken(CORINFO_TOKENKIND_Field);
14018 JITDUMP(" %08X", resolvedToken.token);
14020 int aflags = isLoadAddress ? CORINFO_ACCESS_ADDRESS : CORINFO_ACCESS_GET;
14022 GenTree* obj = nullptr;
14023 typeInfo* tiObj = nullptr;
14024 CORINFO_CLASS_HANDLE objType = nullptr; // used for fields
14026 if (opcode == CEE_LDFLD || opcode == CEE_LDFLDA)
14028 tiObj = &impStackTop().seTypeInfo;
14029 StackEntry se = impPopStack();
14030 objType = se.seTypeInfo.GetClassHandle();
14033 if (impIsThis(obj))
14035 aflags |= CORINFO_ACCESS_THIS;
14037 // An optimization for Contextful classes:
14038 // we unwrap the proxy when we have a 'this reference'
14040 if (info.compUnwrapContextful)
14042 aflags |= CORINFO_ACCESS_UNWRAP;
14047 eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo);
14049 // Figure out the type of the member. We always call canAccessField, so you always need this
14050 // handle
14051 CorInfoType ciType = fieldInfo.fieldType;
14052 clsHnd = fieldInfo.structType;
14054 lclTyp = JITtype2varType(ciType);
14056 #ifdef _TARGET_AMD64
14057 noway_assert(varTypeIsIntegralOrI(lclTyp) || varTypeIsFloating(lclTyp) || lclTyp == TYP_STRUCT);
14058 #endif // _TARGET_AMD64
14060 if (compIsForInlining())
14062 switch (fieldInfo.fieldAccessor)
14064 case CORINFO_FIELD_INSTANCE_HELPER:
14065 case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
14066 case CORINFO_FIELD_STATIC_ADDR_HELPER:
14067 case CORINFO_FIELD_STATIC_TLS:
14069 compInlineResult->NoteFatal(InlineObservation::CALLEE_LDFLD_NEEDS_HELPER);
14072 case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
14073 case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
14074 /* We may be able to inline the field accessors in specific instantiations of generic
methods */
14076 compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDFLD_NEEDS_HELPER);
14083 if (!isLoadAddress && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && lclTyp == TYP_STRUCT &&
14086 if ((info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd) == CORINFO_TYPE_UNDEF) &&
14087 !(info.compFlags & CORINFO_FLG_FORCEINLINE))
14089 // Loading a static valuetype field usually will cause a JitHelper to be called
14090 // for the static base. This will bloat the code.
14091 compInlineResult->Note(InlineObservation::CALLEE_LDFLD_STATIC_VALUECLASS);
14093 if (compInlineResult->IsFailure())
14101 tiRetVal = verMakeTypeInfo(ciType, clsHnd);
14104 tiRetVal.MakeByRef();
14108 tiRetVal.NormaliseForStack();
14111 // Perform this check always to ensure that we get field access exceptions even with
14112 // SkipVerification.
14113 impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
14115 if (tiVerificationNeeded)
14117 // You can also pass the unboxed struct to LDFLD
14118 BOOL bAllowPlainValueTypeAsThis = FALSE;
14119 if (opcode == CEE_LDFLD && impIsValueType(tiObj))
14121 bAllowPlainValueTypeAsThis = TRUE;
14124 verVerifyField(&resolvedToken, fieldInfo, tiObj, isLoadAddress, bAllowPlainValueTypeAsThis);
14126 // If we're doing this on a heap object or from a 'safe' byref
14127 // then the result is a safe byref too
14128 if (isLoadAddress) // load address
14130 if (fieldInfo.fieldFlags &
14131 CORINFO_FLG_FIELD_STATIC) // statics marked as safe will have a permanent home
14133 if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_SAFESTATIC_BYREF_RETURN)
14135 tiRetVal.SetIsPermanentHomeByRef();
14138 else if (tiObj->IsObjRef() || tiObj->IsPermanentHomeByRef())
14140 // ldflda of byref is safe if done on a gc object or on a safe byref
14142 tiRetVal.SetIsPermanentHomeByRef();
14148 // tiVerificationNeeded is false.
14149 // Raise InvalidProgramException if static load accesses non-static field
14150 if (isLoadStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0))
14152 BADCODE("static access on an instance field");
14156 // We are using ldfld/a on a static field. We allow it, but we need to evaluate any side effects from obj.
14157 if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr)
14159 if (obj->gtFlags & GTF_SIDE_EFFECT)
14161 obj = gtUnusedValNode(obj);
14162 impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
14167 /* Preserve 'small' int types */
14168 if (!varTypeIsSmall(lclTyp))
14170 lclTyp = genActualType(lclTyp);
14173 bool usesHelper = false;
14175 switch (fieldInfo.fieldAccessor)
14177 case CORINFO_FIELD_INSTANCE:
14178 #ifdef FEATURE_READYTORUN_COMPILER
14179 case CORINFO_FIELD_INSTANCE_WITH_BASE:
14182 obj = impCheckForNullPointer(obj);
14184 // If the object is a struct, what we really want is
14185 // for the field to operate on the address of the struct.
14186 if (!varTypeGCtype(obj->TypeGet()) && impIsValueType(tiObj))
14188 assert(opcode == CEE_LDFLD && objType != nullptr);
14190 obj = impGetStructAddr(obj, objType, (unsigned)CHECK_SPILL_ALL, true);
14193 /* Create the data member node */
14194 op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset);
14196 #ifdef FEATURE_READYTORUN_COMPILER
14197 if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE)
14199 op1->gtField.gtFieldLookup = fieldInfo.fieldLookup;
14203 op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT);
14205 if (fgAddrCouldBeNull(obj))
14207 op1->gtFlags |= GTF_EXCEPT;
14210 // If gtFldObj is a BYREF then our target is a value class and
14211 // it could point anywhere; for example, a boxed class static int
14212 if (obj->gtType == TYP_BYREF)
14214 op1->gtFlags |= GTF_IND_TGTANYWHERE;
14217 DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
14218 if (StructHasOverlappingFields(typeFlags))
14220 op1->gtField.gtFldMayOverlap = true;
14223 // wrap it in an address-of operator if necessary
14226 op1 = gtNewOperNode(GT_ADDR,
14227 (var_types)(varTypeIsGC(obj->TypeGet()) ? TYP_BYREF : TYP_I_IMPL), op1);
14231 if (compIsForInlining() &&
14232 impInlineIsGuaranteedThisDerefBeforeAnySideEffects(nullptr, obj,
14233 impInlineInfo->inlArgInfo))
14235 impInlineInfo->thisDereferencedFirst = true;
14241 case CORINFO_FIELD_STATIC_TLS:
14242 #ifdef _TARGET_X86_
14243 // Legacy TLS access is implemented as intrinsic on x86 only
14245 /* Create the data member node */
14246 op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset);
14247 op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation
14251 op1 = gtNewOperNode(GT_ADDR, (var_types)TYP_I_IMPL, op1);
14255 fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER;
14260 case CORINFO_FIELD_STATIC_ADDR_HELPER:
14261 case CORINFO_FIELD_INSTANCE_HELPER:
14262 case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
14263 op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp,
14268 case CORINFO_FIELD_STATIC_ADDRESS:
14269 // Replace static read-only fields with constant if possible
14270 if ((aflags & CORINFO_ACCESS_GET) && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_FINAL) &&
14271 !(fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP) &&
14272 (varTypeIsIntegral(lclTyp) || varTypeIsFloating(lclTyp)))
14274 CorInfoInitClassResult initClassResult =
14275 info.compCompHnd->initClass(resolvedToken.hField, info.compMethodHnd,
14276 impTokenLookupContextHandle);
14278 if (initClassResult & CORINFO_INITCLASS_INITIALIZED)
14280 void** pFldAddr = nullptr;
14282 void* fldAddr = info.compCompHnd->getFieldAddress(resolvedToken.hField, (void**)&pFldAddr);
14284 // We should always be able to access this static's address directly
14285 assert(pFldAddr == nullptr);
14287 op1 = impImportStaticReadOnlyField(fldAddr, lclTyp);
14294 case CORINFO_FIELD_STATIC_RVA_ADDRESS:
14295 case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
14296 case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
14297 case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
14298 op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo,
14302 case CORINFO_FIELD_INTRINSIC_ZERO:
14304 assert(aflags & CORINFO_ACCESS_GET);
14305 op1 = gtNewIconNode(0, lclTyp);
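// For example, this is how a read of IntPtr.Zero folds to a plain constant node
// instead of an actual static field load.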
14310 case CORINFO_FIELD_INTRINSIC_EMPTY_STRING:
14312 assert(aflags & CORINFO_ACCESS_GET);
14315 InfoAccessType iat = info.compCompHnd->emptyStringLiteral(&pValue);
14316 op1 = gtNewStringLiteralNode(iat, pValue);
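// For example, a read of String.Empty is imported as a string literal handle here,
// avoiding the static field load entirely.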
14321 case CORINFO_FIELD_INTRINSIC_ISLITTLEENDIAN:
14323 assert(aflags & CORINFO_ACCESS_GET);
14324 #if BIGENDIAN
14325 op1 = gtNewIconNode(0, lclTyp);
14326 #else
14327 op1 = gtNewIconNode(1, lclTyp);
14328 #endif
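// For example, this folds a read of BitConverter.IsLittleEndian to a constant, so
// endianness checks guarded by it can be optimized away at jit time.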
14334 assert(!"Unexpected fieldAccessor");
14337 if (!isLoadAddress)
14340 if (prefixFlags & PREFIX_VOLATILE)
14342 op1->gtFlags |= GTF_DONT_CSE; // Can't CSE a volatile
14343 op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
14347 assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) ||
14348 (op1->OperGet() == GT_OBJ));
14349 op1->gtFlags |= GTF_IND_VOLATILE;
14353 if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp))
14357 assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) ||
14358 (op1->OperGet() == GT_OBJ));
14359 op1->gtFlags |= GTF_IND_UNALIGNED;
14364 /* Check if the class needs explicit initialization */
14366 if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
14368 GenTree* helperNode = impInitClass(&resolvedToken);
14369 if (compDonotInline())
14373 if (helperNode != nullptr)
14375 op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1);
14380 impPushOnStack(op1, tiRetVal);
14388 BOOL isStoreStatic = (opcode == CEE_STSFLD);
14390 CORINFO_CLASS_HANDLE fieldClsHnd; // class of the field (if it's a ref type)
14392 /* Get the CP_Fieldref index */
14394 assertImp(sz == sizeof(unsigned));
14396 _impResolveToken(CORINFO_TOKENKIND_Field);
14398 JITDUMP(" %08X", resolvedToken.token);
14400 int aflags = CORINFO_ACCESS_SET;
14401 GenTree* obj = nullptr;
14402 typeInfo* tiObj = nullptr;
14405 /* Pull the value from the stack */
14406 StackEntry se = impPopStack();
14408 tiVal = se.seTypeInfo;
14409 clsHnd = tiVal.GetClassHandle();
14411 if (opcode == CEE_STFLD)
14413 tiObj = &impStackTop().seTypeInfo;
14414 obj = impPopStack().val;
14416 if (impIsThis(obj))
14418 aflags |= CORINFO_ACCESS_THIS;
14420 // An optimization for Contextful classes:
14421 // we unwrap the proxy when we have a 'this reference'
14423 if (info.compUnwrapContextful)
14425 aflags |= CORINFO_ACCESS_UNWRAP;
14430 eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo);
14432 // Figure out the type of the member. We always call canAccessField, so you always need this
14433 // handle
14434 CorInfoType ciType = fieldInfo.fieldType;
14435 fieldClsHnd = fieldInfo.structType;
14437 lclTyp = JITtype2varType(ciType);
14439 if (compIsForInlining())
14441 /* Is this a 'special' (COM) field? Or a TLS ref static field? A field stored in the GC heap? Or
14442 * a per-inst static? */
14444 switch (fieldInfo.fieldAccessor)
14446 case CORINFO_FIELD_INSTANCE_HELPER:
14447 case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
14448 case CORINFO_FIELD_STATIC_ADDR_HELPER:
14449 case CORINFO_FIELD_STATIC_TLS:
14451 compInlineResult->NoteFatal(InlineObservation::CALLEE_STFLD_NEEDS_HELPER);
14454 case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
14455 case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
14456 /* We may be able to inline the field accessors in specific instantiations of generic
methods */
14458 compInlineResult->NoteFatal(InlineObservation::CALLSITE_STFLD_NEEDS_HELPER);
14466 impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
14468 if (tiVerificationNeeded)
14470 verVerifyField(&resolvedToken, fieldInfo, tiObj, TRUE);
14471 typeInfo fieldType = verMakeTypeInfo(ciType, fieldClsHnd);
14472 Verify(tiCompatibleWith(tiVal, fieldType.NormaliseForStack(), true), "type mismatch");
14476 // tiVerificationNeeded is false.
14477 // Raise InvalidProgramException if static store accesses non-static field
14478 if (isStoreStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0))
14480 BADCODE("static access on an instance field");
14484 // We are using stfld on a static field.
14485 // We allow it, but we need to evaluate any side effects of obj.
14486 if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr)
14488 if (obj->gtFlags & GTF_SIDE_EFFECT)
14490 obj = gtUnusedValNode(obj);
14491 impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
14496 /* Preserve 'small' int types */
14497 if (!varTypeIsSmall(lclTyp))
14499 lclTyp = genActualType(lclTyp);
14502 switch (fieldInfo.fieldAccessor)
14504 case CORINFO_FIELD_INSTANCE:
14505 #ifdef FEATURE_READYTORUN_COMPILER
14506 case CORINFO_FIELD_INSTANCE_WITH_BASE:
14509 obj = impCheckForNullPointer(obj);
14511 /* Create the data member node */
14512 op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset);
14513 DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
14514 if (StructHasOverlappingFields(typeFlags))
14516 op1->gtField.gtFldMayOverlap = true;
14519 #ifdef FEATURE_READYTORUN_COMPILER
14520 if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE)
14522 op1->gtField.gtFieldLookup = fieldInfo.fieldLookup;
14526 op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT);
14528 if (fgAddrCouldBeNull(obj))
14530 op1->gtFlags |= GTF_EXCEPT;
14533 // If gtFldObj is a BYREF then our target is a value class and
14534 // it could point anywhere; for example, a boxed class static int
14535 if (obj->gtType == TYP_BYREF)
14537 op1->gtFlags |= GTF_IND_TGTANYWHERE;
14540 if (compIsForInlining() &&
14541 impInlineIsGuaranteedThisDerefBeforeAnySideEffects(op2, obj, impInlineInfo->inlArgInfo))
14543 impInlineInfo->thisDereferencedFirst = true;
14548 case CORINFO_FIELD_STATIC_TLS:
14549 #ifdef _TARGET_X86_
14550 // Legacy TLS access is implemented as intrinsic on x86 only
14552 /* Create the data member node */
14553 op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset);
14554 op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation
14558 fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER;
14563 case CORINFO_FIELD_STATIC_ADDR_HELPER:
14564 case CORINFO_FIELD_INSTANCE_HELPER:
14565 case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
14566 op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp,
14570 case CORINFO_FIELD_STATIC_ADDRESS:
14571 case CORINFO_FIELD_STATIC_RVA_ADDRESS:
14572 case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
14573 case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
14574 case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
14575 op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo,
14580 assert(!"Unexpected fieldAccessor");
14583 // Create the member assignment, unless we have a struct.
14584 // TODO-1stClassStructs: This could be limited to TYP_STRUCT, to avoid extra copies.
14585 bool deferStructAssign = varTypeIsStruct(lclTyp);
14587 if (!deferStructAssign)
14589 if (prefixFlags & PREFIX_VOLATILE)
14591 assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND));
14592 op1->gtFlags |= GTF_DONT_CSE; // Can't CSE a volatile
14593 op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
14594 op1->gtFlags |= GTF_IND_VOLATILE;
14596 if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp))
14598 assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND));
14599 op1->gtFlags |= GTF_IND_UNALIGNED;
14602 /* V4.0 allows assignment of i4 constant values to i8 type vars when IL verifier is bypassed (full
14603 trust apps). The reason this works is that the JIT stores an i4 constant in the GenTree union during
14604 importation and reads from the union as if it were a long during code generation. Though this
14605 can potentially read garbage, one can get lucky and have it work correctly.
14607 This code pattern is generated by Dev10 MC++ compiler while storing to fields when compiled with
14608 /O2 switch (default when compiling retail configs in Dev10) and a customer app has taken a
14609 dependency on it. To be backward compatible, we will explicitly add an upward cast here so that
14610 it works correctly always.
14612 Note that this is limited to x86 alone as there is no back compat to be addressed for the Arm JIT
14613 for V4.0. */
14615 CLANG_FORMAT_COMMENT_ANCHOR;
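/* For illustration (a sketch; 'SomeClass::someField' is a made-up name), the Dev10
   MC++ pattern being accommodated is IL of the form:
       ldc.i4   2
       stsfld   int64 SomeClass::someField
   i.e. a 32-bit constant stored directly to an i8 field without a conv.i8. */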
14617 #ifndef _TARGET_64BIT_
14618 // In UWP6.0 and beyond (post-.NET Core 2.0), we decided to let this cast from int to long be
14619 // generated for ARM as well as x86, so the following IR will be accepted:
14620 //     *  STMT      void
14621 //     |  /--*  CNS_INT   int    2
14622 //     \--*  ASG       long
14623 //        \--*  CLS_VAR   long
14625 if ((op1->TypeGet() != op2->TypeGet()) && op2->OperIsConst() && varTypeIsIntOrI(op2->TypeGet()) &&
14626 varTypeIsLong(op1->TypeGet()))
14628 op2 = gtNewCastNode(op1->TypeGet(), op2, false, op1->TypeGet());
14632 #ifdef _TARGET_64BIT_
14633 // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
14634 if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType))
14636 op2->gtType = TYP_I_IMPL;
14640 // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
14642 if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT))
14644 op2 = gtNewCastNode(TYP_INT, op2, false, TYP_INT);
14646 // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
14648 if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT))
14650 op2 = gtNewCastNode(TYP_I_IMPL, op2, false, TYP_I_IMPL);
14655 // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE
14656 // We insert a cast to the dest 'op1' type
14658 if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) &&
14659 varTypeIsFloating(op2->gtType))
14661 op2 = gtNewCastNode(op1->TypeGet(), op2, false, op1->TypeGet());
14664 op1 = gtNewAssignNode(op1, op2);
14666 /* Mark the expression as containing an assignment */
14668 op1->gtFlags |= GTF_ASG;
14671 /* Check if the class needs explicit initialization */
14673 if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
14675 GenTree* helperNode = impInitClass(&resolvedToken);
14676 if (compDonotInline())
14680 if (helperNode != nullptr)
14682 op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1);
14686 /* stfld can interfere with value classes (consider the sequence
14687 ldloc, ldloca, ..., stfld, stloc). We will be conservative and
14688 spill all value class references from the stack. */
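// For example (sketch): 'ldloc V0' pushes a copy of struct V0, 'ldloca V0' pushes
// its address; a stfld through that address mutates V0 and would leave the pushed
// copy stale if we did not spill it first.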
14690 if (obj && ((obj->gtType == TYP_BYREF) || (obj->gtType == TYP_I_IMPL)))
14694 if (impIsValueType(tiObj))
14696 impSpillEvalStack();
14700 impSpillValueClasses();
14704 /* Spill any refs to the same member from the stack */
14706 impSpillLclRefs((ssize_t)resolvedToken.hField);
14708 /* stsfld also interferes with indirect accesses (for aliased
14709 statics) and calls. But we don't need to spill other statics
14710 as we have explicitly spilled this particular static field. */
14712 impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STFLD"));
14714 if (deferStructAssign)
14716 op1 = impAssignStruct(op1, op2, clsHnd, (unsigned)CHECK_SPILL_ALL);
14724 /* Get the class type index operand */
14726 _impResolveToken(CORINFO_TOKENKIND_Newarr);
14728 JITDUMP(" %08X", resolvedToken.token);
14730 if (!opts.IsReadyToRun())
14732 // Need to restore array classes before creating array objects on the heap
14733 op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE /*mustRestoreHandle*/);
14734 if (op1 == nullptr)
14735 { // compDonotInline()
14740 if (tiVerificationNeeded)
14742 // As per ECMA, the 'numElems' specified can be either int32 or native int.
14743 Verify(impStackTop().seTypeInfo.IsIntOrNativeIntType(), "bad bound");
14745 CORINFO_CLASS_HANDLE elemTypeHnd;
14746 info.compCompHnd->getChildType(resolvedToken.hClass, &elemTypeHnd);
14747 Verify(elemTypeHnd == nullptr ||
14748 !(info.compCompHnd->getClassAttribs(elemTypeHnd) & CORINFO_FLG_CONTAINS_STACK_PTR),
14749 "array of byref-like type");
14752 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
14754 accessAllowedResult =
14755 info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14756 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14758 /* Form the arglist: array class handle, size */
14759 op2 = impPopStack().val;
14760 assertImp(genActualTypeIsIntOrI(op2->gtType));
14762 #ifdef _TARGET_64BIT_
14763 // The array helper takes a native int for array length.
14764 // So if we have an int, explicitly extend it to be a native int.
14765 if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
14767 if (op2->IsIntegralConst())
14769 op2->gtType = TYP_I_IMPL;
14773 bool isUnsigned = false;
14774 op2 = gtNewCastNode(TYP_I_IMPL, op2, isUnsigned, TYP_I_IMPL);
14777 #endif // _TARGET_64BIT_
14779 #ifdef FEATURE_READYTORUN_COMPILER
14780 if (opts.IsReadyToRun())
14782 op1 = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_NEWARR_1, TYP_REF,
14783 gtNewArgList(op2));
14784 usingReadyToRunHelper = (op1 != nullptr);
14786 if (!usingReadyToRunHelper)
14788 // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
14789 // and the newarr call with a single call to a dynamic R2R cell that will:
14790 // 1) Load the context
14791 // 2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
14792 // 3) Allocate the new array
14793 // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
14795 // Need to restore array classes before creating array objects on the heap
14796 op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE /*mustRestoreHandle*/);
14797 if (op1 == nullptr)
14798 { // compDonotInline()
14804 if (!usingReadyToRunHelper)
14807 args = gtNewArgList(op1, op2);
14809 /* Create a call to 'new' */
14811 // Note that this only works for shared generic code because the same helper is used for all
14812 // reference array types
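// E.g. 'new string[n]' and 'new object[n]' can share the same object-array helper,
// while value-class element types get a different helper from getNewArrHelper.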
14813 op1 = gtNewHelperCallNode(info.compCompHnd->getNewArrHelper(resolvedToken.hClass), TYP_REF, args);
14816 op1->gtCall.compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)resolvedToken.hClass;
14818 /* Remember that this basic block contains a 'new' of a single-dimensional (SD) array */
14820 block->bbFlags |= BBF_HAS_NEWARRAY;
14821 optMethodFlags |= OMF_HAS_NEWARRAY;
14823 /* Push the result of the call on the stack */
14825 impPushOnStack(op1, tiRetVal);
14832 if (tiVerificationNeeded)
14834 Verify(false, "bad opcode");
14837 // We don't allow locallocs inside handlers
14838 if (block->hasHndIndex())
14840 BADCODE("Localloc can't be inside handler");
14843 // Get the size to allocate
14845 op2 = impPopStack().val;
14846 assertImp(genActualTypeIsIntOrI(op2->gtType));
14848 if (verCurrentState.esStackDepth != 0)
14850 BADCODE("Localloc can only be used when the stack is empty");
14853 // If the localloc is not in a loop and its size is a small constant,
14854 // create a new local var of TYP_BLK and return its address.
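// For example (sketch), a C# 'byte* p = stackalloc byte[32];' in a block with no
// backward jumps can be imported as the address of a fixed-size TYP_BLK local
// rather than a GT_LCLHEAP node.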
14856 bool convertedToLocal = false;
14858 // Need to aggressively fold here, as even fixed-size locallocs
14859 // will have casts in the way.
14860 op2 = gtFoldExpr(op2);
14862 if (op2->IsIntegralConst())
14864 const ssize_t allocSize = op2->AsIntCon()->IconValue();
14866 if (allocSize == 0)
14868 // Result is nullptr
14869 JITDUMP("Converting stackalloc of 0 bytes to push null unmanaged pointer\n");
14870 op1 = gtNewIconNode(0, TYP_I_IMPL);
14871 convertedToLocal = true;
14873 else if ((allocSize > 0) && ((compCurBB->bbFlags & BBF_BACKWARD_JUMP) == 0))
14875 // Get the size threshold for local conversion
14876 ssize_t maxSize = DEFAULT_MAX_LOCALLOC_TO_LOCAL_SIZE;
14879 // Optionally allow this to be modified
14880 maxSize = JitConfig.JitStackAllocToLocalSize();
14883 if (allocSize <= maxSize)
14885 const unsigned stackallocAsLocal = lvaGrabTemp(false DEBUGARG("stackallocLocal"));
14886 JITDUMP("Converting stackalloc of %lld bytes to new local V%02u\n", allocSize,
14887 stackallocAsLocal);
14888 lvaTable[stackallocAsLocal].lvType = TYP_BLK;
14889 lvaTable[stackallocAsLocal].lvExactSize = (unsigned)allocSize;
14890 lvaTable[stackallocAsLocal].lvIsUnsafeBuffer = true;
14891 op1 = gtNewLclvNode(stackallocAsLocal, TYP_BLK);
14892 op1 = gtNewOperNode(GT_ADDR, TYP_I_IMPL, op1);
14893 convertedToLocal = true;
14895 if (!this->opts.compDbgEnC)
14897 // Ensure we have stack security for this method.
14898 // Reorder layout since the converted localloc is treated as an unsafe buffer.
14899 setNeedsGSSecurityCookie();
14900 compGSReorderStackLayout = true;
14906 if (!convertedToLocal)
14908 // Bail out if inlining and the localloc was not converted.
14910 // Note we might consider allowing the inline, if the call
14911 // site is not in a loop.
14912 if (compIsForInlining())
14914 InlineObservation obs = op2->IsIntegralConst()
14915 ? InlineObservation::CALLEE_LOCALLOC_TOO_LARGE
14916 : InlineObservation::CALLSITE_LOCALLOC_SIZE_UNKNOWN;
14917 compInlineResult->NoteFatal(obs);
14921 op1 = gtNewOperNode(GT_LCLHEAP, TYP_I_IMPL, op2);
14922 // May throw a stack overflow exception. Obviously, we don't want locallocs to be CSE'd.
14923 op1->gtFlags |= (GTF_EXCEPT | GTF_DONT_CSE);
14925 // Ensure we have stack security for this method.
14926 setNeedsGSSecurityCookie();
14928 /* The FP register may not be back to the original value at the end
14929 of the method, even if the frame size is 0, as localloc may
14930 have modified it. So we will HAVE to reset it */
14931 compLocallocUsed = true;
14935 compLocallocOptimized = true;
14939 impPushOnStack(op1, tiRetVal);
14944 /* Get the type token */
14945 assertImp(sz == sizeof(unsigned));
14947 _impResolveToken(CORINFO_TOKENKIND_Casting);
14949 JITDUMP(" %08X", resolvedToken.token);
14951 if (!opts.IsReadyToRun())
14953 op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
14954 if (op2 == nullptr)
14955 { // compDonotInline()
14960 if (tiVerificationNeeded)
14962 Verify(impStackTop().seTypeInfo.IsObjRef(), "obj reference needed");
14963 // Even if this is a value class, we know it is boxed.
14964 tiRetVal = typeInfo(TI_REF, resolvedToken.hClass);
14966 accessAllowedResult =
14967 info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14968 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14970 op1 = impPopStack().val;
14972 GenTree* optTree = impOptimizeCastClassOrIsInst(op1, &resolvedToken, false);
14974 if (optTree != nullptr)
14976 impPushOnStack(optTree, tiRetVal);
14981 #ifdef FEATURE_READYTORUN_COMPILER
14982 if (opts.IsReadyToRun())
14984 GenTreeCall* opLookup =
14985 impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_ISINSTANCEOF, TYP_REF,
14986 gtNewArgList(op1));
14987 usingReadyToRunHelper = (opLookup != nullptr);
14988 op1 = (usingReadyToRunHelper ? opLookup : op1);
14990 if (!usingReadyToRunHelper)
14992 // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
14993 // and the isinstanceof_any call with a single call to a dynamic R2R cell that will:
14994 // 1) Load the context
14995 // 2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
14997 // 3) Perform the 'is instance' check on the input object
14998 // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
15000 op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
15001 if (op2 == nullptr)
15002 { // compDonotInline()
15008 if (!usingReadyToRunHelper)
15011 op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, false);
15013 if (compDonotInline())
15018 impPushOnStack(op1, tiRetVal);
15023 case CEE_REFANYVAL:
15025 // get the class handle and make an ICON node out of it
15027 _impResolveToken(CORINFO_TOKENKIND_Class);
15029 JITDUMP(" %08X", resolvedToken.token);
15031 op2 = impTokenToHandle(&resolvedToken);
15032 if (op2 == nullptr)
15033 { // compDonotInline()
15037 if (tiVerificationNeeded)
15039 Verify(typeInfo::AreEquivalent(impStackTop().seTypeInfo, verMakeTypeInfo(impGetRefAnyClass())),
15040 "need refany");
15041 tiRetVal = verMakeTypeInfo(resolvedToken.hClass).MakeByRef();
15044 op1 = impPopStack().val;
15045 // make certain it is normalized;
15046 op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL);
15048 // Call helper GETREFANY(classHandle, op1);
15049 args = gtNewArgList(op2, op1);
15050 op1 = gtNewHelperCallNode(CORINFO_HELP_GETREFANY, TYP_BYREF, args);
15052 impPushOnStack(op1, tiRetVal);
15055 case CEE_REFANYTYPE:
15057 if (tiVerificationNeeded)
15059 Verify(typeInfo::AreEquivalent(impStackTop().seTypeInfo, verMakeTypeInfo(impGetRefAnyClass())),
15060 "need refany");
15063 op1 = impPopStack().val;
15065 // make certain it is normalized;
15066 op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL);
15068 if (op1->gtOper == GT_OBJ)
15070 // Get the address of the refany
15071 op1 = op1->gtOp.gtOp1;
15073 // Fetch the type from the correct slot
15074 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
15075 gtNewIconNode(OFFSETOF__CORINFO_TypedReference__type, TYP_I_IMPL));
15076 op1 = gtNewOperNode(GT_IND, TYP_BYREF, op1);
15080 assertImp(op1->gtOper == GT_MKREFANY);
15082 // The pointer may have side-effects
15083 if (op1->gtOp.gtOp1->gtFlags & GTF_SIDE_EFFECT)
15085 impAppendTree(op1->gtOp.gtOp1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
15087 impNoteLastILoffs();
15091 // We already have the class handle
15092 op1 = op1->gtOp.gtOp2;
15095 // convert native TypeHandle to RuntimeTypeHandle
15097 GenTreeArgList* helperArgs = gtNewArgList(op1);
15099 op1 = gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE_MAYBENULL, TYP_STRUCT,
15102 // The handle struct is returned in a register
15103 op1->gtCall.gtReturnType = GetRuntimeHandleUnderlyingType();
15105 tiRetVal = typeInfo(TI_STRUCT, impGetTypeHandleClass());
15108 impPushOnStack(op1, tiRetVal);
15113 /* Get the Class index */
15114 assertImp(sz == sizeof(unsigned));
15115 lastLoadToken = codeAddr;
15116 _impResolveToken(CORINFO_TOKENKIND_Ldtoken);
15118 tokenType = info.compCompHnd->getTokenTypeAsHandle(&resolvedToken);
15120 op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE);
15121 if (op1 == nullptr)
15122 { // compDonotInline()
15126 helper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE;
15127 assert(resolvedToken.hClass != nullptr);
15129 if (resolvedToken.hMethod != nullptr)
15131 helper = CORINFO_HELP_METHODDESC_TO_STUBRUNTIMEMETHOD;
15133 else if (resolvedToken.hField != nullptr)
15135 helper = CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD;
15138 GenTreeArgList* helperArgs = gtNewArgList(op1);
15140 op1 = gtNewHelperCallNode(helper, TYP_STRUCT, helperArgs);
15142 // The handle struct is returned in a register
15143 op1->gtCall.gtReturnType = GetRuntimeHandleUnderlyingType();
15145 tiRetVal = verMakeTypeInfo(tokenType);
15146 impPushOnStack(op1, tiRetVal);
15151 case CEE_UNBOX_ANY:
15153 /* Get the Class index */
15154 assertImp(sz == sizeof(unsigned));
15156 _impResolveToken(CORINFO_TOKENKIND_Class);
15158 JITDUMP(" %08X", resolvedToken.token);
15160 BOOL runtimeLookup;
15161 op2 = impTokenToHandle(&resolvedToken, &runtimeLookup);
15162 if (op2 == nullptr)
15164 assert(compDonotInline());
15168 // Run this always so we can get access exceptions even with SkipVerification.
15169 accessAllowedResult =
15170 info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
15171 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
15173 if (opcode == CEE_UNBOX_ANY && !eeIsValueClass(resolvedToken.hClass))
15175 if (tiVerificationNeeded)
15177 typeInfo tiUnbox = impStackTop().seTypeInfo;
15178 Verify(tiUnbox.IsObjRef(), "bad unbox.any arg");
15179 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
15180 tiRetVal.NormaliseForStack();
15182 JITDUMP("\n Importing UNBOX.ANY(refClass) as CASTCLASS\n");
15183 op1 = impPopStack().val;
15187 /* Pop the object and create the unbox helper call */
15188 /* You might think that for UNBOX_ANY we need to push a different */
15189 /* (non-byref) type, but here we're making the tiRetVal that is used */
15190 /* for the intermediate pointer which we then transfer onto the OBJ */
15191 /* instruction. OBJ then creates the appropriate tiRetVal. */
15192 if (tiVerificationNeeded)
15194 typeInfo tiUnbox = impStackTop().seTypeInfo;
15195 Verify(tiUnbox.IsObjRef(), "Bad unbox arg");
15197 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
15198 Verify(tiRetVal.IsValueClass(), "not value class");
15199 tiRetVal.MakeByRef();
15201 // We always come from an objref, so this is safe byref
15202 tiRetVal.SetIsPermanentHomeByRef();
15203 tiRetVal.SetIsReadonlyByRef();
15206 op1 = impPopStack().val;
15207 assertImp(op1->gtType == TYP_REF);
15209 helper = info.compCompHnd->getUnBoxHelper(resolvedToken.hClass);
15210 assert(helper == CORINFO_HELP_UNBOX || helper == CORINFO_HELP_UNBOX_NULLABLE);
15212 // Check legality and profitability of inline expansion for unboxing.
15213 const bool canExpandInline = (helper == CORINFO_HELP_UNBOX);
15214 const bool shouldExpandInline = !(compCurBB->isRunRarely() || opts.compDbgCode || opts.MinOpts());
15216 if (canExpandInline && shouldExpandInline)
15218 JITDUMP("\n Importing %s as inline sequence\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY");
15219 // we are doing normal unboxing
15220 // inline the common case of the unbox helper
15221 // UNBOX(exp) morphs into
15222 // clone = pop(exp);
15223 // ((*clone == typeToken) ? nop : helper(clone, typeToken));
15224 // push(clone + TARGET_POINTER_SIZE)
15226 GenTree* cloneOperand;
15227 op1 = impCloneExpr(op1, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
15228 nullptr DEBUGARG("inline UNBOX clone1"));
15229 op1 = gtNewOperNode(GT_IND, TYP_I_IMPL, op1);
15231 GenTree* condBox = gtNewOperNode(GT_EQ, TYP_INT, op1, op2);
15233 op1 = impCloneExpr(cloneOperand, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
15234 nullptr DEBUGARG("inline UNBOX clone2"));
15235 op2 = impTokenToHandle(&resolvedToken);
15236 if (op2 == nullptr)
15237 { // compDonotInline()
15240 args = gtNewArgList(op2, op1);
15241 op1 = gtNewHelperCallNode(helper, TYP_VOID, args);
15243 op1 = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), op1);
15244 op1 = gtNewQmarkNode(TYP_VOID, condBox, op1);
15246 // QMARK nodes cannot reside on the evaluation stack. Because there
15247 // may be other trees on the evaluation stack that side-effect the
15248 // sources of the UNBOX operation we must spill the stack.
15250 impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
15252 // Create the address-expression to reference past the object header
15253 // to the beginning of the value-type. Today this means adjusting
15254 // past the base of the object's vtable field, which is pointer-sized.
15256 op2 = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL);
15257 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, cloneOperand, op2);
15261 JITDUMP("\n Importing %s as helper call because %s\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY",
15262 canExpandInline ? "want smaller code or faster jitting" : "inline expansion not legal");
15264 // Don't optimize, just call the helper and be done with it
15265 args = gtNewArgList(op2, op1);
15267 gtNewHelperCallNode(helper,
15268 (var_types)((helper == CORINFO_HELP_UNBOX) ? TYP_BYREF : TYP_STRUCT), args);
15271 assert(helper == CORINFO_HELP_UNBOX && op1->gtType == TYP_BYREF || // Unbox helper returns a byref.
15272 helper == CORINFO_HELP_UNBOX_NULLABLE &&
15273 varTypeIsStruct(op1) // UnboxNullable helper returns a struct.
15277 ----------------------------------------------------------------------
15278 | \ helper  |                         |                              |
15279 |   \       |                         |                              |
15280 |     \     | CORINFO_HELP_UNBOX      | CORINFO_HELP_UNBOX_NULLABLE  |
15281 |       \   | (which returns a BYREF) | (which returns a STRUCT)     |
15282 | opcode  \ |                         |                              |
15283 |---------------------------------------------------------------------
15284 | UNBOX     | push the BYREF          | spill the STRUCT to a local, |
15285 |           |                         | push the BYREF to this local |
15286 |---------------------------------------------------------------------
15287 | UNBOX_ANY | push a GT_OBJ of        | push the STRUCT              |
15288 |           | the BYREF               | For Linux when the           |
15289 |           |                         | struct is returned in two    |
15290 |           |                         | registers create a temp      |
15291 |           |                         | whose address is passed to   |
15292 |           |                         | the unbox_nullable helper.   |
15293 |---------------------------------------------------------------------
15296 if (opcode == CEE_UNBOX)
15298 if (helper == CORINFO_HELP_UNBOX_NULLABLE)
15300 // Unbox nullable helper returns a struct type.
15301 // We need to spill it to a temp so that we can take the address of it.
15302 // Here we need the unsafe value cls check, since the address of the struct is taken to be used
15303 // further along and could potentially be exploitable.
15305 unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a nullable"));
15306 lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */);
15308 op2 = gtNewLclvNode(tmp, TYP_STRUCT);
15309 op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
15310 assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp.
15312 op2 = gtNewLclvNode(tmp, TYP_STRUCT);
15313 op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2);
15314 op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2);
15317 assert(op1->gtType == TYP_BYREF);
15318 assert(!tiVerificationNeeded || tiRetVal.IsByRef());
15322 assert(opcode == CEE_UNBOX_ANY);
15324 if (helper == CORINFO_HELP_UNBOX)
15326 // Normal unbox helper returns a TYP_BYREF.
15327 impPushOnStack(op1, tiRetVal);
15332 assert(helper == CORINFO_HELP_UNBOX_NULLABLE && "Make sure the helper is nullable!");
15334 #if FEATURE_MULTIREG_RET
15336 if (varTypeIsStruct(op1) && IsMultiRegReturnedType(resolvedToken.hClass))
15338 // Unbox nullable helper returns a TYP_STRUCT.
15339 // For the multi-reg case we need to spill it to a temp so that
15340 // we can pass the address to the unbox_nullable jit helper.
15342 unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a register returnable nullable"));
15343 lvaTable[tmp].lvIsMultiRegArg = true;
15344 lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */);
15346 op2 = gtNewLclvNode(tmp, TYP_STRUCT);
15347 op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
15348 assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp.
15350 op2 = gtNewLclvNode(tmp, TYP_STRUCT);
15351 op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2);
15352 op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2);
15354 // In this case the return value of the unbox helper is TYP_BYREF.
15355 // Make sure the right type is placed on the operand type stack.
15356 impPushOnStack(op1, tiRetVal);
15358 // Load the struct.
15361 assert(op1->gtType == TYP_BYREF);
15362 assert(!tiVerificationNeeded || tiRetVal.IsByRef());
15368 #endif // !FEATURE_MULTIREG_RET
15371 // If the struct is not register passable, we have it materialized in the RetBuf.
15372 assert(op1->gtType == TYP_STRUCT);
15373 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
15374 assert(tiRetVal.IsValueClass());
15378 impPushOnStack(op1, tiRetVal);
15384 /* Get the Class index */
15385 assertImp(sz == sizeof(unsigned));
15387 _impResolveToken(CORINFO_TOKENKIND_Box);
15389 JITDUMP(" %08X", resolvedToken.token);
15391 if (tiVerificationNeeded)
15393 typeInfo tiActual = impStackTop().seTypeInfo;
15394 typeInfo tiBox = verMakeTypeInfo(resolvedToken.hClass);
15396 Verify(verIsBoxable(tiBox), "boxable type expected");
15398 // check the class constraints of the boxed type in case we are boxing an uninitialized value
15399 Verify(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
15400 "boxed type has unsatisfied class constraints");
15402 Verify(tiCompatibleWith(tiActual, tiBox.NormaliseForStack(), true), "type mismatch");
15404 // Observation: the following code introduces a boxed value class on the stack, but,
15405 // according to the ECMA spec, one would simply expect: tiRetVal =
15406 // typeInfo(TI_REF,impGetObjectClass());
15408 // Push the result back on the stack,
15409 // even if clsHnd is a value class, we want the TI_REF
15410 // we call back to the EE to find out what type we should push (for Nullable<T> we push T)
15411 tiRetVal = typeInfo(TI_REF, info.compCompHnd->getTypeForBox(resolvedToken.hClass));
15414 accessAllowedResult =
15415 info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
15416 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
15418 // Note BOX can be used on things that are not value classes, in which
15419 // case we get a NOP. However the verifier's view of the type on the
15420 // stack changes (in generic code a 'T' becomes a 'boxed T')
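// E.g. in generic code, 'box !T' with T instantiated to a reference type leaves
// the object reference on the stack unchanged; only the verification type changes
// from 'T' to 'boxed T'.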
15421 if (!eeIsValueClass(resolvedToken.hClass))
15423 JITDUMP("\n Importing BOX(refClass) as NOP\n");
15424 verCurrentState.esStack[verCurrentState.esStackDepth - 1].seTypeInfo = tiRetVal;
15428 // Look ahead for unbox.any
15429 if (codeAddr + (sz + 1 + sizeof(mdToken)) <= codeEndp && codeAddr[sz] == CEE_UNBOX_ANY)
15431 CORINFO_RESOLVED_TOKEN unboxResolvedToken;
15433 impResolveToken(codeAddr + (sz + 1), &unboxResolvedToken, CORINFO_TOKENKIND_Class);
15435 // See if the resolved tokens describe types that are equal.
15436 const TypeCompareState compare =
15437 info.compCompHnd->compareTypesForEquality(unboxResolvedToken.hClass, resolvedToken.hClass);
15439 // If so, box/unbox.any is a nop.
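// E.g. the common round-trip pattern 'box !T; unbox.any !T' emitted by some
// generic code can be elided entirely when both tokens resolve to the same type.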
15440 if (compare == TypeCompareState::Must)
15442 JITDUMP("\n Importing BOX; UNBOX.ANY as NOP\n");
15443 // Skip the next unbox.any instruction
15444 sz += sizeof(mdToken) + 1;
15449 impImportAndPushBox(&resolvedToken);
15450 if (compDonotInline())
15459 /* Get the Class index */
15460 assertImp(sz == sizeof(unsigned));
15462 _impResolveToken(CORINFO_TOKENKIND_Class);
15464 JITDUMP(" %08X", resolvedToken.token);
15466 if (tiVerificationNeeded)
15468 tiRetVal = typeInfo(TI_INT);
15471 op1 = gtNewIconNode(info.compCompHnd->getClassSize(resolvedToken.hClass));
15472 impPushOnStack(op1, tiRetVal);
15475 case CEE_CASTCLASS:
15477 /* Get the Class index */
15479 assertImp(sz == sizeof(unsigned));
15481 _impResolveToken(CORINFO_TOKENKIND_Casting);
15483 JITDUMP(" %08X", resolvedToken.token);
15485 if (!opts.IsReadyToRun())
15487 op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
15488 if (op2 == nullptr)
15489 { // compDonotInline()
15494 if (tiVerificationNeeded)
15496 Verify(impStackTop().seTypeInfo.IsObjRef(), "object ref expected");
15498 tiRetVal = typeInfo(TI_REF, resolvedToken.hClass);
15501 accessAllowedResult =
15502 info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
15503 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
15505 op1 = impPopStack().val;
15507 /* Pop the address and create the 'checked cast' helper call */
15509 // At this point we expect typeRef to contain the token, op1 to contain the value being cast,
15510 // and op2 to contain code that creates the type handle corresponding to typeRef
15513 GenTree* optTree = impOptimizeCastClassOrIsInst(op1, &resolvedToken, true);
15515 if (optTree != nullptr)
15517 impPushOnStack(optTree, tiRetVal);
15522 #ifdef FEATURE_READYTORUN_COMPILER
15523 if (opts.IsReadyToRun())
15525 GenTreeCall* opLookup =
15526 impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_CHKCAST, TYP_REF,
15527 gtNewArgList(op1));
15528 usingReadyToRunHelper = (opLookup != nullptr);
15529 op1 = (usingReadyToRunHelper ? opLookup : op1);
15531 if (!usingReadyToRunHelper)
15533 // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
15534 // and the chkcastany call with a single call to a dynamic R2R cell that will:
15535 // 1) Load the context
15536 // 2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
15538 // 3) Check the object on the stack for the type-cast
15539 // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
15541 op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
15542 if (op2 == nullptr)
15543 { // compDonotInline()
15549 if (!usingReadyToRunHelper)
15552 op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, true);
15554 if (compDonotInline())
15559 /* Push the result back on the stack */
15560 impPushOnStack(op1, tiRetVal);
15567 if (compIsForInlining())
15569 // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
15570 // TODO: Will this be too strict, given that we will inline many basic blocks?
15571 // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
15573 /* Do we have just the exception on the stack? */
15575 if (verCurrentState.esStackDepth != 1)
15577 /* if not, just don't inline the method */
15579 compInlineResult->NoteFatal(InlineObservation::CALLEE_THROW_WITH_INVALID_STACK);
15584 if (tiVerificationNeeded)
15586 tiRetVal = impStackTop().seTypeInfo;
15587 Verify(tiRetVal.IsObjRef(), "object ref expected");
15588 if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
15590 Verify(!tiRetVal.IsThisPtr(), "throw uninitialized this");
15594 block->bbSetRunRarely(); // any block with a throw is rare
15595 /* Pop the exception object and create the 'throw' helper call */
15597 op1 = gtNewHelperCallNode(CORINFO_HELP_THROW, TYP_VOID, gtNewArgList(impPopStack().val));
15600 if (verCurrentState.esStackDepth > 0)
15602 impEvalSideEffects();
15605 assert(verCurrentState.esStackDepth == 0);
15611 assert(!compIsForInlining());
15613 if (info.compXcptnsCount == 0)
15615 BADCODE("rethrow outside catch");
15618 if (tiVerificationNeeded)
15620 Verify(block->hasHndIndex(), "rethrow outside catch");
15621 if (block->hasHndIndex())
15623 EHblkDsc* HBtab = ehGetDsc(block->getHndIndex());
15624 Verify(!HBtab->HasFinallyOrFaultHandler(), "rethrow in finally or fault");
15625 if (HBtab->HasFilter())
15627 // we'd better be in the handler clause part, not the filter part
15628 Verify(jitIsBetween(compCurBB->bbCodeOffs, HBtab->ebdHndBegOffs(), HBtab->ebdHndEndOffs()),
15629 "rethrow in filter");
15634 /* Create the 'rethrow' helper call */
15636 op1 = gtNewHelperCallNode(CORINFO_HELP_RETHROW, TYP_VOID);
15642 assertImp(sz == sizeof(unsigned));
15644 _impResolveToken(CORINFO_TOKENKIND_Class);
15646 JITDUMP(" %08X", resolvedToken.token);
15648 if (tiVerificationNeeded)
15650 typeInfo tiTo = impStackTop().seTypeInfo;
15651 typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
15653 Verify(tiTo.IsByRef(), "byref expected");
15654 Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
15656 Verify(tiCompatibleWith(tiInstr, tiTo.DereferenceByRef(), false),
15657 "type operand incompatible with type of address");
15660 size = info.compCompHnd->getClassSize(resolvedToken.hClass); // Size
15661 op2 = gtNewIconNode(0); // Value
15662 op1 = impPopStack().val; // Dest
15663 op1 = gtNewBlockVal(op1, size);
15664 op1 = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, false);
15669 if (tiVerificationNeeded)
15671 Verify(false, "bad opcode");
15674 op3 = impPopStack().val; // Size
15675 op2 = impPopStack().val; // Value
15676 op1 = impPopStack().val; // Dest
15678 if (op3->IsCnsIntOrI())
15680 size = (unsigned)op3->AsIntConCommon()->IconValue();
15681 op1 = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, size);
15685 op1 = new (this, GT_DYN_BLK) GenTreeDynBlk(op1, op3);
15688 op1 = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, false);
15694 if (tiVerificationNeeded)
15696 Verify(false, "bad opcode");
15698 op3 = impPopStack().val; // Size
15699 op2 = impPopStack().val; // Src
15700 op1 = impPopStack().val; // Dest
15702 if (op3->IsCnsIntOrI())
15704 size = (unsigned)op3->AsIntConCommon()->IconValue();
15705 op1 = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, size);
15709 op1 = new (this, GT_DYN_BLK) GenTreeDynBlk(op1, op3);
15712 if (op2->OperGet() == GT_ADDR)
15714 op2 = op2->gtOp.gtOp1;
15718 op2 = gtNewOperNode(GT_IND, TYP_STRUCT, op2);
15721 op1 = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, true);
15726 assertImp(sz == sizeof(unsigned));
15728 _impResolveToken(CORINFO_TOKENKIND_Class);
15730 JITDUMP(" %08X", resolvedToken.token);
15732 if (tiVerificationNeeded)
15734 typeInfo tiFrom = impStackTop().seTypeInfo;
15735 typeInfo tiTo = impStackTop(1).seTypeInfo;
15736 typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
15738 Verify(tiFrom.IsByRef(), "expected byref source");
15739 Verify(tiTo.IsByRef(), "expected byref destination");
15741 Verify(tiCompatibleWith(tiFrom.DereferenceByRef(), tiInstr, false),
15742 "type of source address incompatible with type operand");
15743 Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
15744 Verify(tiCompatibleWith(tiInstr, tiTo.DereferenceByRef(), false),
15745 "type operand incompatible with type of destination address");
15748 if (!eeIsValueClass(resolvedToken.hClass))
15750 op1 = impPopStack().val; // address to load from
15752 impBashVarAddrsToI(op1);
15754 assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
15756 op1 = gtNewOperNode(GT_IND, TYP_REF, op1);
15757 op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
15759 impPushOnStack(op1, typeInfo());
15760 opcode = CEE_STIND_REF;
15762 goto STIND_POST_VERIFY;
15765 op2 = impPopStack().val; // Src
15766 op1 = impPopStack().val; // Dest
15767 op1 = gtNewCpObjNode(op1, op2, resolvedToken.hClass, ((prefixFlags & PREFIX_VOLATILE) != 0));
15772 assertImp(sz == sizeof(unsigned));
15774 _impResolveToken(CORINFO_TOKENKIND_Class);
15776 JITDUMP(" %08X", resolvedToken.token);
15778 if (eeIsValueClass(resolvedToken.hClass))
15780 lclTyp = TYP_STRUCT;
15787 if (tiVerificationNeeded)
15790 typeInfo tiPtr = impStackTop(1).seTypeInfo;
15792 // Make sure we have a good looking byref
15793 Verify(tiPtr.IsByRef(), "pointer not byref");
15794 Verify(!tiPtr.IsReadonlyByRef(), "write to readonly byref");
15795 if (!tiPtr.IsByRef() || tiPtr.IsReadonlyByRef())
15797 compUnsafeCastUsed = true;
15800 typeInfo ptrVal = DereferenceByRef(tiPtr);
15801 typeInfo argVal = verMakeTypeInfo(resolvedToken.hClass);
15803 if (!tiCompatibleWith(impStackTop(0).seTypeInfo, NormaliseForStack(argVal), true))
15805 Verify(false, "type of value incompatible with type operand");
15806 compUnsafeCastUsed = true;
15809 if (!tiCompatibleWith(argVal, ptrVal, false))
15811 Verify(false, "type operand incompatible with type of address");
15812 compUnsafeCastUsed = true;
15817 compUnsafeCastUsed = true;
15820 if (lclTyp == TYP_REF)
15822 opcode = CEE_STIND_REF;
15823 goto STIND_POST_VERIFY;
15826 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
15827 if (impIsPrimitive(jitTyp))
15829 lclTyp = JITtype2varType(jitTyp);
15830 goto STIND_POST_VERIFY;
15833 op2 = impPopStack().val; // Value
15834 op1 = impPopStack().val; // Ptr
15836 assertImp(varTypeIsStruct(op2));
15838 op1 = impAssignStructPtr(op1, op2, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
15840 if (op1->OperIsBlkOp() && (prefixFlags & PREFIX_UNALIGNED))
15842 op1->gtFlags |= GTF_BLK_UNALIGNED;
15849 assert(!compIsForInlining());
15851 // Being lazy here. Refanys are tricky in terms of gc tracking.
15852 // Since it is uncommon, just don't perform struct promotion in any method that contains mkrefany.
15854 JITDUMP("disabling struct promotion because of mkrefany\n");
15855 fgNoStructPromotion = true;
15857 oper = GT_MKREFANY;
15858 assertImp(sz == sizeof(unsigned));
15860 _impResolveToken(CORINFO_TOKENKIND_Class);
15862 JITDUMP(" %08X", resolvedToken.token);
15864 op2 = impTokenToHandle(&resolvedToken, nullptr, TRUE);
15865 if (op2 == nullptr)
15866 { // compDonotInline()
15870 if (tiVerificationNeeded)
15872 typeInfo tiPtr = impStackTop().seTypeInfo;
15873 typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
15875 Verify(!verIsByRefLike(tiInstr), "mkrefany of byref-like class");
15876 Verify(!tiPtr.IsReadonlyByRef(), "readonly byref used with mkrefany");
15877 Verify(typeInfo::AreEquivalent(tiPtr.DereferenceByRef(), tiInstr), "type mismatch");
15880 accessAllowedResult =
15881 info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
15882 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
15884 op1 = impPopStack().val;
15886 // @SPECVIOLATION: TYP_INT should not be allowed here by a strict reading of the spec.
15887 // But JIT32 allowed it, so we continue to allow it.
15888 assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL || op1->TypeGet() == TYP_INT);
15890 // MKREFANY returns a struct. op2 is the class token.
15891 op1 = gtNewOperNode(oper, TYP_STRUCT, op1, op2);
15893 impPushOnStack(op1, verMakeTypeInfo(impGetRefAnyClass()));
15899 assertImp(sz == sizeof(unsigned));
15901 _impResolveToken(CORINFO_TOKENKIND_Class);
15903 JITDUMP(" %08X", resolvedToken.token);
15907 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
15909 if (tiVerificationNeeded)
15911 typeInfo tiPtr = impStackTop().seTypeInfo;
15913 // Make sure we have a byref
15914 if (!tiPtr.IsByRef())
15916 Verify(false, "pointer not byref");
15917 compUnsafeCastUsed = true;
15919 typeInfo tiPtrVal = DereferenceByRef(tiPtr);
15921 if (!tiCompatibleWith(tiPtrVal, tiRetVal, false))
15923 Verify(false, "type of address incompatible with type operand");
15924 compUnsafeCastUsed = true;
15926 tiRetVal.NormaliseForStack();
15930 compUnsafeCastUsed = true;
15933 if (eeIsValueClass(resolvedToken.hClass))
15935 lclTyp = TYP_STRUCT;
15940 opcode = CEE_LDIND_REF;
15941 goto LDIND_POST_VERIFY;
15944 op1 = impPopStack().val;
15946 assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL);
15948 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
15949 if (impIsPrimitive(jitTyp))
15951 op1 = gtNewOperNode(GT_IND, JITtype2varType(jitTyp), op1);
15953 // Could point anywhere, example a boxed class static int
15954 op1->gtFlags |= GTF_IND_TGTANYWHERE | GTF_GLOB_REF;
15955 assertImp(varTypeIsArithmetic(op1->gtType));
15959 // OBJ returns a struct
15960 // and takes an inline argument which is the class token of the loaded obj
15961 op1 = gtNewObjNode(resolvedToken.hClass, op1);
15963 op1->gtFlags |= GTF_EXCEPT;
15965 if (prefixFlags & PREFIX_UNALIGNED)
15967 op1->gtFlags |= GTF_IND_UNALIGNED;
15970 impPushOnStack(op1, tiRetVal);
15975 if (tiVerificationNeeded)
15977 typeInfo tiArray = impStackTop().seTypeInfo;
15978 Verify(verIsSDArray(tiArray), "bad array");
15979 tiRetVal = typeInfo(TI_INT);
15982 op1 = impPopStack().val;
15983 if (!opts.MinOpts() && !opts.compDbgCode)
15985 /* Use GT_ARR_LENGTH operator so rng check opts see this */
15986 GenTreeArrLen* arrLen = gtNewArrLen(TYP_INT, op1, OFFSETOF__CORINFO_Array__length);
15988 /* Mark the block as containing a length expression */
15990 if (op1->gtOper == GT_LCL_VAR)
15992 block->bbFlags |= BBF_HAS_IDX_LEN;
15999 /* Create the expression "*(array_addr + ArrLenOffs)" */
16000 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
16001 gtNewIconNode(OFFSETOF__CORINFO_Array__length, TYP_I_IMPL));
16002 op1 = gtNewIndir(TYP_INT, op1);
16005 /* Push the result back on the stack */
16006 impPushOnStack(op1, tiRetVal);
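// A sketch of the two ldlen forms above: with optimizations enabled the length
// stays abstract as
//      GT_ARR_LENGTH(arr)
// so the range-check optimizer can recognize it, while under MinOpts/debug it
// is expanded eagerly into the equivalent explicit load,
//      GT_IND(TYP_INT, GT_ADD(TYP_BYREF, arr, OFFSETOF__CORINFO_Array__length))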
16010 op1 = gtNewHelperCallNode(CORINFO_HELP_USER_BREAKPOINT, TYP_VOID);
16014 if (opts.compDbgCode)
16016 op1 = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
16021 /******************************** NYI *******************************/
16024 OutputDebugStringA("CLR: Invalid x86 breakpoint in IL stream\n");
16027 case CEE_MACRO_END:
16030 BADCODE3("unknown opcode", ": %02X", (int)opcode);
16034 prevOpcode = opcode;
16040 #undef _impResolveToken
16043 #pragma warning(pop)
16046 // Push a local/argument tree on the operand stack
16047 void Compiler::impPushVar(GenTree* op, typeInfo tiRetVal)
16049 tiRetVal.NormaliseForStack();
16051 if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init) && tiRetVal.IsThisPtr())
16053 tiRetVal.SetUninitialisedObjRef();
16056 impPushOnStack(op, tiRetVal);
16059 // Load a local/argument on the operand stack
16060 // lclNum is an index into lvaTable, *NOT* the arg/lcl index in the IL
16061 void Compiler::impLoadVar(unsigned lclNum, IL_OFFSET offset, typeInfo tiRetVal)
16065 if (lvaTable[lclNum].lvNormalizeOnLoad())
16067 lclTyp = lvaGetRealType(lclNum);
16071 lclTyp = lvaGetActualType(lclNum);
16074 impPushVar(gtNewLclvNode(lclNum, lclTyp, offset), tiRetVal);
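// A minimal sketch of the distinction above (types illustrative): an int8 local
// that is lvNormalizeOnLoad() is pushed with its real small type (TYP_BYTE), so
// the load itself re-normalizes the value, whereas a normalize-on-store local is
// pushed with its widened "actual" type (TYP_INT).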
16077 // Load an argument on the operand stack
16078 // Shared by the various CEE_LDARG opcodes
16079 // ilArgNum is the argument index as specified in IL.
16080 // It will be mapped to the correct lvaTable index
16081 void Compiler::impLoadArg(unsigned ilArgNum, IL_OFFSET offset)
16083 Verify(ilArgNum < info.compILargsCount, "bad arg num");
16085 if (compIsForInlining())
16087 if (ilArgNum >= info.compArgsCount)
16089 compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_ARGUMENT_NUMBER);
16093 impPushVar(impInlineFetchArg(ilArgNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo),
16094 impInlineInfo->lclVarInfo[ilArgNum].lclVerTypeInfo);
16098 if (ilArgNum >= info.compArgsCount)
16103 unsigned lclNum = compMapILargNum(ilArgNum); // account for possible hidden param
16105 if (lclNum == info.compThisArg)
16107 lclNum = lvaArg0Var;
16110 impLoadVar(lclNum, offset);
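// Worked example (hypothetical frame layout): for an instance method that also
// receives a hidden return-buffer parameter, lvaTable holds [this, retBuf,
// userArg0, ...], so IL arg 1 (the first user argument) maps through
// compMapILargNum(1) to lvaTable index 2.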
16114 // Load a local on the operand stack
16115 // Shared by the various CEE_LDLOC opcodes
16116 // ilLclNum is the local index as specified in IL.
16117 // It will be mapped to the correct lvaTable index
16118 void Compiler::impLoadLoc(unsigned ilLclNum, IL_OFFSET offset)
16120 if (tiVerificationNeeded)
16122 Verify(ilLclNum < info.compMethodInfo->locals.numArgs, "bad loc num");
16123 Verify(info.compInitMem, "initLocals not set");
16126 if (compIsForInlining())
16128 if (ilLclNum >= info.compMethodInfo->locals.numArgs)
16130 compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_LOCAL_NUMBER);
16134 // Get the local type
16135 var_types lclTyp = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclTypeInfo;
16137 typeInfo tiRetVal = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclVerTypeInfo;
16139 /* Have we allocated a temp for this local? */
16141 unsigned lclNum = impInlineFetchLocal(ilLclNum DEBUGARG("Inline ldloc first use temp"));
16143 // All vars of inlined methods should be !lvNormalizeOnLoad()
16145 assert(!lvaTable[lclNum].lvNormalizeOnLoad());
16146 lclTyp = genActualType(lclTyp);
16148 impPushVar(gtNewLclvNode(lclNum, lclTyp), tiRetVal);
16152 if (ilLclNum >= info.compMethodInfo->locals.numArgs)
16157 unsigned lclNum = info.compArgsCount + ilLclNum;
16159 impLoadVar(lclNum, offset);
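// Worked example: in the non-inline path above, IL locals simply follow the
// arguments in lvaTable, so with info.compArgsCount == 2, IL local 0 lives at
// lvaTable index 2 (lclNum = 2 + 0).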
16163 #ifdef _TARGET_ARM_
16164 /**************************************************************************************
16166 * When assigning a vararg call src to a HFA lcl dest, mark that we cannot promote the
16167 * dst struct, because struct promotion will turn it into a float/double variable while
16168 * the rhs will be an int/long variable. We don't generate code to assign an int into
16169 * a float, but there is nothing that would prevent us from doing so. The tree however
16170 * would look like: (=, (typ_float, typ_int)) or (GT_TRANSFER, (typ_float, typ_int))
16172 * tmpNum - the lcl dst variable num that is a struct.
16173 * src - the src tree assigned to the dest that is a struct/int (when varargs call.)
16174 * hClass - the type handle for the struct variable.
16176 * TODO-ARM-CQ: [301608] This is a rare scenario with varargs and struct promotion coming into play,
16177 * however, we could do a codegen of transferring from int to float registers
16178 * (transfer, not a cast.)
16181 void Compiler::impMarkLclDstNotPromotable(unsigned tmpNum, GenTree* src, CORINFO_CLASS_HANDLE hClass)
16183 if (src->gtOper == GT_CALL && src->gtCall.IsVarargs() && IsHfa(hClass))
16185 int hfaSlots = GetHfaCount(hClass);
16186 var_types hfaType = GetHfaType(hClass);
16188 // If we have varargs, we morph the method's return type to be "int" at the importer, irrespective of its
16189 // original type (struct/float), because the ABI calls for the return to be made in integer registers.
16190 // We don't want struct promotion to replace an expression like this:
16191 // lclFld_int = callvar_int() into lclFld_float = callvar_int();
16192 // This means an int is getting assigned to a float without a cast. Prevent the promotion.
16193 if ((hfaType == TYP_DOUBLE && hfaSlots == sizeof(double) / REGSIZE_BYTES) ||
16194 (hfaType == TYP_FLOAT && hfaSlots == sizeof(float) / REGSIZE_BYTES))
16196 // Make sure this struct type stays as struct so we can receive the call in a struct.
16197 lvaTable[tmpNum].lvIsMultiRegRet = true;
16201 #endif // _TARGET_ARM_
16203 //------------------------------------------------------------------------
16204 // impAssignSmallStructTypeToVar: ensure calls that return small structs whose
16205 // sizes are not supported integral type sizes return values to temps.
16208 // op -- call returning a small struct in a register
16209 // hClass -- class handle for struct
16212 // Tree with reference to struct local to use as call return value.
16215 // The call will be spilled into a preceding statement.
16216 // Currently handles struct returns for 3, 5, 6, and 7 byte structs.
16218 GenTree* Compiler::impAssignSmallStructTypeToVar(GenTree* op, CORINFO_CLASS_HANDLE hClass)
16220 unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Return value temp for small struct return."));
16221 impAssignTempGen(tmpNum, op, hClass, (unsigned)CHECK_SPILL_ALL);
16222 GenTree* ret = gtNewLclvNode(tmpNum, lvaTable[tmpNum].lvType);
16224 // TODO-1stClassStructs: Handle constant propagation and CSE-ing of small struct returns.
16225 ret->gtFlags |= GTF_DONT_CSE;
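// Usage sketch (hypothetical caller): a call returning, say, a 6-byte struct is
// routed through this helper, so downstream phases only ever see the local:
//      tmpN = CALL fn()         // appended as its own statement (the spill)
//      ... GT_LCL_VAR tmpN ...  // returned in place of the call node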
16230 #if FEATURE_MULTIREG_RET
16231 //------------------------------------------------------------------------
16232 // impAssignMultiRegTypeToVar: ensure calls that return structs in multiple
16233 // registers return values to suitable temps.
16236 // op -- call returning a struct in registers
16237 // hClass -- class handle for struct
16240 // Tree with reference to struct local to use as call return value.
16242 GenTree* Compiler::impAssignMultiRegTypeToVar(GenTree* op, CORINFO_CLASS_HANDLE hClass)
16244 unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Return value temp for multireg return."));
16245 impAssignTempGen(tmpNum, op, hClass, (unsigned)CHECK_SPILL_ALL);
16246 GenTree* ret = gtNewLclvNode(tmpNum, lvaTable[tmpNum].lvType);
16248 // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
16249 ret->gtFlags |= GTF_DONT_CSE;
16251 assert(IsMultiRegReturnedType(hClass));
16253 // Mark the var so that fields are not promoted and stay together.
16254 lvaTable[tmpNum].lvIsMultiRegRet = true;
16258 #endif // FEATURE_MULTIREG_RET
16260 // do import for a return
16261 // returns false if inlining was aborted
16262 // opcode can be ret or call in the case of a tail.call
16263 bool Compiler::impReturnInstruction(BasicBlock* block, int prefixFlags, OPCODE& opcode)
16265 if (tiVerificationNeeded)
16267 verVerifyThisPtrInitialised();
16269 unsigned expectedStack = 0;
16270 if (info.compRetType != TYP_VOID)
16272 typeInfo tiVal = impStackTop().seTypeInfo;
16273 typeInfo tiDeclared =
16274 verMakeTypeInfo(info.compMethodInfo->args.retType, info.compMethodInfo->args.retTypeClass);
16276 Verify(!verIsByRefLike(tiDeclared) || verIsSafeToReturnByRef(tiVal), "byref return");
16278 Verify(tiCompatibleWith(tiVal, tiDeclared.NormaliseForStack(), true), "type mismatch");
16281 Verify(verCurrentState.esStackDepth == expectedStack, "stack non-empty on return");
16285 // If we are importing an inlinee and have GC ref locals we always
16286 // need to have a spill temp for the return value. This temp
16287 // should have been set up in advance, over in fgFindBasicBlocks.
16288 if (compIsForInlining() && impInlineInfo->HasGcRefLocals() && (info.compRetType != TYP_VOID))
16290 assert(lvaInlineeReturnSpillTemp != BAD_VAR_NUM);
16294 GenTree* op2 = nullptr;
16295 GenTree* op1 = nullptr;
16296 CORINFO_CLASS_HANDLE retClsHnd = nullptr;
16298 if (info.compRetType != TYP_VOID)
16300 StackEntry se = impPopStack();
16301 retClsHnd = se.seTypeInfo.GetClassHandle();
16304 if (!compIsForInlining())
16306 impBashVarAddrsToI(op2);
16307 op2 = impImplicitIorI4Cast(op2, info.compRetType);
16308 op2 = impImplicitR4orR8Cast(op2, info.compRetType);
16309 assertImp((genActualType(op2->TypeGet()) == genActualType(info.compRetType)) ||
16310 ((op2->TypeGet() == TYP_I_IMPL) && (info.compRetType == TYP_BYREF)) ||
16311 ((op2->TypeGet() == TYP_BYREF) && (info.compRetType == TYP_I_IMPL)) ||
16312 (varTypeIsFloating(op2->gtType) && varTypeIsFloating(info.compRetType)) ||
16313 (varTypeIsStruct(op2) && varTypeIsStruct(info.compRetType)));
16316 if (opts.compGcChecks && info.compRetType == TYP_REF)
16318 // DDB 3483 : JIT Stress: early termination of GC ref's life time in exception code path
16319 // VSW 440513: Incorrect gcinfo on the return value under COMPlus_JitGCChecks=1 for methods with
16322 assert(op2->gtType == TYP_REF);
16324 // confirm that the argument is a GC pointer (for debugging (GC stress))
16325 GenTreeArgList* args = gtNewArgList(op2);
16326 op2 = gtNewHelperCallNode(CORINFO_HELP_CHECK_OBJ, TYP_REF, args);
16330 printf("\ncompGcChecks tree:\n");
16338 // inlinee's stack should be empty now.
16339 assert(verCurrentState.esStackDepth == 0);
16344 printf("\n\n Inlinee Return expression (before normalization) =>\n");
16349 // Make sure the type matches the original call.
16351 var_types returnType = genActualType(op2->gtType);
16352 var_types originalCallType = impInlineInfo->inlineCandidateInfo->fncRetType;
16353 if ((returnType != originalCallType) && (originalCallType == TYP_STRUCT))
16355 originalCallType = impNormStructType(impInlineInfo->inlineCandidateInfo->methInfo.args.retTypeClass);
16358 if (returnType != originalCallType)
16360 // Allow TYP_BYREF to be returned as TYP_I_IMPL and vice versa
16361 if (((returnType == TYP_BYREF) && (originalCallType == TYP_I_IMPL)) ||
16362 ((returnType == TYP_I_IMPL) && (originalCallType == TYP_BYREF)))
16364 JITDUMP("Allowing return type mismatch: have %s, needed %s\n", varTypeName(returnType),
16365 varTypeName(originalCallType));
16369 JITDUMP("Return type mismatch: have %s, needed %s\n", varTypeName(returnType),
16370 varTypeName(originalCallType));
16371 compInlineResult->NoteFatal(InlineObservation::CALLSITE_RETURN_TYPE_MISMATCH);
16376 // Below, we are going to set impInlineInfo->retExpr to the tree with the return
16377 // expression. At this point, retExpr could already be set if there are multiple
16378 // return blocks (meaning fgNeedReturnSpillTemp() == true) and one of
16379 // the other blocks already set it. If there is only a single return block,
16380 // retExpr shouldn't be set. However, this is not true if we reimport a block
16381 // with a return. In that case, retExpr will be set, then the block will be
16382 // reimported, but retExpr won't get cleared as part of setting the block to
16383 // be reimported. The reimported retExpr value should be the same, so even if
16384 // we don't unconditionally overwrite it, it shouldn't matter.
16385 if (info.compRetNativeType != TYP_STRUCT)
16387 // compRetNativeType is not TYP_STRUCT.
16388 // This implies it could be either a scalar type or SIMD vector type or
16389 // a struct type that can be normalized to a scalar type.
16391 if (varTypeIsStruct(info.compRetType))
16393 noway_assert(info.compRetBuffArg == BAD_VAR_NUM);
16394 // adjust the type away from struct to integral
16395 // and no normalizing
16396 op2 = impFixupStructReturnType(op2, retClsHnd);
16400 // Do we have to normalize?
16401 var_types fncRealRetType = JITtype2varType(info.compMethodInfo->args.retType);
16402 if ((varTypeIsSmall(op2->TypeGet()) || varTypeIsSmall(fncRealRetType)) &&
16403 fgCastNeeded(op2, fncRealRetType))
16405 // Small-typed return values are normalized by the callee
16406 op2 = gtNewCastNode(TYP_INT, op2, false, fncRealRetType);
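// Illustrative example: for a method declared to return an int8, a TYP_INT op2
// is normalized here into a narrowing cast,
//      GT_CAST(int <- op2, castTo = TYP_BYTE)
// so that the callee, not the caller, guarantees the small-typed result.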
16410 if (fgNeedReturnSpillTemp())
16412 assert(info.compRetNativeType != TYP_VOID &&
16413 (fgMoreThanOneReturnBlock() || impInlineInfo->HasGcRefLocals()));
16415 // If this method returns a ref type, track the actual types seen
16417 if (info.compRetType == TYP_REF)
16419 bool isExact = false;
16420 bool isNonNull = false;
16421 CORINFO_CLASS_HANDLE returnClsHnd = gtGetClassHandle(op2, &isExact, &isNonNull);
16423 if (impInlineInfo->retExpr == nullptr)
16425 // This is the first return, so best known type is the type
16426 // of this return value.
16427 impInlineInfo->retExprClassHnd = returnClsHnd;
16428 impInlineInfo->retExprClassHndIsExact = isExact;
16430 else if (impInlineInfo->retExprClassHnd != returnClsHnd)
16432 // This return site type differs from earlier seen sites,
16433 // so reset the info and we'll fall back to using the method's
16434 // declared return type for the return spill temp.
16435 impInlineInfo->retExprClassHnd = nullptr;
16436 impInlineInfo->retExprClassHndIsExact = false;
16440 // This is a bit of a workaround...
16441 // If we are inlining a call that returns a struct, where the actual "native" return type is
16442 // not a struct (for example, the struct is composed of exactly one int, and the native
16443 // return type is thus an int), and the inlinee has multiple return blocks (thus,
16444 // fgNeedReturnSpillTemp() == true, and lvaInlineeReturnSpillTemp is the index of a local var that is set
16445 // to the *native* return type), and at least one of the return blocks is the result of
16446 // a call, then we have a problem. The situation is like this (from a failed test case):
16449 // // Note: valuetype plinq_devtests.LazyTests/LIX is a struct with only a single int
16450 // call !!0 [mscorlib]System.Threading.LazyInitializer::EnsureInitialized<valuetype
16451 // plinq_devtests.LazyTests/LIX>(!!0&, bool&, object&, class [mscorlib]System.Func`1<!!0>)
16455 // ldobj !!T // this gets bashed to a GT_LCL_FLD, type TYP_INT
16458 // call !!0 System.Threading.LazyInitializer::EnsureInitializedCore<!!0>(!!0&, bool&,
16459 // object&, class System.Func`1<!!0>)
16462 // In the code above, when we call impFixupStructReturnType(), we will change the op2 return type
16463 // of the inlinee return node, but we don't do that for GT_CALL nodes, which we delay until
16464 // morphing when we call fgFixupStructReturn(). We do this, apparently, to handle nested
16465 // inlining properly by leaving the correct type on the GT_CALL node through importing.
16467 // To fix this, for this case, we temporarily change the GT_CALL node type to the
16468 // native return type, which is what it will be set to eventually. We generate the
16469 // assignment to the return temp, using the correct type, and then restore the GT_CALL
16470 // node type. During morphing, the GT_CALL will get the correct, final, native return type.
16472 bool restoreType = false;
16473 if ((op2->OperGet() == GT_CALL) && (info.compRetType == TYP_STRUCT))
16475 noway_assert(op2->TypeGet() == TYP_STRUCT);
16476 op2->gtType = info.compRetNativeType;
16477 restoreType = true;
16480 impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(),
16481 (unsigned)CHECK_SPILL_ALL);
16483 GenTree* tmpOp2 = gtNewLclvNode(lvaInlineeReturnSpillTemp, op2->TypeGet());
16487 op2->gtType = TYP_STRUCT; // restore it to what it was
16493 if (impInlineInfo->retExpr)
16495 // Some other block(s) have seen the CEE_RET first.
16496 // Better they spilled to the same temp.
16497 assert(impInlineInfo->retExpr->gtOper == GT_LCL_VAR);
16498 assert(impInlineInfo->retExpr->gtLclVarCommon.gtLclNum == op2->gtLclVarCommon.gtLclNum);
16506 printf("\n\n Inlinee Return expression (after normalization) =>\n");
16511 // Report the return expression
16512 impInlineInfo->retExpr = op2;
16516 // compRetNativeType is TYP_STRUCT.
16517 // This implies that struct return via RetBuf arg or multi-reg struct return
16519 GenTreeCall* iciCall = impInlineInfo->iciCall->AsCall();
16521 // Assign the inlinee return into a spill temp.
16522 // spill temp only exists if there are multiple return points
16523 if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
16525 // in this case we have to insert multiple struct copies to the temp
16526 // and the retexpr is just the temp.
16527 assert(info.compRetNativeType != TYP_VOID);
16528 assert(fgMoreThanOneReturnBlock() || impInlineInfo->HasGcRefLocals());
16530 impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(),
16531 (unsigned)CHECK_SPILL_ALL);
16534 #if defined(_TARGET_ARM_) || defined(UNIX_AMD64_ABI)
16535 #if defined(_TARGET_ARM_)
16536 // TODO-ARM64-NYI: HFA
16537 // TODO-AMD64-Unix and TODO-ARM once the ARM64 functionality is implemented the
16538 // next ifdefs could be refactored in a single method with the ifdef inside.
16539 if (IsHfa(retClsHnd))
16541 // Same as !IsHfa but just don't bother with impAssignStructPtr.
16542 #else // defined(UNIX_AMD64_ABI)
16543 ReturnTypeDesc retTypeDesc;
16544 retTypeDesc.InitializeStructReturnType(this, retClsHnd);
16545 unsigned retRegCount = retTypeDesc.GetReturnRegCount();
16547 if (retRegCount != 0)
16549 // If single eightbyte, the return type would have been normalized and there won't be a temp var.
16550 // This code will be called only if the struct return has not been normalized (i.e. 2 eightbytes -
16551 // max allowed.)
16552 assert(retRegCount == MAX_RET_REG_COUNT);
16553 // Same as !structDesc.passedInRegisters but just don't bother with impAssignStructPtr.
16554 CLANG_FORMAT_COMMENT_ANCHOR;
16555 #endif // defined(UNIX_AMD64_ABI)
16557 if (fgNeedReturnSpillTemp())
16559 if (!impInlineInfo->retExpr)
16561 #if defined(_TARGET_ARM_)
16562 impInlineInfo->retExpr = gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType);
16563 #else // defined(UNIX_AMD64_ABI)
16564 // The inlinee compiler has figured out the type of the temp already. Use it here.
16565 impInlineInfo->retExpr =
16566 gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType);
16567 #endif // defined(UNIX_AMD64_ABI)
16572 impInlineInfo->retExpr = op2;
16576 #elif defined(_TARGET_ARM64_)
16577 ReturnTypeDesc retTypeDesc;
16578 retTypeDesc.InitializeStructReturnType(this, retClsHnd);
16579 unsigned retRegCount = retTypeDesc.GetReturnRegCount();
16581 if (retRegCount != 0)
16583 assert(!iciCall->HasRetBufArg());
16584 assert(retRegCount >= 2);
16585 if (fgNeedReturnSpillTemp())
16587 if (!impInlineInfo->retExpr)
16589 // The inlinee compiler has figured out the type of the temp already. Use it here.
16590 impInlineInfo->retExpr =
16591 gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType);
16596 impInlineInfo->retExpr = op2;
16600 #endif // defined(_TARGET_ARM64_)
16602 assert(iciCall->HasRetBufArg());
16603 GenTree* dest = gtCloneExpr(iciCall->gtCallArgs->gtOp.gtOp1);
16604 // spill temp only exists if there are multiple return points
16605 if (fgNeedReturnSpillTemp())
16607 // if this is the first return we have seen, set the retExpr
16608 if (!impInlineInfo->retExpr)
16610 impInlineInfo->retExpr =
16611 impAssignStructPtr(dest, gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType),
16612 retClsHnd, (unsigned)CHECK_SPILL_ALL);
16617 impInlineInfo->retExpr = impAssignStructPtr(dest, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL);
16624 if (compIsForInlining())
16629 if (info.compRetType == TYP_VOID)
16632 op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID);
16634 else if (info.compRetBuffArg != BAD_VAR_NUM)
16636 // Assign value to return buff (first param)
16637 GenTree* retBuffAddr = gtNewLclvNode(info.compRetBuffArg, TYP_BYREF, impCurStmtOffs);
16639 op2 = impAssignStructPtr(retBuffAddr, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL);
16640 impAppendTree(op2, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
16642 // There are cases where the address of the implicit RetBuf should be returned explicitly (in RAX).
16643 CLANG_FORMAT_COMMENT_ANCHOR;
16645 #if defined(_TARGET_AMD64_)
16647 // The x64 (System V and Win64) calling conventions require us to
16648 // return the implicit return buffer explicitly (in RAX).
16649 // Change the return type to be BYREF.
16650 op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF));
16651 #else // !defined(_TARGET_AMD64_)
16652 // In the case of non-AMD64 targets, the profiler hook requires us to return the implicit RetBuf explicitly.
16653 // In such a case the return value of the function is changed to BYREF.
16654 // If profiler hook is not needed the return type of the function is TYP_VOID.
16655 if (compIsProfilerHookNeeded())
16657 op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF));
16662 op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID);
16664 #endif // !defined(_TARGET_AMD64_)
16666 else if (varTypeIsStruct(info.compRetType))
16668 #if !FEATURE_MULTIREG_RET
16669 // For both ARM architectures the HFA native types are maintained as structs.
16670 // On System V AMD64 the multireg struct returns are likewise left as structs.
16671 noway_assert(info.compRetNativeType != TYP_STRUCT);
16673 op2 = impFixupStructReturnType(op2, retClsHnd);
16675 op1 = gtNewOperNode(GT_RETURN, genActualType(info.compRetNativeType), op2);
16680 op1 = gtNewOperNode(GT_RETURN, genActualType(info.compRetType), op2);
16683 // We must have imported a tailcall and jumped to RET
16684 if (prefixFlags & PREFIX_TAILCALL)
16686 #if defined(FEATURE_CORECLR) || !defined(_TARGET_AMD64_)
16688 // This cannot be asserted on Amd64 since we permit the following IL pattern:
16689 //      tail.call
16690 //      pop
16691 //      ret
16692 assert(verCurrentState.esStackDepth == 0 && impOpcodeIsCallOpcode(opcode));
16693 #endif // FEATURE_CORECLR || !_TARGET_AMD64_
16695 opcode = CEE_RET; // To prevent trying to spill if CALL_SITE_BOUNDARIES
16697 // impImportCall() would have already appended TYP_VOID calls
16698 if (info.compRetType == TYP_VOID)
16704 impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
16706 // Remember at which BC offset the tree was finished
16707 impNoteLastILoffs();
16712 /*****************************************************************************
16713 * Mark the block as unimported.
16714 * Note that the caller is responsible for calling impImportBlockPending(),
16715 * with the appropriate stack-state
16718 inline void Compiler::impReimportMarkBlock(BasicBlock* block)
16721 if (verbose && (block->bbFlags & BBF_IMPORTED))
16723 printf("\n" FMT_BB " will be reimported\n", block->bbNum);
16727 block->bbFlags &= ~BBF_IMPORTED;
16730 /*****************************************************************************
16731 * Mark the successors of the given block as unimported.
16732 * Note that the caller is responsible for calling impImportBlockPending()
16733 * for all the successors, with the appropriate stack-state.
16736 void Compiler::impReimportMarkSuccessors(BasicBlock* block)
16738 const unsigned numSuccs = block->NumSucc();
16739 for (unsigned i = 0; i < numSuccs; i++)
16741 impReimportMarkBlock(block->GetSucc(i));
16745 /*****************************************************************************
16747 * Filter wrapper to handle only the passed-in exception code
16751 LONG FilterVerificationExceptions(PEXCEPTION_POINTERS pExceptionPointers, LPVOID lpvParam)
16753 if (pExceptionPointers->ExceptionRecord->ExceptionCode == SEH_VERIFICATION_EXCEPTION)
16755 return EXCEPTION_EXECUTE_HANDLER;
16758 return EXCEPTION_CONTINUE_SEARCH;
16761 void Compiler::impVerifyEHBlock(BasicBlock* block, bool isTryStart)
16763 assert(block->hasTryIndex());
16764 assert(!compIsForInlining());
16766 unsigned tryIndex = block->getTryIndex();
16767 EHblkDsc* HBtab = ehGetDsc(tryIndex);
16771 assert(block->bbFlags & BBF_TRY_BEG);
16773 // The Stack must be empty
16775 if (block->bbStkDepth != 0)
16777 BADCODE("Evaluation stack must be empty on entry into a try block");
16781 // Save the stack contents, we'll need to restore it later
16783 SavedStack blockState;
16784 impSaveStackState(&blockState, false);
16786 while (HBtab != nullptr)
16790 // Are we verifying that an instance constructor properly initializes its 'this' pointer once?
16791 // We do not allow the 'this' pointer to be uninitialized when entering most kinds of try regions
16793 if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
16795 // We trigger an invalid program exception here unless we have a try/fault region.
16797 if (HBtab->HasCatchHandler() || HBtab->HasFinallyHandler() || HBtab->HasFilter())
16800 "The 'this' pointer of an instance constructor is not intialized upon entry to a try region");
16804 // Allow a try/fault region to proceed.
16805 assert(HBtab->HasFaultHandler());
16809 /* Recursively process the handler block */
16810 BasicBlock* hndBegBB = HBtab->ebdHndBeg;
16812 // Construct the proper verification stack state
16813 // either empty or one that contains just
16814 // the Exception Object that we are dealing with
16816 verCurrentState.esStackDepth = 0;
16818 if (handlerGetsXcptnObj(hndBegBB->bbCatchTyp))
16820 CORINFO_CLASS_HANDLE clsHnd;
16822 if (HBtab->HasFilter())
16824 clsHnd = impGetObjectClass();
16828 CORINFO_RESOLVED_TOKEN resolvedToken;
16830 resolvedToken.tokenContext = impTokenLookupContextHandle;
16831 resolvedToken.tokenScope = info.compScopeHnd;
16832 resolvedToken.token = HBtab->ebdTyp;
16833 resolvedToken.tokenType = CORINFO_TOKENKIND_Class;
16834 info.compCompHnd->resolveToken(&resolvedToken);
16836 clsHnd = resolvedToken.hClass;
16839 // push the catch arg on the stack, spill to a temp if necessary
16840 // Note: can update HBtab->ebdHndBeg!
16841 hndBegBB = impPushCatchArgOnStack(hndBegBB, clsHnd, false);
16844 // Queue up the handler for importing
16846 impImportBlockPending(hndBegBB);
16848 if (HBtab->HasFilter())
16850 /* @VERIFICATION : Ideally the end-of-filter state should get
16851 propagated to the catch handler; this is an incompleteness,
16852 but it is not a security/compliance issue, since the only
16853 interesting state is the 'thisInit' state.
16856 verCurrentState.esStackDepth = 0;
16858 BasicBlock* filterBB = HBtab->ebdFilter;
16860 // push the catch arg on the stack, spill to a temp if necessary
16861 // Note: can update HBtab->ebdFilter!
16862 const bool isSingleBlockFilter = (filterBB->bbNext == hndBegBB);
16863 filterBB = impPushCatchArgOnStack(filterBB, impGetObjectClass(), isSingleBlockFilter);
16865 impImportBlockPending(filterBB);
16868 else if (verTrackObjCtorInitState && HBtab->HasFaultHandler())
16870 /* Recursively process the handler block */
16872 verCurrentState.esStackDepth = 0;
16874 // Queue up the fault handler for importing
16876 impImportBlockPending(HBtab->ebdHndBeg);
16879 // Now process our enclosing try index (if any)
16881 tryIndex = HBtab->ebdEnclosingTryIndex;
16882 if (tryIndex == EHblkDsc::NO_ENCLOSING_INDEX)
16888 HBtab = ehGetDsc(tryIndex);
16892 // Restore the stack contents
16893 impRestoreStackState(&blockState);
16896 //***************************************************************
16897 // Import the instructions for the given basic block. Perform
16898 // verification, throwing an exception on failure. Push any successor blocks that are enabled for the first
16899 // time, or whose verification pre-state is changed.
16902 #pragma warning(push)
16903 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
16905 void Compiler::impImportBlock(BasicBlock* block)
16907 // BBF_INTERNAL blocks only exist during importation due to EH canonicalization. We need to
16908 // handle them specially. In particular, there is no IL to import for them, but we do need
16909 // to mark them as imported and put their successors on the pending import list.
16910 if (block->bbFlags & BBF_INTERNAL)
16912 JITDUMP("Marking BBF_INTERNAL block " FMT_BB " as BBF_IMPORTED\n", block->bbNum);
16913 block->bbFlags |= BBF_IMPORTED;
16915 const unsigned numSuccs = block->NumSucc();
16916 for (unsigned i = 0; i < numSuccs; i++)
16918 impImportBlockPending(block->GetSucc(i));
16928 /* Make the block globally available */
16933 /* Initialize the debug variables */
16934 impCurOpcName = "unknown";
16935 impCurOpcOffs = block->bbCodeOffs;
16938 /* Set the current stack state to the merged result */
16939 verResetCurrentState(block, &verCurrentState);
16941 /* Now walk the code and import the IL into GenTrees */
16943 struct FilterVerificationExceptionsParam
16948 FilterVerificationExceptionsParam param;
16950 param.pThis = this;
16951 param.block = block;
16953 PAL_TRY(FilterVerificationExceptionsParam*, pParam, ¶m)
16955 /* @VERIFICATION : For now, the only state propagation from try
16956 to its handler is "thisInit" state (stack is empty at start of try).
16957 In general, for state that we track in verification, we need to
16958 model the possibility that an exception might happen at any IL
16959 instruction, so we really need to merge all states that obtain
16960 between IL instructions in a try block into the start states of
16961 all handlers.
16963 However we do not allow the 'this' pointer to be uninitialized when
16964 entering most kinds of try regions (only try/fault regions are allowed to have
16965 an uninitialized this pointer on entry to the try)
16967 Fortunately, the stack is thrown away when an exception
16968 leads to a handler, so we don't have to worry about that.
16969 We DO, however, have to worry about the "thisInit" state.
16970 But only for the try/fault case.
16972 The only allowed transition is from TIS_Uninit to TIS_Init.
16974 So for a try/fault region for the fault handler block
16975 we will merge the start state of the try begin
16976 and the post-state of each block that is part of this try region
16979 // merge the start state of the try begin
16981 if (pParam->block->bbFlags & BBF_TRY_BEG)
16983 pParam->pThis->impVerifyEHBlock(pParam->block, true);
16986 pParam->pThis->impImportBlockCode(pParam->block);
16988 // As discussed above:
16989 // merge the post-state of each block that is part of this try region
16991 if (pParam->block->hasTryIndex())
16993 pParam->pThis->impVerifyEHBlock(pParam->block, false);
16996 PAL_EXCEPT_FILTER(FilterVerificationExceptions)
16998 verHandleVerificationFailure(block DEBUGARG(false));
17002 if (compDonotInline())
17007 assert(!compDonotInline());
17009 markImport = false;
17013 unsigned baseTmp = NO_BASE_TMP; // input temps assigned to successor blocks
17014 bool reimportSpillClique = false;
17015 BasicBlock* tgtBlock = nullptr;
17017 /* If the stack is non-empty, we might have to spill its contents */
17019 if (verCurrentState.esStackDepth != 0)
17021 impBoxTemp = BAD_VAR_NUM; // if a box temp is used in a block that leaves something
17022 // on the stack, its lifetime is hard to determine, so simply
17023 // don't reuse such temps.
17025 GenTree* addStmt = nullptr;
17027 /* Do the successors of 'block' have any other predecessors ?
17028 We do not want to do some of the optimizations related to multiRef
17029 if we can reimport blocks */
17031 unsigned multRef = impCanReimport ? unsigned(~0) : 0;
17033 switch (block->bbJumpKind)
17037 /* Temporarily remove the 'jtrue' from the end of the tree list */
17039 assert(impTreeLast);
17040 assert(impTreeLast->gtOper == GT_STMT);
17041 assert(impTreeLast->gtStmt.gtStmtExpr->gtOper == GT_JTRUE);
17043 addStmt = impTreeLast;
17044 impTreeLast = impTreeLast->gtPrev;
17046 /* Note if the next block has more than one ancestor */
17048 multRef |= block->bbNext->bbRefs;
17050 /* Does the next block have temps assigned? */
17052 baseTmp = block->bbNext->bbStkTempsIn;
17053 tgtBlock = block->bbNext;
17055 if (baseTmp != NO_BASE_TMP)
17060 /* Try the target of the jump then */
17062 multRef |= block->bbJumpDest->bbRefs;
17063 baseTmp = block->bbJumpDest->bbStkTempsIn;
17064 tgtBlock = block->bbJumpDest;
17068 multRef |= block->bbJumpDest->bbRefs;
17069 baseTmp = block->bbJumpDest->bbStkTempsIn;
17070 tgtBlock = block->bbJumpDest;
17074 multRef |= block->bbNext->bbRefs;
17075 baseTmp = block->bbNext->bbStkTempsIn;
17076 tgtBlock = block->bbNext;
17081 BasicBlock** jmpTab;
17084 /* Temporarily remove the GT_SWITCH from the end of the tree list */
17086 assert(impTreeLast);
17087 assert(impTreeLast->gtOper == GT_STMT);
17088 assert(impTreeLast->gtStmt.gtStmtExpr->gtOper == GT_SWITCH);
17090 addStmt = impTreeLast;
17091 impTreeLast = impTreeLast->gtPrev;
17093 jmpCnt = block->bbJumpSwt->bbsCount;
17094 jmpTab = block->bbJumpSwt->bbsDstTab;
17098 tgtBlock = (*jmpTab);
17100 multRef |= tgtBlock->bbRefs;
17102 // Thanks to spill cliques, we should have assigned all or none
17103 assert((baseTmp == NO_BASE_TMP) || (baseTmp == tgtBlock->bbStkTempsIn));
17104 baseTmp = tgtBlock->bbStkTempsIn;
17109 } while (++jmpTab, --jmpCnt);
17113 case BBJ_CALLFINALLY:
17114 case BBJ_EHCATCHRET:
17116 case BBJ_EHFINALLYRET:
17117 case BBJ_EHFILTERRET:
17119 NO_WAY("can't have 'unreached' end of BB with non-empty stack");
17123 noway_assert(!"Unexpected bbJumpKind");
17127 assert(multRef >= 1);
17129 /* Do we have a base temp number? */
17131 bool newTemps = (baseTmp == NO_BASE_TMP);
17135 /* Grab enough temps for the whole stack */
17136 baseTmp = impGetSpillTmpBase(block);
17139 /* Spill all stack entries into temps */
17140 unsigned level, tempNum;
17142 JITDUMP("\nSpilling stack entries into temps\n");
17143 for (level = 0, tempNum = baseTmp; level < verCurrentState.esStackDepth; level++, tempNum++)
17145 GenTree* tree = verCurrentState.esStack[level].val;
17147 /* VC generates code where it pushes a byref from one branch, and an int (ldc.i4 0) from
17148 the other. This should merge to a byref in unverifiable code.
17149 However, if the branch which leaves the TYP_I_IMPL on the stack is imported first, the
17150 successor would be imported assuming there was a TYP_I_IMPL on
17151 the stack. Thus the value would not get GC-tracked. Hence,
17152 change the temp to TYP_BYREF and reimport the successors.
17153 Note: We should only allow this in unverifiable code.
17155 if (tree->gtType == TYP_BYREF && lvaTable[tempNum].lvType == TYP_I_IMPL && !verNeedsVerification())
17157 lvaTable[tempNum].lvType = TYP_BYREF;
17158 impReimportMarkSuccessors(block);
17162 #ifdef _TARGET_64BIT_
17163 if (genActualType(tree->gtType) == TYP_I_IMPL && lvaTable[tempNum].lvType == TYP_INT)
17165 if (tiVerificationNeeded && tgtBlock->bbEntryState != nullptr &&
17166 (tgtBlock->bbFlags & BBF_FAILED_VERIFICATION) == 0)
17168 // Merge the current state into the entry state of block;
17169 // the call to verMergeEntryStates must have changed
17170 // the entry state of the block by merging the int local var
17171 // and the native-int stack entry.
17172 bool changed = false;
17173 if (verMergeEntryStates(tgtBlock, &changed))
17175 impRetypeEntryStateTemps(tgtBlock);
17176 impReimportBlockPending(tgtBlock);
17181 tgtBlock->bbFlags |= BBF_FAILED_VERIFICATION;
17186 // Some other block in the spill clique set this to "int", but now we have "native int".
17187 // Change the type and go back to re-import any blocks that used the wrong type.
17188 lvaTable[tempNum].lvType = TYP_I_IMPL;
17189 reimportSpillClique = true;
17191 else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_I_IMPL)
17193 // Spill clique has decided this should be "native int", but this block only pushes an "int".
17194 // Insert a sign-extension to "native int" so we match the clique.
17195 verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, false, TYP_I_IMPL);
17198 // Consider the case where one branch left a 'byref' on the stack and the other leaves
17199 // an 'int'. On 32-bit, this is allowed (in non-verifiable code) since they are the same
17200 // size. JIT64 managed to make this work on 64-bit. For compatibility, we support JIT64
17201 // behavior instead of asserting and then generating bad code (where we save/restore the
17202 // low 32 bits of a byref pointer to an 'int' sized local). If the 'int' side has been
17203 // imported already, we need to change the type of the local and reimport the spill clique.
17204 // If the 'byref' side has imported, we insert a cast from int to 'native int' to match
17205 // the 'byref' size.
17206 if (!tiVerificationNeeded)
17208 if (genActualType(tree->gtType) == TYP_BYREF && lvaTable[tempNum].lvType == TYP_INT)
17210 // Some other block in the spill clique set this to "int", but now we have "byref".
17211 // Change the type and go back to re-import any blocks that used the wrong type.
17212 lvaTable[tempNum].lvType = TYP_BYREF;
17213 reimportSpillClique = true;
17215 else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_BYREF)
17217 // Spill clique has decided this should be "byref", but this block only pushes an "int".
17218 // Insert a sign-extension to "native int" so we match the clique size.
17219 verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, false, TYP_I_IMPL);
17222 #endif // _TARGET_64BIT_
17224 if (tree->gtType == TYP_DOUBLE && lvaTable[tempNum].lvType == TYP_FLOAT)
17226 // Some other block in the spill clique set this to "float", but now we have "double".
17227 // Change the type and go back to re-import any blocks that used the wrong type.
17228 lvaTable[tempNum].lvType = TYP_DOUBLE;
17229 reimportSpillClique = true;
17231 else if (tree->gtType == TYP_FLOAT && lvaTable[tempNum].lvType == TYP_DOUBLE)
17233 // Spill clique has decided this should be "double", but this block only pushes a "float".
17234 // Insert a cast to "double" so we match the clique.
17235 verCurrentState.esStack[level].val = gtNewCastNode(TYP_DOUBLE, tree, false, TYP_DOUBLE);
17238 /* If addStmt has a reference to tempNum (can only happen if we
17239 are spilling to the temps already used by a previous block),
17240 we need to spill addStmt */
17242 if (addStmt && !newTemps && gtHasRef(addStmt->gtStmt.gtStmtExpr, tempNum, false))
17244 GenTree* addTree = addStmt->gtStmt.gtStmtExpr;
17246 if (addTree->gtOper == GT_JTRUE)
17248 GenTree* relOp = addTree->gtOp.gtOp1;
17249 assert(relOp->OperIsCompare());
17251 var_types type = genActualType(relOp->gtOp.gtOp1->TypeGet());
17253 if (gtHasRef(relOp->gtOp.gtOp1, tempNum, false))
17255 unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op1"));
17256 impAssignTempGen(temp, relOp->gtOp.gtOp1, level);
17257 type = genActualType(lvaTable[temp].TypeGet());
17258 relOp->gtOp.gtOp1 = gtNewLclvNode(temp, type);
17261 if (gtHasRef(relOp->gtOp.gtOp2, tempNum, false))
17263 unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op2"));
17264 impAssignTempGen(temp, relOp->gtOp.gtOp2, level);
17265 type = genActualType(lvaTable[temp].TypeGet());
17266 relOp->gtOp.gtOp2 = gtNewLclvNode(temp, type);
17271 assert(addTree->gtOper == GT_SWITCH && genActualTypeIsIntOrI(addTree->gtOp.gtOp1->TypeGet()));
17273 unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt SWITCH"));
17274 impAssignTempGen(temp, addTree->gtOp.gtOp1, level);
17275 addTree->gtOp.gtOp1 = gtNewLclvNode(temp, genActualType(addTree->gtOp.gtOp1->TypeGet()));
17279 /* Spill the stack entry, and replace with the temp */
17281 if (!impSpillStackEntry(level, tempNum
17284 true, "Spill Stack Entry"
17290 BADCODE("bad stack state");
17293 // Oops. Something went wrong when spilling. Bad code.
17294 verHandleVerificationFailure(block DEBUGARG(true));
17300 /* Put back the 'jtrue'/'switch' if we removed it earlier */
17304 impAppendStmt(addStmt, (unsigned)CHECK_SPILL_NONE);
17308 // Some of the append/spill logic works on compCurBB
17310 assert(compCurBB == block);
17312 /* Save the tree list in the block */
17313 impEndTreeList(block);
17315 // impEndTreeList sets BBF_IMPORTED on the block
17316 // We do *NOT* want to set it later than this because
17317 // impReimportSpillClique might clear it if this block is both a
17318 // predecessor and successor in the current spill clique
17319 assert(block->bbFlags & BBF_IMPORTED);
17321 // If we had a int/native int, or float/double collision, we need to re-import
17322 if (reimportSpillClique)
17324 // This will re-import all the successors of block (as well as each of their predecessors)
17325 impReimportSpillClique(block);
17327 // For blocks that haven't been imported yet, we still need to mark them as pending import.
17328 const unsigned numSuccs = block->NumSucc();
17329 for (unsigned i = 0; i < numSuccs; i++)
17331 BasicBlock* succ = block->GetSucc(i);
17332 if ((succ->bbFlags & BBF_IMPORTED) == 0)
17334 impImportBlockPending(succ);
17338 else // the normal case
17340 // otherwise just import the successors of block
17342 /* Does this block jump to any other blocks? */
17343 const unsigned numSuccs = block->NumSucc();
17344 for (unsigned i = 0; i < numSuccs; i++)
17346 impImportBlockPending(block->GetSucc(i));
17351 #pragma warning(pop)
17354 /*****************************************************************************/
17356 // Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if
17357 // necessary (and ensures that it is a member of the set of BB's on the list, by setting its byte in
17358 // impPendingBlockMembers). Merges the current verification state into the verification state of "block"
17359 // (its "pre-state").
17361 void Compiler::impImportBlockPending(BasicBlock* block)
17366 printf("\nimpImportBlockPending for " FMT_BB "\n", block->bbNum);
17370 // We will add a block to the pending set if it has not already been imported (or needs to be re-imported),
17371 // or if it has, but merging in a predecessor's post-state changes the block's pre-state.
17372 // (When we're doing verification, we always attempt the merge to detect verification errors.)
17374 // If the block has not been imported, add to pending set.
17375 bool addToPending = ((block->bbFlags & BBF_IMPORTED) == 0);
17377 // Initialize bbEntryState just the first time we try to add this block to the pending list
17378 // Just because bbEntryState is NULL doesn't mean the pre-state wasn't previously set
17379 // We use NULL to indicate the 'common' state to avoid memory allocation
17380 if ((block->bbEntryState == nullptr) && ((block->bbFlags & (BBF_IMPORTED | BBF_FAILED_VERIFICATION)) == 0) &&
17381 (impGetPendingBlockMember(block) == 0))
17383 verInitBBEntryState(block, &verCurrentState);
17384 assert(block->bbStkDepth == 0);
17385 block->bbStkDepth = static_cast<unsigned short>(verCurrentState.esStackDepth);
17386 assert(addToPending);
17387 assert(impGetPendingBlockMember(block) == 0);
17391 // The stack should have the same height on entry to the block from all its predecessors.
17392 if (block->bbStkDepth != verCurrentState.esStackDepth)
17396 sprintf_s(buffer, sizeof(buffer),
17397 "Block at offset %4.4x to %4.4x in %s entered with different stack depths.\n"
17398 "Previous depth was %d, current depth is %d",
17399 block->bbCodeOffs, block->bbCodeOffsEnd, info.compFullName, block->bbStkDepth,
17400 verCurrentState.esStackDepth);
17401 buffer[400 - 1] = 0;
17404 NO_WAY("Block entered with different stack depths");
17408 // Additionally, if we need to verify, merge the verification state.
17409 if (tiVerificationNeeded)
17411 // Merge the current state into the entry state of block; if this does not change the entry state
17412 // by merging, do not add the block to the pending-list.
17413 bool changed = false;
17414 if (!verMergeEntryStates(block, &changed))
17416 block->bbFlags |= BBF_FAILED_VERIFICATION;
17417 addToPending = true; // We will pop it off, and check the flag set above.
17421 addToPending = true;
17423 JITDUMP("Adding " FMT_BB " to pending set due to new merge result\n", block->bbNum);
17432 if (block->bbStkDepth > 0)
17434 // We need to fix the types of any spill temps that might have changed:
17435 // int->native int, float->double, int->byref, etc.
17436 impRetypeEntryStateTemps(block);
17439 // OK, we must add to the pending list, if it's not already in it.
17440 if (impGetPendingBlockMember(block) != 0)
17446 // Get an entry to add to the pending list
17450 if (impPendingFree)
17452 // We can reuse one of the freed up dscs.
17453 dsc = impPendingFree;
17454 impPendingFree = dsc->pdNext;
17458 // We have to create a new dsc
17459 dsc = new (this, CMK_Unknown) PendingDsc;
17463 dsc->pdSavedStack.ssDepth = verCurrentState.esStackDepth;
17464 dsc->pdThisPtrInit = verCurrentState.thisInitialized;
17466 // Save the stack trees for later
17468 if (verCurrentState.esStackDepth)
17470 impSaveStackState(&dsc->pdSavedStack, false);
17473 // Add the entry to the pending list
17475 dsc->pdNext = impPendingList;
17476 impPendingList = dsc;
17477 impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set.
17479 // Various assertions require us to now consider the block as not imported (at least for
17480 // the final time...)
17481 block->bbFlags &= ~BBF_IMPORTED;
17486 printf("Added PendingDsc - %08p for " FMT_BB "\n", dspPtr(dsc), block->bbNum);
17491 /*****************************************************************************/
17493 // Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if
17494 // necessary (and ensures that it is a member of the set of BB's on the list, by setting its byte in
17495 // impPendingBlockMembers). Does *NOT* change the existing "pre-state" of the block.
17497 void Compiler::impReimportBlockPending(BasicBlock* block)
17499 JITDUMP("\nimpReimportBlockPending for " FMT_BB, block->bbNum);
17501 assert(block->bbFlags & BBF_IMPORTED);
17503 // OK, we must add to the pending list, if it's not already in it.
17504 if (impGetPendingBlockMember(block) != 0)
17509 // Get an entry to add to the pending list
17513 if (impPendingFree)
17515 // We can reuse one of the freed up dscs.
17516 dsc = impPendingFree;
17517 impPendingFree = dsc->pdNext;
17521 // We have to create a new dsc
17522 dsc = new (this, CMK_ImpStack) PendingDsc;
17527 if (block->bbEntryState)
17529 dsc->pdThisPtrInit = block->bbEntryState->thisInitialized;
17530 dsc->pdSavedStack.ssDepth = block->bbEntryState->esStackDepth;
17531 dsc->pdSavedStack.ssTrees = block->bbEntryState->esStack;
17535 dsc->pdThisPtrInit = TIS_Bottom;
17536 dsc->pdSavedStack.ssDepth = 0;
17537 dsc->pdSavedStack.ssTrees = nullptr;
17540 // Add the entry to the pending list
17542 dsc->pdNext = impPendingList;
17543 impPendingList = dsc;
17544 impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set.
17546 // Various assertions require us to now consider the block as not imported (at least for
17547 // the final time...)
17548 block->bbFlags &= ~BBF_IMPORTED;
17553 printf("Added PendingDsc - %08p for " FMT_BB "\n", dspPtr(dsc), block->bbNum);
17558 void* Compiler::BlockListNode::operator new(size_t sz, Compiler* comp)
17560 if (comp->impBlockListNodeFreeList == nullptr)
17562 return comp->getAllocator(CMK_BasicBlock).allocate<BlockListNode>(1);
17566 BlockListNode* res = comp->impBlockListNodeFreeList;
17567 comp->impBlockListNodeFreeList = res->m_next;
17572 void Compiler::FreeBlockListNode(Compiler::BlockListNode* node)
17574 node->m_next = impBlockListNodeFreeList;
17575 impBlockListNodeFreeList = node;
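// A sketch of the fixpoint computed by impWalkSpillCliqueFromPred below:
// starting from "block" as a predecessor, we alternately add every successor of
// a known predecessor and every predecessor of a known successor until both
// worklists drain. For a hypothetical flow where B1 and B2 both flow into B3,
// and B2 also jumps to B4, walking from B1 yields the successor set {B3, B4}
// and the predecessor set {B1, B2} -- all the blocks that must agree on one
// base of spill temps.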
17578 void Compiler::impWalkSpillCliqueFromPred(BasicBlock* block, SpillCliqueWalker* callback)
17582 noway_assert(!fgComputePredsDone);
17583 if (!fgCheapPredsValid)
17585 fgComputeCheapPreds();
17588 BlockListNode* succCliqueToDo = nullptr;
17589 BlockListNode* predCliqueToDo = new (this) BlockListNode(block);
17593 // Look at the successors of every member of the predecessor to-do list.
17594 while (predCliqueToDo != nullptr)
17596 BlockListNode* node = predCliqueToDo;
17597 predCliqueToDo = node->m_next;
17598 BasicBlock* blk = node->m_blk;
17599 FreeBlockListNode(node);
17601 const unsigned numSuccs = blk->NumSucc();
17602 for (unsigned succNum = 0; succNum < numSuccs; succNum++)
17604 BasicBlock* succ = blk->GetSucc(succNum);
17605 // If it's not already in the clique, add it, and also add it
17606 // as a member of the successor "toDo" set.
17607 if (impSpillCliqueGetMember(SpillCliqueSucc, succ) == 0)
17609 callback->Visit(SpillCliqueSucc, succ);
17610 impSpillCliqueSetMember(SpillCliqueSucc, succ, 1);
17611 succCliqueToDo = new (this) BlockListNode(succ, succCliqueToDo);
17616 // Look at the predecessors of every member of the successor to-do list.
17617 while (succCliqueToDo != nullptr)
17619 BlockListNode* node = succCliqueToDo;
17620 succCliqueToDo = node->m_next;
17621 BasicBlock* blk = node->m_blk;
17622 FreeBlockListNode(node);
17624 for (BasicBlockList* pred = blk->bbCheapPreds; pred != nullptr; pred = pred->next)
17626 BasicBlock* predBlock = pred->block;
17627 // If it's not already in the clique, add it, and also add it
17628 // as a member of the predecessor "toDo" set.
17629 if (impSpillCliqueGetMember(SpillCliquePred, predBlock) == 0)
17631 callback->Visit(SpillCliquePred, predBlock);
17632 impSpillCliqueSetMember(SpillCliquePred, predBlock, 1);
17633 predCliqueToDo = new (this) BlockListNode(predBlock, predCliqueToDo);
17640 // If this fails, it means we didn't walk the spill clique properly and somehow managed
17641 // to miss walking back to include the predecessor we started from.
17642 // The most likely cause: missing or out-of-date bbPreds
17643 assert(impSpillCliqueGetMember(SpillCliquePred, block) != 0);
17646 void Compiler::SetSpillTempsBase::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk)
17648 if (predOrSucc == SpillCliqueSucc)
17650 assert(blk->bbStkTempsIn == NO_BASE_TMP); // Should not already be a member of a clique as a successor.
17651 blk->bbStkTempsIn = m_baseTmp;
17655 assert(predOrSucc == SpillCliquePred);
17656 assert(blk->bbStkTempsOut == NO_BASE_TMP); // Should not already be a member of a clique as a predecessor.
17657 blk->bbStkTempsOut = m_baseTmp;
17661 void Compiler::ReimportSpillClique::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk)
17663 // For Preds we could be a little smarter and just find the existing store
17664 // and re-type it/add a cast, but that is complicated and hopefully very rare, so
17665 // just re-import the whole block (just like we do for successors)
17667 if (((blk->bbFlags & BBF_IMPORTED) == 0) && (m_pComp->impGetPendingBlockMember(blk) == 0))
17669 // If we haven't imported this block and we're not going to (because it isn't on
17670 // the pending list) then just ignore it for now.
17672 // This block has either never been imported (EntryState == NULL) or it failed
17673 // verification. Neither state requires us to force it to be imported now.
17674 assert((blk->bbEntryState == nullptr) || (blk->bbFlags & BBF_FAILED_VERIFICATION));
17678 // For successors we have a valid verCurrentState, so just mark them for reimport
17679 // the 'normal' way
17680 // Unlike predecessors, we *DO* need to reimport the current block because the
17681 // initial import had the wrong entry state types.
17682 // Similarly, blocks that are currently on the pending list, still need to call
17683 // impImportBlockPending to fixup their entry state.
17684 if (predOrSucc == SpillCliqueSucc)
17686 m_pComp->impReimportMarkBlock(blk);
17688 // Set the current stack state to that of the blk->bbEntryState
17689 m_pComp->verResetCurrentState(blk, &m_pComp->verCurrentState);
17690 assert(m_pComp->verCurrentState.thisInitialized == blk->bbThisOnEntry());
17692 m_pComp->impImportBlockPending(blk);
17694 else if ((blk != m_pComp->compCurBB) && ((blk->bbFlags & BBF_IMPORTED) != 0))
17696 // As described above, we are only visiting predecessors so they can
17697 // add the appropriate casts. Since we have already done that for the current
17698 // block, it does not need to be reimported.
17699 // Nor do we need to reimport blocks that are still pending, but not yet imported.
17702 // For predecessors, we have no state to seed the EntryState, so we just have
17703 // to assume the existing one is correct.
17704 // If the block is also a successor, it will get the EntryState properly
17705 // updated when it is visited as a successor in the above "if" block.
17706 assert(predOrSucc == SpillCliquePred);
17707 m_pComp->impReimportBlockPending(blk);
17711 // Re-type the incoming lclVar nodes to match the varDsc.
17712 void Compiler::impRetypeEntryStateTemps(BasicBlock* blk)
17714 if (blk->bbEntryState != nullptr)
17716 EntryState* es = blk->bbEntryState;
17717 for (unsigned level = 0; level < es->esStackDepth; level++)
17719 GenTree* tree = es->esStack[level].val;
17720 if ((tree->gtOper == GT_LCL_VAR) || (tree->gtOper == GT_LCL_FLD))
17722 unsigned lclNum = tree->gtLclVarCommon.gtLclNum;
17723 noway_assert(lclNum < lvaCount);
17724 LclVarDsc* varDsc = lvaTable + lclNum;
17725 es->esStack[level].val->gtType = varDsc->TypeGet();
17731 unsigned Compiler::impGetSpillTmpBase(BasicBlock* block)
17733 if (block->bbStkTempsOut != NO_BASE_TMP)
17735 return block->bbStkTempsOut;
17741 printf("\n*************** In impGetSpillTmpBase(" FMT_BB ")\n", block->bbNum);
17745 // Otherwise, choose one, and propagate to all members of the spill clique.
17746 // Grab enough temps for the whole stack.
17747 unsigned baseTmp = lvaGrabTemps(verCurrentState.esStackDepth DEBUGARG("IL Stack Entries"));
17748 SetSpillTempsBase callback(baseTmp);
17750 // We do *NOT* need to reset the SpillClique*Members because a block can only be the predecessor
17751 // to one spill clique, and similarly can only be the successor to one spill clique
17752 impWalkSpillCliqueFromPred(block, &callback);
17757 void Compiler::impReimportSpillClique(BasicBlock* block)
17762 printf("\n*************** In impReimportSpillClique(" FMT_BB ")\n", block->bbNum);
17766 // If we get here, it is because this block is already part of a spill clique
17767 // and one predecessor had an outgoing live stack slot of type int, and this
17768 // block has an outgoing live stack slot of type native int.
17769 // We need to reset these before traversal because they have already been set
17770 // by the previous walk to determine all the members of the spill clique.
17771 impInlineRoot()->impSpillCliquePredMembers.Reset();
17772 impInlineRoot()->impSpillCliqueSuccMembers.Reset();
17774 ReimportSpillClique callback(this);
17776 impWalkSpillCliqueFromPred(block, &callback);
17779 // Set the pre-state of "block" (which should not have a pre-state allocated) to
17780 // a copy of "srcState", cloning tree pointers as required.
17781 void Compiler::verInitBBEntryState(BasicBlock* block, EntryState* srcState)
17783 if (srcState->esStackDepth == 0 && srcState->thisInitialized == TIS_Bottom)
17785 block->bbEntryState = nullptr;
17789 block->bbEntryState = getAllocator(CMK_Unknown).allocate<EntryState>(1);
17791 // block->bbEntryState.esRefcount = 1;
17793 block->bbEntryState->esStackDepth = srcState->esStackDepth;
17794 block->bbEntryState->thisInitialized = TIS_Bottom;
17796 if (srcState->esStackDepth > 0)
17798 block->bbSetStack(new (this, CMK_Unknown) StackEntry[srcState->esStackDepth]);
17799 unsigned stackSize = srcState->esStackDepth * sizeof(StackEntry);
17801 memcpy(block->bbEntryState->esStack, srcState->esStack, stackSize);
17802 for (unsigned level = 0; level < srcState->esStackDepth; level++)
17804 GenTree* tree = srcState->esStack[level].val;
17805 block->bbEntryState->esStack[level].val = gtCloneExpr(tree);
17809 if (verTrackObjCtorInitState)
17811 verSetThisInit(block, srcState->thisInitialized);
17817 void Compiler::verSetThisInit(BasicBlock* block, ThisInitState tis)
17819 assert(tis != TIS_Bottom); // Precondition.
17820 if (block->bbEntryState == nullptr)
17822 block->bbEntryState = new (this, CMK_Unknown) EntryState();
17825 block->bbEntryState->thisInitialized = tis;
17829 * Resets the current state to the state at the start of the basic block
17831 void Compiler::verResetCurrentState(BasicBlock* block, EntryState* destState)
17834 if (block->bbEntryState == nullptr)
17836 destState->esStackDepth = 0;
17837 destState->thisInitialized = TIS_Bottom;
17841 destState->esStackDepth = block->bbEntryState->esStackDepth;
17843 if (destState->esStackDepth > 0)
17845 unsigned stackSize = destState->esStackDepth * sizeof(StackEntry);
17847 memcpy(destState->esStack, block->bbStackOnEntry(), stackSize);
17850 destState->thisInitialized = block->bbThisOnEntry();
17855 ThisInitState BasicBlock::bbThisOnEntry()
17857 return bbEntryState ? bbEntryState->thisInitialized : TIS_Bottom;
17860 unsigned BasicBlock::bbStackDepthOnEntry()
17862 return (bbEntryState ? bbEntryState->esStackDepth : 0);
17865 void BasicBlock::bbSetStack(void* stackBuffer)
17867 assert(bbEntryState);
17868 assert(stackBuffer);
17869 bbEntryState->esStack = (StackEntry*)stackBuffer;
17872 StackEntry* BasicBlock::bbStackOnEntry()
17874 assert(bbEntryState);
17875 return bbEntryState->esStack;
17878 void Compiler::verInitCurrentState()
17880 verTrackObjCtorInitState = FALSE;
17881 verCurrentState.thisInitialized = TIS_Bottom;
17883 if (tiVerificationNeeded)
17885 // Track this ptr initialization
17886 if (!info.compIsStatic && (info.compFlags & CORINFO_FLG_CONSTRUCTOR) && lvaTable[0].lvVerTypeInfo.IsObjRef())
17888 verTrackObjCtorInitState = TRUE;
17889 verCurrentState.thisInitialized = TIS_Uninit;
17893 // initialize stack info
17895 verCurrentState.esStackDepth = 0;
17896 assert(verCurrentState.esStack != nullptr);
17898 // copy current state to entry state of first BB
17899 verInitBBEntryState(fgFirstBB, &verCurrentState);
17902 Compiler* Compiler::impInlineRoot()
17904 if (impInlineInfo == nullptr)
17910 return impInlineInfo->InlineRoot;
17914 BYTE Compiler::impSpillCliqueGetMember(SpillCliqueDir predOrSucc, BasicBlock* blk)
17916 if (predOrSucc == SpillCliquePred)
17918 return impInlineRoot()->impSpillCliquePredMembers.Get(blk->bbInd());
17922 assert(predOrSucc == SpillCliqueSucc);
17923 return impInlineRoot()->impSpillCliqueSuccMembers.Get(blk->bbInd());
17927 void Compiler::impSpillCliqueSetMember(SpillCliqueDir predOrSucc, BasicBlock* blk, BYTE val)
17929 if (predOrSucc == SpillCliquePred)
17931 impInlineRoot()->impSpillCliquePredMembers.Set(blk->bbInd(), val);
17935 assert(predOrSucc == SpillCliqueSucc);
17936 impInlineRoot()->impSpillCliqueSuccMembers.Set(blk->bbInd(), val);
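// Illustrative sketch (not part of the original source; hypothetical names):
// the membership queries above are simply byte maps on the inline root,
// indexed by a dense per-block id, with one map per direction, so testing
// or marking a block as a clique predecessor or successor is O(1).
namespace CliqueMembershipSketch
{
    enum Dir
    {
        Pred = 0,
        Succ = 1
    };

    struct Maps
    {
        unsigned char* members[2]; // one byte map per direction

        unsigned char Get(Dir d, unsigned blockId)
        {
            return members[d][blockId];
        }

        void Set(Dir d, unsigned blockId, unsigned char val)
        {
            members[d][blockId] = val;
        }
    };
}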
17940 /*****************************************************************************
17942 * Convert the instrs ("import") into our internal format (trees). The
17943 * basic flowgraph has already been constructed and is passed in.
17946 void Compiler::impImport(BasicBlock* method)
17951 printf("*************** In impImport() for %s\n", info.compFullName);
17955 Compiler* inlineRoot = impInlineRoot();
17957 if (info.compMaxStack <= SMALL_STACK_SIZE)
17959 impStkSize = SMALL_STACK_SIZE;
17963 impStkSize = info.compMaxStack;
17966 if (this == inlineRoot)
17968 // Allocate the stack contents
17969 verCurrentState.esStack = new (this, CMK_ImpStack) StackEntry[impStkSize];
17973 // This is the inlinee compiler; steal the stack from the inliner compiler
17974 // (after ensuring that it is large enough).
17975 if (inlineRoot->impStkSize < impStkSize)
17977 inlineRoot->impStkSize = impStkSize;
17978 inlineRoot->verCurrentState.esStack = new (this, CMK_ImpStack) StackEntry[impStkSize];
17981 verCurrentState.esStack = inlineRoot->verCurrentState.esStack;
17984 // initialize the entry state at start of method
17985 verInitCurrentState();
17987 // Initialize stuff related to figuring "spill cliques" (see spec comment for impGetSpillTmpBase).
17988 if (this == inlineRoot) // These are only used on the root of the inlining tree.
17990 // We have initialized these previously, but to size 0. Make them larger.
17991 impPendingBlockMembers.Init(getAllocator(), fgBBNumMax * 2);
17992 impSpillCliquePredMembers.Init(getAllocator(), fgBBNumMax * 2);
17993 impSpillCliqueSuccMembers.Init(getAllocator(), fgBBNumMax * 2);
17995 inlineRoot->impPendingBlockMembers.Reset(fgBBNumMax * 2);
17996 inlineRoot->impSpillCliquePredMembers.Reset(fgBBNumMax * 2);
17997 inlineRoot->impSpillCliqueSuccMembers.Reset(fgBBNumMax * 2);
17998 impBlockListNodeFreeList = nullptr;
18001 impLastILoffsStmt = nullptr;
18002 impNestedStackSpill = false;
18004 impBoxTemp = BAD_VAR_NUM;
18006 impPendingList = impPendingFree = nullptr;
18008 /* Add the entry-point to the worker-list */
18010 // Skip leading internal blocks. There can be one as a leading scratch BB, and more
18011 // from EH normalization.
18012 // NOTE: It might be possible to always just put fgFirstBB on the pending list, and let everything else just fall out from there.
18014 for (; method->bbFlags & BBF_INTERNAL; method = method->bbNext)
18016 // Treat these as imported.
18017 assert(method->bbJumpKind == BBJ_NONE); // We assume all the leading ones are fallthrough.
18018 JITDUMP("Marking leading BBF_INTERNAL block " FMT_BB " as BBF_IMPORTED\n", method->bbNum);
18019 method->bbFlags |= BBF_IMPORTED;
18022 impImportBlockPending(method);
18024 /* Import blocks in the worker-list until there are no more */
18026 while (impPendingList)
18028 /* Remove the entry at the front of the list */
18030 PendingDsc* dsc = impPendingList;
18031 impPendingList = impPendingList->pdNext;
18032 impSetPendingBlockMember(dsc->pdBB, 0);
18034 /* Restore the stack state */
18036 verCurrentState.thisInitialized = dsc->pdThisPtrInit;
18037 verCurrentState.esStackDepth = dsc->pdSavedStack.ssDepth;
18038 if (verCurrentState.esStackDepth)
18040 impRestoreStackState(&dsc->pdSavedStack);
18043 /* Add the entry to the free list for reuse */
18045 dsc->pdNext = impPendingFree;
18046 impPendingFree = dsc;
18048 /* Now import the block */
18050 if (dsc->pdBB->bbFlags & BBF_FAILED_VERIFICATION)
18053 #ifdef _TARGET_64BIT_
18054 // On AMD64, during verification we have to match JIT64 behavior since the VM is very tightly
18055 // coupled with the JIT64 IL Verification logic. Look inside verHandleVerificationFailure
18056 // method for further explanation on why we raise this exception instead of making the jitted
18057 // code throw the verification exception during execution.
18058 if (tiVerificationNeeded && opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IMPORT_ONLY))
18060 BADCODE("Basic block marked as not verifiable");
18063 #endif // _TARGET_64BIT_
18065 verConvertBBToThrowVerificationException(dsc->pdBB DEBUGARG(true));
18066 impEndTreeList(dsc->pdBB);
18071 impImportBlock(dsc->pdBB);
18073 if (compDonotInline())
18077 if (compIsForImportOnly() && !tiVerificationNeeded)
18085 if (verbose && info.compXcptnsCount)
18087 printf("\nAfter impImport() added block for try,catch,finally");
18088 fgDispBasicBlocks();
18092 // Used in impImportBlockPending() for STRESS_CHK_REIMPORT
18093 for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
18095 block->bbFlags &= ~BBF_VISITED;
18099 assert(!compIsForInlining() || !tiVerificationNeeded);
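// Illustrative sketch (not part of the original source; hypothetical types):
// the shape of the import driver above. Blocks are popped off a pending
// list, the descriptor is recycled onto a free list, and importing a block
// may push more blocks back onto the pending list until a fixed point.
namespace ImportLoopSketch
{
    struct Block;

    struct Pending
    {
        Pending* next;
        Block*   block;
    };

    struct Driver
    {
        Pending* pendingList;
        Pending* pendingFree;

        void ImportBlock(Block* block)
        {
            // In the real code this restores the entry stack state, imports
            // the IL, and may enqueue newly reachable blocks.
            (void)block; // unused in the sketch
        }

        void Run()
        {
            while (pendingList != nullptr)
            {
                Pending* dsc = pendingList; // pop the head of the worklist
                pendingList  = dsc->next;

                Block* block = dsc->block;
                dsc->next    = pendingFree; // recycle the descriptor
                pendingFree  = dsc;

                ImportBlock(block); // may add new entries to pendingList
            }
        }
    };
}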
18102 // Checks if a typeinfo (usually stored in the type stack) is a struct.
18103 // The invariant here is that if it's not a ref or a method and has a class handle,
18104 // it's a valuetype.
18105 bool Compiler::impIsValueType(typeInfo* pTypeInfo)
18107 if (pTypeInfo && pTypeInfo->IsValueClassWithClsHnd())
18117 /*****************************************************************************
18118 * Check to see if the tree is the address of a local or
18119 the address of a field in a local.
18121 *lclVarTreeOut will contain the GT_LCL_VAR tree when it returns TRUE.
18125 BOOL Compiler::impIsAddressInLocal(GenTree* tree, GenTree** lclVarTreeOut)
18127 if (tree->gtOper != GT_ADDR)
18132 GenTree* op = tree->gtOp.gtOp1;
18133 while (op->gtOper == GT_FIELD)
18135 op = op->gtField.gtFldObj;
18136 if (op && op->gtOper == GT_ADDR) // Skip static fields where op will be NULL.
18138 op = op->gtOp.gtOp1;
18146 if (op->gtOper == GT_LCL_VAR)
18148 *lclVarTreeOut = op;
18157 //------------------------------------------------------------------------
18158 // impMakeDiscretionaryInlineObservations: make observations that help
18159 // determine the profitability of a discretionary inline
18162 // pInlineInfo -- InlineInfo for the inline, or null for the prejit root
18163 // inlineResult -- InlineResult accumulating information about this inline
18166 // If inlining or prejitting the root, this method also makes
18167 // various observations about the method that factor into inline
18168 // decisions. It sets `compNativeSizeEstimate` as a side effect.
18170 void Compiler::impMakeDiscretionaryInlineObservations(InlineInfo* pInlineInfo, InlineResult* inlineResult)
18172 assert(pInlineInfo != nullptr && compIsForInlining() || // Perform the actual inlining.
18173 pInlineInfo == nullptr && !compIsForInlining() // Calculate the static inlining hint for ngen.
18176 // If we're really inlining, we should just have one result in play.
18177 assert((pInlineInfo == nullptr) || (inlineResult == pInlineInfo->inlineResult));
18179 // If this is a "forceinline" method, the JIT probably shouldn't have gone
18180 // to the trouble of estimating the native code size. Even if it did, it
18181 // shouldn't be relying on the result of this method.
18182 assert(inlineResult->GetObservation() == InlineObservation::CALLEE_IS_DISCRETIONARY_INLINE);
18184 // Note if the caller contains NEWOBJ or NEWARR.
18185 Compiler* rootCompiler = impInlineRoot();
18187 if ((rootCompiler->optMethodFlags & OMF_HAS_NEWARRAY) != 0)
18189 inlineResult->Note(InlineObservation::CALLER_HAS_NEWARRAY);
18192 if ((rootCompiler->optMethodFlags & OMF_HAS_NEWOBJ) != 0)
18194 inlineResult->Note(InlineObservation::CALLER_HAS_NEWOBJ);
18197 bool calleeIsStatic = (info.compFlags & CORINFO_FLG_STATIC) != 0;
18198 bool isSpecialMethod = (info.compFlags & CORINFO_FLG_CONSTRUCTOR) != 0;
18200 if (isSpecialMethod)
18202 if (calleeIsStatic)
18204 inlineResult->Note(InlineObservation::CALLEE_IS_CLASS_CTOR);
18208 inlineResult->Note(InlineObservation::CALLEE_IS_INSTANCE_CTOR);
18211 else if (!calleeIsStatic)
18213 // Callee is an instance method.
18215 // Check if the callee has the same 'this' as the root.
18216 if (pInlineInfo != nullptr)
18218 GenTree* thisArg = pInlineInfo->iciCall->gtCall.gtCallObjp;
18220 bool isSameThis = impIsThis(thisArg);
18221 inlineResult->NoteBool(InlineObservation::CALLSITE_IS_SAME_THIS, isSameThis);
18225 // Note if the callee's class is a promotable struct
18226 if ((info.compClassAttr & CORINFO_FLG_VALUECLASS) != 0)
18228 assert(structPromotionHelper != nullptr);
18229 if (structPromotionHelper->CanPromoteStructType(info.compClassHnd))
18231 inlineResult->Note(InlineObservation::CALLEE_CLASS_PROMOTABLE);
18235 #ifdef FEATURE_SIMD
18237 // Note if this method has SIMD args or a SIMD return value
18238 if (pInlineInfo != nullptr && pInlineInfo->hasSIMDTypeArgLocalOrReturn)
18240 inlineResult->Note(InlineObservation::CALLEE_HAS_SIMD);
18243 #endif // FEATURE_SIMD
18245 // Roughly classify callsite frequency.
18246 InlineCallsiteFrequency frequency = InlineCallsiteFrequency::UNUSED;
18248 // If this is a prejit root, or a maximally hot block...
18249 if ((pInlineInfo == nullptr) || (pInlineInfo->iciBlock->bbWeight >= BB_MAX_WEIGHT))
18251 frequency = InlineCallsiteFrequency::HOT;
18253 // No training data. Look for loop-like things.
18254 // We consider a recursive call loop-like. Do not give the inlining boost to the method itself.
18255 // However, give it to things nearby.
18256 else if ((pInlineInfo->iciBlock->bbFlags & BBF_BACKWARD_JUMP) &&
18257 (pInlineInfo->fncHandle != pInlineInfo->inlineCandidateInfo->ilCallerHandle))
18259 frequency = InlineCallsiteFrequency::LOOP;
18261 else if (pInlineInfo->iciBlock->hasProfileWeight() && (pInlineInfo->iciBlock->bbWeight > BB_ZERO_WEIGHT))
18263 frequency = InlineCallsiteFrequency::WARM;
18265 // Now modify the multiplier based on where we're called from.
18266 else if (pInlineInfo->iciBlock->isRunRarely() || ((info.compFlags & FLG_CCTOR) == FLG_CCTOR))
18268 frequency = InlineCallsiteFrequency::RARE;
18272 frequency = InlineCallsiteFrequency::BORING;
18275 // Also capture the block weight of the call site. In the prejit
18276 // root case, assume there's some hot call site for this method.
18277 unsigned weight = 0;
18279 if (pInlineInfo != nullptr)
18281 weight = pInlineInfo->iciBlock->bbWeight;
18285 weight = BB_MAX_WEIGHT;
18288 inlineResult->NoteInt(InlineObservation::CALLSITE_FREQUENCY, static_cast<int>(frequency));
18289 inlineResult->NoteInt(InlineObservation::CALLSITE_WEIGHT, static_cast<int>(weight));
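// Illustrative sketch (not part of the original source; hypothetical names)
// of the classification above, in decision order: prejit roots and maximally
// hot blocks are HOT; blocks in backward-jump regions get the loop boost
// unless the call is directly recursive; profiled nonzero weights are WARM;
// rarely run blocks (and class constructors) are RARE; everything else is
// BORING.
namespace CallsiteFreqSketch
{
    enum class Freq
    {
        Boring,
        Rare,
        Warm,
        Loop,
        Hot
    };

    struct Site
    {
        bool     isPrejitRoot;
        bool     isMaxWeight;
        bool     inBackwardJumpRegion;
        bool     isDirectlyRecursive;
        bool     hasProfileWeight;
        unsigned weight;
        bool     isRunRarely;
    };

    Freq Classify(const Site& s)
    {
        if (s.isPrejitRoot || s.isMaxWeight)
        {
            return Freq::Hot;
        }
        if (s.inBackwardJumpRegion && !s.isDirectlyRecursive)
        {
            return Freq::Loop;
        }
        if (s.hasProfileWeight && (s.weight > 0))
        {
            return Freq::Warm;
        }
        if (s.isRunRarely)
        {
            return Freq::Rare;
        }
        return Freq::Boring;
    }
}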
18292 /*****************************************************************************
18293 This method makes a STATIC inlining decision based on the IL code.
18294 It should not make any inlining decision based on the context.
18295 If forceInline is true, then the inlining decision should not depend on
18296 performance heuristics (code size, etc.).
18299 void Compiler::impCanInlineIL(CORINFO_METHOD_HANDLE fncHandle,
18300 CORINFO_METHOD_INFO* methInfo,
18302 InlineResult* inlineResult)
18304 unsigned codeSize = methInfo->ILCodeSize;
18306 // We shouldn't have made up our minds yet...
18307 assert(!inlineResult->IsDecided());
18309 if (methInfo->EHcount)
18311 inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_EH);
18315 if ((methInfo->ILCode == nullptr) || (codeSize == 0))
18317 inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NO_BODY);
18321 // For now we don't inline varargs (import code can't handle it)
18323 if (methInfo->args.isVarArg())
18325 inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS);
18329 // Reject if it has too many locals.
18330 // This is currently an implementation limit due to fixed-size arrays in the
18331 // inline info, rather than a performance heuristic.
18333 inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_LOCALS, methInfo->locals.numArgs);
18335 if (methInfo->locals.numArgs > MAX_INL_LCLS)
18337 inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_LOCALS);
18341 // Make sure there aren't too many arguments.
18342 // This is currently an implementation limit due to fixed-size arrays in the
18343 // inline info, rather than a performance heuristic.
18345 inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_ARGUMENTS, methInfo->args.numArgs);
18347 if (methInfo->args.numArgs > MAX_INL_ARGS)
18349 inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_ARGUMENTS);
18353 // Note force inline state
18355 inlineResult->NoteBool(InlineObservation::CALLEE_IS_FORCE_INLINE, forceInline);
18357 // Note IL code size
18359 inlineResult->NoteInt(InlineObservation::CALLEE_IL_CODE_SIZE, codeSize);
18361 if (inlineResult->IsFailure())
18366 // Make sure maxstack is not too big
18368 inlineResult->NoteInt(InlineObservation::CALLEE_MAXSTACK, methInfo->maxStack);
18370 if (inlineResult->IsFailure())
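// Illustrative sketch (not part of the original source; hypothetical names
// and limits): the shape of the static screen above. Hard structural
// rejects (EH, no body, varargs, too many locals or args) come first, and
// only then are the size and maxstack heuristics consulted.
namespace StaticScreenSketch
{
    struct MethodShape
    {
        unsigned ehCount;
        unsigned ilSize;
        bool     isVarArg;
        unsigned localCount;
        unsigned argCount;
    };

    const unsigned MaxLocals = 32; // stand-in for MAX_INL_LCLS
    const unsigned MaxArgs   = 32; // stand-in for MAX_INL_ARGS

    bool PassesStaticScreen(const MethodShape& m)
    {
        if (m.ehCount != 0)
        {
            return false; // CALLEE_HAS_EH
        }
        if (m.ilSize == 0)
        {
            return false; // CALLEE_HAS_NO_BODY
        }
        if (m.isVarArg)
        {
            return false; // CALLEE_HAS_MANAGED_VARARGS
        }
        if (m.localCount > MaxLocals)
        {
            return false; // CALLEE_TOO_MANY_LOCALS
        }
        if (m.argCount > MaxArgs)
        {
            return false; // CALLEE_TOO_MANY_ARGUMENTS
        }
        return true; // size/maxstack heuristics run after these hard limits
    }
}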
18376 /*****************************************************************************
18379 void Compiler::impCheckCanInline(GenTree* call,
18380 CORINFO_METHOD_HANDLE fncHandle,
18382 CORINFO_CONTEXT_HANDLE exactContextHnd,
18383 InlineCandidateInfo** ppInlineCandidateInfo,
18384 InlineResult* inlineResult)
18386 // Either EE or JIT might throw exceptions below.
18387 // If that happens, just don't inline the method.
18393 CORINFO_METHOD_HANDLE fncHandle;
18395 CORINFO_CONTEXT_HANDLE exactContextHnd;
18396 InlineResult* result;
18397 InlineCandidateInfo** ppInlineCandidateInfo;
18399 memset(&param, 0, sizeof(param));
18401 param.pThis = this;
18403 param.fncHandle = fncHandle;
18404 param.methAttr = methAttr;
18405 param.exactContextHnd = (exactContextHnd != nullptr) ? exactContextHnd : MAKE_METHODCONTEXT(fncHandle);
18406 param.result = inlineResult;
18407 param.ppInlineCandidateInfo = ppInlineCandidateInfo;
18409 bool success = eeRunWithErrorTrap<Param>(
18410 [](Param* pParam) {
18411 DWORD dwRestrictions = 0;
18412 CorInfoInitClassResult initClassResult;
18415 const char* methodName;
18416 const char* className;
18417 methodName = pParam->pThis->eeGetMethodName(pParam->fncHandle, &className);
18419 if (JitConfig.JitNoInline())
18421 pParam->result->NoteFatal(InlineObservation::CALLEE_IS_JIT_NOINLINE);
18426 /* Try to get the code address/size for the method */
18428 CORINFO_METHOD_INFO methInfo;
18429 if (!pParam->pThis->info.compCompHnd->getMethodInfo(pParam->fncHandle, &methInfo))
18431 pParam->result->NoteFatal(InlineObservation::CALLEE_NO_METHOD_INFO);
18436 forceInline = !!(pParam->methAttr & CORINFO_FLG_FORCEINLINE);
18438 pParam->pThis->impCanInlineIL(pParam->fncHandle, &methInfo, forceInline, pParam->result);
18440 if (pParam->result->IsFailure())
18442 assert(pParam->result->IsNever());
18446 // Speculatively check if initClass() can be done.
18447 // If it can be done, we will try to inline the method. If inlining
18448 // succeeds, then we will do the non-speculative initClass() and commit it.
18449 // If this speculative call to initClass() fails, there is no point
18450 // trying to inline this method.
18452 initClassResult = pParam->pThis->info.compCompHnd->initClass(nullptr /* field */, pParam->fncHandle /* method */,
18453 pParam->exactContextHnd /* context */,
18454 TRUE /* speculative */);
18456 if (initClassResult & CORINFO_INITCLASS_DONT_INLINE)
18458 pParam->result->NoteFatal(InlineObservation::CALLSITE_CLASS_INIT_FAILURE_SPEC);
18462 // Give the EE the final say in whether to inline or not.
18463 // This should be last since for verifiable code, this can be expensive
18465 /* VM Inline check also ensures that the method is verifiable if needed */
18466 CorInfoInline vmResult;
18467 vmResult = pParam->pThis->info.compCompHnd->canInline(pParam->pThis->info.compMethodHnd, pParam->fncHandle,
18470 if (vmResult == INLINE_FAIL)
18472 pParam->result->NoteFatal(InlineObservation::CALLSITE_IS_VM_NOINLINE);
18474 else if (vmResult == INLINE_NEVER)
18476 pParam->result->NoteFatal(InlineObservation::CALLEE_IS_VM_NOINLINE);
18479 if (pParam->result->IsFailure())
18481 // Make sure not to report this one. It was already reported by the VM.
18482 pParam->result->SetReported();
18486 // check for unsupported inlining restrictions
18487 assert((dwRestrictions & ~(INLINE_RESPECT_BOUNDARY | INLINE_NO_CALLEE_LDSTR | INLINE_SAME_THIS)) == 0);
18489 if (dwRestrictions & INLINE_SAME_THIS)
18491 GenTree* thisArg = pParam->call->gtCall.gtCallObjp;
18494 if (!pParam->pThis->impIsThis(thisArg))
18496 pParam->result->NoteFatal(InlineObservation::CALLSITE_REQUIRES_SAME_THIS);
18501 /* Get the method properties */
18503 CORINFO_CLASS_HANDLE clsHandle;
18504 clsHandle = pParam->pThis->info.compCompHnd->getMethodClass(pParam->fncHandle);
18506 clsAttr = pParam->pThis->info.compCompHnd->getClassAttribs(clsHandle);
18508 /* Get the return type */
18510 var_types fncRetType;
18511 fncRetType = pParam->call->TypeGet();
18514 var_types fncRealRetType;
18515 fncRealRetType = JITtype2varType(methInfo.args.retType);
18517 assert((genActualType(fncRealRetType) == genActualType(fncRetType)) ||
18518 // <BUGNUM> VSW 288602 </BUGNUM>
18519 // In the case of IJW, we allow assigning a native pointer to a BYREF.
18520 (fncRetType == TYP_BYREF && methInfo.args.retType == CORINFO_TYPE_PTR) ||
18521 (varTypeIsStruct(fncRetType) && (fncRealRetType == TYP_STRUCT)));
18525 // Allocate an InlineCandidateInfo structure
18527 InlineCandidateInfo* pInfo;
18528 pInfo = new (pParam->pThis, CMK_Inlining) InlineCandidateInfo;
18530 pInfo->dwRestrictions = dwRestrictions;
18531 pInfo->methInfo = methInfo;
18532 pInfo->methAttr = pParam->methAttr;
18533 pInfo->clsHandle = clsHandle;
18534 pInfo->clsAttr = clsAttr;
18535 pInfo->fncRetType = fncRetType;
18536 pInfo->exactContextHnd = pParam->exactContextHnd;
18537 pInfo->ilCallerHandle = pParam->pThis->info.compMethodHnd;
18538 pInfo->initClassResult = initClassResult;
18539 pInfo->preexistingSpillTemp = BAD_VAR_NUM;
18541 *(pParam->ppInlineCandidateInfo) = pInfo;
18548 param.result->NoteFatal(InlineObservation::CALLSITE_COMPILATION_ERROR);
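// Illustrative sketch (not part of the original source; hypothetical names):
// the error-trap pattern used above. The checks run over an explicit
// parameter block through a captureless callable, so if the EE or the JIT
// throws, control returns to the caller and the candidate is simply
// rejected instead of the failure escaping into the rest of compilation.
namespace ErrorTrapSketch
{
    struct Param
    {
        int  input;
        bool ok;
    };

    template <typename P>
    bool RunWithErrorTrap(void (*work)(P*), P* param)
    {
        try
        {
            work(param);
            return true;
        }
        catch (...)
        {
            return false; // any failure means "do not inline"
        }
    }

    void Check(Param* p)
    {
        if (p->input < 0)
        {
            throw 1; // stand-in for an EE/JIT failure
        }
        p->ok = true;
    }

    bool Screen(int input)
    {
        Param param = {};
        param.input = input;
        return RunWithErrorTrap(Check, &param) && param.ok;
    }
}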
18552 //------------------------------------------------------------------------
18553 // impInlineRecordArgInfo: record information about an inline candidate argument
18556 // pInlineInfo - inline info for the inline candidate
18557 // curArgVal - tree for the caller actual argument value
18558 // argNum - logical index of this argument
18559 // inlineResult - result of ongoing inline evaluation
18563 // Checks for various inline blocking conditions and makes notes in
18564 // the inline info arg table about the properties of the actual. These
18565 // properties are used later by impInlineFetchArg to determine how best to
18566 // pass the argument into the inlinee.
18568 void Compiler::impInlineRecordArgInfo(InlineInfo* pInlineInfo,
18569 GenTree* curArgVal,
18571 InlineResult* inlineResult)
18573 InlArgInfo* inlCurArgInfo = &pInlineInfo->inlArgInfo[argNum];
18575 if (curArgVal->gtOper == GT_MKREFANY)
18577 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_IS_MKREFANY);
18581 inlCurArgInfo->argNode = curArgVal;
18583 GenTree* lclVarTree;
18584 if (impIsAddressInLocal(curArgVal, &lclVarTree) && varTypeIsStruct(lclVarTree))
18586 inlCurArgInfo->argIsByRefToStructLocal = true;
18587 #ifdef FEATURE_SIMD
18588 if (lvaTable[lclVarTree->AsLclVarCommon()->gtLclNum].lvSIMDType)
18590 pInlineInfo->hasSIMDTypeArgLocalOrReturn = true;
18592 #endif // FEATURE_SIMD
18595 if (curArgVal->gtFlags & GTF_ALL_EFFECT)
18597 inlCurArgInfo->argHasGlobRef = (curArgVal->gtFlags & GTF_GLOB_REF) != 0;
18598 inlCurArgInfo->argHasSideEff = (curArgVal->gtFlags & (GTF_ALL_EFFECT & ~GTF_GLOB_REF)) != 0;
18601 if (curArgVal->gtOper == GT_LCL_VAR)
18603 inlCurArgInfo->argIsLclVar = true;
18605 /* Remember the "original" argument number */
18606 curArgVal->gtLclVar.gtLclILoffs = argNum;
18609 if ((curArgVal->OperKind() & GTK_CONST) ||
18610 ((curArgVal->gtOper == GT_ADDR) && (curArgVal->gtOp.gtOp1->gtOper == GT_LCL_VAR)))
18612 inlCurArgInfo->argIsInvariant = true;
18613 if (inlCurArgInfo->argIsThis && (curArgVal->gtOper == GT_CNS_INT) && (curArgVal->gtIntCon.gtIconVal == 0))
18615 // Abort inlining at this call site
18616 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_HAS_NULL_THIS);
18621 // If the arg is a local that is address-taken, we can't safely
18622 // directly substitute it into the inlinee.
18624 // Previously we'd accomplish this by setting "argHasLdargaOp" but
18625 // that has a stronger meaning: that the arg value can change in
18626 // the method body. Using that flag prevents type propagation,
18627 // which is safe in this case.
18629 // Instead mark the arg as having a caller local ref.
18630 if (!inlCurArgInfo->argIsInvariant && gtHasLocalsWithAddrOp(curArgVal))
18632 inlCurArgInfo->argHasCallerLocalRef = true;
18638 if (inlCurArgInfo->argIsThis)
18640 printf("thisArg:");
18644 printf("\nArgument #%u:", argNum);
18646 if (inlCurArgInfo->argIsLclVar)
18648 printf(" is a local var");
18650 if (inlCurArgInfo->argIsInvariant)
18652 printf(" is a constant");
18654 if (inlCurArgInfo->argHasGlobRef)
18656 printf(" has global refs");
18658 if (inlCurArgInfo->argHasCallerLocalRef)
18660 printf(" has caller local ref");
18662 if (inlCurArgInfo->argHasSideEff)
18664 printf(" has side effects");
18666 if (inlCurArgInfo->argHasLdargaOp)
18668 printf(" has ldarga effect");
18670 if (inlCurArgInfo->argHasStargOp)
18672 printf(" has starg effect");
18674 if (inlCurArgInfo->argIsByRefToStructLocal)
18676 printf(" is byref to a struct local");
18680 gtDispTree(curArgVal);
18686 //------------------------------------------------------------------------
18687 // impInlineInitVars: setup inline information for inlinee args and locals
18690 // pInlineInfo - inline info for the inline candidate
18693 // This method primarily adds caller-supplied info to the inlArgInfo
18694 // and sets up the lclVarInfo table.
18696 // For args, the inlArgInfo records properties of the actual argument
18697 // including the tree node that produces the arg value. This node is
18698 // usually the tree node present at the call, but may also differ in
18700 // - when the call arg is a GT_RET_EXPR, we search back through the ret
18701 // expr chain for the actual node. Note this will either be the original
18702 // call (which will be a failed inline by this point), or the return
18703 // expression from some set of inlines.
18704 // - when argument type casting is needed the necessary casts are added
18705 // around the argument node.
18706 // - if an argument can be simplified by folding, then the node here is the folded result.
18709 // The method may make observations that lead to marking this candidate as
18710 // a failed inline. If this happens the initialization is abandoned immediately
18711 // to try and reduce the jit time cost for a failed inline.
18713 void Compiler::impInlineInitVars(InlineInfo* pInlineInfo)
18715 assert(!compIsForInlining());
18717 GenTree* call = pInlineInfo->iciCall;
18718 CORINFO_METHOD_INFO* methInfo = &pInlineInfo->inlineCandidateInfo->methInfo;
18719 unsigned clsAttr = pInlineInfo->inlineCandidateInfo->clsAttr;
18720 InlArgInfo* inlArgInfo = pInlineInfo->inlArgInfo;
18721 InlLclVarInfo* lclVarInfo = pInlineInfo->lclVarInfo;
18722 InlineResult* inlineResult = pInlineInfo->inlineResult;
18724 const bool hasRetBuffArg = impMethodInfo_hasRetBuffArg(methInfo);
18726 /* init the argument struct */
18728 memset(inlArgInfo, 0, (MAX_INL_ARGS + 1) * sizeof(inlArgInfo[0]));
18730 /* Get hold of the 'this' pointer and the argument list proper */
18732 GenTree* thisArg = call->gtCall.gtCallObjp;
18733 GenTree* argList = call->gtCall.gtCallArgs;
18734 unsigned argCnt = 0; // Count of the arguments
18736 assert((methInfo->args.hasThis()) == (thisArg != nullptr));
18740 inlArgInfo[0].argIsThis = true;
18741 GenTree* actualThisArg = thisArg->gtRetExprVal();
18742 impInlineRecordArgInfo(pInlineInfo, actualThisArg, argCnt, inlineResult);
18744 if (inlineResult->IsFailure())
18749 /* Increment the argument count */
18753 /* Record some information about each of the arguments */
18754 bool hasTypeCtxtArg = (methInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) != 0;
18756 #if USER_ARGS_COME_LAST
18757 unsigned typeCtxtArg = thisArg ? 1 : 0;
18758 #else // USER_ARGS_COME_LAST
18759 unsigned typeCtxtArg = methInfo->args.totalILArgs();
18760 #endif // USER_ARGS_COME_LAST
18762 for (GenTree* argTmp = argList; argTmp; argTmp = argTmp->gtOp.gtOp2)
18764 if (argTmp == argList && hasRetBuffArg)
18769 // Ignore the type context argument
18770 if (hasTypeCtxtArg && (argCnt == typeCtxtArg))
18772 pInlineInfo->typeContextArg = typeCtxtArg;
18773 typeCtxtArg = 0xFFFFFFFF;
18777 assert(argTmp->gtOper == GT_LIST);
18778 GenTree* arg = argTmp->gtOp.gtOp1;
18779 GenTree* actualArg = arg->gtRetExprVal();
18780 impInlineRecordArgInfo(pInlineInfo, actualArg, argCnt, inlineResult);
18782 if (inlineResult->IsFailure())
18787 /* Increment the argument count */
18791 /* Make sure we got the arg number right */
18792 assert(argCnt == methInfo->args.totalILArgs());
18794 #ifdef FEATURE_SIMD
18795 bool foundSIMDType = pInlineInfo->hasSIMDTypeArgLocalOrReturn;
18796 #endif // FEATURE_SIMD
18798 /* We have typeless opcodes, get type information from the signature */
18804 if (clsAttr & CORINFO_FLG_VALUECLASS)
18806 sigType = TYP_BYREF;
18813 lclVarInfo[0].lclVerTypeInfo = verMakeTypeInfo(pInlineInfo->inlineCandidateInfo->clsHandle);
18814 lclVarInfo[0].lclHasLdlocaOp = false;
18816 #ifdef FEATURE_SIMD
18817 // We always want to check isSIMDClass, since we want to set foundSIMDType (to increase
18818 // the inlining multiplier) for anything in that assembly.
18819 // But we only need to normalize it if it is a TYP_STRUCT
18820 // (which we need to do even if we have already set foundSIMDType).
18821 if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDorHWSIMDClass(&(lclVarInfo[0].lclVerTypeInfo)))
18823 if (sigType == TYP_STRUCT)
18825 sigType = impNormStructType(lclVarInfo[0].lclVerTypeInfo.GetClassHandle());
18827 foundSIMDType = true;
18829 #endif // FEATURE_SIMD
18830 lclVarInfo[0].lclTypeInfo = sigType;
18832 assert(varTypeIsGC(thisArg->gtType) || // "this" is managed
18833 (thisArg->gtType == TYP_I_IMPL && // "this" is unmanaged, but the method's class doesn't care
18834 (clsAttr & CORINFO_FLG_VALUECLASS)));
18836 if (genActualType(thisArg->gtType) != genActualType(sigType))
18838 if (sigType == TYP_REF)
18840 /* The argument cannot be bashed into a ref (see bug 750871) */
18841 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_REF);
18845 /* This can only happen with byrefs <-> ints/shorts */
18847 assert(genActualType(sigType) == TYP_I_IMPL || sigType == TYP_BYREF);
18848 assert(genActualType(thisArg->gtType) == TYP_I_IMPL || thisArg->gtType == TYP_BYREF);
18850 if (sigType == TYP_BYREF)
18852 lclVarInfo[0].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
18854 else if (thisArg->gtType == TYP_BYREF)
18856 assert(sigType == TYP_I_IMPL);
18858 /* If possible change the BYREF to an int */
18859 if (thisArg->IsVarAddr())
18861 thisArg->gtType = TYP_I_IMPL;
18862 lclVarInfo[0].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
18866 /* Arguments 'int <- byref' cannot be bashed */
18867 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_INT);
18874 /* Init the types of the arguments and make sure the types
18875 * from the trees match the types in the signature */
18877 CORINFO_ARG_LIST_HANDLE argLst;
18878 argLst = methInfo->args.args;
18881 for (i = (thisArg ? 1 : 0); i < argCnt; i++, argLst = info.compCompHnd->getArgNext(argLst))
18883 var_types sigType = (var_types)eeGetArgType(argLst, &methInfo->args);
18885 lclVarInfo[i].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->args, argLst);
18887 #ifdef FEATURE_SIMD
18888 if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDorHWSIMDClass(&(lclVarInfo[i].lclVerTypeInfo)))
18890 // If this is a SIMD class (i.e. in the SIMD assembly), then we will consider that we've
18891 // found a SIMD type, even if this may not be a type we recognize (the assumption is that
18892 // it is likely to use a SIMD type, and therefore we want to increase the inlining multiplier).
18893 foundSIMDType = true;
18894 if (sigType == TYP_STRUCT)
18896 var_types structType = impNormStructType(lclVarInfo[i].lclVerTypeInfo.GetClassHandle());
18897 sigType = structType;
18900 #endif // FEATURE_SIMD
18902 lclVarInfo[i].lclTypeInfo = sigType;
18903 lclVarInfo[i].lclHasLdlocaOp = false;
18905 /* Does the tree type match the signature type? */
18907 GenTree* inlArgNode = inlArgInfo[i].argNode;
18909 if (sigType != inlArgNode->gtType)
18911 /* In valid IL, this can only happen for short integer types or byrefs <-> [native] ints,
18912 but in bad IL cases with caller-callee signature mismatches we can see other types.
18913 Intentionally reject cases with mismatches so the jit stays robust when
18914 encountering bad IL. */
18916 bool isPlausibleTypeMatch = (genActualType(sigType) == genActualType(inlArgNode->gtType)) ||
18917 (genActualTypeIsIntOrI(sigType) && inlArgNode->gtType == TYP_BYREF) ||
18918 (sigType == TYP_BYREF && genActualTypeIsIntOrI(inlArgNode->gtType));
18920 if (!isPlausibleTypeMatch)
18922 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_TYPES_INCOMPATIBLE);
18926 /* Is it a narrowing or widening cast?
18927 * Widening casts are ok since the value computed is already
18928 * normalized to an int (on the IL stack) */
18930 if (genTypeSize(inlArgNode->gtType) >= genTypeSize(sigType))
18932 if (sigType == TYP_BYREF)
18934 lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
18936 else if (inlArgNode->gtType == TYP_BYREF)
18938 assert(varTypeIsIntOrI(sigType));
18940 /* If possible bash the BYREF to an int */
18941 if (inlArgNode->IsVarAddr())
18943 inlArgNode->gtType = TYP_I_IMPL;
18944 lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
18948 /* Arguments 'int <- byref' cannot be changed */
18949 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_INT);
18953 else if (genTypeSize(sigType) < EA_PTRSIZE)
18955 /* Narrowing cast */
18957 if (inlArgNode->gtOper == GT_LCL_VAR &&
18958 !lvaTable[inlArgNode->gtLclVarCommon.gtLclNum].lvNormalizeOnLoad() &&
18959 sigType == lvaGetRealType(inlArgNode->gtLclVarCommon.gtLclNum))
18961 /* We don't need to insert a cast here as the variable
18962 was assigned a normalized value of the right type */
18967 inlArgNode = inlArgInfo[i].argNode = gtNewCastNode(TYP_INT, inlArgNode, false, sigType);
18969 inlArgInfo[i].argIsLclVar = false;
18971 /* Try to fold the node in case we have constant arguments */
18973 if (inlArgInfo[i].argIsInvariant)
18975 inlArgNode = gtFoldExprConst(inlArgNode);
18976 inlArgInfo[i].argNode = inlArgNode;
18977 assert(inlArgNode->OperIsConst());
18980 #ifdef _TARGET_64BIT_
18981 else if (genTypeSize(genActualType(inlArgNode->gtType)) < genTypeSize(sigType))
18983 // This should only happen for int -> native int widening
18984 inlArgNode = inlArgInfo[i].argNode =
18985 gtNewCastNode(genActualType(sigType), inlArgNode, false, sigType);
18987 inlArgInfo[i].argIsLclVar = false;
18989 /* Try to fold the node in case we have constant arguments */
18991 if (inlArgInfo[i].argIsInvariant)
18993 inlArgNode = gtFoldExprConst(inlArgNode);
18994 inlArgInfo[i].argNode = inlArgNode;
18995 assert(inlArgNode->OperIsConst());
18998 #endif // _TARGET_64BIT_
19003 /* Init the types of the local variables */
19005 CORINFO_ARG_LIST_HANDLE localsSig;
19006 localsSig = methInfo->locals.args;
19008 for (i = 0; i < methInfo->locals.numArgs; i++)
19011 var_types type = (var_types)eeGetArgType(localsSig, &methInfo->locals, &isPinned);
19013 lclVarInfo[i + argCnt].lclHasLdlocaOp = false;
19014 lclVarInfo[i + argCnt].lclIsPinned = isPinned;
19015 lclVarInfo[i + argCnt].lclTypeInfo = type;
19017 if (varTypeIsGC(type))
19019 pInlineInfo->numberOfGcRefLocals++;
19024 // Pinned locals may cause inlines to fail.
19025 inlineResult->Note(InlineObservation::CALLEE_HAS_PINNED_LOCALS);
19026 if (inlineResult->IsFailure())
19032 lclVarInfo[i + argCnt].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->locals, localsSig);
19034 // If this local is a struct type with GC fields, inform the inliner. It may choose to bail
19035 // out on the inline.
19036 if (type == TYP_STRUCT)
19038 CORINFO_CLASS_HANDLE lclHandle = lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle();
19039 DWORD typeFlags = info.compCompHnd->getClassAttribs(lclHandle);
19040 if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0)
19042 inlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT);
19043 if (inlineResult->IsFailure())
19048 // Do further notification in the case where the call site is rare; some policies do
19049 // not track the relative hotness of call sites for "always" inline cases.
19050 if (pInlineInfo->iciBlock->isRunRarely())
19052 inlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT);
19053 if (inlineResult->IsFailure())
19062 localsSig = info.compCompHnd->getArgNext(localsSig);
19064 #ifdef FEATURE_SIMD
19065 if ((!foundSIMDType || (type == TYP_STRUCT)) && isSIMDorHWSIMDClass(&(lclVarInfo[i + argCnt].lclVerTypeInfo)))
19067 foundSIMDType = true;
19068 if (featureSIMD && type == TYP_STRUCT)
19070 var_types structType = impNormStructType(lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle());
19071 lclVarInfo[i + argCnt].lclTypeInfo = structType;
19074 #endif // FEATURE_SIMD
19077 #ifdef FEATURE_SIMD
19078 if (!foundSIMDType && (call->AsCall()->gtRetClsHnd != nullptr) && isSIMDorHWSIMDClass(call->AsCall()->gtRetClsHnd))
19080 foundSIMDType = true;
19082 pInlineInfo->hasSIMDTypeArgLocalOrReturn = foundSIMDType;
19083 #endif // FEATURE_SIMD
19086 //------------------------------------------------------------------------
19087 // impInlineFetchLocal: get a local var that represents an inlinee local
19090 // lclNum -- number of the inlinee local
19091 // reason -- debug string describing purpose of the local var
19094 // Number of the local to use
19097 // This method is invoked only for locals actually used in the
19100 // Allocates a new temp if necessary, and copies key properties
19101 // over from the inlinee local var info.
19103 unsigned Compiler::impInlineFetchLocal(unsigned lclNum DEBUGARG(const char* reason))
19105 assert(compIsForInlining());
19107 unsigned tmpNum = impInlineInfo->lclTmpNum[lclNum];
19109 if (tmpNum == BAD_VAR_NUM)
19111 const InlLclVarInfo& inlineeLocal = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt];
19112 const var_types lclTyp = inlineeLocal.lclTypeInfo;
19114 // The lifetime of this local might span multiple BBs.
19115 // So it is a long lifetime local.
19116 impInlineInfo->lclTmpNum[lclNum] = tmpNum = lvaGrabTemp(false DEBUGARG(reason));
19118 // Copy over key info
19119 lvaTable[tmpNum].lvType = lclTyp;
19120 lvaTable[tmpNum].lvHasLdAddrOp = inlineeLocal.lclHasLdlocaOp;
19121 lvaTable[tmpNum].lvPinned = inlineeLocal.lclIsPinned;
19122 lvaTable[tmpNum].lvHasILStoreOp = inlineeLocal.lclHasStlocOp;
19123 lvaTable[tmpNum].lvHasMultipleILStoreOp = inlineeLocal.lclHasMultipleStlocOp;
19125 // Copy over class handle for ref types. Note this may be a
19126 // shared type -- someday perhaps we can get the exact
19127 // signature and pass in a more precise type.
19128 if (lclTyp == TYP_REF)
19130 assert(lvaTable[tmpNum].lvSingleDef == 0);
19132 lvaTable[tmpNum].lvSingleDef = !inlineeLocal.lclHasMultipleStlocOp && !inlineeLocal.lclHasLdlocaOp;
19133 if (lvaTable[tmpNum].lvSingleDef)
19135 JITDUMP("Marked V%02u as a single def temp\n", tmpNum);
19138 lvaSetClass(tmpNum, inlineeLocal.lclVerTypeInfo.GetClassHandleForObjRef());
19141 if (inlineeLocal.lclVerTypeInfo.IsStruct())
19143 if (varTypeIsStruct(lclTyp))
19145 lvaSetStruct(tmpNum, inlineeLocal.lclVerTypeInfo.GetClassHandle(), true /* unsafe value cls check */);
19149 // This is a wrapped primitive. Make sure the verstate knows that
19150 lvaTable[tmpNum].lvVerTypeInfo = inlineeLocal.lclVerTypeInfo;
19155 // Sanity check that we're properly prepared for gc ref locals.
19156 if (varTypeIsGC(lclTyp))
19158 // Since there are gc locals we should have seen them earlier
19159 // and if there was a return value, set up the spill temp.
19160 assert(impInlineInfo->HasGcRefLocals());
19161 assert((info.compRetNativeType == TYP_VOID) || fgNeedReturnSpillTemp());
19165 // Make sure all pinned locals count as gc refs.
19166 assert(!inlineeLocal.lclIsPinned);
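// Illustrative sketch (not part of the original source; hypothetical names):
// the memoization above in miniature. The first use of an inlinee local
// allocates a long-lifetime temp and caches its number; every later use
// hits the cache, so each inlinee local maps to exactly one temp.
namespace FetchLocalSketch
{
    const unsigned BadVarNum = 0xFFFFFFFF;
    const unsigned MaxLocals = 8;

    struct InlineState
    {
        unsigned lclTmpNum[MaxLocals];
        unsigned nextTemp;

        InlineState() : nextTemp(0)
        {
            for (unsigned i = 0; i < MaxLocals; i++)
            {
                lclTmpNum[i] = BadVarNum; // no temp allocated yet
            }
        }

        unsigned FetchLocal(unsigned lclNum)
        {
            unsigned tmp = lclTmpNum[lclNum];
            if (tmp == BadVarNum)
            {
                tmp               = nextTemp++; // grab a fresh temp
                lclTmpNum[lclNum] = tmp;        // remember it for later uses
            }
            return tmp;
        }
    };
}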
19174 //------------------------------------------------------------------------
19175 // impInlineFetchArg: return tree node for argument value in an inlinee
19178 // lclNum -- argument number in inlinee IL
19179 // inlArgInfo -- argument info for inlinee
19180 // lclVarInfo -- var info for inlinee
19183 // Tree for the argument's value. Often an inlinee-scoped temp
19184 // GT_LCL_VAR but can be other tree kinds, if the argument
19185 // expression from the caller can be directly substituted into the
19189 // Must be used only for arguments -- use impInlineFetchLocal for
19192 // Direct substitution is performed when the formal argument cannot
19193 // change value in the inlinee body (no starg or ldarga), and the
19194 // actual argument expression's value cannot be changed if it is
19195 // substituted it into the inlinee body.
19197 // Even if an inlinee-scoped temp is returned here, it may later be
19198 // "bashed" to a caller-supplied tree when arguments are actually
19199 // passed (see fgInlinePrependStatements). Bashing can happen if
19200 // the argument ends up being single use and other conditions are
19201 // met. So the contents of the tree returned here may not end up
19202 // being the ones ultimately used for the argument.
19204 // This method will side effect inlArgInfo. It should only be called
19205 // for actual uses of the argument in the inlinee.
19207 GenTree* Compiler::impInlineFetchArg(unsigned lclNum, InlArgInfo* inlArgInfo, InlLclVarInfo* lclVarInfo)
19209 // Cache the relevant arg and lcl info for this argument.
19210 // We will modify argInfo but not lclVarInfo.
19211 InlArgInfo& argInfo = inlArgInfo[lclNum];
19212 const InlLclVarInfo& lclInfo = lclVarInfo[lclNum];
19213 const bool argCanBeModified = argInfo.argHasLdargaOp || argInfo.argHasStargOp;
19214 const var_types lclTyp = lclInfo.lclTypeInfo;
19215 GenTree* op1 = nullptr;
19217 if (argInfo.argIsInvariant && !argCanBeModified)
19219 // Directly substitute constants or addresses of locals
19221 // Clone the constant. Note that we cannot directly use
19222 // argNode in the trees even if !argInfo.argIsUsed as this
19223 // would introduce aliasing between inlArgInfo[].argNode and
19224 // impInlineExpr. Then gtFoldExpr() could change it, causing
19225 // further references to the argument working off of the
19227 op1 = gtCloneExpr(argInfo.argNode);
19228 PREFIX_ASSUME(op1 != nullptr);
19229 argInfo.argTmpNum = BAD_VAR_NUM;
19231 // We may need to retype to ensure we match the callee's view of the type.
19232 // Otherwise callee-pass throughs of arguments can create return type
19233 // mismatches that block inlining.
19235 // Note argument type mismatches that prevent inlining should
19236 // have been caught in impInlineInitVars.
19237 if (op1->TypeGet() != lclTyp)
19239 op1->gtType = genActualType(lclTyp);
19242 else if (argInfo.argIsLclVar && !argCanBeModified && !argInfo.argHasCallerLocalRef)
19244 // Directly substitute unaliased caller locals for args that cannot be modified
19246 // Use the caller-supplied node if this is the first use.
19247 op1 = argInfo.argNode;
19248 argInfo.argTmpNum = op1->gtLclVarCommon.gtLclNum;
19250 // Use an equivalent copy if this is the second or subsequent
19251 // use, or if we need to retype.
19253 // Note argument type mismatches that prevent inlining should
19254 // have been caught in impInlineInitVars.
19255 if (argInfo.argIsUsed || (op1->TypeGet() != lclTyp))
19257 assert(op1->gtOper == GT_LCL_VAR);
19258 assert(lclNum == op1->gtLclVar.gtLclILoffs);
19260 var_types newTyp = lclTyp;
19262 if (!lvaTable[op1->gtLclVarCommon.gtLclNum].lvNormalizeOnLoad())
19264 newTyp = genActualType(lclTyp);
19267 // Create a new lcl var node - remember the argument lclNum
19268 op1 = gtNewLclvNode(op1->gtLclVarCommon.gtLclNum, newTyp, op1->gtLclVar.gtLclILoffs);
19271 else if (argInfo.argIsByRefToStructLocal && !argInfo.argHasStargOp)
19273 /* Argument is a by-ref address to a struct, a normed struct, or its field.
19274 In these cases, don't spill the byref to a local, simply clone the tree and use it.
19275 This way we will increase the chance for this byref to be optimized away by
19276 a subsequent "dereference" operation.
19278 From Dev11 bug #139955: Argument node can also be TYP_I_IMPL if we've bashed the tree
19279 (in impInlineInitVars()), if the arg has argHasLdargaOp as well as argIsByRefToStructLocal.
19280 For example, if the caller is:
19281 ldloca.s V_1 // V_1 is a local struct
19282 call void Test.ILPart::RunLdargaOnPointerArg(int32*)
19283 and the callee being inlined has:
19284 .method public static void RunLdargaOnPointerArg(int32* ptrToInts) cil managed
19286 call void Test.FourInts::NotInlined_SetExpectedValuesThroughPointerToPointer(int32**)
19287 then we change the argument tree (of "ldloca.s V_1") to TYP_I_IMPL to match the callee signature. We'll
19288 soon afterwards reject the inlining anyway, since the tree we return isn't a GT_LCL_VAR.
19290 assert(argInfo.argNode->TypeGet() == TYP_BYREF || argInfo.argNode->TypeGet() == TYP_I_IMPL);
19291 op1 = gtCloneExpr(argInfo.argNode);
19295 /* Argument is a complex expression - it must be evaluated into a temp */
19297 if (argInfo.argHasTmp)
19299 assert(argInfo.argIsUsed);
19300 assert(argInfo.argTmpNum < lvaCount);
19302 /* Create a new lcl var node - remember the argument lclNum */
19303 op1 = gtNewLclvNode(argInfo.argTmpNum, genActualType(lclTyp));
19305 /* This is the second or later use of the this argument,
19306 so we have to use the temp (instead of the actual arg) */
19307 argInfo.argBashTmpNode = nullptr;
19311 /* First time use */
19312 assert(!argInfo.argIsUsed);
19314 /* Reserve a temp for the expression.
19315 * Use a large size node as we may change it later */
19317 const unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Inlining Arg"));
19319 lvaTable[tmpNum].lvType = lclTyp;
19321 // For ref types, determine the type of the temp.
19322 if (lclTyp == TYP_REF)
19324 if (!argCanBeModified)
19326 // If the arg can't be modified in the method
19327 // body, use the type of the value, if
19328 // known. Otherwise, use the declared type.
19329 assert(lvaTable[tmpNum].lvSingleDef == 0);
19330 lvaTable[tmpNum].lvSingleDef = 1;
19331 JITDUMP("Marked V%02u as a single def temp\n", tmpNum);
19332 lvaSetClass(tmpNum, argInfo.argNode, lclInfo.lclVerTypeInfo.GetClassHandleForObjRef());
19336 // Arg might be modified, use the declared type of the argument.
19338 lvaSetClass(tmpNum, lclInfo.lclVerTypeInfo.GetClassHandleForObjRef());
19342 assert(lvaTable[tmpNum].lvAddrExposed == 0);
19343 if (argInfo.argHasLdargaOp)
19345 lvaTable[tmpNum].lvHasLdAddrOp = 1;
19348 if (lclInfo.lclVerTypeInfo.IsStruct())
19350 if (varTypeIsStruct(lclTyp))
19352 lvaSetStruct(tmpNum, lclInfo.lclVerTypeInfo.GetClassHandle(), true /* unsafe value cls check */);
19353 if (info.compIsVarArgs)
19355 lvaSetStructUsedAsVarArg(tmpNum);
19360 // This is a wrapped primitive. Make sure the verstate knows that
19361 lvaTable[tmpNum].lvVerTypeInfo = lclInfo.lclVerTypeInfo;
19365 argInfo.argHasTmp = true;
19366 argInfo.argTmpNum = tmpNum;
19368 // If we require strict exception order, then arguments must
19369 // be evaluated in sequence before the body of the inlined method.
19370 // So we need to evaluate them to a temp.
19371 // Also, if arguments have global or local references, we need to
19372 // evaluate them to a temp before the inlined body as the
19373 // inlined body may be modifying the global ref.
19374 // TODO-1stClassStructs: We currently do not reuse an existing lclVar
19375 // if it is a struct, because it requires some additional handling.
19377 if (!varTypeIsStruct(lclTyp) && !argInfo.argHasSideEff && !argInfo.argHasGlobRef &&
19378 !argInfo.argHasCallerLocalRef)
19380 /* Get a *LARGE* LCL_VAR node */
19381 op1 = gtNewLclLNode(tmpNum, genActualType(lclTyp), lclNum);
19383 /* Record op1 as the very first use of this argument.
19384 If there are no further uses of the arg, we may be
19385 able to use the actual arg node instead of the temp.
19386 If we do see any further uses, we will clear this. */
19387 argInfo.argBashTmpNode = op1;
19391 /* Get a small LCL_VAR node */
19392 op1 = gtNewLclvNode(tmpNum, genActualType(lclTyp));
19393 /* No bashing of this argument */
19394 argInfo.argBashTmpNode = nullptr;
19399 // Mark this argument as used.
19400 argInfo.argIsUsed = true;
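// Illustrative sketch (not part of the original source; hypothetical names)
// of the substitution policy above, in decision order: invariant args are
// cloned in place, unaliased caller locals are used directly, byrefs to
// struct locals are cloned, and everything else goes through a temp.
namespace FetchArgSketch
{
    enum class Use
    {
        CloneConstant,
        UseCallerLocal,
        CloneByref,
        UseTemp
    };

    struct ArgShape
    {
        bool isInvariant;
        bool isLclVar;
        bool isByrefToStructLocal;
        bool hasLdargaOp;
        bool hasStargOp;
        bool hasCallerLocalRef;
    };

    Use Choose(const ArgShape& a)
    {
        const bool canBeModified = a.hasLdargaOp || a.hasStargOp;

        if (a.isInvariant && !canBeModified)
        {
            return Use::CloneConstant;
        }
        if (a.isLclVar && !canBeModified && !a.hasCallerLocalRef)
        {
            return Use::UseCallerLocal;
        }
        if (a.isByrefToStructLocal && !a.hasStargOp)
        {
            return Use::CloneByref;
        }
        return Use::UseTemp;
    }
}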
19405 /******************************************************************************
19406 Is this the original "this" argument to the call being inlined?
19408 Note that we do not inline methods with "starg 0", and so we do not need to
19412 BOOL Compiler::impInlineIsThis(GenTree* tree, InlArgInfo* inlArgInfo)
19414 assert(compIsForInlining());
19415 return (tree->gtOper == GT_LCL_VAR && tree->gtLclVarCommon.gtLclNum == inlArgInfo[0].argTmpNum);
19418 //-----------------------------------------------------------------------------
19419 // This function checks if a dereference in the inlinee can guarantee that
19420 // the "this" is non-NULL.
19421 // If we haven't hit a branch or a side effect, and we are dereferencing
19422 // from 'this' to access a field or make GTF_CALL_NULLCHECK call,
19423 // then we can avoid a separate null pointer check.
19425 // "additionalTreesToBeEvaluatedBefore"
19426 // is the set of pending trees that have not yet been added to the statement list,
19427 // and which have been removed from verCurrentState.esStack[]
19429 BOOL Compiler::impInlineIsGuaranteedThisDerefBeforeAnySideEffects(GenTree* additionalTreesToBeEvaluatedBefore,
19430 GenTree* variableBeingDereferenced,
19431 InlArgInfo* inlArgInfo)
19433 assert(compIsForInlining());
19434 assert(opts.OptEnabled(CLFLG_INLINING));
19436 BasicBlock* block = compCurBB;
19441 if (block != fgFirstBB)
19446 if (!impInlineIsThis(variableBeingDereferenced, inlArgInfo))
19451 if (additionalTreesToBeEvaluatedBefore &&
19452 GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(additionalTreesToBeEvaluatedBefore->gtFlags))
19457 for (stmt = impTreeList->gtNext; stmt; stmt = stmt->gtNext)
19459 expr = stmt->gtStmt.gtStmtExpr;
19461 if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(expr->gtFlags))
19467 for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
19469 unsigned stackTreeFlags = verCurrentState.esStack[level].val->gtFlags;
19470 if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(stackTreeFlags))
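// Illustrative sketch (not part of the original source; hypothetical names):
// the screen above in miniature. A dereference of 'this' may stand in for
// the explicit null check only if nothing with a globally visible side
// effect could execute first, so pending trees, emitted statements, and the
// evaluation stack are all scanned for such effects.
namespace ThisDerefSketch
{
    struct Tree
    {
        unsigned flags;
    };

    const unsigned GloballyVisibleEffect = 0x1;

    bool NoVisibleSideEffects(Tree* const* trees, unsigned count)
    {
        for (unsigned i = 0; i < count; i++)
        {
            if ((trees[i]->flags & GloballyVisibleEffect) != 0)
            {
                return false; // an observer could see the reordered fault
            }
        }
        return true;
    }
}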
19479 //------------------------------------------------------------------------
19480 // impMarkInlineCandidate: determine if this call can be subsequently inlined
19483 // callNode -- call under scrutiny
19484 // exactContextHnd -- context handle for inlining
19485 // exactContextNeedsRuntimeLookup -- true if context required runtime lookup
19486 // callInfo -- call info from VM
19489 // If callNode is an inline candidate, this method sets the flag
19490 // GTF_CALL_INLINE_CANDIDATE, and ensures that helper methods have
19491 // filled in the associated InlineCandidateInfo.
19493 // If callNode is not an inline candidate, and the reason is
19494 // something that is inherent to the method being called, the
19495 // method may be marked as "noinline" to short-circuit any
19496 // future assessments of calls to this method.
19498 void Compiler::impMarkInlineCandidate(GenTree* callNode,
19499 CORINFO_CONTEXT_HANDLE exactContextHnd,
19500 bool exactContextNeedsRuntimeLookup,
19501 CORINFO_CALL_INFO* callInfo)
19503 // Let the strategy know there's another call
19504 impInlineRoot()->m_inlineStrategy->NoteCall();
19506 if (!opts.OptEnabled(CLFLG_INLINING))
19508 /* XXX Mon 8/18/2008
19509 * This assert is misleading. The caller does not ensure that we have CLFLG_INLINING set before
19510 * calling impMarkInlineCandidate. However, if this assert trips it means that we're an inlinee and
19511 * CLFLG_MINOPT is set. That doesn't make a lot of sense. If you hit this assert, work back and
19512 * figure out why we did not set MAXOPT for this compile.
19514 assert(!compIsForInlining());
19518 if (compIsForImportOnly())
19520 // Don't bother creating the inline candidate during verification.
19521 // Otherwise the call to info.compCompHnd->canInline will trigger a recursive verification
19522 // that leads to the creation of multiple instances of Compiler.
19526 GenTreeCall* call = callNode->AsCall();
19527 InlineResult inlineResult(this, call, nullptr, "impMarkInlineCandidate");
19529 // Don't inline if not optimizing root method
19530 if (opts.compDbgCode)
19532 inlineResult.NoteFatal(InlineObservation::CALLER_DEBUG_CODEGEN);
19536 // Don't inline if inlining into root method is disabled.
19537 if (InlineStrategy::IsNoInline(info.compCompHnd, info.compMethodHnd))
19539 inlineResult.NoteFatal(InlineObservation::CALLER_IS_JIT_NOINLINE);
19543 // Inlining candidate determination needs to honor only IL tail prefix.
19544 // Inlining takes precedence over implicit tail call optimization (if the call is not directly recursive).
19545 if (call->IsTailPrefixedCall())
19547 inlineResult.NoteFatal(InlineObservation::CALLSITE_EXPLICIT_TAIL_PREFIX);
19551 // Tail recursion elimination takes precedence over inlining.
19552 // TODO: We may want to do some of the additional checks from fgMorphCall
19553 // here to reduce the chance we don't inline a call that won't be optimized
19554 // as a fast tail call or turned into a loop.
19555 if (gtIsRecursiveCall(call) && call->IsImplicitTailCall())
19557 inlineResult.NoteFatal(InlineObservation::CALLSITE_IMPLICIT_REC_TAIL_CALL);
19561 if (call->IsVirtual())
19563 inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT);
19567 /* Ignore helper calls */
19569 if (call->gtCallType == CT_HELPER)
19571 inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_CALL_TO_HELPER);
19575 /* Ignore indirect calls */
19576 if (call->gtCallType == CT_INDIRECT)
19578 inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT_MANAGED);
19582 /* I removed the check for BBJ_THROW. BBJ_THROW is usually marked as rarely run. This more or less
19583 * restricts the inliner to non-expanding inlines. I removed the check to allow for non-expanding
19584 * inlining in throw blocks. I should consider the same thing for catch and filter regions. */
19586 CORINFO_METHOD_HANDLE fncHandle = call->gtCallMethHnd;
19589 // Reuse method flags from the original callInfo if possible
19590 if (fncHandle == callInfo->hMethod)
19592 methAttr = callInfo->methodFlags;
19596 methAttr = info.compCompHnd->getMethodAttribs(fncHandle);
19600 if (compStressCompile(STRESS_FORCE_INLINE, 0))
19602 methAttr |= CORINFO_FLG_FORCEINLINE;
19606 // Check for COMPlus_AggressiveInlining
19607 if (compDoAggressiveInlining)
19609 methAttr |= CORINFO_FLG_FORCEINLINE;
19612 if (!(methAttr & CORINFO_FLG_FORCEINLINE))
19614 /* Don't bother inlining blocks that are in the catch handler region */
19615 if (bbInCatchHandlerILRange(compCurBB))
19620 printf("\nWill not inline blocks that are in the catch handler region\n");
19625 inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_CATCH);
19629 if (bbInFilterILRange(compCurBB))
19634 printf("\nWill not inline blocks that are in the filter region\n");
19638 inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_FILTER);
19643 /* If the caller's stack frame is marked, then we can't do any inlining. Period. */
19645 if (opts.compNeedSecurityCheck)
19647 inlineResult.NoteFatal(InlineObservation::CALLER_NEEDS_SECURITY_CHECK);
19651 /* Check if we tried to inline this method before */
19653 if (methAttr & CORINFO_FLG_DONT_INLINE)
19655 inlineResult.NoteFatal(InlineObservation::CALLEE_IS_NOINLINE);
19659 /* Cannot inline synchronized methods */
19661 if (methAttr & CORINFO_FLG_SYNCH)
19663 inlineResult.NoteFatal(InlineObservation::CALLEE_IS_SYNCHRONIZED);
19667 /* Do not inline if callee needs security checks (since they would then mark the wrong frame) */
19669 if (methAttr & CORINFO_FLG_SECURITYCHECK)
19671 inlineResult.NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
19675 /* Check legality of PInvoke callsite (for inlining of marshalling code) */
19677 if (methAttr & CORINFO_FLG_PINVOKE)
19679 // See comment in impCheckForPInvokeCall
19680 BasicBlock* block = compIsForInlining() ? impInlineInfo->iciBlock : compCurBB;
19681 if (!impCanPInvokeInlineCallSite(block))
19683 inlineResult.NoteFatal(InlineObservation::CALLSITE_PINVOKE_EH);
19688 InlineCandidateInfo* inlineCandidateInfo = nullptr;
19689 impCheckCanInline(call, fncHandle, methAttr, exactContextHnd, &inlineCandidateInfo, &inlineResult);
19691 if (inlineResult.IsFailure())
19696 // The old value should be NULL
19697 assert(call->gtInlineCandidateInfo == nullptr);
19699 // The new value should not be NULL.
19700 assert(inlineCandidateInfo != nullptr);
19701 inlineCandidateInfo->exactContextNeedsRuntimeLookup = exactContextNeedsRuntimeLookup;
19703 call->gtInlineCandidateInfo = inlineCandidateInfo;
19705 // Mark the call node as inline candidate.
19706 call->gtFlags |= GTF_CALL_INLINE_CANDIDATE;
19708 // Let the strategy know there's another candidate.
19709 impInlineRoot()->m_inlineStrategy->NoteCandidate();
19711 // Since we're not actually inlining yet, and this call site is
19712 // still just an inline candidate, there's nothing to report.
19713 inlineResult.SetReported();
19716 /******************************************************************************/
19717 // Returns true if the given intrinsic will be implemented by target-specific instructions.
19720 bool Compiler::IsTargetIntrinsic(CorInfoIntrinsics intrinsicId)
19722 #if defined(_TARGET_XARCH_)
19723 switch (intrinsicId)
19725 // AMD64/x86 has SSE2 instructions to directly compute sqrt/abs and SSE4.1
19726 // instructions to directly compute round/ceiling/floor.
19728 // TODO: Because the x86 backend only targets SSE for floating-point code,
19729 // it does not treat Sine, Cosine, or Round as intrinsics (JIT32
19730 // implemented those intrinsics as x87 instructions). If this poses
19731 // a CQ problem, it may be necessary to change the implementation of
19732 // the helper calls to decrease call overhead or switch back to the
19733 // x87 instructions. This is tracked by #7097.
19734 case CORINFO_INTRINSIC_Sqrt:
19735 case CORINFO_INTRINSIC_Abs:
19738 case CORINFO_INTRINSIC_Round:
19739 case CORINFO_INTRINSIC_Ceiling:
19740 case CORINFO_INTRINSIC_Floor:
19741 return compSupports(InstructionSet_SSE41);
19746 #elif defined(_TARGET_ARM64_)
19747 switch (intrinsicId)
19749 case CORINFO_INTRINSIC_Sqrt:
19750 case CORINFO_INTRINSIC_Abs:
19751 case CORINFO_INTRINSIC_Round:
19752 case CORINFO_INTRINSIC_Floor:
19753 case CORINFO_INTRINSIC_Ceiling:
19759 #elif defined(_TARGET_ARM_)
19760 switch (intrinsicId)
19762 case CORINFO_INTRINSIC_Sqrt:
19763 case CORINFO_INTRINSIC_Abs:
19764 case CORINFO_INTRINSIC_Round:
return true;
default:
return false;
#else
19771 // TODO: This portion of the logic is not implemented for other architectures.
19772 // The reason for returning true is that, on all other architectures, the only
19773 // intrinsics enabled are target intrinsics.
return true;
#endif
19778 /******************************************************************************/
19779 // Returns true if the given intrinsic will be implemented by calling System.Math methods.
19782 bool Compiler::IsIntrinsicImplementedByUserCall(CorInfoIntrinsics intrinsicId)
19784 // Currently, if a math intrinsic is not implemented by target-specific
19785 // instructions, it will be implemented by a System.Math call. In the
19786 // future, if we turn to implementing some of them with helper calls,
19787 // this predicate needs to be revisited.
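// For example: CORINFO_INTRINSIC_Sin appears in none of the target-specific
// lists in IsTargetIntrinsic above, so on XARCH/ARM/ARM64 a Math.Sin call
// remains an ordinary (user) call into the System.Math implementation.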
19788 return !IsTargetIntrinsic(intrinsicId);
19791 bool Compiler::IsMathIntrinsic(CorInfoIntrinsics intrinsicId)
19793 switch (intrinsicId)
19795 case CORINFO_INTRINSIC_Sin:
19796 case CORINFO_INTRINSIC_Cbrt:
19797 case CORINFO_INTRINSIC_Sqrt:
19798 case CORINFO_INTRINSIC_Abs:
19799 case CORINFO_INTRINSIC_Cos:
19800 case CORINFO_INTRINSIC_Round:
19801 case CORINFO_INTRINSIC_Cosh:
19802 case CORINFO_INTRINSIC_Sinh:
19803 case CORINFO_INTRINSIC_Tan:
19804 case CORINFO_INTRINSIC_Tanh:
19805 case CORINFO_INTRINSIC_Asin:
19806 case CORINFO_INTRINSIC_Asinh:
19807 case CORINFO_INTRINSIC_Acos:
19808 case CORINFO_INTRINSIC_Acosh:
19809 case CORINFO_INTRINSIC_Atan:
19810 case CORINFO_INTRINSIC_Atan2:
19811 case CORINFO_INTRINSIC_Atanh:
19812 case CORINFO_INTRINSIC_Log10:
19813 case CORINFO_INTRINSIC_Pow:
19814 case CORINFO_INTRINSIC_Exp:
19815 case CORINFO_INTRINSIC_Ceiling:
19816 case CORINFO_INTRINSIC_Floor:
return true;
default:
return false;
19823 bool Compiler::IsMathIntrinsic(GenTree* tree)
19825 return (tree->OperGet() == GT_INTRINSIC) && IsMathIntrinsic(tree->gtIntrinsic.gtIntrinsicId);
19828 //------------------------------------------------------------------------
19829 // impDevirtualizeCall: Attempt to change a virtual vtable call into a normal call
//
// Arguments:
19833 // call -- the call node to examine/modify
19834 // method -- [IN/OUT] the method handle for call. Updated iff call devirtualized.
19835 // methodFlags -- [IN/OUT] flags for the method to call. Updated iff call devirtualized.
19836 // contextHandle -- [IN/OUT] context handle for the call. Updated iff call devirtualized.
19837 // exactContextHnd -- [OUT] updated context handle iff call devirtualized
19840 // Virtual calls in IL will always "invoke" the base class method.
19842 // This transformation looks for evidence that the type of 'this'
19843 // in the call is exactly known, is a final class or would invoke
19844 // a final method, and if that and other safety checks pan out,
19845 // modifies the call and the call info to create a direct call.
19847 // This transformation is initially done in the importer and not
19848 // in some subsequent optimization pass because we want it to be
19849 // upstream of inline candidate identification.
19851 // However, later phases may supply improved type information that
19852 // can enable further devirtualization. We currently reinvoke this
19853 // code after inlining, if the return value of the inlined call is
19854 // the 'this obj' of a subsequent virtual call.
19856 // If devirtualization succeeds and the call's this object is the
19857 // result of a box, the jit will ask the EE for the unboxed entry
19858 // point. If this exists, the jit will see if it can rework the box
19859 // to instead make a local copy. If that is doable, the call is
19860 // updated to invoke the unboxed entry on the local copy.
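// For example (illustrative C#, hypothetical types B and C):
//
//   sealed class C : B { public override int M() => 1; }
//   B b = new C();
//   int r = b.M();   // IL: callvirt B::M
//
// Here the type of 'b' is exactly known and C is final, so the callvirt
// can be rewritten as a direct call to C::M, which in turn makes the call
// eligible as an inline candidate.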
19862 void Compiler::impDevirtualizeCall(GenTreeCall* call,
19863 CORINFO_METHOD_HANDLE* method,
19864 unsigned* methodFlags,
19865 CORINFO_CONTEXT_HANDLE* contextHandle,
19866 CORINFO_CONTEXT_HANDLE* exactContextHandle)
19868 assert(call != nullptr);
19869 assert(method != nullptr);
19870 assert(methodFlags != nullptr);
19871 assert(contextHandle != nullptr);
19873 // This should be a virtual vtable or virtual stub call.
19874 assert(call->IsVirtual());
19876 // Bail if not optimizing
19877 if (opts.MinOpts())
19882 // Bail if debuggable codegen
19883 if (opts.compDbgCode)
19889 // Bail if devirt is disabled.
19890 if (JitConfig.JitEnableDevirtualization() == 0)
19895 const bool doPrint = JitConfig.JitPrintDevirtualizedMethods() == 1;
19898 // Fetch information about the virtual method we're calling.
19899 CORINFO_METHOD_HANDLE baseMethod = *method;
19900 unsigned baseMethodAttribs = *methodFlags;
19902 if (baseMethodAttribs == 0)
19904 // For late devirt we may not have method attributes, so fetch them.
19905 baseMethodAttribs = info.compCompHnd->getMethodAttribs(baseMethod);
19910 // Validate that callInfo has up to date method flags
19911 const DWORD freshBaseMethodAttribs = info.compCompHnd->getMethodAttribs(baseMethod);
19913 // All the base method attributes should agree, save that
19914 // CORINFO_FLG_DONT_INLINE may have changed from 0 to 1
19915 // because of concurrent jitting activity.
19917 // Note we don't look at this particular flag bit below, and
19918 // later on (if we do try and inline) we will rediscover why
19919 // the method can't be inlined, so there's no danger here in
19920 // seeing this particular flag bit in different states between
19921 // the cached and fresh values.
19922 if ((freshBaseMethodAttribs & ~CORINFO_FLG_DONT_INLINE) != (baseMethodAttribs & ~CORINFO_FLG_DONT_INLINE))
19924 assert(!"mismatched method attributes");
19929 // In R2R mode, we might see virtual stub calls to
19930 // non-virtuals. For instance, cases where the non-virtual method
19931 // is in a different assembly but is called via CALLVIRT. For
19932 // version resilience we must allow for the fact that the method
19933 // might become virtual in some update.
19935 // In non-R2R modes CALLVIRT <nonvirtual> will be turned into a
19936 // regular call+nullcheck upstream, so we won't reach this case.
19938 if ((baseMethodAttribs & CORINFO_FLG_VIRTUAL) == 0)
19940 assert(call->IsVirtualStub());
19941 assert(opts.IsReadyToRun());
19942 JITDUMP("\nimpDevirtualizeCall: [R2R] base method not virtual, sorry\n");
19946 // See what we know about the type of 'this' in the call.
19947 GenTree* thisObj = call->gtCallObjp->gtEffectiveVal(false);
19948 GenTree* actualThisObj = nullptr;
19949 bool isExact = false;
19950 bool objIsNonNull = false;
19951 CORINFO_CLASS_HANDLE objClass = gtGetClassHandle(thisObj, &isExact, &objIsNonNull);
19953 // See if we have special knowledge that can get us a type or a better type.
19954 if ((objClass == nullptr) || !isExact)
19956 // Walk back through any return expression placeholders
19957 actualThisObj = thisObj->gtRetExprVal();
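// (If 'thisObj' came from an inline candidate it is still a GT_RET_EXPR
// placeholder at this point; gtRetExprVal walks through such placeholders
// to the tree that will eventually replace them.)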
19959 // See if we landed on a call to a special intrinsic method
19960 if (actualThisObj->IsCall())
19962 GenTreeCall* thisObjCall = actualThisObj->AsCall();
19963 if ((thisObjCall->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) != 0)
19965 assert(thisObjCall->gtCallType == CT_USER_FUNC);
19966 CORINFO_METHOD_HANDLE specialIntrinsicHandle = thisObjCall->gtCallMethHnd;
19967 CORINFO_CLASS_HANDLE specialObjClass = impGetSpecialIntrinsicExactReturnType(specialIntrinsicHandle);
19968 if (specialObjClass != nullptr)
19970 objClass = specialObjClass;
19972 objIsNonNull = true;
19978 // Bail if we know nothing.
19979 if (objClass == nullptr)
19981 JITDUMP("\nimpDevirtualizeCall: no type available (op=%s)\n", GenTree::OpName(thisObj->OperGet()));
19985 // Fetch information about the class that introduced the virtual method.
19986 CORINFO_CLASS_HANDLE baseClass = info.compCompHnd->getMethodClass(baseMethod);
19987 const DWORD baseClassAttribs = info.compCompHnd->getClassAttribs(baseClass);
19989 #if !defined(FEATURE_CORECLR)
19990 // If base class is not beforefieldinit then devirtualizing may
19991 // cause us to miss a base class init trigger. Spec says we don't
19992 // need a trigger for ref class callvirts but desktop seems to
19993 // have one anyway. So defer.
19994 if ((baseClassAttribs & CORINFO_FLG_BEFOREFIELDINIT) == 0)
19996 JITDUMP("\nimpDevirtualizeCall: base class has precise initialization, sorry\n");
19999 #endif // FEATURE_CORECLR
20001 // Is the call an interface call?
20002 const bool isInterface = (baseClassAttribs & CORINFO_FLG_INTERFACE) != 0;
20004 // If the objClass is sealed (final), then we may be able to devirtualize.
20005 const DWORD objClassAttribs = info.compCompHnd->getClassAttribs(objClass);
20006 const bool objClassIsFinal = (objClassAttribs & CORINFO_FLG_FINAL) != 0;
20009 const char* callKind = isInterface ? "interface" : "virtual";
20010 const char* objClassNote = "[?]";
20011 const char* objClassName = "?objClass";
20012 const char* baseClassName = "?baseClass";
20013 const char* baseMethodName = "?baseMethod";
20015 if (verbose || doPrint)
20017 objClassNote = isExact ? " [exact]" : objClassIsFinal ? " [final]" : "";
20018 objClassName = info.compCompHnd->getClassName(objClass);
20019 baseClassName = info.compCompHnd->getClassName(baseClass);
20020 baseMethodName = eeGetMethodName(baseMethod, nullptr);
20024 printf("\nimpDevirtualizeCall: Trying to devirtualize %s call:\n"
20025 " class for 'this' is %s%s (attrib %08x)\n"
20026 " base method is %s::%s\n",
20027 callKind, objClassName, objClassNote, objClassAttribs, baseClassName, baseMethodName);
20030 #endif // defined(DEBUG)
20032 // Bail if obj class is an interface.
20033 // See for instance System.ValueTuple`8::GetHashCode, where lcl 0 is System.IValueTupleInternal
20034 // IL_021d: ldloc.0
20035 // IL_021e: callvirt instance int32 System.Object::GetHashCode()
20036 if ((objClassAttribs & CORINFO_FLG_INTERFACE) != 0)
20038 JITDUMP("--- obj class is interface, sorry\n");
if (isInterface)
20044 assert(call->IsVirtualStub());
20045 JITDUMP("--- base class is interface\n");
20048 // Fetch the method that would be called based on the declared type of 'this'
20049 CORINFO_CONTEXT_HANDLE ownerType = *contextHandle;
20050 CORINFO_METHOD_HANDLE derivedMethod = info.compCompHnd->resolveVirtualMethod(baseMethod, objClass, ownerType);
20052 // If we failed to get a handle, we can't devirtualize. This can
20053 // happen when prejitting, if the devirtualization crosses
20054 // servicing bubble boundaries.
20055 if (derivedMethod == nullptr)
20057 JITDUMP("--- no derived method, sorry\n");
20061 // Fetch method attributes to see if method is marked final.
20062 DWORD derivedMethodAttribs = info.compCompHnd->getMethodAttribs(derivedMethod);
20063 const bool derivedMethodIsFinal = ((derivedMethodAttribs & CORINFO_FLG_FINAL) != 0);
20066 const char* derivedClassName = "?derivedClass";
20067 const char* derivedMethodName = "?derivedMethod";
20069 const char* note = "speculative";
if (isExact)
note = "exact";
20074 else if (objClassIsFinal)
20076 note = "final class";
20078 else if (derivedMethodIsFinal)
20080 note = "final method";
20083 if (verbose || doPrint)
20085 derivedMethodName = eeGetMethodName(derivedMethod, &derivedClassName);
20088 printf(" devirt to %s::%s -- %s\n", derivedClassName, derivedMethodName, note);
20092 #endif // defined(DEBUG)
20094 if (!isExact && !objClassIsFinal && !derivedMethodIsFinal)
20096 // Type is not exact, and neither the class nor the method is final.
20098 // We could speculatively devirtualize, but there's no
20099 // reason to believe the derived method is the one that
20100 // is likely to be invoked.
20102 // If there's currently no further overriding (that is, at
20103 // the time of jitting, objClass has no subclasses that
20104 // override this method), then perhaps we'd be willing to make a bet and devirtualize anyway.
20106 JITDUMP(" Class not final or exact, method not final, no devirtualization\n");
20110 // For interface calls we must have an exact type or final class.
20111 if (isInterface && !isExact && !objClassIsFinal)
20113 JITDUMP(" Class not final or exact for interface, no devirtualization\n");
20117 JITDUMP(" %s; can devirtualize\n", note);
20119 // Make the updates.
20120 call->gtFlags &= ~GTF_CALL_VIRT_VTABLE;
20121 call->gtFlags &= ~GTF_CALL_VIRT_STUB;
20122 call->gtCallMethHnd = derivedMethod;
20123 call->gtCallType = CT_USER_FUNC;
20124 call->gtCallMoreFlags |= GTF_CALL_M_DEVIRTUALIZED;
20126 // Virtual calls include an implicit null check, which we may
20127 // now need to make explicit.
if (!objIsNonNull)
20130 call->gtFlags |= GTF_CALL_NULLCHECK;
20133 // Clear the inline candidate info (may be non-null since
20134 // it's a union field used for other things by virtual calls)
20136 call->gtInlineCandidateInfo = nullptr;
20141 printf("... after devirt...\n");
20147 printf("Devirtualized %s call to %s:%s; now direct call to %s:%s [%s]\n", callKind, baseClassName,
20148 baseMethodName, derivedClassName, derivedMethodName, note);
20150 #endif // defined(DEBUG)
20152 // If the 'this' object is a box, see if we can find the unboxed entry point for the call.
20153 if (thisObj->IsBoxedValue())
20155 JITDUMP("Now have direct call to boxed entry point, looking for unboxed entry point\n");
20157 // Note for some shared methods the unboxed entry point requires an extra parameter.
20158 bool requiresInstMethodTableArg = false;
20159 CORINFO_METHOD_HANDLE unboxedEntryMethod =
20160 info.compCompHnd->getUnboxedEntry(derivedMethod, &requiresInstMethodTableArg);
20162 if (unboxedEntryMethod != nullptr)
20164 // Since the call is the only consumer of the box, we know the box can't escape
20165 // since it is being passed an interior pointer.
20167 // So, revise the box to simply create a local copy, use the address of that copy
20168 // as the this pointer, and update the entry point to the unboxed entry.
20170 // Ideally, we then inline the boxed method and, if it turns out not to modify
20171 // the copy, we can undo the copy too.
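// As an illustrative example, for C# source like:
//
//   object o = 42;
//   string s = o.ToString();
//
// devirtualization gives a direct call to Int32.ToString on the boxed
// value; if the EE exposes an unboxed entry point, the box can instead
// become a local int copy and the call is retargeted to invoke that
// entry on the copy's address.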
20172 if (requiresInstMethodTableArg)
20174 // Perform a trial box removal and ask for the type handle tree.
20175 JITDUMP("Unboxed entry needs method table arg...\n");
20176 GenTree* methodTableArg = gtTryRemoveBoxUpstreamEffects(thisObj, BR_DONT_REMOVE_WANT_TYPE_HANDLE);
20178 if (methodTableArg != nullptr)
20180 // If that worked, turn the box into a copy to a local var
20181 JITDUMP("Found suitable method table arg tree [%06u]\n", dspTreeID(methodTableArg));
20182 GenTree* localCopyThis = gtTryRemoveBoxUpstreamEffects(thisObj, BR_MAKE_LOCAL_COPY);
20184 if (localCopyThis != nullptr)
20186 // Pass the local var as this and the type handle as a new arg
20187 JITDUMP("Success! invoking unboxed entry point on local copy, and passing method table arg\n");
20188 call->gtCallObjp = localCopyThis;
20189 call->gtCallMoreFlags |= GTF_CALL_M_UNBOXED;
20191 // Prepend for R2L arg passing or empty L2R passing
20192 if ((Target::g_tgtArgOrder == Target::ARG_ORDER_R2L) || (call->gtCallArgs == nullptr))
20194 call->gtCallArgs = gtNewListNode(methodTableArg, call->gtCallArgs);
20196 // Append for non-empty L2R
20199 GenTreeArgList* beforeArg = call->gtCallArgs;
20200 while (beforeArg->Rest() != nullptr)
20202 beforeArg = beforeArg->Rest();
20205 beforeArg->Rest() = gtNewListNode(methodTableArg, nullptr);
20208 call->gtCallMethHnd = unboxedEntryMethod;
20209 derivedMethod = unboxedEntryMethod;
20211 // Method attributes will differ because unboxed entry point is shared
20212 const DWORD unboxedMethodAttribs = info.compCompHnd->getMethodAttribs(unboxedEntryMethod);
20213 JITDUMP("Updating method attribs from 0x%08x to 0x%08x\n", derivedMethodAttribs,
20214 unboxedMethodAttribs);
20215 derivedMethodAttribs = unboxedMethodAttribs;
20219 JITDUMP("Sorry, failed to undo the box -- can't convert to local copy\n");
20224 JITDUMP("Sorry, failed to undo the box -- can't find method table arg\n");
20229 JITDUMP("Found unboxed entry point, trying to simplify box to a local copy\n");
20230 GenTree* localCopyThis = gtTryRemoveBoxUpstreamEffects(thisObj, BR_MAKE_LOCAL_COPY);
20232 if (localCopyThis != nullptr)
20234 JITDUMP("Success! invoking unboxed entry point on local copy\n");
20235 call->gtCallObjp = localCopyThis;
20236 call->gtCallMethHnd = unboxedEntryMethod;
20237 call->gtCallMoreFlags |= GTF_CALL_M_UNBOXED;
20238 derivedMethod = unboxedEntryMethod;
20242 JITDUMP("Sorry, failed to undo the box\n");
20248 // Many of the low-level methods on value classes won't have unboxed entries,
20249 // as they need access to the type of the object.
20251 // Note this may be a cue for us to stack allocate the boxed object, since
20252 // we probably know that these objects don't escape.
20253 JITDUMP("Sorry, failed to find unboxed entry point\n");
20257 // Fetch the class that introduced the derived method.
20259 // Note this may not equal objClass, if there is a
20260 // final method that objClass inherits.
20261 CORINFO_CLASS_HANDLE derivedClass = info.compCompHnd->getMethodClass(derivedMethod);
20263 // Need to update call info too. This is fragile
20264 // but hopefully the derived method conforms to
20265 // the base in most other ways.
20266 *method = derivedMethod;
20267 *methodFlags = derivedMethodAttribs;
20268 *contextHandle = MAKE_METHODCONTEXT(derivedMethod);
20270 // Update context handle.
20271 if ((exactContextHandle != nullptr) && (*exactContextHandle != nullptr))
20273 *exactContextHandle = MAKE_METHODCONTEXT(derivedMethod);
20276 #ifdef FEATURE_READYTORUN_COMPILER
20277 if (opts.IsReadyToRun())
20279 // For R2R, getCallInfo triggers bookkeeping on the zap
20280 // side so we need to call it here.
20282 // First, cons up a suitable resolved token.
20283 CORINFO_RESOLVED_TOKEN derivedResolvedToken = {};
20285 derivedResolvedToken.tokenScope = info.compScopeHnd;
20286 derivedResolvedToken.tokenContext = *contextHandle;
20287 derivedResolvedToken.token = info.compCompHnd->getMethodDefFromMethod(derivedMethod);
20288 derivedResolvedToken.tokenType = CORINFO_TOKENKIND_Method;
20289 derivedResolvedToken.hClass = derivedClass;
20290 derivedResolvedToken.hMethod = derivedMethod;
20292 // Look up the new call info.
20293 CORINFO_CALL_INFO derivedCallInfo;
20294 eeGetCallInfo(&derivedResolvedToken, nullptr, addVerifyFlag(CORINFO_CALLINFO_ALLOWINSTPARAM), &derivedCallInfo);
20296 // Update the call.
20297 call->gtCallMoreFlags &= ~GTF_CALL_M_VIRTSTUB_REL_INDIRECT;
20298 call->gtCallMoreFlags &= ~GTF_CALL_M_R2R_REL_INDIRECT;
20299 call->setEntryPoint(derivedCallInfo.codePointerLookup.constLookup);
20301 #endif // FEATURE_READYTORUN_COMPILER
20304 //------------------------------------------------------------------------
20305 // impGetSpecialIntrinsicExactReturnType: Look for special cases where a call
20306 // to an intrinsic returns an exact type
// Arguments:
20309 // methodHnd -- handle for the special intrinsic method
//
// Return Value:
20312 // Exact class handle returned by the intrinsic call, if known.
20313 // Nullptr if not known, or not likely to lead to beneficial optimization.
20315 CORINFO_CLASS_HANDLE Compiler::impGetSpecialIntrinsicExactReturnType(CORINFO_METHOD_HANDLE methodHnd)
20317 JITDUMP("Special intrinsic: looking for exact type returned by %s\n", eeGetMethodFullName(methodHnd));
20319 CORINFO_CLASS_HANDLE result = nullptr;
20321 // See what intrinsic we have...
20322 const NamedIntrinsic ni = lookupNamedIntrinsic(methodHnd);
switch (ni)
20325 case NI_System_Collections_Generic_EqualityComparer_get_Default:
20327 // Expect one class generic parameter; figure out which it is.
20328 CORINFO_SIG_INFO sig;
20329 info.compCompHnd->getMethodSig(methodHnd, &sig);
20330 assert(sig.sigInst.classInstCount == 1);
20331 CORINFO_CLASS_HANDLE typeHnd = sig.sigInst.classInst[0];
20332 assert(typeHnd != nullptr);
20334 // Lookup can be incorrect when we have __Canon, as it won't appear
20335 // to implement any interface types.
20337 // And if we do not have a final type, devirt & inlining is
20338 // unlikely to result in much simplification.
20340 // We can use CORINFO_FLG_FINAL to screen out both of these cases.
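// For example (illustrative): for EqualityComparer<int>.Default, 'int' is
// final, so the runtime can report the exact comparer type (for instance
// GenericEqualityComparer<int>), which later enables devirtualizing and
// inlining its Equals/GetHashCode calls.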
20341 const DWORD typeAttribs = info.compCompHnd->getClassAttribs(typeHnd);
20342 const bool isFinalType = ((typeAttribs & CORINFO_FLG_FINAL) != 0);
if (isFinalType)
20346 result = info.compCompHnd->getDefaultEqualityComparerClass(typeHnd);
20347 JITDUMP("Special intrinsic for type %s: return type is %s\n", eeGetClassName(typeHnd),
20348 result != nullptr ? eeGetClassName(result) : "unknown");
else
20352 JITDUMP("Special intrinsic for type %s: type not final, so deferring opt\n", eeGetClassName(typeHnd));
default:
20360 JITDUMP("This special intrinsic not handled, sorry...\n");
20368 //------------------------------------------------------------------------
20369 // impAllocateToken: create a CORINFO_RESOLVED_TOKEN in jit-allocated memory and initialize it.
//
// Arguments:
20372 // token - init value for the allocated token.
//
// Return Value:
20375 // pointer to the token in jit-allocated memory.
20376 CORINFO_RESOLVED_TOKEN* Compiler::impAllocateToken(CORINFO_RESOLVED_TOKEN token)
20378 CORINFO_RESOLVED_TOKEN* memory = getAllocator(CMK_Unknown).allocate<CORINFO_RESOLVED_TOKEN>(1);
*memory = token;
return memory;
20383 //------------------------------------------------------------------------
20384 // SpillRetExprHelper: iterate through the argument trees and spill ret_expr nodes to local variables.
20386 class SpillRetExprHelper
20389 SpillRetExprHelper(Compiler* comp) : comp(comp)
20393 void StoreRetExprResultsInArgs(GenTreeCall* call)
20395 GenTreeArgList** pArgs = &call->gtCallArgs;
20396 if (*pArgs != nullptr)
20398 comp->fgWalkTreePre((GenTree**)pArgs, SpillRetExprVisitor, this);
20401 GenTree** pThisArg = &call->gtCallObjp;
20402 if (*pThisArg != nullptr)
20404 comp->fgWalkTreePre(pThisArg, SpillRetExprVisitor, this);
20409 static Compiler::fgWalkResult SpillRetExprVisitor(GenTree** pTree, Compiler::fgWalkData* fgWalkPre)
20411 assert((pTree != nullptr) && (*pTree != nullptr));
20412 GenTree* tree = *pTree;
20413 if ((tree->gtFlags & GTF_CALL) == 0)
20415 // Trees with ret_expr are marked as GTF_CALL.
20416 return Compiler::WALK_SKIP_SUBTREES;
20418 if (tree->OperGet() == GT_RET_EXPR)
20420 SpillRetExprHelper* walker = static_cast<SpillRetExprHelper*>(fgWalkPre->pCallbackData);
20421 walker->StoreRetExprAsLocalVar(pTree);
20423 return Compiler::WALK_CONTINUE;
20426 void StoreRetExprAsLocalVar(GenTree** pRetExpr)
20428 GenTree* retExpr = *pRetExpr;
20429 assert(retExpr->OperGet() == GT_RET_EXPR);
20430 JITDUMP("Store return expression %u as a local var.\n", retExpr->gtTreeID);
20431 unsigned tmp = comp->lvaGrabTemp(true DEBUGARG("spilling ret_expr"));
20432 comp->impAssignTempGen(tmp, retExpr, (unsigned)Compiler::CHECK_SPILL_NONE);
20433 *pRetExpr = comp->gtNewLclvNode(tmp, retExpr->TypeGet());
20440 //------------------------------------------------------------------------
20441 // addFatPointerCandidate: mark the call and the method as having a fat pointer candidate.
20442 // Spill ret_expr nodes in the call node, because they can't be cloned.
20445 // call - fat calli candidate
20447 void Compiler::addFatPointerCandidate(GenTreeCall* call)
20449 setMethodHasFatPointer();
20450 call->SetFatPointerCandidate();
20451 SpillRetExprHelper helper(this);
20452 helper.StoreRetExprResultsInArgs(call);