// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.

/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX        Imports the given method and converts it to semantic trees        XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/
#define Verify(cond, msg)                                                                                              \
    if (!(cond))                                                                                                       \
    {                                                                                                                  \
        verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__));                           \
    }
#define VerifyOrReturn(cond, msg)                                                                                      \
    if (!(cond))                                                                                                       \
    {                                                                                                                  \
        verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__));                           \
        return;                                                                                                        \
    }
#define VerifyOrReturnSpeculative(cond, msg, speculative)                                                              \
    if (!(cond))                                                                                                       \
    {                                                                                                                  \
        if (!(speculative))                                                                                            \
        {                                                                                                              \
            verRaiseVerifyExceptionIfNeeded(INDEBUG(msg) DEBUGARG(__FILE__) DEBUGARG(__LINE__));                       \
        }                                                                                                              \
        return false;                                                                                                  \
    }
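
// Illustrative usage (a sketch, not a call site from this file): a check in a
// verification helper might look like
//
//     VerifyOrReturn(tiRetVal.IsObjRef(), "bad return type");
//
// where a failed condition records the method as unverifiable via
// verRaiseVerifyExceptionIfNeeded and returns out of the current routine.
// The name tiRetVal here is hypothetical.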
/*****************************************************************************/

void Compiler::impInit()
{
    impTreeList        = nullptr;
    impTreeLast        = nullptr;
    impInlinedCodeSize = 0;
}
/*****************************************************************************
 *
 *  Pushes the given tree on the stack.
 */

void Compiler::impPushOnStack(GenTree* tree, typeInfo ti)
{
    /* Check for overflow. If inlining, we may be using a bigger stack */

    if ((verCurrentState.esStackDepth >= info.compMaxStack) &&
        (verCurrentState.esStackDepth >= impStkSize || ((compCurBB->bbFlags & BBF_IMPORTED) == 0)))
    {
        BADCODE("stack overflow");
    }
    // If we are pushing a struct, make certain we know the precise type!
    if (tree->TypeGet() == TYP_STRUCT)
    {
        assert(ti.IsType(TI_STRUCT));
        CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandle();
        assert(clsHnd != NO_CLASS_HANDLE);
    }
    if (tiVerificationNeeded && !ti.IsDead())
    {
        assert(typeInfo::AreEquivalent(NormaliseForStack(ti), ti)); // types are normalized

        // The ti type is consistent with the tree type.
        //

        // On 64-bit systems, nodes whose "proper" type is "native int" get labeled TYP_LONG.
        // In the verification type system, we always transform "native int" to "TI_INT".
        // Ideally, we would keep track of which nodes labeled "TYP_LONG" are really "native int", but
        // attempts to do that have proved too difficult. Instead, we'll assume that in checks like this,
        // when there's a mismatch, it's because of this reason -- the typeInfo::AreEquivalentModuloNativeInt
        // method used in the last disjunct allows exactly this mismatch.
        assert(ti.IsDead() || ti.IsByRef() && (tree->TypeGet() == TYP_I_IMPL || tree->TypeGet() == TYP_BYREF) ||
               ti.IsUnboxedGenericTypeVar() && tree->TypeGet() == TYP_REF ||
               ti.IsObjRef() && tree->TypeGet() == TYP_REF || ti.IsMethod() && tree->TypeGet() == TYP_I_IMPL ||
               ti.IsType(TI_STRUCT) && tree->TypeGet() != TYP_REF ||
               typeInfo::AreEquivalentModuloNativeInt(NormaliseForStack(ti),
                                                      NormaliseForStack(typeInfo(tree->TypeGet()))));

        // If it is a struct type, make certain we normalized the primitive types
        assert(!ti.IsType(TI_STRUCT) ||
               info.compCompHnd->getTypeForPrimitiveValueClass(ti.GetClassHandle()) == CORINFO_TYPE_UNDEF);
    }
#if VERBOSE_VERIFY
    if (VERBOSE && tiVerificationNeeded)
    {
        printf(TI_DUMP_PADDING);
        printf("About to push to stack: ");
        ti.Dump();
    }
#endif // VERBOSE_VERIFY
    verCurrentState.esStack[verCurrentState.esStackDepth].seTypeInfo = ti;
    verCurrentState.esStack[verCurrentState.esStackDepth++].val      = tree;

    if ((tree->gtType == TYP_LONG) && (compLongUsed == false))
    {
        compLongUsed = true;
    }
    else if (((tree->gtType == TYP_FLOAT) || (tree->gtType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
    {
        compFloatingPointUsed = true;
    }
}
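
// Illustrative sketch (not a call site from this file): importing an IL
// "ldc.i4 42" pushes an integer constant node along with its verification
// type, roughly
//
//     impPushOnStack(gtNewIconNode(42), typeInfo(TI_INT));
//
// The depth check above then guards against IL that exceeds .maxstack.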
inline void Compiler::impPushNullObjRefOnStack()
{
    impPushOnStack(gtNewIconNode(0, TYP_REF), typeInfo(TI_NULL));
}
// This method gets called when we run into unverifiable code
// (and we are verifying the method)

inline void Compiler::verRaiseVerifyExceptionIfNeeded(INDEBUG(const char* msg) DEBUGARG(const char* file)
                                                          DEBUGARG(unsigned line))
{
    // Remember that the code is not verifiable
    // Note that the method may yet pass canSkipMethodVerification(),
    // and so the presence of unverifiable code may not be an issue.
    tiIsVerifiableCode = FALSE;

#ifdef DEBUG
    const char* tail = strrchr(file, '\\');
    if (tail)
    {
        file = tail + 1;
    }

    if (JitConfig.JitBreakOnUnsafeCode())
    {
        assert(!"Unsafe code detected");
    }
#endif // DEBUG

    JITLOG((LL_INFO10000, "Detected unsafe code: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
            msg, info.compFullName, impCurOpcName, impCurOpcOffs));

    if (verNeedsVerification() || compIsForImportOnly())
    {
        JITLOG((LL_ERROR, "Verification failure: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
                msg, info.compFullName, impCurOpcName, impCurOpcOffs));
        verRaiseVerifyException(INDEBUG(msg) DEBUGARG(file) DEBUGARG(line));
    }
}
inline void DECLSPEC_NORETURN Compiler::verRaiseVerifyException(INDEBUG(const char* msg) DEBUGARG(const char* file)
                                                                    DEBUGARG(unsigned line))
{
    JITLOG((LL_ERROR, "Verification failure: %s:%d : %s, while compiling %s opcode %s, IL offset %x\n", file, line,
            msg, info.compFullName, impCurOpcName, impCurOpcOffs));

#ifdef DEBUG
    // BreakIfDebuggerPresent();
    if (getBreakOnBadCode())
    {
        assert(!"Typechecking error");
    }
#endif // DEBUG

    RaiseException(SEH_VERIFICATION_EXCEPTION, EXCEPTION_NONCONTINUABLE, 0, nullptr);
    UNREACHABLE();
}
// Helper function that tells us whether the IL instruction at codeAddr
// consumes an address at the top of the stack. We use it to avoid needlessly
// marking locals as address-taken.
bool Compiler::impILConsumesAddr(const BYTE* codeAddr, CORINFO_METHOD_HANDLE fncHandle, CORINFO_MODULE_HANDLE scpHandle)
{
    assert(!compIsForInlining());

    OPCODE opcode;

    opcode = (OPCODE)getU1LittleEndian(codeAddr);

    switch (opcode)
    {
        // case CEE_LDFLDA: We're taking this one out as if you have a sequence
        // like
        //
        //          ldloca.0
        //          ldflda whatever
        //
        // of a primitive-like struct, you end up after morphing with addr of a local
        // that's not marked as addrtaken, which is wrong. Also ldflda is usually used
        // for structs that contain other structs, which isn't a case we handle very
        // well now for other reasons.

        case CEE_LDFLD:
        {
            // We won't collapse small fields. This is probably not the right place to have this
            // check, but we're only using the function for this purpose, and it is easy to factor
            // out if we need to do so.

            CORINFO_RESOLVED_TOKEN resolvedToken;
            impResolveToken(codeAddr + sizeof(__int8), &resolvedToken, CORINFO_TOKENKIND_Field);

            var_types lclTyp = JITtype2varType(info.compCompHnd->getFieldType(resolvedToken.hField));

            // Preserve 'small' int types
            if (!varTypeIsSmall(lclTyp))
            {
                lclTyp = genActualType(lclTyp);
            }

            if (varTypeIsSmall(lclTyp))
            {
                return false;
            }

            return true;
        }
        default:
            break;
    }

    return false;
}
void Compiler::impResolveToken(const BYTE* addr, CORINFO_RESOLVED_TOKEN* pResolvedToken, CorInfoTokenKind kind)
{
    pResolvedToken->tokenContext = impTokenLookupContextHandle;
    pResolvedToken->tokenScope   = info.compScopeHnd;
    pResolvedToken->token        = getU4LittleEndian(addr);
    pResolvedToken->tokenType    = kind;

    if (!tiVerificationNeeded)
    {
        info.compCompHnd->resolveToken(pResolvedToken);
    }
    else
    {
        Verify(eeTryResolveToken(pResolvedToken), "Token resolution failed");
    }
}
/*****************************************************************************
 *
 *  Pop one tree from the stack.
 */

StackEntry Compiler::impPopStack()
{
    if (verCurrentState.esStackDepth == 0)
    {
        BADCODE("stack underflow");
    }

#if VERBOSE_VERIFY
    if (VERBOSE && tiVerificationNeeded)
    {
        printf(TI_DUMP_PADDING);
        printf("About to pop from the stack: ");
        const typeInfo& ti = verCurrentState.esStack[verCurrentState.esStackDepth - 1].seTypeInfo;
        ti.Dump();
    }
#endif // VERBOSE_VERIFY

    return verCurrentState.esStack[--verCurrentState.esStackDepth];
}
/*****************************************************************************
 *
 *  Peek at the n'th (0-based) tree from the top of the stack.
 */

StackEntry& Compiler::impStackTop(unsigned n)
{
    if (verCurrentState.esStackDepth <= n)
    {
        BADCODE("stack underflow");
    }

    return verCurrentState.esStack[verCurrentState.esStackDepth - n - 1];
}

unsigned Compiler::impStackHeight()
{
    return verCurrentState.esStackDepth;
}
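
// Illustrative note: impStackTop(0) is the value on the very top of the
// stack, so (before any pops) a binary opcode's operands would typically be
// fetched as op1 = impStackTop(1).val and op2 = impStackTop(0).val.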
/*****************************************************************************
 *  Some of the trees are spilled specially. While unspilling them, or
 *  making a copy, these need to be handled specially. The function
 *  enumerates the operators possible after spilling.
 */

#ifdef DEBUG // only used in asserts
static bool impValidSpilledStackEntry(GenTree* tree)
{
    if (tree->gtOper == GT_LCL_VAR)
    {
        return true;
    }

    if (tree->OperIsConst())
    {
        return true;
    }

    return false;
}
#endif // DEBUG
/*****************************************************************************
 *
 *  The following logic is used to save/restore stack contents.
 *  If 'copy' is true, then we make a copy of the trees on the stack. These
 *  have to all be cloneable/spilled values.
 */

void Compiler::impSaveStackState(SavedStack* savePtr, bool copy)
{
    savePtr->ssDepth = verCurrentState.esStackDepth;

    if (verCurrentState.esStackDepth)
    {
        savePtr->ssTrees = new (this, CMK_ImpStack) StackEntry[verCurrentState.esStackDepth];
        size_t saveSize  = verCurrentState.esStackDepth * sizeof(*savePtr->ssTrees);

        if (copy)
        {
            StackEntry* table = savePtr->ssTrees;

            /* Make a fresh copy of all the stack entries */

            for (unsigned level = 0; level < verCurrentState.esStackDepth; level++, table++)
            {
                table->seTypeInfo = verCurrentState.esStack[level].seTypeInfo;
                GenTree* tree     = verCurrentState.esStack[level].val;

                assert(impValidSpilledStackEntry(tree));

                switch (tree->gtOper)
                {
                    case GT_CNS_INT:
                    case GT_CNS_LNG:
                    case GT_CNS_DBL:
                    case GT_CNS_STR:
                    case GT_LCL_VAR:
                        table->val = gtCloneExpr(tree);
                        break;

                    default:
                        assert(!"Bad oper - Not covered by impValidSpilledStackEntry()");
                        break;
                }
            }
        }
        else
        {
            memcpy(savePtr->ssTrees, verCurrentState.esStack, saveSize);
        }
    }
}
void Compiler::impRestoreStackState(SavedStack* savePtr)
{
    verCurrentState.esStackDepth = savePtr->ssDepth;

    if (verCurrentState.esStackDepth)
    {
        memcpy(verCurrentState.esStack, savePtr->ssTrees,
               verCurrentState.esStackDepth * sizeof(*verCurrentState.esStack));
    }
}
/*****************************************************************************
 *
 *  Get the tree list started for a new basic block.
 */
inline void Compiler::impBeginTreeList()
{
    assert(impTreeList == nullptr && impTreeLast == nullptr);

    impTreeList = impTreeLast = new (this, GT_BEG_STMTS) GenTree(GT_BEG_STMTS, TYP_VOID);
}
/*****************************************************************************
 *
 *  Store the given start and end stmt in the given basic block. This is
 *  mostly called by impEndTreeList(BasicBlock *block). It is called
 *  directly only for handling CEE_LEAVEs out of finally-protected try's.
 */

inline void Compiler::impEndTreeList(BasicBlock* block, GenTree* firstStmt, GenTree* lastStmt)
{
    assert(firstStmt->gtOper == GT_STMT);
    assert(lastStmt->gtOper == GT_STMT);

    /* Make the list circular, so that we can easily walk it backwards */

    firstStmt->gtPrev = lastStmt;

    /* Store the tree list in the basic block */

    block->bbTreeList = firstStmt;

    /* The block should not already be marked as imported */
    assert((block->bbFlags & BBF_IMPORTED) == 0);

    block->bbFlags |= BBF_IMPORTED;
}
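
// Illustrative sketch of the resulting statement list:
//
//     firstStmt -> s2 -> ... -> lastStmt -> nullptr     (gtNext chain)
//     firstStmt->gtPrev == lastStmt                     (backward entry point)
//
// Only the head's gtPrev is made "circular"; that is enough to find the last
// statement of a block in constant time when walking backwards.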
/*****************************************************************************
 *
 *  Store the current tree list in the given basic block.
 */

inline void Compiler::impEndTreeList(BasicBlock* block)
{
    assert(impTreeList->gtOper == GT_BEG_STMTS);

    GenTree* firstTree = impTreeList->gtNext;

    if (!firstTree)
    {
        /* The block should not already be marked as imported */
        assert((block->bbFlags & BBF_IMPORTED) == 0);

        // Empty block. Just mark it as imported
        block->bbFlags |= BBF_IMPORTED;
    }
    else
    {
        // Ignore the GT_BEG_STMTS
        assert(firstTree->gtPrev == impTreeList);

        impEndTreeList(block, firstTree, impTreeLast);
    }

    if (impLastILoffsStmt != nullptr)
    {
        impLastILoffsStmt->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
        impLastILoffsStmt                          = nullptr;
    }

    impTreeList = impTreeLast = nullptr;
}
/*****************************************************************************
 *
 *  Check that storing the given tree doesn't mess up the semantic order. Note
 *  that this has only limited value as we can only check [0..chkLevel).
 */

inline void Compiler::impAppendStmtCheck(GenTree* stmt, unsigned chkLevel)
{
#ifndef DEBUG
    return;
#else
    assert(stmt->gtOper == GT_STMT);

    if (chkLevel == (unsigned)CHECK_SPILL_ALL)
    {
        chkLevel = verCurrentState.esStackDepth;
    }

    if (verCurrentState.esStackDepth == 0 || chkLevel == 0 || chkLevel == (unsigned)CHECK_SPILL_NONE)
    {
        return;
    }

    GenTree* tree = stmt->gtStmt.gtStmtExpr;

    // Calls can only be appended if there are no GTF_GLOB_EFFECT on the stack

    if (tree->gtFlags & GTF_CALL)
    {
        for (unsigned level = 0; level < chkLevel; level++)
        {
            assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_EFFECT) == 0);
        }
    }

    if (tree->gtOper == GT_ASG)
    {
        // For an assignment to a local variable, all references of that
        // variable have to be spilled. If it is aliased, all calls and
        // indirect accesses have to be spilled

        if (tree->gtOp.gtOp1->gtOper == GT_LCL_VAR)
        {
            unsigned lclNum = tree->gtOp.gtOp1->gtLclVarCommon.gtLclNum;
            for (unsigned level = 0; level < chkLevel; level++)
            {
                assert(!gtHasRef(verCurrentState.esStack[level].val, lclNum, false));
                assert(!lvaTable[lclNum].lvAddrExposed ||
                       (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT) == 0);
            }
        }

        // If the access may be to global memory, all side effects have to be spilled.

        else if (tree->gtOp.gtOp1->gtFlags & GTF_GLOB_REF)
        {
            for (unsigned level = 0; level < chkLevel; level++)
            {
                assert((verCurrentState.esStack[level].val->gtFlags & GTF_GLOB_REF) == 0);
            }
        }
    }
#endif
}
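
// Illustrative example of the interference being checked: if the stack still
// holds a use of local V02 and we were to append
//
//     V02 = call()
//
// without spilling, the pending stack use of V02 would evaluate after the
// assignment and observe the wrong value; hence the gtHasRef assert above.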
/*****************************************************************************
 *
 *  Append the given GT_STMT node to the current block's tree list.
 *  [0..chkLevel) is the portion of the stack which we will check for
 *  interference with stmt and spill if needed.
 */

inline void Compiler::impAppendStmt(GenTree* stmt, unsigned chkLevel)
{
    assert(stmt->gtOper == GT_STMT);
    noway_assert(impTreeLast != nullptr);

    /* If the statement being appended has any side-effects, check the stack
       to see if anything needs to be spilled to preserve correct ordering. */

    GenTree* expr  = stmt->gtStmt.gtStmtExpr;
    unsigned flags = expr->gtFlags & GTF_GLOB_EFFECT;

    // Assignments to (unaliased) locals don't count as a side-effect as
    // we handle them specially using impSpillLclRefs(). Temp locals should
    // be fine too.

    if ((expr->gtOper == GT_ASG) && (expr->gtOp.gtOp1->gtOper == GT_LCL_VAR) &&
        !(expr->gtOp.gtOp1->gtFlags & GTF_GLOB_REF) && !gtHasLocalsWithAddrOp(expr->gtOp.gtOp2))
    {
        unsigned op2Flags = expr->gtOp.gtOp2->gtFlags & GTF_GLOB_EFFECT;
        assert(flags == (op2Flags | GTF_ASG));
        flags = op2Flags;
    }

    if (chkLevel == (unsigned)CHECK_SPILL_ALL)
    {
        chkLevel = verCurrentState.esStackDepth;
    }

    if (chkLevel && chkLevel != (unsigned)CHECK_SPILL_NONE)
    {
        assert(chkLevel <= verCurrentState.esStackDepth);

        if (flags)
        {
            // If there is a call, we have to spill global refs
            bool spillGlobEffects = (flags & GTF_CALL) ? true : false;

            if (expr->gtOper == GT_ASG)
            {
                GenTree* lhs = expr->gtGetOp1();
                // If we are assigning to a global ref, we have to spill global refs on stack.
                // TODO-1stClassStructs: Previously, spillGlobEffects was set to true for
                // GT_INITBLK and GT_COPYBLK, but this is overly conservative, and should be
                // revisited. (Note that it was NOT set to true for GT_COPYOBJ.)
                if (!expr->OperIsBlkOp())
                {
                    // If we are assigning to a global ref, we have to spill global refs on stack
                    if ((lhs->gtFlags & GTF_GLOB_REF) != 0)
                    {
                        spillGlobEffects = true;
                    }
                }
                else if ((lhs->OperIsBlk() && !lhs->AsBlk()->HasGCPtr()) ||
                         ((lhs->OperGet() == GT_LCL_VAR) &&
                          (lvaTable[lhs->AsLclVarCommon()->gtLclNum].lvStructGcCount == 0)))
                {
                    spillGlobEffects = true;
                }
            }

            impSpillSideEffects(spillGlobEffects, chkLevel DEBUGARG("impAppendStmt"));
        }
        else
        {
            impSpillSpecialSideEff();
        }
    }

    impAppendStmtCheck(stmt, chkLevel);

    /* Point 'prev' at the previous node, so that we can walk backwards */

    stmt->gtPrev = impTreeLast;

    /* Append the expression statement to the list */

    impTreeLast->gtNext = stmt;
    impTreeLast         = stmt;

#ifdef FEATURE_SIMD
    impMarkContiguousSIMDFieldAssignments(stmt);
#endif

    /* Once we set impCurStmtOffs in an appended tree, we are ready to
       report the following offsets. So reset impCurStmtOffs */

    if (impTreeLast->gtStmt.gtStmtILoffsx == impCurStmtOffs)
    {
        impCurStmtOffsSet(BAD_IL_OFFSET);
    }

    if (impLastILoffsStmt == nullptr)
    {
        impLastILoffsStmt = stmt;
    }
}
/*****************************************************************************
 *
 *  Insert the given GT_STMT "stmt" before GT_STMT "stmtBefore".
 */

inline void Compiler::impInsertStmtBefore(GenTree* stmt, GenTree* stmtBefore)
{
    assert(stmt->gtOper == GT_STMT);
    assert(stmtBefore->gtOper == GT_STMT);

    GenTree* stmtPrev  = stmtBefore->gtPrev;
    stmt->gtPrev       = stmtPrev;
    stmt->gtNext       = stmtBefore;
    stmtPrev->gtNext   = stmt;
    stmtBefore->gtPrev = stmt;
}
/*****************************************************************************
 *
 *  Append the given expression tree to the current block's tree list.
 *  Return the newly created statement.
 */

GenTree* Compiler::impAppendTree(GenTree* tree, unsigned chkLevel, IL_OFFSETX offset)
{
    assert(tree);

    /* Allocate an 'expression statement' node */

    GenTree* expr = gtNewStmt(tree, offset);

    /* Append the statement to the current block's stmt list */

    impAppendStmt(expr, chkLevel);

    return expr;
}
/*****************************************************************************
 *
 *  Insert the given expression tree before GT_STMT "stmtBefore".
 */

void Compiler::impInsertTreeBefore(GenTree* tree, IL_OFFSETX offset, GenTree* stmtBefore)
{
    assert(stmtBefore->gtOper == GT_STMT);

    /* Allocate an 'expression statement' node */

    GenTree* expr = gtNewStmt(tree, offset);

    /* Insert the statement in the statement list */

    impInsertStmtBefore(expr, stmtBefore);
}
/*****************************************************************************
 *
 *  Append an assignment of the given value to a temp to the current tree list.
 *  curLevel is the stack level for which the spill to the temp is being done.
 */

void Compiler::impAssignTempGen(unsigned    tmp,
                                GenTree*    val,
                                unsigned    curLevel,
                                GenTree**   pAfterStmt, /* = NULL */
                                IL_OFFSETX  ilOffset,   /* = BAD_IL_OFFSET */
                                BasicBlock* block       /* = NULL */
                                )
{
    GenTree* asg = gtNewTempAssign(tmp, val);

    if (!asg->IsNothingNode())
    {
        if (pAfterStmt)
        {
            GenTree* asgStmt = gtNewStmt(asg, ilOffset);
            *pAfterStmt      = fgInsertStmtAfter(block, *pAfterStmt, asgStmt);
        }
        else
        {
            impAppendTree(asg, curLevel, impCurStmtOffs);
        }
    }
}
/*****************************************************************************
 * Same as above, but handles the valueclass case too.
 */

void Compiler::impAssignTempGen(unsigned             tmpNum,
                                GenTree*             val,
                                CORINFO_CLASS_HANDLE structType,
                                unsigned             curLevel,
                                GenTree**            pAfterStmt, /* = NULL */
                                IL_OFFSETX           ilOffset,   /* = BAD_IL_OFFSET */
                                BasicBlock*          block       /* = NULL */
                                )
{
    GenTree* asg;

    if (varTypeIsStruct(val))
    {
        assert(tmpNum < lvaCount);
        assert(structType != NO_CLASS_HANDLE);

        // if the method is non-verifiable the assert is not true
        // so at least ignore it in the case when verification is turned on
        // since any block that tries to use the temp would have failed verification.
        var_types varType = lvaTable[tmpNum].lvType;
        assert(tiVerificationNeeded || varType == TYP_UNDEF || varTypeIsStruct(varType));
        lvaSetStruct(tmpNum, structType, false);

        // Now, set the type of the struct value. Note that lvaSetStruct may modify the type
        // of the lclVar to a specialized type (e.g. TYP_SIMD), based on the handle (structType)
        // that has been passed in for the value being assigned to the temp, in which case we
        // need to set 'val' to that same type.
        // Note also that if we always normalized the types of any node that might be a struct
        // type, this would not be necessary - but that requires additional JIT/EE interface
        // calls that may not actually be required - e.g. if we only access a field of a struct.

        val->gtType = lvaTable[tmpNum].lvType;

        GenTree* dst = gtNewLclvNode(tmpNum, val->gtType);
        asg          = impAssignStruct(dst, val, structType, curLevel, pAfterStmt, ilOffset, block);
    }
    else
    {
        asg = gtNewTempAssign(tmpNum, val);
    }

    if (!asg->IsNothingNode())
    {
        if (pAfterStmt)
        {
            GenTree* asgStmt = gtNewStmt(asg, ilOffset);
            *pAfterStmt      = fgInsertStmtAfter(block, *pAfterStmt, asgStmt);
        }
        else
        {
            impAppendTree(asg, curLevel, impCurStmtOffs);
        }
    }
}
/*****************************************************************************
 *
 *  Pop the given number of values from the stack and return a list node with
 *  their values.
 *  The 'prefixTree' argument may optionally contain an argument
 *  list that is prepended to the list returned from this function.
 *
 *  The notion of prepended is a bit misleading in that the list is backwards
 *  from the way I would expect: The first element popped is at the end of
 *  the returned list, and prefixTree is 'before' that, meaning closer to
 *  the end of the list. To get to prefixTree, you have to walk to the
 *  end of the list.
 *
 *  For ARG_ORDER_R2L prefixTree is only used to insert extra arguments, as
 *  such we reverse its meaning such that returnValue has a reversed
 *  prefixTree at the head of the list.
 */
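
// Illustrative example of the ordering: popping two call arguments (arg1
// pushed first, arg2 on top) with no prefixTree yields the list
// arg1 -> arg2, i.e. the first value popped (arg2) sits at the end and the
// last value popped (arg1) becomes the head.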
GenTreeArgList* Compiler::impPopList(unsigned count, CORINFO_SIG_INFO* sig, GenTreeArgList* prefixTree)
{
    assert(sig == nullptr || count == sig->numArgs);

    CORINFO_CLASS_HANDLE structType;
    GenTreeArgList*      treeList;

    if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L)
    {
        treeList = nullptr;
    }
    else
    { // ARG_ORDER_L2R
        treeList = prefixTree;
    }

    while (count--)
    {
        StackEntry se   = impPopStack();
        typeInfo   ti   = se.seTypeInfo;
        GenTree*   temp = se.val;

        if (varTypeIsStruct(temp))
        {
            // Morph trees that aren't already OBJs or MKREFANY to be OBJs
            assert(ti.IsType(TI_STRUCT));
            structType = ti.GetClassHandleForValueClass();
#ifdef DEBUG
            if (verbose)
            {
                printf("Calling impNormStructVal on:\n");
                gtDispTree(temp);
            }
#endif
            temp = impNormStructVal(temp, structType, (unsigned)CHECK_SPILL_ALL);
#ifdef DEBUG
            if (verbose)
            {
                printf("resulting tree:\n");
                gtDispTree(temp);
            }
#endif
        }

        /* NOTE: we defer bashing the type for I_IMPL to fgMorphArgs */
        treeList = gtNewListNode(temp, treeList);
    }

    if (sig != nullptr)
    {
        if (sig->retTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS &&
            sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR && sig->retType != CORINFO_TYPE_VAR)
        {
            // Make sure that all valuetypes (including enums) that we push are loaded.
            // This is to guarantee that if a GC is triggered from the prestub of this method,
            // all valuetypes in the method signature are already loaded.
            // We need to be able to find the size of the valuetypes, but we cannot
            // do a class-load from within GC.
            info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(sig->retTypeSigClass);
        }
        CORINFO_ARG_LIST_HANDLE argLst = sig->args;
        CORINFO_CLASS_HANDLE    argClass;
        CORINFO_CLASS_HANDLE    argRealClass;
        GenTreeArgList*         args;

        for (args = treeList, count = sig->numArgs; count > 0; args = args->Rest(), count--)
        {
            PREFIX_ASSUME(args != nullptr);

            CorInfoType corType = strip(info.compCompHnd->getArgType(sig, argLst, &argClass));

            // insert implied casts (from float to double or double to float)

            if (corType == CORINFO_TYPE_DOUBLE && args->Current()->TypeGet() == TYP_FLOAT)
            {
                args->Current() = gtNewCastNode(TYP_DOUBLE, args->Current(), false, TYP_DOUBLE);
            }
            else if (corType == CORINFO_TYPE_FLOAT && args->Current()->TypeGet() == TYP_DOUBLE)
            {
                args->Current() = gtNewCastNode(TYP_FLOAT, args->Current(), false, TYP_FLOAT);
            }

            // insert any widening or narrowing casts for backwards compatibility

            args->Current() = impImplicitIorI4Cast(args->Current(), JITtype2varType(corType));

            if (corType != CORINFO_TYPE_CLASS && corType != CORINFO_TYPE_BYREF && corType != CORINFO_TYPE_PTR &&
                corType != CORINFO_TYPE_VAR && (argRealClass = info.compCompHnd->getArgClass(sig, argLst)) != nullptr)
            {
                // Everett MC++ could generate IL with mismatched valuetypes. It used to work with the Everett JIT,
                // but it stopped working in Whidbey when we started passing simple valuetypes as underlying
                // primitive types. We will try to adjust for this case here to avoid breaking customers' code
                // (see VSW 485789 for details).
                if (corType == CORINFO_TYPE_VALUECLASS && !varTypeIsStruct(args->Current()))
                {
                    args->Current() = impNormStructVal(args->Current(), argRealClass, (unsigned)CHECK_SPILL_ALL, true);
                }

                // Make sure that all valuetypes (including enums) that we push are loaded.
                // This is to guarantee that if a GC is triggered from the prestub of this method,
                // all valuetypes in the method signature are already loaded.
                // We need to be able to find the size of the valuetypes, but we cannot
                // do a class-load from within GC.
                info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(argRealClass);
            }

            argLst = info.compCompHnd->getArgNext(argLst);
        }
    }
    if (Target::g_tgtArgOrder == Target::ARG_ORDER_R2L)
    {
        // Prepend the prefixTree

        // Simple in-place reversal to place treeList
        // at the end of a reversed prefixTree
        while (prefixTree != nullptr)
        {
            GenTreeArgList* next = prefixTree->Rest();
            prefixTree->Rest()   = treeList;
            treeList             = prefixTree;
            prefixTree           = next;
        }
    }

    return treeList;
}
/*****************************************************************************
 *
 *  Pop the given number of values from the stack in reverse order (STDCALL/CDECL etc.)
 *  The first "skipReverseCount" items are not reversed.
 */

GenTreeArgList* Compiler::impPopRevList(unsigned count, CORINFO_SIG_INFO* sig, unsigned skipReverseCount)
{
    assert(skipReverseCount <= count);

    GenTreeArgList* list = impPopList(count, sig);

    // reverse the list
    if (list == nullptr || skipReverseCount == count)
    {
        return list;
    }

    GenTreeArgList* ptr          = nullptr; // Initialized to the first node that needs to be reversed
    GenTreeArgList* lastSkipNode = nullptr; // Will be set to the last node that does not need to be reversed

    if (skipReverseCount == 0)
    {
        ptr = list;
    }
    else
    {
        lastSkipNode = list;
        // Get to the first node that needs to be reversed
        for (unsigned i = 0; i < skipReverseCount - 1; i++)
        {
            lastSkipNode = lastSkipNode->Rest();
        }

        PREFIX_ASSUME(lastSkipNode != nullptr);
        ptr = lastSkipNode->Rest();
    }

    GenTreeArgList* reversedList = nullptr;

    do
    {
        GenTreeArgList* tmp = ptr->Rest();
        ptr->Rest()         = reversedList;
        reversedList        = ptr;
        ptr                 = tmp;
    } while (ptr != nullptr);

    if (skipReverseCount)
    {
        lastSkipNode->Rest() = reversedList;
        return list;
    }

    return reversedList;
}
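
// Illustrative example: with count = 4 and skipReverseCount = 1, a popped
// list a -> b -> c -> d becomes a -> d -> c -> b; 'a' is skipped and the
// remaining tail is reversed in place.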
//------------------------------------------------------------------------
// impAssignStruct: Assign (copy) the structure from 'src' to 'dest'.
//
// Arguments:
//    dest       - destination of the assignment
//    src        - source of the assignment
//    structHnd  - handle representing the struct type
//    curLevel   - stack level for which a spill may be being done
//    pAfterStmt - statement to insert any additional statements after
//    ilOffset   - il offset for new statements
//    block      - block to insert any additional statements in
//
// Return Value:
//    The tree that should be appended to the statement list that represents the assignment.
//
// Notes:
//    Temp assignments may be appended to impTreeList if spilling is necessary.

GenTree* Compiler::impAssignStruct(GenTree*             dest,
                                   GenTree*             src,
                                   CORINFO_CLASS_HANDLE structHnd,
                                   unsigned             curLevel,
                                   GenTree**            pAfterStmt, /* = nullptr */
                                   IL_OFFSETX           ilOffset,   /* = BAD_IL_OFFSET */
                                   BasicBlock*          block       /* = nullptr */
                                   )
{
    assert(varTypeIsStruct(dest));

    if (ilOffset == BAD_IL_OFFSET)
    {
        ilOffset = impCurStmtOffs;
    }

    while (dest->gtOper == GT_COMMA)
    {
        assert(varTypeIsStruct(dest->gtOp.gtOp2)); // Second thing is the struct

        // Append all the op1 of GT_COMMA trees before we evaluate op2 of the GT_COMMA tree.
        if (pAfterStmt)
        {
            *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(dest->gtOp.gtOp1, ilOffset));
        }
        else
        {
            impAppendTree(dest->gtOp.gtOp1, curLevel, ilOffset); // do the side effect
        }

        // set dest to the second thing
        dest = dest->gtOp.gtOp2;
    }

    assert(dest->gtOper == GT_LCL_VAR || dest->gtOper == GT_RETURN || dest->gtOper == GT_FIELD ||
           dest->gtOper == GT_IND || dest->gtOper == GT_OBJ || dest->gtOper == GT_INDEX);

    if (dest->OperGet() == GT_LCL_VAR && src->OperGet() == GT_LCL_VAR &&
        src->gtLclVarCommon.gtLclNum == dest->gtLclVarCommon.gtLclNum)
    {
        // Self-assignment is a NOP.
        return gtNewNothingNode();
    }

    // TODO-1stClassStructs: Avoid creating an address if it is not needed,
    // or re-creating a Blk node if it is.
    GenTree* destAddr;

    if (dest->gtOper == GT_IND || dest->OperIsBlk())
    {
        destAddr = dest->gtOp.gtOp1;
    }
    else
    {
        destAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, dest);
    }

    return (impAssignStructPtr(destAddr, src, structHnd, curLevel, pAfterStmt, ilOffset, block));
}
//------------------------------------------------------------------------
// impAssignStructPtr: Assign (copy) the structure from 'src' to 'destAddr'.
//
// Arguments:
//    destAddr   - address of the destination of the assignment
//    src        - source of the assignment
//    structHnd  - handle representing the struct type
//    curLevel   - stack level for which a spill may be being done
//    pAfterStmt - statement to insert any additional statements after
//    ilOffset   - il offset for new statements
//    block      - block to insert any additional statements in
//
// Return Value:
//    The tree that should be appended to the statement list that represents the assignment.
//
// Notes:
//    Temp assignments may be appended to impTreeList if spilling is necessary.

GenTree* Compiler::impAssignStructPtr(GenTree*             destAddr,
                                      GenTree*             src,
                                      CORINFO_CLASS_HANDLE structHnd,
                                      unsigned             curLevel,
                                      GenTree**            pAfterStmt, /* = NULL */
                                      IL_OFFSETX           ilOffset,   /* = BAD_IL_OFFSET */
                                      BasicBlock*          block       /* = NULL */
                                      )
{
    var_types destType;
    GenTree*  dest      = nullptr;
    unsigned  destFlags = 0;

    if (ilOffset == BAD_IL_OFFSET)
    {
        ilOffset = impCurStmtOffs;
    }

#if defined(UNIX_AMD64_ABI)
    assert(varTypeIsStruct(src) || (src->gtOper == GT_ADDR && src->TypeGet() == TYP_BYREF));
    // TODO-ARM-BUG: Does ARM need this?
    // TODO-ARM64-BUG: Does ARM64 need this?
    assert(src->gtOper == GT_LCL_VAR || src->gtOper == GT_FIELD || src->gtOper == GT_IND || src->gtOper == GT_OBJ ||
           src->gtOper == GT_CALL || src->gtOper == GT_MKREFANY || src->gtOper == GT_RET_EXPR ||
           src->gtOper == GT_COMMA || src->gtOper == GT_ADDR ||
           (src->TypeGet() != TYP_STRUCT &&
            (GenTree::OperIsSIMD(src->gtOper) || src->OperIsSimdHWIntrinsic() || src->gtOper == GT_LCL_FLD)));
#else  // !defined(UNIX_AMD64_ABI)
    assert(varTypeIsStruct(src));

    assert(src->gtOper == GT_LCL_VAR || src->gtOper == GT_FIELD || src->gtOper == GT_IND || src->gtOper == GT_OBJ ||
           src->gtOper == GT_CALL || src->gtOper == GT_MKREFANY || src->gtOper == GT_RET_EXPR ||
           src->gtOper == GT_COMMA ||
           (src->TypeGet() != TYP_STRUCT &&
            (GenTree::OperIsSIMD(src->gtOper) || src->OperIsSimdHWIntrinsic() || src->gtOper == GT_LCL_FLD)));
#endif // !defined(UNIX_AMD64_ABI)

    if (destAddr->OperGet() == GT_ADDR)
    {
        GenTree* destNode = destAddr->gtGetOp1();
        // If the actual destination is a local, or already a block node, or is a node that
        // will be morphed, don't insert an OBJ(ADDR).
        if (destNode->gtOper == GT_INDEX || destNode->OperIsBlk() ||
            ((destNode->OperGet() == GT_LCL_VAR) && (destNode->TypeGet() == src->TypeGet())))
        {
            dest = destNode;
        }
        destType = destNode->TypeGet();
    }
    else
    {
        destType = src->TypeGet();
    }

    var_types asgType = src->TypeGet();
    if (src->gtOper == GT_CALL)
    {
        if (src->AsCall()->TreatAsHasRetBufArg(this))
        {
            // Case of call returning a struct via hidden retbuf arg

            // insert the return value buffer into the argument list as first byref parameter
            src->gtCall.gtCallArgs = gtNewListNode(destAddr, src->gtCall.gtCallArgs);

            // now returns void, not a struct
            src->gtType = TYP_VOID;

            // return the morphed call node
            return src;
        }
        else
        {
            // Case of call returning a struct in one or more registers.

            var_types returnType = (var_types)src->gtCall.gtReturnType;

            // We won't use a return buffer, so change the type of src->gtType to 'returnType'
            src->gtType = genActualType(returnType);

            // First we try to change this to "LclVar/LclFld = call"
            //
            if ((destAddr->gtOper == GT_ADDR) && (destAddr->gtOp.gtOp1->gtOper == GT_LCL_VAR))
            {
                // If it is a multi-reg struct return, don't change the oper to GT_LCL_FLD.
                // That is, the IR will be of the form lclVar = call for multi-reg return
                //
                GenTree* lcl = destAddr->gtOp.gtOp1;
                if (src->AsCall()->HasMultiRegRetVal())
                {
                    // Mark the struct LclVar as used in a MultiReg return context
                    // which currently makes it non promotable.
                    // TODO-1stClassStructs: Eliminate this pessimization when we can more generally
                    // handle multireg returns.
                    lcl->gtFlags |= GTF_DONT_CSE;
                    lvaTable[lcl->gtLclVarCommon.gtLclNum].lvIsMultiRegRet = true;
                }
                else // The call result is not a multireg return
                {
                    // We change this to a GT_LCL_FLD (from a GT_ADDR of a GT_LCL_VAR)
                    lcl->ChangeOper(GT_LCL_FLD);
                    fgLclFldAssign(lcl->gtLclVarCommon.gtLclNum);
                    lcl->gtType = src->gtType;
                    asgType     = src->gtType;
                }

                dest = lcl;

#if defined(_TARGET_ARM_)
                // TODO-Cleanup: This should have been taken care of in the above HasMultiRegRetVal() case,
                // but that method has not been updated to include ARM.
                impMarkLclDstNotPromotable(lcl->gtLclVarCommon.gtLclNum, src, structHnd);
                lcl->gtFlags |= GTF_DONT_CSE;
#elif defined(UNIX_AMD64_ABI)
                // Not allowed for FEATURE_CORECLR which is the only SKU available for System V OSs.
                assert(!src->gtCall.IsVarargs() && "varargs not allowed for System V OSs.");

                // Make the struct non promotable. The eightbytes could contain multiple fields.
                // TODO-1stClassStructs: Eliminate this pessimization when we can more generally
                // handle multireg returns.
                // TODO-Cleanup: Why is this needed here? This seems that it will set this even for
                // non-multireg returns.
                lcl->gtFlags |= GTF_DONT_CSE;
                lvaTable[lcl->gtLclVarCommon.gtLclNum].lvIsMultiRegRet = true;
#endif
            }
            else // we don't have a GT_ADDR of a GT_LCL_VAR
            {
                // !!! The destination could be on stack. !!!
                // This flag will let us choose the correct write barrier.
                asgType   = returnType;
                destFlags = GTF_IND_TGTANYWHERE;
            }
        }
    }
    else if (src->gtOper == GT_RET_EXPR)
    {
        GenTreeCall* call = src->gtRetExpr.gtInlineCandidate->AsCall();
        noway_assert(call->gtOper == GT_CALL);

        if (call->HasRetBufArg())
        {
            // insert the return value buffer into the argument list as first byref parameter
            call->gtCallArgs = gtNewListNode(destAddr, call->gtCallArgs);

            // now returns void, not a struct
            src->gtType  = TYP_VOID;
            call->gtType = TYP_VOID;

            // We already have appended the write to 'dest' GT_CALL's args
            // So now we just return an empty node (pruning the GT_RET_EXPR)
            return src;
        }
        else
        {
            // Case of inline method returning a struct in one or more registers.
            //
            var_types returnType = (var_types)call->gtReturnType;

            // We won't need a return buffer
            asgType      = returnType;
            src->gtType  = genActualType(returnType);
            call->gtType = src->gtType;

            // If we've changed the type, and it no longer matches a local destination,
            // we must use an indirection.
            if ((dest != nullptr) && (dest->OperGet() == GT_LCL_VAR) && (dest->TypeGet() != asgType))
            {
                dest = nullptr;
            }

            // !!! The destination could be on stack. !!!
            // This flag will let us choose the correct write barrier.
            destFlags = GTF_IND_TGTANYWHERE;
        }
    }
    else if (src->OperIsBlk())
    {
        asgType = impNormStructType(structHnd);
        if (src->gtOper == GT_OBJ)
        {
            assert(src->gtObj.gtClass == structHnd);
        }
    }
    else if (src->gtOper == GT_INDEX)
    {
        asgType = impNormStructType(structHnd);
        assert(src->gtIndex.gtStructElemClass == structHnd);
    }
    else if (src->gtOper == GT_MKREFANY)
    {
        // Since we are assigning the result of a GT_MKREFANY,
        // "destAddr" must point to a refany.

        GenTree* destAddrClone;
        destAddr =
            impCloneExpr(destAddr, &destAddrClone, structHnd, curLevel, pAfterStmt DEBUGARG("MKREFANY assignment"));

        assert(OFFSETOF__CORINFO_TypedReference__dataPtr == 0);
        assert(destAddr->gtType == TYP_I_IMPL || destAddr->gtType == TYP_BYREF);
        GetZeroOffsetFieldMap()->Set(destAddr, GetFieldSeqStore()->CreateSingleton(GetRefanyDataField()));
        GenTree*       ptrSlot         = gtNewOperNode(GT_IND, TYP_I_IMPL, destAddr);
        GenTreeIntCon* typeFieldOffset = gtNewIconNode(OFFSETOF__CORINFO_TypedReference__type, TYP_I_IMPL);
        typeFieldOffset->gtFieldSeq    = GetFieldSeqStore()->CreateSingleton(GetRefanyTypeField());
        GenTree* typeSlot =
            gtNewOperNode(GT_IND, TYP_I_IMPL, gtNewOperNode(GT_ADD, destAddr->gtType, destAddrClone, typeFieldOffset));

        // append the assign of the pointer value
        GenTree* asg = gtNewAssignNode(ptrSlot, src->gtOp.gtOp1);
        if (pAfterStmt)
        {
            *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(asg, ilOffset));
        }
        else
        {
            impAppendTree(asg, curLevel, ilOffset);
        }

        // return the assign of the type value, to be appended
        return gtNewAssignNode(typeSlot, src->gtOp.gtOp2);
    }
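
    // Illustrative sketch of what the MKREFANY case above produces: a
    // TypedReference is laid out as { dataPtr, type }, so the code builds
    //
    //     IND(destAddr)                   = src.dataPtr   // offset 0, appended
    //     IND(destAddrClone + typeOffset) = src.type      // returned to caller
    //
    // where typeOffset is OFFSETOF__CORINFO_TypedReference__type.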
    else if (src->gtOper == GT_COMMA)
    {
        // The second thing is the struct or its address.
        assert(varTypeIsStruct(src->gtOp.gtOp2) || src->gtOp.gtOp2->gtType == TYP_BYREF);
        if (pAfterStmt)
        {
            *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(src->gtOp.gtOp1, ilOffset));
        }
        else
        {
            impAppendTree(src->gtOp.gtOp1, curLevel, ilOffset); // do the side effect
        }

        // Evaluate the second thing using recursion.
        return impAssignStructPtr(destAddr, src->gtOp.gtOp2, structHnd, curLevel, pAfterStmt, ilOffset, block);
    }
    else if (src->IsLocal())
    {
        asgType = src->TypeGet();
    }
    else if (asgType == TYP_STRUCT)
    {
        asgType     = impNormStructType(structHnd);
        src->gtType = asgType;
    }

    if (dest == nullptr)
    {
        // TODO-1stClassStructs: We shouldn't really need a block node as the destination
        // if this is a known struct type.
        if (asgType == TYP_STRUCT)
        {
            dest = gtNewObjNode(structHnd, destAddr);
            gtSetObjGcInfo(dest->AsObj());
            // Although an obj as a call argument was always assumed to be a globRef
            // (which is itself overly conservative), that is not true of the operands
            // of a block assignment.
            dest->gtFlags &= ~GTF_GLOB_REF;
            dest->gtFlags |= (destAddr->gtFlags & GTF_GLOB_REF);
        }
        else if (varTypeIsStruct(asgType))
        {
            dest = new (this, GT_BLK) GenTreeBlk(GT_BLK, asgType, destAddr, genTypeSize(asgType));
        }
        else
        {
            dest = gtNewOperNode(GT_IND, asgType, destAddr);
        }
    }
    else
    {
        dest->gtType = asgType;
    }

    dest->gtFlags |= destFlags;
    destFlags = dest->gtFlags;

    // return an assignment node, to be appended
    GenTree* asgNode = gtNewAssignNode(dest, src);
    gtBlockOpInit(asgNode, dest, src, false);

    // TODO-1stClassStructs: Clean up the settings of GTF_DONT_CSE on the lhs
    // of assignments.
    if ((destFlags & GTF_DONT_CSE) == 0)
    {
        dest->gtFlags &= ~(GTF_DONT_CSE);
    }
    return asgNode;
}
/*****************************************************************************
   Given a struct value, and the class handle for that structure, return
   the expression for the address for that structure value.

   willDeref - does the caller guarantee to dereference the pointer.
*/

GenTree* Compiler::impGetStructAddr(GenTree*             structVal,
                                    CORINFO_CLASS_HANDLE structHnd,
                                    unsigned             curLevel,
                                    bool                 willDeref)
{
    assert(varTypeIsStruct(structVal) || eeIsValueClass(structHnd));

    var_types type = structVal->TypeGet();

    genTreeOps oper = structVal->gtOper;

    if (oper == GT_OBJ && willDeref)
    {
        assert(structVal->gtObj.gtClass == structHnd);
        return (structVal->gtObj.Addr());
    }
    else if (oper == GT_CALL || oper == GT_RET_EXPR || oper == GT_OBJ || oper == GT_MKREFANY ||
             structVal->OperIsSimdHWIntrinsic())
    {
        unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));

        impAssignTempGen(tmpNum, structVal, structHnd, curLevel);

        // The 'return value' is now the temp itself

        type          = genActualType(lvaTable[tmpNum].TypeGet());
        GenTree* temp = gtNewLclvNode(tmpNum, type);
        temp          = gtNewOperNode(GT_ADDR, TYP_BYREF, temp);
        return temp;
    }
    else if (oper == GT_COMMA)
    {
        assert(structVal->gtOp.gtOp2->gtType == type); // Second thing is the struct

        GenTree* oldTreeLast  = impTreeLast;
        structVal->gtOp.gtOp2 = impGetStructAddr(structVal->gtOp.gtOp2, structHnd, curLevel, willDeref);
        structVal->gtType     = TYP_BYREF;

        if (oldTreeLast != impTreeLast)
        {
            // Some temp assignment statement was placed on the statement list
            // for Op2, but that would be out of order with op1, so we need to
            // spill op1 onto the statement list after whatever was last
            // before we recursed on Op2 (i.e. before whatever Op2 appended).
            impInsertTreeBefore(structVal->gtOp.gtOp1, impCurStmtOffs, oldTreeLast->gtNext);
            structVal->gtOp.gtOp1 = gtNewNothingNode();
        }

        return (structVal);
    }

    return (gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
}
//------------------------------------------------------------------------
// impNormStructType: Given a (known to be) struct class handle structHnd, normalize its type,
//                    and optionally determine the GC layout of the struct.
//
// Arguments:
//    structHnd     - The class handle for the struct type of interest.
//    gcLayout      - (optional, default nullptr) - a BYTE pointer, allocated by the caller,
//                    into which the gcLayout will be written.
//    pNumGCVars    - (optional, default nullptr) - if non-null, a pointer to an unsigned,
//                    which will be set to the number of GC fields in the struct.
//    pSimdBaseType - (optional, default nullptr) - if non-null, and the struct is a SIMD
//                    type, set to the SIMD base type
//
// Return Value:
//    The JIT type for the struct (e.g. TYP_STRUCT, or TYP_SIMD*).
//    The gcLayout will be returned using the pointers provided by the caller, if non-null.
//    It may also modify the compFloatingPointUsed flag if the type is a SIMD type.
//
// Assumptions:
//    The caller must set gcLayout to nullptr OR ensure that it is large enough
//    (see ICorStaticInfo::getClassGClayout in corinfo.h).
//
// Notes:
//    Normalizing the type involves examining the struct type to determine if it should
//    be modified to one that is handled specially by the JIT, possibly being a candidate
//    for full enregistration, e.g. TYP_SIMD16.

var_types Compiler::impNormStructType(CORINFO_CLASS_HANDLE structHnd,
                                      BYTE*                gcLayout,
                                      unsigned*            pNumGCVars,
                                      var_types*           pSimdBaseType)
{
    assert(structHnd != NO_CLASS_HANDLE);

    const DWORD structFlags = info.compCompHnd->getClassAttribs(structHnd);
    var_types   structType  = TYP_STRUCT;

    // On coreclr the check for GC includes a "may" to account for the special
    // ByRef like span structs. The added check for "CONTAINS_STACK_PTR" is the particular bit.
    // When this is set the struct will contain a ByRef that could be a GC pointer or a native
    // pointer.
    const bool mayContainGCPtrs =
        ((structFlags & CORINFO_FLG_CONTAINS_STACK_PTR) != 0 || ((structFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0));

#ifdef FEATURE_SIMD
    // Check to see if this is a SIMD type.
    if (featureSIMD && !mayContainGCPtrs)
    {
        unsigned originalSize = info.compCompHnd->getClassSize(structHnd);

        if ((originalSize >= minSIMDStructBytes()) && (originalSize <= maxSIMDStructBytes()))
        {
            unsigned int sizeBytes;
            var_types    simdBaseType = getBaseTypeAndSizeOfSIMDType(structHnd, &sizeBytes);
            if (simdBaseType != TYP_UNKNOWN)
            {
                assert(sizeBytes == originalSize);
                structType = getSIMDTypeForSize(sizeBytes);
                if (pSimdBaseType != nullptr)
                {
                    *pSimdBaseType = simdBaseType;
                }
                // Also indicate that we use floating point registers.
                compFloatingPointUsed = true;
            }
        }
    }
#endif // FEATURE_SIMD

    // Fetch GC layout info if requested
    if (gcLayout != nullptr)
    {
        unsigned numGCVars = info.compCompHnd->getClassGClayout(structHnd, gcLayout);

        // Verify that the quick test up above via the class attributes gave a
        // safe view of the type's GCness.
        //
        // Note there are cases where mayContainGCPtrs is true but getClassGClayout
        // does not report any gc fields.

        assert(mayContainGCPtrs || (numGCVars == 0));

        if (pNumGCVars != nullptr)
        {
            *pNumGCVars = numGCVars;
        }
    }
    else
    {
        // Can't safely ask for number of GC pointers without also
        // asking for layout.
        assert(pNumGCVars == nullptr);
    }

    return structType;
}
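
// Illustrative example: with FEATURE_SIMD enabled, a 16-byte struct with no
// GC fields that maps to a recognized SIMD type (e.g. System.Numerics.Vector4)
// comes back from impNormStructType as TYP_SIMD16 rather than TYP_STRUCT,
// making it a candidate for enregistration.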
//****************************************************************************
//  Given TYP_STRUCT value 'structVal', make sure it is 'canonical', that is
//  it is either an OBJ or a MKREFANY node, or a node (e.g. GT_INDEX) that will be morphed.
//
GenTree* Compiler::impNormStructVal(GenTree*             structVal,
                                    CORINFO_CLASS_HANDLE structHnd,
                                    unsigned             curLevel,
                                    bool                 forceNormalization /*=false*/)
{
    assert(forceNormalization || varTypeIsStruct(structVal));
    assert(structHnd != NO_CLASS_HANDLE);
    var_types structType = structVal->TypeGet();
    bool      makeTemp   = false;
    if (structType == TYP_STRUCT)
    {
        structType = impNormStructType(structHnd);
    }
    bool                 alreadyNormalized = false;
    GenTreeLclVarCommon* structLcl         = nullptr;

    genTreeOps oper = structVal->OperGet();
    switch (oper)
    {
        // GT_RETURN and GT_MKREFANY don't capture the handle.
        case GT_RETURN:
            break;
        case GT_MKREFANY:
            alreadyNormalized = true;
            break;

        case GT_CALL:
            structVal->gtCall.gtRetClsHnd = structHnd;
            makeTemp                      = true;
            break;

        case GT_RET_EXPR:
            structVal->gtRetExpr.gtRetClsHnd = structHnd;
            makeTemp                         = true;
            break;

        case GT_ARGPLACE:
            structVal->gtArgPlace.gtArgPlaceClsHnd = structHnd;
            break;

        case GT_INDEX:
            // This will be transformed to an OBJ later.
            alreadyNormalized                    = true;
            structVal->gtIndex.gtStructElemClass = structHnd;
            structVal->gtIndex.gtIndElemSize     = info.compCompHnd->getClassSize(structHnd);
            break;

        case GT_FIELD:
            // Wrap it in a GT_OBJ.
            structVal->gtType = structType;
            structVal         = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
            break;

        case GT_LCL_VAR:
        case GT_LCL_FLD:
            structLcl = structVal->AsLclVarCommon();
            // Wrap it in a GT_OBJ.
            structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
            __fallthrough;

        case GT_OBJ:
        case GT_BLK:
        case GT_DYN_BLK:
        case GT_ASG:
            // These should already have the appropriate type.
            assert(structVal->gtType == structType);
            alreadyNormalized = true;
            break;

        case GT_IND:
            assert(structVal->gtType == structType);
            structVal         = gtNewObjNode(structHnd, structVal->gtGetOp1());
            alreadyNormalized = true;
            break;

#ifdef FEATURE_SIMD
        case GT_SIMD:
            assert(varTypeIsSIMD(structVal) && (structVal->gtType == structType));
            break;
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
        case GT_HWIntrinsic:
            assert(varTypeIsSIMD(structVal) && (structVal->gtType == structType));
            break;
#endif

        case GT_COMMA:
        {
            // The second thing could either be a block node or a GT_FIELD or a GT_SIMD or a GT_COMMA node.
            GenTree* blockNode = structVal->gtOp.gtOp2;
            assert(blockNode->gtType == structType);

            // Is this GT_COMMA(op1, GT_COMMA())?
            GenTree* parent = structVal;
            if (blockNode->OperGet() == GT_COMMA)
            {
                // Find the last node in the comma chain.
                do
                {
                    assert(blockNode->gtType == structType);
                    parent    = blockNode;
                    blockNode = blockNode->gtOp.gtOp2;
                } while (blockNode->OperGet() == GT_COMMA);
            }

            if (blockNode->OperGet() == GT_FIELD)
            {
                // If we have a GT_FIELD then wrap it in a GT_OBJ.
                blockNode = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, blockNode));
            }

#ifdef FEATURE_SIMD
            if (blockNode->OperIsSIMDorSimdHWintrinsic())
            {
                parent->gtOp.gtOp2 = impNormStructVal(blockNode, structHnd, curLevel, forceNormalization);
                alreadyNormalized  = true;
            }
            else
#endif
            {
                noway_assert(blockNode->OperIsBlk());

                // Sink the GT_COMMA below the blockNode addr.
                // That is GT_COMMA(op1, op2=blockNode) is transformed into
                // blockNode(GT_COMMA(TYP_BYREF, op1, op2's op1)).
                //
                // In case of a chained GT_COMMA case, we sink the last
                // GT_COMMA below the blockNode addr.
                GenTree* blockNodeAddr = blockNode->gtOp.gtOp1;
                assert(blockNodeAddr->gtType == TYP_BYREF);
                GenTree* commaNode    = parent;
                commaNode->gtType     = TYP_BYREF;
                commaNode->gtOp.gtOp2 = blockNodeAddr;
                blockNode->gtOp.gtOp1 = commaNode;
                if (parent == structVal)
                {
                    structVal = blockNode;
                }
                alreadyNormalized = true;
            }
        }
        break;
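
        // Illustrative sketch of the sinking above:
        //
        //     COMMA(op1, OBJ(addr))   ==>   OBJ(COMMA(op1, addr))
        //
        // The block node moves to the root and the comma (now TYP_BYREF)
        // sequences op1's side effect inside the OBJ's address operand.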
        default:
            noway_assert(!"Unexpected node in impNormStructVal()");
            break;
    }
    structVal->gtType  = structType;
    GenTree* structObj = structVal;

    if (!alreadyNormalized || forceNormalization)
    {
        if (makeTemp)
        {
            unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));

            impAssignTempGen(tmpNum, structVal, structHnd, curLevel);

            // The structVal is now the temp itself

            structLcl = gtNewLclvNode(tmpNum, structType)->AsLclVarCommon();
            // TODO-1stClassStructs: Avoid always wrapping in GT_OBJ.
            structObj = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structLcl));
        }
        else if (varTypeIsStruct(structType) && !structVal->OperIsBlk())
        {
            // Wrap it in a GT_OBJ
            structObj = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
        }
    }

    if (structLcl != nullptr)
    {
        // An OBJ on an ADDR(LCL_VAR) can never raise an exception
        // so we don't set GTF_EXCEPT here.
        if (!lvaIsImplicitByRefLocal(structLcl->gtLclNum))
        {
            structObj->gtFlags &= ~GTF_GLOB_REF;
        }
    }
    else
    {
        // In general an OBJ is an indirection and could raise an exception.
        structObj->gtFlags |= GTF_EXCEPT;
    }
    return (structObj);
}
/******************************************************************************/
// Given a type token, generate code that will evaluate to the correct
// handle representation of that token (type handle, field handle, or method handle)
//
// For most cases, the handle is determined at compile-time, and the code
// generated is simply an embedded handle.
//
// Run-time lookup is required if the enclosing method is shared between instantiations
// and the token refers to formal type parameters whose instantiation is not known
// at compile-time.
//
GenTree* Compiler::impTokenToHandle(CORINFO_RESOLVED_TOKEN* pResolvedToken,
                                    BOOL*                   pRuntimeLookup /* = NULL */,
                                    BOOL                    mustRestoreHandle /* = FALSE */,
                                    BOOL                    importParent /* = FALSE */)
{
    assert(!fgGlobalMorph);

    CORINFO_GENERICHANDLE_RESULT embedInfo;
    info.compCompHnd->embedGenericHandle(pResolvedToken, importParent, &embedInfo);

    if (pRuntimeLookup)
    {
        *pRuntimeLookup = embedInfo.lookup.lookupKind.needsRuntimeLookup;
    }

    if (mustRestoreHandle && !embedInfo.lookup.lookupKind.needsRuntimeLookup)
    {
        switch (embedInfo.handleType)
        {
            case CORINFO_HANDLETYPE_CLASS:
                info.compCompHnd->classMustBeLoadedBeforeCodeIsRun((CORINFO_CLASS_HANDLE)embedInfo.compileTimeHandle);
                break;

            case CORINFO_HANDLETYPE_METHOD:
                info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun((CORINFO_METHOD_HANDLE)embedInfo.compileTimeHandle);
                break;

            case CORINFO_HANDLETYPE_FIELD:
                info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(
                    info.compCompHnd->getFieldClass((CORINFO_FIELD_HANDLE)embedInfo.compileTimeHandle));
                break;

            default:
                break;
        }
    }

    // Generate the full lookup tree. May be null if we're abandoning an inline attempt.
    GenTree* result = impLookupToTree(pResolvedToken, &embedInfo.lookup, gtTokenToIconFlags(pResolvedToken->token),
                                      embedInfo.compileTimeHandle);

    // If we have a result and it requires runtime lookup, wrap it in a runtime lookup node.
    if ((result != nullptr) && embedInfo.lookup.lookupKind.needsRuntimeLookup)
    {
        result = gtNewRuntimeLookup(embedInfo.compileTimeHandle, embedInfo.handleType, result);
    }

    return result;
}
GenTree* Compiler::impLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
                                   CORINFO_LOOKUP*         pLookup,
                                   unsigned                handleFlags,
                                   void*                   compileTimeHandle)
{
    if (!pLookup->lookupKind.needsRuntimeLookup)
    {
        // No runtime lookup is required.
        // Access is direct or memory-indirect (of a fixed address) reference

        CORINFO_GENERIC_HANDLE handle       = nullptr;
        void*                  pIndirection = nullptr;
        assert(pLookup->constLookup.accessType != IAT_PPVALUE && pLookup->constLookup.accessType != IAT_RELPVALUE);

        if (pLookup->constLookup.accessType == IAT_VALUE)
        {
            handle = pLookup->constLookup.handle;
        }
        else if (pLookup->constLookup.accessType == IAT_PVALUE)
        {
            pIndirection = pLookup->constLookup.addr;
        }
        return gtNewIconEmbHndNode(handle, pIndirection, handleFlags, compileTimeHandle);
    }
    else if (compIsForInlining())
    {
        // Don't import runtime lookups when inlining
        // Inlining has to be aborted in such a case
        compInlineResult->NoteFatal(InlineObservation::CALLSITE_GENERIC_DICTIONARY_LOOKUP);
        return nullptr;
    }
    else
    {
        // Need to use dictionary-based access which depends on the typeContext
        // which is only available at runtime, not at compile-time.

        return impRuntimeLookupToTree(pResolvedToken, pLookup, compileTimeHandle);
    }
}
#ifdef FEATURE_READYTORUN_COMPILER
GenTree* Compiler::impReadyToRunLookupToTree(CORINFO_CONST_LOOKUP* pLookup,
                                             unsigned              handleFlags,
                                             void*                 compileTimeHandle)
{
    CORINFO_GENERIC_HANDLE handle       = nullptr;
    void*                  pIndirection = nullptr;
    assert(pLookup->accessType != IAT_PPVALUE && pLookup->accessType != IAT_RELPVALUE);

    if (pLookup->accessType == IAT_VALUE)
    {
        handle = pLookup->handle;
    }
    else if (pLookup->accessType == IAT_PVALUE)
    {
        pIndirection = pLookup->addr;
    }
    return gtNewIconEmbHndNode(handle, pIndirection, handleFlags, compileTimeHandle);
}

GenTreeCall* Compiler::impReadyToRunHelperToTree(
    CORINFO_RESOLVED_TOKEN* pResolvedToken,
    CorInfoHelpFunc         helper,
    var_types               type,
    GenTreeArgList*         args /* =NULL*/,
    CORINFO_LOOKUP_KIND*    pGenericLookupKind /* =NULL. Only used with generics */)
{
    CORINFO_CONST_LOOKUP lookup;
    if (!info.compCompHnd->getReadyToRunHelper(pResolvedToken, pGenericLookupKind, helper, &lookup))
    {
        return nullptr;
    }

    GenTreeCall* op1 = gtNewHelperCallNode(helper, type, args);

    op1->setEntryPoint(lookup);

    return op1;
}
GenTree* Compiler::impMethodPointer(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo)
{
    GenTree* op1 = nullptr;

    switch (pCallInfo->kind)
    {
        case CORINFO_CALL:
            op1 = new (this, GT_FTN_ADDR) GenTreeFptrVal(TYP_I_IMPL, pCallInfo->hMethod);

#ifdef FEATURE_READYTORUN_COMPILER
            if (opts.IsReadyToRun())
            {
                op1->gtFptrVal.gtEntryPoint = pCallInfo->codePointerLookup.constLookup;
            }
            else
            {
                op1->gtFptrVal.gtEntryPoint.addr       = nullptr;
                op1->gtFptrVal.gtEntryPoint.accessType = IAT_VALUE;
            }
#endif
            break;

        case CORINFO_CALL_CODE_POINTER:
            if (compIsForInlining())
            {
                // Don't import runtime lookups when inlining
                // Inlining has to be aborted in such a case
                compInlineResult->NoteFatal(InlineObservation::CALLSITE_GENERIC_DICTIONARY_LOOKUP);
                return nullptr;
            }

            op1 = impLookupToTree(pResolvedToken, &pCallInfo->codePointerLookup, GTF_ICON_FTN_ADDR, pCallInfo->hMethod);
            break;

        default:
            noway_assert(!"unknown call kind");
            break;
    }

    return op1;
}
//------------------------------------------------------------------------
// getRuntimeContextTree: find pointer to context for runtime lookup.
//
// Arguments:
//    kind - lookup kind.
//
// Return Value:
//    Return GenTree pointer to generic shared context.
//
// Notes:
//    Reports that the generic context is used.

GenTree* Compiler::getRuntimeContextTree(CORINFO_RUNTIME_LOOKUP_KIND kind)
{
    GenTree* ctxTree = nullptr;

    // Collectible types require that for shared generic code, if we use the generic context parameter,
    // we report it. (This is a conservative approach; we could detect some cases, particularly when the
    // context parameter is 'this', where we don't need the eager reporting logic.)
    lvaGenericsContextUseCount++;

    if (kind == CORINFO_LOOKUP_THISOBJ)
    {
        // this Object
        ctxTree = gtNewLclvNode(info.compThisArg, TYP_REF);

        // Vtable pointer of this object
        ctxTree = gtNewOperNode(GT_IND, TYP_I_IMPL, ctxTree);
        ctxTree->gtFlags |= GTF_EXCEPT; // Null-pointer exception
        ctxTree->gtFlags |= GTF_IND_INVARIANT;
    }
    else
    {
        assert(kind == CORINFO_LOOKUP_METHODPARAM || kind == CORINFO_LOOKUP_CLASSPARAM);

        ctxTree = gtNewLclvNode(info.compTypeCtxtArg, TYP_I_IMPL); // Exact method descriptor as passed in as last arg
    }
    return ctxTree;
}
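
// Illustrative note on the three kinds: for a shared instance method the
// context is derived from 'this' via its method table pointer (THISOBJ); for
// a shared generic method the hidden method-descriptor argument is used
// (METHODPARAM); and for a shared method on a generic class the hidden
// class-handle argument is used (CLASSPARAM).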
1975 /*****************************************************************************/
1976 /* Import a dictionary lookup to access a handle in code shared between
1977 generic instantiations.
1978 The lookup depends on the typeContext which is only available at
1979 runtime, and not at compile-time.
1980 pLookup->token1 and pLookup->token2 specify the handle that is needed.
1983 1. pLookup->indirections == CORINFO_USEHELPER : Call a helper passing it the
1984 instantiation-specific handle, and the tokens to lookup the handle.
1985 2. pLookup->indirections != CORINFO_USEHELPER :
1986 2a. pLookup->testForNull == false : Dereference the instantiation-specific handle
1988 2b. pLookup->testForNull == true : Dereference the instantiation-specific handle.
1989 If it is non-NULL, it is the handle required. Else, call a helper
1990 to lookup the handle.
1993 GenTree* Compiler::impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
1994 CORINFO_LOOKUP* pLookup,
1995 void* compileTimeHandle)
1998 // This method can only be called from the importer instance of the Compiler.
1999 // In other word, it cannot be called by the instance of the Compiler for the inlinee.
2000 assert(!compIsForInlining());
2002 GenTree* ctxTree = getRuntimeContextTree(pLookup->lookupKind.runtimeLookupKind);
2004 CORINFO_RUNTIME_LOOKUP* pRuntimeLookup = &pLookup->runtimeLookup;
2005 // It's available only via the run-time helper function
2006 if (pRuntimeLookup->indirections == CORINFO_USEHELPER)
2008 #ifdef FEATURE_READYTORUN_COMPILER
2009 if (opts.IsReadyToRun())
2011 return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL,
2012 gtNewArgList(ctxTree), &pLookup->lookupKind);
2016 GenTree* argNode = gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_TOKEN_HDL, compileTimeHandle);
2017 GenTreeArgList* helperArgs = gtNewArgList(ctxTree, argNode);
2019 return gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, helperArgs);
2023 GenTree* slotPtrTree = ctxTree;
2025 if (pRuntimeLookup->testForNull)
2027 slotPtrTree = impCloneExpr(ctxTree, &ctxTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2028 nullptr DEBUGARG("impRuntimeLookup slot"));
2031 GenTree* indOffTree = nullptr;
2033 // Apply repeated indirections
2034 for (WORD i = 0; i < pRuntimeLookup->indirections; i++)
2036 if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset))
2038 indOffTree = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2039 nullptr DEBUGARG("impRuntimeLookup indirectOffset"));
2044 slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
2045 slotPtrTree->gtFlags |= GTF_IND_NONFAULTING;
2046 slotPtrTree->gtFlags |= GTF_IND_INVARIANT;
2049 if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset))
2051 slotPtrTree = gtNewOperNode(GT_ADD, TYP_I_IMPL, indOffTree, slotPtrTree);
2054 if (pRuntimeLookup->offsets[i] != 0)
2057 slotPtrTree = gtNewOperNode(GT_ADD, TYP_I_IMPL, slotPtrTree, gtNewIconNode(pRuntimeLookup->offsets[i], TYP_I_IMPL));
2061 // No null test required
2062 if (!pRuntimeLookup->testForNull)
2064 if (pRuntimeLookup->indirections == 0)
2069 slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
2070 slotPtrTree->gtFlags |= GTF_IND_NONFAULTING;
2072 if (!pRuntimeLookup->testForFixup)
2077 impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark0"));
2079 unsigned slotLclNum = lvaGrabTemp(true DEBUGARG("impRuntimeLookup test"));
2080 impAssignTempGen(slotLclNum, slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr, impCurStmtOffs);
2082 GenTree* slot = gtNewLclvNode(slotLclNum, TYP_I_IMPL);
2083 // downcast the pointer to a TYP_INT on 64-bit targets
2084 slot = impImplicitIorI4Cast(slot, TYP_INT);
2085 // Use a GT_AND to check for the lowest bit and indirect if it is set
2086 GenTree* test = gtNewOperNode(GT_AND, TYP_INT, slot, gtNewIconNode(1));
2087 GenTree* relop = gtNewOperNode(GT_EQ, TYP_INT, test, gtNewIconNode(0));
2089 // slot = GT_IND(slot - 1)
2090 slot = gtNewLclvNode(slotLclNum, TYP_I_IMPL);
2091 GenTree* add = gtNewOperNode(GT_ADD, TYP_I_IMPL, slot, gtNewIconNode(-1, TYP_I_IMPL));
2092 GenTree* indir = gtNewOperNode(GT_IND, TYP_I_IMPL, add);
2093 indir->gtFlags |= GTF_IND_NONFAULTING;
2094 indir->gtFlags |= GTF_IND_INVARIANT;
2096 slot = gtNewLclvNode(slotLclNum, TYP_I_IMPL);
2097 GenTree* asg = gtNewAssignNode(slot, indir);
2098 GenTree* colon = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), asg);
2099 GenTree* qmark = gtNewQmarkNode(TYP_VOID, relop, colon);
2100 impAppendTree(qmark, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
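// The statement appended above is, roughly: if ((slot & 1) != 0) { slot = *(slot - 1); }
// (an illustrative summary of the relop/colon/qmark built above, not a literal dump).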
2102 return gtNewLclvNode(slotLclNum, TYP_I_IMPL);
2105 assert(pRuntimeLookup->indirections != 0);
2107 impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark1"));
2109 // Extract the handle
2110 GenTree* handle = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
2111 handle->gtFlags |= GTF_IND_NONFAULTING;
2113 GenTree* handleCopy = impCloneExpr(handle, &handle, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
2114 nullptr DEBUGARG("impRuntimeLookup typehandle"));
2117 GenTree* argNode = gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_TOKEN_HDL, compileTimeHandle);
2119 GenTreeArgList* helperArgs = gtNewArgList(ctxTree, argNode);
2120 GenTree* helperCall = gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, helperArgs);
2122 // Check for null and possibly call helper
2123 GenTree* relop = gtNewOperNode(GT_NE, TYP_INT, handle, gtNewIconNode(0, TYP_I_IMPL));
2125 GenTree* colon = new (this, GT_COLON) GenTreeColon(TYP_I_IMPL,
2126 gtNewNothingNode(), // do nothing if nonnull
2129 GenTree* qmark = gtNewQmarkNode(TYP_I_IMPL, relop, colon);
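// Net effect, roughly: tmp = (handle != null) ? handle : helper(ctx, signature).
// The "nothing" branch works because tmp already holds the handle when it was spilled to a local.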
2132 if (handleCopy->IsLocal())
2134 tmp = handleCopy->gtLclVarCommon.gtLclNum;
2138 tmp = lvaGrabTemp(true DEBUGARG("spilling QMark1"));
2141 impAssignTempGen(tmp, qmark, (unsigned)CHECK_SPILL_NONE);
2142 return gtNewLclvNode(tmp, TYP_I_IMPL);
2145 /******************************************************************************
2146 * Spills the stack at verCurrentState.esStack[level] and replaces it with a temp.
2147 * If tnum!=BAD_VAR_NUM, the temp var used to replace the tree is tnum,
2148 * else, grab a new temp.
2149 * For structs (which can be pushed on the stack using obj, etc),
2150 * special handling is needed
2153 struct RecursiveGuard
2158 m_pAddress = nullptr;
2165 *m_pAddress = false;
2169 void Init(bool* pAddress, bool bInitialize)
2171 assert(pAddress && *pAddress == false && "Recursive guard violation");
2172 m_pAddress = pAddress;
2184 bool Compiler::impSpillStackEntry(unsigned level,
2188 bool bAssertOnRecursion,
2195 RecursiveGuard guard;
2196 guard.Init(&impNestedStackSpill, bAssertOnRecursion);
2199 GenTree* tree = verCurrentState.esStack[level].val;
2201 /* Allocate a temp if we haven't been asked to use a particular one */
2203 if (tiVerificationNeeded)
2205 // Ignore bad temp requests (they will happen with bad code and will be
2206 // caught when importing the destblock)
2207 if ((tnum != BAD_VAR_NUM && tnum >= lvaCount) && verNeedsVerification())
2214 if (tnum != BAD_VAR_NUM && (tnum >= lvaCount))
2220 bool isNewTemp = false;
2222 if (tnum == BAD_VAR_NUM)
2224 tnum = lvaGrabTemp(true DEBUGARG(reason));
2227 else if (tiVerificationNeeded && lvaTable[tnum].TypeGet() != TYP_UNDEF)
2229 // if verification is needed and tnum's type is incompatible with
2230 // the type on the stack, we grab a new temp. This is safe since
2231 // we will throw a verification exception in the dest block.
2233 var_types valTyp = tree->TypeGet();
2234 var_types dstTyp = lvaTable[tnum].TypeGet();
2236 // if the two types are different, we return. This will only happen with bad code and will
2237 // be caught when importing the destblock. We still allow int/byrefs and float/double differences.
2238 if ((genActualType(valTyp) != genActualType(dstTyp)) &&
2240 #ifndef _TARGET_64BIT_
2241 (valTyp == TYP_I_IMPL && dstTyp == TYP_BYREF) || (valTyp == TYP_BYREF && dstTyp == TYP_I_IMPL) ||
2242 #endif // !_TARGET_64BIT_
2243 (varTypeIsFloating(dstTyp) && varTypeIsFloating(valTyp))))
2245 if (verNeedsVerification())
2252 /* Assign the spilled entry to the temp */
2253 impAssignTempGen(tnum, tree, verCurrentState.esStack[level].seTypeInfo.GetClassHandle(), level);
2255 // If temp is newly introduced and a ref type, grab what type info we can.
2256 if (isNewTemp && (lvaTable[tnum].lvType == TYP_REF))
2258 assert(lvaTable[tnum].lvSingleDef == 0);
2259 lvaTable[tnum].lvSingleDef = 1;
2260 JITDUMP("Marked V%02u as a single def temp\n", tnum);
2261 CORINFO_CLASS_HANDLE stkHnd = verCurrentState.esStack[level].seTypeInfo.GetClassHandle();
2262 lvaSetClass(tnum, tree, stkHnd);
2264 // If we're assigning a GT_RET_EXPR, note the temp over on the call,
2265 // so the inliner can use it in case it needs a return spill temp.
2266 if (tree->OperGet() == GT_RET_EXPR)
2268 JITDUMP("\n*** see V%02u = GT_RET_EXPR, noting temp\n", tnum);
2269 GenTree* call = tree->gtRetExpr.gtInlineCandidate;
2270 InlineCandidateInfo* ici = call->gtCall.gtInlineCandidateInfo;
2271 ici->preexistingSpillTemp = tnum;
2275 // The tree type may be modified by impAssignTempGen, so use the type of the lclVar.
2276 var_types type = genActualType(lvaTable[tnum].TypeGet());
2277 GenTree* temp = gtNewLclvNode(tnum, type);
2278 verCurrentState.esStack[level].val = temp;
2283 /*****************************************************************************
2285 * Ensure that the stack has only spilled values
2288 void Compiler::impSpillStackEnsure(bool spillLeaves)
2290 assert(!spillLeaves || opts.compDbgCode);
2292 for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2294 GenTree* tree = verCurrentState.esStack[level].val;
2296 if (!spillLeaves && tree->OperIsLeaf())
2301 // Temps introduced by the importer itself don't need to be spilled
2303 bool isTempLcl = (tree->OperGet() == GT_LCL_VAR) && (tree->gtLclVarCommon.gtLclNum >= info.compLocalsCount);
2310 impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillStackEnsure"));
2314 void Compiler::impSpillEvalStack()
2316 for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2318 impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillEvalStack"));
2322 /*****************************************************************************
2324 * If the stack contains any trees with side effects in them, assign those
2325 * trees to temps and append the assignments to the statement list.
2326 * On return the stack is guaranteed to be empty.
2329 inline void Compiler::impEvalSideEffects()
2331 impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("impEvalSideEffects"));
2332 verCurrentState.esStackDepth = 0;
2335 /*****************************************************************************
2337 * If the stack contains any trees with side effects in them, assign those
2338 * trees to temps and replace them on the stack with refs to their temps.
2339 * [0..chkLevel) is the portion of the stack which will be checked and spilled.
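* For example (a sketch of intended usage, based on the checks below): passing
* chkLevel == CHECK_SPILL_ALL spills every stack entry with side effects, while a
* smaller chkLevel restricts the scan to the bottom chkLevel entries only.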
2342 inline void Compiler::impSpillSideEffects(bool spillGlobEffects, unsigned chkLevel DEBUGARG(const char* reason))
2344 assert(chkLevel != (unsigned)CHECK_SPILL_NONE);
2346 /* Before we make any appends to the tree list we must spill the
2347 * "special" side effects (GTF_ORDER_SIDEEFF on a GT_CATCH_ARG) */
2349 impSpillSpecialSideEff();
2351 if (chkLevel == (unsigned)CHECK_SPILL_ALL)
2353 chkLevel = verCurrentState.esStackDepth;
2356 assert(chkLevel <= verCurrentState.esStackDepth);
2358 unsigned spillFlags = spillGlobEffects ? GTF_GLOB_EFFECT : GTF_SIDE_EFFECT;
2360 for (unsigned i = 0; i < chkLevel; i++)
2362 GenTree* tree = verCurrentState.esStack[i].val;
2364 GenTree* lclVarTree;
2366 if ((tree->gtFlags & spillFlags) != 0 ||
2367 (spillGlobEffects && // Only consider the following when spillGlobEffects == TRUE
2368 !impIsAddressInLocal(tree, &lclVarTree) && // No need to spill the GT_ADDR node on a local.
2369 gtHasLocalsWithAddrOp(tree))) // Spill if we still see GT_LCL_VAR that contains lvHasLdAddrOp or
2370 // lvAddrTaken flag.
2372 impSpillStackEntry(i, BAD_VAR_NUM DEBUGARG(false) DEBUGARG(reason));
2377 /*****************************************************************************
2379 * If the stack contains any trees with special side effects in them, assign
2380 * those trees to temps and replace them on the stack with refs to their temps.
2383 inline void Compiler::impSpillSpecialSideEff()
2385 // Only exception objects need to be carefully handled
2387 if (!compCurBB->bbCatchTyp)
2392 for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2394 GenTree* tree = verCurrentState.esStack[level].val;
2395 // Make sure that if we have an exception object anywhere in the subtree, we spill this entry.
2396 if (gtHasCatchArg(tree))
2398 impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillSpecialSideEff"));
2403 /*****************************************************************************
2405 * Spill all stack references to value classes (TYP_STRUCT nodes)
2408 void Compiler::impSpillValueClasses()
2410 for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2412 GenTree* tree = verCurrentState.esStack[level].val;
2414 if (fgWalkTreePre(&tree, impFindValueClasses) == WALK_ABORT)
2416 // Tree walk was aborted, which means that we found a
2417 // value class on the stack. Need to spill that
2420 impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillValueClasses"));
2425 /*****************************************************************************
2427 * Callback that checks if a tree node is TYP_STRUCT
2430 Compiler::fgWalkResult Compiler::impFindValueClasses(GenTree** pTree, fgWalkData* data)
2432 fgWalkResult walkResult = WALK_CONTINUE;
2434 if ((*pTree)->gtType == TYP_STRUCT)
2436 // Abort the walk and indicate that we found a value class
2438 walkResult = WALK_ABORT;
2444 /*****************************************************************************
2446 * If the stack contains any trees with references to local #lclNum, assign
2447 * those trees to temps and replace them on the stack with refs to their temps.
2451 void Compiler::impSpillLclRefs(ssize_t lclNum)
2453 /* Before we make any appends to the tree list we must spill the
2454 * "special" side effects (GTF_ORDER_SIDEEFF on a GT_CATCH_ARG) */
2456 impSpillSpecialSideEff();
2458 for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
2460 GenTree* tree = verCurrentState.esStack[level].val;
2462 /* If the tree may throw an exception, and the block has a handler,
2463 then we need to spill assignments to the local if the local is
2464 live on entry to the handler.
2465 Just spill 'em all without considering the liveness */
2467 bool xcptnCaught = ehBlockHasExnFlowDsc(compCurBB) && (tree->gtFlags & (GTF_CALL | GTF_EXCEPT));
2469 /* Skip the tree if it doesn't have an affected reference,
2470 unless xcptnCaught */
2472 if (xcptnCaught || gtHasRef(tree, lclNum, false))
2474 impSpillStackEntry(level, BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impSpillLclRefs"));
2479 /*****************************************************************************
2481 * Push catch arg onto the stack.
2482 * If there are jumps to the beginning of the handler, insert basic block
2483 * and spill catch arg to a temp. Update the handler block if necessary.
2485 * Returns the basic block of the actual handler.
2488 BasicBlock* Compiler::impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_HANDLE clsHnd, bool isSingleBlockFilter)
2490 // Do not inject the basic block twice on reimport. This should be
2491 // hit only under JIT stress. See if the block is the one we injected.
2492 // Note that EH canonicalization can inject internal blocks here. We might
2493 // be able to re-use such a block (but we don't, right now).
2494 if ((hndBlk->bbFlags & (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET)) ==
2495 (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET))
2497 GenTree* tree = hndBlk->bbTreeList;
2499 if (tree != nullptr && tree->gtOper == GT_STMT)
2501 tree = tree->gtStmt.gtStmtExpr;
2502 assert(tree != nullptr);
2504 if ((tree->gtOper == GT_ASG) && (tree->gtOp.gtOp1->gtOper == GT_LCL_VAR) &&
2505 (tree->gtOp.gtOp2->gtOper == GT_CATCH_ARG))
2507 tree = gtNewLclvNode(tree->gtOp.gtOp1->gtLclVarCommon.gtLclNum, TYP_REF);
2509 impPushOnStack(tree, typeInfo(TI_REF, clsHnd));
2511 return hndBlk->bbNext;
2515 // If we get here, it must have been some other kind of internal block. It's possible that
2516 // someone prepended something to our injected block, but that's unlikely.
2519 /* Push the exception address value on the stack */
2520 GenTree* arg = new (this, GT_CATCH_ARG) GenTree(GT_CATCH_ARG, TYP_REF);
2522 /* Mark the node as having a side-effect - i.e. cannot be
2523 * moved around since it is tied to a fixed location (EAX) */
2524 arg->gtFlags |= GTF_ORDER_SIDEEFF;
2526 #if defined(JIT32_GCENCODER)
2527 const bool forceInsertNewBlock = isSingleBlockFilter || compStressCompile(STRESS_CATCH_ARG, 5);
2529 const bool forceInsertNewBlock = compStressCompile(STRESS_CATCH_ARG, 5);
2530 #endif // defined(JIT32_GCENCODER)
2532 /* Spill GT_CATCH_ARG to a temp if there are jumps to the beginning of the handler */
2533 if (hndBlk->bbRefs > 1 || forceInsertNewBlock)
2535 if (hndBlk->bbRefs == 1)
2540 /* Create extra basic block for the spill */
2541 BasicBlock* newBlk = fgNewBBbefore(BBJ_NONE, hndBlk, /* extendRegion */ true);
2542 newBlk->bbFlags |= BBF_IMPORTED | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET;
2543 newBlk->setBBWeight(hndBlk->bbWeight);
2544 newBlk->bbCodeOffs = hndBlk->bbCodeOffs;
2546 /* Account for the new link we are about to create */
2549 /* Spill into a temp */
2550 unsigned tempNum = lvaGrabTemp(false DEBUGARG("SpillCatchArg"));
2551 lvaTable[tempNum].lvType = TYP_REF;
2552 arg = gtNewTempAssign(tempNum, arg);
2554 hndBlk->bbStkTempsIn = tempNum;
2556 /* Report the debug info. impImportBlockCode won't treat
2557 * the actual handler as an exception block and thus won't do it for us. */
2558 if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES)
2560 impCurStmtOffs = newBlk->bbCodeOffs | IL_OFFSETX_STKBIT;
2561 arg = gtNewStmt(arg, impCurStmtOffs);
2564 fgInsertStmtAtEnd(newBlk, arg);
2566 arg = gtNewLclvNode(tempNum, TYP_REF);
2569 impPushOnStack(arg, typeInfo(TI_REF, clsHnd));
2574 /*****************************************************************************
2576 * Given a tree, clone it. *pClone is set to the cloned tree.
2577 * Returns the original tree if the cloning was easy,
2578 * else returns the temp to which the tree had to be spilled.
2579 * If the tree has side-effects, it will be spilled to a temp.
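* For example (illustrative): cloning "lclVar + 8" just duplicates the tree,
* while cloning a tree with GTF_GLOB_EFFECT (e.g. a call) spills it to a temp
* and hands back two references to that temp.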
2582 GenTree* Compiler::impCloneExpr(GenTree* tree,
2584 CORINFO_CLASS_HANDLE structHnd,
2586 GenTree** pAfterStmt DEBUGARG(const char* reason))
2588 if (!(tree->gtFlags & GTF_GLOB_EFFECT))
2590 GenTree* clone = gtClone(tree, true);
2599 /* Store the operand in a temp and return the temp */
2601 unsigned temp = lvaGrabTemp(true DEBUGARG(reason));
2603 // impAssignTempGen() may change tree->gtType to TYP_VOID for calls which
2604 // return a struct type. It also may modify the struct type to a more
2605 // specialized type (e.g. a SIMD type). So we will get the type from
2606 // the lclVar AFTER calling impAssignTempGen().
2608 impAssignTempGen(temp, tree, structHnd, curLevel, pAfterStmt, impCurStmtOffs);
2609 var_types type = genActualType(lvaTable[temp].TypeGet());
2611 *pClone = gtNewLclvNode(temp, type);
2612 return gtNewLclvNode(temp, type);
2615 /*****************************************************************************
2616 * Remember the IL offset (including stack-empty info) for the trees we will generate now.
2620 inline void Compiler::impCurStmtOffsSet(IL_OFFSET offs)
2622 if (compIsForInlining())
2624 GenTree* callStmt = impInlineInfo->iciStmt;
2625 assert(callStmt->gtOper == GT_STMT);
2626 impCurStmtOffs = callStmt->gtStmt.gtStmtILoffsx;
2630 assert(offs == BAD_IL_OFFSET || (offs & IL_OFFSETX_BITS) == 0);
2631 IL_OFFSETX stkBit = (verCurrentState.esStackDepth > 0) ? IL_OFFSETX_STKBIT : 0;
2632 impCurStmtOffs = offs | stkBit;
2636 /*****************************************************************************
2637 * Returns current IL offset with stack-empty and call-instruction info incorporated
2639 inline IL_OFFSETX Compiler::impCurILOffset(IL_OFFSET offs, bool callInstruction)
2641 if (compIsForInlining())
2643 return BAD_IL_OFFSET;
2647 assert(offs == BAD_IL_OFFSET || (offs & IL_OFFSETX_BITS) == 0);
2648 IL_OFFSETX stkBit = (verCurrentState.esStackDepth > 0) ? IL_OFFSETX_STKBIT : 0;
2649 IL_OFFSETX callInstructionBit = callInstruction ? IL_OFFSETX_CALLINSTRUCTIONBIT : 0;
2650 return offs | stkBit | callInstructionBit;
2654 //------------------------------------------------------------------------
2655 // impCanSpillNow: check whether it is possible to spill all values from the eval stack to local variables.
2658 // prevOpcode - last importer opcode
2661 // true if it is legal to spill now, false if this could be part of a sequence that we do not want to split.
2662 bool Compiler::impCanSpillNow(OPCODE prevOpcode)
2664 // Don't spill after ldtoken, newarr and newobj, because it could be a part of the InitializeArray sequence.
2665 // Avoid breaking the sequence up, to guarantee that impInitializeArrayIntrinsic can still recognize it.
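// For example, the InitializeArray pattern (see impInitializeArrayIntrinsic) is roughly:
//     ldc <length>; newarr <T>; dup; ldtoken <field>; call InitializeArray
// Spilling between these opcodes would hide the pattern from the importer.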
2666 return (prevOpcode != CEE_LDTOKEN) && (prevOpcode != CEE_NEWARR) && (prevOpcode != CEE_NEWOBJ);
2669 /*****************************************************************************
2671 * Remember the instr offset for the statements
2673 * When we do impAppendTree(tree), we can't set tree->gtStmtLastILoffs to
2674 * impCurOpcOffs, if the append was done because of a partial stack spill,
2675 * as some of the trees corresponding to code up to impCurOpcOffs might
2676 * still be sitting on the stack.
2677 * So we delay marking of gtStmtLastILoffs until impNoteLastILoffs().
2678 * This should be called when an opcode finally/explicitly causes
2679 * impAppendTree(tree) to be called (as opposed to being called because of
2680 * a spill caused by the opcode)
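* For example (illustrative): while importing an opcode at IL offset 0x10 we may
* first append spill statements for trees produced by earlier offsets; only the
* statement the opcode itself appends should get gtStmtLastILoffs == 0x10.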
2685 void Compiler::impNoteLastILoffs()
2687 if (impLastILoffsStmt == nullptr)
2689 // We should have added a statement for the current basic block
2690 // Is this assert correct?
2692 assert(impTreeLast);
2693 assert(impTreeLast->gtOper == GT_STMT);
2695 impTreeLast->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
2699 impLastILoffsStmt->gtStmt.gtStmtLastILoffs = compIsForInlining() ? BAD_IL_OFFSET : impCurOpcOffs;
2700 impLastILoffsStmt = nullptr;
2706 /*****************************************************************************
2707 * We don't create any GenTree (excluding spills) for a branch.
2708 * For debugging info, we need a placeholder so that we can note
2709 * the IL offset in gtStmt.gtStmtOffs. So append an empty statement.
2712 void Compiler::impNoteBranchOffs()
2714 if (opts.compDbgCode)
2716 impAppendTree(gtNewNothingNode(), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
2720 /*****************************************************************************
2721 * Locate the next stmt boundary for which we need to record info.
2722 * We will have to spill the stack at such boundaries if it is not already empty.
2724 * Returns the next stmt boundary (after the start of the block)
2727 unsigned Compiler::impInitBlockLineInfo()
2729 /* Assume the block does not correspond with any IL offset. This prevents
2730 us from reporting extra offsets. Extra mappings can cause confusing
2731 stepping, especially if the extra mapping is a jump-target, and the
2732 debugger does not ignore extra mappings, but instead rewinds to the
2733 nearest known offset */
2735 impCurStmtOffsSet(BAD_IL_OFFSET);
2737 if (compIsForInlining())
2742 IL_OFFSET blockOffs = compCurBB->bbCodeOffs;
2744 if ((verCurrentState.esStackDepth == 0) && (info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES))
2746 impCurStmtOffsSet(blockOffs);
2749 if (false && (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES))
2751 impCurStmtOffsSet(blockOffs);
2754 /* Always report IL offset 0 or some tests get confused.
2755 Probably a good idea anyway */
2759 impCurStmtOffsSet(blockOffs);
2762 if (!info.compStmtOffsetsCount)
2767 /* Find the lowest explicit stmt boundary within the block */
2769 /* Start looking at an entry that is based on our instr offset */
2771 unsigned index = (info.compStmtOffsetsCount * blockOffs) / info.compILCodeSize;
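// For example (illustrative numbers): with 10 recorded boundaries over 200 bytes
// of IL, a block starting at IL offset 60 begins searching at index
// (10 * 60) / 200 = 3; the adjustment loops below correct the estimate.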
2773 if (index >= info.compStmtOffsetsCount)
2775 index = info.compStmtOffsetsCount - 1;
2778 /* If we've guessed too far, back up */
2780 while (index > 0 && info.compStmtOffsets[index - 1] >= blockOffs)
2785 /* If we guessed short, advance ahead */
2787 while (info.compStmtOffsets[index] < blockOffs)
2791 if (index == info.compStmtOffsetsCount)
2793 return info.compStmtOffsetsCount;
2797 assert(index < info.compStmtOffsetsCount);
2799 if (info.compStmtOffsets[index] == blockOffs)
2801 /* There is an explicit boundary for the start of this basic block.
2802 So we will start with bbCodeOffs. Else we will wait until we
2803 get to the next explicit boundary */
2805 impCurStmtOffsSet(blockOffs);
2813 /*****************************************************************************/
2815 static inline bool impOpcodeIsCallOpcode(OPCODE opcode)
2829 /*****************************************************************************/
2831 static inline bool impOpcodeIsCallSiteBoundary(OPCODE opcode)
2848 /*****************************************************************************/
2850 // One might think it is worth caching these values, but results indicate that it isn't.
2852 // In addition, caching them causes SuperPMI to be unable to completely
2853 // encapsulate an individual method context.
2854 CORINFO_CLASS_HANDLE Compiler::impGetRefAnyClass()
2856 CORINFO_CLASS_HANDLE refAnyClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPED_BYREF);
2857 assert(refAnyClass != (CORINFO_CLASS_HANDLE) nullptr);
2861 CORINFO_CLASS_HANDLE Compiler::impGetTypeHandleClass()
2863 CORINFO_CLASS_HANDLE typeHandleClass = info.compCompHnd->getBuiltinClass(CLASSID_TYPE_HANDLE);
2864 assert(typeHandleClass != (CORINFO_CLASS_HANDLE) nullptr);
2865 return typeHandleClass;
2868 CORINFO_CLASS_HANDLE Compiler::impGetRuntimeArgumentHandle()
2870 CORINFO_CLASS_HANDLE argIteratorClass = info.compCompHnd->getBuiltinClass(CLASSID_ARGUMENT_HANDLE);
2871 assert(argIteratorClass != (CORINFO_CLASS_HANDLE) nullptr);
2872 return argIteratorClass;
2875 CORINFO_CLASS_HANDLE Compiler::impGetStringClass()
2877 CORINFO_CLASS_HANDLE stringClass = info.compCompHnd->getBuiltinClass(CLASSID_STRING);
2878 assert(stringClass != (CORINFO_CLASS_HANDLE) nullptr);
2882 CORINFO_CLASS_HANDLE Compiler::impGetObjectClass()
2884 CORINFO_CLASS_HANDLE objectClass = info.compCompHnd->getBuiltinClass(CLASSID_SYSTEM_OBJECT);
2885 assert(objectClass != (CORINFO_CLASS_HANDLE) nullptr);
2889 /*****************************************************************************
2890 * "&var" can be used either as TYP_BYREF or TYP_I_IMPL, but we
2891 * set its type to TYP_BYREF when we create it. Only at the point where we
2892 * use it do we know whether it can be changed to TYP_I_IMPL.
2896 void Compiler::impBashVarAddrsToI(GenTree* tree1, GenTree* tree2)
2898 if (tree1->IsVarAddr())
2900 tree1->gtType = TYP_I_IMPL;
2903 if (tree2 && tree2->IsVarAddr())
2905 tree2->gtType = TYP_I_IMPL;
2909 /*****************************************************************************
2910 * TYP_INT and TYP_I_IMPL can be used almost interchangeably, but we want
2911 * to make that an explicit cast in our trees, so any implicit casts that
2912 * exist in the IL (at least on 64-bit where TYP_I_IMPL != TYP_INT) are
2913 * turned into explicit casts here.
2914 * We also allow an implicit conversion of a ldnull into a TYP_I_IMPL(0)
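* For example (a sketch of the cases handled below): on a 64-bit target, an
* int-typed tree passed where TYP_I_IMPL is wanted gets an explicit GT_CAST to
* TYP_I_IMPL, while a zero "ldnull" constant is simply retyped to TYP_I_IMPL in place.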
2917 GenTree* Compiler::impImplicitIorI4Cast(GenTree* tree, var_types dstTyp)
2919 var_types currType = genActualType(tree->gtType);
2920 var_types wantedType = genActualType(dstTyp);
2922 if (wantedType != currType)
2924 // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
2925 if ((tree->OperGet() == GT_CNS_INT) && varTypeIsI(dstTyp))
2927 if (!varTypeIsI(tree->gtType) || ((tree->gtType == TYP_REF) && (tree->gtIntCon.gtIconVal == 0)))
2929 tree->gtType = TYP_I_IMPL;
2932 #ifdef _TARGET_64BIT_
2933 else if (varTypeIsI(wantedType) && (currType == TYP_INT))
2935 // Note that this allows TYP_INT to be cast to a TYP_I_IMPL when wantedType is a TYP_BYREF or TYP_REF
2936 tree = gtNewCastNode(TYP_I_IMPL, tree, false, TYP_I_IMPL);
2938 else if ((wantedType == TYP_INT) && varTypeIsI(currType))
2940 // Note that this allows TYP_BYREF or TYP_REF to be cast to a TYP_INT
2941 tree = gtNewCastNode(TYP_INT, tree, false, TYP_INT);
2943 #endif // _TARGET_64BIT_
2949 /*****************************************************************************
2950 * TYP_FLOAT and TYP_DOUBLE can be used almost interchangeably in some cases,
2951 * but we want to make that an explicit cast in our trees, so any implicit casts
2952 * that exist in the IL are turned into explicit casts here.
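* For example (a sketch of the check below): a TYP_FLOAT tree passed where
* TYP_DOUBLE is expected becomes GT_CAST(double, tree); matching types are
* returned unchanged.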
2955 GenTree* Compiler::impImplicitR4orR8Cast(GenTree* tree, var_types dstTyp)
2957 if (varTypeIsFloating(tree) && varTypeIsFloating(dstTyp) && (dstTyp != tree->gtType))
2959 tree = gtNewCastNode(dstTyp, tree, false, dstTyp);
2965 //------------------------------------------------------------------------
2966 // impInitializeArrayIntrinsic: Attempts to replace a call to InitializeArray
2967 // with a GT_COPYBLK node.
2970 // sig - The InitializeArray signature.
2973 // A pointer to the newly created GT_COPYBLK node if the replacement succeeds or
2974 // nullptr otherwise.
2977 // The function recognizes the following IL pattern:
2978 //     ldc <length> or a list of ldc <lower bound>/<length>
2979 //     newarr or newobj
2980 //     dup
2981 //     ldtoken <field handle>
2982 // call InitializeArray
2983 // The lower bounds need not be constant except when the array rank is 1.
2984 // The function recognizes all kinds of arrays thus enabling a small runtime
2985 // such as CoreRT to skip providing an implementation for InitializeArray.
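// For example (illustrative C#): a method body like
//     static int[] Make() => new int[] { 1, 2, 3 };
// typically compiles to the IL shape above, and this function replaces the
// InitializeArray call with a block copy from the static initialization data.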
2987 GenTree* Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig)
2989 assert(sig->numArgs == 2);
2991 GenTree* fieldTokenNode = impStackTop(0).val;
2992 GenTree* arrayLocalNode = impStackTop(1).val;
2995 // Verify that the field token is known and valid. Note that it's also
2996 // possible for the token to come from reflection, in which case we cannot do
2997 // the optimization and must therefore revert to calling the helper. You can
2998 // see an example of this in bvt\DynIL\initarray2.exe (in Main).
3001 // Check to see if the ldtoken helper call is what we see here.
3002 if (fieldTokenNode->gtOper != GT_CALL || (fieldTokenNode->gtCall.gtCallType != CT_HELPER) ||
3003 (fieldTokenNode->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD)))
3008 // Strip helper call away
3009 fieldTokenNode = fieldTokenNode->gtCall.gtCallArgs->Current();
3011 if (fieldTokenNode->gtOper == GT_IND)
3013 fieldTokenNode = fieldTokenNode->gtOp.gtOp1;
3016 // Check for constant
3017 if (fieldTokenNode->gtOper != GT_CNS_INT)
3022 CORINFO_FIELD_HANDLE fieldToken = (CORINFO_FIELD_HANDLE)fieldTokenNode->gtIntCon.gtCompileTimeHandle;
3023 if (!fieldTokenNode->IsIconHandle(GTF_ICON_FIELD_HDL) || (fieldToken == nullptr))
3029 // We need to get the number of elements in the array and the size of each element.
3030 // We verify that the newarr statement is exactly what we expect it to be.
3031 // If it's not then we just return NULL and we don't optimize this call
3035 // It is possible that we don't have any statements in the block yet
3037 if (impTreeLast->gtOper != GT_STMT)
3039 assert(impTreeLast->gtOper == GT_BEG_STMTS);
3044 // We start by looking at the last statement, making sure it's an assignment, and
3045 // that the target of the assignment is the array passed to InitializeArray.
3047 GenTree* arrayAssignment = impTreeLast->gtStmt.gtStmtExpr;
3048 if ((arrayAssignment->gtOper != GT_ASG) || (arrayAssignment->gtOp.gtOp1->gtOper != GT_LCL_VAR) ||
3049 (arrayLocalNode->gtOper != GT_LCL_VAR) ||
3050 (arrayAssignment->gtOp.gtOp1->gtLclVarCommon.gtLclNum != arrayLocalNode->gtLclVarCommon.gtLclNum))
3056 // Make sure that the object being assigned is a helper call.
3059 GenTree* newArrayCall = arrayAssignment->gtOp.gtOp2;
3060 if ((newArrayCall->gtOper != GT_CALL) || (newArrayCall->gtCall.gtCallType != CT_HELPER))
3066 // Verify that it is one of the new array helpers.
3069 bool isMDArray = false;
3071 if (newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_DIRECT) &&
3072 newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_OBJ) &&
3073 newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_VC) &&
3074 newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_ALIGN8)
3075 #ifdef FEATURE_READYTORUN_COMPILER
3076 && newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_R2R_DIRECT) &&
3077 newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1)
3081 if (newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEW_MDARR_NONVARARG))
3089 CORINFO_CLASS_HANDLE arrayClsHnd = (CORINFO_CLASS_HANDLE)newArrayCall->gtCall.compileTimeHelperArgumentHandle;
3092 // Make sure we found a compile time handle to the array
3101 S_UINT32 numElements;
3105 rank = info.compCompHnd->getArrayRank(arrayClsHnd);
3112 GenTreeArgList* tokenArg = newArrayCall->gtCall.gtCallArgs;
3113 assert(tokenArg != nullptr);
3114 GenTreeArgList* numArgsArg = tokenArg->Rest();
3115 assert(numArgsArg != nullptr);
3116 GenTreeArgList* argsArg = numArgsArg->Rest();
3117 assert(argsArg != nullptr);
3120 // The number of arguments should be a constant between 1 and 64. The rank can't be 0
3121 // so at least one length must be present and the rank can't exceed 32 so there can
3122 // be at most 64 arguments - 32 lengths and 32 lower bounds.
3125 if ((!numArgsArg->Current()->IsCnsIntOrI()) || (numArgsArg->Current()->AsIntCon()->IconValue() < 1) ||
3126 (numArgsArg->Current()->AsIntCon()->IconValue() > 64))
3131 unsigned numArgs = static_cast<unsigned>(numArgsArg->Current()->AsIntCon()->IconValue());
3132 bool lowerBoundsSpecified;
3134 if (numArgs == rank * 2)
3136 lowerBoundsSpecified = true;
3138 else if (numArgs == rank)
3140 lowerBoundsSpecified = false;
3143 // If the rank is 1 and a lower bound isn't specified then the runtime creates
3144 // an SDArray. Note that even if a lower bound is specified it can be 0 and then
3145 // we get an SDArray as well, see the for loop below.
3159 // The rank is known to be at least 1 so we can start with numElements being 1
3160 // to avoid the need to special case the first dimension.
3163 numElements = S_UINT32(1);
3167 static bool IsArgsFieldInit(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs)
3169 return (tree->OperGet() == GT_ASG) && IsArgsFieldIndir(tree->gtGetOp1(), index, lvaNewObjArrayArgs) &&
3170 IsArgsAddr(tree->gtGetOp1()->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs);
3173 static bool IsArgsFieldIndir(GenTree* tree, unsigned index, unsigned lvaNewObjArrayArgs)
3175 return (tree->OperGet() == GT_IND) && (tree->gtGetOp1()->OperGet() == GT_ADD) &&
3176 (tree->gtGetOp1()->gtGetOp2()->IsIntegralConst(sizeof(INT32) * index)) &&
3177 IsArgsAddr(tree->gtGetOp1()->gtGetOp1(), lvaNewObjArrayArgs);
3180 static bool IsArgsAddr(GenTree* tree, unsigned lvaNewObjArrayArgs)
3182 return (tree->OperGet() == GT_ADDR) && (tree->gtGetOp1()->OperGet() == GT_LCL_VAR) &&
3183 (tree->gtGetOp1()->AsLclVar()->GetLclNum() == lvaNewObjArrayArgs);
3186 static bool IsComma(GenTree* tree)
3188 return (tree != nullptr) && (tree->OperGet() == GT_COMMA);
3192 unsigned argIndex = 0;
3195 for (comma = argsArg->Current(); Match::IsComma(comma); comma = comma->gtGetOp2())
3197 if (lowerBoundsSpecified)
3200 // In general lower bounds can be ignored because they're not needed to
3201 // calculate the total number of elements. But for single dimensional arrays
3202 // we need to know if the lower bound is 0 because in this case the runtime
3203 // creates a SDArray and this affects the way the array data offset is calculated.
3208 GenTree* lowerBoundAssign = comma->gtGetOp1();
3209 assert(Match::IsArgsFieldInit(lowerBoundAssign, argIndex, lvaNewObjArrayArgs));
3210 GenTree* lowerBoundNode = lowerBoundAssign->gtGetOp2();
3212 if (lowerBoundNode->IsIntegralConst(0))
3218 comma = comma->gtGetOp2();
3222 GenTree* lengthNodeAssign = comma->gtGetOp1();
3223 assert(Match::IsArgsFieldInit(lengthNodeAssign, argIndex, lvaNewObjArrayArgs));
3224 GenTree* lengthNode = lengthNodeAssign->gtGetOp2();
3226 if (!lengthNode->IsCnsIntOrI())
3231 numElements *= S_SIZE_T(lengthNode->AsIntCon()->IconValue());
3235 assert((comma != nullptr) && Match::IsArgsAddr(comma, lvaNewObjArrayArgs));
3237 if (argIndex != numArgs)
3245 // Make sure there are exactly two arguments: the array class and
3246 // the number of elements.
3249 GenTree* arrayLengthNode;
3251 GenTreeArgList* args = newArrayCall->gtCall.gtCallArgs;
3252 #ifdef FEATURE_READYTORUN_COMPILER
3253 if (newArrayCall->gtCall.gtCallMethHnd == eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1))
3255 // Array length is 1st argument for readytorun helper
3256 arrayLengthNode = args->Current();
3261 // Array length is 2nd argument for regular helper
3262 arrayLengthNode = args->Rest()->Current();
3266 // Make sure that the number of elements looks valid.
3268 if (arrayLengthNode->gtOper != GT_CNS_INT)
3273 numElements = S_SIZE_T(arrayLengthNode->gtIntCon.gtIconVal);
3275 if (!info.compCompHnd->isSDArray(arrayClsHnd))
3281 CORINFO_CLASS_HANDLE elemClsHnd;
3282 var_types elementType = JITtype2varType(info.compCompHnd->getChildType(arrayClsHnd, &elemClsHnd));
3285 // Note that genTypeSize will return zero for non primitive types, which is exactly
3286 // what we want (size will then be 0, and we will catch this in the conditional below).
3287 // Note that we don't expect this to fail for valid binaries, so we assert in the
3288 // non-verification case (the verification case should not assert but rather correctly
3289 // handle bad binaries). This assert is not guarding any specific invariant, but rather
3290 // saying that we don't expect this to happen, and if it is hit, we need to investigate why.
3294 S_UINT32 elemSize(genTypeSize(elementType));
3295 S_UINT32 size = elemSize * S_UINT32(numElements);
3297 if (size.IsOverflow())
3302 if ((size.Value() == 0) || (varTypeIsGC(elementType)))
3304 assert(verNeedsVerification());
3308 void* initData = info.compCompHnd->getArrayInitializationData(fieldToken, size.Value());
3315 // At this point we are ready to commit to implementing the InitializeArray
3316 // intrinsic using a struct assignment. Pop the arguments from the stack and
3317 // return the struct assignment node.
3323 const unsigned blkSize = size.Value();
3324 unsigned dataOffset;
3328 dataOffset = eeGetMDArrayDataOffset(elementType, rank);
3332 dataOffset = eeGetArrayDataOffset(elementType);
3335 GenTree* dst = gtNewOperNode(GT_ADD, TYP_BYREF, arrayLocalNode, gtNewIconNode(dataOffset, TYP_I_IMPL));
3336 GenTree* blk = gtNewBlockVal(dst, blkSize);
3337 GenTree* src = gtNewIndOfIconHandleNode(TYP_STRUCT, (size_t)initData, GTF_ICON_STATIC_HDL, false);
3339 return gtNewBlkOpNode(blk, // dst
3346 //------------------------------------------------------------------------
3347 // impIntrinsic: possibly expand intrinsic call into alternate IR sequence
3350 // newobjThis - for constructor calls, the tree for the newly allocated object
3351 // clsHnd - handle for the intrinsic method's class
3352 // method - handle for the intrinsic method
3353 // sig - signature of the intrinsic method
3354 // methodFlags - CORINFO_FLG_XXX flags of the intrinsic method
3355 // memberRef - the token for the intrinsic method
3356 // readonlyCall - true if call has a readonly prefix
3357 // tailCall - true if call is in tail position
3358 // pConstrainedResolvedToken -- resolved token for constrained call, or nullptr
3359 // if call is not constrained
3360 // constraintCallThisTransform -- this transform to apply for a constrained call
3361 // pIntrinsicID [OUT] -- intrinsic ID (see enumeration in corinfo.h)
3362 // for "traditional" jit intrinsics
3363 // isSpecialIntrinsic [OUT] -- set true if intrinsic expansion is a call
3364 // that is amenable to special downstream optimization opportunities
3367 // IR tree to use in place of the call, or nullptr if the jit should treat
3368 // the intrinsic call like a normal call.
3370 // pIntrinsicID set to non-illegal value if the call is recognized as a
3371 // traditional jit intrinsic, even if the intrinsic is not expanded.
3373 // isSpecial set true if the expansion is subject to special
3374 // optimizations later in the jit processing
3377 // On success the IR tree may be a call to a different method or an inline
3378 // sequence. If it is a call, then the intrinsic processing here is responsible
3379 // for handling all the special cases, as upon return to impImportCall
3380 // expanded intrinsics bypass most of the normal call processing.
3382 // Intrinsics are generally not recognized in minopts and debug codegen.
3384 // However, certain traditional intrinsics are identified as "must expand"
3385 // if there is no fallback implementation to invoke; these must be handled
3386 // in all codegen modes.
3388 // New style intrinsics (where the fallback implementation is in IL) are
3389 // identified as "must expand" if they are invoked from within their
3390 // own method bodies.
3393 GenTree* Compiler::impIntrinsic(GenTree* newobjThis,
3394 CORINFO_CLASS_HANDLE clsHnd,
3395 CORINFO_METHOD_HANDLE method,
3396 CORINFO_SIG_INFO* sig,
3397 unsigned methodFlags,
3401 CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
3402 CORINFO_THIS_TRANSFORM constraintCallThisTransform,
3403 CorInfoIntrinsics* pIntrinsicID,
3404 bool* isSpecialIntrinsic)
3406 assert((methodFlags & (CORINFO_FLG_INTRINSIC | CORINFO_FLG_JIT_INTRINSIC)) != 0);
3408 bool mustExpand = false;
3409 bool isSpecial = false;
3410 CorInfoIntrinsics intrinsicID = CORINFO_INTRINSIC_Illegal;
3411 NamedIntrinsic ni = NI_Illegal;
3413 if ((methodFlags & CORINFO_FLG_INTRINSIC) != 0)
3415 intrinsicID = info.compCompHnd->getIntrinsicID(method, &mustExpand);
3418 if ((methodFlags & CORINFO_FLG_JIT_INTRINSIC) != 0)
3420 // The recursive calls to Jit intrinsics are must-expand by convention.
3421 mustExpand = mustExpand || gtIsRecursiveCall(method);
3423 if (intrinsicID == CORINFO_INTRINSIC_Illegal)
3425 ni = lookupNamedIntrinsic(method);
3427 #ifdef FEATURE_HW_INTRINSICS
3430 #if defined(_TARGET_ARM64_)
3431 case NI_Base_Vector64_AsByte:
3432 case NI_Base_Vector64_AsInt16:
3433 case NI_Base_Vector64_AsInt32:
3434 case NI_Base_Vector64_AsSByte:
3435 case NI_Base_Vector64_AsSingle:
3436 case NI_Base_Vector64_AsUInt16:
3437 case NI_Base_Vector64_AsUInt32:
3438 #endif // _TARGET_ARM64_
3439 case NI_Base_Vector128_As:
3440 case NI_Base_Vector128_AsByte:
3441 case NI_Base_Vector128_AsDouble:
3442 case NI_Base_Vector128_AsInt16:
3443 case NI_Base_Vector128_AsInt32:
3444 case NI_Base_Vector128_AsInt64:
3445 case NI_Base_Vector128_AsSByte:
3446 case NI_Base_Vector128_AsSingle:
3447 case NI_Base_Vector128_AsUInt16:
3448 case NI_Base_Vector128_AsUInt32:
3449 case NI_Base_Vector128_AsUInt64:
3450 #if defined(_TARGET_XARCH_)
3451 case NI_Base_Vector128_Zero:
3452 case NI_Base_Vector256_As:
3453 case NI_Base_Vector256_AsByte:
3454 case NI_Base_Vector256_AsDouble:
3455 case NI_Base_Vector256_AsInt16:
3456 case NI_Base_Vector256_AsInt32:
3457 case NI_Base_Vector256_AsInt64:
3458 case NI_Base_Vector256_AsSByte:
3459 case NI_Base_Vector256_AsSingle:
3460 case NI_Base_Vector256_AsUInt16:
3461 case NI_Base_Vector256_AsUInt32:
3462 case NI_Base_Vector256_AsUInt64:
3463 case NI_Base_Vector256_Zero:
3464 #endif // _TARGET_XARCH_
3466 return impBaseIntrinsic(ni, method, sig);
3475 if ((ni > NI_HW_INTRINSIC_START) && (ni < NI_HW_INTRINSIC_END))
3477 GenTree* hwintrinsic = impHWIntrinsic(ni, method, sig, mustExpand);
3479 if (mustExpand && (hwintrinsic == nullptr))
3481 return impUnsupportedHWIntrinsic(CORINFO_HELP_THROW_NOT_IMPLEMENTED, method, sig, mustExpand);
3486 #endif // FEATURE_HW_INTRINSICS
3490 *pIntrinsicID = intrinsicID;
3492 #ifndef _TARGET_ARM_
3493 genTreeOps interlockedOperator;
3496 if (intrinsicID == CORINFO_INTRINSIC_StubHelpers_GetStubContext)
3498 // must be done regardless of DbgCode and MinOpts
3499 return gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL);
3501 #ifdef _TARGET_64BIT_
3502 if (intrinsicID == CORINFO_INTRINSIC_StubHelpers_GetStubContextAddr)
3504 // must be done regardless of DbgCode and MinOpts
3505 return gtNewOperNode(GT_ADDR, TYP_I_IMPL, gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL));
3508 assert(intrinsicID != CORINFO_INTRINSIC_StubHelpers_GetStubContextAddr);
3511 GenTree* retNode = nullptr;
3513 // Under debug and minopts, only expand what is required.
3514 if (!mustExpand && (opts.compDbgCode || opts.MinOpts()))
3516 *pIntrinsicID = CORINFO_INTRINSIC_Illegal;
3520 var_types callType = JITtype2varType(sig->retType);
3522 /* First do the intrinsics which are always smaller than a call */
3524 switch (intrinsicID)
3529 case CORINFO_INTRINSIC_Sin:
3530 case CORINFO_INTRINSIC_Cbrt:
3531 case CORINFO_INTRINSIC_Sqrt:
3532 case CORINFO_INTRINSIC_Abs:
3533 case CORINFO_INTRINSIC_Cos:
3534 case CORINFO_INTRINSIC_Round:
3535 case CORINFO_INTRINSIC_Cosh:
3536 case CORINFO_INTRINSIC_Sinh:
3537 case CORINFO_INTRINSIC_Tan:
3538 case CORINFO_INTRINSIC_Tanh:
3539 case CORINFO_INTRINSIC_Asin:
3540 case CORINFO_INTRINSIC_Asinh:
3541 case CORINFO_INTRINSIC_Acos:
3542 case CORINFO_INTRINSIC_Acosh:
3543 case CORINFO_INTRINSIC_Atan:
3544 case CORINFO_INTRINSIC_Atan2:
3545 case CORINFO_INTRINSIC_Atanh:
3546 case CORINFO_INTRINSIC_Log10:
3547 case CORINFO_INTRINSIC_Pow:
3548 case CORINFO_INTRINSIC_Exp:
3549 case CORINFO_INTRINSIC_Ceiling:
3550 case CORINFO_INTRINSIC_Floor:
3551 retNode = impMathIntrinsic(method, sig, callType, intrinsicID, tailCall);
3554 #if defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_)
3555 // TODO-ARM-CQ: reenable treating Interlocked operation as intrinsic
3557 // Note that CORINFO_INTRINSIC_InterlockedAdd32/64 are not actually used.
3558 // Anyway, we can import them as XADD and leave it to lowering/codegen to perform
3559 // whatever optimizations may arise from the fact that result value is not used.
3560 case CORINFO_INTRINSIC_InterlockedAdd32:
3561 case CORINFO_INTRINSIC_InterlockedXAdd32:
3562 interlockedOperator = GT_XADD;
3563 goto InterlockedBinOpCommon;
3564 case CORINFO_INTRINSIC_InterlockedXchg32:
3565 interlockedOperator = GT_XCHG;
3566 goto InterlockedBinOpCommon;
3568 #ifdef _TARGET_64BIT_
3569 case CORINFO_INTRINSIC_InterlockedAdd64:
3570 case CORINFO_INTRINSIC_InterlockedXAdd64:
3571 interlockedOperator = GT_XADD;
3572 goto InterlockedBinOpCommon;
3573 case CORINFO_INTRINSIC_InterlockedXchg64:
3574 interlockedOperator = GT_XCHG;
3575 goto InterlockedBinOpCommon;
3576 #endif // _TARGET_64BIT_
3578 InterlockedBinOpCommon:
3579 assert(callType != TYP_STRUCT);
3580 assert(sig->numArgs == 2);
3582 op2 = impPopStack().val;
3583 op1 = impPopStack().val;
3589 // field (for example)
3591 // In the case where the first argument is the address of a local, we might
3592 // want to make this *not* make the var address-taken -- but atomic instructions
3593 // on a local are probably pretty useless anyway, so we probably don't care.
3595 op1 = gtNewOperNode(interlockedOperator, genActualType(callType), op1, op2);
3596 op1->gtFlags |= GTF_GLOB_REF | GTF_ASG;
3599 #endif // defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_)
3601 case CORINFO_INTRINSIC_MemoryBarrier:
3603 assert(sig->numArgs == 0);
3605 op1 = new (this, GT_MEMORYBARRIER) GenTree(GT_MEMORYBARRIER, TYP_VOID);
3606 op1->gtFlags |= GTF_GLOB_REF | GTF_ASG;
3610 #if defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_)
3611 // TODO-ARM-CQ: reenable treating InterlockedCmpXchg32 operation as intrinsic
3612 case CORINFO_INTRINSIC_InterlockedCmpXchg32:
3613 #ifdef _TARGET_64BIT_
3614 case CORINFO_INTRINSIC_InterlockedCmpXchg64:
3617 assert(callType != TYP_STRUCT);
3618 assert(sig->numArgs == 3);
3621 op3 = impPopStack().val; // comparand
3622 op2 = impPopStack().val; // value
3623 op1 = impPopStack().val; // location
3625 GenTree* node = new (this, GT_CMPXCHG) GenTreeCmpXchg(genActualType(callType), op1, op2, op3);
3627 node->gtCmpXchg.gtOpLocation->gtFlags |= GTF_DONT_CSE;
3631 #endif // defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_)
3633 case CORINFO_INTRINSIC_StringLength:
3634 op1 = impPopStack().val;
3635 if (!opts.MinOpts() && !opts.compDbgCode)
3637 GenTreeArrLen* arrLen = gtNewArrLen(TYP_INT, op1, OFFSETOF__CORINFO_String__stringLen);
3642 /* Create the expression "*(str_addr + stringLengthOffset)" */
3643 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
3644 gtNewIconNode(OFFSETOF__CORINFO_String__stringLen, TYP_I_IMPL));
3645 op1 = gtNewOperNode(GT_IND, TYP_INT, op1);
3648 // Getting the length of a null string should throw
3649 op1->gtFlags |= GTF_EXCEPT;
3654 case CORINFO_INTRINSIC_StringGetChar:
3655 op2 = impPopStack().val;
3656 op1 = impPopStack().val;
3657 op1 = gtNewIndexRef(TYP_USHORT, op1, op2);
3658 op1->gtFlags |= GTF_INX_STRING_LAYOUT;
3662 case CORINFO_INTRINSIC_InitializeArray:
3663 retNode = impInitializeArrayIntrinsic(sig);
3666 case CORINFO_INTRINSIC_Array_Address:
3667 case CORINFO_INTRINSIC_Array_Get:
3668 case CORINFO_INTRINSIC_Array_Set:
3669 retNode = impArrayAccessIntrinsic(clsHnd, sig, memberRef, readonlyCall, intrinsicID);
3672 case CORINFO_INTRINSIC_GetTypeFromHandle:
3673 op1 = impStackTop(0).val;
3674 CorInfoHelpFunc typeHandleHelper;
3675 if (op1->gtOper == GT_CALL && (op1->gtCall.gtCallType == CT_HELPER) &&
3676 gtIsTypeHandleToRuntimeTypeHandleHelper(op1->AsCall(), &typeHandleHelper))
3678 op1 = impPopStack().val;
3679 // Replace helper with a more specialized helper that returns RuntimeType
3680 if (typeHandleHelper == CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE)
3682 typeHandleHelper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE;
3686 assert(typeHandleHelper == CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE_MAYBENULL);
3687 typeHandleHelper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE_MAYBENULL;
3689 assert(op1->gtCall.gtCallArgs->gtOp.gtOp2 == nullptr);
3690 op1 = gtNewHelperCallNode(typeHandleHelper, TYP_REF, op1->gtCall.gtCallArgs);
3691 op1->gtType = TYP_REF;
3694 // Call the regular function.
3697 case CORINFO_INTRINSIC_RTH_GetValueInternal:
3698 op1 = impStackTop(0).val;
3699 if (op1->gtOper == GT_CALL && (op1->gtCall.gtCallType == CT_HELPER) &&
3700 gtIsTypeHandleToRuntimeTypeHandleHelper(op1->AsCall()))
3703 // Old tree: Helper-RuntimeTypeHandle -> TreeToGetNativeTypeHandle
3706 // New tree: TreeToGetNativeTypeHandle
3708 // Remove call to helper and return the native TypeHandle pointer that was the parameter to that helper
3711 op1 = impPopStack().val;
3713 // Get native TypeHandle argument to old helper
3714 op1 = op1->gtCall.gtCallArgs;
3715 assert(op1->OperIsList());
3716 assert(op1->gtOp.gtOp2 == nullptr);
3717 op1 = op1->gtOp.gtOp1;
3720 // Call the regular function.
3723 case CORINFO_INTRINSIC_Object_GetType:
3725 JITDUMP("\n impIntrinsic: call to Object.GetType\n");
3726 op1 = impStackTop(0).val;
3728 // If we're calling GetType on a boxed value, just get the type directly.
3729 if (op1->IsBoxedValue())
3731 JITDUMP("Attempting to optimize box(...).getType() to direct type construction\n");
3733 // Try and clean up the box. Obtain the handle we
3734 // were going to pass to the newobj.
3735 GenTree* boxTypeHandle = gtTryRemoveBoxUpstreamEffects(op1, BR_REMOVE_AND_NARROW_WANT_TYPE_HANDLE);
3737 if (boxTypeHandle != nullptr)
3739 // Note we don't need to play the TYP_STRUCT games here like
3740 // we do for LDTOKEN since the return value of this operator is Type,
3741 // not RuntimeTypeHandle.
3743 GenTreeArgList* helperArgs = gtNewArgList(boxTypeHandle);
3744 GenTree* runtimeType =
3745 gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE, TYP_REF, helperArgs);
3746 retNode = runtimeType;
3750 // If we have a constrained callvirt with a "box this" transform
3751 // we know we have a value class and hence an exact type.
3753 // If so, instead of boxing and then extracting the type, just
3754 // construct the type directly.
3755 if ((retNode == nullptr) && (pConstrainedResolvedToken != nullptr) &&
3756 (constraintCallThisTransform == CORINFO_BOX_THIS))
3758 // Ensure this is one of the simple box cases (in particular, rule out nullables).
3759 const CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pConstrainedResolvedToken->hClass);
3760 const bool isSafeToOptimize = (boxHelper == CORINFO_HELP_BOX);
3762 if (isSafeToOptimize)
3764 JITDUMP("Optimizing constrained box-this obj.getType() to direct type construction\n");
3766 GenTree* typeHandleOp =
3767 impTokenToHandle(pConstrainedResolvedToken, nullptr, TRUE /* mustRestoreHandle */);
3768 if (typeHandleOp == nullptr)
3770 assert(compDonotInline());
3773 GenTreeArgList* helperArgs = gtNewArgList(typeHandleOp);
3774 GenTree* runtimeType =
3775 gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE, TYP_REF, helperArgs);
3776 retNode = runtimeType;
3781 if (retNode != nullptr)
3783 JITDUMP("Optimized result for call to GetType is\n");
3786 gtDispTree(retNode);
3791 // Else expand as an intrinsic, unless the call is constrained,
3792 // in which case we defer expansion to allow impImportCall to do the
3793 // special constraint processing.
3794 if ((retNode == nullptr) && (pConstrainedResolvedToken == nullptr))
3796 JITDUMP("Expanding as special intrinsic\n");
3798 op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, intrinsicID, method);
3800 // Set the CALL flag to indicate that the operator is implemented by a call.
3801 // Set also the EXCEPTION flag because the native implementation of
3802 // CORINFO_INTRINSIC_Object_GetType intrinsic can throw NullReferenceException.
3803 op1->gtFlags |= (GTF_CALL | GTF_EXCEPT);
3805 // Might be further optimizable, so arrange to leave a mark behind
3809 if (retNode == nullptr)
3811 JITDUMP("Leaving as normal call\n");
3812 // Might be further optimizable, so arrange to leave a mark behind
3819 // Implement ByReference Ctor. This wraps the assignment of the ref into a byref-like field
3820 // in a value type. The canonical example of this is Span<T>. In effect this is just a
3821 // substitution. The parameter byref will be assigned into the newly allocated object.
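// Roughly (pseudo-C#, field name illustrative): the constructor call collapses
// to "this._value = value", storing the byref argument into the struct's first field.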
3822 case CORINFO_INTRINSIC_ByReference_Ctor:
3824 // Remove call to constructor and directly assign the byref passed
3825 // to the call to the first slot of the ByReference struct.
3826 op1 = impPopStack().val;
3827 GenTree* thisptr = newobjThis;
3828 CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(clsHnd, 0);
3829 GenTree* field = gtNewFieldRef(TYP_BYREF, fldHnd, thisptr, 0);
3830 GenTree* assign = gtNewAssignNode(field, op1);
3831 GenTree* byReferenceStruct = gtCloneExpr(thisptr->gtGetOp1());
3832 assert(byReferenceStruct != nullptr);
3833 impPushOnStack(byReferenceStruct, typeInfo(TI_STRUCT, clsHnd));
3837 // Implement ptr value getter for ByReference struct.
3838 case CORINFO_INTRINSIC_ByReference_Value:
3840 op1 = impPopStack().val;
3841 CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(clsHnd, 0);
3842 GenTree* field = gtNewFieldRef(TYP_BYREF, fldHnd, op1, 0);
3846 case CORINFO_INTRINSIC_Span_GetItem:
3847 case CORINFO_INTRINSIC_ReadOnlySpan_GetItem:
3849 // Have an index and a pointer to Span<T> "s" on the stack. Expand to:
3853 // BoundsCheck(index, s->_length)
3854 // s->_pointer + index * sizeof(T)
3856 // For ReadOnlySpan<T> -- same expansion, as it now returns a readonly ref
3858 // Signature should show one class type parameter, which
3859 // we need to examine.
3860 assert(sig->sigInst.classInstCount == 1);
3861 CORINFO_CLASS_HANDLE spanElemHnd = sig->sigInst.classInst[0];
3862 const unsigned elemSize = info.compCompHnd->getClassSize(spanElemHnd);
3863 assert(elemSize > 0);
3865 const bool isReadOnly = (intrinsicID == CORINFO_INTRINSIC_ReadOnlySpan_GetItem);
3867 JITDUMP("\nimpIntrinsic: Expanding %sSpan<T>.get_Item, T=%s, sizeof(T)=%u\n", isReadOnly ? "ReadOnly" : "",
3868 info.compCompHnd->getClassName(spanElemHnd), elemSize);
3870 GenTree* index = impPopStack().val;
3871 GenTree* ptrToSpan = impPopStack().val;
3872 GenTree* indexClone = nullptr;
3873 GenTree* ptrToSpanClone = nullptr;
3874 assert(varTypeIsIntegral(index));
3875 assert(ptrToSpan->TypeGet() == TYP_BYREF);
3880 printf("with ptr-to-span\n");
3881 gtDispTree(ptrToSpan);
3882 printf("and index\n");
3885 #endif // defined(DEBUG)
3887 // We need to use both index and ptr-to-span twice, so clone or spill.
3888 index = impCloneExpr(index, &indexClone, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
3889 nullptr DEBUGARG("Span.get_Item index"));
3890 ptrToSpan = impCloneExpr(ptrToSpan, &ptrToSpanClone, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
3891 nullptr DEBUGARG("Span.get_Item ptrToSpan"));
3894 CORINFO_FIELD_HANDLE lengthHnd = info.compCompHnd->getFieldInClass(clsHnd, 1);
3895 const unsigned lengthOffset = info.compCompHnd->getFieldOffset(lengthHnd);
3896 GenTree* length = gtNewFieldRef(TYP_INT, lengthHnd, ptrToSpan, lengthOffset);
3897 GenTree* boundsCheck = new (this, GT_ARR_BOUNDS_CHECK)
3898 GenTreeBoundsChk(GT_ARR_BOUNDS_CHECK, TYP_VOID, index, length, SCK_RNGCHK_FAIL);
3901 GenTree* indexIntPtr = impImplicitIorI4Cast(indexClone, TYP_I_IMPL);
3902 GenTree* sizeofNode = gtNewIconNode(elemSize);
3903 GenTree* mulNode = gtNewOperNode(GT_MUL, TYP_I_IMPL, indexIntPtr, sizeofNode);
3904 CORINFO_FIELD_HANDLE ptrHnd = info.compCompHnd->getFieldInClass(clsHnd, 0);
3905 const unsigned ptrOffset = info.compCompHnd->getFieldOffset(ptrHnd);
3906 GenTree* data = gtNewFieldRef(TYP_BYREF, ptrHnd, ptrToSpanClone, ptrOffset);
3907 GenTree* result = gtNewOperNode(GT_ADD, TYP_BYREF, data, mulNode);
3910 var_types resultType = JITtype2varType(sig->retType);
3911 assert(resultType == result->TypeGet());
3912 retNode = gtNewOperNode(GT_COMMA, resultType, boundsCheck, result);
3917 case CORINFO_INTRINSIC_GetRawHandle:
3919 noway_assert(IsTargetAbi(CORINFO_CORERT_ABI)); // Only CoreRT supports it.
3920 CORINFO_RESOLVED_TOKEN resolvedToken;
3921 resolvedToken.tokenContext = MAKE_METHODCONTEXT(info.compMethodHnd);
3922 resolvedToken.tokenScope = info.compScopeHnd;
3923 resolvedToken.token = memberRef;
3924 resolvedToken.tokenType = CORINFO_TOKENKIND_Method;
3926 CORINFO_GENERICHANDLE_RESULT embedInfo;
3927 info.compCompHnd->expandRawHandleIntrinsic(&resolvedToken, &embedInfo);
3929 GenTree* rawHandle = impLookupToTree(&resolvedToken, &embedInfo.lookup, gtTokenToIconFlags(memberRef),
3930 embedInfo.compileTimeHandle);
3931 if (rawHandle == nullptr)
3936 noway_assert(genTypeSize(rawHandle->TypeGet()) == genTypeSize(TYP_I_IMPL));
3938 unsigned rawHandleSlot = lvaGrabTemp(true DEBUGARG("rawHandle"));
3939 impAssignTempGen(rawHandleSlot, rawHandle, clsHnd, (unsigned)CHECK_SPILL_NONE);
3941 GenTree* lclVar = gtNewLclvNode(rawHandleSlot, TYP_I_IMPL);
3942 GenTree* lclVarAddr = gtNewOperNode(GT_ADDR, TYP_I_IMPL, lclVar);
3943 var_types resultType = JITtype2varType(sig->retType);
3944 retNode = gtNewOperNode(GT_IND, resultType, lclVarAddr);
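// In effect the handle is spilled to a temp and re-read through its address as
// the signature's return type: a pure reinterpretation of the native-int-sized
// handle (e.g. as a RuntimeTypeHandle-like struct), not a conversion.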
3949 case CORINFO_INTRINSIC_TypeEQ:
3950 case CORINFO_INTRINSIC_TypeNEQ:
3952 JITDUMP("Importing Type.op_*Equality intrinsic\n");
3953 op1 = impStackTop(1).val;
3954 op2 = impStackTop(0).val;
3955 GenTree* optTree = gtFoldTypeEqualityCall(intrinsicID, op1, op2);
3956 if (optTree != nullptr)
3958 // Success, clean up the evaluation stack.
3962 // See if we can optimize even further, to a handle compare.
3963 optTree = gtFoldTypeCompare(optTree);
3965 // See if we can now fold a handle compare to a constant.
3966 optTree = gtFoldExpr(optTree);
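// e.g. 'typeof(X) == typeof(Y)' can fold first to a comparison of class
// handles and, when both handles are known at jit time, all the way down to a
// constant 0 or 1.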
3972 // Retry optimizing these later
3978 case CORINFO_INTRINSIC_GetCurrentManagedThread:
3979 case CORINFO_INTRINSIC_GetManagedThreadId:
3981 // Retry optimizing these during morph
3987 /* Unknown intrinsic */
3988 intrinsicID = CORINFO_INTRINSIC_Illegal;
3992 // Look for new-style jit intrinsics by name
3993 if (ni != NI_Illegal)
3995 assert(retNode == nullptr);
3998 case NI_System_Enum_HasFlag:
4000 GenTree* thisOp = impStackTop(1).val;
4001 GenTree* flagOp = impStackTop(0).val;
4002 GenTree* optTree = gtOptimizeEnumHasFlag(thisOp, flagOp);
4004 if (optTree != nullptr)
4006 // Optimization successful. Pop the stack for real.
4013 // Retry optimizing this during morph.
4020 case NI_MathF_Round:
4023 // Math.Round and MathF.Round used to be a traditional JIT intrinsic. In order
4024 // to simplify the transition, we will just treat it as if it was still the
4025 // old intrinsic, CORINFO_INTRINSIC_Round. This should end up flowing properly
4026 // through the rest of the JIT.
4028 retNode = impMathIntrinsic(method, sig, callType, CORINFO_INTRINSIC_Round, tailCall);
4032 case NI_System_Collections_Generic_EqualityComparer_get_Default:
4034 // Flag for later handling during devirtualization.
4039 case NI_System_Buffers_Binary_BinaryPrimitives_ReverseEndianness:
4041 assert(sig->numArgs == 1);
4043 // We expect the return type of the ReverseEndianness routine to match the type of the
4044 // one and only argument to the method. We use a special instruction for 16-bit
4045 // BSWAPs since on x86 processors this is implemented as ROR <16-bit reg>, 8. Additionally,
4046 // we only emit 64-bit BSWAP instructions on 64-bit archs; if we're asked to perform a
4047 // 64-bit byte swap on a 32-bit arch, we'll fall to the default case in the switch block below.
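// For instance, the 16-bit swap turns 0x1234 into 0x3412 (GT_BSWAP16), and the
// 32-bit form turns 0x12345678 into 0x78563412 (GT_BSWAP).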
4049 switch (sig->retType)
4051 case CorInfoType::CORINFO_TYPE_SHORT:
4052 case CorInfoType::CORINFO_TYPE_USHORT:
4053 retNode = gtNewOperNode(GT_BSWAP16, callType, impPopStack().val);
4056 case CorInfoType::CORINFO_TYPE_INT:
4057 case CorInfoType::CORINFO_TYPE_UINT:
4058 #ifdef _TARGET_64BIT_
4059 case CorInfoType::CORINFO_TYPE_LONG:
4060 case CorInfoType::CORINFO_TYPE_ULONG:
4061 #endif // _TARGET_64BIT_
4062 retNode = gtNewOperNode(GT_BSWAP, callType, impPopStack().val);
4066 // This default case gets hit on 32-bit archs when a call to a 64-bit overload
4067 // of ReverseEndianness is encountered. In that case we'll let JIT treat this as a standard
4068 // method call, where the implementation decomposes the operation into two 32-bit
4069 // bswap routines. If the input to the 64-bit function is a constant, then we rely
4070 // on inlining + constant folding of 32-bit bswaps to effectively constant fold
4071 // the 64-bit call site.
4083 if (mustExpand && (retNode == nullptr))
4085 NO_WAY("JIT must expand the intrinsic!");
4088 // Optionally report if this intrinsic is special
4089 // (that is, potentially re-optimizable during morph).
4090 if (isSpecialIntrinsic != nullptr)
4092 *isSpecialIntrinsic = isSpecial;
4098 #ifdef FEATURE_HW_INTRINSICS
4099 //------------------------------------------------------------------------
4100 // impBaseIntrinsic: dispatch intrinsics to their own implementation
4101 //
4102 // Arguments:
4103 //    intrinsic -- id of the intrinsic function.
4104 //    method    -- method handle of the intrinsic function.
4105 //    sig       -- signature of the intrinsic call
4106 //
4107 // Return Value:
4108 //    the expanded intrinsic.
4110 GenTree* Compiler::impBaseIntrinsic(NamedIntrinsic intrinsic, CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* sig)
4112 GenTree* retNode = nullptr;
4113 unsigned simdSize = 0;
4114 var_types baseType = getBaseTypeAndSizeOfSIMDType(sig->retTypeClass, &simdSize);
4115 var_types retType = getSIMDTypeForSize(simdSize);
4119 CORINFO_CLASS_HANDLE thisClass = info.compCompHnd->getArgClass(sig, sig->args);
4120 var_types thisType = getBaseTypeOfSIMDType(thisClass);
4122 if (!varTypeIsArithmetic(thisType))
4128 if (!varTypeIsArithmetic(baseType))
4135 #if defined(_TARGET_ARM64_)
4136 case NI_Base_Vector64_AsByte:
4137 case NI_Base_Vector64_AsInt16:
4138 case NI_Base_Vector64_AsInt32:
4139 case NI_Base_Vector64_AsSByte:
4140 case NI_Base_Vector64_AsSingle:
4141 case NI_Base_Vector64_AsUInt16:
4142 case NI_Base_Vector64_AsUInt32:
4143 #endif // _TARGET_ARM64_
4144 case NI_Base_Vector128_As:
4145 case NI_Base_Vector128_AsByte:
4146 case NI_Base_Vector128_AsDouble:
4147 case NI_Base_Vector128_AsInt16:
4148 case NI_Base_Vector128_AsInt32:
4149 case NI_Base_Vector128_AsInt64:
4150 case NI_Base_Vector128_AsSByte:
4151 case NI_Base_Vector128_AsSingle:
4152 case NI_Base_Vector128_AsUInt16:
4153 case NI_Base_Vector128_AsUInt32:
4154 case NI_Base_Vector128_AsUInt64:
4155 #if defined(_TARGET_XARCH_)
4156 case NI_Base_Vector256_As:
4157 case NI_Base_Vector256_AsByte:
4158 case NI_Base_Vector256_AsDouble:
4159 case NI_Base_Vector256_AsInt16:
4160 case NI_Base_Vector256_AsInt32:
4161 case NI_Base_Vector256_AsInt64:
4162 case NI_Base_Vector256_AsSByte:
4163 case NI_Base_Vector256_AsSingle:
4164 case NI_Base_Vector256_AsUInt16:
4165 case NI_Base_Vector256_AsUInt32:
4166 case NI_Base_Vector256_AsUInt64:
4167 #endif // _TARGET_XARCH_
4169 // We fold away the cast here, as it only exists to satisfy
4170 // the type system. It is safe to do this here since the retNode type
4171 // and the signature return type are both the same TYP_SIMD.
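// e.g. vec.AsInt32() on a Vector128<float> imports as just the popped operand:
// the reinterpretation is free because both views share the same TYP_SIMD16
// representation at the IR level (a note on the effect, not new behavior).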
4173 assert(sig->numArgs == 0);
4174 assert(sig->hasThis());
4176 retNode = impSIMDPopStack(retType, true, sig->retTypeClass);
4177 SetOpLclRelatedToSIMDIntrinsic(retNode);
4178 assert(retNode->gtType == getSIMDTypeForSize(getSIMDTypeSizeInBytes(sig->retTypeSigClass)));
4182 #ifdef _TARGET_XARCH_
4183 case NI_Base_Vector128_Zero:
4185 assert(sig->numArgs == 0);
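// If the required instruction set is not supported, retNode stays null and the
// intrinsic falls back to the ordinary call path (the same applies to
// Vector256.Zero with AVX below).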
4187 if (compSupports(InstructionSet_SSE))
4189 retNode = gtNewSimdHWIntrinsicNode(retType, intrinsic, baseType, simdSize);
4194 case NI_Base_Vector256_Zero:
4196 assert(sig->numArgs == 0);
4198 if (compSupports(InstructionSet_AVX))
4200 retNode = gtNewSimdHWIntrinsicNode(retType, intrinsic, baseType, simdSize);
4204 #endif // _TARGET_XARCH_
4215 #endif // FEATURE_HW_INTRINSICS
4217 GenTree* Compiler::impMathIntrinsic(CORINFO_METHOD_HANDLE method,
4218 CORINFO_SIG_INFO* sig,
4220 CorInfoIntrinsics intrinsicID,
4226 assert(callType != TYP_STRUCT);
4227 assert(IsMathIntrinsic(intrinsicID));
4231 #if !defined(_TARGET_X86_)
4232 // Intrinsics that are not implemented directly by target instructions will
4233 // be re-materialized as user calls in the rationalizer. For prefixed tail calls,
4234 // we don't do this optimization because
4235 //  a) it must stay backward compatible with desktop .NET 4.6 / 4.6.1, and
4236 //  b) it would be a non-trivial task, or too late, to re-materialize a surviving
4237 //     tail-prefixed GT_INTRINSIC as a tail call in the rationalizer.
4238 if (!IsIntrinsicImplementedByUserCall(intrinsicID) || !tailCall)
4239 #else // _TARGET_X86_
4240 // On x86 RyuJIT, importing intrinsics that are implemented as user calls can cause incorrect calculation
4241 // of the depth of the stack if these intrinsics are used as arguments to another call. This causes bad
4242 // code generation for certain EH constructs.
4243 if (!IsIntrinsicImplementedByUserCall(intrinsicID))
4244 #endif // _TARGET_X86_
4246 switch (sig->numArgs)
4249 op1 = impPopStack().val;
4251 assert(varTypeIsFloating(op1));
4253 if (op1->TypeGet() != callType)
4255 op1 = gtNewCastNode(callType, op1, false, callType);
4258 op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, intrinsicID, method);
4262 op2 = impPopStack().val;
4263 op1 = impPopStack().val;
4265 assert(varTypeIsFloating(op1));
4266 assert(varTypeIsFloating(op2));
4268 if (op2->TypeGet() != callType)
4270 op2 = gtNewCastNode(callType, op2, false, callType);
4272 if (op1->TypeGet() != callType)
4274 op1 = gtNewCastNode(callType, op1, false, callType);
4277 op1 = new (this, GT_INTRINSIC) GenTreeIntrinsic(genActualType(callType), op1, op2, intrinsicID, method);
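// e.g. Math.Pow(x, y) or Math.Atan2(y, x) reach this two-argument path; each
// operand is cast to the call type first if its type differs from it.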
4281 NO_WAY("Unsupported number of args for Math Intrinsic");
4284 if (IsIntrinsicImplementedByUserCall(intrinsicID))
4286 op1->gtFlags |= GTF_CALL;
4293 //------------------------------------------------------------------------
4294 // lookupNamedIntrinsic: map method to jit named intrinsic value
4295 //
4296 // Arguments:
4297 //    method -- method handle for method
4298 //
4299 // Return Value:
4300 //    Id for the named intrinsic, or Illegal if none.
4301 //
4302 // Notes:
4303 //    method should have CORINFO_FLG_JIT_INTRINSIC set in its attributes,
4304 //    otherwise it is not a named jit intrinsic.
4307 NamedIntrinsic Compiler::lookupNamedIntrinsic(CORINFO_METHOD_HANDLE method)
4309 NamedIntrinsic result = NI_Illegal;
4311 const char* className = nullptr;
4312 const char* namespaceName = nullptr;
4313 const char* methodName = info.compCompHnd->getMethodNameFromMetadata(method, &className, &namespaceName);
4315 if ((namespaceName == nullptr) || (className == nullptr) || (methodName == nullptr))
4320 if (strcmp(namespaceName, "System") == 0)
4322 if ((strcmp(className, "Enum") == 0) && (strcmp(methodName, "HasFlag") == 0))
4324 result = NI_System_Enum_HasFlag;
4326 else if ((strcmp(className, "MathF") == 0) && (strcmp(methodName, "Round") == 0))
4328 result = NI_MathF_Round;
4330 else if ((strcmp(className, "Math") == 0) && (strcmp(methodName, "Round") == 0))
4332 result = NI_Math_Round;
4335 #if defined(_TARGET_XARCH_) // We currently only support BSWAP on xarch (x86/x64)
4336 else if (strcmp(namespaceName, "System.Buffers.Binary") == 0)
4338 if ((strcmp(className, "BinaryPrimitives") == 0) && (strcmp(methodName, "ReverseEndianness") == 0))
4340 result = NI_System_Buffers_Binary_BinaryPrimitives_ReverseEndianness;
4343 #endif // defined(_TARGET_XARCH_)
4344 else if (strcmp(namespaceName, "System.Collections.Generic") == 0)
4346 if ((strcmp(className, "EqualityComparer`1") == 0) && (strcmp(methodName, "get_Default") == 0))
4348 result = NI_System_Collections_Generic_EqualityComparer_get_Default;
4351 #ifdef FEATURE_HW_INTRINSICS
4352 else if (strncmp(namespaceName, "System.Runtime.Intrinsics", 25) == 0)
4354 namespaceName += 25;
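// After skipping the 25-character prefix, namespaceName is "" for types in
// System.Runtime.Intrinsics itself and the remainder (e.g. ".X86") for nested
// namespaces.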
4356 if (namespaceName[0] == '\0')
4358 if (strncmp(className, "Vector", 6) == 0)
4362 #if defined(_TARGET_ARM64_)
4363 if (strncmp(className, "64", 2) == 0)
4367 if (strcmp(className, "`1") == 0)
4369 if (strncmp(methodName, "As", 2) == 0)
4373 // Vector64_As, Vector64_AsDouble, Vector64_AsInt64, and Vector64_AsUInt64
4374 // are not currently supported as they require additional plumbing to be
4375 // supported by the JIT as TYP_SIMD8.
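// Note: the two-character "As" prefix has already been skipped at this point,
// so the comparisons below see only the suffix (e.g. "Byte" for Vector64.AsByte).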
4377 if (strcmp(methodName, "Byte") == 0)
4379 result = NI_Base_Vector64_AsByte;
4381 else if (strcmp(methodName, "Int16") == 0)
4383 result = NI_Base_Vector64_AsInt16;
4385 else if (strcmp(methodName, "Int32") == 0)
4387 result = NI_Base_Vector64_AsInt32;
4389 else if (strcmp(methodName, "SByte") == 0)
4391 result = NI_Base_Vector64_AsSByte;
4393 else if (strcmp(methodName, "Single") == 0)
4395 result = NI_Base_Vector64_AsSingle;
4397 else if (strcmp(methodName, "UInt16") == 0)
4399 result = NI_Base_Vector64_AsUInt16;
4401 else if (strcmp(methodName, "UInt32") == 0)
4403 result = NI_Base_Vector64_AsUInt32;
4409 #endif // _TARGET_ARM64_
4410 if (strncmp(className, "128", 3) == 0)
4414 if (strcmp(className, "`1") == 0)
4416 if (strncmp(methodName, "As", 2) == 0)
4420 if (strcmp(methodName, "`1") == 0)
4422 result = NI_Base_Vector128_As;
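// A methodName of "`1" here means the generic Vector128.As<U> overload: the
// metadata name is "As`1", and the leading "As" has been skipped above.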
4424 else if (strcmp(methodName, "Byte") == 0)
4426 result = NI_Base_Vector128_AsByte;
4428 else if (strcmp(methodName, "Double") == 0)
4430 result = NI_Base_Vector128_AsDouble;
4432 else if (strcmp(methodName, "Int16") == 0)
4434 result = NI_Base_Vector128_AsInt16;
4436 else if (strcmp(methodName, "Int32") == 0)
4438 result = NI_Base_Vector128_AsInt32;
4440 else if (strcmp(methodName, "Int64") == 0)
4442 result = NI_Base_Vector128_AsInt64;
4444 else if (strcmp(methodName, "SByte") == 0)
4446 result = NI_Base_Vector128_AsSByte;
4448 else if (strcmp(methodName, "Single") == 0)
4450 result = NI_Base_Vector128_AsSingle;
4452 else if (strcmp(methodName, "UInt16") == 0)
4454 result = NI_Base_Vector128_AsUInt16;
4456 else if (strcmp(methodName, "UInt32") == 0)
4458 result = NI_Base_Vector128_AsUInt32;
4460 else if (strcmp(methodName, "UInt64") == 0)
4462 result = NI_Base_Vector128_AsUInt64;
4465 #if defined(_TARGET_XARCH_)
4466 else if (strcmp(methodName, "get_Zero") == 0)
4468 result = NI_Base_Vector128_Zero;
4470 #endif // _TARGET_XARCH_
4473 #if defined(_TARGET_XARCH_)
4474 else if (strncmp(className, "256", 3) == 0)
4478 if (strcmp(className, "`1") == 0)
4480 if (strncmp(methodName, "As", 2) == 0)
4484 if (strcmp(methodName, "`1") == 0)
4486 result = NI_Base_Vector256_As;
4488 else if (strcmp(methodName, "Byte") == 0)
4490 result = NI_Base_Vector256_AsByte;
4492 else if (strcmp(methodName, "Double") == 0)
4494 result = NI_Base_Vector256_AsDouble;
4496 else if (strcmp(methodName, "Int16") == 0)
4498 result = NI_Base_Vector256_AsInt16;
4500 else if (strcmp(methodName, "Int32") == 0)
4502 result = NI_Base_Vector256_AsInt32;
4504 else if (strcmp(methodName, "Int64") == 0)
4506 result = NI_Base_Vector256_AsInt64;
4508 else if (strcmp(methodName, "SByte") == 0)
4510 result = NI_Base_Vector256_AsSByte;
4512 else if (strcmp(methodName, "Single") == 0)
4514 result = NI_Base_Vector256_AsSingle;
4516 else if (strcmp(methodName, "UInt16") == 0)
4518 result = NI_Base_Vector256_AsUInt16;
4520 else if (strcmp(methodName, "UInt32") == 0)
4522 result = NI_Base_Vector256_AsUInt32;
4524 else if (strcmp(methodName, "UInt64") == 0)
4526 result = NI_Base_Vector256_AsUInt64;
4529 else if (strcmp(methodName, "get_Zero") == 0)
4531 result = NI_Base_Vector256_Zero;
4535 #endif // _TARGET_XARCH_
4538 #if defined(_TARGET_XARCH_)
4539 else if (strcmp(namespaceName, ".X86") == 0)
4541 result = HWIntrinsicInfo::lookupId(className, methodName);
4543 #elif defined(_TARGET_ARM64_)
4544 else if (strcmp(namespaceName, ".Arm.Arm64") == 0)
4546 result = lookupHWIntrinsic(className, methodName);
4548 #else // !defined(_TARGET_XARCH_) && !defined(_TARGET_ARM64_)
4549 #error Unsupported platform
4550 #endif // !defined(_TARGET_XARCH_) && !defined(_TARGET_ARM64_)
4552 #endif // FEATURE_HW_INTRINSICS
4557 /*****************************************************************************/
4559 GenTree* Compiler::impArrayAccessIntrinsic(
4560 CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, int memberRef, bool readonlyCall, CorInfoIntrinsics intrinsicID)
4562 /* If we are generating SMALL_CODE, we don't want to use intrinsics for
4563 the following, as it generates fatter code. */
4566 if (compCodeOpt() == SMALL_CODE)
4571 /* These intrinsics generate fatter (but faster) code and are only
4572 done if we don't need SMALL_CODE */
4574 unsigned rank = (intrinsicID == CORINFO_INTRINSIC_Array_Set) ? (sig->numArgs - 1) : sig->numArgs;
4576 // The rank 1 case is special because it has to handle two array formats,
4577 // so we simply do not handle that case here.
4578 if (rank > GT_ARR_MAX_RANK || rank <= 1)
4583 CORINFO_CLASS_HANDLE arrElemClsHnd = nullptr;
4584 var_types elemType = JITtype2varType(info.compCompHnd->getChildType(clsHnd, &arrElemClsHnd));
4586 // For the ref case, we will only be able to inline if the types match
4587 // (the verifier checks for this; we don't care about the nonverified case) and the
4588 // type is final (so we don't need to do the cast).
4589 if ((intrinsicID != CORINFO_INTRINSIC_Array_Get) && !readonlyCall && varTypeIsGC(elemType))
4591 // Get the call site signature
4592 CORINFO_SIG_INFO LocalSig;
4593 eeGetCallSiteSig(memberRef, info.compScopeHnd, impTokenLookupContextHandle, &LocalSig);
4594 assert(LocalSig.hasThis());
4596 CORINFO_CLASS_HANDLE actualElemClsHnd;
4598 if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
4600 // Fetch the last argument, the one that indicates the type we are setting.
4601 CORINFO_ARG_LIST_HANDLE argType = LocalSig.args;
4602 for (unsigned r = 0; r < rank; r++)
4604 argType = info.compCompHnd->getArgNext(argType);
4607 typeInfo argInfo = verParseArgSigToTypeInfo(&LocalSig, argType);
4608 actualElemClsHnd = argInfo.GetClassHandle();
4612 assert(intrinsicID == CORINFO_INTRINSIC_Array_Address);
4614 // Fetch the return type
4615 typeInfo retInfo = verMakeTypeInfo(LocalSig.retType, LocalSig.retTypeClass);
4616 assert(retInfo.IsByRef());
4617 actualElemClsHnd = retInfo.GetClassHandle();
4620 // if it's not final, we can't do the optimization
4621 if (!(info.compCompHnd->getClassAttribs(actualElemClsHnd) & CORINFO_FLG_FINAL))
4627 unsigned arrayElemSize;
4628 if (elemType == TYP_STRUCT)
4630 assert(arrElemClsHnd);
4632 arrayElemSize = info.compCompHnd->getClassSize(arrElemClsHnd);
4636 arrayElemSize = genTypeSize(elemType);
4639 if ((unsigned char)arrayElemSize != arrayElemSize)
4641 // arrayElemSize would be truncated as an unsigned char.
4642 // This means the array element is too large. Don't do the optimization.
4646 GenTree* val = nullptr;
4648 if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
4650 // Assignment of a struct is more work, and there are more gets than sets.
4651 if (elemType == TYP_STRUCT)
4656 val = impPopStack().val;
4657 assert(genActualType(elemType) == genActualType(val->gtType) ||
4658 (elemType == TYP_FLOAT && val->gtType == TYP_DOUBLE) ||
4659 (elemType == TYP_INT && val->gtType == TYP_BYREF) ||
4660 (elemType == TYP_DOUBLE && val->gtType == TYP_FLOAT));
4663 noway_assert((unsigned char)GT_ARR_MAX_RANK == GT_ARR_MAX_RANK);
4665 GenTree* inds[GT_ARR_MAX_RANK];
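// The indices were pushed left to right, so pop them into inds[] in reverse
// to restore source order.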
4666 for (unsigned k = rank; k > 0; k--)
4668 inds[k - 1] = impPopStack().val;
4671 GenTree* arr = impPopStack().val;
4672 assert(arr->gtType == TYP_REF);
4675 new (this, GT_ARR_ELEM) GenTreeArrElem(TYP_BYREF, arr, static_cast<unsigned char>(rank),
4676 static_cast<unsigned char>(arrayElemSize), elemType, &inds[0]);
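// e.g. a rank-2 get on 'int[,] a' yields (a sketch) GT_IND(GT_ARR_ELEM(a, i, j))
// with elemType TYP_INT and arrayElemSize 4; Address skips the GT_IND below,
// and Set instead wraps the element in an assignment.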
4678 if (intrinsicID != CORINFO_INTRINSIC_Array_Address)
4680 arrElem = gtNewOperNode(GT_IND, elemType, arrElem);
4683 if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
4685 assert(val != nullptr);
4686 return gtNewAssignNode(arrElem, val);
4694 BOOL Compiler::verMergeEntryStates(BasicBlock* block, bool* changed)
4698 // do some basic checks first
4699 if (block->bbStackDepthOnEntry() != verCurrentState.esStackDepth)
4704 if (verCurrentState.esStackDepth > 0)
4706 // merge stack types
4707 StackEntry* parentStack = block->bbStackOnEntry();
4708 StackEntry* childStack = verCurrentState.esStack;
4710 for (i = 0; i < verCurrentState.esStackDepth; i++, parentStack++, childStack++)
4712 if (tiMergeToCommonParent(&parentStack->seTypeInfo, &childStack->seTypeInfo, changed) == FALSE)
4719 // merge initialization status of this ptr
4721 if (verTrackObjCtorInitState)
4723 // If we're tracking the CtorInitState, then it must not be unknown in the current state.
4724 assert(verCurrentState.thisInitialized != TIS_Bottom);
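// This is a small lattice merge: TIS_Bottom means no state has been recorded
// for the successor yet, matching states are kept, and conflicting states
// widen to TIS_Top.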
4726 // If the successor block's thisInit state is unknown, copy it from the current state.
4727 if (block->bbThisOnEntry() == TIS_Bottom)
4730 verSetThisInit(block, verCurrentState.thisInitialized);
4732 else if (verCurrentState.thisInitialized != block->bbThisOnEntry())
4734 if (block->bbThisOnEntry() != TIS_Top)
4737 verSetThisInit(block, TIS_Top);
4739 if (block->bbFlags & BBF_FAILED_VERIFICATION)
4741 // The block is bad. Control can flow through the block to any handler that catches the
4742 // verification exception, but the importer ignores bad blocks and therefore won't model
4743 // this flow in the normal way. To complete the merge into the bad block, the new state
4744 // needs to be manually pushed to the handlers that may be reached after the verification
4745 // exception occurs.
4747 // Usually, the new state was already propagated to the relevant handlers while processing
4748 // the predecessors of the bad block. The exception is when the bad block is at the start
4749 // of a try region, meaning it is protected by additional handlers that do not protect its
4750 // predecessors.
4752 if (block->hasTryIndex() && ((block->bbFlags & BBF_TRY_BEG) != 0))
4754 // Push TIS_Top to the handlers that protect the bad block. Note that this can cause
4755 // recursive calls back into this code path (if successors of the current bad block are
4756 // also bad blocks).
4758 ThisInitState origTIS = verCurrentState.thisInitialized;
4759 verCurrentState.thisInitialized = TIS_Top;
4760 impVerifyEHBlock(block, true);
4761 verCurrentState.thisInitialized = origTIS;
4769 assert(verCurrentState.thisInitialized == TIS_Bottom && block->bbThisOnEntry() == TIS_Bottom);
4775 /*****************************************************************************
4776 * 'logMsg' is true if a log message needs to be logged; false if the caller has
4777 * already logged it (presumably in a more detailed fashion than done here).
4778 * 'bVerificationException' is true for a verification exception, false for a
4779 * "call unauthorized by host" exception.
4782 void Compiler::verConvertBBToThrowVerificationException(BasicBlock* block DEBUGARG(bool logMsg))
4784 block->bbJumpKind = BBJ_THROW;
4785 block->bbFlags |= BBF_FAILED_VERIFICATION;
4787 impCurStmtOffsSet(block->bbCodeOffs);
4790 // we need this since BeginTreeList asserts otherwise
4791 impTreeList = impTreeLast = nullptr;
4792 block->bbFlags &= ~BBF_IMPORTED;
4796 JITLOG((LL_ERROR, "Verification failure: while compiling %s near IL offset %x..%xh \n", info.compFullName,
4797 block->bbCodeOffs, block->bbCodeOffsEnd));
4800 printf("\n\nVerification failure: %s near IL %xh \n", info.compFullName, block->bbCodeOffs);
4804 if (JitConfig.DebugBreakOnVerificationFailure())
4812 // if the stack is non-empty, evaluate all the side effects
4813 if (verCurrentState.esStackDepth > 0)
4815 impEvalSideEffects();
4817 assert(verCurrentState.esStackDepth == 0);
4820 gtNewHelperCallNode(CORINFO_HELP_VERIFICATION, TYP_VOID, gtNewArgList(gtNewIconNode(block->bbCodeOffs)));
4821 // verCurrentState.esStackDepth = 0;
4822 impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
4824 // The inliner is not able to handle methods that require a throw block, so
4825 // make sure this method never gets inlined.
4826 info.compCompHnd->setMethodAttribs(info.compMethodHnd, CORINFO_FLG_BAD_INLINEE);
4829 /*****************************************************************************
4832 void Compiler::verHandleVerificationFailure(BasicBlock* block DEBUGARG(bool logMsg))
4835 // On AMD64, for historical reasons involving design limitations of JIT64, the VM has a
4836 // slightly different mechanism in which it calls the JIT to perform IL verification:
4837 // in the case of transparent methods the VM calls for a predicate IsVerifiable()
4838 // that consists of calling the JIT with the IMPORT_ONLY flag and with the IL verify flag on.
4839 // If the JIT determines the method is not verifiable, it should raise the exception to the VM and let
4840 // it bubble up until reported by the runtime. Currently, RyuJIT doesn't bubble
4841 // up the exception; instead it embeds a throw inside the offending basic block and lets it
4842 // fail at runtime when the jitted method executes.
4844 // For AMD64 we don't want this behavior when the JIT has been called only for verification (i.e.
4845 // with the IMPORT_ONLY and IL Verification flags set) because this run won't actually generate code;
4846 // it just tries to find out whether the method will fail verification before actually jitting it. So, when
4847 // we detect these two conditions, instead of generating a throw statement inside the offending
4848 // basic block, we immediately fail to JIT and notify the VM, so that the IsVerifiable() predicate
4849 // returns false and RyuJIT behaves the same way JIT64 does.
4851 // The rationale behind this workaround is to avoid modifying the VM and maintain compatibility between JIT64 and
4852 // RyuJIT for the time being until we completely replace JIT64.
4853 // TODO-ARM64-Cleanup: We probably want to actually modify the VM in the future to avoid the unnecessary two passes.
4855 // On AMD64 we must make sure we're behaving the same way as JIT64, meaning we should only raise the verification
4856 // exception if we are only importing and verifying. The method verNeedsVerification() can also modify the
4857 // tiVerificationNeeded flag in the case it determines it can 'skip verification' during importation and defer it
4858 // to a runtime check. That's why we must assert one or the other (since the flag tiVerificationNeeded can
4859 // be turned off during importation).
4860 CLANG_FORMAT_COMMENT_ANCHOR;
4862 #ifdef _TARGET_64BIT_
4865 bool canSkipVerificationResult =
4866 info.compCompHnd->canSkipMethodVerification(info.compMethodHnd) != CORINFO_VERIFICATION_CANNOT_SKIP;
4867 assert(tiVerificationNeeded || canSkipVerificationResult);
4870 // Add the non-verifiable flag to the compiler
4871 if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IMPORT_ONLY))
4873 tiIsVerifiableCode = FALSE;
4875 #endif //_TARGET_64BIT_
4876 verResetCurrentState(block, &verCurrentState);
4877 verConvertBBToThrowVerificationException(block DEBUGARG(logMsg));
4880 impNoteLastILoffs(); // Remember at which BC offset the tree was finished
4884 /******************************************************************************/
4885 typeInfo Compiler::verMakeTypeInfo(CorInfoType ciType, CORINFO_CLASS_HANDLE clsHnd)
4887 assert(ciType < CORINFO_TYPE_COUNT);
4892 case CORINFO_TYPE_STRING:
4893 case CORINFO_TYPE_CLASS:
4894 tiResult = verMakeTypeInfo(clsHnd);
4895 if (!tiResult.IsType(TI_REF))
4896 { // type must be consistent with element type
4901 #ifdef _TARGET_64BIT_
4902 case CORINFO_TYPE_NATIVEINT:
4903 case CORINFO_TYPE_NATIVEUINT:
4906 // If we have more precise information, use it
4907 return verMakeTypeInfo(clsHnd);
4911 return typeInfo::nativeInt();
4914 #endif // _TARGET_64BIT_
4916 case CORINFO_TYPE_VALUECLASS:
4917 case CORINFO_TYPE_REFANY:
4918 tiResult = verMakeTypeInfo(clsHnd);
4919 // type must be consistent with element type
4920 if (!tiResult.IsValueClass())
4925 case CORINFO_TYPE_VAR:
4926 return verMakeTypeInfo(clsHnd);
4928 case CORINFO_TYPE_PTR: // for now, pointers are treated as an error
4929 case CORINFO_TYPE_VOID:
4933 case CORINFO_TYPE_BYREF:
4935 CORINFO_CLASS_HANDLE childClassHandle;
4936 CorInfoType childType = info.compCompHnd->getChildType(clsHnd, &childClassHandle);
4937 return ByRef(verMakeTypeInfo(childType, childClassHandle));
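// e.g. a 'ref int' signature element comes back as ByRef(typeInfo(TI_INT)).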
4943 { // If we have more precise information, use it
4944 return typeInfo(TI_STRUCT, clsHnd);
4948 return typeInfo(JITtype2tiType(ciType));
4954 /******************************************************************************/
4956 typeInfo Compiler::verMakeTypeInfo(CORINFO_CLASS_HANDLE clsHnd, bool bashStructToRef /* = false */)
4958 if (clsHnd == nullptr)
4963 // Byrefs should only occur in method and local signatures, which are accessed
4964 // using ICorClassInfo and ICorClassInfo.getChildType.
4965 // So findClass() and getClassAttribs() should not be called for byrefs
4967 if (JITtype2varType(info.compCompHnd->asCorInfoType(clsHnd)) == TYP_BYREF)
4969 assert(!"Did findClass() return a Byref?");
4973 unsigned attribs = info.compCompHnd->getClassAttribs(clsHnd);
4975 if (attribs & CORINFO_FLG_VALUECLASS)
4977 CorInfoType t = info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd);
4979 // Meta-data validation should ensure that CORINFO_TYPE_BYREF does
4980 // not occur here, so we may want to change this to an assert instead.
4981 if (t == CORINFO_TYPE_VOID || t == CORINFO_TYPE_BYREF || t == CORINFO_TYPE_PTR)
4986 #ifdef _TARGET_64BIT_
4987 if (t == CORINFO_TYPE_NATIVEINT || t == CORINFO_TYPE_NATIVEUINT)
4989 return typeInfo::nativeInt();
4991 #endif // _TARGET_64BIT_
4993 if (t != CORINFO_TYPE_UNDEF)
4995 return (typeInfo(JITtype2tiType(t)));
4997 else if (bashStructToRef)
4999 return (typeInfo(TI_REF, clsHnd));
5003 return (typeInfo(TI_STRUCT, clsHnd));
5006 else if (attribs & CORINFO_FLG_GENERIC_TYPE_VARIABLE)
5008 // See comment in _typeInfo.h for why we do it this way.
5009 return (typeInfo(TI_REF, clsHnd, true));
5013 return (typeInfo(TI_REF, clsHnd));
5017 /******************************************************************************/
5018 BOOL Compiler::verIsSDArray(typeInfo ti)
5020 if (ti.IsNullObjRef())
5021 { // nulls are SD arrays
5025 if (!ti.IsType(TI_REF))
5030 if (!info.compCompHnd->isSDArray(ti.GetClassHandleForObjRef()))
5037 /******************************************************************************/
5038 /* Given 'arrayObjectType' which is an array type, fetch the element type. */
5039 /* Returns an error type if anything goes wrong */
5041 typeInfo Compiler::verGetArrayElemType(typeInfo arrayObjectType)
5043 assert(!arrayObjectType.IsNullObjRef()); // you need to check for null explicitly since that is a success case
5045 if (!verIsSDArray(arrayObjectType))
5050 CORINFO_CLASS_HANDLE childClassHandle = nullptr;
5051 CorInfoType ciType = info.compCompHnd->getChildType(arrayObjectType.GetClassHandleForObjRef(), &childClassHandle);
5053 return verMakeTypeInfo(ciType, childClassHandle);
5056 /*****************************************************************************
5058 typeInfo Compiler::verParseArgSigToTypeInfo(CORINFO_SIG_INFO* sig, CORINFO_ARG_LIST_HANDLE args)
5060 CORINFO_CLASS_HANDLE classHandle;
5061 CorInfoType ciType = strip(info.compCompHnd->getArgType(sig, args, &classHandle));
5063 var_types type = JITtype2varType(ciType);
5064 if (varTypeIsGC(type))
5066 // For efficiency, getArgType only returns something in classHandle for
5067 // value types. For other types that have additional type info, you
5068 // have to call back explicitly
5069 classHandle = info.compCompHnd->getArgClass(sig, args);
5072 NO_WAY("Could not figure out Class specified in argument or local signature");
5076 return verMakeTypeInfo(ciType, classHandle);
5079 /*****************************************************************************/
5081 // This does the expensive check to figure out whether the method
5082 // needs to be verified. It is called only when we fail verification,
5083 // just before throwing the verification exception.
5085 BOOL Compiler::verNeedsVerification()
5087 // If we have previously determined that verification is NOT needed
5088 // (for example in Compiler::compCompile), that means verification is really not needed.
5089 // Return the same decision we made before.
5090 // (Note: This literally means that tiVerificationNeeded can never go from 0 to 1.)
5092 if (!tiVerificationNeeded)
5094 return tiVerificationNeeded;
5097 assert(tiVerificationNeeded);
5099 // Ok, we haven't concluded that verification is NOT needed. Consult the EE now to
5100 // obtain the answer.
5101 CorInfoCanSkipVerificationResult canSkipVerificationResult =
5102 info.compCompHnd->canSkipMethodVerification(info.compMethodHnd);
5104 // canSkipVerification will return one of the following three values:
5105 // CORINFO_VERIFICATION_CANNOT_SKIP = 0, // Cannot skip verification during jit time.
5106 // CORINFO_VERIFICATION_CAN_SKIP = 1, // Can skip verification during jit time.
5107 // CORINFO_VERIFICATION_RUNTIME_CHECK = 2, // Skip verification during jit time,
5108 // but need to insert a callout to the VM to ask during runtime
5109 // whether to skip verification or not.
5111 // Set tiRuntimeCalloutNeeded if canSkipVerification() instructs us to insert a callout for a runtime check
5112 if (canSkipVerificationResult == CORINFO_VERIFICATION_RUNTIME_CHECK)
5114 tiRuntimeCalloutNeeded = true;
5117 if (canSkipVerificationResult == CORINFO_VERIFICATION_DONT_JIT)
5119 // Dev10 706080 - Testers don't like the assert, so just silence it
5120 // by not using the macros that invoke debugAssert.
5124 // When tiVerificationNeeded is true, JIT will do the verification during JIT time.
5125 // The following line means we will NOT do jit time verification if canSkipVerification
5126 // returns CORINFO_VERIFICATION_CAN_SKIP or CORINFO_VERIFICATION_RUNTIME_CHECK.
5127 tiVerificationNeeded = (canSkipVerificationResult == CORINFO_VERIFICATION_CANNOT_SKIP);
5128 return tiVerificationNeeded;
5131 BOOL Compiler::verIsByRefLike(const typeInfo& ti)
5137 if (!ti.IsType(TI_STRUCT))
5141 return info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_CONTAINS_STACK_PTR;
5144 BOOL Compiler::verIsSafeToReturnByRef(const typeInfo& ti)
5146 if (ti.IsPermanentHomeByRef())
5156 BOOL Compiler::verIsBoxable(const typeInfo& ti)
5158 return (ti.IsPrimitiveType() || ti.IsObjRef() // includes boxed generic type variables
5159 || ti.IsUnboxedGenericTypeVar() ||
5160 (ti.IsType(TI_STRUCT) &&
5161 // exclude byreflike structs
5162 !(info.compCompHnd->getClassAttribs(ti.GetClassHandleForValueClass()) & CORINFO_FLG_CONTAINS_STACK_PTR)));
5165 // Is it a boxed value type?
5166 bool Compiler::verIsBoxedValueType(typeInfo ti)
5168 if (ti.GetType() == TI_REF)
5170 CORINFO_CLASS_HANDLE clsHnd = ti.GetClassHandleForObjRef();
5171 return !!eeIsValueClass(clsHnd);
5179 /*****************************************************************************
5181 * Check if a TailCall is legal.
5184 bool Compiler::verCheckTailCallConstraint(
5186 CORINFO_RESOLVED_TOKEN* pResolvedToken,
5187 CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken, // Is this a "constrained." call on a type parameter?
5188 bool speculative // If true, won't throw if verification fails. Instead it will
5189 // return false to the caller.
5190 // If false, it will throw.
5194 CORINFO_SIG_INFO sig;
5195 unsigned int popCount = 0; // we can't pop the stack since impImportCall needs it, so
5196 // this counter is used to keep track of how many items have been
5197 // virtually popped
5199 CORINFO_METHOD_HANDLE methodHnd = nullptr;
5200 CORINFO_CLASS_HANDLE methodClassHnd = nullptr;
5201 unsigned methodClassFlgs = 0;
5203 assert(impOpcodeIsCallOpcode(opcode));
5205 if (compIsForInlining())
5210 // for calli, VerifyOrReturn that this is not a virtual method
5211 if (opcode == CEE_CALLI)
5213 /* Get the call sig */
5214 eeGetSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, &sig);
5216 // We don't know the target method, so we have to infer the flags, or
5217 // assume the worst-case.
5218 mflags = (sig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC;
5222 methodHnd = pResolvedToken->hMethod;
5224 mflags = info.compCompHnd->getMethodAttribs(methodHnd);
5226 // When verifying generic code we pair the method handle with its
5227 // owning class to get the exact method signature.
5228 methodClassHnd = pResolvedToken->hClass;
5229 assert(methodClassHnd);
5231 eeGetMethodSig(methodHnd, &sig, methodClassHnd);
5233 // opcode specific check
5234 methodClassFlgs = info.compCompHnd->getClassAttribs(methodClassHnd);
5237 // We must have got the methodClassHnd if opcode is not CEE_CALLI
5238 assert((methodHnd != nullptr && methodClassHnd != nullptr) || opcode == CEE_CALLI);
5240 if ((sig.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
5242 eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, &sig);
5245 // check compatibility of the arguments
5246 unsigned int argCount;
5247 argCount = sig.numArgs;
5248 CORINFO_ARG_LIST_HANDLE args;
5252 typeInfo tiDeclared = verParseArgSigToTypeInfo(&sig, args).NormaliseForStack();
5254 // check that the argument is not a byref for tailcalls
5255 VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclared), "tailcall on byrefs", speculative);
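// e.g. a tail call passing 'ref local' must be rejected: the caller's frame,
// and thus the referenced slot, is gone by the time the callee runs.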
5257 // For unsafe code, we might have parameters containing a pointer to a stack location.
5258 // Disallow the tailcall for this kind of parameter.
5259 CORINFO_CLASS_HANDLE classHandle;
5260 CorInfoType ciType = strip(info.compCompHnd->getArgType(&sig, args, &classHandle));
5261 VerifyOrReturnSpeculative(ciType != CORINFO_TYPE_PTR, "tailcall on CORINFO_TYPE_PTR", speculative);
5263 args = info.compCompHnd->getArgNext(args);
5267 popCount += sig.numArgs;
5269 // check for 'this' which is on non-static methods, not called via NEWOBJ
5270 if (!(mflags & CORINFO_FLG_STATIC))
5272 // Always update the popCount.
5273 // This is crucial for the stack calculation to be correct.
5274 typeInfo tiThis = impStackTop(popCount).seTypeInfo;
5277 if (opcode == CEE_CALLI)
5279 // For CALLI, we don't know the methodClassHnd. Therefore, let's check the "this" object
5281 if (tiThis.IsValueClass())
5285 VerifyOrReturnSpeculative(!verIsByRefLike(tiThis), "byref in tailcall", speculative);
5289 // Check type compatibility of the this argument
5290 typeInfo tiDeclaredThis = verMakeTypeInfo(methodClassHnd);
5291 if (tiDeclaredThis.IsValueClass())
5293 tiDeclaredThis.MakeByRef();
5296 VerifyOrReturnSpeculative(!verIsByRefLike(tiDeclaredThis), "byref in tailcall", speculative);
5300 // Tail calls on constrained calls should be illegal too:
5301 // when instantiated at a value type, a constrained call may pass the address of a stack allocated value
5302 VerifyOrReturnSpeculative(!pConstrainedResolvedToken, "byref in constrained tailcall", speculative);
5304 // Get the exact view of the signature for an array method
5305 if (sig.retType != CORINFO_TYPE_VOID)
5307 if (methodClassFlgs & CORINFO_FLG_ARRAY)
5309 assert(opcode != CEE_CALLI);
5310 eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, &sig);
5314 typeInfo tiCalleeRetType = verMakeTypeInfo(sig.retType, sig.retTypeClass);
5315 typeInfo tiCallerRetType =
5316 verMakeTypeInfo(info.compMethodInfo->args.retType, info.compMethodInfo->args.retTypeClass);
5318 // a void return type gets morphed into the error type, so we have to treat it specially here
5319 if (sig.retType == CORINFO_TYPE_VOID)
5321 VerifyOrReturnSpeculative(info.compMethodInfo->args.retType == CORINFO_TYPE_VOID, "tailcall return mismatch",
5326 VerifyOrReturnSpeculative(tiCompatibleWith(NormaliseForStack(tiCalleeRetType),
5327 NormaliseForStack(tiCallerRetType), true),
5328 "tailcall return mismatch", speculative);
5331 // for tailcall, stack must be empty
5332 VerifyOrReturnSpeculative(verCurrentState.esStackDepth == popCount, "stack non-empty on tailcall", speculative);
5334 return true; // Yes, tailcall is legal
5337 /*****************************************************************************
5339 * Checks the IL verification rules for the call
5342 void Compiler::verVerifyCall(OPCODE opcode,
5343 CORINFO_RESOLVED_TOKEN* pResolvedToken,
5344 CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
5347 const BYTE* delegateCreateStart,
5348 const BYTE* codeAddr,
5349 CORINFO_CALL_INFO* callInfo DEBUGARG(const char* methodName))
5352 CORINFO_SIG_INFO* sig = nullptr;
5353 unsigned int popCount = 0; // we can't pop the stack since impImportCall needs it, so
5354 // this counter is used to keep track of how many items have been
5355 // virtually popped
5357 // for calli, VerifyOrReturn that this is not a virtual method
5358 if (opcode == CEE_CALLI)
5360 Verify(false, "Calli not verifiable");
5364 //<NICE> It would be nice to cache the rest of it, but eeFindMethod is the big ticket item.
5365 mflags = callInfo->verMethodFlags;
5367 sig = &callInfo->verSig;
5369 if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
5371 eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
5374 // opcode specific check
5375 unsigned methodClassFlgs = callInfo->classFlags;
5379 // cannot do callvirt on valuetypes
5380 VerifyOrReturn(!(methodClassFlgs & CORINFO_FLG_VALUECLASS), "callVirt on value class");
5381 VerifyOrReturn(sig->hasThis(), "CallVirt on static method");
5386 assert(!tailCall); // Importer should not allow this
5387 VerifyOrReturn((mflags & CORINFO_FLG_CONSTRUCTOR) && !(mflags & CORINFO_FLG_STATIC),
5388 "newobj must be on instance");
5390 if (methodClassFlgs & CORINFO_FLG_DELEGATE)
5392 VerifyOrReturn(sig->numArgs == 2, "wrong number args to delegate ctor");
5393 typeInfo tiDeclaredObj = verParseArgSigToTypeInfo(sig, sig->args).NormaliseForStack();
5394 typeInfo tiDeclaredFtn =
5395 verParseArgSigToTypeInfo(sig, info.compCompHnd->getArgNext(sig->args)).NormaliseForStack();
5396 VerifyOrReturn(tiDeclaredFtn.IsNativeIntType(), "ftn arg needs to be a native int type");
5398 assert(popCount == 0);
5399 typeInfo tiActualObj = impStackTop(1).seTypeInfo;
5400 typeInfo tiActualFtn = impStackTop(0).seTypeInfo;
5402 VerifyOrReturn(tiActualFtn.IsMethod(), "delegate needs method as first arg");
5403 VerifyOrReturn(tiCompatibleWith(tiActualObj, tiDeclaredObj, true), "delegate object type mismatch");
5404 VerifyOrReturn(tiActualObj.IsNullObjRef() || tiActualObj.IsType(TI_REF),
5405 "delegate object type mismatch");
5407 CORINFO_CLASS_HANDLE objTypeHandle =
5408 tiActualObj.IsNullObjRef() ? nullptr : tiActualObj.GetClassHandleForObjRef();
5410 // the method signature must be compatible with the delegate's invoke method
5412 // check that for virtual functions, the type of the object used to get the
5413 // ftn ptr is the same as the type of the object passed to the delegate ctor.
5414 // since this is a bit of work to determine in general, we pattern match stylized
5415 // code sequences.
5417 // the delegate creation code check, which used to be done later, is now done here
5418 // so we can read delegateMethodRef directly from
5419 // the preceding LDFTN or CEE_LDVIRTFN instruction sequence;
5420 // we then use it in our call to isCompatibleDelegate().
5422 mdMemberRef delegateMethodRef = mdMemberRefNil;
5423 VerifyOrReturn(verCheckDelegateCreation(delegateCreateStart, codeAddr, delegateMethodRef),
5424 "must create delegates with certain IL");
5426 CORINFO_RESOLVED_TOKEN delegateResolvedToken;
5427 delegateResolvedToken.tokenContext = impTokenLookupContextHandle;
5428 delegateResolvedToken.tokenScope = info.compScopeHnd;
5429 delegateResolvedToken.token = delegateMethodRef;
5430 delegateResolvedToken.tokenType = CORINFO_TOKENKIND_Method;
5431 info.compCompHnd->resolveToken(&delegateResolvedToken);
5433 CORINFO_CALL_INFO delegateCallInfo;
5434 eeGetCallInfo(&delegateResolvedToken, nullptr /* constraint typeRef */,
5435 addVerifyFlag(CORINFO_CALLINFO_SECURITYCHECKS), &delegateCallInfo);
5437 BOOL isOpenDelegate = FALSE;
5438 VerifyOrReturn(info.compCompHnd->isCompatibleDelegate(objTypeHandle, delegateResolvedToken.hClass,
5439 tiActualFtn.GetMethod(), pResolvedToken->hClass,
5441 "function incompatible with delegate");
5443 // check the constraints on the target method
5444 VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(delegateResolvedToken.hClass),
5445 "delegate target has unsatisfied class constraints");
5446 VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(delegateResolvedToken.hClass,
5447 tiActualFtn.GetMethod()),
5448 "delegate target has unsatisfied method constraints");
5450 // See ECMA spec section 1.8.1.5.2 (Delegating via instance dispatch)
5451 // for additional verification rules for delegates
5452 CORINFO_METHOD_HANDLE actualMethodHandle = tiActualFtn.GetMethod();
5453 DWORD actualMethodAttribs = info.compCompHnd->getMethodAttribs(actualMethodHandle);
5454 if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr))
5457 if ((actualMethodAttribs & CORINFO_FLG_VIRTUAL) && ((actualMethodAttribs & CORINFO_FLG_FINAL) == 0)
5459 && StrictCheckForNonVirtualCallToVirtualMethod()
5463 if (info.compCompHnd->shouldEnforceCallvirtRestriction(info.compScopeHnd))
5465 VerifyOrReturn(tiActualObj.IsThisPtr() && lvaIsOriginalThisReadOnly() ||
5466 verIsBoxedValueType(tiActualObj),
5467 "The 'this' parameter to the call must be either the calling method's "
5468 "'this' parameter or "
5469 "a boxed value type.");
5474 if (actualMethodAttribs & CORINFO_FLG_PROTECTED)
5476 BOOL targetIsStatic = actualMethodAttribs & CORINFO_FLG_STATIC;
5478 Verify(targetIsStatic || !isOpenDelegate,
5479 "Unverifiable creation of an open instance delegate for a protected member.");
5481 CORINFO_CLASS_HANDLE instanceClassHnd = (tiActualObj.IsNullObjRef() || targetIsStatic)
5483 : tiActualObj.GetClassHandleForObjRef();
5485 // In the case of protected methods, it is a requirement that the 'this'
5486 // pointer be a subclass of the current context. Perform this check.
5487 Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
5488 "Accessing protected method through wrong type.");
5493 // fall thru to default checks
5495 VerifyOrReturn(!(mflags & CORINFO_FLG_ABSTRACT), "method abstract");
5497 VerifyOrReturn(!((mflags & CORINFO_FLG_CONSTRUCTOR) && (methodClassFlgs & CORINFO_FLG_DELEGATE)),
5498 "can only newobj a delegate constructor");
5500 // check compatibility of the arguments
5501 unsigned int argCount;
5502 argCount = sig->numArgs;
5503 CORINFO_ARG_LIST_HANDLE args;
5507 typeInfo tiActual = impStackTop(popCount + argCount).seTypeInfo;
5509 typeInfo tiDeclared = verParseArgSigToTypeInfo(sig, args).NormaliseForStack();
5510 VerifyOrReturn(tiCompatibleWith(tiActual, tiDeclared, true), "type mismatch");
5512 args = info.compCompHnd->getArgNext(args);
5518 popCount += sig->numArgs;
5520 // check for 'this', which is on non-static methods, not called via NEWOBJ
5521 CORINFO_CLASS_HANDLE instanceClassHnd = info.compClassHnd;
5522 if (!(mflags & CORINFO_FLG_STATIC) && (opcode != CEE_NEWOBJ))
5524 typeInfo tiThis = impStackTop(popCount).seTypeInfo;
5527 // If it is null, we assume we can access it (since it will AV shortly)
5528 // If it is anything but a reference class, there is no hierarchy, so
5529 // again, we don't need the precise instance class to compute 'protected' access
5530 if (tiThis.IsType(TI_REF))
5532 instanceClassHnd = tiThis.GetClassHandleForObjRef();
5535 // Check type compatibility of the this argument
5536 typeInfo tiDeclaredThis = verMakeTypeInfo(pResolvedToken->hClass);
5537 if (tiDeclaredThis.IsValueClass())
5539 tiDeclaredThis.MakeByRef();
5542 // If this is a call to the base class .ctor, set thisPtr Init for
5543 // this block.
5544 if (mflags & CORINFO_FLG_CONSTRUCTOR)
5546 if (verTrackObjCtorInitState && tiThis.IsThisPtr() &&
5547 verIsCallToInitThisPtr(info.compClassHnd, pResolvedToken->hClass))
5549 assert(verCurrentState.thisInitialized !=
5550 TIS_Bottom); // This should never be the case just from the logic of the verifier.
5551 VerifyOrReturn(verCurrentState.thisInitialized == TIS_Uninit,
5552 "Call to base class constructor when 'this' is possibly initialized");
5553 // Otherwise, 'this' is now initialized.
5554 verCurrentState.thisInitialized = TIS_Init;
5555 tiThis.SetInitialisedObjRef();
5559 // We allow direct calls to value type constructors
5560 // NB: we have to check that the contents of tiThis is a value type, otherwise we could use a
5561 // constrained callvirt to illegally re-enter a .ctor on a value of reference type.
5562 VerifyOrReturn(tiThis.IsByRef() && DereferenceByRef(tiThis).IsValueClass(),
5563 "Bad call to a constructor");
5567 if (pConstrainedResolvedToken != nullptr)
5569 VerifyOrReturn(tiThis.IsByRef(), "non-byref this type in constrained call");
5571 typeInfo tiConstraint = verMakeTypeInfo(pConstrainedResolvedToken->hClass);
5573 // We just dereference this and test for equality
5574 tiThis.DereferenceByRef();
5575 VerifyOrReturn(typeInfo::AreEquivalent(tiThis, tiConstraint),
5576 "this type mismatch with constrained type operand");
5578 // Now pretend the this type is the boxed constrained type, for the sake of subsequent checks
5579 tiThis = typeInfo(TI_REF, pConstrainedResolvedToken->hClass);
5582 // To support direct calls on readonly byrefs, just pretend tiDeclaredThis is readonly too
5583 if (tiDeclaredThis.IsByRef() && tiThis.IsReadonlyByRef())
5585 tiDeclaredThis.SetIsReadonlyByRef();
5588 VerifyOrReturn(tiCompatibleWith(tiThis, tiDeclaredThis, true), "this type mismatch");
5590 if (tiThis.IsByRef())
5592 // Find the actual type where the method exists (as opposed to what is declared
5593 // in the metadata). This is to prevent passing a byref as the "this" argument
5594 // while calling methods like System.ValueType.GetHashCode() which expect boxed objects.
5596 CORINFO_CLASS_HANDLE actualClassHnd = info.compCompHnd->getMethodClass(pResolvedToken->hMethod);
5597 VerifyOrReturn(eeIsValueClass(actualClassHnd),
5598 "Call to base type of valuetype (which is never a valuetype)");
5601 // Rules for non-virtual call to a non-final virtual method:
5604 // The "this" pointer is considered to be "possibly written" if
5605 // 1. Its address have been taken (LDARGA 0) anywhere in the method.
5607 // 2. It has been stored to (STARG.0) anywhere in the method.
5609 // A non-virtual call to a non-final virtual method is only allowed if
5610 // 1. The this pointer passed to the callee is an instance of a boxed value type.
5612 // 2. The this pointer passed to the callee is the current method's this pointer.
5613 // (and) The current method's this pointer is not "possibly written".
5615 // Thus the rule is that if you assign to this ANYWHERE you can't make "base" calls to
5616 // virtual methods. (Luckily this does not affect .ctors, since they are not virtual.)
5617 // This is stronger than is strictly needed, but implementing a laxer rule is significantly
5618 // harder and more error prone.
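// e.g. a method that contains 'starg.0' anywhere may not use 'call' (rather
// than 'callvirt') on ldarg.0 to invoke a non-final virtual method.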
5620 if (opcode == CEE_CALL && (mflags & CORINFO_FLG_VIRTUAL) && ((mflags & CORINFO_FLG_FINAL) == 0)
5622 && StrictCheckForNonVirtualCallToVirtualMethod()
5626 if (info.compCompHnd->shouldEnforceCallvirtRestriction(info.compScopeHnd))
5629 tiThis.IsThisPtr() && lvaIsOriginalThisReadOnly() || verIsBoxedValueType(tiThis),
5630 "The 'this' parameter to the call must be either the calling method's 'this' parameter or "
5631 "a boxed value type.");
5636 // check any constraints on the callee's class and type parameters
5637 VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(pResolvedToken->hClass),
5638 "method has unsatisfied class constraints");
5639 VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(pResolvedToken->hClass, pResolvedToken->hMethod),
5640 "method has unsatisfied method constraints");
5642 if (mflags & CORINFO_FLG_PROTECTED)
5644 VerifyOrReturn(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
5645 "Can't access protected method");
5648 // Get the exact view of the signature for an array method
5649 if (sig->retType != CORINFO_TYPE_VOID)
5651 eeGetMethodSig(pResolvedToken->hMethod, sig, pResolvedToken->hClass);
5654 // "readonly." prefixed calls only allowed for the Address operation on arrays.
5655 // The methods supported by array types are under the control of the EE
5656 // so we can trust that only the Address operation returns a byref.
5659 typeInfo tiCalleeRetType = verMakeTypeInfo(sig->retType, sig->retTypeClass);
5660 VerifyOrReturn((methodClassFlgs & CORINFO_FLG_ARRAY) && tiCalleeRetType.IsByRef(),
5661 "unexpected use of readonly prefix");
5664 // Verify the tailcall
5667 verCheckTailCallConstraint(opcode, pResolvedToken, pConstrainedResolvedToken, false);
5671 /*****************************************************************************
5672 * Checks that a delegate creation is done using the following pattern:
5673 *     dup
5674 *     ldvirtftn targetMemberRef
5675 *   OR
5676 *     ldftn targetMemberRef
5678 * 'delegateCreateStart' points at the last dup or ldftn in this basic block (null if
5679 * not in this basic block)
5681 * targetMemberRef is read from the code sequence.
5682 * targetMemberRef is validated iff verificationNeeded.
5685 BOOL Compiler::verCheckDelegateCreation(const BYTE* delegateCreateStart,
5686 const BYTE* codeAddr,
5687 mdMemberRef& targetMemberRef)
5689 if (impIsLDFTN_TOKEN(delegateCreateStart, codeAddr))
5691 targetMemberRef = getU4LittleEndian(&delegateCreateStart[2]);
5694 else if (impIsDUP_LDVIRTFTN_TOKEN(delegateCreateStart, codeAddr))
5696 targetMemberRef = getU4LittleEndian(&delegateCreateStart[3]);
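// The offsets follow from the IL encodings: ldftn is the two-byte opcode
// FE 06, so its inline token sits at offset 2; in the dup/ldvirtftn form,
// delegateCreateStart points at the one-byte dup (25) followed by ldvirtftn
// (FE 07), putting the token at offset 3.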
5703 typeInfo Compiler::verVerifySTIND(const typeInfo& tiTo, const typeInfo& value, const typeInfo& instrType)
5705 Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
5706 typeInfo ptrVal = verVerifyLDIND(tiTo, instrType);
5707 typeInfo normPtrVal = typeInfo(ptrVal).NormaliseForStack();
5708 if (!tiCompatibleWith(value, normPtrVal, true))
5710 Verify(tiCompatibleWith(value, normPtrVal, true), "type mismatch");
5711 compUnsafeCastUsed = true;
5716 typeInfo Compiler::verVerifyLDIND(const typeInfo& ptr, const typeInfo& instrType)
5718 assert(!instrType.IsStruct());
5723 ptrVal = DereferenceByRef(ptr);
5724 if (instrType.IsObjRef() && !ptrVal.IsObjRef())
5726 Verify(false, "bad pointer");
5727 compUnsafeCastUsed = true;
5729 else if (!instrType.IsObjRef() && !typeInfo::AreEquivalent(instrType, ptrVal))
5731 Verify(false, "pointer not consistent with instr");
5732 compUnsafeCastUsed = true;
5737 Verify(false, "pointer not byref");
5738 compUnsafeCastUsed = true;
5744 // Verify that the field is used properly. 'tiThis' is NULL for statics,
5745 // 'fieldFlags' is the field's attributes, and mutator is TRUE if it is a
5746 // ld*flda or a st*fld.
5747 // 'enclosingClass' is given if we are accessing a field in some specific type.
5749 void Compiler::verVerifyField(CORINFO_RESOLVED_TOKEN* pResolvedToken,
5750 const CORINFO_FIELD_INFO& fieldInfo,
5751 const typeInfo* tiThis,
5753 BOOL allowPlainStructAsThis)
5755 CORINFO_CLASS_HANDLE enclosingClass = pResolvedToken->hClass;
5756 unsigned fieldFlags = fieldInfo.fieldFlags;
5757 CORINFO_CLASS_HANDLE instanceClass =
5758 info.compClassHnd; // for statics, we imagine the instance is the current class.
5760 bool isStaticField = ((fieldFlags & CORINFO_FLG_FIELD_STATIC) != 0);
5763 Verify(!(fieldFlags & CORINFO_FLG_FIELD_UNMANAGED), "mutating an RVA-based static");
5764 if ((fieldFlags & CORINFO_FLG_FIELD_FINAL))
5766 Verify((info.compFlags & CORINFO_FLG_CONSTRUCTOR) && enclosingClass == info.compClassHnd &&
5767 info.compIsStatic == isStaticField,
5768 "bad use of initonly field (set or address taken)");
5772 if (tiThis == nullptr)
5774 Verify(isStaticField, "used static opcode with non-static field");
5778 typeInfo tThis = *tiThis;
5780 if (allowPlainStructAsThis && tThis.IsValueClass())
5785 // If it is null, we assume we can access it (since it will AV shortly)
5786 // If it is anything but a reference class, there is no hierarchy, so
5787 // again, we don't need the precise instance class to compute 'protected' access
5788 if (tiThis->IsType(TI_REF))
5790 instanceClass = tiThis->GetClassHandleForObjRef();
5793 // Note that even if the field is static, we require that the this pointer
5794 // satisfy the same constraints as a non-static field. This happens to
5795 // be simpler and seems reasonable.
5796 typeInfo tiDeclaredThis = verMakeTypeInfo(enclosingClass);
5797 if (tiDeclaredThis.IsValueClass())
5799 tiDeclaredThis.MakeByRef();
5801 // We allow a read-only tThis on any field access (even stores!) because if the
5802 // class implementor wants to prohibit stores, they should make the field private.
5803 // We do this by setting the read-only bit on the type we compare tThis to.
5804 tiDeclaredThis.SetIsReadonlyByRef();
5806 else if (verTrackObjCtorInitState && tThis.IsThisPtr())
5808 // Any field access is legal on "uninitialized" this pointers.
5809 // The easiest way to implement this is to simply set the
5810 // initialized bit for the duration of the type check on the
5811 // field access only. It does not change the state of the "this"
5812 // for the function as a whole. Note that the "tThis" is a copy
5813 // of the original "this" type (*tiThis) passed in.
5814 tThis.SetInitialisedObjRef();
5817 Verify(tiCompatibleWith(tThis, tiDeclaredThis, true), "this type mismatch");
5820 // Presently the JIT does not check that we don't store or take the address of init-only fields
5821 // since we cannot guarantee their immutability and it is not a security issue.
5823 // check any constraints on the field's class --- accessing the field might cause a class constructor to run.
5824 VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(enclosingClass),
5825 "field has unsatisfied class constraints");
5826 if (fieldFlags & CORINFO_FLG_FIELD_PROTECTED)
5828 Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClass),
5829 "Accessing protected method through wrong type.");
5833 void Compiler::verVerifyCond(const typeInfo& tiOp1, const typeInfo& tiOp2, unsigned opcode)
5835 if (tiOp1.IsNumberType())
5837 #ifdef _TARGET_64BIT_
5838 Verify(tiCompatibleWith(tiOp1, tiOp2, true), "Cond type mismatch");
5839 #else // !_TARGET_64BIT_
5840 // [10/17/2013] Consider changing this: to put on my verification lawyer hat,
5841 // this is non-conforming to the ECMA Spec: types don't have to be equivalent,
5842 // but compatible, since we can coalesce native int with int32 (see section III.1.5).
5843 Verify(typeInfo::AreEquivalent(tiOp1, tiOp2), "Cond type mismatch");
5844 #endif // !_TARGET_64BIT_
5846 else if (tiOp1.IsObjRef())
5858 Verify(FALSE, "Cond not allowed on object types");
5860 Verify(tiOp2.IsObjRef(), "Cond type mismatch");
5862 else if (tiOp1.IsByRef())
5864 Verify(tiOp2.IsByRef(), "Cond type mismatch");
5868 Verify(tiOp1.IsMethod() && tiOp2.IsMethod(), "Cond type mismatch");
5872 void Compiler::verVerifyThisPtrInitialised()
5874 if (verTrackObjCtorInitState)
5876 Verify(verCurrentState.thisInitialized == TIS_Init, "this ptr is not initialized");
5880 BOOL Compiler::verIsCallToInitThisPtr(CORINFO_CLASS_HANDLE context, CORINFO_CLASS_HANDLE target)
5882 // Either target == context, in which case we are calling an alternate .ctor,
5883 // or target is the immediate parent of context.
5885 return ((target == context) || (target == info.compCompHnd->getParentType(context)));
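// Illustrative example (not from the original sources), in C#:
//     class D : B {
//         public D()      : base() { }   // target is the immediate parent of context
//         public D(int x) : this() { }   // target == context (alternate .ctor)
//     }
// are the two shapes of constructor calls that may legally initialize 'this'.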
5888 GenTree* Compiler::impImportLdvirtftn(GenTree* thisPtr,
5889 CORINFO_RESOLVED_TOKEN* pResolvedToken,
5890 CORINFO_CALL_INFO* pCallInfo)
5892 if ((pCallInfo->methodFlags & CORINFO_FLG_EnC) && !(pCallInfo->classFlags & CORINFO_FLG_INTERFACE))
5894 NO_WAY("Virtual call to a function added via EnC is not supported");
5897 // CoreRT generic virtual method
5898 if ((pCallInfo->sig.sigInst.methInstCount != 0) && IsTargetAbi(CORINFO_CORERT_ABI))
5900 GenTree* runtimeMethodHandle = nullptr;
5901 if (pCallInfo->exactContextNeedsRuntimeLookup)
5903 runtimeMethodHandle =
5904 impRuntimeLookupToTree(pResolvedToken, &pCallInfo->codePointerLookup, pCallInfo->hMethod);
5908 runtimeMethodHandle = gtNewIconEmbMethHndNode(pResolvedToken->hMethod);
5910 return gtNewHelperCallNode(CORINFO_HELP_GVMLOOKUP_FOR_SLOT, TYP_I_IMPL,
5911 gtNewArgList(thisPtr, runtimeMethodHandle));
5914 #ifdef FEATURE_READYTORUN_COMPILER
5915 if (opts.IsReadyToRun())
5917 if (!pCallInfo->exactContextNeedsRuntimeLookup)
5920 gtNewHelperCallNode(CORINFO_HELP_READYTORUN_VIRTUAL_FUNC_PTR, TYP_I_IMPL, gtNewArgList(thisPtr));
5922 call->setEntryPoint(pCallInfo->codePointerLookup.constLookup);
5927 // We need a runtime lookup. CoreRT has a ReadyToRun helper for that too.
5928 if (IsTargetAbi(CORINFO_CORERT_ABI))
5930 GenTree* ctxTree = getRuntimeContextTree(pCallInfo->codePointerLookup.lookupKind.runtimeLookupKind);
5932 return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL,
5933 gtNewArgList(ctxTree), &pCallInfo->codePointerLookup.lookupKind);
5938 // Get the exact descriptor for the static callsite
5939 GenTree* exactTypeDesc = impParentClassTokenToHandle(pResolvedToken);
5940 if (exactTypeDesc == nullptr)
5941 { // compDonotInline()
5945 GenTree* exactMethodDesc = impTokenToHandle(pResolvedToken);
5946 if (exactMethodDesc == nullptr)
5947 { // compDonotInline()
5951 GenTreeArgList* helpArgs = gtNewArgList(exactMethodDesc);
5953 helpArgs = gtNewListNode(exactTypeDesc, helpArgs);
5955 helpArgs = gtNewListNode(thisPtr, helpArgs);
5957 // Call helper function. This gets the target address of the final destination callsite.
5959 return gtNewHelperCallNode(CORINFO_HELP_VIRTUAL_FUNC_PTR, TYP_I_IMPL, helpArgs);
5962 //------------------------------------------------------------------------
5963 // impImportAndPushBox: build and import a value-type box
5966 // pResolvedToken - resolved token from the box operation
5972 // The value to be boxed is popped from the stack, and a tree for
5973 // the boxed value is pushed. This method may create upstream
5974 // statements, spill side effecting trees, and create new temps.
5976 // If importing an inlinee, we may also discover the inline must
5977 // fail. If so there is no new value pushed on the stack. Callers
5978 // should use CompDoNotInline after calling this method to see if
5979 // ongoing importation should be aborted.
5982 // Boxing of ref classes results in the same value as the value on
5983 // the top of the stack, so is handled inline in impImportBlockCode
5984 // for the CEE_BOX case. Only value or primitive type boxes make it
5987 // Boxing for nullable types is done via a helper call; boxing
5988 // of other value types is expanded inline or handled via helper
5989 // call, depending on the jit's codegen mode.
5991 // When the jit is operating in size and time constrained modes,
5992 // using a helper call here can save jit time and code size. But it
5993 // also may inhibit cleanup optimizations that could have had an
5994 // even greater benefit on code size and jit time. An optimal
5995 // strategy may need to peek ahead and see if it is easy to tell how
5996 // the box is being used. For now, we defer.
5998 void Compiler::impImportAndPushBox(CORINFO_RESOLVED_TOKEN* pResolvedToken)
6000 // Spill any special side effects
6001 impSpillSpecialSideEff();
6003 // Get the expression to box from the stack.
6004 GenTree* op1 = nullptr;
6005 GenTree* op2 = nullptr;
6006 StackEntry se = impPopStack();
6007 CORINFO_CLASS_HANDLE operCls = se.seTypeInfo.GetClassHandle();
6008 GenTree* exprToBox = se.val;
6010 // Look at what helper we should use.
6011 CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pResolvedToken->hClass);
6013 // Determine what expansion to prefer.
6015 // In size/time/debuggable constrained modes, the helper call
6016 // expansion for box is generally smaller and is preferred, unless
6017 // the value to box is a struct that comes from a call. In that
6018 // case the call can construct its return value directly into the
6019 // box payload, saving possibly some up-front zeroing.
6021 // Currently primitive type boxes always get inline expanded. We may
6022 // want to do the same for small structs if they don't come from
6023 // calls and don't have GC pointers, since explicitly copying such
6024 // structs is cheap.
6025 JITDUMP("\nCompiler::impImportAndPushBox -- handling BOX(value class) via");
6026 bool canExpandInline = (boxHelper == CORINFO_HELP_BOX);
6027 bool optForSize = !exprToBox->IsCall() && (operCls != nullptr) && (opts.compDbgCode || opts.MinOpts());
6028 bool expandInline = canExpandInline && !optForSize;
6032 JITDUMP(" inline allocate/copy sequence\n");
6034 // we are doing 'normal' boxing. This means that we can inline the box operation
6035 // Box(expr) gets morphed into
6036 // temp = new(clsHnd)
6037 // cpobj(temp+4, expr, clsHnd)
6039 // The code paths differ slightly below for structs and primitives because
6040 // "cpobj" differs in these cases. In one case you get
6041 //    impAssignStructPtr(temp+4, expr, clsHnd)
6042 // and the other you get
6043 //    *(temp+4) = expr
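// Illustrative example (not from the original sources): for C# code like
//     int i = 42; object o = i;
// the importer produces roughly
//     temp = ALLOCOBJ(int32 class)
//     *(temp + TARGET_POINTER_SIZE) = i
// and pushes BOX(temp) on the stack.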
6045 if (opts.MinOpts() || opts.compDbgCode)
6047 // For minopts/debug code, try and minimize the total number
6048 // of box temps by reusing an existing temp when possible.
6049 if (impBoxTempInUse || impBoxTemp == BAD_VAR_NUM)
6051 impBoxTemp = lvaGrabTemp(true DEBUGARG("Reusable Box Helper"));
6056 // When optimizing, use a new temp for each box operation
6057 // since we then know the exact class of the box temp.
6058 impBoxTemp = lvaGrabTemp(true DEBUGARG("Single-def Box Helper"));
6059 lvaTable[impBoxTemp].lvType = TYP_REF;
6060 lvaTable[impBoxTemp].lvSingleDef = 1;
6061 JITDUMP("Marking V%02u as a single def local\n", impBoxTemp);
6062 const bool isExact = true;
6063 lvaSetClass(impBoxTemp, pResolvedToken->hClass, isExact);
6066 // needs to stay in use until this box expression is appended to
6067 // some other node. We approximate this by keeping it alive until
6068 // the opcode stack becomes empty
6069 impBoxTempInUse = true;
6071 #ifdef FEATURE_READYTORUN_COMPILER
6072 bool usingReadyToRunHelper = false;
6074 if (opts.IsReadyToRun())
6076 op1 = impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_NEW, TYP_REF);
6077 usingReadyToRunHelper = (op1 != nullptr);
6080 if (!usingReadyToRunHelper)
6083 // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
6084 // and the newfast call with a single call to a dynamic R2R cell that will:
6085 // 1) Load the context
6086 // 2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
6087 // 3) Allocate and return the new object for boxing
6088 // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
6090 // Ensure that the value class is restored
6091 op2 = impTokenToHandle(pResolvedToken, nullptr, TRUE /* mustRestoreHandle */);
6094 // We must be backing out of an inline.
6095 assert(compDonotInline());
6099 op1 = gtNewAllocObjNode(info.compCompHnd->getNewHelper(pResolvedToken, info.compMethodHnd),
6100 pResolvedToken->hClass, TYP_REF, op2);
6103 /* Remember that this basic block contains 'new' of an object, and so does this method */
6104 compCurBB->bbFlags |= BBF_HAS_NEWOBJ;
6105 optMethodFlags |= OMF_HAS_NEWOBJ;
6107 GenTree* asg = gtNewTempAssign(impBoxTemp, op1);
6109 GenTree* asgStmt = impAppendTree(asg, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
6111 op1 = gtNewLclvNode(impBoxTemp, TYP_REF);
6112 op2 = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL);
6113 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1, op2);
6115 if (varTypeIsStruct(exprToBox))
6117 assert(info.compCompHnd->getClassSize(pResolvedToken->hClass) == info.compCompHnd->getClassSize(operCls));
6118 op1 = impAssignStructPtr(op1, exprToBox, operCls, (unsigned)CHECK_SPILL_ALL);
6122 var_types lclTyp = exprToBox->TypeGet();
6123 if (lclTyp == TYP_BYREF)
6125 lclTyp = TYP_I_IMPL;
6127 CorInfoType jitType = info.compCompHnd->asCorInfoType(pResolvedToken->hClass);
6128 if (impIsPrimitive(jitType))
6130 lclTyp = JITtype2varType(jitType);
6132 assert(genActualType(exprToBox->TypeGet()) == genActualType(lclTyp) ||
6133 varTypeIsFloating(lclTyp) == varTypeIsFloating(exprToBox->TypeGet()));
6134 var_types srcTyp = exprToBox->TypeGet();
6135 var_types dstTyp = lclTyp;
6137 if (srcTyp != dstTyp)
6139 assert((varTypeIsFloating(srcTyp) && varTypeIsFloating(dstTyp)) ||
6140 (varTypeIsIntegral(srcTyp) && varTypeIsIntegral(dstTyp)));
6141 exprToBox = gtNewCastNode(dstTyp, exprToBox, false, dstTyp);
6143 op1 = gtNewAssignNode(gtNewOperNode(GT_IND, lclTyp, op1), exprToBox);
6146 // Spill eval stack to flush out any pending side effects.
6147 impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportAndPushBox"));
6149 // Set up this copy as a second assignment.
6150 GenTree* copyStmt = impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
6152 op1 = gtNewLclvNode(impBoxTemp, TYP_REF);
6154 // Record that this is a "box" node and keep track of the matching parts.
6155 op1 = new (this, GT_BOX) GenTreeBox(TYP_REF, op1, asgStmt, copyStmt);
6157 // If it is a value class, mark the "box" node. We can use this information
6158 // to optimise several cases:
6159 // "box(x) == null" --> false
6160 // "(box(x)).CallAnInterfaceMethod(...)" --> "(&x).CallAValueTypeMethod"
6161 // "(box(x)).CallAnObjectMethod(...)" --> "(&x).CallAValueTypeMethod"
6163 op1->gtFlags |= GTF_BOX_VALUE;
6164 assert(op1->IsBoxedValue());
6165 assert(asg->gtOper == GT_ASG);
6169 // Don't optimize, just call the helper and be done with it.
6170 JITDUMP(" helper call because: %s\n", canExpandInline ? "optimizing for size" : "nullable");
6171 assert(operCls != nullptr);
6173 // Ensure that the value class is restored
6174 op2 = impTokenToHandle(pResolvedToken, nullptr, TRUE /* mustRestoreHandle */);
6177 // We must be backing out of an inline.
6178 assert(compDonotInline());
6182 GenTreeArgList* args = gtNewArgList(op2, impGetStructAddr(exprToBox, operCls, (unsigned)CHECK_SPILL_ALL, true));
6183 op1 = gtNewHelperCallNode(boxHelper, TYP_REF, args);
6186 /* Push the result back on the stack, */
6187 /* even if clsHnd is a value class we want the TI_REF */
6188 typeInfo tiRetVal = typeInfo(TI_REF, info.compCompHnd->getTypeForBox(pResolvedToken->hClass));
6189 impPushOnStack(op1, tiRetVal);
6192 //------------------------------------------------------------------------
6193 // impImportNewObjArray: Build and import `new` of multi-dimensional array
6196 // pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized
6197 // by a call to CEEInfo::resolveToken().
6198 // pCallInfo - The CORINFO_CALL_INFO that has been initialized
6199 // by a call to CEEInfo::getCallInfo().
6202 // The multi-dimensional array constructor arguments (array dimensions) are
6203 // pushed on the IL stack on entry to this method.
6206 // Multi-dimensional array constructors are imported as calls to a JIT
6207 // helper, not as regular calls.
6209 void Compiler::impImportNewObjArray(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo)
6211 GenTree* classHandle = impParentClassTokenToHandle(pResolvedToken);
6212 if (classHandle == nullptr)
6213 { // compDonotInline()
6217 assert(pCallInfo->sig.numArgs);
6220 GenTreeArgList* args;
6223 // There are two different JIT helpers that can be used to allocate
6224 // multi-dimensional arrays:
6226 // - CORINFO_HELP_NEW_MDARR - takes the array dimensions as varargs.
6227 // This variant is deprecated. It should eventually be removed.
6229 // - CORINFO_HELP_NEW_MDARR_NONVARARG - takes the array dimensions as
6230 // pointer to block of int32s. This variant is more portable.
6232 // The non-varargs helper is enabled for CoreRT only for now. Enabling this
6233 // unconditionally would require a ReadyToRun version bump.
6235 CLANG_FORMAT_COMMENT_ANCHOR;
6237 if (!opts.IsReadyToRun() || IsTargetAbi(CORINFO_CORERT_ABI))
6240 // Reuse the temp used to pass the array dimensions to avoid bloating
6241 // the stack frame in case there are multiple calls to multi-dim array
6242 // constructors within a single method.
6243 if (lvaNewObjArrayArgs == BAD_VAR_NUM)
6245 lvaNewObjArrayArgs = lvaGrabTemp(false DEBUGARG("NewObjArrayArgs"));
6246 lvaTable[lvaNewObjArrayArgs].lvType = TYP_BLK;
6247 lvaTable[lvaNewObjArrayArgs].lvExactSize = 0;
6250 // Increase size of lvaNewObjArrayArgs to be the largest size needed to hold 'numArgs' integers
6251 // for our call to CORINFO_HELP_NEW_MDARR_NONVARARG.
6252 lvaTable[lvaNewObjArrayArgs].lvExactSize =
6253 max(lvaTable[lvaNewObjArrayArgs].lvExactSize, pCallInfo->sig.numArgs * sizeof(INT32));
6255 // The side-effects may include allocation of more multi-dimensional arrays. Spill all side-effects
6256 // to ensure that the shared lvaNewObjArrayArgs local variable is only ever used to pass arguments
6257 // to one allocation at a time.
6258 impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportNewObjArray"));
6261 // The arguments of the CORINFO_HELP_NEW_MDARR_NONVARARG helper are:
6262 // - Array class handle
6263 // - Number of dimension arguments
6264 // - Pointer to block of int32 dimensions - address of lvaNewObjArrayArgs temp.
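// Illustrative example (not from the original sources): for "new int[2,3]"
// the helper receives the int[,] class handle, the dimension count 2, and
// the address of a two-element int32 block holding { 2, 3 }.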
6267 node = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK);
6268 node = gtNewOperNode(GT_ADDR, TYP_I_IMPL, node);
6270 // Pop dimension arguments from the stack one at a time and store them
6271 // into the lvaNewObjArrayArgs temp.
6272 for (int i = pCallInfo->sig.numArgs - 1; i >= 0; i--)
6274 GenTree* arg = impImplicitIorI4Cast(impPopStack().val, TYP_INT);
6276 GenTree* dest = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK);
6277 dest = gtNewOperNode(GT_ADDR, TYP_I_IMPL, dest);
6278 dest = gtNewOperNode(GT_ADD, TYP_I_IMPL, dest,
6279 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(INT32) * i));
6280 dest = gtNewOperNode(GT_IND, TYP_INT, dest);
6282 node = gtNewOperNode(GT_COMMA, node->TypeGet(), gtNewAssignNode(dest, arg), node);
6285 args = gtNewArgList(node);
6287 // pass number of arguments to the helper
6288 args = gtNewListNode(gtNewIconNode(pCallInfo->sig.numArgs), args);
6290 args = gtNewListNode(classHandle, args);
6292 node = gtNewHelperCallNode(CORINFO_HELP_NEW_MDARR_NONVARARG, TYP_REF, args);
6297 // The varargs helper needs the type and method handles as last
6298 // and last-1 param (this is a cdecl call, so args will be
6299 // pushed in reverse order on the CPU stack)
6302 args = gtNewArgList(classHandle);
6304 // pass number of arguments to the helper
6305 args = gtNewListNode(gtNewIconNode(pCallInfo->sig.numArgs), args);
6307 unsigned argFlags = 0;
6308 args = impPopList(pCallInfo->sig.numArgs, &pCallInfo->sig, args);
6310 node = gtNewHelperCallNode(CORINFO_HELP_NEW_MDARR, TYP_REF, args);
6312 // varargs, so we pop the arguments
6313 node->gtFlags |= GTF_CALL_POP_ARGS;
6316 // At the present time we don't track Caller pop arguments
6317 // that have GC references in them
6318 for (GenTreeArgList* temp = args; temp; temp = temp->Rest())
6320 assert(temp->Current()->gtType != TYP_REF);
6325 node->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
6326 node->gtCall.compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)pResolvedToken->hClass;
6328 // Remember that this basic block contains 'new' of a md array
6329 compCurBB->bbFlags |= BBF_HAS_NEWARRAY;
6331 impPushOnStack(node, typeInfo(TI_REF, pResolvedToken->hClass));
6334 GenTree* Compiler::impTransformThis(GenTree* thisPtr,
6335 CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
6336 CORINFO_THIS_TRANSFORM transform)
6340 case CORINFO_DEREF_THIS:
6342 GenTree* obj = thisPtr;
6344 // This does a LDIND on the obj, which should be a byref pointing to a ref
6345 impBashVarAddrsToI(obj);
6346 assert(genActualType(obj->gtType) == TYP_I_IMPL || obj->gtType == TYP_BYREF);
6347 CorInfoType constraintTyp = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass);
6349 obj = gtNewOperNode(GT_IND, JITtype2varType(constraintTyp), obj);
6350 // ldind could point anywhere, for example a boxed class static int
6351 obj->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
6356 case CORINFO_BOX_THIS:
6358 // Constraint calls where there might be no
6359 // unboxed entry point require us to implement the call via helper.
6360 // These only occur when a possible target of the call
6361 // may have inherited an implementation of an interface
6362 // method from System.Object or System.ValueType. The EE does not provide us with
6363 // "unboxed" versions of these methods.
6365 GenTree* obj = thisPtr;
6367 assert(obj->TypeGet() == TYP_BYREF || obj->TypeGet() == TYP_I_IMPL);
6368 obj = gtNewObjNode(pConstrainedResolvedToken->hClass, obj);
6369 obj->gtFlags |= GTF_EXCEPT;
6371 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(pConstrainedResolvedToken->hClass);
6372 var_types objType = JITtype2varType(jitTyp);
6373 if (impIsPrimitive(jitTyp))
6375 if (obj->OperIsBlk())
6377 obj->ChangeOperUnchecked(GT_IND);
6379 // Obj could point anywhere, for example a boxed class static int
6380 obj->gtFlags |= GTF_IND_TGTANYWHERE;
6381 obj->gtOp.gtOp2 = nullptr; // must be zero for tree walkers
6384 obj->gtType = JITtype2varType(jitTyp);
6385 assert(varTypeIsArithmetic(obj->gtType));
6388 // This pushes on the dereferenced byref
6389 // This is then used immediately to box.
6390 impPushOnStack(obj, verMakeTypeInfo(pConstrainedResolvedToken->hClass).NormaliseForStack());
6392 // This pops off the byref-to-a-value-type remaining on the stack and
6393 // replaces it with a boxed object.
6394 // This is then used as the object to the virtual call immediately below.
6395 impImportAndPushBox(pConstrainedResolvedToken);
6396 if (compDonotInline())
6401 obj = impPopStack().val;
6404 case CORINFO_NO_THIS_TRANSFORM:
6410 //------------------------------------------------------------------------
6411 // impCanPInvokeInline: check whether PInvoke inlining should be enabled in the current method.
6414 // true if PInvoke inlining should be enabled in the current method, false otherwise
6417 // Checks a number of ambient conditions where we could pinvoke but choose not to.
6419 bool Compiler::impCanPInvokeInline()
6421 return getInlinePInvokeEnabled() && (!opts.compDbgCode) && (compCodeOpt() != SMALL_CODE) &&
6422 (!opts.compNoPInvokeInlineCB) // profiler is preventing inline pinvoke
6426 //------------------------------------------------------------------------
6427 // impCanPInvokeInlineCallSite: basic legality checks using information
6428 // from a call to see if the call qualifies as an inline pinvoke.
6431 // block - block containing the call, or for inlinees, block
6432 // containing the call being inlined
6435 // true if this call can legally qualify as an inline pinvoke, false otherwise
6438 // For runtimes that support exception handling interop there are
6439 // restrictions on using inline pinvoke in handler regions.
6441 // * We have to disable pinvoke inlining inside of filters because
6442 // in case the main execution (i.e. in the try block) is inside
6443 // unmanaged code, we cannot reuse the inlined stub (we still need
6444 // the original state until we are in the catch handler)
6446 // * We disable pinvoke inlining inside handlers since the GSCookie
6447 // is in the inlined Frame (see
6448 // CORINFO_EE_INFO::InlinedCallFrameInfo::offsetOfGSCookie), but
6449 // this would not protect framelets/return-address of handlers.
6451 // These restrictions are currently also in place for CoreCLR but
6452 // can be relaxed when coreclr/#8459 is addressed.
6454 bool Compiler::impCanPInvokeInlineCallSite(BasicBlock* block)
6456 if (block->hasHndIndex())
6461 // The remaining limitations do not apply to CoreRT
6462 if (IsTargetAbi(CORINFO_CORERT_ABI))
6467 #ifdef _TARGET_AMD64_
6468 // On x64, we disable pinvoke inlining inside of try regions.
6469 // Here is the comment from JIT64 explaining why:
6471 // [VSWhidbey: 611015] - because the jitted code links in the
6472 // Frame (instead of the stub) we rely on the Frame not being
6473 // 'active' until inside the stub. This normally happens by the
6474 // stub setting the return address pointer in the Frame object
6475 // inside the stub. On a normal return, the return address
6476 // pointer is zeroed out so the Frame can be safely re-used, but
6477 // if an exception occurs, nobody zeros out the return address
6478 // pointer. Thus if we re-used the Frame object, it would go
6479 // 'active' as soon as we link it into the Frame chain.
6481 // Technically we only need to disable PInvoke inlining if we're
6482 // in a handler or if we're in a try body with a catch or
6483 // filter/except where other non-handler code in this method
6484 // might run and try to re-use the dirty Frame object.
6486 // A desktop test case where this seems to matter is
6487 // jit\jit64\ebvts\mcpp\sources2\ijw\__clrcall\vector_ctor_dtor.02\deldtor_clr.exe
6488 if (block->hasTryIndex())
6492 #endif // _TARGET_AMD64_
6497 //------------------------------------------------------------------------
6498 // impCheckForPInvokeCall examine call to see if it is a pinvoke and if so
6499 // if it can be expressed as an inline pinvoke.
6502 // call - tree for the call
6503 // methHnd - handle for the method being called (may be null)
6504 // sig - signature of the method being called
6505 // mflags - method flags for the method being called
6506 // block - block containing the call, or for inlinees, block
6507 // containing the call being inlined
6510 // Sets GTF_CALL_M_PINVOKE on the call for pinvokes.
6512 // Also sets GTF_CALL_UNMANAGED on call for inline pinvokes if the
6513 // call passes a combination of legality and profitability checks.
6515 // If GTF_CALL_UNMANAGED is set, increments info.compCallUnmanaged
6517 void Compiler::impCheckForPInvokeCall(
6518 GenTreeCall* call, CORINFO_METHOD_HANDLE methHnd, CORINFO_SIG_INFO* sig, unsigned mflags, BasicBlock* block)
6520 CorInfoUnmanagedCallConv unmanagedCallConv;
6522 // If VM flagged it as Pinvoke, flag the call node accordingly
6523 if ((mflags & CORINFO_FLG_PINVOKE) != 0)
6525 call->gtCallMoreFlags |= GTF_CALL_M_PINVOKE;
6530 if ((mflags & CORINFO_FLG_PINVOKE) == 0 || (mflags & CORINFO_FLG_NOSECURITYWRAP) == 0)
6535 unmanagedCallConv = info.compCompHnd->getUnmanagedCallConv(methHnd);
6539 CorInfoCallConv callConv = CorInfoCallConv(sig->callConv & CORINFO_CALLCONV_MASK);
6540 if (callConv == CORINFO_CALLCONV_NATIVEVARARG)
6542 // Used by the IL Stubs.
6543 callConv = CORINFO_CALLCONV_C;
6545 static_assert_no_msg((unsigned)CORINFO_CALLCONV_C == (unsigned)CORINFO_UNMANAGED_CALLCONV_C);
6546 static_assert_no_msg((unsigned)CORINFO_CALLCONV_STDCALL == (unsigned)CORINFO_UNMANAGED_CALLCONV_STDCALL);
6547 static_assert_no_msg((unsigned)CORINFO_CALLCONV_THISCALL == (unsigned)CORINFO_UNMANAGED_CALLCONV_THISCALL);
6548 unmanagedCallConv = CorInfoUnmanagedCallConv(callConv);
6550 assert(!call->gtCallCookie);
6553 if (unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_C && unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_STDCALL &&
6554 unmanagedCallConv != CORINFO_UNMANAGED_CALLCONV_THISCALL)
6558 optNativeCallCount++;
6560 if (methHnd == nullptr && (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IL_STUB) || IsTargetAbi(CORINFO_CORERT_ABI)))
6562 // PInvoke in CoreRT ABI must always be inlined. Non-inlineable CALLI cases have been
6563 // converted to regular method calls earlier using convertPInvokeCalliToCall.
6565 // PInvoke CALLI in IL stubs must be inlined
6570 if (!impCanPInvokeInlineCallSite(block))
6575 // Legal PInvoke CALL in PInvoke IL stubs must be inlined to avoid infinite recursive
6576 // inlining in CoreRT. Skip the ambient conditions checks and profitability checks.
6577 if (!IsTargetAbi(CORINFO_CORERT_ABI) || (info.compFlags & CORINFO_FLG_PINVOKE) == 0)
6579 if (!impCanPInvokeInline())
6584 // Size-speed tradeoff: don't use inline pinvoke at rarely
6585 // executed call sites. The non-inline version is more
6586 // compact.
6587 if (block->isRunRarely())
6593 // The expensive check should be last
6594 if (info.compCompHnd->pInvokeMarshalingRequired(methHnd, sig))
6600 JITLOG((LL_INFO1000000, "\nInline a CALLI PINVOKE call from method %s", info.compFullName));
6602 call->gtFlags |= GTF_CALL_UNMANAGED;
6603 info.compCallUnmanaged++;
6605 // The AMD64 convention is the same for native and managed
6606 if (unmanagedCallConv == CORINFO_UNMANAGED_CALLCONV_C)
6608 call->gtFlags |= GTF_CALL_POP_ARGS;
6611 if (unmanagedCallConv == CORINFO_UNMANAGED_CALLCONV_THISCALL)
6613 call->gtCallMoreFlags |= GTF_CALL_M_UNMGD_THISCALL;
6617 GenTreeCall* Compiler::impImportIndirectCall(CORINFO_SIG_INFO* sig, IL_OFFSETX ilOffset)
6619 var_types callRetTyp = JITtype2varType(sig->retType);
6621 /* The function pointer is on top of the stack - It may be a
6622 * complex expression. As it is evaluated after the args,
6623 * it may cause registered args to be spilled. Simply spill it.
6626 // Ignore this trivial case.
6627 if (impStackTop().val->gtOper != GT_LCL_VAR)
6629 impSpillStackEntry(verCurrentState.esStackDepth - 1,
6630 BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impImportIndirectCall"));
6633 /* Get the function pointer */
6635 GenTree* fptr = impPopStack().val;
6637 // The function pointer is typically sized to match the target pointer size.
6638 // However, stubgen IL optimization can change LDC.I8 to LDC.I4
6639 // See ILCodeStream::LowerOpcode
6640 assert(genActualType(fptr->gtType) == TYP_I_IMPL || genActualType(fptr->gtType) == TYP_INT);
6643 // This temporary must never be converted to a double in stress mode,
6644 // because that can introduce a call to the cast helper after the
6645 // arguments have already been evaluated.
6647 if (fptr->OperGet() == GT_LCL_VAR)
6649 lvaTable[fptr->gtLclVarCommon.gtLclNum].lvKeepType = 1;
6653 /* Create the call node */
6655 GenTreeCall* call = gtNewIndCallNode(fptr, callRetTyp, nullptr, ilOffset);
6657 call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
6662 /*****************************************************************************/
6664 void Compiler::impPopArgsForUnmanagedCall(GenTree* call, CORINFO_SIG_INFO* sig)
6666 assert(call->gtFlags & GTF_CALL_UNMANAGED);
6668 /* Since we push the arguments in reverse order (i.e. right -> left)
6669 * spill any side effects from the stack
6671 * OBS: If there is only one side effect we do not need to spill it;
6672 * thus we have to spill all side effects except the last one
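/* Illustrative example (not from the original sources): for f(g(), h(), x)
 * where g() and h() have side effects, g() must be spilled to a temp so the
 * reverse-order pop cannot reorder it past h(); h(), being the last side
 * effect, may stay on the stack.
 */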
6675 unsigned lastLevelWithSideEffects = UINT_MAX;
6677 unsigned argsToReverse = sig->numArgs;
6679 // For "thiscall", the first argument goes in a register. Since its
6680 // order does not need to be changed, we do not need to spill it
6682 if (call->gtCall.gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
6684 assert(argsToReverse);
6688 #ifndef _TARGET_X86_
6689 // Don't reverse args on ARM or x64 - first four args always placed in regs in order
6693 for (unsigned level = verCurrentState.esStackDepth - argsToReverse; level < verCurrentState.esStackDepth; level++)
6695 if (verCurrentState.esStack[level].val->gtFlags & GTF_ORDER_SIDEEFF)
6697 assert(lastLevelWithSideEffects == UINT_MAX);
6699 impSpillStackEntry(level,
6700 BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - other side effect"));
6702 else if (verCurrentState.esStack[level].val->gtFlags & GTF_SIDE_EFFECT)
6704 if (lastLevelWithSideEffects != UINT_MAX)
6706 /* We had a previous side effect - must spill it */
6707 impSpillStackEntry(lastLevelWithSideEffects,
6708 BAD_VAR_NUM DEBUGARG(false) DEBUGARG("impPopArgsForUnmanagedCall - side effect"));
6710 /* Record the level for the current side effect in case we will spill it */
6711 lastLevelWithSideEffects = level;
6715 /* This is the first side effect encountered - record its level */
6717 lastLevelWithSideEffects = level;
6722 /* The argument list is now "clean" - no out-of-order side effects
6723 * Pop the argument list in reverse order */
6725 GenTree* args = call->gtCall.gtCallArgs = impPopRevList(sig->numArgs, sig, sig->numArgs - argsToReverse);
6727 if (call->gtCall.gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
6729 GenTree* thisPtr = args->Current();
6730 impBashVarAddrsToI(thisPtr);
6731 assert(thisPtr->TypeGet() == TYP_I_IMPL || thisPtr->TypeGet() == TYP_BYREF);
6736 call->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
6740 //------------------------------------------------------------------------
6741 // impInitClass: Build a node to initialize the class before accessing the
6742 // field if necessary
6745 // pResolvedToken - The CORINFO_RESOLVED_TOKEN that has been initialized
6746 // by a call to CEEInfo::resolveToken().
6748 // Return Value: If needed, a pointer to the node that will perform the class
6749 // initialization. Otherwise, nullptr.
6752 GenTree* Compiler::impInitClass(CORINFO_RESOLVED_TOKEN* pResolvedToken)
6754 CorInfoInitClassResult initClassResult =
6755 info.compCompHnd->initClass(pResolvedToken->hField, info.compMethodHnd, impTokenLookupContextHandle);
6757 if ((initClassResult & CORINFO_INITCLASS_USE_HELPER) == 0)
6763 GenTree* node = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup);
6765 if (node == nullptr)
6767 assert(compDonotInline());
6773 node = gtNewHelperCallNode(CORINFO_HELP_INITCLASS, TYP_VOID, gtNewArgList(node));
6777 // Call the shared non gc static helper, as it's the fastest
6778 node = fgGetSharedCCtor(pResolvedToken->hClass);
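// Illustrative summary (this header is not in the original sources):
// impImportStaticReadOnlyField reads the jit-time value of a static
// read-only field and returns it as a constant node; e.g. a
// 'static readonly int' currently holding 42 becomes gtNewIconNode(42).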
6784 GenTree* Compiler::impImportStaticReadOnlyField(void* fldAddr, var_types lclTyp)
6786 GenTree* op1 = nullptr;
6795 ival = *((bool*)fldAddr);
6799 ival = *((signed char*)fldAddr);
6803 ival = *((unsigned char*)fldAddr);
6807 ival = *((short*)fldAddr);
6811 ival = *((unsigned short*)fldAddr);
6816 ival = *((int*)fldAddr);
6818 op1 = gtNewIconNode(ival);
6823 lval = *((__int64*)fldAddr);
6824 op1 = gtNewLconNode(lval);
6828 dval = *((float*)fldAddr);
6829 op1 = gtNewDconNode(dval);
6830 op1->gtType = TYP_FLOAT;
6834 dval = *((double*)fldAddr);
6835 op1 = gtNewDconNode(dval);
6839 assert(!"Unexpected lclTyp");
6846 GenTree* Compiler::impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolvedToken,
6847 CORINFO_ACCESS_FLAGS access,
6848 CORINFO_FIELD_INFO* pFieldInfo,
6853 switch (pFieldInfo->fieldAccessor)
6855 case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
6857 assert(!compIsForInlining());
6859 // We first call a special helper to get the statics base pointer
6860 op1 = impParentClassTokenToHandle(pResolvedToken);
6862 // compIsForInlining() is false so we should never get NULL here
6863 assert(op1 != nullptr);
6865 var_types type = TYP_BYREF;
6867 switch (pFieldInfo->helper)
6869 case CORINFO_HELP_GETGENERICS_NONGCTHREADSTATIC_BASE:
6872 case CORINFO_HELP_GETGENERICS_GCSTATIC_BASE:
6873 case CORINFO_HELP_GETGENERICS_NONGCSTATIC_BASE:
6874 case CORINFO_HELP_GETGENERICS_GCTHREADSTATIC_BASE:
6877 assert(!"unknown generic statics helper");
6881 op1 = gtNewHelperCallNode(pFieldInfo->helper, type, gtNewArgList(op1));
6883 FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
6884 op1 = gtNewOperNode(GT_ADD, type, op1,
6885 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, pFieldInfo->offset, fs));
6889 case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
6891 #ifdef FEATURE_READYTORUN_COMPILER
6892 if (opts.IsReadyToRun())
6894 unsigned callFlags = 0;
6896 if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT)
6898 callFlags |= GTF_CALL_HOISTABLE;
6901 op1 = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_STATIC_BASE, TYP_BYREF);
6902 op1->gtFlags |= callFlags;
6904 op1->gtCall.setEntryPoint(pFieldInfo->fieldLookup);
6909 op1 = fgGetStaticsCCtorHelper(pResolvedToken->hClass, pFieldInfo->helper);
6913 FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
6914 op1 = gtNewOperNode(GT_ADD, op1->TypeGet(), op1,
6915 new (this, GT_CNS_INT) GenTreeIntCon(TYP_INT, pFieldInfo->offset, fs));
6920 case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
6922 #ifdef FEATURE_READYTORUN_COMPILER
6923 noway_assert(opts.IsReadyToRun());
6924 CORINFO_LOOKUP_KIND kind = info.compCompHnd->getLocationOfThisType(info.compMethodHnd);
6925 assert(kind.needsRuntimeLookup);
6927 GenTree* ctxTree = getRuntimeContextTree(kind.runtimeLookupKind);
6928 GenTreeArgList* args = gtNewArgList(ctxTree);
6930 unsigned callFlags = 0;
6932 if (info.compCompHnd->getClassAttribs(pResolvedToken->hClass) & CORINFO_FLG_BEFOREFIELDINIT)
6934 callFlags |= GTF_CALL_HOISTABLE;
6936 var_types type = TYP_BYREF;
6937 op1 = gtNewHelperCallNode(CORINFO_HELP_READYTORUN_GENERIC_STATIC_BASE, type, args);
6938 op1->gtFlags |= callFlags;
6940 op1->gtCall.setEntryPoint(pFieldInfo->fieldLookup);
6941 FieldSeqNode* fs = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
6942 op1 = gtNewOperNode(GT_ADD, type, op1,
6943 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, pFieldInfo->offset, fs));
6946 #endif // FEATURE_READYTORUN_COMPILER
6952 if (!(access & CORINFO_ACCESS_ADDRESS))
6954 // In future, it may be better to just create the right tree here instead of folding it later.
6955 op1 = gtNewFieldRef(lclTyp, pResolvedToken->hField);
6957 if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
6959 op1->gtFlags |= GTF_FLD_INITCLASS;
6962 if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP)
6964 op1->gtType = TYP_REF; // points at boxed object
6965 FieldSeqNode* firstElemFldSeq =
6966 GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField);
6967 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
6968 new (this, GT_CNS_INT)
6969 GenTreeIntCon(TYP_I_IMPL, TARGET_POINTER_SIZE, firstElemFldSeq));
6971 if (varTypeIsStruct(lclTyp))
6973 // Constructor adds GTF_GLOB_REF. Note that this is *not* GTF_EXCEPT.
6974 op1 = gtNewObjNode(pFieldInfo->structType, op1);
6978 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
6979 op1->gtFlags |= GTF_GLOB_REF | GTF_IND_NONFAULTING;
6987 void** pFldAddr = nullptr;
6988 void* fldAddr = info.compCompHnd->getFieldAddress(pResolvedToken->hField, (void**)&pFldAddr);
6990 FieldSeqNode* fldSeq = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField);
6992 /* Create the data member node */
6993 op1 = gtNewIconHandleNode(pFldAddr == nullptr ? (size_t)fldAddr : (size_t)pFldAddr, GTF_ICON_STATIC_HDL,
6996 if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
6998 op1->gtFlags |= GTF_ICON_INITCLASS;
7001 if (pFldAddr != nullptr)
7003 // There are two cases here: either the static is RVA based,
7004 // in which case the type of the FIELD node is not a GC type
7005 // and the handle to the RVA is a TYP_I_IMPL; or the FIELD node is
7006 // a GC type and the handle to it is a TYP_BYREF in the GC heap
7007 // because handles to statics now go into the large object heap.
7009 var_types handleTyp = (var_types)(varTypeIsGC(lclTyp) ? TYP_BYREF : TYP_I_IMPL);
7010 op1 = gtNewOperNode(GT_IND, handleTyp, op1);
7011 op1->gtFlags |= GTF_IND_INVARIANT | GTF_IND_NONFAULTING;
7018 if (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP)
7020 op1 = gtNewOperNode(GT_IND, TYP_REF, op1);
7022 FieldSeqNode* fldSeq = GetFieldSeqStore()->CreateSingleton(FieldSeqStore::FirstElemPseudoField);
7024 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
7025 new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, TARGET_POINTER_SIZE, fldSeq));
7028 if (!(access & CORINFO_ACCESS_ADDRESS))
7030 if (varTypeIsStruct(lclTyp))
7032 // Constructor adds GTF_GLOB_REF. Note that this is *not* GTF_EXCEPT.
7033 op1 = gtNewObjNode(pFieldInfo->structType, op1);
7037 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
7038 op1->gtFlags |= GTF_GLOB_REF;
7045 // In general, try to call this before most of the verification work. Most people expect the access
7046 // exceptions before the verification exceptions. If you do this after, that usually doesn't happen. It
7047 // turns out that if you can't access something, we also think that you're unverifiable for other reasons.
7048 void Compiler::impHandleAccessAllowed(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall)
7050 if (result != CORINFO_ACCESS_ALLOWED)
7052 impHandleAccessAllowedInternal(result, helperCall);
7056 void Compiler::impHandleAccessAllowedInternal(CorInfoIsAccessAllowedResult result, CORINFO_HELPER_DESC* helperCall)
7060 case CORINFO_ACCESS_ALLOWED:
7062 case CORINFO_ACCESS_ILLEGAL:
7063 // if we're verifying, then we need to reject the illegal access to ensure that we don't think the
7064 // method is verifiable. Otherwise, delay the exception to runtime.
7065 if (compIsForImportOnly())
7067 info.compCompHnd->ThrowExceptionForHelper(helperCall);
7071 impInsertHelperCall(helperCall);
7074 case CORINFO_ACCESS_RUNTIME_CHECK:
7075 impInsertHelperCall(helperCall);
7080 void Compiler::impInsertHelperCall(CORINFO_HELPER_DESC* helperInfo)
7082 // Construct the argument list
7083 GenTreeArgList* args = nullptr;
7084 assert(helperInfo->helperNum != CORINFO_HELP_UNDEF);
7085 for (unsigned i = helperInfo->numArgs; i > 0; --i)
7087 const CORINFO_HELPER_ARG& helperArg = helperInfo->args[i - 1];
7088 GenTree* currentArg = nullptr;
7089 switch (helperArg.argType)
7091 case CORINFO_HELPER_ARG_TYPE_Field:
7092 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(
7093 info.compCompHnd->getFieldClass(helperArg.fieldHandle));
7094 currentArg = gtNewIconEmbFldHndNode(helperArg.fieldHandle);
7096 case CORINFO_HELPER_ARG_TYPE_Method:
7097 info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(helperArg.methodHandle);
7098 currentArg = gtNewIconEmbMethHndNode(helperArg.methodHandle);
7100 case CORINFO_HELPER_ARG_TYPE_Class:
7101 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(helperArg.classHandle);
7102 currentArg = gtNewIconEmbClsHndNode(helperArg.classHandle);
7104 case CORINFO_HELPER_ARG_TYPE_Module:
7105 currentArg = gtNewIconEmbScpHndNode(helperArg.moduleHandle);
7107 case CORINFO_HELPER_ARG_TYPE_Const:
7108 currentArg = gtNewIconNode(helperArg.constant);
7111 NO_WAY("Illegal helper arg type");
7113 args = (args == nullptr) ? gtNewArgList(currentArg) : gtNewListNode(currentArg, args);
7117 * Mark as CSE'able, and hoistable. Consider marking hoistable unless you're in the inlinee.
7118 * Also, consider sticking this in the first basic block.
7120 GenTree* callout = gtNewHelperCallNode(helperInfo->helperNum, TYP_VOID, args);
7121 impAppendTree(callout, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
7124 // Checks whether the return types of caller and callee are compatible
7125 // so that the callee can be tail called. Note that here we don't check
7126 // compatibility in the IL Verifier sense, but along the lines of whether the return
7127 // type sizes are equal and the values get returned in the same return register.
7128 bool Compiler::impTailCallRetTypeCompatible(var_types callerRetType,
7129 CORINFO_CLASS_HANDLE callerRetTypeClass,
7130 var_types calleeRetType,
7131 CORINFO_CLASS_HANDLE calleeRetTypeClass)
7133 // Note that we cannot relax this condition with genActualType() as the
7134 // calling convention dictates that the caller of a function with a small
7135 // typed return value is responsible for normalizing the return val.
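// Illustrative example (not from the original sources): a caller returning
// 'short' cannot tail call a callee returning 'int'; the return sizes differ
// (2 vs. 4 bytes) and the caller's caller expects a normalized small-typed
// value that the callee will not produce.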
7136 if (callerRetType == calleeRetType)
7141 // If the class handles are the same and not null, the return types are compatible.
7142 if ((callerRetTypeClass != nullptr) && (callerRetTypeClass == calleeRetTypeClass))
7147 #if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)
7149 if (callerRetType == TYP_VOID)
7151 // This needs to be allowed to support the following IL pattern that Jit64 allows:
7152 //     tail.call
7153 //     pop
7154 //     ret
7156 // Note that the above IL pattern is not valid as per IL verification rules.
7157 // Therefore, only full trust code can take advantage of this pattern.
7161 // These checks return true if the return value type sizes are the same and
7162 // the values get returned in the same return register, i.e. the caller doesn't need to
7163 // normalize the return value. Some of the tail calls permitted by the below checks would have
7164 // been rejected by the IL Verifier before we reached here. Therefore, only full
7165 // trust code can make those tail calls.
7166 unsigned callerRetTypeSize = 0;
7167 unsigned calleeRetTypeSize = 0;
7168 bool isCallerRetTypMBEnreg =
7169 VarTypeIsMultiByteAndCanEnreg(callerRetType, callerRetTypeClass, &callerRetTypeSize, true, info.compIsVarArgs);
7170 bool isCalleeRetTypMBEnreg =
7171 VarTypeIsMultiByteAndCanEnreg(calleeRetType, calleeRetTypeClass, &calleeRetTypeSize, true, info.compIsVarArgs);
7173 if (varTypeIsIntegral(callerRetType) || isCallerRetTypMBEnreg)
7175 return (varTypeIsIntegral(calleeRetType) || isCalleeRetTypMBEnreg) && (callerRetTypeSize == calleeRetTypeSize);
7177 #endif // _TARGET_AMD64_ || _TARGET_ARM64_
7185 PREFIX_TAILCALL_EXPLICIT = 0x00000001, // call has "tail" IL prefix
7186 PREFIX_TAILCALL_IMPLICIT =
7187 0x00000010, // call is treated as having "tail" prefix even though there is no "tail" IL prefix
7188 PREFIX_TAILCALL = (PREFIX_TAILCALL_EXPLICIT | PREFIX_TAILCALL_IMPLICIT),
7189 PREFIX_VOLATILE = 0x00000100,
7190 PREFIX_UNALIGNED = 0x00001000,
7191 PREFIX_CONSTRAINED = 0x00010000,
7192 PREFIX_READONLY = 0x00100000
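// Illustrative note (not from the original sources): an IL sequence
// "tail. call T::M(); ret" sets PREFIX_TAILCALL_EXPLICIT, while a call the
// importer itself elects to treat as a tail call sets PREFIX_TAILCALL_IMPLICIT;
// testing (prefixFlags & PREFIX_TAILCALL) covers both forms.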
7195 /********************************************************************************
7197 * Returns true if the current opcode and the opcodes following it correspond
7198 * to a supported tail call IL pattern.
7201 bool Compiler::impIsTailCallILPattern(bool tailPrefixed,
7203 const BYTE* codeAddrOfNextOpcode,
7204 const BYTE* codeEnd,
7206 bool* isCallPopAndRet /* = nullptr */)
7208 // Bail out if the current opcode is not a call.
7209 if (!impOpcodeIsCallOpcode(curOpcode))
7214 #if !FEATURE_TAILCALL_OPT_SHARED_RETURN
7215 // If shared ret tail opt is not enabled, we will enable
7216 // it for recursive methods.
7220 // We can actually handle the case where the ret is in a fallthrough block, as long as that is the
7221 // only part of the sequence. Make sure we don't go past the end of the IL however.
7222 codeEnd = min(codeEnd + 1, info.compCode + info.compILCodeSize);
7225 // Bail out if there is no next opcode after call
7226 if (codeAddrOfNextOpcode >= codeEnd)
7231 // Scan the opcodes to look for the following IL patterns if either
7232 // i) the call is not tail prefixed (i.e. implicit tail call) or
7233 // ii) if tail prefixed, IL verification is not needed for the method.
7235 // Only in the above two cases can we allow the tail call patterns below,
7236 // which violate the ECMA spec.
7252 #if !defined(FEATURE_CORECLR) && defined(_TARGET_AMD64_)
7255 nextOpcode = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode);
7256 codeAddrOfNextOpcode += sizeof(__int8);
7257 } while ((codeAddrOfNextOpcode < codeEnd) && // Haven't reached end of method
7258 (!tailPrefixed || !tiVerificationNeeded) && // Not ".tail" prefixed or method requires no IL verification
7259 ((nextOpcode == CEE_NOP) || ((nextOpcode == CEE_POP) && (++cntPop == 1)))); // Next opcode = nop or exactly
7260 // one pop seen so far.
7262 nextOpcode = (OPCODE)getU1LittleEndian(codeAddrOfNextOpcode);
7263 #endif // !FEATURE_CORECLR && _TARGET_AMD64_
7265 if (isCallPopAndRet)
7267 // Allow call+pop+ret to be tail call optimized if caller ret type is void
7268 *isCallPopAndRet = (nextOpcode == CEE_RET) && (cntPop == 1);
7271 #if !defined(FEATURE_CORECLR) && defined(_TARGET_AMD64_)
7273 // Tail call IL pattern could be either of the following
7274 // 1) call/callvirt/calli + ret
7275 // 2) call/callvirt/calli + pop + ret in a method returning void.
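// Illustrative example of pattern 2 (not from the original sources), in a
// method returning void:
//     call   instance int32 C::M()
//     pop
//     ret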
7276 return (nextOpcode == CEE_RET) && ((cntPop == 0) || ((cntPop == 1) && (info.compRetType == TYP_VOID)));
7278 return (nextOpcode == CEE_RET) && (cntPop == 0);
7279 #endif // !FEATURE_CORECLR && _TARGET_AMD64_
7282 /*****************************************************************************
7284 * Determine whether the call could be converted to an implicit tail call
7287 bool Compiler::impIsImplicitTailCallCandidate(
7288 OPCODE opcode, const BYTE* codeAddrOfNextOpcode, const BYTE* codeEnd, int prefixFlags, bool isRecursive)
7291 #if FEATURE_TAILCALL_OPT
7292 if (!opts.compTailCallOpt)
7297 if (opts.compDbgCode || opts.MinOpts())
7302 // must not be tail prefixed
7303 if (prefixFlags & PREFIX_TAILCALL_EXPLICIT)
7308 #if !FEATURE_TAILCALL_OPT_SHARED_RETURN
7309 // the block containing the call must be marked as BBJ_RETURN
7310 // We allow shared ret tail call optimization on recursive calls even under
7311 // !FEATURE_TAILCALL_OPT_SHARED_RETURN.
7312 if (!isRecursive && (compCurBB->bbJumpKind != BBJ_RETURN))
7314 #endif // !FEATURE_TAILCALL_OPT_SHARED_RETURN
7316 // must be call+ret or call+pop+ret
7317 if (!impIsTailCallILPattern(false, opcode, codeAddrOfNextOpcode, codeEnd, isRecursive))
7325 #endif // FEATURE_TAILCALL_OPT
7328 //------------------------------------------------------------------------
7329 // impImportCall: import a call-inspiring opcode
7332 // opcode - opcode that inspires the call
7333 // pResolvedToken - resolved token for the call target
7334 // pConstrainedResolvedToken - resolved constraint token (or nullptr)
7335 // newObjThis - tree for this pointer or uninitialized newobj temp (or nullptr)
7336 // prefixFlags - IL prefix flags for the call
7337 // callInfo - EE supplied info for the call
7338 // rawILOffset - IL offset of the opcode
7341 // Type of the call's return value.
7342 // If we're importing an inlinee and have realized the inline must fail, the call return type should be TYP_UNDEF.
7343 // However we can't assert for this here yet because there are cases we miss. See issue #13272.
7347 // opcode can be CEE_CALL, CEE_CALLI, CEE_CALLVIRT, or CEE_NEWOBJ.
7349 // For CEE_NEWOBJ, newobjThis should be the temp grabbed for the allocated
7350 // uninitialized object.
7353 #pragma warning(push)
7354 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
7357 var_types Compiler::impImportCall(OPCODE opcode,
7358 CORINFO_RESOLVED_TOKEN* pResolvedToken,
7359 CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
7360 GenTree* newobjThis,
7362 CORINFO_CALL_INFO* callInfo,
7363 IL_OFFSET rawILOffset)
7365 assert(opcode == CEE_CALL || opcode == CEE_CALLVIRT || opcode == CEE_NEWOBJ || opcode == CEE_CALLI);
7367 IL_OFFSETX ilOffset = impCurILOffset(rawILOffset, true);
7368 var_types callRetTyp = TYP_COUNT;
7369 CORINFO_SIG_INFO* sig = nullptr;
7370 CORINFO_METHOD_HANDLE methHnd = nullptr;
7371 CORINFO_CLASS_HANDLE clsHnd = nullptr;
7372 unsigned clsFlags = 0;
7373 unsigned mflags = 0;
7374 unsigned argFlags = 0;
7375 GenTree* call = nullptr;
7376 GenTreeArgList* args = nullptr;
7377 CORINFO_THIS_TRANSFORM constraintCallThisTransform = CORINFO_NO_THIS_TRANSFORM;
7378 CORINFO_CONTEXT_HANDLE exactContextHnd = nullptr;
7379 bool exactContextNeedsRuntimeLookup = false;
7380 bool canTailCall = true;
7381 const char* szCanTailCallFailReason = nullptr;
7382 int tailCall = prefixFlags & PREFIX_TAILCALL;
7383 bool readonlyCall = (prefixFlags & PREFIX_READONLY) != 0;
7385 CORINFO_RESOLVED_TOKEN* ldftnToken = nullptr;
7387 // Synchronized methods need to call CORINFO_HELP_MON_EXIT at the end. We could
7388 // do that before tailcalls, but that is probably not the intended
7389 // semantic. So just disallow tailcalls from synchronized methods.
7390 // Also, popping arguments in a varargs function is more work and NYI.
7391 // If we have a security object, we have to keep our frame around for callers
7392 // to see any imperative security.
7393 if (info.compFlags & CORINFO_FLG_SYNCH)
7395 canTailCall = false;
7396 szCanTailCallFailReason = "Caller is synchronized";
7398 #if !FEATURE_FIXED_OUT_ARGS
7399 else if (info.compIsVarArgs)
7401 canTailCall = false;
7402 szCanTailCallFailReason = "Caller is varargs";
7404 #endif // FEATURE_FIXED_OUT_ARGS
7405 else if (opts.compNeedSecurityCheck)
7407 canTailCall = false;
7408 szCanTailCallFailReason = "Caller requires a security check.";
7411 // We only need to cast the return value of pinvoke inlined calls that return small types
7413 // TODO-AMD64-Cleanup: Remove this when we stop interoperating with JIT64, or if we decide to stop
7414 // widening everything! CoreCLR does not support JIT64 interoperation so no need to widen there.
7415 // The existing x64 JIT doesn't bother widening all types to int, so we have to assume for
7416 // the time being that the callee might be compiled by the other JIT and thus the return
7417 // value will need to be widened by us (or not widened at all...)
7419 // ReadyToRun code sticks with default calling convention that does not widen small return types.
7421 bool checkForSmallType = opts.IsJit64Compat() || opts.IsReadyToRun();
7422 bool bIntrinsicImported = false;
7424 CORINFO_SIG_INFO calliSig;
7425 GenTreeArgList* extraArg = nullptr;
7427 /*-------------------------------------------------------------------------
7428 * First create the call node
7431 if (opcode == CEE_CALLI)
7433 if (IsTargetAbi(CORINFO_CORERT_ABI))
7435 // See comment in impCheckForPInvokeCall
7436 BasicBlock* block = compIsForInlining() ? impInlineInfo->iciBlock : compCurBB;
7437 if (info.compCompHnd->convertPInvokeCalliToCall(pResolvedToken, !impCanPInvokeInlineCallSite(block)))
7439 eeGetCallInfo(pResolvedToken, nullptr, CORINFO_CALLINFO_ALLOWINSTPARAM, callInfo);
7440 return impImportCall(CEE_CALL, pResolvedToken, nullptr, nullptr, prefixFlags, callInfo, rawILOffset);
7444 /* Get the call site sig */
7445 eeGetSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, &calliSig);
7447 callRetTyp = JITtype2varType(calliSig.retType);
7449 call = impImportIndirectCall(&calliSig, ilOffset);
7451 // We don't know the target method, so we have to infer the flags, or
7452 // assume the worst-case.
7453 mflags = (calliSig.callConv & CORINFO_CALLCONV_HASTHIS) ? 0 : CORINFO_FLG_STATIC;
7458 unsigned structSize =
7459 (callRetTyp == TYP_STRUCT) ? info.compCompHnd->getClassSize(calliSig.retTypeSigClass) : 0;
7460 printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n",
7461 opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize);
7464 // This should be checked in impImportBlockCode.
7465 assert(!compIsForInlining() || !(impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY));
7470 // We cannot lazily obtain the signature of a CALLI call because it has no method
7471 // handle that we can use, so we need to save its full call signature here.
7472 assert(call->gtCall.callSig == nullptr);
7473 call->gtCall.callSig = new (this, CMK_CorSig) CORINFO_SIG_INFO;
7474 *call->gtCall.callSig = calliSig;
7477 if (IsTargetAbi(CORINFO_CORERT_ABI))
7479 bool managedCall = (((calliSig.callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_STDCALL) &&
7480 ((calliSig.callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_C) &&
7481 ((calliSig.callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_THISCALL) &&
7482 ((calliSig.callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_FASTCALL));
7485 addFatPointerCandidate(call->AsCall());
7489 else // (opcode != CEE_CALLI)
7491 CorInfoIntrinsics intrinsicID = CORINFO_INTRINSIC_Count;
7493 // Passing CORINFO_CALLINFO_ALLOWINSTPARAM indicates that this JIT is prepared to
7494 // supply the instantiation parameters necessary to make direct calls to underlying
7495 // shared generic code, rather than calling through instantiating stubs. If the
7496 // returned signature has CORINFO_CALLCONV_PARAMTYPE then this indicates that the JIT
7497 // must indeed pass an instantiation parameter.
7499 methHnd = callInfo->hMethod;
7501 sig = &(callInfo->sig);
7502 callRetTyp = JITtype2varType(sig->retType);
7504 mflags = callInfo->methodFlags;
7509 unsigned structSize = (callRetTyp == TYP_STRUCT) ? info.compCompHnd->getClassSize(sig->retTypeSigClass) : 0;
7510 printf("\nIn Compiler::impImportCall: opcode is %s, kind=%d, callRetType is %s, structSize is %d\n",
7511 opcodeNames[opcode], callInfo->kind, varTypeName(callRetTyp), structSize);
7514 if (compIsForInlining())
7516 /* Does this call site have security boundary restrictions? */
7518 if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
7520 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_SECURITY);
7524 /* Does the inlinee need a security check token on the frame */
7526 if (mflags & CORINFO_FLG_SECURITYCHECK)
7528 compInlineResult->NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
7532 /* Does the inlinee use StackCrawlMark */
7534 if (mflags & CORINFO_FLG_DONT_INLINE_CALLER)
7536 compInlineResult->NoteFatal(InlineObservation::CALLEE_STACK_CRAWL_MARK);
7540 /* For now ignore delegate invoke */
7542 if (mflags & CORINFO_FLG_DELEGATE_INVOKE)
7544 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_DELEGATE_INVOKE);
7548 /* For now ignore varargs */
7549 if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
7551 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NATIVE_VARARGS);
7555 if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
7557 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS);
7561 if ((mflags & CORINFO_FLG_VIRTUAL) && (sig->sigInst.methInstCount != 0) && (opcode == CEE_CALLVIRT))
7563 compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_GENERIC_VIRTUAL);
7568 clsHnd = pResolvedToken->hClass;
7570 clsFlags = callInfo->classFlags;
7573 // If this is a call to JitTestLabel.Mark, do "early inlining", and record the test attribute.
7575 // This recognition should really be done by knowing the methHnd of the relevant Mark method(s).
7576 // These should be in mscorlib.h, and available through a JIT/EE interface call.
7577 const char* modName;
7578 const char* className;
7579 const char* methodName;
7580 if ((className = eeGetClassName(clsHnd)) != nullptr &&
7581 strcmp(className, "System.Runtime.CompilerServices.JitTestLabel") == 0 &&
7582 (methodName = eeGetMethodName(methHnd, &modName)) != nullptr && strcmp(methodName, "Mark") == 0)
7584 return impImportJitTestLabelMark(sig->numArgs);
7588 // <NICE> Factor this into getCallInfo </NICE>
7589 bool isSpecialIntrinsic = false;
7590 if ((mflags & (CORINFO_FLG_INTRINSIC | CORINFO_FLG_JIT_INTRINSIC)) != 0)
7592 const bool isTail = canTailCall && (tailCall != 0);
7594 call = impIntrinsic(newobjThis, clsHnd, methHnd, sig, mflags, pResolvedToken->token, readonlyCall, isTail,
7595 pConstrainedResolvedToken, callInfo->thisTransform, &intrinsicID, &isSpecialIntrinsic);
7597 if (compDonotInline())
7602 if (call != nullptr)
7604 assert(!(mflags & CORINFO_FLG_VIRTUAL) || (mflags & CORINFO_FLG_FINAL) ||
7605 (clsFlags & CORINFO_FLG_FINAL));
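// The assert above guards the expansion: an intrinsic call can only be
// devirtualized when the target cannot be overridden (non-virtual, or a
// final method/class).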
7607 #ifdef FEATURE_READYTORUN_COMPILER
7608 if (call->OperGet() == GT_INTRINSIC)
7610 if (opts.IsReadyToRun())
7612 noway_assert(callInfo->kind == CORINFO_CALL);
7613 call->gtIntrinsic.gtEntryPoint = callInfo->codePointerLookup.constLookup;
7617 call->gtIntrinsic.gtEntryPoint.addr = nullptr;
7618 call->gtIntrinsic.gtEntryPoint.accessType = IAT_VALUE;
7623 bIntrinsicImported = true;
7631 call = impSIMDIntrinsic(opcode, newobjThis, clsHnd, methHnd, sig, pResolvedToken->token);
7632 if (call != nullptr)
7634 bIntrinsicImported = true;
7638 #endif // FEATURE_SIMD
7640 if ((mflags & CORINFO_FLG_VIRTUAL) && (mflags & CORINFO_FLG_EnC) && (opcode == CEE_CALLVIRT))
7642 NO_WAY("Virtual call to a function added via EnC is not supported");
7645 if ((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_DEFAULT &&
7646 (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
7647 (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG)
7649 BADCODE("Bad calling convention");
7652 //-------------------------------------------------------------------------
7653 // Construct the call node
7655 // Work out what sort of call we're making.
7656 // Dispense with virtual calls implemented via LDVIRTFTN immediately.
7658 constraintCallThisTransform = callInfo->thisTransform;
7659 exactContextHnd = callInfo->contextHandle;
7660 exactContextNeedsRuntimeLookup = callInfo->exactContextNeedsRuntimeLookup == TRUE;
7662 // A recursive call is treated as a loop back to the beginning of the method.
7663 if (gtIsRecursiveCall(methHnd))
7668 JITDUMP("\nFound recursive call in the method. Mark " FMT_BB " to " FMT_BB
7669 " as having a backward branch.\n",
7670 fgFirstBB->bbNum, compCurBB->bbNum);
7673 fgMarkBackwardJump(fgFirstBB, compCurBB);
7676 switch (callInfo->kind)
7679 case CORINFO_VIRTUALCALL_STUB:
7681 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
7682 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
7683 if (callInfo->stubLookup.lookupKind.needsRuntimeLookup)
7686 if (compIsForInlining())
7688 // Don't import runtime lookups when inlining
7689 // Inlining has to be aborted in such a case
7690 /* XXX Fri 3/20/2009
7691 * By the way, this would never succeed. If the handle lookup is into the generic
7692 * dictionary for a candidate, you'll generate different dictionary offsets and the
7693 * inlined code will crash.
7695 * To anyone reviewing this code: when could this ever succeed in the future? It'll
7696 * always have a handle lookup. These lookups are safe intra-module, but we're just failing here. */
7699 compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_COMPLEX_HANDLE);
7703 GenTree* stubAddr = impRuntimeLookupToTree(pResolvedToken, &callInfo->stubLookup, methHnd);
7704 assert(!compDonotInline());
7706 // This is the rough code to set up an indirect stub call
7707 assert(stubAddr != nullptr);
7709 // The stubAddr may be a
7710 // complex expression. As it is evaluated after the args,
7711 // it may cause registered args to be spilled. Simply spill it.
7713 unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall with runtime lookup"));
7714 impAssignTempGen(lclNum, stubAddr, (unsigned)CHECK_SPILL_ALL);
7715 stubAddr = gtNewLclvNode(lclNum, TYP_I_IMPL);
7717 // Create the actual call node
7719 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
7720 (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
7722 call = gtNewIndCallNode(stubAddr, callRetTyp, nullptr);
7724 call->gtFlags |= GTF_EXCEPT | (stubAddr->gtFlags & GTF_GLOB_EFFECT);
7725 call->gtFlags |= GTF_CALL_VIRT_STUB;
7728 // No tailcalls allowed for these yet...
7729 canTailCall = false;
7730 szCanTailCallFailReason = "VirtualCall with runtime lookup";
7735 // OK, the stub is available at compile time.
7737 call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
7738 call->gtCall.gtStubCallStubAddr = callInfo->stubLookup.constLookup.addr;
7739 call->gtFlags |= GTF_CALL_VIRT_STUB;
7740 assert(callInfo->stubLookup.constLookup.accessType != IAT_PPVALUE &&
7741 callInfo->stubLookup.constLookup.accessType != IAT_RELPVALUE);
7742 if (callInfo->stubLookup.constLookup.accessType == IAT_PVALUE)
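// IAT_PVALUE: the stub address is published through an indirection cell,
// so mark the call to fetch the stub through that cell at call time.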
7744 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_VIRTSTUB_REL_INDIRECT;
7748 #ifdef FEATURE_READYTORUN_COMPILER
7749 if (opts.IsReadyToRun())
7751 // Null check is sometimes needed for ready to run to handle
7752 // non-virtual <-> virtual changes between versions
7753 if (callInfo->nullInstanceCheck)
7755 call->gtFlags |= GTF_CALL_NULLCHECK;
7763 case CORINFO_VIRTUALCALL_VTABLE:
7765 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
7766 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
7767 call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
7768 call->gtFlags |= GTF_CALL_VIRT_VTABLE;
7772 case CORINFO_VIRTUALCALL_LDVIRTFTN:
7774 if (compIsForInlining())
7776 compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_CALL_VIA_LDVIRTFTN);
7780 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
7781 assert(!(clsFlags & CORINFO_FLG_VALUECLASS));
7782 // OK, we've been told to call via LDVIRTFTN, so just
7783 // take the call now....
7785 args = impPopList(sig->numArgs, sig);
7787 GenTree* thisPtr = impPopStack().val;
7788 thisPtr = impTransformThis(thisPtr, pConstrainedResolvedToken, callInfo->thisTransform);
7789 assert(thisPtr != nullptr);
7791 // Clone the (possibly transformed) "this" pointer
7792 GenTree* thisPtrCopy;
7793 thisPtr = impCloneExpr(thisPtr, &thisPtrCopy, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
7794 nullptr DEBUGARG("LDVIRTFTN this pointer"));
7796 GenTree* fptr = impImportLdvirtftn(thisPtr, pResolvedToken, callInfo);
7797 assert(fptr != nullptr);
7799 thisPtr = nullptr; // can't reuse it
7801 // Now make an indirect call through the function pointer
7803 unsigned lclNum = lvaGrabTemp(true DEBUGARG("VirtualCall through function pointer"));
7804 impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL);
7805 fptr = gtNewLclvNode(lclNum, TYP_I_IMPL);
7807 // Create the actual call node
7809 call = gtNewIndCallNode(fptr, callRetTyp, args, ilOffset);
7810 call->gtCall.gtCallObjp = thisPtrCopy;
7811 call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
7813 if ((sig->sigInst.methInstCount != 0) && IsTargetAbi(CORINFO_CORERT_ABI))
7815 // CoreRT generic virtual method: need to handle potential fat function pointers
7816 addFatPointerCandidate(call->AsCall());
7818 #ifdef FEATURE_READYTORUN_COMPILER
7819 if (opts.IsReadyToRun())
7821 // Null check is needed for ready to run to handle
7822 // non-virtual <-> virtual changes between versions
7823 call->gtFlags |= GTF_CALL_NULLCHECK;
7827 // Since we are jumping over some code, check that it's OK to skip it
7828 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG &&
7829 (sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
7835 // This is for a non-virtual, non-interface etc. call
7836 call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
7838 // We remove the nullcheck for the GetType call intrinsic.
7839 // TODO-CQ: JIT64 does not introduce the null check for many more helper calls
7841 if (callInfo->nullInstanceCheck &&
7842 !((mflags & CORINFO_FLG_INTRINSIC) != 0 && (intrinsicID == CORINFO_INTRINSIC_Object_GetType)))
7844 call->gtFlags |= GTF_CALL_NULLCHECK;
7847 #ifdef FEATURE_READYTORUN_COMPILER
7848 if (opts.IsReadyToRun())
7850 call->gtCall.setEntryPoint(callInfo->codePointerLookup.constLookup);
7856 case CORINFO_CALL_CODE_POINTER:
7858 // The EE has asked us to call by computing a code pointer and then doing an
7859 // indirect call. This is because a runtime lookup is required to get the code entry point.
7861 // These calls always follow a uniform calling convention, i.e. no extra hidden params
7862 assert((sig->callConv & CORINFO_CALLCONV_PARAMTYPE) == 0);
7864 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG);
7865 assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
7868 GenTree* fptr = impLookupToTree(pResolvedToken, &callInfo->codePointerLookup, GTF_ICON_FTN_ADDR, callInfo->hMethod);
7870 if (compDonotInline())
7875 // Now make an indirect call through the function pointer
7877 unsigned lclNum = lvaGrabTemp(true DEBUGARG("Indirect call through function pointer"));
7878 impAssignTempGen(lclNum, fptr, (unsigned)CHECK_SPILL_ALL);
7879 fptr = gtNewLclvNode(lclNum, TYP_I_IMPL);
7881 call = gtNewIndCallNode(fptr, callRetTyp, nullptr, ilOffset);
7882 call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
7883 if (callInfo->nullInstanceCheck)
7885 call->gtFlags |= GTF_CALL_NULLCHECK;
7892 assert(!"unknown call kind");
7896 //-------------------------------------------------------------------------
7899 PREFIX_ASSUME(call != nullptr);
7901 if (mflags & CORINFO_FLG_NOGCCHECK)
7903 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_NOGCCHECK;
7906 // Mark call if it's one of the ones we will maybe treat as an intrinsic
7907 if (isSpecialIntrinsic)
7909 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_SPECIAL_INTRINSIC;
7913 assert(clsHnd || (opcode == CEE_CALLI)); // We're never verifying for CALLI, so this is not set.
7915 /* Some sanity checks */
7917 // CALL_VIRT and NEWOBJ must have a THIS pointer
7918 assert((opcode != CEE_CALLVIRT && opcode != CEE_NEWOBJ) || (sig->callConv & CORINFO_CALLCONV_HASTHIS));
7919 // static bit and hasThis are negations of one another
7920 assert(((mflags & CORINFO_FLG_STATIC) != 0) == ((sig->callConv & CORINFO_CALLCONV_HASTHIS) == 0));
7921 assert(call != nullptr);
7923 /*-------------------------------------------------------------------------
7924 * Check special-cases etc
7927 /* Special case - Check if it is a call to Delegate.Invoke(). */
7929 if (mflags & CORINFO_FLG_DELEGATE_INVOKE)
7931 assert(!compIsForInlining());
7932 assert(!(mflags & CORINFO_FLG_STATIC)); // can't call a static method
7933 assert(mflags & CORINFO_FLG_FINAL);
7935 /* Set the delegate flag */
7936 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_DELEGATE_INV;
7938 if (callInfo->secureDelegateInvoke)
7940 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_SECURE_DELEGATE_INV;
7943 if (opcode == CEE_CALLVIRT)
7945 assert(mflags & CORINFO_FLG_FINAL);
7947 /* It should have the GTF_CALL_NULLCHECK flag set. Reset it */
7948 assert(call->gtFlags & GTF_CALL_NULLCHECK);
7949 call->gtFlags &= ~GTF_CALL_NULLCHECK;
7953 CORINFO_CLASS_HANDLE actualMethodRetTypeSigClass;
7954 actualMethodRetTypeSigClass = sig->retTypeSigClass;
7955 if (varTypeIsStruct(callRetTyp))
7957 callRetTyp = impNormStructType(actualMethodRetTypeSigClass);
7958 call->gtType = callRetTyp;
7962 /* Check for varargs */
7963 if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG ||
7964 (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
7966 BADCODE("Varargs not supported.");
7968 #endif // !FEATURE_VARARG
7971 if (call->gtCall.callSig == nullptr)
7973 call->gtCall.callSig = new (this, CMK_CorSig) CORINFO_SIG_INFO;
7974 *call->gtCall.callSig = *sig;
7976 #endif // UNIX_X86_ABI
7978 if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG ||
7979 (sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_NATIVEVARARG)
7981 assert(!compIsForInlining());
7983 /* Set the right flags */
7985 call->gtFlags |= GTF_CALL_POP_ARGS;
7986 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_VARARGS;
7988 /* Can't allow tailcall for varargs as it is caller-pop. The caller
7989 will be expecting to pop a certain number of arguments, but if we
7990 tailcall to a function with a different number of arguments, we
7991 are hosed. There are ways around this (caller remembers esp value,
7992 varargs is not caller-pop, etc), but not worth it. */
7993 CLANG_FORMAT_COMMENT_ANCHOR;
7998 canTailCall = false;
7999 szCanTailCallFailReason = "Callee is varargs";
8003 /* Get the total number of arguments - this is already correct
8004 * for CALLI - for methods we have to get it from the call site */
8006 if (opcode != CEE_CALLI)
8009 unsigned numArgsDef = sig->numArgs;
8011 eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
8014 // We cannot lazily obtain the signature of a vararg call because using its method
8015 // handle will give us only the declared argument list, not the full argument list.
8016 assert(call->gtCall.callSig == nullptr);
8017 call->gtCall.callSig = new (this, CMK_CorSig) CORINFO_SIG_INFO;
8018 *call->gtCall.callSig = *sig;
8021 // For vararg calls we must be sure to load the return type of the
8022 // method actually being called, as well as the return types
8023 // specified in the vararg signature. With type equivalency, these types
8024 // may not be the same.
8025 if (sig->retTypeSigClass != actualMethodRetTypeSigClass)
8027 if (actualMethodRetTypeSigClass != nullptr && sig->retType != CORINFO_TYPE_CLASS &&
8028 sig->retType != CORINFO_TYPE_BYREF && sig->retType != CORINFO_TYPE_PTR &&
8029 sig->retType != CORINFO_TYPE_VAR)
8031 // Make sure that all valuetypes (including enums) that we push are loaded.
8032 // This is to guarantee that if a GC is triggered from the prestub of this method,
8033 // all valuetypes in the method signature are already loaded.
8034 // We need to be able to find the size of the valuetypes, but we cannot
8035 // do a class-load from within GC.
8036 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(actualMethodRetTypeSigClass);
8040 assert(numArgsDef <= sig->numArgs);
8043 /* We will have "cookie" as the last argument but we cannot push
8044 * it on the operand stack because we may overflow, so we append it
8045 * to the arg list after we pop them */
8048 if (mflags & CORINFO_FLG_SECURITYCHECK)
8050 assert(!compIsForInlining());
8052 // Need security prolog/epilog callouts when there is
8053 // imperative security in the method. This is to give security a
8054 // chance to do any setup in the prolog and cleanup in the epilog if needed.
8056 if (compIsForInlining())
8058 // Cannot handle this if the method being imported is itself an inlinee,
8059 // because an inlinee method does not have its own frame.
8061 compInlineResult->NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
8066 tiSecurityCalloutNeeded = true;
8068 // If the current method calls a method which needs a security check,
8069 // (i.e. the method being compiled has imperative security)
8070 // we need to reserve a slot for the security object in
8071 // the current method's stack frame
8072 opts.compNeedSecurityCheck = true;
8076 //--------------------------- Inline NDirect ------------------------------
8078 // For inline cases we technically should look at both the current
8079 // block and the call site block (or just the latter if we've
8080 // fused the EH trees). However the block-related checks pertain to
8081 // EH and we currently won't inline a method with EH. So for
8082 // inlinees, just checking the call site block is sufficient.
8084 // New lexical block here to avoid compilation errors because of GOTOs.
8085 BasicBlock* block = compIsForInlining() ? impInlineInfo->iciBlock : compCurBB;
8086 impCheckForPInvokeCall(call->AsCall(), methHnd, sig, mflags, block);
8089 if (call->gtFlags & GTF_CALL_UNMANAGED)
8091 // We set up the unmanaged call by linking the frame, disabling GC, etc
8092 // This needs to be cleaned up on return
8095 canTailCall = false;
8096 szCanTailCallFailReason = "Callee is native";
8099 checkForSmallType = true;
8101 impPopArgsForUnmanagedCall(call, sig);
8105 else if ((opcode == CEE_CALLI) && (((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_STDCALL) ||
8106 ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_C) ||
8107 ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_THISCALL) ||
8108 ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_FASTCALL)))
8110 if (!info.compCompHnd->canGetCookieForPInvokeCalliSig(sig))
8112 // Normally this only happens with inlining.
8113 // However, a generic method (or type) being NGENd into another module
8115 // can run into this issue as well. There's no easy fall-back for NGEN,
8116 // so instead we fall back to JIT.
8116 if (compIsForInlining())
8118 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_PINVOKE_COOKIE);
8122 IMPL_LIMITATION("Can't get PInvoke cookie (cross module generics)");
8128 GenTree* cookie = eeGetPInvokeCookie(sig);
8130 // This cookie is required to be either a simple GT_CNS_INT or
8131 // an indirection of a GT_CNS_INT
8133 GenTree* cookieConst = cookie;
8134 if (cookie->gtOper == GT_IND)
8136 cookieConst = cookie->gtOp.gtOp1;
8138 assert(cookieConst->gtOper == GT_CNS_INT);
8140 // Setting GTF_DONT_CSE on the GT_CNS_INT as well as on the GT_IND (if it exists) will ensure that
8141 // we won't allow this tree to participate in any CSE logic
8143 cookie->gtFlags |= GTF_DONT_CSE;
8144 cookieConst->gtFlags |= GTF_DONT_CSE;
8146 call->gtCall.gtCallCookie = cookie;
8150 canTailCall = false;
8151 szCanTailCallFailReason = "PInvoke calli";
8155 /*-------------------------------------------------------------------------
8156 * Create the argument list
8159 //-------------------------------------------------------------------------
8160 // Special case - for varargs we have an implicit last argument
8162 if ((sig->callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG)
8164 assert(!compIsForInlining());
8166 void *varCookie, *pVarCookie;
8167 if (!info.compCompHnd->canGetVarArgsHandle(sig))
8169 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CANT_EMBED_VARARGS_COOKIE);
8173 varCookie = info.compCompHnd->getVarArgsHandle(sig, &pVarCookie);
8174 assert((!varCookie) != (!pVarCookie));
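// getVarArgsHandle returns either the cookie itself (varCookie) or the
// address of the slot that will hold it (pVarCookie); exactly one of the
// two is non-null, which is what the assert above checks.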
8175 GenTree* cookie = gtNewIconEmbHndNode(varCookie, pVarCookie, GTF_ICON_VARG_HDL, sig);
8177 assert(extraArg == nullptr);
8178 extraArg = gtNewArgList(cookie);
8181 //-------------------------------------------------------------------------
8182 // Extra arg for shared generic code and array methods
8184 // Extra argument containing instantiation information is passed in the
8185 // following circumstances:
8186 // (a) To the "Address" method on array classes; the extra parameter is
8187 // the array's type handle (a TypeDesc)
8188 // (b) To shared-code instance methods in generic structs; the extra parameter
8189 // is the struct's type handle (a vtable ptr)
8190 // (c) To shared-code per-instantiation non-generic static methods in generic
8191 // classes and structs; the extra parameter is the type handle
8192 // (d) To shared-code generic methods; the extra parameter is an
8193 // exact-instantiation MethodDesc
8195 // We also set the exact type context associated with the call so we can
8196 // inline the call correctly later on.
8198 if (sig->callConv & CORINFO_CALLCONV_PARAMTYPE)
8200 assert(call->gtCall.gtCallType == CT_USER_FUNC);
8201 if (clsHnd == nullptr)
8203 NO_WAY("CALLI on parameterized type");
8206 assert(opcode != CEE_CALLI);
8211 // Instantiated generic method
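// The exact context handle packs its kind into the low bits
// (CORINFO_CONTEXTFLAGS_MASK); masking those off recovers the raw
// CORINFO_METHOD_HANDLE.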
8212 if (((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_METHOD)
8214 CORINFO_METHOD_HANDLE exactMethodHandle =
8215 (CORINFO_METHOD_HANDLE)((SIZE_T)exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK);
8217 if (!exactContextNeedsRuntimeLookup)
8219 #ifdef FEATURE_READYTORUN_COMPILER
8220 if (opts.IsReadyToRun())
8223 instParam = impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_METHOD_HDL, exactMethodHandle);
8224 if (instParam == nullptr)
8226 assert(compDonotInline());
8233 instParam = gtNewIconEmbMethHndNode(exactMethodHandle);
8234 info.compCompHnd->methodMustBeLoadedBeforeCodeIsRun(exactMethodHandle);
8239 instParam = impTokenToHandle(pResolvedToken, &runtimeLookup, TRUE /*mustRestoreHandle*/);
8240 if (instParam == nullptr)
8242 assert(compDonotInline());
8248 // otherwise must be an instance method in a generic struct,
8249 // a static method in a generic type, or a runtime-generated array method
8252 assert(((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_CLASS);
8253 CORINFO_CLASS_HANDLE exactClassHandle =
8254 (CORINFO_CLASS_HANDLE)((SIZE_T)exactContextHnd & ~CORINFO_CONTEXTFLAGS_MASK);
8256 if (compIsForInlining() && (clsFlags & CORINFO_FLG_ARRAY) != 0)
8258 compInlineResult->NoteFatal(InlineObservation::CALLEE_IS_ARRAY_METHOD);
8262 if ((clsFlags & CORINFO_FLG_ARRAY) && readonlyCall)
8264 // We indicate "readonly" to the Address operation by using a null instParam.
8266 instParam = gtNewIconNode(0, TYP_REF);
8268 else if (!exactContextNeedsRuntimeLookup)
8270 #ifdef FEATURE_READYTORUN_COMPILER
8271 if (opts.IsReadyToRun())
8274 instParam = impReadyToRunLookupToTree(&callInfo->instParamLookup, GTF_ICON_CLASS_HDL, exactClassHandle);
8275 if (instParam == nullptr)
8277 assert(compDonotInline());
8284 instParam = gtNewIconEmbClsHndNode(exactClassHandle);
8285 info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(exactClassHandle);
8290 // If the EE was able to resolve a constrained call, the instantiating parameter to use is the type
8291 // by which the call was constrained. We embed pConstrainedResolvedToken as the extra argument
8292 // because pResolvedToken is an interface method and interface types make a poor generic context.
8293 if (pConstrainedResolvedToken)
8295 instParam = impTokenToHandle(pConstrainedResolvedToken, &runtimeLookup, TRUE /*mustRestoreHandle*/,
8296 FALSE /* importParent */);
8300 instParam = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup, TRUE /*mustRestoreHandle*/);
8303 if (instParam == nullptr)
8305 assert(compDonotInline());
8311 assert(extraArg == nullptr);
8312 extraArg = gtNewArgList(instParam);
8315 // Inlining may need the exact type context (exactContextHnd) if we're inlining shared generic code, in particular
8316 // to inline 'polytypic' operations such as static field accesses, type tests and method calls which
8317 // rely on the exact context. The exactContextHnd is passed back to the JitInterface at appropriate points.
8318 // exactContextHnd is not currently required when inlining shared generic code into shared
8319 // generic code, since the inliner aborts whenever shared code polytypic operations are encountered
8320 // (e.g. anything marked needsRuntimeLookup)
8321 if (exactContextNeedsRuntimeLookup)
8323 exactContextHnd = nullptr;
8326 if ((opcode == CEE_NEWOBJ) && ((clsFlags & CORINFO_FLG_DELEGATE) != 0))
8328 // Only verifiable cases are supported.
8329 // dup; ldvirtftn; newobj; or ldftn; newobj.
8330 // An IL test case could contain an unverifiable sequence; in that case the optimization should not be done.
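// For example (illustrative IL, not taken from a particular test):
//   ldloc.0
//   dup
//   ldvirtftn instance void C::M()
//   newobj    instance void D::.ctor(object, native int)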
8331 if (impStackHeight() > 0)
8333 typeInfo delegateTypeInfo = impStackTop().seTypeInfo;
8334 if (delegateTypeInfo.IsToken())
8336 ldftnToken = delegateTypeInfo.GetToken();
8341 //-------------------------------------------------------------------------
8342 // The main group of arguments
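// impPopList pops sig->numArgs values off the operand stack; extraArg (the
// vararg cookie or instantiation parameter, if any) is appended after them
// as the hidden last argument.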
8344 args = call->gtCall.gtCallArgs = impPopList(sig->numArgs, sig, extraArg);
8348 call->gtFlags |= args->gtFlags & GTF_GLOB_EFFECT;
8351 //-------------------------------------------------------------------------
8352 // The "this" pointer
8354 if (!(mflags & CORINFO_FLG_STATIC) && !((opcode == CEE_NEWOBJ) && (newobjThis == nullptr)))
8358 if (opcode == CEE_NEWOBJ)
8364 obj = impPopStack().val;
8365 obj = impTransformThis(obj, pConstrainedResolvedToken, constraintCallThisTransform);
8366 if (compDonotInline())
8372 // Store the "this" value in the call
8373 call->gtFlags |= obj->gtFlags & GTF_GLOB_EFFECT;
8374 call->gtCall.gtCallObjp = obj;
8376 // Is this a virtual or interface call?
8377 if (call->gtCall.IsVirtual())
8379 // only true object pointers can be virtual
8380 assert(obj->gtType == TYP_REF);
8382 // See if we can devirtualize.
8383 impDevirtualizeCall(call->AsCall(), &callInfo->hMethod, &callInfo->methodFlags, &callInfo->contextHandle,
8389 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_NONVIRT_SAME_THIS;
8393 //-------------------------------------------------------------------------
8394 // The "this" pointer for "newobj"
8396 if (opcode == CEE_NEWOBJ)
8398 if (clsFlags & CORINFO_FLG_VAROBJSIZE)
8400 assert(!(clsFlags & CORINFO_FLG_ARRAY)); // arrays handled separately
8401 // This is a 'new' of a variable sized object, where
8402 // the constructor is to return the object. In this case
8403 // the constructor claims to return VOID but we know it
8404 // actually returns the new object
8405 assert(callRetTyp == TYP_VOID);
8406 callRetTyp = TYP_REF;
8407 call->gtType = TYP_REF;
8408 impSpillSpecialSideEff();
8410 impPushOnStack(call, typeInfo(TI_REF, clsHnd));
8414 if (clsFlags & CORINFO_FLG_DELEGATE)
8416 // The new inliner morphs it here in impImportCall.
8417 // This will allow us to inline the call to the delegate constructor.
8418 call = fgOptimizeDelegateConstructor(call->AsCall(), &exactContextHnd, ldftnToken);
8421 if (!bIntrinsicImported)
8424 #if defined(DEBUG) || defined(INLINE_DATA)
8426 // Keep track of the raw IL offset of the call
8427 call->gtCall.gtRawILOffset = rawILOffset;
8429 #endif // defined(DEBUG) || defined(INLINE_DATA)
8431 // Is it an inline candidate?
8432 impMarkInlineCandidate(call, exactContextHnd, exactContextNeedsRuntimeLookup, callInfo);
8435 // append the call node.
8436 impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
8438 // Now push the value of the 'new' onto the stack
8440 // This is a 'new' of a non-variable sized object.
8441 // Append the new node (op1) to the statement list,
8442 // and then push the local holding the value of this
8443 // new instruction on the stack.
8445 if (clsFlags & CORINFO_FLG_VALUECLASS)
8447 assert(newobjThis->gtOper == GT_ADDR && newobjThis->gtOp.gtOp1->gtOper == GT_LCL_VAR);
8449 unsigned tmp = newobjThis->gtOp.gtOp1->gtLclVarCommon.gtLclNum;
8450 impPushOnStack(gtNewLclvNode(tmp, lvaGetRealType(tmp)), verMakeTypeInfo(clsHnd).NormaliseForStack());
8454 if (newobjThis->gtOper == GT_COMMA)
8456 // In coreclr the callout can be inserted even if verification is disabled
8457 // so we cannot rely on tiVerificationNeeded alone
8459 // We must have inserted the callout. Get the real newobj.
8460 newobjThis = newobjThis->gtOp.gtOp2;
8463 assert(newobjThis->gtOper == GT_LCL_VAR);
8464 impPushOnStack(gtNewLclvNode(newobjThis->gtLclVarCommon.gtLclNum, TYP_REF), typeInfo(TI_REF, clsHnd));
8474 // This check cannot be performed for implicit tail calls for the reason
8475 // that impIsImplicitTailCallCandidate() is not checking whether return
8476 // types are compatible before marking a call node with PREFIX_TAILCALL_IMPLICIT.
8477 // As a result it is possible that in the following case, we find that
8478 // the type stack is non-empty if Callee() is considered for implicit tail calling:
8480 // int Caller(..) { .... void Callee(); ret val; ... }
8482 // Note that we cannot check return type compatibility before ImpImportCall()
8483 // as we don't have the required info, or we would need to duplicate some of the logic of ImpImportCall().
8486 // For implicit tail calls, we perform this check after return types are
8487 // known to be compatible.
8488 if ((tailCall & PREFIX_TAILCALL_EXPLICIT) && (verCurrentState.esStackDepth != 0))
8490 BADCODE("Stack should be empty after tailcall");
8493 // Note that we cannot relax this condition with genActualType() as
8494 // the calling convention dictates that the caller of a function with
8495 // a small-typed return value is responsible for normalizing the return value.
8498 !impTailCallRetTypeCompatible(info.compRetType, info.compMethodInfo->args.retTypeClass, callRetTyp,
8499 callInfo->sig.retTypeClass))
8501 canTailCall = false;
8502 szCanTailCallFailReason = "Return types are not tail call compatible";
8505 // Stack empty check for implicit tail calls.
8506 if (canTailCall && (tailCall & PREFIX_TAILCALL_IMPLICIT) && (verCurrentState.esStackDepth != 0))
8508 #ifdef _TARGET_AMD64_
8509 // JIT64 Compatibility: Opportunistic tail call stack mismatch throws a VerificationException
8510 // in JIT64, not an InvalidProgramException.
8511 Verify(false, "Stack should be empty after tailcall");
8512 #else // _TARGET_64BIT_
8513 BADCODE("Stack should be empty after tailcall");
8514 #endif //!_TARGET_64BIT_
8517 // assert(compCurBB is not a catch, finally or filter block);
8518 // assert(compCurBB is not a try block protected by a finally block);
8520 // Check for permission to tailcall
8521 bool explicitTailCall = (tailCall & PREFIX_TAILCALL_EXPLICIT) != 0;
8523 assert(!explicitTailCall || compCurBB->bbJumpKind == BBJ_RETURN);
8527 // True virtual or indirect calls shouldn't pass in a callee handle.
8528 CORINFO_METHOD_HANDLE exactCalleeHnd =
8529 ((call->gtCall.gtCallType != CT_USER_FUNC) || call->gtCall.IsVirtual()) ? nullptr : methHnd;
8530 GenTree* thisArg = call->gtCall.gtCallObjp;
8532 if (info.compCompHnd->canTailCall(info.compMethodHnd, methHnd, exactCalleeHnd, explicitTailCall))
8535 if (explicitTailCall)
8537 // In case of explicit tail calls, mark it so that it is not considered for inlining.
8539 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_EXPLICIT_TAILCALL;
8543 printf("\nGTF_CALL_M_EXPLICIT_TAILCALL bit set for call ");
8551 #if FEATURE_TAILCALL_OPT
8552 // Must be an implicit tail call.
8553 assert((tailCall & PREFIX_TAILCALL_IMPLICIT) != 0);
8555 // It is possible that a call node is both an inline candidate and marked
8556 // for opportunistic tail calling. Inlining happens before morphing of
8557 // trees. If inlining of an inline candidate gets aborted for whatever
8558 // reason, it will survive to the morphing stage at which point it will be
8559 // transformed into a tail call after performing additional checks.
8561 call->gtCall.gtCallMoreFlags |= GTF_CALL_M_IMPLICIT_TAILCALL;
8565 printf("\nGTF_CALL_M_IMPLICIT_TAILCALL bit set for call ");
8571 #else //! FEATURE_TAILCALL_OPT
8572 NYI("Implicit tail call prefix on a target which doesn't support opportunistic tail calls");
8574 #endif // FEATURE_TAILCALL_OPT
8577 // we can't report success just yet...
8581 canTailCall = false;
8582 // canTailCall reported its reasons already
8586 printf("\ninfo.compCompHnd->canTailCall returned false for call ");
8595 // If this assert fires it means that canTailCall was set to false without setting a reason!
8596 assert(szCanTailCallFailReason != nullptr);
8601 printf("\nRejecting %splicit tail call for call ", explicitTailCall ? "ex" : "im");
8603 printf(": %s\n", szCanTailCallFailReason);
8606 info.compCompHnd->reportTailCallDecision(info.compMethodHnd, methHnd, explicitTailCall, TAILCALL_FAIL,
8607 szCanTailCallFailReason);
8611 // Note: we assume that small return types are already normalized by the managed callee
8612 // or by the pinvoke stub for calls to unmanaged code.
8614 if (!bIntrinsicImported)
8617 // Things that need to be checked when bIntrinsicImported is false.
8620 assert(call->gtOper == GT_CALL);
8621 assert(sig != nullptr);
8623 // Tail calls require us to save the call site's sig info so we can obtain an argument
8624 // copying thunk from the EE later on.
8625 if (call->gtCall.callSig == nullptr)
8627 call->gtCall.callSig = new (this, CMK_CorSig) CORINFO_SIG_INFO;
8628 *call->gtCall.callSig = *sig;
8631 if (compIsForInlining() && opcode == CEE_CALLVIRT)
8633 GenTree* callObj = call->gtCall.gtCallObjp;
8634 assert(callObj != nullptr);
8636 if ((call->gtCall.IsVirtual() || (call->gtFlags & GTF_CALL_NULLCHECK)) &&
8637 impInlineIsGuaranteedThisDerefBeforeAnySideEffects(call->gtCall.gtCallArgs, callObj,
8638 impInlineInfo->inlArgInfo))
8640 impInlineInfo->thisDereferencedFirst = true;
8644 #if defined(DEBUG) || defined(INLINE_DATA)
8646 // Keep track of the raw IL offset of the call
8647 call->gtCall.gtRawILOffset = rawILOffset;
8649 #endif // defined(DEBUG) || defined(INLINE_DATA)
8651 // Is it an inline candidate?
8652 impMarkInlineCandidate(call, exactContextHnd, exactContextNeedsRuntimeLookup, callInfo);
8656 // Push or append the result of the call
8657 if (callRetTyp == TYP_VOID)
8659 if (opcode == CEE_NEWOBJ)
8661 // we actually did push something, so don't spill the thing we just pushed.
8662 assert(verCurrentState.esStackDepth > 0);
8663 impAppendTree(call, verCurrentState.esStackDepth - 1, impCurStmtOffs);
8667 impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
8672 impSpillSpecialSideEff();
8674 if (clsFlags & CORINFO_FLG_ARRAY)
8676 eeGetCallSiteSig(pResolvedToken->token, pResolvedToken->tokenScope, pResolvedToken->tokenContext, sig);
8679 // Find the return type used for verification by interpreting the method signature.
8680 // NB: we are clobbering the already established sig.
8681 if (tiVerificationNeeded)
8683 // Actually, we never get the sig for the original method.
8684 sig = &(callInfo->verSig);
8687 typeInfo tiRetVal = verMakeTypeInfo(sig->retType, sig->retTypeClass);
8688 tiRetVal.NormaliseForStack();
8690 // The CEE_READONLY prefix modifies the verification semantics of an Address
8691 // operation on an array type.
8692 if ((clsFlags & CORINFO_FLG_ARRAY) && readonlyCall && tiRetVal.IsByRef())
8694 tiRetVal.SetIsReadonlyByRef();
8697 if (tiVerificationNeeded)
8699 // We assume all calls return permanent home byrefs. If they
8700 // didn't, they wouldn't be verifiable. This is also covering
8701 // the Address() helper for multidimensional arrays.
8702 if (tiRetVal.IsByRef())
8704 tiRetVal.SetIsPermanentHomeByRef();
8710 // Sometimes "call" is not a GT_CALL (if we imported an intrinsic that didn't turn into a call)
8712 bool fatPointerCandidate = call->AsCall()->IsFatPointerCandidate();
8713 if (varTypeIsStruct(callRetTyp))
8715 call = impFixupCallStructReturn(call->AsCall(), sig->retTypeClass);
8718 if ((call->gtFlags & GTF_CALL_INLINE_CANDIDATE) != 0)
8720 assert(opts.OptEnabled(CLFLG_INLINING));
8721 assert(!fatPointerCandidate); // We should not try to inline calli.
8723 // Make the call its own tree (spill the stack if needed).
8724 impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
8726 // TODO: Still using the widened type.
8727 call = gtNewInlineCandidateReturnExpr(call, genActualType(callRetTyp));
8731 if (fatPointerCandidate)
8733 // fatPointer candidates should be in statements of the form call() or var = call().
8734 // Such a form lets us find statements with fat calls without walking whole trees
8735 // and avoids problems with splitting trees.
8736 assert(!bIntrinsicImported);
8737 assert(IsTargetAbi(CORINFO_CORERT_ABI));
8738 if (call->OperGet() != GT_LCL_VAR) // may already have been converted by impFixupCallStructReturn.
8740 unsigned calliSlot = lvaGrabTemp(true DEBUGARG("calli"));
8741 LclVarDsc* varDsc = &lvaTable[calliSlot];
8742 varDsc->lvVerTypeInfo = tiRetVal;
8743 impAssignTempGen(calliSlot, call, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_NONE);
8744 // impAssignTempGen can change the src arg list and return type for a call that returns a struct.
8745 var_types type = genActualType(lvaTable[calliSlot].TypeGet());
8746 call = gtNewLclvNode(calliSlot, type);
8750 // For non-candidates we must also spill, since we
8751 // might have locals live on the eval stack that this call can modify.
8754 // Suppress this for certain well-known call targets
8755 // that we know won't modify locals, e.g. calls that are
8756 // recognized in gtCanOptimizeTypeEquality. Otherwise
8757 // we may break key fragile pattern matches later on.
8758 bool spillStack = true;
8761 GenTreeCall* callNode = call->AsCall();
8762 if ((callNode->gtCallType == CT_HELPER) && (gtIsTypeHandleToRuntimeTypeHelper(callNode) ||
8763 gtIsTypeHandleToRuntimeTypeHandleHelper(callNode)))
8767 else if ((callNode->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) != 0)
8775 impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("non-inline candidate call"));
8780 if (!bIntrinsicImported)
8782 //-------------------------------------------------------------------------
8784 /* If the call is of a small type and the callee is managed, the callee will normalize the result before returning.
8786 However, we need to normalize small type values returned by unmanaged
8787 functions (pinvoke). The pinvoke stub does the normalization, but we need to do it here
8788 if we use the shorter inlined pinvoke stub. */
8790 if (checkForSmallType && varTypeIsIntegral(callRetTyp) && genTypeSize(callRetTyp) < genTypeSize(TYP_INT))
8792 call = gtNewCastNode(genActualType(callRetTyp), call, false, callRetTyp);
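// For example, a pinvoke declared to return a short comes back widened in
// the return register; the cast re-normalizes it to a proper small-typed
// value.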
8796 impPushOnStack(call, tiRetVal);
8799 // VSD functions get a new call target each time we getCallInfo, so clear the cache.
8800 // Also, the call info cache for CALLI instructions is largely incomplete, so clear it out.
8801 // if ( (opcode == CEE_CALLI) || (callInfoCache.fetchCallInfo().kind == CORINFO_VIRTUALCALL_STUB))
8802 // callInfoCache.uncacheCallInfo();
8807 #pragma warning(pop)
8810 bool Compiler::impMethodInfo_hasRetBuffArg(CORINFO_METHOD_INFO* methInfo)
8812 CorInfoType corType = methInfo->args.retType;
8814 if ((corType == CORINFO_TYPE_VALUECLASS) || (corType == CORINFO_TYPE_REFANY))
8816 // We have some kind of STRUCT being returned
8818 structPassingKind howToReturnStruct = SPK_Unknown;
8820 var_types returnType = getReturnTypeForStruct(methInfo->args.retTypeClass, &howToReturnStruct);
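// SPK_ByReference means the ABI returns this struct through a hidden
// caller-supplied buffer, i.e. the method has a ret-buff arg.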
8822 if (howToReturnStruct == SPK_ByReference)
8833 var_types Compiler::impImportJitTestLabelMark(int numArgs)
8835 TestLabelAndNum tlAndN;
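// JitTestLabel.Mark pushes the label (and, in the 3-arg form, a number) as
// IL int constants before the expression being annotated, so the constants
// are popped in reverse order, followed by the annotated value.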
8839 StackEntry se = impPopStack();
8840 assert(se.seTypeInfo.GetType() == TI_INT);
8841 GenTree* val = se.val;
8842 assert(val->IsCnsIntOrI());
8843 tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue();
8845 else if (numArgs == 3)
8847 StackEntry se = impPopStack();
8848 assert(se.seTypeInfo.GetType() == TI_INT);
8849 GenTree* val = se.val;
8850 assert(val->IsCnsIntOrI());
8851 tlAndN.m_num = val->AsIntConCommon()->IconValue();
8853 assert(se.seTypeInfo.GetType() == TI_INT);
8855 assert(val->IsCnsIntOrI());
8856 tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue();
8863 StackEntry expSe = impPopStack();
8864 GenTree* node = expSe.val;
8866 // There are a small number of special cases where we actually put the annotation on a subnode.
8867 if (tlAndN.m_tl == TL_LoopHoist && tlAndN.m_num >= 100)
8869 // A loop hoist annotation with value >= 100 means that the expression should be a static field access,
8870 // a GT_IND of a static field address, which should be the sum of a (hoistable) helper call and possibly some
8871 // offset within the static field block whose address is returned by the helper call.
8872 // The annotation is saying that this address calculation, but not the entire access, should be hoisted.
8873 GenTree* helperCall = nullptr;
8874 assert(node->OperGet() == GT_IND);
8875 tlAndN.m_num -= 100;
8876 GetNodeTestData()->Set(node->gtOp.gtOp1, tlAndN);
8877 GetNodeTestData()->Remove(node);
8881 GetNodeTestData()->Set(node, tlAndN);
8884 impPushOnStack(node, expSe.seTypeInfo);
8885 return node->TypeGet();
8889 //-----------------------------------------------------------------------------------
8890 // impFixupCallStructReturn: For a call node that returns a struct type either
8891 // adjust the return type to an enregisterable type, or set the flag to indicate
8892 // struct return via retbuf arg.
8895 // call - GT_CALL GenTree node
8896 // retClsHnd - Class handle of return type of the call
8899 // Returns new GenTree node after fixing struct return of call node
8901 GenTree* Compiler::impFixupCallStructReturn(GenTreeCall* call, CORINFO_CLASS_HANDLE retClsHnd)
8903 if (!varTypeIsStruct(call))
8908 call->gtRetClsHnd = retClsHnd;
8910 #if FEATURE_MULTIREG_RET
8911 // Initialize Return type descriptor of call node
8912 ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
8913 retTypeDesc->InitializeStructReturnType(this, retClsHnd);
8914 #endif // FEATURE_MULTIREG_RET
8916 #ifdef UNIX_AMD64_ABI
8918 // Not allowed for FEATURE_CORECLR, which is the only SKU available for System V OSs.
8919 assert(!call->IsVarargs() && "varargs not allowed for System V OSs.");
8921 // The return type will remain as the incoming struct type unless normalized to a
8922 // single eightbyte return type below.
8923 call->gtReturnType = call->gtType;
8925 unsigned retRegCount = retTypeDesc->GetReturnRegCount();
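// A retRegCount of zero means the struct is not returned in registers;
// that case falls through to the hidden ret-buff handling below.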
8926 if (retRegCount != 0)
8928 if (retRegCount == 1)
8930 // See if the struct size is smaller than the return type size.
8932 if (retTypeDesc->IsEnclosingType())
8934 // If we know for sure this call will remain a call,
8935 // retype and return value via a suitable temp.
8936 if ((!call->CanTailCall()) && (!call->IsInlineCandidate()))
8938 call->gtReturnType = retTypeDesc->GetReturnRegType(0);
8939 return impAssignSmallStructTypeToVar(call, retClsHnd);
8944 // Return type is the same size as the struct, so we can
8945 // simply retype the call.
8946 call->gtReturnType = retTypeDesc->GetReturnRegType(0);
8951 // must be a struct returned in two registers
8952 assert(retRegCount == 2);
8954 if ((!call->CanTailCall()) && (!call->IsInlineCandidate()))
8956 // Force a call returning a multi-reg struct to always be of the IR form: tmp = call
8959 // No need to assign a multi-reg struct to a local var if:
8960 // - It is a tail call or
8961 // - The call is marked for in-lining later
8962 return impAssignMultiRegTypeToVar(call, retClsHnd);
8968 // struct not returned in registers, i.e. returned via hidden retbuf arg.
8969 call->gtCallMoreFlags |= GTF_CALL_M_RETBUFFARG;
8972 #else // not UNIX_AMD64_ABI
8974 // Check for a TYP_STRUCT type that wraps a primitive type.
8975 // Such structs are returned using a single register,
8976 // and we change the return type on those calls here.
8978 structPassingKind howToReturnStruct;
8979 var_types returnType = getReturnTypeForStruct(retClsHnd, &howToReturnStruct);
8981 if (howToReturnStruct == SPK_ByReference)
8983 assert(returnType == TYP_UNKNOWN);
8984 call->gtCallMoreFlags |= GTF_CALL_M_RETBUFFARG;
8988 assert(returnType != TYP_UNKNOWN);
8990 // See if the struct size is smaller than the return type size.
8992 if (howToReturnStruct == SPK_EnclosingType)
8994 // If we know for sure this call will remain a call,
8995 // retype and return value via a suitable temp.
8996 if ((!call->CanTailCall()) && (!call->IsInlineCandidate()))
8998 call->gtReturnType = returnType;
8999 return impAssignSmallStructTypeToVar(call, retClsHnd);
9004 // Return type is the same size as the struct, so we can
9005 // simply retype the call.
9006 call->gtReturnType = returnType;
9009 // ToDo: Refactor this common code sequence into its own method as it is used 4+ times
9010 if ((returnType == TYP_LONG) && (compLongUsed == false))
9012 compLongUsed = true;
9014 else if (((returnType == TYP_FLOAT) || (returnType == TYP_DOUBLE)) && (compFloatingPointUsed == false))
9016 compFloatingPointUsed = true;
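// Record that retyping introduced a long or floating-point value into the
// method; later phases consult these flags.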
9019 #if FEATURE_MULTIREG_RET
9020 unsigned retRegCount = retTypeDesc->GetReturnRegCount();
9021 assert(retRegCount != 0);
9023 if (retRegCount >= 2)
9025 if ((!call->CanTailCall()) && (!call->IsInlineCandidate()))
9027 // Force a call returning a multi-reg struct to always be of the IR form: tmp = call
9030 // No need to assign a multi-reg struct to a local var if:
9031 // - It is a tail call or
9032 // - The call is marked for in-lining later
9033 return impAssignMultiRegTypeToVar(call, retClsHnd);
9036 #endif // FEATURE_MULTIREG_RET
9039 #endif // not UNIX_AMD64_ABI
9044 /*****************************************************************************
9045 For struct return values, re-type the operand in the case where the ABI
9046 does not use a struct return buffer.
9047 Note that this method is only called for !_TARGET_X86_.
9050 GenTree* Compiler::impFixupStructReturnType(GenTree* op, CORINFO_CLASS_HANDLE retClsHnd)
9052 assert(varTypeIsStruct(info.compRetType));
9053 assert(info.compRetBuffArg == BAD_VAR_NUM);
9055 JITDUMP("\nimpFixupStructReturnType: retyping\n");
9058 #if defined(_TARGET_XARCH_)
9060 #ifdef UNIX_AMD64_ABI
9061 // No VarArgs for CoreCLR on x64 Unix
9062 assert(!info.compIsVarArgs);
9064 // Is method returning a multi-reg struct?
9065 if (varTypeIsStruct(info.compRetNativeType) && IsMultiRegReturnedType(retClsHnd))
9067 // In case of multi-reg struct return, we force IR to be one of the following:
9068 // GT_RETURN(lclvar) or GT_RETURN(call). If op is anything other than a
9069 // lclvar or call, it is assigned to a temp to create: temp = op and GT_RETURN(tmp).
9071 if (op->gtOper == GT_LCL_VAR)
9073 // Make sure that this struct stays in memory and doesn't get promoted.
9074 unsigned lclNum = op->gtLclVarCommon.gtLclNum;
9075 lvaTable[lclNum].lvIsMultiRegRet = true;
9077 // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
9078 op->gtFlags |= GTF_DONT_CSE;
9083 if (op->gtOper == GT_CALL)
9088 return impAssignMultiRegTypeToVar(op, retClsHnd);
9090 #else // !UNIX_AMD64_ABI
9091 assert(info.compRetNativeType != TYP_STRUCT);
9092 #endif // !UNIX_AMD64_ABI
9094 #elif FEATURE_MULTIREG_RET && defined(_TARGET_ARM_)
9096 if (varTypeIsStruct(info.compRetNativeType) && !info.compIsVarArgs && IsHfa(retClsHnd))
9098 if (op->gtOper == GT_LCL_VAR)
9100 // This LCL_VAR is an HFA return value; it stays as a TYP_STRUCT
9101 unsigned lclNum = op->gtLclVarCommon.gtLclNum;
9102 // Make sure this struct type stays as struct so that we can return it as an HFA
9103 lvaTable[lclNum].lvIsMultiRegRet = true;
9105 // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
9106 op->gtFlags |= GTF_DONT_CSE;
9111 if (op->gtOper == GT_CALL)
9113 if (op->gtCall.IsVarargs())
9115 // We cannot tail call because control needs to return to fix up the calling
9116 // convention for the result return.
9117 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL;
9118 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
9125 return impAssignMultiRegTypeToVar(op, retClsHnd);
9128 #elif FEATURE_MULTIREG_RET && defined(_TARGET_ARM64_)
9130 // Is method returning a multi-reg struct?
9131 if (IsMultiRegReturnedType(retClsHnd))
9133 if (op->gtOper == GT_LCL_VAR)
9135 // This LCL_VAR stays as a TYP_STRUCT
9136 unsigned lclNum = op->gtLclVarCommon.gtLclNum;
9138 // Make sure this struct type is not struct promoted
9139 lvaTable[lclNum].lvIsMultiRegRet = true;
9141 // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
9142 op->gtFlags |= GTF_DONT_CSE;
9147 if (op->gtOper == GT_CALL)
9149 if (op->gtCall.IsVarargs())
9151 // We cannot tail call because control needs to return to fix up the calling
9152 // convention for the result return.
9153 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_TAILCALL;
9154 op->gtCall.gtCallMoreFlags &= ~GTF_CALL_M_EXPLICIT_TAILCALL;
9161 return impAssignMultiRegTypeToVar(op, retClsHnd);
9164 #endif // defined(_TARGET_XARCH_) or FEATURE_MULTIREG_RET targets
9167 // Adjust the type away from struct to integral,
9168 // with no normalizing.
9169 if (op->gtOper == GT_LCL_VAR)
9171 // It is possible that we now have a lclVar of scalar type.
9172 // If so, don't transform it to GT_LCL_FLD.
9173 if (varTypeIsStruct(lvaTable[op->AsLclVar()->gtLclNum].lvType))
9175 op->ChangeOper(GT_LCL_FLD);
9178 else if (op->gtOper == GT_OBJ)
9180 GenTree* op1 = op->AsObj()->Addr();
9182 // We will fold away OBJ/ADDR
9183 // except for OBJ/ADDR/INDEX
9184 // as the array type influences the array element's offset
9185 // Later in this method we change op->gtType to info.compRetNativeType
9186 // This is not correct when op is a GT_INDEX as the starting offset
9187 // for the array elements 'elemOffs' is different for an array of
9188 // TYP_REF than an array of TYP_STRUCT (which simply wraps a TYP_REF)
9189 // Also refer to the GTF_INX_REFARR_LAYOUT flag
9191 if ((op1->gtOper == GT_ADDR) && (op1->gtOp.gtOp1->gtOper != GT_INDEX))
9193 // Change '*(&X)' to 'X' and see if we can do better
9194 op = op1->gtOp.gtOp1;
9195 goto REDO_RETURN_NODE;
9197 op->gtObj.gtClass = NO_CLASS_HANDLE;
9198 op->ChangeOperUnchecked(GT_IND);
9199 op->gtFlags |= GTF_IND_TGTANYWHERE;
9201 else if (op->gtOper == GT_CALL)
9203 if (op->AsCall()->TreatAsHasRetBufArg(this))
9205 // This must be one of those 'special' helpers that don't
9206 // really have a return buffer, but instead use it as a way
9207 // to keep the trees cleaner with fewer address-taken temps.
9209 // Well, now we have to materialize the return buffer as
9210 // an address-taken temp. Then we can return the temp.
9212 // NOTE: this code assumes that since the call directly
9213 // feeds the return, then the call must be returning the
9214 // same structure/class/type.
9216 unsigned tmpNum = lvaGrabTemp(true DEBUGARG("pseudo return buffer"));
9218 // No need to spill anything as we're about to return.
9219 impAssignTempGen(tmpNum, op, info.compMethodInfo->args.retTypeClass, (unsigned)CHECK_SPILL_NONE);
9221 // Don't create both a GT_ADDR & GT_OBJ just to undo all of that; instead,
9222 // jump directly to a GT_LCL_FLD.
9223 op = gtNewLclvNode(tmpNum, info.compRetNativeType);
9224 op->ChangeOper(GT_LCL_FLD);
9228 // Don't change the gtType of the call just yet; it will get changed later.
9232 #if defined(FEATURE_HW_INTRINSICS) && defined(_TARGET_ARM64_)
9233 else if ((op->gtOper == GT_HWIntrinsic) && varTypeIsSIMD(op->gtType))
9235 // TODO-ARM64-FIXME Implement ARM64 ABI for Short Vectors properly
9236 // assert(op->gtType == info.compRetNativeType)
9237 if (op->gtType != info.compRetNativeType)
9239 // Insert a register move to keep target type of SIMD intrinsic intact
9240 op = gtNewScalarHWIntrinsicNode(info.compRetNativeType, op, NI_ARM64_NONE_MOV);
9244 else if (op->gtOper == GT_COMMA)
9246 op->gtOp.gtOp2 = impFixupStructReturnType(op->gtOp.gtOp2, retClsHnd);
9249 op->gtType = info.compRetNativeType;
9251 JITDUMP("\nimpFixupStructReturnType: result of retyping is\n");
9257 /*****************************************************************************
9258 CEE_LEAVE may be jumping out of a protected block, viz, a catch or a
9259 finally-protected try. We find the finally blocks protecting the current
9260 offset (in order) by walking over the complete exception table and
9261 finding enclosing clauses. This assumes that the table is sorted.
9262 This will create a series of BBJ_CALLFINALLY -> BBJ_CALLFINALLY ... -> BBJ_ALWAYS.
9264 If we are leaving a catch handler, we need to attach the
9265 CPX_ENDCATCHes to the correct BBJ_CALLFINALLY blocks.
9267 After this function, the BBJ_LEAVE block has been converted to a different type.
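For example (sketch): leaving two nested finally-protected "try" regions
produces BBJ_CALLFINALLY (inner finally) -> BBJ_CALLFINALLY (outer finally)
-> BBJ_ALWAYS -> leave target.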
9270 #if !FEATURE_EH_FUNCLETS
9272 void Compiler::impImportLeave(BasicBlock* block)
9277 printf("\nBefore import CEE_LEAVE:\n");
9278 fgDispBasicBlocks();
9283 bool invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created)
9284 unsigned blkAddr = block->bbCodeOffs;
9285 BasicBlock* leaveTarget = block->bbJumpDest;
9286 unsigned jmpAddr = leaveTarget->bbCodeOffs;
9288 // LEAVE clears the stack: spill side effects and set the stack depth to 0
9290 impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave"));
9291 verCurrentState.esStackDepth = 0;
9293 assert(block->bbJumpKind == BBJ_LEAVE);
9294 assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != NULL); // should be a BB boundary
9296 BasicBlock* step = DUMMY_INIT(NULL);
9297 unsigned encFinallies = 0; // Number of enclosing finallies.
9298 GenTree* endCatches = NULL;
9299 GenTree* endLFin = NULL; // The statement tree to indicate the end of locally-invoked finally.
9304 for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
9306 // Grab the handler offsets
9308 IL_OFFSET tryBeg = HBtab->ebdTryBegOffs();
9309 IL_OFFSET tryEnd = HBtab->ebdTryEndOffs();
9310 IL_OFFSET hndBeg = HBtab->ebdHndBegOffs();
9311 IL_OFFSET hndEnd = HBtab->ebdHndEndOffs();
9313 /* Is this a catch-handler we are CEE_LEAVEing out of?
9314 * If so, we need to call CORINFO_HELP_ENDCATCH.
9317 if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd))
9319 // Can't CEE_LEAVE out of a finally/fault handler
9320 if (HBtab->HasFinallyOrFaultHandler())
9321 BADCODE("leave out of fault/finally block");
9323 // Create the call to CORINFO_HELP_ENDCATCH
9324 GenTree* endCatch = gtNewHelperCallNode(CORINFO_HELP_ENDCATCH, TYP_VOID);
9326 // Make a list of all the currently pending endCatches
9328 endCatches = gtNewOperNode(GT_COMMA, TYP_VOID, endCatches, endCatch);
9330 endCatches = endCatch;
9335 printf("impImportLeave - " FMT_BB " jumping out of catch handler EH#%u, adding call to "
9336 "CORINFO_HELP_ENDCATCH\n",
9337 block->bbNum, XTnum);
9341 else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
9342 !jitIsBetween(jmpAddr, tryBeg, tryEnd))
9344 /* This is a finally-protected try we are jumping out of */
9346 /* If there are any pending endCatches, and we have already
9347 jumped out of a finally-protected try, then the endCatches
9348 have to be put in a block in an outer try for async
9349 exceptions to work correctly.
9350 Else, just append to the original block */
9352 BasicBlock* callBlock;
9354 assert(!encFinallies == !endLFin); // if we have finallies, we better have an endLFin tree, and vice-versa
9356 if (encFinallies == 0)
9358 assert(step == DUMMY_INIT(NULL));
9360 callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY
9363 impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
9368 printf("impImportLeave - jumping out of a finally-protected try, convert block to BBJ_CALLFINALLY "
9370 "block %s\n", callBlock->dspToString());
9376 assert(step != DUMMY_INIT(NULL));
9378 /* Calling the finally block */
9379 callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, XTnum + 1, 0, step);
9380 assert(step->bbJumpKind == BBJ_ALWAYS);
9381 step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next
9382 // finally in the chain)
9383 step->bbJumpDest->bbRefs++;
9385 /* The new block will inherit this block's weight */
9386 callBlock->setBBWeight(block->bbWeight);
9387 callBlock->bbFlags |= block->bbFlags & BBF_RUN_RARELY;
9392 printf("impImportLeave - jumping out of a finally-protected try, new BBJ_CALLFINALLY block %s\n",
9393 callBlock->dspToString());
9401 lastStmt = gtNewStmt(endCatches);
9402 endLFin->gtNext = lastStmt;
9403 lastStmt->gtPrev = endLFin;
9410 // note that this sets BBF_IMPORTED on the block
9411 impEndTreeList(callBlock, endLFin, lastStmt);
9414 step = fgNewBBafter(BBJ_ALWAYS, callBlock, true);
9415 /* The new block will inherit this block's weight */
9416 step->setBBWeight(block->bbWeight);
9417 step->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS;
9422 printf("impImportLeave - jumping out of a finally-protected try, created step (BBJ_ALWAYS) block %s\n",
9423 step->dspToString());
9427 unsigned finallyNesting = compHndBBtab[XTnum].ebdHandlerNestingLevel;
9428 assert(finallyNesting <= compHndBBtabCount);
9430 callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler.
9431 endLFin = new (this, GT_END_LFIN) GenTreeVal(GT_END_LFIN, TYP_VOID, finallyNesting);
9432 endLFin = gtNewStmt(endLFin);
9437 invalidatePreds = true;
9441 /* Append any remaining endCatches, if any */
9443 assert(!encFinallies == !endLFin);
9445 if (encFinallies == 0)
9447 assert(step == DUMMY_INIT(NULL));
9448 block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS
9451 impAppendTree(endCatches, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
9456 printf("impImportLeave - no enclosing finally-protected try blocks; convert CEE_LEAVE block to BBJ_ALWAYS "
9458 "block %s\n", block->dspToString());
9464 // If leaveTarget is the start of another try block, we want to make sure that
9465 // we do not insert finalStep into that try block. Hence, we find the enclosing try block.
9467 unsigned tryIndex = bbFindInnermostCommonTryRegion(step, leaveTarget);
9469 // Insert a new BB either in the try region indicated by tryIndex or
9470 // the handler region indicated by leaveTarget->bbHndIndex,
9471 // depending on which is the inner region.
9472 BasicBlock* finalStep = fgNewBBinRegion(BBJ_ALWAYS, tryIndex, leaveTarget->bbHndIndex, step);
9473 finalStep->bbFlags |= BBF_KEEP_BBJ_ALWAYS;
9474 step->bbJumpDest = finalStep;
9476 /* The new block will inherit this block's weight */
9477 finalStep->setBBWeight(block->bbWeight);
9478 finalStep->bbFlags |= block->bbFlags & BBF_RUN_RARELY;
9483 printf("impImportLeave - finalStep block required (encFinallies(%d) > 0), new block %s\n", encFinallies,
9484 finalStep->dspToString());
9492 lastStmt = gtNewStmt(endCatches);
9493 endLFin->gtNext = lastStmt;
9494 lastStmt->gtPrev = endLFin;
9501 impEndTreeList(finalStep, endLFin, lastStmt);
9503 finalStep->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE
9505 // Queue up the jump target for importing
9507 impImportBlockPending(leaveTarget);
9509 invalidatePreds = true;
9512 if (invalidatePreds && fgComputePredsDone)
9514 JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n");
9519 fgVerifyHandlerTab();
9523 printf("\nAfter import CEE_LEAVE:\n");
9524 fgDispBasicBlocks();
9530 #else // FEATURE_EH_FUNCLETS
9532 void Compiler::impImportLeave(BasicBlock* block)
printf("\nBefore import CEE_LEAVE in " FMT_BB " (targeting " FMT_BB "):\n", block->bbNum,
9538 block->bbJumpDest->bbNum);
9539 fgDispBasicBlocks();
9544 bool invalidatePreds = false; // If we create new blocks, invalidate the predecessor lists (if created)
9545 unsigned blkAddr = block->bbCodeOffs;
9546 BasicBlock* leaveTarget = block->bbJumpDest;
9547 unsigned jmpAddr = leaveTarget->bbCodeOffs;
// LEAVE clears the stack: spill any side effects and set the stack depth to 0
9551 impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportLeave"));
9552 verCurrentState.esStackDepth = 0;
9554 assert(block->bbJumpKind == BBJ_LEAVE);
9555 assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != nullptr); // should be a BB boundary
9557 BasicBlock* step = nullptr;
9561 // No step type; step == NULL.
9564 // Is the step block the BBJ_ALWAYS block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair?
9565 // That is, is step->bbJumpDest where a finally will return to?
9568 // The step block is a catch return.
9571 // The step block is in a "try", created as the target for a finally return or the target for a catch return.
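// For illustration (a sketch of the behavior below, not additional code): leaving a catch that is nested
// inside a finally-protected try first converts the LEAVE block into a BBJ_EHCATCHRET step (ST_Catch);
// that step is later retargeted at a new BBJ_CALLFINALLY, whose paired BBJ_ALWAYS becomes the next step
// (ST_FinallyReturn); and so on outward until the ultimate LEAVE target is reached.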
9574 StepType stepType = ST_None;
9579 for (XTnum = 0, HBtab = compHndBBtab; XTnum < compHndBBtabCount; XTnum++, HBtab++)
9581 // Grab the handler offsets
9583 IL_OFFSET tryBeg = HBtab->ebdTryBegOffs();
9584 IL_OFFSET tryEnd = HBtab->ebdTryEndOffs();
9585 IL_OFFSET hndBeg = HBtab->ebdHndBegOffs();
9586 IL_OFFSET hndEnd = HBtab->ebdHndEndOffs();
/* Is this a catch-handler we are CEE_LEAVEing out of? */
9591 if (jitIsBetween(blkAddr, hndBeg, hndEnd) && !jitIsBetween(jmpAddr, hndBeg, hndEnd))
9593 // Can't CEE_LEAVE out of a finally/fault handler
9594 if (HBtab->HasFinallyOrFaultHandler())
9596 BADCODE("leave out of fault/finally block");
9599 /* We are jumping out of a catch */
9601 if (step == nullptr)
9604 step->bbJumpKind = BBJ_EHCATCHRET; // convert the BBJ_LEAVE to BBJ_EHCATCHRET
9605 stepType = ST_Catch;
9610 printf("impImportLeave - jumping out of a catch (EH#%u), convert block " FMT_BB
9611 " to BBJ_EHCATCHRET "
9613 XTnum, step->bbNum);
9619 BasicBlock* exitBlock;
/* Create a new catch exit block in the catch region for the existing step block to jump to in this scope */
9623 exitBlock = fgNewBBinRegion(BBJ_EHCATCHRET, 0, XTnum + 1, step);
9625 assert(step->bbJumpKind == BBJ_ALWAYS || step->bbJumpKind == BBJ_EHCATCHRET);
9626 step->bbJumpDest = exitBlock; // the previous step (maybe a call to a nested finally, or a nested catch
9627 // exit) returns to this block
9628 step->bbJumpDest->bbRefs++;
9630 #if defined(_TARGET_ARM_)
9631 if (stepType == ST_FinallyReturn)
9633 assert(step->bbJumpKind == BBJ_ALWAYS);
9634 // Mark the target of a finally return
9635 step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
9637 #endif // defined(_TARGET_ARM_)
9639 /* The new block will inherit this block's weight */
9640 exitBlock->setBBWeight(block->bbWeight);
9641 exitBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
9643 /* This exit block is the new step */
9645 stepType = ST_Catch;
9647 invalidatePreds = true;
9652 printf("impImportLeave - jumping out of a catch (EH#%u), new BBJ_EHCATCHRET block " FMT_BB "\n",
9653 XTnum, exitBlock->bbNum);
9658 else if (HBtab->HasFinallyHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
9659 !jitIsBetween(jmpAddr, tryBeg, tryEnd))
9661 /* We are jumping out of a finally-protected try */
9663 BasicBlock* callBlock;
9665 if (step == nullptr)
9667 #if FEATURE_EH_CALLFINALLY_THUNKS
9669 // Put the call to the finally in the enclosing region.
9670 unsigned callFinallyTryIndex =
9671 (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1;
9672 unsigned callFinallyHndIndex =
9673 (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1;
9674 callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, block);
9676 // Convert the BBJ_LEAVE to BBJ_ALWAYS, jumping to the new BBJ_CALLFINALLY. This is because
9677 // the new BBJ_CALLFINALLY is in a different EH region, thus it can't just replace the BBJ_LEAVE,
9678 // which might be in the middle of the "try". In most cases, the BBJ_ALWAYS will jump to the
9679 // next block, and flow optimizations will remove it.
9680 block->bbJumpKind = BBJ_ALWAYS;
9681 block->bbJumpDest = callBlock;
9682 block->bbJumpDest->bbRefs++;
9684 /* The new block will inherit this block's weight */
9685 callBlock->setBBWeight(block->bbWeight);
9686 callBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
9691 printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block " FMT_BB
9693 "BBJ_ALWAYS, add BBJ_CALLFINALLY block " FMT_BB "\n",
9694 XTnum, block->bbNum, callBlock->bbNum);
9698 #else // !FEATURE_EH_CALLFINALLY_THUNKS
9701 callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY
9706 printf("impImportLeave - jumping out of a finally-protected try (EH#%u), convert block " FMT_BB
9708 "BBJ_CALLFINALLY block\n",
9709 XTnum, callBlock->bbNum);
9713 #endif // !FEATURE_EH_CALLFINALLY_THUNKS
9717 // Calling the finally block. We already have a step block that is either the call-to-finally from a
9718 // more nested try/finally (thus we are jumping out of multiple nested 'try' blocks, each protected by
9719 // a 'finally'), or the step block is the return from a catch.
9721 // Due to ThreadAbortException, we can't have the catch return target the call-to-finally block
9722 // directly. Note that if a 'catch' ends without resetting the ThreadAbortException, the VM will
9723 // automatically re-raise the exception, using the return address of the catch (that is, the target
9724 // block of the BBJ_EHCATCHRET) as the re-raise address. If this address is in a finally, the VM will
9725 // refuse to do the re-raise, and the ThreadAbortException will get eaten (and lost). On AMD64/ARM64,
9726 // we put the call-to-finally thunk in a special "cloned finally" EH region that does look like a
9727 // finally clause to the VM. Thus, on these platforms, we can't have BBJ_EHCATCHRET target a
9728 // BBJ_CALLFINALLY directly. (Note that on ARM32, we don't mark the thunk specially -- it lives directly
9729 // within the 'try' region protected by the finally, since we generate code in such a way that execution
// never returns to the call-to-finally call, and the finally-protected 'try' region doesn't appear on stack walks.)
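//
// For illustration (a sketch assuming FEATURE_EH_CALLFINALLY_THUNKS), the resulting flow for a catch
// return that must run an enclosing finally looks like:
//
//      step (BBJ_EHCATCHRET)
//          -> step2 (BBJ_ALWAYS, inside the finally-protected 'try')
//              -> callBlock (BBJ_CALLFINALLY thunk, in the enclosing EH region)
//                  -> paired BBJ_ALWAYS continuation -> ... -> leaveTarget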
9733 assert(step->bbJumpKind == BBJ_ALWAYS || step->bbJumpKind == BBJ_EHCATCHRET);
9735 #if FEATURE_EH_CALLFINALLY_THUNKS
9736 if (step->bbJumpKind == BBJ_EHCATCHRET)
9738 // Need to create another step block in the 'try' region that will actually branch to the
9739 // call-to-finally thunk.
9740 BasicBlock* step2 = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step);
9741 step->bbJumpDest = step2;
9742 step->bbJumpDest->bbRefs++;
9743 step2->setBBWeight(block->bbWeight);
9744 step2->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
9749 printf("impImportLeave - jumping out of a finally-protected try (EH#%u), step block is "
9750 "BBJ_EHCATCHRET (" FMT_BB "), new BBJ_ALWAYS step-step block " FMT_BB "\n",
9751 XTnum, step->bbNum, step2->bbNum);
9756 assert(stepType == ST_Catch); // Leave it as catch type for now.
9758 #endif // FEATURE_EH_CALLFINALLY_THUNKS
9760 #if FEATURE_EH_CALLFINALLY_THUNKS
9761 unsigned callFinallyTryIndex =
9762 (HBtab->ebdEnclosingTryIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingTryIndex + 1;
9763 unsigned callFinallyHndIndex =
9764 (HBtab->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX) ? 0 : HBtab->ebdEnclosingHndIndex + 1;
9765 #else // !FEATURE_EH_CALLFINALLY_THUNKS
9766 unsigned callFinallyTryIndex = XTnum + 1;
9767 unsigned callFinallyHndIndex = 0; // don't care
9768 #endif // !FEATURE_EH_CALLFINALLY_THUNKS
9770 callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, callFinallyTryIndex, callFinallyHndIndex, step);
9771 step->bbJumpDest = callBlock; // the previous call to a finally returns to this call (to the next
9772 // finally in the chain)
9773 step->bbJumpDest->bbRefs++;
9775 #if defined(_TARGET_ARM_)
9776 if (stepType == ST_FinallyReturn)
9778 assert(step->bbJumpKind == BBJ_ALWAYS);
9779 // Mark the target of a finally return
9780 step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
9782 #endif // defined(_TARGET_ARM_)
9784 /* The new block will inherit this block's weight */
9785 callBlock->setBBWeight(block->bbWeight);
9786 callBlock->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
9791 printf("impImportLeave - jumping out of a finally-protected try (EH#%u), new BBJ_CALLFINALLY "
9792 "block " FMT_BB "\n",
9793 XTnum, callBlock->bbNum);
9798 step = fgNewBBafter(BBJ_ALWAYS, callBlock, true);
9799 stepType = ST_FinallyReturn;
9801 /* The new block will inherit this block's weight */
9802 step->setBBWeight(block->bbWeight);
9803 step->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED | BBF_KEEP_BBJ_ALWAYS;
9808 printf("impImportLeave - jumping out of a finally-protected try (EH#%u), created step (BBJ_ALWAYS) "
9809 "block " FMT_BB "\n",
9810 XTnum, step->bbNum);
9814 callBlock->bbJumpDest = HBtab->ebdHndBeg; // This callBlock will call the "finally" handler.
9816 invalidatePreds = true;
9818 else if (HBtab->HasCatchHandler() && jitIsBetween(blkAddr, tryBeg, tryEnd) &&
9819 !jitIsBetween(jmpAddr, tryBeg, tryEnd))
9821 // We are jumping out of a catch-protected try.
9823 // If we are returning from a call to a finally, then we must have a step block within a try
9824 // that is protected by a catch. This is so when unwinding from that finally (e.g., if code within the
9825 // finally raises an exception), the VM will find this step block, notice that it is in a protected region,
9826 // and invoke the appropriate catch.
9828 // We also need to handle a special case with the handling of ThreadAbortException. If a try/catch
9829 // catches a ThreadAbortException (which might be because it catches a parent, e.g. System.Exception),
9830 // and the catch doesn't call System.Threading.Thread::ResetAbort(), then when the catch returns to the VM,
9831 // the VM will automatically re-raise the ThreadAbortException. When it does this, it uses the target
9832 // address of the catch return as the new exception address. That is, the re-raised exception appears to
9833 // occur at the catch return address. If this exception return address skips an enclosing try/catch that
9834 // catches ThreadAbortException, then the enclosing try/catch will not catch the exception, as it should.
9839 // // something here raises ThreadAbortException
9840 // LEAVE LABEL_1; // no need to stop at LABEL_2
9841 // } catch (Exception) {
9842 // // This catches ThreadAbortException, but doesn't call System.Threading.Thread::ResetAbort(), so
9843 // // ThreadAbortException is re-raised by the VM at the address specified by the LEAVE opcode.
9844 // // This is bad, since it means the outer try/catch won't get a chance to catch the re-raised
9845 // // ThreadAbortException. So, instead, create step block LABEL_2 and LEAVE to that. We only
9846 // // need to do this transformation if the current EH block is a try/catch that catches
9847 // // ThreadAbortException (or one of its parents), however we might not be able to find that
9848 // // information, so currently we do it for all catch types.
// LEAVE LABEL_1; // Convert this to LEAVE LABEL_2;
9851 // LABEL_2: LEAVE LABEL_1; // inserted by this step creation code
9852 // } catch (ThreadAbortException) {
// Note that this pattern isn't theoretical: it occurs in ASP.NET, in IL code generated by the Roslyn C# compiler.
9859 if ((stepType == ST_FinallyReturn) || (stepType == ST_Catch))
9861 BasicBlock* catchStep;
9865 if (stepType == ST_FinallyReturn)
9867 assert(step->bbJumpKind == BBJ_ALWAYS);
9871 assert(stepType == ST_Catch);
9872 assert(step->bbJumpKind == BBJ_EHCATCHRET);
9875 /* Create a new exit block in the try region for the existing step block to jump to in this scope */
9876 catchStep = fgNewBBinRegion(BBJ_ALWAYS, XTnum + 1, 0, step);
9877 step->bbJumpDest = catchStep;
9878 step->bbJumpDest->bbRefs++;
9880 #if defined(_TARGET_ARM_)
9881 if (stepType == ST_FinallyReturn)
9883 // Mark the target of a finally return
9884 step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
9886 #endif // defined(_TARGET_ARM_)
9888 /* The new block will inherit this block's weight */
9889 catchStep->setBBWeight(block->bbWeight);
9890 catchStep->bbFlags |= (block->bbFlags & BBF_RUN_RARELY) | BBF_IMPORTED;
9895 if (stepType == ST_FinallyReturn)
9897 printf("impImportLeave - return from finally jumping out of a catch-protected try (EH#%u), new "
9898 "BBJ_ALWAYS block " FMT_BB "\n",
9899 XTnum, catchStep->bbNum);
9903 assert(stepType == ST_Catch);
9904 printf("impImportLeave - return from catch jumping out of a catch-protected try (EH#%u), new "
9905 "BBJ_ALWAYS block " FMT_BB "\n",
9906 XTnum, catchStep->bbNum);
9911 /* This block is the new step */
9915 invalidatePreds = true;
9920 if (step == nullptr)
9922 block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS
9927 printf("impImportLeave - no enclosing finally-protected try blocks or catch handlers; convert CEE_LEAVE "
9928 "block " FMT_BB " to BBJ_ALWAYS\n",
9935 step->bbJumpDest = leaveTarget; // this is the ultimate destination of the LEAVE
9937 #if defined(_TARGET_ARM_)
9938 if (stepType == ST_FinallyReturn)
9940 assert(step->bbJumpKind == BBJ_ALWAYS);
9941 // Mark the target of a finally return
9942 step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET;
9944 #endif // defined(_TARGET_ARM_)
9949 printf("impImportLeave - final destination of step blocks set to " FMT_BB "\n", leaveTarget->bbNum);
9953 // Queue up the jump target for importing
9955 impImportBlockPending(leaveTarget);
9958 if (invalidatePreds && fgComputePredsDone)
9960 JITDUMP("\n**** impImportLeave - Removing preds after creating new blocks\n");
9965 fgVerifyHandlerTab();
9969 printf("\nAfter import CEE_LEAVE:\n");
9970 fgDispBasicBlocks();
9976 #endif // FEATURE_EH_FUNCLETS
9978 /*****************************************************************************/
9979 // This is called when reimporting a leave block. It resets the JumpKind,
9980 // JumpDest, and bbNext to the original values
9982 void Compiler::impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr)
9984 #if FEATURE_EH_FUNCLETS
// With EH funclets, while importing the leave opcode we create another block ending with BBJ_ALWAYS (call it B1),
// and the block containing the leave (say B0) is marked as BBJ_CALLFINALLY. If for some reason we reimport B0,
// it is reset (in this routine) to end with BBJ_LEAVE, and further down, when B0 is reimported, we
// create another BBJ_ALWAYS (call it B2). In this process B1 gets orphaned, and any blocks for which B1 is the
// only predecessor are also considered orphans and are attempted to be deleted.
9996 // leave OUTSIDE; // B0 is the block containing this leave, following this would be B1
// In the above nested try-finally example, we create a step block (call it Bstep) which branches to the
// block that the finally returns to (such a block is marked as a finally target). Block B1 branches to the
// step block. Because of the re-import of B0, Bstep is also orphaned. Since Bstep is a finally target it
// cannot be removed. To work around this we duplicate B0 (call it B0Dup) before resetting. B0Dup is marked
// as BBJ_CALLFINALLY and only serves to pair up with B1 (BBJ_ALWAYS) that got orphaned. Now during orphan
// block deletion B0Dup and B1 will be treated as a pair and handled correctly.
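//
// Illustrative block layout after the reset (a sketch, not emitted code):
//
//      B0:    BBJ_LEAVE        (reset below; will be reimported)
//      B0Dup: BBJ_CALLFINALLY  (internal, weight 0, marked imported; exists only to pair with B1)
//      B1:    BBJ_ALWAYS       (the previously created paired block, now reachable only through B0Dup)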
10007 if (block->bbJumpKind == BBJ_CALLFINALLY)
10009 BasicBlock* dupBlock = bbNewBasicBlock(block->bbJumpKind);
10010 dupBlock->bbFlags = block->bbFlags;
10011 dupBlock->bbJumpDest = block->bbJumpDest;
10012 dupBlock->copyEHRegion(block);
10013 dupBlock->bbCatchTyp = block->bbCatchTyp;
// Mark this block as
// a) not referenced by any other block, to make sure that it gets deleted
// b) with weight zero
// c) prevented from being imported
// d) internal
// e) rarely run
10021 dupBlock->bbRefs = 0;
10022 dupBlock->bbWeight = 0;
10023 dupBlock->bbFlags |= BBF_IMPORTED | BBF_INTERNAL | BBF_RUN_RARELY;
10025 // Insert the block right after the block which is getting reset so that BBJ_CALLFINALLY and BBJ_ALWAYS
10026 // will be next to each other.
10027 fgInsertBBafter(block, dupBlock);
10032 printf("New Basic Block " FMT_BB " duplicate of " FMT_BB " created.\n", dupBlock->bbNum, block->bbNum);
10036 #endif // FEATURE_EH_FUNCLETS
10038 block->bbJumpKind = BBJ_LEAVE;
10040 block->bbJumpDest = fgLookupBB(jmpAddr);
10042 // We will leave the BBJ_ALWAYS block we introduced. When it's reimported
// the BBJ_ALWAYS block will be unreachable, and will be removed afterwards. The
10044 // reason we don't want to remove the block at this point is that if we call
10045 // fgInitBBLookup() again we will do it wrong as the BBJ_ALWAYS block won't be
10046 // added and the linked list length will be different than fgBBcount.
10049 /*****************************************************************************/
10050 // Get the first non-prefix opcode. Used for verification of valid combinations
10051 // of prefixes and actual opcodes.
10053 static OPCODE impGetNonPrefixOpcode(const BYTE* codeAddr, const BYTE* codeEndp)
10055 while (codeAddr < codeEndp)
10057 OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr);
10058 codeAddr += sizeof(__int8);
10060 if (opcode == CEE_PREFIX1)
10062 if (codeAddr >= codeEndp)
10066 opcode = (OPCODE)(getU1LittleEndian(codeAddr) + 256);
10067 codeAddr += sizeof(__int8);
10072 case CEE_UNALIGNED:
10075 case CEE_CONSTRAINED:
10082 codeAddr += opcodeSizes[opcode];
10085 return CEE_ILLEGAL;
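// For example (illustrative IL): scanning "volatile. unaligned. 1 ldind.i4" from the first prefix returns
// CEE_LDIND_I4, while a stream that ends while only prefixes have been seen yields CEE_ILLEGAL.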
10088 /*****************************************************************************/
10089 // Checks whether the opcode is a valid opcode for volatile. and unaligned. prefixes
10091 static void impValidateMemoryAccessOpcode(const BYTE* codeAddr, const BYTE* codeEndp, bool volatilePrefix)
10093 OPCODE opcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
// The opcodes for all the ldind and stind variants happen to be contiguous, except for stind.i.
10097 ((CEE_LDIND_I1 <= opcode) && (opcode <= CEE_STIND_R8)) || (opcode == CEE_STIND_I) ||
10098 (opcode == CEE_LDFLD) || (opcode == CEE_STFLD) || (opcode == CEE_LDOBJ) || (opcode == CEE_STOBJ) ||
10099 (opcode == CEE_INITBLK) || (opcode == CEE_CPBLK) ||
// The volatile. prefix is also allowed with ldsfld and stsfld
10101 (volatilePrefix && ((opcode == CEE_LDSFLD) || (opcode == CEE_STSFLD)))))
10103 BADCODE("Invalid opcode for unaligned. or volatile. prefix");
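// For example (illustrative IL): "volatile. ldsfld ..." passes this check, since volatile. is valid on
// static field accesses, whereas "unaligned. 1 ldsfld ..." trips the BADCODE above because unaligned.
// applies only to the pointer-based accesses listed.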
10107 /*****************************************************************************/
10111 #undef RETURN // undef contracts RETURN macro
10126 const static controlFlow_t controlFlow[] = {
10127 #define OPDEF(c, s, pop, push, args, type, l, s1, s2, flow) flow,
10128 #include "opcode.def"
10134 /*****************************************************************************
* Determine the result type of an arithmetic operation
* On 64-bit targets, inserts upcasts when native int is mixed with int32
10138 var_types Compiler::impGetByRefResultType(genTreeOps oper, bool fUnsigned, GenTree** pOp1, GenTree** pOp2)
10140 var_types type = TYP_UNDEF;
10141 GenTree* op1 = *pOp1;
10142 GenTree* op2 = *pOp2;
// Arithmetic operations are generally only allowed with
10145 // primitive types, but certain operations are allowed
10148 if ((oper == GT_SUB) && (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF))
10150 if ((genActualType(op1->TypeGet()) == TYP_BYREF) && (genActualType(op2->TypeGet()) == TYP_BYREF))
10152 // byref1-byref2 => gives a native int
10155 else if (genActualTypeIsIntOrI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_BYREF))
10157 // [native] int - byref => gives a native int
10160 // The reason is that it is possible, in managed C++,
10161 // to have a tree like this:
10168 // const(h) int addr byref
10170 // <BUGNUM> VSW 318822 </BUGNUM>
// So here we decide to make the resulting type a native int.
10173 CLANG_FORMAT_COMMENT_ANCHOR;
10175 #ifdef _TARGET_64BIT_
10176 if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
10178 // insert an explicit upcast
10179 op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
10181 #endif // _TARGET_64BIT_
10187 // byref - [native] int => gives a byref
10188 assert(genActualType(op1->TypeGet()) == TYP_BYREF && genActualTypeIsIntOrI(op2->TypeGet()));
10190 #ifdef _TARGET_64BIT_
10191 if ((genActualType(op2->TypeGet()) != TYP_I_IMPL))
10193 // insert an explicit upcast
10194 op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
10196 #endif // _TARGET_64BIT_
10201 else if ((oper == GT_ADD) &&
10202 (genActualType(op1->TypeGet()) == TYP_BYREF || genActualType(op2->TypeGet()) == TYP_BYREF))
10204 // byref + [native] int => gives a byref
10206 // [native] int + byref => gives a byref
// only one can be a byref: byref op byref is not allowed
10209 assert(genActualType(op1->TypeGet()) != TYP_BYREF || genActualType(op2->TypeGet()) != TYP_BYREF);
10210 assert(genActualTypeIsIntOrI(op1->TypeGet()) || genActualTypeIsIntOrI(op2->TypeGet()));
10212 #ifdef _TARGET_64BIT_
10213 if (genActualType(op2->TypeGet()) == TYP_BYREF)
10215 if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
10217 // insert an explicit upcast
10218 op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
10221 else if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
10223 // insert an explicit upcast
10224 op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
10226 #endif // _TARGET_64BIT_
10230 #ifdef _TARGET_64BIT_
10231 else if (genActualType(op1->TypeGet()) == TYP_I_IMPL || genActualType(op2->TypeGet()) == TYP_I_IMPL)
10233 assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType));
10235 // int + long => gives long
10236 // long + int => gives long
10237 // we get this because in the IL the long isn't Int64, it's just IntPtr
10239 if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
10241 // insert an explicit upcast
10242 op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
10244 else if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
10246 // insert an explicit upcast
10247 op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
10252 #else // 32-bit TARGET
10253 else if (genActualType(op1->TypeGet()) == TYP_LONG || genActualType(op2->TypeGet()) == TYP_LONG)
10255 assert(!varTypeIsFloating(op1->gtType) && !varTypeIsFloating(op2->gtType));
10257 // int + long => gives long
10258 // long + int => gives long
10262 #endif // _TARGET_64BIT_
10265 // int + int => gives an int
10266 assert(genActualType(op1->TypeGet()) != TYP_BYREF && genActualType(op2->TypeGet()) != TYP_BYREF);
10268 assert(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
10269 varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
10271 type = genActualType(op1->gtType);
10273 // If both operands are TYP_FLOAT, then leave it as TYP_FLOAT.
10274 // Otherwise, turn floats into doubles
10275 if ((type == TYP_FLOAT) && (genActualType(op2->gtType) != TYP_FLOAT))
10277 assert(genActualType(op2->gtType) == TYP_DOUBLE);
10282 assert(type == TYP_BYREF || type == TYP_DOUBLE || type == TYP_FLOAT || type == TYP_LONG || type == TYP_INT);
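// Worked example (illustrative): on a 64-bit target, importing "ldloc.0 (int32); ldloc.1 (byref); sub"
// hits the [native] int - byref case above; op1 is upcast from TYP_INT to TYP_I_IMPL and the result type
// is TYP_I_IMPL (native int).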
10286 //------------------------------------------------------------------------
10287 // impOptimizeCastClassOrIsInst: attempt to resolve a cast when jitting
10290 // op1 - value to cast
10291 // pResolvedToken - resolved token for type to cast to
10292 // isCastClass - true if this is a castclass, false if isinst
10295 // tree representing optimized cast, or null if no optimization possible
10297 GenTree* Compiler::impOptimizeCastClassOrIsInst(GenTree* op1, CORINFO_RESOLVED_TOKEN* pResolvedToken, bool isCastClass)
10299 assert(op1->TypeGet() == TYP_REF);
10301 // Don't optimize for minopts or debug codegen.
10302 if (opts.compDbgCode || opts.MinOpts())
10307 // See what we know about the type of the object being cast.
10308 bool isExact = false;
10309 bool isNonNull = false;
10310 CORINFO_CLASS_HANDLE fromClass = gtGetClassHandle(op1, &isExact, &isNonNull);
10311 GenTree* optResult = nullptr;
10313 if (fromClass != nullptr)
10315 CORINFO_CLASS_HANDLE toClass = pResolvedToken->hClass;
10316 JITDUMP("\nConsidering optimization of %s from %s%p (%s) to %p (%s)\n", isCastClass ? "castclass" : "isinst",
10317 isExact ? "exact " : "", dspPtr(fromClass), info.compCompHnd->getClassName(fromClass), dspPtr(toClass),
10318 info.compCompHnd->getClassName(toClass));
10320 // Perhaps we know if the cast will succeed or fail.
10321 TypeCompareState castResult = info.compCompHnd->compareTypesForCast(fromClass, toClass);
10323 if (castResult == TypeCompareState::Must)
10325 // Cast will succeed, result is simply op1.
10326 JITDUMP("Cast will succeed, optimizing to simply return input\n");
10329 else if (castResult == TypeCompareState::MustNot)
10331 // See if we can sharpen exactness by looking for final classes
10334 DWORD flags = info.compCompHnd->getClassAttribs(fromClass);
10335 DWORD flagsMask = CORINFO_FLG_FINAL | CORINFO_FLG_MARSHAL_BYREF | CORINFO_FLG_CONTEXTFUL |
10336 CORINFO_FLG_VARIANCE | CORINFO_FLG_ARRAY;
10337 isExact = ((flags & flagsMask) == CORINFO_FLG_FINAL);
10340 // Cast to exact type will fail. Handle case where we have
10341 // an exact type (that is, fromClass is not a subtype)
10342 // and we're not going to throw on failure.
10343 if (isExact && !isCastClass)
10345 JITDUMP("Cast will fail, optimizing to return null\n");
10346 GenTree* result = gtNewIconNode(0, TYP_REF);
10348 // If the cast was fed by a box, we can remove that too.
10349 if (op1->IsBoxedValue())
10351 JITDUMP("Also removing upstream box\n");
10352 gtTryRemoveBoxUpstreamEffects(op1);
10359 JITDUMP("Not optimizing failing castclass (yet)\n");
10363 JITDUMP("Can't optimize since fromClass is inexact\n");
10368 JITDUMP("Result of cast unknown, must generate runtime test\n");
10373 JITDUMP("\nCan't optimize since fromClass is unknown\n");
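// For illustration (C#-like pseudo-code, hypothetical): given
//     object o = "hello"; ... var s = (string)o; ...
// if gtGetClassHandle reports an exact System.String for 'o', compareTypesForCast returns
// TypeCompareState::Must and the castclass collapses to just 'o', with no runtime check.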
10379 //------------------------------------------------------------------------
10380 // impCastClassOrIsInstToTree: build and import castclass/isinst
10383 // op1 - value to cast
10384 // op2 - type handle for type to cast to
10385 // pResolvedToken - resolved token from the cast operation
// isCastClass - true if this is a castclass, false if isinst
10389 // Tree representing the cast
10392 // May expand into a series of runtime checks or a helper call.
10394 GenTree* Compiler::impCastClassOrIsInstToTree(GenTree* op1,
10396 CORINFO_RESOLVED_TOKEN* pResolvedToken,
10399 assert(op1->TypeGet() == TYP_REF);
10401 // Optimistically assume the jit should expand this as an inline test
10402 bool shouldExpandInline = true;
10404 // Profitability check.
10406 // Don't bother with inline expansion when jit is trying to
10407 // generate code quickly, or the cast is in code that won't run very
10408 // often, or the method already is pretty big.
10409 if (compCurBB->isRunRarely() || opts.compDbgCode || opts.MinOpts())
10411 // not worth the code expansion if jitting fast or in a rarely run block
10412 shouldExpandInline = false;
10414 else if ((op1->gtFlags & GTF_GLOB_EFFECT) && lvaHaveManyLocals())
10416 // not worth creating an untracked local variable
10417 shouldExpandInline = false;
10420 // Pessimistically assume the jit cannot expand this as an inline test
10421 bool canExpandInline = false;
10422 const CorInfoHelpFunc helper = info.compCompHnd->getCastingHelper(pResolvedToken, isCastClass);
// Not all castclass/isinst operations can be inline expanded.
10427 // Check legality only if an inline expansion is desirable.
10428 if (shouldExpandInline)
10432 // Jit can only inline expand the normal CHKCASTCLASS helper.
10433 canExpandInline = (helper == CORINFO_HELP_CHKCASTCLASS);
10437 if (helper == CORINFO_HELP_ISINSTANCEOFCLASS)
10439 // Check the class attributes.
10440 DWORD flags = info.compCompHnd->getClassAttribs(pResolvedToken->hClass);
10442 // If the class is final and is not marshal byref or
10443 // contextful, the jit can expand the IsInst check inline.
10444 DWORD flagsMask = CORINFO_FLG_FINAL | CORINFO_FLG_MARSHAL_BYREF | CORINFO_FLG_CONTEXTFUL;
10445 canExpandInline = ((flags & flagsMask) == CORINFO_FLG_FINAL);
10450 const bool expandInline = canExpandInline && shouldExpandInline;
10454 JITDUMP("\nExpanding %s as call because %s\n", isCastClass ? "castclass" : "isinst",
10455 canExpandInline ? "want smaller code or faster jitting" : "inline expansion not legal");
10457 // If we CSE this class handle we prevent assertionProp from making SubType assertions
10458 // so instead we force the CSE logic to not consider CSE-ing this class handle.
10460 op2->gtFlags |= GTF_DONT_CSE;
10462 return gtNewHelperCallNode(helper, TYP_REF, gtNewArgList(op2, op1));
10465 JITDUMP("\nExpanding %s inline\n", isCastClass ? "castclass" : "isinst");
10467 impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark2"));
10472 // expand the methodtable match:
10474 // condMT ==> GT_NE
10476 // GT_IND op2 (typically CNS_INT)
10481 // This can replace op1 with a GT_COMMA that evaluates op1 into a local
10483 op1 = impCloneExpr(op1, &temp, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL, nullptr DEBUGARG("CASTCLASS eval op1"));
10485 // op1 is now known to be a non-complex tree
10486 // thus we can use gtClone(op1) from now on
10489 GenTree* op2Var = op2;
10492 op2Var = fgInsertCommaFormTemp(&op2);
10493 lvaTable[op2Var->AsLclVarCommon()->GetLclNum()].lvIsCSE = true;
10495 temp = gtNewOperNode(GT_IND, TYP_I_IMPL, temp);
10496 temp->gtFlags |= GTF_EXCEPT;
10497 condMT = gtNewOperNode(GT_NE, TYP_INT, temp, op2);
10501 // expand the null check:
10503 // condNull ==> GT_EQ
10508 condNull = gtNewOperNode(GT_EQ, TYP_INT, gtClone(op1), gtNewIconNode(0, TYP_REF));
10511 // expand the true and false trees for the condMT
10513 GenTree* condFalse = gtClone(op1);
10518 // use the special helper that skips the cases checked by our inlined cast
10520 const CorInfoHelpFunc specialHelper = CORINFO_HELP_CHKCASTCLASS_SPECIAL;
10522 condTrue = gtNewHelperCallNode(specialHelper, TYP_REF, gtNewArgList(op2Var, gtClone(op1)));
10526 condTrue = gtNewIconNode(0, TYP_REF);
10529 #define USE_QMARK_TREES
10531 #ifdef USE_QMARK_TREES
10534 // Generate first QMARK - COLON tree
10536 // qmarkMT ==> GT_QMARK
10540 // condFalse condTrue
10542 temp = new (this, GT_COLON) GenTreeColon(TYP_REF, condTrue, condFalse);
10543 qmarkMT = gtNewQmarkNode(TYP_REF, condMT, temp);
10545 GenTree* qmarkNull;
10547 // Generate second QMARK - COLON tree
10549 // qmarkNull ==> GT_QMARK
10551 // condNull GT_COLON
10555 temp = new (this, GT_COLON) GenTreeColon(TYP_REF, gtClone(op1), qmarkMT);
10556 qmarkNull = gtNewQmarkNode(TYP_REF, condNull, temp);
10557 qmarkNull->gtFlags |= GTF_QMARK_CAST_INSTOF;
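// In C-like pseudo-code, the tree built above computes (a sketch, not emitted code):
//
//     tmp = op1;
//     result = (tmp == null) ? tmp
//            : (*(void**)tmp != op2) ? condTrue  // helper(op2, tmp) for castclass; null for isinst
//                                    : tmp;      // method table matched, cast trivially succeeds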
10559 // Make QMark node a top level node by spilling it.
10560 unsigned tmp = lvaGrabTemp(true DEBUGARG("spilling QMark2"));
10561 impAssignTempGen(tmp, qmarkNull, (unsigned)CHECK_SPILL_NONE);
10563 // TODO-CQ: Is it possible op1 has a better type?
10565 // See also gtGetHelperCallClassHandle where we make the same
10566 // determination for the helper call variants.
10567 LclVarDsc* lclDsc = lvaGetDesc(tmp);
10568 assert(lclDsc->lvSingleDef == 0);
10569 lclDsc->lvSingleDef = 1;
10570 JITDUMP("Marked V%02u as a single def temp\n", tmp);
10571 lvaSetClass(tmp, pResolvedToken->hClass);
10572 return gtNewLclvNode(tmp, TYP_REF);
10577 #define assertImp(cond) ((void)0)
10579 #define assertImp(cond) \
10584 const int cchAssertImpBuf = 600; \
10585 char* assertImpBuf = (char*)alloca(cchAssertImpBuf); \
10586 _snprintf_s(assertImpBuf, cchAssertImpBuf, cchAssertImpBuf - 1, \
10587 "%s : Possibly bad IL with CEE_%s at offset %04Xh (op1=%s op2=%s stkDepth=%d)", #cond, \
10588 impCurOpcName, impCurOpcOffs, op1 ? varTypeName(op1->TypeGet()) : "NULL", \
10589 op2 ? varTypeName(op2->TypeGet()) : "NULL", verCurrentState.esStackDepth); \
10590 assertAbort(assertImpBuf, __FILE__, __LINE__); \
10596 #pragma warning(push)
10597 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
10599 /*****************************************************************************
* Import the instructions for the given basic block
10602 void Compiler::impImportBlockCode(BasicBlock* block)
10604 #define _impResolveToken(kind) impResolveToken(codeAddr, &resolvedToken, kind)
10610 printf("\nImporting " FMT_BB " (PC=%03u) of '%s'", block->bbNum, block->bbCodeOffs, info.compFullName);
10614 unsigned nxtStmtIndex = impInitBlockLineInfo();
10615 IL_OFFSET nxtStmtOffs;
10617 GenTree* arrayNodeFrom;
10618 GenTree* arrayNodeTo;
10619 GenTree* arrayNodeToIndex;
10620 CorInfoHelpFunc helper;
10621 CorInfoIsAccessAllowedResult accessAllowedResult;
10622 CORINFO_HELPER_DESC calloutHelper;
10623 const BYTE* lastLoadToken = nullptr;
10625 // reject cyclic constraints
10626 if (tiVerificationNeeded)
10628 Verify(!info.hasCircularClassConstraints, "Method parent has circular class type parameter constraints.");
10629 Verify(!info.hasCircularMethodConstraints, "Method has circular method type parameter constraints.");
10632 /* Get the tree list started */
10634 impBeginTreeList();
10636 /* Walk the opcodes that comprise the basic block */
10638 const BYTE* codeAddr = info.compCode + block->bbCodeOffs;
10639 const BYTE* codeEndp = info.compCode + block->bbCodeOffsEnd;
10641 IL_OFFSET opcodeOffs = block->bbCodeOffs;
10642 IL_OFFSET lastSpillOffs = opcodeOffs;
10646 /* remember the start of the delegate creation sequence (used for verification) */
10647 const BYTE* delegateCreateStart = nullptr;
10649 int prefixFlags = 0;
10650 bool explicitTailCall, constraintCall, readonlyCall;
10654 unsigned numArgs = info.compArgsCount;
10656 /* Now process all the opcodes in the block */
10658 var_types callTyp = TYP_COUNT;
10659 OPCODE prevOpcode = CEE_ILLEGAL;
10661 if (block->bbCatchTyp)
10663 if (info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES)
10665 impCurStmtOffsSet(block->bbCodeOffs);
10668 // We will spill the GT_CATCH_ARG and the input of the BB_QMARK block
10669 // to a temp. This is a trade off for code simplicity
10670 impSpillSpecialSideEff();
10673 while (codeAddr < codeEndp)
10675 bool usingReadyToRunHelper = false;
10676 CORINFO_RESOLVED_TOKEN resolvedToken;
10677 CORINFO_RESOLVED_TOKEN constrainedResolvedToken;
10678 CORINFO_CALL_INFO callInfo;
10679 CORINFO_FIELD_INFO fieldInfo;
10681 tiRetVal = typeInfo(); // Default type info
10683 //---------------------------------------------------------------------
10685 /* We need to restrict the max tree depth as many of the Compiler
10686 functions are recursive. We do this by spilling the stack */
10688 if (verCurrentState.esStackDepth)
10690 /* Has it been a while since we last saw a non-empty stack (which
guarantees that the tree depth isn't accumulating). */
10693 if ((opcodeOffs - lastSpillOffs) > MAX_TREE_SIZE && impCanSpillNow(prevOpcode))
10695 impSpillStackEnsure();
10696 lastSpillOffs = opcodeOffs;
10701 lastSpillOffs = opcodeOffs;
10702 impBoxTempInUse = false; // nothing on the stack, box temp OK to use again
10705 /* Compute the current instr offset */
10707 opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
10710 if (opts.compDbgInfo)
10713 if (!compIsForInlining())
10716 (nxtStmtIndex < info.compStmtOffsetsCount) ? info.compStmtOffsets[nxtStmtIndex] : BAD_IL_OFFSET;
10718 /* Have we reached the next stmt boundary ? */
10720 if (nxtStmtOffs != BAD_IL_OFFSET && opcodeOffs >= nxtStmtOffs)
10722 assert(nxtStmtOffs == info.compStmtOffsets[nxtStmtIndex]);
10724 if (verCurrentState.esStackDepth != 0 && opts.compDbgCode)
10726 /* We need to provide accurate IP-mapping at this point.
10727 So spill anything on the stack so that it will form
10728 gtStmts with the correct stmt offset noted */
10730 impSpillStackEnsure(true);
10733 // Has impCurStmtOffs been reported in any tree?
10735 if (impCurStmtOffs != BAD_IL_OFFSET && opts.compDbgCode)
10737 GenTree* placeHolder = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
10738 impAppendTree(placeHolder, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
10740 assert(impCurStmtOffs == BAD_IL_OFFSET);
10743 if (impCurStmtOffs == BAD_IL_OFFSET)
10745 /* Make sure that nxtStmtIndex is in sync with opcodeOffs.
10746 If opcodeOffs has gone past nxtStmtIndex, catch up */
10748 while ((nxtStmtIndex + 1) < info.compStmtOffsetsCount &&
10749 info.compStmtOffsets[nxtStmtIndex + 1] <= opcodeOffs)
10754 /* Go to the new stmt */
10756 impCurStmtOffsSet(info.compStmtOffsets[nxtStmtIndex]);
10758 /* Update the stmt boundary index */
10761 assert(nxtStmtIndex <= info.compStmtOffsetsCount);
10763 /* Are there any more line# entries after this one? */
10765 if (nxtStmtIndex < info.compStmtOffsetsCount)
10767 /* Remember where the next line# starts */
10769 nxtStmtOffs = info.compStmtOffsets[nxtStmtIndex];
10773 /* No more line# entries */
10775 nxtStmtOffs = BAD_IL_OFFSET;
10779 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::STACK_EMPTY_BOUNDARIES) &&
10780 (verCurrentState.esStackDepth == 0))
10782 /* At stack-empty locations, we have already added the tree to
the stmt list with the last offset. We just need to update impCurStmtOffs. */
10787 impCurStmtOffsSet(opcodeOffs);
10789 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::CALL_SITE_BOUNDARIES) &&
10790 impOpcodeIsCallSiteBoundary(prevOpcode))
10792 /* Make sure we have a type cached */
10793 assert(callTyp != TYP_COUNT);
10795 if (callTyp == TYP_VOID)
10797 impCurStmtOffsSet(opcodeOffs);
10799 else if (opts.compDbgCode)
10801 impSpillStackEnsure(true);
10802 impCurStmtOffsSet(opcodeOffs);
10805 else if ((info.compStmtOffsetsImplicit & ICorDebugInfo::NOP_BOUNDARIES) && (prevOpcode == CEE_NOP))
10807 if (opts.compDbgCode)
10809 impSpillStackEnsure(true);
10812 impCurStmtOffsSet(opcodeOffs);
10815 assert(impCurStmtOffs == BAD_IL_OFFSET || nxtStmtOffs == BAD_IL_OFFSET ||
10816 jitGetILoffs(impCurStmtOffs) <= nxtStmtOffs);
10820 CORINFO_CLASS_HANDLE clsHnd = DUMMY_INIT(NULL);
10821 CORINFO_CLASS_HANDLE ldelemClsHnd = DUMMY_INIT(NULL);
10822 CORINFO_CLASS_HANDLE stelemClsHnd = DUMMY_INIT(NULL);
10824 var_types lclTyp, ovflType = TYP_UNKNOWN;
10825 GenTree* op1 = DUMMY_INIT(NULL);
10826 GenTree* op2 = DUMMY_INIT(NULL);
10827 GenTreeArgList* args = nullptr; // What good do these "DUMMY_INIT"s do?
10828 GenTree* newObjThisPtr = DUMMY_INIT(NULL);
10829 bool uns = DUMMY_INIT(false);
10830 bool isLocal = false;
10832 /* Get the next opcode and the size of its parameters */
10834 OPCODE opcode = (OPCODE)getU1LittleEndian(codeAddr);
10835 codeAddr += sizeof(__int8);
10838 impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1);
10839 JITDUMP("\n [%2u] %3u (0x%03x) ", verCurrentState.esStackDepth, impCurOpcOffs, impCurOpcOffs);
10844 // Return if any previous code has caused inline to fail.
10845 if (compDonotInline())
10850 /* Get the size of additional parameters */
10852 signed int sz = opcodeSizes[opcode];
10855 clsHnd = NO_CLASS_HANDLE;
10856 lclTyp = TYP_COUNT;
10857 callTyp = TYP_COUNT;
10859 impCurOpcOffs = (IL_OFFSET)(codeAddr - info.compCode - 1);
10860 impCurOpcName = opcodeNames[opcode];
10862 if (verbose && (opcode != CEE_PREFIX1))
10864 printf("%s", impCurOpcName);
10867 /* Use assertImp() to display the opcode */
10869 op1 = op2 = nullptr;
10872 /* See what kind of an opcode we have, then */
10874 unsigned mflags = 0;
10875 unsigned clsFlags = 0;
10888 CORINFO_SIG_INFO sig;
10890 bool ovfl, unordered, callNode;
10892 CORINFO_CLASS_HANDLE tokenType;
10902 opcode = (OPCODE)(getU1LittleEndian(codeAddr) + 256);
10903 opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
10904 codeAddr += sizeof(__int8);
10905 goto DECODE_OPCODE;
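// For example: two-byte opcodes are encoded with a 0xFE (CEE_PREFIX1) lead byte, so "ceq" appears as
// FE 01 in the IL stream and decodes here to the single OPCODE value 256 + 0x01.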
10909 // We need to call impSpillLclRefs() for a struct type lclVar.
10910 // This is done for non-block assignments in the handling of stloc.
10911 if ((op1->OperGet() == GT_ASG) && varTypeIsStruct(op1->gtOp.gtOp1) &&
10912 (op1->gtOp.gtOp1->gtOper == GT_LCL_VAR))
10914 impSpillLclRefs(op1->gtOp.gtOp1->AsLclVarCommon()->gtLclNum);
10917 /* Append 'op1' to the list of statements */
10918 impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
10923 /* Append 'op1' to the list of statements */
10925 impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
10931 // Remember at which BC offset the tree was finished
10932 impNoteLastILoffs();
10937 impPushNullObjRefOnStack();
10940 case CEE_LDC_I4_M1:
10950 cval.intVal = (opcode - CEE_LDC_I4_0);
10951 assert(-1 <= cval.intVal && cval.intVal <= 8);
10955 cval.intVal = getI1LittleEndian(codeAddr);
10958 cval.intVal = getI4LittleEndian(codeAddr);
10961 JITDUMP(" %d", cval.intVal);
10962 impPushOnStack(gtNewIconNode(cval.intVal), typeInfo(TI_INT));
10966 cval.lngVal = getI8LittleEndian(codeAddr);
10967 JITDUMP(" 0x%016llx", cval.lngVal);
10968 impPushOnStack(gtNewLconNode(cval.lngVal), typeInfo(TI_LONG));
10972 cval.dblVal = getR8LittleEndian(codeAddr);
10973 JITDUMP(" %#.17g", cval.dblVal);
10974 impPushOnStack(gtNewDconNode(cval.dblVal), typeInfo(TI_DOUBLE));
10978 cval.dblVal = getR4LittleEndian(codeAddr);
10979 JITDUMP(" %#.17g", cval.dblVal);
10981 GenTree* cnsOp = gtNewDconNode(cval.dblVal);
10982 cnsOp->gtType = TYP_FLOAT;
10983 impPushOnStack(cnsOp, typeInfo(TI_DOUBLE));
10989 if (compIsForInlining())
10991 if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_NO_CALLEE_LDSTR)
10993 compInlineResult->NoteFatal(InlineObservation::CALLSITE_HAS_LDSTR_RESTRICTION);
10998 val = getU4LittleEndian(codeAddr);
10999 JITDUMP(" %08X", val);
11000 if (tiVerificationNeeded)
11002 Verify(info.compCompHnd->isValidStringRef(info.compScopeHnd, val), "bad string");
11003 tiRetVal = typeInfo(TI_REF, impGetStringClass());
11005 impPushOnStack(gtNewSconNode(val, info.compScopeHnd), tiRetVal);
11010 lclNum = getU2LittleEndian(codeAddr);
11011 JITDUMP(" %u", lclNum);
11012 impLoadArg(lclNum, opcodeOffs + sz + 1);
11016 lclNum = getU1LittleEndian(codeAddr);
11017 JITDUMP(" %u", lclNum);
11018 impLoadArg(lclNum, opcodeOffs + sz + 1);
11025 lclNum = (opcode - CEE_LDARG_0);
11026 assert(lclNum >= 0 && lclNum < 4);
11027 impLoadArg(lclNum, opcodeOffs + sz + 1);
11031 lclNum = getU2LittleEndian(codeAddr);
11032 JITDUMP(" %u", lclNum);
11033 impLoadLoc(lclNum, opcodeOffs + sz + 1);
11037 lclNum = getU1LittleEndian(codeAddr);
11038 JITDUMP(" %u", lclNum);
11039 impLoadLoc(lclNum, opcodeOffs + sz + 1);
11046 lclNum = (opcode - CEE_LDLOC_0);
11047 assert(lclNum >= 0 && lclNum < 4);
11048 impLoadLoc(lclNum, opcodeOffs + sz + 1);
11052 lclNum = getU2LittleEndian(codeAddr);
11056 lclNum = getU1LittleEndian(codeAddr);
11058 JITDUMP(" %u", lclNum);
11060 if (tiVerificationNeeded)
11062 Verify(lclNum < info.compILargsCount, "bad arg num");
11065 if (compIsForInlining())
11067 op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo);
11068 noway_assert(op1->gtOper == GT_LCL_VAR);
11069 lclNum = op1->AsLclVar()->gtLclNum;
11074 lclNum = compMapILargNum(lclNum); // account for possible hidden param
11075 assertImp(lclNum < numArgs);
11077 if (lclNum == info.compThisArg)
11079 lclNum = lvaArg0Var;
11082 // We should have seen this arg write in the prescan
11083 assert(lvaTable[lclNum].lvHasILStoreOp);
11085 if (tiVerificationNeeded)
11087 typeInfo& tiLclVar = lvaTable[lclNum].lvVerTypeInfo;
11088 Verify(tiCompatibleWith(impStackTop().seTypeInfo, NormaliseForStack(tiLclVar), true),
11091 if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
11093 Verify(!tiLclVar.IsThisPtr(), "storing to uninit this ptr");
11100 lclNum = getU2LittleEndian(codeAddr);
11102 JITDUMP(" %u", lclNum);
11106 lclNum = getU1LittleEndian(codeAddr);
11108 JITDUMP(" %u", lclNum);
11116 lclNum = (opcode - CEE_STLOC_0);
11117 assert(lclNum >= 0 && lclNum < 4);
11120 if (tiVerificationNeeded)
11122 Verify(lclNum < info.compMethodInfo->locals.numArgs, "bad local num");
11123 Verify(tiCompatibleWith(impStackTop().seTypeInfo,
11124 NormaliseForStack(lvaTable[lclNum + numArgs].lvVerTypeInfo), true),
11128 if (compIsForInlining())
11130 lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
11132 /* Have we allocated a temp for this local? */
11134 lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline stloc first use temp"));
11143 if (lclNum >= info.compLocalsCount && lclNum != lvaArg0Var)
11145 assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
11151 /* if it is a struct assignment, make certain we don't overflow the buffer */
11152 assert(lclTyp != TYP_STRUCT || lvaLclSize(lclNum) >= info.compCompHnd->getClassSize(clsHnd));
11154 if (lvaTable[lclNum].lvNormalizeOnLoad())
11156 lclTyp = lvaGetRealType(lclNum);
11160 lclTyp = lvaGetActualType(lclNum);
11164 /* Pop the value being assigned */
11167 StackEntry se = impPopStack();
11168 clsHnd = se.seTypeInfo.GetClassHandle();
11170 tiRetVal = se.seTypeInfo;
11173 #ifdef FEATURE_SIMD
11174 if (varTypeIsSIMD(lclTyp) && (lclTyp != op1->TypeGet()))
11176 assert(op1->TypeGet() == TYP_STRUCT);
11177 op1->gtType = lclTyp;
11179 #endif // FEATURE_SIMD
11181 op1 = impImplicitIorI4Cast(op1, lclTyp);
11183 #ifdef _TARGET_64BIT_
// Downcast the TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
11185 if (varTypeIsI(op1->TypeGet()) && (genActualType(lclTyp) == TYP_INT))
11187 assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
11188 op1 = gtNewCastNode(TYP_INT, op1, false, TYP_INT);
11190 #endif // _TARGET_64BIT_
11192 // We had better assign it a value of the correct type
11194 genActualType(lclTyp) == genActualType(op1->gtType) ||
11195 genActualType(lclTyp) == TYP_I_IMPL && op1->IsVarAddr() ||
11196 (genActualType(lclTyp) == TYP_I_IMPL && (op1->gtType == TYP_BYREF || op1->gtType == TYP_REF)) ||
11197 (genActualType(op1->gtType) == TYP_I_IMPL && lclTyp == TYP_BYREF) ||
11198 (varTypeIsFloating(lclTyp) && varTypeIsFloating(op1->TypeGet())) ||
11199 ((genActualType(lclTyp) == TYP_BYREF) && genActualType(op1->TypeGet()) == TYP_REF));
11201 /* If op1 is "&var" then its type is the transient "*" and it can
11202 be used either as TYP_BYREF or TYP_I_IMPL */
11204 if (op1->IsVarAddr())
11206 assertImp(genActualType(lclTyp) == TYP_I_IMPL || lclTyp == TYP_BYREF);
11208 /* When "&var" is created, we assume it is a byref. If it is
11209 being assigned to a TYP_I_IMPL var, change the type to
11210 prevent unnecessary GC info */
11212 if (genActualType(lclTyp) == TYP_I_IMPL)
11214 op1->gtType = TYP_I_IMPL;
11218 // If this is a local and the local is a ref type, see
11219 // if we can improve type information based on the
11220 // value being assigned.
11221 if (isLocal && (lclTyp == TYP_REF))
11223 // We should have seen a stloc in our IL prescan.
11224 assert(lvaTable[lclNum].lvHasILStoreOp);
11226 // Is there just one place this local is defined?
11227 const bool isSingleDefLocal = lvaTable[lclNum].lvSingleDef;
11229 // Conservative check that there is just one
11230 // definition that reaches this store.
11231 const bool hasSingleReachingDef = (block->bbStackDepthOnEntry() == 0);
11233 if (isSingleDefLocal && hasSingleReachingDef)
11235 lvaUpdateClass(lclNum, op1, clsHnd);
11239 /* Filter out simple assignments to itself */
11241 if (op1->gtOper == GT_LCL_VAR && lclNum == op1->gtLclVarCommon.gtLclNum)
11243 if (opts.compDbgCode)
11245 op1 = gtNewNothingNode();
11254 /* Create the assignment node */
11256 op2 = gtNewLclvNode(lclNum, lclTyp, opcodeOffs + sz + 1);
11258 /* If the local is aliased or pinned, we need to spill calls and
11259 indirections from the stack. */
11261 if ((lvaTable[lclNum].lvAddrExposed || lvaTable[lclNum].lvHasLdAddrOp || lvaTable[lclNum].lvPinned) &&
11262 (verCurrentState.esStackDepth > 0))
11264 impSpillSideEffects(false,
11265 (unsigned)CHECK_SPILL_ALL DEBUGARG("Local could be aliased or is pinned"));
11268 /* Spill any refs to the local from the stack */
11270 impSpillLclRefs(lclNum);
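// For example (illustrative IL): in "ldloc.0 ldloc.0 ldc.i4.1 add stloc.0 stloc.1", the first ldloc.0
// is still on the evaluation stack when stloc.0 is imported; it must be spilled to a temp here so that
// the subsequent stloc.1 stores the value loc0 had before this assignment.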
11272 // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE
11273 // We insert a cast to the dest 'op2' type
11275 if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) &&
11276 varTypeIsFloating(op2->gtType))
11278 op1 = gtNewCastNode(op2->TypeGet(), op1, false, op2->TypeGet());
11281 if (varTypeIsStruct(lclTyp))
11283 op1 = impAssignStruct(op2, op1, clsHnd, (unsigned)CHECK_SPILL_ALL);
11287 // The code generator generates GC tracking information
// based on the RHS of the assignment. Later the LHS (which
// is a BYREF) gets used and the emitter checks that that variable
11290 // is being tracked. It is not (since the RHS was an int and did
11291 // not need tracking). To keep this assert happy, we change the RHS
11292 if (lclTyp == TYP_BYREF && !varTypeIsGC(op1->gtType))
11294 op1->gtType = TYP_BYREF;
11296 op1 = gtNewAssignNode(op2, op1);
11302 lclNum = getU2LittleEndian(codeAddr);
11306 lclNum = getU1LittleEndian(codeAddr);
11308 JITDUMP(" %u", lclNum);
11309 if (tiVerificationNeeded)
11311 Verify(lclNum < info.compMethodInfo->locals.numArgs, "bad local num");
11312 Verify(info.compInitMem, "initLocals not set");
11315 if (compIsForInlining())
11317 // Get the local type
11318 lclTyp = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt].lclTypeInfo;
11320 /* Have we allocated a temp for this local? */
11322 lclNum = impInlineFetchLocal(lclNum DEBUGARG("Inline ldloca(s) first use temp"));
11324 op1 = gtNewLclvNode(lclNum, lvaGetActualType(lclNum));
11330 assertImp(lclNum < info.compLocalsCount);
11334 lclNum = getU2LittleEndian(codeAddr);
11338 lclNum = getU1LittleEndian(codeAddr);
11340 JITDUMP(" %u", lclNum);
11341 Verify(lclNum < info.compILargsCount, "bad arg num");
11343 if (compIsForInlining())
// In IL, LDARGA(_S) is used to load the byref managed pointer of a struct argument,
11346 // followed by a ldfld to load the field.
11348 op1 = impInlineFetchArg(lclNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo);
11349 if (op1->gtOper != GT_LCL_VAR)
11351 compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDARGA_NOT_LOCAL_VAR);
11355 assert(op1->gtOper == GT_LCL_VAR);
11360 lclNum = compMapILargNum(lclNum); // account for possible hidden param
11361 assertImp(lclNum < numArgs);
11363 if (lclNum == info.compThisArg)
11365 lclNum = lvaArg0Var;
11372 op1 = gtNewLclvNode(lclNum, lvaGetActualType(lclNum), opcodeOffs + sz + 1);
11375 assert(op1->gtOper == GT_LCL_VAR);
11377 /* Note that this is supposed to create the transient type "*"
11378 which may be used as a TYP_I_IMPL. However we catch places
11379 where it is used as a TYP_I_IMPL and change the node if needed.
11380 Thus we are pessimistic and may report byrefs in the GC info
where it was not absolutely needed, but it is safer this way. */
11383 op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, op1);
// &aliasedVar doesn't need GTF_GLOB_REF, though aliasedVar does
11386 assert((op1->gtFlags & GTF_GLOB_REF) == 0);
11388 tiRetVal = lvaTable[lclNum].lvVerTypeInfo;
11389 if (tiVerificationNeeded)
11391 // Don't allow taking address of uninit this ptr.
11392 if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
11394 Verify(!tiRetVal.IsThisPtr(), "address of uninit this ptr");
11397 if (!tiRetVal.IsByRef())
11399 tiRetVal.MakeByRef();
11403 Verify(false, "byref to byref");
11407 impPushOnStack(op1, tiRetVal);
11412 if (!info.compIsVarArgs)
11414 BADCODE("arglist in non-vararg method");
11417 if (tiVerificationNeeded)
11419 tiRetVal = typeInfo(TI_STRUCT, impGetRuntimeArgumentHandle());
11421 assertImp((info.compMethodInfo->args.callConv & CORINFO_CALLCONV_MASK) == CORINFO_CALLCONV_VARARG);
11423 /* The ARGLIST cookie is a hidden 'last' parameter, we have already
adjusted the arg count because this is like fetching the last param */
11425 assertImp(0 < numArgs);
11426 assert(lvaTable[lvaVarargsHandleArg].lvAddrExposed);
11427 lclNum = lvaVarargsHandleArg;
11428 op1 = gtNewLclvNode(lclNum, TYP_I_IMPL, opcodeOffs + sz + 1);
11429 op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, op1);
11430 impPushOnStack(op1, tiRetVal);
11433 case CEE_ENDFINALLY:
11435 if (compIsForInlining())
11437 assert(!"Shouldn't have exception handlers in the inliner!");
11438 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFINALLY);
11442 if (verCurrentState.esStackDepth > 0)
11444 impEvalSideEffects();
11447 if (info.compXcptnsCount == 0)
11449 BADCODE("endfinally outside finally");
11452 assert(verCurrentState.esStackDepth == 0);
11454 op1 = gtNewOperNode(GT_RETFILT, TYP_VOID, nullptr);
11457 case CEE_ENDFILTER:
11459 if (compIsForInlining())
11461 assert(!"Shouldn't have exception handlers in the inliner!");
11462 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_ENDFILTER);
11466 block->bbSetRunRarely(); // filters are rare
11468 if (info.compXcptnsCount == 0)
11470 BADCODE("endfilter outside filter");
11473 if (tiVerificationNeeded)
11475 Verify(impStackTop().seTypeInfo.IsType(TI_INT), "bad endfilt arg");
11478 op1 = impPopStack().val;
11479 assertImp(op1->gtType == TYP_INT);
11480 if (!bbInFilterILRange(block))
11482 BADCODE("EndFilter outside a filter handler");
11485 /* Mark current bb as end of filter */
11487 assert(compCurBB->bbFlags & BBF_DONT_REMOVE);
11488 assert(compCurBB->bbJumpKind == BBJ_EHFILTERRET);
11490 /* Mark catch handler as successor */
11492 op1 = gtNewOperNode(GT_RETFILT, op1->TypeGet(), op1);
11493 if (verCurrentState.esStackDepth != 0)
11495 verRaiseVerifyException(INDEBUG("stack must be 1 on end of filter") DEBUGARG(__FILE__)
11496 DEBUGARG(__LINE__));
11501 prefixFlags &= ~PREFIX_TAILCALL; // ret without call before it
11503 if (!impReturnInstruction(block, prefixFlags, opcode))
11514 assert(!compIsForInlining());
11516 if (tiVerificationNeeded)
11518 Verify(false, "Invalid opcode: CEE_JMP");
11521 if ((info.compFlags & CORINFO_FLG_SYNCH) || block->hasTryIndex() || block->hasHndIndex())
11523 /* CEE_JMP does not make sense in some "protected" regions. */
11525 BADCODE("Jmp not allowed in protected region");
11528 if (verCurrentState.esStackDepth != 0)
11530 BADCODE("Stack must be empty after CEE_JMPs");
11533 _impResolveToken(CORINFO_TOKENKIND_Method);
11535 JITDUMP(" %08X", resolvedToken.token);
11537 /* The signature of the target has to be identical to ours.
11538 At least check that argCnt and returnType match */
11540 eeGetMethodSig(resolvedToken.hMethod, &sig);
11541 if (sig.numArgs != info.compMethodInfo->args.numArgs ||
11542 sig.retType != info.compMethodInfo->args.retType ||
11543 sig.callConv != info.compMethodInfo->args.callConv)
11545 BADCODE("Incompatible target for CEE_JMPs");
11548 op1 = new (this, GT_JMP) GenTreeVal(GT_JMP, TYP_VOID, (size_t)resolvedToken.hMethod);
11550 /* Mark the basic block as being a JUMP instead of RETURN */
11552 block->bbFlags |= BBF_HAS_JMP;
11554 /* Set this flag to make sure register arguments have a location assigned
11555 * even if we don't use them inside the method */
11557 compJmpOpUsed = true;
11559 fgNoStructPromotion = true;
11564 assertImp(sz == sizeof(unsigned));
11566 _impResolveToken(CORINFO_TOKENKIND_Class);
11568 JITDUMP(" %08X", resolvedToken.token);
11570 ldelemClsHnd = resolvedToken.hClass;
11572 if (tiVerificationNeeded)
11574 typeInfo tiArray = impStackTop(1).seTypeInfo;
11575 typeInfo tiIndex = impStackTop().seTypeInfo;
// As per ECMA, the 'index' specified can be either int32 or native int.
11578 Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
11580 typeInfo arrayElemType = verMakeTypeInfo(ldelemClsHnd);
11581 Verify(tiArray.IsNullObjRef() ||
11582 typeInfo::AreEquivalent(verGetArrayElemType(tiArray), arrayElemType),
11585 tiRetVal = arrayElemType;
11586 tiRetVal.MakeByRef();
11587 if (prefixFlags & PREFIX_READONLY)
11589 tiRetVal.SetIsReadonlyByRef();
11592 // an array interior pointer is always in the heap
11593 tiRetVal.SetIsPermanentHomeByRef();
11596 // If it's a value class array we just do a simple address-of
11597 if (eeIsValueClass(ldelemClsHnd))
11599 CorInfoType cit = info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd);
11600 if (cit == CORINFO_TYPE_UNDEF)
11602 lclTyp = TYP_STRUCT;
11606 lclTyp = JITtype2varType(cit);
11608 goto ARR_LD_POST_VERIFY;
11611 // Similarly, if it's a readonly access, we can do a simple address-of
11612 // without doing a runtime type-check
11613 if (prefixFlags & PREFIX_READONLY)
11616 goto ARR_LD_POST_VERIFY;
11619 // Otherwise we need the full helper function with run-time type check
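// Illustrative C# equivalent (an assumption, not from this file): because of
// array covariance, taking "ref oarr[0]" where oarr's runtime type is string[]
// must throw ArrayTypeMismatchException, so the helper checks the exact
// element type before handing out the byref:
//     object[] oarr = new string[1]; // legal: array covariance
//     ref object r = ref oarr[0];    // ldelema -> ArrayTypeMismatchException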
11620 op1 = impTokenToHandle(&resolvedToken);
11621 if (op1 == nullptr)
11622 { // compDonotInline()
11626 args = gtNewArgList(op1); // Type
11627 args = gtNewListNode(impPopStack().val, args); // index
11628 args = gtNewListNode(impPopStack().val, args); // array
11629 op1 = gtNewHelperCallNode(CORINFO_HELP_LDELEMA_REF, TYP_BYREF, args);
11631 impPushOnStack(op1, tiRetVal);
11634 // ldelem for reference and value types
11636 assertImp(sz == sizeof(unsigned));
11638 _impResolveToken(CORINFO_TOKENKIND_Class);
11640 JITDUMP(" %08X", resolvedToken.token);
11642 ldelemClsHnd = resolvedToken.hClass;
11644 if (tiVerificationNeeded)
11646 typeInfo tiArray = impStackTop(1).seTypeInfo;
11647 typeInfo tiIndex = impStackTop().seTypeInfo;
11649 // As per ECMA, the 'index' operand can be either an int32 or a native int.
11650 Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
11651 tiRetVal = verMakeTypeInfo(ldelemClsHnd);
11653 Verify(tiArray.IsNullObjRef() || tiCompatibleWith(verGetArrayElemType(tiArray), tiRetVal, false),
11654 "type of array incompatible with type operand");
11655 tiRetVal.NormaliseForStack();
11658 // If it's a reference type or generic variable type
11659 // then just generate code as though it's a ldelem.ref instruction
11660 if (!eeIsValueClass(ldelemClsHnd))
11663 opcode = CEE_LDELEM_REF;
11667 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(ldelemClsHnd);
11668 lclTyp = JITtype2varType(jitTyp);
11669 tiRetVal = verMakeTypeInfo(ldelemClsHnd); // precise type always needed for struct
11670 tiRetVal.NormaliseForStack();
11672 goto ARR_LD_POST_VERIFY;
11674 case CEE_LDELEM_I1:
11677 case CEE_LDELEM_I2:
11678 lclTyp = TYP_SHORT;
11681 lclTyp = TYP_I_IMPL;
11684 // Should be UINT, but since no platform widens 4->8 bytes it doesn't matter
11685 // and treating it as TYP_INT avoids other asserts.
11686 case CEE_LDELEM_U4:
11690 case CEE_LDELEM_I4:
11693 case CEE_LDELEM_I8:
11696 case CEE_LDELEM_REF:
11699 case CEE_LDELEM_R4:
11700 lclTyp = TYP_FLOAT;
11702 case CEE_LDELEM_R8:
11703 lclTyp = TYP_DOUBLE;
11705 case CEE_LDELEM_U1:
11706 lclTyp = TYP_UBYTE;
11708 case CEE_LDELEM_U2:
11709 lclTyp = TYP_USHORT;
11714 if (tiVerificationNeeded)
11716 typeInfo tiArray = impStackTop(1).seTypeInfo;
11717 typeInfo tiIndex = impStackTop().seTypeInfo;
11719 // As per ECMA, the 'index' operand can be either an int32 or a native int.
11720 Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
11721 if (tiArray.IsNullObjRef())
11723 if (lclTyp == TYP_REF)
11724 { // we will say a deref of a null array yields a null ref
11725 tiRetVal = typeInfo(TI_NULL);
11729 tiRetVal = typeInfo(lclTyp);
11734 tiRetVal = verGetArrayElemType(tiArray);
11735 typeInfo arrayElemTi = typeInfo(lclTyp);
11736 #ifdef _TARGET_64BIT_
11737 if (opcode == CEE_LDELEM_I)
11739 arrayElemTi = typeInfo::nativeInt();
11742 if (lclTyp != TYP_REF && lclTyp != TYP_STRUCT)
11744 Verify(typeInfo::AreEquivalent(tiRetVal, arrayElemTi), "bad array");
11747 #endif // _TARGET_64BIT_
11749 Verify(tiRetVal.IsType(arrayElemTi.GetType()), "bad array");
11752 tiRetVal.NormaliseForStack();
11754 ARR_LD_POST_VERIFY:
11756 /* Pull the index value and array address */
11757 op2 = impPopStack().val;
11758 op1 = impPopStack().val;
11759 assertImp(op1->gtType == TYP_REF);
11761 /* Check for null pointer - in the inliner case we simply abort */
11763 if (compIsForInlining())
11765 if (op1->gtOper == GT_CNS_INT)
11767 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NULL_FOR_LDELEM);
11772 op1 = impCheckForNullPointer(op1);
11774 /* Mark the block as containing an index expression */
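// Illustrative: this recognizes simple patterns such as "arrLcl[i]" or
// "arrLcl[4]". BBF_HAS_IDX_LEN is assumed to tell later phases (e.g. loop
// cloning / range-check elimination) that this block is worth scanning for
// array index/length expressions.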
11776 if (op1->gtOper == GT_LCL_VAR)
11778 if (op2->gtOper == GT_LCL_VAR || op2->gtOper == GT_CNS_INT || op2->gtOper == GT_ADD)
11780 block->bbFlags |= BBF_HAS_IDX_LEN;
11781 optMethodFlags |= OMF_HAS_ARRAYREF;
11785 /* Create the index node and push it on the stack */
11787 op1 = gtNewIndexRef(lclTyp, op1, op2);
11789 ldstruct = (opcode == CEE_LDELEM && lclTyp == TYP_STRUCT);
11791 if ((opcode == CEE_LDELEMA) || ldstruct ||
11792 (ldelemClsHnd != DUMMY_INIT(NULL) && eeIsValueClass(ldelemClsHnd)))
11794 assert(ldelemClsHnd != DUMMY_INIT(NULL));
11796 // remember the element size
11797 if (lclTyp == TYP_REF)
11799 op1->gtIndex.gtIndElemSize = TARGET_POINTER_SIZE;
11803 // If ldElemClass is precisely a primitive type, use that; otherwise, preserve the struct type.
11804 if (info.compCompHnd->getTypeForPrimitiveValueClass(ldelemClsHnd) == CORINFO_TYPE_UNDEF)
11806 op1->gtIndex.gtStructElemClass = ldelemClsHnd;
11808 assert(lclTyp != TYP_STRUCT || op1->gtIndex.gtStructElemClass != nullptr);
11809 if (lclTyp == TYP_STRUCT)
11811 size = info.compCompHnd->getClassSize(ldelemClsHnd);
11812 op1->gtIndex.gtIndElemSize = size;
11813 op1->gtType = lclTyp;
11817 if ((opcode == CEE_LDELEMA) || ldstruct)
11820 lclTyp = TYP_BYREF;
11822 op1 = gtNewOperNode(GT_ADDR, lclTyp, op1);
11826 assert(lclTyp != TYP_STRUCT);
11832 // Create an OBJ for the result
11833 op1 = gtNewObjNode(ldelemClsHnd, op1);
11834 op1->gtFlags |= GTF_EXCEPT;
11836 impPushOnStack(op1, tiRetVal);
11839 // stelem for reference and value types
11842 assertImp(sz == sizeof(unsigned));
11844 _impResolveToken(CORINFO_TOKENKIND_Class);
11846 JITDUMP(" %08X", resolvedToken.token);
11848 stelemClsHnd = resolvedToken.hClass;
11850 if (tiVerificationNeeded)
11852 typeInfo tiArray = impStackTop(2).seTypeInfo;
11853 typeInfo tiIndex = impStackTop(1).seTypeInfo;
11854 typeInfo tiValue = impStackTop().seTypeInfo;
11856 // As per ECMA, the 'index' operand can be either an int32 or a native int.
11857 Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
11858 typeInfo arrayElem = verMakeTypeInfo(stelemClsHnd);
11860 Verify(tiArray.IsNullObjRef() || tiCompatibleWith(arrayElem, verGetArrayElemType(tiArray), false),
11861 "type operand incompatible with array element type");
11862 arrayElem.NormaliseForStack();
11863 Verify(tiCompatibleWith(tiValue, arrayElem, true), "value incompatible with type operand");
11866 // If it's a reference type just behave as though it's a stelem.ref instruction
11867 if (!eeIsValueClass(stelemClsHnd))
11869 goto STELEM_REF_POST_VERIFY;
11872 // Otherwise extract the type
11874 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(stelemClsHnd);
11875 lclTyp = JITtype2varType(jitTyp);
11876 goto ARR_ST_POST_VERIFY;
11879 case CEE_STELEM_REF:
11881 if (tiVerificationNeeded)
11883 typeInfo tiArray = impStackTop(2).seTypeInfo;
11884 typeInfo tiIndex = impStackTop(1).seTypeInfo;
11885 typeInfo tiValue = impStackTop().seTypeInfo;
11887 // As per ECMA, the 'index' operand can be either an int32 or a native int.
11888 Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
11889 Verify(tiValue.IsObjRef(), "bad value");
11891 // We only check that it is an object reference; the helper does additional checks
11892 Verify(tiArray.IsNullObjRef() || verGetArrayElemType(tiArray).IsType(TI_REF), "bad array");
11895 STELEM_REF_POST_VERIFY:
11897 arrayNodeTo = impStackTop(2).val;
11898 arrayNodeToIndex = impStackTop(1).val;
11899 arrayNodeFrom = impStackTop().val;
11902 // Note that it is not legal to optimize away CORINFO_HELP_ARRADDR_ST in a
11903 // lot of cases because of covariance, e.g. foo[] can be cast to object[].
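// Illustrative C# (not from this file) of why the covariant store check is
// required at run time:
//     object[] oarr = new string[1]; // legal: array covariance
//     oarr[0] = new object();        // must throw ArrayTypeMismatchException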
11906 // Check for assignment to the same array, e.g. arrLcl[i] = arrLcl[j]
11907 // This does not need CORINFO_HELP_ARRADDR_ST
11908 if (arrayNodeFrom->OperGet() == GT_INDEX && arrayNodeFrom->gtOp.gtOp1->gtOper == GT_LCL_VAR &&
11909 arrayNodeTo->gtOper == GT_LCL_VAR &&
11910 arrayNodeTo->gtLclVarCommon.gtLclNum == arrayNodeFrom->gtOp.gtOp1->gtLclVarCommon.gtLclNum &&
11911 !lvaTable[arrayNodeTo->gtLclVarCommon.gtLclNum].lvAddrExposed)
11913 JITDUMP("\nstelem of ref from same array: skipping covariant store check\n");
11915 goto ARR_ST_POST_VERIFY;
11918 // Check for assignment of NULL. This does not need CORINFO_HELP_ARRADDR_ST
11919 if (arrayNodeFrom->OperGet() == GT_CNS_INT)
11921 JITDUMP("\nstelem of null: skipping covariant store check\n");
11922 assert(arrayNodeFrom->gtType == TYP_REF && arrayNodeFrom->gtIntCon.gtIconVal == 0);
11924 goto ARR_ST_POST_VERIFY;
11927 /* Call a helper function to do the assignment */
11928 op1 = gtNewHelperCallNode(CORINFO_HELP_ARRADDR_ST, TYP_VOID, impPopList(3, nullptr));
11932 case CEE_STELEM_I1:
11935 case CEE_STELEM_I2:
11936 lclTyp = TYP_SHORT;
11939 lclTyp = TYP_I_IMPL;
11941 case CEE_STELEM_I4:
11944 case CEE_STELEM_I8:
11947 case CEE_STELEM_R4:
11948 lclTyp = TYP_FLOAT;
11950 case CEE_STELEM_R8:
11951 lclTyp = TYP_DOUBLE;
11956 if (tiVerificationNeeded)
11958 typeInfo tiArray = impStackTop(2).seTypeInfo;
11959 typeInfo tiIndex = impStackTop(1).seTypeInfo;
11960 typeInfo tiValue = impStackTop().seTypeInfo;
11962 // As per ECMA, the 'index' operand can be either an int32 or a native int.
11963 Verify(tiIndex.IsIntOrNativeIntType(), "bad index");
11964 typeInfo arrayElem = typeInfo(lclTyp);
11965 #ifdef _TARGET_64BIT_
11966 if (opcode == CEE_STELEM_I)
11968 arrayElem = typeInfo::nativeInt();
11970 #endif // _TARGET_64BIT_
11971 Verify(tiArray.IsNullObjRef() || typeInfo::AreEquivalent(verGetArrayElemType(tiArray), arrayElem),
11974 Verify(tiCompatibleWith(NormaliseForStack(tiValue), arrayElem.NormaliseForStack(), true),
11978 ARR_ST_POST_VERIFY:
11979 /* The strict order of evaluation is LHS-operands, RHS-operands,
11980 range-check, and then assignment. However, codegen currently
11981 does the range-check before evaluating the RHS-operands. So to
11982 maintain strict ordering, we spill the stack. */
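// Illustrative: in "arr[i] = M()", ECMA ordering requires M() to run (and
// any exception it throws to surface) before an out-of-range "i" may fault,
// so a side-effecting value sitting on the stack is spilled first.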
11984 if (impStackTop().val->gtFlags & GTF_SIDE_EFFECT)
11986 impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
11987 "Strict ordering of exceptions for Array store"));
11990 /* Pull the new value from the stack */
11991 op2 = impPopStack().val;
11993 /* Pull the index value */
11994 op1 = impPopStack().val;
11996 /* Pull the array address */
11997 op3 = impPopStack().val;
11999 assertImp(op3->gtType == TYP_REF);
12000 if (op2->IsVarAddr())
12002 op2->gtType = TYP_I_IMPL;
12005 op3 = impCheckForNullPointer(op3);
12007 // Mark the block as containing an index expression
12009 if (op3->gtOper == GT_LCL_VAR)
12011 if (op1->gtOper == GT_LCL_VAR || op1->gtOper == GT_CNS_INT || op1->gtOper == GT_ADD)
12013 block->bbFlags |= BBF_HAS_IDX_LEN;
12014 optMethodFlags |= OMF_HAS_ARRAYREF;
12018 /* Create the index node */
12020 op1 = gtNewIndexRef(lclTyp, op3, op1);
12022 /* Create the assignment node and append it */
12024 if (lclTyp == TYP_STRUCT)
12026 assert(stelemClsHnd != DUMMY_INIT(NULL));
12028 op1->gtIndex.gtStructElemClass = stelemClsHnd;
12029 op1->gtIndex.gtIndElemSize = info.compCompHnd->getClassSize(stelemClsHnd);
12031 if (varTypeIsStruct(op1))
12033 op1 = impAssignStruct(op1, op2, stelemClsHnd, (unsigned)CHECK_SPILL_ALL);
12037 op2 = impImplicitR4orR8Cast(op2, op1->TypeGet());
12038 op1 = gtNewAssignNode(op1, op2);
12041 /* Mark the expression as containing an assignment */
12043 op1->gtFlags |= GTF_ASG;
12054 case CEE_ADD_OVF_UN:
12062 goto MATH_OP2_FLAGS;
12071 case CEE_SUB_OVF_UN:
12079 goto MATH_OP2_FLAGS;
12083 goto MATH_MAYBE_CALL_NO_OVF;
12088 case CEE_MUL_OVF_UN:
12095 goto MATH_MAYBE_CALL_OVF;
12097 // Other binary math operations
12101 goto MATH_MAYBE_CALL_NO_OVF;
12105 goto MATH_MAYBE_CALL_NO_OVF;
12109 goto MATH_MAYBE_CALL_NO_OVF;
12113 goto MATH_MAYBE_CALL_NO_OVF;
12115 MATH_MAYBE_CALL_NO_OVF:
12117 MATH_MAYBE_CALL_OVF:
12118 // Morpher has some complex logic about when to turn different
12119 // typed nodes on different platforms into helper calls. We
12120 // need to either duplicate that logic here, or just
12121 // pessimistically make all the nodes large enough to become
12122 // call nodes. Since call nodes aren't that much larger and
12123 // these opcodes are infrequent enough I chose the latter.
12125 goto MATH_OP2_FLAGS;
12137 MATH_OP2: // For default values of 'ovfl' and 'callNode'
12142 MATH_OP2_FLAGS: // If 'ovfl' and 'callNode' have already been set
12144 /* Pull two values and push back the result */
12146 if (tiVerificationNeeded)
12148 const typeInfo& tiOp1 = impStackTop(1).seTypeInfo;
12149 const typeInfo& tiOp2 = impStackTop().seTypeInfo;
12151 Verify(tiCompatibleWith(tiOp1, tiOp2, true), "different arg type");
12152 if (oper == GT_ADD || oper == GT_DIV || oper == GT_SUB || oper == GT_MUL || oper == GT_MOD)
12154 Verify(tiOp1.IsNumberType(), "not number");
12158 Verify(tiOp1.IsIntegerType(), "not integer");
12161 Verify(!ovfl || tiOp1.IsIntegerType(), "not integer");
12165 #ifdef _TARGET_64BIT_
12166 if (tiOp2.IsNativeIntType())
12170 #endif // _TARGET_64BIT_
12173 op2 = impPopStack().val;
12174 op1 = impPopStack().val;
12176 #if !CPU_HAS_FP_SUPPORT
12177 if (varTypeIsFloating(op1->gtType))
12182 /* Can't do arithmetic with references */
12183 assertImp(genActualType(op1->TypeGet()) != TYP_REF && genActualType(op2->TypeGet()) != TYP_REF);
12185 // Change both to TYP_I_IMPL (impBashVarAddrsToI won't change a true byref; it only
12186 // changes one that points into the stack)
12187 impBashVarAddrsToI(op1, op2);
12189 type = impGetByRefResultType(oper, uns, &op1, &op2);
12191 assert(!ovfl || !varTypeIsFloating(op1->gtType));
12193 /* Special case: "int+0", "int-0", "int*1", "int/1" */
12195 if (op2->gtOper == GT_CNS_INT)
12197 if ((op2->IsIntegralConst(0) && (oper == GT_ADD || oper == GT_SUB)) ||
12198 (op2->IsIntegralConst(1) && (oper == GT_MUL || oper == GT_DIV)))
12201 impPushOnStack(op1, tiRetVal);
12206 // We can generate a TYP_FLOAT operation that has a TYP_DOUBLE operand
12208 if (varTypeIsFloating(type) && varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType))
12210 if (op1->TypeGet() != type)
12212 // We insert a cast of op1 to 'type'
12213 op1 = gtNewCastNode(type, op1, false, type);
12215 if (op2->TypeGet() != type)
12217 // We insert a cast of op2 to 'type'
12218 op2 = gtNewCastNode(type, op2, false, type);
12222 #if SMALL_TREE_NODES
12225 /* These operators can later be transformed into 'GT_CALL' */
12227 assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MUL]);
12228 #ifndef _TARGET_ARM_
12229 assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_DIV]);
12230 assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UDIV]);
12231 assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_MOD]);
12232 assert(GenTree::s_gtNodeSizes[GT_CALL] > GenTree::s_gtNodeSizes[GT_UMOD]);
12234 // It's tempting to use LargeOpOpcode() here, but this logic is *not* saying
12235 // that we'll need to transform into a general large node, but rather specifically
12236 // to a call: by doing it this way, things keep working if there are multiple sizes,
12237 // and a CALL is no longer the largest.
12238 // That said, as of now it *is* a large node, so we'll do this with an assert rather
12240 assert(GenTree::s_gtNodeSizes[GT_CALL] == TREE_NODE_SZ_LARGE);
12241 op1 = new (this, GT_CALL) GenTreeOp(oper, type, op1, op2 DEBUGARG(/*largeNode*/ true));
12244 #endif // SMALL_TREE_NODES
12246 op1 = gtNewOperNode(oper, type, op1, op2);
12249 /* Special case: integer/long division may throw an exception */
12251 if (varTypeIsIntegral(op1->TypeGet()) && op1->OperMayThrow(this))
12253 op1->gtFlags |= GTF_EXCEPT;
12258 assert(oper == GT_ADD || oper == GT_SUB || oper == GT_MUL);
12259 if (ovflType != TYP_UNKNOWN)
12261 op1->gtType = ovflType;
12263 op1->gtFlags |= (GTF_EXCEPT | GTF_OVERFLOW);
12266 op1->gtFlags |= GTF_UNSIGNED;
12270 impPushOnStack(op1, tiRetVal);
12285 if (tiVerificationNeeded)
12287 const typeInfo& tiVal = impStackTop(1).seTypeInfo;
12288 const typeInfo& tiShift = impStackTop(0).seTypeInfo;
12289 Verify(tiVal.IsIntegerType() && tiShift.IsType(TI_INT), "Bad shift args");
12292 op2 = impPopStack().val;
12293 op1 = impPopStack().val; // operand to be shifted
12294 impBashVarAddrsToI(op1, op2);
12296 type = genActualType(op1->TypeGet());
12297 op1 = gtNewOperNode(oper, type, op1, op2);
12299 impPushOnStack(op1, tiRetVal);
12303 if (tiVerificationNeeded)
12305 tiRetVal = impStackTop().seTypeInfo;
12306 Verify(tiRetVal.IsIntegerType(), "bad int value");
12309 op1 = impPopStack().val;
12310 impBashVarAddrsToI(op1, nullptr);
12311 type = genActualType(op1->TypeGet());
12312 impPushOnStack(gtNewOperNode(GT_NOT, type, op1), tiRetVal);
12316 if (tiVerificationNeeded)
12318 tiRetVal = impStackTop().seTypeInfo;
12319 Verify(tiRetVal.IsType(TI_DOUBLE), "bad R value");
12321 op1 = impPopStack().val;
12322 type = op1->TypeGet();
12323 op1 = gtNewOperNode(GT_CKFINITE, type, op1);
12324 op1->gtFlags |= GTF_EXCEPT;
12326 impPushOnStack(op1, tiRetVal);
12331 val = getI4LittleEndian(codeAddr); // jump distance
12332 jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int32)) + val);
12336 val = getI1LittleEndian(codeAddr); // jump distance
12337 jmpAddr = (IL_OFFSET)((codeAddr - info.compCode + sizeof(__int8)) + val);
12341 if (compIsForInlining())
12343 compInlineResult->NoteFatal(InlineObservation::CALLEE_HAS_LEAVE);
12347 JITDUMP(" %04X", jmpAddr);
12348 if (block->bbJumpKind != BBJ_LEAVE)
12350 impResetLeaveBlock(block, jmpAddr);
12353 assert(jmpAddr == block->bbJumpDest->bbCodeOffs);
12354 impImportLeave(block);
12355 impNoteBranchOffs();
12361 jmpDist = (sz == 1) ? getI1LittleEndian(codeAddr) : getI4LittleEndian(codeAddr);
12363 if (compIsForInlining() && jmpDist == 0)
12368 impNoteBranchOffs();
12374 case CEE_BRFALSE_S:
12376 /* Pop the comparand (now there's a neat term) from the stack */
12377 if (tiVerificationNeeded)
12379 typeInfo& tiVal = impStackTop().seTypeInfo;
12380 Verify(tiVal.IsObjRef() || tiVal.IsByRef() || tiVal.IsIntegerType() || tiVal.IsMethod(),
12384 op1 = impPopStack().val;
12385 type = op1->TypeGet();
12387 // brfalse and brtrue are only allowed on I4, refs, and byrefs.
12388 if (!opts.MinOpts() && !opts.compDbgCode && block->bbJumpDest == block->bbNext)
12390 block->bbJumpKind = BBJ_NONE;
12392 if (op1->gtFlags & GTF_GLOB_EFFECT)
12394 op1 = gtUnusedValNode(op1);
12403 if (op1->OperIsCompare())
12405 if (opcode == CEE_BRFALSE || opcode == CEE_BRFALSE_S)
12407 // Flip the sense of the compare
12409 op1 = gtReverseCond(op1);
12414 /* We'll compare against an equally-sized integer 0 */
12415 /* For small types, we always compare against int */
12416 op2 = gtNewZeroConNode(genActualType(op1->gtType));
12418 /* Create the comparison operator and try to fold it */
12420 oper = (opcode == CEE_BRTRUE || opcode == CEE_BRTRUE_S) ? GT_NE : GT_EQ;
12421 op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
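// Illustrative: "ldloc x; brtrue L" becomes GT_JTRUE(GT_NE(x, 0)), while an
// operand that is already a compare (e.g. "ceq; brfalse L") just has its
// sense reversed above instead of being compared against a zero constant.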
12428 /* Fold comparison if we can */
12430 op1 = gtFoldExpr(op1);
12432 /* Try to fold the really simple cases like 'iconst *, ifne/ifeq'*/
12433 /* Don't make any blocks unreachable in import only mode */
12435 if ((op1->gtOper == GT_CNS_INT) && !compIsForImportOnly())
12437 /* gtFoldExpr() should prevent this as we don't want to make any blocks
12438 unreachable under compDbgCode */
12439 assert(!opts.compDbgCode);
12441 BBjumpKinds foldedJumpKind = (BBjumpKinds)(op1->gtIntCon.gtIconVal ? BBJ_ALWAYS : BBJ_NONE);
12442 assertImp((block->bbJumpKind == BBJ_COND) // normal case
12443 || (block->bbJumpKind == foldedJumpKind)); // this can happen if we are reimporting the
12444 // block for the second time
12446 block->bbJumpKind = foldedJumpKind;
12450 if (op1->gtIntCon.gtIconVal)
12452 printf("\nThe conditional jump becomes an unconditional jump to " FMT_BB "\n",
12453 block->bbJumpDest->bbNum);
12457 printf("\nThe block falls through into the next " FMT_BB "\n", block->bbNext->bbNum);
12464 op1 = gtNewOperNode(GT_JTRUE, TYP_VOID, op1);
12466 /* GT_JTRUE is handled specially for non-empty stacks. See 'addStmt'
12467 in impImportBlock(block). For correct line numbers, spill stack. */
12469 if (opts.compDbgCode && impCurStmtOffs != BAD_IL_OFFSET)
12471 impSpillStackEnsure(true);
12498 if (tiVerificationNeeded)
12500 verVerifyCond(impStackTop(1).seTypeInfo, impStackTop().seTypeInfo, opcode);
12501 tiRetVal = typeInfo(TI_INT);
12504 op2 = impPopStack().val;
12505 op1 = impPopStack().val;
12507 #ifdef _TARGET_64BIT_
12508 if (varTypeIsI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_INT))
12510 op2 = gtNewCastNode(TYP_I_IMPL, op2, uns, uns ? TYP_U_IMPL : TYP_I_IMPL);
12512 else if (varTypeIsI(op2->TypeGet()) && (genActualType(op1->TypeGet()) == TYP_INT))
12514 op1 = gtNewCastNode(TYP_I_IMPL, op1, uns, uns ? TYP_U_IMPL : TYP_I_IMPL);
12516 #endif // _TARGET_64BIT_
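// Illustrative: on a 64-bit target, comparing a native-int value against an
// int32 operand (e.g. an IL "ldc.i4" constant) widens the int32 side to
// TYP_I_IMPL so both operands of the compare agree in width.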
12518 assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
12519 varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet()) ||
12520 varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
12522 /* Create the comparison node */
12524 op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
12526 /* TODO: setting both flags when only one is appropriate */
12527 if (opcode == CEE_CGT_UN || opcode == CEE_CLT_UN)
12529 op1->gtFlags |= GTF_RELOP_NAN_UN | GTF_UNSIGNED;
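// Illustrative: for floating-point operands, "cgt.un"/"clt.un" mean
// "unordered OR greater/less than", so a NaN operand makes the result 1;
// GTF_RELOP_NAN_UN records that an unordered compare must evaluate to true.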
12532 // Fold result, if possible.
12533 op1 = gtFoldExpr(op1);
12535 impPushOnStack(op1, tiRetVal);
12541 goto CMP_2_OPs_AND_BR;
12546 goto CMP_2_OPs_AND_BR;
12551 goto CMP_2_OPs_AND_BR_UN;
12556 goto CMP_2_OPs_AND_BR;
12561 goto CMP_2_OPs_AND_BR_UN;
12566 goto CMP_2_OPs_AND_BR;
12571 goto CMP_2_OPs_AND_BR_UN;
12576 goto CMP_2_OPs_AND_BR;
12581 goto CMP_2_OPs_AND_BR_UN;
12586 goto CMP_2_OPs_AND_BR_UN;
12588 CMP_2_OPs_AND_BR_UN:
12591 goto CMP_2_OPs_AND_BR_ALL;
12595 goto CMP_2_OPs_AND_BR_ALL;
12596 CMP_2_OPs_AND_BR_ALL:
12598 if (tiVerificationNeeded)
12600 verVerifyCond(impStackTop(1).seTypeInfo, impStackTop().seTypeInfo, opcode);
12603 /* Pull two values */
12604 op2 = impPopStack().val;
12605 op1 = impPopStack().val;
12607 #ifdef _TARGET_64BIT_
12608 if ((op1->TypeGet() == TYP_I_IMPL) && (genActualType(op2->TypeGet()) == TYP_INT))
12610 op2 = gtNewCastNode(TYP_I_IMPL, op2, uns, uns ? TYP_U_IMPL : TYP_I_IMPL);
12612 else if ((op2->TypeGet() == TYP_I_IMPL) && (genActualType(op1->TypeGet()) == TYP_INT))
12614 op1 = gtNewCastNode(TYP_I_IMPL, op1, uns, uns ? TYP_U_IMPL : TYP_I_IMPL);
12616 #endif // _TARGET_64BIT_
12618 assertImp(genActualType(op1->TypeGet()) == genActualType(op2->TypeGet()) ||
12619 varTypeIsI(op1->TypeGet()) && varTypeIsI(op2->TypeGet()) ||
12620 varTypeIsFloating(op1->gtType) && varTypeIsFloating(op2->gtType));
12622 if (!opts.MinOpts() && !opts.compDbgCode && block->bbJumpDest == block->bbNext)
12624 block->bbJumpKind = BBJ_NONE;
12626 if (op1->gtFlags & GTF_GLOB_EFFECT)
12628 impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
12629 "Branch to next Optimization, op1 side effect"));
12630 impAppendTree(gtUnusedValNode(op1), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
12632 if (op2->gtFlags & GTF_GLOB_EFFECT)
12634 impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG(
12635 "Branch to next Optimization, op2 side effect"));
12636 impAppendTree(gtUnusedValNode(op2), (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
12640 if ((op1->gtFlags | op2->gtFlags) & GTF_GLOB_EFFECT)
12642 impNoteLastILoffs();
12648 // We can generate a compare of differently-sized floating point op1 and op2;
12649 // we insert a cast to widen the narrower operand
12651 if (varTypeIsFloating(op1->TypeGet()))
12653 if (op1->TypeGet() != op2->TypeGet())
12655 assert(varTypeIsFloating(op2->TypeGet()));
12657 // Say op1 = double and op2 = float. To avoid loss of precision
12658 // while comparing, op2 is converted to double and a double
12659 // comparison is done.
12660 if (op1->TypeGet() == TYP_DOUBLE)
12662 // We insert a cast of op2 to TYP_DOUBLE
12663 op2 = gtNewCastNode(TYP_DOUBLE, op2, false, TYP_DOUBLE);
12665 else if (op2->TypeGet() == TYP_DOUBLE)
12667 // We insert a cast of op1 to TYP_DOUBLE
12668 op1 = gtNewCastNode(TYP_DOUBLE, op1, false, TYP_DOUBLE);
12673 /* Create and append the operator */
12675 op1 = gtNewOperNode(oper, TYP_INT, op1, op2);
12679 op1->gtFlags |= GTF_UNSIGNED;
12684 op1->gtFlags |= GTF_RELOP_NAN_UN;
12690 assert(!compIsForInlining());
12692 if (tiVerificationNeeded)
12694 Verify(impStackTop().seTypeInfo.IsType(TI_INT), "Bad switch val");
12696 /* Pop the switch value off the stack */
12697 op1 = impPopStack().val;
12698 assertImp(genActualTypeIsIntOrI(op1->TypeGet()));
12700 /* We can create a switch node */
12702 op1 = gtNewOperNode(GT_SWITCH, TYP_VOID, op1);
12704 val = (int)getU4LittleEndian(codeAddr);
12705 codeAddr += 4 + val * 4; // skip over the switch-table
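// Illustrative encoding: "switch" is followed by a 4-byte target count N and
// then N 4-byte jump offsets (relative to the instruction that follows the
// table), hence the "4 + val * 4" bytes skipped here.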
12709 /************************** Casting OPCODES ***************************/
12711 case CEE_CONV_OVF_I1:
12714 case CEE_CONV_OVF_I2:
12715 lclTyp = TYP_SHORT;
12717 case CEE_CONV_OVF_I:
12718 lclTyp = TYP_I_IMPL;
12720 case CEE_CONV_OVF_I4:
12723 case CEE_CONV_OVF_I8:
12727 case CEE_CONV_OVF_U1:
12728 lclTyp = TYP_UBYTE;
12730 case CEE_CONV_OVF_U2:
12731 lclTyp = TYP_USHORT;
12733 case CEE_CONV_OVF_U:
12734 lclTyp = TYP_U_IMPL;
12736 case CEE_CONV_OVF_U4:
12739 case CEE_CONV_OVF_U8:
12740 lclTyp = TYP_ULONG;
12743 case CEE_CONV_OVF_I1_UN:
12746 case CEE_CONV_OVF_I2_UN:
12747 lclTyp = TYP_SHORT;
12749 case CEE_CONV_OVF_I_UN:
12750 lclTyp = TYP_I_IMPL;
12752 case CEE_CONV_OVF_I4_UN:
12755 case CEE_CONV_OVF_I8_UN:
12759 case CEE_CONV_OVF_U1_UN:
12760 lclTyp = TYP_UBYTE;
12762 case CEE_CONV_OVF_U2_UN:
12763 lclTyp = TYP_USHORT;
12765 case CEE_CONV_OVF_U_UN:
12766 lclTyp = TYP_U_IMPL;
12768 case CEE_CONV_OVF_U4_UN:
12771 case CEE_CONV_OVF_U8_UN:
12772 lclTyp = TYP_ULONG;
12777 goto CONV_OVF_COMMON;
12780 goto CONV_OVF_COMMON;
12790 lclTyp = TYP_SHORT;
12793 lclTyp = TYP_I_IMPL;
12803 lclTyp = TYP_UBYTE;
12806 lclTyp = TYP_USHORT;
12808 #if (REGSIZE_BYTES == 8)
12810 lclTyp = TYP_U_IMPL;
12814 lclTyp = TYP_U_IMPL;
12821 lclTyp = TYP_ULONG;
12825 lclTyp = TYP_FLOAT;
12828 lclTyp = TYP_DOUBLE;
12831 case CEE_CONV_R_UN:
12832 lclTyp = TYP_DOUBLE;
12846 // just check that we have a number on the stack
12847 if (tiVerificationNeeded)
12849 const typeInfo& tiVal = impStackTop().seTypeInfo;
12850 Verify(tiVal.IsNumberType(), "bad arg");
12852 #ifdef _TARGET_64BIT_
12853 bool isNative = false;
12857 case CEE_CONV_OVF_I:
12858 case CEE_CONV_OVF_I_UN:
12860 case CEE_CONV_OVF_U:
12861 case CEE_CONV_OVF_U_UN:
12865 // leave 'isNative' = false;
12870 tiRetVal = typeInfo::nativeInt();
12873 #endif // _TARGET_64BIT_
12875 tiRetVal = typeInfo(lclTyp).NormaliseForStack();
12879 // Only conversions from FLOAT or DOUBLE to an integer type, and conversions
12880 // from ULONG (or LONG on ARM) to DOUBLE, are morphed to helper calls
12882 if (varTypeIsFloating(lclTyp))
12884 callNode = varTypeIsLong(impStackTop().val) || uns // uint->dbl gets turned into uint->long->dbl
12885 #ifdef _TARGET_64BIT_
12886 // TODO-ARM64-Bug?: This was AMD64; I enabled it for ARM64 also. OK?
12887 // TYP_BYREF could be used as TYP_I_IMPL which is long.
12888 // TODO-CQ: remove this when we lower casts long/ulong --> float/double
12889 // and generate SSE2 code instead of going through helper calls.
12890 || (impStackTop().val->TypeGet() == TYP_BYREF)
12896 callNode = varTypeIsFloating(impStackTop().val->TypeGet());
12899 // At this point uns, ovfl and callNode are all set
12901 op1 = impPopStack().val;
12902 impBashVarAddrsToI(op1);
12904 if (varTypeIsSmall(lclTyp) && !ovfl && op1->gtType == TYP_INT && op1->gtOper == GT_AND)
12906 op2 = op1->gtOp.gtOp2;
12908 if (op2->gtOper == GT_CNS_INT)
12910 ssize_t ival = op2->gtIntCon.gtIconVal;
12911 ssize_t mask, umask;
12927 assert(!"unexpected type");
12931 if (((ival & umask) == ival) || ((ival & mask) == ival && uns))
12933 /* Toss the cast, it's a waste of time */
12935 impPushOnStack(op1, tiRetVal);
12938 else if (ival == mask)
12940 /* Toss the masking, it's a waste of time, since
12941 we sign-extend from the small value anyway */
12943 op1 = op1->gtOp.gtOp1;
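// Illustrative: for "conv.i1" of "x & 0x7F" the mask already confines the
// value to the target range, so the cast is dropped entirely; for "conv.i1"
// of "x & 0xFF" the mask matches the cast's width, so the AND is dropped and
// only the sign-extending cast remains.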
12948 /* The 'op2' sub-operand of a cast is the 'real' type number,
12949 since the result of a cast to one of the 'small' integer
12950 types is an integer.
12953 type = genActualType(lclTyp);
12955 // If this is a no-op cast, just use op1.
12956 if (!ovfl && (type == op1->TypeGet()) && (genTypeSize(type) == genTypeSize(lclTyp)))
12958 // Nothing needs to change
12960 // Work is evidently required, add cast node
12963 #if SMALL_TREE_NODES
12966 op1 = gtNewCastNodeL(type, op1, uns, lclTyp);
12969 #endif // SMALL_TREE_NODES
12971 op1 = gtNewCastNode(type, op1, uns, lclTyp);
12976 op1->gtFlags |= (GTF_OVERFLOW | GTF_EXCEPT);
12980 impPushOnStack(op1, tiRetVal);
12984 if (tiVerificationNeeded)
12986 tiRetVal = impStackTop().seTypeInfo;
12987 Verify(tiRetVal.IsNumberType(), "Bad arg");
12990 op1 = impPopStack().val;
12991 impBashVarAddrsToI(op1, nullptr);
12992 impPushOnStack(gtNewOperNode(GT_NEG, genActualType(op1->gtType), op1), tiRetVal);
12997 /* Pull the top value from the stack */
12999 StackEntry se = impPopStack();
13000 clsHnd = se.seTypeInfo.GetClassHandle();
13003 /* Get hold of the type of the value being duplicated */
13005 lclTyp = genActualType(op1->gtType);
13007 /* Does the value have any side effects? */
13009 if ((op1->gtFlags & GTF_SIDE_EFFECT) || opts.compDbgCode)
13011 // Since we are throwing away the value, just normalize
13012 // it to its address. This is more efficient.
13014 if (varTypeIsStruct(op1))
13016 JITDUMP("\n ... CEE_POP struct ...\n");
13018 #ifdef UNIX_AMD64_ABI
13019 // Non-calls, such as obj or ret_expr, have to go through this.
13020 // Calls with a large struct return value have to go through this.
13021 // Helper calls with a small struct return value also have to go
13022 // through this since they do not follow the Unix calling convention.
13023 if (op1->gtOper != GT_CALL || !IsMultiRegReturnedType(clsHnd) ||
13024 op1->AsCall()->gtCallType == CT_HELPER)
13025 #endif // UNIX_AMD64_ABI
13027 // If the value being produced comes from loading
13028 // via an underlying address, just null check the address.
13029 if (op1->OperIs(GT_FIELD, GT_IND, GT_OBJ))
13031 op1->ChangeOper(GT_NULLCHECK);
13032 op1->gtType = TYP_BYTE;
13036 op1 = impGetStructAddr(op1, clsHnd, (unsigned)CHECK_SPILL_ALL, false);
13039 JITDUMP("\n ... optimized to ...\n");
13044 // If op1 is a non-overflow cast, throw it away since it is useless.
13045 // Another reason for throwing away the useless cast is in the context of
13046 // implicit tail calls when the operand of pop is GT_CAST(GT_CALL(..)).
13047 // The cast gets added as part of importing GT_CALL, which gets in the way
13048 // of the tail call node forms that fgMorphCall() asserts.
13049 if ((op1->gtOper == GT_CAST) && !op1->gtOverflow())
13051 op1 = op1->gtOp.gtOp1;
13054 // If 'op1' is an expression, create an assignment node.
13055 // This helps analyses (like CSE) work correctly.
13057 if (op1->gtOper != GT_CALL)
13059 op1 = gtUnusedValNode(op1);
13062 /* Append the value to the tree list */
13066 /* No side effects - just throw the <BEEP> thing away */
13072 if (tiVerificationNeeded)
13074 // Dup could start the beginning of a delegate creation sequence, so remember this point
13075 delegateCreateStart = codeAddr - 1;
13079 // If the expression to dup is simple, just clone it.
13080 // Otherwise spill it to a temp, and reload the temp
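// Illustrative: "ldc.i4.0; dup" or "ldloc x; dup" simply clones the node,
// while duplicating a call's result spills it to a temp and pushes two
// reads of that temp.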
13082 StackEntry se = impPopStack();
13083 GenTree* tree = se.val;
13084 tiRetVal = se.seTypeInfo;
op1 = tree;
13087 if (!opts.compDbgCode && !op1->IsIntegralConst(0) && !op1->IsFPZero() && !op1->IsLocal())
13089 const unsigned tmpNum = lvaGrabTemp(true DEBUGARG("dup spill"));
13090 impAssignTempGen(tmpNum, op1, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_ALL);
13091 var_types type = genActualType(lvaTable[tmpNum].TypeGet());
13092 op1 = gtNewLclvNode(tmpNum, type);
13094 // Propagate type info to the temp from the stack and the original tree
13095 if (type == TYP_REF)
13097 assert(lvaTable[tmpNum].lvSingleDef == 0);
13098 lvaTable[tmpNum].lvSingleDef = 1;
13099 JITDUMP("Marked V%02u as a single def local\n", tmpNum);
13100 lvaSetClass(tmpNum, tree, tiRetVal.GetClassHandle());
13104 op1 = impCloneExpr(op1, &op2, tiRetVal.GetClassHandle(), (unsigned)CHECK_SPILL_ALL,
13105 nullptr DEBUGARG("DUP instruction"));
13107 assert(!(op1->gtFlags & GTF_GLOB_EFFECT) && !(op2->gtFlags & GTF_GLOB_EFFECT));
13108 impPushOnStack(op1, tiRetVal);
13109 impPushOnStack(op2, tiRetVal);
13117 lclTyp = TYP_SHORT;
13126 lclTyp = TYP_I_IMPL;
13128 case CEE_STIND_REF:
13132 lclTyp = TYP_FLOAT;
13135 lclTyp = TYP_DOUBLE;
13139 if (tiVerificationNeeded)
13141 typeInfo instrType(lclTyp);
13142 #ifdef _TARGET_64BIT_
13143 if (opcode == CEE_STIND_I)
13145 instrType = typeInfo::nativeInt();
13147 #endif // _TARGET_64BIT_
13148 verVerifySTIND(impStackTop(1).seTypeInfo, impStackTop(0).seTypeInfo, instrType);
13152 compUnsafeCastUsed = true; // Have to go conservative
13157 op2 = impPopStack().val; // value to store
13158 op1 = impPopStack().val; // address to store to
13160 // you can indirect off of a TYP_I_IMPL (if we are in C) or a BYREF
13161 assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
13163 impBashVarAddrsToI(op1, op2);
13165 op2 = impImplicitR4orR8Cast(op2, lclTyp);
13167 #ifdef _TARGET_64BIT_
13168 // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
13169 if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType))
13171 op2->gtType = TYP_I_IMPL;
13175 // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
13177 if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT))
13179 assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
13180 op2 = gtNewCastNode(TYP_INT, op2, false, TYP_INT);
13182 // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
13184 if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT))
13186 assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
13187 op2 = gtNewCastNode(TYP_I_IMPL, op2, false, TYP_I_IMPL);
13190 #endif // _TARGET_64BIT_
13192 if (opcode == CEE_STIND_REF)
13194 // STIND_REF can be used to store TYP_INT, TYP_I_IMPL, TYP_REF, or TYP_BYREF
13195 assertImp(varTypeIsIntOrI(op2->gtType) || varTypeIsGC(op2->gtType));
13196 lclTyp = genActualType(op2->TypeGet());
13199 // Check target type.
13201 if (op2->gtType == TYP_BYREF || lclTyp == TYP_BYREF)
13203 if (op2->gtType == TYP_BYREF)
13205 assertImp(lclTyp == TYP_BYREF || lclTyp == TYP_I_IMPL);
13207 else if (lclTyp == TYP_BYREF)
13209 assertImp(op2->gtType == TYP_BYREF || varTypeIsIntOrI(op2->gtType));
13214 assertImp(genActualType(op2->gtType) == genActualType(lclTyp) ||
13215 ((lclTyp == TYP_I_IMPL) && (genActualType(op2->gtType) == TYP_INT)) ||
13216 (varTypeIsFloating(op2->gtType) && varTypeIsFloating(lclTyp)));
13220 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
13222 // stind could point anywhere, e.g. a boxed class static int
13223 op1->gtFlags |= GTF_IND_TGTANYWHERE;
13225 if (prefixFlags & PREFIX_VOLATILE)
13227 assert(op1->OperGet() == GT_IND);
13228 op1->gtFlags |= GTF_DONT_CSE; // Can't CSE a volatile
13229 op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
13230 op1->gtFlags |= GTF_IND_VOLATILE;
13233 if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp))
13235 assert(op1->OperGet() == GT_IND);
13236 op1->gtFlags |= GTF_IND_UNALIGNED;
13239 op1 = gtNewAssignNode(op1, op2);
13240 op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
13242 // Spill side-effects AND global-data-accesses
13243 if (verCurrentState.esStackDepth > 0)
13245 impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STIND"));
13254 lclTyp = TYP_SHORT;
13263 case CEE_LDIND_REF:
13267 lclTyp = TYP_I_IMPL;
13270 lclTyp = TYP_FLOAT;
13273 lclTyp = TYP_DOUBLE;
13276 lclTyp = TYP_UBYTE;
13279 lclTyp = TYP_USHORT;
13283 if (tiVerificationNeeded)
13285 typeInfo lclTiType(lclTyp);
13286 #ifdef _TARGET_64BIT_
13287 if (opcode == CEE_LDIND_I)
13289 lclTiType = typeInfo::nativeInt();
13291 #endif // _TARGET_64BIT_
13292 tiRetVal = verVerifyLDIND(impStackTop().seTypeInfo, lclTiType);
13293 tiRetVal.NormaliseForStack();
13297 compUnsafeCastUsed = true; // Have to go conservative
13302 op1 = impPopStack().val; // address to load from
13303 impBashVarAddrsToI(op1);
13305 #ifdef _TARGET_64BIT_
13306 // Allow an upcast of op1 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
13308 if (genActualType(op1->gtType) == TYP_INT)
13310 assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
13311 op1 = gtNewCastNode(TYP_I_IMPL, op1, false, TYP_I_IMPL);
13315 assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
13317 op1 = gtNewOperNode(GT_IND, lclTyp, op1);
13319 // ldind could point anywhere, e.g. a boxed class static int
13320 op1->gtFlags |= (GTF_EXCEPT | GTF_GLOB_REF | GTF_IND_TGTANYWHERE);
13322 if (prefixFlags & PREFIX_VOLATILE)
13324 assert(op1->OperGet() == GT_IND);
13325 op1->gtFlags |= GTF_DONT_CSE; // Can't CSE a volatile
13326 op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
13327 op1->gtFlags |= GTF_IND_VOLATILE;
13330 if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp))
13332 assert(op1->OperGet() == GT_IND);
13333 op1->gtFlags |= GTF_IND_UNALIGNED;
13336 impPushOnStack(op1, tiRetVal);
13340 case CEE_UNALIGNED:
13343 val = getU1LittleEndian(codeAddr);
13345 JITDUMP(" %u", val);
13346 if ((val != 1) && (val != 2) && (val != 4))
13348 BADCODE("Alignment unaligned. must be 1, 2, or 4");
13351 Verify(!(prefixFlags & PREFIX_UNALIGNED), "Multiple unaligned. prefixes");
13352 prefixFlags |= PREFIX_UNALIGNED;
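// Illustrative IL (not from this file): "unaligned. 1  ldind.i4" promises
// only 1-byte alignment for the upcoming load, so codegen must not assume
// the natural 4-byte alignment.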
13354 impValidateMemoryAccessOpcode(codeAddr, codeEndp, false);
13357 opcode = (OPCODE)getU1LittleEndian(codeAddr);
13358 opcodeOffs = (IL_OFFSET)(codeAddr - info.compCode);
13359 codeAddr += sizeof(__int8);
13360 goto DECODE_OPCODE;
13364 Verify(!(prefixFlags & PREFIX_VOLATILE), "Multiple volatile. prefixes");
13365 prefixFlags |= PREFIX_VOLATILE;
13367 impValidateMemoryAccessOpcode(codeAddr, codeEndp, true);
13374 // Need to do a lookup here so that we perform an access check
13375 // and do a NOWAY if protections are violated
13376 _impResolveToken(CORINFO_TOKENKIND_Method);
13378 JITDUMP(" %08X", resolvedToken.token);
13380 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef*/,
13381 addVerifyFlag(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN)),
13384 // This check really only applies to intrinsic Array.Address methods
13385 if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE)
13387 NO_WAY("Currently do not support LDFTN of Parameterized functions");
13390 // Do this before DO_LDFTN since CEE_LDVIRTFTN does it on its own.
13391 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
13393 if (tiVerificationNeeded)
13395 // LDFTN could start the beginning of a delegate creation sequence, so remember this point
13396 delegateCreateStart = codeAddr - 2;
13398 // check any constraints on the callee's class and type parameters
13399 VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
13400 "method has unsatisfied class constraints");
13401 VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(resolvedToken.hClass,
13402 resolvedToken.hMethod),
13403 "method has unsatisfied method constraints");
13405 mflags = callInfo.verMethodFlags;
13406 Verify(!(mflags & CORINFO_FLG_CONSTRUCTOR), "LDFTN on a constructor");
13410 op1 = impMethodPointer(&resolvedToken, &callInfo);
13412 if (compDonotInline())
13417 // Call info may have more precise information about the function than
13418 // the resolved token.
13419 CORINFO_RESOLVED_TOKEN* heapToken = impAllocateToken(resolvedToken);
13420 assert(callInfo.hMethod != nullptr);
13421 heapToken->hMethod = callInfo.hMethod;
13422 impPushOnStack(op1, typeInfo(heapToken));
13427 case CEE_LDVIRTFTN:
13429 /* Get the method token */
13431 _impResolveToken(CORINFO_TOKENKIND_Method);
13433 JITDUMP(" %08X", resolvedToken.token);
13435 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef */,
13436 addVerifyFlag(combine(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_LDFTN),
13437 CORINFO_CALLINFO_CALLVIRT)),
13440 // This check really only applies to intrinsic Array.Address methods
13441 if (callInfo.sig.callConv & CORINFO_CALLCONV_PARAMTYPE)
13443 NO_WAY("Currently do not support LDFTN of Parameterized functions");
13446 mflags = callInfo.methodFlags;
13448 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
13450 if (compIsForInlining())
13452 if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL))
13454 compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDVIRTFN_ON_NON_VIRTUAL);
13459 CORINFO_SIG_INFO& ftnSig = callInfo.sig;
13461 if (tiVerificationNeeded)
13464 Verify(ftnSig.hasThis(), "ldvirtftn on a static method");
13465 Verify(!(mflags & CORINFO_FLG_CONSTRUCTOR), "LDVIRTFTN on a constructor");
13467 // JIT32 verifier rejects verifiable ldvirtftn pattern
13468 typeInfo declType =
13469 verMakeTypeInfo(resolvedToken.hClass, true); // Change TI_STRUCT to TI_REF when necessary
13471 typeInfo arg = impStackTop().seTypeInfo;
13472 Verify((arg.IsType(TI_REF) || arg.IsType(TI_NULL)) && tiCompatibleWith(arg, declType, true),
13475 CORINFO_CLASS_HANDLE instanceClassHnd = info.compClassHnd;
13476 if (!(arg.IsType(TI_NULL) || (mflags & CORINFO_FLG_STATIC)))
13478 instanceClassHnd = arg.GetClassHandleForObjRef();
13481 // check any constraints on the method's class and type parameters
13482 VerifyOrReturn(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
13483 "method has unsatisfied class constraints");
13484 VerifyOrReturn(info.compCompHnd->satisfiesMethodConstraints(resolvedToken.hClass,
13485 resolvedToken.hMethod),
13486 "method has unsatisfied method constraints");
13488 if (mflags & CORINFO_FLG_PROTECTED)
13490 Verify(info.compCompHnd->canAccessFamily(info.compMethodHnd, instanceClassHnd),
13491 "Accessing protected method through wrong type.");
13495 /* Get the object-ref */
13496 op1 = impPopStack().val;
13497 assertImp(op1->gtType == TYP_REF);
13499 if (opts.IsReadyToRun())
13501 if (callInfo.kind != CORINFO_VIRTUALCALL_LDVIRTFTN)
13503 if (op1->gtFlags & GTF_SIDE_EFFECT)
13505 op1 = gtUnusedValNode(op1);
13506 impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13511 else if (mflags & (CORINFO_FLG_FINAL | CORINFO_FLG_STATIC) || !(mflags & CORINFO_FLG_VIRTUAL))
13513 if (op1->gtFlags & GTF_SIDE_EFFECT)
13515 op1 = gtUnusedValNode(op1);
13516 impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
13521 GenTree* fptr = impImportLdvirtftn(op1, &resolvedToken, &callInfo);
13522 if (compDonotInline())
13527 CORINFO_RESOLVED_TOKEN* heapToken = impAllocateToken(resolvedToken);
13529 assert(heapToken->tokenType == CORINFO_TOKENKIND_Method);
13530 assert(callInfo.hMethod != nullptr);
13532 heapToken->tokenType = CORINFO_TOKENKIND_Ldvirtftn;
13533 heapToken->hMethod = callInfo.hMethod;
13534 impPushOnStack(fptr, typeInfo(heapToken));
13539 case CEE_CONSTRAINED:
13541 assertImp(sz == sizeof(unsigned));
13542 impResolveToken(codeAddr, &constrainedResolvedToken, CORINFO_TOKENKIND_Constrained);
13543 codeAddr += sizeof(unsigned); // prefix instructions must increment codeAddr manually
13544 JITDUMP(" (%08X) ", constrainedResolvedToken.token);
13546 Verify(!(prefixFlags & PREFIX_CONSTRAINED), "Multiple constrained. prefixes");
13547 prefixFlags |= PREFIX_CONSTRAINED;
13550 OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
13551 if (actualOpcode != CEE_CALLVIRT)
13553 BADCODE("constrained. has to be followed by callvirt");
13560 JITDUMP(" readonly.");
13562 Verify(!(prefixFlags & PREFIX_READONLY), "Multiple readonly. prefixes");
13563 prefixFlags |= PREFIX_READONLY;
13566 OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
13567 if (actualOpcode != CEE_LDELEMA && !impOpcodeIsCallOpcode(actualOpcode))
13569 BADCODE("readonly. has to be followed by ldelema or call");
13579 Verify(!(prefixFlags & PREFIX_TAILCALL_EXPLICIT), "Multiple tailcall. prefixes");
13580 prefixFlags |= PREFIX_TAILCALL_EXPLICIT;
13583 OPCODE actualOpcode = impGetNonPrefixOpcode(codeAddr, codeEndp);
13584 if (!impOpcodeIsCallOpcode(actualOpcode))
13586 BADCODE("tailcall. has to be followed by call, callvirt or calli");
13594 /* Since we will implicitly insert newObjThisPtr at the start of the
13595 argument list, spill any GTF_ORDER_SIDEEFF */
13596 impSpillSpecialSideEff();
13598 /* NEWOBJ does not respond to TAIL */
13599 prefixFlags &= ~PREFIX_TAILCALL_EXPLICIT;
13601 /* NEWOBJ does not respond to CONSTRAINED */
13602 prefixFlags &= ~PREFIX_CONSTRAINED;
13604 _impResolveToken(CORINFO_TOKENKIND_NewObj);
13606 eeGetCallInfo(&resolvedToken, nullptr /* constraint typeRef*/,
13607 addVerifyFlag(combine(CORINFO_CALLINFO_SECURITYCHECKS, CORINFO_CALLINFO_ALLOWINSTPARAM)),
13610 if (compIsForInlining())
13612 if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
13614 // Check to see if this call violates the boundary.
13615 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_SECURITY);
13620 mflags = callInfo.methodFlags;
13622 if ((mflags & (CORINFO_FLG_STATIC | CORINFO_FLG_ABSTRACT)) != 0)
13624 BADCODE("newobj on static or abstract method");
13627 // Insert the security callout before any actual code is generated
13628 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
13630 // There are three different cases for new. In the first two, the object size
13631 // is variable (it depends on the arguments):
13632 //      1) The object is an array (arrays are treated specially by the EE)
13633 //      2) The object is some other variable-sized object (e.g. String)
13634 //      3) The class size can be determined beforehand (the normal case)
13635 // In the first case we need to call a NEWOBJ helper (multinewarray),
13636 // in the second case we call the constructor with a null 'this' pointer,
13637 // and in the third case we allocate the memory and then call the constructor.
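// Illustrative (hypothetical tokens): "newobj instance void
// int32[,]::.ctor(int32, int32)" is case 1 (multi-dimensional arrays are
// allocated via the helper), "newobj string::.ctor(char[])" is case 2, and
// "newobj SomeClass::.ctor()" is the normal case 3.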
13639 clsFlags = callInfo.classFlags;
13640 if (clsFlags & CORINFO_FLG_ARRAY)
13642 if (tiVerificationNeeded)
13644 CORINFO_CLASS_HANDLE elemTypeHnd;
13645 INDEBUG(CorInfoType corType =)
13646 info.compCompHnd->getChildType(resolvedToken.hClass, &elemTypeHnd);
13647 assert(!(elemTypeHnd == nullptr && corType == CORINFO_TYPE_VALUECLASS));
13648 Verify(elemTypeHnd == nullptr ||
13649 !(info.compCompHnd->getClassAttribs(elemTypeHnd) & CORINFO_FLG_CONTAINS_STACK_PTR),
13650 "newarr of byref-like objects");
13651 verVerifyCall(opcode, &resolvedToken, nullptr, ((prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0),
13652 ((prefixFlags & PREFIX_READONLY) != 0), delegateCreateStart, codeAddr - 1,
13653 &callInfo DEBUGARG(info.compFullName));
13655 // Arrays need to call the NEWOBJ helper.
13656 assertImp(clsFlags & CORINFO_FLG_VAROBJSIZE);
13658 impImportNewObjArray(&resolvedToken, &callInfo);
13659 if (compDonotInline())
13667 // At present this can only be String
13668 else if (clsFlags & CORINFO_FLG_VAROBJSIZE)
13670 if (IsTargetAbi(CORINFO_CORERT_ABI))
13672 // The dummy argument does not exist in CoreRT
13673 newObjThisPtr = nullptr;
13677 // This is the case for variable-sized objects that are not
13678 // arrays. In this case, call the constructor with a null 'this'
13680 newObjThisPtr = gtNewIconNode(0, TYP_REF);
13683 /* Remember that this basic block contains 'new' of an object */
13684 block->bbFlags |= BBF_HAS_NEWOBJ;
13685 optMethodFlags |= OMF_HAS_NEWOBJ;
13689 // This is the normal case where the size of the object is
13690 // fixed. Allocate the memory and call the constructor.
13692 // Note: We cannot add a peep to avoid the use of a temp here
13693 // because we don't have enough interference info to detect when
13694 // the sources and destination interfere, e.g. s = new S(ref);
13696 // TODO: Find the correct place to introduce a general
13697 // reverse copy prop for struct return values from newobj or
13698 // any function returning structs.
13700 /* get a temporary for the new object */
13701 lclNum = lvaGrabTemp(true DEBUGARG("NewObj constructor temp"));
13702 if (compDonotInline())
13704 // Fail fast if lvaGrabTemp fails with CALLSITE_TOO_MANY_LOCALS.
13705 assert(compInlineResult->GetObservation() == InlineObservation::CALLSITE_TOO_MANY_LOCALS);
13709 // In the value class case we only need clsHnd for size calcs.
13711 // The lookup of the code pointer will be handled by CALL in this case
13712 if (clsFlags & CORINFO_FLG_VALUECLASS)
13714 if (compIsForInlining())
13716 // If the value class has GC fields, inform the inliner. It may choose to
13717 // bail out on the inline.
13718 DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
13719 if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0)
13721 compInlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT);
13722 if (compInlineResult->IsFailure())
13727 // Do further notification in the case where the call site is rare;
13728 // some policies do not track the relative hotness of call sites for
13729 // "always" inline cases.
13730 if (impInlineInfo->iciBlock->isRunRarely())
13732 compInlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT);
13733 if (compInlineResult->IsFailure())
13741 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
13742 unsigned size = info.compCompHnd->getClassSize(resolvedToken.hClass);
13744 if (impIsPrimitive(jitTyp))
13746 lvaTable[lclNum].lvType = JITtype2varType(jitTyp);
13750 // The local variable itself is the allocated space.
13751 // Here we need the unsafe value cls check, since the address of the struct is taken for further use
13752 // and is potentially exploitable.
13753 lvaSetStruct(lclNum, resolvedToken.hClass, true /* unsafe value cls check */);
13755 if (compIsForInlining() || fgStructTempNeedsExplicitZeroInit(lvaTable + lclNum, block))
13757 // Append a tree to zero-out the temp
13758 newObjThisPtr = gtNewLclvNode(lclNum, lvaTable[lclNum].TypeGet());
13760 newObjThisPtr = gtNewBlkOpNode(newObjThisPtr, // Dest
13761 gtNewIconNode(0), // Value
13763 false, // isVolatile
13764 false); // not copyBlock
13765 impAppendTree(newObjThisPtr, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
13768 // Obtain the address of the temp
13770 gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewLclvNode(lclNum, lvaTable[lclNum].TypeGet()));
13774 #ifdef FEATURE_READYTORUN_COMPILER
13775 if (opts.IsReadyToRun())
13777 op1 = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_NEW, TYP_REF);
13778 usingReadyToRunHelper = (op1 != nullptr);
13781 if (!usingReadyToRunHelper)
13784 op1 = impParentClassTokenToHandle(&resolvedToken, nullptr, TRUE);
13785 if (op1 == nullptr)
13786 { // compDonotInline()
13790 // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
13791 // and the newfast call with a single call to a dynamic R2R cell that will:
13792 // 1) Load the context
13793 // 2) Perform the generic dictionary lookup and caching, and generate the appropriate
13795 // 3) Allocate and return the new object
13796 // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
13798 op1 = gtNewAllocObjNode(info.compCompHnd->getNewHelper(&resolvedToken, info.compMethodHnd),
13799 resolvedToken.hClass, TYP_REF, op1);
13802 // Remember that this basic block contains 'new' of an object
13803 block->bbFlags |= BBF_HAS_NEWOBJ;
13804 optMethodFlags |= OMF_HAS_NEWOBJ;
13806 // Append the assignment to the temp/local. We don't need to spill
13807 // at all as we are just calling an EE-Jit helper which can only
13808 // cause an (async) OutOfMemoryException.
13810 // We assign the newly allocated object (by a GT_ALLOCOBJ node)
13811 // to a temp. Note that the pattern "temp = allocObj" is required
13812 // by the ObjectAllocator phase to be able to find GT_ALLOCOBJ nodes
13813 // without an exhaustive walk over all expressions.
13815 impAssignTempGen(lclNum, op1, (unsigned)CHECK_SPILL_NONE);
13817 assert(lvaTable[lclNum].lvSingleDef == 0);
13818 lvaTable[lclNum].lvSingleDef = 1;
13819 JITDUMP("Marked V%02u as a single def local\n", lclNum);
13820 lvaSetClass(lclNum, resolvedToken.hClass, true /* is Exact */);
13822 newObjThisPtr = gtNewLclvNode(lclNum, TYP_REF);
13829 /* CALLI does not respond to CONSTRAINED */
13830 prefixFlags &= ~PREFIX_CONSTRAINED;
13832 if (compIsForInlining())
13834 // CALLI doesn't have a method handle, so assume the worst.
13835 if (impInlineInfo->inlineCandidateInfo->dwRestrictions & INLINE_RESPECT_BOUNDARY)
13837 compInlineResult->NoteFatal(InlineObservation::CALLSITE_CROSS_BOUNDARY_CALLI);
13847 // We can't call getCallInfo on the token from a CALLI, but we need it in
13848 // many other places. We unfortunately embed that knowledge here.
13849 if (opcode != CEE_CALLI)
13851 _impResolveToken(CORINFO_TOKENKIND_Method);
13853 eeGetCallInfo(&resolvedToken,
13854 (prefixFlags & PREFIX_CONSTRAINED) ? &constrainedResolvedToken : nullptr,
13855 // this is how impImportCall invokes getCallInfo
13857 combine(combine(CORINFO_CALLINFO_ALLOWINSTPARAM, CORINFO_CALLINFO_SECURITYCHECKS),
13858 (opcode == CEE_CALLVIRT) ? CORINFO_CALLINFO_CALLVIRT
13859 : CORINFO_CALLINFO_NONE)),
13864 // Suppress uninitialized use warning.
13865 memset(&resolvedToken, 0, sizeof(resolvedToken));
13866 memset(&callInfo, 0, sizeof(callInfo));
13868 resolvedToken.token = getU4LittleEndian(codeAddr);
13869 resolvedToken.tokenContext = impTokenLookupContextHandle;
13870 resolvedToken.tokenScope = info.compScopeHnd;
13873 CALL: // memberRef should be set.
13874 // newObjThisPtr should be set for CEE_NEWOBJ
13876 JITDUMP(" %08X", resolvedToken.token);
13877 constraintCall = (prefixFlags & PREFIX_CONSTRAINED) != 0;
13879 bool newBBcreatedForTailcallStress;
13881 newBBcreatedForTailcallStress = false;
13883 if (compIsForInlining())
13885 if (compDonotInline())
13889 // We rule out inlinees with explicit tail calls in fgMakeBasicBlocks.
13890 assert((prefixFlags & PREFIX_TAILCALL_EXPLICIT) == 0);
13894 if (compTailCallStress())
13896 // Have we created a new BB after the "call" instruction in fgMakeBasicBlocks()?
13897 // Tail call stress only recognizes call+ret patterns and forces them to be
13898 // explicit tail prefixed calls. Also fgMakeBasicBlocks() under tail call stress
13899 // doesn't import the 'ret' opcode following the call into the basic block containing
13900 // the call; instead it imports it into a new basic block. Note that fgMakeBasicBlocks()
13901 // is already checking that there is an opcode following the call, and hence it is
13902 // safe here to read the next opcode without a bounds check.
13903 newBBcreatedForTailcallStress =
13904 impOpcodeIsCallOpcode(opcode) && // Current opcode is a CALL (not a CEE_NEWOBJ), so don't
13905 // make it jump to RET.
13906 (OPCODE)getU1LittleEndian(codeAddr + sz) == CEE_RET; // Next opcode is a CEE_RET
13908 bool hasTailPrefix = (prefixFlags & PREFIX_TAILCALL_EXPLICIT);
13909 if (newBBcreatedForTailcallStress && !hasTailPrefix && // User hasn't set "tail." prefix yet.
13910 verCheckTailCallConstraint(opcode, &resolvedToken,
13911 constraintCall ? &constrainedResolvedToken : nullptr,
13912 true) // Is it legal to do tailcall?
13915 CORINFO_METHOD_HANDLE declaredCalleeHnd = callInfo.hMethod;
13916 bool isVirtual = (callInfo.kind == CORINFO_VIRTUALCALL_STUB) ||
13917 (callInfo.kind == CORINFO_VIRTUALCALL_VTABLE);
13918 CORINFO_METHOD_HANDLE exactCalleeHnd = isVirtual ? nullptr : declaredCalleeHnd;
13919 if (info.compCompHnd->canTailCall(info.compMethodHnd, declaredCalleeHnd, exactCalleeHnd,
13920 hasTailPrefix)) // Is it legal to do tailcall?
13922 // Stress the tailcall.
13923 JITDUMP(" (Tailcall stress: prefixFlags |= PREFIX_TAILCALL_EXPLICIT)");
13924 prefixFlags |= PREFIX_TAILCALL_EXPLICIT;
13930 // This is split up to avoid goto flow warnings.
13932 isRecursive = !compIsForInlining() && (callInfo.hMethod == info.compMethodHnd);
13934 // Note that when running under tail call stress, a call will be marked as explicit tail prefixed,
13935 // and hence will not be considered for implicit tail calling.
13936 if (impIsImplicitTailCallCandidate(opcode, codeAddr + sz, codeEndp, prefixFlags, isRecursive))
13938 if (compIsForInlining())
13940 #if FEATURE_TAILCALL_OPT_SHARED_RETURN
13941 // Are we inlining at an implicit tail call site? If so then we can flag
13942 // implicit tail call sites in the inline body. These call sites
13943 // often end up in non-BBJ_RETURN blocks, so only flag them when
13944 // we're able to handle shared returns.
13945 if (impInlineInfo->iciCall->IsImplicitTailCall())
13947 JITDUMP(" (Inline Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)");
13948 prefixFlags |= PREFIX_TAILCALL_IMPLICIT;
13950 #endif // FEATURE_TAILCALL_OPT_SHARED_RETURN
13954 JITDUMP(" (Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)");
13955 prefixFlags |= PREFIX_TAILCALL_IMPLICIT;
13959 // Treat this call as tail call for verification only if "tail" prefixed (i.e. explicit tail call).
13960 explicitTailCall = (prefixFlags & PREFIX_TAILCALL_EXPLICIT) != 0;
13961 readonlyCall = (prefixFlags & PREFIX_READONLY) != 0;
13963 if (opcode != CEE_CALLI && opcode != CEE_NEWOBJ)
13965 // All calls and delegates need a security callout.
13966 // For delegates, this is the call to the delegate constructor, not the access check on the
13967 // underlying LD(virt)FTN.
13968 impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
13971 if (tiVerificationNeeded)
13973 verVerifyCall(opcode, &resolvedToken, constraintCall ? &constrainedResolvedToken : nullptr,
13974 explicitTailCall, readonlyCall, delegateCreateStart, codeAddr - 1,
13975 &callInfo DEBUGARG(info.compFullName));
13978 callTyp = impImportCall(opcode, &resolvedToken, constraintCall ? &constrainedResolvedToken : nullptr,
13979 newObjThisPtr, prefixFlags, &callInfo, opcodeOffs);
13980 if (compDonotInline())
13982 // We do not check failures after lvaGrabTemp; that case is covered by CoreCLR issue 13272.
13983 assert((callTyp == TYP_UNDEF) ||
13984 (compInlineResult->GetObservation() == InlineObservation::CALLSITE_TOO_MANY_LOCALS));
13988 if (explicitTailCall || newBBcreatedForTailcallStress) // If newBBcreatedForTailcallStress is true, we
13989 // have created a new BB after the "call"
13990 // instruction in fgMakeBasicBlocks(). So we need to jump to RET regardless.
13992 assert(!compIsForInlining());
14004 BOOL isLoadAddress = (opcode == CEE_LDFLDA || opcode == CEE_LDSFLDA);
14005 BOOL isLoadStatic = (opcode == CEE_LDSFLD || opcode == CEE_LDSFLDA);
14007 /* Get the CP_Fieldref index */
14008 assertImp(sz == sizeof(unsigned));
14010 _impResolveToken(CORINFO_TOKENKIND_Field);
14012 JITDUMP(" %08X", resolvedToken.token);
14014 int aflags = isLoadAddress ? CORINFO_ACCESS_ADDRESS : CORINFO_ACCESS_GET;
14016 GenTree* obj = nullptr;
14017 typeInfo* tiObj = nullptr;
14018 CORINFO_CLASS_HANDLE objType = nullptr; // used for fields
14020 if (opcode == CEE_LDFLD || opcode == CEE_LDFLDA)
14022 tiObj = &impStackTop().seTypeInfo;
14023 StackEntry se = impPopStack();
14024 objType = se.seTypeInfo.GetClassHandle();
14027 if (impIsThis(obj))
14029 aflags |= CORINFO_ACCESS_THIS;
14031 // An optimization for Contextful classes:
14032 // we unwrap the proxy when we have a 'this reference'
14034 if (info.compUnwrapContextful)
14036 aflags |= CORINFO_ACCESS_UNWRAP;
14041 eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo);
14043 // Figure out the type of the member. We always call canAccessField, so you always need this
14044 // handle.
14045 CorInfoType ciType = fieldInfo.fieldType;
14046 clsHnd = fieldInfo.structType;
14048 lclTyp = JITtype2varType(ciType);
14050 #ifdef _TARGET_AMD64_
14051 noway_assert(varTypeIsIntegralOrI(lclTyp) || varTypeIsFloating(lclTyp) || lclTyp == TYP_STRUCT);
14052 #endif // _TARGET_AMD64_
14054 if (compIsForInlining())
14056 switch (fieldInfo.fieldAccessor)
14058 case CORINFO_FIELD_INSTANCE_HELPER:
14059 case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
14060 case CORINFO_FIELD_STATIC_ADDR_HELPER:
14061 case CORINFO_FIELD_STATIC_TLS:
14063 compInlineResult->NoteFatal(InlineObservation::CALLEE_LDFLD_NEEDS_HELPER);
14066 case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
14067 case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
14068 /* We may be able to inline the field accessors in specific instantiations of generic
14069  * methods */
14070 compInlineResult->NoteFatal(InlineObservation::CALLSITE_LDFLD_NEEDS_HELPER);
14077 if (!isLoadAddress && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && lclTyp == TYP_STRUCT &&
14080 if ((info.compCompHnd->getTypeForPrimitiveValueClass(clsHnd) == CORINFO_TYPE_UNDEF) &&
14081 !(info.compFlags & CORINFO_FLG_FORCEINLINE))
14083 // Loading a static valuetype field will usually cause a JitHelper to be called
14084 // for the static base. This will bloat the code.
14085 compInlineResult->Note(InlineObservation::CALLEE_LDFLD_STATIC_VALUECLASS);
14087 if (compInlineResult->IsFailure())
14095 tiRetVal = verMakeTypeInfo(ciType, clsHnd);
14098 tiRetVal.MakeByRef();
14102 tiRetVal.NormaliseForStack();
14105 // Perform this check always to ensure that we get field access exceptions even with
14106 // SkipVerification.
14107 impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
14109 if (tiVerificationNeeded)
14111 // You can also pass the unboxed struct to LDFLD
14112 BOOL bAllowPlainValueTypeAsThis = FALSE;
14113 if (opcode == CEE_LDFLD && impIsValueType(tiObj))
14115 bAllowPlainValueTypeAsThis = TRUE;
14118 verVerifyField(&resolvedToken, fieldInfo, tiObj, isLoadAddress, bAllowPlainValueTypeAsThis);
14120 // If we're doing this on a heap object or from a 'safe' byref
14121 // then the result is a safe byref too
14122 if (isLoadAddress) // load address
14124 if (fieldInfo.fieldFlags &
14125 CORINFO_FLG_FIELD_STATIC) // statics marked as safe will have permanent home
14127 if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_SAFESTATIC_BYREF_RETURN)
14129 tiRetVal.SetIsPermanentHomeByRef();
14132 else if (tiObj->IsObjRef() || tiObj->IsPermanentHomeByRef())
14134 // ldflda of byref is safe if done on a gc object or on a
14135 // byref with a permanent home
14136 tiRetVal.SetIsPermanentHomeByRef();
14142 // tiVerificationNeeded is false.
14143 // Raise InvalidProgramException if static load accesses non-static field
14144 if (isLoadStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0))
14146 BADCODE("static access on an instance field");
14150 // We are using ldfld/a on a static field. We allow it, but need to evaluate any side effects of obj.
14151 if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr)
14153 if (obj->gtFlags & GTF_SIDE_EFFECT)
14155 obj = gtUnusedValNode(obj);
14156 impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
14161 /* Preserve 'small' int types */
14162 if (!varTypeIsSmall(lclTyp))
14164 lclTyp = genActualType(lclTyp);
14167 bool usesHelper = false;
14169 switch (fieldInfo.fieldAccessor)
14171 case CORINFO_FIELD_INSTANCE:
14172 #ifdef FEATURE_READYTORUN_COMPILER
14173 case CORINFO_FIELD_INSTANCE_WITH_BASE:
14176 obj = impCheckForNullPointer(obj);
14178 // If the object is a struct, what we really want is
14179 // for the field to operate on the address of the struct.
14180 if (!varTypeGCtype(obj->TypeGet()) && impIsValueType(tiObj))
14182 assert(opcode == CEE_LDFLD && objType != nullptr);
14184 obj = impGetStructAddr(obj, objType, (unsigned)CHECK_SPILL_ALL, true);
14187 /* Create the data member node */
14188 op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset);
14190 #ifdef FEATURE_READYTORUN_COMPILER
14191 if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE)
14193 op1->gtField.gtFieldLookup = fieldInfo.fieldLookup;
14197 op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT);
14199 if (fgAddrCouldBeNull(obj))
14201 op1->gtFlags |= GTF_EXCEPT;
14204 // If gtFldObj is a BYREF then our target is a value class and
14205 // it could point anywhere, for example a boxed class static int
14206 if (obj->gtType == TYP_BYREF)
14208 op1->gtFlags |= GTF_IND_TGTANYWHERE;
14211 DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
14212 if (StructHasOverlappingFields(typeFlags))
14214 op1->gtField.gtFldMayOverlap = true;
14217 // wrap it in an address-of operator if necessary
14220 op1 = gtNewOperNode(GT_ADDR,
14221 (var_types)(varTypeIsGC(obj->TypeGet()) ? TYP_BYREF : TYP_I_IMPL), op1);
14225 if (compIsForInlining() &&
14226 impInlineIsGuaranteedThisDerefBeforeAnySideEffects(nullptr, obj,
14227 impInlineInfo->inlArgInfo))
14229 impInlineInfo->thisDereferencedFirst = true;
14235 case CORINFO_FIELD_STATIC_TLS:
14236 #ifdef _TARGET_X86_
14237 // Legacy TLS access is implemented as intrinsic on x86 only
14239 /* Create the data member node */
14240 op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset);
14241 op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation
14245 op1 = gtNewOperNode(GT_ADDR, (var_types)TYP_I_IMPL, op1);
14249 fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER;
14254 case CORINFO_FIELD_STATIC_ADDR_HELPER:
14255 case CORINFO_FIELD_INSTANCE_HELPER:
14256 case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
14257 op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp,
14262 case CORINFO_FIELD_STATIC_ADDRESS:
14263 // Replace static read-only fields with constant if possible
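// For illustration (hypothetical C#): given
//     static readonly int s_limit = 42;
// a read of s_limit can be imported as the constant 42, but only once the
// class initializer has already run (CORINFO_INITCLASS_INITIALIZED below),
// since before that the field may not hold its final value yet.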
14264 if ((aflags & CORINFO_ACCESS_GET) && (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_FINAL) &&
14265 !(fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP) &&
14266 (varTypeIsIntegral(lclTyp) || varTypeIsFloating(lclTyp)))
14268 CorInfoInitClassResult initClassResult =
14269 info.compCompHnd->initClass(resolvedToken.hField, info.compMethodHnd,
14270 impTokenLookupContextHandle);
14272 if (initClassResult & CORINFO_INITCLASS_INITIALIZED)
14274 void** pFldAddr = nullptr;
14276 void* fldAddr = info.compCompHnd->getFieldAddress(resolvedToken.hField, (void**)&pFldAddr);
14278 // We should always be able to access this static's address directly
14279 assert(pFldAddr == nullptr);
14281 op1 = impImportStaticReadOnlyField(fldAddr, lclTyp);
14288 case CORINFO_FIELD_STATIC_RVA_ADDRESS:
14289 case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
14290 case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
14291 case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
14292 op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo,
14296 case CORINFO_FIELD_INTRINSIC_ZERO:
14298 assert(aflags & CORINFO_ACCESS_GET);
14299 op1 = gtNewIconNode(0, lclTyp);
14304 case CORINFO_FIELD_INTRINSIC_EMPTY_STRING:
14306 assert(aflags & CORINFO_ACCESS_GET);
14308 void* pValue;
14309 InfoAccessType iat = info.compCompHnd->emptyStringLiteral(&pValue);
14310 op1 = gtNewStringLiteralNode(iat, pValue);
14315 case CORINFO_FIELD_INTRINSIC_ISLITTLEENDIAN:
14317 assert(aflags & CORINFO_ACCESS_GET);
14319 op1 = gtNewIconNode(0, lclTyp);
14321 op1 = gtNewIconNode(1, lclTyp);
14328 assert(!"Unexpected fieldAccessor");
14331 if (!isLoadAddress)
14334 if (prefixFlags & PREFIX_VOLATILE)
14336 op1->gtFlags |= GTF_DONT_CSE; // Can't CSE a volatile
14337 op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
14341 assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) ||
14342 (op1->OperGet() == GT_OBJ));
14343 op1->gtFlags |= GTF_IND_VOLATILE;
14347 if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp))
14351 assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND) ||
14352 (op1->OperGet() == GT_OBJ));
14353 op1->gtFlags |= GTF_IND_UNALIGNED;
14358 /* Check if the class needs explicit initialization */
14360 if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
14362 GenTree* helperNode = impInitClass(&resolvedToken);
14363 if (compDonotInline())
14367 if (helperNode != nullptr)
14369 op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1);
14374 impPushOnStack(op1, tiRetVal);
14382 BOOL isStoreStatic = (opcode == CEE_STSFLD);
14384 CORINFO_CLASS_HANDLE fieldClsHnd; // class of the field (if it's a ref type)
14386 /* Get the CP_Fieldref index */
14388 assertImp(sz == sizeof(unsigned));
14390 _impResolveToken(CORINFO_TOKENKIND_Field);
14392 JITDUMP(" %08X", resolvedToken.token);
14394 int aflags = CORINFO_ACCESS_SET;
14395 GenTree* obj = nullptr;
14396 typeInfo* tiObj = nullptr;
14399 /* Pull the value from the stack */
14400 StackEntry se = impPopStack();
14402 tiVal = se.seTypeInfo;
14403 clsHnd = tiVal.GetClassHandle();
14405 if (opcode == CEE_STFLD)
14407 tiObj = &impStackTop().seTypeInfo;
14408 obj = impPopStack().val;
14410 if (impIsThis(obj))
14412 aflags |= CORINFO_ACCESS_THIS;
14414 // An optimization for Contextful classes:
14415 // we unwrap the proxy when we have a 'this reference'
14417 if (info.compUnwrapContextful)
14419 aflags |= CORINFO_ACCESS_UNWRAP;
14424 eeGetFieldInfo(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo);
14426 // Figure out the type of the member. We always call canAccessField, so you always need this
14427 // handle.
14428 CorInfoType ciType = fieldInfo.fieldType;
14429 fieldClsHnd = fieldInfo.structType;
14431 lclTyp = JITtype2varType(ciType);
14433 if (compIsForInlining())
14435 /* Is this a 'special' (COM) field? or a TLS ref static field? a field stored in the GC heap? or
14436  * a per-inst static? */
14438 switch (fieldInfo.fieldAccessor)
14440 case CORINFO_FIELD_INSTANCE_HELPER:
14441 case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
14442 case CORINFO_FIELD_STATIC_ADDR_HELPER:
14443 case CORINFO_FIELD_STATIC_TLS:
14445 compInlineResult->NoteFatal(InlineObservation::CALLEE_STFLD_NEEDS_HELPER);
14448 case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
14449 case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
14450 /* We may be able to inline the field accessors in specific instantiations of generic
14451  * methods */
14452 compInlineResult->NoteFatal(InlineObservation::CALLSITE_STFLD_NEEDS_HELPER);
14460 impHandleAccessAllowed(fieldInfo.accessAllowed, &fieldInfo.accessCalloutHelper);
14462 if (tiVerificationNeeded)
14464 verVerifyField(&resolvedToken, fieldInfo, tiObj, TRUE);
14465 typeInfo fieldType = verMakeTypeInfo(ciType, fieldClsHnd);
14466 Verify(tiCompatibleWith(tiVal, fieldType.NormaliseForStack(), true), "type mismatch");
14470 // tiVerificationNeeded is false.
14471 // Raise InvalidProgramException if static store accesses non-static field
14472 if (isStoreStatic && ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) == 0))
14474 BADCODE("static access on an instance field");
14478 // We are using stfld on a static field.
14479 // We allow it, but need to evaluate any side effects of obj
14480 if ((fieldInfo.fieldFlags & CORINFO_FLG_FIELD_STATIC) && obj != nullptr)
14482 if (obj->gtFlags & GTF_SIDE_EFFECT)
14484 obj = gtUnusedValNode(obj);
14485 impAppendTree(obj, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
14490 /* Preserve 'small' int types */
14491 if (!varTypeIsSmall(lclTyp))
14493 lclTyp = genActualType(lclTyp);
14496 switch (fieldInfo.fieldAccessor)
14498 case CORINFO_FIELD_INSTANCE:
14499 #ifdef FEATURE_READYTORUN_COMPILER
14500 case CORINFO_FIELD_INSTANCE_WITH_BASE:
14503 obj = impCheckForNullPointer(obj);
14505 /* Create the data member node */
14506 op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, obj, fieldInfo.offset);
14507 DWORD typeFlags = info.compCompHnd->getClassAttribs(resolvedToken.hClass);
14508 if (StructHasOverlappingFields(typeFlags))
14510 op1->gtField.gtFldMayOverlap = true;
14513 #ifdef FEATURE_READYTORUN_COMPILER
14514 if (fieldInfo.fieldAccessor == CORINFO_FIELD_INSTANCE_WITH_BASE)
14516 op1->gtField.gtFieldLookup = fieldInfo.fieldLookup;
14520 op1->gtFlags |= (obj->gtFlags & GTF_GLOB_EFFECT);
14522 if (fgAddrCouldBeNull(obj))
14524 op1->gtFlags |= GTF_EXCEPT;
14527 // If gtFldObj is a BYREF then our target is a value class and
14528 // it could point anywhere, for example a boxed class static int
14529 if (obj->gtType == TYP_BYREF)
14531 op1->gtFlags |= GTF_IND_TGTANYWHERE;
14534 if (compIsForInlining() &&
14535 impInlineIsGuaranteedThisDerefBeforeAnySideEffects(op2, obj, impInlineInfo->inlArgInfo))
14537 impInlineInfo->thisDereferencedFirst = true;
14542 case CORINFO_FIELD_STATIC_TLS:
14543 #ifdef _TARGET_X86_
14544 // Legacy TLS access is implemented as intrinsic on x86 only
14546 /* Create the data member node */
14547 op1 = gtNewFieldRef(lclTyp, resolvedToken.hField, NULL, fieldInfo.offset);
14548 op1->gtFlags |= GTF_IND_TLS_REF; // fgMorphField will handle the transformation
14552 fieldInfo.fieldAccessor = CORINFO_FIELD_STATIC_ADDR_HELPER;
14557 case CORINFO_FIELD_STATIC_ADDR_HELPER:
14558 case CORINFO_FIELD_INSTANCE_HELPER:
14559 case CORINFO_FIELD_INSTANCE_ADDR_HELPER:
14560 op1 = gtNewRefCOMfield(obj, &resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo, lclTyp,
14564 case CORINFO_FIELD_STATIC_ADDRESS:
14565 case CORINFO_FIELD_STATIC_RVA_ADDRESS:
14566 case CORINFO_FIELD_STATIC_SHARED_STATIC_HELPER:
14567 case CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER:
14568 case CORINFO_FIELD_STATIC_READYTORUN_HELPER:
14569 op1 = impImportStaticFieldAccess(&resolvedToken, (CORINFO_ACCESS_FLAGS)aflags, &fieldInfo,
14574 assert(!"Unexpected fieldAccessor");
14577 // Create the member assignment, unless we have a struct.
14578 // TODO-1stClassStructs: This could be limited to TYP_STRUCT, to avoid extra copies.
14579 bool deferStructAssign = varTypeIsStruct(lclTyp);
14581 if (!deferStructAssign)
14583 if (prefixFlags & PREFIX_VOLATILE)
14585 assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND));
14586 op1->gtFlags |= GTF_DONT_CSE; // Can't CSE a volatile
14587 op1->gtFlags |= GTF_ORDER_SIDEEFF; // Prevent this from being reordered
14588 op1->gtFlags |= GTF_IND_VOLATILE;
14590 if ((prefixFlags & PREFIX_UNALIGNED) && !varTypeIsByte(lclTyp))
14592 assert((op1->OperGet() == GT_FIELD) || (op1->OperGet() == GT_IND));
14593 op1->gtFlags |= GTF_IND_UNALIGNED;
14596 /* V4.0 allows assignment of i4 constant values to i8 type vars when IL verifier is bypassed (full
14597 trust apps). The reason this works is that the JIT stores an i4 constant in the GenTree union during
14598 importation and reads from the union as if it were a long during code generation. Though this
14599 can potentially read garbage, one can get lucky and have it work correctly.
14601 This code pattern is generated by Dev10 MC++ compiler while storing to fields when compiled with
14602 /O2 switch (default when compiling retail configs in Dev10) and a customer app has taken a
14603 dependency on it. To be backward compatible, we will explicitly add an upward cast here so that
14604 it works correctly always.
14606 Note that this is limited to x86 alone, as there is no back compat to be addressed for the Arm JIT.
14609 CLANG_FORMAT_COMMENT_ANCHOR;
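// A sketch of the IL shape in question (hypothetical, for illustration only):
//     ldc.i4.2
//     stsfld     int64 C::s_field   // i4 constant stored to an i8 field
// The cast added below widens the constant explicitly so codegen never reads
// uninitialized bits out of the GenTree union.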
14611 #ifndef _TARGET_64BIT_
14612 // In UWP6.0 and beyond (post-.NET Core 2.0), we decided to let this cast from int to long be
14613 // generated for ARM as well as x86, so the following IR will be accepted:
14615 // | /--* CNS_INT int 2
14617 // \--* CLS_VAR long
14619 if ((op1->TypeGet() != op2->TypeGet()) && op2->OperIsConst() && varTypeIsIntOrI(op2->TypeGet()) &&
14620 varTypeIsLong(op1->TypeGet()))
14622 op2 = gtNewCastNode(op1->TypeGet(), op2, false, op1->TypeGet());
14626 #ifdef _TARGET_64BIT_
14627 // Automatic upcast for a GT_CNS_INT into TYP_I_IMPL
14628 if ((op2->OperGet() == GT_CNS_INT) && varTypeIsI(lclTyp) && !varTypeIsI(op2->gtType))
14630 op2->gtType = TYP_I_IMPL;
14634 // Allow a downcast of op2 from TYP_I_IMPL into a 32-bit Int for x86 JIT compatibility
14636 if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT))
14638 op2 = gtNewCastNode(TYP_INT, op2, false, TYP_INT);
14640 // Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
14642 if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT))
14644 op2 = gtNewCastNode(TYP_I_IMPL, op2, false, TYP_I_IMPL);
14649 // We can generate an assignment to a TYP_FLOAT from a TYP_DOUBLE
14650 // We insert a cast to the dest 'op1' type
14652 if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) &&
14653 varTypeIsFloating(op2->gtType))
14655 op2 = gtNewCastNode(op1->TypeGet(), op2, false, op1->TypeGet());
14658 op1 = gtNewAssignNode(op1, op2);
14660 /* Mark the expression as containing an assignment */
14662 op1->gtFlags |= GTF_ASG;
14665 /* Check if the class needs explicit initialization */
14667 if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
14669 GenTree* helperNode = impInitClass(&resolvedToken);
14670 if (compDonotInline())
14674 if (helperNode != nullptr)
14676 op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1);
14680 /* stfld can interfere with value classes (consider the sequence
14681 ldloc, ldloca, ..., stfld, stloc). We will be conservative and
14682 spill all value class references from the stack. */
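// For illustration (hypothetical IL):
//     ldloc.0    // value of struct local V0 pushed on the stack
//     ldloca.0   // address of V0
//     ...
//     stfld  ... // may write into V0 through that address
//     stloc.0
// Here the stfld can mutate the struct whose value still sits on the
// evaluation stack, so the spills below preserve the observed ordering.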
14684 if (obj && ((obj->gtType == TYP_BYREF) || (obj->gtType == TYP_I_IMPL)))
14688 if (impIsValueType(tiObj))
14690 impSpillEvalStack();
14694 impSpillValueClasses();
14698 /* Spill any refs to the same member from the stack */
14700 impSpillLclRefs((ssize_t)resolvedToken.hField);
14702 /* stsfld also interferes with indirect accesses (for aliased
14703 statics) and calls. But don't need to spill other statics
14704 as we have explicitly spilled this particular static field. */
14706 impSpillSideEffects(false, (unsigned)CHECK_SPILL_ALL DEBUGARG("spill side effects before STFLD"));
14708 if (deferStructAssign)
14710 op1 = impAssignStruct(op1, op2, clsHnd, (unsigned)CHECK_SPILL_ALL);
14718 /* Get the class type index operand */
14720 _impResolveToken(CORINFO_TOKENKIND_Newarr);
14722 JITDUMP(" %08X", resolvedToken.token);
14724 if (!opts.IsReadyToRun())
14726 // Need to restore array classes before creating array objects on the heap
14727 op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE /*mustRestoreHandle*/);
14728 if (op1 == nullptr)
14729 { // compDonotInline()
14734 if (tiVerificationNeeded)
14736 // As per ECMA, the specified 'numElems' can be either int32 or native int.
14737 Verify(impStackTop().seTypeInfo.IsIntOrNativeIntType(), "bad bound");
14739 CORINFO_CLASS_HANDLE elemTypeHnd;
14740 info.compCompHnd->getChildType(resolvedToken.hClass, &elemTypeHnd);
14741 Verify(elemTypeHnd == nullptr ||
14742 !(info.compCompHnd->getClassAttribs(elemTypeHnd) & CORINFO_FLG_CONTAINS_STACK_PTR),
14743 "array of byref-like type");
14746 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
14748 accessAllowedResult =
14749 info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14750 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14752 /* Form the arglist: array class handle, size */
14753 op2 = impPopStack().val;
14754 assertImp(genActualTypeIsIntOrI(op2->gtType));
14756 #ifdef _TARGET_64BIT_
14757 // The array helper takes a native int for array length.
14758 // So if we have an int, explicitly extend it to be a native int.
14759 if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
14761 if (op2->IsIntegralConst())
14763 op2->gtType = TYP_I_IMPL;
14767 bool isUnsigned = false;
14768 op2 = gtNewCastNode(TYP_I_IMPL, op2, isUnsigned, TYP_I_IMPL);
14771 #endif // _TARGET_64BIT_
14773 #ifdef FEATURE_READYTORUN_COMPILER
14774 if (opts.IsReadyToRun())
14776 op1 = impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_NEWARR_1, TYP_REF,
14777 gtNewArgList(op2));
14778 usingReadyToRunHelper = (op1 != nullptr);
14780 if (!usingReadyToRunHelper)
14782 // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
14783 // and the newarr call with a single call to a dynamic R2R cell that will:
14784 // 1) Load the context
14785 // 2) Perform the generic dictionary lookup and caching, and generate the appropriate stub
14786 // 3) Allocate the new array
14787 // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
14789 // Need to restore array classes before creating array objects on the heap
14790 op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE /*mustRestoreHandle*/);
14791 if (op1 == nullptr)
14792 { // compDonotInline()
14798 if (!usingReadyToRunHelper)
14801 args = gtNewArgList(op1, op2);
14803 /* Create a call to 'new' */
14805 // Note that this only works for shared generic code because the same helper is used for all
14806 // reference array types
14807 op1 = gtNewHelperCallNode(info.compCompHnd->getNewArrHelper(resolvedToken.hClass), TYP_REF, args);
14810 op1->gtCall.compileTimeHelperArgumentHandle = (CORINFO_GENERIC_HANDLE)resolvedToken.hClass;
14812 /* Remember that this basic block contains 'new' of an sd array */
14814 block->bbFlags |= BBF_HAS_NEWARRAY;
14815 optMethodFlags |= OMF_HAS_NEWARRAY;
14817 /* Push the result of the call on the stack */
14819 impPushOnStack(op1, tiRetVal);
14826 if (tiVerificationNeeded)
14828 Verify(false, "bad opcode");
14831 // We don't allow locallocs inside handlers
14832 if (block->hasHndIndex())
14834 BADCODE("Localloc can't be inside handler");
14837 // Get the size to allocate
14839 op2 = impPopStack().val;
14840 assertImp(genActualTypeIsIntOrI(op2->gtType));
14842 if (verCurrentState.esStackDepth != 0)
14844 BADCODE("Localloc can only be used when the stack is empty");
14847 // If the localloc is not in a loop and its size is a small constant,
14848 // create a new local var of TYP_BLK and return its address.
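// For illustration (hypothetical C#):
//     byte* p = stackalloc byte[8];
// can be imported as the address of a fresh TYP_BLK local of size 8 instead
// of a GT_LCLHEAP node, as long as this block is not part of a loop, where
// each iteration would otherwise have to produce a distinct allocation.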
14850 bool convertedToLocal = false;
14852 // Need to aggressively fold here, as even fixed-size locallocs
14853 // will have casts in the way.
14854 op2 = gtFoldExpr(op2);
14856 if (op2->IsIntegralConst())
14858 const ssize_t allocSize = op2->AsIntCon()->IconValue();
14860 if (allocSize == 0)
14862 // Result is nullptr
14863 JITDUMP("Converting stackalloc of 0 bytes to push null unmanaged pointer\n");
14864 op1 = gtNewIconNode(0, TYP_I_IMPL);
14865 convertedToLocal = true;
14867 else if ((allocSize > 0) && ((compCurBB->bbFlags & BBF_BACKWARD_JUMP) == 0))
14869 // Get the size threshold for local conversion
14870 ssize_t maxSize = DEFAULT_MAX_LOCALLOC_TO_LOCAL_SIZE;
14873 // Optionally allow this to be modified
14874 maxSize = JitConfig.JitStackAllocToLocalSize();
14877 if (allocSize <= maxSize)
14879 const unsigned stackallocAsLocal = lvaGrabTemp(false DEBUGARG("stackallocLocal"));
14880 JITDUMP("Converting stackalloc of %lld bytes to new local V%02u\n", allocSize,
14881 stackallocAsLocal);
14882 lvaTable[stackallocAsLocal].lvType = TYP_BLK;
14883 lvaTable[stackallocAsLocal].lvExactSize = (unsigned)allocSize;
14884 lvaTable[stackallocAsLocal].lvIsUnsafeBuffer = true;
14885 op1 = gtNewLclvNode(stackallocAsLocal, TYP_BLK);
14886 op1 = gtNewOperNode(GT_ADDR, TYP_I_IMPL, op1);
14887 convertedToLocal = true;
14889 if (!this->opts.compDbgEnC)
14891 // Ensure we have stack security for this method.
14892 // Reorder layout since the converted localloc is treated as an unsafe buffer.
14893 setNeedsGSSecurityCookie();
14894 compGSReorderStackLayout = true;
14900 if (!convertedToLocal)
14902 // Bail out if inlining and the localloc was not converted.
14904 // Note we might consider allowing the inline, if the call
14905 // site is not in a loop.
14906 if (compIsForInlining())
14908 InlineObservation obs = op2->IsIntegralConst()
14909 ? InlineObservation::CALLEE_LOCALLOC_TOO_LARGE
14910 : InlineObservation::CALLSITE_LOCALLOC_SIZE_UNKNOWN;
14911 compInlineResult->NoteFatal(obs);
14915 op1 = gtNewOperNode(GT_LCLHEAP, TYP_I_IMPL, op2);
14916 // May throw a stack overflow exception. Obviously, we don't want locallocs to be CSE'd.
14917 op1->gtFlags |= (GTF_EXCEPT | GTF_DONT_CSE);
14919 // Ensure we have stack security for this method.
14920 setNeedsGSSecurityCookie();
14922 /* The FP register may not be back to the original value at the end
14923 of the method, even if the frame size is 0, as localloc may
14924 have modified it. So we will HAVE to reset it */
14925 compLocallocUsed = true;
14929 compLocallocOptimized = true;
14933 impPushOnStack(op1, tiRetVal);
14938 /* Get the type token */
14939 assertImp(sz == sizeof(unsigned));
14941 _impResolveToken(CORINFO_TOKENKIND_Casting);
14943 JITDUMP(" %08X", resolvedToken.token);
14945 if (!opts.IsReadyToRun())
14947 op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
14948 if (op2 == nullptr)
14949 { // compDonotInline()
14954 if (tiVerificationNeeded)
14956 Verify(impStackTop().seTypeInfo.IsObjRef(), "obj reference needed");
14957 // Even if this is a value class, we know it is boxed.
14958 tiRetVal = typeInfo(TI_REF, resolvedToken.hClass);
14960 accessAllowedResult =
14961 info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
14962 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
14964 op1 = impPopStack().val;
14966 GenTree* optTree = impOptimizeCastClassOrIsInst(op1, &resolvedToken, false);
14968 if (optTree != nullptr)
14970 impPushOnStack(optTree, tiRetVal);
14975 #ifdef FEATURE_READYTORUN_COMPILER
14976 if (opts.IsReadyToRun())
14978 GenTreeCall* opLookup =
14979 impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_ISINSTANCEOF, TYP_REF,
14980 gtNewArgList(op1));
14981 usingReadyToRunHelper = (opLookup != nullptr);
14982 op1 = (usingReadyToRunHelper ? opLookup : op1);
14984 if (!usingReadyToRunHelper)
14986 // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
14987 // and the isinstanceof_any call with a single call to a dynamic R2R cell that will:
14988 // 1) Load the context
14989 // 2) Perform the generic dictionary lookup and caching, and generate the appropriate
14991 // 3) Perform the 'is instance' check on the input object
14992 // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
14994 op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
14995 if (op2 == nullptr)
14996 { // compDonotInline()
15002 if (!usingReadyToRunHelper)
15005 op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, false);
15007 if (compDonotInline())
15012 impPushOnStack(op1, tiRetVal);
15017 case CEE_REFANYVAL:
15019 // get the class handle and make an ICON node out of it
15021 _impResolveToken(CORINFO_TOKENKIND_Class);
15023 JITDUMP(" %08X", resolvedToken.token);
15025 op2 = impTokenToHandle(&resolvedToken);
15026 if (op2 == nullptr)
15027 { // compDonotInline()
15031 if (tiVerificationNeeded)
15033 Verify(typeInfo::AreEquivalent(impStackTop().seTypeInfo, verMakeTypeInfo(impGetRefAnyClass())),
15035 tiRetVal = verMakeTypeInfo(resolvedToken.hClass).MakeByRef();
15038 op1 = impPopStack().val;
15039 // make certain it is normalized;
15040 op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL);
15042 // Call helper GETREFANY(classHandle, op1);
15043 args = gtNewArgList(op2, op1);
15044 op1 = gtNewHelperCallNode(CORINFO_HELP_GETREFANY, TYP_BYREF, args);
15046 impPushOnStack(op1, tiRetVal);
15049 case CEE_REFANYTYPE:
15051 if (tiVerificationNeeded)
15053 Verify(typeInfo::AreEquivalent(impStackTop().seTypeInfo, verMakeTypeInfo(impGetRefAnyClass())),
15057 op1 = impPopStack().val;
15059 // make certain it is normalized;
15060 op1 = impNormStructVal(op1, impGetRefAnyClass(), (unsigned)CHECK_SPILL_ALL);
15062 if (op1->gtOper == GT_OBJ)
15064 // Get the address of the refany
15065 op1 = op1->gtOp.gtOp1;
15067 // Fetch the type from the correct slot
15068 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
15069 gtNewIconNode(OFFSETOF__CORINFO_TypedReference__type, TYP_I_IMPL));
15070 op1 = gtNewOperNode(GT_IND, TYP_BYREF, op1);
15074 assertImp(op1->gtOper == GT_MKREFANY);
15076 // The pointer may have side-effects
15077 if (op1->gtOp.gtOp1->gtFlags & GTF_SIDE_EFFECT)
15079 impAppendTree(op1->gtOp.gtOp1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
15081 impNoteLastILoffs();
15085 // We already have the class handle
15086 op1 = op1->gtOp.gtOp2;
15089 // convert native TypeHandle to RuntimeTypeHandle
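// For illustration (hypothetical C#): the undocumented '__reftype(tr)'
// operator compiles to 'refanytype' followed by a call to
// Type.GetTypeFromHandle; the helper call below wraps the raw type handle
// read from the TypedReference into the RuntimeTypeHandle struct that the
// IL stack expects.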
15091 GenTreeArgList* helperArgs = gtNewArgList(op1);
15093 op1 = gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE_MAYBENULL, TYP_STRUCT,
15096 // The handle struct is returned in a register
15097 op1->gtCall.gtReturnType = GetRuntimeHandleUnderlyingType();
15099 tiRetVal = typeInfo(TI_STRUCT, impGetTypeHandleClass());
15102 impPushOnStack(op1, tiRetVal);
15107 /* Get the Class index */
15108 assertImp(sz == sizeof(unsigned));
15109 lastLoadToken = codeAddr;
15110 _impResolveToken(CORINFO_TOKENKIND_Ldtoken);
15112 tokenType = info.compCompHnd->getTokenTypeAsHandle(&resolvedToken);
15114 op1 = impTokenToHandle(&resolvedToken, nullptr, TRUE);
15115 if (op1 == nullptr)
15116 { // compDonotInline()
15120 helper = CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE;
15121 assert(resolvedToken.hClass != nullptr);
15123 if (resolvedToken.hMethod != nullptr)
15125 helper = CORINFO_HELP_METHODDESC_TO_STUBRUNTIMEMETHOD;
15127 else if (resolvedToken.hField != nullptr)
15129 helper = CORINFO_HELP_FIELDDESC_TO_STUBRUNTIMEFIELD;
15132 GenTreeArgList* helperArgs = gtNewArgList(op1);
15134 op1 = gtNewHelperCallNode(helper, TYP_STRUCT, helperArgs);
15136 // The handle struct is returned in a register
15137 op1->gtCall.gtReturnType = GetRuntimeHandleUnderlyingType();
15139 tiRetVal = verMakeTypeInfo(tokenType);
15140 impPushOnStack(op1, tiRetVal);
15145 case CEE_UNBOX_ANY:
15147 /* Get the Class index */
15148 assertImp(sz == sizeof(unsigned));
15150 _impResolveToken(CORINFO_TOKENKIND_Class);
15152 JITDUMP(" %08X", resolvedToken.token);
15154 BOOL runtimeLookup;
15155 op2 = impTokenToHandle(&resolvedToken, &runtimeLookup);
15156 if (op2 == nullptr)
15158 assert(compDonotInline());
15162 // Run this always so we can get access exceptions even with SkipVerification.
15163 accessAllowedResult =
15164 info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
15165 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
15167 if (opcode == CEE_UNBOX_ANY && !eeIsValueClass(resolvedToken.hClass))
15169 if (tiVerificationNeeded)
15171 typeInfo tiUnbox = impStackTop().seTypeInfo;
15172 Verify(tiUnbox.IsObjRef(), "bad unbox.any arg");
15173 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
15174 tiRetVal.NormaliseForStack();
15176 JITDUMP("\n Importing UNBOX.ANY(refClass) as CASTCLASS\n");
15177 op1 = impPopStack().val;
15181 /* Pop the object and create the unbox helper call */
15182 /* You might think that for UNBOX_ANY we need to push a different */
15183 /* (non-byref) type, but here we're making the tiRetVal that is used */
15184 /* for the intermediate pointer which we then transfer onto the OBJ */
15185 /* instruction. OBJ then creates the appropriate tiRetVal. */
15186 if (tiVerificationNeeded)
15188 typeInfo tiUnbox = impStackTop().seTypeInfo;
15189 Verify(tiUnbox.IsObjRef(), "Bad unbox arg");
15191 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
15192 Verify(tiRetVal.IsValueClass(), "not value class");
15193 tiRetVal.MakeByRef();
15195 // We always come from an objref, so this is a safe byref
15196 tiRetVal.SetIsPermanentHomeByRef();
15197 tiRetVal.SetIsReadonlyByRef();
15200 op1 = impPopStack().val;
15201 assertImp(op1->gtType == TYP_REF);
15203 helper = info.compCompHnd->getUnBoxHelper(resolvedToken.hClass);
15204 assert(helper == CORINFO_HELP_UNBOX || helper == CORINFO_HELP_UNBOX_NULLABLE);
15206 // Check legality and profitability of inline expansion for unboxing.
15207 const bool canExpandInline = (helper == CORINFO_HELP_UNBOX);
15208 const bool shouldExpandInline = !(compCurBB->isRunRarely() || opts.compDbgCode || opts.MinOpts());
15210 if (canExpandInline && shouldExpandInline)
15212 JITDUMP("\n Importing %s as inline sequence\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY");
15213 // we are doing normal unboxing
15214 // inline the common case of the unbox helper
15215 // UNBOX(exp) morphs into
15216 // clone = pop(exp);
15217 // ((*clone == typeToken) ? nop : helper(clone, typeToken));
15218 // push(clone + TARGET_POINTER_SIZE)
15220 GenTree* cloneOperand;
15221 op1 = impCloneExpr(op1, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
15222 nullptr DEBUGARG("inline UNBOX clone1"));
15223 op1 = gtNewOperNode(GT_IND, TYP_I_IMPL, op1);
15225 GenTree* condBox = gtNewOperNode(GT_EQ, TYP_INT, op1, op2);
15227 op1 = impCloneExpr(cloneOperand, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
15228 nullptr DEBUGARG("inline UNBOX clone2"));
15229 op2 = impTokenToHandle(&resolvedToken);
15230 if (op2 == nullptr)
15231 { // compDonotInline()
15234 args = gtNewArgList(op2, op1);
15235 op1 = gtNewHelperCallNode(helper, TYP_VOID, args);
15237 op1 = new (this, GT_COLON) GenTreeColon(TYP_VOID, gtNewNothingNode(), op1);
15238 op1 = gtNewQmarkNode(TYP_VOID, condBox, op1);
15240 // QMARK nodes cannot reside on the evaluation stack. Because there
15241 // may be other trees on the evaluation stack that side-effect the
15242 // sources of the UNBOX operation we must spill the stack.
15244 impAppendTree(op1, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
15246 // Create the address-expression to reference past the object header
15247 // to the beginning of the value-type. Today this means adjusting
15248 // past the base of the object's vtable field, which is pointer sized.
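// For illustration: on a 64-bit target a boxed int32's payload lives at
// [obj + 8], just past the pointer-sized method table slot, so the byref
// pushed here is simply obj + TARGET_POINTER_SIZE.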
15250 op2 = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL);
15251 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, cloneOperand, op2);
15255 JITDUMP("\n Importing %s as helper call because %s\n", opcode == CEE_UNBOX ? "UNBOX" : "UNBOX.ANY",
15256 canExpandInline ? "want smaller code or faster jitting" : "inline expansion not legal");
15258 // Don't optimize, just call the helper and be done with it
15259 args = gtNewArgList(op2, op1);
15261 op1 = gtNewHelperCallNode(helper,
15262 (var_types)((helper == CORINFO_HELP_UNBOX) ? TYP_BYREF : TYP_STRUCT), args);
15265 assert(helper == CORINFO_HELP_UNBOX && op1->gtType == TYP_BYREF || // Unbox helper returns a byref.
15266 helper == CORINFO_HELP_UNBOX_NULLABLE &&
15267 varTypeIsStruct(op1) // UnboxNullable helper returns a struct.
15271 ---------------------------------------------------------------------------
15274 | opcode \ helper | CORINFO_HELP_UNBOX       | CORINFO_HELP_UNBOX_NULLABLE  |
15275 |                 | (which returns a BYREF)  | (which returns a STRUCT)     |
15277 |---------------------------------------------------------------------------
15278 | UNBOX           | push the BYREF           | spill the STRUCT to a local, |
15279 |                 |                          | push the BYREF to this local |
15280 |---------------------------------------------------------------------------
15281 | UNBOX_ANY       | push a GT_OBJ of         | push the STRUCT;             |
15282 |                 | the BYREF                | for Linux, when the          |
15283 |                 |                          | struct is returned in two    |
15284 |                 |                          | registers, create a temp     |
15285 |                 |                          | whose address is passed to   |
15286 |                 |                          | the unbox_nullable helper    |
15287 |---------------------------------------------------------------------------
15290 if (opcode == CEE_UNBOX)
15292 if (helper == CORINFO_HELP_UNBOX_NULLABLE)
15294 // Unbox nullable helper returns a struct type.
15295 // We need to spill it to a temp so that we can take its address.
15296 // Here we need the unsafe value cls check, since the address of the struct is taken to be used
15297 // further along and could potentially be exploited.
15299 unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a nullable"));
15300 lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */);
15302 op2 = gtNewLclvNode(tmp, TYP_STRUCT);
15303 op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
15304 assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp.
15306 op2 = gtNewLclvNode(tmp, TYP_STRUCT);
15307 op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2);
15308 op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2);
15311 assert(op1->gtType == TYP_BYREF);
15312 assert(!tiVerificationNeeded || tiRetVal.IsByRef());
15316 assert(opcode == CEE_UNBOX_ANY);
15318 if (helper == CORINFO_HELP_UNBOX)
15320 // Normal unbox helper returns a TYP_BYREF.
15321 impPushOnStack(op1, tiRetVal);
15326 assert(helper == CORINFO_HELP_UNBOX_NULLABLE && "Make sure the helper is nullable!");
15328 #if FEATURE_MULTIREG_RET
15330 if (varTypeIsStruct(op1) && IsMultiRegReturnedType(resolvedToken.hClass))
15332 // Unbox nullable helper returns a TYP_STRUCT.
15333 // For the multi-reg case we need to spill it to a temp so that
15334 // we can pass the address to the unbox_nullable jit helper.
15336 unsigned tmp = lvaGrabTemp(true DEBUGARG("UNBOXing a register returnable nullable"));
15337 lvaTable[tmp].lvIsMultiRegArg = true;
15338 lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */);
15340 op2 = gtNewLclvNode(tmp, TYP_STRUCT);
15341 op1 = impAssignStruct(op2, op1, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
15342 assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp.
15344 op2 = gtNewLclvNode(tmp, TYP_STRUCT);
15345 op2 = gtNewOperNode(GT_ADDR, TYP_BYREF, op2);
15346 op1 = gtNewOperNode(GT_COMMA, TYP_BYREF, op1, op2);
15348 // In this case the return value of the unbox helper is TYP_BYREF.
15349 // Make sure the right type is placed on the operand type stack.
15350 impPushOnStack(op1, tiRetVal);
15352 // Load the struct.
15355 assert(op1->gtType == TYP_BYREF);
15356 assert(!tiVerificationNeeded || tiRetVal.IsByRef());
15362 #endif // !FEATURE_MULTIREG_RET
15365 // If the struct is not register passable, it has been materialized in the RetBuf.
15366 assert(op1->gtType == TYP_STRUCT);
15367 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
15368 assert(tiRetVal.IsValueClass());
15372 impPushOnStack(op1, tiRetVal);
15378 /* Get the Class index */
15379 assertImp(sz == sizeof(unsigned));
15381 _impResolveToken(CORINFO_TOKENKIND_Box);
15383 JITDUMP(" %08X", resolvedToken.token);
15385 if (tiVerificationNeeded)
15387 typeInfo tiActual = impStackTop().seTypeInfo;
15388 typeInfo tiBox = verMakeTypeInfo(resolvedToken.hClass);
15390 Verify(verIsBoxable(tiBox), "boxable type expected");
15392 // check the class constraints of the boxed type in case we are boxing an uninitialized value
15393 Verify(info.compCompHnd->satisfiesClassConstraints(resolvedToken.hClass),
15394 "boxed type has unsatisfied class constraints");
15396 Verify(tiCompatibleWith(tiActual, tiBox.NormaliseForStack(), true), "type mismatch");
15398 // Observation: the following code introduces a boxed value class on the stack, but,
15399 // according to the ECMA spec, one would simply expect: tiRetVal =
15400 // typeInfo(TI_REF,impGetObjectClass());
15402 // Push the result back on the stack:
15403 // even if clsHnd is a value class we want the TI_REF;
15404 // we call back to the EE to find out what type we should push (for Nullable<T> we push T)
15405 tiRetVal = typeInfo(TI_REF, info.compCompHnd->getTypeForBox(resolvedToken.hClass));
15408 accessAllowedResult =
15409 info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
15410 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
15412 // Note BOX can be used on things that are not value classes, in which
15413 // case we get a NOP. However the verifier's view of the type on the
15414 // stack changes (in generic code a 'T' becomes a 'boxed T')
15415 if (!eeIsValueClass(resolvedToken.hClass))
15417 JITDUMP("\n Importing BOX(refClass) as NOP\n");
15418 verCurrentState.esStack[verCurrentState.esStackDepth - 1].seTypeInfo = tiRetVal;
15422 // Look ahead for unbox.any
15423 if (codeAddr + (sz + 1 + sizeof(mdToken)) <= codeEndp && codeAddr[sz] == CEE_UNBOX_ANY)
15425 CORINFO_RESOLVED_TOKEN unboxResolvedToken;
15427 impResolveToken(codeAddr + (sz + 1), &unboxResolvedToken, CORINFO_TOKENKIND_Class);
15429 // See if the resolved tokens describe types that are equal.
15430 const TypeCompareState compare =
15431 info.compCompHnd->compareTypesForEquality(unboxResolvedToken.hClass, resolvedToken.hClass);
15433 // If so, box/unbox.any is a nop.
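// For illustration (hypothetical C#): the round trip
//     int j = (int)(object)i;
// emits 'box T' immediately followed by 'unbox.any T'; when both tokens
// resolve to the same type, the pair cancels out and can be skipped.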
15434 if (compare == TypeCompareState::Must)
15436 JITDUMP("\n Importing BOX; UNBOX.ANY as NOP\n");
15437 // Skip the next unbox.any instruction
15438 sz += sizeof(mdToken) + 1;
15443 impImportAndPushBox(&resolvedToken);
15444 if (compDonotInline())
15453 /* Get the Class index */
15454 assertImp(sz == sizeof(unsigned));
15456 _impResolveToken(CORINFO_TOKENKIND_Class);
15458 JITDUMP(" %08X", resolvedToken.token);
15460 if (tiVerificationNeeded)
15462 tiRetVal = typeInfo(TI_INT);
15465 op1 = gtNewIconNode(info.compCompHnd->getClassSize(resolvedToken.hClass));
15466 impPushOnStack(op1, tiRetVal);
15469 case CEE_CASTCLASS:
15471 /* Get the Class index */
15473 assertImp(sz == sizeof(unsigned));
15475 _impResolveToken(CORINFO_TOKENKIND_Casting);
15477 JITDUMP(" %08X", resolvedToken.token);
15479 if (!opts.IsReadyToRun())
15481 op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
15482 if (op2 == nullptr)
15483 { // compDonotInline()
15488 if (tiVerificationNeeded)
15490 Verify(impStackTop().seTypeInfo.IsObjRef(), "object ref expected");
15492 tiRetVal = typeInfo(TI_REF, resolvedToken.hClass);
15495 accessAllowedResult =
15496 info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
15497 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
15499 op1 = impPopStack().val;
15501 /* Pop the address and create the 'checked cast' helper call */
15503 // At this point we expect typeRef to contain the token, op1 to contain the value being cast,
15504 // and op2 to contain code that creates the type handle corresponding to typeRef
15507 GenTree* optTree = impOptimizeCastClassOrIsInst(op1, &resolvedToken, true);
15509 if (optTree != nullptr)
15511 impPushOnStack(optTree, tiRetVal);
15516 #ifdef FEATURE_READYTORUN_COMPILER
15517 if (opts.IsReadyToRun())
15519 GenTreeCall* opLookup =
15520 impReadyToRunHelperToTree(&resolvedToken, CORINFO_HELP_READYTORUN_CHKCAST, TYP_REF,
15521 gtNewArgList(op1));
15522 usingReadyToRunHelper = (opLookup != nullptr);
15523 op1 = (usingReadyToRunHelper ? opLookup : op1);
15525 if (!usingReadyToRunHelper)
15527 // TODO: ReadyToRun: When generic dictionary lookups are necessary, replace the lookup call
15528 // and the chkcastany call with a single call to a dynamic R2R cell that will:
15529 // 1) Load the context
15530 // 2) Perform the generic dictionary lookup and caching, and generate the appropriate
15532 // 3) Check the object on the stack for the type-cast
15533 // Reason: performance (today, we'll always use the slow helper for the R2R generics case)
15535 op2 = impTokenToHandle(&resolvedToken, nullptr, FALSE);
15536 if (op2 == nullptr)
15537 { // compDonotInline()
15543 if (!usingReadyToRunHelper)
15546 op1 = impCastClassOrIsInstToTree(op1, op2, &resolvedToken, true);
15548 if (compDonotInline())
15553 /* Push the result back on the stack */
15554 impPushOnStack(op1, tiRetVal);
15561 if (compIsForInlining())
15563 // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
15564 // TODO: Will this be too strict, given that we will inline many basic blocks?
15565 // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
15567 /* Do we have just the exception on the stack? */
15569 if (verCurrentState.esStackDepth != 1)
15571 /* if not, just don't inline the method */
15573 compInlineResult->NoteFatal(InlineObservation::CALLEE_THROW_WITH_INVALID_STACK);
15578 if (tiVerificationNeeded)
15580 tiRetVal = impStackTop().seTypeInfo;
15581 Verify(tiRetVal.IsObjRef(), "object ref expected");
15582 if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
15584 Verify(!tiRetVal.IsThisPtr(), "throw uninitialized this");
15588 block->bbSetRunRarely(); // any block with a throw is rare
15589 /* Pop the exception object and create the 'throw' helper call */
15591 op1 = gtNewHelperCallNode(CORINFO_HELP_THROW, TYP_VOID, gtNewArgList(impPopStack().val));
15594 if (verCurrentState.esStackDepth > 0)
15596 impEvalSideEffects();
15599 assert(verCurrentState.esStackDepth == 0);
15605 assert(!compIsForInlining());
15607 if (info.compXcptnsCount == 0)
15609 BADCODE("rethrow outside catch");
15612 if (tiVerificationNeeded)
15614 Verify(block->hasHndIndex(), "rethrow outside catch");
15615 if (block->hasHndIndex())
15617 EHblkDsc* HBtab = ehGetDsc(block->getHndIndex());
15618 Verify(!HBtab->HasFinallyOrFaultHandler(), "rethrow in finally or fault");
15619 if (HBtab->HasFilter())
15621 // we better be in the handler clause part, not the filter part
15622 Verify(jitIsBetween(compCurBB->bbCodeOffs, HBtab->ebdHndBegOffs(), HBtab->ebdHndEndOffs()),
15623 "rethrow in filter");
15628 /* Create the 'rethrow' helper call */
15630 op1 = gtNewHelperCallNode(CORINFO_HELP_RETHROW, TYP_VOID);
15636 assertImp(sz == sizeof(unsigned));
15638 _impResolveToken(CORINFO_TOKENKIND_Class);
15640 JITDUMP(" %08X", resolvedToken.token);
15642 if (tiVerificationNeeded)
15644 typeInfo tiTo = impStackTop().seTypeInfo;
15645 typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
15647 Verify(tiTo.IsByRef(), "byref expected");
15648 Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
15650 Verify(tiCompatibleWith(tiInstr, tiTo.DereferenceByRef(), false),
15651 "type operand incompatible with type of address");
15654 size = info.compCompHnd->getClassSize(resolvedToken.hClass); // Size
15655 op2 = gtNewIconNode(0); // Value
15656 op1 = impPopStack().val; // Dest
15657 op1 = gtNewBlockVal(op1, size);
15658 op1 = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, false);
15663 if (tiVerificationNeeded)
15665 Verify(false, "bad opcode");
15668 op3 = impPopStack().val; // Size
15669 op2 = impPopStack().val; // Value
15670 op1 = impPopStack().val; // Dest
15672 if (op3->IsCnsIntOrI())
15674 size = (unsigned)op3->AsIntConCommon()->IconValue();
15675 op1 = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, size);
15679 op1 = new (this, GT_DYN_BLK) GenTreeDynBlk(op1, op3);
15682 op1 = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, false);
15688 if (tiVerificationNeeded)
15690 Verify(false, "bad opcode");
15692 op3 = impPopStack().val; // Size
15693 op2 = impPopStack().val; // Src
15694 op1 = impPopStack().val; // Dest
15696 if (op3->IsCnsIntOrI())
15698 size = (unsigned)op3->AsIntConCommon()->IconValue();
15699 op1 = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, size);
15703 op1 = new (this, GT_DYN_BLK) GenTreeDynBlk(op1, op3);
15706 if (op2->OperGet() == GT_ADDR)
15708 op2 = op2->gtOp.gtOp1;
15712 op2 = gtNewOperNode(GT_IND, TYP_STRUCT, op2);
15715 op1 = gtNewBlkOpNode(op1, op2, size, (prefixFlags & PREFIX_VOLATILE) != 0, true);
15720 assertImp(sz == sizeof(unsigned));
15722 _impResolveToken(CORINFO_TOKENKIND_Class);
15724 JITDUMP(" %08X", resolvedToken.token);
15726 if (tiVerificationNeeded)
15728 typeInfo tiFrom = impStackTop().seTypeInfo;
15729 typeInfo tiTo = impStackTop(1).seTypeInfo;
15730 typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
15732 Verify(tiFrom.IsByRef(), "expected byref source");
15733 Verify(tiTo.IsByRef(), "expected byref destination");
15735 Verify(tiCompatibleWith(tiFrom.DereferenceByRef(), tiInstr, false),
15736 "type of source address incompatible with type operand");
15737 Verify(!tiTo.IsReadonlyByRef(), "write to readonly byref");
15738 Verify(tiCompatibleWith(tiInstr, tiTo.DereferenceByRef(), false),
15739 "type operand incompatible with type of destination address");
15742 if (!eeIsValueClass(resolvedToken.hClass))
15744 op1 = impPopStack().val; // address to load from
15746 impBashVarAddrsToI(op1);
15748 assertImp(genActualType(op1->gtType) == TYP_I_IMPL || op1->gtType == TYP_BYREF);
15750 op1 = gtNewOperNode(GT_IND, TYP_REF, op1);
15751 op1->gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
15753 impPushOnStack(op1, typeInfo());
15754 opcode = CEE_STIND_REF;
15756 goto STIND_POST_VERIFY;
15759 op2 = impPopStack().val; // Src
15760 op1 = impPopStack().val; // Dest
15761 op1 = gtNewCpObjNode(op1, op2, resolvedToken.hClass, ((prefixFlags & PREFIX_VOLATILE) != 0));
15766 assertImp(sz == sizeof(unsigned));
15768 _impResolveToken(CORINFO_TOKENKIND_Class);
15770 JITDUMP(" %08X", resolvedToken.token);
15772 if (eeIsValueClass(resolvedToken.hClass))
15774 lclTyp = TYP_STRUCT;
15781 if (tiVerificationNeeded)
15784 typeInfo tiPtr = impStackTop(1).seTypeInfo;
15786 // Make sure we have a good looking byref
15787 Verify(tiPtr.IsByRef(), "pointer not byref");
15788 Verify(!tiPtr.IsReadonlyByRef(), "write to readonly byref");
15789 if (!tiPtr.IsByRef() || tiPtr.IsReadonlyByRef())
15791 compUnsafeCastUsed = true;
15794 typeInfo ptrVal = DereferenceByRef(tiPtr);
15795 typeInfo argVal = verMakeTypeInfo(resolvedToken.hClass);
15797 if (!tiCompatibleWith(impStackTop(0).seTypeInfo, NormaliseForStack(argVal), true))
15799 Verify(false, "type of value incompatible with type operand");
15800 compUnsafeCastUsed = true;
15803 if (!tiCompatibleWith(argVal, ptrVal, false))
15805 Verify(false, "type operand incompatible with type of address");
15806 compUnsafeCastUsed = true;
15811 compUnsafeCastUsed = true;
15814 if (lclTyp == TYP_REF)
15816 opcode = CEE_STIND_REF;
15817 goto STIND_POST_VERIFY;
15820 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
15821 if (impIsPrimitive(jitTyp))
15823 lclTyp = JITtype2varType(jitTyp);
15824 goto STIND_POST_VERIFY;
15827 op2 = impPopStack().val; // Value
15828 op1 = impPopStack().val; // Ptr
15830 assertImp(varTypeIsStruct(op2));
15832 op1 = impAssignStructPtr(op1, op2, resolvedToken.hClass, (unsigned)CHECK_SPILL_ALL);
15834 if (op1->OperIsBlkOp() && (prefixFlags & PREFIX_UNALIGNED))
15836 op1->gtFlags |= GTF_BLK_UNALIGNED;
15843 assert(!compIsForInlining());
15845 // Being lazy here. Refanys are tricky in terms of gc tracking.
15846 // Since it is uncommon, just don't perform struct promotion in any method that contains mkrefany.
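// For illustration (hypothetical C#): the undocumented '__makeref(x)'
// operator compiles to mkrefany, and the resulting TypedReference embeds a
// byref to x; promoting such structs would require tracking that interior
// pointer, so promotion is simply disabled for the whole method.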
15848 JITDUMP("disabling struct promotion because of mkrefany\n");
15849 fgNoStructPromotion = true;
15851 oper = GT_MKREFANY;
15852 assertImp(sz == sizeof(unsigned));
15854 _impResolveToken(CORINFO_TOKENKIND_Class);
15856 JITDUMP(" %08X", resolvedToken.token);
15858 op2 = impTokenToHandle(&resolvedToken, nullptr, TRUE);
15859 if (op2 == nullptr)
15860 { // compDonotInline()
15864 if (tiVerificationNeeded)
15866 typeInfo tiPtr = impStackTop().seTypeInfo;
15867 typeInfo tiInstr = verMakeTypeInfo(resolvedToken.hClass);
15869 Verify(!verIsByRefLike(tiInstr), "mkrefany of byref-like class");
15870 Verify(!tiPtr.IsReadonlyByRef(), "readonly byref used with mkrefany");
15871 Verify(typeInfo::AreEquivalent(tiPtr.DereferenceByRef(), tiInstr), "type mismatch");
15874 accessAllowedResult =
15875 info.compCompHnd->canAccessClass(&resolvedToken, info.compMethodHnd, &calloutHelper);
15876 impHandleAccessAllowed(accessAllowedResult, &calloutHelper);
15878 op1 = impPopStack().val;
15880 // @SPECVIOLATION: TYP_INT should not be allowed here by a strict reading of the spec.
15881 // But JIT32 allowed it, so we continue to allow it.
15882 assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL || op1->TypeGet() == TYP_INT);
15884 // MKREFANY returns a struct. op2 is the class token.
15885 op1 = gtNewOperNode(oper, TYP_STRUCT, op1, op2);
15887 impPushOnStack(op1, verMakeTypeInfo(impGetRefAnyClass()));
15893 assertImp(sz == sizeof(unsigned));
15895 _impResolveToken(CORINFO_TOKENKIND_Class);
15897 JITDUMP(" %08X", resolvedToken.token);
15901 tiRetVal = verMakeTypeInfo(resolvedToken.hClass);
15903 if (tiVerificationNeeded)
15905 typeInfo tiPtr = impStackTop().seTypeInfo;
15907 // Make sure we have a byref
15908 if (!tiPtr.IsByRef())
15910 Verify(false, "pointer not byref");
15911 compUnsafeCastUsed = true;
15913 typeInfo tiPtrVal = DereferenceByRef(tiPtr);
15915 if (!tiCompatibleWith(tiPtrVal, tiRetVal, false))
15917 Verify(false, "type of address incompatible with type operand");
15918 compUnsafeCastUsed = true;
15920 tiRetVal.NormaliseForStack();
15924 compUnsafeCastUsed = true;
15927 if (eeIsValueClass(resolvedToken.hClass))
15929 lclTyp = TYP_STRUCT;
15934 opcode = CEE_LDIND_REF;
15935 goto LDIND_POST_VERIFY;
15938 op1 = impPopStack().val;
15940 assertImp(op1->TypeGet() == TYP_BYREF || op1->TypeGet() == TYP_I_IMPL);
15942 CorInfoType jitTyp = info.compCompHnd->asCorInfoType(resolvedToken.hClass);
15943 if (impIsPrimitive(jitTyp))
15945 op1 = gtNewOperNode(GT_IND, JITtype2varType(jitTyp), op1);
15947 // Could point anywhere, for example a boxed class static int
15948 op1->gtFlags |= GTF_IND_TGTANYWHERE | GTF_GLOB_REF;
15949 assertImp(varTypeIsArithmetic(op1->gtType));
15953 // OBJ returns a struct
15954 // and takes an inline argument which is the class token of the loaded obj
15955 op1 = gtNewObjNode(resolvedToken.hClass, op1);
15957 op1->gtFlags |= GTF_EXCEPT;
15959 if (prefixFlags & PREFIX_UNALIGNED)
15961 op1->gtFlags |= GTF_IND_UNALIGNED;
15964 impPushOnStack(op1, tiRetVal);
15969 if (tiVerificationNeeded)
15971 typeInfo tiArray = impStackTop().seTypeInfo;
15972 Verify(verIsSDArray(tiArray), "bad array");
15973 tiRetVal = typeInfo(TI_INT);
15976 op1 = impPopStack().val;
15977 if (!opts.MinOpts() && !opts.compDbgCode)
15979 /* Use GT_ARR_LENGTH operator so rng check opts see this */
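// For illustration (hypothetical C#): in
//     for (int i = 0; i < a.Length; i++) { sum += a[i]; }
// representing a.Length as GT_ARR_LENGTH rather than as a raw indirection
// lets the range check optimizer prove the bounds check on a[i] redundant.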
15980 GenTreeArrLen* arrLen = gtNewArrLen(TYP_INT, op1, OFFSETOF__CORINFO_Array__length);
15982 /* Mark the block as containing a length expression */
15984 if (op1->gtOper == GT_LCL_VAR)
15986 block->bbFlags |= BBF_HAS_IDX_LEN;
15993 /* Create the expression "*(array_addr + ArrLenOffs)" */
15994 op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
15995 gtNewIconNode(OFFSETOF__CORINFO_Array__length, TYP_I_IMPL));
15996 op1 = gtNewIndir(TYP_INT, op1);
15999 /* Push the result back on the stack */
16000 impPushOnStack(op1, tiRetVal);
16004 op1 = gtNewHelperCallNode(CORINFO_HELP_USER_BREAKPOINT, TYP_VOID);
16008 if (opts.compDbgCode)
16010 op1 = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
16015 /******************************** NYI *******************************/
16018 OutputDebugStringA("CLR: Invalid x86 breakpoint in IL stream\n");
16021 case CEE_MACRO_END:
16024 BADCODE3("unknown opcode", ": %02X", (int)opcode);
16028 prevOpcode = opcode;
16034 #undef _impResolveToken
16037 #pragma warning(pop)
// Push a local/argument tree on the operand stack
16041 void Compiler::impPushVar(GenTree* op, typeInfo tiRetVal)
16043 tiRetVal.NormaliseForStack();
16045 if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init) && tiRetVal.IsThisPtr())
16047 tiRetVal.SetUninitialisedObjRef();
16050 impPushOnStack(op, tiRetVal);
16053 // Load a local/argument on the operand stack
// lclNum is an index into lvaTable, *NOT* the arg/lcl index in the IL
16055 void Compiler::impLoadVar(unsigned lclNum, IL_OFFSET offset, typeInfo tiRetVal)
16059 if (lvaTable[lclNum].lvNormalizeOnLoad())
16061 lclTyp = lvaGetRealType(lclNum);
16065 lclTyp = lvaGetActualType(lclNum);
16068 impPushVar(gtNewLclvNode(lclNum, lclTyp, offset), tiRetVal);
16071 // Load an argument on the operand stack
16072 // Shared by the various CEE_LDARG opcodes
16073 // ilArgNum is the argument index as specified in IL.
16074 // It will be mapped to the correct lvaTable index
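// For example (hypothetical): in an instance method with a hidden return-buffer
// parameter, IL arg 1 does not correspond to lvaTable slot 1, since lvaTable also
// contains 'this' and the hidden RetBuf; compMapILargNum below performs that mapping.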
16075 void Compiler::impLoadArg(unsigned ilArgNum, IL_OFFSET offset)
16077 Verify(ilArgNum < info.compILargsCount, "bad arg num");
16079 if (compIsForInlining())
16081 if (ilArgNum >= info.compArgsCount)
16083 compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_ARGUMENT_NUMBER);
16087 impPushVar(impInlineFetchArg(ilArgNum, impInlineInfo->inlArgInfo, impInlineInfo->lclVarInfo),
16088 impInlineInfo->lclVarInfo[ilArgNum].lclVerTypeInfo);
16092 if (ilArgNum >= info.compArgsCount)
16097 unsigned lclNum = compMapILargNum(ilArgNum); // account for possible hidden param
16099 if (lclNum == info.compThisArg)
16101 lclNum = lvaArg0Var;
16104 impLoadVar(lclNum, offset);
16108 // Load a local on the operand stack
16109 // Shared by the various CEE_LDLOC opcodes
16110 // ilLclNum is the local index as specified in IL.
16111 // It will be mapped to the correct lvaTable index
16112 void Compiler::impLoadLoc(unsigned ilLclNum, IL_OFFSET offset)
16114 if (tiVerificationNeeded)
16116 Verify(ilLclNum < info.compMethodInfo->locals.numArgs, "bad loc num");
16117 Verify(info.compInitMem, "initLocals not set");
16120 if (compIsForInlining())
16122 if (ilLclNum >= info.compMethodInfo->locals.numArgs)
16124 compInlineResult->NoteFatal(InlineObservation::CALLEE_BAD_LOCAL_NUMBER);
16128 // Get the local type
16129 var_types lclTyp = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclTypeInfo;
16131 typeInfo tiRetVal = impInlineInfo->lclVarInfo[ilLclNum + impInlineInfo->argCnt].lclVerTypeInfo;
16133 /* Have we allocated a temp for this local? */
16135 unsigned lclNum = impInlineFetchLocal(ilLclNum DEBUGARG("Inline ldloc first use temp"));
16137 // All vars of inlined methods should be !lvNormalizeOnLoad()
16139 assert(!lvaTable[lclNum].lvNormalizeOnLoad());
16140 lclTyp = genActualType(lclTyp);
16142 impPushVar(gtNewLclvNode(lclNum, lclTyp), tiRetVal);
16146 if (ilLclNum >= info.compMethodInfo->locals.numArgs)
16151 unsigned lclNum = info.compArgsCount + ilLclNum;
16153 impLoadVar(lclNum, offset);
16157 #ifdef _TARGET_ARM_
16158 /**************************************************************************************
16160 * When assigning a vararg call src to a HFA lcl dest, mark that we cannot promote the
16161 * dst struct, because struct promotion will turn it into a float/double variable while
the rhs will be an int/long variable. We do not generate code for assigning an int into
a float, but nothing would prevent such an assignment from being created here. The tree,
however, would look like: (=, (typ_float, typ_int)) or (GT_TRANSFER, (typ_float, typ_int))
16166 * tmpNum - the lcl dst variable num that is a struct.
16167 * src - the src tree assigned to the dest that is a struct/int (when varargs call.)
16168 * hClass - the type handle for the struct variable.
16170 * TODO-ARM-CQ: [301608] This is a rare scenario with varargs and struct promotion coming into play,
16171 * however, we could do a codegen of transferring from int to float registers
16172 * (transfer, not a cast.)
16175 void Compiler::impMarkLclDstNotPromotable(unsigned tmpNum, GenTree* src, CORINFO_CLASS_HANDLE hClass)
16177 if (src->gtOper == GT_CALL && src->gtCall.IsVarargs() && IsHfa(hClass))
16179 int hfaSlots = GetHfaCount(hClass);
16180 var_types hfaType = GetHfaType(hClass);
// If we have varargs, the importer morphs the method's return type to be "int" irrespective of its
// original type (struct/float), because the ABI calls for the return to be made in integer registers.
16184 // We don't want struct promotion to replace an expression like this:
16185 // lclFld_int = callvar_int() into lclFld_float = callvar_int();
16186 // This means an int is getting assigned to a float without a cast. Prevent the promotion.
16187 if ((hfaType == TYP_DOUBLE && hfaSlots == sizeof(double) / REGSIZE_BYTES) ||
16188 (hfaType == TYP_FLOAT && hfaSlots == sizeof(float) / REGSIZE_BYTES))
16190 // Make sure this struct type stays as struct so we can receive the call in a struct.
16191 lvaTable[tmpNum].lvIsMultiRegRet = true;
16195 #endif // _TARGET_ARM_
16197 //------------------------------------------------------------------------
// impAssignSmallStructTypeToVar: ensure that calls returning small structs whose
// sizes are not supported integral type sizes return their values via temps.
16202 // op -- call returning a small struct in a register
16203 // hClass -- class handle for struct
16206 // Tree with reference to struct local to use as call return value.
16209 // The call will be spilled into a preceding statement.
16210 // Currently handles struct returns for 3, 5, 6, and 7 byte structs.
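// For example (a hypothetical layout): a 3-byte struct such as
//
//     struct S { char a; char b; char c; };
//
// is returned in a register but has no exactly-matching integral type
// (1, 2, 4, or 8 bytes), so the call is spilled to a temp and the temp
// is referenced in place of the call's return value.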
16212 GenTree* Compiler::impAssignSmallStructTypeToVar(GenTree* op, CORINFO_CLASS_HANDLE hClass)
16214 unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Return value temp for small struct return."));
16215 impAssignTempGen(tmpNum, op, hClass, (unsigned)CHECK_SPILL_ALL);
16216 GenTree* ret = gtNewLclvNode(tmpNum, lvaTable[tmpNum].lvType);
16218 // TODO-1stClassStructs: Handle constant propagation and CSE-ing of small struct returns.
16219 ret->gtFlags |= GTF_DONT_CSE;
16224 #if FEATURE_MULTIREG_RET
16225 //------------------------------------------------------------------------
16226 // impAssignMultiRegTypeToVar: ensure calls that return structs in multiple
16227 // registers return values to suitable temps.
// op -- call returning a struct in registers
16231 // hClass -- class handle for struct
16234 // Tree with reference to struct local to use as call return value.
16236 GenTree* Compiler::impAssignMultiRegTypeToVar(GenTree* op, CORINFO_CLASS_HANDLE hClass)
16238 unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Return value temp for multireg return."));
16239 impAssignTempGen(tmpNum, op, hClass, (unsigned)CHECK_SPILL_ALL);
16240 GenTree* ret = gtNewLclvNode(tmpNum, lvaTable[tmpNum].lvType);
16242 // TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
16243 ret->gtFlags |= GTF_DONT_CSE;
16245 assert(IsMultiRegReturnedType(hClass));
16247 // Mark the var so that fields are not promoted and stay together.
16248 lvaTable[tmpNum].lvIsMultiRegRet = true;
16252 #endif // FEATURE_MULTIREG_RET
// Do the import for a return instruction.
// Returns false if inlining was aborted.
// The opcode can be CEE_RET, or a call opcode in the case of a tail.call.
16257 bool Compiler::impReturnInstruction(BasicBlock* block, int prefixFlags, OPCODE& opcode)
16259 if (tiVerificationNeeded)
16261 verVerifyThisPtrInitialised();
16263 unsigned expectedStack = 0;
16264 if (info.compRetType != TYP_VOID)
16266 typeInfo tiVal = impStackTop().seTypeInfo;
16267 typeInfo tiDeclared =
16268 verMakeTypeInfo(info.compMethodInfo->args.retType, info.compMethodInfo->args.retTypeClass);
16270 Verify(!verIsByRefLike(tiDeclared) || verIsSafeToReturnByRef(tiVal), "byref return");
16272 Verify(tiCompatibleWith(tiVal, tiDeclared.NormaliseForStack(), true), "type mismatch");
16275 Verify(verCurrentState.esStackDepth == expectedStack, "stack non-empty on return");
16279 // If we are importing an inlinee and have GC ref locals we always
16280 // need to have a spill temp for the return value. This temp
16281 // should have been set up in advance, over in fgFindBasicBlocks.
16282 if (compIsForInlining() && impInlineInfo->HasGcRefLocals() && (info.compRetType != TYP_VOID))
16284 assert(lvaInlineeReturnSpillTemp != BAD_VAR_NUM);
16288 GenTree* op2 = nullptr;
16289 GenTree* op1 = nullptr;
16290 CORINFO_CLASS_HANDLE retClsHnd = nullptr;
16292 if (info.compRetType != TYP_VOID)
16294 StackEntry se = impPopStack();
16295 retClsHnd = se.seTypeInfo.GetClassHandle();
16298 if (!compIsForInlining())
16300 impBashVarAddrsToI(op2);
16301 op2 = impImplicitIorI4Cast(op2, info.compRetType);
16302 op2 = impImplicitR4orR8Cast(op2, info.compRetType);
16303 assertImp((genActualType(op2->TypeGet()) == genActualType(info.compRetType)) ||
16304 ((op2->TypeGet() == TYP_I_IMPL) && (info.compRetType == TYP_BYREF)) ||
16305 ((op2->TypeGet() == TYP_BYREF) && (info.compRetType == TYP_I_IMPL)) ||
16306 (varTypeIsFloating(op2->gtType) && varTypeIsFloating(info.compRetType)) ||
16307 (varTypeIsStruct(op2) && varTypeIsStruct(info.compRetType)));
16310 if (opts.compGcChecks && info.compRetType == TYP_REF)
16312 // DDB 3483 : JIT Stress: early termination of GC ref's life time in exception code path
16313 // VSW 440513: Incorrect gcinfo on the return value under COMPlus_JitGCChecks=1 for methods with
16316 assert(op2->gtType == TYP_REF);
16318 // confirm that the argument is a GC pointer (for debugging (GC stress))
16319 GenTreeArgList* args = gtNewArgList(op2);
16320 op2 = gtNewHelperCallNode(CORINFO_HELP_CHECK_OBJ, TYP_REF, args);
16324 printf("\ncompGcChecks tree:\n");
16332 // inlinee's stack should be empty now.
16333 assert(verCurrentState.esStackDepth == 0);
16338 printf("\n\n Inlinee Return expression (before normalization) =>\n");
16343 // Make sure the type matches the original call.
16345 var_types returnType = genActualType(op2->gtType);
16346 var_types originalCallType = impInlineInfo->inlineCandidateInfo->fncRetType;
16347 if ((returnType != originalCallType) && (originalCallType == TYP_STRUCT))
16349 originalCallType = impNormStructType(impInlineInfo->inlineCandidateInfo->methInfo.args.retTypeClass);
16352 if (returnType != originalCallType)
16354 // Allow TYP_BYREF to be returned as TYP_I_IMPL and vice versa
16355 if (((returnType == TYP_BYREF) && (originalCallType == TYP_I_IMPL)) ||
16356 ((returnType == TYP_I_IMPL) && (originalCallType == TYP_BYREF)))
16358 JITDUMP("Allowing return type mismatch: have %s, needed %s\n", varTypeName(returnType),
16359 varTypeName(originalCallType));
16363 JITDUMP("Return type mismatch: have %s, needed %s\n", varTypeName(returnType),
16364 varTypeName(originalCallType));
16365 compInlineResult->NoteFatal(InlineObservation::CALLSITE_RETURN_TYPE_MISMATCH);
16370 // Below, we are going to set impInlineInfo->retExpr to the tree with the return
16371 // expression. At this point, retExpr could already be set if there are multiple
16372 // return blocks (meaning fgNeedReturnSpillTemp() == true) and one of
16373 // the other blocks already set it. If there is only a single return block,
16374 // retExpr shouldn't be set. However, this is not true if we reimport a block
16375 // with a return. In that case, retExpr will be set, then the block will be
16376 // reimported, but retExpr won't get cleared as part of setting the block to
16377 // be reimported. The reimported retExpr value should be the same, so even if
16378 // we don't unconditionally overwrite it, it shouldn't matter.
16379 if (info.compRetNativeType != TYP_STRUCT)
16381 // compRetNativeType is not TYP_STRUCT.
// This implies it could be a scalar type, a SIMD vector type, or
// a struct type that can be normalized to a scalar type.
16385 if (varTypeIsStruct(info.compRetType))
16387 noway_assert(info.compRetBuffArg == BAD_VAR_NUM);
// Adjust the type away from struct to integral; no normalization is needed.
16390 op2 = impFixupStructReturnType(op2, retClsHnd);
16394 // Do we have to normalize?
16395 var_types fncRealRetType = JITtype2varType(info.compMethodInfo->args.retType);
16396 if ((varTypeIsSmall(op2->TypeGet()) || varTypeIsSmall(fncRealRetType)) &&
16397 fgCastNeeded(op2, fncRealRetType))
16399 // Small-typed return values are normalized by the callee
16400 op2 = gtNewCastNode(TYP_INT, op2, false, fncRealRetType);
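// For example (hypothetical): a method declared to return 'int8' whose computed
// result is a full-width TYP_INT gets a CAST(int8) here, because the callee,
// not the caller, is responsible for normalizing small-typed return values.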
16404 if (fgNeedReturnSpillTemp())
16406 assert(info.compRetNativeType != TYP_VOID &&
16407 (fgMoreThanOneReturnBlock() || impInlineInfo->HasGcRefLocals()));
16409 // If this method returns a ref type, track the actual types seen
16411 if (info.compRetType == TYP_REF)
16413 bool isExact = false;
16414 bool isNonNull = false;
16415 CORINFO_CLASS_HANDLE returnClsHnd = gtGetClassHandle(op2, &isExact, &isNonNull);
16417 if (impInlineInfo->retExpr == nullptr)
16419 // This is the first return, so best known type is the type
16420 // of this return value.
16421 impInlineInfo->retExprClassHnd = returnClsHnd;
16422 impInlineInfo->retExprClassHndIsExact = isExact;
16424 else if (impInlineInfo->retExprClassHnd != returnClsHnd)
16426 // This return site type differs from earlier seen sites,
16427 // so reset the info and we'll fall back to using the method's
16428 // declared return type for the return spill temp.
16429 impInlineInfo->retExprClassHnd = nullptr;
16430 impInlineInfo->retExprClassHndIsExact = false;
16434 // This is a bit of a workaround...
16435 // If we are inlining a call that returns a struct, where the actual "native" return type is
16436 // not a struct (for example, the struct is composed of exactly one int, and the native
16437 // return type is thus an int), and the inlinee has multiple return blocks (thus,
// fgNeedReturnSpillTemp() == true, and lvaInlineeReturnSpillTemp is the index of a local var that is set
16439 // to the *native* return type), and at least one of the return blocks is the result of
16440 // a call, then we have a problem. The situation is like this (from a failed test case):
16443 // // Note: valuetype plinq_devtests.LazyTests/LIX is a struct with only a single int
16444 // call !!0 [mscorlib]System.Threading.LazyInitializer::EnsureInitialized<valuetype
16445 // plinq_devtests.LazyTests/LIX>(!!0&, bool&, object&, class [mscorlib]System.Func`1<!!0>)
16449 // ldobj !!T // this gets bashed to a GT_LCL_FLD, type TYP_INT
16452 // call !!0 System.Threading.LazyInitializer::EnsureInitializedCore<!!0>(!!0&, bool&,
16453 // object&, class System.Func`1<!!0>)
16456 // In the code above, when we call impFixupStructReturnType(), we will change the op2 return type
16457 // of the inlinee return node, but we don't do that for GT_CALL nodes, which we delay until
16458 // morphing when we call fgFixupStructReturn(). We do this, apparently, to handle nested
16459 // inlining properly by leaving the correct type on the GT_CALL node through importing.
16461 // To fix this, for this case, we temporarily change the GT_CALL node type to the
16462 // native return type, which is what it will be set to eventually. We generate the
16463 // assignment to the return temp, using the correct type, and then restore the GT_CALL
16464 // node type. During morphing, the GT_CALL will get the correct, final, native return type.
16466 bool restoreType = false;
16467 if ((op2->OperGet() == GT_CALL) && (info.compRetType == TYP_STRUCT))
16469 noway_assert(op2->TypeGet() == TYP_STRUCT);
16470 op2->gtType = info.compRetNativeType;
16471 restoreType = true;
16474 impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(),
16475 (unsigned)CHECK_SPILL_ALL);
16477 GenTree* tmpOp2 = gtNewLclvNode(lvaInlineeReturnSpillTemp, op2->TypeGet());
16481 op2->gtType = TYP_STRUCT; // restore it to what it was
16487 if (impInlineInfo->retExpr)
16489 // Some other block(s) have seen the CEE_RET first.
// They had better have spilled to the same temp.
16491 assert(impInlineInfo->retExpr->gtOper == GT_LCL_VAR);
16492 assert(impInlineInfo->retExpr->gtLclVarCommon.gtLclNum == op2->gtLclVarCommon.gtLclNum);
16500 printf("\n\n Inlinee Return expression (after normalization) =>\n");
16505 // Report the return expression
16506 impInlineInfo->retExpr = op2;
16510 // compRetNativeType is TYP_STRUCT.
// This implies a struct return via a RetBuf arg or a multi-reg struct return.
16513 GenTreeCall* iciCall = impInlineInfo->iciCall->AsCall();
16515 // Assign the inlinee return into a spill temp.
16516 // spill temp only exists if there are multiple return points
16517 if (lvaInlineeReturnSpillTemp != BAD_VAR_NUM)
16519 // in this case we have to insert multiple struct copies to the temp
16520 // and the retexpr is just the temp.
16521 assert(info.compRetNativeType != TYP_VOID);
16522 assert(fgMoreThanOneReturnBlock() || impInlineInfo->HasGcRefLocals());
16524 impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(),
16525 (unsigned)CHECK_SPILL_ALL);
16528 #if defined(_TARGET_ARM_) || defined(UNIX_AMD64_ABI)
16529 #if defined(_TARGET_ARM_)
16530 // TODO-ARM64-NYI: HFA
// TODO-AMD64-Unix and TODO-ARM: once the ARM64 functionality is implemented, the
// next ifdefs could be refactored into a single method with the ifdef inside.
16533 if (IsHfa(retClsHnd))
16535 // Same as !IsHfa but just don't bother with impAssignStructPtr.
16536 #else // defined(UNIX_AMD64_ABI)
16537 ReturnTypeDesc retTypeDesc;
16538 retTypeDesc.InitializeStructReturnType(this, retClsHnd);
16539 unsigned retRegCount = retTypeDesc.GetReturnRegCount();
16541 if (retRegCount != 0)
16543 // If single eightbyte, the return type would have been normalized and there won't be a temp var.
// This code will be called only if the struct return has not been normalized (i.e. 2 eightbytes - the max allowed).
16546 assert(retRegCount == MAX_RET_REG_COUNT);
16547 // Same as !structDesc.passedInRegisters but just don't bother with impAssignStructPtr.
16548 CLANG_FORMAT_COMMENT_ANCHOR;
16549 #endif // defined(UNIX_AMD64_ABI)
16551 if (fgNeedReturnSpillTemp())
16553 if (!impInlineInfo->retExpr)
16555 #if defined(_TARGET_ARM_)
16556 impInlineInfo->retExpr = gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType);
16557 #else // defined(UNIX_AMD64_ABI)
16558 // The inlinee compiler has figured out the type of the temp already. Use it here.
16559 impInlineInfo->retExpr =
16560 gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType);
16561 #endif // defined(UNIX_AMD64_ABI)
16566 impInlineInfo->retExpr = op2;
16570 #elif defined(_TARGET_ARM64_)
16571 ReturnTypeDesc retTypeDesc;
16572 retTypeDesc.InitializeStructReturnType(this, retClsHnd);
16573 unsigned retRegCount = retTypeDesc.GetReturnRegCount();
16575 if (retRegCount != 0)
16577 assert(!iciCall->HasRetBufArg());
16578 assert(retRegCount >= 2);
16579 if (fgNeedReturnSpillTemp())
16581 if (!impInlineInfo->retExpr)
16583 // The inlinee compiler has figured out the type of the temp already. Use it here.
16584 impInlineInfo->retExpr =
16585 gtNewLclvNode(lvaInlineeReturnSpillTemp, lvaTable[lvaInlineeReturnSpillTemp].lvType);
16590 impInlineInfo->retExpr = op2;
16594 #endif // defined(_TARGET_ARM64_)
16596 assert(iciCall->HasRetBufArg());
16597 GenTree* dest = gtCloneExpr(iciCall->gtCallArgs->gtOp.gtOp1);
16598 // spill temp only exists if there are multiple return points
16599 if (fgNeedReturnSpillTemp())
16601 // if this is the first return we have seen set the retExpr
16602 if (!impInlineInfo->retExpr)
16604 impInlineInfo->retExpr =
16605 impAssignStructPtr(dest, gtNewLclvNode(lvaInlineeReturnSpillTemp, info.compRetType),
16606 retClsHnd, (unsigned)CHECK_SPILL_ALL);
16611 impInlineInfo->retExpr = impAssignStructPtr(dest, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL);
16618 if (compIsForInlining())
16623 if (info.compRetType == TYP_VOID)
16626 op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID);
16628 else if (info.compRetBuffArg != BAD_VAR_NUM)
16630 // Assign value to return buff (first param)
16631 GenTree* retBuffAddr = gtNewLclvNode(info.compRetBuffArg, TYP_BYREF, impCurStmtOffs);
16633 op2 = impAssignStructPtr(retBuffAddr, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL);
16634 impAppendTree(op2, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
16636 // There are cases where the address of the implicit RetBuf should be returned explicitly (in RAX).
16637 CLANG_FORMAT_COMMENT_ANCHOR;
16639 #if defined(_TARGET_AMD64_)
// The x64 (System V and Win64) calling conventions require us to
// return the implicit return buffer explicitly (in RAX).
// Change the return type to be BYREF.
16644 op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF));
16645 #else // !defined(_TARGET_AMD64_)
// On non-AMD64 targets, the profiler hook requires us to return the implicit RetBuf explicitly (in RAX).
// In that case the return value of the function is changed to BYREF.
16648 // If profiler hook is not needed the return type of the function is TYP_VOID.
16649 if (compIsProfilerHookNeeded())
16651 op1 = gtNewOperNode(GT_RETURN, TYP_BYREF, gtNewLclvNode(info.compRetBuffArg, TYP_BYREF));
16656 op1 = new (this, GT_RETURN) GenTreeOp(GT_RETURN, TYP_VOID);
16658 #endif // !defined(_TARGET_AMD64_)
16660 else if (varTypeIsStruct(info.compRetType))
16662 #if !FEATURE_MULTIREG_RET
// For both ARM architectures the HFA native types are maintained as structs.
// On System V AMD64, multireg struct returns are likewise left as structs.
16665 noway_assert(info.compRetNativeType != TYP_STRUCT);
16667 op2 = impFixupStructReturnType(op2, retClsHnd);
16669 op1 = gtNewOperNode(GT_RETURN, genActualType(info.compRetNativeType), op2);
16674 op1 = gtNewOperNode(GT_RETURN, genActualType(info.compRetType), op2);
16677 // We must have imported a tailcall and jumped to RET
16678 if (prefixFlags & PREFIX_TAILCALL)
16680 #if defined(FEATURE_CORECLR) || !defined(_TARGET_AMD64_)
// This cannot be asserted on Amd64 since we permit the following IL pattern:
//      tail.call
//      pop
//      ret
16686 assert(verCurrentState.esStackDepth == 0 && impOpcodeIsCallOpcode(opcode));
16687 #endif // FEATURE_CORECLR || !_TARGET_AMD64_
16689 opcode = CEE_RET; // To prevent trying to spill if CALL_SITE_BOUNDARIES
16691 // impImportCall() would have already appended TYP_VOID calls
16692 if (info.compRetType == TYP_VOID)
16698 impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
16700 // Remember at which BC offset the tree was finished
16701 impNoteLastILoffs();
16706 /*****************************************************************************
16707 * Mark the block as unimported.
16708 * Note that the caller is responsible for calling impImportBlockPending(),
16709 * with the appropriate stack-state
16712 inline void Compiler::impReimportMarkBlock(BasicBlock* block)
16715 if (verbose && (block->bbFlags & BBF_IMPORTED))
16717 printf("\n" FMT_BB " will be reimported\n", block->bbNum);
16721 block->bbFlags &= ~BBF_IMPORTED;
16724 /*****************************************************************************
16725 * Mark the successors of the given block as unimported.
16726 * Note that the caller is responsible for calling impImportBlockPending()
16727 * for all the successors, with the appropriate stack-state.
16730 void Compiler::impReimportMarkSuccessors(BasicBlock* block)
16732 const unsigned numSuccs = block->NumSucc();
16733 for (unsigned i = 0; i < numSuccs; i++)
16735 impReimportMarkBlock(block->GetSucc(i));
16739 /*****************************************************************************
* Filter wrapper to handle only the passed-in exception code
16745 LONG FilterVerificationExceptions(PEXCEPTION_POINTERS pExceptionPointers, LPVOID lpvParam)
16747 if (pExceptionPointers->ExceptionRecord->ExceptionCode == SEH_VERIFICATION_EXCEPTION)
16749 return EXCEPTION_EXECUTE_HANDLER;
16752 return EXCEPTION_CONTINUE_SEARCH;
16755 void Compiler::impVerifyEHBlock(BasicBlock* block, bool isTryStart)
16757 assert(block->hasTryIndex());
16758 assert(!compIsForInlining());
16760 unsigned tryIndex = block->getTryIndex();
16761 EHblkDsc* HBtab = ehGetDsc(tryIndex);
16765 assert(block->bbFlags & BBF_TRY_BEG);
16767 // The Stack must be empty
16769 if (block->bbStkDepth != 0)
16771 BADCODE("Evaluation stack must be empty on entry into a try block");
16775 // Save the stack contents, we'll need to restore it later
16777 SavedStack blockState;
16778 impSaveStackState(&blockState, false);
16780 while (HBtab != nullptr)
// Are we verifying that an instance constructor properly initializes its 'this' pointer once?
// We do not allow the 'this' pointer to be uninitialized when entering most kinds of try regions.
16787 if (verTrackObjCtorInitState && (verCurrentState.thisInitialized != TIS_Init))
16789 // We trigger an invalid program exception here unless we have a try/fault region.
16791 if (HBtab->HasCatchHandler() || HBtab->HasFinallyHandler() || HBtab->HasFilter())
"The 'this' pointer of an instance constructor is not initialized upon entry to a try region");
16798 // Allow a try/fault region to proceed.
16799 assert(HBtab->HasFaultHandler());
16803 /* Recursively process the handler block */
16804 BasicBlock* hndBegBB = HBtab->ebdHndBeg;
16806 // Construct the proper verification stack state
16807 // either empty or one that contains just
16808 // the Exception Object that we are dealing with
16810 verCurrentState.esStackDepth = 0;
16812 if (handlerGetsXcptnObj(hndBegBB->bbCatchTyp))
16814 CORINFO_CLASS_HANDLE clsHnd;
16816 if (HBtab->HasFilter())
16818 clsHnd = impGetObjectClass();
16822 CORINFO_RESOLVED_TOKEN resolvedToken;
16824 resolvedToken.tokenContext = impTokenLookupContextHandle;
16825 resolvedToken.tokenScope = info.compScopeHnd;
16826 resolvedToken.token = HBtab->ebdTyp;
16827 resolvedToken.tokenType = CORINFO_TOKENKIND_Class;
16828 info.compCompHnd->resolveToken(&resolvedToken);
16830 clsHnd = resolvedToken.hClass;
// Push the catch arg on the stack; spill to a temp if necessary.
16834 // Note: can update HBtab->ebdHndBeg!
16835 hndBegBB = impPushCatchArgOnStack(hndBegBB, clsHnd, false);
16838 // Queue up the handler for importing
16840 impImportBlockPending(hndBegBB);
16842 if (HBtab->HasFilter())
/* @VERIFICATION : Ideally the end-of-filter state should get
   propagated to the catch handler. This is an incompleteness,
   but not a security/compliance issue, since the only
   interesting state is the 'thisInit' state.
16850 verCurrentState.esStackDepth = 0;
16852 BasicBlock* filterBB = HBtab->ebdFilter;
// Push the catch arg on the stack; spill to a temp if necessary.
16855 // Note: can update HBtab->ebdFilter!
16856 const bool isSingleBlockFilter = (filterBB->bbNext == hndBegBB);
16857 filterBB = impPushCatchArgOnStack(filterBB, impGetObjectClass(), isSingleBlockFilter);
16859 impImportBlockPending(filterBB);
16862 else if (verTrackObjCtorInitState && HBtab->HasFaultHandler())
16864 /* Recursively process the handler block */
16866 verCurrentState.esStackDepth = 0;
16868 // Queue up the fault handler for importing
16870 impImportBlockPending(HBtab->ebdHndBeg);
16873 // Now process our enclosing try index (if any)
16875 tryIndex = HBtab->ebdEnclosingTryIndex;
16876 if (tryIndex == EHblkDsc::NO_ENCLOSING_INDEX)
16882 HBtab = ehGetDsc(tryIndex);
16886 // Restore the stack contents
16887 impRestoreStackState(&blockState);
16890 //***************************************************************
16891 // Import the instructions for the given basic block. Perform
16892 // verification, throwing an exception on failure. Push any successor blocks that are enabled for the first
16893 // time, or whose verification pre-state is changed.
16896 #pragma warning(push)
16897 #pragma warning(disable : 21000) // Suppress PREFast warning about overly large function
16899 void Compiler::impImportBlock(BasicBlock* block)
16901 // BBF_INTERNAL blocks only exist during importation due to EH canonicalization. We need to
16902 // handle them specially. In particular, there is no IL to import for them, but we do need
16903 // to mark them as imported and put their successors on the pending import list.
16904 if (block->bbFlags & BBF_INTERNAL)
16906 JITDUMP("Marking BBF_INTERNAL block " FMT_BB " as BBF_IMPORTED\n", block->bbNum);
16907 block->bbFlags |= BBF_IMPORTED;
16909 const unsigned numSuccs = block->NumSucc();
16910 for (unsigned i = 0; i < numSuccs; i++)
16912 impImportBlockPending(block->GetSucc(i));
/* Make the block globally available */
16927 /* Initialize the debug variables */
16928 impCurOpcName = "unknown";
16929 impCurOpcOffs = block->bbCodeOffs;
16932 /* Set the current stack state to the merged result */
16933 verResetCurrentState(block, &verCurrentState);
16935 /* Now walk the code and import the IL into GenTrees */
16937 struct FilterVerificationExceptionsParam
16942 FilterVerificationExceptionsParam param;
16944 param.pThis = this;
16945 param.block = block;
16947 PAL_TRY(FilterVerificationExceptionsParam*, pParam, ¶m)
16949 /* @VERIFICATION : For now, the only state propagation from try
to its handler is the "thisInit" state (the stack is empty at the start of a try).
16951 In general, for state that we track in verification, we need to
16952 model the possibility that an exception might happen at any IL
16953 instruction, so we really need to merge all states that obtain
16954 between IL instructions in a try block into the start states of
However, we do not allow the 'this' pointer to be uninitialized when
entering most kinds of try regions (only try/fault is allowed to have
an uninitialized 'this' pointer on entry to the try)
16961 Fortunately, the stack is thrown away when an exception
16962 leads to a handler, so we don't have to worry about that.
16963 We DO, however, have to worry about the "thisInit" state.
16964 But only for the try/fault case.
16966 The only allowed transition is from TIS_Uninit to TIS_Init.
16968 So for a try/fault region for the fault handler block
16969 we will merge the start state of the try begin
16970 and the post-state of each block that is part of this try region
16973 // merge the start state of the try begin
16975 if (pParam->block->bbFlags & BBF_TRY_BEG)
16977 pParam->pThis->impVerifyEHBlock(pParam->block, true);
16980 pParam->pThis->impImportBlockCode(pParam->block);
16982 // As discussed above:
16983 // merge the post-state of each block that is part of this try region
16985 if (pParam->block->hasTryIndex())
16987 pParam->pThis->impVerifyEHBlock(pParam->block, false);
16990 PAL_EXCEPT_FILTER(FilterVerificationExceptions)
16992 verHandleVerificationFailure(block DEBUGARG(false));
16996 if (compDonotInline())
17001 assert(!compDonotInline());
17003 markImport = false;
17007 unsigned baseTmp = NO_BASE_TMP; // input temps assigned to successor blocks
17008 bool reimportSpillClique = false;
17009 BasicBlock* tgtBlock = nullptr;
17011 /* If the stack is non-empty, we might have to spill its contents */
17013 if (verCurrentState.esStackDepth != 0)
17015 impBoxTemp = BAD_VAR_NUM; // if a box temp is used in a block that leaves something
// on the stack, its lifetime is hard to determine, so simply
17017 // don't reuse such temps.
17019 GenTree* addStmt = nullptr;
/* Do the successors of 'block' have any other predecessors?
17022 We do not want to do some of the optimizations related to multiRef
17023 if we can reimport blocks */
17025 unsigned multRef = impCanReimport ? unsigned(~0) : 0;
17027 switch (block->bbJumpKind)
17031 /* Temporarily remove the 'jtrue' from the end of the tree list */
17033 assert(impTreeLast);
17034 assert(impTreeLast->gtOper == GT_STMT);
17035 assert(impTreeLast->gtStmt.gtStmtExpr->gtOper == GT_JTRUE);
17037 addStmt = impTreeLast;
17038 impTreeLast = impTreeLast->gtPrev;
17040 /* Note if the next block has more than one ancestor */
17042 multRef |= block->bbNext->bbRefs;
17044 /* Does the next block have temps assigned? */
17046 baseTmp = block->bbNext->bbStkTempsIn;
17047 tgtBlock = block->bbNext;
17049 if (baseTmp != NO_BASE_TMP)
17054 /* Try the target of the jump then */
17056 multRef |= block->bbJumpDest->bbRefs;
17057 baseTmp = block->bbJumpDest->bbStkTempsIn;
17058 tgtBlock = block->bbJumpDest;
17062 multRef |= block->bbJumpDest->bbRefs;
17063 baseTmp = block->bbJumpDest->bbStkTempsIn;
17064 tgtBlock = block->bbJumpDest;
17068 multRef |= block->bbNext->bbRefs;
17069 baseTmp = block->bbNext->bbStkTempsIn;
17070 tgtBlock = block->bbNext;
17075 BasicBlock** jmpTab;
17078 /* Temporarily remove the GT_SWITCH from the end of the tree list */
17080 assert(impTreeLast);
17081 assert(impTreeLast->gtOper == GT_STMT);
17082 assert(impTreeLast->gtStmt.gtStmtExpr->gtOper == GT_SWITCH);
17084 addStmt = impTreeLast;
17085 impTreeLast = impTreeLast->gtPrev;
17087 jmpCnt = block->bbJumpSwt->bbsCount;
17088 jmpTab = block->bbJumpSwt->bbsDstTab;
17092 tgtBlock = (*jmpTab);
17094 multRef |= tgtBlock->bbRefs;
17096 // Thanks to spill cliques, we should have assigned all or none
17097 assert((baseTmp == NO_BASE_TMP) || (baseTmp == tgtBlock->bbStkTempsIn));
17098 baseTmp = tgtBlock->bbStkTempsIn;
17103 } while (++jmpTab, --jmpCnt);
17107 case BBJ_CALLFINALLY:
17108 case BBJ_EHCATCHRET:
17110 case BBJ_EHFINALLYRET:
17111 case BBJ_EHFILTERRET:
17113 NO_WAY("can't have 'unreached' end of BB with non-empty stack");
17117 noway_assert(!"Unexpected bbJumpKind");
17121 assert(multRef >= 1);
17123 /* Do we have a base temp number? */
17125 bool newTemps = (baseTmp == NO_BASE_TMP);
17129 /* Grab enough temps for the whole stack */
17130 baseTmp = impGetSpillTmpBase(block);
17133 /* Spill all stack entries into temps */
17134 unsigned level, tempNum;
17136 JITDUMP("\nSpilling stack entries into temps\n");
17137 for (level = 0, tempNum = baseTmp; level < verCurrentState.esStackDepth; level++, tempNum++)
17139 GenTree* tree = verCurrentState.esStack[level].val;
17141 /* VC generates code where it pushes a byref from one branch, and an int (ldc.i4 0) from
17142 the other. This should merge to a byref in unverifiable code.
17143 However, if the branch which leaves the TYP_I_IMPL on the stack is imported first, the
17144 successor would be imported assuming there was a TYP_I_IMPL on
17145 the stack. Thus the value would not get GC-tracked. Hence,
17146 change the temp to TYP_BYREF and reimport the successors.
17147 Note: We should only allow this in unverifiable code.
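   For illustration (hypothetical IL for the two branches):

       ldc.i4.0         // path 1: pushes an int 0
       br.s MERGE
   PATH2:
       ldloca.s 0       // path 2: pushes a byref
   MERGE:               // the shared spill temp must be TYP_BYREF so the value stays GC-tracked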
17149 if (tree->gtType == TYP_BYREF && lvaTable[tempNum].lvType == TYP_I_IMPL && !verNeedsVerification())
17151 lvaTable[tempNum].lvType = TYP_BYREF;
17152 impReimportMarkSuccessors(block);
17156 #ifdef _TARGET_64BIT_
17157 if (genActualType(tree->gtType) == TYP_I_IMPL && lvaTable[tempNum].lvType == TYP_INT)
17159 if (tiVerificationNeeded && tgtBlock->bbEntryState != nullptr &&
17160 (tgtBlock->bbFlags & BBF_FAILED_VERIFICATION) == 0)
17162 // Merge the current state into the entry state of block;
17163 // the call to verMergeEntryStates must have changed
17164 // the entry state of the block by merging the int local var
17165 // and the native-int stack entry.
17166 bool changed = false;
17167 if (verMergeEntryStates(tgtBlock, &changed))
17169 impRetypeEntryStateTemps(tgtBlock);
17170 impReimportBlockPending(tgtBlock);
17175 tgtBlock->bbFlags |= BBF_FAILED_VERIFICATION;
17180 // Some other block in the spill clique set this to "int", but now we have "native int".
17181 // Change the type and go back to re-import any blocks that used the wrong type.
17182 lvaTable[tempNum].lvType = TYP_I_IMPL;
17183 reimportSpillClique = true;
17185 else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_I_IMPL)
17187 // Spill clique has decided this should be "native int", but this block only pushes an "int".
17188 // Insert a sign-extension to "native int" so we match the clique.
17189 verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, false, TYP_I_IMPL);
17192 // Consider the case where one branch left a 'byref' on the stack and the other leaves
17193 // an 'int'. On 32-bit, this is allowed (in non-verifiable code) since they are the same
17194 // size. JIT64 managed to make this work on 64-bit. For compatibility, we support JIT64
17195 // behavior instead of asserting and then generating bad code (where we save/restore the
17196 // low 32 bits of a byref pointer to an 'int' sized local). If the 'int' side has been
17197 // imported already, we need to change the type of the local and reimport the spill clique.
17198 // If the 'byref' side has imported, we insert a cast from int to 'native int' to match
17199 // the 'byref' size.
17200 if (!tiVerificationNeeded)
17202 if (genActualType(tree->gtType) == TYP_BYREF && lvaTable[tempNum].lvType == TYP_INT)
17204 // Some other block in the spill clique set this to "int", but now we have "byref".
17205 // Change the type and go back to re-import any blocks that used the wrong type.
17206 lvaTable[tempNum].lvType = TYP_BYREF;
17207 reimportSpillClique = true;
17209 else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_BYREF)
17211 // Spill clique has decided this should be "byref", but this block only pushes an "int".
17212 // Insert a sign-extension to "native int" so we match the clique size.
17213 verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, false, TYP_I_IMPL);
17216 #endif // _TARGET_64BIT_
17218 if (tree->gtType == TYP_DOUBLE && lvaTable[tempNum].lvType == TYP_FLOAT)
17220 // Some other block in the spill clique set this to "float", but now we have "double".
17221 // Change the type and go back to re-import any blocks that used the wrong type.
17222 lvaTable[tempNum].lvType = TYP_DOUBLE;
17223 reimportSpillClique = true;
17225 else if (tree->gtType == TYP_FLOAT && lvaTable[tempNum].lvType == TYP_DOUBLE)
17227 // Spill clique has decided this should be "double", but this block only pushes a "float".
17228 // Insert a cast to "double" so we match the clique.
17229 verCurrentState.esStack[level].val = gtNewCastNode(TYP_DOUBLE, tree, false, TYP_DOUBLE);
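// For illustration (hypothetical): if one predecessor pushes "ldc.r4 1" (float)
// onto the shared stack slot and another pushes "ldc.r8 1" (double), the spill
// temp must be double, and the float-pushing blocks get the cast above so every
// member of the clique agrees on the temp's type.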
17232 /* If addStmt has a reference to tempNum (can only happen if we
17233 are spilling to the temps already used by a previous block),
17234 we need to spill addStmt */
17236 if (addStmt && !newTemps && gtHasRef(addStmt->gtStmt.gtStmtExpr, tempNum, false))
17238 GenTree* addTree = addStmt->gtStmt.gtStmtExpr;
17240 if (addTree->gtOper == GT_JTRUE)
17242 GenTree* relOp = addTree->gtOp.gtOp1;
17243 assert(relOp->OperIsCompare());
17245 var_types type = genActualType(relOp->gtOp.gtOp1->TypeGet());
17247 if (gtHasRef(relOp->gtOp.gtOp1, tempNum, false))
17249 unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op1"));
17250 impAssignTempGen(temp, relOp->gtOp.gtOp1, level);
17251 type = genActualType(lvaTable[temp].TypeGet());
17252 relOp->gtOp.gtOp1 = gtNewLclvNode(temp, type);
17255 if (gtHasRef(relOp->gtOp.gtOp2, tempNum, false))
17257 unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt JTRUE ref Op2"));
17258 impAssignTempGen(temp, relOp->gtOp.gtOp2, level);
17259 type = genActualType(lvaTable[temp].TypeGet());
17260 relOp->gtOp.gtOp2 = gtNewLclvNode(temp, type);
17265 assert(addTree->gtOper == GT_SWITCH && genActualTypeIsIntOrI(addTree->gtOp.gtOp1->TypeGet()));
17267 unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt SWITCH"));
17268 impAssignTempGen(temp, addTree->gtOp.gtOp1, level);
17269 addTree->gtOp.gtOp1 = gtNewLclvNode(temp, genActualType(addTree->gtOp.gtOp1->TypeGet()));
17273 /* Spill the stack entry, and replace with the temp */
17275 if (!impSpillStackEntry(level, tempNum
17278 true, "Spill Stack Entry"
17284 BADCODE("bad stack state");
17287 // Oops. Something went wrong when spilling. Bad code.
17288 verHandleVerificationFailure(block DEBUGARG(true));
17294 /* Put back the 'jtrue'/'switch' if we removed it earlier */
17298 impAppendStmt(addStmt, (unsigned)CHECK_SPILL_NONE);
17302 // Some of the append/spill logic works on compCurBB
17304 assert(compCurBB == block);
17306 /* Save the tree list in the block */
17307 impEndTreeList(block);
17309 // impEndTreeList sets BBF_IMPORTED on the block
17310 // We do *NOT* want to set it later than this because
17311 // impReimportSpillClique might clear it if this block is both a
17312 // predecessor and successor in the current spill clique
17313 assert(block->bbFlags & BBF_IMPORTED);
// If we had an int/native int, or float/double collision, we need to re-import
17316 if (reimportSpillClique)
17318 // This will re-import all the successors of block (as well as each of their predecessors)
17319 impReimportSpillClique(block);
17321 // For blocks that haven't been imported yet, we still need to mark them as pending import.
17322 const unsigned numSuccs = block->NumSucc();
17323 for (unsigned i = 0; i < numSuccs; i++)
17325 BasicBlock* succ = block->GetSucc(i);
17326 if ((succ->bbFlags & BBF_IMPORTED) == 0)
17328 impImportBlockPending(succ);
17332 else // the normal case
17334 // otherwise just import the successors of block
17336 /* Does this block jump to any other blocks? */
17337 const unsigned numSuccs = block->NumSucc();
17338 for (unsigned i = 0; i < numSuccs; i++)
17340 impImportBlockPending(block->GetSucc(i));
17345 #pragma warning(pop)
17348 /*****************************************************************************/
17350 // Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if
17351 // necessary (and ensures that it is a member of the set of BB's on the list, by setting its byte in
17352 // impPendingBlockMembers). Merges the current verification state into the verification state of "block"
17353 // (its "pre-state").
17355 void Compiler::impImportBlockPending(BasicBlock* block)
17360 printf("\nimpImportBlockPending for " FMT_BB "\n", block->bbNum);
17364 // We will add a block to the pending set if it has not already been imported (or needs to be re-imported),
17365 // or if it has, but merging in a predecessor's post-state changes the block's pre-state.
17366 // (When we're doing verification, we always attempt the merge to detect verification errors.)
17368 // If the block has not been imported, add to pending set.
17369 bool addToPending = ((block->bbFlags & BBF_IMPORTED) == 0);
17371 // Initialize bbEntryState just the first time we try to add this block to the pending list
// Just because bbEntryState is NULL doesn't mean the pre-state wasn't previously set.
17373 // We use NULL to indicate the 'common' state to avoid memory allocation
17374 if ((block->bbEntryState == nullptr) && ((block->bbFlags & (BBF_IMPORTED | BBF_FAILED_VERIFICATION)) == 0) &&
17375 (impGetPendingBlockMember(block) == 0))
17377 verInitBBEntryState(block, &verCurrentState);
17378 assert(block->bbStkDepth == 0);
17379 block->bbStkDepth = static_cast<unsigned short>(verCurrentState.esStackDepth);
17380 assert(addToPending);
17381 assert(impGetPendingBlockMember(block) == 0);
17385 // The stack should have the same height on entry to the block from all its predecessors.
17386 if (block->bbStkDepth != verCurrentState.esStackDepth)
17390 sprintf_s(buffer, sizeof(buffer),
17391 "Block at offset %4.4x to %4.4x in %s entered with different stack depths.\n"
17392 "Previous depth was %d, current depth is %d",
17393 block->bbCodeOffs, block->bbCodeOffsEnd, info.compFullName, block->bbStkDepth,
17394 verCurrentState.esStackDepth);
17395 buffer[400 - 1] = 0;
17398 NO_WAY("Block entered with different stack depths");
17402 // Additionally, if we need to verify, merge the verification state.
17403 if (tiVerificationNeeded)
17405 // Merge the current state into the entry state of block; if this does not change the entry state
17406 // by merging, do not add the block to the pending-list.
17407 bool changed = false;
17408 if (!verMergeEntryStates(block, &changed))
17410 block->bbFlags |= BBF_FAILED_VERIFICATION;
17411 addToPending = true; // We will pop it off, and check the flag set above.
17415 addToPending = true;
17417 JITDUMP("Adding " FMT_BB " to pending set due to new merge result\n", block->bbNum);
17426 if (block->bbStkDepth > 0)
17428 // We need to fix the types of any spill temps that might have changed:
17429 // int->native int, float->double, int->byref, etc.
17430 impRetypeEntryStateTemps(block);
17433 // OK, we must add to the pending list, if it's not already in it.
17434 if (impGetPendingBlockMember(block) != 0)
17440 // Get an entry to add to the pending list
17444 if (impPendingFree)
17446 // We can reuse one of the freed up dscs.
17447 dsc = impPendingFree;
17448 impPendingFree = dsc->pdNext;
17452 // We have to create a new dsc
17453 dsc = new (this, CMK_Unknown) PendingDsc;
17457 dsc->pdSavedStack.ssDepth = verCurrentState.esStackDepth;
17458 dsc->pdThisPtrInit = verCurrentState.thisInitialized;
17460 // Save the stack trees for later
17462 if (verCurrentState.esStackDepth)
17464 impSaveStackState(&dsc->pdSavedStack, false);
17467 // Add the entry to the pending list
17469 dsc->pdNext = impPendingList;
17470 impPendingList = dsc;
17471 impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set.
// Various assertions require us to now consider the block as not imported (at least for
17474 // the final time...)
17475 block->bbFlags &= ~BBF_IMPORTED;
17480 printf("Added PendingDsc - %08p for " FMT_BB "\n", dspPtr(dsc), block->bbNum);
17485 /*****************************************************************************/
17487 // Ensures that "block" is a member of the list of BBs waiting to be imported, pushing it on the list if
17488 // necessary (and ensures that it is a member of the set of BB's on the list, by setting its byte in
17489 // impPendingBlockMembers). Does *NOT* change the existing "pre-state" of the block.
17491 void Compiler::impReimportBlockPending(BasicBlock* block)
17493 JITDUMP("\nimpReimportBlockPending for " FMT_BB, block->bbNum);
17495 assert(block->bbFlags & BBF_IMPORTED);
17497 // OK, we must add to the pending list, if it's not already in it.
17498 if (impGetPendingBlockMember(block) != 0)
17503 // Get an entry to add to the pending list
17507 if (impPendingFree)
17509 // We can reuse one of the freed up dscs.
17510 dsc = impPendingFree;
17511 impPendingFree = dsc->pdNext;
17515 // We have to create a new dsc
17516 dsc = new (this, CMK_ImpStack) PendingDsc;
17521 if (block->bbEntryState)
17523 dsc->pdThisPtrInit = block->bbEntryState->thisInitialized;
17524 dsc->pdSavedStack.ssDepth = block->bbEntryState->esStackDepth;
17525 dsc->pdSavedStack.ssTrees = block->bbEntryState->esStack;
17529 dsc->pdThisPtrInit = TIS_Bottom;
17530 dsc->pdSavedStack.ssDepth = 0;
17531 dsc->pdSavedStack.ssTrees = nullptr;
17534 // Add the entry to the pending list
17536 dsc->pdNext = impPendingList;
17537 impPendingList = dsc;
17538 impSetPendingBlockMember(block, 1); // And indicate that it's now a member of the set.
// Various assertions require us to now consider the block as not imported (at least for
17541 // the final time...)
17542 block->bbFlags &= ~BBF_IMPORTED;
17547 printf("Added PendingDsc - %08p for " FMT_BB "\n", dspPtr(dsc), block->bbNum);
17552 void* Compiler::BlockListNode::operator new(size_t sz, Compiler* comp)
17554 if (comp->impBlockListNodeFreeList == nullptr)
17556 return comp->getAllocator(CMK_BasicBlock).allocate<BlockListNode>(1);
17560 BlockListNode* res = comp->impBlockListNodeFreeList;
17561 comp->impBlockListNodeFreeList = res->m_next;
17566 void Compiler::FreeBlockListNode(Compiler::BlockListNode* node)
17568 node->m_next = impBlockListNodeFreeList;
17569 impBlockListNodeFreeList = node;
17572 void Compiler::impWalkSpillCliqueFromPred(BasicBlock* block, SpillCliqueWalker* callback)
17576 noway_assert(!fgComputePredsDone);
17577 if (!fgCheapPredsValid)
17579 fgComputeCheapPreds();
17582 BlockListNode* succCliqueToDo = nullptr;
17583 BlockListNode* predCliqueToDo = new (this) BlockListNode(block);
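// For illustration (a hypothetical flowgraph): if B1 and B2 both branch to B3
// with a non-empty stack, and B2 also branches to B4, then starting from
// predecessor B1 the walk finds successor B3, B3's predecessors pull in B2,
// and B2's successors pull in B4:
//
//     B1 --> B3 <-- B2 --> B4
//
// The ping-ponging between the two worklists below closes the clique:
// preds = { B1, B2 }, succs = { B3, B4 }, all sharing one set of spill temps.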
17587 // Look at the successors of every member of the predecessor to-do list.
17588 while (predCliqueToDo != nullptr)
17590 BlockListNode* node = predCliqueToDo;
17591 predCliqueToDo = node->m_next;
17592 BasicBlock* blk = node->m_blk;
17593 FreeBlockListNode(node);
17595 const unsigned numSuccs = blk->NumSucc();
17596 for (unsigned succNum = 0; succNum < numSuccs; succNum++)
17598 BasicBlock* succ = blk->GetSucc(succNum);
17599 // If it's not already in the clique, add it, and also add it
17600 // as a member of the successor "toDo" set.
17601 if (impSpillCliqueGetMember(SpillCliqueSucc, succ) == 0)
17603 callback->Visit(SpillCliqueSucc, succ);
17604 impSpillCliqueSetMember(SpillCliqueSucc, succ, 1);
17605 succCliqueToDo = new (this) BlockListNode(succ, succCliqueToDo);
17610 // Look at the predecessors of every member of the successor to-do list.
17611 while (succCliqueToDo != nullptr)
17613 BlockListNode* node = succCliqueToDo;
17614 succCliqueToDo = node->m_next;
17615 BasicBlock* blk = node->m_blk;
17616 FreeBlockListNode(node);
17618 for (BasicBlockList* pred = blk->bbCheapPreds; pred != nullptr; pred = pred->next)
17620 BasicBlock* predBlock = pred->block;
17621 // If it's not already in the clique, add it, and also add it
17622 // as a member of the predecessor "toDo" set.
17623 if (impSpillCliqueGetMember(SpillCliquePred, predBlock) == 0)
17625 callback->Visit(SpillCliquePred, predBlock);
17626 impSpillCliqueSetMember(SpillCliquePred, predBlock, 1);
17627 predCliqueToDo = new (this) BlockListNode(predBlock, predCliqueToDo);
// If this fails, it means we didn't walk the spill clique properly and somehow managed
// to miss walking back to include the predecessor we started from.
// The most likely cause: missing or out-of-date bbPreds.
17637 assert(impSpillCliqueGetMember(SpillCliquePred, block) != 0);
17640 void Compiler::SetSpillTempsBase::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk)
17642 if (predOrSucc == SpillCliqueSucc)
17644 assert(blk->bbStkTempsIn == NO_BASE_TMP); // Should not already be a member of a clique as a successor.
17645 blk->bbStkTempsIn = m_baseTmp;
17649 assert(predOrSucc == SpillCliquePred);
17650 assert(blk->bbStkTempsOut == NO_BASE_TMP); // Should not already be a member of a clique as a predecessor.
17651 blk->bbStkTempsOut = m_baseTmp;
17655 void Compiler::ReimportSpillClique::Visit(SpillCliqueDir predOrSucc, BasicBlock* blk)
17657 // For Preds we could be a little smarter and just find the existing store
17658 // and re-type it/add a cast, but that is complicated and hopefully very rare, so
17659 // just re-import the whole block (just like we do for successors)
17661 if (((blk->bbFlags & BBF_IMPORTED) == 0) && (m_pComp->impGetPendingBlockMember(blk) == 0))
17663 // If we haven't imported this block and we're not going to (because it isn't on
17664 // the pending list) then just ignore it for now.
17666 // This block has either never been imported (EntryState == NULL) or it failed
17667 // verification. Neither state requires us to force it to be imported now.
17668 assert((blk->bbEntryState == nullptr) || (blk->bbFlags & BBF_FAILED_VERIFICATION));
17672 // For successors we have a valid verCurrentState, so just mark them for reimport
17673 // the 'normal' way
17674 // Unlike predecessors, we *DO* need to reimport the current block because the
17675 // initial import had the wrong entry state types.
17676 // Similarly, blocks that are currently on the pending list, still need to call
17677 // impImportBlockPending to fixup their entry state.
17678 if (predOrSucc == SpillCliqueSucc)
17680 m_pComp->impReimportMarkBlock(blk);
17682 // Set the current stack state to that of the blk->bbEntryState
17683 m_pComp->verResetCurrentState(blk, &m_pComp->verCurrentState);
17684 assert(m_pComp->verCurrentState.thisInitialized == blk->bbThisOnEntry());
17686 m_pComp->impImportBlockPending(blk);
17688 else if ((blk != m_pComp->compCurBB) && ((blk->bbFlags & BBF_IMPORTED) != 0))
// As described above, we are only visiting predecessors so they can
// add the appropriate casts. Since we have already done that for the current
// block, it does not need to be reimported.
// Nor do we need to reimport blocks that are still pending, but not yet imported.
17696 // For predecessors, we have no state to seed the EntryState, so we just have
17697 // to assume the existing one is correct.
17698 // If the block is also a successor, it will get the EntryState properly
17699 // updated when it is visited as a successor in the above "if" block.
17700 assert(predOrSucc == SpillCliquePred);
17701 m_pComp->impReimportBlockPending(blk);
17705 // Re-type the incoming lclVar nodes to match the varDsc.
17706 void Compiler::impRetypeEntryStateTemps(BasicBlock* blk)
17708 if (blk->bbEntryState != nullptr)
17710 EntryState* es = blk->bbEntryState;
17711 for (unsigned level = 0; level < es->esStackDepth; level++)
17713 GenTree* tree = es->esStack[level].val;
17714 if ((tree->gtOper == GT_LCL_VAR) || (tree->gtOper == GT_LCL_FLD))
17716 unsigned lclNum = tree->gtLclVarCommon.gtLclNum;
17717 noway_assert(lclNum < lvaCount);
17718 LclVarDsc* varDsc = lvaTable + lclNum;
17719 es->esStack[level].val->gtType = varDsc->TypeGet();
17725 unsigned Compiler::impGetSpillTmpBase(BasicBlock* block)
17727 if (block->bbStkTempsOut != NO_BASE_TMP)
17729 return block->bbStkTempsOut;
17735 printf("\n*************** In impGetSpillTmpBase(" FMT_BB ")\n", block->bbNum);
17739 // Otherwise, choose one, and propagate to all members of the spill clique.
17740 // Grab enough temps for the whole stack.
17741 unsigned baseTmp = lvaGrabTemps(verCurrentState.esStackDepth DEBUGARG("IL Stack Entries"));
17742 SetSpillTempsBase callback(baseTmp);
17744 // We do *NOT* need to reset the SpillClique*Members because a block can only be the predecessor
// to one spill clique, and similarly can only be the successor to one spill clique
17746 impWalkSpillCliqueFromPred(block, &callback);
17751 void Compiler::impReimportSpillClique(BasicBlock* block)
17756 printf("\n*************** In impReimportSpillClique(" FMT_BB ")\n", block->bbNum);
17760 // If we get here, it is because this block is already part of a spill clique
17761 // and one predecessor had an outgoing live stack slot of type int, and this
17762 // block has an outgoing live stack slot of type native int.
17763 // We need to reset these before traversal because they have already been set
17764 // by the previous walk to determine all the members of the spill clique.
17765 impInlineRoot()->impSpillCliquePredMembers.Reset();
17766 impInlineRoot()->impSpillCliqueSuccMembers.Reset();
17768 ReimportSpillClique callback(this);
17770 impWalkSpillCliqueFromPred(block, &callback);
17773 // Set the pre-state of "block" (which should not have a pre-state allocated) to
17774 // a copy of "srcState", cloning tree pointers as required.
17775 void Compiler::verInitBBEntryState(BasicBlock* block, EntryState* srcState)
17777 if (srcState->esStackDepth == 0 && srcState->thisInitialized == TIS_Bottom)
17779 block->bbEntryState = nullptr;
17783 block->bbEntryState = getAllocator(CMK_Unknown).allocate<EntryState>(1);
17785 // block->bbEntryState.esRefcount = 1;
17787 block->bbEntryState->esStackDepth = srcState->esStackDepth;
17788 block->bbEntryState->thisInitialized = TIS_Bottom;
17790 if (srcState->esStackDepth > 0)
17792 block->bbSetStack(new (this, CMK_Unknown) StackEntry[srcState->esStackDepth]);
17793 unsigned stackSize = srcState->esStackDepth * sizeof(StackEntry);
17795 memcpy(block->bbEntryState->esStack, srcState->esStack, stackSize);
17796 for (unsigned level = 0; level < srcState->esStackDepth; level++)
17798 GenTree* tree = srcState->esStack[level].val;
17799 block->bbEntryState->esStack[level].val = gtCloneExpr(tree);
17803 if (verTrackObjCtorInitState)
17805 verSetThisInit(block, srcState->thisInitialized);
17811 void Compiler::verSetThisInit(BasicBlock* block, ThisInitState tis)
17813 assert(tis != TIS_Bottom); // Precondition.
17814 if (block->bbEntryState == nullptr)
17816 block->bbEntryState = new (this, CMK_Unknown) EntryState();
17819 block->bbEntryState->thisInitialized = tis;
17823 * Resets the current state to the state at the start of the basic block
17825 void Compiler::verResetCurrentState(BasicBlock* block, EntryState* destState)
17828 if (block->bbEntryState == nullptr)
17830 destState->esStackDepth = 0;
17831 destState->thisInitialized = TIS_Bottom;
17835 destState->esStackDepth = block->bbEntryState->esStackDepth;
17837 if (destState->esStackDepth > 0)
17839 unsigned stackSize = destState->esStackDepth * sizeof(StackEntry);
17841 memcpy(destState->esStack, block->bbStackOnEntry(), stackSize);
17844 destState->thisInitialized = block->bbThisOnEntry();
17849 ThisInitState BasicBlock::bbThisOnEntry()
17851 return bbEntryState ? bbEntryState->thisInitialized : TIS_Bottom;
17854 unsigned BasicBlock::bbStackDepthOnEntry()
17856 return (bbEntryState ? bbEntryState->esStackDepth : 0);
17859 void BasicBlock::bbSetStack(void* stackBuffer)
17861 assert(bbEntryState);
17862 assert(stackBuffer);
17863 bbEntryState->esStack = (StackEntry*)stackBuffer;
17866 StackEntry* BasicBlock::bbStackOnEntry()
17868 assert(bbEntryState);
17869 return bbEntryState->esStack;
17872 void Compiler::verInitCurrentState()
17874 verTrackObjCtorInitState = FALSE;
17875 verCurrentState.thisInitialized = TIS_Bottom;
17877 if (tiVerificationNeeded)
17879 // Track this ptr initialization
17880 if (!info.compIsStatic && (info.compFlags & CORINFO_FLG_CONSTRUCTOR) && lvaTable[0].lvVerTypeInfo.IsObjRef())
17882 verTrackObjCtorInitState = TRUE;
17883 verCurrentState.thisInitialized = TIS_Uninit;
17887 // initialize stack info
17889 verCurrentState.esStackDepth = 0;
17890 assert(verCurrentState.esStack != nullptr);
17892 // copy current state to entry state of first BB
17893 verInitBBEntryState(fgFirstBB, &verCurrentState);
17896 Compiler* Compiler::impInlineRoot()
17898 if (impInlineInfo == nullptr)
          return this; // not inlining: this compiler is the root of the inline tree
17904 return impInlineInfo->InlineRoot;
17908 BYTE Compiler::impSpillCliqueGetMember(SpillCliqueDir predOrSucc, BasicBlock* blk)
17910 if (predOrSucc == SpillCliquePred)
17912 return impInlineRoot()->impSpillCliquePredMembers.Get(blk->bbInd());
17916 assert(predOrSucc == SpillCliqueSucc);
17917 return impInlineRoot()->impSpillCliqueSuccMembers.Get(blk->bbInd());
17921 void Compiler::impSpillCliqueSetMember(SpillCliqueDir predOrSucc, BasicBlock* blk, BYTE val)
17923 if (predOrSucc == SpillCliquePred)
17925 impInlineRoot()->impSpillCliquePredMembers.Set(blk->bbInd(), val);
17929 assert(predOrSucc == SpillCliqueSucc);
17930 impInlineRoot()->impSpillCliqueSuccMembers.Set(blk->bbInd(), val);
17934 /*****************************************************************************
17936 * Convert the instrs ("import") into our internal format (trees). The
17937 * basic flowgraph has already been constructed and is passed in.
17940 void Compiler::impImport(BasicBlock* method)
17945 printf("*************** In impImport() for %s\n", info.compFullName);
17949 Compiler* inlineRoot = impInlineRoot();
17951 if (info.compMaxStack <= SMALL_STACK_SIZE)
17953 impStkSize = SMALL_STACK_SIZE;
17957 impStkSize = info.compMaxStack;
17960 if (this == inlineRoot)
17962 // Allocate the stack contents
17963 verCurrentState.esStack = new (this, CMK_ImpStack) StackEntry[impStkSize];
17967 // This is the inlinee compiler, steal the stack from the inliner compiler
17968 // (after ensuring that it is large enough).
17969 if (inlineRoot->impStkSize < impStkSize)
17971 inlineRoot->impStkSize = impStkSize;
17972 inlineRoot->verCurrentState.esStack = new (this, CMK_ImpStack) StackEntry[impStkSize];
17975 verCurrentState.esStack = inlineRoot->verCurrentState.esStack;
17978 // initialize the entry state at start of method
17979 verInitCurrentState();
17981 // Initialize stuff related to figuring "spill cliques" (see spec comment for impGetSpillTmpBase).
17982 if (this == inlineRoot) // These are only used on the root of the inlining tree.
17984 // We have initialized these previously, but to size 0. Make them larger.
17985 impPendingBlockMembers.Init(getAllocator(), fgBBNumMax * 2);
17986 impSpillCliquePredMembers.Init(getAllocator(), fgBBNumMax * 2);
17987 impSpillCliqueSuccMembers.Init(getAllocator(), fgBBNumMax * 2);
17989 inlineRoot->impPendingBlockMembers.Reset(fgBBNumMax * 2);
17990 inlineRoot->impSpillCliquePredMembers.Reset(fgBBNumMax * 2);
17991 inlineRoot->impSpillCliqueSuccMembers.Reset(fgBBNumMax * 2);
17992 impBlockListNodeFreeList = nullptr;
17995 impLastILoffsStmt = nullptr;
17996 impNestedStackSpill = false;
17998 impBoxTemp = BAD_VAR_NUM;
18000 impPendingList = impPendingFree = nullptr;
18002 /* Add the entry-point to the worker-list */
18004 // Skip leading internal blocks. There can be one as a leading scratch BB, and more
18005 // from EH normalization.
18006 // NOTE: It might be possible to always just put fgFirstBB on the pending list, and let everything else just fall out.
18008 for (; method->bbFlags & BBF_INTERNAL; method = method->bbNext)
18010 // Treat these as imported.
18011 assert(method->bbJumpKind == BBJ_NONE); // We assume all the leading ones are fallthrough.
18012 JITDUMP("Marking leading BBF_INTERNAL block " FMT_BB " as BBF_IMPORTED\n", method->bbNum);
18013 method->bbFlags |= BBF_IMPORTED;
18016 impImportBlockPending(method);
18018 /* Import blocks in the worker-list until there are no more */
18020 while (impPendingList)
18022 /* Remove the entry at the front of the list */
18024 PendingDsc* dsc = impPendingList;
18025 impPendingList = impPendingList->pdNext;
18026 impSetPendingBlockMember(dsc->pdBB, 0);
18028 /* Restore the stack state */
18030 verCurrentState.thisInitialized = dsc->pdThisPtrInit;
18031 verCurrentState.esStackDepth = dsc->pdSavedStack.ssDepth;
18032 if (verCurrentState.esStackDepth)
18034 impRestoreStackState(&dsc->pdSavedStack);
18037 /* Add the entry to the free list for reuse */
18039 dsc->pdNext = impPendingFree;
18040 impPendingFree = dsc;
18042 /* Now import the block */
18044 if (dsc->pdBB->bbFlags & BBF_FAILED_VERIFICATION)
18047 #ifdef _TARGET_64BIT_
18048 // On AMD64, during verification we have to match JIT64 behavior since the VM is very tightly
18049 // coupled with the JIT64 IL Verification logic. Look inside verHandleVerificationFailure
18050 // method for further explanation on why we raise this exception instead of making the jitted
18051 // code throw the verification exception during execution.
18052 if (tiVerificationNeeded && opts.jitFlags->IsSet(JitFlags::JIT_FLAG_IMPORT_ONLY))
18054 BADCODE("Basic block marked as not verifiable");
18057 #endif // _TARGET_64BIT_
18059 verConvertBBToThrowVerificationException(dsc->pdBB DEBUGARG(true));
18060 impEndTreeList(dsc->pdBB);
18065 impImportBlock(dsc->pdBB);
18067 if (compDonotInline())
18071 if (compIsForImportOnly() && !tiVerificationNeeded)
18079 if (verbose && info.compXcptnsCount)
18081 printf("\nAfter impImport() added block for try,catch,finally");
18082 fgDispBasicBlocks();
18086 // Used in impImportBlockPending() for STRESS_CHK_REIMPORT
18087 for (BasicBlock* block = fgFirstBB; block; block = block->bbNext)
18089 block->bbFlags &= ~BBF_VISITED;
18093 assert(!compIsForInlining() || !tiVerificationNeeded);
18096 // Checks if a typeinfo (usually stored in the type stack) is a struct.
18097 // The invariant here is that if it's not a ref or a method and has a class handle
18098 // it's a valuetype
18099 bool Compiler::impIsValueType(typeInfo* pTypeInfo)
18101 return (pTypeInfo != nullptr) && pTypeInfo->IsValueClassWithClsHnd();
18111 /*****************************************************************************
18112 * Check to see if the tree is the address of a local or
18113 the address of a field in a local.
18115 *lclVarTreeOut will contain the GT_LCL_VAR tree when it returns TRUE.
18119 BOOL Compiler::impIsAddressInLocal(GenTree* tree, GenTree** lclVarTreeOut)
18121 if (tree->gtOper != GT_ADDR)
18126 GenTree* op = tree->gtOp.gtOp1;
18127 while (op->gtOper == GT_FIELD)
18129 op = op->gtField.gtFldObj;
18130 if (op && op->gtOper == GT_ADDR) // Skip static fields where op will be NULL.
18132 op = op->gtOp.gtOp1;
18140 if (op->gtOper == GT_LCL_VAR)
18142 *lclVarTreeOut = op;
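// For illustration (shapes inferred from the checks above), this recognizes
// trees such as
//
//     ADDR(LCL_VAR V01)                 e.g. "&s" for a local struct s
//     ADDR(FIELD(ADDR(LCL_VAR V01)))    e.g. "&s.f"
//
// but not the address of a static field, whose FIELD node has a null object.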
18151 //------------------------------------------------------------------------
18152 // impMakeDiscretionaryInlineObservations: make observations that help
18153 // determine the profitability of a discretionary inline
18156 // pInlineInfo -- InlineInfo for the inline, or null for the prejit root
18157 // inlineResult -- InlineResult accumulating information about this inline
18160 // If inlining or prejitting the root, this method also makes
18161 // various observations about the method that factor into inline
18162 // decisions. It sets `compNativeSizeEstimate` as a side effect.
18164 void Compiler::impMakeDiscretionaryInlineObservations(InlineInfo* pInlineInfo, InlineResult* inlineResult)
18166 assert(pInlineInfo != nullptr && compIsForInlining() || // Perform the actual inlining.
18167 pInlineInfo == nullptr && !compIsForInlining() // Calculate the static inlining hint for ngen.
18170 // If we're really inlining, we should just have one result in play.
18171 assert((pInlineInfo == nullptr) || (inlineResult == pInlineInfo->inlineResult));
18173 // If this is a "forceinline" method, the JIT probably shouldn't have gone
18174 // to the trouble of estimating the native code size. Even if it did, it
18175 // shouldn't be relying on the result of this method.
18176 assert(inlineResult->GetObservation() == InlineObservation::CALLEE_IS_DISCRETIONARY_INLINE);
18178 // Note if the caller contains NEWOBJ or NEWARR.
18179 Compiler* rootCompiler = impInlineRoot();
18181 if ((rootCompiler->optMethodFlags & OMF_HAS_NEWARRAY) != 0)
18183 inlineResult->Note(InlineObservation::CALLER_HAS_NEWARRAY);
18186 if ((rootCompiler->optMethodFlags & OMF_HAS_NEWOBJ) != 0)
18188 inlineResult->Note(InlineObservation::CALLER_HAS_NEWOBJ);
18191 bool calleeIsStatic = (info.compFlags & CORINFO_FLG_STATIC) != 0;
18192 bool isSpecialMethod = (info.compFlags & CORINFO_FLG_CONSTRUCTOR) != 0;
18194 if (isSpecialMethod)
18196 if (calleeIsStatic)
18198 inlineResult->Note(InlineObservation::CALLEE_IS_CLASS_CTOR);
18202 inlineResult->Note(InlineObservation::CALLEE_IS_INSTANCE_CTOR);
18205 else if (!calleeIsStatic)
18207 // Callee is an instance method.
18209 // Check if the callee has the same 'this' as the root.
18210 if (pInlineInfo != nullptr)
18212 GenTree* thisArg = pInlineInfo->iciCall->gtCall.gtCallObjp;
18214 bool isSameThis = impIsThis(thisArg);
18215 inlineResult->NoteBool(InlineObservation::CALLSITE_IS_SAME_THIS, isSameThis);
18219 // Note if the callee's class is a promotable struct
18220 if ((info.compClassAttr & CORINFO_FLG_VALUECLASS) != 0)
18222 assert(structPromotionHelper != nullptr);
18223 if (structPromotionHelper->CanPromoteStructType(info.compClassHnd))
18225 inlineResult->Note(InlineObservation::CALLEE_CLASS_PROMOTABLE);
18229 #ifdef FEATURE_SIMD
18231 // Note if this method has SIMD args or a SIMD return value
18232 if (pInlineInfo != nullptr && pInlineInfo->hasSIMDTypeArgLocalOrReturn)
18234 inlineResult->Note(InlineObservation::CALLEE_HAS_SIMD);
18237 #endif // FEATURE_SIMD
18239 // Roughly classify callsite frequency.
18240 InlineCallsiteFrequency frequency = InlineCallsiteFrequency::UNUSED;
18242 // If this is a prejit root, or a maximally hot block...
18243 if ((pInlineInfo == nullptr) || (pInlineInfo->iciBlock->bbWeight >= BB_MAX_WEIGHT))
18245 frequency = InlineCallsiteFrequency::HOT;
18247 // No training data. Look for loop-like things.
18248 // We consider a recursive call loop-like. Do not give the inlining boost to the method itself.
18249 // However, give it to things nearby.
18250 else if ((pInlineInfo->iciBlock->bbFlags & BBF_BACKWARD_JUMP) &&
18251 (pInlineInfo->fncHandle != pInlineInfo->inlineCandidateInfo->ilCallerHandle))
18253 frequency = InlineCallsiteFrequency::LOOP;
18255 else if (pInlineInfo->iciBlock->hasProfileWeight() && (pInlineInfo->iciBlock->bbWeight > BB_ZERO_WEIGHT))
18257 frequency = InlineCallsiteFrequency::WARM;
18259 // Now modify the multiplier based on where we're called from.
18260 else if (pInlineInfo->iciBlock->isRunRarely() || ((info.compFlags & FLG_CCTOR) == FLG_CCTOR))
18262 frequency = InlineCallsiteFrequency::RARE;
18266 frequency = InlineCallsiteFrequency::BORING;
18269 // Also capture the block weight of the call site. In the prejit
18270 // root case, assume there's some hot call site for this method.
18271 unsigned weight = 0;
18273 if (pInlineInfo != nullptr)
18275 weight = pInlineInfo->iciBlock->bbWeight;
18279 weight = BB_MAX_WEIGHT;
18282 inlineResult->NoteInt(InlineObservation::CALLSITE_FREQUENCY, static_cast<int>(frequency));
18283 inlineResult->NoteInt(InlineObservation::CALLSITE_WEIGHT, static_cast<int>(weight));
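// Summary of the classification above (added commentary): prejit roots and
// maximally hot blocks are HOT; call sites reached via a backward jump
// (loop-like, excluding direct recursion) are LOOP; profiled blocks with
// nonzero weight are WARM; rarely-run blocks and call sites in class
// constructors are RARE; all remaining call sites are BORING.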
18286 /*****************************************************************************
18287 This method makes STATIC inlining decision based on the IL code.
18288 It should not make any inlining decision based on the context.
18289 If forceInline is true, then the inlining decision should not depend on
18290 performance heuristics (code size, etc.).
18293 void Compiler::impCanInlineIL(CORINFO_METHOD_HANDLE fncHandle,
18294 CORINFO_METHOD_INFO* methInfo,
18296 InlineResult* inlineResult)
18298 unsigned codeSize = methInfo->ILCodeSize;
18300 // We shouldn't have made up our minds yet...
18301 assert(!inlineResult->IsDecided());
18303 if (methInfo->EHcount)
18305 inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_EH);
18309 if ((methInfo->ILCode == nullptr) || (codeSize == 0))
18311 inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_NO_BODY);
18315 // For now we don't inline varargs (import code can't handle it)
18317 if (methInfo->args.isVarArg())
18319 inlineResult->NoteFatal(InlineObservation::CALLEE_HAS_MANAGED_VARARGS);
18323 // Reject if it has too many locals.
18324 // This is currently an implementation limit due to fixed-size arrays in the
18325 // inline info, rather than a performance heuristic.
18327 inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_LOCALS, methInfo->locals.numArgs);
18329 if (methInfo->locals.numArgs > MAX_INL_LCLS)
18331 inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_LOCALS);
18335 // Make sure there aren't too many arguments.
18336 // This is currently an implementation limit due to fixed-size arrays in the
18337 // inline info, rather than a performance heuristic.
18339 inlineResult->NoteInt(InlineObservation::CALLEE_NUMBER_OF_ARGUMENTS, methInfo->args.numArgs);
18341 if (methInfo->args.numArgs > MAX_INL_ARGS)
18343 inlineResult->NoteFatal(InlineObservation::CALLEE_TOO_MANY_ARGUMENTS);
18347 // Note force inline state
18349 inlineResult->NoteBool(InlineObservation::CALLEE_IS_FORCE_INLINE, forceInline);
18351 // Note IL code size
18353 inlineResult->NoteInt(InlineObservation::CALLEE_IL_CODE_SIZE, codeSize);
18355 if (inlineResult->IsFailure())
18360 // Make sure maxstack is not too big
18362 inlineResult->NoteInt(InlineObservation::CALLEE_MAXSTACK, methInfo->maxStack);
18364 if (inlineResult->IsFailure())
18370 /*****************************************************************************
18373 void Compiler::impCheckCanInline(GenTree* call,
18374 CORINFO_METHOD_HANDLE fncHandle,
18376 CORINFO_CONTEXT_HANDLE exactContextHnd,
18377 InlineCandidateInfo** ppInlineCandidateInfo,
18378 InlineResult* inlineResult)
18380 // Either EE or JIT might throw exceptions below.
18381 // If that happens, just don't inline the method.
18387 CORINFO_METHOD_HANDLE fncHandle;
18389 CORINFO_CONTEXT_HANDLE exactContextHnd;
18390 InlineResult* result;
18391 InlineCandidateInfo** ppInlineCandidateInfo;
18393 memset(&param, 0, sizeof(param));
18395 param.pThis = this;
18397 param.fncHandle = fncHandle;
18398 param.methAttr = methAttr;
18399 param.exactContextHnd = (exactContextHnd != nullptr) ? exactContextHnd : MAKE_METHODCONTEXT(fncHandle);
18400 param.result = inlineResult;
18401 param.ppInlineCandidateInfo = ppInlineCandidateInfo;
18403 bool success = eeRunWithErrorTrap<Param>(
18404 [](Param* pParam) {
18405 DWORD dwRestrictions = 0;
18406 CorInfoInitClassResult initClassResult;
18409 const char* methodName;
18410 const char* className;
18411 methodName = pParam->pThis->eeGetMethodName(pParam->fncHandle, &className);
18413 if (JitConfig.JitNoInline())
18415 pParam->result->NoteFatal(InlineObservation::CALLEE_IS_JIT_NOINLINE);
18420 /* Try to get the code address/size for the method */
18422 CORINFO_METHOD_INFO methInfo;
18423 if (!pParam->pThis->info.compCompHnd->getMethodInfo(pParam->fncHandle, &methInfo))
18425 pParam->result->NoteFatal(InlineObservation::CALLEE_NO_METHOD_INFO);
18430 bool forceInline = !!(pParam->methAttr & CORINFO_FLG_FORCEINLINE);
18432 pParam->pThis->impCanInlineIL(pParam->fncHandle, &methInfo, forceInline, pParam->result);
18434 if (pParam->result->IsFailure())
18436 assert(pParam->result->IsNever());
18440 // Speculatively check if initClass() can be done.
18441 // If it can be done, we will try to inline the method. If inlining
18442 // succeeds, then we will do the non-speculative initClass() and commit it.
18443 // If this speculative call to initClass() fails, there is no point
18444 // trying to inline this method.
18446 initClassResult = pParam->pThis->info.compCompHnd->initClass(nullptr /* field */, pParam->fncHandle /* method */,
18447 pParam->exactContextHnd /* context */,
18448 TRUE /* speculative */);
18450 if (initClassResult & CORINFO_INITCLASS_DONT_INLINE)
18452 pParam->result->NoteFatal(InlineObservation::CALLSITE_CLASS_INIT_FAILURE_SPEC);
18456 // Give the EE the final say in whether to inline or not.
18457 // This should be last since for verifiable code, this can be expensive
18459 /* VM Inline check also ensures that the method is verifiable if needed */
18460 CorInfoInline vmResult;
18461 vmResult = pParam->pThis->info.compCompHnd->canInline(pParam->pThis->info.compMethodHnd, pParam->fncHandle,
                                                            &dwRestrictions);
18464 if (vmResult == INLINE_FAIL)
18466 pParam->result->NoteFatal(InlineObservation::CALLSITE_IS_VM_NOINLINE);
18468 else if (vmResult == INLINE_NEVER)
18470 pParam->result->NoteFatal(InlineObservation::CALLEE_IS_VM_NOINLINE);
18473 if (pParam->result->IsFailure())
18475 // Make sure not to report this one. It was already reported by the VM.
18476 pParam->result->SetReported();
18480 // check for unsupported inlining restrictions
18481 assert((dwRestrictions & ~(INLINE_RESPECT_BOUNDARY | INLINE_NO_CALLEE_LDSTR | INLINE_SAME_THIS)) == 0);
18483 if (dwRestrictions & INLINE_SAME_THIS)
18485 GenTree* thisArg = pParam->call->gtCall.gtCallObjp;
18488 if (!pParam->pThis->impIsThis(thisArg))
18490 pParam->result->NoteFatal(InlineObservation::CALLSITE_REQUIRES_SAME_THIS);
18495 /* Get the method properties */
18497 CORINFO_CLASS_HANDLE clsHandle;
18498 clsHandle = pParam->pThis->info.compCompHnd->getMethodClass(pParam->fncHandle);
18500 unsigned clsAttr = pParam->pThis->info.compCompHnd->getClassAttribs(clsHandle);
18502 /* Get the return type */
18504 var_types fncRetType;
18505 fncRetType = pParam->call->TypeGet();
18508 var_types fncRealRetType;
18509 fncRealRetType = JITtype2varType(methInfo.args.retType);
18511 assert((genActualType(fncRealRetType) == genActualType(fncRetType)) ||
18512 // <BUGNUM> VSW 288602 </BUGNUM>
18513 // In the case of IJW, we allow assigning a native pointer to a BYREF.
18514 (fncRetType == TYP_BYREF && methInfo.args.retType == CORINFO_TYPE_PTR) ||
18515 (varTypeIsStruct(fncRetType) && (fncRealRetType == TYP_STRUCT)));
18519 // Allocate an InlineCandidateInfo structure
18521 InlineCandidateInfo* pInfo;
18522 pInfo = new (pParam->pThis, CMK_Inlining) InlineCandidateInfo;
18524 pInfo->dwRestrictions = dwRestrictions;
18525 pInfo->methInfo = methInfo;
18526 pInfo->methAttr = pParam->methAttr;
18527 pInfo->clsHandle = clsHandle;
18528 pInfo->clsAttr = clsAttr;
18529 pInfo->fncRetType = fncRetType;
18530 pInfo->exactContextHnd = pParam->exactContextHnd;
18531 pInfo->ilCallerHandle = pParam->pThis->info.compMethodHnd;
18532 pInfo->initClassResult = initClassResult;
18533 pInfo->preexistingSpillTemp = BAD_VAR_NUM;
18535 *(pParam->ppInlineCandidateInfo) = pInfo;
18542 param.result->NoteFatal(InlineObservation::CALLSITE_COMPILATION_ERROR);
18546 //------------------------------------------------------------------------
18547 // impInlineRecordArgInfo: record information about an inline candidate argument
18550 // pInlineInfo - inline info for the inline candidate
18551 // curArgVal - tree for the caller actual argument value
18552 // argNum - logical index of this argument
18553 // inlineResult - result of ongoing inline evaluation
18557 // Checks for various inline blocking conditions and makes notes in
18558 // the inline info arg table about the properties of the actual. These
18559 // properties are used later by impFetchArg to determine how best to
18560 // pass the argument into the inlinee.
18562 void Compiler::impInlineRecordArgInfo(InlineInfo* pInlineInfo,
18563 GenTree* curArgVal,
18565 InlineResult* inlineResult)
18567 InlArgInfo* inlCurArgInfo = &pInlineInfo->inlArgInfo[argNum];
18569 if (curArgVal->gtOper == GT_MKREFANY)
18571 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_IS_MKREFANY);
18575 inlCurArgInfo->argNode = curArgVal;
18577 GenTree* lclVarTree;
18578 if (impIsAddressInLocal(curArgVal, &lclVarTree) && varTypeIsStruct(lclVarTree))
18580 inlCurArgInfo->argIsByRefToStructLocal = true;
18581 #ifdef FEATURE_SIMD
18582 if (lvaTable[lclVarTree->AsLclVarCommon()->gtLclNum].lvSIMDType)
18584 pInlineInfo->hasSIMDTypeArgLocalOrReturn = true;
18586 #endif // FEATURE_SIMD
18589 if (curArgVal->gtFlags & GTF_ALL_EFFECT)
18591 inlCurArgInfo->argHasGlobRef = (curArgVal->gtFlags & GTF_GLOB_REF) != 0;
18592 inlCurArgInfo->argHasSideEff = (curArgVal->gtFlags & (GTF_ALL_EFFECT & ~GTF_GLOB_REF)) != 0;
18595 if (curArgVal->gtOper == GT_LCL_VAR)
18597 inlCurArgInfo->argIsLclVar = true;
18599 /* Remember the "original" argument number */
18600 curArgVal->gtLclVar.gtLclILoffs = argNum;
18603 if ((curArgVal->OperKind() & GTK_CONST) ||
18604 ((curArgVal->gtOper == GT_ADDR) && (curArgVal->gtOp.gtOp1->gtOper == GT_LCL_VAR)))
18606 inlCurArgInfo->argIsInvariant = true;
18607 if (inlCurArgInfo->argIsThis && (curArgVal->gtOper == GT_CNS_INT) && (curArgVal->gtIntCon.gtIconVal == 0))
18609 // Abort inlining at this call site
18610 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_HAS_NULL_THIS);
18615 // If the arg is a local that is address-taken, we can't safely
18616 // directly substitute it into the inlinee.
18618 // Previously we'd accomplish this by setting "argHasLdargaOp" but
18619 // that has a stronger meaning: that the arg value can change in
18620 // the method body. Using that flag prevents type propagation,
18621 // which is safe in this case.
18623 // Instead mark the arg as having a caller local ref.
18624 if (!inlCurArgInfo->argIsInvariant && gtHasLocalsWithAddrOp(curArgVal))
18626 inlCurArgInfo->argHasCallerLocalRef = true;
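// Illustrative example (added commentary, hypothetical C# caller):
//
//     int x = 0;
//     Capture(ref x);   // x becomes address-exposed in the caller
//     Callee(x);        // the arg tree is just LCL_VAR x
//
// If LCL_VAR x were substituted directly into the inlinee, a call inside the
// inlinee could write x through the captured address before the substituted
// use is evaluated, so such args are flagged and evaluated into a temp instead.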
18632 if (inlCurArgInfo->argIsThis)
18634 printf("thisArg:");
18638 printf("\nArgument #%u:", argNum);
18640 if (inlCurArgInfo->argIsLclVar)
18642 printf(" is a local var");
18644 if (inlCurArgInfo->argIsInvariant)
18646 printf(" is a constant");
18648 if (inlCurArgInfo->argHasGlobRef)
18650 printf(" has global refs");
18652 if (inlCurArgInfo->argHasCallerLocalRef)
18654 printf(" has caller local ref");
18656 if (inlCurArgInfo->argHasSideEff)
18658 printf(" has side effects");
18660 if (inlCurArgInfo->argHasLdargaOp)
18662 printf(" has ldarga effect");
18664 if (inlCurArgInfo->argHasStargOp)
18666 printf(" has starg effect");
18668 if (inlCurArgInfo->argIsByRefToStructLocal)
18670 printf(" is byref to a struct local");
18674 gtDispTree(curArgVal);
18680 //------------------------------------------------------------------------
18681 // impInlineInitVars: setup inline information for inlinee args and locals
18684 // pInlineInfo - inline info for the inline candidate
18687 // This method primarily adds caller-supplied info to the inlArgInfo
18688 // and sets up the lclVarInfo table.
18690 // For args, the inlArgInfo records properties of the actual argument
18691 // including the tree node that produces the arg value. This node is
18692 // usually the tree node present at the call, but may also differ in
18694 // - when the call arg is a GT_RET_EXPR, we search back through the ret
18695 // expr chain for the actual node. Note this will either be the original
18696 // call (which will be a failed inline by this point), or the return
18697 // expression from some set of inlines.
18698 // - when argument type casting is needed the necessary casts are added
18699 // around the argument node.
18700 // - if an argument can be simplified by folding, then the node here is the folded value.
18703 // The method may make observations that lead to marking this candidate as
18704 // a failed inline. If this happens the initialization is abandoned immediately
18705 // to try and reduce the jit time cost for a failed inline.
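// Illustrative example of the GT_RET_EXPR case above (added commentary): for
//
//     Outer(Inner());
//
// the arg recorded for Outer's inlinee is not the GT_RET_EXPR placeholder but
// the tree found by walking its chain -- Inner's return expression if Inner
// was successfully inlined, or the original call node if that inline failed.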
18707 void Compiler::impInlineInitVars(InlineInfo* pInlineInfo)
18709 assert(!compIsForInlining());
18711 GenTree* call = pInlineInfo->iciCall;
18712 CORINFO_METHOD_INFO* methInfo = &pInlineInfo->inlineCandidateInfo->methInfo;
18713 unsigned clsAttr = pInlineInfo->inlineCandidateInfo->clsAttr;
18714 InlArgInfo* inlArgInfo = pInlineInfo->inlArgInfo;
18715 InlLclVarInfo* lclVarInfo = pInlineInfo->lclVarInfo;
18716 InlineResult* inlineResult = pInlineInfo->inlineResult;
18718 const bool hasRetBuffArg = impMethodInfo_hasRetBuffArg(methInfo);
18720 /* init the argument struct */
18722 memset(inlArgInfo, 0, (MAX_INL_ARGS + 1) * sizeof(inlArgInfo[0]));
18724 /* Get hold of the 'this' pointer and the argument list proper */
18726 GenTree* thisArg = call->gtCall.gtCallObjp;
18727 GenTree* argList = call->gtCall.gtCallArgs;
18728 unsigned argCnt = 0; // Count of the arguments
18730 assert((methInfo->args.hasThis()) == (thisArg != nullptr));
      if (thisArg)
18734 inlArgInfo[0].argIsThis = true;
18735 GenTree* actualThisArg = thisArg->gtRetExprVal();
18736 impInlineRecordArgInfo(pInlineInfo, actualThisArg, argCnt, inlineResult);
18738 if (inlineResult->IsFailure())
18743 /* Increment the argument count */
18747 /* Record some information about each of the arguments */
18748 bool hasTypeCtxtArg = (methInfo->args.callConv & CORINFO_CALLCONV_PARAMTYPE) != 0;
18750 #if USER_ARGS_COME_LAST
18751 unsigned typeCtxtArg = thisArg ? 1 : 0;
18752 #else // USER_ARGS_COME_LAST
18753 unsigned typeCtxtArg = methInfo->args.totalILArgs();
18754 #endif // USER_ARGS_COME_LAST
18756 for (GenTree* argTmp = argList; argTmp; argTmp = argTmp->gtOp.gtOp2)
18758 if (argTmp == argList && hasRetBuffArg)
18763 // Ignore the type context argument
18764 if (hasTypeCtxtArg && (argCnt == typeCtxtArg))
18766 pInlineInfo->typeContextArg = typeCtxtArg;
18767 typeCtxtArg = 0xFFFFFFFF;
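// Illustrative note (added commentary): for shared generic code the caller
// passes a hidden type-context argument (CORINFO_CALLCONV_PARAMTYPE). It is
// not an IL argument of the inlinee, so its position is remembered in
// typeContextArg and the entry is skipped rather than recorded in inlArgInfo.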
18771 assert(argTmp->gtOper == GT_LIST);
18772 GenTree* arg = argTmp->gtOp.gtOp1;
18773 GenTree* actualArg = arg->gtRetExprVal();
18774 impInlineRecordArgInfo(pInlineInfo, actualArg, argCnt, inlineResult);
18776 if (inlineResult->IsFailure())
18781 /* Increment the argument count */
18785 /* Make sure we got the arg number right */
18786 assert(argCnt == methInfo->args.totalILArgs());
18788 #ifdef FEATURE_SIMD
18789 bool foundSIMDType = pInlineInfo->hasSIMDTypeArgLocalOrReturn;
18790 #endif // FEATURE_SIMD
18792 /* We have typeless opcodes, get type information from the signature */
18798 if (clsAttr & CORINFO_FLG_VALUECLASS)
18800 sigType = TYP_BYREF;
18807 lclVarInfo[0].lclVerTypeInfo = verMakeTypeInfo(pInlineInfo->inlineCandidateInfo->clsHandle);
18808 lclVarInfo[0].lclHasLdlocaOp = false;
18810 #ifdef FEATURE_SIMD
18811 // We always want to check isSIMDClass, since we want to set foundSIMDType (to increase
18812 // the inlining multiplier) for anything in that assembly.
18813 // But we only need to normalize it if it is a TYP_STRUCT
18814 // (which we need to do even if we have already set foundSIMDType).
18815 if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDorHWSIMDClass(&(lclVarInfo[0].lclVerTypeInfo)))
18817 if (sigType == TYP_STRUCT)
18819 sigType = impNormStructType(lclVarInfo[0].lclVerTypeInfo.GetClassHandle());
18821 foundSIMDType = true;
18823 #endif // FEATURE_SIMD
18824 lclVarInfo[0].lclTypeInfo = sigType;
18826 assert(varTypeIsGC(thisArg->gtType) || // "this" is managed
18827 (thisArg->gtType == TYP_I_IMPL && // "this" is unmanaged but the method's class doesn't care
18828 (clsAttr & CORINFO_FLG_VALUECLASS)));
18830 if (genActualType(thisArg->gtType) != genActualType(sigType))
18832 if (sigType == TYP_REF)
18834 /* The argument cannot be bashed into a ref (see bug 750871) */
18835 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_REF);
18839 /* This can only happen with byrefs <-> ints/shorts */
18841 assert(genActualType(sigType) == TYP_I_IMPL || sigType == TYP_BYREF);
18842 assert(genActualType(thisArg->gtType) == TYP_I_IMPL || thisArg->gtType == TYP_BYREF);
18844 if (sigType == TYP_BYREF)
18846 lclVarInfo[0].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
18848 else if (thisArg->gtType == TYP_BYREF)
18850 assert(sigType == TYP_I_IMPL);
18852 /* If possible change the BYREF to an int */
18853 if (thisArg->IsVarAddr())
18855 thisArg->gtType = TYP_I_IMPL;
18856 lclVarInfo[0].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
18860 /* Arguments 'int <- byref' cannot be bashed */
18861 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_INT);
18868 /* Init the types of the arguments and make sure the types
18869 * from the trees match the types in the signature */
18871 CORINFO_ARG_LIST_HANDLE argLst;
18872 argLst = methInfo->args.args;
      unsigned i;
18875 for (i = (thisArg ? 1 : 0); i < argCnt; i++, argLst = info.compCompHnd->getArgNext(argLst))
18877 var_types sigType = (var_types)eeGetArgType(argLst, &methInfo->args);
18879 lclVarInfo[i].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->args, argLst);
18881 #ifdef FEATURE_SIMD
18882 if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDorHWSIMDClass(&(lclVarInfo[i].lclVerTypeInfo)))
18884 // If this is a SIMD class (i.e. in the SIMD assembly), then we will consider that we've
18885 // found a SIMD type, even if this may not be a type we recognize (the assumption is that
18886 // it is likely to use a SIMD type, and therefore we want to increase the inlining multiplier).
18887 foundSIMDType = true;
18888 if (sigType == TYP_STRUCT)
18890 var_types structType = impNormStructType(lclVarInfo[i].lclVerTypeInfo.GetClassHandle());
18891 sigType = structType;
18894 #endif // FEATURE_SIMD
18896 lclVarInfo[i].lclTypeInfo = sigType;
18897 lclVarInfo[i].lclHasLdlocaOp = false;
18899 /* Does the tree type match the signature type? */
18901 GenTree* inlArgNode = inlArgInfo[i].argNode;
18903 if (sigType != inlArgNode->gtType)
18905 /* In valid IL, this can only happen for short integer types or byrefs <-> [native] ints,
18906 but in bad IL cases with caller-callee signature mismatches we can see other types.
18907 Intentionally reject cases with mismatches so the jit is more flexible when
18908 encountering bad IL. */
18910 bool isPlausibleTypeMatch = (genActualType(sigType) == genActualType(inlArgNode->gtType)) ||
18911 (genActualTypeIsIntOrI(sigType) && inlArgNode->gtType == TYP_BYREF) ||
18912 (sigType == TYP_BYREF && genActualTypeIsIntOrI(inlArgNode->gtType));
18914 if (!isPlausibleTypeMatch)
18916 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_TYPES_INCOMPATIBLE);
18920 /* Is it a narrowing or widening cast?
18921 * Widening casts are ok since the value computed is already
18922 * normalized to an int (on the IL stack) */
18924 if (genTypeSize(inlArgNode->gtType) >= genTypeSize(sigType))
18926 if (sigType == TYP_BYREF)
18928 lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
18930 else if (inlArgNode->gtType == TYP_BYREF)
18932 assert(varTypeIsIntOrI(sigType));
18934 /* If possible bash the BYREF to an int */
18935 if (inlArgNode->IsVarAddr())
18937 inlArgNode->gtType = TYP_I_IMPL;
18938 lclVarInfo[i].lclVerTypeInfo = typeInfo(varType2tiType(TYP_I_IMPL));
18942 /* Arguments 'int <- byref' cannot be changed */
18943 inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_NO_BASH_TO_INT);
18947 else if (genTypeSize(sigType) < EA_PTRSIZE)
18949 /* Narrowing cast */
18951 if (inlArgNode->gtOper == GT_LCL_VAR &&
18952 !lvaTable[inlArgNode->gtLclVarCommon.gtLclNum].lvNormalizeOnLoad() &&
18953 sigType == lvaGetRealType(inlArgNode->gtLclVarCommon.gtLclNum))
18955 /* We don't need to insert a cast here as the variable
18956 was assigned a normalized value of the right type */
18961 inlArgNode = inlArgInfo[i].argNode = gtNewCastNode(TYP_INT, inlArgNode, false, sigType);
18963 inlArgInfo[i].argIsLclVar = false;
18965 /* Try to fold the node in case we have constant arguments */
18967 if (inlArgInfo[i].argIsInvariant)
18969 inlArgNode = gtFoldExprConst(inlArgNode);
18970 inlArgInfo[i].argNode = inlArgNode;
18971 assert(inlArgNode->OperIsConst());
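// Worked example (added commentary): passing the constant 300 for a formal of
// IL type int8 creates CAST(int8, 300); because the arg is invariant it is
// folded right here to the constant 44 (300 truncated to a signed byte), and
// the folded node becomes the recorded argNode.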
18974 #ifdef _TARGET_64BIT_
18975 else if (genTypeSize(genActualType(inlArgNode->gtType)) < genTypeSize(sigType))
18977 // This should only happen for int -> native int widening
18978 inlArgNode = inlArgInfo[i].argNode =
18979 gtNewCastNode(genActualType(sigType), inlArgNode, false, sigType);
18981 inlArgInfo[i].argIsLclVar = false;
18983 /* Try to fold the node in case we have constant arguments */
18985 if (inlArgInfo[i].argIsInvariant)
18987 inlArgNode = gtFoldExprConst(inlArgNode);
18988 inlArgInfo[i].argNode = inlArgNode;
18989 assert(inlArgNode->OperIsConst());
18992 #endif // _TARGET_64BIT_
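// Companion example for the 64-bit case above (added commentary): an int32
// actual passed for a native-int formal is wrapped in CAST(long, arg), and a
// constant actual then folds to the corresponding 64-bit constant.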
18997 /* Init the types of the local variables */
18999 CORINFO_ARG_LIST_HANDLE localsSig;
19000 localsSig = methInfo->locals.args;
19002 for (i = 0; i < methInfo->locals.numArgs; i++)
      bool isPinned;
19005 var_types type = (var_types)eeGetArgType(localsSig, &methInfo->locals, &isPinned);
19007 lclVarInfo[i + argCnt].lclHasLdlocaOp = false;
19008 lclVarInfo[i + argCnt].lclIsPinned = isPinned;
19009 lclVarInfo[i + argCnt].lclTypeInfo = type;
19011 if (varTypeIsGC(type))
19013 pInlineInfo->numberOfGcRefLocals++;
      if (isPinned)
19018 // Pinned locals may cause inlines to fail.
19019 inlineResult->Note(InlineObservation::CALLEE_HAS_PINNED_LOCALS);
19020 if (inlineResult->IsFailure())
19026 lclVarInfo[i + argCnt].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->locals, localsSig);
19028 // If this local is a struct type with GC fields, inform the inliner. It may choose to bail
19029 // out on the inline.
19030 if (type == TYP_STRUCT)
19032 CORINFO_CLASS_HANDLE lclHandle = lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle();
19033 DWORD typeFlags = info.compCompHnd->getClassAttribs(lclHandle);
19034 if ((typeFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0)
19036 inlineResult->Note(InlineObservation::CALLEE_HAS_GC_STRUCT);
19037 if (inlineResult->IsFailure())
19042 // Do further notification in the case where the call site is rare; some policies do
19043 // not track the relative hotness of call sites for "always" inline cases.
19044 if (pInlineInfo->iciBlock->isRunRarely())
19046 inlineResult->Note(InlineObservation::CALLSITE_RARE_GC_STRUCT);
19047 if (inlineResult->IsFailure())
19056 localsSig = info.compCompHnd->getArgNext(localsSig);
19058 #ifdef FEATURE_SIMD
19059 if ((!foundSIMDType || (type == TYP_STRUCT)) && isSIMDorHWSIMDClass(&(lclVarInfo[i + argCnt].lclVerTypeInfo)))
19061 foundSIMDType = true;
19062 if (featureSIMD && type == TYP_STRUCT)
19064 var_types structType = impNormStructType(lclVarInfo[i + argCnt].lclVerTypeInfo.GetClassHandle());
19065 lclVarInfo[i + argCnt].lclTypeInfo = structType;
19068 #endif // FEATURE_SIMD
19071 #ifdef FEATURE_SIMD
19072 if (!foundSIMDType && (call->AsCall()->gtRetClsHnd != nullptr) && isSIMDorHWSIMDClass(call->AsCall()->gtRetClsHnd))
19074 foundSIMDType = true;
19076 pInlineInfo->hasSIMDTypeArgLocalOrReturn = foundSIMDType;
19077 #endif // FEATURE_SIMD
19080 //------------------------------------------------------------------------
19081 // impInlineFetchLocal: get a local var that represents an inlinee local
19084 // lclNum -- number of the inlinee local
19085 // reason -- debug string describing purpose of the local var
19088 // Number of the local to use
19091 // This method is invoked only for locals actually used in the
19094 // Allocates a new temp if necessary, and copies key properties
19095 // over from the inlinee local var info.
19097 unsigned Compiler::impInlineFetchLocal(unsigned lclNum DEBUGARG(const char* reason))
19099 assert(compIsForInlining());
19101 unsigned tmpNum = impInlineInfo->lclTmpNum[lclNum];
19103 if (tmpNum == BAD_VAR_NUM)
19105 const InlLclVarInfo& inlineeLocal = impInlineInfo->lclVarInfo[lclNum + impInlineInfo->argCnt];
19106 const var_types lclTyp = inlineeLocal.lclTypeInfo;
19108 // The lifetime of this local might span multiple BBs.
19109 // So it is a long lifetime local.
19110 impInlineInfo->lclTmpNum[lclNum] = tmpNum = lvaGrabTemp(false DEBUGARG(reason));
19112 // Copy over key info
19113 lvaTable[tmpNum].lvType = lclTyp;
19114 lvaTable[tmpNum].lvHasLdAddrOp = inlineeLocal.lclHasLdlocaOp;
19115 lvaTable[tmpNum].lvPinned = inlineeLocal.lclIsPinned;
19116 lvaTable[tmpNum].lvHasILStoreOp = inlineeLocal.lclHasStlocOp;
19117 lvaTable[tmpNum].lvHasMultipleILStoreOp = inlineeLocal.lclHasMultipleStlocOp;
19119 // Copy over class handle for ref types. Note this may be a
19120 // shared type -- someday perhaps we can get the exact
19121 // signature and pass in a more precise type.
19122 if (lclTyp == TYP_REF)
19124 assert(lvaTable[tmpNum].lvSingleDef == 0);
19126 lvaTable[tmpNum].lvSingleDef = !inlineeLocal.lclHasMultipleStlocOp && !inlineeLocal.lclHasLdlocaOp;
19127 if (lvaTable[tmpNum].lvSingleDef)
19129 JITDUMP("Marked V%02u as a single def temp\n", tmpNum);
19132 lvaSetClass(tmpNum, inlineeLocal.lclVerTypeInfo.GetClassHandleForObjRef());
19135 if (inlineeLocal.lclVerTypeInfo.IsStruct())
19137 if (varTypeIsStruct(lclTyp))
19139 lvaSetStruct(tmpNum, inlineeLocal.lclVerTypeInfo.GetClassHandle(), true /* unsafe value cls check */);
19143 // This is a wrapped primitive. Make sure the verstate knows that
19144 lvaTable[tmpNum].lvVerTypeInfo = inlineeLocal.lclVerTypeInfo;
19149 // Sanity check that we're properly prepared for gc ref locals.
19150 if (varTypeIsGC(lclTyp))
19152 // Since there are gc locals we should have seen them earlier
19153 // and if there was a return value, set up the spill temp.
19154 assert(impInlineInfo->HasGcRefLocals());
19155 assert((info.compRetNativeType == TYP_VOID) || fgNeedReturnSpillTemp());
19159 // Make sure all pinned locals count as gc refs.
19160 assert(!inlineeLocal.lclIsPinned);
19168 //------------------------------------------------------------------------
19169 // impInlineFetchArg: return tree node for argument value in an inlinee
19172 // lclNum -- argument number in inlinee IL
19173 // inlArgInfo -- argument info for inlinee
19174 // lclVarInfo -- var info for inlinee
19177 // Tree for the argument's value. Often an inlinee-scoped temp
19178 // GT_LCL_VAR but can be other tree kinds, if the argument
19179 // expression from the caller can be directly substituted into the
19183 // Must be used only for arguments -- use impInlineFetchLocal for
19186 // Direct substitution is performed when the formal argument cannot
19187 // change value in the inlinee body (no starg or ldarga), and the
19188 // actual argument expression's value cannot be changed if it is
19189 // substituted it into the inlinee body.
19191 // Even if an inlinee-scoped temp is returned here, it may later be
19192 // "bashed" to a caller-supplied tree when arguments are actually
19193 // passed (see fgInlinePrependStatements). Bashing can happen if
19194 // the argument ends up being single use and other conditions are
19195 // met. So the contents of the tree returned here may not end up
19196 // being the ones ultimately used for the argument.
19198 // This method will side effect inlArgInfo. It should only be called
19199 // for actual uses of the argument in the inlinee.
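// Decision sketch for the cases below (added commentary): invariant args
// (constants, addresses of locals) are cloned at each use; unaliased caller
// locals are used directly; byrefs to struct locals are cloned; anything else
// is evaluated into a temp, with the first LCL_VAR use remembered in
// argBashTmpNode so a single-use arg can later be bashed back to the caller's
// original tree.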
19201 GenTree* Compiler::impInlineFetchArg(unsigned lclNum, InlArgInfo* inlArgInfo, InlLclVarInfo* lclVarInfo)
19203 // Cache the relevant arg and lcl info for this argument.
19204 // We will modify argInfo but not lclVarInfo.
19205 InlArgInfo& argInfo = inlArgInfo[lclNum];
19206 const InlLclVarInfo& lclInfo = lclVarInfo[lclNum];
19207 const bool argCanBeModified = argInfo.argHasLdargaOp || argInfo.argHasStargOp;
19208 const var_types lclTyp = lclInfo.lclTypeInfo;
19209 GenTree* op1 = nullptr;
19211 if (argInfo.argIsInvariant && !argCanBeModified)
19213 // Directly substitute constants or addresses of locals
19215 // Clone the constant. Note that we cannot directly use
19216 // argNode in the trees even if !argInfo.argIsUsed as this
19217 // would introduce aliasing between inlArgInfo[].argNode and
19218 // impInlineExpr. Then gtFoldExpr() could change it, causing
19219 // further references to the argument working off of the bashed copy.
19221 op1 = gtCloneExpr(argInfo.argNode);
19222 PREFIX_ASSUME(op1 != nullptr);
19223 argInfo.argTmpNum = BAD_VAR_NUM;
19225 // We may need to retype to ensure we match the callee's view of the type.
19226 // Otherwise callee-pass throughs of arguments can create return type
19227 // mismatches that block inlining.
19229 // Note argument type mismatches that prevent inlining should
19230 // have been caught in impInlineInitVars.
19231 if (op1->TypeGet() != lclTyp)
19233 op1->gtType = genActualType(lclTyp);
19236 else if (argInfo.argIsLclVar && !argCanBeModified && !argInfo.argHasCallerLocalRef)
19238 // Directly substitute unaliased caller locals for args that cannot be modified
19240 // Use the caller-supplied node if this is the first use.
19241 op1 = argInfo.argNode;
19242 argInfo.argTmpNum = op1->gtLclVarCommon.gtLclNum;
19244 // Use an equivalent copy if this is the second or subsequent
19245 // use, or if we need to retype.
19247 // Note argument type mismatches that prevent inlining should
19248 // have been caught in impInlineInitVars.
19249 if (argInfo.argIsUsed || (op1->TypeGet() != lclTyp))
19251 assert(op1->gtOper == GT_LCL_VAR);
19252 assert(lclNum == op1->gtLclVar.gtLclILoffs);
19254 var_types newTyp = lclTyp;
19256 if (!lvaTable[op1->gtLclVarCommon.gtLclNum].lvNormalizeOnLoad())
19258 newTyp = genActualType(lclTyp);
19261 // Create a new lcl var node - remember the argument lclNum
19262 op1 = gtNewLclvNode(op1->gtLclVarCommon.gtLclNum, newTyp, op1->gtLclVar.gtLclILoffs);
19265 else if (argInfo.argIsByRefToStructLocal && !argInfo.argHasStargOp)
19267 /* Argument is a by-ref address to a struct, a normed struct, or its field.
19268 In these cases, don't spill the byref to a local, simply clone the tree and use it.
19269 This way we will increase the chance for this byref to be optimized away by
19270 a subsequent "dereference" operation.
19272 From Dev11 bug #139955: Argument node can also be TYP_I_IMPL if we've bashed the tree
19273 (in impInlineInitVars()), if the arg has argHasLdargaOp as well as argIsByRefToStructLocal.
19274 For example, if the caller is:
19275 ldloca.s V_1 // V_1 is a local struct
19276 call void Test.ILPart::RunLdargaOnPointerArg(int32*)
19277 and the callee being inlined has:
19278 .method public static void RunLdargaOnPointerArg(int32* ptrToInts) cil managed
19280 call void Test.FourInts::NotInlined_SetExpectedValuesThroughPointerToPointer(int32**)
19281 then we change the argument tree (of "ldloca.s V_1") to TYP_I_IMPL to match the callee signature. We'll
19282 soon afterwards reject the inlining anyway, since the tree we return isn't a GT_LCL_VAR.
19284 assert(argInfo.argNode->TypeGet() == TYP_BYREF || argInfo.argNode->TypeGet() == TYP_I_IMPL);
19285 op1 = gtCloneExpr(argInfo.argNode);
19289 /* Argument is a complex expression - it must be evaluated into a temp */
19291 if (argInfo.argHasTmp)
19293 assert(argInfo.argIsUsed);
19294 assert(argInfo.argTmpNum < lvaCount);
19296 /* Create a new lcl var node - remember the argument lclNum */
19297 op1 = gtNewLclvNode(argInfo.argTmpNum, genActualType(lclTyp));
19299 /* This is the second or later use of the this argument,
19300 so we have to use the temp (instead of the actual arg) */
19301 argInfo.argBashTmpNode = nullptr;
19305 /* First time use */
19306 assert(!argInfo.argIsUsed);
19308 /* Reserve a temp for the expression.
19309 * Use a large size node as we may change it later */
19311 const unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Inlining Arg"));
19313 lvaTable[tmpNum].lvType = lclTyp;
19315 // For ref types, determine the type of the temp.
19316 if (lclTyp == TYP_REF)
19318 if (!argCanBeModified)
19320 // If the arg can't be modified in the method
19321 // body, use the type of the value, if
19322 // known. Otherwise, use the declared type.
19323 assert(lvaTable[tmpNum].lvSingleDef == 0);
19324 lvaTable[tmpNum].lvSingleDef = 1;
19325 JITDUMP("Marked V%02u as a single def temp\n", tmpNum);
19326 lvaSetClass(tmpNum, argInfo.argNode, lclInfo.lclVerTypeInfo.GetClassHandleForObjRef());
19330 // Arg might be modified, use the declared type of the argument.
19332 lvaSetClass(tmpNum, lclInfo.lclVerTypeInfo.GetClassHandleForObjRef());
19336 assert(lvaTable[tmpNum].lvAddrExposed == 0);
19337 if (argInfo.argHasLdargaOp)
19339 lvaTable[tmpNum].lvHasLdAddrOp = 1;
19342 if (lclInfo.lclVerTypeInfo.IsStruct())
19344 if (varTypeIsStruct(lclTyp))
19346 lvaSetStruct(tmpNum, lclInfo.lclVerTypeInfo.GetClassHandle(), true /* unsafe value cls check */);
19347 if (info.compIsVarArgs)
19349 lvaSetStructUsedAsVarArg(tmpNum);
19354 // This is a wrapped primitive. Make sure the verstate knows that
19355 lvaTable[tmpNum].lvVerTypeInfo = lclInfo.lclVerTypeInfo;
19359 argInfo.argHasTmp = true;
19360 argInfo.argTmpNum = tmpNum;
19362 // If we require strict exception order, then arguments must
19363 // be evaluated in sequence before the body of the inlined method.
19364 // So we need to evaluate them to a temp.
19365 // Also, if arguments have global or local references, we need to
19366 // evaluate them to a temp before the inlined body as the
19367 // inlined body may be modifying the global ref.
19368 // TODO-1stClassStructs: We currently do not reuse an existing lclVar
19369 // if it is a struct, because it requires some additional handling.
19371 if (!varTypeIsStruct(lclTyp) && !argInfo.argHasSideEff && !argInfo.argHasGlobRef &&
19372 !argInfo.argHasCallerLocalRef)
19374 /* Get a *LARGE* LCL_VAR node */
19375 op1 = gtNewLclLNode(tmpNum, genActualType(lclTyp), lclNum);
19377 /* Record op1 as the very first use of this argument.
19378 If there are no further uses of the arg, we may be
19379 able to use the actual arg node instead of the temp.
19380 If we do see any further uses, we will clear this. */
19381 argInfo.argBashTmpNode = op1;
19385 /* Get a small LCL_VAR node */
19386 op1 = gtNewLclvNode(tmpNum, genActualType(lclTyp));
19387 /* No bashing of this argument */
19388 argInfo.argBashTmpNode = nullptr;
19393 // Mark this argument as used.
19394 argInfo.argIsUsed = true;
19399 /******************************************************************************
19400 Is this the original "this" argument to the call being inlined?
19402 Note that we do not inline methods with "starg 0", and so we do not need to worry about it.
19406 BOOL Compiler::impInlineIsThis(GenTree* tree, InlArgInfo* inlArgInfo)
19408 assert(compIsForInlining());
19409 return (tree->gtOper == GT_LCL_VAR && tree->gtLclVarCommon.gtLclNum == inlArgInfo[0].argTmpNum);
19412 //-----------------------------------------------------------------------------
19413 // This function checks if a dereference in the inlinee can guarantee that
19414 // the "this" is non-NULL.
19415 // If we haven't hit a branch or a side effect, and we are dereferencing
19416 // from 'this' to access a field or make GTF_CALL_NULLCHECK call,
19417 // then we can avoid a separate null pointer check.
19419 // "additionalTreesToBeEvaluatedBefore"
19420 // is the set of pending trees that have not yet been added to the statement list,
19421 // and which have been removed from verCurrentState.esStack[]
19423 BOOL Compiler::impInlineIsGuaranteedThisDerefBeforeAnySideEffects(GenTree* additionalTreesToBeEvaluatedBefore,
19424 GenTree* variableBeingDereferenced,
19425 InlArgInfo* inlArgInfo)
19427 assert(compIsForInlining());
19428 assert(opts.OptEnabled(CLFLG_INLINING));
19430 BasicBlock* block = compCurBB;
19435 if (block != fgFirstBB)
          return FALSE;

19440 if (!impInlineIsThis(variableBeingDereferenced, inlArgInfo))
          return FALSE;
19445 if (additionalTreesToBeEvaluatedBefore &&
19446     GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(additionalTreesToBeEvaluatedBefore->gtFlags))
          return FALSE;
19451 for (GenTree* stmt = impTreeList->gtNext; stmt; stmt = stmt->gtNext)
19453     GenTree* expr = stmt->gtStmt.gtStmtExpr;
19455     if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(expr->gtFlags))
              return FALSE;

19461 for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
19463     unsigned stackTreeFlags = verCurrentState.esStack[level].val->gtFlags;
19464     if (GTF_GLOBALLY_VISIBLE_SIDE_EFFECTS(stackTreeFlags))
              return FALSE;

      return TRUE;
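// Illustrative example (added commentary): when inlining
//
//     int GetX() { return this.x; }
//
// the load of this.x is the first observable effect in the first block, so the
// dereference itself proves "this" is non-null and the separate null check on
// the inlined body can be omitted.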
19473 //------------------------------------------------------------------------
19474 // impMarkInlineCandidate: determine if this call can be subsequently inlined
19477 // callNode -- call under scrutiny
19478 // exactContextHnd -- context handle for inlining
19479 // exactContextNeedsRuntimeLookup -- true if context required runtime lookup
19480 // callInfo -- call info from VM
19483 // If callNode is an inline candidate, this method sets the flag
19484 // GTF_CALL_INLINE_CANDIDATE, and ensures that helper methods have
19485 // filled in the associated InlineCandidateInfo.
19487 // If callNode is not an inline candidate, and the reason is
19488 // something that is inherent to the method being called, the
19489 // method may be marked as "noinline" to short-circuit any
19490 // future assessments of calls to this method.
19492 void Compiler::impMarkInlineCandidate(GenTree* callNode,
19493 CORINFO_CONTEXT_HANDLE exactContextHnd,
19494 bool exactContextNeedsRuntimeLookup,
19495 CORINFO_CALL_INFO* callInfo)
19497 // Let the strategy know there's another call
19498 impInlineRoot()->m_inlineStrategy->NoteCall();
19500 if (!opts.OptEnabled(CLFLG_INLINING))
19502 /* XXX Mon 8/18/2008
19503 * This assert is misleading. The caller does not ensure that we have CLFLG_INLINING set before
19504 * calling impMarkInlineCandidate. However, if this assert trips it means that we're an inlinee and
19505 * CLFLG_MINOPT is set. That doesn't make a lot of sense. If you hit this assert, work back and
19506 * figure out why we did not set MAXOPT for this compile.
19508 assert(!compIsForInlining());
19512 if (compIsForImportOnly())
19514 // Don't bother creating the inline candidate during verification.
19515 // Otherwise the call to info.compCompHnd->canInline will trigger a recursive verification
19516 // that leads to the creation of multiple instances of Compiler.
19520 GenTreeCall* call = callNode->AsCall();
19521 InlineResult inlineResult(this, call, nullptr, "impMarkInlineCandidate");
19523 // Don't inline if not optimizing root method
19524 if (opts.compDbgCode)
19526 inlineResult.NoteFatal(InlineObservation::CALLER_DEBUG_CODEGEN);
19530 // Don't inline if inlining into root method is disabled.
19531 if (InlineStrategy::IsNoInline(info.compCompHnd, info.compMethodHnd))
19533 inlineResult.NoteFatal(InlineObservation::CALLER_IS_JIT_NOINLINE);
19537 // Inlining candidate determination needs to honor only IL tail prefix.
19538 // Inlining takes precedence over implicit tail call optimization (if the call is not directly recursive).
19539 if (call->IsTailPrefixedCall())
19541 inlineResult.NoteFatal(InlineObservation::CALLSITE_EXPLICIT_TAIL_PREFIX);
19545 // Tail recursion elimination takes precedence over inlining.
19546 // TODO: We may want to do some of the additional checks from fgMorphCall
19547 // here to reduce the chance we don't inline a call that won't be optimized
19548 // as a fast tail call or turned into a loop.
19549 if (gtIsRecursiveCall(call) && call->IsImplicitTailCall())
19551 inlineResult.NoteFatal(InlineObservation::CALLSITE_IMPLICIT_REC_TAIL_CALL);
19555 if (call->IsVirtual())
19557 inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT);
19561 /* Ignore helper calls */
19563 if (call->gtCallType == CT_HELPER)
19565 inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_CALL_TO_HELPER);
19569 /* Ignore indirect calls */
19570 if (call->gtCallType == CT_INDIRECT)
19572 inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_NOT_DIRECT_MANAGED);
19576 /* I removed the check for BBJ_THROW. BBJ_THROW is usually marked as rarely run. This more or less
19577 * restricts the inliner to non-expanding inlines. I removed the check to allow for non-expanding
19578 * inlining in throw blocks. I should consider the same thing for catch and filter regions. */
19580 CORINFO_METHOD_HANDLE fncHandle = call->gtCallMethHnd;
19583 // Reuse method flags from the original callInfo if possible
19584 if (fncHandle == callInfo->hMethod)
19586 methAttr = callInfo->methodFlags;
19590 methAttr = info.compCompHnd->getMethodAttribs(fncHandle);
19594 if (compStressCompile(STRESS_FORCE_INLINE, 0))
19596 methAttr |= CORINFO_FLG_FORCEINLINE;
19600 // Check for COMPlus_AggressiveInlining
19601 if (compDoAggressiveInlining)
19603 methAttr |= CORINFO_FLG_FORCEINLINE;
19606 if (!(methAttr & CORINFO_FLG_FORCEINLINE))
19608 /* Don't bother inline blocks that are in the filter region */
19609 if (bbInCatchHandlerILRange(compCurBB))
19614 printf("\nWill not inline blocks that are in the catch handler region\n");
19619 inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_CATCH);
19623 if (bbInFilterILRange(compCurBB))
19628 printf("\nWill not inline blocks that are in the filter region\n");
19632 inlineResult.NoteFatal(InlineObservation::CALLSITE_IS_WITHIN_FILTER);
19637 /* If the caller's stack frame is marked, then we can't do any inlining. Period. */
19639 if (opts.compNeedSecurityCheck)
19641 inlineResult.NoteFatal(InlineObservation::CALLER_NEEDS_SECURITY_CHECK);
19645 /* Check if we tried to inline this method before */
19647 if (methAttr & CORINFO_FLG_DONT_INLINE)
19649 inlineResult.NoteFatal(InlineObservation::CALLEE_IS_NOINLINE);
19653 /* Cannot inline synchronized methods */
19655 if (methAttr & CORINFO_FLG_SYNCH)
19657 inlineResult.NoteFatal(InlineObservation::CALLEE_IS_SYNCHRONIZED);
19661 /* Do not inline if callee needs security checks (since they would then mark the wrong frame) */
19663 if (methAttr & CORINFO_FLG_SECURITYCHECK)
19665 inlineResult.NoteFatal(InlineObservation::CALLEE_NEEDS_SECURITY_CHECK);
19669 /* Check legality of PInvoke callsite (for inlining of marshalling code) */
19671 if (methAttr & CORINFO_FLG_PINVOKE)
19673 // See comment in impCheckForPInvokeCall
19674 BasicBlock* block = compIsForInlining() ? impInlineInfo->iciBlock : compCurBB;
19675 if (!impCanPInvokeInlineCallSite(block))
19677 inlineResult.NoteFatal(InlineObservation::CALLSITE_PINVOKE_EH);
19682 InlineCandidateInfo* inlineCandidateInfo = nullptr;
19683 impCheckCanInline(call, fncHandle, methAttr, exactContextHnd, &inlineCandidateInfo, &inlineResult);
19685 if (inlineResult.IsFailure())
19690 // The old value should be NULL
19691 assert(call->gtInlineCandidateInfo == nullptr);
19693 // The new value should not be NULL.
19694 assert(inlineCandidateInfo != nullptr);
19695 inlineCandidateInfo->exactContextNeedsRuntimeLookup = exactContextNeedsRuntimeLookup;
19697 call->gtInlineCandidateInfo = inlineCandidateInfo;
19699 // Mark the call node as inline candidate.
19700 call->gtFlags |= GTF_CALL_INLINE_CANDIDATE;
19702 // Let the strategy know there's another candidate.
19703 impInlineRoot()->m_inlineStrategy->NoteCandidate();
19705 // Since we're not actually inlining yet, and this call site is
19706 // still just an inline candidate, there's nothing to report.
19707 inlineResult.SetReported();
19710 /******************************************************************************/
19711 // Returns true if the given intrinsic will be implemented by target-specific
19714 bool Compiler::IsTargetIntrinsic(CorInfoIntrinsics intrinsicId)
19716 #if defined(_TARGET_XARCH_)
19717 switch (intrinsicId)
19719 // AMD64/x86 has SSE2 instructions to directly compute sqrt/abs and SSE4.1
19720 // instructions to directly compute round/ceiling/floor.
19722 // TODO: Because the x86 backend only targets SSE for floating-point code,
19723 // it does not treat Sine, Cosine, or Round as intrinsics (JIT32
19724 // implemented those intrinsics as x87 instructions). If this poses
19725 // a CQ problem, it may be necessary to change the implementation of
19726 // the helper calls to decrease call overhead or switch back to the
19727 // x87 instructions. This is tracked by #7097.
19728 case CORINFO_INTRINSIC_Sqrt:
19729 case CORINFO_INTRINSIC_Abs:
          return true;
19732 case CORINFO_INTRINSIC_Round:
19733 case CORINFO_INTRINSIC_Ceiling:
19734 case CORINFO_INTRINSIC_Floor:
19735 return compSupports(InstructionSet_SSE41);

      default:
          return false;
19740 #elif defined(_TARGET_ARM64_)
19741 switch (intrinsicId)
19743 case CORINFO_INTRINSIC_Sqrt:
19744 case CORINFO_INTRINSIC_Abs:
19745 case CORINFO_INTRINSIC_Round:
19746 case CORINFO_INTRINSIC_Floor:
19747 case CORINFO_INTRINSIC_Ceiling:
          return true;

      default:
          return false;
19753 #elif defined(_TARGET_ARM_)
19754 switch (intrinsicId)
19756 case CORINFO_INTRINSIC_Sqrt:
19757 case CORINFO_INTRINSIC_Abs:
19758 case CORINFO_INTRINSIC_Round:
19765 // TODO: This portion of logic is not implemented for other arch.
19766 // The reason for returning true is that on all other arch the only intrinsic
19767 // enabled are target intrinsics.
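
// For example: on XARCH, a C# call such as "double r = Math.Sqrt(x);" maps to
// CORINFO_INTRINSIC_Sqrt, so it is imported as a GT_INTRINSIC node and emitted
// as an SSE2 sqrt instruction rather than as a call into System.Math.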

/******************************************************************************/
// Returns true if the given intrinsic will be implemented by calling
// System.Math methods.

bool Compiler::IsIntrinsicImplementedByUserCall(CorInfoIntrinsics intrinsicId)
{
    // Currently, if a math intrinsic is not implemented by target-specific
    // instructions, it will be implemented by a System.Math call. In the
    // future, if we turn to implementing some of them with helper calls,
    // this predicate needs to be revisited.
    return !IsTargetIntrinsic(intrinsicId);
}
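
// For example: CORINFO_INTRINSIC_Sin is not a target intrinsic on XARCH (see
// the TODO in IsTargetIntrinsic above), so Math.Sin remains an ordinary call
// into System.Math rather than being expanded to an instruction sequence.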

bool Compiler::IsMathIntrinsic(CorInfoIntrinsics intrinsicId)
{
    switch (intrinsicId)
    {
        case CORINFO_INTRINSIC_Sin:
        case CORINFO_INTRINSIC_Cbrt:
        case CORINFO_INTRINSIC_Sqrt:
        case CORINFO_INTRINSIC_Abs:
        case CORINFO_INTRINSIC_Cos:
        case CORINFO_INTRINSIC_Round:
        case CORINFO_INTRINSIC_Cosh:
        case CORINFO_INTRINSIC_Sinh:
        case CORINFO_INTRINSIC_Tan:
        case CORINFO_INTRINSIC_Tanh:
        case CORINFO_INTRINSIC_Asin:
        case CORINFO_INTRINSIC_Asinh:
        case CORINFO_INTRINSIC_Acos:
        case CORINFO_INTRINSIC_Acosh:
        case CORINFO_INTRINSIC_Atan:
        case CORINFO_INTRINSIC_Atan2:
        case CORINFO_INTRINSIC_Atanh:
        case CORINFO_INTRINSIC_Log10:
        case CORINFO_INTRINSIC_Pow:
        case CORINFO_INTRINSIC_Exp:
        case CORINFO_INTRINSIC_Ceiling:
        case CORINFO_INTRINSIC_Floor:
            return true;

        default:
            return false;
    }
}

bool Compiler::IsMathIntrinsic(GenTree* tree)
{
    return (tree->OperGet() == GT_INTRINSIC) && IsMathIntrinsic(tree->gtIntrinsic.gtIntrinsicId);
}

//------------------------------------------------------------------------
// impDevirtualizeCall: Attempt to change a virtual vtable call into a
//   normal call
//
// Arguments:
//     call -- the call node to examine/modify
//     method -- [IN/OUT] the method handle for the call. Updated iff call devirtualized.
//     methodFlags -- [IN/OUT] flags for the method to call. Updated iff call devirtualized.
//     contextHandle -- [IN/OUT] context handle for the call. Updated iff call devirtualized.
//     exactContextHandle -- [OUT] updated context handle iff call devirtualized
//
// Notes:
//     Virtual calls in IL will always "invoke" the base class method.
//
//     This transformation looks for evidence that the type of 'this'
//     in the call is exactly known, is a final class, or would invoke
//     a final method, and if that and other safety checks pan out,
//     modifies the call and the call info to create a direct call.
//
//     This transformation is initially done in the importer and not
//     in some subsequent optimization pass because we want it to be
//     upstream of inline candidate identification.
//
//     However, later phases may supply improved type information that
//     can enable further devirtualization. We currently reinvoke this
//     code after inlining, if the return value of the inlined call is
//     the 'this obj' of a subsequent virtual call.
//
//     If devirtualization succeeds and the call's this object is the
//     result of a box, the jit will ask the EE for the unboxed entry
//     point. If this exists, the jit will see if it can rework the box
//     to instead make a local copy. If that is doable, the call is
//     updated to invoke the unboxed entry on the local copy.
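//
//     For example (illustrative):
//
//         sealed class Widget { public override string ToString() => "widget"; }
//         ...
//         object o = new Widget();
//         string s = o.ToString();   // IL: callvirt System.Object::ToString()
//
//     Since the type of 'o' is exactly known and Widget is a final class,
//     the callvirt can be rewritten as a direct call to Widget::ToString,
//     which in turn lets it be considered as an inline candidate.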

void Compiler::impDevirtualizeCall(GenTreeCall*            call,
                                   CORINFO_METHOD_HANDLE*  method,
                                   unsigned*               methodFlags,
                                   CORINFO_CONTEXT_HANDLE* contextHandle,
                                   CORINFO_CONTEXT_HANDLE* exactContextHandle)
{
    assert(call != nullptr);
    assert(method != nullptr);
    assert(methodFlags != nullptr);
    assert(contextHandle != nullptr);

    // This should be a virtual vtable or virtual stub call.
    assert(call->IsVirtual());

    // Bail if not optimizing.
    if (opts.MinOpts())
    {
        return;
    }

    // Bail if debuggable codegen.
    if (opts.compDbgCode)
    {
        return;
    }

#if defined(DEBUG)
    // Bail if devirt is disabled.
    if (JitConfig.JitEnableDevirtualization() == 0)
    {
        return;
    }

    const bool doPrint = JitConfig.JitPrintDevirtualizedMethods() == 1;
#endif // DEBUG

    // Fetch information about the virtual method we're calling.
    CORINFO_METHOD_HANDLE baseMethod        = *method;
    unsigned              baseMethodAttribs = *methodFlags;

    if (baseMethodAttribs == 0)
    {
        // For late devirt we may not have method attributes, so fetch them.
        baseMethodAttribs = info.compCompHnd->getMethodAttribs(baseMethod);
    }
    else
    {
#if defined(DEBUG)
        // Validate that callInfo has up-to-date method flags.
        const DWORD freshBaseMethodAttribs = info.compCompHnd->getMethodAttribs(baseMethod);

        // All the base method attributes should agree, save that
        // CORINFO_FLG_DONT_INLINE may have changed from 0 to 1
        // because of concurrent jitting activity.
        //
        // Note we don't look at this particular flag bit below, and
        // later on (if we do try and inline) we will rediscover why
        // the method can't be inlined, so there's no danger here in
        // seeing this particular flag bit in different states between
        // the cached and fresh values.
        if ((freshBaseMethodAttribs & ~CORINFO_FLG_DONT_INLINE) != (baseMethodAttribs & ~CORINFO_FLG_DONT_INLINE))
        {
            assert(!"mismatched method attributes");
        }
#endif // DEBUG
    }

    // In R2R mode, we might see virtual stub calls to
    // non-virtuals. For instance, cases where the non-virtual method
    // is in a different assembly but is called via CALLVIRT. For
    // version resilience we must allow for the fact that the method
    // might become virtual in some update.
    //
    // In non-R2R modes, CALLVIRT <nonvirtual> will be turned into a
    // regular call+nullcheck upstream, so we won't reach this point.
    if ((baseMethodAttribs & CORINFO_FLG_VIRTUAL) == 0)
    {
        assert(call->IsVirtualStub());
        assert(opts.IsReadyToRun());
        JITDUMP("\nimpDevirtualizeCall: [R2R] base method not virtual, sorry\n");
        return;
    }

    // See what we know about the type of 'this' in the call.
    GenTree*             thisObj       = call->gtCallObjp->gtEffectiveVal(false);
    GenTree*             actualThisObj = nullptr;
    bool                 isExact       = false;
    bool                 objIsNonNull  = false;
    CORINFO_CLASS_HANDLE objClass      = gtGetClassHandle(thisObj, &isExact, &objIsNonNull);

    // See if we have special knowledge that can get us a type or a better type.
    if ((objClass == nullptr) || !isExact)
    {
        // Walk back through any return expression placeholders.
        actualThisObj = thisObj->gtRetExprVal();

        // See if we landed on a call to a special intrinsic method.
        if (actualThisObj->IsCall())
        {
            GenTreeCall* thisObjCall = actualThisObj->AsCall();
            if ((thisObjCall->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) != 0)
            {
                assert(thisObjCall->gtCallType == CT_USER_FUNC);
                CORINFO_METHOD_HANDLE specialIntrinsicHandle = thisObjCall->gtCallMethHnd;
                CORINFO_CLASS_HANDLE  specialObjClass = impGetSpecialIntrinsicExactReturnType(specialIntrinsicHandle);
                if (specialObjClass != nullptr)
                {
                    objClass     = specialObjClass;
                    isExact      = true;
                    objIsNonNull = true;
                }
            }
        }
    }

    // Bail if we know nothing.
    if (objClass == nullptr)
    {
        JITDUMP("\nimpDevirtualizeCall: no type available (op=%s)\n", GenTree::OpName(thisObj->OperGet()));
        return;
    }

    // Fetch information about the class that introduced the virtual method.
    CORINFO_CLASS_HANDLE baseClass        = info.compCompHnd->getMethodClass(baseMethod);
    const DWORD          baseClassAttribs = info.compCompHnd->getClassAttribs(baseClass);

#if !defined(FEATURE_CORECLR)
    // If the base class is not beforefieldinit, then devirtualizing may
    // cause us to miss a base class init trigger. The spec says we don't
    // need a trigger for ref class callvirts, but desktop seems to
    // have one anyway. So defer.
    if ((baseClassAttribs & CORINFO_FLG_BEFOREFIELDINIT) == 0)
    {
        JITDUMP("\nimpDevirtualizeCall: base class has precise initialization, sorry\n");
        return;
    }
#endif // !FEATURE_CORECLR

    // Is the call an interface call?
    const bool isInterface = (baseClassAttribs & CORINFO_FLG_INTERFACE) != 0;

    // If the objClass is sealed (final), then we may be able to devirtualize.
    const DWORD objClassAttribs = info.compCompHnd->getClassAttribs(objClass);
    const bool  objClassIsFinal = (objClassAttribs & CORINFO_FLG_FINAL) != 0;

#if defined(DEBUG)
    const char* callKind       = isInterface ? "interface" : "virtual";
    const char* objClassNote   = "[?]";
    const char* objClassName   = "?objClass";
    const char* baseClassName  = "?baseClass";
    const char* baseMethodName = "?baseMethod";

    if (verbose || doPrint)
    {
        objClassNote   = isExact ? " [exact]" : objClassIsFinal ? " [final]" : "";
        objClassName   = info.compCompHnd->getClassName(objClass);
        baseClassName  = info.compCompHnd->getClassName(baseClass);
        baseMethodName = eeGetMethodName(baseMethod, nullptr);

        if (verbose)
        {
            printf("\nimpDevirtualizeCall: Trying to devirtualize %s call:\n"
                   "    class for 'this' is %s%s (attrib %08x)\n"
                   "    base method is %s::%s\n",
                   callKind, objClassName, objClassNote, objClassAttribs, baseClassName, baseMethodName);
        }
    }
#endif // defined(DEBUG)

    // Bail if the obj class is an interface.
    // See for instance System.ValueTuple`8::GetHashCode, where lcl 0 is System.IValueTupleInternal
    //   IL_021d:  ldloc.0
    //   IL_021e:  callvirt   instance int32 System.Object::GetHashCode()
    if ((objClassAttribs & CORINFO_FLG_INTERFACE) != 0)
    {
        JITDUMP("--- obj class is interface, sorry\n");
        return;
    }

    if (isInterface)
    {
        assert(call->IsVirtualStub());
        JITDUMP("--- base class is interface\n");
    }

    // Fetch the method that would be called based on the declared type of 'this'.
    CORINFO_CONTEXT_HANDLE ownerType     = *contextHandle;
    CORINFO_METHOD_HANDLE  derivedMethod = info.compCompHnd->resolveVirtualMethod(baseMethod, objClass, ownerType);

    // If we failed to get a handle, we can't devirtualize. This can
    // happen when prejitting, if the devirtualization crosses
    // servicing bubble boundaries.
    if (derivedMethod == nullptr)
    {
        JITDUMP("--- no derived method, sorry\n");
        return;
    }

    // Fetch method attributes to see if the method is marked final.
    DWORD      derivedMethodAttribs = info.compCompHnd->getMethodAttribs(derivedMethod);
    const bool derivedMethodIsFinal = ((derivedMethodAttribs & CORINFO_FLG_FINAL) != 0);

#if defined(DEBUG)
    const char* derivedClassName  = "?derivedClass";
    const char* derivedMethodName = "?derivedMethod";

    const char* note = "speculative";
    if (isExact)
    {
        note = "exact";
    }
    else if (objClassIsFinal)
    {
        note = "final class";
    }
    else if (derivedMethodIsFinal)
    {
        note = "final method";
    }

    if (verbose || doPrint)
    {
        derivedMethodName = eeGetMethodName(derivedMethod, &derivedClassName);
        if (verbose)
        {
            printf("    devirt to %s::%s -- %s\n", derivedClassName, derivedMethodName, note);
            gtDispTree(call);
        }
    }
#endif // defined(DEBUG)

    if (!isExact && !objClassIsFinal && !derivedMethodIsFinal)
    {
        // Type is not exact, and neither the class nor the method is final.
        //
        // We could speculatively devirtualize, but there's no
        // reason to believe the derived method is the one that
        // is likely to be invoked.
        //
        // If there's currently no further overriding (that is, at
        // the time of jitting, objClass has no subclasses that
        // override this method), then perhaps we'd be willing to
        // make a bet...?
        JITDUMP("    Class not final or exact, method not final, no devirtualization\n");
        return;
    }

    // For interface calls we must have an exact type or final class.
    if (isInterface && !isExact && !objClassIsFinal)
    {
        JITDUMP("    Class not final or exact for interface, no devirtualization\n");
        return;
    }

    JITDUMP("    %s; can devirtualize\n", note);

    // Make the updates.
    call->gtFlags &= ~GTF_CALL_VIRT_VTABLE;
    call->gtFlags &= ~GTF_CALL_VIRT_STUB;
    call->gtCallMethHnd = derivedMethod;
    call->gtCallType    = CT_USER_FUNC;
    call->gtCallMoreFlags |= GTF_CALL_M_DEVIRTUALIZED;
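
    // The call now looks like an ordinary direct call, so downstream phases
    // (most importantly inline candidate identification) will treat it as
    // such; GTF_CALL_M_DEVIRTUALIZED simply records how it got that way.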

    // Virtual calls include an implicit null check, which we may
    // now need to make explicit.
    if (!objIsNonNull)
    {
        call->gtFlags |= GTF_CALL_NULLCHECK;
    }

    // Clear the inline candidate info (may be non-null since
    // it's a union field used for other things by virtual
    // stubs).
    call->gtInlineCandidateInfo = nullptr;

#if defined(DEBUG)
    if (verbose)
    {
        printf("... after devirt...\n");
        gtDispTree(call);
    }

    if (doPrint)
    {
        printf("Devirtualized %s call to %s:%s; now direct call to %s:%s [%s]\n", callKind, baseClassName,
               baseMethodName, derivedClassName, derivedMethodName, note);
    }
#endif // defined(DEBUG)

    // If the 'this' object is a box, see if we can find the unboxed entry point for the call.
    if (thisObj->IsBoxedValue())
    {
        JITDUMP("Now have direct call to boxed entry point, looking for unboxed entry point\n");

        // Note for some shared methods the unboxed entry point requires an extra parameter.
        bool                  requiresInstMethodTableArg = false;
        CORINFO_METHOD_HANDLE unboxedEntryMethod =
            info.compCompHnd->getUnboxedEntry(derivedMethod, &requiresInstMethodTableArg);

        if (unboxedEntryMethod != nullptr)
        {
            // Since the call is the only consumer of the box, we know the box can't escape,
            // as it is being passed an interior pointer.
            //
            // So, revise the box to simply create a local copy, use the address of that copy
            // as the this pointer, and update the entry point to the unboxed entry.
            //
            // Ideally, we then inline the boxed method and, if it turns out not to modify
            // the copy, we can undo the copy too.
            if (requiresInstMethodTableArg)
            {
                // Perform a trial box removal and ask for the type handle tree.
                JITDUMP("Unboxed entry needs method table arg...\n");
                GenTree* methodTableArg = gtTryRemoveBoxUpstreamEffects(thisObj, BR_DONT_REMOVE_WANT_TYPE_HANDLE);

                if (methodTableArg != nullptr)
                {
                    // If that worked, turn the box into a copy to a local var.
                    JITDUMP("Found suitable method table arg tree [%06u]\n", dspTreeID(methodTableArg));
                    GenTree* localCopyThis = gtTryRemoveBoxUpstreamEffects(thisObj, BR_MAKE_LOCAL_COPY);

                    if (localCopyThis != nullptr)
                    {
                        // Pass the local var as this and the type handle as a new arg.
                        JITDUMP("Success! invoking unboxed entry point on local copy, and passing method table arg\n");
                        call->gtCallObjp = localCopyThis;
                        call->gtCallMoreFlags |= GTF_CALL_M_UNBOXED;

                        // Prepend for R2L arg passing or empty L2R passing.
                        if ((Target::g_tgtArgOrder == Target::ARG_ORDER_R2L) || (call->gtCallArgs == nullptr))
                        {
                            call->gtCallArgs = gtNewListNode(methodTableArg, call->gtCallArgs);
                        }
                        // Append for non-empty L2R.
                        else
                        {
                            GenTreeArgList* beforeArg = call->gtCallArgs;
                            while (beforeArg->Rest() != nullptr)
                            {
                                beforeArg = beforeArg->Rest();
                            }

                            beforeArg->Rest() = gtNewListNode(methodTableArg, nullptr);
                        }

                        call->gtCallMethHnd = unboxedEntryMethod;
                        derivedMethod       = unboxedEntryMethod;

                        // Method attributes will differ because the unboxed entry point is shared.
                        const DWORD unboxedMethodAttribs = info.compCompHnd->getMethodAttribs(unboxedEntryMethod);
                        JITDUMP("Updating method attribs from 0x%08x to 0x%08x\n", derivedMethodAttribs,
                                unboxedMethodAttribs);
                        derivedMethodAttribs = unboxedMethodAttribs;
                    }
                    else
                    {
                        JITDUMP("Sorry, failed to undo the box -- can't convert to local copy\n");
                    }
                }
                else
                {
                    JITDUMP("Sorry, failed to undo the box -- can't find method table arg\n");
                }
            }
            else
            {
                JITDUMP("Found unboxed entry point, trying to simplify box to a local copy\n");
                GenTree* localCopyThis = gtTryRemoveBoxUpstreamEffects(thisObj, BR_MAKE_LOCAL_COPY);

                if (localCopyThis != nullptr)
                {
                    JITDUMP("Success! invoking unboxed entry point on local copy\n");
                    call->gtCallObjp    = localCopyThis;
                    call->gtCallMethHnd = unboxedEntryMethod;
                    call->gtCallMoreFlags |= GTF_CALL_M_UNBOXED;
                    derivedMethod = unboxedEntryMethod;
                }
                else
                {
                    JITDUMP("Sorry, failed to undo the box\n");
                }
            }
        }
        else
        {
            // Many of the low-level methods on value classes won't have unboxed entries,
            // as they need access to the type of the object.
            //
            // Note this may be a cue for us to stack allocate the boxed object, since
            // we probably know that these objects don't escape.
            JITDUMP("Sorry, failed to find unboxed entry point\n");
        }
    }
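
    // For example (illustrative): given C# code like
    //
    //     int i = 42;
    //     object o = i;            // box
    //     string s = o.ToString();
    //
    // the callvirt devirtualizes to Int32::ToString, the box is revised into a
    // local copy of 'i', and the call invokes the unboxed entry on the local's
    // address -- so the heap allocation for the box can disappear entirely.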

    // Fetch the class that introduced the derived method.
    //
    // Note this may not equal objClass, if there is a
    // final method that objClass inherits.
    CORINFO_CLASS_HANDLE derivedClass = info.compCompHnd->getMethodClass(derivedMethod);

    // Need to update the call info too. This is fragile,
    // but hopefully the derived method conforms to
    // the base in most other ways.
    *method        = derivedMethod;
    *methodFlags   = derivedMethodAttribs;
    *contextHandle = MAKE_METHODCONTEXT(derivedMethod);

    // Update the exact context handle.
    if ((exactContextHandle != nullptr) && (*exactContextHandle != nullptr))
    {
        *exactContextHandle = MAKE_METHODCONTEXT(derivedMethod);
    }

#ifdef FEATURE_READYTORUN_COMPILER
    if (opts.IsReadyToRun())
    {
        // For R2R, getCallInfo triggers bookkeeping on the zap
        // side, so we need to call it here.
        //
        // First, cons up a suitable resolved token.
        CORINFO_RESOLVED_TOKEN derivedResolvedToken = {};

        derivedResolvedToken.tokenScope   = info.compScopeHnd;
        derivedResolvedToken.tokenContext = *contextHandle;
        derivedResolvedToken.token        = info.compCompHnd->getMethodDefFromMethod(derivedMethod);
        derivedResolvedToken.tokenType    = CORINFO_TOKENKIND_Method;
        derivedResolvedToken.hClass       = derivedClass;
        derivedResolvedToken.hMethod      = derivedMethod;

        // Look up the new call info.
        CORINFO_CALL_INFO derivedCallInfo;
        eeGetCallInfo(&derivedResolvedToken, nullptr, addVerifyFlag(CORINFO_CALLINFO_ALLOWINSTPARAM), &derivedCallInfo);

        // Update the call.
        call->gtCallMoreFlags &= ~GTF_CALL_M_VIRTSTUB_REL_INDIRECT;
        call->gtCallMoreFlags &= ~GTF_CALL_M_R2R_REL_INDIRECT;
        call->setEntryPoint(derivedCallInfo.codePointerLookup.constLookup);
    }
#endif // FEATURE_READYTORUN_COMPILER
}

//------------------------------------------------------------------------
// impGetSpecialIntrinsicExactReturnType: Look for special cases where a call
//   to an intrinsic returns an exact type
//
// Arguments:
//     methodHnd -- handle for the special intrinsic method
//
// Returns:
//     Exact class handle returned by the intrinsic call, if known.
//     Nullptr if not known, or not likely to lead to beneficial optimization.

CORINFO_CLASS_HANDLE Compiler::impGetSpecialIntrinsicExactReturnType(CORINFO_METHOD_HANDLE methodHnd)
{
20311 JITDUMP("Special intrinsic: looking for exact type returned by %s\n", eeGetMethodFullName(methodHnd));
20313 CORINFO_CLASS_HANDLE result = nullptr;
20315 // See what intrinisc we have...
20316 const NamedIntrinsic ni = lookupNamedIntrinsic(methodHnd);
20319 case NI_System_Collections_Generic_EqualityComparer_get_Default:
20321 // Expect one class generic parameter; figure out which it is.
20322 CORINFO_SIG_INFO sig;
20323 info.compCompHnd->getMethodSig(methodHnd, &sig);
20324 assert(sig.sigInst.classInstCount == 1);
20325 CORINFO_CLASS_HANDLE typeHnd = sig.sigInst.classInst[0];
20326 assert(typeHnd != nullptr);
20328 // Lookup can incorrect when we have __Canon as it won't appear
20329 // to implement any interface types.
20331 // And if we do not have a final type, devirt & inlining is
20332 // unlikely to result in much simplification.
20334 // We can use CORINFO_FLG_FINAL to screen out both of these cases.
20335 const DWORD typeAttribs = info.compCompHnd->getClassAttribs(typeHnd);
20336 const bool isFinalType = ((typeAttribs & CORINFO_FLG_FINAL) != 0);
20340 result = info.compCompHnd->getDefaultEqualityComparerClass(typeHnd);
20341 JITDUMP("Special intrinsic for type %s: return type is %s\n", eeGetClassName(typeHnd),
20342 result != nullptr ? eeGetClassName(result) : "unknown");
20346 JITDUMP("Special intrinsic for type %s: type not final, so deferring opt\n", eeGetClassName(typeHnd));
20354 JITDUMP("This special intrinsic not handled, sorry...\n");
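
// For example (illustrative; "SomeSealedType" is a stand-in for any final
// type): for EqualityComparer<SomeSealedType>.Default, the VM can report the
// exact comparer class it will return, so a later virtual call such as
// comparer.Equals(a, b) can be devirtualized and potentially inlined.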

//------------------------------------------------------------------------
// impAllocateToken: create a CORINFO_RESOLVED_TOKEN in jit-allocated memory and init it.
//
// Arguments:
//    token - init value for the allocated token.
//
// Return Value:
//    pointer to the token in jit-allocated memory.
//
CORINFO_RESOLVED_TOKEN* Compiler::impAllocateToken(CORINFO_RESOLVED_TOKEN token)
{
    CORINFO_RESOLVED_TOKEN* memory = getAllocator(CMK_Unknown).allocate<CORINFO_RESOLVED_TOKEN>(1);
    *memory                        = token;
    return memory;
}

//------------------------------------------------------------------------
// SpillRetExprHelper: iterate through a call's argument trees and spill
//   any ret_expr to a local variable.
//
class SpillRetExprHelper
{
public:
    SpillRetExprHelper(Compiler* comp) : comp(comp)
    {
    }

    void StoreRetExprResultsInArgs(GenTreeCall* call)
    {
        GenTreeArgList** pArgs = &call->gtCallArgs;
        if (*pArgs != nullptr)
        {
            comp->fgWalkTreePre((GenTree**)pArgs, SpillRetExprVisitor, this);
        }

        GenTree** pThisArg = &call->gtCallObjp;
        if (*pThisArg != nullptr)
        {
            comp->fgWalkTreePre(pThisArg, SpillRetExprVisitor, this);
        }
    }

private:
    static Compiler::fgWalkResult SpillRetExprVisitor(GenTree** pTree, Compiler::fgWalkData* fgWalkPre)
    {
        assert((pTree != nullptr) && (*pTree != nullptr));
        GenTree* tree = *pTree;
        if ((tree->gtFlags & GTF_CALL) == 0)
        {
            // Trees with ret_expr are marked as GTF_CALL.
            return Compiler::WALK_SKIP_SUBTREES;
        }
        if (tree->OperGet() == GT_RET_EXPR)
        {
            SpillRetExprHelper* walker = static_cast<SpillRetExprHelper*>(fgWalkPre->pCallbackData);
            walker->StoreRetExprAsLocalVar(pTree);
        }
        return Compiler::WALK_CONTINUE;
    }

    void StoreRetExprAsLocalVar(GenTree** pRetExpr)
    {
        GenTree* retExpr = *pRetExpr;
        assert(retExpr->OperGet() == GT_RET_EXPR);
        JITDUMP("Store return expression %u as a local var.\n", retExpr->gtTreeID);
        unsigned tmp = comp->lvaGrabTemp(true DEBUGARG("spilling ret_expr"));
        comp->impAssignTempGen(tmp, retExpr, (unsigned)Compiler::CHECK_SPILL_NONE);
        *pRetExpr = comp->gtNewLclvNode(tmp, retExpr->TypeGet());
    }

    Compiler* comp;
};

//------------------------------------------------------------------------
// addFatPointerCandidate: mark the method and the call as having a fat
//   pointer candidate. Spill any ret_expr in the call's arguments, because
//   ret_expr nodes can't be cloned.
//
// Arguments:
//    call - fat calli candidate
//
void Compiler::addFatPointerCandidate(GenTreeCall* call)
{
    setMethodHasFatPointer();
    call->SetFatPointerCandidate();
    SpillRetExprHelper helper(this);
    helper.StoreRetExprResultsInArgs(call);
}
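
// Usage note: the importer calls addFatPointerCandidate when it imports a
// calli whose target may turn out to be a fat function pointer. Resolving
// that at runtime requires cloning the call, which is why any GT_RET_EXPR
// arguments must be spilled to locals first -- ret_expr nodes can't be cloned.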