Create `GT_PUTARG_TYPE` when signature type does not match node type.
Check in morph that this information has survived inlining and the other phases in between.
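
For context, a minimal standalone sketch (not part of the patch; `Caller` and `Callee` are hypothetical names) of the kind of call this change targets: the callee's signature uses sub-register-sized parameter types while the caller evaluates register-width ints, so only the signature carries the true argument width.

// Hypothetical C++ analogue of the managed scenario.
extern "C" void Callee(short a, signed char b, int c);

void Caller(int x)
{
    // Both small arguments are evaluated as 4-byte ints; only the signature
    // says they occupy 2 and 1 bytes. On the OSX arm64 ABI that width decides
    // how stack arguments are packed, so the JIT must carry it from
    // importation to morph (via GT_PUTARG_TYPE).
    Callee(static_cast<short>(x), static_cast<signed char>(x), x);
}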
InlineCandidateInfo** ppInlineCandidateInfo,
InlineResult* inlineResult);
- void impInlineRecordArgInfo(InlineInfo* pInlineInfo,
- GenTree* curArgVal,
- unsigned argNum,
- unsigned __int64 bbFlags,
- InlineResult* inlineResult);
+ void impInlineRecordArgInfo(InlineInfo* pInlineInfo,
+ GenTree* curArgVal,
+ unsigned argNum,
+ InlineResult* inlineResult);
void impInlineInitVars(InlineInfo* pInlineInfo);
BasicBlock* canonicalBlock,
flowList* predEdge);
+ GenTree* fgCheckCallArgUpdate(GenTree* parent, GenTree* child, var_types origType);
+
#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM)
// Sometimes we need to defer updating the BBF_FINALLY_TARGET bit. fgNeedToAddFinallyTargetBits signals
// when this is necessary.
case GT_NULLCHECK:
case GT_PUTARG_REG:
case GT_PUTARG_STK:
+ case GT_PUTARG_TYPE:
case GT_RETURNTRAP:
case GT_NOP:
case GT_RETURN:
case GT_NULLCHECK:
case GT_PUTARG_REG:
case GT_PUTARG_STK:
+ case GT_PUTARG_TYPE:
#if FEATURE_ARG_SPLIT
case GT_PUTARG_SPLIT:
#endif // FEATURE_ARG_SPLIT
if (varTypeIsStruct(argType))
{
unsigned structSize = info.compCompHnd->getClassSize(argClass);
- return structSize; // TODO: roundUp() needed here?
+ return roundUp(structSize, TARGET_POINTER_SIZE);
}
#endif // UNIX_AMD64_ABI
return TARGET_POINTER_SIZE;
// If the return type is a struct type and we're on a platform
// where structs can be returned in multiple registers, ensure the
// call has a suitable parent.
-
+//
+// If the original call type and the substitution type are different,
+// the function makes the necessary updates. This can happen when there
+// was an implicit conversion in the inlinee body.
+//
Compiler::fgWalkResult Compiler::fgUpdateInlineReturnExpressionPlaceHolder(GenTree** pTree, fgWalkData* data)
{
// All the operations here and in the corresponding postorder
}
#endif // DEBUG
+ var_types newType = inlineCandidate->TypeGet();
+
+ // If we end up swapping types we may need to retype the tree:
+ if (retType != newType)
+ {
+ if ((retType == TYP_BYREF) && (tree->OperGet() == GT_IND))
+ {
+ // - in an RVA static if we've reinterpreted it as a byref;
+ assert(newType == TYP_I_IMPL);
+ JITDUMP("Updating type of the return GT_IND expression to TYP_BYREF\n");
+ inlineCandidate->gtType = TYP_BYREF;
+ }
+ else
+ {
+ // - under a call if we changed the size of the argument.
+ GenTree* putArgType = comp->fgCheckCallArgUpdate(data->parent, inlineCandidate, retType);
+ if (putArgType != nullptr)
+ {
+ inlineCandidate = putArgType;
+ }
+ }
+ }
+
tree->ReplaceWith(inlineCandidate, comp);
comp->compCurBB->bbFlags |= (bbFlags & BBF_SPLIT_GAINED);
printf("\n");
}
#endif // DEBUG
-
- var_types newType = tree->TypeGet();
-
- // If we end up swapping in an RVA static we may need to retype it here,
- // if we've reinterpreted it as a byref.
- if ((retType != newType) && (retType == TYP_BYREF) && (tree->OperGet() == GT_IND))
- {
- assert(newType == TYP_I_IMPL);
- JITDUMP("Updating type of the return GT_IND expression to TYP_BYREF\n");
- tree->gtType = TYP_BYREF;
- }
}
// If an inline was rejected and the call returns a struct, we may
}
else
{
- GenTree* foldedTree = comp->gtFoldExpr(tree);
- *pTree = foldedTree;
+ const var_types retType = tree->TypeGet();
+ GenTree* foldedTree = comp->gtFoldExpr(tree);
+
+ GenTree* putArgType = comp->fgCheckCallArgUpdate(data->parent, foldedTree, retType);
+ if (putArgType != nullptr)
+ {
+ foldedTree = putArgType;
+ }
+ *pTree = foldedTree;
}
return WALK_CONTINUE;
{
const InlArgInfo& argInfo = inlArgInfo[argNum];
const bool argIsSingleDef = !argInfo.argHasLdargaOp && !argInfo.argHasStargOp;
- GenTree* const argNode = inlArgInfo[argNum].argNode;
- unsigned __int64 bbFlags = inlArgInfo[argNum].bbFlags;
+ GenTree* argNode = inlArgInfo[argNum].argNode;
+ const bool argHasPutArg = argNode->OperIs(GT_PUTARG_TYPE);
+
+ unsigned __int64 bbFlags = 0;
+ argNode = argNode->gtSkipPutArgType();
+ argNode = argNode->gtRetExprVal(&bbFlags);
if (argInfo.argHasTmp)
{
GenTree* argSingleUseNode = argInfo.argBashTmpNode;
- if ((argSingleUseNode != nullptr) && !(argSingleUseNode->gtFlags & GTF_VAR_CLONED) && argIsSingleDef)
+ // argHasPutArg disqualifies the arg from a direct substitution because we don't have information about
+ // its user. For example, when replacing `LCL_VAR short` with `PUTARG_TYPE short->LCL_VAR int`,
+ // we should keep `PUTARG_TYPE` iff the user is a call that needs `short`, and delete it otherwise.
+ if ((argSingleUseNode != nullptr) && !(argSingleUseNode->gtFlags & GTF_VAR_CLONED) && argIsSingleDef &&
+ !argHasPutArg)
{
// Change the temp in-place to the actual argument.
// We currently do not support this for struct arguments, so it must not be a GT_OBJ.
canonicalBlock->bbFlags |= (BBF_JMP_TARGET | BBF_HAS_LABEL);
}
+//------------------------------------------------------------------------
+// fgCheckCallArgUpdate: check if we are replacing a call argument and add GT_PUTARG_TYPE if necessary.
+//
+// Arguments:
+// parent - the parent that could be a call;
+// child - the new child node;
+// origType - the original child type;
+//
+// Returns:
+// PUTARG_TYPE node if it is needed, nullptr otherwise.
+//
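+// Notes:
+// Hypothetical illustration (for exposition, not a dump from this change):
+// if inlining substitutes an int-typed LCL_VAR for an argument whose
+// signature type is short, this helper returns `PUTARG_TYPE short(LCL_VAR int)`
+// so morph still sees the 2-byte signature width.
+//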
+GenTree* Compiler::fgCheckCallArgUpdate(GenTree* parent, GenTree* child, var_types origType)
+{
+ if ((parent == nullptr) || !parent->IsCall())
+ {
+ return nullptr;
+ }
+ const var_types newType = child->TypeGet();
+ if (newType == origType)
+ {
+ return nullptr;
+ }
+ if (varTypeIsStruct(origType) || (genTypeSize(origType) == genTypeSize(newType)))
+ {
+ assert(!varTypeIsStruct(newType));
+ return nullptr;
+ }
+ GenTree* putArgType = gtNewOperNode(GT_PUTARG_TYPE, origType, child);
+#if defined(DEBUG)
+ if (verbose)
+ {
+ printf("For call [%06d] the new argument's type [%06d]", dspTreeID(parent), dspTreeID(child));
+ printf(" does not match the original type size, add a GT_PUTARG_TYPE [%06d]\n", dspTreeID(parent));
+ }
+#endif
+ return putArgType;
+}
+
#ifdef DEBUG
//------------------------------------------------------------------------
case GT_NULLCHECK:
case GT_PUTARG_REG:
case GT_PUTARG_STK:
+ case GT_PUTARG_TYPE:
case GT_RETURNTRAP:
case GT_NOP:
case GT_RETURN:
case GT_NULLCHECK:
case GT_PUTARG_REG:
case GT_PUTARG_STK:
+ case GT_PUTARG_TYPE:
case GT_BSWAP:
case GT_BSWAP16:
case GT_KEEPALIVE:
}
#endif // TARGET_ARM
#if FEATURE_FIXED_OUT_ARGS
- sprintf_s(bufp, bufLength, "arg%d out+%02x%c", argNum, curArgTabEntry->slotNum * TARGET_POINTER_SIZE, 0);
+ sprintf_s(bufp, bufLength, "arg%d out+%02x%c", argNum, curArgTabEntry->GetByteOffset(), 0);
#else
sprintf_s(bufp, bufLength, "arg%d on STK%c", argNum, 0);
#endif
// There are 2 special cases:
// 1. we have lost classHandle from a FIELD node because the parent struct has overlapping fields,
// the field was transformed as IND or GT_LCL_FLD;
- // 2. we are propogating `ASG(struct V01, 0)` to `RETURN(struct V01)`, `CNT_INT` doesn't `structHnd`;
+ // 2. we are propagating `ASG(struct V01, 0)` to `RETURN(struct V01)`, `CNS_INT` doesn't have a `structHnd`;
// in these cases, we can use the type of the merge return for the assignment.
assert(val->OperIs(GT_IND, GT_LCL_FLD, GT_CNS_INT));
assert(!compDoOldStructRetyping());
inline GenTree* gtEffectiveVal(bool commaOnly = false);
// Tunnel through any GT_RET_EXPRs
- inline GenTree* gtRetExprVal(unsigned __int64* pbbFlags);
+ inline GenTree* gtRetExprVal(unsigned __int64* pbbFlags = nullptr);
+
+ inline GenTree* gtSkipPutArgType();
// Return the child of this node if it is a GT_RELOAD or GT_COPY; otherwise simply return the node itself
inline GenTree* gtSkipReloadOrCopy();
return op2;
}
-inline GenTree* GenTree::gtEffectiveVal(bool commaOnly)
+inline GenTree* GenTree::gtEffectiveVal(bool commaOnly /* = false */)
{
GenTree* effectiveVal = this;
for (;;)
{
+ assert(!effectiveVal->OperIs(GT_PUTARG_TYPE));
if (effectiveVal->gtOper == GT_COMMA)
{
- effectiveVal = effectiveVal->AsOp()->gtOp2;
+ effectiveVal = effectiveVal->AsOp()->gtGetOp2();
}
else if (!commaOnly && (effectiveVal->gtOper == GT_NOP) && (effectiveVal->AsOp()->gtOp1 != nullptr))
{
// Multi-level inlines can form chains of GT_RET_EXPRs.
// This method walks back to the root of the chain.
-inline GenTree* GenTree::gtRetExprVal(unsigned __int64* pbbFlags)
+inline GenTree* GenTree::gtRetExprVal(unsigned __int64* pbbFlags /* = nullptr */)
{
GenTree* retExprVal = this;
unsigned __int64 bbFlags = 0;
- assert(pbbFlags != nullptr);
+ assert(!retExprVal->OperIs(GT_PUTARG_TYPE));
- for (; retExprVal->gtOper == GT_RET_EXPR; retExprVal = retExprVal->AsRetExpr()->gtInlineCandidate)
+ while (retExprVal->OperIs(GT_RET_EXPR))
{
- bbFlags = retExprVal->AsRetExpr()->bbFlags;
+ const GenTreeRetExpr* retExpr = retExprVal->AsRetExpr();
+ bbFlags = retExpr->bbFlags;
+ retExprVal = retExpr->gtInlineCandidate;
}
- *pbbFlags = bbFlags;
+ if (pbbFlags != nullptr)
+ {
+ *pbbFlags = bbFlags;
+ }
return retExprVal;
}
+//-------------------------------------------------------------------------
+// gtSkipPutArgType - skip PUTARG_TYPE if it is present.
+//
+// Returns:
+// the original tree or its child if it was a PUTARG_TYPE.
+//
+// Notes:
+// PUTARG_TYPE should be skipped when we are doing transformations
+// that are not affected by the ABI, for example inlining and implicit byref morphing.
+//
+inline GenTree* GenTree::gtSkipPutArgType()
+{
+ if (OperIs(GT_PUTARG_TYPE))
+ {
+ GenTree* res = AsUnOp()->gtGetOp1();
+ assert(!res->OperIs(GT_PUTARG_TYPE));
+ return res;
+ }
+ return this;
+}
+
inline GenTree* GenTree::gtSkipReloadOrCopy()
{
// There can be only one reload or copy (we can't have a reload/copy of a reload/copy)
#else
GTNODE(PUTARG_REG , GenTreeOp ,0,GTK_UNOP) // operator that places outgoing arg in register
#endif
+GTNODE(PUTARG_TYPE , GenTreeOp ,0,GTK_UNOP|GTK_NOTLIR) // operator that saves the argument type between importation and morph
GTNODE(PUTARG_STK , GenTreePutArgStk ,0,GTK_UNOP|GTK_NOVALUE) // operator that places outgoing arg in stack
#if FEATURE_ARG_SPLIT
GTNODE(PUTARG_SPLIT , GenTreePutArgSplit ,0,GTK_UNOP) // operator that places outgoing arg in registers with stack (split struct in ARM32)
info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(sig->retTypeSigClass);
}
- CORINFO_ARG_LIST_HANDLE argLst = sig->args;
- CORINFO_CLASS_HANDLE argClass;
- CORINFO_CLASS_HANDLE argRealClass;
+ CORINFO_ARG_LIST_HANDLE sigArgs = sig->args;
GenTreeCall::Use* arg;
for (arg = argList, count = sig->numArgs; count > 0; arg = arg->GetNext(), count--)
{
PREFIX_ASSUME(arg != nullptr);
- CorInfoType corType = strip(info.compCompHnd->getArgType(sig, argLst, &argClass));
+ CORINFO_CLASS_HANDLE classHnd;
+ CorInfoType corType = strip(info.compCompHnd->getArgType(sig, sigArgs, &classHnd));
var_types jitSigType = JITtype2varType(corType);
}
// insert implied casts (from float to double or double to float)
-
if ((jitSigType == TYP_DOUBLE) && (arg->GetNode()->TypeGet() == TYP_FLOAT))
{
arg->SetNode(gtNewCastNode(TYP_DOUBLE, arg->GetNode(), false, TYP_DOUBLE));
}
// insert any widening or narrowing casts for backwards compatibility
-
arg->SetNode(impImplicitIorI4Cast(arg->GetNode(), jitSigType));
if (corType != CORINFO_TYPE_CLASS && corType != CORINFO_TYPE_BYREF && corType != CORINFO_TYPE_PTR &&
- corType != CORINFO_TYPE_VAR && (argRealClass = info.compCompHnd->getArgClass(sig, argLst)) != nullptr)
+ corType != CORINFO_TYPE_VAR)
+ {
+ CORINFO_CLASS_HANDLE argRealClass = info.compCompHnd->getArgClass(sig, sigArgs);
+ if (argRealClass != nullptr)
+ {
+ // Make sure that all valuetypes (including enums) that we push are loaded.
+ // This is to guarantee that if a GC is triggered from the prestub of this method,
+ // all valuetypes in the method signature are already loaded.
+ // We need to be able to find the size of the valuetypes, but we cannot
+ // do a class-load from within GC.
+ info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(argRealClass);
+ }
+ }
+
+ const var_types nodeArgType = arg->GetNode()->TypeGet();
+ if (!varTypeIsStruct(jitSigType) && genTypeSize(nodeArgType) != genTypeSize(jitSigType))
{
- // Make sure that all valuetypes (including enums) that we push are loaded.
- // This is to guarantee that if a GC is triggered from the prestub of this methods,
- // all valuetypes in the method signature are already loaded.
- // We need to be able to find the size of the valuetypes, but we cannot
- // do a class-load from within GC.
- info.compCompHnd->classMustBeLoadedBeforeCodeIsRun(argRealClass);
+ assert(!varTypeIsStruct(nodeArgType));
+ // Some ABIs require precise size information for call arguments less than target pointer size,
+ // for example arm64 OSX. Create a special node to keep this information until morph
+ // consumes it into `fgArgInfo`.
+ GenTree* putArgType = gtNewOperNode(GT_PUTARG_TYPE, jitSigType, arg->GetNode());
+ arg->SetNode(putArgType);
}
- argLst = info.compCompHnd->getArgNext(argLst);
+ sigArgs = info.compCompHnd->getArgNext(sigArgs);
}
}
// properties are used later by impInlineFetchArg to determine how best to
// pass the argument into the inlinee.
-void Compiler::impInlineRecordArgInfo(
- InlineInfo* pInlineInfo, GenTree* curArgVal, unsigned argNum, unsigned __int64 bbFlags, InlineResult* inlineResult)
+void Compiler::impInlineRecordArgInfo(InlineInfo* pInlineInfo,
+ GenTree* curArgVal,
+ unsigned argNum,
+ InlineResult* inlineResult)
{
InlArgInfo* inlCurArgInfo = &pInlineInfo->inlArgInfo[argNum];
+ inlCurArgInfo->argNode = curArgVal; // Save the original tree, with PUTARG_TYPE and RET_EXPR.
+
+ curArgVal = curArgVal->gtSkipPutArgType();
+ curArgVal = curArgVal->gtRetExprVal();
+
if (curArgVal->gtOper == GT_MKREFANY)
{
inlineResult->NoteFatal(InlineObservation::CALLSITE_ARG_IS_MKREFANY);
return;
}
- inlCurArgInfo->argNode = curArgVal;
- inlCurArgInfo->bbFlags = bbFlags;
-
GenTree* lclVarTree;
const bool isAddressInLocal = impIsAddressInLocal(curArgVal, &lclVarTree);
assert((methInfo->args.hasThis()) == (thisArg != nullptr));
- if (thisArg)
+ if (thisArg != nullptr)
{
- inlArgInfo[0].argIsThis = true;
- unsigned __int64 bbFlags = 0;
- GenTree* actualThisArg = thisArg->GetNode()->gtRetExprVal(&bbFlags);
- impInlineRecordArgInfo(pInlineInfo, actualThisArg, argCnt, bbFlags, inlineResult);
+ inlArgInfo[0].argIsThis = true;
+ impInlineRecordArgInfo(pInlineInfo, thisArg->GetNode(), argCnt, inlineResult);
if (inlineResult->IsFailure())
{
continue;
}
- unsigned __int64 bbFlags = 0;
- GenTree* actualArg = use.GetNode()->gtRetExprVal(&bbFlags);
- impInlineRecordArgInfo(pInlineInfo, actualArg, argCnt, bbFlags, inlineResult);
+ GenTree* actualArg = use.GetNode();
+ impInlineRecordArgInfo(pInlineInfo, actualArg, argCnt, inlineResult);
if (inlineResult->IsFailure())
{
GenTree* inlArgNode = inlArgInfo[i].argNode;
- if (sigType != inlArgNode->gtType)
+ if ((sigType != inlArgNode->gtType) || inlArgNode->OperIs(GT_PUTARG_TYPE))
{
+ assert(impCheckImplicitArgumentCoercion(sigType, inlArgNode->gtType));
+ assert(!varTypeIsStruct(inlArgNode->gtType) && !varTypeIsStruct(sigType) &&
+ genTypeSize(inlArgNode->gtType) == genTypeSize(sigType));
+
/* In valid IL, this can only happen for short integer types or byrefs <-> [native] ints,
but in bad IL cases with caller-callee signature mismatches we can see other types.
Intentionally reject cases with mismatches so the jit is more flexible when
return;
}
+ GenTree** pInlArgNode;
+ if (inlArgNode->OperIs(GT_PUTARG_TYPE))
+ {
+ // There was a widening or narrowing cast.
+ GenTreeUnOp* putArgType = inlArgNode->AsUnOp();
+ pInlArgNode = &putArgType->gtOp1;
+ inlArgNode = putArgType->gtOp1;
+ }
+ else
+ {
+ // The argument has the same size but a different type.
+ pInlArgNode = &inlArgInfo[i].argNode;
+ }
+
/* Is it a narrowing or widening cast?
* Widening casts are ok since the value computed is already
* normalized to an int (on the IL stack) */
-
if (genTypeSize(inlArgNode->gtType) >= genTypeSize(sigType))
{
if (sigType == TYP_BYREF)
}
else if (genTypeSize(sigType) < EA_PTRSIZE)
{
- /* Narrowing cast */
-
- if (inlArgNode->gtOper == GT_LCL_VAR &&
- !lvaTable[inlArgNode->AsLclVarCommon()->GetLclNum()].lvNormalizeOnLoad() &&
- sigType == lvaGetRealType(inlArgNode->AsLclVarCommon()->GetLclNum()))
+ // Narrowing cast.
+ if (inlArgNode->OperIs(GT_LCL_VAR))
{
- /* We don't need to insert a cast here as the variable
- was assigned a normalized value of the right type */
-
- continue;
+ const unsigned lclNum = inlArgNode->AsLclVarCommon()->GetLclNum();
+ if (!lvaTable[lclNum].lvNormalizeOnLoad() && sigType == lvaGetRealType(lclNum))
+ {
+ // We don't need to insert a cast here as the variable
+ // was assigned a normalized value of the right type.
+ continue;
+ }
}
- inlArgNode = inlArgInfo[i].argNode = gtNewCastNode(TYP_INT, inlArgNode, false, sigType);
+ inlArgNode = gtNewCastNode(TYP_INT, inlArgNode, false, sigType);
inlArgInfo[i].argIsLclVar = false;
-
- /* Try to fold the node in case we have constant arguments */
-
+ // Try to fold the node in case we have constant arguments.
if (inlArgInfo[i].argIsInvariant)
{
- inlArgNode = gtFoldExprConst(inlArgNode);
- inlArgInfo[i].argNode = inlArgNode;
+ inlArgNode = gtFoldExprConst(inlArgNode);
assert(inlArgNode->OperIsConst());
}
+ *pInlArgNode = inlArgNode;
}
#ifdef TARGET_64BIT
else if (genTypeSize(genActualType(inlArgNode->gtType)) < genTypeSize(sigType))
{
// This should only happen for int -> native int widening
- inlArgNode = inlArgInfo[i].argNode =
- gtNewCastNode(genActualType(sigType), inlArgNode, false, sigType);
+ inlArgNode = gtNewCastNode(genActualType(sigType), inlArgNode, false, sigType);
inlArgInfo[i].argIsLclVar = false;
if (inlArgInfo[i].argIsInvariant)
{
- inlArgNode = gtFoldExprConst(inlArgNode);
- inlArgInfo[i].argNode = inlArgNode;
+ inlArgNode = gtFoldExprConst(inlArgNode);
assert(inlArgNode->OperIsConst());
}
+ *pInlArgNode = inlArgNode;
}
#endif // TARGET_64BIT
}
const var_types lclTyp = lclInfo.lclTypeInfo;
GenTree* op1 = nullptr;
+ GenTree* argNode = argInfo.argNode->gtSkipPutArgType()->gtRetExprVal();
+
if (argInfo.argIsInvariant && !argCanBeModified)
{
// Directly substitute constants or addresses of locals
// impInlineExpr. Then gtFoldExpr() could change it, causing
// further references to the argument working off of the
// bashed copy.
- op1 = gtCloneExpr(argInfo.argNode);
+ op1 = gtCloneExpr(argNode);
PREFIX_ASSUME(op1 != nullptr);
argInfo.argTmpNum = BAD_VAR_NUM;
// Directly substitute unaliased caller locals for args that cannot be modified
//
// Use the caller-supplied node if this is the first use.
- op1 = argInfo.argNode;
+ op1 = argNode;
argInfo.argTmpNum = op1->AsLclVarCommon()->GetLclNum();
// Use an equivalent copy if this is the second or subsequent
then we change the argument tree (of "ldloca.s V_1") to TYP_I_IMPL to match the callee signature. We'll
soon afterwards reject the inlining anyway, since the tree we return isn't a GT_LCL_VAR.
*/
- assert(argInfo.argNode->TypeGet() == TYP_BYREF || argInfo.argNode->TypeGet() == TYP_I_IMPL);
- op1 = gtCloneExpr(argInfo.argNode);
+ assert(argNode->TypeGet() == TYP_BYREF || argNode->TypeGet() == TYP_I_IMPL);
+ op1 = gtCloneExpr(argNode);
}
else
{
assert(lvaTable[tmpNum].lvSingleDef == 0);
lvaTable[tmpNum].lvSingleDef = 1;
JITDUMP("Marked V%02u as a single def temp\n", tmpNum);
- lvaSetClass(tmpNum, argInfo.argNode, lclInfo.lclVerTypeInfo.GetClassHandleForObjRef());
+ lvaSetClass(tmpNum, argNode, lclInfo.lclVerTypeInfo.GetClassHandleForObjRef());
}
else
{
if ((objClass == nullptr) || !isExact)
{
// Walk back through any return expression placeholders
- unsigned __int64 bbFlags = 0;
- actualThisObj = thisObj->gtRetExprVal(&bbFlags);
+ actualThisObj = thisObj->gtRetExprVal();
// See if we landed on a call to a special intrinsic method
if (actualThisObj->IsCall())
struct InlArgInfo
{
- unsigned __int64 bbFlags; // basic block flags that need to be added when replacing GT_RET_EXPR
- // with argNode
GenTree* argNode; // caller node for this argument
GenTree* argBashTmpNode; // tmp node created, if it may be replaced with actual arg
unsigned argTmpNum; // the argument tmp number
#endif // TARGET_XXX
#if FEATURE_FASTTAILCALL
- varDsc->SetStackOffset(varDscInfo->stackArgSize);
#if defined(OSX_ARM64_ABI)
+ unsigned argAlignment = TARGET_POINTER_SIZE;
+ if (argSize <= TARGET_POINTER_SIZE)
+ {
+ argAlignment = argSize;
+ }
+ varDscInfo->stackArgSize = roundUp(varDscInfo->stackArgSize, argAlignment);
+ assert(argSize % argAlignment == 0);
+#else // !OSX_ARM64_ABI
+ assert((argSize % TARGET_POINTER_SIZE) == 0);
+ assert((varDscInfo->stackArgSize % TARGET_POINTER_SIZE) == 0);
+#endif // !OSX_ARM64_ABI
+
+ varDsc->SetStackOffset(varDscInfo->stackArgSize);
varDscInfo->stackArgSize += argSize;
-#else
- varDscInfo->stackArgSize += roundUp(argSize, TARGET_POINTER_SIZE);
-#endif
#endif // FEATURE_FASTTAILCALL
}
break;
}
#endif // TARGET_ARM
+#if defined(OSX_ARM64_ABI)
+ unsigned argAlignment = TARGET_POINTER_SIZE;
+ if (argSize <= TARGET_POINTER_SIZE)
+ {
+ argAlignment = argSize;
+ }
+ argOffs = roundUp(argOffs, argAlignment);
+ assert((argOffs % argAlignment) == 0);
+#else // !OSX_ARM64_ABI
+ assert((argSize % TARGET_POINTER_SIZE) == 0);
+ assert((argOffs % TARGET_POINTER_SIZE) == 0);
+#endif // !OSX_ARM64_ABI
varDsc->SetStackOffset(argOffs);
}
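
To make the packing rule concrete, a minimal standalone sketch (not part of the patch; `roundUpTo` is a stand-in for the JIT's `roundUp`) of how the OSX arm64 rule above lays out stack-passed arguments, assuming an 8-byte pointer size and all arguments passed on the stack:

#include <algorithm>
#include <cstdio>

// Round `value` up to a power-of-two `alignment`.
unsigned roundUpTo(unsigned value, unsigned alignment)
{
    return (value + alignment - 1) & ~(alignment - 1);
}

int main()
{
    const unsigned pointerSize = 8;            // TARGET_POINTER_SIZE on arm64
    const unsigned argSizes[]  = {1, 2, 1, 4}; // e.g. (byte, short, byte, int)
    unsigned       offset      = 0;
    for (unsigned size : argSizes)
    {
        // OSX arm64: args smaller than a pointer align to their own size.
        const unsigned alignment = std::min(size, pointerSize);
        offset                   = roundUpTo(offset, alignment);
        std::printf("size %u -> offset %u\n", size, offset);
        offset += size;
    }
    // Prints offsets 0, 2, 4, 8; the non-OSX rule would use full 8-byte
    // slots, giving 0, 8, 16, 24.
    return 0;
}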
SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR structDesc;
#endif // UNIX_AMD64_ABI
+#if defined(DEBUG)
+ // Check that we have valid information about call's argument types.
+ // For example:
+ // load byte; call(int) -> CALL(PUTARG_TYPE int (IND byte));
+ // load int; call(byte) -> CALL(PUTARG_TYPE byte(IND int));
+ // etc.
+ if (call->callSig != nullptr)
+ {
+ CORINFO_SIG_INFO* sig = call->callSig;
+ const unsigned sigArgsCount = sig->numArgs;
+
+ GenTreeCall::Use* nodeArgs = call->gtCallArgs;
+ // The node can have arguments that are not counted in `sig->numArgs`, for example `this`,
+ // a runtime lookup arg, a cookie, etc.
+ unsigned nodeArgsCount = call->NumChildren();
+
+ if (call->gtCallThisArg != nullptr)
+ {
+ // Handle the most common argument that is not counted in `sig->numArgs`,
+ // so the following check works on more methods.
+ nodeArgsCount--;
+ }
+
+ assert(nodeArgsCount >= sigArgsCount);
+ if ((nodeArgsCount == sigArgsCount) &&
+ ((Target::g_tgtArgOrder == Target::ARG_ORDER_R2L) || (nodeArgsCount == 1)))
+ {
+ CORINFO_ARG_LIST_HANDLE sigArg = sig->args;
+ for (unsigned i = 0; i < sig->numArgs; ++i)
+ {
+ CORINFO_CLASS_HANDLE argClass;
+ const CorInfoType corType = strip(info.compCompHnd->getArgType(sig, sigArg, &argClass));
+ const var_types sigType = JITtype2varType(corType);
+
+ assert(nodeArgs != nullptr);
+ const GenTree* nodeArg = nodeArgs->GetNode();
+ assert(nodeArg != nullptr);
+ const var_types nodeType = nodeArg->TypeGet();
+
+ assert((nodeType == sigType) || varTypeIsStruct(sigType) ||
+ genTypeSize(nodeType) == genTypeSize(sigType));
+
+ sigArg = info.compCompHnd->getArgNext(sigArg);
+ nodeArgs = nodeArgs->GetNext();
+ }
+ assert(nodeArgs == nullptr);
+ }
+ }
+#endif // DEBUG
+
for (args = call->gtCallArgs; args != nullptr; args = args->GetNext(), argIndex++)
{
- argx = args->GetNode();
+ argx = args->GetNode()->gtSkipPutArgType();
+
fgArgTabEntry* argEntry = nullptr;
// Change the node to TYP_I_IMPL so we don't report GC info
}
}
+ if (args->GetNode()->OperIs(GT_PUTARG_TYPE))
+ {
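+ // Use the signature width recorded at import time, not the child node's own width.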
+ const GenTreeUnOp* putArgType = args->GetNode()->AsUnOp();
+ byteSize = genTypeSize(putArgType->TypeGet());
+ }
+
// The 'size' value must have been set by now (the original value of zero is an invalid value).
assert(size != 0);
assert(byteSize != 0);
LclVarDsc* varDsc = lvaGetDesc(lclVar);
if (varDsc->CanBeReplacedWithItsField(this))
{
- // We can replace the struct with its only field and allow copy propogation to replace
+ // We can replace the struct with its only field and allow copy propagation to replace
// return value that was written as a field.
unsigned fieldLclNum = varDsc->lvFieldLclStart;
LclVarDsc* fieldDsc = lvaGetDesc(fieldLclNum);
// Special handling for the arg list.
return fgMorphArgList(tree->AsArgList(), mac);
+ case GT_PUTARG_TYPE:
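+ // PUTARG_TYPE only carries signature-type information for building
+ // fgArgInfo; it has no effect of its own, so strip the wrapper and
+ // morph its operand.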
+ return fgMorphTree(tree->AsUnOp()->gtGetOp1());
+
default:
break;
}
{
for (GenTree** pTree : tree->UseEdges())
{
- GenTree* childTree = *pTree;
+ GenTree** pTreeCopy = pTree;
+ GenTree* childTree = *pTree;
if (childTree->gtOper == GT_LCL_VAR)
{
GenTree* newChildTree = fgMorphImplicitByRefArgs(childTree, false);
if (newChildTree != nullptr)
{
- changed = true;
- *pTree = newChildTree;
+ changed = true;
+ *pTreeCopy = newChildTree;
}
}
}
for (GenTree* tree = statement->GetTreeList(); tree; tree = tree->gtNext)
{
- // QMARK nodes should have been removed before this phase.
- assert(tree->OperGet() != GT_QMARK);
+ // QMARK and PUTARG_TYPE nodes should have been removed before this phase.
+ assert(!tree->OperIs(GT_QMARK, GT_PUTARG_TYPE));
if (tree->OperGet() == GT_ASG)
{
[MemberData(nameof(UInt64TestNotEquals))]
[MemberData(nameof(UInt64TestSubtractions))]
[ActiveIssue("https://github.com/dotnet/runtime/issues/26798", TargetFrameworkMonikers.NetFramework)]
+ [SkipOnCoreClr("https://github.com/dotnet/runtime/issues/42719", RuntimeConfiguration.Checked)]
public void ConstantExpressions(object x, object y, ExpressionType type, object result, bool shouldSucceedChecked)
{
var callsite = GetBinaryOperationCallSite(type, false, true, true);