if (corType == CORINFO_TYPE_DOUBLE && args->Current()->TypeGet() == TYP_FLOAT)
{
- args->Current() = gtNewCastNode(TYP_DOUBLE, args->Current(), TYP_DOUBLE);
+ args->Current() = gtNewCastNode(TYP_DOUBLE, args->Current(), false, TYP_DOUBLE);
}
else if (corType == CORINFO_TYPE_FLOAT && args->Current()->TypeGet() == TYP_DOUBLE)
{
- args->Current() = gtNewCastNode(TYP_FLOAT, args->Current(), TYP_FLOAT);
+ args->Current() = gtNewCastNode(TYP_FLOAT, args->Current(), false, TYP_FLOAT);
}
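// gtNewCastNode's new third argument says whether the source value is unsigned;
// these float<->double conversions are always signed, so false is passed.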
// insert any widening or narrowing casts for backwards compatibility
// We change this to a GT_LCL_FLD (from a GT_ADDR of a GT_LCL_VAR)
lcl->ChangeOper(GT_LCL_FLD);
fgLclFldAssign(lcl->gtLclVarCommon.gtLclNum);
+ lcl->gtType = src->gtType;
+ asgType = src->gtType;
}
- lcl->gtType = src->gtType;
- asgType = src->gtType;
- dest = lcl;
+ dest = lcl;
#if defined(_TARGET_ARM_)
// TODO-Cleanup: This should have been taken care of in the above HasMultiRegRetVal() case,
assert(structVal->gtObj.gtClass == structHnd);
return (structVal->gtObj.Addr());
}
- else if (oper == GT_CALL || oper == GT_RET_EXPR || oper == GT_OBJ || oper == GT_MKREFANY)
+ else if (oper == GT_CALL || oper == GT_RET_EXPR || oper == GT_OBJ || oper == GT_MKREFANY ||
+ structVal->OperIsSimdHWIntrinsic())
{
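// Like a call or OBJ result, this value has no addressable home, so spill it
// to a temp whose address can be taken.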
unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));
}
#ifdef FEATURE_SIMD
- if (blockNode->OperGet() == GT_SIMD)
- {
- parent->gtOp.gtOp2 = impNormStructVal(blockNode, structHnd, curLevel, forceNormalization);
- alreadyNormalized = true;
- }
- else
-#endif
-#ifdef FEATURE_HW_INTRINSICS
- if (blockNode->OperGet() == GT_HWIntrinsic && blockNode->AsHWIntrinsic()->isSIMD())
+ if (blockNode->OperIsSIMDorSimdHWintrinsic())
{
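// OperIsSIMDorSimdHWintrinsic() covers GT_SIMD nodes as well as hardware
// intrinsics that produce SIMD values, folding the two feature-gated checks
// into a single predicate.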
parent->gtOp.gtOp2 = impNormStructVal(blockNode, structHnd, curLevel, forceNormalization);
alreadyNormalized = true;
CORINFO_GENERIC_HANDLE handle = nullptr;
void* pIndirection = nullptr;
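// Neither double indirection (IAT_PPVALUE) nor relative indirection
// (IAT_RELPVALUE) is expected for this lookup.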
- assert(pLookup->constLookup.accessType != IAT_PPVALUE);
+ assert(pLookup->constLookup.accessType != IAT_PPVALUE && pLookup->constLookup.accessType != IAT_RELPVALUE);
if (pLookup->constLookup.accessType == IAT_VALUE)
{
{
CORINFO_GENERIC_HANDLE handle = nullptr;
void* pIndirection = nullptr;
- assert(pLookup->accessType != IAT_PPVALUE);
+ assert(pLookup->accessType != IAT_PPVALUE && pLookup->accessType != IAT_RELPVALUE);
if (pLookup->accessType == IAT_VALUE)
{
}
else
{
- op1->gtFptrVal.gtEntryPoint.addr = nullptr;
+ op1->gtFptrVal.gtEntryPoint.addr = nullptr;
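+ // Also give the unresolved entry point a well-defined access type.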
+ op1->gtFptrVal.gtEntryPoint.accessType = IAT_VALUE;
}
#endif
break;
else if (varTypeIsI(wantedType) && (currType == TYP_INT))
{
// Note that this allows TYP_INT to be cast to a TYP_I_IMPL when wantedType is a TYP_BYREF or TYP_REF
- tree = gtNewCastNode(TYP_I_IMPL, tree, TYP_I_IMPL);
+ tree = gtNewCastNode(TYP_I_IMPL, tree, false, TYP_I_IMPL);
}
else if ((wantedType == TYP_INT) && varTypeIsI(currType))
{
// Note that this allows TYP_BYREF or TYP_REF to be cast to a TYP_INT
- tree = gtNewCastNode(TYP_INT, tree, TYP_INT);
+ tree = gtNewCastNode(TYP_INT, tree, false, TYP_INT);
}
#endif // _TARGET_64BIT_
}
#ifndef LEGACY_BACKEND
if (varTypeIsFloating(tree) && varTypeIsFloating(dstTyp) && (dstTyp != tree->gtType))
{
- tree = gtNewCastNode(dstTyp, tree, dstTyp);
+ tree = gtNewCastNode(dstTyp, tree, false, dstTyp);
}
#endif // !LEGACY_BACKEND
noway_assert(varTypeIsFloating(op1));
#else // FEATURE_X87_DOUBLES
+ assert(varTypeIsFloating(op1));
if (op1->TypeGet() != callType)
{
- op1 = gtNewCastNode(callType, op1, callType);
+ op1 = gtNewCastNode(callType, op1, false, callType);
}
#endif // FEATURE_X87_DOUBLES
noway_assert(varTypeIsFloating(op1));
#else // FEATURE_X87_DOUBLES
+ assert(varTypeIsFloating(op1));
+ assert(varTypeIsFloating(op2));
if (op2->TypeGet() != callType)
{
- op2 = gtNewCastNode(callType, op2, callType);
+ op2 = gtNewCastNode(callType, op2, false, callType);
}
if (op1->TypeGet() != callType)
{
- op1 = gtNewCastNode(callType, op1, callType);
+ op1 = gtNewCastNode(callType, op1, false, callType);
}
#endif // FEATURE_X87_DOUBLES
{
assert((varTypeIsFloating(srcTyp) && varTypeIsFloating(dstTyp)) ||
(varTypeIsIntegral(srcTyp) && varTypeIsIntegral(dstTyp)));
- exprToBox = gtNewCastNode(dstTyp, exprToBox, dstTyp);
+ exprToBox = gtNewCastNode(dstTyp, exprToBox, false, dstTyp);
}
op1 = gtNewAssignNode(gtNewOperNode(GT_IND, lclTyp, op1), exprToBox);
}
}
else
{
- call->gtIntrinsic.gtEntryPoint.addr = nullptr;
+ call->gtIntrinsic.gtEntryPoint.addr = nullptr;
+ call->gtIntrinsic.gtEntryPoint.accessType = IAT_VALUE;
}
}
#endif
call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
call->gtCall.gtStubCallStubAddr = callInfo->stubLookup.constLookup.addr;
call->gtFlags |= GTF_CALL_VIRT_STUB;
- assert(callInfo->stubLookup.constLookup.accessType != IAT_PPVALUE);
+ assert(callInfo->stubLookup.constLookup.accessType != IAT_PPVALUE &&
+ callInfo->stubLookup.constLookup.accessType != IAT_RELPVALUE);
if (callInfo->stubLookup.constLookup.accessType == IAT_PVALUE)
{
call->gtCall.gtCallMoreFlags |= GTF_CALL_M_VIRTSTUB_REL_INDIRECT;
if (checkForSmallType && varTypeIsIntegral(callRetTyp) && genTypeSize(callRetTyp) < genTypeSize(TYP_INT))
{
- call = gtNewCastNode(genActualType(callRetTyp), call, callRetTyp);
+ call = gtNewCastNode(genActualType(callRetTyp), call, false, callRetTyp);
}
}
if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
{
// insert an explicit upcast
- op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
+ op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
}
#endif // _TARGET_64BIT_
if ((genActualType(op2->TypeGet()) != TYP_I_IMPL))
{
// insert an explicit upcast
- op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
+ op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
}
#endif // _TARGET_64BIT_
if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
{
// insert an explicit upcast
- op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
+ op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
}
}
else if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
{
// insert an explicit upcast
- op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
+ op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
}
#endif // _TARGET_64BIT_
if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
{
// insert an explicit upcast
- op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
+ op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
}
else if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
{
// insert an explicit upcast
- op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
+ op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
}
type = TYP_I_IMPL;
if (varTypeIsI(op1->TypeGet()) && (genActualType(lclTyp) == TYP_INT))
{
assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
- op1 = gtNewCastNode(TYP_INT, op1, TYP_INT);
+ op1 = gtNewCastNode(TYP_INT, op1, false, TYP_INT);
}
#endif // _TARGET_64BIT_
if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) &&
varTypeIsFloating(op2->gtType))
{
- op1 = gtNewCastNode(op2->TypeGet(), op1, op2->TypeGet());
+ op1 = gtNewCastNode(op2->TypeGet(), op1, false, op2->TypeGet());
}
#endif // !FEATURE_X87_DOUBLES
if (op1->TypeGet() != type)
{
// We insert a cast of op1 to 'type'
- op1 = gtNewCastNode(type, op1, type);
+ op1 = gtNewCastNode(type, op1, false, type);
}
if (op2->TypeGet() != type)
{
// We insert a cast of op2 to 'type'
- op2 = gtNewCastNode(type, op2, type);
+ op2 = gtNewCastNode(type, op2, false, type);
}
}
#endif // !FEATURE_X87_DOUBLES
#ifdef _TARGET_64BIT_
if (varTypeIsI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_INT))
{
- op2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
+ op2 = gtNewCastNode(TYP_I_IMPL, op2, uns, uns ? TYP_U_IMPL : TYP_I_IMPL);
}
else if (varTypeIsI(op2->TypeGet()) && (genActualType(op1->TypeGet()) == TYP_INT))
{
- op1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
+ op1 = gtNewCastNode(TYP_I_IMPL, op1, uns, uns ? TYP_U_IMPL : TYP_I_IMPL);
}
#endif // _TARGET_64BIT_
#ifdef _TARGET_64BIT_
if ((op1->TypeGet() == TYP_I_IMPL) && (genActualType(op2->TypeGet()) == TYP_INT))
{
- op2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
+ op2 = gtNewCastNode(TYP_I_IMPL, op2, uns, uns ? TYP_U_IMPL : TYP_I_IMPL);
}
else if ((op2->TypeGet() == TYP_I_IMPL) && (genActualType(op1->TypeGet()) == TYP_INT))
{
- op1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
+ op1 = gtNewCastNode(TYP_I_IMPL, op1, uns, uns ? TYP_U_IMPL : TYP_I_IMPL);
}
#endif // _TARGET_64BIT_
if (op1->TypeGet() == TYP_DOUBLE)
{
// We insert a cast of op2 to TYP_DOUBLE
- op2 = gtNewCastNode(TYP_DOUBLE, op2, TYP_DOUBLE);
+ op2 = gtNewCastNode(TYP_DOUBLE, op2, false, TYP_DOUBLE);
}
else if (op2->TypeGet() == TYP_DOUBLE)
{
// We insert a cast of op1 to TYP_DOUBLE
- op1 = gtNewCastNode(TYP_DOUBLE, op1, TYP_DOUBLE);
+ op1 = gtNewCastNode(TYP_DOUBLE, op1, false, TYP_DOUBLE);
}
}
}
#if SMALL_TREE_NODES
if (callNode)
{
- op1 = gtNewCastNodeL(type, op1, lclTyp);
+ op1 = gtNewCastNodeL(type, op1, uns, lclTyp);
}
else
#endif // SMALL_TREE_NODES
{
- op1 = gtNewCastNode(type, op1, lclTyp);
+ op1 = gtNewCastNode(type, op1, uns, lclTyp);
}
if (ovfl)
{
op1->gtFlags |= (GTF_OVERFLOW | GTF_EXCEPT);
}
- if (uns)
- {
- op1->gtFlags |= GTF_UNSIGNED;
- }
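// The 'uns' flag passed to the cast constructors above already marks the cast
// as unsigned, so the separate GTF_UNSIGNED update is no longer needed.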
impPushOnStack(op1, tiRetVal);
break;
if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT))
{
assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
- op2 = gtNewCastNode(TYP_INT, op2, TYP_INT);
+ op2 = gtNewCastNode(TYP_INT, op2, false, TYP_INT);
}
// Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
//
if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT))
{
assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
- op2 = gtNewCastNode(TYP_I_IMPL, op2, TYP_I_IMPL);
+ op2 = gtNewCastNode(TYP_I_IMPL, op2, false, TYP_I_IMPL);
}
}
#endif // _TARGET_64BIT_
if (genActualType(op1->gtType) == TYP_INT)
{
assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
- op1 = gtNewCastNode(TYP_I_IMPL, op1, TYP_I_IMPL);
+ op1 = gtNewCastNode(TYP_I_IMPL, op1, false, TYP_I_IMPL);
}
#endif
if ((op1->TypeGet() != op2->TypeGet()) && op2->OperIsConst() && varTypeIsIntOrI(op2->TypeGet()) &&
varTypeIsLong(op1->TypeGet()))
{
- op2 = gtNewCastNode(op1->TypeGet(), op2, op1->TypeGet());
+ op2 = gtNewCastNode(op1->TypeGet(), op2, false, op1->TypeGet());
}
#endif
//
if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT))
{
- op2 = gtNewCastNode(TYP_INT, op2, TYP_INT);
+ op2 = gtNewCastNode(TYP_INT, op2, false, TYP_INT);
}
// Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatibility
//
if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT))
{
- op2 = gtNewCastNode(TYP_I_IMPL, op2, TYP_I_IMPL);
+ op2 = gtNewCastNode(TYP_I_IMPL, op2, false, TYP_I_IMPL);
}
}
#endif
if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) &&
varTypeIsFloating(op2->gtType))
{
- op2 = gtNewCastNode(op1->TypeGet(), op2, op1->TypeGet());
+ op2 = gtNewCastNode(op1->TypeGet(), op2, false, op1->TypeGet());
}
#endif // !FEATURE_X87_DOUBLES
{
unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Return value temp for multireg return."));
impAssignTempGen(tmpNum, op, hClass, (unsigned)CHECK_SPILL_ALL);
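// Read the type back from the temp's table entry: it can differ from op->gtType
// once the assignment above has normalized the struct type.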
- GenTree* ret = gtNewLclvNode(tmpNum, op->gtType);
+ GenTree* ret = gtNewLclvNode(tmpNum, lvaTable[tmpNum].lvType);
// TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
ret->gtFlags |= GTF_DONT_CSE;
if (returnType != originalCallType)
{
+ JITDUMP("Return type mismatch, have %s, needed %s\n", varTypeName(returnType),
+ varTypeName(originalCallType));
compInlineResult->NoteFatal(InlineObservation::CALLSITE_RETURN_TYPE_MISMATCH);
return false;
}
fgCastNeeded(op2, fncRealRetType))
{
// Small-typed return values are normalized by the callee
- op2 = gtNewCastNode(TYP_INT, op2, fncRealRetType);
+ op2 = gtNewCastNode(TYP_INT, op2, false, fncRealRetType);
}
}
{
// Spill clique has decided this should be "native int", but this block only pushes an "int".
// Insert a sign-extension to "native int" so we match the clique.
- verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, TYP_I_IMPL);
+ verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, false, TYP_I_IMPL);
}
// Consider the case where one branch left a 'byref' on the stack and the other leaves
{
// Spill clique has decided this should be "byref", but this block only pushes an "int".
// Insert a sign-extension to "native int" so we match the clique size.
- verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, TYP_I_IMPL);
+ verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, false, TYP_I_IMPL);
}
}
#endif // _TARGET_64BIT_
{
// Spill clique has decided this should be "double", but this block only pushes a "float".
// Insert a cast to "double" so we match the clique.
- verCurrentState.esStack[level].val = gtNewCastNode(TYP_DOUBLE, tree, TYP_DOUBLE);
+ verCurrentState.esStack[level].val = gtNewCastNode(TYP_DOUBLE, tree, false, TYP_DOUBLE);
}
#endif // FEATURE_X87_DOUBLES
// the inlining multiplier) for anything in that assembly.
// But we only need to normalize it if it is a TYP_STRUCT
// (which we need to do even if we have already set foundSIMDType).
- if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDClass(&(lclVarInfo[0].lclVerTypeInfo)))
+ if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDorHWSIMDClass(&(lclVarInfo[0].lclVerTypeInfo)))
{
if (sigType == TYP_STRUCT)
{
lclVarInfo[i].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->args, argLst);
#ifdef FEATURE_SIMD
- if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDClass(&(lclVarInfo[i].lclVerTypeInfo)))
+ if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDorHWSIMDClass(&(lclVarInfo[i].lclVerTypeInfo)))
{
// If this is a SIMD class (i.e. in the SIMD assembly), then we will consider that we've
// found a SIMD type, even if this may not be a type we recognize (the assumption is that
continue;
}
- inlArgNode = inlArgInfo[i].argNode = gtNewCastNode(TYP_INT, inlArgNode, sigType);
+ inlArgNode = inlArgInfo[i].argNode = gtNewCastNode(TYP_INT, inlArgNode, false, sigType);
inlArgInfo[i].argIsLclVar = false;
else if (genTypeSize(genActualType(inlArgNode->gtType)) < genTypeSize(sigType))
{
// This should only happen for int -> native int widening
- inlArgNode = inlArgInfo[i].argNode = gtNewCastNode(genActualType(sigType), inlArgNode, sigType);
+ inlArgNode = inlArgInfo[i].argNode =
+ gtNewCastNode(genActualType(sigType), inlArgNode, false, sigType);
inlArgInfo[i].argIsLclVar = false;
localsSig = info.compCompHnd->getArgNext(localsSig);
#ifdef FEATURE_SIMD
- if ((!foundSIMDType || (type == TYP_STRUCT)) && isSIMDClass(&(lclVarInfo[i + argCnt].lclVerTypeInfo)))
+ if ((!foundSIMDType || (type == TYP_STRUCT)) && isSIMDorHWSIMDClass(&(lclVarInfo[i + argCnt].lclVerTypeInfo)))
{
foundSIMDType = true;
if (featureSIMD && type == TYP_STRUCT)
}
#ifdef FEATURE_SIMD
- if (!foundSIMDType && (call->AsCall()->gtRetClsHnd != nullptr) && isSIMDClass(call->AsCall()->gtRetClsHnd))
+ if (!foundSIMDType && (call->AsCall()->gtRetClsHnd != nullptr) && isSIMDorHWSIMDClass(call->AsCall()->gtRetClsHnd))
{
foundSIMDType = true;
}
op1 = gtCloneExpr(argInfo.argNode);
PREFIX_ASSUME(op1 != nullptr);
argInfo.argTmpNum = BAD_VAR_NUM;
+
+ // We may need to retype to ensure we match the callee's view of the type.
+ // Otherwise callee pass-throughs of arguments can create return type
+ // mismatches that block inlining.
+ //
+ // Note argument type mismatches that prevent inlining should
+ // have been caught in impInlineInitVars.
+ if (op1->TypeGet() != lclTyp)
+ {
+ op1->gtType = genActualType(lclTyp);
+ }
}
else if (argInfo.argIsLclVar && !argCanBeModified && !argInfo.argHasCallerLocalRef)
{
case CORINFO_INTRINSIC_Round:
case CORINFO_INTRINSIC_Ceiling:
case CORINFO_INTRINSIC_Floor:
- // TODO-XArch-CQ: Update to work on non-AVX machines: https://github.com/dotnet/coreclr/issues/15908
- return compSupports(InstructionSet_SSE41) && canUseVexEncoding();
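+ // Round/Ceiling/Floor map to the SSE4.1 roundsd/roundss instructions, which
+ // also have non-VEX encodings, so AVX support is no longer required.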
+ return compSupports(InstructionSet_SSE41);
default:
return false;
bool Compiler::IsIntrinsicImplementedByUserCall(CorInfoIntrinsics intrinsicId)
{
- // Currently, if an math intrisic is not implemented by target-specific
- // intructions, it will be implemented by a System.Math call. In the
- // future, if we turn to implementing some of them with helper callers,
+ // Currently, if a math intrinsic is not implemented by target-specific
+ // instructions, it will be implemented by a System.Math call. In the
+ // future, if we turn to implementing some of them with helper calls,
// this predicate needs to be revisited.
return !IsTargetIntrinsic(intrinsicId);
}
}
// Fetch method attributes to see if method is marked final.
- const DWORD derivedMethodAttribs = info.compCompHnd->getMethodAttribs(derivedMethod);
- const bool derivedMethodIsFinal = ((derivedMethodAttribs & CORINFO_FLG_FINAL) != 0);
+ DWORD derivedMethodAttribs = info.compCompHnd->getMethodAttribs(derivedMethod);
+ const bool derivedMethodIsFinal = ((derivedMethodAttribs & CORINFO_FLG_FINAL) != 0);
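+ // These attribs are deliberately non-const: they are refreshed below if the
+ // call is updated to use the unboxed entry point.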
#if defined(DEBUG)
const char* derivedClassName = "?derivedClass";
JITDUMP("Now have direct call to boxed entry point, looking for unboxed entry point\n");
// Note that for some shared methods, the unboxed entry point requires an extra parameter.
- // We defer optimizing if so.
bool requiresInstMethodTableArg = false;
CORINFO_METHOD_HANDLE unboxedEntryMethod =
info.compCompHnd->getUnboxedEntry(derivedMethod, &requiresInstMethodTableArg);
// the copy, we can undo the copy too.
if (requiresInstMethodTableArg)
{
- // We can likely handle this case by grabbing the argument passed to
- // the newobj in the box. But defer for now.
- JITDUMP("Found unboxed entry point, but it needs method table arg, deferring\n");
+ // Perform a trial box removal and ask for the type handle tree.
+ JITDUMP("Unboxed entry needs method table arg...\n");
+ GenTree* methodTableArg = gtTryRemoveBoxUpstreamEffects(thisObj, BR_DONT_REMOVE_WANT_TYPE_HANDLE);
+
+ if (methodTableArg != nullptr)
+ {
+ // If that worked, turn the box into a copy to a local var
+ JITDUMP("Found suitable method table arg tree [%06u]\n", dspTreeID(methodTableArg));
+ GenTree* localCopyThis = gtTryRemoveBoxUpstreamEffects(thisObj, BR_MAKE_LOCAL_COPY);
+
+ if (localCopyThis != nullptr)
+ {
+ // Pass the local var as this and the type handle as a new arg
+ JITDUMP("Success! invoking unboxed entry point on local copy, and passing method table arg\n");
+ call->gtCallObjp = localCopyThis;
+
+ // Prepend for R2L arg passing or empty L2R passing
+ if ((Target::g_tgtArgOrder == Target::ARG_ORDER_R2L) || (call->gtCallArgs == nullptr))
+ {
+ call->gtCallArgs = gtNewListNode(methodTableArg, call->gtCallArgs);
+ }
+ // Append for non-empty L2R
+ else
+ {
+ GenTreeArgList* beforeArg = call->gtCallArgs;
+ while (beforeArg->Rest() != nullptr)
+ {
+ beforeArg = beforeArg->Rest();
+ }
+
+ beforeArg->Rest() = gtNewListNode(methodTableArg, nullptr);
+ }
+
+ call->gtCallMethHnd = unboxedEntryMethod;
+ derivedMethod = unboxedEntryMethod;
+
+ // Method attributes will differ because unboxed entry point is shared
+ const DWORD unboxedMethodAttribs = info.compCompHnd->getMethodAttribs(unboxedEntryMethod);
+ JITDUMP("Updating method attribs from 0x%08x to 0x%08x\n", derivedMethodAttribs,
+ unboxedMethodAttribs);
+ derivedMethodAttribs = unboxedMethodAttribs;
+ }
+ else
+ {
+ JITDUMP("Sorry, failed to undo the box -- can't convert to local copy\n");
+ }
+ }
+ else
+ {
+ JITDUMP("Sorry, failed to undo the box -- can't find method table arg\n");
+ }
}
else
{