{
*pSimdBaseType = simdBaseType;
}
-#ifdef _TARGET_AMD64_
- // Amd64: also indicate that we use floating point registers
+ // Also indicate that we use floating point registers.
compFloatingPointUsed = true;
-#endif
}
}
}
eeGetSig(pResolvedToken->token, info.compScopeHnd, impTokenLookupContextHandle, &calliSig);
callRetTyp = JITtype2varType(calliSig.retType);
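+    // Remember the return type class: it is needed later if the calli result
+    // must be spilled to a temp (see the fat pointer candidate handling below).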
+ clsHnd = calliSig.retTypeClass;
call = impImportIndirectCall(&calliSig, ilOffset);
call->gtCall.callSig = new (this, CMK_CorSig) CORINFO_SIG_INFO;
*call->gtCall.callSig = calliSig;
#endif // DEBUG
+
+ if (IsTargetAbi(CORINFO_CORERT_ABI))
+ {
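+        // On CoreRT, a managed calli may be going through a fat function pointer,
+        // i.e. a tagged pointer that carries an extra generic context argument.
+        // Flag such calls so the fat-call transformation can expand them later.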
+        // Note that GTF_CALL_UNMANAGED is a GenTree flag, not a calling convention
+        // bit, so it cannot be tested against calliSig.callConv; check the
+        // convention mask directly and treat the native conventions as unmanaged.
+        CorInfoCallConv callConv = CorInfoCallConv(calliSig.callConv & CORINFO_CALLCONV_MASK);
+        bool managedCall = (callConv != CORINFO_CALLCONV_C) && (callConv != CORINFO_CALLCONV_STDCALL) &&
+                           (callConv != CORINFO_CALLCONV_THISCALL) && (callConv != CORINFO_CALLCONV_FASTCALL);
+ if (managedCall)
+ {
+ addFatPointerCandidate(call->AsCall());
+ }
+ }
}
else // (opcode != CEE_CALLI)
{
if ((mflags & CORINFO_FLG_VIRTUAL) && (mflags & CORINFO_FLG_EnC) && (opcode == CEE_CALLVIRT))
{
NO_WAY("Virtual call to a function added via EnC is not supported");
- goto DONE_CALL;
}
if ((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_DEFAULT &&
thisPtr = impCloneExpr(thisPtr, &thisPtrCopy, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("LDVIRTFTN this pointer"));
- GenTreePtr fptr = impImportLdvirtftn(thisPtr, pResolvedToken, callInfo);
+ GenTreePtr fptr = nullptr;
+ bool coreRTGenericVirtualMethod =
+ ((sig->callConv & CORINFO_CALLCONV_GENERIC) != 0) && IsTargetAbi(CORINFO_CORERT_ABI);
+#if COR_JIT_EE_VERSION > 460
+ if (coreRTGenericVirtualMethod)
+ {
+ GenTreePtr runtimeMethodHandle = nullptr;
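+            // If the exact context needs a runtime lookup, materialize the method
+            // handle through the generic dictionary; otherwise embed it as a constant.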
+ if (callInfo->exactContextNeedsRuntimeLookup)
+ {
+ runtimeMethodHandle =
+ impRuntimeLookupToTree(pResolvedToken, &callInfo->codePointerLookup, methHnd);
+ }
+ else
+ {
+ runtimeMethodHandle = gtNewIconEmbMethHndNode(pResolvedToken->hMethod);
+ }
+ fptr = gtNewHelperCallNode(CORINFO_HELP_GVMLOOKUP_FOR_SLOT, TYP_I_IMPL, GTF_EXCEPT,
+ gtNewArgList(thisPtr, runtimeMethodHandle));
+ }
+ else
+#endif // COR_JIT_EE_VERSION
+ {
+ fptr = impImportLdvirtftn(thisPtr, pResolvedToken, callInfo);
+ }
+
if (compDonotInline())
{
return callRetTyp;
call->gtCall.gtCallObjp = thisPtrCopy;
call->gtFlags |= GTF_EXCEPT | (fptr->gtFlags & GTF_GLOB_EFFECT);
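+    // The pointer produced by the GVM slot lookup may be a fat function pointer,
+    // so the indirect call through it is also flagged as a fat pointer candidate.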
+ if (coreRTGenericVirtualMethod)
+ {
+ addFatPointerCandidate(call->AsCall());
+ }
#ifdef FEATURE_READYTORUN_COMPILER
if (opts.IsReadyToRun())
{
}
}
-// Note: we assume that small return types are already normalized by the managed callee
-// or by the pinvoke stub for calls to unmanaged code.
-
-DONE_CALL:
+ // Note: we assume that small return types are already normalized by the managed callee
+ // or by the pinvoke stub for calls to unmanaged code.
if (!bIntrinsicImported)
{
impMarkInlineCandidate(call, exactContextHnd, callInfo);
}
+DONE_CALL:
// Push or append the result of the call
if (callRetTyp == TYP_VOID)
{
}
}
- if (call->gtOper == GT_CALL)
+ if (call->IsCall())
{
// Sometimes "call" is not a GT_CALL (if we imported an intrinsic that didn't turn into a call)
+
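+        // Read the candidate flag now: impFixupCallStructReturn below may replace
+        // the call tree with a different node.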
+ bool fatPointerCandidate = call->AsCall()->IsFatPointerCandidate();
if (varTypeIsStruct(callRetTyp))
{
call = impFixupCallStructReturn(call, sig->retTypeClass);
if ((call->gtFlags & GTF_CALL_INLINE_CANDIDATE) != 0)
{
assert(opts.OptEnabled(CLFLG_INLINING));
+ assert(!fatPointerCandidate); // We should not try to inline calli.
// Make the call its own tree (spill the stack if needed).
impAppendTree(call, (unsigned)CHECK_SPILL_ALL, impCurStmtOffs);
}
else
{
+ if (fatPointerCandidate)
+ {
+                    // Fat pointer candidates should appear in statements of the form call() or var = call().
+                    // That shape lets us find statements containing fat calls without walking whole trees,
+                    // and avoids the problems that come with splitting trees.
+ assert(!bIntrinsicImported);
+ assert(IsTargetAbi(CORINFO_CORERT_ABI));
+                    if (call->OperGet() != GT_LCL_VAR) // may already have been converted by impFixupCallStructReturn.
+ {
+ unsigned calliSlot = lvaGrabTemp(true DEBUGARG("calli"));
+ LclVarDsc* varDsc = &lvaTable[calliSlot];
+ varDsc->lvVerTypeInfo = tiRetVal;
+ impAssignTempGen(calliSlot, call, clsHnd, (unsigned)CHECK_SPILL_NONE);
+                        // impAssignTempGen can change the call's arg list and return type when the call returns a struct.
+ var_types type = genActualType(lvaTable[calliSlot].TypeGet());
+ call = gtNewLclvNode(calliSlot, type);
+ }
+ }
+
// For non-candidates we must also spill, since we
// might have locals live on the eval stack that this
// call can modify.
- impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("non-inline candidate call"));
+ //
+ // Suppress this for certain well-known call targets
+                    // that we know won't modify locals, e.g. calls that are
+ // recognized in gtCanOptimizeTypeEquality. Otherwise
+ // we may break key fragile pattern matches later on.
+ bool spillStack = true;
+ if (call->IsCall())
+ {
+ GenTreeCall* callNode = call->AsCall();
+ if ((callNode->gtCallType == CT_HELPER) && gtIsTypeHandleToRuntimeTypeHelper(callNode))
+ {
+ spillStack = false;
+ }
+ else if ((callNode->gtCallMoreFlags & GTF_CALL_M_SPECIAL_INTRINSIC) != 0)
+ {
+ spillStack = false;
+ }
+ }
+
+ if (spillStack)
+ {
+ impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("non-inline candidate call"));
+ }
}
}
prefixFlags |= PREFIX_TAILCALL_EXPLICIT;
}
}
+ }
- // Note that when running under tail call stress, a call will be marked as explicit tail prefixed
- // hence will not be considered for implicit tail calling.
- bool isRecursive = (callInfo.hMethod == info.compMethodHnd);
- if (impIsImplicitTailCallCandidate(opcode, codeAddr + sz, codeEndp, prefixFlags, isRecursive))
+    // The declaration and assignment are split to avoid "initialization skipped by goto" warnings.
+ bool isRecursive;
+ isRecursive = !compIsForInlining() && (callInfo.hMethod == info.compMethodHnd);
+
+ // Note that when running under tail call stress, a call will be marked as explicit tail prefixed
+ // hence will not be considered for implicit tail calling.
+ if (impIsImplicitTailCallCandidate(opcode, codeAddr + sz, codeEndp, prefixFlags, isRecursive))
+ {
+ if (compIsForInlining())
+ {
+#if FEATURE_TAILCALL_OPT_SHARED_RETURN
+            // Are we inlining at an implicit tail call site? If so, we can flag
+            // implicit tail call sites in the inline body. These call sites
+            // often end up in non-BBJ_RETURN blocks, so only flag them when
+ // we're able to handle shared returns.
+ if (impInlineInfo->iciCall->IsImplicitTailCall())
+ {
+ JITDUMP(" (Inline Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)");
+ prefixFlags |= PREFIX_TAILCALL_IMPLICIT;
+ }
+#endif // FEATURE_TAILCALL_OPT_SHARED_RETURN
+ }
+ else
{
JITDUMP(" (Implicit Tail call: prefixFlags |= PREFIX_TAILCALL_IMPLICIT)");
prefixFlags |= PREFIX_TAILCALL_IMPLICIT;
impHandleAccessAllowed(callInfo.accessAllowed, &callInfo.callsiteCalloutHelper);
#if 0 // DevDiv 410397 - This breaks too many obfuscated apps to do this in an in-place release
-
+
// DevDiv 291703 - we need to check for accessibility between the caller of InitializeArray
// and the field it is reading, thus it is now unverifiable to not immediately precede with
    // ldtoken <field token>, and we now check accessibility