* Pushes the given tree on the stack.
*/
-void Compiler::impPushOnStack(GenTreePtr tree, typeInfo ti)
+void Compiler::impPushOnStack(GenTree* tree, typeInfo ti)
{
/* Check for overflow. If inlining, we may be using a bigger stack */
*/
#ifdef DEBUG // only used in asserts
-static bool impValidSpilledStackEntry(GenTreePtr tree)
+static bool impValidSpilledStackEntry(GenTree* tree)
{
if (tree->gtOper == GT_LCL_VAR)
{
for (unsigned level = 0; level < verCurrentState.esStackDepth; level++, table++)
{
table->seTypeInfo = verCurrentState.esStack[level].seTypeInfo;
- GenTreePtr tree = verCurrentState.esStack[level].val;
+ GenTree* tree = verCurrentState.esStack[level].val;
assert(impValidSpilledStackEntry(tree));
* directly only for handling CEE_LEAVEs out of finally-protected try's.
*/
-inline void Compiler::impEndTreeList(BasicBlock* block, GenTreePtr firstStmt, GenTreePtr lastStmt)
+inline void Compiler::impEndTreeList(BasicBlock* block, GenTree* firstStmt, GenTree* lastStmt)
{
assert(firstStmt->gtOper == GT_STMT);
assert(lastStmt->gtOper == GT_STMT);
{
assert(impTreeList->gtOper == GT_BEG_STMTS);
- GenTreePtr firstTree = impTreeList->gtNext;
+ GenTree* firstTree = impTreeList->gtNext;
if (!firstTree)
{
* that this has only limited value as we can only check [0..chkLevel).
*/
-inline void Compiler::impAppendStmtCheck(GenTreePtr stmt, unsigned chkLevel)
+inline void Compiler::impAppendStmtCheck(GenTree* stmt, unsigned chkLevel)
{
#ifndef DEBUG
return;
return;
}
- GenTreePtr tree = stmt->gtStmt.gtStmtExpr;
+ GenTree* tree = stmt->gtStmt.gtStmtExpr;
// Calls can only be appended if there are no GTF_GLOB_EFFECT on the stack
* interference with stmt and spill if needed.
*/
-inline void Compiler::impAppendStmt(GenTreePtr stmt, unsigned chkLevel)
+inline void Compiler::impAppendStmt(GenTree* stmt, unsigned chkLevel)
{
assert(stmt->gtOper == GT_STMT);
noway_assert(impTreeLast != nullptr);
/* If the statement being appended has any side-effects, check the stack
to see if anything needs to be spilled to preserve correct ordering. */
- GenTreePtr expr = stmt->gtStmt.gtStmtExpr;
- unsigned flags = expr->gtFlags & GTF_GLOB_EFFECT;
+ GenTree* expr = stmt->gtStmt.gtStmtExpr;
+ unsigned flags = expr->gtFlags & GTF_GLOB_EFFECT;
// Assignment to (unaliased) locals don't count as a side-effect as
// we handle them specially using impSpillLclRefs(). Temp locals should
* Insert the given GT_STMT "stmt" before GT_STMT "stmtBefore"
*/
-inline void Compiler::impInsertStmtBefore(GenTreePtr stmt, GenTreePtr stmtBefore)
+inline void Compiler::impInsertStmtBefore(GenTree* stmt, GenTree* stmtBefore)
{
assert(stmt->gtOper == GT_STMT);
assert(stmtBefore->gtOper == GT_STMT);
- GenTreePtr stmtPrev = stmtBefore->gtPrev;
- stmt->gtPrev = stmtPrev;
- stmt->gtNext = stmtBefore;
- stmtPrev->gtNext = stmt;
- stmtBefore->gtPrev = stmt;
+ GenTree* stmtPrev = stmtBefore->gtPrev;
+ stmt->gtPrev = stmtPrev;
+ stmt->gtNext = stmtBefore;
+ stmtPrev->gtNext = stmt;
+ stmtBefore->gtPrev = stmt;
}
/*****************************************************************************
* Return the newly created statement.
*/
-GenTreePtr Compiler::impAppendTree(GenTreePtr tree, unsigned chkLevel, IL_OFFSETX offset)
+GenTree* Compiler::impAppendTree(GenTree* tree, unsigned chkLevel, IL_OFFSETX offset)
{
assert(tree);
/* Allocate an 'expression statement' node */
- GenTreePtr expr = gtNewStmt(tree, offset);
+ GenTree* expr = gtNewStmt(tree, offset);
/* Append the statement to the current block's stmt list */
* Insert the given exression tree before GT_STMT "stmtBefore"
*/
-void Compiler::impInsertTreeBefore(GenTreePtr tree, IL_OFFSETX offset, GenTreePtr stmtBefore)
+void Compiler::impInsertTreeBefore(GenTree* tree, IL_OFFSETX offset, GenTree* stmtBefore)
{
assert(stmtBefore->gtOper == GT_STMT);
/* Allocate an 'expression statement' node */
- GenTreePtr expr = gtNewStmt(tree, offset);
+ GenTree* expr = gtNewStmt(tree, offset);
/* Append the statement to the current block's stmt list */
*/
void Compiler::impAssignTempGen(unsigned tmp,
- GenTreePtr val,
+ GenTree* val,
unsigned curLevel,
- GenTreePtr* pAfterStmt, /* = NULL */
+ GenTree** pAfterStmt, /* = NULL */
IL_OFFSETX ilOffset, /* = BAD_IL_OFFSET */
BasicBlock* block /* = NULL */
)
{
- GenTreePtr asg = gtNewTempAssign(tmp, val);
+ GenTree* asg = gtNewTempAssign(tmp, val);
if (!asg->IsNothingNode())
{
if (pAfterStmt)
{
- GenTreePtr asgStmt = gtNewStmt(asg, ilOffset);
- *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, asgStmt);
+ GenTree* asgStmt = gtNewStmt(asg, ilOffset);
+ *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, asgStmt);
}
else
{
*/
void Compiler::impAssignTempGen(unsigned tmpNum,
- GenTreePtr val,
+ GenTree* val,
CORINFO_CLASS_HANDLE structType,
unsigned curLevel,
- GenTreePtr* pAfterStmt, /* = NULL */
+ GenTree** pAfterStmt, /* = NULL */
IL_OFFSETX ilOffset, /* = BAD_IL_OFFSET */
BasicBlock* block /* = NULL */
)
{
- GenTreePtr asg;
+ GenTree* asg;
if (varTypeIsStruct(val))
{
val->gtType = lvaTable[tmpNum].lvType;
- GenTreePtr dst = gtNewLclvNode(tmpNum, val->gtType);
- asg = impAssignStruct(dst, val, structType, curLevel, pAfterStmt, block);
+ GenTree* dst = gtNewLclvNode(tmpNum, val->gtType);
+ asg = impAssignStruct(dst, val, structType, curLevel, pAfterStmt, block);
}
else
{
{
if (pAfterStmt)
{
- GenTreePtr asgStmt = gtNewStmt(asg, ilOffset);
- *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, asgStmt);
+ GenTree* asgStmt = gtNewStmt(asg, ilOffset);
+ *pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, asgStmt);
}
else
{
{
StackEntry se = impPopStack();
typeInfo ti = se.seTypeInfo;
- GenTreePtr temp = se.val;
+ GenTree* temp = se.val;
if (varTypeIsStruct(temp))
{
if (corType == CORINFO_TYPE_DOUBLE && args->Current()->TypeGet() == TYP_FLOAT)
{
- args->Current() = gtNewCastNode(TYP_DOUBLE, args->Current(), TYP_DOUBLE);
+ args->Current() = gtNewCastNode(TYP_DOUBLE, args->Current(), false, TYP_DOUBLE);
}
else if (corType == CORINFO_TYPE_FLOAT && args->Current()->TypeGet() == TYP_DOUBLE)
{
- args->Current() = gtNewCastNode(TYP_FLOAT, args->Current(), TYP_FLOAT);
+ args->Current() = gtNewCastNode(TYP_FLOAT, args->Current(), false, TYP_FLOAT);
}
// insert any widening or narrowing casts for backwards compatibility
curLevel is the stack level for which a spill may be being done.
*/
-GenTreePtr Compiler::impAssignStruct(GenTreePtr dest,
- GenTreePtr src,
- CORINFO_CLASS_HANDLE structHnd,
- unsigned curLevel,
- GenTreePtr* pAfterStmt, /* = NULL */
- BasicBlock* block /* = NULL */
- )
+GenTree* Compiler::impAssignStruct(GenTree* dest,
+ GenTree* src,
+ CORINFO_CLASS_HANDLE structHnd,
+ unsigned curLevel,
+ GenTree** pAfterStmt, /* = NULL */
+ BasicBlock* block /* = NULL */
+ )
{
assert(varTypeIsStruct(dest));
// TODO-1stClassStructs: Avoid creating an address if it is not needed,
// or re-creating a Blk node if it is.
- GenTreePtr destAddr;
+ GenTree* destAddr;
if (dest->gtOper == GT_IND || dest->OperIsBlk())
{
/*****************************************************************************/
-GenTreePtr Compiler::impAssignStructPtr(GenTreePtr destAddr,
- GenTreePtr src,
- CORINFO_CLASS_HANDLE structHnd,
- unsigned curLevel,
- GenTreePtr* pAfterStmt, /* = NULL */
- BasicBlock* block /* = NULL */
- )
+GenTree* Compiler::impAssignStructPtr(GenTree* destAddr,
+ GenTree* src,
+ CORINFO_CLASS_HANDLE structHnd,
+ unsigned curLevel,
+ GenTree** pAfterStmt, /* = NULL */
+ BasicBlock* block /* = NULL */
+ )
{
- var_types destType;
- GenTreePtr dest = nullptr;
- unsigned destFlags = 0;
+ var_types destType;
+ GenTree* dest = nullptr;
+ unsigned destFlags = 0;
#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
assert(varTypeIsStruct(src) || (src->gtOper == GT_ADDR && src->TypeGet() == TYP_BYREF));
// If it is a multi-reg struct return, don't change the oper to GT_LCL_FLD.
// That is, the IR will be of the form lclVar = call for multi-reg return
//
- GenTreePtr lcl = destAddr->gtOp.gtOp1;
+ GenTree* lcl = destAddr->gtOp.gtOp1;
if (src->AsCall()->HasMultiRegRetVal())
{
// Mark the struct LclVar as used in a MultiReg return context
// We change this to a GT_LCL_FLD (from a GT_ADDR of a GT_LCL_VAR)
lcl->ChangeOper(GT_LCL_FLD);
fgLclFldAssign(lcl->gtLclVarCommon.gtLclNum);
+ lcl->gtType = src->gtType;
+ asgType = src->gtType;
}
- lcl->gtType = src->gtType;
- asgType = src->gtType;
- dest = lcl;
+ dest = lcl;
#if defined(_TARGET_ARM_)
// TODO-Cleanup: This should have been taken care of in the above HasMultiRegRetVal() case,
// Since we are assigning the result of a GT_MKREFANY,
// "destAddr" must point to a refany.
- GenTreePtr destAddrClone;
+ GenTree* destAddrClone;
destAddr =
impCloneExpr(destAddr, &destAddrClone, structHnd, curLevel, pAfterStmt DEBUGARG("MKREFANY assignment"));
assert(offsetof(CORINFO_RefAny, dataPtr) == 0);
assert(destAddr->gtType == TYP_I_IMPL || destAddr->gtType == TYP_BYREF);
GetZeroOffsetFieldMap()->Set(destAddr, GetFieldSeqStore()->CreateSingleton(GetRefanyDataField()));
- GenTreePtr ptrSlot = gtNewOperNode(GT_IND, TYP_I_IMPL, destAddr);
+ GenTree* ptrSlot = gtNewOperNode(GT_IND, TYP_I_IMPL, destAddr);
GenTreeIntCon* typeFieldOffset = gtNewIconNode(offsetof(CORINFO_RefAny, type), TYP_I_IMPL);
typeFieldOffset->gtFieldSeq = GetFieldSeqStore()->CreateSingleton(GetRefanyTypeField());
- GenTreePtr typeSlot =
+ GenTree* typeSlot =
gtNewOperNode(GT_IND, TYP_I_IMPL, gtNewOperNode(GT_ADD, destAddr->gtType, destAddrClone, typeFieldOffset));
// append the assign of the pointer value
- GenTreePtr asg = gtNewAssignNode(ptrSlot, src->gtOp.gtOp1);
+ GenTree* asg = gtNewAssignNode(ptrSlot, src->gtOp.gtOp1);
if (pAfterStmt)
{
*pAfterStmt = fgInsertStmtAfter(block, *pAfterStmt, gtNewStmt(asg, impCurStmtOffs));
willDeref - does the caller guarantee to dereference the pointer.
*/
-GenTreePtr Compiler::impGetStructAddr(GenTreePtr structVal,
- CORINFO_CLASS_HANDLE structHnd,
- unsigned curLevel,
- bool willDeref)
+GenTree* Compiler::impGetStructAddr(GenTree* structVal,
+ CORINFO_CLASS_HANDLE structHnd,
+ unsigned curLevel,
+ bool willDeref)
{
assert(varTypeIsStruct(structVal) || eeIsValueClass(structHnd));
assert(structVal->gtObj.gtClass == structHnd);
return (structVal->gtObj.Addr());
}
- else if (oper == GT_CALL || oper == GT_RET_EXPR || oper == GT_OBJ || oper == GT_MKREFANY)
+ else if (oper == GT_CALL || oper == GT_RET_EXPR || oper == GT_OBJ || oper == GT_MKREFANY ||
+ structVal->OperIsSimdHWIntrinsic())
{
unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));
// The 'return value' is now the temp itself
- type = genActualType(lvaTable[tmpNum].TypeGet());
- GenTreePtr temp = gtNewLclvNode(tmpNum, type);
- temp = gtNewOperNode(GT_ADDR, TYP_BYREF, temp);
+ type = genActualType(lvaTable[tmpNum].TypeGet());
+ GenTree* temp = gtNewLclvNode(tmpNum, type);
+ temp = gtNewOperNode(GT_ADDR, TYP_BYREF, temp);
return temp;
}
else if (oper == GT_COMMA)
{
assert(structVal->gtOp.gtOp2->gtType == type); // Second thing is the struct
- GenTreePtr oldTreeLast = impTreeLast;
- structVal->gtOp.gtOp2 = impGetStructAddr(structVal->gtOp.gtOp2, structHnd, curLevel, willDeref);
- structVal->gtType = TYP_BYREF;
+ GenTree* oldTreeLast = impTreeLast;
+ structVal->gtOp.gtOp2 = impGetStructAddr(structVal->gtOp.gtOp2, structHnd, curLevel, willDeref);
+ structVal->gtType = TYP_BYREF;
if (oldTreeLast != impTreeLast)
{
// Given TYP_STRUCT value 'structVal', make sure it is 'canonical', that is
// it is either an OBJ or a MKREFANY node, or a node (e.g. GT_INDEX) that will be morphed.
//
-GenTreePtr Compiler::impNormStructVal(GenTreePtr structVal,
- CORINFO_CLASS_HANDLE structHnd,
- unsigned curLevel,
- bool forceNormalization /*=false*/)
+GenTree* Compiler::impNormStructVal(GenTree* structVal,
+ CORINFO_CLASS_HANDLE structHnd,
+ unsigned curLevel,
+ bool forceNormalization /*=false*/)
{
assert(forceNormalization || varTypeIsStruct(structVal));
assert(structHnd != NO_CLASS_HANDLE);
}
#ifdef FEATURE_SIMD
- if (blockNode->OperGet() == GT_SIMD)
- {
- parent->gtOp.gtOp2 = impNormStructVal(blockNode, structHnd, curLevel, forceNormalization);
- alreadyNormalized = true;
- }
- else
-#endif
-#ifdef FEATURE_HW_INTRINSICS
- if (blockNode->OperGet() == GT_HWIntrinsic && blockNode->AsHWIntrinsic()->isSIMD())
+ if (blockNode->OperIsSIMDorSimdHWintrinsic())
{
parent->gtOp.gtOp2 = impNormStructVal(blockNode, structHnd, curLevel, forceNormalization);
alreadyNormalized = true;
// and the token refers to formal type parameters whose instantiation is not known
// at compile-time.
//
-GenTreePtr Compiler::impTokenToHandle(CORINFO_RESOLVED_TOKEN* pResolvedToken,
- BOOL* pRuntimeLookup /* = NULL */,
- BOOL mustRestoreHandle /* = FALSE */,
- BOOL importParent /* = FALSE */)
+GenTree* Compiler::impTokenToHandle(CORINFO_RESOLVED_TOKEN* pResolvedToken,
+ BOOL* pRuntimeLookup /* = NULL */,
+ BOOL mustRestoreHandle /* = FALSE */,
+ BOOL importParent /* = FALSE */)
{
assert(!fgGlobalMorph);
return result;
}
-GenTreePtr Compiler::impLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
- CORINFO_LOOKUP* pLookup,
- unsigned handleFlags,
- void* compileTimeHandle)
+GenTree* Compiler::impLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
+ CORINFO_LOOKUP* pLookup,
+ unsigned handleFlags,
+ void* compileTimeHandle)
{
if (!pLookup->lookupKind.needsRuntimeLookup)
{
CORINFO_GENERIC_HANDLE handle = nullptr;
void* pIndirection = nullptr;
- assert(pLookup->constLookup.accessType != IAT_PPVALUE);
+ assert(pLookup->constLookup.accessType != IAT_PPVALUE && pLookup->constLookup.accessType != IAT_RELPVALUE);
if (pLookup->constLookup.accessType == IAT_VALUE)
{
}
#ifdef FEATURE_READYTORUN_COMPILER
-GenTreePtr Compiler::impReadyToRunLookupToTree(CORINFO_CONST_LOOKUP* pLookup,
- unsigned handleFlags,
- void* compileTimeHandle)
+GenTree* Compiler::impReadyToRunLookupToTree(CORINFO_CONST_LOOKUP* pLookup,
+ unsigned handleFlags,
+ void* compileTimeHandle)
{
CORINFO_GENERIC_HANDLE handle = nullptr;
void* pIndirection = nullptr;
- assert(pLookup->accessType != IAT_PPVALUE);
+ assert(pLookup->accessType != IAT_PPVALUE && pLookup->accessType != IAT_RELPVALUE);
if (pLookup->accessType == IAT_VALUE)
{
}
#endif
-GenTreePtr Compiler::impMethodPointer(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo)
+GenTree* Compiler::impMethodPointer(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo)
{
- GenTreePtr op1 = nullptr;
+ GenTree* op1 = nullptr;
switch (pCallInfo->kind)
{
}
else
{
- op1->gtFptrVal.gtEntryPoint.addr = nullptr;
+ op1->gtFptrVal.gtEntryPoint.addr = nullptr;
+ op1->gtFptrVal.gtEntryPoint.accessType = IAT_VALUE;
}
#endif
break;
// Notes:
// Reports about generic context using.
-GenTreePtr Compiler::getRuntimeContextTree(CORINFO_RUNTIME_LOOKUP_KIND kind)
+GenTree* Compiler::getRuntimeContextTree(CORINFO_RUNTIME_LOOKUP_KIND kind)
{
- GenTreePtr ctxTree = nullptr;
+ GenTree* ctxTree = nullptr;
// Collectible types requires that for shared generic code, if we use the generic context parameter
// that we report it. (This is a conservative approach, we could detect some cases particularly when the
to lookup the handle.
*/
-GenTreePtr Compiler::impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
- CORINFO_LOOKUP* pLookup,
- void* compileTimeHandle)
+GenTree* Compiler::impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedToken,
+ CORINFO_LOOKUP* pLookup,
+ void* compileTimeHandle)
{
// This method can only be called from the importer instance of the Compiler.
// In other word, it cannot be called by the instance of the Compiler for the inlinee.
assert(!compIsForInlining());
- GenTreePtr ctxTree = getRuntimeContextTree(pLookup->lookupKind.runtimeLookupKind);
+ GenTree* ctxTree = getRuntimeContextTree(pLookup->lookupKind.runtimeLookupKind);
CORINFO_RUNTIME_LOOKUP* pRuntimeLookup = &pLookup->runtimeLookup;
// It's available only via the run-time helper function
}
// Slot pointer
- GenTreePtr slotPtrTree = ctxTree;
+ GenTree* slotPtrTree = ctxTree;
if (pRuntimeLookup->testForNull)
{
nullptr DEBUGARG("impRuntimeLookup slot"));
}
- GenTreePtr indOffTree = nullptr;
+ GenTree* indOffTree = nullptr;
// Applied repeated indirections
for (WORD i = 0; i < pRuntimeLookup->indirections; i++)
impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark1"));
// Extract the handle
- GenTreePtr handle = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
+ GenTree* handle = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
handle->gtFlags |= GTF_IND_NONFAULTING;
- GenTreePtr handleCopy = impCloneExpr(handle, &handle, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
- nullptr DEBUGARG("impRuntimeLookup typehandle"));
+ GenTree* handleCopy = impCloneExpr(handle, &handle, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
+ nullptr DEBUGARG("impRuntimeLookup typehandle"));
// Call to helper
GenTree* argNode = gtNewIconEmbHndNode(pRuntimeLookup->signature, nullptr, GTF_ICON_TOKEN_HDL, compileTimeHandle);
GenTreeArgList* helperArgs = gtNewArgList(ctxTree, argNode);
- GenTreePtr helperCall = gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, helperArgs);
+ GenTree* helperCall = gtNewHelperCallNode(pRuntimeLookup->helper, TYP_I_IMPL, helperArgs);
// Check for null and possibly call helper
- GenTreePtr relop = gtNewOperNode(GT_NE, TYP_INT, handle, gtNewIconNode(0, TYP_I_IMPL));
+ GenTree* relop = gtNewOperNode(GT_NE, TYP_INT, handle, gtNewIconNode(0, TYP_I_IMPL));
relop->gtFlags |= GTF_RELOP_QMARK;
- GenTreePtr colon = new (this, GT_COLON) GenTreeColon(TYP_I_IMPL,
- gtNewNothingNode(), // do nothing if nonnull
- helperCall);
+ GenTree* colon = new (this, GT_COLON) GenTreeColon(TYP_I_IMPL,
+ gtNewNothingNode(), // do nothing if nonnull
+ helperCall);
- GenTreePtr qmark = gtNewQmarkNode(TYP_I_IMPL, relop, colon);
+ GenTree* qmark = gtNewQmarkNode(TYP_I_IMPL, relop, colon);
unsigned tmp;
if (handleCopy->IsLocal())
guard.Init(&impNestedStackSpill, bAssertOnRecursion);
#endif
- GenTreePtr tree = verCurrentState.esStack[level].val;
+ GenTree* tree = verCurrentState.esStack[level].val;
/* Allocate a temp if we haven't been asked to use a particular one */
}
// The tree type may be modified by impAssignTempGen, so use the type of the lclVar.
- var_types type = genActualType(lvaTable[tnum].TypeGet());
- GenTreePtr temp = gtNewLclvNode(tnum, type);
+ var_types type = genActualType(lvaTable[tnum].TypeGet());
+ GenTree* temp = gtNewLclvNode(tnum, type);
verCurrentState.esStack[level].val = temp;
return true;
for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
{
- GenTreePtr tree = verCurrentState.esStack[level].val;
+ GenTree* tree = verCurrentState.esStack[level].val;
if (!spillLeaves && tree->OperIsLeaf())
{
for (unsigned i = 0; i < chkLevel; i++)
{
- GenTreePtr tree = verCurrentState.esStack[i].val;
+ GenTree* tree = verCurrentState.esStack[i].val;
- GenTreePtr lclVarTree;
+ GenTree* lclVarTree;
if ((tree->gtFlags & spillFlags) != 0 ||
(spillGlobEffects && // Only consider the following when spillGlobEffects == TRUE
for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
{
- GenTreePtr tree = verCurrentState.esStack[level].val;
+ GenTree* tree = verCurrentState.esStack[level].val;
// Make sure if we have an exception object in the sub tree we spill ourselves.
if (gtHasCatchArg(tree))
{
{
for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
{
- GenTreePtr tree = verCurrentState.esStack[level].val;
+ GenTree* tree = verCurrentState.esStack[level].val;
if (fgWalkTreePre(&tree, impFindValueClasses) == WALK_ABORT)
{
* Callback that checks if a tree node is TYP_STRUCT
*/
-Compiler::fgWalkResult Compiler::impFindValueClasses(GenTreePtr* pTree, fgWalkData* data)
+Compiler::fgWalkResult Compiler::impFindValueClasses(GenTree** pTree, fgWalkData* data)
{
fgWalkResult walkResult = WALK_CONTINUE;
for (unsigned level = 0; level < verCurrentState.esStackDepth; level++)
{
- GenTreePtr tree = verCurrentState.esStack[level].val;
+ GenTree* tree = verCurrentState.esStack[level].val;
/* If the tree may throw an exception, and the block has a handler,
then we need to spill assignments to the local if the local is
if ((hndBlk->bbFlags & (BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET)) ==
(BBF_IMPORTED | BBF_INTERNAL | BBF_DONT_REMOVE | BBF_HAS_LABEL | BBF_JMP_TARGET))
{
- GenTreePtr tree = hndBlk->bbTreeList;
+ GenTree* tree = hndBlk->bbTreeList;
if (tree != nullptr && tree->gtOper == GT_STMT)
{
}
/* Push the exception address value on the stack */
- GenTreePtr arg = new (this, GT_CATCH_ARG) GenTree(GT_CATCH_ARG, TYP_REF);
+ GenTree* arg = new (this, GT_CATCH_ARG) GenTree(GT_CATCH_ARG, TYP_REF);
/* Mark the node as having a side-effect - i.e. cannot be
* moved around since it is tied to a fixed location (EAX) */
* If the tree has side-effects, it will be spilled to a temp.
*/
-GenTreePtr Compiler::impCloneExpr(GenTreePtr tree,
- GenTreePtr* pClone,
- CORINFO_CLASS_HANDLE structHnd,
- unsigned curLevel,
- GenTreePtr* pAfterStmt DEBUGARG(const char* reason))
+GenTree* Compiler::impCloneExpr(GenTree* tree,
+ GenTree** pClone,
+ CORINFO_CLASS_HANDLE structHnd,
+ unsigned curLevel,
+ GenTree** pAfterStmt DEBUGARG(const char* reason))
{
if (!(tree->gtFlags & GTF_GLOB_EFFECT))
{
- GenTreePtr clone = gtClone(tree, true);
+ GenTree* clone = gtClone(tree, true);
if (clone)
{
{
if (compIsForInlining())
{
- GenTreePtr callStmt = impInlineInfo->iciStmt;
+ GenTree* callStmt = impInlineInfo->iciStmt;
assert(callStmt->gtOper == GT_STMT);
impCurStmtOffs = callStmt->gtStmt.gtStmtILoffsx;
}
*/
/* static */
-void Compiler::impBashVarAddrsToI(GenTreePtr tree1, GenTreePtr tree2)
+void Compiler::impBashVarAddrsToI(GenTree* tree1, GenTree* tree2)
{
if (tree1->IsVarAddr())
{
* We also allow an implicit conversion of a ldnull into a TYP_I_IMPL(0)
*/
-GenTreePtr Compiler::impImplicitIorI4Cast(GenTreePtr tree, var_types dstTyp)
+GenTree* Compiler::impImplicitIorI4Cast(GenTree* tree, var_types dstTyp)
{
var_types currType = genActualType(tree->gtType);
var_types wantedType = genActualType(dstTyp);
else if (varTypeIsI(wantedType) && (currType == TYP_INT))
{
// Note that this allows TYP_INT to be cast to a TYP_I_IMPL when wantedType is a TYP_BYREF or TYP_REF
- tree = gtNewCastNode(TYP_I_IMPL, tree, TYP_I_IMPL);
+ tree = gtNewCastNode(TYP_I_IMPL, tree, false, TYP_I_IMPL);
}
else if ((wantedType == TYP_INT) && varTypeIsI(currType))
{
// Note that this allows TYP_BYREF or TYP_REF to be cast to a TYP_INT
- tree = gtNewCastNode(TYP_INT, tree, TYP_INT);
+ tree = gtNewCastNode(TYP_INT, tree, false, TYP_INT);
}
#endif // _TARGET_64BIT_
}
* that exist in the IL are turned into explicit casts here.
*/
-GenTreePtr Compiler::impImplicitR4orR8Cast(GenTreePtr tree, var_types dstTyp)
+GenTree* Compiler::impImplicitR4orR8Cast(GenTree* tree, var_types dstTyp)
{
#ifndef LEGACY_BACKEND
if (varTypeIsFloating(tree) && varTypeIsFloating(dstTyp) && (dstTyp != tree->gtType))
{
- tree = gtNewCastNode(dstTyp, tree, dstTyp);
+ tree = gtNewCastNode(dstTyp, tree, false, dstTyp);
}
#endif // !LEGACY_BACKEND
// The function recognizes all kinds of arrays thus enabling a small runtime
// such as CoreRT to skip providing an implementation for InitializeArray.
-GenTreePtr Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig)
+GenTree* Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig)
{
assert(sig->numArgs == 2);
- GenTreePtr fieldTokenNode = impStackTop(0).val;
- GenTreePtr arrayLocalNode = impStackTop(1).val;
+ GenTree* fieldTokenNode = impStackTop(0).val;
+ GenTree* arrayLocalNode = impStackTop(1).val;
//
// Verify that the field token is known and valid. Note that It's also
// We start by looking at the last statement, making sure it's an assignment, and
// that the target of the assignment is the array passed to InitializeArray.
//
- GenTreePtr arrayAssignment = impTreeLast->gtStmt.gtStmtExpr;
+ GenTree* arrayAssignment = impTreeLast->gtStmt.gtStmtExpr;
if ((arrayAssignment->gtOper != GT_ASG) || (arrayAssignment->gtOp.gtOp1->gtOper != GT_LCL_VAR) ||
(arrayLocalNode->gtOper != GT_LCL_VAR) ||
(arrayAssignment->gtOp.gtOp1->gtLclVarCommon.gtLclNum != arrayLocalNode->gtLclVarCommon.gtLclNum))
// Make sure that the object being assigned is a helper call.
//
- GenTreePtr newArrayCall = arrayAssignment->gtOp.gtOp2;
+ GenTree* newArrayCall = arrayAssignment->gtOp.gtOp2;
if ((newArrayCall->gtOper != GT_CALL) || (newArrayCall->gtCall.gtCallType != CT_HELPER))
{
return nullptr;
// the number of elements.
//
- GenTreePtr arrayLengthNode;
+ GenTree* arrayLengthNode;
GenTreeArgList* args = newArrayCall->gtCall.gtCallArgs;
#ifdef FEATURE_READYTORUN_COMPILER
dataOffset = eeGetArrayDataOffset(elementType);
}
- GenTreePtr dst = gtNewOperNode(GT_ADD, TYP_BYREF, arrayLocalNode, gtNewIconNode(dataOffset, TYP_I_IMPL));
- GenTreePtr blk = gtNewBlockVal(dst, blkSize);
- GenTreePtr src = gtNewIndOfIconHandleNode(TYP_STRUCT, (size_t)initData, GTF_ICON_STATIC_HDL, false);
+ GenTree* dst = gtNewOperNode(GT_ADD, TYP_BYREF, arrayLocalNode, gtNewIconNode(dataOffset, TYP_I_IMPL));
+ GenTree* blk = gtNewBlockVal(dst, blkSize);
+ GenTree* src = gtNewIndOfIconHandleNode(TYP_STRUCT, (size_t)initData, GTF_ICON_STATIC_HDL, false);
return gtNewBlkOpNode(blk, // dst
src, // src
ni = lookupNamedIntrinsic(method);
#ifdef FEATURE_HW_INTRINSICS
-#ifdef _TARGET_XARCH_
- if (ni > NI_HW_INTRINSIC_START && ni < NI_HW_INTRINSIC_END)
- {
- return impX86HWIntrinsic(ni, method, sig, mustExpand);
- }
-#endif // _TARGET_XARCH_
-#ifdef _TARGET_ARM64_
if (ni > NI_HW_INTRINSIC_START && ni < NI_HW_INTRINSIC_END)
{
return impHWIntrinsic(ni, method, sig, mustExpand);
}
-#endif // _TARGET_XARCH_
#endif // FEATURE_HW_INTRINSICS
}
}
assert(intrinsicID != CORINFO_INTRINSIC_StubHelpers_GetStubContextAddr);
#endif
- GenTreePtr retNode = nullptr;
+ GenTree* retNode = nullptr;
// Under debug and minopts, only expand what is required.
if (!mustExpand && (opts.compDbgCode || opts.MinOpts()))
switch (intrinsicID)
{
- GenTreePtr op1, op2;
+ GenTree* op1;
+ GenTree* op2;
case CORINFO_INTRINSIC_Sin:
case CORINFO_INTRINSIC_Cbrt:
{
assert(callType != TYP_STRUCT);
assert(sig->numArgs == 3);
- GenTreePtr op3;
+ GenTree* op3;
op3 = impPopStack().val; // comparand
op2 = impPopStack().val; // value
op1 = impPopStack().val; // location
- GenTreePtr node = new (this, GT_CMPXCHG) GenTreeCmpXchg(genActualType(callType), op1, op2, op3);
+ GenTree* node = new (this, GT_CMPXCHG) GenTreeCmpXchg(genActualType(callType), op1, op2, op3);
node->gtCmpXchg.gtOpLocation->gtFlags |= GTF_DONT_CSE;
retNode = node;
impPopStack();
GenTree* typeHandleOp =
impTokenToHandle(pConstrainedResolvedToken, nullptr, TRUE /* mustRestoreHandle */);
+ if (typeHandleOp == nullptr)
+ {
+ assert(compDonotInline());
+ return nullptr;
+ }
GenTreeArgList* helperArgs = gtNewArgList(typeHandleOp);
GenTree* runtimeType =
gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPE, TYP_REF, helperArgs);
// Remove call to constructor and directly assign the byref passed
// to the call to the first slot of the ByReference struct.
op1 = impPopStack().val;
- GenTreePtr thisptr = newobjThis;
+ GenTree* thisptr = newobjThis;
CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(clsHnd, 0);
- GenTreePtr field = gtNewFieldRef(TYP_BYREF, fldHnd, thisptr, 0, false);
- GenTreePtr assign = gtNewAssignNode(field, op1);
- GenTreePtr byReferenceStruct = gtCloneExpr(thisptr->gtGetOp1());
+ GenTree* field = gtNewFieldRef(TYP_BYREF, fldHnd, thisptr, 0, false);
+ GenTree* assign = gtNewAssignNode(field, op1);
+ GenTree* byReferenceStruct = gtCloneExpr(thisptr->gtGetOp1());
assert(byReferenceStruct != nullptr);
impPushOnStack(byReferenceStruct, typeInfo(TI_STRUCT, clsHnd));
retNode = assign;
{
op1 = impPopStack().val;
CORINFO_FIELD_HANDLE fldHnd = info.compCompHnd->getFieldInClass(clsHnd, 0);
- GenTreePtr field = gtNewFieldRef(TYP_BYREF, fldHnd, op1, 0, false);
+ GenTree* field = gtNewFieldRef(TYP_BYREF, fldHnd, op1, 0, false);
retNode = field;
break;
}
JITDUMP("\nimpIntrinsic: Expanding %sSpan<T>.get_Item, T=%s, sizeof(T)=%u\n", isReadOnly ? "ReadOnly" : "",
info.compCompHnd->getClassName(spanElemHnd), elemSize);
- GenTreePtr index = impPopStack().val;
- GenTreePtr ptrToSpan = impPopStack().val;
- GenTreePtr indexClone = nullptr;
- GenTreePtr ptrToSpanClone = nullptr;
+ GenTree* index = impPopStack().val;
+ GenTree* ptrToSpan = impPopStack().val;
+ GenTree* indexClone = nullptr;
+ GenTree* ptrToSpanClone = nullptr;
#if defined(DEBUG)
if (verbose)
// Bounds check
CORINFO_FIELD_HANDLE lengthHnd = info.compCompHnd->getFieldInClass(clsHnd, 1);
const unsigned lengthOffset = info.compCompHnd->getFieldOffset(lengthHnd);
- GenTreePtr length = gtNewFieldRef(TYP_INT, lengthHnd, ptrToSpan, lengthOffset, false);
- GenTreePtr boundsCheck = new (this, GT_ARR_BOUNDS_CHECK)
+ GenTree* length = gtNewFieldRef(TYP_INT, lengthHnd, ptrToSpan, lengthOffset, false);
+ GenTree* boundsCheck = new (this, GT_ARR_BOUNDS_CHECK)
GenTreeBoundsChk(GT_ARR_BOUNDS_CHECK, TYP_VOID, index, length, SCK_RNGCHK_FAIL);
// Element access
- GenTreePtr indexIntPtr = impImplicitIorI4Cast(indexClone, TYP_I_IMPL);
- GenTreePtr sizeofNode = gtNewIconNode(elemSize);
- GenTreePtr mulNode = gtNewOperNode(GT_MUL, TYP_I_IMPL, indexIntPtr, sizeofNode);
+ GenTree* indexIntPtr = impImplicitIorI4Cast(indexClone, TYP_I_IMPL);
+ GenTree* sizeofNode = gtNewIconNode(elemSize);
+ GenTree* mulNode = gtNewOperNode(GT_MUL, TYP_I_IMPL, indexIntPtr, sizeofNode);
CORINFO_FIELD_HANDLE ptrHnd = info.compCompHnd->getFieldInClass(clsHnd, 0);
const unsigned ptrOffset = info.compCompHnd->getFieldOffset(ptrHnd);
- GenTreePtr data = gtNewFieldRef(TYP_BYREF, ptrHnd, ptrToSpanClone, ptrOffset, false);
- GenTreePtr result = gtNewOperNode(GT_ADD, TYP_BYREF, data, mulNode);
+ GenTree* data = gtNewFieldRef(TYP_BYREF, ptrHnd, ptrToSpanClone, ptrOffset, false);
+ GenTree* result = gtNewOperNode(GT_ADD, TYP_BYREF, data, mulNode);
// Prepare result
var_types resultType = JITtype2varType(sig->retType);
CORINFO_GENERICHANDLE_RESULT embedInfo;
info.compCompHnd->expandRawHandleIntrinsic(&resolvedToken, &embedInfo);
- GenTreePtr rawHandle = impLookupToTree(&resolvedToken, &embedInfo.lookup, gtTokenToIconFlags(memberRef),
- embedInfo.compileTimeHandle);
+ GenTree* rawHandle = impLookupToTree(&resolvedToken, &embedInfo.lookup, gtTokenToIconFlags(memberRef),
+ embedInfo.compileTimeHandle);
if (rawHandle == nullptr)
{
return nullptr;
unsigned rawHandleSlot = lvaGrabTemp(true DEBUGARG("rawHandle"));
impAssignTempGen(rawHandleSlot, rawHandle, clsHnd, (unsigned)CHECK_SPILL_NONE);
- GenTreePtr lclVar = gtNewLclvNode(rawHandleSlot, TYP_I_IMPL);
- GenTreePtr lclVarAddr = gtNewOperNode(GT_ADDR, TYP_I_IMPL, lclVar);
- var_types resultType = JITtype2varType(sig->retType);
- retNode = gtNewOperNode(GT_IND, resultType, lclVarAddr);
+ GenTree* lclVar = gtNewLclvNode(rawHandleSlot, TYP_I_IMPL);
+ GenTree* lclVarAddr = gtNewOperNode(GT_ADDR, TYP_I_IMPL, lclVar);
+ var_types resultType = JITtype2varType(sig->retType);
+ retNode = gtNewOperNode(GT_IND, resultType, lclVarAddr);
break;
}
noway_assert(varTypeIsFloating(op1));
#else // FEATURE_X87_DOUBLES
+ assert(varTypeIsFloating(op1));
if (op1->TypeGet() != callType)
{
- op1 = gtNewCastNode(callType, op1, callType);
+ op1 = gtNewCastNode(callType, op1, false, callType);
}
#endif // FEATURE_X87_DOUBLES
noway_assert(varTypeIsFloating(op1));
#else // FEATURE_X87_DOUBLES
+ assert(varTypeIsFloating(op1));
+ assert(varTypeIsFloating(op2));
if (op2->TypeGet() != callType)
{
- op2 = gtNewCastNode(callType, op2, callType);
+ op2 = gtNewCastNode(callType, op2, false, callType);
}
if (op1->TypeGet() != callType)
{
- op1 = gtNewCastNode(callType, op1, callType);
+ op1 = gtNewCastNode(callType, op1, false, callType);
}
#endif // FEATURE_X87_DOUBLES
/*****************************************************************************/
-GenTreePtr Compiler::impArrayAccessIntrinsic(
+GenTree* Compiler::impArrayAccessIntrinsic(
CORINFO_CLASS_HANDLE clsHnd, CORINFO_SIG_INFO* sig, int memberRef, bool readonlyCall, CorInfoIntrinsics intrinsicID)
{
/* If we are generating SMALL_CODE, we don't want to use intrinsics for
return nullptr;
}
- GenTreePtr val = nullptr;
+ GenTree* val = nullptr;
if (intrinsicID == CORINFO_INTRINSIC_Array_Set)
{
noway_assert((unsigned char)GT_ARR_MAX_RANK == GT_ARR_MAX_RANK);
- GenTreePtr inds[GT_ARR_MAX_RANK];
+ GenTree* inds[GT_ARR_MAX_RANK];
for (unsigned k = rank; k > 0; k--)
{
inds[k - 1] = impPopStack().val;
}
- GenTreePtr arr = impPopStack().val;
+ GenTree* arr = impPopStack().val;
assert(arr->gtType == TYP_REF);
- GenTreePtr arrElem =
+ GenTree* arrElem =
new (this, GT_ARR_ELEM) GenTreeArrElem(TYP_BYREF, arr, static_cast<unsigned char>(rank),
static_cast<unsigned char>(arrayElemSize), elemType, &inds[0]);
}
assert(verCurrentState.esStackDepth == 0);
- GenTreePtr op1 =
+ GenTree* op1 =
gtNewHelperCallNode(CORINFO_HELP_VERIFICATION, TYP_VOID, gtNewArgList(gtNewIconNode(block->bbCodeOffs)));
// verCurrentState.esStackDepth = 0;
impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
return ((target == context) || (target == info.compCompHnd->getParentType(context)));
}
-GenTreePtr Compiler::impImportLdvirtftn(GenTreePtr thisPtr,
- CORINFO_RESOLVED_TOKEN* pResolvedToken,
- CORINFO_CALL_INFO* pCallInfo)
+GenTree* Compiler::impImportLdvirtftn(GenTree* thisPtr,
+ CORINFO_RESOLVED_TOKEN* pResolvedToken,
+ CORINFO_CALL_INFO* pCallInfo)
{
if ((pCallInfo->methodFlags & CORINFO_FLG_EnC) && !(pCallInfo->classFlags & CORINFO_FLG_INTERFACE))
{
// CoreRT generic virtual method
if ((pCallInfo->sig.sigInst.methInstCount != 0) && IsTargetAbi(CORINFO_CORERT_ABI))
{
- GenTreePtr runtimeMethodHandle = nullptr;
+ GenTree* runtimeMethodHandle = nullptr;
if (pCallInfo->exactContextNeedsRuntimeLookup)
{
runtimeMethodHandle =
// We need a runtime lookup. CoreRT has a ReadyToRun helper for that too.
if (IsTargetAbi(CORINFO_CORERT_ABI))
{
- GenTreePtr ctxTree = getRuntimeContextTree(pCallInfo->codePointerLookup.lookupKind.runtimeLookupKind);
+ GenTree* ctxTree = getRuntimeContextTree(pCallInfo->codePointerLookup.lookupKind.runtimeLookupKind);
return impReadyToRunHelperToTree(pResolvedToken, CORINFO_HELP_READYTORUN_GENERIC_HANDLE, TYP_I_IMPL,
gtNewArgList(ctxTree), &pCallInfo->codePointerLookup.lookupKind);
#endif
// Get the exact descriptor for the static callsite
- GenTreePtr exactTypeDesc = impParentClassTokenToHandle(pResolvedToken);
+ GenTree* exactTypeDesc = impParentClassTokenToHandle(pResolvedToken);
if (exactTypeDesc == nullptr)
{ // compDonotInline()
return nullptr;
}
- GenTreePtr exactMethodDesc = impTokenToHandle(pResolvedToken);
+ GenTree* exactMethodDesc = impTokenToHandle(pResolvedToken);
if (exactMethodDesc == nullptr)
{ // compDonotInline()
return nullptr;
impSpillSpecialSideEff();
// Get get the expression to box from the stack.
- GenTreePtr op1 = nullptr;
- GenTreePtr op2 = nullptr;
+ GenTree* op1 = nullptr;
+ GenTree* op2 = nullptr;
StackEntry se = impPopStack();
CORINFO_CLASS_HANDLE operCls = se.seTypeInfo.GetClassHandle();
- GenTreePtr exprToBox = se.val;
+ GenTree* exprToBox = se.val;
// Look at what helper we should use.
CorInfoHelpFunc boxHelper = info.compCompHnd->getBoxHelper(pResolvedToken->hClass);
compCurBB->bbFlags |= BBF_HAS_NEWOBJ;
optMethodFlags |= OMF_HAS_NEWOBJ;
- GenTreePtr asg = gtNewTempAssign(impBoxTemp, op1);
+ GenTree* asg = gtNewTempAssign(impBoxTemp, op1);
- GenTreePtr asgStmt = impAppendTree(asg, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
+ GenTree* asgStmt = impAppendTree(asg, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
op1 = gtNewLclvNode(impBoxTemp, TYP_REF);
op2 = gtNewIconNode(TARGET_POINTER_SIZE, TYP_I_IMPL);
{
assert((varTypeIsFloating(srcTyp) && varTypeIsFloating(dstTyp)) ||
(varTypeIsIntegral(srcTyp) && varTypeIsIntegral(dstTyp)));
- exprToBox = gtNewCastNode(dstTyp, exprToBox, dstTyp);
+ exprToBox = gtNewCastNode(dstTyp, exprToBox, false, dstTyp);
}
op1 = gtNewAssignNode(gtNewOperNode(GT_IND, lclTyp, op1), exprToBox);
}
impSpillSideEffects(true, (unsigned)CHECK_SPILL_ALL DEBUGARG("impImportAndPushBox"));
// Set up this copy as a second assignment.
- GenTreePtr copyStmt = impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
+ GenTree* copyStmt = impAppendTree(op1, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
op1 = gtNewLclvNode(impBoxTemp, TYP_REF);
void Compiler::impImportNewObjArray(CORINFO_RESOLVED_TOKEN* pResolvedToken, CORINFO_CALL_INFO* pCallInfo)
{
- GenTreePtr classHandle = impParentClassTokenToHandle(pResolvedToken);
+ GenTree* classHandle = impParentClassTokenToHandle(pResolvedToken);
if (classHandle == nullptr)
{ // compDonotInline()
return;
assert(pCallInfo->sig.numArgs);
- GenTreePtr node;
+ GenTree* node;
GenTreeArgList* args;
//
// into lvaNewObjArrayArgs temp.
for (int i = pCallInfo->sig.numArgs - 1; i >= 0; i--)
{
- GenTreePtr arg = impImplicitIorI4Cast(impPopStack().val, TYP_INT);
+ GenTree* arg = impImplicitIorI4Cast(impPopStack().val, TYP_INT);
- GenTreePtr dest = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK);
- dest = gtNewOperNode(GT_ADDR, TYP_I_IMPL, dest);
- dest = gtNewOperNode(GT_ADD, TYP_I_IMPL, dest,
+ GenTree* dest = gtNewLclvNode(lvaNewObjArrayArgs, TYP_BLK);
+ dest = gtNewOperNode(GT_ADDR, TYP_I_IMPL, dest);
+ dest = gtNewOperNode(GT_ADD, TYP_I_IMPL, dest,
new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, sizeof(INT32) * i));
dest = gtNewOperNode(GT_IND, TYP_INT, dest);
impPushOnStack(node, typeInfo(TI_REF, pResolvedToken->hClass));
}
-GenTreePtr Compiler::impTransformThis(GenTreePtr thisPtr,
- CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
- CORINFO_THIS_TRANSFORM transform)
+GenTree* Compiler::impTransformThis(GenTree* thisPtr,
+ CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
+ CORINFO_THIS_TRANSFORM transform)
{
switch (transform)
{
case CORINFO_DEREF_THIS:
{
- GenTreePtr obj = thisPtr;
+ GenTree* obj = thisPtr;
// This does a LDIND on the obj, which should be a byref. pointing to a ref
impBashVarAddrsToI(obj);
// method from System.Object or System.ValueType. The EE does not provide us with
// "unboxed" versions of these methods.
- GenTreePtr obj = thisPtr;
+ GenTree* obj = thisPtr;
assert(obj->TypeGet() == TYP_BYREF || obj->TypeGet() == TYP_I_IMPL);
obj = gtNewObjNode(pConstrainedResolvedToken->hClass, obj);
/* Get the function pointer */
- GenTreePtr fptr = impPopStack().val;
+ GenTree* fptr = impPopStack().val;
// The function pointer is typically a sized to match the target pointer size
// However, stubgen IL optimization can change LDC.I8 to LDC.I4
/*****************************************************************************/
-void Compiler::impPopArgsForUnmanagedCall(GenTreePtr call, CORINFO_SIG_INFO* sig)
+void Compiler::impPopArgsForUnmanagedCall(GenTree* call, CORINFO_SIG_INFO* sig)
{
assert(call->gtFlags & GTF_CALL_UNMANAGED);
/* The argument list is now "clean" - no out-of-order side effects
* Pop the argument list in reverse order */
- GenTreePtr args = call->gtCall.gtCallArgs = impPopRevList(sig->numArgs, sig, sig->numArgs - argsToReverse);
+ GenTree* args = call->gtCall.gtCallArgs = impPopRevList(sig->numArgs, sig, sig->numArgs - argsToReverse);
if (call->gtCall.gtCallMoreFlags & GTF_CALL_M_UNMGD_THISCALL)
{
- GenTreePtr thisPtr = args->Current();
+ GenTree* thisPtr = args->Current();
impBashVarAddrsToI(thisPtr);
assert(thisPtr->TypeGet() == TYP_I_IMPL || thisPtr->TypeGet() == TYP_BYREF);
}
// initializtion. Otherwise, nullptr.
//
-GenTreePtr Compiler::impInitClass(CORINFO_RESOLVED_TOKEN* pResolvedToken)
+GenTree* Compiler::impInitClass(CORINFO_RESOLVED_TOKEN* pResolvedToken)
{
CorInfoInitClassResult initClassResult =
info.compCompHnd->initClass(pResolvedToken->hField, info.compMethodHnd, impTokenLookupContextHandle);
}
BOOL runtimeLookup;
- GenTreePtr node = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup);
+ GenTree* node = impParentClassTokenToHandle(pResolvedToken, &runtimeLookup);
if (node == nullptr)
{
return node;
}
-GenTreePtr Compiler::impImportStaticReadOnlyField(void* fldAddr, var_types lclTyp)
+GenTree* Compiler::impImportStaticReadOnlyField(void* fldAddr, var_types lclTyp)
{
- GenTreePtr op1 = nullptr;
+ GenTree* op1 = nullptr;
switch (lclTyp)
{
return op1;
}
-GenTreePtr Compiler::impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolvedToken,
- CORINFO_ACCESS_FLAGS access,
- CORINFO_FIELD_INFO* pFieldInfo,
- var_types lclTyp)
+GenTree* Compiler::impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolvedToken,
+ CORINFO_ACCESS_FLAGS access,
+ CORINFO_FIELD_INFO* pFieldInfo,
+ var_types lclTyp)
{
- GenTreePtr op1;
+ GenTree* op1;
switch (pFieldInfo->fieldAccessor)
{
CORINFO_LOOKUP_KIND kind = info.compCompHnd->getLocationOfThisType(info.compMethodHnd);
assert(kind.needsRuntimeLookup);
- GenTreePtr ctxTree = getRuntimeContextTree(kind.runtimeLookupKind);
+ GenTree* ctxTree = getRuntimeContextTree(kind.runtimeLookupKind);
GenTreeArgList* args = gtNewArgList(ctxTree);
unsigned callFlags = 0;
for (unsigned i = helperInfo->numArgs; i > 0; --i)
{
const CORINFO_HELPER_ARG& helperArg = helperInfo->args[i - 1];
- GenTreePtr currentArg = nullptr;
+ GenTree* currentArg = nullptr;
switch (helperArg.argType)
{
case CORINFO_HELPER_ARG_TYPE_Field:
* Mark as CSE'able, and hoistable. Consider marking hoistable unless you're in the inlinee.
* Also, consider sticking this in the first basic block.
*/
- GenTreePtr callout = gtNewHelperCallNode(helperInfo->helperNum, TYP_VOID, args);
+ GenTree* callout = gtNewHelperCallNode(helperInfo->helperNum, TYP_VOID, args);
impAppendTree(callout, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
}
var_types Compiler::impImportCall(OPCODE opcode,
CORINFO_RESOLVED_TOKEN* pResolvedToken,
CORINFO_RESOLVED_TOKEN* pConstrainedResolvedToken,
- GenTreePtr newobjThis,
+ GenTree* newobjThis,
int prefixFlags,
CORINFO_CALL_INFO* callInfo,
IL_OFFSET rawILOffset)
unsigned clsFlags = 0;
unsigned mflags = 0;
unsigned argFlags = 0;
- GenTreePtr call = nullptr;
+ GenTree* call = nullptr;
GenTreeArgList* args = nullptr;
CORINFO_THIS_TRANSFORM constraintCallThisTransform = CORINFO_NO_THIS_TRANSFORM;
CORINFO_CONTEXT_HANDLE exactContextHnd = nullptr;
}
else
{
- call->gtIntrinsic.gtEntryPoint.addr = nullptr;
+ call->gtIntrinsic.gtEntryPoint.addr = nullptr;
+ call->gtIntrinsic.gtEntryPoint.accessType = IAT_VALUE;
}
}
#endif
return TYP_UNDEF;
}
- GenTreePtr stubAddr = impRuntimeLookupToTree(pResolvedToken, &callInfo->stubLookup, methHnd);
+ GenTree* stubAddr = impRuntimeLookupToTree(pResolvedToken, &callInfo->stubLookup, methHnd);
assert(!compDonotInline());
// This is the rough code to set up an indirect stub call
call = gtNewCallNode(CT_USER_FUNC, callInfo->hMethod, callRetTyp, nullptr, ilOffset);
call->gtCall.gtStubCallStubAddr = callInfo->stubLookup.constLookup.addr;
call->gtFlags |= GTF_CALL_VIRT_STUB;
- assert(callInfo->stubLookup.constLookup.accessType != IAT_PPVALUE);
+ assert(callInfo->stubLookup.constLookup.accessType != IAT_PPVALUE &&
+ callInfo->stubLookup.constLookup.accessType != IAT_RELPVALUE);
if (callInfo->stubLookup.constLookup.accessType == IAT_PVALUE)
{
call->gtCall.gtCallMoreFlags |= GTF_CALL_M_VIRTSTUB_REL_INDIRECT;
args = impPopList(sig->numArgs, sig);
- GenTreePtr thisPtr = impPopStack().val;
- thisPtr = impTransformThis(thisPtr, pConstrainedResolvedToken, callInfo->thisTransform);
+ GenTree* thisPtr = impPopStack().val;
+ thisPtr = impTransformThis(thisPtr, pConstrainedResolvedToken, callInfo->thisTransform);
assert(thisPtr != nullptr);
// Clone the (possibly transformed) "this" pointer
- GenTreePtr thisPtrCopy;
+ GenTree* thisPtrCopy;
thisPtr = impCloneExpr(thisPtr, &thisPtrCopy, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("LDVIRTFTN this pointer"));
- GenTreePtr fptr = impImportLdvirtftn(thisPtr, pResolvedToken, callInfo);
+ GenTree* fptr = impImportLdvirtftn(thisPtr, pResolvedToken, callInfo);
assert(fptr != nullptr);
thisPtr = nullptr; // can't reuse it
assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_VARARG);
assert((sig->callConv & CORINFO_CALLCONV_MASK) != CORINFO_CALLCONV_NATIVEVARARG);
- GenTreePtr fptr =
+ GenTree* fptr =
impLookupToTree(pResolvedToken, &callInfo->codePointerLookup, GTF_ICON_FTN_ADDR, callInfo->hMethod);
if (compDonotInline())
return TYP_UNDEF;
}
- GenTreePtr cookie = eeGetPInvokeCookie(sig);
+ GenTree* cookie = eeGetPInvokeCookie(sig);
// This cookie is required to be either a simple GT_CNS_INT or
// an indirection of a GT_CNS_INT
//
- GenTreePtr cookieConst = cookie;
+ GenTree* cookieConst = cookie;
if (cookie->gtOper == GT_IND)
{
cookieConst = cookie->gtOp.gtOp1;
varCookie = info.compCompHnd->getVarArgsHandle(sig, &pVarCookie);
assert((!varCookie) != (!pVarCookie));
- GenTreePtr cookie = gtNewIconEmbHndNode(varCookie, pVarCookie, GTF_ICON_VARG_HDL, sig);
+ GenTree* cookie = gtNewIconEmbHndNode(varCookie, pVarCookie, GTF_ICON_VARG_HDL, sig);
assert(extraArg == nullptr);
extraArg = gtNewArgList(cookie);
assert(opcode != CEE_CALLI);
- GenTreePtr instParam;
- BOOL runtimeLookup;
+ GenTree* instParam;
+ BOOL runtimeLookup;
// Instantiated generic method
if (((SIZE_T)exactContextHnd & CORINFO_CONTEXTFLAGS_MASK) == CORINFO_CONTEXTFLAGS_METHOD)
if (!(mflags & CORINFO_FLG_STATIC) && !((opcode == CEE_NEWOBJ) && (newobjThis == nullptr)))
{
- GenTreePtr obj;
+ GenTree* obj;
if (opcode == CEE_NEWOBJ)
{
// True virtual or indirect calls, shouldn't pass in a callee handle.
CORINFO_METHOD_HANDLE exactCalleeHnd =
((call->gtCall.gtCallType != CT_USER_FUNC) || call->gtCall.IsVirtual()) ? nullptr : methHnd;
- GenTreePtr thisArg = call->gtCall.gtCallObjp;
+ GenTree* thisArg = call->gtCall.gtCallObjp;
if (info.compCompHnd->canTailCall(info.compMethodHnd, methHnd, exactCalleeHnd, explicitTailCall))
{
if (compIsForInlining() && opcode == CEE_CALLVIRT)
{
- GenTreePtr callObj = call->gtCall.gtCallObjp;
+ GenTree* callObj = call->gtCall.gtCallObjp;
assert(callObj != nullptr);
if ((call->gtCall.IsVirtual() || (call->gtFlags & GTF_CALL_NULLCHECK)) &&
if (checkForSmallType && varTypeIsIntegral(callRetTyp) && genTypeSize(callRetTyp) < genTypeSize(TYP_INT))
{
- call = gtNewCastNode(genActualType(callRetTyp), call, callRetTyp);
+ call = gtNewCastNode(genActualType(callRetTyp), call, false, callRetTyp);
}
}
tlAndN.m_num = 0;
StackEntry se = impPopStack();
assert(se.seTypeInfo.GetType() == TI_INT);
- GenTreePtr val = se.val;
+ GenTree* val = se.val;
assert(val->IsCnsIntOrI());
tlAndN.m_tl = (TestLabel)val->AsIntConCommon()->IconValue();
}
{
StackEntry se = impPopStack();
assert(se.seTypeInfo.GetType() == TI_INT);
- GenTreePtr val = se.val;
+ GenTree* val = se.val;
assert(val->IsCnsIntOrI());
tlAndN.m_num = val->AsIntConCommon()->IconValue();
se = impPopStack();
}
StackEntry expSe = impPopStack();
- GenTreePtr node = expSe.val;
+ GenTree* node = expSe.val;
// There are a small number of special cases, where we actually put the annotation on a subnode.
if (tlAndN.m_tl == TL_LoopHoist && tlAndN.m_num >= 100)
// a GT_IND of a static field address, which should be the sum of a (hoistable) helper call and possibly some
// offset within the the static field block whose address is returned by the helper call.
// The annotation is saying that this address calculation, but not the entire access, should be hoisted.
- GenTreePtr helperCall = nullptr;
+ GenTree* helperCall = nullptr;
assert(node->OperGet() == GT_IND);
tlAndN.m_num -= 100;
GetNodeTestData()->Set(node->gtOp.gtOp1, tlAndN);
// Return Value:
// Returns new GenTree node after fixing struct return of call node
//
-GenTreePtr Compiler::impFixupCallStructReturn(GenTreeCall* call, CORINFO_CLASS_HANDLE retClsHnd)
+GenTree* Compiler::impFixupCallStructReturn(GenTreeCall* call, CORINFO_CLASS_HANDLE retClsHnd)
{
if (!varTypeIsStruct(call))
{
Note that this method is only call for !_TARGET_X86_
*/
-GenTreePtr Compiler::impFixupStructReturnType(GenTreePtr op, CORINFO_CLASS_HANDLE retClsHnd)
+GenTree* Compiler::impFixupStructReturnType(GenTree* op, CORINFO_CLASS_HANDLE retClsHnd)
{
assert(varTypeIsStruct(info.compRetType));
assert(info.compRetBuffArg == BAD_VAR_NUM);
}
else if (op->gtOper == GT_OBJ)
{
- GenTreePtr op1 = op->AsObj()->Addr();
+ GenTree* op1 = op->AsObj()->Addr();
// We will fold away OBJ/ADDR
// except for OBJ/ADDR/INDEX
BasicBlock* step = DUMMY_INIT(NULL);
unsigned encFinallies = 0; // Number of enclosing finallies.
- GenTreePtr endCatches = NULL;
- GenTreePtr endLFin = NULL; // The statement tree to indicate the end of locally-invoked finally.
+ GenTree* endCatches = NULL;
+ GenTree* endLFin = NULL; // The statement tree to indicate the end of locally-invoked finally.
unsigned XTnum;
EHblkDsc* HBtab;
BADCODE("leave out of fault/finally block");
// Create the call to CORINFO_HELP_ENDCATCH
- GenTreePtr endCatch = gtNewHelperCallNode(CORINFO_HELP_ENDCATCH, TYP_VOID);
+ GenTree* endCatch = gtNewHelperCallNode(CORINFO_HELP_ENDCATCH, TYP_VOID);
// Make a list of all the currently pending endCatches
if (endCatches)
}
#endif
- GenTreePtr lastStmt;
+ GenTree* lastStmt;
if (endCatches)
{
}
#endif
- GenTreePtr lastStmt;
+ GenTree* lastStmt;
if (endCatches)
{
* Determine the result type of an arithemetic operation
* On 64-bit inserts upcasts when native int is mixed with int32
*/
-var_types Compiler::impGetByRefResultType(genTreeOps oper, bool fUnsigned, GenTreePtr* pOp1, GenTreePtr* pOp2)
+var_types Compiler::impGetByRefResultType(genTreeOps oper, bool fUnsigned, GenTree** pOp1, GenTree** pOp2)
{
- var_types type = TYP_UNDEF;
- GenTreePtr op1 = *pOp1, op2 = *pOp2;
+ var_types type = TYP_UNDEF;
+ GenTree* op1 = *pOp1;
+ GenTree* op2 = *pOp2;
// Arithemetic operations are generally only allowed with
// primitive types, but certain operations are allowed
if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
{
// insert an explicit upcast
- op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
+ op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
}
#endif // _TARGET_64BIT_
if ((genActualType(op2->TypeGet()) != TYP_I_IMPL))
{
// insert an explicit upcast
- op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
+ op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
}
#endif // _TARGET_64BIT_
if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
{
// insert an explicit upcast
- op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
+ op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
}
}
else if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
{
// insert an explicit upcast
- op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
+ op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
}
#endif // _TARGET_64BIT_
if (genActualType(op1->TypeGet()) != TYP_I_IMPL)
{
// insert an explicit upcast
- op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
+ op1 = *pOp1 = gtNewCastNode(TYP_I_IMPL, op1, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
}
else if (genActualType(op2->TypeGet()) != TYP_I_IMPL)
{
// insert an explicit upcast
- op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(fUnsigned ? TYP_U_IMPL : TYP_I_IMPL));
+ op2 = *pOp2 = gtNewCastNode(TYP_I_IMPL, op2, fUnsigned, fUnsigned ? TYP_U_IMPL : TYP_I_IMPL);
}
type = TYP_I_IMPL;
// Notes:
// May expand into a series of runtime checks or a helper call.
-GenTreePtr Compiler::impCastClassOrIsInstToTree(GenTreePtr op1,
- GenTreePtr op2,
- CORINFO_RESOLVED_TOKEN* pResolvedToken,
- bool isCastClass)
+GenTree* Compiler::impCastClassOrIsInstToTree(GenTree* op1,
+ GenTree* op2,
+ CORINFO_RESOLVED_TOKEN* pResolvedToken,
+ bool isCastClass)
{
assert(op1->TypeGet() == TYP_REF);
impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("bubbling QMark2"));
- GenTreePtr temp;
- GenTreePtr condMT;
+ GenTree* temp;
+ GenTree* condMT;
//
// expand the methodtable match:
//
// thus we can use gtClone(op1) from now on
//
- GenTreePtr op2Var = op2;
+ GenTree* op2Var = op2;
if (isCastClass)
{
op2Var = fgInsertCommaFormTemp(&op2);
temp->gtFlags |= GTF_EXCEPT;
condMT = gtNewOperNode(GT_NE, TYP_INT, temp, op2);
- GenTreePtr condNull;
+ GenTree* condNull;
//
// expand the null check:
//
//
// expand the true and false trees for the condMT
//
- GenTreePtr condFalse = gtClone(op1);
- GenTreePtr condTrue;
+ GenTree* condFalse = gtClone(op1);
+ GenTree* condTrue;
if (isCastClass)
{
//
#define USE_QMARK_TREES
#ifdef USE_QMARK_TREES
- GenTreePtr qmarkMT;
+ GenTree* qmarkMT;
//
// Generate first QMARK - COLON tree
//
qmarkMT = gtNewQmarkNode(TYP_REF, condMT, temp);
condMT->gtFlags |= GTF_RELOP_QMARK;
- GenTreePtr qmarkNull;
+ GenTree* qmarkNull;
//
// Generate second QMARK - COLON tree
//
unsigned nxtStmtIndex = impInitBlockLineInfo();
IL_OFFSET nxtStmtOffs;
- GenTreePtr arrayNodeFrom, arrayNodeTo, arrayNodeToIndex;
+ GenTree* arrayNodeFrom;
+ GenTree* arrayNodeTo;
+ GenTree* arrayNodeToIndex;
CorInfoHelpFunc helper;
CorInfoIsAccessAllowedResult accessAllowedResult;
CORINFO_HELPER_DESC calloutHelper;
if (impCurStmtOffs != BAD_IL_OFFSET && opts.compDbgCode)
{
- GenTreePtr placeHolder = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
+ GenTree* placeHolder = new (this, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
impAppendTree(placeHolder, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
assert(impCurStmtOffs == BAD_IL_OFFSET);
CORINFO_CLASS_HANDLE stelemClsHnd = DUMMY_INIT(NULL);
var_types lclTyp, ovflType = TYP_UNKNOWN;
- GenTreePtr op1 = DUMMY_INIT(NULL);
- GenTreePtr op2 = DUMMY_INIT(NULL);
+ GenTree* op1 = DUMMY_INIT(NULL);
+ GenTree* op2 = DUMMY_INIT(NULL);
GenTreeArgList* args = nullptr; // What good do these "DUMMY_INIT"s do?
- GenTreePtr newObjThisPtr = DUMMY_INIT(NULL);
+ GenTree* newObjThisPtr = DUMMY_INIT(NULL);
bool uns = DUMMY_INIT(false);
bool isLocal = false;
unsigned lclNum;
var_types type;
- GenTreePtr op3;
+ GenTree* op3;
genTreeOps oper;
unsigned size;
cval.dblVal = getR4LittleEndian(codeAddr);
JITDUMP(" %#.17g", cval.dblVal);
{
- GenTreePtr cnsOp = gtNewDconNode(cval.dblVal);
+ GenTree* cnsOp = gtNewDconNode(cval.dblVal);
#if !FEATURE_X87_DOUBLES
// X87 stack doesn't differentiate between float/double
// so R4 is treated as R8, but everybody else does
if (varTypeIsI(op1->TypeGet()) && (genActualType(lclTyp) == TYP_INT))
{
assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
- op1 = gtNewCastNode(TYP_INT, op1, TYP_INT);
+ op1 = gtNewCastNode(TYP_INT, op1, false, TYP_INT);
}
#endif // _TARGET_64BIT_
if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) &&
varTypeIsFloating(op2->gtType))
{
- op1 = gtNewCastNode(op2->TypeGet(), op1, op2->TypeGet());
+ op1 = gtNewCastNode(op2->TypeGet(), op1, false, op2->TypeGet());
}
#endif // !FEATURE_X87_DOUBLES
if (op1->TypeGet() != type)
{
// We insert a cast of op1 to 'type'
- op1 = gtNewCastNode(type, op1, type);
+ op1 = gtNewCastNode(type, op1, false, type);
}
if (op2->TypeGet() != type)
{
// We insert a cast of op2 to 'type'
- op2 = gtNewCastNode(type, op2, type);
+ op2 = gtNewCastNode(type, op2, false, type);
}
}
#endif // !FEATURE_X87_DOUBLES
#ifdef _TARGET_64BIT_
if (varTypeIsI(op1->TypeGet()) && (genActualType(op2->TypeGet()) == TYP_INT))
{
- op2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
+ op2 = gtNewCastNode(TYP_I_IMPL, op2, uns, uns ? TYP_U_IMPL : TYP_I_IMPL);
}
else if (varTypeIsI(op2->TypeGet()) && (genActualType(op1->TypeGet()) == TYP_INT))
{
- op1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
+ op1 = gtNewCastNode(TYP_I_IMPL, op1, uns, uns ? TYP_U_IMPL : TYP_I_IMPL);
}
#endif // _TARGET_64BIT_
op1->gtFlags |= GTF_RELOP_NAN_UN | GTF_UNSIGNED;
}
+ // Fold result, if possible.
+ op1 = gtFoldExpr(op1);
+
impPushOnStack(op1, tiRetVal);
break;
#ifdef _TARGET_64BIT_
if ((op1->TypeGet() == TYP_I_IMPL) && (genActualType(op2->TypeGet()) == TYP_INT))
{
- op2 = gtNewCastNode(TYP_I_IMPL, op2, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
+ op2 = gtNewCastNode(TYP_I_IMPL, op2, uns, uns ? TYP_U_IMPL : TYP_I_IMPL);
}
else if ((op2->TypeGet() == TYP_I_IMPL) && (genActualType(op1->TypeGet()) == TYP_INT))
{
- op1 = gtNewCastNode(TYP_I_IMPL, op1, (var_types)(uns ? TYP_U_IMPL : TYP_I_IMPL));
+ op1 = gtNewCastNode(TYP_I_IMPL, op1, uns, uns ? TYP_U_IMPL : TYP_I_IMPL);
}
#endif // _TARGET_64BIT_
if (op1->TypeGet() == TYP_DOUBLE)
{
// We insert a cast of op2 to TYP_DOUBLE
- op2 = gtNewCastNode(TYP_DOUBLE, op2, TYP_DOUBLE);
+ op2 = gtNewCastNode(TYP_DOUBLE, op2, false, TYP_DOUBLE);
}
else if (op2->TypeGet() == TYP_DOUBLE)
{
// We insert a cast of op1 to TYP_DOUBLE
- op1 = gtNewCastNode(TYP_DOUBLE, op1, TYP_DOUBLE);
+ op1 = gtNewCastNode(TYP_DOUBLE, op1, false, TYP_DOUBLE);
}
}
}
#if SMALL_TREE_NODES
if (callNode)
{
- op1 = gtNewCastNodeL(type, op1, lclTyp);
+ op1 = gtNewCastNodeL(type, op1, uns, lclTyp);
}
else
#endif // SMALL_TREE_NODES
{
- op1 = gtNewCastNode(type, op1, lclTyp);
+ op1 = gtNewCastNode(type, op1, uns, lclTyp);
}
if (ovfl)
{
op1->gtFlags |= (GTF_OVERFLOW | GTF_EXCEPT);
}
- if (uns)
- {
- op1->gtFlags |= GTF_UNSIGNED;
- }
impPushOnStack(op1, tiRetVal);
break;
if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT))
{
assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
- op2 = gtNewCastNode(TYP_INT, op2, TYP_INT);
+ op2 = gtNewCastNode(TYP_INT, op2, false, TYP_INT);
}
// Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatiblity
//
if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT))
{
assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
- op2 = gtNewCastNode(TYP_I_IMPL, op2, TYP_I_IMPL);
+ op2 = gtNewCastNode(TYP_I_IMPL, op2, false, TYP_I_IMPL);
}
}
#endif // _TARGET_64BIT_
if (genActualType(op1->gtType) == TYP_INT)
{
assert(!tiVerificationNeeded); // We should have thrown the VerificationException before.
- op1 = gtNewCastNode(TYP_I_IMPL, op1, TYP_I_IMPL);
+ op1 = gtNewCastNode(TYP_I_IMPL, op1, false, TYP_I_IMPL);
}
#endif
goto DO_LDFTN;
}
- GenTreePtr fptr = impImportLdvirtftn(op1, &resolvedToken, &callInfo);
+ GenTree* fptr = impImportLdvirtftn(op1, &resolvedToken, &callInfo);
if (compDonotInline())
{
return;
int aflags = isLoadAddress ? CORINFO_ACCESS_ADDRESS : CORINFO_ACCESS_GET;
- GenTreePtr obj = nullptr;
+ GenTree* obj = nullptr;
typeInfo* tiObj = nullptr;
CORINFO_CLASS_HANDLE objType = nullptr; // used for fields
if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
{
- GenTreePtr helperNode = impInitClass(&resolvedToken);
+ GenTree* helperNode = impInitClass(&resolvedToken);
if (compDonotInline())
{
return;
JITDUMP(" %08X", resolvedToken.token);
- int aflags = CORINFO_ACCESS_SET;
- GenTreePtr obj = nullptr;
- typeInfo* tiObj = nullptr;
- typeInfo tiVal;
+ int aflags = CORINFO_ACCESS_SET;
+ GenTree* obj = nullptr;
+ typeInfo* tiObj = nullptr;
+ typeInfo tiVal;
/* Pull the value from the stack */
StackEntry se = impPopStack();
if ((op1->TypeGet() != op2->TypeGet()) && op2->OperIsConst() && varTypeIsIntOrI(op2->TypeGet()) &&
varTypeIsLong(op1->TypeGet()))
{
- op2 = gtNewCastNode(op1->TypeGet(), op2, op1->TypeGet());
+ op2 = gtNewCastNode(op1->TypeGet(), op2, false, op1->TypeGet());
}
#endif
//
if (varTypeIsI(op2->gtType) && (genActualType(lclTyp) == TYP_INT))
{
- op2 = gtNewCastNode(TYP_INT, op2, TYP_INT);
+ op2 = gtNewCastNode(TYP_INT, op2, false, TYP_INT);
}
// Allow an upcast of op2 from a 32-bit Int into TYP_I_IMPL for x86 JIT compatiblity
//
if (varTypeIsI(lclTyp) && (genActualType(op2->gtType) == TYP_INT))
{
- op2 = gtNewCastNode(TYP_I_IMPL, op2, TYP_I_IMPL);
+ op2 = gtNewCastNode(TYP_I_IMPL, op2, false, TYP_I_IMPL);
}
}
#endif
if ((op1->TypeGet() != op2->TypeGet()) && varTypeIsFloating(op1->gtType) &&
varTypeIsFloating(op2->gtType))
{
- op2 = gtNewCastNode(op1->TypeGet(), op2, op1->TypeGet());
+ op2 = gtNewCastNode(op1->TypeGet(), op2, false, op1->TypeGet());
}
#endif // !FEATURE_X87_DOUBLES
if (fieldInfo.fieldFlags & CORINFO_FLG_FIELD_INITCLASS)
{
- GenTreePtr helperNode = impInitClass(&resolvedToken);
+ GenTree* helperNode = impInitClass(&resolvedToken);
if (compDonotInline())
{
return;
// ((*clone == typeToken) ? nop : helper(clone, typeToken));
// push(clone + TARGET_POINTER_SIZE)
//
- GenTreePtr cloneOperand;
+ GenTree* cloneOperand;
op1 = impCloneExpr(op1, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("inline UNBOX clone1"));
op1 = gtNewOperNode(GT_IND, TYP_I_IMPL, op1);
- GenTreePtr condBox = gtNewOperNode(GT_EQ, TYP_INT, op1, op2);
+ GenTree* condBox = gtNewOperNode(GT_EQ, TYP_INT, op1, op2);
op1 = impCloneExpr(cloneOperand, &cloneOperand, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
nullptr DEBUGARG("inline UNBOX clone2"));
* (transfer, not a cast.)
*
*/
-void Compiler::impMarkLclDstNotPromotable(unsigned tmpNum, GenTreePtr src, CORINFO_CLASS_HANDLE hClass)
+void Compiler::impMarkLclDstNotPromotable(unsigned tmpNum, GenTree* src, CORINFO_CLASS_HANDLE hClass)
{
if (src->gtOper == GT_CALL && src->gtCall.IsVarargs() && IsHfa(hClass))
{
#endif // _TARGET_ARM_
#if FEATURE_MULTIREG_RET
-GenTreePtr Compiler::impAssignMultiRegTypeToVar(GenTreePtr op, CORINFO_CLASS_HANDLE hClass)
+GenTree* Compiler::impAssignMultiRegTypeToVar(GenTree* op, CORINFO_CLASS_HANDLE hClass)
{
unsigned tmpNum = lvaGrabTemp(true DEBUGARG("Return value temp for multireg return."));
impAssignTempGen(tmpNum, op, hClass, (unsigned)CHECK_SPILL_ALL);
- GenTreePtr ret = gtNewLclvNode(tmpNum, op->gtType);
+ GenTree* ret = gtNewLclvNode(tmpNum, lvaTable[tmpNum].lvType);
// TODO-1stClassStructs: Handle constant propagation and CSE-ing of multireg returns.
ret->gtFlags |= GTF_DONT_CSE;
if (returnType != originalCallType)
{
+ JITDUMP("Return type mismatch, have %s, needed %s\n", varTypeName(returnType),
+ varTypeName(originalCallType));
compInlineResult->NoteFatal(InlineObservation::CALLSITE_RETURN_TYPE_MISMATCH);
return false;
}
fgCastNeeded(op2, fncRealRetType))
{
// Small-typed return values are normalized by the callee
- op2 = gtNewCastNode(TYP_INT, op2, fncRealRetType);
+ op2 = gtNewCastNode(TYP_INT, op2, false, fncRealRetType);
}
}
#endif // defined(_TARGET_ARM64_)
{
assert(iciCall->HasRetBufArg());
- GenTreePtr dest = gtCloneExpr(iciCall->gtCallArgs->gtOp.gtOp1);
+ GenTree* dest = gtCloneExpr(iciCall->gtCallArgs->gtOp.gtOp1);
// spill temp only exists if there are multiple return points
if (fgNeedReturnSpillTemp())
{
else if (info.compRetBuffArg != BAD_VAR_NUM)
{
// Assign value to return buff (first param)
- GenTreePtr retBuffAddr = gtNewLclvNode(info.compRetBuffArg, TYP_BYREF, impCurStmtOffs);
+ GenTree* retBuffAddr = gtNewLclvNode(info.compRetBuffArg, TYP_BYREF, impCurStmtOffs);
op2 = impAssignStructPtr(retBuffAddr, op2, retClsHnd, (unsigned)CHECK_SPILL_ALL);
impAppendTree(op2, (unsigned)CHECK_SPILL_NONE, impCurStmtOffs);
// on the stack, its lifetime is hard to determine, simply
// don't reuse such temps.
- GenTreePtr addStmt = nullptr;
+ GenTree* addStmt = nullptr;
/* Do the successors of 'block' have any other predecessors ?
We do not want to do some of the optimizations related to multiRef
JITDUMP("\nSpilling stack entries into temps\n");
for (level = 0, tempNum = baseTmp; level < verCurrentState.esStackDepth; level++, tempNum++)
{
- GenTreePtr tree = verCurrentState.esStack[level].val;
+ GenTree* tree = verCurrentState.esStack[level].val;
/* VC generates code where it pushes a byref from one branch, and an int (ldc.i4 0) from
the other. This should merge to a byref in unverifiable code.
{
// Spill clique has decided this should be "native int", but this block only pushes an "int".
// Insert a sign-extension to "native int" so we match the clique.
- verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, TYP_I_IMPL);
+ verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, false, TYP_I_IMPL);
}
// Consider the case where one branch left a 'byref' on the stack and the other leaves
{
// Spill clique has decided this should be "byref", but this block only pushes an "int".
// Insert a sign-extension to "native int" so we match the clique size.
- verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, TYP_I_IMPL);
+ verCurrentState.esStack[level].val = gtNewCastNode(TYP_I_IMPL, tree, false, TYP_I_IMPL);
}
}
#endif // _TARGET_64BIT_
{
// Spill clique has decided this should be "double", but this block only pushes a "float".
// Insert a cast to "double" so we match the clique.
- verCurrentState.esStack[level].val = gtNewCastNode(TYP_DOUBLE, tree, TYP_DOUBLE);
+ verCurrentState.esStack[level].val = gtNewCastNode(TYP_DOUBLE, tree, false, TYP_DOUBLE);
}
#endif // FEATURE_X87_DOUBLES
if (addStmt && !newTemps && gtHasRef(addStmt->gtStmt.gtStmtExpr, tempNum, false))
{
- GenTreePtr addTree = addStmt->gtStmt.gtStmtExpr;
+ GenTree* addTree = addStmt->gtStmt.gtStmtExpr;
if (addTree->gtOper == GT_JTRUE)
{
- GenTreePtr relOp = addTree->gtOp.gtOp1;
+ GenTree* relOp = addTree->gtOp.gtOp1;
assert(relOp->OperIsCompare());
var_types type = genActualType(relOp->gtOp.gtOp1->TypeGet());
EntryState* es = blk->bbEntryState;
for (unsigned level = 0; level < es->esStackDepth; level++)
{
- GenTreePtr tree = es->esStack[level].val;
+ GenTree* tree = es->esStack[level].val;
if ((tree->gtOper == GT_LCL_VAR) || (tree->gtOper == GT_LCL_FLD))
{
unsigned lclNum = tree->gtLclVarCommon.gtLclNum;
memcpy(block->bbEntryState->esStack, srcState->esStack, stackSize);
for (unsigned level = 0; level < srcState->esStackDepth; level++)
{
- GenTreePtr tree = srcState->esStack[level].val;
+ GenTree* tree = srcState->esStack[level].val;
block->bbEntryState->esStack[level].val = gtCloneExpr(tree);
}
}
*/
-BOOL Compiler::impIsAddressInLocal(GenTreePtr tree, GenTreePtr* lclVarTreeOut)
+BOOL Compiler::impIsAddressInLocal(GenTree* tree, GenTree** lclVarTreeOut)
{
if (tree->gtOper != GT_ADDR)
{
return FALSE;
}
- GenTreePtr op = tree->gtOp.gtOp1;
+ GenTree* op = tree->gtOp.gtOp1;
while (op->gtOper == GT_FIELD)
{
op = op->gtField.gtFldObj;
// Check if the callee has the same 'this' as the root.
if (pInlineInfo != nullptr)
{
- GenTreePtr thisArg = pInlineInfo->iciCall->gtCall.gtCallObjp;
+ GenTree* thisArg = pInlineInfo->iciCall->gtCall.gtCallObjp;
assert(thisArg);
bool isSameThis = impIsThis(thisArg);
inlineResult->NoteBool(InlineObservation::CALLSITE_IS_SAME_THIS, isSameThis);
/*****************************************************************************
*/
-void Compiler::impCheckCanInline(GenTreePtr call,
+void Compiler::impCheckCanInline(GenTree* call,
CORINFO_METHOD_HANDLE fncHandle,
unsigned methAttr,
CORINFO_CONTEXT_HANDLE exactContextHnd,
struct Param
{
Compiler* pThis;
- GenTreePtr call;
+ GenTree* call;
CORINFO_METHOD_HANDLE fncHandle;
unsigned methAttr;
CORINFO_CONTEXT_HANDLE exactContextHnd;
if (dwRestrictions & INLINE_SAME_THIS)
{
- GenTreePtr thisArg = pParam->call->gtCall.gtCallObjp;
+ GenTree* thisArg = pParam->call->gtCall.gtCallObjp;
assert(thisArg);
if (!pParam->pThis->impIsThis(thisArg))
inlCurArgInfo->argNode = curArgVal;
- GenTreePtr lclVarTree;
+ GenTree* lclVarTree;
if (impIsAddressInLocal(curArgVal, &lclVarTree) && varTypeIsStruct(lclVarTree))
{
inlCurArgInfo->argIsByRefToStructLocal = true;
{
assert(!compIsForInlining());
- GenTreePtr call = pInlineInfo->iciCall;
+ GenTree* call = pInlineInfo->iciCall;
CORINFO_METHOD_INFO* methInfo = &pInlineInfo->inlineCandidateInfo->methInfo;
unsigned clsAttr = pInlineInfo->inlineCandidateInfo->clsAttr;
InlArgInfo* inlArgInfo = pInlineInfo->inlArgInfo;
/* Get hold of the 'this' pointer and the argument list proper */
- GenTreePtr thisArg = call->gtCall.gtCallObjp;
- GenTreePtr argList = call->gtCall.gtCallArgs;
- unsigned argCnt = 0; // Count of the arguments
+ GenTree* thisArg = call->gtCall.gtCallObjp;
+ GenTree* argList = call->gtCall.gtCallArgs;
+ unsigned argCnt = 0; // Count of the arguments
assert((methInfo->args.hasThis()) == (thisArg != nullptr));
unsigned typeCtxtArg = methInfo->args.totalILArgs();
#endif // USER_ARGS_COME_LAST
- for (GenTreePtr argTmp = argList; argTmp; argTmp = argTmp->gtOp.gtOp2)
+ for (GenTree* argTmp = argList; argTmp; argTmp = argTmp->gtOp.gtOp2)
{
if (argTmp == argList && hasRetBuffArg)
{
// the inlining multiplier) for anything in that assembly.
// But we only need to normalize it if it is a TYP_STRUCT
// (which we need to do even if we have already set foundSIMDType).
- if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDClass(&(lclVarInfo[0].lclVerTypeInfo)))
+ if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDorHWSIMDClass(&(lclVarInfo[0].lclVerTypeInfo)))
{
if (sigType == TYP_STRUCT)
{
lclVarInfo[i].lclVerTypeInfo = verParseArgSigToTypeInfo(&methInfo->args, argLst);
#ifdef FEATURE_SIMD
- if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDClass(&(lclVarInfo[i].lclVerTypeInfo)))
+ if ((!foundSIMDType || (sigType == TYP_STRUCT)) && isSIMDorHWSIMDClass(&(lclVarInfo[i].lclVerTypeInfo)))
{
// If this is a SIMD class (i.e. in the SIMD assembly), then we will consider that we've
// found a SIMD type, even if this may not be a type we recognize (the assumption is that
/* Does the tree type match the signature type? */
- GenTreePtr inlArgNode = inlArgInfo[i].argNode;
+ GenTree* inlArgNode = inlArgInfo[i].argNode;
if (sigType != inlArgNode->gtType)
{
continue;
}
- inlArgNode = inlArgInfo[i].argNode = gtNewCastNode(TYP_INT, inlArgNode, sigType);
+ inlArgNode = inlArgInfo[i].argNode = gtNewCastNode(TYP_INT, inlArgNode, false, sigType);
inlArgInfo[i].argIsLclVar = false;
else if (genTypeSize(genActualType(inlArgNode->gtType)) < genTypeSize(sigType))
{
// This should only happen for int -> native int widening
- inlArgNode = inlArgInfo[i].argNode = gtNewCastNode(genActualType(sigType), inlArgNode, sigType);
+ inlArgNode = inlArgInfo[i].argNode =
+ gtNewCastNode(genActualType(sigType), inlArgNode, false, sigType);
inlArgInfo[i].argIsLclVar = false;
localsSig = info.compCompHnd->getArgNext(localsSig);
#ifdef FEATURE_SIMD
- if ((!foundSIMDType || (type == TYP_STRUCT)) && isSIMDClass(&(lclVarInfo[i + argCnt].lclVerTypeInfo)))
+ if ((!foundSIMDType || (type == TYP_STRUCT)) && isSIMDorHWSIMDClass(&(lclVarInfo[i + argCnt].lclVerTypeInfo)))
{
foundSIMDType = true;
if (featureSIMD && type == TYP_STRUCT)
}
#ifdef FEATURE_SIMD
- if (!foundSIMDType && (call->AsCall()->gtRetClsHnd != nullptr) && isSIMDClass(call->AsCall()->gtRetClsHnd))
+ if (!foundSIMDType && (call->AsCall()->gtRetClsHnd != nullptr) && isSIMDorHWSIMDClass(call->AsCall()->gtRetClsHnd))
{
foundSIMDType = true;
}
// This method will side effect inlArgInfo. It should only be called
// for actual uses of the argument in the inlinee.
-GenTreePtr Compiler::impInlineFetchArg(unsigned lclNum, InlArgInfo* inlArgInfo, InlLclVarInfo* lclVarInfo)
+GenTree* Compiler::impInlineFetchArg(unsigned lclNum, InlArgInfo* inlArgInfo, InlLclVarInfo* lclVarInfo)
{
// Cache the relevant arg and lcl info for this argument.
// We will modify argInfo but not lclVarInfo.
const InlLclVarInfo& lclInfo = lclVarInfo[lclNum];
const bool argCanBeModified = argInfo.argHasLdargaOp || argInfo.argHasStargOp;
const var_types lclTyp = lclInfo.lclTypeInfo;
- GenTreePtr op1 = nullptr;
+ GenTree* op1 = nullptr;
if (argInfo.argIsInvariant && !argCanBeModified)
{
op1 = gtCloneExpr(argInfo.argNode);
PREFIX_ASSUME(op1 != nullptr);
argInfo.argTmpNum = BAD_VAR_NUM;
+
+ // We may need to retype to ensure we match the callee's view of the type.
+ // Otherwise callee pass-throughs of arguments can create return type
+ // mismatches that block inlining.
+ //
+ // Note argument type mismatches that prevent inlining should
+ // have been caught in impInlineInitVars.
+ if (op1->TypeGet() != lclTyp)
+ {
+ op1->gtType = genActualType(lclTyp);
+ }
}
else if (argInfo.argIsLclVar && !argCanBeModified && !argInfo.argHasCallerLocalRef)
{
worry about it.
*/
-BOOL Compiler::impInlineIsThis(GenTreePtr tree, InlArgInfo* inlArgInfo)
+BOOL Compiler::impInlineIsThis(GenTree* tree, InlArgInfo* inlArgInfo)
{
assert(compIsForInlining());
return (tree->gtOper == GT_LCL_VAR && tree->gtLclVarCommon.gtLclNum == inlArgInfo[0].argTmpNum);
// is the set of pending trees that have not yet been added to the statement list,
// and which have been removed from verCurrentState.esStack[]
-BOOL Compiler::impInlineIsGuaranteedThisDerefBeforeAnySideEffects(GenTreePtr additionalTreesToBeEvaluatedBefore,
- GenTreePtr variableBeingDereferenced,
+BOOL Compiler::impInlineIsGuaranteedThisDerefBeforeAnySideEffects(GenTree* additionalTreesToBeEvaluatedBefore,
+ GenTree* variableBeingDereferenced,
InlArgInfo* inlArgInfo)
{
assert(compIsForInlining());
BasicBlock* block = compCurBB;
- GenTreePtr stmt;
- GenTreePtr expr;
+ GenTree* stmt;
+ GenTree* expr;
if (block != fgFirstBB)
{
// method may be marked as "noinline" to short-circuit any
// future assessments of calls to this method.
-void Compiler::impMarkInlineCandidate(GenTreePtr callNode,
+void Compiler::impMarkInlineCandidate(GenTree* callNode,
CORINFO_CONTEXT_HANDLE exactContextHnd,
bool exactContextNeedsRuntimeLookup,
CORINFO_CALL_INFO* callInfo)
case CORINFO_INTRINSIC_Round:
case CORINFO_INTRINSIC_Ceiling:
case CORINFO_INTRINSIC_Floor:
- // TODO-XArch-CQ: Update to work on non-AVX machines: https://github.com/dotnet/coreclr/issues/15908
- return compSupports(InstructionSet_SSE41) && canUseVexEncoding();
+ return compSupports(InstructionSet_SSE41);
default:
return false;
bool Compiler::IsIntrinsicImplementedByUserCall(CorInfoIntrinsics intrinsicId)
{
- // Currently, if an math intrisic is not implemented by target-specific
- // intructions, it will be implemented by a System.Math call. In the
- // future, if we turn to implementing some of them with helper callers,
+ // Currently, if a math intrinsic is not implemented by target-specific
+ // instructions, it will be implemented by a System.Math call. In the
+ // future, if we turn to implementing some of them with helper calls,
// this predicate needs to be revisited.
return !IsTargetIntrinsic(intrinsicId);
}
}
}
-bool Compiler::IsMathIntrinsic(GenTreePtr tree)
+bool Compiler::IsMathIntrinsic(GenTree* tree)
{
return (tree->OperGet() == GT_INTRINSIC) && IsMathIntrinsic(tree->gtIntrinsic.gtIntrinsicId);
}
}
// Fetch method attributes to see if method is marked final.
- const DWORD derivedMethodAttribs = info.compCompHnd->getMethodAttribs(derivedMethod);
- const bool derivedMethodIsFinal = ((derivedMethodAttribs & CORINFO_FLG_FINAL) != 0);
+ DWORD derivedMethodAttribs = info.compCompHnd->getMethodAttribs(derivedMethod);
+ const bool derivedMethodIsFinal = ((derivedMethodAttribs & CORINFO_FLG_FINAL) != 0);
#if defined(DEBUG)
const char* derivedClassName = "?derivedClass";
JITDUMP("Now have direct call to boxed entry point, looking for unboxed entry point\n");
// Note for some shared methods the unboxed entry point requires an extra parameter.
- // We defer optimizing if so.
bool requiresInstMethodTableArg = false;
CORINFO_METHOD_HANDLE unboxedEntryMethod =
info.compCompHnd->getUnboxedEntry(derivedMethod, &requiresInstMethodTableArg);
// the copy, we can undo the copy too.
if (requiresInstMethodTableArg)
{
- // We can likely handle this case by grabbing the argument passed to
- // the newobj in the box. But defer for now.
- JITDUMP("Found unboxed entry point, but it needs method table arg, deferring\n");
+ // Perform a trial box removal and ask for the type handle tree.
+ JITDUMP("Unboxed entry needs method table arg...\n");
+ GenTree* methodTableArg = gtTryRemoveBoxUpstreamEffects(thisObj, BR_DONT_REMOVE_WANT_TYPE_HANDLE);
+
+ if (methodTableArg != nullptr)
+ {
+ // If that worked, turn the box into a copy to a local var
+ JITDUMP("Found suitable method table arg tree [%06u]\n", dspTreeID(methodTableArg));
+ GenTree* localCopyThis = gtTryRemoveBoxUpstreamEffects(thisObj, BR_MAKE_LOCAL_COPY);
+
+ if (localCopyThis != nullptr)
+ {
+ // Pass the local var as this and the type handle as a new arg
+ JITDUMP("Success! invoking unboxed entry point on local copy, and passing method table arg\n");
+ call->gtCallObjp = localCopyThis;
+
+ // Prepend for R2L arg passing or empty L2R passing
+ if ((Target::g_tgtArgOrder == Target::ARG_ORDER_R2L) || (call->gtCallArgs == nullptr))
+ {
+ call->gtCallArgs = gtNewListNode(methodTableArg, call->gtCallArgs);
+ }
+ // Append for non-empty L2R
+ else
+ {
+ GenTreeArgList* beforeArg = call->gtCallArgs;
+ while (beforeArg->Rest() != nullptr)
+ {
+ beforeArg = beforeArg->Rest();
+ }
+
+ beforeArg->Rest() = gtNewListNode(methodTableArg, nullptr);
+ }
+
+ call->gtCallMethHnd = unboxedEntryMethod;
+ derivedMethod = unboxedEntryMethod;
+
+ // Method attributes will differ because unboxed entry point is shared
+ const DWORD unboxedMethodAttribs = info.compCompHnd->getMethodAttribs(unboxedEntryMethod);
+ JITDUMP("Updating method attribs from 0x%08x to 0x%08x\n", derivedMethodAttribs,
+ unboxedMethodAttribs);
+ derivedMethodAttribs = unboxedMethodAttribs;
+ }
+ else
+ {
+ JITDUMP("Sorry, failed to undo the box -- can't convert to local copy\n");
+ }
+ }
+ else
+ {
+ JITDUMP("Sorry, failed to undo the box -- can't find method table arg\n");
+ }
}
else
{
void StoreRetExprResultsInArgs(GenTreeCall* call)
{
- GenTreePtr args = call->gtCallArgs;
+ GenTree* args = call->gtCallArgs;
if (args != nullptr)
{
comp->fgWalkTreePre(&args, SpillRetExprVisitor, this);
}
- GenTreePtr thisArg = call->gtCallObjp;
+ GenTree* thisArg = call->gtCallObjp;
if (thisArg != nullptr)
{
comp->fgWalkTreePre(&thisArg, SpillRetExprVisitor, this);
static Compiler::fgWalkResult SpillRetExprVisitor(GenTree** pTree, Compiler::fgWalkData* fgWalkPre)
{
assert((pTree != nullptr) && (*pTree != nullptr));
- GenTreePtr tree = *pTree;
+ GenTree* tree = *pTree;
if ((tree->gtFlags & GTF_CALL) == 0)
{
// Trees with ret_expr are marked as GTF_CALL.
return Compiler::WALK_CONTINUE;
}
- void StoreRetExprAsLocalVar(GenTreePtr* pRetExpr)
+ void StoreRetExprAsLocalVar(GenTree** pRetExpr)
{
- GenTreePtr retExpr = *pRetExpr;
+ GenTree* retExpr = *pRetExpr;
assert(retExpr->OperGet() == GT_RET_EXPR);
JITDUMP("Store return expression %u as a local var.\n", retExpr->gtTreeID);
unsigned tmp = comp->lvaGrabTemp(true DEBUGARG("spilling ret_expr"));