structHnd = AsCall()->gtRetClsHnd;
break;
+ case GT_RET_EXPR:
+ structHnd = AsRetExpr()->gtInlineCandidate->gtRetClsHnd;
+ break;
+
default:
unreached();
}
{
GenTree* zero;
- switch (type)
+ switch (genActualType(type))
{
case TYP_INT:
case TYP_REF:
// extended to the size of the assignment when an initBlk is transformed
// to an assignment of a primitive type.
// This performs the appropriate extension.
-
+//
void GenTreeIntCon::FixupInitBlkValue(var_types asgType)
{
assert(varTypeIsIntegralOrI(asgType));
}
}
-//
//------------------------------------------------------------------------
-// gtBlockOpInit: Initializes a BlkOp GenTree
+// gtNewBlkOpNode: Creates a GenTree for a block (struct) assignment.
//
// Arguments:
-// result - an assignment node that is to be initialized.
-// dst - the target (destination) we want to either initialize or copy to.
-// src - the init value for InitBlk or the source struct for CpBlk/CpObj.
-// isVolatile - specifies whether this node is a volatile memory operation.
+// dst - The destination node: local var / block node.
+// srcOrFillVal - The value to assign for CopyBlk, the integer "fill" for InitBlk
+// isVolatile - Whether this is a volatile memory operation or not.
//
-// Assumptions:
-// 'result' is an assignment that is newly constructed.
-// If 'dst' is TYP_STRUCT, then it must be a block node or lclVar.
+// Return Value:
+// Returns the newly constructed and initialized block operation.
//
-// Notes:
-// This procedure centralizes all the logic to both enforce proper structure and
-// to properly construct any InitBlk/CpBlk node.
-
-void Compiler::gtBlockOpInit(GenTree* result, GenTree* dst, GenTree* srcOrFillVal, bool isVolatile)
+GenTree* Compiler::gtNewBlkOpNode(GenTree* dst, GenTree* srcOrFillVal, bool isVolatile)
{
- if (!result->OperIsBlkOp())
+ assert(varTypeIsStruct(dst) && (dst->OperIsBlk() || dst->OperIsLocal() || dst->OperIs(GT_FIELD)));
+
+ bool isCopyBlock = srcOrFillVal->TypeGet() == dst->TypeGet();
+ if (!isCopyBlock) // InitBlk
{
- assert(dst->TypeGet() != TYP_STRUCT);
- return;
+ assert(genActualTypeIsInt(srcOrFillVal));
+ if (!srcOrFillVal->IsIntegralConst(0))
+ {
+ srcOrFillVal = gtNewOperNode(GT_INIT_VAL, TYP_INT, srcOrFillVal);
+ }
}
+ GenTree* result = gtNewAssignNode(dst, srcOrFillVal);
+
/* In the case of CpBlk, we want to avoid generating
* nodes where the source and destination are the same
* because of two reasons, first, is useless, second
* surface if struct promotion is ON (which is the case on x86/arm). But still the
* fundamental issue exists that needs to be addressed.
*/
- if (result->OperIsCopyBlkOp())
+ if (isCopyBlock)
{
GenTree* currSrc = srcOrFillVal;
GenTree* currDst = dst;
if (currSrc->OperGet() == GT_LCL_VAR && currDst->OperGet() == GT_LCL_VAR &&
currSrc->AsLclVarCommon()->GetLclNum() == currDst->AsLclVarCommon()->GetLclNum())
{
- // Make this a NOP
- // TODO-Cleanup: probably doesn't matter, but could do this earlier and avoid creating a GT_ASG
- result->gtBashToNOP();
- return;
+ result->gtBashToNOP(); // Make this a NOP.
+ return result;
}
}
- // Propagate all effect flags from children
- result->gtFlags |= dst->gtFlags & GTF_ALL_EFFECT;
- result->gtFlags |= result->AsOp()->gtOp2->gtFlags & GTF_ALL_EFFECT;
-
- result->gtFlags |= (dst->gtFlags & GTF_EXCEPT) | (srcOrFillVal->gtFlags & GTF_EXCEPT);
-
if (isVolatile)
{
result->gtFlags |= GTF_BLK_VOLATILE;
}
#ifdef FEATURE_SIMD
- if (result->OperIsCopyBlkOp() && varTypeIsSIMD(srcOrFillVal))
+ // If the source is a SIMD/HWI node of SIMD type, then the dst lclvar struct
+ // should be labeled as simd intrinsic related struct. This is done so that
+ // we do not promote the local, thus avoiding conflicting access methods
+ // (fields vs. whole-register).
+ if (varTypeIsSIMD(srcOrFillVal) && srcOrFillVal->OperIsSimdOrHWintrinsic())
{
- // If the source is a GT_SIMD node of SIMD type, then the dst lclvar struct
- // should be labeled as simd intrinsic related struct.
- // This is done so that the morpher can transform any field accesses into
- // intrinsics, thus avoiding conflicting access methods (fields vs. whole-register).
-
- GenTree* src = srcOrFillVal;
- if (src->OperIsIndir() && (src->AsIndir()->Addr()->OperGet() == GT_ADDR))
+ // TODO-Cleanup: similar logic already exists in "gtNewAssignNode",
+ // however, it is not enabled for x86. Fix that and delete this code.
+ if (dst->OperIsBlk() && (dst->AsIndir()->Addr()->OperGet() == GT_ADDR))
{
- src = src->AsIndir()->Addr()->gtGetOp1();
+ dst = dst->AsIndir()->Addr()->gtGetOp1();
}
-#ifdef FEATURE_HW_INTRINSICS
- if ((src->OperGet() == GT_SIMD) || (src->OperGet() == GT_HWINTRINSIC))
-#else
- if (src->OperGet() == GT_SIMD)
-#endif // FEATURE_HW_INTRINSICS
- {
- if (dst->OperIsBlk() && (dst->AsIndir()->Addr()->OperGet() == GT_ADDR))
- {
- dst = dst->AsIndir()->Addr()->gtGetOp1();
- }
-
- if (dst->OperIsLocal() && varTypeIsStruct(dst))
- {
- setLclRelatedToSIMDIntrinsic(dst);
- }
- }
- }
-#endif // FEATURE_SIMD
-}
-
-//------------------------------------------------------------------------
-// gtNewBlkOpNode: Creates a GenTree for a block (struct) assignment.
-//
-// Arguments:
-// dst - The destination node: local var / block node.
-// srcOrFillVall - The value to assign for CopyBlk, the integer "fill" for InitBlk
-// isVolatile - Whether this is a volatile memory operation or not.
-// isCopyBlock - True if this is a block copy (rather than a block init).
-//
-// Return Value:
-// Returns the newly constructed and initialized block operation.
-//
-GenTree* Compiler::gtNewBlkOpNode(GenTree* dst, GenTree* srcOrFillVal, bool isVolatile, bool isCopyBlock)
-{
- assert(dst->OperIsBlk() || dst->OperIsLocal());
- if (!isCopyBlock)
- {
- // InitBlk
- assert(varTypeIsIntegral(srcOrFillVal));
- if (varTypeIsStruct(dst))
+ if (dst->OperIsLocal() && varTypeIsStruct(dst))
{
- if (!srcOrFillVal->IsIntegralConst(0))
- {
- srcOrFillVal = gtNewOperNode(GT_INIT_VAL, TYP_INT, srcOrFillVal);
- }
+ setLclRelatedToSIMDIntrinsic(dst);
}
}
+#endif // FEATURE_SIMD
- GenTree* result = gtNewAssignNode(dst, srcOrFillVal);
- gtBlockOpInit(result, dst, srcOrFillVal, isVolatile);
return result;
}
}
valx->gtFlags |= GTF_DONT_CSE;
- asg = impAssignStruct(dest, val, valStructHnd, CHECK_SPILL_NONE, pAfterStmt, di, block);
+ asg = impAssignStruct(dest, val, CHECK_SPILL_NONE, pAfterStmt, di, block);
}
else
{
// calls that may not actually be required - e.g. if we only access a field of a struct.
GenTree* dst = gtNewLclvNode(tmpNum, varType);
- asg = impAssignStruct(dst, val, structType, curLevel, pAfterStmt, di, block);
+ asg = impAssignStruct(dst, val, curLevel, pAfterStmt, di, block);
}
else
{
// Arguments:
// dest - the destination of the assignment
// src - the value to be assigned
-// structHnd - handle representing the struct type
// curLevel - stack level for which a spill may be being done
// pAfterStmt - statement to insert any additional statements after
-// ilOffset - il offset for new statements
+// di - debug info for new statements
// block - block to insert any additional statements in
//
// Return Value:
//
// Notes:
// Temp assignments may be appended to impStmtList if spilling is necessary.
-
-GenTree* Compiler::impAssignStruct(GenTree* dest,
- GenTree* src,
- CORINFO_CLASS_HANDLE structHnd,
- unsigned curLevel,
- Statement** pAfterStmt, /* = nullptr */
- const DebugInfo& di, /* = DebugInfo() */
- BasicBlock* block /* = nullptr */
+//
+GenTree* Compiler::impAssignStruct(GenTree* dest,
+ GenTree* src,
+ unsigned curLevel,
+ Statement** pAfterStmt, /* = nullptr */
+ const DebugInfo& di, /* = DebugInfo() */
+ BasicBlock* block /* = nullptr */
)
{
- assert(varTypeIsStruct(dest));
+ assert(varTypeIsStruct(dest) && (dest->OperIsLocal() || dest->OperIsIndir() || dest->OperIs(GT_FIELD)));
- DebugInfo usedDI = di;
- if (!usedDI.IsValid())
+ assert(dest->TypeGet() == src->TypeGet());
+ // TODO-1stClassStructs: delete the "!IND" condition once "IND<struct>" nodes are no more.
+ if (dest->TypeIs(TYP_STRUCT) && !src->gtEffectiveVal()->OperIs(GT_IND))
{
- usedDI = impCurStmtDI;
- }
-
- while (dest->gtOper == GT_COMMA)
- {
- // Second thing is the struct.
- assert(varTypeIsStruct(dest->AsOp()->gtOp2));
-
- // Append all the op1 of GT_COMMA trees before we evaluate op2 of the GT_COMMA tree.
- if (pAfterStmt)
- {
- Statement* newStmt = gtNewStmt(dest->AsOp()->gtOp1, usedDI);
- fgInsertStmtAfter(block, *pAfterStmt, newStmt);
- *pAfterStmt = newStmt;
- }
- else
- {
- impAppendTree(dest->AsOp()->gtOp1, curLevel, usedDI); // do the side effect
- }
-
- // set dest to the second thing
- dest = dest->AsOp()->gtOp2;
+ assert(ClassLayout::AreCompatible(dest->GetLayout(this), src->GetLayout(this)));
}
- assert(dest->gtOper == GT_LCL_VAR || dest->gtOper == GT_RETURN || dest->gtOper == GT_FIELD ||
- dest->gtOper == GT_IND || dest->gtOper == GT_OBJ);
-
- // Return a NOP if this is a self-assignment.
- if (dest->OperGet() == GT_LCL_VAR && src->OperGet() == GT_LCL_VAR &&
- src->AsLclVarCommon()->GetLclNum() == dest->AsLclVarCommon()->GetLclNum())
+ if (dest->OperIs(GT_FIELD) && dest->TypeIs(TYP_STRUCT))
{
- return gtNewNothingNode();
+ // TODO-ADDR: delete this once FIELD<struct> nodes are transformed into OBJs (not INDs).
+ dest = gtNewObjNode(dest->GetLayout(this), gtNewOperNode(GT_ADDR, TYP_BYREF, dest));
}
- // TODO-1stClassStructs: Avoid creating an address if it is not needed,
- // or re-creating a Blk node if it is.
- GenTree* destAddr;
-
- if (dest->gtOper == GT_IND || dest->OperIsBlk())
- {
- destAddr = dest->AsOp()->gtOp1;
- }
- else
- {
- destAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, dest);
- }
-
- return (impAssignStructPtr(destAddr, src, structHnd, curLevel, pAfterStmt, usedDI, block));
-}
-
-//------------------------------------------------------------------------
-// impAssignStructPtr: Assign (copy) the structure from 'src' to 'destAddr'.
-//
-// Arguments:
-// destAddr - address of the destination of the assignment
-// src - source of the assignment
-// structHnd - handle representing the struct type
-// curLevel - stack level for which a spill may be being done
-// pAfterStmt - statement to insert any additional statements after
-// di - debug info for new statements
-// block - block to insert any additional statements in
-//
-// Return Value:
-// The tree that should be appended to the statement list that represents the assignment.
-//
-// Notes:
-// Temp assignments may be appended to impStmtList if spilling is necessary.
-
-GenTree* Compiler::impAssignStructPtr(GenTree* destAddr,
- GenTree* src,
- CORINFO_CLASS_HANDLE structHnd,
- unsigned curLevel,
- Statement** pAfterStmt, /* = NULL */
- const DebugInfo& di, /* = DebugInfo() */
- BasicBlock* block /* = NULL */
- )
-{
- GenTree* dest = nullptr;
-
DebugInfo usedDI = di;
if (!usedDI.IsValid())
{
usedDI = impCurStmtDI;
}
-#ifdef DEBUG
-#ifdef FEATURE_HW_INTRINSICS
- if (src->OperIs(GT_HWINTRINSIC))
- {
- const GenTreeHWIntrinsic* intrinsic = src->AsHWIntrinsic();
-
- if (HWIntrinsicInfo::IsMultiReg(intrinsic->GetHWIntrinsicId()))
- {
- assert(src->TypeGet() == TYP_STRUCT);
- }
- else
- {
- assert(varTypeIsSIMD(src));
- }
- }
- else
-#endif // FEATURE_HW_INTRINSICS
- {
- assert(src->OperIs(GT_LCL_VAR, GT_LCL_FLD, GT_FIELD, GT_IND, GT_OBJ, GT_BLK, GT_CALL, GT_MKREFANY, GT_RET_EXPR,
- GT_COMMA, GT_CNS_VEC) ||
- ((src->TypeGet() != TYP_STRUCT) && (src->OperIsSIMD() || src->OperIs(GT_BITCAST))));
- }
-#endif // DEBUG
-
- var_types asgType = src->TypeGet();
-
if (src->IsCall())
{
GenTreeCall* srcCall = src->AsCall();
if (srcCall->TreatAsShouldHaveRetBufArg(this))
{
- // Case of call returning a struct via hidden retbuf arg
- CLANG_FORMAT_COMMENT_ANCHOR;
-
+ // Case of call returning a struct via hidden retbuf arg.
// Some calls have an "out buffer" that is not actually a ret buff
// in the ABI sense. We take the path here for those but it should
// not be marked as the ret buff arg since it always follow the
WellKnownArg wellKnownArgType =
srcCall->ShouldHaveRetBufArg() ? WellKnownArg::RetBuffer : WellKnownArg::None;
- NewCallArg newArg = NewCallArg::Primitive(destAddr).WellKnown(wellKnownArgType);
+ GenTree* destAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, dest);
+ NewCallArg newArg = NewCallArg::Primitive(destAddr).WellKnown(wellKnownArgType);
#if !defined(TARGET_ARM)
// Unmanaged instance methods on Windows or Unix X86 need the retbuf arg after the first (this) parameter
// return the morphed call node
return src;
}
- else
- {
- // Case of call returning a struct in one or more registers.
-
- var_types returnType = (var_types)srcCall->gtReturnType;
-
- // First we try to change this to "LclVar/LclFld = call"
- //
- if ((destAddr->gtOper == GT_ADDR) && (destAddr->AsOp()->gtOp1->gtOper == GT_LCL_VAR))
- {
- // If it is a multi-reg struct return, don't change the oper to GT_LCL_FLD.
- // That is, the IR will be of the form lclVar = call for multi-reg return
- //
- GenTreeLclVar* lcl = destAddr->AsOp()->gtOp1->AsLclVar();
- unsigned lclNum = lcl->GetLclNum();
- LclVarDsc* varDsc = lvaGetDesc(lclNum);
- if (src->AsCall()->HasMultiRegRetVal())
- {
- // Mark the struct LclVar as used in a MultiReg return context
- // which currently makes it non promotable.
- // TODO-1stClassStructs: Eliminate this pessimization when we can more generally
- // handle multireg returns.
- lcl->gtFlags |= GTF_DONT_CSE;
- varDsc->lvIsMultiRegRet = true;
- }
- dest = lcl;
-
-#if defined(TARGET_ARM)
- // TODO-Cleanup: This should have been taken care of in the above HasMultiRegRetVal() case,
- // but that method has not been updadted to include ARM.
- impMarkLclDstNotPromotable(lclNum, src, structHnd);
- lcl->gtFlags |= GTF_DONT_CSE;
-#elif defined(UNIX_AMD64_ABI)
- // Not allowed for FEATURE_CORCLR which is the only SKU available for System V OSs.
- assert(!src->AsCall()->IsVarargs() && "varargs not allowed for System V OSs.");
-
- // Make the struct non promotable. The eightbytes could contain multiple fields.
- // TODO-1stClassStructs: Eliminate this pessimization when we can more generally
- // handle multireg returns.
- // TODO-Cleanup: Why is this needed here? This seems that it will set this even for
- // non-multireg returns.
- lcl->gtFlags |= GTF_DONT_CSE;
- varDsc->lvIsMultiRegRet = true;
-#endif
- }
- else // we don't have a GT_ADDR of a GT_LCL_VAR
- {
- asgType = returnType;
- }
+#ifdef UNIX_AMD64_ABI
+ if (dest->OperIs(GT_LCL_VAR))
+ {
+ // TODO-Cleanup: delete this quirk.
+ lvaGetDesc(dest->AsLclVar())->lvIsMultiRegRet = true;
}
+#endif // UNIX_AMD64_ABI
}
- else if (src->gtOper == GT_RET_EXPR)
+ else if (src->OperIs(GT_RET_EXPR))
{
- noway_assert(src->AsRetExpr()->gtInlineCandidate->OperIs(GT_CALL));
- GenTreeCall* call = src->AsRetExpr()->gtInlineCandidate->AsCall();
+ assert(src->AsRetExpr()->gtInlineCandidate->OperIs(GT_CALL));
+ GenTreeCall* call = src->AsRetExpr()->gtInlineCandidate;
if (call->ShouldHaveRetBufArg())
{
// insert the return value buffer into the argument list as first byref parameter after 'this'
+ GenTree* destAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, dest);
call->gtArgs.InsertAfterThisOrFirst(this,
NewCallArg::Primitive(destAddr).WellKnown(WellKnownArg::RetBuffer));
// So now we just return an empty node (pruning the GT_RET_EXPR)
return src;
}
- else
- {
- // Case of inline method returning a struct in one or more registers.
- // We won't need a return buffer
- asgType = src->gtType;
- }
- }
- else if (src->OperIsBlk())
- {
- asgType = impNormStructType(structHnd);
- assert(ClassLayout::AreCompatible(src->AsBlk()->GetLayout(), typGetObjLayout(structHnd)));
}
- else if (src->gtOper == GT_MKREFANY)
+ else if (src->OperIs(GT_MKREFANY))
{
- // Since we are assigning the result of a GT_MKREFANY,
- // "destAddr" must point to a refany.
-
+ // Since we are assigning the result of a GT_MKREFANY, "destAddr" must point to a refany.
+ // TODO-CQ: we can do this without address-exposing the local on the LHS.
+ GenTree* destAddr = gtNewOperNode(GT_ADDR, TYP_BYREF, dest);
GenTree* destAddrClone;
- destAddr =
- impCloneExpr(destAddr, &destAddrClone, structHnd, curLevel, pAfterStmt DEBUGARG("MKREFANY assignment"));
+ destAddr = impCloneExpr(destAddr, &destAddrClone, NO_CLASS_HANDLE, curLevel,
+ pAfterStmt DEBUGARG("MKREFANY assignment"));
assert(OFFSETOF__CORINFO_TypedReference__dataPtr == 0);
assert(destAddr->gtType == TYP_I_IMPL || destAddr->gtType == TYP_BYREF);
// return the assign of the type value, to be appended
return gtNewAssignNode(typeSlot, src->AsOp()->gtOp2);
}
- else if (src->gtOper == GT_COMMA)
+ else if (src->OperIs(GT_COMMA))
{
- // The second thing is the struct or its address.
- assert(varTypeIsStruct(src->AsOp()->gtOp2) || src->AsOp()->gtOp2->gtType == TYP_BYREF);
if (pAfterStmt)
{
// Insert op1 after '*pAfterStmt'
// In this case we have neither been given a statement to insert after, nor are we
// in the importer where we can append the side effect.
// Instead, we're going to sink the assignment below the COMMA.
- src->AsOp()->gtOp2 =
- impAssignStructPtr(destAddr, src->AsOp()->gtOp2, structHnd, curLevel, pAfterStmt, usedDI, block);
+ src->AsOp()->gtOp2 = impAssignStruct(dest, src->AsOp()->gtOp2, curLevel, pAfterStmt, usedDI, block);
src->AddAllEffectsFlags(src->AsOp()->gtOp2);
return src;
}
// Evaluate the second thing using recursion.
- return impAssignStructPtr(destAddr, src->AsOp()->gtOp2, structHnd, curLevel, pAfterStmt, usedDI, block);
- }
- else if (src->IsLocal())
- {
- asgType = src->TypeGet();
- }
- else if (asgType == TYP_STRUCT)
- {
- // It should already have the appropriate type.
- assert(asgType == impNormStructType(structHnd));
- }
- if ((dest == nullptr) && (destAddr->OperGet() == GT_ADDR))
- {
- GenTree* destNode = destAddr->gtGetOp1();
- // If the actual destination is a local, or a block node,
- // don't insert an OBJ(ADDR) if it already has the right type.
- if (destNode->OperIs(GT_LCL_VAR) || destNode->OperIsBlk())
- {
- var_types destType = destNode->TypeGet();
- // If one or both types are TYP_STRUCT (one may not yet be normalized), they are compatible
- // iff their handles are the same.
- // Otherwise, they are compatible if their types are the same.
- bool typesAreCompatible =
- ((destType == TYP_STRUCT) || (asgType == TYP_STRUCT))
- ? ((gtGetStructHandleIfPresent(destNode) == structHnd) && varTypeIsStruct(asgType))
- : (destType == asgType);
- if (typesAreCompatible)
- {
- dest = destNode;
- if (destType != TYP_STRUCT)
- {
- // Use a normalized type if available. We know from above that they're equivalent.
- asgType = destType;
- }
- }
- }
- }
-
- if (dest == nullptr)
- {
- if (asgType == TYP_STRUCT)
- {
- dest = gtNewObjNode(structHnd, destAddr);
- gtSetObjGcInfo(dest->AsObj());
- }
- else
- {
- dest = gtNewOperNode(GT_IND, asgType, destAddr);
- }
+ return impAssignStruct(dest, src->AsOp()->gtOp2, curLevel, pAfterStmt, usedDI, block);
}
if (dest->OperIs(GT_LCL_VAR) && src->IsMultiRegNode())
lvaGetDesc(dest->AsLclVar())->lvIsMultiRegRet = true;
}
- // return an assignment node, to be appended
- GenTree* asgNode = gtNewAssignNode(dest, src);
- gtBlockOpInit(asgNode, dest, src, false);
+ // Return a store node, to be appended.
+ GenTree* storeNode = gtNewBlkOpNode(dest, src);
- return asgNode;
+ return storeNode;
+}
+
+//------------------------------------------------------------------------
+// impAssignStructPtr: Assign (copy) the structure from 'src' to 'destAddr'.
+//
+// Arguments:
+// destAddr - address of the destination of the assignment
+// src - source of the assignment
+// structHnd - handle representing the struct type
+// curLevel - stack level for which a spill may be being done
+// pAfterStmt - statement to insert any additional statements after
+// di - debug info for new statements
+// block - block to insert any additional statements in
+//
+// Return Value:
+// The tree that should be appended to the statement list that represents the assignment.
+//
+// Notes:
+// Temp assignments may be appended to impStmtList if spilling is necessary.
+//
+GenTree* Compiler::impAssignStructPtr(GenTree* destAddr,
+ GenTree* src,
+ CORINFO_CLASS_HANDLE structHnd,
+ unsigned curLevel,
+ Statement** pAfterStmt, /* = NULL */
+ const DebugInfo& di, /* = DebugInfo() */
+ BasicBlock* block /* = NULL */
+ )
+{
+ GenTree* dst = gtNewStructVal(typGetObjLayout(structHnd), destAddr);
+ return impAssignStruct(dst, src, curLevel, pAfterStmt, di, block);
}
/*****************************************************************************
#if defined(JIT32_GCENCODER)
const bool forceInsertNewBlock = isSingleBlockFilter || compStressCompile(STRESS_CATCH_ARG, 5);
#else
- const bool forceInsertNewBlock = compStressCompile(STRESS_CATCH_ARG, 5);
+ const bool forceInsertNewBlock = compStressCompile(STRESS_CATCH_ARG, 5);
#endif // defined(JIT32_GCENCODER)
/* Spill GT_CATCH_ARG to a temp if there are jumps to the beginning of the handler */
// realType is either struct or SIMD
var_types realType = lvaGetRealType(structTempNum);
GenTreeLclVar* structLcl = gtNewLclvNode(structTempNum, realType);
- impAppendTree(gtNewBlkOpNode(structLcl, gtNewIconNode(0), false, false), CHECK_SPILL_NONE,
- impCurStmtDI);
+ impAppendTree(gtNewBlkOpNode(structLcl, gtNewIconNode(0)), CHECK_SPILL_NONE, impCurStmtDI);
return gtNewLclvNode(structTempNum, realType);
}
if (varTypeIsStruct(lclTyp))
{
- op1 = impAssignStruct(op2, op1, clsHnd, CHECK_SPILL_ALL);
+ op1 = impAssignStruct(op2, op1, CHECK_SPILL_ALL);
}
else
{
// Create the assignment node and append it.
if (varTypeIsStruct(op1))
{
- op1 = impAssignStruct(op1, op2, stelemClsHnd, CHECK_SPILL_ALL);
+ op1 = impAssignStruct(op1, op2, CHECK_SPILL_ALL);
}
else
{
if (fgVarNeedsExplicitZeroInit(lclNum, bbInALoop, bbIsReturn))
{
// Append a tree to zero-out the temp
- newObjThisPtr = gtNewLclvNode(lclNum, lclDsc->TypeGet());
-
- newObjThisPtr = gtNewBlkOpNode(newObjThisPtr, // Dest
- gtNewIconNode(0), // Value
- false, // isVolatile
- false); // not copyBlock
- impAppendTree(newObjThisPtr, CHECK_SPILL_NONE, impCurStmtDI);
+ GenTree* newObjDst = gtNewLclvNode(lclNum, lclDsc->TypeGet());
+ GenTree* newObjInit;
+ if (lclDsc->TypeGet() == TYP_STRUCT)
+ {
+ newObjInit = gtNewBlkOpNode(newObjDst, gtNewIconNode(0));
+ }
+ else
+ {
+ newObjInit = gtNewAssignNode(newObjDst, gtNewZeroConNode(lclDsc->TypeGet()));
+ }
+ impAppendTree(newObjInit, CHECK_SPILL_NONE, impCurStmtDI);
}
else
{
}
if (helperNode != nullptr)
{
- op1 = gtNewOperNode(GT_COMMA, op1->TypeGet(), helperNode, op1);
+ impAppendTree(helperNode, CHECK_SPILL_ALL, impCurStmtDI);
}
}
if (lclTyp == TYP_STRUCT)
{
- op1 = impAssignStruct(op1, op2, clsHnd, CHECK_SPILL_ALL);
+ op1 = impAssignStruct(op1, op2, CHECK_SPILL_ALL);
}
goto SPILL_APPEND;
}
lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */);
op2 = gtNewLclvNode(tmp, TYP_STRUCT);
- op1 = impAssignStruct(op2, op1, resolvedToken.hClass, CHECK_SPILL_ALL);
+ op1 = impAssignStruct(op2, op1, CHECK_SPILL_ALL);
assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp.
op2 = gtNewLclvNode(tmp, TYP_STRUCT);
lvaSetStruct(tmp, resolvedToken.hClass, true /* unsafe value cls check */);
op2 = gtNewLclvNode(tmp, TYP_STRUCT);
- op1 = impAssignStruct(op2, op1, resolvedToken.hClass, CHECK_SPILL_ALL);
+ op1 = impAssignStruct(op2, op1, CHECK_SPILL_ALL);
assert(op1->gtType == TYP_VOID); // We must be assigning the return struct to the temp.
op2 = gtNewLclvNode(tmp, TYP_STRUCT);
op1 = impPopStack().val;
op1 = gtNewStructVal(typGetObjLayout(resolvedToken.hClass), op1);
op2 = gtNewIconNode(0);
-
- op1 = gtNewBlkOpNode(op1, op2, (prefixFlags & PREFIX_VOLATILE) != 0, false);
+ op1 = gtNewBlkOpNode(op1, op2, (prefixFlags & PREFIX_VOLATILE) != 0);
goto SPILL_APPEND;
case CEE_INITBLK:
{
size = (unsigned)op3->AsIntConCommon()->IconValue();
op1 = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, typGetBlkLayout(size));
- op1 = gtNewBlkOpNode(op1, op2, (prefixFlags & PREFIX_VOLATILE) != 0, false);
+ op1 = gtNewBlkOpNode(op1, op2, (prefixFlags & PREFIX_VOLATILE) != 0);
}
else
{
op1 = gtNewBlockVal(op1, size);
op2 = gtNewBlockVal(op2, size);
- op1 = gtNewBlkOpNode(op1, op2, (prefixFlags & PREFIX_VOLATILE) != 0, /* isCopyBlock */ true);
+ op1 = gtNewBlkOpNode(op1, op2, (prefixFlags & PREFIX_VOLATILE) != 0);
}
else
{
{
gtSetObjGcInfo(op1->AsObj());
}
- op1 = gtNewBlkOpNode(op1, op2, ((prefixFlags & PREFIX_VOLATILE) != 0), /* isCopyBlock */ true);
+ op1 = gtNewBlkOpNode(op1, op2, ((prefixFlags & PREFIX_VOLATILE) != 0));
goto SPILL_APPEND;
}
}
}
-#ifdef TARGET_ARM
-/**************************************************************************************
- *
- * When assigning a vararg call src to a HFA lcl dest, mark that we cannot promote the
- * dst struct, because struct promotion will turn it into a float/double variable while
- * the rhs will be an int/long variable. We don't code generate assignment of int into
- * a float, but there is nothing that might prevent us from doing so. The tree however
- * would like: (=, (typ_float, typ_int)) or (GT_TRANSFER, (typ_float, typ_int))
- *
- * tmpNum - the lcl dst variable num that is a struct.
- * src - the src tree assigned to the dest that is a struct/int (when varargs call.)
- * hClass - the type handle for the struct variable.
- *
- * TODO-ARM-CQ: [301608] This is a rare scenario with varargs and struct promotion coming into play,
- * however, we could do a codegen of transferring from int to float registers
- * (transfer, not a cast.)
- *
- */
-void Compiler::impMarkLclDstNotPromotable(unsigned tmpNum, GenTree* src, CORINFO_CLASS_HANDLE hClass)
-{
- if (src->gtOper == GT_CALL && src->AsCall()->IsVarargs() && IsHfa(hClass))
- {
- int hfaSlots = GetHfaCount(hClass);
- var_types hfaType = GetHfaType(hClass);
-
- // If we have varargs we morph the method's return type to be "int" irrespective of its original
- // type: struct/float at importer because the ABI calls out return in integer registers.
- // We don't want struct promotion to replace an expression like this:
- // lclFld_int = callvar_int() into lclFld_float = callvar_int();
- // This means an int is getting assigned to a float without a cast. Prevent the promotion.
- if ((hfaType == TYP_DOUBLE && hfaSlots == sizeof(double) / REGSIZE_BYTES) ||
- (hfaType == TYP_FLOAT && hfaSlots == sizeof(float) / REGSIZE_BYTES))
- {
- // Make sure this struct type stays as struct so we can receive the call in a struct.
- lvaTable[tmpNum].lvIsMultiRegRet = true;
- }
- }
-}
-#endif // TARGET_ARM
-
//------------------------------------------------------------------------
// impAssignMultiRegTypeToVar: ensure calls that return structs in multiple
// registers return values to suitable temps.