return true;
}
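+ // Look through any commas to the value they produce.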
+ op = op->gtEffectiveVal(/* commaOnly */ true);
+
if (!op->OperIs(GT_LCL_VAR))
{
return false;
// Quirks:
//
- // Don't substitute nodes "AddFinalArgsAndDetermineABIInfo" doesn't handle into struct args.
+ // Don't substitute nodes that args morphing doesn't handle into struct args.
//
if (fsv.IsCallArg() && fsv.GetNode()->TypeIs(TYP_STRUCT) &&
- !fwdSubNode->OperIs(GT_OBJ, GT_LCL_VAR, GT_LCL_FLD, GT_MKREFANY))
+ !fwdSubNode->OperIs(GT_OBJ, GT_FIELD, GT_LCL_VAR, GT_LCL_FLD, GT_MKREFANY))
{
JITDUMP(" use is a struct arg; fwd sub node is not OBJ/LCL_VAR/LCL_FLD/MKREFANY\n");
return false;
if (varTypeIsStruct(varDsc) && (valStructHnd == NO_CLASS_HANDLE) && !varTypeIsSIMD(valTyp))
{
// There are some cases where we do not have a struct handle on the return value:
- // 1. Handle-less IND/BLK/LCL_FLD<struct> nodes.
+ // 1. Handle-less BLK/LCL_FLD nodes.
// 2. The zero constant created by local assertion propagation.
- // In these cases, we can use the type of the merge return for the assignment.
+ // In these cases, we can use the type of the local for the assignment.
assert(val->gtEffectiveVal(true)->OperIs(GT_IND, GT_BLK, GT_LCL_FLD, GT_CNS_INT));
- assert(tmp == genReturnLocal);
- valStructHnd = lvaGetStruct(genReturnLocal);
+ valStructHnd = lvaGetDesc(tmp)->GetStructHnd();
assert(valStructHnd != NO_CLASS_HANDLE);
}
GenTreeBlk(genTreeOps oper, var_types type, GenTree* addr, ClassLayout* layout)
: GenTreeIndir(oper, type, addr, nullptr)
- , m_layout(layout)
- , gtBlkOpKind(BlkOpKindInvalid)
-#ifndef JIT32_GCENCODER
- , gtBlkOpGcUnsafe(false)
-#endif
{
- assert(OperIsBlk(oper));
- assert((layout != nullptr) || OperIs(GT_STORE_DYN_BLK));
- gtFlags |= (addr->gtFlags & GTF_ALL_EFFECT);
+ Initialize(layout);
}
GenTreeBlk(genTreeOps oper, var_types type, GenTree* addr, GenTree* data, ClassLayout* layout)
: GenTreeIndir(oper, type, addr, data)
- , m_layout(layout)
- , gtBlkOpKind(BlkOpKindInvalid)
+ {
+ Initialize(layout);
+ }
+
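+ // Shared initialization helper: sets the layout and resets the block-op state.
+ // Also used when an existing node is retyped into a block op.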
+ void Initialize(ClassLayout* layout)
+ {
+ assert(OperIsBlk(OperGet()) && ((layout != nullptr) || OperIs(GT_STORE_DYN_BLK)));
+
+ m_layout = layout;
+ gtBlkOpKind = BlkOpKindInvalid;
#ifndef JIT32_GCENCODER
- , gtBlkOpGcUnsafe(false)
+ gtBlkOpGcUnsafe = false;
#endif
- {
- assert(OperIsBlk(oper));
- assert((layout != nullptr) || OperIs(GT_STORE_DYN_BLK));
- gtFlags |= (addr->gtFlags & GTF_ALL_EFFECT);
- gtFlags |= (data->gtFlags & GTF_ALL_EFFECT);
}
#if DEBUGGABLE_GENTREE
assert(varTypeIsStruct(dest) && (dest->OperIsLocal() || dest->OperIsIndir() || dest->OperIs(GT_FIELD)));
assert(dest->TypeGet() == src->TypeGet());
- // TODO-1stClassStructs: delete the "!IND" condition once "IND<struct>" nodes are no more.
- if (dest->TypeIs(TYP_STRUCT) && !src->gtEffectiveVal()->OperIs(GT_IND))
+ if (dest->TypeIs(TYP_STRUCT))
{
assert(ClassLayout::AreCompatible(dest->GetLayout(this), src->GetLayout(this)));
}
- if (dest->OperIs(GT_FIELD) && dest->TypeIs(TYP_STRUCT))
- {
- // TODO-ADDR: delete this once FIELD<struct> nodes are transformed into OBJs (not INDs).
- dest = gtNewObjNode(dest->GetLayout(this), gtNewOperNode(GT_ADDR, TYP_BYREF, dest));
- }
-
DebugInfo usedDI = di;
if (!usedDI.IsValid())
{
{
structType = impNormStructType(structHnd);
}
- bool alreadyNormalized = false;
- GenTreeLclVarCommon* structLcl = nullptr;
+ GenTreeLclVarCommon* structLcl = nullptr;
- genTreeOps oper = structVal->OperGet();
- switch (oper)
+ switch (structVal->OperGet())
{
- // GT_MKREFANY is supported directly by args morphing.
- case GT_MKREFANY:
- alreadyNormalized = true;
- break;
-
case GT_CALL:
case GT_RET_EXPR:
makeTemp = true;
break;
- case GT_FIELD:
- // Wrap it in a GT_OBJ, if needed.
- structVal->gtType = structType;
- if (structType == TYP_STRUCT)
- {
- structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
- }
- break;
-
case GT_LCL_VAR:
case GT_LCL_FLD:
structLcl = structVal->AsLclVarCommon();
structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
FALLTHROUGH;
+ case GT_IND:
case GT_OBJ:
case GT_BLK:
- // These should already have the appropriate type.
- assert(structVal->gtType == structType);
- alreadyNormalized = true;
- break;
-
- case GT_IND:
- assert(structVal->gtType == structType);
- structVal = gtNewObjNode(structHnd, structVal->gtGetOp1());
- alreadyNormalized = true;
- break;
-
+ case GT_FIELD:
case GT_CNS_VEC:
- assert(varTypeIsSIMD(structVal) && (structVal->gtType == structType));
- break;
-
#ifdef FEATURE_SIMD
case GT_SIMD:
- assert(varTypeIsSIMD(structVal) && (structVal->gtType == structType));
- break;
-#endif // FEATURE_SIMD
+#endif
#ifdef FEATURE_HW_INTRINSICS
case GT_HWINTRINSIC:
- assert(structVal->gtType == structType);
- assert(varTypeIsSIMD(structVal) ||
- HWIntrinsicInfo::IsMultiReg(structVal->AsHWIntrinsic()->GetHWIntrinsicId()));
- break;
#endif
+ case GT_MKREFANY:
+ // These should already have the appropriate type.
+ assert(structVal->TypeGet() == structType);
+ break;
case GT_COMMA:
{
} while (blockNode->OperGet() == GT_COMMA);
}
- if (blockNode->OperGet() == GT_FIELD)
- {
- // If we have a GT_FIELD then wrap it in a GT_OBJ.
- blockNode = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, blockNode));
- }
-
#ifdef FEATURE_SIMD
if (blockNode->OperIsSimdOrHWintrinsic() || blockNode->IsCnsVec())
{
parent->AsOp()->gtOp2 = impNormStructVal(blockNode, structHnd, curLevel);
- alreadyNormalized = true;
}
else
#endif
{
- noway_assert(blockNode->OperIsBlk());
+ noway_assert(blockNode->OperIsBlk() || blockNode->OperIs(GT_FIELD));
// Sink the GT_COMMA below the blockNode addr.
// That is GT_COMMA(op1, op2=blockNode) is transformed into
{
structVal = blockNode;
}
- alreadyNormalized = true;
}
}
break;
noway_assert(!"Unexpected node in impNormStructVal()");
break;
}
- structVal->gtType = structType;
- if (!alreadyNormalized)
+ if (makeTemp)
{
- if (makeTemp)
- {
- unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));
+ unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));
- impAssignTempGen(tmpNum, structVal, structHnd, curLevel);
+ impAssignTempGen(tmpNum, structVal, structHnd, curLevel);
- // The structVal is now the temp itself
+ // The structVal is now the temp itself
- structLcl = gtNewLclvNode(tmpNum, structType)->AsLclVarCommon();
- structVal = structLcl;
- }
- if ((structType == TYP_STRUCT) && !structVal->OperIsBlk())
+ structLcl = gtNewLclvNode(tmpNum, structType)->AsLclVarCommon();
+ structVal = structLcl;
+
+ if (structType == TYP_STRUCT)
{
// Wrap it in a GT_OBJ
structVal = gtNewObjNode(structHnd, gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
break;
}
case CEE_REFANYTYPE:
-
+ {
op1 = impPopStack().val;
- // make certain it is normalized;
- op1 = impNormStructVal(op1, impGetRefAnyClass(), CHECK_SPILL_ALL);
-
- if (op1->gtOper == GT_OBJ)
+ if (op1->OperIs(GT_MKREFANY))
{
- // Get the address of the refany
- op1 = op1->AsOp()->gtOp1;
-
- // Fetch the type from the correct slot
- op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
- gtNewIconNode(OFFSETOF__CORINFO_TypedReference__type, TYP_I_IMPL));
- op1 = gtNewOperNode(GT_IND, TYP_BYREF, op1);
- }
- else
- {
- assertImp(op1->gtOper == GT_MKREFANY);
-
// The pointer may have side-effects
if (op1->AsOp()->gtOp1->gtFlags & GTF_SIDE_EFFECT)
{
// We already have the class handle
op1 = op1->AsOp()->gtOp2;
}
-
- // convert native TypeHandle to RuntimeTypeHandle
+ else
{
- op1 = gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE_MAYBENULL, TYP_STRUCT, op1);
+ // Get the address of the refany
+ op1 = impGetStructAddr(op1, impGetRefAnyClass(), CHECK_SPILL_ALL, /* willDeref */ true);
+
+ // Fetch the type from the correct slot
+ op1 = gtNewOperNode(GT_ADD, TYP_BYREF, op1,
+ gtNewIconNode(OFFSETOF__CORINFO_TypedReference__type, TYP_I_IMPL));
+ op1 = gtNewOperNode(GT_IND, TYP_BYREF, op1);
+ }
+
+ // Convert native TypeHandle to RuntimeTypeHandle.
+ op1 = gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE_MAYBENULL, TYP_STRUCT, op1);
- CORINFO_CLASS_HANDLE classHandle = impGetTypeHandleClass();
+ CORINFO_CLASS_HANDLE classHandle = impGetTypeHandleClass();
- // The handle struct is returned in register
- op1->AsCall()->gtReturnType = GetRuntimeHandleUnderlyingType();
- op1->AsCall()->gtRetClsHnd = classHandle;
+ // The handle struct is returned in register
+ op1->AsCall()->gtReturnType = GetRuntimeHandleUnderlyingType();
+ op1->AsCall()->gtRetClsHnd = classHandle;
#if FEATURE_MULTIREG_RET
- op1->AsCall()->InitializeStructReturnType(this, classHandle, op1->AsCall()->GetUnmanagedCallConv());
+ op1->AsCall()->InitializeStructReturnType(this, classHandle, op1->AsCall()->GetUnmanagedCallConv());
#endif
- tiRetVal = typeInfo(TI_STRUCT, classHandle);
- }
-
+ tiRetVal = typeInfo(TI_STRUCT, classHandle);
impPushOnStack(op1, tiRetVal);
- break;
+ }
+ break;
case CEE_LDTOKEN:
{
dataOffset = eeGetArrayDataOffset();
}
- GenTree* dstAddr = gtNewOperNode(GT_ADD, TYP_BYREF, arrayLocalNode, gtNewIconNode(dataOffset, TYP_I_IMPL));
- GenTree* dst = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, dstAddr, typGetBlkLayout(blkSize));
- GenTree* src = gtNewIndOfIconHandleNode(TYP_STRUCT, (size_t)initData, GTF_ICON_CONST_PTR, true);
+ ClassLayout* blkLayout = typGetBlkLayout(blkSize);
+ GenTree* dstAddr = gtNewOperNode(GT_ADD, TYP_BYREF, arrayLocalNode, gtNewIconNode(dataOffset, TYP_I_IMPL));
+ GenTree* dst = gtNewStructVal(blkLayout, dstAddr);
+ dst->gtFlags |= GTF_GLOB_REF;
+
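+ // The source is the initialization data blob, addressed through a constant handle.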
+ GenTree* srcAddr = gtNewIconHandleNode((size_t)initData, GTF_ICON_CONST_PTR);
+ GenTree* src = gtNewStructVal(blkLayout, srcAddr);
#ifdef DEBUG
src->gtGetOp1()->AsIntCon()->gtTargetHandle = THT_InitializeArrayIntrinsics;
{
ClassLayout* layout = node->GetLayout(m_compiler);
node->SetOper(GT_OBJ);
- node->AsBlk()->SetLayout(layout);
- node->AsBlk()->gtBlkOpKind = GenTreeBlk::BlkOpKindInvalid;
-#ifndef JIT32_GCENCODER
- node->AsBlk()->gtBlkOpGcUnsafe = false;
-#endif // !JIT32_GCENCODER
+ node->AsBlk()->Initialize(layout);
}
else
{
if (node->TypeIs(TYP_STRUCT))
{
node->SetOper(GT_OBJ);
- node->AsBlk()->SetLayout(layout);
- node->AsBlk()->gtBlkOpKind = GenTreeBlk::BlkOpKindInvalid;
-#ifndef JIT32_GCENCODER
- node->AsBlk()->gtBlkOpGcUnsafe = false;
-#endif // !JIT32_GCENCODER
+ node->AsBlk()->Initialize(layout);
}
else
{
lclStore->ChangeOper(GT_STORE_OBJ);
GenTreeBlk* objStore = lclStore->AsObj();
objStore->gtFlags = GTF_ASG | GTF_IND_NONFAULTING | GTF_IND_TGT_NOT_HEAP;
-#ifndef JIT32_GCENCODER
- objStore->gtBlkOpGcUnsafe = false;
-#endif
- objStore->gtBlkOpKind = GenTreeObj::BlkOpKindInvalid;
- objStore->SetLayout(layout);
+ objStore->Initialize(layout);
objStore->SetAddr(addr);
objStore->SetData(src);
+
BlockRange().InsertBefore(objStore, addr);
LowerNode(objStore);
return;
{
// Spill to a local if sizes don't match so we can avoid the "load more than requested"
// problem, e.g. struct size is 5 and we emit "ldr x0, [x1]"
- unsigned realSize = retVal->AsIndir()->Size();
- CORINFO_CLASS_HANDLE structCls = comp->info.compMethodInfo->args.retTypeClass;
- if (realSize == 0)
- {
- // TODO-ADDR: delete once "IND<struct>" nodes are no more
- realSize = comp->info.compCompHnd->getClassSize(structCls);
- }
-
- if (genTypeSize(nativeReturnType) > realSize)
+ if (genTypeSize(nativeReturnType) > retVal->AsIndir()->Size())
{
LIR::Use retValUse(BlockRange(), &ret->gtOp1, ret);
unsigned tmpNum = comp->lvaGrabTemp(true DEBUGARG("mis-sized struct return"));
- comp->lvaSetStruct(tmpNum, structCls, false);
- comp->genReturnLocal = tmpNum;
+ comp->lvaSetStruct(tmpNum, comp->info.compMethodInfo->args.retTypeClass, false);
+
ReplaceWithLclVar(retValUse, tmpNum);
LowerRetSingleRegStructLclVar(ret);
break;
src->ChangeOper(GT_OBJ);
src->AsObj()->SetAddr(lclAddr);
- src->AsObj()->SetLayout(layout);
- src->AsObj()->gtBlkOpKind = GenTreeBlk::BlkOpKindInvalid;
- src->AsObj()->gtBlkOpGcUnsafe = false;
+ src->AsObj()->Initialize(layout);
BlockRange().InsertBefore(src, lclAddr);
}
if (tree->OperIs(GT_FIELD))
{
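+ // Retype the FIELD into an indirection over its address: OBJ for TYP_STRUCT fields,
+ // so that the layout is preserved, and IND otherwise.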
- tree->SetOper(GT_IND);
+ if (tree->TypeIs(TYP_STRUCT))
+ {
+ ClassLayout* layout = tree->GetLayout(this);
+ tree->SetOper(GT_OBJ);
+ tree->AsBlk()->Initialize(layout);
+ }
+ else
+ {
+ tree->SetOper(GT_IND);
+ }
+
tree->AsIndir()->SetAddr(addr);
}
else // Otherwise, we have a FIELD_ADDR.
m_srcVarDsc = m_comp->lvaGetDesc(m_srcLclNum);
}
- // Verify that the types on the LHS and RHS match and morph away "IND<struct>" nodes.
+ // Verify that the types on the LHS and RHS match.
assert(m_dst->TypeGet() == m_src->TypeGet());
if (m_dst->TypeIs(TYP_STRUCT))
{
- // TODO-1stClassStructs: delete this once "IND<struct>" nodes are no more.
- if (m_src->OperIs(GT_IND))
- {
- m_src->SetOper(m_blockLayout->IsBlockLayout() ? GT_BLK : GT_OBJ);
- m_src->AsBlk()->SetLayout(m_blockLayout);
- m_src->AsBlk()->gtBlkOpKind = GenTreeBlk::BlkOpKindInvalid;
-#ifndef JIT32_GCENCODER
- m_src->AsBlk()->gtBlkOpGcUnsafe = false;
-#endif // !JIT32_GCENCODER
- }
-
assert(ClassLayout::AreCompatible(m_blockLayout, m_src->GetLayout(m_comp)));
}
// TODO-1stClassStructs: produce simple "IND<simd>" nodes in importer.
if ((size != 0) && FitsIn<int32_t>(size))
{
- GenTree* lhs = gtNewBlockVal(tree->Addr(), static_cast<unsigned>(size));
- GenTree* asg = gtNewAssignNode(lhs, tree->Data());
+ ClassLayout* layout = typGetBlkLayout(static_cast<unsigned>(size));
+ GenTree* dst = gtNewStructVal(layout, tree->Addr());
+ dst->gtFlags |= GTF_GLOB_REF;
+
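+ // If the data is a handle-less IND<struct>, retype it into a BLK with the destination's layout.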
+ GenTree* src = tree->Data();
+ if (src->OperIs(GT_IND))
+ {
+ assert(src->TypeIs(TYP_STRUCT));
+ src->SetOper(GT_BLK);
+ src->AsBlk()->Initialize(layout);
+ }
+
+ GenTree* asg = gtNewAssignNode(dst, src);
asg->gtFlags |= (tree->gtFlags & (GTF_ALL_EFFECT | GTF_BLK_VOLATILE | GTF_BLK_UNALIGNED));
INDEBUG(asg->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED);
- JITDUMP("MorphStoreDynBlock: transformed STORE_DYN_BLK into ASG(BLK, Data())\n");
+ fgAssignSetVarDef(asg);
- GenTree* lclVarTree = fgIsIndirOfAddrOfLocal(lhs);
- if (lclVarTree != nullptr)
- {
- lclVarTree->gtFlags |= GTF_VAR_DEF;
- }
+ JITDUMP("MorphStoreDynBlock: transformed STORE_DYN_BLK into ASG(BLK, Data())\n");
return tree->OperIsCopyBlkOp() ? fgMorphCopyBlock(asg) : fgMorphInitBlock(asg);
}