assertionprop.cpp
bitset.cpp
block.cpp
+ layout.cpp
codegencommon.cpp
codegenlinear.cpp
compiler.cpp
alloc.h
arraystack.h
bitset.h
+ layout.h
bitsetasshortlong.h
bitsetasuint64.h
bitsetasuint64inclass.h
// This GenTree node has data about GC pointers; this means we're dealing
// with CpObj.
- assert(cpObjNode->gtGcPtrCount > 0);
+ assert(cpObjNode->GetLayout()->HasGCPtr());
#endif // DEBUG
// Consume the operands and get them into the right registers.
instGen_MemoryBarrier();
}
- unsigned slots = cpObjNode->gtSlots;
- emitter* emit = getEmitter();
-
- BYTE* gcPtrs = cpObjNode->gtGcPtrs;
+ emitter* emit = getEmitter();
+ ClassLayout* layout = cpObjNode->GetLayout();
+ unsigned slots = layout->GetSlotCount();
// If we can prove it's on the stack we don't need to use the write barrier.
- emitAttr attr = EA_PTRSIZE;
if (dstOnStack)
{
for (unsigned i = 0; i < slots; ++i)
{
- if (gcPtrs[i] == GCT_GCREF)
- {
- attr = EA_GCREF;
- }
- else if (gcPtrs[i] == GCT_BYREF)
- {
- attr = EA_BYREF;
- }
- else
- {
- attr = EA_PTRSIZE;
- }
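+ // GetGCPtrType returns TYP_REF/TYP_BYREF for GC slots and TYP_I_IMPL otherwise,
+ // so emitTypeSize produces the same EA_GCREF/EA_BYREF/EA_PTRSIZE attributes as
+ // the removed if/else chain.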
+ emitAttr attr = emitTypeSize(layout->GetGCPtrType(i));
emit->emitIns_R_R_I(INS_ldr, attr, tmpReg, REG_WRITE_BARRIER_SRC_BYREF, TARGET_POINTER_SIZE,
INS_FLAGS_DONT_CARE, INS_OPTS_LDST_POST_INC);
}
else
{
- unsigned gcPtrCount = cpObjNode->gtGcPtrCount;
+ unsigned gcPtrCount = layout->GetGCPtrCount();
unsigned i = 0;
while (i < slots)
{
- switch (gcPtrs[i])
+ if (!layout->IsGCPtr(i))
+ {
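+ // A non-GC slot is copied with a plain post-incrementing ldr/str pair through
+ // the source and destination byref registers.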
+ emit->emitIns_R_R_I(INS_ldr, EA_PTRSIZE, tmpReg, REG_WRITE_BARRIER_SRC_BYREF, TARGET_POINTER_SIZE,
+ INS_FLAGS_DONT_CARE, INS_OPTS_LDST_POST_INC);
+ emit->emitIns_R_R_I(INS_str, EA_PTRSIZE, tmpReg, REG_WRITE_BARRIER_DST_BYREF, TARGET_POINTER_SIZE,
+ INS_FLAGS_DONT_CARE, INS_OPTS_LDST_POST_INC);
+ }
+ else
{
- case TYPE_GC_NONE:
- emit->emitIns_R_R_I(INS_ldr, attr, tmpReg, REG_WRITE_BARRIER_SRC_BYREF, TARGET_POINTER_SIZE,
- INS_FLAGS_DONT_CARE, INS_OPTS_LDST_POST_INC);
- emit->emitIns_R_R_I(INS_str, attr, tmpReg, REG_WRITE_BARRIER_DST_BYREF, TARGET_POINTER_SIZE,
- INS_FLAGS_DONT_CARE, INS_OPTS_LDST_POST_INC);
- break;
-
- default:
- // In the case of a GC-Pointer we'll call the ByRef write barrier helper
- genEmitHelperCall(CORINFO_HELP_ASSIGN_BYREF, 0, EA_PTRSIZE);
-
- gcPtrCount--;
- break;
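+ // The ByRef write barrier helper copies one pointer-sized GC slot and
+ // post-increments both REG_WRITE_BARRIER_SRC_BYREF and REG_WRITE_BARRIER_DST_BYREF.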
+ genEmitHelperCall(CORINFO_HELP_ASSIGN_BYREF, 0, EA_PTRSIZE);
+ gcPtrCount--;
}
++i;
}
// This GenTree node has data about GC pointers; this means we're dealing
// with CpObj.
- assert(cpObjNode->gtGcPtrCount > 0);
+ assert(cpObjNode->GetLayout()->HasGCPtr());
#endif // DEBUG
// Consume the operands and get them into the right registers.
gcInfo.gcMarkRegPtrVal(REG_WRITE_BARRIER_SRC_BYREF, srcAddrType);
gcInfo.gcMarkRegPtrVal(REG_WRITE_BARRIER_DST_BYREF, dstAddr->TypeGet());
- unsigned slots = cpObjNode->gtSlots;
+ ClassLayout* layout = cpObjNode->GetLayout();
+ unsigned slots = layout->GetSlotCount();
// Temp register(s) used to perform the sequence of loads and stores.
regNumber tmpReg = cpObjNode->ExtractTempReg();
emitter* emit = getEmitter();
- BYTE* gcPtrs = cpObjNode->gtGcPtrs;
-
// If we can prove it's on the stack we don't need to use the write barrier.
if (dstOnStack)
{
// While two or more slots remain, copy them with a ldp/stp sequence
while (i < slots - 1)
{
- emitAttr attr0 = emitTypeSize(compiler->getJitGCType(gcPtrs[i + 0]));
- emitAttr attr1 = emitTypeSize(compiler->getJitGCType(gcPtrs[i + 1]));
+ emitAttr attr0 = emitTypeSize(layout->GetGCPtrType(i + 0));
+ emitAttr attr1 = emitTypeSize(layout->GetGCPtrType(i + 1));
emit->emitIns_R_R_R_I(INS_ldp, attr0, tmpReg, tmpReg2, REG_WRITE_BARRIER_SRC_BYREF, 2 * TARGET_POINTER_SIZE,
INS_OPTS_POST_INDEX, attr1);
// Use a ldr/str sequence for the last remainder
if (i < slots)
{
- emitAttr attr0 = emitTypeSize(compiler->getJitGCType(gcPtrs[i + 0]));
+ emitAttr attr0 = emitTypeSize(layout->GetGCPtrType(i + 0));
emit->emitIns_R_R_I(INS_ldr, attr0, tmpReg, REG_WRITE_BARRIER_SRC_BYREF, TARGET_POINTER_SIZE,
INS_OPTS_POST_INDEX);
}
else
{
- unsigned gcPtrCount = cpObjNode->gtGcPtrCount;
+ unsigned gcPtrCount = cpObjNode->GetLayout()->GetGCPtrCount();
unsigned i = 0;
while (i < slots)
{
- switch (gcPtrs[i])
+ if (!layout->IsGCPtr(i))
{
- case TYPE_GC_NONE:
- // Check if the next slot's type is also TYP_GC_NONE and use ldp/stp
- if ((i + 1 < slots) && (gcPtrs[i + 1] == TYPE_GC_NONE))
- {
- emit->emitIns_R_R_R_I(INS_ldp, EA_8BYTE, tmpReg, tmpReg2, REG_WRITE_BARRIER_SRC_BYREF,
- 2 * TARGET_POINTER_SIZE, INS_OPTS_POST_INDEX);
- emit->emitIns_R_R_R_I(INS_stp, EA_8BYTE, tmpReg, tmpReg2, REG_WRITE_BARRIER_DST_BYREF,
- 2 * TARGET_POINTER_SIZE, INS_OPTS_POST_INDEX);
- ++i; // extra increment of i, since we are copying two items
- }
- else
- {
- emit->emitIns_R_R_I(INS_ldr, EA_8BYTE, tmpReg, REG_WRITE_BARRIER_SRC_BYREF, TARGET_POINTER_SIZE,
- INS_OPTS_POST_INDEX);
- emit->emitIns_R_R_I(INS_str, EA_8BYTE, tmpReg, REG_WRITE_BARRIER_DST_BYREF, TARGET_POINTER_SIZE,
- INS_OPTS_POST_INDEX);
- }
- break;
-
- default:
- // In the case of a GC-Pointer we'll call the ByRef write barrier helper
- genEmitHelperCall(CORINFO_HELP_ASSIGN_BYREF, 0, EA_PTRSIZE);
-
- gcPtrCount--;
- break;
+ // Check if the next slot is also a non-GC slot and use ldp/stp
+ if ((i + 1 < slots) && !layout->IsGCPtr(i + 1))
+ {
+ emit->emitIns_R_R_R_I(INS_ldp, EA_8BYTE, tmpReg, tmpReg2, REG_WRITE_BARRIER_SRC_BYREF,
+ 2 * TARGET_POINTER_SIZE, INS_OPTS_POST_INDEX);
+ emit->emitIns_R_R_R_I(INS_stp, EA_8BYTE, tmpReg, tmpReg2, REG_WRITE_BARRIER_DST_BYREF,
+ 2 * TARGET_POINTER_SIZE, INS_OPTS_POST_INDEX);
+ ++i; // extra increment of i, since we are copying two items
+ }
+ else
+ {
+ emit->emitIns_R_R_I(INS_ldr, EA_8BYTE, tmpReg, REG_WRITE_BARRIER_SRC_BYREF, TARGET_POINTER_SIZE,
+ INS_OPTS_POST_INDEX);
+ emit->emitIns_R_R_I(INS_str, EA_8BYTE, tmpReg, REG_WRITE_BARRIER_DST_BYREF, TARGET_POINTER_SIZE,
+ INS_OPTS_POST_INDEX);
+ }
+ }
+ else
+ {
+ // In the case of a GC-Pointer we'll call the ByRef write barrier helper
+ genEmitHelperCall(CORINFO_HELP_ASSIGN_BYREF, 0, EA_PTRSIZE);
+ gcPtrCount--;
}
++i;
}
// the xor ensures that only one of the two is set up, not both
assert((varNode != nullptr) ^ (addrNode != nullptr));
- BYTE gcPtrArray[MAX_ARG_REG_COUNT] = {}; // TYPE_GC_NONE = 0
- BYTE* gcPtrs = gcPtrArray;
+ ClassLayout* layout;
+ unsigned structSize;
+ bool isHfa;
- unsigned gcPtrCount; // The count of GC pointers in the struct
- unsigned structSize;
- bool isHfa;
-
-#ifdef _TARGET_ARM_
- // On ARM32, size of reference map can be larger than MAX_ARG_REG_COUNT
- gcPtrs = treeNode->gtGcPtrs;
- gcPtrCount = treeNode->gtNumberReferenceSlots;
-#endif
// Set up the structSize, isHfa, and layout
if (source->OperGet() == GT_LCL_VAR)
{
structSize = varDsc->lvSize(); // This yields the roundUp size, but that is fine
// as that is how much stack is allocated for this LclVar
- isHfa = varDsc->lvIsHfa();
-#ifdef _TARGET_ARM64_
- gcPtrCount = varDsc->lvStructGcCount;
- for (unsigned i = 0; i < gcPtrCount; ++i)
- gcPtrs[i] = varDsc->lvGcLayout[i];
-#endif // _TARGET_ARM_
+ isHfa = varDsc->lvIsHfa();
+ layout = varDsc->GetLayout();
}
else // we must have a GT_OBJ
{
// it provides (size and GC layout) even if the node wraps a lclvar. Due
// to struct reinterpretation (e.g. Unsafe.As<X, Y>) it is possible that
// the OBJ node has a different type than the lclvar.
- CORINFO_CLASS_HANDLE objClass = source->gtObj.gtClass;
+ layout = source->AsObj()->GetLayout();
- structSize = compiler->info.compCompHnd->getClassSize(objClass);
+ structSize = layout->GetSize();
// The codegen code below doesn't have proper support for struct sizes
// that are not a multiple of the slot size. Call arg morphing handles this
assert((structSize % TARGET_POINTER_SIZE) == 0);
}
- isHfa = compiler->IsHfa(objClass);
-
-#ifdef _TARGET_ARM64_
- // On ARM32, Lowering places the correct GC layout information in the
- // GenTreePutArgStk node and the code above already use that. On ARM64,
- // this information is not available (in order to keep GenTreePutArgStk
- // nodes small) and we need to retrieve it from the VM here.
- gcPtrCount = compiler->info.compCompHnd->getClassGClayout(objClass, &gcPtrs[0]);
-#endif
+ isHfa = compiler->IsHfa(layout->GetClassHandle());
}
// If we have an HFA we can't have any GC pointers;
// if not, then the max size for the struct is 16 bytes
if (isHfa)
{
- noway_assert(gcPtrCount == 0);
+ noway_assert(!layout->HasGCPtr());
}
#ifdef _TARGET_ARM64_
else
while (remainingSize >= 2 * TARGET_POINTER_SIZE)
{
- var_types type0 = compiler->getJitGCType(gcPtrs[nextIndex + 0]);
- var_types type1 = compiler->getJitGCType(gcPtrs[nextIndex + 1]);
+ var_types type0 = layout->GetGCPtrType(nextIndex + 0);
+ var_types type1 = layout->GetGCPtrType(nextIndex + 1);
if (varNode != nullptr)
{
// str r2, [sp, #16]
while (remainingSize >= TARGET_POINTER_SIZE)
{
- var_types type = compiler->getJitGCType(gcPtrs[nextIndex]);
+ var_types type = layout->GetGCPtrType(nextIndex);
if (varNode != nullptr)
{
{
if (remainingSize >= TARGET_POINTER_SIZE)
{
- var_types nextType = compiler->getJitGCType(gcPtrs[nextIndex]);
+ var_types nextType = layout->GetGCPtrType(nextIndex);
emitAttr nextAttr = emitTypeSize(nextType);
remainingSize -= TARGET_POINTER_SIZE;
assert(varNode == nullptr);
// the left over size is smaller than a pointer and thus can never be a GC type
- assert(varTypeIsGC(compiler->getJitGCType(gcPtrs[nextIndex])) == false);
+ assert(!layout->IsGCPtr(nextIndex));
var_types loadType = TYP_UINT;
if (loadSize == 1)
// the xor ensures that only one of the two is set up, not both
assert((varNode != nullptr) ^ (addrNode != nullptr));
- // Setup the structSize, isHFa, and gcPtrCount
- BYTE* gcPtrs = treeNode->gtGcPtrs;
- unsigned gcPtrCount = treeNode->gtNumberReferenceSlots; // The count of GC pointers in the struct
- int structSize = treeNode->getArgSize();
-
// This is the varNum for our load operations,
// only used when we have a struct with a LclVar source
unsigned srcVarNum = BAD_VAR_NUM;
assert(baseReg != addrReg);
// We don't split HFA struct
- assert(!compiler->IsHfa(source->gtObj.gtClass));
+ assert(!compiler->IsHfa(source->AsObj()->GetLayout()->GetClassHandle()));
}
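+ // The OBJ's layout describes which of the slots copied below hold GC pointers.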
+ int structSize = treeNode->getArgSize();
+ ClassLayout* layout = source->AsObj()->GetLayout();
+
// Put on stack first
unsigned nextIndex = treeNode->gtNumRegs;
unsigned structOffset = nextIndex * TARGET_POINTER_SIZE;
assert(remainingSize % TARGET_POINTER_SIZE == 0);
while (remainingSize > 0)
{
- var_types type = compiler->getJitGCType(gcPtrs[nextIndex]);
+ var_types type = layout->GetGCPtrType(nextIndex);
if (varNode != nullptr)
{
// Must be <= 16 bytes or else it wouldn't be passed in registers, except for HFA,
// which can be bigger (and is handled above).
noway_assert(EA_SIZE_IN_BYTES(varDsc->lvSize()) <= 16);
- loadType = compiler->getJitGCType(varDsc->lvGcLayout[0]);
+ loadType = varDsc->GetLayout()->GetGCPtrType(0);
}
else
{
// Restore the second register.
argRegNext = genRegArgNext(argReg);
- loadType = compiler->getJitGCType(varDsc->lvGcLayout[1]);
+ loadType = varDsc->GetLayout()->GetGCPtrType(1);
loadSize = emitActualTypeSize(loadType);
getEmitter()->emitIns_R_S(ins_Load(loadType), loadSize, argRegNext, varNum, TARGET_POINTER_SIZE);
for (unsigned ofs = 0; ofs < maxSize; ofs += REGSIZE_BYTES)
{
unsigned idx = ofs / REGSIZE_BYTES;
- loadType = compiler->getJitGCType(varDsc->lvGcLayout[idx]);
+ loadType = varDsc->GetLayout()->GetGCPtrType(idx);
if (varDsc->lvRegNum != argReg)
{
if (blkOp->OperIs(GT_STORE_OBJ) && blkOp->OperIsCopyBlkOp())
{
- assert(blkOp->AsObj()->gtGcPtrCount != 0);
+ assert(blkOp->AsObj()->GetLayout()->HasGCPtr());
genCodeForCpObj(blkOp->AsObj());
return;
}
if (op->gtOper == GT_OBJ)
{
- CORINFO_CLASS_HANDLE clsHnd = op->AsObj()->gtClass;
- opSize = compiler->info.compCompHnd->getClassSize(clsHnd);
+ CORINFO_CLASS_HANDLE clsHnd = op->AsObj()->GetLayout()->GetClassHandle();
+ opSize = op->AsObj()->GetLayout()->GetSize();
alignment = roundUp(compiler->info.compCompHnd->getClassAlignmentRequirement(clsHnd), TARGET_POINTER_SIZE);
}
else if (op->gtOper == GT_LCL_VAR)
continue;
}
- if (compiler->info.compInitMem || varTypeIsGC(varDsc->TypeGet()) || (varDsc->lvStructGcCount > 0) ||
- varDsc->lvMustInit)
+ if (compiler->info.compInitMem || varDsc->HasGCPtr() || varDsc->lvMustInit)
{
if (varDsc->lvTracked)
{
CLANG_FORMAT_COMMENT_ANCHOR;
if ((!varDsc->lvTracked || (varDsc->lvType == TYP_STRUCT)) && varDsc->lvOnFrame &&
- (!varDsc->lvIsTemp || varTypeIsGC(varDsc->TypeGet()) || (varDsc->lvStructGcCount > 0)))
+ (!varDsc->lvIsTemp || varDsc->HasGCPtr()))
{
varDsc->lvMustInit = true;
/* Ignore if not a pointer variable or value class with a GC field */
- if (!compiler->lvaTypeIsGC(varNum))
+ if (!varDsc->HasGCPtr())
{
continue;
}
/* Is this a 'must-init' stack pointer local? */
- if (varDsc->lvMustInit && varDsc->lvOnFrame)
+ if (varDsc->lvMustInit && varDsc->lvOnFrame && !counted)
{
- if (!counted)
+ if (varDsc->TypeGet() == TYP_STRUCT)
{
- initStkLclCnt += varDsc->lvStructGcCount;
- counted = true;
+ initStkLclCnt += varDsc->GetLayout()->GetGCPtrCount();
}
+ else
+ {
+ assert(varTypeIsGC(varDsc->TypeGet()));
+ initStkLclCnt += 1;
+ }
+ counted = true;
}
if ((compiler->lvaLclSize(varNum) > (3 * TARGET_POINTER_SIZE)) && (largeGcStructs <= 4))
{
// We only initialize the GC variables in the TYP_STRUCT
const unsigned slots = (unsigned)compiler->lvaLclSize(varNum) / REGSIZE_BYTES;
- const BYTE* gcPtrs = compiler->lvaGetGcLayout(varNum);
+ ClassLayout* layout = varDsc->GetLayout();
for (unsigned i = 0; i < slots; i++)
{
- if (gcPtrs[i] != TYPE_GC_NONE)
+ if (layout->IsGCPtr(i))
{
getEmitter()->emitIns_S_R(ins_Store(TYP_I_IMPL), EA_PTRSIZE,
genGetZeroReg(initReg, pInitRegZeroed), varNum, i * REGSIZE_BYTES);
/* We need to know the offset range of tracked stack GC refs */
/* We assume that the GC reference can be anywhere in the TYP_STRUCT */
- if (compiler->lvaTypeIsGC(varNum) && varDsc->lvTrackedNonStruct() && varDsc->lvOnFrame)
+ if (varDsc->HasGCPtr() && varDsc->lvTrackedNonStruct() && varDsc->lvOnFrame)
{
// For fields of PROMOTION_TYPE_DEPENDENT type of promotion, they should have been
// taken care of by the parent struct.
unsigned flags = splitArg->GetRegSpillFlagByIdx(i);
if ((flags & GTF_SPILLED) != 0)
{
- BYTE* gcPtrs = splitArg->gtGcPtrs;
var_types dstType = splitArg->GetRegType(i);
regNumber dstReg = splitArg->GetRegNumByIdx(i);
if (storeBlkNode->OperIs(GT_STORE_OBJ) && storeBlkNode->OperIsCopyBlkOp() && !storeBlkNode->gtBlkOpGcUnsafe)
{
- assert(storeBlkNode->AsObj()->gtGcPtrCount != 0);
+ assert(storeBlkNode->AsObj()->GetLayout()->HasGCPtr());
genCodeForCpObj(storeBlkNode->AsObj());
return;
}
bool dstOnStack = dstAddr->gtSkipReloadOrCopy()->OperIsLocalAddr();
#ifdef DEBUG
-
- // A CpObj always involves one or more GC pointers.
- assert(cpObjNode->gtGcPtrCount > 0);
+ // This GenTree node has data about GC pointers; this means we're dealing
+ // with CpObj, which requires special handling.
+ assert(cpObjNode->GetLayout()->HasGCPtr());
// MovSp (alias for movsq on x64 and movsd on x86) instruction is used for copying non-gcref fields
// and it needs src = RSI and dst = RDI.
gcInfo.gcMarkRegPtrVal(REG_RSI, srcAddrType);
gcInfo.gcMarkRegPtrVal(REG_RDI, dstAddr->TypeGet());
- unsigned slots = cpObjNode->gtSlots;
+ unsigned slots = cpObjNode->GetLayout()->GetSlotCount();
// If we can prove it's on the stack we don't need to use the write barrier.
if (dstOnStack)
}
else
{
- BYTE* gcPtrs = cpObjNode->gtGcPtrs;
- unsigned gcPtrCount = cpObjNode->gtGcPtrCount;
+ ClassLayout* layout = cpObjNode->GetLayout();
+ unsigned gcPtrCount = layout->GetGCPtrCount();
unsigned i = 0;
while (i < slots)
{
- switch (gcPtrs[i])
+ if (!layout->IsGCPtr(i))
{
- case TYPE_GC_NONE:
- // Let's see if we can use rep movsp instead of a sequence of movsp instructions
- // to save cycles and code size.
+ // Let's see if we can use rep movsp instead of a sequence of movsp instructions
+ // to save cycles and code size.
+ unsigned nonGcSlotCount = 0;
+
+ do
+ {
+ nonGcSlotCount++;
+ i++;
+ } while ((i < slots) && !layout->IsGCPtr(i));
+
+ // If we have a very small contiguous non-gc region, it's better just to
+ // emit a sequence of movsp instructions
+ if (nonGcSlotCount < CPOBJ_NONGC_SLOTS_LIMIT)
+ {
+ while (nonGcSlotCount > 0)
{
- unsigned nonGcSlotCount = 0;
-
- do
- {
- nonGcSlotCount++;
- i++;
- } while (i < slots && gcPtrs[i] == TYPE_GC_NONE);
-
- // If we have a very small contiguous non-gc region, it's better just to
- // emit a sequence of movsp instructions
- if (nonGcSlotCount < CPOBJ_NONGC_SLOTS_LIMIT)
- {
- while (nonGcSlotCount > 0)
- {
- instGen(INS_movsp);
- nonGcSlotCount--;
- }
- }
- else
- {
- // Otherwise, we can save code-size and improve CQ by emitting
- // rep movsp (alias for movsd/movsq for x86/x64)
- assert((cpObjNode->gtRsvdRegs & RBM_RCX) != 0);
-
- getEmitter()->emitIns_R_I(INS_mov, EA_4BYTE, REG_RCX, nonGcSlotCount);
- instGen(INS_r_movsp);
- }
+ instGen(INS_movsp);
+ nonGcSlotCount--;
}
- break;
- default:
- // We have a GC pointer, call the memory barrier.
- genEmitHelperCall(CORINFO_HELP_ASSIGN_BYREF, 0, EA_PTRSIZE);
- gcPtrCount--;
- i++;
+ }
+ else
+ {
+ // Otherwise, we can save code-size and improve CQ by emitting
+ // rep movsp (alias for movsd/movsq for x86/x64)
+ assert((cpObjNode->gtRsvdRegs & RBM_RCX) != 0);
+
+ getEmitter()->emitIns_R_I(INS_mov, EA_4BYTE, REG_RCX, nonGcSlotCount);
+ instGen(INS_r_movsp);
+ }
+ }
+ else
+ {
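+ // We have a GC pointer; the ByRef write barrier helper copies the slot and
+ // advances both RSI and RDI to the next slot.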
+ genEmitHelperCall(CORINFO_HELP_ASSIGN_BYREF, 0, EA_PTRSIZE);
+ gcPtrCount--;
+ i++;
}
}
if (source->TypeGet() == TYP_STRUCT)
{
GenTreeObj* obj = source->AsObj();
- unsigned argBytes = roundUp(obj->gtBlkSize, TARGET_POINTER_SIZE);
+ unsigned argBytes = roundUp(obj->GetLayout()->GetSize(), TARGET_POINTER_SIZE);
#ifdef _TARGET_X86_
// If we have an OBJ, we must have created a copy if the original arg was not a
// local and was not a multiple of TARGET_POINTER_SIZE.
// Note that on x64/ux this will be handled by unrolling in genStructPutArgUnroll.
- assert((argBytes == obj->gtBlkSize) || obj->Addr()->IsLocalAddrExpr());
+ assert((argBytes == obj->GetLayout()->GetSize()) || obj->Addr()->IsLocalAddrExpr());
#endif // _TARGET_X86_
assert((curArgTabEntry->numSlots * TARGET_POINTER_SIZE) == argBytes);
}
{
case GenTreePutArgStk::Kind::RepInstr:
case GenTreePutArgStk::Kind::Unroll:
- assert((putArgStk->gtNumberReferenceSlots == 0) && (source->OperGet() != GT_FIELD_LIST) && (argSize >= 16));
+ assert(!source->AsObj()->GetLayout()->HasGCPtr() && (argSize >= 16));
break;
case GenTreePutArgStk::Kind::Push:
case GenTreePutArgStk::Kind::PushAllSlots:
- assert((putArgStk->gtNumberReferenceSlots != 0) || (source->OperGet() == GT_FIELD_LIST) || (argSize < 16));
+ assert(source->OperIs(GT_FIELD_LIST) || source->AsObj()->GetLayout()->HasGCPtr() || (argSize < 16));
break;
case GenTreePutArgStk::Kind::Invalid:
default:
assert(targetType == TYP_STRUCT);
- if (putArgStk->gtNumberReferenceSlots == 0)
+ ClassLayout* layout = source->AsObj()->GetLayout();
+
+ if (!layout->HasGCPtr())
{
switch (putArgStk->gtPutArgStkKind)
{
assert(m_pushStkArg);
GenTree* srcAddr = source->gtGetOp1();
- BYTE* gcPtrs = putArgStk->gtGcPtrs;
const unsigned numSlots = putArgStk->gtNumSlots;
regNumber srcRegNum = srcAddr->gtRegNum;
for (int i = numSlots - 1; i >= 0; --i)
{
- emitAttr slotAttr;
- if (gcPtrs[i] == TYPE_GC_NONE)
- {
- slotAttr = EA_4BYTE;
- }
- else if (gcPtrs[i] == TYPE_GC_REF)
- {
- slotAttr = EA_GCREF;
- }
- else
- {
- assert(gcPtrs[i] == TYPE_GC_BYREF);
- slotAttr = EA_BYREF;
- }
-
- const unsigned offset = i * TARGET_POINTER_SIZE;
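+ // GetGCPtrType returns TYP_REF/TYP_BYREF for GC slots and TYP_I_IMPL otherwise,
+ // so emitTypeSize reproduces the EA_GCREF/EA_BYREF/EA_4BYTE attributes of the
+ // removed if/else chain (EA_PTRSIZE is EA_4BYTE on x86).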
+ emitAttr slotAttr = emitTypeSize(layout->GetGCPtrType(i));
+ const unsigned offset = i * TARGET_POINTER_SIZE;
if (srcAddrInReg)
{
getEmitter()->emitIns_AR_R(INS_push, slotAttr, REG_NA, srcRegNum, offset);
unsigned numGCSlotsCopied = 0;
#endif // DEBUG
- BYTE* gcPtrs = putArgStk->gtGcPtrs;
const unsigned numSlots = putArgStk->gtNumSlots;
for (unsigned i = 0; i < numSlots;)
{
- if (gcPtrs[i] == TYPE_GC_NONE)
+ if (!layout->IsGCPtr(i))
{
// Let's see if we can use rep movsp (alias for movsd or movsq for 32 and 64 bits respectively)
// instead of a sequence of movsp instructions to save cycles and code size.
{
adjacentNonGCSlotCount++;
i++;
- } while ((i < numSlots) && (gcPtrs[i] == TYPE_GC_NONE));
+ } while ((i < numSlots) && !layout->IsGCPtr(i));
// If we have a very small contiguous non-ref region, it's better just to
// emit a sequence of movsp instructions
}
else
{
- assert((gcPtrs[i] == TYPE_GC_REF) || (gcPtrs[i] == TYPE_GC_BYREF));
-
// We have a GC (byref or ref) pointer
// TODO-Amd64-Unix: Here a better solution (for code size and CQ) would be to use movsp instruction,
// but the logic for emitting a GC info record is not available (it is internal for the emitter
// only.) See emitGCVarLiveUpd function. If we could call it separately, we could do
// instGen(INS_movsp); and emission of gc info.
- var_types memType = (gcPtrs[i] == TYPE_GC_REF) ? TYP_REF : TYP_BYREF;
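+ // This slot holds a GC pointer, so GetGCPtrType returns TYP_REF or TYP_BYREF here.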
+ var_types memType = layout->GetGCPtrType(i);
getEmitter()->emitIns_R_AR(ins_Load(memType), emitTypeSize(memType), REG_RCX, REG_RSI, 0);
genStoreRegToStackArg(memType, REG_RCX, i * TARGET_POINTER_SIZE);
#ifdef DEBUG
}
}
- assert(numGCSlotsCopied == putArgStk->gtNumberReferenceSlots);
+ assert(numGCSlotsCopied == layout->GetGCPtrCount());
#endif // _TARGET_X86_
}
}
return result;
}
-#if FEATURE_MULTIREG_ARGS
-//---------------------------------------------------------------------------
-// getStructGcPtrsFromOp: Given a GenTree node of TYP_STRUCT that represents
-// a pass by value argument, return the gcPtr layout
-// for the pointers sized fields
-// Arguments:
-// op - the operand of TYP_STRUCT that is passed by value
-// gcPtrsOut - an array of BYTES that are written by this method
-// they will contain the VM's CorInfoGCType values
-// for each pointer sized field
-// Return Value:
-// Two [or more] values are written into the gcPtrs array
-//
-// Note that for ARM64 there will always be exactly two pointer sized fields
-
-void Compiler::getStructGcPtrsFromOp(GenTree* op, BYTE* gcPtrsOut)
-{
- assert(op->TypeGet() == TYP_STRUCT);
-
-#ifdef _TARGET_ARM64_
- if (op->OperGet() == GT_OBJ)
- {
- CORINFO_CLASS_HANDLE objClass = op->gtObj.gtClass;
-
- int structSize = info.compCompHnd->getClassSize(objClass);
- assert(structSize <= 2 * TARGET_POINTER_SIZE);
-
- BYTE gcPtrsTmp[2] = {TYPE_GC_NONE, TYPE_GC_NONE};
-
- info.compCompHnd->getClassGClayout(objClass, &gcPtrsTmp[0]);
-
- gcPtrsOut[0] = gcPtrsTmp[0];
- gcPtrsOut[1] = gcPtrsTmp[1];
- }
- else if (op->OperGet() == GT_LCL_VAR)
- {
- GenTreeLclVarCommon* varNode = op->AsLclVarCommon();
- unsigned varNum = varNode->gtLclNum;
- assert(varNum < lvaCount);
- LclVarDsc* varDsc = &lvaTable[varNum];
-
- // At this point any TYP_STRUCT LclVar must be a 16-byte pass by value argument
- assert(varDsc->lvSize() == 2 * TARGET_POINTER_SIZE);
-
- gcPtrsOut[0] = varDsc->lvGcLayout[0];
- gcPtrsOut[1] = varDsc->lvGcLayout[1];
- }
- else
-#endif
- {
- noway_assert(!"Unsupported Oper for getStructGcPtrsFromOp");
- }
-}
-#endif // FEATURE_MULTIREG_ARGS
-
#ifdef ARM_SOFTFP
//---------------------------------------------------------------------------
// IsSingleFloat32Struct:
// We start with the flow graph in tree-order
fgOrder = FGOrderTree;
+ m_classLayoutTable = nullptr;
+
#ifdef FEATURE_SIMD
m_simdHandleCache = nullptr;
#endif // FEATURE_SIMD
// This fixes the PPP backward compat issue
varDscExposedStruct->lvExactSize += 32;
- // Update the GC info to indicate that the padding area does
- // not contain any GC pointers.
- //
// The struct is now 64 bytes.
- //
// We're on x64 so this should be 8 pointer slots.
assert((varDscExposedStruct->lvExactSize / TARGET_POINTER_SIZE) == 8);
- BYTE* oldGCPtrs = varDscExposedStruct->lvGcLayout;
- BYTE* newGCPtrs = getAllocator(CMK_LvaTable).allocate<BYTE>(8);
-
- for (int i = 0; i < 4; i++)
- {
- newGCPtrs[i] = oldGCPtrs[i];
- }
-
- for (int i = 4; i < 8; i++)
- {
- newGCPtrs[i] = TYPE_GC_NONE;
- }
-
- varDscExposedStruct->lvGcLayout = newGCPtrs;
+ varDscExposedStruct->SetLayout(
+ varDscExposedStruct->GetLayout()->GetPPPQuirkLayout(getAllocator(CMK_ClassLayout)));
return true;
}
case GT_OBJ:
case GT_STORE_OBJ:
- if (tree->AsObj()->HasGCPtr())
+ if (tree->AsObj()->GetLayout()->HasGCPtr())
{
chars += printf("[BLK_HASGCPTR]");
}
unsigned char lvIsRegArg : 1; // is this an argument that was passed by register?
unsigned char lvFramePointerBased : 1; // 0 = off of REG_SPBASE (e.g., ESP), 1 = off of REG_FPBASE (e.g., EBP)
- unsigned char lvStructGcCount : 3; // if struct, how many GC pointer (stop counting at 7). The only use of values >1
- // is to help determine whether to use block init in the prolog.
- unsigned char lvOnFrame : 1; // (part of) the variable lives on the frame
- unsigned char lvRegister : 1; // assigned to live in a register? For RyuJIT backend, this is only set if the
- // variable is in the same register for the entire function.
- unsigned char lvTracked : 1; // is this a tracked variable?
+ unsigned char lvOnFrame : 1; // (part of) the variable lives on the frame
+ unsigned char lvRegister : 1; // assigned to live in a register? For RyuJIT backend, this is only set if the
+ // variable is in the same register for the entire function.
+ unsigned char lvTracked : 1; // is this a tracked variable?
bool lvTrackedNonStruct()
{
return lvTracked && lvType != TYP_STRUCT;
CORINFO_FIELD_HANDLE lvFieldHnd; // field handle for promoted struct fields
- BYTE* lvGcLayout; // GC layout info for structs
+private:
+ ClassLayout* m_layout; // layout info for structs
+public:
#if ASSERTION_PROP
BlockSet lvRefBlks; // Set of blocks that contain refs
GenTreeStmt* lvDefStmt; // Pointer to the statement with the single definition
var_types lvaArgType();
+ // Returns true if this variable contains GC pointers (including being a GC pointer itself).
+ bool HasGCPtr()
+ {
+ return varTypeIsGC(lvType) || ((lvType == TYP_STRUCT) && m_layout->HasGCPtr());
+ }
+
+ // Returns the layout of a struct variable.
+ ClassLayout* GetLayout()
+ {
+ assert(varTypeIsStruct(lvType));
+ return m_layout;
+ }
+
+ // Sets the layout of a struct variable.
+ void SetLayout(ClassLayout* layout)
+ {
+ assert(varTypeIsStruct(lvType));
+ m_layout = layout;
+ }
+
SsaDefArray<LclSsaVarDsc> lvPerSsaData;
// Returns the address of the per-Ssa data for the given ssaNum (which is required
}
#endif // defined(FEATURE_SIMD)
- BYTE* lvaGetGcLayout(unsigned varNum);
- bool lvaTypeIsGC(unsigned varNum);
unsigned lvaGSSecurityCookie; // LclVar number
bool lvaTempsHaveLargerOffsetThanVars();
GenTree* impGetStructAddr(GenTree* structVal, CORINFO_CLASS_HANDLE structHnd, unsigned curLevel, bool willDeref);
- var_types impNormStructType(CORINFO_CLASS_HANDLE structHnd,
- BYTE* gcLayout = nullptr,
- unsigned* numGCVars = nullptr,
- var_types* simdBaseType = nullptr);
+ var_types impNormStructType(CORINFO_CLASS_HANDLE structHnd, var_types* simdBaseType = nullptr);
GenTree* impNormStructVal(GenTree* structVal,
CORINFO_CLASS_HANDLE structHnd,
#endif // FEATURE_MULTIREG_RET
}
-#if FEATURE_MULTIREG_ARGS
- // Given a GenTree node of TYP_STRUCT that represents a pass by value argument
- // return the gcPtr layout for the pointers sized fields
- void getStructGcPtrsFromOp(GenTree* op, BYTE* gcPtrsOut);
-#endif // FEATURE_MULTIREG_ARGS
-
// Returns true if the method being compiled returns a value
bool compMethodHasRetVal()
{
#endif // DEBUG
+private:
+ class ClassLayoutTable* m_classLayoutTable;
+
+ class ClassLayoutTable* typCreateClassLayoutTable();
+ class ClassLayoutTable* typGetClassLayoutTable();
+
+public:
+ // Get the layout having the specified layout number.
+ ClassLayout* typGetLayoutByNum(unsigned layoutNum);
+ // Get the layout number of the specified layout.
+ unsigned typGetLayoutNum(ClassLayout* layout);
+ // Get the layout having the specified size but no class handle.
+ ClassLayout* typGetBlkLayout(unsigned blockSize);
+ // Get the number of a layout having the specified size but no class handle.
+ unsigned typGetBlkLayoutNum(unsigned blockSize);
+ // Get the layout for the specified class handle.
+ ClassLayout* typGetObjLayout(CORINFO_CLASS_HANDLE classHandle);
+ // Get the number of a layout for the specified class handle.
+ unsigned typGetObjLayoutNum(CORINFO_CLASS_HANDLE classHandle);
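+ // For example, gtNewObjNode attaches typGetObjLayout(structHnd) to each GT_OBJ it
+ // creates, while block nodes without a class handle get their layout from typGetBlkLayout.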
+
//-------------------------- Global Compiler Data ------------------------------------
#ifdef DEBUG
}
/*****************************************************************************
- * Returns true if the lvType is a TYP_REF or a TYP_BYREF.
- * When the lvType is a TYP_STRUCT it searches the GC layout
- * of the struct and returns true iff it contains a GC ref.
- */
-
-inline bool Compiler::lvaTypeIsGC(unsigned varNum)
-{
- if (lvaTable[varNum].TypeGet() == TYP_STRUCT)
- {
- assert(lvaTable[varNum].lvGcLayout != nullptr); // bits are intialized
- return (lvaTable[varNum].lvStructGcCount != 0);
- }
- return (varTypeIsGC(lvaTable[varNum].TypeGet()));
-}
-
-/*****************************************************************************
Is this a synchronized instance method? If so, we will need to report "this"
in the GC information, so that the EE can release the object lock
in case of an exception
bool Compiler::fgStructTempNeedsExplicitZeroInit(LclVarDsc* varDsc, BasicBlock* block)
{
- bool containsGCPtr = (varDsc->lvStructGcCount > 0);
- return (!info.compInitMem || ((block->bbFlags & BBF_BACKWARD_JUMP) != 0) || (!containsGCPtr && varDsc->lvIsTemp));
+ return !info.compInitMem || ((block->bbFlags & BBF_BACKWARD_JUMP) != 0) ||
+ (!varDsc->HasGCPtr() && varDsc->lvIsTemp);
}
/*****************************************************************************/
CompMemKindMacro(SideEffects)
CompMemKindMacro(ObjectAllocator)
CompMemKindMacro(VariableLiveRanges)
+CompMemKindMacro(ClassLayout)
//clang-format on
#undef CompMemKindMacro
totalSize += sz;
}
}
-
- else if (varDsc->lvType == TYP_STRUCT && varDsc->lvOnFrame && (varDsc->lvExactSize >= TARGET_POINTER_SIZE))
+ else if ((varDsc->TypeGet() == TYP_STRUCT) && varDsc->lvOnFrame && varDsc->HasGCPtr())
{
- // A struct will have gcSlots only if it is at least TARGET_POINTER_SIZE.
- unsigned slots = compiler->lvaLclSize(varNum) / TARGET_POINTER_SIZE;
- BYTE* gcPtrs = compiler->lvaGetGcLayout(varNum);
+ ClassLayout* layout = varDsc->GetLayout();
+ unsigned slots = layout->GetSlotCount();
- // walk each member of the array
for (unsigned i = 0; i < slots; i++)
{
- if (gcPtrs[i] == TYPE_GC_NONE) // skip non-gc slots
- continue;
-
+ if (!layout->IsGCPtr(i))
{
+ continue;
+ }
- unsigned offset = varDsc->lvStkOffs + i * TARGET_POINTER_SIZE;
+ unsigned offset = varDsc->lvStkOffs + i * TARGET_POINTER_SIZE;
#if DOUBLE_ALIGN
- // For genDoubleAlign(), locals are addressed relative to ESP and
- // arguments are addressed relative to EBP.
+ // For genDoubleAlign(), locals are addressed relative to ESP and
+ // arguments are addressed relative to EBP.
- if (compiler->genDoubleAlign() && varDsc->lvIsParam && !varDsc->lvIsRegArg)
- offset += compiler->codeGen->genTotalFrameSize();
+ if (compiler->genDoubleAlign() && varDsc->lvIsParam && !varDsc->lvIsRegArg)
+ {
+ offset += compiler->codeGen->genTotalFrameSize();
+ }
#endif
- if (gcPtrs[i] == TYPE_GC_BYREF)
- offset |= byref_OFFSET_FLAG; // indicate it is a byref GC pointer
+ if (layout->GetGCPtrType(i) == TYP_BYREF)
+ {
+ offset |= byref_OFFSET_FLAG; // indicate it is a byref GC pointer
+ }
- int encodedoffset = lastoffset - offset;
- lastoffset = offset;
+ int encodedoffset = lastoffset - offset;
+ lastoffset = offset;
- if (mask == 0)
- totalSize += encodeSigned(NULL, encodedoffset);
- else
- {
- unsigned sz = encodeSigned(dest, encodedoffset);
- dest += sz;
- totalSize += sz;
- }
+ if (mask == 0)
+ {
+ totalSize += encodeSigned(NULL, encodedoffset);
+ }
+ else
+ {
+ unsigned sz = encodeSigned(dest, encodedoffset);
+ dest += sz;
+ totalSize += sz;
}
}
}
// If this is a TYP_STRUCT, handle its GC pointers.
// Note that the enregisterable struct types cannot have GC pointers in them.
- if ((varDsc->lvType == TYP_STRUCT) && varDsc->lvOnFrame && (varDsc->lvExactSize >= TARGET_POINTER_SIZE))
+ if ((varDsc->TypeGet() == TYP_STRUCT) && varDsc->lvOnFrame && (varDsc->lvExactSize >= TARGET_POINTER_SIZE))
{
- unsigned slots = compiler->lvaLclSize(varNum) / TARGET_POINTER_SIZE;
- BYTE* gcPtrs = compiler->lvaGetGcLayout(varNum);
+ ClassLayout* layout = varDsc->GetLayout();
+ unsigned slots = layout->GetSlotCount();
- // walk each member of the array
for (unsigned i = 0; i < slots; i++)
{
- if (gcPtrs[i] == TYPE_GC_NONE)
- { // skip non-gc slots
+ if (!layout->IsGCPtr(i))
+ {
continue;
}
offset += compiler->codeGen->genTotalFrameSize();
#endif
GcSlotFlags flags = GC_SLOT_UNTRACKED;
- if (gcPtrs[i] == TYPE_GC_BYREF)
+ if (layout->GetGCPtrType(i) == TYP_BYREF)
{
flags = (GcSlotFlags)(flags | GC_SLOT_INTERIOR);
}
untrackedCount++;
}
- else if (varDsc->lvType == TYP_STRUCT && varDsc->lvOnFrame && (varDsc->lvExactSize >= TARGET_POINTER_SIZE))
+ else if ((varDsc->TypeGet() == TYP_STRUCT) && varDsc->lvOnFrame)
{
- // A struct will have gcSlots only if it is at least TARGET_POINTER_SIZE.
- unsigned slots = compiler->lvaLclSize(varNum) / TARGET_POINTER_SIZE;
- BYTE* gcPtrs = compiler->lvaGetGcLayout(varNum);
-
- // walk each member of the array
- for (unsigned i = 0; i < slots; i++)
- {
- if (gcPtrs[i] != TYPE_GC_NONE)
- { // count only gc slots
- untrackedCount++;
- }
- }
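+ // GetGCPtrCount() is zero for structs too small to contain a GC slot, making the
+ // removed lvExactSize check unnecessary.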
+ untrackedCount += varDsc->GetLayout()->GetGCPtrCount();
}
}
GenTree::s_gtNodeSizes[GT_ARR_INDEX] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_ARR_OFFSET] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_RET_EXPR] = TREE_NODE_SZ_LARGE;
- GenTree::s_gtNodeSizes[GT_OBJ] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_FIELD] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_CMPXCHG] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_QMARK] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_LEA] = TREE_NODE_SZ_LARGE;
- GenTree::s_gtNodeSizes[GT_STORE_OBJ] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_DYN_BLK] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_STORE_DYN_BLK] = TREE_NODE_SZ_LARGE;
GenTree::s_gtNodeSizes[GT_INTRINSIC] = TREE_NODE_SZ_LARGE;
static_assert_no_msg(sizeof(GenTreeIndir) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeStoreInd) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeAddrMode) <= TREE_NODE_SZ_SMALL);
- static_assert_no_msg(sizeof(GenTreeObj) <= TREE_NODE_SZ_LARGE); // *** large node
+ static_assert_no_msg(sizeof(GenTreeObj) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeBlk) <= TREE_NODE_SZ_SMALL);
+ static_assert_no_msg(sizeof(GenTreeDynBlk) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeRetExpr) <= TREE_NODE_SZ_LARGE); // *** large node
static_assert_no_msg(sizeof(GenTreeILOffset) <= TREE_NODE_SZ_SMALL);
static_assert_no_msg(sizeof(GenTreeStmt) <= TREE_NODE_SZ_LARGE); // *** large node
}
break;
case GT_OBJ:
- if (op1->AsObj()->gtClass != op2->AsObj()->gtClass)
+ if (op1->AsObj()->GetLayout() != op2->AsObj()->GetLayout())
{
return false;
}
case GT_OBJ:
hash =
- genTreeHashAdd(hash, static_cast<unsigned>(reinterpret_cast<uintptr_t>(tree->gtObj.gtClass)));
+ genTreeHashAdd(hash,
+ static_cast<unsigned>(reinterpret_cast<uintptr_t>(tree->AsObj()->GetLayout())));
break;
// For the ones below no extra argument matters for comparison.
case GT_BOX:
case GT_BLK:
case GT_STORE_BLK:
- hash += tree->gtBlk.gtBlkSize;
+ hash += tree->AsBlk()->GetLayout()->GetSize();
break;
case GT_OBJ:
case GT_STORE_OBJ:
- hash ^= PtrToUlong(tree->AsObj()->gtClass);
+ hash ^= PtrToUlong(tree->AsObj()->GetLayout());
break;
case GT_DYN_BLK:
// Return Value:
// Returns a node representing the struct value at the given address.
//
-// Assumptions:
-// Any entry and exit conditions, such as required preconditions of
-// data structures, memory to be freed by caller, etc.
-//
// Notes:
// It will currently return a GT_OBJ node for any struct type, but may
// return a GT_IND or a non-indirection for a scalar type.
-// The node will not yet have its GC info initialized. This is because
-// we may not need this info if this is an r-value.
GenTree* Compiler::gtNewObjNode(CORINFO_CLASS_HANDLE structHnd, GenTree* addr)
{
var_types nodeType = impNormStructType(structHnd);
assert(varTypeIsStruct(nodeType));
- unsigned size = info.compCompHnd->getClassSize(structHnd);
- // It would be convenient to set the GC info at this time, but we don't actually require
- // it unless this is going to be a destination.
if (!varTypeIsStruct(nodeType))
{
if ((addr->gtOper == GT_ADDR) && (addr->gtGetOp1()->TypeGet() == nodeType))
return gtNewOperNode(GT_IND, nodeType, addr);
}
}
- GenTreeBlk* newBlkOrObjNode = new (this, GT_OBJ) GenTreeObj(nodeType, addr, structHnd, size);
+
+ GenTreeObj* objNode = new (this, GT_OBJ) GenTreeObj(nodeType, addr, typGetObjLayout(structHnd));
// An Obj is not a global reference if it is known to be a local struct.
if ((addr->gtFlags & GTF_GLOB_REF) == 0)
GenTreeLclVarCommon* lclNode = addr->IsLocalAddrExpr();
if (lclNode != nullptr)
{
- newBlkOrObjNode->gtFlags |= GTF_IND_NONFAULTING;
+ objNode->gtFlags |= GTF_IND_NONFAULTING;
if (!lvaIsImplicitByRefLocal(lclNode->gtLclNum))
{
- newBlkOrObjNode->gtFlags &= ~GTF_GLOB_REF;
+ objNode->gtFlags &= ~GTF_GLOB_REF;
}
}
}
- return newBlkOrObjNode;
+ return objNode;
}
//------------------------------------------------------------------------
void Compiler::gtSetObjGcInfo(GenTreeObj* objNode)
{
- CORINFO_CLASS_HANDLE structHnd = objNode->gtClass;
- var_types nodeType = objNode->TypeGet();
- unsigned size = objNode->gtBlkSize;
- unsigned slots = 0;
- unsigned gcPtrCount = 0;
- BYTE* gcPtrs = nullptr;
-
- assert(varTypeIsStruct(nodeType));
- assert(size == info.compCompHnd->getClassSize(structHnd));
- assert(nodeType == impNormStructType(structHnd));
+ assert(varTypeIsStruct(objNode->TypeGet()));
+ assert(objNode->TypeGet() == impNormStructType(objNode->GetLayout()->GetClassHandle()));
- if (nodeType == TYP_STRUCT)
+ if (!objNode->GetLayout()->HasGCPtr())
{
- if (size >= TARGET_POINTER_SIZE)
- {
- // Get the GC fields info
- var_types simdBaseType; // Dummy argument
- slots = roundUp(size, TARGET_POINTER_SIZE) / TARGET_POINTER_SIZE;
- gcPtrs = new (this, CMK_ASTNode) BYTE[slots];
- nodeType = impNormStructType(structHnd, gcPtrs, &gcPtrCount, &simdBaseType);
- }
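+ // Without GC pointers the copy doesn't need CpObj codegen, so retype the node
+ // to the corresponding BLK oper.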
+ objNode->SetOper(objNode->OperIs(GT_OBJ) ? GT_BLK : GT_STORE_BLK);
}
- objNode->SetGCInfo(gcPtrs, gcPtrCount, slots);
- assert(objNode->gtType == nodeType);
}
//------------------------------------------------------------------------
}
}
}
- return new (this, GT_BLK) GenTreeBlk(GT_BLK, blkType, addr, size);
+ return new (this, GT_BLK) GenTreeBlk(GT_BLK, blkType, addr, typGetBlkLayout(size));
}
// Creates a new assignment node for a CpObj.
if (lhs->OperIsBlk())
{
- size = lhs->AsBlk()->gtBlkSize;
+ size = lhs->AsBlk()->GetLayout()->GetSize();
if (lhs->OperGet() == GT_OBJ)
{
gtSetObjGcInfo(lhs->AsObj());
assert(dst->TypeGet() != TYP_STRUCT);
return;
}
-#ifdef DEBUG
- // If the copy involves GC pointers, the caller must have already set
- // the node additional members (gtGcPtrs, gtGcPtrCount, gtSlots) on the dst.
- if ((dst->gtOper == GT_OBJ) && dst->AsBlk()->HasGCPtr())
- {
- GenTreeObj* objNode = dst->AsObj();
- assert(objNode->gtGcPtrs != nullptr);
- assert(!IsUninitialized(objNode->gtGcPtrs));
- assert(!IsUninitialized(objNode->gtGcPtrCount));
- assert(!IsUninitialized(objNode->gtSlots) && objNode->gtSlots > 0);
-
- for (unsigned i = 0; i < objNode->gtGcPtrCount; ++i)
- {
- CorInfoGCType t = (CorInfoGCType)objNode->gtGcPtrs[i];
- switch (t)
- {
- case TYPE_GC_NONE:
- case TYPE_GC_REF:
- case TYPE_GC_BYREF:
- case TYPE_GC_OTHER:
- break;
- default:
- unreached();
- }
- }
- }
-#endif // DEBUG
/* In the case of CpBlk, we want to avoid generating
* nodes where the source and destination are the same
break;
case GT_OBJ:
- copy = new (this, GT_OBJ)
- GenTreeObj(tree->TypeGet(), tree->gtOp.gtOp1, tree->AsObj()->gtClass, tree->gtBlk.gtBlkSize);
- copy->AsObj()->CopyGCInfo(tree->AsObj());
+ copy =
+ new (this, GT_OBJ) GenTreeObj(tree->TypeGet(), tree->AsObj()->Addr(), tree->AsObj()->GetLayout());
copy->gtBlk.gtBlkOpGcUnsafe = tree->gtBlk.gtBlkOpGcUnsafe;
break;
case GT_BLK:
- copy = new (this, GT_BLK) GenTreeBlk(GT_BLK, tree->TypeGet(), tree->gtOp.gtOp1, tree->gtBlk.gtBlkSize);
+ copy = new (this, GT_BLK)
+ GenTreeBlk(GT_BLK, tree->TypeGet(), tree->AsBlk()->Addr(), tree->AsBlk()->GetLayout());
copy->gtBlk.gtBlkOpGcUnsafe = tree->gtBlk.gtBlkOpGcUnsafe;
break;
}
else if (tree->OperIsBlk() && !tree->OperIsDynBlk())
{
- sprintf_s(bufp, sizeof(buf), " %s(%d)", name, tree->AsBlk()->gtBlkSize);
+ sprintf_s(bufp, sizeof(buf), " %s(%d)", name, tree->AsBlk()->GetLayout()->GetSize());
}
else
{
if (dstTyp == TYP_UNDEF)
{
varDsc->lvType = dstTyp = genActualType(valTyp);
- if (varTypeIsGC(dstTyp))
- {
- varDsc->lvStructGcCount = 1;
- }
#if FEATURE_SIMD
- else if (varTypeIsSIMD(dstTyp))
+ if (varTypeIsSIMD(dstTyp))
{
varDsc->lvSIMDType = 1;
}
if (blkNode != nullptr)
{
GenTree* destAddr = blkNode->Addr();
- unsigned width = blkNode->gtBlkSize;
+ unsigned width = blkNode->Size();
// Do we care about whether this assigns the entire variable?
if (pIsEntire != nullptr && blkNode->OperIs(GT_DYN_BLK))
{
structHnd = impGetRefAnyClass();
break;
case GT_OBJ:
- structHnd = tree->gtObj.gtClass;
+ structHnd = tree->AsObj()->GetLayout()->GetClassHandle();
break;
case GT_CALL:
structHnd = tree->gtCall.gtRetClsHnd;
#include "jithashtable.h"
#include "simd.h"
#include "namedintrinsiclist.h"
+#include "layout.h"
// Debugging GenTree is much easier if we add a magic virtual function to make the debugger able to figure out what type
// it's got. This is enabled by default in DEBUG. To enable it in RET builds (temporarily!), you need to change the
struct GenTreeBlk : public GenTreeIndir
{
+private:
+ ClassLayout* m_layout;
+
public:
+ ClassLayout* GetLayout() const
+ {
+ return m_layout;
+ }
+
+ void SetLayout(ClassLayout* layout)
+ {
+ assert((layout != nullptr) || OperIs(GT_DYN_BLK, GT_STORE_DYN_BLK));
+ m_layout = layout;
+ }
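+
+ // Only GT_DYN_BLK and GT_STORE_DYN_BLK may have a null layout; their size is
+ // provided by the gtDynamicSize operand instead.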
+
// The data to be stored (null for GT_BLK)
GenTree*& Data()
{
// The size of the buffer to be copied.
unsigned Size() const
{
- return gtBlkSize;
+ assert((m_layout != nullptr) || OperIs(GT_DYN_BLK, GT_STORE_DYN_BLK));
+ return (m_layout != nullptr) ? m_layout->GetSize() : 0;
}
- unsigned gtBlkSize;
-
- // Return true iff the object being copied contains one or more GC pointers.
- bool HasGCPtr();
-
// True if this BlkOpNode is a volatile memory operation.
bool IsVolatile() const
{
bool gtBlkOpGcUnsafe;
- GenTreeBlk(genTreeOps oper, var_types type, GenTree* addr, unsigned size)
+ GenTreeBlk(genTreeOps oper, var_types type, GenTree* addr, ClassLayout* layout)
: GenTreeIndir(oper, type, addr, nullptr)
- , gtBlkSize(size)
+ , m_layout(layout)
, gtBlkOpKind(BlkOpKindInvalid)
, gtBlkOpGcUnsafe(false)
{
assert(OperIsBlk(oper));
+ assert((layout != nullptr) || OperIs(GT_DYN_BLK, GT_STORE_DYN_BLK));
gtFlags |= (addr->gtFlags & GTF_ALL_EFFECT);
}
- GenTreeBlk(genTreeOps oper, var_types type, GenTree* addr, GenTree* data, unsigned size)
- : GenTreeIndir(oper, type, addr, data), gtBlkSize(size), gtBlkOpKind(BlkOpKindInvalid), gtBlkOpGcUnsafe(false)
+ GenTreeBlk(genTreeOps oper, var_types type, GenTree* addr, GenTree* data, ClassLayout* layout)
+ : GenTreeIndir(oper, type, addr, data), m_layout(layout), gtBlkOpKind(BlkOpKindInvalid), gtBlkOpGcUnsafe(false)
{
assert(OperIsBlk(oper));
+ assert((layout != nullptr) || OperIs(GT_DYN_BLK, GT_STORE_DYN_BLK));
gtFlags |= (addr->gtFlags & GTF_ALL_EFFECT);
gtFlags |= (data->gtFlags & GTF_ALL_EFFECT);
}
struct GenTreeObj : public GenTreeBlk
{
- CORINFO_CLASS_HANDLE gtClass; // the class of the object
-
- // If non-null, this array represents the gc-layout of the class.
- // This may be simply copied when cloning this node, because it is not changed once computed.
- BYTE* gtGcPtrs;
-
- // If non-zero, this is the number of slots in the class layout that
- // contain gc-pointers.
- __declspec(property(get = GetGcPtrCount)) unsigned gtGcPtrCount;
- unsigned GetGcPtrCount() const
- {
- assert(_gtGcPtrCount != UINT32_MAX);
- return _gtGcPtrCount;
- }
- unsigned _gtGcPtrCount;
-
- // If non-zero, the number of pointer-sized slots that constitutes the class token.
- unsigned gtSlots;
-
- bool IsGCInfoInitialized()
- {
- return (_gtGcPtrCount != UINT32_MAX);
- }
-
- void SetGCInfo(BYTE* gcPtrs, unsigned gcPtrCount, unsigned slots)
- {
- gtGcPtrs = gcPtrs;
- _gtGcPtrCount = gcPtrCount;
- gtSlots = slots;
- if (gtGcPtrCount != 0)
- {
- // We assume that we cannot have a struct with GC pointers that is not a multiple
- // of the register size.
- // The EE currently does not allow this, but it could change.
- // Let's assert it just to be safe.
- noway_assert(roundUp(gtBlkSize, REGSIZE_BYTES) == gtBlkSize);
- }
- else
- {
- genTreeOps newOper = GT_BLK;
- if (gtOper == GT_STORE_OBJ)
- {
- newOper = GT_STORE_BLK;
- }
- else
- {
- assert(gtOper == GT_OBJ);
- }
- SetOper(newOper);
- }
- }
-
- void CopyGCInfo(GenTreeObj* srcObj)
- {
- if (srcObj->IsGCInfoInitialized())
- {
- gtGcPtrs = srcObj->gtGcPtrs;
- _gtGcPtrCount = srcObj->gtGcPtrCount;
- gtSlots = srcObj->gtSlots;
- }
- }
-
void Init()
{
// By default, an OBJ is assumed to be a global reference unless it is local.
{
gtFlags |= GTF_GLOB_REF;
}
- noway_assert(gtClass != NO_CLASS_HANDLE);
- _gtGcPtrCount = UINT32_MAX;
+ noway_assert(GetLayout()->GetClassHandle() != NO_CLASS_HANDLE);
}
- GenTreeObj(var_types type, GenTree* addr, CORINFO_CLASS_HANDLE cls, unsigned size)
- : GenTreeBlk(GT_OBJ, type, addr, size), gtClass(cls)
+ GenTreeObj(var_types type, GenTree* addr, ClassLayout* layout) : GenTreeBlk(GT_OBJ, type, addr, layout)
{
Init();
}
- GenTreeObj(var_types type, GenTree* addr, GenTree* data, CORINFO_CLASS_HANDLE cls, unsigned size)
- : GenTreeBlk(GT_STORE_OBJ, type, addr, data, size), gtClass(cls)
+ GenTreeObj(var_types type, GenTree* addr, GenTree* data, ClassLayout* layout)
+ : GenTreeBlk(GT_STORE_OBJ, type, addr, data, layout)
{
Init();
}
bool gtEvalSizeFirst;
GenTreeDynBlk(GenTree* addr, GenTree* dynamicSize)
- : GenTreeBlk(GT_DYN_BLK, TYP_STRUCT, addr, 0), gtDynamicSize(dynamicSize), gtEvalSizeFirst(false)
+ : GenTreeBlk(GT_DYN_BLK, TYP_STRUCT, addr, nullptr), gtDynamicSize(dynamicSize), gtEvalSizeFirst(false)
{
// Conservatively the 'addr' could be null or point into the global heap.
gtFlags |= GTF_EXCEPT | GTF_GLOB_REF;
#ifdef FEATURE_PUT_STRUCT_ARG_STK
, gtPutArgStkKind(Kind::Invalid)
, gtNumSlots(numSlots)
- , gtNumberReferenceSlots(0)
- , gtGcPtrs(nullptr)
#endif // FEATURE_PUT_STRUCT_ARG_STK
#if defined(DEBUG) || defined(UNIX_X86_ABI)
, gtCall(callNode)
return (varTypeIsSIMD(gtOp1) && (gtNumSlots == 3));
}
- //------------------------------------------------------------------------
- // setGcPointers: Sets the number of references and the layout of the struct object returned by the VM.
- //
- // Arguments:
- // numPointers - Number of pointer references.
- // pointers - layout of the struct (with pointers marked.)
- //
- // Return Value:
- // None
- //
- // Notes:
- // This data is used in the codegen for GT_PUTARG_STK to decide how to copy the struct to the stack by value.
- // If no pointer references are used, block copying instructions are used.
- // Otherwise the pointer reference slots are copied atomically in a way that gcinfo is emitted.
- // Any non pointer references between the pointer reference slots are copied in block fashion.
- //
- void setGcPointers(unsigned numPointers, BYTE* pointers)
- {
- gtNumberReferenceSlots = numPointers;
- gtGcPtrs = pointers;
- }
-
// Instruction selection: during codegen time, what code sequence we will be using
// to encode this operation.
// TODO-Throughput: The following information should be obtained from the child
return (gtPutArgStkKind == Kind::Push) || (gtPutArgStkKind == Kind::PushAllSlots);
}
- unsigned gtNumSlots; // Number of slots for the argument to be passed on stack
- unsigned gtNumberReferenceSlots; // Number of reference slots.
- BYTE* gtGcPtrs; // gcPointers
+ unsigned gtNumSlots; // Number of slots for the argument to be passed on stack
#else // !FEATURE_PUT_STRUCT_ARG_STK
unsigned getArgSize();
return this->gtCast.gtCastType;
}
-//-----------------------------------------------------------------------------------
-// HasGCPtr: determine whether this block op involves GC pointers
-//
-// Arguments:
-// None
-//
-// Return Value:
-// Returns true iff the object being copied contains one or more GC pointers.
-//
-// Notes:
-// Of the block nodes, only GT_OBJ and ST_STORE_OBJ are allowed to have GC pointers.
-//
-inline bool GenTreeBlk::HasGCPtr()
-{
- if ((gtOper == GT_OBJ) || (gtOper == GT_STORE_OBJ))
- {
- return (AsObj()->gtGcPtrCount != 0);
- }
- return false;
-}
-
inline bool GenTree::isUsedFromSpillTemp() const
{
// If spilled and no reg at use, then it is used from the spill temp location rather than being reloaded.
lvaTable[shadowVar].lvLclFieldExpr = varDsc->lvLclFieldExpr;
lvaTable[shadowVar].lvLiveAcrossUCall = varDsc->lvLiveAcrossUCall;
#endif
- lvaTable[shadowVar].lvVerTypeInfo = varDsc->lvVerTypeInfo;
- lvaTable[shadowVar].lvGcLayout = varDsc->lvGcLayout;
+ lvaTable[shadowVar].lvVerTypeInfo = varDsc->lvVerTypeInfo;
+ if (varTypeIsStruct(type))
+ {
+ lvaTable[shadowVar].SetLayout(varDsc->GetLayout());
+ }
lvaTable[shadowVar].lvIsUnsafeBuffer = varDsc->lvIsUnsafeBuffer;
lvaTable[shadowVar].lvIsPtr = varDsc->lvIsPtr;
asgType = impNormStructType(structHnd);
if (src->gtOper == GT_OBJ)
{
- assert(src->gtObj.gtClass == structHnd);
+ assert(src->AsObj()->GetLayout()->GetClassHandle() == structHnd);
}
}
else if (src->gtOper == GT_INDEX)
if (oper == GT_OBJ && willDeref)
{
- assert(structVal->gtObj.gtClass == structHnd);
+ assert(structVal->AsObj()->GetLayout()->GetClassHandle() == structHnd);
return (structVal->gtObj.Addr());
}
else if (oper == GT_CALL || oper == GT_RET_EXPR || oper == GT_OBJ || oper == GT_MKREFANY ||
}
//------------------------------------------------------------------------
-// impNormStructType: Given a (known to be) struct class handle structHnd, normalize its type,
-// and optionally determine the GC layout of the struct.
+// impNormStructType: Normalize the type of a (known to be) struct class handle.
//
// Arguments:
// structHnd - The class handle for the struct type of interest.
-// gcLayout - (optional, default nullptr) - a BYTE pointer, allocated by the caller,
-// into which the gcLayout will be written.
-// pNumGCVars - (optional, default nullptr) - if non-null, a pointer to an unsigned,
-// which will be set to the number of GC fields in the struct.
// pSimdBaseType - (optional, default nullptr) - if non-null, and the struct is a SIMD
// type, set to the SIMD base type
//
// Return Value:
// The JIT type for the struct (e.g. TYP_STRUCT, or TYP_SIMD*).
-// The gcLayout will be returned using the pointers provided by the caller, if non-null.
// It may also modify the compFloatingPointUsed flag if the type is a SIMD type.
//
-// Assumptions:
-// The caller must set gcLayout to nullptr OR ensure that it is large enough
-// (see ICorStaticInfo::getClassGClayout in corinfo.h).
-//
// Notes:
// Normalizing the type involves examining the struct type to determine if it should
// be modified to one that is handled specially by the JIT, possibly being a candidate
// for full enregistration, e.g. TYP_SIMD16.
-var_types Compiler::impNormStructType(CORINFO_CLASS_HANDLE structHnd,
- BYTE* gcLayout,
- unsigned* pNumGCVars,
- var_types* pSimdBaseType)
+var_types Compiler::impNormStructType(CORINFO_CLASS_HANDLE structHnd, var_types* pSimdBaseType)
{
assert(structHnd != NO_CLASS_HANDLE);
- const DWORD structFlags = info.compCompHnd->getClassAttribs(structHnd);
- var_types structType = TYP_STRUCT;
-
- // On coreclr the check for GC includes a "may" to account for the special
- // ByRef like span structs. The added check for "CONTAINS_STACK_PTR" is the particular bit.
- // When this is set the struct will contain a ByRef that could be a GC pointer or a native
- // pointer.
- const bool mayContainGCPtrs =
- ((structFlags & CORINFO_FLG_CONTAINS_STACK_PTR) != 0 || ((structFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0));
+ var_types structType = TYP_STRUCT;
#ifdef FEATURE_SIMD
- // Check to see if this is a SIMD type.
- if (supportSIMDTypes() && !mayContainGCPtrs)
+ if (supportSIMDTypes())
{
- unsigned originalSize = info.compCompHnd->getClassSize(structHnd);
+ const DWORD structFlags = info.compCompHnd->getClassAttribs(structHnd);
- if ((originalSize >= minSIMDStructBytes()) && (originalSize <= maxSIMDStructBytes()))
+ // Don't bother if the struct contains GC references or byrefs; it can't be a SIMD type.
+ if ((structFlags & (CORINFO_FLG_CONTAINS_GC_PTR | CORINFO_FLG_CONTAINS_STACK_PTR)) == 0)
{
- unsigned int sizeBytes;
- var_types simdBaseType = getBaseTypeAndSizeOfSIMDType(structHnd, &sizeBytes);
- if (simdBaseType != TYP_UNKNOWN)
+ unsigned originalSize = info.compCompHnd->getClassSize(structHnd);
+
+ if ((originalSize >= minSIMDStructBytes()) && (originalSize <= maxSIMDStructBytes()))
{
- assert(sizeBytes == originalSize);
- structType = getSIMDTypeForSize(sizeBytes);
- if (pSimdBaseType != nullptr)
+ unsigned int sizeBytes;
+ var_types simdBaseType = getBaseTypeAndSizeOfSIMDType(structHnd, &sizeBytes);
+ if (simdBaseType != TYP_UNKNOWN)
{
- *pSimdBaseType = simdBaseType;
+ assert(sizeBytes == originalSize);
+ structType = getSIMDTypeForSize(sizeBytes);
+ if (pSimdBaseType != nullptr)
+ {
+ *pSimdBaseType = simdBaseType;
+ }
+ // Also indicate that we use floating point registers.
+ compFloatingPointUsed = true;
}
- // Also indicate that we use floating point registers.
- compFloatingPointUsed = true;
}
}
}
#endif // FEATURE_SIMD
- // Fetch GC layout info if requested
- if (gcLayout != nullptr)
- {
- unsigned numGCVars = info.compCompHnd->getClassGClayout(structHnd, gcLayout);
-
- // Verify that the quick test up above via the class attributes gave a
- // safe view of the type's GCness.
- //
- // Note there are cases where mayContainGCPtrs is true but getClassGClayout
- // does not report any gc fields.
-
- assert(mayContainGCPtrs || (numGCVars == 0));
-
- if (pNumGCVars != nullptr)
- {
- *pNumGCVars = numGCVars;
- }
- }
- else
- {
- // Can't safely ask for number of GC pointers without also
- // asking for layout.
- assert(pNumGCVars == nullptr);
- }
-
return structType;
}
op = op1->gtOp.gtOp1;
goto REDO_RETURN_NODE;
}
- op->gtObj.gtClass = NO_CLASS_HANDLE;
op->ChangeOperUnchecked(GT_IND);
op->gtFlags |= GTF_IND_TGTANYWHERE;
}
if (op3->IsCnsIntOrI())
{
size = (unsigned)op3->AsIntConCommon()->IconValue();
- op1 = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, size);
+ op1 = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, typGetBlkLayout(size));
}
else
{
if (op3->IsCnsIntOrI())
{
size = (unsigned)op3->AsIntConCommon()->IconValue();
- op1 = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, size);
+ op1 = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, op1, typGetBlkLayout(size));
}
else
{
--- /dev/null
+#include "jitpch.h"
+#include "layout.h"
+#include "compiler.h"
+
+// Keeps track of layout objects associated with class handles or block sizes. A layout is usually
+// referenced by a pointer (ClassLayout*) but can also be referenced by a number (unsigned,
+// FirstLayoutNum-based), when space constraints or other needs make numbers more appealing.
+// Layout objects are immutable and there's always a 1:1 mapping between class handles/block sizes,
+// pointers and numbers (e.g. class handle equality implies ClassLayout pointer equality).
+class ClassLayoutTable
+{
+ // Each layout is assigned a number, starting with TYP_UNKNOWN + 1. This way one could use a single
+ // unsigned value to represent the notion of type - values below TYP_UNKNOWN are var_types and values
+ // above it are struct layouts.
+ static constexpr unsigned FirstLayoutNum = TYP_UNKNOWN + 1;
+
+ typedef JitHashTable<unsigned, JitSmallPrimitiveKeyFuncs<unsigned>, unsigned> BlkLayoutIndexMap;
+ typedef JitHashTable<CORINFO_CLASS_HANDLE, JitPtrKeyFuncs<CORINFO_CLASS_STRUCT_>, unsigned> ObjLayoutIndexMap;
+
+ union {
+ // Up to 3 layouts can be stored "inline" and finding a layout by handle/size can be done using linear search.
+ // Most methods need no more than 2 layouts.
+ ClassLayout* m_layoutArray[3];
+ // Otherwise a dynamic array is allocated and hashtables are used to map from handle/size to layout array index.
+ struct
+ {
+ ClassLayout** m_layoutLargeArray;
+ BlkLayoutIndexMap* m_blkLayoutMap;
+ ObjLayoutIndexMap* m_objLayoutMap;
+ };
+ };
+ // The number of layout objects stored in this table.
+ unsigned m_layoutCount;
+ // The capacity of m_layoutLargeArray (when more than 3 layouts are stored).
+ unsigned m_layoutLargeCapacity;
+
+public:
+ ClassLayoutTable() : m_layoutCount(0), m_layoutLargeCapacity(0)
+ {
+ }
+
+ // Get the layout number (FirstLayoutNum-based) of the specified layout.
+ unsigned GetLayoutNum(ClassLayout* layout) const
+ {
+ return GetLayoutIndex(layout) + FirstLayoutNum;
+ }
+
+ // Get the layout having the specified layout number (FirstLayoutNum-based).
+ ClassLayout* GetLayoutByNum(unsigned num) const
+ {
+ assert(num >= FirstLayoutNum);
+ return GetLayoutByIndex(num - FirstLayoutNum);
+ }
+
+ // Get the layout having the specified size but no class handle.
+ ClassLayout* GetBlkLayout(Compiler* compiler, unsigned blockSize)
+ {
+ return GetLayoutByIndex(GetBlkLayoutIndex(compiler, blockSize));
+ }
+
+ // Get the number of a layout having the specified size but no class handle.
+ unsigned GetBlkLayoutNum(Compiler* compiler, unsigned blockSize)
+ {
+ return GetBlkLayoutIndex(compiler, blockSize) + FirstLayoutNum;
+ }
+
+ // Get the layout for the specified class handle.
+ ClassLayout* GetObjLayout(Compiler* compiler, CORINFO_CLASS_HANDLE classHandle)
+ {
+ return GetLayoutByIndex(GetObjLayoutIndex(compiler, classHandle));
+ }
+
+ // Get the number of a layout for the specified class handle.
+ unsigned GetObjLayoutNum(Compiler* compiler, CORINFO_CLASS_HANDLE classHandle)
+ {
+ return GetObjLayoutIndex(compiler, classHandle) + FirstLayoutNum;
+ }
+
+private:
+ bool HasSmallCapacity() const
+ {
+ return m_layoutCount <= _countof(m_layoutArray);
+ }
+
+ ClassLayout* GetLayoutByIndex(unsigned index) const
+ {
+ assert(index < m_layoutCount);
+
+ if (HasSmallCapacity())
+ {
+ return m_layoutArray[index];
+ }
+ else
+ {
+ return m_layoutLargeArray[index];
+ }
+ }
+
+ unsigned GetLayoutIndex(ClassLayout* layout) const
+ {
+ assert(layout != nullptr);
+
+ if (HasSmallCapacity())
+ {
+ for (unsigned i = 0; i < m_layoutCount; i++)
+ {
+ if (m_layoutArray[i] == layout)
+ {
+ return i;
+ }
+ }
+ }
+ else
+ {
+ unsigned index = 0;
+ if ((layout->IsBlockLayout() && m_blkLayoutMap->Lookup(layout->GetSize(), &index)) ||
+ m_objLayoutMap->Lookup(layout->GetClassHandle(), &index))
+ {
+ return index;
+ }
+ }
+
+ unreached();
+ }
+
+ unsigned GetBlkLayoutIndex(Compiler* compiler, unsigned blockSize)
+ {
+ if (HasSmallCapacity())
+ {
+ for (unsigned i = 0; i < m_layoutCount; i++)
+ {
+ if (m_layoutArray[i]->IsBlockLayout() && (m_layoutArray[i]->GetSize() == blockSize))
+ {
+ return i;
+ }
+ }
+ }
+ else
+ {
+ unsigned index;
+ if (m_blkLayoutMap->Lookup(blockSize, &index))
+ {
+ return index;
+ }
+ }
+
+ return AddBlkLayout(compiler, CreateBlkLayout(compiler, blockSize));
+ }
+
+ ClassLayout* CreateBlkLayout(Compiler* compiler, unsigned blockSize)
+ {
+ return new (compiler, CMK_ClassLayout) ClassLayout(blockSize);
+ }
+
+ unsigned AddBlkLayout(Compiler* compiler, ClassLayout* layout)
+ {
+ if (m_layoutCount < _countof(m_layoutArray))
+ {
+ m_layoutArray[m_layoutCount] = layout;
+ return m_layoutCount++;
+ }
+
+ unsigned index = AddLayoutLarge(compiler, layout);
+ m_blkLayoutMap->Set(layout->GetSize(), index);
+ return index;
+ }
+
+ unsigned GetObjLayoutIndex(Compiler* compiler, CORINFO_CLASS_HANDLE classHandle)
+ {
+ assert(classHandle != NO_CLASS_HANDLE);
+
+ if (HasSmallCapacity())
+ {
+ for (unsigned i = 0; i < m_layoutCount; i++)
+ {
+ if (m_layoutArray[i]->GetClassHandle() == classHandle)
+ {
+ return i;
+ }
+ }
+ }
+ else
+ {
+ unsigned index;
+ if (m_objLayoutMap->Lookup(classHandle, &index))
+ {
+ return index;
+ }
+ }
+
+ return AddObjLayout(compiler, CreateObjLayout(compiler, classHandle));
+ }
+
+ ClassLayout* CreateObjLayout(Compiler* compiler, CORINFO_CLASS_HANDLE classHandle)
+ {
+ return ClassLayout::Create(compiler, classHandle);
+ }
+
+ unsigned AddObjLayout(Compiler* compiler, ClassLayout* layout)
+ {
+ if (m_layoutCount < _countof(m_layoutArray))
+ {
+ m_layoutArray[m_layoutCount] = layout;
+ return m_layoutCount++;
+ }
+
+ unsigned index = AddLayoutLarge(compiler, layout);
+ m_objLayoutMap->Set(layout->GetClassHandle(), index);
+ return index;
+ }
+
+ unsigned AddLayoutLarge(Compiler* compiler, ClassLayout* layout)
+ {
+ if (m_layoutCount >= m_layoutLargeCapacity)
+ {
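+ // Grow (or initially allocate) the dynamic array; on the first transition
+ // away from the inline array the hash maps are also built from the existing entries.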
+ CompAllocator alloc = compiler->getAllocator(CMK_ClassLayout);
+ unsigned newCapacity = m_layoutCount * 2;
+ ClassLayout** newArray = alloc.allocate<ClassLayout*>(newCapacity);
+
+ if (m_layoutCount <= _countof(m_layoutArray))
+ {
+ BlkLayoutIndexMap* blkLayoutMap = new (alloc) BlkLayoutIndexMap(alloc);
+ ObjLayoutIndexMap* objLayoutMap = new (alloc) ObjLayoutIndexMap(alloc);
+
+ for (unsigned i = 0; i < m_layoutCount; i++)
+ {
+ ClassLayout* l = m_layoutArray[i];
+ newArray[i] = l;
+
+ if (l->IsBlockLayout())
+ {
+ blkLayoutMap->Set(l->GetSize(), i);
+ }
+ else
+ {
+ objLayoutMap->Set(l->GetClassHandle(), i);
+ }
+ }
+
+ m_blkLayoutMap = blkLayoutMap;
+ m_objLayoutMap = objLayoutMap;
+ }
+ else
+ {
+ memcpy(newArray, m_layoutLargeArray, m_layoutCount * sizeof(newArray[0]));
+ }
+
+ m_layoutLargeArray = newArray;
+ m_layoutLargeCapacity = newCapacity;
+ }
+
+ m_layoutLargeArray[m_layoutCount] = layout;
+ return m_layoutCount++;
+ }
+};
+
+ClassLayoutTable* Compiler::typCreateClassLayoutTable()
+{
+ assert(m_classLayoutTable == nullptr);
+
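+ // Inlinees share the inliner's layout table so that ClassLayout pointers and layout
+ // numbers remain unique across the entire compilation; a layout created while
+ // importing an inlinee stays valid in the root compiler.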
+ if (compIsForInlining())
+ {
+ m_classLayoutTable = impInlineInfo->InlinerCompiler->m_classLayoutTable;
+
+ if (m_classLayoutTable == nullptr)
+ {
+ m_classLayoutTable = new (this, CMK_ClassLayout) ClassLayoutTable();
+
+ impInlineInfo->InlinerCompiler->m_classLayoutTable = m_classLayoutTable;
+ }
+ }
+ else
+ {
+ m_classLayoutTable = new (this, CMK_ClassLayout) ClassLayoutTable();
+ }
+
+ return m_classLayoutTable;
+}
+
+ClassLayoutTable* Compiler::typGetClassLayoutTable()
+{
+ if (m_classLayoutTable == nullptr)
+ {
+ return typCreateClassLayoutTable();
+ }
+
+ return m_classLayoutTable;
+}
+
+ClassLayout* Compiler::typGetLayoutByNum(unsigned layoutNum)
+{
+ return typGetClassLayoutTable()->GetLayoutByNum(layoutNum);
+}
+
+unsigned Compiler::typGetLayoutNum(ClassLayout* layout)
+{
+ return typGetClassLayoutTable()->GetLayoutNum(layout);
+}
+
+unsigned Compiler::typGetBlkLayoutNum(unsigned blockSize)
+{
+ return typGetClassLayoutTable()->GetBlkLayoutNum(this, blockSize);
+}
+
+ClassLayout* Compiler::typGetBlkLayout(unsigned blockSize)
+{
+ return typGetClassLayoutTable()->GetBlkLayout(this, blockSize);
+}
+
+unsigned Compiler::typGetObjLayoutNum(CORINFO_CLASS_HANDLE classHandle)
+{
+ return typGetClassLayoutTable()->GetObjLayoutNum(this, classHandle);
+}
+
+ClassLayout* Compiler::typGetObjLayout(CORINFO_CLASS_HANDLE classHandle)
+{
+ return typGetClassLayoutTable()->GetObjLayout(this, classHandle);
+}
+
+ClassLayout* ClassLayout::Create(Compiler* compiler, CORINFO_CLASS_HANDLE classHandle)
+{
+ bool isValueClass = compiler->info.compCompHnd->isValueClass(classHandle);
+ unsigned size;
+
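+ // For value classes the layout records getClassSize; for reference classes it records
+ // the heap instance size, which is what matters when such objects are stack allocated.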
+ if (isValueClass)
+ {
+ size = compiler->info.compCompHnd->getClassSize(classHandle);
+ }
+ else
+ {
+ size = compiler->info.compCompHnd->getHeapClassSize(classHandle);
+ }
+
+ INDEBUG(const char* className = compiler->info.compCompHnd->getClassName(classHandle);)
+
+ ClassLayout* layout =
+ new (compiler, CMK_ClassLayout) ClassLayout(classHandle, isValueClass, size DEBUGARG(className));
+ layout->InitializeGCPtrs(compiler);
+ return layout;
+}
+
+void ClassLayout::InitializeGCPtrs(Compiler* compiler)
+{
+ assert(!m_gcPtrsInitialized);
+ assert(!IsBlockLayout());
+
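+ // A struct smaller than a pointer cannot contain a GC pointer, so there is no need
+ // to query the VM for its GC layout.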
+ if (m_size < TARGET_POINTER_SIZE)
+ {
+ assert(GetSlotCount() == 1);
+ assert(m_gcPtrCount == 0);
+
+ m_gcPtrsArray[0] = TYPE_GC_NONE;
+ }
+ else
+ {
+ BYTE* gcPtrs;
+
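+ // The inline array stores one BYTE per slot, so its size in bytes is also its
+ // capacity in slots.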
+ if (GetSlotCount() > sizeof(m_gcPtrsArray))
+ {
+ gcPtrs = m_gcPtrs = new (compiler, CMK_ClassLayout) BYTE[GetSlotCount()];
+ }
+ else
+ {
+ gcPtrs = m_gcPtrsArray;
+ }
+
+ unsigned gcPtrCount = compiler->info.compCompHnd->getClassGClayout(m_classHandle, gcPtrs);
+
+ assert((gcPtrCount == 0) || ((compiler->info.compCompHnd->getClassAttribs(m_classHandle) &
+ (CORINFO_FLG_CONTAINS_GC_PTR | CORINFO_FLG_CONTAINS_STACK_PTR)) != 0));
+
+ // Since the class size is an unsigned 32-bit value there is no way to have more than
+ // 2^30 slots, so the count fits safely into a 30-bit bit field.
+ assert(gcPtrCount < (1 << 30));
+
+ // We assume that a struct that contains GC pointers always has a size that is a
+ // multiple of the register size.
+ // The EE currently does not allow anything else, but that could change.
+ // Let's assert it just to be safe.
+ noway_assert((gcPtrCount == 0) || (roundUp(m_size, REGSIZE_BYTES) == m_size));
+
+ m_gcPtrCount = gcPtrCount;
+ }
+
+ INDEBUG(m_gcPtrsInitialized = true;)
+}
+
+#ifdef _TARGET_AMD64_
+ClassLayout* ClassLayout::GetPPPQuirkLayout(CompAllocator alloc)
+{
+ assert(m_gcPtrsInitialized);
+ assert(m_classHandle != NO_CLASS_HANDLE);
+ assert(m_isValueClass);
+ assert(m_size == 32);
+
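+ // The quirk layout doubles the reported size from 32 to 64 bytes; the extra four
+ // pointer-sized slots never contain GC pointers.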
+ if (m_pppQuirkLayout == nullptr)
+ {
+ m_pppQuirkLayout = new (alloc) ClassLayout(m_classHandle, m_isValueClass, 64 DEBUGARG(m_className));
+ m_pppQuirkLayout->m_gcPtrCount = m_gcPtrCount;
+
+ static_assert_no_msg(_countof(m_gcPtrsArray) == 8);
+
+ for (int i = 0; i < 4; i++)
+ {
+ m_pppQuirkLayout->m_gcPtrsArray[i] = m_gcPtrsArray[i];
+ }
+
+ for (int i = 4; i < 8; i++)
+ {
+ m_pppQuirkLayout->m_gcPtrsArray[i] = TYPE_GC_NONE;
+ }
+
+ INDEBUG(m_pppQuirkLayout->m_gcPtrsInitialized = true;)
+ }
+
+ return m_pppQuirkLayout;
+}
+#endif // _TARGET_AMD64_
--- /dev/null
+#ifndef LAYOUT_H
+#define LAYOUT_H
+
+#include "jit.h"
+
+// Encapsulates layout information about a class (typically a value class, but it can also
+// be used for reference classes when they are stack allocated). The class handle is optional,
+// allowing the creation of "block" layout objects having a specific size but lacking any other
+// layout information. The JIT uses such layout objects in cases where a class handle is not
+// available (cpblk/initblk operations) or not necessary (classes that do not contain GC pointers).
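+//
+// A typical use is a sketch like the following, walking the pointer-sized slots of a
+// struct and treating GC slots specially ("layout" and the loop body are illustrative):
+//
+//   for (unsigned i = 0; i < layout->GetSlotCount(); i++)
+//   {
+//       if (layout->IsGCPtr(i))
+//       {
+//           // copy slot i using a write barrier
+//       }
+//   }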
+class ClassLayout
+{
+ // Class handle or NO_CLASS_HANDLE for "block" layouts.
+ const CORINFO_CLASS_HANDLE m_classHandle;
+
+ // Size of the layout in bytes (as reported by ICorJitInfo::getClassSize/getHeapClassSize
+ // for non "block" layouts). For "block" layouts this may be 0 due to 0 being a valid size
+ // for cpblk/initblk.
+ const unsigned m_size;
+
+ const unsigned m_isValueClass : 1;
+ INDEBUG(unsigned m_gcPtrsInitialized : 1;)
+ // The number of GC pointers in this layout. Since the maximum size is 2^32-1 the count
+ // always fits in 30 bits.
+ unsigned m_gcPtrCount : 30;
+
+ // Array of CorInfoGCType (as BYTE) that describes the GC layout of the class.
+ // For small classes the array is stored inline, avoiding an extra allocation
+ // and the pointer size overhead.
+ union {
+ BYTE* m_gcPtrs;
+ BYTE m_gcPtrsArray[sizeof(BYTE*)];
+ };
+
+#ifdef _TARGET_AMD64_
+ // A layout that has its size artificially inflated to avoid stack corruption due to
+ // bugs in user code - see Compiler::compQuirkForPPP for details.
+ ClassLayout* m_pppQuirkLayout;
+#endif
+
+ // Class name as reported by ICorJitInfo::getClassName
+ INDEBUG(const char* m_className;)
+
+ // ClassLayout instances should only be obtained via ClassLayoutTable.
+ friend class ClassLayoutTable;
+
+ ClassLayout(unsigned size)
+ : m_classHandle(NO_CLASS_HANDLE)
+ , m_size(size)
+ , m_isValueClass(false)
+#ifdef DEBUG
+ , m_gcPtrsInitialized(true)
+#endif
+ , m_gcPtrCount(0)
+ , m_gcPtrs(nullptr)
+#ifdef _TARGET_AMD64_
+ , m_pppQuirkLayout(nullptr)
+#endif
+#ifdef DEBUG
+ , m_className("block")
+#endif
+ {
+ }
+
+ static ClassLayout* Create(Compiler* compiler, CORINFO_CLASS_HANDLE classHandle);
+
+ ClassLayout(CORINFO_CLASS_HANDLE classHandle, bool isValueClass, unsigned size DEBUGARG(const char* className))
+ : m_classHandle(classHandle)
+ , m_size(size)
+ , m_isValueClass(isValueClass)
+#ifdef DEBUG
+ , m_gcPtrsInitialized(false)
+#endif
+ , m_gcPtrCount(0)
+ , m_gcPtrs(nullptr)
+#ifdef _TARGET_AMD64_
+ , m_pppQuirkLayout(nullptr)
+#endif
+#ifdef DEBUG
+ , m_className(className)
+#endif
+ {
+ assert(size != 0);
+ }
+
+ void InitializeGCPtrs(Compiler* compiler);
+
+public:
+#ifdef _TARGET_AMD64_
+ // Get the layout for the PPP quirk - see Compiler::compQuirkForPPP for details.
+ ClassLayout* GetPPPQuirkLayout(CompAllocator alloc);
+#endif
+
+ CORINFO_CLASS_HANDLE GetClassHandle() const
+ {
+ return m_classHandle;
+ }
+
+ bool IsBlockLayout() const
+ {
+ return m_classHandle == NO_CLASS_HANDLE;
+ }
+
+#ifdef DEBUG
+ const char* GetClassName() const
+ {
+ return m_className;
+ }
+#endif
+
+ bool IsValueClass() const
+ {
+ assert(!IsBlockLayout());
+
+ return m_isValueClass;
+ }
+
+ unsigned GetSize() const
+ {
+ return m_size;
+ }
+
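+ // Get the number of pointer-sized slots covered by the layout; e.g. with
+ // TARGET_POINTER_SIZE == 8 a 12 byte struct occupies 2 slots.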
+ unsigned GetSlotCount() const
+ {
+ return roundUp(m_size, TARGET_POINTER_SIZE) / TARGET_POINTER_SIZE;
+ }
+
+ unsigned GetGCPtrCount() const
+ {
+ assert(m_gcPtrsInitialized);
+
+ return m_gcPtrCount;
+ }
+
+ bool HasGCPtr() const
+ {
+ assert(m_gcPtrsInitialized);
+
+ return m_gcPtrCount != 0;
+ }
+
+ bool IsGCPtr(unsigned slot) const
+ {
+ return GetGCPtr(slot) != TYPE_GC_NONE;
+ }
+
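+ // Get the var_types for the specified slot; non-GC slots are reported as TYP_I_IMPL.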
+ var_types GetGCPtrType(unsigned slot) const
+ {
+ switch (GetGCPtr(slot))
+ {
+ case TYPE_GC_NONE:
+ return TYP_I_IMPL;
+ case TYPE_GC_REF:
+ return TYP_REF;
+ case TYPE_GC_BYREF:
+ return TYP_BYREF;
+ default:
+ unreached();
+ }
+ }
+
+private:
+ const BYTE* GetGCPtrs() const
+ {
+ assert(m_gcPtrsInitialized);
+ assert(!IsBlockLayout());
+
+ return (GetSlotCount() > sizeof(m_gcPtrsArray)) ? m_gcPtrs : m_gcPtrsArray;
+ }
+
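+ // Get the GC info for the specified slot. Layouts without GC pointers (including
+ // block layouts) report TYPE_GC_NONE without touching the GC pointer array.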
+ CorInfoGCType GetGCPtr(unsigned slot) const
+ {
+ assert(m_gcPtrsInitialized);
+ assert(slot < GetSlotCount());
+
+ if (m_gcPtrCount == 0)
+ {
+ return TYPE_GC_NONE;
+ }
+
+ return static_cast<CorInfoGCType>(GetGCPtrs()[slot]);
+ }
+};
+
+#endif // LAYOUT_H
if (supportSIMDTypes())
{
var_types simdBaseType = TYP_UNKNOWN;
- var_types type = impNormStructType(info.compClassHnd, nullptr, nullptr, &simdBaseType);
+ var_types type = impNormStructType(info.compClassHnd, &simdBaseType);
if (simdBaseType != TYP_UNKNOWN)
{
assert(varTypeIsSIMD(type));
varDsc->lvOverlappingFields = StructHasOverlappingFields(cFlags);
}
- if (varTypeIsGC(type))
- {
- varDsc->lvStructGcCount = 1;
- }
-
#if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)
varDsc->lvIsImplicitByRef = 0;
#endif // defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)
}
if (varDsc->lvExactSize == 0)
{
- BOOL isValueClass = info.compCompHnd->isValueClass(typeHnd);
-
- if (isValueClass)
- {
- varDsc->lvExactSize = info.compCompHnd->getClassSize(typeHnd);
- }
- else
- {
- varDsc->lvExactSize = info.compCompHnd->getHeapClassSize(typeHnd);
- }
-
- // Normalize struct types, and fill in GC info for all types
- unsigned lvSize = varDsc->lvSize();
- // The struct needs to be a multiple of TARGET_POINTER_SIZE bytes for getClassGClayout() to be valid.
- assert((lvSize % TARGET_POINTER_SIZE) == 0);
- varDsc->lvGcLayout = getAllocator(CMK_LvaTable).allocate<BYTE>(lvSize / TARGET_POINTER_SIZE);
- unsigned numGCVars = 0;
- var_types simdBaseType = TYP_UNKNOWN;
- if (isValueClass)
- {
- varDsc->lvType = impNormStructType(typeHnd, varDsc->lvGcLayout, &numGCVars, &simdBaseType);
- }
- else
- {
- numGCVars = info.compCompHnd->getClassGClayout(typeHnd, varDsc->lvGcLayout);
- }
-
- // We only save the count of GC vars in a struct up to 7.
- if (numGCVars >= 8)
- {
- numGCVars = 7;
- }
+ ClassLayout* layout = typGetObjLayout(typeHnd);
+ varDsc->SetLayout(layout);
+ varDsc->lvExactSize = layout->GetSize();
- varDsc->lvStructGcCount = numGCVars;
-
- if (isValueClass)
+ if (layout->IsValueClass())
{
+ var_types simdBaseType = TYP_UNKNOWN;
+ varDsc->lvType = impNormStructType(typeHnd, &simdBaseType);
#if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)
// Mark implicit byref struct parameters
varDsc->SetHfaType(hfaType);
// hfa variables can never contain GC pointers
- assert(varDsc->lvStructGcCount == 0);
+ assert(!layout->HasGCPtr());
// The size of this struct should be evenly divisible by 4 or 8
assert((varDsc->lvExactSize % genTypeSize(hfaType)) == 0);
// The number of elements in the HFA should fit into our MAX_ARG_REG_COUNT limit
}
}
-/*****************************************************************************
- * Returns the array of BYTEs containing the GC layout information
- */
-
-BYTE* Compiler::lvaGetGcLayout(unsigned varNum)
-{
- assert(varTypeIsStruct(lvaTable[varNum].lvType) && (lvaTable[varNum].lvExactSize >= TARGET_POINTER_SIZE));
-
- return lvaTable[varNum].lvGcLayout;
-}
-
//------------------------------------------------------------------------
// lvaLclSize: returns size of a local variable, in bytes
//
type = TYP_INT;
break;
case 8:
- switch (*lvGcLayout)
- {
- case TYPE_GC_NONE:
- type = TYP_I_IMPL;
- break;
-
- case TYPE_GC_REF:
- type = TYP_REF;
- break;
-
- case TYPE_GC_BYREF:
- type = TYP_BYREF;
- break;
-
- default:
- unreached();
- }
+ type = m_layout->GetGCPtrType(0);
break;
-
default:
type = TYP_BYREF;
break;
if (arg->OperGet() == GT_OBJ)
{
- BYTE* gcLayout = nullptr;
- unsigned numRefs = 0;
- GenTreeObj* argObj = arg->AsObj();
-
- if (argObj->IsGCInfoInitialized())
- {
- gcLayout = argObj->gtGcPtrs;
- numRefs = argObj->GetGcPtrCount();
- }
- else
- {
- // Set GC Pointer info
- gcLayout = new (comp, CMK_Codegen) BYTE[info->numSlots + info->numRegs];
- numRefs = comp->info.compCompHnd->getClassGClayout(arg->gtObj.gtClass, gcLayout);
- argSplit->setGcPointers(numRefs, gcLayout);
- }
+ ClassLayout* layout = arg->AsObj()->GetLayout();
// Set type of registers
for (unsigned index = 0; index < info->numRegs; index++)
{
- var_types regType = comp->getJitGCType(gcLayout[index]);
- // Account for the possibility that float fields may be passed in integer registers.
- if (varTypeIsFloating(regType) && !genIsValidFloatReg(argSplit->GetRegNumByIdx(index)))
- {
- regType = (regType == TYP_FLOAT) ? TYP_INT : TYP_LONG;
- }
- argSplit->m_regType[index] = regType;
+ argSplit->m_regType[index] = layout->GetGCPtrType(index);
}
}
else
}
else if (arg->OperIs(GT_OBJ))
{
- unsigned numRefs = 0;
- BYTE* gcLayout = new (comp, CMK_Codegen) BYTE[info->numSlots];
assert(!varTypeIsSIMD(arg));
- numRefs = comp->info.compCompHnd->getClassGClayout(arg->gtObj.gtClass, gcLayout);
- putArg->AsPutArgStk()->setGcPointers(numRefs, gcLayout);
#ifdef _TARGET_X86_
// On x86, the VM lies about the type of a struct containing a pointer sized
void Lowering::LowerBlockStore(GenTreeBlk* blkNode)
{
GenTree* dstAddr = blkNode->Addr();
- unsigned size = blkNode->gtBlkSize;
+ unsigned size = blkNode->Size();
GenTree* source = blkNode->Data();
Compiler* compiler = comp;
if (!isInitBlk)
{
// CopyObj or CopyBlk
- if ((blkNode->OperGet() == GT_STORE_OBJ) && ((blkNode->AsObj()->gtGcPtrCount == 0) || blkNode->gtBlkOpGcUnsafe))
+ if (blkNode->OperIs(GT_STORE_OBJ) && (!blkNode->AsObj()->GetLayout()->HasGCPtr() || blkNode->gtBlkOpGcUnsafe))
{
blkNode->SetOper(GT_STORE_BLK);
}
// CopyObj
GenTreeObj* objNode = blkNode->AsObj();
- unsigned slots = objNode->gtSlots;
+ unsigned slots = objNode->GetLayout()->GetSlotCount();
#ifdef DEBUG
// CpObj must always have at least one GC-Pointer as a member.
- assert(objNode->gtGcPtrCount > 0);
+ assert(objNode->GetLayout()->HasGCPtr());
assert(dstAddr->gtType == TYP_BYREF || dstAddr->gtType == TYP_I_IMPL);
- CORINFO_CLASS_HANDLE clsHnd = objNode->gtClass;
- size_t classSize = compiler->info.compCompHnd->getClassSize(clsHnd);
- size_t blkSize = roundUp(classSize, TARGET_POINTER_SIZE);
+ size_t classSize = objNode->GetLayout()->GetSize();
+ size_t blkSize = roundUp(classSize, TARGET_POINTER_SIZE);
// Currently, the EE always rounds up a class data structure so we are not handling
// the case where we have a struct that is not a multiple of the pointer size; going
// forward we should simply handle this case.
assert(classSize == blkSize);
assert((blkSize / TARGET_POINTER_SIZE) == slots);
- assert(objNode->HasGCPtr());
#endif
blkNode->gtBlkOpKind = GenTreeBlk::BlkOpKindUnroll;
void Lowering::LowerBlockStore(GenTreeBlk* blkNode)
{
GenTree* dstAddr = blkNode->Addr();
- unsigned size = blkNode->gtBlkSize;
+ unsigned size = blkNode->Size();
GenTree* source = blkNode->Data();
GenTree* srcAddrOrFill = nullptr;
bool isInitBlk = blkNode->OperIsInitBlkOp();
if (!isInitBlk)
{
// CopyObj or CopyBlk
- if ((blkNode->OperGet() == GT_STORE_OBJ) && ((blkNode->AsObj()->gtGcPtrCount == 0) || blkNode->gtBlkOpGcUnsafe))
+ if (blkNode->OperIs(GT_STORE_OBJ) && (!blkNode->AsObj()->GetLayout()->HasGCPtr() || blkNode->gtBlkOpGcUnsafe))
{
blkNode->SetOper(GT_STORE_BLK);
}
GenTreeObj* cpObjNode = blkNode->AsObj();
- unsigned slots = cpObjNode->gtSlots;
+ unsigned slots = cpObjNode->GetLayout()->GetSlotCount();
#ifdef DEBUG
// CpObj must always have at least one GC-Pointer as a member.
- assert(cpObjNode->gtGcPtrCount > 0);
+ assert(cpObjNode->GetLayout()->HasGCPtr());
assert(dstAddr->gtType == TYP_BYREF || dstAddr->gtType == TYP_I_IMPL);
- CORINFO_CLASS_HANDLE clsHnd = cpObjNode->gtClass;
+ CORINFO_CLASS_HANDLE clsHnd = cpObjNode->GetLayout()->GetClassHandle();
size_t classSize = comp->info.compCompHnd->getClassSize(clsHnd);
size_t blkSize = roundUp(classSize, TARGET_POINTER_SIZE);
// handle this case.
assert(classSize == blkSize);
assert((blkSize / TARGET_POINTER_SIZE) == slots);
- assert(cpObjNode->HasGCPtr());
#endif
bool IsRepMovsProfitable = false;
// Let's inspect the struct/class layout and determine if it's profitable
// to use rep movsq for copying non-gc memory instead of using single movsq
// instructions for each memory slot.
- unsigned i = 0;
- BYTE* gcPtrs = cpObjNode->gtGcPtrs;
+ unsigned i = 0;
+ ClassLayout* layout = cpObjNode->GetLayout();
do
{
unsigned nonGCSlots = 0;
// Measure a contiguous non-gc area inside the struct and note the maximum.
- while (i < slots && gcPtrs[i] == TYPE_GC_NONE)
+ while ((i < slots) && !layout->IsGCPtr(i))
{
nonGCSlots++;
i++;
}
- while (i < slots && gcPtrs[i] != TYPE_GC_NONE)
+ while ((i < slots) && layout->IsGCPtr(i))
{
i++;
}
#ifdef _TARGET_X86_
if (putArgStk->gtOp1->gtOper == GT_FIELD_LIST)
{
- putArgStk->gtNumberReferenceSlots = 0;
- putArgStk->gtPutArgStkKind = GenTreePutArgStk::Kind::Invalid;
+ putArgStk->gtPutArgStkKind = GenTreePutArgStk::Kind::Invalid;
GenTreeFieldList* fieldList = putArgStk->gtOp1->AsFieldList();
allFieldsAreSlots = false;
}
- if (varTypeIsGC(fieldType))
- {
- putArgStk->gtNumberReferenceSlots++;
- }
-
// For x86 we must mark all integral fields as contained or reg-optional, and handle them
// accordingly in code generation, since we may have up to 8 fields, which cannot all be in
// registers to be consumed atomically by the call.
assert(varTypeIsSIMD(putArgStk));
}
+ ClassLayout* layout = src->AsObj()->GetLayout();
+
// In case of a CpBlk we could use a helper call. In case of putarg_stk we
// can't do that since the helper call could kill some already set up outgoing args.
// TODO-Amd64-Unix: converge the code for putarg_stk with cpyblk/cpyobj.
// If we have a buffer between XMM_REGSIZE_BYTES and CPBLK_UNROLL_LIMIT bytes, we'll use SSE2.
// Structs and buffers with sizes <= CPBLK_UNROLL_LIMIT bytes occur in more than 95% of
// our framework assemblies, so this is the main code generation scheme we'll use.
- if (size <= CPBLK_UNROLL_LIMIT && putArgStk->gtNumberReferenceSlots == 0)
+ if (size <= CPBLK_UNROLL_LIMIT && !layout->HasGCPtr())
{
#ifdef _TARGET_X86_
if (size < XMM_REGSIZE_BYTES)
}
}
#ifdef _TARGET_X86_
- else if (putArgStk->gtNumberReferenceSlots != 0)
+ else if (layout->HasGCPtr())
{
// On x86, we must use `push` to store GC references to the stack in order for the emitter to properly update
// the function's GC info. These `putargstk` nodes will generate a sequence of `push` instructions.
int LinearScan::BuildBlockStore(GenTreeBlk* blkNode)
{
GenTree* dstAddr = blkNode->Addr();
- unsigned size = blkNode->gtBlkSize;
+ unsigned size = blkNode->Size();
GenTree* source = blkNode->Data();
int srcCount = 0;
if ((blkNode->OperGet() == GT_STORE_OBJ) && blkNode->OperIsCopyBlkOp())
{
- assert(blkNode->AsObj()->gtGcPtrCount != 0);
+ assert(blkNode->AsObj()->GetLayout()->HasGCPtr());
killMask = compiler->compHelperCallKillSet(CORINFO_HELP_ASSIGN_BYREF);
}
else
{
GenTreeObj* obj = op1->AsObj();
GenTree* addr = obj->Addr();
- unsigned size = obj->gtBlkSize;
+ unsigned size = obj->GetLayout()->GetSize();
assert(size <= MAX_PASS_SINGLEREG_BYTES);
if (addr->OperIsLocalAddr())
{
int LinearScan::BuildBlockStore(GenTreeBlk* blkNode)
{
GenTree* dstAddr = blkNode->Addr();
- unsigned size = blkNode->gtBlkSize;
+ unsigned size = blkNode->Size();
GenTree* source = blkNode->Data();
int srcCount = 0;
}
#endif // _TARGET_X86_
- if (varTypeIsGC(fieldType))
- {
- putArgStk->gtNumberReferenceSlots++;
- }
prevOffset = fieldOffset;
}
return BuildSimple(putArgStk);
}
- GenTree* dst = putArgStk;
- GenTree* srcAddr = nullptr;
+ ClassLayout* layout = src->AsObj()->GetLayout();
// If we have a buffer between XMM_REGSIZE_BYTES and CPBLK_UNROLL_LIMIT bytes, we'll use SSE2.
// Structs and buffers with sizes <= CPBLK_UNROLL_LIMIT bytes occur in more than 95% of
// x86 specific note: if the size is odd, the last copy operation would be of size 1 byte.
// But on x86 only RBM_BYTE_REGS can be used as byte registers. Therefore, exclude
// RBM_NON_BYTE_REGS from internal candidates.
- if ((putArgStk->gtNumberReferenceSlots == 0) && (size & (XMM_REGSIZE_BYTES - 1)) != 0)
+ if (!layout->HasGCPtr() && (size & (XMM_REGSIZE_BYTES - 1)) != 0)
{
regMaskTP regMask = allRegs(TYP_INT);
//
else if (argx->OperGet() == GT_OBJ)
{
- GenTreeObj* argObj = argx->AsObj();
- CORINFO_CLASS_HANDLE objClass = argObj->gtClass;
- unsigned structSize = compiler->info.compCompHnd->getClassSize(objClass);
+ GenTreeObj* argObj = argx->AsObj();
+ unsigned structSize = argObj->GetLayout()->GetSize();
switch (structSize)
{
case 3:
switch (actualArg->OperGet())
{
case GT_OBJ:
- // Get the size off the OBJ node.
- structSize = actualArg->AsObj()->gtBlkSize;
+ structSize = actualArg->AsObj()->GetLayout()->GetSize();
assert(structSize == info.compCompHnd->getClassSize(objClass));
break;
case GT_LCL_VAR:
if (argObj->OperIs(GT_OBJ))
{
// Get the size off the OBJ node.
- originalSize = argObj->AsObj()->gtBlkSize;
+ originalSize = argObj->AsObj()->GetLayout()->GetSize();
assert(originalSize == info.compCompHnd->getClassSize(objClass));
}
else
unsigned structSize;
if (argx->OperIs(GT_OBJ))
{
- structSize = argx->AsObj()->gtBlkSize;
+ structSize = argx->AsObj()->GetLayout()->GetSize();
}
else
{
else if (arg->OperGet() == GT_OBJ)
{
GenTreeObj* argObj = arg->AsObj();
- structSize = argObj->Size();
+ structSize = argObj->GetLayout()->GetSize();
assert(structSize == info.compCompHnd->getClassSize(objClass));
// If we have a GT_OBJ of a GT_ADDR then we set argValue to the child node of the GT_ADDR.
for (unsigned inx = 0; inx < elemCount; inx++)
{
- CorInfoGCType currentGcLayoutType = (CorInfoGCType)varDsc->lvGcLayout[inx];
+ var_types currentGcLayoutType = varDsc->GetLayout()->GetGCPtrType(inx);
// We setup the type[inx] value above using the GC info from 'objClass'
// This GT_LCL_VAR must have the same GC layout info
//
- if (currentGcLayoutType != TYPE_GC_NONE)
+ if (varTypeIsGC(currentGcLayoutType))
{
- noway_assert(type[inx] == getJitGCType((BYTE)currentGcLayoutType));
+ noway_assert(type[inx] == currentGcLayoutType);
}
else
{
// The allocated size of our LocalVar must be at least as big as lastOffset
assert(varDsc->lvSize() >= lastOffset);
- if (varDsc->lvStructGcCount > 0)
+ if (varDsc->HasGCPtr())
{
// alignment of the baseOffset is required
noway_assert((baseOffset % TARGET_POINTER_SIZE) == 0);
#ifndef UNIX_AMD64_ABI
noway_assert(elemSize == TARGET_POINTER_SIZE);
#endif
- unsigned baseIndex = baseOffset / TARGET_POINTER_SIZE;
- const BYTE* gcPtrs = varDsc->lvGcLayout; // Get the GC layout for the local variable
+ unsigned baseIndex = baseOffset / TARGET_POINTER_SIZE;
+ ClassLayout* layout = varDsc->GetLayout();
for (unsigned inx = 0; (inx < elemCount); inx++)
{
// The GC information must match what we setup using 'objClass'
- if ((gcPtrs[baseIndex + inx] != TYPE_GC_NONE) || varTypeGCtype(type[inx]))
+ if (layout->IsGCPtr(baseIndex + inx) || varTypeGCtype(type[inx]))
{
- noway_assert(type[inx] == getJitGCType(gcPtrs[baseIndex + inx]));
+ noway_assert(type[inx] == layout->GetGCPtrType(baseIndex + inx));
}
}
}
{
var_types lclType = varDsc->TypeGet();
bool isUserLocal = (varNum < info.compLocalsCount);
- bool structWithGCFields = ((lclType == TYP_STRUCT) && (varDsc->lvStructGcCount > 0));
+ bool structWithGCFields = ((lclType == TYP_STRUCT) && varDsc->GetLayout()->HasGCPtr());
if (isUserLocal || structWithGCFields)
{
GenTree* lcl = gtNewLclvNode(varNum, lclType);
}
if (lhsBlk->OperGet() == GT_OBJ)
{
- clsHnd = lhsBlk->AsObj()->gtClass;
+ clsHnd = lhsBlk->AsObj()->GetLayout()->GetClassHandle();
}
}
else
}
else
{
- tree = new (this, GT_BLK) GenTreeBlk(GT_BLK, structType, addr, genTypeSize(structType));
+ tree = new (this, GT_BLK) GenTreeBlk(GT_BLK, structType, addr, typGetBlkLayout(genTypeSize(structType)));
}
gtUpdateNodeSideEffects(tree);
{
blkNode->AsDynBlk()->gtDynamicSize = nullptr;
blkNode->ChangeOper(GT_BLK);
- blkNode->gtBlkSize = size;
+ blkNode->SetLayout(typGetBlkLayout(size));
}
else
{
CORINFO_CLASS_HANDLE clsHnd = gtGetStructHandleIfPresent(effectiveVal);
if (clsHnd == NO_CLASS_HANDLE)
{
- newTree = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, addr, blockWidth);
+ newTree = new (this, GT_BLK) GenTreeBlk(GT_BLK, TYP_STRUCT, addr, typGetBlkLayout(blockWidth));
}
else
{
void Compiler::fgMorphUnsafeBlk(GenTreeObj* dest)
{
#if defined(CPBLK_UNROLL_LIMIT) && !defined(JIT32_GCENCODER)
- assert(dest->gtGcPtrCount != 0);
- unsigned blockWidth = dest->AsBlk()->gtBlkSize;
+ assert(dest->GetLayout()->HasGCPtr());
+ unsigned blockWidth = dest->GetLayout()->GetSize();
#ifdef DEBUG
bool destOnStack = false;
GenTree* destAddr = dest->Addr();
{
blockWidth = genTypeSize(destLclVar->lvType);
}
- hasGCPtrs = destLclVar->lvStructGcCount != 0;
+ hasGCPtrs = destLclVar->HasGCPtr();
}
else
{
assert(effectiveDest->OperIsBlk());
GenTreeBlk* blk = effectiveDest->AsBlk();
- blockWidth = blk->gtBlkSize;
+ blockWidth = blk->Size();
blockWidthIsConst = (blk->gtOper != GT_DYN_BLK);
if ((dest == effectiveDest) && ((dest->gtFlags & GTF_IND_ARR_INDEX) == 0))
{
m_compiler->info.compCompHnd->getFieldClass(indir->AsField()->gtFldHnd));
case GT_BLK:
case GT_OBJ:
- return indir->AsBlk()->gtBlkSize;
+ return indir->AsBlk()->GetLayout()->GetSize();
default:
assert(indir->OperIs(GT_IND, GT_DYN_BLK));
return 0;
GenTreeBlk* storeBlk = nullptr;
unsigned int size = varDsc->lvExactSize;
- if (varDsc->lvStructGcCount != 0)
+ if (varDsc->HasGCPtr())
{
CORINFO_CLASS_HANDLE structHnd = varDsc->lvVerTypeInfo.GetClassHandle();
GenTreeObj* objNode = comp->gtNewObjNode(structHnd, location)->AsObj();
- unsigned int slots = roundUp(size, TARGET_POINTER_SIZE) / TARGET_POINTER_SIZE;
-
- objNode->SetGCInfo(varDsc->lvGcLayout, varDsc->lvStructGcCount, slots);
objNode->ChangeOper(GT_STORE_OBJ);
objNode->SetData(value);
comp->fgMorphUnsafeBlk(objNode);
}
else
{
- storeBlk = new (comp, GT_STORE_BLK) GenTreeBlk(GT_STORE_BLK, TYP_STRUCT, location, value, size);
+ storeBlk = new (comp, GT_STORE_BLK)
+ GenTreeBlk(GT_STORE_BLK, TYP_STRUCT, location, value, comp->typGetBlkLayout(size));
}
storeBlk->gtFlags |= GTF_ASG;
storeBlk->gtFlags |= ((location->gtFlags | value->gtFlags) & GTF_ALL_EFFECT);
//
if (tree->OperGet() == GT_OBJ)
{
- if ((structHandle != NO_CLASS_HANDLE) && (tree->AsObj()->gtClass != structHandle))
+ if ((structHandle != NO_CLASS_HANDLE) && (tree->AsObj()->GetLayout()->GetClassHandle() != structHandle))
{
// In this case we need to retain the GT_OBJ to retype the value.
- tree->AsObj()->gtClass = structHandle;
+ tree->AsObj()->SetLayout(typGetObjLayout(structHandle));
}
else
{
// Widen the lower half and assign it to dstAddrLo.
simdTree = gtNewSIMDNode(simdType, op1, nullptr, SIMDIntrinsicWidenLo, baseType, size);
- GenTree* loDest =
- new (this, GT_BLK) GenTreeBlk(GT_BLK, simdType, dstAddrLo, getSIMDTypeSizeInBytes(clsHnd));
+ // TODO-1stClassStructs: With the introduction of ClassLayout it would be preferable to use
+ // GT_OBJ instead of GT_BLK nodes to avoid losing information about the actual vector type.
+ GenTree* loDest = new (this, GT_BLK)
+ GenTreeBlk(GT_BLK, simdType, dstAddrLo, typGetBlkLayout(getSIMDTypeSizeInBytes(clsHnd)));
GenTree* loAsg = gtNewBlkOpNode(loDest, simdTree, getSIMDTypeSizeInBytes(clsHnd),
false, // not volatile
true); // copyBlock
loAsg->gtFlags |= ((simdTree->gtFlags | dstAddrLo->gtFlags) & GTF_ALL_EFFECT);
// Widen the upper half and assign it to dstAddrHi.
- simdTree = gtNewSIMDNode(simdType, dupOp1, nullptr, SIMDIntrinsicWidenHi, baseType, size);
- GenTree* hiDest =
- new (this, GT_BLK) GenTreeBlk(GT_BLK, simdType, dstAddrHi, getSIMDTypeSizeInBytes(clsHnd));
+ simdTree = gtNewSIMDNode(simdType, dupOp1, nullptr, SIMDIntrinsicWidenHi, baseType, size);
+ GenTree* hiDest = new (this, GT_BLK)
+ GenTreeBlk(GT_BLK, simdType, dstAddrHi, typGetBlkLayout(getSIMDTypeSizeInBytes(clsHnd)));
GenTree* hiAsg = gtNewBlkOpNode(hiDest, simdTree, getSIMDTypeSizeInBytes(clsHnd),
false, // not volatile
true); // copyBlock
// block ops.
if (doCopyBlk)
{
- GenTree* dest = new (this, GT_BLK) GenTreeBlk(GT_BLK, simdType, copyBlkDst, getSIMDTypeSizeInBytes(clsHnd));
+ GenTree* dest = new (this, GT_BLK)
+ GenTreeBlk(GT_BLK, simdType, copyBlkDst, typGetBlkLayout(getSIMDTypeSizeInBytes(clsHnd)));
dest->gtFlags |= GTF_GLOB_REF;
retVal = gtNewBlkOpNode(dest, simdTree, getSIMDTypeSizeInBytes(clsHnd),
false, // not volatile