{
printf(".%02u", curAssertion->op1.lcl.ssaNum);
}
- if (curAssertion->op2.zeroOffsetFieldSeq != nullptr)
- {
- printf(" Zero");
- gtDispFieldSeq(curAssertion->op2.zeroOffsetFieldSeq);
- }
break;
case O2K_CONST_INT:
goto DONE_ASSERTION; // Don't make an assertion
}
- FieldSeqNode* zeroOffsetFieldSeq = nullptr;
- GetZeroOffsetFieldMap()->Lookup(op2, &zeroOffsetFieldSeq);
-
- assertion.op2.kind = O2K_LCLVAR_COPY;
- assertion.op2.vn = vnStore->VNConservativeNormalValue(op2->gtVNPair);
- assertion.op2.lcl.lclNum = lclNum2;
- assertion.op2.lcl.ssaNum = op2->AsLclVarCommon()->GetSsaNum();
- assertion.op2.zeroOffsetFieldSeq = zeroOffsetFieldSeq;
+ assertion.op2.kind = O2K_LCLVAR_COPY;
+ assertion.op2.vn = vnStore->VNConservativeNormalValue(op2->gtVNPair);
+ assertion.op2.lcl.lclNum = lclNum2;
+ assertion.op2.lcl.ssaNum = op2->AsLclVarCommon()->GetSsaNum();
// Ok everything has been set and the assertion looks good
assertion.assertionKind = assertionKind;
}
// Extract the matching lclNum and ssaNum, as well as the field sequence.
- unsigned copyLclNum;
- unsigned copySsaNum;
- FieldSeqNode* zeroOffsetFieldSeq;
+ unsigned copyLclNum;
+ unsigned copySsaNum;
if (op1.lcl.lclNum == lclNum)
{
- copyLclNum = op2.lcl.lclNum;
- copySsaNum = op2.lcl.ssaNum;
- zeroOffsetFieldSeq = op2.zeroOffsetFieldSeq;
+ copyLclNum = op2.lcl.lclNum;
+ copySsaNum = op2.lcl.ssaNum;
}
else
{
- copyLclNum = op1.lcl.lclNum;
- copySsaNum = op1.lcl.ssaNum;
- zeroOffsetFieldSeq = nullptr; // Only the RHS of an assignment can have a FldSeq.
- assert(optLocalAssertionProp); // Were we to perform replacements in global propagation, that makes copy
- // assertions for control flow ("if (a == b) { ... }"), where both operands
- // could have a FldSeq, we'd need to save it for "op1" too.
+ copyLclNum = op1.lcl.lclNum;
+ copySsaNum = op1.lcl.ssaNum;
}
if (!optLocalAssertionProp)
tree->SetLclNum(copyLclNum);
tree->SetSsaNum(copySsaNum);
- // The sequence we are propagating (if any) represents the inner fields.
- if (zeroOffsetFieldSeq != nullptr)
- {
- FieldSeqNode* outerZeroOffsetFieldSeq = nullptr;
- if (GetZeroOffsetFieldMap()->Lookup(tree, &outerZeroOffsetFieldSeq))
- {
- zeroOffsetFieldSeq = GetFieldSeqStore()->Append(zeroOffsetFieldSeq, outerZeroOffsetFieldSeq);
- GetZeroOffsetFieldMap()->Remove(tree);
- }
-
- fgAddFieldSeqForZeroOffset(tree, zeroOffsetFieldSeq);
- }
-
#ifdef DEBUG
if (verbose)
{
m_nodeTestData = nullptr;
m_loopHoistCSEClass = FIRST_LOOP_HOIST_CSE_CLASS;
#endif
- m_switchDescMap = nullptr;
- m_blockToEHPreds = nullptr;
- m_fieldSeqStore = nullptr;
- m_zeroOffsetFieldMap = nullptr;
- m_refAnyClass = nullptr;
+ m_switchDescMap = nullptr;
+ m_blockToEHPreds = nullptr;
+ m_fieldSeqStore = nullptr;
+ m_refAnyClass = nullptr;
for (MemoryKind memoryKind : allMemoryKinds())
{
m_memorySsaMap[memoryKind] = nullptr;
chars += printf("[GTF_ICON_STATIC_BOX_PTR]");
break;
- case GTF_ICON_FIELD_OFF:
-
- chars += printf("[ICON_FIELD_OFF]");
- break;
-
default:
assert(!"a forgotten handle flag");
break;
GenTree* op2 = nullptr);
GenTreeIntCon* gtNewIconNode(ssize_t value, var_types type = TYP_INT);
- GenTreeIntCon* gtNewIconNode(unsigned fieldOffset, FieldSeqNode* fieldSeq);
+ GenTreeIntCon* gtNewIconNode(unsigned fieldOffset, FieldSeq* fieldSeq);
GenTreeIntCon* gtNewNull();
GenTreeIntCon* gtNewTrue();
GenTreeIntCon* gtNewFalse();
GenTree* gtNewIndOfIconHandleNode(var_types indType, size_t value, GenTreeFlags iconFlags, bool isInvariant);
- GenTreeIntCon* gtNewIconHandleNode(size_t value, GenTreeFlags flags, FieldSeqNode* fields = nullptr);
+ GenTreeIntCon* gtNewIconHandleNode(size_t value, GenTreeFlags flags, FieldSeq* fields = nullptr);
GenTreeFlags gtTokenToIconFlags(unsigned token);
unsigned gtDispMultiRegCount(GenTree* tree);
#endif
void gtDispRegVal(GenTree* tree);
- void gtDispZeroFieldSeq(GenTree* tree);
void gtDispVN(GenTree* tree);
void gtDispCommonEndLine(GenTree* tree);
void gtGetArgMsg(GenTreeCall* call, CallArg* arg, char* bufp, unsigned bufLength);
void gtGetLateArgMsg(GenTreeCall* call, CallArg* arg, char* bufp, unsigned bufLength);
void gtDispArgList(GenTreeCall* call, GenTree* lastCallOperand, IndentStack* indentStack);
- void gtDispAnyFieldSeq(FieldSeqNode* fieldSeq);
- void gtDispFieldSeq(FieldSeqNode* pfsn);
+ void gtDispFieldSeq(FieldSeq* fieldSeq, ssize_t offset);
void gtDispRange(LIR::ReadOnlyRange const& range);
void fgValueNumberArrayElemStore(GenTree* storeNode, VNFuncApp* addrFunc, unsigned storeSize, ValueNum value);
- void fgValueNumberFieldLoad(GenTree* loadTree, GenTree* baseAddr, FieldSeqNode* fieldSeq, ssize_t offset);
+ void fgValueNumberFieldLoad(GenTree* loadTree, GenTree* baseAddr, FieldSeq* fieldSeq, ssize_t offset);
- void fgValueNumberFieldStore(GenTree* storeNode,
- GenTree* baseAddr,
- FieldSeqNode* fieldSeq,
- ssize_t offset,
- unsigned storeSize,
- ValueNum value);
+ void fgValueNumberFieldStore(
+ GenTree* storeNode, GenTree* baseAddr, FieldSeq* fieldSeq, ssize_t offset, unsigned storeSize, ValueNum value);
// Compute the value number for a byref-exposed load of the given type via the given pointerVN.
ValueNum fgValueNumberByrefExposedLoad(var_types type, ValueNum pointerVN);
#ifdef DEBUG
void fgDebugCheckExceptionSets();
- void fgDebugCheckValueNumberedTree(GenTree* tree);
#endif
// These are the current value number for the memory implicit variables while
GenTreeFlags iconFlags; // gtFlags
};
union {
- struct
- {
- SsaVar lcl;
- FieldSeqNode* zeroOffsetFieldSeq;
- };
+ SsaVar lcl;
IntVal u1;
__int64 lconVal;
double dconVal;
case O2K_LCLVAR_COPY:
return (op2.lcl.lclNum == that->op2.lcl.lclNum) &&
- (!vnBased || op2.lcl.ssaNum == that->op2.lcl.ssaNum) &&
- (op2.zeroOffsetFieldSeq == that->op2.zeroOffsetFieldSeq);
+ (!vnBased || (op2.lcl.ssaNum == that->op2.lcl.ssaNum));
case O2K_SUBRANGE:
return op2.u2.Equals(that->op2.u2);
void JitTestCheckVN(); // Value numbering tests.
#endif // DEBUG
- // The "FieldSeqStore", for canonicalizing field sequences. See the definition of FieldSeqStore for
- // operations.
FieldSeqStore* m_fieldSeqStore;
FieldSeqStore* GetFieldSeqStore()
Compiler* compRoot = impInlineRoot();
if (compRoot->m_fieldSeqStore == nullptr)
{
- // Create a CompAllocator that labels sub-structure with CMK_FieldSeqStore, and use that for allocation.
- CompAllocator ialloc(getAllocator(CMK_FieldSeqStore));
- compRoot->m_fieldSeqStore = new (ialloc) FieldSeqStore(ialloc);
+ CompAllocator alloc = getAllocator(CMK_FieldSeqStore);
+ compRoot->m_fieldSeqStore = new (alloc) FieldSeqStore(alloc);
}
return compRoot->m_fieldSeqStore;
}
- typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, FieldSeqNode*> NodeToFieldSeqMap;
-
- // Some nodes of "TYP_BYREF" or "TYP_I_IMPL" actually represent the address of a field within a struct, but since
- // the offset of the field is zero, there's no "GT_ADD" node. We normally attach a field sequence to the constant
- // that is added, but what do we do when that constant is zero, and is thus not present? We use this mechanism to
- // attach the field sequence directly to the address node.
- NodeToFieldSeqMap* m_zeroOffsetFieldMap;
-
- NodeToFieldSeqMap* GetZeroOffsetFieldMap()
- {
- // Don't need to worry about inlining here
- if (m_zeroOffsetFieldMap == nullptr)
- {
- // Create a CompAllocator that labels sub-structure with CMK_ZeroOffsetFieldMap, and use that for
- // allocation.
- CompAllocator ialloc(getAllocator(CMK_ZeroOffsetFieldMap));
- m_zeroOffsetFieldMap = new (ialloc) NodeToFieldSeqMap(ialloc);
- }
- return m_zeroOffsetFieldMap;
- }
-
- // Requires that "op1" is a node of type "TYP_BYREF" or "TYP_I_IMPL". We are dereferencing this with the fields in
- // "fieldSeq", whose offsets are required all to be zero. Ensures that any field sequence annotation currently on
- // "op1" or its components is augmented by appending "fieldSeq". In practice, if "op1" is a GT_LCL_FLD, it has
- // a field sequence as a member; otherwise, it may be the addition of an a byref and a constant, where the const
- // has a field sequence -- in this case "fieldSeq" is appended to that of the constant; otherwise, we
- // record the field sequence using the ZeroOffsetFieldMap described above.
- //
- // One exception above is that "op1" is a node of type "TYP_REF" where "op1" is a GT_LCL_VAR.
- // This happens when System.Object vtable pointer is a regular field at offset 0 in System.Private.CoreLib in
- // NativeAOT. Such case is handled same as the default case.
- void fgAddFieldSeqForZeroOffset(GenTree* op1, FieldSeqNode* fieldSeq);
+ typedef JitHashTable<GenTree*, JitPtrKeyFuncs<GenTree>, FieldSeq*> NodeToFieldSeqMap;
NodeToUnsignedMap* m_memorySsaMap[MemoryKindCount];
* that may need to be fixed up).
*/
-inline GenTreeIntCon* Compiler::gtNewIconHandleNode(size_t value, GenTreeFlags flags, FieldSeqNode* fields)
+inline GenTreeIntCon* Compiler::gtNewIconHandleNode(size_t value, GenTreeFlags flags, FieldSeq* fields)
{
- assert((flags & (GTF_ICON_HDL_MASK | GTF_ICON_FIELD_OFF)) != 0);
-
- // Interpret "fields == NULL" as "not a field."
- if (fields == nullptr)
- {
- fields = FieldSeqStore::NotAField();
- }
+ assert((flags & GTF_ICON_HDL_MASK) != 0);
GenTreeIntCon* node;
#if defined(LATE_DISASM)
switch (oper)
{
case GT_CNS_INT:
- AsIntCon()->gtFieldSeq = FieldSeqStore::NotAField();
+ AsIntCon()->gtFieldSeq = nullptr;
INDEBUG(AsIntCon()->gtTargetHandle = 0);
break;
#if defined(TARGET_ARM)
}
AsIntCon()->SetIconValue(static_cast<ssize_t>(value));
- AsIntCon()->gtFieldSeq = FieldSeqStore::NotAField();
+ AsIntCon()->gtFieldSeq = nullptr;
break;
#if !defined(TARGET_64BIT)
CompMemKindMacro(Generic)
CompMemKindMacro(LocalAddressVisitor)
CompMemKindMacro(FieldSeqStore)
-CompMemKindMacro(ZeroOffsetFieldMap)
CompMemKindMacro(MemorySsaMap)
CompMemKindMacro(MemoryPhiArg)
CompMemKindMacro(CSE)
//
bool Compiler::gtMarkAddrMode(GenTree* addr, int* pCostEx, int* pCostSz, var_types type)
{
+ GenTree* addrComma = addr;
+ addr = addr->gtEffectiveVal(/* commaOnly */ true);
+
// These are "out" parameters on the call to genCreateAddrMode():
bool rev; // This will be true if the operands will need to be reversed. At this point we
// don't care about this because we're not yet instantiating this addressing mode.
// We can form a complex addressing mode, so mark each of the interior
// nodes with GTF_ADDRMODE_NO_CSE and calculate a more accurate cost.
-
addr->gtFlags |= GTF_ADDRMODE_NO_CSE;
+
+ int originalAddrCostEx = addr->GetCostEx();
+ int originalAddrCostSz = addr->GetCostSz();
+ int addrModeCostEx = 0;
+ int addrModeCostSz = 0;
+
#ifdef TARGET_XARCH
// addrmodeCount is the count of items that we used to form
// an addressing mode. The maximum value is 4 when we have
unsigned addrmodeCount = 0;
if (base)
{
- *pCostEx += base->GetCostEx();
- *pCostSz += base->GetCostSz();
+ addrModeCostEx += base->GetCostEx();
+ addrModeCostSz += base->GetCostSz();
addrmodeCount++;
}
if (idx)
{
- *pCostEx += idx->GetCostEx();
- *pCostSz += idx->GetCostSz();
+ addrModeCostEx += idx->GetCostEx();
+ addrModeCostSz += idx->GetCostSz();
addrmodeCount++;
}
{
if (((signed char)cns) == ((int)cns))
{
- *pCostSz += 1;
+ addrModeCostSz += 1;
}
else
{
- *pCostSz += 4;
+ addrModeCostSz += 4;
}
addrmodeCount++;
}
#elif defined TARGET_ARM
if (base)
{
- *pCostEx += base->GetCostEx();
- *pCostSz += base->GetCostSz();
+ addrModeCostEx += base->GetCostEx();
+ addrModeCostSz += base->GetCostSz();
if ((base->gtOper == GT_LCL_VAR) && ((idx == NULL) || (cns == 0)))
{
- *pCostSz -= 1;
+ addrModeCostSz -= 1;
}
}
if (idx)
{
- *pCostEx += idx->GetCostEx();
- *pCostSz += idx->GetCostSz();
+ addrModeCostEx += idx->GetCostEx();
+ addrModeCostSz += idx->GetCostSz();
if (mul > 0)
{
- *pCostSz += 2;
+ addrModeCostSz += 2;
}
}
{
if (!varTypeIsFloating(type))
{
- *pCostSz += 2;
+ addrModeCostSz += 2;
}
}
else
{
- *pCostEx += 2; // Very large offsets require movw/movt instructions
- *pCostSz += 8;
+ addrModeCostEx += 2; // Very large offsets require movw/movt instructions
+ addrModeCostSz += 8;
}
}
}
#elif defined TARGET_ARM64
if (base)
{
- *pCostEx += base->GetCostEx();
- *pCostSz += base->GetCostSz();
+ addrModeCostEx += base->GetCostEx();
+ addrModeCostSz += base->GetCostSz();
}
if (idx)
{
- *pCostEx += idx->GetCostEx();
- *pCostSz += idx->GetCostSz();
+ addrModeCostEx += idx->GetCostEx();
+ addrModeCostSz += idx->GetCostSz();
}
if (cns != 0)
{
if (cns >= (4096 * genTypeSize(type)))
{
- *pCostEx += 1;
- *pCostSz += 4;
+ addrModeCostEx += 1;
+ addrModeCostSz += 4;
}
}
#elif defined(TARGET_LOONGARCH64)
if (base)
{
- *pCostEx += base->GetCostEx();
- *pCostSz += base->GetCostSz();
+ addrModeCostEx += base->GetCostEx();
+ addrModeCostSz += base->GetCostSz();
}
if (idx)
{
- *pCostEx += idx->GetCostEx();
- *pCostSz += idx->GetCostSz();
+ addrModeCostEx += idx->GetCostEx();
+ addrModeCostSz += idx->GetCostSz();
}
if (cns != 0)
{
if (!emitter::isValidSimm12(cns))
{
// TODO-LoongArch64-CQ: tune for LoongArch64.
- *pCostEx += 1;
- *pCostSz += 4;
+ addrModeCostEx += 1;
+ addrModeCostSz += 4;
}
}
#else
// op1 isn't base or idx. Is this possible? Or should there be an assert?
}
}
+
+    // Finally, adjust the costs on the parenting COMMAs.
+    //
+    // "addr" (the effective address) was folded into the addressing mode, so the
+    // cost it contributed to each COMMA above it must be replaced by the (smaller)
+    // addressing-mode cost. Compute the invariant deltas once, then walk the COMMA
+    // chain: shrink each COMMA's cached costs by the delta and account for the
+    // side-effecting first operand of each COMMA in the caller's totals.
+    int addrCostExDelta = originalAddrCostEx - addrModeCostEx;
+    int addrCostSzDelta = originalAddrCostSz - addrModeCostSz;
+    while (addrComma != addr)
+    {
+        // Note: the size cost must be adjusted by the *size* delta (previously
+        // this mistakenly subtracted the execution-cost delta from both).
+        addrComma->SetCosts(addrComma->GetCostEx() - addrCostExDelta, addrComma->GetCostSz() - addrCostSzDelta);
+
+        *pCostEx += addrComma->AsOp()->gtGetOp1()->GetCostEx();
+        *pCostSz += addrComma->AsOp()->gtGetOp1()->GetCostSz();
+
+        addrComma = addrComma->AsOp()->gtGetOp2();
+    }
+
+    *pCostEx += addrModeCostEx;
+    *pCostSz += addrModeCostSz;
+
return true;
- } // end if (genCreateAddrMode(...))
+ } // if (genCreateAddrMode(...))
+
return false;
}
case GT_BLK:
case GT_IND:
-
+ {
/* An indirection should always have a non-zero level.
* Only constant leaf nodes have level 0.
*/
#endif // TARGET_ARM
// Can we form an addressing mode with this indirection?
- // TODO-CQ: Consider changing this to op1->gtEffectiveVal() to take into account
- // addressing modes hidden under a comma node.
+ GenTree* addr = op1->gtEffectiveVal();
- if (op1->gtOper == GT_ADD)
+ if (addr->OperIs(GT_ADD))
{
// See if we can form a complex addressing mode.
-
- GenTree* addr = op1->gtEffectiveVal();
-
bool doAddrMode = true;
+
// TODO-1stClassStructs: Always do this, but first make sure it's done in Lowering as well.
if (tree->TypeGet() == TYP_STRUCT)
{
doAddrMode = false;
}
-
#ifdef TARGET_ARM64
- if (tree->gtFlags & GTF_IND_VOLATILE)
+ if (tree->AsIndir()->IsVolatile())
{
// For volatile store/loads when address is contained we always emit `dmb`
// if it's not - we emit one-way barriers i.e. ldar/stlr
doAddrMode = false;
}
#endif // TARGET_ARM64
- if (doAddrMode && gtMarkAddrMode(addr, &costEx, &costSz, tree->TypeGet()))
+ if (doAddrMode && gtMarkAddrMode(op1, &costEx, &costSz, tree->TypeGet()))
{
goto DONE;
}
- } // end if (op1->gtOper == GT_ADD)
+ }
else if (gtIsLikelyRegVar(op1))
{
/* Indirection of an enregister LCL_VAR, don't increase costEx/costSz */
goto DONE;
}
#endif
- break;
+ }
+ break;
default:
break;
return new (this, GT_CNS_INT) GenTreeIntCon(type, value);
}
+//------------------------------------------------------------------------
+// gtNewIconNode: Create a TYP_I_IMPL integer constant node for a field offset.
+//
+// Arguments:
+//    fieldOffset - The field offset value for the constant
+//    fieldSeq    - Field sequence annotation attached to the constant
+//                  (presumably consumed by value numbering; see FieldSeq)
+//
+// Return Value:
+//    The new GT_CNS_INT node.
+//
+GenTreeIntCon* Compiler::gtNewIconNode(unsigned fieldOffset, FieldSeq* fieldSeq)
+{
+    return new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, static_cast<ssize_t>(fieldOffset), fieldSeq);
+}
+
GenTreeIntCon* Compiler::gtNewNull()
{
return gtNewIconNode(0, TYP_REF);
return gtNewIconNode(0, TYP_INT);
}
-GenTreeIntCon* Compiler::gtNewIconNode(unsigned fieldOffset, FieldSeqNode* fieldSeq)
-{
- GenTreeIntCon* node = new (this, GT_CNS_INT) GenTreeIntCon(TYP_I_IMPL, static_cast<ssize_t>(fieldOffset));
- node->gtFieldSeq = fieldSeq == nullptr ? FieldSeqStore::NotAField() : fieldSeq;
- return node;
-}
-
// return a new node representing the value in a physical register
GenTree* Compiler::gtNewPhysRegNode(regNumber reg, var_types type)
{
DONE:
- // If it has a zero-offset field seq, copy annotation.
- if (tree->TypeGet() == TYP_BYREF)
- {
- FieldSeqNode* fldSeq = nullptr;
- if (GetZeroOffsetFieldMap()->Lookup(tree, &fldSeq))
- {
- fgAddFieldSeqForZeroOffset(copy, fldSeq);
- }
- }
-
copy->gtVNPair = tree->gtVNPair; // A cloned tree gets the orginal's Value number pair
/* Compute the flags for the copied node. Note that we can do this only
}
//------------------------------------------------------------------------
-// gtDispZeroFieldSeq: If this node has a zero fieldSeq annotation
-// then print this Field Sequence
-//
-void Compiler::gtDispZeroFieldSeq(GenTree* tree)
-{
- NodeToFieldSeqMap* map = GetZeroOffsetFieldMap();
-
- // THe most common case is having no entries in this map
- if (map->GetCount() > 0)
- {
- FieldSeqNode* fldSeq = nullptr;
- if (map->Lookup(tree, &fldSeq))
- {
- printf(" Zero");
- gtDispAnyFieldSeq(fldSeq);
- }
- }
-}
-
-//------------------------------------------------------------------------
// gtDispVN: Utility function that prints a tree's ValueNumber: gtVNPair
//
void Compiler::gtDispVN(GenTree* tree)
//
void Compiler::gtDispCommonEndLine(GenTree* tree)
{
- gtDispZeroFieldSeq(tree);
gtDispRegVal(tree);
gtDispVN(tree);
printf("\n");
--msgLength;
break;
}
- else if ((tree->gtFlags & GTF_ICON_FIELD_OFF) != 0)
- {
- printf("O");
- --msgLength;
- break;
- }
else
{
// Some other handle
}
}
- if ((tree->gtFlags & GTF_ICON_FIELD_OFF) != 0)
- {
- printf(" field offset");
- }
-
#ifdef FEATURE_SIMD
if ((tree->gtFlags & GTF_ICON_SIMD_COUNT) != 0)
{
}
}
- gtDispFieldSeq(tree->AsIntCon()->gtFieldSeq);
-
+ if (tree->AsIntCon()->gtFieldSeq != nullptr)
+ {
+ FieldSeq* fieldSeq = tree->AsIntCon()->gtFieldSeq;
+ gtDispFieldSeq(fieldSeq, tree->AsIntCon()->IconValue() - fieldSeq->GetOffset());
+ }
break;
case GT_CNS_LNG:
}
//------------------------------------------------------------------------
-// gtDispFieldSeq: "gtDispFieldSeq" that also prints "<NotAField>".
-//
-// Useful for printing zero-offset field sequences.
-//
-void Compiler::gtDispAnyFieldSeq(FieldSeqNode* fieldSeq)
-{
- if (fieldSeq == FieldSeqStore::NotAField())
- {
- printf(" Fseq<NotAField>");
- return;
- }
-
- gtDispFieldSeq(fieldSeq);
-}
-
-//------------------------------------------------------------------------
// gtDispFieldSeq: Print out the fields in this field sequence.
//
-void Compiler::gtDispFieldSeq(FieldSeqNode* pfsn)
+// Arguments:
+// fieldSeq - The field sequence
+// offset - Offset of the (implicit) struct fields in the sequence
+//
+void Compiler::gtDispFieldSeq(FieldSeq* fieldSeq, ssize_t offset)
{
- if ((pfsn == nullptr) || (pfsn == FieldSeqStore::NotAField()))
+ if (fieldSeq == nullptr)
{
return;
}
- // Otherwise...
printf(" Fseq[");
- while (pfsn != nullptr)
+ printf("%s", eeGetFieldName(fieldSeq->GetFieldHandle()));
+ if (offset != 0)
{
- assert(pfsn != FieldSeqStore::NotAField()); // Can't exist in a field sequence list except alone
-
- CORINFO_FIELD_HANDLE fldHnd = pfsn->GetFieldHandle();
- printf("%s", eeGetFieldName(fldHnd));
-
- pfsn = pfsn->GetNext();
-
- if (pfsn != nullptr)
- {
- printf(", ");
- }
+ printf(", %zd", offset);
}
printf("]");
}
#endif
GenTree* Compiler::gtFoldExprConst(GenTree* tree)
{
- SSIZE_T i1, i2, itemp;
- INT64 lval1, lval2, ltemp;
- float f1, f2;
- double d1, d2;
- var_types switchType;
- FieldSeqNode* fieldSeq = FieldSeqStore::NotAField(); // default unless we override it when folding
+ SSIZE_T i1, i2, itemp;
+ INT64 lval1, lval2, ltemp;
+ float f1, f2;
+ double d1, d2;
+ var_types switchType;
+ FieldSeq* fieldSeq = nullptr; // default unless we override it when folding
assert(tree->OperIsUnary() || tree->OperIsBinary());
CNS_LONG:
#if !defined(TARGET_64BIT)
- if (fieldSeq != FieldSeqStore::NotAField())
+ if (fieldSeq != nullptr)
{
assert(!"Field sequences on CNS_LNG nodes!?");
return tree;
// this: ADD(baseAddr, CONST [FldSeq])
// this: ADD(CONST [FldSeq], baseAddr)
// this: CONST [FldSeq]
-// this: Zero [FldSeq]
//
// Arguments:
// comp - the Compiler object
// pOffset - [out] parameter for the offset of the component struct fields
//
// Return Value:
-// If "this" matches patterns denoted above, and the FldSeq found is "full",
-// i. e. starts with a class field or a static field, and includes all the
-// struct fields that this tree represents the address of, this method will
-// return "true" and set either "pBaseAddr" to some value, which must be used
-// by the caller as the key into the "first field map" to obtain the actual
-// value for the field. For instance fields, "base address" will be the object
-// reference, for statics - the address to which the field offset with the
-// field sequence is added, see "impImportStaticFieldAccess" and "fgMorphField".
+// If "this" matches patterns denoted above, with a valid FldSeq, this method
+// will return "true" and set "pBaseAddr" to some value, which must be used by
+// the caller as the key into the "first field map" to obtain the value for the
+// field, or to "nullptr", in which case the handle for the field is the primary
+// selector. For instance fields, "base address" will be the object reference,
+// for statics - the address to which the field offset with the field sequence
+// is added, see "impImportStaticFieldAccess" and "fgMorphField".
//
-bool GenTree::IsFieldAddr(Compiler* comp, GenTree** pBaseAddr, FieldSeqNode** pFldSeq, ssize_t* pOffset)
+bool GenTree::IsFieldAddr(Compiler* comp, GenTree** pBaseAddr, FieldSeq** pFldSeq, ssize_t* pOffset)
{
assert(TypeIs(TYP_I_IMPL, TYP_BYREF, TYP_REF));
*pBaseAddr = nullptr;
- *pFldSeq = FieldSeqStore::NotAField();
+ *pFldSeq = nullptr;
- GenTree* baseAddr = nullptr;
- FieldSeqNode* fldSeq = FieldSeqStore::NotAField();
- ssize_t offset = 0;
+ GenTree* baseAddr = nullptr;
+ FieldSeq* fldSeq = nullptr;
+ ssize_t offset = 0;
if (OperIs(GT_ADD))
{
- // If one operand has a field sequence, the other operand must not have one
- // as the order of fields in that case would not be well-defined.
- if (AsOp()->gtOp1->IsIconHandle())
+ if (AsOp()->gtOp2->IsCnsIntOrI())
{
- assert(!AsOp()->gtOp2->IsCnsIntOrI() || !AsOp()->gtOp2->IsIconHandle());
- baseAddr = AsOp()->gtOp2;
- fldSeq = AsOp()->gtOp1->AsIntCon()->gtFieldSeq;
- offset = AsOp()->gtOp1->AsIntCon()->IconValue();
- }
- else if (AsOp()->gtOp2->IsCnsIntOrI())
- {
- assert(!AsOp()->gtOp1->IsCnsIntOrI() || !AsOp()->gtOp1->IsIconHandle());
baseAddr = AsOp()->gtOp1;
fldSeq = AsOp()->gtOp2->AsIntCon()->gtFieldSeq;
offset = AsOp()->gtOp2->AsIntCon()->IconValue();
{
return false;
}
-
- assert(!baseAddr->TypeIs(TYP_REF) || !comp->GetZeroOffsetFieldMap()->Lookup(baseAddr));
}
else if (IsIconHandle(GTF_ICON_STATIC_HDL))
{
- assert(!comp->GetZeroOffsetFieldMap()->Lookup(this) && (AsIntCon()->gtFieldSeq != nullptr));
baseAddr = this;
fldSeq = AsIntCon()->gtFieldSeq;
offset = AsIntCon()->IconValue();
}
- else if (comp->GetZeroOffsetFieldMap()->Lookup(this, &fldSeq))
- {
- assert((fldSeq != FieldSeqStore::NotAField()) || (fldSeq->GetOffset() == 0));
- baseAddr = this;
- offset = 0;
- }
else
{
return false;
}
- assert((fldSeq != nullptr) && (baseAddr != nullptr));
+ assert(baseAddr != nullptr);
- if (fldSeq == FieldSeqStore::NotAField())
+ if (fldSeq == nullptr)
{
return false;
}
}
#endif
}
- else
#endif
- {
- // Attempt to find a handle for this expression.
- // We can do this for an array element indirection, or for a field indirection.
- GenTree* addr = tree->AsIndir()->Addr();
- if (addr->OperIs(GT_ARR_ADDR))
- {
- structHnd = addr->AsArrAddr()->GetElemClassHandle();
- }
- }
break;
#ifdef FEATURE_SIMD
case GT_SIMD:
if (op1IsStaticFieldBase && (op2->OperGet() == GT_CNS_INT))
{
- FieldSeqNode* fieldSeq = op2->AsIntCon()->gtFieldSeq;
+ FieldSeq* fieldSeq = op2->AsIntCon()->gtFieldSeq;
- if (fieldSeq != nullptr)
+ if ((fieldSeq != nullptr) && (fieldSeq->GetOffset() == op2->AsIntCon()->IconValue()))
{
- fieldSeq = fieldSeq->GetTail();
-
// No benefit to calling gtGetFieldClassHandle here, as
// the exact field being accessed can vary.
CORINFO_FIELD_HANDLE fieldHnd = fieldSeq->GetFieldHandle();
CORINFO_CLASS_HANDLE fieldClass = NO_CLASS_HANDLE;
var_types fieldType = eeGetFieldType(fieldHnd, &fieldClass);
- assert(fieldType == TYP_REF);
- objClass = fieldClass;
+ if (fieldType == TYP_REF)
+ {
+ objClass = fieldClass;
+ }
}
}
}
// If the other arg is an int constant, and is a "not-a-field", choose
// that as the multiplier, thus preserving constant index offsets...
if (tree->AsOp()->gtOp2->OperGet() == GT_CNS_INT &&
- tree->AsOp()->gtOp2->AsIntCon()->gtFieldSeq == FieldSeqStore::NotAField())
+ tree->AsOp()->gtOp2->AsIntCon()->gtFieldSeq == nullptr)
{
assert(!tree->AsOp()->gtOp2->AsIntCon()->ImmedValNeedsReloc(comp));
// TODO-CrossBitness: we wouldn't need the cast below if GenTreeIntConCommon::gtIconVal had
return false;
}
-// Note that the value of the below field doesn't matter; it exists only to provide a distinguished address.
+//------------------------------------------------------------------------
+// Create: Create or retrieve a field sequence for the given field handle.
//
-// static
-FieldSeqNode FieldSeqStore::s_notAField(nullptr, nullptr, 0, FieldSeqNode::FieldKind::Instance);
-
-// FieldSeqStore methods.
-FieldSeqStore::FieldSeqStore(CompAllocator alloc) : m_alloc(alloc), m_canonMap(new (alloc) FieldSeqNodeCanonMap(alloc))
+// The field sequence instance contains some cached information relevant to
+// its usage; thus for a given handle all callers of this method must pass
+// the same set of arguments.
+//
+// Arguments:
+// fieldHnd - The field handle
+// offset - The "offset" value for the field sequence
+// fieldKind - The field's kind
+//
+// Return Value:
+// The canonical field sequence for the given field.
+//
+FieldSeq* FieldSeqStore::Create(CORINFO_FIELD_HANDLE fieldHnd, ssize_t offset, FieldSeq::FieldKind fieldKind)
{
-}
+ FieldSeq* fieldSeq = m_map.Emplace(fieldHnd, fieldHnd, offset, fieldKind);
-FieldSeqNode* FieldSeqStore::CreateSingleton(CORINFO_FIELD_HANDLE fieldHnd,
- size_t offset,
- FieldSeqNode::FieldKind fieldKind)
-{
- FieldSeqNode fsn(fieldHnd, nullptr, offset, fieldKind);
- FieldSeqNode* res = nullptr;
- if (m_canonMap->Lookup(fsn, &res))
- {
- return res;
- }
- else
- {
- res = m_alloc.allocate<FieldSeqNode>(1);
- *res = fsn;
- m_canonMap->Set(fsn, res);
- return res;
- }
+ assert(fieldSeq->GetOffset() == offset);
+ assert(fieldSeq->GetKind() == fieldKind);
+
+ return fieldSeq;
}
-FieldSeqNode* FieldSeqStore::Append(FieldSeqNode* a, FieldSeqNode* b)
+//------------------------------------------------------------------------
+// Append: "Merge" two field sequences together.
+//
+// A field sequence only explicitly represents its "head", i. e. the static
+// or class field with which it begins. The struct fields that are part of
+// it are "implicit" - represented in IR as offsets with "empty" sequences.
+// Thus when two sequences are merged, only one can be explicit:
+//
+// field seq + empty => field seq
+// empty + field seq => field seq
+// empty + empty => empty
+// field seq + field seq => illegal
+//
+// Arguments:
+// a - The field sequence
+// b - The second sequence
+//
+// Return Value:
+// The result of "merging" "a" and "b" (see description).
+//
+FieldSeq* FieldSeqStore::Append(FieldSeq* a, FieldSeq* b)
{
if (a == nullptr)
{
return b;
}
- else if (a == NotAField())
- {
- return NotAField();
- }
- else if (b == nullptr)
+ if (b == nullptr)
{
return a;
}
- else if (b == NotAField())
- {
- return NotAField();
- }
- else
- {
- // We should never add a duplicate FieldSeqNode
- assert(a != b);
- FieldSeqNode* tmp = Append(a->GetNext(), b);
- FieldSeqNode fsn(a->GetFieldHandleValue(), tmp, a->GetOffset(), a->GetKind());
- FieldSeqNode* res = nullptr;
- if (m_canonMap->Lookup(fsn, &res))
- {
- return res;
- }
- else
- {
- res = m_alloc.allocate<FieldSeqNode>(1);
- *res = fsn;
- m_canonMap->Set(fsn, res);
- return res;
- }
- }
+ assert(!"Duplicate field sequences!");
+ return nullptr;
}
-FieldSeqNode::FieldSeqNode(CORINFO_FIELD_HANDLE fieldHnd, FieldSeqNode* next, size_t offset, FieldKind fieldKind)
- : m_next(next), m_offset(offset)
+FieldSeq::FieldSeq(CORINFO_FIELD_HANDLE fieldHnd, ssize_t offset, FieldKind fieldKind) : m_offset(offset)
{
+ assert(fieldHnd != NO_FIELD_HANDLE);
+
uintptr_t handleValue = reinterpret_cast<uintptr_t>(fieldHnd);
assert((handleValue & FIELD_KIND_MASK) == 0);
m_fieldHandleAndKind = handleValue | static_cast<uintptr_t>(fieldKind);
- if (fieldHnd != NO_FIELD_HANDLE)
- {
- assert(JitTls::GetCompiler()->eeIsFieldStatic(fieldHnd) == IsStaticField());
- // TODO-PhysicalVN: assert that "offset" is correct.
- }
- else
+ assert(JitTls::GetCompiler()->eeIsFieldStatic(fieldHnd) == IsStaticField());
+ if (fieldKind == FieldKind::Instance)
{
- // Use the default for NotAField.
- assert(fieldKind == FieldKind::Instance);
+ // TODO: enable this assert. At the time of writing, crossgen2 had a bug where the value "getFieldOffset"
+ // would return for fields with an offset unknown at compile time was incorrect (not zero).
+ // assert(static_cast<ssize_t>(JitTls::GetCompiler()->info.compCompHnd->getFieldOffset(fieldHnd)) == offset);
}
}
}
};
-// GT_FIELD nodes will be lowered into more "code-gen-able" representations, like
-// GT_IND's of addresses, or GT_LCL_FLD nodes. We'd like to preserve the more abstract
-// information, and will therefore annotate such lowered nodes with FieldSeq's. A FieldSeq
-// represents a (possibly) empty sequence of fields. The fields are in the order
-// in which they are dereferenced. The first field may be an object field or a struct field;
-// all subsequent fields must be struct fields.
-class FieldSeqNode
+// GT_FIELD nodes will be lowered into more "code-gen-able" representations, like GT_IND's of addresses.
+// For value numbering, we would like to preserve the aliasing information for class and static fields,
+// and so will annotate such lowered addresses with "field sequences", representing the "base" static or
+// class field and any additional struct fields. We only need to preserve the handle for the first field,
+// so any struct fields will be represented implicitly (via offsets). See also "IsFieldAddr".
+//
+class FieldSeq
{
public:
enum class FieldKind : uintptr_t
{
- Instance = 0, // An instance field, object or struct.
+ Instance = 0, // An instance field.
SimpleStatic = 1, // Simple static field - the handle represents a unique location.
SharedStatic = 2, // Static field on a shared generic type: "Class<__Canon>.StaticField".
};
static_assert_no_msg(sizeof(CORINFO_FIELD_HANDLE) == sizeof(uintptr_t));
- uintptr_t m_fieldHandleAndKind;
- FieldSeqNode* m_next;
- size_t m_offset;
+ uintptr_t m_fieldHandleAndKind;
+ ssize_t m_offset;
public:
- FieldSeqNode(CORINFO_FIELD_HANDLE fieldHnd, FieldSeqNode* next, size_t offset, FieldKind fieldKind);
+ FieldSeq(CORINFO_FIELD_HANDLE fieldHnd, ssize_t offset, FieldKind fieldKind);
FieldKind GetKind() const
{
CORINFO_FIELD_HANDLE GetFieldHandle() const
{
- assert(GetFieldHandleValue() != NO_FIELD_HANDLE);
- return GetFieldHandleValue();
- }
-
- CORINFO_FIELD_HANDLE GetFieldHandleValue() const
- {
return CORINFO_FIELD_HANDLE(m_fieldHandleAndKind & ~FIELD_KIND_MASK);
}
- FieldSeqNode* GetNext() const
- {
- return m_next;
- }
-
//------------------------------------------------------------------------
// GetOffset: Retrieve "the offset" for the field this node represents.
//
// For boxed statics, it will be TARGET_POINTER_SIZE (the method table pointer size).
// For other fields, it will be equal to the value "getFieldOffset" would return.
//
- size_t GetOffset() const
+ ssize_t GetOffset() const
{
return m_offset;
}
{
return GetKind() == FieldKind::SharedStatic;
}
-
- FieldSeqNode* GetTail()
- {
- FieldSeqNode* tail = this;
- while (tail->m_next != nullptr)
- {
- tail = tail->m_next;
- }
- return tail;
- }
-
- // Make sure this provides methods that allow it to be used as a KeyFuncs type in JitHashTable.
- // Note that there is a one-to-one relationship between the field handle and the field kind, so
- // we do not need to mask away the latter for comparison purposes. Likewise, we do not need to
- // include the offset.
- static int GetHashCode(FieldSeqNode fsn)
- {
- return static_cast<int>(fsn.m_fieldHandleAndKind) ^ static_cast<int>(reinterpret_cast<intptr_t>(fsn.m_next));
- }
-
- static bool Equals(const FieldSeqNode& fsn1, const FieldSeqNode& fsn2)
- {
- return fsn1.m_fieldHandleAndKind == fsn2.m_fieldHandleAndKind && fsn1.m_next == fsn2.m_next;
- }
};
// This class canonicalizes field sequences.
+//
class FieldSeqStore
{
- typedef JitHashTable<FieldSeqNode, /*KeyFuncs*/ FieldSeqNode, FieldSeqNode*> FieldSeqNodeCanonMap;
-
- CompAllocator m_alloc;
- FieldSeqNodeCanonMap* m_canonMap;
-
- static FieldSeqNode s_notAField; // No value, just exists to provide an address.
+ // Maps field handles to field sequence instances.
+ //
+ JitHashTable<CORINFO_FIELD_HANDLE, JitPtrKeyFuncs<CORINFO_FIELD_STRUCT_>, FieldSeq> m_map;
public:
- FieldSeqStore(CompAllocator alloc);
-
- // Returns the (canonical in the store) singleton field sequence for the given handle.
- FieldSeqNode* CreateSingleton(CORINFO_FIELD_HANDLE fieldHnd,
- size_t offset,
- FieldSeqNode::FieldKind fieldKind = FieldSeqNode::FieldKind::Instance);
-
- // This is a special distinguished FieldSeqNode indicating that a constant does *not*
- // represent a valid field sequence. This is "infectious", in the sense that appending it
- // (on either side) to any field sequence yields the "NotAField()" sequence.
- static FieldSeqNode* NotAField()
+ FieldSeqStore(CompAllocator alloc) : m_map(alloc)
{
- return &s_notAField;
}
- // Returns the (canonical in the store) field sequence representing the concatenation of
- // the sequences represented by "a" and "b". Assumes that "a" and "b" are canonical; that is,
- // they are the results of CreateSingleton, NotAField, or Append calls. If either of the arguments
- // are the "NotAField" value, so is the result.
- FieldSeqNode* Append(FieldSeqNode* a, FieldSeqNode* b);
+ FieldSeq* Create(CORINFO_FIELD_HANDLE fieldHnd, ssize_t offset, FieldSeq::FieldKind fieldKind);
+
+ FieldSeq* Append(FieldSeq* a, FieldSeq* b);
};
class GenTreeUseEdgeIterator;
GTF_ICON_CIDMID_HDL = 0x0E000000, // GT_CNS_INT -- constant is a class ID or a module ID
GTF_ICON_BBC_PTR = 0x0F000000, // GT_CNS_INT -- constant is a basic block count pointer
GTF_ICON_STATIC_BOX_PTR = 0x10000000, // GT_CNS_INT -- constant is an address of the box for a STATIC_IN_HEAP field
- GTF_ICON_FIELD_SEQ = 0x11000000, // <--------> -- constant is a FieldSeqNode* (used only as VNHandle)
+ GTF_ICON_FIELD_SEQ = 0x11000000, // <--------> -- constant is a FieldSeq* (used only as VNHandle)
// GTF_ICON_REUSE_REG_VAL = 0x00800000 // GT_CNS_INT -- GTF_REUSE_REG_VAL, defined above
- GTF_ICON_FIELD_OFF = 0x00400000, // GT_CNS_INT -- constant is a field offset
GTF_ICON_SIMD_COUNT = 0x00200000, // GT_CNS_INT -- constant is Vector<T>.Count
GTF_ICON_INITCLASS = 0x00100000, // GT_CNS_INT -- Constant is used to access a static that requires preceding
// class/static init helper. In some cases, the constant is
// Determine whether this tree is a basic block profile count update.
bool IsBlockProfileUpdate();
- bool IsFieldAddr(Compiler* comp, GenTree** pBaseAddr, FieldSeqNode** pFldSeq, ssize_t* pOffset);
+ bool IsFieldAddr(Compiler* comp, GenTree** pBaseAddr, FieldSeq** pFldSeq, ssize_t* pOffset);
bool IsArrayAddr(GenTreeArrAddr** pArrAddr);
// If this constant represents the offset of one or more fields, "gtFieldSeq" represents that
// sequence of fields.
- FieldSeqNode* gtFieldSeq;
+ FieldSeq* gtFieldSeq;
#ifdef DEBUG
// If the value represents target address (for a field or call), holds the handle of the field (or call).
: GenTreeIntConCommon(GT_CNS_INT, type DEBUGARG(largeNode))
, gtIconVal(value)
, gtCompileTimeHandle(0)
- , gtFieldSeq(FieldSeqStore::NotAField())
+ , gtFieldSeq(nullptr)
{
}
- GenTreeIntCon(var_types type, ssize_t value, FieldSeqNode* fields DEBUGARG(bool largeNode = false))
+ GenTreeIntCon(var_types type, ssize_t value, FieldSeq* fields DEBUGARG(bool largeNode = false))
: GenTreeIntConCommon(GT_CNS_INT, type DEBUGARG(largeNode))
, gtIconVal(value)
, gtCompileTimeHandle(0)
, gtFieldSeq(fields)
{
- assert(fields != nullptr);
}
void FixupInitBlkValue(var_types asgType);
assert(OFFSETOF__CORINFO_TypedReference__dataPtr == 0);
assert(destAddr->gtType == TYP_I_IMPL || destAddr->gtType == TYP_BYREF);
- fgAddFieldSeqForZeroOffset(destAddr,
- GetFieldSeqStore()->CreateSingleton(GetRefanyDataField(),
- OFFSETOF__CORINFO_TypedReference__dataPtr));
GenTree* ptrSlot = gtNewOperNode(GT_IND, TYP_I_IMPL, destAddr);
GenTreeIntCon* typeFieldOffset = gtNewIconNode(OFFSETOF__CORINFO_TypedReference__type, TYP_I_IMPL);
- typeFieldOffset->gtFieldSeq =
- GetFieldSeqStore()->CreateSingleton(GetRefanyTypeField(), OFFSETOF__CORINFO_TypedReference__type);
+
GenTree* typeSlot =
gtNewOperNode(GT_IND, TYP_I_IMPL, gtNewOperNode(GT_ADD, destAddr->gtType, destAddrClone, typeFieldOffset));
// read-only (using "stsfld" on them is UB). In mixed-mode assemblies, RVA statics can
// be mutable, but the only current producer of such images, the C++/CLI compiler, does
// not appear to support mapping different fields to the same address. So we will say
- // that "mutable overlapping RVA statics" are UB as well. If this ever changes, code in
- // value numbering will need to be updated to respect "NotAField FldSeq".
+ // that "mutable overlapping RVA statics" are UB as well.
// For statics that are not "boxed", the initial address tree will contain the field sequence.
// For those that are, we will attach it later, when adding the indirection for the box, since
bool isBoxedStatic = (pFieldInfo->fieldFlags & CORINFO_FLG_FIELD_STATIC_IN_HEAP) != 0;
bool isSharedStatic = (pFieldInfo->fieldAccessor == CORINFO_FIELD_STATIC_GENERICS_STATIC_HELPER) ||
(pFieldInfo->fieldAccessor == CORINFO_FIELD_STATIC_READYTORUN_HELPER);
- FieldSeqNode::FieldKind fieldKind =
- isSharedStatic ? FieldSeqNode::FieldKind::SharedStatic : FieldSeqNode::FieldKind::SimpleStatic;
+ FieldSeq::FieldKind fieldKind =
+ isSharedStatic ? FieldSeq::FieldKind::SharedStatic : FieldSeq::FieldKind::SimpleStatic;
- FieldSeqNode* innerFldSeq = nullptr;
- FieldSeqNode* outerFldSeq = nullptr;
+ FieldSeq* innerFldSeq;
+ FieldSeq* outerFldSeq;
if (isBoxedStatic)
{
- innerFldSeq = FieldSeqStore::NotAField();
- outerFldSeq = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField, TARGET_POINTER_SIZE, fieldKind);
+ innerFldSeq = nullptr;
+ outerFldSeq = GetFieldSeqStore()->Create(pResolvedToken->hField, TARGET_POINTER_SIZE, fieldKind);
}
else
{
bool hasConstAddr = (pFieldInfo->fieldAccessor == CORINFO_FIELD_STATIC_ADDRESS) ||
(pFieldInfo->fieldAccessor == CORINFO_FIELD_STATIC_RVA_ADDRESS);
- size_t offset;
+ ssize_t offset;
if (hasConstAddr)
{
- offset = reinterpret_cast<size_t>(info.compCompHnd->getFieldAddress(pResolvedToken->hField));
+ offset = reinterpret_cast<ssize_t>(info.compCompHnd->getFieldAddress(pResolvedToken->hField));
assert(offset != 0);
}
else
offset = pFieldInfo->offset;
}
- innerFldSeq = GetFieldSeqStore()->CreateSingleton(pResolvedToken->hField, offset, fieldKind);
- outerFldSeq = FieldSeqStore::NotAField();
+ innerFldSeq = GetFieldSeqStore()->Create(pResolvedToken->hField, offset, fieldKind);
+ outerFldSeq = nullptr;
}
GenTree* op1;
addr->gtFlags |= GTF_ARR_ADDR_NONNULL;
}
- // Transfer the zero-offset annotation from INDEX_ADDR to ARR_ADDR...
- FieldSeqNode* zeroOffsetFldSeq;
- if (GetZeroOffsetFieldMap()->Lookup(indexAddr, &zeroOffsetFldSeq))
- {
- fgAddFieldSeqForZeroOffset(addr, zeroOffsetFldSeq);
- }
-
GenTree* tree = addr;
// Prepend the bounds check and the assignment trees that were created (if any).
unsigned fldOffset = tree->AsField()->gtFldOffset;
GenTree* objRef = tree->AsField()->GetFldObj();
bool fldMayOverlap = tree->AsField()->gtFldMayOverlap;
- FieldSeqNode* fieldSeq = FieldSeqStore::NotAField();
+ FieldSeq* fieldSeq = nullptr;
// Reset the flag because we may reuse the node.
tree->AsField()->gtFldMayOverlap = false;
}
#endif
- if (!fldMayOverlap)
+ // We only need to attach the field offset information for class fields.
+ if ((objRefType == TYP_REF) && !fldMayOverlap)
{
- fieldSeq = GetFieldSeqStore()->CreateSingleton(symHnd, fldOffset, FieldSeqNode::FieldKind::Instance);
+ fieldSeq = GetFieldSeqStore()->Create(symHnd, fldOffset, FieldSeq::FieldKind::Instance);
}
+ // Add the member offset to the object's address.
if (fldOffset != 0)
{
- // Generate the "addr" node.
- // Add the member offset to the object's address.
addr = gtNewOperNode(GT_ADD, (objRefType == TYP_I_IMPL) ? TYP_I_IMPL : TYP_BYREF, addr,
- gtNewIconHandleNode(fldOffset, GTF_ICON_FIELD_OFF, fieldSeq));
+ gtNewIconNode(fldOffset, fieldSeq));
}
// Now let's set the "tree" as a GT_IND tree.
//
// Create "comma2" node and link it to "tree".
//
- GenTree* comma2;
- comma2 = gtNewOperNode(GT_COMMA,
- addr->TypeGet(), // The type of "comma2" node is the same as the type of "addr" node.
- comma, addr);
+ GenTree* comma2 = gtNewOperNode(GT_COMMA, addr->TypeGet(), comma, addr);
tree->AsOp()->gtOp1 = comma2;
}
/* indirect to have tlsRef point at the base of the DLLs Thread Local Storage */
tlsRef = gtNewOperNode(GT_IND, TYP_I_IMPL, tlsRef);
+ // Add the TLS static field offset to the address.
assert(!fldMayOverlap);
- fieldSeq = GetFieldSeqStore()->CreateSingleton(symHnd, fldOffset, FieldSeqNode::FieldKind::SimpleStatic);
-
- if (fldOffset != 0)
- {
- GenTree* fldOffsetNode = new (this, GT_CNS_INT) GenTreeIntCon(TYP_INT, fldOffset, fieldSeq);
-
- /* Add the TLS static field offset to the address */
-
- tlsRef = gtNewOperNode(GT_ADD, TYP_I_IMPL, tlsRef, fldOffsetNode);
- }
+ fieldSeq = GetFieldSeqStore()->Create(symHnd, fldOffset, FieldSeq::FieldKind::SimpleStatic);
+ tlsRef = gtNewOperNode(GT_ADD, TYP_I_IMPL, tlsRef, gtNewIconNode(fldOffset, fieldSeq));
// Final indirect to get to actual value of TLS static field
if (!isBoxedStatic)
{
// Only simple statics get importred as GT_FIELDs.
- fieldSeq = GetFieldSeqStore()->CreateSingleton(symHnd, reinterpret_cast<size_t>(fldAddr),
- FieldSeqNode::FieldKind::SimpleStatic);
+ fieldSeq = GetFieldSeqStore()->Create(symHnd, reinterpret_cast<ssize_t>(fldAddr),
+ FieldSeq::FieldKind::SimpleStatic);
}
// TODO-CQ: enable this optimization for 32 bit targets.
return fgMorphSmpOp(tree);
}
}
- noway_assert(tree->gtOper == GT_IND);
-
- if (fldOffset == 0)
- {
- GenTree* addr = tree->AsOp()->gtOp1;
-
- // 'addr' may be a GT_COMMA. Skip over any comma nodes
- addr = addr->gtEffectiveVal();
-#ifdef DEBUG
- if (verbose)
- {
- printf("\nBefore calling fgAddFieldSeqForZeroOffset:\n");
- gtDispTree(tree);
- }
-#endif
-
- // We expect 'addr' to be an address at this point.
- assert(addr->TypeGet() == TYP_BYREF || addr->TypeGet() == TYP_I_IMPL || addr->TypeGet() == TYP_REF);
-
- // Since we don't make a constant zero to attach the field sequence to, associate it with the "addr" node.
- fgAddFieldSeqForZeroOffset(addr, fieldSeq);
- }
+ noway_assert(tree->OperIs(GT_IND));
// Pass down the current mac; if non null we are computing an address
GenTree* result = fgMorphSmpOp(tree, mac);
-#ifdef DEBUG
- if (verbose)
- {
- printf("\nFinal value of Compiler::fgMorphField after calling fgMorphSmpOp:\n");
- gtDispTree(result);
- }
-#endif
+ JITDUMP("\nFinal value of Compiler::fgMorphField after calling fgMorphSmpOp:\n");
+ DISPTREE(result);
return result;
}
* Perform the required oper-specific postorder morphing
*/
- GenTree* temp;
- GenTree* lclVarTree;
- FieldSeqNode* fieldSeq = nullptr;
+ GenTree* temp;
+ GenTree* lclVarTree;
switch (oper)
{
// except when `op2` is a const byref.
op2->AsIntConCommon()->SetIconValue(-op2->AsIntConCommon()->IconValue());
- op2->AsIntConRef().gtFieldSeq = FieldSeqStore::NotAField();
+ op2->AsIntConRef().gtFieldSeq = nullptr;
oper = GT_ADD;
tree->ChangeOper(oper);
goto CM_ADD_OP;
}
// Perform the transform ADDR(IND(...)) == (...).
- GenTree* addr = op1->AsOp()->gtOp1;
+ GenTree* addr = op1->AsIndir()->Addr();
- // If tree has a zero field sequence annotation, update the annotation
- // on addr node.
- FieldSeqNode* zeroFieldSeq = nullptr;
- if (GetZeroOffsetFieldMap()->Lookup(tree, &zeroFieldSeq))
- {
- fgAddFieldSeqForZeroOffset(addr, zeroFieldSeq);
- }
-
- noway_assert(varTypeIsGC(addr->gtType) || addr->gtType == TYP_I_IMPL);
+ noway_assert(varTypeIsI(addr));
DEBUG_DESTROY_NODE(op1);
DEBUG_DESTROY_NODE(tree);
}
GenTree* commaNode = commas.Top();
- // The top-level addr might be annotated with a zeroOffset field.
- FieldSeqNode* zeroFieldSeq = nullptr;
- bool isZeroOffset = GetZeroOffsetFieldMap()->Lookup(tree, &zeroFieldSeq);
- tree = op1;
+ tree = op1;
commaNode->AsOp()->gtOp2->gtFlags |= GTF_DONT_CSE;
// If the node we're about to put under a GT_ADDR is an indirection, it
commaOp2->gtFlags |= (commaOp2->AsOp()->gtOp1->gtFlags & GTF_EXCEPT);
}
- op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, commaOp2);
-
- if (isZeroOffset)
- {
- // Transfer the annotation to the new GT_ADDR node.
- fgAddFieldSeqForZeroOffset(op1, zeroFieldSeq);
- }
+ op1 = gtNewOperNode(GT_ADDR, TYP_BYREF, commaOp2);
commaNode->AsOp()->gtOp2 = op1;
// Originally, I gave all the comma nodes type "byref". But the ADDR(IND(x)) == x transform
// might give op1 a type different from byref (like, say, native int). So now go back and give
// Fold (x + 0) - given it won't change the tree type.
if (op2->IsIntegralConst(0) && (genActualType(add) == genActualType(op1)))
{
- if (op2->IsCnsIntOrI() && varTypeIsI(op1))
+ // Keep the offset nodes with annotations for value numbering purposes.
+ if (!op2->IsCnsIntOrI() || (op2->AsIntCon()->gtFieldSeq == nullptr))
{
- fgAddFieldSeqForZeroOffset(op1, op2->AsIntCon()->gtFieldSeq);
- }
+ DEBUG_DESTROY_NODE(op2);
+ DEBUG_DESTROY_NODE(add);
- DEBUG_DESTROY_NODE(op2);
- DEBUG_DESTROY_NODE(add);
+ return op1;
+ }
- return op1;
+ // Communicate to CSE that this addition is a no-op.
+ add->SetDoNotCSE();
}
if (opts.OptimizationEnabled())
#endif // FEATURE_IMPLICIT_BYREFS
}
-//------------------------------------------------------------------------
-// fgAddFieldSeqForZeroOffset:
-// Associate a fieldSeq (with a zero offset) with the GenTree node 'addr'
-//
-// Arguments:
-// addr - A GenTree node
-// fieldSeqZero - a fieldSeq (with a zero offset)
-//
-// Notes:
-// Some GenTree nodes have internal fields that record the field sequence.
-// If we have one of these nodes: GT_CNS_INT, GT_LCL_FLD
-// we can append the field sequence using the gtFieldSeq
-// If we have a GT_ADD of a GT_CNS_INT we can use the
-// fieldSeq from child node.
-// Otherwise we record 'fieldSeqZero' in the GenTree node using
-// a Map: GetFieldSeqStore()
-// When doing so we take care to preserve any existing zero field sequence
-//
-void Compiler::fgAddFieldSeqForZeroOffset(GenTree* addr, FieldSeqNode* fieldSeqZero)
-{
- // We expect 'addr' to be an address at this point.
- assert(addr->TypeGet() == TYP_BYREF || addr->TypeGet() == TYP_I_IMPL || addr->TypeGet() == TYP_REF);
-
- // Tunnel through any commas.
- const bool commaOnly = true;
- addr = addr->gtEffectiveVal(commaOnly);
-
- // We still expect 'addr' to be an address at this point.
- assert(addr->TypeGet() == TYP_BYREF || addr->TypeGet() == TYP_I_IMPL || addr->TypeGet() == TYP_REF);
-
- FieldSeqNode* fieldSeqUpdate = fieldSeqZero;
- GenTree* fieldSeqNode = addr;
- bool fieldSeqRecorded = false;
-
-#ifdef DEBUG
- if (verbose)
- {
- printf("\nfgAddFieldSeqForZeroOffset for");
- gtDispAnyFieldSeq(fieldSeqZero);
-
- printf("\naddr (Before)\n");
- gtDispNode(addr, nullptr, nullptr, false);
- gtDispCommonEndLine(addr);
- }
-#endif // DEBUG
-
- switch (addr->OperGet())
- {
- case GT_CNS_INT:
- fieldSeqUpdate = GetFieldSeqStore()->Append(addr->AsIntCon()->gtFieldSeq, fieldSeqZero);
- addr->AsIntCon()->gtFieldSeq = fieldSeqUpdate;
- fieldSeqRecorded = true;
- break;
-
- case GT_ADD:
- if (addr->AsOp()->gtOp1->OperGet() == GT_CNS_INT)
- {
- fieldSeqNode = addr->AsOp()->gtOp1;
-
- fieldSeqUpdate = GetFieldSeqStore()->Append(addr->AsOp()->gtOp1->AsIntCon()->gtFieldSeq, fieldSeqZero);
- addr->AsOp()->gtOp1->AsIntCon()->gtFieldSeq = fieldSeqUpdate;
- fieldSeqRecorded = true;
- }
- else if (addr->AsOp()->gtOp2->OperGet() == GT_CNS_INT)
- {
- fieldSeqNode = addr->AsOp()->gtOp2;
-
- fieldSeqUpdate = GetFieldSeqStore()->Append(addr->AsOp()->gtOp2->AsIntCon()->gtFieldSeq, fieldSeqZero);
- addr->AsOp()->gtOp2->AsIntCon()->gtFieldSeq = fieldSeqUpdate;
- fieldSeqRecorded = true;
- }
- break;
-
- default:
- break;
- }
-
- if (fieldSeqRecorded == false)
- {
- // Record in the general zero-offset map.
-
- // The "addr" node might already be annotated with a zero-offset field sequence.
- FieldSeqNode* existingFieldSeq = nullptr;
- if (GetZeroOffsetFieldMap()->Lookup(addr, &existingFieldSeq))
- {
- // Append the zero field sequences
- fieldSeqUpdate = GetFieldSeqStore()->Append(existingFieldSeq, fieldSeqZero);
- }
- // Overwrite the field sequence annotation for op1
- GetZeroOffsetFieldMap()->Set(addr, fieldSeqUpdate, NodeToFieldSeqMap::Overwrite);
- fieldSeqRecorded = true;
- }
-
-#ifdef DEBUG
- if (verbose)
- {
- printf(" (After)\n");
- gtDispNode(fieldSeqNode, nullptr, nullptr, false);
- gtDispCommonEndLine(fieldSeqNode);
- }
-#endif // DEBUG
-}
-
#ifdef FEATURE_SIMD
//-----------------------------------------------------------------------------------
LclVarDsc* srcVarDsc = m_comp->lvaGetDesc(m_srcLclNum);
unsigned srcFieldLclNum = srcVarDsc->lvFieldLclStart + i;
LclVarDsc* srcFieldVarDsc = m_comp->lvaGetDesc(srcFieldLclNum);
-
- // Have to set the field sequence -- which means we need the field handle.
- CORINFO_CLASS_HANDLE classHnd = srcVarDsc->GetStructHnd();
- CORINFO_FIELD_HANDLE fieldHnd =
- m_comp->info.compCompHnd->getFieldInClass(classHnd, srcFieldVarDsc->lvFldOrdinal);
-
- unsigned srcFieldOffset = m_comp->lvaGetDesc(srcFieldLclNum)->lvFldOffset;
- var_types srcType = srcFieldVarDsc->TypeGet();
- FieldSeqNode* curFieldSeq = m_comp->GetFieldSeqStore()->CreateSingleton(fieldHnd, srcFieldOffset);
+ unsigned srcFieldOffset = srcFieldVarDsc->lvFldOffset;
+ var_types srcType = srcFieldVarDsc->TypeGet();
if (!m_dstUseLclFld)
{
- if (srcFieldOffset == 0)
- {
- m_comp->fgAddFieldSeqForZeroOffset(dstAddrClone, curFieldSeq);
- }
- else
+ if (srcFieldOffset != 0)
{
- GenTree* fieldOffsetNode = m_comp->gtNewIconNode(srcFieldVarDsc->lvFldOffset, curFieldSeq);
+ GenTree* fieldOffsetNode = m_comp->gtNewIconNode(srcFieldOffset, TYP_I_IMPL);
dstAddrClone = m_comp->gtNewOperNode(GT_ADD, TYP_BYREF, dstAddrClone, fieldOffsetNode);
}
}
}
- CORINFO_CLASS_HANDLE classHnd = m_comp->lvaGetDesc(m_dstLclNum)->GetStructHnd();
- CORINFO_FIELD_HANDLE fieldHnd =
- m_comp->info.compCompHnd->getFieldInClass(classHnd,
- m_comp->lvaGetDesc(dstFieldLclNum)->lvFldOrdinal);
-
- unsigned fldOffset = m_comp->lvaGetDesc(dstFieldLclNum)->lvFldOffset;
- var_types destType = m_comp->lvaGetDesc(dstFieldLclNum)->lvType;
- FieldSeqNode* curFieldSeq = m_comp->GetFieldSeqStore()->CreateSingleton(fieldHnd, fldOffset);
+ unsigned fldOffset = m_comp->lvaGetDesc(dstFieldLclNum)->lvFldOffset;
+ var_types destType = m_comp->lvaGetDesc(dstFieldLclNum)->lvType;
bool done = false;
if (fldOffset == 0)
if (!m_srcUseLclFld)
{
assert(srcAddrClone != nullptr);
- if (fldOffset == 0)
+ if (fldOffset != 0)
{
- m_comp->fgAddFieldSeqForZeroOffset(srcAddrClone, curFieldSeq);
- }
- else
- {
- GenTreeIntCon* fldOffsetNode = m_comp->gtNewIconNode(fldOffset, curFieldSeq);
+ GenTreeIntCon* fldOffsetNode = m_comp->gtNewIconNode(fldOffset, TYP_I_IMPL);
srcAddrClone = m_comp->gtNewOperNode(GT_ADD, TYP_BYREF, srcAddrClone, fldOffsetNode);
}
+
srcFld = m_comp->gtNewIndir(destType, srcAddrClone);
}
else
// This will contain the replacement tree for exp
// It will either be the CSE def or CSE ref
//
- GenTree* cse = nullptr;
- bool isDef;
- FieldSeqNode* fldSeq = nullptr;
- bool commaOnly = true;
- GenTree* effectiveExp = exp->gtEffectiveVal(commaOnly);
- const bool hasZeroMapAnnotation = m_pCompiler->GetZeroOffsetFieldMap()->Lookup(effectiveExp, &fldSeq);
+ GenTree* cse = nullptr;
+ bool isDef;
if (IS_CSE_USE(exp->gtCSEnum))
{
//
*link = cse;
- // If it has a zero-offset field seq, copy annotation.
- if (hasZeroMapAnnotation)
- {
- m_pCompiler->fgAddFieldSeqForZeroOffset(cse, fldSeq);
- }
-
assert(m_pCompiler->fgRemoveRestOfBlock == false);
/* re-morph the statement */
{
GenTreeArrAddr* arrAddr = nullptr;
GenTree* baseAddr = nullptr;
- FieldSeqNode* fldSeq = nullptr;
+ FieldSeq* fldSeq = nullptr;
ssize_t offset = 0;
if (arg->IsArrayAddr(&arrAddr))
}
else if (arg->IsFieldAddr(this, &baseAddr, &fldSeq, &offset))
{
- assert((fldSeq != nullptr) && (fldSeq != FieldSeqStore::NotAField()));
+ assert(fldSeq != nullptr);
FieldKindForVN fieldKind =
(baseAddr != nullptr) ? FieldKindForVN::WithBaseAddr : FieldKindForVN::SimpleStatic;
// Allocate "m_defs" here, according to the typ/attribs pair.
switch (attribs)
{
- case CEA_NotAField:
- break; // Nothing to do.
case CEA_Const:
switch (typ)
{
}
//------------------------------------------------------------------------
-// IsVNNotAField: Is the value number a "NotAField" field sequence?
-//
-// Arguments:
-// vn - the value number in question
-//
-// Return Value:
-// Whether "vn" represents a "NotAField" field sequence.
-//
-// Notes:
-// "NotAField" field sequences always get a "new, unique" number, since
-// they represent unknown locations. Thus they get their own chunk kind,
-// for which no actual memory is allocated to hold VNFunc data.
-//
-bool ValueNumStore::IsVNNotAField(ValueNum vn)
-{
- return m_chunks.GetNoExpand(GetChunkNum(vn))->m_attribs == CEA_NotAField;
-}
-
-//------------------------------------------------------------------------
// VNForFieldSeq: Get the value number representing a field sequence.
//
// Arguments:
// fieldSeq - the field sequence
//
// Return Value:
-// "VNForNull" if the sequence is empty ("nullptr").
-// "IsVNNotAField" VN if it is a "NotAField".
-// "GTF_FIELD_SEQ_PTR" handle VN otherwise.
+// "GTF_FIELD_SEQ_PTR" handle VN for the sequences.
//
-ValueNum ValueNumStore::VNForFieldSeq(FieldSeqNode* fieldSeq)
+ValueNum ValueNumStore::VNForFieldSeq(FieldSeq* fieldSeq)
{
- if (fieldSeq == nullptr)
- {
- return VNForNull();
- }
-
- ValueNum fieldSeqVN;
- if (fieldSeq == FieldSeqStore::NotAField())
- {
- // We always allocate a new, unique VN in this call.
- Chunk* c = GetAllocChunk(TYP_REF, CEA_NotAField);
- unsigned offsetWithinChunk = c->AllocVN();
- fieldSeqVN = c->m_baseVN + offsetWithinChunk;
- }
- else
- {
- // This encoding relies on the canonicality of field sequences.
- fieldSeqVN = VNForHandle(reinterpret_cast<ssize_t>(fieldSeq), GTF_ICON_FIELD_SEQ);
- }
+ // This encoding relies on the canonicality of field sequences.
+ ValueNum fieldSeqVN = VNForHandle(reinterpret_cast<ssize_t>(fieldSeq), GTF_ICON_FIELD_SEQ);
#ifdef DEBUG
if (m_pComp->verbose)
// Return Value:
// The field sequence associated with "vn".
//
-FieldSeqNode* ValueNumStore::FieldSeqVNToFieldSeq(ValueNum vn)
+FieldSeq* ValueNumStore::FieldSeqVNToFieldSeq(ValueNum vn)
{
- if (vn == VNForNull())
- {
- return nullptr;
- }
-
- if (IsVNNotAField(vn))
- {
- return FieldSeqStore::NotAField();
- }
-
assert(IsVNHandle(vn) && (GetHandleFlags(vn) == GTF_ICON_FIELD_SEQ));
- return reinterpret_cast<FieldSeqNode*>(ConstantValue<ssize_t>(vn));
-}
-
-//------------------------------------------------------------------------
-// FieldSeqVNAppend: Append a field sequences to one represented as a VN.
-//
-// Arguments:
-// innerFieldSeqVN - VN of the field sequence being appended to
-// outerFieldSeq - the field sequence being appended
-//
-// Return Value:
-// The value number representing [innerFieldSeq, outerFieldSeq].
-//
-ValueNum ValueNumStore::FieldSeqVNAppend(ValueNum innerFieldSeqVN, FieldSeqNode* outerFieldSeq)
-{
- FieldSeqNode* innerFieldSeq = FieldSeqVNToFieldSeq(innerFieldSeqVN);
- FieldSeqNode* fullFieldSeq = m_pComp->GetFieldSeqStore()->Append(innerFieldSeq, outerFieldSeq);
-
- return VNForFieldSeq(fullFieldSeq);
+ return reinterpret_cast<FieldSeq*>(ConstantValue<ssize_t>(vn));
}
ValueNum ValueNumStore::ExtendPtrVN(GenTree* opA, GenTree* opB)
{
if (opB->OperGet() == GT_CNS_INT)
{
- FieldSeqNode* fldSeq = opB->AsIntCon()->gtFieldSeq;
- if (fldSeq != nullptr)
- {
- return ExtendPtrVN(opA, fldSeq, opB->AsIntCon()->IconValue());
- }
+ return ExtendPtrVN(opA, opB->AsIntCon()->gtFieldSeq, opB->AsIntCon()->IconValue());
}
+
return NoVN;
}
-ValueNum ValueNumStore::ExtendPtrVN(GenTree* opA, FieldSeqNode* fldSeq, ssize_t offset)
+ValueNum ValueNumStore::ExtendPtrVN(GenTree* opA, FieldSeq* fldSeq, ssize_t offset)
{
- assert(fldSeq != nullptr);
-
ValueNum res = NoVN;
ValueNum opAvnWx = opA->gtVNPair.GetLiberal();
if (funcApp.m_func == VNF_PtrToStatic)
{
- res = VNForFunc(TYP_BYREF, VNF_PtrToStatic, funcApp.m_args[0], FieldSeqVNAppend(funcApp.m_args[1], fldSeq),
+ fldSeq = m_pComp->GetFieldSeqStore()->Append(FieldSeqVNToFieldSeq(funcApp.m_args[1]), fldSeq);
+ res = VNForFunc(TYP_BYREF, VNF_PtrToStatic, funcApp.m_args[0], VNForFieldSeq(fldSeq),
VNForIntPtrCon(ConstantValue<ssize_t>(funcApp.m_args[2]) + offset));
}
else if (funcApp.m_func == VNF_PtrToArrElem)
// Notes:
// Only assigns normal VNs to "loadTree".
//
-void Compiler::fgValueNumberFieldLoad(GenTree* loadTree, GenTree* baseAddr, FieldSeqNode* fieldSeq, ssize_t offset)
+void Compiler::fgValueNumberFieldLoad(GenTree* loadTree, GenTree* baseAddr, FieldSeq* fieldSeq, ssize_t offset)
{
- assert(fieldSeq != nullptr);
-
- if (fieldSeq == FieldSeqStore::NotAField())
- {
- loadTree->gtVNPair.SetBoth(vnStore->VNForExpr(compCurBB, loadTree->TypeGet()));
- return;
- }
+ noway_assert(fieldSeq != nullptr);
// Two cases:
//
// value - The value being stored
//
void Compiler::fgValueNumberFieldStore(
- GenTree* storeNode, GenTree* baseAddr, FieldSeqNode* fieldSeq, ssize_t offset, unsigned storeSize, ValueNum value)
+ GenTree* storeNode, GenTree* baseAddr, FieldSeq* fieldSeq, ssize_t offset, unsigned storeSize, ValueNum value)
{
- assert(fieldSeq != nullptr);
-
- if (fieldSeq == FieldSeqStore::NotAField())
- {
- fgMutateGcHeap(storeNode DEBUGARG("NotAField field store"));
- return;
- }
+ noway_assert(fieldSeq != nullptr);
// Two cases:
// 1) Instance field / "complex" static: heap[field][baseAddr][offset + load size] = value.
{
printf("NoVN");
}
- else if (IsVNNotAField(vn) || (IsVNHandle(vn) && (GetHandleFlags(vn) == GTF_ICON_FIELD_SEQ)))
+ else if (IsVNHandle(vn) && (GetHandleFlags(vn) == GTF_ICON_FIELD_SEQ))
{
- comp->gtDispAnyFieldSeq(FieldSeqVNToFieldSeq(vn));
+ comp->gtDispFieldSeq(FieldSeqVNToFieldSeq(vn), 0);
printf(" ");
}
else if (IsVNHandle(vn))
ssize_t offset = 0;
unsigned storeSize = lhs->AsIndir()->Size();
GenTree* baseAddr = nullptr;
- FieldSeqNode* fldSeq = nullptr;
+ FieldSeq* fldSeq = nullptr;
if (argIsVNFunc && (funcApp.m_func == VNF_PtrToStatic))
{
}
else if (arg->IsFieldAddr(this, &baseAddr, &fldSeq, &offset))
{
- assert(fldSeq != FieldSeqStore::NotAField());
+ assert(fldSeq != nullptr);
fgValueNumberFieldStore(tree, baseAddr, fldSeq, offset, storeSize, rhsVNPair.GetLiberal());
}
else if (arg->DefinesLocalAddr(&lclVarTree, &offset))
// An untracked local, and other odd cases.
lcl->gtVNPair.SetBoth(vnStore->VNForExpr(compCurBB, lcl->TypeGet()));
}
-
- // When we have a TYP_BYREF LclVar it can have a zero offset field sequence that needs to be added.
- FieldSeqNode* zeroOffsetFldSeq = nullptr;
- if ((typ == TYP_BYREF) && GetZeroOffsetFieldMap()->Lookup(tree, &zeroOffsetFldSeq))
- {
- ValueNum addrExtended = vnStore->ExtendPtrVN(lcl, zeroOffsetFldSeq, 0);
- if (addrExtended != ValueNumStore::NoVN)
- {
- assert(lcl->gtVNPair.BothEqual());
- lcl->gtVNPair.SetBoth(addrExtended);
- }
- }
}
else
{
lclFld->gtVNPair =
vnStore->VNPairForLoad(lclVarValue, lvaLclExactSize(lclNum), lclFld->TypeGet(),
lclFld->GetLclOffs(), lclFld->GetSize());
-
- // If we have byref field, we may have a zero-offset sequence to add.
- FieldSeqNode* zeroOffsetFldSeq = nullptr;
- if ((typ == TYP_BYREF) && GetZeroOffsetFieldMap()->Lookup(lclFld, &zeroOffsetFldSeq))
- {
- ValueNum addrExtended = vnStore->ExtendPtrVN(lclFld, zeroOffsetFldSeq, 0);
- if (addrExtended != ValueNumStore::NoVN)
- {
- lclFld->gtVNPair.SetBoth(addrExtended);
- }
- }
}
}
else
}
else if ((location->gtOper == GT_IND) || location->OperIsBlk())
{
- // Usually the ADDR and IND just cancel out...
- // except when this GT_ADDR has a valid zero-offset field sequence
+ // They just cancel, so fetch the ValueNumber from the op1 of the GT_IND node.
//
+ GenTree* addr = location->AsIndir()->Addr();
+ ValueNumPair addrVNP = addr->gtVNPair;
- ValueNumPair addrVNP = ValueNumPair();
- FieldSeqNode* zeroOffsetFieldSeq = nullptr;
- if (GetZeroOffsetFieldMap()->Lookup(tree, &zeroOffsetFieldSeq))
- {
- ValueNum addrExtended = vnStore->ExtendPtrVN(location->AsIndir()->Addr(), zeroOffsetFieldSeq, 0);
- if (addrExtended != ValueNumStore::NoVN)
- {
- // We don't care about lib/cons differences for addresses.
- addrVNP.SetBoth(addrExtended);
- }
- else
- {
- // ExtendPtrVN returned a failure result - give this address a new unique value.
- addrVNP.SetBoth(vnStore->VNForExpr(compCurBB, TYP_BYREF));
- }
- }
- else
- {
- // They just cancel, so fetch the ValueNumber from the op1 of the GT_IND node.
- //
- GenTree* addr = location->AsIndir()->Addr();
- addrVNP = addr->gtVNPair;
-
- // For the CSE phase mark the address as GTF_DONT_CSE
- // because it will end up with the same value number as tree (the GT_ADDR).
- addr->gtFlags |= GTF_DONT_CSE;
- }
+ // For the CSE phase mark the address as GTF_DONT_CSE
+ // because it will end up with the same value number as tree (the GT_ADDR).
+ addr->gtFlags |= GTF_DONT_CSE;
tree->gtVNPair = vnStore->VNPWithExc(addrVNP, vnStore->VNPExceptionSet(location->gtVNPair));
}
// can recognize redundant loads with no stores between them.
GenTree* addr = tree->AsIndir()->Addr();
GenTreeLclVarCommon* lclVarTree = nullptr;
- FieldSeqNode* fldSeq = nullptr;
+ FieldSeq* fldSeq = nullptr;
GenTree* baseAddr = nullptr;
bool isVolatile = (tree->gtFlags & GTF_IND_VOLATILE) != 0;
}
else if (addr->IsFieldAddr(this, &baseAddr, &fldSeq, &offset))
{
- assert(fldSeq != FieldSeqStore::NotAField());
+ assert(fldSeq != nullptr);
fgValueNumberFieldLoad(tree, baseAddr, fldSeq, offset);
}
else // We don't know where the address points, so it is an ByrefExposed load.
// Check for the addition of a field offset constant
//
- if ((oper == GT_ADD) && (!tree->gtOverflowEx()))
+ if ((oper == GT_ADD) && !tree->gtOverflowEx())
{
newVN = vnStore->ExtendPtrVN(tree->AsOp()->gtOp1, tree->AsOp()->gtOp2);
}
printf("\n");
}
}
-
- fgDebugCheckValueNumberedTree(tree);
#endif // DEBUG
}
}
}
-//------------------------------------------------------------------------
-// fgDebugCheckValueNumberedTree: Verify proper numbering for "tree".
-//
-// Currently only checks that we have not forgotten to add a zero-offset
-// field sequence to "tree"'s value number.
-//
-// Arguments:
-// tree - The tree, that has just been numbered, to check
-//
-void Compiler::fgDebugCheckValueNumberedTree(GenTree* tree)
-{
- FieldSeqNode* zeroOffsetFldSeq;
- if (GetZeroOffsetFieldMap()->Lookup(tree, &zeroOffsetFldSeq))
- {
- // Empty field sequences should never be recorded in the map.
- assert(zeroOffsetFldSeq != nullptr);
-
- ValueNum vns[] = {tree->GetVN(VNK_Liberal), tree->GetVN(VNK_Conservative)};
- for (ValueNum vn : vns)
- {
- VNFuncApp vnFunc;
- if (vnStore->GetVNFunc(vn, &vnFunc))
- {
- FieldSeqNode* fullFldSeq;
- switch (vnFunc.m_func)
- {
- case VNF_PtrToStatic:
- fullFldSeq = vnStore->FieldSeqVNToFieldSeq(vnFunc.m_args[1]);
- break;
-
- default:
- continue;
- }
-
- // Verify that the "fullFldSeq" we have just collected is of the
- // form "[outer fields, zeroOffsetFldSeq]", or is "NotAField".
- if (fullFldSeq == FieldSeqStore::NotAField())
- {
- continue;
- }
-
- // This check relies on the canonicality of field sequences.
- FieldSeqNode* fldSeq = fullFldSeq;
- bool zeroOffsetFldSeqFound = false;
- while (fldSeq != nullptr)
- {
- if (fldSeq == zeroOffsetFldSeq)
- {
- zeroOffsetFldSeqFound = true;
- break;
- }
-
- fldSeq = fldSeq->GetNext();
- }
-
- assert(zeroOffsetFldSeqFound);
- }
- }
- }
-}
-
// This method asserts that SSA name constraints specified are satisfied.
// Until we figure out otherwise, all VN's are assumed to be liberal.
// TODO-Cleanup: new JitTestLabels for lib vs cons vs both VN classes?
ValueNum VNForBitCast(ValueNum srcVN, var_types castToType);
ValueNumPair VNPairForBitCast(ValueNumPair srcVNPair, var_types castToType);
- bool IsVNNotAField(ValueNum vn);
+ ValueNum VNForFieldSeq(FieldSeq* fieldSeq);
- ValueNum VNForFieldSeq(FieldSeqNode* fieldSeq);
+ FieldSeq* FieldSeqVNToFieldSeq(ValueNum vn);
- FieldSeqNode* FieldSeqVNToFieldSeq(ValueNum vn);
-
- ValueNum FieldSeqVNAppend(ValueNum innerFieldSeqVN, FieldSeqNode* outerFieldSeq);
-
- // If "opA" has a PtrToLoc, PtrToArrElem, or PtrToStatic application as its value numbers, and "opB" is an integer
- // with a "fieldSeq", returns the VN for the pointer form extended with the field sequence; or else NoVN.
ValueNum ExtendPtrVN(GenTree* opA, GenTree* opB);
- // If "opA" has a PtrToLoc, PtrToArrElem, or PtrToStatic application as its value numbers, returns the VN for the
- // pointer form extended with "fieldSeq"; or else NoVN.
- ValueNum ExtendPtrVN(GenTree* opA, FieldSeqNode* fieldSeq, ssize_t offset);
+
+ ValueNum ExtendPtrVN(GenTree* opA, FieldSeq* fieldSeq, ssize_t offset);
// Queries on value numbers.
// All queries taking value numbers require that those value numbers are valid, that is, that
enum ChunkExtraAttribs : BYTE
{
- CEA_Const, // This chunk contains constant values.
- CEA_Handle, // This chunk contains handle constants.
- CEA_NotAField, // This chunk contains "not a field" values.
- CEA_Func0, // Represents functions of arity 0.
- CEA_Func1, // ...arity 1.
- CEA_Func2, // ...arity 2.
- CEA_Func3, // ...arity 3.
- CEA_Func4, // ...arity 4.
+ CEA_Const, // This chunk contains constant values.
+ CEA_Handle, // This chunk contains handle constants.
+ CEA_Func0, // Represents functions of arity 0.
+ CEA_Func1, // ...arity 1.
+ CEA_Func2, // ...arity 2.
+ CEA_Func3, // ...arity 3.
+ CEA_Func4, // ...arity 4.
CEA_Count
};
ValueNumFuncDef(PtrToArrElem, 4, false, false, false) // Pointer (byref) to an array element. Args: 0: array elem type eq class var_types value, VN's of: 1: array, 2: index, 3: offset.
ValueNumFuncDef(PtrToStatic, 3, false, true, false) // Pointer (byref) to a static variable (or possibly a field thereof, if the static variable is a struct).
// Args: 0: (VN of) the box's address if the static is "boxed",
- // 1: (VN of) the field sequence, of which the first element is the static itself.
+ // 1: (VN of) the field sequence,
// 2: (VN of) offset for the constituent struct fields
ValueNumFuncDef(MDArrLength, 2, false, false, false) // MD array len, Args: 0: array, 1: dimension