// Enumeration of the kinds of memory whose state changes the compiler tracks.
// NOTE: ByrefExposed must precede GcHeap; code that shares SSA state between the
// two kinds (when byrefStatesMatchGcHeapStates is true) asserts GcHeap > ByrefExposed.
enum MemoryKind
{
    ByrefExposed = 0, // Includes anything byrefs can read/write (everything in GcHeap, address-taken
                      // locals, unmanaged heap, callers' locals, etc.)
    GcHeap,           // Includes actual GC heap, and also static fields
    MemoryKindCount,  // Number of MemoryKinds
};
#ifdef DEBUG
// Debug-only printable names for each MemoryKind, indexed by MemoryKind value.
// Must be kept in sync with the MemoryKind enumeration above.
const char* const memoryKindNames[] = {"ByrefExposed", "GcHeap"};
#endif // DEBUG
// Bitmask describing a set of memory kinds (usable in bitfields)
break;
case GT_CLS_VAR:
- // For Volatile indirection, first mutate GcHeap
+ // For Volatile indirection, first mutate GcHeap/ByrefExposed
// see comments in ValueNum.cpp (under case GT_CLS_VAR)
// This models Volatile reads as def-then-use of the heap.
// and allows for a CSE of a subsequent non-volatile read
if ((tree->gtFlags & GTF_FLD_VOLATILE) != 0)
{
// For any Volatile indirection, we must handle it as a
- // definition of GcHeap
- fgCurMemoryDef |= memoryKindSet(GcHeap);
+ // definition of GcHeap/ByrefExposed
+ fgCurMemoryDef |= memoryKindSet(GcHeap, ByrefExposed);
}
// If the GT_CLS_VAR is the lhs of an assignment, we'll handle it as a heap def, when we get to
// assignment.
// Otherwise, we treat it as a use here.
if ((tree->gtFlags & GTF_CLS_VAR_ASG_LHS) == 0)
{
- fgCurMemoryUse |= memoryKindSet(GcHeap);
+ fgCurMemoryUse |= memoryKindSet(GcHeap, ByrefExposed);
}
break;
case GT_IND:
- // For Volatile indirection, first mutate GcHeap
+ // For Volatile indirection, first mutate GcHeap/ByrefExposed
// see comments in ValueNum.cpp (under case GT_CLS_VAR)
// This models Volatile reads as def-then-use of the heap.
// and allows for a CSE of a subsequent non-volatile read
if ((tree->gtFlags & GTF_IND_VOLATILE) != 0)
{
// For any Volatile indirection, we must handle it as a
- // definition of GcHeap
- fgCurMemoryDef |= memoryKindSet(GcHeap);
+ // definition of GcHeap/ByrefExposed
+ fgCurMemoryDef |= memoryKindSet(GcHeap, ByrefExposed);
}
// If the GT_IND is the lhs of an assignment, we'll handle it
- // as a heap def, when we get to assignment.
+ // as a heap/byref def, when we get to assignment.
// Otherwise, we treat it as a use here.
if ((tree->gtFlags & GTF_IND_ASG_LHS) == 0)
{
GenTreePtr addrArg = tree->gtOp.gtOp1->gtEffectiveVal(/*commaOnly*/ true);
if (!addrArg->DefinesLocalAddr(this, /*width doesn't matter*/ 0, &dummyLclVarTree, &dummyIsEntire))
{
- fgCurMemoryUse |= memoryKindSet(GcHeap);
+ fgCurMemoryUse |= memoryKindSet(GcHeap, ByrefExposed);
}
else
{
unreached();
break;
- // We'll assume these are use-then-defs of GcHeap.
+ // We'll assume these are use-then-defs of GcHeap/ByrefExposed.
case GT_LOCKADD:
case GT_XADD:
case GT_XCHG:
case GT_CMPXCHG:
- fgCurMemoryUse |= memoryKindSet(GcHeap);
- fgCurMemoryDef |= memoryKindSet(GcHeap);
- fgCurMemoryHavoc |= memoryKindSet(GcHeap);
+ fgCurMemoryUse |= memoryKindSet(GcHeap, ByrefExposed);
+ fgCurMemoryDef |= memoryKindSet(GcHeap, ByrefExposed);
+ fgCurMemoryHavoc |= memoryKindSet(GcHeap, ByrefExposed);
break;
case GT_MEMORYBARRIER:
- // Simliar to any Volatile indirection, we must handle this as a definition of GcHeap
- fgCurMemoryDef |= memoryKindSet(GcHeap);
+ // Similar to any Volatile indirection, we must handle this as a definition of GcHeap/ByrefExposed
+ fgCurMemoryDef |= memoryKindSet(GcHeap, ByrefExposed);
break;
- // For now, all calls read/write GcHeap, the latter in its entirety. Might tighten this case later.
+ // For now, all calls read/write GcHeap/ByrefExposed, writes in their entirety. Might tighten this case
+ // later.
case GT_CALL:
{
GenTreeCall* call = tree->AsCall();
}
if (modHeap)
{
- fgCurMemoryUse |= memoryKindSet(GcHeap);
- fgCurMemoryDef |= memoryKindSet(GcHeap);
- fgCurMemoryHavoc |= memoryKindSet(GcHeap);
+ fgCurMemoryUse |= memoryKindSet(GcHeap, ByrefExposed);
+ fgCurMemoryDef |= memoryKindSet(GcHeap, ByrefExposed);
+ fgCurMemoryHavoc |= memoryKindSet(GcHeap, ByrefExposed);
}
}
default:
- // Determine whether it defines a GC heap location.
+ // Determine what memory kinds it defines.
if (tree->OperIsAssignment() || tree->OperIsBlkOp())
{
GenTreeLclVarCommon* dummyLclVarTree = NULL;
- if (!tree->DefinesLocal(this, &dummyLclVarTree))
+ if (tree->DefinesLocal(this, &dummyLclVarTree))
{
- // If it doesn't define a local, then it might update the GC heap.
- fgCurMemoryDef |= memoryKindSet(GcHeap);
+ if (lvaVarAddrExposed(dummyLclVarTree->gtLclNum))
+ {
+ fgCurMemoryDef |= memoryKindSet(ByrefExposed);
+
+ // We've found a store that modifies ByrefExposed
+ // memory but not GcHeap memory, so track their
+ // states separately.
+ byrefStatesMatchGcHeapStates = false;
+ }
+ }
+ else
+ {
+ // If it doesn't define a local, then it might update GcHeap/ByrefExposed.
+ fgCurMemoryDef |= memoryKindSet(GcHeap, ByrefExposed);
}
}
MemoryKindSet fgCurMemoryDef; // True iff the current basic block modifies memory.
MemoryKindSet fgCurMemoryHavoc; // True if the current basic block is known to set memory to a "havoc" value.
+ bool byrefStatesMatchGcHeapStates; // True iff GcHeap and ByrefExposed memory have all the same def points.
+
void fgMarkUseDef(GenTreeLclVarCommon* tree);
void fgBeginScopeLife(VARSET_TP* inScope, VarScopeDsc* var);
// state, all the possible memory states are possible initial states of the corresponding catch block(s).)
NodeToUnsignedMap* GetMemorySsaMap(MemoryKind memoryKind)
{
+ if (memoryKind == GcHeap && byrefStatesMatchGcHeapStates)
+ {
+ // Use the same map for GcHeap and ByrefExposed when their states match.
+ memoryKind = ByrefExposed;
+ }
+
assert(memoryKind < MemoryKindCount);
Compiler* compRoot = impInlineRoot();
if (compRoot->m_memorySsaMap[memoryKind] == nullptr)
varDsc->lvRefCnt = 1;
}
+ const bool isDef = (tree->gtFlags & GTF_VAR_DEF) != 0;
+ const bool isUse = !isDef || ((tree->gtFlags & (GTF_VAR_USEASG | GTF_VAR_USEDEF)) != 0);
+
if (varDsc->lvTracked)
{
assert(varDsc->lvVarIndex < lvaTrackedCount);
- const bool isDef = (tree->gtFlags & GTF_VAR_DEF) != 0;
- const bool isUse = !isDef || ((tree->gtFlags & (GTF_VAR_USEASG | GTF_VAR_USEDEF)) != 0);
+ // We don't treat stores to tracked locals as modifications of ByrefExposed memory;
+ // Make sure no tracked local is addr-exposed, to make sure we don't incorrectly CSE byref
+ // loads aliasing it across a store to it.
+ assert(!varDsc->lvAddrExposed);
if (isUse && !VarSetOps::IsMember(this, fgCurDefSet, varDsc->lvVarIndex))
{
VarSetOps::AddElemD(this, fgCurDefSet, varDsc->lvVarIndex);
}
}
- else if (varTypeIsStruct(varDsc))
+ else
{
- lvaPromotionType promotionType = lvaGetPromotionType(varDsc);
-
- if (promotionType != PROMOTION_TYPE_NONE)
+ if (varDsc->lvAddrExposed)
{
- VARSET_TP VARSET_INIT_NOCOPY(bitMask, VarSetOps::MakeEmpty(this));
+ // Reflect the effect on ByrefExposed memory
- for (unsigned i = varDsc->lvFieldLclStart; i < varDsc->lvFieldLclStart + varDsc->lvFieldCnt; ++i)
+ if (isUse)
{
- noway_assert(lvaTable[i].lvIsStructField);
- if (lvaTable[i].lvTracked)
- {
- noway_assert(lvaTable[i].lvVarIndex < lvaTrackedCount);
- VarSetOps::AddElemD(this, bitMask, lvaTable[i].lvVarIndex);
- }
+ fgCurMemoryUse |= memoryKindSet(ByrefExposed);
}
-
- // For pure defs (i.e. not an "update" def which is also a use), add to the (all) def set.
- if ((tree->gtFlags & GTF_VAR_DEF) != 0 && (tree->gtFlags & (GTF_VAR_USEASG | GTF_VAR_USEDEF)) == 0)
+ if (isDef)
{
- VarSetOps::UnionD(this, fgCurDefSet, bitMask);
+ fgCurMemoryDef |= memoryKindSet(ByrefExposed);
+
+ // We've found a store that modifies ByrefExposed
+ // memory but not GcHeap memory, so track their
+ // states separately.
+ byrefStatesMatchGcHeapStates = false;
}
- else if (!VarSetOps::IsSubset(this, bitMask, fgCurDefSet))
+ }
+
+ if (varTypeIsStruct(varDsc))
+ {
+ lvaPromotionType promotionType = lvaGetPromotionType(varDsc);
+
+ if (promotionType != PROMOTION_TYPE_NONE)
{
- // Mark as used any struct fields that are not yet defined.
- VarSetOps::UnionD(this, fgCurUseSet, bitMask);
+ VARSET_TP VARSET_INIT_NOCOPY(bitMask, VarSetOps::MakeEmpty(this));
+
+ for (unsigned i = varDsc->lvFieldLclStart; i < varDsc->lvFieldLclStart + varDsc->lvFieldCnt; ++i)
+ {
+ noway_assert(lvaTable[i].lvIsStructField);
+ if (lvaTable[i].lvTracked)
+ {
+ noway_assert(lvaTable[i].lvVarIndex < lvaTrackedCount);
+ VarSetOps::AddElemD(this, bitMask, lvaTable[i].lvVarIndex);
+ }
+ }
+
+ // For pure defs (i.e. not an "update" def which is also a use), add to the (all) def set.
+ if (!isUse)
+ {
+ assert(isDef);
+ VarSetOps::UnionD(this, fgCurDefSet, bitMask);
+ }
+ else if (!VarSetOps::IsSubset(this, bitMask, fgCurDefSet))
+ {
+ // Mark as used any struct fields that are not yet defined.
+ VarSetOps::UnionD(this, fgCurUseSet, bitMask);
+ }
}
}
}
break;
case GT_CLS_VAR:
- // For Volatile indirection, first mutate GcHeap.
+ // For Volatile indirection, first mutate GcHeap/ByrefExposed.
// See comments in ValueNum.cpp (under case GT_CLS_VAR)
// This models Volatile reads as def-then-use of memory
// and allows for a CSE of a subsequent non-volatile read.
if ((tree->gtFlags & GTF_FLD_VOLATILE) != 0)
{
// For any Volatile indirection, we must handle it as a
- // definition of GcHeap
- fgCurMemoryDef |= memoryKindSet(GcHeap);
+ // definition of GcHeap/ByrefExposed
+ fgCurMemoryDef |= memoryKindSet(GcHeap, ByrefExposed);
}
- // If the GT_CLS_VAR is the lhs of an assignment, we'll handle it as a GcHeap def, when we get to
- // assignment.
+ // If the GT_CLS_VAR is the lhs of an assignment, we'll handle it as a GcHeap/ByrefExposed def, when we get
+ // to the assignment.
// Otherwise, we treat it as a use here.
if ((tree->gtFlags & GTF_CLS_VAR_ASG_LHS) == 0)
{
- fgCurMemoryUse |= memoryKindSet(GcHeap);
+ fgCurMemoryUse |= memoryKindSet(GcHeap, ByrefExposed);
}
break;
case GT_IND:
- // For Volatile indirection, first mutate GcHeap
+ // For Volatile indirection, first mutate GcHeap/ByrefExposed
// see comments in ValueNum.cpp (under case GT_CLS_VAR)
// This models Volatile reads as def-then-use of memory.
// and allows for a CSE of a subsequent non-volatile read
if ((tree->gtFlags & GTF_IND_VOLATILE) != 0)
{
// For any Volatile indirection, we must handle it as a
- // definition of the GcHeap
- fgCurMemoryDef |= memoryKindSet(GcHeap);
+ // definition of the GcHeap/ByrefExposed
+ fgCurMemoryDef |= memoryKindSet(GcHeap, ByrefExposed);
}
// If the GT_IND is the lhs of an assignment, we'll handle it
GenTreePtr addrArg = tree->gtOp.gtOp1->gtEffectiveVal(/*commaOnly*/ true);
if (!addrArg->DefinesLocalAddr(this, /*width doesn't matter*/ 0, &dummyLclVarTree, &dummyIsEntire))
{
- fgCurMemoryUse |= memoryKindSet(GcHeap);
+ fgCurMemoryUse |= memoryKindSet(GcHeap, ByrefExposed);
}
else
{
case GT_XADD:
case GT_XCHG:
case GT_CMPXCHG:
- fgCurMemoryUse |= memoryKindSet(GcHeap);
- fgCurMemoryDef |= memoryKindSet(GcHeap);
- fgCurMemoryHavoc |= memoryKindSet(GcHeap);
+ fgCurMemoryUse |= memoryKindSet(GcHeap, ByrefExposed);
+ fgCurMemoryDef |= memoryKindSet(GcHeap, ByrefExposed);
+ fgCurMemoryHavoc |= memoryKindSet(GcHeap, ByrefExposed);
break;
case GT_MEMORYBARRIER:
- // Simliar to any Volatile indirection, we must handle this as a definition of GcHeap
- fgCurMemoryDef |= memoryKindSet(GcHeap);
+ // Similar to any Volatile indirection, we must handle this as a definition of GcHeap/ByrefExposed
+ fgCurMemoryDef |= memoryKindSet(GcHeap, ByrefExposed);
break;
- // For now, all calls read/write GcHeap, the latter in its entirety. Might tighten this case later.
+ // For now, all calls read/write GcHeap/ByrefExposed, writes in their entirety. Might tighten this case later.
case GT_CALL:
{
GenTreeCall* call = tree->AsCall();
}
if (modHeap)
{
- fgCurMemoryUse |= memoryKindSet(GcHeap);
- fgCurMemoryDef |= memoryKindSet(GcHeap);
- fgCurMemoryHavoc |= memoryKindSet(GcHeap);
+ fgCurMemoryUse |= memoryKindSet(GcHeap, ByrefExposed);
+ fgCurMemoryDef |= memoryKindSet(GcHeap, ByrefExposed);
+ fgCurMemoryHavoc |= memoryKindSet(GcHeap, ByrefExposed);
}
}
default:
- // Determine whether it defines a GcHeap location.
+ // Determine what memory locations it defines.
if (tree->OperIsAssignment() || tree->OperIsBlkOp())
{
GenTreeLclVarCommon* dummyLclVarTree = nullptr;
- if (!tree->DefinesLocal(this, &dummyLclVarTree))
+ if (tree->DefinesLocal(this, &dummyLclVarTree))
{
- // If it doesn't define a local, then it might update the GC heap.
- fgCurMemoryDef |= memoryKindSet(GcHeap);
+ if (lvaVarAddrExposed(dummyLclVarTree->gtLclNum))
+ {
+ fgCurMemoryDef |= memoryKindSet(ByrefExposed);
+
+ // We've found a store that modifies ByrefExposed
+ // memory but not GcHeap memory, so track their
+ // states separately.
+ byrefStatesMatchGcHeapStates = false;
+ }
+ }
+ else
+ {
+ // If it doesn't define a local, then it might update GcHeap/ByrefExposed.
+ fgCurMemoryDef |= memoryKindSet(GcHeap, ByrefExposed);
}
}
break;
break;
}
}
+
+ // In minopts, we don't explicitly build SSA or value-number; GcHeap and
+ // ByrefExposed implicitly (conservatively) change state at each instr.
+ byrefStatesMatchGcHeapStates = true;
+
return;
}
VarSetOps::AssignNoCopy(this, fgCurUseSet, VarSetOps::MakeEmpty(this));
VarSetOps::AssignNoCopy(this, fgCurDefSet, VarSetOps::MakeEmpty(this));
+ // GC Heap and ByrefExposed can share states unless we see a def of byref-exposed
+ // memory that is not a GC Heap def.
+ byrefStatesMatchGcHeapStates = true;
+
for (block = fgFirstBB; block; block = block->bbNext)
{
VarSetOps::ClearD(this, fgCurUseSet);
VarSetOps::AssignNoCopy(this, block->bbLiveIn, VarSetOps::MakeEmpty(this));
block->bbMemoryLiveIn = emptyMemoryKindSet;
}
+
+#ifdef DEBUG
+ if (verbose)
+ {
+ printf("** Memory liveness computed, GcHeap states and ByrefExposed states %s\n",
+ (byrefStatesMatchGcHeapStates ? "match" : "diverge"));
+ }
+#endif // DEBUG
}
// Helper functions to mark variables live over their entire scope
if ((tree->gtFlags & GTF_IND_VOLATILE) != 0)
{
- memoryHavoc |= memoryKindSet(GcHeap);
+ memoryHavoc |= memoryKindSet(GcHeap, ByrefExposed);
continue;
}
CORINFO_CLASS_HANDLE elemType =
CORINFO_CLASS_HANDLE(vnStore->ConstantValue<size_t>(funcApp.m_args[0]));
AddModifiedElemTypeAllContainingLoops(mostNestedLoop, elemType);
- // Don't set memoryHavoc below.
+ // Don't set memoryHavoc for GcHeap below. Do set memoryHavoc for ByrefExposed
+ // (conservatively assuming that a byref may alias the array element)
+ memoryHavoc |= memoryKindSet(ByrefExposed);
continue;
}
}
// Otherwise...
- memoryHavoc |= memoryKindSet(GcHeap);
+ memoryHavoc |= memoryKindSet(GcHeap, ByrefExposed);
}
// Is the LHS an array index expression?
else if (lhs->ParseArrayElemForm(this, &arrInfo, &fldSeqArrElem))
// field of "S", will lose all information about the array type.
CORINFO_CLASS_HANDLE elemTypeEq = EncodeElemType(arrInfo.m_elemType, arrInfo.m_elemStructType);
AddModifiedElemTypeAllContainingLoops(mostNestedLoop, elemTypeEq);
+ // Conservatively assume byrefs may alias this array element
+ memoryHavoc |= memoryKindSet(ByrefExposed);
}
else
{
}
AddModifiedFieldAllContainingLoops(mostNestedLoop, fldSeq->m_fieldHnd);
+ // Conservatively assume byrefs may alias this object.
+ memoryHavoc |= memoryKindSet(ByrefExposed);
}
else
{
- memoryHavoc |= memoryKindSet(GcHeap);
+ memoryHavoc |= memoryKindSet(GcHeap, ByrefExposed);
}
}
}
bool isEntire;
if (!tree->DefinesLocal(this, &lclVarTree, &isEntire))
{
- // For now, assume arbitrary side effects on GcHeap...
- memoryHavoc |= memoryKindSet(GcHeap);
+ // For now, assume arbitrary side effects on GcHeap/ByrefExposed...
+ memoryHavoc |= memoryKindSet(GcHeap, ByrefExposed);
+ }
+ else if (lvaVarAddrExposed(lclVarTree->gtLclNum))
+ {
+ memoryHavoc |= memoryKindSet(ByrefExposed);
}
}
else if (lhs->OperGet() == GT_CLS_VAR)
{
AddModifiedFieldAllContainingLoops(mostNestedLoop, lhs->gtClsVar.gtClsVarHnd);
+ // Conservatively assume byrefs may alias this static field
+ memoryHavoc |= memoryKindSet(ByrefExposed);
}
// Otherwise, must be local lhs form. I should assert that.
else if (lhs->OperGet() == GT_LCL_VAR)
->m_vnPair.SetLiberal(rhsVN);
}
}
+ // If the local is address-exposed, count this as ByrefExposed havoc
+ if (lvaVarAddrExposed(lhsLcl->gtLclNum))
+ {
+ memoryHavoc |= memoryKindSet(ByrefExposed);
+ }
}
}
else // not GenTree::OperIsAssignment(oper)
case GT_XCHG: // Binop
case GT_CMPXCHG: // Specialop
{
- memoryHavoc |= memoryKindSet(GcHeap);
+ memoryHavoc |= memoryKindSet(GcHeap, ByrefExposed);
}
break;
CorInfoHelpFunc helpFunc = eeGetHelperNum(call->gtCallMethHnd);
if (s_helperCallProperties.MutatesHeap(helpFunc))
{
- memoryHavoc |= memoryKindSet(GcHeap);
+ memoryHavoc |= memoryKindSet(GcHeap, ByrefExposed);
}
else if (s_helperCallProperties.MayRunCctor(helpFunc))
{
// and might have arbitrary side effects.
if ((tree->gtFlags & GTF_CALL_HOISTABLE) == 0)
{
- memoryHavoc |= memoryKindSet(GcHeap);
+ memoryHavoc |= memoryKindSet(GcHeap, ByrefExposed);
}
}
}
else
{
- memoryHavoc |= memoryKindSet(GcHeap);
+ memoryHavoc |= memoryKindSet(GcHeap, ByrefExposed);
}
break;
}
for (MemoryKind memoryKind : allMemoryKinds())
{
+ if ((memoryKind == GcHeap) && m_pCompiler->byrefStatesMatchGcHeapStates)
+ {
+ // Share the PhiFunc with ByrefExposed.
+ assert(memoryKind > ByrefExposed);
+ bbInDomFront->bbMemorySsaPhiFunc[memoryKind] = bbInDomFront->bbMemorySsaPhiFunc[ByrefExposed];
+ continue;
+ }
+
+ // Check if this memoryKind is defined in this block.
+ if ((block->bbMemoryDef & memoryKindSet(memoryKind)) == 0)
+ {
+ continue;
+ }
+
// Check if memoryKind is live into block "*iterBlk".
if ((bbInDomFront->bbMemoryLiveIn & memoryKindSet(memoryKind)) == 0)
{
if (m_pCompiler->ehBlockHasExnFlowDsc(block))
{
GenTreeLclVarCommon* lclVarNode;
- if (!tree->DefinesLocal(m_pCompiler, &lclVarNode))
+
+ bool isLocal = tree->DefinesLocal(m_pCompiler, &lclVarNode);
+ bool isAddrExposedLocal = isLocal && m_pCompiler->lvaVarAddrExposed(lclVarNode->gtLclNum);
+ bool hasByrefHavoc = ((block->bbMemoryHavoc & memoryKindSet(ByrefExposed)) != 0);
+ if (!isLocal || (isAddrExposedLocal && !hasByrefHavoc))
{
- // It *may* define the GC heap in a non-havoc way. Make a new SSA # -- associate with this node.
+ // It *may* define byref memory in a non-havoc way. Make a new SSA # -- associate with this node.
unsigned count = pRenameState->CountForMemoryDef();
- pRenameState->PushMemory(GcHeap, block, count);
- m_pCompiler->GetMemorySsaMap(GcHeap)->Set(tree, count);
-#ifdef DEBUG
- if (JitTls::GetCompiler()->verboseSsa)
+ if (!hasByrefHavoc)
{
- printf("Node ");
- Compiler::printTreeID(tree);
- printf(" (in try block) may define GC heap; ssa # = %d.\n", count);
- }
+ pRenameState->PushMemory(ByrefExposed, block, count);
+ m_pCompiler->GetMemorySsaMap(ByrefExposed)->Set(tree, count);
+#ifdef DEBUG
+ if (JitTls::GetCompiler()->verboseSsa)
+ {
+ printf("Node ");
+ Compiler::printTreeID(tree);
+ printf(" (in try block) may define memory; ssa # = %d.\n", count);
+ }
#endif // DEBUG
- // Now add this SSA # to all phis of the reachable catch blocks.
- AddMemoryDefToHandlerPhis(GcHeap, block, count);
+ // Now add this SSA # to all phis of the reachable catch blocks.
+ AddMemoryDefToHandlerPhis(ByrefExposed, block, count);
+ }
+
+ if (!isLocal)
+ {
+ // Add a new def for GcHeap as well
+ if (m_pCompiler->byrefStatesMatchGcHeapStates)
+ {
+ // GcHeap and ByrefExposed share the same stacks, SsaMap, and phis
+ assert(!hasByrefHavoc);
+ assert(pRenameState->CountForMemoryUse(GcHeap) == count);
+ assert(*m_pCompiler->GetMemorySsaMap(GcHeap)->LookupPointer(tree) == count);
+ assert(block->bbMemorySsaPhiFunc[GcHeap] == block->bbMemorySsaPhiFunc[ByrefExposed]);
+ }
+ else
+ {
+ if (!hasByrefHavoc)
+ {
+ // Allocate a distinct defnum for the GC Heap
+ count = pRenameState->CountForMemoryDef();
+ }
+
+ pRenameState->PushMemory(GcHeap, block, count);
+ m_pCompiler->GetMemorySsaMap(GcHeap)->Set(tree, count);
+ AddMemoryDefToHandlerPhis(GcHeap, block, count);
+ }
+ }
}
}
}
// Add "count" to the phi args of memoryKind.
BasicBlock::MemoryPhiArg*& handlerMemoryPhi = handler->bbMemorySsaPhiFunc[memoryKind];
+
+#if DEBUG
+ if (m_pCompiler->byrefStatesMatchGcHeapStates)
+ {
+ // When sharing phis for GcHeap and ByrefExposed, callers should ask to add phis
+ // for ByrefExposed only.
+ assert(memoryKind != GcHeap);
+ if (memoryKind == ByrefExposed)
+ {
+ // The GcHeap and ByrefExposed phi funcs should always be in sync.
+ assert(handlerMemoryPhi == handler->bbMemorySsaPhiFunc[GcHeap]);
+ }
+ }
+#endif
+
if (handlerMemoryPhi == BasicBlock::EmptyMemoryPhiDef)
{
handlerMemoryPhi = new (m_pCompiler) BasicBlock::MemoryPhiArg(count);
DBG_SSA_JITDUMP(" Added phi arg u:%d for %s to phi defn in handler block BB%02u.\n", count,
memoryKindNames[memoryKind], memoryKind, handler->bbNum);
+
+ if ((memoryKind == ByrefExposed) && m_pCompiler->byrefStatesMatchGcHeapStates)
+ {
+ // Share the phi between GcHeap and ByrefExposed.
+ handler->bbMemorySsaPhiFunc[GcHeap] = handlerMemoryPhi;
+ }
}
unsigned tryInd = tryBlk->ebdEnclosingTryIndex;
if (tryInd == EHblkDsc::NO_ENCLOSING_INDEX)
// First handle the incoming memory states.
for (MemoryKind memoryKind : allMemoryKinds())
{
- // Is there an Phi definition for memoryKind at the start of this block?
- if (block->bbMemorySsaPhiFunc[memoryKind] != nullptr)
+ if ((memoryKind == GcHeap) && m_pCompiler->byrefStatesMatchGcHeapStates)
+ {
+ // ByrefExposed and GcHeap share any phi this block may have,
+ assert(block->bbMemorySsaPhiFunc[memoryKind] == block->bbMemorySsaPhiFunc[ByrefExposed]);
+ // so we will have already allocated a defnum for it if needed.
+ assert(memoryKind > ByrefExposed);
+ assert(pRenameState->CountForMemoryUse(memoryKind) == pRenameState->CountForMemoryUse(ByrefExposed));
+ }
+ else
{
- unsigned count = pRenameState->CountForMemoryDef();
- pRenameState->PushMemory(memoryKind, block, count);
+ // Is there an Phi definition for memoryKind at the start of this block?
+ if (block->bbMemorySsaPhiFunc[memoryKind] != nullptr)
+ {
+ unsigned count = pRenameState->CountForMemoryDef();
+ pRenameState->PushMemory(memoryKind, block, count);
- DBG_SSA_JITDUMP("Ssa # for %s phi on entry to BB%02u is %d.\n", memoryKindNames[memoryKind], block->bbNum,
- count);
+ DBG_SSA_JITDUMP("Ssa # for %s phi on entry to BB%02u is %d.\n", memoryKindNames[memoryKind],
+ block->bbNum, count);
+ }
}
// Record the "in" Ssa # for memoryKind.
// If the block defines memory, allocate an SSA variable for the final memory state in the block.
// (This may be redundant with the last SSA var explicitly created, but there's no harm in that.)
- if ((block->bbMemoryDef & memorySet) != 0)
+ if ((memoryKind == GcHeap) && m_pCompiler->byrefStatesMatchGcHeapStates)
{
- unsigned count = pRenameState->CountForMemoryDef();
- pRenameState->PushMemory(memoryKind, block, count);
- AddMemoryDefToHandlerPhis(memoryKind, block, count);
+ // We've already allocated the SSA num and propagated it to shared phis, if needed,
+ // when processing ByrefExposed.
+ assert(memoryKind > ByrefExposed);
+ assert(((block->bbMemoryDef & memorySet) != 0) ==
+ ((block->bbMemoryDef & memoryKindSet(ByrefExposed)) != 0));
+ assert(pRenameState->CountForMemoryUse(memoryKind) == pRenameState->CountForMemoryUse(ByrefExposed));
+ }
+ else
+ {
+ if ((block->bbMemoryDef & memorySet) != 0)
+ {
+ unsigned count = pRenameState->CountForMemoryDef();
+ pRenameState->PushMemory(memoryKind, block, count);
+ AddMemoryDefToHandlerPhis(memoryKind, block, count);
+ }
}
// Record the "out" Ssa" # for memoryKind.
BasicBlock::MemoryPhiArg*& succMemoryPhi = succ->bbMemorySsaPhiFunc[memoryKind];
if (succMemoryPhi != nullptr)
{
+ if ((memoryKind == GcHeap) && m_pCompiler->byrefStatesMatchGcHeapStates)
+ {
+ // We've already propagated the "out" number to the phi shared with ByrefExposed,
+ // but still need to update bbMemorySsaPhiFunc to be in sync between GcHeap and ByrefExposed.
+ assert(memoryKind > ByrefExposed);
+ assert(block->bbMemorySsaNumOut[memoryKind] == block->bbMemorySsaNumOut[ByrefExposed]);
+ assert((succ->bbMemorySsaPhiFunc[ByrefExposed] == succMemoryPhi) ||
+ (succ->bbMemorySsaPhiFunc[ByrefExposed]->m_nextArg ==
+ (succMemoryPhi == BasicBlock::EmptyMemoryPhiDef ? nullptr : succMemoryPhi)));
+ succMemoryPhi = succ->bbMemorySsaPhiFunc[ByrefExposed];
+
+ continue;
+ }
+
if (succMemoryPhi == BasicBlock::EmptyMemoryPhiDef)
{
succMemoryPhi = new (m_pCompiler) BasicBlock::MemoryPhiArg(block->bbMemorySsaNumOut[memoryKind]);
BasicBlock::MemoryPhiArg*& handlerMemoryPhi = handlerStart->bbMemorySsaPhiFunc[memoryKind];
if (handlerMemoryPhi != nullptr)
{
+ if ((memoryKind == GcHeap) && m_pCompiler->byrefStatesMatchGcHeapStates)
+ {
+ // We've already added the arg to the phi shared with ByrefExposed if needed,
+ // but still need to update bbMemorySsaPhiFunc to stay in sync.
+ assert(memoryKind > ByrefExposed);
+ assert(block->bbMemorySsaNumOut[memoryKind] == block->bbMemorySsaNumOut[ByrefExposed]);
+ assert(handlerStart->bbMemorySsaPhiFunc[ByrefExposed]->m_ssaNum ==
+ block->bbMemorySsaNumOut[memoryKind]);
+ handlerMemoryPhi = handlerStart->bbMemorySsaPhiFunc[ByrefExposed];
+
+ continue;
+ }
+
if (handlerMemoryPhi == BasicBlock::EmptyMemoryPhiDef)
{
handlerMemoryPhi =
// And for memory.
for (MemoryKind memoryKind : allMemoryKinds())
{
+ if ((memoryKind == GcHeap) && m_pCompiler->byrefStatesMatchGcHeapStates)
+ {
+ // GcHeap and ByrefExposed share a rename stack, so don't try
+ // to pop it a second time.
+ continue;
+ }
pRenameState->PopBlockMemoryStack(memoryKind, block);
}
}
assert(initMemoryCount == SsaConfig::FIRST_SSA_NUM);
for (MemoryKind memoryKind : allMemoryKinds())
{
+ if ((memoryKind == GcHeap) && m_pCompiler->byrefStatesMatchGcHeapStates)
+ {
+ // GcHeap shares its stack with ByrefExposed; don't re-push.
+ continue;
+ }
pRenameState->PushMemory(memoryKind, m_pCompiler->fgFirstBB, initMemoryCount);
}
// Rename local variables and collect UD information for each ssa var.
SsaRenameState* pRenameState = new (jitstd::utility::allocate<SsaRenameState>(m_allocator), jitstd::placement_t())
- SsaRenameState(m_allocator, m_pCompiler->lvaCount);
+ SsaRenameState(m_allocator, m_pCompiler->lvaCount, m_pCompiler->byrefStatesMatchGcHeapStates);
RenameVariables(domTree, pRenameState);
EndPhase(PHASE_BUILD_SSA_RENAME);
*
* @params alloc The allocator class used to allocate jitstd data.
*/
-SsaRenameState::SsaRenameState(const jitstd::allocator<int>& alloc, unsigned lvaCount)
+SsaRenameState::SsaRenameState(const jitstd::allocator<int>& alloc,
+ unsigned lvaCount,
+ bool byrefStatesMatchGcHeapStates)
: counts(nullptr)
, stacks(nullptr)
, definedLocs(alloc)
, memoryCount(0)
, lvaCount(lvaCount)
, m_alloc(alloc)
+ , byrefStatesMatchGcHeapStates(byrefStatesMatchGcHeapStates)
{
}
typedef unsigned* Counts;
typedef jitstd::list<SsaRenameStateLocDef> DefStack;
- SsaRenameState(const jitstd::allocator<int>& allocator, unsigned lvaCount);
+ SsaRenameState(const jitstd::allocator<int>& allocator, unsigned lvaCount, bool byrefStatesMatchGcHeapStates);
void EnsureCounts();
void EnsureStacks();
}
// Returns the SSA number currently in scope for a use of the given memory kind.
// When GcHeap and ByrefExposed states are known to match, the two kinds share
// ByrefExposed's rename stack, so GcHeap queries are redirected to ByrefExposed.
unsigned CountForMemoryUse(MemoryKind memoryKind)
{
    if ((memoryKind == GcHeap) && byrefStatesMatchGcHeapStates)
    {
        // Share rename stacks in this configuration.
        memoryKind = ByrefExposed;
    }
    return memoryStack[memoryKind].back().m_count;
}
// Records SSA number "count" as the new in-scope def of the given memory kind
// in block "bb". When GcHeap and ByrefExposed states are known to match, the
// two kinds share ByrefExposed's rename stack, so GcHeap pushes are redirected
// to ByrefExposed (callers must not double-push in that configuration).
void PushMemory(MemoryKind memoryKind, BasicBlock* bb, unsigned count)
{
    if ((memoryKind == GcHeap) && byrefStatesMatchGcHeapStates)
    {
        // Share rename stacks in this configuration.
        memoryKind = ByrefExposed;
    }
    memoryStack[memoryKind].push_back(SsaRenameStateForBlock(bb, count));
}
// Allocator to allocate stacks.
jitstd::allocator<void> m_alloc;
+
+ // Indicates whether GcHeap and ByrefExposed use the same state.
+ bool byrefStatesMatchGcHeapStates;
};
}
}
}
+ else
+ {
+ // If there were any fields/elements modified, this should have been recorded as havoc
+ // for ByrefExposed.
+ assert(memoryKind == ByrefExposed);
+ assert((optLoopTable[loopNum].lpFieldsModified == nullptr) ||
+ optLoopTable[loopNum].lpLoopHasMemoryHavoc[memoryKind]);
+ assert((optLoopTable[loopNum].lpArrayElemTypesModified == nullptr) ||
+ optLoopTable[loopNum].lpLoopHasMemoryHavoc[memoryKind]);
+ }
#ifdef DEBUG
if (verbose)