This tends to generate one virtual call for every allocation; passing the concrete (final) CompAllocator type instead of the IAllocator interface lets the compiler devirtualize these calls.
class TestBitSetTraits
{
public:
- static void* Alloc(IAllocator* alloc, size_t byteSize)
+ static void* Alloc(CompAllocator* alloc, size_t byteSize)
{
return alloc->Alloc(byteSize);
}
- static unsigned GetSize(IAllocator* alloc)
+ static unsigned GetSize(CompAllocator* alloc)
{
return 64;
}
- static unsigned GetArrSize(IAllocator* alloc, unsigned elemSize)
+ static unsigned GetArrSize(CompAllocator* alloc, unsigned elemSize)
{
assert(elemSize == sizeof(size_t));
return (64 / 8) / sizeof(size_t);
}
- static unsigned GetEpoch(IAllocator* alloc)
+ static unsigned GetEpoch(CompAllocator* alloc)
{
return 0;
}
};
-void BitSetSupport::TestSuite(IAllocator* env)
+void BitSetSupport::TestSuite(CompAllocator* env)
{
- BitSetSupport::RunTests<UINT64, BSUInt64, IAllocator*, TestBitSetTraits>(env);
- BitSetSupport::RunTests<BitSetShortLongRep, BSShortLong, IAllocator*, TestBitSetTraits>(env);
- BitSetSupport::RunTests<BitSetUint64<IAllocator*, TestBitSetTraits>, BSUInt64Class, IAllocator*, TestBitSetTraits>(
- env);
+ BitSetSupport::RunTests<UINT64, BSUInt64, CompAllocator*, TestBitSetTraits>(env);
+ BitSetSupport::RunTests<BitSetShortLongRep, BSShortLong, CompAllocator*, TestBitSetTraits>(env);
+ BitSetSupport::RunTests<BitSetUint64<CompAllocator*, TestBitSetTraits>, BSUInt64Class, CompAllocator*,
+ TestBitSetTraits>(env);
}
#endif
#ifdef DEBUG
// This runs the "TestSuite" method for a few important instantiations of BitSet.
- static void TestSuite(IAllocator* env);
+ static void TestSuite(CompAllocator* env);
#endif
enum Operation
// XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#endif // DEBUG
-// This class implements the "IAllocator" interface, so that we can use
-// utilcode collection classes in the JIT, and have them use the JIT's allocator.
-
-class CompAllocator : public IAllocator
-{
- Compiler* m_comp;
-#if MEASURE_MEM_ALLOC
- CompMemKind m_cmk;
-#endif
-public:
- CompAllocator(Compiler* comp, CompMemKind cmk)
- : m_comp(comp)
-#if MEASURE_MEM_ALLOC
- , m_cmk(cmk)
-#endif
- {
- }
-
- inline void* Alloc(size_t sz);
-
- inline void* ArrayAlloc(size_t elems, size_t elemSize);
-
- // For the compiler's no-release allocator, free operations are no-ops.
- void Free(void* p)
- {
- }
-};
-
/*
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
if (m_indirAssignMap == nullptr)
{
// Create a CompAllocator that labels sub-structure with CMK_IndirAssignMap, and use that for allocation.
- IAllocator* ialloc = new (this, CMK_IndirAssignMap) CompAllocator(this, CMK_IndirAssignMap);
- m_indirAssignMap = new (ialloc) NodeToIndirAssignMap(ialloc);
+ CompAllocator* ialloc = new (this, CMK_IndirAssignMap) CompAllocator(this, CMK_IndirAssignMap);
+ m_indirAssignMap = new (ialloc) NodeToIndirAssignMap(ialloc);
}
return m_indirAssignMap;
}
// The switch block "switchBlk" just had an entry with value "from" modified to the value "to".
// Update "this" as necessary: if "from" is no longer an element of the jump table of "switchBlk",
// remove it from "this", and ensure that "to" is a member. Use "alloc" to do any required allocation.
- void UpdateTarget(IAllocator* alloc, BasicBlock* switchBlk, BasicBlock* from, BasicBlock* to);
+ void UpdateTarget(CompAllocator* alloc, BasicBlock* switchBlk, BasicBlock* from, BasicBlock* to);
};
typedef SimplerHashTable<BasicBlock*, PtrKeyFuncs<BasicBlock>, SwitchUniqueSuccSet, JitSimplerHashBehavior>
{
VarScopeListNode* head;
VarScopeListNode* tail;
- static VarScopeMapInfo* Create(VarScopeListNode* node, IAllocator* alloc)
+ static VarScopeMapInfo* Create(VarScopeListNode* node, CompAllocator* alloc)
{
VarScopeMapInfo* info = new (alloc) VarScopeMapInfo;
info->head = node;
public:
// This one presents an implementation of the "IAllocator" abstract class that uses "compAllocator",
// suitable for use by utilcode collection types.
- IAllocator* compAsIAllocator;
+ CompAllocator* compAsIAllocator;
#if MEASURE_MEM_ALLOC
- IAllocator* compAsIAllocatorBitset; // An allocator that uses the CMK_bitset tracker.
- IAllocator* compAsIAllocatorGC; // An allocator that uses the CMK_GC tracker.
- IAllocator* compAsIAllocatorLoopHoist; // An allocator that uses the CMK_LoopHoist tracker.
+ CompAllocator* compAsIAllocatorBitset; // An allocator that uses the CMK_bitset tracker.
+ CompAllocator* compAsIAllocatorGC; // An allocator that uses the CMK_GC tracker.
+ CompAllocator* compAsIAllocatorLoopHoist; // An allocator that uses the CMK_LoopHoist tracker.
#ifdef DEBUG
- IAllocator* compAsIAllocatorDebugOnly; // An allocator that uses the CMK_DebugOnly tracker.
-#endif // DEBUG
-#endif // MEASURE_MEM_ALLOC
+ CompAllocator* compAsIAllocatorDebugOnly; // An allocator that uses the CMK_DebugOnly tracker.
+#endif // DEBUG
+#endif // MEASURE_MEM_ALLOC
void compFunctionTraceStart();
void compFunctionTraceEnd(void* methodCodePtr, ULONG methodCodeSize, bool isNYI);
// Assumes called as part of process shutdown; does any compiler-specific work associated with that.
static void ProcessShutdownWork(ICorStaticInfo* statInfo);
- IAllocator* getAllocator()
+ CompAllocator* getAllocator()
{
return compAsIAllocator;
}
#if MEASURE_MEM_ALLOC
- IAllocator* getAllocatorBitset()
+ CompAllocator* getAllocatorBitset()
{
return compAsIAllocatorBitset;
}
- IAllocator* getAllocatorGC()
+ CompAllocator* getAllocatorGC()
{
return compAsIAllocatorGC;
}
- IAllocator* getAllocatorLoopHoist()
+ CompAllocator* getAllocatorLoopHoist()
{
return compAsIAllocatorLoopHoist;
}
#else // !MEASURE_MEM_ALLOC
- IAllocator* getAllocatorBitset()
+ CompAllocator* getAllocatorBitset()
{
return compAsIAllocator;
}
- IAllocator* getAllocatorGC()
+ CompAllocator* getAllocatorGC()
{
return compAsIAllocator;
}
- IAllocator* getAllocatorLoopHoist()
+ CompAllocator* getAllocatorLoopHoist()
{
return compAsIAllocator;
}
#endif // !MEASURE_MEM_ALLOC
#ifdef DEBUG
- IAllocator* getAllocatorDebugOnly()
+ CompAllocator* getAllocatorDebugOnly()
{
#if MEASURE_MEM_ALLOC
return compAsIAllocatorDebugOnly;
if (compRoot->m_fieldSeqStore == nullptr)
{
// Create a CompAllocator that labels sub-structure with CMK_FieldSeqStore, and use that for allocation.
- IAllocator* ialloc = new (this, CMK_FieldSeqStore) CompAllocator(this, CMK_FieldSeqStore);
+ CompAllocator* ialloc = new (this, CMK_FieldSeqStore) CompAllocator(this, CMK_FieldSeqStore);
compRoot->m_fieldSeqStore = new (ialloc) FieldSeqStore(ialloc);
}
return compRoot->m_fieldSeqStore;
{
// Create a CompAllocator that labels sub-structure with CMK_ZeroOffsetFieldMap, and use that for
// allocation.
- IAllocator* ialloc = new (this, CMK_ZeroOffsetFieldMap) CompAllocator(this, CMK_ZeroOffsetFieldMap);
- m_zeroOffsetFieldMap = new (ialloc) NodeToFieldSeqMap(ialloc);
+ CompAllocator* ialloc = new (this, CMK_ZeroOffsetFieldMap) CompAllocator(this, CMK_ZeroOffsetFieldMap);
+ m_zeroOffsetFieldMap = new (ialloc) NodeToFieldSeqMap(ialloc);
}
return m_zeroOffsetFieldMap;
}
if (compRoot->m_arrayInfoMap == nullptr)
{
// Create a CompAllocator that labels sub-structure with CMK_ArrayInfoMap, and use that for allocation.
- IAllocator* ialloc = new (this, CMK_ArrayInfoMap) CompAllocator(this, CMK_ArrayInfoMap);
+ CompAllocator* ialloc = new (this, CMK_ArrayInfoMap) CompAllocator(this, CMK_ArrayInfoMap);
compRoot->m_arrayInfoMap = new (ialloc) NodeToArrayInfoMap(ialloc);
}
return compRoot->m_arrayInfoMap;
if (compRoot->m_memorySsaMap[memoryKind] == nullptr)
{
        // Create a CompAllocator for the memory SSA map; it reuses the CMK_ArrayInfoMap memory kind for tracking.
- IAllocator* ialloc = new (this, CMK_ArrayInfoMap) CompAllocator(this, CMK_ArrayInfoMap);
+ CompAllocator* ialloc = new (this, CMK_ArrayInfoMap) CompAllocator(this, CMK_ArrayInfoMap);
compRoot->m_memorySsaMap[memoryKind] = new (ialloc) NodeToUnsignedMap(ialloc);
}
return compRoot->m_memorySsaMap[memoryKind];
}
}
-void Compiler::SwitchUniqueSuccSet::UpdateTarget(IAllocator* alloc,
- BasicBlock* switchBlk,
- BasicBlock* from,
- BasicBlock* to)
+void Compiler::SwitchUniqueSuccSet::UpdateTarget(CompAllocator* alloc,
+ BasicBlock* switchBlk,
+ BasicBlock* from,
+ BasicBlock* to)
{
assert(switchBlk->bbJumpKind == BBJ_SWITCH); // Precondition.
unsigned jmpTabCnt = switchBlk->bbJumpSwt->bbsCount;
FieldSeqNode FieldSeqStore::s_notAField(nullptr, nullptr);
// FieldSeqStore methods.
-FieldSeqStore::FieldSeqStore(IAllocator* alloc) : m_alloc(alloc), m_canonMap(new (alloc) FieldSeqNodeCanonMap(alloc))
+FieldSeqStore::FieldSeqStore(CompAllocator* alloc) : m_alloc(alloc), m_canonMap(new (alloc) FieldSeqNodeCanonMap(alloc))
{
}
typedef SimplerHashTable<FieldSeqNode, /*KeyFuncs*/ FieldSeqNode, FieldSeqNode*, JitSimplerHashBehavior>
FieldSeqNodeCanonMap;
- IAllocator* m_alloc;
+ CompAllocator* m_alloc;
FieldSeqNodeCanonMap* m_canonMap;
static FieldSeqNode s_notAField; // No value, just exists to provide an address.
static int ConstantIndexPseudoFieldStruct;
public:
- FieldSeqStore(IAllocator* alloc);
+ FieldSeqStore(CompAllocator* alloc);
// Returns the (canonical in the store) singleton field sequence for the given handle.
FieldSeqNode* CreateSingleton(CORINFO_FIELD_HANDLE fieldHnd);
};
class Compiler;
+
+// This class implements the "IAllocator" interface, so that we can use
+// utilcode collection classes in the JIT, and have them use the JIT's allocator.
+
+class CompAllocator final : public IAllocator
+{
+ Compiler* const m_comp;
+#if MEASURE_MEM_ALLOC
+ CompMemKind const m_cmk;
+#endif
+public:
+ CompAllocator(Compiler* comp, CompMemKind cmk)
+ : m_comp(comp)
+#if MEASURE_MEM_ALLOC
+ , m_cmk(cmk)
+#endif
+ {
+ }
+
+ inline void* Alloc(size_t sz) override;
+
+ inline void* ArrayAlloc(size_t elems, size_t elemSize) override;
+
+ // For the compiler's no-release allocator, free operations are no-ops.
+ void Free(void* p) override
+ {
+ }
+};
+
class JitTls
{
#ifdef DEBUG
// EnsureChildren - Create an array of child nodes if nullptr.
//
// Arguments:
-// alloc IAllocator instance
+// alloc CompAllocator instance
//
// Return Values:
// None
//
-void LC_Deref::EnsureChildren(IAllocator* alloc)
+void LC_Deref::EnsureChildren(CompAllocator* alloc)
{
if (children == nullptr)
{
unsigned rank; // Rank of the array
BasicBlock* useBlock; // Block where the [] occurs
- ArrIndex(IAllocator* alloc) : arrLcl(BAD_VAR_NUM), indLcls(alloc), bndsChks(alloc), rank(0), useBlock(nullptr)
+ ArrIndex(CompAllocator* alloc) : arrLcl(BAD_VAR_NUM), indLcls(alloc), bndsChks(alloc), rank(0), useBlock(nullptr)
{
}
{
}
- ArrIndex* GetArrIndexForDim(IAllocator* alloc)
+ ArrIndex* GetArrIndexForDim(CompAllocator* alloc)
{
if (index == nullptr)
{
unsigned Lcl();
bool HasChildren();
- void EnsureChildren(IAllocator* alloc);
+ void EnsureChildren(CompAllocator* alloc);
static LC_Deref* Find(ExpandArrayStack<LC_Deref*>* children, unsigned lcl);
void DeriveLevelConditions(ExpandArrayStack<ExpandArrayStack<LC_Condition>*>* len);
*/
struct LoopCloneContext
{
- IAllocator* alloc; // The allocator
+ CompAllocator* alloc; // The allocator
ExpandArrayStack<LcOptInfo*>** optInfo; // The array of optimization opportunities found in each loop. (loop x
// optimization-opportunities)
ExpandArrayStack<LC_Condition>** conditions; // The array of conditions that influence which path to take for each
ExpandArrayStack<ExpandArrayStack<LC_Condition>*>** blockConditions; // The array of block levels of conditions for
// each loop. (loop x level x conditions)
- LoopCloneContext(unsigned loopCount, IAllocator* alloc) : alloc(alloc)
+ LoopCloneContext(unsigned loopCount, CompAllocator* alloc) : alloc(alloc)
{
optInfo = new (alloc) ExpandArrayStack<LcOptInfo*>*[loopCount];
conditions = new (alloc) ExpandArrayStack<LC_Condition>*[loopCount];
private:
#if MEASURE_MEM_ALLOC
- IAllocator* lsraIAllocator;
+ CompAllocator* lsraIAllocator;
#endif
- IAllocator* getAllocator(Compiler* comp)
+ CompAllocator* getAllocator(Compiler* comp)
{
#if MEASURE_MEM_ALLOC
if (lsraIAllocator == nullptr)
return false;
}
#ifdef DEBUG
- const char* ToString(IAllocator* alloc)
+ const char* ToString(CompAllocator* alloc)
{
unsigned size = 64;
char* buf = (char*)alloc->Alloc(size);
}
#ifdef DEBUG
- char* ToString(IAllocator* alloc)
+ char* ToString(CompAllocator* alloc)
{
size_t size = 64;
char* buf = (char*)alloc->Alloc(size);
void Compiler::fgSsaBuild()
{
- IAllocator* pIAllocator = new (this, CMK_SSA) CompAllocator(this, CMK_SSA);
+ CompAllocator* pIAllocator = new (this, CMK_SSA) CompAllocator(this, CMK_SSA);
// If this is not the first invocation, reset data structures for SSA.
if (fgSsaPassesCompleted > 0)
*
* @remarks Initializes the class and member pointers/objects that use constructors.
*/
-SsaBuilder::SsaBuilder(Compiler* pCompiler, IAllocator* pIAllocator)
+SsaBuilder::SsaBuilder(Compiler* pCompiler, CompAllocator* pIAllocator)
: m_pCompiler(pCompiler)
, m_allocator(pIAllocator)
public:
// Constructor
- SsaBuilder(Compiler* pCompiler, IAllocator* pIAllocator);
+ SsaBuilder(Compiler* pCompiler, CompAllocator* pIAllocator);
// Requires stmt nodes to be already sequenced in evaluation order. Analyzes the graph
// for introduction of phi-nodes as GT_PHI tree nodes at the beginning of each block.
}
}
-ValueNumStore::ValueNumStore(Compiler* comp, IAllocator* alloc)
+ValueNumStore::ValueNumStore(Compiler* comp, CompAllocator* alloc)
: m_pComp(comp)
, m_alloc(alloc)
,
return GetVNFunc(vn, &funcAttr) && (s_vnfOpAttribs[funcAttr.m_func] & VNFOA_SharedStatic) != 0;
}
-ValueNumStore::Chunk::Chunk(
- IAllocator* alloc, ValueNum* pNextBaseVN, var_types typ, ChunkExtraAttribs attribs, BasicBlock::loopNumber loopNum)
+ValueNumStore::Chunk::Chunk(CompAllocator* alloc,
+ ValueNum* pNextBaseVN,
+ var_types typ,
+ ChunkExtraAttribs attribs,
+ BasicBlock::loopNumber loopNum)
: m_defs(nullptr), m_numUsed(0), m_baseVN(*pNextBaseVN), m_typ(typ), m_attribs(attribs), m_loopNum(loopNum)
{
// Allocate "m_defs" here, according to the typ/attribs pair.
class VNMap : public SimplerHashTable<fromType, keyfuncs, ValueNum, JitSimplerHashBehavior>
{
public:
- VNMap(IAllocator* alloc) : SimplerHashTable<fromType, keyfuncs, ValueNum, JitSimplerHashBehavior>(alloc)
+ VNMap(CompAllocator* alloc) : SimplerHashTable<fromType, keyfuncs, ValueNum, JitSimplerHashBehavior>(alloc)
{
}
~VNMap()
Compiler* m_pComp;
// For allocations. (Other things?)
- IAllocator* m_alloc;
+ CompAllocator* m_alloc;
// TODO-Cleanup: should transform "attribs" into a struct with bit fields. That would be simpler...
static void InitValueNumStoreStatics();
// Initialize an empty ValueNumStore.
- ValueNumStore(Compiler* comp, IAllocator* allocator);
+ ValueNumStore(Compiler* comp, CompAllocator* allocator);
// Returns "true" iff "vnf" (which may have been created by a cast from an integral value) represents
// a legal value number function.
// Initialize a chunk, starting at "*baseVN", for the given "typ", "attribs", and "loopNum" (using "alloc" for
// allocations).
// (Increments "*baseVN" by ChunkSize.)
- Chunk(IAllocator* alloc,
+ Chunk(CompAllocator* alloc,
ValueNum* baseVN,
var_types typ,
ChunkExtraAttribs attribs,