* compGetMemA and compGetMemArrayA are useless because ArenaAllocator always returns aligned memory
* compGetMemCallback is not used anywhere
* the compFreeMem #define is useless. The function is not used anywhere either, but let's keep it for consistency.
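
For reference, here is a minimal standalone sketch (a hypothetical `ToyArena`, not the JIT's actual ArenaAllocator) of the behavior the first bullet relies on: when every request is rounded up to a multiple of sizeof(size_t) before the bump pointer advances, every block the arena hands out is already aligned, so a separate compGetMemA-style entry point adds nothing over plain compGetMem.

```cpp
// Minimal sketch only; the real ArenaAllocator grows pages on demand and is not shown here.
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdlib>

class ToyArena
{
    char* m_next;
    char* m_end;

public:
    ToyArena(void* buffer, size_t size)
        : m_next(static_cast<char*>(buffer)), m_end(static_cast<char*>(buffer) + size)
    {
    }

    // Round the size up so the *next* allocation also starts on a sizeof(size_t) boundary.
    void* allocateMemory(size_t sz)
    {
        size_t allocSz = (sz + sizeof(size_t) - 1) & ~(sizeof(size_t) - 1);
        if (m_next + allocSz > m_end)
        {
            abort(); // out of space; the real allocator would allocate another page
        }
        void* ptr = m_next;
        m_next += allocSz;
        // Every block handed out is aligned, so an extra "aligned" wrapper is redundant.
        assert((reinterpret_cast<uintptr_t>(ptr) & (sizeof(size_t) - 1)) == 0);
        return ptr;
    }
};

int main()
{
    alignas(sizeof(size_t)) char storage[256];
    ToyArena arena(storage, sizeof(storage));
    arena.allocateMemory(3); // odd-sized request...
    arena.allocateMemory(8); // ...yet the following block is still aligned
    return 0;
}
```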
}
#endif // DEBUG
-/******************************************************************************
- *
- * The Emitter uses this callback function to allocate its memory
- */
-
-/* static */
-void* Compiler::compGetMemCallback(void* p, size_t size, CompMemKind cmk)
-{
- assert(p);
-
- return ((Compiler*)p)->compGetMem(size, cmk);
-}
-
/*****************************************************************************
*
* The central memory allocation routine used by the compiler. Normally this
assert((varDscExposedStruct->lvExactSize / TARGET_POINTER_SIZE) == 8);
BYTE* oldGCPtrs = varDscExposedStruct->lvGcLayout;
- BYTE* newGCPtrs = (BYTE*)compGetMemA(8, CMK_LvaTable);
+ BYTE* newGCPtrs = (BYTE*)compGetMem(8, CMK_LvaTable);
for (int i = 0; i < 4; i++)
{
#endif // LOOP_HOIST_STATS
void* compGetMemArray(size_t numElem, size_t elemSize, CompMemKind cmk = CMK_Unknown);
- void* compGetMemArrayA(size_t numElem, size_t elemSize, CompMemKind cmk = CMK_Unknown);
void* compGetMem(size_t sz, CompMemKind cmk = CMK_Unknown);
- void* compGetMemA(size_t sz, CompMemKind cmk = CMK_Unknown);
- static void* compGetMemCallback(void*, size_t, CompMemKind cmk = CMK_Unknown);
void compFreeMem(void*);
bool compIsForImportOnly();
return compGetMem(numElem * elemSize, cmk);
}
-__forceinline void* Compiler::compGetMemArrayA(size_t numElem, size_t elemSize, CompMemKind cmk)
-{
- if (numElem > (MAX_MEMORY_PER_ALLOCATION / elemSize))
- {
- NOMEM();
- }
-
- return compGetMemA(numElem * elemSize, cmk);
-}
-
/******************************************************************************
*
 * Roundup the allocated size so that if this memory block is aligned,
 * then the next block allocated will also be aligned.
 * The JIT will always try to keep all the blocks aligned.
*/
-inline void* Compiler::compGetMemA(size_t sz, CompMemKind cmk)
-{
- assert(sz);
-
- size_t allocSz = roundUp(sz, sizeof(size_t));
-
-#if MEASURE_MEM_ALLOC
- genMemStats.AddAlloc(allocSz, cmk);
-#endif
-
- void* ptr = compAllocator->allocateMemory(allocSz);
-
- // Verify that the current block is aligned. Only then will the next
- // block allocated be on an aligned boundary.
- assert((size_t(ptr) & (sizeof(size_t) - 1)) == 0);
-
- return ptr;
-}
-
inline void Compiler::compFreeMem(void* ptr)
{
}
-#define compFreeMem(ptr) compFreeMem((void*)ptr)
-
inline bool Compiler::compIsProfilerHookNeeded()
{
#ifdef PROFILING_SUPPORTED
{
// Allocate a bit-array for all the variables and initialize to false
- bool* varInfoProvided = (bool*)compGetMemA(info.compLocalsCount * sizeof(varInfoProvided[0]));
+ bool* varInfoProvided = (bool*)compGetMem(info.compLocalsCount * sizeof(varInfoProvided[0]));
unsigned i;
for (i = 0; i < info.compLocalsCount; i++)
{
length += param.siglength + 2;
- char* retName = (char*)compGetMemA(length, CMK_DebugOnly);
+ char* retName = (char*)compGetMem(length, CMK_DebugOnly);
/* Now generate the full signature string in the allocated buffer */
if (subsitutionRequired)
{
- char* newName = (char*)compGetMemA(lengthOut, CMK_DebugOnly);
+ char* newName = (char*)compGetMem(lengthOut, CMK_DebugOnly);
char* pDest;
pDest = newName;
pChar = nameIn;
/* Do we need an array as well as the mask ? */
if (pasMaxDepth > BITS_IN_pasMask)
- pasTopArray = (BYTE*)pComp->compGetMemA(pasMaxDepth - BITS_IN_pasMask);
+ pasTopArray = (BYTE*)pComp->compGetMem(pasMaxDepth - BITS_IN_pasMask);
}
//-----------------------------------------------------------------------------
return;
}
- block->bbEntryState = (EntryState*)compGetMemA(sizeof(EntryState));
+ block->bbEntryState = (EntryState*)compGetMem(sizeof(EntryState));
// block->bbEntryState.esRefcount = 1;
size_t lvSize = varDsc->lvSize();
assert((lvSize % sizeof(void*)) ==
0); // The struct needs to be a multiple of sizeof(void*) bytes for getClassGClayout() to be valid.
- varDsc->lvGcLayout = (BYTE*)compGetMemA((lvSize / sizeof(void*)) * sizeof(BYTE), CMK_LvaTable);
+ varDsc->lvGcLayout = (BYTE*)compGetMem((lvSize / sizeof(void*)) * sizeof(BYTE), CMK_LvaTable);
unsigned numGCVars;
var_types simdBaseType = TYP_UNKNOWN;
varDsc->lvType = impNormStructType(typeHnd, varDsc->lvGcLayout, &numGCVars, &simdBaseType);
if (rpBestRecordedPrediction == NULL)
{
rpBestRecordedPrediction =
- reinterpret_cast<VarRegPrediction*>(compGetMemArrayA(lvaCount, sizeof(VarRegPrediction)));
+ reinterpret_cast<VarRegPrediction*>(compGetMemArray(lvaCount, sizeof(VarRegPrediction)));
}
for (unsigned k = 0; k < lvaCount; k++)
{
assert(bitVectMemSize * bitChunkSize() >= size);
- bv = (FixedBitVect*)comp->compGetMemA(sizeof(FixedBitVect) + bitVectMemSize, CMK_FixedBitVect);
+ bv = (FixedBitVect*)comp->compGetMem(sizeof(FixedBitVect) + bitVectMemSize, CMK_FixedBitVect);
memset(bv->bitVect, 0, bitVectMemSize);
bv->bitVectSize = size;