bool operator!=(const const_iterator& it) const;
const T& operator*() const;
const T* operator&() const;
+ const T* operator->() const;
operator const T*() const;
private:
bool operator!=(const iterator& it);
T& operator*();
T* operator&();
+ T* operator->();
operator T*();
private:
bool operator!=(const const_reverse_iterator& it) const;
const T& operator*() const;
const T* operator&() const;
+ const T* operator->() const;
operator const T*() const;
private:
bool operator!=(const reverse_iterator& it);
T& operator*();
T* operator&();
+ T* operator->();
operator T*();
friend class list<T, Allocator>::const_reverse_iterator;
}
template <typename T, typename Allocator>
+T* list<T, Allocator>::iterator::operator->()
+{
+ return &(m_pNode->m_value);
+}
+
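As a quick illustration (not part of the change itself), operator-> lets callers reach a member directly through the iterator instead of spelling out operator* first. The Payload type, SomeAllocator, and the items list below are purely hypothetical names:

struct Payload { int value; };
// assuming 'items' is a jitstd::list<Payload, SomeAllocator> holding at least one element
auto it = items.begin();
it->value = 1;      // member access through the new operator->
(*it).value = 1;    // equivalent spelling through operator*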
+template <typename T, typename Allocator>
list<T, Allocator>::iterator::operator T*()
{
return &(m_pNode->m_value);
}

template <typename T, typename Allocator>
T& list<T, Allocator>::iterator::operator*()
{
return m_pNode->m_value;
}
-
template <typename T, typename Allocator>
const T* list<T, Allocator>::const_iterator::operator&() const
{
return &(m_pNode->m_value);
}
template <typename T, typename Allocator>
+const T* list<T, Allocator>::const_iterator::operator->() const
+{
+ return &(m_pNode->m_value);
+}
+
+template <typename T, typename Allocator>
list<T, Allocator>::const_iterator::operator const T*() const
{
return &(m_pNode->m_value);
}

template <typename T, typename Allocator>
const T& list<T, Allocator>::const_iterator::operator*() const
{
return m_pNode->m_value;
}
-
template <typename T, typename Allocator>
T* list<T, Allocator>::reverse_iterator::operator&()
{
return &(m_pNode->m_value);
}
template <typename T, typename Allocator>
+T* list<T, Allocator>::reverse_iterator::operator->()
+{
+ return &(m_pNode->m_value);
+}
+
+template <typename T, typename Allocator>
list<T, Allocator>::reverse_iterator::operator T*()
{
return &(m_pNode->m_value);
}
template <typename T, typename Allocator>
+const T* list<T, Allocator>::const_reverse_iterator::operator->() const
+{
+ return &(m_pNode->m_value);
+}
+
+template <typename T, typename Allocator>
list<T, Allocator>::const_reverse_iterator::operator const T*() const
{
return &(m_pNode->m_value);
#include "lsra.h"
-// Simulated C++11 ranged-for construct
-#define FOREACH(I,C) for (auto __iterator = (C).begin(); __iterator != (C).end(); __iterator++) { auto I = *__iterator;
-#define END_FOREACH }
-
-// Actual C++11 ranged-for construct
-// NOTE: CoreCLR builds use an older compiler that doesn't understand this (2012/3/5).
-// We could check _MSC_VER or _MSC_FULL_VER and use this when it's available.
-//#define FOREACH(I,C) for (auto I : C)
-//#define END_FOREACH
-
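With the element lists now exposing standard begin()/end() iterators, the simulated construct above can be retired in favor of the real C++11 range-for. A minimal sketch of the replacement pattern, mirroring the dump loop rewritten later in this change:

// Before: FOREACH(refPos, refPositions) { refPos->dump(); } END_FOREACH
// After:
for (auto& refPos : refPositions)
{
    refPos.dump();
}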
#ifdef DEBUG
const char* LinearScan::resolveTypeName[] = { "Split", "Join", "Critical", "SharedCritical" };
#endif // DEBUG
return (regMask != RBM_NONE && genMaxOneBit(regMask));
}
-RefPositionList::ItemIterator
-skipRefsNotMatching(RefPositionList::ItemIterator iter, RefType match)
-{
- while (iter && (*iter)->refType != match)
- {
- iter++;
- }
- return iter;
-}
-
-RefPositionList::ItemIterator
-skipRefsMatching(RefPositionList::ItemIterator iter, RefType match)
-{
- while (iter && (*iter)->refType == match)
- {
- iter++;
- }
- return iter;
-}
-
#ifdef DEBUG
// TODO-Cleanup: Consider using a #include file with custom #defines for both defining
// the enum as well as the string array
Interval *
LinearScan::newInterval(RegisterType theRegisterType)
{
- Interval *newInt = intervals.AppendThrowing();
+ intervals.push_back(Interval());
+ Interval *newInt = &intervals.back();
newInt->init();
newInt->registerType = theRegisterType;
RefPosition *
LinearScan::newRefPositionRaw()
{
- RefPosition *newRP = refPositions.AppendThrowing();
+ refPositions.push_back(RefPosition());
+ RefPosition *newRP = &refPositions.back();
memset(newRP, 0, sizeof(RefPosition)); // TODO-Cleanup: call a RefPosition constructor instead?
#ifdef DEBUG
newRP->rpNum = refPositionCount;
}
-void *
-LinearScanMemoryAllocatorInterval::Alloc (void *context, SIZE_T cb)
-{
- LinearScan * linearScan = CONTAINING_RECORD(context, LinearScan, intervals);
- return linearScan->compiler->compGetMem(cb, CMK_LSRA_Interval);
-}
-
-void *
-LinearScanMemoryAllocatorRefPosition::Alloc (void *context, SIZE_T cb)
-{
- LinearScan * linearScan = CONTAINING_RECORD(context, LinearScan, refPositions);
- return linearScan->compiler->compGetMem(cb, CMK_LSRA_RefPosition);
-}
-// everything but REG_STK
-
//------------------------------------------------------------------------
// LSRA constructor
//
#if MEASURE_MEM_ALLOC
, lsraIAllocator(nullptr)
#endif // MEASURE_MEM_ALLOC
+ , intervals(LinearScanMemoryAllocatorInterval(theCompiler))
+ , refPositions(LinearScanMemoryAllocatorRefPosition(theCompiler))
{
#ifdef DEBUG
intervalCount = 0;
VARSET_TP VARSET_INIT(compiler, temp, block->bbLiveOut);
- // TODO: we are walking the RefPositions backwards, but refPositions.GetIndex() walks the
- // list forwards (by chunks) to find the given index. This will be fast as long as the number
- // of chunks is small. But it should be considered since technically it makes this loop O(N^2).
-
- unsigned index = refPositionCount - 1;
- RefPosition * currentRefPosition = refPositions.GetIndex(index);
+ auto currentRefPosition = refPositions.rbegin();
while (currentRefPosition->refType != RefTypeBB)
{
VarSetOps::RemoveElemD(compiler, temp, varIndex);
}
}
- assert(index != 0);
- index--;
- currentRefPosition = refPositions.GetIndex(index);
+ assert(currentRefPosition != refPositions.rend());
+ ++currentRefPosition;
}
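The rewritten walk relies on reverse-iterator semantics: rbegin() designates the last RefPosition, ++ steps toward the front of the list, and rend() marks one position past the first element. In isolation the pattern looks like this (a sketch only; the loop above also carries the liveness bookkeeping):

for (auto rit = refPositions.rbegin(); rit != refPositions.rend(); ++rit)
{
    if (rit->refType == RefTypeBB)
    {
        break;   // stop at the block boundary, as the loop above does
    }
}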
#ifdef DEBUG
RegRecord * farthestRefPhysRegRecord = nullptr;
LsraLocation farthestLocation = MinLocation;
LsraLocation refLocation = refPosition->nodeLocation;
- FOREACH(regNum, Registers(regType))
+ for (regNumber regNum : Registers(regType))
{
regMaskTP candidateBit = genRegMask(regNum);
if (!(candidates & candidateBit)) continue;
farthestRefPhysRegRecord = physRegRecord;
}
}
- END_FOREACH
assert(farthestRefPhysRegRecord != nullptr &&
(farthestLocation > refLocation || refPosition->isFixedRegRef));
foundReg = farthestRefPhysRegRecord->regNum;
printf("------------\n");
printf("REFPOSITIONS %s: \n", str);
printf("------------\n");
- FOREACH(refPos, refPositions)
+ for (auto& refPos : refPositions)
{
- refPos->dump();
+ refPos.dump();
}
- END_FOREACH
}
#endif // DEBUG
JITDUMP("*************** In LinearScan::allocateRegisters()\n");
DBEXEC(VERBOSE, lsraDumpIntervals("before allocateRegisters"));
+
// at start, nothing is active except for register args
- FOREACH(currentInterval, intervals)
+ for (auto& interval : intervals)
{
+ Interval* currentInterval = &interval;
currentInterval->recentRefPosition = nullptr;
currentInterval->isActive = false;
if (currentInterval->isLocalVar)
}
}
}
- END_FOREACH
+
for (regNumber reg = REG_FIRST; reg < ACTUAL_REG_COUNT; reg = REG_NEXT(reg))
{
getRegisterRecord(reg)->recentRefPosition = nullptr;
bool handledBlockEnd = false;
- FOREACH(currentRefPosition, refPositions)
+ for (auto& refPosition : refPositions)
{
+ RefPosition* currentRefPosition = &refPosition;
+
#ifdef DEBUG
// Set the activeRefPosition to null until we're done with any boundary handling.
activeRefPosition = nullptr;
lastAllocatedRefPosition = currentRefPosition;
}
}
- END_FOREACH
// Free registers to clear associated intervals for resolution phase
#ifdef DEBUG
// provide a Reset function (!) - we'll probably replace this so don't bother
// adding it
- FOREACH(interval, intervals)
+ for (auto& interval : intervals)
{
- if (interval->isActive)
+ if (interval.isActive)
{
printf("Active ");
- interval->dump();
+ interval.dump();
}
}
- END_FOREACH
printf("\n");
}
}
// handle incoming arguments and special temps
- auto iter = refPositions.begin();
- RefPosition * currentRefPosition;
+ auto currentRefPosition = refPositions.begin();
VarToRegMap entryVarToRegMap = inVarToRegMaps[compiler->fgFirstBB->bbNum];
- while (iter != refPositions.end() &&
- ((*iter)->refType == RefTypeParamDef || (*iter)->refType == RefTypeZeroInit))
+ while (currentRefPosition != refPositions.end() &&
+ (currentRefPosition->refType == RefTypeParamDef || currentRefPosition->refType == RefTypeZeroInit))
{
- currentRefPosition = *iter;
Interval * interval = currentRefPosition->getInterval();
assert(interval != nullptr && interval->isLocalVar);
resolveLocalRef(nullptr, currentRefPosition);
interval->isActive = false;
}
entryVarToRegMap[varIndex] = reg;
- iter++;
+ ++currentRefPosition;
}
- currentRefPosition = *iter;
- iter++;
JITDUMP("------------------------\n");
JITDUMP("WRITING BACK ASSIGNMENTS\n");
// Handle the DummyDefs, updating the incoming var location.
for ( ;
- currentRefPosition != nullptr && currentRefPosition->refType == RefTypeDummyDef;
- currentRefPosition = *iter,iter++)
+ currentRefPosition != refPositions.end() && currentRefPosition->refType == RefTypeDummyDef;
+ ++currentRefPosition)
{
assert(currentRefPosition->isIntervalRef());
// Don't mark dummy defs as reload
}
// The next RefPosition should be for the block. Move past it.
- assert(currentRefPosition != nullptr);
+ assert(currentRefPosition != refPositions.end());
assert(currentRefPosition->refType == RefTypeBB);
- currentRefPosition = *iter;
- iter++;
+ ++currentRefPosition;
// Handle the RefPositions for the block
for ( ;
- currentRefPosition != nullptr && currentRefPosition->refType != RefTypeBB && currentRefPosition->refType != RefTypeDummyDef;
- currentRefPosition = *iter,iter++)
+ currentRefPosition != refPositions.end() && currentRefPosition->refType != RefTypeBB && currentRefPosition->refType != RefTypeDummyDef;
+ ++currentRefPosition)
{
currentLocation = currentRefPosition->nodeLocation;
JITDUMP("current : ");
Interval * interval;
printf("\nLinear scan intervals %s:\n", msg);
- FOREACH(interval, intervals)
+ for (auto& interval : intervals)
{
// only dump something if it has references
//if (interval->firstRefPosition)
- interval->dump();
+ interval.dump();
}
- END_FOREACH
printf("\n");
}
// currentRefPosition is not used for LSRA_DUMP_PRE
// We keep separate iterators for defs, so that we can print them
// on the lhs of the dump
- RefPosition * currentRefPosition = nullptr;
- auto iter = refPositions.begin();
+ auto currentRefPosition = refPositions.begin();
switch (mode)
{
if (mode != LSRA_DUMP_PRE)
{
printf ("Incoming Parameters: ");
- for ( currentRefPosition = *iter,iter++;
- currentRefPosition != nullptr && currentRefPosition->refType != RefTypeBB;
- currentRefPosition = *iter,iter++)
+ for ( ;
+ currentRefPosition != refPositions.end() && currentRefPosition->refType != RefTypeBB;
+ ++currentRefPosition)
{
Interval* interval = currentRefPosition->getInterval();
assert(interval != nullptr && interval->isLocalVar);
bool printedBlockHeader = false;
// We should find the boundary RefPositions in the order of exposed uses, dummy defs, and the blocks
for ( ;
- currentRefPosition != nullptr &&
+ currentRefPosition != refPositions.end() &&
(currentRefPosition->refType == RefTypeExpUse ||
currentRefPosition->refType == RefTypeDummyDef ||
(currentRefPosition->refType == RefTypeBB && !printedBlockHeader));
- currentRefPosition = *iter,iter++)
+ ++currentRefPosition)
{
Interval * interval = nullptr;
if (currentRefPosition->isIntervalRef()) interval = currentRefPosition->getInterval();
bool killPrinted = false;
RefPosition * lastFixedRegRefPos = nullptr;
for ( ;
- currentRefPosition != nullptr &&
+ currentRefPosition != refPositions.end() &&
(currentRefPosition->refType == RefTypeUse ||
currentRefPosition->refType == RefTypeFixedReg ||
currentRefPosition->refType == RefTypeKill ||
currentRefPosition->refType == RefTypeDef) &&
(currentRefPosition->nodeLocation == tree->gtSeqNum ||
currentRefPosition->nodeLocation == tree->gtSeqNum+1);
- currentRefPosition = *iter,iter++)
+ ++currentRefPosition)
{
Interval * interval = nullptr;
if (currentRefPosition->isIntervalRef())
RegRecord * physRegRecord = getRegisterRecord(reg);
physRegRecord->assignedInterval = nullptr;
}
- FOREACH(interval, intervals)
+
+ for (auto& interval : intervals)
{
- interval->assignedReg = nullptr;
- interval->physReg = REG_NA;
+ interval.assignedReg = nullptr;
+ interval.physReg = REG_NA;
}
- END_FOREACH
DBEXEC(VERBOSE, dumpRegRecordTitle());
regMaskTP regsToFree = RBM_NONE;
regMaskTP delayRegsToFree = RBM_NONE;
LsraLocation currentLocation = MinLocation;
- FOREACH(currentRefPosition, refPositions)
+ for (auto& refPosition : refPositions)
{
+ RefPosition* currentRefPosition = &refPosition;
Interval* interval = nullptr;
RegRecord* regRecord = nullptr;
regNumber regNum = REG_NA;
}
}
}
- END_FOREACH
// Now, verify the resolution blocks.
// Currently these are nearly always at the end of the method, but that may not always be the case.
class RefPosition;
class LinearScan;
class RegRecord;
-class LinearScanMemoryAllocatorInterval;
-class LinearScanMemoryAllocatorRefPosition;
template<class T>
class ArrayStack;
typedef regNumber * VarToRegMap;
-typedef StructArrayList<Interval, /* initial element count */ 32, /* multiplicative chunk size growth factor */ 2, LinearScanMemoryAllocatorInterval> IntervalList;
-typedef StructArrayList<RefPosition, /* initial element count */ 64, /* multiplicative chunk size growth factor */ 2, LinearScanMemoryAllocatorRefPosition> RefPositionList;
-
-// Wrapper for ArenaAllocator
-class LinearScanMemoryAllocatorRefPosition
+template <typename ElementType, CompMemKind MemKind>
+class ListElementAllocator
{
-public:
- static void * Alloc (void *context, SIZE_T cb);
- static void Free (void *context, void *pv) {}
-};
+private:
+ template <typename U, CompMemKind CMK>
+ friend class ListElementAllocator;
+
+ Compiler* m_compiler;
-class LinearScanMemoryAllocatorInterval
-{
public:
- static void * Alloc (void *context, SIZE_T cb);
- static void Free (void *context, void *pv) {}
+ ListElementAllocator(Compiler* compiler)
+ : m_compiler(compiler)
+ {
+ }
+
+ template <typename U>
+ ListElementAllocator(const ListElementAllocator<U, MemKind>& other)
+ : m_compiler(other.m_compiler)
+ {
+ }
+
+ ElementType* allocate(size_t count)
+ {
+ return reinterpret_cast<ElementType*>(m_compiler->compGetMem(sizeof(ElementType) * count, MemKind));
+ }
+
+ void deallocate(ElementType* pointer, size_t count)
+ {
+ }
+
+ template <typename U>
+ struct rebind
+ {
+ typedef ListElementAllocator<U, MemKind> allocator;
+ };
};
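The rebind member mirrors the standard allocator protocol in the form jitstd expects: it lets the container convert this element allocator into an allocator for its internal node type. A rough sketch of that use, where Node, NodeAllocator, and elementAllocator are illustrative names:

// Inside a container templated on Allocator: rebind to the node type and
// allocate through the compiler arena via allocate() above.
typedef typename Allocator::template rebind<Node>::allocator NodeAllocator;
NodeAllocator nodeAllocator(elementAllocator); // converting constructor copies m_compiler
Node* node = nodeAllocator.allocate(1);        // backed by Compiler::compGetMem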
+typedef ListElementAllocator<Interval, CMK_LSRA_Interval> LinearScanMemoryAllocatorInterval;
+typedef ListElementAllocator<RefPosition, CMK_LSRA_RefPosition> LinearScanMemoryAllocatorRefPosition;
+
+typedef jitstd::list<Interval, LinearScanMemoryAllocatorInterval> IntervalList;
+typedef jitstd::list<RefPosition, LinearScanMemoryAllocatorRefPosition> RefPositionList;
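Putting the typedefs to work: the LinearScan constructor hunk earlier in this change passes a compiler-backed allocator to each list, and newInterval/newRefPositionRaw append with push_back. Condensed, the pattern is:

// Sketch only, mirroring the lsra.cpp hunks above.
IntervalList intervals(LinearScanMemoryAllocatorInterval(theCompiler));
intervals.push_back(Interval());
Interval* newInt = &intervals.back(); // element storage comes from the JIT arena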
class Referenceable
{
class LinearScan : public LinearScanInterface
{
- friend class LinearScanMemoryAllocatorInterval;
- friend class LinearScanMemoryAllocatorRefPosition;
friend class RefPosition;
friend class Interval;
friend class Lowering;