// Locks
//
-struct alloc_context;
+struct gc_alloc_context;
class Thread;
Thread * GetThread();
}
else if (g_TrapReturningThreads)
{
- g_theGcHeap->WaitUntilGCComplete();
+ g_theGCHeap->WaitUntilGCComplete();
}
}
BOOL fSmallObjectHeapPtr = FALSE, fLargeObjectHeapPtr = FALSE;
if (!noRangeChecks)
{
- fSmallObjectHeapPtr = g_theGcHeap->IsHeapPointer(this, TRUE);
+ fSmallObjectHeapPtr = g_theGCHeap->IsHeapPointer(this, TRUE);
if (!fSmallObjectHeapPtr)
- fLargeObjectHeapPtr = g_theGcHeap->IsHeapPointer(this);
+ fLargeObjectHeapPtr = g_theGCHeap->IsHeapPointer(this);
_ASSERTE(fSmallObjectHeapPtr || fLargeObjectHeapPtr);
}
#ifdef VERIFY_HEAP
if (bDeep && (g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_GC))
- g_theGcHeap->ValidateObjectMember(this);
+ g_theGCHeap->ValidateObjectMember(this);
#endif
if (fSmallObjectHeapPtr)
{
#ifdef FEATURE_BASICFREEZE
- _ASSERTE(!g_theGcHeap->IsLargeObject(pMT) || g_theGcHeap->IsInFrozenSegment(this));
+ _ASSERTE(!g_theGCHeap->IsLargeObject(pMT) || g_theGCHeap->IsInFrozenSegment(this));
#else
- _ASSERTE(!g_theGcHeap->IsLargeObject(pMT));
+ _ASSERTE(!g_theGCHeap->IsLargeObject(pMT));
#endif
}
}
// if seg_size is small but not 0 (0 is default if config not set)
// then set the segment to the minimum size
- if (!g_theGcHeap->IsValidSegmentSize(seg_size))
+ if (!g_theGCHeap->IsValidSegmentSize(seg_size))
{
// if requested size is between 1 byte and 4MB, use min
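// ((seg_size >> 1) is nonzero once seg_size >= 2, and (seg_size >> 22) is
// zero while seg_size < 4MB (1 << 22), so this branch catches small nonzero
// requests; e.g. seg_size = 64*1024 passes both tests and gets clamped.)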
if ((seg_size >> 1) && !(seg_size >> 22))
void fix_alloc_context(gc_alloc_context* acontext, void* param)
{
fix_alloc_context_args* args = (fix_alloc_context_args*)param;
- g_theGcHeap->FixAllocContext(acontext, FALSE, (void*)(size_t)(args->for_gc_p), args->heap);
+ g_theGCHeap->FixAllocContext(acontext, FALSE, (void*)(size_t)(args->for_gc_p), args->heap);
}
void gc_heap::fix_allocation_contexts(BOOL for_gc_p)
{
#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
#ifdef MULTIPLE_HEAPS
- int n_heaps = g_theGcHeap->GetNumberOfHeaps ();
+ int n_heaps = g_theGCHeap->GetNumberOfHeaps ();
for (int i = 0; i < n_heaps; i++)
{
gc_heap* hp = GCHeap::GetHeap(i)->pGenGCHeap;
return TRUE;
#else
+ UNREFERENCED_PARAMETER(context);
return FALSE;
#endif // defined(STRESS_HEAP) && !defined(FEATURE_REDHAWK)
}
return newAlloc;
#else
+ UNREFERENCED_PARAMETER(size);
+ UNREFERENCED_PARAMETER(flags);
assert(!"should not call GCHeap::AllocAlign8 without FEATURE_64BIT_ALIGNMENT defined!");
return nullptr;
#endif //FEATURE_64BIT_ALIGNMENT
return AllocAlign8Common(hp, acontext, size, flags);
#else
+ UNREFERENCED_PARAMETER(ctx);
+ UNREFERENCED_PARAMETER(size);
+ UNREFERENCED_PARAMETER(flags);
assert(!"should not call GCHeap::AllocAlign8 without FEATURE_64BIT_ALIGNMENT defined!");
return nullptr;
#endif //FEATURE_64BIT_ALIGNMENT
#endif //TRACE_GC
return newAlloc;
#else
+ UNREFERENCED_PARAMETER(_hp);
+ UNREFERENCED_PARAMETER(acontext);
+ UNREFERENCED_PARAMETER(size);
+ UNREFERENCED_PARAMETER(flags);
assert(!"Should not call GCHeap::AllocAlign8Common without FEATURE_64BIT_ALIGNMENT defined!");
return nullptr;
#endif // FEATURE_64BIT_ALIGNMENT
{
size_t gen0size = g_pConfig->GetGCgen0size();
- if ((gen0size == 0) || !g_theGcHeap->IsValidGen0MaxSize(gen0size))
+ if ((gen0size == 0) || !g_theGCHeap->IsValidGen0MaxSize(gen0size))
{
#ifdef SERVER_GC
// performance data seems to indicate halving the size results
#ifdef BACKGROUND_GC
(!gc_heap::settings.concurrent) &&
#endif //BACKGROUND_GC
- (g_theGcHeap->WhichGeneration( (Object*) StartPoint ) == 0))
+ (g_theGCHeap->WhichGeneration( (Object*) StartPoint ) == 0))
return;
rover = StartPoint;
{
CObjectHeader* obj = (CObjectHeader*)*i;
dprintf (3, ("scanning: %Ix", (size_t)obj));
- if (!g_theGcHeap->IsPromoted (obj))
+ if (!g_theGCHeap->IsPromoted (obj))
{
dprintf (3, ("freacheable: %Ix", (size_t)obj));
for (Object** po = startIndex;
po < SegQueueLimit (gen_segment(i)); po++)
{
- int new_gen = g_theGcHeap->WhichGeneration (*po);
+ int new_gen = g_theGCHeap->WhichGeneration (*po);
if (new_gen != i)
{
if (new_gen > i)
for (Object **po = startIndex; po < stopIndex; po++)
{
- if ((int)g_theGcHeap->WhichGeneration (*po) < i)
+ if ((int)g_theGCHeap->WhichGeneration (*po) < i)
FATAL_GC_ERROR ();
((CObjectHeader*)*po)->Validate();
}
// callback functions for heap walkers
typedef void object_callback_func(void * pvContext, void * pvDataLoc);
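// For illustration, a hypothetical walker callback that counts visited data
// locations (pvContext is assumed to point at a caller-owned counter):
//
//   void count_slots_callback(void * pvContext, void * pvDataLoc)
//   {
//       UNREFERENCED_PARAMETER(pvDataLoc);
//       (*(size_t *)pvContext)++;
//   }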
-// stub type to abstract a heap segment
-struct gc_heap_segment_stub;
-typedef gc_heap_segment_stub *segment_handle;
-
-struct segment_info
-{
- void * pvMem; // base of the allocation, not the first object (must add ibFirstObject)
- size_t ibFirstObject; // offset to the base of the first object in the segment
- size_t ibAllocated; // limit of allocated memory in the segment (>= firstobject)
- size_t ibCommit; // limit of committed memory in the segment (>= alllocated)
- size_t ibReserved; // limit of reserved memory in the segment (>= commit)
-};
-
/*!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!*/
/* If you modify failure_get_memory and */
/* oom_reason be sure to make the corresponding */
return mt->GetBaseSize() >= LARGE_OBJECT_SIZE;
}
-public:
-
-#ifdef FEATURE_BASICFREEZE
- // frozen segment management functions
- virtual segment_handle RegisterFrozenSegment(segment_info *pseginfo) = 0;
- virtual void UnregisterFrozenSegment(segment_handle seg) = 0;
-#endif //FEATURE_BASICFREEZE
-
protected:
public:
#if defined(FEATURE_BASICFREEZE) && defined(VERIFY_HEAP)
extern void FinalizeWeakReference(Object * obj);
// The single GC heap instance, shared with the VM.
-extern IGCHeapInternal* g_theGcHeap;
+extern IGCHeapInternal* g_theGCHeap;
#ifndef DACCESS_COMPILE
inline BOOL IsGCInProgress(bool bConsiderGCStart = FALSE)
{
WRAPPER_NO_CONTRACT;
- return g_theGcHeap != nullptr ? g_theGcHeap->IsGCInProgressHelper(bConsiderGCStart) : false;
+ return g_theGCHeap != nullptr ? g_theGCHeap->IsGCInProgressHelper(bConsiderGCStart) : false;
}
#endif // DACCESS_COMPILE
SVAL_IMPL_INIT(uint32_t,IGCHeap,maxGeneration,2);
-IGCHeapInternal* g_theGcHeap;
+IGCHeapInternal* g_theGCHeap;
/* global versions of the card table and brick table */
GPTR_IMPL(uint32_t,g_card_table);
heap = WKS::CreateGCHeap();
#endif
- g_theGcHeap = heap;
+ g_theGCHeap = heap;
return heap;
}
#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
}
-#ifdef FEATURE_BASICFREEZE
segment_handle GCHeap::RegisterFrozenSegment(segment_info *pseginfo)
{
+#ifdef FEATURE_BASICFREEZE
heap_segment * seg = new (nothrow) heap_segment;
if (!seg)
{
}
return reinterpret_cast< segment_handle >(seg);
+#else
+ assert(!"Should not call GCHeap::RegisterFrozenSegment without FEATURE_BASICFREEZE defined!");
+ return nullptr;
+#endif // FEATURE_BASICFREEZE
}
void GCHeap::UnregisterFrozenSegment(segment_handle seg)
{
+#ifdef FEATURE_BASICFREEZE
#if defined (MULTIPLE_HEAPS) && !defined (ISOLATED_HEAPS)
gc_heap* heap = gc_heap::g_heaps[0];
#else
#endif //MULTIPLE_HEAPS && !ISOLATED_HEAPS
heap->remove_ro_segment(reinterpret_cast<heap_segment*>(seg));
-}
+#else
+ assert(!"Should not call GCHeap::UnregisterFrozenSegment without FEATURE_BASICFREEZE defined!");
#endif // FEATURE_BASICFREEZE
+}
#endif // !DACCESS_COMPILE
// Interface with gc_heap
size_t GarbageCollectTry (int generation, BOOL low_memory_p=FALSE, int mode=collection_blocking);
-#ifdef FEATURE_BASICFREEZE
// frozen segment management functions
virtual segment_handle RegisterFrozenSegment(segment_info *pseginfo);
virtual void UnregisterFrozenSegment(segment_handle seg);
-#endif // FEATURE_BASICFREEZE
void WaitUntilConcurrentGCComplete (); // Use in managed threads
#ifndef DACCESS_COMPILE
}
};
+// stub type to abstract a heap segment
+struct gc_heap_segment_stub;
+typedef gc_heap_segment_stub *segment_handle;
+
+struct segment_info
+{
+ void * pvMem; // base of the allocation, not the first object (must add ibFirstObject)
+ size_t ibFirstObject; // offset to the base of the first object in the segment
+ size_t ibAllocated; // limit of allocated memory in the segment (>= firstobject)
+ size_t ibCommit; // limit of committed memory in the segment (>= allocated)
+ size_t ibReserved; // limit of reserved memory in the segment (>= commit)
+};
+
#ifdef PROFILING_SUPPORTED
#define GC_PROFILING //Turn on profiling
#endif // PROFILING_SUPPORTED
virtual void SetGCInProgress(BOOL fInProgress) = 0;
/*
+ ============================================================================
Add/RemoveMemoryPressure support routines. These are on the interface
for now, but we should move Add/RemoveMemoryPressure from the VM to the GC.
When that occurs, these three routines can be removed from the interface.
+ ============================================================================
*/
// Get the timestamp corresponding to the last GC that occurred for the
// Returns TRUE if GC actually happens, otherwise FALSE
virtual BOOL StressHeap(gc_alloc_context* acontext = 0) = 0;
+ /*
+ ===========================================================================
+ Routines to register read-only segments for frozen objects.
+ Only valid if FEATURE_BASICFREEZE is defined.
+ ===========================================================================
+ */
+
+ // Registers a frozen segment with the GC.
+ virtual segment_handle RegisterFrozenSegment(segment_info *pseginfo) = 0;
+
+ // Unregisters a frozen segment.
+ virtual void UnregisterFrozenSegment(segment_handle seg) = 0;
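+
+ // A minimal usage sketch (illustrative only; pFrozenData, cbUsed and
+ // cbSize are assumed to describe a caller-managed committed allocation):
+ //
+ //   segment_info si;
+ //   si.pvMem = pFrozenData;
+ //   si.ibFirstObject = sizeof(ObjHeader);
+ //   si.ibAllocated = cbUsed;
+ //   si.ibCommit = cbSize;
+ //   si.ibReserved = cbSize;
+ //   segment_handle hSeg = pGCHeap->RegisterFrozenSegment(&si);
+ //   ...
+ //   pGCHeap->UnregisterFrozenSegment(hSeg);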
+
IGCHeap() {}
virtual ~IGCHeap() {}
PER_HEAP
VOLATILE(int) alloc_context_count;
#else //MULTIPLE_HEAPS
-#define vm_heap ((GCHeap*) g_theGcHeap)
+#define vm_heap ((GCHeap*) g_theGCHeap)
#define heap_number (0)
#endif //MULTIPLE_HEAPS
LOG((LF_GC, LL_INFO100000, LOG_HANDLE_OBJECT_CLASS("Checking referent of Weak-", pObjRef, "to ", *pObjRef)));
Object **pRef = (Object **)pObjRef;
- if (!g_theGcHeap->IsPromoted(*pRef))
+ if (!g_theGCHeap->IsPromoted(*pRef))
{
LOG((LF_GC, LL_INFO100, LOG_HANDLE_OBJECT_CLASS("Severing Weak-", pObjRef, "to unreachable ", *pObjRef)));
uint32_t hndType = HandleFetchType(handle);
ADIndex appDomainIndex = HndGetHandleADIndex(handle);
AppDomain* pAppDomain = SystemDomain::GetAppDomainAtIndex(appDomainIndex);
- uint32_t generation = value != 0 ? g_theGcHeap->WhichGeneration(value) : 0;
+ uint32_t generation = value != 0 ? g_theGCHeap->WhichGeneration(value) : 0;
FireEtwSetGCHandle((void*) handle, value, hndType, generation, (int64_t) pAppDomain, GetClrInstanceId());
FireEtwPrvSetGCHandle((void*) handle, value, hndType, generation, (int64_t) pAppDomain, GetClrInstanceId());
for (size_t i = 0; i < num; i ++)
{
value = ppObj[i];
- uint32_t generation = value != 0 ? g_theGcHeap->WhichGeneration(value) : 0;
+ uint32_t generation = value != 0 ? g_theGCHeap->WhichGeneration(value) : 0;
FireEtwSetGCHandle(overlapped, value, HNDTYPE_PINNED, generation, (int64_t) pAppDomain, GetClrInstanceId());
}
}
else
{
value = OBJECTREF_TO_UNCHECKED_OBJECTREF(overlapped->m_userObject);
- uint32_t generation = value != 0 ? g_theGcHeap->WhichGeneration(value) : 0;
+ uint32_t generation = value != 0 ? g_theGCHeap->WhichGeneration(value) : 0;
FireEtwSetGCHandle(overlapped, value, HNDTYPE_PINNED, generation, (int64_t) pAppDomain, GetClrInstanceId());
}
}
if (*pClumpAge != 0) // Perf optimization: if clumpAge is 0, nothing more to do
{
// find out generation
- int generation = g_theGcHeap->WhichGeneration(value);
+ int generation = g_theGCHeap->WhichGeneration(value);
uint32_t uType = HandleFetchType(handle);
#ifndef FEATURE_REDHAWK
// we have the lock held but the part we care about (the async table scan) takes the table lock during
// a preparation step so we'll be able to complete our segment moves before the async scan has a
// chance to interfere with us (or vice versa).
- if (g_theGcHeap->IsConcurrentGCInProgress())
+ if (g_theGCHeap->IsConcurrentGCInProgress())
{
// A concurrent GC is in progress so someone might be scanning our segments asynchronously.
// Release the lock, wait for the GC to complete and try again. The order is important; if we wait
// before releasing the table lock we can deadlock with an async table scan.
ch.Release();
- g_theGcHeap->WaitUntilConcurrentGCComplete();
+ g_theGCHeap->WaitUntilConcurrentGCComplete();
continue;
}
{
if (!HndIsNullOrDestroyedHandle(*pValue))
{
- int thisAge = g_theGcHeap->WhichGeneration(*pValue);
+ int thisAge = g_theGCHeap->WhichGeneration(*pValue);
if (minAge > thisAge)
minAge = thisAge;
if (pOverlapped->m_userObject != NULL)
{
Object * pUserObject = OBJECTREFToObject(pOverlapped->m_userObject);
- thisAge = g_theGcHeap->WhichGeneration(pUserObject);
+ thisAge = g_theGCHeap->WhichGeneration(pUserObject);
if (minAge > thisAge)
minAge = thisAge;
if (pOverlapped->m_isArray)
size_t num = pUserArrayObject->GetNumComponents();
for (size_t i = 0; i < num; i ++)
{
- thisAge = g_theGcHeap->WhichGeneration(pObj[i]);
+ thisAge = g_theGCHeap->WhichGeneration(pObj[i]);
if (minAge > thisAge)
minAge = thisAge;
}
UNREFERENCED_PARAMETER(pValue);
VerifyObject(from, obj);
- int thisAge = g_theGcHeap->WhichGeneration(obj);
+ int thisAge = g_theGCHeap->WhichGeneration(obj);
//debugging code
- //if (minAge > thisAge && thisAge < g_theGcHeap->GetMaxGeneration())
+ //if (minAge > thisAge && thisAge < g_theGCHeap->GetMaxGeneration())
//{
// if ((*pValue) == obj)
// printf("Handle (age %u) %p -> %p (age %u)", minAge, pValue, obj, thisAge);
// }
//}
- if (minAge >= GEN_MAX_AGE || (minAge > thisAge && thisAge < static_cast<int>(g_theGcHeap->GetMaxGeneration())))
+ if (minAge >= GEN_MAX_AGE || (minAge > thisAge && thisAge < static_cast<int>(g_theGCHeap->GetMaxGeneration())))
{
_ASSERTE(!"Fatal Error in HandleTable.");
EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE);
Object *pOldObj = pObj;
#endif
- if (!HndIsNullOrDestroyedHandle(pObj) && !g_theGcHeap->IsPromoted(pObj))
+ if (!HndIsNullOrDestroyedHandle(pObj) && !g_theGCHeap->IsPromoted(pObj))
{
if (GCToEEInterface::RefCountedHandleCallbacks(pObj))
{
ScanContext *sc = (ScanContext*)lp1;
DhContext *pDhContext = Ref_GetDependentHandleContext(sc);
- if (*pObjRef && g_theGcHeap->IsPromoted(*pPrimaryRef))
+ if (*pObjRef && g_theGCHeap->IsPromoted(*pPrimaryRef))
{
- if (!g_theGcHeap->IsPromoted(*pSecondaryRef))
+ if (!g_theGCHeap->IsPromoted(*pSecondaryRef))
{
LOG((LF_GC|LF_ENC, LL_INFO10000, "\tPromoting secondary " LOG_OBJECT_CLASS(*pSecondaryRef)));
_ASSERTE(lp2);
LOG((LF_GC|LF_ENC, LL_INFO1000, LOG_HANDLE_OBJECT_CLASS("\tPrimary:\t", pPrimaryRef, "to ", *pPrimaryRef)));
LOG((LF_GC|LF_ENC, LL_INFO1000, LOG_HANDLE_OBJECT_CLASS("\tSecondary\t", pSecondaryRef, "to ", *pSecondaryRef)));
- if (!g_theGcHeap->IsPromoted(*pPrimaryRef))
+ if (!g_theGCHeap->IsPromoted(*pPrimaryRef))
{
LOG((LF_GC|LF_ENC, LL_INFO1000, "\tunreachable ", LOG_OBJECT_CLASS(*pPrimaryRef)));
LOG((LF_GC|LF_ENC, LL_INFO1000, "\tunreachable ", LOG_OBJECT_CLASS(*pSecondaryRef)));
}
else
{
- _ASSERTE(g_theGcHeap->IsPromoted(*pSecondaryRef));
+ _ASSERTE(g_theGCHeap->IsPromoted(*pSecondaryRef));
LOG((LF_GC|LF_ENC, LL_INFO10000, "\tPrimary is reachable " LOG_OBJECT_CLASS(*pPrimaryRef)));
LOG((LF_GC|LF_ENC, LL_INFO10000, "\tSecondary is reachable " LOG_OBJECT_CLASS(*pSecondaryRef)));
}
LOG((LF_GC, LL_INFO100000, LOG_HANDLE_OBJECT_CLASS("Checking referent of Weak-", pObjRef, "to ", *pObjRef)));
Object **ppRef = (Object **)pObjRef;
- if (!g_theGcHeap->IsPromoted(*ppRef))
+ if (!g_theGCHeap->IsPromoted(*ppRef))
{
LOG((LF_GC, LL_INFO100, LOG_HANDLE_OBJECT_CLASS("Severing Weak-", pObjRef, "to unreachable ", *pObjRef)));
ScanContext* sc = (ScanContext *)lp1;
promote_func* callback = (promote_func*) lp2;
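// Sample the per-heap promoted-byte counter around the promotion callback;
// the difference is the number of bytes promoted on behalf of this handle.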
- size_t sizeBegin = g_theGcHeap->GetPromotedBytes(sc->thread_number);
+ size_t sizeBegin = g_theGCHeap->GetPromotedBytes(sc->thread_number);
callback(ppSizedRef, (ScanContext *)lp1, 0);
- size_t sizeEnd = g_theGcHeap->GetPromotedBytes(sc->thread_number);
+ size_t sizeEnd = g_theGCHeap->GetPromotedBytes(sc->thread_number);
*pSize = sizeEnd - sizeBegin;
}
// promote objects pointed to by strong handles
// during ephemeral GCs we also want to promote the ones pointed to by sizedref handles.
uint32_t types[2] = {HNDTYPE_STRONG, HNDTYPE_SIZEDREF};
- uint32_t uTypeCount = (((condemned >= maxgen) && !g_theGcHeap->IsConcurrentGCInProgress()) ? 1 : _countof(types));
+ uint32_t uTypeCount = (((condemned >= maxgen) && !g_theGCHeap->IsConcurrentGCInProgress()) ? 1 : _countof(types));
uint32_t flags = (sc->concurrent) ? HNDGCF_ASYNC : HNDGCF_NORMAL;
HandleTableMap *walk = &g_HandleTableMap;
HandleTableMap *walk = &g_HandleTableMap;
uint32_t type = HNDTYPE_SIZEDREF;
int uCPUindex = getSlotNumber(sc);
- int n_slots = g_theGcHeap->GetNumberOfHeaps();
+ int n_slots = g_theGCHeap->GetNumberOfHeaps();
while (walk)
{
if (IsServerHeap())
{
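    // Only the first server GC thread to arrive does the work; the counter
    // resets to zero once every heap's thread has checked in.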
bDo = (Interlocked::Increment(&uCount) == 1);
- Interlocked::CompareExchange (&uCount, 0, g_theGcHeap->GetNumberOfHeaps());
- _ASSERTE (uCount <= g_theGcHeap->GetNumberOfHeaps());
+ Interlocked::CompareExchange (&uCount, 0, g_theGCHeap->GetNumberOfHeaps());
+ _ASSERTE (uCount <= g_theGCHeap->GetNumberOfHeaps());
}
if (bDo)
{
WRAPPER_NO_CONTRACT;
- if (g_theGcHeap == nullptr)
+ if (g_theGCHeap == nullptr)
return 0;
- return g_theGcHeap->GetHomeHeapNumber();
+ return g_theGCHeap->GetHomeHeapNumber();
}
bool HandleTableBucket::Contains(OBJECTHANDLE handle)
}
HHANDLETABLE hTable = HndGetHandleTable(handle);
- for (int uCPUindex=0; uCPUindex < g_theGcHeap->GetNumberOfHeaps(); uCPUindex++)
+ for (int uCPUindex=0; uCPUindex < g_theGCHeap->GetNumberOfHeaps(); uCPUindex++)
{
if (hTable == this->pTable[uCPUindex])
{
}
else
{
- pObject = g_theGcHeap->Alloc(acontext, size, 0);
+ pObject = g_theGCHeap->Alloc(acontext, size, 0);
if (pObject == NULL)
return NULL;
}
void GCToEEInterface::SuspendEE(GCToEEInterface::SUSPEND_REASON reason)
{
- g_theGcHeap->SetGCInProgress(TRUE);
+ g_theGCHeap->SetGCInProgress(TRUE);
// TODO: Implement
}
{
// TODO: Implement
- g_theGcHeap->SetGCInProgress(FALSE);
+ g_theGCHeap->SetGCInProgress(FALSE);
}
void GCToEEInterface::GcScanRoots(promote_func* fn, int condemned, int max_gen, ScanContext* sc)