gc_etw_segment_pinned_object_heap :
gc_etw_segment_large_object_heap);
- GCToEEInterface::DiagUpdateGenerationBounds();
-
#ifndef USE_REGIONS
#ifdef MULTIPLE_HEAPS
hp->thread_uoh_segment (gen_number, res);
thread_uoh_segment (gen_number, res);
#endif //MULTIPLE_HEAPS
#endif //!USE_REGIONS
+ GCToEEInterface::DiagAddNewRegion(
+ gen_number,
+ heap_segment_mem (res),
+ heap_segment_allocated (res),
+ heap_segment_reserved (res)
+ );
}
return res;
fix_youngest_allocation_area();
heap_segment* next_seg = heap_segment_next (ephemeral_heap_segment);
+ bool new_seg = false;
if (!next_seg)
{
assert (ephemeral_heap_segment == generation_tail_region (generation_of (gen_number)));
next_seg = get_new_region (gen_number);
+ new_seg = true;
}
if (next_seg)
dprintf (REGIONS_LOG, ("eph seg %Ix -> next %Ix",
heap_segment_mem (ephemeral_heap_segment), heap_segment_mem (next_seg)));
ephemeral_heap_segment = next_seg;
+ if (new_seg)
+ {
+ GCToEEInterface::DiagAddNewRegion(
+ heap_segment_gen_num (next_seg),
+ heap_segment_mem (next_seg),
+ heap_segment_allocated (next_seg),
+ heap_segment_reserved (next_seg)
+ );
+ }
}
else
{
}
region = heap_segment_next (region);
- if ((region == nullptr) && !(region = get_new_region (0)))
+ if (region == nullptr)
{
- break;
+ region = get_new_region (0);
+ if (region == nullptr)
+ {
+ break;
+ }
+ else
+ {
+ GCToEEInterface::DiagAddNewRegion(
+ 0,
+ heap_segment_mem (region),
+ heap_segment_allocated (region),
+ heap_segment_reserved (region)
+ );
+ }
}
}
else
return g;
}
+// Returns the generation number holding `object` and reports, through the
+// out parameters, the [start, allocated, reserved) range of the memory
+// that contains it. Generations 0..max_generation are small-object heap;
+// loh_generation/poh_generation are the large/pinned object heaps.
+unsigned int GCHeap::GetGenerationWithRange (Object* object, uint8_t** ppStart, uint8_t** ppAllocated, uint8_t** ppReserved)
+{
+    int generation = -1;
+    heap_segment * hs = gc_heap::find_segment ((uint8_t*)object, FALSE);
+#ifdef USE_REGIONS
+    // With regions, each region records its own generation. LOH/POH
+    // regions are tagged max_generation and distinguished by flags.
+    generation = heap_segment_gen_num (hs);
+    if (generation == max_generation)
+    {
+        if (heap_segment_loh_p (hs))
+        {
+            generation = loh_generation;
+        }
+        else if (heap_segment_poh_p (hs))
+        {
+            generation = poh_generation;
+        }
+    }
+
+    *ppStart = heap_segment_mem (hs);
+    *ppAllocated = heap_segment_allocated (hs);
+    *ppReserved = heap_segment_reserved (hs);
+#else
+#ifdef MULTIPLE_HEAPS
+    gc_heap* hp = heap_segment_heap (hs);
+#else
+    gc_heap* hp = __this;
+#endif //MULTIPLE_HEAPS
+    if (hs == hp->ephemeral_heap_segment)
+    {
+        // The ephemeral segment holds the younger generations above the
+        // tail of gen2: gen0's allocation start is the highest, then
+        // gen1's below it. Walk youngest to oldest and pick the first
+        // generation whose start is at or below the object.
+        uint8_t* reserved = heap_segment_reserved (hs);
+        uint8_t* end = heap_segment_allocated(hs);
+        for (int gen = 0; gen < max_generation; gen++)
+        {
+            uint8_t* start = generation_allocation_start (hp->generation_of (gen));
+            if ((uint8_t*)object >= start)
+            {
+                generation = gen;
+                *ppStart = start;
+                *ppAllocated = end;
+                *ppReserved = reserved;
+                break;
+            }
+            // The next older generation's range ends where this one starts.
+            end = reserved = start;
+        }
+        if (generation == -1)
+        {
+            // The object is below gen1's start, i.e. in the portion of
+            // gen2 that lives on the ephemeral segment. Without this
+            // assignment we would return (unsigned int)-1 to the caller,
+            // which casts it directly to COR_PRF_GC_GENERATION.
+            generation = max_generation;
+            *ppStart = heap_segment_mem (hs);
+            *ppAllocated = *ppReserved = generation_allocation_start (hp->generation_of (max_generation - 1));
+        }
+    }
+    else
+    {
+        // A non-ephemeral segment belongs entirely to one generation:
+        // gen2, LOH or POH depending on the segment flags.
+        generation = max_generation;
+        if (heap_segment_loh_p (hs))
+        {
+            generation = loh_generation;
+        }
+        else if (heap_segment_poh_p (hs))
+        {
+            generation = poh_generation;
+        }
+        *ppStart = heap_segment_mem (hs);
+        *ppAllocated = heap_segment_allocated (hs);
+        *ppReserved = heap_segment_reserved (hs);
+    }
+#endif //USE_REGIONS
+    return (unsigned int)generation;
+}
+
bool GCHeap::IsEphemeral (Object* object)
{
uint8_t* o = (uint8_t*)object;
BYTE *rangeEndReserved;
};
-struct GenerationTable
-{
+// Table of generation address ranges handed to profilers through
+// GetGenerationBounds. The GC updates it (Refresh at GC start,
+// AddRecord when a new segment/region is added) and profiler threads
+// read it; all access is serialized by the internal mutex.
+class GenerationTable
+{
+public:
+    GenerationTable();
+    // Append one range under the lock; exact duplicates are ignored.
+    void AddRecord(int generation, BYTE* rangeStart, BYTE* rangeEnd, BYTE* rangeEndReserved);
+    // Append one range; the caller must already hold the mutex.
+    void AddRecordNoLock(int generation, BYTE* rangeStart, BYTE* rangeEnd, BYTE* rangeEndReserved);
+    // Rebuild the table by re-walking all generations in the GC.
+    void Refresh();
+    // Copy up to cObjectRanges records out; report the total record count.
+    HRESULT GetGenerationBounds(ULONG cObjectRanges, ULONG* pcObjectRanges, COR_PRF_GC_GENERATION_RANGE* ranges);
+private:
+    Crst mutex; // guards count, capacity and genDescTable
    ULONG count;    // number of valid entries in genDescTable
    ULONG capacity; // allocated length of genDescTable
    static const ULONG defaultCapacity = 5; // that's the minimum for Gen0-2 + LOH + POH
-    GenerationTable *prev;
    GenerationDesc *genDescTable; // heap-allocated range array; null if allocation failed
-#ifdef _DEBUG
-    ULONG magic;
-#define GENERATION_TABLE_MAGIC 0x34781256
-#define GENERATION_TABLE_BAD_MAGIC 0x55aa55aa
-#endif
};
+// Constructs an empty table. The lock is a leaf-level Crst usable in any
+// mode (CrstLeafLock, CRST_UNSAFE_ANYMODE) so GC threads may take it.
+// If the initial array allocation fails, capacity stays 0 and
+// genDescTable stays null; AddRecordNoLock retries allocation on first use.
+GenerationTable::GenerationTable() : mutex(CrstLeafLock, CRST_UNSAFE_ANYMODE)
+{
+    count = 0;
+    capacity = GenerationTable::defaultCapacity;
+    genDescTable = new (nothrow) GenerationDesc[capacity];
+    if (genDescTable == NULL)
+    {
+        capacity = 0;
+    }
+}
+
+// Adds the range [rangeStart, rangeEnd) (reserved up to rangeEndReserved)
+// for `generation` to the table under the lock, skipping exact duplicates.
+// Valid generations are 0-2 (small object heap), 3 (LOH) and 4 (POH).
+void GenerationTable::AddRecord(int generation, BYTE* rangeStart, BYTE* rangeEnd, BYTE* rangeEndReserved)
+{
+    CONTRACT_VOID
+    {
+        NOTHROW;
+        GC_NOTRIGGER;
+        MODE_ANY; // can be called even on GC threads
+        PRECONDITION(0 <= generation && generation <= 4);
+        PRECONDITION(CheckPointer(rangeStart));
+        PRECONDITION(CheckPointer(rangeEnd));
+        PRECONDITION(CheckPointer(rangeEndReserved));
+    } CONTRACT_END;
+
+    CrstHolder holder(&mutex);
+
+    // Because the segment/region is added to the heap before it is reported
+    // to the profiler, it is possible that the region is added to the heap,
+    // a racing GenerationTable refresh happens (already picking up the new
+    // region), and then it gets reported again here.
+    // This check makes sure we never add a duplicated record to the table.
+    for (ULONG i = 0; i < count; i++)
+    {
+        if (genDescTable[i].rangeStart == rangeStart)
+        {
+            _ASSERTE (genDescTable[i].generation == generation);
+            _ASSERTE (genDescTable[i].rangeEnd == rangeEnd);
+            _ASSERTE (genDescTable[i].rangeEndReserved == rangeEndReserved);
+            RETURN;
+        }
+    }
+    AddRecordNoLock(generation, rangeStart, rangeEnd, rangeEndReserved);
+    RETURN;
+}
+
+// Appends a record without taking the lock; the caller must already own
+// `mutex` (asserted below). Grows the backing array geometrically when full.
+void GenerationTable::AddRecordNoLock(int generation, BYTE* rangeStart, BYTE* rangeEnd, BYTE* rangeEndReserved)
+{
+    CONTRACT_VOID
+    {
+        NOTHROW;
+        GC_NOTRIGGER;
+        MODE_ANY; // can be called even on GC threads
+        PRECONDITION(0 <= generation && generation <= 4);
+        PRECONDITION(CheckPointer(rangeStart));
+        PRECONDITION(CheckPointer(rangeEnd));
+        PRECONDITION(CheckPointer(rangeEndReserved));
+    } CONTRACT_END;
+
+    _ASSERTE (mutex.OwnedByCurrentThread());
+    if (count >= capacity)
+    {
+        // capacity == 0 means a previous allocation failed; retry from
+        // the default size rather than doubling zero.
+        ULONG newCapacity = capacity == 0 ? GenerationTable::defaultCapacity : capacity * 2;
+        GenerationDesc *newGenDescTable = new (nothrow) GenerationDesc[newCapacity];
+        if (newGenDescTable == NULL)
+        {
+            // Can't grow: drop the whole table rather than keep a stale
+            // partial one. GetGenerationBounds returns E_FAIL while
+            // genDescTable is null.
+            count = capacity = 0;
+            delete[] genDescTable;
+            genDescTable = nullptr;
+            RETURN;
+        }
+        memcpy(newGenDescTable, genDescTable, sizeof(genDescTable[0]) * count);
+        delete[] genDescTable;
+        genDescTable = newGenDescTable;
+        capacity = newCapacity;
+    }
+    _ASSERTE(count < capacity);
+
+    genDescTable[count].generation = generation;
+    genDescTable[count].rangeStart = rangeStart;
+    genDescTable[count].rangeEnd = rangeEnd;
+    genDescTable[count].rangeEndReserved = rangeEndReserved;
+
+    count = count + 1;
+    RETURN;
+}
+
+// Copies up to cObjectRanges records into `ranges` and reports the total
+// number of records in *pcObjectRanges — which may exceed the number
+// copied, letting the caller retry with a larger buffer.
+HRESULT GenerationTable::GetGenerationBounds(ULONG cObjectRanges, ULONG* pcObjectRanges, COR_PRF_GC_GENERATION_RANGE* ranges)
+{
+    if ((cObjectRanges > 0) && (ranges == nullptr))
+    {
+        return E_INVALIDARG;
+    }
+    CrstHolder holder(&mutex);
+    // genDescTable is null when the initial allocation or a grow failed.
+    if (genDescTable == nullptr)
+    {
+        return E_FAIL;
+    }
+    ULONG copy = min(count, cObjectRanges);
+    for (ULONG i = 0; i < copy; i++)
+    {
+        ranges[i].generation = (COR_PRF_GC_GENERATION)genDescTable[i].generation;
+        ranges[i].rangeStart = (ObjectID)genDescTable[i].rangeStart;
+        ranges[i].rangeLength = genDescTable[i].rangeEnd - genDescTable[i].rangeStart;
+        ranges[i].rangeLengthReserved = genDescTable[i].rangeEndReserved - genDescTable[i].rangeStart;
+    }
+    if (pcObjectRanges != nullptr)
+    {
+        *pcObjectRanges = count;
+    }
+    return S_OK;
+}
//---------------------------------------------------------------------------------------
//
} CONTRACT_END;
GenerationTable *generationTable = (GenerationTable *)context;
+ generationTable->AddRecordNoLock(generation, rangeStart, rangeEnd, rangeEndReserved);
+ RETURN;
+}
- _ASSERTE(generationTable->magic == GENERATION_TABLE_MAGIC);
-
- ULONG count = generationTable->count;
- if (count >= generationTable->capacity)
- {
- ULONG newCapacity = generationTable->capacity == 0 ? GenerationTable::defaultCapacity : generationTable->capacity * 2;
- GenerationDesc *newGenDescTable = new (nothrow) GenerationDesc[newCapacity];
- if (newGenDescTable == NULL)
- {
- // if we can't allocate a bigger table, we'll have to ignore this call
- RETURN;
- }
- memcpy(newGenDescTable, generationTable->genDescTable, sizeof(generationTable->genDescTable[0]) * generationTable->count);
- delete[] generationTable->genDescTable;
- generationTable->genDescTable = newGenDescTable;
- generationTable->capacity = newCapacity;
- }
- _ASSERTE(count < generationTable->capacity);
-
- GenerationDesc *genDescTable = generationTable->genDescTable;
-
- genDescTable[count].generation = generation;
- genDescTable[count].rangeStart = rangeStart;
- genDescTable[count].rangeEnd = rangeEnd;
- genDescTable[count].rangeEndReserved = rangeEndReserved;
-
- generationTable->count = count + 1;
+// Rebuilds the table from scratch: resets the record count and asks the
+// GC to report every generation range again (GenWalkFunc appends them via
+// AddRecordNoLock). The lock is held for the whole walk so concurrent
+// AddRecord/GetGenerationBounds callers see a consistent table.
+void GenerationTable::Refresh()
+{
+    // fill in the values by calling back into the gc, which will report
+    // the ranges by calling GenWalkFunc for each one
+    CrstHolder holder(&mutex);
+    IGCHeap *hp = GCHeapUtilities::GetGCHeap();
+    this->count = 0;
+    hp->DiagDescrGenerations(GenWalkFunc, this);
}
// This is the table of generation bounds updated by the gc
-// and read by the profiler. So this is a single writer,
-// multiple readers scenario.
+// and read by the profiler.
static GenerationTable *s_currentGenerationTable;
-// The generation table is updated atomically by replacing the
-// pointer to it. The only tricky part is knowing when
-// the old table can be deleted.
-static Volatile<LONG> s_generationTableLock;
-
// This is just so we can assert there's a single writer
#ifdef ENABLE_CONTRACTS
static Volatile<LONG> s_generationTableWriterCount;
// Notify the profiler of start of the collection
if (CORProfilerTrackGC() || CORProfilerTrackBasicGC())
{
- // generate a new generation table
- GenerationTable *newGenerationTable = new (nothrow) GenerationTable();
- if (newGenerationTable == NULL)
- RETURN;
- newGenerationTable->count = 0;
- newGenerationTable->capacity = GenerationTable::defaultCapacity;
- // if there is already a current table, use its capacity as a guess for the capacity
- if (s_currentGenerationTable != NULL)
- newGenerationTable->capacity = s_currentGenerationTable->capacity;
- newGenerationTable->prev = NULL;
- newGenerationTable->genDescTable = new (nothrow) GenerationDesc[newGenerationTable->capacity];
- if (newGenerationTable->genDescTable == NULL)
- newGenerationTable->capacity = 0;
-
-#ifdef _DEBUG
- newGenerationTable->magic = GENERATION_TABLE_MAGIC;
-#endif
- // fill in the values by calling back into the gc, which will report
- // the ranges by calling GenWalkFunc for each one
- IGCHeap *hp = GCHeapUtilities::GetGCHeap();
- hp->DiagDescrGenerations(GenWalkFunc, newGenerationTable);
-
- // remember the old table and plug in the new one
- GenerationTable *oldGenerationTable = s_currentGenerationTable;
- s_currentGenerationTable = newGenerationTable;
- // WARNING: tricky code!
- //
- // We sample the generation table lock *after* plugging in the new table
- // We do so using an interlocked operation so the cpu can't reorder
- // the write to the s_currentGenerationTable with the increment.
- // If the interlocked increment returns 1, we know nobody can be using
- // the old table (readers increment the lock before using the table,
- // and decrement it afterwards). Any new readers coming in
- // will use the new table. So it's safe to delete the old
- // table.
- // On the other hand, if the interlocked increment returns
- // something other than one, we put the old table on a list
- // dangling off of the new one. Next time around, we'll try again
- // deleting any old tables.
- if (FastInterlockIncrement(&s_generationTableLock) == 1)
+ if (s_currentGenerationTable == nullptr)
{
- // We know nobody can be using any of the old tables
- while (oldGenerationTable != NULL)
+ EX_TRY
{
- _ASSERTE(oldGenerationTable->magic == GENERATION_TABLE_MAGIC);
-#ifdef _DEBUG
- oldGenerationTable->magic = GENERATION_TABLE_BAD_MAGIC;
-#endif
- GenerationTable *temp = oldGenerationTable;
- oldGenerationTable = oldGenerationTable->prev;
- delete[] temp->genDescTable;
- delete temp;
+ s_currentGenerationTable = new (nothrow) GenerationTable();
}
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(SwallowAllExceptions)
}
- else
+
+ if (s_currentGenerationTable == nullptr)
{
- // put the old table on a list
- newGenerationTable->prev = oldGenerationTable;
+ RETURN;
}
- FastInterlockDecrement(&s_generationTableLock);
+ s_currentGenerationTable->Refresh();
+ }
+#endif // PROFILING_SUPPORTED
+ RETURN;
+}
+
+// Called by the GC whenever a new region/segment is added to the heap so
+// the profiler's generation table stays in sync between full refreshes.
+void __stdcall ProfilerAddNewRegion(int generation, uint8_t* rangeStart, uint8_t* rangeEnd, uint8_t* rangeEndReserved)
+{
+    CONTRACT_VOID
+    {
+        NOTHROW;
+        GC_NOTRIGGER;
+        MODE_ANY; // can be called even on GC threads
+    } CONTRACT_END;
+#ifdef PROFILING_SUPPORTED
+    if (CORProfilerTrackGC() || CORProfilerTrackBasicGC())
+    {
+        // The table is created lazily at GC start and its allocation can
+        // fail, so it may still be null here (e.g. a profiler attached but
+        // no collection has started yet). Skip the record instead of
+        // dereferencing null; the next Refresh() will pick the region up.
+        GenerationTable* generationTable = s_currentGenerationTable;
+        if (generationTable != nullptr)
+        {
+            generationTable->AddRecord(generation, rangeStart, rangeEnd, rangeEndReserved);
+        }
    }
#endif // PROFILING_SUPPORTED
    RETURN;
// Yay!
EE_THREAD_NOT_REQUIRED;
- // Yay!
- CANNOT_TAKE_LOCK;
-
+ // Lock is required to ensure this is synchronized with GC's updates.
+ CAN_TAKE_LOCK;
PRECONDITION(CheckPointer(pcObjectRanges));
PRECONDITION(cObjectRanges <= 0 || ranges != NULL);
- PRECONDITION(s_generationTableLock >= 0);
}
CONTRACTL_END;
LL_INFO1000,
"**PROF: GetGenerationBounds.\n"));
- // Announce we are using the generation table now
- CounterHolder genTableLock(&s_generationTableLock);
-
- GenerationTable *generationTable = s_currentGenerationTable;
-
- if (generationTable == NULL)
+ if (s_currentGenerationTable == NULL)
{
return E_FAIL;
}
- _ASSERTE(generationTable->magic == GENERATION_TABLE_MAGIC);
-
- GenerationDesc *genDescTable = generationTable->genDescTable;
- ULONG count = min(generationTable->count, cObjectRanges);
- for (ULONG i = 0; i < count; i++)
- {
- ranges[i].generation = (COR_PRF_GC_GENERATION)genDescTable[i].generation;
- ranges[i].rangeStart = (ObjectID)genDescTable[i].rangeStart;
- ranges[i].rangeLength = genDescTable[i].rangeEnd - genDescTable[i].rangeStart;
- ranges[i].rangeLengthReserved = genDescTable[i].rangeEndReserved - genDescTable[i].rangeStart;
- }
-
- *pcObjectRanges = generationTable->count;
-
- return S_OK;
+ return s_currentGenerationTable->GetGenerationBounds(cObjectRanges, pcObjectRanges, ranges);
}
PRECONDITION(objectId != NULL);
PRECONDITION(CheckPointer(range));
- PRECONDITION(s_generationTableLock >= 0);
}
CONTRACTL_END;
_ASSERTE((GetThreadNULLOk() == NULL) || (GetThreadNULLOk()->PreemptiveGCDisabled()));
-
-
- // Announce we are using the generation table now
- CounterHolder genTableLock(&s_generationTableLock);
-
- GenerationTable *generationTable = s_currentGenerationTable;
-
- if (generationTable == NULL)
- {
- return E_FAIL;
- }
- _ASSERTE(generationTable->magic == GENERATION_TABLE_MAGIC);
+ IGCHeap *hp = GCHeapUtilities::GetGCHeap();
- GenerationDesc *genDescTable = generationTable->genDescTable;
- ULONG count = generationTable->count;
- for (ULONG i = 0; i < count; i++)
- {
- if (genDescTable[i].rangeStart <= (BYTE *)objectId && (BYTE *)objectId < genDescTable[i].rangeEndReserved)
- {
- range->generation = (COR_PRF_GC_GENERATION)genDescTable[i].generation;
- range->rangeStart = (ObjectID)genDescTable[i].rangeStart;
- range->rangeLength = genDescTable[i].rangeEnd - genDescTable[i].rangeStart;
- range->rangeLengthReserved = genDescTable[i].rangeEndReserved - genDescTable[i].rangeStart;
+ uint8_t* pStart;
+ uint8_t* pAllocated;
+ uint8_t* pReserved;
+ unsigned int generation = hp->GetGenerationWithRange((Object*)objectId, &pStart, &pAllocated, &pReserved);
- return S_OK;
- }
- }
+ UINT_PTR rangeLength = pAllocated - pStart;
+ UINT_PTR rangeLengthReserved = pReserved - pStart;
- return E_FAIL;
+ range->generation = (COR_PRF_GC_GENERATION)generation;
+ range->rangeStart = (ObjectID)pStart;
+ range->rangeLength = rangeLength;
+ range->rangeLengthReserved = rangeLengthReserved;
+ return S_OK;
}
HRESULT ProfToEEInterfaceImpl::GetReJITIDs(