typedef uint32_t BOOL;
typedef uint32_t DWORD;
-typedef void* LPVOID;
// -----------------------------------------------------------------------------------------------------------
// HRESULT subset.
static char rgchBuffer[BUFFERSIZE];
char * pBuffer = &rgchBuffer[0];
- pBuffer[0] = '\r';
- pBuffer[1] = '\n';
- int buffer_start = 2;
+ pBuffer[0] = '\n';
+ int buffer_start = 1;
int pid_len = sprintf_s (&pBuffer[buffer_start], BUFFERSIZE - buffer_start, "[%5d]", GCToOSInterface::GetCurrentThreadIdForLogging());
buffer_start += pid_len;
memset(&pBuffer[buffer_start], '-', BUFFERSIZE - buffer_start);
char index_str[8];
memset (index_str, '-', 8);
sprintf_s (index_str, _countof(index_str), "%d", (int)gc_buffer_index);
- gc_log_buffer[gc_log_buffer_offset] = '\r';
- gc_log_buffer[gc_log_buffer_offset + 1] = '\n';
- memcpy (gc_log_buffer + (gc_log_buffer_offset + 2), index_str, 8);
+ gc_log_buffer[gc_log_buffer_offset] = '\n';
+ memcpy (gc_log_buffer + (gc_log_buffer_offset + 1), index_str, 8);
gc_buffer_index++;
if (gc_buffer_index > max_gc_buffers)
static char rgchBuffer[BUFFERSIZE];
char * pBuffer = &rgchBuffer[0];
- pBuffer[0] = '\r';
- pBuffer[1] = '\n';
- int buffer_start = 2;
+ pBuffer[0] = '\n';
+ int buffer_start = 1;
int msg_len = _vsnprintf_s (&pBuffer[buffer_start], BUFFERSIZE - buffer_start, _TRUNCATE, fmt, args );
assert (msg_len != -1);
msg_len += buffer_start;
{
if (bToggleGC || g_TrapReturningThreads)
{
+#ifdef _DEBUG
+ // In debug builds, all enter_spin_lock operations go through this code. If a GC has
+ // started, it is important to block until the GC thread calls set_gc_done (since it is
+ // guaranteed to have cleared g_TrapReturningThreads by this point). This avoids livelock
+ // conditions which can otherwise occur if threads are allowed to spin in this function
+ // (and therefore starve the GC thread) between the point when the GC thread sets the
+ // WaitForGC event and the point when the GC thread clears g_TrapReturningThreads.
+ if (gc_heap::gc_started)
+ {
+ gc_heap::wait_for_gc_done();
+ }
+#endif // _DEBUG
GCToEEInterface::DisablePreemptiveGC(pCurThread);
if (!bToggleGC)
{
#ifdef _DEBUG
alloc_context* acontext =
-#endif // DEBUG
+#endif // _DEBUG
generation_alloc_context (large_object_generation);
assert (acontext->alloc_ptr == 0);
assert (acontext->alloc_limit == 0);
return ((card_table_info*)((uint8_t*)c_table - sizeof (card_table_info)))->mark_array;
}
-#if defined (_TARGET_AMD64_)
+#ifdef BIT64
#define mark_bit_pitch ((size_t)16)
#else
#define mark_bit_pitch ((size_t)8)
-#endif //AMD64
+#endif // BIT64
#define mark_word_width ((size_t)32)
#define mark_word_size (mark_word_width * mark_bit_pitch)
return S_OK;
}
-//used by static variable implementation
-void CGCDescGcScan(LPVOID pvCGCDesc, promote_func* fn, ScanContext* sc)
-{
- CGCDesc* map = (CGCDesc*)pvCGCDesc;
-
- CGCDescSeries *last = map->GetLowestSeries();
- CGCDescSeries *cur = map->GetHighestSeries();
-
- assert (cur >= last);
- do
- {
- uint8_t** ppslot = (uint8_t**)((uint8_t*)pvCGCDesc + cur->GetSeriesOffset());
- uint8_t**ppstop = (uint8_t**)((uint8_t*)ppslot + cur->GetSeriesSize());
-
- while (ppslot < ppstop)
- {
- if (*ppslot)
- {
- (fn) ((Object**)ppslot, sc, 0);
- }
-
- ppslot++;
- }
-
- cur--;
- }
- while (cur >= last);
-}
-
// Wait until a garbage collection is complete
// returns NOERROR if wait was OK, other error code if failure.
// WARNING: This will not undo the must complete state. If you are
#endif //defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
// Go through and touch (read) each page straddled by a memory block.
-void TouchPages(LPVOID pStart, uint32_t cb)
+void TouchPages(void * pStart, size_t cb)
{
const uint32_t pagesize = OS_PAGE_SIZE;
_ASSERTE(0 == (pagesize & (pagesize-1))); // Must be a power of 2.
}
}
-#define INVALIDGCVALUE (LPVOID)((size_t)0xcccccccd)
+#define INVALIDGCVALUE (void*)((size_t)0xcccccccd)
// test to see if 'ptr' was only updated via the write barrier.
inline void testGCShadow(Object** ptr)
struct segment_info
{
- LPVOID pvMem; // base of the allocation, not the first object (must add ibFirstObject)
+ void * pvMem; // base of the allocation, not the first object (must add ibFirstObject)
size_t ibFirstObject; // offset to the base of the first object in the segment
size_t ibAllocated; // limit of allocated memory in the segment (>= firstobject)
    size_t      ibCommit;       // limit of committed memory in the segment (>= allocated)
struct ProfilingScanContext : ScanContext
{
BOOL fProfilerPinned;
- LPVOID pvEtwContext;
+ void * pvEtwContext;
void *pHeapId;
ProfilingScanContext(BOOL fProfilerPinnedParam) : ScanContext()
extern VOLATILE(int32_t) m_GCLock;
// Go through and touch (read) each page straddled by a memory block.
-void TouchPages(LPVOID pStart, uint32_t cb);
+void TouchPages(void * pStart, size_t cb);
// For low memory notification from host
extern int32_t g_bLowMemoryFromHost;
#ifdef WRITE_BARRIER_CHECK
-#define INVALIDGCVALUE (LPVOID)((size_t)0xcccccccd)
+#define INVALIDGCVALUE (void*)((size_t)0xcccccccd)
// called by the write barrier to update the shadow heap
void updateGCShadow(Object** ptr, Object* val)
pObj = (Object*) hp->find_object(o, hp->gc_low);
}
#endif //INTERIOR_POINTERS
- ScanRootsHelper(&pObj, pSC, dwFlags);
-#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
+ ScanRootsHelper(&pObj, ppObject, pSC, dwFlags);
+#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
}
// TODO - at some point we would like to completely decouple profiling
// Returns TRUE if GC profiling is enabled and the profiler
// should scan dependent handles, FALSE otherwise.
-BOOL ProfilerShouldTrackConditionalWeakTableElements() {
+BOOL ProfilerShouldTrackConditionalWeakTableElements()
+{
#if defined(GC_PROFILING)
return CORProfilerTrackConditionalWeakTableElements();
#else
// If GC profiling is enabled, informs the profiler that we are done
// tracing dependent handles.
-void ProfilerEndConditionalWeakTableElementReferences(void* heapId) {
+void ProfilerEndConditionalWeakTableElementReferences(void* heapId)
+{
#if defined (GC_PROFILING)
g_profControlBlock.pProfInterface->EndConditionalWeakTableElementReferences(heapId);
+#else
+ UNREFERENCED_PARAMETER(heapId);
#endif // defined (GC_PROFILING)
}
// If GC profiling is enabled, informs the profiler that we are done
// tracing root references.
-void ProfilerEndRootReferences2(void* heapId) {
+void ProfilerEndRootReferences2(void* heapId)
+{
#if defined (GC_PROFILING)
g_profControlBlock.pProfInterface->EndRootReferences2(heapId);
+#else
+ UNREFERENCED_PARAMETER(heapId);
#endif // defined (GC_PROFILING)
}
// This can also be called to do a single walk for BOTH a) and b) simultaneously. Since
// ETW can ask for roots, but not objects
#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
+
void GCProfileWalkHeapWorker(BOOL fProfilerPinned, BOOL fShouldWalkHeapRootsForEtw, BOOL fShouldWalkHeapObjectsForEtw)
{
{
// The finalizer queue is also a source of roots
SC.dwEtwRootKind = kEtwGCRootKindFinalizer;
- hp->finalize_queue->GcScanRoots(&ScanRootsHelper, hn, &SC);
+ hp->finalize_queue->GcScanRoots(&ProfScanRootsHelper, hn, &SC);
}
#else
// Ask the vm to go over all of the roots
// The finalizer queue is also a source of roots
SC.dwEtwRootKind = kEtwGCRootKindFinalizer;
- pGenGCHeap->finalize_queue->GcScanRoots(&ScanRootsHelper, 0, &SC);
+ pGenGCHeap->finalize_queue->GcScanRoots(&ProfScanRootsHelper, 0, &SC);
#endif // MULTIPLE_HEAPS
// Handles are kept independent of wks/svr/concurrent builds
#endif // FEATURE_EVENT_TRACE
}
}
-#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
+#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
void GCProfileWalkHeap()
{
#if defined (GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
// we need to walk the heap if one of GC_PROFILING or FEATURE_EVENT_TRACE
// is defined, since both of them make use of the walk heap worker.
- if (!fWalkedHeapForProfiler &&
+ if (!fWalkedHeapForProfiler &&
(fShouldWalkHeapRootsForEtw || fShouldWalkHeapObjectsForEtw))
{
GCProfileWalkHeapWorker(FALSE /* fProfilerPinned */, fShouldWalkHeapRootsForEtw, fShouldWalkHeapObjectsForEtw);
}
-#endif // defined (GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
+#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
}
BOOL GCHeap::IsGCInProgressHelper (BOOL bConsiderGCStart)
#endif //DACCESS_COMPILE
};
+// These two functions are utilized to scan the heap if requested by ETW
+// or a profiler. The implementations of these two functions are in profheapwalkhelper.cpp.
+#if defined(FEATURE_EVENT_TRACE) || defined(GC_PROFILING)
+void ScanRootsHelper(Object** ppObject, Object** ppObjectRef, ScanContext * pSC, DWORD dwFlags);
+BOOL HeapWalkHelper(Object * pBO, void * pvContext);
+#endif
+
#endif // _GCSCAN_H_
if (uBlock >= uCommitLine)
{
// figure out where to commit next
- LPVOID pvCommit = pSegment->rgValue + (uCommitLine * HANDLE_HANDLES_PER_BLOCK);
+ void * pvCommit = pSegment->rgValue + (uCommitLine * HANDLE_HANDLES_PER_BLOCK);
// we should commit one more page of handles
uint32_t dwCommit = g_SystemInfo.dwPageSize;
if (dwHi > dwLo)
{
// decommit the memory
- GCToOSInterface::VirtualDecommit((LPVOID)dwLo, dwHi - dwLo);
+ GCToOSInterface::VirtualDecommit((void *)dwLo, dwHi - dwLo);
// update the commit line
pSegment->bCommitLine = (uint8_t)((dwLo - (size_t)pSegment->rgValue) / HANDLE_BYTES_PER_BLOCK);
DWORD
WINAPI
GetCurrentThreadId(
- VOID)
+ void)
{
// TODO: Implement
return 1;
}
WINBASEAPI
-VOID
+void
WINAPI
YieldProcessor()
{
}
WINBASEAPI
-VOID
+void
WINAPI
MemoryBarrier()
{