// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.

/*
 * GCToEEInterface implementation
 */

void GCToEEInterface::SuspendEE(SUSPEND_REASON reason)
{
    static_assert_no_msg(SUSPEND_FOR_GC == (int)ThreadSuspend::SUSPEND_FOR_GC);
    static_assert_no_msg(SUSPEND_FOR_GC_PREP == (int)ThreadSuspend::SUSPEND_FOR_GC_PREP);

    _ASSERTE(reason == SUSPEND_FOR_GC || reason == SUSPEND_FOR_GC_PREP);

    g_pDebugInterface->SuspendForGarbageCollectionStarted();

    ThreadSuspend::SuspendEE((ThreadSuspend::SUSPEND_REASON)reason);

    g_pDebugInterface->SuspendForGarbageCollectionCompleted();
}
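
// Note: SuspendEE above and RestartEE below bracket their work with
// g_pDebugInterface callbacks so the debugger can account for GC-induced
// suspensions separately from debugger-induced ones.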

void GCToEEInterface::RestartEE(bool bFinishedGC)
{
    g_pDebugInterface->ResumeForGarbageCollectionStarted();

    ThreadSuspend::RestartEE(bFinishedGC, TRUE);
}

VOID GCToEEInterface::SyncBlockCacheWeakPtrScan(HANDLESCANPROC scanProc, uintptr_t lp1, uintptr_t lp2)
{
    SyncBlockCache::GetSyncBlockCache()->GCWeakPtrScan(scanProc, lp1, lp2);
}

// The EE can perform post-stack-scanning actions here, while the
// user threads are still suspended.
VOID GCToEEInterface::AfterGcScanRoots (int condemned, int max_gen,
                                        ScanContext* sc)
{
#ifdef FEATURE_COMINTEROP
    // Go through the app domain and detach all the *unmarked* RCWs to prevent
    // the RCW cache from resurrecting them.
    ::GetAppDomain()->DetachRCWs();
#endif // FEATURE_COMINTEROP
}

/*
 * Scan all stack roots
 */

static void ScanStackRoots(Thread * pThread, promote_func* fn, ScanContext* sc)
{
    GCCONTEXT   gcctx;

    gcctx.f  = fn;
    gcctx.sc = sc;
    gcctx.cf = NULL;

    ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE();

    // Either we are in a concurrent situation (in which case the thread is unknown to
    // us), or we are performing a synchronous GC and we are the GC thread, holding
    // the threadstore lock.

    _ASSERTE(dbgOnly_IsSpecialEEThread() ||
             GetThread() == NULL ||
             // this is for background GC threads, which always call this when the EE is suspended.
             IsGCSpecialThread() ||
             (GetThread() == ThreadSuspend::GetSuspensionThread() && ThreadStore::HoldingThreadStore()));

    pThread->SetHasPromotedBytes();

    Frame* pTopFrame = pThread->GetFrame();
    Object ** topStack = (Object **)pTopFrame;
    if ((pTopFrame != ((Frame*)-1))
        && (pTopFrame->GetVTablePtr() == InlinedCallFrame::GetMethodFrameVPtr()))
    {
        // It is an InlinedCallFrame. Get the SP from it.
        InlinedCallFrame* pInlinedFrame = (InlinedCallFrame*)pTopFrame;
        topStack = (Object **)pInlinedFrame->GetCallSiteSP();
    }

    sc->stack_limit = (uintptr_t)topStack;
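
    // stack_limit records where this scan starts (the logical top of the stack);
    // taking the call-site SP from an InlinedCallFrame keeps native frames beyond
    // the managed/native transition out of the scanned range.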

#ifdef FEATURE_CONSERVATIVE_GC
    if (g_pConfig->GetGCConservative())
    {
        // Conservative stack root reporting:
        // we treat everything on the stack as a pinned interior GC pointer.
        // Since everything is reported as pinned, none of the following needs to run
        // during the relocation phase.
        if (sc->promotion)
        {
            Object ** bottomStack = (Object **) pThread->GetCachedStackBase();
            Object ** walk;
            for (walk = topStack; walk < bottomStack; walk++)
            {
                if (((void*)*walk > (void*)bottomStack || (void*)*walk < (void*)topStack) &&
                    ((void*)*walk >= (void*)g_lowest_address && (void*)*walk <= (void*)g_highest_address)
                    )
                {
                    //DbgPrintf("promote " FMT_ADDR " : " FMT_ADDR "\n", walk, *walk);
                    fn(walk, sc, GC_CALL_INTERIOR|GC_CALL_PINNED);
                }
            }
        }

        // Also ask the explicit Frames to report any references they might know about.
        // Generally these will be a subset of the objects reported above, but there's
        // nothing that guarantees that, and in the specific case of a GC protect frame the
        // references it protects may live at a lower address than the frame itself (and
        // thus escape the stack range we scanned above).
        Frame *pFrame = pThread->GetFrame();
        while (pFrame != FRAME_TOP)
        {
            pFrame->GcScanRoots(fn, sc);
            pFrame = pFrame->PtrNextFrame();
        }
    }
    else
#endif
    {
        unsigned flagsStackWalk = ALLOW_ASYNC_STACK_WALK | ALLOW_INVALID_OBJECTS;
#if defined(WIN64EXCEPTIONS)
        flagsStackWalk |= GC_FUNCLET_REFERENCE_REPORTING;
#endif // defined(WIN64EXCEPTIONS)
        pThread->StackWalkFrames( GcStackCrawlCallBack, &gcctx, flagsStackWalk);
    }
}
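
// GcScanRoots reports all thread-stack roots to the GC. Under server GC each
// scanning GC thread calls this with its own sc->thread_number, and the
// IsThreadUsingAllocationContextHeap check below partitions the runtime's threads
// among the GC threads so that every stack is scanned exactly once.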

void GCToEEInterface::GcScanRoots(promote_func* fn, int condemned, int max_gen, ScanContext* sc)
{
    STRESS_LOG1(LF_GCROOTS, LL_INFO10, "GCScan: Promotion Phase = %d\n", sc->promotion);

    // In server GC, we should be competing for marking the statics
    if (GCHeapUtilities::MarkShouldCompeteForStatics())
    {
        if (condemned == max_gen && sc->promotion)
        {
            SystemDomain::EnumAllStaticGCRefs(fn, sc);
        }
    }

    Thread* pThread = NULL;
    while ((pThread = ThreadStore::GetThreadList(pThread)) != NULL)
    {
        STRESS_LOG2(LF_GC | LF_GCROOTS, LL_INFO100, "{ Starting scan of Thread %p ID = %x\n", pThread, pThread->GetThreadId());

        if (GCHeapUtilities::GetGCHeap()->IsThreadUsingAllocationContextHeap(
            pThread->GetAllocContext(), sc->thread_number))
        {
            sc->thread_under_crawl = pThread;
#ifdef FEATURE_EVENT_TRACE
            sc->dwEtwRootKind = kEtwGCRootKindStack;
#endif // FEATURE_EVENT_TRACE
            ScanStackRoots(pThread, fn, sc);
#ifdef FEATURE_EVENT_TRACE
            sc->dwEtwRootKind = kEtwGCRootKindOther;
#endif // FEATURE_EVENT_TRACE
        }
        STRESS_LOG2(LF_GC | LF_GCROOTS, LL_INFO100, "Ending scan of Thread %p ID = 0x%x }\n", pThread, pThread->GetThreadId());
    }
}

void GCToEEInterface::GcStartWork (int condemned, int max_gen)
{
#ifdef VERIFY_HEAP
    // Validate byrefs pinned by IL stubs since the last GC.
    StubHelpers::ProcessByrefValidationList();
#endif // VERIFY_HEAP

    ExecutionManager::CleanupCodeHeaps();

#ifdef FEATURE_EVENT_TRACE
    ETW::TypeSystemLog::Cleanup();
#endif // FEATURE_EVENT_TRACE

#ifdef FEATURE_COMINTEROP
    // Let the GC detect managed/native cycles with input from Jupiter.
    // Jupiter will
    // 1. Report references from RCWs to CCWs based on native references in Jupiter
    // 2. Identify the subset of CCWs that need to be rooted
    //
    // We'll build the references from RCWs to CCWs using
    // 1. Preallocated arrays
    // 2. Dependent handles
    RCWWalker::OnGCStarted(condemned);
#endif // FEATURE_COMINTEROP

    if (condemned == max_gen)
    {
        ThreadStore::s_pThreadStore->OnMaxGenerationGCStarted();
    }
}

void GCToEEInterface::GcDone(int condemned)
{
#ifdef FEATURE_COMINTEROP
    // Tell Jupiter that the GC has finished
    RCWWalker::OnGCFinished(condemned);
#endif // FEATURE_COMINTEROP
}

bool GCToEEInterface::RefCountedHandleCallbacks(Object * pObject)
{
#ifdef FEATURE_COMINTEROP
    //<REVISIT_TODO>@todo optimize the access to the ref-count
    ComCallWrapper* pWrap = ComCallWrapper::GetWrapperForObject((OBJECTREF)pObject);

    return pWrap != NULL && pWrap->IsWrapperActive();
#else
    return false;
#endif // FEATURE_COMINTEROP
}

void GCToEEInterface::GcBeforeBGCSweepWork()
{
#ifdef VERIFY_HEAP
    // Validate byrefs pinned by IL stubs since the last GC.
    StubHelpers::ProcessByrefValidationList();
#endif // VERIFY_HEAP
}

void GCToEEInterface::SyncBlockCacheDemote(int max_gen)
{
    SyncBlockCache::GetSyncBlockCache()->GCDone(TRUE, max_gen);
}

void GCToEEInterface::SyncBlockCachePromotionsGranted(int max_gen)
{
    SyncBlockCache::GetSyncBlockCache()->GCDone(FALSE, max_gen);
}

uint32_t GCToEEInterface::GetActiveSyncBlockCount()
{
    return SyncBlockCache::GetSyncBlockCache()->GetActiveCount();
}

gc_alloc_context * GCToEEInterface::GetAllocContext()
{
    Thread* pThread = ::GetThread();
    if (!pThread)
    {
        return nullptr;
    }

    return pThread->GetAllocContext();
}

void GCToEEInterface::GcEnumAllocContexts(enum_alloc_context_func* fn, void* param)
{
    if (GCHeapUtilities::UseThreadAllocationContexts())
    {
        Thread * pThread = NULL;
        while ((pThread = ThreadStore::GetThreadList(pThread)) != NULL)
        {
            fn(pThread->GetAllocContext(), param);
        }
    }
    else
    {
        fn(&g_global_alloc_context, param);
    }
}

uint8_t* GCToEEInterface::GetLoaderAllocatorObjectForGC(Object* pObject)
{
    return pObject->GetGCSafeMethodTable()->GetLoaderAllocatorObjectForGC();
}

bool GCToEEInterface::IsPreemptiveGCDisabled()
{
    Thread* pThread = ::GetThread();
    if (pThread)
    {
        return !!pThread->PreemptiveGCDisabled();
    }

    return false;
}

bool GCToEEInterface::EnablePreemptiveGC()
{
    bool bToggleGC = false;
    Thread* pThread = ::GetThread();

    if (pThread)
    {
        bToggleGC = !!pThread->PreemptiveGCDisabled();
        if (bToggleGC)
        {
            pThread->EnablePreemptiveGC();
        }
    }

    return bToggleGC;
}

void GCToEEInterface::DisablePreemptiveGC()
{
    Thread* pThread = ::GetThread();
    if (pThread)
    {
        pThread->DisablePreemptiveGC();
    }
}

Thread* GCToEEInterface::GetThread()
{
    return ::GetThread();
}

struct BackgroundThreadStubArgs
{
    Thread* thread;
    GCBackgroundThreadFunction threadStart;
    void* arg;
    CLREvent threadStartedEvent;
    bool hasStarted;
};

DWORD WINAPI BackgroundThreadStub(void* arg)
{
    BackgroundThreadStubArgs* stubArgs = (BackgroundThreadStubArgs*)arg;
    assert(stubArgs->thread != NULL);

    ClrFlsSetThreadType(ThreadType_GC);
    stubArgs->thread->SetGCSpecial(true);
    STRESS_LOG_RESERVE_MEM(GC_STRESSLOG_MULTIPLY);

    stubArgs->hasStarted = !!stubArgs->thread->HasStarted(FALSE);

    Thread* thread = stubArgs->thread;
    GCBackgroundThreadFunction realThreadStart = stubArgs->threadStart;
    void* realThreadArg = stubArgs->arg;
    bool hasStarted = stubArgs->hasStarted;

    stubArgs->threadStartedEvent.Set();
    // The stubArgs cannot be used once the event is set, since that releases the wait on the
    // event in the function that created this thread, and the stubArgs go out of scope there.

    DWORD result = 0;

    if (hasStarted)
    {
        result = realThreadStart(realThreadArg);
        DestroyThread(thread);
    }

    return result;
}

#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
inline BOOL ShouldTrackMovementForProfilerOrEtw()
{
#ifdef GC_PROFILING
    if (CORProfilerTrackGC())
        return true;
#endif

#ifdef FEATURE_EVENT_TRACE
    if (ETW::GCLog::ShouldTrackMovementForEtw())
        return true;
#endif

    return false;
}
#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)

void ProfScanRootsHelper(Object** ppObject, ScanContext *pSC, uint32_t dwFlags)
{
#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
    Object *pObj = *ppObject;
    if (dwFlags & GC_CALL_INTERIOR)
    {
        pObj = GCHeapUtilities::GetGCHeap()->GetContainingObject(pObj, true);
        if (pObj == nullptr)
            return;
    }
    ScanRootsHelper(pObj, ppObject, pSC, dwFlags);
#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
}
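
// Note on the interior-pointer handling above: the profiler and ETW report whole
// objects, so a GC_CALL_INTERIOR root is first resolved to its containing object
// via GetContainingObject before being forwarded to ScanRootsHelper.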

// TODO - at some point we would like to completely decouple profiling
// from ETW tracing using a pattern similar to this, where the
// ProfilingScanContext has flags about whether or not certain things
// should be tracked, and each one of these ProfilerShouldXYZ functions
// will check those flags and determine what to do based upon them.
// GCProfileWalkHeapWorker can, in turn, call those methods without fear
// of things being ifdef'd out.

// Returns TRUE if GC profiling is enabled and the profiler
// should scan dependent handles, FALSE otherwise.
BOOL ProfilerShouldTrackConditionalWeakTableElements()
{
#if defined(GC_PROFILING)
    return CORProfilerTrackConditionalWeakTableElements();
#else
    return FALSE;
#endif // defined (GC_PROFILING)
}

// If GC profiling is enabled, informs the profiler that we are done
// tracing dependent handles.
void ProfilerEndConditionalWeakTableElementReferences(void* heapId)
{
#if defined (GC_PROFILING)
    g_profControlBlock.pProfInterface->EndConditionalWeakTableElementReferences(heapId);
#else
    UNREFERENCED_PARAMETER(heapId);
#endif // defined (GC_PROFILING)
}

// If GC profiling is enabled, informs the profiler that we are done
// tracing root references.
void ProfilerEndRootReferences2(void* heapId)
{
#if defined (GC_PROFILING)
    g_profControlBlock.pProfInterface->EndRootReferences2(heapId);
#else
    UNREFERENCED_PARAMETER(heapId);
#endif // defined (GC_PROFILING)
}

void GcScanRootsForProfilerAndETW(promote_func* fn, int condemned, int max_gen, ScanContext* sc)
{
    Thread* pThread = NULL;
    while ((pThread = ThreadStore::GetThreadList(pThread)) != NULL)
    {
        sc->thread_under_crawl = pThread;
#ifdef FEATURE_EVENT_TRACE
        sc->dwEtwRootKind = kEtwGCRootKindStack;
#endif // FEATURE_EVENT_TRACE
        ScanStackRoots(pThread, fn, sc);
#ifdef FEATURE_EVENT_TRACE
        sc->dwEtwRootKind = kEtwGCRootKindOther;
#endif // FEATURE_EVENT_TRACE
    }
}
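
// Unlike GCToEEInterface::GcScanRoots, the walk above is not partitioned across
// server GC heaps (there is no IsThreadUsingAllocationContextHeap check): the
// diagnostic scan visits every thread from the calling thread.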

void ScanHandleForProfilerAndETW(Object** pRef, Object* pSec, uint32_t flags, ScanContext* context, bool isDependent)
{
    ProfilingScanContext* pSC = (ProfilingScanContext*)context;

#ifdef GC_PROFILING
    // Give the profiler the objectref.
    if (pSC->fProfilerPinned)
    {
        if (!isDependent)
        {
            BEGIN_PIN_PROFILER(CORProfilerTrackGC());
            g_profControlBlock.pProfInterface->RootReference2(
                (uint8_t *)*pRef,
                kEtwGCRootKindHandle,
                (EtwGCRootFlags)flags,
                pRef,
                &pSC->pHeapId);
            END_PIN_PROFILER();
        }
        else
        {
            BEGIN_PIN_PROFILER(CORProfilerTrackConditionalWeakTableElements());
            g_profControlBlock.pProfInterface->ConditionalWeakTableElementReference(
                (uint8_t*)*pRef,
                (uint8_t*)pSec,
                pRef,
                &pSC->pHeapId);
            END_PIN_PROFILER();
        }
    }
#endif // GC_PROFILING

#if defined(FEATURE_EVENT_TRACE)
    // Notify ETW of the handle
    if (ETW::GCLog::ShouldWalkHeapRootsForEtw())
    {
        ETW::GCLog::RootReference(
            pRef,
            *pRef,          // object being rooted
            pSec,           // pSecondaryNodeForDependentHandle
            isDependent,
            pSC,
            0,              // dwGCFlags (unused here)
            flags);         // ETW handle flags
    }
#endif // defined(FEATURE_EVENT_TRACE)
}

// This is called only if we've determined that either:
//     a) The Profiling API wants to do a walk of the heap, and it has pinned the
//     profiler in place (so it cannot be detached), and it's thus safe to call into the
//     profiler, OR
//     b) ETW infrastructure wants to do a walk of the heap either to log roots,
//     objects, or both.
// This can also be called to do a single walk for BOTH a) and b) simultaneously. Since
// ETW can ask for roots but not objects (or vice versa), the flags passed in say
// exactly which walks are wanted.
#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
void GCProfileWalkHeapWorker(BOOL fProfilerPinned, BOOL fShouldWalkHeapRootsForEtw, BOOL fShouldWalkHeapObjectsForEtw)
{
    ProfilingScanContext SC(fProfilerPinned);
    unsigned max_generation = GCHeapUtilities::GetGCHeap()->GetMaxGeneration();

    // **** Scan roots: only scan roots if the profiling API wants them or ETW wants them.
    if (fProfilerPinned || fShouldWalkHeapRootsForEtw)
    {
        GcScanRootsForProfilerAndETW(&ProfScanRootsHelper, max_generation, max_generation, &SC);
        SC.dwEtwRootKind = kEtwGCRootKindFinalizer;
        GCHeapUtilities::GetGCHeap()->DiagScanFinalizeQueue(&ProfScanRootsHelper, &SC);

        // Handles are kept independent of wks/svr/concurrent builds
        SC.dwEtwRootKind = kEtwGCRootKindHandle;
        GCHeapUtilities::GetGCHeap()->DiagScanHandles(&ScanHandleForProfilerAndETW, max_generation, &SC);

        // Indicate that regular handle scanning is over, so we can flush the buffered roots
        // to the profiler. (This is for profapi only. ETW will flush after the
        // entire heap walk is complete, via ETW::GCLog::EndHeapDump.)
        if (fProfilerPinned)
        {
            ProfilerEndRootReferences2(&SC.pHeapId);
        }
    }

    // **** Scan dependent handles: only if the profiler supports it or ETW wants roots
    if ((fProfilerPinned && ProfilerShouldTrackConditionalWeakTableElements()) ||
        fShouldWalkHeapRootsForEtw)
    {
        // GcScanDependentHandlesForProfiler double-checks
        // CORProfilerTrackConditionalWeakTableElements() before calling into the profiler

        ProfilingScanContext* pSC = &SC;

        // We'll re-use pHeapId (which was either unused (0) or freed by EndRootReferences2
        // (-1)), so reset it to NULL.
        _ASSERTE((*((size_t *)(&pSC->pHeapId)) == (size_t)(-1)) ||
                 (*((size_t *)(&pSC->pHeapId)) == (size_t)(0)));
        pSC->pHeapId = NULL;

        GCHeapUtilities::GetGCHeap()->DiagScanDependentHandles(&ScanHandleForProfilerAndETW, max_generation, &SC);

        // Indicate that dependent handle scanning is over, so we can flush the buffered roots
        // to the profiler. (This is for profapi only. ETW will flush after the
        // entire heap walk is complete, via ETW::GCLog::EndHeapDump.)
        if (fProfilerPinned && ProfilerShouldTrackConditionalWeakTableElements())
        {
            ProfilerEndConditionalWeakTableElementReferences(&SC.pHeapId);
        }
    }

    ProfilerWalkHeapContext profilerWalkHeapContext(fProfilerPinned, SC.pvEtwContext);

    // **** Walk objects on heap: only if the profiling API wants them or ETW wants them.
    if (fProfilerPinned || fShouldWalkHeapObjectsForEtw)
    {
        GCHeapUtilities::GetGCHeap()->DiagWalkHeap(&HeapWalkHelper, &profilerWalkHeapContext, max_generation, true /* walk the large object heap */);
    }

#ifdef FEATURE_EVENT_TRACE
    // **** Done! Indicate to the ETW helpers that the heap walk is done, so any buffers
    // should be flushed into the ETW stream.
    if (fShouldWalkHeapObjectsForEtw || fShouldWalkHeapRootsForEtw)
    {
        ETW::GCLog::EndHeapDump(&profilerWalkHeapContext);
    }
#endif // FEATURE_EVENT_TRACE
}
#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)

void GCProfileWalkHeap()
{
    BOOL fWalkedHeapForProfiler = FALSE;

#ifdef FEATURE_EVENT_TRACE
    if (ETW::GCLog::ShouldWalkStaticsAndCOMForEtw())
        ETW::GCLog::WalkStaticsAndCOMForETW();

    BOOL fShouldWalkHeapRootsForEtw = ETW::GCLog::ShouldWalkHeapRootsForEtw();
    BOOL fShouldWalkHeapObjectsForEtw = ETW::GCLog::ShouldWalkHeapObjectsForEtw();
#else // !FEATURE_EVENT_TRACE
    BOOL fShouldWalkHeapRootsForEtw = FALSE;
    BOOL fShouldWalkHeapObjectsForEtw = FALSE;
#endif // FEATURE_EVENT_TRACE

#if defined (GC_PROFILING)
    {
        BEGIN_PIN_PROFILER(CORProfilerTrackGC());
        GCProfileWalkHeapWorker(TRUE /* fProfilerPinned */, fShouldWalkHeapRootsForEtw, fShouldWalkHeapObjectsForEtw);
        fWalkedHeapForProfiler = TRUE;
        END_PIN_PROFILER();
    }
#endif // defined (GC_PROFILING)

#if defined (GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
    // We need to walk the heap if one of GC_PROFILING or FEATURE_EVENT_TRACE
    // is defined, since both of them make use of the walk heap worker.
    if (!fWalkedHeapForProfiler &&
        (fShouldWalkHeapRootsForEtw || fShouldWalkHeapObjectsForEtw))
    {
        GCProfileWalkHeapWorker(FALSE /* fProfilerPinned */, fShouldWalkHeapRootsForEtw, fShouldWalkHeapObjectsForEtw);
    }
#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
}

void WalkFReachableObjects(bool isCritical, void* objectID)
{
    g_profControlBlock.pProfInterface->FinalizeableObjectQueued(isCritical, (ObjectID)objectID);
}

static fq_walk_fn g_FQWalkFn = &WalkFReachableObjects;

void GCToEEInterface::DiagGCStart(int gen, bool isInduced)
{
#ifdef GC_PROFILING
    DiagUpdateGenerationBounds();
    GarbageCollectionStartedCallback(gen, isInduced);
    {
        BEGIN_PIN_PROFILER(CORProfilerTrackGC());
        size_t context = 0;

        // When we're walking objects allocated by class, then we don't want to walk the large
        // object heap because then it would count things that may have been around for a while.
        GCHeapUtilities::GetGCHeap()->DiagWalkHeap(&AllocByClassHelper, (void *)&context, 0, false);

        // Notify that we've reached the end of the Gen 0 scan
        g_profControlBlock.pProfInterface->EndAllocByClass(&context);
        END_PIN_PROFILER();
    }
#endif // GC_PROFILING
}

void GCToEEInterface::DiagUpdateGenerationBounds()
{
#ifdef GC_PROFILING
    if (CORProfilerTrackGC() || CORProfilerTrackBasicGC())
        UpdateGenerationBounds();
#endif // GC_PROFILING
}

void GCToEEInterface::DiagGCEnd(size_t index, int gen, int reason, bool fConcurrent)
{
#ifdef GC_PROFILING
    // We were only doing generation bounds and the GC finish callback for non-concurrent GCs, so
    // I am keeping that behavior to avoid breaking profilers. But if BasicGC monitoring is enabled,
    // we will do these for all GCs.
    if (!fConcurrent)
    {
        GCProfileWalkHeap();
    }

    if (CORProfilerTrackBasicGC() || (!fConcurrent && CORProfilerTrackGC()))
    {
        DiagUpdateGenerationBounds();
        GarbageCollectionFinishedCallback();
    }
#endif // GC_PROFILING
}

void GCToEEInterface::DiagWalkFReachableObjects(void* gcContext)
{
#ifdef GC_PROFILING
    if (CORProfilerTrackGC())
    {
        BEGIN_PIN_PROFILER(CORProfilerPresent());
        GCHeapUtilities::GetGCHeap()->DiagWalkFinalizeQueue(gcContext, g_FQWalkFn);
        END_PIN_PROFILER();
    }
#endif //GC_PROFILING
}

// Note on the last parameter: when calling this for bgc, only ETW
// should be sending these events so that existing profapi profilers
// don't get confused.
void WalkMovedReferences(uint8_t* begin, uint8_t* end,
                         ptrdiff_t reloc,
                         void* context,
                         bool fCompacting,
                         bool fBGC)
{
    ETW::GCLog::MovedReference(begin, end,
                               (fCompacting ? reloc : 0),
                               (size_t)context,
                               fCompacting,
                               !fBGC);
}
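
// When the GC is not compacting, the relocation delta passed to ETW above is forced
// to zero: survivors did not move, so the event stream records identity mappings of
// the surviving ranges.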

void GCToEEInterface::DiagWalkSurvivors(void* gcContext)
{
#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
    if (ShouldTrackMovementForProfilerOrEtw())
    {
        size_t context = 0;
        ETW::GCLog::BeginMovedReferences(&context);
        GCHeapUtilities::GetGCHeap()->DiagWalkSurvivorsWithType(gcContext, &WalkMovedReferences, (void*)context, walk_for_gc);
        ETW::GCLog::EndMovedReferences(context);
    }
#endif //GC_PROFILING || FEATURE_EVENT_TRACE
}

void GCToEEInterface::DiagWalkLOHSurvivors(void* gcContext)
{
#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
    if (ShouldTrackMovementForProfilerOrEtw())
    {
        size_t context = 0;
        ETW::GCLog::BeginMovedReferences(&context);
        GCHeapUtilities::GetGCHeap()->DiagWalkSurvivorsWithType(gcContext, &WalkMovedReferences, (void*)context, walk_for_loh);
        ETW::GCLog::EndMovedReferences(context);
    }
#endif //GC_PROFILING || FEATURE_EVENT_TRACE
}

void GCToEEInterface::DiagWalkBGCSurvivors(void* gcContext)
{
#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
    if (ShouldTrackMovementForProfilerOrEtw())
    {
        size_t context = 0;
        ETW::GCLog::BeginMovedReferences(&context);
        GCHeapUtilities::GetGCHeap()->DiagWalkSurvivorsWithType(gcContext, &WalkMovedReferences, (void*)context, walk_for_bgc);
        ETW::GCLog::EndMovedReferences(context);
    }
#endif //GC_PROFILING || FEATURE_EVENT_TRACE
}
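
// The three DiagWalk*Survivors methods above differ only in the walk_for_* tag they
// pass to DiagWalkSurvivorsWithType, which selects the survivor set to enumerate
// (ephemeral GC, large object heap, or background GC).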

void GCToEEInterface::StompWriteBarrier(WriteBarrierParameters* args)
{
    int stompWBCompleteActions = SWB_PASS;
    bool is_runtime_suspended = false;

    assert(args != nullptr);
    switch (args->operation)
    {
    case WriteBarrierOp::StompResize:
        // StompResize requires a new card table, a new lowest address, and
        // a new highest address
        assert(args->card_table != nullptr);
        assert(args->lowest_address != nullptr);
        assert(args->highest_address != nullptr);

        g_card_table = args->card_table;

#ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
        assert(args->card_bundle_table != nullptr);
        g_card_bundle_table = args->card_bundle_table;
#endif // FEATURE_MANUALLY_MANAGED_CARD_BUNDLES

#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
        if (g_sw_ww_enabled_for_gc_heap && (args->write_watch_table != nullptr))
        {
            assert(args->is_runtime_suspended);
            g_sw_ww_table = args->write_watch_table;
        }
#endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP

        stompWBCompleteActions |= ::StompWriteBarrierResize(args->is_runtime_suspended, args->requires_upper_bounds_check);

        // We need to make sure that other threads executing checked write barriers
        // will see the g_card_table update before the g_lowest/highest_address updates.
        // Otherwise, the checked write barrier may AV while accessing the old card table
        // with an address that it does not cover.
        //
        // Even x86's total store ordering is insufficient here because threads reading
        // g_card_table do so via the instruction cache, whereas g_lowest/highest_address
        // are read via the data cache.
        //
        // The g_card_table update is covered by section 8.1.3 of the Intel Software
        // Development Manual, Volume 3A (System Programming Guide, Part 1), about
        // "cross-modifying code": we need all _executing_ threads to invalidate
        // their instruction cache, which FlushProcessWriteBuffers achieves by sending
        // an IPI (inter-processor interrupt).

        if (stompWBCompleteActions & SWB_ICACHE_FLUSH)
        {
            // flush icache on the current processor (thread)
            ::FlushWriteBarrierInstructionCache();
            // ask other processors (threads) to invalidate their icache
            FlushProcessWriteBuffers();
        }

        g_lowest_address = args->lowest_address;
        VolatileStore(&g_highest_address, args->highest_address);
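
        // The VolatileStore above publishes g_highest_address with release semantics,
        // so the new bounds cannot become visible ahead of the g_card_table update
        // and the flushes performed earlier in this sequence.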

#if defined(_ARM64_) || defined(_ARM_)
        // Need to update the barrier again for the changes to g_highest_address and g_lowest_address
        is_runtime_suspended = (stompWBCompleteActions & SWB_EE_RESTART) || args->is_runtime_suspended;
        stompWBCompleteActions |= ::StompWriteBarrierResize(is_runtime_suspended, args->requires_upper_bounds_check);

        if (stompWBCompleteActions & SWB_ICACHE_FLUSH)
        {
            ::FlushWriteBarrierInstructionCache();
        }

        is_runtime_suspended = (stompWBCompleteActions & SWB_EE_RESTART) || args->is_runtime_suspended;
        if (!is_runtime_suspended)
        {
            // If the runtime is not suspended, force the updated state to be visible to all threads.
            MemoryBarrier();
        }
#endif
        if (stompWBCompleteActions & SWB_EE_RESTART)
        {
            assert(!args->is_runtime_suspended &&
                "if runtime was suspended in patching routines then it was in running state at beginning");
            ThreadSuspend::RestartEE(FALSE, TRUE);
        }
        return; // unlike other branches we have already done cleanup so bailing out here

    case WriteBarrierOp::StompEphemeral:
        // StompEphemeral requires a new ephemeral low and a new ephemeral high
        assert(args->ephemeral_low != nullptr);
        assert(args->ephemeral_high != nullptr);
        g_ephemeral_low = args->ephemeral_low;
        g_ephemeral_high = args->ephemeral_high;
        stompWBCompleteActions |= ::StompWriteBarrierEphemeral(args->is_runtime_suspended);
        break;

    case WriteBarrierOp::Initialize:
        // This operation should only be invoked once, upon initialization.
        assert(g_card_table == nullptr);
        assert(g_lowest_address == nullptr);
        assert(g_highest_address == nullptr);
        assert(args->card_table != nullptr);
        assert(args->lowest_address != nullptr);
        assert(args->highest_address != nullptr);
        assert(args->ephemeral_low != nullptr);
        assert(args->ephemeral_high != nullptr);
        assert(args->is_runtime_suspended && "the runtime must be suspended here!");
        assert(!args->requires_upper_bounds_check && "the ephemeral generation must be at the top of the heap!");

        g_card_table = args->card_table;

#ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
        assert(g_card_bundle_table == nullptr);
        g_card_bundle_table = args->card_bundle_table;
#endif // FEATURE_MANUALLY_MANAGED_CARD_BUNDLES

        g_lowest_address = args->lowest_address;
        g_highest_address = args->highest_address;
        stompWBCompleteActions |= ::StompWriteBarrierResize(true, false);

        // StompWriteBarrierResize does not necessarily bash g_ephemeral_low
        // usages, so we must do so here. This is particularly true on x86,
        // where StompWriteBarrierResize will not bash g_ephemeral_low when
        // called with the parameters (true, false), as it is above.
        g_ephemeral_low = args->ephemeral_low;
        g_ephemeral_high = args->ephemeral_high;
        stompWBCompleteActions |= ::StompWriteBarrierEphemeral(true);
        break;

    case WriteBarrierOp::SwitchToWriteWatch:
#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
        assert(args->write_watch_table != nullptr);
        assert(args->is_runtime_suspended && "the runtime must be suspended here!");
        g_sw_ww_table = args->write_watch_table;
        g_sw_ww_enabled_for_gc_heap = true;
        stompWBCompleteActions |= ::SwitchToWriteWatchBarrier(true);
#else
        assert(!"should never be called without FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP");
#endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
        break;
    case WriteBarrierOp::SwitchToNonWriteWatch:
#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
        assert(args->is_runtime_suspended && "the runtime must be suspended here!");
        g_sw_ww_enabled_for_gc_heap = false;
        stompWBCompleteActions |= ::SwitchToNonWriteWatchBarrier(true);
#else
        assert(!"should never be called without FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP");
#endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
        break;
    default:
        assert(!"unknown WriteBarrierOp enum");
    }

    if (stompWBCompleteActions & SWB_ICACHE_FLUSH)
    {
        ::FlushWriteBarrierInstructionCache();
    }
    if (stompWBCompleteActions & SWB_EE_RESTART)
    {
        assert(!args->is_runtime_suspended &&
            "if runtime was suspended in patching routines then it was in running state at beginning");
        ThreadSuspend::RestartEE(FALSE, TRUE);
    }
}

void GCToEEInterface::EnableFinalization(bool foundFinalizers)
{
    if (foundFinalizers || FinalizerThread::HaveExtraWorkForFinalizer())
    {
        FinalizerThread::EnableFinalization();
    }
}

void GCToEEInterface::HandleFatalError(unsigned int exitCode)
{
    EEPOLICY_HANDLE_FATAL_ERROR(exitCode);
}

bool GCToEEInterface::ShouldFinalizeObjectForUnload(void* pDomain, Object* obj)
{
    // CoreCLR does not have appdomains, so this code path is dead. Other runtimes may
    // choose to inspect the object being finalized here.
    // [DESKTOP TODO] Desktop looks for "agile and finalizable" objects and may choose
    // to move them to a new app domain instead of finalizing them here.
    return true;
}

bool GCToEEInterface::EagerFinalized(Object* obj)
{
    MethodTable* pMT = obj->GetGCSafeMethodTable();
    if (pMT == pWeakReferenceMT ||
        pMT->GetCanonicalMethodTable() == pWeakReferenceOfTCanonMT)
    {
        FinalizeWeakReference(obj);
        return true;
    }

    return false;
}
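
// EagerFinalized lets the EE short-circuit finalization: for WeakReference and
// WeakReference<T> instances the native handle is released inline by
// FinalizeWeakReference, and returning true keeps them off the finalizer queue.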

MethodTable* GCToEEInterface::GetFreeObjectMethodTable()
{
    assert(g_pFreeObjectMethodTable != nullptr);
    return g_pFreeObjectMethodTable;
}

// This is arbitrary; we shouldn't ever have config keys
// longer than this.
const size_t MaxConfigKeyLength = 255;

bool GCToEEInterface::GetBooleanConfigValue(const char* key, bool* value)
{
    // these configuration values are given to us via startup flags.
    if (strcmp(key, "gcServer") == 0)
    {
        *value = g_heap_type == GC_HEAP_SVR;
        return true;
    }

    if (strcmp(key, "gcConcurrent") == 0)
    {
        *value = !!g_pConfig->GetGCconcurrent();
        return true;
    }

    if (strcmp(key, "GCRetainVM") == 0)
    {
        *value = !!g_pConfig->GetGCRetainVM();
        return true;
    }

    WCHAR configKey[MaxConfigKeyLength];
    if (MultiByteToWideChar(CP_ACP, 0, key, -1 /* key is null-terminated */, configKey, MaxConfigKeyLength) == 0)
    {
        // whatever this is... it's not something we care about. (It was too long, wasn't unicode, etc.)
        return false;
    }

    // otherwise, ask the config subsystem.
    if (CLRConfig::IsConfigOptionSpecified(configKey))
    {
        CLRConfig::ConfigDWORDInfo info { configKey, 0, CLRConfig::EEConfig_default };
        *value = CLRConfig::GetConfigValue(info) != 0;
        return true;
    }

    return false;
}
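
// Boolean GC configuration is resolved in two tiers above: a few well-known switches
// come straight from startup flags via g_pConfig, and everything else falls through
// to the general CLRConfig store, keyed by the widened name.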

bool GCToEEInterface::GetIntConfigValue(const char* key, int64_t* value)
{
    if (strcmp(key, "GCSegmentSize") == 0)
    {
        *value = g_pConfig->GetSegmentSize();
        return true;
    }

    if (strcmp(key, "GCgen0size") == 0)
    {
        *value = g_pConfig->GetGCgen0size();
        return true;
    }

    if (strcmp(key, "GCLOHThreshold") == 0)
    {
        *value = g_pConfig->GetGCLOHThreshold();
        return true;
    }

    WCHAR configKey[MaxConfigKeyLength];
    if (MultiByteToWideChar(CP_ACP, 0, key, -1 /* key is null-terminated */, configKey, MaxConfigKeyLength) == 0)
    {
        // whatever this is... it's not something we care about. (It was too long, wasn't unicode, etc.)
        return false;
    }

    // There is no ConfigULONGLONGInfo, and the GC uses 64 bit values for things like GCHeapAffinitizeMask,
    // so we have to fake it by getting the string and converting it to uint64_t.
    if (CLRConfig::IsConfigOptionSpecified(configKey))
    {
        CLRConfig::ConfigStringInfo info { configKey, CLRConfig::EEConfig_default };
        LPWSTR out = CLRConfig::GetConfigValue(info);
        if (!out)
        {
            // config not found
            CLRConfig::FreeConfigString(out);
            return false;
        }

        wchar_t* end;
        uint64_t result;
        errno = 0;
        result = _wcstoui64(out, &end, 16);
        // errno is ERANGE if the number is out of range, and end is set to the start of
        // the input string if no valid conversion exists.
        if (errno == ERANGE || end == out)
        {
            CLRConfig::FreeConfigString(out);
            return false;
        }

        *value = static_cast<int64_t>(result);
        CLRConfig::FreeConfigString(out);
        return true;
    }

    return false;
}
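
// Note the radix above: _wcstoui64 parses the configured string as hexadecimal
// (base 16), which matches how numeric CLRConfig values such as
// GCHeapAffinitizeMask are specified.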

bool GCToEEInterface::GetStringConfigValue(const char* key, const char** value)
{
    WCHAR configKey[MaxConfigKeyLength];
    if (MultiByteToWideChar(CP_ACP, 0, key, -1 /* key is null-terminated */, configKey, MaxConfigKeyLength) == 0)
    {
        // whatever this is... it's not something we care about. (It was too long, wasn't unicode, etc.)
        return false;
    }

    CLRConfig::ConfigStringInfo info { configKey, CLRConfig::EEConfig_default };
    LPWSTR out = CLRConfig::GetConfigValue(info);
    if (!out)
    {
        // config not found
        return false;
    }

    int charCount = WideCharToMultiByte(CP_ACP, 0, out, -1 /* out is null-terminated */, NULL, 0, nullptr, nullptr);
    if (charCount == 0)
    {
        // this should only happen if the config subsystem gives us a string that's not valid
        // unicode.
        CLRConfig::FreeConfigString(out);
        return false;
    }

    // not allocated on the stack since it escapes this function
    AStringHolder configResult = new (nothrow) char[charCount];
    if (!configResult)
    {
        CLRConfig::FreeConfigString(out);
        return false;
    }

    if (WideCharToMultiByte(CP_ACP, 0, out, -1 /* out is null-terminated */,
            configResult.GetValue(), charCount, nullptr, nullptr) == 0)
    {
        // this should never happen; the previous call to WideCharToMultiByte that computed charCount should
        // have caught all issues.
        assert(false);
        CLRConfig::FreeConfigString(out);
        return false;
    }

    *value = configResult.Extract();
    CLRConfig::FreeConfigString(out);
    return true;
}

void GCToEEInterface::FreeStringConfigValue(const char* value)
{
    delete [] value;
}

bool GCToEEInterface::IsGCThread()
{
    return !!::IsGCThread();
}

bool GCToEEInterface::WasCurrentThreadCreatedByGC()
{
    return !!::IsGCSpecialThread();
}

struct SuspendableThreadStubArguments
{
    void* Argument;
    void (*ThreadStart)(void*);
    Thread* Thread;
    bool HasStarted;
    CLREvent ThreadStartedEvent;
};

struct ThreadStubArguments
{
    void* Argument;
    void (*ThreadStart)(void*);
    HANDLE Thread;
    bool HasStarted;
    CLREvent ThreadStartedEvent;
};

namespace
{
    const size_t MaxThreadNameSize = 255;

    bool CreateSuspendableThread(
        void (*threadStart)(void*),
        void* argument,
        const wchar_t* name)
    {
        LIMITED_METHOD_CONTRACT;

        SuspendableThreadStubArguments args;
        args.Argument = argument;
        args.ThreadStart = threadStart;
        args.Thread = nullptr;
        args.HasStarted = false;
        if (!args.ThreadStartedEvent.CreateAutoEventNoThrow(FALSE))
        {
            return false;
        }

        EX_TRY
        {
            args.Thread = SetupUnstartedThread(FALSE);
        }
        EX_CATCH
        {
        }
        EX_END_CATCH(SwallowAllExceptions)

        if (!args.Thread)
        {
            args.ThreadStartedEvent.CloseEvent();
            return false;
        }

        auto threadStub = [](void* argument) -> DWORD
        {
            SuspendableThreadStubArguments* args = static_cast<SuspendableThreadStubArguments*>(argument);
            assert(args != nullptr);

            ClrFlsSetThreadType(ThreadType_GC);
            args->Thread->SetGCSpecial(true);
            STRESS_LOG_RESERVE_MEM(GC_STRESSLOG_MULTIPLY);
            args->HasStarted = !!args->Thread->HasStarted(false);

            Thread* thread = args->Thread;
            auto threadStart = args->ThreadStart;
            void* threadArgument = args->Argument;
            bool hasStarted = args->HasStarted;
            args->ThreadStartedEvent.Set();

            // The args cannot be used once the event is set, since that releases the wait on the
            // event in the function that created this thread, and the args go out of scope there.
            if (hasStarted)
            {
                threadStart(threadArgument);
                DestroyThread(thread);
            }

            return 0;
        };

        if (!args.Thread->CreateNewThread(0, threadStub, &args, name))
        {
            args.Thread->DecExternalCount(FALSE);
            args.ThreadStartedEvent.CloseEvent();
            return false;
        }

        args.Thread->SetBackground(TRUE, FALSE);
        args.Thread->StartThread();

        // Wait for the thread to be in its main loop
        uint32_t res = args.ThreadStartedEvent.Wait(INFINITE, FALSE);
        args.ThreadStartedEvent.CloseEvent();
        _ASSERTE(res == WAIT_OBJECT_0);

        if (!args.HasStarted)
        {
            // The thread failed to start, and the Thread object was destroyed in the
            // Thread::HasStarted failure code path.
            return false;
        }

        return true;
    }
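
    // Both thread-creation helpers rely on the same handshake: the creator blocks on
    // ThreadStartedEvent until the stub has copied what it needs out of the
    // stack-allocated argument block, which is what makes passing &args safe.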

    bool CreateNonSuspendableThread(
        void (*threadStart)(void*),
        void* argument,
        const wchar_t* name)
    {
        LIMITED_METHOD_CONTRACT;

        ThreadStubArguments args;
        args.Argument = argument;
        args.ThreadStart = threadStart;
        args.Thread = INVALID_HANDLE_VALUE;
        if (!args.ThreadStartedEvent.CreateAutoEventNoThrow(FALSE))
        {
            return false;
        }

        auto threadStub = [](void* argument) -> DWORD
        {
            ThreadStubArguments* args = static_cast<ThreadStubArguments*>(argument);
            assert(args != nullptr);

            ClrFlsSetThreadType(ThreadType_GC);
            STRESS_LOG_RESERVE_MEM(GC_STRESSLOG_MULTIPLY);

            args->HasStarted = true;
            auto threadStart = args->ThreadStart;
            void* threadArgument = args->Argument;
            args->ThreadStartedEvent.Set();

            // The stub args cannot be used once the event is set, since that releases the wait on the
            // event in the function that created this thread, and the stub args go out of scope there.
            threadStart(threadArgument);
            return 0;
        };

        args.Thread = Thread::CreateUtilityThread(Thread::StackSize_Medium, threadStub, &args, name);
        if (args.Thread == INVALID_HANDLE_VALUE)
        {
            args.ThreadStartedEvent.CloseEvent();
            return false;
        }

        // Wait for the thread to be in its main loop
        uint32_t res = args.ThreadStartedEvent.Wait(INFINITE, FALSE);
        args.ThreadStartedEvent.CloseEvent();
        _ASSERTE(res == WAIT_OBJECT_0);

        CloseHandle(args.Thread);
        return true;
    }
} // anonymous namespace

bool GCToEEInterface::CreateThread(void (*threadStart)(void*), void* arg, bool is_suspendable, const char* name)
{
    InlineSString<MaxThreadNameSize> wideName;
    const WCHAR* namePtr = nullptr;
    EX_TRY
    {
        if (name != nullptr)
        {
            wideName.SetUTF8(name);
            namePtr = wideName.GetUnicode();
        }
    }
    EX_CATCH
    {
        // we're not obligated to provide a name - if it's not valid,
        // just report nullptr as the name.
    }
    EX_END_CATCH(SwallowAllExceptions)

    LIMITED_METHOD_CONTRACT;

    if (is_suspendable)
    {
        return CreateSuspendableThread(threadStart, arg, namePtr);
    }
    else
    {
        return CreateNonSuspendableThread(threadStart, arg, namePtr);
    }
}
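
// A "suspendable" GC thread is backed by a full EE Thread object (SetupUnstartedThread)
// and participates in EE suspension, while the non-suspendable variant is a plain
// utility thread the EE never needs to stop; the GC chooses via is_suspendable.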

void GCToEEInterface::WalkAsyncPinnedForPromotion(Object* object, ScanContext* sc, promote_func* callback)
{
    LIMITED_METHOD_CONTRACT;

    assert(object != nullptr);
    assert(sc != nullptr);
    assert(callback != nullptr);
    if (object->GetGCSafeMethodTable() != g_pOverlappedDataClass)
    {
        // not an overlapped data object - nothing to do.
        return;
    }

    // report the pinned user objects
    OverlappedDataObject *pOverlapped = (OverlappedDataObject *)object;
    if (pOverlapped->m_userObject != NULL)
    {
        if (pOverlapped->m_userObject->GetGCSafeMethodTable() == g_pPredefinedArrayTypes[ELEMENT_TYPE_OBJECT]->GetMethodTable())
        {
            // OverlappedDataObject is very special: an async pin handle keeps it alive.
            // During GC, we also make sure that
            // 1. m_userObject itself does not move if m_userObject is not an array
            // 2. every object pointed to by m_userObject does not move if m_userObject is an array
            // We do not want to pin m_userObject if it is an array.
            ArrayBase* pUserObject = (ArrayBase*)OBJECTREFToObject(pOverlapped->m_userObject);
            Object **ppObj = (Object**)pUserObject->GetDataPtr(TRUE);
            size_t num = pUserObject->GetNumComponents();
            for (size_t i = 0; i < num; i++)
            {
                callback(ppObj + i, sc, GC_CALL_PINNED);
            }
        }
        else
        {
            callback(&OBJECTREF_TO_UNCHECKED_OBJECTREF(pOverlapped->m_userObject), (ScanContext *)sc, GC_CALL_PINNED);
        }
    }
}

void GCToEEInterface::WalkAsyncPinned(Object* object, void* context, void (*callback)(Object*, Object*, void*))
{
    LIMITED_METHOD_CONTRACT;

    assert(object != nullptr);
    assert(callback != nullptr);

    if (object->GetGCSafeMethodTable() != g_pOverlappedDataClass)
    {
        return;
    }

    OverlappedDataObject *pOverlapped = (OverlappedDataObject *)(object);
    if (pOverlapped->m_userObject != NULL)
    {
        Object * pUserObject = OBJECTREFToObject(pOverlapped->m_userObject);
        callback(object, pUserObject, context);
        if (pOverlapped->m_userObject->GetGCSafeMethodTable() == g_pPredefinedArrayTypes[ELEMENT_TYPE_OBJECT]->GetMethodTable())
        {
            ArrayBase* pUserArrayObject = (ArrayBase*)pUserObject;
            Object **pObj = (Object**)pUserArrayObject->GetDataPtr(TRUE);
            size_t num = pUserArrayObject->GetNumComponents();
            for (size_t i = 0; i < num; i++)
            {
                callback(pUserObject, pObj[i], context);
            }
        }
    }
}

IGCToCLREventSink* GCToEEInterface::EventSink()
{
    LIMITED_METHOD_CONTRACT;

    return &g_gcToClrEventSink;
}

uint32_t GCToEEInterface::GetDefaultDomainIndex()
{
    LIMITED_METHOD_CONTRACT;

    return DefaultADID;
}

void *GCToEEInterface::GetAppDomainAtIndex(uint32_t appDomainIndex)
{
    LIMITED_METHOD_CONTRACT;

    return ::GetAppDomain();
}

bool GCToEEInterface::AppDomainCanAccessHandleTable(uint32_t appDomainID)
{
    LIMITED_METHOD_CONTRACT;

    return appDomainID == DefaultADID;
}

uint32_t GCToEEInterface::GetIndexOfAppDomainBeingUnloaded()
{
    LIMITED_METHOD_CONTRACT;

    return 0xFFFFFFFF;
}

uint32_t GCToEEInterface::GetTotalNumSizedRefHandles()
{
    LIMITED_METHOD_CONTRACT;

    return SystemDomain::System()->GetTotalNumSizedRefHandles();
}

bool GCToEEInterface::AppDomainIsRudeUnload(void *appDomain)
{
    LIMITED_METHOD_CONTRACT;

    return false;
}

bool GCToEEInterface::AnalyzeSurvivorsRequested(int condemnedGeneration)
{
    LIMITED_METHOD_CONTRACT;

    // Is the list active?
    GcNotifications gn(g_pGcNotificationTable);
    if (gn.IsActive())
    {
        GcEvtArgs gea = { GC_MARK_END, { (1<<condemnedGeneration) } };
        if (gn.GetNotification(gea) != 0)
        {
            return true;
        }
    }

    return false;
}

void GCToEEInterface::AnalyzeSurvivorsFinished(int condemnedGeneration)
{
    LIMITED_METHOD_CONTRACT;

    // Is the list active?
    GcNotifications gn(g_pGcNotificationTable);
    if (gn.IsActive())
    {
        GcEvtArgs gea = { GC_MARK_END, { (1<<condemnedGeneration) } };
        if (gn.GetNotification(gea) != 0)
        {
            DACNotify::DoGCNotification(gea);
        }
    }
}

void GCToEEInterface::VerifySyncTableEntry()
{
    LIMITED_METHOD_CONTRACT;

#ifdef VERIFY_HEAP
    SyncBlockCache::GetSyncBlockCache()->VerifySyncTableEntry();
#endif // VERIFY_HEAP
}

void GCToEEInterface::UpdateGCEventStatus(int currentPublicLevel, int currentPublicKeywords, int currentPrivateLevel, int currentPrivateKeywords)
{
#if defined(__linux__)
    LIMITED_METHOD_CONTRACT;
    // LTTng does not have a notion of enabling events via "keyword"/"level", but we have to
    // implement a similar behavior on top of it somehow.
    //
    // To do this, we manually check for events that are enabled via different provider/keyword/level combinations.
    // Ex 1. GCJoin_V2 is what we use to check whether the GC keyword is enabled at verbose level in the public provider
    // Ex 2. SetGCHandle is what we use to check whether the GCHandle keyword is enabled at informational level in the public provider
    // Refer to the comments in src/vm/gcenv.ee.h next to the EXTERN C definitions to see which events are enabled.
    //
    // WARNING: if an event's GC level changes, the perfcollect script needs to be updated simultaneously to reflect it.
    BOOL keyword_gc_verbose = EventXplatEnabledGCJoin_V2();
    BOOL keyword_gc_informational = EventXplatEnabledGCStart();

    BOOL keyword_gc_heapsurvival_and_movement_informational = EventXplatEnabledGCGenerationRange();
    BOOL keyword_gchandle_informational = EventXplatEnabledSetGCHandle();
    BOOL keyword_gchandle_prv_informational = EventXplatEnabledPrvSetGCHandle();

    BOOL prv_gcprv_informational = EventXplatEnabledBGCBegin();
    BOOL prv_gcprv_verbose = EventXplatEnabledPinPlugAtGCTime();

    int publicProviderLevel = keyword_gc_verbose ? GCEventLevel_Verbose : (keyword_gc_informational ? GCEventLevel_Information : GCEventLevel_None);
    int publicProviderKeywords = (keyword_gc_informational ? GCEventKeyword_GC : GCEventKeyword_None) |
                                 (keyword_gchandle_informational ? GCEventKeyword_GCHandle : GCEventKeyword_None) |
                                 (keyword_gc_heapsurvival_and_movement_informational ? GCEventKeyword_GCHeapSurvivalAndMovement : GCEventKeyword_None);

    int privateProviderLevel = prv_gcprv_verbose ? GCEventLevel_Verbose : (prv_gcprv_informational ? GCEventLevel_Information : GCEventLevel_None);
    int privateProviderKeywords = (prv_gcprv_informational ? GCEventKeyword_GCPrivate : GCEventKeyword_None) |
                                  (keyword_gchandle_prv_informational ? GCEventKeyword_GCHandlePrivate : GCEventKeyword_None);

    if (publicProviderLevel != currentPublicLevel || publicProviderKeywords != currentPublicKeywords)
    {
        GCEventLevel publicLevel = static_cast<GCEventLevel>(publicProviderLevel);
        GCEventKeyword publicKeywords = static_cast<GCEventKeyword>(publicProviderKeywords);
        GCHeapUtilities::RecordEventStateChange(true, publicKeywords, publicLevel);
    }

    if (privateProviderLevel != currentPrivateLevel || privateProviderKeywords != currentPrivateKeywords)
    {
        GCEventLevel privateLevel = static_cast<GCEventLevel>(privateProviderLevel);
        GCEventKeyword privateKeywords = static_cast<GCEventKeyword>(privateProviderKeywords);
        GCHeapUtilities::RecordEventStateChange(false, privateKeywords, privateLevel);
    }
#endif // __linux__
}