1 // Licensed to the .NET Foundation under one or more agreements.
2 // The .NET Foundation licenses this file to you under the MIT license.
3 // See the LICENSE file in the project root for more information.
8 * GCToEEInterface implementation
14 void GCToEEInterface::SuspendEE(SUSPEND_REASON reason)
18 static_assert_no_msg(SUSPEND_FOR_GC == (int)ThreadSuspend::SUSPEND_FOR_GC);
19 static_assert_no_msg(SUSPEND_FOR_GC_PREP == (int)ThreadSuspend::SUSPEND_FOR_GC_PREP);
21 _ASSERTE(reason == SUSPEND_FOR_GC || reason == SUSPEND_FOR_GC_PREP);
23 ThreadSuspend::SuspendEE((ThreadSuspend::SUSPEND_REASON)reason);
26 void GCToEEInterface::RestartEE(bool bFinishedGC)
30 ThreadSuspend::RestartEE(bFinishedGC, TRUE);
33 VOID GCToEEInterface::SyncBlockCacheWeakPtrScan(HANDLESCANPROC scanProc, uintptr_t lp1, uintptr_t lp2)
42 SyncBlockCache::GetSyncBlockCache()->GCWeakPtrScan(scanProc, lp1, lp2);
46 // The EE can perform post-stack-scanning actions while the
47 // user threads are still suspended.
48 VOID GCToEEInterface::AfterGcScanRoots (int condemned, int max_gen,
58 #ifdef FEATURE_COMINTEROP
59 // Go through all the app domains and for each one detach all the *unmarked* RCWs to prevent
60 // the RCW cache from resurrecting them.
61 UnsafeAppDomainIterator i(TRUE);
66 i.GetDomain()->DetachRCWs();
68 #endif // FEATURE_COMINTEROP
72 * Scan all stack roots
75 static void ScanStackRoots(Thread * pThread, promote_func* fn, ScanContext* sc)
83 ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE();
85 // Either we are in a concurrent situation (in which case the thread is unknown to
86 // us), or we are performing a synchronous GC and we are the GC thread, holding
87 // the threadstore lock.
89 _ASSERTE(dbgOnly_IsSpecialEEThread() ||
90 GetThread() == NULL ||
91 // this is for background GC threads which always call this when EE is suspended.
92 IsGCSpecialThread() ||
93 (GetThread() == ThreadSuspend::GetSuspensionThread() && ThreadStore::HoldingThreadStore()));
95 pThread->SetHasPromotedBytes();
97 Frame* pTopFrame = pThread->GetFrame();
98 Object ** topStack = (Object **)pTopFrame;
99 if ((pTopFrame != ((Frame*)-1))
100 && (pTopFrame->GetVTablePtr() == InlinedCallFrame::GetMethodFrameVPtr())) {
101 // It is an InlinedCallFrame. Get SP from it.
102 InlinedCallFrame* pInlinedFrame = (InlinedCallFrame*)pTopFrame;
103 topStack = (Object **)pInlinedFrame->GetCallSiteSP();
106 sc->stack_limit = (uintptr_t)topStack;
108 #ifdef FEATURE_CONSERVATIVE_GC
109 if (g_pConfig->GetGCConservative())
111 // Conservative stack root reporting:
112 // we treat everything on the stack as a pinned interior GC pointer.
113 // Since everything is reported as pinned, the code below need not run again for the relocation phase. (A standalone restatement of this filter follows this function.)
116 Object ** bottomStack = (Object **) pThread->GetCachedStackBase();
118 for (walk = topStack; walk < bottomStack; walk ++)
120 if (((void*)*walk > (void*)bottomStack || (void*)*walk < (void*)topStack) &&
121 ((void*)*walk >= (void*)g_lowest_address && (void*)*walk <= (void*)g_highest_address)
124 //DbgPrintf("promote " FMT_ADDR " : " FMT_ADDR "\n", walk, *walk);
125 fn(walk, sc, GC_CALL_INTERIOR|GC_CALL_PINNED);
130 // Also ask the explicit Frames to report any references they might know about.
131 // Generally these will be a subset of the objects reported below but there's
132 // nothing that guarantees that and in the specific case of a GC protect frame the
133 // references it protects may live at a lower address than the frame itself (and
134 // thus escape the stack range we scanned above).
135 Frame *pFrame = pThread->GetFrame();
136 while (pFrame != FRAME_TOP)
138 pFrame->GcScanRoots(fn, sc);
139 pFrame = pFrame->PtrNextFrame();
145 unsigned flagsStackWalk = ALLOW_ASYNC_STACK_WALK | ALLOW_INVALID_OBJECTS;
146 #if defined(WIN64EXCEPTIONS)
147 flagsStackWalk |= GC_FUNCLET_REFERENCE_REPORTING;
148 #endif // defined(WIN64EXCEPTIONS)
149 pThread->StackWalkFrames( GcStackCrawlCallBack, &gcctx, flagsStackWalk);
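// A standalone restatement of the conservative filter used above, offered as a hedged sketch: the
// parameter names (stackLow/stackHigh stand in for topStack/bottomStack, heapLow/heapHigh for
// g_lowest_address/g_highest_address) are illustrative, and this helper is not part of the runtime.
static inline bool LooksLikeHeapPointer(uintptr_t value,
                                        uintptr_t stackLow, uintptr_t stackHigh,
                                        uintptr_t heapLow, uintptr_t heapHigh)
{
    // Ignore values that point back into the stack region being scanned...
    bool pointsIntoScannedStack = (value >= stackLow && value <= stackHigh);
    // ...and keep only values that fall inside the address range covered by the GC heap.
    bool pointsIntoHeapRange = (value >= heapLow && value <= heapHigh);
    return !pointsIntoScannedStack && pointsIntoHeapRange;
}
// Slots passing this filter are reported with GC_CALL_INTERIOR | GC_CALL_PINNED, so the collector
// neither relocates what they reference nor assumes they point at an object header.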
153 void GCToEEInterface::GcScanRoots(promote_func* fn, int condemned, int max_gen, ScanContext* sc)
155 STRESS_LOG1(LF_GCROOTS, LL_INFO10, "GCScan: Promotion Phase = %d\n", sc->promotion);
157 // In server GC, we should be competing for marking the statics
158 if (GCHeapUtilities::MarkShouldCompeteForStatics())
160 if (condemned == max_gen && sc->promotion)
162 SystemDomain::EnumAllStaticGCRefs(fn, sc);
166 Thread* pThread = NULL;
167 while ((pThread = ThreadStore::GetThreadList(pThread)) != NULL)
169 STRESS_LOG2(LF_GC | LF_GCROOTS, LL_INFO100, "{ Starting scan of Thread %p ID = %x\n", pThread, pThread->GetThreadId());
171 if (GCHeapUtilities::GetGCHeap()->IsThreadUsingAllocationContextHeap(
172 pThread->GetAllocContext(), sc->thread_number))
174 sc->thread_under_crawl = pThread;
175 #ifdef FEATURE_EVENT_TRACE
176 sc->dwEtwRootKind = kEtwGCRootKindStack;
177 #endif // FEATURE_EVENT_TRACE
178 ScanStackRoots(pThread, fn, sc);
179 #ifdef FEATURE_EVENT_TRACE
180 sc->dwEtwRootKind = kEtwGCRootKindOther;
181 #endif // FEATURE_EVENT_TRACE
183 STRESS_LOG2(LF_GC | LF_GCROOTS, LL_INFO100, "Ending scan of Thread %p ID = 0x%x }\n", pThread, pThread->GetThreadId());
187 void GCToEEInterface::GcStartWork (int condemned, int max_gen)
196 // Update AppDomain stage here.
197 SystemDomain::System()->ProcessClearingDomains();
200 // Validate byrefs pinned by IL stubs since the last GC.
201 StubHelpers::ProcessByrefValidationList();
202 #endif // VERIFY_HEAP
204 ExecutionManager::CleanupCodeHeaps();
206 #ifdef FEATURE_EVENT_TRACE
207 ETW::TypeSystemLog::Cleanup();
210 #ifdef FEATURE_COMINTEROP
212 // Let the GC detect managed/native cycles with input from Jupiter:
214 // 1. Report references from RCWs to CCWs based on native references in Jupiter
215 // 2. Identify the subset of CCWs that need to be rooted
217 // We'll build the references from RCW to CCW using
218 // 1. Preallocated arrays
219 // 2. Dependent handles
221 RCWWalker::OnGCStarted(condemned);
222 #endif // FEATURE_COMINTEROP
224 if (condemned == max_gen)
226 ThreadStore::s_pThreadStore->OnMaxGenerationGCStarted();
230 void GCToEEInterface::GcDone(int condemned)
239 #ifdef FEATURE_COMINTEROP
241 // Tell Jupiter that the GC has finished
243 RCWWalker::OnGCFinished(condemned);
244 #endif // FEATURE_COMINTEROP
247 bool GCToEEInterface::RefCountedHandleCallbacks(Object * pObject)
256 #ifdef FEATURE_COMINTEROP
257 //<REVISIT_TODO>@todo optimize the access to the ref-count
258 ComCallWrapper* pWrap = ComCallWrapper::GetWrapperForObject((OBJECTREF)pObject);
259 _ASSERTE(pWrap != NULL);
261 return !!pWrap->IsWrapperActive();
267 void GCToEEInterface::GcBeforeBGCSweepWork()
277 // Validate byrefs pinned by IL stubs since the last GC.
278 StubHelpers::ProcessByrefValidationList();
279 #endif // VERIFY_HEAP
282 void GCToEEInterface::SyncBlockCacheDemote(int max_gen)
291 SyncBlockCache::GetSyncBlockCache()->GCDone(TRUE, max_gen);
294 void GCToEEInterface::SyncBlockCachePromotionsGranted(int max_gen)
303 SyncBlockCache::GetSyncBlockCache()->GCDone(FALSE, max_gen);
306 uint32_t GCToEEInterface::GetActiveSyncBlockCount()
315 return SyncBlockCache::GetSyncBlockCache()->GetActiveCount();
318 gc_alloc_context * GCToEEInterface::GetAllocContext()
322 Thread* pThread = ::GetThread();
323 assert(pThread != nullptr);
324 return pThread->GetAllocContext();
327 void GCToEEInterface::GcEnumAllocContexts(enum_alloc_context_func* fn, void* param)
336 if (GCHeapUtilities::UseThreadAllocationContexts())
338 Thread * pThread = NULL;
339 while ((pThread = ThreadStore::GetThreadList(pThread)) != NULL)
341 fn(pThread->GetAllocContext(), param);
346 fn(&g_global_alloc_context, param);
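// A hedged usage sketch for GcEnumAllocContexts: the GC supplies an enum_alloc_context_func and an
// opaque parameter, and the callback is invoked once per live allocation context (one per thread
// when thread allocation contexts are in use, otherwise just the global context). The tally struct
// and callback below are hypothetical; only the callback shape mirrors the enumeration above.
struct AllocContextTally
{
    size_t count;
};

static void CountAllocContexts(gc_alloc_context* context, void* param)
{
    if (context != nullptr)
    {
        static_cast<AllocContextTally*>(param)->count++;
    }
}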
350 bool GCToEEInterface::IsPreemptiveGCDisabled()
354 Thread* pThread = ::GetThread();
357 return !!pThread->PreemptiveGCDisabled();
363 bool GCToEEInterface::EnablePreemptiveGC()
367 bool bToggleGC = false;
368 Thread* pThread = ::GetThread();
372 bToggleGC = !!pThread->PreemptiveGCDisabled();
375 pThread->EnablePreemptiveGC();
382 void GCToEEInterface::DisablePreemptiveGC()
386 Thread* pThread = ::GetThread();
389 pThread->DisablePreemptiveGC();
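// A hedged sketch of how the two toggles above pair up around a blocking operation: the caller
// remembers whether EnablePreemptiveGC actually switched the mode and restores cooperative mode
// only in that case. The helper name and shape are illustrative, not a runtime API.
static void RunInPreemptiveMode(GCToEEInterface* eeInterface, void (*blockingOperation)(void*), void* state)
{
    bool toggled = eeInterface->EnablePreemptiveGC();   // leave cooperative mode if the thread was in it

    blockingOperation(state);                           // safe to block: suspension no longer waits on this thread

    if (toggled)
    {
        eeInterface->DisablePreemptiveGC();             // re-enter cooperative mode before touching the managed heap
    }
}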
393 Thread* GCToEEInterface::GetThread()
397 return ::GetThread();
400 struct BackgroundThreadStubArgs
403 GCBackgroundThreadFunction threadStart;
405 CLREvent threadStartedEvent;
409 DWORD WINAPI BackgroundThreadStub(void* arg)
411 BackgroundThreadStubArgs* stubArgs = (BackgroundThreadStubArgs*)arg;
412 assert (stubArgs->thread != NULL);
414 ClrFlsSetThreadType (ThreadType_GC);
415 stubArgs->thread->SetGCSpecial(true);
416 STRESS_LOG_RESERVE_MEM (GC_STRESSLOG_MULTIPLY);
418 stubArgs->hasStarted = !!stubArgs->thread->HasStarted(FALSE);
420 Thread* thread = stubArgs->thread;
421 GCBackgroundThreadFunction realThreadStart = stubArgs->threadStart;
422 void* realThreadArg = stubArgs->arg;
423 bool hasStarted = stubArgs->hasStarted;
425 stubArgs->threadStartedEvent.Set();
426 // The stubArgs cannot be used once the event is set: setting it releases the wait in the
427 // function that created this thread, after which the stubArgs go out of scope.
433 result = realThreadStart(realThreadArg);
434 DestroyThread(thread);
444 #if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
445 inline BOOL ShouldTrackMovementForProfilerOrEtw()
448 if (CORProfilerTrackGC())
452 #ifdef FEATURE_EVENT_TRACE
453 if (ETW::GCLog::ShouldTrackMovementForEtw())
459 #endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
461 void ProfScanRootsHelper(Object** ppObject, ScanContext *pSC, uint32_t dwFlags)
463 #if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
464 Object *pObj = *ppObject;
465 if (dwFlags & GC_CALL_INTERIOR)
467 pObj = GCHeapUtilities::GetGCHeap()->GetContainingObject(pObj, true);
471 ScanRootsHelper(pObj, ppObject, pSC, dwFlags);
472 #endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
475 // TODO - at some point we would like to completely decouple profiling
476 // from ETW tracing using a pattern similar to this, where the
477 // ProfilingScanContext has flags about whether or not certain things
478 // should be tracked, and each one of these ProfilerShouldXYZ functions
479 // will check these flags and determine what to do based upon that.
480 // GCProfileWalkHeapWorker can, in turn, call those methods without fear
481 // of things being ifdef'd out.
483 // Returns TRUE if GC profiling is enabled and the profiler
484 // should scan dependent handles, FALSE otherwise.
485 BOOL ProfilerShouldTrackConditionalWeakTableElements()
487 #if defined(GC_PROFILING)
488 return CORProfilerTrackConditionalWeakTableElements();
491 #endif // defined (GC_PROFILING)
494 // If GC profiling is enabled, informs the profiler that we are done
495 // tracing dependent handles.
496 void ProfilerEndConditionalWeakTableElementReferences(void* heapId)
498 #if defined (GC_PROFILING)
499 g_profControlBlock.pProfInterface->EndConditionalWeakTableElementReferences(heapId);
501 UNREFERENCED_PARAMETER(heapId);
502 #endif // defined (GC_PROFILING)
505 // If GC profiling is enabled, informs the profiler that we are done
506 // tracing root references.
507 void ProfilerEndRootReferences2(void* heapId)
509 #if defined (GC_PROFILING)
510 g_profControlBlock.pProfInterface->EndRootReferences2(heapId);
512 UNREFERENCED_PARAMETER(heapId);
513 #endif // defined (GC_PROFILING)
516 void GcScanRootsForProfilerAndETW(promote_func* fn, int condemned, int max_gen, ScanContext* sc)
518 Thread* pThread = NULL;
519 while ((pThread = ThreadStore::GetThreadList(pThread)) != NULL)
521 sc->thread_under_crawl = pThread;
522 #ifdef FEATURE_EVENT_TRACE
523 sc->dwEtwRootKind = kEtwGCRootKindStack;
524 #endif // FEATURE_EVENT_TRACE
525 ScanStackRoots(pThread, fn, sc);
526 #ifdef FEATURE_EVENT_TRACE
527 sc->dwEtwRootKind = kEtwGCRootKindOther;
528 #endif // FEATURE_EVENT_TRACE
532 void ScanHandleForProfilerAndETW(Object** pRef, Object* pSec, uint32_t flags, ScanContext* context, bool isDependent)
534 ProfilingScanContext* pSC = (ProfilingScanContext*)context;
537 // Give the profiler the objectref.
538 if (pSC->fProfilerPinned)
542 BEGIN_PIN_PROFILER(CORProfilerTrackGC());
543 g_profControlBlock.pProfInterface->RootReference2(
545 kEtwGCRootKindHandle,
546 (EtwGCRootFlags)flags,
553 BEGIN_PIN_PROFILER(CORProfilerTrackConditionalWeakTableElements());
554 g_profControlBlock.pProfInterface->ConditionalWeakTableElementReference(
562 #endif // GC_PROFILING
564 #if defined(FEATURE_EVENT_TRACE)
565 // Notify ETW of the handle
566 if (ETW::GCLog::ShouldWalkHeapRootsForEtw())
568 ETW::GCLog::RootReference(
570 *pRef, // object being rooted
571 pSec, // pSecondaryNodeForDependentHandle
575 flags); // ETW handle flags
577 #endif // defined(FEATURE_EVENT_TRACE)
580 // This is called only if we've determined that either:
581 // a) The Profiling API wants to do a walk of the heap, and it has pinned the
582 // profiler in place (so it cannot be detached), and it's thus safe to call into the
584 // b) ETW infrastructure wants to do a walk of the heap either to log roots,
586 // This can also be called to do a single walk for BOTH a) and b) simultaneously. Since
587 // ETW can ask for roots, but not objects
588 #if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
589 void GCProfileWalkHeapWorker(BOOL fProfilerPinned, BOOL fShouldWalkHeapRootsForEtw, BOOL fShouldWalkHeapObjectsForEtw)
592 ProfilingScanContext SC(fProfilerPinned);
593 unsigned max_generation = GCHeapUtilities::GetGCHeap()->GetMaxGeneration();
595 // **** Scan roots: Only scan roots if profiling API wants them or ETW wants them.
596 if (fProfilerPinned || fShouldWalkHeapRootsForEtw)
598 GcScanRootsForProfilerAndETW(&ProfScanRootsHelper, max_generation, max_generation, &SC);
599 SC.dwEtwRootKind = kEtwGCRootKindFinalizer;
600 GCHeapUtilities::GetGCHeap()->DiagScanFinalizeQueue(&ProfScanRootsHelper, &SC);
602 // Handles are kept independent of wks/svr/concurrent builds
603 SC.dwEtwRootKind = kEtwGCRootKindHandle;
604 GCHeapUtilities::GetGCHeap()->DiagScanHandles(&ScanHandleForProfilerAndETW, max_generation, &SC);
606 // indicate that regular handle scanning is over, so we can flush the buffered roots
607 // to the profiler. (This is for profapi only. ETW will flush after the
608 // entire heap walk is complete, via ETW::GCLog::EndHeapDump.)
611 ProfilerEndRootReferences2(&SC.pHeapId);
615 // **** Scan dependent handles: only if the profiler supports it or ETW wants roots
616 if ((fProfilerPinned && ProfilerShouldTrackConditionalWeakTableElements()) ||
617 fShouldWalkHeapRootsForEtw)
619 // GcScanDependentHandlesForProfiler double-checks
620 // CORProfilerTrackConditionalWeakTableElements() before calling into the profiler
622 ProfilingScanContext* pSC = &SC;
624 // We'll re-use pHeapId (which is either still unused (0) or was set to -1 by
625 // EndRootReferences2), so reset it to NULL.
626 _ASSERTE((*((size_t *)(&pSC->pHeapId)) == (size_t)(-1)) ||
627 (*((size_t *)(&pSC->pHeapId)) == (size_t)(0)));
630 GCHeapUtilities::GetGCHeap()->DiagScanDependentHandles(&ScanHandleForProfilerAndETW, max_generation, &SC);
632 // indicate that dependent handle scanning is over, so we can flush the buffered roots
633 // to the profiler. (This is for profapi only. ETW will flush after the
634 // entire heap walk is complete, via ETW::GCLog::EndHeapDump.)
635 if (fProfilerPinned && ProfilerShouldTrackConditionalWeakTableElements())
637 ProfilerEndConditionalWeakTableElementReferences(&SC.pHeapId);
641 ProfilerWalkHeapContext profilerWalkHeapContext(fProfilerPinned, SC.pvEtwContext);
643 // **** Walk objects on heap: only if profiling API wants them or ETW wants them.
644 if (fProfilerPinned || fShouldWalkHeapObjectsForEtw)
646 GCHeapUtilities::GetGCHeap()->DiagWalkHeap(&HeapWalkHelper, &profilerWalkHeapContext, max_generation, true /* walk the large object heap */);
649 #ifdef FEATURE_EVENT_TRACE
650 // **** Done! Indicate to ETW helpers that the heap walk is done, so any buffers
651 // should be flushed into the ETW stream
652 if (fShouldWalkHeapObjectsForEtw || fShouldWalkHeapRootsForEtw)
654 ETW::GCLog::EndHeapDump(&profilerWalkHeapContext);
656 #endif // FEATURE_EVENT_TRACE
659 #endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
661 void GCProfileWalkHeap()
663 BOOL fWalkedHeapForProfiler = FALSE;
665 #ifdef FEATURE_EVENT_TRACE
666 if (ETW::GCLog::ShouldWalkStaticsAndCOMForEtw())
667 ETW::GCLog::WalkStaticsAndCOMForETW();
669 BOOL fShouldWalkHeapRootsForEtw = ETW::GCLog::ShouldWalkHeapRootsForEtw();
670 BOOL fShouldWalkHeapObjectsForEtw = ETW::GCLog::ShouldWalkHeapObjectsForEtw();
671 #else // !FEATURE_EVENT_TRACE
672 BOOL fShouldWalkHeapRootsForEtw = FALSE;
673 BOOL fShouldWalkHeapObjectsForEtw = FALSE;
674 #endif // FEATURE_EVENT_TRACE
676 #if defined (GC_PROFILING)
678 BEGIN_PIN_PROFILER(CORProfilerTrackGC());
679 GCProfileWalkHeapWorker(TRUE /* fProfilerPinned */, fShouldWalkHeapRootsForEtw, fShouldWalkHeapObjectsForEtw);
680 fWalkedHeapForProfiler = TRUE;
683 #endif // defined (GC_PROFILING)
685 #if defined (GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
686 // we need to walk the heap if one of GC_PROFILING or FEATURE_EVENT_TRACE
687 // is defined, since both of them make use of the walk heap worker.
688 if (!fWalkedHeapForProfiler &&
689 (fShouldWalkHeapRootsForEtw || fShouldWalkHeapObjectsForEtw))
691 GCProfileWalkHeapWorker(FALSE /* fProfilerPinned */, fShouldWalkHeapRootsForEtw, fShouldWalkHeapObjectsForEtw);
693 #endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
696 void WalkFReachableObjects(bool isCritical, void* objectID)
698 g_profControlBlock.pProfInterface->FinalizeableObjectQueued(isCritical, (ObjectID)objectID);
701 static fq_walk_fn g_FQWalkFn = &WalkFReachableObjects;
703 void GCToEEInterface::DiagGCStart(int gen, bool isInduced)
706 DiagUpdateGenerationBounds();
707 GarbageCollectionStartedCallback(gen, isInduced);
709 BEGIN_PIN_PROFILER(CORProfilerTrackGC());
712 // When we're walking objects allocated by class, we don't want to walk the large
713 // object heap, because that would count objects that may have been around for a while.
714 GCHeapUtilities::GetGCHeap()->DiagWalkHeap(&AllocByClassHelper, (void *)&context, 0, false);
716 // Notify that we've reached the end of the Gen 0 scan
717 g_profControlBlock.pProfInterface->EndAllocByClass(&context);
721 #endif // GC_PROFILING
724 void GCToEEInterface::DiagUpdateGenerationBounds()
727 if (CORProfilerTrackGC())
728 UpdateGenerationBounds();
729 #endif // GC_PROFILING
732 void GCToEEInterface::DiagGCEnd(size_t index, int gen, int reason, bool fConcurrent)
738 DiagUpdateGenerationBounds();
739 GarbageCollectionFinishedCallback();
741 #endif // GC_PROFILING
744 void GCToEEInterface::DiagWalkFReachableObjects(void* gcContext)
747 if (CORProfilerTrackGC())
749 BEGIN_PIN_PROFILER(CORProfilerPresent());
750 GCHeapUtilities::GetGCHeap()->DiagWalkFinalizeQueue(gcContext, g_FQWalkFn);
753 #endif //GC_PROFILING
756 // Note on last parameter: when calling this for bgc, only ETW
757 // should be sending these events so that existing profapi profilers
758 // don't get confused.
759 void WalkMovedReferences(uint8_t* begin, uint8_t* end,
765 ETW::GCLog::MovedReference(begin, end,
766 (fCompacting ? reloc : 0),
772 void GCToEEInterface::DiagWalkSurvivors(void* gcContext)
774 #if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
775 if (ShouldTrackMovementForProfilerOrEtw())
778 ETW::GCLog::BeginMovedReferences(&context);
779 GCHeapUtilities::GetGCHeap()->DiagWalkSurvivorsWithType(gcContext, &WalkMovedReferences, (void*)context, walk_for_gc);
780 ETW::GCLog::EndMovedReferences(context);
782 #endif //GC_PROFILING || FEATURE_EVENT_TRACE
785 void GCToEEInterface::DiagWalkLOHSurvivors(void* gcContext)
787 #if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
788 if (ShouldTrackMovementForProfilerOrEtw())
791 ETW::GCLog::BeginMovedReferences(&context);
792 GCHeapUtilities::GetGCHeap()->DiagWalkSurvivorsWithType(gcContext, &WalkMovedReferences, (void*)context, walk_for_loh);
793 ETW::GCLog::EndMovedReferences(context);
795 #endif //GC_PROFILING || FEATURE_EVENT_TRACE
798 void GCToEEInterface::DiagWalkBGCSurvivors(void* gcContext)
800 #if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
801 if (ShouldTrackMovementForProfilerOrEtw())
804 ETW::GCLog::BeginMovedReferences(&context);
805 GCHeapUtilities::GetGCHeap()->DiagWalkSurvivorsWithType(gcContext, &WalkMovedReferences, (void*)context, walk_for_bgc);
806 ETW::GCLog::EndMovedReferences(context);
808 #endif //GC_PROFILING || FEATURE_EVENT_TRACE
811 void GCToEEInterface::StompWriteBarrier(WriteBarrierParameters* args)
813 int stompWBCompleteActions = SWB_PASS;
814 bool is_runtime_suspended = false;
816 assert(args != nullptr);
817 switch (args->operation)
819 case WriteBarrierOp::StompResize:
820 // StompResize requires a new card table, a new lowest address, and
821 // a new highest address
822 assert(args->card_table != nullptr);
823 assert(args->lowest_address != nullptr);
824 assert(args->highest_address != nullptr);
826 g_card_table = args->card_table;
828 #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
829 assert(args->card_bundle_table != nullptr);
830 g_card_bundle_table = args->card_bundle_table;
833 #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
834 if (g_sw_ww_enabled_for_gc_heap && (args->write_watch_table != nullptr))
836 assert(args->is_runtime_suspended);
837 g_sw_ww_table = args->write_watch_table;
839 #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
841 stompWBCompleteActions |= ::StompWriteBarrierResize(args->is_runtime_suspended, args->requires_upper_bounds_check);
843 // We need to make sure that other threads executing checked write barriers
844 // will see the g_card_table update before g_lowest/highest_address updates.
845 // Otherwise, the checked write barrier may AV accessing the old card table
846 // with an address that it does not cover.
848 // Even x86's total store ordering is insufficient here because threads reading
849 // g_card_table do so via the instruction cache, whereas g_lowest/highest_address
850 // are read via the data cache.
852 // The g_card_table update is covered by section 8.1.3 of the Intel Software
853 // Development Manual, Volume 3A (System Programming Guide, Part 1), about
854 // "cross-modifying code": We need all _executing_ threads to invalidate
855 // their instruction cache, which FlushProcessWriteBuffers achieves by sending
856 // an IPI (inter-process interrupt).
858 if (stompWBCompleteActions & SWB_ICACHE_FLUSH)
860 // flushing icache on current processor (thread)
861 ::FlushWriteBarrierInstructionCache();
862 // asking other processors (threads) to invalidate their icache
863 FlushProcessWriteBuffers();
866 g_lowest_address = args->lowest_address;
867 VolatileStore(&g_highest_address, args->highest_address);
870 // Need to update again for the changes to g_lowest_address and g_highest_address.
871 is_runtime_suspended = (stompWBCompleteActions & SWB_EE_RESTART) || args->is_runtime_suspended;
872 stompWBCompleteActions |= ::StompWriteBarrierResize(is_runtime_suspended, args->requires_upper_bounds_check);
874 is_runtime_suspended = (stompWBCompleteActions & SWB_EE_RESTART) || args->is_runtime_suspended;
875 if(!is_runtime_suspended)
877 // If runtime is not suspended, force updated state to be visible to all threads
881 if (stompWBCompleteActions & SWB_EE_RESTART)
883 assert(!args->is_runtime_suspended &&
884 "if the runtime was suspended in the patching routines then it was in the running state at the beginning");
885 ThreadSuspend::RestartEE(FALSE, TRUE);
887 return; // unlike the other branches we have already done the cleanup, so bail out here
888 case WriteBarrierOp::StompEphemeral:
889 // StompEphemeral requires a new ephemeral low and a new ephemeral high
890 assert(args->ephemeral_low != nullptr);
891 assert(args->ephemeral_high != nullptr);
892 g_ephemeral_low = args->ephemeral_low;
893 g_ephemeral_high = args->ephemeral_high;
894 stompWBCompleteActions |= ::StompWriteBarrierEphemeral(args->is_runtime_suspended);
896 case WriteBarrierOp::Initialize:
897 // This operation should only be invoked once, upon initialization.
898 assert(g_card_table == nullptr);
899 assert(g_lowest_address == nullptr);
900 assert(g_highest_address == nullptr);
901 assert(args->card_table != nullptr);
902 assert(args->lowest_address != nullptr);
903 assert(args->highest_address != nullptr);
904 assert(args->ephemeral_low != nullptr);
905 assert(args->ephemeral_high != nullptr);
906 assert(args->is_runtime_suspended && "the runtime must be suspended here!");
907 assert(!args->requires_upper_bounds_check && "the ephemeral generation must be at the top of the heap!");
909 g_card_table = args->card_table;
911 #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
912 assert(g_card_bundle_table == nullptr);
913 g_card_bundle_table = args->card_bundle_table;
916 g_lowest_address = args->lowest_address;
917 g_highest_address = args->highest_address;
918 stompWBCompleteActions |= ::StompWriteBarrierResize(true, false);
920 // StompWriteBarrierResize does not necessarily bash g_ephemeral_low
921 // usages, so we must do so here. This is particularly true on x86,
922 // where StompWriteBarrierResize will not bash g_ephemeral_low when
923 // called with the parameters (true, false), as it is above.
924 g_ephemeral_low = args->ephemeral_low;
925 g_ephemeral_high = args->ephemeral_high;
926 stompWBCompleteActions |= ::StompWriteBarrierEphemeral(true);
928 case WriteBarrierOp::SwitchToWriteWatch:
929 #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
930 assert(args->write_watch_table != nullptr);
931 assert(args->is_runtime_suspended && "the runtime must be suspended here!");
932 g_sw_ww_table = args->write_watch_table;
933 g_sw_ww_enabled_for_gc_heap = true;
934 stompWBCompleteActions |= ::SwitchToWriteWatchBarrier(true);
936 assert(!"should never be called without FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP");
937 #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
939 case WriteBarrierOp::SwitchToNonWriteWatch:
940 #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
941 assert(args->is_runtime_suspended && "the runtime must be suspended here!");
943 g_sw_ww_enabled_for_gc_heap = false;
944 stompWBCompleteActions |= ::SwitchToNonWriteWatchBarrier(true);
946 assert(!"should never be called without FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP");
947 #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
950 assert(!"unknown WriteBarrierOp enum");
952 if (stompWBCompleteActions & SWB_ICACHE_FLUSH)
954 ::FlushWriteBarrierInstructionCache();
956 if (stompWBCompleteActions & SWB_EE_RESTART)
958 assert(!args->is_runtime_suspended &&
959 "if the runtime was suspended in the patching routines then it was in the running state at the beginning");
960 ThreadSuspend::RestartEE(FALSE, TRUE);
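// A hedged sketch of the ordering constraint documented in the StompResize case above. The helper
// name is hypothetical and the body omits the barrier patching and the SWB_* action bookkeeping;
// the point is only the sequence: publish the card table, flush, and publish the bounds last, so a
// checked barrier can never pair the new bounds with the old card table.
static void PublishResizedCardTableSketch(uint32_t* newCardTable, uint8_t* newLowest, uint8_t* newHighest)
{
    g_card_table = newCardTable;                        // 1. publish the new card table first

    ::FlushWriteBarrierInstructionCache();              // 2a. drop stale barrier code on this thread
    FlushProcessWriteBuffers();                         // 2b. have all other executing threads do the same (via IPI)

    g_lowest_address = newLowest;                       // 3. only now widen the covered range,
    VolatileStore(&g_highest_address, newHighest);      //    releasing the upper bound last
}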
964 void GCToEEInterface::EnableFinalization(bool foundFinalizers)
966 if (foundFinalizers || FinalizerThread::HaveExtraWorkForFinalizer())
968 FinalizerThread::EnableFinalization();
972 void GCToEEInterface::HandleFatalError(unsigned int exitCode)
974 EEPOLICY_HANDLE_FATAL_ERROR(exitCode);
977 bool GCToEEInterface::ShouldFinalizeObjectForUnload(AppDomain* pDomain, Object* obj)
979 // CoreCLR does not have appdomains, so this code path is dead. Other runtimes may
980 // choose to inspect the object being finalized here.
981 // [DESKTOP TODO] Desktop looks for "agile and finalizable" objects and may choose
982 // to move them to a new app domain instead of finalizing them here.
986 bool GCToEEInterface::ForceFullGCToBeBlocking()
988 // In theory, there is nothing fundamental that requires an AppDomain unload to induce
989 // a blocking GC. In the past, this workaround was added to fix a stress AV, but the root
990 // cause of the AV was never discovered and this workaround remains in place.
992 // It would be nice if this were not necessary. However, it's not clear if the aforementioned
993 // stress bug is still lurking and will return if this workaround is removed. We should
994 // do some experiments: remove this workaround and see if the stress bug still repros.
995 // If so, we should find the root cause instead of relying on this.
996 return !!SystemDomain::System()->RequireAppDomainCleanup();
999 bool GCToEEInterface::EagerFinalized(Object* obj)
1001 MethodTable* pMT = obj->GetGCSafeMethodTable();
1002 if (pMT == pWeakReferenceMT ||
1003 pMT->GetCanonicalMethodTable() == pWeakReferenceOfTCanonMT)
1005 FinalizeWeakReference(obj);
1012 MethodTable* GCToEEInterface::GetFreeObjectMethodTable()
1014 assert(g_pFreeObjectMethodTable != nullptr);
1015 return g_pFreeObjectMethodTable;
1018 // These limits are arbitrary; we should never have config keys or values
1019 // longer than these lengths.
1020 const size_t MaxConfigKeyLength = 255;
1021 const size_t MaxConfigValueLength = 255;
1023 bool GCToEEInterface::GetBooleanConfigValue(const char* key, bool* value)
1030 // these configuration values are given to us via startup flags.
1031 if (strcmp(key, "gcServer") == 0)
1033 *value = g_heap_type == GC_HEAP_SVR;
1037 if (strcmp(key, "gcConcurrent") == 0)
1039 *value = !!g_pConfig->GetGCconcurrent();
1043 if (strcmp(key, "GCRetainVM") == 0)
1045 *value = !!g_pConfig->GetGCRetainVM();
1049 WCHAR configKey[MaxConfigKeyLength];
1050 if (MultiByteToWideChar(CP_ACP, 0, key, -1 /* key is null-terminated */, configKey, MaxConfigKeyLength) == 0)
1052 // whatever this is... it's not something we care about. (It was too long, wasn't unicode, etc.)
1056 // otherwise, ask the config subsystem.
1057 if (CLRConfig::IsConfigOptionSpecified(configKey))
1059 CLRConfig::ConfigDWORDInfo info { configKey , 0, CLRConfig::EEConfig_default };
1060 *value = CLRConfig::GetConfigValue(info) != 0;
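// A hedged usage sketch: callers on the GC side query by key and must handle the "not specified"
// case themselves, since false is returned without writing to *value. "gcConcurrent" is one of the
// keys handled above; the helper name and fallback value are illustrative only.
static bool IsConcurrentGCConfiguredSketch(GCToEEInterface* eeInterface)
{
    bool enabled = false;
    if (!eeInterface->GetBooleanConfigValue("gcConcurrent", &enabled))
    {
        enabled = true;    // no explicit setting: fall back to a caller-chosen default
    }
    return enabled;
}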
1067 bool GCToEEInterface::GetIntConfigValue(const char* key, int64_t* value)
1074 WCHAR configKey[MaxConfigKeyLength];
1075 if (MultiByteToWideChar(CP_ACP, 0, key, -1 /* key is null-terminated */, configKey, MaxConfigKeyLength) == 0)
1077 // whatever this is... it's not something we care about. (It was too long, wasn't unicode, etc.)
1081 if (CLRConfig::IsConfigOptionSpecified(configKey))
1083 CLRConfig::ConfigDWORDInfo info { configKey , 0, CLRConfig::EEConfig_default };
1084 *value = CLRConfig::GetConfigValue(info);
1091 bool GCToEEInterface::GetStringConfigValue(const char* key, const char** value)
1098 WCHAR configKey[MaxConfigKeyLength];
1099 if (MultiByteToWideChar(CP_ACP, 0, key, -1 /* key is null-terminated */, configKey, MaxConfigKeyLength) == 0)
1101 // whatever this is... it's not something we care about. (It was too long, wasn't unicode, etc.)
1105 CLRConfig::ConfigStringInfo info { configKey, CLRConfig::EEConfig_default };
1106 LPWSTR out = CLRConfig::GetConfigValue(info);
1113 // not allocated on the stack since it escapes this function
1114 AStringHolder configResult = new (nothrow) char[MaxConfigValueLength];
1117 CLRConfig::FreeConfigString(out);
1121 if (WideCharToMultiByte(CP_ACP, 0, out, -1 /* out is null-terminated */,
1122 configResult.GetValue(), MaxConfigValueLength, nullptr, nullptr) == 0)
1124 // this should only happen if the config subsystem gives us a string that's not valid
1126 CLRConfig::FreeConfigString(out);
1130 *value = configResult.Extract();
1131 CLRConfig::FreeConfigString(out);
1135 void GCToEEInterface::FreeStringConfigValue(const char* value)
1140 bool GCToEEInterface::IsGCThread()
1142 return !!::IsGCThread();
1145 bool GCToEEInterface::WasCurrentThreadCreatedByGC()
1147 return !!::IsGCSpecialThread();
1150 struct SuspendableThreadStubArguments
1153 void (*ThreadStart)(void*);
1156 CLREvent ThreadStartedEvent;
1159 struct ThreadStubArguments
1162 void (*ThreadStart)(void*);
1165 CLREvent ThreadStartedEvent;
1170 const size_t MaxThreadNameSize = 255;
1172 bool CreateSuspendableThread(
1173 void (*threadStart)(void*),
1177 LIMITED_METHOD_CONTRACT;
1179 SuspendableThreadStubArguments args;
1180 args.Argument = argument;
1181 args.ThreadStart = threadStart;
1182 args.Thread = nullptr;
1183 args.HasStarted = false;
1184 if (!args.ThreadStartedEvent.CreateAutoEventNoThrow(FALSE))
1191 args.Thread = SetupUnstartedThread(FALSE);
1196 EX_END_CATCH(SwallowAllExceptions)
1200 args.ThreadStartedEvent.CloseEvent();
1204 auto threadStub = [](void* argument) -> DWORD
1206 SuspendableThreadStubArguments* args = static_cast<SuspendableThreadStubArguments*>(argument);
1207 assert(args != nullptr);
1209 ClrFlsSetThreadType(ThreadType_GC);
1210 args->Thread->SetGCSpecial(true);
1211 STRESS_LOG_RESERVE_MEM(GC_STRESSLOG_MULTIPLY);
1212 args->HasStarted = !!args->Thread->HasStarted(false);
1214 Thread* thread = args->Thread;
1215 auto threadStart = args->ThreadStart;
1216 void* threadArgument = args->Argument;
1217 bool hasStarted = args->HasStarted;
1218 args->ThreadStartedEvent.Set();
1220 // The args cannot be used once the event is set: setting it releases the wait in the
1221 // function that created this thread, after which the args go out of scope.
1224 threadStart(threadArgument);
1225 DestroyThread(thread);
1231 InlineSString<MaxThreadNameSize> wideName;
1232 const WCHAR* namePtr = nullptr;
1235 if (name != nullptr)
1237 wideName.SetUTF8(name);
1238 namePtr = wideName.GetUnicode();
1243 // we're not obligated to provide a name - if it's not valid,
1244 // just report nullptr as the name.
1246 EX_END_CATCH(SwallowAllExceptions)
1248 if (!args.Thread->CreateNewThread(0, threadStub, &args, namePtr))
1250 args.Thread->DecExternalCount(FALSE);
1251 args.ThreadStartedEvent.CloseEvent();
1255 args.Thread->SetBackground(TRUE, FALSE);
1256 args.Thread->StartThread();
1258 // Wait for the thread to be in its main loop
1259 uint32_t res = args.ThreadStartedEvent.Wait(INFINITE, FALSE);
1260 args.ThreadStartedEvent.CloseEvent();
1261 _ASSERTE(res == WAIT_OBJECT_0);
1263 if (!args.HasStarted)
1265 // The thread has failed to start and the Thread object was destroyed in the Thread::HasStarted
1266 // failure code path.
1273 bool CreateNonSuspendableThread(
1274 void (*threadStart)(void*),
1278 LIMITED_METHOD_CONTRACT;
1280 ThreadStubArguments args;
1281 args.Argument = argument;
1282 args.ThreadStart = threadStart;
1283 args.Thread = INVALID_HANDLE_VALUE;
1284 if (!args.ThreadStartedEvent.CreateAutoEventNoThrow(FALSE))
1289 auto threadStub = [](void* argument) -> DWORD
1291 ThreadStubArguments* args = static_cast<ThreadStubArguments*>(argument);
1292 assert(args != nullptr);
1294 ClrFlsSetThreadType(ThreadType_GC);
1295 STRESS_LOG_RESERVE_MEM(GC_STRESSLOG_MULTIPLY);
1297 args->HasStarted = true;
1298 auto threadStart = args->ThreadStart;
1299 void* threadArgument = args->Argument;
1300 args->ThreadStartedEvent.Set();
1302 // The stub args cannot be used once the event is set: setting it releases the wait in the
1303 // function that created this thread, after which the args go out of scope.
1304 threadStart(threadArgument);
1308 args.Thread = Thread::CreateUtilityThread(Thread::StackSize_Medium, threadStub, &args);
1309 if (args.Thread == INVALID_HANDLE_VALUE)
1311 args.ThreadStartedEvent.CloseEvent();
1315 // Wait for the thread to be in its main loop
1316 uint32_t res = args.ThreadStartedEvent.Wait(INFINITE, FALSE);
1317 args.ThreadStartedEvent.CloseEvent();
1318 _ASSERTE(res == WAIT_OBJECT_0);
1320 CloseHandle(args.Thread);
1323 } // anonymous namespace
1325 bool GCToEEInterface::CreateThread(void (*threadStart)(void*), void* arg, bool is_suspendable, const char* name)
1327 LIMITED_METHOD_CONTRACT;
1330 return CreateSuspendableThread(threadStart, arg, name);
1334 return CreateNonSuspendableThread(threadStart, arg, name);
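// Both creators above rely on the same handoff idiom: the argument block lives on the creator's
// stack, the new thread copies what it needs into locals, and only then signals the started event;
// the creator waits on that event before returning. A hedged restatement of the stub side of that
// idiom using the ThreadStubArguments shape from the anonymous namespace above (this stub is
// illustrative, not a third creator the runtime defines):
static DWORD IllustrativeThreadStub(void* argument)
{
    ThreadStubArguments* args = static_cast<ThreadStubArguments*>(argument);

    // Copy everything needed into locals *before* signaling: once the event is set, the creator
    // may return and the argument block may no longer point at valid memory.
    auto threadStart = args->ThreadStart;
    void* threadArgument = args->Argument;

    args->ThreadStartedEvent.Set();     // from this point on, 'args' must not be touched

    threadStart(threadArgument);
    return 0;
}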
1338 void GCToEEInterface::WalkAsyncPinnedForPromotion(Object* object, ScanContext* sc, promote_func* callback)
1340 LIMITED_METHOD_CONTRACT;
1342 assert(object != nullptr);
1343 assert(sc != nullptr);
1344 assert(callback != nullptr);
1345 if (object->GetGCSafeMethodTable() != g_pOverlappedDataClass)
1347 // not an overlapped data object - nothing to do.
1351 // reporting the pinned user objects
1352 OverlappedDataObject *pOverlapped = (OverlappedDataObject *)object;
1353 if (pOverlapped->m_userObject != NULL)
1355 //callback(OBJECTREF_TO_UNCHECKED_OBJECTREF(pOverlapped->m_userObject), (ScanContext *)lp1, GC_CALL_PINNED);
1356 if (pOverlapped->m_isArray)
1358 // OverlappedDataObject is very special: an async pin handle keeps it alive.
1359 // During GC, we also make sure that
1360 // 1. m_userObject itself does not move if m_userObject is not an array, and
1361 // 2. every object pointed to by m_userObject does not move if m_userObject is an array.
1362 // We do not want to pin m_userObject itself if it is an array, but m_userObject may be updated
1363 // during the relocation phase before OverlappedDataObject itself is relocated.
1364 // m_userObjectInternal is used to track the location of m_userObject before it is updated.
1365 pOverlapped->m_userObjectInternal = static_cast<void*>(OBJECTREFToObject(pOverlapped->m_userObject));
1366 ArrayBase* pUserObject = (ArrayBase*)OBJECTREFToObject(pOverlapped->m_userObject);
1367 Object **ppObj = (Object**)pUserObject->GetDataPtr(TRUE);
1368 size_t num = pUserObject->GetNumComponents();
1369 for (size_t i = 0; i < num; i++)
1371 callback(ppObj + i, sc, GC_CALL_PINNED);
1376 callback(&OBJECTREF_TO_UNCHECKED_OBJECTREF(pOverlapped->m_userObject), (ScanContext *)sc, GC_CALL_PINNED);
1380 if (pOverlapped->GetAppDomainId() != DefaultADID && pOverlapped->GetAppDomainIndex().m_dwIndex == DefaultADID)
1382 OverlappedDataObject::MarkCleanupNeededFromGC();
1386 void GCToEEInterface::WalkAsyncPinned(Object* object, void* context, void (*callback)(Object*, Object*, void*))
1388 LIMITED_METHOD_CONTRACT;
1390 assert(object != nullptr);
1391 assert(callback != nullptr);
1393 if (object->GetGCSafeMethodTable() != g_pOverlappedDataClass)
1398 OverlappedDataObject *pOverlapped = (OverlappedDataObject *)(object);
1399 if (pOverlapped->m_userObject != NULL)
1401 Object * pUserObject = OBJECTREFToObject(pOverlapped->m_userObject);
1402 callback(object, pUserObject, context);
1403 if (pOverlapped->m_isArray)
1405 ArrayBase* pUserArrayObject = (ArrayBase*)pUserObject;
1406 Object **pObj = (Object**)pUserArrayObject->GetDataPtr(TRUE);
1407 size_t num = pUserArrayObject->GetNumComponents();
1408 for (size_t i = 0; i < num; i ++)
1410 callback(pUserObject, pObj[i], context);
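// A hedged usage sketch for WalkAsyncPinned: the callback receives (source, target, context), where
// source is the async-pinned OverlappedDataObject and target is each object it keeps reachable (the
// user object itself, plus every element when the user object is an array). The counting struct and
// callback below are hypothetical; only the callback signature matches the walker above.
struct AsyncPinnedEdgeCount
{
    size_t edges;
};

static void CountAsyncPinnedEdge(Object* source, Object* target, void* context)
{
    UNREFERENCED_PARAMETER(source);
    if (target != nullptr)
    {
        static_cast<AsyncPinnedEdgeCount*>(context)->edges++;
    }
}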
1416 IGCToCLREventSink* GCToEEInterface::EventSink()
1418 LIMITED_METHOD_CONTRACT;
1420 return &g_gcToClrEventSink;