1 // Licensed to the .NET Foundation under one or more agreements.
2 // The .NET Foundation licenses this file to you under the MIT license.
3 // See the LICENSE file in the project root for more information.
8 * GCToEEInterface implementation
14 void GCToEEInterface::SuspendEE(SUSPEND_REASON reason)
// Suspends the execution engine on behalf of the GC, then tells the debugger
// a collection is about to start. The GC-side SUSPEND_REASON values must stay
// numerically identical to the EE-side ThreadSuspend values so the raw cast
// below is valid; the static_asserts enforce that invariant at compile time.
18 static_assert_no_msg(SUSPEND_FOR_GC == (int)ThreadSuspend::SUSPEND_FOR_GC);
19 static_assert_no_msg(SUSPEND_FOR_GC_PREP == (int)ThreadSuspend::SUSPEND_FOR_GC_PREP);
// Only GC-related suspension reasons are legal through this entry point.
21 _ASSERTE(reason == SUSPEND_FOR_GC || reason == SUSPEND_FOR_GC_PREP);
23 ThreadSuspend::SuspendEE((ThreadSuspend::SUSPEND_REASON)reason);
// Debugger notification happens after suspension so it observes a stopped EE.
25 g_pDebugInterface->BeforeGarbageCollection();
28 void GCToEEInterface::RestartEE(bool bFinishedGC)
// Resumes the execution engine after a GC. The debugger is notified before
// user threads restart, so it sees the post-GC heap while the EE is still stopped.
32 g_pDebugInterface->AfterGarbageCollection();
34 ThreadSuspend::RestartEE(bFinishedGC, TRUE);
// Forwards a weak-pointer scan request from the GC to the sync block cache,
// passing the caller's scan callback and its two opaque context words through.
37 VOID GCToEEInterface::SyncBlockCacheWeakPtrScan(HANDLESCANPROC scanProc, uintptr_t lp1, uintptr_t lp2)
46 SyncBlockCache::GetSyncBlockCache()->GCWeakPtrScan(scanProc, lp1, lp2);
50 //EE can perform post stack scanning action, while the
51 // user threads are still suspended
52 VOID GCToEEInterface::AfterGcScanRoots (int condemned, int max_gen,
62 #ifdef FEATURE_COMINTEROP
63 // Go through all the app domains and for each one detach all the *unmarked* RCWs to prevent
64 // the RCW cache from resurrecting them.
// TRUE here selects the iteration mode of UnsafeAppDomainIterator — safe only
// because the EE is suspended at this point (see comment above).
65 UnsafeAppDomainIterator i(TRUE);
70 i.GetDomain()->DetachRCWs();
72 #endif // FEATURE_COMINTEROP
76 * Scan all stack roots
// Reports GC roots found on a single thread's stack: first (optionally) a
// conservative scan of the raw stack range, then the explicit Frame chain,
// then a full managed stack walk via GcStackCrawlCallBack.
79 static void ScanStackRoots(Thread * pThread, promote_func* fn, ScanContext* sc)
87 ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE();
89 // Either we are in a concurrent situation (in which case the thread is unknown to
90 // us), or we are performing a synchronous GC and we are the GC thread, holding
91 // the threadstore lock.
93 _ASSERTE(dbgOnly_IsSpecialEEThread() ||
94 GetThread() == NULL ||
95 // this is for background GC threads which always call this when EE is suspended.
96 IsGCSpecialThread() ||
97 (GetThread() == ThreadSuspend::GetSuspensionThread() && ThreadStore::HoldingThreadStore()));
99 pThread->SetHasPromotedBytes();
// Establish the top of the scannable stack. By default that is the topmost
// explicit Frame; an InlinedCallFrame records the actual call-site SP, which
// extends the range to cover the inlined P/Invoke region.
101 Frame* pTopFrame = pThread->GetFrame();
102 Object ** topStack = (Object **)pTopFrame;
// (Frame*)-1 is the FRAME_TOP sentinel — no explicit frames pushed.
103 if ((pTopFrame != ((Frame*)-1))
104 && (pTopFrame->GetVTablePtr() == InlinedCallFrame::GetMethodFrameVPtr())) {
105 // It is an InlinedCallFrame. Get SP from it.
106 InlinedCallFrame* pInlinedFrame = (InlinedCallFrame*)pTopFrame;
107 topStack = (Object **)pInlinedFrame->GetCallSiteSP();
110 sc->stack_limit = (uintptr_t)topStack;
112 #ifdef FEATURE_CONSERVATIVE_GC
113 if (g_pConfig->GetGCConservative())
115 // Conservative stack root reporting
116 // We will treat everything on stack as a pinned interior GC pointer
117 // Since we report every thing as pinned, we don't need to run following code for relocation phase.
120 Object ** bottomStack = (Object **) pThread->GetCachedStackBase();
122 for (walk = topStack; walk < bottomStack; walk ++)
// Only report values that (a) do not point back into the scanned stack range
// itself and (b) fall inside the GC heap's address range.
124 if (((void*)*walk > (void*)bottomStack || (void*)*walk < (void*)topStack) &&
125 ((void*)*walk >= (void*)g_lowest_address && (void*)*walk <= (void*)g_highest_address)
128 //DbgPrintf("promote " FMT_ADDR " : " FMT_ADDR "\n", walk, *walk);
129 fn(walk, sc, GC_CALL_INTERIOR|GC_CALL_PINNED);
134 // Also ask the explicit Frames to report any references they might know about.
135 // Generally these will be a subset of the objects reported below but there's
136 // nothing that guarantees that and in the specific case of a GC protect frame the
137 // references it protects may live at a lower address than the frame itself (and
138 // thus escape the stack range we scanned above).
139 Frame *pFrame = pThread->GetFrame();
140 while (pFrame != FRAME_TOP)
142 pFrame->GcScanRoots(fn, sc);
143 pFrame = pFrame->PtrNextFrame();
// Finally, walk managed frames. ALLOW_INVALID_OBJECTS is needed because the
// conservative path above may already have pinned garbage-looking values.
149 unsigned flagsStackWalk = ALLOW_ASYNC_STACK_WALK | ALLOW_INVALID_OBJECTS;
150 #if defined(WIN64EXCEPTIONS)
151 flagsStackWalk |= GC_FUNCLET_REFERENCE_REPORTING;
152 #endif // defined(WIN64EXCEPTIONS)
153 pThread->StackWalkFrames( GcStackCrawlCallBack, &gcctx, flagsStackWalk);
// Main GC root-scanning entry point: reports static roots (full-heap promotion
// only) and every thread's stack roots to the promote callback `fn`.
157 void GCToEEInterface::GcScanRoots(promote_func* fn, int condemned, int max_gen, ScanContext* sc)
159 STRESS_LOG1(LF_GCROOTS, LL_INFO10, "GCScan: Promotion Phase = %d\n", sc->promotion);
161 // In server GC, we should be competing for marking the statics
162 if (GCHeapUtilities::MarkShouldCompeteForStatics())
// Statics only need reporting on a full (max generation) promoting GC.
164 if (condemned == max_gen && sc->promotion)
166 SystemDomain::EnumAllStaticGCRefs(fn, sc);
170 Thread* pThread = NULL;
171 while ((pThread = ThreadStore::GetThreadList(pThread)) != NULL)
173 STRESS_LOG2(LF_GC | LF_GCROOTS, LL_INFO100, "{ Starting scan of Thread %p ID = %x\n", pThread, pThread->GetThreadId());
// In server GC each heap scans only the threads whose alloc context maps to
// it (thread_number); this partitions the stack-scanning work across heaps.
175 if (GCHeapUtilities::GetGCHeap()->IsThreadUsingAllocationContextHeap(
176 pThread->GetAllocContext(), sc->thread_number))
178 sc->thread_under_crawl = pThread;
179 #ifdef FEATURE_EVENT_TRACE
180 sc->dwEtwRootKind = kEtwGCRootKindStack;
181 #endif // FEATURE_EVENT_TRACE
182 ScanStackRoots(pThread, fn, sc);
183 #ifdef FEATURE_EVENT_TRACE
// Reset the ETW root kind so later, non-stack reports aren't misattributed.
184 sc->dwEtwRootKind = kEtwGCRootKindOther;
185 #endif // FEATURE_EVENT_TRACE
187 STRESS_LOG2(LF_GC | LF_GCROOTS, LL_INFO100, "Ending scan of Thread %p ID = 0x%x }\n", pThread, pThread->GetThreadId());
// EE-side bookkeeping performed at the start of a GC: byref validation,
// code-heap cleanup, ETW type-log cleanup, Jupiter/COM cycle detection setup,
// and max-generation notification to the thread store.
191 void GCToEEInterface::GcStartWork (int condemned, int max_gen)
201 // Validate byrefs pinned by IL stubs since the last GC.
202 StubHelpers::ProcessByrefValidationList();
203 #endif // VERIFY_HEAP
205 ExecutionManager::CleanupCodeHeaps();
207 #ifdef FEATURE_EVENT_TRACE
208 ETW::TypeSystemLog::Cleanup();
211 #ifdef FEATURE_COMINTEROP
213 // Let GC detect managed/native cycles with input from jupiter
215 // 1. Report reference from RCW to CCW based on native reference in Jupiter
216 // 2. Identify the subset of CCWs that needs to be rooted
218 // We'll build the references from RCW to CCW using
219 // 1. Preallocated arrays
220 // 2. Dependent handles
222 RCWWalker::OnGCStarted(condemned);
223 #endif // FEATURE_COMINTEROP
// Full-heap collections get an extra notification for gen-2 callbacks.
225 if (condemned == max_gen)
227 ThreadStore::s_pThreadStore->OnMaxGenerationGCStarted();
// Called when a GC completes; currently only notifies the Jupiter RCW walker
// (when COM interop is enabled) that the collection has finished.
231 void GCToEEInterface::GcDone(int condemned)
240 #ifdef FEATURE_COMINTEROP
242 // Tell Jupiter GC has finished
244 RCWWalker::OnGCFinished(condemned);
245 #endif // FEATURE_COMINTEROP
// Decides whether a ref-counted handle's target should be kept alive:
// true iff the object has an active COM callable wrapper.
248 bool GCToEEInterface::RefCountedHandleCallbacks(Object * pObject)
257 #ifdef FEATURE_COMINTEROP
258 //<REVISIT_TODO>@todo optimize the access to the ref-count
259 ComCallWrapper* pWrap = ComCallWrapper::GetWrapperForObject((OBJECTREF)pObject);
261 return pWrap != NULL && pWrap->IsWrapperActive();
// Hook invoked before the background GC sweep phase; revalidates byrefs
// pinned by IL stubs (heap-verification builds only, per the #endif below).
267 void GCToEEInterface::GcBeforeBGCSweepWork()
277 // Validate byrefs pinned by IL stubs since the last GC.
278 StubHelpers::ProcessByrefValidationList();
279 #endif // VERIFY_HEAP
// Notifies the sync block cache that objects were demoted during this GC
// (TRUE = demotion pass) up to and including generation max_gen.
282 void GCToEEInterface::SyncBlockCacheDemote(int max_gen)
291 SyncBlockCache::GetSyncBlockCache()->GCDone(TRUE, max_gen);
// Notifies the sync block cache that promotions were granted during this GC
// (FALSE = promotion pass) up to and including generation max_gen.
294 void GCToEEInterface::SyncBlockCachePromotionsGranted(int max_gen)
303 SyncBlockCache::GetSyncBlockCache()->GCDone(FALSE, max_gen);
// Returns the number of sync blocks currently in use, straight from the cache.
306 uint32_t GCToEEInterface::GetActiveSyncBlockCount()
315 return SyncBlockCache::GetSyncBlockCache()->GetActiveCount();
// Returns the calling thread's GC allocation context.
318 gc_alloc_context * GCToEEInterface::GetAllocContext()
322 Thread* pThread = ::GetThread();
328 return pThread->GetAllocContext();
// Invokes `fn` on every live allocation context: one per thread when
// per-thread contexts are in use, otherwise the single global context.
331 void GCToEEInterface::GcEnumAllocContexts(enum_alloc_context_func* fn, void* param)
340 if (GCHeapUtilities::UseThreadAllocationContexts())
342 Thread * pThread = NULL;
343 while ((pThread = ThreadStore::GetThreadList(pThread)) != NULL)
345 fn(pThread->GetAllocContext(), param);
350 fn(&g_global_alloc_context, param);
// Returns the loader-allocator object that keeps pObject's type alive, using
// the GC-safe method table accessor (valid even on marked/relocated objects).
355 uint8_t* GCToEEInterface::GetLoaderAllocatorObjectForGC(Object* pObject)
364 return pObject->GetGCSafeMethodTable()->GetLoaderAllocatorObjectForGC();
// Returns true if the current thread is in cooperative (preemptive-GC-disabled)
// mode. The !! normalizes the EE's BOOL to a C++ bool.
367 bool GCToEEInterface::IsPreemptiveGCDisabled()
371 Thread* pThread = ::GetThread();
374 return !!pThread->PreemptiveGCDisabled();
// Switches the current thread to preemptive mode. Returns whether a mode
// change actually happened (i.e. the thread was previously in cooperative
// mode), so the caller knows whether to toggle back later.
380 bool GCToEEInterface::EnablePreemptiveGC()
384 bool bToggleGC = false;
385 Thread* pThread = ::GetThread();
389 bToggleGC = !!pThread->PreemptiveGCDisabled();
392 pThread->EnablePreemptiveGC();
// Switches the current thread back to cooperative (GC-disabled) mode.
399 void GCToEEInterface::DisablePreemptiveGC()
403 Thread* pThread = ::GetThread();
406 pThread->DisablePreemptiveGC();
// Thin wrapper exposing the EE's global ::GetThread() to the GC.
410 Thread* GCToEEInterface::GetThread()
414 return ::GetThread();
// Handshake arguments passed to BackgroundThreadStub. The creating thread
// waits on threadStartedEvent; once it is set, this struct may be destroyed,
// so the stub must copy out everything it needs first.
417 struct BackgroundThreadStubArgs
420 GCBackgroundThreadFunction threadStart;
422 CLREvent threadStartedEvent;
// OS-level entry point for GC background threads: marks the thread as a GC
// special thread, reports startup success to the creator, then runs the real
// thread function and destroys the Thread object on exit.
426 DWORD WINAPI BackgroundThreadStub(void* arg)
428 BackgroundThreadStubArgs* stubArgs = (BackgroundThreadStubArgs*)arg;
429 assert (stubArgs->thread != NULL);
431 ClrFlsSetThreadType (ThreadType_GC);
432 stubArgs->thread->SetGCSpecial(true);
433 STRESS_LOG_RESERVE_MEM (GC_STRESSLOG_MULTIPLY);
435 stubArgs->hasStarted = !!stubArgs->thread->HasStarted(FALSE);
// Copy everything out of stubArgs BEFORE signalling the event (see below).
437 Thread* thread = stubArgs->thread;
438 GCBackgroundThreadFunction realThreadStart = stubArgs->threadStart;
439 void* realThreadArg = stubArgs->arg;
440 bool hasStarted = stubArgs->hasStarted;
442 stubArgs->threadStartedEvent.Set();
443 // The stubArgs cannot be used once the event is set, since that releases wait on the
444 // event in the function that created this thread and the stubArgs go out of scope.
450 result = realThreadStart(realThreadArg);
451 DestroyThread(thread);
461 #if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
// Returns TRUE when either an attached profiler tracks GC moves or ETW has
// requested movement tracking — callers use it to gate survivor/move walks.
462 inline BOOL ShouldTrackMovementForProfilerOrEtw()
465 if (CORProfilerTrackGC())
469 #ifdef FEATURE_EVENT_TRACE
470 if (ETW::GCLog::ShouldTrackMovementForEtw())
476 #endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
// promote_func-shaped adapter used during profiler/ETW root scans: resolves
// interior pointers to their containing object before forwarding to
// ScanRootsHelper. Compiles to a no-op without profiling/ETW support.
478 void ProfScanRootsHelper(Object** ppObject, ScanContext *pSC, uint32_t dwFlags)
480 #if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
481 Object *pObj = *ppObject;
482 if (dwFlags & GC_CALL_INTERIOR)
484 pObj = GCHeapUtilities::GetGCHeap()->GetContainingObject(pObj, true);
488 ScanRootsHelper(pObj, ppObject, pSC, dwFlags);
489 #endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
492 // TODO - at some point we would like to completely decouple profiling
493 // from ETW tracing using a pattern similar to this, where the
494 // ProfilingScanContext has flags about whether or not certain things
495 // should be tracked, and each one of these ProfilerShouldXYZ functions
496 // will check these flags and determine what to do based upon that.
497 // GCProfileWalkHeapWorker can, in turn, call those methods without fear
498 // of things being ifdef'd out.
500 // Returns TRUE if GC profiling is enabled and the profiler
501 // should scan dependent handles, FALSE otherwise.
502 BOOL ProfilerShouldTrackConditionalWeakTableElements()
504 #if defined(GC_PROFILING)
505 return CORProfilerTrackConditionalWeakTableElements();
508 #endif // defined (GC_PROFILING)
511 // If GC profiling is enabled, informs the profiler that we are done
512 // tracing dependent handles.
513 void ProfilerEndConditionalWeakTableElementReferences(void* heapId)
515 #if defined (GC_PROFILING)
516 g_profControlBlock.pProfInterface->EndConditionalWeakTableElementReferences(heapId);
// Without GC_PROFILING this is a no-op; heapId is intentionally unused.
518 UNREFERENCED_PARAMETER(heapId);
519 #endif // defined (GC_PROFILING)
522 // If GC profiling is enabled, informs the profiler that we are done
523 // tracing root references.
524 void ProfilerEndRootReferences2(void* heapId)
526 #if defined (GC_PROFILING)
527 g_profControlBlock.pProfInterface->EndRootReferences2(heapId);
// Without GC_PROFILING this is a no-op; heapId is intentionally unused.
529 UNREFERENCED_PARAMETER(heapId);
530 #endif // defined (GC_PROFILING)
// Diagnostic-only variant of GcScanRoots: scans every thread's stack roots for
// the profiler/ETW. Unlike the real GC scan, it does not partition threads
// across server-GC heaps — every thread is walked by this single call.
533 void GcScanRootsForProfilerAndETW(promote_func* fn, int condemned, int max_gen, ScanContext* sc)
535 Thread* pThread = NULL;
536 while ((pThread = ThreadStore::GetThreadList(pThread)) != NULL)
538 sc->thread_under_crawl = pThread;
539 #ifdef FEATURE_EVENT_TRACE
540 sc->dwEtwRootKind = kEtwGCRootKindStack;
541 #endif // FEATURE_EVENT_TRACE
542 ScanStackRoots(pThread, fn, sc);
543 #ifdef FEATURE_EVENT_TRACE
// Reset the root kind so later, non-stack reports aren't misattributed.
544 sc->dwEtwRootKind = kEtwGCRootKindOther;
545 #endif // FEATURE_EVENT_TRACE
// Handle-scan callback for the profiler/ETW heap walk. Reports each handle
// either as a regular root (RootReference2) or, when isDependent, as a
// conditional weak-table element, then also mirrors the report to ETW.
549 void ScanHandleForProfilerAndETW(Object** pRef, Object* pSec, uint32_t flags, ScanContext* context, bool isDependent)
551 ProfilingScanContext* pSC = (ProfilingScanContext*)context;
554 // Give the profiler the objectref.
555 if (pSC->fProfilerPinned)
// BEGIN_PIN_PROFILER keeps the profiler from detaching while we call into it.
559 BEGIN_PIN_PROFILER(CORProfilerTrackGC());
560 g_profControlBlock.pProfInterface->RootReference2(
562 kEtwGCRootKindHandle,
563 (EtwGCRootFlags)flags,
570 BEGIN_PIN_PROFILER(CORProfilerTrackConditionalWeakTableElements());
571 g_profControlBlock.pProfInterface->ConditionalWeakTableElementReference(
579 #endif // GC_PROFILING
581 #if defined(FEATURE_EVENT_TRACE)
582 // Notify ETW of the handle
583 if (ETW::GCLog::ShouldWalkHeapRootsForEtw())
585 ETW::GCLog::RootReference(
587 *pRef, // object being rooted
588 pSec, // pSecondaryNodeForDependentHandle
592 flags); // ETW handle flags
594 #endif // defined(FEATURE_EVENT_TRACE)
597 // This is called only if we've determined that either:
598 // a) The Profiling API wants to do a walk of the heap, and it has pinned the
599 // profiler in place (so it cannot be detached), and it's thus safe to call into the
601 // b) ETW infrastructure wants to do a walk of the heap either to log roots,
603 // This can also be called to do a single walk for BOTH a) and b) simultaneously. Since
604 // ETW can ask for roots, but not objects
605 #if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
// Performs the combined profiler/ETW heap walk in three phases: regular roots
// (including the finalize queue), dependent handles, then heap objects.
606 void GCProfileWalkHeapWorker(BOOL fProfilerPinned, BOOL fShouldWalkHeapRootsForEtw, BOOL fShouldWalkHeapObjectsForEtw)
609 ProfilingScanContext SC(fProfilerPinned);
610 unsigned max_generation = GCHeapUtilities::GetGCHeap()->GetMaxGeneration();
612 // **** Scan roots: Only scan roots if profiling API wants them or ETW wants them.
613 if (fProfilerPinned || fShouldWalkHeapRootsForEtw)
615 GcScanRootsForProfilerAndETW(&ProfScanRootsHelper, max_generation, max_generation, &SC);
616 SC.dwEtwRootKind = kEtwGCRootKindFinalizer;
617 GCHeapUtilities::GetGCHeap()->DiagScanFinalizeQueue(&ProfScanRootsHelper, &SC);
619 // Handles are kept independent of wks/svr/concurrent builds
620 SC.dwEtwRootKind = kEtwGCRootKindHandle;
621 GCHeapUtilities::GetGCHeap()->DiagScanHandles(&ScanHandleForProfilerAndETW, max_generation, &SC);
623 // indicate that regular handle scanning is over, so we can flush the buffered roots
624 // to the profiler. (This is for profapi only. ETW will flush after the
625 // entire heap walk is complete, via ETW::GCLog::EndHeapDump.)
628 ProfilerEndRootReferences2(&SC.pHeapId);
632 // **** Scan dependent handles: only if the profiler supports it or ETW wants roots
633 if ((fProfilerPinned && ProfilerShouldTrackConditionalWeakTableElements()) ||
634 fShouldWalkHeapRootsForEtw)
636 // GcScanDependentHandlesForProfiler double-checks
637 // CORProfilerTrackConditionalWeakTableElements() before calling into the profiler
639 ProfilingScanContext* pSC = &SC;
641 // we'll re-use pHeapId (which was either unused (0) or freed by EndRootReferences2
642 // (-1)), so reset it to NULL
643 _ASSERTE((*((size_t *)(&pSC->pHeapId)) == (size_t)(-1)) ||
644 (*((size_t *)(&pSC->pHeapId)) == (size_t)(0)));
647 GCHeapUtilities::GetGCHeap()->DiagScanDependentHandles(&ScanHandleForProfilerAndETW, max_generation, &SC);
649 // indicate that dependent handle scanning is over, so we can flush the buffered roots
650 // to the profiler. (This is for profapi only. ETW will flush after the
651 // entire heap walk is complete, via ETW::GCLog::EndHeapDump.)
652 if (fProfilerPinned && ProfilerShouldTrackConditionalWeakTableElements())
654 ProfilerEndConditionalWeakTableElementReferences(&SC.pHeapId);
658 ProfilerWalkHeapContext profilerWalkHeapContext(fProfilerPinned, SC.pvEtwContext);
660 // **** Walk objects on heap: only if profiling API wants them or ETW wants them.
661 if (fProfilerPinned || fShouldWalkHeapObjectsForEtw)
663 GCHeapUtilities::GetGCHeap()->DiagWalkHeap(&HeapWalkHelper, &profilerWalkHeapContext, max_generation, true /* walk the large object heap */);
666 #ifdef FEATURE_EVENT_TRACE
667 // **** Done! Indicate to ETW helpers that the heap walk is done, so any buffers
668 // should be flushed into the ETW stream
669 if (fShouldWalkHeapObjectsForEtw || fShouldWalkHeapRootsForEtw)
671 ETW::GCLog::EndHeapDump(&profilerWalkHeapContext);
673 #endif // FEATURE_EVENT_TRACE
676 #endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
// Top-level entry for the diagnostic heap walk. Runs the worker at most once:
// first on behalf of a pinned profiler (which also satisfies any ETW request),
// and otherwise for ETW alone if ETW asked for roots or objects.
678 void GCProfileWalkHeap()
680 BOOL fWalkedHeapForProfiler = FALSE;
682 #ifdef FEATURE_EVENT_TRACE
683 if (ETW::GCLog::ShouldWalkStaticsAndCOMForEtw())
684 ETW::GCLog::WalkStaticsAndCOMForETW();
686 BOOL fShouldWalkHeapRootsForEtw = ETW::GCLog::ShouldWalkHeapRootsForEtw();
687 BOOL fShouldWalkHeapObjectsForEtw = ETW::GCLog::ShouldWalkHeapObjectsForEtw();
688 #else // !FEATURE_EVENT_TRACE
689 BOOL fShouldWalkHeapRootsForEtw = FALSE;
690 BOOL fShouldWalkHeapObjectsForEtw = FALSE;
691 #endif // FEATURE_EVENT_TRACE
693 #if defined (GC_PROFILING)
695 BEGIN_PIN_PROFILER(CORProfilerTrackGC());
696 GCProfileWalkHeapWorker(TRUE /* fProfilerPinned */, fShouldWalkHeapRootsForEtw, fShouldWalkHeapObjectsForEtw);
697 fWalkedHeapForProfiler = TRUE;
700 #endif // defined (GC_PROFILING)
702 #if defined (GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
703 // we need to walk the heap if one of GC_PROFILING or FEATURE_EVENT_TRACE
704 // is defined, since both of them make use of the walk heap worker.
705 if (!fWalkedHeapForProfiler &&
706 (fShouldWalkHeapRootsForEtw || fShouldWalkHeapObjectsForEtw))
708 GCProfileWalkHeapWorker(FALSE /* fProfilerPinned */, fShouldWalkHeapRootsForEtw, fShouldWalkHeapObjectsForEtw);
710 #endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
// fq_walk_fn callback: reports one finalization-reachable object to the
// profiler as it is queued for finalization.
713 void WalkFReachableObjects(bool isCritical, void* objectID)
715 g_profControlBlock.pProfInterface->FinalizeableObjectQueued(isCritical, (ObjectID)objectID);
// Shared callback instance handed to DiagWalkFinalizeQueue.
718 static fq_walk_fn g_FQWalkFn = &WalkFReachableObjects;
// Diagnostics hook at GC start: refreshes generation bounds, fires the
// GC-started callback, and (with a pinned profiler) walks gen-0 allocations
// per class for the AllocByClass profiler event.
720 void GCToEEInterface::DiagGCStart(int gen, bool isInduced)
723 DiagUpdateGenerationBounds();
724 GarbageCollectionStartedCallback(gen, isInduced);
726 BEGIN_PIN_PROFILER(CORProfilerTrackGC());
729 // When we're walking objects allocated by class, then we don't want to walk the large
730 // object heap because then it would count things that may have been around for a while.
731 GCHeapUtilities::GetGCHeap()->DiagWalkHeap(&AllocByClassHelper, (void *)&context, 0, false);
733 // Notify that we've reached the end of the Gen 0 scan
734 g_profControlBlock.pProfInterface->EndAllocByClass(&context);
738 #endif // GC_PROFILING
// Refreshes the profiler's view of generation boundaries, but only when a
// profiler is attached and tracking GC events.
741 void GCToEEInterface::DiagUpdateGenerationBounds()
744 if (CORProfilerTrackGC())
745 UpdateGenerationBounds();
746 #endif // GC_PROFILING
// Diagnostics hook at GC end: refreshes generation bounds and fires the
// GC-finished profiler callback (profiling builds only, per the #endif).
749 void GCToEEInterface::DiagGCEnd(size_t index, int gen, int reason, bool fConcurrent)
755 DiagUpdateGenerationBounds();
756 GarbageCollectionFinishedCallback();
758 #endif // GC_PROFILING
// Walks the finalize queue and reports each finalization-reachable object to
// the profiler via g_FQWalkFn, when a GC-tracking profiler is attached.
761 void GCToEEInterface::DiagWalkFReachableObjects(void* gcContext)
764 if (CORProfilerTrackGC())
766 BEGIN_PIN_PROFILER(CORProfilerPresent());
767 GCHeapUtilities::GetGCHeap()->DiagWalkFinalizeQueue(gcContext, g_FQWalkFn);
770 #endif //GC_PROFILING
773 // Note on last parameter: when calling this for bgc, only ETW
774 // should be sending these events so that existing profapi profilers
775 // don't get confused.
// Survivor/compaction walk callback: forwards each moved (or surviving)
// range [begin, end) to ETW; reloc is reported as 0 when not compacting.
776 void WalkMovedReferences(uint8_t* begin, uint8_t* end,
782 ETW::GCLog::MovedReference(begin, end,
783 (fCompacting ? reloc : 0),
// Reports surviving/moved object ranges of a normal GC to the profiler/ETW
// (walk_for_gc variant), bracketed by Begin/EndMovedReferences.
789 void GCToEEInterface::DiagWalkSurvivors(void* gcContext)
791 #if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
792 if (ShouldTrackMovementForProfilerOrEtw())
795 ETW::GCLog::BeginMovedReferences(&context);
796 GCHeapUtilities::GetGCHeap()->DiagWalkSurvivorsWithType(gcContext, &WalkMovedReferences, (void*)context, walk_for_gc);
797 ETW::GCLog::EndMovedReferences(context);
799 #endif //GC_PROFILING || FEATURE_EVENT_TRACE
// Same as DiagWalkSurvivors but for the large object heap (walk_for_loh).
802 void GCToEEInterface::DiagWalkLOHSurvivors(void* gcContext)
804 #if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
805 if (ShouldTrackMovementForProfilerOrEtw())
808 ETW::GCLog::BeginMovedReferences(&context);
809 GCHeapUtilities::GetGCHeap()->DiagWalkSurvivorsWithType(gcContext, &WalkMovedReferences, (void*)context, walk_for_loh);
810 ETW::GCLog::EndMovedReferences(context);
812 #endif //GC_PROFILING || FEATURE_EVENT_TRACE
// Same as DiagWalkSurvivors but for background GC (walk_for_bgc); per the
// note above WalkMovedReferences, only ETW consumes these events.
815 void GCToEEInterface::DiagWalkBGCSurvivors(void* gcContext)
817 #if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
818 if (ShouldTrackMovementForProfilerOrEtw())
821 ETW::GCLog::BeginMovedReferences(&context);
822 GCHeapUtilities::GetGCHeap()->DiagWalkSurvivorsWithType(gcContext, &WalkMovedReferences, (void*)context, walk_for_bgc);
823 ETW::GCLog::EndMovedReferences(context);
825 #endif //GC_PROFILING || FEATURE_EVENT_TRACE
// Rewrites ("stomps") the JIT'd write-barrier code and the barrier globals
// (card table, heap bounds, ephemeral range, write-watch table) according to
// args->operation. The patching helpers return a bitmask of follow-up actions
// (icache flush, EE restart) accumulated in stompWBCompleteActions and
// performed at the end. Statement ORDER here is load-bearing: barrier code
// must see the new card table before the new heap bounds (see the long
// comment below), so do not reorder when modifying this function.
828 void GCToEEInterface::StompWriteBarrier(WriteBarrierParameters* args)
830 int stompWBCompleteActions = SWB_PASS;
831 bool is_runtime_suspended = false;
833 assert(args != nullptr);
834 switch (args->operation)
836 case WriteBarrierOp::StompResize:
837 // StompResize requires a new card table, a new lowest address, and
838 // a new highest address
839 assert(args->card_table != nullptr);
840 assert(args->lowest_address != nullptr);
841 assert(args->highest_address != nullptr);
843 g_card_table = args->card_table;
845 #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
846 assert(args->card_bundle_table != nullptr);
847 g_card_bundle_table = args->card_bundle_table;
850 #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
// Swapping the software write-watch table mid-resize is only legal while
// the runtime is fully suspended.
851 if (g_sw_ww_enabled_for_gc_heap && (args->write_watch_table != nullptr))
853 assert(args->is_runtime_suspended);
854 g_sw_ww_table = args->write_watch_table;
856 #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
858 stompWBCompleteActions |= ::StompWriteBarrierResize(args->is_runtime_suspended, args->requires_upper_bounds_check);
860 // We need to make sure that other threads executing checked write barriers
861 // will see the g_card_table update before g_lowest/highest_address updates.
862 // Otherwise, the checked write barrier may AV accessing the old card table
863 // with address that it does not cover.
865 // Even x86's total store ordering is insufficient here because threads reading
866 // g_card_table do so via the instruction cache, whereas g_lowest/highest_address
867 // are read via the data cache.
869 // The g_card_table update is covered by section 8.1.3 of the Intel Software
870 // Development Manual, Volume 3A (System Programming Guide, Part 1), about
871 // "cross-modifying code": We need all _executing_ threads to invalidate
872 // their instruction cache, which FlushProcessWriteBuffers achieves by sending
873 // an IPI (inter-process interrupt).
875 if (stompWBCompleteActions & SWB_ICACHE_FLUSH)
877 // flushing icache on current processor (thread)
878 ::FlushWriteBarrierInstructionCache();
879 // asking other processors (threads) to invalidate their icache
880 FlushProcessWriteBuffers();
// Only now is it safe to publish the new bounds (VolatileStore orders the
// highest-address store after the lowest-address store).
883 g_lowest_address = args->lowest_address;
884 VolatileStore(&g_highest_address, args->highest_address);
886 #if defined(_ARM64_) || defined(_ARM_)
887 // Need to reupdate for changes to g_highest_address g_lowest_address
888 is_runtime_suspended = (stompWBCompleteActions & SWB_EE_RESTART) || args->is_runtime_suspended;
889 stompWBCompleteActions |= ::StompWriteBarrierResize(is_runtime_suspended, args->requires_upper_bounds_check);
892 if (stompWBCompleteActions & SWB_ICACHE_FLUSH)
894 ::FlushWriteBarrierInstructionCache();
898 is_runtime_suspended = (stompWBCompleteActions & SWB_EE_RESTART) || args->is_runtime_suspended;
899 if(!is_runtime_suspended)
901 // If runtime is not suspended, force updated state to be visible to all threads
905 if (stompWBCompleteActions & SWB_EE_RESTART)
907 assert(!args->is_runtime_suspended &&
908 "if runtime was suspended in patching routines then it was in running state at begining");
909 ThreadSuspend::RestartEE(FALSE, TRUE);
911 return; // unlike other branches we have already done cleanup so bailing out here
912 case WriteBarrierOp::StompEphemeral:
913 // StompEphemeral requires a new ephemeral low and a new ephemeral high
914 assert(args->ephemeral_low != nullptr);
915 assert(args->ephemeral_high != nullptr);
916 g_ephemeral_low = args->ephemeral_low;
917 g_ephemeral_high = args->ephemeral_high;
918 stompWBCompleteActions |= ::StompWriteBarrierEphemeral(args->is_runtime_suspended);
920 case WriteBarrierOp::Initialize:
921 // This operation should only be invoked once, upon initialization.
922 assert(g_card_table == nullptr);
923 assert(g_lowest_address == nullptr);
924 assert(g_highest_address == nullptr);
925 assert(args->card_table != nullptr);
926 assert(args->lowest_address != nullptr);
927 assert(args->highest_address != nullptr);
928 assert(args->ephemeral_low != nullptr);
929 assert(args->ephemeral_high != nullptr);
930 assert(args->is_runtime_suspended && "the runtime must be suspended here!");
931 assert(!args->requires_upper_bounds_check && "the ephemeral generation must be at the top of the heap!");
933 g_card_table = args->card_table;
935 #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
936 assert(g_card_bundle_table == nullptr);
937 g_card_bundle_table = args->card_bundle_table;
940 g_lowest_address = args->lowest_address;
941 g_highest_address = args->highest_address;
942 stompWBCompleteActions |= ::StompWriteBarrierResize(true, false);
944 // StompWriteBarrierResize does not necessarily bash g_ephemeral_low
945 // usages, so we must do so here. This is particularly true on x86,
946 // where StompWriteBarrierResize will not bash g_ephemeral_low when
947 // called with the parameters (true, false), as it is above.
948 g_ephemeral_low = args->ephemeral_low;
949 g_ephemeral_high = args->ephemeral_high;
950 stompWBCompleteActions |= ::StompWriteBarrierEphemeral(true);
952 case WriteBarrierOp::SwitchToWriteWatch:
953 #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
954 assert(args->write_watch_table != nullptr);
955 assert(args->is_runtime_suspended && "the runtime must be suspended here!");
956 g_sw_ww_table = args->write_watch_table;
957 g_sw_ww_enabled_for_gc_heap = true;
958 stompWBCompleteActions |= ::SwitchToWriteWatchBarrier(true);
960 assert(!"should never be called without FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP");
961 #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
963 case WriteBarrierOp::SwitchToNonWriteWatch:
964 #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
965 assert(args->is_runtime_suspended && "the runtime must be suspended here!");
967 g_sw_ww_enabled_for_gc_heap = false;
968 stompWBCompleteActions |= ::SwitchToNonWriteWatchBarrier(true);
970 assert(!"should never be called without FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP");
971 #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
974 assert(!"unknown WriteBarrierOp enum");
// Common cleanup for the non-StompResize branches (StompResize returned above).
976 if (stompWBCompleteActions & SWB_ICACHE_FLUSH)
978 ::FlushWriteBarrierInstructionCache();
980 if (stompWBCompleteActions & SWB_EE_RESTART)
982 assert(!args->is_runtime_suspended &&
983 "if runtime was suspended in patching routines then it was in running state at begining");
984 ThreadSuspend::RestartEE(FALSE, TRUE);
// Wakes the finalizer thread when the GC found finalizable objects, or when
// the finalizer already has other pending work queued.
988 void GCToEEInterface::EnableFinalization(bool foundFinalizers)
990 if (foundFinalizers || FinalizerThread::HaveExtraWorkForFinalizer())
992 FinalizerThread::EnableFinalization();
// Escalates an unrecoverable GC failure through the EE's fatal-error policy.
996 void GCToEEInterface::HandleFatalError(unsigned int exitCode)
998 EEPOLICY_HANDLE_FATAL_ERROR(exitCode);
1001 bool GCToEEInterface::ShouldFinalizeObjectForUnload(void* pDomain, Object* obj)
1003 // CoreCLR does not have appdomains, so this code path is dead. Other runtimes may
1004 // choose to inspect the object being finalized here.
1005 // [DESKTOP TODO] Desktop looks for "agile and finalizable" objects and may choose
1006 // to move them to a new app domain instead of finalizing them here.
// Gives the EE a chance to finalize an object eagerly, on the GC's thread,
// instead of queuing it to the finalizer thread. WeakReference and
// WeakReference<T> are handled this way via FinalizeWeakReference.
1010 bool GCToEEInterface::EagerFinalized(Object* obj)
1012 MethodTable* pMT = obj->GetGCSafeMethodTable();
1013 if (pMT == pWeakReferenceMT ||
1014 pMT->GetCanonicalMethodTable() == pWeakReferenceOfTCanonMT)
1016 FinalizeWeakReference(obj);
// Returns the method table used for free (dead) heap space; must already
// have been initialized by the time the GC asks for it.
1023 MethodTable* GCToEEInterface::GetFreeObjectMethodTable()
1025 assert(g_pFreeObjectMethodTable != nullptr);
1026 return g_pFreeObjectMethodTable;
1029 // These are arbitrary, we shouldn't ever be having config keys or values
1030 // longer than these lengths.
1031 const size_t MaxConfigKeyLength = 255;
1032 const size_t MaxConfigValueLength = 255;
// Looks up a boolean GC configuration knob by its narrow-string name.
// A few well-known keys are answered from startup flags; everything else is
// widened to UTF-16 and forwarded to the CLRConfig subsystem.
1034 bool GCToEEInterface::GetBooleanConfigValue(const char* key, bool* value)
1041 // these configuration values are given to us via startup flags.
1042 if (strcmp(key, "gcServer") == 0)
1044 *value = g_heap_type == GC_HEAP_SVR;
1048 if (strcmp(key, "gcConcurrent") == 0)
1050 *value = !!g_pConfig->GetGCconcurrent();
1054 if (strcmp(key, "GCRetainVM") == 0)
1056 *value = !!g_pConfig->GetGCRetainVM();
1060 WCHAR configKey[MaxConfigKeyLength];
1061 if (MultiByteToWideChar(CP_ACP, 0, key, -1 /* key is null-terminated */, configKey, MaxConfigKeyLength) == 0)
1063 // whatever this is... it's not something we care about. (It was too long, wasn't unicode, etc.)
1067 // otherwise, ask the config subsystem.
1068 if (CLRConfig::IsConfigOptionSpecified(configKey))
1070 CLRConfig::ConfigDWORDInfo info { configKey , 0, CLRConfig::EEConfig_default };
1071 *value = CLRConfig::GetConfigValue(info) != 0;
// Looks up an integer GC configuration knob by name: widens the key to
// UTF-16 and queries CLRConfig, succeeding only if the option was explicitly
// specified by the user/environment.
1078 bool GCToEEInterface::GetIntConfigValue(const char* key, int64_t* value)
1085 WCHAR configKey[MaxConfigKeyLength];
1086 if (MultiByteToWideChar(CP_ACP, 0, key, -1 /* key is null-terminated */, configKey, MaxConfigKeyLength) == 0)
1088 // whatever this is... it's not something we care about. (It was too long, wasn't unicode, etc.)
1092 if (CLRConfig::IsConfigOptionSpecified(configKey))
1094 CLRConfig::ConfigDWORDInfo info { configKey , 0, CLRConfig::EEConfig_default };
1095 *value = CLRConfig::GetConfigValue(info);
// Looks up a string GC configuration knob by name. The key is widened to
// UTF-16 for CLRConfig; the resulting wide string is narrowed into a heap
// buffer which is handed to the caller via *value (released later through
// FreeStringConfigValue). Returns false on conversion failure, missing
// value, or allocation failure.
1102 bool GCToEEInterface::GetStringConfigValue(const char* key, const char** value)
1109 WCHAR configKey[MaxConfigKeyLength];
1110 if (MultiByteToWideChar(CP_ACP, 0, key, -1 /* key is null-terminated */, configKey, MaxConfigKeyLength) == 0)
1112 // whatever this is... it's not something we care about. (It was too long, wasn't unicode, etc.)
1116 CLRConfig::ConfigStringInfo info { configKey, CLRConfig::EEConfig_default };
1117 LPWSTR out = CLRConfig::GetConfigValue(info);
1124 // not allocated on the stack since it escapes this function
1125 AStringHolder configResult = new (nothrow) char[MaxConfigValueLength];
1128 CLRConfig::FreeConfigString(out);
// BUGFIX: the output buffer holds MaxConfigValueLength chars (allocated
// above), so that is the size limit to pass to WideCharToMultiByte — not
// MaxConfigKeyLength. The two constants are currently equal (255), but using
// the key length here would become a buffer overrun if the value limit were
// ever raised independently.
1132 if (WideCharToMultiByte(CP_ACP, 0, out, -1 /* out is null-terminated */,
1133 configResult.GetValue(), MaxConfigValueLength, nullptr, nullptr) == 0)
1135 // this should only happen if the config subsystem gives us a string that's not valid
1137 CLRConfig::FreeConfigString(out);
// Success: transfer buffer ownership to the caller and free the wide string.
1141 *value = configResult.Extract();
1142 CLRConfig::FreeConfigString(out);
// Releases a string previously returned by GetStringConfigValue.
1146 void GCToEEInterface::FreeStringConfigValue(const char* value)
// True if the current thread is a GC thread (normalized from the EE's BOOL).
1151 bool GCToEEInterface::IsGCThread()
1153 return !!::IsGCThread();
// True if the current thread was created by the GC itself (a "GC special"
// thread), as opposed to an EE/user thread borrowed for GC work.
1156 bool GCToEEInterface::WasCurrentThreadCreatedByGC()
1158 return !!::IsGCSpecialThread();
// Handshake arguments for CreateSuspendableThread's stub. The creator waits
// on ThreadStartedEvent; once set, this struct may go out of scope, so the
// stub must copy out everything it needs before signalling.
1161 struct SuspendableThreadStubArguments
1164 void (*ThreadStart)(void*);
1167 CLREvent ThreadStartedEvent;
// Handshake arguments for CreateNonSuspendableThread's stub; same
// copy-before-Set lifetime rule as SuspendableThreadStubArguments.
1170 struct ThreadStubArguments
1173 void (*ThreadStart)(void*);
1176 CLREvent ThreadStartedEvent;
// Upper bound on thread names accepted by the thread-creation helpers below.
1181 const size_t MaxThreadNameSize = 255;
// Creates an EE-visible (suspendable) GC thread: sets up an unstarted Thread
// object, runs `threadStart(argument)` on it marked as GC-special, and blocks
// until the new thread signals whether Thread::HasStarted succeeded.
// Returns false on any setup failure, cleaning up events/Thread as needed.
1183 bool CreateSuspendableThread(
1184 void (*threadStart)(void*),
1186 const wchar_t* name)
1188 LIMITED_METHOD_CONTRACT;
1190 SuspendableThreadStubArguments args;
1191 args.Argument = argument;
1192 args.ThreadStart = threadStart;
1193 args.Thread = nullptr;
1194 args.HasStarted = false;
1195 if (!args.ThreadStartedEvent.CreateAutoEventNoThrow(FALSE))
1202 args.Thread = SetupUnstartedThread(FALSE);
1207 EX_END_CATCH(SwallowAllExceptions)
// Thread setup failed: release the event before bailing out.
1211 args.ThreadStartedEvent.CloseEvent();
1215 auto threadStub = [](void* argument) -> DWORD
1217 SuspendableThreadStubArguments* args = static_cast<SuspendableThreadStubArguments*>(argument);
1218 assert(args != nullptr);
1220 ClrFlsSetThreadType(ThreadType_GC);
1221 args->Thread->SetGCSpecial(true);
1222 STRESS_LOG_RESERVE_MEM(GC_STRESSLOG_MULTIPLY);
1223 args->HasStarted = !!args->Thread->HasStarted(false);
// Copy everything out of args BEFORE signalling the event (see below).
1225 Thread* thread = args->Thread;
1226 auto threadStart = args->ThreadStart;
1227 void* threadArgument = args->Argument;
1228 bool hasStarted = args->HasStarted;
1229 args->ThreadStartedEvent.Set();
1231 // The stubArgs cannot be used once the event is set, since that releases wait on the
1232 // event in the function that created this thread and the stubArgs go out of scope.
1235 threadStart(threadArgument);
1236 DestroyThread(thread);
1241 if (!args.Thread->CreateNewThread(0, threadStub, &args, name))
// OS thread creation failed: drop our reference and the event.
1243 args.Thread->DecExternalCount(FALSE);
1244 args.ThreadStartedEvent.CloseEvent();
1248 args.Thread->SetBackground(TRUE, FALSE);
1249 args.Thread->StartThread();
1251 // Wait for the thread to be in its main loop
1252 uint32_t res = args.ThreadStartedEvent.Wait(INFINITE, FALSE);
1253 args.ThreadStartedEvent.CloseEvent();
1254 _ASSERTE(res == WAIT_OBJECT_0);
1256 if (!args.HasStarted)
1258 // The thread has failed to start and the Thread object was destroyed in the Thread::HasStarted
1259 // failure code path.
// Creates a non-suspendable GC utility thread: a plain OS thread (raw
// HANDLE, no EE Thread object) that the EE does not suspend. Returns
// true iff the thread was created and reached its main loop.
// NOTE(review): this excerpt is elided — some braces and return paths
// are not visible; comments describe the visible flow only.
1266 bool CreateNonSuspendableThread(
1267     void (*threadStart)(void*),
1269     const wchar_t* name)
1271     LIMITED_METHOD_CONTRACT;
// Stack-allocated packet; valid only until ThreadStartedEvent is set.
1273     ThreadStubArguments args;
1274     args.Argument = argument;
1275     args.ThreadStart = threadStart;
1276     args.Thread = INVALID_HANDLE_VALUE;
1277     if (!args.ThreadStartedEvent.CreateAutoEventNoThrow(FALSE))
// Stub run on the new thread: tag it as a GC thread, copy what it needs
// out of *args, signal the creator, then enter the GC work loop.
1282     auto threadStub = [](void* argument) -> DWORD
1284         ThreadStubArguments* args = static_cast<ThreadStubArguments*>(argument);
1285         assert(args != nullptr);
1287         ClrFlsSetThreadType(ThreadType_GC);
1288         STRESS_LOG_RESERVE_MEM(GC_STRESSLOG_MULTIPLY);
// No EE Thread::HasStarted step here — plain utility threads always
// count as started once the stub runs.
1290         args->HasStarted = true;
1291         auto threadStart = args->ThreadStart;
1292         void* threadArgument = args->Argument;
1293         args->ThreadStartedEvent.Set();
1295         // The stub args cannot be used once the event is set, since that releases wait on the
1296         // event in the function that created this thread and the stubArgs go out of scope.
1297         threadStart(threadArgument);
1301     args.Thread = Thread::CreateUtilityThread(Thread::StackSize_Medium, threadStub, &args, name);
1302     if (args.Thread == INVALID_HANDLE_VALUE)
1304         args.ThreadStartedEvent.CloseEvent();
1308     // Wait for the thread to be in its main loop
1309     uint32_t res = args.ThreadStartedEvent.Wait(INFINITE, FALSE);
1310     args.ThreadStartedEvent.CloseEvent();
1311     _ASSERTE(res == WAIT_OBJECT_0);
// We keep no reference to the utility thread; release our handle.
1313     CloseHandle(args.Thread);
1316 } // anonymous namespace
// GC entry point for spawning a GC-owned thread. Converts the UTF-8
// name to UTF-16 (best effort: if the conversion throws, the thread is
// simply created unnamed) and dispatches to the suspendable or
// non-suspendable helper above based on is_suspendable.
1318 bool GCToEEInterface::CreateThread(void (*threadStart)(void*), void* arg, bool is_suspendable, const char* name)
// Stack buffer for the converted name; sized by MaxThreadNameSize.
1320     InlineSString<MaxThreadNameSize> wideName;
1321     const WCHAR* namePtr = nullptr;
1324     if (name != nullptr)
// SetUTF8 can throw (elided EX_ catch below swallows the failure).
1326         wideName.SetUTF8(name);
1327         namePtr = wideName.GetUnicode();
1332         // we're not obligated to provide a name - if it's not valid,
1333         // just report nullptr as the name.
1335     EX_END_CATCH(SwallowAllExceptions)
1337     LIMITED_METHOD_CONTRACT;
1340         return CreateSuspendableThread(threadStart, arg, namePtr);
1344         return CreateNonSuspendableThread(threadStart, arg, namePtr);
// Reports the user object(s) held by an async-pinned OverlappedDataObject
// to the GC's promote callback with GC_CALL_PINNED, so pending overlapped
// I/O buffers are pinned for the duration of the operation.
// No-op for any object that is not an OverlappedDataObject.
1348 void GCToEEInterface::WalkAsyncPinnedForPromotion(Object* object, ScanContext* sc, promote_func* callback)
1350     LIMITED_METHOD_CONTRACT;
1352     assert(object != nullptr);
1353     assert(sc != nullptr);
1354     assert(callback != nullptr);
1355     if (object->GetGCSafeMethodTable() != g_pOverlappedDataClass)
1357         // not an overlapped data object - nothing to do.
1361     // reporting the pinned user objects
1362     OverlappedDataObject *pOverlapped = (OverlappedDataObject *)object;
1363     if (pOverlapped->m_userObject != NULL)
// If the user object is an object[], pin every element instead of the
// array itself (see the original rationale preserved below).
1365         if (pOverlapped->m_userObject->GetGCSafeMethodTable() == g_pPredefinedArrayTypes[ELEMENT_TYPE_OBJECT]->GetMethodTable())
1367             // OverlappedDataObject is very special. An async pin handle keeps it alive.
1368             // During GC, we also make sure
1369             // 1. m_userObject itself does not move if m_userObject is not array
1370             // 2. Every object pointed by m_userObject does not move if m_userObject is array
1371             // We do not want to pin m_userObject if it is array.
1372             ArrayBase* pUserObject = (ArrayBase*)OBJECTREFToObject(pOverlapped->m_userObject);
1373             Object **ppObj = (Object**)pUserObject->GetDataPtr(TRUE);
1374             size_t num = pUserObject->GetNumComponents();
1375             for (size_t i = 0; i < num; i++)
1377                 callback(ppObj + i, sc, GC_CALL_PINNED);
// Non-array user object: pin the object itself.
1382             callback(&OBJECTREF_TO_UNCHECKED_OBJECTREF(pOverlapped->m_userObject), (ScanContext *)sc, GC_CALL_PINNED);
// Walks the reference graph rooted at an async-pinned OverlappedDataObject,
// invoking callback(from, to, context) for each edge: first from the
// overlapped object to its user object, then — when the user object is an
// object[] — from the array to each of its elements.
// No-op for any object that is not an OverlappedDataObject.
1387 void GCToEEInterface::WalkAsyncPinned(Object* object, void* context, void (*callback)(Object*, Object*, void*))
1389     LIMITED_METHOD_CONTRACT;
1391     assert(object != nullptr);
1392     assert(callback != nullptr);
1394     if (object->GetGCSafeMethodTable() != g_pOverlappedDataClass)
1399     OverlappedDataObject *pOverlapped = (OverlappedDataObject *)(object);
1400     if (pOverlapped->m_userObject != NULL)
1402         Object * pUserObject = OBJECTREFToObject(pOverlapped->m_userObject);
// Report the overlapped-object -> user-object edge.
1403         callback(object, pUserObject, context);
// For an object[] user object, also report each array -> element edge.
1404         if (pOverlapped->m_userObject->GetGCSafeMethodTable() == g_pPredefinedArrayTypes[ELEMENT_TYPE_OBJECT]->GetMethodTable())
1406             ArrayBase* pUserArrayObject = (ArrayBase*)pUserObject;
1407             Object **pObj = (Object**)pUserArrayObject->GetDataPtr(TRUE);
1408             size_t num = pUserArrayObject->GetNumComponents();
1409             for (size_t i = 0; i < num; i ++)
1411                 callback(pUserObject, pObj[i], context);
// Returns the EE's event sink through which the GC raises ETW/EventPipe
// events (the global g_gcToClrEventSink instance).
1417 IGCToCLREventSink* GCToEEInterface::EventSink()
1419     LIMITED_METHOD_CONTRACT;
1421     return &g_gcToClrEventSink;
// Returns the numeric AppDomain index of the default domain.
1424 uint32_t GCToEEInterface::GetDefaultDomainIndex()
1426     LIMITED_METHOD_CONTRACT;
1428     return SystemDomain::System()->DefaultDomain()->GetIndex().m_dwIndex;
// Resolves an AppDomain index to its AppDomain*, returned as an opaque
// void* since the GC side does not know the AppDomain type.
// May be null if no domain exists at that index (SystemDomain lookup).
1431 void *GCToEEInterface::GetAppDomainAtIndex(uint32_t appDomainIndex)
1433     LIMITED_METHOD_CONTRACT;
1435     ADIndex index(appDomainIndex);
1436     return static_cast<void *>(SystemDomain::GetAppDomainAtIndex(index));
// Returns true if the AppDomain with the given index still exists (i.e.
// its handle table may be accessed). Implemented as a null check on the
// SystemDomain index lookup.
1439 bool GCToEEInterface::AppDomainCanAccessHandleTable(uint32_t appDomainID)
1441     LIMITED_METHOD_CONTRACT;
1443     ADIndex index(appDomainID);
1444     AppDomain *pDomain = SystemDomain::GetAppDomainAtIndex(index);
1445     return (pDomain != NULL);
// Returns the index of the AppDomain currently being unloaded.
// NOTE(review): the return statement is not visible in this excerpt —
// presumably a sentinel since domain unloading is not supported here;
// confirm against the full file.
1448 uint32_t GCToEEInterface::GetIndexOfAppDomainBeingUnloaded()
1450     LIMITED_METHOD_CONTRACT;
// Returns the total number of SizedRef handles across the process, as
// tracked by SystemDomain.
1455 uint32_t GCToEEInterface::GetTotalNumSizedRefHandles()
1457     LIMITED_METHOD_CONTRACT;
1459     return SystemDomain::System()->GetTotalNumSizedRefHandles();
// Reports whether the given AppDomain is undergoing a rude unload.
// NOTE(review): the return statement is not visible in this excerpt —
// presumably a constant since rude unload is not supported here; confirm
// against the full file.
1463 bool GCToEEInterface::AppDomainIsRudeUnload(void *appDomain)
1465     LIMITED_METHOD_CONTRACT;
// Asks the debugger/diagnostics GC-notification table whether anyone has
// registered for a GC_MARK_END notification covering the condemned
// generation (bit 1<<condemnedGeneration). Used by the GC to decide
// whether to pause for survivor analysis.
// NOTE(review): the return paths are elided from this excerpt.
1470 bool GCToEEInterface::AnalyzeSurvivorsRequested(int condemnedGeneration)
1472     LIMITED_METHOD_CONTRACT;
1474     // Is the list active?
1475     GcNotifications gn(g_pGcNotificationTable);
1478         GcEvtArgs gea = { GC_MARK_END, { (1<<condemnedGeneration) } };
1479         if (gn.GetNotification(gea) != 0)
1488 void GCToEEInterface::AnalyzeSurvivorsFinished(int condemnedGeneration)
1490 LIMITED_METHOD_CONTRACT;
1492 // Is the list active?
1493 GcNotifications gn(g_pGcNotificationTable);
1496 GcEvtArgs gea = { GC_MARK_END, { (1<<condemnedGeneration) } };
1497 if (gn.GetNotification(gea) != 0)
1499 DACNotify::DoGCNotification(gea);