// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.

 * GCToEEInterface implementation

void GCToEEInterface::SuspendEE(SUSPEND_REASON reason)

    static_assert_no_msg(SUSPEND_FOR_GC == (int)ThreadSuspend::SUSPEND_FOR_GC);
    static_assert_no_msg(SUSPEND_FOR_GC_PREP == (int)ThreadSuspend::SUSPEND_FOR_GC_PREP);

    _ASSERTE(reason == SUSPEND_FOR_GC || reason == SUSPEND_FOR_GC_PREP);

    ThreadSuspend::SuspendEE((ThreadSuspend::SUSPEND_REASON)reason);

void GCToEEInterface::RestartEE(bool bFinishedGC)

    ThreadSuspend::RestartEE(bFinishedGC, TRUE);

VOID GCToEEInterface::SyncBlockCacheWeakPtrScan(HANDLESCANPROC scanProc, uintptr_t lp1, uintptr_t lp2)

    SyncBlockCache::GetSyncBlockCache()->GCWeakPtrScan(scanProc, lp1, lp2);
// The EE can perform post-stack-scanning actions while the user threads
// are still suspended.
VOID GCToEEInterface::AfterGcScanRoots (int condemned, int max_gen,

#ifdef FEATURE_COMINTEROP
    // Go through all the app domains and for each one detach all the *unmarked* RCWs to prevent
    // the RCW cache from resurrecting them.
    UnsafeAppDomainIterator i(TRUE);

            i.GetDomain()->DetachRCWs();

#endif // FEATURE_COMINTEROP
 * Scan all stack roots

static void ScanStackRoots(Thread * pThread, promote_func* fn, ScanContext* sc)

    ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE();

    // Either we are in a concurrent situation (in which case the thread is unknown to
    // us), or we are performing a synchronous GC and we are the GC thread, holding
    // the threadstore lock.

    _ASSERTE(dbgOnly_IsSpecialEEThread() ||
             GetThread() == NULL ||
             // this is for background GC threads which always call this when EE is suspended.
             IsGCSpecialThread() ||
             (GetThread() == ThreadSuspend::GetSuspensionThread() && ThreadStore::HoldingThreadStore()));

    pThread->SetHasPromotedBytes();

    Frame* pTopFrame = pThread->GetFrame();
    Object ** topStack = (Object **)pTopFrame;
    if ((pTopFrame != ((Frame*)-1))
        && (pTopFrame->GetVTablePtr() == InlinedCallFrame::GetMethodFrameVPtr())) {
        // It is an InlinedCallFrame. Get SP from it.
        InlinedCallFrame* pInlinedFrame = (InlinedCallFrame*)pTopFrame;
        topStack = (Object **)pInlinedFrame->GetCallSiteSP();
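
    // stack_limit records the top of the range this scan will cover for the thread;
    // consumers on the GC side can use it to bound stack-pointer checks during the
    // scan. (A note on the field's apparent intent; its uses live in the GC.)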
    sc->stack_limit = (uintptr_t)topStack;
#ifdef FEATURE_CONSERVATIVE_GC
    if (g_pConfig->GetGCConservative())

        // Conservative stack root reporting
        // We will treat everything on the stack as a pinned interior GC pointer.
        // Since we report everything as pinned, we don't need to run the following
        // code for the relocation phase.

        Object ** bottomStack = (Object **) pThread->GetCachedStackBase();

        for (walk = topStack; walk < bottomStack; walk ++)

            if (((void*)*walk > (void*)bottomStack || (void*)*walk < (void*)topStack) &&
                ((void*)*walk >= (void*)g_lowest_address && (void*)*walk <= (void*)g_highest_address)

                //DbgPrintf("promote " FMT_ADDR " : " FMT_ADDR "\n", walk, *walk);
                fn(walk, sc, GC_CALL_INTERIOR|GC_CALL_PINNED);
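
        // For illustration (made-up addresses, not from this file): with topStack at
        // 0x1000 and bottomStack at 0x2000, a stack word holding a value that lies
        // inside [g_lowest_address, g_highest_address] but outside the stack's own
        // range is reported through fn() as GC_CALL_INTERIOR|GC_CALL_PINNED; the GC
        // resolves it to its containing object and pins that object. Words pointing
        // back into the stack itself are skipped, filtering out saved frame pointers
        // and similar stack-internal values.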
    // Also ask the explicit Frames to report any references they might know about.
    // Generally these will be a subset of the objects reported below but there's
    // nothing that guarantees that and in the specific case of a GC protect frame the
    // references it protects may live at a lower address than the frame itself (and
    // thus escape the stack range we scanned above).
    Frame *pFrame = pThread->GetFrame();
    while (pFrame != FRAME_TOP)

        pFrame->GcScanRoots(fn, sc);
        pFrame = pFrame->PtrNextFrame();

    unsigned flagsStackWalk = ALLOW_ASYNC_STACK_WALK | ALLOW_INVALID_OBJECTS;
#if defined(WIN64EXCEPTIONS)
    flagsStackWalk |= GC_FUNCLET_REFERENCE_REPORTING;
#endif // defined(WIN64EXCEPTIONS)
    pThread->StackWalkFrames( GcStackCrawlCallBack, &gcctx, flagsStackWalk);
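
// StackWalkFrames drives GcStackCrawlCallBack over each managed frame on this
// thread's stack; the JIT-generated GC info tells the callback which registers and
// stack slots hold live references, and each is reported through the promote_func.
// On platforms with funclet-based EH (WIN64EXCEPTIONS), GC_FUNCLET_REFERENCE_REPORTING
// asks the walker to account for funclet frames when reporting references. (A
// summary of the surrounding machinery for orientation; the callback lives elsewhere.)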
void GCToEEInterface::GcScanRoots(promote_func* fn, int condemned, int max_gen, ScanContext* sc)

    STRESS_LOG1(LF_GCROOTS, LL_INFO10, "GCScan: Promotion Phase = %d\n", sc->promotion);

    // In server GC, we should be competing for marking the statics
    if (GCHeapUtilities::MarkShouldCompeteForStatics())

        if (condemned == max_gen && sc->promotion)

            SystemDomain::EnumAllStaticGCRefs(fn, sc);

    Thread* pThread = NULL;
    while ((pThread = ThreadStore::GetThreadList(pThread)) != NULL)

        STRESS_LOG2(LF_GC | LF_GCROOTS, LL_INFO100, "{ Starting scan of Thread %p ID = %x\n", pThread, pThread->GetThreadId());

        if (GCHeapUtilities::GetGCHeap()->IsThreadUsingAllocationContextHeap(
                pThread->GetAllocContext(), sc->thread_number))

            sc->thread_under_crawl = pThread;
#ifdef FEATURE_EVENT_TRACE
            sc->dwEtwRootKind = kEtwGCRootKindStack;
#endif // FEATURE_EVENT_TRACE
            ScanStackRoots(pThread, fn, sc);
#ifdef FEATURE_EVENT_TRACE
            sc->dwEtwRootKind = kEtwGCRootKindOther;
#endif // FEATURE_EVENT_TRACE

        STRESS_LOG2(LF_GC | LF_GCROOTS, LL_INFO100, "Ending scan of Thread %p ID = 0x%x }\n", pThread, pThread->GetThreadId());
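
// Orientation note: under server GC each heap has its own scanning thread, and
// sc->thread_number identifies which scanner is running this loop. The
// IsThreadUsingAllocationContextHeap check partitions managed threads among the
// scanners so that each thread's stack is scanned exactly once (an inference from
// the calls above rather than a comment carried over from this file).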
void GCToEEInterface::GcStartWork (int condemned, int max_gen)

    // Update AppDomain stage here.
    SystemDomain::System()->ProcessClearingDomains();

    // Validate byrefs pinned by IL stubs since the last GC.
    StubHelpers::ProcessByrefValidationList();
#endif // VERIFY_HEAP

    ExecutionManager::CleanupCodeHeaps();

#ifdef FEATURE_EVENT_TRACE
    ETW::TypeSystemLog::Cleanup();

#ifdef FEATURE_COMINTEROP

    // Let GC detect managed/native cycles with input from Jupiter
    // 1. Report reference from RCW to CCW based on native reference in Jupiter
    // 2. Identify the subset of CCWs that needs to be rooted

    // We'll build the references from RCW to CCW using
    // 1. Preallocated arrays
    // 2. Dependent handles

    RCWWalker::OnGCStarted(condemned);
#endif // FEATURE_COMINTEROP
    if (condemned == max_gen)

        ThreadStore::s_pThreadStore->OnMaxGenerationGCStarted();

void GCToEEInterface::GcDone(int condemned)

#ifdef FEATURE_COMINTEROP

    // Tell Jupiter GC has finished

    RCWWalker::OnGCFinished(condemned);
#endif // FEATURE_COMINTEROP

bool GCToEEInterface::RefCountedHandleCallbacks(Object * pObject)

#ifdef FEATURE_COMINTEROP
    //<REVISIT_TODO>@todo optimize the access to the ref-count
    ComCallWrapper* pWrap = ComCallWrapper::GetWrapperForObject((OBJECTREF)pObject);
    _ASSERTE(pWrap != NULL);

    return !!pWrap->IsWrapperActive();

void GCToEEInterface::GcBeforeBGCSweepWork()

    // Validate byrefs pinned by IL stubs since the last GC.
    StubHelpers::ProcessByrefValidationList();
#endif // VERIFY_HEAP

void GCToEEInterface::SyncBlockCacheDemote(int max_gen)

    SyncBlockCache::GetSyncBlockCache()->GCDone(TRUE, max_gen);

void GCToEEInterface::SyncBlockCachePromotionsGranted(int max_gen)

    SyncBlockCache::GetSyncBlockCache()->GCDone(FALSE, max_gen);

uint32_t GCToEEInterface::GetActiveSyncBlockCount()

    return SyncBlockCache::GetSyncBlockCache()->GetActiveCount();

gc_alloc_context * GCToEEInterface::GetAllocContext()

    Thread* pThread = ::GetThread();
    assert(pThread != nullptr);
    return pThread->GetAllocContext();

void GCToEEInterface::GcEnumAllocContexts(enum_alloc_context_func* fn, void* param)

    if (GCHeapUtilities::UseThreadAllocationContexts())

        Thread * pThread = NULL;
        while ((pThread = ThreadStore::GetThreadList(pThread)) != NULL)

            fn(pThread->GetAllocContext(), param);

        fn(&g_global_alloc_context, param);
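
// A minimal sketch of a consumer, assuming a hypothetical callback (illustration
// only; gc_alloc_context's alloc_ptr/alloc_limit fields are part of the GC interface):
//
//     void ZeroAllocContext(gc_alloc_context* acontext, void* param)
//     {
//         // Force the owning thread's next allocation down the slow path so it
//         // acquires a fresh allocation region from the GC.
//         acontext->alloc_ptr = nullptr;
//         acontext->alloc_limit = nullptr;
//     }
//
//     GCToEEInterface::GcEnumAllocContexts(&ZeroAllocContext, nullptr);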
uint8_t* GCToEEInterface::GetLoaderAllocatorObjectForGC(Object* pObject)

    return pObject->GetMethodTable()->GetLoaderAllocatorObjectForGC();

bool GCToEEInterface::IsPreemptiveGCDisabled()

    Thread* pThread = ::GetThread();

    return !!pThread->PreemptiveGCDisabled();
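
// "Preemptive GC disabled" is the CLR's name for cooperative mode: the thread may
// be manipulating object references directly, so the GC must bring it to a safe
// point before collecting. In preemptive mode the thread promises not to touch the
// GC heap, and a collection may proceed while it runs. The next two functions
// toggle the current thread between these modes.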
bool GCToEEInterface::EnablePreemptiveGC()

    bool bToggleGC = false;
    Thread* pThread = ::GetThread();

    bToggleGC = !!pThread->PreemptiveGCDisabled();

    pThread->EnablePreemptiveGC();

void GCToEEInterface::DisablePreemptiveGC()

    Thread* pThread = ::GetThread();

    pThread->DisablePreemptiveGC();

Thread* GCToEEInterface::GetThread()

    return ::GetThread();

struct BackgroundThreadStubArgs

    GCBackgroundThreadFunction threadStart;

    CLREvent threadStartedEvent;
DWORD WINAPI BackgroundThreadStub(void* arg)

    BackgroundThreadStubArgs* stubArgs = (BackgroundThreadStubArgs*)arg;
    assert (stubArgs->thread != NULL);

    ClrFlsSetThreadType (ThreadType_GC);
    stubArgs->thread->SetGCSpecial(true);
    STRESS_LOG_RESERVE_MEM (GC_STRESSLOG_MULTIPLY);

    stubArgs->hasStarted = !!stubArgs->thread->HasStarted(FALSE);

    Thread* thread = stubArgs->thread;
    GCBackgroundThreadFunction realThreadStart = stubArgs->threadStart;
    void* realThreadArg = stubArgs->arg;
    bool hasStarted = stubArgs->hasStarted;

    stubArgs->threadStartedEvent.Set();
    // The stubArgs cannot be used once the event is set, since that releases wait on the
    // event in the function that created this thread and the stubArgs go out of scope.

    result = realThreadStart(realThreadArg);
    DestroyThread(thread);
#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
inline BOOL ShouldTrackMovementForProfilerOrEtw()

    if (CORProfilerTrackGC())

#ifdef FEATURE_EVENT_TRACE
    if (ETW::GCLog::ShouldTrackMovementForEtw())

#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)

void ProfScanRootsHelper(Object** ppObject, ScanContext *pSC, uint32_t dwFlags)

#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
    Object *pObj = *ppObject;
    if (dwFlags & GC_CALL_INTERIOR)

        pObj = GCHeapUtilities::GetGCHeap()->GetContainingObject(pObj, true);

    ScanRootsHelper(pObj, ppObject, pSC, dwFlags);
#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)

// TODO - at some point we would like to completely decouple profiling
// from ETW tracing using a pattern similar to this, where the
// ProfilingScanContext has flags about whether or not certain things
// should be tracked, and each one of these ProfilerShouldXYZ functions
// will check these flags and determine what to do based upon that.
// GCProfileWalkHeapWorker can, in turn, call those methods without fear
// of things being ifdef'd out.

// Returns TRUE if GC profiling is enabled and the profiler
// should scan dependent handles, FALSE otherwise.
BOOL ProfilerShouldTrackConditionalWeakTableElements()

#if defined(GC_PROFILING)
    return CORProfilerTrackConditionalWeakTableElements();

#endif // defined (GC_PROFILING)

// If GC profiling is enabled, informs the profiler that we are done
// tracing dependent handles.
void ProfilerEndConditionalWeakTableElementReferences(void* heapId)

#if defined (GC_PROFILING)
    g_profControlBlock.pProfInterface->EndConditionalWeakTableElementReferences(heapId);

    UNREFERENCED_PARAMETER(heapId);
#endif // defined (GC_PROFILING)

// If GC profiling is enabled, informs the profiler that we are done
// tracing root references.
void ProfilerEndRootReferences2(void* heapId)

#if defined (GC_PROFILING)
    g_profControlBlock.pProfInterface->EndRootReferences2(heapId);

    UNREFERENCED_PARAMETER(heapId);
#endif // defined (GC_PROFILING)
void GcScanRootsForProfilerAndETW(promote_func* fn, int condemned, int max_gen, ScanContext* sc)

    Thread* pThread = NULL;
    while ((pThread = ThreadStore::GetThreadList(pThread)) != NULL)

        sc->thread_under_crawl = pThread;
#ifdef FEATURE_EVENT_TRACE
        sc->dwEtwRootKind = kEtwGCRootKindStack;
#endif // FEATURE_EVENT_TRACE
        ScanStackRoots(pThread, fn, sc);
#ifdef FEATURE_EVENT_TRACE
        sc->dwEtwRootKind = kEtwGCRootKindOther;
#endif // FEATURE_EVENT_TRACE

void ScanHandleForProfilerAndETW(Object** pRef, Object* pSec, uint32_t flags, ScanContext* context, bool isDependent)

    ProfilingScanContext* pSC = (ProfilingScanContext*)context;

    // Give the profiler the objectref.
    if (pSC->fProfilerPinned)

            BEGIN_PIN_PROFILER(CORProfilerTrackGC());
            g_profControlBlock.pProfInterface->RootReference2(

                kEtwGCRootKindHandle,
                (EtwGCRootFlags)flags,

            BEGIN_PIN_PROFILER(CORProfilerTrackConditionalWeakTableElements());
            g_profControlBlock.pProfInterface->ConditionalWeakTableElementReference(

#endif // GC_PROFILING

#if defined(FEATURE_EVENT_TRACE)
    // Notify ETW of the handle
    if (ETW::GCLog::ShouldWalkHeapRootsForEtw())

        ETW::GCLog::RootReference(

            *pRef,  // object being rooted
            pSec,   // pSecondaryNodeForDependentHandle

            flags); // ETW handle flags

#endif // defined(FEATURE_EVENT_TRACE)

// This is called only if we've determined that either:
//     a) The Profiling API wants to do a walk of the heap, and it has pinned the
//     profiler in place (so it cannot be detached), and it's thus safe to call into the
//     b) ETW infrastructure wants to do a walk of the heap either to log roots,
// This can also be called to do a single walk for BOTH a) and b) simultaneously. Since
// ETW can ask for roots, but not objects
#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
void GCProfileWalkHeapWorker(BOOL fProfilerPinned, BOOL fShouldWalkHeapRootsForEtw, BOOL fShouldWalkHeapObjectsForEtw)

    ProfilingScanContext SC(fProfilerPinned);
    unsigned max_generation = GCHeapUtilities::GetGCHeap()->GetMaxGeneration();

    // **** Scan roots: Only scan roots if profiling API wants them or ETW wants them.
    if (fProfilerPinned || fShouldWalkHeapRootsForEtw)

        GcScanRootsForProfilerAndETW(&ProfScanRootsHelper, max_generation, max_generation, &SC);
        SC.dwEtwRootKind = kEtwGCRootKindFinalizer;
        GCHeapUtilities::GetGCHeap()->DiagScanFinalizeQueue(&ProfScanRootsHelper, &SC);

        // Handles are kept independent of wks/svr/concurrent builds
        SC.dwEtwRootKind = kEtwGCRootKindHandle;
        GCHeapUtilities::GetGCHeap()->DiagScanHandles(&ScanHandleForProfilerAndETW, max_generation, &SC);

        // Indicate that regular handle scanning is over, so we can flush the buffered roots
        // to the profiler. (This is for profapi only. ETW will flush after the
        // entire heap walk is complete, via ETW::GCLog::EndHeapDump.)

            ProfilerEndRootReferences2(&SC.pHeapId);

    // **** Scan dependent handles: only if the profiler supports it or ETW wants roots
    if ((fProfilerPinned && ProfilerShouldTrackConditionalWeakTableElements()) ||
        fShouldWalkHeapRootsForEtw)

        // GcScanDependentHandlesForProfiler double-checks
        // CORProfilerTrackConditionalWeakTableElements() before calling into the profiler

        ProfilingScanContext* pSC = &SC;

        // We'll re-use pHeapId (which was either unused (0) or freed by EndRootReferences2
        // (-1)), so reset it to NULL.
        _ASSERTE((*((size_t *)(&pSC->pHeapId)) == (size_t)(-1)) ||
                 (*((size_t *)(&pSC->pHeapId)) == (size_t)(0)));

        GCHeapUtilities::GetGCHeap()->DiagScanDependentHandles(&ScanHandleForProfilerAndETW, max_generation, &SC);

        // Indicate that dependent handle scanning is over, so we can flush the buffered roots
        // to the profiler. (This is for profapi only. ETW will flush after the
        // entire heap walk is complete, via ETW::GCLog::EndHeapDump.)
        if (fProfilerPinned && ProfilerShouldTrackConditionalWeakTableElements())

            ProfilerEndConditionalWeakTableElementReferences(&SC.pHeapId);

    ProfilerWalkHeapContext profilerWalkHeapContext(fProfilerPinned, SC.pvEtwContext);

    // **** Walk objects on heap: only if profiling API wants them or ETW wants them.
    if (fProfilerPinned || fShouldWalkHeapObjectsForEtw)

        GCHeapUtilities::GetGCHeap()->DiagWalkHeap(&HeapWalkHelper, &profilerWalkHeapContext, max_generation, true /* walk the large object heap */);

#ifdef FEATURE_EVENT_TRACE
    // **** Done! Indicate to ETW helpers that the heap walk is done, so any buffers
    // should be flushed into the ETW stream.
    if (fShouldWalkHeapObjectsForEtw || fShouldWalkHeapRootsForEtw)

        ETW::GCLog::EndHeapDump(&profilerWalkHeapContext);

#endif // FEATURE_EVENT_TRACE

#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
void GCProfileWalkHeap()

    BOOL fWalkedHeapForProfiler = FALSE;

#ifdef FEATURE_EVENT_TRACE
    if (ETW::GCLog::ShouldWalkStaticsAndCOMForEtw())
        ETW::GCLog::WalkStaticsAndCOMForETW();

    BOOL fShouldWalkHeapRootsForEtw = ETW::GCLog::ShouldWalkHeapRootsForEtw();
    BOOL fShouldWalkHeapObjectsForEtw = ETW::GCLog::ShouldWalkHeapObjectsForEtw();
#else // !FEATURE_EVENT_TRACE
    BOOL fShouldWalkHeapRootsForEtw = FALSE;
    BOOL fShouldWalkHeapObjectsForEtw = FALSE;
#endif // FEATURE_EVENT_TRACE

#if defined (GC_PROFILING)

    BEGIN_PIN_PROFILER(CORProfilerTrackGC());
    GCProfileWalkHeapWorker(TRUE /* fProfilerPinned */, fShouldWalkHeapRootsForEtw, fShouldWalkHeapObjectsForEtw);
    fWalkedHeapForProfiler = TRUE;

#endif // defined (GC_PROFILING)

#if defined (GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
    // we need to walk the heap if one of GC_PROFILING or FEATURE_EVENT_TRACE
    // is defined, since both of them make use of the walk heap worker.
    if (!fWalkedHeapForProfiler &&
        (fShouldWalkHeapRootsForEtw || fShouldWalkHeapObjectsForEtw))

        GCProfileWalkHeapWorker(FALSE /* fProfilerPinned */, fShouldWalkHeapRootsForEtw, fShouldWalkHeapObjectsForEtw);

#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)

void WalkFReachableObjects(bool isCritical, void* objectID)

    g_profControlBlock.pProfInterface->FinalizeableObjectQueued(isCritical, (ObjectID)objectID);

static fq_walk_fn g_FQWalkFn = &WalkFReachableObjects;
void GCToEEInterface::DiagGCStart(int gen, bool isInduced)

    DiagUpdateGenerationBounds();
    GarbageCollectionStartedCallback(gen, isInduced);

        BEGIN_PIN_PROFILER(CORProfilerTrackGC());

        // When we're walking objects allocated by class, then we don't want to walk the large
        // object heap because then it would count things that may have been around for a while.
        GCHeapUtilities::GetGCHeap()->DiagWalkHeap(&AllocByClassHelper, (void *)&context, 0, false);

        // Notify that we've reached the end of the Gen 0 scan
        g_profControlBlock.pProfInterface->EndAllocByClass(&context);

#endif // GC_PROFILING

void GCToEEInterface::DiagUpdateGenerationBounds()

    if (CORProfilerTrackGC())
        UpdateGenerationBounds();
#endif // GC_PROFILING

void GCToEEInterface::DiagGCEnd(size_t index, int gen, int reason, bool fConcurrent)

    DiagUpdateGenerationBounds();
    GarbageCollectionFinishedCallback();

#endif // GC_PROFILING

void GCToEEInterface::DiagWalkFReachableObjects(void* gcContext)

    if (CORProfilerTrackGC())

        BEGIN_PIN_PROFILER(CORProfilerPresent());
        GCHeapUtilities::GetGCHeap()->DiagWalkFinalizeQueue(gcContext, g_FQWalkFn);

#endif //GC_PROFILING

// Note on last parameter: when calling this for bgc, only ETW
// should be sending these events so that existing profapi profilers
// don't get confused.
void WalkMovedReferences(uint8_t* begin, uint8_t* end,

    ETW::GCLog::MovedReference(begin, end,
                               (fCompacting ? reloc : 0),
void GCToEEInterface::DiagWalkSurvivors(void* gcContext)

#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
    if (ShouldTrackMovementForProfilerOrEtw())

        ETW::GCLog::BeginMovedReferences(&context);
        GCHeapUtilities::GetGCHeap()->DiagWalkSurvivorsWithType(gcContext, &WalkMovedReferences, (void*)context, walk_for_gc);
        ETW::GCLog::EndMovedReferences(context);

#endif //GC_PROFILING || FEATURE_EVENT_TRACE

void GCToEEInterface::DiagWalkLOHSurvivors(void* gcContext)

#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
    if (ShouldTrackMovementForProfilerOrEtw())

        ETW::GCLog::BeginMovedReferences(&context);
        GCHeapUtilities::GetGCHeap()->DiagWalkSurvivorsWithType(gcContext, &WalkMovedReferences, (void*)context, walk_for_loh);
        ETW::GCLog::EndMovedReferences(context);

#endif //GC_PROFILING || FEATURE_EVENT_TRACE

void GCToEEInterface::DiagWalkBGCSurvivors(void* gcContext)

#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
    if (ShouldTrackMovementForProfilerOrEtw())

        ETW::GCLog::BeginMovedReferences(&context);
        GCHeapUtilities::GetGCHeap()->DiagWalkSurvivorsWithType(gcContext, &WalkMovedReferences, (void*)context, walk_for_bgc);
        ETW::GCLog::EndMovedReferences(context);

#endif //GC_PROFILING || FEATURE_EVENT_TRACE
void GCToEEInterface::StompWriteBarrier(WriteBarrierParameters* args)

    int stompWBCompleteActions = SWB_PASS;
    bool is_runtime_suspended = false;

    assert(args != nullptr);
    switch (args->operation)

    case WriteBarrierOp::StompResize:
        // StompResize requires a new card table, a new lowest address, and
        // a new highest address
        assert(args->card_table != nullptr);
        assert(args->lowest_address != nullptr);
        assert(args->highest_address != nullptr);

        g_card_table = args->card_table;

#ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
        assert(args->card_bundle_table != nullptr);
        g_card_bundle_table = args->card_bundle_table;

#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
        if (g_sw_ww_enabled_for_gc_heap && (args->write_watch_table != nullptr))

            assert(args->is_runtime_suspended);
            g_sw_ww_table = args->write_watch_table;

#endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP

        stompWBCompleteActions |= ::StompWriteBarrierResize(args->is_runtime_suspended, args->requires_upper_bounds_check);

        // We need to make sure that other threads executing checked write barriers
        // will see the g_card_table update before the g_lowest/highest_address updates.
        // Otherwise, the checked write barrier may AV accessing the old card table
        // with an address that it does not cover.
        //
        // Even x86's total store ordering is insufficient here because threads reading
        // g_card_table do so via the instruction cache, whereas g_lowest/highest_address
        // are read via the data cache.
        //
        // The g_card_table update is covered by section 8.1.3 of the Intel Software
        // Development Manual, Volume 3A (System Programming Guide, Part 1), about
        // "cross-modifying code": We need all _executing_ threads to invalidate
        // their instruction cache, which FlushProcessWriteBuffers achieves by sending
        // an IPI (inter-processor interrupt).

        if (stompWBCompleteActions & SWB_ICACHE_FLUSH)

            // flushing icache on current processor (thread)
            ::FlushWriteBarrierInstructionCache();
            // asking other processors (threads) to invalidate their icache
            FlushProcessWriteBuffers();
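
        // Publish the new bounds only after the card table update is visible to
        // every executing thread (see the flushes above); the VolatileStore keeps
        // the g_highest_address update from becoming visible before the stores
        // that precede it. (A reading of the intent of this sequence.)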
        g_lowest_address = args->lowest_address;
        VolatileStore(&g_highest_address, args->highest_address);

        // Need to update again for changes to g_highest_address and g_lowest_address
        is_runtime_suspended = (stompWBCompleteActions & SWB_EE_RESTART) || args->is_runtime_suspended;
        stompWBCompleteActions |= ::StompWriteBarrierResize(is_runtime_suspended, args->requires_upper_bounds_check);

        is_runtime_suspended = (stompWBCompleteActions & SWB_EE_RESTART) || args->is_runtime_suspended;
        if (!is_runtime_suspended)

            // If the runtime is not suspended, force the updated state to be visible to all threads

        if (stompWBCompleteActions & SWB_EE_RESTART)

            assert(!args->is_runtime_suspended &&
                   "if runtime was suspended in patching routines then it was in running state at the beginning");
            ThreadSuspend::RestartEE(FALSE, TRUE);

        return; // unlike other branches we have already done cleanup so bailing out here

    case WriteBarrierOp::StompEphemeral:
        // StompEphemeral requires a new ephemeral low and a new ephemeral high
        assert(args->ephemeral_low != nullptr);
        assert(args->ephemeral_high != nullptr);
        g_ephemeral_low = args->ephemeral_low;
        g_ephemeral_high = args->ephemeral_high;
        stompWBCompleteActions |= ::StompWriteBarrierEphemeral(args->is_runtime_suspended);

    case WriteBarrierOp::Initialize:
        // This operation should only be invoked once, upon initialization.
        assert(g_card_table == nullptr);
        assert(g_lowest_address == nullptr);
        assert(g_highest_address == nullptr);
        assert(args->card_table != nullptr);
        assert(args->lowest_address != nullptr);
        assert(args->highest_address != nullptr);
        assert(args->ephemeral_low != nullptr);
        assert(args->ephemeral_high != nullptr);
        assert(args->is_runtime_suspended && "the runtime must be suspended here!");
        assert(!args->requires_upper_bounds_check && "the ephemeral generation must be at the top of the heap!");

        g_card_table = args->card_table;

#ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
        assert(g_card_bundle_table == nullptr);
        g_card_bundle_table = args->card_bundle_table;

        g_lowest_address = args->lowest_address;
        g_highest_address = args->highest_address;
        stompWBCompleteActions |= ::StompWriteBarrierResize(true, false);

        // StompWriteBarrierResize does not necessarily bash g_ephemeral_low
        // usages, so we must do so here. This is particularly true on x86,
        // where StompWriteBarrierResize will not bash g_ephemeral_low when
        // called with the parameters (true, false), as it is above.
        g_ephemeral_low = args->ephemeral_low;
        g_ephemeral_high = args->ephemeral_high;
        stompWBCompleteActions |= ::StompWriteBarrierEphemeral(true);
    case WriteBarrierOp::SwitchToWriteWatch:
#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
        assert(args->write_watch_table != nullptr);
        assert(args->is_runtime_suspended && "the runtime must be suspended here!");
        g_sw_ww_table = args->write_watch_table;
        g_sw_ww_enabled_for_gc_heap = true;
        stompWBCompleteActions |= ::SwitchToWriteWatchBarrier(true);

        assert(!"should never be called without FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP");
#endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP

    case WriteBarrierOp::SwitchToNonWriteWatch:
#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
        assert(args->is_runtime_suspended && "the runtime must be suspended here!");

        g_sw_ww_enabled_for_gc_heap = false;
        stompWBCompleteActions |= ::SwitchToNonWriteWatchBarrier(true);

        assert(!"should never be called without FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP");
#endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP

        assert(!"unknown WriteBarrierOp enum");

    if (stompWBCompleteActions & SWB_ICACHE_FLUSH)

        ::FlushWriteBarrierInstructionCache();

    if (stompWBCompleteActions & SWB_EE_RESTART)

        assert(!args->is_runtime_suspended &&
               "if runtime was suspended in patching routines then it was in running state at the beginning");
        ThreadSuspend::RestartEE(FALSE, TRUE);
void GCToEEInterface::EnableFinalization(bool foundFinalizers)

    if (foundFinalizers || FinalizerThread::HaveExtraWorkForFinalizer())

        FinalizerThread::EnableFinalization();

void GCToEEInterface::HandleFatalError(unsigned int exitCode)

    EEPOLICY_HANDLE_FATAL_ERROR(exitCode);

bool GCToEEInterface::ShouldFinalizeObjectForUnload(AppDomain* pDomain, Object* obj)

    // CoreCLR does not have appdomains, so this code path is dead. Other runtimes may
    // choose to inspect the object being finalized here.
    // [DESKTOP TODO] Desktop looks for "agile and finalizable" objects and may choose
    // to move them to a new app domain instead of finalizing them here.
bool GCToEEInterface::ForceFullGCToBeBlocking()

    // In theory, there is nothing fundamental that requires an AppDomain unload to induce
    // a blocking GC. In the past, this workaround was done to fix a stress AV, but the root
    // cause of the AV was never discovered and this workaround remains in place.
    //
    // It would be nice if this were not necessary. However, it's not clear if the aforementioned
    // stress bug is still lurking and will return if this workaround is removed. We should
    // do some experiments: remove this workaround and see if the stress bug still repros.
    // If so, we should find the root cause instead of relying on this.
    return !!SystemDomain::System()->RequireAppDomainCleanup();
bool GCToEEInterface::EagerFinalized(Object* obj)

    MethodTable* pMT = obj->GetGCSafeMethodTable();
    if (pMT == pWeakReferenceMT ||
        pMT->GetCanonicalMethodTable() == pWeakReferenceOfTCanonMT)

        FinalizeWeakReference(obj);

MethodTable* GCToEEInterface::GetFreeObjectMethodTable()

    assert(g_pFreeObjectMethodTable != nullptr);
    return g_pFreeObjectMethodTable;
// These are arbitrary; we shouldn't ever have config keys or values
// longer than these lengths.
const size_t MaxConfigKeyLength = 255;
const size_t MaxConfigValueLength = 255;
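
// Lookup order in the functions below: a handful of well-known keys ("gcServer",
// "gcConcurrent", "GCRetainVM") are answered from startup state, and anything else
// is forwarded to CLRConfig. A sketch of a call site (illustrative only):
//
//     bool isServerGC;
//     if (GCToEEInterface::GetBooleanConfigValue("gcServer", &isServerGC))
//     {
//         // the key was recognized or explicitly configured; isServerGC holds the value
//     }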
bool GCToEEInterface::GetBooleanConfigValue(const char* key, bool* value)

    // these configuration values are given to us via startup flags.
    if (strcmp(key, "gcServer") == 0)

        *value = g_heap_type == GC_HEAP_SVR;

    if (strcmp(key, "gcConcurrent") == 0)

        *value = !!g_pConfig->GetGCconcurrent();

    if (strcmp(key, "GCRetainVM") == 0)

        *value = !!g_pConfig->GetGCRetainVM();

    WCHAR configKey[MaxConfigKeyLength];
    if (MultiByteToWideChar(CP_ACP, 0, key, -1 /* key is null-terminated */, configKey, MaxConfigKeyLength) == 0)

        // whatever this is... it's not something we care about. (It was too long, wasn't unicode, etc.)

    // otherwise, ask the config subsystem.
    if (CLRConfig::IsConfigOptionSpecified(configKey))

        CLRConfig::ConfigDWORDInfo info { configKey, 0, CLRConfig::EEConfig_default };
        *value = CLRConfig::GetConfigValue(info) != 0;

bool GCToEEInterface::GetIntConfigValue(const char* key, int64_t* value)

    WCHAR configKey[MaxConfigKeyLength];
    if (MultiByteToWideChar(CP_ACP, 0, key, -1 /* key is null-terminated */, configKey, MaxConfigKeyLength) == 0)

        // whatever this is... it's not something we care about. (It was too long, wasn't unicode, etc.)

    if (CLRConfig::IsConfigOptionSpecified(configKey))

        CLRConfig::ConfigDWORDInfo info { configKey, 0, CLRConfig::EEConfig_default };
        *value = CLRConfig::GetConfigValue(info);
bool GCToEEInterface::GetStringConfigValue(const char* key, const char** value)

    WCHAR configKey[MaxConfigKeyLength];
    if (MultiByteToWideChar(CP_ACP, 0, key, -1 /* key is null-terminated */, configKey, MaxConfigKeyLength) == 0)

        // whatever this is... it's not something we care about. (It was too long, wasn't unicode, etc.)

    CLRConfig::ConfigStringInfo info { configKey, CLRConfig::EEConfig_default };
    LPWSTR out = CLRConfig::GetConfigValue(info);

    // not allocated on the stack since it escapes this function
    AStringHolder configResult = new (nothrow) char[MaxConfigValueLength];

        CLRConfig::FreeConfigString(out);

    if (WideCharToMultiByte(CP_ACP, 0, out, -1 /* out is null-terminated */,
            configResult.GetValue(), MaxConfigValueLength, nullptr, nullptr) == 0)

        // this should only happen if the config subsystem gives us a string that's not valid
        // unicode.
        CLRConfig::FreeConfigString(out);

    *value = configResult.Extract();
    CLRConfig::FreeConfigString(out);
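
// Ownership note: the narrow string returned through *value above is a new[]
// allocation (via the AStringHolder) that the caller is expected to hand back to
// FreeStringConfigValue below; the wide string from CLRConfig is freed before returning.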
void GCToEEInterface::FreeStringConfigValue(const char* value)

bool GCToEEInterface::IsGCThread()

    return !!::IsGCThread();

bool GCToEEInterface::WasCurrentThreadCreatedByGC()

    return !!::IsGCSpecialThread();

struct SuspendableThreadStubArguments

    void (*ThreadStart)(void*);

    CLREvent ThreadStartedEvent;

struct ThreadStubArguments

    void (*ThreadStart)(void*);

    CLREvent ThreadStartedEvent;

const size_t MaxThreadNameSize = 255;
bool CreateSuspendableThread(
    void (*threadStart)(void*),

    LIMITED_METHOD_CONTRACT;

    SuspendableThreadStubArguments args;
    args.Argument = argument;
    args.ThreadStart = threadStart;
    args.Thread = nullptr;
    args.HasStarted = false;
    if (!args.ThreadStartedEvent.CreateAutoEventNoThrow(FALSE))

    args.Thread = SetupUnstartedThread(FALSE);

    EX_END_CATCH(SwallowAllExceptions)

        args.ThreadStartedEvent.CloseEvent();

    auto threadStub = [](void* argument) -> DWORD

        SuspendableThreadStubArguments* args = static_cast<SuspendableThreadStubArguments*>(argument);
        assert(args != nullptr);

        ClrFlsSetThreadType(ThreadType_GC);
        args->Thread->SetGCSpecial(true);
        STRESS_LOG_RESERVE_MEM(GC_STRESSLOG_MULTIPLY);
        args->HasStarted = !!args->Thread->HasStarted(false);

        Thread* thread = args->Thread;
        auto threadStart = args->ThreadStart;
        void* threadArgument = args->Argument;
        bool hasStarted = args->HasStarted;
        args->ThreadStartedEvent.Set();

        // The stubArgs cannot be used once the event is set, since that releases wait on the
        // event in the function that created this thread and the stubArgs go out of scope.

        threadStart(threadArgument);
        DestroyThread(thread);

    InlineSString<MaxThreadNameSize> wideName;
    const WCHAR* namePtr = nullptr;

    if (name != nullptr)

        wideName.SetUTF8(name);
        namePtr = wideName.GetUnicode();

        // we're not obligated to provide a name - if it's not valid,
        // just report nullptr as the name.

    EX_END_CATCH(SwallowAllExceptions)

    if (!args.Thread->CreateNewThread(0, threadStub, &args, namePtr))

        args.Thread->DecExternalCount(FALSE);
        args.ThreadStartedEvent.CloseEvent();

    args.Thread->SetBackground(TRUE, FALSE);
    args.Thread->StartThread();

    // Wait for the thread to be in its main loop
    uint32_t res = args.ThreadStartedEvent.Wait(INFINITE, FALSE);
    args.ThreadStartedEvent.CloseEvent();
    _ASSERTE(res == WAIT_OBJECT_0);

    if (!args.HasStarted)

        // The thread has failed to start and the Thread object was destroyed in the Thread::HasStarted
        // failure code path.
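
// CreateSuspendableThread sets up a full EE Thread object (SetupUnstartedThread
// above), so the new thread is known to the thread store and participates in EE
// suspension as a GC-special thread. CreateNonSuspendableThread, below, creates a
// bare utility thread with no Thread object, invisible to the suspension machinery
// (hence "non-suspendable").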
bool CreateNonSuspendableThread(
    void (*threadStart)(void*),

    LIMITED_METHOD_CONTRACT;

    ThreadStubArguments args;
    args.Argument = argument;
    args.ThreadStart = threadStart;
    args.Thread = INVALID_HANDLE_VALUE;
    if (!args.ThreadStartedEvent.CreateAutoEventNoThrow(FALSE))

    auto threadStub = [](void* argument) -> DWORD

        ThreadStubArguments* args = static_cast<ThreadStubArguments*>(argument);
        assert(args != nullptr);

        ClrFlsSetThreadType(ThreadType_GC);
        STRESS_LOG_RESERVE_MEM(GC_STRESSLOG_MULTIPLY);

        args->HasStarted = true;
        auto threadStart = args->ThreadStart;
        void* threadArgument = args->Argument;
        args->ThreadStartedEvent.Set();

        // The stub args cannot be used once the event is set, since that releases wait on the
        // event in the function that created this thread and the stubArgs go out of scope.
        threadStart(threadArgument);

    args.Thread = Thread::CreateUtilityThread(Thread::StackSize_Medium, threadStub, &args);
    if (args.Thread == INVALID_HANDLE_VALUE)

        args.ThreadStartedEvent.CloseEvent();

    // Wait for the thread to be in its main loop
    uint32_t res = args.ThreadStartedEvent.Wait(INFINITE, FALSE);
    args.ThreadStartedEvent.CloseEvent();
    _ASSERTE(res == WAIT_OBJECT_0);

    CloseHandle(args.Thread);

} // anonymous namespace
bool GCToEEInterface::CreateThread(void (*threadStart)(void*), void* arg, bool is_suspendable, const char* name)

    LIMITED_METHOD_CONTRACT;

        return CreateSuspendableThread(threadStart, arg, name);

        return CreateNonSuspendableThread(threadStart, arg, name);

void GCToEEInterface::WalkAsyncPinnedForPromotion(Object* object, ScanContext* sc, promote_func* callback)

    LIMITED_METHOD_CONTRACT;

    assert(object != nullptr);
    assert(sc != nullptr);
    assert(callback != nullptr);
    if (object->GetGCSafeMethodTable() != g_pOverlappedDataClass)

        // not an overlapped data object - nothing to do.

    // reporting the pinned user objects
    OverlappedDataObject *pOverlapped = (OverlappedDataObject *)object;
    if (pOverlapped->m_userObject != NULL)

        //callback(OBJECTREF_TO_UNCHECKED_OBJECTREF(pOverlapped->m_userObject), (ScanContext *)lp1, GC_CALL_PINNED);
        if (pOverlapped->m_isArray)

            // OverlappedDataObject is very special. An async pin handle keeps it alive.
            // During GC, we also make sure
            // 1. m_userObject itself does not move if m_userObject is not array
            // 2. Every object pointed by m_userObject does not move if m_userObject is array
            // We do not want to pin m_userObject if it is array. But m_userObject may be updated
            // during relocation phase before OverlappedDataObject is doing relocation.
            // m_userObjectInternal is used to track the location of the m_userObject before it is updated.
            pOverlapped->m_userObjectInternal = static_cast<void*>(OBJECTREFToObject(pOverlapped->m_userObject));
            ArrayBase* pUserObject = (ArrayBase*)OBJECTREFToObject(pOverlapped->m_userObject);
            Object **ppObj = (Object**)pUserObject->GetDataPtr(TRUE);
            size_t num = pUserObject->GetNumComponents();
            for (size_t i = 0; i < num; i++)

                callback(ppObj + i, sc, GC_CALL_PINNED);

            callback(&OBJECTREF_TO_UNCHECKED_OBJECTREF(pOverlapped->m_userObject), (ScanContext *)sc, GC_CALL_PINNED);

    if (pOverlapped->GetAppDomainId() != DefaultADID && pOverlapped->GetAppDomainIndex().m_dwIndex == DefaultADID)

        OverlappedDataObject::MarkCleanupNeededFromGC();
void GCToEEInterface::WalkAsyncPinned(Object* object, void* context, void (*callback)(Object*, Object*, void*))

    LIMITED_METHOD_CONTRACT;

    assert(object != nullptr);
    assert(callback != nullptr);

    if (object->GetGCSafeMethodTable() != g_pOverlappedDataClass)

    OverlappedDataObject *pOverlapped = (OverlappedDataObject *)(object);
    if (pOverlapped->m_userObject != NULL)

        Object * pUserObject = OBJECTREFToObject(pOverlapped->m_userObject);
        callback(object, pUserObject, context);
        if (pOverlapped->m_isArray)

            ArrayBase* pUserArrayObject = (ArrayBase*)pUserObject;
            Object **pObj = (Object**)pUserArrayObject->GetDataPtr(TRUE);
            size_t num = pUserArrayObject->GetNumComponents();
            for (size_t i = 0; i < num; i++)

                callback(pUserObject, pObj[i], context);

IGCToCLREventSink* GCToEEInterface::EventSink()

    LIMITED_METHOD_CONTRACT;

    return &g_gcToClrEventSink;