1 // Licensed to the .NET Foundation under one or more agreements.
2 // The .NET Foundation licenses this file to you under the MIT license.
3 // See the LICENSE file in the project root for more information.
8 * GCToEEInterface implementation
// Suspends the execution engine on behalf of the GC. The GC-side SUSPEND_REASON
// values must match the EE-side ThreadSuspend enum numerically, which the
// static_asserts below verify at compile time.
14 void GCToEEInterface::SuspendEE(SUSPEND_REASON reason)
18     static_assert_no_msg(SUSPEND_FOR_GC == (int)ThreadSuspend::SUSPEND_FOR_GC);
19     static_assert_no_msg(SUSPEND_FOR_GC_PREP == (int)ThreadSuspend::SUSPEND_FOR_GC_PREP);
21     _ASSERTE(reason == SUSPEND_FOR_GC || reason == SUSPEND_FOR_GC_PREP);
23     ThreadSuspend::SuspendEE((ThreadSuspend::SUSPEND_REASON)reason);
     // Notify the debugger after the EE is suspended, before the collection runs.
25     g_pDebugInterface->BeforeGarbageCollection();
// Restarts the execution engine after a GC. The debugger is notified first,
// while threads are still suspended, then the EE is resumed.
28 void GCToEEInterface::RestartEE(bool bFinishedGC)
32     g_pDebugInterface->AfterGarbageCollection();
34     ThreadSuspend::RestartEE(bFinishedGC, TRUE);
// Forwards a weak-pointer scan request to the sync block cache, invoking
// scanProc (with the opaque lp1/lp2 arguments) for each weak reference it holds.
37 VOID GCToEEInterface::SyncBlockCacheWeakPtrScan(HANDLESCANPROC scanProc, uintptr_t lp1, uintptr_t lp2)
46     SyncBlockCache::GetSyncBlockCache()->GCWeakPtrScan(scanProc, lp1, lp2);
50 // EE can perform post stack scanning action, while the
51 // user threads are still suspended
52 VOID GCToEEInterface::AfterGcScanRoots (int condemned, int max_gen,
62 #ifdef FEATURE_COMINTEROP
63     // Go through all the app domains and for each one detach all the *unmarked* RCWs to prevent
64     // the RCW cache from resurrecting them.
65     UnsafeAppDomainIterator i(TRUE);
70         i.GetDomain()->DetachRCWs();
72 #endif // FEATURE_COMINTEROP
76 * Scan all stack roots
// Reports the GC roots on pThread's stack to the promotion callback `fn`:
// (1) optionally a conservative scan of the raw stack range, (2) explicit
// Frames, and (3) a precise stack walk via StackWalkFrames.
79 static void ScanStackRoots(Thread * pThread, promote_func* fn, ScanContext* sc)
87     ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE();
89     // Either we are in a concurrent situation (in which case the thread is unknown to
90     // us), or we are performing a synchronous GC and we are the GC thread, holding
91     // the threadstore lock.
93     _ASSERTE(dbgOnly_IsSpecialEEThread() ||
94              GetThread() == NULL ||
95              // this is for background GC threads which always call this when EE is suspended.
96              IsGCSpecialThread() ||
97              (GetThread() == ThreadSuspend::GetSuspensionThread() && ThreadStore::HoldingThreadStore()));
99     pThread->SetHasPromotedBytes();
     // Determine the top of the scannable stack range. By default it is the
     // address of the top explicit Frame.
101     Frame* pTopFrame = pThread->GetFrame();
102     Object ** topStack = (Object **)pTopFrame;
103     if ((pTopFrame != ((Frame*)-1))
104         && (pTopFrame->GetVTablePtr() == InlinedCallFrame::GetMethodFrameVPtr())) {
105         // It is an InlinedCallFrame. Get SP from it.
106         InlinedCallFrame* pInlinedFrame = (InlinedCallFrame*)pTopFrame;
107         topStack = (Object **)pInlinedFrame->GetCallSiteSP();
110     sc->stack_limit = (uintptr_t)topStack;
112 #ifdef FEATURE_CONSERVATIVE_GC
113     if (g_pConfig->GetGCConservative())
115         // Conservative stack root reporting
116         // We will treat everything on stack as a pinned interior GC pointer
117         // Since we report every thing as pinned, we don't need to run following code for relocation phase.
120         Object ** bottomStack = (Object **) pThread->GetCachedStackBase();
122         for (walk = topStack; walk < bottomStack; walk ++)
            // Only report values that do not point into the stack range itself
            // and that fall within the GC heap's address bounds.
124             if (((void*)*walk > (void*)bottomStack || (void*)*walk < (void*)topStack) &&
125                 ((void*)*walk >= (void*)g_lowest_address && (void*)*walk <= (void*)g_highest_address)
128                 //DbgPrintf("promote " FMT_ADDR " : " FMT_ADDR "\n", walk, *walk);
129                 fn(walk, sc, GC_CALL_INTERIOR|GC_CALL_PINNED);
134         // Also ask the explicit Frames to report any references they might know about.
135         // Generally these will be a subset of the objects reported below but there's
136         // nothing that guarantees that and in the specific case of a GC protect frame the
137         // references it protects may live at a lower address than the frame itself (and
138         // thus escape the stack range we scanned above).
139         Frame *pFrame = pThread->GetFrame();
140         while (pFrame != FRAME_TOP)
142             pFrame->GcScanRoots(fn, sc);
143             pFrame = pFrame->PtrNextFrame();
     // Precise reporting: walk managed frames; invalid objects are tolerated
     // because a concurrent/async walk can observe in-flux stack slots.
149     unsigned flagsStackWalk = ALLOW_ASYNC_STACK_WALK | ALLOW_INVALID_OBJECTS;
150 #if defined(WIN64EXCEPTIONS)
151     flagsStackWalk |= GC_FUNCLET_REFERENCE_REPORTING;
152 #endif // defined(WIN64EXCEPTIONS)
153     pThread->StackWalkFrames( GcStackCrawlCallBack, &gcctx, flagsStackWalk);
// Scans EE-side GC roots: static GC references (for full, promoting GCs) and
// the stacks of all threads whose allocation context belongs to this heap.
157 void GCToEEInterface::GcScanRoots(promote_func* fn, int condemned, int max_gen, ScanContext* sc)
159     STRESS_LOG1(LF_GCROOTS, LL_INFO10, "GCScan: Promotion Phase = %d\n", sc->promotion);
161     // In server GC, we should be competing for marking the statics
162     if (GCHeapUtilities::MarkShouldCompeteForStatics())
        // Statics only need scanning on a full (condemned == max_gen) promotion GC.
164         if (condemned == max_gen && sc->promotion)
166             SystemDomain::EnumAllStaticGCRefs(fn, sc);
170     Thread* pThread = NULL;
171     while ((pThread = ThreadStore::GetThreadList(pThread)) != NULL)
173         STRESS_LOG2(LF_GC | LF_GCROOTS, LL_INFO100, "{ Starting scan of Thread %p ID = %x\n", pThread, pThread->GetThreadId());
        // In server GC each heap scans only the threads assigned to it.
175         if (GCHeapUtilities::GetGCHeap()->IsThreadUsingAllocationContextHeap(
176             pThread->GetAllocContext(), sc->thread_number))
178             sc->thread_under_crawl = pThread;
179 #ifdef FEATURE_EVENT_TRACE
180             sc->dwEtwRootKind = kEtwGCRootKindStack;
181 #endif // FEATURE_EVENT_TRACE
182             ScanStackRoots(pThread, fn, sc);
183 #ifdef FEATURE_EVENT_TRACE
184             sc->dwEtwRootKind = kEtwGCRootKindOther;
185 #endif // FEATURE_EVENT_TRACE
187         STRESS_LOG2(LF_GC | LF_GCROOTS, LL_INFO100, "Ending scan of Thread %p ID = 0x%x }\n", pThread, pThread->GetThreadId());
// EE-side work performed at the start of a GC: byref validation, code heap
// cleanup, ETW type-log cleanup, Jupiter/RCW cycle detection, and max-gen
// GC notification to the thread store.
191 void GCToEEInterface::GcStartWork (int condemned, int max_gen)
201     // Validate byrefs pinned by IL stubs since the last GC.
202     StubHelpers::ProcessByrefValidationList();
203 #endif // VERIFY_HEAP
205     ExecutionManager::CleanupCodeHeaps();
207 #ifdef FEATURE_EVENT_TRACE
208     ETW::TypeSystemLog::Cleanup();
211 #ifdef FEATURE_COMINTEROP
213     // Let GC detect managed/native cycles with input from jupiter
215     // 1. Report reference from RCW to CCW based on native reference in Jupiter
216     // 2. Identify the subset of CCWs that needs to be rooted
218     // We'll build the references from RCW to CCW using
219     // 1. Preallocated arrays
220     // 2. Dependent handles
222     RCWWalker::OnGCStarted(condemned);
223 #endif // FEATURE_COMINTEROP
225     if (condemned == max_gen)
227         ThreadStore::s_pThreadStore->OnMaxGenerationGCStarted();
// EE-side notification that a GC has finished for the given condemned generation.
231 void GCToEEInterface::GcDone(int condemned)
240 #ifdef FEATURE_COMINTEROP
242     // Tell Jupiter GC has finished
244     RCWWalker::OnGCFinished(condemned);
245 #endif // FEATURE_COMINTEROP
// Called for each ref-counted handle during GC; returns true when the object's
// COM callable wrapper is still active, i.e. the handle should be kept alive.
248 bool GCToEEInterface::RefCountedHandleCallbacks(Object * pObject)
257 #ifdef FEATURE_COMINTEROP
258     //<REVISIT_TODO>@todo optimize the access to the ref-count
259     ComCallWrapper* pWrap = ComCallWrapper::GetWrapperForObject((OBJECTREF)pObject);
261     return pWrap != NULL && pWrap->IsWrapperActive();
// EE-side work performed before the background GC sweep phase.
267 void GCToEEInterface::GcBeforeBGCSweepWork()
277     // Validate byrefs pinned by IL stubs since the last GC.
278     StubHelpers::ProcessByrefValidationList();
279 #endif // VERIFY_HEAP
// Notifies the sync block cache that objects were demoted (TRUE = demotion pass).
282 void GCToEEInterface::SyncBlockCacheDemote(int max_gen)
291     SyncBlockCache::GetSyncBlockCache()->GCDone(TRUE, max_gen);
// Notifies the sync block cache that promotions were granted (FALSE = promotion pass).
294 void GCToEEInterface::SyncBlockCachePromotionsGranted(int max_gen)
303     SyncBlockCache::GetSyncBlockCache()->GCDone(FALSE, max_gen);
// Returns the number of active sync blocks.
306 uint32_t GCToEEInterface::GetActiveSyncBlockCount()
315     return SyncBlockCache::GetSyncBlockCache()->GetActiveCount();
// Returns the calling thread's GC allocation context. Asserts that a managed
// Thread object exists for the caller.
318 gc_alloc_context * GCToEEInterface::GetAllocContext()
322     Thread* pThread = ::GetThread();
323     assert(pThread != nullptr);
324     return pThread->GetAllocContext();
// Invokes fn(context, param) for each allocation context: one per thread when
// per-thread allocation contexts are in use, otherwise the single global context.
327 void GCToEEInterface::GcEnumAllocContexts(enum_alloc_context_func* fn, void* param)
336     if (GCHeapUtilities::UseThreadAllocationContexts())
338         Thread * pThread = NULL;
339         while ((pThread = ThreadStore::GetThreadList(pThread)) != NULL)
341             fn(pThread->GetAllocContext(), param);
346         fn(&g_global_alloc_context, param);
// Returns the loader allocator object keeping pObject's type alive, resolved
// via the GC-safe method table (safe even for objects being relocated).
351 uint8_t* GCToEEInterface::GetLoaderAllocatorObjectForGC(Object* pObject)
360     return pObject->GetGCSafeMethodTable()->GetLoaderAllocatorObjectForGC();
// Returns true if the current thread is in cooperative (preemptive-GC-disabled) mode.
363 bool GCToEEInterface::IsPreemptiveGCDisabled()
367     Thread* pThread = ::GetThread();
370     return !!pThread->PreemptiveGCDisabled();
// Switches the current thread to preemptive mode. Returns whether a mode
// toggle actually occurred (i.e. the thread was in cooperative mode).
376 bool GCToEEInterface::EnablePreemptiveGC()
380     bool bToggleGC = false;
381     Thread* pThread = ::GetThread();
385     bToggleGC = !!pThread->PreemptiveGCDisabled();
388     pThread->EnablePreemptiveGC();
// Switches the current thread back to cooperative mode.
395 void GCToEEInterface::DisablePreemptiveGC()
399     Thread* pThread = ::GetThread();
402     pThread->DisablePreemptiveGC();
// Returns the managed Thread object for the current OS thread (may be null
// for threads unknown to the EE).
406 Thread* GCToEEInterface::GetThread()
410     return ::GetThread();
// Arguments marshalled to BackgroundThreadStub. The creating thread waits on
// threadStartedEvent; once it is set, this struct must no longer be touched
// by the new thread (it lives on the creator's stack).
413 struct BackgroundThreadStubArgs
416     GCBackgroundThreadFunction threadStart;
418     CLREvent threadStartedEvent;
// OS thread entry point for a GC background thread: marks the thread as a
// GC-special thread, runs the user-supplied thread function, then destroys
// the Thread object.
422 DWORD WINAPI BackgroundThreadStub(void* arg)
424     BackgroundThreadStubArgs* stubArgs = (BackgroundThreadStubArgs*)arg;
425     assert (stubArgs->thread != NULL);
427     ClrFlsSetThreadType (ThreadType_GC);
428     stubArgs->thread->SetGCSpecial(true);
429     STRESS_LOG_RESERVE_MEM (GC_STRESSLOG_MULTIPLY);
431     stubArgs->hasStarted = !!stubArgs->thread->HasStarted(FALSE);
    // Copy everything needed out of stubArgs before signalling the event.
433     Thread* thread = stubArgs->thread;
434     GCBackgroundThreadFunction realThreadStart = stubArgs->threadStart;
435     void* realThreadArg = stubArgs->arg;
436     bool hasStarted = stubArgs->hasStarted;
438     stubArgs->threadStartedEvent.Set();
439     // The stubArgs cannot be used once the event is set, since that releases wait on the
440     // event in the function that created this thread and the stubArgs go out of scope.
446         result = realThreadStart(realThreadArg);
447         DestroyThread(thread);
457 #if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
// Returns TRUE when either an attached profiler or ETW wants to be told about
// object movement (compaction/promotion) during GC.
458 inline BOOL ShouldTrackMovementForProfilerOrEtw()
461     if (CORProfilerTrackGC())
465 #ifdef FEATURE_EVENT_TRACE
466     if (ETW::GCLog::ShouldTrackMovementForEtw())
472 #endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
// promote_func-shaped root callback used during profiler/ETW root scans.
// Interior pointers are first resolved to their containing object before
// being forwarded to ScanRootsHelper.
474 void ProfScanRootsHelper(Object** ppObject, ScanContext *pSC, uint32_t dwFlags)
476 #if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
477     Object *pObj = *ppObject;
478     if (dwFlags & GC_CALL_INTERIOR)
480         pObj = GCHeapUtilities::GetGCHeap()->GetContainingObject(pObj, true);
484     ScanRootsHelper(pObj, ppObject, pSC, dwFlags);
485 #endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
488 // TODO - at some point we would like to completely decouple profiling
489 // from ETW tracing using a pattern similar to this, where the
490 // ProfilingScanContext has flags about whether or not certain things
491 // should be tracked, and each one of these ProfilerShouldXYZ functions
492 // will check these flags and determine what to do based upon that.
493 // GCProfileWalkHeapWorker can, in turn, call those methods without fear
494 // of things being ifdef'd out.
496 // Returns TRUE if GC profiling is enabled and the profiler
497 // should scan dependent handles, FALSE otherwise.
498 BOOL ProfilerShouldTrackConditionalWeakTableElements()
500 #if defined(GC_PROFILING)
501     return CORProfilerTrackConditionalWeakTableElements();
504 #endif // defined (GC_PROFILING)
507 // If GC profiling is enabled, informs the profiler that we are done
508 // tracing dependent handles.
509 void ProfilerEndConditionalWeakTableElementReferences(void* heapId)
511 #if defined (GC_PROFILING)
512     g_profControlBlock.pProfInterface->EndConditionalWeakTableElementReferences(heapId);
514     UNREFERENCED_PARAMETER(heapId);
515 #endif // defined (GC_PROFILING)
518 // If GC profiling is enabled, informs the profiler that we are done
519 // tracing root references.
520 void ProfilerEndRootReferences2(void* heapId)
522 #if defined (GC_PROFILING)
523     g_profControlBlock.pProfInterface->EndRootReferences2(heapId);
525     UNREFERENCED_PARAMETER(heapId);
526 #endif // defined (GC_PROFILING)
// Scans the stacks of ALL threads for profiler/ETW reporting. Unlike the GC's
// GcScanRoots, this does not filter threads by allocation-context heap.
529 void GcScanRootsForProfilerAndETW(promote_func* fn, int condemned, int max_gen, ScanContext* sc)
531     Thread* pThread = NULL;
532     while ((pThread = ThreadStore::GetThreadList(pThread)) != NULL)
534         sc->thread_under_crawl = pThread;
535 #ifdef FEATURE_EVENT_TRACE
536         sc->dwEtwRootKind = kEtwGCRootKindStack;
537 #endif // FEATURE_EVENT_TRACE
538         ScanStackRoots(pThread, fn, sc);
539 #ifdef FEATURE_EVENT_TRACE
540         sc->dwEtwRootKind = kEtwGCRootKindOther;
541 #endif // FEATURE_EVENT_TRACE
// Handle-scan callback that reports a GC handle (or dependent handle, when
// isDependent is true) to the attached profiler and/or to ETW.
545 void ScanHandleForProfilerAndETW(Object** pRef, Object* pSec, uint32_t flags, ScanContext* context, bool isDependent)
547     ProfilingScanContext* pSC = (ProfilingScanContext*)context;
550     // Give the profiler the objectref.
551     if (pSC->fProfilerPinned)
555             BEGIN_PIN_PROFILER(CORProfilerTrackGC());
556             g_profControlBlock.pProfInterface->RootReference2(
558                 kEtwGCRootKindHandle,
559                 (EtwGCRootFlags)flags,
            // Dependent handles are reported via the conditional-weak-table callback.
566             BEGIN_PIN_PROFILER(CORProfilerTrackConditionalWeakTableElements());
567             g_profControlBlock.pProfInterface->ConditionalWeakTableElementReference(
575 #endif // GC_PROFILING
577 #if defined(FEATURE_EVENT_TRACE)
578     // Notify ETW of the handle
579     if (ETW::GCLog::ShouldWalkHeapRootsForEtw())
581         ETW::GCLog::RootReference(
583             *pRef,          // object being rooted
584             pSec,           // pSecondaryNodeForDependentHandle
588             flags);         // ETW handle flags
590 #endif // defined(FEATURE_EVENT_TRACE)
593 // This is called only if we've determined that either:
594 // a) The Profiling API wants to do a walk of the heap, and it has pinned the
595 // profiler in place (so it cannot be detached), and it's thus safe to call into the
597 // b) ETW infrastructure wants to do a walk of the heap either to log roots,
599 // This can also be called to do a single walk for BOTH a) and b) simultaneously. Since
600 // ETW can ask for roots, but not objects
601 #if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
602 void GCProfileWalkHeapWorker(BOOL fProfilerPinned, BOOL fShouldWalkHeapRootsForEtw, BOOL fShouldWalkHeapObjectsForEtw)
605     ProfilingScanContext SC(fProfilerPinned);
606     unsigned max_generation = GCHeapUtilities::GetGCHeap()->GetMaxGeneration();
608     // **** Scan roots:  Only scan roots if profiling API wants them or ETW wants them.
609     if (fProfilerPinned || fShouldWalkHeapRootsForEtw)
611         GcScanRootsForProfilerAndETW(&ProfScanRootsHelper, max_generation, max_generation, &SC);
612         SC.dwEtwRootKind = kEtwGCRootKindFinalizer;
613         GCHeapUtilities::GetGCHeap()->DiagScanFinalizeQueue(&ProfScanRootsHelper, &SC);
615         // Handles are kept independent of wks/svr/concurrent builds
616         SC.dwEtwRootKind = kEtwGCRootKindHandle;
617         GCHeapUtilities::GetGCHeap()->DiagScanHandles(&ScanHandleForProfilerAndETW, max_generation, &SC);
619         // indicate that regular handle scanning is over, so we can flush the buffered roots
620         // to the profiler. (This is for profapi only.  ETW will flush after the
621         // entire heap walk is complete, via ETW::GCLog::EndHeapDump.)
624             ProfilerEndRootReferences2(&SC.pHeapId);
628     // **** Scan dependent handles: only if the profiler supports it or ETW wants roots
629     if ((fProfilerPinned && ProfilerShouldTrackConditionalWeakTableElements()) ||
630         fShouldWalkHeapRootsForEtw)
632         // GcScanDependentHandlesForProfiler double-checks
633         // CORProfilerTrackConditionalWeakTableElements() before calling into the profiler
635         ProfilingScanContext* pSC = &SC;
637         // we'll re-use pHeapId (which was either unused (0) or freed by EndRootReferences2
638         // (-1)), so reset it to NULL
639         _ASSERTE((*((size_t *)(&pSC->pHeapId)) == (size_t)(-1)) ||
640                 (*((size_t *)(&pSC->pHeapId)) == (size_t)(0)));
643         GCHeapUtilities::GetGCHeap()->DiagScanDependentHandles(&ScanHandleForProfilerAndETW, max_generation, &SC);
645         // indicate that dependent handle scanning is over, so we can flush the buffered roots
646         // to the profiler. (This is for profapi only.  ETW will flush after the
647         // entire heap walk is complete, via ETW::GCLog::EndHeapDump.)
648         if (fProfilerPinned && ProfilerShouldTrackConditionalWeakTableElements())
650             ProfilerEndConditionalWeakTableElementReferences(&SC.pHeapId);
654     ProfilerWalkHeapContext profilerWalkHeapContext(fProfilerPinned, SC.pvEtwContext);
656     // **** Walk objects on heap: only if profiling API wants them or ETW wants them.
657     if (fProfilerPinned || fShouldWalkHeapObjectsForEtw)
659         GCHeapUtilities::GetGCHeap()->DiagWalkHeap(&HeapWalkHelper, &profilerWalkHeapContext, max_generation, true /* walk the large object heap */);
662 #ifdef FEATURE_EVENT_TRACE
663     // **** Done! Indicate to ETW helpers that the heap walk is done, so any buffers
664     // should be flushed into the ETW stream
665     if (fShouldWalkHeapObjectsForEtw || fShouldWalkHeapRootsForEtw)
667         ETW::GCLog::EndHeapDump(&profilerWalkHeapContext);
669 #endif // FEATURE_EVENT_TRACE
672 #endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
// Top-level entry for profiler/ETW heap walks. Walks once for the pinned
// profiler if one is attached, and (if the profiler didn't already cover it)
// once more for ETW when ETW wants roots or objects.
674 void GCProfileWalkHeap()
676     BOOL fWalkedHeapForProfiler = FALSE;
678 #ifdef FEATURE_EVENT_TRACE
679     if (ETW::GCLog::ShouldWalkStaticsAndCOMForEtw())
680         ETW::GCLog::WalkStaticsAndCOMForETW();
682     BOOL fShouldWalkHeapRootsForEtw = ETW::GCLog::ShouldWalkHeapRootsForEtw();
683     BOOL fShouldWalkHeapObjectsForEtw = ETW::GCLog::ShouldWalkHeapObjectsForEtw();
684 #else // !FEATURE_EVENT_TRACE
685     BOOL fShouldWalkHeapRootsForEtw = FALSE;
686     BOOL fShouldWalkHeapObjectsForEtw = FALSE;
687 #endif // FEATURE_EVENT_TRACE
689 #if defined (GC_PROFILING)
691         BEGIN_PIN_PROFILER(CORProfilerTrackGC());
692         GCProfileWalkHeapWorker(TRUE /* fProfilerPinned */, fShouldWalkHeapRootsForEtw, fShouldWalkHeapObjectsForEtw);
693         fWalkedHeapForProfiler = TRUE;
696 #endif // defined (GC_PROFILING)
698 #if defined (GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
699     // we need to walk the heap if one of GC_PROFILING or FEATURE_EVENT_TRACE
700     // is defined, since both of them make use of the walk heap worker.
701     if (!fWalkedHeapForProfiler &&
702         (fShouldWalkHeapRootsForEtw || fShouldWalkHeapObjectsForEtw))
704         GCProfileWalkHeapWorker(FALSE /* fProfilerPinned */, fShouldWalkHeapRootsForEtw, fShouldWalkHeapObjectsForEtw);
706 #endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
// fq_walk_fn callback: reports a finalization-reachable object to the profiler.
709 void WalkFReachableObjects(bool isCritical, void* objectID)
711     g_profControlBlock.pProfInterface->FinalizeableObjectQueued(isCritical, (ObjectID)objectID);
// Shared callback instance passed to DiagWalkFinalizeQueue below.
714 static fq_walk_fn g_FQWalkFn = &WalkFReachableObjects;
// Diagnostic notification that a GC is starting: updates generation bounds,
// fires the GC-started callback, and (when the profiler tracks allocations
// by class) walks gen-0 objects for the EndAllocByClass report.
716 void GCToEEInterface::DiagGCStart(int gen, bool isInduced)
719     DiagUpdateGenerationBounds();
720     GarbageCollectionStartedCallback(gen, isInduced);
722         BEGIN_PIN_PROFILER(CORProfilerTrackGC());
725             // When we're walking objects allocated by class, then we don't want to walk the large
726             // object heap because then it would count things that may have been around for a while.
727             GCHeapUtilities::GetGCHeap()->DiagWalkHeap(&AllocByClassHelper, (void *)&context, 0, false);
729             // Notify that we've reached the end of the Gen 0 scan
730             g_profControlBlock.pProfInterface->EndAllocByClass(&context);
734 #endif // GC_PROFILING
// Refreshes the profiler's view of generation boundaries, if a profiler is
// tracking GCs.
737 void GCToEEInterface::DiagUpdateGenerationBounds()
740     if (CORProfilerTrackGC())
741         UpdateGenerationBounds();
742 #endif // GC_PROFILING
// Diagnostic notification that a GC has ended: refreshes generation bounds
// and fires the GC-finished callback.
745 void GCToEEInterface::DiagGCEnd(size_t index, int gen, int reason, bool fConcurrent)
751         DiagUpdateGenerationBounds();
752         GarbageCollectionFinishedCallback();
754 #endif // GC_PROFILING
// Walks the finalize queue and reports each finalization-reachable object to
// the profiler via g_FQWalkFn, if a profiler is tracking GCs.
757 void GCToEEInterface::DiagWalkFReachableObjects(void* gcContext)
760     if (CORProfilerTrackGC())
762         BEGIN_PIN_PROFILER(CORProfilerPresent());
763         GCHeapUtilities::GetGCHeap()->DiagWalkFinalizeQueue(gcContext, g_FQWalkFn);
766 #endif //GC_PROFILING
769 // Note on last parameter: when calling this for bgc, only ETW
770 // should be sending these events so that existing profapi profilers
771 // don't get confused.
// Reports a moved (or, for non-compacting GCs, surviving) object range; the
// relocation delta is only reported when the GC is compacting.
772 void WalkMovedReferences(uint8_t* begin, uint8_t* end,
778     ETW::GCLog::MovedReference(begin, end,
779                                (fCompacting ? reloc : 0),
// Walks survivors of an ephemeral/full GC and reports moved references for
// profiler/ETW, when movement tracking is enabled.
785 void GCToEEInterface::DiagWalkSurvivors(void* gcContext)
787 #if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
788     if (ShouldTrackMovementForProfilerOrEtw())
791         ETW::GCLog::BeginMovedReferences(&context);
792         GCHeapUtilities::GetGCHeap()->DiagWalkSurvivorsWithType(gcContext, &WalkMovedReferences, (void*)context, walk_for_gc);
793         ETW::GCLog::EndMovedReferences(context);
795 #endif //GC_PROFILING || FEATURE_EVENT_TRACE
// Same as DiagWalkSurvivors, but for large object heap survivors.
798 void GCToEEInterface::DiagWalkLOHSurvivors(void* gcContext)
800 #if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
801     if (ShouldTrackMovementForProfilerOrEtw())
804         ETW::GCLog::BeginMovedReferences(&context);
805         GCHeapUtilities::GetGCHeap()->DiagWalkSurvivorsWithType(gcContext, &WalkMovedReferences, (void*)context, walk_for_loh);
806         ETW::GCLog::EndMovedReferences(context);
808 #endif //GC_PROFILING || FEATURE_EVENT_TRACE
// Same as DiagWalkSurvivors, but for background GC survivors (ETW-only; see
// the note above WalkMovedReferences).
811 void GCToEEInterface::DiagWalkBGCSurvivors(void* gcContext)
813 #if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
814     if (ShouldTrackMovementForProfilerOrEtw())
817         ETW::GCLog::BeginMovedReferences(&context);
818         GCHeapUtilities::GetGCHeap()->DiagWalkSurvivorsWithType(gcContext, &WalkMovedReferences, (void*)context, walk_for_bgc);
819         ETW::GCLog::EndMovedReferences(context);
821 #endif //GC_PROFILING || FEATURE_EVENT_TRACE
// Updates the global write-barrier state (card table, bounds, ephemeral range,
// software write watch) and patches the write barrier code accordingly.
// Accumulates follow-up actions (icache flush, EE restart) in
// stompWBCompleteActions and performs them before returning.
824 void GCToEEInterface::StompWriteBarrier(WriteBarrierParameters* args)
826     int stompWBCompleteActions = SWB_PASS;
827     bool is_runtime_suspended = false;
829     assert(args != nullptr);
830     switch (args->operation)
832     case WriteBarrierOp::StompResize:
833         // StompResize requires a new card table, a new lowest address, and
834         // a new highest address
835         assert(args->card_table != nullptr);
836         assert(args->lowest_address != nullptr);
837         assert(args->highest_address != nullptr);
839         g_card_table = args->card_table;
841 #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
842         assert(args->card_bundle_table != nullptr);
843         g_card_bundle_table = args->card_bundle_table;
846 #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
847         if (g_sw_ww_enabled_for_gc_heap && (args->write_watch_table != nullptr))
849             assert(args->is_runtime_suspended);
850             g_sw_ww_table = args->write_watch_table;
852 #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
854         stompWBCompleteActions |= ::StompWriteBarrierResize(args->is_runtime_suspended, args->requires_upper_bounds_check);
856         // We need to make sure that other threads executing checked write barriers
857         // will see the g_card_table update before g_lowest/highest_address updates.
858         // Otherwise, the checked write barrier may AV accessing the old card table
859         // with address that it does not cover.
861         // Even x86's total store ordering is insufficient here because threads reading
862         // g_card_table do so via the instruction cache, whereas g_lowest/highest_address
863         // are read via the data cache.
865         // The g_card_table update is covered by section 8.1.3 of the Intel Software
866         // Development Manual, Volume 3A (System Programming Guide, Part 1), about
867         // "cross-modifying code": We need all _executing_ threads to invalidate
868         // their instruction cache, which FlushProcessWriteBuffers achieves by sending
869         // an IPI (inter-process interrupt).
871         if (stompWBCompleteActions & SWB_ICACHE_FLUSH)
873             // flushing icache on current processor (thread)
874             ::FlushWriteBarrierInstructionCache();
875             // asking other processors (threads) to invalidate their icache
876             FlushProcessWriteBuffers();
879         g_lowest_address = args->lowest_address;
880         VolatileStore(&g_highest_address, args->highest_address);
882 #if defined(_ARM64_) || defined(_ARM_)
883         // Need to reupdate for changes to g_highest_address g_lowest_address
884         is_runtime_suspended = (stompWBCompleteActions & SWB_EE_RESTART) || args->is_runtime_suspended;
885         stompWBCompleteActions |= ::StompWriteBarrierResize(is_runtime_suspended, args->requires_upper_bounds_check);
888         if (stompWBCompleteActions & SWB_ICACHE_FLUSH)
890             ::FlushWriteBarrierInstructionCache();
894         is_runtime_suspended = (stompWBCompleteActions & SWB_EE_RESTART) || args->is_runtime_suspended;
895         if(!is_runtime_suspended)
897             // If runtime is not suspended, force updated state to be visible to all threads
901         if (stompWBCompleteActions & SWB_EE_RESTART)
903             assert(!args->is_runtime_suspended &&
904                 "if runtime was suspended in patching routines then it was in running state at begining");
905             ThreadSuspend::RestartEE(FALSE, TRUE);
907         return; // unlike other branches we have already done cleanup so bailing out here
908     case WriteBarrierOp::StompEphemeral:
909         // StompEphemeral requires a new ephemeral low and a new ephemeral high
910         assert(args->ephemeral_low != nullptr);
911         assert(args->ephemeral_high != nullptr);
912         g_ephemeral_low = args->ephemeral_low;
913         g_ephemeral_high = args->ephemeral_high;
914         stompWBCompleteActions |= ::StompWriteBarrierEphemeral(args->is_runtime_suspended);
916     case WriteBarrierOp::Initialize:
917         // This operation should only be invoked once, upon initialization.
918         assert(g_card_table == nullptr);
919         assert(g_lowest_address == nullptr);
920         assert(g_highest_address == nullptr);
921         assert(args->card_table != nullptr);
922         assert(args->lowest_address != nullptr);
923         assert(args->highest_address != nullptr);
924         assert(args->ephemeral_low != nullptr);
925         assert(args->ephemeral_high != nullptr);
926         assert(args->is_runtime_suspended && "the runtime must be suspended here!");
927         assert(!args->requires_upper_bounds_check && "the ephemeral generation must be at the top of the heap!");
929         g_card_table = args->card_table;
931 #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
932         assert(g_card_bundle_table == nullptr);
933         g_card_bundle_table = args->card_bundle_table;
936         g_lowest_address = args->lowest_address;
937         g_highest_address = args->highest_address;
938         stompWBCompleteActions |= ::StompWriteBarrierResize(true, false);
940         // StompWriteBarrierResize does not necessarily bash g_ephemeral_low
941         // usages, so we must do so here. This is particularly true on x86,
942         // where StompWriteBarrierResize will not bash g_ephemeral_low when
943         // called with the parameters (true, false), as it is above.
944         g_ephemeral_low = args->ephemeral_low;
945         g_ephemeral_high = args->ephemeral_high;
946         stompWBCompleteActions |= ::StompWriteBarrierEphemeral(true);
948     case WriteBarrierOp::SwitchToWriteWatch:
949 #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
950         assert(args->write_watch_table != nullptr);
951         assert(args->is_runtime_suspended && "the runtime must be suspended here!");
952         g_sw_ww_table = args->write_watch_table;
953         g_sw_ww_enabled_for_gc_heap = true;
954         stompWBCompleteActions |= ::SwitchToWriteWatchBarrier(true);
956         assert(!"should never be called without FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP");
957 #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
959     case WriteBarrierOp::SwitchToNonWriteWatch:
960 #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
961         assert(args->is_runtime_suspended && "the runtime must be suspended here!");
963         g_sw_ww_enabled_for_gc_heap = false;
964         stompWBCompleteActions |= ::SwitchToNonWriteWatchBarrier(true);
966         assert(!"should never be called without FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP");
967 #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
970         assert(!"unknown WriteBarrierOp enum");
    // Common cleanup for all branches except StompResize (which returns above):
    // flush the patched barrier code from the icache and restart the EE if the
    // patching routines suspended it.
972     if (stompWBCompleteActions & SWB_ICACHE_FLUSH)
974         ::FlushWriteBarrierInstructionCache();
976     if (stompWBCompleteActions & SWB_EE_RESTART)
978         assert(!args->is_runtime_suspended &&
979             "if runtime was suspended in patching routines then it was in running state at begining");
980         ThreadSuspend::RestartEE(FALSE, TRUE);
// Wakes the finalizer thread when this GC queued finalizable objects, or when
// the finalizer has other pending work.
984 void GCToEEInterface::EnableFinalization(bool foundFinalizers)
986     if (foundFinalizers || FinalizerThread::HaveExtraWorkForFinalizer())
988         FinalizerThread::EnableFinalization();
// Escalates an unrecoverable GC error through the EE's fatal-error policy.
992 void GCToEEInterface::HandleFatalError(unsigned int exitCode)
994     EEPOLICY_HANDLE_FATAL_ERROR(exitCode);
997 bool GCToEEInterface::ShouldFinalizeObjectForUnload(void* pDomain, Object* obj)
999     // CoreCLR does not have appdomains, so this code path is dead. Other runtimes may
1000     // choose to inspect the object being finalized here.
1001     // [DESKTOP TODO] Desktop looks for "agile and finalizable" objects and may choose
1002     // to move them to a new app domain instead of finalizing them here.
// Gives the EE a chance to finalize an object eagerly (on the GC thread,
// without queuing it to the finalizer). WeakReference / WeakReference<T>
// instances are finalized here.
1006 bool GCToEEInterface::EagerFinalized(Object* obj)
1008     MethodTable* pMT = obj->GetGCSafeMethodTable();
1009     if (pMT == pWeakReferenceMT ||
1010         pMT->GetCanonicalMethodTable() == pWeakReferenceOfTCanonMT)
1012         FinalizeWeakReference(obj);
// Returns the method table the GC uses for free-space ("free object") regions.
1019 MethodTable* GCToEEInterface::GetFreeObjectMethodTable()
1021     assert(g_pFreeObjectMethodTable != nullptr);
1022     return g_pFreeObjectMethodTable;
1025 // These are arbitrary, we shouldn't ever be having config keys or values
1026 // longer than these lengths.
1027 const size_t MaxConfigKeyLength = 255;
1028 const size_t MaxConfigValueLength = 255;
// Looks up a boolean GC configuration value by (narrow) key name. Well-known
// startup-flag keys are answered directly; anything else is converted to a
// wide string and resolved through CLRConfig. Returns true if a value was found.
1030 bool GCToEEInterface::GetBooleanConfigValue(const char* key, bool* value)
1037     // these configuration values are given to us via startup flags.
1038     if (strcmp(key, "gcServer") == 0)
1040         *value = g_heap_type == GC_HEAP_SVR;
1044     if (strcmp(key, "gcConcurrent") == 0)
1046         *value = !!g_pConfig->GetGCconcurrent();
1050     if (strcmp(key, "GCRetainVM") == 0)
1052         *value = !!g_pConfig->GetGCRetainVM();
1056     WCHAR configKey[MaxConfigKeyLength];
1057     if (MultiByteToWideChar(CP_ACP, 0, key, -1 /* key is null-terminated */, configKey, MaxConfigKeyLength) == 0)
1059         // whatever this is... it's not something we care about. (It was too long, wasn't unicode, etc.)
1063     // otherwise, ask the config subsystem.
1064     if (CLRConfig::IsConfigOptionSpecified(configKey))
1066         CLRConfig::ConfigDWORDInfo info { configKey , 0, CLRConfig::EEConfig_default };
1067         *value = CLRConfig::GetConfigValue(info) != 0;
// Looks up an integer GC configuration value by key via CLRConfig.
// Returns true if the option was explicitly specified.
1074 bool GCToEEInterface::GetIntConfigValue(const char* key, int64_t* value)
1081     WCHAR configKey[MaxConfigKeyLength];
1082     if (MultiByteToWideChar(CP_ACP, 0, key, -1 /* key is null-terminated */, configKey, MaxConfigKeyLength) == 0)
1084         // whatever this is... it's not something we care about. (It was too long, wasn't unicode, etc.)
1088     if (CLRConfig::IsConfigOptionSpecified(configKey))
1090         CLRConfig::ConfigDWORDInfo info { configKey , 0, CLRConfig::EEConfig_default };
1091         *value = CLRConfig::GetConfigValue(info);
// Looks up a string GC configuration value by key via CLRConfig and returns a
// narrow copy through *value. The returned buffer is heap-allocated and the
// caller releases it via FreeStringConfigValue. Returns true on success.
1098 bool GCToEEInterface::GetStringConfigValue(const char* key, const char** value)
1105     WCHAR configKey[MaxConfigKeyLength];
1106     if (MultiByteToWideChar(CP_ACP, 0, key, -1 /* key is null-terminated */, configKey, MaxConfigKeyLength) == 0)
1108         // whatever this is... it's not something we care about. (It was too long, wasn't unicode, etc.)
1112     CLRConfig::ConfigStringInfo info { configKey, CLRConfig::EEConfig_default };
1113     LPWSTR out = CLRConfig::GetConfigValue(info);
1120     // not allocated on the stack since it escapes this function
1121     AStringHolder configResult = new (nothrow) char[MaxConfigValueLength];
1124         CLRConfig::FreeConfigString(out);
    // NOTE(review): the size passed here is MaxConfigKeyLength, but the buffer
    // was allocated with MaxConfigValueLength — they are both 255 today, but
    // the mismatch is worth confirming against the original source.
1128     if (WideCharToMultiByte(CP_ACP, 0, out, -1 /* out is null-terminated */,
1129           configResult.GetValue(), MaxConfigKeyLength, nullptr, nullptr) == 0)
1131         // this should only happen if the config subsystem gives us a string that's not valid
1133         CLRConfig::FreeConfigString(out);
1137     *value = configResult.Extract();
1138     CLRConfig::FreeConfigString(out);
// Releases a string previously returned by GetStringConfigValue.
1142 void GCToEEInterface::FreeStringConfigValue(const char* value)
// Returns true when the current thread is a GC thread.
1147 bool GCToEEInterface::IsGCThread()
1149     return !!::IsGCThread();
// Returns true when the current thread was created by the GC (GC-special thread).
1152 bool GCToEEInterface::WasCurrentThreadCreatedByGC()
1154     return !!::IsGCSpecialThread();
// Arguments handed to the thread stub for a suspendable (EE-registered) GC
// thread. Lives on the creator's stack; invalid after ThreadStartedEvent is set.
1157 struct SuspendableThreadStubArguments
1160     void (*ThreadStart)(void*);
1163     CLREvent ThreadStartedEvent;
// Arguments handed to the thread stub for a non-suspendable GC utility thread.
1166 struct ThreadStubArguments
1169     void (*ThreadStart)(void*);
1172     CLREvent ThreadStartedEvent;
// Maximum length accepted for a GC thread name.
1177 const size_t MaxThreadNameSize = 255;
// Creates a GC thread that participates in EE suspension (has a managed
// Thread object). Blocks until the new thread signals that it has started;
// returns false on any failure (event creation, thread setup, start failure).
1179 bool CreateSuspendableThread(
1180     void (*threadStart)(void*),
1182     const wchar_t* name)
1184     LIMITED_METHOD_CONTRACT;
1186     SuspendableThreadStubArguments args;
1187     args.Argument = argument;
1188     args.ThreadStart = threadStart;
1189     args.Thread = nullptr;
1190     args.HasStarted = false;
1191     if (!args.ThreadStartedEvent.CreateAutoEventNoThrow(FALSE))
1198         args.Thread = SetupUnstartedThread(FALSE);
1203     EX_END_CATCH(SwallowAllExceptions)
1207         args.ThreadStartedEvent.CloseEvent();
    // Thread stub: registers the new thread as GC-special, runs the user
    // function, then tears the Thread object down.
1211     auto threadStub = [](void* argument) -> DWORD
1213         SuspendableThreadStubArguments* args = static_cast<SuspendableThreadStubArguments*>(argument);
1214         assert(args != nullptr);
1216         ClrFlsSetThreadType(ThreadType_GC);
1217         args->Thread->SetGCSpecial(true);
1218         STRESS_LOG_RESERVE_MEM(GC_STRESSLOG_MULTIPLY);
1219         args->HasStarted = !!args->Thread->HasStarted(false);
1221         Thread* thread = args->Thread;
1222         auto threadStart = args->ThreadStart;
1223         void* threadArgument = args->Argument;
1224         bool hasStarted = args->HasStarted;
1225         args->ThreadStartedEvent.Set();
1227         // The stubArgs cannot be used once the event is set, since that releases wait on the
1228         // event in the function that created this thread and the stubArgs go out of scope.
1231             threadStart(threadArgument);
1232             DestroyThread(thread);
1237     if (!args.Thread->CreateNewThread(0, threadStub, &args, name))
1239         args.Thread->DecExternalCount(FALSE);
1240         args.ThreadStartedEvent.CloseEvent();
1244     args.Thread->SetBackground(TRUE, FALSE);
1245     args.Thread->StartThread();
1247     // Wait for the thread to be in its main loop
1248     uint32_t res = args.ThreadStartedEvent.Wait(INFINITE, FALSE);
1249     args.ThreadStartedEvent.CloseEvent();
1250     _ASSERTE(res == WAIT_OBJECT_0);
1252     if (!args.HasStarted)
1254         // The thread has failed to start and the Thread object was destroyed in the Thread::HasStarted
1255         // failure code path.
// Creates a GC utility thread that is NOT suspendable by the EE (no EE Thread
// object — just a raw handle from Thread::CreateUtilityThread). Uses the same
// event handshake as CreateSuspendableThread so the stack-allocated args stay
// valid until the new thread has copied them. Returns true on success.
// (Some lines — returns, braces — are elided from this view.)
1262 bool CreateNonSuspendableThread(
1263 void (*threadStart)(void*),
1265 const wchar_t* name)
1267 LIMITED_METHOD_CONTRACT;
1269 ThreadStubArguments args;
1270 args.Argument = argument;
1271 args.ThreadStart = threadStart;
1272 args.Thread = INVALID_HANDLE_VALUE;
// Auto-reset handshake event; failure path elided.
1273 if (!args.ThreadStartedEvent.CreateAutoEventNoThrow(FALSE))
// Stub run on the new thread; receives &args from the creator.
1278 auto threadStub = [](void* argument) -> DWORD
1280 ThreadStubArguments* args = static_cast<ThreadStubArguments*>(argument);
1281 assert(args != nullptr);
// Mark as a GC thread before running user code.
1283 ClrFlsSetThreadType(ThreadType_GC);
1284 STRESS_LOG_RESERVE_MEM(GC_STRESSLOG_MULTIPLY);
// Copy what we need out of *args, then signal — see comment below.
1286 args->HasStarted = true;
1287 auto threadStart = args->ThreadStart;
1288 void* threadArgument = args->Argument;
1289 args->ThreadStartedEvent.Set();
1291 // The stub args cannot be used once the event is set, since that releases wait on the
1292 // event in the function that created this thread and the stubArgs go out of scope.
1293 threadStart(threadArgument);
1297 args.Thread = Thread::CreateUtilityThread(Thread::StackSize_Medium, threadStub, &args, name);
1298 if (args.Thread == INVALID_HANDLE_VALUE)
// Creation failed: close the event (return elided).
1300 args.ThreadStartedEvent.CloseEvent();
1304 // Wait for the thread to be in its main loop
1305 uint32_t res = args.ThreadStartedEvent.Wait(INFINITE, FALSE);
1306 args.ThreadStartedEvent.CloseEvent();
1307 _ASSERTE(res == WAIT_OBJECT_0);
// We don't track this thread further; release our handle to it.
1309 CloseHandle(args.Thread);
1312 } // anonymous namespace
// Creates a GC background thread, dispatching to CreateSuspendableThread or
// CreateNonSuspendableThread based on is_suspendable. The UTF-8 name (which
// may be null) is converted to UTF-16 for the OS; if conversion throws, the
// thread is simply created without a name. Returns true on success.
1314 bool GCToEEInterface::CreateThread(void (*threadStart)(void*), void* arg, bool is_suspendable, const char* name)
1316 InlineSString<MaxThreadNameSize> wideName;
1317 const WCHAR* namePtr = nullptr;
1320 if (name != nullptr)
1322 wideName.SetUTF8(name);
1323 namePtr = wideName.GetUnicode();
1328 // we're not obligated to provide a name - if it's not valid,
1329 // just report nullptr as the name.
1331 EX_END_CATCH(SwallowAllExceptions)
1333 LIMITED_METHOD_CONTRACT;
1336 return CreateSuspendableThread(threadStart, arg, namePtr);
1340 return CreateNonSuspendableThread(threadStart, arg, namePtr);
// Reports (promotes/pins) the user objects hanging off an async-pinned
// OverlappedDataObject. Non-overlapped objects are ignored. If m_userObject is
// an object[] array, each element is reported pinned individually (the array
// itself is not pinned); otherwise m_userObject itself is reported pinned.
1344 void GCToEEInterface::WalkAsyncPinnedForPromotion(Object* object, ScanContext* sc, promote_func* callback)
1346 LIMITED_METHOD_CONTRACT;
1348 assert(object != nullptr);
1349 assert(sc != nullptr);
1350 assert(callback != nullptr);
1351 if (object->GetGCSafeMethodTable() != g_pOverlappedDataClass)
1353 // not an overlapped data object - nothing to do.
1357 // reporting the pinned user objects
1358 OverlappedDataObject *pOverlapped = (OverlappedDataObject *)object;
1359 if (pOverlapped->m_userObject != NULL)
// Is the user object an object[] array?
1361 if (pOverlapped->m_userObject->GetGCSafeMethodTable() == g_pPredefinedArrayTypes[ELEMENT_TYPE_OBJECT]->GetMethodTable())
1363 // OverlappedDataObject is very special. An async pin handle keeps it alive.
1364 // During GC, we also make sure
1365 // 1. m_userObject itself does not move if m_userObject is not array
1366 // 2. Every object pointed by m_userObject does not move if m_userObject is array
1367 // We do not want to pin m_userObject if it is array.
1368 ArrayBase* pUserObject = (ArrayBase*)OBJECTREFToObject(pOverlapped->m_userObject);
1369 Object **ppObj = (Object**)pUserObject->GetDataPtr(TRUE);
1370 size_t num = pUserObject->GetNumComponents();
// Pin each element of the array.
1371 for (size_t i = 0; i < num; i++)
1373 callback(ppObj + i, sc, GC_CALL_PINNED);
// Non-array case: pin the user object itself.
1378 callback(&OBJECTREF_TO_UNCHECKED_OBJECTREF(pOverlapped->m_userObject), (ScanContext *)sc, GC_CALL_PINNED);
// Walks the object graph reachable through an async-pinned
// OverlappedDataObject, invoking callback(parent, child, context) for
// m_userObject and, if it is an object[] array, for each array element.
// Unlike WalkAsyncPinnedForPromotion this only enumerates; it does not pin.
1385 void GCToEEInterface::WalkAsyncPinned(Object* object, void* context, void (*callback)(Object*, Object*, void*))
1385 LIMITED_METHOD_CONTRACT;
1387 assert(object != nullptr);
1388 assert(callback != nullptr);
// Only overlapped data objects carry async-pinned user objects.
1390 if (object->GetGCSafeMethodTable() != g_pOverlappedDataClass)
1395 OverlappedDataObject *pOverlapped = (OverlappedDataObject *)(object);
1396 if (pOverlapped->m_userObject != NULL)
1398 Object * pUserObject = OBJECTREFToObject(pOverlapped->m_userObject);
// Report the direct edge overlapped -> user object.
1399 callback(object, pUserObject, context);
// If the user object is an object[] array, also report each element.
1400 if (pOverlapped->m_userObject->GetGCSafeMethodTable() == g_pPredefinedArrayTypes[ELEMENT_TYPE_OBJECT]->GetMethodTable())
1402 ArrayBase* pUserArrayObject = (ArrayBase*)pUserObject;
1403 Object **pObj = (Object**)pUserArrayObject->GetDataPtr(TRUE);
1404 size_t num = pUserArrayObject->GetNumComponents();
1405 for (size_t i = 0; i < num; i ++)
1407 callback(pUserObject, pObj[i], context);
// Returns the singleton event sink the GC uses to raise events to the EE.
1413 IGCToCLREventSink* GCToEEInterface::EventSink()
1415 LIMITED_METHOD_CONTRACT;
1417 return &g_gcToClrEventSink;
// Returns the index of the default AppDomain.
1420 uint32_t GCToEEInterface::GetDefaultDomainIndex()
1422 LIMITED_METHOD_CONTRACT;
1424 return SystemDomain::System()->DefaultDomain()->GetIndex().m_dwIndex;
// Returns the AppDomain at the given index as an opaque pointer
// (may presumably be null if no domain exists at that index — confirm
// against SystemDomain::GetAppDomainAtIndex).
1427 void *GCToEEInterface::GetAppDomainAtIndex(uint32_t appDomainIndex)
1429 LIMITED_METHOD_CONTRACT;
1431 ADIndex index(appDomainIndex);
1432 return static_cast<void *>(SystemDomain::GetAppDomainAtIndex(index));
// Returns true if the AppDomain with the given ID still exists (and can
// therefore access its handle table).
1435 bool GCToEEInterface::AppDomainCanAccessHandleTable(uint32_t appDomainID)
1437 LIMITED_METHOD_CONTRACT;
1439 ADIndex index(appDomainID);
1440 AppDomain *pDomain = SystemDomain::GetAppDomainAtIndex(index);
1441 return (pDomain != NULL);
// Returns the index of the AppDomain currently being unloaded.
// (Body elided from this view — cannot tell what is returned here.)
1444 uint32_t GCToEEInterface::GetIndexOfAppDomainBeingUnloaded()
1446 LIMITED_METHOD_CONTRACT;
// Returns the total number of sized-ref handles across the system.
1451 uint32_t GCToEEInterface::GetTotalNumSizedRefHandles()
1453 LIMITED_METHOD_CONTRACT;
1455 return SystemDomain::System()->GetTotalNumSizedRefHandles();
// Returns whether the given AppDomain is undergoing a rude unload.
// (Body elided from this view — return value cannot be determined here.)
1459 bool GCToEEInterface::AppDomainIsRudeUnload(void *appDomain)
1461 LIMITED_METHOD_CONTRACT;
// Returns whether a debugger/diagnostic client has requested survivor analysis
// for the given condemned generation, by probing the GC notification table for
// a GC_MARK_END notification matching that generation's bit.
1466 bool GCToEEInterface::AnalyzeSurvivorsRequested(int condemnedGeneration)
1468 LIMITED_METHOD_CONTRACT;
1470 // Is the list active?
1471 GcNotifications gn(g_pGcNotificationTable);
// One bit per generation: bit (1 << condemnedGeneration) selects this GC.
1474 GcEvtArgs gea = { GC_MARK_END, { (1<<condemnedGeneration) } };
1475 if (gn.GetNotification(gea) != 0)
// Called when survivor analysis for the condemned generation completes; if a
// matching GC_MARK_END notification is registered, fires the DAC notification
// so an attached debugger can inspect the heap. (Continuation runs past this view.)
1484 void GCToEEInterface::AnalyzeSurvivorsFinished(int condemnedGeneration)
1486 LIMITED_METHOD_CONTRACT;
1488 // Is the list active?
1489 GcNotifications gn(g_pGcNotificationTable);
// Same per-generation bit encoding as AnalyzeSurvivorsRequested.
1492 GcEvtArgs gea = { GC_MARK_END, { (1<<condemnedGeneration) } };
1493 if (gn.GetNotification(gea) != 0)
1495 DACNotify::DoGCNotification(gea);