Fix perf issues with GetCurrentThreadHomeHeapNumber (#21150)
src/vm/gcenv.ee.cpp
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.

/*
 * GCENV.EE.CPP
 *
 * GCToEEInterface implementation
 */

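// Suspends the entire EE for a GC. The GC calls SuspendEE and RestartEE in
// matched pairs around each collection; the debugger interface is notified on
// both sides of the suspension so the debugger can coordinate with a stopped
// runtime.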
void GCToEEInterface::SuspendEE(SUSPEND_REASON reason)
{
    WRAPPER_NO_CONTRACT;

    static_assert_no_msg(SUSPEND_FOR_GC == (int)ThreadSuspend::SUSPEND_FOR_GC);
    static_assert_no_msg(SUSPEND_FOR_GC_PREP == (int)ThreadSuspend::SUSPEND_FOR_GC_PREP);

    _ASSERTE(reason == SUSPEND_FOR_GC || reason == SUSPEND_FOR_GC_PREP);

    ThreadSuspend::SuspendEE((ThreadSuspend::SUSPEND_REASON)reason);

    g_pDebugInterface->BeforeGarbageCollection();
}

void GCToEEInterface::RestartEE(bool bFinishedGC)
{
    WRAPPER_NO_CONTRACT;

    g_pDebugInterface->AfterGarbageCollection();

    ThreadSuspend::RestartEE(bFinishedGC, TRUE);
}

VOID GCToEEInterface::SyncBlockCacheWeakPtrScan(HANDLESCANPROC scanProc, uintptr_t lp1, uintptr_t lp2)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    SyncBlockCache::GetSyncBlockCache()->GCWeakPtrScan(scanProc, lp1, lp2);
}


// The EE can perform post-stack-scanning actions while the
// user threads are still suspended.
VOID GCToEEInterface::AfterGcScanRoots (int condemned, int max_gen,
                                   ScanContext* sc)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

#ifdef FEATURE_COMINTEROP
    // Go through all the app domains and for each one detach all the *unmarked* RCWs to prevent
    // the RCW cache from resurrecting them.
    UnsafeAppDomainIterator i(TRUE);
    i.Init();

    while (i.Next())
    {
        i.GetDomain()->DetachRCWs();
    }
#endif // FEATURE_COMINTEROP
}

/*
 * Scan all stack roots
 */

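// ScanStackRoots reports a thread's stack roots to the GC through the given
// promote_func. There are two paths: with conservative GC enabled, every
// pointer-sized slot between the top of the stack and the cached stack base
// that points into the GC heap range is reported as a pinned interior
// pointer; otherwise a precise stack walk visits each frame and reports only
// the locations known to hold object references.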
static void ScanStackRoots(Thread * pThread, promote_func* fn, ScanContext* sc)
{
    GCCONTEXT   gcctx;

    gcctx.f  = fn;
    gcctx.sc = sc;
    gcctx.cf = NULL;

    ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE();

    // Either we are in a concurrent situation (in which case the thread is unknown to
    // us), or we are performing a synchronous GC and we are the GC thread, holding
    // the threadstore lock.

    _ASSERTE(dbgOnly_IsSpecialEEThread() ||
                GetThread() == NULL ||
                // this is for background GC threads which always call this when EE is suspended.
                IsGCSpecialThread() ||
                (GetThread() == ThreadSuspend::GetSuspensionThread() && ThreadStore::HoldingThreadStore()));

    pThread->SetHasPromotedBytes();

    Frame* pTopFrame = pThread->GetFrame();
    Object ** topStack = (Object **)pTopFrame;
    if ((pTopFrame != ((Frame*)-1))
        && (pTopFrame->GetVTablePtr() == InlinedCallFrame::GetMethodFrameVPtr())) {
        // It is an InlinedCallFrame. Get SP from it.
        InlinedCallFrame* pInlinedFrame = (InlinedCallFrame*)pTopFrame;
        topStack = (Object **)pInlinedFrame->GetCallSiteSP();
    }

    sc->stack_limit = (uintptr_t)topStack;

#ifdef FEATURE_CONSERVATIVE_GC
    if (g_pConfig->GetGCConservative())
    {
        // Conservative stack root reporting.
        // We will treat everything on the stack as a pinned interior GC pointer.
        // Since we report everything as pinned, we don't need to run the following
        // code during the relocation phase.
        if (sc->promotion)
        {
            Object ** bottomStack = (Object **) pThread->GetCachedStackBase();
            Object ** walk;
            for (walk = topStack; walk < bottomStack; walk++)
            {
                if (((void*)*walk > (void*)bottomStack || (void*)*walk < (void*)topStack) &&
                    ((void*)*walk >= (void*)g_lowest_address && (void*)*walk <= (void*)g_highest_address)
                    )
                {
                    //DbgPrintf("promote " FMT_ADDR " : " FMT_ADDR "\n", walk, *walk);
                    fn(walk, sc, GC_CALL_INTERIOR|GC_CALL_PINNED);
                }
            }
        }

        // Also ask the explicit Frames to report any references they might know about.
        // Generally these will be a subset of the objects reported above, but there's
        // nothing that guarantees that, and in the specific case of a GC protect frame the
        // references it protects may live at a lower address than the frame itself (and
        // thus escape the stack range we scanned above).
        Frame *pFrame = pThread->GetFrame();
        while (pFrame != FRAME_TOP)
        {
            pFrame->GcScanRoots(fn, sc);
            pFrame = pFrame->PtrNextFrame();
        }
    }
    else
#endif
    {
        unsigned flagsStackWalk = ALLOW_ASYNC_STACK_WALK | ALLOW_INVALID_OBJECTS;
#if defined(WIN64EXCEPTIONS)
        flagsStackWalk |= GC_FUNCLET_REFERENCE_REPORTING;
#endif // defined(WIN64EXCEPTIONS)
        pThread->StackWalkFrames( GcStackCrawlCallBack, &gcctx, flagsStackWalk);
    }
}

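// Scans roots for the threads in the thread store. Under server GC, multiple
// scanning threads run this concurrently; IsThreadUsingAllocationContextHeap
// partitions the managed threads among them so that each stack is scanned
// exactly once, by the scanner associated with that thread's home heap.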
void GCToEEInterface::GcScanRoots(promote_func* fn, int condemned, int max_gen, ScanContext* sc)
{
    STRESS_LOG1(LF_GCROOTS, LL_INFO10, "GCScan: Promotion Phase = %d\n", sc->promotion);

    // In server GC, we should be competing for marking the statics
    if (GCHeapUtilities::MarkShouldCompeteForStatics())
    {
        if (condemned == max_gen && sc->promotion)
        {
            SystemDomain::EnumAllStaticGCRefs(fn, sc);
        }
    }

    Thread* pThread = NULL;
    while ((pThread = ThreadStore::GetThreadList(pThread)) != NULL)
    {
        STRESS_LOG2(LF_GC | LF_GCROOTS, LL_INFO100, "{ Starting scan of Thread %p ID = %x\n", pThread, pThread->GetThreadId());

        if (GCHeapUtilities::GetGCHeap()->IsThreadUsingAllocationContextHeap(
            pThread->GetAllocContext(), sc->thread_number))
        {
            sc->thread_under_crawl = pThread;
#ifdef FEATURE_EVENT_TRACE
            sc->dwEtwRootKind = kEtwGCRootKindStack;
#endif // FEATURE_EVENT_TRACE
            ScanStackRoots(pThread, fn, sc);
#ifdef FEATURE_EVENT_TRACE
            sc->dwEtwRootKind = kEtwGCRootKindOther;
#endif // FEATURE_EVENT_TRACE
        }
        STRESS_LOG2(LF_GC | LF_GCROOTS, LL_INFO100, "Ending scan of Thread %p ID = 0x%x }\n", pThread, pThread->GetThreadId());
    }
}

void GCToEEInterface::GcStartWork (int condemned, int max_gen)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

#ifdef VERIFY_HEAP
    // Validate byrefs pinned by IL stubs since the last GC.
    StubHelpers::ProcessByrefValidationList();
#endif // VERIFY_HEAP

    ExecutionManager::CleanupCodeHeaps();

#ifdef FEATURE_EVENT_TRACE
    ETW::TypeSystemLog::Cleanup();
#endif

#ifdef FEATURE_COMINTEROP
    //
    // Let the GC detect managed/native cycles with input from Jupiter.
    // Jupiter will
    // 1. Report references from RCWs to CCWs based on native references in Jupiter
    // 2. Identify the subset of CCWs that need to be rooted
    //
    // We'll build the references from RCWs to CCWs using
    // 1. Preallocated arrays
    // 2. Dependent handles
    //
    RCWWalker::OnGCStarted(condemned);
#endif // FEATURE_COMINTEROP

    if (condemned == max_gen)
    {
        ThreadStore::s_pThreadStore->OnMaxGenerationGCStarted();
    }
}

void GCToEEInterface::GcDone(int condemned)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

#ifdef FEATURE_COMINTEROP
    //
    // Tell Jupiter that the GC has finished
    //
    RCWWalker::OnGCFinished(condemned);
#endif // FEATURE_COMINTEROP
}

bool GCToEEInterface::RefCountedHandleCallbacks(Object * pObject)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

#ifdef FEATURE_COMINTEROP
    //<REVISIT_TODO>@todo optimize the access to the ref-count
    ComCallWrapper* pWrap = ComCallWrapper::GetWrapperForObject((OBJECTREF)pObject);

    return pWrap != NULL && pWrap->IsWrapperActive();
#else
    return false;
#endif
}

void GCToEEInterface::GcBeforeBGCSweepWork()
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

#ifdef VERIFY_HEAP
    // Validate byrefs pinned by IL stubs since the last GC.
    StubHelpers::ProcessByrefValidationList();
#endif // VERIFY_HEAP
}

void GCToEEInterface::SyncBlockCacheDemote(int max_gen)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    SyncBlockCache::GetSyncBlockCache()->GCDone(TRUE, max_gen);
}

void GCToEEInterface::SyncBlockCachePromotionsGranted(int max_gen)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    SyncBlockCache::GetSyncBlockCache()->GCDone(FALSE, max_gen);
}

uint32_t GCToEEInterface::GetActiveSyncBlockCount()
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    return SyncBlockCache::GetSyncBlockCache()->GetActiveCount();
}

gc_alloc_context * GCToEEInterface::GetAllocContext()
{
    WRAPPER_NO_CONTRACT;

    Thread* pThread = ::GetThread();
    if (!pThread)
    {
        return nullptr;
    }

    return pThread->GetAllocContext();
}

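// Enumerates every allocation context for the GC. When per-thread allocation
// contexts are in use (the common case), this visits every thread in the
// thread store; otherwise only the single global allocation context is
// reported.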
void GCToEEInterface::GcEnumAllocContexts(enum_alloc_context_func* fn, void* param)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    if (GCHeapUtilities::UseThreadAllocationContexts())
    {
        Thread * pThread = NULL;
        while ((pThread = ThreadStore::GetThreadList(pThread)) != NULL)
        {
            fn(pThread->GetAllocContext(), param);
        }
    }
    else
    {
        fn(&g_global_alloc_context, param);
    }
}


uint8_t* GCToEEInterface::GetLoaderAllocatorObjectForGC(Object* pObject)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    return pObject->GetGCSafeMethodTable()->GetLoaderAllocatorObjectForGC();
}

bool GCToEEInterface::IsPreemptiveGCDisabled()
{
    WRAPPER_NO_CONTRACT;

    Thread* pThread = ::GetThread();
    if (pThread)
    {
        return !!pThread->PreemptiveGCDisabled();
    }

    return false;
}

bool GCToEEInterface::EnablePreemptiveGC()
{
    WRAPPER_NO_CONTRACT;

    bool bToggleGC = false;
    Thread* pThread = ::GetThread();

    if (pThread)
    {
        bToggleGC = !!pThread->PreemptiveGCDisabled();
        if (bToggleGC)
        {
            pThread->EnablePreemptiveGC();
        }
    }

    return bToggleGC;
}

void GCToEEInterface::DisablePreemptiveGC()
{
    WRAPPER_NO_CONTRACT;

    Thread* pThread = ::GetThread();
    if (pThread)
    {
        pThread->DisablePreemptiveGC();
    }
}

Thread* GCToEEInterface::GetThread()
{
    WRAPPER_NO_CONTRACT;

    return ::GetThread();
}

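// BackgroundThreadStub hands a newly created GC background thread over to its
// real entry point. Note the startup handshake: the creating thread blocks on
// threadStartedEvent, so the stub must copy everything it needs out of
// stubArgs before setting the event, because the args live on the creator's
// stack and go out of scope once the wait is released.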
struct BackgroundThreadStubArgs
{
    Thread* thread;
    GCBackgroundThreadFunction threadStart;
    void* arg;
    CLREvent threadStartedEvent;
    bool hasStarted;
};

DWORD WINAPI BackgroundThreadStub(void* arg)
{
    BackgroundThreadStubArgs* stubArgs = (BackgroundThreadStubArgs*)arg;
    assert (stubArgs->thread != NULL);

    ClrFlsSetThreadType (ThreadType_GC);
    stubArgs->thread->SetGCSpecial(true);
    STRESS_LOG_RESERVE_MEM (GC_STRESSLOG_MULTIPLY);

    stubArgs->hasStarted = !!stubArgs->thread->HasStarted(FALSE);

    Thread* thread = stubArgs->thread;
    GCBackgroundThreadFunction realThreadStart = stubArgs->threadStart;
    void* realThreadArg = stubArgs->arg;
    bool hasStarted = stubArgs->hasStarted;

    stubArgs->threadStartedEvent.Set();
    // The stubArgs cannot be used once the event is set, since that releases the wait on the
    // event in the function that created this thread, and the stubArgs go out of scope.

    DWORD result = 0;

    if (hasStarted)
    {
        result = realThreadStart(realThreadArg);
        DestroyThread(thread);
    }

    return result;
}

//
// Diagnostics code
//

#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
inline BOOL ShouldTrackMovementForProfilerOrEtw()
{
#ifdef GC_PROFILING
    if (CORProfilerTrackGC())
        return true;
#endif

#ifdef FEATURE_EVENT_TRACE
    if (ETW::GCLog::ShouldTrackMovementForEtw())
        return true;
#endif

    return false;
}
#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)

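// ProfScanRootsHelper adapts root reports for the profiler/ETW scans below:
// interior pointers are first resolved to their containing object (or dropped
// if no containing object is found) before being forwarded to ScanRootsHelper.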
void ProfScanRootsHelper(Object** ppObject, ScanContext *pSC, uint32_t dwFlags)
{
#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
    Object *pObj = *ppObject;
    if (dwFlags & GC_CALL_INTERIOR)
    {
        pObj = GCHeapUtilities::GetGCHeap()->GetContainingObject(pObj, true);
        if (pObj == nullptr)
            return;
    }
    ScanRootsHelper(pObj, ppObject, pSC, dwFlags);
#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
}

// TODO - at some point we would like to completely decouple profiling
// from ETW tracing using a pattern similar to this, where the
// ProfilingScanContext has flags about whether or not certain things
// should be tracked, and each one of these ProfilerShouldXYZ functions
// will check these flags and determine what to do based upon that.
// GCProfileWalkHeapWorker can, in turn, call those methods without fear
// of things being ifdef'd out.

// Returns TRUE if GC profiling is enabled and the profiler
// should scan dependent handles, FALSE otherwise.
BOOL ProfilerShouldTrackConditionalWeakTableElements()
{
#if defined(GC_PROFILING)
    return CORProfilerTrackConditionalWeakTableElements();
#else
    return FALSE;
#endif // defined (GC_PROFILING)
}

// If GC profiling is enabled, informs the profiler that we are done
// tracing dependent handles.
void ProfilerEndConditionalWeakTableElementReferences(void* heapId)
{
#if defined (GC_PROFILING)
    g_profControlBlock.pProfInterface->EndConditionalWeakTableElementReferences(heapId);
#else
    UNREFERENCED_PARAMETER(heapId);
#endif // defined (GC_PROFILING)
}

// If GC profiling is enabled, informs the profiler that we are done
// tracing root references.
void ProfilerEndRootReferences2(void* heapId)
{
#if defined (GC_PROFILING)
    g_profControlBlock.pProfInterface->EndRootReferences2(heapId);
#else
    UNREFERENCED_PARAMETER(heapId);
#endif // defined (GC_PROFILING)
}

void GcScanRootsForProfilerAndETW(promote_func* fn, int condemned, int max_gen, ScanContext* sc)
{
    Thread* pThread = NULL;
    while ((pThread = ThreadStore::GetThreadList(pThread)) != NULL)
    {
        sc->thread_under_crawl = pThread;
#ifdef FEATURE_EVENT_TRACE
        sc->dwEtwRootKind = kEtwGCRootKindStack;
#endif // FEATURE_EVENT_TRACE
        ScanStackRoots(pThread, fn, sc);
#ifdef FEATURE_EVENT_TRACE
        sc->dwEtwRootKind = kEtwGCRootKindOther;
#endif // FEATURE_EVENT_TRACE
    }
}

void ScanHandleForProfilerAndETW(Object** pRef, Object* pSec, uint32_t flags, ScanContext* context, bool isDependent)
{
    ProfilingScanContext* pSC = (ProfilingScanContext*)context;

#ifdef GC_PROFILING
    // Give the profiler the objectref.
    if (pSC->fProfilerPinned)
    {
        if (!isDependent)
        {
            BEGIN_PIN_PROFILER(CORProfilerTrackGC());
            g_profControlBlock.pProfInterface->RootReference2(
                (uint8_t *)*pRef,
                kEtwGCRootKindHandle,
                (EtwGCRootFlags)flags,
                pRef,
                &pSC->pHeapId);
            END_PIN_PROFILER();
        }
        else
        {
            BEGIN_PIN_PROFILER(CORProfilerTrackConditionalWeakTableElements());
            g_profControlBlock.pProfInterface->ConditionalWeakTableElementReference(
                (uint8_t*)*pRef,
                (uint8_t*)pSec,
                pRef,
                &pSC->pHeapId);
            END_PIN_PROFILER();
        }
    }
#endif // GC_PROFILING

#if defined(FEATURE_EVENT_TRACE)
    // Notify ETW of the handle
    if (ETW::GCLog::ShouldWalkHeapRootsForEtw())
    {
        ETW::GCLog::RootReference(
            pRef,
            *pRef,          // object being rooted
            pSec,           // pSecondaryNodeForDependentHandle
            isDependent,
            pSC,
            0,              // dwGCFlags,
            flags);     // ETW handle flags
    }
#endif // defined(FEATURE_EVENT_TRACE)
}

// This is called only if we've determined that either:
//     a) The Profiling API wants to do a walk of the heap, and it has pinned the
//     profiler in place (so it cannot be detached), and it's thus safe to call into the
//     profiler, OR
//     b) ETW infrastructure wants to do a walk of the heap either to log roots,
//     objects, or both.
// This can also be called to do a single walk for BOTH a) and b) simultaneously, since
// ETW can ask for roots but not objects (or vice versa) independently of the profiler.
#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
void GCProfileWalkHeapWorker(BOOL fProfilerPinned, BOOL fShouldWalkHeapRootsForEtw, BOOL fShouldWalkHeapObjectsForEtw)
{
    {
        ProfilingScanContext SC(fProfilerPinned);
        unsigned max_generation = GCHeapUtilities::GetGCHeap()->GetMaxGeneration();

        // **** Scan roots:  Only scan roots if profiling API wants them or ETW wants them.
        if (fProfilerPinned || fShouldWalkHeapRootsForEtw)
        {
            GcScanRootsForProfilerAndETW(&ProfScanRootsHelper, max_generation, max_generation, &SC);
            SC.dwEtwRootKind = kEtwGCRootKindFinalizer;
            GCHeapUtilities::GetGCHeap()->DiagScanFinalizeQueue(&ProfScanRootsHelper, &SC);

            // Handles are kept independent of wks/svr/concurrent builds
            SC.dwEtwRootKind = kEtwGCRootKindHandle;
            GCHeapUtilities::GetGCHeap()->DiagScanHandles(&ScanHandleForProfilerAndETW, max_generation, &SC);

            // Indicate that regular handle scanning is over, so we can flush the buffered roots
            // to the profiler.  (This is for profapi only.  ETW will flush after the
            // walk of the entire heap is complete, via ETW::GCLog::EndHeapDump.)
            if (fProfilerPinned)
            {
                ProfilerEndRootReferences2(&SC.pHeapId);
            }
        }

        // **** Scan dependent handles: only if the profiler supports it or ETW wants roots
        if ((fProfilerPinned && ProfilerShouldTrackConditionalWeakTableElements()) ||
            fShouldWalkHeapRootsForEtw)
        {
            // GcScanDependentHandlesForProfiler double-checks
            // CORProfilerTrackConditionalWeakTableElements() before calling into the profiler

            ProfilingScanContext* pSC = &SC;

            // we'll re-use pHeapId (which was either unused (0) or freed by EndRootReferences2
            // (-1)), so reset it to NULL
            _ASSERTE((*((size_t *)(&pSC->pHeapId)) == (size_t)(-1)) ||
                    (*((size_t *)(&pSC->pHeapId)) == (size_t)(0)));
            pSC->pHeapId = NULL;

            GCHeapUtilities::GetGCHeap()->DiagScanDependentHandles(&ScanHandleForProfilerAndETW, max_generation, &SC);

            // Indicate that dependent handle scanning is over, so we can flush the buffered roots
            // to the profiler.  (This is for profapi only.  ETW will flush after the
            // walk of the entire heap is complete, via ETW::GCLog::EndHeapDump.)
            if (fProfilerPinned && ProfilerShouldTrackConditionalWeakTableElements())
            {
                ProfilerEndConditionalWeakTableElementReferences(&SC.pHeapId);
            }
        }

        ProfilerWalkHeapContext profilerWalkHeapContext(fProfilerPinned, SC.pvEtwContext);

        // **** Walk objects on heap: only if profiling API wants them or ETW wants them.
        if (fProfilerPinned || fShouldWalkHeapObjectsForEtw)
        {
            GCHeapUtilities::GetGCHeap()->DiagWalkHeap(&HeapWalkHelper, &profilerWalkHeapContext, max_generation, true /* walk the large object heap */);
        }

#ifdef FEATURE_EVENT_TRACE
        // **** Done! Indicate to ETW helpers that the heap walk is done, so any buffers
        // should be flushed into the ETW stream
        if (fShouldWalkHeapObjectsForEtw || fShouldWalkHeapRootsForEtw)
        {
            ETW::GCLog::EndHeapDump(&profilerWalkHeapContext);
        }
#endif // FEATURE_EVENT_TRACE
    }
}
#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)

void GCProfileWalkHeap()
{
    BOOL fWalkedHeapForProfiler = FALSE;

#ifdef FEATURE_EVENT_TRACE
    if (ETW::GCLog::ShouldWalkStaticsAndCOMForEtw())
        ETW::GCLog::WalkStaticsAndCOMForETW();

    BOOL fShouldWalkHeapRootsForEtw = ETW::GCLog::ShouldWalkHeapRootsForEtw();
    BOOL fShouldWalkHeapObjectsForEtw = ETW::GCLog::ShouldWalkHeapObjectsForEtw();
#else // !FEATURE_EVENT_TRACE
    BOOL fShouldWalkHeapRootsForEtw = FALSE;
    BOOL fShouldWalkHeapObjectsForEtw = FALSE;
#endif // FEATURE_EVENT_TRACE

#if defined (GC_PROFILING)
    {
        BEGIN_PIN_PROFILER(CORProfilerTrackGC());
        GCProfileWalkHeapWorker(TRUE /* fProfilerPinned */, fShouldWalkHeapRootsForEtw, fShouldWalkHeapObjectsForEtw);
        fWalkedHeapForProfiler = TRUE;
        END_PIN_PROFILER();
    }
#endif // defined (GC_PROFILING)

#if defined (GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
    // We need to walk the heap if either GC_PROFILING or FEATURE_EVENT_TRACE
    // is defined, since both of them make use of the walk heap worker.
    if (!fWalkedHeapForProfiler &&
        (fShouldWalkHeapRootsForEtw || fShouldWalkHeapObjectsForEtw))
    {
        GCProfileWalkHeapWorker(FALSE /* fProfilerPinned */, fShouldWalkHeapRootsForEtw, fShouldWalkHeapObjectsForEtw);
    }
#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
}

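// Callback for the finalize-queue walk in DiagWalkFReachableObjects below:
// reports each finalizable object that has been queued to the finalizer (an
// "f-reachable" object) to the profiler.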
void WalkFReachableObjects(bool isCritical, void* objectID)
{
    g_profControlBlock.pProfInterface->FinalizeableObjectQueued(isCritical, (ObjectID)objectID);
}

static fq_walk_fn g_FQWalkFn = &WalkFReachableObjects;

void GCToEEInterface::DiagGCStart(int gen, bool isInduced)
{
#ifdef GC_PROFILING
    DiagUpdateGenerationBounds();
    GarbageCollectionStartedCallback(gen, isInduced);
    {
        BEGIN_PIN_PROFILER(CORProfilerTrackGC());
        size_t context = 0;

        // When we're walking objects allocated by class, we don't want to walk the large
        // object heap, because that would count things that may have been around for a while.
        GCHeapUtilities::GetGCHeap()->DiagWalkHeap(&AllocByClassHelper, (void *)&context, 0, false);

        // Notify that we've reached the end of the Gen 0 scan
        g_profControlBlock.pProfInterface->EndAllocByClass(&context);
        END_PIN_PROFILER();
    }

#endif // GC_PROFILING
}

void GCToEEInterface::DiagUpdateGenerationBounds()
{
#ifdef GC_PROFILING
    if (CORProfilerTrackGC())
        UpdateGenerationBounds();
#endif // GC_PROFILING
}

void GCToEEInterface::DiagGCEnd(size_t index, int gen, int reason, bool fConcurrent)
{
#ifdef GC_PROFILING
    if (!fConcurrent)
    {
        GCProfileWalkHeap();
        DiagUpdateGenerationBounds();
        GarbageCollectionFinishedCallback();
    }
#endif // GC_PROFILING
}

void GCToEEInterface::DiagWalkFReachableObjects(void* gcContext)
{
#ifdef GC_PROFILING
    if (CORProfilerTrackGC())
    {
        BEGIN_PIN_PROFILER(CORProfilerPresent());
        GCHeapUtilities::GetGCHeap()->DiagWalkFinalizeQueue(gcContext, g_FQWalkFn);
        END_PIN_PROFILER();
    }
#endif //GC_PROFILING
}

// Note on last parameter: when calling this for bgc, only ETW
// should be sending these events so that existing profapi profilers
// don't get confused.
void WalkMovedReferences(uint8_t* begin, uint8_t* end,
                         ptrdiff_t reloc,
                         void* context,
                         bool fCompacting,
                         bool fBGC)
{
    ETW::GCLog::MovedReference(begin, end,
                               (fCompacting ? reloc : 0),
                               (size_t)context,
                               fCompacting,
                               !fBGC);
}

void GCToEEInterface::DiagWalkSurvivors(void* gcContext)
{
#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
    if (ShouldTrackMovementForProfilerOrEtw())
    {
        size_t context = 0;
        ETW::GCLog::BeginMovedReferences(&context);
        GCHeapUtilities::GetGCHeap()->DiagWalkSurvivorsWithType(gcContext, &WalkMovedReferences, (void*)context, walk_for_gc);
        ETW::GCLog::EndMovedReferences(context);
    }
#endif //GC_PROFILING || FEATURE_EVENT_TRACE
}

void GCToEEInterface::DiagWalkLOHSurvivors(void* gcContext)
{
#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
    if (ShouldTrackMovementForProfilerOrEtw())
    {
        size_t context = 0;
        ETW::GCLog::BeginMovedReferences(&context);
        GCHeapUtilities::GetGCHeap()->DiagWalkSurvivorsWithType(gcContext, &WalkMovedReferences, (void*)context, walk_for_loh);
        ETW::GCLog::EndMovedReferences(context);
    }
#endif //GC_PROFILING || FEATURE_EVENT_TRACE
}

void GCToEEInterface::DiagWalkBGCSurvivors(void* gcContext)
{
#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
    if (ShouldTrackMovementForProfilerOrEtw())
    {
        size_t context = 0;
        ETW::GCLog::BeginMovedReferences(&context);
        GCHeapUtilities::GetGCHeap()->DiagWalkSurvivorsWithType(gcContext, &WalkMovedReferences, (void*)context, walk_for_bgc);
        ETW::GCLog::EndMovedReferences(context);
    }
#endif //GC_PROFILING || FEATURE_EVENT_TRACE
}

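// "Stomping" the write barrier means patching the barrier code in place with
// new table addresses and heap bounds. As a rough sketch (illustrative
// pseudo-C only; the real barriers are hand-written assembly, and the exact
// shift/constant names here are assumptions), a checked write barrier behaves
// like:
//
//     void CheckedWriteBarrier(Object** dst, Object* ref)
//     {
//         *dst = ref;
//         if (ref >= g_ephemeral_low && ref < g_ephemeral_high &&
//             dst >= g_lowest_address && dst < g_highest_address)
//         {
//             g_card_table[(size_t)dst >> card_byte_shift] = 0xFF;
//         }
//     }
//
// Because values like g_card_table and the heap bounds are baked into the
// barrier code as immediates, changing them requires rewriting the code
// itself and flushing instruction caches, subject to the ordering constraints
// described inline below.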
void GCToEEInterface::StompWriteBarrier(WriteBarrierParameters* args)
{
    int stompWBCompleteActions = SWB_PASS;
    bool is_runtime_suspended = false;

    assert(args != nullptr);
    switch (args->operation)
    {
    case WriteBarrierOp::StompResize:
        // StompResize requires a new card table, a new lowest address, and
        // a new highest address
        assert(args->card_table != nullptr);
        assert(args->lowest_address != nullptr);
        assert(args->highest_address != nullptr);

        g_card_table = args->card_table;

#ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
        assert(args->card_bundle_table != nullptr);
        g_card_bundle_table = args->card_bundle_table;
#endif

#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
        if (g_sw_ww_enabled_for_gc_heap && (args->write_watch_table != nullptr))
        {
            assert(args->is_runtime_suspended);
            g_sw_ww_table = args->write_watch_table;
        }
#endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP

        stompWBCompleteActions |= ::StompWriteBarrierResize(args->is_runtime_suspended, args->requires_upper_bounds_check);

        // We need to make sure that other threads executing checked write barriers
        // will see the g_card_table update before the g_lowest/highest_address updates.
        // Otherwise, the checked write barrier may AV while accessing the old card table
        // with an address that it does not cover.
        //
        // Even x86's total store ordering is insufficient here because threads reading
        // g_card_table do so via the instruction cache, whereas g_lowest/highest_address
        // are read via the data cache.
        //
        // The g_card_table update is covered by section 8.1.3 of the Intel Software
        // Development Manual, Volume 3A (System Programming Guide, Part 1), about
        // "cross-modifying code": We need all _executing_ threads to invalidate
        // their instruction cache, which FlushProcessWriteBuffers achieves by sending
        // an IPI (inter-processor interrupt).

        if (stompWBCompleteActions & SWB_ICACHE_FLUSH)
        {
            // flushing icache on current processor (thread)
            ::FlushWriteBarrierInstructionCache();
            // asking other processors (threads) to invalidate their icache
            FlushProcessWriteBuffers();
        }

        g_lowest_address = args->lowest_address;
        VolatileStore(&g_highest_address, args->highest_address);

#if defined(_ARM64_) || defined(_ARM_)
        // Need to re-update for the changes to g_highest_address and g_lowest_address
        is_runtime_suspended = (stompWBCompleteActions & SWB_EE_RESTART) || args->is_runtime_suspended;
        stompWBCompleteActions |= ::StompWriteBarrierResize(is_runtime_suspended, args->requires_upper_bounds_check);

#ifdef _ARM_
        if (stompWBCompleteActions & SWB_ICACHE_FLUSH)
        {
            ::FlushWriteBarrierInstructionCache();
        }
#endif

        is_runtime_suspended = (stompWBCompleteActions & SWB_EE_RESTART) || args->is_runtime_suspended;
        if (!is_runtime_suspended)
        {
            // If the runtime is not suspended, force the updated state to be visible to all threads
            MemoryBarrier();
        }
#endif
        if (stompWBCompleteActions & SWB_EE_RESTART)
        {
            assert(!args->is_runtime_suspended &&
                "if the runtime was suspended in the patching routines, then it was in the running state at the beginning");
            ThreadSuspend::RestartEE(FALSE, TRUE);
        }
        return; // Unlike the other branches, we have already done the cleanup, so bail out here.
    case WriteBarrierOp::StompEphemeral:
        // StompEphemeral requires a new ephemeral low and a new ephemeral high
        assert(args->ephemeral_low != nullptr);
        assert(args->ephemeral_high != nullptr);
        g_ephemeral_low = args->ephemeral_low;
        g_ephemeral_high = args->ephemeral_high;
        stompWBCompleteActions |= ::StompWriteBarrierEphemeral(args->is_runtime_suspended);
        break;
    case WriteBarrierOp::Initialize:
        // This operation should only be invoked once, upon initialization.
        assert(g_card_table == nullptr);
        assert(g_lowest_address == nullptr);
        assert(g_highest_address == nullptr);
        assert(args->card_table != nullptr);
        assert(args->lowest_address != nullptr);
        assert(args->highest_address != nullptr);
        assert(args->ephemeral_low != nullptr);
        assert(args->ephemeral_high != nullptr);
        assert(args->is_runtime_suspended && "the runtime must be suspended here!");
        assert(!args->requires_upper_bounds_check && "the ephemeral generation must be at the top of the heap!");

        g_card_table = args->card_table;

#ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
        assert(g_card_bundle_table == nullptr);
        g_card_bundle_table = args->card_bundle_table;
#endif

        g_lowest_address = args->lowest_address;
        g_highest_address = args->highest_address;
        stompWBCompleteActions |= ::StompWriteBarrierResize(true, false);

        // StompWriteBarrierResize does not necessarily bash g_ephemeral_low
        // usages, so we must do so here. This is particularly true on x86,
        // where StompWriteBarrierResize will not bash g_ephemeral_low when
        // called with the parameters (true, false), as it is above.
        g_ephemeral_low = args->ephemeral_low;
        g_ephemeral_high = args->ephemeral_high;
        stompWBCompleteActions |= ::StompWriteBarrierEphemeral(true);
        break;
    case WriteBarrierOp::SwitchToWriteWatch:
#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
        assert(args->write_watch_table != nullptr);
        assert(args->is_runtime_suspended && "the runtime must be suspended here!");
        g_sw_ww_table = args->write_watch_table;
        g_sw_ww_enabled_for_gc_heap = true;
        stompWBCompleteActions |= ::SwitchToWriteWatchBarrier(true);
#else
        assert(!"should never be called without FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP");
#endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
        break;
    case WriteBarrierOp::SwitchToNonWriteWatch:
#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
        assert(args->is_runtime_suspended && "the runtime must be suspended here!");
        g_sw_ww_table = 0;
        g_sw_ww_enabled_for_gc_heap = false;
        stompWBCompleteActions |= ::SwitchToNonWriteWatchBarrier(true);
#else
        assert(!"should never be called without FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP");
#endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
        break;
    default:
        assert(!"unknown WriteBarrierOp enum");
    }
    if (stompWBCompleteActions & SWB_ICACHE_FLUSH)
    {
        ::FlushWriteBarrierInstructionCache();
    }
    if (stompWBCompleteActions & SWB_EE_RESTART)
    {
        assert(!args->is_runtime_suspended &&
            "if the runtime was suspended in the patching routines, then it was in the running state at the beginning");
        ThreadSuspend::RestartEE(FALSE, TRUE);
    }
}

void GCToEEInterface::EnableFinalization(bool foundFinalizers)
{
    if (foundFinalizers || FinalizerThread::HaveExtraWorkForFinalizer())
    {
        FinalizerThread::EnableFinalization();
    }
}

void GCToEEInterface::HandleFatalError(unsigned int exitCode)
{
    EEPOLICY_HANDLE_FATAL_ERROR(exitCode);
}

bool GCToEEInterface::ShouldFinalizeObjectForUnload(void* pDomain, Object* obj)
{
    // CoreCLR does not have appdomains, so this code path is dead. Other runtimes may
    // choose to inspect the object being finalized here.
    // [DESKTOP TODO] Desktop looks for "agile and finalizable" objects and may choose
    // to move them to a new app domain instead of finalizing them here.
    return true;
}

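// EagerFinalized gives the EE a chance to finalize an object inline during
// the GC instead of queuing it to the finalizer thread. WeakReference and
// WeakReference<T> take this path so that their underlying handles are
// cleaned up promptly.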
bool GCToEEInterface::EagerFinalized(Object* obj)
{
    MethodTable* pMT = obj->GetGCSafeMethodTable();
    if (pMT == pWeakReferenceMT ||
        pMT->GetCanonicalMethodTable() == pWeakReferenceOfTCanonMT)
    {
        FinalizeWeakReference(obj);
        return true;
    }

    return false;
}

MethodTable* GCToEEInterface::GetFreeObjectMethodTable()
{
    assert(g_pFreeObjectMethodTable != nullptr);
    return g_pFreeObjectMethodTable;
}

// These are arbitrary limits; we should never have config keys or values
// longer than these lengths.
const size_t MaxConfigKeyLength = 255;
const size_t MaxConfigValueLength = 255;

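// The GC asks for configuration through the Get*ConfigValue callbacks below.
// A few well-known keys are answered directly from startup flags; everything
// else is forwarded to CLRConfig, which reads COMPlus_-prefixed environment
// variables (among other configuration sources). For example, a GC query for
// a key named "GCSomeSetting" (a hypothetical key, purely for illustration)
// would be satisfied by an environment variable named COMPlus_GCSomeSetting,
// if set.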
bool GCToEEInterface::GetBooleanConfigValue(const char* key, bool* value)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
    } CONTRACTL_END;

    // These configuration values are given to us via startup flags.
    if (strcmp(key, "gcServer") == 0)
    {
        *value = g_heap_type == GC_HEAP_SVR;
        return true;
    }

    if (strcmp(key, "gcConcurrent") == 0)
    {
        *value = !!g_pConfig->GetGCconcurrent();
        return true;
    }

    if (strcmp(key, "GCRetainVM") == 0)
    {
        *value = !!g_pConfig->GetGCRetainVM();
        return true;
    }

    WCHAR configKey[MaxConfigKeyLength];
    if (MultiByteToWideChar(CP_ACP, 0, key, -1 /* key is null-terminated */, configKey, MaxConfigKeyLength) == 0)
    {
        // Whatever this is... it's not something we care about. (It was too long, wasn't Unicode, etc.)
        return false;
    }

    // Otherwise, ask the config subsystem.
    if (CLRConfig::IsConfigOptionSpecified(configKey))
    {
        CLRConfig::ConfigDWORDInfo info { configKey, 0, CLRConfig::EEConfig_default };
        *value = CLRConfig::GetConfigValue(info) != 0;
        return true;
    }

    return false;
}

bool GCToEEInterface::GetIntConfigValue(const char* key, int64_t* value)
{
    CONTRACTL {
      NOTHROW;
      GC_NOTRIGGER;
    } CONTRACTL_END;

    WCHAR configKey[MaxConfigKeyLength];
    if (MultiByteToWideChar(CP_ACP, 0, key, -1 /* key is null-terminated */, configKey, MaxConfigKeyLength) == 0)
    {
        // Whatever this is... it's not something we care about. (It was too long, wasn't Unicode, etc.)
        return false;
    }

    if (CLRConfig::IsConfigOptionSpecified(configKey))
    {
        CLRConfig::ConfigDWORDInfo info { configKey, 0, CLRConfig::EEConfig_default };
        *value = CLRConfig::GetConfigValue(info);
        return true;
    }

    return false;
}

bool GCToEEInterface::GetStringConfigValue(const char* key, const char** value)
{
    CONTRACTL {
      NOTHROW;
      GC_NOTRIGGER;
    } CONTRACTL_END;

    WCHAR configKey[MaxConfigKeyLength];
    if (MultiByteToWideChar(CP_ACP, 0, key, -1 /* key is null-terminated */, configKey, MaxConfigKeyLength) == 0)
    {
        // Whatever this is... it's not something we care about. (It was too long, wasn't Unicode, etc.)
        return false;
    }

    CLRConfig::ConfigStringInfo info { configKey, CLRConfig::EEConfig_default };
    LPWSTR out = CLRConfig::GetConfigValue(info);
    if (!out)
    {
        // config not found
        return false;
    }

    // not allocated on the stack since it escapes this function
    AStringHolder configResult = new (nothrow) char[MaxConfigValueLength];
    if (!configResult)
    {
        CLRConfig::FreeConfigString(out);
        return false;
    }

    if (WideCharToMultiByte(CP_ACP, 0, out, -1 /* out is null-terminated */,
          configResult.GetValue(), MaxConfigValueLength, nullptr, nullptr) == 0)
    {
        // This should only happen if the config subsystem gives us a string that's not valid
        // Unicode.
        CLRConfig::FreeConfigString(out);
        return false;
    }

    *value = configResult.Extract();
    CLRConfig::FreeConfigString(out);
    return true;
}

void GCToEEInterface::FreeStringConfigValue(const char* value)
{
    delete [] value;
}

bool GCToEEInterface::IsGCThread()
{
    return !!::IsGCThread();
}

bool GCToEEInterface::WasCurrentThreadCreatedByGC()
{
    return !!::IsGCSpecialThread();
}

struct SuspendableThreadStubArguments
{
    void* Argument;
    void (*ThreadStart)(void*);
    Thread* Thread;
    bool HasStarted;
    CLREvent ThreadStartedEvent;
};

struct ThreadStubArguments
{
    void* Argument;
    void (*ThreadStart)(void*);
    HANDLE Thread;
    bool HasStarted;
    CLREvent ThreadStartedEvent;
};

namespace
{
    const size_t MaxThreadNameSize = 255;

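    // Two flavors of GC-owned threads are created below. A "suspendable"
    // thread gets a full Thread object and participates in EE suspension,
    // while a non-suspendable thread is a plain utility thread that the
    // suspension machinery never sees. Both use the same startup handshake as
    // BackgroundThreadStub above: copy the stub arguments out before
    // signaling ThreadStartedEvent.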
    bool CreateSuspendableThread(
        void (*threadStart)(void*),
        void* argument,
        const wchar_t* name)
    {
        LIMITED_METHOD_CONTRACT;

        SuspendableThreadStubArguments args;
        args.Argument = argument;
        args.ThreadStart = threadStart;
        args.Thread = nullptr;
        args.HasStarted = false;
        if (!args.ThreadStartedEvent.CreateAutoEventNoThrow(FALSE))
        {
            return false;
        }

        EX_TRY
        {
            args.Thread = SetupUnstartedThread(FALSE);
        }
        EX_CATCH
        {
        }
        EX_END_CATCH(SwallowAllExceptions)

        if (!args.Thread)
        {
            args.ThreadStartedEvent.CloseEvent();
            return false;
        }

        auto threadStub = [](void* argument) -> DWORD
        {
            SuspendableThreadStubArguments* args = static_cast<SuspendableThreadStubArguments*>(argument);
            assert(args != nullptr);

            ClrFlsSetThreadType(ThreadType_GC);
            args->Thread->SetGCSpecial(true);
            STRESS_LOG_RESERVE_MEM(GC_STRESSLOG_MULTIPLY);
            args->HasStarted = !!args->Thread->HasStarted(false);

            Thread* thread = args->Thread;
            auto threadStart = args->ThreadStart;
            void* threadArgument = args->Argument;
            bool hasStarted = args->HasStarted;
            args->ThreadStartedEvent.Set();

            // The stubArgs cannot be used once the event is set, since that releases the wait on the
            // event in the function that created this thread, and the stubArgs go out of scope.
            if (hasStarted)
            {
                threadStart(threadArgument);
                DestroyThread(thread);
            }

            return 0;
        };
        if (!args.Thread->CreateNewThread(0, threadStub, &args, name))
        {
            args.Thread->DecExternalCount(FALSE);
            args.ThreadStartedEvent.CloseEvent();
            return false;
        }

        args.Thread->SetBackground(TRUE, FALSE);
        args.Thread->StartThread();

        // Wait for the thread to be in its main loop
        uint32_t res = args.ThreadStartedEvent.Wait(INFINITE, FALSE);
        args.ThreadStartedEvent.CloseEvent();
        _ASSERTE(res == WAIT_OBJECT_0);

        if (!args.HasStarted)
        {
            // The thread has failed to start and the Thread object was destroyed in the Thread::HasStarted
            // failure code path.
            return false;
        }

        return true;
    }

    bool CreateNonSuspendableThread(
        void (*threadStart)(void*),
        void* argument,
        const wchar_t* name)
    {
        LIMITED_METHOD_CONTRACT;

        ThreadStubArguments args;
        args.Argument = argument;
        args.ThreadStart = threadStart;
        args.Thread = INVALID_HANDLE_VALUE;
        if (!args.ThreadStartedEvent.CreateAutoEventNoThrow(FALSE))
        {
            return false;
        }

        auto threadStub = [](void* argument) -> DWORD
        {
            ThreadStubArguments* args = static_cast<ThreadStubArguments*>(argument);
            assert(args != nullptr);

            ClrFlsSetThreadType(ThreadType_GC);
            STRESS_LOG_RESERVE_MEM(GC_STRESSLOG_MULTIPLY);

            args->HasStarted = true;
            auto threadStart = args->ThreadStart;
            void* threadArgument = args->Argument;
            args->ThreadStartedEvent.Set();

            // The stub args cannot be used once the event is set, since that releases the wait on the
            // event in the function that created this thread, and the stub args go out of scope.
            threadStart(threadArgument);
            return 0;
        };

        args.Thread = Thread::CreateUtilityThread(Thread::StackSize_Medium, threadStub, &args, name);
        if (args.Thread == INVALID_HANDLE_VALUE)
        {
            args.ThreadStartedEvent.CloseEvent();
            return false;
        }

        // Wait for the thread to be in its main loop
        uint32_t res = args.ThreadStartedEvent.Wait(INFINITE, FALSE);
        args.ThreadStartedEvent.CloseEvent();
        _ASSERTE(res == WAIT_OBJECT_0);

        CloseHandle(args.Thread);
        return true;
    }
} // anonymous namespace

bool GCToEEInterface::CreateThread(void (*threadStart)(void*), void* arg, bool is_suspendable, const char* name)
{
    LIMITED_METHOD_CONTRACT;

    InlineSString<MaxThreadNameSize> wideName;
    const WCHAR* namePtr = nullptr;
    EX_TRY
    {
        if (name != nullptr)
        {
            wideName.SetUTF8(name);
            namePtr = wideName.GetUnicode();
        }
    }
    EX_CATCH
    {
        // We're not obligated to provide a name - if it's not valid,
        // just report nullptr as the name.
    }
    EX_END_CATCH(SwallowAllExceptions)

    if (is_suspendable)
    {
        return CreateSuspendableThread(threadStart, arg, namePtr);
    }
    else
    {
        return CreateNonSuspendableThread(threadStart, arg, namePtr);
    }
}

void GCToEEInterface::WalkAsyncPinnedForPromotion(Object* object, ScanContext* sc, promote_func* callback)
{
    LIMITED_METHOD_CONTRACT;

    assert(object != nullptr);
    assert(sc != nullptr);
    assert(callback != nullptr);
    if (object->GetGCSafeMethodTable() != g_pOverlappedDataClass)
    {
        // not an overlapped data object - nothing to do.
        return;
    }

    // reporting the pinned user objects
    OverlappedDataObject *pOverlapped = (OverlappedDataObject *)object;
    if (pOverlapped->m_userObject != NULL)
    {
        if (pOverlapped->m_userObject->GetGCSafeMethodTable() == g_pPredefinedArrayTypes[ELEMENT_TYPE_OBJECT]->GetMethodTable())
        {
            // OverlappedDataObject is very special.  An async pin handle keeps it alive.
            // During GC, we also make sure that
            // 1. m_userObject itself does not move if m_userObject is not an array
            // 2. Every object pointed to by m_userObject does not move if m_userObject is an array
            // We do not want to pin m_userObject if it is an array.
            ArrayBase* pUserObject = (ArrayBase*)OBJECTREFToObject(pOverlapped->m_userObject);
            Object **ppObj = (Object**)pUserObject->GetDataPtr(TRUE);
            size_t num = pUserObject->GetNumComponents();
            for (size_t i = 0; i < num; i++)
            {
                callback(ppObj + i, sc, GC_CALL_PINNED);
            }
        }
        else
        {
            callback(&OBJECTREF_TO_UNCHECKED_OBJECTREF(pOverlapped->m_userObject), (ScanContext *)sc, GC_CALL_PINNED);
        }
    }
}

void GCToEEInterface::WalkAsyncPinned(Object* object, void* context, void (*callback)(Object*, Object*, void*))
{
    LIMITED_METHOD_CONTRACT;

    assert(object != nullptr);
    assert(callback != nullptr);

    if (object->GetGCSafeMethodTable() != g_pOverlappedDataClass)
    {
        return;
    }

    OverlappedDataObject *pOverlapped = (OverlappedDataObject *)(object);
    if (pOverlapped->m_userObject != NULL)
    {
        Object * pUserObject = OBJECTREFToObject(pOverlapped->m_userObject);
        callback(object, pUserObject, context);
        if (pOverlapped->m_userObject->GetGCSafeMethodTable() == g_pPredefinedArrayTypes[ELEMENT_TYPE_OBJECT]->GetMethodTable())
        {
            ArrayBase* pUserArrayObject = (ArrayBase*)pUserObject;
            Object **pObj = (Object**)pUserArrayObject->GetDataPtr(TRUE);
            size_t num = pUserArrayObject->GetNumComponents();
            for (size_t i = 0; i < num; i++)
            {
                callback(pUserObject, pObj[i], context);
            }
        }
    }
}

IGCToCLREventSink* GCToEEInterface::EventSink()
{
    LIMITED_METHOD_CONTRACT;

    return &g_gcToClrEventSink;
}

uint32_t GCToEEInterface::GetDefaultDomainIndex()
{
    LIMITED_METHOD_CONTRACT;

    return SystemDomain::System()->DefaultDomain()->GetIndex().m_dwIndex;
}

void *GCToEEInterface::GetAppDomainAtIndex(uint32_t appDomainIndex)
{
    LIMITED_METHOD_CONTRACT;

    ADIndex index(appDomainIndex);
    return static_cast<void *>(SystemDomain::GetAppDomainAtIndex(index));
}

bool GCToEEInterface::AppDomainCanAccessHandleTable(uint32_t appDomainID)
{
    LIMITED_METHOD_CONTRACT;

    ADIndex index(appDomainID);
    AppDomain *pDomain = SystemDomain::GetAppDomainAtIndex(index);
    return (pDomain != NULL);
}

uint32_t GCToEEInterface::GetIndexOfAppDomainBeingUnloaded()
{
    LIMITED_METHOD_CONTRACT;

    return 0xFFFFFFFF;
}

uint32_t GCToEEInterface::GetTotalNumSizedRefHandles()
{
    LIMITED_METHOD_CONTRACT;

    return SystemDomain::System()->GetTotalNumSizedRefHandles();
}


bool GCToEEInterface::AppDomainIsRudeUnload(void *appDomain)
{
    LIMITED_METHOD_CONTRACT;

    return false;
}

bool GCToEEInterface::AnalyzeSurvivorsRequested(int condemnedGeneration)
{
    LIMITED_METHOD_CONTRACT;

    // Is the list active?
    GcNotifications gn(g_pGcNotificationTable);
    if (gn.IsActive())
    {
        GcEvtArgs gea = { GC_MARK_END, { (1<<condemnedGeneration) } };
        if (gn.GetNotification(gea) != 0)
        {
            return true;
        }
    }

    return false;
}

void GCToEEInterface::AnalyzeSurvivorsFinished(int condemnedGeneration)
{
    LIMITED_METHOD_CONTRACT;

    // Is the list active?
    GcNotifications gn(g_pGcNotificationTable);
    if (gn.IsActive())
    {
        GcEvtArgs gea = { GC_MARK_END, { (1<<condemnedGeneration) } };
        if (gn.GetNotification(gea) != 0)
        {
            DACNotify::DoGCNotification(gea);
        }
    }
}