// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.

/*
 * GCENV.EE.CPP
 *
 * GCToEEInterface implementation
 *
 */

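// Suspends the execution engine for a GC, then lets the debugger know a
// collection is about to happen.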
void GCToEEInterface::SuspendEE(SUSPEND_REASON reason)
{
    WRAPPER_NO_CONTRACT;

    static_assert_no_msg(SUSPEND_FOR_GC == (int)ThreadSuspend::SUSPEND_FOR_GC);
    static_assert_no_msg(SUSPEND_FOR_GC_PREP == (int)ThreadSuspend::SUSPEND_FOR_GC_PREP);

    _ASSERTE(reason == SUSPEND_FOR_GC || reason == SUSPEND_FOR_GC_PREP);

    ThreadSuspend::SuspendEE((ThreadSuspend::SUSPEND_REASON)reason);

    g_pDebugInterface->BeforeGarbageCollection();
}

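// Resumes the execution engine after a GC, notifying the debugger first so
// the BeforeGarbageCollection/AfterGarbageCollection callbacks stay paired.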
void GCToEEInterface::RestartEE(bool bFinishedGC)
{
    WRAPPER_NO_CONTRACT;

    g_pDebugInterface->AfterGarbageCollection();

    ThreadSuspend::RestartEE(bFinishedGC, TRUE);
}

VOID GCToEEInterface::SyncBlockCacheWeakPtrScan(HANDLESCANPROC scanProc, uintptr_t lp1, uintptr_t lp2)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    SyncBlockCache::GetSyncBlockCache()->GCWeakPtrScan(scanProc, lp1, lp2);
}


// The EE can perform post-stack-scanning actions here while the
// user threads are still suspended.
VOID GCToEEInterface::AfterGcScanRoots (int condemned, int max_gen,
                                   ScanContext* sc)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

#ifdef FEATURE_COMINTEROP
    // Go through all the app domains and for each one detach all the *unmarked* RCWs to prevent
    // the RCW cache from resurrecting them.
    UnsafeAppDomainIterator i(TRUE);
    i.Init();

    while (i.Next())
    {
        i.GetDomain()->DetachRCWs();
    }
#endif // FEATURE_COMINTEROP
}

/*
 * Scan all stack roots
 */

static void ScanStackRoots(Thread * pThread, promote_func* fn, ScanContext* sc)
{
    GCCONTEXT   gcctx;

    gcctx.f  = fn;
    gcctx.sc = sc;
    gcctx.cf = NULL;

    ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE();

    // Either we are in a concurrent situation (in which case the thread is unknown to
    // us), or we are performing a synchronous GC and we are the GC thread, holding
    // the threadstore lock.

    _ASSERTE(dbgOnly_IsSpecialEEThread() ||
                GetThread() == NULL ||
                // this is for background GC threads which always call this when EE is suspended.
                IsGCSpecialThread() ||
                (GetThread() == ThreadSuspend::GetSuspensionThread() && ThreadStore::HoldingThreadStore()));

    pThread->SetHasPromotedBytes();

    Frame* pTopFrame = pThread->GetFrame();
    Object ** topStack = (Object **)pTopFrame;
    if ((pTopFrame != ((Frame*)-1))
        && (pTopFrame->GetVTablePtr() == InlinedCallFrame::GetMethodFrameVPtr())) {
        // It is an InlinedCallFrame. Get SP from it.
        InlinedCallFrame* pInlinedFrame = (InlinedCallFrame*)pTopFrame;
        topStack = (Object **)pInlinedFrame->GetCallSiteSP();
    }

    sc->stack_limit = (uintptr_t)topStack;

#ifdef FEATURE_CONSERVATIVE_GC
    if (g_pConfig->GetGCConservative())
    {
        // Conservative stack root reporting.
        // We treat everything on the stack as a pinned interior GC pointer.
        // Since we report everything as pinned, we don't need to run the following
        // code during the relocation phase.
        if (sc->promotion)
        {
            Object ** bottomStack = (Object **) pThread->GetCachedStackBase();
            Object ** walk;
            for (walk = topStack; walk < bottomStack; walk ++)
            {
                if (((void*)*walk > (void*)bottomStack || (void*)*walk < (void*)topStack) &&
                    ((void*)*walk >= (void*)g_lowest_address && (void*)*walk <= (void*)g_highest_address)
                    )
                {
                    //DbgPrintf("promote " FMT_ADDR " : " FMT_ADDR "\n", walk, *walk);
                    fn(walk, sc, GC_CALL_INTERIOR|GC_CALL_PINNED);
                }
            }
        }

        // Also ask the explicit Frames to report any references they might know about.
        // Generally these will be a subset of the objects reported above, but there's
        // nothing that guarantees that, and in the specific case of a GC protect frame the
        // references it protects may live at a lower address than the frame itself (and
        // thus escape the stack range we scanned above).
        Frame *pFrame = pThread->GetFrame();
        while (pFrame != FRAME_TOP)
        {
            pFrame->GcScanRoots(fn, sc);
            pFrame = pFrame->PtrNextFrame();
        }
    }
    else
#endif
    {
        unsigned flagsStackWalk = ALLOW_ASYNC_STACK_WALK | ALLOW_INVALID_OBJECTS;
#if defined(WIN64EXCEPTIONS)
        flagsStackWalk |= GC_FUNCLET_REFERENCE_REPORTING;
#endif // defined(WIN64EXCEPTIONS)
        pThread->StackWalkFrames( GcStackCrawlCallBack, &gcctx, flagsStackWalk);
    }
}

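// Scans stack roots for all threads whose allocation context is assigned to
// this scanning thread's heap (relevant for server GC, where several GC
// threads partition the work and compete to mark statics).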
void GCToEEInterface::GcScanRoots(promote_func* fn, int condemned, int max_gen, ScanContext* sc)
{
    STRESS_LOG1(LF_GCROOTS, LL_INFO10, "GCScan: Promotion Phase = %d\n", sc->promotion);

    // In server GC, we should be competing for marking the statics
    if (GCHeapUtilities::MarkShouldCompeteForStatics())
    {
        if (condemned == max_gen && sc->promotion)
        {
            SystemDomain::EnumAllStaticGCRefs(fn, sc);
        }
    }

    Thread* pThread = NULL;
    while ((pThread = ThreadStore::GetThreadList(pThread)) != NULL)
    {
        STRESS_LOG2(LF_GC | LF_GCROOTS, LL_INFO100, "{ Starting scan of Thread %p ID = %x\n", pThread, pThread->GetThreadId());

        if (GCHeapUtilities::GetGCHeap()->IsThreadUsingAllocationContextHeap(
            pThread->GetAllocContext(), sc->thread_number))
        {
            sc->thread_under_crawl = pThread;
#ifdef FEATURE_EVENT_TRACE
            sc->dwEtwRootKind = kEtwGCRootKindStack;
#endif // FEATURE_EVENT_TRACE
            ScanStackRoots(pThread, fn, sc);
#ifdef FEATURE_EVENT_TRACE
            sc->dwEtwRootKind = kEtwGCRootKindOther;
#endif // FEATURE_EVENT_TRACE
        }
        STRESS_LOG2(LF_GC | LF_GCROOTS, LL_INFO100, "Ending scan of Thread %p ID = 0x%x }\n", pThread, pThread->GetThreadId());
    }
}

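// EE-side work at the start of a GC: validates byrefs pinned by IL stubs,
// cleans up code heaps, and (with COM interop) lets the RCW walker report
// managed/native cycles before marking begins.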
void GCToEEInterface::GcStartWork (int condemned, int max_gen)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

#ifdef VERIFY_HEAP
    // Validate byrefs pinned by IL stubs since the last GC.
    StubHelpers::ProcessByrefValidationList();
#endif // VERIFY_HEAP

    ExecutionManager::CleanupCodeHeaps();

#ifdef FEATURE_EVENT_TRACE
    ETW::TypeSystemLog::Cleanup();
#endif

#ifdef FEATURE_COMINTEROP
    //
    // Let the GC detect managed/native cycles with input from Jupiter.
    // Jupiter will
    // 1. Report references from RCWs to CCWs based on native references in Jupiter
    // 2. Identify the subset of CCWs that need to be rooted
    //
    // We'll build the references from RCWs to CCWs using
    // 1. Preallocated arrays
    // 2. Dependent handles
    //
    RCWWalker::OnGCStarted(condemned);
#endif // FEATURE_COMINTEROP

    if (condemned == max_gen)
    {
        ThreadStore::s_pThreadStore->OnMaxGenerationGCStarted();
    }
}

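// Called when a GC has completed; undoes the COM interop work kicked off in
// GcStartWork by telling Jupiter the collection has finished.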
void GCToEEInterface::GcDone(int condemned)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

#ifdef FEATURE_COMINTEROP
    //
    // Tell Jupiter GC has finished
    //
    RCWWalker::OnGCFinished(condemned);
#endif // FEATURE_COMINTEROP
}

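// Determines whether the object referenced by a ref-counted handle is still
// alive. With COM interop, liveness follows the activity of the object's CCW;
// otherwise such handles never report the object alive.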
bool GCToEEInterface::RefCountedHandleCallbacks(Object * pObject)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

#ifdef FEATURE_COMINTEROP
    //<REVISIT_TODO>@todo optimize the access to the ref-count
    ComCallWrapper* pWrap = ComCallWrapper::GetWrapperForObject((OBJECTREF)pObject);

    return pWrap != NULL && pWrap->IsWrapperActive();
#else
    return false;
#endif
}

void GCToEEInterface::GcBeforeBGCSweepWork()
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

#ifdef VERIFY_HEAP
    // Validate byrefs pinned by IL stubs since the last GC.
    StubHelpers::ProcessByrefValidationList();
#endif // VERIFY_HEAP
}

void GCToEEInterface::SyncBlockCacheDemote(int max_gen)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    SyncBlockCache::GetSyncBlockCache()->GCDone(TRUE, max_gen);
}

void GCToEEInterface::SyncBlockCachePromotionsGranted(int max_gen)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    SyncBlockCache::GetSyncBlockCache()->GCDone(FALSE, max_gen);
}

uint32_t GCToEEInterface::GetActiveSyncBlockCount()
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    return SyncBlockCache::GetSyncBlockCache()->GetActiveCount();
}

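// Returns the allocation context of the current thread; the caller must be
// running on a thread known to the EE.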
gc_alloc_context * GCToEEInterface::GetAllocContext()
{
    WRAPPER_NO_CONTRACT;

    Thread* pThread = ::GetThread();
    assert(pThread != nullptr);
    return pThread->GetAllocContext();
}

void GCToEEInterface::GcEnumAllocContexts(enum_alloc_context_func* fn, void* param)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    if (GCHeapUtilities::UseThreadAllocationContexts())
    {
        Thread * pThread = NULL;
        while ((pThread = ThreadStore::GetThreadList(pThread)) != NULL)
        {
            fn(pThread->GetAllocContext(), param);
        }
    }
    else
    {
        fn(&g_global_alloc_context, param);
    }
}


uint8_t* GCToEEInterface::GetLoaderAllocatorObjectForGC(Object* pObject)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    return pObject->GetGCSafeMethodTable()->GetLoaderAllocatorObjectForGC();
}

bool GCToEEInterface::IsPreemptiveGCDisabled()
{
    WRAPPER_NO_CONTRACT;

    Thread* pThread = ::GetThread();
    if (pThread)
    {
        return !!pThread->PreemptiveGCDisabled();
    }

    return false;
}

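// Switches the current thread to preemptive mode if it was in cooperative
// mode, returning true iff the mode was actually toggled. A typical
// (hypothetical) caller pairs the two functions like this:
//
//   bool toggled = GCToEEInterface::EnablePreemptiveGC();
//   // ... block on an OS wait without holding up the GC ...
//   if (toggled)
//   {
//       GCToEEInterface::DisablePreemptiveGC();
//   }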
bool GCToEEInterface::EnablePreemptiveGC()
{
    WRAPPER_NO_CONTRACT;

    bool bToggleGC = false;
    Thread* pThread = ::GetThread();

    if (pThread)
    {
        bToggleGC = !!pThread->PreemptiveGCDisabled();
        if (bToggleGC)
        {
            pThread->EnablePreemptiveGC();
        }
    }

    return bToggleGC;
}

void GCToEEInterface::DisablePreemptiveGC()
{
    WRAPPER_NO_CONTRACT;

    Thread* pThread = ::GetThread();
    if (pThread)
    {
        pThread->DisablePreemptiveGC();
    }
}

Thread* GCToEEInterface::GetThread()
{
    WRAPPER_NO_CONTRACT;

    return ::GetThread();
}

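// Arguments for BackgroundThreadStub. The new thread signals
// threadStartedEvent once it has copied what it needs out of this struct;
// after that the creator's stack copy may safely go out of scope.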
struct BackgroundThreadStubArgs
{
    Thread* thread;
    GCBackgroundThreadFunction threadStart;
    void* arg;
    CLREvent threadStartedEvent;
    bool hasStarted;
};

DWORD WINAPI BackgroundThreadStub(void* arg)
{
    BackgroundThreadStubArgs* stubArgs = (BackgroundThreadStubArgs*)arg;
    assert (stubArgs->thread != NULL);

    ClrFlsSetThreadType (ThreadType_GC);
    stubArgs->thread->SetGCSpecial(true);
    STRESS_LOG_RESERVE_MEM (GC_STRESSLOG_MULTIPLY);

    stubArgs->hasStarted = !!stubArgs->thread->HasStarted(FALSE);

    Thread* thread = stubArgs->thread;
    GCBackgroundThreadFunction realThreadStart = stubArgs->threadStart;
    void* realThreadArg = stubArgs->arg;
    bool hasStarted = stubArgs->hasStarted;

    stubArgs->threadStartedEvent.Set();
    // The stubArgs cannot be used once the event is set, since that releases the wait on the
    // event in the function that created this thread, and the stubArgs go out of scope.

    DWORD result = 0;

    if (hasStarted)
    {
        result = realThreadStart(realThreadArg);
        DestroyThread(thread);
    }

    return result;
}

//
// Diagnostics code
//

#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
inline BOOL ShouldTrackMovementForProfilerOrEtw()
{
#ifdef GC_PROFILING
    if (CORProfilerTrackGC())
        return true;
#endif

#ifdef FEATURE_EVENT_TRACE
    if (ETW::GCLog::ShouldTrackMovementForEtw())
        return true;
#endif

    return false;
}
#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)

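// Root-scan callback used for the profiler/ETW heap walks below: interior
// pointers are first resolved to their containing object before being passed
// on to ScanRootsHelper.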
void ProfScanRootsHelper(Object** ppObject, ScanContext *pSC, uint32_t dwFlags)
{
#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
    Object *pObj = *ppObject;
    if (dwFlags & GC_CALL_INTERIOR)
    {
        pObj = GCHeapUtilities::GetGCHeap()->GetContainingObject(pObj, true);
        if (pObj == nullptr)
            return;
    }
    ScanRootsHelper(pObj, ppObject, pSC, dwFlags);
#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
}

// TODO - at some point we would like to completely decouple profiling
// from ETW tracing using a pattern similar to this, where the
// ProfilingScanContext has flags about whether or not certain things
// should be tracked, and each one of these ProfilerShouldXYZ functions
// will check these flags and determine what to do based upon that.
// GCProfileWalkHeapWorker can, in turn, call those methods without fear
// of things being ifdef'd out.

// Returns TRUE if GC profiling is enabled and the profiler
// should scan dependent handles, FALSE otherwise.
BOOL ProfilerShouldTrackConditionalWeakTableElements()
{
#if defined(GC_PROFILING)
    return CORProfilerTrackConditionalWeakTableElements();
#else
    return FALSE;
#endif // defined (GC_PROFILING)
}

// If GC profiling is enabled, informs the profiler that we are done
// tracing dependent handles.
void ProfilerEndConditionalWeakTableElementReferences(void* heapId)
{
#if defined (GC_PROFILING)
    g_profControlBlock.pProfInterface->EndConditionalWeakTableElementReferences(heapId);
#else
    UNREFERENCED_PARAMETER(heapId);
#endif // defined (GC_PROFILING)
}

// If GC profiling is enabled, informs the profiler that we are done
// tracing root references.
void ProfilerEndRootReferences2(void* heapId)
{
#if defined (GC_PROFILING)
    g_profControlBlock.pProfInterface->EndRootReferences2(heapId);
#else
    UNREFERENCED_PARAMETER(heapId);
#endif // defined (GC_PROFILING)
}

void GcScanRootsForProfilerAndETW(promote_func* fn, int condemned, int max_gen, ScanContext* sc)
{
    Thread* pThread = NULL;
    while ((pThread = ThreadStore::GetThreadList(pThread)) != NULL)
    {
        sc->thread_under_crawl = pThread;
#ifdef FEATURE_EVENT_TRACE
        sc->dwEtwRootKind = kEtwGCRootKindStack;
#endif // FEATURE_EVENT_TRACE
        ScanStackRoots(pThread, fn, sc);
#ifdef FEATURE_EVENT_TRACE
        sc->dwEtwRootKind = kEtwGCRootKindOther;
#endif // FEATURE_EVENT_TRACE
    }
}

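// Reports a single handle to the profiler and/or ETW. For dependent handles,
// pSec is the secondary object that the primary keeps alive.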
void ScanHandleForProfilerAndETW(Object** pRef, Object* pSec, uint32_t flags, ScanContext* context, bool isDependent)
{
    ProfilingScanContext* pSC = (ProfilingScanContext*)context;

#ifdef GC_PROFILING
    // Give the profiler the objectref.
    if (pSC->fProfilerPinned)
    {
        if (!isDependent)
        {
            BEGIN_PIN_PROFILER(CORProfilerTrackGC());
            g_profControlBlock.pProfInterface->RootReference2(
                (uint8_t *)*pRef,
                kEtwGCRootKindHandle,
                (EtwGCRootFlags)flags,
                pRef,
                &pSC->pHeapId);
            END_PIN_PROFILER();
        }
        else
        {
            BEGIN_PIN_PROFILER(CORProfilerTrackConditionalWeakTableElements());
            g_profControlBlock.pProfInterface->ConditionalWeakTableElementReference(
                (uint8_t*)*pRef,
                (uint8_t*)pSec,
                pRef,
                &pSC->pHeapId);
            END_PIN_PROFILER();
        }
    }
#endif // GC_PROFILING

#if defined(FEATURE_EVENT_TRACE)
    // Notify ETW of the handle
    if (ETW::GCLog::ShouldWalkHeapRootsForEtw())
    {
        ETW::GCLog::RootReference(
            pRef,
            *pRef,          // object being rooted
            pSec,           // pSecondaryNodeForDependentHandle
            isDependent,
            pSC,
            0,              // dwGCFlags,
            flags);     // ETW handle flags
    }
#endif // defined(FEATURE_EVENT_TRACE)
}

// This is called only if we've determined that either:
//     a) The Profiling API wants to do a walk of the heap, and it has pinned the
//     profiler in place (so it cannot be detached), and it's thus safe to call into the
//     profiler, OR
//     b) ETW infrastructure wants to do a walk of the heap either to log roots,
//     objects, or both.
// This can also be called to do a single walk for BOTH a) and b) simultaneously. Since
// ETW can ask for roots but not objects (or vice versa), the flags passed in control
// exactly which parts of the walk are performed.
#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
void GCProfileWalkHeapWorker(BOOL fProfilerPinned, BOOL fShouldWalkHeapRootsForEtw, BOOL fShouldWalkHeapObjectsForEtw)
{
    {
        ProfilingScanContext SC(fProfilerPinned);
        unsigned max_generation = GCHeapUtilities::GetGCHeap()->GetMaxGeneration();

        // **** Scan roots:  Only scan roots if profiling API wants them or ETW wants them.
        if (fProfilerPinned || fShouldWalkHeapRootsForEtw)
        {
            GcScanRootsForProfilerAndETW(&ProfScanRootsHelper, max_generation, max_generation, &SC);
            SC.dwEtwRootKind = kEtwGCRootKindFinalizer;
            GCHeapUtilities::GetGCHeap()->DiagScanFinalizeQueue(&ProfScanRootsHelper, &SC);

            // Handles are kept independent of wks/svr/concurrent builds
            SC.dwEtwRootKind = kEtwGCRootKindHandle;
            GCHeapUtilities::GetGCHeap()->DiagScanHandles(&ScanHandleForProfilerAndETW, max_generation, &SC);

            // Indicate that regular handle scanning is over, so we can flush the buffered roots
            // to the profiler.  (This is for profapi only.  ETW will flush after the
            // walk of the entire heap is complete, via ETW::GCLog::EndHeapDump.)
            if (fProfilerPinned)
            {
                ProfilerEndRootReferences2(&SC.pHeapId);
            }
        }

        // **** Scan dependent handles: only if the profiler supports it or ETW wants roots
        if ((fProfilerPinned && ProfilerShouldTrackConditionalWeakTableElements()) ||
            fShouldWalkHeapRootsForEtw)
        {
            // GcScanDependentHandlesForProfiler double-checks
            // CORProfilerTrackConditionalWeakTableElements() before calling into the profiler

            ProfilingScanContext* pSC = &SC;

            // We'll re-use pHeapId (which was either unused (0) or freed by EndRootReferences2
            // (-1)), so reset it to NULL
            _ASSERTE((*((size_t *)(&pSC->pHeapId)) == (size_t)(-1)) ||
                    (*((size_t *)(&pSC->pHeapId)) == (size_t)(0)));
            pSC->pHeapId = NULL;

            GCHeapUtilities::GetGCHeap()->DiagScanDependentHandles(&ScanHandleForProfilerAndETW, max_generation, &SC);

            // Indicate that dependent handle scanning is over, so we can flush the buffered roots
            // to the profiler.  (This is for profapi only.  ETW will flush after the
            // walk of the entire heap is complete, via ETW::GCLog::EndHeapDump.)
            if (fProfilerPinned && ProfilerShouldTrackConditionalWeakTableElements())
            {
                ProfilerEndConditionalWeakTableElementReferences(&SC.pHeapId);
            }
        }

        ProfilerWalkHeapContext profilerWalkHeapContext(fProfilerPinned, SC.pvEtwContext);

        // **** Walk objects on heap: only if profiling API wants them or ETW wants them.
        if (fProfilerPinned || fShouldWalkHeapObjectsForEtw)
        {
            GCHeapUtilities::GetGCHeap()->DiagWalkHeap(&HeapWalkHelper, &profilerWalkHeapContext, max_generation, true /* walk the large object heap */);
        }

#ifdef FEATURE_EVENT_TRACE
        // **** Done! Indicate to ETW helpers that the heap walk is done, so any buffers
        // should be flushed into the ETW stream
        if (fShouldWalkHeapObjectsForEtw || fShouldWalkHeapRootsForEtw)
        {
            ETW::GCLog::EndHeapDump(&profilerWalkHeapContext);
        }
#endif // FEATURE_EVENT_TRACE
    }
}
#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)

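// Entry point for the profiler/ETW heap walk. Walks statics and COM info for
// ETW if requested, then decides whether GCProfileWalkHeapWorker runs pinned
// to the profiler, for ETW only, or not at all.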
void GCProfileWalkHeap()
{
    BOOL fWalkedHeapForProfiler = FALSE;

#ifdef FEATURE_EVENT_TRACE
    if (ETW::GCLog::ShouldWalkStaticsAndCOMForEtw())
        ETW::GCLog::WalkStaticsAndCOMForETW();

    BOOL fShouldWalkHeapRootsForEtw = ETW::GCLog::ShouldWalkHeapRootsForEtw();
    BOOL fShouldWalkHeapObjectsForEtw = ETW::GCLog::ShouldWalkHeapObjectsForEtw();
#else // !FEATURE_EVENT_TRACE
    BOOL fShouldWalkHeapRootsForEtw = FALSE;
    BOOL fShouldWalkHeapObjectsForEtw = FALSE;
#endif // FEATURE_EVENT_TRACE

#if defined (GC_PROFILING)
    {
        BEGIN_PIN_PROFILER(CORProfilerTrackGC());
        GCProfileWalkHeapWorker(TRUE /* fProfilerPinned */, fShouldWalkHeapRootsForEtw, fShouldWalkHeapObjectsForEtw);
        fWalkedHeapForProfiler = TRUE;
        END_PIN_PROFILER();
    }
#endif // defined (GC_PROFILING)

#if defined (GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
    // We need to walk the heap if one of GC_PROFILING or FEATURE_EVENT_TRACE
    // is defined, since both of them make use of the walk heap worker.
    if (!fWalkedHeapForProfiler &&
        (fShouldWalkHeapRootsForEtw || fShouldWalkHeapObjectsForEtw))
    {
        GCProfileWalkHeapWorker(FALSE /* fProfilerPinned */, fShouldWalkHeapRootsForEtw, fShouldWalkHeapObjectsForEtw);
    }
#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
}

void WalkFReachableObjects(bool isCritical, void* objectID)
{
    g_profControlBlock.pProfInterface->FinalizeableObjectQueued(isCritical, (ObjectID)objectID);
}

static fq_walk_fn g_FQWalkFn = &WalkFReachableObjects;

void GCToEEInterface::DiagGCStart(int gen, bool isInduced)
{
#ifdef GC_PROFILING
    DiagUpdateGenerationBounds();
    GarbageCollectionStartedCallback(gen, isInduced);
    {
        BEGIN_PIN_PROFILER(CORProfilerTrackGC());
        size_t context = 0;

        // When we're walking objects allocated by class, we don't want to walk the large
        // object heap because it would count things that may have been around for a while.
        GCHeapUtilities::GetGCHeap()->DiagWalkHeap(&AllocByClassHelper, (void *)&context, 0, false);

        // Notify that we've reached the end of the Gen 0 scan
        g_profControlBlock.pProfInterface->EndAllocByClass(&context);
        END_PIN_PROFILER();
    }

#endif // GC_PROFILING
}

void GCToEEInterface::DiagUpdateGenerationBounds()
{
#ifdef GC_PROFILING
    if (CORProfilerTrackGC())
        UpdateGenerationBounds();
#endif // GC_PROFILING
}

void GCToEEInterface::DiagGCEnd(size_t index, int gen, int reason, bool fConcurrent)
{
#ifdef GC_PROFILING
    if (!fConcurrent)
    {
        GCProfileWalkHeap();
        DiagUpdateGenerationBounds();
        GarbageCollectionFinishedCallback();
    }
#endif // GC_PROFILING
}

void GCToEEInterface::DiagWalkFReachableObjects(void* gcContext)
{
#ifdef GC_PROFILING
    if (CORProfilerTrackGC())
    {
        BEGIN_PIN_PROFILER(CORProfilerPresent());
        GCHeapUtilities::GetGCHeap()->DiagWalkFinalizeQueue(gcContext, g_FQWalkFn);
        END_PIN_PROFILER();
    }
#endif //GC_PROFILING
}

// Note on last parameter: when calling this for bgc, only ETW
// should be sending these events so that existing profapi profilers
// don't get confused.
void WalkMovedReferences(uint8_t* begin, uint8_t* end,
                         ptrdiff_t reloc,
                         void* context,
                         bool fCompacting,
                         bool fBGC)
{
    ETW::GCLog::MovedReference(begin, end,
                               (fCompacting ? reloc : 0),
                               (size_t)context,
                               fCompacting,
                               !fBGC);
}

void GCToEEInterface::DiagWalkSurvivors(void* gcContext)
{
#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
    if (ShouldTrackMovementForProfilerOrEtw())
    {
        size_t context = 0;
        ETW::GCLog::BeginMovedReferences(&context);
        GCHeapUtilities::GetGCHeap()->DiagWalkSurvivorsWithType(gcContext, &WalkMovedReferences, (void*)context, walk_for_gc);
        ETW::GCLog::EndMovedReferences(context);
    }
#endif //GC_PROFILING || FEATURE_EVENT_TRACE
}

void GCToEEInterface::DiagWalkLOHSurvivors(void* gcContext)
{
#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
    if (ShouldTrackMovementForProfilerOrEtw())
    {
        size_t context = 0;
        ETW::GCLog::BeginMovedReferences(&context);
        GCHeapUtilities::GetGCHeap()->DiagWalkSurvivorsWithType(gcContext, &WalkMovedReferences, (void*)context, walk_for_loh);
        ETW::GCLog::EndMovedReferences(context);
    }
#endif //GC_PROFILING || FEATURE_EVENT_TRACE
}

void GCToEEInterface::DiagWalkBGCSurvivors(void* gcContext)
{
#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
    if (ShouldTrackMovementForProfilerOrEtw())
    {
        size_t context = 0;
        ETW::GCLog::BeginMovedReferences(&context);
        GCHeapUtilities::GetGCHeap()->DiagWalkSurvivorsWithType(gcContext, &WalkMovedReferences, (void*)context, walk_for_bgc);
        ETW::GCLog::EndMovedReferences(context);
    }
#endif //GC_PROFILING || FEATURE_EVENT_TRACE
}

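// Patches the JIT-generated write barrier code in response to a request from
// the GC. The args->operation field selects what changes: resizing the card
// table, updating the ephemeral range, first-time initialization, or toggling
// the software write-watch barrier. As a (hypothetical) example, a GC-side
// caller would request an ephemeral-range update roughly like this:
//
//   WriteBarrierParameters args = {};
//   args.operation = WriteBarrierOp::StompEphemeral;
//   args.is_runtime_suspended = true;
//   args.ephemeral_low = ephemeralLow;    // new ephemeral range (caller-supplied)
//   args.ephemeral_high = ephemeralHigh;
//   GCToEEInterface::StompWriteBarrier(&args);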
void GCToEEInterface::StompWriteBarrier(WriteBarrierParameters* args)
{
    int stompWBCompleteActions = SWB_PASS;
    bool is_runtime_suspended = false;

    assert(args != nullptr);
    switch (args->operation)
    {
    case WriteBarrierOp::StompResize:
        // StompResize requires a new card table, a new lowest address, and
        // a new highest address
        assert(args->card_table != nullptr);
        assert(args->lowest_address != nullptr);
        assert(args->highest_address != nullptr);

        g_card_table = args->card_table;

#ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
        assert(args->card_bundle_table != nullptr);
        g_card_bundle_table = args->card_bundle_table;
#endif

#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
        if (g_sw_ww_enabled_for_gc_heap && (args->write_watch_table != nullptr))
        {
            assert(args->is_runtime_suspended);
            g_sw_ww_table = args->write_watch_table;
        }
#endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP

        stompWBCompleteActions |= ::StompWriteBarrierResize(args->is_runtime_suspended, args->requires_upper_bounds_check);

        // We need to make sure that other threads executing checked write barriers
        // will see the g_card_table update before the g_lowest/highest_address updates.
        // Otherwise, the checked write barrier may AV accessing the old card table
        // with an address that it does not cover.
        //
        // Even x86's total store ordering is insufficient here because threads reading
        // g_card_table do so via the instruction cache, whereas g_lowest/highest_address
        // are read via the data cache.
        //
        // The g_card_table update is covered by section 8.1.3 of the Intel Software
        // Development Manual, Volume 3A (System Programming Guide, Part 1), about
        // "cross-modifying code": We need all _executing_ threads to invalidate
        // their instruction cache, which FlushProcessWriteBuffers achieves by sending
        // an IPI (inter-processor interrupt).

        if (stompWBCompleteActions & SWB_ICACHE_FLUSH)
        {
            // flushing icache on current processor (thread)
            ::FlushWriteBarrierInstructionCache();
            // asking other processors (threads) to invalidate their icache
            FlushProcessWriteBuffers();
        }

        g_lowest_address = args->lowest_address;
        VolatileStore(&g_highest_address, args->highest_address);

#if defined(_ARM64_) || defined(_ARM_)
        // Need to update again for the changes to g_highest_address and g_lowest_address
        is_runtime_suspended = (stompWBCompleteActions & SWB_EE_RESTART) || args->is_runtime_suspended;
        stompWBCompleteActions |= ::StompWriteBarrierResize(is_runtime_suspended, args->requires_upper_bounds_check);

#ifdef _ARM_
        if (stompWBCompleteActions & SWB_ICACHE_FLUSH)
        {
            ::FlushWriteBarrierInstructionCache();
        }
#endif

        is_runtime_suspended = (stompWBCompleteActions & SWB_EE_RESTART) || args->is_runtime_suspended;
        if (!is_runtime_suspended)
        {
            // If the runtime is not suspended, force the updated state to be visible to all threads
            MemoryBarrier();
        }
#endif
        if (stompWBCompleteActions & SWB_EE_RESTART)
        {
            assert(!args->is_runtime_suspended &&
                "if the runtime was suspended in the patching routines, then it was in the running state at the beginning");
            ThreadSuspend::RestartEE(FALSE, TRUE);
        }
        return; // unlike other branches we have already done the cleanup, so bail out here
    case WriteBarrierOp::StompEphemeral:
        // StompEphemeral requires a new ephemeral low and a new ephemeral high
        assert(args->ephemeral_low != nullptr);
        assert(args->ephemeral_high != nullptr);
        g_ephemeral_low = args->ephemeral_low;
        g_ephemeral_high = args->ephemeral_high;
        stompWBCompleteActions |= ::StompWriteBarrierEphemeral(args->is_runtime_suspended);
        break;
    case WriteBarrierOp::Initialize:
        // This operation should only be invoked once, upon initialization.
        assert(g_card_table == nullptr);
        assert(g_lowest_address == nullptr);
        assert(g_highest_address == nullptr);
        assert(args->card_table != nullptr);
        assert(args->lowest_address != nullptr);
        assert(args->highest_address != nullptr);
        assert(args->ephemeral_low != nullptr);
        assert(args->ephemeral_high != nullptr);
        assert(args->is_runtime_suspended && "the runtime must be suspended here!");
        assert(!args->requires_upper_bounds_check && "the ephemeral generation must be at the top of the heap!");

        g_card_table = args->card_table;

#ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
        assert(g_card_bundle_table == nullptr);
        g_card_bundle_table = args->card_bundle_table;
#endif

        g_lowest_address = args->lowest_address;
        g_highest_address = args->highest_address;
        stompWBCompleteActions |= ::StompWriteBarrierResize(true, false);

        // StompWriteBarrierResize does not necessarily bash g_ephemeral_low
        // usages, so we must do so here. This is particularly true on x86,
        // where StompWriteBarrierResize will not bash g_ephemeral_low when
        // called with the parameters (true, false), as it is above.
        g_ephemeral_low = args->ephemeral_low;
        g_ephemeral_high = args->ephemeral_high;
        stompWBCompleteActions |= ::StompWriteBarrierEphemeral(true);
        break;
    case WriteBarrierOp::SwitchToWriteWatch:
#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
        assert(args->write_watch_table != nullptr);
        assert(args->is_runtime_suspended && "the runtime must be suspended here!");
        g_sw_ww_table = args->write_watch_table;
        g_sw_ww_enabled_for_gc_heap = true;
        stompWBCompleteActions |= ::SwitchToWriteWatchBarrier(true);
#else
        assert(!"should never be called without FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP");
#endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
        break;
    case WriteBarrierOp::SwitchToNonWriteWatch:
#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
        assert(args->is_runtime_suspended && "the runtime must be suspended here!");
        g_sw_ww_table = 0;
        g_sw_ww_enabled_for_gc_heap = false;
        stompWBCompleteActions |= ::SwitchToNonWriteWatchBarrier(true);
#else
        assert(!"should never be called without FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP");
#endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
        break;
    default:
        assert(!"unknown WriteBarrierOp enum");
    }
    if (stompWBCompleteActions & SWB_ICACHE_FLUSH)
    {
        ::FlushWriteBarrierInstructionCache();
    }
    if (stompWBCompleteActions & SWB_EE_RESTART)
    {
        assert(!args->is_runtime_suspended &&
            "if the runtime was suspended in the patching routines, then it was in the running state at the beginning");
        ThreadSuspend::RestartEE(FALSE, TRUE);
    }
}

void GCToEEInterface::EnableFinalization(bool foundFinalizers)
{
    if (foundFinalizers || FinalizerThread::HaveExtraWorkForFinalizer())
    {
        FinalizerThread::EnableFinalization();
    }
}

void GCToEEInterface::HandleFatalError(unsigned int exitCode)
{
    EEPOLICY_HANDLE_FATAL_ERROR(exitCode);
}

bool GCToEEInterface::ShouldFinalizeObjectForUnload(void* pDomain, Object* obj)
{
    // CoreCLR does not have appdomains, so this code path is dead. Other runtimes may
    // choose to inspect the object being finalized here.
    // [DESKTOP TODO] Desktop looks for "agile and finalizable" objects and may choose
    // to move them to a new app domain instead of finalizing them here.
    return true;
}

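// Gives the EE a chance to finalize an object eagerly, on the GC's thread,
// instead of queuing it to the finalizer thread. WeakReference and
// WeakReference<T> are handled this way; returning true tells the GC the
// object needs no further finalization.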
bool GCToEEInterface::EagerFinalized(Object* obj)
{
    MethodTable* pMT = obj->GetGCSafeMethodTable();
    if (pMT == pWeakReferenceMT ||
        pMT->GetCanonicalMethodTable() == pWeakReferenceOfTCanonMT)
    {
        FinalizeWeakReference(obj);
        return true;
    }

    return false;
}

MethodTable* GCToEEInterface::GetFreeObjectMethodTable()
{
    assert(g_pFreeObjectMethodTable != nullptr);
    return g_pFreeObjectMethodTable;
}

// These are arbitrary; we should never have config keys or values
// longer than these lengths.
const size_t MaxConfigKeyLength = 255;
const size_t MaxConfigValueLength = 255;

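// Looks up a boolean config value for the GC. A few well-known keys are
// answered from startup flags; everything else goes through CLRConfig. As a
// (hypothetical) call site, the GC might probe a flag like this:
//
//   bool concurrent;
//   if (GCToEEInterface::GetBooleanConfigValue("gcConcurrent", &concurrent) && concurrent)
//   {
//       // set up background GC
//   }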
bool GCToEEInterface::GetBooleanConfigValue(const char* key, bool* value)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
    } CONTRACTL_END;

    // these configuration values are given to us via startup flags.
    if (strcmp(key, "gcServer") == 0)
    {
        *value = g_heap_type == GC_HEAP_SVR;
        return true;
    }

    if (strcmp(key, "gcConcurrent") == 0)
    {
        *value = !!g_pConfig->GetGCconcurrent();
        return true;
    }

    if (strcmp(key, "GCRetainVM") == 0)
    {
        *value = !!g_pConfig->GetGCRetainVM();
        return true;
    }

    WCHAR configKey[MaxConfigKeyLength];
    if (MultiByteToWideChar(CP_ACP, 0, key, -1 /* key is null-terminated */, configKey, MaxConfigKeyLength) == 0)
    {
        // whatever this is... it's not something we care about. (It was too long, wasn't unicode, etc.)
        return false;
    }

    // otherwise, ask the config subsystem.
    if (CLRConfig::IsConfigOptionSpecified(configKey))
    {
        CLRConfig::ConfigDWORDInfo info { configKey , 0, CLRConfig::EEConfig_default };
        *value = CLRConfig::GetConfigValue(info) != 0;
        return true;
    }

    return false;
}

bool GCToEEInterface::GetIntConfigValue(const char* key, int64_t* value)
{
    CONTRACTL {
      NOTHROW;
      GC_NOTRIGGER;
    } CONTRACTL_END;

    WCHAR configKey[MaxConfigKeyLength];
    if (MultiByteToWideChar(CP_ACP, 0, key, -1 /* key is null-terminated */, configKey, MaxConfigKeyLength) == 0)
    {
        // whatever this is... it's not something we care about. (It was too long, wasn't unicode, etc.)
        return false;
    }

    if (CLRConfig::IsConfigOptionSpecified(configKey))
    {
        CLRConfig::ConfigDWORDInfo info { configKey , 0, CLRConfig::EEConfig_default };
        *value = CLRConfig::GetConfigValue(info);
        return true;
    }

    return false;
}

bool GCToEEInterface::GetStringConfigValue(const char* key, const char** value)
{
    CONTRACTL {
      NOTHROW;
      GC_NOTRIGGER;
    } CONTRACTL_END;

    WCHAR configKey[MaxConfigKeyLength];
    if (MultiByteToWideChar(CP_ACP, 0, key, -1 /* key is null-terminated */, configKey, MaxConfigKeyLength) == 0)
    {
        // whatever this is... it's not something we care about. (It was too long, wasn't unicode, etc.)
        return false;
    }

    CLRConfig::ConfigStringInfo info { configKey, CLRConfig::EEConfig_default };
    LPWSTR out = CLRConfig::GetConfigValue(info);
    if (!out)
    {
        // config not found
        return false;
    }

    // not allocated on the stack since it escapes this function
    AStringHolder configResult = new (nothrow) char[MaxConfigValueLength];
    if (!configResult)
    {
        CLRConfig::FreeConfigString(out);
        return false;
    }

    // The destination buffer is MaxConfigValueLength bytes, so that is the bound to pass here.
    if (WideCharToMultiByte(CP_ACP, 0, out, -1 /* out is null-terminated */,
          configResult.GetValue(), MaxConfigValueLength, nullptr, nullptr) == 0)
    {
        // this should only happen if the config subsystem gives us a string that's not valid
        // unicode.
        CLRConfig::FreeConfigString(out);
        return false;
    }

    *value = configResult.Extract();
    CLRConfig::FreeConfigString(out);
    return true;
}

void GCToEEInterface::FreeStringConfigValue(const char* value)
{
    delete [] value;
}

bool GCToEEInterface::IsGCThread()
{
    return !!::IsGCThread();
}

bool GCToEEInterface::WasCurrentThreadCreatedByGC()
{
    return !!::IsGCSpecialThread();
}

struct SuspendableThreadStubArguments
{
    void* Argument;
    void (*ThreadStart)(void*);
    Thread* Thread;
    bool HasStarted;
    CLREvent ThreadStartedEvent;
};

struct ThreadStubArguments
{
    void* Argument;
    void (*ThreadStart)(void*);
    HANDLE Thread;
    bool HasStarted;
    CLREvent ThreadStartedEvent;
};

namespace
{
    const size_t MaxThreadNameSize = 255;

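    // Creates a GC thread that is backed by an EE Thread object and is
    // therefore suspendable: it participates in EE suspension alongside
    // managed threads. Passing a stack-allocated args struct to the stub is
    // safe because we wait on ThreadStartedEvent before returning.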
    bool CreateSuspendableThread(
        void (*threadStart)(void*),
        void* argument,
        const wchar_t* name)
    {
        LIMITED_METHOD_CONTRACT;

        SuspendableThreadStubArguments args;
        args.Argument = argument;
        args.ThreadStart = threadStart;
        args.Thread = nullptr;
        args.HasStarted = false;
        if (!args.ThreadStartedEvent.CreateAutoEventNoThrow(FALSE))
        {
            return false;
        }

        EX_TRY
        {
            args.Thread = SetupUnstartedThread(FALSE);
        }
        EX_CATCH
        {
        }
        EX_END_CATCH(SwallowAllExceptions)

        if (!args.Thread)
        {
            args.ThreadStartedEvent.CloseEvent();
            return false;
        }

        auto threadStub = [](void* argument) -> DWORD
        {
            SuspendableThreadStubArguments* args = static_cast<SuspendableThreadStubArguments*>(argument);
            assert(args != nullptr);

            ClrFlsSetThreadType(ThreadType_GC);
            args->Thread->SetGCSpecial(true);
            STRESS_LOG_RESERVE_MEM(GC_STRESSLOG_MULTIPLY);
            args->HasStarted = !!args->Thread->HasStarted(false);

            Thread* thread = args->Thread;
            auto threadStart = args->ThreadStart;
            void* threadArgument = args->Argument;
            bool hasStarted = args->HasStarted;
            args->ThreadStartedEvent.Set();

            // The stubArgs cannot be used once the event is set, since that releases the wait on the
            // event in the function that created this thread, and the stubArgs go out of scope.
            if (hasStarted)
            {
                threadStart(threadArgument);
                DestroyThread(thread);
            }

            return 0;
        };
        if (!args.Thread->CreateNewThread(0, threadStub, &args, name))
        {
            args.Thread->DecExternalCount(FALSE);
            args.ThreadStartedEvent.CloseEvent();
            return false;
        }

        args.Thread->SetBackground(TRUE, FALSE);
        args.Thread->StartThread();

        // Wait for the thread to be in its main loop
        uint32_t res = args.ThreadStartedEvent.Wait(INFINITE, FALSE);
        args.ThreadStartedEvent.CloseEvent();
        _ASSERTE(res == WAIT_OBJECT_0);

        if (!args.HasStarted)
        {
            // The thread has failed to start and the Thread object was destroyed in the Thread::HasStarted
            // failure code path.
            return false;
        }

        return true;
    }

    bool CreateNonSuspendableThread(
        void (*threadStart)(void*),
        void* argument,
        const wchar_t* name)
    {
        LIMITED_METHOD_CONTRACT;

        ThreadStubArguments args;
        args.Argument = argument;
        args.ThreadStart = threadStart;
        args.Thread = INVALID_HANDLE_VALUE;
        if (!args.ThreadStartedEvent.CreateAutoEventNoThrow(FALSE))
        {
            return false;
        }

        auto threadStub = [](void* argument) -> DWORD
        {
            ThreadStubArguments* args = static_cast<ThreadStubArguments*>(argument);
            assert(args != nullptr);

            ClrFlsSetThreadType(ThreadType_GC);
            STRESS_LOG_RESERVE_MEM(GC_STRESSLOG_MULTIPLY);

            args->HasStarted = true;
            auto threadStart = args->ThreadStart;
            void* threadArgument = args->Argument;
            args->ThreadStartedEvent.Set();

            // The stub args cannot be used once the event is set, since that releases the wait on the
            // event in the function that created this thread, and the stub args go out of scope.
            threadStart(threadArgument);
            return 0;
        };

        args.Thread = Thread::CreateUtilityThread(Thread::StackSize_Medium, threadStub, &args, name);
        if (args.Thread == INVALID_HANDLE_VALUE)
        {
            args.ThreadStartedEvent.CloseEvent();
            return false;
        }

        // Wait for the thread to be in its main loop
        uint32_t res = args.ThreadStartedEvent.Wait(INFINITE, FALSE);
        args.ThreadStartedEvent.CloseEvent();
        _ASSERTE(res == WAIT_OBJECT_0);

        CloseHandle(args.Thread);
        return true;
    }
} // anonymous namespace

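// EE entry point for GC thread creation. Converts the UTF-8 name to UTF-16 on
// a best-effort basis, then dispatches to the suspendable or non-suspendable
// helper above.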
bool GCToEEInterface::CreateThread(void (*threadStart)(void*), void* arg, bool is_suspendable, const char* name)
{
    InlineSString<MaxThreadNameSize> wideName;
    const WCHAR* namePtr = nullptr;
    EX_TRY
    {
        if (name != nullptr)
        {
            wideName.SetUTF8(name);
            namePtr = wideName.GetUnicode();
        }
    }
    EX_CATCH
    {
        // we're not obligated to provide a name - if it's not valid,
        // just report nullptr as the name.
    }
    EX_END_CATCH(SwallowAllExceptions)

    LIMITED_METHOD_CONTRACT;
    if (is_suspendable)
    {
        return CreateSuspendableThread(threadStart, arg, namePtr);
    }
    else
    {
        return CreateNonSuspendableThread(threadStart, arg, namePtr);
    }
}

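// Reports the user object held by an async-pinned OverlappedDataObject to the
// GC. If the user object is an object[], each element is reported as pinned
// rather than the array itself.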
void GCToEEInterface::WalkAsyncPinnedForPromotion(Object* object, ScanContext* sc, promote_func* callback)
{
    LIMITED_METHOD_CONTRACT;

    assert(object != nullptr);
    assert(sc != nullptr);
    assert(callback != nullptr);
    if (object->GetGCSafeMethodTable() != g_pOverlappedDataClass)
    {
        // not an overlapped data object - nothing to do.
        return;
    }

    // Report the pinned user objects.
    OverlappedDataObject *pOverlapped = (OverlappedDataObject *)object;
    if (pOverlapped->m_userObject != NULL)
    {
        if (pOverlapped->m_userObject->GetGCSafeMethodTable() == g_pPredefinedArrayTypes[ELEMENT_TYPE_OBJECT]->GetMethodTable())
        {
            // OverlappedDataObject is very special.  An async pin handle keeps it alive.
            // During GC, we also make sure
            // 1. m_userObject itself does not move if m_userObject is not an array
            // 2. Every object pointed to by m_userObject does not move if m_userObject is an array
            // We do not want to pin m_userObject if it is an array.
            ArrayBase* pUserObject = (ArrayBase*)OBJECTREFToObject(pOverlapped->m_userObject);
            Object **ppObj = (Object**)pUserObject->GetDataPtr(TRUE);
            size_t num = pUserObject->GetNumComponents();
            for (size_t i = 0; i < num; i++)
            {
                callback(ppObj + i, sc, GC_CALL_PINNED);
            }
        }
        else
        {
            callback(&OBJECTREF_TO_UNCHECKED_OBJECTREF(pOverlapped->m_userObject), (ScanContext *)sc, GC_CALL_PINNED);
        }
    }
}

void GCToEEInterface::WalkAsyncPinned(Object* object, void* context, void (*callback)(Object*, Object*, void*))
{
    LIMITED_METHOD_CONTRACT;

    assert(object != nullptr);
    assert(callback != nullptr);

    if (object->GetGCSafeMethodTable() != g_pOverlappedDataClass)
    {
        return;
    }

    OverlappedDataObject *pOverlapped = (OverlappedDataObject *)(object);
    if (pOverlapped->m_userObject != NULL)
    {
        Object * pUserObject = OBJECTREFToObject(pOverlapped->m_userObject);
        callback(object, pUserObject, context);
        if (pOverlapped->m_userObject->GetGCSafeMethodTable() == g_pPredefinedArrayTypes[ELEMENT_TYPE_OBJECT]->GetMethodTable())
        {
            ArrayBase* pUserArrayObject = (ArrayBase*)pUserObject;
            Object **pObj = (Object**)pUserArrayObject->GetDataPtr(TRUE);
            size_t num = pUserArrayObject->GetNumComponents();
            for (size_t i = 0; i < num; i ++)
            {
                callback(pUserObject, pObj[i], context);
            }
        }
    }
}

IGCToCLREventSink* GCToEEInterface::EventSink()
{
    LIMITED_METHOD_CONTRACT;

    return &g_gcToClrEventSink;
}

uint32_t GCToEEInterface::GetDefaultDomainIndex()
{
    LIMITED_METHOD_CONTRACT;

    return SystemDomain::System()->DefaultDomain()->GetIndex().m_dwIndex;
}

void *GCToEEInterface::GetAppDomainAtIndex(uint32_t appDomainIndex)
{
    LIMITED_METHOD_CONTRACT;

    ADIndex index(appDomainIndex);
    return static_cast<void *>(SystemDomain::GetAppDomainAtIndex(index));
}

bool GCToEEInterface::AppDomainCanAccessHandleTable(uint32_t appDomainID)
{
    LIMITED_METHOD_CONTRACT;

    ADIndex index(appDomainID);
    AppDomain *pDomain = SystemDomain::GetAppDomainAtIndex(index);
    return (pDomain != NULL);
}

uint32_t GCToEEInterface::GetIndexOfAppDomainBeingUnloaded()
{
    LIMITED_METHOD_CONTRACT;

    return 0xFFFFFFFF;
}

uint32_t GCToEEInterface::GetTotalNumSizedRefHandles()
{
    LIMITED_METHOD_CONTRACT;

    return SystemDomain::System()->GetTotalNumSizedRefHandles();
}


bool GCToEEInterface::AppDomainIsRudeUnload(void *appDomain)
{
    LIMITED_METHOD_CONTRACT;

    return false;
}

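// Checks the GC notification table for a registered GC_MARK_END notification
// covering the condemned generation; if one exists, a diagnostic client
// (e.g. a debugger using DAC notifications) wants to inspect survivors.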
bool GCToEEInterface::AnalyzeSurvivorsRequested(int condemnedGeneration)
{
    LIMITED_METHOD_CONTRACT;

    // Is the list active?
    GcNotifications gn(g_pGcNotificationTable);
    if (gn.IsActive())
    {
        GcEvtArgs gea = { GC_MARK_END, { (1<<condemnedGeneration) } };
        if (gn.GetNotification(gea) != 0)
        {
            return true;
        }
    }

    return false;
}

void GCToEEInterface::AnalyzeSurvivorsFinished(int condemnedGeneration)
{
    LIMITED_METHOD_CONTRACT;

    // Is the list active?
    GcNotifications gn(g_pGcNotificationTable);
    if (gn.IsActive())
    {
        GcEvtArgs gea = { GC_MARK_END, { (1<<condemnedGeneration) } };
        if (gn.GetNotification(gea) != 0)
        {
            DACNotify::DoGCNotification(gea);
        }
    }
}