Add GetLoaderAllocatorObjectForGC to IGCToCLR (#17443)
[platform/upstream/coreclr.git] / src / vm / gcenv.ee.cpp
1 // Licensed to the .NET Foundation under one or more agreements.
2 // The .NET Foundation licenses this file to you under the MIT license.
3 // See the LICENSE file in the project root for more information.
4
5 /*
6  * GCENV.EE.CPP
7  *
8  * GCToEEInterface implementation
9  *
10  *
11  *
12  */
13
14 void GCToEEInterface::SuspendEE(SUSPEND_REASON reason)
15 {
16     WRAPPER_NO_CONTRACT;
17
18     static_assert_no_msg(SUSPEND_FOR_GC == (int)ThreadSuspend::SUSPEND_FOR_GC);
19     static_assert_no_msg(SUSPEND_FOR_GC_PREP == (int)ThreadSuspend::SUSPEND_FOR_GC_PREP);
20
21     _ASSERTE(reason == SUSPEND_FOR_GC || reason == SUSPEND_FOR_GC_PREP);
22
23     ThreadSuspend::SuspendEE((ThreadSuspend::SUSPEND_REASON)reason);
24 }
25
26 void GCToEEInterface::RestartEE(bool bFinishedGC)
27 {
28     WRAPPER_NO_CONTRACT;
29
30     ThreadSuspend::RestartEE(bFinishedGC, TRUE);
31 }
32
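// Scans the weak references held in the sync block cache on behalf of the GC,
// invoking scanProc for each one.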
33 VOID GCToEEInterface::SyncBlockCacheWeakPtrScan(HANDLESCANPROC scanProc, uintptr_t lp1, uintptr_t lp2)
34 {
35     CONTRACTL
36     {
37         NOTHROW;
38         GC_NOTRIGGER;
39     }
40     CONTRACTL_END;
41
42     SyncBlockCache::GetSyncBlockCache()->GCWeakPtrScan(scanProc, lp1, lp2);
43 }
44
45
46 // The EE can perform post-stack-scanning actions while the
47 // user threads are still suspended.
48 VOID GCToEEInterface::AfterGcScanRoots (int condemned, int max_gen,
49                                    ScanContext* sc)
50 {
51     CONTRACTL
52     {
53         NOTHROW;
54         GC_NOTRIGGER;
55     }
56     CONTRACTL_END;
57
58 #ifdef FEATURE_COMINTEROP
59     // Go through all the app domains and for each one detach all the *unmarked* RCWs to prevent
60     // the RCW cache from resurrecting them.
61     UnsafeAppDomainIterator i(TRUE);
62     i.Init();
63
64     while (i.Next())
65     {
66         i.GetDomain()->DetachRCWs();
67     }
68 #endif // FEATURE_COMINTEROP
69 }
70
71 /*
72  * Scan all stack roots
73  */
74  
75 static void ScanStackRoots(Thread * pThread, promote_func* fn, ScanContext* sc)
76 {
77     GCCONTEXT   gcctx;
78
79     gcctx.f  = fn;
80     gcctx.sc = sc;
81     gcctx.cf = NULL;
82
83     ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE();
84
85     // Either we are in a concurrent situation (in which case the thread is unknown to
86     // us), or we are performing a synchronous GC and we are the GC thread, holding
87     // the threadstore lock.
88
89     _ASSERTE(dbgOnly_IsSpecialEEThread() ||
90                 GetThread() == NULL ||
91                 // this is for background GC threads which always call this when EE is suspended.
92                 IsGCSpecialThread() || 
93                 (GetThread() == ThreadSuspend::GetSuspensionThread() && ThreadStore::HoldingThreadStore()));
94
95     pThread->SetHasPromotedBytes();
96
97     Frame* pTopFrame = pThread->GetFrame();
98     Object ** topStack = (Object **)pTopFrame;
99     if ((pTopFrame != ((Frame*)-1)) 
100         && (pTopFrame->GetVTablePtr() == InlinedCallFrame::GetMethodFrameVPtr())) {
101         // It is an InlinedCallFrame. Get SP from it.
102         InlinedCallFrame* pInlinedFrame = (InlinedCallFrame*)pTopFrame;
103         topStack = (Object **)pInlinedFrame->GetCallSiteSP();
104     } 
105
106     sc->stack_limit = (uintptr_t)topStack;
107
108 #ifdef FEATURE_CONSERVATIVE_GC
109     if (g_pConfig->GetGCConservative())
110     {
111         // Conservative stack root reporting
112         // We will treat everything on the stack as a pinned interior GC pointer.
113         // Since we report everything as pinned, we don't need to run the following code during the relocation phase.
114         if (sc->promotion)
115         {
116             Object ** bottomStack = (Object **) pThread->GetCachedStackBase();
117             Object ** walk;
118             for (walk = topStack; walk < bottomStack; walk ++)
119             {
120                 if (((void*)*walk > (void*)bottomStack || (void*)*walk < (void*)topStack) &&
121                     ((void*)*walk >= (void*)g_lowest_address && (void*)*walk <= (void*)g_highest_address)
122                     )
123                 {
124                     //DbgPrintf("promote " FMT_ADDR " : " FMT_ADDR "\n", walk, *walk);
125                     fn(walk, sc, GC_CALL_INTERIOR|GC_CALL_PINNED);
126                 }
127             }
128         }
129
130         // Also ask the explicit Frames to report any references they might know about.
131         // Generally these will be a subset of the objects reported below but there's
132         // nothing that guarantees that and in the specific case of a GC protect frame the
133         // references it protects may live at a lower address than the frame itself (and
134         // thus escape the stack range we scanned above).
135         Frame *pFrame = pThread->GetFrame();
136         while (pFrame != FRAME_TOP)
137         {
138             pFrame->GcScanRoots(fn, sc);
139             pFrame = pFrame->PtrNextFrame();
140         }
141     }
142     else
143 #endif
144     {    
145         unsigned flagsStackWalk = ALLOW_ASYNC_STACK_WALK | ALLOW_INVALID_OBJECTS;
146 #if defined(WIN64EXCEPTIONS)            
147         flagsStackWalk |= GC_FUNCLET_REFERENCE_REPORTING;
148 #endif // defined(WIN64EXCEPTIONS)                        
149         pThread->StackWalkFrames( GcStackCrawlCallBack, &gcctx, flagsStackWalk);
150     }
151 }
152
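// Scans GC roots on behalf of the GC: static GC references (only when the oldest generation
// is condemned during promotion) and the stack roots of every thread whose allocation
// context is assigned to the calling heap (relevant for server GC).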
153 void GCToEEInterface::GcScanRoots(promote_func* fn, int condemned, int max_gen, ScanContext* sc)
154 {
155     STRESS_LOG1(LF_GCROOTS, LL_INFO10, "GCScan: Promotion Phase = %d\n", sc->promotion);
156
157     // In server GC, we should be competing for marking the statics
158     if (GCHeapUtilities::MarkShouldCompeteForStatics())
159     {
160         if (condemned == max_gen && sc->promotion)
161         {
162             SystemDomain::EnumAllStaticGCRefs(fn, sc);
163         }
164     }
165
166     Thread* pThread = NULL;
167     while ((pThread = ThreadStore::GetThreadList(pThread)) != NULL)
168     {
169         STRESS_LOG2(LF_GC | LF_GCROOTS, LL_INFO100, "{ Starting scan of Thread %p ID = %x\n", pThread, pThread->GetThreadId());
170
171         if (GCHeapUtilities::GetGCHeap()->IsThreadUsingAllocationContextHeap(
172             pThread->GetAllocContext(), sc->thread_number))
173         {
174             sc->thread_under_crawl = pThread;
175 #ifdef FEATURE_EVENT_TRACE
176             sc->dwEtwRootKind = kEtwGCRootKindStack;
177 #endif // FEATURE_EVENT_TRACE
178             ScanStackRoots(pThread, fn, sc);
179 #ifdef FEATURE_EVENT_TRACE
180             sc->dwEtwRootKind = kEtwGCRootKindOther;
181 #endif // FEATURE_EVENT_TRACE
182         }
183         STRESS_LOG2(LF_GC | LF_GCROOTS, LL_INFO100, "Ending scan of Thread %p ID = 0x%x }\n", pThread, pThread->GetThreadId());
184     }
185 }
186
187 void GCToEEInterface::GcStartWork (int condemned, int max_gen)
188 {
189     CONTRACTL
190     {
191         NOTHROW;
192         GC_NOTRIGGER;
193     }
194     CONTRACTL_END;
195
196     // Update AppDomain stage here.
197     SystemDomain::System()->ProcessClearingDomains();
198
199 #ifdef VERIFY_HEAP
200     // Validate byrefs pinned by IL stubs since the last GC.
201     StubHelpers::ProcessByrefValidationList();
202 #endif // VERIFY_HEAP
203
204     ExecutionManager::CleanupCodeHeaps();
205
206 #ifdef FEATURE_EVENT_TRACE
207     ETW::TypeSystemLog::Cleanup();
208 #endif
209
210 #ifdef FEATURE_COMINTEROP
211     //
212     // Let the GC detect managed/native cycles with input from Jupiter.
213     // Jupiter will:
214     // 1. Report references from RCWs to CCWs based on native references in Jupiter
215     // 2. Identify the subset of CCWs that need to be rooted
216     //
217     // We'll build the references from RCWs to CCWs using:
218     // 1. Preallocated arrays
219     // 2. Dependent handles
220     // 
221     RCWWalker::OnGCStarted(condemned);
222 #endif // FEATURE_COMINTEROP
223
224     if (condemned == max_gen)
225     {
226         ThreadStore::s_pThreadStore->OnMaxGenerationGCStarted();
227     }
228 }
229
230 void GCToEEInterface::GcDone(int condemned)
231 {
232     CONTRACTL
233     {
234         NOTHROW;
235         GC_NOTRIGGER;
236     }
237     CONTRACTL_END;
238
239 #ifdef FEATURE_COMINTEROP
240     //
241     // Tell Jupiter GC has finished
242     // 
243     RCWWalker::OnGCFinished(condemned);
244 #endif // FEATURE_COMINTEROP
245 }
246
247 bool GCToEEInterface::RefCountedHandleCallbacks(Object * pObject)
248 {
249     CONTRACTL
250     {
251         NOTHROW;
252         GC_NOTRIGGER;
253     }
254     CONTRACTL_END;
255
256 #ifdef FEATURE_COMINTEROP
257     //<REVISIT_TODO>@todo optimize the access to the ref-count
258     ComCallWrapper* pWrap = ComCallWrapper::GetWrapperForObject((OBJECTREF)pObject);
259     _ASSERTE(pWrap != NULL);
260
261     return !!pWrap->IsWrapperActive();
262 #else
263     return false;
264 #endif
265 }
266
267 void GCToEEInterface::GcBeforeBGCSweepWork()
268 {
269     CONTRACTL
270     {
271         NOTHROW;
272         GC_NOTRIGGER;
273     }
274     CONTRACTL_END;
275
276 #ifdef VERIFY_HEAP
277     // Validate byrefs pinned by IL stubs since the last GC.
278     StubHelpers::ProcessByrefValidationList();
279 #endif // VERIFY_HEAP
280 }
281
282 void GCToEEInterface::SyncBlockCacheDemote(int max_gen)
283 {
284     CONTRACTL
285     {
286         NOTHROW;
287         GC_NOTRIGGER;
288     }
289     CONTRACTL_END;
290
291     SyncBlockCache::GetSyncBlockCache()->GCDone(TRUE, max_gen);
292 }
293
294 void GCToEEInterface::SyncBlockCachePromotionsGranted(int max_gen)
295 {
296     CONTRACTL
297     {
298         NOTHROW;
299         GC_NOTRIGGER;
300     }
301     CONTRACTL_END;
302
303     SyncBlockCache::GetSyncBlockCache()->GCDone(FALSE, max_gen);
304 }
305
306 uint32_t GCToEEInterface::GetActiveSyncBlockCount()
307 {
308     CONTRACTL
309     {
310         NOTHROW;
311         GC_NOTRIGGER;
312     }
313     CONTRACTL_END;
314
315     return SyncBlockCache::GetSyncBlockCache()->GetActiveCount();   
316 }
317
318 gc_alloc_context * GCToEEInterface::GetAllocContext()
319 {
320     WRAPPER_NO_CONTRACT;
321     
322     Thread* pThread = ::GetThread();
323     assert(pThread != nullptr);
324     return pThread->GetAllocContext();
325 }
326
327 void GCToEEInterface::GcEnumAllocContexts(enum_alloc_context_func* fn, void* param)
328 {
329     CONTRACTL
330     {
331         NOTHROW;
332         GC_NOTRIGGER;
333     }
334     CONTRACTL_END;
335
336     if (GCHeapUtilities::UseThreadAllocationContexts())
337     {
338         Thread * pThread = NULL;
339         while ((pThread = ThreadStore::GetThreadList(pThread)) != NULL)
340         {
341             fn(pThread->GetAllocContext(), param);
342         }
343     }
344     else
345     {
346         fn(&g_global_alloc_context, param);
347     }
348 }
349
350
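// New in this change: returns the LoaderAllocator exposed object associated with the given
// object's MethodTable, which the GC can report as a reference so that collectible
// assemblies stay alive while instances of their types are still reachable.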
351 uint8_t* GCToEEInterface::GetLoaderAllocatorObjectForGC(Object* pObject)
352 {
353     CONTRACTL
354     {
355         NOTHROW;
356         GC_NOTRIGGER;
357     }
358     CONTRACTL_END;
359
360     return pObject->GetMethodTable()->GetLoaderAllocatorObjectForGC();
361 }
362
363 bool GCToEEInterface::IsPreemptiveGCDisabled()
364 {
365     WRAPPER_NO_CONTRACT;
366
367     Thread* pThread = ::GetThread();
368     if (pThread)
369     {
370         return !!pThread->PreemptiveGCDisabled();
371     }
372
373     return false;
374 }
375
376 bool GCToEEInterface::EnablePreemptiveGC()
377 {
378     WRAPPER_NO_CONTRACT;
379
380     bool bToggleGC = false;
381     Thread* pThread = ::GetThread();
382
383     if (pThread)
384     {
385         bToggleGC = !!pThread->PreemptiveGCDisabled();
386         if (bToggleGC)
387         {
388             pThread->EnablePreemptiveGC();
389         }
390     }
391
392     return bToggleGC;
393 }
394
395 void GCToEEInterface::DisablePreemptiveGC()
396 {
397     WRAPPER_NO_CONTRACT;
398
399     Thread* pThread = ::GetThread();
400     if (pThread)
401     {
402         pThread->DisablePreemptiveGC();
403     }
404 }
405
406 Thread* GCToEEInterface::GetThread()
407 {
408     WRAPPER_NO_CONTRACT;
409
410     return ::GetThread();
411 }
412
413 struct BackgroundThreadStubArgs
414 {
415     Thread* thread;
416     GCBackgroundThreadFunction threadStart;
417     void* arg;
418     CLREvent threadStartedEvent;
419     bool hasStarted;
420 };
421
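// Thread entry stub for GC background threads: marks the thread as a GC-special thread,
// signals the creating thread via threadStartedEvent, and then runs the real thread start.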
422 DWORD WINAPI BackgroundThreadStub(void* arg)
423 {
424     BackgroundThreadStubArgs* stubArgs = (BackgroundThreadStubArgs*)arg;
425     assert (stubArgs->thread != NULL);
426
427     ClrFlsSetThreadType (ThreadType_GC);
428     stubArgs->thread->SetGCSpecial(true);
429     STRESS_LOG_RESERVE_MEM (GC_STRESSLOG_MULTIPLY);
430
431     stubArgs->hasStarted = !!stubArgs->thread->HasStarted(FALSE);
432
433     Thread* thread = stubArgs->thread;
434     GCBackgroundThreadFunction realThreadStart = stubArgs->threadStart;
435     void* realThreadArg = stubArgs->arg;
436     bool hasStarted = stubArgs->hasStarted;
437
438     stubArgs->threadStartedEvent.Set();
439     // The stubArgs cannot be used once the event is set, since that releases wait on the
440     // event in the function that created this thread and the stubArgs go out of scope.
441
442     DWORD result = 0;
443
444     if (hasStarted)
445     {
446         result = realThreadStart(realThreadArg);
447         DestroyThread(thread);
448     }
449
450     return result;
451 }
452
453 //
454 // Diagnostics code
455 //
456
457 #if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
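// Returns TRUE if either an attached profiler or ETW wants GC object movement
// (surviving/relocated references) to be reported.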
458 inline BOOL ShouldTrackMovementForProfilerOrEtw()
459 {
460 #ifdef GC_PROFILING
461     if (CORProfilerTrackGC())
462         return true;
463 #endif
464
465 #ifdef FEATURE_EVENT_TRACE
466     if (ETW::GCLog::ShouldTrackMovementForEtw())
467         return true;
468 #endif
469
470     return false;
471 }
472 #endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
473
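// promote_func-compatible callback used by the profiler/ETW root scan: resolves interior
// pointers to their containing object and forwards the root to ScanRootsHelper.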
474 void ProfScanRootsHelper(Object** ppObject, ScanContext *pSC, uint32_t dwFlags)
475 {
476 #if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
477     Object *pObj = *ppObject;
478     if (dwFlags & GC_CALL_INTERIOR)
479     {
480         pObj = GCHeapUtilities::GetGCHeap()->GetContainingObject(pObj, true);
481         if (pObj == nullptr)
482             return;
483     }
484     ScanRootsHelper(pObj, ppObject, pSC, dwFlags);
485 #endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
486 }
487
488 // TODO - at some point we would like to completely decouple profiling
489 // from ETW tracing using a pattern similar to this, where the
490 // ProfilingScanContext has flags about whether or not certain things
491 // should be tracked, and each one of these ProfilerShouldXYZ functions
492 // will check these flags and determine what to do based upon that.
493 // GCProfileWalkHeapWorker can, in turn, call those methods without fear
494 // of things being ifdef'd out.
495
496 // Returns TRUE if GC profiling is enabled and the profiler
497 // should scan dependent handles, FALSE otherwise.
498 BOOL ProfilerShouldTrackConditionalWeakTableElements() 
499 {
500 #if defined(GC_PROFILING)
501     return CORProfilerTrackConditionalWeakTableElements();
502 #else
503     return FALSE;
504 #endif // defined (GC_PROFILING)
505 }
506
507 // If GC profiling is enabled, informs the profiler that we are done
508 // tracing dependent handles.
509 void ProfilerEndConditionalWeakTableElementReferences(void* heapId)
510 {
511 #if defined (GC_PROFILING)
512     g_profControlBlock.pProfInterface->EndConditionalWeakTableElementReferences(heapId);
513 #else
514     UNREFERENCED_PARAMETER(heapId);
515 #endif // defined (GC_PROFILING)
516 }
517
518 // If GC profiling is enabled, informs the profiler that we are done
519 // tracing root references.
520 void ProfilerEndRootReferences2(void* heapId) 
521 {
522 #if defined (GC_PROFILING)
523     g_profControlBlock.pProfInterface->EndRootReferences2(heapId);
524 #else
525     UNREFERENCED_PARAMETER(heapId);
526 #endif // defined (GC_PROFILING)
527 }
528
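// Diagnostic variant of the root scan: walks the stack roots of every thread for the
// profiler and/or ETW, regardless of which server GC heap owns the thread.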
529 void GcScanRootsForProfilerAndETW(promote_func* fn, int condemned, int max_gen, ScanContext* sc)
530 {
531     Thread* pThread = NULL;
532     while ((pThread = ThreadStore::GetThreadList(pThread)) != NULL)
533     {
534         sc->thread_under_crawl = pThread;
535 #ifdef FEATURE_EVENT_TRACE
536         sc->dwEtwRootKind = kEtwGCRootKindStack;
537 #endif // FEATURE_EVENT_TRACE
538         ScanStackRoots(pThread, fn, sc);
539 #ifdef FEATURE_EVENT_TRACE
540         sc->dwEtwRootKind = kEtwGCRootKindOther;
541 #endif // FEATURE_EVENT_TRACE
542     }
543 }
544
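// Handle-scan callback: reports each scanned handle to the profiler (as a root reference
// or a conditional weak table element reference) and/or to ETW.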
545 void ScanHandleForProfilerAndETW(Object** pRef, Object* pSec, uint32_t flags, ScanContext* context, bool isDependent)
546 {
547     ProfilingScanContext* pSC = (ProfilingScanContext*)context;
548
549 #ifdef GC_PROFILING
550     // Give the profiler the objectref.
551     if (pSC->fProfilerPinned)
552     {
553         if (!isDependent)
554         {
555             BEGIN_PIN_PROFILER(CORProfilerTrackGC());
556             g_profControlBlock.pProfInterface->RootReference2(
557                 (uint8_t *)*pRef,
558                 kEtwGCRootKindHandle,
559                 (EtwGCRootFlags)flags,
560                 pRef, 
561                 &pSC->pHeapId);
562             END_PIN_PROFILER();
563         }
564         else
565         {
566             BEGIN_PIN_PROFILER(CORProfilerTrackConditionalWeakTableElements());
567             g_profControlBlock.pProfInterface->ConditionalWeakTableElementReference(
568                 (uint8_t*)*pRef,
569                 (uint8_t*)pSec,
570                 pRef,
571                 &pSC->pHeapId);
572             END_PIN_PROFILER();
573         }
574     }
575 #endif // GC_PROFILING
576
577 #if defined(FEATURE_EVENT_TRACE)
578     // Notify ETW of the handle
579     if (ETW::GCLog::ShouldWalkHeapRootsForEtw())
580     {
581         ETW::GCLog::RootReference(
582             pRef,
583             *pRef,          // object being rooted
584             pSec,           // pSecondaryNodeForDependentHandle
585             isDependent,
586             pSC,
587             0,              // dwGCFlags,
588             flags);     // ETW handle flags
589     }
590 #endif // defined(FEATURE_EVENT_TRACE) 
591 }
592
593 // This is called only if we've determined that either:
594 //     a) The Profiling API wants to do a walk of the heap, and it has pinned the
595 //     profiler in place (so it cannot be detached), and it's thus safe to call into the
596 //     profiler, OR
597 //     b) ETW infrastructure wants to do a walk of the heap either to log roots,
598 //     objects, or both.
599 // This can also be called to do a single walk for BOTH a) and b) simultaneously.  Since
600 // ETW can ask for roots, but not objects (or vice versa), this code handles those cases as well.
601 #if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
602 void GCProfileWalkHeapWorker(BOOL fProfilerPinned, BOOL fShouldWalkHeapRootsForEtw, BOOL fShouldWalkHeapObjectsForEtw)
603 {
604     {
605         ProfilingScanContext SC(fProfilerPinned);
606         unsigned max_generation = GCHeapUtilities::GetGCHeap()->GetMaxGeneration();
607
608         // **** Scan roots:  Only scan roots if profiling API wants them or ETW wants them.
609         if (fProfilerPinned || fShouldWalkHeapRootsForEtw)
610         {
611             GcScanRootsForProfilerAndETW(&ProfScanRootsHelper, max_generation, max_generation, &SC);
612             SC.dwEtwRootKind = kEtwGCRootKindFinalizer;
613             GCHeapUtilities::GetGCHeap()->DiagScanFinalizeQueue(&ProfScanRootsHelper, &SC);
614
615             // Handles are kept independent of wks/svr/concurrent builds
616             SC.dwEtwRootKind = kEtwGCRootKindHandle;
617             GCHeapUtilities::GetGCHeap()->DiagScanHandles(&ScanHandleForProfilerAndETW, max_generation, &SC);
618
619             // indicate that regular handle scanning is over, so we can flush the buffered roots
620             // to the profiler.  (This is for profapi only.  ETW will flush after the
621             // entire heap walk is complete, via ETW::GCLog::EndHeapDump.)
622             if (fProfilerPinned)
623             {
624                 ProfilerEndRootReferences2(&SC.pHeapId);
625             }
626         }
627
628         // **** Scan dependent handles: only if the profiler supports it or ETW wants roots
629         if ((fProfilerPinned && ProfilerShouldTrackConditionalWeakTableElements()) ||
630             fShouldWalkHeapRootsForEtw)
631         {
632             // GcScanDependentHandlesForProfiler double-checks
633             // CORProfilerTrackConditionalWeakTableElements() before calling into the profiler
634
635             ProfilingScanContext* pSC = &SC;
636
637             // we'll re-use pHeapId (which was either unused (0) or freed by EndRootReferences2
638             // (-1)), so reset it to NULL
639             _ASSERTE((*((size_t *)(&pSC->pHeapId)) == (size_t)(-1)) ||
640                     (*((size_t *)(&pSC->pHeapId)) == (size_t)(0)));
641             pSC->pHeapId = NULL;
642
643             GCHeapUtilities::GetGCHeap()->DiagScanDependentHandles(&ScanHandleForProfilerAndETW, max_generation, &SC);
644
645             // indicate that dependent handle scanning is over, so we can flush the buffered roots
646             // to the profiler.  (This is for profapi only.  ETW will flush after the
647             // entire heap walk is complete, via ETW::GCLog::EndHeapDump.)
648             if (fProfilerPinned && ProfilerShouldTrackConditionalWeakTableElements())
649             {
650                 ProfilerEndConditionalWeakTableElementReferences(&SC.pHeapId);
651             }
652         }
653
654         ProfilerWalkHeapContext profilerWalkHeapContext(fProfilerPinned, SC.pvEtwContext);
655
656         // **** Walk objects on heap: only if profiling API wants them or ETW wants them.
657         if (fProfilerPinned || fShouldWalkHeapObjectsForEtw)
658         {
659             GCHeapUtilities::GetGCHeap()->DiagWalkHeap(&HeapWalkHelper, &profilerWalkHeapContext, max_generation, true /* walk the large object heap */);
660         }
661
662 #ifdef FEATURE_EVENT_TRACE
663         // **** Done! Indicate to ETW helpers that the heap walk is done, so any buffers
664         // should be flushed into the ETW stream
665         if (fShouldWalkHeapObjectsForEtw || fShouldWalkHeapRootsForEtw)
666         {
667             ETW::GCLog::EndHeapDump(&profilerWalkHeapContext);
668         }
669 #endif // FEATURE_EVENT_TRACE
670     }
671 }
672 #endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
673
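// Entry point for the diagnostic heap walk: optionally walks statics and COM wrappers for
// ETW, then runs GCProfileWalkHeapWorker for the profiler and/or ETW as requested.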
674 void GCProfileWalkHeap()
675 {
676     BOOL fWalkedHeapForProfiler = FALSE;
677
678 #ifdef FEATURE_EVENT_TRACE
679     if (ETW::GCLog::ShouldWalkStaticsAndCOMForEtw())
680         ETW::GCLog::WalkStaticsAndCOMForETW();
681     
682     BOOL fShouldWalkHeapRootsForEtw = ETW::GCLog::ShouldWalkHeapRootsForEtw();
683     BOOL fShouldWalkHeapObjectsForEtw = ETW::GCLog::ShouldWalkHeapObjectsForEtw();
684 #else // !FEATURE_EVENT_TRACE
685     BOOL fShouldWalkHeapRootsForEtw = FALSE;
686     BOOL fShouldWalkHeapObjectsForEtw = FALSE;
687 #endif // FEATURE_EVENT_TRACE
688
689 #if defined (GC_PROFILING)
690     {
691         BEGIN_PIN_PROFILER(CORProfilerTrackGC());
692         GCProfileWalkHeapWorker(TRUE /* fProfilerPinned */, fShouldWalkHeapRootsForEtw, fShouldWalkHeapObjectsForEtw);
693         fWalkedHeapForProfiler = TRUE;
694         END_PIN_PROFILER();
695     }
696 #endif // defined (GC_PROFILING)
697
698 #if defined (GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
699     // we need to walk the heap if one of GC_PROFILING or FEATURE_EVENT_TRACE
700     // is defined, since both of them make use of the walk heap worker.
701     if (!fWalkedHeapForProfiler &&
702         (fShouldWalkHeapRootsForEtw || fShouldWalkHeapObjectsForEtw))
703     {
704         GCProfileWalkHeapWorker(FALSE /* fProfilerPinned */, fShouldWalkHeapRootsForEtw, fShouldWalkHeapObjectsForEtw);
705     }
706 #endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
707 }
708
709 void WalkFReachableObjects(bool isCritical, void* objectID)
710 {
711         g_profControlBlock.pProfInterface->FinalizeableObjectQueued(isCritical, (ObjectID)objectID);
712 }
713
714 static fq_walk_fn g_FQWalkFn = &WalkFReachableObjects;
715
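// Diagnostic notification raised at the start of a GC: refreshes generation bounds, raises
// the GC-started callback, and, if a profiler is tracking GC, walks gen0 allocations by
// class (the large object heap is intentionally skipped).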
716 void GCToEEInterface::DiagGCStart(int gen, bool isInduced)
717 {
718 #ifdef GC_PROFILING
719     DiagUpdateGenerationBounds();
720     GarbageCollectionStartedCallback(gen, isInduced);
721     {
722         BEGIN_PIN_PROFILER(CORProfilerTrackGC());
723         size_t context = 0;
724
725         // When we're walking objects allocated by class, we don't want to walk the large
726         // object heap because it would count things that may have been around for a while.
727         GCHeapUtilities::GetGCHeap()->DiagWalkHeap(&AllocByClassHelper, (void *)&context, 0, false);
728
729         // Notify that we've reached the end of the Gen 0 scan
730         g_profControlBlock.pProfInterface->EndAllocByClass(&context);
731         END_PIN_PROFILER();
732     }
733
734 #endif // GC_PROFILING
735 }
736
737 void GCToEEInterface::DiagUpdateGenerationBounds()
738 {
739 #ifdef GC_PROFILING
740     if (CORProfilerTrackGC())
741         UpdateGenerationBounds();
742 #endif // GC_PROFILING
743 }
744
745 void GCToEEInterface::DiagGCEnd(size_t index, int gen, int reason, bool fConcurrent)
746 {
747 #ifdef GC_PROFILING
748     if (!fConcurrent)
749     {
750         GCProfileWalkHeap();
751         DiagUpdateGenerationBounds();
752         GarbageCollectionFinishedCallback();
753     }
754 #endif // GC_PROFILING
755 }
756
757 void GCToEEInterface::DiagWalkFReachableObjects(void* gcContext)
758 {
759 #ifdef GC_PROFILING
760     if (CORProfilerTrackGC())
761     {
762         BEGIN_PIN_PROFILER(CORProfilerPresent());
763         GCHeapUtilities::GetGCHeap()->DiagWalkFinalizeQueue(gcContext, g_FQWalkFn);
764         END_PIN_PROFILER();
765     }
766 #endif //GC_PROFILING
767 }
768
769 // Note on last parameter: when calling this for bgc, only ETW
770 // should be sending these events so that existing profapi profilers
771 // don't get confused.
772 void WalkMovedReferences(uint8_t* begin, uint8_t* end, 
773                          ptrdiff_t reloc,
774                          void* context, 
775                          bool fCompacting,
776                          bool fBGC)
777 {
778     ETW::GCLog::MovedReference(begin, end,
779                                (fCompacting ? reloc : 0),
780                                (size_t)context,
781                                fCompacting,
782                                !fBGC);
783 }
784
785 void GCToEEInterface::DiagWalkSurvivors(void* gcContext)
786 {
787 #if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
788     if (ShouldTrackMovementForProfilerOrEtw())
789     {
790         size_t context = 0;
791         ETW::GCLog::BeginMovedReferences(&context);
792         GCHeapUtilities::GetGCHeap()->DiagWalkSurvivorsWithType(gcContext, &WalkMovedReferences, (void*)context, walk_for_gc);
793         ETW::GCLog::EndMovedReferences(context);
794     }
795 #endif //GC_PROFILING || FEATURE_EVENT_TRACE
796 }
797
798 void GCToEEInterface::DiagWalkLOHSurvivors(void* gcContext)
799 {
800 #if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
801     if (ShouldTrackMovementForProfilerOrEtw())
802     {
803         size_t context = 0;
804         ETW::GCLog::BeginMovedReferences(&context);
805         GCHeapUtilities::GetGCHeap()->DiagWalkSurvivorsWithType(gcContext, &WalkMovedReferences, (void*)context, walk_for_loh);
806         ETW::GCLog::EndMovedReferences(context);
807     }
808 #endif //GC_PROFILING || FEATURE_EVENT_TRACE
809 }
810
811 void GCToEEInterface::DiagWalkBGCSurvivors(void* gcContext)
812 {
813 #if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
814     if (ShouldTrackMovementForProfilerOrEtw())
815     {
816         size_t context = 0;
817         ETW::GCLog::BeginMovedReferences(&context);
818         GCHeapUtilities::GetGCHeap()->DiagWalkSurvivorsWithType(gcContext, &WalkMovedReferences, (void*)context, walk_for_bgc);
819         ETW::GCLog::EndMovedReferences(context);
820     }
821 #endif //GC_PROFILING || FEATURE_EVENT_TRACE
822 }
823
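// Applies a write barrier update requested by the GC: publishes the new card table, heap
// bounds, ephemeral range, and (optionally) the software write watch table, then patches the
// barrier code and flushes instruction caches or restarts the EE as needed.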
824 void GCToEEInterface::StompWriteBarrier(WriteBarrierParameters* args)
825 {
826     int stompWBCompleteActions = SWB_PASS;
827     bool is_runtime_suspended = false;
828
829     assert(args != nullptr);
830     switch (args->operation)
831     {
832     case WriteBarrierOp::StompResize:
833         // StompResize requires a new card table, a new lowest address, and
834         // a new highest address
835         assert(args->card_table != nullptr);
836         assert(args->lowest_address != nullptr);
837         assert(args->highest_address != nullptr);
838
839         g_card_table = args->card_table;
840
841 #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
842         assert(args->card_bundle_table != nullptr);
843         g_card_bundle_table = args->card_bundle_table;
844 #endif
845
846 #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
847         if (g_sw_ww_enabled_for_gc_heap && (args->write_watch_table != nullptr))
848         {
849             assert(args->is_runtime_suspended);
850             g_sw_ww_table = args->write_watch_table;
851         }
852 #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
853
854         stompWBCompleteActions |= ::StompWriteBarrierResize(args->is_runtime_suspended, args->requires_upper_bounds_check);
855
856         // We need to make sure that other threads executing checked write barriers
857         // will see the g_card_table update before g_lowest/highest_address updates.
858         // Otherwise, the checked write barrier may AV accessing the old card table
859         // with an address that it does not cover.
860         //
861         // Even x86's total store ordering is insufficient here because threads reading
862         // g_card_table do so via the instruction cache, whereas g_lowest/highest_address
863         // are read via the data cache.
864         //
865         // The g_card_table update is covered by section 8.1.3 of the Intel Software
866         // Development Manual, Volume 3A (System Programming Guide, Part 1), about
867         // "cross-modifying code": We need all _executing_ threads to invalidate
868         // their instruction cache, which FlushProcessWriteBuffers achieves by sending
869         // an IPI (inter-processor interrupt).
870
871         if (stompWBCompleteActions & SWB_ICACHE_FLUSH)
872         {
873             // flushing icache on current processor (thread)
874             ::FlushWriteBarrierInstructionCache();
875             // asking other processors (threads) to invalidate their icache
876             FlushProcessWriteBuffers();
877         }
878
879         g_lowest_address = args->lowest_address;
880         VolatileStore(&g_highest_address, args->highest_address);
881
882 #if defined(_ARM64_)
883         // Need to update the barrier again for the changes to g_highest_address and g_lowest_address
884         is_runtime_suspended = (stompWBCompleteActions & SWB_EE_RESTART) || args->is_runtime_suspended;
885         stompWBCompleteActions |= ::StompWriteBarrierResize(is_runtime_suspended, args->requires_upper_bounds_check);
886
887         is_runtime_suspended = (stompWBCompleteActions & SWB_EE_RESTART) || args->is_runtime_suspended;
888         if(!is_runtime_suspended)
889         {
890             // If runtime is not suspended, force updated state to be visible to all threads
891             MemoryBarrier();
892         }
893 #endif
894         if (stompWBCompleteActions & SWB_EE_RESTART)
895         {
896             assert(!args->is_runtime_suspended &&
897                 "if the runtime was suspended in the patching routines, then it was in a running state at the beginning");
898             ThreadSuspend::RestartEE(FALSE, TRUE);
899         }
900         return; // unlike other branches we have already done cleanup so bailing out here
901     case WriteBarrierOp::StompEphemeral:
902         // StompEphemeral requires a new ephemeral low and a new ephemeral high
903         assert(args->ephemeral_low != nullptr);
904         assert(args->ephemeral_high != nullptr);
905         g_ephemeral_low = args->ephemeral_low;
906         g_ephemeral_high = args->ephemeral_high;
907         stompWBCompleteActions |= ::StompWriteBarrierEphemeral(args->is_runtime_suspended);
908         break;
909     case WriteBarrierOp::Initialize:
910         // This operation should only be invoked once, upon initialization.
911         assert(g_card_table == nullptr);
912         assert(g_lowest_address == nullptr);
913         assert(g_highest_address == nullptr);
914         assert(args->card_table != nullptr);
915         assert(args->lowest_address != nullptr);
916         assert(args->highest_address != nullptr);
917         assert(args->ephemeral_low != nullptr);
918         assert(args->ephemeral_high != nullptr);
919         assert(args->is_runtime_suspended && "the runtime must be suspended here!");
920         assert(!args->requires_upper_bounds_check && "the ephemeral generation must be at the top of the heap!");
921
922         g_card_table = args->card_table;
923
924 #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
925         assert(g_card_bundle_table == nullptr);
926         g_card_bundle_table = args->card_bundle_table;
927 #endif
928         
929         g_lowest_address = args->lowest_address;
930         g_highest_address = args->highest_address;
931         stompWBCompleteActions |= ::StompWriteBarrierResize(true, false);
932
933         // StompWriteBarrierResize does not necessarily bash g_ephemeral_low
934         // usages, so we must do so here. This is particularly true on x86,
935         // where StompWriteBarrierResize will not bash g_ephemeral_low when
936         // called with the parameters (true, false), as it is above.
937         g_ephemeral_low = args->ephemeral_low;
938         g_ephemeral_high = args->ephemeral_high;
939         stompWBCompleteActions |= ::StompWriteBarrierEphemeral(true);
940         break;
941     case WriteBarrierOp::SwitchToWriteWatch:
942 #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
943         assert(args->write_watch_table != nullptr);
944         assert(args->is_runtime_suspended && "the runtime must be suspended here!");
945         g_sw_ww_table = args->write_watch_table;
946         g_sw_ww_enabled_for_gc_heap = true;
947         stompWBCompleteActions |= ::SwitchToWriteWatchBarrier(true);
948 #else
949         assert(!"should never be called without FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP");
950 #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
951         break;
952     case WriteBarrierOp::SwitchToNonWriteWatch:
953 #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
954         assert(args->is_runtime_suspended && "the runtime must be suspended here!");
955         g_sw_ww_table = 0;
956         g_sw_ww_enabled_for_gc_heap = false;
957         stompWBCompleteActions |= ::SwitchToNonWriteWatchBarrier(true);
958 #else
959         assert(!"should never be called without FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP");
960 #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
961         break;
962     default:
963         assert(!"unknown WriteBarrierOp enum");
964     }
965     if (stompWBCompleteActions & SWB_ICACHE_FLUSH) 
966     {
967         ::FlushWriteBarrierInstructionCache();
968     }
969     if (stompWBCompleteActions & SWB_EE_RESTART) 
970     {
971         assert(!args->is_runtime_suspended && 
972             "if the runtime was suspended in the patching routines, then it was in a running state at the beginning");
973         ThreadSuspend::RestartEE(FALSE, TRUE);
974     }
975 }
976
977 void GCToEEInterface::EnableFinalization(bool foundFinalizers)
978 {
979     if (foundFinalizers || FinalizerThread::HaveExtraWorkForFinalizer())
980     {
981         FinalizerThread::EnableFinalization();
982     }
983 }
984
985 void GCToEEInterface::HandleFatalError(unsigned int exitCode)
986 {
987     EEPOLICY_HANDLE_FATAL_ERROR(exitCode);
988 }
989
990 bool GCToEEInterface::ShouldFinalizeObjectForUnload(AppDomain* pDomain, Object* obj)
991 {
992     // CoreCLR does not have appdomains, so this code path is dead. Other runtimes may
993     // choose to inspect the object being finalized here.
994     // [DESKTOP TODO] Desktop looks for "agile and finalizable" objects and may choose
995     // to move them to a new app domain instead of finalizing them here.
996     return true;
997 }
998
999 bool GCToEEInterface::ForceFullGCToBeBlocking()
1000 {
1001     // In theory, there is nothing fundamental that requires an AppDomain unload to induce
1002     // a blocking GC. In the past, this workaround was done to fix a stress AV, but the root
1003     // cause of the AV was never discovered and this workaround remains in place.
1004     //
1005     // It would be nice if this were not necessary. However, it's not clear if the aforementioned
1006     // stress bug is still lurking and will return if this workaround is removed. We should
1007     // do some experiments: remove this workaround and see if the stress bug still repros.
1008     // If so, we should find the root cause instead of relying on this.
1009     return !!SystemDomain::System()->RequireAppDomainCleanup();
1010 }
1011
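// Gives the EE a chance to finalize an object eagerly during the GC. WeakReference and
// WeakReference<T> instances are finalized here rather than being queued to the finalizer thread.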
1012 bool GCToEEInterface::EagerFinalized(Object* obj)
1013 {
1014     MethodTable* pMT = obj->GetGCSafeMethodTable();
1015     if (pMT == pWeakReferenceMT ||
1016         pMT->GetCanonicalMethodTable() == pWeakReferenceOfTCanonMT)
1017     {
1018         FinalizeWeakReference(obj);
1019         return true;
1020     }
1021
1022     return false;
1023 }
1024
1025 MethodTable* GCToEEInterface::GetFreeObjectMethodTable()
1026 {
1027     assert(g_pFreeObjectMethodTable != nullptr);
1028     return g_pFreeObjectMethodTable;
1029 }
1030
1031 // These are arbitrary; we shouldn't ever have config keys or values
1032 // longer than these lengths.
1033 const size_t MaxConfigKeyLength = 255;
1034 const size_t MaxConfigValueLength = 255;
1035
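// Looks up a boolean GC configuration value. A few well-known keys (gcServer, gcConcurrent,
// GCRetainVM) are answered from startup configuration; anything else is forwarded to CLRConfig.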
1036 bool GCToEEInterface::GetBooleanConfigValue(const char* key, bool* value)
1037 {
1038     CONTRACTL {
1039         NOTHROW;
1040         GC_NOTRIGGER;
1041     } CONTRACTL_END;
1042
1043     // these configuration values are given to us via startup flags.
1044     if (strcmp(key, "gcServer") == 0)
1045     {
1046         *value = g_heap_type == GC_HEAP_SVR;
1047         return true;
1048     }
1049
1050     if (strcmp(key, "gcConcurrent") == 0)
1051     {
1052         *value = !!g_pConfig->GetGCconcurrent();
1053         return true;
1054     }
1055
1056     if (strcmp(key, "GCRetainVM") == 0)
1057     {
1058         *value = !!g_pConfig->GetGCRetainVM();
1059         return true;
1060     }
1061
1062     WCHAR configKey[MaxConfigKeyLength];
1063     if (MultiByteToWideChar(CP_ACP, 0, key, -1 /* key is null-terminated */, configKey, MaxConfigKeyLength) == 0)
1064     {
1065         // whatever this is... it's not something we care about. (It was too long, wasn't unicode, etc.)
1066         return false;
1067     }
1068
1069     // otherwise, ask the config subsystem.
1070     if (CLRConfig::IsConfigOptionSpecified(configKey))
1071     {
1072         CLRConfig::ConfigDWORDInfo info { configKey , 0, CLRConfig::EEConfig_default };
1073         *value = CLRConfig::GetConfigValue(info) != 0;
1074         return true;
1075     }
1076
1077     return false;
1078 }
1079
1080 bool GCToEEInterface::GetIntConfigValue(const char* key, int64_t* value)
1081 {
1082     CONTRACTL {
1083       NOTHROW;
1084       GC_NOTRIGGER;
1085     } CONTRACTL_END;
1086
1087     WCHAR configKey[MaxConfigKeyLength];
1088     if (MultiByteToWideChar(CP_ACP, 0, key, -1 /* key is null-terminated */, configKey, MaxConfigKeyLength) == 0)
1089     {
1090         // whatever this is... it's not something we care about. (It was too long, wasn't unicode, etc.)
1091         return false;
1092     }
1093
1094     if (CLRConfig::IsConfigOptionSpecified(configKey))
1095     {
1096         CLRConfig::ConfigDWORDInfo info { configKey , 0, CLRConfig::EEConfig_default };
1097         *value = CLRConfig::GetConfigValue(info);
1098         return true;
1099     }
1100
1101     return false;
1102 }
1103
1104 bool GCToEEInterface::GetStringConfigValue(const char* key, const char** value)
1105 {
1106     CONTRACTL {
1107       NOTHROW;
1108       GC_NOTRIGGER;
1109     } CONTRACTL_END;
1110
1111     WCHAR configKey[MaxConfigKeyLength];
1112     if (MultiByteToWideChar(CP_ACP, 0, key, -1 /* key is null-terminated */, configKey, MaxConfigKeyLength) == 0)
1113     {
1114         // whatever this is... it's not something we care about. (It was too long, wasn't unicode, etc.)
1115         return false;
1116     }
1117
1118     CLRConfig::ConfigStringInfo info { configKey, CLRConfig::EEConfig_default };
1119     LPWSTR out = CLRConfig::GetConfigValue(info);
1120     if (!out)
1121     {
1122         // config not found
1123         return false;
1124     }
1125
1126     // not allocated on the stack since it escapes this function
1127     AStringHolder configResult = new (nothrow) char[MaxConfigValueLength];
1128     if (!configResult)
1129     {
1130         CLRConfig::FreeConfigString(out);
1131         return false;
1132     }
1133
1134     if (WideCharToMultiByte(CP_ACP, 0, out, -1 /* out is null-terminated */,
1135           configResult.GetValue(), MaxConfigValueLength, nullptr, nullptr) == 0)
1136     {
1137         // this should only happen if the config subsystem gives us a string that's not valid
1138         // unicode.
1139         CLRConfig::FreeConfigString(out);
1140         return false;
1141     }
1142
1143     *value = configResult.Extract();
1144     CLRConfig::FreeConfigString(out);
1145     return true;
1146 }
1147
1148 void GCToEEInterface::FreeStringConfigValue(const char* value)
1149 {
1150     delete [] value;
1151 }
1152
1153 bool GCToEEInterface::IsGCThread()
1154 {
1155     return !!::IsGCThread();
1156 }
1157
1158 bool GCToEEInterface::WasCurrentThreadCreatedByGC()
1159 {
1160     return !!::IsGCSpecialThread();
1161 }
1162
1163 struct SuspendableThreadStubArguments
1164 {
1165     void* Argument;
1166     void (*ThreadStart)(void*);
1167     Thread* Thread;
1168     bool HasStarted;
1169     CLREvent ThreadStartedEvent;
1170 };
1171
1172 struct ThreadStubArguments
1173 {
1174     void* Argument;
1175     void (*ThreadStart)(void*);
1176     HANDLE Thread;
1177     bool HasStarted;
1178     CLREvent ThreadStartedEvent;
1179 };
1180
1181 namespace
1182 {
1183     const size_t MaxThreadNameSize = 255;
1184
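    // Creates a GC thread backed by an EE Thread object, so it participates in EE suspension.
    // Returns false if the thread could not be set up or failed to start.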
1185     bool CreateSuspendableThread(
1186         void (*threadStart)(void*),
1187         void* argument,
1188         const char* name)
1189     {
1190         LIMITED_METHOD_CONTRACT;
1191
1192         SuspendableThreadStubArguments args;
1193         args.Argument = argument;
1194         args.ThreadStart = threadStart;
1195         args.Thread = nullptr;
1196         args.HasStarted = false;
1197         if (!args.ThreadStartedEvent.CreateAutoEventNoThrow(FALSE))
1198         {
1199             return false;
1200         }
1201
1202         EX_TRY
1203         {
1204             args.Thread = SetupUnstartedThread(FALSE);
1205         }
1206         EX_CATCH
1207         {
1208         }
1209         EX_END_CATCH(SwallowAllExceptions)
1210
1211         if (!args.Thread)
1212         {
1213             args.ThreadStartedEvent.CloseEvent();
1214             return false;
1215         }
1216
1217         auto threadStub = [](void* argument) -> DWORD
1218         {
1219             SuspendableThreadStubArguments* args = static_cast<SuspendableThreadStubArguments*>(argument);
1220             assert(args != nullptr);
1221
1222             ClrFlsSetThreadType(ThreadType_GC);
1223             args->Thread->SetGCSpecial(true);
1224             STRESS_LOG_RESERVE_MEM(GC_STRESSLOG_MULTIPLY);
1225             args->HasStarted = !!args->Thread->HasStarted(false);
1226
1227             Thread* thread = args->Thread;
1228             auto threadStart = args->ThreadStart;
1229             void* threadArgument = args->Argument;
1230             bool hasStarted = args->HasStarted;
1231             args->ThreadStartedEvent.Set();
1232
1233             // The stubArgs cannot be used once the event is set, since that releases wait on the
1234             // event in the function that created this thread and the stubArgs go out of scope.
1235             if (hasStarted)
1236             {
1237                 threadStart(threadArgument);
1238                 DestroyThread(thread);
1239             }
1240
1241             return 0;
1242         };
1243
1244         InlineSString<MaxThreadNameSize> wideName;
1245         const WCHAR* namePtr = nullptr;
1246         EX_TRY
1247         {
1248             if (name != nullptr)
1249             {
1250                 wideName.SetUTF8(name);
1251                 namePtr = wideName.GetUnicode();
1252             }
1253         }
1254         EX_CATCH
1255         {
1256             // we're not obligated to provide a name - if it's not valid,
1257             // just report nullptr as the name.
1258         }
1259         EX_END_CATCH(SwallowAllExceptions)
1260
1261         if (!args.Thread->CreateNewThread(0, threadStub, &args, namePtr))
1262         {
1263             args.Thread->DecExternalCount(FALSE);
1264             args.ThreadStartedEvent.CloseEvent();
1265             return false;
1266         }
1267
1268         args.Thread->SetBackground(TRUE, FALSE);
1269         args.Thread->StartThread();
1270
1271         // Wait for the thread to be in its main loop
1272         uint32_t res = args.ThreadStartedEvent.Wait(INFINITE, FALSE);
1273         args.ThreadStartedEvent.CloseEvent();
1274         _ASSERTE(res == WAIT_OBJECT_0);
1275
1276         if (!args.HasStarted)
1277         {
1278             // The thread has failed to start and the Thread object was destroyed in the Thread::HasStarted
1279             // failure code path.
1280             return false;
1281         }
1282
1283         return true;
1284     }
1285
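    // Creates a GC utility thread that is not backed by an EE Thread object and so does not
    // need to be suspended by the EE. Returns false if thread creation fails.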
1286     bool CreateNonSuspendableThread(
1287         void (*threadStart)(void*),
1288         void* argument,
1289         const char* name)
1290     {
1291         LIMITED_METHOD_CONTRACT;
1292
1293         ThreadStubArguments args;
1294         args.Argument = argument;
1295         args.ThreadStart = threadStart;
1296         args.Thread = INVALID_HANDLE_VALUE;
1297         if (!args.ThreadStartedEvent.CreateAutoEventNoThrow(FALSE))
1298         {
1299             return false;
1300         }
1301
1302         auto threadStub = [](void* argument) -> DWORD
1303         {
1304             ThreadStubArguments* args = static_cast<ThreadStubArguments*>(argument);
1305             assert(args != nullptr);
1306
1307             ClrFlsSetThreadType(ThreadType_GC);
1308             STRESS_LOG_RESERVE_MEM(GC_STRESSLOG_MULTIPLY);
1309
1310             args->HasStarted = true;
1311             auto threadStart = args->ThreadStart;
1312             void* threadArgument = args->Argument;
1313             args->ThreadStartedEvent.Set();
1314
1315             // The stub args cannot be used once the event is set, since that releases wait on the
1316             // event in the function that created this thread and the stubArgs go out of scope.
1317             threadStart(threadArgument);
1318             return 0;
1319         };
1320
1321         args.Thread = Thread::CreateUtilityThread(Thread::StackSize_Medium, threadStub, &args);
1322         if (args.Thread == INVALID_HANDLE_VALUE)
1323         {
1324             args.ThreadStartedEvent.CloseEvent();
1325             return false;
1326         }
1327
1328         // Wait for the thread to be in its main loop
1329         uint32_t res = args.ThreadStartedEvent.Wait(INFINITE, FALSE);
1330         args.ThreadStartedEvent.CloseEvent();
1331         _ASSERTE(res == WAIT_OBJECT_0);
1332
1333         CloseHandle(args.Thread);
1334         return true;
1335     }
1336 } // anonymous namespace
1337
1338 bool GCToEEInterface::CreateThread(void (*threadStart)(void*), void* arg, bool is_suspendable, const char* name)
1339 {
1340     LIMITED_METHOD_CONTRACT;
1341     if (is_suspendable)
1342     {
1343         return CreateSuspendableThread(threadStart, arg, name);
1344     }
1345     else
1346     {
1347         return CreateNonSuspendableThread(threadStart, arg, name);
1348     }
1349 }
1350
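// Reports the user object kept alive by an async-pinned OverlappedDataObject so the GC
// promotes and pins it; if the user object is an array, each element is reported instead.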
1351 void GCToEEInterface::WalkAsyncPinnedForPromotion(Object* object, ScanContext* sc, promote_func* callback)
1352 {
1353     LIMITED_METHOD_CONTRACT;
1354
1355     assert(object != nullptr);
1356     assert(sc != nullptr);
1357     assert(callback != nullptr);
1358     if (object->GetGCSafeMethodTable() != g_pOverlappedDataClass)
1359     {
1360         // not an overlapped data object - nothing to do.
1361         return;
1362     }
1363
1364     // reporting the pinned user objects
1365     OverlappedDataObject *pOverlapped = (OverlappedDataObject *)object;
1366     if (pOverlapped->m_userObject != NULL)
1367     {
1368         //callback(OBJECTREF_TO_UNCHECKED_OBJECTREF(pOverlapped->m_userObject), (ScanContext *)lp1, GC_CALL_PINNED);
1369         if (pOverlapped->m_isArray)
1370         {
1371             // OverlappedDataObject is very special.  An async pin handle keeps it alive.
1372             // During GC, we also make sure
1373             // 1. m_userObject itself does not move if m_userObject is not an array
1374             // 2. Every object pointed to by m_userObject does not move if m_userObject is an array
1375             // We do not want to pin m_userObject if it is an array.  But m_userObject may be updated
1376             // during the relocation phase before OverlappedDataObject does its own relocation.
1377             // m_userObjectInternal is used to track the location of the m_userObject before it is updated.
1378             pOverlapped->m_userObjectInternal = static_cast<void*>(OBJECTREFToObject(pOverlapped->m_userObject));
1379             ArrayBase* pUserObject = (ArrayBase*)OBJECTREFToObject(pOverlapped->m_userObject);
1380             Object **ppObj = (Object**)pUserObject->GetDataPtr(TRUE);
1381             size_t num = pUserObject->GetNumComponents();
1382             for (size_t i = 0; i < num; i++)
1383             {
1384                 callback(ppObj + i, sc, GC_CALL_PINNED);
1385             }
1386         }
1387         else
1388         {
1389             callback(&OBJECTREF_TO_UNCHECKED_OBJECTREF(pOverlapped->m_userObject), (ScanContext *)sc, GC_CALL_PINNED);
1390         }
1391     }
1392
1393     if (pOverlapped->GetAppDomainId() != DefaultADID && pOverlapped->GetAppDomainIndex().m_dwIndex == DefaultADID)
1394     {
1395         OverlappedDataObject::MarkCleanupNeededFromGC();
1396     }
1397 }
1398
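// Walks the object(s) referenced by an async-pinned OverlappedDataObject, invoking the
// callback for the user object and, if it is an array, for each of its elements.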
1399 void GCToEEInterface::WalkAsyncPinned(Object* object, void* context, void (*callback)(Object*, Object*, void*))
1400 {
1401     LIMITED_METHOD_CONTRACT;
1402
1403     assert(object != nullptr);
1404     assert(callback != nullptr);
1405
1406     if (object->GetGCSafeMethodTable() != g_pOverlappedDataClass)
1407     {
1408         return;
1409     }
1410
1411     OverlappedDataObject *pOverlapped = (OverlappedDataObject *)(object);
1412     if (pOverlapped->m_userObject != NULL)
1413     {
1414         Object * pUserObject = OBJECTREFToObject(pOverlapped->m_userObject);
1415         callback(object, pUserObject, context);
1416         if (pOverlapped->m_isArray)
1417         {
1418             ArrayBase* pUserArrayObject = (ArrayBase*)pUserObject;
1419             Object **pObj = (Object**)pUserArrayObject->GetDataPtr(TRUE);
1420             size_t num = pUserArrayObject->GetNumComponents();
1421             for (size_t i = 0; i < num; i ++)
1422             {
1423                 callback(pUserObject, pObj[i], context);
1424             }
1425         }
1426     }
1427 }
1428
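// Returns the event sink the GC uses to raise runtime events back into the EE.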
1429 IGCToCLREventSink* GCToEEInterface::EventSink()
1430 {
1431     LIMITED_METHOD_CONTRACT;
1432
1433     return &g_gcToClrEventSink;
1434 }