590cc1022e435331c467e312948758d17c893201
[platform/upstream/coreclr.git] / src / vm / gcenv.ee.cpp
1 // Licensed to the .NET Foundation under one or more agreements.
2 // The .NET Foundation licenses this file to you under the MIT license.
3 // See the LICENSE file in the project root for more information.
4
5 /*
6  * GCENV.EE.CPP
7  *
8  * GCToEEInterface implementation
9  *
10  *
11  *
12  */
13
14 void GCToEEInterface::SuspendEE(SUSPEND_REASON reason)
15 {
16     WRAPPER_NO_CONTRACT;
17
18     static_assert_no_msg(SUSPEND_FOR_GC == (int)ThreadSuspend::SUSPEND_FOR_GC);
19     static_assert_no_msg(SUSPEND_FOR_GC_PREP == (int)ThreadSuspend::SUSPEND_FOR_GC_PREP);
20
21     _ASSERTE(reason == SUSPEND_FOR_GC || reason == SUSPEND_FOR_GC_PREP);
22
23     ThreadSuspend::SuspendEE((ThreadSuspend::SUSPEND_REASON)reason);
24 }
25
26 void GCToEEInterface::RestartEE(bool bFinishedGC)
27 {
28     WRAPPER_NO_CONTRACT;
29
30     ThreadSuspend::RestartEE(bFinishedGC, TRUE);
31 }
32
33 VOID GCToEEInterface::SyncBlockCacheWeakPtrScan(HANDLESCANPROC scanProc, uintptr_t lp1, uintptr_t lp2)
34 {
35     CONTRACTL
36     {
37         NOTHROW;
38         GC_NOTRIGGER;
39     }
40     CONTRACTL_END;
41
42     SyncBlockCache::GetSyncBlockCache()->GCWeakPtrScan(scanProc, lp1, lp2);
43 }
44
45
46 // The EE can perform post-stack-scanning actions while the
47 // user threads are still suspended.
48 VOID GCToEEInterface::AfterGcScanRoots (int condemned, int max_gen,
49                                    ScanContext* sc)
50 {
51     CONTRACTL
52     {
53         NOTHROW;
54         GC_NOTRIGGER;
55     }
56     CONTRACTL_END;
57
58 #ifdef FEATURE_COMINTEROP
59     // Go through all the app domains and for each one detach all the *unmarked* RCWs to prevent
60     // the RCW cache from resurrecting them.
61     UnsafeAppDomainIterator i(TRUE);
62     i.Init();
63
64     while (i.Next())
65     {
66         i.GetDomain()->DetachRCWs();
67     }
68 #endif // FEATURE_COMINTEROP
69 }
70
71 /*
72  * Scan all stack roots
73  */
74  
75 static void ScanStackRoots(Thread * pThread, promote_func* fn, ScanContext* sc)
76 {
77     GCCONTEXT   gcctx;
78
79     gcctx.f  = fn;
80     gcctx.sc = sc;
81     gcctx.cf = NULL;
82
83     ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE();
84
85     // Either we are in a concurrent situation (in which case the thread is unknown to
86     // us), or we are performing a synchronous GC and we are the GC thread, holding
87     // the threadstore lock.
88
89     _ASSERTE(dbgOnly_IsSpecialEEThread() ||
90                 GetThread() == NULL ||
91                 // this is for background GC threads which always call this when EE is suspended.
92                 IsGCSpecialThread() || 
93                 (GetThread() == ThreadSuspend::GetSuspensionThread() && ThreadStore::HoldingThreadStore()));
94
95     pThread->SetHasPromotedBytes();
96
97     Frame* pTopFrame = pThread->GetFrame();
98     Object ** topStack = (Object **)pTopFrame;
99     if ((pTopFrame != ((Frame*)-1)) 
100         && (pTopFrame->GetVTablePtr() == InlinedCallFrame::GetMethodFrameVPtr())) {
101         // It is an InlinedCallFrame. Get SP from it.
102         InlinedCallFrame* pInlinedFrame = (InlinedCallFrame*)pTopFrame;
103         topStack = (Object **)pInlinedFrame->GetCallSiteSP();
104     } 
105
106     sc->stack_limit = (uintptr_t)topStack;
107
108 #ifdef FEATURE_CONSERVATIVE_GC
109     if (g_pConfig->GetGCConservative())
110     {
111         // Conservative stack root reporting:
112         // we treat everything on the stack as a pinned interior GC pointer.
113         // Since we report everything as pinned, we don't need to run the following code for the relocation phase.
114         if (sc->promotion)
115         {
116             Object ** bottomStack = (Object **) pThread->GetCachedStackBase();
117             Object ** walk;
118             for (walk = topStack; walk < bottomStack; walk ++)
119             {
120                 if (((void*)*walk > (void*)bottomStack || (void*)*walk < (void*)topStack) &&
121                     ((void*)*walk >= (void*)g_lowest_address && (void*)*walk <= (void*)g_highest_address)
122                     )
123                 {
124                     //DbgPrintf("promote " FMT_ADDR " : " FMT_ADDR "\n", walk, *walk);
125                     fn(walk, sc, GC_CALL_INTERIOR|GC_CALL_PINNED);
126                 }
127             }
128         }
129
130         // Also ask the explicit Frames to report any references they might know about.
131         // Generally these will be a subset of the objects reported above, but there's
132         // nothing that guarantees that and in the specific case of a GC protect frame the
133         // references it protects may live at a lower address than the frame itself (and
134         // thus escape the stack range we scanned above).
135         Frame *pFrame = pThread->GetFrame();
136         while (pFrame != FRAME_TOP)
137         {
138             pFrame->GcScanRoots(fn, sc);
139             pFrame = pFrame->PtrNextFrame();
140         }
141     }
142     else
143 #endif
144     {    
145         unsigned flagsStackWalk = ALLOW_ASYNC_STACK_WALK | ALLOW_INVALID_OBJECTS;
146 #if defined(WIN64EXCEPTIONS)            
147         flagsStackWalk |= GC_FUNCLET_REFERENCE_REPORTING;
148 #endif // defined(WIN64EXCEPTIONS)                        
149         pThread->StackWalkFrames( GcStackCrawlCallBack, &gcctx, flagsStackWalk);
150     }
151 }
152
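// Scan the GC roots owned by the EE: static GC references (only when condemning the
// max generation during the promotion phase, and only if this heap should compete for
// statics), followed by the stack of every thread whose allocation context belongs to
// the heap identified by sc->thread_number.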
153 void GCToEEInterface::GcScanRoots(promote_func* fn, int condemned, int max_gen, ScanContext* sc)
154 {
155     STRESS_LOG1(LF_GCROOTS, LL_INFO10, "GCScan: Promotion Phase = %d\n", sc->promotion);
156
157     // In server GC, we should be competing for marking the statics
158     if (GCHeapUtilities::MarkShouldCompeteForStatics())
159     {
160         if (condemned == max_gen && sc->promotion)
161         {
162             SystemDomain::EnumAllStaticGCRefs(fn, sc);
163         }
164     }
165
166     Thread* pThread = NULL;
167     while ((pThread = ThreadStore::GetThreadList(pThread)) != NULL)
168     {
169         STRESS_LOG2(LF_GC | LF_GCROOTS, LL_INFO100, "{ Starting scan of Thread %p ID = %x\n", pThread, pThread->GetThreadId());
170
171         if (GCHeapUtilities::GetGCHeap()->IsThreadUsingAllocationContextHeap(
172             pThread->GetAllocContext(), sc->thread_number))
173         {
174             sc->thread_under_crawl = pThread;
175 #ifdef FEATURE_EVENT_TRACE
176             sc->dwEtwRootKind = kEtwGCRootKindStack;
177 #endif // FEATURE_EVENT_TRACE
178             ScanStackRoots(pThread, fn, sc);
179 #ifdef FEATURE_EVENT_TRACE
180             sc->dwEtwRootKind = kEtwGCRootKindOther;
181 #endif // FEATURE_EVENT_TRACE
182         }
183         STRESS_LOG2(LF_GC | LF_GCROOTS, LL_INFO100, "Ending scan of Thread %p ID = 0x%x }\n", pThread, pThread->GetThreadId());
184     }
185 }
186
187 void GCToEEInterface::GcStartWork (int condemned, int max_gen)
188 {
189     CONTRACTL
190     {
191         NOTHROW;
192         GC_NOTRIGGER;
193     }
194     CONTRACTL_END;
195
196     // Update AppDomain stage here.
197     SystemDomain::System()->ProcessClearingDomains();
198
199 #ifdef VERIFY_HEAP
200     // Validate byrefs pinned by IL stubs since the last GC.
201     StubHelpers::ProcessByrefValidationList();
202 #endif // VERIFY_HEAP
203
204     ExecutionManager::CleanupCodeHeaps();
205
206 #ifdef FEATURE_EVENT_TRACE
207     ETW::TypeSystemLog::Cleanup();
208 #endif
209
210 #ifdef FEATURE_COMINTEROP
211     //
212     // Let the GC detect managed/native cycles with input from Jupiter.
213     // Jupiter will:
214     // 1. Report references from RCWs to CCWs based on native references in Jupiter
215     // 2. Identify the subset of CCWs that need to be rooted
216     //
217     // We'll build the references from RCWs to CCWs using:
218     // 1. Preallocated arrays
219     // 2. Dependent handles
220     // 
221     RCWWalker::OnGCStarted(condemned);
222 #endif // FEATURE_COMINTEROP
223
224     if (condemned == max_gen)
225     {
226         ThreadStore::s_pThreadStore->OnMaxGenerationGCStarted();
227     }
228 }
229
230 void GCToEEInterface::GcDone(int condemned)
231 {
232     CONTRACTL
233     {
234         NOTHROW;
235         GC_NOTRIGGER;
236     }
237     CONTRACTL_END;
238
239 #ifdef FEATURE_COMINTEROP
240     //
241     // Tell Jupiter GC has finished
242     // 
243     RCWWalker::OnGCFinished(condemned);
244 #endif // FEATURE_COMINTEROP
245 }
246
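// Callback for ref-counted handles during GC. With COM interop enabled this reports
// whether the object's ComCallWrapper is still active; without COM interop it always
// returns false.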
247 bool GCToEEInterface::RefCountedHandleCallbacks(Object * pObject)
248 {
249     CONTRACTL
250     {
251         NOTHROW;
252         GC_NOTRIGGER;
253     }
254     CONTRACTL_END;
255
256 #ifdef FEATURE_COMINTEROP
257     //<REVISIT_TODO>@todo optimize the access to the ref-count
258     ComCallWrapper* pWrap = ComCallWrapper::GetWrapperForObject((OBJECTREF)pObject);
259     _ASSERTE(pWrap != NULL);
260
261     return !!pWrap->IsWrapperActive();
262 #else
263     return false;
264 #endif
265 }
266
267 void GCToEEInterface::GcBeforeBGCSweepWork()
268 {
269     CONTRACTL
270     {
271         NOTHROW;
272         GC_NOTRIGGER;
273     }
274     CONTRACTL_END;
275
276 #ifdef VERIFY_HEAP
277     // Validate byrefs pinned by IL stubs since the last GC.
278     StubHelpers::ProcessByrefValidationList();
279 #endif // VERIFY_HEAP
280 }
281
282 void GCToEEInterface::SyncBlockCacheDemote(int max_gen)
283 {
284     CONTRACTL
285     {
286         NOTHROW;
287         GC_NOTRIGGER;
288     }
289     CONTRACTL_END;
290
291     SyncBlockCache::GetSyncBlockCache()->GCDone(TRUE, max_gen);
292 }
293
294 void GCToEEInterface::SyncBlockCachePromotionsGranted(int max_gen)
295 {
296     CONTRACTL
297     {
298         NOTHROW;
299         GC_NOTRIGGER;
300     }
301     CONTRACTL_END;
302
303     SyncBlockCache::GetSyncBlockCache()->GCDone(FALSE, max_gen);
304 }
305
306 uint32_t GCToEEInterface::GetActiveSyncBlockCount()
307 {
308     CONTRACTL
309     {
310         NOTHROW;
311         GC_NOTRIGGER;
312     }
313     CONTRACTL_END;
314
315     return SyncBlockCache::GetSyncBlockCache()->GetActiveCount();   
316 }
317
318 gc_alloc_context * GCToEEInterface::GetAllocContext()
319 {
320     WRAPPER_NO_CONTRACT;
321     
322     Thread* pThread = ::GetThread();
323     assert(pThread != nullptr);
324     return pThread->GetAllocContext();
325 }
326
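// Invoke 'fn' on every allocation context: each thread's context when per-thread
// allocation contexts are in use, otherwise the single global allocation context.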
327 void GCToEEInterface::GcEnumAllocContexts(enum_alloc_context_func* fn, void* param)
328 {
329     CONTRACTL
330     {
331         NOTHROW;
332         GC_NOTRIGGER;
333     }
334     CONTRACTL_END;
335
336     if (GCHeapUtilities::UseThreadAllocationContexts())
337     {
338         Thread * pThread = NULL;
339         while ((pThread = ThreadStore::GetThreadList(pThread)) != NULL)
340         {
341             fn(pThread->GetAllocContext(), param);
342         }
343     }
344     else
345     {
346         fn(&g_global_alloc_context, param);
347     }
348 }
349
350 bool GCToEEInterface::IsPreemptiveGCDisabled()
351 {
352     WRAPPER_NO_CONTRACT;
353
354     Thread* pThread = ::GetThread();
355     if (pThread)
356     {
357         return !!pThread->PreemptiveGCDisabled();
358     }
359
360     return false;
361 }
362
363 bool GCToEEInterface::EnablePreemptiveGC()
364 {
365     WRAPPER_NO_CONTRACT;
366
367     bool bToggleGC = false;
368     Thread* pThread = ::GetThread();
369
370     if (pThread)
371     {
372         bToggleGC = !!pThread->PreemptiveGCDisabled();
373         if (bToggleGC)
374         {
375             pThread->EnablePreemptiveGC();
376         }
377     }
378
379     return bToggleGC;
380 }
381
382 void GCToEEInterface::DisablePreemptiveGC()
383 {
384     WRAPPER_NO_CONTRACT;
385
386     Thread* pThread = ::GetThread();
387     if (pThread)
388     {
389         pThread->DisablePreemptiveGC();
390     }
391 }
392
393 Thread* GCToEEInterface::GetThread()
394 {
395     WRAPPER_NO_CONTRACT;
396
397     return ::GetThread();
398 }
399
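// Argument block and entry stub for a background GC thread created on behalf of the GC.
// The stub marks the new thread as a GC-special thread, reserves stress-log memory,
// signals the creator through threadStartedEvent, and then runs the supplied thread
// start function if the thread started successfully.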
400 struct BackgroundThreadStubArgs
401 {
402     Thread* thread;
403     GCBackgroundThreadFunction threadStart;
404     void* arg;
405     CLREvent threadStartedEvent;
406     bool hasStarted;
407 };
408
409 DWORD WINAPI BackgroundThreadStub(void* arg)
410 {
411     BackgroundThreadStubArgs* stubArgs = (BackgroundThreadStubArgs*)arg;
412     assert (stubArgs->thread != NULL);
413
414     ClrFlsSetThreadType (ThreadType_GC);
415     stubArgs->thread->SetGCSpecial(true);
416     STRESS_LOG_RESERVE_MEM (GC_STRESSLOG_MULTIPLY);
417
418     stubArgs->hasStarted = !!stubArgs->thread->HasStarted(FALSE);
419
420     Thread* thread = stubArgs->thread;
421     GCBackgroundThreadFunction realThreadStart = stubArgs->threadStart;
422     void* realThreadArg = stubArgs->arg;
423     bool hasStarted = stubArgs->hasStarted;
424
425     stubArgs->threadStartedEvent.Set();
426     // The stubArgs cannot be used once the event is set, since setting it releases the wait in
427     // the function that created this thread, after which the stubArgs go out of scope.
428
429     DWORD result = 0;
430
431     if (hasStarted)
432     {
433         result = realThreadStart(realThreadArg);
434         DestroyThread(thread);
435     }
436
437     return result;
438 }
439
440 //
441 // Diagnostics code
442 //
443
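// Returns TRUE when either a profiler that is tracking GC events or ETW needs to be
// notified about object movement during this GC.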
444 #if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
445 inline BOOL ShouldTrackMovementForProfilerOrEtw()
446 {
447 #ifdef GC_PROFILING
448     if (CORProfilerTrackGC())
449         return true;
450 #endif
451
452 #ifdef FEATURE_EVENT_TRACE
453     if (ETW::GCLog::ShouldTrackMovementForEtw())
454         return true;
455 #endif
456
457     return false;
458 }
459 #endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
460
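// Root-scan callback used for profiler/ETW walks: interior pointers are first resolved
// to their containing object (and skipped if there is none) before being forwarded to
// ScanRootsHelper.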
461 void ProfScanRootsHelper(Object** ppObject, ScanContext *pSC, uint32_t dwFlags)
462 {
463 #if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
464     Object *pObj = *ppObject;
465     if (dwFlags & GC_CALL_INTERIOR)
466     {
467         pObj = GCHeapUtilities::GetGCHeap()->GetContainingObject(pObj, true);
468         if (pObj == nullptr)
469             return;
470     }
471     ScanRootsHelper(pObj, ppObject, pSC, dwFlags);
472 #endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
473 }
474
475 // TODO - at some point we would like to completely decouple profiling
476 // from ETW tracing using a pattern similar to this, where the
477 // ProfilingScanContext has flags about whether or not certain things
478 // should be tracked, and each one of these ProfilerShouldXYZ functions
479 // will check these flags and determine what to do based upon that.
480 // GCProfileWalkHeapWorker can, in turn, call those methods without fear
481 // of things being ifdef'd out.
482
483 // Returns TRUE if GC profiling is enabled and the profiler
484 // should scan dependent handles, FALSE otherwise.
485 BOOL ProfilerShouldTrackConditionalWeakTableElements() 
486 {
487 #if defined(GC_PROFILING)
488     return CORProfilerTrackConditionalWeakTableElements();
489 #else
490     return FALSE;
491 #endif // defined (GC_PROFILING)
492 }
493
494 // If GC profiling is enabled, informs the profiler that we are done
495 // tracing dependent handles.
496 void ProfilerEndConditionalWeakTableElementReferences(void* heapId)
497 {
498 #if defined (GC_PROFILING)
499     g_profControlBlock.pProfInterface->EndConditionalWeakTableElementReferences(heapId);
500 #else
501     UNREFERENCED_PARAMETER(heapId);
502 #endif // defined (GC_PROFILING)
503 }
504
505 // If GC profiling is enabled, informs the profiler that we are done
506 // tracing root references.
507 void ProfilerEndRootReferences2(void* heapId) 
508 {
509 #if defined (GC_PROFILING)
510     g_profControlBlock.pProfInterface->EndRootReferences2(heapId);
511 #else
512     UNREFERENCED_PARAMETER(heapId);
513 #endif // defined (GC_PROFILING)
514 }
515
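// Scan every thread's stack roots for the profiler/ETW heap dump. Unlike the GC's own
// GcScanRoots above, there is no per-heap filtering here because a single walker visits
// all threads.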
516 void GcScanRootsForProfilerAndETW(promote_func* fn, int condemned, int max_gen, ScanContext* sc)
517 {
518     Thread* pThread = NULL;
519     while ((pThread = ThreadStore::GetThreadList(pThread)) != NULL)
520     {
521         sc->thread_under_crawl = pThread;
522 #ifdef FEATURE_EVENT_TRACE
523         sc->dwEtwRootKind = kEtwGCRootKindStack;
524 #endif // FEATURE_EVENT_TRACE
525         ScanStackRoots(pThread, fn, sc);
526 #ifdef FEATURE_EVENT_TRACE
527         sc->dwEtwRootKind = kEtwGCRootKindOther;
528 #endif // FEATURE_EVENT_TRACE
529     }
530 }
531
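// Handle-table scan callback: reports each handle to the profiler either as a root
// reference or as a conditional weak table (dependent handle) reference, and also
// reports it to ETW when ETW is walking heap roots.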
532 void ScanHandleForProfilerAndETW(Object** pRef, Object* pSec, uint32_t flags, ScanContext* context, bool isDependent)
533 {
534     ProfilingScanContext* pSC = (ProfilingScanContext*)context;
535
536 #ifdef GC_PROFILING
537     // Give the profiler the objectref.
538     if (pSC->fProfilerPinned)
539     {
540         if (!isDependent)
541         {
542             BEGIN_PIN_PROFILER(CORProfilerTrackGC());
543             g_profControlBlock.pProfInterface->RootReference2(
544                 (uint8_t *)*pRef,
545                 kEtwGCRootKindHandle,
546                 (EtwGCRootFlags)flags,
547                 pRef, 
548                 &pSC->pHeapId);
549             END_PIN_PROFILER();
550         }
551         else
552         {
553             BEGIN_PIN_PROFILER(CORProfilerTrackConditionalWeakTableElements());
554             g_profControlBlock.pProfInterface->ConditionalWeakTableElementReference(
555                 (uint8_t*)*pRef,
556                 (uint8_t*)pSec,
557                 pRef,
558                 &pSC->pHeapId);
559             END_PIN_PROFILER();
560         }
561     }
562 #endif // GC_PROFILING
563
564 #if defined(FEATURE_EVENT_TRACE)
565     // Notify ETW of the handle
566     if (ETW::GCLog::ShouldWalkHeapRootsForEtw())
567     {
568         ETW::GCLog::RootReference(
569             pRef,
570             *pRef,          // object being rooted
571             pSec,           // pSecondaryNodeForDependentHandle
572             isDependent,
573             pSC,
574             0,              // dwGCFlags,
575             flags);     // ETW handle flags
576     }
577 #endif // defined(FEATURE_EVENT_TRACE) 
578 }
579
580 // This is called only if we've determined that either:
581 //     a) The Profiling API wants to do a walk of the heap, and it has pinned the
582 //     profiler in place (so it cannot be detached), and it's thus safe to call into the
583 //     profiler, OR
584 //     b) ETW infrastructure wants to do a walk of the heap either to log roots,
585 //     objects, or both.
586 // This can also be called to do a single walk for BOTH a) and b) simultaneously.  Since
587 // ETW can ask for roots but not objects (or vice versa), each part of the walk below is done conditionally.
588 #if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
589 void GCProfileWalkHeapWorker(BOOL fProfilerPinned, BOOL fShouldWalkHeapRootsForEtw, BOOL fShouldWalkHeapObjectsForEtw)
590 {
591     {
592         ProfilingScanContext SC(fProfilerPinned);
593         unsigned max_generation = GCHeapUtilities::GetGCHeap()->GetMaxGeneration();
594
595         // **** Scan roots:  Only scan roots if profiling API wants them or ETW wants them.
596         if (fProfilerPinned || fShouldWalkHeapRootsForEtw)
597         {
598             GcScanRootsForProfilerAndETW(&ProfScanRootsHelper, max_generation, max_generation, &SC);
599             SC.dwEtwRootKind = kEtwGCRootKindFinalizer;
600             GCHeapUtilities::GetGCHeap()->DiagScanFinalizeQueue(&ProfScanRootsHelper, &SC);
601
602             // Handles are kept independent of wks/svr/concurrent builds
603             SC.dwEtwRootKind = kEtwGCRootKindHandle;
604             GCHeapUtilities::GetGCHeap()->DiagScanHandles(&ScanHandleForProfilerAndETW, max_generation, &SC);
605
606             // indicate that regular handle scanning is over, so we can flush the buffered roots
607             // to the profiler.  (This is for profapi only.  ETW will flush after the
608             // entire heap walk is complete, via ETW::GCLog::EndHeapDump.)
609             if (fProfilerPinned)
610             {
611                 ProfilerEndRootReferences2(&SC.pHeapId);
612             }
613         }
614
615         // **** Scan dependent handles: only if the profiler supports it or ETW wants roots
616         if ((fProfilerPinned && ProfilerShouldTrackConditionalWeakTableElements()) ||
617             fShouldWalkHeapRootsForEtw)
618         {
619             // GcScanDependentHandlesForProfiler double-checks
620             // CORProfilerTrackConditionalWeakTableElements() before calling into the profiler
621
622             ProfilingScanContext* pSC = &SC;
623
624             // we'll re-use pHeapId (which was either unused (0) or freed by EndRootReferences2
625             // (-1)), so reset it to NULL
626             _ASSERTE((*((size_t *)(&pSC->pHeapId)) == (size_t)(-1)) ||
627                     (*((size_t *)(&pSC->pHeapId)) == (size_t)(0)));
628             pSC->pHeapId = NULL;
629
630             GCHeapUtilities::GetGCHeap()->DiagScanDependentHandles(&ScanHandleForProfilerAndETW, max_generation, &SC);
631
632             // indicate that dependent handle scanning is over, so we can flush the buffered roots
633             // to the profiler.  (This is for profapi only.  ETW will flush after the
634             // entire heap walk is complete, via ETW::GCLog::EndHeapDump.)
635             if (fProfilerPinned && ProfilerShouldTrackConditionalWeakTableElements())
636             {
637                 ProfilerEndConditionalWeakTableElementReferences(&SC.pHeapId);
638             }
639         }
640
641         ProfilerWalkHeapContext profilerWalkHeapContext(fProfilerPinned, SC.pvEtwContext);
642
643         // **** Walk objects on heap: only if profiling API wants them or ETW wants them.
644         if (fProfilerPinned || fShouldWalkHeapObjectsForEtw)
645         {
646             GCHeapUtilities::GetGCHeap()->DiagWalkHeap(&HeapWalkHelper, &profilerWalkHeapContext, max_generation, true /* walk the large object heap */);
647         }
648
649 #ifdef FEATURE_EVENT_TRACE
650         // **** Done! Indicate to ETW helpers that the heap walk is done, so any buffers
651         // should be flushed into the ETW stream
652         if (fShouldWalkHeapObjectsForEtw || fShouldWalkHeapRootsForEtw)
653         {
654             ETW::GCLog::EndHeapDump(&profilerWalkHeapContext);
655         }
656 #endif // FEATURE_EVENT_TRACE
657     }
658 }
659 #endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
660
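// Entry point for the diagnostic heap walk: walks statics and COM state for ETW if
// requested, then runs GCProfileWalkHeapWorker exactly once, with the profiler pinned
// when a GC-tracking profiler is attached and unpinned when only ETW asked for the walk.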
661 void GCProfileWalkHeap()
662 {
663     BOOL fWalkedHeapForProfiler = FALSE;
664
665 #ifdef FEATURE_EVENT_TRACE
666     if (ETW::GCLog::ShouldWalkStaticsAndCOMForEtw())
667         ETW::GCLog::WalkStaticsAndCOMForETW();
668     
669     BOOL fShouldWalkHeapRootsForEtw = ETW::GCLog::ShouldWalkHeapRootsForEtw();
670     BOOL fShouldWalkHeapObjectsForEtw = ETW::GCLog::ShouldWalkHeapObjectsForEtw();
671 #else // !FEATURE_EVENT_TRACE
672     BOOL fShouldWalkHeapRootsForEtw = FALSE;
673     BOOL fShouldWalkHeapObjectsForEtw = FALSE;
674 #endif // FEATURE_EVENT_TRACE
675
676 #if defined (GC_PROFILING)
677     {
678         BEGIN_PIN_PROFILER(CORProfilerTrackGC());
679         GCProfileWalkHeapWorker(TRUE /* fProfilerPinned */, fShouldWalkHeapRootsForEtw, fShouldWalkHeapObjectsForEtw);
680         fWalkedHeapForProfiler = TRUE;
681         END_PIN_PROFILER();
682     }
683 #endif // defined (GC_PROFILING)
684
685 #if defined (GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
686     // we need to walk the heap if one of GC_PROFILING or FEATURE_EVENT_TRACE
687     // is defined, since both of them make use of the walk heap worker.
688     if (!fWalkedHeapForProfiler &&
689         (fShouldWalkHeapRootsForEtw || fShouldWalkHeapObjectsForEtw))
690     {
691         GCProfileWalkHeapWorker(FALSE /* fProfilerPinned */, fShouldWalkHeapRootsForEtw, fShouldWalkHeapObjectsForEtw);
692     }
693 #endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
694 }
695
696 void WalkFReachableObjects(bool isCritical, void* objectID)
697 {
698         g_profControlBlock.pProfInterface->FinalizeableObjectQueued(isCritical, (ObjectID)objectID);
699 }
700
701 static fq_walk_fn g_FQWalkFn = &WalkFReachableObjects;
702
703 void GCToEEInterface::DiagGCStart(int gen, bool isInduced)
704 {
705 #ifdef GC_PROFILING
706     DiagUpdateGenerationBounds();
707     GarbageCollectionStartedCallback(gen, isInduced);
708     {
709         BEGIN_PIN_PROFILER(CORProfilerTrackGC());
710         size_t context = 0;
711
712         // When we're walking objects allocated by class, then we don't want to walk the large
713         // object heap because then it would count things that may have been around for a while.
714         GCHeapUtilities::GetGCHeap()->DiagWalkHeap(&AllocByClassHelper, (void *)&context, 0, false);
715
716         // Notify that we've reached the end of the Gen 0 scan
717         g_profControlBlock.pProfInterface->EndAllocByClass(&context);
718         END_PIN_PROFILER();
719     }
720
721 #endif // GC_PROFILING
722 }
723
724 void GCToEEInterface::DiagUpdateGenerationBounds()
725 {
726 #ifdef GC_PROFILING
727     if (CORProfilerTrackGC())
728         UpdateGenerationBounds();
729 #endif // GC_PROFILING
730 }
731
732 void GCToEEInterface::DiagGCEnd(size_t index, int gen, int reason, bool fConcurrent)
733 {
734 #ifdef GC_PROFILING
735     if (!fConcurrent)
736     {
737         GCProfileWalkHeap();
738         DiagUpdateGenerationBounds();
739         GarbageCollectionFinishedCallback();
740     }
741 #endif // GC_PROFILING
742 }
743
744 void GCToEEInterface::DiagWalkFReachableObjects(void* gcContext)
745 {
746 #ifdef GC_PROFILING
747     if (CORProfilerTrackGC())
748     {
749         BEGIN_PIN_PROFILER(CORProfilerPresent());
750         GCHeapUtilities::GetGCHeap()->DiagWalkFinalizeQueue(gcContext, g_FQWalkFn);
751         END_PIN_PROFILER();
752     }
753 #endif //GC_PROFILING
754 }
755
756 // Note on last parameter: when calling this for bgc, only ETW
757 // should be sending these events so that existing profapi profilers
758 // don't get confused.
759 void WalkMovedReferences(uint8_t* begin, uint8_t* end, 
760                          ptrdiff_t reloc,
761                          void* context, 
762                          bool fCompacting,
763                          bool fBGC)
764 {
765     ETW::GCLog::MovedReference(begin, end,
766                                (fCompacting ? reloc : 0),
767                                (size_t)context,
768                                fCompacting,
769                                !fBGC);
770 }
771
772 void GCToEEInterface::DiagWalkSurvivors(void* gcContext)
773 {
774 #if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
775     if (ShouldTrackMovementForProfilerOrEtw())
776     {
777         size_t context = 0;
778         ETW::GCLog::BeginMovedReferences(&context);
779         GCHeapUtilities::GetGCHeap()->DiagWalkSurvivorsWithType(gcContext, &WalkMovedReferences, (void*)context, walk_for_gc);
780         ETW::GCLog::EndMovedReferences(context);
781     }
782 #endif //GC_PROFILING || FEATURE_EVENT_TRACE
783 }
784
785 void GCToEEInterface::DiagWalkLOHSurvivors(void* gcContext)
786 {
787 #if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
788     if (ShouldTrackMovementForProfilerOrEtw())
789     {
790         size_t context = 0;
791         ETW::GCLog::BeginMovedReferences(&context);
792         GCHeapUtilities::GetGCHeap()->DiagWalkSurvivorsWithType(gcContext, &WalkMovedReferences, (void*)context, walk_for_loh);
793         ETW::GCLog::EndMovedReferences(context);
794     }
795 #endif //GC_PROFILING || FEATURE_EVENT_TRACE
796 }
797
798 void GCToEEInterface::DiagWalkBGCSurvivors(void* gcContext)
799 {
800 #if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
801     if (ShouldTrackMovementForProfilerOrEtw())
802     {
803         size_t context = 0;
804         ETW::GCLog::BeginMovedReferences(&context);
805         GCHeapUtilities::GetGCHeap()->DiagWalkSurvivorsWithType(gcContext, &WalkMovedReferences, (void*)context, walk_for_bgc);
806         ETW::GCLog::EndMovedReferences(context);
807     }
808 #endif //GC_PROFILING || FEATURE_EVENT_TRACE
809 }
810
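// Apply a write barrier update requested by the GC: publish the new card table, heap
// bounds, ephemeral range, and (where applicable) software write watch table, then patch
// or re-select the barrier code. The flags returned by the Stomp*/SwitchTo* helpers
// determine whether the instruction cache must be flushed and whether the EE, which the
// helpers may have suspended while patching, has to be restarted afterwards.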
811 void GCToEEInterface::StompWriteBarrier(WriteBarrierParameters* args)
812 {
813     int stompWBCompleteActions = SWB_PASS;
814     bool is_runtime_suspended = false;
815
816     assert(args != nullptr);
817     switch (args->operation)
818     {
819     case WriteBarrierOp::StompResize:
820         // StompResize requires a new card table, a new lowest address, and
821         // a new highest address
822         assert(args->card_table != nullptr);
823         assert(args->lowest_address != nullptr);
824         assert(args->highest_address != nullptr);
825
826         g_card_table = args->card_table;
827
828 #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
829         assert(args->card_bundle_table != nullptr);
830         g_card_bundle_table = args->card_bundle_table;
831 #endif
832
833 #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
834         if (g_sw_ww_enabled_for_gc_heap && (args->write_watch_table != nullptr))
835         {
836             assert(args->is_runtime_suspended);
837             g_sw_ww_table = args->write_watch_table;
838         }
839 #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
840
841         stompWBCompleteActions |= ::StompWriteBarrierResize(args->is_runtime_suspended, args->requires_upper_bounds_check);
842
843         // We need to make sure that other threads executing checked write barriers
844         // will see the g_card_table update before g_lowest/highest_address updates.
845         // Otherwise, the checked write barrier may AV accessing the old card table
846         // with address that it does not cover. 
847         //
848         // Even x86's total store ordering is insufficient here because threads reading
849         // g_card_table do so via the instruction cache, whereas g_lowest/highest_address
850         // are read via the data cache.
851         //
852         // The g_card_table update is covered by section 8.1.3 of the Intel Software
853         // Development Manual, Volume 3A (System Programming Guide, Part 1), about
854         // "cross-modifying code": We need all _executing_ threads to invalidate
855         // their instruction cache, which FlushProcessWriteBuffers achieves by sending
856         // an IPI (inter-processor interrupt).
857
858         if (stompWBCompleteActions & SWB_ICACHE_FLUSH)
859         {
860             // flushing icache on current processor (thread)
861             ::FlushWriteBarrierInstructionCache();
862             // asking other processors (threads) to invalidate their icache
863             FlushProcessWriteBuffers();
864         }
865
866         g_lowest_address = args->lowest_address;
867         VolatileStore(&g_highest_address, args->highest_address);
868
869 #if defined(_ARM64_)
870         // Re-run the resize update for the changes to g_highest_address and g_lowest_address
871         is_runtime_suspended = (stompWBCompleteActions & SWB_EE_RESTART) || args->is_runtime_suspended;
872         stompWBCompleteActions |= ::StompWriteBarrierResize(is_runtime_suspended, args->requires_upper_bounds_check);
873
874         is_runtime_suspended = (stompWBCompleteActions & SWB_EE_RESTART) || args->is_runtime_suspended;
875         if(!is_runtime_suspended)
876         {
877             // If runtime is not suspended, force updated state to be visible to all threads
878             MemoryBarrier();
879         }
880 #endif
881         if (stompWBCompleteActions & SWB_EE_RESTART)
882         {
883             assert(!args->is_runtime_suspended &&
884                 "if runtime was suspended in patching routines then it was in running state at the beginning");
885             ThreadSuspend::RestartEE(FALSE, TRUE);
886         }
887         return; // unlike other branches we have already done cleanup so bailing out here
888     case WriteBarrierOp::StompEphemeral:
889         // StompEphemeral requires a new ephemeral low and a new ephemeral high
890         assert(args->ephemeral_low != nullptr);
891         assert(args->ephemeral_high != nullptr);
892         g_ephemeral_low = args->ephemeral_low;
893         g_ephemeral_high = args->ephemeral_high;
894         stompWBCompleteActions |= ::StompWriteBarrierEphemeral(args->is_runtime_suspended);
895         break;
896     case WriteBarrierOp::Initialize:
897         // This operation should only be invoked once, upon initialization.
898         assert(g_card_table == nullptr);
899         assert(g_lowest_address == nullptr);
900         assert(g_highest_address == nullptr);
901         assert(args->card_table != nullptr);
902         assert(args->lowest_address != nullptr);
903         assert(args->highest_address != nullptr);
904         assert(args->ephemeral_low != nullptr);
905         assert(args->ephemeral_high != nullptr);
906         assert(args->is_runtime_suspended && "the runtime must be suspended here!");
907         assert(!args->requires_upper_bounds_check && "the ephemeral generation must be at the top of the heap!");
908
909         g_card_table = args->card_table;
910
911 #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
912         assert(g_card_bundle_table == nullptr);
913         g_card_bundle_table = args->card_bundle_table;
914 #endif
915         
916         g_lowest_address = args->lowest_address;
917         g_highest_address = args->highest_address;
918         stompWBCompleteActions |= ::StompWriteBarrierResize(true, false);
919
920         // StompWriteBarrierResize does not necessarily bash g_ephemeral_low
921         // usages, so we must do so here. This is particularly true on x86,
922         // where StompWriteBarrierResize will not bash g_ephemeral_low when
923         // called with the parameters (true, false), as it is above.
924         g_ephemeral_low = args->ephemeral_low;
925         g_ephemeral_high = args->ephemeral_high;
926         stompWBCompleteActions |= ::StompWriteBarrierEphemeral(true);
927         break;
928     case WriteBarrierOp::SwitchToWriteWatch:
929 #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
930         assert(args->write_watch_table != nullptr);
931         assert(args->is_runtime_suspended && "the runtime must be suspended here!");
932         g_sw_ww_table = args->write_watch_table;
933         g_sw_ww_enabled_for_gc_heap = true;
934         stompWBCompleteActions |= ::SwitchToWriteWatchBarrier(true);
935 #else
936         assert(!"should never be called without FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP");
937 #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
938         break;
939     case WriteBarrierOp::SwitchToNonWriteWatch:
940 #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
941         assert(args->is_runtime_suspended && "the runtime must be suspended here!");
942         g_sw_ww_table = 0;
943         g_sw_ww_enabled_for_gc_heap = false;
944         stompWBCompleteActions |= ::SwitchToNonWriteWatchBarrier(true);
945 #else
946         assert(!"should never be called without FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP");
947 #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
948         break;
949     default:
950         assert(!"unknown WriteBarrierOp enum");
951     }
952     if (stompWBCompleteActions & SWB_ICACHE_FLUSH) 
953     {
954         ::FlushWriteBarrierInstructionCache();
955     }
956     if (stompWBCompleteActions & SWB_EE_RESTART) 
957     {
958         assert(!args->is_runtime_suspended && 
959             "if runtime was suspended in patching routines then it was in running state at the beginning");
960         ThreadSuspend::RestartEE(FALSE, TRUE);
961     }
962 }
963
964 void GCToEEInterface::EnableFinalization(bool foundFinalizers)
965 {
966     if (foundFinalizers || FinalizerThread::HaveExtraWorkForFinalizer())
967     {
968         FinalizerThread::EnableFinalization();
969     }
970 }
971
972 void GCToEEInterface::HandleFatalError(unsigned int exitCode)
973 {
974     EEPOLICY_HANDLE_FATAL_ERROR(exitCode);
975 }
976
977 bool GCToEEInterface::ShouldFinalizeObjectForUnload(AppDomain* pDomain, Object* obj)
978 {
979     // CoreCLR does not have appdomains, so this code path is dead. Other runtimes may
980     // choose to inspect the object being finalized here.
981     // [DESKTOP TODO] Desktop looks for "agile and finalizable" objects and may choose
982     // to move them to a new app domain instead of finalizing them here.
983     return true;
984 }
985
986 bool GCToEEInterface::ForceFullGCToBeBlocking()
987 {
988     // In theory, there is nothing fundamental that requires an AppDomain unload to induce
989     // a blocking GC. In the past, this workaround was done to fix a stress AV, but the root
990     // cause of the AV was never discovered and this workaround remains in place.
991     //
992     // It would be nice if this were not necessary. However, it's not clear if the aforementioned
993     // stress bug is still lurking and will return if this workaround is removed. We should
994     // do some experiments: remove this workaround and see if the stress bug still repros.
995     // If so, we should find the root cause instead of relying on this.
996     return !!SystemDomain::System()->RequireAppDomainCleanup();
997 }
998
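// Gives the EE a chance to finalize an object immediately during the GC instead of
// queuing it to the finalizer thread. WeakReference and WeakReference<T> instances are
// handled here via FinalizeWeakReference, and true is returned so they are not queued.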
999 bool GCToEEInterface::EagerFinalized(Object* obj)
1000 {
1001     MethodTable* pMT = obj->GetGCSafeMethodTable();
1002     if (pMT == pWeakReferenceMT ||
1003         pMT->GetCanonicalMethodTable() == pWeakReferenceOfTCanonMT)
1004     {
1005         FinalizeWeakReference(obj);
1006         return true;
1007     }
1008
1009     return false;
1010 }
1011
1012 MethodTable* GCToEEInterface::GetFreeObjectMethodTable()
1013 {
1014     assert(g_pFreeObjectMethodTable != nullptr);
1015     return g_pFreeObjectMethodTable;
1016 }
1017
1018 // These are arbitrary; we should never have config keys or values
1019 // longer than these lengths.
1020 const size_t MaxConfigKeyLength = 255;
1021 const size_t MaxConfigValueLength = 255;
1022
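// Boolean GC configuration lookup: a few well-known keys (gcServer, gcConcurrent,
// GCRetainVM) are answered from startup configuration; any other key is converted to a
// wide string and looked up through CLRConfig if such an option was specified.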
1023 bool GCToEEInterface::GetBooleanConfigValue(const char* key, bool* value)
1024 {
1025     CONTRACTL {
1026         NOTHROW;
1027         GC_NOTRIGGER;
1028     } CONTRACTL_END;
1029
1030     // these configuration values are given to us via startup flags.
1031     if (strcmp(key, "gcServer") == 0)
1032     {
1033         *value = g_heap_type == GC_HEAP_SVR;
1034         return true;
1035     }
1036
1037     if (strcmp(key, "gcConcurrent") == 0)
1038     {
1039         *value = !!g_pConfig->GetGCconcurrent();
1040         return true;
1041     }
1042
1043     if (strcmp(key, "GCRetainVM") == 0)
1044     {
1045         *value = !!g_pConfig->GetGCRetainVM();
1046         return true;
1047     }
1048
1049     WCHAR configKey[MaxConfigKeyLength];
1050     if (MultiByteToWideChar(CP_ACP, 0, key, -1 /* key is null-terminated */, configKey, MaxConfigKeyLength) == 0)
1051     {
1052         // whatever this is... it's not something we care about. (It was too long, wasn't unicode, etc.)
1053         return false;
1054     }
1055
1056     // otherwise, ask the config subsystem.
1057     if (CLRConfig::IsConfigOptionSpecified(configKey))
1058     {
1059         CLRConfig::ConfigDWORDInfo info { configKey , 0, CLRConfig::EEConfig_default };
1060         *value = CLRConfig::GetConfigValue(info) != 0;
1061         return true;
1062     }
1063
1064     return false;
1065 }
1066
1067 bool GCToEEInterface::GetIntConfigValue(const char* key, int64_t* value)
1068 {
1069     CONTRACTL {
1070       NOTHROW;
1071       GC_NOTRIGGER;
1072     } CONTRACTL_END;
1073
1074     WCHAR configKey[MaxConfigKeyLength];
1075     if (MultiByteToWideChar(CP_ACP, 0, key, -1 /* key is null-terminated */, configKey, MaxConfigKeyLength) == 0)
1076     {
1077         // whatever this is... it's not something we care about. (It was too long, wasn't unicode, etc.)
1078         return false;
1079     }
1080
1081     if (CLRConfig::IsConfigOptionSpecified(configKey))
1082     {
1083         CLRConfig::ConfigDWORDInfo info { configKey , 0, CLRConfig::EEConfig_default };
1084         *value = CLRConfig::GetConfigValue(info);
1085         return true;
1086     }
1087
1088     return false;
1089 }
1090
1091 bool GCToEEInterface::GetStringConfigValue(const char* key, const char** value)
1092 {
1093     CONTRACTL {
1094       NOTHROW;
1095       GC_NOTRIGGER;
1096     } CONTRACTL_END;
1097
1098     WCHAR configKey[MaxConfigKeyLength];
1099     if (MultiByteToWideChar(CP_ACP, 0, key, -1 /* key is null-terminated */, configKey, MaxConfigKeyLength) == 0)
1100     {
1101         // whatever this is... it's not something we care about. (It was too long, wasn't unicode, etc.)
1102         return false;
1103     }
1104
1105     CLRConfig::ConfigStringInfo info { configKey, CLRConfig::EEConfig_default };
1106     LPWSTR out = CLRConfig::GetConfigValue(info);
1107     if (!out)
1108     {
1109         // config not found
1110         return false;
1111     }
1112
1113     // not allocated on the stack since it escapes this function
1114     AStringHolder configResult = new (nothrow) char[MaxConfigValueLength];
1115     if (!configResult)
1116     {
1117         CLRConfig::FreeConfigString(out);
1118         return false;
1119     }
1120
1121     if (WideCharToMultiByte(CP_ACP, 0, out, -1 /* out is null-terminated */, 
1122           configResult.GetValue(), MaxConfigValueLength, nullptr, nullptr) == 0)
1123     {
1124         // this should only happen if the config subsystem gives us a string that's not valid
1125         // unicode.
1126         CLRConfig::FreeConfigString(out);
1127         return false;
1128     }
1129
1130     *value = configResult.Extract();
1131     CLRConfig::FreeConfigString(out);
1132     return true;
1133 }
1134
1135 void GCToEEInterface::FreeStringConfigValue(const char* value)
1136 {
1137     delete [] value;
1138 }
1139
1140 bool GCToEEInterface::IsGCThread()
1141 {
1142     return !!::IsGCThread();
1143 }
1144
1145 bool GCToEEInterface::WasCurrentThreadCreatedByGC()
1146 {
1147     return !!::IsGCSpecialThread();
1148 }
1149
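// Argument blocks for the two kinds of threads the GC can ask the EE to create (see
// GCToEEInterface::CreateThread below): suspendable threads get a full EE Thread object
// and are marked GC-special, while non-suspendable threads are plain utility threads
// identified only by an OS handle. In both cases ThreadStartedEvent lets the creator
// wait until the new thread is done using the argument block.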
1150 struct SuspendableThreadStubArguments
1151 {
1152     void* Argument;
1153     void (*ThreadStart)(void*);
1154     Thread* Thread;
1155     bool HasStarted;
1156     CLREvent ThreadStartedEvent;
1157 };
1158
1159 struct ThreadStubArguments
1160 {
1161     void* Argument;
1162     void (*ThreadStart)(void*);
1163     HANDLE Thread;
1164     bool HasStarted;
1165     CLREvent ThreadStartedEvent;
1166 };
1167
1168 namespace
1169 {
1170     const size_t MaxThreadNameSize = 255;
1171
1172     bool CreateSuspendableThread(
1173         void (*threadStart)(void*),
1174         void* argument,
1175         const char* name)
1176     {
1177         LIMITED_METHOD_CONTRACT;
1178
1179         SuspendableThreadStubArguments args;
1180         args.Argument = argument;
1181         args.ThreadStart = threadStart;
1182         args.Thread = nullptr;
1183         args.HasStarted = false;
1184         if (!args.ThreadStartedEvent.CreateAutoEventNoThrow(FALSE))
1185         {
1186             return false;
1187         }
1188
1189         EX_TRY
1190         {
1191             args.Thread = SetupUnstartedThread(FALSE);
1192         }
1193         EX_CATCH
1194         {
1195         }
1196         EX_END_CATCH(SwallowAllExceptions)
1197
1198         if (!args.Thread)
1199         {
1200             args.ThreadStartedEvent.CloseEvent();
1201             return false;
1202         }
1203
1204         auto threadStub = [](void* argument) -> DWORD
1205         {
1206             SuspendableThreadStubArguments* args = static_cast<SuspendableThreadStubArguments*>(argument);
1207             assert(args != nullptr);
1208
1209             ClrFlsSetThreadType(ThreadType_GC);
1210             args->Thread->SetGCSpecial(true);
1211             STRESS_LOG_RESERVE_MEM(GC_STRESSLOG_MULTIPLY);
1212             args->HasStarted = !!args->Thread->HasStarted(false);
1213
1214             Thread* thread = args->Thread;
1215             auto threadStart = args->ThreadStart;
1216             void* threadArgument = args->Argument;
1217             bool hasStarted = args->HasStarted;
1218             args->ThreadStartedEvent.Set();
1219
1220             // The stubArgs cannot be used once the event is set, since setting it releases the wait in
1221             // the function that created this thread, after which the stubArgs go out of scope.
1222             if (hasStarted)
1223             {
1224                 threadStart(threadArgument);
1225                 DestroyThread(thread);
1226             }
1227
1228             return 0;
1229         };
1230
1231         InlineSString<MaxThreadNameSize> wideName;
1232         const WCHAR* namePtr = nullptr;
1233         EX_TRY
1234         {
1235             if (name != nullptr)
1236             {
1237                 wideName.SetUTF8(name);
1238                 namePtr = wideName.GetUnicode();
1239             }
1240         }
1241         EX_CATCH
1242         {
1243             // we're not obligated to provide a name - if it's not valid,
1244             // just report nullptr as the name.
1245         }
1246         EX_END_CATCH(SwallowAllExceptions)
1247
1248         if (!args.Thread->CreateNewThread(0, threadStub, &args, namePtr))
1249         {
1250             args.Thread->DecExternalCount(FALSE);
1251             args.ThreadStartedEvent.CloseEvent();
1252             return false;
1253         }
1254
1255         args.Thread->SetBackground(TRUE, FALSE);
1256         args.Thread->StartThread();
1257
1258         // Wait for the thread to be in its main loop
1259         uint32_t res = args.ThreadStartedEvent.Wait(INFINITE, FALSE);
1260         args.ThreadStartedEvent.CloseEvent();
1261         _ASSERTE(res == WAIT_OBJECT_0);
1262
1263         if (!args.HasStarted)
1264         {
1265             // The thread has failed to start and the Thread object was destroyed in the Thread::HasStarted
1266             // failure code path.
1267             return false;
1268         }
1269
1270         return true;
1271     }
1272
1273     bool CreateNonSuspendableThread(
1274         void (*threadStart)(void*),
1275         void* argument,
1276         const char* name)
1277     {
1278         LIMITED_METHOD_CONTRACT;
1279
1280         ThreadStubArguments args;
1281         args.Argument = argument;
1282         args.ThreadStart = threadStart;
1283         args.Thread = INVALID_HANDLE_VALUE;
1284         if (!args.ThreadStartedEvent.CreateAutoEventNoThrow(FALSE))
1285         {
1286             return false;
1287         }
1288
1289         auto threadStub = [](void* argument) -> DWORD
1290         {
1291             ThreadStubArguments* args = static_cast<ThreadStubArguments*>(argument);
1292             assert(args != nullptr);
1293
1294             ClrFlsSetThreadType(ThreadType_GC);
1295             STRESS_LOG_RESERVE_MEM(GC_STRESSLOG_MULTIPLY);
1296
1297             args->HasStarted = true;
1298             auto threadStart = args->ThreadStart;
1299             void* threadArgument = args->Argument;
1300             args->ThreadStartedEvent.Set();
1301
1302             // The stub args cannot be used once the event is set, since setting it releases the wait in
1303             // the function that created this thread, after which the stub args go out of scope.
1304             threadStart(threadArgument);
1305             return 0;
1306         };
1307
1308         args.Thread = Thread::CreateUtilityThread(Thread::StackSize_Medium, threadStub, &args);
1309         if (args.Thread == INVALID_HANDLE_VALUE)
1310         {
1311             args.ThreadStartedEvent.CloseEvent();
1312             return false;
1313         }
1314
1315         // Wait for the thread to be in its main loop
1316         uint32_t res = args.ThreadStartedEvent.Wait(INFINITE, FALSE);
1317         args.ThreadStartedEvent.CloseEvent();
1318         _ASSERTE(res == WAIT_OBJECT_0);
1319
1320         CloseHandle(args.Thread);
1321         return true;
1322     }
1323 } // anonymous namespace
1324
1325 bool GCToEEInterface::CreateThread(void (*threadStart)(void*), void* arg, bool is_suspendable, const char* name)
1326 {
1327     LIMITED_METHOD_CONTRACT;
1328     if (is_suspendable)
1329     {
1330         return CreateSuspendableThread(threadStart, arg, name);
1331     }
1332     else
1333     {
1334         return CreateNonSuspendableThread(threadStart, arg, name);
1335     }
1336 }
1337
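// Report the user object of an async-pinned OverlappedDataObject as pinned root(s):
// each element individually when m_userObject is an array (the array object itself is
// not pinned), otherwise m_userObject itself.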
1338 void GCToEEInterface::WalkAsyncPinnedForPromotion(Object* object, ScanContext* sc, promote_func* callback)
1339 {
1340     LIMITED_METHOD_CONTRACT;
1341
1342     assert(object != nullptr);
1343     assert(sc != nullptr);
1344     assert(callback != nullptr);
1345     if (object->GetGCSafeMethodTable() != g_pOverlappedDataClass)
1346     {
1347         // not an overlapped data object - nothing to do.
1348         return;
1349     }
1350
1351     // reporting the pinned user objects
1352     OverlappedDataObject *pOverlapped = (OverlappedDataObject *)object;
1353     if (pOverlapped->m_userObject != NULL)
1354     {
1355         //callback(OBJECTREF_TO_UNCHECKED_OBJECTREF(pOverlapped->m_userObject), (ScanContext *)lp1, GC_CALL_PINNED);
1356         if (pOverlapped->m_isArray)
1357         {
1358             // OverlappedDataObject is very special.  An async pin handle keeps it alive.
1359             // During GC, we also make sure that:
1360             // 1. m_userObject itself does not move if m_userObject is not an array
1361             // 2. every object pointed to by m_userObject does not move if m_userObject is an array
1362             // We do not want to pin m_userObject itself if it is an array, but m_userObject may be updated
1363             // during the relocation phase before OverlappedDataObject does its own relocation, so
1364             // m_userObjectInternal is used to track the location of m_userObject before it is updated.
1365             pOverlapped->m_userObjectInternal = static_cast<void*>(OBJECTREFToObject(pOverlapped->m_userObject));
1366             ArrayBase* pUserObject = (ArrayBase*)OBJECTREFToObject(pOverlapped->m_userObject);
1367             Object **ppObj = (Object**)pUserObject->GetDataPtr(TRUE);
1368             size_t num = pUserObject->GetNumComponents();
1369             for (size_t i = 0; i < num; i++)
1370             {
1371                 callback(ppObj + i, sc, GC_CALL_PINNED);
1372             }
1373         }
1374         else
1375         {
1376             callback(&OBJECTREF_TO_UNCHECKED_OBJECTREF(pOverlapped->m_userObject), (ScanContext *)sc, GC_CALL_PINNED);
1377         }
1378     }
1379
1380     if (pOverlapped->GetAppDomainId() != DefaultADID && pOverlapped->GetAppDomainIndex().m_dwIndex == DefaultADID)
1381     {
1382         OverlappedDataObject::MarkCleanupNeededFromGC();
1383     }
1384 }
1385
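// Diagnostic walk of an async-pinned OverlappedDataObject: invokes the callback for its
// user object and, when that object is an array, for every element of the array.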
1386 void GCToEEInterface::WalkAsyncPinned(Object* object, void* context, void (*callback)(Object*, Object*, void*))
1387 {
1388     LIMITED_METHOD_CONTRACT;
1389
1390     assert(object != nullptr);
1391     assert(callback != nullptr);
1392
1393     if (object->GetGCSafeMethodTable() != g_pOverlappedDataClass)
1394     {
1395         return;
1396     }
1397
1398     OverlappedDataObject *pOverlapped = (OverlappedDataObject *)(object);
1399     if (pOverlapped->m_userObject != NULL)
1400     {
1401         Object * pUserObject = OBJECTREFToObject(pOverlapped->m_userObject);
1402         callback(object, pUserObject, context);
1403         if (pOverlapped->m_isArray)
1404         {
1405             ArrayBase* pUserArrayObject = (ArrayBase*)pUserObject;
1406             Object **pObj = (Object**)pUserArrayObject->GetDataPtr(TRUE);
1407             size_t num = pUserArrayObject->GetNumComponents();
1408             for (size_t i = 0; i < num; i ++)
1409             {
1410                 callback(pUserObject, pObj[i], context);
1411             }
1412         }
1413     }
1414 }
1415
1416 IGCToCLREventSink* GCToEEInterface::EventSink()
1417 {
1418     LIMITED_METHOD_CONTRACT;
1419
1420     return &g_gcToClrEventSink;
1421 }