Merge pull request #23537 from janvorli/remove-gc-cpu-group-knowledge
[platform/upstream/coreclr.git] / src/vm/gcenv.ee.cpp
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.

/*
 * GCENV.EE.CPP
 *
 * GCToEEInterface implementation
 *
 */

void GCToEEInterface::SuspendEE(SUSPEND_REASON reason)
{
    WRAPPER_NO_CONTRACT;

    static_assert_no_msg(SUSPEND_FOR_GC == (int)ThreadSuspend::SUSPEND_FOR_GC);
    static_assert_no_msg(SUSPEND_FOR_GC_PREP == (int)ThreadSuspend::SUSPEND_FOR_GC_PREP);

    _ASSERTE(reason == SUSPEND_FOR_GC || reason == SUSPEND_FOR_GC_PREP);

    g_pDebugInterface->SuspendForGarbageCollectionStarted();

    ThreadSuspend::SuspendEE((ThreadSuspend::SUSPEND_REASON)reason);

    g_pDebugInterface->SuspendForGarbageCollectionCompleted();
}

void GCToEEInterface::RestartEE(bool bFinishedGC)
{
    WRAPPER_NO_CONTRACT;

    g_pDebugInterface->ResumeForGarbageCollectionStarted();

    ThreadSuspend::RestartEE(bFinishedGC, TRUE);
}
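
// Illustrative sketch (not part of this file): how the GC side is expected to pair the
// two callbacks above around a stop-the-world collection. The function below is
// hypothetical and only shows the pairing; the real call sites live in the GC
// implementation behind this interface.
//
//   void GarbageCollectExample(GCToEEInterface* ee)
//   {
//       ee->SuspendEE(SUSPEND_FOR_GC);          // stop managed threads, notify the debugger
//       // ... mark, plan, relocate, compact ...
//       ee->RestartEE(true /* bFinishedGC */);  // notify the debugger, resume managed threads
//   }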

VOID GCToEEInterface::SyncBlockCacheWeakPtrScan(HANDLESCANPROC scanProc, uintptr_t lp1, uintptr_t lp2)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    SyncBlockCache::GetSyncBlockCache()->GCWeakPtrScan(scanProc, lp1, lp2);
}

// The EE can perform post-stack-scanning actions while the
// user threads are still suspended.
VOID GCToEEInterface::AfterGcScanRoots (int condemned, int max_gen,
                                   ScanContext* sc)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

#ifdef FEATURE_COMINTEROP
    // Go through the one and only app domain and detach all the *unmarked* RCWs to prevent
    // the RCW cache from resurrecting them.
    ::GetAppDomain()->DetachRCWs();
#endif // FEATURE_COMINTEROP
}

/*
 * Scan all stack roots
 */

static void ScanStackRoots(Thread * pThread, promote_func* fn, ScanContext* sc)
{
    GCCONTEXT   gcctx;

    gcctx.f  = fn;
    gcctx.sc = sc;
    gcctx.cf = NULL;

    ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE();

    // Either we are in a concurrent situation (in which case the thread is unknown to
    // us), or we are performing a synchronous GC and we are the GC thread, holding
    // the threadstore lock.

    _ASSERTE(dbgOnly_IsSpecialEEThread() ||
                GetThread() == NULL ||
                // this is for background GC threads, which always call this when the EE is suspended
                IsGCSpecialThread() ||
                (GetThread() == ThreadSuspend::GetSuspensionThread() && ThreadStore::HoldingThreadStore()));

    pThread->SetHasPromotedBytes();

    Frame* pTopFrame = pThread->GetFrame();
    Object ** topStack = (Object **)pTopFrame;
    if ((pTopFrame != ((Frame*)-1))
        && (pTopFrame->GetVTablePtr() == InlinedCallFrame::GetMethodFrameVPtr())) {
        // It is an InlinedCallFrame. Get the SP from it.
        InlinedCallFrame* pInlinedFrame = (InlinedCallFrame*)pTopFrame;
        topStack = (Object **)pInlinedFrame->GetCallSiteSP();
    }

    sc->stack_limit = (uintptr_t)topStack;

#ifdef FEATURE_CONSERVATIVE_GC
    if (g_pConfig->GetGCConservative())
    {
        // Conservative stack root reporting:
        // we will treat everything on the stack as a pinned interior GC pointer.
        // Since we report everything as pinned, we don't need to run the following code
        // during the relocation phase.
        if (sc->promotion)
        {
            Object ** bottomStack = (Object **) pThread->GetCachedStackBase();
            Object ** walk;
            for (walk = topStack; walk < bottomStack; walk ++)
            {
                if (((void*)*walk > (void*)bottomStack || (void*)*walk < (void*)topStack) &&
                    ((void*)*walk >= (void*)g_lowest_address && (void*)*walk <= (void*)g_highest_address)
                    )
                {
                    //DbgPrintf("promote " FMT_ADDR " : " FMT_ADDR "\n", walk, *walk);
                    fn(walk, sc, GC_CALL_INTERIOR|GC_CALL_PINNED);
                }
            }
        }

        // Also ask the explicit Frames to report any references they might know about.
        // Generally these will be a subset of the objects reported below, but there's
        // nothing that guarantees that, and in the specific case of a GC protect frame the
        // references it protects may live at a lower address than the frame itself (and
        // thus escape the stack range we scanned above).
        Frame *pFrame = pThread->GetFrame();
        while (pFrame != FRAME_TOP)
        {
            pFrame->GcScanRoots(fn, sc);
            pFrame = pFrame->PtrNextFrame();
        }
    }
    else
#endif
    {
        unsigned flagsStackWalk = ALLOW_ASYNC_STACK_WALK | ALLOW_INVALID_OBJECTS;
#if defined(WIN64EXCEPTIONS)
        flagsStackWalk |= GC_FUNCLET_REFERENCE_REPORTING;
#endif // defined(WIN64EXCEPTIONS)
        pThread->StackWalkFrames( GcStackCrawlCallBack, &gcctx, flagsStackWalk);
    }
}
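
// Illustrative sketch (not part of this file): what the conservative filter above
// accepts. A stack slot is reported when its value lies inside the GC heap bounds but
// outside the stack range being scanned; all addresses below are made up.
//
//   g_lowest_address = 0x10000000, g_highest_address = 0x40000000
//   topStack = 0x7fff1000, bottomStack = 0x7fff8000
//
//   *walk == 0x20001008 -> reported via fn(walk, sc, GC_CALL_INTERIOR|GC_CALL_PINNED);
//                          it may point into the middle of an object (hence INTERIOR)
//                          and is pinned so the referenced object is never relocated
//   *walk == 0x7fff2000 -> skipped (points into the stack range itself)
//   *walk == 0x00001234 -> skipped (outside the GC heap bounds)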

void GCToEEInterface::GcScanRoots(promote_func* fn, int condemned, int max_gen, ScanContext* sc)
{
    STRESS_LOG1(LF_GCROOTS, LL_INFO10, "GCScan: Promotion Phase = %d\n", sc->promotion);

    // In server GC, we should be competing for marking the statics
    if (GCHeapUtilities::MarkShouldCompeteForStatics())
    {
        if (condemned == max_gen && sc->promotion)
        {
            SystemDomain::EnumAllStaticGCRefs(fn, sc);
        }
    }

    Thread* pThread = NULL;
    while ((pThread = ThreadStore::GetThreadList(pThread)) != NULL)
    {
        STRESS_LOG2(LF_GC | LF_GCROOTS, LL_INFO100, "{ Starting scan of Thread %p ID = %x\n", pThread, pThread->GetThreadId());

        if (GCHeapUtilities::GetGCHeap()->IsThreadUsingAllocationContextHeap(
            pThread->GetAllocContext(), sc->thread_number))
        {
            sc->thread_under_crawl = pThread;
#ifdef FEATURE_EVENT_TRACE
            sc->dwEtwRootKind = kEtwGCRootKindStack;
#endif // FEATURE_EVENT_TRACE
            ScanStackRoots(pThread, fn, sc);
#ifdef FEATURE_EVENT_TRACE
            sc->dwEtwRootKind = kEtwGCRootKindOther;
#endif // FEATURE_EVENT_TRACE
        }
        STRESS_LOG2(LF_GC | LF_GCROOTS, LL_INFO100, "Ending scan of Thread %p ID = 0x%x }\n", pThread, pThread->GetThreadId());
    }
}

void GCToEEInterface::GcStartWork (int condemned, int max_gen)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

#ifdef VERIFY_HEAP
    // Validate byrefs pinned by IL stubs since the last GC.
    StubHelpers::ProcessByrefValidationList();
#endif // VERIFY_HEAP

    ExecutionManager::CleanupCodeHeaps();

#ifdef FEATURE_EVENT_TRACE
    ETW::TypeSystemLog::Cleanup();
#endif

#ifdef FEATURE_COMINTEROP
    //
    // Let the GC detect managed/native cycles with input from Jupiter.
    // Jupiter will
    // 1. Report references from RCWs to CCWs based on native references in Jupiter
    // 2. Identify the subset of CCWs that need to be rooted
    //
    // We'll build the references from RCWs to CCWs using
    // 1. Preallocated arrays
    // 2. Dependent handles
    //
    RCWWalker::OnGCStarted(condemned);
#endif // FEATURE_COMINTEROP

    if (condemned == max_gen)
    {
        ThreadStore::s_pThreadStore->OnMaxGenerationGCStarted();
    }
}

void GCToEEInterface::GcDone(int condemned)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

#ifdef FEATURE_COMINTEROP
    //
    // Tell Jupiter GC has finished
    //
    RCWWalker::OnGCFinished(condemned);
#endif // FEATURE_COMINTEROP
}

bool GCToEEInterface::RefCountedHandleCallbacks(Object * pObject)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

#ifdef FEATURE_COMINTEROP
    //<REVISIT_TODO>@todo optimize the access to the ref-count
    ComCallWrapper* pWrap = ComCallWrapper::GetWrapperForObject((OBJECTREF)pObject);

    return pWrap != NULL && pWrap->IsWrapperActive();
#else
    return false;
#endif
}

void GCToEEInterface::GcBeforeBGCSweepWork()
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

#ifdef VERIFY_HEAP
    // Validate byrefs pinned by IL stubs since the last GC.
    StubHelpers::ProcessByrefValidationList();
#endif // VERIFY_HEAP
}

void GCToEEInterface::SyncBlockCacheDemote(int max_gen)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    SyncBlockCache::GetSyncBlockCache()->GCDone(TRUE, max_gen);
}

void GCToEEInterface::SyncBlockCachePromotionsGranted(int max_gen)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    SyncBlockCache::GetSyncBlockCache()->GCDone(FALSE, max_gen);
}

uint32_t GCToEEInterface::GetActiveSyncBlockCount()
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    return SyncBlockCache::GetSyncBlockCache()->GetActiveCount();
}

gc_alloc_context * GCToEEInterface::GetAllocContext()
{
    WRAPPER_NO_CONTRACT;

    Thread* pThread = ::GetThread();
    if (!pThread)
    {
        return nullptr;
    }

    return pThread->GetAllocContext();
}

void GCToEEInterface::GcEnumAllocContexts(enum_alloc_context_func* fn, void* param)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    if (GCHeapUtilities::UseThreadAllocationContexts())
    {
        Thread * pThread = NULL;
        while ((pThread = ThreadStore::GetThreadList(pThread)) != NULL)
        {
            fn(pThread->GetAllocContext(), param);
        }
    }
    else
    {
        fn(&g_global_alloc_context, param);
    }
}
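
// Illustrative sketch (not part of this file): the shape of an enum_alloc_context_func
// callback that GcEnumAllocContexts hands contexts to. The counting callback below is
// hypothetical; it would be invoked once per thread context when thread allocation
// contexts are in use, or exactly once for g_global_alloc_context otherwise.
//
//   static void CountAllocContexts(gc_alloc_context* context, void* param)
//   {
//       (*static_cast<size_t*>(param))++;
//   }
//
//   // size_t count = 0;
//   // GcEnumAllocContexts(&CountAllocContexts, &count);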


uint8_t* GCToEEInterface::GetLoaderAllocatorObjectForGC(Object* pObject)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    return pObject->GetGCSafeMethodTable()->GetLoaderAllocatorObjectForGC();
}

bool GCToEEInterface::IsPreemptiveGCDisabled()
{
    WRAPPER_NO_CONTRACT;

    Thread* pThread = ::GetThread();
    if (pThread)
    {
        return !!pThread->PreemptiveGCDisabled();
    }

    return false;
}

bool GCToEEInterface::EnablePreemptiveGC()
{
    WRAPPER_NO_CONTRACT;

    bool bToggleGC = false;
    Thread* pThread = ::GetThread();

    if (pThread)
    {
        bToggleGC = !!pThread->PreemptiveGCDisabled();
        if (bToggleGC)
        {
            pThread->EnablePreemptiveGC();
        }
    }

    return bToggleGC;
}

void GCToEEInterface::DisablePreemptiveGC()
{
    WRAPPER_NO_CONTRACT;

    Thread* pThread = ::GetThread();
    if (pThread)
    {
        pThread->DisablePreemptiveGC();
    }
}

Thread* GCToEEInterface::GetThread()
{
    WRAPPER_NO_CONTRACT;

    return ::GetThread();
}

struct BackgroundThreadStubArgs
{
    Thread* thread;
    GCBackgroundThreadFunction threadStart;
    void* arg;
    CLREvent threadStartedEvent;
    bool hasStarted;
};

DWORD WINAPI BackgroundThreadStub(void* arg)
{
    BackgroundThreadStubArgs* stubArgs = (BackgroundThreadStubArgs*)arg;
    assert (stubArgs->thread != NULL);

    ClrFlsSetThreadType (ThreadType_GC);
    stubArgs->thread->SetGCSpecial(true);
    STRESS_LOG_RESERVE_MEM (GC_STRESSLOG_MULTIPLY);

    stubArgs->hasStarted = !!stubArgs->thread->HasStarted(FALSE);

    Thread* thread = stubArgs->thread;
    GCBackgroundThreadFunction realThreadStart = stubArgs->threadStart;
    void* realThreadArg = stubArgs->arg;
    bool hasStarted = stubArgs->hasStarted;

    stubArgs->threadStartedEvent.Set();
    // The stubArgs cannot be used once the event is set: setting it releases the wait
    // in the function that created this thread, after which stubArgs goes out of scope.

    DWORD result = 0;

    if (hasStarted)
    {
        result = realThreadStart(realThreadArg);
        DestroyThread(thread);
    }

    return result;
}
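
// Note on the handshake above (an illustrative summary, not part of this file): the
// creator of this thread keeps a BackgroundThreadStubArgs on its own stack and blocks
// on threadStartedEvent, so everything the stub needs after the Set() call must be
// copied into locals first:
//
//   creator thread                          stub thread
//   --------------------------------        ----------------------------------------
//   stubArgs lives on creator stack         copies thread/threadStart/arg/hasStarted
//   create thread, Wait() on event   <----  stubArgs->threadStartedEvent.Set()
//   returns; stubArgs is destroyed          uses only the local copies from here on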

//
// Diagnostics code
//

#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
inline BOOL ShouldTrackMovementForProfilerOrEtw()
{
#ifdef GC_PROFILING
    if (CORProfilerTrackGC())
        return true;
#endif

#ifdef FEATURE_EVENT_TRACE
    if (ETW::GCLog::ShouldTrackMovementForEtw())
        return true;
#endif

    return false;
}
#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)

void ProfScanRootsHelper(Object** ppObject, ScanContext *pSC, uint32_t dwFlags)
{
#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
    Object *pObj = *ppObject;
    if (dwFlags & GC_CALL_INTERIOR)
    {
        pObj = GCHeapUtilities::GetGCHeap()->GetContainingObject(pObj, true);
        if (pObj == nullptr)
            return;
    }
    ScanRootsHelper(pObj, ppObject, pSC, dwFlags);
#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
}

// TODO - at some point we would like to completely decouple profiling
// from ETW tracing using a pattern similar to this, where the
// ProfilingScanContext has flags about whether or not certain things
// should be tracked, and each one of these ProfilerShouldXYZ functions
// will check these flags and determine what to do based upon that.
// GCProfileWalkHeapWorker can, in turn, call those methods without fear
// of things being ifdef'd out.

// Returns TRUE if GC profiling is enabled and the profiler
// should scan dependent handles, FALSE otherwise.
BOOL ProfilerShouldTrackConditionalWeakTableElements()
{
#if defined(GC_PROFILING)
    return CORProfilerTrackConditionalWeakTableElements();
#else
    return FALSE;
#endif // defined (GC_PROFILING)
}

// If GC profiling is enabled, informs the profiler that we are done
// tracing dependent handles.
void ProfilerEndConditionalWeakTableElementReferences(void* heapId)
{
#if defined (GC_PROFILING)
    g_profControlBlock.pProfInterface->EndConditionalWeakTableElementReferences(heapId);
#else
    UNREFERENCED_PARAMETER(heapId);
#endif // defined (GC_PROFILING)
}

// If GC profiling is enabled, informs the profiler that we are done
// tracing root references.
void ProfilerEndRootReferences2(void* heapId)
{
#if defined (GC_PROFILING)
    g_profControlBlock.pProfInterface->EndRootReferences2(heapId);
#else
    UNREFERENCED_PARAMETER(heapId);
#endif // defined (GC_PROFILING)
}

void GcScanRootsForProfilerAndETW(promote_func* fn, int condemned, int max_gen, ScanContext* sc)
{
    Thread* pThread = NULL;
    while ((pThread = ThreadStore::GetThreadList(pThread)) != NULL)
    {
        sc->thread_under_crawl = pThread;
#ifdef FEATURE_EVENT_TRACE
        sc->dwEtwRootKind = kEtwGCRootKindStack;
#endif // FEATURE_EVENT_TRACE
        ScanStackRoots(pThread, fn, sc);
#ifdef FEATURE_EVENT_TRACE
        sc->dwEtwRootKind = kEtwGCRootKindOther;
#endif // FEATURE_EVENT_TRACE
    }
}

void ScanHandleForProfilerAndETW(Object** pRef, Object* pSec, uint32_t flags, ScanContext* context, bool isDependent)
{
    ProfilingScanContext* pSC = (ProfilingScanContext*)context;

#ifdef GC_PROFILING
    // Give the profiler the objectref.
    if (pSC->fProfilerPinned)
    {
        if (!isDependent)
        {
            BEGIN_PIN_PROFILER(CORProfilerTrackGC());
            g_profControlBlock.pProfInterface->RootReference2(
                (uint8_t *)*pRef,
                kEtwGCRootKindHandle,
                (EtwGCRootFlags)flags,
                pRef,
                &pSC->pHeapId);
            END_PIN_PROFILER();
        }
        else
        {
            BEGIN_PIN_PROFILER(CORProfilerTrackConditionalWeakTableElements());
            g_profControlBlock.pProfInterface->ConditionalWeakTableElementReference(
                (uint8_t*)*pRef,
                (uint8_t*)pSec,
                pRef,
                &pSC->pHeapId);
            END_PIN_PROFILER();
        }
    }
#endif // GC_PROFILING

#if defined(FEATURE_EVENT_TRACE)
    // Notify ETW of the handle
    if (ETW::GCLog::ShouldWalkHeapRootsForEtw())
    {
        ETW::GCLog::RootReference(
            pRef,
            *pRef,          // object being rooted
            pSec,           // pSecondaryNodeForDependentHandle
            isDependent,
            pSC,
            0,              // dwGCFlags,
            flags);         // ETW handle flags
    }
#endif // defined(FEATURE_EVENT_TRACE)
}

// This is called only if we've determined that either:
//     a) The Profiling API wants to do a walk of the heap, and it has pinned the
//     profiler in place (so it cannot be detached), and it's thus safe to call into the
//     profiler, OR
//     b) ETW infrastructure wants to do a walk of the heap either to log roots,
//     objects, or both.
// This can also be called to do a single walk for BOTH a) and b) simultaneously.  Since
// ETW can ask for roots but not objects (and vice versa), each stage below is gated by
// its own flag.
#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
void GCProfileWalkHeapWorker(BOOL fProfilerPinned, BOOL fShouldWalkHeapRootsForEtw, BOOL fShouldWalkHeapObjectsForEtw)
{
    {
        ProfilingScanContext SC(fProfilerPinned);
        unsigned max_generation = GCHeapUtilities::GetGCHeap()->GetMaxGeneration();

        // **** Scan roots: only scan roots if the profiling API wants them or ETW wants them.
        if (fProfilerPinned || fShouldWalkHeapRootsForEtw)
        {
            GcScanRootsForProfilerAndETW(&ProfScanRootsHelper, max_generation, max_generation, &SC);
            SC.dwEtwRootKind = kEtwGCRootKindFinalizer;
            GCHeapUtilities::GetGCHeap()->DiagScanFinalizeQueue(&ProfScanRootsHelper, &SC);

            // Handles are kept independent of wks/svr/concurrent builds
            SC.dwEtwRootKind = kEtwGCRootKindHandle;
            GCHeapUtilities::GetGCHeap()->DiagScanHandles(&ScanHandleForProfilerAndETW, max_generation, &SC);

            // Indicate that regular handle scanning is over, so we can flush the buffered roots
            // to the profiler.  (This is for profapi only.  ETW will flush after the
            // entire heap walk is complete, via ETW::GCLog::EndHeapDump.)
            if (fProfilerPinned)
            {
                ProfilerEndRootReferences2(&SC.pHeapId);
            }
        }

        // **** Scan dependent handles: only if the profiler supports it or ETW wants roots
        if ((fProfilerPinned && ProfilerShouldTrackConditionalWeakTableElements()) ||
            fShouldWalkHeapRootsForEtw)
        {
            // GcScanDependentHandlesForProfiler double-checks
            // CORProfilerTrackConditionalWeakTableElements() before calling into the profiler

            ProfilingScanContext* pSC = &SC;

            // We'll re-use pHeapId (which was either unused (0) or freed by EndRootReferences2
            // (-1)), so reset it to NULL.
            _ASSERTE((*((size_t *)(&pSC->pHeapId)) == (size_t)(-1)) ||
                    (*((size_t *)(&pSC->pHeapId)) == (size_t)(0)));
            pSC->pHeapId = NULL;

            GCHeapUtilities::GetGCHeap()->DiagScanDependentHandles(&ScanHandleForProfilerAndETW, max_generation, &SC);

            // Indicate that dependent handle scanning is over, so we can flush the buffered roots
            // to the profiler.  (This is for profapi only.  ETW will flush after the
            // entire heap walk is complete, via ETW::GCLog::EndHeapDump.)
            if (fProfilerPinned && ProfilerShouldTrackConditionalWeakTableElements())
            {
                ProfilerEndConditionalWeakTableElementReferences(&SC.pHeapId);
            }
        }

        ProfilerWalkHeapContext profilerWalkHeapContext(fProfilerPinned, SC.pvEtwContext);

        // **** Walk objects on heap: only if the profiling API wants them or ETW wants them.
        if (fProfilerPinned || fShouldWalkHeapObjectsForEtw)
        {
            GCHeapUtilities::GetGCHeap()->DiagWalkHeap(&HeapWalkHelper, &profilerWalkHeapContext, max_generation, true /* walk the large object heap */);
        }

#ifdef FEATURE_EVENT_TRACE
        // **** Done! Indicate to the ETW helpers that the heap walk is done, so any buffers
        // should be flushed into the ETW stream
        if (fShouldWalkHeapObjectsForEtw || fShouldWalkHeapRootsForEtw)
        {
            ETW::GCLog::EndHeapDump(&profilerWalkHeapContext);
        }
#endif // FEATURE_EVENT_TRACE
    }
}
#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)

void GCProfileWalkHeap()
{
    BOOL fWalkedHeapForProfiler = FALSE;

#ifdef FEATURE_EVENT_TRACE
    if (ETW::GCLog::ShouldWalkStaticsAndCOMForEtw())
        ETW::GCLog::WalkStaticsAndCOMForETW();

    BOOL fShouldWalkHeapRootsForEtw = ETW::GCLog::ShouldWalkHeapRootsForEtw();
    BOOL fShouldWalkHeapObjectsForEtw = ETW::GCLog::ShouldWalkHeapObjectsForEtw();
#else // !FEATURE_EVENT_TRACE
    BOOL fShouldWalkHeapRootsForEtw = FALSE;
    BOOL fShouldWalkHeapObjectsForEtw = FALSE;
#endif // FEATURE_EVENT_TRACE

#if defined (GC_PROFILING)
    {
        BEGIN_PIN_PROFILER(CORProfilerTrackGC());
        GCProfileWalkHeapWorker(TRUE /* fProfilerPinned */, fShouldWalkHeapRootsForEtw, fShouldWalkHeapObjectsForEtw);
        fWalkedHeapForProfiler = TRUE;
        END_PIN_PROFILER();
    }
#endif // defined (GC_PROFILING)

#if defined (GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
    // We may still need to walk the heap if either GC_PROFILING or FEATURE_EVENT_TRACE
    // is defined, since both of them make use of the walk heap worker.
    if (!fWalkedHeapForProfiler &&
        (fShouldWalkHeapRootsForEtw || fShouldWalkHeapObjectsForEtw))
    {
        GCProfileWalkHeapWorker(FALSE /* fProfilerPinned */, fShouldWalkHeapRootsForEtw, fShouldWalkHeapObjectsForEtw);
    }
#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
}

void WalkFReachableObjects(bool isCritical, void* objectID)
{
    g_profControlBlock.pProfInterface->FinalizeableObjectQueued(isCritical, (ObjectID)objectID);
}

static fq_walk_fn g_FQWalkFn = &WalkFReachableObjects;

void GCToEEInterface::DiagGCStart(int gen, bool isInduced)
{
#ifdef GC_PROFILING
    DiagUpdateGenerationBounds();
    GarbageCollectionStartedCallback(gen, isInduced);
    {
        BEGIN_PIN_PROFILER(CORProfilerTrackGC());
        size_t context = 0;

        // When we're walking objects allocated by class, we don't want to walk the large
        // object heap, because that would count things that may have been around for a while.
        GCHeapUtilities::GetGCHeap()->DiagWalkHeap(&AllocByClassHelper, (void *)&context, 0, false);

        // Notify that we've reached the end of the Gen 0 scan
        g_profControlBlock.pProfInterface->EndAllocByClass(&context);
        END_PIN_PROFILER();
    }

#endif // GC_PROFILING
}

void GCToEEInterface::DiagUpdateGenerationBounds()
{
#ifdef GC_PROFILING
    if (CORProfilerTrackGC() || CORProfilerTrackBasicGC())
        UpdateGenerationBounds();
#endif // GC_PROFILING
}

void GCToEEInterface::DiagGCEnd(size_t index, int gen, int reason, bool fConcurrent)
{
#ifdef GC_PROFILING
    // We were only doing generation bounds and the GC finish callback for non-concurrent
    // GCs, so we keep that behavior to avoid breaking profilers. But if BasicGC
    // monitoring is enabled, we do these for all GCs.
    if (!fConcurrent)
    {
        GCProfileWalkHeap();
    }

    if (CORProfilerTrackBasicGC() || (!fConcurrent && CORProfilerTrackGC()))
    {
        DiagUpdateGenerationBounds();
        GarbageCollectionFinishedCallback();
    }
#endif // GC_PROFILING
}

void GCToEEInterface::DiagWalkFReachableObjects(void* gcContext)
{
#ifdef GC_PROFILING
    if (CORProfilerTrackGC())
    {
        BEGIN_PIN_PROFILER(CORProfilerPresent());
        GCHeapUtilities::GetGCHeap()->DiagWalkFinalizeQueue(gcContext, g_FQWalkFn);
        END_PIN_PROFILER();
    }
#endif //GC_PROFILING
}

// Note on the last parameter: when calling this for BGC, only ETW
// should be sending these events so that existing profapi profilers
// don't get confused.
void WalkMovedReferences(uint8_t* begin, uint8_t* end,
                         ptrdiff_t reloc,
                         void* context,
                         bool fCompacting,
                         bool fBGC)
{
    ETW::GCLog::MovedReference(begin, end,
                               (fCompacting ? reloc : 0),
                               (size_t)context,
                               fCompacting,
                               !fBGC);
}

void GCToEEInterface::DiagWalkSurvivors(void* gcContext)
{
#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
    if (ShouldTrackMovementForProfilerOrEtw())
    {
        size_t context = 0;
        ETW::GCLog::BeginMovedReferences(&context);
        GCHeapUtilities::GetGCHeap()->DiagWalkSurvivorsWithType(gcContext, &WalkMovedReferences, (void*)context, walk_for_gc);
        ETW::GCLog::EndMovedReferences(context);
    }
#endif //GC_PROFILING || FEATURE_EVENT_TRACE
}

void GCToEEInterface::DiagWalkLOHSurvivors(void* gcContext)
{
#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
    if (ShouldTrackMovementForProfilerOrEtw())
    {
        size_t context = 0;
        ETW::GCLog::BeginMovedReferences(&context);
        GCHeapUtilities::GetGCHeap()->DiagWalkSurvivorsWithType(gcContext, &WalkMovedReferences, (void*)context, walk_for_loh);
        ETW::GCLog::EndMovedReferences(context);
    }
#endif //GC_PROFILING || FEATURE_EVENT_TRACE
}

void GCToEEInterface::DiagWalkBGCSurvivors(void* gcContext)
{
#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
    if (ShouldTrackMovementForProfilerOrEtw())
    {
        size_t context = 0;
        ETW::GCLog::BeginMovedReferences(&context);
        GCHeapUtilities::GetGCHeap()->DiagWalkSurvivorsWithType(gcContext, &WalkMovedReferences, (void*)context, walk_for_bgc);
        ETW::GCLog::EndMovedReferences(context);
    }
#endif //GC_PROFILING || FEATURE_EVENT_TRACE
}

void GCToEEInterface::StompWriteBarrier(WriteBarrierParameters* args)
{
    int stompWBCompleteActions = SWB_PASS;
    bool is_runtime_suspended = false;

    assert(args != nullptr);
    switch (args->operation)
    {
    case WriteBarrierOp::StompResize:
        // StompResize requires a new card table, a new lowest address, and
        // a new highest address
        assert(args->card_table != nullptr);
        assert(args->lowest_address != nullptr);
        assert(args->highest_address != nullptr);

        g_card_table = args->card_table;

#ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
        assert(args->card_bundle_table != nullptr);
        g_card_bundle_table = args->card_bundle_table;
#endif

#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
        if (g_sw_ww_enabled_for_gc_heap && (args->write_watch_table != nullptr))
        {
            assert(args->is_runtime_suspended);
            g_sw_ww_table = args->write_watch_table;
        }
#endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP

        stompWBCompleteActions |= ::StompWriteBarrierResize(args->is_runtime_suspended, args->requires_upper_bounds_check);

        // We need to make sure that other threads executing checked write barriers
        // will see the g_card_table update before the g_lowest/highest_address updates.
        // Otherwise, the checked write barrier may AV while accessing the old card table
        // with an address that it does not cover. (See the illustrative sketch after
        // this function.)
        //
        // Even x86's total store ordering is insufficient here because threads reading
        // g_card_table do so via the instruction cache, whereas g_lowest/highest_address
        // are read via the data cache.
        //
        // The g_card_table update is covered by section 8.1.3 of the Intel Software
        // Development Manual, Volume 3A (System Programming Guide, Part 1), about
        // "cross-modifying code": we need all _executing_ threads to invalidate
        // their instruction cache, which FlushProcessWriteBuffers achieves by sending
        // an IPI (inter-processor interrupt).

        if (stompWBCompleteActions & SWB_ICACHE_FLUSH)
        {
            // flush the icache on the current processor (thread)
            ::FlushWriteBarrierInstructionCache();
            // ask other processors (threads) to invalidate their icache
            FlushProcessWriteBuffers();
        }

        g_lowest_address = args->lowest_address;
        VolatileStore(&g_highest_address, args->highest_address);

#if defined(_ARM64_) || defined(_ARM_)
        // Need to update the barrier again for the changes to g_highest_address and g_lowest_address
        is_runtime_suspended = (stompWBCompleteActions & SWB_EE_RESTART) || args->is_runtime_suspended;
        stompWBCompleteActions |= ::StompWriteBarrierResize(is_runtime_suspended, args->requires_upper_bounds_check);

#ifdef _ARM_
        if (stompWBCompleteActions & SWB_ICACHE_FLUSH)
        {
            ::FlushWriteBarrierInstructionCache();
        }
#endif

        is_runtime_suspended = (stompWBCompleteActions & SWB_EE_RESTART) || args->is_runtime_suspended;
        if (!is_runtime_suspended)
        {
            // If the runtime is not suspended, force the updated state to be visible to all threads
            MemoryBarrier();
        }
#endif
        if (stompWBCompleteActions & SWB_EE_RESTART)
        {
            assert(!args->is_runtime_suspended &&
                "if the runtime was suspended in the patching routines, then it was in a running state at the beginning");
            ThreadSuspend::RestartEE(FALSE, TRUE);
        }
        return; // unlike the other branches, we have already done the cleanup, so bail out here
    case WriteBarrierOp::StompEphemeral:
        // StompEphemeral requires a new ephemeral low and a new ephemeral high
        assert(args->ephemeral_low != nullptr);
        assert(args->ephemeral_high != nullptr);
        g_ephemeral_low = args->ephemeral_low;
        g_ephemeral_high = args->ephemeral_high;
        stompWBCompleteActions |= ::StompWriteBarrierEphemeral(args->is_runtime_suspended);
        break;
    case WriteBarrierOp::Initialize:
        // This operation should only be invoked once, upon initialization.
        assert(g_card_table == nullptr);
        assert(g_lowest_address == nullptr);
        assert(g_highest_address == nullptr);
        assert(args->card_table != nullptr);
        assert(args->lowest_address != nullptr);
        assert(args->highest_address != nullptr);
        assert(args->ephemeral_low != nullptr);
        assert(args->ephemeral_high != nullptr);
        assert(args->is_runtime_suspended && "the runtime must be suspended here!");
        assert(!args->requires_upper_bounds_check && "the ephemeral generation must be at the top of the heap!");

        g_card_table = args->card_table;

#ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
        assert(g_card_bundle_table == nullptr);
        g_card_bundle_table = args->card_bundle_table;
#endif

        g_lowest_address = args->lowest_address;
        g_highest_address = args->highest_address;
        stompWBCompleteActions |= ::StompWriteBarrierResize(true, false);

        // StompWriteBarrierResize does not necessarily bash g_ephemeral_low
        // usages, so we must do so here. This is particularly true on x86,
        // where StompWriteBarrierResize will not bash g_ephemeral_low when
        // called with the parameters (true, false), as it is above.
        g_ephemeral_low = args->ephemeral_low;
        g_ephemeral_high = args->ephemeral_high;
        stompWBCompleteActions |= ::StompWriteBarrierEphemeral(true);
        break;
    case WriteBarrierOp::SwitchToWriteWatch:
#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
        assert(args->write_watch_table != nullptr);
        assert(args->is_runtime_suspended && "the runtime must be suspended here!");
        g_sw_ww_table = args->write_watch_table;
        g_sw_ww_enabled_for_gc_heap = true;
        stompWBCompleteActions |= ::SwitchToWriteWatchBarrier(true);
#else
        assert(!"should never be called without FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP");
#endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
        break;
    case WriteBarrierOp::SwitchToNonWriteWatch:
#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
        assert(args->is_runtime_suspended && "the runtime must be suspended here!");
        g_sw_ww_table = 0;
        g_sw_ww_enabled_for_gc_heap = false;
        stompWBCompleteActions |= ::SwitchToNonWriteWatchBarrier(true);
#else
        assert(!"should never be called without FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP");
#endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
        break;
    default:
        assert(!"unknown WriteBarrierOp enum");
    }
    if (stompWBCompleteActions & SWB_ICACHE_FLUSH)
    {
        ::FlushWriteBarrierInstructionCache();
    }
    if (stompWBCompleteActions & SWB_EE_RESTART)
    {
        assert(!args->is_runtime_suspended &&
            "if the runtime was suspended in the patching routines, then it was in a running state at the beginning");
        ThreadSuspend::RestartEE(FALSE, TRUE);
    }
}
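
// Illustrative sketch (not part of this file), referenced from the ordering comment in
// StompWriteBarrier above. A checked write barrier conceptually does the following, so
// if a thread observed the new (wider) g_lowest/highest_address bounds together with
// the old card table, an address in the newly added heap range would pass the bounds
// check but index past the end of the old table and AV. The barrier shape and the
// card_byte_shift constant below are hypothetical:
//
//   void CheckedWriteBarrierExample(Object** dst, Object* ref)
//   {
//       *dst = ref;
//       if ((uint8_t*)dst >= g_lowest_address && (uint8_t*)dst < g_highest_address)
//       {
//           g_card_table[((size_t)dst - (size_t)g_lowest_address) >> card_byte_shift] = 0xFF;
//       }
//   }
//
// Publishing g_card_table first (plus the icache flush / IPI above) ensures no thread
// can combine the new bounds with the old table.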

void GCToEEInterface::EnableFinalization(bool foundFinalizers)
{
    if (foundFinalizers || FinalizerThread::HaveExtraWorkForFinalizer())
    {
        FinalizerThread::EnableFinalization();
    }
}

void GCToEEInterface::HandleFatalError(unsigned int exitCode)
{
    EEPOLICY_HANDLE_FATAL_ERROR(exitCode);
}

bool GCToEEInterface::ShouldFinalizeObjectForUnload(void* pDomain, Object* obj)
{
    // CoreCLR does not have appdomains, so this code path is dead. Other runtimes may
    // choose to inspect the object being finalized here.
    // [DESKTOP TODO] Desktop looks for "agile and finalizable" objects and may choose
    // to move them to a new app domain instead of finalizing them here.
    return true;
}

bool GCToEEInterface::EagerFinalized(Object* obj)
{
    MethodTable* pMT = obj->GetGCSafeMethodTable();
    if (pMT == pWeakReferenceMT ||
        pMT->GetCanonicalMethodTable() == pWeakReferenceOfTCanonMT)
    {
        FinalizeWeakReference(obj);
        return true;
    }

    return false;
}

MethodTable* GCToEEInterface::GetFreeObjectMethodTable()
{
    assert(g_pFreeObjectMethodTable != nullptr);
    return g_pFreeObjectMethodTable;
}

// This limit is arbitrary; we should never see config keys
// longer than this.
const size_t MaxConfigKeyLength = 255;

bool GCToEEInterface::GetBooleanConfigValue(const char* key, bool* value)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
    } CONTRACTL_END;

    // These configuration values are given to us via startup flags.
    if (strcmp(key, "gcServer") == 0)
    {
        *value = g_heap_type == GC_HEAP_SVR;
        return true;
    }

    if (strcmp(key, "gcConcurrent") == 0)
    {
        *value = !!g_pConfig->GetGCconcurrent();
        return true;
    }

    if (strcmp(key, "GCRetainVM") == 0)
    {
        *value = !!g_pConfig->GetGCRetainVM();
        return true;
    }

    WCHAR configKey[MaxConfigKeyLength];
    if (MultiByteToWideChar(CP_ACP, 0, key, -1 /* key is null-terminated */, configKey, MaxConfigKeyLength) == 0)
    {
        // Whatever this is, it's not something we care about. (It was too long, wasn't unicode, etc.)
        return false;
    }

    // Otherwise, ask the config subsystem.
    if (CLRConfig::IsConfigOptionSpecified(configKey))
    {
        CLRConfig::ConfigDWORDInfo info { configKey , 0, CLRConfig::EEConfig_default };
        *value = CLRConfig::GetConfigValue(info) != 0;
        return true;
    }

    return false;
}

bool GCToEEInterface::GetIntConfigValue(const char* key, int64_t* value)
{
    CONTRACTL {
      NOTHROW;
      GC_NOTRIGGER;
    } CONTRACTL_END;

    if (strcmp(key, "GCSegmentSize") == 0)
    {
        *value = g_pConfig->GetSegmentSize();
        return true;
    }

    if (strcmp(key, "GCgen0size") == 0)
    {
        *value = g_pConfig->GetGCgen0size();
        return true;
    }

    if (strcmp(key, "GCLOHThreshold") == 0)
    {
        *value = g_pConfig->GetGCLOHThreshold();
        return true;
    }

    WCHAR configKey[MaxConfigKeyLength];
    if (MultiByteToWideChar(CP_ACP, 0, key, -1 /* key is null-terminated */, configKey, MaxConfigKeyLength) == 0)
    {
        // Whatever this is, it's not something we care about. (It was too long, wasn't unicode, etc.)
        return false;
    }

    // There is no ConfigULONGLONGInfo, and the GC uses 64-bit values for things like
    // GCHeapAffinitizeMask, so we have to fake it by getting the string and converting
    // it to uint64_t.
    if (CLRConfig::IsConfigOptionSpecified(configKey))
    {
        CLRConfig::ConfigStringInfo info { configKey, CLRConfig::EEConfig_default };
        LPWSTR out = CLRConfig::GetConfigValue(info);
        if (!out)
        {
            // config not found
            CLRConfig::FreeConfigString(out);
            return false;
        }

        wchar_t *end;
        uint64_t result;
        errno = 0;
        result = _wcstoui64(out, &end, 16);
        // errno is ERANGE if the number is out of range, and end is set to out if
        // no valid conversion exists.
        if (errno == ERANGE || end == out)
        {
            CLRConfig::FreeConfigString(out);
            return false;
        }

        *value = static_cast<int64_t>(result);
        CLRConfig::FreeConfigString(out);
        return true;
    }

    return false;
}
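
// Illustrative example (not part of this file): because the fallback above parses the
// raw config string with _wcstoui64(..., 16), values are interpreted as hexadecimal.
// For a 64-bit mask config such as GCHeapAffinitizeMask:
//
//   wchar_t* end;
//   uint64_t mask = _wcstoui64(L"FF", &end, 16);    // mask == 255 (low eight bits set)
//
// Note that a decimal-looking string like "10" therefore parses as 16, not 10.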

bool GCToEEInterface::GetStringConfigValue(const char* key, const char** value)
{
    CONTRACTL {
      NOTHROW;
      GC_NOTRIGGER;
    } CONTRACTL_END;

    WCHAR configKey[MaxConfigKeyLength];
    if (MultiByteToWideChar(CP_ACP, 0, key, -1 /* key is null-terminated */, configKey, MaxConfigKeyLength) == 0)
    {
        // Whatever this is, it's not something we care about. (It was too long, wasn't unicode, etc.)
        return false;
    }

    CLRConfig::ConfigStringInfo info { configKey, CLRConfig::EEConfig_default };
    LPWSTR out = CLRConfig::GetConfigValue(info);
    if (!out)
    {
        // config not found
        return false;
    }

    int charCount = WideCharToMultiByte(CP_ACP, 0, out, -1 /* out is null-terminated */, NULL, 0, nullptr, nullptr);
    if (charCount == 0)
    {
        // This should only happen if the config subsystem gives us a string that's not
        // valid unicode.
        CLRConfig::FreeConfigString(out);
        return false;
    }

    // not allocated on the stack since it escapes this function
    AStringHolder configResult = new (nothrow) char[charCount];
    if (!configResult)
    {
        CLRConfig::FreeConfigString(out);
        return false;
    }

    if (WideCharToMultiByte(CP_ACP, 0, out, -1 /* out is null-terminated */,
          configResult.GetValue(), charCount, nullptr, nullptr) == 0)
    {
        // This should never happen; the previous call to WideCharToMultiByte that
        // computed the charCount should have caught all issues.
        assert(false);
        CLRConfig::FreeConfigString(out);
        return false;
    }

    *value = configResult.Extract();
    CLRConfig::FreeConfigString(out);
    return true;
}

void GCToEEInterface::FreeStringConfigValue(const char* value)
{
    delete [] value;
}
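
// Illustrative sketch (not part of this file): strings returned by GetStringConfigValue
// are heap-allocated copies, so callers must release them with FreeStringConfigValue,
// which matches the new[] above with delete[]. A hypothetical GC-side caller (the
// config key below is made up):
//
//   const char* configString = nullptr;
//   if (GetStringConfigValue("SomeGCConfigKey", &configString))
//   {
//       // ... use configString ...
//       FreeStringConfigValue(configString);
//   }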

bool GCToEEInterface::IsGCThread()
{
    return !!::IsGCThread();
}

bool GCToEEInterface::WasCurrentThreadCreatedByGC()
{
    return !!::IsGCSpecialThread();
}

struct SuspendableThreadStubArguments
{
    void* Argument;
    void (*ThreadStart)(void*);
    Thread* Thread;
    bool HasStarted;
    CLREvent ThreadStartedEvent;
};

struct ThreadStubArguments
{
    void* Argument;
    void (*ThreadStart)(void*);
    HANDLE Thread;
    bool HasStarted;
    CLREvent ThreadStartedEvent;
};

namespace
{
    const size_t MaxThreadNameSize = 255;

    bool CreateSuspendableThread(
        void (*threadStart)(void*),
        void* argument,
        const wchar_t* name)
    {
        LIMITED_METHOD_CONTRACT;

        SuspendableThreadStubArguments args;
        args.Argument = argument;
        args.ThreadStart = threadStart;
        args.Thread = nullptr;
        args.HasStarted = false;
        if (!args.ThreadStartedEvent.CreateAutoEventNoThrow(FALSE))
        {
            return false;
        }

        EX_TRY
        {
            args.Thread = SetupUnstartedThread(FALSE);
        }
        EX_CATCH
        {
        }
        EX_END_CATCH(SwallowAllExceptions)

        if (!args.Thread)
        {
            args.ThreadStartedEvent.CloseEvent();
            return false;
        }

        auto threadStub = [](void* argument) -> DWORD
        {
            SuspendableThreadStubArguments* args = static_cast<SuspendableThreadStubArguments*>(argument);
            assert(args != nullptr);

            ClrFlsSetThreadType(ThreadType_GC);
            args->Thread->SetGCSpecial(true);
            STRESS_LOG_RESERVE_MEM(GC_STRESSLOG_MULTIPLY);
            args->HasStarted = !!args->Thread->HasStarted(false);

            Thread* thread = args->Thread;
            auto threadStart = args->ThreadStart;
            void* threadArgument = args->Argument;
            bool hasStarted = args->HasStarted;
            args->ThreadStartedEvent.Set();

            // The args cannot be used once the event is set: setting it releases the wait
            // in the function that created this thread, after which args goes out of scope.
            if (hasStarted)
            {
                threadStart(threadArgument);
                DestroyThread(thread);
            }

            return 0;
        };
        if (!args.Thread->CreateNewThread(0, threadStub, &args, name))
        {
            args.Thread->DecExternalCount(FALSE);
            args.ThreadStartedEvent.CloseEvent();
            return false;
        }

        args.Thread->SetBackground(TRUE, FALSE);
        args.Thread->StartThread();

        // Wait for the thread to be in its main loop
        uint32_t res = args.ThreadStartedEvent.Wait(INFINITE, FALSE);
        args.ThreadStartedEvent.CloseEvent();
        _ASSERTE(res == WAIT_OBJECT_0);

        if (!args.HasStarted)
        {
            // The thread has failed to start, and the Thread object was destroyed in the
            // Thread::HasStarted failure code path.
            return false;
        }

        return true;
    }

    bool CreateNonSuspendableThread(
        void (*threadStart)(void*),
        void* argument,
        const wchar_t* name)
    {
        LIMITED_METHOD_CONTRACT;

        ThreadStubArguments args;
        args.Argument = argument;
        args.ThreadStart = threadStart;
        args.Thread = INVALID_HANDLE_VALUE;
        if (!args.ThreadStartedEvent.CreateAutoEventNoThrow(FALSE))
        {
            return false;
        }

        auto threadStub = [](void* argument) -> DWORD
        {
            ThreadStubArguments* args = static_cast<ThreadStubArguments*>(argument);
            assert(args != nullptr);

            ClrFlsSetThreadType(ThreadType_GC);
            STRESS_LOG_RESERVE_MEM(GC_STRESSLOG_MULTIPLY);

            args->HasStarted = true;
            auto threadStart = args->ThreadStart;
            void* threadArgument = args->Argument;
            args->ThreadStartedEvent.Set();

            // The args cannot be used once the event is set: setting it releases the wait
            // in the function that created this thread, after which args goes out of scope.
            threadStart(threadArgument);
            return 0;
        };

        args.Thread = Thread::CreateUtilityThread(Thread::StackSize_Medium, threadStub, &args, name);
        if (args.Thread == INVALID_HANDLE_VALUE)
        {
            args.ThreadStartedEvent.CloseEvent();
            return false;
        }

        // Wait for the thread to be in its main loop
        uint32_t res = args.ThreadStartedEvent.Wait(INFINITE, FALSE);
        args.ThreadStartedEvent.CloseEvent();
        _ASSERTE(res == WAIT_OBJECT_0);

        CloseHandle(args.Thread);
        return true;
    }
} // anonymous namespace

bool GCToEEInterface::CreateThread(void (*threadStart)(void*), void* arg, bool is_suspendable, const char* name)
{
    InlineSString<MaxThreadNameSize> wideName;
    const WCHAR* namePtr = nullptr;
    EX_TRY
    {
        if (name != nullptr)
        {
            wideName.SetUTF8(name);
            namePtr = wideName.GetUnicode();
        }
    }
    EX_CATCH
    {
        // We're not obligated to provide a name; if it's not valid,
        // just report nullptr as the name.
    }
    EX_END_CATCH(SwallowAllExceptions)

    LIMITED_METHOD_CONTRACT;
    if (is_suspendable)
    {
        return CreateSuspendableThread(threadStart, arg, namePtr);
    }
    else
    {
        return CreateNonSuspendableThread(threadStart, arg, namePtr);
    }
}

void GCToEEInterface::WalkAsyncPinnedForPromotion(Object* object, ScanContext* sc, promote_func* callback)
{
    LIMITED_METHOD_CONTRACT;

    assert(object != nullptr);
    assert(sc != nullptr);
    assert(callback != nullptr);
    if (object->GetGCSafeMethodTable() != g_pOverlappedDataClass)
    {
        // not an overlapped data object - nothing to do.
        return;
    }

    // Report the pinned user objects.
    OverlappedDataObject *pOverlapped = (OverlappedDataObject *)object;
    if (pOverlapped->m_userObject != NULL)
    {
        if (pOverlapped->m_userObject->GetGCSafeMethodTable() == g_pPredefinedArrayTypes[ELEMENT_TYPE_OBJECT]->GetMethodTable())
        {
            // OverlappedDataObject is very special.  An async pin handle keeps it alive.
            // During GC, we also make sure that
            // 1. m_userObject itself does not move if m_userObject is not an array
            // 2. Every object pointed to by m_userObject does not move if m_userObject is an array
            // We do not want to pin m_userObject itself if it is an array; see the sketch
            // after this function.
            ArrayBase* pUserObject = (ArrayBase*)OBJECTREFToObject(pOverlapped->m_userObject);
            Object **ppObj = (Object**)pUserObject->GetDataPtr(TRUE);
            size_t num = pUserObject->GetNumComponents();
            for (size_t i = 0; i < num; i++)
            {
                callback(ppObj + i, sc, GC_CALL_PINNED);
            }
        }
        else
        {
            callback(&OBJECTREF_TO_UNCHECKED_OBJECTREF(pOverlapped->m_userObject), (ScanContext *)sc, GC_CALL_PINNED);
        }
    }
}
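
// Illustrative sketch (not part of this file) of the two cases above, with made-up
// objects. Given an OverlappedDataObject whose m_userObject is:
//
//   case 1: a single buffer object B
//       -> callback(&m_userObject, sc, GC_CALL_PINNED);  B itself is pinned.
//
//   case 2: an object[] { B0, B1, B2 }
//       -> callback(&elem0, sc, GC_CALL_PINNED);  // each element B0..B2 is pinned,
//          callback(&elem1, sc, GC_CALL_PINNED);  // but the array object itself is
//          callback(&elem2, sc, GC_CALL_PINNED);  // free to move during compaction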

void GCToEEInterface::WalkAsyncPinned(Object* object, void* context, void (*callback)(Object*, Object*, void*))
{
    LIMITED_METHOD_CONTRACT;

    assert(object != nullptr);
    assert(callback != nullptr);

    if (object->GetGCSafeMethodTable() != g_pOverlappedDataClass)
    {
        return;
    }

    OverlappedDataObject *pOverlapped = (OverlappedDataObject *)(object);
    if (pOverlapped->m_userObject != NULL)
    {
        Object * pUserObject = OBJECTREFToObject(pOverlapped->m_userObject);
        callback(object, pUserObject, context);
        if (pOverlapped->m_userObject->GetGCSafeMethodTable() == g_pPredefinedArrayTypes[ELEMENT_TYPE_OBJECT]->GetMethodTable())
        {
            ArrayBase* pUserArrayObject = (ArrayBase*)pUserObject;
            Object **pObj = (Object**)pUserArrayObject->GetDataPtr(TRUE);
            size_t num = pUserArrayObject->GetNumComponents();
            for (size_t i = 0; i < num; i ++)
            {
                callback(pUserObject, pObj[i], context);
            }
        }
    }
}

IGCToCLREventSink* GCToEEInterface::EventSink()
{
    LIMITED_METHOD_CONTRACT;

    return &g_gcToClrEventSink;
}

uint32_t GCToEEInterface::GetDefaultDomainIndex()
{
    LIMITED_METHOD_CONTRACT;

    return DefaultADID;
}

void *GCToEEInterface::GetAppDomainAtIndex(uint32_t appDomainIndex)
{
    LIMITED_METHOD_CONTRACT;

    return ::GetAppDomain();
}

bool GCToEEInterface::AppDomainCanAccessHandleTable(uint32_t appDomainID)
{
    LIMITED_METHOD_CONTRACT;

    return appDomainID == DefaultADID;
}

uint32_t GCToEEInterface::GetIndexOfAppDomainBeingUnloaded()
{
    LIMITED_METHOD_CONTRACT;

    return 0xFFFFFFFF;
}

uint32_t GCToEEInterface::GetTotalNumSizedRefHandles()
{
    LIMITED_METHOD_CONTRACT;

    return SystemDomain::System()->GetTotalNumSizedRefHandles();
}


bool GCToEEInterface::AppDomainIsRudeUnload(void *appDomain)
{
    LIMITED_METHOD_CONTRACT;

    return false;
}

bool GCToEEInterface::AnalyzeSurvivorsRequested(int condemnedGeneration)
{
    LIMITED_METHOD_CONTRACT;

    // Is the list active?
    GcNotifications gn(g_pGcNotificationTable);
    if (gn.IsActive())
    {
        GcEvtArgs gea = { GC_MARK_END, { (1<<condemnedGeneration) } };
        if (gn.GetNotification(gea) != 0)
        {
            return true;
        }
    }

    return false;
}

void GCToEEInterface::AnalyzeSurvivorsFinished(int condemnedGeneration)
{
    LIMITED_METHOD_CONTRACT;

    // Is the list active?
    GcNotifications gn(g_pGcNotificationTable);
    if (gn.IsActive())
    {
        GcEvtArgs gea = { GC_MARK_END, { (1<<condemnedGeneration) } };
        if (gn.GetNotification(gea) != 0)
        {
            DACNotify::DoGCNotification(gea);
        }
    }
}

void GCToEEInterface::VerifySyncTableEntry()
{
    LIMITED_METHOD_CONTRACT;

#ifdef VERIFY_HEAP
    SyncBlockCache::GetSyncBlockCache()->VerifySyncTableEntry();
#endif // VERIFY_HEAP
}

void GCToEEInterface::UpdateGCEventStatus(int currentPublicLevel, int currentPublicKeywords, int currentPrivateLevel, int currentPrivateKeywords)
{
#if defined(__linux__)
    LIMITED_METHOD_CONTRACT;
    // LTTng does not have a notion of enabling events via "keyword"/"level", but we have
    // to implement a similar behavior somehow.

    // To do this, we manually check for events that are enabled via different
    // provider/keyword/level combinations.
    // Ex 1. GCJoin_V2 is what we use to check whether the GC keyword is enabled at verbose level in the public provider
    // Ex 2. SetGCHandle is what we use to check whether the GCHandle keyword is enabled at informational level in the public provider
    // Refer to the comments in src/vm/gcenv.ee.h next to the EXTERN C definitions to see which events are enabled.

    // WARNING: if an event's GC level is changed, the perfcollect script needs to be
    // updated at the same time to reflect it.
    BOOL keyword_gc_verbose = EventXplatEnabledGCJoin_V2();
    BOOL keyword_gc_informational = EventXplatEnabledGCStart();

    BOOL keyword_gc_heapsurvival_and_movement_informational = EventXplatEnabledGCGenerationRange();
    BOOL keyword_gchandle_informational = EventXplatEnabledSetGCHandle();
    BOOL keyword_gchandle_prv_informational = EventXplatEnabledPrvSetGCHandle();

    BOOL prv_gcprv_informational = EventXplatEnabledBGCBegin();
    BOOL prv_gcprv_verbose = EventXplatEnabledPinPlugAtGCTime();

    int publicProviderLevel = keyword_gc_verbose ? GCEventLevel_Verbose : (keyword_gc_informational ? GCEventLevel_Information : GCEventLevel_None);
    int publicProviderKeywords = (keyword_gc_informational ? GCEventKeyword_GC : GCEventKeyword_None) |
                                 (keyword_gchandle_informational ? GCEventKeyword_GCHandle : GCEventKeyword_None) |
                                 (keyword_gc_heapsurvival_and_movement_informational ? GCEventKeyword_GCHeapSurvivalAndMovement : GCEventKeyword_None);

    int privateProviderLevel = prv_gcprv_verbose ? GCEventLevel_Verbose : (prv_gcprv_informational ? GCEventLevel_Information : GCEventLevel_None);
    int privateProviderKeywords = (prv_gcprv_informational ? GCEventKeyword_GCPrivate : GCEventKeyword_None) |
        (keyword_gchandle_prv_informational ? GCEventKeyword_GCHandlePrivate : GCEventKeyword_None);

    if (publicProviderLevel != currentPublicLevel || publicProviderKeywords != currentPublicKeywords)
    {
        GCEventLevel publicLevel = static_cast<GCEventLevel>(publicProviderLevel);
        GCEventKeyword publicKeywords = static_cast<GCEventKeyword>(publicProviderKeywords);
        GCHeapUtilities::RecordEventStateChange(true, publicKeywords, publicLevel);
    }
    if (privateProviderLevel != currentPrivateLevel || privateProviderKeywords != currentPrivateKeywords)
    {
        GCEventLevel privateLevel = static_cast<GCEventLevel>(privateProviderLevel);
        GCEventKeyword privateKeywords = static_cast<GCEventKeyword>(privateProviderKeywords);
        GCHeapUtilities::RecordEventStateChange(false, privateKeywords, privateLevel);
    }
#endif // __linux__
}
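
// Illustrative example (not part of this file): how the proxy events above map to the
// reported state. If a tracing session enables only GCStart and SetGCHandle in the
// public provider, then keyword_gc_informational and keyword_gchandle_informational
// are true, so:
//
//   publicProviderLevel    == GCEventLevel_Information
//   publicProviderKeywords == GCEventKeyword_GC | GCEventKeyword_GCHandle
//
// and GCHeapUtilities::RecordEventStateChange(true, ...) is called only if that
// differs from the current public level/keywords.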