1 // Licensed to the .NET Foundation under one or more agreements.
2 // The .NET Foundation licenses this file to you under the MIT license.
3 // See the LICENSE file in the project root for more information.
6 * Wraps handle table to implement various handle types (Strong, Weak, etc.)
19 #include "objecthandle.h"
20 #include "handletablepriv.h"
22 #include "gchandletableimpl.h"
24 HandleTableMap g_HandleTableMap;
26 // Array of contexts used while scanning dependent handles for promotion. There are as many contexts as GC
27 // heaps and they're allocated by Ref_Initialize and initialized during each GC by GcDhInitialScan.
28 DhContext *g_pDependentHandleContexts;
30 #ifndef DACCESS_COMPILE
32 //----------------------------------------------------------------------------
37 * used when tracing variable-strength handles.
41 uintptr_t lEnableMask; // mask of types to trace
42 HANDLESCANPROC pfnTrace; // tracing function to use
43 uintptr_t lp2; // second parameter
47 //----------------------------------------------------------------------------
50 * Scan callback for tracing variable-strength handles.
52 * This callback is called to trace individual objects referred to by handles
53 * in the variable-strength table.
55 void CALLBACK VariableTraceDispatcher(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pExtraInfo, uintptr_t lp1, uintptr_t lp2)
59 // lp2 is a pointer to our VARSCANINFO
60 struct VARSCANINFO *pInfo = (struct VARSCANINFO *)lp2;
62 // is the handle's dynamic type one we're currently scanning?
63 if ((*pExtraInfo & pInfo->lEnableMask) != 0)
65 // yes - call the tracing function for this handle
66 pInfo->pfnTrace(pObjRef, NULL, lp1, pInfo->lp2);
#if defined(FEATURE_COMINTEROP) || defined(FEATURE_REDHAWK)
/*
 * PromoteRefCounted
 *
 * Scan callback for tracing ref-counted handles.
 *
 * This callback is called to trace individual objects referred to by handles
 * in the refcounted table.
 */
void CALLBACK PromoteRefCounted(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pExtraInfo, uintptr_t lp1, uintptr_t lp2)
{
    WRAPPER_NO_CONTRACT;
    UNREFERENCED_PARAMETER(pExtraInfo);

    // there are too many races when asynchronously scanning ref-counted handles so we no longer support it
    _ASSERTE(!((ScanContext*)lp1)->concurrent);

    LOG((LF_GC, LL_INFO1000, LOG_HANDLE_OBJECT_CLASS("", pObjRef, "causes promotion of ", *pObjRef)));

    // Load the referent once; the EE callback decides whether this handle keeps it alive.
    Object *pObj = VolatileLoad((PTR_Object*)pObjRef);

#ifdef _DEBUG
    Object *pOldObj = pObj;
#endif

    if (!HndIsNullOrDestroyedHandle(pObj) && !g_theGCHeap->IsPromoted(pObj))
    {
        if (GCToEEInterface::RefCountedHandleCallbacks(pObj))
        {
            promote_func* callback = (promote_func*) lp2;
            callback(&pObj, (ScanContext *)lp1, 0);
        }
    }

    // Assert this object wasn't relocated since we are passing a temporary object's address.
    _ASSERTE(pOldObj == pObj);
}
#endif // FEATURE_COMINTEROP || FEATURE_REDHAWK
109 // Only used by profiling/ETW.
110 //----------------------------------------------------------------------------
113 * struct DIAG_DEPSCANINFO
115 * used when tracing dependent handles for profiling/ETW.
117 struct DIAG_DEPSCANINFO
119 HANDLESCANPROC pfnTrace; // tracing function to use
120 uintptr_t pfnProfilingOrETW;
123 void CALLBACK TraceDependentHandle(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pExtraInfo, uintptr_t lp1, uintptr_t lp2)
127 if (pObjRef == NULL || pExtraInfo == NULL)
130 // At this point, it's possible that either or both of the primary and secondary
131 // objects are NULL. However, if the secondary object is non-NULL, then the primary
132 // object should also be non-NULL.
133 _ASSERTE(*pExtraInfo == 0 || *pObjRef != NULL);
135 struct DIAG_DEPSCANINFO *pInfo = (struct DIAG_DEPSCANINFO*)lp2;
137 HANDLESCANPROC pfnTrace = pInfo->pfnTrace;
139 // is the handle's secondary object non-NULL?
140 if ((*pObjRef != NULL) && (*pExtraInfo != 0))
142 // yes - call the tracing function for this handle
143 pfnTrace(pObjRef, NULL, lp1, (uintptr_t)(pInfo->pfnProfilingOrETW));
147 void CALLBACK UpdateDependentHandle(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pExtraInfo, uintptr_t lp1, uintptr_t lp2)
149 LIMITED_METHOD_CONTRACT;
150 _ASSERTE(pExtraInfo);
152 Object **pPrimaryRef = (Object **)pObjRef;
153 Object **pSecondaryRef = (Object **)pExtraInfo;
155 LOG((LF_GC|LF_ENC, LL_INFO10000, LOG_HANDLE_OBJECT("Querying for new location of ",
156 pPrimaryRef, "to ", *pPrimaryRef)));
157 LOG((LF_GC|LF_ENC, LL_INFO10000, LOG_HANDLE_OBJECT(" and ",
158 pSecondaryRef, "to ", *pSecondaryRef)));
161 Object *pOldPrimary = *pPrimaryRef;
162 Object *pOldSecondary = *pSecondaryRef;
166 promote_func* callback = (promote_func*) lp2;
167 callback(pPrimaryRef, (ScanContext *)lp1, 0);
168 callback(pSecondaryRef, (ScanContext *)lp1, 0);
171 if (pOldPrimary != *pPrimaryRef)
172 LOG((LF_GC|LF_ENC, LL_INFO10000, "Updating " FMT_HANDLE "from" FMT_ADDR "to " FMT_OBJECT "\n",
173 DBG_ADDR(pPrimaryRef), DBG_ADDR(pOldPrimary), DBG_ADDR(*pPrimaryRef)));
175 LOG((LF_GC|LF_ENC, LL_INFO10000, "Updating " FMT_HANDLE "- " FMT_OBJECT "did not move\n",
176 DBG_ADDR(pPrimaryRef), DBG_ADDR(*pPrimaryRef)));
177 if (pOldSecondary != *pSecondaryRef)
178 LOG((LF_GC|LF_ENC, LL_INFO10000, "Updating " FMT_HANDLE "from" FMT_ADDR "to " FMT_OBJECT "\n",
179 DBG_ADDR(pSecondaryRef), DBG_ADDR(pOldSecondary), DBG_ADDR(*pSecondaryRef)));
181 LOG((LF_GC|LF_ENC, LL_INFO10000, "Updating " FMT_HANDLE "- " FMT_OBJECT "did not move\n",
182 DBG_ADDR(pSecondaryRef), DBG_ADDR(*pSecondaryRef)));
186 void CALLBACK PromoteDependentHandle(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pExtraInfo, uintptr_t lp1, uintptr_t lp2)
188 LIMITED_METHOD_CONTRACT;
189 _ASSERTE(pExtraInfo);
191 Object **pPrimaryRef = (Object **)pObjRef;
192 Object **pSecondaryRef = (Object **)pExtraInfo;
193 LOG((LF_GC|LF_ENC, LL_INFO1000, "Checking promotion of DependentHandle"));
194 LOG((LF_GC|LF_ENC, LL_INFO1000, LOG_HANDLE_OBJECT_CLASS("\tPrimary:\t", pObjRef, "to ", *pObjRef)));
195 LOG((LF_GC|LF_ENC, LL_INFO1000, LOG_HANDLE_OBJECT_CLASS("\tSecondary\t", pSecondaryRef, "to ", *pSecondaryRef)));
197 ScanContext *sc = (ScanContext*)lp1;
198 DhContext *pDhContext = Ref_GetDependentHandleContext(sc);
200 if (*pObjRef && g_theGCHeap->IsPromoted(*pPrimaryRef))
202 if (!g_theGCHeap->IsPromoted(*pSecondaryRef))
204 LOG((LF_GC|LF_ENC, LL_INFO10000, "\tPromoting secondary " LOG_OBJECT_CLASS(*pSecondaryRef)));
206 promote_func* callback = (promote_func*) lp2;
207 callback(pSecondaryRef, (ScanContext *)lp1, 0);
208 // need to rescan because we might have promoted an object that itself has added fields and this
209 // promotion might be all that is pinning that object. If we've already scanned that dependent
210 // handle relationship, we could lose it secondary object.
211 pDhContext->m_fPromoted = true;
216 // If we see a non-cleared primary which hasn't been promoted, record the fact. We will only require a
217 // rescan if this flag has been set (if it's clear then the previous scan found only clear and
218 // promoted handles, so there's no chance of finding an additional handle being promoted on a
220 pDhContext->m_fUnpromotedPrimaries = true;
224 void CALLBACK ClearDependentHandle(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pExtraInfo, uintptr_t /*lp1*/, uintptr_t /*lp2*/)
226 LIMITED_METHOD_CONTRACT;
227 _ASSERTE(pExtraInfo);
229 Object **pPrimaryRef = (Object **)pObjRef;
230 Object **pSecondaryRef = (Object **)pExtraInfo;
231 LOG((LF_GC|LF_ENC, LL_INFO1000, "Checking referent of DependentHandle"));
232 LOG((LF_GC|LF_ENC, LL_INFO1000, LOG_HANDLE_OBJECT_CLASS("\tPrimary:\t", pPrimaryRef, "to ", *pPrimaryRef)));
233 LOG((LF_GC|LF_ENC, LL_INFO1000, LOG_HANDLE_OBJECT_CLASS("\tSecondary\t", pSecondaryRef, "to ", *pSecondaryRef)));
235 if (!g_theGCHeap->IsPromoted(*pPrimaryRef))
237 LOG((LF_GC|LF_ENC, LL_INFO1000, "\tunreachable ", LOG_OBJECT_CLASS(*pPrimaryRef)));
238 LOG((LF_GC|LF_ENC, LL_INFO1000, "\tunreachable ", LOG_OBJECT_CLASS(*pSecondaryRef)));
240 *pSecondaryRef = NULL;
244 _ASSERTE(g_theGCHeap->IsPromoted(*pSecondaryRef));
245 LOG((LF_GC|LF_ENC, LL_INFO10000, "\tPrimary is reachable " LOG_OBJECT_CLASS(*pPrimaryRef)));
246 LOG((LF_GC|LF_ENC, LL_INFO10000, "\tSecondary is reachable " LOG_OBJECT_CLASS(*pSecondaryRef)));
251 * Scan callback for pinning handles.
253 * This callback is called to pin individual objects referred to by handles in
256 void CALLBACK PinObject(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pExtraInfo, uintptr_t lp1, uintptr_t lp2)
258 STATIC_CONTRACT_NOTHROW;
259 STATIC_CONTRACT_GC_NOTRIGGER;
260 STATIC_CONTRACT_SO_TOLERANT;
261 STATIC_CONTRACT_MODE_COOPERATIVE;
262 UNREFERENCED_PARAMETER(pExtraInfo);
264 // PINNING IS BAD - DON'T DO IT IF YOU CAN AVOID IT
265 LOG((LF_GC, LL_WARNING, LOG_HANDLE_OBJECT_CLASS("WARNING: ", pObjRef, "causes pinning of ", *pObjRef)));
267 Object **pRef = (Object **)pObjRef;
269 promote_func* callback = (promote_func*) lp2;
270 callback(pRef, (ScanContext *)lp1, GC_CALL_PINNED);
273 void CALLBACK AsyncPinObject(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pExtraInfo, uintptr_t lp1, uintptr_t lp2)
275 UNREFERENCED_PARAMETER(pExtraInfo);
277 LOG((LF_GC, LL_WARNING, LOG_HANDLE_OBJECT_CLASS("WARNING: ", pObjRef, "causes (async) pinning of ", *pObjRef)));
279 Object **pRef = (Object **)pObjRef;
281 promote_func* callback = (promote_func*)lp2;
282 callback(pRef, (ScanContext *)lp1, 0);
283 Object* pPinnedObj = *pRef;
284 if (!HndIsNullOrDestroyedHandle(pPinnedObj))
286 GCToEEInterface::WalkAsyncPinnedForPromotion(pPinnedObj, (ScanContext *)lp1, callback);
292 * Scan callback for tracing strong handles.
294 * This callback is called to trace individual objects referred to by handles
295 * in the strong table.
297 void CALLBACK PromoteObject(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pExtraInfo, uintptr_t lp1, uintptr_t lp2)
300 UNREFERENCED_PARAMETER(pExtraInfo);
302 LOG((LF_GC, LL_INFO1000, LOG_HANDLE_OBJECT_CLASS("", pObjRef, "causes promotion of ", *pObjRef)));
304 Object **ppRef = (Object **)pObjRef;
306 promote_func* callback = (promote_func*) lp2;
307 callback(ppRef, (ScanContext *)lp1, 0);
312 * Scan callback for disconnecting dead handles.
314 * This callback is called to check promotion of individual objects referred to by
315 * handles in the weak tables.
317 void CALLBACK CheckPromoted(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pExtraInfo, uintptr_t lp1, uintptr_t lp2)
320 UNREFERENCED_PARAMETER(pExtraInfo);
321 UNREFERENCED_PARAMETER(lp1);
322 UNREFERENCED_PARAMETER(lp2);
324 LOG((LF_GC, LL_INFO100000, LOG_HANDLE_OBJECT_CLASS("Checking referent of Weak-", pObjRef, "to ", *pObjRef)));
326 Object **ppRef = (Object **)pObjRef;
327 if (!g_theGCHeap->IsPromoted(*ppRef))
329 LOG((LF_GC, LL_INFO100, LOG_HANDLE_OBJECT_CLASS("Severing Weak-", pObjRef, "to unreachable ", *pObjRef)));
335 LOG((LF_GC, LL_INFO1000000, "reachable " LOG_OBJECT_CLASS(*pObjRef)));
339 void CALLBACK CalculateSizedRefSize(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pExtraInfo, uintptr_t lp1, uintptr_t lp2)
341 LIMITED_METHOD_CONTRACT;
343 _ASSERTE(pExtraInfo);
345 Object **ppSizedRef = (Object **)pObjRef;
346 size_t* pSize = (size_t *)pExtraInfo;
347 LOG((LF_GC, LL_INFO100000, LOG_HANDLE_OBJECT_CLASS("Getting size of referent of SizedRef-", pObjRef, "to ", *pObjRef)));
349 ScanContext* sc = (ScanContext *)lp1;
350 promote_func* callback = (promote_func*) lp2;
352 size_t sizeBegin = g_theGCHeap->GetPromotedBytes(sc->thread_number);
353 callback(ppSizedRef, (ScanContext *)lp1, 0);
354 size_t sizeEnd = g_theGCHeap->GetPromotedBytes(sc->thread_number);
355 *pSize = sizeEnd - sizeBegin;
359 * Scan callback for updating pointers.
361 * This callback is called to update pointers for individual objects referred to by
362 * handles in the weak and strong tables.
364 void CALLBACK UpdatePointer(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pExtraInfo, uintptr_t lp1, uintptr_t lp2)
366 LIMITED_METHOD_CONTRACT;
367 UNREFERENCED_PARAMETER(pExtraInfo);
369 LOG((LF_GC, LL_INFO100000, LOG_HANDLE_OBJECT("Querying for new location of ", pObjRef, "to ", *pObjRef)));
371 Object **ppRef = (Object **)pObjRef;
374 Object *pOldLocation = *ppRef;
378 promote_func* callback = (promote_func*) lp2;
379 callback(ppRef, (ScanContext *)lp1, 0);
382 if (pOldLocation != *pObjRef)
383 LOG((LF_GC, LL_INFO10000, "Updating " FMT_HANDLE "from" FMT_ADDR "to " FMT_OBJECT "\n",
384 DBG_ADDR(pObjRef), DBG_ADDR(pOldLocation), DBG_ADDR(*pObjRef)));
386 LOG((LF_GC, LL_INFO100000, "Updating " FMT_HANDLE "- " FMT_OBJECT "did not move\n",
387 DBG_ADDR(pObjRef), DBG_ADDR(*pObjRef)));
#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
/*
 * ScanPointerForProfilerAndETW
 *
 * Scan callback for reporting handle roots to the profiler and/or ETW.
 * Maps the handle's type to the corresponding ETW root flags before
 * invoking the notification callback supplied in lp2.
 */
void CALLBACK ScanPointerForProfilerAndETW(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pExtraInfo, uintptr_t lp1, uintptr_t lp2)
{
    WRAPPER_NO_CONTRACT;
    UNREFERENCED_PARAMETER(pExtraInfo);
    handle_scan_fn fn = (handle_scan_fn)lp2;

    LOG((LF_GC | LF_CORPROF, LL_INFO100000, LOG_HANDLE_OBJECT_CLASS("Notifying profiler of ", pObjRef, "to ", *pObjRef)));

    // Get the baseobject (which can subsequently be cast into an OBJECTREF == ObjectID
    Object **pRef = (Object **)pObjRef;

    // Get a hold of the heap ID that's tacked onto the end of the scancontext struct.
    ScanContext *pSC = (ScanContext *)lp1;

    uint32_t rootFlags = 0;
    bool isDependent = false;

    OBJECTHANDLE handle = (OBJECTHANDLE)(pRef);
    switch (HandleFetchType(handle))
    {
    case HNDTYPE_DEPENDENT:
        isDependent = true;
        break;
    case HNDTYPE_WEAK_SHORT:
    case HNDTYPE_WEAK_LONG:
#ifdef FEATURE_COMINTEROP
    case HNDTYPE_WEAK_WINRT:
#endif // FEATURE_COMINTEROP
        rootFlags |= kEtwGCRootFlagsWeakRef;
        break;

    case HNDTYPE_STRONG:
    case HNDTYPE_SIZEDREF:
        break;

    case HNDTYPE_PINNED:
    case HNDTYPE_ASYNCPINNED:
        rootFlags |= kEtwGCRootFlagsPinning;
        break;

    case HNDTYPE_VARIABLE:
#ifdef FEATURE_REDHAWK
    {
        // Set the appropriate ETW flags for the current strength of this variable handle
        uint32_t nVarHandleType = GetVariableHandleType(handle);
        if (((nVarHandleType & VHT_WEAK_SHORT) != 0) ||
            ((nVarHandleType & VHT_WEAK_LONG) != 0))
        {
            rootFlags |= kEtwGCRootFlagsWeakRef;
        }
        if ((nVarHandleType & VHT_PINNED) != 0)
        {
            rootFlags |= kEtwGCRootFlagsPinning;
        }

        // No special ETW flag for strong handles (VHT_STRONG)
    }
#else
        _ASSERTE(!"Variable handle encountered");
#endif
        break;

#if defined(FEATURE_COMINTEROP) && !defined(FEATURE_REDHAWK)
    case HNDTYPE_REFCOUNTED:
        rootFlags |= kEtwGCRootFlagsRefCounted;
        if (*pRef != NULL)
        {
            // a ref-counted handle that the EE no longer keeps alive acts like a weak ref
            if (!GCToEEInterface::RefCountedHandleCallbacks(*pRef))
                rootFlags |= kEtwGCRootFlagsWeakRef;
        }
        break;
#endif // FEATURE_COMINTEROP && !FEATURE_REDHAWK
    }

    _UNCHECKED_OBJECTREF pSec = NULL;

    if (isDependent)
    {
        // dependent handles also report their secondary object
        pSec = (_UNCHECKED_OBJECTREF)HndGetHandleExtraInfo(handle);
    }

    fn(pRef, pSec, rootFlags, pSC, isDependent);
}
#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
490 * Scan callback for updating pointers.
492 * This callback is called to update pointers for individual objects referred to by
493 * handles in the pinned table.
495 void CALLBACK UpdatePointerPinned(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pExtraInfo, uintptr_t lp1, uintptr_t lp2)
497 LIMITED_METHOD_CONTRACT;
498 UNREFERENCED_PARAMETER(pExtraInfo);
500 Object **ppRef = (Object **)pObjRef;
503 promote_func* callback = (promote_func*) lp2;
504 callback(ppRef, (ScanContext *)lp1, GC_CALL_PINNED);
506 LOG((LF_GC, LL_INFO100000, LOG_HANDLE_OBJECT("Updating ", pObjRef, "to pinned ", *pObjRef)));
510 //----------------------------------------------------------------------------
512 // flags describing the handle types
513 static const uint32_t s_rgTypeFlags[] =
515 HNDF_NORMAL, // HNDTYPE_WEAK_SHORT
516 HNDF_NORMAL, // HNDTYPE_WEAK_LONG
517 HNDF_NORMAL, // HNDTYPE_STRONG
518 HNDF_NORMAL, // HNDTYPE_PINNED
519 HNDF_EXTRAINFO, // HNDTYPE_VARIABLE
520 HNDF_NORMAL, // HNDTYPE_REFCOUNTED
521 HNDF_EXTRAINFO, // HNDTYPE_DEPENDENT
522 HNDF_NORMAL, // HNDTYPE_ASYNCPINNED
523 HNDF_EXTRAINFO, // HNDTYPE_SIZEDREF
524 HNDF_EXTRAINFO, // HNDTYPE_WEAK_WINRT
527 int getNumberOfSlots()
531 // when Ref_Initialize called, IGCHeap::GetNumberOfHeaps() is still 0, so use #procs as a workaround
532 // it is legal since even if later #heaps < #procs we create handles by thread home heap
533 // and just have extra unused slots in HandleTableBuckets, which does not take a lot of space
537 return GCToOSInterface::GetCurrentProcessCpuCount();
540 class HandleTableBucketHolder
543 HandleTableBucket* m_bucket;
545 BOOL m_SuppressRelease;
547 HandleTableBucketHolder(HandleTableBucket* bucket, int slots);
548 ~HandleTableBucketHolder();
550 void SuppressRelease()
552 m_SuppressRelease = TRUE;
556 HandleTableBucketHolder::HandleTableBucketHolder(HandleTableBucket* bucket, int slots)
557 :m_bucket(bucket), m_slots(slots), m_SuppressRelease(FALSE)
561 HandleTableBucketHolder::~HandleTableBucketHolder()
563 if (m_SuppressRelease)
567 if (m_bucket->pTable)
569 for (int n = 0; n < m_slots; n ++)
571 if (m_bucket->pTable[n])
573 HndDestroyHandleTable(m_bucket->pTable[n]);
576 delete [] m_bucket->pTable;
579 // we do not own m_bucket, so we shouldn't delete it here.
582 bool Ref_Initialize()
587 WRAPPER(GC_NOTRIGGER);
588 INJECT_FAULT(return false);
593 _ASSERTE(g_HandleTableMap.pBuckets == NULL);
595 // Create an array of INITIAL_HANDLE_TABLE_ARRAY_SIZE HandleTableBuckets to hold the handle table sets
596 HandleTableBucket** pBuckets = new (nothrow) HandleTableBucket * [ INITIAL_HANDLE_TABLE_ARRAY_SIZE ];
597 if (pBuckets == NULL)
600 ZeroMemory(pBuckets, INITIAL_HANDLE_TABLE_ARRAY_SIZE * sizeof (HandleTableBucket *));
602 g_gcGlobalHandleStore = new (nothrow) GCHandleStore();
603 if (g_gcGlobalHandleStore == NULL)
609 // Initialize the bucket in the global handle store
610 HandleTableBucket* pBucket = &g_gcGlobalHandleStore->_underlyingBucket;
612 pBucket->HandleTableIndex = 0;
614 int n_slots = getNumberOfSlots();
616 HandleTableBucketHolder bucketHolder(pBucket, n_slots);
618 // create the handle table set for the first bucket
619 pBucket->pTable = new (nothrow) HHANDLETABLE[n_slots];
620 if (pBucket->pTable == NULL)
623 ZeroMemory(pBucket->pTable,
624 n_slots * sizeof(HHANDLETABLE));
625 for (int uCPUindex = 0; uCPUindex < n_slots; uCPUindex++)
627 pBucket->pTable[uCPUindex] = HndCreateHandleTable(s_rgTypeFlags, _countof(s_rgTypeFlags), ADIndex(1));
628 if (pBucket->pTable[uCPUindex] == NULL)
631 HndSetHandleTableIndex(pBucket->pTable[uCPUindex], 0);
634 pBuckets[0] = pBucket;
635 bucketHolder.SuppressRelease();
637 g_HandleTableMap.pBuckets = pBuckets;
638 g_HandleTableMap.dwMaxIndex = INITIAL_HANDLE_TABLE_ARRAY_SIZE;
639 g_HandleTableMap.pNext = NULL;
641 // Allocate contexts used during dependent handle promotion scanning. There's one of these for every GC
642 // heap since they're scanned in parallel.
643 g_pDependentHandleContexts = new (nothrow) DhContext[n_slots];
644 if (g_pDependentHandleContexts == NULL)
650 if (pBuckets != NULL)
653 if (g_gcGlobalHandleStore != NULL)
654 delete g_gcGlobalHandleStore;
663 if (g_pDependentHandleContexts)
665 delete [] g_pDependentHandleContexts;
666 g_pDependentHandleContexts = NULL;
669 // are there any handle tables?
670 if (g_HandleTableMap.pBuckets)
672 // don't destroy any of the indexed handle tables; they should
673 // be destroyed externally.
675 // destroy the handle table bucket array
676 HandleTableMap *walk = &g_HandleTableMap;
678 delete [] walk->pBuckets;
682 // null out the handle table array
683 g_HandleTableMap.pNext = NULL;
684 g_HandleTableMap.dwMaxIndex = 0;
686 // null out the global table handle
687 g_HandleTableMap.pBuckets = NULL;
691 #ifndef FEATURE_REDHAWK
692 bool Ref_InitializeHandleTableBucket(HandleTableBucket* bucket, void* context)
697 WRAPPER(GC_TRIGGERS);
698 INJECT_FAULT(return false);
702 HandleTableBucket *result = bucket;
703 HandleTableMap *walk = &g_HandleTableMap;
705 HandleTableMap *last = NULL;
708 result->pTable = NULL;
710 // create handle table set for the bucket
711 int n_slots = getNumberOfSlots();
713 HandleTableBucketHolder bucketHolder(result, n_slots);
715 result->pTable = new (nothrow) HHANDLETABLE[n_slots];
721 ZeroMemory(result->pTable, n_slots * sizeof(HHANDLETABLE));
723 for (int uCPUindex=0; uCPUindex < n_slots; uCPUindex++) {
724 result->pTable[uCPUindex] = HndCreateHandleTable(s_rgTypeFlags, _countof(s_rgTypeFlags), ADIndex((DWORD)(uintptr_t)context));
725 if (!result->pTable[uCPUindex])
730 // Do we have free slot
732 for (uint32_t i = 0; i < INITIAL_HANDLE_TABLE_ARRAY_SIZE; i ++) {
733 if (walk->pBuckets[i] == 0) {
734 for (int uCPUindex=0; uCPUindex < n_slots; uCPUindex++)
735 HndSetHandleTableIndex(result->pTable[uCPUindex], i+offset);
737 result->HandleTableIndex = i+offset;
738 if (Interlocked::CompareExchangePointer(&walk->pBuckets[i], result, NULL) == 0) {
740 bucketHolder.SuppressRelease();
746 offset = walk->dwMaxIndex;
751 // Let's create a new node
752 HandleTableMap *newMap = new (nothrow) HandleTableMap;
758 newMap->pBuckets = new (nothrow) HandleTableBucket * [ INITIAL_HANDLE_TABLE_ARRAY_SIZE ];
759 if (!newMap->pBuckets)
765 newMap->dwMaxIndex = last->dwMaxIndex + INITIAL_HANDLE_TABLE_ARRAY_SIZE;
766 newMap->pNext = NULL;
767 ZeroMemory(newMap->pBuckets,
768 INITIAL_HANDLE_TABLE_ARRAY_SIZE * sizeof (HandleTableBucket *));
770 if (Interlocked::CompareExchangePointer(&last->pNext, newMap, NULL) != NULL)
772 // This thread loses.
773 delete [] newMap->pBuckets;
777 offset = last->dwMaxIndex;
780 #endif // !FEATURE_REDHAWK
782 void Ref_RemoveHandleTableBucket(HandleTableBucket *pBucket)
784 LIMITED_METHOD_CONTRACT;
786 size_t index = pBucket->HandleTableIndex;
787 HandleTableMap* walk = &g_HandleTableMap;
792 if ((index < walk->dwMaxIndex) && (index >= offset))
794 // During AppDomain unloading, we first remove a handle table and then destroy
795 // the table. As soon as the table is removed, the slot can be reused.
796 if (walk->pBuckets[index - offset] == pBucket)
798 walk->pBuckets[index - offset] = NULL;
802 offset = walk->dwMaxIndex;
806 // Didn't find it. This will happen typically from Ref_DestroyHandleTableBucket if
807 // we explicitly call Ref_RemoveHandleTableBucket first.
811 void Ref_DestroyHandleTableBucket(HandleTableBucket *pBucket)
815 Ref_RemoveHandleTableBucket(pBucket);
816 for (int uCPUindex=0; uCPUindex < getNumberOfSlots(); uCPUindex++)
818 HndDestroyHandleTable(pBucket->pTable[uCPUindex]);
820 delete [] pBucket->pTable;
823 int getSlotNumber(ScanContext* sc)
827 return (IsServerHeap() ? sc->thread_number : 0);
830 // <TODO> - reexpress as complete only like hndtable does now!!! -fmh</REVISIT_TODO>
831 void Ref_EndSynchronousGC(uint32_t condemned, uint32_t maxgen)
833 LIMITED_METHOD_CONTRACT;
834 UNREFERENCED_PARAMETER(condemned);
835 UNREFERENCED_PARAMETER(maxgen);
837 // NOT used, must be modified for MTHTS (scalable HandleTable scan) if planned to use:
838 // need to pass ScanContext info to split HT bucket by threads, or to be performed under t_join::join
840 // tell the table we finished a GC
841 HandleTableMap *walk = &g_HandleTableMap;
843 for (uint32_t i = 0; i < INITIAL_HANDLE_TABLE_ARRAY_SIZE; i ++) {
844 HHANDLETABLE hTable = walk->pTable[i];
846 HndNotifyGcCycleComplete(hTable, condemned, maxgen);
853 void SetDependentHandleSecondary(OBJECTHANDLE handle, OBJECTREF objref)
868 // handle should not be in unloaded domain
869 ValidateAppDomainForHandle(handle);
871 // Make sure the objref is valid before it is assigned to a handle
872 ValidateAssignObjrefForHandle(objref, HndGetHandleTableADIndex(HndGetHandleTable(handle)));
874 // unwrap the objectref we were given
875 _UNCHECKED_OBJECTREF value = OBJECTREF_TO_UNCHECKED_OBJECTREF(objref);
877 // if we are doing a non-NULL pointer store then invoke the write-barrier
879 HndWriteBarrier(handle, objref);
882 HndSetHandleExtraInfo(handle, HNDTYPE_DEPENDENT, (uintptr_t)value);
886 //----------------------------------------------------------------------------
889 * GetVariableHandleType.
891 * Retrieves the dynamic type of a variable-strength handle.
893 uint32_t GetVariableHandleType(OBJECTHANDLE handle)
897 return (uint32_t)HndGetHandleExtraInfo(handle);
901 * UpdateVariableHandleType.
903 * Changes the dynamic type of a variable-strength handle.
905 * N.B. This routine is not a macro since we do validation in RETAIL.
906 * We always validate the type here because it can come from external callers.
908 void UpdateVariableHandleType(OBJECTHANDLE handle, uint32_t type)
912 // verify that we are being asked to set a valid type
913 if (!IS_VALID_VHT_VALUE(type))
915 // bogus value passed in
920 // <REVISIT_TODO> (francish) CONCURRENT GC NOTE</REVISIT_TODO>
922 // If/when concurrent GC is implemented, we need to make sure variable handles
923 // DON'T change type during an asynchronous scan, OR that we properly recover
924 // from the change. Some changes are benign, but for example changing to or
925 // from a pinning handle in the middle of a scan would not be fun.
928 // store the type in the handle's extra info
929 HndSetHandleExtraInfo(handle, HNDTYPE_VARIABLE, (uintptr_t)type);
933 * CompareExchangeVariableHandleType.
935 * Changes the dynamic type of a variable-strength handle. Unlike UpdateVariableHandleType we assume that the
936 * types have already been validated.
938 uint32_t CompareExchangeVariableHandleType(OBJECTHANDLE handle, uint32_t oldType, uint32_t newType)
942 // verify that we are being asked to get/set valid types
943 _ASSERTE(IS_VALID_VHT_VALUE(oldType) && IS_VALID_VHT_VALUE(newType));
945 // attempt to store the type in the handle's extra info
946 return (uint32_t)HndCompareExchangeHandleExtraInfo(handle, HNDTYPE_VARIABLE, (uintptr_t)oldType, (uintptr_t)newType);
951 * TraceVariableHandles.
953 * Convenience function for tracing variable-strength handles.
954 * Wraps HndScanHandlesForGC.
956 void TraceVariableHandles(HANDLESCANPROC pfnTrace, uintptr_t lp1, uintptr_t lp2, uint32_t uEnableMask, uint32_t condemned, uint32_t maxgen, uint32_t flags)
960 // set up to scan variable handles with the specified mask and trace function
961 uint32_t type = HNDTYPE_VARIABLE;
962 struct VARSCANINFO info = { (uintptr_t)uEnableMask, pfnTrace, lp2 };
964 HandleTableMap *walk = &g_HandleTableMap;
966 for (uint32_t i = 0; i < INITIAL_HANDLE_TABLE_ARRAY_SIZE; i++)
967 if (walk->pBuckets[i] != NULL)
969 HHANDLETABLE hTable = walk->pBuckets[i]->pTable[getSlotNumber((ScanContext*) lp1)];
972 #ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
975 ScanContext* sc = (ScanContext *)lp1;
976 sc->pCurrentDomain = SystemDomain::GetAppDomainAtIndex(HndGetHandleTableADIndex(hTable));
978 #endif //FEATURE_APPDOMAIN_RESOURCE_MONITORING
979 HndScanHandlesForGC(hTable, VariableTraceDispatcher,
980 lp1, (uintptr_t)&info, &type, 1, condemned, maxgen, HNDGCF_EXTRAINFO | flags);
988 loop scan version of TraceVariableHandles for single-thread-managed Ref_* functions
989 should be kept in sync with the code above
991 void TraceVariableHandlesBySingleThread(HANDLESCANPROC pfnTrace, uintptr_t lp1, uintptr_t lp2, uint32_t uEnableMask, uint32_t condemned, uint32_t maxgen, uint32_t flags)
995 // set up to scan variable handles with the specified mask and trace function
996 uint32_t type = HNDTYPE_VARIABLE;
997 struct VARSCANINFO info = { (uintptr_t)uEnableMask, pfnTrace, lp2 };
999 HandleTableMap *walk = &g_HandleTableMap;
1001 for (uint32_t i = 0; i < INITIAL_HANDLE_TABLE_ARRAY_SIZE; i ++)
1002 if (walk->pBuckets[i] != NULL)
1004 // this is the one of Ref_* function performed by single thread in MULTI_HEAPS case, so we need to loop through all HT of the bucket
1005 for (int uCPUindex=0; uCPUindex < getNumberOfSlots(); uCPUindex++)
1007 HHANDLETABLE hTable = walk->pBuckets[i]->pTable[uCPUindex];
1009 HndScanHandlesForGC(hTable, VariableTraceDispatcher,
1010 lp1, (uintptr_t)&info, &type, 1, condemned, maxgen, HNDGCF_EXTRAINFO | flags);
1017 //----------------------------------------------------------------------------
1019 void Ref_TracePinningRoots(uint32_t condemned, uint32_t maxgen, ScanContext* sc, Ref_promote_func* fn)
1021 WRAPPER_NO_CONTRACT;
1023 LOG((LF_GC, LL_INFO10000, "Pinning referents of pinned handles in generation %u\n", condemned));
1025 // pin objects pointed to by pinning handles
1026 uint32_t types[2] = {HNDTYPE_PINNED, HNDTYPE_ASYNCPINNED};
1027 uint32_t flags = sc->concurrent ? HNDGCF_ASYNC : HNDGCF_NORMAL;
1029 HandleTableMap *walk = &g_HandleTableMap;
1031 for (uint32_t i = 0; i < INITIAL_HANDLE_TABLE_ARRAY_SIZE; i ++)
1032 if (walk->pBuckets[i] != NULL)
1034 HHANDLETABLE hTable = walk->pBuckets[i]->pTable[getSlotNumber((ScanContext*) sc)];
1037 #ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
1040 sc->pCurrentDomain = SystemDomain::GetAppDomainAtIndex(HndGetHandleTableADIndex(hTable));
1042 #endif //FEATURE_APPDOMAIN_RESOURCE_MONITORING
1044 // Pinned handles and async pinned handles are scanned in separate passes, since async pinned
1045 // handles may require a callback into the EE in order to fully trace an async pinned
1046 // object's object graph.
1047 HndScanHandlesForGC(hTable, PinObject, uintptr_t(sc), uintptr_t(fn), &types[0], 1, condemned, maxgen, flags);
1048 HndScanHandlesForGC(hTable, AsyncPinObject, uintptr_t(sc), uintptr_t(fn), &types[1], 1, condemned, maxgen, flags);
1054 // pin objects pointed to by variable handles whose dynamic type is VHT_PINNED
1055 TraceVariableHandles(PinObject, uintptr_t(sc), uintptr_t(fn), VHT_PINNED, condemned, maxgen, flags);
1059 void Ref_TraceNormalRoots(uint32_t condemned, uint32_t maxgen, ScanContext* sc, Ref_promote_func* fn)
1061 WRAPPER_NO_CONTRACT;
1063 LOG((LF_GC, LL_INFO10000, "Promoting referents of strong handles in generation %u\n", condemned));
1065 // promote objects pointed to by strong handles
1066 // during ephemeral GCs we also want to promote the ones pointed to by sizedref handles.
1067 uint32_t types[2] = {HNDTYPE_STRONG, HNDTYPE_SIZEDREF};
1068 uint32_t uTypeCount = (((condemned >= maxgen) && !g_theGCHeap->IsConcurrentGCInProgress()) ? 1 : _countof(types));
1069 uint32_t flags = (sc->concurrent) ? HNDGCF_ASYNC : HNDGCF_NORMAL;
1071 HandleTableMap *walk = &g_HandleTableMap;
1073 for (uint32_t i = 0; i < INITIAL_HANDLE_TABLE_ARRAY_SIZE; i ++)
1074 if (walk->pBuckets[i] != NULL)
1076 HHANDLETABLE hTable = walk->pBuckets[i]->pTable[getSlotNumber(sc)];
1079 #ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
1082 sc->pCurrentDomain = SystemDomain::GetAppDomainAtIndex(HndGetHandleTableADIndex(hTable));
1084 #endif //FEATURE_APPDOMAIN_RESOURCE_MONITORING
1086 HndScanHandlesForGC(hTable, PromoteObject, uintptr_t(sc), uintptr_t(fn), types, uTypeCount, condemned, maxgen, flags);
1092 // promote objects pointed to by variable handles whose dynamic type is VHT_STRONG
1093 TraceVariableHandles(PromoteObject, uintptr_t(sc), uintptr_t(fn), VHT_STRONG, condemned, maxgen, flags);
1095 #if defined(FEATURE_COMINTEROP) || defined(FEATURE_REDHAWK)
1096 // don't scan ref-counted handles during concurrent phase as the clean-up of CCWs can race with AD unload and cause AV's
1097 if (!sc->concurrent)
1099 // promote ref-counted handles
1100 uint32_t type = HNDTYPE_REFCOUNTED;
1102 walk = &g_HandleTableMap;
1104 for (uint32_t i = 0; i < INITIAL_HANDLE_TABLE_ARRAY_SIZE; i ++)
1105 if (walk->pBuckets[i] != NULL)
1107 HHANDLETABLE hTable = walk->pBuckets[i]->pTable[getSlotNumber(sc)];
1109 HndScanHandlesForGC(hTable, PromoteRefCounted, uintptr_t(sc), uintptr_t(fn), &type, 1, condemned, maxgen, flags );
1114 #endif // FEATURE_COMINTEROP || FEATURE_REDHAWK
// Enumerate every ref-counted (HNDTYPE_REFCOUNTED) handle in every bucket and
// every per-heap slot of the handle table map, invoking 'callback' with
// lParam1/lParam2 for each handle. Ref-counted handles only exist in
// COM-interop builds; otherwise the parameters are deliberately unused.
1118 void Ref_TraceRefCountHandles(HANDLESCANPROC callback, uintptr_t lParam1, uintptr_t lParam2)
1120 #ifdef FEATURE_COMINTEROP
1121 int max_slots = getNumberOfSlots();
1122 uint32_t handleType = HNDTYPE_REFCOUNTED;
1124 HandleTableMap *walk = &g_HandleTableMap;
1127 for (uint32_t i = 0; i < INITIAL_HANDLE_TABLE_ARRAY_SIZE; i++)
1129 if (walk->pBuckets[i] != NULL)
// Walk every per-heap table of this bucket (single-threaded enumeration).
1131 for (int j = 0; j < max_slots; j++)
1133 HHANDLETABLE hTable = walk->pBuckets[i]->pTable[j];
// Plain enumeration (HndEnumHandles), not a GC scan: no age/generation filtering.
1135 HndEnumHandles(hTable, &handleType, 1, callback, lParam1, lParam2, false);
// NOTE(review): the lines below presumably sit under an #else branch that is
// elided from this view — confirm against the full file.
1142 UNREFERENCED_PARAMETER(callback);
1143 UNREFERENCED_PARAMETER(lParam1);
1144 UNREFERENCED_PARAMETER(lParam2);
1145 #endif // FEATURE_COMINTEROP
// Scan long-weak handles in the condemned generations, running the
// CheckPromoted callback against each referent (per the LOG text below).
// lp1 is really a ScanContext*.
1151 void Ref_CheckReachable(uint32_t condemned, uint32_t maxgen, uintptr_t lp1)
1153 WRAPPER_NO_CONTRACT;
1155 LOG((LF_GC, LL_INFO10000, "Checking reachability of referents of long-weak handles in generation %u\n", condemned));
// NOTE(review): the contents of the 'types' array referenced below are elided
// from this view — confirm against the full file.
1157 // these are the handle types that need to be checked
1161 #if defined(FEATURE_COMINTEROP) || defined(FEATURE_REDHAWK)
1163 #endif // FEATURE_COMINTEROP || FEATURE_REDHAWK
1166 // check objects pointed to by long weak handles (see LOG text above and the
1167 uint32_t flags = (((ScanContext*) lp1)->concurrent) ? HNDGCF_ASYNC : HNDGCF_NORMAL;
// Each GC thread scans only its own per-heap slot of every bucket.
1168 int uCPUindex = getSlotNumber((ScanContext*) lp1);
1170 HandleTableMap *walk = &g_HandleTableMap;
1172 for (uint32_t i = 0; i < INITIAL_HANDLE_TABLE_ARRAY_SIZE; i ++)
1174 if (walk->pBuckets[i] != NULL)
1176 HHANDLETABLE hTable = walk->pBuckets[i]->pTable[uCPUindex];
1178 HndScanHandlesForGC(hTable, CheckPromoted, lp1, 0, types, _countof(types), condemned, maxgen, flags);
1184 // check objects pointed to by variable handles whose dynamic type is VHT_WEAK_LONG
1185 TraceVariableHandles(CheckPromoted, lp1, 0, VHT_WEAK_LONG, condemned, maxgen, flags);
1189 // Dependent handles manages the relationship between primary and secondary objects, where the lifetime of
1190 // the secondary object is dependent upon that of the primary. The handle itself holds the primary instance,
1191 // while the extra handle info holds the secondary object. The secondary object should always be promoted
1192 // when the primary is, and the handle should be cleared if the primary is not promoted. Can't use ordinary
1193 // strong handle to refer to the secondary as this could cause a cycle in the graph if the secondary somehow
1194 // pointed back to the primary. Can't use weak handle because that would not keep the secondary object alive.
1196 // The result is that a dependent handle has the EFFECT of
1197 // * long weak handles in both the primary and secondary objects
1198 // * a strong reference from the primary object to the secondary one
1200 // Dependent handles are currently used for
1202 // * managing fields added to EnC classes, where the handle itself holds the this pointer and the
1203 // secondary object represents the new field that was added.
1204 // * it is exposed to managed code (as System.Runtime.CompilerServices.DependentHandle) and is used in the
1205 // implementation of ConditionalWeakTable.
1208 // Retrieve the dependent handle context associated with the current GC scan context.
1209 DhContext *Ref_GetDependentHandleContext(ScanContext* sc)
1211 WRAPPER_NO_CONTRACT;
// One DhContext per GC heap (see g_pDependentHandleContexts above); index by
// the scan context's slot number so each GC thread gets its own context.
1212 return &g_pDependentHandleContexts[getSlotNumber(sc)];
1215 // Scan the dependent handle table promoting any secondary object whose associated primary object is promoted.
1217 // Multiple scans may be required since (a) secondary promotions made during one scan could cause the primary
1218 // of another handle to be promoted and (b) the GC may not have marked all promoted objects at the time it
1219 // initially calls us.
1221 // Returns true if any promotions resulted from this scan.
// Scan dependent handles, promoting each secondary object whose primary has
// been promoted (via PromoteDependentHandle). Loops until a fixed point is
// reached for this thread's slice of the table; returns true if anything was
// promoted over the entire scan.
1222 bool Ref_ScanDependentHandlesForPromotion(DhContext *pDhContext)
1224 LOG((LF_GC, LL_INFO10000, "Checking liveness of referents of dependent handles in generation %u\n", pDhContext->m_iCondemned));
1225 uint32_t type = HNDTYPE_DEPENDENT;
1226 uint32_t flags = (pDhContext->m_pScanContext->concurrent) ? HNDGCF_ASYNC : HNDGCF_NORMAL;
// HNDGCF_EXTRAINFO: the secondary object lives in the handle's extra info.
1227 flags |= HNDGCF_EXTRAINFO;
1229 // Keep a note of whether we promoted anything over the entire scan (not just the last iteration). We need
1230 // to return this data since under server GC promotions from this table may cause further promotions in
1231 // tables handled by other threads.
1232 bool fAnyPromotions = false;
1234 // Keep rescanning the table while both the following conditions are true:
1235 // 1) There's at least one primary object left that could have been promoted.
1236 // 2) We performed at least one secondary promotion (which could have caused a primary promotion) on the
1238 // Note that even once we terminate the GC may call us again (because it has caused more objects to be
1239 // marked as promoted). But we scan in a loop here anyway because it is cheaper for us to loop than the GC
1240 // (especially on server GC where each external cycle has to be synchronized between GC worker threads).
// NOTE(review): the 'do' that opens the loop closed at the '} while' below is
// elided from this view — confirm against the full file.
1243 // Assume the conditions for re-scanning are both false initially. The scan callback below
1244 // (PromoteDependentHandle) will set the relevant flag on the first unpromoted primary it sees or
1245 // secondary promotion it performs.
1246 pDhContext->m_fUnpromotedPrimaries = false;
1247 pDhContext->m_fPromoted = false;
1249 HandleTableMap *walk = &g_HandleTableMap;
1252 for (uint32_t i = 0; i < INITIAL_HANDLE_TABLE_ARRAY_SIZE; i ++)
1254 if (walk->pBuckets[i] != NULL)
// Each GC thread scans only its own per-heap slot of the bucket.
1256 HHANDLETABLE hTable = walk->pBuckets[i]->pTable[getSlotNumber(pDhContext->m_pScanContext)];
1259 HndScanHandlesForGC(hTable,
1260 PromoteDependentHandle,
1261 uintptr_t(pDhContext->m_pScanContext),
1262 uintptr_t(pDhContext->m_pfnPromoteFunction),
1264 pDhContext->m_iCondemned,
1265 pDhContext->m_iMaxGen,
1273 if (pDhContext->m_fPromoted)
1274 fAnyPromotions = true;
// Rescan only while an unpromoted primary remains AND the last pass promoted
// something (so another pass could make further progress).
1276 } while (pDhContext->m_fUnpromotedPrimaries && pDhContext->m_fPromoted);
1278 return fAnyPromotions;
1281 // Perform a scan of dependent handles for the purpose of clearing any that haven't had their primary
// Scan dependent handles, clearing (via ClearDependentHandle) those whose
// primary object was not promoted — per the comment and LOG text above/below.
1283 void Ref_ScanDependentHandlesForClearing(uint32_t condemned, uint32_t maxgen, ScanContext* sc, Ref_promote_func* fn)
1285 LOG((LF_GC, LL_INFO10000, "Clearing dead dependent handles in generation %u\n", condemned));
1286 uint32_t type = HNDTYPE_DEPENDENT;
1287 uint32_t flags = (sc->concurrent) ? HNDGCF_ASYNC : HNDGCF_NORMAL;
// HNDGCF_EXTRAINFO: the secondary object lives in the handle's extra info.
1288 flags |= HNDGCF_EXTRAINFO;
1290 HandleTableMap *walk = &g_HandleTableMap;
1293 for (uint32_t i = 0; i < INITIAL_HANDLE_TABLE_ARRAY_SIZE; i ++)
1295 if (walk->pBuckets[i] != NULL)
// Each GC thread handles its own per-heap slot of the bucket.
1297 HHANDLETABLE hTable = walk->pBuckets[i]->pTable[getSlotNumber(sc)];
1300 HndScanHandlesForGC(hTable, ClearDependentHandle, uintptr_t(sc), uintptr_t(fn), &type, 1, condemned, maxgen, flags );
1308 // Perform a scan of dependent handles for the purpose of updating handles to track relocated objects.
// Scan dependent handles after compaction, updating both the primary and
// secondary references for relocated objects (UpdateDependentHandle callback).
1309 void Ref_ScanDependentHandlesForRelocation(uint32_t condemned, uint32_t maxgen, ScanContext* sc, Ref_promote_func* fn)
1311 LOG((LF_GC, LL_INFO10000, "Relocating moved dependent handles in generation %u\n", condemned));
1312 uint32_t type = HNDTYPE_DEPENDENT;
1313 uint32_t flags = (sc->concurrent) ? HNDGCF_ASYNC : HNDGCF_NORMAL;
// HNDGCF_EXTRAINFO: the secondary object lives in the handle's extra info.
1314 flags |= HNDGCF_EXTRAINFO;
1316 HandleTableMap *walk = &g_HandleTableMap;
1319 for (uint32_t i = 0; i < INITIAL_HANDLE_TABLE_ARRAY_SIZE; i ++)
1321 if (walk->pBuckets[i] != NULL)
// Each GC thread handles its own per-heap slot of the bucket.
1323 HHANDLETABLE hTable = walk->pBuckets[i]->pTable[getSlotNumber(sc)];
1326 HndScanHandlesForGC(hTable, UpdateDependentHandle, uintptr_t(sc), uintptr_t(fn), &type, 1, condemned, maxgen, flags );
1335 loop scan version of TraceVariableHandles for single-thread-managed Ref_* functions
1336 should be kept in sync with the code above
1337 Only used by profiling/ETW.
// Single-threaded trace of all dependent handles across every bucket and every
// per-heap slot. Only used by profiling/ETW (see comment above).
1339 void TraceDependentHandlesBySingleThread(HANDLESCANPROC pfnTrace, uintptr_t lp1, uintptr_t lp2, uint32_t condemned, uint32_t maxgen, uint32_t flags)
1341 WRAPPER_NO_CONTRACT;
1343 // set up to scan dependent handles with the supplied trace function
1344 uint32_t type = HNDTYPE_DEPENDENT;
// DIAG_DEPSCANINFO bundles the caller's trace function and second parameter
// so TraceDependentHandle can forward them.
1345 struct DIAG_DEPSCANINFO info = { pfnTrace, lp2 };
1347 HandleTableMap *walk = &g_HandleTableMap;
1349 for (uint32_t i = 0; i < INITIAL_HANDLE_TABLE_ARRAY_SIZE; i ++)
1350 if (walk->pBuckets[i] != NULL)
1352 // This is one of the Ref_* functions performed by a single thread in the MULTI_HEAPS case, so we need to loop through all handle tables of the bucket.
1353 for (int uCPUindex=0; uCPUindex < getNumberOfSlots(); uCPUindex++)
1355 HHANDLETABLE hTable = walk->pBuckets[i]->pTable[uCPUindex];
1357 HndScanHandlesForGC(hTable, TraceDependentHandle,
1358 lp1, (uintptr_t)&info, &type, 1, condemned, maxgen, HNDGCF_EXTRAINFO | flags);
1365 // We scan handle tables by their buckets (ie, AD index). We could get into the situation where
1366 // the AD indices are not very compacted (for example if we have just unloaded ADs and their
1367 // indices haven't been reused yet) and we could be scanning them in an unbalanced fashion.
1368 // Consider using an array to represent the compacted form of all AD indices that exist for the
1369 // sized ref handles.
// Scan sized-ref handles bucket-by-bucket (i.e. by AppDomain index). Buckets
// are partitioned across GC threads by (AD index % heap count); the thread
// whose slot matches scans ALL per-heap tables of that bucket.
1370 void ScanSizedRefByAD(uint32_t maxgen, HANDLESCANPROC scanProc, ScanContext* sc, Ref_promote_func* fn, uint32_t flags)
1372 HandleTableMap *walk = &g_HandleTableMap;
1373 uint32_t type = HNDTYPE_SIZEDREF;
1374 int uCPUindex = getSlotNumber(sc);
1375 int n_slots = g_theGCHeap->GetNumberOfHeaps();
1379 for (uint32_t i = 0; i < INITIAL_HANDLE_TABLE_ARRAY_SIZE; i ++)
1381 if (walk->pBuckets[i] != NULL)
// All tables of a bucket share the same AD index; read it from slot 0.
1383 ADIndex adIndex = HndGetHandleTableADIndex(walk->pBuckets[i]->pTable[0]);
// Claim this bucket only if its AD index hashes to our slot.
1384 if ((adIndex.m_dwIndex % n_slots) == (uint32_t)uCPUindex)
1386 for (int index = 0; index < n_slots; index++)
1388 HHANDLETABLE hTable = walk->pBuckets[i]->pTable[index];
1391 #ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
1394 sc->pCurrentDomain = SystemDomain::GetAppDomainAtIndex(adIndex);
1396 #endif //FEATURE_APPDOMAIN_RESOURCE_MONITORING
// Sized-ref handles are only scanned at full GC: condemned == maxgen.
1397 HndScanHandlesForGC(hTable, scanProc, uintptr_t(sc), uintptr_t(fn), &type, 1, maxgen, maxgen, flags);
// Scan sized-ref handles slot-by-slot: each GC thread scans its own per-heap
// table of every bucket (contrast with ScanSizedRefByAD above, which
// partitions by bucket/AD index).
1407 void ScanSizedRefByCPU(uint32_t maxgen, HANDLESCANPROC scanProc, ScanContext* sc, Ref_promote_func* fn, uint32_t flags)
1409 HandleTableMap *walk = &g_HandleTableMap;
1410 uint32_t type = HNDTYPE_SIZEDREF;
1411 int uCPUindex = getSlotNumber(sc);
1415 for (uint32_t i = 0; i < INITIAL_HANDLE_TABLE_ARRAY_SIZE; i ++)
1417 if (walk->pBuckets[i] != NULL)
1419 HHANDLETABLE hTable = walk->pBuckets[i]->pTable[uCPUindex];
1422 #ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
1425 sc->pCurrentDomain = SystemDomain::GetAppDomainAtIndex(HndGetHandleTableADIndex(hTable));
1427 #endif //FEATURE_APPDOMAIN_RESOURCE_MONITORING
// Sized-ref handles are only scanned at full GC: condemned == maxgen.
1429 HndScanHandlesForGC(hTable, scanProc, uintptr_t(sc), uintptr_t(fn), &type, 1, maxgen, maxgen, flags);
// Scan sized-ref handles per-CPU, computing referent sizes via the
// CalculateSizedRefSize callback. Only valid during a full GC
// (asserted: condemned == maxgen).
1437 void Ref_ScanSizedRefHandles(uint32_t condemned, uint32_t maxgen, ScanContext* sc, Ref_promote_func* fn)
// NOTE(review): "to in generation" in the log literal below looks like a typo;
// left unchanged here because the literal is runtime text, not a comment.
1439 LOG((LF_GC, LL_INFO10000, "Scanning SizedRef handles to in generation %u\n", condemned));
1440 UNREFERENCED_PARAMETER(condemned);
1441 _ASSERTE (condemned == maxgen);
// HNDGCF_EXTRAINFO: sized-ref handles carry size data in their extra info.
1442 uint32_t flags = (sc->concurrent ? HNDGCF_ASYNC : HNDGCF_NORMAL) | HNDGCF_EXTRAINFO;
1444 ScanSizedRefByCPU(maxgen, CalculateSizedRefSize, sc, fn, flags);
// Scan short-weak (and, with COM interop, WinRT weak) handles in the condemned
// generations, running CheckPromoted against each referent. lp1 is really a
// ScanContext*.
1447 void Ref_CheckAlive(uint32_t condemned, uint32_t maxgen, uintptr_t lp1)
1449 WRAPPER_NO_CONTRACT;
1451 LOG((LF_GC, LL_INFO10000, "Checking liveness of referents of short-weak handles in generation %u\n", condemned));
// NOTE(review): the leading entries of the 'types' array (before the #ifdef)
// are elided from this view — confirm against the full file.
1453 // perform a multi-type scan that checks for unreachable objects
1457 #ifdef FEATURE_COMINTEROP
1458 , HNDTYPE_WEAK_WINRT
1459 #endif // FEATURE_COMINTEROP
1461 uint32_t flags = (((ScanContext*) lp1)->concurrent) ? HNDGCF_ASYNC : HNDGCF_NORMAL;
// Each GC thread scans only its own per-heap slot of every bucket.
1463 int uCPUindex = getSlotNumber((ScanContext*) lp1);
1464 HandleTableMap *walk = &g_HandleTableMap;
1467 for (uint32_t i = 0; i < INITIAL_HANDLE_TABLE_ARRAY_SIZE; i ++)
1469 if (walk->pBuckets[i] != NULL)
1471 HHANDLETABLE hTable = walk->pBuckets[i]->pTable[uCPUindex];
1473 HndScanHandlesForGC(hTable, CheckPromoted, lp1, 0, types, _countof(types), condemned, maxgen, flags);
1478 // check objects pointed to by variable handles whose dynamic type is VHT_WEAK_SHORT
1479 TraceVariableHandles(CheckPromoted, lp1, 0, VHT_WEAK_SHORT, condemned, maxgen, flags);
// Counts GC threads entering Ref_UpdatePointers so that exactly one of them
// (the first to increment) performs the process-wide sync block cache scan;
// reset via CompareExchange once all heaps have passed through — see the
// Interlocked usage in Ref_UpdatePointers.
1482 static VOLATILE(int32_t) uCount = 0;
1484 // NOTE: Please: if you update this function, update the very similar profiling function immediately below!!!
// After compaction, update the object pointers stored in all non-pinning
// handle types (the pinned variants are handled by Ref_UpdatePinnedPointers).
// Also elects one thread to update the sync block cache's weak pointers.
1485 void Ref_UpdatePointers(uint32_t condemned, uint32_t maxgen, ScanContext* sc, Ref_promote_func* fn)
1487 WRAPPER_NO_CONTRACT;
1489 // For now, treat the syncblock as if it were short weak handles. <REVISIT_TODO>Later, get
1490 // the benefits of fast allocation / free & generational awareness by supporting
1491 // the SyncTable as a new block type.
1492 // @TODO cwb: wait for compelling performance measurements.</REVISIT_TODO>
// Elect exactly one thread: the first to increment the shared counter wins.
// NOTE(review): the declaration of 'bDo' and the branch that consumes it are
// elided from this view — confirm against the full file.
1497 bDo = (Interlocked::Increment(&uCount) == 1);
// Once every heap's thread has incremented, reset the counter for next GC.
1498 Interlocked::CompareExchange (&uCount, 0, g_theGCHeap->GetNumberOfHeaps());
1499 _ASSERTE (uCount <= g_theGCHeap->GetNumberOfHeaps());
1503 GCToEEInterface::SyncBlockCacheWeakPtrScan(&UpdatePointer, uintptr_t(sc), uintptr_t(fn));
1505 LOG((LF_GC, LL_INFO10000, "Updating pointers to referents of non-pinning handles in generation %u\n", condemned));
// NOTE(review): the leading entries of the 'types' array are elided from this
// view — confirm against the full file.
1507 // these are the handle types that need their pointers updated
1513 #if defined(FEATURE_COMINTEROP) || defined(FEATURE_REDHAWK)
1515 #endif // FEATURE_COMINTEROP || FEATURE_REDHAWK
1516 #ifdef FEATURE_COMINTEROP
1518 #endif // FEATURE_COMINTEROP
1522 // perform a multi-type scan that updates pointers
1523 uint32_t flags = (sc->concurrent) ? HNDGCF_ASYNC : HNDGCF_NORMAL;
1525 HandleTableMap *walk = &g_HandleTableMap;
1527 for (uint32_t i = 0; i < INITIAL_HANDLE_TABLE_ARRAY_SIZE; i ++)
1528 if (walk->pBuckets[i] != NULL)
// Each GC thread updates only its own per-heap slot of the bucket.
1530 HHANDLETABLE hTable = walk->pBuckets[i]->pTable[getSlotNumber(sc)];
1532 HndScanHandlesForGC(hTable, UpdatePointer, uintptr_t(sc), uintptr_t(fn), types, _countof(types), condemned, maxgen, flags);
1537 // update pointers in variable handles whose dynamic type is VHT_WEAK_SHORT, VHT_WEAK_LONG or VHT_STRONG
1538 TraceVariableHandles(UpdatePointer, uintptr_t(sc), uintptr_t(fn), VHT_WEAK_SHORT | VHT_WEAK_LONG | VHT_STRONG, condemned, maxgen, flags);
1541 #if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
1543 // Please update this if you change the Ref_UpdatePointers function above.
// Single-threaded scan of all handle roots, reporting each to the profiler/ETW
// callback 'fn' via ScanPointerForProfilerAndETW. Mirrors Ref_UpdatePointers
// (see the note above) but walks every per-heap slot itself.
1544 void Ref_ScanHandlesForProfilerAndETW(uint32_t maxgen, uintptr_t lp1, handle_scan_fn fn)
1546 WRAPPER_NO_CONTRACT;
1548 LOG((LF_GC | LF_CORPROF, LL_INFO10000, "Scanning all handle roots for profiler.\n"));
1550 // Don't scan the sync block because they should not be reported. They are weak handles only
// NOTE(review): the leading entries of the 'types' array are elided from this
// view — confirm against the full file.
1552 // <REVISIT_TODO>We should change the following to not report weak either
1553 // these are the handle types that need their pointers updated</REVISIT_TODO>
1559 #if defined(FEATURE_COMINTEROP) || defined(FEATURE_REDHAWK)
1561 #endif // FEATURE_COMINTEROP || FEATURE_REDHAWK
1562 #ifdef FEATURE_COMINTEROP
1564 #endif // FEATURE_COMINTEROP
1566 // HNDTYPE_VARIABLE,
1567 HNDTYPE_ASYNCPINNED,
1571 uint32_t flags = HNDGCF_NORMAL;
1573 // perform a multi-type scan that updates pointers
1574 HandleTableMap *walk = &g_HandleTableMap;
1576 for (uint32_t i = 0; i < INITIAL_HANDLE_TABLE_ARRAY_SIZE; i ++)
1577 if (walk->pBuckets[i] != NULL)
1578 // This is one of the Ref_* functions performed by a single thread in the MULTI_HEAPS case, so we need to loop through all handle tables of the bucket.
1579 for (int uCPUindex=0; uCPUindex < getNumberOfSlots(); uCPUindex++)
1581 HHANDLETABLE hTable = walk->pBuckets[i]->pTable[uCPUindex];
1583 HndScanHandlesForGC(hTable, &ScanPointerForProfilerAndETW, lp1, (uintptr_t)fn, types, _countof(types), maxgen, maxgen, flags);
1588 // update pointers in variable handles whose dynamic type is VHT_WEAK_SHORT, VHT_WEAK_LONG or VHT_STRONG
1589 TraceVariableHandlesBySingleThread(&ScanPointerForProfilerAndETW, lp1, (uintptr_t)fn, VHT_WEAK_SHORT | VHT_WEAK_LONG | VHT_STRONG, maxgen, maxgen, flags);
// Single-threaded scan of all dependent handles for profiler/ETW reporting;
// simply forwards to TraceDependentHandlesBySingleThread with the scan
// context packed into lp1.
1592 void Ref_ScanDependentHandlesForProfilerAndETW(uint32_t maxgen, ScanContext * SC, handle_scan_fn fn)
1594 WRAPPER_NO_CONTRACT;
1596 LOG((LF_GC | LF_CORPROF, LL_INFO10000, "Scanning dependent handles for profiler.\n"));
1598 uint32_t flags = HNDGCF_NORMAL;
1600 uintptr_t lp1 = (uintptr_t)SC;
1601 TraceDependentHandlesBySingleThread(&ScanPointerForProfilerAndETW, lp1, (uintptr_t)fn, maxgen, maxgen, flags);
1604 #endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
1606 // Callback to enumerate all object references held in handles.
// Handle-scan callback: forwards the handle's object slot to the promote
// callback. lp1 carries the ScanContext*, lp2 the promote_func*; the trailing
// 0 passes no flags.
1607 void CALLBACK ScanPointer(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pExtraInfo, uintptr_t lp1, uintptr_t lp2)
1609 WRAPPER_NO_CONTRACT;
1610 UNREFERENCED_PARAMETER(pExtraInfo);
1612 Object **pRef = (Object **)pObjRef;
1614 promote_func* callback = (promote_func*)lp2;
1615 callback(pRef, (ScanContext *)lp1, 0);
1618 // Enumerate all object references held by any of the handle tables in the system.
// Single-threaded enumeration of every object reference held by any handle
// table in the system, reporting each through 'fn' via the ScanPointer
// callback above.
1619 void Ref_ScanPointers(uint32_t condemned, uint32_t maxgen, ScanContext* sc, Ref_promote_func* fn)
1621 WRAPPER_NO_CONTRACT;
// NOTE(review): the leading entries of the 'types' array are elided from this
// view — confirm against the full file.
1628 #if defined(FEATURE_COMINTEROP) || defined(FEATURE_REDHAWK)
1630 #endif // FEATURE_COMINTEROP || FEATURE_REDHAWK
1632 HNDTYPE_ASYNCPINNED,
1636 uint32_t flags = HNDGCF_NORMAL;
// NOTE(review): the condition/increment of this walk loop are elided from
// this view — confirm against the full file.
1638 // perform a multi-type scan that enumerates pointers
1639 for (HandleTableMap * walk = &g_HandleTableMap;
1643 for (uint32_t i = 0; i < INITIAL_HANDLE_TABLE_ARRAY_SIZE; i++)
1645 if (walk->pBuckets[i] != NULL)
1647 // This is one of the Ref_* functions performed by a single thread in the MULTI_HEAPS case, so we need to loop through all handle tables of the bucket.
1648 for (int uCPUindex = 0; uCPUindex < getNumberOfSlots(); uCPUindex++)
1650 HHANDLETABLE hTable = walk->pBuckets[i]->pTable[uCPUindex];
1652 HndScanHandlesForGC(hTable, &ScanPointer, uintptr_t(sc), uintptr_t(fn), types, _countof(types), condemned, maxgen, flags);
1658 // enumerate pointers in variable handles whose dynamic type is VHT_WEAK_SHORT, VHT_WEAK_LONG or VHT_STRONG
1659 TraceVariableHandlesBySingleThread(&ScanPointer, uintptr_t(sc), uintptr_t(fn), VHT_WEAK_SHORT | VHT_WEAK_LONG | VHT_STRONG, condemned, maxgen, flags);
// After compaction, update object pointers held in pinning handles (regular
// and async-pinned) plus variable handles whose dynamic type is VHT_PINNED.
1662 void Ref_UpdatePinnedPointers(uint32_t condemned, uint32_t maxgen, ScanContext* sc, Ref_promote_func* fn)
1664 WRAPPER_NO_CONTRACT;
1666 LOG((LF_GC, LL_INFO10000, "Updating pointers to referents of pinning handles in generation %u\n", condemned));
1668 // these are the handle types that need their pointers updated
1669 uint32_t types[2] = {HNDTYPE_PINNED, HNDTYPE_ASYNCPINNED};
1670 uint32_t flags = (sc->concurrent) ? HNDGCF_ASYNC : HNDGCF_NORMAL;
1672 HandleTableMap *walk = &g_HandleTableMap;
1674 for (uint32_t i = 0; i < INITIAL_HANDLE_TABLE_ARRAY_SIZE; i ++)
1675 if (walk->pBuckets[i] != NULL)
// Each GC thread updates only its own per-heap slot of the bucket.
1677 HHANDLETABLE hTable = walk->pBuckets[i]->pTable[getSlotNumber(sc)];
1679 HndScanHandlesForGC(hTable, UpdatePointerPinned, uintptr_t(sc), uintptr_t(fn), types, _countof(types), condemned, maxgen, flags);
1684 // update pointers in variable handles whose dynamic type is VHT_PINNED
1685 TraceVariableHandles(UpdatePointerPinned, uintptr_t(sc), uintptr_t(fn), VHT_PINNED, condemned, maxgen, flags);
// Age the handles of the listed types in the condemned generations: the scan
// is run with a NULL callback and the HNDGCF_AGE flag, so only the handles'
// generation/age bookkeeping is updated. lp1 is really a ScanContext*.
1689 void Ref_AgeHandles(uint32_t condemned, uint32_t maxgen, uintptr_t lp1)
1691 WRAPPER_NO_CONTRACT;
1693 LOG((LF_GC, LL_INFO10000, "Aging handles in generation %u\n", condemned));
// NOTE(review): the leading entries of the 'types' array are elided from this
// view — confirm against the full file.
1695 // these are the handle types that need their ages updated
1705 #if defined(FEATURE_COMINTEROP) || defined(FEATURE_REDHAWK)
1707 #endif // FEATURE_COMINTEROP || FEATURE_REDHAWK
1708 #ifdef FEATURE_COMINTEROP
1710 #endif // FEATURE_COMINTEROP
1711 HNDTYPE_ASYNCPINNED,
// Each GC thread ages only its own per-heap slot of every bucket.
1715 int uCPUindex = getSlotNumber((ScanContext*) lp1);
1716 // perform a multi-type scan that ages the handles
1717 HandleTableMap *walk = &g_HandleTableMap;
1719 for (uint32_t i = 0; i < INITIAL_HANDLE_TABLE_ARRAY_SIZE; i ++)
1720 if (walk->pBuckets[i] != NULL)
1722 HHANDLETABLE hTable = walk->pBuckets[i]->pTable[uCPUindex];
1724 HndScanHandlesForGC(hTable, NULL, 0, 0, types, _countof(types), condemned, maxgen, HNDGCF_AGE);
// Reset the recorded ages of the listed handle types back via HndResetAgeMap
// (the counterpart to Ref_AgeHandles above). lp1 is really a ScanContext*.
1731 void Ref_RejuvenateHandles(uint32_t condemned, uint32_t maxgen, uintptr_t lp1)
1733 WRAPPER_NO_CONTRACT;
1735 LOG((LF_GC, LL_INFO10000, "Rejuvenating handles.\n"));
// NOTE(review): the leading entries of the 'types' array are elided from this
// view — confirm against the full file.
1737 // these are the handle types that need their ages updated
1748 #if defined(FEATURE_COMINTEROP) || defined(FEATURE_REDHAWK)
1750 #endif // FEATURE_COMINTEROP || FEATURE_REDHAWK
1751 #ifdef FEATURE_COMINTEROP
1753 #endif // FEATURE_COMINTEROP
1754 HNDTYPE_ASYNCPINNED,
// Each GC thread resets only its own per-heap slot of every bucket.
1758 int uCPUindex = getSlotNumber((ScanContext*) lp1);
1759 // reset the ages of these handles
1760 HandleTableMap *walk = &g_HandleTableMap;
1762 for (uint32_t i = 0; i < INITIAL_HANDLE_TABLE_ARRAY_SIZE; i ++)
1763 if (walk->pBuckets[i] != NULL)
1765 HHANDLETABLE hTable = walk->pBuckets[i]->pTable[uCPUindex];
1767 HndResetAgeMap(hTable, types, _countof(types), condemned, maxgen, HNDGCF_NORMAL);
// Debug validation pass: run HndVerifyTable over the listed handle types in
// this thread's per-heap slot of every bucket.
1773 void Ref_VerifyHandleTable(uint32_t condemned, uint32_t maxgen, ScanContext* sc)
1775 WRAPPER_NO_CONTRACT;
1777 LOG((LF_GC, LL_INFO10000, "Verifying handles.\n"));
// NOTE(review): the leading entries of the 'types' array are elided from this
// view — confirm against the full file.
1779 // these are the handle types that need to be verified
1790 #if defined(FEATURE_COMINTEROP) || defined(FEATURE_REDHAWK)
1792 #endif // FEATURE_COMINTEROP || FEATURE_REDHAWK
1793 #ifdef FEATURE_COMINTEROP
1795 #endif // FEATURE_COMINTEROP
1796 HNDTYPE_ASYNCPINNED,
1801 // verify these handles
1802 HandleTableMap *walk = &g_HandleTableMap;
1805 for (uint32_t i = 0; i < INITIAL_HANDLE_TABLE_ARRAY_SIZE; i ++)
1807 if (walk->pBuckets[i] != NULL)
1809 HHANDLETABLE hTable = walk->pBuckets[i]->pTable[getSlotNumber(sc)];
1811 HndVerifyTable(hTable, types, _countof(types), condemned, maxgen, HNDGCF_NORMAL);
// Return the calling thread's home GC heap number, as reported by the GC heap
// implementation. Requires the GC heap to have been initialized.
1818 int GetCurrentThreadHomeHeapNumber()
1820 WRAPPER_NO_CONTRACT;
1822 assert(g_theGCHeap != nullptr);
1823 return g_theGCHeap->GetHomeHeapNumber();
// Report whether 'handle' belongs to one of this bucket's per-heap handle
// tables, by comparing the handle's owning table against each slot.
1826 bool HandleTableBucket::Contains(OBJECTHANDLE handle)
1828 LIMITED_METHOD_CONTRACT;
1835 HHANDLETABLE hTable = HndGetHandleTable(handle);
1836 for (int uCPUindex=0; uCPUindex < g_theGCHeap->GetNumberOfHeaps(); uCPUindex++)
// NOTE(review): the return statements for the match / no-match cases (and any
// early-out for a null handle) are elided from this view — confirm against
// the full file.
1838 if (hTable == this->pTable[uCPUindex])
1846 #endif // !DACCESS_COMPILE
// Return the secondary object of a dependent handle. Per the dependent-handle
// comment earlier in this file, the secondary is stored in the handle's extra
// info word, which is reinterpreted here as an object reference.
1849 OBJECTREF GetDependentHandleSecondary(OBJECTHANDLE handle)
1851 WRAPPER_NO_CONTRACT;
1853 return UNCHECKED_OBJECTREF_TO_OBJECTREF((_UNCHECKED_OBJECTREF)HndGetHandleExtraInfo(handle));
1856 void PopulateHandleTableDacVars(GcDacVars* gcDacVars)
1858 static_assert(offsetof(HandleTableMap, pBuckets) == offsetof(dac_handle_table_map, pBuckets), "handle table map DAC layout mismatch");
1859 static_assert(offsetof(HandleTableMap, pNext) == offsetof(dac_handle_table_map, pNext), "handle table map DAC layout mismatch");
1860 static_assert(offsetof(HandleTableMap, dwMaxIndex) == offsetof(dac_handle_table_map, dwMaxIndex), "handle table map DAC layout mismatch");
1861 static_assert(offsetof(HandleTableBucket, pTable) == offsetof(dac_handle_table_bucket, pTable), "handle table bucket DAC layout mismatch");
1862 static_assert(offsetof(HandleTableBucket, HandleTableIndex) == offsetof(dac_handle_table_bucket, HandleTableIndex), "handle table bucket DAC layout mismatch");
1863 static_assert(offsetof(HandleTable, uADIndex) == offsetof(dac_handle_table, uADIndex), "handle table DAC layout mismatch");
1865 #ifndef DACCESS_COMPILE
1866 gcDacVars->handle_table_map = reinterpret_cast<dac_handle_table_map*>(&g_HandleTableMap);
1867 #endif // DACCESS_COMPILE