1 // Licensed to the .NET Foundation under one or more agreements.
2 // The .NET Foundation licenses this file to you under the MIT license.
3 // See the LICENSE file in the project root for more information.
9 // Definition of a SyncBlock and the SyncBlockCache which manages it
23 #include "interoputil.h"
25 #include "perfcounters.h"
26 #include "eventtrace.h"
27 #include "dllimportcallback.h"
28 #include "comcallablewrapper.h"
31 #include "comdelegate.h"
32 #include "finalizerthread.h"
35 #ifdef FEATURE_COMINTEROP
36 #include "runtimecallablewrapper.h"
37 #endif // FEATURE_COMINTEROP
39 // Allocate 1 page worth. Typically enough
40 #define MAXSYNCBLOCK ((PAGE_SIZE-sizeof(void*))/sizeof(SyncBlock))
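// Illustrative arithmetic (a sketch only -- sizeof(SyncBlock) varies by platform and build):
// with a 4 KB page and an 8-byte pointer, MAXSYNCBLOCK is (4096 - 8) / sizeof(SyncBlock),
// i.e. on the order of several dozen SyncBlocks per chunk. The "- sizeof(void*)" accounts for
// SyncBlockArray::m_Next below, so one SyncBlockArray fits in roughly one page.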
41 #define SYNC_TABLE_INITIAL_SIZE 250
48 SyncBlockArray *m_Next;
49 BYTE m_Blocks[MAXSYNCBLOCK * sizeof (SyncBlock)];
52 // For in-place constructor
53 BYTE g_SyncBlockCacheInstance[sizeof(SyncBlockCache)];
55 SPTR_IMPL (SyncBlockCache, SyncBlockCache, s_pSyncBlockCache);
57 #ifndef DACCESS_COMPILE
61 void SyncBlock::OnADUnload()
67 m_pEnCInfo->Cleanup();
75 SLIST_HEADER InteropSyncBlockInfo::s_InteropInfoStandbyList;
76 #endif // !FEATURE_PAL
78 InteropSyncBlockInfo::~InteropSyncBlockInfo()
89 FreeUMEntryThunkOrInterceptStub();
93 // Deletes all items in code:s_InteropInfoStandbyList.
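// The InteropSyncBlockInfo destructor has already been run in place by the time an entry is
// pushed onto the standby list (see DeleteSyncBlock below, which calls ~InteropSyncBlockInfo()
// explicitly before InterlockedPushEntrySList), so this routine only reclaims the raw memory.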
94 void InteropSyncBlockInfo::FlushStandbyList()
104 PSLIST_ENTRY pEntry = InterlockedFlushSList(&InteropSyncBlockInfo::s_InteropInfoStandbyList);
107 PSLIST_ENTRY pNextEntry = pEntry->Next;
109 // make sure to use the global delete since the destructor has already run
110 ::delete (void *)pEntry;
114 #endif // !FEATURE_PAL
116 void InteropSyncBlockInfo::FreeUMEntryThunkOrInterceptStub()
129 void *pUMEntryThunk = GetUMEntryThunk();
130 if (pUMEntryThunk != NULL)
132 COMDelegate::RemoveEntryFromFPtrHash((UPTR)pUMEntryThunk);
133 UMEntryThunk::FreeUMEntryThunk((UMEntryThunk *)pUMEntryThunk);
137 #if defined(_TARGET_X86_)
138 Stub *pInterceptStub = GetInterceptStub();
140 if (pInterceptStub != NULL)
142                 // There may be multiple chained stubs, e.g. a host hook stub calling the MDA
143                 // stack-imbalance stub, and the following DecRef will free all of them.
144 pInterceptStub->DecRef();
146 #else // _TARGET_X86_
147 // Intercept stubs are currently not used on other platforms.
148 _ASSERTE(GetInterceptStub() == NULL);
149 #endif // _TARGET_X86_
152 m_pUMEntryThunkOrInterceptStub = NULL;
155 #ifdef FEATURE_COMINTEROP
156 // Returns either NULL or an RCW whose use count has been incremented.
157 RCW* InteropSyncBlockInfo::GetRCWAndIncrementUseCount()
159 LIMITED_METHOD_CONTRACT;
161 DWORD dwSwitchCount = 0;
164 RCW *pRCW = VolatileLoad(&m_pRCW);
165 if ((size_t)pRCW <= 0x1)
167 // the RCW never existed or has been released
171 if (((size_t)pRCW & 0x1) == 0x0)
173 // it looks like we have a chance, try to acquire the lock
174 RCW *pLockedRCW = (RCW *)((size_t)pRCW | 0x1);
175 if (InterlockedCompareExchangeT(&m_pRCW, pLockedRCW, pRCW) == pRCW)
177 // we have the lock on the m_pRCW field, now we can safely "use" the RCW
178 pRCW->IncrementUseCount();
180 // release the m_pRCW lock
181 VolatileStore(&m_pRCW, pRCW);
183 // and return the RCW
188 // somebody else holds the lock, retry
189 __SwitchToThread(0, ++dwSwitchCount);
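// Illustrative caller pattern (a sketch; it assumes the caller balances the use count with
// RCW::DecrementUseCount once it is done -- the matching release call is not shown in this file):
//
//     RCW *pRCW = pInteropInfo->GetRCWAndIncrementUseCount();
//     if (pRCW != NULL)
//     {
//         // ... use the RCW without fear of it being released underneath us ...
//         pRCW->DecrementUseCount();
//     }
//
// The low bit of m_pRCW acts as a tiny spin lock: the interlocked compare-exchange above sets it
// only long enough to bump the use count, and then the original pointer is stored back.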
193 // Sets the m_pRCW field in a thread-safe manner, pRCW can be NULL.
194 void InteropSyncBlockInfo::SetRawRCW(RCW* pRCW)
196 LIMITED_METHOD_CONTRACT;
200 // we never set two different RCWs on a single object
201 _ASSERTE(m_pRCW == NULL);
206 DWORD dwSwitchCount = 0;
209 RCW *pOldRCW = VolatileLoad(&m_pRCW);
211 if ((size_t)pOldRCW <= 0x1)
213 // the RCW never existed or has been released
214 VolatileStore(&m_pRCW, (RCW *)0x1);
218 if (((size_t)pOldRCW & 0x1) == 0x0)
220 // it looks like we have a chance, set the RCW to 0x1
221 if (InterlockedCompareExchangeT(&m_pRCW, (RCW *)0x1, pOldRCW) == pOldRCW)
228 // somebody else holds the lock, retry
229 __SwitchToThread(0, ++dwSwitchCount);
233 #endif // FEATURE_COMINTEROP
235 void UMEntryThunk::OnADUnload()
237 LIMITED_METHOD_CONTRACT;
238 m_pObjectHandle = NULL;
241 #endif // !DACCESS_COMPILE
243 PTR_SyncTableEntry SyncTableEntry::GetSyncTableEntry()
245 LIMITED_METHOD_CONTRACT;
248 return (PTR_SyncTableEntry)g_pSyncTable;
251 #ifndef DACCESS_COMPILE
253 SyncTableEntry*& SyncTableEntry::GetSyncTableEntryByRef()
255 LIMITED_METHOD_CONTRACT;
260 SyncBlockCache*& SyncBlockCache::GetSyncBlockCache()
262 LIMITED_METHOD_CONTRACT;
264 return s_pSyncBlockCache;
268 //----------------------------------------------------------------------------
270 // ThreadQueue Implementation
272 //----------------------------------------------------------------------------
273 #endif //!DACCESS_COMPILE
275 // Given a link in the chain, get the WaitEventLink (and hence the Thread) that it represents
277 inline PTR_WaitEventLink ThreadQueue::WaitEventLinkForLink(PTR_SLink pLink)
279 LIMITED_METHOD_CONTRACT;
281 return (PTR_WaitEventLink) (((PTR_BYTE) pLink) - offsetof(WaitEventLink, m_LinkSB));
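// This is the usual "container-of" idiom (compare CONTAINING_RECORD in the Windows headers):
// WaitEventLink embeds an SLink named m_LinkSB, so subtracting offsetof(WaitEventLink, m_LinkSB)
// from the address of the embedded link recovers the surrounding WaitEventLink, from which
// m_Thread and m_WaitSB can then be read.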
284 #ifndef DACCESS_COMPILE
286 // Unlink the head of the Q. We are always in the SyncBlock's critical
289 inline WaitEventLink *ThreadQueue::DequeueThread(SyncBlock *psb)
300 // Be careful, the debugger inspects the queue from out of process and just looks at the memory...
301 // it must be valid even if the lock is held. Be careful if you change the way the queue is updated.
302 SyncBlockCache::LockHolder lh(SyncBlockCache::GetSyncBlockCache());
304 WaitEventLink *ret = NULL;
305 SLink *pLink = psb->m_Link.m_pNext;
309 psb->m_Link.m_pNext = pLink->m_pNext;
311 pLink->m_pNext = (SLink *)POISONC;
313 ret = WaitEventLinkForLink(pLink);
314 _ASSERTE(ret->m_WaitSB == psb);
315 COUNTER_ONLY(GetPerfCounters().m_LocksAndThreads.cQueueLength--);
320 // Enqueue is the slow one. We have to find the end of the Q since we don't
321 // want to burn storage for this in the SyncBlock.
323 inline void ThreadQueue::EnqueueThread(WaitEventLink *pWaitEventLink, SyncBlock *psb)
334 _ASSERTE (pWaitEventLink->m_LinkSB.m_pNext == NULL);
336 // Be careful, the debugger inspects the queue from out of process and just looks at the memory...
337 // it must be valid even if the lock is held. Be careful if you change the way the queue is updated.
338 SyncBlockCache::LockHolder lh(SyncBlockCache::GetSyncBlockCache());
340 COUNTER_ONLY(GetPerfCounters().m_LocksAndThreads.cQueueLength++);
342 SLink *pPrior = &psb->m_Link;
344 while (pPrior->m_pNext)
346 // We shouldn't already be in the waiting list!
347 _ASSERTE(pPrior->m_pNext != &pWaitEventLink->m_LinkSB);
349 pPrior = pPrior->m_pNext;
351 pPrior->m_pNext = &pWaitEventLink->m_LinkSB;
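// Note on the queue shape: psb->m_Link is an embedded dummy head, waiters are appended at the
// tail, and DequeueThread above pops from the head, so waiting threads are woken in FIFO order.
// The O(n) walk to find the tail is the cost of not storing a tail pointer in the SyncBlock,
// as the comment above explains.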
355 // Wade through the SyncBlock's list of waiting threads and remove the
358 BOOL ThreadQueue::RemoveThread (Thread *pThread, SyncBlock *psb)
370 // Be careful, the debugger inspects the queue from out of process and just looks at the memory...
371 // it must be valid even if the lock is held. Be careful if you change the way the queue is updated.
372 SyncBlockCache::LockHolder lh(SyncBlockCache::GetSyncBlockCache());
374 SLink *pPrior = &psb->m_Link;
376 WaitEventLink *pWaitEventLink;
378 while ((pLink = pPrior->m_pNext) != NULL)
380 pWaitEventLink = WaitEventLinkForLink(pLink);
381 if (pWaitEventLink->m_Thread == pThread)
383 pPrior->m_pNext = pLink->m_pNext;
385 pLink->m_pNext = (SLink *)POISONC;
387 _ASSERTE(pWaitEventLink->m_WaitSB == psb);
388 COUNTER_ONLY(GetPerfCounters().m_LocksAndThreads.cQueueLength--);
397 #endif //!DACCESS_COMPILE
399 #ifdef DACCESS_COMPILE
400 // Enumerates the threads in the queue from front to back by calling
401 // pCallbackFunction on each one
403 void ThreadQueue::EnumerateThreads(SyncBlock *psb, FP_TQ_THREAD_ENUMERATION_CALLBACK pCallbackFunction, void* pUserData)
414 PTR_SLink pLink = psb->m_Link.m_pNext;
415 PTR_WaitEventLink pWaitEventLink;
417 while (pLink != NULL)
419 pWaitEventLink = WaitEventLinkForLink(pLink);
421 pCallbackFunction(pWaitEventLink->m_Thread, pUserData);
422 pLink = pLink->m_pNext;
425 #endif //DACCESS_COMPILE
427 #ifndef DACCESS_COMPILE
429 // ***************************************************************************
431 // Ephemeral Bitmap Helper
433 // ***************************************************************************
437 #define card_word_width 32
439 size_t CardIndex (size_t card)
441 LIMITED_METHOD_CONTRACT;
442 return card_size * card;
445 size_t CardOf (size_t idx)
447 LIMITED_METHOD_CONTRACT;
448 return idx / card_size;
451 size_t CardWord (size_t card)
453 LIMITED_METHOD_CONTRACT;
454 return card / card_word_width;
457 unsigned CardBit (size_t card)
459 LIMITED_METHOD_CONTRACT;
460 return (unsigned)(card % card_word_width);
464 void SyncBlockCache::SetCard (size_t card)
467 m_EphemeralBitmap [CardWord (card)] =
468 (m_EphemeralBitmap [CardWord (card)] | (1 << CardBit (card)));
472 void SyncBlockCache::ClearCard (size_t card)
475 m_EphemeralBitmap [CardWord (card)] =
476 (m_EphemeralBitmap [CardWord (card)] & ~(1 << CardBit (card)));
480 BOOL SyncBlockCache::CardSetP (size_t card)
483 return ( m_EphemeralBitmap [ CardWord (card) ] & (1 << CardBit (card)));
487 void SyncBlockCache::CardTableSetBit (size_t idx)
490 SetCard (CardOf (idx));
494 size_t BitMapSize (size_t cacheSize)
496 LIMITED_METHOD_CONTRACT;
498 return (cacheSize + card_size * card_word_width - 1)/ (card_size * card_word_width);
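// Putting the helpers above together (illustrative; card_size is defined elsewhere in this file):
// sync table index idx belongs to card CardOf(idx) = idx / card_size, that card lives in DWORD
// CardWord(card) = card / 32 of m_EphemeralBitmap at bit CardBit(card) = card % 32, and
// BitMapSize() rounds up so every possible card has a bit. CardTableSetBit(idx) above is simply
// SetCard(CardOf(idx)).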
501 // ***************************************************************************
503 // SyncBlockCache class implementation
505 // ***************************************************************************
507 SyncBlockCache::SyncBlockCache()
508 : m_pCleanupBlockList(NULL),
509 m_FreeBlockList(NULL),
511 // NOTE: CRST_UNSAFE_ANYMODE prevents a GC mode switch when entering this crst.
512 // If you remove this flag, we will switch to preemptive mode when entering
513 // g_criticalSection, which means all functions that enter it will become
514 // GC_TRIGGERS. (This includes all uses of LockHolder around SyncBlockCache::GetSyncBlockCache().
515 // So be sure to update the contracts if you remove this flag.
516 m_CacheLock(CrstSyncBlockCache, (CrstFlags) (CRST_UNSAFE_ANYMODE | CRST_DEBUGGER_THREAD)),
522 m_FreeSyncTableIndex(1),
523 m_FreeSyncTableList(0),
524 m_SyncTableSize(SYNC_TABLE_INITIAL_SIZE),
526 m_bSyncBlockCleanupInProgress(FALSE),
535 INJECT_FAULT(COMPlusThrowOM());
541 // This method is NO longer called.
542 SyncBlockCache::~SyncBlockCache()
553 // Clear the list the fast way.
554 m_FreeBlockList = NULL;
555 //<TODO>@todo we can clear this fast too I guess</TODO>
556 m_pCleanupBlockList = NULL;
558 // destruct all arrays
561 SyncBlockArray *next = m_SyncBlocks->m_Next;
566 // Also, now is a good time to clean up all the old tables which we discarded
567 // when we overflowed them.
569 while ((arr = m_OldSyncTables) != 0)
571 m_OldSyncTables = (SyncTableEntry*)arr[0].m_Object.Load();
577 // When the GC determines that an object is dead, the low bit of the
578 // m_Object field of its SyncTableEntry is set; however, the entry is not
579 // cleaned up immediately, because we can't do the COM interop cleanup at GC time.
580 // Instead it is put on a cleanup list, and at a later time (typically during
581 // finalization) this list is cleaned up.
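// In code, a "dead" entry is recognized by that low bit, e.g.
//     ((size_t)SyncTableEntry::GetSyncTableEntry()[nb].m_Object.Load() & 1) != 0
// which is exactly the test used by CleanupSyncBlocksInAppDomain and GCWeakPtrScanElement below.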
583 void SyncBlockCache::CleanupSyncBlocks()
585 STATIC_CONTRACT_THROWS;
586 STATIC_CONTRACT_MODE_COOPERATIVE;
588 _ASSERTE(GetThread() == FinalizerThread::GetFinalizerThread());
590 // Set the flag indicating sync block cleanup is in progress.
591 // IMPORTANT: This must be set before the sync block cleanup bit is reset on the thread.
592 m_bSyncBlockCleanupInProgress = TRUE;
596 SyncBlockCache *pThis;
598 #ifdef FEATURE_COMINTEROP
604 #ifdef FEATURE_COMINTEROP
608 EE_TRY_FOR_FINALLY(Param *, pParam, ¶m)
611 FinalizerThread::GetFinalizerThread()->ResetSyncBlockCleanup();
613 // walk the cleanup list and cleanup 'em up
614 while ((pParam->psb = pParam->pThis->GetNextCleanupSyncBlock()) != NULL)
616 #ifdef FEATURE_COMINTEROP
617 InteropSyncBlockInfo* pInteropInfo = pParam->psb->GetInteropInfoNoCreate();
620 pParam->pRCW = pInteropInfo->GetRawRCW();
623 // We should have initialized the cleanup list with the
624 // first RCW cache we created
625 _ASSERTE(g_pRCWCleanupList != NULL);
627 g_pRCWCleanupList->AddWrapper(pParam->pRCW);
630 pInteropInfo->SetRawRCW(NULL);
633 #endif // FEATURE_COMINTEROP
635 // Delete the sync block.
636 pParam->pThis->DeleteSyncBlock(pParam->psb);
639 // pulse GC mode to allow GC to perform its work
640 if (FinalizerThread::GetFinalizerThread()->CatchAtSafePointOpportunistic())
642 FinalizerThread::GetFinalizerThread()->PulseGCMode();
646 #ifdef FEATURE_COMINTEROP
647     // Now clean up the RCWs, sorted by context
648 if (g_pRCWCleanupList != NULL)
649 g_pRCWCleanupList->CleanupAllWrappers();
650 #endif // FEATURE_COMINTEROP
654 // We are finished cleaning up the sync blocks.
655 m_bSyncBlockCleanupInProgress = FALSE;
657 #ifdef FEATURE_COMINTEROP
659 param.pRCW->Cleanup();
663 DeleteSyncBlock(param.psb);
667 // When an appdomain is unloading, we need to ensure that any pointers to
668 // it from sync blocks (e.g. from COM Callable Wrappers) are properly
669 // updated so that they fail gracefully if another call is made from
670 // them. This is what this routine does.
672 VOID SyncBlockCache::CleanupSyncBlocksInAppDomain(AppDomain *pDomain)
682 #ifndef DACCESS_COMPILE
683 _ASSERTE(IsFinalizerThread());
685 ADIndex index = pDomain->GetIndex();
687 ADID id = pDomain->GetId();
689     // Make sure we don't race with anybody updating the table
693         // Taking this lock here avoids races where m_FreeSyncTableIndex is being updated.
694         // (a volatile read would have been enough, however).
695 SyncBlockCache::LockHolder lh(SyncBlockCache::GetSyncBlockCache());
696 maxIndex = m_FreeSyncTableIndex;
698 BOOL bModifiedCleanupList=FALSE;
699 STRESS_LOG1(LF_APPDOMAIN, LL_INFO100, "To cleanup - %d sync blocks", maxIndex);
701 for (nb = 1; nb < maxIndex; nb++)
703 // This is a check for syncblocks that were already cleaned up.
704 if ((size_t)SyncTableEntry::GetSyncTableEntry()[nb].m_Object.Load() & 1)
709 // If the syncblock pointer is invalid, nothing more we can do.
710 SyncBlock *pSyncBlock = SyncTableEntry::GetSyncTableEntry()[nb].m_SyncBlock;
716 // If we happen to have a CCW living in the AppDomain being cleaned, then we need to neuter it.
717 // We do this check early because we have to neuter CCWs for agile objects as well.
718 // Neutering the object simply means we disconnect the object from the CCW so it can no longer
719 // be used. When its ref-count falls to zero, it gets cleaned up.
720 STRESS_LOG1(LF_APPDOMAIN, LL_INFO1000000, "SyncBlock %p.", pSyncBlock);
721 InteropSyncBlockInfo* pInteropInfo = pSyncBlock->GetInteropInfoNoCreate();
724 #ifdef FEATURE_COMINTEROP
725 ComCallWrapper* pWrap = pInteropInfo->GetCCW();
728 SimpleComCallWrapper* pSimpleWrapper = pWrap->GetSimpleWrapper();
729 _ASSERTE(pSimpleWrapper);
731 if (pSimpleWrapper->GetDomainID() == id)
733 pSimpleWrapper->Neuter();
736 #endif // FEATURE_COMINTEROP
738 UMEntryThunk* umThunk=(UMEntryThunk*)pInteropInfo->GetUMEntryThunk();
740 if (umThunk && umThunk->GetDomainId()==id)
742 umThunk->OnADUnload();
743 STRESS_LOG1(LF_APPDOMAIN, LL_INFO100, "Thunk %x unloaded", umThunk);
746 #ifdef FEATURE_COMINTEROP
748 // we need to take RCWCache lock to avoid the race with another thread which is
749 // removing the RCW from cache, decoupling it from the object, and deleting the RCW.
750 RCWCache* pCache = pDomain->GetRCWCache();
752 RCWCache::LockHolder lh(pCache);
753 RCW* pRCW = pInteropInfo->GetRawRCW();
754 if (pRCW && pRCW->GetDomain()==pDomain)
756 // We should have initialized the cleanup list with the
757 // first RCW cache we created
758 _ASSERTE(g_pRCWCleanupList != NULL);
760 g_pRCWCleanupList->AddWrapper(pRCW);
762 pCache->RemoveWrapper(pRCW);
763 pInteropInfo->SetRawRCW(NULL);
764 bModifiedCleanupList=TRUE;
767 #endif // FEATURE_COMINTEROP
770 // NOTE: this will only notify the sync block if it is non-agile and living in the unloading domain.
771 // Agile objects that are still alive will not get notification!
772 if (pSyncBlock->GetAppDomainIndex() == index)
774 pSyncBlock->OnADUnload();
777 STRESS_LOG1(LF_APPDOMAIN, LL_INFO100, "AD cleanup - %d sync blocks done", nb);
778 // Make sure nobody decreased m_FreeSyncTableIndex behind our back (we would read
780 _ASSERTE(maxIndex <= m_FreeSyncTableIndex);
782 if (bModifiedCleanupList)
783 GetThread()->SetSyncBlockCleanup();
785 while (GetThread()->RequireSyncBlockCleanup()) //we also might have something in the cleanup list
790 SyncBlockCache::LockHolder lh(SyncBlockCache::GetSyncBlockCache());
791 DWORD maxIndex = m_FreeSyncTableIndex;
792 for (DWORD nb = 1; nb < maxIndex; nb++)
794 if ((size_t)SyncTableEntry::GetSyncTableEntry()[nb].m_Object.Load() & 1)
799 // If the syncblock pointer is invalid, nothing more we can do.
800 SyncBlock *pSyncBlock = SyncTableEntry::GetSyncTableEntry()[nb].m_SyncBlock;
805 InteropSyncBlockInfo* pInteropInfo = pSyncBlock->GetInteropInfoNoCreate();
808 UMEntryThunk* umThunk=(UMEntryThunk*)pInteropInfo->GetUMEntryThunk();
810 if (umThunk && umThunk->GetDomainId()==id)
812 _ASSERTE(!umThunk->GetObjectHandle());
824 // create the sync block cache
826 void SyncBlockCache::Attach()
828 LIMITED_METHOD_CONTRACT;
831 // destroy the sync block cache
832 // This method is NO longer called.
834 void SyncBlockCache::DoDetach()
849 // Ensure that all the critical sections are released. This is particularly
850 // important in DEBUG, because all critical sections are threaded onto a global
851 // list which would otherwise be corrupted.
852 for (DWORD i=0; i<m_FreeSyncTableIndex; i++)
853 if (((size_t)SyncTableEntry::GetSyncTableEntry()[i].m_Object & 1) == 0)
854 if (SyncTableEntry::GetSyncTableEntry()[i].m_SyncBlock)
856 // <TODO>@TODO -- If threads are executing during this detach, they will
857 // fail in various ways:
859 // 1) They will race between us tearing these data structures down
860 // as they navigate through them.
862 // 2) They will unexpectedly see the syncblock destroyed, even though
863 // they hold the synchronization lock, or have been exposed out
866 // 3) The instance's hash code may change during the shutdown.
868 // The correct solution involves suspending the threads earlier, but
869 // changing our suspension code so that it allows pumping if we are
870 // in a shutdown case.
874 // Make sure this gets updated because the finalizer thread & others
875 // will continue to run for a short while more during our shutdown.
876 pObj = SyncTableEntry::GetSyncTableEntry()[i].m_Object;
877 pHeader = pObj->GetHeader();
880 ENTER_SPIN_LOCK(pHeader);
881 ADIndex appDomainIndex = pHeader->GetAppDomainIndex();
882 if (! appDomainIndex.m_dwIndex)
884 SyncBlock* syncBlock = pObj->PassiveGetSyncBlock();
886 appDomainIndex = syncBlock->GetAppDomainIndex();
889 pHeader->ResetIndex();
891 if (appDomainIndex.m_dwIndex)
893 pHeader->SetIndex(appDomainIndex.m_dwIndex<<SBLK_APPDOMAIN_SHIFT);
895 LEAVE_SPIN_LOCK(pHeader);
898 SyncTableEntry::GetSyncTableEntry()[i].m_Object = (Object *)(m_FreeSyncTableList | 1);
899 m_FreeSyncTableList = i << 1;
901 DeleteSyncBlock(SyncTableEntry::GetSyncTableEntry()[i].m_SyncBlock);
906 // destroy the sync block cache
908 // This method is NO longer called.
910 void SyncBlockCache::Detach()
912 SyncBlockCache::GetSyncBlockCache()->DoDetach();
917 // create the sync block cache
919 void SyncBlockCache::Start()
926 INJECT_FAULT(COMPlusThrowOM(););
930 DWORD* bm = new DWORD [BitMapSize(SYNC_TABLE_INITIAL_SIZE+1)];
932 memset (bm, 0, BitMapSize (SYNC_TABLE_INITIAL_SIZE+1)*sizeof(DWORD));
934 SyncTableEntry::GetSyncTableEntryByRef() = new SyncTableEntry[SYNC_TABLE_INITIAL_SIZE+1];
936 for (int i=0; i<SYNC_TABLE_INITIAL_SIZE+1; i++) {
937 SyncTableEntry::GetSyncTableEntry()[i].m_SyncBlock = NULL;
941 SyncTableEntry::GetSyncTableEntry()[0].m_SyncBlock = 0;
942 SyncBlockCache::GetSyncBlockCache() = new (&g_SyncBlockCacheInstance) SyncBlockCache;
944 SyncBlockCache::GetSyncBlockCache()->m_EphemeralBitmap = bm;
947 InitializeSListHead(&InteropSyncBlockInfo::s_InteropInfoStandbyList);
948 #endif // !FEATURE_PAL
952 // destroy the sync block cache
954 void SyncBlockCache::Stop()
964 // cache must be destroyed first, since it can traverse the table to find all the
965 // sync blocks which are live and thus must have their critical sections destroyed.
966 if (SyncBlockCache::GetSyncBlockCache())
968 delete SyncBlockCache::GetSyncBlockCache();
969 SyncBlockCache::GetSyncBlockCache() = 0;
972 if (SyncTableEntry::GetSyncTableEntry())
974 delete SyncTableEntry::GetSyncTableEntry();
975 SyncTableEntry::GetSyncTableEntryByRef() = 0;
980 void SyncBlockCache::InsertCleanupSyncBlock(SyncBlock* psb)
991 // free up the threads that are waiting before we use the link
992 // for other purposes
993 if (psb->m_Link.m_pNext != NULL)
995 while (ThreadQueue::DequeueThread(psb) != NULL)
999 #ifdef FEATURE_COMINTEROP
1000 if (psb->m_pInteropInfo)
1003         // so do only minor cleanup
1004 MinorCleanupSyncBlockComData(psb->m_pInteropInfo);
1006 #endif // FEATURE_COMINTEROP
1008 // This method will be called only by the GC thread
1009 //<TODO>@todo add an assert for the above statement</TODO>
1010 // we don't need to lock here
1013 psb->m_Link.m_pNext = m_pCleanupBlockList;
1014 m_pCleanupBlockList = &psb->m_Link;
1016 // we don't need a lock here
1020 SyncBlock* SyncBlockCache::GetNextCleanupSyncBlock()
1022 LIMITED_METHOD_CONTRACT;
1024 // we don't need a lock here,
1025 // as this is called only on the finalizer thread currently
1027 SyncBlock *psb = NULL;
1028 if (m_pCleanupBlockList)
1030 // get the actual sync block pointer
1031 psb = (SyncBlock *) (((BYTE *) m_pCleanupBlockList) - offsetof(SyncBlock, m_Link));
1032 m_pCleanupBlockList = m_pCleanupBlockList->m_pNext;
1038 // returns and removes the next free syncblock from the list
1039 // the cache lock must be entered to call this
1040 SyncBlock *SyncBlockCache::GetNextFreeSyncBlock()
1044 INJECT_FAULT(COMPlusThrowOM());
1051 #ifdef _DEBUG // Instrumentation for OOM fault injection testing
1056 SLink *plst = m_FreeBlockList;
1062 m_FreeBlockList = m_FreeBlockList->m_pNext;
1067 // get the actual sync block pointer
1068 psb = (SyncBlock *) (((BYTE *) plst) - offsetof(SyncBlock, m_Link));
1074 if ((m_SyncBlocks == NULL) || (m_FreeSyncBlock >= MAXSYNCBLOCK))
1077 // LogSpewAlways("Allocating new syncblock array\n");
1078 // DumpSyncBlockCache();
1080 SyncBlockArray* newsyncblocks = new(SyncBlockArray);
1084 newsyncblocks->m_Next = m_SyncBlocks;
1085 m_SyncBlocks = newsyncblocks;
1086 m_FreeSyncBlock = 0;
1088 return &(((SyncBlock*)m_SyncBlocks->m_Blocks)[m_FreeSyncBlock++]);
1093 void SyncBlockCache::Grow()
1101 INJECT_FAULT(COMPlusThrowOM(););
1105 STRESS_LOG0(LF_SYNC, LL_INFO10000, "SyncBlockCache::NewSyncBlockSlot growing SyncBlockCache \n");
1107 NewArrayHolder<SyncTableEntry> newSyncTable (NULL);
1108 NewArrayHolder<DWORD> newBitMap (NULL);
1111 // Compute the size of the new synctable. Normally, we double it - unless
1112 // doing so would create slots with indices too high to fit within the
1113 // mask. If so, we create a synctable up to the mask limit. If we're
1114 // already at the mask limit, then caller is out of luck.
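    // Illustratively: starting from SYNC_TABLE_INITIAL_SIZE (250), the table grows
    // 250 -> 500 -> 1000 -> 2000 -> ... until doubling would exceed MASK_SYNCBLOCKINDEX,
    // at which point it is clamped to MASK_SYNCBLOCKINDEX and no further growth is possible.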
1115 DWORD newSyncTableSize;
1116 if (m_SyncTableSize <= (MASK_SYNCBLOCKINDEX >> 1))
1118 newSyncTableSize = m_SyncTableSize * 2;
1122 newSyncTableSize = MASK_SYNCBLOCKINDEX;
1125 if (!(newSyncTableSize > m_SyncTableSize)) // Make sure we actually found room to grow!
1130 newSyncTable = new SyncTableEntry[newSyncTableSize];
1131 newBitMap = new DWORD[BitMapSize (newSyncTableSize)];
1135 //! From here on, we assume that we will succeed and start doing global side-effects.
1136 //! Any operation that could fail must occur before this point.
1137 CANNOTTHROWCOMPLUSEXCEPTION();
1140 newSyncTable.SuppressRelease();
1141 newBitMap.SuppressRelease();
1144     // We chain the old tables because we can't delete
1145     // them before all the threads are stopped
1147 SyncTableEntry::GetSyncTableEntry() [0].m_Object = (Object *)m_OldSyncTables;
1148 m_OldSyncTables = SyncTableEntry::GetSyncTableEntry();
1150 memset (newSyncTable, 0, newSyncTableSize*sizeof (SyncTableEntry));
1151 memset (newBitMap, 0, BitMapSize (newSyncTableSize)*sizeof (DWORD));
1152 CopyMemory (newSyncTable, SyncTableEntry::GetSyncTableEntry(),
1153 m_SyncTableSize*sizeof (SyncTableEntry));
1155 CopyMemory (newBitMap, m_EphemeralBitmap,
1156 BitMapSize (m_SyncTableSize)*sizeof (DWORD));
1158 oldBitMap = m_EphemeralBitmap;
1159 m_EphemeralBitmap = newBitMap;
1162 _ASSERTE((m_SyncTableSize & MASK_SYNCBLOCKINDEX) == m_SyncTableSize);
1163     // note: we do not care if another thread does not see the new size;
1164     // however, we really do not want it to see the new size without seeing the new array
1165 //@TODO do we still leak here if two threads come here at the same time ?
1166 FastInterlockExchangePointer(&SyncTableEntry::GetSyncTableEntryByRef(), newSyncTable.GetValue());
1168 m_FreeSyncTableIndex++;
1170 m_SyncTableSize = newSyncTableSize;
1173 static int dumpSBOnResize = -1;
1175 if (dumpSBOnResize == -1)
1176 dumpSBOnResize = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_SBDumpOnResize);
1180 LogSpewAlways("SyncBlockCache resized\n");
1181 DumpSyncBlockCache();
1187 DWORD SyncBlockCache::NewSyncBlockSlot(Object *obj)
1195 INJECT_FAULT(COMPlusThrowOM(););
1198 _ASSERTE(m_CacheLock.OwnedByCurrentThread()); // GetSyncBlock takes the lock, make sure no one else does.
1200 DWORD indexNewEntry;
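    // Free sync table entries are chained through their m_Object field: a free entry stores the
    // index of the next free entry shifted left by one with the low bit set (the same low bit that
    // marks the entry as dead), and m_FreeSyncTableList holds the head index, also shifted left by
    // one. See GCWeakPtrScanElement below, which threads freed entries onto this list.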
1201 if (m_FreeSyncTableList)
1203 indexNewEntry = (DWORD)(m_FreeSyncTableList >> 1);
1204 _ASSERTE ((size_t)SyncTableEntry::GetSyncTableEntry()[indexNewEntry].m_Object.Load() & 1);
1205 m_FreeSyncTableList = (size_t)SyncTableEntry::GetSyncTableEntry()[indexNewEntry].m_Object.Load() & ~1;
1207 else if ((indexNewEntry = (DWORD)(m_FreeSyncTableIndex)) >= m_SyncTableSize)
1209 // This is kept out of line to keep stuff like the C++ EH prolog (needed for holders) off
1210 // of the common path.
1216 static int dumpSBOnNewIndex = -1;
1218 if (dumpSBOnNewIndex == -1)
1219 dumpSBOnNewIndex = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_SBDumpOnNewIndex);
1221 if (dumpSBOnNewIndex)
1223 LogSpewAlways("SyncBlockCache index incremented\n");
1224 DumpSyncBlockCache();
1227 m_FreeSyncTableIndex ++;
1231 CardTableSetBit (indexNewEntry);
1233 // In debug builds the m_SyncBlock at indexNewEntry should already be null, since we should
1234 // start out with a null table and always null it out on delete.
1235 _ASSERTE(SyncTableEntry::GetSyncTableEntry() [indexNewEntry].m_SyncBlock == NULL);
1236 SyncTableEntry::GetSyncTableEntry() [indexNewEntry].m_SyncBlock = NULL;
1237 SyncTableEntry::GetSyncTableEntry() [indexNewEntry].m_Object = obj;
1239 _ASSERTE(indexNewEntry != 0);
1241 return indexNewEntry;
1245 // free a used sync block, only called from CleanupSyncBlocks.
1246 void SyncBlockCache::DeleteSyncBlock(SyncBlock *psb)
1254 INJECT_FAULT(COMPlusThrowOM());
1259 if (psb->m_pInteropInfo)
1261 #ifdef FEATURE_COMINTEROP
1262 CleanupSyncBlockComData(psb->m_pInteropInfo);
1263 #endif // FEATURE_COMINTEROP
1268 delete psb->m_pInteropInfo;
1272 psb->m_pInteropInfo->~InteropSyncBlockInfo();
1273 InterlockedPushEntrySList(&InteropSyncBlockInfo::s_InteropInfoStandbyList, (PSLIST_ENTRY)psb->m_pInteropInfo);
1275 #else // !FEATURE_PAL
1276 delete psb->m_pInteropInfo;
1277 #endif // !FEATURE_PAL
1280 #ifdef EnC_SUPPORTED
1281 // clean up EnC info
1282 if (psb->m_pEnCInfo)
1283 psb->m_pEnCInfo->Cleanup();
1284 #endif // EnC_SUPPORTED
1286 // Destruct the SyncBlock, but don't reclaim its memory. (Overridden
1287 // operator delete).
1290     // synchronize with the consumers,
1291 // <TODO>@todo we don't really need a lock here, we can come up
1292 // with some simple algo to avoid taking a lock </TODO>
1294 SyncBlockCache::LockHolder lh(this);
1296 DeleteSyncBlockMemory(psb);
1301 // Returns the sync block memory to the free pool but does not destruct the sync block (must own the cache lock already)
1302 void SyncBlockCache::DeleteSyncBlockMemory(SyncBlock *psb)
1316 psb->m_Link.m_pNext = m_FreeBlockList;
1317 m_FreeBlockList = &psb->m_Link;
1321 // free a used sync block
1322 void SyncBlockCache::GCDeleteSyncBlock(SyncBlock *psb)
1333 // Destruct the SyncBlock, but don't reclaim its memory. (Overridden
1334 // operator delete).
1340 psb->m_Link.m_pNext = m_FreeBlockList;
1341 m_FreeBlockList = &psb->m_Link;
1344 void SyncBlockCache::GCWeakPtrScan(HANDLESCANPROC scanProc, uintptr_t lp1, uintptr_t lp2)
1356 // First delete the obsolete arrays since we have exclusive access
1357 BOOL fSetSyncBlockCleanup = FALSE;
1359 SyncTableEntry* arr;
1360 while ((arr = m_OldSyncTables) != NULL)
1362 m_OldSyncTables = (SyncTableEntry*)arr[0].m_Object.Load();
1367 LogSpewAlways("GCWeakPtrScan starting\n");
1371 if (g_pConfig->GetHeapVerifyLevel()& EEConfig::HEAPVERIFY_SYNCBLK)
1372 STRESS_LOG0 (LF_GC | LF_SYNC, LL_INFO100, "GCWeakPtrScan starting\n");
1375 if (GCHeapUtilities::GetGCHeap()->GetCondemnedGeneration() < GCHeapUtilities::GetGCHeap()->GetMaxGeneration())
1378         //for VSW 294550: we saw a stale object reference in SyncBlkCache, so we want to make sure the card
1379         //table logic above works correctly so that every ephemeral entry is promoted.
1380         //For verification, we make a copy of the sync table in the relocation phase, promote it using the
1381         //slow approach, and compare the result with the original one
1382         DWORD freeSyncTableIndexCopy = m_FreeSyncTableIndex;
1383 SyncTableEntry * syncTableShadow = NULL;
1384 if ((g_pConfig->GetHeapVerifyLevel()& EEConfig::HEAPVERIFY_SYNCBLK) && !((ScanContext*)lp1)->promotion)
1386 syncTableShadow = new(nothrow) SyncTableEntry [m_FreeSyncTableIndex];
1387 if (syncTableShadow)
1389 memcpy (syncTableShadow, SyncTableEntry::GetSyncTableEntry(), m_FreeSyncTableIndex * sizeof (SyncTableEntry));
1392 #endif //VERIFY_HEAP
1398 while (dw < BitMapSize (m_SyncTableSize) && (m_EphemeralBitmap[dw]==0))
1402 if (dw < BitMapSize (m_SyncTableSize))
1405 for (int i = 0; i < card_word_width; i++)
1407 size_t card = i+dw*card_word_width;
1408 if (CardSetP (card))
1410 BOOL clear_card = TRUE;
1411 for (int idx = 0; idx < card_size; idx++)
1413 size_t nb = CardIndex (card) + idx;
1414 if (( nb < m_FreeSyncTableIndex) && (nb > 0))
1416 Object* o = SyncTableEntry::GetSyncTableEntry()[nb].m_Object;
1417 if (o && !((size_t)o & 1))
1419 if (GCHeapUtilities::GetGCHeap()->IsEphemeral (o))
1423 GCWeakPtrScanElement ((int)nb, scanProc,
1424 lp1, lp2, fSetSyncBlockCleanup);
1440         //for VSW 294550: we saw a stale object reference in SyncBlkCache, so we want to make sure the card
1441         //table logic above works correctly so that every ephemeral entry is promoted. To verify, we make a
1442         //copy of the sync table, promote it using the slow approach, and compare the result with the real one
1443 if (g_pConfig->GetHeapVerifyLevel()& EEConfig::HEAPVERIFY_SYNCBLK)
1445 if (syncTableShadow)
1447 for (DWORD nb = 1; nb < m_FreeSyncTableIndex; nb++)
1449 Object **keyv = (Object **) &syncTableShadow[nb].m_Object;
1451 if (((size_t) *keyv & 1) == 0)
1453 (*scanProc) (keyv, NULL, lp1, lp2);
1454 SyncBlock *pSB = syncTableShadow[nb].m_SyncBlock;
1455 if (*keyv != 0 && (!pSB || !pSB->IsIDisposable()))
1457 if (syncTableShadow[nb].m_Object != SyncTableEntry::GetSyncTableEntry()[nb].m_Object)
1462 delete []syncTableShadow;
1463 syncTableShadow = NULL;
1465         if (freeSyncTableIndexCopy != m_FreeSyncTableIndex)
1468 #endif //VERIFY_HEAP
1473 for (DWORD nb = 1; nb < m_FreeSyncTableIndex; nb++)
1475 GCWeakPtrScanElement (nb, scanProc, lp1, lp2, fSetSyncBlockCleanup);
1481 if (fSetSyncBlockCleanup)
1483         // mark the finalizer thread as requiring sync block cleanup
1484 FinalizerThread::GetFinalizerThread()->SetSyncBlockCleanup();
1485 FinalizerThread::EnableFinalization();
1488 #if defined(VERIFY_HEAP)
1489 if (g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_GC)
1491 if (((ScanContext*)lp1)->promotion)
1494 for (int nb = 1; nb < (int)m_FreeSyncTableIndex; nb++)
1496 Object* o = SyncTableEntry::GetSyncTableEntry()[nb].m_Object;
1497 if (((size_t)o & 1) == 0)
1504 #endif // VERIFY_HEAP
1507 /* Scan the weak pointers in the SyncBlockEntry and report them to the GC. If the
1508 reference is dead, then return TRUE */
1510 BOOL SyncBlockCache::GCWeakPtrScanElement (int nb, HANDLESCANPROC scanProc, LPARAM lp1, LPARAM lp2,
1522 Object **keyv = (Object **) &SyncTableEntry::GetSyncTableEntry()[nb].m_Object;
1532 PAL_TRY(Param *, pParam, ¶m) {
1533 if (! *pParam->keyv)
1534 pParam->name = "null";
1535 else if ((size_t) *pParam->keyv & 1)
1536 pParam->name = "free";
1538 pParam->name = (*pParam->keyv)->GetClass()->GetDebugClassName();
1539 if (strlen(pParam->name) == 0)
1540 pParam->name = "<INVALID>";
1542 } PAL_EXCEPT(EXCEPTION_EXECUTE_HANDLER) {
1543 param.name = "<INVALID>";
1546 LogSpewAlways("[%4.4d]: %8.8x, %s\n", nb, *keyv, param.name);
1549 if (((size_t) *keyv & 1) == 0)
1552 if (g_pConfig->GetHeapVerifyLevel () & EEConfig::HEAPVERIFY_SYNCBLK)
1554 STRESS_LOG3 (LF_GC | LF_SYNC, LL_INFO100000, "scanning syncblk[%d, %p, %p]\n", nb, (size_t)SyncTableEntry::GetSyncTableEntry()[nb].m_SyncBlock, (size_t)*keyv);
1558 (*scanProc) (keyv, NULL, lp1, lp2);
1559 SyncBlock *pSB = SyncTableEntry::GetSyncTableEntry()[nb].m_SyncBlock;
1560 if ((*keyv == 0 ) || (pSB && pSB->IsIDisposable()))
1563 if (g_pConfig->GetHeapVerifyLevel () & EEConfig::HEAPVERIFY_SYNCBLK)
1565 STRESS_LOG3 (LF_GC | LF_SYNC, LL_INFO100000, "freeing syncblk[%d, %p, %p]\n", nb, (size_t)pSB, (size_t)*keyv);
1572 GCDeleteSyncBlock(pSB);
1573 //clean the object syncblock header
1574 ((Object*)(*keyv))->GetHeader()->GCResetIndex();
1580 // insert block into cleanup list
1581 InsertCleanupSyncBlock (SyncTableEntry::GetSyncTableEntry()[nb].m_SyncBlock);
1583 LogSpewAlways(" Cleaning up block at %4.4d\n", nb);
1589 LogSpewAlways(" Deleting block at %4.4d\n", nb);
1591 SyncTableEntry::GetSyncTableEntry()[nb].m_Object = (Object *)(m_FreeSyncTableList | 1);
1592 m_FreeSyncTableList = nb << 1;
1593 SyncTableEntry::GetSyncTableEntry()[nb].m_SyncBlock = NULL;
1599 LogSpewAlways(" Keeping block at %4.4d with oref %8.8x\n", nb, *keyv);
1606 void SyncBlockCache::GCDone(BOOL demoting, int max_gen)
1618 (GCHeapUtilities::GetGCHeap()->GetCondemnedGeneration() ==
1619 GCHeapUtilities::GetGCHeap()->GetMaxGeneration()))
1625 while (dw < BitMapSize (m_SyncTableSize) &&
1626 (m_EphemeralBitmap[dw]==(DWORD)~0))
1630 if (dw < BitMapSize (m_SyncTableSize))
1633 for (int i = 0; i < card_word_width; i++)
1635 size_t card = i+dw*card_word_width;
1636 if (!CardSetP (card))
1638 for (int idx = 0; idx < card_size; idx++)
1640 size_t nb = CardIndex (card) + idx;
1641 if (( nb < m_FreeSyncTableIndex) && (nb > 0))
1643 Object* o = SyncTableEntry::GetSyncTableEntry()[nb].m_Object;
1644 if (o && !((size_t)o & 1))
1646 if (GCHeapUtilities::GetGCHeap()->WhichGeneration (o) < (unsigned int)max_gen)
1666 #if defined (VERIFY_HEAP)
1672 #define _ASSERTE(c) if (!(c)) DebugBreak()
1675 void SyncBlockCache::VerifySyncTableEntry()
1686 for (DWORD nb = 1; nb < m_FreeSyncTableIndex; nb++)
1688 Object* o = SyncTableEntry::GetSyncTableEntry()[nb].m_Object;
1689 // if the slot was just allocated, the object may still be null
1690 if (o && (((size_t)o & 1) == 0))
1692 //there is no need to verify next object's header because this is called
1693 //from verify_heap, which will verify every object anyway
1694 o->Validate(TRUE, FALSE);
1697 // This loop is just a heuristic to try to catch errors, but it is not 100%.
1698 // To prevent false positives, we weaken our assert below to exclude the case
1699 // where the index is still NULL, but we've reached the end of our loop.
1701 static const DWORD max_iterations = 100;
1704 for (; loop < max_iterations; loop++)
1706                 // The syncblock index may be in the process of being updated by another thread.
1707 if (o->GetHeader()->GetHeaderSyncBlockIndex() != 0)
1711 __SwitchToThread(0, CALLER_LIMITS_SPINNING);
1714 DWORD idx = o->GetHeader()->GetHeaderSyncBlockIndex();
1715 _ASSERTE(idx == nb || ((0 == idx) && (loop == max_iterations)));
1716 _ASSERTE(!GCHeapUtilities::GetGCHeap()->IsEphemeral(o) || CardSetP(CardOf(nb)));
1723 #define _ASSERTE(expr) ((void)0)
1726 #endif // VERIFY_HEAP
1728 #if CHECK_APP_DOMAIN_LEAKS
1729 void SyncBlockCache::CheckForUnloadedInstances(ADIndex unloadingIndex)
1740     // We can only do this in leak mode, because agile objects will be in the domain with
1741     // their index set to their creating domain, and the check would fail.
1742 if (! g_pConfig->AppDomainLeaks())
1745 for (DWORD nb = 1; nb < m_FreeSyncTableIndex; nb++)
1747 SyncTableEntry *pEntry = &SyncTableEntry::GetSyncTableEntry()[nb];
1748 Object *oref = (Object *) pEntry->m_Object;
1749 if (((size_t) oref & 1) != 0)
1754 idx = pEntry->m_Object->GetHeader()->GetRawAppDomainIndex();
1755 if (! idx.m_dwIndex && pEntry->m_SyncBlock)
1756 idx = pEntry->m_SyncBlock->GetAppDomainIndex();
1757         // if the following assert fires, somebody is holding a reference to an object in an unloaded appdomain
1758 if (idx == unloadingIndex)
1760             // the object must be agile to have survived the unload. If we can't make it agile, that's a bug
1761 if (!oref->TrySetAppDomainAgile(TRUE))
1762 _ASSERTE(!"Detected instance of unloaded appdomain that survived GC\n");
1770 void DumpSyncBlockCache()
1772 STATIC_CONTRACT_NOTHROW;
1774 SyncBlockCache *pCache = SyncBlockCache::GetSyncBlockCache();
1776 LogSpewAlways("Dumping SyncBlockCache size %d\n", pCache->m_FreeSyncTableIndex);
1778 static int dumpSBStyle = -1;
1779 if (dumpSBStyle == -1)
1780 dumpSBStyle = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_SBDumpStyle);
1781 if (dumpSBStyle == 0)
1784 BOOL isString = FALSE;
1785 DWORD objectCount = 0;
1786 DWORD slotCount = 0;
1788 for (DWORD nb = 1; nb < pCache->m_FreeSyncTableIndex; nb++)
1791 char buffer[1024], buffer2[1024];
1792 LPCUTF8 descrip = "null";
1793 SyncTableEntry *pEntry = &SyncTableEntry::GetSyncTableEntry()[nb];
1794 Object *oref = (Object *) pEntry->m_Object;
1795 if (((size_t) oref & 1) != 0)
1815 param.descrip = descrip;
1817 param.buffer2 = buffer2;
1818 param.cch2 = COUNTOF(buffer2);
1819 param.isString = isString;
1821 PAL_TRY(Param *, pParam, ¶m)
1823 pParam->descrip = pParam->oref->GetMethodTable()->GetDebugClassName();
1824 if (strlen(pParam->descrip) == 0)
1825 pParam->descrip = "<INVALID>";
1826 else if (pParam->oref->GetMethodTable() == g_pStringClass)
1828 sprintf_s(pParam->buffer2, pParam->cch2, "%s (%S)", pParam->descrip, ObjectToSTRINGREF((StringObject*)pParam->oref)->GetBuffer());
1829 pParam->descrip = pParam->buffer2;
1830 pParam->isString = TRUE;
1833 PAL_EXCEPT(EXCEPTION_EXECUTE_HANDLER) {
1834 param.descrip = "<INVALID>";
1838 descrip = param.descrip;
1839 isString = param.isString;
1843 idx = pEntry->m_Object->GetHeader()->GetRawAppDomainIndex();
1844 if (! idx.m_dwIndex && pEntry->m_SyncBlock)
1845 idx = pEntry->m_SyncBlock->GetAppDomainIndex();
1846 if (idx.m_dwIndex && ! SystemDomain::System()->TestGetAppDomainAtIndex(idx))
1848 sprintf_s(buffer, COUNTOF(buffer), "** unloaded (%3.3x) %s", idx.m_dwIndex, descrip);
1853 sprintf_s(buffer, COUNTOF(buffer), "(AD %3.3x) %s", idx.m_dwIndex, descrip);
1857 if (dumpSBStyle < 2)
1858 LogSpewAlways("[%4.4d]: %8.8x %s\n", nb, oref, descrip);
1859 else if (dumpSBStyle == 2 && ! isString)
1860 LogSpewAlways("[%4.4d]: %s\n", nb, descrip);
1862 LogSpewAlways("Done dumping SyncBlockCache used slots: %d, objects: %d\n", slotCount, objectCount);
1866 // ***************************************************************************
1868 // ObjHeader class implementation
1870 // ***************************************************************************
1872 #if defined(ENABLE_CONTRACTS_IMPL)
1873 // The LOCK_TAKEN/RELEASED macros need a "pointer" to the lock object to do
1874 // comparisons between takes & releases (and to provide debugging info to the
1875 // developer). Ask the syncblock for its lock contract pointer, if the
1876 // syncblock exists. Otherwise, use the MethodTable* from the Object. That's not great,
1877 // as it's not unique, so we might miss unbalanced lock takes/releases from
1878 // different objects of the same type. However, our hands are tied, and we can't
1880 void * ObjHeader::GetPtrForLockContract()
1882 if (GetHeaderSyncBlockIndex() == 0)
1884 return (void *) GetBaseObject()->GetMethodTable();
1887 return PassiveGetSyncBlock()->GetPtrForLockContract();
1889 #endif // defined(ENABLE_CONTRACTS_IMPL)
1891 // this enters the monitor of an object
1892 void ObjHeader::EnterObjMonitor()
1894 WRAPPER_NO_CONTRACT;
1895 GetSyncBlock()->EnterMonitor();
1898 // Non-blocking version of above
1899 BOOL ObjHeader::TryEnterObjMonitor(INT32 timeOut)
1901 WRAPPER_NO_CONTRACT;
1902 return GetSyncBlock()->TryEnterMonitor(timeOut);
1905 BOOL ObjHeader::LeaveObjMonitor()
1915     // this function switches to preemptive mode, so we need to protect the object on some paths
1916 OBJECTREF thisObj = ObjectToOBJECTREF (GetBaseObject ());
1918 DWORD dwSwitchCount = 0;
1922 AwareLock::LeaveHelperAction action = thisObj->GetHeader ()->LeaveObjMonitorHelper(GetThread());
1926 case AwareLock::LeaveHelperAction_None:
1929 case AwareLock::LeaveHelperAction_Signal:
1932 SyncBlock *psb = thisObj->GetHeader ()->PassiveGetSyncBlock();
1934 psb->QuickGetMonitor()->Signal();
1937 case AwareLock::LeaveHelperAction_Yield:
1940 case AwareLock::LeaveHelperAction_Contention:
1941 // Some thread is updating the syncblock value.
1943 //protect the object before switching mode
1944 GCPROTECT_BEGIN (thisObj);
1946 __SwitchToThread(0, ++dwSwitchCount);
1951 // Must be an error otherwise - ignore it
1952 _ASSERTE(action == AwareLock::LeaveHelperAction_Error);
1958 // The only difference between LeaveObjMonitor and LeaveObjMonitorAtException is the switch
1959 // to preemptive mode around __SwitchToThread
1960 BOOL ObjHeader::LeaveObjMonitorAtException()
1970 DWORD dwSwitchCount = 0;
1974 AwareLock::LeaveHelperAction action = LeaveObjMonitorHelper(GetThread());
1978 case AwareLock::LeaveHelperAction_None:
1981 case AwareLock::LeaveHelperAction_Signal:
1984 SyncBlock *psb = PassiveGetSyncBlock();
1986 psb->QuickGetMonitor()->Signal();
1989 case AwareLock::LeaveHelperAction_Yield:
1992 case AwareLock::LeaveHelperAction_Contention:
1993 // Some thread is updating the syncblock value.
1995 // We never toggle GC mode while holding the spinlock (BeginNoTriggerGC/EndNoTriggerGC
1996 // in EnterSpinLock/ReleaseSpinLock ensures it). Thus we do not need to switch to preemptive
1997 // while waiting on the spinlock.
2000 __SwitchToThread(0, ++dwSwitchCount);
2004 // Must be an error otherwise - ignore it
2005 _ASSERTE(action == AwareLock::LeaveHelperAction_Error);
2011 #endif //!DACCESS_COMPILE
2013 // Returns TRUE if the lock is owned and FALSE otherwise
2014 // threadId is set to the ID (Thread::GetThreadId()) of the thread which owns the lock
2015 // acquisitionCount is set to the number of times the lock needs to be released before
2017 BOOL ObjHeader::GetThreadOwningMonitorLock(DWORD *pThreadId, DWORD *pAcquisitionCount)
2024 #ifndef DACCESS_COMPILE
2025 if (!IsGCSpecialThread ()) {MODE_COOPERATIVE;} else {MODE_ANY;}
2032 DWORD bits = GetBits();
2034 if (bits & BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX)
2036 if (bits & BIT_SBLK_IS_HASHCODE)
2039 // This thread does not own the lock.
2042 *pAcquisitionCount = 0;
2048 // We have a syncblk
2050 DWORD index = bits & MASK_SYNCBLOCKINDEX;
2051 SyncBlock* psb = g_pSyncTable[(int)index].m_SyncBlock;
2053 _ASSERTE(psb->GetMonitor() != NULL);
2054 Thread* pThread = psb->GetMonitor()->m_HoldingThread;
2058 *pAcquisitionCount = 0;
2063 *pThreadId = pThread->GetThreadId();
2064 *pAcquisitionCount = psb->GetMonitor()->m_Recursion;
2072 // We have a thinlock
2075 DWORD lockThreadId, recursionLevel;
2076 lockThreadId = bits & SBLK_MASK_LOCK_THREADID;
2077 recursionLevel = (bits & SBLK_MASK_LOCK_RECLEVEL) >> SBLK_RECLEVEL_SHIFT;
2078     //if the thread ID is 0, recursionLevel has to be zero,
2079     //but the thread ID doesn't have to be valid because the lock could be orphaned
2080 _ASSERTE (lockThreadId != 0 || recursionLevel == 0 );
2082 *pThreadId = lockThreadId;
2083 if(lockThreadId != 0)
2085 // in the header, the recursionLevel of 0 means the lock is owned once
2086 // (this differs from m_Recursion in the AwareLock)
2087 *pAcquisitionCount = recursionLevel + 1;
2092 *pAcquisitionCount = 0;
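    // Worked example (illustrative numbers): a thin lock taken recursively twice by the thread
    // whose id is 0x12 stores 0x12 in the SBLK_MASK_LOCK_THREADID bits and 1 in the
    // recursion-level bits, so this function reports *pThreadId == 0x12 and *pAcquisitionCount == 2.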
2098 #ifndef DACCESS_COMPILE
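// Spin lock protocol used by the two EnterSpinLock variants below and by ReleaseSpinLock:
// the lock is the BIT_SBLK_SPIN_LOCK bit in the object header, taken with an interlocked
// compare-exchange (spinning/yielding under contention) and released with an interlocked AND.
// In contract builds, holding it also puts the thread in a no-trigger-GC region
// (BeginNoTriggerGC / EndNoTriggerGC), which is why callers of ENTER_SPIN_LOCK /
// LEAVE_SPIN_LOCK elsewhere in this file never toggle GC mode while the bit is held.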
2101 DEBUG_NOINLINE void ObjHeader::EnterSpinLock()
2103 // NOTE: This function cannot have a dynamic contract. If it does, the contract's
2104 // destructor will reset the CLR debug state to what it was before entering the
2105 // function, which will undo the BeginNoTriggerGC() call below.
2107 STATIC_CONTRACT_GC_NOTRIGGER;
2113 DWORD dwSwitchCount = 0;
2119 // Give 64bit more time because there isn't a remoting fast path now, and we've hit this assert
2120 // needlessly in CLRSTRESS.
2125 _ASSERTE(!"ObjHeader::EnterLock timed out");
2127 // get the value so that it doesn't get changed under us.
2128 LONG curValue = m_SyncBlockValue.LoadWithoutBarrier();
2130 // check if lock taken
2131 if (! (curValue & BIT_SBLK_SPIN_LOCK))
2133 // try to take the lock
2134 LONG newValue = curValue | BIT_SBLK_SPIN_LOCK;
2135 LONG result = FastInterlockCompareExchange((LONG*)&m_SyncBlockValue, newValue, curValue);
2136 if (result == curValue)
2139 if (g_SystemInfo.dwNumberOfProcessors > 1)
2141 for (int spinCount = 0; spinCount < BIT_SBLK_SPIN_COUNT; spinCount++)
2143 if (! (m_SyncBlockValue & BIT_SBLK_SPIN_LOCK))
2145                     YieldProcessor();               // indicate to the processor that we are spinning
2147 if (m_SyncBlockValue & BIT_SBLK_SPIN_LOCK)
2148 __SwitchToThread(0, ++dwSwitchCount);
2151 __SwitchToThread(0, ++dwSwitchCount);
2154 INCONTRACT(Thread* pThread = GetThread());
2155 INCONTRACT(if (pThread != NULL) pThread->BeginNoTriggerGC(__FILE__, __LINE__));
2158 DEBUG_NOINLINE void ObjHeader::EnterSpinLock()
2161 STATIC_CONTRACT_GC_NOTRIGGER;
2167 DWORD dwSwitchCount = 0;
2173 _ASSERTE(!"ObjHeader::EnterLock timed out");
2175 // get the value so that it doesn't get changed under us.
2176 LONG curValue = m_SyncBlockValue.LoadWithoutBarrier();
2178 // check if lock taken
2179 if (! (curValue & BIT_SBLK_SPIN_LOCK))
2181 // try to take the lock
2182 LONG newValue = curValue | BIT_SBLK_SPIN_LOCK;
2183 LONG result = FastInterlockCompareExchange((LONG*)&m_SyncBlockValue, newValue, curValue);
2184 if (result == curValue)
2187 __SwitchToThread(0, ++dwSwitchCount);
2190 INCONTRACT(Thread* pThread = GetThread());
2191 INCONTRACT(if (pThread != NULL) pThread->BeginNoTriggerGC(__FILE__, __LINE__));
2195 DEBUG_NOINLINE void ObjHeader::ReleaseSpinLock()
2198 LIMITED_METHOD_CONTRACT;
2200 INCONTRACT(Thread* pThread = GetThread());
2201 INCONTRACT(if (pThread != NULL) pThread->EndNoTriggerGC());
2203 FastInterlockAnd(&m_SyncBlockValue, ~BIT_SBLK_SPIN_LOCK);
2206 #endif //!DACCESS_COMPILE
2208 ADIndex ObjHeader::GetRawAppDomainIndex()
2210 LIMITED_METHOD_CONTRACT;
2213     // pull the value out before checking it to avoid a race condition
2214 DWORD value = m_SyncBlockValue.LoadWithoutBarrier();
2215 if ((value & BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX) == 0)
2216 return ADIndex((value >> SBLK_APPDOMAIN_SHIFT) & SBLK_MASK_APPDOMAININDEX);
2220 ADIndex ObjHeader::GetAppDomainIndex()
2222 STATIC_CONTRACT_NOTHROW;
2223 STATIC_CONTRACT_GC_NOTRIGGER;
2224 STATIC_CONTRACT_SO_TOLERANT;
2225 STATIC_CONTRACT_SUPPORTS_DAC;
2227 ADIndex indx = GetRawAppDomainIndex();
2230 SyncBlock* syncBlock = PassiveGetSyncBlock();
2234 return syncBlock->GetAppDomainIndex();
2237 #ifndef DACCESS_COMPILE
2239 void ObjHeader::SetAppDomainIndex(ADIndex indx)
2247 INJECT_FAULT(COMPlusThrowOM(););
2252 // This should only be called during the header initialization,
2253 // so don't worry about races.
2259 static int forceSB = -1;
2262 forceSB = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_ADForceSB);
2265     // force a syncblock so we get one for every object.
2269 if (GetHeaderSyncBlockIndex() == 0 && indx.m_dwIndex < SBLK_MASK_APPDOMAININDEX)
2271 ENTER_SPIN_LOCK(this);
2273 if (GetHeaderSyncBlockIndex() == 0)
2275 _ASSERTE(GetRawAppDomainIndex().m_dwIndex == 0);
2276 // can store it in the object header
2277 FastInterlockOr(&m_SyncBlockValue, indx.m_dwIndex << SBLK_APPDOMAIN_SHIFT);
2280 LEAVE_SPIN_LOCK(this);
2285     // must create a syncblock entry and store the appdomain index there
2286 SyncBlock *psb = GetSyncBlock();
2288 psb->SetAppDomainIndex(indx);
2292 void ObjHeader::ResetAppDomainIndex(ADIndex indx)
2304 // This should only be called during the header initialization,
2305 // so don't worry about races.
2310 if (GetHeaderSyncBlockIndex() == 0 && indx.m_dwIndex < SBLK_MASK_APPDOMAININDEX)
2312 ENTER_SPIN_LOCK(this);
2314 if (GetHeaderSyncBlockIndex() == 0)
2316 // can store it in the object header
2319 DWORD oldValue = m_SyncBlockValue.LoadWithoutBarrier();
2320 DWORD newValue = (oldValue & (~(SBLK_MASK_APPDOMAININDEX << SBLK_APPDOMAIN_SHIFT))) |
2321 (indx.m_dwIndex << SBLK_APPDOMAIN_SHIFT);
2322 if (FastInterlockCompareExchange((LONG*)&m_SyncBlockValue,
2324 oldValue) == (LONG)oldValue)
2331 LEAVE_SPIN_LOCK(this);
2336     // must create a syncblock entry and store the appdomain index there
2337 SyncBlock *psb = GetSyncBlock();
2339 psb->SetAppDomainIndex(indx);
2343 void ObjHeader::ResetAppDomainIndexNoFailure(ADIndex indx)
2351 PRECONDITION(indx.m_dwIndex < SBLK_MASK_APPDOMAININDEX);
2355 ENTER_SPIN_LOCK(this);
2356 if (GetHeaderSyncBlockIndex() == 0)
2358 // can store it in the object header
2361 DWORD oldValue = m_SyncBlockValue.LoadWithoutBarrier();
2362 DWORD newValue = (oldValue & (~(SBLK_MASK_APPDOMAININDEX << SBLK_APPDOMAIN_SHIFT))) |
2363 (indx.m_dwIndex << SBLK_APPDOMAIN_SHIFT);
2364 if (FastInterlockCompareExchange((LONG*)&m_SyncBlockValue,
2366 oldValue) == (LONG)oldValue)
2374 SyncBlock *psb = PassiveGetSyncBlock();
2376 psb->SetAppDomainIndex(indx);
2378 LEAVE_SPIN_LOCK(this);
2381 DWORD ObjHeader::GetSyncBlockIndex()
2389 INJECT_FAULT(COMPlusThrowOM(););
2395 if ((indx = GetHeaderSyncBlockIndex()) == 0)
2397 BOOL fMustCreateSyncBlock = FALSE;
2399 if (GetAppDomainIndex().m_dwIndex)
2401         // if we have an appdomain index set, then we must create a sync block to store it
2402 fMustCreateSyncBlock = TRUE;
2406 //Need to get it from the cache
2407 SyncBlockCache::LockHolder lh(SyncBlockCache::GetSyncBlockCache());
2410 if (GetHeaderSyncBlockIndex() == 0)
2412 ENTER_SPIN_LOCK(this);
2413 // Now the header will be stable - check whether hashcode, appdomain index or lock information is stored in it.
2414 DWORD bits = GetBits();
2415 if (((bits & (BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX | BIT_SBLK_IS_HASHCODE)) == (BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX | BIT_SBLK_IS_HASHCODE)) ||
2416 ((bits & BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX) == 0 &&
2417 (bits & ((SBLK_MASK_APPDOMAININDEX<<SBLK_APPDOMAIN_SHIFT)|SBLK_MASK_LOCK_RECLEVEL|SBLK_MASK_LOCK_THREADID)) != 0))
2419 // Need a sync block to store this info
2420 fMustCreateSyncBlock = TRUE;
2424 SetIndex(BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX | SyncBlockCache::GetSyncBlockCache()->NewSyncBlockSlot(GetBaseObject()));
2426 LEAVE_SPIN_LOCK(this);
2428 // SyncBlockCache::LockHolder goes out of scope here
2431 if (fMustCreateSyncBlock)
2434 if ((indx = GetHeaderSyncBlockIndex()) == 0)
2441 #if defined (VERIFY_HEAP)
2443 BOOL ObjHeader::Validate (BOOL bVerifySyncBlkIndex)
2445 STATIC_CONTRACT_THROWS;
2446 STATIC_CONTRACT_GC_NOTRIGGER;
2447 STATIC_CONTRACT_SO_TOLERANT;
2448 STATIC_CONTRACT_MODE_COOPERATIVE;
2450 DWORD bits = GetBits ();
2451 Object * obj = GetBaseObject ();
2452 BOOL bVerifyMore = g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_SYNCBLK;
2453     //the highest 2 bits have overloaded meaning
2454 //for string objects:
2455 // BIT_SBLK_STRING_HAS_NO_HIGH_CHARS 0x80000000
2456 // BIT_SBLK_STRING_HIGH_CHARS_KNOWN 0x40000000
2457 // BIT_SBLK_STRING_HAS_SPECIAL_SORT 0xC0000000
2458 //for other objects:
2459 // BIT_SBLK_AGILE_IN_PROGRESS 0x80000000
2460 // BIT_SBLK_FINALIZER_RUN 0x40000000
2461 if (bits & BIT_SBLK_STRING_HIGH_CHAR_MASK)
2463 if (obj->GetGCSafeMethodTable () == g_pStringClass)
2467 ASSERT_AND_CHECK (((StringObject *)obj)->ValidateHighChars());
2472 #if CHECK_APP_DOMAIN_LEAKS
2475 if (bits & BIT_SBLK_AGILE_IN_PROGRESS)
2479             //BIT_SBLK_AGILE_IN_PROGRESS is set only if the object needs to be checked for appdomain agility
2480 obj->ShouldCheckAppDomainAgile(FALSE, &fResult)
2481 //before BIT_SBLK_AGILE_IN_PROGRESS is cleared, the object might already be marked as agile
2482 ||(obj->PassiveGetSyncBlock () && obj->PassiveGetSyncBlock ()->IsAppDomainAgile ())
2483 ||(obj->PassiveGetSyncBlock () && obj->PassiveGetSyncBlock ()->IsCheckedForAppDomainAgile ())
2487 #else //CHECK_APP_DOMAIN_LEAKS
2488 //BIT_SBLK_AGILE_IN_PROGRESS is set only in debug build
2489 ASSERT_AND_CHECK (!(bits & BIT_SBLK_AGILE_IN_PROGRESS));
2490 #endif //CHECK_APP_DOMAIN_LEAKS
2491 if (bits & BIT_SBLK_FINALIZER_RUN)
2493 ASSERT_AND_CHECK (obj->GetGCSafeMethodTable ()->HasFinalizer ());
2498     //BIT_SBLK_GC_RESERVE (0x20000000) is only set during GC. But for frozen objects, we don't clear the bit
2499 if (bits & BIT_SBLK_GC_RESERVE)
2501 if (!GCHeapUtilities::IsGCInProgress () && !GCHeapUtilities::GetGCHeap()->IsConcurrentGCInProgress ())
2503 #ifdef FEATURE_BASICFREEZE
2504 ASSERT_AND_CHECK (GCHeapUtilities::GetGCHeap()->IsInFrozenSegment(obj));
2505 #else //FEATURE_BASICFREEZE
2506 _ASSERTE(!"Reserve bit not cleared");
2508 #endif //FEATURE_BASICFREEZE
2512 //Don't know how to verify BIT_SBLK_SPIN_LOCK (0x10000000)
2514 //BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX (0x08000000)
2515 if (bits & BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX)
2517         //if BIT_SBLK_IS_HASHCODE (0x04000000) is not set,
2518         //the rest of the DWORD is the SyncBlock index
2519 if (!(bits & BIT_SBLK_IS_HASHCODE))
2521 if (bVerifySyncBlkIndex && GCScan::GetGcRuntimeStructuresValid ())
2523 DWORD sbIndex = bits & MASK_SYNCBLOCKINDEX;
2524 ASSERT_AND_CHECK(SyncTableEntry::GetSyncTableEntry()[sbIndex].m_Object == obj);
2529             // the rest of the DWORD is a hash code, and we don't have much with which to validate it
2534 //if BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX is clear, rest of DWORD is thin lock thread ID,
2535 //thin lock recursion level and appdomain index
2536 DWORD lockThreadId = bits & SBLK_MASK_LOCK_THREADID;
2537 DWORD recursionLevel = (bits & SBLK_MASK_LOCK_RECLEVEL) >> SBLK_RECLEVEL_SHIFT;
2538         //if the thread ID is 0, recursionLevel has to be zero,
2539         //but the thread ID doesn't have to be valid because the lock could be orphaned
2540 ASSERT_AND_CHECK (lockThreadId != 0 || recursionLevel == 0 );
2542 DWORD adIndex = (bits >> SBLK_APPDOMAIN_SHIFT) & SBLK_MASK_APPDOMAININDEX;
2546             //in a non-debug build, only objects of domain-neutral types have an appdomain index in the header
2547 ASSERT_AND_CHECK (obj->GetGCSafeMethodTable()->IsDomainNeutral());
2549 //todo: validate the AD index.
2550             //The trick here is that agile objects could have an invalid AD index. Ideally we should call
2551             //Object::GetAppDomain to do all the agile validation, but it has side effects like marking the
2552             //object as agile, and it only does the check if g_pConfig->AppDomainLeaks() is on
2559 #endif //VERIFY_HEAP
2561 // This holder takes care of the SyncBlock memory cleanup if an OOM occurs inside a call to NewSyncBlockSlot.
2563 // Warning: Assumes you already own the cache lock.
2564 // Assumes nothing allocated inside the SyncBlock (only releases the memory, does not destruct.)
2566 // This holder really just meets GetSyncBlock()'s special needs. It's not a general purpose holder.
2569 // Do not inline this call. (fyuan)
2570 // SyncBlockMemoryHolder is normally a check for empty pointer and return. Inlining VoidDeleteSyncBlockMemory adds expensive exception handling.
2571 void VoidDeleteSyncBlockMemory(SyncBlock* psb)
2573 LIMITED_METHOD_CONTRACT;
2574 SyncBlockCache::GetSyncBlockCache()->DeleteSyncBlockMemory(psb);
2577 typedef Wrapper<SyncBlock*, DoNothing<SyncBlock*>, VoidDeleteSyncBlockMemory, NULL> SyncBlockMemoryHolder;
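// Illustrative usage, mirroring the pattern GetSyncBlock() follows below (pObj is just a
// placeholder for the object being given a sync block): wrap the raw memory in the holder, then
// perform the last operation that can fail (NewSyncBlockSlot); only once the slot exists is
// SuppressRelease() called and the memory handed over to placement-new.
//
//     SyncBlockMemoryHolder holder(SyncBlockCache::GetSyncBlockCache()->GetNextFreeSyncBlock());
//     SyncBlock *psb = holder;
//     DWORD indx = SyncBlockCache::GetSyncBlockCache()->NewSyncBlockSlot(pObj); // may throw;
//                                                                               // holder frees the memory if it does
//     holder.SuppressRelease();
//     new (psb) SyncBlock(indx);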
2580 // get the sync block for an existing object
2581 SyncBlock *ObjHeader::GetSyncBlock()
2583 CONTRACT(SyncBlock *)
2589 INJECT_FAULT(COMPlusThrowOM(););
2590 POSTCONDITION(CheckPointer(RETVAL));
2594 PTR_SyncBlock syncBlock = GetBaseObject()->PassiveGetSyncBlock();
2596 BOOL indexHeld = FALSE;
2601 // Has our backpointer been correctly updated through every GC?
2602 PTR_SyncTableEntry pEntries(SyncTableEntry::GetSyncTableEntry());
2603 _ASSERTE(pEntries[GetHeaderSyncBlockIndex()].m_Object == GetBaseObject());
2608 //Need to get it from the cache
2610 SyncBlockCache::LockHolder lh(SyncBlockCache::GetSyncBlockCache());
2613 syncBlock = GetBaseObject()->PassiveGetSyncBlock();
2618 SyncBlockMemoryHolder syncBlockMemoryHolder(SyncBlockCache::GetSyncBlockCache()->GetNextFreeSyncBlock());
2619 syncBlock = syncBlockMemoryHolder;
2621 if ((indx = GetHeaderSyncBlockIndex()) == 0)
2623 indx = SyncBlockCache::GetSyncBlockCache()->NewSyncBlockSlot(GetBaseObject());
2627 //We already have an index; we need to hold the syncblock
2632 //! NewSyncBlockSlot has side-effects that we don't have backout for - thus, that must be the last
2633 //! failable operation called.
2634 CANNOTTHROWCOMPLUSEXCEPTION();
2638 syncBlockMemoryHolder.SuppressRelease();
2640 new (syncBlock) SyncBlock(indx);
2643 // after this point, nobody can update the index in the header to give an AD index
2644 ENTER_SPIN_LOCK(this);
2647 // If there's an appdomain index stored in the header, transfer it to the syncblock
2649 ADIndex dwAppDomainIndex = GetAppDomainIndex();
2650 if (dwAppDomainIndex.m_dwIndex)
2651 syncBlock->SetAppDomainIndex(dwAppDomainIndex);
2653 // If the thin lock in the header is in use, transfer the information to the syncblock
2654 DWORD bits = GetBits();
2655 if ((bits & BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX) == 0)
2657 DWORD lockThreadId = bits & SBLK_MASK_LOCK_THREADID;
2658 DWORD recursionLevel = (bits & SBLK_MASK_LOCK_RECLEVEL) >> SBLK_RECLEVEL_SHIFT;
2659 if (lockThreadId != 0 || recursionLevel != 0)
2661 // recursionLevel can't be non-zero if thread id is 0
2662 _ASSERTE(lockThreadId != 0);
2664 Thread *pThread = g_pThinLockThreadIdDispenser->IdToThreadWithValidation(lockThreadId);
2666 if (pThread == NULL)
2668 // The lock is orphaned.
2669 pThread = (Thread*) -1;
2671 syncBlock->InitState();
2672 syncBlock->SetAwareLock(pThread, recursionLevel + 1);
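
// Illustrative mapping (assuming the thin lock's recursion field counts acquisitions
// beyond the first, which is why "+ 1" is applied above):
//
//   header recursionLevel == 0  (held once)         ->  AwareLock recursion == 1
//   header recursionLevel == 2  (held three times)  ->  AwareLock recursion == 3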
2675 else if ((bits & BIT_SBLK_IS_HASHCODE) != 0)
2677 DWORD hashCode = bits & MASK_HASHCODE;
2679 syncBlock->SetHashCode(hashCode);
2683 SyncTableEntry::GetSyncTableEntry() [indx].m_SyncBlock = syncBlock;
2685 // in order to avoid a race where some thread tries to get the AD index and we've already nuked it,
2686 // make sure the syncblock etc. is fully set up with the AD index prior to replacing the index
2688 if (GetHeaderSyncBlockIndex() == 0)
2690 // We have transferred the AppDomain into the syncblock above.
2691 SetIndex(BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX | indx);
2694 //If we already had an index, hold the syncblock
2695 //for the lifetime of the object.
2697 syncBlock->SetPrecious();
2699 LEAVE_SPIN_LOCK(this);
2701 // SyncBlockCache::LockHolder goes out of scope here
2708 BOOL ObjHeader::Wait(INT32 timeOut, BOOL exitContext)
2716 INJECT_FAULT(COMPlusThrowOM(););
2720 // The following code may cause GC, so we must fetch the sync block from
2721 // the object now in case it moves.
2722 SyncBlock *pSB = GetBaseObject()->GetSyncBlock();
2724 // GetSyncBlock throws on failure
2725 _ASSERTE(pSB != NULL);
2727 // make sure we own the crst
2728 if (!pSB->DoesCurrentThreadOwnMonitor())
2729 COMPlusThrow(kSynchronizationLockException);
2732 Thread *pThread = GetThread();
2733 DWORD curLockCount = pThread->m_dwLockCount;
2736 BOOL result = pSB->Wait(timeOut,exitContext);
2738 _ASSERTE (curLockCount == pThread->m_dwLockCount);
2743 void ObjHeader::Pulse()
2751 INJECT_FAULT(COMPlusThrowOM(););
2755 // The following code may cause GC, so we must fetch the sync block from
2756 // the object now in case it moves.
2757 SyncBlock *pSB = GetBaseObject()->GetSyncBlock();
2759 // GetSyncBlock throws on failure
2760 _ASSERTE(pSB != NULL);
2762 // make sure we own the crst
2763 if (!pSB->DoesCurrentThreadOwnMonitor())
2764 COMPlusThrow(kSynchronizationLockException);
2769 void ObjHeader::PulseAll()
2777 INJECT_FAULT(COMPlusThrowOM(););
2781 // The following code may cause GC, so we must fetch the sync block from
2782 // the object now in case it moves.
2783 SyncBlock *pSB = GetBaseObject()->GetSyncBlock();
2785 // GetSyncBlock throws on failure
2786 _ASSERTE(pSB != NULL);
2788 // make sure we own the crst
2789 if (!pSB->DoesCurrentThreadOwnMonitor())
2790 COMPlusThrow(kSynchronizationLockException);
2796 // ***************************************************************************
2798 // AwareLock class implementation (GC-aware locking)
2800 // ***************************************************************************
2802 void AwareLock::AllocLockSemEvent()
2810 INJECT_FAULT(COMPlusThrowOM(););
2814 // Before we switch from cooperative, ensure that this syncblock won't disappear
2815 // under us. For something as expensive as an event, do it permanently rather
2816 // than transiently.
2821 // No need to take a lock - CLREvent::CreateMonitorEvent is thread safe
2822 m_SemEvent.CreateMonitorEvent((SIZE_T)this);
2825 void AwareLock::Enter()
2833 INJECT_FAULT(COMPlusThrowOM(););
2837 Thread *pCurThread = GetThread();
2841 // Read existing lock state.
2842 LONG state = m_MonitorHeld.LoadWithoutBarrier();
2846 // Common case: lock not held, no waiters. Attempt to acquire lock by
2847 // switching lock bit.
2848 if (FastInterlockCompareExchange((LONG*)&m_MonitorHeld, 1, 0) == 0)
2855 // It's possible to get here with waiters but no lock held, but in this
2856 // case a signal is about to be fired which will wake up a waiter. So
2857 // for fairness' sake we should wait too.
2858 // Check first for recursive lock attempts on the same thread.
2859 if (m_HoldingThread == pCurThread)
2864 // Attempt to increment the count of waiters, then go to the contention handling code
2866 if (FastInterlockCompareExchange((LONG*)&m_MonitorHeld, (state + 2), state) == state)
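
// Illustrative sketch (comments only): m_MonitorHeld packs the lock bit in bit 0 and the
// waiter count in the remaining bits, which is why acquisition swaps 0 -> 1 and registering
// a waiter adds 2.
//
//   LONG state   = 5;                  // binary 101: lock held, two waiters
//   bool held    = (state & 1) != 0;   // true
//   LONG waiters = state >> 1;         // 2
//   // register as a waiter:        CAS state -> state + 2         (now three waiters)
//   // woken waiter takes the lock: CAS state -> (state - 2) | 1   (one fewer waiter, lock bit set)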
2873 // We get here if we successfully acquired the mutex.
2874 m_HoldingThread = pCurThread;
2876 pCurThread->IncLockCount();
2878 #if defined(_DEBUG) && defined(TRACK_SYNC)
2880 // The best place to grab this is from the ECall frame
2881 Frame *pFrame = pCurThread->GetFrame();
2882 int caller = (pFrame && pFrame != FRAME_TOP
2883 ? (int) pFrame->GetReturnAddress()
2885 pCurThread->m_pTrackSync->EnterSync(caller, this);
2892 // Didn't manage to get the mutex, must wait.
2893 EnterEpilog(pCurThread);
2897 // Got the mutex via recursive locking on the same thread.
2898 _ASSERTE(m_Recursion >= 1);
2900 #if defined(_DEBUG) && defined(TRACK_SYNC)
2901 // The best place to grab this is from the ECall frame
2902 Frame *pFrame = pCurThread->GetFrame();
2903 int caller = (pFrame && pFrame != FRAME_TOP ? (int) pFrame->GetReturnAddress() : -1);
2904 pCurThread->m_pTrackSync->EnterSync(caller, this);
2908 BOOL AwareLock::TryEnter(INT32 timeOut)
2915 if (timeOut == 0) {MODE_ANY;} else {MODE_COOPERATIVE;}
2916 INJECT_FAULT(COMPlusThrowOM(););
2922 LARGE_INTEGER qpFrequency, qpcStart, qpcEnd;
2923 BOOL canUseHighRes = QueryPerformanceCounter(&qpcStart);
2925 // try some more busy waiting
2926 if (Contention(timeOut))
2930 if (canUseHighRes && QueryPerformanceCounter(&qpcEnd) && QueryPerformanceFrequency(&qpFrequency))
2931 elapsed = (DWORD)((qpcEnd.QuadPart-qpcStart.QuadPart)/(qpFrequency.QuadPart/1000));
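
// Worked example (hypothetical numbers): with a QueryPerformanceFrequency of
// 10,000,000 ticks/second, qpFrequency.QuadPart / 1000 is 10,000 ticks per millisecond,
// so a delta of 250,000 ticks gives elapsed = 250,000 / 10,000 = 25 ms.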
2933 if (elapsed >= (DWORD)timeOut)
2936 if (timeOut != (INT32)INFINITE)
2940 Thread *pCurThread = GetThread();
2941 TESTHOOKCALL(AppDomainCanBeUnloaded(pCurThread->GetDomain()->GetId().m_dwId,FALSE));
2943 if (pCurThread->IsAbortRequested())
2945 pCurThread->HandleThreadAbort();
2952 // Read existing lock state.
2953 LONG state = m_MonitorHeld.LoadWithoutBarrier();
2957 // Common case: lock not held, no waiters. Attempt to acquire lock by
2958 // switching lock bit.
2959 if (FastInterlockCompareExchange((LONG*)&m_MonitorHeld, 1, 0) == 0)
2966 // It's possible to get here with waiters but no lock held, but in this
2967 // case a signal is about to be fired which will wake up a waiter. So
2968 // for fairness' sake we should wait too.
2969 // Check first for recursive lock attempts on the same thread.
2970 if (m_HoldingThread == pCurThread)
2981 // We get here if we successfully acquired the mutex.
2982 m_HoldingThread = pCurThread;
2984 pCurThread->IncLockCount();
2986 #if defined(_DEBUG) && defined(TRACK_SYNC)
2988 // The best place to grab this is from the ECall frame
2989 Frame *pFrame = pCurThread->GetFrame();
2990 int caller = (pFrame && pFrame != FRAME_TOP ? (int) pFrame->GetReturnAddress() : -1);
2991 pCurThread->m_pTrackSync->EnterSync(caller, this);
2998 // Didn't manage to get the mutex, return failure if no timeout, else wait
2999 // for at most timeout milliseconds for the mutex.
3005 // The precondition for EnterEpilog is that the count of waiters be bumped
3006 // to account for this thread
3010 // Read existing lock state.
3011 LONG state = m_MonitorHeld.LoadWithoutBarrier();
3018 if (FastInterlockCompareExchange((LONG*)&m_MonitorHeld, (state + 2), state) == state)
3024 return EnterEpilog(pCurThread, timeOut);
3027 // Got the mutex via recursive locking on the same thread.
3028 _ASSERTE(m_Recursion >= 1);
3030 #if defined(_DEBUG) && defined(TRACK_SYNC)
3031 // The best place to grab this is from the ECall frame
3032 Frame *pFrame = pCurThread->GetFrame();
3033 int caller = (pFrame && pFrame != FRAME_TOP ? (int) pFrame->GetReturnAddress() : -1);
3034 pCurThread->m_pTrackSync->EnterSync(caller, this);
3040 BOOL AwareLock::EnterEpilog(Thread* pCurThread, INT32 timeOut)
3042 STATIC_CONTRACT_THROWS;
3043 STATIC_CONTRACT_MODE_COOPERATIVE;
3044 STATIC_CONTRACT_GC_TRIGGERS;
3046 // While we are in this frame the thread is considered blocked on the
3047 // critical section of the monitor lock according to the debugger
3048 DebugBlockingItem blockingMonitorInfo;
3049 blockingMonitorInfo.dwTimeout = timeOut;
3050 blockingMonitorInfo.pMonitor = this;
3051 blockingMonitorInfo.pAppDomain = SystemDomain::GetCurrentDomain();
3052 blockingMonitorInfo.type = DebugBlock_MonitorCriticalSection;
3053 DebugBlockingItemHolder holder(pCurThread, &blockingMonitorInfo);
3055 // We need a separate helper because it uses SEH and the holder has a destructor.
3057 return EnterEpilogHelper(pCurThread, timeOut);
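
// Sketch of the general split (hypothetical names): a function that uses SEH cannot also
// contain C++ objects whose destructors require unwinding, so the destructor-bearing holder
// lives in the outer function while the SEH lives in the helper.
//
//   void Outer(Thread *t) { SomeHolder h(t); Inner(); }                        // holder with destructor
//   void Inner()          { __try { /* wait */ } __finally { /* cleanup */ } } // SEH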
3060 BOOL AwareLock::EnterEpilogHelper(Thread* pCurThread, INT32 timeOut)
3062 STATIC_CONTRACT_THROWS;
3063 STATIC_CONTRACT_MODE_COOPERATIVE;
3064 STATIC_CONTRACT_GC_TRIGGERS;
3067 BOOL finished = false;
3069 // Require all callers to be in cooperative mode. If they have switched to preemptive
3070 // mode temporarily before calling here, then they are responsible for protecting
3071 // the object associated with this lock.
3072 _ASSERTE(pCurThread->PreemptiveGCDisabled());
3076 OBJECTREF obj = GetOwningObject();
3078 // We cannot allow the AwareLock to be cleaned up underneath us by the GC.
3079 IncrementTransientPrecious();
3081 GCPROTECT_BEGIN(obj);
3083 if (!m_SemEvent.IsMonitorEventAllocated())
3085 AllocLockSemEvent();
3087 _ASSERTE(m_SemEvent.IsMonitorEventAllocated());
3089 pCurThread->EnablePreemptiveGC();
3093 // We might be interrupted during the wait (Thread.Interrupt), so we need an
3094 // exception handler around the call.
3102 param.timeOut = timeOut;
3105 EE_TRY_FOR_FINALLY(Param *, pParam, ¶m)
3107 // Measure the time we wait so that, in the case where we wake up
3108 // and fail to acquire the mutex, we can adjust remaining timeout
3110 ULONGLONG start = CLRGetTickCount64();
3112 pParam->ret = pParam->pThis->m_SemEvent.Wait(pParam->timeOut, TRUE);
3113 _ASSERTE((pParam->ret == WAIT_OBJECT_0) || (pParam->ret == WAIT_TIMEOUT));
3115 // When calculating duration we consider a couple of special cases.
3116 // If the end tick is the same as the start tick we make the
3117 // duration a millisecond, to ensure we make forward progress if
3118 // there's a lot of contention on the mutex. Secondly, we have to
3119 // cope with the case where the tick counter wrapped while we were
3120 // waiting (we can cope with at most one wrap, so don't expect three-
3121 // month timeouts to be very accurate). Luckily for us, the latter
3122 // case is taken care of by 32-bit modulo arithmetic automatically.
3124 if (pParam->timeOut != (INT32) INFINITE)
3126 ULONGLONG end = CLRGetTickCount64();
3134 duration = end - start;
3136 duration = min(duration, (DWORD)pParam->timeOut);
3137 pParam->timeOut -= (INT32)duration;
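
// Worked example (hypothetical numbers): with timeOut = 500 ms and a wait that returned
// after roughly 180 ms without acquiring the lock, duration = 180; it is clamped to the
// remaining timeout and timeOut becomes 500 - 180 = 320 for the next pass of the wait loop.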
3142 if (GOT_EXCEPTION())
3144 // We must decrement the waiter count.
3147 LONG state = m_MonitorHeld.LoadWithoutBarrier();
3148 _ASSERTE((state >> 1) != 0);
3149 if (FastInterlockCompareExchange((LONG*)&m_MonitorHeld, state - 2, state) == state)
3155 // And signal the next waiter, else they'll wait forever.
3162 if (ret == WAIT_OBJECT_0)
3164 // Attempt to acquire lock (this also involves decrementing the waiter count).
3167 LONG state = m_MonitorHeld.LoadWithoutBarrier();
3168 _ASSERTE(((size_t)state >> 1) != 0);
3170 if ((size_t)state & 1)
3175 if (FastInterlockCompareExchange((LONG*)&m_MonitorHeld, ((state - 2) | 1), state) == state)
3184 // We timed out, decrement waiter count.
3187 LONG state = m_MonitorHeld.LoadWithoutBarrier();
3188 _ASSERTE((state >> 1) != 0);
3189 if (FastInterlockCompareExchange((LONG*)&m_MonitorHeld, state - 2, state) == state)
3203 pCurThread->DisablePreemptiveGC();
3206 DecrementTransientPrecious();
3208 if (ret == WAIT_TIMEOUT)
3213 m_HoldingThread = pCurThread;
3215 pCurThread->IncLockCount();
3217 #if defined(_DEBUG) && defined(TRACK_SYNC)
3218 // The best place to grab this is from the ECall frame
3219 Frame *pFrame = pCurThread->GetFrame();
3220 int caller = (pFrame && pFrame != FRAME_TOP ? (int) pFrame->GetReturnAddress() : -1);
3221 pCurThread->m_pTrackSync->EnterSync(caller, this);
3224 return (ret != WAIT_TIMEOUT);
3228 BOOL AwareLock::Leave()
3239 Thread* pThread = GetThread();
3241 AwareLock::LeaveHelperAction action = LeaveHelper(pThread);
3245 case AwareLock::LeaveHelperAction_None:
3248 case AwareLock::LeaveHelperAction_Signal:
3253 // Must be an error otherwise
3254 _ASSERTE(action == AwareLock::LeaveHelperAction_Error);
3260 #define _LOGCONTENTION
3263 #ifdef _LOGCONTENTION
3264 inline void LogContention()
3266 WRAPPER_NO_CONTRACT;
3268 if (LoggingOn(LF_SYNC, LL_INFO100))
3270 LogSpewAlways("Contention: Stack Trace Begin\n");
3271 void LogStackTrace();
3273 LogSpewAlways("Contention: Stack Trace End\n");
3278 #define LogContention()
3283 bool AwareLock::Contention(INT32 timeOut)
3291 INJECT_FAULT(COMPlusThrowOM(););
3295 DWORD startTime = 0;
3296 if (timeOut != (INT32)INFINITE)
3297 startTime = GetTickCount();
3299 COUNTER_ONLY(GetPerfCounters().m_LocksAndThreads.cContention++);
3303 Thread *pCurThread = GetThread();
3304 OBJECTREF obj = GetOwningObject();
3305 bool bEntered = false;
3306 bool bKeepGoing = true;
3308 // We cannot allow the AwareLock to be cleaned up underneath us by the GC.
3309 IncrementTransientPrecious();
3311 GCPROTECT_BEGIN(obj);
3315 // Try spinning and yielding before eventually blocking.
3316 // The limit of 10 is largely arbitrary - feel free to tune if you have evidence
3317 // you're making things better
3318 for (DWORD iter = 0; iter < g_SpinConstants.dwRepetitions && bKeepGoing; iter++)
3320 DWORD i = g_SpinConstants.dwInitialDuration;
3330 if (g_SystemInfo.dwNumberOfProcessors <= 1)
3336 if (timeOut != (INT32)INFINITE && GetTickCount() - startTime >= (DWORD)timeOut)
3342 // Spin for i iterations, and make sure to never go more than 20000 iterations between
3343 // checking if we should SwitchToThread
3344 int remainingDelay = i;
3346 while (remainingDelay > 0)
3348 int currentDelay = min(remainingDelay, 20000);
3349 remainingDelay -= currentDelay;
3351 // Delay by approximately 2*currentDelay clock cycles (Pentium III).
3353 // This is brittle code - future processors may of course execute this
3354 // faster or slower, and future code generators may eliminate the loop altogether.
3355 // The precise value of the delay is not critical, however, and I can't think
3356 // of a better way that isn't machine-dependent.
3357 for (int delayCount = currentDelay; (--delayCount != 0); )
3359 YieldProcessor(); // indicate to the processor that we are spinning
3362 // TryEnter will not take the lock if it has waiters. This means we should not spin
3363 // for long periods without giving the waiters a chance to run, since we won't
3364 // make progress until they run and they may be waiting for our CPU. So once
3365 // we're spinning >20000 iterations, check every 20000 iterations if there are
3366 // waiters and if so call SwitchToThread.
3368 // Since this only affects the spinning heuristic, calling HasWaiters now
3369 // and getting a dirty read is fine. Note that it is important that TryEnter
3370 // not take the lock because we could easily starve waiting threads.
3371 // They make only one attempt before going back to sleep, and spinners on
3372 // other CPUs would likely get the lock. We could fix this by allowing a
3373 // woken thread to become a spinner again, at which point there are no
3374 // starvation concerns and TryEnter can take the lock.
3375 if (remainingDelay > 0 && HasWaiters())
3377 __SwitchToThread(0, CALLER_LIMITS_SPINNING);
3381 // exponential backoff: wait a factor longer in the next iteration
3382 i *= g_SpinConstants.dwBackoffFactor;
3384 while (i < g_SpinConstants.dwMaximumDuration);
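
// Illustrative growth of the spin duration (the real g_SpinConstants values are
// configuration-dependent; these numbers are only an example): starting from
// dwInitialDuration = 50 with dwBackoffFactor = 3, successive passes spin for roughly
// 50, 150, 450, 1350, ... iterations until dwMaximumDuration is exceeded; the outer loop
// repeats this up to dwRepetitions times before giving up on spinning and blocking.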
3388 pCurThread->HandleThreadAbort();
3391 __SwitchToThread(0, CALLER_LIMITS_SPINNING);
3396 // we are in co-operative mode so no need to keep this set
3397 DecrementTransientPrecious();
3398 if (!bEntered && timeOut == (INT32)INFINITE)
3400 // We've tried hard to enter - we need to eventually block to avoid wasting too much CPU
3409 LONG AwareLock::LeaveCompletely()
3411 WRAPPER_NO_CONTRACT;
3417 _ASSERTE(count > 0); // otherwise we were never in the lock
3423 BOOL AwareLock::OwnedByCurrentThread()
3425 WRAPPER_NO_CONTRACT;
3426 return (GetThread() == m_HoldingThread);
3430 // ***************************************************************************
3432 // SyncBlock class implementation
3434 // ***************************************************************************
3436 // We maintain two queues for SyncBlock::Wait.
3437 // 1. Inside SyncBlock we queue all threads that are waiting on the SyncBlock.
3438 // When we pulse, we pick the thread from this queue using FIFO.
3439 // 2. We queue all SyncBlocks that a thread is waiting for in Thread::m_WaitEventLink.
3440 // When we pulse a thread, we find the event from this queue to set, and we also
3441 // OR a 1 bit into the syncblock value saved in the queue, so that we can return
3442 // immediately from SyncBlock::Wait if the syncblock has been pulsed.
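
// Illustrative sketch (comments only): the "pulsed" mark is the low bit of the SyncBlock
// pointer stored in the wait queue entry, which is why the code below compares after
// masking with ~1.
//
//   SyncBlock *pStored = walk->m_Next->m_WaitSB;                   // possibly tagged
//   bool       pulsed  = ((DWORD_PTR)pStored & 1) != 0;            // low bit set => already pulsed
//   SyncBlock *pReal   = (SyncBlock *)((DWORD_PTR)pStored & ~1);   // strip the tag to get the real pointer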
3443 BOOL SyncBlock::Wait(INT32 timeOut, BOOL exitContext)
3451 INJECT_FAULT(COMPlusThrowOM());
3455 Thread *pCurThread = GetThread();
3456 BOOL isTimedOut = FALSE;
3457 BOOL isEnqueued = FALSE;
3458 WaitEventLink waitEventLink;
3459 WaitEventLink *pWaitEventLink;
3461 // As soon as we flip the switch, we are in a race with the GC, which could clean
3462 // up the SyncBlock underneath us -- unless we report the object.
3463 _ASSERTE(pCurThread->PreemptiveGCDisabled());
3465 // Is this thread already waiting on this SyncBlock?
3466 WaitEventLink *walk = pCurThread->WaitEventLinkForSyncBlock(this);
3468 if (walk->m_Next->m_WaitSB == this) {
3469 // Wait on the same lock again.
3470 walk->m_Next->m_RefCount ++;
3471 pWaitEventLink = walk->m_Next;
3473 else if ((SyncBlock*)(((DWORD_PTR)walk->m_Next->m_WaitSB) & ~1)== this) {
3474 // This thread has been pulsed. No need to wait.
3479 // First time this thread is going to wait for this SyncBlock.
3481 if (pCurThread->m_WaitEventLink.m_Next == NULL) {
3482 hEvent = &(pCurThread->m_EventWait);
3485 hEvent = GetEventFromEventStore();
3487 waitEventLink.m_WaitSB = this;
3488 waitEventLink.m_EventWait = hEvent;
3489 waitEventLink.m_Thread = pCurThread;
3490 waitEventLink.m_Next = NULL;
3491 waitEventLink.m_LinkSB.m_pNext = NULL;
3492 waitEventLink.m_RefCount = 1;
3493 pWaitEventLink = &waitEventLink;
3494 walk->m_Next = pWaitEventLink;
3496 // Before we enqueue it (and, thus, before it can be dequeued), reset the event
3497 // that will awaken us.
3500 // This thread is now waiting on this sync block
3501 ThreadQueue::EnqueueThread(pWaitEventLink, this);
3506 _ASSERTE ((SyncBlock*)((DWORD_PTR)walk->m_Next->m_WaitSB & ~1)== this);
3508 PendingSync syncState(walk);
3510 OBJECTREF obj = m_Monitor.GetOwningObject();
3512 m_Monitor.IncrementTransientPrecious();
3514 // While we are in this frame the thread is considered blocked on the
3515 // event of the monitor lock according to the debugger
3516 DebugBlockingItem blockingMonitorInfo;
3517 blockingMonitorInfo.dwTimeout = timeOut;
3518 blockingMonitorInfo.pMonitor = &m_Monitor;
3519 blockingMonitorInfo.pAppDomain = SystemDomain::GetCurrentDomain();
3520 blockingMonitorInfo.type = DebugBlock_MonitorEvent;
3521 DebugBlockingItemHolder holder(pCurThread, &blockingMonitorInfo);
3523 GCPROTECT_BEGIN(obj);
3527 // remember how many times we synchronized
3528 syncState.m_EnterCount = LeaveMonitorCompletely();
3529 _ASSERTE(syncState.m_EnterCount > 0);
3531 Context* targetContext;
3532 targetContext = pCurThread->GetContext();
3533 _ASSERTE(targetContext);
3534 Context* defaultContext;
3535 defaultContext = pCurThread->GetDomain()->GetDefaultContext();
3536 _ASSERTE(defaultContext);
3537 #ifdef FEATURE_REMOTING
3539 targetContext != defaultContext)
3541 Context::MonitorWaitArgs waitArgs = {timeOut, &syncState, &isTimedOut};
3542 Context::CallBackInfo callBackInfo = {Context::MonitorWait_callback, (void*) &waitArgs};
3543 Context::RequestCallBack(CURRENT_APPDOMAIN_ID, defaultContext, &callBackInfo);
3547 _ASSERTE( exitContext==NULL || targetContext == defaultContext);
3550 isTimedOut = pCurThread->Block(timeOut, &syncState);
3554 m_Monitor.DecrementTransientPrecious();
3559 void SyncBlock::Pulse()
3570 WaitEventLink *pWaitEventLink;
3572 if ((pWaitEventLink = ThreadQueue::DequeueThread(this)) != NULL)
3573 pWaitEventLink->m_EventWait->Set();
3576 void SyncBlock::PulseAll()
3587 WaitEventLink *pWaitEventLink;
3589 while ((pWaitEventLink = ThreadQueue::DequeueThread(this)) != NULL)
3590 pWaitEventLink->m_EventWait->Set();
3593 bool SyncBlock::SetInteropInfo(InteropSyncBlockInfo* pInteropInfo)
3595 WRAPPER_NO_CONTRACT;
3598 // We could be agile, but not have noticed yet. We can't assert here
3599 // that we live in any given domain, nor is this an appropriate place
3600 // to re-parent the syncblock.
3601 /* _ASSERTE (m_dwAppDomainIndex.m_dwIndex == 0 ||
3602 m_dwAppDomainIndex == SystemDomain::System()->DefaultDomain()->GetIndex() ||
3603 m_dwAppDomainIndex == GetAppDomain()->GetIndex());
3604 m_dwAppDomainIndex = GetAppDomain()->GetIndex();
3606 return (FastInterlockCompareExchangePointer(&m_pInteropInfo,
3611 #ifdef EnC_SUPPORTED
3612 // Store information about fields added to this object by EnC
3613 // This must be called from a thread in the AppDomain of this object instance
3614 void SyncBlock::SetEnCInfo(EnCSyncBlockInfo *pEnCInfo)
3616 WRAPPER_NO_CONTRACT;
3618 // We can't recreate the field contents, so this SyncBlock can never go away
3621 // Store the field info (should only ever happen once)
3622 _ASSERTE( m_pEnCInfo == NULL );
3623 m_pEnCInfo = pEnCInfo;
3625 // Also store the AppDomain that this object lives in.
3626 // Also verify that the AD was either not yet set, or set correctly before overwriting it.
3627 // I'm not sure why it should ever be set to the default domain and then changed to a different domain,
3628 // perhaps that can be removed.
3629 _ASSERTE (m_dwAppDomainIndex.m_dwIndex == 0 ||
3630 m_dwAppDomainIndex == SystemDomain::System()->DefaultDomain()->GetIndex() ||
3631 m_dwAppDomainIndex == GetAppDomain()->GetIndex());
3632 m_dwAppDomainIndex = GetAppDomain()->GetIndex();
3634 #endif // EnC_SUPPORTED
3635 #endif // !DACCESS_COMPILE
3637 #if defined(_WIN64) && defined(_DEBUG)
3638 void ObjHeader::IllegalAlignPad()
3640 WRAPPER_NO_CONTRACT;
3642 void** object = ((void**) this) + 1;
3643 LogSpewAlways("\n\n******** Illegal ObjHeader m_alignpad not 0, object" FMT_ADDR "\n\n",
3646 _ASSERTE(m_alignpad == 0);
3648 #endif // _WIN64 && _DEBUG