1 // Licensed to the .NET Foundation under one or more agreements.
2 // The .NET Foundation licenses this file to you under the MIT license.
3 // See the LICENSE file in the project root for more information.
9 // Definition of a SyncBlock and the SyncBlockCache which manages it
23 #include "interoputil.h"
25 #include "eventtrace.h"
26 #include "dllimportcallback.h"
27 #include "comcallablewrapper.h"
30 #include "comdelegate.h"
31 #include "finalizerthread.h"
33 #ifdef FEATURE_COMINTEROP
34 #include "runtimecallablewrapper.h"
35 #endif // FEATURE_COMINTEROP
37 // Allocate 4K worth. Typically enough
38 #define MAXSYNCBLOCK (0x1000-sizeof(void*))/sizeof(SyncBlock)
39 #define SYNC_TABLE_INITIAL_SIZE 250
46 SyncBlockArray *m_Next;
47 BYTE m_Blocks[MAXSYNCBLOCK * sizeof (SyncBlock)];
50 // For in-place constructor
51 BYTE g_SyncBlockCacheInstance[sizeof(SyncBlockCache)];
53 SPTR_IMPL (SyncBlockCache, SyncBlockCache, s_pSyncBlockCache);
55 #ifndef DACCESS_COMPILE
61 SLIST_HEADER InteropSyncBlockInfo::s_InteropInfoStandbyList;
62 #endif // !FEATURE_PAL
// Destructor: releases the unmanaged entry thunk / intercept stub owned by
// this interop info. (Further destructor work, if any, is elided from this view.)
64 InteropSyncBlockInfo::~InteropSyncBlockInfo()
75 FreeUMEntryThunkOrInterceptStub();
79 // Deletes all items in code:s_InteropInfoStandbyList.
80 void InteropSyncBlockInfo::FlushStandbyList()
// Atomically detach the entire lock-free standby list in one operation,
// then walk the detached chain freeing each node.
90 PSLIST_ENTRY pEntry = InterlockedFlushSList(&InteropSyncBlockInfo::s_InteropInfoStandbyList);
// Capture the successor before the current entry's memory is released.
93 PSLIST_ENTRY pNextEntry = pEntry->Next;
95 // make sure to use the global delete since the destructor has already run
96 ::delete (void *)pEntry;
100 #endif // !FEATURE_PAL
// Frees whichever of the two mutually-exclusive values is stored in
// m_pUMEntryThunkOrInterceptStub: a UMEntryThunk (all platforms) or,
// on x86 only, an intercept Stub. Nulls the field afterwards.
102 void InteropSyncBlockInfo::FreeUMEntryThunkOrInterceptStub()
115 void *pUMEntryThunk = GetUMEntryThunk();
116 if (pUMEntryThunk != NULL)
// Remove the reverse-pcall mapping before releasing the thunk itself.
118 COMDelegate::RemoveEntryFromFPtrHash((UPTR)pUMEntryThunk);
119 UMEntryThunk::FreeUMEntryThunk((UMEntryThunk *)pUMEntryThunk);
123 #if defined(_TARGET_X86_)
124 Stub *pInterceptStub = GetInterceptStub();
125 if (pInterceptStub != NULL)
127 // There may be multiple chained stubs
// DecRef releases this stub; chained stubs are presumably released via
// the stub's own ref-counting — elided code, confirm against full source.
128 pInterceptStub->DecRef();
130 #else // _TARGET_X86_
131 // Intercept stubs are currently not used on other platforms.
132 _ASSERTE(GetInterceptStub() == NULL);
133 #endif // _TARGET_X86_
// Clear the union field regardless of which kind of value it held.
136 m_pUMEntryThunkOrInterceptStub = NULL;
139 #ifdef FEATURE_COMINTEROP
140 // Returns either NULL or an RCW on which AcquireLock has been called.
// The low bit of m_pRCW doubles as a spin lock: values 0 and 1 mean
// "no live RCW"; an odd pointer means another thread holds the lock.
141 RCW* InteropSyncBlockInfo::GetRCWAndIncrementUseCount()
143 LIMITED_METHOD_CONTRACT;
145 DWORD dwSwitchCount = 0;
148 RCW *pRCW = VolatileLoad(&m_pRCW);
149 if ((size_t)pRCW <= 0x1)
151 // the RCW never existed or has been released
155 if (((size_t)pRCW & 0x1) == 0x0)
157 // it looks like we have a chance, try to acquire the lock
158 RCW *pLockedRCW = (RCW *)((size_t)pRCW | 0x1);
// CAS in the tagged (locked) value; success means we own the field.
159 if (InterlockedCompareExchangeT(&m_pRCW, pLockedRCW, pRCW) == pRCW)
161 // we have the lock on the m_pRCW field, now we can safely "use" the RCW
162 pRCW->IncrementUseCount();
164 // release the m_pRCW lock
165 VolatileStore(&m_pRCW, pRCW);
167 // and return the RCW
// Lock owned by someone else (or CAS lost): back off and retry.
172 // somebody else holds the lock, retry
173 __SwitchToThread(0, ++dwSwitchCount);
177 // Sets the m_pRCW field in a thread-safe manner, pRCW can be NULL.
178 void InteropSyncBlockInfo::SetRawRCW(RCW* pRCW)
180 LIMITED_METHOD_CONTRACT;
184 // we never set two different RCWs on a single object
185 _ASSERTE(m_pRCW == NULL);
// pRCW == NULL path: mark the slot as "released" (sentinel 0x1) rather
// than 0, cooperating with the spin-lock encoding used by
// GetRCWAndIncrementUseCount (low bit = lock / released marker).
190 DWORD dwSwitchCount = 0;
193 RCW *pOldRCW = VolatileLoad(&m_pRCW);
195 if ((size_t)pOldRCW <= 0x1)
197 // the RCW never existed or has been released
198 VolatileStore(&m_pRCW, (RCW *)0x1);
202 if (((size_t)pOldRCW & 0x1) == 0x0)
204 // it looks like we have a chance, set the RCW to 0x1
205 if (InterlockedCompareExchangeT(&m_pRCW, (RCW *)0x1, pOldRCW) == pOldRCW)
212 // somebody else holds the lock, retry
213 __SwitchToThread(0, ++dwSwitchCount);
217 #endif // FEATURE_COMINTEROP
219 #endif // !DACCESS_COMPILE
// DAC-friendly accessor: returns the global sync table as a PTR_ type so
// it can be used both in-process and from out-of-process debugger code.
221 PTR_SyncTableEntry SyncTableEntry::GetSyncTableEntry()
223 LIMITED_METHOD_CONTRACT;
226 return (PTR_SyncTableEntry)g_pSyncTable;
229 #ifndef DACCESS_COMPILE
// Returns a mutable reference to the sync-table pointer so callers can
// swap in a new table (see SyncBlockCache::Grow / Start).
// NOTE(review): return statement elided in this excerpt — presumably
// returns g_pSyncTable by reference; confirm against full source.
231 SyncTableEntry*& SyncTableEntry::GetSyncTableEntryByRef()
233 LIMITED_METHOD_CONTRACT;
// Returns a mutable reference to the process-wide SyncBlockCache singleton
// pointer (assigned in Start, cleared in Stop).
238 SyncBlockCache*& SyncBlockCache::GetSyncBlockCache()
240 LIMITED_METHOD_CONTRACT;
242 return s_pSyncBlockCache;
246 //----------------------------------------------------------------------------
248 // ThreadQueue Implementation
250 //----------------------------------------------------------------------------
251 #endif //!DACCESS_COMPILE
253 // Given a link in the chain, get the Thread that it represents
// Recovers the containing WaitEventLink from a pointer to its embedded
// m_LinkSB link node (container-of via offsetof).
255 inline PTR_WaitEventLink ThreadQueue::WaitEventLinkForLink(PTR_SLink pLink)
257 LIMITED_METHOD_CONTRACT;
259 return (PTR_WaitEventLink) (((PTR_BYTE) pLink) - offsetof(WaitEventLink, m_LinkSB));
262 #ifndef DACCESS_COMPILE
264 // Unlink the head of the Q. We are always in the SyncBlock's critical
// Pop the first waiter off the SyncBlock's wait queue; returns NULL when
// the queue is empty. Caller is in the SyncBlock's critical section.
267 inline WaitEventLink *ThreadQueue::DequeueThread(SyncBlock *psb)
278 // Be careful, the debugger inspects the queue from out of process and just looks at the memory...
279 // it must be valid even if the lock is held. Be careful if you change the way the queue is updated.
280 SyncBlockCache::LockHolder lh(SyncBlockCache::GetSyncBlockCache());
282 WaitEventLink *ret = NULL;
283 SLink *pLink = psb->m_Link.m_pNext;
287 psb->m_Link.m_pNext = pLink->m_pNext;
// Poison the unlinked node's next pointer to catch stale use in debug runs.
289 pLink->m_pNext = (SLink *)POISONC;
291 ret = WaitEventLinkForLink(pLink);
292 _ASSERTE(ret->m_WaitSB == psb);
297 // Enqueue is the slow one. We have to find the end of the Q since we don't
298 // want to burn storage for this in the SyncBlock.
// Append a waiter to the end of the SyncBlock's wait queue. O(n): the
// SyncBlock stores only a head link, so we must walk to the tail.
300 inline void ThreadQueue::EnqueueThread(WaitEventLink *pWaitEventLink, SyncBlock *psb)
311 _ASSERTE (pWaitEventLink->m_LinkSB.m_pNext == NULL);
313 // Be careful, the debugger inspects the queue from out of process and just looks at the memory...
314 // it must be valid even if the lock is held. Be careful if you change the way the queue is updated.
315 SyncBlockCache::LockHolder lh(SyncBlockCache::GetSyncBlockCache());
317 SLink *pPrior = &psb->m_Link;
319 while (pPrior->m_pNext)
321 // We shouldn't already be in the waiting list!
322 _ASSERTE(pPrior->m_pNext != &pWaitEventLink->m_LinkSB);
324 pPrior = pPrior->m_pNext;
// pPrior now points at the tail; splice the new link in.
326 pPrior->m_pNext = &pWaitEventLink->m_LinkSB;
330 // Wade through the SyncBlock's list of waiting threads and remove the
// Linear search of the SyncBlock's wait queue for pThread; unlinks the
// matching entry. (Return value handling elided in this excerpt.)
333 BOOL ThreadQueue::RemoveThread (Thread *pThread, SyncBlock *psb)
345 // Be careful, the debugger inspects the queue from out of process and just looks at the memory...
346 // it must be valid even if the lock is held. Be careful if you change the way the queue is updated.
347 SyncBlockCache::LockHolder lh(SyncBlockCache::GetSyncBlockCache());
349 SLink *pPrior = &psb->m_Link;
351 WaitEventLink *pWaitEventLink;
353 while ((pLink = pPrior->m_pNext) != NULL)
355 pWaitEventLink = WaitEventLinkForLink(pLink);
356 if (pWaitEventLink->m_Thread == pThread)
// Found it: splice around the node and poison its next pointer.
358 pPrior->m_pNext = pLink->m_pNext;
360 pLink->m_pNext = (SLink *)POISONC;
362 _ASSERTE(pWaitEventLink->m_WaitSB == psb);
371 #endif //!DACCESS_COMPILE
373 #ifdef DACCESS_COMPILE
374 // Enumerates the threads in the queue from front to back by calling
375 // pCallbackFunction on each one
// DAC-only: walk the wait queue front-to-back, invoking the callback with
// each waiting Thread and the caller-supplied cookie.
377 void ThreadQueue::EnumerateThreads(SyncBlock *psb, FP_TQ_THREAD_ENUMERATION_CALLBACK pCallbackFunction, void* pUserData)
388 PTR_SLink pLink = psb->m_Link.m_pNext;
389 PTR_WaitEventLink pWaitEventLink;
391 while (pLink != NULL)
393 pWaitEventLink = WaitEventLinkForLink(pLink);
395 pCallbackFunction(pWaitEventLink->m_Thread, pUserData);
396 pLink = pLink->m_pNext;
399 #endif //DACCESS_COMPILE
401 #ifndef DACCESS_COMPILE
403 // ***************************************************************************
405 // Ephemeral Bitmap Helper
407 // ***************************************************************************
// Card arithmetic for the ephemeral bitmap: the sync table is divided into
// "cards" of card_size entries; 32 cards pack into one bitmap DWORD.
411 #define card_word_width 32
// First sync-table index covered by the given card.
413 size_t CardIndex (size_t card)
415 LIMITED_METHOD_CONTRACT;
416 return card_size * card;
// Card containing the given sync-table index.
419 size_t CardOf (size_t idx)
421 LIMITED_METHOD_CONTRACT;
422 return idx / card_size;
// Bitmap DWORD holding the given card's bit.
425 size_t CardWord (size_t card)
427 LIMITED_METHOD_CONTRACT;
428 return card / card_word_width;
// Bit position of the card within its bitmap DWORD.
431 unsigned CardBit (size_t card)
433 LIMITED_METHOD_CONTRACT;
434 return (unsigned)(card % card_word_width);
// Set the bitmap bit for one card.
438 void SyncBlockCache::SetCard (size_t card)
441 m_EphemeralBitmap [CardWord (card)] =
442 (m_EphemeralBitmap [CardWord (card)] | (1 << CardBit (card)));
// Clear the bitmap bit for one card.
446 void SyncBlockCache::ClearCard (size_t card)
449 m_EphemeralBitmap [CardWord (card)] =
450 (m_EphemeralBitmap [CardWord (card)] & ~(1 << CardBit (card)));
// Test whether the card's bit is set (nonzero result == set).
454 BOOL SyncBlockCache::CardSetP (size_t card)
457 return ( m_EphemeralBitmap [ CardWord (card) ] & (1 << CardBit (card)));
// Mark the card covering sync-table index idx.
461 void SyncBlockCache::CardTableSetBit (size_t idx)
464 SetCard (CardOf (idx));
// Number of bitmap DWORDs needed to cover cacheSize entries (rounded up).
468 size_t BitMapSize (size_t cacheSize)
470 LIMITED_METHOD_CONTRACT;
472 return (cacheSize + card_size * card_word_width - 1)/ (card_size * card_word_width);
475 // ***************************************************************************
477 // SyncBlockCache class implementation
479 // ***************************************************************************
// Constructor: initializes empty free/cleanup lists, the cache crst, and
// the initial sync-table bookkeeping (index 0 is reserved, so the first
// free index is 1).
481 SyncBlockCache::SyncBlockCache()
482 : m_pCleanupBlockList(NULL),
483 m_FreeBlockList(NULL),
485 // NOTE: CRST_UNSAFE_ANYMODE prevents a GC mode switch when entering this crst.
486 // If you remove this flag, we will switch to preemptive mode when entering
487 // g_criticalSection, which means all functions that enter it will become
488 // GC_TRIGGERS. (This includes all uses of LockHolder around SyncBlockCache::GetSyncBlockCache().
489 // So be sure to update the contracts if you remove this flag.
490 m_CacheLock(CrstSyncBlockCache, (CrstFlags) (CRST_UNSAFE_ANYMODE | CRST_DEBUGGER_THREAD)),
496 m_FreeSyncTableIndex(1),
497 m_FreeSyncTableList(0),
498 m_SyncTableSize(SYNC_TABLE_INITIAL_SIZE),
500 m_bSyncBlockCleanupInProgress(FALSE),
509 INJECT_FAULT(COMPlusThrowOM());
515 // This method is NO longer called.
// Destructor (dead code per the note above): drops the free/cleanup lists
// without walking them, frees the SyncBlockArray chain, and releases the
// old sync tables chained through entry 0's m_Object field.
516 SyncBlockCache::~SyncBlockCache()
527 // Clear the list the fast way.
528 m_FreeBlockList = NULL;
529 //<TODO>@todo we can clear this fast too I guess</TODO>
530 m_pCleanupBlockList = NULL;
532 // destruct all arrays
535 SyncBlockArray *next = m_SyncBlocks->m_Next;
540 // Also, now is a good time to clean up all the old tables which we discarded
541 // when we overflowed them.
// Each old table stores the next-older table's pointer in slot 0.
543 while ((arr = m_OldSyncTables) != 0)
545 m_OldSyncTables = (SyncTableEntry*)arr[0].m_Object.Load();
551 // When the GC determines that an object is dead the low bit of the
552 // m_Object field of SyncTableEntry is set, however it is not
553 // cleaned up because we cant do the COM interop cleanup at GC time.
554 // It is put on a cleanup list and at a later time (typically during
555 // finalization, this list is cleaned up.
// Runs on the finalizer thread: drains the cleanup list populated by the
// GC, handing any RCWs to the per-context RCW cleanup list and deleting
// each sync block. Periodically pulses GC mode so GC is not starved.
557 void SyncBlockCache::CleanupSyncBlocks()
559 STATIC_CONTRACT_THROWS;
560 STATIC_CONTRACT_MODE_COOPERATIVE;
562 _ASSERTE(GetThread() == FinalizerThread::GetFinalizerThread());
564 // Set the flag indicating sync block cleanup is in progress.
565 // IMPORTANT: This must be set before the sync block cleanup bit is reset on the thread.
566 m_bSyncBlockCleanupInProgress = TRUE;
570 SyncBlockCache *pThis;
572 #ifdef FEATURE_COMINTEROP
578 #ifdef FEATURE_COMINTEROP
// TRY/FINALLY so the in-progress flag and any partially-processed block
// are dealt with even if cleanup throws (finally body elided below).
582 EE_TRY_FOR_FINALLY(Param *, pParam, &param)
585 FinalizerThread::GetFinalizerThread()->ResetSyncBlockCleanup();
587 // walk the cleanup list and cleanup 'em up
588 while ((pParam->psb = pParam->pThis->GetNextCleanupSyncBlock()) != NULL)
590 #ifdef FEATURE_COMINTEROP
591 InteropSyncBlockInfo* pInteropInfo = pParam->psb->GetInteropInfoNoCreate();
594 pParam->pRCW = pInteropInfo->GetRawRCW();
597 // We should have initialized the cleanup list with the
598 // first RCW cache we created
599 _ASSERTE(g_pRCWCleanupList != NULL);
601 g_pRCWCleanupList->AddWrapper(pParam->pRCW);
// Detach the RCW from the sync block before the block is deleted.
604 pInteropInfo->SetRawRCW(NULL);
607 #endif // FEATURE_COMINTEROP
609 // Delete the sync block.
610 pParam->pThis->DeleteSyncBlock(pParam->psb);
613 // pulse GC mode to allow GC to perform its work
614 if (FinalizerThread::GetFinalizerThread()->CatchAtSafePointOpportunistic())
616 FinalizerThread::GetFinalizerThread()->PulseGCMode();
620 #ifdef FEATURE_COMINTEROP
621 // Now clean up the rcw's sorted by context
622 if (g_pRCWCleanupList != NULL)
623 g_pRCWCleanupList->CleanupAllWrappers();
624 #endif // FEATURE_COMINTEROP
628 // We are finished cleaning up the sync blocks.
629 m_bSyncBlockCleanupInProgress = FALSE;
// Finally path (elided framing): release the RCW / sync block that were
// in flight when an exception unwound the loop.
631 #ifdef FEATURE_COMINTEROP
633 param.pRCW->Cleanup();
637 DeleteSyncBlock(param.psb);
641 // create the sync block cache
// No-op attach hook (body is empty as far as this view shows).
643 void SyncBlockCache::Attach()
645 LIMITED_METHOD_CONTRACT;
648 // create the sync block cache
// Process startup: allocate the ephemeral card bitmap and the initial
// sync table (slot 0 reserved), then placement-new the singleton cache
// into the static g_SyncBlockCacheInstance buffer.
650 void SyncBlockCache::Start()
657 INJECT_FAULT(COMPlusThrowOM(););
661 DWORD* bm = new DWORD [BitMapSize(SYNC_TABLE_INITIAL_SIZE+1)];
663 memset (bm, 0, BitMapSize (SYNC_TABLE_INITIAL_SIZE+1)*sizeof(DWORD));
665 SyncTableEntry::GetSyncTableEntryByRef() = new SyncTableEntry[SYNC_TABLE_INITIAL_SIZE+1];
667 for (int i=0; i<SYNC_TABLE_INITIAL_SIZE+1; i++) {
668 SyncTableEntry::GetSyncTableEntry()[i].m_SyncBlock = NULL;
672 SyncTableEntry::GetSyncTableEntry()[0].m_SyncBlock = 0;
// In-place construction: the cache lives in a static buffer, never heap-freed.
673 SyncBlockCache::GetSyncBlockCache() = new (&g_SyncBlockCacheInstance) SyncBlockCache;
675 SyncBlockCache::GetSyncBlockCache()->m_EphemeralBitmap = bm;
678 InitializeSListHead(&InteropSyncBlockInfo::s_InteropInfoStandbyList);
679 #endif // !FEATURE_PAL
683 // destroy the sync block cache
// Process shutdown: tear down the cache before the table, since cache
// destruction walks the table to destroy live sync blocks.
685 void SyncBlockCache::Stop()
695 // cache must be destroyed first, since it can traverse the table to find all the
696 // sync blocks which are live and thus must have their critical sections destroyed.
697 if (SyncBlockCache::GetSyncBlockCache())
699 delete SyncBlockCache::GetSyncBlockCache();
700 SyncBlockCache::GetSyncBlockCache() = 0;
// NOTE(review): table was allocated with new[] but released with scalar
// delete here — harmless for PODs in practice, but mismatched; confirm
// against full source before changing.
703 if (SyncTableEntry::GetSyncTableEntry())
705 delete SyncTableEntry::GetSyncTableEntry();
706 SyncTableEntry::GetSyncTableEntryByRef() = 0;
// Called during GC: queues a dead object's sync block onto the cleanup
// list (consumed later by CleanupSyncBlocks on the finalizer thread).
711 void SyncBlockCache::InsertCleanupSyncBlock(SyncBlock* psb)
722 // free up the threads that are waiting before we use the link
723 // for other purposes
724 if (psb->m_Link.m_pNext != NULL)
726 while (ThreadQueue::DequeueThread(psb) != NULL)
730 #ifdef FEATURE_COMINTEROP
731 if (psb->m_pInteropInfo)
734 // so do only minorcleanup
// Full COM cleanup can't run at GC time; defer it to the finalizer thread.
735 MinorCleanupSyncBlockComData(psb->m_pInteropInfo);
737 #endif // FEATURE_COMINTEROP
739 // This method will be called only by the GC thread
740 //<TODO>@todo add an assert for the above statement</TODO>
741 // we don't need to lock here
// Reuse m_Link (waiters already drained above) to chain the cleanup list.
744 psb->m_Link.m_pNext = m_pCleanupBlockList;
745 m_pCleanupBlockList = &psb->m_Link;
747 // we don't need a lock here
747 // we don't need a lock here
// Pops the next sync block off the cleanup list, or NULL when empty.
// Lock-free: only the finalizer thread consumes this list.
751 SyncBlock* SyncBlockCache::GetNextCleanupSyncBlock()
753 LIMITED_METHOD_CONTRACT;
755 // we don't need a lock here,
756 // as this is called only on the finalizer thread currently
758 SyncBlock *psb = NULL;
759 if (m_pCleanupBlockList)
761 // get the actual sync block pointer
// container-of: back up from the embedded m_Link to the SyncBlock itself.
762 psb = (SyncBlock *) (((BYTE *) m_pCleanupBlockList) - offsetof(SyncBlock, m_Link));
763 m_pCleanupBlockList = m_pCleanupBlockList->m_pNext;
769 // returns and removes the next free syncblock from the list
770 // the cache lock must be entered to call this
// Returns raw memory for one SyncBlock: prefer the free list; otherwise
// carve the next slot out of the current SyncBlockArray, allocating a new
// 4K array when the current one is exhausted. Caller holds the cache lock.
771 SyncBlock *SyncBlockCache::GetNextFreeSyncBlock()
775 INJECT_FAULT(COMPlusThrowOM());
782 #ifdef _DEBUG // Instrumentation for OOM fault injection testing
787 SLink *plst = m_FreeBlockList;
793 m_FreeBlockList = m_FreeBlockList->m_pNext;
798 // get the actual sync block pointer
799 psb = (SyncBlock *) (((BYTE *) plst) - offsetof(SyncBlock, m_Link));
// Free list empty: grow the backing arrays if needed.
805 if ((m_SyncBlocks == NULL) || (m_FreeSyncBlock >= MAXSYNCBLOCK))
808 // LogSpewAlways("Allocating new syncblock array\n");
809 // DumpSyncBlockCache();
811 SyncBlockArray* newsyncblocks = new(SyncBlockArray);
// Push the new array onto the chain of arrays (freed only at shutdown).
815 newsyncblocks->m_Next = m_SyncBlocks;
816 m_SyncBlocks = newsyncblocks;
819 return &(((SyncBlock*)m_SyncBlocks->m_Blocks)[m_FreeSyncBlock++]);
// Doubles the sync table (capped at MASK_SYNCBLOCKINDEX). All fallible
// allocations happen up front; after the CANNOTTHROW marker only
// infallible global side effects occur. The old table is chained through
// slot 0 rather than freed, since other threads may still be reading it.
824 void SyncBlockCache::Grow()
832 INJECT_FAULT(COMPlusThrowOM(););
836 STRESS_LOG0(LF_SYNC, LL_INFO10000, "SyncBlockCache::NewSyncBlockSlot growing SyncBlockCache \n");
838 NewArrayHolder<SyncTableEntry> newSyncTable (NULL);
839 NewArrayHolder<DWORD> newBitMap (NULL);
842 // Compute the size of the new synctable. Normally, we double it - unless
843 // doing so would create slots with indices too high to fit within the
844 // mask. If so, we create a synctable up to the mask limit. If we're
845 // already at the mask limit, then caller is out of luck.
846 DWORD newSyncTableSize;
847 if (m_SyncTableSize <= (MASK_SYNCBLOCKINDEX >> 1))
849 newSyncTableSize = m_SyncTableSize * 2;
853 newSyncTableSize = MASK_SYNCBLOCKINDEX;
856 if (!(newSyncTableSize > m_SyncTableSize)) // Make sure we actually found room to grow!
861 newSyncTable = new SyncTableEntry[newSyncTableSize];
862 newBitMap = new DWORD[BitMapSize (newSyncTableSize)];
866 //! From here on, we assume that we will succeed and start doing global side-effects.
867 //! Any operation that could fail must occur before this point.
868 CANNOTTHROWCOMPLUSEXCEPTION();
871 newSyncTable.SuppressRelease();
872 newBitMap.SuppressRelease();
875 // We chain old table because we can't delete
876 // them before all the threads are stoppped
// Slot 0 is reserved, so it can carry the link to the previous table.
878 SyncTableEntry::GetSyncTableEntry() [0].m_Object = (Object *)m_OldSyncTables;
879 m_OldSyncTables = SyncTableEntry::GetSyncTableEntry();
881 memset (newSyncTable, 0, newSyncTableSize*sizeof (SyncTableEntry));
882 memset (newBitMap, 0, BitMapSize (newSyncTableSize)*sizeof (DWORD));
883 CopyMemory (newSyncTable, SyncTableEntry::GetSyncTableEntry(),
884 m_SyncTableSize*sizeof (SyncTableEntry));
886 CopyMemory (newBitMap, m_EphemeralBitmap,
887 BitMapSize (m_SyncTableSize)*sizeof (DWORD));
889 oldBitMap = m_EphemeralBitmap;
890 m_EphemeralBitmap = newBitMap;
893 _ASSERTE((m_SyncTableSize & MASK_SYNCBLOCKINDEX) == m_SyncTableSize);
894 // note: we do not care if another thread does not see the new size
895 // however we really do not want it to see the new size without seeing the new array
896 //@TODO do we still leak here if two threads come here at the same time ?
// Publish the new table pointer before publishing the new size (below).
897 FastInterlockExchangePointer(&SyncTableEntry::GetSyncTableEntryByRef(), newSyncTable.GetValue());
899 m_FreeSyncTableIndex++;
901 m_SyncTableSize = newSyncTableSize;
// Debug-only: optionally dump the cache after each resize.
904 static int dumpSBOnResize = -1;
906 if (dumpSBOnResize == -1)
907 dumpSBOnResize = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_SBDumpOnResize);
911 LogSpewAlways("SyncBlockCache resized\n");
912 DumpSyncBlockCache();
// Allocates a sync-table slot for obj and returns its index (never 0).
// Prefers the free list (indices encoded as (idx << 1) | 1 in m_Object);
// otherwise bumps m_FreeSyncTableIndex, growing the table when exhausted.
918 DWORD SyncBlockCache::NewSyncBlockSlot(Object *obj)
926 INJECT_FAULT(COMPlusThrowOM(););
929 _ASSERTE(m_CacheLock.OwnedByCurrentThread()); // GetSyncBlock takes the lock, make sure no one else does.
932 if (m_FreeSyncTableList)
// Free-list entries store the next free index in the slot's m_Object
// field, tagged with the low bit; decode and advance the list head.
934 indexNewEntry = (DWORD)(m_FreeSyncTableList >> 1);
935 _ASSERTE ((size_t)SyncTableEntry::GetSyncTableEntry()[indexNewEntry].m_Object.Load() & 1);
936 m_FreeSyncTableList = (size_t)SyncTableEntry::GetSyncTableEntry()[indexNewEntry].m_Object.Load() & ~1;
938 else if ((indexNewEntry = (DWORD)(m_FreeSyncTableIndex)) >= m_SyncTableSize)
940 // This is kept out of line to keep stuff like the C++ EH prolog (needed for holders) off
941 // of the common path.
// Debug-only: optionally dump the cache each time a fresh index is handed out.
947 static int dumpSBOnNewIndex = -1;
949 if (dumpSBOnNewIndex == -1)
950 dumpSBOnNewIndex = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_SBDumpOnNewIndex);
952 if (dumpSBOnNewIndex)
954 LogSpewAlways("SyncBlockCache index incremented\n");
955 DumpSyncBlockCache();
958 m_FreeSyncTableIndex ++;
// Mark the card so the GC's ephemeral scan visits this slot.
962 CardTableSetBit (indexNewEntry);
964 // In debug builds the m_SyncBlock at indexNewEntry should already be null, since we should
965 // start out with a null table and always null it out on delete.
966 _ASSERTE(SyncTableEntry::GetSyncTableEntry() [indexNewEntry].m_SyncBlock == NULL);
967 SyncTableEntry::GetSyncTableEntry() [indexNewEntry].m_SyncBlock = NULL;
968 SyncTableEntry::GetSyncTableEntry() [indexNewEntry].m_Object = obj;
970 _ASSERTE(indexNewEntry != 0);
972 return indexNewEntry;
976 // free a used sync block, only called from CleanupSyncBlocks.
// Full deletion path (finalizer thread, via CleanupSyncBlocks): runs COM
// interop cleanup, releases or recycles the interop info, cleans EnC data,
// then returns the block's memory to the free list under the cache lock.
977 void SyncBlockCache::DeleteSyncBlock(SyncBlock *psb)
985 INJECT_FAULT(COMPlusThrowOM());
990 if (psb->m_pInteropInfo)
992 #ifdef FEATURE_COMINTEROP
993 CleanupSyncBlockComData(psb->m_pInteropInfo);
994 #endif // FEATURE_COMINTEROP
999 delete psb->m_pInteropInfo;
// Recycling path: destruct in place and park the memory on the standby
// SList for reuse (freed later by FlushStandbyList).
1003 psb->m_pInteropInfo->~InteropSyncBlockInfo();
1004 InterlockedPushEntrySList(&InteropSyncBlockInfo::s_InteropInfoStandbyList, (PSLIST_ENTRY)psb->m_pInteropInfo);
1006 #else // !FEATURE_PAL
1007 delete psb->m_pInteropInfo;
1008 #endif // !FEATURE_PAL
1011 #ifdef EnC_SUPPORTED
1012 // clean up EnC info
1013 if (psb->m_pEnCInfo)
1014 psb->m_pEnCInfo->Cleanup();
1015 #endif // EnC_SUPPORTED
1017 // Destruct the SyncBlock, but don't reclaim its memory. (Overridden
1018 // operator delete).
1021 //synchronizer with the consumers,
1022 // <TODO>@todo we don't really need a lock here, we can come up
1023 // with some simple algo to avoid taking a lock </TODO>
1025 SyncBlockCache::LockHolder lh(this);
1027 DeleteSyncBlockMemory(psb);
1032 // returns the sync block memory to the free pool but does not destruct sync block (must own cache lock already)
1033 // returns the sync block memory to the free pool but does not destruct sync block (must own cache lock already)
// Push the block's embedded link onto the head of the free list.
1047 psb->m_Link.m_pNext = m_FreeBlockList;
1048 m_FreeBlockList = &psb->m_Link;
1052 // free a used sync block
// GC-time deletion: like DeleteSyncBlock but without interop/EnC cleanup
// or locking — the GC has exclusive access, so the memory goes straight
// onto the free list.
1053 void SyncBlockCache::GCDeleteSyncBlock(SyncBlock *psb)
1064 // Destruct the SyncBlock, but don't reclaim its memory. (Overridden
1065 // operator delete).
1071 psb->m_Link.m_pNext = m_FreeBlockList;
1072 m_FreeBlockList = &psb->m_Link;
// GC weak-pointer scan over the sync table. For ephemeral GCs, only the
// card-marked regions of the table are scanned; for full GCs, every slot
// from 1..m_FreeSyncTableIndex is scanned. Dead entries either get their
// sync block freed immediately or queued for finalizer-thread cleanup.
1075 void SyncBlockCache::GCWeakPtrScan(HANDLESCANPROC scanProc, uintptr_t lp1, uintptr_t lp2)
1087 // First delete the obsolete arrays since we have exclusive access
1088 BOOL fSetSyncBlockCleanup = FALSE;
1090 SyncTableEntry* arr;
// Old tables chained through slot 0 can finally be freed: all threads are stopped.
1091 while ((arr = m_OldSyncTables) != NULL)
1093 m_OldSyncTables = (SyncTableEntry*)arr[0].m_Object.Load();
1098 LogSpewAlways("GCWeakPtrScan starting\n");
1102 if (g_pConfig->GetHeapVerifyLevel()& EEConfig::HEAPVERIFY_SYNCBLK)
1103 STRESS_LOG0 (LF_GC | LF_SYNC, LL_INFO100, "GCWeakPtrScan starting\n");
// Ephemeral GC: condemned generation below max → scan only marked cards.
1106 if (GCHeapUtilities::GetGCHeap()->GetCondemnedGeneration() < GCHeapUtilities::GetGCHeap()->GetMaxGeneration())
1109 //for VSW 294550: we saw stale obeject reference in SyncBlkCache, so we want to make sure the card
1110 //table logic above works correctly so that every ephemeral entry is promoted.
1111 //For verification, we make a copy of the sync table in relocation phase and promote it use the
1112 //slow approach and compare the result with the original one
1113 DWORD freeSyncTalbeIndexCopy = m_FreeSyncTableIndex;
1114 SyncTableEntry * syncTableShadow = NULL;
1115 if ((g_pConfig->GetHeapVerifyLevel()& EEConfig::HEAPVERIFY_SYNCBLK) && !((ScanContext*)lp1)->promotion)
// Best-effort shadow copy (new(nothrow)); skipped silently on OOM.
1117 syncTableShadow = new(nothrow) SyncTableEntry [m_FreeSyncTableIndex];
1118 if (syncTableShadow)
1120 memcpy (syncTableShadow, SyncTableEntry::GetSyncTableEntry(), m_FreeSyncTableIndex * sizeof (SyncTableEntry));
1123 #endif //VERIFY_HEAP
// Skip whole bitmap words that are zero (no marked cards).
1129 while (dw < BitMapSize (m_SyncTableSize) && (m_EphemeralBitmap[dw]==0))
1133 if (dw < BitMapSize (m_SyncTableSize))
1136 for (int i = 0; i < card_word_width; i++)
1138 size_t card = i+dw*card_word_width;
1139 if (CardSetP (card))
1141 BOOL clear_card = TRUE;
1142 for (int idx = 0; idx < card_size; idx++)
1144 size_t nb = CardIndex (card) + idx;
1145 if (( nb < m_FreeSyncTableIndex) && (nb > 0))
1147 Object* o = SyncTableEntry::GetSyncTableEntry()[nb].m_Object;
// Low bit set means the slot is on the free list, not a real object.
1148 if (o && !((size_t)o & 1))
1150 if (GCHeapUtilities::GetGCHeap()->IsEphemeral (o))
1154 GCWeakPtrScanElement ((int)nb, scanProc,
1155 lp1, lp2, fSetSyncBlockCleanup);
1171 //for VSW 294550: we saw stale obeject reference in SyncBlkCache, so we want to make sure the card
1172 //table logic above works correctly so that every ephemeral entry is promoted. To verify, we make a
1173 //copy of the sync table and promote it use the slow approach and compare the result with the real one
1174 if (g_pConfig->GetHeapVerifyLevel()& EEConfig::HEAPVERIFY_SYNCBLK)
1176 if (syncTableShadow)
1178 for (DWORD nb = 1; nb < m_FreeSyncTableIndex; nb++)
1180 Object **keyv = (Object **) &syncTableShadow[nb].m_Object;
1182 if (((size_t) *keyv & 1) == 0)
// Promote via the slow path on the shadow, then compare with the real table.
1184 (*scanProc) (keyv, NULL, lp1, lp2);
1185 SyncBlock *pSB = syncTableShadow[nb].m_SyncBlock;
1186 if (*keyv != 0 && (!pSB || !pSB->IsIDisposable()))
1188 if (syncTableShadow[nb].m_Object != SyncTableEntry::GetSyncTableEntry()[nb].m_Object)
1193 delete []syncTableShadow;
1194 syncTableShadow = NULL;
1196 if (freeSyncTalbeIndexCopy != m_FreeSyncTableIndex)
1199 #endif //VERIFY_HEAP
// Full GC: scan every live slot.
1204 for (DWORD nb = 1; nb < m_FreeSyncTableIndex; nb++)
1206 GCWeakPtrScanElement (nb, scanProc, lp1, lp2, fSetSyncBlockCleanup);
1212 if (fSetSyncBlockCleanup)
1214 // mark the finalizer thread saying requires cleanup
1215 FinalizerThread::GetFinalizerThread()->SetSyncBlockCleanup();
1216 FinalizerThread::EnableFinalization();
// Heap-verify pass: after promotion, every live entry's object should be valid.
1219 #if defined(VERIFY_HEAP)
1220 if (g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_GC)
1222 if (((ScanContext*)lp1)->promotion)
1225 for (int nb = 1; nb < (int)m_FreeSyncTableIndex; nb++)
1227 Object* o = SyncTableEntry::GetSyncTableEntry()[nb].m_Object;
1228 if (((size_t)o & 1) == 0)
1235 #endif // VERIFY_HEAP
1238 /* Scan the weak pointers in the SyncBlockEntry and report them to the GC. If the
1239 reference is dead, then return TRUE */
1238 /* Scan the weak pointers in the SyncBlockEntry and report them to the GC. If the
1239 reference is dead, then return TRUE */
1241 BOOL SyncBlockCache::GCWeakPtrScanElement (int nb, HANDLESCANPROC scanProc, LPARAM lp1, LPARAM lp2,
1253 Object **keyv = (Object **) &SyncTableEntry::GetSyncTableEntry()[nb].m_Object;
// Debug spew: derive a printable class name defensively (the object may
// be garbage during GC, hence the PAL_TRY guard).
1263 PAL_TRY(Param *, pParam, &param) {
1264 if (! *pParam->keyv)
1265 pParam->name = "null";
1266 else if ((size_t) *pParam->keyv & 1)
1267 pParam->name = "free";
1269 pParam->name = (*pParam->keyv)->GetClass()->GetDebugClassName();
1270 if (strlen(pParam->name) == 0)
1271 pParam->name = "<INVALID>";
1273 } PAL_EXCEPT(EXCEPTION_EXECUTE_HANDLER) {
1274 param.name = "<INVALID>";
1277 LogSpewAlways("[%4.4d]: %8.8x, %s\n", nb, *keyv, param.name);
// Low bit clear → slot holds a real (possibly dead) object reference.
1280 if (((size_t) *keyv & 1) == 0)
1283 if (g_pConfig->GetHeapVerifyLevel () & EEConfig::HEAPVERIFY_SYNCBLK)
1285 STRESS_LOG3 (LF_GC | LF_SYNC, LL_INFO100000, "scanning syncblk[%d, %p, %p]\n", nb, (size_t)SyncTableEntry::GetSyncTableEntry()[nb].m_SyncBlock, (size_t)*keyv);
// Report the weak reference; scanProc nulls *keyv if the object is dead.
1289 (*scanProc) (keyv, NULL, lp1, lp2);
1290 SyncBlock *pSB = SyncTableEntry::GetSyncTableEntry()[nb].m_SyncBlock;
// Dead object, or a block needing IDisposable-style cleanup.
1291 if ((*keyv == 0 ) || (pSB && pSB->IsIDisposable()))
1294 if (g_pConfig->GetHeapVerifyLevel () & EEConfig::HEAPVERIFY_SYNCBLK)
1296 STRESS_LOG3 (LF_GC | LF_SYNC, LL_INFO100000, "freeing syncblk[%d, %p, %p]\n", nb, (size_t)pSB, (size_t)*keyv);
// Simple case: free the sync block right now, at GC time.
1303 GCDeleteSyncBlock(pSB);
1304 //clean the object syncblock header
1305 ((Object*)(*keyv))->GetHeader()->GCResetIndex();
1311 // insert block into cleanup list
// Interop/IDisposable case: defer to the finalizer thread.
1312 InsertCleanupSyncBlock (SyncTableEntry::GetSyncTableEntry()[nb].m_SyncBlock);
1314 LogSpewAlways(" Cleaning up block at %4.4d\n", nb);
1320 LogSpewAlways(" Deleting block at %4.4d\n", nb);
// Return the slot to the free list: encode the old head as (idx<<1)|1.
1322 SyncTableEntry::GetSyncTableEntry()[nb].m_Object = (Object *)(m_FreeSyncTableList | 1);
1323 m_FreeSyncTableList = nb << 1;
1324 SyncTableEntry::GetSyncTableEntry()[nb].m_SyncBlock = NULL;
1330 LogSpewAlways(" Keeping block at %4.4d with oref %8.8x\n", nb, *keyv);
// Post-GC hook: walks the card bitmap and, for objects that are no longer
// in a generation below max_gen, clears their ephemeral tracking.
// (Card-clearing details are elided from this excerpt.)
1337 void SyncBlockCache::GCDone(BOOL demoting, int max_gen)
1349 (GCHeapUtilities::GetGCHeap()->GetCondemnedGeneration() ==
1350 GCHeapUtilities::GetGCHeap()->GetMaxGeneration()))
// Skip bitmap words that are fully set — nothing to reconsider there.
1356 while (dw < BitMapSize (m_SyncTableSize) &&
1357 (m_EphemeralBitmap[dw]==(DWORD)~0))
1361 if (dw < BitMapSize (m_SyncTableSize))
1364 for (int i = 0; i < card_word_width; i++)
1366 size_t card = i+dw*card_word_width;
1367 if (!CardSetP (card))
1369 for (int idx = 0; idx < card_size; idx++)
1371 size_t nb = CardIndex (card) + idx;
1372 if (( nb < m_FreeSyncTableIndex) && (nb > 0))
1374 Object* o = SyncTableEntry::GetSyncTableEntry()[nb].m_Object;
// Skip free-list entries (low bit set).
1375 if (o && !((size_t)o & 1))
1377 if (GCHeapUtilities::GetGCHeap()->WhichGeneration (o) < (unsigned int)max_gen)
1397 #if defined (VERIFY_HEAP)
1403 #define _ASSERTE(c) if (!(c)) DebugBreak()
// Heap-verify helper: for every live sync-table slot, validate the object
// and check that the object's header points back at this slot's index.
1406 void SyncBlockCache::VerifySyncTableEntry()
1417 for (DWORD nb = 1; nb < m_FreeSyncTableIndex; nb++)
1419 Object* o = SyncTableEntry::GetSyncTableEntry()[nb].m_Object;
1420 // if the slot was just allocated, the object may still be null
1421 if (o && (((size_t)o & 1) == 0))
1423 //there is no need to verify next object's header because this is called
1424 //from verify_heap, which will verify every object anyway
1425 o->Validate(TRUE, FALSE);
1428 // This loop is just a heuristic to try to catch errors, but it is not 100%.
1429 // To prevent false positives, we weaken our assert below to exclude the case
1430 // where the index is still NULL, but we've reached the end of our loop.
1432 static const DWORD max_iterations = 100;
1435 for (; loop < max_iterations; loop++)
1437 // The syncblock index may be updating by another thread.
1438 if (o->GetHeader()->GetHeaderSyncBlockIndex() != 0)
1442 __SwitchToThread(0, CALLER_LIMITS_SPINNING);
1445 DWORD idx = o->GetHeader()->GetHeaderSyncBlockIndex();
// Either the header round-trips to this slot, or the writer was still
// mid-update after max_iterations spins (idx == 0) — both acceptable.
1446 _ASSERTE(idx == nb || ((0 == idx) && (loop == max_iterations)));
// Ephemeral objects must have their card marked, or the GC scan would miss them.
1447 _ASSERTE(!GCHeapUtilities::GetGCHeap()->IsEphemeral(o) || CardSetP(CardOf(nb)));
1454 #define _ASSERTE(expr) ((void)0)
1457 #endif // VERIFY_HEAP
// Debug dump of the sync-block cache: one log line per used slot, with the
// object's class name (and string contents for System.String instances).
// Style is controlled by the INTERNAL_SBDumpStyle config value; 0 = off.
1461 void DumpSyncBlockCache()
1463 STATIC_CONTRACT_NOTHROW;
1465 SyncBlockCache *pCache = SyncBlockCache::GetSyncBlockCache();
1467 LogSpewAlways("Dumping SyncBlockCache size %d\n", pCache->m_FreeSyncTableIndex);
1469 static int dumpSBStyle = -1;
1470 if (dumpSBStyle == -1)
1471 dumpSBStyle = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_SBDumpStyle);
1472 if (dumpSBStyle == 0)
1475 BOOL isString = FALSE;
1476 DWORD objectCount = 0;
1477 DWORD slotCount = 0;
1479 for (DWORD nb = 1; nb < pCache->m_FreeSyncTableIndex; nb++)
1482 char buffer[1024], buffer2[1024];
1483 LPCUTF8 descrip = "null";
1484 SyncTableEntry *pEntry = &SyncTableEntry::GetSyncTableEntry()[nb];
1485 Object *oref = (Object *) pEntry->m_Object;
// Low bit set → slot is on the free list, not a live object.
1486 if (((size_t) oref & 1) != 0)
1506 param.descrip = descrip;
1508 param.buffer2 = buffer2;
1509 param.cch2 = COUNTOF(buffer2);
1510 param.isString = isString;
// Guarded name extraction: the object may be invalid, so catch any fault.
1512 PAL_TRY(Param *, pParam, &param)
1514 pParam->descrip = pParam->oref->GetMethodTable()->GetDebugClassName();
1515 if (strlen(pParam->descrip) == 0)
1516 pParam->descrip = "<INVALID>";
1517 else if (pParam->oref->GetMethodTable() == g_pStringClass)
1519 sprintf_s(pParam->buffer2, pParam->cch2, "%s (%S)", pParam->descrip, ObjectToSTRINGREF((StringObject*)pParam->oref)->GetBuffer());
1520 pParam->descrip = pParam->buffer2;
1521 pParam->isString = TRUE;
1524 PAL_EXCEPT(EXCEPTION_EXECUTE_HANDLER) {
1525 param.descrip = "<INVALID>";
1529 descrip = param.descrip;
1530 isString = param.isString;
1532 sprintf_s(buffer, COUNTOF(buffer), "%s", descrip);
// Style 2 suppresses strings; style < 2 includes the object pointer.
1535 if (dumpSBStyle < 2)
1536 LogSpewAlways("[%4.4d]: %8.8x %s\n", nb, oref, descrip);
1537 else if (dumpSBStyle == 2 && ! isString)
1538 LogSpewAlways("[%4.4d]: %s\n", nb, descrip);
1540 LogSpewAlways("Done dumping SyncBlockCache used slots: %d, objects: %d\n", slotCount, objectCount);
1544 // ***************************************************************************
1546 // ObjHeader class implementation
1548 // ***************************************************************************
1550 #if defined(ENABLE_CONTRACTS_IMPL)
1551 // The LOCK_TAKEN/RELEASED macros need a "pointer" to the lock object to do
1552 // comparisons between takes & releases (and to provide debugging info to the
1553 // developer). Ask the syncblock for its lock contract pointer, if the
1554 // syncblock exists. Otherwise, use the MethodTable* from the Object. That's not great,
1555 // as it's not unique, so we might miss unbalanced lock takes/releases from
1556 // different objects of the same type. However, our hands are tied, and we can't
// Contracts-only: identity pointer for lock take/release matching. Uses
// the sync block's contract pointer when one exists, else falls back to
// the (non-unique) MethodTable* — see the comment block above.
1558 void * ObjHeader::GetPtrForLockContract()
1560 if (GetHeaderSyncBlockIndex() == 0)
1562 return (void *) GetBaseObject()->GetMethodTable();
1565 return PassiveGetSyncBlock()->GetPtrForLockContract();
1567 #endif // defined(ENABLE_CONTRACTS_IMPL)
1569 // this enters the monitor of an object
// Blocking acquire. GetSyncBlock() materializes the SyncBlock on demand
// (and can throw OOM - see its INJECT_FAULT contract), then the monitor
// is entered on its AwareLock.
1570 void ObjHeader::EnterObjMonitor()
1572 WRAPPER_NO_CONTRACT;
1573 GetSyncBlock()->EnterMonitor();
1576 // Non-blocking version of above
// Timed/non-blocking acquire; the result is whatever
// SyncBlock::TryEnterMonitor reports for the given timeout.
1577 BOOL ObjHeader::TryEnterObjMonitor(INT32 timeOut)
1579 WRAPPER_NO_CONTRACT;
1580 return GetSyncBlock()->TryEnterMonitor(timeOut);
// Spin phase of monitor acquisition, entered only after a first fast-path
// attempt (EnterObjMonitorHelper) has failed. Returns Entered on success,
// UseSlowPath when the caller must take the full path (hash code stored in
// the header, thread id too wide for the thin lock, or a helper requests
// it), or Contention once the spin budget is exhausted.
1583 AwareLock::EnterHelperResult ObjHeader::EnterObjMonitorHelperSpin(Thread* pCurThread)
1591 // Note: EnterObjMonitorHelper must be called before this function (see below)
// Spinning cannot help on a uniprocessor - the owner can't make progress
// while we spin - so report contention immediately.
1593 if (g_SystemInfo.dwNumberOfProcessors == 1)
1595 return AwareLock::EnterHelperResult_Contention;
1598 YieldProcessorNormalizationInfo normalizationInfo;
1599 const DWORD spinCount = g_SpinConstants.dwMonitorSpinCount;
1600 for (DWORD spinIteration = 0; spinIteration < spinCount; ++spinIteration)
1602 AwareLock::SpinWait(normalizationInfo, spinIteration);
// Re-read the header word each iteration; it may be concurrently mutated.
1604 LONG oldValue = m_SyncBlockValue.LoadWithoutBarrier();
1606 // Since spinning has begun, chances are good that the monitor has already switched to AwareLock mode, so check for that
1608 if (oldValue & BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX)
1610 // If we have a hash code already, we need to create a sync block
1611 if (oldValue & BIT_SBLK_IS_HASHCODE)
1613 return AwareLock::EnterHelperResult_UseSlowPath;
// Header holds a syncblock index: spin against the AwareLock instead of
// the header word from here on.
1616 SyncBlock *syncBlock = g_pSyncTable[oldValue & MASK_SYNCBLOCKINDEX].m_SyncBlock;
1617 _ASSERTE(syncBlock != NULL);
1618 AwareLock *awareLock = &syncBlock->m_Monitor;
1620 AwareLock::EnterHelperResult result = awareLock->TryEnterBeforeSpinLoopHelper(pCurThread);
1621 if (result != AwareLock::EnterHelperResult_Contention)
1627 if (spinIteration < spinCount)
1631 AwareLock::SpinWait(normalizationInfo, spinIteration);
1634 if (spinIteration >= spinCount)
1636 // The last lock attempt for this spin will be done after the loop
1640 result = awareLock->TryEnterInsideSpinLoopHelper(pCurThread);
1641 if (result == AwareLock::EnterHelperResult_Entered)
1643 return AwareLock::EnterHelperResult_Entered;
1645 if (result == AwareLock::EnterHelperResult_UseSlowPath)
1652 if (awareLock->TryEnterAfterSpinLoopHelper(pCurThread))
1654 return AwareLock::EnterHelperResult_Entered;
// Thin-lock layout: if the header word is completely free (no spin lock,
// no owner, no recursion), try to stamp our thread id into it.
1659 DWORD tid = pCurThread->GetThreadId();
1660 if ((oldValue & (BIT_SBLK_SPIN_LOCK +
1661 SBLK_MASK_LOCK_THREADID +
1662 SBLK_MASK_LOCK_RECLEVEL)) == 0)
// Thread id doesn't fit in the thin-lock field - a syncblock is required.
1664 if (tid > SBLK_MASK_LOCK_THREADID)
1666 return AwareLock::EnterHelperResult_UseSlowPath;
1669 LONG newValue = oldValue | tid;
// CAS with acquire semantics; success means this thread owns the thin lock.
1670 if (InterlockedCompareExchangeAcquire((LONG*)&m_SyncBlockValue, newValue, oldValue) == oldValue)
1672 pCurThread->IncLockCount();
1673 return AwareLock::EnterHelperResult_Entered;
1679 // EnterObjMonitorHelper handles the thin lock recursion case. If it's not that case, it won't become that case. If
1680 // EnterObjMonitorHelper failed to increment the recursion level, it will go down the slow path and won't come here. So,
1681 // no need to check the recursion case here.
1683 // The header is transitioning - treat this as if the lock was taken
1684 oldValue & BIT_SBLK_SPIN_LOCK ||
1685 // Here we know we have the "thin lock" layout, but the lock is not free.
1686 // It can't be the recursion case though, because the call to EnterObjMonitorHelper prior to this would have taken
1687 // the slow path in the recursive case.
1688 tid != (DWORD)(oldValue & SBLK_MASK_LOCK_THREADID));
1691 return AwareLock::EnterHelperResult_Contention;
// Releases the object's monitor. On the contention path (another thread is
// transitioning the syncblock value) this yields the CPU, and because that
// path can switch to preemptive mode, the object is GC-protected first.
1694 BOOL ObjHeader::LeaveObjMonitor()
1704 //this function switch to preemp mode so we need to protect the object in some path
1705 OBJECTREF thisObj = ObjectToOBJECTREF (GetBaseObject ());
1707 DWORD dwSwitchCount = 0;
1711 AwareLock::LeaveHelperAction action = thisObj->GetHeader ()->LeaveObjMonitorHelper(GetThread());
1715 case AwareLock::LeaveHelperAction_None:
// A waiter exists: signal the monitor's event so it can wake up.
1718 case AwareLock::LeaveHelperAction_Signal:
1721 SyncBlock *psb = thisObj->GetHeader ()->PassiveGetSyncBlock();
1723 psb->QuickGetMonitor()->Signal();
1726 case AwareLock::LeaveHelperAction_Yield:
1727 YieldProcessorNormalized();
1729 case AwareLock::LeaveHelperAction_Contention:
1730 // Some thread is updating the syncblock value.
1732 //protect the object before switching mode
1733 GCPROTECT_BEGIN (thisObj);
1735 __SwitchToThread(0, ++dwSwitchCount);
1740 // Must be an error otherwise - ignore it
1741 _ASSERTE(action == AwareLock::LeaveHelperAction_Error);
1747 // The only difference between LeaveObjMonitor and LeaveObjMonitorAtException is switch
1748 // to preemptive mode around __SwitchToThread
// Exception-path variant of LeaveObjMonitor: stays in the current GC mode
// while yielding (see the NoTriggerGC reasoning below), so no GCPROTECT of
// the object is needed here.
1749 BOOL ObjHeader::LeaveObjMonitorAtException()
1759 DWORD dwSwitchCount = 0;
1763 AwareLock::LeaveHelperAction action = LeaveObjMonitorHelper(GetThread());
1767 case AwareLock::LeaveHelperAction_None:
1770 case AwareLock::LeaveHelperAction_Signal:
1773 SyncBlock *psb = PassiveGetSyncBlock();
1775 psb->QuickGetMonitor()->Signal();
1778 case AwareLock::LeaveHelperAction_Yield:
1779 YieldProcessorNormalized();
1781 case AwareLock::LeaveHelperAction_Contention:
1782 // Some thread is updating the syncblock value.
1784 // We never toggle GC mode while holding the spinlock (BeginNoTriggerGC/EndNoTriggerGC
1785 // in EnterSpinLock/ReleaseSpinLock ensures it). Thus we do not need to switch to preemptive
1786 // while waiting on the spinlock.
1789 __SwitchToThread(0, ++dwSwitchCount);
1793 // Must be an error otherwise - ignore it
1794 _ASSERTE(action == AwareLock::LeaveHelperAction_Error);
1800 #endif //!DACCESS_COMPILE
1802 // Returns TRUE if the lock is owned and FALSE otherwise
1803 // threadId is set to the ID (Thread::GetThreadId()) of the thread which owns the lock
1804 // acquisitionCount is set to the number of times the lock needs to be released before
// Works for both lock representations: an AwareLock behind a syncblock
// index, or a thin lock packed directly into the header word.
1806 BOOL ObjHeader::GetThreadOwningMonitorLock(DWORD *pThreadId, DWORD *pAcquisitionCount)
1812 #ifndef DACCESS_COMPILE
1813 if (!IsGCSpecialThread ()) {MODE_COOPERATIVE;} else {MODE_ANY;}
1820 DWORD bits = GetBits();
// Header holds either a hash code or a syncblock index.
1822 if (bits & BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX)
// Hash code only - there is no lock state at all.
1824 if (bits & BIT_SBLK_IS_HASHCODE)
1827 // This thread does not own the lock.
1830 *pAcquisitionCount = 0;
1836 // We have a syncblk
1838 DWORD index = bits & MASK_SYNCBLOCKINDEX;
1839 SyncBlock* psb = g_pSyncTable[(int)index].m_SyncBlock;
1841 _ASSERTE(psb->GetMonitor() != NULL);
1842 Thread* pThread = psb->GetMonitor()->GetHoldingThread();
1846 *pAcquisitionCount = 0;
1851 *pThreadId = pThread->GetThreadId();
1852 *pAcquisitionCount = psb->GetMonitor()->GetRecursionLevel();
1860 // We have a thinlock
// Owner id and recursion level are bit-packed in the header word itself.
1863 DWORD lockThreadId, recursionLevel;
1864 lockThreadId = bits & SBLK_MASK_LOCK_THREADID;
1865 recursionLevel = (bits & SBLK_MASK_LOCK_RECLEVEL) >> SBLK_RECLEVEL_SHIFT;
1866 //if thread ID is 0, recursionLevel got to be zero
1867 //but thread ID doesn't have to be valid because the lock could be orphanend
1868 _ASSERTE (lockThreadId != 0 || recursionLevel == 0 );
1870 *pThreadId = lockThreadId;
1871 if(lockThreadId != 0)
1873 // in the header, the recursionLevel of 0 means the lock is owned once
1874 // (this differs from m_Recursion in the AwareLock)
1875 *pAcquisitionCount = recursionLevel + 1;
1880 *pAcquisitionCount = 0;
1886 #ifndef DACCESS_COMPILE
// Acquires the per-object header spin lock (BIT_SBLK_SPIN_LOCK) via CAS,
// spinning/yielding until free. On exit the thread is marked NoTriggerGC,
// paired with EndNoTriggerGC in ReleaseSpinLock. NOTE(review): a second
// EnterSpinLock variant follows below; the preprocessor condition selecting
// between the two is elided in this excerpt - confirm against full source.
1889 DEBUG_NOINLINE void ObjHeader::EnterSpinLock()
1891 // NOTE: This function cannot have a dynamic contract. If it does, the contract's
1892 // destructor will reset the CLR debug state to what it was before entering the
1893 // function, which will undo the BeginNoTriggerGC() call below.
1895 STATIC_CONTRACT_GC_NOTRIGGER;
1901 DWORD dwSwitchCount = 0;
1907 // Give 64bit more time because there isn't a remoting fast path now, and we've hit this assert
1908 // needlessly in CLRSTRESS.
// Debug safety net: flags a pathologically long spin-lock hold.
1913 _ASSERTE(!"ObjHeader::EnterLock timed out");
1915 // get the value so that it doesn't get changed under us.
1916 LONG curValue = m_SyncBlockValue.LoadWithoutBarrier();
1918 // check if lock taken
1919 if (! (curValue & BIT_SBLK_SPIN_LOCK))
1921 // try to take the lock
1922 LONG newValue = curValue | BIT_SBLK_SPIN_LOCK;
1923 LONG result = FastInterlockCompareExchange((LONG*)&m_SyncBlockValue, newValue, curValue);
1924 if (result == curValue)
// MP only: spin briefly watching the bit before giving up the timeslice.
1927 if (g_SystemInfo.dwNumberOfProcessors > 1)
1929 for (int spinCount = 0; spinCount < BIT_SBLK_SPIN_COUNT; spinCount++)
1931 if (! (m_SyncBlockValue & BIT_SBLK_SPIN_LOCK))
1933 YieldProcessorNormalized(); // indicate to the processor that we are spinning
1935 if (m_SyncBlockValue & BIT_SBLK_SPIN_LOCK)
1936 __SwitchToThread(0, ++dwSwitchCount);
1939 __SwitchToThread(0, ++dwSwitchCount);
// Holding the spin lock forbids GC triggers; record that for contract checks.
1942 INCONTRACT(Thread* pThread = GetThread());
1943 INCONTRACT(if (pThread != NULL) pThread->BeginNoTriggerGC(__FILE__, __LINE__));
// Simpler build-dependent variant of EnterSpinLock: CAS-acquire the header
// spin-lock bit, yielding the timeslice on each failed attempt (no inner
// spin loop). See the note on the first variant - the #if separating the
// two is elided in this excerpt.
1946 DEBUG_NOINLINE void ObjHeader::EnterSpinLock()
1949 STATIC_CONTRACT_GC_NOTRIGGER;
1955 DWORD dwSwitchCount = 0;
1961 _ASSERTE(!"ObjHeader::EnterLock timed out");
1963 // get the value so that it doesn't get changed under us.
1964 LONG curValue = m_SyncBlockValue.LoadWithoutBarrier();
1966 // check if lock taken
1967 if (! (curValue & BIT_SBLK_SPIN_LOCK))
1969 // try to take the lock
1970 LONG newValue = curValue | BIT_SBLK_SPIN_LOCK;
1971 LONG result = FastInterlockCompareExchange((LONG*)&m_SyncBlockValue, newValue, curValue);
1972 if (result == curValue)
1975 __SwitchToThread(0, ++dwSwitchCount);
// Holding the spin lock forbids GC triggers; record that for contract checks.
1978 INCONTRACT(Thread* pThread = GetThread());
1979 INCONTRACT(if (pThread != NULL) pThread->BeginNoTriggerGC(__FILE__, __LINE__));
// Releases the header spin lock by atomically clearing BIT_SBLK_SPIN_LOCK,
// first closing the NoTriggerGC region opened in EnterSpinLock.
1983 DEBUG_NOINLINE void ObjHeader::ReleaseSpinLock()
1986 LIMITED_METHOD_CONTRACT;
1988 INCONTRACT(Thread* pThread = GetThread());
1989 INCONTRACT(if (pThread != NULL) pThread->EndNoTriggerGC());
1991 FastInterlockAnd(&m_SyncBlockValue, ~BIT_SBLK_SPIN_LOCK);
1994 #endif //!DACCESS_COMPILE
1996 #ifndef DACCESS_COMPILE
// Returns the object's syncblock index, reserving a slot if the header does
// not yet carry one. Uses a double-check under the cache lock plus the
// header spin lock, and can throw OOM (see INJECT_FAULT).
1998 DWORD ObjHeader::GetSyncBlockIndex()
2006 INJECT_FAULT(COMPlusThrowOM(););
2012 if ((indx = GetHeaderSyncBlockIndex()) == 0)
2014 BOOL fMustCreateSyncBlock = FALSE;
2016 //Need to get it from the cache
2017 SyncBlockCache::LockHolder lh(SyncBlockCache::GetSyncBlockCache());
// Re-check under the cache lock: another thread may have assigned an index.
2020 if (GetHeaderSyncBlockIndex() == 0)
2022 ENTER_SPIN_LOCK(this);
2023 // Now the header will be stable - check whether hashcode, appdomain index or lock information is stored in it.
2024 DWORD bits = GetBits();
2025 if (((bits & (BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX | BIT_SBLK_IS_HASHCODE)) == (BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX | BIT_SBLK_IS_HASHCODE)) ||
2026 ((bits & BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX) == 0)
2028 // Need a sync block to store this info
2029 fMustCreateSyncBlock = TRUE;
// Header is free of hash/lock info: a bare slot reservation suffices.
2033 SetIndex(BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX | SyncBlockCache::GetSyncBlockCache()->NewSyncBlockSlot(GetBaseObject()));
2035 LEAVE_SPIN_LOCK(this);
2037 // SyncBlockCache::LockHolder goes out of scope here
2040 if (fMustCreateSyncBlock)
2043 if ((indx = GetHeaderSyncBlockIndex()) == 0)
2050 #if defined (VERIFY_HEAP)
// Heap-verification check of the object header bits: validates the string
// high-char bits, finalizer-run bit, GC reserve bit, and either the
// syncblock-index back-pointer or the thin-lock invariant.
2052 BOOL ObjHeader::Validate (BOOL bVerifySyncBlkIndex)
2054 STATIC_CONTRACT_THROWS;
2055 STATIC_CONTRACT_GC_NOTRIGGER;
2056 STATIC_CONTRACT_MODE_COOPERATIVE;
2058 DWORD bits = GetBits ();
2059 Object * obj = GetBaseObject ();
2060 BOOL bVerifyMore = g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_SYNCBLK;
2061 //the highest 2 bits have reloaded meaning
2062 //for string objects:
2063 // BIT_SBLK_STRING_HAS_NO_HIGH_CHARS 0x80000000
2064 // BIT_SBLK_STRING_HIGH_CHARS_KNOWN 0x40000000
2065 // BIT_SBLK_STRING_HAS_SPECIAL_SORT 0xC0000000
2066 //for other objects:
2067 // BIT_SBLK_FINALIZER_RUN 0x40000000
2068 if (bits & BIT_SBLK_STRING_HIGH_CHAR_MASK)
2070 if (obj->GetGCSafeMethodTable () == g_pStringClass)
2074 ASSERT_AND_CHECK (((StringObject *)obj)->ValidateHighChars());
2079 if (bits & BIT_SBLK_FINALIZER_RUN)
// Finalizer-run bit only makes sense on finalizable types.
2081 ASSERT_AND_CHECK (obj->GetGCSafeMethodTable ()->HasFinalizer ());
2086 //BIT_SBLK_GC_RESERVE (0x20000000) is only set during GC. But for frozen object, we don't clean the bit
2087 if (bits & BIT_SBLK_GC_RESERVE)
2089 if (!GCHeapUtilities::IsGCInProgress () && !GCHeapUtilities::GetGCHeap()->IsConcurrentGCInProgress ())
2091 #ifdef FEATURE_BASICFREEZE
2092 ASSERT_AND_CHECK (GCHeapUtilities::GetGCHeap()->IsInFrozenSegment(obj));
2093 #else //FEATURE_BASICFREEZE
2094 _ASSERTE(!"Reserve bit not cleared");
2096 #endif //FEATURE_BASICFREEZE
2100 //Don't know how to verify BIT_SBLK_SPIN_LOCK (0x10000000)
2102 //BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX (0x08000000)
2103 if (bits & BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX)
2105 //if BIT_SBLK_IS_HASHCODE (0x04000000) is not set,
2106 //rest of the DWORD is SyncBlk Index
2107 if (!(bits & BIT_SBLK_IS_HASHCODE))
2109 if (bVerifySyncBlkIndex && GCHeapUtilities::GetGCHeap()->RuntimeStructuresValid ())
// The sync table entry must point back at this exact object.
2111 DWORD sbIndex = bits & MASK_SYNCBLOCKINDEX;
2112 ASSERT_AND_CHECK(SyncTableEntry::GetSyncTableEntry()[sbIndex].m_Object == obj);
2117 // rest of the DWORD is a hash code and we don't have much to validate it
2122 //if BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX is clear, rest of DWORD is thin lock thread ID,
2123 //thin lock recursion level and appdomain index
2124 DWORD lockThreadId = bits & SBLK_MASK_LOCK_THREADID;
2125 DWORD recursionLevel = (bits & SBLK_MASK_LOCK_RECLEVEL) >> SBLK_RECLEVEL_SHIFT;
2126 //if thread ID is 0, recursionLeve got to be zero
2127 //but thread ID doesn't have to be valid because the lock could be orphanend
2128 ASSERT_AND_CHECK (lockThreadId != 0 || recursionLevel == 0 );
2134 #endif //VERIFY_HEAP
2136 // This holder takes care of the SyncBlock memory cleanup if an OOM occurs inside a call to NewSyncBlockSlot.
2138 // Warning: Assumes you already own the cache lock.
2139 // Assumes nothing allocated inside the SyncBlock (only releases the memory, does not destruct.)
2141 // This holder really just meets GetSyncBlock()'s special needs. It's not a general purpose holder.
2144 // Do not inline this call. (fyuan)
2145 // SyncBlockMemoryHolder is normally a check for empty pointer and return. Inlining VoidDeleteSyncBlockMemory adds expensive exception handling.
// Release function for the holder below: returns raw SyncBlock memory to
// the cache without running a destructor.
2146 void VoidDeleteSyncBlockMemory(SyncBlock* psb)
2148 LIMITED_METHOD_CONTRACT;
2149 SyncBlockCache::GetSyncBlockCache()->DeleteSyncBlockMemory(psb);
// RAII wrapper: frees the held SyncBlock memory unless SuppressRelease()
// is called (see GetSyncBlock).
2152 typedef Wrapper<SyncBlock*, DoNothing<SyncBlock*>, VoidDeleteSyncBlockMemory, NULL> SyncBlockMemoryHolder;
2155 // get the sync block for an existing object
// Returns the object's SyncBlock, creating and publishing one if needed.
// On creation, any thin-lock ownership or hash code already packed in the
// header word is transferred into the new SyncBlock before the header is
// switched to hold the syncblock index. Can throw OOM.
2156 SyncBlock *ObjHeader::GetSyncBlock()
2158 CONTRACT(SyncBlock *)
2164 INJECT_FAULT(COMPlusThrowOM(););
2165 POSTCONDITION(CheckPointer(RETVAL));
2169 PTR_SyncBlock syncBlock = GetBaseObject()->PassiveGetSyncBlock();
2171 BOOL indexHeld = FALSE;
2176 // Has our backpointer been correctly updated through every GC?
2177 PTR_SyncTableEntry pEntries(SyncTableEntry::GetSyncTableEntry());
2178 _ASSERTE(pEntries[GetHeaderSyncBlockIndex()].m_Object == GetBaseObject());
2183 //Need to get it from the cache
2185 SyncBlockCache::LockHolder lh(SyncBlockCache::GetSyncBlockCache());
// Re-check under the cache lock; another thread may have raced us here.
2188 syncBlock = GetBaseObject()->PassiveGetSyncBlock();
// Holder frees this raw memory if anything below throws before we commit.
2193 SyncBlockMemoryHolder syncBlockMemoryHolder(SyncBlockCache::GetSyncBlockCache()->GetNextFreeSyncBlock());
2194 syncBlock = syncBlockMemoryHolder;
2196 if ((indx = GetHeaderSyncBlockIndex()) == 0)
2198 indx = SyncBlockCache::GetSyncBlockCache()->NewSyncBlockSlot(GetBaseObject());
2202 //We already have an index, we need to hold the syncblock
2207 //! NewSyncBlockSlot has side-effects that we don't have backout for - thus, that must be the last
2208 //! failable operation called.
2209 CANNOTTHROWCOMPLUSEXCEPTION();
2213 syncBlockMemoryHolder.SuppressRelease();
// Placement-construct the SyncBlock in the cache-provided memory.
2215 new (syncBlock) SyncBlock(indx);
2218 // after this point, nobody can update the index in the header
2219 ENTER_SPIN_LOCK(this);
2222 // If the thin lock in the header is in use, transfer the information to the syncblock
2223 DWORD bits = GetBits();
2224 if ((bits & BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX) == 0)
2226 DWORD lockThreadId = bits & SBLK_MASK_LOCK_THREADID;
2227 DWORD recursionLevel = (bits & SBLK_MASK_LOCK_RECLEVEL) >> SBLK_RECLEVEL_SHIFT;
2228 if (lockThreadId != 0 || recursionLevel != 0)
2230 // recursionLevel can't be non-zero if thread id is 0
2231 _ASSERTE(lockThreadId != 0);
2233 Thread *pThread = g_pThinLockThreadIdDispenser->IdToThreadWithValidation(lockThreadId);
2235 if (pThread == NULL)
2237 // The lock is orphaned.
2238 pThread = (Thread*) -1;
// Thin-lock recursion 0 means "held once", hence the +1 (see AwareLock).
2240 syncBlock->InitState(recursionLevel + 1, pThread);
2243 else if ((bits & BIT_SBLK_IS_HASHCODE) != 0)
2245 DWORD hashCode = bits & MASK_HASHCODE;
2247 syncBlock->SetHashCode(hashCode);
2251 SyncTableEntry::GetSyncTableEntry() [indx].m_SyncBlock = syncBlock;
2253 // in order to avoid a race where some thread tries to get the AD index and we've already zapped it,
2254 // make sure the syncblock etc is all setup with the AD index prior to replacing the index
2256 if (GetHeaderSyncBlockIndex() == 0)
2258 // We have transferred the AppDomain into the syncblock above.
2259 SetIndex(BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX | indx);
2262 //If we had already an index, hold the syncblock
2263 //for the lifetime of the object.
2265 syncBlock->SetPrecious();
2267 LEAVE_SPIN_LOCK(this);
2269 // SyncBlockCache::LockHolder goes out of scope here
// Monitor.Wait entry point on the header: validates ownership, then
// delegates to SyncBlock::Wait. Throws SynchronizationLockException if the
// calling thread does not own the monitor.
2276 BOOL ObjHeader::Wait(INT32 timeOut, BOOL exitContext)
2284 INJECT_FAULT(COMPlusThrowOM(););
2288 // The following code may cause GC, so we must fetch the sync block from
2289 // the object now in case it moves.
2290 SyncBlock *pSB = GetBaseObject()->GetSyncBlock();
2292 // GetSyncBlock throws on failure
2293 _ASSERTE(pSB != NULL);
2295 // make sure we own the crst
2296 if (!pSB->DoesCurrentThreadOwnMonitor())
2297 COMPlusThrow(kSynchronizationLockException);
2300 Thread *pThread = GetThread();
2301 DWORD curLockCount = pThread->m_dwLockCount;
2304 BOOL result = pSB->Wait(timeOut,exitContext);
// Wait must restore the thread's lock count exactly (release + reacquire).
2306 _ASSERTE (curLockCount == pThread->m_dwLockCount);
// Monitor.Pulse on the header: requires monitor ownership, then delegates
// to the SyncBlock (the Pulse call itself follows the ownership check in
// lines elided from this excerpt).
2311 void ObjHeader::Pulse()
2319 INJECT_FAULT(COMPlusThrowOM(););
2323 // The following code may cause GC, so we must fetch the sync block from
2324 // the object now in case it moves.
2325 SyncBlock *pSB = GetBaseObject()->GetSyncBlock();
2327 // GetSyncBlock throws on failure
2328 _ASSERTE(pSB != NULL);
2330 // make sure we own the crst
2331 if (!pSB->DoesCurrentThreadOwnMonitor())
2332 COMPlusThrow(kSynchronizationLockException);
// Monitor.PulseAll on the header: same ownership check as Pulse, then
// delegates to the SyncBlock (delegation lines elided in this excerpt).
2337 void ObjHeader::PulseAll()
2345 INJECT_FAULT(COMPlusThrowOM(););
2349 // The following code may cause GC, so we must fetch the sync block from
2350 // the object now in case it moves.
2351 SyncBlock *pSB = GetBaseObject()->GetSyncBlock();
2353 // GetSyncBlock throws on failure
2354 _ASSERTE(pSB != NULL);
2356 // make sure we own the crst
2357 if (!pSB->DoesCurrentThreadOwnMonitor())
2358 COMPlusThrow(kSynchronizationLockException);
2364 // ***************************************************************************
2366 // AwareLock class implementation (GC-aware locking)
2368 // ***************************************************************************
// Lazily allocates the monitor's wait event. Marks the syncblock precious
// first so it cannot be reclaimed while the (expensive, permanent) event
// exists. Can throw OOM.
2370 void AwareLock::AllocLockSemEvent()
2378 INJECT_FAULT(COMPlusThrowOM(););
2382 // Before we switch from cooperative, ensure that this syncblock won't disappear
2383 // under us. For something as expensive as an event, do it permanently rather
2384 // than transiently.
2389 // No need to take a lock - CLREvent::CreateMonitorEvent is thread safe
2390 m_SemEvent.CreateMonitorEvent((SIZE_T)this);
// Blocking acquire of the AwareLock. Fast path: interlocked lock attempt;
// if that fails the thread is registered as a waiter and EnterEpilog blocks
// on the monitor event. Recursion by the holding thread is handled in lines
// elided from this excerpt (see the m_Recursion assert below).
2393 void AwareLock::Enter()
2401 INJECT_FAULT(COMPlusThrowOM(););
2405 Thread *pCurThread = GetThread();
2406 LockState state = m_lockState.VolatileLoadWithoutBarrier();
// Not a recursive acquire by this thread - contend for the lock.
2407 if (!state.IsLocked() || m_HoldingThread != pCurThread)
2409 if (m_lockState.InterlockedTryLock_Or_RegisterWaiter(this, state))
2411 // We get here if we successfully acquired the mutex.
2412 m_HoldingThread = pCurThread;
2414 pCurThread->IncLockCount();
2416 #if defined(_DEBUG) && defined(TRACK_SYNC)
2417 // The best place to grab this is from the ECall frame
2418 Frame *pFrame = pCurThread->GetFrame();
2419 int caller = (pFrame && pFrame != FRAME_TOP
2420 ? (int)pFrame->GetReturnAddress()
2422 pCurThread->m_pTrackSync->EnterSync(caller, this);
2427 // Lock was not acquired and the waiter was registered
2429 // Didn't manage to get the mutex, must wait.
2430 // The precondition for EnterEpilog is that the count of waiters be bumped
2431 // to account for this thread, which was done above.
2432 EnterEpilog(pCurThread);
2436 // Got the mutex via recursive locking on the same thread.
2437 _ASSERTE(m_Recursion >= 1);
2440 #if defined(_DEBUG) && defined(TRACK_SYNC)
2441 // The best place to grab this is from the ECall frame
2442 Frame *pFrame = pCurThread->GetFrame();
2443 int caller = (pFrame && pFrame != FRAME_TOP ? (int)pFrame->GetReturnAddress() : -1);
2444 pCurThread->m_pTrackSync->EnterSync(caller, this);
// Timed acquire. timeOut == 0 is a pure try (no waiter registration, may
// run in any GC mode per the contract below); a nonzero timeout registers
// the thread as a waiter and blocks in EnterEpilog for up to timeOut ms.
2448 BOOL AwareLock::TryEnter(INT32 timeOut)
2455 if (timeOut == 0) {MODE_ANY;} else {MODE_COOPERATIVE;}
2456 INJECT_FAULT(COMPlusThrowOM(););
2460 Thread *pCurThread = GetThread();
// Honor a pending abort before potentially blocking.
2462 if (pCurThread->IsAbortRequested())
2464 pCurThread->HandleThreadAbort();
2467 LockState state = m_lockState.VolatileLoadWithoutBarrier();
2468 if (!state.IsLocked() || m_HoldingThread != pCurThread)
2471 ? m_lockState.InterlockedTryLock(state)
2472 : m_lockState.InterlockedTryLock_Or_RegisterWaiter(this, state))
2474 // We get here if we successfully acquired the mutex.
2475 m_HoldingThread = pCurThread;
2477 pCurThread->IncLockCount();
2479 #if defined(_DEBUG) && defined(TRACK_SYNC)
2480 // The best place to grab this is from the ECall frame
2481 Frame *pFrame = pCurThread->GetFrame();
2482 int caller = (pFrame && pFrame != FRAME_TOP ? (int)pFrame->GetReturnAddress() : -1);
2483 pCurThread->m_pTrackSync->EnterSync(caller, this);
2488 // Lock was not acquired and the waiter was registered if the timeout is nonzero
2490 // Didn't manage to get the mutex, return failure if no timeout, else wait
2491 // for at most timeout milliseconds for the mutex.
2497 // The precondition for EnterEpilog is that the count of waiters be bumped
2498 // to account for this thread, which was done above
2499 return EnterEpilog(pCurThread, timeOut);
2502 // Got the mutex via recursive locking on the same thread.
2503 _ASSERTE(m_Recursion >= 1);
2505 #if defined(_DEBUG) && defined(TRACK_SYNC)
2506 // The best place to grab this is from the ECall frame
2507 Frame *pFrame = pCurThread->GetFrame();
2508 int caller = (pFrame && pFrame != FRAME_TOP ? (int)pFrame->GetReturnAddress() : -1);
2509 pCurThread->m_pTrackSync->EnterSync(caller, this);
// Slow-path wrapper: publishes this thread as "blocked on a monitor
// critical section" for the debugger, then defers the actual wait to
// EnterEpilogHelper. Precondition: the caller already registered this
// thread as a waiter.
2514 BOOL AwareLock::EnterEpilog(Thread* pCurThread, INT32 timeOut)
2516 STATIC_CONTRACT_THROWS;
2517 STATIC_CONTRACT_MODE_COOPERATIVE;
2518 STATIC_CONTRACT_GC_TRIGGERS;
2520 // While we are in this frame the thread is considered blocked on the
2521 // critical section of the monitor lock according to the debugger
2522 DebugBlockingItem blockingMonitorInfo;
2523 blockingMonitorInfo.dwTimeout = timeOut;
2524 blockingMonitorInfo.pMonitor = this;
2525 blockingMonitorInfo.pAppDomain = SystemDomain::GetCurrentDomain();
2526 blockingMonitorInfo.type = DebugBlock_MonitorCriticalSection;
2527 DebugBlockingItemHolder holder(pCurThread, &blockingMonitorInfo);
2529 // We need a separate helper because it uses SEH and the holder has a
2531 return EnterEpilogHelper(pCurThread, timeOut);
// Contention logging: when _LOGCONTENTION is defined, LogContention spews a
// stack trace to the LF_SYNC log; otherwise it compiles away to nothing
// (the #else separating the two definitions is elided in this excerpt).
2535 #define _LOGCONTENTION
2538 #ifdef _LOGCONTENTION
2539 inline void LogContention()
2541 WRAPPER_NO_CONTRACT;
2543 if (LoggingOn(LF_SYNC, LL_INFO100))
2545 LogSpewAlways("Contention: Stack Trace Begin\n");
// Forward declaration; the call to LogStackTrace() is elided in this excerpt.
2546 void LogStackTrace();
2548 LogSpewAlways("Contention: Stack Trace End\n");
// No-op replacement when contention logging is compiled out.
2553 #define LogContention()
2556 double ComputeElapsedTimeInNanosecond(LARGE_INTEGER startTicks, LARGE_INTEGER endTicks)
2558 static LARGE_INTEGER freq;
2559 if (freq.QuadPart == 0)
2560 QueryPerformanceFrequency(&freq);
2562 const double NsPerSecond = 1000 * 1000 * 1000;
2563 LONGLONG elapsedTicks = endTicks.QuadPart - startTicks.QuadPart;
2564 return (elapsedTicks * NsPerSecond) / freq.QuadPart;
// Core blocking path of monitor acquisition. The calling thread is already
// registered as a waiter; this function waits on the monitor event (in
// preemptive mode, with the owning object GC-protected), handles interrupt/
// timeout by unregistering the waiter, spins briefly after waking to fight
// waiter starvation, and fires ETW contention start/stop events.
2567 BOOL AwareLock::EnterEpilogHelper(Thread* pCurThread, INT32 timeOut)
2569 STATIC_CONTRACT_THROWS;
2570 STATIC_CONTRACT_MODE_COOPERATIVE;
2571 STATIC_CONTRACT_GC_TRIGGERS;
2574 // The caller has already registered a waiter. This function needs to unregister the waiter on all paths (exception paths
2575 // included). On runtimes where thread-abort is supported, a thread-abort also needs to unregister the waiter. There may be
2576 // a possibility for preemptive GC toggles below to handle a thread-abort, that should be taken into consideration when
2577 // porting this code back to .NET Framework.
2579 // Require all callers to be in cooperative mode. If they have switched to preemptive
2580 // mode temporarily before calling here, then they are responsible for protecting
2581 // the object associated with this lock.
2582 _ASSERTE(pCurThread->PreemptiveGCDisabled());
2584 BOOLEAN IsContentionKeywordEnabled = ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_DOTNET_Context, TRACE_LEVEL_INFORMATION, CLR_CONTENTION_KEYWORD);
2585 LARGE_INTEGER startTicks = { {0} };
2587 if (IsContentionKeywordEnabled)
2589 QueryPerformanceCounter(&startTicks);
2591 // Fire a contention start event for a managed contention
2592 FireEtwContentionStart_V1(ETW::ContentionLog::ContentionStructs::ManagedContention, GetClrInstanceId());
2596 Thread::IncrementMonitorLockContentionCount(pCurThread);
2598 OBJECTREF obj = GetOwningObject();
2600 // We cannot allow the AwareLock to be cleaned up underneath us by the GC.
2601 IncrementTransientPrecious();
2604 GCPROTECT_BEGIN(obj);
2606 if (!m_SemEvent.IsMonitorEventAllocated())
2608 AllocLockSemEvent();
2610 _ASSERTE(m_SemEvent.IsMonitorEventAllocated());
// Block in preemptive mode so GC is not held up by this wait.
2612 pCurThread->EnablePreemptiveGC();
2616 // We might be interrupted during the wait (Thread.Interrupt), so we need an
2617 // exception handler round the call.
2625 param.timeOut = timeOut;
2627 // Measure the time we wait so that, in the case where we wake up
2628 // and fail to acquire the mutex, we can adjust remaining timeout
2630 ULONGLONG start = CLRGetTickCount64();
// NOTE(review): "¶m" below appears to be encoding damage for "&param" in
// this dump - restore before compiling against the full source.
2632 EE_TRY_FOR_FINALLY(Param *, pParam, ¶m)
2634 pParam->ret = pParam->pThis->m_SemEvent.Wait(pParam->timeOut, TRUE);
2635 _ASSERTE((pParam->ret == WAIT_OBJECT_0) || (pParam->ret == WAIT_TIMEOUT));
2639 if (GOT_EXCEPTION())
2641 // It is likely the case that An APC threw an exception, for instance Thread.Interrupt(). The wait subsystem
2642 // guarantees that if a signal to the event being waited upon is observed by the woken thread, that thread's
2643 // wait will return WAIT_OBJECT_0. So in any race between m_SemEvent being signaled and the wait throwing an
2644 // exception, a thread that is woken by an exception would not observe the signal, and the signal would wake
2645 // another thread as necessary.
2647 // We must decrement the waiter count.
2648 m_lockState.InterlockedUnregisterWaiter();
2653 if (ret != WAIT_OBJECT_0)
2655 // We timed out, decrement waiter count.
2656 m_lockState.InterlockedUnregisterWaiter();
2660 // Spin a bit while trying to acquire the lock. This has a few benefits:
2661 // - Spinning helps to reduce waiter starvation. Since other non-waiter threads can take the lock while there are
2662 // waiters (see LockState::InterlockedTryLock()), once a waiter wakes it will be able to better compete
2663 // with other spinners for the lock.
2664 // - If there is another thread that is repeatedly acquiring and releasing the lock, spinning before waiting again
2665 // helps to prevent a waiter from repeatedly context-switching in and out
2666 // - Further in the same situation above, waking up and waiting shortly thereafter deprioritizes this waiter because
2667 // events release waiters in FIFO order. Spinning a bit helps a waiter to retain its priority at least for one
2668 // spin duration before it gets deprioritized behind all other waiters.
2669 if (g_SystemInfo.dwNumberOfProcessors > 1)
2671 bool acquiredLock = false;
2672 YieldProcessorNormalizationInfo normalizationInfo;
2673 const DWORD spinCount = g_SpinConstants.dwMonitorSpinCount;
2674 for (DWORD spinIteration = 0; spinIteration < spinCount; ++spinIteration)
2676 if (m_lockState.InterlockedTry_LockAndUnregisterWaiterAndObserveWakeSignal(this))
2678 acquiredLock = true;
2682 SpinWait(normalizationInfo, spinIteration);
2690 if (m_lockState.InterlockedObserveWakeSignal_Try_LockAndUnregisterWaiter(this))
2695 // When calculating duration we consider a couple of special cases.
2696 // If the end tick is the same as the start tick we make the
2697 // duration a millisecond, to ensure we make forward progress if
2698 // there's a lot of contention on the mutex. Secondly, we have to
2699 // cope with the case where the tick counter wrapped while we where
2700 // waiting (we can cope with at most one wrap, so don't expect three
2701 // month timeouts to be very accurate). Luckily for us, the latter
2702 // case is taken care of by 32-bit modulo arithmetic automatically.
2703 if (timeOut != (INT32)INFINITE)
2705 ULONGLONG end = CLRGetTickCount64();
2713 duration = end - start;
2715 duration = min(duration, (DWORD)timeOut);
2716 timeOut -= (INT32)duration;
// Back to cooperative mode before touching the lock's managed state.
2720 pCurThread->DisablePreemptiveGC();
2723 DecrementTransientPrecious();
2725 if (IsContentionKeywordEnabled)
2727 LARGE_INTEGER endTicks;
2728 QueryPerformanceCounter(&endTicks);
2730 double elapsedTimeInNanosecond = ComputeElapsedTimeInNanosecond(startTicks, endTicks);
2732 // Fire a contention end event for a managed contention
2733 FireEtwContentionStop_V1(ETW::ContentionLog::ContentionStructs::ManagedContention, GetClrInstanceId(), elapsedTimeInNanosecond);
2737 if (ret == WAIT_TIMEOUT)
// Lock acquired: record ownership on the thread and the lock.
2742 m_HoldingThread = pCurThread;
2744 pCurThread->IncLockCount();
2746 #if defined(_DEBUG) && defined(TRACK_SYNC)
2747 // The best place to grab this is from the ECall frame
2748 Frame *pFrame = pCurThread->GetFrame();
2749 int caller = (pFrame && pFrame != FRAME_TOP ? (int)pFrame->GetReturnAddress() : -1);
2750 pCurThread->m_pTrackSync->EnterSync(caller, this);
// Releases the AwareLock via LeaveHelper; signals the monitor event when a
// waiter must be woken. Asserts on any other (error) helper action.
2756 BOOL AwareLock::Leave()
2767 Thread* pThread = GetThread();
2769 AwareLock::LeaveHelperAction action = LeaveHelper(pThread);
2773 case AwareLock::LeaveHelperAction_None:
2776 case AwareLock::LeaveHelperAction_Signal:
2781 // Must be an error otherwise
2782 _ASSERTE(action == AwareLock::LeaveHelperAction_Error);
// Fully releases a (possibly recursively held) lock; judging by the assert
// and callers, the return value appears to be the recursion count that was
// released - the interior loop is elided in this excerpt, confirm against
// the full source.
2787 LONG AwareLock::LeaveCompletely()
2789 WRAPPER_NO_CONTRACT;
2795 _ASSERTE(count > 0); // otherwise we were never in the lock
2801 BOOL AwareLock::OwnedByCurrentThread()
2803 WRAPPER_NO_CONTRACT;
2804 return (GetThread() == m_HoldingThread);
2808 // ***************************************************************************
2810 // SyncBlock class implementation
2812 // ***************************************************************************
2814 // We maintain two queues for SyncBlock::Wait.
2815 // 1. Inside SyncBlock we queue all threads that are waiting on the SyncBlock.
2816 // When we pulse, we pick the thread from this queue using FIFO.
2817 // 2. We queue all SyncBlocks that a thread is waiting for in Thread::m_WaitEventLink.
2818 // When we pulse a thread, we find the event from this queue to set, and we also
2819 // or in a 1 bit in the syncblock value saved in the queue, so that we can return
2820 // immediately from SyncBlock::Wait if the syncblock has been pulsed.
// Implements Monitor.Wait on this SyncBlock: enqueues the thread on the
// wait queue, fully releases the monitor (recording the recursion count so
// it can be reacquired), blocks, and reports timeout status.
2821 BOOL SyncBlock::Wait(INT32 timeOut, BOOL exitContext)
2829 INJECT_FAULT(COMPlusThrowOM());
2833 Thread *pCurThread = GetThread();
2834 BOOL isTimedOut = FALSE;
2835 BOOL isEnqueued = FALSE;
2836 WaitEventLink waitEventLink;
2837 WaitEventLink *pWaitEventLink;
2839 // As soon as we flip the switch, we are in a race with the GC, which could clean
2840 // up the SyncBlock underneath us -- unless we report the object.
2841 _ASSERTE(pCurThread->PreemptiveGCDisabled());
2843 // Does this thread already wait for this SyncBlock?
2844 WaitEventLink *walk = pCurThread->WaitEventLinkForSyncBlock(this);
2846 if (walk->m_Next->m_WaitSB == this) {
2847 // Wait on the same lock again.
2848 walk->m_Next->m_RefCount ++;
2849 pWaitEventLink = walk->m_Next;
// Low bit set on m_WaitSB marks "already pulsed" (see the queue comment above).
2851 else if ((SyncBlock*)(((DWORD_PTR)walk->m_Next->m_WaitSB) & ~1)== this) {
2852 // This thread has been pulsed. No need to wait.
2857 // First time this thread is going to wait for this SyncBlock.
// Reuse the thread's built-in event for its first wait; otherwise draw one
// from the event store.
2859 if (pCurThread->m_WaitEventLink.m_Next == NULL) {
2860 hEvent = &(pCurThread->m_EventWait);
2863 hEvent = GetEventFromEventStore();
2865 waitEventLink.m_WaitSB = this;
2866 waitEventLink.m_EventWait = hEvent;
2867 waitEventLink.m_Thread = pCurThread;
2868 waitEventLink.m_Next = NULL;
2869 waitEventLink.m_LinkSB.m_pNext = NULL;
2870 waitEventLink.m_RefCount = 1;
2871 pWaitEventLink = &waitEventLink;
2872 walk->m_Next = pWaitEventLink;
2874 // Before we enqueue it (and, thus, before it can be dequeued), reset the event
2875 // that will awaken us.
2878 // This thread is now waiting on this sync block
2879 ThreadQueue::EnqueueThread(pWaitEventLink, this);
2884 _ASSERTE ((SyncBlock*)((DWORD_PTR)walk->m_Next->m_WaitSB & ~1)== this);
2886 PendingSync syncState(walk);
2888 OBJECTREF obj = m_Monitor.GetOwningObject();
// Keep the syncblock alive across the preemptive-mode wait.
2890 m_Monitor.IncrementTransientPrecious();
2892 // While we are in this frame the thread is considered blocked on the
2893 // event of the monitor lock according to the debugger
2894 DebugBlockingItem blockingMonitorInfo;
2895 blockingMonitorInfo.dwTimeout = timeOut;
2896 blockingMonitorInfo.pMonitor = &m_Monitor;
2897 blockingMonitorInfo.pAppDomain = SystemDomain::GetCurrentDomain();
2898 blockingMonitorInfo.type = DebugBlock_MonitorEvent;
2899 DebugBlockingItemHolder holder(pCurThread, &blockingMonitorInfo);
2901 GCPROTECT_BEGIN(obj);
2905 // remember how many times we synchronized
2906 syncState.m_EnterCount = LeaveMonitorCompletely();
2907 _ASSERTE(syncState.m_EnterCount > 0);
2909 isTimedOut = pCurThread->Block(timeOut, &syncState);
2912 m_Monitor.DecrementTransientPrecious();
// Wakes exactly one waiter (Monitor.Pulse): dequeues the head of this
// SyncBlock's FIFO wait queue, if any, and signals that thread's wait event.
// A no-op when no thread is waiting.
2917 void SyncBlock::Pulse()
2928     WaitEventLink *pWaitEventLink;
2930     if ((pWaitEventLink = ThreadQueue::DequeueThread(this)) != NULL)
2931         pWaitEventLink->m_EventWait->Set();
// Wakes every waiter (Monitor.PulseAll): drains this SyncBlock's wait queue,
// signaling each dequeued thread's wait event in turn.
2934 void SyncBlock::PulseAll()
2945     WaitEventLink *pWaitEventLink;
2947     while ((pWaitEventLink = ThreadQueue::DequeueThread(this)) != NULL)
2948         pWaitEventLink->m_EventWait->Set();
// Atomically installs the interop info pointer on this SyncBlock via an
// interlocked compare-exchange on m_pInteropInfo.
// Returns the boolean result of the CAS expression — presumably true when
// this caller won the race to install pInteropInfo (the comparand/trailing
// arguments of the call are not visible in this listing; confirm in source).
2951 bool SyncBlock::SetInteropInfo(InteropSyncBlockInfo* pInteropInfo)
2953     WRAPPER_NO_CONTRACT;
2956     // We could be agile, but not have noticed yet.  We can't assert here
2957     // that we live in any given domain, nor is this an appropriate place
2958     // to re-parent the syncblock.
2959 /*    _ASSERTE (m_dwAppDomainIndex.m_dwIndex == 0 ||
2960               m_dwAppDomainIndex == SystemDomain::System()->DefaultDomain()->GetIndex() ||
2961               m_dwAppDomainIndex == GetAppDomain()->GetIndex());
2962     m_dwAppDomainIndex = GetAppDomain()->GetIndex();
2964     return (FastInterlockCompareExchangePointer(&m_pInteropInfo,
2969 #ifdef EnC_SUPPORTED
2970 // Store information about fields added to this object by EnC
2971 // This must be called from a thread in the AppDomain of this object instance
// Store information about fields added to this object by EnC.
// Stores the Edit-and-Continue field-data pointer on this SyncBlock; asserted
// to happen at most once (m_pEnCInfo must still be NULL).
// NOTE(review): the listing omits a line between 2976 and 2980 — presumably
// the call that marks this SyncBlock precious, per the comment; confirm in
// the full source.
2972 void SyncBlock::SetEnCInfo(EnCSyncBlockInfo *pEnCInfo)
2974     WRAPPER_NO_CONTRACT;
2976     // We can't recreate the field contents, so this SyncBlock can never go away
2980     _ASSERTE( m_pEnCInfo == NULL );
2981     m_pEnCInfo = pEnCInfo;
2983 #endif // EnC_SUPPORTED
2984 #endif // !DACCESS_COMPILE
2986 #if defined(_WIN64) && defined(_DEBUG)
// Debug-only (64-bit) diagnostic: called when the object header's alignment
// pad word is unexpectedly non-zero. Logs the address of the object that
// follows this header (header precedes the object, hence "+ 1"), then
// asserts the invariant m_alignpad == 0 so the corruption is caught loudly.
2987 void ObjHeader::IllegalAlignPad()
2989     WRAPPER_NO_CONTRACT;
2991     void** object = ((void**) this) + 1;
2992     LogSpewAlways("\n\n******** Illegal ObjHeader m_alignpad not 0, object" FMT_ADDR "\n\n",
2995     _ASSERTE(m_alignpad == 0);
2997 #endif // _WIN64 && _DEBUG