// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
//
// SYNCBLK.CPP
//

//
// Definition of a SyncBlock and the SyncBlockCache which manages it
//


#include "common.h"

#include "vars.hpp"
#include "util.hpp"
#include "class.h"
#include "object.h"
#include "threads.h"
#include "excep.h"
#include "syncblk.h"
#include "interoputil.h"
#include "encee.h"
#include "eventtrace.h"
#include "dllimportcallback.h"
#include "comcallablewrapper.h"
#include "eeconfig.h"
#include "corhost.h"
#include "comdelegate.h"
#include "finalizerthread.h"

#ifdef FEATURE_COMINTEROP
#include "runtimecallablewrapper.h"
#endif // FEATURE_COMINTEROP

// Allocate 4K worth. Typically enough.
#define MAXSYNCBLOCK ((0x1000-sizeof(void*))/sizeof(SyncBlock))
#define SYNC_TABLE_INITIAL_SIZE 250

//#define DUMP_SB

class  SyncBlockArray
{
  public:
    SyncBlockArray *m_Next;
    BYTE            m_Blocks[MAXSYNCBLOCK * sizeof (SyncBlock)];
};
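
// Each SyncBlockArray is a roughly 4KB chunk: a chain pointer followed by
// MAXSYNCBLOCK in-place SyncBlocks. Chunks are linked through m_Next and are
// not freed individually; recycled SyncBlocks instead go back on the cache's
// free list (see SyncBlockCache::DeleteSyncBlockMemory below).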

// For in-place constructor
BYTE g_SyncBlockCacheInstance[sizeof(SyncBlockCache)];

SPTR_IMPL (SyncBlockCache, SyncBlockCache, s_pSyncBlockCache);

#ifndef DACCESS_COMPILE



#ifndef FEATURE_PAL
// static
SLIST_HEADER InteropSyncBlockInfo::s_InteropInfoStandbyList;
#endif // !FEATURE_PAL

InteropSyncBlockInfo::~InteropSyncBlockInfo()
{
    CONTRACTL
    {
        NOTHROW;
        DESTRUCTOR_CHECK;
        GC_TRIGGERS;
        MODE_ANY;
    }
    CONTRACTL_END;

    FreeUMEntryThunkOrInterceptStub();
}

#ifndef FEATURE_PAL
// Deletes all items in code:s_InteropInfoStandbyList.
void InteropSyncBlockInfo::FlushStandbyList()
{
    CONTRACTL
    {
        NOTHROW;
        GC_TRIGGERS;
        MODE_ANY;
    }
    CONTRACTL_END;

    PSLIST_ENTRY pEntry = InterlockedFlushSList(&InteropSyncBlockInfo::s_InteropInfoStandbyList);
    while (pEntry)
    {
        PSLIST_ENTRY pNextEntry = pEntry->Next;

        // make sure to use the global delete since the destructor has already run
        ::delete (void *)pEntry;
        pEntry = pNextEntry;
    }
}
#endif // !FEATURE_PAL

void InteropSyncBlockInfo::FreeUMEntryThunkOrInterceptStub()
{
    CONTRACTL
    {
        NOTHROW;
        DESTRUCTOR_CHECK;
        GC_TRIGGERS;
        MODE_ANY;
    }
    CONTRACTL_END;

    if (!g_fEEShutDown)
    {
        void *pUMEntryThunk = GetUMEntryThunk();
        if (pUMEntryThunk != NULL)
        {
            COMDelegate::RemoveEntryFromFPtrHash((UPTR)pUMEntryThunk);
            UMEntryThunk::FreeUMEntryThunk((UMEntryThunk *)pUMEntryThunk);
        }
        else
        {
#if defined(_TARGET_X86_)
            Stub *pInterceptStub = GetInterceptStub();
            if (pInterceptStub != NULL)
            {
                // There may be multiple chained stubs
                pInterceptStub->DecRef();
            }
#else // _TARGET_X86_
            // Intercept stubs are currently not used on other platforms.
            _ASSERTE(GetInterceptStub() == NULL);
#endif // _TARGET_X86_
        }
    }
    m_pUMEntryThunkOrInterceptStub = NULL;
}

#ifdef FEATURE_COMINTEROP
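// The low values and the low bit of m_pRCW encode its state: 0 means an RCW
// never existed, 1 means it existed but has been released, and a set low bit
// on an otherwise valid pointer means another thread currently holds the
// field's lightweight lock. GetRCWAndIncrementUseCount below spins on that
// low bit, CAS-ing it in as a tiny spinlock around IncrementUseCount.
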
// Returns either NULL or an RCW on which IncrementUseCount has been called.
RCW* InteropSyncBlockInfo::GetRCWAndIncrementUseCount()
{
    LIMITED_METHOD_CONTRACT;

    DWORD dwSwitchCount = 0;
    while (true)
    {
        RCW *pRCW = VolatileLoad(&m_pRCW);
        if ((size_t)pRCW <= 0x1)
        {
            // the RCW never existed or has been released
            return NULL;
        }

        if (((size_t)pRCW & 0x1) == 0x0)
        {
            // it looks like we have a chance, try to acquire the lock
            RCW *pLockedRCW = (RCW *)((size_t)pRCW | 0x1);
            if (InterlockedCompareExchangeT(&m_pRCW, pLockedRCW, pRCW) == pRCW)
            {
                // we have the lock on the m_pRCW field, now we can safely "use" the RCW
                pRCW->IncrementUseCount();

                // release the m_pRCW lock
                VolatileStore(&m_pRCW, pRCW);

                // and return the RCW
                return pRCW;
            }
        }

        // somebody else holds the lock, retry
        __SwitchToThread(0, ++dwSwitchCount);
    }
}

// Sets the m_pRCW field in a thread-safe manner, pRCW can be NULL.
void InteropSyncBlockInfo::SetRawRCW(RCW* pRCW)
{
    LIMITED_METHOD_CONTRACT;

    if (pRCW != NULL)
    {
        // we never set two different RCWs on a single object
        _ASSERTE(m_pRCW == NULL);
        m_pRCW = pRCW;
    }
    else
    {
        DWORD dwSwitchCount = 0;
        while (true)
        {
            RCW *pOldRCW = VolatileLoad(&m_pRCW);

            if ((size_t)pOldRCW <= 0x1)
            {
                // the RCW never existed or has been released
                VolatileStore(&m_pRCW, (RCW *)0x1);
                return;
            }

            if (((size_t)pOldRCW & 0x1) == 0x0)
            {
                // it looks like we have a chance, set the RCW to 0x1
                if (InterlockedCompareExchangeT(&m_pRCW, (RCW *)0x1, pOldRCW) == pOldRCW)
                {
                    // we made it
                    return;
                }
            }

            // somebody else holds the lock, retry
            __SwitchToThread(0, ++dwSwitchCount);
        }
    }
}
#endif // FEATURE_COMINTEROP

#endif // !DACCESS_COMPILE

PTR_SyncTableEntry SyncTableEntry::GetSyncTableEntry()
{
    LIMITED_METHOD_CONTRACT;
    SUPPORTS_DAC;

    return (PTR_SyncTableEntry)g_pSyncTable;
}

#ifndef DACCESS_COMPILE

SyncTableEntry*& SyncTableEntry::GetSyncTableEntryByRef()
{
    LIMITED_METHOD_CONTRACT;
    return g_pSyncTable;
}

/* static */
SyncBlockCache*& SyncBlockCache::GetSyncBlockCache()
{
    LIMITED_METHOD_CONTRACT;

    return s_pSyncBlockCache;
}


//----------------------------------------------------------------------------
//
//   ThreadQueue Implementation
//
//----------------------------------------------------------------------------
#endif //!DACCESS_COMPILE

// Given a link in the chain, get the Thread that it represents
/* static */
inline PTR_WaitEventLink ThreadQueue::WaitEventLinkForLink(PTR_SLink pLink)
{
    LIMITED_METHOD_CONTRACT;
    SUPPORTS_DAC;
    return (PTR_WaitEventLink) (((PTR_BYTE) pLink) - offsetof(WaitEventLink, m_LinkSB));
}

#ifndef DACCESS_COMPILE

// Unlink the head of the Q.  We are always in the SyncBlock's critical
// section.
/* static */
inline WaitEventLink *ThreadQueue::DequeueThread(SyncBlock *psb)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
        CAN_TAKE_LOCK;
    }
    CONTRACTL_END;

    // Be careful, the debugger inspects the queue from out of process and just looks at the memory...
    // it must be valid even if the lock is held. Be careful if you change the way the queue is updated.
    SyncBlockCache::LockHolder lh(SyncBlockCache::GetSyncBlockCache());

    WaitEventLink      *ret = NULL;
    SLink       *pLink = psb->m_Link.m_pNext;

    if (pLink)
    {
        psb->m_Link.m_pNext = pLink->m_pNext;
#ifdef _DEBUG
        pLink->m_pNext = (SLink *)POISONC;
#endif
        ret = WaitEventLinkForLink(pLink);
        _ASSERTE(ret->m_WaitSB == psb);
    }
    return ret;
}

// Enqueue is the slow one.  We have to find the end of the Q since we don't
// want to burn storage for this in the SyncBlock.
/* static */
inline void ThreadQueue::EnqueueThread(WaitEventLink *pWaitEventLink, SyncBlock *psb)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
        CAN_TAKE_LOCK;
    }
    CONTRACTL_END;

    _ASSERTE (pWaitEventLink->m_LinkSB.m_pNext == NULL);

    // Be careful, the debugger inspects the queue from out of process and just looks at the memory...
    // it must be valid even if the lock is held. Be careful if you change the way the queue is updated.
    SyncBlockCache::LockHolder lh(SyncBlockCache::GetSyncBlockCache());

    SLink       *pPrior = &psb->m_Link;

    while (pPrior->m_pNext)
    {
        // We shouldn't already be in the waiting list!
        _ASSERTE(pPrior->m_pNext != &pWaitEventLink->m_LinkSB);

        pPrior = pPrior->m_pNext;
    }
    pPrior->m_pNext = &pWaitEventLink->m_LinkSB;
}
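
// Note the asymmetry: DequeueThread pops the head in O(1), while EnqueueThread
// walks to the tail, so waiters are released in FIFO order at the cost of an
// O(n) append under the cache lock.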


// Wade through the SyncBlock's list of waiting threads and remove the
// specified thread.
/* static */
BOOL ThreadQueue::RemoveThread (Thread *pThread, SyncBlock *psb)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
    }
    CONTRACTL_END;

    BOOL res = FALSE;

    // Be careful, the debugger inspects the queue from out of process and just looks at the memory...
    // it must be valid even if the lock is held. Be careful if you change the way the queue is updated.
    SyncBlockCache::LockHolder lh(SyncBlockCache::GetSyncBlockCache());

    SLink       *pPrior = &psb->m_Link;
    SLink       *pLink;
    WaitEventLink *pWaitEventLink;

    while ((pLink = pPrior->m_pNext) != NULL)
    {
        pWaitEventLink = WaitEventLinkForLink(pLink);
        if (pWaitEventLink->m_Thread == pThread)
        {
            pPrior->m_pNext = pLink->m_pNext;
#ifdef _DEBUG
            pLink->m_pNext = (SLink *)POISONC;
#endif
            _ASSERTE(pWaitEventLink->m_WaitSB == psb);
            res = TRUE;
            break;
        }
        pPrior = pLink;
    }
    return res;
}

#endif //!DACCESS_COMPILE

#ifdef DACCESS_COMPILE
// Enumerates the threads in the queue from front to back by calling
// pCallbackFunction on each one
/* static */
void ThreadQueue::EnumerateThreads(SyncBlock *psb, FP_TQ_THREAD_ENUMERATION_CALLBACK pCallbackFunction, void* pUserData)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
    }
    CONTRACTL_END;
    SUPPORTS_DAC;

    PTR_SLink pLink = psb->m_Link.m_pNext;
    PTR_WaitEventLink pWaitEventLink;

    while (pLink != NULL)
    {
        pWaitEventLink = WaitEventLinkForLink(pLink);

        pCallbackFunction(pWaitEventLink->m_Thread, pUserData);
        pLink = pLink->m_pNext;
    }
}
#endif //DACCESS_COMPILE

#ifndef DACCESS_COMPILE

// ***************************************************************************
//
//              Ephemeral Bitmap Helper
//
// ***************************************************************************

#define card_size 32

#define card_word_width 32

size_t CardIndex (size_t card)
{
    LIMITED_METHOD_CONTRACT;
    return card_size * card;
}

size_t CardOf (size_t idx)
{
    LIMITED_METHOD_CONTRACT;
    return idx / card_size;
}

size_t CardWord (size_t card)
{
    LIMITED_METHOD_CONTRACT;
    return card / card_word_width;
}
inline
unsigned CardBit (size_t card)
{
    LIMITED_METHOD_CONTRACT;
    return (unsigned)(card % card_word_width);
}

inline
void SyncBlockCache::SetCard (size_t card)
{
    WRAPPER_NO_CONTRACT;
    m_EphemeralBitmap [CardWord (card)] =
        (m_EphemeralBitmap [CardWord (card)] | (1 << CardBit (card)));
}

inline
void SyncBlockCache::ClearCard (size_t card)
{
    WRAPPER_NO_CONTRACT;
    m_EphemeralBitmap [CardWord (card)] =
        (m_EphemeralBitmap [CardWord (card)] & ~(1 << CardBit (card)));
}

inline
BOOL  SyncBlockCache::CardSetP (size_t card)
{
    WRAPPER_NO_CONTRACT;
    return ( m_EphemeralBitmap [ CardWord (card) ] & (1 << CardBit (card)));
}

inline
void SyncBlockCache::CardTableSetBit (size_t idx)
{
    WRAPPER_NO_CONTRACT;
    SetCard (CardOf (idx));
}


size_t BitMapSize (size_t cacheSize)
{
    LIMITED_METHOD_CONTRACT;

    return (cacheSize + card_size * card_word_width - 1)/ (card_size * card_word_width);
}
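
// Geometry of the ephemeral bitmap: one bit per "card", where a card covers
// card_size (32) consecutive sync table entries, and each DWORD of the bitmap
// holds card_word_width (32) cards, i.e. 1024 entries. For example, entry
// index 40 lands on card CardOf(40) == 1, which lives in word CardWord(1) == 0
// at bit CardBit(1) == 1; and BitMapSize(SYNC_TABLE_INITIAL_SIZE + 1) ==
// (251 + 1023) / 1024 == 1 DWORD.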

// ***************************************************************************
//
//              SyncBlockCache class implementation
//
// ***************************************************************************

SyncBlockCache::SyncBlockCache()
    : m_pCleanupBlockList(NULL),
      m_FreeBlockList(NULL),

      // NOTE: CRST_UNSAFE_ANYMODE prevents a GC mode switch when entering this crst.
      // If you remove this flag, we will switch to preemptive mode when entering
      // g_criticalSection, which means all functions that enter it will become
      // GC_TRIGGERS.  (This includes all uses of LockHolder around SyncBlockCache::GetSyncBlockCache().)
      // So be sure to update the contracts if you remove this flag.
      m_CacheLock(CrstSyncBlockCache, (CrstFlags) (CRST_UNSAFE_ANYMODE | CRST_DEBUGGER_THREAD)),

      m_FreeCount(0),
      m_ActiveCount(0),
      m_SyncBlocks(0),
      m_FreeSyncBlock(0),
      m_FreeSyncTableIndex(1),
      m_FreeSyncTableList(0),
      m_SyncTableSize(SYNC_TABLE_INITIAL_SIZE),
      m_OldSyncTables(0),
      m_bSyncBlockCleanupInProgress(FALSE),
      m_EphemeralBitmap(0)
{
    CONTRACTL
    {
        CONSTRUCTOR_CHECK;
        THROWS;
        GC_NOTRIGGER;
        MODE_ANY;
        INJECT_FAULT(COMPlusThrowOM());
    }
    CONTRACTL_END;
}


// This method is no longer called.
SyncBlockCache::~SyncBlockCache()
{
    CONTRACTL
    {
        DESTRUCTOR_CHECK;
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
    }
    CONTRACTL_END;

    // Clear the list the fast way.
    m_FreeBlockList = NULL;
    //<TODO>@todo we can clear this fast too I guess</TODO>
    m_pCleanupBlockList = NULL;

    // destruct all arrays
    while (m_SyncBlocks)
    {
        SyncBlockArray *next = m_SyncBlocks->m_Next;
        delete m_SyncBlocks;
        m_SyncBlocks = next;
    }

    // Also, now is a good time to clean up all the old tables which we discarded
    // when we overflowed them.
    SyncTableEntry* arr;
    while ((arr = m_OldSyncTables) != 0)
    {
        m_OldSyncTables = (SyncTableEntry*)arr[0].m_Object.Load();
        delete[] arr;
    }
}


// When the GC determines that an object is dead, the low bit of the
// m_Object field of its SyncTableEntry is set; however, the entry is not
// cleaned up immediately because we can't do the COM interop cleanup at GC
// time. Instead the sync block is put on a cleanup list, and at a later time
// (typically during finalization) this list is cleaned up.
//
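// The Param struct and EE_TRY_FOR_FINALLY below exist so that if cleanup is
// interrupted by an exception, the finally block can still release the RCW
// and sync block that were being processed and clear the in-progress flag.
//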
void SyncBlockCache::CleanupSyncBlocks()
{
    STATIC_CONTRACT_THROWS;
    STATIC_CONTRACT_MODE_COOPERATIVE;

    _ASSERTE(GetThread() == FinalizerThread::GetFinalizerThread());

    // Set the flag indicating sync block cleanup is in progress.
    // IMPORTANT: This must be set before the sync block cleanup bit is reset on the thread.
    m_bSyncBlockCleanupInProgress = TRUE;

    struct Param
    {
        SyncBlockCache *pThis;
        SyncBlock* psb;
#ifdef FEATURE_COMINTEROP
        RCW* pRCW;
#endif
    } param;
    param.pThis = this;
    param.psb = NULL;
#ifdef FEATURE_COMINTEROP
    param.pRCW = NULL;
#endif

    EE_TRY_FOR_FINALLY(Param *, pParam, &param)
    {
        // reset the flag
        FinalizerThread::GetFinalizerThread()->ResetSyncBlockCleanup();

        // walk the cleanup list and clean 'em up
        while ((pParam->psb = pParam->pThis->GetNextCleanupSyncBlock()) != NULL)
        {
#ifdef FEATURE_COMINTEROP
            InteropSyncBlockInfo* pInteropInfo = pParam->psb->GetInteropInfoNoCreate();
            if (pInteropInfo)
            {
                pParam->pRCW = pInteropInfo->GetRawRCW();
                if (pParam->pRCW)
                {
                    // We should have initialized the cleanup list with the
                    // first RCW cache we created
                    _ASSERTE(g_pRCWCleanupList != NULL);

                    g_pRCWCleanupList->AddWrapper(pParam->pRCW);

                    pParam->pRCW = NULL;
                    pInteropInfo->SetRawRCW(NULL);
                }
            }
#endif // FEATURE_COMINTEROP

            // Delete the sync block.
            pParam->pThis->DeleteSyncBlock(pParam->psb);
            pParam->psb = NULL;

            // pulse GC mode to allow GC to perform its work
            if (FinalizerThread::GetFinalizerThread()->CatchAtSafePointOpportunistic())
            {
                FinalizerThread::GetFinalizerThread()->PulseGCMode();
            }
        }

#ifdef FEATURE_COMINTEROP
        // Now clean up the RCWs, sorted by context
        if (g_pRCWCleanupList != NULL)
            g_pRCWCleanupList->CleanupAllWrappers();
#endif // FEATURE_COMINTEROP
    }
    EE_FINALLY
    {
        // We are finished cleaning up the sync blocks.
        m_bSyncBlockCleanupInProgress = FALSE;

#ifdef FEATURE_COMINTEROP
        if (param.pRCW)
            param.pRCW->Cleanup();
#endif

        if (param.psb)
            DeleteSyncBlock(param.psb);
    } EE_END_FINALLY;
}

// attach the sync block cache (currently a no-op)
/* static */
void SyncBlockCache::Attach()
{
    LIMITED_METHOD_CONTRACT;
}

// create the sync block cache
/* static */
void SyncBlockCache::Start()
{
    CONTRACTL
    {
        THROWS;
        GC_NOTRIGGER;
        MODE_ANY;
        INJECT_FAULT(COMPlusThrowOM(););
    }
    CONTRACTL_END;

    DWORD* bm = new DWORD [BitMapSize(SYNC_TABLE_INITIAL_SIZE+1)];

    memset (bm, 0, BitMapSize (SYNC_TABLE_INITIAL_SIZE+1)*sizeof(DWORD));

    SyncTableEntry::GetSyncTableEntryByRef() = new SyncTableEntry[SYNC_TABLE_INITIAL_SIZE+1];
#ifdef _DEBUG
    for (int i=0; i<SYNC_TABLE_INITIAL_SIZE+1; i++) {
        SyncTableEntry::GetSyncTableEntry()[i].m_SyncBlock = NULL;
    }
#endif

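    // Entry 0 of the sync table is reserved: a sync block index of 0 in an
    // object header means "no sync block", and Grow() reuses entry 0's
    // m_Object field to chain retired tables (m_FreeSyncTableIndex starts at 1).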
    SyncTableEntry::GetSyncTableEntry()[0].m_SyncBlock = 0;
    SyncBlockCache::GetSyncBlockCache() = new (&g_SyncBlockCacheInstance) SyncBlockCache;

    SyncBlockCache::GetSyncBlockCache()->m_EphemeralBitmap = bm;

#ifndef FEATURE_PAL
    InitializeSListHead(&InteropSyncBlockInfo::s_InteropInfoStandbyList);
#endif // !FEATURE_PAL
}


// destroy the sync block cache
/* static */
void SyncBlockCache::Stop()
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
    }
    CONTRACTL_END;

    // cache must be destroyed first, since it can traverse the table to find all the
    // sync blocks which are live and thus must have their critical sections destroyed.
    if (SyncBlockCache::GetSyncBlockCache())
    {
        delete SyncBlockCache::GetSyncBlockCache();
        SyncBlockCache::GetSyncBlockCache() = 0;
    }

    if (SyncTableEntry::GetSyncTableEntry())
    {
        delete[] SyncTableEntry::GetSyncTableEntry();
        SyncTableEntry::GetSyncTableEntryByRef() = 0;
    }
}


void    SyncBlockCache::InsertCleanupSyncBlock(SyncBlock* psb)
{
    CONTRACTL
    {
        INSTANCE_CHECK;
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
    }
    CONTRACTL_END;

    // free up the threads that are waiting before we use the link
    // for other purposes
    if (psb->m_Link.m_pNext != NULL)
    {
        while (ThreadQueue::DequeueThread(psb) != NULL)
            continue;
    }

#ifdef FEATURE_COMINTEROP
    if (psb->m_pInteropInfo)
    {
        // called during GC, so do only minor cleanup
        MinorCleanupSyncBlockComData(psb->m_pInteropInfo);
    }
#endif // FEATURE_COMINTEROP

    // This method will be called only by the GC thread
    //<TODO>@todo add an assert for the above statement</TODO>
    // we don't need to lock here
    //EnterCacheLock();

    psb->m_Link.m_pNext = m_pCleanupBlockList;
    m_pCleanupBlockList = &psb->m_Link;

    // we don't need a lock here
    //LeaveCacheLock();
}

SyncBlock* SyncBlockCache::GetNextCleanupSyncBlock()
{
    LIMITED_METHOD_CONTRACT;

    // we don't need a lock here,
    // as this is called only on the finalizer thread currently

    SyncBlock       *psb = NULL;
    if (m_pCleanupBlockList)
    {
        // get the actual sync block pointer
        psb = (SyncBlock *) (((BYTE *) m_pCleanupBlockList) - offsetof(SyncBlock, m_Link));
        m_pCleanupBlockList = m_pCleanupBlockList->m_pNext;
    }
    return psb;
}


// returns and removes the next free syncblock from the list
// the cache lock must be entered to call this
SyncBlock *SyncBlockCache::GetNextFreeSyncBlock()
{
    CONTRACTL
    {
        INJECT_FAULT(COMPlusThrowOM());
        THROWS;
        GC_NOTRIGGER;
        MODE_ANY;
    }
    CONTRACTL_END;

#ifdef _DEBUG  // Instrumentation for OOM fault injection testing
    delete new char;
#endif

    SyncBlock       *psb;
    SLink           *plst = m_FreeBlockList;

    m_ActiveCount++;

    if (plst)
    {
        m_FreeBlockList = m_FreeBlockList->m_pNext;

        // shouldn't be 0
        m_FreeCount--;

        // get the actual sync block pointer
        psb = (SyncBlock *) (((BYTE *) plst) - offsetof(SyncBlock, m_Link));

        return psb;
    }
    else
    {
        if ((m_SyncBlocks == NULL) || (m_FreeSyncBlock >= MAXSYNCBLOCK))
        {
#ifdef DUMP_SB
//            LogSpewAlways("Allocating new syncblock array\n");
//            DumpSyncBlockCache();
#endif
            SyncBlockArray* newsyncblocks = new(SyncBlockArray);
            if (!newsyncblocks)
                COMPlusThrowOM ();

            newsyncblocks->m_Next = m_SyncBlocks;
            m_SyncBlocks = newsyncblocks;
            m_FreeSyncBlock = 0;
        }
        return &(((SyncBlock*)m_SyncBlocks->m_Blocks)[m_FreeSyncBlock++]);
    }

}

void SyncBlockCache::Grow()
{
    CONTRACTL
    {
        INSTANCE_CHECK;
        THROWS;
        GC_NOTRIGGER;
        MODE_COOPERATIVE;
        INJECT_FAULT(COMPlusThrowOM(););
    }
    CONTRACTL_END;

    STRESS_LOG0(LF_SYNC, LL_INFO10000, "SyncBlockCache::NewSyncBlockSlot growing SyncBlockCache \n");

    NewArrayHolder<SyncTableEntry> newSyncTable (NULL);
    NewArrayHolder<DWORD>          newBitMap    (NULL);
    DWORD *                        oldBitMap;

    // Compute the size of the new synctable. Normally, we double it - unless
    // doing so would create slots with indices too high to fit within the
    // mask. If so, we create a synctable up to the mask limit. If we're
    // already at the mask limit, then the caller is out of luck.
    DWORD newSyncTableSize;
    if (m_SyncTableSize <= (MASK_SYNCBLOCKINDEX >> 1))
    {
        newSyncTableSize = m_SyncTableSize * 2;
    }
    else
    {
        newSyncTableSize = MASK_SYNCBLOCKINDEX;
    }

    if (!(newSyncTableSize > m_SyncTableSize)) // Make sure we actually found room to grow!
    {
        COMPlusThrowOM();
    }

    newSyncTable = new SyncTableEntry[newSyncTableSize];
    newBitMap = new DWORD[BitMapSize (newSyncTableSize)];


    {
        //! From here on, we assume that we will succeed and start doing global side-effects.
        //! Any operation that could fail must occur before this point.
        CANNOTTHROWCOMPLUSEXCEPTION();
        FAULT_FORBID();

        newSyncTable.SuppressRelease();
        newBitMap.SuppressRelease();


        // We chain the old tables because we can't delete
        // them before all the threads are stopped
        // (next GC)
        SyncTableEntry::GetSyncTableEntry() [0].m_Object = (Object *)m_OldSyncTables;
        m_OldSyncTables = SyncTableEntry::GetSyncTableEntry();

        memset (newSyncTable, 0, newSyncTableSize*sizeof (SyncTableEntry));
        memset (newBitMap, 0, BitMapSize (newSyncTableSize)*sizeof (DWORD));
        CopyMemory (newSyncTable, SyncTableEntry::GetSyncTableEntry(),
                    m_SyncTableSize*sizeof (SyncTableEntry));

        CopyMemory (newBitMap, m_EphemeralBitmap,
                    BitMapSize (m_SyncTableSize)*sizeof (DWORD));

        oldBitMap = m_EphemeralBitmap;
        m_EphemeralBitmap = newBitMap;
        delete[] oldBitMap;

        _ASSERTE((m_SyncTableSize & MASK_SYNCBLOCKINDEX) == m_SyncTableSize);
        // note: we do not care if another thread does not see the new size;
        // however, we really do not want it to see the new size without seeing the new array
        //@TODO do we still leak here if two threads come here at the same time ?
        FastInterlockExchangePointer(&SyncTableEntry::GetSyncTableEntryByRef(), newSyncTable.GetValue());

        m_FreeSyncTableIndex++;

        m_SyncTableSize = newSyncTableSize;

#ifdef _DEBUG
        static int dumpSBOnResize = -1;

        if (dumpSBOnResize == -1)
            dumpSBOnResize = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_SBDumpOnResize);

        if (dumpSBOnResize)
        {
            LogSpewAlways("SyncBlockCache resized\n");
            DumpSyncBlockCache();
        }
#endif
    }
}

DWORD SyncBlockCache::NewSyncBlockSlot(Object *obj)
{
    CONTRACTL
    {
        INSTANCE_CHECK;
        THROWS;
        GC_NOTRIGGER;
        MODE_COOPERATIVE;
        INJECT_FAULT(COMPlusThrowOM(););
    }
    CONTRACTL_END;
    _ASSERTE(m_CacheLock.OwnedByCurrentThread()); // GetSyncBlock takes the lock, make sure no one else does.

    DWORD indexNewEntry;
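    // Free sync table slots form an intrusive singly linked list threaded
    // through the dead entries' m_Object fields: m_FreeSyncTableList holds the
    // head index shifted left by one, and each free entry's m_Object holds the
    // next link with its low bit set to mark the entry as dead/free (see
    // GCWeakPtrScanElement, which pushes entries onto this list).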
    if (m_FreeSyncTableList)
    {
        indexNewEntry = (DWORD)(m_FreeSyncTableList >> 1);
        _ASSERTE ((size_t)SyncTableEntry::GetSyncTableEntry()[indexNewEntry].m_Object.Load() & 1);
        m_FreeSyncTableList = (size_t)SyncTableEntry::GetSyncTableEntry()[indexNewEntry].m_Object.Load() & ~1;
    }
    else if ((indexNewEntry = (DWORD)(m_FreeSyncTableIndex)) >= m_SyncTableSize)
    {
        // This is kept out of line to keep stuff like the C++ EH prolog (needed for holders) off
        // of the common path.
        Grow();
    }
    else
    {
#ifdef _DEBUG
        static int dumpSBOnNewIndex = -1;

        if (dumpSBOnNewIndex == -1)
            dumpSBOnNewIndex = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_SBDumpOnNewIndex);

        if (dumpSBOnNewIndex)
        {
            LogSpewAlways("SyncBlockCache index incremented\n");
            DumpSyncBlockCache();
        }
#endif
        m_FreeSyncTableIndex ++;
    }


    CardTableSetBit (indexNewEntry);

    // In debug builds the m_SyncBlock at indexNewEntry should already be null, since we should
    // start out with a null table and always null it out on delete.
    _ASSERTE(SyncTableEntry::GetSyncTableEntry() [indexNewEntry].m_SyncBlock == NULL);
    SyncTableEntry::GetSyncTableEntry() [indexNewEntry].m_SyncBlock = NULL;
    SyncTableEntry::GetSyncTableEntry() [indexNewEntry].m_Object = obj;

    _ASSERTE(indexNewEntry != 0);

    return indexNewEntry;
}


// free a used sync block, only called from CleanupSyncBlocks.
void SyncBlockCache::DeleteSyncBlock(SyncBlock *psb)
{
    CONTRACTL
    {
        INSTANCE_CHECK;
        THROWS;
        GC_TRIGGERS;
        MODE_ANY;
        INJECT_FAULT(COMPlusThrowOM());
    }
    CONTRACTL_END;

    // clean up COM data
    if (psb->m_pInteropInfo)
    {
#ifdef FEATURE_COMINTEROP
        CleanupSyncBlockComData(psb->m_pInteropInfo);
#endif // FEATURE_COMINTEROP

#ifndef FEATURE_PAL
        if (g_fEEShutDown)
        {
            delete psb->m_pInteropInfo;
        }
        else
        {
            psb->m_pInteropInfo->~InteropSyncBlockInfo();
            InterlockedPushEntrySList(&InteropSyncBlockInfo::s_InteropInfoStandbyList, (PSLIST_ENTRY)psb->m_pInteropInfo);
        }
#else // !FEATURE_PAL
        delete psb->m_pInteropInfo;
#endif // !FEATURE_PAL
    }

#ifdef EnC_SUPPORTED
    // clean up EnC info
    if (psb->m_pEnCInfo)
        psb->m_pEnCInfo->Cleanup();
#endif // EnC_SUPPORTED

    // Destruct the SyncBlock, but don't reclaim its memory.  (Overridden
    // operator delete).
    delete psb;

    // synchronize with the consumers,
    // <TODO>@todo we don't really need a lock here, we can come up
    // with some simple algo to avoid taking a lock </TODO>
    {
        SyncBlockCache::LockHolder lh(this);

        DeleteSyncBlockMemory(psb);
    }
}


// returns the sync block memory to the free pool but does not destruct sync block (must own cache lock already)
void    SyncBlockCache::DeleteSyncBlockMemory(SyncBlock *psb)
{
    CONTRACTL
    {
        INSTANCE_CHECK;
        NOTHROW;
        GC_NOTRIGGER;
        FORBID_FAULT;
    }
    CONTRACTL_END;

    m_ActiveCount--;
    m_FreeCount++;

    psb->m_Link.m_pNext = m_FreeBlockList;
    m_FreeBlockList = &psb->m_Link;

}

// free a used sync block
void SyncBlockCache::GCDeleteSyncBlock(SyncBlock *psb)
{
    CONTRACTL
    {
        INSTANCE_CHECK;
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
    }
    CONTRACTL_END;

    // Destruct the SyncBlock, but don't reclaim its memory.  (Overridden
    // operator delete).
    delete psb;

    m_ActiveCount--;
    m_FreeCount++;

    psb->m_Link.m_pNext = m_FreeBlockList;
    m_FreeBlockList = &psb->m_Link;
}

void SyncBlockCache::GCWeakPtrScan(HANDLESCANPROC scanProc, uintptr_t lp1, uintptr_t lp2)
{
    CONTRACTL
    {
        INSTANCE_CHECK;
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
    }
    CONTRACTL_END;


    // First delete the obsolete arrays since we have exclusive access
    BOOL fSetSyncBlockCleanup = FALSE;

    SyncTableEntry* arr;
    while ((arr = m_OldSyncTables) != NULL)
    {
        m_OldSyncTables = (SyncTableEntry*)arr[0].m_Object.Load();
        delete[] arr;
    }

#ifdef DUMP_SB
    LogSpewAlways("GCWeakPtrScan starting\n");
#endif

#ifdef VERIFY_HEAP
    if (g_pConfig->GetHeapVerifyLevel()& EEConfig::HEAPVERIFY_SYNCBLK)
        STRESS_LOG0 (LF_GC | LF_SYNC, LL_INFO100, "GCWeakPtrScan starting\n");
#endif

    if (GCHeapUtilities::GetGCHeap()->GetCondemnedGeneration() < GCHeapUtilities::GetGCHeap()->GetMaxGeneration())
    {
#ifdef VERIFY_HEAP
        // For VSW 294550: we saw stale object references in the SyncBlkCache, so we want to make sure the card
        // table logic above works correctly so that every ephemeral entry is promoted.
        // For verification, we make a copy of the sync table in the relocation phase, promote it using the
        // slow approach, and compare the result with the original one.
        DWORD freeSyncTableIndexCopy = m_FreeSyncTableIndex;
        SyncTableEntry * syncTableShadow = NULL;
        if ((g_pConfig->GetHeapVerifyLevel()& EEConfig::HEAPVERIFY_SYNCBLK) && !((ScanContext*)lp1)->promotion)
        {
            syncTableShadow = new(nothrow) SyncTableEntry [m_FreeSyncTableIndex];
            if (syncTableShadow)
            {
                memcpy (syncTableShadow, SyncTableEntry::GetSyncTableEntry(), m_FreeSyncTableIndex * sizeof (SyncTableEntry));
            }
        }
#endif //VERIFY_HEAP

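        // For an ephemeral GC, only table entries covered by a set card bit
        // can refer to ephemeral objects, so we walk the bitmap word by word
        // and scan just those cards; a card whose entries no longer hold any
        // ephemeral object is cleared along the way.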
        //scan the bitmap
        size_t dw = 0;
        while (1)
        {
            while (dw < BitMapSize (m_SyncTableSize) && (m_EphemeralBitmap[dw]==0))
            {
                dw++;
            }
            if (dw < BitMapSize (m_SyncTableSize))
            {
                //found one
                for (int i = 0; i < card_word_width; i++)
                {
                    size_t card = i+dw*card_word_width;
                    if (CardSetP (card))
                    {
                        BOOL clear_card = TRUE;
                        for (int idx = 0; idx < card_size; idx++)
                        {
                            size_t nb = CardIndex (card) + idx;
                            if (( nb < m_FreeSyncTableIndex) && (nb > 0))
                            {
                                Object* o = SyncTableEntry::GetSyncTableEntry()[nb].m_Object;
                                if (o && !((size_t)o & 1))
                                {
                                    if (GCHeapUtilities::GetGCHeap()->IsEphemeral (o))
                                    {
                                        clear_card = FALSE;

                                        GCWeakPtrScanElement ((int)nb, scanProc,
                                                              lp1, lp2, fSetSyncBlockCleanup);
                                    }
                                }
                            }
                        }
                        if (clear_card)
                            ClearCard (card);
                    }
                }
                dw++;
            }
            else
                break;
        }

#ifdef VERIFY_HEAP
        // For VSW 294550: we saw stale object references in the SyncBlkCache, so we want to make sure the card
        // table logic above works correctly so that every ephemeral entry is promoted. To verify, we make a
        // copy of the sync table, promote it using the slow approach, and compare the result with the real one.
        if (g_pConfig->GetHeapVerifyLevel()& EEConfig::HEAPVERIFY_SYNCBLK)
        {
            if (syncTableShadow)
            {
                for (DWORD nb = 1; nb < m_FreeSyncTableIndex; nb++)
                {
                    Object **keyv = (Object **) &syncTableShadow[nb].m_Object;

                    if (((size_t) *keyv & 1) == 0)
                    {
                        (*scanProc) (keyv, NULL, lp1, lp2);
                        SyncBlock   *pSB = syncTableShadow[nb].m_SyncBlock;
                        if (*keyv != 0 && (!pSB || !pSB->IsIDisposable()))
                        {
                            if (syncTableShadow[nb].m_Object != SyncTableEntry::GetSyncTableEntry()[nb].m_Object)
                                DebugBreak ();
                        }
                    }
                }
                delete []syncTableShadow;
                syncTableShadow = NULL;
            }
            if (freeSyncTableIndexCopy != m_FreeSyncTableIndex)
                DebugBreak ();
        }
#endif //VERIFY_HEAP

    }
    else
    {
        for (DWORD nb = 1; nb < m_FreeSyncTableIndex; nb++)
        {
            GCWeakPtrScanElement (nb, scanProc, lp1, lp2, fSetSyncBlockCleanup);
        }


    }

    if (fSetSyncBlockCleanup)
    {
        // mark the finalizer thread as requiring sync block cleanup
        FinalizerThread::GetFinalizerThread()->SetSyncBlockCleanup();
        FinalizerThread::EnableFinalization();
    }

#if defined(VERIFY_HEAP)
    if (g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_GC)
    {
        if (((ScanContext*)lp1)->promotion)
        {

            for (int nb = 1; nb < (int)m_FreeSyncTableIndex; nb++)
            {
                Object* o = SyncTableEntry::GetSyncTableEntry()[nb].m_Object;
                if (((size_t)o & 1) == 0)
                {
                    o->Validate();
                }
            }
        }
    }
#endif // VERIFY_HEAP
}

/* Scan the weak pointers in the SyncBlockEntry and report them to the GC.  If the
   reference is dead, then return TRUE */

BOOL SyncBlockCache::GCWeakPtrScanElement (int nb, HANDLESCANPROC scanProc, LPARAM lp1, LPARAM lp2,
                                           BOOL& cleanup)
{
    CONTRACTL
    {
        INSTANCE_CHECK;
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
    }
    CONTRACTL_END;

    Object **keyv = (Object **) &SyncTableEntry::GetSyncTableEntry()[nb].m_Object;

#ifdef DUMP_SB
    struct Param
    {
        Object **keyv;
        char *name;
    } param;
    param.keyv = keyv;

    PAL_TRY(Param *, pParam, &param) {
        if (! *pParam->keyv)
            pParam->name = "null";
        else if ((size_t) *pParam->keyv & 1)
            pParam->name = "free";
        else {
            pParam->name = (*pParam->keyv)->GetClass()->GetDebugClassName();
            if (strlen(pParam->name) == 0)
                pParam->name = "<INVALID>";
        }
    } PAL_EXCEPT(EXCEPTION_EXECUTE_HANDLER) {
        param.name = "<INVALID>";
    }
    PAL_ENDTRY
    LogSpewAlways("[%4.4d]: %8.8x, %s\n", nb, *keyv, param.name);
#endif

    if (((size_t) *keyv & 1) == 0)
    {
#ifdef VERIFY_HEAP
        if (g_pConfig->GetHeapVerifyLevel () & EEConfig::HEAPVERIFY_SYNCBLK)
        {
            STRESS_LOG3 (LF_GC | LF_SYNC, LL_INFO100000, "scanning syncblk[%d, %p, %p]\n", nb, (size_t)SyncTableEntry::GetSyncTableEntry()[nb].m_SyncBlock, (size_t)*keyv);
        }
#endif

        (*scanProc) (keyv, NULL, lp1, lp2);
        SyncBlock   *pSB = SyncTableEntry::GetSyncTableEntry()[nb].m_SyncBlock;
        if ((*keyv == 0 ) || (pSB && pSB->IsIDisposable()))
        {
#ifdef VERIFY_HEAP
            if (g_pConfig->GetHeapVerifyLevel () & EEConfig::HEAPVERIFY_SYNCBLK)
            {
                STRESS_LOG3 (LF_GC | LF_SYNC, LL_INFO100000, "freeing syncblk[%d, %p, %p]\n", nb, (size_t)pSB, (size_t)*keyv);
            }
#endif

            if (*keyv)
            {
                _ASSERTE (pSB);
                GCDeleteSyncBlock(pSB);
                // clean the object's syncblock header
                ((Object*)(*keyv))->GetHeader()->GCResetIndex();
            }
            else if (pSB)
            {

                cleanup = TRUE;
                // insert block into cleanup list
                InsertCleanupSyncBlock (SyncTableEntry::GetSyncTableEntry()[nb].m_SyncBlock);
#ifdef DUMP_SB
                LogSpewAlways("       Cleaning up block at %4.4d\n", nb);
#endif
            }

            // delete the entry
#ifdef DUMP_SB
            LogSpewAlways("       Deleting block at %4.4d\n", nb);
#endif
            SyncTableEntry::GetSyncTableEntry()[nb].m_Object = (Object *)(m_FreeSyncTableList | 1);
            m_FreeSyncTableList = nb << 1;
            SyncTableEntry::GetSyncTableEntry()[nb].m_SyncBlock = NULL;
            return TRUE;
        }
        else
        {
#ifdef DUMP_SB
            LogSpewAlways("       Keeping block at %4.4d with oref %8.8x\n", nb, *keyv);
#endif
        }
    }
    return FALSE;
}

void SyncBlockCache::GCDone(BOOL demoting, int max_gen)
{
    CONTRACTL
    {
        INSTANCE_CHECK;
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
    }
    CONTRACTL_END;

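    // A full GC may have demoted objects into a younger generation. Re-scan
    // the cards that are currently clear and set the card for any entry whose
    // object now lives below max_gen, so the next ephemeral GC will scan it.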
    if (demoting &&
        (GCHeapUtilities::GetGCHeap()->GetCondemnedGeneration() ==
         GCHeapUtilities::GetGCHeap()->GetMaxGeneration()))
    {
        //scan the bitmap
        size_t dw = 0;
        while (1)
        {
            while (dw < BitMapSize (m_SyncTableSize) &&
                   (m_EphemeralBitmap[dw]==(DWORD)~0))
            {
                dw++;
            }
            if (dw < BitMapSize (m_SyncTableSize))
            {
                //found one
                for (int i = 0; i < card_word_width; i++)
                {
                    size_t card = i+dw*card_word_width;
                    if (!CardSetP (card))
                    {
                        for (int idx = 0; idx < card_size; idx++)
                        {
                            size_t nb = CardIndex (card) + idx;
                            if (( nb < m_FreeSyncTableIndex) && (nb > 0))
                            {
                                Object* o = SyncTableEntry::GetSyncTableEntry()[nb].m_Object;
                                if (o && !((size_t)o & 1))
                                {
                                    if (GCHeapUtilities::GetGCHeap()->WhichGeneration (o) < (unsigned int)max_gen)
                                    {
                                        SetCard (card);
                                        break;

                                    }
                                }
                            }
                        }
                    }
                }
                dw++;
            }
            else
                break;
        }
    }
}


#if defined (VERIFY_HEAP)

#ifndef _DEBUG
#ifdef _ASSERTE
#undef _ASSERTE
#endif
#define _ASSERTE(c) if (!(c)) DebugBreak()
#endif

void SyncBlockCache::VerifySyncTableEntry()
{
    CONTRACTL
    {
        INSTANCE_CHECK;
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
    }
    CONTRACTL_END;

    for (DWORD nb = 1; nb < m_FreeSyncTableIndex; nb++)
    {
        Object* o = SyncTableEntry::GetSyncTableEntry()[nb].m_Object;
        // if the slot was just allocated, the object may still be null
        if (o && (((size_t)o & 1) == 0))
        {
            //there is no need to verify next object's header because this is called
            //from verify_heap, which will verify every object anyway
            o->Validate(TRUE, FALSE);

            //
            // This loop is just a heuristic to try to catch errors, but it is not 100%.
            // To prevent false positives, we weaken our assert below to exclude the case
            // where the index is still NULL, but we've reached the end of our loop.
            //
            static const DWORD max_iterations = 100;
            DWORD loop = 0;

            for (; loop < max_iterations; loop++)
            {
                // The sync block index may still be in the process of being set by another thread.
                if (o->GetHeader()->GetHeaderSyncBlockIndex() != 0)
                {
                    break;
                }
                __SwitchToThread(0, CALLER_LIMITS_SPINNING);
            }

            DWORD idx = o->GetHeader()->GetHeaderSyncBlockIndex();
            _ASSERTE(idx == nb || ((0 == idx) && (loop == max_iterations)));
            _ASSERTE(!GCHeapUtilities::GetGCHeap()->IsEphemeral(o) || CardSetP(CardOf(nb)));
        }
    }
}

#ifndef _DEBUG
#undef _ASSERTE
#define _ASSERTE(expr) ((void)0)
#endif   // _DEBUG

#endif // VERIFY_HEAP

#ifdef _DEBUG

void DumpSyncBlockCache()
{
    STATIC_CONTRACT_NOTHROW;

    SyncBlockCache *pCache = SyncBlockCache::GetSyncBlockCache();

    LogSpewAlways("Dumping SyncBlockCache size %d\n", pCache->m_FreeSyncTableIndex);

    static int dumpSBStyle = -1;
    if (dumpSBStyle == -1)
        dumpSBStyle = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_SBDumpStyle);
    if (dumpSBStyle == 0)
        return;

    BOOL isString = FALSE;
    DWORD objectCount = 0;
    DWORD slotCount = 0;

    for (DWORD nb = 1; nb < pCache->m_FreeSyncTableIndex; nb++)
    {
        isString = FALSE;
        char buffer[1024], buffer2[1024];
        LPCUTF8 descrip = "null";
        SyncTableEntry *pEntry = &SyncTableEntry::GetSyncTableEntry()[nb];
        Object *oref = (Object *) pEntry->m_Object;
        if (((size_t) oref & 1) != 0)
        {
            descrip = "free";
            oref = 0;
        }
        else
        {
            ++slotCount;
            if (oref)
            {
                ++objectCount;

                struct Param
                {
                    LPCUTF8 descrip;
                    Object *oref;
                    char *buffer2;
                    UINT cch2;
                    BOOL isString;
                } param;
                param.descrip = descrip;
                param.oref = oref;
                param.buffer2 = buffer2;
                param.cch2 = COUNTOF(buffer2);
                param.isString = isString;

                PAL_TRY(Param *, pParam, &param)
                {
                    pParam->descrip = pParam->oref->GetMethodTable()->GetDebugClassName();
                    if (strlen(pParam->descrip) == 0)
                        pParam->descrip = "<INVALID>";
                    else if (pParam->oref->GetMethodTable() == g_pStringClass)
                    {
                        sprintf_s(pParam->buffer2, pParam->cch2, "%s (%S)", pParam->descrip, ObjectToSTRINGREF((StringObject*)pParam->oref)->GetBuffer());
                        pParam->descrip = pParam->buffer2;
                        pParam->isString = TRUE;
                    }
                }
                PAL_EXCEPT(EXCEPTION_EXECUTE_HANDLER) {
                    param.descrip = "<INVALID>";
                }
                PAL_ENDTRY

                descrip = param.descrip;
                isString = param.isString;
            }
            sprintf_s(buffer, COUNTOF(buffer), "%s", descrip);
            descrip = buffer;
        }
        if (dumpSBStyle < 2)
            LogSpewAlways("[%4.4d]: %8.8x %s\n", nb, oref, descrip);
        else if (dumpSBStyle == 2 && ! isString)
            LogSpewAlways("[%4.4d]: %s\n", nb, descrip);
    }
    LogSpewAlways("Done dumping SyncBlockCache used slots: %d, objects: %d\n", slotCount, objectCount);
}
#endif

// ***************************************************************************
//
//              ObjHeader class implementation
//
// ***************************************************************************

#if defined(ENABLE_CONTRACTS_IMPL)
// The LOCK_TAKEN/RELEASED macros need a "pointer" to the lock object to do
// comparisons between takes & releases (and to provide debugging info to the
// developer).  Ask the syncblock for its lock contract pointer, if the
// syncblock exists.  Otherwise, use the MethodTable* from the Object.  That's not great,
// as it's not unique, so we might miss unbalanced lock takes/releases from
// different objects of the same type.  However, our hands are tied, and we can't
// do much better.
void * ObjHeader::GetPtrForLockContract()
{
    if (GetHeaderSyncBlockIndex() == 0)
    {
        return (void *) GetBaseObject()->GetMethodTable();
    }

    return PassiveGetSyncBlock()->GetPtrForLockContract();
}
#endif // defined(ENABLE_CONTRACTS_IMPL)

// this enters the monitor of an object
void ObjHeader::EnterObjMonitor()
{
    WRAPPER_NO_CONTRACT;
    GetSyncBlock()->EnterMonitor();
}

// Non-blocking version of above
BOOL ObjHeader::TryEnterObjMonitor(INT32 timeOut)
{
    WRAPPER_NO_CONTRACT;
    return GetSyncBlock()->TryEnterMonitor(timeOut);
}

AwareLock::EnterHelperResult ObjHeader::EnterObjMonitorHelperSpin(Thread* pCurThread)
{
    CONTRACTL{
        NOTHROW;
        GC_NOTRIGGER;
        MODE_COOPERATIVE;
    } CONTRACTL_END;

    // Note: EnterObjMonitorHelper must be called before this function (see below)

    if (g_SystemInfo.dwNumberOfProcessors == 1)
    {
        return AwareLock::EnterHelperResult_Contention;
    }

    YieldProcessorNormalizationInfo normalizationInfo;
    const DWORD spinCount = g_SpinConstants.dwMonitorSpinCount;
    for (DWORD spinIteration = 0; spinIteration < spinCount; ++spinIteration)
    {
        AwareLock::SpinWait(normalizationInfo, spinIteration);

        LONG oldValue = m_SyncBlockValue.LoadWithoutBarrier();

        // Since spinning has begun, chances are good that the monitor has already switched to AwareLock mode, so check for that
        // case first
        if (oldValue & BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX)
        {
            // If we have a hash code already, we need to create a sync block
            if (oldValue & BIT_SBLK_IS_HASHCODE)
            {
                return AwareLock::EnterHelperResult_UseSlowPath;
            }

            SyncBlock *syncBlock = g_pSyncTable[oldValue & MASK_SYNCBLOCKINDEX].m_SyncBlock;
            _ASSERTE(syncBlock != NULL);
            AwareLock *awareLock = &syncBlock->m_Monitor;

            AwareLock::EnterHelperResult result = awareLock->TryEnterBeforeSpinLoopHelper(pCurThread);
            if (result != AwareLock::EnterHelperResult_Contention)
            {
                return result;
            }

            ++spinIteration;
            if (spinIteration < spinCount)
            {
                while (true)
                {
                    AwareLock::SpinWait(normalizationInfo, spinIteration);

                    ++spinIteration;
                    if (spinIteration >= spinCount)
                    {
                        // The last lock attempt for this spin will be done after the loop
                        break;
                    }

                    result = awareLock->TryEnterInsideSpinLoopHelper(pCurThread);
                    if (result == AwareLock::EnterHelperResult_Entered)
                    {
                        return AwareLock::EnterHelperResult_Entered;
                    }
                    if (result == AwareLock::EnterHelperResult_UseSlowPath)
                    {
                        break;
                    }
                }
            }

            if (awareLock->TryEnterAfterSpinLoopHelper(pCurThread))
            {
                return AwareLock::EnterHelperResult_Entered;
            }
            break;
        }

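        // Thin lock path: the header word holds neither a hash code nor a
        // sync block index here. If the spin lock bit, owner thread id, and
        // recursion level bits are all clear, the lock is free, and a small
        // enough thread id can be CAS-ed directly into the header word.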
1659         DWORD tid = pCurThread->GetThreadId();
1660         if ((oldValue & (BIT_SBLK_SPIN_LOCK +
1661             SBLK_MASK_LOCK_THREADID +
1662             SBLK_MASK_LOCK_RECLEVEL)) == 0)
1663         {
1664             if (tid > SBLK_MASK_LOCK_THREADID)
1665             {
1666                 return AwareLock::EnterHelperResult_UseSlowPath;
1667             }
1668
1669             LONG newValue = oldValue | tid;
1670             if (InterlockedCompareExchangeAcquire((LONG*)&m_SyncBlockValue, newValue, oldValue) == oldValue)
1671             {
1672                 pCurThread->IncLockCount();
1673                 return AwareLock::EnterHelperResult_Entered;
1674             }
1675
1676             continue;
1677         }
1678
1679         // EnterObjMonitorHelper handles the thin lock recursion case. If it's not that case, it won't become that case. If
1680         // EnterObjMonitorHelper failed to increment the recursion level, it will go down the slow path and won't come here. So,
1681         // no need to check the recursion case here.
1682         _ASSERTE(
1683             // The header is transitioning - treat this as if the lock was taken
1684             oldValue & BIT_SBLK_SPIN_LOCK ||
1685             // Here we know we have the "thin lock" layout, but the lock is not free.
1686             // It can't be the recursion case though, because the call to EnterObjMonitorHelper prior to this would have taken
1687             // the slow path in the recursive case.
1688             tid != (DWORD)(oldValue & SBLK_MASK_LOCK_THREADID));
1689     }
1690
1691     return AwareLock::EnterHelperResult_Contention;
1692 }
1693
1694 BOOL ObjHeader::LeaveObjMonitor()
1695 {
1696     CONTRACTL
1697     {
1698         NOTHROW;
1699         GC_TRIGGERS;
1700         MODE_COOPERATIVE;
1701     }
1702     CONTRACTL_END;
1703
1704     //this function switch to preemp mode so we need to protect the object in some path
1705     OBJECTREF thisObj = ObjectToOBJECTREF (GetBaseObject ());
1706
1707     DWORD dwSwitchCount = 0;
1708
1709     for (;;)
1710     {
1711         AwareLock::LeaveHelperAction action = thisObj->GetHeader ()->LeaveObjMonitorHelper(GetThread());
1712
1713         switch(action)
1714         {
1715         case AwareLock::LeaveHelperAction_None:
1716             // We are done
1717             return TRUE;
1718         case AwareLock::LeaveHelperAction_Signal:
1719             {
1720                 // Signal the event
1721                 SyncBlock *psb = thisObj->GetHeader ()->PassiveGetSyncBlock();
1722                 if (psb != NULL)
1723                     psb->QuickGetMonitor()->Signal();
1724             }
1725             return TRUE;
1726         case AwareLock::LeaveHelperAction_Yield:
1727             YieldProcessorNormalized();
1728             continue;
1729         case AwareLock::LeaveHelperAction_Contention:
1730             // Some thread is updating the syncblock value.
1731             {
1732                 //protect the object before switching mode
1733                 GCPROTECT_BEGIN (thisObj);
1734                 GCX_PREEMP();
1735                 __SwitchToThread(0, ++dwSwitchCount);
1736                 GCPROTECT_END ();
1737             }
1738             continue;
1739         default:
1740             // Must be an error otherwise - ignore it
1741             _ASSERTE(action == AwareLock::LeaveHelperAction_Error);
1742             return FALSE;
1743         }
1744     }
1745 }
1746
1747 // The only difference between LeaveObjMonitor and LeaveObjMonitorAtException is that the
1748 // former switches to preemptive mode around __SwitchToThread while the latter does not.
1749 BOOL ObjHeader::LeaveObjMonitorAtException()
1750 {
1751     CONTRACTL
1752     {
1753         NOTHROW;
1754         GC_NOTRIGGER;
1755         MODE_COOPERATIVE;
1756     }
1757     CONTRACTL_END;
1758
1759     DWORD dwSwitchCount = 0;
1760
1761     for (;;)
1762     {
1763         AwareLock::LeaveHelperAction action = LeaveObjMonitorHelper(GetThread());
1764
1765         switch(action)
1766         {
1767         case AwareLock::LeaveHelperAction_None:
1768             // We are done
1769             return TRUE;
1770         case AwareLock::LeaveHelperAction_Signal:
1771             {
1772                 // Signal the event
1773                 SyncBlock *psb = PassiveGetSyncBlock();
1774                 if (psb != NULL)
1775                     psb->QuickGetMonitor()->Signal();
1776             }
1777             return TRUE;
1778         case AwareLock::LeaveHelperAction_Yield:
1779             YieldProcessorNormalized();
1780             continue;
1781         case AwareLock::LeaveHelperAction_Contention:
1782             // Some thread is updating the syncblock value.
1783             //
1784             // We never toggle GC mode while holding the spinlock (BeginNoTriggerGC/EndNoTriggerGC 
1785             // in EnterSpinLock/ReleaseSpinLock ensures it). Thus we do not need to switch to preemptive
1786             // while waiting on the spinlock.
1787             //
1788             {
1789                 __SwitchToThread(0, ++dwSwitchCount);
1790             }
1791             continue;
1792         default:
1793             // Must be an error otherwise - ignore it
1794             _ASSERTE(action == AwareLock::LeaveHelperAction_Error);
1795             return FALSE;
1796         }
1797     }
1798 }
1799
1800 #endif //!DACCESS_COMPILE
1801
1802 // Returns TRUE if the lock is owned and FALSE otherwise
1803 // threadId is set to the ID (Thread::GetThreadId()) of the thread which owns the lock
1804 // acquisitionCount is set to the number of times the lock needs to be released before
1805 // it is unowned
1806 BOOL ObjHeader::GetThreadOwningMonitorLock(DWORD *pThreadId, DWORD *pAcquisitionCount)
1807 {
1808     CONTRACTL
1809     {
1810         NOTHROW;
1811         GC_NOTRIGGER;
1812 #ifndef DACCESS_COMPILE
1813         if (!IsGCSpecialThread ()) {MODE_COOPERATIVE;} else {MODE_ANY;}
1814 #endif
1815     }
1816     CONTRACTL_END;
1817     SUPPORTS_DAC;
1818
1819
1820     DWORD bits = GetBits();
1821
1822     if (bits & BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX)
1823     {
1824         if (bits & BIT_SBLK_IS_HASHCODE)
1825         {
1826             //
1827             // This thread does not own the lock.
1828             //
1829             *pThreadId = 0;
1830             *pAcquisitionCount = 0;
1831             return FALSE;
1832         }
1833         else
1834         {
1835             //
1836             // We have a syncblk
1837             //
1838             DWORD index = bits & MASK_SYNCBLOCKINDEX;
1839             SyncBlock* psb = g_pSyncTable[(int)index].m_SyncBlock;
1840
1841             _ASSERTE(psb->GetMonitor() != NULL);
1842             Thread* pThread = psb->GetMonitor()->GetHoldingThread();
1843             if(pThread == NULL)
1844             {
1845                 *pThreadId = 0;
1846                 *pAcquisitionCount = 0;
1847                 return FALSE;
1848             }
1849             else
1850             {
1851                 *pThreadId = pThread->GetThreadId();
1852                 *pAcquisitionCount = psb->GetMonitor()->GetRecursionLevel();
1853                 return TRUE;
1854             }
1855         }
1856     }
1857     else
1858     {
1859         //
1860         // We have a thinlock
1861         //
1862
1863         DWORD lockThreadId, recursionLevel;
1864         lockThreadId = bits & SBLK_MASK_LOCK_THREADID;
1865         recursionLevel = (bits & SBLK_MASK_LOCK_RECLEVEL) >> SBLK_RECLEVEL_SHIFT;
1866         // If the thread ID is 0, the recursion level has to be zero,
1867         // but the thread ID doesn't have to be valid because the lock could be orphaned.
1868         _ASSERTE (lockThreadId != 0 || recursionLevel == 0 );
1869
1870         *pThreadId = lockThreadId;
1871         if(lockThreadId != 0)
1872         {
1873             // in the header, the recursionLevel of 0 means the lock is owned once
1874             // (this differs from m_Recursion in the AwareLock)
1875             *pAcquisitionCount = recursionLevel + 1;
1876             return TRUE;
1877         }
1878         else
1879         {
1880             *pAcquisitionCount = 0;
1881             return FALSE;
1882         }
1883     }
1884 }
1885
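// Illustrative sketch (not compiled): how a diagnostic caller might consume
// GetThreadOwningMonitorLock; pHeader is a hypothetical ObjHeader*.
//
//   DWORD owningThreadId, acquisitionCount;
//   if (pHeader->GetThreadOwningMonitorLock(&owningThreadId, &acquisitionCount))
//   {
//       // The monitor is owned: owningThreadId holds it and must release it
//       // acquisitionCount times before it becomes free.
//   }
//   else
//   {
//       // The monitor is unowned; both out-parameters were set to 0.
//   }
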
1886 #ifndef DACCESS_COMPILE
1887
1888 #ifdef MP_LOCKS
1889 DEBUG_NOINLINE void ObjHeader::EnterSpinLock()
1890 {
1891     // NOTE: This function cannot have a dynamic contract.  If it does, the contract's
1892     // destructor will reset the CLR debug state to what it was before entering the
1893     // function, which will undo the BeginNoTriggerGC() call below.
1894     SCAN_SCOPE_BEGIN;
1895     STATIC_CONTRACT_GC_NOTRIGGER;
1896
1897 #ifdef _DEBUG
1898     int i = 0;
1899 #endif
1900
1901     DWORD dwSwitchCount = 0;
1902
1903     while (TRUE)
1904     {
1905 #ifdef _DEBUG
1906 #ifdef _WIN64
1907         // Give 64-bit more time because there isn't a remoting fast path now, and we've hit this assert
1908         // needlessly in CLRSTRESS.
1909         if (i++ > 30000)
1910 #else            
1911         if (i++ > 10000)
1912 #endif // _WIN64            
1913             _ASSERTE(!"ObjHeader::EnterLock timed out");
1914 #endif
1915         // get the value so that it doesn't get changed under us.
1916         LONG curValue = m_SyncBlockValue.LoadWithoutBarrier();
1917
1918         // check if lock taken
1919         if (! (curValue & BIT_SBLK_SPIN_LOCK))
1920         {
1921             // try to take the lock
1922             LONG newValue = curValue | BIT_SBLK_SPIN_LOCK;
1923             LONG result = FastInterlockCompareExchange((LONG*)&m_SyncBlockValue, newValue, curValue);
1924             if (result == curValue)
1925                 break;
1926         }
1927         if  (g_SystemInfo.dwNumberOfProcessors > 1)
1928         {
1929             for (int spinCount = 0; spinCount < BIT_SBLK_SPIN_COUNT; spinCount++)
1930             {
1931                 if  (! (m_SyncBlockValue & BIT_SBLK_SPIN_LOCK))
1932                     break;
1933                 YieldProcessorNormalized(); // indicate to the processor that we are spinning
1934             }
1935             if  (m_SyncBlockValue & BIT_SBLK_SPIN_LOCK)
1936                 __SwitchToThread(0, ++dwSwitchCount);
1937         }
1938         else
1939             __SwitchToThread(0, ++dwSwitchCount);
1940     }
1941
1942     INCONTRACT(Thread* pThread = GetThread());
1943     INCONTRACT(if (pThread != NULL) pThread->BeginNoTriggerGC(__FILE__, __LINE__));
1944 }
1945 #else
1946 DEBUG_NOINLINE void ObjHeader::EnterSpinLock()
1947 {
1948     SCAN_SCOPE_BEGIN;
1949     STATIC_CONTRACT_GC_NOTRIGGER;
1950
1951 #ifdef _DEBUG
1952     int i = 0;
1953 #endif
1954
1955     DWORD dwSwitchCount = 0;
1956
1957     while (TRUE)
1958     {
1959 #ifdef _DEBUG
1960         if (i++ > 10000)
1961             _ASSERTE(!"ObjHeader::EnterLock timed out");
1962 #endif
1963         // get the value so that it doesn't get changed under us.
1964         LONG curValue = m_SyncBlockValue.LoadWithoutBarrier();
1965
1966         // check if lock taken
1967         if (! (curValue & BIT_SBLK_SPIN_LOCK))
1968         {
1969             // try to take the lock
1970             LONG newValue = curValue | BIT_SBLK_SPIN_LOCK;
1971             LONG result = FastInterlockCompareExchange((LONG*)&m_SyncBlockValue, newValue, curValue);
1972             if (result == curValue)
1973                 break;
1974         }
1975         __SwitchToThread(0, ++dwSwitchCount);
1976     }
1977
1978     INCONTRACT(Thread* pThread = GetThread());
1979     INCONTRACT(if (pThread != NULL) pThread->BeginNoTriggerGC(__FILE__, __LINE__));
1980 }
1981 #endif //MP_LOCKS
1982
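// Illustrative sketch (comments only): both EnterSpinLock variants above are
// instances of the classic test-and-test-and-set pattern on BIT_SBLK_SPIN_LOCK,
// paired with ReleaseSpinLock below, which clears the bit with an interlocked AND.
// "header" is a placeholder for m_SyncBlockValue.
//
//   for (;;)
//   {
//       LONG cur = header;                        // cheap plain read first ("test")
//       if (!(cur & BIT_SBLK_SPIN_LOCK) &&
//           FastInterlockCompareExchange(&header, cur | BIT_SBLK_SPIN_LOCK, cur) == cur)
//           break;                                // acquired ("test-and-set")
//       // Back off: spin with YieldProcessorNormalized() on MP machines,
//       // otherwise __SwitchToThread().
//   }
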
1983 DEBUG_NOINLINE void ObjHeader::ReleaseSpinLock()
1984 {
1985     SCAN_SCOPE_END;
1986     LIMITED_METHOD_CONTRACT;
1987
1988     INCONTRACT(Thread* pThread = GetThread());
1989     INCONTRACT(if (pThread != NULL) pThread->EndNoTriggerGC());
1990
1991     FastInterlockAnd(&m_SyncBlockValue, ~BIT_SBLK_SPIN_LOCK);
1992 }
1993
1994 #endif //!DACCESS_COMPILE
1995
1996 #ifndef DACCESS_COMPILE
1997
1998 DWORD ObjHeader::GetSyncBlockIndex()
1999 {
2000     CONTRACTL
2001     {
2002         INSTANCE_CHECK;
2003         THROWS;
2004         GC_NOTRIGGER;
2005         MODE_ANY;
2006         INJECT_FAULT(COMPlusThrowOM(););
2007     }
2008     CONTRACTL_END;
2009
2010     DWORD   indx;
2011
2012     if ((indx = GetHeaderSyncBlockIndex()) == 0)
2013     {
2014         BOOL fMustCreateSyncBlock = FALSE;
2015         {
2016             //Need to get it from the cache
2017             SyncBlockCache::LockHolder lh(SyncBlockCache::GetSyncBlockCache());
2018
2019             //Try one more time
2020             if (GetHeaderSyncBlockIndex() == 0)
2021             {
2022                 ENTER_SPIN_LOCK(this);
2023                 // Now the header will be stable - check whether hashcode, appdomain index or lock information is stored in it.
2024                 DWORD bits = GetBits();
2025                 if (((bits & (BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX | BIT_SBLK_IS_HASHCODE)) == (BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX | BIT_SBLK_IS_HASHCODE)) ||
2026                     ((bits & BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX) == 0))
2027                 {
2028                     // Need a sync block to store this info
2029                     fMustCreateSyncBlock = TRUE;
2030                 }
2031                 else
2032                 {
2033                     SetIndex(BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX | SyncBlockCache::GetSyncBlockCache()->NewSyncBlockSlot(GetBaseObject()));
2034                 }
2035                 LEAVE_SPIN_LOCK(this);
2036             }
2037             // SyncBlockCache::LockHolder goes out of scope here
2038         }
2039
2040         if (fMustCreateSyncBlock)
2041             GetSyncBlock();
2042
2043         if ((indx = GetHeaderSyncBlockIndex()) == 0)
2044             COMPlusThrowOM();
2045     }
2046
2047     return indx;
2048 }
2049
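// Illustrative sketch (not compiled): GetSyncBlockIndex lazily assigns a slot,
// so after a successful call the header carries a stable, nonzero index.
// pHeader is a hypothetical ObjHeader*.
//
//   DWORD indx = pHeader->GetSyncBlockIndex();    // throws OOM on failure
//   _ASSERTE(indx != 0);
//   SyncBlock *psb = SyncTableEntry::GetSyncTableEntry()[indx].m_SyncBlock;
//   // psb can still be NULL here: the slot may exist before GetSyncBlock()
//   // has materialized the SyncBlock itself.
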
2050 #if defined (VERIFY_HEAP)
2051
2052 BOOL ObjHeader::Validate (BOOL bVerifySyncBlkIndex)
2053 {
2054     STATIC_CONTRACT_THROWS;
2055     STATIC_CONTRACT_GC_NOTRIGGER;
2056     STATIC_CONTRACT_MODE_COOPERATIVE;
2057
2058     DWORD bits = GetBits ();
2059     Object * obj = GetBaseObject ();
2060     BOOL bVerifyMore = g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_SYNCBLK;
2061     // The highest 2 bits have an overloaded meaning:
2062     //for string objects:
2063     //         BIT_SBLK_STRING_HAS_NO_HIGH_CHARS   0x80000000
2064     //         BIT_SBLK_STRING_HIGH_CHARS_KNOWN    0x40000000
2065     //         BIT_SBLK_STRING_HAS_SPECIAL_SORT    0xC0000000
2066     //for other objects:
2067     //         BIT_SBLK_FINALIZER_RUN              0x40000000
2068     if (bits & BIT_SBLK_STRING_HIGH_CHAR_MASK)
2069     {
2070         if (obj->GetGCSafeMethodTable () == g_pStringClass)
2071         {
2072             if (bVerifyMore)
2073             {
2074                 ASSERT_AND_CHECK (((StringObject *)obj)->ValidateHighChars());
2075             }
2076         }
2077         else
2078         {
2079             if (bits & BIT_SBLK_FINALIZER_RUN)
2080             {
2081                 ASSERT_AND_CHECK (obj->GetGCSafeMethodTable ()->HasFinalizer ());
2082             }
2083         }
2084     }
2085
2086     // BIT_SBLK_GC_RESERVE (0x20000000) is only set during GC. But for frozen objects, we don't clear the bit.
2087     if (bits & BIT_SBLK_GC_RESERVE)
2088     {
2089         if (!GCHeapUtilities::IsGCInProgress () && !GCHeapUtilities::GetGCHeap()->IsConcurrentGCInProgress ())
2090         {
2091 #ifdef FEATURE_BASICFREEZE
2092             ASSERT_AND_CHECK (GCHeapUtilities::GetGCHeap()->IsInFrozenSegment(obj));
2093 #else //FEATURE_BASICFREEZE
2094             _ASSERTE(!"Reserve bit not cleared");
2095             return FALSE;
2096 #endif //FEATURE_BASICFREEZE
2097         }
2098     }
2099
2100     //Don't know how to verify BIT_SBLK_SPIN_LOCK (0x10000000)
2101     
2102     //BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX (0x08000000)
2103     if (bits & BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX)
2104     {
2105         // If BIT_SBLK_IS_HASHCODE (0x04000000) is not set,
2106         // the rest of the DWORD is the sync block index.
2107         if (!(bits & BIT_SBLK_IS_HASHCODE))
2108         {
2109             if (bVerifySyncBlkIndex  && GCHeapUtilities::GetGCHeap()->RuntimeStructuresValid ())
2110             {
2111                 DWORD sbIndex = bits & MASK_SYNCBLOCKINDEX;
2112                 ASSERT_AND_CHECK(SyncTableEntry::GetSyncTableEntry()[sbIndex].m_Object == obj);             
2113             }
2114         }
2115         else
2116         {
2117             // The rest of the DWORD is a hash code, and there isn't much we can do to validate it.
2118         }
2119     }
2120     else
2121     {
2122         // If BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX is clear, the rest of the DWORD is the thin lock
2123         // thread ID, the thin lock recursion level, and the appdomain index.
2124         DWORD lockThreadId = bits & SBLK_MASK_LOCK_THREADID;
2125         DWORD recursionLevel = (bits & SBLK_MASK_LOCK_RECLEVEL) >> SBLK_RECLEVEL_SHIFT;
2126         // If the thread ID is 0, the recursion level has to be zero,
2127         // but the thread ID doesn't have to be valid because the lock could be orphaned.
2128         ASSERT_AND_CHECK (lockThreadId != 0 || recursionLevel == 0 );     
2129     }
2130     
2131     return TRUE;
2132 }
2133
2134 #endif //VERIFY_HEAP
2135
2136 // This holder takes care of the SyncBlock memory cleanup if an OOM occurs inside a call to NewSyncBlockSlot.
2137 //
2138 // Warning: Assumes you already own the cache lock.
2139 //          Assumes nothing allocated inside the SyncBlock (only releases the memory, does not destruct.)
2140 //
2141 // This holder really just meets GetSyncBlock()'s special needs. It's not a general purpose holder.
2142
2143
2144 // Do not inline this call. (fyuan)
2145 // SyncBlockMemoryHolder normally just checks for a null pointer and returns; inlining VoidDeleteSyncBlockMemory would add expensive exception handling.
2146 void VoidDeleteSyncBlockMemory(SyncBlock* psb)
2147 {
2148     LIMITED_METHOD_CONTRACT;
2149     SyncBlockCache::GetSyncBlockCache()->DeleteSyncBlockMemory(psb);
2150 }
2151
2152 typedef Wrapper<SyncBlock*, DoNothing<SyncBlock*>, VoidDeleteSyncBlockMemory, NULL> SyncBlockMemoryHolder;
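
// Illustrative sketch (comments only): the holder wraps raw SyncBlock memory so
// that a failure between allocation and installation returns the memory to the
// cache; the success path suppresses the release, as GetSyncBlock() does below.
//
//   SyncBlockMemoryHolder holder(SyncBlockCache::GetSyncBlockCache()->GetNextFreeSyncBlock());
//   SyncBlock *psb = holder;
//   /* ... last failable operation, e.g. NewSyncBlockSlot ... */
//   holder.SuppressRelease();     // from here on, the memory is owned elsewhere
//   new (psb) SyncBlock(indx);    // placement-construct in the now-owned memory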
2153
2154
2155 // get the sync block for an existing object
2156 SyncBlock *ObjHeader::GetSyncBlock()
2157 {
2158     CONTRACT(SyncBlock *)
2159     {
2160         INSTANCE_CHECK;
2161         THROWS;
2162         GC_NOTRIGGER;
2163         MODE_ANY;
2164         INJECT_FAULT(COMPlusThrowOM(););
2165         POSTCONDITION(CheckPointer(RETVAL));
2166     }
2167     CONTRACT_END;
2168
2169     PTR_SyncBlock syncBlock = GetBaseObject()->PassiveGetSyncBlock();
2170     DWORD      indx = 0;
2171     BOOL indexHeld = FALSE;
2172
2173     if (syncBlock)
2174     {
2175 #ifdef _DEBUG 
2176         // Has our backpointer been correctly updated through every GC?
2177         PTR_SyncTableEntry pEntries(SyncTableEntry::GetSyncTableEntry());
2178         _ASSERTE(pEntries[GetHeaderSyncBlockIndex()].m_Object == GetBaseObject());
2179 #endif // _DEBUG
2180         RETURN syncBlock;
2181     }
2182
2183     //Need to get it from the cache
2184     {
2185         SyncBlockCache::LockHolder lh(SyncBlockCache::GetSyncBlockCache());
2186
2187         //Try one more time
2188         syncBlock = GetBaseObject()->PassiveGetSyncBlock();
2189         if (syncBlock)
2190             RETURN syncBlock;
2191
2192
2193         SyncBlockMemoryHolder syncBlockMemoryHolder(SyncBlockCache::GetSyncBlockCache()->GetNextFreeSyncBlock());
2194         syncBlock = syncBlockMemoryHolder;
2195
2196         if ((indx = GetHeaderSyncBlockIndex()) == 0)
2197         {
2198             indx = SyncBlockCache::GetSyncBlockCache()->NewSyncBlockSlot(GetBaseObject());
2199         }
2200         else
2201         {
2202             // We already have an index; we need to hold the syncblock.
2203             indexHeld = TRUE;
2204         }
2205
2206         {
2207             //! NewSyncBlockSlot has side-effects that we don't have backout for - thus, that must be the last
2208             //! failable operation called.
2209             CANNOTTHROWCOMPLUSEXCEPTION();
2210             FAULT_FORBID();
2211
2212
2213             syncBlockMemoryHolder.SuppressRelease();
2214
2215             new (syncBlock) SyncBlock(indx);
2216
2217             {
2218                 // after this point, nobody can update the index in the header
2219                 ENTER_SPIN_LOCK(this);
2220
2221                 {
2222                     // If the thin lock in the header is in use, transfer the information to the syncblock
2223                     DWORD bits = GetBits();
2224                     if ((bits & BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX) == 0)
2225                     {
2226                         DWORD lockThreadId = bits & SBLK_MASK_LOCK_THREADID;
2227                         DWORD recursionLevel = (bits & SBLK_MASK_LOCK_RECLEVEL) >> SBLK_RECLEVEL_SHIFT;
2228                         if (lockThreadId != 0 || recursionLevel != 0)
2229                         {
2230                             // recursionLevel can't be non-zero if thread id is 0
2231                             _ASSERTE(lockThreadId != 0);
2232
2233                             Thread *pThread = g_pThinLockThreadIdDispenser->IdToThreadWithValidation(lockThreadId);
2234
2235                             if (pThread == NULL)
2236                             {
2237                                 // The lock is orphaned.
2238                                 pThread = (Thread*) -1;
2239                             }
2240                             syncBlock->InitState(recursionLevel + 1, pThread);
2241                         }
2242                     }
2243                     else if ((bits & BIT_SBLK_IS_HASHCODE) != 0)
2244                     {
2245                         DWORD hashCode = bits & MASK_HASHCODE;
2246
2247                         syncBlock->SetHashCode(hashCode);
2248                     }
2249                 }
2250
2251                 SyncTableEntry::GetSyncTableEntry() [indx].m_SyncBlock = syncBlock;
2252
2253                 // In order to avoid a race where some thread tries to get the AD index and we've already zapped it,
2254                 // make sure the syncblock etc. is all set up with the AD index prior to replacing the index
2255                 // in the header.
2256                 if (GetHeaderSyncBlockIndex() == 0)
2257                 {
2258                     // We have transferred the AppDomain into the syncblock above.
2259                     SetIndex(BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX | indx);
2260                 }
2261
2262                 // If we already had an index, hold the syncblock
2263                 // for the lifetime of the object.
2264                 if (indexHeld)
2265                     syncBlock->SetPrecious();
2266
2267                 LEAVE_SPIN_LOCK(this);
2268             }
2269             // SyncBlockCache::LockHolder goes out of scope here
2270         }
2271     }
2272
2273     RETURN syncBlock;
2274 }
2275
2276 BOOL ObjHeader::Wait(INT32 timeOut, BOOL exitContext)
2277 {
2278     CONTRACTL
2279     {
2280         INSTANCE_CHECK;
2281         THROWS;
2282         GC_TRIGGERS;
2283         MODE_ANY;
2284         INJECT_FAULT(COMPlusThrowOM(););
2285     }
2286     CONTRACTL_END;
2287
2288     //  The following code may cause GC, so we must fetch the sync block from
2289     //  the object now in case it moves.
2290     SyncBlock *pSB = GetBaseObject()->GetSyncBlock();
2291
2292     // GetSyncBlock throws on failure
2293     _ASSERTE(pSB != NULL);
2294
2295     // make sure we own the crst
2296     if (!pSB->DoesCurrentThreadOwnMonitor())
2297         COMPlusThrow(kSynchronizationLockException);
2298
2299 #ifdef _DEBUG
2300     Thread *pThread = GetThread();
2301     DWORD curLockCount = pThread->m_dwLockCount;
2302 #endif
2303
2304     BOOL result = pSB->Wait(timeOut,exitContext);
2305
2306     _ASSERTE (curLockCount == pThread->m_dwLockCount);
2307
2308     return result;
2309 }
2310
2311 void ObjHeader::Pulse()
2312 {
2313     CONTRACTL
2314     {
2315         INSTANCE_CHECK;
2316         THROWS;
2317         GC_TRIGGERS;
2318         MODE_ANY;
2319         INJECT_FAULT(COMPlusThrowOM(););
2320     }
2321     CONTRACTL_END;
2322
2323     //  The following code may cause GC, so we must fetch the sync block from
2324     //  the object now in case it moves.
2325     SyncBlock *pSB = GetBaseObject()->GetSyncBlock();
2326
2327     // GetSyncBlock throws on failure
2328     _ASSERTE(pSB != NULL);
2329
2330     // make sure we own the crst
2331     if (!pSB->DoesCurrentThreadOwnMonitor())
2332         COMPlusThrow(kSynchronizationLockException);
2333
2334     pSB->Pulse();
2335 }
2336
2337 void ObjHeader::PulseAll()
2338 {
2339     CONTRACTL
2340     {
2341         INSTANCE_CHECK;
2342         THROWS;
2343         GC_TRIGGERS;
2344         MODE_ANY;
2345         INJECT_FAULT(COMPlusThrowOM(););
2346     }
2347     CONTRACTL_END;
2348
2349     //  The following code may cause GC, so we must fetch the sync block from
2350     //  the object now in case it moves.
2351     SyncBlock *pSB = GetBaseObject()->GetSyncBlock();
2352
2353     // GetSyncBlock throws on failure
2354     _ASSERTE(pSB != NULL);
2355
2356     // make sure we own the crst
2357     if (!pSB->DoesCurrentThreadOwnMonitor())
2358         COMPlusThrow(kSynchronizationLockException);
2359
2360     pSB->PulseAll();
2361 }
2362
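// Illustrative sketch (not compiled): the ObjHeader Wait/Pulse/PulseAll trio
// backs the managed Monitor.Wait/Pulse/PulseAll surface. All three require the
// calling thread to own the monitor, else they throw
// kSynchronizationLockException. pHeader is a hypothetical ObjHeader* whose
// monitor the current thread already holds.
//
//   pHeader->Pulse();                            // wake one waiter (FIFO)
//   pHeader->PulseAll();                         // wake every queued waiter
//   BOOL signaled = pHeader->Wait(1000, FALSE);  // wait up to 1s; FALSE on timeout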
2363
2364 // ***************************************************************************
2365 //
2366 //              AwareLock class implementation (GC-aware locking)
2367 //
2368 // ***************************************************************************
2369
2370 void AwareLock::AllocLockSemEvent()
2371 {
2372     CONTRACTL
2373     {
2374         INSTANCE_CHECK;
2375         THROWS;
2376         GC_TRIGGERS;
2377         MODE_ANY;
2378         INJECT_FAULT(COMPlusThrowOM(););
2379     }
2380     CONTRACTL_END;
2381
2382     // Before we switch from cooperative, ensure that this syncblock won't disappear
2383     // under us.  For something as expensive as an event, do it permanently rather
2384     // than transiently.
2385     SetPrecious();
2386
2387     GCX_PREEMP();
2388
2389     // No need to take a lock - CLREvent::CreateMonitorEvent is thread safe
2390     m_SemEvent.CreateMonitorEvent((SIZE_T)this);
2391 }
2392
2393 void AwareLock::Enter()
2394 {
2395     CONTRACTL
2396     {
2397         INSTANCE_CHECK;
2398         THROWS;
2399         GC_TRIGGERS;
2400         MODE_ANY;
2401         INJECT_FAULT(COMPlusThrowOM(););
2402     }
2403     CONTRACTL_END;
2404
2405     Thread *pCurThread = GetThread();
2406     LockState state = m_lockState.VolatileLoadWithoutBarrier();
2407     if (!state.IsLocked() || m_HoldingThread != pCurThread)
2408     {
2409         if (m_lockState.InterlockedTryLock_Or_RegisterWaiter(this, state))
2410         {
2411             // We get here if we successfully acquired the mutex.
2412             m_HoldingThread = pCurThread;
2413             m_Recursion = 1;
2414             pCurThread->IncLockCount();
2415
2416 #if defined(_DEBUG) && defined(TRACK_SYNC)
2417             // The best place to grab this is from the ECall frame
2418             Frame   *pFrame = pCurThread->GetFrame();
2419             int      caller = (pFrame && pFrame != FRAME_TOP
2420                                 ? (int)pFrame->GetReturnAddress()
2421                                 : -1);
2422             pCurThread->m_pTrackSync->EnterSync(caller, this);
2423 #endif
2424             return;
2425         }
2426
2427         // Lock was not acquired and the waiter was registered
2428
2429         // Didn't manage to get the mutex, must wait.
2430         // The precondition for EnterEpilog is that the count of waiters be bumped
2431         // to account for this thread, which was done above.
2432         EnterEpilog(pCurThread);
2433         return;
2434     }
2435
2436     // Got the mutex via recursive locking on the same thread.
2437     _ASSERTE(m_Recursion >= 1);
2438     m_Recursion++;
2439
2440 #if defined(_DEBUG) && defined(TRACK_SYNC)
2441     // The best place to grab this is from the ECall frame
2442     Frame   *pFrame = pCurThread->GetFrame();
2443     int      caller = (pFrame && pFrame != FRAME_TOP ? (int)pFrame->GetReturnAddress() : -1);
2444     pCurThread->m_pTrackSync->EnterSync(caller, this);
2445 #endif
2446 }
2447
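// Illustrative sketch (not compiled): AwareLock entries are recursive and must
// be balanced by Leave() calls; pLock is a hypothetical AwareLock*.
//
//   pLock->Enter();                        // first entry: m_Recursion == 1
//   pLock->Enter();                        // same thread:  m_Recursion == 2
//   _ASSERTE(pLock->OwnedByCurrentThread());
//   pLock->Leave();                        // m_Recursion back to 1
//   pLock->Leave();                        // fully released; a waiter may be signaled
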
2448 BOOL AwareLock::TryEnter(INT32 timeOut)
2449 {
2450     CONTRACTL
2451     {
2452         INSTANCE_CHECK;
2453         THROWS;
2454         GC_TRIGGERS;
2455         if (timeOut == 0) {MODE_ANY;} else {MODE_COOPERATIVE;}
2456         INJECT_FAULT(COMPlusThrowOM(););
2457     }
2458     CONTRACTL_END;
2459
2460     Thread  *pCurThread = GetThread();
2461
2462     if (pCurThread->IsAbortRequested())
2463     {
2464         pCurThread->HandleThreadAbort();
2465     }
2466
2467     LockState state = m_lockState.VolatileLoadWithoutBarrier();
2468     if (!state.IsLocked() || m_HoldingThread != pCurThread)
2469     {
2470         if (timeOut == 0
2471                 ? m_lockState.InterlockedTryLock(state)
2472                 : m_lockState.InterlockedTryLock_Or_RegisterWaiter(this, state))
2473         {
2474             // We get here if we successfully acquired the mutex.
2475             m_HoldingThread = pCurThread;
2476             m_Recursion = 1;
2477             pCurThread->IncLockCount();
2478
2479 #if defined(_DEBUG) && defined(TRACK_SYNC)
2480             // The best place to grab this is from the ECall frame
2481             Frame   *pFrame = pCurThread->GetFrame();
2482             int      caller = (pFrame && pFrame != FRAME_TOP ? (int)pFrame->GetReturnAddress() : -1);
2483             pCurThread->m_pTrackSync->EnterSync(caller, this);
2484 #endif
2485             return true;
2486         }
2487
2488         // Lock was not acquired and the waiter was registered if the timeout is nonzero
2489
2490         // Didn't manage to get the mutex, return failure if no timeout, else wait
2491         // for at most timeout milliseconds for the mutex.
2492         if (timeOut == 0)
2493         {
2494             return false;
2495         }
2496
2497         // The precondition for EnterEpilog is that the count of waiters be bumped
2498         // to account for this thread, which was done above
2499         return EnterEpilog(pCurThread, timeOut);
2500     }
2501
2502     // Got the mutex via recursive locking on the same thread.
2503     _ASSERTE(m_Recursion >= 1);
2504     m_Recursion++;
2505 #if defined(_DEBUG) && defined(TRACK_SYNC)
2506     // The best place to grab this is from the ECall frame
2507     Frame   *pFrame = pCurThread->GetFrame();
2508     int      caller = (pFrame && pFrame != FRAME_TOP ? (int)pFrame->GetReturnAddress() : -1);
2509     pCurThread->m_pTrackSync->EnterSync(caller, this);
2510 #endif
2511     return true;
2512 }
2513
2514 BOOL AwareLock::EnterEpilog(Thread* pCurThread, INT32 timeOut)
2515 {
2516     STATIC_CONTRACT_THROWS;
2517     STATIC_CONTRACT_MODE_COOPERATIVE;
2518     STATIC_CONTRACT_GC_TRIGGERS;
2519
2520     // While we are in this frame the thread is considered blocked on the
2521     // critical section of the monitor lock according to the debugger
2522     DebugBlockingItem blockingMonitorInfo;
2523     blockingMonitorInfo.dwTimeout = timeOut;
2524     blockingMonitorInfo.pMonitor = this;
2525     blockingMonitorInfo.pAppDomain = SystemDomain::GetCurrentDomain();
2526     blockingMonitorInfo.type = DebugBlock_MonitorCriticalSection;
2527     DebugBlockingItemHolder holder(pCurThread, &blockingMonitorInfo);
2528
2529     // We need a separate helper because it uses SEH and the holder has a
2530     // destructor
2531     return EnterEpilogHelper(pCurThread, timeOut);
2532 }
2533
2534 #ifdef _DEBUG
2535 #define _LOGCONTENTION
2536 #endif // _DEBUG
2537
2538 #ifdef  _LOGCONTENTION
2539 inline void LogContention()
2540 {
2541     WRAPPER_NO_CONTRACT;
2542 #ifdef LOGGING
2543     if (LoggingOn(LF_SYNC, LL_INFO100))
2544     {
2545         LogSpewAlways("Contention: Stack Trace Begin\n");
2546         void LogStackTrace();
2547         LogStackTrace();
2548         LogSpewAlways("Contention: Stack Trace End\n");
2549     }
2550 #endif
2551 }
2552 #else
2553 #define LogContention()
2554 #endif
2555
2556 double ComputeElapsedTimeInNanosecond(LARGE_INTEGER startTicks, LARGE_INTEGER endTicks)
2557 {
2558     static LARGE_INTEGER freq;
2559     if (freq.QuadPart == 0)
2560         QueryPerformanceFrequency(&freq);
2561
2562     const double NsPerSecond = 1000 * 1000 * 1000;
2563     LONGLONG elapsedTicks = endTicks.QuadPart - startTicks.QuadPart;
2564     return (elapsedTicks * NsPerSecond) / freq.QuadPart;
2565 }
2566
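// Worked example (assumed numbers): with a QueryPerformanceFrequency of
// 10,000,000 ticks per second, an elapsed delta of 25 ticks yields
// 25 * 1,000,000,000 / 10,000,000 = 2,500 nanoseconds.
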
2567 BOOL AwareLock::EnterEpilogHelper(Thread* pCurThread, INT32 timeOut)
2568 {
2569     STATIC_CONTRACT_THROWS;
2570     STATIC_CONTRACT_MODE_COOPERATIVE;
2571     STATIC_CONTRACT_GC_TRIGGERS;
2572
2573     // IMPORTANT!!!
2574     // The caller has already registered a waiter. This function needs to unregister the waiter on all paths (exception paths
2575     // included). On runtimes where thread-abort is supported, a thread-abort also needs to unregister the waiter. There may be
2576     // a possibility for preemptive GC toggles below to handle a thread-abort, that should be taken into consideration when
2577     // porting this code back to .NET Framework.
2578
2579     // Require all callers to be in cooperative mode.  If they have switched to preemptive
2580     // mode temporarily before calling here, then they are responsible for protecting
2581     // the object associated with this lock.
2582     _ASSERTE(pCurThread->PreemptiveGCDisabled());
2583
2584     BOOLEAN IsContentionKeywordEnabled = ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_DOTNET_Context, TRACE_LEVEL_INFORMATION, CLR_CONTENTION_KEYWORD);
2585     LARGE_INTEGER startTicks = { {0} };
2586
2587     if (IsContentionKeywordEnabled)
2588     {
2589         QueryPerformanceCounter(&startTicks);
2590
2591         // Fire a contention start event for a managed contention
2592         FireEtwContentionStart_V1(ETW::ContentionLog::ContentionStructs::ManagedContention, GetClrInstanceId());
2593     }
2594
2595     LogContention();
2596     Thread::IncrementMonitorLockContentionCount(pCurThread);
2597
2598     OBJECTREF obj = GetOwningObject();
2599
2600     // We cannot allow the AwareLock to be cleaned up underneath us by the GC.
2601     IncrementTransientPrecious();
2602
2603     DWORD ret;
2604     GCPROTECT_BEGIN(obj);
2605     {
2606         if (!m_SemEvent.IsMonitorEventAllocated())
2607         {
2608             AllocLockSemEvent();
2609         }
2610         _ASSERTE(m_SemEvent.IsMonitorEventAllocated());
2611
2612         pCurThread->EnablePreemptiveGC();
2613
2614         for (;;)
2615         {
2616             // We might be interrupted during the wait (Thread.Interrupt), so we need an
2617             // exception handler around the call.
2618             struct Param
2619             {
2620                 AwareLock *pThis;
2621                 INT32 timeOut;
2622                 DWORD ret;
2623             } param;
2624             param.pThis = this;
2625             param.timeOut = timeOut;
2626
2627             // Measure the time we wait so that, in the case where we wake up
2628             // and fail to acquire the mutex, we can adjust remaining timeout
2629             // accordingly.
2630             ULONGLONG start = CLRGetTickCount64();
2631
2632             EE_TRY_FOR_FINALLY(Param *, pParam, &param)
2633             {
2634                 pParam->ret = pParam->pThis->m_SemEvent.Wait(pParam->timeOut, TRUE);
2635                 _ASSERTE((pParam->ret == WAIT_OBJECT_0) || (pParam->ret == WAIT_TIMEOUT));
2636             }
2637             EE_FINALLY
2638             {
2639                 if (GOT_EXCEPTION())
2640                 {
2641                     // It is likely the case that an APC threw an exception, for instance Thread.Interrupt(). The wait subsystem
2642                     // guarantees that if a signal to the event being waited upon is observed by the woken thread, that thread's
2643                     // wait will return WAIT_OBJECT_0. So in any race between m_SemEvent being signaled and the wait throwing an
2644                     // exception, a thread that is woken by an exception would not observe the signal, and the signal would wake
2645                     // another thread as necessary.
2646
2647                     // We must decrement the waiter count.
2648                     m_lockState.InterlockedUnregisterWaiter();
2649                 }
2650             } EE_END_FINALLY;
2651
2652             ret = param.ret;
2653             if (ret != WAIT_OBJECT_0)
2654             {
2655                 // We timed out, decrement waiter count.
2656                 m_lockState.InterlockedUnregisterWaiter();
2657                 break;
2658             }
2659
2660             // Spin a bit while trying to acquire the lock. This has a few benefits:
2661             // - Spinning helps to reduce waiter starvation. Since other non-waiter threads can take the lock while there are
2662             //   waiters (see LockState::InterlockedTryLock()), once a waiter wakes it will be able to better compete
2663             //   with other spinners for the lock.
2664             // - If there is another thread that is repeatedly acquiring and releasing the lock, spinning before waiting again
2665             //   helps to prevent a waiter from repeatedly context-switching in and out
2666             // - Further in the same situation above, waking up and waiting shortly thereafter deprioritizes this waiter because
2667             //   events release waiters in FIFO order. Spinning a bit helps a waiter to retain its priority at least for one
2668             //   spin duration before it gets deprioritized behind all other waiters.
2669             if (g_SystemInfo.dwNumberOfProcessors > 1)
2670             {
2671                 bool acquiredLock = false;
2672                 YieldProcessorNormalizationInfo normalizationInfo;
2673                 const DWORD spinCount = g_SpinConstants.dwMonitorSpinCount;
2674                 for (DWORD spinIteration = 0; spinIteration < spinCount; ++spinIteration)
2675                 {
2676                     if (m_lockState.InterlockedTry_LockAndUnregisterWaiterAndObserveWakeSignal(this))
2677                     {
2678                         acquiredLock = true;
2679                         break;
2680                     }
2681
2682                     SpinWait(normalizationInfo, spinIteration);
2683                 }
2684                 if (acquiredLock)
2685                 {
2686                     break;
2687                 }
2688             }
2689
2690             if (m_lockState.InterlockedObserveWakeSignal_Try_LockAndUnregisterWaiter(this))
2691             {
2692                 break;
2693             }
2694
2695             // When calculating duration we consider a couple of special cases.
2696             // If the end tick is the same as the start tick we make the
2697             // duration a millisecond, to ensure we make forward progress if
2698             // there's a lot of contention on the mutex. Secondly, we have to
2699             // cope with the case where the tick counter wrapped while we were
2700             // waiting (we can cope with at most one wrap, so don't expect three-
2701             // month timeouts to be very accurate). Luckily for us, the latter
2702             // case is taken care of by 32-bit modulo arithmetic automatically.
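            // Worked example (illustrative, assuming a 32-bit tick counter):
            // start = 0xFFFFFFF0 and end = 0x00000010 give end - start =
            // 0x00000020 = 32 ticks under unsigned modulo arithmetic, which is
            // the true elapsed time across the wrap.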
2703             if (timeOut != (INT32)INFINITE)
2704             {
2705                 ULONGLONG end = CLRGetTickCount64();
2706                 ULONGLONG duration;
2707                 if (end == start)
2708                 {
2709                     duration = 1;
2710                 }
2711                 else
2712                 {
2713                     duration = end - start;
2714                 }
2715                 duration = min(duration, (DWORD)timeOut);
2716                 timeOut -= (INT32)duration;
2717             }
2718         }
2719
2720         pCurThread->DisablePreemptiveGC();
2721     }
2722     GCPROTECT_END();
2723     DecrementTransientPrecious();
2724
2725     if (IsContentionKeywordEnabled)
2726     {
2727         LARGE_INTEGER endTicks;
2728         QueryPerformanceCounter(&endTicks);
2729
2730         double elapsedTimeInNanosecond = ComputeElapsedTimeInNanosecond(startTicks, endTicks);
2731
2732         // Fire a contention end event for a managed contention
2733         FireEtwContentionStop_V1(ETW::ContentionLog::ContentionStructs::ManagedContention, GetClrInstanceId(), elapsedTimeInNanosecond);
2734     }
2735
2736
2737     if (ret == WAIT_TIMEOUT)
2738     {
2739         return false;
2740     }
2741
2742     m_HoldingThread = pCurThread;
2743     m_Recursion = 1;
2744     pCurThread->IncLockCount();
2745
2746 #if defined(_DEBUG) && defined(TRACK_SYNC)
2747     // The best place to grab this is from the ECall frame
2748     Frame   *pFrame = pCurThread->GetFrame();
2749     int      caller = (pFrame && pFrame != FRAME_TOP ? (int)pFrame->GetReturnAddress() : -1);
2750     pCurThread->m_pTrackSync->EnterSync(caller, this);
2751 #endif
2752     return true;
2753 }
2754
2755
2756 BOOL AwareLock::Leave()
2757 {
2758     CONTRACTL
2759     {
2760         INSTANCE_CHECK;
2761         NOTHROW;
2762         GC_NOTRIGGER;
2763         MODE_ANY;
2764     }
2765     CONTRACTL_END;
2766
2767     Thread* pThread = GetThread();
2768
2769     AwareLock::LeaveHelperAction action = LeaveHelper(pThread);
2770
2771     switch(action)
2772     {
2773     case AwareLock::LeaveHelperAction_None:
2774         // We are done
2775         return TRUE;
2776     case AwareLock::LeaveHelperAction_Signal:
2777         // Signal the event
2778         Signal();
2779         return TRUE;
2780     default:
2781         // Must be an error otherwise
2782         _ASSERTE(action == AwareLock::LeaveHelperAction_Error);
2783         return FALSE;
2784     }
2785 }
2786
2787 LONG AwareLock::LeaveCompletely()
2788 {
2789     WRAPPER_NO_CONTRACT;
2790
2791     LONG count = 0;
2792     while (Leave()) {
2793         count++;
2794     }
2795     _ASSERTE(count > 0);            // otherwise we were never in the lock
2796
2797     return count;
2798 }
2799
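// Illustrative sketch (not compiled): SyncBlock::Wait below relies on the same
// idea via LeaveMonitorCompletely - fully release before blocking, then restore
// the recursion level afterwards. The re-acquire loop here is hypothetical; in
// Wait the restore happens through the PendingSync machinery.
//
//   LONG enterCount = pLock->LeaveCompletely();  // drop all recursive entries
//   /* ... block on the monitor event ... */
//   for (LONG i = 0; i < enterCount; i++)
//       pLock->Enter();                          // restore the recursion level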
2800
2801 BOOL AwareLock::OwnedByCurrentThread()
2802 {
2803     WRAPPER_NO_CONTRACT;
2804     return (GetThread() == m_HoldingThread);
2805 }
2806
2807
2808 // ***************************************************************************
2809 //
2810 //              SyncBlock class implementation
2811 //
2812 // ***************************************************************************
2813
2814 // We maintain two queues for SyncBlock::Wait.
2815 // 1. Inside SyncBlock we queue all threads that are waiting on the SyncBlock.
2816 //    When we pulse, we pick the thread from this queue using FIFO.
2817 // 2. We queue all SyncBlocks that a thread is waiting for in Thread::m_WaitEventLink.
2818 //    When we pulse a thread, we find the event from this queue to set, and we also
2819 //    OR in a 1 bit in the syncblock value saved in the queue, so that we can return
2820 //    immediately from SyncBlock::Wait if the syncblock has been pulsed.
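// Illustrative sketch (comments only): the "1 bit" mentioned above is stored in
// the low bit of WaitEventLink::m_WaitSB, which is otherwise an aligned pointer;
// the masking with ~1 in the code below strips the tag when comparing. The
// setter side shown here is an assumption for illustration.
//
//   pLink->m_WaitSB = (SyncBlock*)((DWORD_PTR)psb | 1);             // mark pulsed
//   bool pulsed = ((DWORD_PTR)pLink->m_WaitSB & 1) != 0;            // test tag
//   SyncBlock *sb = (SyncBlock*)((DWORD_PTR)pLink->m_WaitSB & ~1);  // strip tag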
2821 BOOL SyncBlock::Wait(INT32 timeOut, BOOL exitContext)
2822 {
2823     CONTRACTL
2824     {
2825         INSTANCE_CHECK;
2826         THROWS;
2827         GC_TRIGGERS;
2828         MODE_ANY;
2829         INJECT_FAULT(COMPlusThrowOM());
2830     }
2831     CONTRACTL_END;
2832
2833     Thread  *pCurThread = GetThread();
2834     BOOL     isTimedOut = FALSE;
2835     BOOL     isEnqueued = FALSE;
2836     WaitEventLink waitEventLink;
2837     WaitEventLink *pWaitEventLink;
2838
2839     // As soon as we flip the switch, we are in a race with the GC, which could clean
2840     // up the SyncBlock underneath us -- unless we report the object.
2841     _ASSERTE(pCurThread->PreemptiveGCDisabled());
2842
2843     // Does this thread already wait for this SyncBlock?
2844     WaitEventLink *walk = pCurThread->WaitEventLinkForSyncBlock(this);
2845     if (walk->m_Next) {
2846         if (walk->m_Next->m_WaitSB == this) {
2847             // Wait on the same lock again.
2848             walk->m_Next->m_RefCount ++;
2849             pWaitEventLink = walk->m_Next;
2850         }
2851         else if ((SyncBlock*)(((DWORD_PTR)walk->m_Next->m_WaitSB) & ~1)== this) {
2852             // This thread has been pulsed.  No need to wait.
2853             return TRUE;
2854         }
2855     }
2856     else {
2857         // First time this thread is going to wait for this SyncBlock.
2858         CLREvent* hEvent;
2859         if (pCurThread->m_WaitEventLink.m_Next == NULL) {
2860             hEvent = &(pCurThread->m_EventWait);
2861         }
2862         else {
2863             hEvent = GetEventFromEventStore();
2864         }
2865         waitEventLink.m_WaitSB = this;
2866         waitEventLink.m_EventWait = hEvent;
2867         waitEventLink.m_Thread = pCurThread;
2868         waitEventLink.m_Next = NULL;
2869         waitEventLink.m_LinkSB.m_pNext = NULL;
2870         waitEventLink.m_RefCount = 1;
2871         pWaitEventLink = &waitEventLink;
2872         walk->m_Next = pWaitEventLink;
2873
2874         // Before we enqueue it (and, thus, before it can be dequeued), reset the event
2875         // that will awaken us.
2876         hEvent->Reset();
2877
2878         // This thread is now waiting on this sync block
2879         ThreadQueue::EnqueueThread(pWaitEventLink, this);
2880
2881         isEnqueued = TRUE;
2882     }
2883
2884     _ASSERTE ((SyncBlock*)((DWORD_PTR)walk->m_Next->m_WaitSB & ~1)== this);
2885
2886     PendingSync   syncState(walk);
2887
2888     OBJECTREF     obj = m_Monitor.GetOwningObject();
2889
2890     m_Monitor.IncrementTransientPrecious();
2891
2892     // While we are in this frame the thread is considered blocked on the
2893     // event of the monitor lock according to the debugger
2894     DebugBlockingItem blockingMonitorInfo;
2895     blockingMonitorInfo.dwTimeout = timeOut;
2896     blockingMonitorInfo.pMonitor = &m_Monitor;
2897     blockingMonitorInfo.pAppDomain = SystemDomain::GetCurrentDomain();
2898     blockingMonitorInfo.type = DebugBlock_MonitorEvent;
2899     DebugBlockingItemHolder holder(pCurThread, &blockingMonitorInfo);
2900
2901     GCPROTECT_BEGIN(obj);
2902     {
2903         GCX_PREEMP();
2904
2905         // remember how many times we synchronized
2906         syncState.m_EnterCount = LeaveMonitorCompletely();
2907         _ASSERTE(syncState.m_EnterCount > 0);
2908
2909         isTimedOut = pCurThread->Block(timeOut, &syncState);
2910     }
2911     GCPROTECT_END();
2912     m_Monitor.DecrementTransientPrecious();
2913
2914     return !isTimedOut;
2915 }
2916
2917 void SyncBlock::Pulse()
2918 {
2919     CONTRACTL
2920     {
2921         INSTANCE_CHECK;
2922         NOTHROW;
2923         GC_NOTRIGGER;
2924         MODE_ANY;
2925     }
2926     CONTRACTL_END;
2927
2928     WaitEventLink  *pWaitEventLink;
2929
2930     if ((pWaitEventLink = ThreadQueue::DequeueThread(this)) != NULL)
2931         pWaitEventLink->m_EventWait->Set();
2932 }
2933
2934 void SyncBlock::PulseAll()
2935 {
2936     CONTRACTL
2937     {
2938         INSTANCE_CHECK;
2939         NOTHROW;
2940         GC_NOTRIGGER;
2941         MODE_ANY;
2942     }
2943     CONTRACTL_END;
2944
2945     WaitEventLink  *pWaitEventLink;
2946
2947     while ((pWaitEventLink = ThreadQueue::DequeueThread(this)) != NULL)
2948         pWaitEventLink->m_EventWait->Set();
2949 }
2950
2951 bool SyncBlock::SetInteropInfo(InteropSyncBlockInfo* pInteropInfo)
2952 {
2953     WRAPPER_NO_CONTRACT;
2954     SetPrecious();
2955
2956     // We could be agile, but not have noticed yet.  We can't assert here
2957     //  that we live in any given domain, nor is this an appropriate place
2958     //  to re-parent the syncblock.
2959 /*    _ASSERTE (m_dwAppDomainIndex.m_dwIndex == 0 || 
2960               m_dwAppDomainIndex == SystemDomain::System()->DefaultDomain()->GetIndex() || 
2961               m_dwAppDomainIndex == GetAppDomain()->GetIndex());
2962     m_dwAppDomainIndex = GetAppDomain()->GetIndex();
2963 */
2964     return (FastInterlockCompareExchangePointer(&m_pInteropInfo,
2965                                                 pInteropInfo,
2966                                                 NULL) == NULL);
2967 }
2968
2969 #ifdef EnC_SUPPORTED
2970 // Store information about fields added to this object by EnC
2971 // This must be called from a thread in the AppDomain of this object instance
2972 void SyncBlock::SetEnCInfo(EnCSyncBlockInfo *pEnCInfo) 
2973 {
2974     WRAPPER_NO_CONTRACT;
2975
2976     // We can't recreate the field contents, so this SyncBlock can never go away
2977     SetPrecious();
2978
2979     // Store the field info (should only ever happen once)
2980     _ASSERTE( m_pEnCInfo == NULL );
2981     m_pEnCInfo = pEnCInfo;
2982 }
2983 #endif // EnC_SUPPORTED
2984 #endif // !DACCESS_COMPILE
2985
2986 #if defined(_WIN64) && defined(_DEBUG)
2987 void ObjHeader::IllegalAlignPad()
2988 {
2989     WRAPPER_NO_CONTRACT;
2990 #ifdef LOGGING
2991     void** object = ((void**) this) + 1;
2992     LogSpewAlways("\n\n******** Illegal ObjHeader m_alignpad not 0, object" FMT_ADDR "\n\n",
2993                   DBG_ADDR(object));
2994 #endif
2995     _ASSERTE(m_alignpad == 0);
2996 }
2997 #endif // _WIN64 && _DEBUG
2998
2999