// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
//
// SYNCBLK.CPP
//

//
// Definition of a SyncBlock and the SyncBlockCache which manages it
//


#include "common.h"

#include "vars.hpp"
#include "util.hpp"
#include "class.h"
#include "object.h"
#include "threads.h"
#include "excep.h"
#include "syncblk.h"
#include "interoputil.h"
#include "encee.h"
#include "perfcounters.h"
#include "eventtrace.h"
#include "dllimportcallback.h"
#include "comcallablewrapper.h"
#include "eeconfig.h"
#include "corhost.h"
#include "comdelegate.h"
#include "finalizerthread.h"
#include "gcscan.h"

#ifdef FEATURE_COMINTEROP
#include "runtimecallablewrapper.h"
#endif // FEATURE_COMINTEROP

// Allocate one page's worth. Typically enough.
#define MAXSYNCBLOCK ((PAGE_SIZE-sizeof(void*))/sizeof(SyncBlock))
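// A SyncBlockArray (below) carves one OS page into SyncBlocks, reserving
// sizeof(void*) for its m_Next link. For example, with a 4 KB page and a
// 48-byte SyncBlock this yields roughly 85 blocks per array (illustrative
// figures only; the real sizes are platform dependent).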
#define SYNC_TABLE_INITIAL_SIZE 250

//#define DUMP_SB

class SyncBlockArray
{
  public:
    SyncBlockArray *m_Next;
    BYTE            m_Blocks[MAXSYNCBLOCK * sizeof (SyncBlock)];
};

// For in-place constructor
BYTE g_SyncBlockCacheInstance[sizeof(SyncBlockCache)];

SPTR_IMPL (SyncBlockCache, SyncBlockCache, s_pSyncBlockCache);

#ifndef DACCESS_COMPILE



void SyncBlock::OnADUnload()
{
    WRAPPER_NO_CONTRACT;
#ifdef EnC_SUPPORTED
    if (m_pEnCInfo)
    {
        m_pEnCInfo->Cleanup();
        m_pEnCInfo = NULL;
    }
#endif
}

#ifndef FEATURE_PAL
// static
SLIST_HEADER InteropSyncBlockInfo::s_InteropInfoStandbyList;
#endif // !FEATURE_PAL
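// The standby list recycles InteropSyncBlockInfo storage: DeleteSyncBlock runs
// the destructor in place and pushes the memory onto this SList instead of
// freeing it, and FlushStandbyList (below) later releases it for real.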

InteropSyncBlockInfo::~InteropSyncBlockInfo()
{
    CONTRACTL
    {
        NOTHROW;
        DESTRUCTOR_CHECK;
        GC_TRIGGERS;
        MODE_ANY;
    }
    CONTRACTL_END;

    FreeUMEntryThunkOrInterceptStub();
}

#ifndef FEATURE_PAL
// Deletes all items in code:s_InteropInfoStandbyList.
void InteropSyncBlockInfo::FlushStandbyList()
{
    CONTRACTL
    {
        NOTHROW;
        GC_TRIGGERS;
        MODE_ANY;
    }
    CONTRACTL_END;

    PSLIST_ENTRY pEntry = InterlockedFlushSList(&InteropSyncBlockInfo::s_InteropInfoStandbyList);
    while (pEntry)
    {
        PSLIST_ENTRY pNextEntry = pEntry->Next;

        // make sure to use the global delete since the destructor has already run
        ::delete (void *)pEntry;
        pEntry = pNextEntry;
    }
}
#endif // !FEATURE_PAL

void InteropSyncBlockInfo::FreeUMEntryThunkOrInterceptStub()
{
    CONTRACTL
    {
        NOTHROW;
        DESTRUCTOR_CHECK;
        GC_TRIGGERS;
        MODE_ANY;
    }
    CONTRACTL_END

    if (!g_fEEShutDown)
    {
        void *pUMEntryThunk = GetUMEntryThunk();
        if (pUMEntryThunk != NULL)
        {
            COMDelegate::RemoveEntryFromFPtrHash((UPTR)pUMEntryThunk);
            UMEntryThunk::FreeUMEntryThunk((UMEntryThunk *)pUMEntryThunk);
        }
        else
        {
#if defined(_TARGET_X86_)
            Stub *pInterceptStub = GetInterceptStub();

            if (pInterceptStub != NULL)
            {
                // There may be multiple chained stubs, i.e. a host hook stub calling an MDA
                // stack imbalance stub, and the following DecRef will free all of them.
                pInterceptStub->DecRef();
            }
#else // _TARGET_X86_
            // Intercept stubs are currently not used on other platforms.
            _ASSERTE(GetInterceptStub() == NULL);
#endif // _TARGET_X86_
        }
    }
    m_pUMEntryThunkOrInterceptStub = NULL;
}

#ifdef FEATURE_COMINTEROP
// Returns either NULL or an RCW whose use count has been incremented.
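// m_pRCW doubles as a tiny spin lock via pointer tagging (a summary of the
// encoding used by the code below):
//   NULL    - no RCW was ever assigned
//   0x1     - an RCW existed but has been released
//   p       - live RCW, unlocked
//   p | 0x1 - live RCW, locked while another thread inspects or clears it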
RCW* InteropSyncBlockInfo::GetRCWAndIncrementUseCount()
{
    LIMITED_METHOD_CONTRACT;

    DWORD dwSwitchCount = 0;
    while (true)
    {
        RCW *pRCW = VolatileLoad(&m_pRCW);
        if ((size_t)pRCW <= 0x1)
        {
            // the RCW never existed or has been released
            return NULL;
        }

        if (((size_t)pRCW & 0x1) == 0x0)
        {
            // it looks like we have a chance, try to acquire the lock
            RCW *pLockedRCW = (RCW *)((size_t)pRCW | 0x1);
            if (InterlockedCompareExchangeT(&m_pRCW, pLockedRCW, pRCW) == pRCW)
            {
                // we have the lock on the m_pRCW field, now we can safely "use" the RCW
                pRCW->IncrementUseCount();

                // release the m_pRCW lock
                VolatileStore(&m_pRCW, pRCW);

                // and return the RCW
                return pRCW;
            }
        }

        // somebody else holds the lock, retry
        __SwitchToThread(0, ++dwSwitchCount);
    }
}
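// Typical caller pattern (an illustrative sketch only, not code from this
// file; DecrementUseCount is assumed to be the counterpart of the
// IncrementUseCount call made above):
//
//     RCW *pRCW = pInteropInfo->GetRCWAndIncrementUseCount();
//     if (pRCW != NULL)
//     {
//         // ... use the RCW; the use count keeps it from being deleted ...
//         pRCW->DecrementUseCount();
//     }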

// Sets the m_pRCW field in a thread-safe manner, pRCW can be NULL.
void InteropSyncBlockInfo::SetRawRCW(RCW* pRCW)
{
    LIMITED_METHOD_CONTRACT;

    if (pRCW != NULL)
    {
        // we never set two different RCWs on a single object
        _ASSERTE(m_pRCW == NULL);
        m_pRCW = pRCW;
    }
    else
    {
        DWORD dwSwitchCount = 0;
        while (true)
        {
            RCW *pOldRCW = VolatileLoad(&m_pRCW);

            if ((size_t)pOldRCW <= 0x1)
            {
                // the RCW never existed or has been released
                VolatileStore(&m_pRCW, (RCW *)0x1);
                return;
            }

            if (((size_t)pOldRCW & 0x1) == 0x0)
            {
                // it looks like we have a chance, set the RCW to 0x1
                if (InterlockedCompareExchangeT(&m_pRCW, (RCW *)0x1, pOldRCW) == pOldRCW)
                {
                    // we made it
                    return;
                }
            }

            // somebody else holds the lock, retry
            __SwitchToThread(0, ++dwSwitchCount);
        }
    }
}
#endif // FEATURE_COMINTEROP

void UMEntryThunk::OnADUnload()
{
    LIMITED_METHOD_CONTRACT;
    m_pObjectHandle = NULL;
}

#endif // !DACCESS_COMPILE

PTR_SyncTableEntry SyncTableEntry::GetSyncTableEntry()
{
    LIMITED_METHOD_CONTRACT;
    SUPPORTS_DAC;

    return (PTR_SyncTableEntry)g_pSyncTable;
}

#ifndef DACCESS_COMPILE

SyncTableEntry*& SyncTableEntry::GetSyncTableEntryByRef()
{
    LIMITED_METHOD_CONTRACT;
    return g_pSyncTable;
}

/* static */
SyncBlockCache*& SyncBlockCache::GetSyncBlockCache()
{
    LIMITED_METHOD_CONTRACT;

    return s_pSyncBlockCache;
}


//----------------------------------------------------------------------------
//
//   ThreadQueue Implementation
//
//----------------------------------------------------------------------------
#endif //!DACCESS_COMPILE

// Given a link in the chain, get the Thread that it represents
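// (The SLink is embedded in the WaitEventLink as m_LinkSB, so subtracting
// offsetof(WaitEventLink, m_LinkSB) recovers the containing structure - the
// usual CONTAINING_RECORD idiom.)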
/* static */
inline PTR_WaitEventLink ThreadQueue::WaitEventLinkForLink(PTR_SLink pLink)
{
    LIMITED_METHOD_CONTRACT;
    SUPPORTS_DAC;
    return (PTR_WaitEventLink) (((PTR_BYTE) pLink) - offsetof(WaitEventLink, m_LinkSB));
}

#ifndef DACCESS_COMPILE

// Unlink the head of the Q.  We are always in the SyncBlock's critical
// section.
/* static */
inline WaitEventLink *ThreadQueue::DequeueThread(SyncBlock *psb)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
        CAN_TAKE_LOCK;
    }
    CONTRACTL_END;

    // Be careful, the debugger inspects the queue from out of process and just looks at the memory...
    // it must be valid even if the lock is held. Be careful if you change the way the queue is updated.
    SyncBlockCache::LockHolder lh(SyncBlockCache::GetSyncBlockCache());

    WaitEventLink      *ret = NULL;
    SLink       *pLink = psb->m_Link.m_pNext;

    if (pLink)
    {
        psb->m_Link.m_pNext = pLink->m_pNext;
#ifdef _DEBUG
        pLink->m_pNext = (SLink *)POISONC;
#endif
        ret = WaitEventLinkForLink(pLink);
        _ASSERTE(ret->m_WaitSB == psb);
        COUNTER_ONLY(GetPerfCounters().m_LocksAndThreads.cQueueLength--);
    }
    return ret;
}

// Enqueue is the slow one.  We have to find the end of the Q since we don't
// want to burn storage for this in the SyncBlock.
/* static */
inline void ThreadQueue::EnqueueThread(WaitEventLink *pWaitEventLink, SyncBlock *psb)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
        CAN_TAKE_LOCK;
    }
    CONTRACTL_END;

    _ASSERTE (pWaitEventLink->m_LinkSB.m_pNext == NULL);

    // Be careful, the debugger inspects the queue from out of process and just looks at the memory...
    // it must be valid even if the lock is held. Be careful if you change the way the queue is updated.
    SyncBlockCache::LockHolder lh(SyncBlockCache::GetSyncBlockCache());

    COUNTER_ONLY(GetPerfCounters().m_LocksAndThreads.cQueueLength++);

    SLink       *pPrior = &psb->m_Link;

    while (pPrior->m_pNext)
    {
        // We shouldn't already be in the waiting list!
        _ASSERTE(pPrior->m_pNext != &pWaitEventLink->m_LinkSB);

        pPrior = pPrior->m_pNext;
    }
    pPrior->m_pNext = &pWaitEventLink->m_LinkSB;
}


// Wade through the SyncBlock's list of waiting threads and remove the
// specified thread.
/* static */
BOOL ThreadQueue::RemoveThread (Thread *pThread, SyncBlock *psb)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
    }
    CONTRACTL_END;

    BOOL res = FALSE;

    // Be careful, the debugger inspects the queue from out of process and just looks at the memory...
    // it must be valid even if the lock is held. Be careful if you change the way the queue is updated.
    SyncBlockCache::LockHolder lh(SyncBlockCache::GetSyncBlockCache());

    SLink       *pPrior = &psb->m_Link;
    SLink       *pLink;
    WaitEventLink *pWaitEventLink;

    while ((pLink = pPrior->m_pNext) != NULL)
    {
        pWaitEventLink = WaitEventLinkForLink(pLink);
        if (pWaitEventLink->m_Thread == pThread)
        {
            pPrior->m_pNext = pLink->m_pNext;
#ifdef _DEBUG
            pLink->m_pNext = (SLink *)POISONC;
#endif
            _ASSERTE(pWaitEventLink->m_WaitSB == psb);
            COUNTER_ONLY(GetPerfCounters().m_LocksAndThreads.cQueueLength--);
            res = TRUE;
            break;
        }
        pPrior = pLink;
    }
    return res;
}

#endif //!DACCESS_COMPILE

#ifdef DACCESS_COMPILE
// Enumerates the threads in the queue from front to back by calling
// pCallbackFunction on each one
/* static */
void ThreadQueue::EnumerateThreads(SyncBlock *psb, FP_TQ_THREAD_ENUMERATION_CALLBACK pCallbackFunction, void* pUserData)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
    }
    CONTRACTL_END;
    SUPPORTS_DAC;

    PTR_SLink pLink = psb->m_Link.m_pNext;
    PTR_WaitEventLink pWaitEventLink;

    while (pLink != NULL)
    {
        pWaitEventLink = WaitEventLinkForLink(pLink);

        pCallbackFunction(pWaitEventLink->m_Thread, pUserData);
        pLink = pLink->m_pNext;
    }
}
#endif //DACCESS_COMPILE

#ifndef DACCESS_COMPILE

// ***************************************************************************
//
//              Ephemeral Bitmap Helper
//
// ***************************************************************************

#define card_size 32

#define card_word_width 32

size_t CardIndex (size_t card)
{
    LIMITED_METHOD_CONTRACT;
    return card_size * card;
}

size_t CardOf (size_t idx)
{
    LIMITED_METHOD_CONTRACT;
    return idx / card_size;
}

size_t CardWord (size_t card)
{
    LIMITED_METHOD_CONTRACT;
    return card / card_word_width;
}
inline
unsigned CardBit (size_t card)
{
    LIMITED_METHOD_CONTRACT;
    return (unsigned)(card % card_word_width);
}
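// With card_size == 32 and card_word_width == 32, one DWORD of the ephemeral
// bitmap covers 32 cards * 32 entries = 1024 sync table entries. Worked
// example: entry 5000 lives in card 5000/32 = 156, bitmap word 156/32 = 4,
// bit 156%32 = 28.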

inline
void SyncBlockCache::SetCard (size_t card)
{
    WRAPPER_NO_CONTRACT;
    m_EphemeralBitmap [CardWord (card)] =
        (m_EphemeralBitmap [CardWord (card)] | (1 << CardBit (card)));
}

inline
void SyncBlockCache::ClearCard (size_t card)
{
    WRAPPER_NO_CONTRACT;
    m_EphemeralBitmap [CardWord (card)] =
        (m_EphemeralBitmap [CardWord (card)] & ~(1 << CardBit (card)));
}

inline
BOOL SyncBlockCache::CardSetP (size_t card)
{
    WRAPPER_NO_CONTRACT;
    return (m_EphemeralBitmap [CardWord (card)] & (1 << CardBit (card)));
}

inline
void SyncBlockCache::CardTableSetBit (size_t idx)
{
    WRAPPER_NO_CONTRACT;
    SetCard (CardOf (idx));
}


size_t BitMapSize (size_t cacheSize)
{
    LIMITED_METHOD_CONTRACT;

    return (cacheSize + card_size * card_word_width - 1) / (card_size * card_word_width);
}

// ***************************************************************************
//
//              SyncBlockCache class implementation
//
// ***************************************************************************

SyncBlockCache::SyncBlockCache()
    : m_pCleanupBlockList(NULL),
      m_FreeBlockList(NULL),

      // NOTE: CRST_UNSAFE_ANYMODE prevents a GC mode switch when entering this crst.
      // If you remove this flag, we will switch to preemptive mode when entering
      // g_criticalSection, which means all functions that enter it will become
      // GC_TRIGGERS.  (This includes all uses of LockHolder around SyncBlockCache::GetSyncBlockCache().)
      // So be sure to update the contracts if you remove this flag.
      m_CacheLock(CrstSyncBlockCache, (CrstFlags) (CRST_UNSAFE_ANYMODE | CRST_DEBUGGER_THREAD)),

      m_FreeCount(0),
      m_ActiveCount(0),
      m_SyncBlocks(0),
      m_FreeSyncBlock(0),
      m_FreeSyncTableIndex(1),
      m_FreeSyncTableList(0),
      m_SyncTableSize(SYNC_TABLE_INITIAL_SIZE),
      m_OldSyncTables(0),
      m_bSyncBlockCleanupInProgress(FALSE),
      m_EphemeralBitmap(0)
{
    CONTRACTL
    {
        CONSTRUCTOR_CHECK;
        THROWS;
        GC_NOTRIGGER;
        MODE_ANY;
        INJECT_FAULT(COMPlusThrowOM());
    }
    CONTRACTL_END;
}


// This method is no longer called.
SyncBlockCache::~SyncBlockCache()
{
    CONTRACTL
    {
        DESTRUCTOR_CHECK;
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
    }
    CONTRACTL_END;

    // Clear the list the fast way.
    m_FreeBlockList = NULL;
    //<TODO>@todo we can clear this fast too I guess</TODO>
    m_pCleanupBlockList = NULL;

    // destruct all arrays
    while (m_SyncBlocks)
    {
        SyncBlockArray *next = m_SyncBlocks->m_Next;
        delete m_SyncBlocks;
        m_SyncBlocks = next;
    }

    // Also, now is a good time to clean up all the old tables which we discarded
    // when we overflowed them.
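    // Old tables are chained through their own slot 0: arr[0].m_Object holds
    // the next retired table (see Grow()), so the list needs no extra storage.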
    SyncTableEntry* arr;
    while ((arr = m_OldSyncTables) != 0)
    {
        m_OldSyncTables = (SyncTableEntry*)arr[0].m_Object.Load();
        delete [] arr;
    }
}


// When the GC determines that an object is dead, the low bit of the
// m_Object field of its SyncTableEntry is set; however, the entry is not
// cleaned up immediately because we can't do the COM interop cleanup at GC
// time. Instead it is put on a cleanup list, and at a later time (typically
// during finalization) this list is cleaned up.
//
void SyncBlockCache::CleanupSyncBlocks()
{
    STATIC_CONTRACT_THROWS;
    STATIC_CONTRACT_MODE_COOPERATIVE;

    _ASSERTE(GetThread() == FinalizerThread::GetFinalizerThread());

    // Set the flag indicating sync block cleanup is in progress.
    // IMPORTANT: This must be set before the sync block cleanup bit is reset on the thread.
    m_bSyncBlockCleanupInProgress = TRUE;

    struct Param
    {
        SyncBlockCache *pThis;
        SyncBlock* psb;
#ifdef FEATURE_COMINTEROP
        RCW* pRCW;
#endif
    } param;
    param.pThis = this;
    param.psb = NULL;
#ifdef FEATURE_COMINTEROP
    param.pRCW = NULL;
#endif

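    // State shared between the try body and EE_FINALLY is hoisted into 'param':
    // the EE_TRY_FOR_FINALLY macros run the body through a helper, so anything
    // the finally clause must observe is passed explicitly via pParam rather
    // than captured as a local.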
    EE_TRY_FOR_FINALLY(Param *, pParam, &param)
    {
        // reset the flag
        FinalizerThread::GetFinalizerThread()->ResetSyncBlockCleanup();

        // walk the cleanup list and clean 'em up
        while ((pParam->psb = pParam->pThis->GetNextCleanupSyncBlock()) != NULL)
        {
#ifdef FEATURE_COMINTEROP
            InteropSyncBlockInfo* pInteropInfo = pParam->psb->GetInteropInfoNoCreate();
            if (pInteropInfo)
            {
                pParam->pRCW = pInteropInfo->GetRawRCW();
                if (pParam->pRCW)
                {
                    // We should have initialized the cleanup list with the
                    // first RCW cache we created
                    _ASSERTE(g_pRCWCleanupList != NULL);

                    g_pRCWCleanupList->AddWrapper(pParam->pRCW);

                    pParam->pRCW = NULL;
                    pInteropInfo->SetRawRCW(NULL);
                }
            }
#endif // FEATURE_COMINTEROP

            // Delete the sync block.
            pParam->pThis->DeleteSyncBlock(pParam->psb);
            pParam->psb = NULL;

            // pulse GC mode to allow GC to perform its work
            if (FinalizerThread::GetFinalizerThread()->CatchAtSafePointOpportunistic())
            {
                FinalizerThread::GetFinalizerThread()->PulseGCMode();
            }
        }

#ifdef FEATURE_COMINTEROP
        // Now clean up the RCWs sorted by context
        if (g_pRCWCleanupList != NULL)
            g_pRCWCleanupList->CleanupAllWrappers();
#endif // FEATURE_COMINTEROP
    }
    EE_FINALLY
    {
        // We are finished cleaning up the sync blocks.
        m_bSyncBlockCleanupInProgress = FALSE;

#ifdef FEATURE_COMINTEROP
        if (param.pRCW)
            param.pRCW->Cleanup();
#endif

        if (param.psb)
            DeleteSyncBlock(param.psb);
    } EE_END_FINALLY;
}

// When an appdomain is unloading, we need to ensure that any pointers to
// it from sync blocks (e.g. from COM Callable Wrappers) are properly
// updated so that they fail gracefully if another call is made from
// them.  This is what this routine does.
//
VOID SyncBlockCache::CleanupSyncBlocksInAppDomain(AppDomain *pDomain)
{
    CONTRACTL
    {
        GC_TRIGGERS;
        THROWS;
        MODE_COOPERATIVE;
    }
    CONTRACTL_END;

#ifndef DACCESS_COMPILE
    _ASSERTE(IsFinalizerThread());

    ADIndex index = pDomain->GetIndex();

    ADID id = pDomain->GetId();

    // Make sure we don't race with anybody updating the table
    DWORD maxIndex;

    {
        // Taking this lock here avoids races where m_FreeSyncTableIndex is being updated.
        // (A volatile read would have been enough, however.)
        SyncBlockCache::LockHolder lh(SyncBlockCache::GetSyncBlockCache());
        maxIndex = m_FreeSyncTableIndex;
    }
    BOOL bModifiedCleanupList = FALSE;
    STRESS_LOG1(LF_APPDOMAIN, LL_INFO100, "To cleanup - %d sync blocks", maxIndex);
    DWORD nb;
    for (nb = 1; nb < maxIndex; nb++)
    {
        // This is a check for syncblocks that were already cleaned up.
        if ((size_t)SyncTableEntry::GetSyncTableEntry()[nb].m_Object.Load() & 1)
        {
            continue;
        }

        // If the syncblock pointer is invalid, nothing more we can do.
        SyncBlock *pSyncBlock = SyncTableEntry::GetSyncTableEntry()[nb].m_SyncBlock;
        if (!pSyncBlock)
        {
            continue;
        }

        // If we happen to have a CCW living in the AppDomain being cleaned, then we need to neuter it.
        //  We do this check early because we have to neuter CCWs for agile objects as well.
        //  Neutering the object simply means we disconnect the object from the CCW so it can no longer
        //  be used.  When its ref-count falls to zero, it gets cleaned up.
        STRESS_LOG1(LF_APPDOMAIN, LL_INFO1000000, "SyncBlock %p.", pSyncBlock);
        InteropSyncBlockInfo* pInteropInfo = pSyncBlock->GetInteropInfoNoCreate();
        if (pInteropInfo)
        {
#ifdef FEATURE_COMINTEROP
            ComCallWrapper* pWrap = pInteropInfo->GetCCW();
            if (pWrap)
            {
                SimpleComCallWrapper* pSimpleWrapper = pWrap->GetSimpleWrapper();
                _ASSERTE(pSimpleWrapper);

                if (pSimpleWrapper->GetDomainID() == id)
                {
                    pSimpleWrapper->Neuter();
                }
            }
#endif // FEATURE_COMINTEROP

            UMEntryThunk* umThunk = (UMEntryThunk*)pInteropInfo->GetUMEntryThunk();

            if (umThunk && umThunk->GetDomainId() == id)
            {
                umThunk->OnADUnload();
                STRESS_LOG1(LF_APPDOMAIN, LL_INFO100, "Thunk %x unloaded", umThunk);
            }

#ifdef FEATURE_COMINTEROP
            {
                // we need to take the RCWCache lock to avoid the race with another thread which is
                // removing the RCW from the cache, decoupling it from the object, and deleting the RCW.
                RCWCache* pCache = pDomain->GetRCWCache();
                _ASSERTE(pCache);
                RCWCache::LockHolder lh(pCache);
                RCW* pRCW = pInteropInfo->GetRawRCW();
                if (pRCW && pRCW->GetDomain() == pDomain)
                {
                    // We should have initialized the cleanup list with the
                    // first RCW cache we created
                    _ASSERTE(g_pRCWCleanupList != NULL);

                    g_pRCWCleanupList->AddWrapper(pRCW);

                    pCache->RemoveWrapper(pRCW);
                    pInteropInfo->SetRawRCW(NULL);
                    bModifiedCleanupList = TRUE;
                }
            }
#endif // FEATURE_COMINTEROP
        }

        // NOTE: this will only notify the sync block if it is non-agile and living in the unloading domain.
        //  Agile objects that are still alive will not get notification!
        if (pSyncBlock->GetAppDomainIndex() == index)
        {
            pSyncBlock->OnADUnload();
        }
    }
    STRESS_LOG1(LF_APPDOMAIN, LL_INFO100, "AD cleanup - %d sync blocks done", nb);
    // Make sure nobody decreased m_FreeSyncTableIndex behind our back (we would read
    // off table limits)
    _ASSERTE(maxIndex <= m_FreeSyncTableIndex);

    if (bModifiedCleanupList)
        GetThread()->SetSyncBlockCleanup();

    while (GetThread()->RequireSyncBlockCleanup()) // we also might have something in the cleanup list
        CleanupSyncBlocks();

#ifdef _DEBUG
    {
        SyncBlockCache::LockHolder lh(SyncBlockCache::GetSyncBlockCache());
        DWORD maxIndex = m_FreeSyncTableIndex;
        for (DWORD nb = 1; nb < maxIndex; nb++)
        {
            if ((size_t)SyncTableEntry::GetSyncTableEntry()[nb].m_Object.Load() & 1)
            {
                continue;
            }

            // If the syncblock pointer is invalid, nothing more we can do.
            SyncBlock *pSyncBlock = SyncTableEntry::GetSyncTableEntry()[nb].m_SyncBlock;
            if (!pSyncBlock)
            {
                continue;
            }
            InteropSyncBlockInfo* pInteropInfo = pSyncBlock->GetInteropInfoNoCreate();
            if (pInteropInfo)
            {
                UMEntryThunk* umThunk = (UMEntryThunk*)pInteropInfo->GetUMEntryThunk();

                if (umThunk && umThunk->GetDomainId() == id)
                {
                    _ASSERTE(!umThunk->GetObjectHandle());
                }
            }
        }
    }
#endif

#endif
}


// create the sync block cache
/* static */
void SyncBlockCache::Attach()
{
    LIMITED_METHOD_CONTRACT;
}

// destroy the sync block cache
// This method is no longer called.
#if 0
void SyncBlockCache::DoDetach()
{
    CONTRACTL
    {
        INSTANCE_CHECK;
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
    }
    CONTRACTL_END;

    Object *pObj;
    ObjHeader  *pHeader;


    // Ensure that all the critical sections are released.  This is particularly
    // important in DEBUG, because all critical sections are threaded onto a global
    // list which would otherwise be corrupted.
    for (DWORD i = 0; i < m_FreeSyncTableIndex; i++)
        if (((size_t)SyncTableEntry::GetSyncTableEntry()[i].m_Object & 1) == 0)
            if (SyncTableEntry::GetSyncTableEntry()[i].m_SyncBlock)
            {
                // <TODO>@TODO -- If threads are executing during this detach, they will
                // fail in various ways:
                //
                // 1) They will race between us tearing these data structures down
                //    as they navigate through them.
                //
                // 2) They will unexpectedly see the syncblock destroyed, even though
                //    they hold the synchronization lock, or have been exposed out
                //    to COM, etc.
                //
                // 3) The instance's hash code may change during the shutdown.
                //
                // The correct solution involves suspending the threads earlier, but
                // changing our suspension code so that it allows pumping if we are
                // in a shutdown case.
                //
                // </TODO>

                // Make sure this gets updated because the finalizer thread & others
                // will continue to run for a short while more during our shutdown.
                pObj = SyncTableEntry::GetSyncTableEntry()[i].m_Object;
                pHeader = pObj->GetHeader();

                {
                    ENTER_SPIN_LOCK(pHeader);
                    ADIndex appDomainIndex = pHeader->GetAppDomainIndex();
                    if (! appDomainIndex.m_dwIndex)
                    {
                        SyncBlock* syncBlock = pObj->PassiveGetSyncBlock();
                        if (syncBlock)
                            appDomainIndex = syncBlock->GetAppDomainIndex();
                    }

                    pHeader->ResetIndex();

                    if (appDomainIndex.m_dwIndex)
                    {
                        pHeader->SetIndex(appDomainIndex.m_dwIndex<<SBLK_APPDOMAIN_SHIFT);
                    }
                    LEAVE_SPIN_LOCK(pHeader);
                }

                SyncTableEntry::GetSyncTableEntry()[i].m_Object = (Object *)(m_FreeSyncTableList | 1);
                m_FreeSyncTableList = i << 1;

                DeleteSyncBlock(SyncTableEntry::GetSyncTableEntry()[i].m_SyncBlock);
            }
}
#endif

// destroy the sync block cache
/* static */
// This method is no longer called.
#if 0
void SyncBlockCache::Detach()
{
    SyncBlockCache::GetSyncBlockCache()->DoDetach();
}
#endif


// create the sync block cache
/* static */
void SyncBlockCache::Start()
{
    CONTRACTL
    {
        THROWS;
        GC_NOTRIGGER;
        MODE_ANY;
        INJECT_FAULT(COMPlusThrowOM(););
    }
    CONTRACTL_END;

    DWORD* bm = new DWORD [BitMapSize(SYNC_TABLE_INITIAL_SIZE+1)];

    memset (bm, 0, BitMapSize (SYNC_TABLE_INITIAL_SIZE+1)*sizeof(DWORD));

    SyncTableEntry::GetSyncTableEntryByRef() = new SyncTableEntry[SYNC_TABLE_INITIAL_SIZE+1];
#ifdef _DEBUG
    for (int i = 0; i < SYNC_TABLE_INITIAL_SIZE+1; i++) {
        SyncTableEntry::GetSyncTableEntry()[i].m_SyncBlock = NULL;
    }
#endif

    SyncTableEntry::GetSyncTableEntry()[0].m_SyncBlock = 0;
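    // (Entry 0 is deliberately reserved: m_FreeSyncTableIndex starts at 1, and
    // Grow() reuses a retired table's entry 0 to chain it onto m_OldSyncTables.)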
    SyncBlockCache::GetSyncBlockCache() = new (&g_SyncBlockCacheInstance) SyncBlockCache;

    SyncBlockCache::GetSyncBlockCache()->m_EphemeralBitmap = bm;

#ifndef FEATURE_PAL
    InitializeSListHead(&InteropSyncBlockInfo::s_InteropInfoStandbyList);
#endif // !FEATURE_PAL
}


// destroy the sync block cache
/* static */
void SyncBlockCache::Stop()
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
    }
    CONTRACTL_END;

    // cache must be destroyed first, since it can traverse the table to find all the
    // sync blocks which are live and thus must have their critical sections destroyed.
    if (SyncBlockCache::GetSyncBlockCache())
    {
        delete SyncBlockCache::GetSyncBlockCache();
        SyncBlockCache::GetSyncBlockCache() = 0;
    }

    if (SyncTableEntry::GetSyncTableEntry())
    {
        delete [] SyncTableEntry::GetSyncTableEntry();
        SyncTableEntry::GetSyncTableEntryByRef() = 0;
    }
}


void    SyncBlockCache::InsertCleanupSyncBlock(SyncBlock* psb)
{
    CONTRACTL
    {
        INSTANCE_CHECK;
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
    }
    CONTRACTL_END;

    // free up the threads that are waiting before we use the link
    // for other purposes
    if (psb->m_Link.m_pNext != NULL)
    {
        while (ThreadQueue::DequeueThread(psb) != NULL)
            continue;
    }

#ifdef FEATURE_COMINTEROP
    if (psb->m_pInteropInfo)
    {
        // called during GC
        // so only do minor cleanup
        MinorCleanupSyncBlockComData(psb->m_pInteropInfo);
    }
#endif // FEATURE_COMINTEROP

    // This method will be called only by the GC thread
    //<TODO>@todo add an assert for the above statement</TODO>
    // we don't need to lock here
    //EnterCacheLock();

    psb->m_Link.m_pNext = m_pCleanupBlockList;
    m_pCleanupBlockList = &psb->m_Link;

    // we don't need a lock here
    //LeaveCacheLock();
}

SyncBlock* SyncBlockCache::GetNextCleanupSyncBlock()
{
    LIMITED_METHOD_CONTRACT;

    // we don't need a lock here,
    // as this is called only on the finalizer thread currently

    SyncBlock       *psb = NULL;
    if (m_pCleanupBlockList)
    {
        // get the actual sync block pointer
        psb = (SyncBlock *) (((BYTE *) m_pCleanupBlockList) - offsetof(SyncBlock, m_Link));
        m_pCleanupBlockList = m_pCleanupBlockList->m_pNext;
    }
    return psb;
}


// returns and removes the next free syncblock from the list
// the cache lock must be entered to call this
SyncBlock *SyncBlockCache::GetNextFreeSyncBlock()
{
    CONTRACTL
    {
        INJECT_FAULT(COMPlusThrowOM());
        THROWS;
        GC_NOTRIGGER;
        MODE_ANY;
    }
    CONTRACTL_END;

#ifdef _DEBUG  // Instrumentation for OOM fault injection testing
    delete new char;
#endif

    SyncBlock       *psb;
    SLink           *plst = m_FreeBlockList;

    m_ActiveCount++;

    if (plst)
    {
        m_FreeBlockList = m_FreeBlockList->m_pNext;

        // shouldn't be 0
        m_FreeCount--;

        // get the actual sync block pointer
        psb = (SyncBlock *) (((BYTE *) plst) - offsetof(SyncBlock, m_Link));

        return psb;
    }
    else
    {
        if ((m_SyncBlocks == NULL) || (m_FreeSyncBlock >= MAXSYNCBLOCK))
        {
#ifdef DUMP_SB
//            LogSpewAlways("Allocating new syncblock array\n");
//            DumpSyncBlockCache();
#endif
            SyncBlockArray* newsyncblocks = new(SyncBlockArray);
            if (!newsyncblocks)
                COMPlusThrowOM ();

            newsyncblocks->m_Next = m_SyncBlocks;
            m_SyncBlocks = newsyncblocks;
            m_FreeSyncBlock = 0;
        }
        return &(((SyncBlock*)m_SyncBlocks->m_Blocks)[m_FreeSyncBlock++]);
    }

}

void SyncBlockCache::Grow()
{
    CONTRACTL
    {
        INSTANCE_CHECK;
        THROWS;
        GC_NOTRIGGER;
        MODE_COOPERATIVE;
        INJECT_FAULT(COMPlusThrowOM(););
    }
    CONTRACTL_END;

    STRESS_LOG0(LF_SYNC, LL_INFO10000, "SyncBlockCache::NewSyncBlockSlot growing SyncBlockCache \n");

    NewArrayHolder<SyncTableEntry> newSyncTable (NULL);
    NewArrayHolder<DWORD>          newBitMap    (NULL);
    DWORD *                        oldBitMap;

    // Compute the size of the new synctable. Normally, we double it - unless
    // doing so would create slots with indices too high to fit within the
    // mask. If so, we create a synctable up to the mask limit. If we're
    // already at the mask limit, then the caller is out of luck.
    DWORD newSyncTableSize;
    if (m_SyncTableSize <= (MASK_SYNCBLOCKINDEX >> 1))
    {
        newSyncTableSize = m_SyncTableSize * 2;
    }
    else
    {
        newSyncTableSize = MASK_SYNCBLOCKINDEX;
    }

    if (!(newSyncTableSize > m_SyncTableSize)) // Make sure we actually found room to grow!
    {
        COMPlusThrowOM();
    }

    newSyncTable = new SyncTableEntry[newSyncTableSize];
    newBitMap = new DWORD[BitMapSize (newSyncTableSize)];


    {
        //! From here on, we assume that we will succeed and start doing global side-effects.
        //! Any operation that could fail must occur before this point.
        CANNOTTHROWCOMPLUSEXCEPTION();
        FAULT_FORBID();

        newSyncTable.SuppressRelease();
        newBitMap.SuppressRelease();


        // We chain the old tables because we can't delete
        // them before all the threads are stopped
        // (next GC)
        SyncTableEntry::GetSyncTableEntry() [0].m_Object = (Object *)m_OldSyncTables;
        m_OldSyncTables = SyncTableEntry::GetSyncTableEntry();

        memset (newSyncTable, 0, newSyncTableSize*sizeof (SyncTableEntry));
        memset (newBitMap, 0, BitMapSize (newSyncTableSize)*sizeof (DWORD));
        CopyMemory (newSyncTable, SyncTableEntry::GetSyncTableEntry(),
                    m_SyncTableSize*sizeof (SyncTableEntry));

        CopyMemory (newBitMap, m_EphemeralBitmap,
                    BitMapSize (m_SyncTableSize)*sizeof (DWORD));

        oldBitMap = m_EphemeralBitmap;
        m_EphemeralBitmap = newBitMap;
        delete[] oldBitMap;

        _ASSERTE((m_SyncTableSize & MASK_SYNCBLOCKINDEX) == m_SyncTableSize);
        // note: we do not care if another thread does not see the new size;
        // however, we really do not want it to see the new size without seeing the new array
        //@TODO do we still leak here if two threads come here at the same time ?
        FastInterlockExchangePointer(&SyncTableEntry::GetSyncTableEntryByRef(), newSyncTable.GetValue());

        m_FreeSyncTableIndex++;

        m_SyncTableSize = newSyncTableSize;

#ifdef _DEBUG
        static int dumpSBOnResize = -1;

        if (dumpSBOnResize == -1)
            dumpSBOnResize = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_SBDumpOnResize);

        if (dumpSBOnResize)
        {
            LogSpewAlways("SyncBlockCache resized\n");
            DumpSyncBlockCache();
        }
#endif
    }
}

DWORD SyncBlockCache::NewSyncBlockSlot(Object *obj)
{
    CONTRACTL
    {
        INSTANCE_CHECK;
        THROWS;
        GC_NOTRIGGER;
        MODE_COOPERATIVE;
        INJECT_FAULT(COMPlusThrowOM(););
    }
    CONTRACTL_END;
    _ASSERTE(m_CacheLock.OwnedByCurrentThread()); // GetSyncBlock takes the lock, make sure no one else does.
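    // Free entries are chained through the sync table itself: m_FreeSyncTableList
    // holds (index << 1), and a dead entry's m_Object holds the next link with
    // its low bit set (see GCWeakPtrScanElement, which populates this list).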

    DWORD indexNewEntry;
    if (m_FreeSyncTableList)
    {
        indexNewEntry = (DWORD)(m_FreeSyncTableList >> 1);
        _ASSERTE ((size_t)SyncTableEntry::GetSyncTableEntry()[indexNewEntry].m_Object.Load() & 1);
        m_FreeSyncTableList = (size_t)SyncTableEntry::GetSyncTableEntry()[indexNewEntry].m_Object.Load() & ~1;
    }
    else if ((indexNewEntry = (DWORD)(m_FreeSyncTableIndex)) >= m_SyncTableSize)
    {
        // This is kept out of line to keep stuff like the C++ EH prolog (needed for holders) off
        // the common path.
        Grow();
    }
    else
    {
#ifdef _DEBUG
        static int dumpSBOnNewIndex = -1;

        if (dumpSBOnNewIndex == -1)
            dumpSBOnNewIndex = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_SBDumpOnNewIndex);

        if (dumpSBOnNewIndex)
        {
            LogSpewAlways("SyncBlockCache index incremented\n");
            DumpSyncBlockCache();
        }
#endif
        m_FreeSyncTableIndex++;
    }

    CardTableSetBit (indexNewEntry);

    // In debug builds the m_SyncBlock at indexNewEntry should already be null, since we should
    // start out with a null table and always null it out on delete.
    _ASSERTE(SyncTableEntry::GetSyncTableEntry() [indexNewEntry].m_SyncBlock == NULL);
    SyncTableEntry::GetSyncTableEntry() [indexNewEntry].m_SyncBlock = NULL;
    SyncTableEntry::GetSyncTableEntry() [indexNewEntry].m_Object = obj;

    _ASSERTE(indexNewEntry != 0);

    return indexNewEntry;
}


// free a used sync block, only called from CleanupSyncBlocks.
void SyncBlockCache::DeleteSyncBlock(SyncBlock *psb)
{
    CONTRACTL
    {
        INSTANCE_CHECK;
        THROWS;
        GC_TRIGGERS;
        MODE_ANY;
        INJECT_FAULT(COMPlusThrowOM());
    }
    CONTRACTL_END;

    // clean up comdata
    if (psb->m_pInteropInfo)
    {
#ifdef FEATURE_COMINTEROP
        CleanupSyncBlockComData(psb->m_pInteropInfo);
#endif // FEATURE_COMINTEROP

#ifndef FEATURE_PAL
        if (g_fEEShutDown)
        {
            delete psb->m_pInteropInfo;
        }
        else
        {
            psb->m_pInteropInfo->~InteropSyncBlockInfo();
            InterlockedPushEntrySList(&InteropSyncBlockInfo::s_InteropInfoStandbyList, (PSLIST_ENTRY)psb->m_pInteropInfo);
        }
#else // !FEATURE_PAL
        delete psb->m_pInteropInfo;
#endif // !FEATURE_PAL
    }

#ifdef EnC_SUPPORTED
    // clean up EnC info
    if (psb->m_pEnCInfo)
        psb->m_pEnCInfo->Cleanup();
#endif // EnC_SUPPORTED

    // Destruct the SyncBlock, but don't reclaim its memory.  (Overridden
    // operator delete).
    delete psb;

    // synchronize with the consumers
    // <TODO>@todo we don't really need a lock here, we can come up
    // with some simple algo to avoid taking a lock </TODO>
    {
        SyncBlockCache::LockHolder lh(this);

        DeleteSyncBlockMemory(psb);
    }
}


// returns the sync block memory to the free pool but does not destruct sync block (must own cache lock already)
void    SyncBlockCache::DeleteSyncBlockMemory(SyncBlock *psb)
{
    CONTRACTL
    {
        INSTANCE_CHECK;
        NOTHROW;
        GC_NOTRIGGER;
        FORBID_FAULT;
    }
    CONTRACTL_END

    m_ActiveCount--;
    m_FreeCount++;

    psb->m_Link.m_pNext = m_FreeBlockList;
    m_FreeBlockList = &psb->m_Link;

}

// free a used sync block
void SyncBlockCache::GCDeleteSyncBlock(SyncBlock *psb)
{
    CONTRACTL
    {
        INSTANCE_CHECK;
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
    }
    CONTRACTL_END;

    // Destruct the SyncBlock, but don't reclaim its memory.  (Overridden
    // operator delete).
    delete psb;
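    // psb's storage is still valid after the delete above because SyncBlock's
    // operator delete is overridden not to release memory, so threading the
    // block back onto m_FreeBlockList below is safe.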

    m_ActiveCount--;
    m_FreeCount++;

    psb->m_Link.m_pNext = m_FreeBlockList;
    m_FreeBlockList = &psb->m_Link;
}

void SyncBlockCache::GCWeakPtrScan(HANDLESCANPROC scanProc, uintptr_t lp1, uintptr_t lp2)
{
    CONTRACTL
    {
        INSTANCE_CHECK;
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
    }
    CONTRACTL_END;


    // First delete the obsolete arrays since we have exclusive access
    BOOL fSetSyncBlockCleanup = FALSE;

    SyncTableEntry* arr;
    while ((arr = m_OldSyncTables) != NULL)
    {
        m_OldSyncTables = (SyncTableEntry*)arr[0].m_Object.Load();
        delete [] arr;
    }

#ifdef DUMP_SB
    LogSpewAlways("GCWeakPtrScan starting\n");
#endif

#ifdef VERIFY_HEAP
    if (g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_SYNCBLK)
        STRESS_LOG0 (LF_GC | LF_SYNC, LL_INFO100, "GCWeakPtrScan starting\n");
#endif

    if (GCHeapUtilities::GetGCHeap()->GetCondemnedGeneration() < GCHeapUtilities::GetGCHeap()->GetMaxGeneration())
    {
#ifdef VERIFY_HEAP
        // for VSW 294550: we saw a stale object reference in the SyncBlkCache, so we want to make sure
        // the card table logic above works correctly so that every ephemeral entry is promoted.
        // For verification, we make a copy of the sync table in the relocation phase, promote it using
        // the slow approach, and compare the result with the original one.
        DWORD freeSyncTableIndexCopy = m_FreeSyncTableIndex;
        SyncTableEntry * syncTableShadow = NULL;
        if ((g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_SYNCBLK) && !((ScanContext*)lp1)->promotion)
        {
            syncTableShadow = new(nothrow) SyncTableEntry [m_FreeSyncTableIndex];
            if (syncTableShadow)
            {
                memcpy (syncTableShadow, SyncTableEntry::GetSyncTableEntry(), m_FreeSyncTableIndex * sizeof (SyncTableEntry));
            }
        }
#endif //VERIFY_HEAP

        //scan the bitmap
        size_t dw = 0;
        while (1)
        {
            while (dw < BitMapSize (m_SyncTableSize) && (m_EphemeralBitmap[dw]==0))
            {
                dw++;
            }
            if (dw < BitMapSize (m_SyncTableSize))
            {
                //found one
                for (int i = 0; i < card_word_width; i++)
                {
                    size_t card = i+dw*card_word_width;
                    if (CardSetP (card))
                    {
                        BOOL clear_card = TRUE;
                        for (int idx = 0; idx < card_size; idx++)
                        {
                            size_t nb = CardIndex (card) + idx;
                            if ((nb < m_FreeSyncTableIndex) && (nb > 0))
                            {
                                Object* o = SyncTableEntry::GetSyncTableEntry()[nb].m_Object;
                                if (o && !((size_t)o & 1))
                                {
                                    if (GCHeapUtilities::GetGCHeap()->IsEphemeral (o))
                                    {
                                        clear_card = FALSE;

                                        GCWeakPtrScanElement ((int)nb, scanProc,
                                                              lp1, lp2, fSetSyncBlockCleanup);
                                    }
                                }
                            }
                        }
                        if (clear_card)
                            ClearCard (card);
                    }
                }
                dw++;
            }
            else
                break;
        }

#ifdef VERIFY_HEAP
        // for VSW 294550: we saw a stale object reference in the SyncBlkCache, so we want to make sure
        // the card table logic above works correctly so that every ephemeral entry is promoted. To verify,
        // we make a copy of the sync table, promote it using the slow approach, and compare the result
        // with the real one.
        if (g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_SYNCBLK)
        {
            if (syncTableShadow)
            {
                for (DWORD nb = 1; nb < m_FreeSyncTableIndex; nb++)
                {
                    Object **keyv = (Object **) &syncTableShadow[nb].m_Object;

                    if (((size_t) *keyv & 1) == 0)
                    {
                        (*scanProc) (keyv, NULL, lp1, lp2);
                        SyncBlock   *pSB = syncTableShadow[nb].m_SyncBlock;
                        if (*keyv != 0 && (!pSB || !pSB->IsIDisposable()))
                        {
                            if (syncTableShadow[nb].m_Object != SyncTableEntry::GetSyncTableEntry()[nb].m_Object)
                                DebugBreak ();
                        }
                    }
                }
                delete [] syncTableShadow;
                syncTableShadow = NULL;
            }
            if (freeSyncTableIndexCopy != m_FreeSyncTableIndex)
                DebugBreak ();
        }
#endif //VERIFY_HEAP

    }
    else
    {
        for (DWORD nb = 1; nb < m_FreeSyncTableIndex; nb++)
        {
            GCWeakPtrScanElement (nb, scanProc, lp1, lp2, fSetSyncBlockCleanup);
        }
    }

    if (fSetSyncBlockCleanup)
    {
        // mark the finalizer thread saying requires cleanup
        FinalizerThread::GetFinalizerThread()->SetSyncBlockCleanup();
        FinalizerThread::EnableFinalization();
    }

#if defined(VERIFY_HEAP)
    if (g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_GC)
    {
        if (((ScanContext*)lp1)->promotion)
        {

            for (int nb = 1; nb < (int)m_FreeSyncTableIndex; nb++)
            {
                Object* o = SyncTableEntry::GetSyncTableEntry()[nb].m_Object;
                if (((size_t)o & 1) == 0)
                {
                    o->Validate();
                }
            }
        }
    }
#endif // VERIFY_HEAP
}

/* Scan the weak pointers in the SyncBlockEntry and report them to the GC.  If the
   reference is dead, then return TRUE */

BOOL SyncBlockCache::GCWeakPtrScanElement (int nb, HANDLESCANPROC scanProc, LPARAM lp1, LPARAM lp2,
                                           BOOL& cleanup)
{
    CONTRACTL
    {
        INSTANCE_CHECK;
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
    }
    CONTRACTL_END;

    Object **keyv = (Object **) &SyncTableEntry::GetSyncTableEntry()[nb].m_Object;

#ifdef DUMP_SB
    struct Param
    {
        Object **keyv;
        char *name;
    } param;
    param.keyv = keyv;

    PAL_TRY(Param *, pParam, &param) {
        if (! *pParam->keyv)
            pParam->name = "null";
        else if ((size_t) *pParam->keyv & 1)
            pParam->name = "free";
        else {
            pParam->name = (*pParam->keyv)->GetClass()->GetDebugClassName();
            if (strlen(pParam->name) == 0)
                pParam->name = "<INVALID>";
        }
    } PAL_EXCEPT(EXCEPTION_EXECUTE_HANDLER) {
        param.name = "<INVALID>";
    }
    PAL_ENDTRY
    LogSpewAlways("[%4.4d]: %8.8x, %s\n", nb, *keyv, param.name);
#endif

    if (((size_t) *keyv & 1) == 0)
    {
#ifdef VERIFY_HEAP
        if (g_pConfig->GetHeapVerifyLevel () & EEConfig::HEAPVERIFY_SYNCBLK)
        {
            STRESS_LOG3 (LF_GC | LF_SYNC, LL_INFO100000, "scanning syncblk[%d, %p, %p]\n", nb, (size_t)SyncTableEntry::GetSyncTableEntry()[nb].m_SyncBlock, (size_t)*keyv);
        }
#endif

        (*scanProc) (keyv, NULL, lp1, lp2);
        SyncBlock   *pSB = SyncTableEntry::GetSyncTableEntry()[nb].m_SyncBlock;
        if ((*keyv == 0) || (pSB && pSB->IsIDisposable()))
        {
#ifdef VERIFY_HEAP
            if (g_pConfig->GetHeapVerifyLevel () & EEConfig::HEAPVERIFY_SYNCBLK)
            {
                STRESS_LOG3 (LF_GC | LF_SYNC, LL_INFO100000, "freeing syncblk[%d, %p, %p]\n", nb, (size_t)pSB, (size_t)*keyv);
            }
#endif

            if (*keyv)
            {
                _ASSERTE (pSB);
                GCDeleteSyncBlock(pSB);
                // clean the object's syncblock header
                ((Object*)(*keyv))->GetHeader()->GCResetIndex();
            }
            else if (pSB)
            {

                cleanup = TRUE;
                // insert block into cleanup list
                InsertCleanupSyncBlock (SyncTableEntry::GetSyncTableEntry()[nb].m_SyncBlock);
#ifdef DUMP_SB
                LogSpewAlways("       Cleaning up block at %4.4d\n", nb);
#endif
            }

            // delete the entry
#ifdef DUMP_SB
            LogSpewAlways("       Deleting block at %4.4d\n", nb);
#endif
            SyncTableEntry::GetSyncTableEntry()[nb].m_Object = (Object *)(m_FreeSyncTableList | 1);
            m_FreeSyncTableList = nb << 1;
            SyncTableEntry::GetSyncTableEntry()[nb].m_SyncBlock = NULL;
            return TRUE;
        }
        else
        {
#ifdef DUMP_SB
            LogSpewAlways("       Keeping block at %4.4d with oref %8.8x\n", nb, *keyv);
#endif
        }
    }
    return FALSE;
}

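// Called when a GC completes. If a full GC demoted objects into younger
// generations, the ephemeral bitmap must be re-derived: any live entry whose
// object now sits below max_gen gets its card set again so the next ephemeral
// scan in GCWeakPtrScan will visit it.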
void SyncBlockCache::GCDone(BOOL demoting, int max_gen)
{
    CONTRACTL
    {
        INSTANCE_CHECK;
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
    }
    CONTRACTL_END;

    if (demoting &&
        (GCHeapUtilities::GetGCHeap()->GetCondemnedGeneration() ==
         GCHeapUtilities::GetGCHeap()->GetMaxGeneration()))
    {
        //scan the bitmap
        size_t dw = 0;
        while (1)
        {
            while (dw < BitMapSize (m_SyncTableSize) &&
                   (m_EphemeralBitmap[dw]==(DWORD)~0))
            {
                dw++;
            }
            if (dw < BitMapSize (m_SyncTableSize))
            {
                //found one
                for (int i = 0; i < card_word_width; i++)
                {
                    size_t card = i+dw*card_word_width;
                    if (!CardSetP (card))
                    {
                        for (int idx = 0; idx < card_size; idx++)
                        {
                            size_t nb = CardIndex (card) + idx;
                            if ((nb < m_FreeSyncTableIndex) && (nb > 0))
                            {
                                Object* o = SyncTableEntry::GetSyncTableEntry()[nb].m_Object;
                                if (o && !((size_t)o & 1))
                                {
                                    if (GCHeapUtilities::GetGCHeap()->WhichGeneration (o) < (unsigned int)max_gen)
                                    {
                                        SetCard (card);
                                        break;
                                    }
                                }
                            }
                        }
                    }
                }
                dw++;
            }
            else
                break;
        }
    }
}


#if defined (VERIFY_HEAP)

#ifndef _DEBUG
#ifdef _ASSERTE
#undef _ASSERTE
#endif
#define _ASSERTE(c) if (!(c)) DebugBreak()
#endif
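// (In non-debug builds with heap verification enabled, _ASSERTE is redefined
// above so verification failures still break into the debugger; it is switched
// back to a no-op right after VerifySyncTableEntry.)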
1674
1675 void SyncBlockCache::VerifySyncTableEntry()
1676 {
1677     CONTRACTL
1678     {
1679         INSTANCE_CHECK;
1680         NOTHROW;
1681         GC_NOTRIGGER;
1682         MODE_ANY;
1683     }
1684     CONTRACTL_END;
1685
1686     for (DWORD nb = 1; nb < m_FreeSyncTableIndex; nb++)
1687     {
1688         Object* o = SyncTableEntry::GetSyncTableEntry()[nb].m_Object;
1689         // if the slot was just allocated, the object may still be null
1690         if (o && (((size_t)o & 1) == 0)) 
1691         {
1692             //there is no need to verify next object's header because this is called
1693             //from verify_heap, which will verify every object anyway
1694             o->Validate(TRUE, FALSE);
1695
1696             //
1697             // This loop is just a heuristic to try to catch errors, but it is not 100%.
1698             // To prevent false positives, we weaken our assert below to exclude the case
1699             // where the index is still NULL, but we've reached the end of our loop.
1700             //
1701             static const DWORD max_iterations = 100;
1702             DWORD loop = 0;
1703             
1704             for (; loop < max_iterations; loop++)
1705             {
1706                 // The syncblock index may be updated concurrently by another thread.
1707                 if (o->GetHeader()->GetHeaderSyncBlockIndex() != 0)
1708                 {
1709                     break;
1710                 }
1711                 __SwitchToThread(0, CALLER_LIMITS_SPINNING);
1712             }
1713             
1714             DWORD idx = o->GetHeader()->GetHeaderSyncBlockIndex();
1715             _ASSERTE(idx == nb || ((0 == idx) && (loop == max_iterations)));
1716             _ASSERTE(!GCHeapUtilities::GetGCHeap()->IsEphemeral(o) || CardSetP(CardOf(nb)));
1717         }
1718     }
1719 }
1720
1721 #ifndef _DEBUG
1722 #undef _ASSERTE
1723 #define _ASSERTE(expr) ((void)0)
1724 #endif   // _DEBUG
1725
1726 #endif // VERIFY_HEAP
1727
1728 #if CHECK_APP_DOMAIN_LEAKS
1729 void SyncBlockCache::CheckForUnloadedInstances(ADIndex unloadingIndex)
1730 {
1731     CONTRACTL
1732     {
1733         INSTANCE_CHECK;
1734         NOTHROW;
1735         GC_NOTRIGGER;
1736         MODE_ANY;
1737     }
1738     CONTRACTL_END;
1739
1740     // We can only do this check in leak mode because agile objects will be in the domain with
1741     // their index set to their creating domain, and the check would fail.
1742     if (! g_pConfig->AppDomainLeaks())
1743         return;
1744
1745     for (DWORD nb = 1; nb < m_FreeSyncTableIndex; nb++)
1746     {
1747         SyncTableEntry *pEntry = &SyncTableEntry::GetSyncTableEntry()[nb];
1748         Object *oref = (Object *) pEntry->m_Object;
1749         if (((size_t) oref & 1) != 0)
1750             continue;
1751
1752         ADIndex idx;
1753         if (oref)
1754             idx = pEntry->m_Object->GetHeader()->GetRawAppDomainIndex();
1755         if (! idx.m_dwIndex && pEntry->m_SyncBlock)
1756             idx = pEntry->m_SyncBlock->GetAppDomainIndex();
1757         // if the following assert fires, somebody is holding a reference to an object in an unloaded appdomain
1758         if (idx == unloadingIndex)
1759         {
1760             // object must be agile to have survived the unload. If we can't make it agile, that's a bug
1761             if (!oref->TrySetAppDomainAgile(TRUE))
1762                 _ASSERTE(!"Detected instance of unloaded appdomain that survived GC\n");
1763         }
1764     }
1765 }
1766 #endif
1767
1768 #ifdef _DEBUG
1769
1770 void DumpSyncBlockCache()
1771 {
1772     STATIC_CONTRACT_NOTHROW;
1773
1774     SyncBlockCache *pCache = SyncBlockCache::GetSyncBlockCache();
1775
1776     LogSpewAlways("Dumping SyncBlockCache size %d\n", pCache->m_FreeSyncTableIndex);
1777
1778     static int dumpSBStyle = -1;
1779     if (dumpSBStyle == -1)
1780         dumpSBStyle = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_SBDumpStyle);
1781     if (dumpSBStyle == 0)
1782         return;
1783
1784     BOOL isString = FALSE;
1785     DWORD objectCount = 0;
1786     DWORD slotCount = 0;
1787
1788     for (DWORD nb = 1; nb < pCache->m_FreeSyncTableIndex; nb++)
1789     {
1790         isString = FALSE;
1791         char buffer[1024], buffer2[1024];
1792         LPCUTF8 descrip = "null";
1793         SyncTableEntry *pEntry = &SyncTableEntry::GetSyncTableEntry()[nb];
1794         Object *oref = (Object *) pEntry->m_Object;
1795         if (((size_t) oref & 1) != 0)
1796         {
1797             descrip = "free";
1798             oref = 0;
1799         }
1800         else
1801         {
1802             ++slotCount;
1803             if (oref)
1804             {
1805                 ++objectCount;
1806
1807                 struct Param
1808                 {
1809                     LPCUTF8 descrip;
1810                     Object *oref;
1811                     char *buffer2;
1812                     UINT cch2;
1813                     BOOL isString;
1814                 } param;
1815                 param.descrip = descrip;
1816                 param.oref = oref;
1817                 param.buffer2 = buffer2;
1818                 param.cch2 = COUNTOF(buffer2);
1819                 param.isString = isString;
1820
1821                 PAL_TRY(Param *, pParam, &param)
1822                 {
1823                     pParam->descrip = pParam->oref->GetMethodTable()->GetDebugClassName();
1824                     if (strlen(pParam->descrip) == 0)
1825                         pParam->descrip = "<INVALID>";
1826                     else if (pParam->oref->GetMethodTable() == g_pStringClass)
1827                     {
1828                         sprintf_s(pParam->buffer2, pParam->cch2, "%s (%S)", pParam->descrip, ObjectToSTRINGREF((StringObject*)pParam->oref)->GetBuffer());
1829                         pParam->descrip = pParam->buffer2;
1830                         pParam->isString = TRUE;
1831                     }
1832                 }
1833                 PAL_EXCEPT(EXCEPTION_EXECUTE_HANDLER) {
1834                     param.descrip = "<INVALID>";
1835                 }
1836                 PAL_ENDTRY
1837
1838                 descrip = param.descrip;
1839                 isString = param.isString;
1840             }
1841             ADIndex idx;
1842             if (oref)
1843                 idx = pEntry->m_Object->GetHeader()->GetRawAppDomainIndex();
1844             if (! idx.m_dwIndex && pEntry->m_SyncBlock)
1845                 idx = pEntry->m_SyncBlock->GetAppDomainIndex();
1846             if (idx.m_dwIndex && ! SystemDomain::System()->TestGetAppDomainAtIndex(idx))
1847             {
1848                 sprintf_s(buffer, COUNTOF(buffer), "** unloaded (%3.3x) %s", idx.m_dwIndex, descrip);
1849                 descrip = buffer;
1850             }
1851             else
1852             {
1853                 sprintf_s(buffer, COUNTOF(buffer), "(AD %3.3x) %s", idx.m_dwIndex, descrip);
1854                 descrip = buffer;
1855             }
1856         }
1857         if (dumpSBStyle < 2)
1858             LogSpewAlways("[%4.4d]: %8.8x %s\n", nb, oref, descrip);
1859         else if (dumpSBStyle == 2 && ! isString)
1860             LogSpewAlways("[%4.4d]: %s\n", nb, descrip);
1861     }
1862     LogSpewAlways("Done dumping SyncBlockCache used slots: %d, objects: %d\n", slotCount, objectCount);
1863 }
1864 #endif
1865
1866 // ***************************************************************************
1867 //
1868 //              ObjHeader class implementation
1869 //
1870 // ***************************************************************************
1871
1872 #if defined(ENABLE_CONTRACTS_IMPL)
1873 // The LOCK_TAKEN/RELEASED macros need a "pointer" to the lock object to do
1874 // comparisons between takes & releases (and to provide debugging info to the
1875 // developer).  Ask the syncblock for its lock contract pointer, if the
1876 // syncblock exists.  Otherwise, use the MethodTable* from the Object.  That's not great,
1877 // as it's not unique, so we might miss unbalanced lock takes/releases from
1878 // different objects of the same type.  However, our hands are tied, and we can't
1879 // do much better.
1880 void * ObjHeader::GetPtrForLockContract()
1881 {
1882     if (GetHeaderSyncBlockIndex() == 0)
1883     {
1884         return (void *) GetBaseObject()->GetMethodTable();
1885     }
1886
1887     return PassiveGetSyncBlock()->GetPtrForLockContract();
1888 }
1889 #endif // defined(ENABLE_CONTRACTS_IMPL)
1890
1891 // this enters the monitor of an object
1892 void ObjHeader::EnterObjMonitor()
1893 {
1894     WRAPPER_NO_CONTRACT;
1895     GetSyncBlock()->EnterMonitor();
1896 }
1897
1898 // Non-blocking version of the above
1899 BOOL ObjHeader::TryEnterObjMonitor(INT32 timeOut)
1900 {
1901     WRAPPER_NO_CONTRACT;
1902     return GetSyncBlock()->TryEnterMonitor(timeOut);
1903 }
1904
1905 BOOL ObjHeader::LeaveObjMonitor()
1906 {
1907     CONTRACTL
1908     {
1909         NOTHROW;
1910         GC_TRIGGERS;
1911         MODE_COOPERATIVE;
1912     }
1913     CONTRACTL_END;
1914
1915     // this function switches to preemptive mode, so we need to protect the object on some paths
1916     OBJECTREF thisObj = ObjectToOBJECTREF (GetBaseObject ());
1917
1918     DWORD dwSwitchCount = 0;
1919
1920     for (;;)
1921     {
1922         AwareLock::LeaveHelperAction action = thisObj->GetHeader ()->LeaveObjMonitorHelper(GetThread());
1923
1924         switch(action)
1925         {
1926         case AwareLock::LeaveHelperAction_None:
1927             // We are done
1928             return TRUE;
1929         case AwareLock::LeaveHelperAction_Signal:
1930             {
1931                 // Signal the event
1932                 SyncBlock *psb = thisObj->GetHeader ()->PassiveGetSyncBlock();
1933                 if (psb != NULL)
1934                     psb->QuickGetMonitor()->Signal();
1935             }
1936             return TRUE;
1937         case AwareLock::LeaveHelperAction_Yield:
1938             YieldProcessor();
1939             continue;
1940         case AwareLock::LeaveHelperAction_Contention:
1941             // Some thread is updating the syncblock value.
1942             {
1943                 //protect the object before switching mode
1944                 GCPROTECT_BEGIN (thisObj);
1945                 GCX_PREEMP();
1946                 __SwitchToThread(0, ++dwSwitchCount);
1947                 GCPROTECT_END ();
1948             }
1949             continue;
1950         default:
1951             // Must be an error otherwise - ignore it
1952             _ASSERTE(action == AwareLock::LeaveHelperAction_Error);
1953             return FALSE;
1954         }
1955     }
1956 }
1957
1958 // The only difference between LeaveObjMonitor and LeaveObjMonitorAtException is the switch
1959 // to preemptive mode around __SwitchToThread
1960 BOOL ObjHeader::LeaveObjMonitorAtException()
1961 {
1962     CONTRACTL
1963     {
1964         NOTHROW;
1965         GC_NOTRIGGER;
1966         MODE_COOPERATIVE;
1967     }
1968     CONTRACTL_END;
1969
1970     DWORD dwSwitchCount = 0;
1971
1972     for (;;)
1973     {
1974         AwareLock::LeaveHelperAction action = LeaveObjMonitorHelper(GetThread());
1975
1976         switch(action)
1977         {
1978         case AwareLock::LeaveHelperAction_None:
1979             // We are done
1980             return TRUE;
1981         case AwareLock::LeaveHelperAction_Signal:
1982             {
1983                 // Signal the event
1984                 SyncBlock *psb = PassiveGetSyncBlock();
1985                 if (psb != NULL)
1986                     psb->QuickGetMonitor()->Signal();
1987             }
1988             return TRUE;
1989         case AwareLock::LeaveHelperAction_Yield:
1990             YieldProcessor();
1991             continue;
1992         case AwareLock::LeaveHelperAction_Contention:
1993             // Some thread is updating the syncblock value.
1994             //
1995             // We never toggle GC mode while holding the spinlock (BeginNoTriggerGC/EndNoTriggerGC 
1996             // in EnterSpinLock/ReleaseSpinLock ensures it). Thus we do not need to switch to preemptive
1997             // while waiting on the spinlock.
1998             //
1999             {
2000                 __SwitchToThread(0, ++dwSwitchCount);
2001             }
2002             continue;
2003         default:
2004             // Must be an error otherwise - ignore it
2005             _ASSERTE(action == AwareLock::LeaveHelperAction_Error);
2006             return FALSE;
2007         }
2008     }
2009 }
2010
2011 #endif //!DACCESS_COMPILE
2012
2013 // Returns TRUE if the lock is owned and FALSE otherwise
2014 // threadId is set to the ID (Thread::GetThreadId()) of the thread which owns the lock
2015 // acquisitionCount is set to the number of times the lock needs to be released before
2016 // it is unowned
2017 BOOL ObjHeader::GetThreadOwningMonitorLock(DWORD *pThreadId, DWORD *pAcquisitionCount)
2018 {
2019     CONTRACTL
2020     {
2021         NOTHROW;
2022         GC_NOTRIGGER;
2023         SO_TOLERANT;
2024 #ifndef DACCESS_COMPILE
2025         if (!IsGCSpecialThread ()) {MODE_COOPERATIVE;} else {MODE_ANY;}
2026 #endif
2027     }
2028     CONTRACTL_END;
2029     SUPPORTS_DAC;
2030
2031
2032     DWORD bits = GetBits();
2033
2034     if (bits & BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX)
2035     {
2036         if (bits & BIT_SBLK_IS_HASHCODE)
2037         {
2038             //
2039             // This thread does not own the lock.
2040             //
2041             *pThreadId = 0;
2042             *pAcquisitionCount = 0;
2043             return FALSE;
2044         }
2045         else
2046         {
2047             //
2048             // We have a syncblk
2049             //
2050             DWORD index = bits & MASK_SYNCBLOCKINDEX;
2051             SyncBlock* psb = g_pSyncTable[(int)index].m_SyncBlock;
2052
2053             _ASSERTE(psb->GetMonitor() != NULL);
2054             Thread* pThread = psb->GetMonitor()->m_HoldingThread;
2055             if(pThread == NULL)
2056             {
2057                 *pThreadId = 0;
2058                 *pAcquisitionCount = 0;
2059                 return FALSE;
2060             }
2061             else
2062             {
2063                 *pThreadId = pThread->GetThreadId();
2064                 *pAcquisitionCount = psb->GetMonitor()->m_Recursion;
2065                 return TRUE;
2066             }
2067         }
2068     }
2069     else
2070     {
2071         //
2072         // We have a thinlock
2073         //
2074
2075         DWORD lockThreadId, recursionLevel;
2076         lockThreadId = bits & SBLK_MASK_LOCK_THREADID;
2077         recursionLevel = (bits & SBLK_MASK_LOCK_RECLEVEL) >> SBLK_RECLEVEL_SHIFT;
2078         //if the thread ID is 0, the recursion level has to be zero
2079         //but the thread ID doesn't have to be valid because the lock could be orphaned
2080         _ASSERTE (lockThreadId != 0 || recursionLevel == 0 );
2081
2082         *pThreadId = lockThreadId;
2083         if(lockThreadId != 0)
2084         {
2085             // in the header, the recursionLevel of 0 means the lock is owned once
2086             // (this differs from m_Recursion in the AwareLock)
2087             *pAcquisitionCount = recursionLevel + 1;
2088             return TRUE;
2089         }
2090         else
2091         {
2092             *pAcquisitionCount = 0;
2093             return FALSE;
2094         }
2095     }
2096 }
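
// A usage sketch for GetThreadOwningMonitorLock. DumpMonitorOwner is
// hypothetical and exists only to illustrate the out-parameter contract
// described above:
#if 0 // illustrative sketch, not compiled
void DumpMonitorOwner(Object *pObj)
{
    DWORD threadId = 0;
    DWORD acquisitionCount = 0;

    if (pObj->GetHeader()->GetThreadOwningMonitorLock(&threadId, &acquisitionCount))
    {
        // For a thin lock the count is the header recursion level + 1;
        // for a syncblock it is the AwareLock's m_Recursion.
        LogSpewAlways("lock owned by thread %u, %u release(s) to go\n",
                      threadId, acquisitionCount);
    }
    else
    {
        LogSpewAlways("lock not owned\n");
    }
}
#endif // illustrative sketch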
2097
2098 #ifndef DACCESS_COMPILE
2099
2100 #ifdef MP_LOCKS
2101 DEBUG_NOINLINE void ObjHeader::EnterSpinLock()
2102 {
2103     // NOTE: This function cannot have a dynamic contract.  If it does, the contract's
2104     // destructor will reset the CLR debug state to what it was before entering the
2105     // function, which will undo the BeginNoTriggerGC() call below.
2106     SCAN_SCOPE_BEGIN;
2107     STATIC_CONTRACT_GC_NOTRIGGER;
2108
2109 #ifdef _DEBUG
2110     int i = 0;
2111 #endif
2112
2113     DWORD dwSwitchCount = 0;
2114
2115     while (TRUE)
2116     {
2117 #ifdef _DEBUG
2118 #ifdef _WIN64
2119         // Give 64-bit more time because there isn't a remoting fast path now, and we've hit this assert
2120         // needlessly in CLRSTRESS.
2121         if (i++ > 30000)
2122 #else            
2123         if (i++ > 10000)
2124 #endif // _WIN64            
2125             _ASSERTE(!"ObjHeader::EnterLock timed out");
2126 #endif
2127         // get the value so that it doesn't get changed under us.
2128         LONG curValue = m_SyncBlockValue.LoadWithoutBarrier();
2129
2130         // check if lock taken
2131         if (! (curValue & BIT_SBLK_SPIN_LOCK))
2132         {
2133             // try to take the lock
2134             LONG newValue = curValue | BIT_SBLK_SPIN_LOCK;
2135             LONG result = FastInterlockCompareExchange((LONG*)&m_SyncBlockValue, newValue, curValue);
2136             if (result == curValue)
2137                 break;
2138         }
2139         if  (g_SystemInfo.dwNumberOfProcessors > 1)
2140         {
2141             for (int spinCount = 0; spinCount < BIT_SBLK_SPIN_COUNT; spinCount++)
2142             {
2143                 if  (! (m_SyncBlockValue & BIT_SBLK_SPIN_LOCK))
2144                     break;
2145                     YieldProcessor();               // indicate to the processor that we are spinning
2146             }
2147             if  (m_SyncBlockValue & BIT_SBLK_SPIN_LOCK)
2148                 __SwitchToThread(0, ++dwSwitchCount);
2149         }
2150         else
2151             __SwitchToThread(0, ++dwSwitchCount);
2152     }
2153
2154     INCONTRACT(Thread* pThread = GetThread());
2155     INCONTRACT(if (pThread != NULL) pThread->BeginNoTriggerGC(__FILE__, __LINE__));
2156 }
2157 #else
2158 DEBUG_NOINLINE void ObjHeader::EnterSpinLock()
2159 {
2160     SCAN_SCOPE_BEGIN;
2161     STATIC_CONTRACT_GC_NOTRIGGER;
2162
2163 #ifdef _DEBUG
2164     int i = 0;
2165 #endif
2166
2167     DWORD dwSwitchCount = 0;
2168
2169     while (TRUE)
2170     {
2171 #ifdef _DEBUG
2172         if (i++ > 10000)
2173             _ASSERTE(!"ObjHeader::EnterLock timed out");
2174 #endif
2175         // get the value so that it doesn't get changed under us.
2176         LONG curValue = m_SyncBlockValue.LoadWithoutBarrier();
2177
2178         // check if lock taken
2179         if (! (curValue & BIT_SBLK_SPIN_LOCK))
2180         {
2181             // try to take the lock
2182             LONG newValue = curValue | BIT_SBLK_SPIN_LOCK;
2183             LONG result = FastInterlockCompareExchange((LONG*)&m_SyncBlockValue, newValue, curValue);
2184             if (result == curValue)
2185                 break;
2186         }
2187         __SwitchToThread(0, ++dwSwitchCount);
2188     }
2189
2190     INCONTRACT(Thread* pThread = GetThread());
2191     INCONTRACT(if (pThread != NULL) pThread->BeginNoTriggerGC(__FILE__, __LINE__));
2192 }
2193 #endif //MP_LOCKS
2194
2195 DEBUG_NOINLINE void ObjHeader::ReleaseSpinLock()
2196 {
2197     SCAN_SCOPE_END;
2198     LIMITED_METHOD_CONTRACT;
2199
2200     INCONTRACT(Thread* pThread = GetThread());
2201     INCONTRACT(if (pThread != NULL) pThread->EndNoTriggerGC());
2202
2203     FastInterlockAnd(&m_SyncBlockValue, ~BIT_SBLK_SPIN_LOCK);
2204 }
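
// The acquire/release pair above is a test-and-test-and-set spinlock on one
// bit of the header word. A minimal portable sketch of the same pattern,
// using only <atomic>; BIT_LOCK is a hypothetical stand-in for
// BIT_SBLK_SPIN_LOCK:
#if 0 // illustrative sketch, not compiled
#include <atomic>

const long BIT_LOCK = 0x10000000;
std::atomic<long> g_value(0);

static void Acquire()
{
    for (;;)
    {
        long cur = g_value.load(std::memory_order_relaxed);
        // Only attempt the interlocked operation when the bit looks free,
        // so contended spinning stays on plain cache-line reads.
        if (!(cur & BIT_LOCK) &&
            g_value.compare_exchange_weak(cur, cur | BIT_LOCK,
                                          std::memory_order_acquire))
            return;
    }
}

static void Release()
{
    // Clear only the lock bit; all other header bits are left untouched,
    // exactly as the FastInterlockAnd above does.
    g_value.fetch_and(~BIT_LOCK, std::memory_order_release);
}
#endif // illustrative sketch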
2205
2206 #endif //!DACCESS_COMPILE
2207
2208 ADIndex ObjHeader::GetRawAppDomainIndex()
2209 {
2210     LIMITED_METHOD_CONTRACT;
2211     SUPPORTS_DAC;
2212
2213     // pull the value out before checking it to avoid a race condition
2214     DWORD value = m_SyncBlockValue.LoadWithoutBarrier();
2215     if ((value & BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX) == 0)
2216         return ADIndex((value >> SBLK_APPDOMAIN_SHIFT) & SBLK_MASK_APPDOMAININDEX);
2217     return ADIndex(0);
2218 }
2219
2220 ADIndex ObjHeader::GetAppDomainIndex()
2221 {
2222     STATIC_CONTRACT_NOTHROW;
2223     STATIC_CONTRACT_GC_NOTRIGGER;
2224     STATIC_CONTRACT_SO_TOLERANT;
2225     STATIC_CONTRACT_SUPPORTS_DAC;
2226
2227     ADIndex indx = GetRawAppDomainIndex();
2228     if (indx.m_dwIndex)
2229         return indx;
2230     SyncBlock* syncBlock = PassiveGetSyncBlock();
2231     if (! syncBlock)
2232         return ADIndex(0);
2233
2234     return syncBlock->GetAppDomainIndex();
2235 }
2236
2237 #ifndef DACCESS_COMPILE
2238
2239 void ObjHeader::SetAppDomainIndex(ADIndex indx)
2240 {
2241     CONTRACTL
2242     {
2243         INSTANCE_CHECK;
2244         THROWS;
2245         GC_NOTRIGGER;
2246         MODE_ANY;
2247         INJECT_FAULT(COMPlusThrowOM(););
2248     }
2249     CONTRACTL_END;
2250
2251     //
2252     // This should only be called during the header initialization,
2253     // so don't worry about races.
2254     //
2255
2256     BOOL done = FALSE;
2257
2258 #ifdef _DEBUG
2259     static int forceSB = -1;
2260
2261     if (forceSB == -1)
2262         forceSB = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_ADForceSB);
2263
2264     if (forceSB)
2265         // force a syncblock so we get one for every object.
2266         GetSyncBlock();
2267 #endif
2268
2269     if (GetHeaderSyncBlockIndex() == 0 && indx.m_dwIndex < SBLK_MASK_APPDOMAININDEX)
2270     {
2271         ENTER_SPIN_LOCK(this);
2272         //Try one more time
2273         if (GetHeaderSyncBlockIndex() == 0)
2274         {
2275             _ASSERTE(GetRawAppDomainIndex().m_dwIndex == 0);
2276             // can store it in the object header
2277             FastInterlockOr(&m_SyncBlockValue, indx.m_dwIndex << SBLK_APPDOMAIN_SHIFT);
2278             done = TRUE;
2279         }
2280         LEAVE_SPIN_LOCK(this);
2281     }
2282
2283     if (!done)
2284     {
2285         // must create a syncblock entry and store the appdomain indx there
2286         SyncBlock *psb = GetSyncBlock();
2287         _ASSERTE(psb);
2288         psb->SetAppDomainIndex(indx);
2289     }
2290 }
2291
2292 void ObjHeader::ResetAppDomainIndex(ADIndex indx)
2293 {
2294     CONTRACTL
2295     {
2296         INSTANCE_CHECK;
2297         THROWS;
2298         GC_NOTRIGGER;
2299         MODE_ANY;
2300     }
2301     CONTRACTL_END;
2302
2303     //
2304     // This should only be called during the header initialization,
2305     // so don't worry about races.
2306     //
2307
2308     BOOL done = FALSE;
2309
2310     if (GetHeaderSyncBlockIndex() == 0 && indx.m_dwIndex < SBLK_MASK_APPDOMAININDEX)
2311     {
2312         ENTER_SPIN_LOCK(this);
2313         //Try one more time
2314         if (GetHeaderSyncBlockIndex() == 0)
2315         {
2316             // can store it in the object header
2317             while (TRUE)
2318             {
2319                 DWORD oldValue = m_SyncBlockValue.LoadWithoutBarrier();
2320                 DWORD newValue = (oldValue & (~(SBLK_MASK_APPDOMAININDEX << SBLK_APPDOMAIN_SHIFT))) |
2321                     (indx.m_dwIndex << SBLK_APPDOMAIN_SHIFT);
2322                 if (FastInterlockCompareExchange((LONG*)&m_SyncBlockValue,
2323                                                  newValue,
2324                                                  oldValue) == (LONG)oldValue)
2325                 {
2326                     break;
2327                 }
2328             }
2329             done = TRUE;
2330         }
2331         LEAVE_SPIN_LOCK(this);
2332     }
2333
2334     if (!done)
2335     {
2336         // must create a syncblock entry and store the appdomain indx there
2337         SyncBlock *psb = GetSyncBlock();
2338         _ASSERTE(psb);
2339         psb->SetAppDomainIndex(indx);
2340     }
2341 }
2342
2343 void ObjHeader::ResetAppDomainIndexNoFailure(ADIndex indx)
2344 {
2345     CONTRACTL
2346     {
2347         INSTANCE_CHECK;
2348         NOTHROW;
2349         GC_NOTRIGGER;
2350         MODE_ANY;
2351         PRECONDITION(indx.m_dwIndex < SBLK_MASK_APPDOMAININDEX);
2352     }
2353     CONTRACTL_END;
2354
2355     ENTER_SPIN_LOCK(this);
2356     if (GetHeaderSyncBlockIndex() == 0)
2357     {
2358         // can store it in the object header
2359         while (TRUE)
2360         {
2361             DWORD oldValue = m_SyncBlockValue.LoadWithoutBarrier();
2362             DWORD newValue = (oldValue & (~(SBLK_MASK_APPDOMAININDEX << SBLK_APPDOMAIN_SHIFT))) |
2363                 (indx.m_dwIndex << SBLK_APPDOMAIN_SHIFT);
2364             if (FastInterlockCompareExchange((LONG*)&m_SyncBlockValue,
2365                                              newValue,
2366                                              oldValue) == (LONG)oldValue)
2367             {
2368                 break;
2369             }
2370         }
2371     }
2372     else
2373     {
2374         SyncBlock *psb = PassiveGetSyncBlock();
2375         _ASSERTE(psb);
2376         psb->SetAppDomainIndex(indx);
2377     }
2378     LEAVE_SPIN_LOCK(this);
2379 }
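
// Both Reset paths above use the same read-modify-write CAS shape on a
// packed bitfield: clear the field, OR in the new value, and retry if a
// concurrent writer changed the word. A minimal sketch with hypothetical
// kShift/kMask values (the real SBLK_* constants live in syncblk.h):
#if 0 // illustrative sketch, not compiled
#include <atomic>
#include <cstdint>

const uint32_t kShift = 16;
const uint32_t kMask  = 0x7FF;      // hypothetical field mask
std::atomic<uint32_t> g_header(0);

static void SetField(uint32_t field)
{
    for (;;)
    {
        uint32_t oldValue = g_header.load(std::memory_order_relaxed);
        uint32_t newValue = (oldValue & ~(kMask << kShift)) | (field << kShift);
        if (g_header.compare_exchange_weak(oldValue, newValue))
            break;                  // nobody raced us; the field is updated
    }
}
#endif // illustrative sketch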
2380
2381 DWORD ObjHeader::GetSyncBlockIndex()
2382 {
2383     CONTRACTL
2384     {
2385         INSTANCE_CHECK;
2386         THROWS;
2387         GC_NOTRIGGER;
2388         MODE_ANY;
2389         INJECT_FAULT(COMPlusThrowOM(););
2390     }
2391     CONTRACTL_END;
2392
2393     DWORD   indx;
2394
2395     if ((indx = GetHeaderSyncBlockIndex()) == 0)
2396     {
2397         BOOL fMustCreateSyncBlock = FALSE;
2398
2399         if (GetAppDomainIndex().m_dwIndex)
2400         {
2401             // if we have an appdomain index set, then we must create a sync block to store it
2402             fMustCreateSyncBlock = TRUE;
2403         }
2404         else
2405         {
2406             //Need to get it from the cache
2407             SyncBlockCache::LockHolder lh(SyncBlockCache::GetSyncBlockCache());
2408
2409             //Try one more time
2410             if (GetHeaderSyncBlockIndex() == 0)
2411             {
2412                 ENTER_SPIN_LOCK(this);
2413                 // Now the header will be stable - check whether hashcode, appdomain index or lock information is stored in it.
2414                 DWORD bits = GetBits();
2415                 if (((bits & (BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX | BIT_SBLK_IS_HASHCODE)) == (BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX | BIT_SBLK_IS_HASHCODE)) ||
2416                     ((bits & BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX) == 0 &&
2417                      (bits & ((SBLK_MASK_APPDOMAININDEX<<SBLK_APPDOMAIN_SHIFT)|SBLK_MASK_LOCK_RECLEVEL|SBLK_MASK_LOCK_THREADID)) != 0))
2418                 {
2419                     // Need a sync block to store this info
2420                     fMustCreateSyncBlock = TRUE;
2421                 }
2422                 else
2423                 {
2424                     SetIndex(BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX | SyncBlockCache::GetSyncBlockCache()->NewSyncBlockSlot(GetBaseObject()));
2425                 }
2426                 LEAVE_SPIN_LOCK(this);
2427             }
2428             // SyncBlockCache::LockHolder goes out of scope here
2429         }
2430
2431         if (fMustCreateSyncBlock)
2432             GetSyncBlock();
2433
2434         if ((indx = GetHeaderSyncBlockIndex()) == 0)
2435             COMPlusThrowOM();
2436     }
2437
2438     return indx;
2439 }
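
// GetSyncBlockIndex above is double-checked creation: an unsynchronized
// fast-path read, then the cache lock, then a re-check before publishing.
// A minimal sketch of that shape; g_cacheLock, g_index and CreateSlot are
// hypothetical stand-ins:
#if 0 // illustrative sketch, not compiled
#include <atomic>
#include <mutex>

std::mutex g_cacheLock;
std::atomic<unsigned> g_index(0);

static unsigned CreateSlot() { return 1; } // stand-in allocator

static unsigned GetIndex()
{
    unsigned indx = g_index.load(std::memory_order_acquire);
    if (indx == 0)
    {
        std::lock_guard<std::mutex> lh(g_cacheLock);
        indx = g_index.load(std::memory_order_relaxed);
        if (indx == 0)                      // try one more time under the lock
        {
            indx = CreateSlot();
            g_index.store(indx, std::memory_order_release);
        }
    }
    return indx;
}
#endif // illustrative sketch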
2440
2441 #if defined (VERIFY_HEAP)
2442
2443 BOOL ObjHeader::Validate (BOOL bVerifySyncBlkIndex)
2444 {
2445     STATIC_CONTRACT_THROWS;
2446     STATIC_CONTRACT_GC_NOTRIGGER;
2447     STATIC_CONTRACT_SO_TOLERANT;
2448     STATIC_CONTRACT_MODE_COOPERATIVE;
2449     
2450     DWORD bits = GetBits ();
2451     Object * obj = GetBaseObject ();
2452     BOOL bVerifyMore = g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_SYNCBLK;
2453     //the highest 2 bits have overloaded meaning
2454     //for string objects:
2455     //         BIT_SBLK_STRING_HAS_NO_HIGH_CHARS   0x80000000
2456     //         BIT_SBLK_STRING_HIGH_CHARS_KNOWN    0x40000000
2457     //         BIT_SBLK_STRING_HAS_SPECIAL_SORT    0xC0000000
2458     //for other objects:
2459     //         BIT_SBLK_AGILE_IN_PROGRESS          0x80000000
2460     //         BIT_SBLK_FINALIZER_RUN              0x40000000
2461     if (bits & BIT_SBLK_STRING_HIGH_CHAR_MASK)
2462     {
2463         if (obj->GetGCSafeMethodTable () == g_pStringClass)
2464         {
2465             if (bVerifyMore)
2466             {
2467                 ASSERT_AND_CHECK (((StringObject *)obj)->ValidateHighChars());
2468             }
2469         }
2470         else
2471         {
2472 #if CHECK_APP_DOMAIN_LEAKS
2473             if (bVerifyMore)
2474             {  
2475                 if (bits & BIT_SBLK_AGILE_IN_PROGRESS)
2476                 {
2477                     BOOL fResult;
2478                     ASSERT_AND_CHECK (
2479                         //BIT_SBLK_AGILE_IN_PROGRESS is set only if the object needs to check appdomain agile
2480                         obj->ShouldCheckAppDomainAgile(FALSE, &fResult)
2481                         //before BIT_SBLK_AGILE_IN_PROGRESS is cleared, the object might already be marked as agile 
2482                         ||(obj->PassiveGetSyncBlock () && obj->PassiveGetSyncBlock ()->IsAppDomainAgile ())
2483                         ||(obj->PassiveGetSyncBlock () && obj->PassiveGetSyncBlock ()->IsCheckedForAppDomainAgile ())
2484                     );
2485                 }
2486             }
2487 #else //CHECK_APP_DOMAIN_LEAKS
2488             //BIT_SBLK_AGILE_IN_PROGRESS is set only in debug build
2489             ASSERT_AND_CHECK (!(bits & BIT_SBLK_AGILE_IN_PROGRESS));
2490 #endif  //CHECK_APP_DOMAIN_LEAKS
2491             if (bits & BIT_SBLK_FINALIZER_RUN)
2492             {
2493                 ASSERT_AND_CHECK (obj->GetGCSafeMethodTable ()->HasFinalizer ());
2494             }
2495         }
2496     }
2497
2498     //BIT_SBLK_GC_RESERVE (0x20000000) is only set during GC. But for frozen objects, we don't clear the bit
2499     if (bits & BIT_SBLK_GC_RESERVE)
2500     {
2501         if (!GCHeapUtilities::IsGCInProgress () && !GCHeapUtilities::GetGCHeap()->IsConcurrentGCInProgress ())
2502         {
2503 #ifdef FEATURE_BASICFREEZE
2504             ASSERT_AND_CHECK (GCHeapUtilities::GetGCHeap()->IsInFrozenSegment(obj));
2505 #else //FEATURE_BASICFREEZE
2506             _ASSERTE(!"Reserve bit not cleared");
2507             return FALSE;
2508 #endif //FEATURE_BASICFREEZE
2509         }
2510     }
2511
2512     //Don't know how to verify BIT_SBLK_SPIN_LOCK (0x10000000)
2513     
2514     //BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX (0x08000000)
2515     if (bits & BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX)
2516     {
2517         //if BIT_SBLK_IS_HASHCODE (0x04000000) is not set, 
2518         //rest of the DWORD is SyncBlk Index
2519         if (!(bits & BIT_SBLK_IS_HASHCODE))
2520         {
2521             if (bVerifySyncBlkIndex  && GCScan::GetGcRuntimeStructuresValid ())
2522             {
2523                 DWORD sbIndex = bits & MASK_SYNCBLOCKINDEX;
2524                 ASSERT_AND_CHECK(SyncTableEntry::GetSyncTableEntry()[sbIndex].m_Object == obj);             
2525             }
2526         }
2527         else
2528         {
2529             //  rest of the DWORD is a hash code and we don't have much to validate it
2530         }
2531     }
2532     else
2533     {
2534         //if BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX is clear, rest of DWORD is thin lock thread ID, 
2535         //thin lock recursion level and appdomain index
2536         DWORD lockThreadId = bits & SBLK_MASK_LOCK_THREADID;
2537         DWORD recursionLevel = (bits & SBLK_MASK_LOCK_RECLEVEL) >> SBLK_RECLEVEL_SHIFT;
2538         //if the thread ID is 0, the recursion level has to be zero
2539         //but the thread ID doesn't have to be valid because the lock could be orphaned
2540         ASSERT_AND_CHECK (lockThreadId != 0 || recursionLevel == 0 );     
2541
2542         DWORD adIndex  = (bits >> SBLK_APPDOMAIN_SHIFT) & SBLK_MASK_APPDOMAININDEX;
2543         if (adIndex!= 0)
2544         {
2545 #ifndef _DEBUG            
2546             //in non debug build, only objects of domain neutral type have appdomain index in header
2547             ASSERT_AND_CHECK (obj->GetGCSafeMethodTable()->IsDomainNeutral());
2548 #endif //!_DEBUG
2549             //todo: validate the AD index.
2550             //The trick here is that agile objects could have an invalid AD index. Ideally we should call
2551             //Object::GetAppDomain to do all the agile validation, but it has side effects like marking the object
2552             //as agile, and it only does the check if g_pConfig->AppDomainLeaks() is on
2553         }
2554     }
2555     
2556     return TRUE;
2557 }
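
// A decoding sketch for the header states Validate walks above, using the
// masks referenced elsewhere in this file. DescribeHeader is hypothetical
// and exists only to make the bit layout concrete:
#if 0 // illustrative sketch, not compiled
static void DescribeHeader(DWORD bits)
{
    if (bits & BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX)
    {
        if (bits & BIT_SBLK_IS_HASHCODE)
            LogSpewAlways("hash code: %x\n", bits & MASK_HASHCODE);
        else
            LogSpewAlways("syncblock index: %u\n", bits & MASK_SYNCBLOCKINDEX);
    }
    else
    {
        // thin lock: thread id, recursion level and appdomain index
        LogSpewAlways("thin lock: thread %u, recursion %u, AD index %u\n",
                      bits & SBLK_MASK_LOCK_THREADID,
                      (bits & SBLK_MASK_LOCK_RECLEVEL) >> SBLK_RECLEVEL_SHIFT,
                      (bits >> SBLK_APPDOMAIN_SHIFT) & SBLK_MASK_APPDOMAININDEX);
    }
}
#endif // illustrative sketch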
2558
2559 #endif //VERIFY_HEAP
2560
2561 // This holder takes care of the SyncBlock memory cleanup if an OOM occurs inside a call to NewSyncBlockSlot.
2562 //
2563 // Warning: Assumes you already own the cache lock.
2564 //          Assumes nothing allocated inside the SyncBlock (only releases the memory, does not destruct.)
2565 //
2566 // This holder really just meets GetSyncBlock()'s special needs. It's not a general purpose holder.
2567
2568
2569 // Do not inline this call. (fyuan)
2570 // SyncBlockMemoryHolder is normally just a check for an empty pointer and a return. Inlining VoidDeleteSyncBlockMemory would add expensive exception handling.
2571 void VoidDeleteSyncBlockMemory(SyncBlock* psb)
2572 {
2573     LIMITED_METHOD_CONTRACT;
2574     SyncBlockCache::GetSyncBlockCache()->DeleteSyncBlockMemory(psb);
2575 }
2576
2577 typedef Wrapper<SyncBlock*, DoNothing<SyncBlock*>, VoidDeleteSyncBlockMemory, NULL> SyncBlockMemoryHolder;
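
// What the Wrapper typedef above buys is plain RAII: delete the memory on
// every exit path unless the success path suppresses it. A minimal sketch
// of the same shape (MemoryGuard is hypothetical):
#if 0 // illustrative sketch, not compiled
class MemoryGuard
{
    SyncBlock *m_psb;
public:
    explicit MemoryGuard(SyncBlock *psb) : m_psb(psb) {}
    ~MemoryGuard() { if (m_psb) VoidDeleteSyncBlockMemory(m_psb); }
    void SuppressRelease() { m_psb = NULL; } // success path: keep the memory
};
#endif // illustrative sketch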
2578
2579
2580 // get the sync block for an existing object
2581 SyncBlock *ObjHeader::GetSyncBlock()
2582 {
2583     CONTRACT(SyncBlock *)
2584     {
2585         INSTANCE_CHECK;
2586         THROWS;
2587         GC_NOTRIGGER;
2588         MODE_ANY;
2589         INJECT_FAULT(COMPlusThrowOM(););
2590         POSTCONDITION(CheckPointer(RETVAL));
2591     }
2592     CONTRACT_END;
2593
2594     PTR_SyncBlock syncBlock = GetBaseObject()->PassiveGetSyncBlock();
2595     DWORD      indx = 0;
2596     BOOL indexHeld = FALSE;
2597
2598     if (syncBlock)
2599     {
2600 #ifdef _DEBUG 
2601         // Has our backpointer been correctly updated through every GC?
2602         PTR_SyncTableEntry pEntries(SyncTableEntry::GetSyncTableEntry());
2603         _ASSERTE(pEntries[GetHeaderSyncBlockIndex()].m_Object == GetBaseObject());
2604 #endif // _DEBUG
2605         RETURN syncBlock;
2606     }
2607
2608     //Need to get it from the cache
2609     {
2610         SyncBlockCache::LockHolder lh(SyncBlockCache::GetSyncBlockCache());
2611
2612         //Try one more time
2613         syncBlock = GetBaseObject()->PassiveGetSyncBlock();
2614         if (syncBlock)
2615             RETURN syncBlock;
2616
2617
2618         SyncBlockMemoryHolder syncBlockMemoryHolder(SyncBlockCache::GetSyncBlockCache()->GetNextFreeSyncBlock());
2619         syncBlock = syncBlockMemoryHolder;
2620
2621         if ((indx = GetHeaderSyncBlockIndex()) == 0)
2622         {
2623             indx = SyncBlockCache::GetSyncBlockCache()->NewSyncBlockSlot(GetBaseObject());
2624         }
2625         else
2626         {
2627             //We already have an index, so we need to hold the syncblock
2628             indexHeld = TRUE;
2629         }
2630
2631         {
2632             //! NewSyncBlockSlot has side-effects that we don't have backout for - thus, that must be the last
2633             //! failable operation called.
2634             CANNOTTHROWCOMPLUSEXCEPTION();
2635             FAULT_FORBID();
2636
2637
2638             syncBlockMemoryHolder.SuppressRelease();
2639
2640             new (syncBlock) SyncBlock(indx);
2641
2642             {
2643                 // after this point, nobody can update the index in the header to give an AD index
2644                 ENTER_SPIN_LOCK(this);
2645
2646                 {
2647                     // If there's an appdomain index stored in the header, transfer it to the syncblock
2648
2649                     ADIndex dwAppDomainIndex = GetAppDomainIndex();
2650                     if (dwAppDomainIndex.m_dwIndex)
2651                         syncBlock->SetAppDomainIndex(dwAppDomainIndex);
2652
2653                     // If the thin lock in the header is in use, transfer the information to the syncblock
2654                     DWORD bits = GetBits();
2655                     if ((bits & BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX) == 0)
2656                     {
2657                         DWORD lockThreadId = bits & SBLK_MASK_LOCK_THREADID;
2658                         DWORD recursionLevel = (bits & SBLK_MASK_LOCK_RECLEVEL) >> SBLK_RECLEVEL_SHIFT;
2659                         if (lockThreadId != 0 || recursionLevel != 0)
2660                         {
2661                             // recursionLevel can't be non-zero if thread id is 0
2662                             _ASSERTE(lockThreadId != 0);
2663
2664                             Thread *pThread = g_pThinLockThreadIdDispenser->IdToThreadWithValidation(lockThreadId);
2665
2666                             if (pThread == NULL)
2667                             {
2668                                 // The lock is orphaned.
2669                                 pThread = (Thread*) -1;
2670                             }
2671                             syncBlock->InitState();
2672                             syncBlock->SetAwareLock(pThread, recursionLevel + 1);
2673                         }
2674                     }
2675                     else if ((bits & BIT_SBLK_IS_HASHCODE) != 0)
2676                     {
2677                         DWORD hashCode = bits & MASK_HASHCODE;
2678
2679                         syncBlock->SetHashCode(hashCode);
2680                     }
2681                 }
2682
2683                 SyncTableEntry::GetSyncTableEntry() [indx].m_SyncBlock = syncBlock;
2684
2685                 // in order to avoid a race where some thread tries to get the AD index and we've already nuked it,
2686                 // make sure the syncblock etc. is all set up with the AD index prior to replacing the index
2687                 // in the header
2688                 if (GetHeaderSyncBlockIndex() == 0)
2689                 {
2690                     // We have transferred the AppDomain into the syncblock above.
2691                     SetIndex(BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX | indx);
2692                 }
2693
2694                 //If we already had an index, hold the syncblock
2695                 //for the lifetime of the object.
2696                 if (indexHeld)
2697                     syncBlock->SetPrecious();
2698
2699                 LEAVE_SPIN_LOCK(this);
2700             }
2701             // SyncBlockCache::LockHolder goes out of scope here
2702         }
2703     }
2704
2705     RETURN syncBlock;
2706 }
2707
2708 BOOL ObjHeader::Wait(INT32 timeOut, BOOL exitContext)
2709 {
2710     CONTRACTL
2711     {
2712         INSTANCE_CHECK;
2713         THROWS;
2714         GC_TRIGGERS;
2715         MODE_ANY;
2716         INJECT_FAULT(COMPlusThrowOM(););
2717     }
2718     CONTRACTL_END;
2719
2720     //  The following code may cause GC, so we must fetch the sync block from
2721     //  the object now in case it moves.
2722     SyncBlock *pSB = GetBaseObject()->GetSyncBlock();
2723
2724     // GetSyncBlock throws on failure
2725     _ASSERTE(pSB != NULL);
2726
2727     // make sure we own the crst
2728     if (!pSB->DoesCurrentThreadOwnMonitor())
2729         COMPlusThrow(kSynchronizationLockException);
2730
2731 #ifdef _DEBUG
2732     Thread *pThread = GetThread();
2733     DWORD curLockCount = pThread->m_dwLockCount;
2734 #endif
2735
2736     BOOL result = pSB->Wait(timeOut,exitContext);
2737
2738     _ASSERTE (curLockCount == pThread->m_dwLockCount);
2739
2740     return result;
2741 }
2742
2743 void ObjHeader::Pulse()
2744 {
2745     CONTRACTL
2746     {
2747         INSTANCE_CHECK;
2748         THROWS;
2749         GC_TRIGGERS;
2750         MODE_ANY;
2751         INJECT_FAULT(COMPlusThrowOM(););
2752     }
2753     CONTRACTL_END;
2754
2755     //  The following code may cause GC, so we must fetch the sync block from
2756     //  the object now in case it moves.
2757     SyncBlock *pSB = GetBaseObject()->GetSyncBlock();
2758
2759     // GetSyncBlock throws on failure
2760     _ASSERTE(pSB != NULL);
2761
2762     // make sure we own the crst
2763     if (!pSB->DoesCurrentThreadOwnMonitor())
2764         COMPlusThrow(kSynchronizationLockException);
2765
2766     pSB->Pulse();
2767 }
2768
2769 void ObjHeader::PulseAll()
2770 {
2771     CONTRACTL
2772     {
2773         INSTANCE_CHECK;
2774         THROWS;
2775         GC_TRIGGERS;
2776         MODE_ANY;
2777         INJECT_FAULT(COMPlusThrowOM(););
2778     }
2779     CONTRACTL_END;
2780
2781     //  The following code may cause GC, so we must fetch the sync block from
2782     //  the object now in case it moves.
2783     SyncBlock *pSB = GetBaseObject()->GetSyncBlock();
2784
2785     // GetSyncBlock throws on failure
2786     _ASSERTE(pSB != NULL);
2787
2788     // make sure we own the crst
2789     if (!pSB->DoesCurrentThreadOwnMonitor())
2790         COMPlusThrow(kSynchronizationLockException);
2791
2792     pSB->PulseAll();
2793 }
2794
2795
2796 // ***************************************************************************
2797 //
2798 //              AwareLock class implementation (GC-aware locking)
2799 //
2800 // ***************************************************************************
2801
2802 void AwareLock::AllocLockSemEvent()
2803 {
2804     CONTRACTL
2805     {
2806         INSTANCE_CHECK;
2807         THROWS;
2808         GC_TRIGGERS;
2809         MODE_ANY;
2810         INJECT_FAULT(COMPlusThrowOM(););
2811     }
2812     CONTRACTL_END;
2813
2814     // Before we switch from cooperative, ensure that this syncblock won't disappear
2815     // under us.  For something as expensive as an event, do it permanently rather
2816     // than transiently.
2817     SetPrecious();
2818
2819     GCX_PREEMP();
2820
2821     // No need to take a lock - CLREvent::CreateMonitorEvent is thread safe
2822     m_SemEvent.CreateMonitorEvent((SIZE_T)this);
2823 }
2824
2825 void AwareLock::Enter()
2826 {
2827     CONTRACTL
2828     {
2829         INSTANCE_CHECK;
2830         THROWS;
2831         GC_TRIGGERS;
2832         MODE_ANY;
2833         INJECT_FAULT(COMPlusThrowOM(););
2834     }
2835     CONTRACTL_END;
2836
2837     Thread  *pCurThread = GetThread();
2838
2839     for (;;) 
2840     {
2841         // Read existing lock state.
2842         LONG state = m_MonitorHeld.LoadWithoutBarrier();
2843
2844         if (state == 0) 
2845         {
2846             // Common case: lock not held, no waiters. Attempt to acquire lock by
2847             // switching lock bit.
2848             if (FastInterlockCompareExchange((LONG*)&m_MonitorHeld, 1, 0) == 0)
2849             {
2850                 break;
2851             }
2852         } 
2853         else 
2854         {
2855             // It's possible to get here with waiters but no lock held, but in this
2856             // case a signal is about to be fired which will wake up a waiter. So
2857             // for fairness' sake we should wait too.
2858             // Check first for recursive lock attempts on the same thread.
2859             if (m_HoldingThread == pCurThread)
2860             {    
2861                 goto Recursion;
2862             }
2863
2864             // Attempt to increment the count of waiters, then go to the contention
2865             // handling code.
2866             if (FastInterlockCompareExchange((LONG*)&m_MonitorHeld, (state + 2), state) == state)
2867             {
2868                  goto MustWait;
2869             }
2870         }
2871     }
2872
2873     // We get here if we successfully acquired the mutex.
2874     m_HoldingThread = pCurThread;
2875     m_Recursion = 1;
2876     pCurThread->IncLockCount();
2877
2878 #if defined(_DEBUG) && defined(TRACK_SYNC)
2879     {
2880         // The best place to grab this is from the ECall frame
2881         Frame   *pFrame = pCurThread->GetFrame();
2882         int      caller = (pFrame && pFrame != FRAME_TOP
2883                             ? (int) pFrame->GetReturnAddress()
2884                             : -1);
2885         pCurThread->m_pTrackSync->EnterSync(caller, this);
2886     }
2887 #endif
2888
2889     return;
2890
2891 MustWait:
2892     // Didn't manage to get the mutex, must wait.
2893     EnterEpilog(pCurThread);
2894     return;
2895
2896 Recursion:
2897     // Got the mutex via recursive locking on the same thread.
2898     _ASSERTE(m_Recursion >= 1);
2899     m_Recursion++;
2900 #if defined(_DEBUG) && defined(TRACK_SYNC)
2901     // The best place to grab this is from the ECall frame
2902     Frame   *pFrame = pCurThread->GetFrame();
2903     int      caller = (pFrame && pFrame != FRAME_TOP ? (int) pFrame->GetReturnAddress() : -1);
2904     pCurThread->m_pTrackSync->EnterSync(caller, this);
2905 #endif
2906 }
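
// From the CAS loops above, m_MonitorHeld packs two facts into one LONG:
// bit 0 is the ownership flag and the remaining bits count waiters, each
// waiter contributing 2. A minimal sketch of that encoding with <atomic>;
// the unconditional fetch_add is a simplification of the CAS-on-snapshot
// loops the real code uses:
#if 0 // illustrative sketch, not compiled
#include <atomic>

std::atomic<long> g_monitorHeld(0);

static bool TryAcquireUncontended()
{
    long expected = 0;              // not owned, no waiters
    return g_monitorHeld.compare_exchange_strong(expected, 1);
}

static void RegisterWaiter()   { g_monitorHeld.fetch_add(2); } // bit 0 untouched
static void UnregisterWaiter() { g_monitorHeld.fetch_sub(2); }
#endif // illustrative sketch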
2907
2908 BOOL AwareLock::TryEnter(INT32 timeOut)
2909 {
2910     CONTRACTL
2911     {
2912         INSTANCE_CHECK;
2913         THROWS;
2914         GC_TRIGGERS;
2915         if (timeOut == 0) {MODE_ANY;} else {MODE_COOPERATIVE;}
2916         INJECT_FAULT(COMPlusThrowOM(););
2917     }
2918     CONTRACTL_END;
2919
2920     if (timeOut != 0)
2921     {
2922         LARGE_INTEGER qpFrequency, qpcStart, qpcEnd;
2923         BOOL canUseHighRes = QueryPerformanceCounter(&qpcStart);
2924
2925         // try some more busy waiting
2926         if (Contention(timeOut))
2927             return TRUE;
2928
2929         DWORD elapsed = 0;
2930         if (canUseHighRes && QueryPerformanceCounter(&qpcEnd) && QueryPerformanceFrequency(&qpFrequency))
2931             elapsed = (DWORD)((qpcEnd.QuadPart-qpcStart.QuadPart)/(qpFrequency.QuadPart/1000));
2932
2933         if (elapsed >= (DWORD)timeOut)
2934             return FALSE;
2935
2936         if (timeOut != (INT32)INFINITE)
2937             timeOut -= elapsed;
2938     }
2939
2940     Thread  *pCurThread = GetThread();
2941     TESTHOOKCALL(AppDomainCanBeUnloaded(pCurThread->GetDomain()->GetId().m_dwId,FALSE));    
2942
2943     if (pCurThread->IsAbortRequested()) 
2944     {
2945         pCurThread->HandleThreadAbort();
2946     }
2947
2948 retry:
2949
2950     for (;;) {
2951
2952         // Read existing lock state.
2953         LONG state = m_MonitorHeld.LoadWithoutBarrier();
2954
2955         if (state == 0) 
2956         {
2957             // Common case: lock not held, no waiters. Attempt to acquire lock by
2958             // switching lock bit.
2959             if (FastInterlockCompareExchange((LONG*)&m_MonitorHeld, 1, 0) == 0)
2960             {
2961                 break;
2962             } 
2963         }
2964         else 
2965         {
2966             // It's possible to get here with waiters but no lock held, but in this
2967             // case a signal is about to be fired which will wake up a waiter. So
2968             // for fairness' sake we should wait too.
2969             // Check first for recursive lock attempts on the same thread.
2970             if (m_HoldingThread == pCurThread)
2971             {
2972                 goto Recursion;
2973             }
2974             else
2975             {
2976                 goto WouldBlock;
2977             }
2978         }
2979     }
2980
2981     // We get here if we successfully acquired the mutex.
2982     m_HoldingThread = pCurThread;
2983     m_Recursion = 1;
2984     pCurThread->IncLockCount();
2985
2986 #if defined(_DEBUG) && defined(TRACK_SYNC)
2987     {
2988         // The best place to grab this is from the ECall frame
2989         Frame   *pFrame = pCurThread->GetFrame();
2990         int      caller = (pFrame && pFrame != FRAME_TOP ? (int) pFrame->GetReturnAddress() : -1);
2991         pCurThread->m_pTrackSync->EnterSync(caller, this);
2992     }
2993 #endif
2994
2995     return TRUE;
2996
2997 WouldBlock:
2998     // Didn't manage to get the mutex, return failure if no timeout, else wait
2999     // for at most timeout milliseconds for the mutex.
3000     if (!timeOut)
3001     {
3002         return FALSE;
3003     }
3004
3005     // The precondition for EnterEpilog is that the count of waiters be bumped
3006     // to account for this thread
3007
3008     for (;;)
3009     {
3010         // Read existing lock state.
3011         LONG state = m_MonitorHeld.LoadWithoutBarrier();
3012
3013         if (state == 0)
3014         {
3015             goto retry;
3016         }
3017
3018         if (FastInterlockCompareExchange((LONG*)&m_MonitorHeld, (state + 2), state) == state)
3019         {
3020             break;
3021         }
3022     }
3023
3024     return EnterEpilog(pCurThread, timeOut);
3025
3026 Recursion:
3027     // Got the mutex via recursive locking on the same thread.
3028     _ASSERTE(m_Recursion >= 1);
3029     m_Recursion++;
3030 #if defined(_DEBUG) && defined(TRACK_SYNC)
3031     // The best place to grab this is from the ECall frame
3032     Frame   *pFrame = pCurThread->GetFrame();
3033     int      caller = (pFrame && pFrame != FRAME_TOP ? (int) pFrame->GetReturnAddress() : -1);
3034     pCurThread->m_pTrackSync->EnterSync(caller, this);
3035 #endif
3036
3037     return true;
3038 }
3039
3040 BOOL AwareLock::EnterEpilog(Thread* pCurThread, INT32 timeOut)
3041 {
3042     STATIC_CONTRACT_THROWS;
3043     STATIC_CONTRACT_MODE_COOPERATIVE;
3044     STATIC_CONTRACT_GC_TRIGGERS;
3045
3046     // While we are in this frame the thread is considered blocked on the
3047     // critical section of the monitor lock according to the debugger
3048     DebugBlockingItem blockingMonitorInfo;
3049     blockingMonitorInfo.dwTimeout = timeOut;
3050     blockingMonitorInfo.pMonitor = this;
3051     blockingMonitorInfo.pAppDomain = SystemDomain::GetCurrentDomain();
3052     blockingMonitorInfo.type = DebugBlock_MonitorCriticalSection;
3053     DebugBlockingItemHolder holder(pCurThread, &blockingMonitorInfo);
3054
3055     // We need a separate helper because it uses SEH and the holder has a
3056     // destructor
3057     return EnterEpilogHelper(pCurThread, timeOut);
3058 }
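
// The Epilog/EpilogHelper split reflects an MSVC restriction: a function
// cannot mix structured exception handling (__try) with local objects whose
// destructors require unwinding (compiler error C2712). A minimal sketch of
// the workaround shape, with hypothetical names:
#if 0 // illustrative sketch, not compiled
struct Holder { ~Holder() { /* cleanup */ } };

static void Helper()
{
    __try { /* code that may raise */ }
    __except (EXCEPTION_EXECUTE_HANDLER) { /* recover */ }
}

static void Outer()
{
    Holder h;   // the C++ unwinding lives here...
    Helper();   // ...and the SEH lives in a separate helper
}
#endif // illustrative sketch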
3059
3060 BOOL AwareLock::EnterEpilogHelper(Thread* pCurThread, INT32 timeOut)
3061 {
3062     STATIC_CONTRACT_THROWS;
3063     STATIC_CONTRACT_MODE_COOPERATIVE;
3064     STATIC_CONTRACT_GC_TRIGGERS;
3065
3066     DWORD ret = 0;
3067     BOOL finished = false;
3068
3069     // Require all callers to be in cooperative mode.  If they have switched to preemptive
3070     // mode temporarily before calling here, then they are responsible for protecting
3071     // the object associated with this lock.
3072     _ASSERTE(pCurThread->PreemptiveGCDisabled());
3073
3074
3075
3076     OBJECTREF    obj = GetOwningObject();
3077
3078     // We cannot allow the AwareLock to be cleaned up underneath us by the GC.
3079     IncrementTransientPrecious();
3080
3081     GCPROTECT_BEGIN(obj);
3082     {
3083         if (!m_SemEvent.IsMonitorEventAllocated())
3084         {
3085             AllocLockSemEvent();
3086         }
3087         _ASSERTE(m_SemEvent.IsMonitorEventAllocated());
3088
3089         pCurThread->EnablePreemptiveGC();
3090
3091         for (;;)
3092         {
3093             // We might be interrupted during the wait (Thread.Interrupt), so we need an
3094             // exception handler around the call.
3095             struct Param
3096             {
3097                 AwareLock *pThis;
3098                 INT32 timeOut;
3099                 DWORD ret;
3100             } param;
3101             param.pThis = this;
3102             param.timeOut = timeOut;
3103             param.ret = ret;
3104
3105             EE_TRY_FOR_FINALLY(Param *, pParam, &param)
3106             {
3107                 // Measure the time we wait so that, in the case where we wake up
3108                 // and fail to acquire the mutex, we can adjust remaining timeout
3109                 // accordingly.
3110                 ULONGLONG start = CLRGetTickCount64();
3111
3112                 pParam->ret = pParam->pThis->m_SemEvent.Wait(pParam->timeOut, TRUE);
3113                 _ASSERTE((pParam->ret == WAIT_OBJECT_0) || (pParam->ret == WAIT_TIMEOUT));
3114
3115                 // When calculating duration we consider a couple of special cases.
3116                 // If the end tick is the same as the start tick we make the
3117                 // duration a millisecond, to ensure we make forward progress if
3118                 // there's a lot of contention on the mutex. Secondly, we have to
3119                 // cope with the case where the tick counter wrapped while we were
3120                 // waiting (we can cope with at most one wrap, so don't expect three
3121                 // month timeouts to be very accurate). Luckily for us, the latter
3122                 // case is taken care of by 32-bit modulo arithmetic automatically.
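                // Worked example of the wrap case with a 32-bit counter
                // (illustrative): start = 0xFFFFFFF0, end = 0x00000010.
                // Unsigned subtraction is modulo 2^32, so end - start
                // = 0x00000020 = 32 ms, the true elapsed time across the wrap.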
3123
3124                 if (pParam->timeOut != (INT32) INFINITE)
3125                 {
3126                     ULONGLONG end = CLRGetTickCount64();
3127                     ULONGLONG duration;
3128                     if (end == start)
3129                     {
3130                         duration = 1;
3131                     }
3132                     else
3133                     {
3134                         duration = end - start;
3135                     }
3136                     duration = min(duration, (DWORD)pParam->timeOut);
3137                     pParam->timeOut -= (INT32)duration;
3138                 }
3139             }
3140             EE_FINALLY
3141             {
3142                 if (GOT_EXCEPTION())
3143                 {
3144                     // We must decrement the waiter count.
3145                     for (;;)
3146                     {
3147                         LONG state = m_MonitorHeld.LoadWithoutBarrier();
3148                         _ASSERTE((state >> 1) != 0);
3149                         if (FastInterlockCompareExchange((LONG*)&m_MonitorHeld, state - 2, state) == state)
3150                         {
3151                             break;
3152                         }
3153                     }
3154
3155                     // And signal the next waiter, else they'll wait forever.
3156                     m_SemEvent.Set();
3157                 }
3158             } EE_END_FINALLY;
3159
3160             ret = param.ret;
3161
3162             if (ret == WAIT_OBJECT_0)
3163             {
3164                 // Attempt to acquire lock (this also involves decrementing the waiter count).
3165                 for (;;) 
3166                 {
3167                     LONG state = m_MonitorHeld.LoadWithoutBarrier();
3168                     _ASSERTE(((size_t)state >> 1) != 0);
3169
3170                     if ((size_t)state & 1)
3171                     {
3172                         break;
3173                     }
3174
3175                     if (FastInterlockCompareExchange((LONG*)&m_MonitorHeld, ((state - 2) | 1), state) == state)
3176                     {
3177                         finished = true;
3178                         break;
3179                     }
3180                 }
3181             }
3182             else
3183             {
3184                 // We timed out, decrement waiter count.
3185                 for (;;) 
3186                 {
3187                     LONG state = m_MonitorHeld.LoadWithoutBarrier();
3188                     _ASSERTE((state >> 1) != 0);
3189                     if (FastInterlockCompareExchange((LONG*)&m_MonitorHeld, state - 2, state) == state)
3190                     {
3191                         finished = true;
3192                         break;
3193                     }
3194                 }
3195             }
3196
3197             if (finished)
3198             {
3199                 break;
3200             }
3201         }
3202
3203         pCurThread->DisablePreemptiveGC();
3204     }
3205     GCPROTECT_END();
3206     DecrementTransientPrecious();
3207
3208     if (ret == WAIT_TIMEOUT)
3209     {
3210         return FALSE;
3211     }
3212
3213     m_HoldingThread = pCurThread;
3214     m_Recursion = 1;
3215     pCurThread->IncLockCount();
3216
3217 #if defined(_DEBUG) && defined(TRACK_SYNC)
3218     // The best place to grab this is from the ECall frame
3219     Frame   *pFrame = pCurThread->GetFrame();
3220     int      caller = (pFrame && pFrame != FRAME_TOP ? (int) pFrame->GetReturnAddress() : -1);
3221     pCurThread->m_pTrackSync->EnterSync(caller, this);
3222 #endif
3223
3224     return (ret != WAIT_TIMEOUT);
3225 }
3226
3227
3228 BOOL AwareLock::Leave()
3229 {
3230     CONTRACTL
3231     {
3232         INSTANCE_CHECK;
3233         NOTHROW;
3234         GC_NOTRIGGER;
3235         MODE_ANY;
3236     }
3237     CONTRACTL_END;
3238
3239     Thread* pThread = GetThread();
3240
3241     AwareLock::LeaveHelperAction action = LeaveHelper(pThread);
3242
3243     switch(action)
3244     {
3245     case AwareLock::LeaveHelperAction_None:
3246         // We are done
3247         return TRUE;
3248     case AwareLock::LeaveHelperAction_Signal:
3249         // Signal the event
3250         Signal();
3251         return TRUE;
3252     default:
3253         // Must be an error otherwise
3254         _ASSERTE(action == AwareLock::LeaveHelperAction_Error);
3255         return FALSE;
3256     }
3257 }
3258
3259 #ifdef _DEBUG
3260 #define _LOGCONTENTION
3261 #endif // _DEBUG
3262
3263 #ifdef  _LOGCONTENTION
3264 inline void LogContention()
3265 {
3266     WRAPPER_NO_CONTRACT;
3267 #ifdef LOGGING
3268     if (LoggingOn(LF_SYNC, LL_INFO100))
3269     {
3270         LogSpewAlways("Contention: Stack Trace Begin\n");
3271         void LogStackTrace();
3272         LogStackTrace();
3273         LogSpewAlways("Contention: Stack Trace End\n");
3274     }
3275 #endif
3276 }
3277 #else
3278 #define LogContention()
3279 #endif


bool AwareLock::Contention(INT32 timeOut)
{
    CONTRACTL
    {
        INSTANCE_CHECK;
        THROWS;
        GC_TRIGGERS;
        MODE_ANY;
        INJECT_FAULT(COMPlusThrowOM(););
    }
    CONTRACTL_END;

    DWORD startTime = 0;
    if (timeOut != (INT32)INFINITE)
        startTime = GetTickCount();

    COUNTER_ONLY(GetPerfCounters().m_LocksAndThreads.cContention++);

    LogContention();
    Thread      *pCurThread = GetThread();
    OBJECTREF    obj = GetOwningObject();
    bool         bEntered = false;
    bool         bKeepGoing = true;

    // We cannot allow the AwareLock to be cleaned up underneath us by the GC.
    IncrementTransientPrecious();

    GCPROTECT_BEGIN(obj);
    {
        GCX_PREEMP();

        // Try spinning and yielding before eventually blocking.
        // The repetition limit (g_SpinConstants.dwRepetitions) is largely
        // arbitrary - feel free to tune it if you have evidence you're making
        // things better.
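        // To make the backoff concrete, a worked example with hypothetical
        // tuning values (the real numbers come from g_SpinConstants, which is
        // initialized elsewhere): with dwInitialDuration = 50,
        // dwBackoffFactor = 3 and dwMaximumDuration = 20000, one round of the
        // do/while below spins for 50, 150, 450, 1350, 4050 and 12150
        // iterations, after which i reaches 36450 >= dwMaximumDuration and the
        // round ends with a SwitchToThread.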
        for (DWORD iter = 0; iter < g_SpinConstants.dwRepetitions && bKeepGoing; iter++)
        {
            DWORD i = g_SpinConstants.dwInitialDuration;

            do
            {
                if (TryEnter())
                {
                    bEntered = true;
                    goto entered;
                }

                if (g_SystemInfo.dwNumberOfProcessors <= 1)
                {
                    bKeepGoing = false;
                    break;
                }

                if (timeOut != (INT32)INFINITE && GetTickCount() - startTime >= (DWORD)timeOut)
                {
                    bKeepGoing = false;
                    break;
                }

                // Spin for i iterations, and make sure to never go more than 20000 iterations between
                // checking if we should SwitchToThread
                int remainingDelay = i;

                while (remainingDelay > 0)
                {
                    int currentDelay = min(remainingDelay, 20000);
                    remainingDelay -= currentDelay;

                    // Delay by approximately 2*currentDelay clock cycles (Pentium III).

                    // This is brittle code - future processors may of course execute this
                    // faster or slower, and future code generators may eliminate the loop altogether.
                    // The precise value of the delay is not critical, however, and I can't think
                    // of a better way that isn't machine-dependent.
                    for (int delayCount = currentDelay; (--delayCount != 0); )
                    {
                        YieldProcessor();           // indicate to the processor that we are spinning
                    }

                    // TryEnter will not take the lock if it has waiters.  This means we should not spin
                    // for long periods without giving the waiters a chance to run, since we won't
                    // make progress until they run and they may be waiting for our CPU.  So once
                    // we're spinning >20000 iterations, check every 20000 iterations if there are
                    // waiters and if so call SwitchToThread.
                    //
                    // Since this only affects the spinning heuristic, calling HasWaiters now
                    // and getting a dirty read is fine.  Note that it is important that TryEnter
                    // not take the lock because we could easily starve waiting threads.
                    // They make only one attempt before going back to sleep, and spinners on
                    // other CPUs would likely get the lock.  We could fix this by allowing a
                    // woken thread to become a spinner again, at which point there are no
                    // starvation concerns and TryEnter can take the lock.
                    if (remainingDelay > 0 && HasWaiters())
                    {
                        __SwitchToThread(0, CALLER_LIMITS_SPINNING);
                    }
                }

                // Exponential backoff: wait a factor longer in the next iteration.
                i *= g_SpinConstants.dwBackoffFactor;
            }
            while (i < g_SpinConstants.dwMaximumDuration);

            {
                GCX_COOP();
                pCurThread->HandleThreadAbort();
            }

            __SwitchToThread(0, CALLER_LIMITS_SPINNING);
        }
entered: ;
    }
    GCPROTECT_END();
    // We are back in cooperative mode, so the object is reported again and we
    // no longer need the transient-precious reference.
    DecrementTransientPrecious();
    if (!bEntered && timeOut == (INT32)INFINITE)
    {
        // We've tried hard to enter - we need to eventually block to avoid wasting too much CPU
        // time.
        Enter();
        bEntered = true;
    }
    return bEntered;
}
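
// The overall shape of Contention, condensed (a descriptive sketch of the code
// above, not a separate implementation):
//
//     for each of g_SpinConstants.dwRepetitions rounds:
//         spin with exponential backoff, calling TryEnter on every iteration
//         stop spinning on uniprocessor machines or once the timeout expires
//         poll for thread aborts, then SwitchToThread
//     if still not entered and the timeout is INFINITE:
//         Enter()    // fall back to blocking on the lock's event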


LONG AwareLock::LeaveCompletely()
{
    WRAPPER_NO_CONTRACT;

    LONG count = 0;
    while (Leave()) {
        count++;
    }
    _ASSERTE(count > 0);            // otherwise we were never in the lock

    return count;
}


BOOL AwareLock::OwnedByCurrentThread()
{
    WRAPPER_NO_CONTRACT;
    return (GetThread() == m_HoldingThread);
}


// ***************************************************************************
//
//              SyncBlock class implementation
//
// ***************************************************************************

// We maintain two queues for SyncBlock::Wait.
// 1. Inside SyncBlock we queue all threads that are waiting on the SyncBlock.
//    When we pulse, we pick the thread from this queue using FIFO.
// 2. We queue all SyncBlocks that a thread is waiting for in Thread::m_WaitEventLink.
//    When we pulse a thread, we find the event from this queue to set, and we also
//    OR a 1 bit into the syncblock value saved in the queue, so that we can return
//    immediately from SyncBlock::Wait if the syncblock has been pulsed.
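//
// A sketch of that pulse bit (mirroring the checks in the code below, with the
// setting side shown only for illustration): SyncBlock pointers are at least
// pointer-aligned, so the low bit of the m_WaitSB field in a WaitEventLink is
// free.  A pulse tags the entry:
//
//     pLink->m_WaitSB = (SyncBlock*)((DWORD_PTR)pLink->m_WaitSB | 1);
//
// and a thread re-entering Wait strips the tag before comparing:
//
//     (SyncBlock*)((DWORD_PTR)walk->m_Next->m_WaitSB & ~1) == this
//
// which is why the pulsed case below can return TRUE without waiting.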
BOOL SyncBlock::Wait(INT32 timeOut, BOOL exitContext)
{
    CONTRACTL
    {
        INSTANCE_CHECK;
        THROWS;
        GC_TRIGGERS;
        MODE_ANY;
        INJECT_FAULT(COMPlusThrowOM());
    }
    CONTRACTL_END;

    Thread  *pCurThread = GetThread();
    BOOL     isTimedOut = FALSE;
    BOOL     isEnqueued = FALSE;
    WaitEventLink waitEventLink;
    WaitEventLink *pWaitEventLink;

    // As soon as we flip the switch, we are in a race with the GC, which could clean
    // up the SyncBlock underneath us -- unless we report the object.
    _ASSERTE(pCurThread->PreemptiveGCDisabled());

    // Is this thread already waiting on this SyncBlock?
    WaitEventLink *walk = pCurThread->WaitEventLinkForSyncBlock(this);
    if (walk->m_Next) {
        if (walk->m_Next->m_WaitSB == this) {
            // Wait on the same lock again.
            walk->m_Next->m_RefCount++;
            pWaitEventLink = walk->m_Next;
        }
        else if ((SyncBlock*)(((DWORD_PTR)walk->m_Next->m_WaitSB) & ~1) == this) {
            // This thread has been pulsed.  No need to wait.
            return TRUE;
        }
    }
    else {
        // First time this thread is going to wait for this SyncBlock.
        CLREvent* hEvent;
        if (pCurThread->m_WaitEventLink.m_Next == NULL) {
            hEvent = &(pCurThread->m_EventWait);
        }
        else {
            hEvent = GetEventFromEventStore();
        }
        waitEventLink.m_WaitSB = this;
        waitEventLink.m_EventWait = hEvent;
        waitEventLink.m_Thread = pCurThread;
        waitEventLink.m_Next = NULL;
        waitEventLink.m_LinkSB.m_pNext = NULL;
        waitEventLink.m_RefCount = 1;
        pWaitEventLink = &waitEventLink;
        walk->m_Next = pWaitEventLink;

        // Before we enqueue it (and, thus, before it can be dequeued), reset the event
        // that will awaken us.
        hEvent->Reset();

        // This thread is now waiting on this sync block.
        ThreadQueue::EnqueueThread(pWaitEventLink, this);

        isEnqueued = TRUE;
    }

    _ASSERTE((SyncBlock*)((DWORD_PTR)walk->m_Next->m_WaitSB & ~1) == this);

    PendingSync   syncState(walk);

    OBJECTREF     obj = m_Monitor.GetOwningObject();

    m_Monitor.IncrementTransientPrecious();

    // While we are in this frame the thread is considered blocked on the
    // event of the monitor lock according to the debugger.
    DebugBlockingItem blockingMonitorInfo;
    blockingMonitorInfo.dwTimeout = timeOut;
    blockingMonitorInfo.pMonitor = &m_Monitor;
    blockingMonitorInfo.pAppDomain = SystemDomain::GetCurrentDomain();
    blockingMonitorInfo.type = DebugBlock_MonitorEvent;
    DebugBlockingItemHolder holder(pCurThread, &blockingMonitorInfo);

    GCPROTECT_BEGIN(obj);
    {
        GCX_PREEMP();

        // Remember how many times we synchronized.
        syncState.m_EnterCount = LeaveMonitorCompletely();
        _ASSERTE(syncState.m_EnterCount > 0);

        Context* targetContext;
        targetContext = pCurThread->GetContext();
        _ASSERTE(targetContext);
        Context* defaultContext;
        defaultContext = pCurThread->GetDomain()->GetDefaultContext();
        _ASSERTE(defaultContext);
#ifdef FEATURE_REMOTING
        if (exitContext &&
            targetContext != defaultContext)
        {
            Context::MonitorWaitArgs waitArgs = {timeOut, &syncState, &isTimedOut};
            Context::CallBackInfo callBackInfo = {Context::MonitorWait_callback, (void*) &waitArgs};
            Context::RequestCallBack(CURRENT_APPDOMAIN_ID, defaultContext, &callBackInfo);
        }
        else
#else
        _ASSERTE(!exitContext || targetContext == defaultContext);
#endif
        {
            isTimedOut = pCurThread->Block(timeOut, &syncState);
        }
    }
    GCPROTECT_END();
    m_Monitor.DecrementTransientPrecious();

    return !isTimedOut;
}

void SyncBlock::Pulse()
{
    CONTRACTL
    {
        INSTANCE_CHECK;
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
    }
    CONTRACTL_END;

    WaitEventLink  *pWaitEventLink;

    if ((pWaitEventLink = ThreadQueue::DequeueThread(this)) != NULL)
        pWaitEventLink->m_EventWait->Set();
}

void SyncBlock::PulseAll()
{
    CONTRACTL
    {
        INSTANCE_CHECK;
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
    }
    CONTRACTL_END;

    WaitEventLink  *pWaitEventLink;

    while ((pWaitEventLink = ThreadQueue::DequeueThread(this)) != NULL)
        pWaitEventLink->m_EventWait->Set();
}
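
// The only difference between the two is 'if' versus 'while': Pulse wakes the
// single thread at the head of the FIFO wait queue, while PulseAll drains the
// queue and sets every waiter's event.  A hedged managed-side analogy
// (assuming the usual Monitor mapping, which is established elsewhere):
//
//     lock (obj) { Monitor.Pulse(obj); }      // -> SyncBlock::Pulse
//     lock (obj) { Monitor.PulseAll(obj); }   // -> SyncBlock::PulseAll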

bool SyncBlock::SetInteropInfo(InteropSyncBlockInfo* pInteropInfo)
{
    WRAPPER_NO_CONTRACT;
    SetPrecious();

    // We could be agile, but not have noticed yet.  We can't assert here
    //  that we live in any given domain, nor is this an appropriate place
    //  to re-parent the syncblock.
/*    _ASSERTE (m_dwAppDomainIndex.m_dwIndex == 0 ||
              m_dwAppDomainIndex == SystemDomain::System()->DefaultDomain()->GetIndex() ||
              m_dwAppDomainIndex == GetAppDomain()->GetIndex());
    m_dwAppDomainIndex = GetAppDomain()->GetIndex();
*/
    return (FastInterlockCompareExchangePointer(&m_pInteropInfo,
                                                pInteropInfo,
                                                NULL) == NULL);
}
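
// The compare-exchange above is the standard publish-once idiom: the caller
// that successfully swaps NULL -> pInteropInfo has published its object, and a
// caller that loses the race gets 'false' and is expected to dispose of its own
// InteropSyncBlockInfo.  A generic sketch of the pattern, with hypothetical
// names:
//
//     if (FastInterlockCompareExchangePointer(&slot, pNew, NULL) == NULL)
//         return true;    // we published pNew
//     delete pNew;        // someone else won the race; discard our copy
//     return false;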

#ifdef EnC_SUPPORTED
// Store information about fields added to this object by EnC.
// This must be called from a thread in the AppDomain of this object instance.
void SyncBlock::SetEnCInfo(EnCSyncBlockInfo *pEnCInfo)
{
    WRAPPER_NO_CONTRACT;

    // We can't recreate the field contents, so this SyncBlock can never go away.
    SetPrecious();

    // Store the field info (should only ever happen once).
    _ASSERTE(m_pEnCInfo == NULL);
    m_pEnCInfo = pEnCInfo;

    // Also store the AppDomain that this object lives in.
    // Also verify that the AD was either not yet set, or set correctly before overwriting it.
    // It is unclear why it should ever be set to the default domain and then changed to a
    // different domain; perhaps that case can be removed.
    _ASSERTE (m_dwAppDomainIndex.m_dwIndex == 0 ||
              m_dwAppDomainIndex == SystemDomain::System()->DefaultDomain()->GetIndex() ||
              m_dwAppDomainIndex == GetAppDomain()->GetIndex());
    m_dwAppDomainIndex = GetAppDomain()->GetIndex();
}
#endif // EnC_SUPPORTED
#endif // !DACCESS_COMPILE

#if defined(_WIN64) && defined(_DEBUG)
void ObjHeader::IllegalAlignPad()
{
    WRAPPER_NO_CONTRACT;
#ifdef LOGGING
    void** object = ((void**) this) + 1;
    LogSpewAlways("\n\n******** Illegal ObjHeader m_alignpad not 0, object" FMT_ADDR "\n\n",
                  DBG_ADDR(object));
#endif
    _ASSERTE(m_alignpad == 0);
}
#endif // _WIN64 && _DEBUG
