1 // Licensed to the .NET Foundation under one or more agreements.
2 // The .NET Foundation licenses this file to you under the MIT license.
3 // See the LICENSE file in the project root for more information.
17 #include "stackwalk.h"
19 #include "comsynchronizable.h"
21 #include "gcheaputilities.h"
23 #include "dbginterface.h"
24 #include "corprof.h" // profiling
25 #include "eeprofinterfaces.h"
27 #include "perfcounters.h"
29 #include "win32threadpool.h"
30 #include "jitinterface.h"
31 #include "eventtrace.h"
32 #include "comutilnative.h"
33 #include "finalizerthread.h"
34 #include "threadsuspend.h"
38 #include "nativeoverlapped.h"
40 #include "mdaassistants.h"
41 #include "appdomain.inl"
43 #include "exceptmacros.h"
44 #include "win32threadpool.h"
46 #ifdef FEATURE_COMINTEROP
47 #include "runtimecallablewrapper.h"
48 #include "interoputil.h"
49 #include "interoputil.inl"
50 #endif // FEATURE_COMINTEROP
52 #ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
53 #include "olecontexthelpers.h"
54 #endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT
56 #ifdef FEATURE_PERFTRACING
57 #include "eventpipebuffermanager.h"
58 #endif // FEATURE_PERFTRACING
// Global/static runtime data for the thread store and Thread class.
// The singleton ThreadStore pointer (SPTR so it is visible to the DAC).
62 SPTR_IMPL(ThreadStore, ThreadStore, s_pThreadStore);
// Scratch OS CONTEXT owned by the thread store (allocation site not visible in this listing).
63 CONTEXT *ThreadStore::s_pOSContext = NULL;
// Event used to coordinate waits for stack-crawl operations.
64 CLREvent *ThreadStore::s_pWaitForStackCrawlEvent;
66 #ifndef DACCESS_COMPILE
70 BOOL Thread::s_fCleanFinalizedThread = FALSE;
// Overflow accumulator for per-thread thread-pool completion counts.
72 Volatile<LONG> Thread::s_threadPoolCompletionCountOverflow = 0;
// Lock guarding deadlock-detection state; initialized in InitThreadManager below.
74 CrstStatic g_DeadlockAwareCrst;
// Returns TRUE when the OS thread id of handle h equals osId. If GetThreadId
// fails (returns 0) the comparison is inconclusive, and the function
// deliberately reports a match so callers skip the check rather than fail.
78 BOOL MatchThreadHandleToOsId ( HANDLE h, DWORD osId )
81 LIMITED_METHOD_CONTRACT;
83 DWORD id = GetThreadId(h);
85 // OS call GetThreadId may fail, and return 0. In this case we can not
86 // make a decision if the two match or not. Instead, we ignore this check.
87 return id == 0 || id == osId;
90 #endif // !FEATURE_PAL
// Debug-only GC-mode assertion helpers. The TRUE specializations assert the
// thread is in cooperative GC mode; the FALSE specializations assert
// preemptive mode. (Assertion bodies are elided from this listing.)
96 template<> AutoCleanupGCAssert<TRUE>::AutoCleanupGCAssert()
99 STATIC_CONTRACT_MODE_COOPERATIVE;
102 template<> AutoCleanupGCAssert<FALSE>::AutoCleanupGCAssert()
105 STATIC_CONTRACT_MODE_PREEMPTIVE;
108 template<> void GCAssert<TRUE>::BeginGCAssert()
111 STATIC_CONTRACT_MODE_COOPERATIVE;
114 template<> void GCAssert<FALSE>::BeginGCAssert()
117 STATIC_CONTRACT_MODE_PREEMPTIVE;
// Thread::SetFrame: installs pFrame as this thread's current explicit Frame,
// performing extensive debug-only validation of the Frame chain first.
// NOTE(review): this listing is elided (line numbers jump); the actual store
// to m_pFrame is not visible in the fragment shown here.
125 void Thread::SetFrame(Frame *pFrame)
132 // It only makes sense for a Thread to call SetFrame on itself.
133 PRECONDITION(this == GetThread());
134 PRECONDITION(CheckPointer(pFrame));
// First validation pass: walk the existing chain. (Frame*)-1 is the
// FRAME_TOP sentinel terminating the chain.
138 if (g_pConfig->fAssertOnFailFast())
140 Frame *pWalk = m_pFrame;
142 while (pWalk != (Frame*) -1)
149 pWalk = pWalk->m_Next;
// Verify any ContextTransitionFrame between here and pFrame returns to the
// current domain. (fExist is set in lines elided from this listing.)
152 while (fExist && pWalk != pFrame && pWalk != (Frame*)-1)
154 if (pWalk->GetVTablePtr() == ContextTransitionFrame::GetMethodFrameVPtr())
156 _ASSERTE (((ContextTransitionFrame *)pWalk)->GetReturnDomain() == m_pDomain);
158 pWalk = pWalk->m_Next;
164 // If stack overrun corruptions are expected, then skip this check
165 // as the Frame chain may have been corrupted.
166 if (g_pConfig->fAssertOnFailFast() == false)
// Sanity-check chain ordering: every frame lies between the current stack
// pointer and the cached stack base (frames are stack-allocated), and each
// frame's type is a valid Frame::TYPE_* value.
169 Frame* espVal = (Frame*)GetCurrentSP();
171 while (pFrame != (Frame*) -1)
// stopFrame is a debugging aid: set it in a debugger to break when a
// particular frame is installed.
173 static Frame* stopFrame = 0;
174 if (pFrame == stopFrame)
175 _ASSERTE(!"SetFrame frame == stopFrame");
177 _ASSERTE(espVal < pFrame);
178 _ASSERTE(pFrame < m_CacheStackBase);
179 _ASSERTE(pFrame->GetFrameType() < Frame::TYPE_COUNT);
181 pFrame = pFrame->m_Next;
187 //************************************************************************
189 //************************************************************************
// Timer primitives implemented elsewhere; tgetFrequency caches the tick
// frequency so the underlying OS query runs at most once.
191 extern unsigned __int64 getTimeStamp();
193 extern unsigned __int64 getTickFrequency();
// Returns the cached tick frequency. (unsigned __int64)-1 is the
// "not yet computed" sentinel. NOTE(review): the return statements are
// elided from this listing.
195 unsigned __int64 tgetFrequency() {
196 static unsigned __int64 cachedFreq = (unsigned __int64) -1;
198 if (cachedFreq != (unsigned __int64) -1)
201 cachedFreq = getTickFrequency();
206 #endif // #ifndef DACCESS_COMPILE
// Stack-walk callback used by Thread::DetectHandleILStubsForDebugger below.
// Writes into *pData (a bool) whether the first crawl frame that has a
// non-NULL MethodDesc is an IL stub, then the walk can stop.
208 static StackWalkAction DetectHandleILStubsForDebugger_StackWalkCallback(CrawlFrame *pCF, VOID *pData)
211 // It suffices to wait for the first CrawlFrame with non-NULL function
212 MethodDesc *pMD = pCF->GetFunction();
215 *(bool *)pData = pMD->IsILStub();
222 // This is really just a heuristic to detect if we are executing in an M2U IL stub or
223 // one of the marshaling methods it calls. It doesn't deal with U2M IL stubs.
224 // We loop through the frame chain looking for an uninitialized TransitionFrame.
225 // If there is one, then we are executing in an M2U IL stub or one of the methods it calls.
226 // On the other hand, if there is an initialized TransitionFrame, then we are not.
227 // Also, if there is an HMF on the stack, then we stop. This could be the case where
228 // an IL stub calls an FCALL which ends up in a managed method, and the debugger wants to
229 // stop in those cases. Some examples are COMException..ctor and custom marshalers.
231 // X86 IL stubs use InlinedCallFrame and are indistinguishable from ordinary methods with
232 // inlined P/Invoke when judging just from the frame chain. We use stack walk to decide
234 bool Thread::DetectHandleILStubsForDebugger()
// Walk the explicit Frame chain from the most recent frame to FRAME_TOP.
242 Frame* pFrame = GetFrame();
246 while (pFrame != FRAME_TOP)
248 // Check for HMF's. See the comment at the beginning of this function.
249 if (pFrame->GetVTablePtr() == HelperMethodFrame::GetMethodFrameVPtr())
253 // If there is an entry frame (i.e. U2M managed), we should break.
254 else if (pFrame->GetFrameType() == Frame::TYPE_ENTRY)
258 // Check for M2U transition frames. See the comment at the beginning of this function.
259 else if (pFrame->GetFrameType() == Frame::TYPE_EXIT)
261 if (pFrame->GetReturnAddress() == NULL)
263 // If the return address is NULL, then the frame has not been initialized yet.
264 // We may see InlinedCallFrame in ordinary methods as well. Have to do
265 // stack walk to find out if this is really an IL stub.
266 bool fInILStub = false;
// The callback above sets fInILStub; remaining arguments to
// StackWalkFrames are elided from this listing.
268 StackWalkFrames(&DetectHandleILStubsForDebugger_StackWalkCallback,
271 dac_cast<PTR_Frame>(pFrame));
273 if (fInILStub) return true;
277 // The frame is fully initialized.
281 pFrame = pFrame->Next();
// Per-thread (thread-local) record holding the current Thread* and
// AppDomain* for fast access; initializer is partially elided here.
293 ThreadLocalInfo gCurrentThreadInfo =
296 NULL, // m_pAppDomain
300 // index into TLS Array. Definition added by compiler
301 EXTERN_C UINT32 _tls_index;
303 #ifndef DACCESS_COMPILE
// Installs t as the current thread's Thread* in TLS. (Return statement is
// elided from this listing.)
304 BOOL SetThread(Thread* t)
306 LIMITED_METHOD_CONTRACT
308 gCurrentThreadInfo.m_pThread = t;
// Installs ad as the current thread's AppDomain* in TLS.
312 BOOL SetAppDomain(AppDomain* ad)
314 LIMITED_METHOD_CONTRACT
316 gCurrentThreadInfo.m_pAppDomain = ad;
// Thread::Alert: queues a user-mode APC to this thread so an alertable wait
// it is performing gets interrupted. Returns the result of QueueUserAPC
// (FALSE when the thread handle is unusable).
320 BOOL Thread::Alert ()
328 BOOL fRetVal = FALSE;
330 HANDLE handle = GetThreadHandle();
// Skip threads with no real OS handle (never started, or switched out).
331 if (handle != INVALID_HANDLE_VALUE && handle != SWITCHOUT_HANDLE_VALUE)
333 fRetVal = ::QueueUserAPC(UserInterruptAPC, handle, APC_Code);
// Thread::Join: waits for this thread to exit, optionally alertably.
// Thin wrapper that maps the BOOL flag onto a WaitMode for JoinEx.
341 DWORD Thread::Join(DWORD timeout, BOOL alertable)
344 return JoinEx(timeout,alertable?WaitMode_Alertable:WaitMode_None);
// Thread::JoinEx: waits on this thread's handle with the given WaitMode.
// Routes through DoAppropriateWait when a runtime Thread exists on the
// calling thread, otherwise falls back to a plain OS wait.
346 DWORD Thread::JoinEx(DWORD timeout, WaitMode mode)
350 if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
354 BOOL alertable = (mode & WaitMode_Alertable)?TRUE:FALSE;
356 Thread *pCurThread = GetThread();
357 _ASSERTE(pCurThread || dbgOnly_IsSpecialEEThread());
360 // We're not hosted, so WaitMode_InDeadlock is irrelevant. Clear it, so that this wait can be
361 // forwarded to a SynchronizationContext if needed.
362 mode = (WaitMode)(mode & ~WaitMode_InDeadlock);
// NOTE(review): the early-return for an unusable handle is elided here;
// only the condition line is visible.
364 HANDLE handle = GetThreadHandle();
365 if (handle == INVALID_HANDLE_VALUE || handle == SWITCHOUT_HANDLE_VALUE) {
369 return pCurThread->DoAppropriateWait(1, &handle, FALSE, timeout, mode);
372 return WaitForSingleObjectEx(handle,timeout,alertable);
// Maps an NT priority value back to the managed ThreadPriority range.
377 extern INT32 MapFromNTPriority(INT32 NTPriority);
// Thread::SetThreadPriority: sets the OS priority of this thread and mirrors
// the value into the managed System.Threading.Thread object (if exposed).
379 BOOL Thread::SetThreadPriority(
380 int nPriority // thread priority level
// No OS handle yet: the priority is applied later, when the thread starts.
392 if (GetThreadHandle() == INVALID_HANDLE_VALUE) {
393 // When the thread starts running, we will set the thread priority.
397 fRet = ::SetThreadPriority(GetThreadHandle(), nPriority);
// Keep the managed exposed Thread object's Priority field in sync.
403 THREADBASEREF pObject = (THREADBASEREF)ObjectFromHandle(m_ExposedObject);
406 // TODO: managed ThreadPriority only supports up to 4.
407 pObject->SetPriority (MapFromNTPriority(nPriority));
// Thread::GetThreadPriority: returns the OS priority for this thread, or a
// fallback value (elided from this listing) when there is no OS handle yet.
413 int Thread::GetThreadPriority()
422 if (GetThreadHandle() == INVALID_HANDLE_VALUE) {
426 nRetVal = ::GetThreadPriority(GetThreadHandle());
// Thread::ChooseThreadCPUGroupAffinity: when CPU-group support is enabled,
// assigns this thread to a CPU group chosen by CPUGroupInfo and records the
// chosen group/mask in m_wCPUGroup / m_pAffinityMask. No-op if affinity was
// already set or the thread has no OS handle.
431 void Thread::ChooseThreadCPUGroupAffinity()
// Both GC CPU-group support and "threads use all CPU groups" must be on.
440 if (!CPUGroupInfo::CanEnableGCCPUGroups() || !CPUGroupInfo::CanEnableThreadUseAllCpuGroups())
444 //Borrow the ThreadStore Lock here: Lock ThreadStore before distributing threads
445 ThreadStoreLockHolder TSLockHolder(TRUE);
447 // this thread already has CPU group affinity set
448 if (m_pAffinityMask != 0)
451 if (GetThreadHandle() == INVALID_HANDLE_VALUE)
454 GROUP_AFFINITY groupAffinity;
455 CPUGroupInfo::ChooseCPUGroupAffinity(&groupAffinity);
456 CPUGroupInfo::SetThreadGroupAffinity(GetThreadHandle(), &groupAffinity, NULL);
// Remember the assignment so ClearThreadCPUGroupAffinity can undo it.
457 m_wCPUGroup = groupAffinity.Group;
458 m_pAffinityMask = groupAffinity.Mask;
// Thread::ClearThreadCPUGroupAffinity: releases the CPU-group assignment
// previously made by ChooseThreadCPUGroupAffinity. No-op when CPU-group
// support is disabled or no affinity was recorded.
461 void Thread::ClearThreadCPUGroupAffinity()
470 if (!CPUGroupInfo::CanEnableGCCPUGroups() || !CPUGroupInfo::CanEnableThreadUseAllCpuGroups())
// Same ThreadStore lock discipline as when the affinity was assigned.
474 ThreadStoreLockHolder TSLockHolder(TRUE);
476 // this thread does not have CPU group affinity set
477 if (m_pAffinityMask == 0)
// Rebuild the GROUP_AFFINITY from the values recorded at assignment time.
480 GROUP_AFFINITY groupAffinity;
481 groupAffinity.Group = m_wCPUGroup;
482 groupAffinity.Mask = m_pAffinityMask;
483 CPUGroupInfo::ClearCPUGroupAffinity(&groupAffinity);
// Thread::StartThread: resumes the (suspended) underlying OS thread and
// returns ResumeThread's result, or (DWORD)-1 on the elided failure paths.
// May only be called by the thread that created this Thread (m_Creater —
// spelling is historical).
489 DWORD Thread::StartThread()
499 DWORD dwRetVal = (DWORD) -1;
501 _ASSERTE (m_Creater.IsCurrentThread());
505 _ASSERTE (GetThreadHandle() != INVALID_HANDLE_VALUE &&
506 GetThreadHandle() != SWITCHOUT_HANDLE_VALUE);
507 dwRetVal = ::ResumeThread(GetThreadHandle());
514 // Class static data:
// Count used by the debugger to synchronize with suspending threads.
515 LONG    Thread::m_DebugWillSyncCount = -1;
516 LONG    Thread::m_DetachCount = 0;
517 LONG    Thread::m_ActiveDetachCount = 0;
518 int     Thread::m_offset_counter = 0;
// Number of threads currently at GC-unsafe places (interlocked updates).
519 Volatile<LONG> Thread::m_threadsAtUnsafePlaces = 0;
521 //-------------------------------------------------------------------------
522 // Public function: SetupThreadNoThrow()
523 // Creates Thread for current thread if not previously created.
524 // Returns NULL for failure (usually due to out-of-memory.)
525 //-------------------------------------------------------------------------
// Non-throwing wrapper over SetupThread: any exception is swallowed and its
// HRESULT is reported through *pHR instead.
526 Thread* SetupThreadNoThrow(HRESULT *pHR)
531 if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
// Fast path: the current thread already has a Thread object.
537 Thread *pThread = GetThread();
545 pThread = SetupThread();
549 // We failed SetupThread. GET_EXCEPTION() may depend on Thread object.
550 if (__pException == NULL)
556 hr = GET_EXCEPTION()->GetHR();
559 EX_END_CATCH(SwallowAllExceptions);
// DeleteThread: destroys a Thread object that failed setup or is being torn
// down. Detaches it from the thread store if registered, revokes the COM
// apartment spy, marks it dead, and (in elided lines) deletes it.
569 void DeleteThread(Thread* pThread)
573 if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
577 //_ASSERTE (pThread == GetThread());
// A thread registered in the thread store must go through DetachThread.
581 if (pThread->HasThreadStateNC(Thread::TSNC_ExistInThreadStore))
583 pThread->DetachThread(FALSE);
587 #ifdef FEATURE_COMINTEROP
588 pThread->RevokeApartmentSpy();
589 #endif // FEATURE_COMINTEROP
591 FastInterlockOr((ULONG *)&pThread->m_State, Thread::TS_Dead);
593 // ~Thread() calls SafeSetThrowables which has a conditional contract
594 // which says that if you call it with a NULL throwable then it is
595 // MODE_ANY, otherwise MODE_COOPERATIVE. Scan doesn't understand that
596 // and assumes that we're violating the MODE_COOPERATIVE.
597 CONTRACT_VIOLATION(ModeViolation);
// EnsurePreemptive: forces the current thread (if any) back to preemptive GC
// mode. Used below as the backout action of a StateHolder so an exception
// escaping SetupThread cannot leave the thread in cooperative mode.
603 void EnsurePreemptive()
606 Thread *pThread = GetThread();
607 if (pThread && pThread->PreemptiveGCDisabled())
609 pThread->EnablePreemptiveGC();
// Holder that runs EnsurePreemptive on destruction unless SuppressRelease()
// is called on the success path.
613 typedef StateHolder<DoNothing, EnsurePreemptive> EnsurePreemptiveModeIfException;
// SetupThread: creates (or finds) the runtime Thread object for the current
// OS thread, installs it into TLS, registers it in the thread store, and
// notifies the debugger/profiler/ETW. Returns the Thread*, or NULL on a
// failure path handled by the recycled-thread branch.
615 Thread* SetupThread(BOOL fInternal)
619 if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
// Fast path: this OS thread already has a Thread object installed.
625 if ((pThread = GetThread()) != NULL)
628 #ifdef FEATURE_STACK_PROBE
629 RetailStackProbe(ADJUST_PROBE(DEFAULT_ENTRY_PROBE_AMOUNT), NULL);
630 #endif //FEATURE_STACK_PROBE
632 CONTRACT_VIOLATION(SOToleranceViolation);
634 // For interop debugging, we must mark that we're in a can't-stop region
635 // b.c we may take Crsts here that may block the helper thread.
636 // We're especially fragile here b/c we don't have a Thread object yet
637 CantStopHolder hCantStop;
// If anything below throws, fall back to preemptive mode (see
// EnsurePreemptive above); released on the success paths.
639 EnsurePreemptiveModeIfException ensurePreemptive;
643 if (g_pConfig->SuppressChecks())
645 // EnterAssert will suppress any checks
650 // Normally, HasStarted is called from the thread's entrypoint to introduce it to
651 // the runtime. But sometimes that thread is used for DLL_THREAD_ATTACH notifications
652 // that call into managed code. In that case, a call to SetupThread here must
653 // find the correct Thread object and install it into TLS.
// Search the thread store for an unstarted Thread whose OS id matches this
// thread — i.e. a Thread created by SetupUnstartedThread that has not yet
// run HasStarted.
655 if (ThreadStore::s_pThreadStore->m_PendingThreadCount != 0)
657 DWORD  ourOSThreadId = ::GetCurrentThreadId();
659 ThreadStoreLockHolder TSLockHolder;
660 _ASSERTE(pThread == NULL);
661 while ((pThread = ThreadStore::s_pThreadStore->GetAllThreadList(pThread, Thread::TS_Unstarted | Thread::TS_FailStarted, Thread::TS_Unstarted)) != NULL)
663 if (pThread->GetOSThreadId() == ourOSThreadId)
671 STRESS_LOG2(LF_SYNC, LL_INFO1000, "T::ST - recycling thread 0x%p (state: 0x%x)\n", pThread, pThread->m_State.Load());
675 // It's perfectly reasonable to not find this guy. It's just an unrelated
676 // thread spinning up.
// Found a pending Thread: tag it with the right thread-pool state bits
// before completing its startup via HasStarted.
679 if (IsThreadPoolWorkerSpecialThread())
681 FastInterlockOr((ULONG *) &pThread->m_State, Thread::TS_TPWorkerThread);
682 pThread->SetBackground(TRUE);
684 else if (IsThreadPoolIOCompletionSpecialThread())
686 FastInterlockOr ((ULONG *) &pThread->m_State, Thread::TS_CompletionPortThread);
687 pThread->SetBackground(TRUE);
689 else if (IsTimerSpecialThread() || IsWaitSpecialThread())
691 FastInterlockOr((ULONG *) &pThread->m_State, Thread::TS_TPWorkerThread);
692 pThread->SetBackground(TRUE);
695 BOOL fStatus = pThread->HasStarted();
696 ensurePreemptive.SuppressRelease();
697 return fStatus ? pThread : NULL;
701 // First time we've seen this thread in the runtime:
702 pThread = new Thread();
704 // What state are we in here? COOP???
// Holder deletes the new Thread on any failure path until SuppressRelease.
706 Holder<Thread*,DoNothing<Thread*>,DeleteThread> threadHolder(pThread);
708 CExecutionEngine::SetupTLSForThread(pThread);
710 // A host can deny a thread entering runtime by returning a NULL IHostTask.
711 // But we do want threads used by threadpool.
712 if (IsThreadPoolWorkerSpecialThread() ||
713 IsThreadPoolIOCompletionSpecialThread() ||
714 IsTimerSpecialThread() ||
715 IsWaitSpecialThread())
720 if (!pThread->InitThread(fInternal) ||
721 !pThread->PrepareApartmentAndContext())
724 // reset any unstarted bits on the thread object
725 FastInterlockAnd((ULONG *) &pThread->m_State, ~Thread::TS_Unstarted);
726 FastInterlockOr((ULONG *) &pThread->m_State, Thread::TS_LegalToJoin);
728 ThreadStore::AddThread(pThread);
// Publish the new Thread (and its domain) into this OS thread's TLS.
730 BOOL fOK = SetThread(pThread);
732 fOK = SetAppDomain(pThread->GetDomain());
735 // We now have a Thread object visible to the RS. Unmark special status.
738 pThread->SetupThreadForHost();
740 threadHolder.SuppressRelease();
742 FastInterlockOr((ULONG *) &pThread->m_State, Thread::TS_FullyInitialized);
745 pThread->AddFiberInfo(Thread::ThreadTrackInfo_Lifetime);
748 #ifdef DEBUGGING_SUPPORTED
750 // If we're debugging, let the debugger know that this
751 // thread is up and running now.
753 if (CORDebuggerAttached())
755 g_pDebugInterface->ThreadCreated(pThread);
759 LOG((LF_CORDB, LL_INFO10000, "ThreadCreated() not called due to CORDebuggerAttached() being FALSE for thread 0x%x\n", pThread->GetThreadId()));
761 #endif // DEBUGGING_SUPPORTED
763 #ifdef PROFILING_SUPPORTED
764 // If a profiler is present, then notify the profiler that a
765 // thread has been created.
766 if (!IsGCSpecialThread())
768 BEGIN_PIN_PROFILER(CORProfilerTrackThreads());
771 g_profControlBlock.pProfInterface->ThreadCreated(
775 DWORD osThreadId = ::GetCurrentThreadId();
776 g_profControlBlock.pProfInterface->ThreadAssignedToOSThread(
777 (ThreadID)pThread, osThreadId);
780 #endif // PROFILING_SUPPORTED
782 _ASSERTE(!pThread->IsBackground()); // doesn't matter, but worth checking
783 pThread->SetBackground(TRUE);
785 ensurePreemptive.SuppressRelease();
// Tag thread-pool special threads just as on the recycled path above.
787 if (IsThreadPoolWorkerSpecialThread())
789 FastInterlockOr((ULONG *) &pThread->m_State, Thread::TS_TPWorkerThread);
791 else if (IsThreadPoolIOCompletionSpecialThread())
793 FastInterlockOr ((ULONG *) &pThread->m_State, Thread::TS_CompletionPortThread);
795 else if (IsTimerSpecialThread() || IsWaitSpecialThread())
797 FastInterlockOr((ULONG *) &pThread->m_State, Thread::TS_TPWorkerThread);
800 #ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
// Prime the CPU-usage baseline for AppDomain resource monitoring.
803 pThread->QueryThreadProcessorUsage();
805 #endif // FEATURE_APPDOMAIN_RESOURCE_MONITORING
807 #ifdef FEATURE_EVENT_TRACE
808 ETW::ThreadLog::FireThreadCreated(pThread);
809 #endif // FEATURE_EVENT_TRACE
814 //-------------------------------------------------------------------------
// Deliberate no-op kept only for binary compatibility; see comment below.
815 void STDMETHODCALLTYPE CorMarkThreadInThreadPool()
817 LIMITED_METHOD_CONTRACT;
818 BEGIN_ENTRYPOINT_VOIDRET;
819 END_ENTRYPOINT_VOIDRET;
821 // this is no longer needed after our switch to
822 // the Win32 threadpool.
823 // keeping in mscorwks for compat reasons and to keep rotor sscoree and
824 // mscoree consistent.
828 //-------------------------------------------------------------------------
829 // Public function: SetupUnstartedThread()
830 // This sets up a Thread object for an exposed System.Thread that
831 // has not been started yet.  This allows us to properly enumerate all threads
832 // in the ThreadStore, so we can report on even unstarted threads.  Clearly
833 // there is no physical thread to match, yet.
835 // When there is, complete the setup with code:Thread::HasStarted()
836 //-------------------------------------------------------------------------
// bRequiresTSL: whether AddThread must take the ThreadStore lock itself.
837 Thread* SetupUnstartedThread(BOOL bRequiresTSL)
841 if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
845 Thread* pThread = new Thread();
// Mark it unstarted and runtime-owned before publishing in the store.
847 FastInterlockOr((ULONG *) &pThread->m_State,
848 (Thread::TS_Unstarted | Thread::TS_WeOwn));
850 ThreadStore::AddThread(pThread, bRequiresTSL);
855 //-------------------------------------------------------------------------
856 // Public function: DestroyThread()
857 // Destroys the specified Thread object, for a thread which is about to die.
858 //-------------------------------------------------------------------------
// Must be called on the dying thread itself (asserted below). Cleans up
// abort state, EH trackers, and EventPipe buffers, then reports the thread
// dead via OnThreadTerminate.
859 void DestroyThread(Thread *th)
867 _ASSERTE (th == GetThread());
869 _ASSERTE(g_fEEShutDown || th->m_dwLockCount == 0 || th->m_fRudeAborted);
871 #ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
// Final CPU-usage sample is charged to the thread's domain before it dies.
874 AppDomain* pDomain = th->GetDomain();
875 pDomain->UpdateProcessorUsage(th->QueryThreadProcessorUsage());
876 FireEtwThreadTerminated((ULONGLONG)th, (ULONGLONG)pDomain, GetClrInstanceId());
878 #endif // FEATURE_APPDOMAIN_RESOURCE_MONITORING
882 GCX_PREEMP_NO_DTOR();
884 if (th->IsAbortRequested()) {
885 // Reset trapping count.
886 th->UnmarkThreadForAbort(Thread::TAR_ALL);
889 // Clear any outstanding stale EH state that maybe still active on the thread.
890 #ifdef WIN64EXCEPTIONS
// (void*)-1 means "pop all trackers regardless of stack bound".
891 ExceptionTracker::PopTrackers((void*)-1);
892 #else // !WIN64EXCEPTIONS
894 PTR_ThreadExceptionState pExState = th->GetExceptionState();
895 if (pExState->IsExceptionInProgress())
898 pExState->GetCurrentExceptionTracker()->UnwindExInfo((void *)-1);
900 #else // !_TARGET_X86_
901 #error Unsupported platform
902 #endif // _TARGET_X86_
903 #endif // WIN64EXCEPTIONS
905 #ifdef FEATURE_PERFTRACING
906 // Before the thread dies, mark its buffers as no longer owned
907 // so that they can be cleaned up after the thread dies.
908 EventPipeBufferList *pBufferList = th->GetEventPipeBufferList();
909 if(pBufferList != NULL)
911 pBufferList->SetOwnedByThread(false);
913 #endif // FEATURE_PERFTRACING
915 if (g_fEEShutDown == 0)
917 th->SetThreadState(Thread::TS_ReportDead);
918 th->OnThreadTerminate(FALSE);
922 //-------------------------------------------------------------------------
923 // Public function: DetachThread()
924 // Marks the thread as needing to be destroyed, but doesn't destroy it yet.
925 //-------------------------------------------------------------------------
// fDLLThreadDetach: TRUE when called from DLL_THREAD_DETACH, in which case
// COM-related teardown that the OS/COM will do itself is skipped.
// Actual destruction is deferred to the finalizer thread (see end of body).
926 HRESULT Thread::DetachThread(BOOL fDLLThreadDetach)
928 // !!! Can not use contract here.
929 // !!! Contract depends on Thread object for GC_TRIGGERS.
930 // !!! At the end of this function, we call InternalSwitchOut,
931 // !!! and then GetThread()=NULL, and dtor of contract does not work any more.
932 STATIC_CONTRACT_NOTHROW;
933 STATIC_CONTRACT_GC_NOTRIGGER;
935 // @todo .  We need to probe here, but can't introduce destructors etc.
936 BEGIN_CONTRACT_VIOLATION(SOToleranceViolation);
938 // Clear any outstanding stale EH state that maybe still active on the thread.
939 #ifdef WIN64EXCEPTIONS
// (void*)-1 means "pop all trackers regardless of stack bound".
940 ExceptionTracker::PopTrackers((void*)-1);
941 #else // !WIN64EXCEPTIONS
943 PTR_ThreadExceptionState pExState = GetExceptionState();
944 if (pExState->IsExceptionInProgress())
947 pExState->GetCurrentExceptionTracker()->UnwindExInfo((void *)-1);
949 #else // !_TARGET_X86_
950 #error Unsupported platform
951 #endif // _TARGET_X86_
952 #endif // WIN64EXCEPTIONS
954 #ifdef FEATURE_COMINTEROP
955 IErrorInfo *pErrorInfo;
956 // Avoid calling GetErrorInfo() if ole32 has already executed the DLL_THREAD_DETACH,
957 // otherwise we'll cause ole32 to re-allocate and leak its TLS data (SOleTlsData).
958 if (ClrTeb::GetOleReservedPtr() != NULL && GetErrorInfo(0, &pErrorInfo) == S_OK)
960 // if this is our IErrorInfo, release it now - we don't want ole32 to do it later as
961 // part of its DLL_THREAD_DETACH as we won't be able to handle the call at that point
962 if (!ComInterfaceSlotIs(pErrorInfo, 2, Unknown_ReleaseSpecial_IErrorInfo))
964 // if it's not our IErrorInfo, put it back
965 SetErrorInfo(0, pErrorInfo);
967 pErrorInfo->Release();
970 // Revoke our IInitializeSpy registration only if we are not in DLL_THREAD_DETACH
971 // (COM will do it or may have already done it automatically in that case).
972 if (!fDLLThreadDetach)
974 RevokeApartmentSpy();
976 #endif // FEATURE_COMINTEROP
// Sanity: preemptive mode, no locks held (outside shutdown), not already
// detached, and running on the thread being detached.
978 _ASSERTE(!PreemptiveGCDisabled());
979 _ASSERTE(g_fEEShutDown || m_dwLockCount == 0 || m_fRudeAborted);
981 _ASSERTE ((m_State & Thread::TS_Detached) == 0);
983 _ASSERTE (this == GetThread());
985 #ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
986 if (g_fEnableARM && m_pDomain)
988 m_pDomain->UpdateProcessorUsage(QueryThreadProcessorUsage());
989 FireEtwThreadTerminated((ULONGLONG)this, (ULONGLONG)m_pDomain, GetClrInstanceId());
991 #endif // FEATURE_APPDOMAIN_RESOURCE_MONITORING
995 FastInterlockIncrement(&Thread::m_DetachCount);
997 if (IsAbortRequested()) {
998 // Reset trapping count.
999 UnmarkThreadForAbort(Thread::TAR_ALL);
// Foreground-thread detach may allow EE shutdown to proceed.
1002 if (!IsBackground())
1004 FastInterlockIncrement(&Thread::m_ActiveDetachCount);
1005 ThreadStore::CheckForEEShutdown();
1008 END_CONTRACT_VIOLATION;
1010 InternalSwitchOut();
1012 #ifdef ENABLE_CONTRACTS_DATA
1013 m_pClrDebugState = NULL;
1014 #endif //ENABLE_CONTRACTS_DATA
1016 #ifdef FEATURE_PERFTRACING
1017 // Before the thread dies, mark its buffers as no longer owned
1018 // so that they can be cleaned up after the thread dies.
1019 EventPipeBufferList *pBufferList = m_pEventPipeBufferList.Load();
1020 if(pBufferList != NULL)
1022 pBufferList->SetOwnedByThread(false);
1024 #endif // FEATURE_PERFTRACING
1026 FastInterlockOr((ULONG*)&m_State, (int) (Thread::TS_Detached | Thread::TS_ReportDead));
1027 // Do not touch Thread object any more.  It may be destroyed.
1029 // These detached threads will be cleaned up by finalizer thread.  But if the process uses
1030 // little managed heap, it will be a while before GC happens, and finalizer thread starts
1031 // working on detached thread.  So we wake up finalizer thread to clean up resources.
1033 // (It's possible that this is the startup thread, and startup failed, and so the finalization
1034 //  machinery isn't fully initialized.  Hence this check.)
1036 FinalizerThread::EnableFinalization();
// GetRuntimeId: returns an identifier for this runtime instance.
// (Return expression is elided from this listing.)
1041 DWORD GetRuntimeId()
1043 LIMITED_METHOD_CONTRACT;
1048 //---------------------------------------------------------------------------
1049 // Creates new Thread for reverse p-invoke calls.
1050 //---------------------------------------------------------------------------
// Like SetupThread but raises a Win32 exception (EXCEPTION_EXX) on failure
// or during shutdown, since unmanaged reverse-P/Invoke callers cannot
// receive a managed exception.
1051 Thread* WINAPI CreateThreadBlockThrow()
1054 WRAPPER_NO_CONTRACT;
1056 // This is a workaround to disable our check for throwing exception in SetupThread.
1057 // We want to throw an exception for reverse p-invoke, and our assertion may fire if
1058 // a unmanaged caller does not setup an exception handler.
1059 CONTRACT_VIOLATION(ThrowsViolation); // WON'T FIX - This enables catastrophic failure exception in reverse P/Invoke - the only way we can communicate an error to legacy code.
1060 Thread* pThread = NULL;
1061 BEGIN_ENTRYPOINT_THROWS;
1063 if (!CanRunManagedCode())
1065 // CLR is shutting down - someone's DllMain detach event may be calling back into managed code.
1066 // It is misleading to use our COM+ exception code, since this is not a managed exception.
1067 ULONG_PTR arg = E_PROCESS_SHUTDOWN_REENTRY;
1068 RaiseException(EXCEPTION_EXX, 0, 1, &arg);
1072 pThread = SetupThreadNoThrow(&hr);
1073 if (pThread == NULL)
1075 // Creating Thread failed, and we need to throw an exception to report status.
1076 // It is misleading to use our COM+ exception code, since this is not a managed exception.
1078 RaiseException(EXCEPTION_EXX, 0, 1, &arg);
1080 END_ENTRYPOINT_THROWS;
// Hash-table size for objref tracking; randomized at startup in
// InitThreadManager to perturb hash collisions.
1086 DWORD_PTR Thread::OBJREF_HASH = OBJREF_TABSIZE;
// Markers bracketing the patchable JIT helper code region (see
// InitThreadManager, which makes this range writable).
1089 extern "C" void STDCALL JIT_PatchedCodeStart();
1090 extern "C" void STDCALL JIT_PatchedCodeLast();
1092 //---------------------------------------------------------------------------
1093 // One-time initialization. Called during Dll initialization. So
1094 // be careful what you do in here!
1095 //---------------------------------------------------------------------------
// Sets up the patched-code page protection, TLS bookkeeping for the
// debugger, the thread store, and the deadlock-detection lock.
1096 void InitThreadManager()
1104 InitializeYieldProcessorNormalizedCrst();
1106 // All patched helpers should fit into one page.
1107 // If you hit this assert on retail build, there is most likely problem with BBT script.
1108 _ASSERTE_ALL_BUILDS("clr/src/VM/threads.cpp", (BYTE*)JIT_PatchedCodeLast - (BYTE*)JIT_PatchedCodeStart < (ptrdiff_t)GetOsPageSize());
1110 // I am using virtual protect to cover the entire range that this code falls in.
1113 // We could reset it to non-writeable inbetween GCs and such, but then we'd have to keep on re-writing back and forth,
1114 // so instead we'll leave it writable from here forward.
1117 if (!ClrVirtualProtect((void *)JIT_PatchedCodeStart, (BYTE*)JIT_PatchedCodeLast - (BYTE*)JIT_PatchedCodeStart,
1118 PAGE_EXECUTE_READWRITE, &oldProt))
1120 _ASSERTE(!"ClrVirtualProtect of code page failed");
1121 COMPlusThrowWin32();
1126 _ASSERTE(GetThread() == NULL);
// Compute the TLS offset of gCurrentThreadInfo so the debugger can locate
// the current Thread* in any target thread.
1128 PTEB Teb = NtCurrentTeb();
1129 BYTE** tlsArray = (BYTE**)Teb->ThreadLocalStoragePointer;
1130 BYTE* tlsData = (BYTE*)tlsArray[_tls_index];
1132 size_t offsetOfCurrentThreadInfo = (BYTE*)&gCurrentThreadInfo - tlsData;
// Offset and index must fit the packed encoding used below.
1134 _ASSERTE(offsetOfCurrentThreadInfo < 0x8000);
1135 _ASSERTE(_tls_index < 0x10000);
1137 // Save gCurrentThreadInfo location for debugger
1138 g_TlsIndex = (DWORD)(_tls_index + (offsetOfCurrentThreadInfo << 16) + 0x80000000);
1140 _ASSERTE(g_TrapReturningThreads == 0);
1141 #endif // !FEATURE_PAL
1143 __ClrFlsGetBlock = CExecutionEngine::GetTlsData;
1145 IfFailThrow(Thread::CLRSetThreadStackGuarantee(Thread::STSGuarantee_Force));
1147 ThreadStore::InitThreadStore();
1149 // NOTE: CRST_UNSAFE_ANYMODE prevents a GC mode switch when entering this crst.
1150 // If you remove this flag, we will switch to preemptive mode when entering
1151 // g_DeadlockAwareCrst, which means all functions that enter it will become
1152 // GC_TRIGGERS.  (This includes all uses of CrstHolder.)  So be sure
1153 // to update the contracts if you remove this flag.
1154 g_DeadlockAwareCrst.Init(CrstDeadlockDetection, CRST_UNSAFE_ANYMODE);
1157 // Randomize OBJREF_HASH to handle hash collision.
1158 Thread::OBJREF_HASH = OBJREF_TABSIZE - (DbgGetEXETimeStamp()%10);
1161 ThreadSuspend::Initialize();
1165 //************************************************************************
1167 //************************************************************************
1170 #if defined(_DEBUG) && defined(TRACK_SYNC)
1172 // One outstanding synchronization held by this thread:
// Records which call site (m_caller) acquired which AwareLock.
1173 struct  Dbg_TrackSyncEntry
1176 AwareLock  *m_pAwareLock;
// Match on both acquiring call site and lock.
1178 BOOL        Equiv      (UINT_PTR caller, void *pAwareLock)
1180 LIMITED_METHOD_CONTRACT;
1182 return (m_caller == caller) && (m_pAwareLock == pAwareLock);
// Match on lock alone (used when releasing from a different call site).
1185 BOOL        Equiv      (void *pAwareLock)
1187 LIMITED_METHOD_CONTRACT;
1189 return (m_pAwareLock == pAwareLock);
1193 // Each thread has a stack that tracks all enter and leave requests
// Debug-only LIFO of lock acquisitions, used to flag out-of-order releases.
1194 struct Dbg_TrackSyncStack : public Dbg_TrackSync
1198 MAX_TRACK_SYNC = 20,       // adjust stack depth as necessary
1201 void    EnterSync  (UINT_PTR caller, void *pAwareLock);
1202 void    LeaveSync  (UINT_PTR caller, void *pAwareLock);
1204 Dbg_TrackSyncEntry  m_Stack [MAX_TRACK_SYNC];
1205 UINT_PTR            m_StackPointer;
1208 Dbg_TrackSyncStack() : m_StackPointer(0),
1211 LIMITED_METHOD_CONTRACT;
1215 // ensure that registers are preserved across this call
1217 #pragma optimize("", off)
1219 // A pain to do all this from ASM, but watch out for trashed registers
// Thin C entry points called from assembly to record lock enter/leave on
// the current thread's debug sync-tracking stack.
1220 EXTERN_C void EnterSyncHelper    (UINT_PTR caller, void *pAwareLock)
1222 BEGIN_ENTRYPOINT_THROWS;
1223 WRAPPER_NO_CONTRACT;
1224 GetThread()->m_pTrackSync->EnterSync(caller, pAwareLock);
1225 END_ENTRYPOINT_THROWS;
1228 EXTERN_C void LeaveSyncHelper    (UINT_PTR caller, void *pAwareLock)
1230 BEGIN_ENTRYPOINT_THROWS;
1231 WRAPPER_NO_CONTRACT;
1232 GetThread()->m_pTrackSync->LeaveSync(caller, pAwareLock);
1233 END_ENTRYPOINT_THROWS;
// Restore normal optimization settings after the register-sensitive helpers.
1237 #pragma optimize("", on)
// Dbg_TrackSyncStack::EnterSync: pushes (caller, lock) onto the tracking
// stack; disables tracking on overflow rather than failing.
1240 void Dbg_TrackSyncStack::EnterSync(UINT_PTR caller, void *pAwareLock)
1242 LIMITED_METHOD_CONTRACT;
1244 STRESS_LOG4(LF_SYNC, LL_INFO100, "Dbg_TrackSyncStack::EnterSync, IP=%p, Recursion=%u, LockState=%x, HoldingThread=%p.\n",
1246 ((AwareLock*)pAwareLock)->GetRecursionLevel(),
1247 ((AwareLock*)pAwareLock)->GetLockState(),
1248 ((AwareLock*)pAwareLock)->GetHoldingThread());
// Fixed-capacity stack: give up (assert) instead of overrunning m_Stack.
1252 if (m_StackPointer >= MAX_TRACK_SYNC)
1254 _ASSERTE(!"Overflowed synchronization stack checking.  Disabling");
1259 m_Stack[m_StackPointer].m_caller = caller;
1260 m_Stack[m_StackPointer].m_pAwareLock = (AwareLock *) pAwareLock;
// Dbg_TrackSyncStack::LeaveSync: pops the entry for pAwareLock. Normally it
// is the top of stack; otherwise the release is out of LIFO order, which is
// flagged and the entry is removed by shifting the stack down.
1266 void Dbg_TrackSyncStack::LeaveSync(UINT_PTR caller, void *pAwareLock)
1268 WRAPPER_NO_CONTRACT;
1270 STRESS_LOG4(LF_SYNC, LL_INFO100, "Dbg_TrackSyncStack::LeaveSync, IP=%p, Recursion=%u, LockState=%x, HoldingThread=%p.\n",
1272 ((AwareLock*)pAwareLock)->GetRecursionLevel(),
1273 ((AwareLock*)pAwareLock)->GetLockState(),
1274 ((AwareLock*)pAwareLock)->GetHoldingThread());
1278 if (m_StackPointer == 0)
1279 _ASSERTE(!"Underflow in leaving synchronization");
// Common case: releasing the most recently acquired lock.
1281 if (m_Stack[m_StackPointer - 1].Equiv(pAwareLock))
// Out-of-order release: find the entry and compact the stack over it.
1287 for (int i=m_StackPointer - 2; i>=0; i--)
1289 if (m_Stack[i].Equiv(pAwareLock))
1291 _ASSERTE(!"Locks are released out of order.  This might be okay...");
1292 memcpy(&m_Stack[i], &m_Stack[i+1],
1293 sizeof(m_Stack[0]) * (m_StackPointer - i - 1));
1298 _ASSERTE(!"Trying to release a synchronization lock which isn't held");
// Seed for per-thread hash codes; advanced by an LCG in the Thread ctor.
1306 static  DWORD dwHashCodeSeed = 123456789;
// Debug validity check for an AppDomain pointer: asserts unless at least one
// of the conditions selected by ADValidityKind holds (see note below — it is
// an OR of conditions, not an AND).
1309 void CheckADValidity(AppDomain* pDomain, DWORD ADValidityKind)
1321 // Note: this apparently checks if any one of the supplied conditions is satisified, rather
1322 // than checking that *all* of them are satisfied. One would have expected it to assert all of the
1323 // conditions but it does not.
1326 CONTRACT_VIOLATION(FaultViolation);
// Each satisfied condition returns early (returns elided in this listing);
// falling through all of them hits the assert at the bottom.
1327 if (::GetAppDomain()==pDomain)
1329 if ((ADValidityKind &  ADV_DEFAULTAD) &&
1330 pDomain->IsDefaultDomain())
1332 if ((ADValidityKind &  ADV_ITERATOR) &&
1333 pDomain->IsHeldByIterator())
1335 if ((ADValidityKind &  ADV_CREATING) &&
1336 pDomain->IsBeingCreated())
1338 if ((ADValidityKind &  ADV_COMPILATION) &&
1339 pDomain->IsCompilationDomain())
1341 if ((ADValidityKind &  ADV_FINALIZER) &&
1342 IsFinalizerThread())
1344 if ((ADValidityKind &  ADV_ADUTHREAD) &&
1345 IsADUnloadHelperThread())
1347 if ((ADValidityKind &  ADV_RUNNINGIN) &&
1348 pDomain->IsRunningIn(GetThread()))
1350 if ((ADValidityKind &  ADV_REFTAKER) &&
1351 pDomain->IsHeldByRefTaker())
1354 _ASSERTE(!"Appdomain* can be invalid");
1359 //--------------------------------------------------------------------
1360 // Thread construction
1361 //--------------------------------------------------------------------
// Thread constructor body (the signature line is elided in this listing).
// Performs only infallible-ish field initialization plus a few allocations
// guarded by holders; the holders' SuppressRelease() calls near the end commit
// ownership once construction is past the last failure point. Failable
// initialization is deferred to InitThread().
1366 if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
1370 m_pFrame = FRAME_TOP;
1371 m_pUnloadBoundaryFrame = NULL;
// New threads start in preemptive GC mode.
1373 m_fPreemptiveGCDisabled = 0;
1376 m_ulForbidTypeLoad = 0;
1377 m_GCOnTransitionsOK = TRUE;
1380 #ifdef ENABLE_CONTRACTS
1381 m_pClrDebugState = NULL;
1382 m_ulEnablePreemptiveGCCount = 0;
1385 // Initialize data members related to thread statics
1388 m_pThreadLocalBlock = NULL;
1391 m_dwBeginLockCount = 0;
1394 dbg_m_cSuspendedThreads = 0;
1395 dbg_m_cSuspendedThreadsWithoutOSLock = 0;
1397 m_dwUnbreakableLockCount = 0;
1400 m_dwForbidSuspendThread = 0;
1402 // Initialize lock state
// Embedded entry forms a self-referential circular list head.
1403 m_pHead = &m_embeddedEntry;
1404 m_embeddedEntry.pNext = m_pHead;
1405 m_embeddedEntry.pPrev = m_pHead;
1406 m_embeddedEntry.dwLLockID = 0;
1407 m_embeddedEntry.dwULockID = 0;
1408 m_embeddedEntry.wReaderLevel = 0;
1410 m_pBlockingLock = NULL;
1412 m_alloc_context.init();
1413 m_thAllocContextObj = 0;
1415 m_UserInterrupt = 0;
1416 m_WaitEventLink.m_Next = NULL;
1417 m_WaitEventLink.m_LinkSB.m_pNext = NULL;
1418 m_ThreadHandle = INVALID_HANDLE_VALUE;
1419 m_ThreadHandleForClose = INVALID_HANDLE_VALUE;
1420 m_ThreadHandleForResume = INVALID_HANDLE_VALUE;
1421 m_WeOwnThreadHandle = FALSE;
1424 m_ThreadId = UNINITIALIZED_THREADID;
1427 // Initialize this variable to a very different start value for each thread
1428 // Using linear congruential generator from Knuth Vol. 2, p. 102, line 24
1429 dwHashCodeSeed = dwHashCodeSeed * 1566083941 + 1;
1430 m_dwHashCodeSeed = dwHashCodeSeed;
1432 m_hijackLock = FALSE;
1435 m_Priority = INVALID_THREAD_PRIORITY;
// The runtime itself holds the initial external reference.
1436 m_ExternalRefCount = 1;
1437 m_UnmanagedRefCount = 0;
1438 m_State = TS_Unstarted;
1439 m_StateNC = TSNC_Unknown;
1441 // It can't be a LongWeakHandle because we zero stuff out of the exposed
1442 // object as it is finalized. At that point, calls to GetCurrentThread()
1443 // had better get a new one!
1444 m_ExposedObject = CreateGlobalShortWeakHandle(NULL);
// Holders release the handles if construction fails before SuppressRelease below.
1446 GlobalShortWeakHandleHolder exposedObjectHolder(m_ExposedObject);
1448 m_StrongHndToExposedObject = CreateGlobalStrongHandle(NULL);
1449 GlobalStrongHandleHolder strongHndToExposedObjectHolder(m_StrongHndToExposedObject);
1451 m_LastThrownObjectHandle = NULL;
1452 m_ltoIsUnhandled = FALSE;
1454 m_AbortReason = NULL;
1456 m_debuggerFilterContext = NULL;
1457 m_debuggerCantStop = 0;
1458 m_debuggerWord = NULL;
1459 m_fInteropDebuggingHijacked = FALSE;
1460 m_profilerCallbackState = 0;
1461 #ifdef FEATURE_PROFAPI_ATTACH_DETACH
1462 m_dwProfilerEvacuationCounter = 0;
1463 #endif // FEATURE_PROFAPI_ATTACH_DETACH
1465 m_pProfilerFilterContext = NULL;
// Stack bounds are discovered later (see InitThread/SetStackLimits).
1467 m_CacheStackBase = 0;
1468 m_CacheStackLimit = 0;
1469 m_CacheStackSufficientExecutionLimit = 0;
1471 m_LastAllowableStackAddress= 0;
1475 m_pCleanedStackBase = NULL;
1478 #ifdef STACK_GUARDS_DEBUG
1479 m_pCurrentStackGuard = NULL;
1482 #ifdef FEATURE_HIJACK
// 0xCC.. poison values: these must be set before a hijack ever reads them.
1483 m_ppvHJRetAddrPtr = (VOID**) 0xCCCCCCCCCCCCCCCC;
1484 m_pvHJRetAddr = (VOID*) 0xCCCCCCCCCCCCCCCC;
1486 #ifndef PLATFORM_UNIX
1487 X86_ONLY(m_LastRedirectIP = 0);
1488 X86_ONLY(m_SpinCount = 0);
1489 #endif // PLATFORM_UNIX
1490 #endif // FEATURE_HIJACK
1492 #if defined(_DEBUG) && defined(TRACK_SYNC)
1493 m_pTrackSync = new Dbg_TrackSyncStack;
1494 NewHolder<Dbg_TrackSyncStack> trackSyncHolder(static_cast<Dbg_TrackSyncStack*>(m_pTrackSync));
1495 #endif // TRACK_SYNC
1497 m_RequestedStackSize = 0;
1500 m_nNestedMarshalingExceptions = 0;
1502 #ifdef FEATURE_COMINTEROP
1503 m_fDisableComObjectEagerCleanup = false;
1504 #endif //FEATURE_COMINTEROP
1505 m_fHasDeadThreadBeenConsideredForGCTrigger = false;
1507 m_TraceCallCount = 0;
1508 m_ThrewControlForThread = 0;
1510 m_ThreadTasks = (ThreadTasks)0;
1511 m_pLoadLimiter= NULL;
1512 m_pLoadingFile = NULL;
1514 // The state and the tasks must be 32-bit aligned for atomicity to be guaranteed.
1515 _ASSERTE((((size_t) &m_State) & 3) == 0);
1516 _ASSERTE((((size_t) &m_ThreadTasks) & 3) == 0);
1518 // Track perf counter for the logical thread object.
1519 COUNTER_ONLY(GetPerfCounters().m_LocksAndThreads.cCurrentThreadsLogical++);
1521 // On all callbacks, call the trap code, which we now have
1522 // wired to cause a GC. Thus we will do a GC on all Transition Frame Transitions (and more).
1523 if (GCStress<cfg_transition>::IsEnabled())
1525 m_State = (ThreadState) (m_State | TS_GCOnTransitions);
1528 //m_pSharedStaticData = NULL;
1529 //m_pUnsharedStaticData = NULL;
1530 //m_pStaticDataHash = NULL;
1531 //m_pSDHCrst = NULL;
1533 m_fSecurityStackwalk = FALSE;
// Thread-abort machinery starts fully reset.
1535 m_AbortType = EEPolicy::TA_None;
1537 m_AbortEndTime = MAXULONGLONG;
1538 m_RudeAbortEndTime = MAXULONGLONG;
1539 m_AbortController = 0;
1540 m_AbortRequestLock = 0;
1541 m_fRudeAbortInitiated = FALSE;
1543 m_pIOCompletionContext = NULL;
1546 m_fRudeAborted = FALSE;
1550 m_pFiberData = NULL;
1552 m_TaskId = INVALID_TASK_ID;
1553 m_dwConnectionId = INVALID_CONNECTION_ID;
// Optional fiber-switch tracking buffers, enabled by config. One contiguous
// allocation is partitioned into ThreadTrackInfo_Max equal slices.
1556 DWORD_PTR *ttInfo = NULL;
1557 size_t nBytes = MaxThreadRecord *
1558 (sizeof(FiberSwitchInfo)-sizeof(size_t)+MaxStackDepth*sizeof(size_t));
1559 if (g_pConfig->SaveThreadInfo()) {
1560 ttInfo = new DWORD_PTR[(nBytes/sizeof(DWORD_PTR))*ThreadTrackInfo_Max];
1561 memset(ttInfo,0,nBytes*ThreadTrackInfo_Max);
1563 for (DWORD i = 0; i < ThreadTrackInfo_Max; i ++)
1565 m_FiberInfoIndex[i] = 0;
1566 m_pFiberInfo[i] = (FiberSwitchInfo*)((DWORD_PTR)ttInfo + i*nBytes);
1568 NewArrayHolder<DWORD_PTR> fiberInfoHolder(ttInfo);
1571 m_OSContext = new CONTEXT();
1572 NewHolder<CONTEXT> contextHolder(m_OSContext);
1574 m_pSavedRedirectContext = NULL;
1575 NewHolder<CONTEXT> savedRedirectContextHolder(m_pSavedRedirectContext);
1577 #ifdef FEATURE_COMINTEROP
1578 m_pRCWStack = new RCWStackHeader();
1581 m_pCerPreparationState = NULL;
1583 m_bGCStressing = FALSE;
1584 m_bUniqueStacking = FALSE;
1587 m_pPendingTypeLoad = NULL;
1589 #ifdef FEATURE_PREJIT
1593 m_dwAVInRuntimeImplOkayCount = 0;
1595 #if defined(HAVE_GCCOVER) && defined(USE_REDIRECT_FOR_GCSTRESS) && !defined(PLATFORM_UNIX) // GCCOVER
1596 m_fPreemptiveGCDisabledForGCStress = false;
// -1 sentinel: list not yet initialized (distinct from NULL/empty).
1600 m_pHelperMethodFrameCallerList = (HelperMethodFrameCallerList*)-1;
1603 m_dwHostTaskRefCount = 0;
1605 m_pExceptionDuringStartup = NULL;
1608 m_pbDestCode = NULL;
1611 m_pLastAVAddress = NULL;
1612 #endif // _TARGET_X86_
1613 #endif // HAVE_GCCOVER
1615 m_fCompletionPortDrained = FALSE;
1617 m_debuggerActivePatchSkipper = NULL;
1618 m_dwThreadHandleBeingUsed = 0;
1619 SetProfilerCallbacksAllowed(TRUE);
1621 m_pCreatingThrowableForException = NULL;
1623 m_dwDisableAbortCheckCount = 0;
1626 #ifdef WIN64EXCEPTIONS
1627 m_dwIndexClauseForCatch = 0;
1628 m_sfEstablisherOfActualHandlerFrame.Clear();
1629 #endif // WIN64EXCEPTIONS
1631 m_threadPoolCompletionCount = 0;
// pThread is the CREATING thread (may be NULL for early/internal creation,
// judging by the two SetKickOffDomainId branches below).
1633 Thread *pThread = GetThread();
1634 _ASSERTE(SystemDomain::System()->DefaultDomain()->GetDefaultContext());
1636 _ASSERTE(m_Context);
1639 _ASSERTE(pThread->GetDomain() && pThread->GetDomain()->GetDefaultContext());
1640 // Start off the new thread in the default context of
1641 // the creating thread's appDomain. This could be changed by SetDelegate
1642 SetKickOffDomainId(pThread->GetDomain()->GetId());
1644 SetKickOffDomainId((ADID)DefaultADID);
1646 // Do not expose thread until it is fully constructed
1647 g_pThinLockThreadIdDispenser->NewId(this, this->m_ThreadId);
1650 // DO NOT ADD ADDITIONAL CONSTRUCTION AFTER THIS POINT.
1651 // NewId() allows this Thread instance to be accessed via a Thread Id. Do not
1652 // add additional construction after this point to prevent the race condition
1653 // of accessing a partially constructed Thread via Thread Id lookup.
// Construction succeeded: commit ownership of everything the holders guard.
1656 exposedObjectHolder.SuppressRelease();
1657 strongHndToExposedObjectHolder.SuppressRelease();
1658 #if defined(_DEBUG) && defined(TRACK_SYNC)
1659 trackSyncHolder.SuppressRelease();
1662 fiberInfoHolder.SuppressRelease();
1664 contextHolder.SuppressRelease();
1665 savedRedirectContextHolder.SuppressRelease();
1667 managedThreadCurrentCulture = NULL;
1668 managedThreadCurrentUICulture = NULL;
1670 #ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
1671 m_ullProcessorUsageBaseline = 0;
1672 #endif // FEATURE_APPDOMAIN_RESOURCE_MONITORING
1674 #ifdef FEATURE_COMINTEROP
1675 m_uliInitializeSpyCookie.QuadPart = 0ul;
1676 m_fInitializeSpyRegistered = false;
1677 m_pLastSTACtxCookie = NULL;
1678 #endif // FEATURE_COMINTEROP
1680 m_fGCSpecial = FALSE;
1683 m_pAffinityMask = 0;
1685 m_pAllLoggedTypes = NULL;
1687 #ifdef FEATURE_PERFTRACING
1688 m_pEventPipeBufferList = NULL;
1689 m_eventWriteInProgress = false;
1690 memset(&m_activityId, 0, sizeof(m_activityId));
1691 #endif // FEATURE_PERFTRACING
1692 m_HijackReturnKind = RT_Illegal;
1695 //--------------------------------------------------------------------
1696 // Failable initialization occurs here.
1697 //--------------------------------------------------------------------
// Second-phase, failable initialization of a Thread: duplicates/records the
// OS thread handle, creates per-thread events, establishes stack limits, and
// commits the stack. Throws (COMPlusThrowWin32 / asserts) on failure rather
// than returning FALSE — see the trailing assert on `ret`.
1698 BOOL Thread::InitThread(BOOL fInternal)
1702 if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
1707 HANDLE hDup = INVALID_HANDLE_VALUE;
1710 // This message actually serves a purpose (which is why it is always run)
1711 // The Stress log is run during hijacking, when other threads can be suspended
1712 // at arbitrary locations (including when holding a lock that NT uses to serialize
1713 // all memory allocations). By sending a message now, we ensure that the stress
1714 // log will not allocate memory at these critical times and avoid deadlock.
1715 STRESS_LOG2(LF_ALWAYS, LL_ALWAYS, "SetupThread managed Thread %p Thread Id = %x\n", this, GetThreadId());
1717 if ((m_State & TS_WeOwn) == 0)
1719 COUNTER_ONLY(GetPerfCounters().m_LocksAndThreads.cRecognizedThreads++);
1723 COUNTER_ONLY(GetPerfCounters().m_LocksAndThreads.cCurrentThreadsPhysical++);
1727 // workaround: Remove this when we flow impersonation token to host.
1728 BOOL reverted = FALSE;
1729 HANDLE threadToken = INVALID_HANDLE_VALUE;
1730 #endif // !FEATURE_PAL
1732 if (m_ThreadHandle == INVALID_HANDLE_VALUE)
1734 // For WinCE, all clients have the same handle for a thread. Duplication is
1735 // not possible. We make sure we never close this handle unless we created
1736 // the thread (TS_WeOwn).
1738 // For Win32, each client has its own handle. This is achieved by duplicating
1739 // the pseudo-handle from ::GetCurrentThread(). Unlike WinCE, this service
1740 // returns a pseudo-handle which is only useful for duplication. In this case
1741 // each client is responsible for closing its own (duplicated) handle.
1743 // We don't bother duplicating if WeOwn, because we created the handle in the
1745 // Thread is created when or after the physical thread started running
1746 HANDLE curProcess = ::GetCurrentProcess();
1750 // If we're impersonating on NT, then DuplicateHandle(GetCurrentThread()) is going to give us a handle with only
1751 // THREAD_TERMINATE, THREAD_QUERY_INFORMATION, and THREAD_SET_INFORMATION. This doesn't include
1752 // THREAD_SUSPEND_RESUME nor THREAD_GET_CONTEXT. We need to be able to suspend the thread, and we need to be
1753 // able to get its context. Therefore, if we're impersonating, we revert to self, dup the handle, then
1754 // re-impersonate before we leave this routine.
1755 if (!RevertIfImpersonated(&reverted, &threadToken))
1757 COMPlusThrowWin32();
// RAII guard: re-impersonates (UndoRevert) and closes the token on all exits.
1760 class EnsureResetThreadToken
1764 HANDLE m_threadToken;
1766 EnsureResetThreadToken(HANDLE threadToken, BOOL reverted)
1768 m_threadToken = threadToken;
1769 m_NeedReset = reverted;
1771 ~EnsureResetThreadToken()
1773 UndoRevert(m_NeedReset, m_threadToken);
1774 if (m_threadToken != INVALID_HANDLE_VALUE)
1776 CloseHandle(m_threadToken);
1781 EnsureResetThreadToken resetToken(threadToken, reverted);
1783 #endif // !FEATURE_PAL
// Duplicate a real (non-pseudo) handle with full access for this Thread.
1785 if (::DuplicateHandle(curProcess, ::GetCurrentThread(), curProcess, &hDup,
1786 0 /*ignored*/, FALSE /*inherit*/, DUPLICATE_SAME_ACCESS))
1788 _ASSERTE(hDup != INVALID_HANDLE_VALUE);
1790 SetThreadHandle(hDup);
1791 m_WeOwnThreadHandle = TRUE;
1795 COMPlusThrowWin32();
1799 if ((m_State & TS_WeOwn) == 0)
1801 if (!AllocHandles())
1807 _ASSERTE(HasValidThreadHandle());
1811 // Set floating point mode to round to nearest
1813 (void) _controlfp_s( NULL, _RC_NEAR, _RC_CHOP|_RC_UP|_RC_DOWN|_RC_NEAR );
1815 m_pTEB = (struct _NT_TIB*)NtCurrentTeb();
1817 #endif // !FEATURE_PAL
// Discover the stack bounds the first time this thread enters the runtime.
1819 if (m_CacheStackBase == 0)
1821 _ASSERTE(m_CacheStackLimit == 0);
1822 _ASSERTE(m_LastAllowableStackAddress == 0);
1823 _ASSERTE(m_ProbeLimit == 0);
1824 ret = SetStackLimits(fAll);
1830 // We commit the thread's entire stack when it enters the runtime to allow us to be reliable in low memory
1831 // situations. See the comments in front of Thread::CommitThreadStack() for more information.
1832 ret = Thread::CommitThreadStack(this);
1839 ret = Thread::AllocateIOCompletionContext();
1845 _ASSERTE(ret); // every failure case for ret should throw.
1849 // Allocate all the handles. When we are kicking of a new thread, we can call
1850 // here before the thread starts running.
// Creates the per-thread manual-reset events (debug-suspend and wait events).
// On failure, any partially-created event is closed in the catch path.
1851 BOOL Thread::AllocHandles()
1853 WRAPPER_NO_CONTRACT;
// Must not be called twice: both events must be uninitialized on entry.
1855 _ASSERTE(!m_DebugSuspendEvent.IsValid());
1856 _ASSERTE(!m_EventWait.IsValid());
1860 // create a manual reset event for getting the thread to a safe point
1861 m_DebugSuspendEvent.CreateManualEvent(FALSE);
// Wait event starts signaled (TRUE).
1862 m_EventWait.CreateManualEvent(TRUE);
// Cleanup path (presumably inside an EX_CATCH, given EX_END_CATCH below):
// close whichever events were created before the failure.
1867 if (!m_DebugSuspendEvent.IsValid()) {
1868 m_DebugSuspendEvent.CloseEvent();
1871 if (!m_EventWait.IsValid()) {
1872 m_EventWait.CloseEvent();
1875 EX_END_CATCH(RethrowTerminalExceptions);
1881 //--------------------------------------------------------------------
1882 // This is the alternate path to SetupThread/InitThread. If we created
1883 // an unstarted thread, we have SetupUnstartedThread/HasStarted.
1884 //--------------------------------------------------------------------
// Introduces an unstarted Thread to the runtime from its own entrypoint:
// sets up TLS, stack limits, apartment/context, registers with the
// ThreadStore, and fires debugger/profiler/ETW notifications. On failure,
// caches the exception in m_pExceptionDuringStartup (rethrown later by
// Thread.Start) and unwinds the partial registration.
1885 BOOL Thread::HasStarted(BOOL bRequiresTSL)
1889 DISABLED(GC_NOTRIGGER);
1894 // @todo need a probe that tolerates not having a thread setup at all
1895 CONTRACT_VIOLATION(SOToleranceViolation);
1897 _ASSERTE(!m_fPreemptiveGCDisabled); // can't use PreemptiveGCDisabled() here
1899 // This is cheating a little. There is a pathway here from SetupThread, but only
1900 // via IJW SystemDomain::RunDllMain. Normally SetupThread returns a thread in
1901 // preemptive mode, ready for a transition. But in the IJW case, it can return a
1902 // cooperative mode thread. RunDllMain handles this "surprise" correctly.
1903 m_fPreemptiveGCDisabled = TRUE;
1905 // Normally, HasStarted is called from the thread's entrypoint to introduce it to
1906 // the runtime. But sometimes that thread is used for DLL_THREAD_ATTACH notifications
1907 // that call into managed code. In that case, the second HasStarted call is
1908 // redundant and should be ignored.
1909 if (GetThread() == this)
1913 _ASSERTE(GetThread() == 0);
1914 _ASSERTE(HasValidThreadHandle());
1916 BOOL fKeepTLS = FALSE;
1917 BOOL fCanCleanupCOMState = FALSE;
1920 res = SetStackLimits(fAll);
// OOM is recorded rather than thrown here; Thread.Start surfaces it.
1923 m_pExceptionDuringStartup = Exception::GetOOMException();
1927 // We commit the thread's entire stack when it enters the runtime to allow us to be reliable in low memory
1928 // situations. See the comments in front of Thread::CommitThreadStack() for more information.
1929 res = Thread::CommitThreadStack(this);
1932 m_pExceptionDuringStartup = Exception::GetOOMException();
1936 // If any exception happens during HasStarted, we will cache the exception in Thread::m_pExceptionDuringStartup
1937 // which will be thrown in Thread.Start as an internal exception
1941 // Initialization must happen in the following order - hosts like SQL Server depend on this.
1943 CExecutionEngine::SetupTLSForThread(this);
// From this point on, a failure must also undo COM apartment initialization.
1945 fCanCleanupCOMState = TRUE;
1946 res = PrepareApartmentAndContext();
1954 if (SetThread(this) == FALSE)
1959 if (SetAppDomain(m_pDomain) == FALSE)
1965 AddFiberInfo(Thread::ThreadTrackInfo_Lifetime);
1968 SetupThreadForHost();
// Moves this thread from the unstarted list to the started set in the store.
1971 ThreadStore::TransferStartedThread(this, bRequiresTSL);
1973 #ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
1976 QueryThreadProcessorUsage();
1978 #endif // FEATURE_APPDOMAIN_RESOURCE_MONITORING
1979 #ifdef FEATURE_EVENT_TRACE
1980 ETW::ThreadLog::FireThreadCreated(this);
1981 #endif // FEATURE_EVENT_TRACE
// EX_CATCH path: keep the caught exception alive and stash it for Thread.Start.
1985 if (__pException != NULL)
1987 __pException.SuppressRelease();
1988 m_pExceptionDuringStartup = __pException;
1992 EX_END_CATCH(SwallowAllExceptions);
// ---- Failure unwind ----
1997 if (m_fPreemptiveGCDisabled)
1999 m_fPreemptiveGCDisabled = FALSE;
2001 _ASSERTE (HasThreadState(TS_Unstarted));
2003 SetThreadState(TS_FailStarted);
2005 if (GetThread() != NULL && IsAbortRequested())
2006 UnmarkThreadForAbort(TAR_ALL);
2010 #ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
2012 // Undo our call to PrepareApartmentAndContext above, so we don't leak a CoInitialize
2013 // If we're keeping TLS, then the host's call to ExitTask will clean this up instead.
2015 if (fCanCleanupCOMState)
2017 // The thread pointer in TLS may not be set yet, if we had a failure before we set it.
2018 // So we'll set it up here (we'll unset it a few lines down).
2019 if (SetThread(this) != FALSE)
2025 FastInterlockDecrement(&ThreadStore::s_pThreadStore->m_PendingThreadCount);
2026 // One of the components of OtherThreadsComplete() has changed, so check whether
2027 // we should now exit the EE.
2028 ThreadStore::CheckForEEShutdown();
2029 DecExternalCount(/*holdingLock*/ !bRequiresTSL);
// ---- Success path ----
2036 FastInterlockOr((ULONG *) &m_State, TS_FullyInitialized);
2038 #ifdef DEBUGGING_SUPPORTED
2040 // If we're debugging, let the debugger know that this
2041 // thread is up and running now.
2043 if (CORDebuggerAttached())
2045 g_pDebugInterface->ThreadCreated(this);
2049 LOG((LF_CORDB, LL_INFO10000, "ThreadCreated() not called due to CORDebuggerAttached() being FALSE for thread 0x%x\n", GetThreadId()));
2052 #endif // DEBUGGING_SUPPORTED
2054 #ifdef PROFILING_SUPPORTED
2055 // If a profiler is running, let them know about the new thread.
2057 // The call to IsGCSpecial is crucial to avoid a deadlock. See code:Thread::m_fGCSpecial for more
2061 BEGIN_PIN_PROFILER(CORProfilerTrackThreads());
2062 BOOL gcOnTransition = GC_ON_TRANSITIONS(FALSE); // disable GCStress 2 to avoid the profiler receiving a RuntimeThreadSuspended notification even before the ThreadCreated notification
2066 g_profControlBlock.pProfInterface->ThreadCreated((ThreadID) this);
2069 GC_ON_TRANSITIONS(gcOnTransition);
2071 DWORD osThreadId = ::GetCurrentThreadId();
2072 g_profControlBlock.pProfInterface->ThreadAssignedToOSThread(
2073 (ThreadID) this, osThreadId);
2076 #endif // PROFILING_SUPPORTED
2078 // CoreCLR does not support user-requested thread suspension
2079 _ASSERTE(!(m_State & TS_SuspendUnstarted));
// Allocates this thread's IOCompletionContext (non-throwing new) and stores
// it in m_pIOCompletionContext with lpOverlapped cleared. Presumably returns
// FALSE on allocation failure (the branch lines are elided in this listing).
2085 BOOL Thread::AllocateIOCompletionContext()
2087 WRAPPER_NO_CONTRACT;
2088 PIOCompletionContext pIOC = new (nothrow) IOCompletionContext;
2092 pIOC->lpOverlapped = NULL;
2093 m_pIOCompletionContext = pIOC;
// Releases the thread's IOCompletionContext, if any, and clears the member
// so the free is idempotent.
2102 VOID Thread::FreeIOCompletionContext()
2104 WRAPPER_NO_CONTRACT;
2105 if (m_pIOCompletionContext != NULL)
2107 PIOCompletionContext pIOC = (PIOCompletionContext) m_pIOCompletionContext;
2109 m_pIOCompletionContext = NULL;
// Converts a cached startup failure (m_pExceptionDuringStartup) into a managed
// ThreadStartException and raises it on the current (managed) thread. The
// original native exception becomes the inner "reason" and is then freed.
2113 void Thread::HandleThreadStartupFailure()
2123 _ASSERTE(GetThread() != NULL);
2127 OBJECTREF pThrowable;
2130 memset(&args, 0, sizeof(ProtectArgs));
// GC-protect the two object refs while we allocate and call the ctor.
2132 GCPROTECT_BEGIN(args);
2134 MethodTable *pMT = MscorlibBinder::GetException(kThreadStartException);
2135 args.pThrowable = AllocateObject(pMT);
2137 MethodDescCallSite exceptionCtor(METHOD__THREAD_START_EXCEPTION__EX_CTOR);
2139 if (m_pExceptionDuringStartup)
2141 args.pReason = CLRException::GetThrowableFromException(m_pExceptionDuringStartup);
// The cached native exception is owned here; delete it once converted.
2142 Exception::Delete(m_pExceptionDuringStartup);
2143 m_pExceptionDuringStartup = NULL;
2146 ARG_SLOT args1[] = {
2147 ObjToArgSlot(args.pThrowable),
2148 ObjToArgSlot(args.pReason),
2150 exceptionCtor.Call(args1);
2152 GCPROTECT_END(); //Prot
2154 RaiseTheExceptionInternalOnly(args.pThrowable, FALSE);
// If the current thread is impersonating, reverts to the process identity and
// returns the impersonation token via *phToken so UndoRevert can restore it.
// *bReverted reports whether RevertToSelf was actually performed.
2158 BOOL RevertIfImpersonated(BOOL *bReverted, HANDLE *phToken)
2160 WRAPPER_NO_CONTRACT;
2162 BOOL bImpersonated = OpenThreadToken(GetCurrentThread(), // we are assuming that if this call fails,
2163 TOKEN_IMPERSONATE, // we are not impersonating. There is no win32
2164 TRUE, // api to figure this out. The only alternative
2165 phToken); // is to use NtCurrentTeb->IsImpersonating().
2168 *bReverted = RevertToSelf();
// Restores the impersonation token saved by RevertIfImpersonated. Failure to
// re-impersonate is treated as fatal (security risk of running with the wrong
// identity), hence EEPOLICY_HANDLE_FATAL_ERROR.
2175 void UndoRevert(BOOL bReverted, HANDLE hToken)
2179 if (!SetThreadToken(NULL, hToken))
2181 _ASSERT("Undo Revert -> SetThreadToken failed");
2182 STRESS_LOG1(LF_EH, LL_INFO100, "UndoRevert/SetThreadToken failed for hToken = %d\n",hToken);
2183 EEPOLICY_HANDLE_FATAL_ERROR(COR_E_SECURITY);
2188 #endif // !FEATURE_PAL
2191 // We don't want ::CreateThread() calls scattered throughout the source. So gather
// Central wrapper for creating a runtime-owned OS thread: marks the Thread as
// CLR-created, reverts impersonation around the OS call (non-PAL), delegates
// to CreateNewOSThread, then names the new thread.
2194 BOOL Thread::CreateNewThread(SIZE_T stackSize, LPTHREAD_START_ROUTINE start, void *args, LPCWSTR pName)
2203 //This assert is here to prevent a bug in the future
2204 // CreateTask currently takes a DWORD and we will downcast
2205 // if that interface changes to take a SIZE_T this Assert needs to be removed.
2207 _ASSERTE(stackSize <= 0xFFFFFFFF);
2211 BOOL bReverted = FALSE;
// Create the thread with the process identity, not the impersonated one.
2212 bRet = RevertIfImpersonated(&bReverted, &token);
2215 #endif // !FEATURE_PAL
2217 m_StateNC = (ThreadStateNoConcurrency)((ULONG)m_StateNC | TSNC_CLRCreatedThread);
2218 bRet = CreateNewOSThread(stackSize, start, args);
2220 UndoRevert(bReverted, token);
2221 SetThreadName(m_ThreadHandle, pName);
2222 #endif // !FEATURE_PAL
2228 // This is to avoid the 64KB/1MB aliasing problem present on Pentium 4 processors,
2229 // which can significantly impact performance with HyperThreading enabled
// Trampoline thread entry: offsets each thread's initial stack pointer by a
// rotating _alloca amount (so stacks don't alias in the cache), then unpacks
// the intermediateThreadParam and tail-calls the real start routine.
2230 DWORD WINAPI Thread::intermediateThreadProc(PVOID arg)
2232 WRAPPER_NO_CONTRACT;
// m_offset_counter rotates across thread creations, wrapping at one OS page.
2235 if (m_offset_counter * offset_multiplier > (int) GetOsPageSize())
2236 m_offset_counter = 0;
// Deliberately unused allocation — only the stack-pointer shift matters.
2238 (void)_alloca(m_offset_counter * offset_multiplier);
2240 intermediateThreadParam* param = (intermediateThreadParam*)arg;
2242 LPTHREAD_START_ROUTINE ThreadFcnPtr = param->lpThreadFunction;
2243 PVOID args = param->lpArg;
2246 return ThreadFcnPtr(args);
// Creates a raw OS utility thread (no Thread object) with a stack size chosen
// from a small/medium/large bucket. Unknown buckets assert and then use the
// large size (the default case appears to fall through to StackSize_Large;
// break statements are elided in this listing). Optionally returns the new
// thread id via pThreadId.
2249 HANDLE Thread::CreateUtilityThread(Thread::StackSizeBucket stackSizeBucket, LPTHREAD_START_ROUTINE start, void *args, DWORD flags, DWORD* pThreadId)
2251 LIMITED_METHOD_CONTRACT;
2253 // TODO: we should always use small stacks for most of these threads. For CLR 4, we're being conservative
2254 // here because this is a last-minute fix.
2258 switch (stackSizeBucket)
2260 case StackSize_Small:
2261 stackSize = 256 * 1024;
2264 case StackSize_Medium:
2265 stackSize = 512 * 1024;
2269 _ASSERTE(!"Bad stack size bucket");
2270 case StackSize_Large:
2271 stackSize = 1024 * 1024;
// Treat stackSize as a reservation, not a commit, for the OS call below.
2275 flags |= STACK_SIZE_PARAM_IS_A_RESERVATION;
2278 HANDLE hThread = CreateThread(NULL, stackSize, start, args, flags, &threadId);
2281 *pThreadId = threadId;
// Reports the stack reserve/commit sizes recorded in the PE header of the EXE
// that started the process. Values are read once and cached in function
// statics; if the PE can't be read, a 256 KB fallback is returned for both.
// Either out-parameter may be NULL.
2287 BOOL Thread::GetProcessDefaultStackSize(SIZE_T* reserveSize, SIZE_T* commitSize)
2297 // Let's get the stack sizes from the PE file that started process.
2299 static SIZE_T ExeSizeOfStackReserve = 0;
2300 static SIZE_T ExeSizeOfStackCommit = 0;
2302 static BOOL fSizesGot = FALSE;
2307 HINSTANCE hInst = WszGetModuleHandle(NULL);
2308 _ASSERTE(hInst); // WszGetModuleHandle should never fail on the module that started the process.
2311 PEDecoder pe(hInst);
2312 pe.GetEXEStackSizes(&ExeSizeOfStackReserve, &ExeSizeOfStackCommit);
2319 EX_END_CATCH(SwallowAllExceptions);
2321 #endif // !FEATURE_PAL
// Fallback when the sizes could not be obtained from the PE header.
2324 //return some somewhat-reasonable numbers
2325 if (NULL != reserveSize) *reserveSize = 256*1024;
2326 if (NULL != commitSize) *commitSize = 256*1024;
2330 if (NULL != reserveSize) *reserveSize = ExeSizeOfStackReserve;
2331 if (NULL != commitSize) *commitSize = ExeSizeOfStackCommit;
// Creates the underlying OS thread (suspended) for this Thread object:
// adjusts the requested stack size, allocates the trampoline parameter block,
// allocates per-thread handles, calls ::CreateThread with
// intermediateThreadProc, records the new OS thread id, and bumps the
// ThreadStore pending count. The thread is NOT resumed here.
2335 BOOL Thread::CreateNewOSThread(SIZE_T sizeToCommitOrReserve, LPTHREAD_START_ROUTINE start, void *args)
2345 DWORD dwCreationFlags = CREATE_SUSPENDED;
2347 dwCreationFlags |= STACK_SIZE_PARAM_IS_A_RESERVATION;
2349 #ifndef FEATURE_PAL // the PAL does its own adjustments as necessary
2350 if (sizeToCommitOrReserve != 0 && sizeToCommitOrReserve <= GetOsPageSize())
2352 // On Windows, passing a value that is <= one page size bizarrely causes the OS to use the default stack size instead of
2353 // a minimum, which is undesirable. This adjustment fixes that issue to use a minimum stack size (typically 64 KB).
2354 sizeToCommitOrReserve = GetOsPageSize() + 1;
2356 #endif // !FEATURE_PAL
2358 intermediateThreadParam* lpThreadArgs = new (nothrow) intermediateThreadParam;
2359 if (lpThreadArgs == NULL)
2363 NewHolder<intermediateThreadParam> argHolder(lpThreadArgs);
2365 // Make sure we have all our handles, in case someone tries to suspend us
2366 // as we are starting up.
2367 if (!AllocHandles())
2369 // OS is out of handles/memory?
2373 lpThreadArgs->lpThreadFunction = start;
2374 lpThreadArgs->lpArg = args;
2376 h = ::CreateThread(NULL /*=SECURITY_ATTRIBUTES*/,
2377 sizeToCommitOrReserve,
2378 intermediateThreadProc,
// Success: the new thread now owns the parameter block; don't free it here.
2386 argHolder.SuppressRelease();
2388 _ASSERTE(!m_fPreemptiveGCDisabled); // leave in preemptive until HasStarted.
2391 m_WeOwnThreadHandle = TRUE;
2393 // Before we do the resume, we need to take note of the new ThreadId. This
2394 // is necessary because -- before the thread starts executing at KickofThread --
2395 // it may perform some DllMain DLL_THREAD_ATTACH notifications. These could
2396 // call into managed code. During the consequent SetupThread, we need to
2397 // perform the Thread::HasStarted call instead of going through the normal
2398 // 'new thread' pathway.
2399 _ASSERTE(GetOSThreadId() == 0);
2400 _ASSERTE(ourId != 0);
2402 m_OSThreadId = ourId;
2404 FastInterlockIncrement(&ThreadStore::s_pThreadStore->m_PendingThreadCount);
2407 m_Creater.SetToCurrentThread();
2414 // #threadDestruction
2416 // General comments on thread destruction.
2418 // The C++ Thread object can survive beyond the time when the Win32 thread has died.
2419 // This is important if an exposed object has been created for this thread. The
2420 // exposed object will survive until it is GC'ed.
2422 // A client like an exposed object can place an external reference count on that
2423 // object. We also place a reference count on it when we construct it, and we lose
2424 // that count when the thread finishes doing useful work (OnThreadTerminate).
2426 // One way OnThreadTerminate() is called is when the thread finishes doing useful
2427 // work. This case always happens on the correct thread.
2429 // The other way OnThreadTerminate() is called is during product shutdown. We do
2430 // a "best effort" to eliminate all threads except the Main thread before shutdown
2431 // happens. But there may be some background threads or external threads still
2434 // When the final reference count disappears, we destruct. Until then, the thread
2435 // remains in the ThreadStore, but is marked as "Dead".
2437 // @TODO cwb: for a typical shutdown, only background threads are still around.
2438 // Should we interrupt them? What about the non-typical shutdown?
// Atomically increments the external reference count on this Thread. If a
// managed caller is present and the exposed managed Thread object exists,
// ensures a strong handle pins it so it survives while externally referenced.
// Returns the post-increment count.
2440 int Thread::IncExternalCount()
2444 if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
2448 Thread *pCurThread = GetThread();
// Must already hold at least one reference to be able to add another.
2450 _ASSERTE(m_ExternalRefCount > 0);
2451 int retVal = FastInterlockIncrement((LONG*)&m_ExternalRefCount);
2452 // If we have an exposed object and the refcount is greater than one
2453 // we must make sure to keep a strong handle to the exposed object
2454 // so that we keep it alive even if nobody has a reference to it.
2455 if (pCurThread && ((*((void**)m_ExposedObject)) != NULL))
2457 // The exposed object exists and needs a strong handle so check
2458 // to see if it has one.
2459 // Only a managed thread can setup StrongHnd.
2460 if ((*((void**)m_StrongHndToExposedObject)) == NULL)
2463 // Store the object in the strong handle.
2464 StoreObjectInHandle(m_StrongHndToExposedObject, ObjectFromHandle(m_ExposedObject));
// Atomically decrements the external reference count, taking the ThreadStore
// lock (unless the caller already holds it) to coordinate with handle and
// exposed-object manipulation. When the count hits zero and the OS thread has
// terminated, the Thread's resources are torn down (possibly deleting `this`
// — see SelfDelete). When the count drops to exactly 1 the strong handle to
// the exposed object is released so the managed object alone can keep itself
// alive. Returns the post-decrement count.
2471 int Thread::DecExternalCount(BOOL holdingLock)
2475 if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
2479 // Note that it's possible to get here with a NULL current thread (during
2480 // shutdown of the thread manager).
2481 Thread *pCurThread = GetThread();
// Caller's claimed lock state must match reality (debug check).
2482 _ASSERTE (pCurThread == NULL || IsAtProcessExit()
2483 || (!holdingLock && !ThreadStore::HoldingThreadStore(pCurThread))
2484 || (holdingLock && ThreadStore::HoldingThreadStore(pCurThread)));
2486 BOOL ToggleGC = FALSE;
2487 BOOL SelfDelete = FALSE;
2491 // Must synchronize count and exposed object handle manipulation. We use the
2492 // thread lock for this, which implies that we must be in pre-emptive mode
2493 // to begin with and avoid any activity that would invoke a GC (this
2494 // acquires the thread store lock).
2497 // TODO: we would prefer to use a GC Holder here, however it is hard
2498 // to get the case where we're deleting this thread correct given
2499 // the current macros. We want to suppress the release of the holder
2500 // here which puts us in Preemptive mode, and also the switch to
2501 // Cooperative mode below, but since both holders will be named
2502 // the same thing (due to the generic nature of the macro) we can
2503 // not use GCX_*_SUPRESS_RELEASE() for 2 holders in the same scope
2504 // b/c they will both apply simply to the most narrowly scoped
// Remember the entry GC mode so it can be restored on the way out.
2507 ToggleGC = pCurThread->PreemptiveGCDisabled();
2510 pCurThread->EnablePreemptiveGC();
2514 GCX_ASSERT_PREEMP();
// Acquire the ThreadStore lock only if the caller doesn't already hold it.
2516 ThreadStoreLockHolder tsLock(!holdingLock);
2518 _ASSERTE(m_ExternalRefCount >= 1);
2519 _ASSERTE(!holdingLock ||
2520 ThreadStore::s_pThreadStore->m_Crst.GetEnterCount() > 0 ||
2523 retVal = FastInterlockDecrement((LONG*)&m_ExternalRefCount);
// ---- Count reached zero: release the thread handle and tear down ----
2527 HANDLE h = GetThreadHandle();
2528 if (h == INVALID_HANDLE_VALUE)
2530 h = m_ThreadHandleForClose;
2531 m_ThreadHandleForClose = INVALID_HANDLE_VALUE;
2533 // Can not assert like this. We have already removed the Unstarted bit.
2534 //_ASSERTE (IsUnstarted() || h != INVALID_HANDLE_VALUE);
2535 if (h != INVALID_HANDLE_VALUE && m_WeOwnThreadHandle)
2538 SetThreadHandle(INVALID_HANDLE_VALUE);
2540 // Switch back to cooperative mode to manipulate the thread.
2543 // TODO: we would prefer to use GCX_COOP here, see comment above.
2544 pCurThread->DisablePreemptiveGC();
2549 // during process detach the thread might still be in the thread list
2550 // if it hasn't seen its DLL_THREAD_DETACH yet. Use the following
2551 // tweak to decide if the thread has terminated yet.
2552 if (!HasValidThreadHandle())
// `this` may be destroyed below; note whether it is the current thread.
2554 SelfDelete = this == pCurThread;
2555 m_ExceptionState.FreeAllStackTraces();
2559 AddFiberInfo(ThreadTrackInfo_Lifetime);
2567 // It only makes sense to restore the GC mode if we didn't just destroy
2568 // our own thread object.
2569 if (pCurThread && !SelfDelete && !ToggleGC)
2571 pCurThread->EnablePreemptiveGC();
2574 // Cannot use this here b/c it creates a holder named the same as GCX_ASSERT_COOP
2575 // in the same scope above...
2577 // GCX_ASSERT_PREEMP()
2581 else if (pCurThread == NULL)
2583 // We're in shutdown, too late to be worrying about having a strong
2584 // handle to the exposed thread object, we've already performed our
2592 // Check to see if the external ref count reaches exactly one. If this
2593 // is the case and we have an exposed object then it is that exposed object
2594 // that is holding a reference to us. To make sure that we are not the
2595 // ones keeping the exposed object alive we need to remove the strong
2596 // reference we have to it.
2597 if ((retVal == 1) && ((*((void**)m_StrongHndToExposedObject)) != NULL))
2599 // Switch back to cooperative mode to manipulate the object.
2601 // Don't want to switch back to COOP until we let go of the lock
2602 // however we are allowed to call StoreObjectInHandle here in preemptive
2603 // mode because we are setting the value to NULL.
2604 CONTRACT_VIOLATION(ModeViolation);
2606 // Clear the handle and leave the lock.
2607 // We do not have to to DisablePreemptiveGC here, because
2608 // we just want to put NULL into a handle.
2609 StoreObjectInHandle(m_StrongHndToExposedObject, NULL);
2613 // Switch back to the initial GC mode.
2616 pCurThread->DisablePreemptiveGC();
2627 // Switch back to the initial GC mode.
2630 pCurThread->DisablePreemptiveGC();
2638 //--------------------------------------------------------------------
2639 // Destruction. This occurs after the associated native thread
2641 //--------------------------------------------------------------------
// NOTE(review): the destructor signature (Thread::~Thread, original line
// ~2643) falls on a line elided from this listing; the statements below are
// its body. It releases every per-thread native resource: sync-wait links,
// the thread-store entry, OS handle, events, redirect context, exposed-object
// handles, the thin-lock thread id, fiber info, and the ETW type cache.
2646 if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
2650 // TODO: enable this
2651 //_ASSERTE(GetThread() != this);
2652 _ASSERTE(m_ThrewControlForThread == 0);
2654 // AbortRequest is coupled with TrapReturningThread.
2655 // We should have unmarked the thread for abort.
2656 // !!! Can not assert here. If a thread has no managed code on stack
2657 // !!! we leave the g_TrapReturningThread set so that the thread will be
2658 // !!! aborted if it enters managed code.
2659 //_ASSERTE(!IsAbortRequested());
2661 // We should not have the Thread marked for abort. But if we have
2662 // we need to unmark it so that g_TrapReturningThreads is decremented.
2663 if (IsAbortRequested())
2665 UnmarkThreadForAbort(TAR_ALL);
2668 #if defined(_DEBUG) && defined(TRACK_SYNC)
2669 _ASSERTE(IsAtProcessExit() || ((Dbg_TrackSyncStack *) m_pTrackSync)->m_StackPointer == 0);
2670 delete m_pTrackSync;
2671 #endif // TRACK_SYNC
2673 _ASSERTE(IsDead() || IsUnstarted() || IsAtProcessExit());
// Drain any SyncBlock wait-queue entries this thread still sits on, and
// return their wait events to the event store for reuse.
2675 if (m_WaitEventLink.m_Next != NULL && !IsAtProcessExit())
2677 WaitEventLink *walk = &m_WaitEventLink;
2678 while (walk->m_Next) {
2679 ThreadQueue::RemoveThread(this, (SyncBlock*)((DWORD_PTR)walk->m_Next->m_WaitSB & ~1));
2680 StoreEventToEventStore (walk->m_Next->m_EventWait);
2682 m_WaitEventLink.m_Next = NULL;
2685 if (m_StateNC & TSNC_ExistInThreadStore) {
2687 ret = ThreadStore::RemoveThread(this);
// Poison the frame chain pointer so stale use after destruction is caught.
2692 m_pFrame = (Frame *)POISONC;
2695 // Update Perfmon counters.
2696 COUNTER_ONLY(GetPerfCounters().m_LocksAndThreads.cCurrentThreadsLogical--);
2698 // Current recognized threads are non-runtime threads that are alive and ran under the
2699 // runtime. Check whether this Thread was one of them.
2700 if ((m_State & TS_WeOwn) == 0)
2702 COUNTER_ONLY(GetPerfCounters().m_LocksAndThreads.cRecognizedThreads--);
2706 COUNTER_ONLY(GetPerfCounters().m_LocksAndThreads.cCurrentThreadsPhysical--);
2709 // Normally we shouldn't get here with a valid thread handle; however if SetupThread
2710 // failed (due to an OOM for example) then we need to CloseHandle the thread
2711 // handle if we own it.
2712 if (m_WeOwnThreadHandle && (GetThreadHandle() != INVALID_HANDLE_VALUE))
2714 CloseHandle(GetThreadHandle());
2717 if (m_DebugSuspendEvent.IsValid())
2719 m_DebugSuspendEvent.CloseEvent();
2721 if (m_EventWait.IsValid())
2723 m_EventWait.CloseEvent();
2726 FreeIOCompletionContext();
2731 if (GetSavedRedirectContext())
2733 delete GetSavedRedirectContext();
2734 SetSavedRedirectContext(NULL);
2737 #ifdef FEATURE_COMINTEROP
2742 if (m_pExceptionDuringStartup)
2744 Exception::Delete (m_pExceptionDuringStartup);
2749 if (!IsAtProcessExit())
2751 // Destroy any handles that we're using to hold onto exception objects
2752 SafeSetThrowables(NULL);
2754 DestroyShortWeakHandle(m_ExposedObject);
2755 DestroyStrongHandle(m_StrongHndToExposedObject);
// Give the thin-lock thread id back to the dispenser so it can be reused.
2758 g_pThinLockThreadIdDispenser->DisposeId(GetThreadId());
2760 //Ensure DeleteThreadStaticData was executed
2761 _ASSERTE(m_pThreadLocalBlock == NULL);
2762 _ASSERTE(m_pTLBTable == NULL);
2763 _ASSERTE(m_TLBTableSize == 0);
2766 #ifdef FEATURE_PREJIT
2773 if (m_pFiberInfo != NULL) {
2774 delete [] (DWORD_PTR*)m_pFiberInfo[0];
2778 #ifdef FEATURE_EVENT_TRACE
2779 // Destruct the thread local type cache for allocation sampling
2780 if(m_pAllLoggedTypes) {
2781 ETW::TypeSystemLog::DeleteTypeHashNoLock(&m_pAllLoggedTypes);
2783 #endif // FEATURE_EVENT_TRACE
2785 // Wait for another thread to leave its loop in DeadlockAwareLock::TryBeginEnterLock
// Acquire/release of g_DeadlockAwareCrst acts as a rendezvous: once we hold
// it, no other thread can still be inside TryBeginEnterLock touching us.
2786 CrstHolder lock(&g_DeadlockAwareCrst);
2789 #ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
2791 void Thread::BaseCoUninitialize()
// Balances a prior CoInitialize on this thread. Must run on the thread
// being uninitialized (asserted below), in preemptive GC mode.
// NOTE(review): the opening brace and the uninitialize call itself sit on
// lines elided from this listing; the call executes inside the SO-tolerant
// region below.
2793 STATIC_CONTRACT_THROWS;
2794 STATIC_CONTRACT_GC_TRIGGERS;
2795 STATIC_CONTRACT_SO_INTOLERANT;
2796 STATIC_CONTRACT_MODE_PREEMPTIVE;
2798 _ASSERTE(GetThread() == this);
2800 BEGIN_SO_TOLERANT_CODE(this);
2801 // BEGIN_SO_TOLERANT_CODE wraps a __try/__except around this call, so if the OS were to allow
2802 // an exception to leak through to us, we'll catch it.
2804 END_SO_TOLERANT_CODE;
2806 }// BaseCoUninitialize
2808 #ifdef FEATURE_COMINTEROP
// Balances a prior Windows Runtime initialization on this thread. Only
// legal when WinRT is supported, on the thread itself, and only if this
// thread actually initialized WinRT (all asserted below).
2809 void Thread::BaseWinRTUninitialize()
2811 STATIC_CONTRACT_THROWS;
2812 STATIC_CONTRACT_GC_TRIGGERS;
2813 STATIC_CONTRACT_SO_INTOLERANT;
2814 STATIC_CONTRACT_MODE_PREEMPTIVE;
2816 _ASSERTE(WinRTSupported());
2817 _ASSERTE(GetThread() == this);
2818 _ASSERTE(IsWinRTInitialized());
2820 BEGIN_SO_TOLERANT_CODE(this);
// NOTE(review): the uninitialize call itself is on a line elided from this
// listing, inside the SO-tolerant region.
2822 END_SO_TOLERANT_CODE;
2824 #endif // FEATURE_COMINTEROP
2826 void Thread::CoUninitialize()
// Undoes this thread's COM (and, with COM interop, WinRT) initialization
// and clears the corresponding thread-state bits. Skipped entirely at
// process exit, where the OS reclaims apartment state anyway.
2834 // Running threads might have performed a CoInitialize which must
2836 BOOL needsUninitialize = IsCoInitialized()
2837 #ifdef FEATURE_COMINTEROP
2838 || IsWinRTInitialized()
2839 #endif // FEATURE_COMINTEROP
2842 if (!IsAtProcessExit() && needsUninitialize)
2845 CONTRACT_VIOLATION(ThrowsViolation);
2847 if (IsCoInitialized())
2849 BaseCoUninitialize();
// Clear TS_CoInitialized atomically; other threads read m_State concurrently.
2850 FastInterlockAnd((ULONG *)&m_State, ~TS_CoInitialized);
2853 #ifdef FEATURE_COMINTEROP
2854 if (IsWinRTInitialized())
2856 _ASSERTE(WinRTSupported());
2857 BaseWinRTUninitialize();
2858 ResetWinRTInitialized();
2860 #endif // FEATURE_COMINTEROP
2863 #endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT
2865 void Thread::CleanupDetachedThreads()
// Static sweep over the thread store that destroys Thread objects whose OS
// threads have detached (TS_Detached, no remaining unmanaged references) and
// drops the last external count on threads whose managed Thread object was
// finalized (TS_Finalized). Caller must NOT already hold the thread store
// lock (asserted); the lock is taken here and may be released/reacquired
// around the debugger notification in OnThreadTerminate.
2873 _ASSERTE(!ThreadStore::HoldingThreadStore());
2875 ThreadStoreLockHolder threadStoreLockHolder;
2877 Thread *thread = ThreadStore::GetAllThreadList(NULL, 0, 0);
2879 STRESS_LOG0(LF_SYNC, LL_INFO1000, "T::CDT called\n");
2881 while (thread != NULL)
2883 Thread *next = ThreadStore::GetAllThreadList(thread, 0, 0);
2885 if (thread->IsDetached() && thread->m_UnmanagedRefCount == 0)
2887 STRESS_LOG1(LF_SYNC, LL_INFO1000, "T::CDT - detaching thread 0x%p\n", thread);
2889 // Unmark that the thread is detached while we have the
2890 // thread store lock. This will ensure that no other
2891 // thread will race in here and try to delete it, too.
2892 FastInterlockAnd((ULONG*)&(thread->m_State), ~TS_Detached);
2893 FastInterlockDecrement(&m_DetachCount);
2894 if (!thread->IsBackground())
2895 FastInterlockDecrement(&m_ActiveDetachCount);
2897 // If the debugger is attached, then we need to unlock the
2898 // thread store before calling OnThreadTerminate. That
2899 // way, we won't be holding the thread store lock if we
2900 // need to block sending a detach thread event.
2901 BOOL debuggerAttached =
2902 #ifdef DEBUGGING_SUPPORTED
2903 CORDebuggerAttached();
2904 #else // !DEBUGGING_SUPPORTED
// NOTE(review): the non-debugger constant (original line 2905) is elided
// from this listing.
2906 #endif // !DEBUGGING_SUPPORTED
2908 if (debuggerAttached)
2909 ThreadStore::UnlockThreadStore();
// OnThreadTerminate(holdingLock): pass TRUE only when we still hold the lock.
2911 thread->OnThreadTerminate(debuggerAttached ? FALSE : TRUE);
2913 #ifdef DEBUGGING_SUPPORTED
2914 if (debuggerAttached)
2916 ThreadSuspend::LockThreadStore(ThreadSuspend::SUSPEND_OTHER);
2918 // We remember the next Thread in the thread store
2919 // list before deleting the current one. But we can't
2920 // use that Thread pointer now that we release the
2921 // thread store lock in the middle of the loop. We
2922 // have to start from the beginning of the list every
2923 // time. If two threads T1 and T2 race into
2924 // CleanupDetachedThreads, then T1 will grab the first
2925 // Thread on the list marked for deletion and release
2926 // the lock. T2 will grab the second one on the
2927 // list. T2 may complete destruction of its Thread,
2928 // then T1 might re-acquire the thread store lock and
2929 // try to use the next Thread in the thread store. But
2930 // T2 just deleted that next Thread.
2931 thread = ThreadStore::GetAllThreadList(NULL, 0, 0);
2934 #endif // DEBUGGING_SUPPORTED
2939 else if (thread->HasThreadState(TS_Finalized))
2941 STRESS_LOG1(LF_SYNC, LL_INFO1000, "T::CDT - finalized thread 0x%p\n", thread);
2943 thread->ResetThreadState(TS_Finalized);
2944 // We have finalized the managed Thread object. Now it is time to clean up the unmanaged part
2945 thread->DecExternalCount(TRUE);
2954 s_fCleanFinalizedThread = FALSE;
2957 #ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
2959 void Thread::CleanupCOMState()
// Tears down this thread's COM state at thread death: for STA threads,
// first releases RCWs cached for the current context, then balances any
// CoInitialize / WinRT initialization the thread performed, resetting the
// corresponding state bits.
2963 if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
2967 #ifdef FEATURE_COMINTEROP
2968 if (GetFinalApartment() == Thread::AS_InSTA)
2969 ReleaseRCWsInCachesNoThrow(GetCurrentCtxCookie());
2970 #endif // FEATURE_COMINTEROP
2972 // Running threads might have performed a CoInitialize which must
2973 // now be balanced. However only the thread that called CoInitialize can
2974 // call CoUninitialize.
2976 BOOL needsUninitialize = IsCoInitialized()
2977 #ifdef FEATURE_COMINTEROP
2978 || IsWinRTInitialized()
2979 #endif // FEATURE_COMINTEROP
2982 if (needsUninitialize)
2985 CONTRACT_VIOLATION(ThrowsViolation);
2987 if (IsCoInitialized())
2989 BaseCoUninitialize();
2990 ResetCoInitialized();
2993 #ifdef FEATURE_COMINTEROP
2994 if (IsWinRTInitialized())
2996 _ASSERTE(WinRTSupported());
2997 BaseWinRTUninitialize();
2998 ResetWinRTInitialized();
3000 #endif // FEATURE_COMINTEROP
3003 #endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT
3005 // See general comments on thread destruction (code:#threadDestruction) above.
3006 void Thread::OnThreadTerminate(BOOL holdingLock)
3010 if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
3014 // #ReportDeadOnThreadTerminate
3015 // Caller should have put the TS_ReportDead bit on by now.
3016 // We don't want any windows after the exit event but before the thread is marked dead.
3017 // If a debugger attached during such a window (or even took a dump at the exit event),
3018 // then it may not realize the thread is dead.
3019 // So ensure we mark the thread as dead before we send the tool notifications.
3020 // The TS_ReportDead bit will cause the debugger to view this as TS_Dead.
3021 _ASSERTE(HasThreadState(TS_ReportDead));
3023 // Should not use OSThreadId:
3024 // OSThreadId may change for the current thread is the thread is blocked and rescheduled
3026 Thread *pCurrentThread = GetThread();
3027 DWORD CurrentThreadID = pCurrentThread?pCurrentThread->GetThreadId():0;
3028 DWORD ThisThreadID = GetThreadId();
3030 #ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
3031 // If the currently running thread is the thread that died and it is an STA thread, then we
3032 // need to release all the RCW's in the current context. However, we cannot do this if we
3033 // are in the middle of process detach.
3034 if (!IsAtProcessExit() && this == GetThread())
3038 #endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT
3040 if (g_fEEShutDown != 0)
3042 // We have started shutdown. Not safe to touch CLR state.
3046 // We took a count during construction, and we rely on the count being
3047 // non-zero as we terminate the thread here.
3048 _ASSERTE(m_ExternalRefCount > 0);
3050 // The thread is no longer running. It's important that we zero any general OBJECTHANDLE's
3051 // on this Thread object. That's because we need the managed Thread object to be subject to
3052 // GC and yet any HANDLE is opaque to the GC when it comes to collecting cycles. If e.g. the
3053 // Thread's AbortReason (which is an arbitrary object) contains transitively a reference back
3054 // to the Thread, then we have an uncollectible cycle. When the thread is executing, nothing
3055 // can be collected anyway. But now that we stop running the cycle concerns us.
3057 // It's important that we only use OBJECTHANDLE's that are retrievable while the thread is
3058 // still running. That's what allows us to zero them here with impunity:
3060 // No handles to clean up in the m_ExceptionState
3061 _ASSERTE(!m_ExceptionState.IsExceptionInProgress());
3065 // Destroy the LastThrown handle (and anything that violates the above assert).
3066 SafeSetThrowables(NULL);
3068 // Cleaning up the AbortReason is tricky, since the handle is only valid if the ADID is valid
3069 // ...and we can only perform this operation if other threads aren't racing to update these
3070 // values on our thread asynchronously.
3073 // Free all structures related to thread statics for this thread
3074 DeleteThreadStaticData();
3078 if (GCHeapUtilities::IsGCHeapInitialized())
3080 // Guaranteed to NOT be a shutdown case, because we tear down the heap before
3081 // we tear down any threads during shutdown.
3082 if (ThisThreadID == CurrentThreadID)
3085 GCHeapUtilities::GetGCHeap()->FixAllocContext(&m_alloc_context, false, NULL, NULL);
3086 m_alloc_context.init();
3090 // We switch a thread to dead when it has finished doing useful work. But it
3091 // remains in the thread store so long as someone keeps it alive. An exposed
3092 // object will do this (it releases the refcount in its finalizer). If the
3093 // thread is never released, we have another look during product shutdown and
3094 // account for the unreleased refcount of the uncollected exposed object:
3099 _ASSERTE(IsAtProcessExit());
3101 if (m_ExposedObject != NULL)
3102 DecExternalCount(holdingLock); // may destruct now
3106 #ifdef DEBUGGING_SUPPORTED
3108 // If we're debugging, let the debugger know that this thread is
3111 // There is a race here where the debugger could have attached after
3112 // we checked (and thus didn't release the lock). In this case,
3113 // we can't call out to the debugger or we risk a deadlock.
3115 if (!holdingLock && CORDebuggerAttached())
3117 g_pDebugInterface->DetachThread(this);
3119 #endif // DEBUGGING_SUPPORTED
3121 #ifdef PROFILING_SUPPORTED
3122 // If a profiler is present, then notify the profiler of thread destroy
3124 BEGIN_PIN_PROFILER(CORProfilerTrackThreads());
3126 g_profControlBlock.pProfInterface->ThreadDestroyed((ThreadID) this);
3129 #endif // PROFILING_SUPPORTED
3133 LOG((LF_SYNC, INFO3, "OnThreadTerminate obtain lock\n"));
3134 ThreadSuspend::LockThreadStore(ThreadSuspend::SUSPEND_OTHER);
3138 if (GCHeapUtilities::IsGCHeapInitialized() && ThisThreadID != CurrentThreadID)
3140 // We must be holding the ThreadStore lock in order to clean up alloc context.
3141 // We should never call FixAllocContext during GC.
3142 GCHeapUtilities::GetGCHeap()->FixAllocContext(&m_alloc_context, false, NULL, NULL);
3143 m_alloc_context.init();
3146 FastInterlockOr((ULONG *) &m_State, TS_Dead);
3147 ThreadStore::s_pThreadStore->m_DeadThreadCount++;
3148 ThreadStore::s_pThreadStore->IncrementDeadThreadCountForGCTrigger();
3151 ThreadStore::s_pThreadStore->m_UnstartedThreadCount--;
3155 ThreadStore::s_pThreadStore->m_BackgroundThreadCount--;
3158 FastInterlockAnd((ULONG *) &m_State, ~(TS_Unstarted | TS_Background));
3161 // If this thread was told to trip for debugging between the
3162 // sending of the detach event above and the locking of the
3163 // thread store lock, then remove the flag and decrement the
3164 // global trap returning threads count.
3166 if (!IsAtProcessExit())
3168 // A thread can't die during a GCPending, because the thread store's
3169 // lock is held by the GC thread.
3170 if (m_State & TS_DebugSuspendPending)
3171 UnmarkForSuspension(~TS_DebugSuspendPending);
3173 // CoreCLR does not support user-requested thread suspension
3174 _ASSERTE(!(m_State & TS_UserSuspendPending));
3176 if (CurrentThreadID == ThisThreadID && IsAbortRequested())
3178 UnmarkThreadForAbort(Thread::TAR_ALL);
3182 if (GetThreadHandle() != INVALID_HANDLE_VALUE)
3184 if (m_ThreadHandleForClose == INVALID_HANDLE_VALUE)
3186 m_ThreadHandleForClose = GetThreadHandle();
3188 SetThreadHandle (INVALID_HANDLE_VALUE);
3193 // If nobody else is holding onto the thread, we may destruct it here:
3194 ULONG oldCount = DecExternalCount(TRUE);
3195 // If we are shutting down the process, we only have one thread active in the
3196 // system. So we can disregard all the reasons that hold this thread alive --
3197 // TLS is about to be reclaimed anyway.
3198 if (IsAtProcessExit())
3199 while (oldCount > 0)
3201 oldCount = DecExternalCount(TRUE);
3204 // ASSUME THAT THE THREAD IS DELETED, FROM HERE ON
3206 _ASSERTE(ThreadStore::s_pThreadStore->m_ThreadCount >= 0);
3207 _ASSERTE(ThreadStore::s_pThreadStore->m_BackgroundThreadCount >= 0);
3208 _ASSERTE(ThreadStore::s_pThreadStore->m_ThreadCount >=
3209 ThreadStore::s_pThreadStore->m_BackgroundThreadCount);
3210 _ASSERTE(ThreadStore::s_pThreadStore->m_ThreadCount >=
3211 ThreadStore::s_pThreadStore->m_UnstartedThreadCount);
3212 _ASSERTE(ThreadStore::s_pThreadStore->m_ThreadCount >=
3213 ThreadStore::s_pThreadStore->m_DeadThreadCount);
3215 // One of the components of OtherThreadsComplete() has changed, so check whether
3216 // we should now exit the EE.
3217 ThreadStore::CheckForEEShutdown();
3219 if (ThisThreadID == CurrentThreadID)
3221 // NULL out the thread block in the tls. We can't do this if we aren't on the
3222 // right thread. But this will only happen during a shutdown. And we've made
3223 // a "best effort" to reduce to a single thread before we begin the shutdown.
3230 LOG((LF_SYNC, INFO3, "OnThreadTerminate releasing lock\n"));
3231 ThreadSuspend::UnlockThreadStore(ThisThreadID == CurrentThreadID);
3236 // Helper functions to check for duplicate handles. we only do this check if
3237 // a waitfor multiple fails.
3238 int __cdecl compareHandles( const void *arg1, const void *arg2 )
3246 HANDLE h1 = *(HANDLE*)arg1;
3247 HANDLE h2 = *(HANDLE*)arg2;
3248 return (h1 == h2) ? 0 : ((h1 < h2) ? -1 : 1);
3251 BOOL CheckForDuplicateHandles(int countHandles, HANDLE *handles)
3259 qsort(handles,countHandles,sizeof(HANDLE),compareHandles);
3260 for (int i=1; i < countHandles; i++)
3262 if (handles[i-1] == handles[i])
3267 //--------------------------------------------------------------------
3268 // Based on whether this thread has a message pump, do the appropriate
3270 //--------------------------------------------------------------------
3271 DWORD Thread::DoAppropriateWait(int countHandles, HANDLE *handles, BOOL waitAll,
3272 DWORD millis, WaitMode mode, PendingSync *syncState)
// Wrapper over DoAppropriateWaitWorker that restores a pending SyncBlock
// wait state (Object.Wait) on the way out, even when the worker throws.
// NOTE(review): the local Param struct declaration and its pThis/mode field
// assignments (original lines ~3280-3295) are elided from this listing.
3274 STATIC_CONTRACT_THROWS;
3275 STATIC_CONTRACT_GC_TRIGGERS;
3277 INDEBUG(BOOL alertable = (mode & WaitMode_Alertable) != 0;);
3278 _ASSERTE(alertable || syncState == 0);
3291 param.countHandles = countHandles;
3292 param.handles = handles;
3293 param.waitAll = waitAll;
3294 param.millis = millis;
3296 param.dwRet = (DWORD) -1;
3298 EE_TRY_FOR_FINALLY(Param *, pParam, &param) {
3299 pParam->dwRet = pParam->pThis->DoAppropriateWaitWorker(pParam->countHandles, pParam->handles, pParam->waitAll, pParam->millis, pParam->mode);
// Restore(FALSE): the signalling thread already removed us from the
// syncblk wait list; Restore(TRUE): we must remove ourselves.
3303 if (!GOT_EXCEPTION() &&
3304 param.dwRet >= WAIT_OBJECT_0 && param.dwRet < (DWORD)(WAIT_OBJECT_0 + countHandles)) {
3305 // This thread has been removed from syncblk waiting list by the signalling thread
3306 syncState->Restore(FALSE);
3309 syncState->Restore(TRUE);
3312 _ASSERTE (param.dwRet != WAIT_IO_COMPLETION);
3316 return(param.dwRet);
3319 DWORD Thread::DoAppropriateWait(AppropriateWaitFunc func, void *args,
3320 DWORD millis, WaitMode mode,
3321 PendingSync *syncState)
// Callback-flavored twin of the handle-array overload above: runs 'func'
// via DoAppropriateWaitWorker and restores any pending SyncBlock wait state
// on exit. NOTE(review): the surrounding Param struct declaration and most
// of its field assignments (original lines ~3330-3342) are elided from this
// listing; line 3332 below is a remnant of that struct.
3323 STATIC_CONTRACT_THROWS;
3324 STATIC_CONTRACT_GC_TRIGGERS;
3326 INDEBUG(BOOL alertable = (mode & WaitMode_Alertable) != 0;);
3327 _ASSERTE(alertable || syncState == 0);
3332 AppropriateWaitFunc func;
3341 param.millis = millis;
3343 param.dwRet = (DWORD) -1;
3345 EE_TRY_FOR_FINALLY(Param *, pParam, &param) {
3346 pParam->dwRet = pParam->pThis->DoAppropriateWaitWorker(pParam->func, pParam->args, pParam->millis, pParam->mode);
3350 if (!GOT_EXCEPTION() && WAIT_OBJECT_0 == param.dwRet) {
3351 // This thread has been removed from syncblk waiting list by the signalling thread
3352 syncState->Restore(FALSE);
3355 syncState->Restore(TRUE);
3358 _ASSERTE (WAIT_IO_COMPLETION != param.dwRet);
3362 return(param.dwRet);
3365 #ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
3367 //--------------------------------------------------------------------
3368 // helper to do message wait
3369 //--------------------------------------------------------------------
// Pumps COM/window messages while waiting, by delegating to
// CoWaitForMultipleHandles. Maps RPC_S_CALLPENDING to WAIT_TIMEOUT, other
// failures to WAIT_FAILED, and biases success back into the WAIT_OBJECT_0
// range. Preserves the Win32 last-error across the SO-tolerant region.
3370 DWORD MsgWaitHelper(int numWaiters, HANDLE* phEvent, BOOL bWaitAll, DWORD millis, BOOL bAlertable)
3372 STATIC_CONTRACT_THROWS;
3373 // The true contract for GC trigger should be the following. But this puts a very strong restriction
3374 // on contract for functions that call EnablePreemptiveGC.
3375 //if (GetThread() && !ThreadStore::HoldingThreadStore(GetThread())) {GC_TRIGGERS;} else {GC_NOTRIGGER;}
3376 STATIC_CONTRACT_SO_INTOLERANT;
3377 STATIC_CONTRACT_GC_TRIGGERS;
3380 DWORD dwReturn=WAIT_ABANDONED;
3382 Thread* pThread = GetThread();
3383 // If pThread is NULL, we'd better shut down.
3384 if (pThread == NULL)
3385 _ASSERTE (g_fEEShutDown);
3387 DWORD lastError = 0;
3388 BEGIN_SO_TOLERANT_CODE(pThread);
3390 // If we're going to pump, we cannot use WAIT_ALL. That's because the wait would
3391 // only be satisfied if a message arrives while the handles are signalled. If we
3392 // want true WAIT_ALL, we need to fire up a different thread in the MTA and wait
3393 // on his result. This isn't implemented yet.
3395 // A change was added to WaitHandleNative::CorWaitMultipleNative to disable WaitAll
3396 // in an STA with more than one handle.
3399 if (numWaiters == 1)
3402 // The check that's supposed to prevent this condition from occurring, in WaitHandleNative::CorWaitMultipleNative,
3403 // is unfortunately behind FEATURE_COMINTEROP instead of FEATURE_COMINTEROP_APARTMENT_SUPPORT.
3404 // So on CoreCLR (where FEATURE_COMINTEROP is not currently defined) we can actually reach this point.
3405 // We can't fix this, because it's a breaking change, so we just won't assert here.
3406 // The result is that WaitAll on an STA thread in CoreCLR will behave strangely, as described above.
3410 flags |= COWAIT_WAITALL;
3413 flags |= COWAIT_ALERTABLE;
3416 hr = CoWaitForMultipleHandles(flags, millis, numWaiters, phEvent, &dwReturn);
3418 if (hr == RPC_S_CALLPENDING)
3420 dwReturn = WAIT_TIMEOUT;
3422 else if (FAILED(hr))
3424 // The service behaves differently on an STA vs. MTA in how much
3425 // error information it propagates back, and in which form. We currently
3426 // only get here in the STA case, so bias this logic that way.
3427 dwReturn = WAIT_FAILED;
3431 dwReturn += WAIT_OBJECT_0; // success -- bias back
3434 lastError = ::GetLastError();
3436 END_SO_TOLERANT_CODE;
3438 // END_SO_TOLERANT_CODE overwrites lasterror. Let's reset it.
3439 ::SetLastError(lastError);
3444 #endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT
// Thin wrapper around ::WaitForMultipleObjectsEx that runs the OS wait in an
// SO-tolerant region and preserves the Win32 last-error across it (the
// END_SO_TOLERANT_CODE epilogue clobbers last-error).
// NOTE(review): the final 'return dwRet;' (original line ~3461) is elided
// from this listing.
3446 DWORD WaitForMultipleObjectsEx_SO_TOLERANT (DWORD nCount, HANDLE *lpHandles, BOOL bWaitAll,DWORD dwMilliseconds, BOOL bAlertable)
3448 STATIC_CONTRACT_SO_INTOLERANT;
3450 DWORD dwRet = WAIT_FAILED;
3451 DWORD lastError = 0;
3453 BEGIN_SO_TOLERANT_CODE (GetThread ());
3454 dwRet = ::WaitForMultipleObjectsEx (nCount, lpHandles, bWaitAll, dwMilliseconds, bAlertable);
3455 lastError = ::GetLastError();
3456 END_SO_TOLERANT_CODE;
3458 // END_SO_TOLERANT_CODE overwrites lasterror. Let's reset it.
3459 ::SetLastError(lastError);
3463 //--------------------------------------------------------------------
3464 // Do appropriate wait based on apartment state (STA or MTA)
// Routes alertable waits on non-MTA threads through the message-pumping
// MsgWaitHelper (when apartment support is compiled in and the domain does
// not force trivial waits); everything else goes straight to the OS wait.
3465 DWORD Thread::DoAppropriateAptStateWait(int numWaiters, HANDLE* pHandles, BOOL bWaitAll,
3466 DWORD timeout, WaitMode mode)
3475 BOOL alertable = (mode & WaitMode_Alertable) != 0;
3477 #ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
3478 if (alertable && !GetDomain()->MustForceTrivialWaitOperations())
3480 ApartmentState as = GetFinalApartment();
// NOTE(review): the apartment-state test guarding this call (original
// lines ~3481-3482) is elided from this listing.
3483 return MsgWaitHelper(numWaiters, pHandles, bWaitAll, timeout, alertable);
3486 #endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT
3488 return WaitForMultipleObjectsEx_SO_TOLERANT(numWaiters, pHandles, bWaitAll, timeout, alertable);
3491 // A helper called by our two flavors of DoAppropriateWaitWorker
// Prepares the thread for an alertable wait: marks it TS_Interruptible
// (before re-checking for a pending interrupt, to close the APC race
// described below), services any pending thread interrupt, and clears the
// stale TS_Interrupted bit.
3492 void Thread::DoAppropriateWaitWorkerAlertableHelper(WaitMode mode)
3500 // If thread abort is prevented, we do not want this thread to see thread abort and thread interrupt exception.
3501 if (IsAbortPrevented())
3506 // A word about ordering for Interrupt. If someone tries to interrupt a thread
3507 // that's in the interruptible state, we queue an APC. But if they try to interrupt
3508 // a thread that's not in the interruptible state, we just record that fact. So
3509 // we have to set TS_Interruptible before we test to see whether someone wants to
3510 // interrupt us or else we have a race condition that causes us to skip the APC.
3511 FastInterlockOr((ULONG *) &m_State, TS_Interruptible);
3513 if (HasThreadStateNC(TSNC_InRestoringSyncBlock))
3515 // The thread is restoring SyncBlock for Object.Wait.
3516 ResetThreadStateNC(TSNC_InRestoringSyncBlock);
3520 HandleThreadInterrupt((mode & WaitMode_ADUnload) != 0);
3522 // Safe to clear the interrupted state, no APC could have fired since we
3523 // reset m_UserInterrupt (which inhibits our APC callback from doing
3525 FastInterlockAnd((ULONG *) &m_State, ~TS_Interrupted);
3529 void MarkOSAlertableWait()
3531 LIMITED_METHOD_CONTRACT;
3532 GetThread()->SetThreadStateNC (Thread::TSNC_OSAlertableWait);
3535 void UnMarkOSAlertableWait()
3537 LIMITED_METHOD_CONTRACT;
3538 GetThread()->ResetThreadStateNC (Thread::TSNC_OSAlertableWait);
3541 //--------------------------------------------------------------------
3542 // Based on whether this thread has a message pump, do the appropriate
3544 //--------------------------------------------------------------------
// Core wait loop for the handle-array flavor of DoAppropriateWait:
// optionally defers to a managed SynchronizationContext, otherwise loops
// over DoAppropriateAptStateWait, re-waiting after APC wakeups
// (WAIT_IO_COMPLETION) with the timeout adjusted, and translating
// WAIT_FAILED/ERROR_INVALID_HANDLE cases caused by dying thread handles
// into successful waits.
3545 DWORD Thread::DoAppropriateWaitWorker(int countHandles, HANDLE *handles, BOOL waitAll,
3546 DWORD millis, WaitMode mode)
3556 BOOL alertable = (mode & WaitMode_Alertable) != 0;
3557 // Waits from SynchronizationContext.WaitHelper are always just WaitMode_IgnoreSyncCtx.
3558 // So if we defer to a sync ctx, we will lose any extra bits. We must therefore not
3559 // defer to a sync ctx if doing any non-default wait.
3560 // If you're doing a default wait, but want to ignore sync ctx, specify WaitMode_IgnoreSyncCtx
3561 // which will make mode != WaitMode_Alertable.
3562 BOOL ignoreSyncCtx = (mode != WaitMode_Alertable);
3564 if (GetDomain()->MustForceTrivialWaitOperations())
3565 ignoreSyncCtx = TRUE;
3567 // Unless the ignoreSyncCtx flag is set, first check to see if there is a synchronization
3568 // context on the current thread and if there is, dispatch to it to do the wait.
3569 // If the wait is non alertable we cannot forward the call to the sync context
3570 // since fundamental parts of the system (such as the GC) rely on non alertable
3571 // waits not running any managed code. Also if we are past the point in shutdown were we
3572 // are allowed to run managed code then we can't forward the call to the sync context.
3573 if (!ignoreSyncCtx && alertable && CanRunManagedCode(LoaderLockCheck::None)
3574 && !HasThreadStateNC(Thread::TSNC_BlockedForShutdown))
3578 BOOL fSyncCtxPresent = FALSE;
3579 OBJECTREF SyncCtxObj = NULL;
3580 GCPROTECT_BEGIN(SyncCtxObj)
3582 GetSynchronizationContext(&SyncCtxObj);
3583 if (SyncCtxObj != NULL)
3585 SYNCHRONIZATIONCONTEXTREF syncRef = (SYNCHRONIZATIONCONTEXTREF)SyncCtxObj;
3586 if (syncRef->IsWaitNotificationRequired())
3588 fSyncCtxPresent = TRUE;
3589 ret = DoSyncContextWait(&SyncCtxObj, countHandles, handles, waitAll, millis);
3595 if (fSyncCtxPresent)
3599 // Before going to pre-emptive mode the thread needs to be flagged as waiting for
3600 // the debugger. This used to be accomplished by the TS_Interruptible flag but that
3601 // doesn't work reliably, see DevDiv Bugs 699245. Some methods call in here already in
3602 // COOP mode so we set the bit before the transition. For the calls that are already
3603 // in pre-emptive mode those are still buggy. This is only a partial fix.
3604 BOOL isCoop = PreemptiveGCDisabled();
3605 ThreadStateNCStackHolder tsNC(isCoop && alertable, TSNC_DebuggerSleepWaitJoin);
3611 DoAppropriateWaitWorkerAlertableHelper(mode);
3614 StateHolder<MarkOSAlertableWait,UnMarkOSAlertableWait> OSAlertableWait(alertable);
3616 ThreadStateHolder tsh(alertable, TS_Interruptible | TS_Interrupted);
3618 ULONGLONG dwStart = 0, dwEnd;
3620 if (millis != INFINITE)
3622 dwStart = CLRGetTickCount64();
3625 ret = DoAppropriateAptStateWait(countHandles, handles, waitAll, millis, mode);
3627 if (ret == WAIT_IO_COMPLETION)
3629 _ASSERTE (alertable);
3631 if (m_State & TS_Interrupted)
3633 HandleThreadInterrupt(mode & WaitMode_ADUnload);
3635 // We could be woken by some spurious APC or an EE APC queued to
3636 // interrupt us. In the latter case the TS_Interrupted bit will be set
3637 // in the thread state bits. Otherwise we just go back to sleep again.
3638 if (millis != INFINITE)
3640 dwEnd = CLRGetTickCount64();
3641 if (dwEnd >= dwStart + millis)
// Deduct the elapsed time and re-wait with the remaining timeout.
3648 millis -= (DWORD)(dwEnd - dwStart);
3653 _ASSERTE((ret >= WAIT_OBJECT_0 && ret < (WAIT_OBJECT_0 + (DWORD)countHandles)) ||
3654 (ret >= WAIT_ABANDONED && ret < (WAIT_ABANDONED + (DWORD)countHandles)) ||
3655 (ret == WAIT_TIMEOUT) || (ret == WAIT_FAILED));
3656 // countHandles is used as an unsigned -- it should never be negative.
3657 _ASSERTE(countHandles >= 0);
3659 // We support precisely one WAIT_FAILED case, where we attempt to wait on a
3660 // thread handle and the thread is in the process of dying we might get a
3661 // invalid handle substatus. Turn this into a successful wait.
3662 // There are three cases to consider:
3663 // 1) Only waiting on one handle: return success right away.
3664 // 2) Waiting for all handles to be signalled: retry the wait without the
3666 // 3) Waiting for one of multiple handles to be signalled: return with the
3667 // first handle that is either signalled or has become invalid.
3668 if (ret == WAIT_FAILED)
3670 DWORD errorCode = ::GetLastError();
3671 if (errorCode == ERROR_INVALID_PARAMETER)
3673 if (CheckForDuplicateHandles(countHandles, handles))
3674 COMPlusThrow(kDuplicateWaitObjectException);
3676 COMPlusThrowHR(HRESULT_FROM_WIN32(errorCode));
3678 else if (errorCode == ERROR_ACCESS_DENIED)
3680 // A Win32 ACL could prevent us from waiting on the handle.
3681 COMPlusThrow(kUnauthorizedAccessException);
3683 else if (errorCode == ERROR_NOT_ENOUGH_MEMORY)
3688 else if (errorCode == ERROR_NOT_SUPPORTED)
3690 // "Wait for any" and "wait for all" operations on multiple wait handles are not supported when a cross-process sync
3691 // object is included in the array
3692 COMPlusThrow(kPlatformNotSupportedException, W("PlatformNotSupported_NamedSyncObjectWaitAnyWaitAll"));
3695 else if (errorCode != ERROR_INVALID_HANDLE)
3697 ThrowWin32(errorCode);
// From here on, errorCode == ERROR_INVALID_HANDLE: case (1) above.
3700 if (countHandles == 1)
3701 ret = WAIT_OBJECT_0;
// Case (2): wait-all with an invalid handle in the set.
3704 // Probe all handles with a timeout of zero. When we find one that's
3705 // invalid, move it out of the list and retry the wait.
3706 for (int i = 0; i < countHandles; i++)
3708 // WaitForSingleObject won't pump messages; we already probe enough space
3709 // before calling this function and we don't want to fail here, so we don't
3710 // do a transition to tolerant code here
3711 DWORD subRet = WaitForSingleObject (handles[i], 0);
3712 if (subRet != WAIT_FAILED)
3714 _ASSERTE(::GetLastError() == ERROR_INVALID_HANDLE);
// Compact the array to drop the invalid handle before retrying.
3715 if ((countHandles - i - 1) > 0)
3716 memmove(&handles[i], &handles[i+1], (countHandles - i - 1) * sizeof(HANDLE));
3721 // Compute the new timeout value by assume that the timeout
3722 // is not large enough for more than one wrap
3723 dwEnd = CLRGetTickCount64();
3724 if (millis != INFINITE)
3726 if (dwEnd >= dwStart + millis)
3733 millis -= (DWORD)(dwEnd - dwStart);
// Case (3): wait-any — report the first signalled or invalid handle.
3740 // Probe all handles with a timeout as zero, succeed with the first
3741 // handle that doesn't timeout.
3742 ret = WAIT_OBJECT_0;
3744 for (i = 0; i < countHandles; i++)
3747 // WaitForSingleObject won't pump messages; we already probe enough space
3748 // before calling this function and we don't want to fail here, so we don't
3749 // do a transition to tolerant code here
3750 DWORD subRet = WaitForSingleObject (handles[i], 0);
3751 if ((subRet == WAIT_OBJECT_0) || (subRet == WAIT_FAILED))
3753 if (subRet == WAIT_ABANDONED)
3755 ret = (ret - WAIT_OBJECT_0) + WAIT_ABANDONED;
3758 // If we get alerted it just masks the real state of the current
3759 // handle, so retry the wait.
3760 if (subRet == WAIT_IO_COMPLETION)
3762 _ASSERTE(subRet == WAIT_TIMEOUT);
3770 _ASSERTE((ret != WAIT_TIMEOUT) || (millis != INFINITE));
3776 DWORD Thread::DoAppropriateWaitWorker(AppropriateWaitFunc func, void *args,
3777 DWORD millis, WaitMode mode)
3785 BOOL alertable = (mode & WaitMode_Alertable)!=0;
3787 // Before going to pre-emptive mode the thread needs to be flagged as waiting for
3788 // the debugger. This used to be accomplished by the TS_Interruptible flag but that
3789 // doesn't work reliably, see DevDiv Bugs 699245. Some methods call in here already in
3790 // COOP mode so we set the bit before the transition. For the calls that are already
3791 // in pre-emptive mode those are still buggy. This is only a partial fix.
3792 BOOL isCoop = PreemptiveGCDisabled();
3793 ThreadStateNCStackHolder tsNC(isCoop && alertable, TSNC_DebuggerSleepWaitJoin);
3797 // @TODO cwb: we don't know whether a thread has a message pump or
3798 // how to pump its messages, currently.
3799 // @TODO cwb: WinCE isn't going to support Thread.Interrupt() correctly until
3800 // we get alertable waits on that platform.</TODO>
3804 DoAppropriateWaitWorkerAlertableHelper(mode);
3810 option = WAIT_ALERTABLE;
3811 #ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
3812 ApartmentState as = GetFinalApartment();
3813 if ((AS_InMTA != as) && !GetDomain()->MustForceTrivialWaitOperations())
3815 option |= WAIT_MSGPUMP;
3817 #endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT
3824 ThreadStateHolder tsh(alertable, TS_Interruptible | TS_Interrupted);
3826 ULONGLONG dwStart = 0;
3830 if (millis != INFINITE)
3832 dwStart = CLRGetTickCount64();
3834 ret = func(args, millis, option);
3836 if (ret == WAIT_IO_COMPLETION)
3838 _ASSERTE (alertable);
3840 if ((m_State & TS_Interrupted))
3842 HandleThreadInterrupt(mode & WaitMode_ADUnload);
3844 if (millis != INFINITE)
3846 dwEnd = CLRGetTickCount64();
3847 if (dwEnd >= dwStart + millis)
3854 millis -= (DWORD)(dwEnd - dwStart);
3861 _ASSERTE(ret == WAIT_OBJECT_0 ||
3862 ret == WAIT_ABANDONED ||
3863 ret == WAIT_TIMEOUT ||
3864 ret == WAIT_FAILED);
3866 _ASSERTE((ret != WAIT_TIMEOUT) || (millis != INFINITE));
3872 //--------------------------------------------------------------------
3873 // Only one style of wait for DoSignalAndWait since we don't support this on STA Threads
3874 //--------------------------------------------------------------------
3875 DWORD Thread::DoSignalAndWait(HANDLE *handles, DWORD millis, BOOL alertable, PendingSync *syncState)
3877 STATIC_CONTRACT_THROWS;
3878 STATIC_CONTRACT_GC_TRIGGERS;
3880 _ASSERTE(alertable || syncState == 0);
3891 param.handles = handles;
3892 param.millis = millis;
3893 param.alertable = alertable;
3894 param.dwRet = (DWORD) -1;
3896 EE_TRY_FOR_FINALLY(Param *, pParam, ¶m) {
3897 pParam->dwRet = pParam->pThis->DoSignalAndWaitWorker(pParam->handles, pParam->millis, pParam->alertable);
3901 if (!GOT_EXCEPTION() && WAIT_OBJECT_0 == param.dwRet) {
3902 // This thread has been removed from syncblk waiting list by the signalling thread
3903 syncState->Restore(FALSE);
3906 syncState->Restore(TRUE);
3909 _ASSERTE (WAIT_IO_COMPLETION != param.dwRet);
3913 return(param.dwRet);
3917 DWORD Thread::DoSignalAndWaitWorker(HANDLE* pHandles, DWORD millis,BOOL alertable)
3931 DoAppropriateWaitWorkerAlertableHelper(WaitMode_None);
3934 StateHolder<MarkOSAlertableWait,UnMarkOSAlertableWait> OSAlertableWait(alertable);
3936 ThreadStateHolder tsh(alertable, TS_Interruptible | TS_Interrupted);
3938 ULONGLONG dwStart = 0, dwEnd;
3940 if (INFINITE != millis)
3942 dwStart = CLRGetTickCount64();
3945 ret = SignalObjectAndWait(pHandles[0], pHandles[1], millis, alertable);
3949 if (WAIT_IO_COMPLETION == ret)
3951 _ASSERTE (alertable);
3952 // We could be woken by some spurious APC or an EE APC queued to
3953 // interrupt us. In the latter case the TS_Interrupted bit will be set
3954 // in the thread state bits. Otherwise we just go back to sleep again.
3955 if ((m_State & TS_Interrupted))
3957 HandleThreadInterrupt(FALSE);
3959 if (INFINITE != millis)
3961 dwEnd = CLRGetTickCount64();
3962 if (dwStart + millis <= dwEnd)
3969 millis -= (DWORD)(dwEnd - dwStart);
3971 dwStart = CLRGetTickCount64();
3973 //Retry case we don't want to signal again so only do the wait...
3974 ret = WaitForSingleObjectEx(pHandles[1],millis,TRUE);
3978 if (WAIT_FAILED == ret)
3980 DWORD errorCode = ::GetLastError();
3981 //If the handle to signal is a mutex and
3982 // the calling thread is not the owner, errorCode is ERROR_NOT_OWNER
3986 case ERROR_INVALID_HANDLE:
3987 case ERROR_NOT_OWNER:
3988 case ERROR_ACCESS_DENIED:
3989 COMPlusThrowWin32();
3992 case ERROR_TOO_MANY_POSTS:
3993 ret = ERROR_TOO_MANY_POSTS;
3997 CONSISTENCY_CHECK_MSGF(0, ("This errorCode is not understood '(%d)''\n", errorCode));
3998 COMPlusThrowWin32();
4005 //Check that the return state is valid
4006 _ASSERTE(WAIT_OBJECT_0 == ret ||
4007 WAIT_ABANDONED == ret ||
4008 WAIT_TIMEOUT == ret ||
4009 WAIT_FAILED == ret ||
4010 ERROR_TOO_MANY_POSTS == ret);
4012 //Wrong to time out if the wait was infinite
4013 _ASSERTE((WAIT_TIMEOUT != ret) || (INFINITE != millis));
4017 #endif // !FEATURE_PAL
4019 DWORD Thread::DoSyncContextWait(OBJECTREF *pSyncCtxObj, int countHandles, HANDLE *handles, BOOL waitAll, DWORD millis)
4026 PRECONDITION(CheckPointer(handles));
4027 PRECONDITION(IsProtectedByGCFrame (pSyncCtxObj));
4030 MethodDescCallSite invokeWaitMethodHelper(METHOD__SYNCHRONIZATION_CONTEXT__INVOKE_WAIT_METHOD_HELPER);
4032 BASEARRAYREF handleArrayObj = (BASEARRAYREF)AllocatePrimitiveArray(ELEMENT_TYPE_I, countHandles);
4033 memcpyNoGCRefs(handleArrayObj->GetDataPtr(), handles, countHandles * sizeof(HANDLE));
4037 ObjToArgSlot(*pSyncCtxObj),
4038 ObjToArgSlot(handleArrayObj),
4039 BoolToArgSlot(waitAll),
4043 // Needed by TriggerGCForMDAInternal to avoid infinite recursion
4044 ThreadStateNCStackHolder holder(TRUE, TSNC_InsideSyncContextWait);
4046 return invokeWaitMethodHelper.Call_RetI4(args);
4049 // Called out of SyncBlock::Wait() to block this thread until the Notify occurs.
4050 BOOL Thread::Block(INT32 timeOut, PendingSync *syncState)
4052 WRAPPER_NO_CONTRACT;
4054 _ASSERTE(this == GetThread());
4056 // Before calling Block, the SyncBlock queued us onto it's list of waiting threads.
4057 // However, before calling Block the SyncBlock temporarily left the synchronized
4058 // region. This allowed threads to enter the region and call Notify, in which
4059 // case we may have been signalled before we entered the Wait. So we aren't in the
4060 // m_WaitSB list any longer. Not a problem: the following Wait will return
4061 // immediately. But it means we cannot enforce the following assertion:
4062 // _ASSERTE(m_WaitSB != NULL);
4064 return (Wait(syncState->m_WaitEventLink->m_Next->m_EventWait, timeOut, syncState) != WAIT_OBJECT_0);
4068 // Return whether or not a timeout occurred. TRUE=>we waited successfully
4069 DWORD Thread::Wait(HANDLE *objs, int cntObjs, INT32 timeOut, PendingSync *syncInfo)
4071 WRAPPER_NO_CONTRACT;
4076 _ASSERTE(timeOut >= 0 || timeOut == INFINITE_TIMEOUT);
4078 dwTimeOut32 = (timeOut == INFINITE_TIMEOUT
4082 dwResult = DoAppropriateWait(cntObjs, objs, FALSE /*=waitAll*/, dwTimeOut32,
4083 WaitMode_Alertable /*alertable*/,
4086 // Either we succeeded in the wait, or we timed out
4087 _ASSERTE((dwResult >= WAIT_OBJECT_0 && dwResult < (DWORD)(WAIT_OBJECT_0 + cntObjs)) ||
4088 (dwResult == WAIT_TIMEOUT));
4093 // Return whether or not a timeout occurred. TRUE=>we waited successfully
4094 DWORD Thread::Wait(CLREvent *pEvent, INT32 timeOut, PendingSync *syncInfo)
4096 WRAPPER_NO_CONTRACT;
4101 _ASSERTE(timeOut >= 0 || timeOut == INFINITE_TIMEOUT);
4103 dwTimeOut32 = (timeOut == INFINITE_TIMEOUT
4107 dwResult = pEvent->Wait(dwTimeOut32, TRUE /*alertable*/, syncInfo);
4109 // Either we succeeded in the wait, or we timed out
4110 _ASSERTE((dwResult == WAIT_OBJECT_0) ||
4111 (dwResult == WAIT_TIMEOUT));
4116 void Thread::Wake(SyncBlock *psb)
4118 WRAPPER_NO_CONTRACT;
4120 CLREvent* hEvent = NULL;
4121 WaitEventLink *walk = &m_WaitEventLink;
4122 while (walk->m_Next) {
4123 if (walk->m_Next->m_WaitSB == psb) {
4124 hEvent = walk->m_Next->m_EventWait;
4125 // We are guaranteed that only one thread can change walk->m_Next->m_WaitSB
4126 // since the thread is helding the syncblock.
4127 walk->m_Next->m_WaitSB = (SyncBlock*)((DWORD_PTR)walk->m_Next->m_WaitSB | 1);
4131 else if ((SyncBlock*)((DWORD_PTR)walk->m_Next & ~1) == psb) {
4132 _ASSERTE (!"Can not wake a thread on the same SyncBlock more than once");
4136 PREFIX_ASSUME (hEvent != NULL);
4140 #define WAIT_INTERRUPT_THREADABORT 0x1
4141 #define WAIT_INTERRUPT_INTERRUPT 0x2
4142 #define WAIT_INTERRUPT_OTHEREXCEPTION 0x4
4145 DWORD EnterMonitorForRestore(SyncBlock *pSB)
4158 pSB->EnterMonitor();
4162 // Assume it is a normal exception unless proven.
4163 state = WAIT_INTERRUPT_OTHEREXCEPTION;
4164 Thread *pThread = GetThread();
4165 if (pThread->IsAbortInitiated())
4167 state = WAIT_INTERRUPT_THREADABORT;
4169 else if (__pException != NULL)
4171 if (__pException->GetHR() == COR_E_THREADINTERRUPTED)
4173 state = WAIT_INTERRUPT_INTERRUPT;
4177 EX_END_CATCH(SwallowAllExceptions);
4182 // This is the service that backs us out of a wait that we interrupted. We must
4183 // re-enter the monitor to the same extent the SyncBlock would, if we returned
4184 // through it (instead of throwing through it). And we need to cancel the wait,
4185 // if it didn't get notified away while we are processing the interrupt.
4186 void PendingSync::Restore(BOOL bRemoveFromSB)
4194 _ASSERTE(m_EnterCount);
4196 Thread *pCurThread = GetThread();
4198 _ASSERTE (pCurThread == m_OwnerThread);
4200 WaitEventLink *pRealWaitEventLink = m_WaitEventLink->m_Next;
4202 pRealWaitEventLink->m_RefCount --;
4203 if (pRealWaitEventLink->m_RefCount == 0)
4205 if (bRemoveFromSB) {
4206 ThreadQueue::RemoveThread(pCurThread, pRealWaitEventLink->m_WaitSB);
4208 if (pRealWaitEventLink->m_EventWait != &pCurThread->m_EventWait) {
4209 // Put the event back to the pool.
4210 StoreEventToEventStore(pRealWaitEventLink->m_EventWait);
4212 // Remove from the link.
4213 m_WaitEventLink->m_Next = m_WaitEventLink->m_Next->m_Next;
4216 // Someone up the stack is responsible for keeping the syncblock alive by protecting
4217 // the object that owns it. But this relies on assertions that EnterMonitor is only
4218 // called in cooperative mode. Even though we are safe in preemptive, do the
4220 GCX_COOP_THREAD_EXISTS(pCurThread);
4221 // We need to make sure that EnterMonitor succeeds. We may have code like
4226 // We need to make sure that the finally from lock is excuted with the lock owned.
4228 SyncBlock *psb = (SyncBlock*)((DWORD_PTR)pRealWaitEventLink->m_WaitSB & ~1);
4229 for (LONG i=0; i < m_EnterCount;)
4231 if ((state & (WAIT_INTERRUPT_THREADABORT | WAIT_INTERRUPT_INTERRUPT)) != 0)
4233 // If the thread has been interrupted by Thread.Interrupt or Thread.Abort,
4234 // disable the check at the beginning of DoAppropriateWait
4235 pCurThread->SetThreadStateNC(Thread::TSNC_InRestoringSyncBlock);
4237 DWORD result = EnterMonitorForRestore(psb);
4244 // We block the thread until the thread acquires the lock.
4245 // This is to make sure that when catch/finally is executed, the thread has the lock.
4246 // We do not want thread to run its catch/finally if the lock is not taken.
4249 // If the thread is being rudely aborted, and the thread has
4250 // no Cer on stack, we will not run managed code to release the
4251 // lock, so we can terminate the loop.
4252 if (pCurThread->IsRudeAbortInitiated() &&
4253 !pCurThread->IsExecutingWithinCer())
4260 pCurThread->ResetThreadStateNC(Thread::TSNC_InRestoringSyncBlock);
4262 if ((state & WAIT_INTERRUPT_THREADABORT) != 0)
4264 pCurThread->HandleThreadAbort();
4266 else if ((state & WAIT_INTERRUPT_INTERRUPT) != 0)
4268 COMPlusThrow(kThreadInterruptedException);
4274 // This is the callback from the OS, when we queue an APC to interrupt a waiting thread.
4275 // The callback occurs on the thread we wish to interrupt. It is a STATIC method.
4276 void WINAPI Thread::UserInterruptAPC(ULONG_PTR data)
4285 _ASSERTE(data == APC_Code);
4287 Thread *pCurThread = GetThread();
4290 // We should only take action if an interrupt is currently being
4291 // requested (our synchronization does not guarantee that we won't fire
4292 // spuriously). It's safe to check the m_UserInterrupt field and then
4293 // set TS_Interrupted in a non-atomic fashion because m_UserInterrupt is
4294 // only cleared in this thread's context (though it may be set from any
4296 if (pCurThread->IsUserInterrupted())
4298 // Set bit to indicate this routine was called (as opposed to other
4300 FastInterlockOr((ULONG *) &pCurThread->m_State, TS_Interrupted);
4305 // This is the workhorse for Thread.Interrupt().
4306 void Thread::UserInterrupt(ThreadInterruptMode mode)
4314 FastInterlockOr((DWORD*)&m_UserInterrupt, mode);
4316 if (HasValidThreadHandle() &&
4317 HasThreadState (TS_Interruptible))
4320 AddFiberInfo(ThreadTrackInfo_Abort);
4326 // Implementation of Thread.Sleep().
4327 void Thread::UserSleep(INT32 time)
4335 INCONTRACT(_ASSERTE(!GetThread()->GCNoTrigger()));
4339 // Before going to pre-emptive mode the thread needs to be flagged as waiting for
4340 // the debugger. This used to be accomplished by the TS_Interruptible flag but that
4341 // doesn't work reliably, see DevDiv Bugs 699245.
4342 ThreadStateNCStackHolder tsNC(TRUE, TSNC_DebuggerSleepWaitJoin);
4345 // A word about ordering for Interrupt. If someone tries to interrupt a thread
4346 // that's in the interruptible state, we queue an APC. But if they try to interrupt
4347 // a thread that's not in the interruptible state, we just record that fact. So
4348 // we have to set TS_Interruptible before we test to see whether someone wants to
4349 // interrupt us or else we have a race condition that causes us to skip the APC.
4350 FastInterlockOr((ULONG *) &m_State, TS_Interruptible);
4352 // If someone has interrupted us, we should not enter the wait.
4353 if (IsUserInterrupted())
4355 HandleThreadInterrupt(FALSE);
4358 ThreadStateHolder tsh(TRUE, TS_Interruptible | TS_Interrupted);
4360 FastInterlockAnd((ULONG *) &m_State, ~TS_Interrupted);
4362 DWORD dwTime = (DWORD)time;
4365 ULONGLONG start = CLRGetTickCount64();
4367 res = ClrSleepEx (dwTime, TRUE);
4369 if (res == WAIT_IO_COMPLETION)
4371 // We could be woken by some spurious APC or an EE APC queued to
4372 // interrupt us. In the latter case the TS_Interrupted bit will be set
4373 // in the thread state bits. Otherwise we just go back to sleep again.
4374 if ((m_State & TS_Interrupted))
4376 HandleThreadInterrupt(FALSE);
4379 if (dwTime == INFINITE)
4385 ULONGLONG actDuration = CLRGetTickCount64() - start;
4387 if (dwTime > actDuration)
4389 dwTime -= (DWORD)actDuration;
4398 _ASSERTE(res == WAIT_TIMEOUT || res == WAIT_OBJECT_0);
4402 // Correspondence between an EE Thread and an exposed System.Thread:
4403 OBJECTREF Thread::GetExposedObject()
4413 Thread *pCurThread = GetThread();
4414 _ASSERTE (!(pCurThread == NULL || IsAtProcessExit()));
4416 _ASSERTE(pCurThread->PreemptiveGCDisabled());
4418 if (ObjectFromHandle(m_ExposedObject) == NULL)
4420 // Allocate the exposed thread object.
4421 THREADBASEREF attempt = (THREADBASEREF) AllocateObject(g_pThreadClass);
4422 GCPROTECT_BEGIN(attempt);
4424 // The exposed object keeps us alive until it is GC'ed. This
4425 // doesn't mean the physical thread continues to run, of course.
4426 // We have to set this outside of the ThreadStore lock, because this might trigger a GC.
4427 attempt->SetInternal(this);
4429 BOOL fNeedThreadStore = (! ThreadStore::HoldingThreadStore(pCurThread));
4430 // Take a lock to make sure that only one thread creates the object.
4431 ThreadStoreLockHolder tsHolder(fNeedThreadStore);
4433 // Check to see if another thread has not already created the exposed object.
4434 if (ObjectFromHandle(m_ExposedObject) == NULL)
4436 // Keep a weak reference to the exposed object.
4437 StoreObjectInHandle(m_ExposedObject, (OBJECTREF) attempt);
4439 ObjectInHandleHolder exposedHolder(m_ExposedObject);
4441 // Increase the external ref count. We can't call IncExternalCount because we
4442 // already hold the thread lock and IncExternalCount won't be able to take it.
4443 ULONG retVal = FastInterlockIncrement ((LONG*)&m_ExternalRefCount);
4446 AddFiberInfo(ThreadTrackInfo_Lifetime);
4448 // Check to see if we need to store a strong pointer to the object.
4450 StoreObjectInHandle(m_StrongHndToExposedObject, (OBJECTREF) attempt);
4452 ObjectInHandleHolder strongHolder(m_StrongHndToExposedObject);
4455 attempt->SetManagedThreadId(GetThreadId());
4458 // Note that we are NOT calling the constructor on the Thread. That's
4459 // because this is an internal create where we don't want a Start
4460 // address. And we don't want to expose such a constructor for our
4461 // customers to accidentally call. The following is in lieu of a true
4463 attempt->InitExisting();
4465 exposedHolder.SuppressRelease();
4466 strongHolder.SuppressRelease();
4470 attempt->ClearInternal();
4475 return ObjectFromHandle(m_ExposedObject);
4479 // We only set non NULL exposed objects for unstarted threads that haven't exited
4480 // their constructor yet. So there are no race conditions.
4481 void Thread::SetExposedObject(OBJECTREF exposed)
4485 if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
4489 if (exposed != NULL)
4491 _ASSERTE (GetThread() != this);
4492 _ASSERTE(IsUnstarted());
4493 _ASSERTE(ObjectFromHandle(m_ExposedObject) == NULL);
4494 // The exposed object keeps us alive until it is GC'ed. This doesn't mean the
4495 // physical thread continues to run, of course.
4496 StoreObjectInHandle(m_ExposedObject, exposed);
4497 // This makes sure the contexts on the backing thread
4498 // and the managed thread start off in sync with each other.
4499 // BEWARE: the IncExternalCount call below may cause GC to happen.
4501 // IncExternalCount will store exposed in m_StrongHndToExposedObject which is in default domain.
4502 // If the creating thread is killed before the target thread is killed in Thread.Start, Thread object
4503 // will be kept alive forever.
4504 // Instead, IncExternalCount should be called after the target thread has been started in Thread.Start.
4505 // IncExternalCount();
4509 // Simply set both of the handles to NULL. The GC of the old exposed thread
4510 // object will take care of decrementing the external ref count.
4511 StoreObjectInHandle(m_ExposedObject, NULL);
4512 StoreObjectInHandle(m_StrongHndToExposedObject, NULL);
4516 void Thread::SetLastThrownObject(OBJECTREF throwable, BOOL isUnhandled)
4520 if ((throwable == NULL) || CLRException::IsPreallocatedExceptionObject(throwable)) NOTHROW; else THROWS; // From CreateHandle
4522 if (throwable == NULL) MODE_ANY; else MODE_COOPERATIVE;
4527 STRESS_LOG_COND1(LF_EH, LL_INFO100, OBJECTREFToObject(throwable) != NULL, "in Thread::SetLastThrownObject: obj = %p\n", OBJECTREFToObject(throwable));
4529 // you can't have a NULL unhandled exception
4530 _ASSERTE(!(throwable == NULL && isUnhandled));
4532 if (m_LastThrownObjectHandle != NULL)
4534 // We'll somtimes use a handle for a preallocated exception object. We should never, ever destroy one of
4535 // these handles... they'll be destroyed when the Runtime shuts down.
4536 if (!CLRException::IsPreallocatedExceptionHandle(m_LastThrownObjectHandle))
4538 DestroyHandle(m_LastThrownObjectHandle);
4541 m_LastThrownObjectHandle = NULL; // Make sure to set this to NULL here just in case we throw trying to make
4542 // a new handle below.
4545 if (throwable != NULL)
4547 _ASSERTE(this == GetThread());
4549 // Non-compliant exceptions are always wrapped.
4550 // The use of the ExceptionNative:: helper here (rather than the global ::IsException helper)
4551 // is hokey, but we need a GC_NOTRIGGER version and it's only for an ASSERT.
4552 _ASSERTE(IsException(throwable->GetMethodTable()));
4554 // If we're tracking one of the preallocated exception objects, then just use the global handle that
4555 // matches it rather than creating a new one.
4556 if (CLRException::IsPreallocatedExceptionObject(throwable))
4558 m_LastThrownObjectHandle = CLRException::GetPreallocatedHandleForObject(throwable);
4562 BEGIN_SO_INTOLERANT_CODE(GetThread());
4564 m_LastThrownObjectHandle = GetDomain()->CreateHandle(throwable);
4566 END_SO_INTOLERANT_CODE;
4569 _ASSERTE(m_LastThrownObjectHandle != NULL);
4570 m_ltoIsUnhandled = isUnhandled;
4574 m_ltoIsUnhandled = FALSE;
4578 void Thread::SetSOForLastThrownObject()
4591 // If we are saving stack overflow exception, we can just null out the current handle.
4592 // The current domain is going to be unloaded or the process is going to be killed, so
4593 // we will not leak a handle.
4594 m_LastThrownObjectHandle = CLRException::GetPreallocatedStackOverflowExceptionHandle();
4598 // This is a nice wrapper for SetLastThrownObject which catches any exceptions caused by not being able to create
4599 // the handle for the throwable, and setting the last thrown object to the preallocated out of memory exception
4602 OBJECTREF Thread::SafeSetLastThrownObject(OBJECTREF throwable)
4608 if (throwable == NULL) MODE_ANY; else MODE_COOPERATIVE;
4613 // We return the original throwable if nothing goes wrong.
4614 OBJECTREF ret = throwable;
4618 // Try to set the throwable.
4619 SetLastThrownObject(throwable);
4623 // If it didn't work, then set the last thrown object to the preallocated OOM exception, and return that
4624 // object instead of the original throwable.
4625 ret = CLRException::GetPreallocatedOutOfMemoryException();
4626 SetLastThrownObject(ret);
4628 EX_END_CATCH(SwallowAllExceptions);
4634 // This is a nice wrapper for SetThrowable and SetLastThrownObject, which catches any exceptions caused by not
4635 // being able to create the handle for the throwable, and sets the throwable to the preallocated out of memory
4636 // exception instead. It also updates the last thrown object, which is always updated when the throwable is
4639 OBJECTREF Thread::SafeSetThrowables(OBJECTREF throwable DEBUG_ARG(ThreadExceptionState::SetThrowableErrorChecking stecFlags),
4646 if (throwable == NULL) MODE_ANY; else MODE_COOPERATIVE;
4651 // We return the original throwable if nothing goes wrong.
4652 OBJECTREF ret = throwable;
4656 // Try to set the throwable.
4657 SetThrowable(throwable DEBUG_ARG(stecFlags));
4659 // Now, if the last thrown object is different, go ahead and update it. This makes sure that we re-throw
4660 // the right object when we rethrow.
4661 if (LastThrownObject() != throwable)
4663 SetLastThrownObject(throwable);
4668 MarkLastThrownObjectUnhandled();
4673 // If either set didn't work, then set both throwables to the preallocated OOM exception, and return that
4674 // object instead of the original throwable.
4675 ret = CLRException::GetPreallocatedOutOfMemoryException();
4677 // Neither of these will throw because we're setting with a preallocated exception.
4678 SetThrowable(ret DEBUG_ARG(stecFlags));
4679 SetLastThrownObject(ret, isUnhandled);
4681 EX_END_CATCH(SwallowAllExceptions);
4687 // This method will sync the managed exception state to be in sync with the topmost active exception
4688 // for a given thread
4689 void Thread::SyncManagedExceptionState(bool fIsDebuggerThread)
4702 // Syncup the LastThrownObject on the managed thread
4703 SafeUpdateLastThrownObject();
4706 #ifdef FEATURE_CORRUPTING_EXCEPTIONS
4707 // Since the catch clause has successfully executed and we are exiting it, reset the corruption severity
4708 // in the ThreadExceptionState for the last active exception. This will ensure that when the next exception
4709 // gets thrown/raised, EH tracker wont pick up an invalid value.
4710 if (!fIsDebuggerThread)
4712 CEHelper::ResetLastActiveCorruptionSeverityPostCatchHandler(this);
4714 #endif // FEATURE_CORRUPTING_EXCEPTIONS
4718 void Thread::SetLastThrownObjectHandle(OBJECTHANDLE h)
4729 if (m_LastThrownObjectHandle != NULL &&
4730 !CLRException::IsPreallocatedExceptionHandle(m_LastThrownObjectHandle))
4732 DestroyHandle(m_LastThrownObjectHandle);
4735 m_LastThrownObjectHandle = h;
4739 // Create a duplicate handle of the current throwable and set the last thrown object to that. This ensures that the
4740 // last thrown object and the current throwable have handles that are in the same app domain.
4742 void Thread::SafeUpdateLastThrownObject(void)
4753 OBJECTHANDLE hThrowable = GetThrowableAsHandle();
4755 if (hThrowable != NULL)
4759 IGCHandleManager *pHandleTable = GCHandleUtilities::GetGCHandleManager();
4761 // Creating a duplicate handle here ensures that the AD of the last thrown object
4762 // matches the domain of the current throwable.
4763 OBJECTHANDLE duplicateHandle = pHandleTable->CreateDuplicateHandle(hThrowable);
4764 SetLastThrownObjectHandle(duplicateHandle);
4768 // If we can't create a duplicate handle, we set both throwables to the preallocated OOM exception.
4769 SafeSetThrowables(CLRException::GetPreallocatedOutOfMemoryException());
4771 EX_END_CATCH(SwallowAllExceptions);
4775 // Background threads must be counted, because the EE should shut down when the
4776 // last non-background thread terminates. But we only count running ones.
4777 void Thread::SetBackground(BOOL isBack, BOOL bRequiresTSL)
4785 // booleanize IsBackground() which just returns bits
4786 if (isBack == !!IsBackground())
4789 LOG((LF_SYNC, INFO3, "SetBackground obtain lock\n"));
4790 ThreadStoreLockHolder TSLockHolder(FALSE);
4793 TSLockHolder.Acquire();
4798 // This can only happen in a race condition, where the correct thing to do
4799 // is ignore it. If it happens without the race condition, we throw an
4805 if (!IsBackground())
4807 FastInterlockOr((ULONG *) &m_State, TS_Background);
4809 // unstarted threads don't contribute to the background count
4811 ThreadStore::s_pThreadStore->m_BackgroundThreadCount++;
4813 // If we put the main thread into a wait, until only background threads exist,
4814 // then we make that
4815 // main thread a background thread. This cleanly handles the case where it
4816 // may or may not be one as it enters the wait.
4818 // One of the components of OtherThreadsComplete() has changed, so check whether
4819 // we should now exit the EE.
4820 ThreadStore::CheckForEEShutdown();
4827 FastInterlockAnd((ULONG *) &m_State, ~TS_Background);
4829 // unstarted threads don't contribute to the background count
4831 ThreadStore::s_pThreadStore->m_BackgroundThreadCount--;
4833 _ASSERTE(ThreadStore::s_pThreadStore->m_BackgroundThreadCount >= 0);
4834 _ASSERTE(ThreadStore::s_pThreadStore->m_BackgroundThreadCount <=
4835 ThreadStore::s_pThreadStore->m_ThreadCount);
4841 TSLockHolder.Release();
4845 #ifdef FEATURE_COMINTEROP
4846 class ApartmentSpyImpl : public IUnknownCommon<IInitializeSpy>
4850 HRESULT STDMETHODCALLTYPE PreInitialize(DWORD dwCoInit, DWORD dwCurThreadAptRefs)
4852 LIMITED_METHOD_CONTRACT;
4856 HRESULT STDMETHODCALLTYPE PostInitialize(HRESULT hrCoInit, DWORD dwCoInit, DWORD dwNewThreadAptRefs)
4858 LIMITED_METHOD_CONTRACT;
4859 return hrCoInit; // this HRESULT will be returned from CoInitialize(Ex)
4862 HRESULT STDMETHODCALLTYPE PreUninitialize(DWORD dwCurThreadAptRefs)
4864 // Don't assume that Thread exists and do not create it.
4865 STATIC_CONTRACT_NOTHROW;
4866 STATIC_CONTRACT_GC_TRIGGERS;
4867 STATIC_CONTRACT_MODE_PREEMPTIVE;
4871 if (dwCurThreadAptRefs == 1 && !g_fEEShutDown)
4873 // This is the last CoUninitialize on this thread and the CLR is still running. If it's an STA
4874 // we take the opportunity to perform COM/WinRT cleanup now, when the apartment is still alive.
4876 Thread *pThread = GetThreadNULLOk();
4877 if (pThread != NULL)
4879 BEGIN_EXTERNAL_ENTRYPOINT(&hr)
4881 if (pThread->GetFinalApartment() == Thread::AS_InSTA)
4883 // This will release RCWs and purge the WinRT factory cache on all AppDomains. It
4884 // will also synchronize with the finalizer thread which ensures that the RCWs
4885 // that were already in the global RCW cleanup list will be cleaned up as well.
4887 ReleaseRCWsInCachesNoThrow(GetCurrentCtxCookie());
4890 END_EXTERNAL_ENTRYPOINT;
4896 HRESULT STDMETHODCALLTYPE PostUninitialize(DWORD dwNewThreadAptRefs)
4898 LIMITED_METHOD_CONTRACT;
4902 #endif // FEATURE_COMINTEROP
4904 // When the thread starts running, make sure it is running in the correct apartment
4906 BOOL Thread::PrepareApartmentAndContext()
4914 m_OSThreadId = ::GetCurrentThreadId();
4916 #ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
4917 // Be very careful in here because we haven't set up e.g. TLS yet.
4919 if (m_State & (TS_InSTA | TS_InMTA))
4921 // Make sure TS_InSTA and TS_InMTA aren't both set.
4922 _ASSERTE(!((m_State & TS_InSTA) && (m_State & TS_InMTA)));
4924 // Determine the apartment state to set based on the requested state.
4925 ApartmentState aState = m_State & TS_InSTA ? AS_InSTA : AS_InMTA;
4927 // Clear the requested apartment state from the thread. This is requested since
4928 // the thread might actually be a fiber that has already been initialized to
4929 // a different apartment state than the requested one. If we didn't clear
4930 // the requested apartment state, then we could end up with both TS_InSTA and
4931 // TS_InMTA set at the same time.
4932 FastInterlockAnd ((ULONG *) &m_State, ~TS_InSTA & ~TS_InMTA);
4934 // Attempt to set the requested apartment state.
4935 SetApartment(aState, FALSE);
4938 // In the case where we own the thread and we have switched it to a different
4939 // starting context, it is the responsibility of the caller (KickOffThread())
4940 // to notice that the context changed, and to adjust the delegate that it will
4941 // dispatch on, as appropriate.
4942 #endif //FEATURE_COMINTEROP_APARTMENT_SUPPORT
4944 #ifdef FEATURE_COMINTEROP
4945 // Our IInitializeSpy will be registered in AppX always, in classic processes
4946 // only if the internal config switch is on.
4947 if (AppX::IsAppXProcess() || g_pConfig->EnableRCWCleanupOnSTAShutdown())
4949 NewHolder<ApartmentSpyImpl> pSpyImpl = new ApartmentSpyImpl();
4951 IfFailThrow(CoRegisterInitializeSpy(pSpyImpl, &m_uliInitializeSpyCookie));
4952 pSpyImpl.SuppressRelease();
4954 m_fInitializeSpyRegistered = true;
4956 #endif // FEATURE_COMINTEROP
4962 #ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
4964 // TS_InSTA (0x00004000) -> AS_InSTA (0)
4965 // TS_InMTA (0x00008000) -> AS_InMTA (1)
4966 #define TS_TO_AS(ts) \
4967 (Thread::ApartmentState)((((DWORD)ts) >> 14) - 1) \
4969 // Retrieve the apartment state of the current thread. There are three possible
4970 // states: thread hosts an STA, thread is part of the MTA or thread state is
4971 // undecided. The last state may indicate that the apartment has not been set at
4972 // all (nobody has called CoInitializeEx) or that the EE does not know the
4973 // current state (EE has not called CoInitializeEx).
4974 Thread::ApartmentState Thread::GetApartment()
4984 ApartmentState as = AS_Unknown;
4985 ThreadState maskedTs = (ThreadState)(((DWORD)m_State) & (TS_InSTA|TS_InMTA));
4988 _ASSERTE((maskedTs == TS_InSTA) || (maskedTs == TS_InMTA));
4989 static_assert_no_msg(TS_TO_AS(TS_InSTA) == AS_InSTA);
4990 static_assert_no_msg(TS_TO_AS(TS_InMTA) == AS_InMTA);
4992 as = TS_TO_AS(maskedTs);
4996 #ifdef MDA_SUPPORTED
4997 (NULL == MDA_GET_ASSISTANT(InvalidApartmentStateChange)) &&
5004 return GetApartmentRare(as);
// Slow path of GetApartment: when the cached state is unknown (or an MDA probe is
// active), queries the OS for the current thread's actual message-handling type
// and derives/validates the apartment state from it.
Thread::ApartmentState Thread::GetApartmentRare(Thread::ApartmentState as)
// OS queries below are only meaningful for the currently running thread.
if (this == GetThread())
#ifdef MDA_SUPPORTED
MdaInvalidApartmentStateChange* pProbe = MDA_GET_ASSISTANT(InvalidApartmentStateChange);
// Without notifications from OLE32, we cannot know when the apartment state of a
// thread changes. But we have cached this fact and depend on it for all our
// blocking and COM Interop behavior to work correctly. Using the CDH, log that it
// is not changing underneath us, on those platforms where it is relatively cheap for
if (as != AS_Unknown)
hr = GetCurrentThreadTypeNT5(&type);
// An STA pumps messages; a cached MTA state on a message-pumping thread is a mismatch.
if (type == THDTYPE_PROCESSMESSAGES && as == AS_InMTA)
pProbe->ReportViolation(this, as, FALSE);
// Conversely, a blocking (non-pumping) thread should not be cached as STA.
else if (type == THDTYPE_BLOCKMESSAGES && as == AS_InSTA)
pProbe->ReportViolation(this, as, FALSE);
// No cached state: derive it from the OS thread type.
if (as == AS_Unknown)
hr = GetCurrentThreadTypeNT5(&type);
as = (type == THDTYPE_PROCESSMESSAGES) ? AS_InSTA : AS_InMTA;
// If we get back THDTYPE_PROCESSMESSAGES, we are guaranteed to
// be an STA thread. If not, we are an MTA thread, however
// we can't know if the thread has been explicitly set to MTA
// (via a call to CoInitializeEx) or if it has been implicitly
// made MTA (if it hasn't been CoInitializeEx'd but CoInitialize
// has already been called on some other thread in the process.
// NOTE(review): TS_TO_AS maps AS_InSTA to 0, so OR-ing AS_InSTA into m_State
// looks like a no-op — confirm whether TS_InSTA was intended here.
FastInterlockOr((ULONG *) &m_State, AS_InSTA);
// Retrieve the explicit apartment state of the current thread. There are three possible
// states: thread hosts an STA, thread is part of the MTA or thread state is
// undecided. The last state may indicate that the apartment has not been set at
// all (nobody has called CoInitializeEx), the EE does not know the
// current state (EE has not called CoInitializeEx), or the thread is implicitly in
// the MTA (CoInitialize was performed by some other thread in the process).
Thread::ApartmentState Thread::GetExplicitApartment()
// The two apartment bits are mutually exclusive.
_ASSERTE(!((m_State & TS_InSTA) && (m_State & TS_InMTA)));
// Initialize m_State by calling GetApartment.
// Unlike GetApartment, only report what was explicitly recorded on the state bits.
ApartmentState as = (m_State & TS_InSTA) ? AS_InSTA :
(m_State & TS_InMTA) ? AS_InMTA :
// Determine the apartment state the thread is actually in right now, without
// trusting the cached bits (used on shutdown paths where the state may be stale).
Thread::ApartmentState Thread::GetFinalApartment()
// Only valid on the currently executing thread.
_ASSERTE(this == GetThread());
ApartmentState as = AS_Unknown;
// On shutdown, do not use cached value. Someone might have called
// Clear the cached bits so GetApartment re-derives the state.
FastInterlockAnd ((ULONG *) &m_State, ~TS_InSTA & ~TS_InMTA);
as = GetApartment();
if (as == AS_Unknown)
// On Win2k and above, GetApartment will only return AS_Unknown if CoInitialize
// hasn't been called in the process. In that case we can simply assume MTA. However we
// cannot cache this value in the Thread because if a CoInitialize does occur, then the
// thread state might change.
// When we get an apartment tear-down notification,
// we want to reset the apartment state we cache on the thread.
VOID Thread::ResetApartment()
// reset the TS_InSTA bit and TS_InMTA bit
ThreadState t_State = (ThreadState)(~(TS_InSTA | TS_InMTA));
FastInterlockAnd((ULONG *) &m_State, t_State);
// Attempt to set current thread's apartment state. The actual apartment state
// achieved is returned and may differ from the input state if someone managed
// to call CoInitializeEx on this thread first (note that calls to SetApartment
// made before the thread has started are guaranteed to succeed).
// The fFireMDAOnMismatch indicates if we should fire the apartment state probe
// on an apartment state mismatch.
Thread::ApartmentState Thread::SetApartment(ApartmentState state, BOOL fFireMDAOnMismatch)
INJECT_FAULT(COMPlusThrowOM(););
// Reset any bits that request for CoInitialize
ResetRequiresCoInitialize();
// Setting the state to AS_Unknown indicates we should CoUninitialize
if (state == AS_Unknown)
// Uninitialize is needed if we CoInitialized the thread ourselves, or
// (with COM interop) if WinRT was initialized on it.
BOOL needUninitialize = (m_State & TS_CoInitialized)
#ifdef FEATURE_COMINTEROP
|| IsWinRTInitialized()
#endif // FEATURE_COMINTEROP
if (needUninitialize)
// If we haven't CoInitialized the thread, then we don't have anything to do.
if (m_State & TS_CoInitialized)
// We should never be attempting to CoUninitialize another thread than
// the currently running thread.
_ASSERTE(m_OSThreadId == ::GetCurrentThreadId());
// CoUninitialize the thread and reset the STA/MTA/CoInitialized state bits.
ThreadState uninitialized = static_cast<ThreadState>(TS_InSTA | TS_InMTA | TS_CoInitialized);
FastInterlockAnd((ULONG *) &m_State, ~uninitialized);
#ifdef FEATURE_COMINTEROP
if (IsWinRTInitialized())
_ASSERTE(WinRTSupported());
BaseWinRTUninitialize();
ResetWinRTInitialized();
#endif // FEATURE_COMINTEROP
return GetApartment();
// Call GetApartment to initialize the current apartment state.
// Important note: For Win2k and above this can return AS_InMTA even if the current
// thread has never been CoInitialized. Because of this we MUST NOT look at the
// return value of GetApartment here. We can however look at the m_State flags
// since these will only be set to TS_InMTA if we know for a fact the the
// current thread has explicitly been made MTA (via a call to CoInitializeEx).
// If the current thread is STA, then it is impossible to change it to
if (m_State & TS_InSTA)
#ifdef MDA_SUPPORTED
if (state == AS_InMTA && fFireMDAOnMismatch)
MDA_TRIGGER_ASSISTANT(InvalidApartmentStateChange, ReportViolation(this, state, TRUE));
// If the current thread is EXPLICITLY MTA, then it is impossible to change it to
if (m_State & TS_InMTA)
#ifdef MDA_SUPPORTED
if (state == AS_InSTA && fFireMDAOnMismatch)
MDA_TRIGGER_ASSISTANT(InvalidApartmentStateChange, ReportViolation(this, state, TRUE));
// If the thread isn't even started yet, we mark the state bits without
// calling CoInitializeEx (since we're obviously not in the correct thread
// context yet). We'll retry this call when the thread is started.
// Don't use the TS_Unstarted state bit to check for this, it's cleared far
// too late in the day for us. Instead check whether we're in the correct
if (m_OSThreadId != ::GetCurrentThreadId())
FastInterlockOr((ULONG *) &m_State, (state == AS_InSTA) ? TS_InSTA : TS_InMTA);
// Attempt to set apartment by calling CoInitializeEx. This may fail if
// another caller (outside EE) beat us to it.
// Important note: When calling CoInitializeEx(COINIT_MULTITHREADED) on a
// thread that has never been CoInitialized, the return value will always
// be S_OK, even if another thread in the process has already been
// CoInitialized to MTA. However if the current thread has already been
// CoInitialized to MTA, then S_FALSE will be returned.
hr = ::CoInitializeEx(NULL, (state == AS_InSTA) ?
COINIT_APARTMENTTHREADED : COINIT_MULTITHREADED);
ThreadState t_State = (state == AS_InSTA) ? TS_InSTA : TS_InMTA;
// The thread has never been CoInitialized.
// S_OK means we own the CoInit; remember that so we CoUninitialize later.
t_State = (ThreadState)(t_State | TS_CoInitialized);
// S_FALSE: thread was already CoInitialized to the requested mode.
_ASSERTE(hr == S_FALSE);
// If the thread has already been CoInitialized to the proper mode, then
// we don't want to leave an outstanding CoInit so we CoUninit.
// We succeeded in setting the apartment state to the requested state.
FastInterlockOr((ULONG *) &m_State, t_State);
else if (hr == RPC_E_CHANGED_MODE)
// We didn't manage to enforce the requested apartment state, but at least
// we can work out what the state is now. No need to actually do the CoInit --
// obviously someone else already took care of that.
// Note the bits are deliberately the OPPOSITE of the requested state.
FastInterlockOr((ULONG *) &m_State, ((state == AS_InSTA) ? TS_InMTA : TS_InSTA));
#ifdef MDA_SUPPORTED
if (fFireMDAOnMismatch)
// Report via the customer debug helper that we failed to set the apartment type
// to the specified type.
MDA_TRIGGER_ASSISTANT(InvalidApartmentStateChange, ReportViolation(this, state, TRUE));
else if (hr == E_OUTOFMEMORY)
_ASSERTE(!"Unexpected HRESULT returned from CoInitializeEx!");
#ifdef FEATURE_COMINTEROP
// If WinRT is supported on this OS, also initialize it at the same time. Since WinRT sits on top of COM
// we need to make sure that it is initialized in the same threading mode as we just started COM itself
// with (or that we detected COM had already been started with).
if (WinRTSupported() && !IsWinRTInitialized())
BOOL isSTA = m_State & TS_InSTA;
_ASSERTE(isSTA || (m_State & TS_InMTA));
HRESULT hrWinRT = RoInitialize(isSTA ? RO_INIT_SINGLETHREADED : RO_INIT_MULTITHREADED);
if (SUCCEEDED(hrWinRT))
if (hrWinRT == S_OK)
// S_OK: we own this RoInitialize and must balance it with RoUninitialize.
SetThreadStateNC(TSNC_WinRTInitialized);
_ASSERTE(hrWinRT == S_FALSE);
// If the thread has already been initialized, back it out. We may not
// always be able to call RoUninitialize on shutdown so if there's
// a way to avoid having to, we should take advantage of that.
else if (hrWinRT == E_OUTOFMEMORY)
// We don't check for RPC_E_CHANGEDMODE, since we're using the mode that was read in by
// initializing COM above. COM and WinRT need to always be in the same mode, so we should never
// see that return code at this point.
_ASSERTE(!"Unexpected HRESULT From RoInitialize");
// Since we've just called CoInitialize, COM has effectively been started up.
// To ensure the CLR is aware of this, we need to call EnsureComStarted.
EnsureComStarted(FALSE);
#endif // FEATURE_COMINTEROP
return GetApartment();
5371 #endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT
5374 //----------------------------------------------------------------------------
5376 // ThreadStore Implementation
5378 //----------------------------------------------------------------------------
// Constructor: zero all thread counters, create the thread-store Crst (usable
// from any GC mode and by the debugger helper thread), and create the
// manual-reset event used to signal that all non-background threads are done.
ThreadStore::ThreadStore()
: m_Crst(CrstThreadStore, (CrstFlags) (CRST_UNSAFE_ANYMODE | CRST_DEBUGGER_THREAD)),
m_MaxThreadCount(0),
m_UnstartedThreadCount(0),
m_BackgroundThreadCount(0),
m_PendingThreadCount(0),
m_DeadThreadCount(0),
m_DeadThreadCountForGCTrigger(0),
m_TriggerGCForDeadThreads(false),
m_GuidCreated(FALSE),
m_TerminationEvent.CreateManualEvent(FALSE);
_ASSERTE(m_TerminationEvent.IsValid());
// One-time startup initialization of the global thread store and the global
// events/config values that thread suspension and stack crawling depend on.
void ThreadStore::InitThreadStore()
s_pThreadStore = new ThreadStore;
// Dispenser of small ids used for thin-lock ownership tracking.
g_pThinLockThreadIdDispenser = new IdDispenser();
ThreadSuspend::g_pGCSuspendEvent = new CLREvent();
ThreadSuspend::g_pGCSuspendEvent->CreateManualEvent(FALSE);
Thread::MaxThreadRecord = EEConfig::GetConfigDWORD_DontUse_(CLRConfig::INTERNAL_MaxThreadRecord,Thread::MaxThreadRecord);
Thread::MaxStackDepth = EEConfig::GetConfigDWORD_DontUse_(CLRConfig::INTERNAL_MaxStackDepth,Thread::MaxStackDepth);
// Clamp configured stack depth to a sane upper bound.
if (Thread::MaxStackDepth > 100) {
Thread::MaxStackDepth = 100;
s_pWaitForStackCrawlEvent = new CLREvent();
s_pWaitForStackCrawlEvent->CreateManualEvent(FALSE);
// Configuration for the dead-thread GC-trigger heuristic (see
// IncrementDeadThreadCountForGCTrigger / TriggerGCForDeadThreadsIfNecessary).
s_DeadThreadCountThresholdForGCTrigger =
static_cast<LONG>(CLRConfig::GetConfigValue(CLRConfig::INTERNAL_Thread_DeadThreadCountThresholdForGCTrigger));
// A negative configured threshold is treated as 0 (feature disabled).
if (s_DeadThreadCountThresholdForGCTrigger < 0)
s_DeadThreadCountThresholdForGCTrigger = 0;
s_DeadThreadGCTriggerPeriodMilliseconds =
CLRConfig::GetConfigValue(CLRConfig::INTERNAL_Thread_DeadThreadGCTriggerPeriodMilliseconds);
// Lazily allocated on first use in TriggerGCForDeadThreadsIfNecessary.
s_DeadThreadGenerationCounts = nullptr;
// Enter and leave the critical section around the thread store. Clients should
// use LockThreadStore and UnlockThreadStore because ThreadStore lock has
// additional semantics well beyond a normal lock.
DEBUG_NOINLINE void ThreadStore::Enter()
WRAPPER_NO_CONTRACT;
ANNOTATION_SPECIAL_HOLDER_CALLER_NEEDS_DYNAMIC_CONTRACT;
// Threadstore needs special shutdown handling.
if (g_fSuspendOnShutdown)
// During shutdown, non-special threads give up the lock and block here.
m_Crst.ReleaseAndBlockForShutdownIfNotSpecialThread();
// Release the thread-store critical section (counterpart of Enter).
DEBUG_NOINLINE void ThreadStore::Leave()
WRAPPER_NO_CONTRACT;
ANNOTATION_SPECIAL_HOLDER_CALLER_NEEDS_DYNAMIC_CONTRACT;
// Acquire the thread-store lock for a non-suspension caller.
void ThreadStore::LockThreadStore()
WRAPPER_NO_CONTRACT;
// The actual implementation is in ThreadSuspend class since it is coupled
// with thread suspension logic
ThreadSuspend::LockThreadStore(ThreadSuspend::SUSPEND_OTHER);
// Release the thread-store lock for a non-suspension caller.
void ThreadStore::UnlockThreadStore()
WRAPPER_NO_CONTRACT;
// The actual implementation is in ThreadSuspend class since it is coupled
// with thread suspension logic
ThreadSuspend::UnlockThreadStore(FALSE, ThreadSuspend::SUSPEND_OTHER);
// AddThread adds 'newThread' to m_ThreadList under the thread-store lock and
// updates the total/max/unstarted counters. bRequiresTSL controls whether the
// lock is acquired here (presumably FALSE when the caller already holds it —
// the conditional acquire/release lines are not visible in this view; confirm).
void ThreadStore::AddThread(Thread *newThread, BOOL bRequiresTSL)
if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
LOG((LF_SYNC, INFO3, "AddThread obtain lock\n"));
ThreadStoreLockHolder TSLockHolder(FALSE);
TSLockHolder.Acquire();
s_pThreadStore->m_ThreadList.InsertTail(newThread);
s_pThreadStore->m_ThreadCount++;
// Track the high-water mark of simultaneously live threads.
if (s_pThreadStore->m_MaxThreadCount < s_pThreadStore->m_ThreadCount)
s_pThreadStore->m_MaxThreadCount = s_pThreadStore->m_ThreadCount;
if (newThread->IsUnstarted())
s_pThreadStore->m_UnstartedThreadCount++;
// Mark the thread as present in the store so RemoveThread can validate it.
newThread->SetThreadStateNC(Thread::TSNC_ExistInThreadStore);
// New threads must not already be counted as background or dead.
_ASSERTE(!newThread->IsBackground());
_ASSERTE(!newThread->IsDead());
TSLockHolder.Release();
// this function is just designed to avoid deadlocks during abnormal process termination, and should not be used for any other purpose
BOOL ThreadStore::CanAcquireLock()
WRAPPER_NO_CONTRACT;
// Peeks at the underlying CRITICAL_SECTION: lock is free (LockCount == -1)
// or already owned by this thread — either way acquiring cannot deadlock.
return (s_pThreadStore->m_Crst.m_criticalsection.LockCount == -1 || (size_t)s_pThreadStore->m_Crst.m_criticalsection.OwningThread == (size_t)GetCurrentThreadId());
// Whenever one of the components of OtherThreadsComplete() has changed in the
// correct direction, see whether we can now shutdown the EE because only background
// threads are running.
void ThreadStore::CheckForEEShutdown()
// Only signal shutdown if the EE controls process lifetime and all
// non-background threads have finished.
if (g_fWeControlLifetime &&
s_pThreadStore->OtherThreadsComplete())
// Wake WaitForOtherThreads (waiting on this manual-reset event).
bRet = s_pThreadStore->m_TerminationEvent.Set();
// Removes 'target' from the thread store, adjusts every per-category counter,
// folds the thread's thread-pool completion count into the global overflow
// accumulator, and checks whether the EE can now shut down.
BOOL ThreadStore::RemoveThread(Thread *target)
#if 0 // This assert is not valid when failing to create background GC thread.
// Main GC thread holds the TS lock.
_ASSERTE (ThreadStore::HoldingThreadStore());
_ASSERTE(s_pThreadStore->m_Crst.GetEnterCount() > 0 ||
_ASSERTE(s_pThreadStore->DbgFindThread(target));
ret = s_pThreadStore->m_ThreadList.FindAndRemove(target);
_ASSERTE(ret && ret == target);
found = (ret != NULL);
// The thread is no longer tracked by the store.
target->ResetThreadStateNC(Thread::TSNC_ExistInThreadStore);
s_pThreadStore->m_ThreadCount--;
if (target->IsDead())
s_pThreadStore->m_DeadThreadCount--;
// Keep the dead-thread GC-trigger count in sync with the dead count.
s_pThreadStore->DecrementDeadThreadCountForGCTrigger();
// Unstarted threads are not in the Background count:
if (target->IsUnstarted())
s_pThreadStore->m_UnstartedThreadCount--;
if (target->IsBackground())
s_pThreadStore->m_BackgroundThreadCount--;
// Preserve this thread's contribution to the thread-pool completion total
// after the Thread object goes away.
FastInterlockExchangeAdd(
&Thread::s_threadPoolCompletionCountOverflow,
target->m_threadPoolCompletionCount);
// Sanity-check the counter invariants: each sub-count is bounded by the total.
_ASSERTE(s_pThreadStore->m_ThreadCount >= 0);
_ASSERTE(s_pThreadStore->m_BackgroundThreadCount >= 0);
_ASSERTE(s_pThreadStore->m_ThreadCount >=
s_pThreadStore->m_BackgroundThreadCount);
_ASSERTE(s_pThreadStore->m_ThreadCount >=
s_pThreadStore->m_UnstartedThreadCount);
_ASSERTE(s_pThreadStore->m_ThreadCount >=
s_pThreadStore->m_DeadThreadCount);
// One of the components of OtherThreadsComplete() has changed, so check whether
// we should now exit the EE.
CheckForEEShutdown();
// When a thread is created as unstarted. Later it may get started, in which case
// someone calls Thread::HasStarted() on that physical thread. This completes
// the Setup and calls here.
void ThreadStore::TransferStartedThread(Thread *thread, BOOL bRequiresTSL)
// Must run on the thread being started.
_ASSERTE(GetThread() == thread);
LOG((LF_SYNC, INFO3, "TransferUnstartedThread obtain lock\n"));
ThreadStoreLockHolder TSLockHolder(FALSE);
TSLockHolder.Acquire();
// The thread must already be registered, owned, unstarted, and alive.
_ASSERTE(s_pThreadStore->DbgFindThread(thread));
_ASSERTE(thread->HasValidThreadHandle());
_ASSERTE(thread->m_State & Thread::TS_WeOwn);
_ASSERTE(thread->IsUnstarted());
_ASSERTE(!thread->IsDead());
// An abort requested before the thread even started terminates the start-up.
if (thread->m_State & Thread::TS_AbortRequested)
PAL_CPP_THROW(EEException *, new EEException(COR_E_THREADABORTED));
// Of course, m_ThreadCount is already correct since it includes started and
// unstarted threads.
s_pThreadStore->m_UnstartedThreadCount--;
// We only count background threads that have been started
if (thread->IsBackground())
s_pThreadStore->m_BackgroundThreadCount++;
_ASSERTE(s_pThreadStore->m_PendingThreadCount > 0);
FastInterlockDecrement(&s_pThreadStore->m_PendingThreadCount);
// As soon as we erase this bit, the thread becomes eligible for suspension,
// stopping, interruption, etc.
FastInterlockAnd((ULONG *) &thread->m_State, ~Thread::TS_Unstarted);
FastInterlockOr((ULONG *) &thread->m_State, Thread::TS_LegalToJoin);
// release ThreadStore Crst to avoid Crst Violation when calling HandleThreadAbort later
TSLockHolder.Release();
// One of the components of OtherThreadsComplete() has changed, so check whether
// we should now exit the EE.
CheckForEEShutdown();
// Static configuration/state for the dead-thread GC-trigger heuristic
// (initialized in InitThreadStore; generation counts allocated lazily).
LONG ThreadStore::s_DeadThreadCountThresholdForGCTrigger = 0;
DWORD ThreadStore::s_DeadThreadGCTriggerPeriodMilliseconds = 0;
SIZE_T *ThreadStore::s_DeadThreadGenerationCounts = nullptr;
// Bump the dead-thread count and, once it crosses the configured threshold and
// enough time has passed since the last max-generation GC, ask the finalizer
// thread to consider triggering a GC to reclaim dead Thread objects.
void ThreadStore::IncrementDeadThreadCountForGCTrigger()
// Although all increments and decrements are usually done inside a lock, that is not sufficient to synchronize with a
// background GC thread resetting this value, hence the interlocked operation. Ignore overflow; overflow would likely never
// occur, the count is treated as unsigned, and nothing bad would happen if it were to overflow.
SIZE_T count = static_cast<SIZE_T>(FastInterlockIncrement(&m_DeadThreadCountForGCTrigger));
SIZE_T countThreshold = static_cast<SIZE_T>(s_DeadThreadCountThresholdForGCTrigger);
// Threshold of 0 means the heuristic is disabled.
if (count < countThreshold || countThreshold == 0)
IGCHeap *gcHeap = GCHeapUtilities::GetGCHeap();
if (gcHeap == nullptr)
// Rate-limit: don't re-trigger within the configured period of the last
// max-generation GC.
SIZE_T gcLastMilliseconds = gcHeap->GetLastGCStartTime(gcHeap->GetMaxGeneration());
SIZE_T gcNowMilliseconds = gcHeap->GetNow();
if (gcNowMilliseconds - gcLastMilliseconds < s_DeadThreadGCTriggerPeriodMilliseconds)
if (!g_fEEStarted) // required for FinalizerThread::EnableFinalization() below
// The GC is triggered on the finalizer thread since it's not safe to trigger it on DLL_THREAD_DETACH.
// TriggerGCForDeadThreadsIfNecessary() will determine which generation of GC to trigger, and may not actually trigger a GC.
// If a GC is triggered, since there would be a delay before the dead thread count is updated, clear the count and wait for
// it to reach the threshold again. If a GC would not be triggered, the count is still cleared here to prevent waking up the
// finalizer thread to do the work in TriggerGCForDeadThreadsIfNecessary() for every dead thread.
m_DeadThreadCountForGCTrigger = 0;
m_TriggerGCForDeadThreads = true;
FinalizerThread::EnableFinalization();
// Decrement the dead-thread count, clamping at zero (a concurrent reset by the
// GC-started callback can race an in-flight decrement below zero).
void ThreadStore::DecrementDeadThreadCountForGCTrigger()
// Although all increments and decrements are usually done inside a lock, that is not sufficient to synchronize with a
// background GC thread resetting this value, hence the interlocked operation.
if (FastInterlockDecrement(&m_DeadThreadCountForGCTrigger) < 0)
m_DeadThreadCountForGCTrigger = 0;
// Called when a max-generation GC starts: reset the dead-thread trigger count
// so each dead thread contributes to at most one GC trigger.
void ThreadStore::OnMaxGenerationGCStarted()
LIMITED_METHOD_CONTRACT;
// A dead thread may contribute to triggering a GC at most once. After a max-generation GC occurs, if some dead thread
// objects are still reachable due to references to the thread objects, they will not contribute to triggering a GC again.
// Synchronize the store with increment/decrement operations occurring on different threads, and make the change visible to
// other threads in order to prevent unnecessary GC triggers.
FastInterlockExchange(&m_DeadThreadCountForGCTrigger, 0);
// Polled by the finalizer thread: has IncrementDeadThreadCountForGCTrigger
// requested a dead-thread GC check?
bool ThreadStore::ShouldTriggerGCForDeadThreads()
LIMITED_METHOD_CONTRACT;
return m_TriggerGCForDeadThreads;
// Runs on the finalizer thread. Examines dead threads' exposed objects to pick
// the lowest GC generation whose collection would reclaim enough of them, then
// triggers a non-blocking GC of that generation (subject to a rate limit).
void ThreadStore::TriggerGCForDeadThreadsIfNecessary()
if (!m_TriggerGCForDeadThreads)
// Consume the request flag set by IncrementDeadThreadCountForGCTrigger.
m_TriggerGCForDeadThreads = false;
// Not safe to touch CLR state
unsigned gcGenerationToTrigger = 0;
IGCHeap *gcHeap = GCHeapUtilities::GetGCHeap();
_ASSERTE(gcHeap != nullptr);
// A generation is worth collecting once it holds at least half the trigger
// threshold's worth of dead-thread objects.
SIZE_T generationCountThreshold = static_cast<SIZE_T>(s_DeadThreadCountThresholdForGCTrigger) / 2;
unsigned maxGeneration = gcHeap->GetMaxGeneration();
if (!s_DeadThreadGenerationCounts)
// initialize this field on first use with an entry for every table.
s_DeadThreadGenerationCounts = new (nothrow) SIZE_T[maxGeneration + 1];
if (!s_DeadThreadGenerationCounts)
// Reset per-generation tallies for this pass.
memset(s_DeadThreadGenerationCounts, 0, sizeof(SIZE_T) * (maxGeneration + 1));
ThreadStoreLockHolder threadStoreLockHolder;
// Determine the generation for which to trigger a GC. Iterate over all dead threads that have not yet been considered
// for triggering a GC and see how many are in which generations.
for (Thread *thread = ThreadStore::GetAllThreadList(NULL, Thread::TS_Dead, Thread::TS_Dead);
thread = ThreadStore::GetAllThreadList(thread, Thread::TS_Dead, Thread::TS_Dead))
if (thread->HasDeadThreadBeenConsideredForGCTrigger())
Object *exposedObject = OBJECTREFToObject(thread->GetExposedObjectRaw());
if (exposedObject == nullptr)
unsigned exposedObjectGeneration = gcHeap->WhichGeneration(exposedObject);
SIZE_T newDeadThreadGenerationCount = ++s_DeadThreadGenerationCounts[exposedObjectGeneration];
// Raise the target generation when a higher one accumulates enough objects.
if (exposedObjectGeneration > gcGenerationToTrigger && newDeadThreadGenerationCount >= generationCountThreshold)
gcGenerationToTrigger = exposedObjectGeneration;
// Can't go higher than the max generation; stop scanning early.
if (gcGenerationToTrigger >= maxGeneration)
// Make sure that enough time has elapsed since the last GC of the desired generation. We don't want to trigger GCs
// based on this heuristic too often. Give it some time to let the memory pressure trigger GCs automatically, and only
// if it doesn't in the given time, this heuristic may kick in to trigger a GC.
SIZE_T gcLastMilliseconds = gcHeap->GetLastGCStartTime(gcGenerationToTrigger);
SIZE_T gcNowMilliseconds = gcHeap->GetNow();
if (gcNowMilliseconds - gcLastMilliseconds < s_DeadThreadGCTriggerPeriodMilliseconds)
// For threads whose exposed objects are in the generation of GC that will be triggered or in a lower GC generation,
// mark them as having contributed to a GC trigger to prevent redundant GC triggers
for (Thread *thread = ThreadStore::GetAllThreadList(NULL, Thread::TS_Dead, Thread::TS_Dead);
thread = ThreadStore::GetAllThreadList(thread, Thread::TS_Dead, Thread::TS_Dead))
if (thread->HasDeadThreadBeenConsideredForGCTrigger())
Object *exposedObject = OBJECTREFToObject(thread->GetExposedObjectRaw());
if (exposedObject == nullptr)
// Objects in generations above the one being collected won't be reclaimed;
// leave them eligible to trigger a future GC.
if (gcGenerationToTrigger < maxGeneration &&
gcHeap->WhichGeneration(exposedObject) > gcGenerationToTrigger)
thread->SetHasDeadThreadBeenConsideredForGCTrigger();
} // ThreadStoreLockHolder, GCX_COOP()
GCHeapUtilities::GetGCHeap()->GarbageCollect(gcGenerationToTrigger, FALSE, collection_non_blocking);
5867 #endif // #ifndef DACCESS_COMPILE
// Access the list of threads. You must be inside a critical section, otherwise
// the "cursor" thread might disappear underneath you. Pass in NULL for the
// cursor to begin at the start of the list. Returns the next thread whose
// state bits, masked by 'mask', equal 'bits' (mask==0 matches every thread).
Thread *ThreadStore::GetAllThreadList(Thread *cursor, ULONG mask, ULONG bits)
#ifndef DACCESS_COMPILE
// Caller must hold the thread-store lock (except during process exit).
_ASSERTE((s_pThreadStore->m_Crst.GetEnterCount() > 0) || IsAtProcessExit());
? s_pThreadStore->m_ThreadList.GetNext(cursor)
: s_pThreadStore->m_ThreadList.GetHead());
// Only return threads matching the requested state filter.
if ((cursor->m_State & mask) == bits)
// Iterate over the threads that have been started
// (i.e. exclude threads that are unstarted or dead).
Thread *ThreadStore::GetThreadList(Thread *cursor)
return GetAllThreadList(cursor, (Thread::TS_Unstarted | Thread::TS_Dead), 0);
//---------------------------------------------------------------------------------------
//
// Grab a consistent snapshot of the thread's state, for reporting purposes only.
//
// Return Value:
//    the current state of the thread
//
Thread::ThreadState Thread::GetSnapshotState()
ThreadState res = m_State;
// A thread marked ReportDead is reported as Dead to consumers of the snapshot.
if (res & TS_ReportDead)
res = (ThreadState) (res | TS_Dead);
5944 #ifndef DACCESS_COMPILE
// Wait on 'pEvent' inside an EX_TRY so that any exception from the wait is
// swallowed; the wait status is returned via *pStatus.
BOOL CLREventWaitWithTry(CLREventBase *pEvent, DWORD timeout, BOOL fAlertable, DWORD *pStatus)
WRAPPER(GC_TRIGGERS);
*pStatus = pEvent->Wait(timeout, fAlertable);
EX_END_CATCH(SwallowAllExceptions);
// We shut down the EE only when all the non-background threads have terminated
// (unless this is an exceptional termination). So the main thread calls here to
// wait before tearing down the EE.
void ThreadStore::WaitForOtherThreads()
Thread *pCurThread = GetThread();
// Regardless of whether the main thread is a background thread or not, force
// it to be one. This simplifies our rules for counting non-background threads.
pCurThread->SetBackground(TRUE);
LOG((LF_SYNC, INFO3, "WaitForOtherThreads obtain lock\n"));
ThreadStoreLockHolder TSLockHolder(TRUE);
if (!OtherThreadsComplete())
// Drop the lock before blocking so other threads can come and go.
TSLockHolder.Release();
// Report this thread as dead for counting purposes while it waits.
FastInterlockOr((ULONG *) &pCurThread->m_State, Thread::TS_ReportDead);
DWORD ret = WAIT_OBJECT_0;
// Alertable, infinite wait for CheckForEEShutdown to set the termination event.
while (CLREventWaitWithTry(&m_TerminationEvent, INFINITE, TRUE, &ret))
_ASSERTE(ret == WAIT_OBJECT_0);
// Every EE process can lazily create a GUID that uniquely identifies it (for
// purposes of remoting). Created once under the thread-store lock and cached.
const GUID &ThreadStore::GetUniqueEEId()
ThreadStoreLockHolder TSLockHolder(TRUE);
HRESULT hr = ::CoCreateGuid(&m_EEGuid);
_ASSERTE(SUCCEEDED(hr));
// Remember that the GUID is valid so subsequent calls return the cached value.
m_GuidCreated = TRUE;
// Debug-only consistency check: walks the entire thread list, recounts every
// per-category counter (total, unstarted, background, dead) and the number of
// trap-returning contributions, and asserts they match the store's cached
// counters and (loosely) g_TrapReturningThreads. Also verifies 'target' is
// present in the list (via the elided per-thread comparison).
BOOL ThreadStore::DbgFindThread(Thread *target)
// Cache the current change stamp for g_TrapReturningThreads
LONG chgStamp = g_trtChgStamp;
STRESS_LOG3(LF_STORE, LL_INFO100, "ThreadStore::DbgFindThread - [thread=%p]. trt=%d. chgStamp=%d\n", GetThread(), g_TrapReturningThreads.Load(), chgStamp);
#if 0 // g_TrapReturningThreads debug code.
#endif // g_TrapReturningThreads debug code.
LONG cntUnstart = 0;
// Walk every thread (mask 0 matches all) and recount the categories.
while ((cur = GetAllThreadList(cur, 0, 0)) != NULL)
// Unstarted threads do not contribute to the count of background threads
if (cur->IsUnstarted())
if (cur->IsBackground())
// Note that (DebugSuspendPending | SuspendPending) implies a count of 2.
// We don't count GCPending because a single trap is held for the entire
// GC, instead of counting each interesting thread.
if (cur->m_State & Thread::TS_DebugSuspendPending)
// CoreCLR does not support user-requested thread suspension
_ASSERTE(!(cur->m_State & Thread::TS_UserSuspendPending));
if (cur->m_TraceCallCount > 0)
if (cur->IsAbortRequested())
// The recomputed counts must agree with the cached counters.
_ASSERTE(cnt == m_ThreadCount);
_ASSERTE(cntUnstart == m_UnstartedThreadCount);
_ASSERTE(cntBack == m_BackgroundThreadCount);
_ASSERTE(cntDead == m_DeadThreadCount);
_ASSERTE(0 <= m_PendingThreadCount);
#if 0 // g_TrapReturningThreads debug code.
if (cntReturn != g_TrapReturningThreads /*&& !g_fEEShutDown*/)
{ // If count is off, try again, to account for multiple threads.
// printf("Retry %d. cntReturn:%d, gReturn:%d\n", iRetry, cntReturn, g_TrapReturningThreads);
printf("cnt:%d, Un:%d, Back:%d, Dead:%d, cntReturn:%d, TrapReturn:%d, eeShutdown:%d, threadShutdown:%d\n",
cnt,cntUnstart,cntBack,cntDead,cntReturn,g_TrapReturningThreads, g_fEEShutDown, Thread::IsAtProcessExit());
LOG((LF_CORDB, LL_INFO1000,
"SUSPEND: cnt:%d, Un:%d, Back:%d, Dead:%d, cntReturn:%d, TrapReturn:%d, eeShutdown:%d, threadShutdown:%d\n",
cnt,cntUnstart,cntBack,cntDead,cntReturn,g_TrapReturningThreads, g_fEEShutDown, Thread::IsAtProcessExit()) );
//_ASSERTE(cntReturn + 2 >= g_TrapReturningThreads);
if (iRetry > 0 && iRetry < 4)
printf("%d retries to re-sync counted TrapReturn with global TrapReturn.\n", iRetry);
#endif // g_TrapReturningThreads debug code.
STRESS_LOG4(LF_STORE, LL_INFO100, "ThreadStore::DbgFindThread - [thread=%p]. trt=%d. chg=%d. cnt=%d\n", GetThread(), g_TrapReturningThreads.Load(), g_trtChgStamp.Load(), cntReturn);
// Because of race conditions and the fact that the GC places its
// own count, I can't assert this precisely. But I do want to be
// sure that this count isn't wandering ever higher -- with a
// nasty impact on the performance of GC mode changes and method
// We don't bother asserting this during process exit, because
// during a shutdown we will quietly terminate threads that are
// being waited on. (If we aren't shutting down, we carefully
// decrement our counts and alert anyone waiting for us to
// Note: we don't actually assert this if
// ThreadStore::TrapReturningThreads() updated g_TrapReturningThreads
// between the beginning of this function and the moment of the assert.
// *** The order of evaluation in the if condition is important ***
(g_trtChgInFlight != 0 || (cntReturn + 2 >= g_TrapReturningThreads) || chgStamp != g_trtChgStamp) ||
// Process a pending user interrupt and/or abort request on this thread:
// honors TI_Abort by starting thread-abort processing, and honors TI_Interrupt
// by clearing the interrupt state and throwing ThreadInterruptedException.
void Thread::HandleThreadInterrupt (BOOL fWaitForADUnload)
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_GC_TRIGGERS;
STATIC_CONTRACT_SO_TOLERANT;
// If we're waiting for shutdown, we don't want to abort/interrupt this thread
if (HasThreadStateNC(Thread::TSNC_BlockedForShutdown))
BEGIN_SO_INTOLERANT_CODE(this);
if ((m_UserInterrupt & TI_Abort) != 0)
// If the thread is waiting for AD unload to finish, and the thread is interrupted,
// we can start aborting.
HandleThreadAbort(fWaitForADUnload);
if ((m_UserInterrupt & TI_Interrupt) != 0)
// Consume the interrupt: clear the interrupted/interruptible state bits
// and the TI_Interrupt request flag before throwing.
ResetThreadState ((ThreadState)(TS_Interrupted | TS_Interruptible));
FastInterlockAnd ((DWORD*)&m_UserInterrupt, ~TI_Interrupt);
AddFiberInfo(ThreadTrackInfo_Abort);
COMPlusThrow(kThreadInterruptedException);
END_SO_INTOLERANT_CODE;
// Upper bound on how much stack to scrub below the current frame.
#define MAXSTACKBYTES (2 * GetOsPageSize())
// GC-stress helper: zero out the unused portion of the current stack (up to
// MAXSTACKBYTES) so stale object references left on the stack don't keep
// garbage alive, then record the cleaned base on the thread.
void CleanStackForFastGCStress ()
PVOID StackLimit = ClrTeb::GetStackLimit();
// Bytes between a local on the current frame and the stack limit.
size_t nBytes = (size_t)&nBytes - (size_t)StackLimit;
// Round down to a size_t-aligned byte count.
nBytes &= ~sizeof (size_t);
if (nBytes > MAXSTACKBYTES) {
nBytes = MAXSTACKBYTES;
// Allocate the scratch region on the stack itself and zero it.
size_t* buffer = (size_t*) _alloca (nBytes);
memset(buffer, 0, nBytes);
GetThread()->m_pCleanedStackBase = &nBytes;
// Debug-only: reset the thread's dangerous-OBJECTREF tracking table and scrub
// the stack for fast GC stress. Preserves the Win32 last error across the call.
void Thread::ObjectRefFlush(Thread* thread)
BEGIN_PRESERVE_LAST_ERROR;
// The constructor and destructor of AutoCleanupSONotMainlineHolder (allocated by SO_NOT_MAINLINE_FUNCTION below)
// may trash the last error, so we need to save and restore last error here. Also, we need to add a scope here
// because we can't let the destructor run after we call SetLastError().
// this is debug only code, so no need to validate
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
STATIC_CONTRACT_ENTRY_POINT;
_ASSERTE(thread->PreemptiveGCDisabled()); // Should have been in managed code
// Clear the per-thread table of object refs considered "dangerous".
memset(thread->dangerousObjRefs, 0, sizeof(thread->dangerousObjRefs));
thread->m_allObjRefEntriesBad = FALSE;
CLEANSTACKFORFASTGCSTRESS ();
END_PRESERVE_LAST_ERROR;
6228 #if defined(STRESS_HEAP)
// Lazily-initialized hash map of previously-seen stack hashes (GC stress
// "unique stack" tracking) and the Crst that guards its initialization.
PtrHashMap *g_pUniqueStackMap = NULL;
Crst *g_pUniqueStackCrst = NULL;
// Number of return addresses captured per stack signature.
#define UniqueStackDepth 8
// Equality callback for the unique-stack PtrHashMap. val1 is the stored key,
// which the map keeps shifted right by one bit, so it is restored with
// (val1 << 1); val2 is the probe key. Entry [0] of each stack-trace buffer is
// the element count; the remaining entries are return addresses.
6235 BOOL StackCompare (UPTR val1, UPTR val2)
6243 size_t *p1 = (size_t *)(val1 << 1);
6244 size_t *p2 = (size_t *)val2;
// Different lengths can never match (the failure return is elided here).
6245 if (p1[0] != p2[0]) {
6248 size_t nElem = p1[0];
// Clamp to the fixed capture depth before element-wise comparison.
6249 if (nElem >= UniqueStackDepth) {
6250 nElem = UniqueStackDepth;
// NOTE(review): the loop compares from index 0, which re-checks the count
// word alongside the addresses — harmless, but confirm it is intentional.
6255 for (size_t n = 0; n < nElem; n ++) {
6256 if (p1[n] != p2[n]) {
6264 void UniqueStackSetupMap()
6266 WRAPPER_NO_CONTRACT;
6268 if (g_pUniqueStackCrst == NULL)
6270 Crst *Attempt = new Crst (
6272 CrstFlags(CRST_REENTRANCY | CRST_UNSAFE_ANYMODE));
6274 if (FastInterlockCompareExchangePointer(&g_pUniqueStackCrst,
6283 // Now we have a Crst we can use to synchronize the remainder of the init.
6284 if (g_pUniqueStackMap == NULL)
6286 CrstHolder ch(g_pUniqueStackCrst);
6288 if (g_pUniqueStackMap == NULL)
6290 PtrHashMap *map = new (SystemDomain::GetGlobalLoaderAllocator()->GetLowFrequencyHeap()) PtrHashMap ();
6291 LockOwner lock = {g_pUniqueStackCrst, IsOwnerOfCrst};
6292 map->Init (256, StackCompare, TRUE, &lock);
6293 g_pUniqueStackMap = map;
6298 BOOL StartUniqueStackMapHelper()
6310 if (g_pUniqueStackMap == NULL)
6312 UniqueStackSetupMap();
6319 EX_END_CATCH(SwallowAllExceptions);
// Public wrapper over StartUniqueStackMapHelper; returns whether the
// unique-stack map is ready for use.
6324 BOOL StartUniqueStackMap ()
6333 return StartUniqueStackMapHelper();
// Folds one return address into a running stack hash:
// newHash = hash * 9 XOR retAddr (the shift-add computes hash * 9).
6338 size_t UpdateStackHash(size_t hash, size_t retAddr)
6340 return ((hash << 3) + hash) ^ retAddr;
6343 /***********************************************************************/
/***********************************************************************/
// Produces a hash identifying the current call stack and fills 'stackTrace'
// with up to UniqueStackDepth return addresses (entry [0] receives the count).
// x86 scans raw stack words between stackTop and stackStop; other targets walk
// frames with RtlLookupFunctionEntry/VirtualUnwindCallFrame.
6344 size_t getStackHash(size_t* stackTrace, size_t* stackTop, size_t* stackStop, size_t stackBase, size_t stackLimit)
6352 // return a hash of every return address found between 'stackTop' (the lowest address)
6353 // and 'stackStop' (the highest address)
// Cache this module's address range once; a word is only a candidate return
// address if it points into the module containing this code.
6360 static size_t moduleBase = (size_t) -1;
6361 static size_t moduleTop = (size_t) -1;
6362 if (moduleTop == (size_t) -1)
6364 MEMORY_BASIC_INFORMATION mbi;
6366 if (ClrVirtualQuery(getStackHash, &mbi, sizeof(mbi)))
6368 moduleBase = (size_t)mbi.AllocationBase;
6369 moduleTop = (size_t)mbi.BaseAddress + mbi.RegionSize;
6373 // way bad error, probably just assert and exit
6374 _ASSERTE (!"ClrVirtualQuery failed");
// x86 path: conservative scan of every stack slot in [stackTop, stackStop).
6380 while (stackTop < stackStop)
6382 // Clean out things that point to stack, as those can't be return addresses
6383 if (*stackTop > moduleBase && *stackTop < moduleTop)
6387 if (isRetAddr((TADDR)*stackTop, &dummy))
6389 hash = UpdateStackHash(hash, *stackTop);
6391 // If there is no jitted code on the stack, then just use the
6392 // top 16 frames as the context.
6394 if (idx <= UniqueStackDepth)
6396 stackTrace [idx] = *stackTop;
6403 #else // _TARGET_X86_
// Non-x86 path: capture the current context and unwind frame by frame.
6406 ClrCaptureContext(&ctx);
6408 UINT_PTR uControlPc = (UINT_PTR)GetIP(&ctx);
6409 UINT_PTR uImageBase;
6411 UINT_PTR uPrevControlPc = uControlPc;
6415 RtlLookupFunctionEntry(uControlPc,
6416 ARM_ONLY((DWORD*))(&uImageBase),
// Stop once we unwind out of the runtime image (g_pMSCorEE) — the walk only
// covers frames up to the first non-runtime code.
6420 if (((UINT_PTR)g_pMSCorEE) != uImageBase)
6425 uControlPc = Thread::VirtualUnwindCallFrame(&ctx);
6427 UINT_PTR uRetAddrForHash = uControlPc;
// Guard against a stuck unwind: if the PC did not advance, bail out rather
// than spin (see comment below for the loader-lock scenario).
6429 if (uPrevControlPc == uControlPc)
6431 // This is a special case when we fail to acquire the loader lock
6432 // in RtlLookupFunctionEntry(), which then returns false. The end
6433 // result is that we cannot go any further on the stack and
6434 // we will loop infinitely (because the owner of the loader lock
6435 // is blocked on us).
6441 uPrevControlPc = uControlPc;
6444 hash = UpdateStackHash(hash, uRetAddrForHash);
6446 // If there is no jitted code on the stack, then just use the
6447 // top 16 frames as the context.
6449 if (idx <= UniqueStackDepth)
6451 stackTrace [idx] = uRetAddrForHash;
6454 #endif // _TARGET_X86_
// Entry [0] records how many addresses were captured.
6456 stackTrace [0] = idx;
// Copies a freshly-seen stack trace into the global loader heap and inserts it
// into g_pUniqueStackMap keyed by its hash. Exceptions (e.g. OOM) are
// swallowed — failure to record a stack is benign for stress purposes.
6461 void UniqueStackHelper(size_t stackTraceHash, size_t *stackTrace)
6470 size_t nElem = stackTrace[0];
6471 if (nElem >= UniqueStackDepth) {
6472 nElem = UniqueStackDepth;
// NOTE(review): sizes use sizeof(size_t *); on common targets that equals
// sizeof(size_t), but confirm the intended element type.
6474 AllocMemHolder<size_t> stackTraceInMap = SystemDomain::GetGlobalLoaderAllocator()->GetLowFrequencyHeap()->AllocMem(S_SIZE_T(sizeof(size_t *)) * (S_SIZE_T(nElem) + S_SIZE_T(1)));
6475 memcpy (stackTraceInMap, stackTrace, sizeof(size_t *) * (nElem + 1));
6476 g_pUniqueStackMap->InsertValue(stackTraceHash, stackTraceInMap);
// Ownership transferred to the map; keep the allocation alive.
6477 stackTraceInMap.SuppressRelease();
6482 EX_END_CATCH(SwallowAllExceptions);
6485 /***********************************************************************/
6486 /* returns true if this stack has not been seen before, useful for
6487 running tests only once per stack trace. */
// Returns TRUE if the current call stack has not been seen before; used by
// GC stress to run expensive checks only once per distinct stack. On repeat
// stacks (or hash 0, or map-init failure fallbacks) behavior follows the
// elided return paths — confirm exact returns against the full source.
6489 BOOL Thread::UniqueStack(void* stackStart)
6499 // If we where not told where to start, start at the caller of UniqueStack
6500 if (stackStart == 0)
6502 stackStart = &stackStart;
6505 if (g_pUniqueStackMap == NULL)
6507 if (!StartUniqueStackMap ())
6509 // We fail to initialize unique stack map due to OOM.
6510 // Let's say the stack is unique.
6515 size_t stackTrace[UniqueStackDepth+1] = {0};
6517 // stackTraceHash represents a hash of entire stack at the time we make the call,
6518 // We insure at least GC per unique stackTrace. What information is contained in
6519 // 'stackTrace' is somewhat arbitrary. We choose it to mean all functions live
6520 // on the stack up to the first jitted function.
6522 size_t stackTraceHash;
6523 Thread* pThread = GetThread();
6526 void* stopPoint = pThread->m_CacheStackBase;
6529 // Find the stop point (most jitted function)
6530 Frame* pFrame = pThread->GetFrame();
// Walk the explicit Frame chain until the first frame with a return address,
// i.e. the first transition out of jitted code.
6534 if (pFrame == 0 || pFrame == (Frame*) -1)
6537 pFrame->GetFunction(); // This insures that helper frames are inited
6539 if (pFrame->GetReturnAddress() != 0)
6544 pFrame = pFrame->Next();
6546 #endif // _TARGET_X86_
6548 // Get hash of all return addresses between here an the top most jitted function
6549 stackTraceHash = getStackHash (stackTrace, (size_t*) stackStart, (size_t*) stopPoint,
6550 size_t(pThread->m_CacheStackBase), size_t(pThread->m_CacheStackLimit));
// Fast path: already-seen stack (or unusable hash) — not unique.
6552 if (stackTraceHash == 0 ||
6553 g_pUniqueStackMap->LookupValue (stackTraceHash, stackTrace) != (LPVOID)INVALIDENTRY)
6557 BOOL fUnique = FALSE;
6560 CrstHolder ch(g_pUniqueStackCrst);
// m_bUniqueStacking flags re-entrancy while we hold the Crst.
6563 GetThread ()->m_bUniqueStacking = TRUE;
// Re-check under the lock: another thread may have inserted the same trace
// between the lock-free lookup above and here.
6565 if (g_pUniqueStackMap->LookupValue (stackTraceHash, stackTrace) != (LPVOID)INVALIDENTRY)
6573 UniqueStackHelper(stackTraceHash, stackTrace);
6577 GetThread ()->m_bUniqueStacking = FALSE;
// Debug aid: optionally verify CleanStackForFastGCStress covered this region.
6582 static int fCheckStack = -1;
6583 if (fCheckStack == -1)
6585 fCheckStack = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_FastGCCheckStack);
6587 if (fCheckStack && pThread->m_pCleanedStackBase > stackTrace
6588 && pThread->m_pCleanedStackBase - stackTrace > (int) MAXSTACKBYTES)
6590 _ASSERTE (!"Garbage on stack");
6596 #else // !FEATURE_PAL
// PAL build: stack-uniqueness tracking is unavailable (body elided; presumably
// a trivial constant return — confirm against the full source).
6598 BOOL Thread::UniqueStack(void* stackStart)
6603 #endif // !FEATURE_PAL
6605 #endif // STRESS_HEAP
6609 * GetStackLowerBound
6611 * Returns the lower bound of the stack space. Note -- the practical bound is some number of pages greater than
6612 * this value -- those pages are reserved for a stack overflow exception processing.
6618 * address of the lower bound of the threads's stack.
// Returns the lower (limit) end of this thread's stack reservation. Windows:
// VirtualQuery on a local gives the allocation base of the stack region.
// PAL: delegates to PAL_GetStackLimit(). Note the practical usable bound is
// higher — the bottom pages are reserved for stack-overflow processing.
6620 void * Thread::GetStackLowerBound()
6622 // Called during fiber switch. Can not have non-static contract.
6623 STATIC_CONTRACT_NOTHROW;
6624 STATIC_CONTRACT_GC_NOTRIGGER;
6625 STATIC_CONTRACT_SO_TOLERANT;
6628 MEMORY_BASIC_INFORMATION lowerBoundMemInfo;
// Query the region containing a stack local; its AllocationBase is the
// base of the whole stack reservation.
6631 dwRes = ClrVirtualQuery((const void *)&lowerBoundMemInfo, &lowerBoundMemInfo, sizeof(MEMORY_BASIC_INFORMATION));
6633 if (sizeof(MEMORY_BASIC_INFORMATION) == dwRes)
6635 return (void *)(lowerBoundMemInfo.AllocationBase);
6641 #else // !FEATURE_PAL
6642 return PAL_GetStackLimit();
6643 #endif // !FEATURE_PAL
6647 * GetStackUpperBound
6649 * Return the upper bound of the thread's stack space.
6655 * address of the base of the threads's stack.
// Returns the upper end (base) of this thread's stack via the TEB.
6657 void *Thread::GetStackUpperBound()
6659 // Called during fiber switch. Can not have non-static contract.
6660 STATIC_CONTRACT_NOTHROW;
6661 STATIC_CONTRACT_GC_NOTRIGGER;
6662 STATIC_CONTRACT_SO_TOLERANT;
6664 return ClrTeb::GetStackBase();
// Caches this thread's stack base/limit on the Thread object and derives the
// limits used by EnsureSufficientExecutionStack and stack-overflow probing.
// Returns FALSE if the OS stack limits cannot be determined (see elided
// return paths).
6667 BOOL Thread::SetStackLimits(SetStackLimitScope scope)
6679 m_CacheStackBase = GetStackUpperBound();
6680 m_CacheStackLimit = GetStackLowerBound();
6681 if (m_CacheStackLimit == NULL)
6683 _ASSERTE(!"Failed to set stack limits");
6687 // Compute the limit used by EnsureSufficientExecutionStack and cache it on the thread. This minimum stack size should
6688 // be sufficient to allow a typical non-recursive call chain to execute, including potential exception handling and
6689 // garbage collection. Used for probing for available stack space through RuntimeImports.EnsureSufficientExecutionStack,
6690 // among other things.
// Platform-dependent minimum (the #if selecting between these is elided):
6692 const UINT_PTR MinExecutionStackSize = 128 * 1024;
6694 const UINT_PTR MinExecutionStackSize = 64 * 1024;
6696 _ASSERTE(m_CacheStackBase >= m_CacheStackLimit);
6697 if ((reinterpret_cast<UINT_PTR>(m_CacheStackBase) - reinterpret_cast<UINT_PTR>(m_CacheStackLimit)) >
6698 MinExecutionStackSize)
6700 m_CacheStackSufficientExecutionLimit = reinterpret_cast<UINT_PTR>(m_CacheStackLimit) + MinExecutionStackSize;
// Stack smaller than the minimum: any execution-stack probe should fail,
// so pin the limit at the stack base.
6704 m_CacheStackSufficientExecutionLimit = reinterpret_cast<UINT_PTR>(m_CacheStackBase);
6708 // Ensure that we've setup the stack guarantee properly before we cache the stack limits
6709 // as they depend upon the stack guarantee.
6710 if (FAILED(CLRSetThreadStackGuarantee()))
6713 // Cache the last stack addresses that we are allowed to touch. We throw a stack overflow
6714 // if we cross that line. Note that we ignore any subsequent calls to STSG for Whidbey until
6715 // we see an exception and recache the values. We use the LastAllowableAddresses to
6716 // determine if we've taken a hard SO and the ProbeLimits on the probes themselves.
6718 m_LastAllowableStackAddress = GetLastNormalStackAddress();
6720 if (g_pConfig->ProbeForStackOverflow())
6722 m_ProbeLimit = m_LastAllowableStackAddress;
6726 // If we have stack probing disabled, set the probeLimit to 0 so that all probes will pass. This
6727 // way we don't have to do an extra check in the probe code.
6734 //---------------------------------------------------------------------------------------------
6735 // Routines we use to managed a thread's stack, for fiber switching or stack overflow purposes.
6736 //---------------------------------------------------------------------------------------------
// Configures the OS stack guarantee (guard region size) for this thread via
// SetThreadStackGuarantee, sizing it from a default plus extra pages for CLR
// SO dispatch, debug builds, and the EXTERNAL_ThreadGuardPages config knob.
// Returns a failure HRESULT if the OS call fails.
6738 HRESULT Thread::CLRSetThreadStackGuarantee(SetThreadStackGuaranteeScope fScope)
6749 // TODO: we need to measure what the stack usage needs are at the limits in the hosted scenario for host callbacks
6751 if (Thread::IsSetThreadStackGuaranteeInUse(fScope))
6753 // <TODO> Tune this as needed </TODO>
6754 ULONG uGuardSize = SIZEOF_DEFAULT_STACK_GUARANTEE;
6755 int EXTRA_PAGES = 0;
6757 // Free Build EH Stack Stats:
6758 // --------------------------------
6759 // currently the maximum stack usage we'll face while handling a SO includes:
6760 // 4.3k for the OS (kernel32!RaiseException, Rtl EH dispatch code, RtlUnwindEx [second pass])
6761 // 1.2k for the CLR EH setup (NakedThrowHelper*)
6762 // 4.5k for other heavy CLR stack creations (2x CONTEXT, 1x REGDISPLAY)
6763 // ~1.0k for other misc CLR stack allocations
6765 // 11.0k --> ~2.75 pages for CLR SO EH dispatch
6767 // -plus we might need some more for debugger EH dispatch, Watson, etc...
6768 // -also need to take into account that we can lose up to 1 page of the guard region
6769 // -additionally, we need to provide some region to hosts to allow for lock aquisition in a hosted scenario
// Debug builds get one extra page of headroom for assert/diagnostic paths.
6772 INDEBUG(EXTRA_PAGES += 1);
// Config override: a nonzero ThreadGuardPages value replaces the computed
// EXTRA_PAGES contribution entirely.
6774 int ThreadGuardPages = CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_ThreadGuardPages);
6775 if (ThreadGuardPages == 0)
6777 uGuardSize += (EXTRA_PAGES * GetOsPageSize());
6781 uGuardSize += (ThreadGuardPages * GetOsPageSize());
// (Condition elided) one more page for debugger infrastructure.
6786 uGuardSize += (1 * GetOsPageSize()); // one extra page for debug infrastructure
6790 LOG((LF_EH, LL_INFO10000, "STACKOVERFLOW: setting thread stack guarantee to 0x%x\n", uGuardSize));
6792 if (!::SetThreadStackGuarantee(&uGuardSize))
6794 return HRESULT_FROM_GetLastErrorNA();
6798 #endif // !FEATURE_PAL
6805 * GetLastNormalStackAddress
6807 * GetLastNormalStackAddress returns the last stack address before the guard
6808 * region of a thread. This is the last address that one could write to before
6809 * a stack overflow occurs.
6812 * StackLimit - the base of the stack allocation
6815 * Address of the first page of the guard region.
// Static overload: given a stack limit (base of the reservation), returns the
// last writable address before the guard region — i.e. limit + hard guard
// region + stack guarantee (+ one OS fault page on non-PAL; see comment).
6817 UINT_PTR Thread::GetLastNormalStackAddress(UINT_PTR StackLimit)
6827 UINT_PTR cbStackGuarantee = GetStackGuarantee();
6829 // Here we take the "hard guard region size", the "stack guarantee" and the "fault page" and add them
6830 // all together. Note that the "fault page" is the reason for the extra GetOsPageSize() below. The OS
6831 // will guarantee us a certain amount of stack remaining after a stack overflow. This is called the
6832 // "stack guarantee". But to do this, it has to fault on the page before that region as the app is
6833 // allowed to fault at the very end of that page. So, as a result, the last normal stack address is
6835 return StackLimit + (cbStackGuarantee
6838 #endif // !FEATURE_PAL
6839 + HARD_GUARD_REGION_SIZE);
// Debug helper: logs the symbolic names of the MEM_* state flags and PAGE_*
// protection flags present in a MEMORY_BASIC_INFORMATION snapshot.
6844 static void DebugLogMBIFlags(UINT uState, UINT uProtect)
// LOG_FLAG prints the flag name when the bit is set in 'flags'.
6856 #define LOG_FLAG(flags, name) \
6859 LOG((LF_EH, LL_INFO1000, "" #name " ")); \
6864 LOG((LF_EH, LL_INFO1000, "State: "));
6866 LOG_FLAG(uState, MEM_COMMIT);
6867 LOG_FLAG(uState, MEM_RESERVE);
6868 LOG_FLAG(uState, MEM_DECOMMIT);
6869 LOG_FLAG(uState, MEM_RELEASE);
6870 LOG_FLAG(uState, MEM_FREE);
6871 LOG_FLAG(uState, MEM_PRIVATE);
6872 LOG_FLAG(uState, MEM_MAPPED);
6873 LOG_FLAG(uState, MEM_RESET);
6874 LOG_FLAG(uState, MEM_TOP_DOWN);
6875 LOG_FLAG(uState, MEM_WRITE_WATCH);
6876 LOG_FLAG(uState, MEM_PHYSICAL);
6877 LOG_FLAG(uState, MEM_LARGE_PAGES);
6878 LOG_FLAG(uState, MEM_4MB_PAGES);
6883 LOG((LF_EH, LL_INFO1000, "Protect: "));
6885 LOG_FLAG(uProtect, PAGE_NOACCESS);
6886 LOG_FLAG(uProtect, PAGE_READONLY);
6887 LOG_FLAG(uProtect, PAGE_READWRITE);
6888 LOG_FLAG(uProtect, PAGE_WRITECOPY);
6889 LOG_FLAG(uProtect, PAGE_EXECUTE);
6890 LOG_FLAG(uProtect, PAGE_EXECUTE_READ);
6891 LOG_FLAG(uProtect, PAGE_EXECUTE_READWRITE);
6892 LOG_FLAG(uProtect, PAGE_EXECUTE_WRITECOPY);
6893 LOG_FLAG(uProtect, PAGE_GUARD);
6894 LOG_FLAG(uProtect, PAGE_NOCACHE);
6895 LOG_FLAG(uProtect, PAGE_WRITECOMBINE);
// Debug helper: walks [uLowAddress, uHighAddress) region by region with
// VirtualQuery and logs each region's extent, size in pages, and flags.
6903 static void DebugLogStackRegionMBIs(UINT_PTR uLowAddress, UINT_PTR uHighAddress)
6914 MEMORY_BASIC_INFORMATION meminfo;
6915 UINT_PTR uStartOfThisRegion = uLowAddress;
6917 LOG((LF_EH, LL_INFO1000, "----------------------------------------------------------------------\n"));
6919 while (uStartOfThisRegion < uHighAddress)
6921 SIZE_T res = ClrVirtualQuery((const void *)uStartOfThisRegion, &meminfo, sizeof(meminfo));
6923 if (sizeof(meminfo) != res)
6925 LOG((LF_EH, LL_INFO1000, "VirtualQuery failed on %p\n", uStartOfThisRegion));
6929 UINT_PTR uStartOfNextRegion = uStartOfThisRegion + meminfo.RegionSize;
// Clip the final region so logging stays inside the requested range.
6931 if (uStartOfNextRegion > uHighAddress)
6933 uStartOfNextRegion = uHighAddress;
6936 UINT_PTR uRegionSize = uStartOfNextRegion - uStartOfThisRegion;
6938 LOG((LF_EH, LL_INFO1000, "0x%p -> 0x%p (%d pg)  ", uStartOfThisRegion, uStartOfNextRegion - 1, uRegionSize / GetOsPageSize()));
6939 DebugLogMBIFlags(meminfo.State, meminfo.Protect);
6940 LOG((LF_EH, LL_INFO1000, "\n"));
6942 uStartOfThisRegion = uStartOfNextRegion;
6945 LOG((LF_EH, LL_INFO1000, "----------------------------------------------------------------------\n"));
// Debug helper: logs a full memory-region snapshot of the current thread's
// stack. Prefers the Thread's cached limits when a Thread object exists,
// otherwise falls back to querying the OS directly.
6949 void Thread::DebugLogStackMBIs()
6960 Thread* pThread = GetThread(); // N.B. this can be NULL!
6962 UINT_PTR uStackLimit = (UINT_PTR)GetStackLowerBound();
6963 UINT_PTR uStackBase = (UINT_PTR)GetStackUpperBound();
// (pThread != NULL branch — condition elided.)
6966 uStackLimit = (UINT_PTR)pThread->GetCachedStackLimit();
6967 uStackBase = (UINT_PTR)pThread->GetCachedStackBase();
6971 uStackLimit = (UINT_PTR)GetStackLowerBound();
6972 uStackBase = (UINT_PTR)GetStackUpperBound();
6974 UINT_PTR uStackSize = uStackBase - uStackLimit;
6976 LOG((LF_EH, LL_INFO1000, "----------------------------------------------------------------------\n"));
6977 LOG((LF_EH, LL_INFO1000, "Stack Snapshot 0x%p -> 0x%p (%d pg)\n", uStackLimit, uStackBase, uStackSize / GetOsPageSize()));
6980 LOG((LF_EH, LL_INFO1000, "Last normal addr: 0x%p\n", pThread->GetLastNormalStackAddress()));
6983 DebugLogStackRegionMBIs(uStackLimit, uStackBase);
6990 // Determines if the stack pointer is beyond the stack limit, in which case
6991 // we can assume we've taken a hard SO.
6995 // Returns: bool indicating if SP is beyond the limit or not
// Returns whether the current stack pointer has crossed below the last
// allowable stack address — the signal that a hard stack overflow occurred.
6997 BOOL Thread::IsSPBeyondLimit()
6999 WRAPPER_NO_CONTRACT;
7001 // Reset the stack limits if necessary.
7002 // @todo .  Add a vectored handler for X86 so that we reset the stack limits
7003 // there, as anything that supports SetThreadStackGuarantee will support vectored handlers.
7004 // Then we can always assume during EH processing that our stack limits are good and we
7005 // don't have to call ResetStackLimits.
7007 char *approxSP = (char *)GetCurrentSP();
7008 if  (approxSP < (char *)(GetLastAllowableStackAddress()))
// Deliberately consumes a small, target-dependent amount of stack (and touches
// it so the allocation cannot be optimized away). noinline keeps the _alloca
// in its own frame.
7015 __declspec(noinline) void AllocateSomeStack(){
7016 LIMITED_METHOD_CONTRACT;
7018 const size_t size = 0x200;
7019 #else   //_TARGET_X86_
7020 const size_t size = 0x400;
7021 #endif  //_TARGET_X86_
7023 INT8* mem = (INT8*)_alloca(size);
7024 // Actually touch the memory we just allocated so the compiler can't
7025 // optimize it away completely.
7026 // NOTE: this assumes the stack grows down (towards 0).
7027 VolatileStore<INT8>(mem, 0);
7034 * Commit the thread's entire stack. A thread's stack is usually only reserved memory, not committed. The OS will
7035 * commit more pages as the thread's stack grows. But, if the system is low on memory and disk space, its possible
7036 * that the OS will not have enough memory to grow the stack. That causes a stack overflow exception at very random
7037 * times, and the CLR can't handle that.
7040 * The Thread object for this thread, if there is one. NULL otherwise.
7043 * TRUE if the function succeeded, FALSE otherwise.
// Body elided in this listing; per the header comment above, commits the
// thread's stack up front so low-memory conditions can't cause surprise
// stack-growth failures — confirm the body against the full source.
7046 BOOL Thread::CommitThreadStack(Thread* pThreadOptional)
7054 // static // private
7055 BOOL Thread::DoesRegionContainGuardPage(UINT_PTR uLowAddress, UINT_PTR uHighAddress)
7067 MEMORY_BASIC_INFORMATION meminfo;
7068 UINT_PTR uStartOfCurrentRegion = uLowAddress;
7070 while (uStartOfCurrentRegion < uHighAddress)
7073 // This code can run below YieldTask, which means that it must not call back into the host.
7074 // The reason is that YieldTask is invoked by the host, and the host needs not be reentrant.
7075 dwRes = VirtualQuery((const void *)uStartOfCurrentRegion, &meminfo, sizeof(meminfo));
7076 #define VirtualQuery(lpAddress, lpBuffer, dwLength) Dont_Use_VirtualQuery(lpAddress, lpBuffer, dwLength)
7078 // If the query fails then assume we have no guard page.
7079 if (sizeof(meminfo) != dwRes)
7084 if (meminfo.Protect & PAGE_GUARD)
7089 uStartOfCurrentRegion += meminfo.RegionSize;
7095 #endif // !FEATURE_PAL
7098 * DetermineIfGuardPagePresent
7100 * DetermineIfGuardPagePresent returns TRUE if the thread's stack contains a proper guard page. This function makes
7101 * a physical check of the stack, rather than relying on whether or not the CLR is currently processing a stack
7102 * overflow exception.
7104 * It seems reasonable to want to check just the 3rd page for !MEM_COMMIT or PAGE_GUARD, but that's no good in a
7105 * world where a) one can extend the guard region arbitrarily with SetThreadStackGuarantee(), b) a thread's stack
7106 * could be pre-committed, and c) another lib might reset the guard page very high up on the stack, much as we
7107 * do. In that world, we have to do VirtualQuery from the lower bound up until we find a region with PAGE_GUARD on
7108 * it. If we've never SO'd, then that's two calls to VirtualQuery.
7114 * TRUE if the thread has a guard page, FALSE otherwise.
// Physically checks whether this thread's stack still has a PAGE_GUARD page,
// rather than relying on CLR SO-processing state (see header comment above).
7116 BOOL Thread::DetermineIfGuardPagePresent()
7128 BOOL bStackGuarded = FALSE;
7129 UINT_PTR uStackBase = (UINT_PTR)GetCachedStackBase();
7130 UINT_PTR uStackLimit = (UINT_PTR)GetCachedStackLimit();
7132 // Note: we start our queries after the hard guard page (one page up from the base of the stack.) We know the
7133 // very last region of the stack is never the guard page (its always the uncomitted "hard" guard page) so there's
7134 // no need to waste a query on it.
7135 bStackGuarded = DoesRegionContainGuardPage(uStackLimit + HARD_GUARD_REGION_SIZE,
7138 LOG((LF_EH, LL_INFO10000, "Thread::DetermineIfGuardPagePresent: stack guard page: %s\n", bStackGuarded ? "PRESENT" : "MISSING"));
7140 return bStackGuarded;
7141 #else // !FEATURE_PAL
7143 #endif // !FEATURE_PAL
7147 * GetLastNormalStackAddress
7149 * GetLastNormalStackAddress returns the last stack address before the guard
7150 * region of this thread. This is the last address that one could write to
7151 * before a stack overflow occurs.
7157 * Address of the first page of the guard region.
// Instance overload: computes the last normal (pre-guard-region) stack
// address for this thread from its cached stack limit.
7159 UINT_PTR Thread::GetLastNormalStackAddress()
7161 WRAPPER_NO_CONTRACT;
7163 return GetLastNormalStackAddress((UINT_PTR)m_CacheStackLimit);
7167 #ifdef FEATURE_STACK_PROBE
7171 * Given a target stack pointer, this function will tell us whether or not we could restore the guard page if we
7172 * unwound the stack that far.
7175 * stackPointer -- stack pointer that we want to try to reset the thread's stack up to.
7178 * TRUE if there's enough room to reset the stack, false otherwise.
// Returns whether unwinding to 'stackPointer' would leave enough room above
// the guard region to restore the guard page and run EE handler code.
7180 BOOL Thread::CanResetStackTo(LPCVOID stackPointer)
7190 // How much space between the given stack pointer and the first guard page?
7192 // This must be signed since the stack pointer might be in the guard region,
7193 // which is at a lower address than GetLastNormalStackAddress will return.
7194 INT_PTR iStackSpaceLeft = (INT_PTR)stackPointer - GetLastNormalStackAddress();
7196 // We need to have enough space to call back into the EE from the handler, so we use the twice the entry point amount.
7197 // We need enough to do work and enough that partway through that work we won't probe and COMPlusThrowSO.
7199 const INT_PTR iStackSizeThreshold        = (ADJUST_PROBE(DEFAULT_ENTRY_PROBE_AMOUNT * 2) * GetOsPageSize());
7201 if (iStackSpaceLeft > iStackSizeThreshold)
7212 * IsStackSpaceAvailable
7214 * Given a number of stack pages, this function will tell us whether or not we have that much space
7215 * before the top of the stack. If we are in the guard region we must be already handling an SO,
7216 * so we report how much space is left in the guard region
7219 * numPages -- the number of pages that we need. This can be a fractional amount.
7222 * TRUE if there's that many pages of stack available
// Returns whether at least 'numPages' (possibly fractional) of stack remain
// above the guard region; if the guard page is already gone (SO in progress),
// the stack guarantee is counted as additional available space.
7224 BOOL Thread::IsStackSpaceAvailable(float numPages)
7234 // How much space between the current stack pointer and the first guard page?
7236 // This must be signed since the stack pointer might be in the guard region,
7237 // which is at a lower address than GetLastNormalStackAddress will return.
7238 float iStackSpaceLeft = static_cast<float>((INT_PTR)GetCurrentSP() - (INT_PTR)GetLastNormalStackAddress());
7240 // If we have access to the stack guarantee (either in the guard region or we've tripped the guard page), then
7242 if ((iStackSpaceLeft/GetOsPageSize()) < numPages && !DetermineIfGuardPagePresent())
7244 UINT_PTR stackGuarantee = GetStackGuarantee();
7245 // GetLastNormalStackAddress actually returns the 2nd to last stack page on the stack. We'll add that to our available
7246 // amount of stack, in addition to any sort of stack guarantee we might have.
7248 // All these values are OS supplied, and will never overflow. (If they do, that means the stack is on the order
7249 // over GB, which isn't possible.
7250 iStackSpaceLeft += stackGuarantee + GetOsPageSize();
7252 if ((iStackSpaceLeft/GetOsPageSize()) < numPages)
7265 * Returns the amount of stack guaranteed after an SO but before the OS rips the process.
7271 * The stack guarantee in OS pages.
// Returns the size in bytes of this thread's stack guarantee: the OS-reported
// per-thread value when SetThreadStackGuarantee is in use, otherwise the
// compile-time default.
7273 UINT_PTR Thread::GetStackGuarantee()
7275 WRAPPER_NO_CONTRACT;
7278 // There is a new API available on new OS's called SetThreadStackGuarantee. It allows you to change the size of
7279 // the guard region on a per-thread basis. If we're running on an OS that supports the API, then we must query
7280 // it to see if someone has changed the size of the guard region for this thread.
7281 if (!IsSetThreadStackGuaranteeInUse())
7283 return SIZEOF_DEFAULT_STACK_GUARANTEE;
7286 ULONG cbNewStackGuarantee = 0;
7287 // Passing in a value of 0 means that we're querying, and the value is changed with the new guard region
// SetThreadStackGuarantee with 0 queries without modifying the guarantee.
7289 if (::SetThreadStackGuarantee(&cbNewStackGuarantee) &&
7290 (cbNewStackGuarantee != 0))
7292 return cbNewStackGuarantee;
7294 #endif // FEATURE_PAL
7296 return SIZEOF_DEFAULT_STACK_GUARANTEE;
7304 // Given a page base address, try to turn it into a guard page and then requery to determine success.
7306 // static // private
7307 BOOL Thread::MarkPageAsGuard(UINT_PTR uGuardPageBase)
7320 ClrVirtualProtect((LPVOID)uGuardPageBase, 1,
7321 (PAGE_READWRITE | PAGE_GUARD), &flOldProtect);
7323 // Intentionally ignore return value -- if it failed, we'll find out below
7324 // and keep moving up the stack until we either succeed or we hit the guard
7325 // region. If we don't succeed before we hit the guard region, we'll end up
7326 // with a fatal error.
7328 // Now, make sure the guard page is really there. If its not, then VirtualProtect most likely failed
7329 // because our stack had grown onto the page we were trying to protect by the time we made it into
7330 // VirtualProtect. So try the next page down.
7331 MEMORY_BASIC_INFORMATION meminfo;
7334 dwRes = ClrVirtualQuery((const void *)uGuardPageBase, &meminfo, sizeof(meminfo));
7336 return ((sizeof(meminfo) == dwRes) && (meminfo.Protect & PAGE_GUARD));
7343 * RestoreGuardPage will replace the guard page on this thread's stack. The assumption is that it was removed by
7344 * the OS due to a stack overflow exception. This function requires that you know that you have enough stack space
7345 * to restore the guard page, so make sure you know what you're doing when you decide to call this.
// Re-establishes the stack guard page after the OS removed it during a stack
// overflow. Walks down from just below the current SP, marking pages as guard
// until one sticks; if it reaches the guard-region threshold without success,
// the process is torn down with a fatal stack-overflow error.
7353 VOID Thread::RestoreGuardPage()
7364 // Need a hard SO probe here.
7365 CONTRACT_VIOLATION(SOToleranceViolation);
7367 BOOL bStackGuarded = DetermineIfGuardPagePresent();
7369 // If the guard page is still there, then just return.
7372 LOG((LF_EH, LL_INFO100, "Thread::RestoreGuardPage: no need to restore... guard page is already there.\n"));
7376 UINT_PTR approxStackPointer;
7377 UINT_PTR guardPageBase;
7378 UINT_PTR guardRegionThreshold;
7383 // The normal guard page is the 3rd page from the base. The first page is the "hard" guard, the second one is
7384 // reserve, and the 3rd one is marked as a guard page. However, since there is now an API (on some platforms)
7385 // to change the size of the guard region, we'll just go ahead and protect the next page down from where we are
7386 // now. The guard page will get pushed forward again, just like normal, until the next stack overflow.
7387 approxStackPointer   = (UINT_PTR)GetCurrentSP();
7388 guardPageBase        = (UINT_PTR)ALIGN_DOWN(approxStackPointer, GetOsPageSize()) - GetOsPageSize();
7390 // OS uses soft guard page to update the stack info in TEB. If our guard page is not beyond the current stack, the TEB
7391 // will not be updated, and then OS's check of stack during exception will fail.
7392 if (approxStackPointer >= guardPageBase)
7394 guardPageBase -= GetOsPageSize();
7396 // If we're currently "too close" to the page we want to mark as a guard then the call to VirtualProtect to set
7397 // PAGE_GUARD will fail, but it won't return an error. Therefore, we protect the page, then query it to make
7398 // sure it worked. If it didn't, we try the next page down. We'll either find a page to protect, or run into
7399 // the guard region and rip the process down with EEPOLICY_HANDLE_FATAL_ERROR below.
7400 guardRegionThreshold = GetLastNormalStackAddress();
7405 LOG((LF_EH, LL_INFO10000,
7406 "Thread::RestoreGuardPage: restoring guard page @ 0x%p, approxStackPointer=0x%p, "
7407 "last normal stack address=0x%p\n",
7408 guardPageBase, approxStackPointer, guardRegionThreshold));
7410 // Make sure we set the guard page above the guard region.
7411 if (guardPageBase < guardRegionThreshold)
7416 if (MarkPageAsGuard(guardPageBase))
7418 // The current GuardPage should be beyond the current SP.
7419 _ASSERTE (guardPageBase < approxStackPointer);
7420 pageMissing = FALSE;
// Couldn't protect this page (stack likely grew onto it); try one lower.
7424 guardPageBase -= GetOsPageSize();
7430 //GetAppDomain()->EnableADUnloadWorker(EEPolicy::ADU_Rude);
7432 INDEBUG(DebugLogStackMBIs());
// Fatal path: no page could be protected above the guard region.
7437 STRESS_LOG2(LF_EH, LL_ALWAYS,
7438 "Thread::RestoreGuardPage: too close to the guard region (0x%p) to restore guard page @0x%p\n",
7439 guardRegionThreshold, guardPageBase);
7440 _ASSERTE(!"Too close to the guard page to reset it!");
7441 EEPOLICY_HANDLE_FATAL_ERROR(COR_E_STACKOVERFLOW);
7446 #endif // #ifndef DACCESS_COMPILE
7449 // InitRegDisplay: initializes a REGDISPLAY for a thread. If validContext
7450 // is false, pRD is filled from the current context of the thread. The
7451 // thread's current context is also filled in pctx. If validContext is true,
7452 // pctx should point to a valid context and pRD is filled from that.
// Initializes a REGDISPLAY for this thread. With validContext false, pctx is
// filled from the thread's live context (or the debugger filter context when
// one is set); with validContext true, pctx is used as supplied. See the
// header comment above for the full contract.
7454 bool Thread::InitRegDisplay(const PREGDISPLAY pRD, PT_CONTEXT pctx, bool validContext)
// A debugger filter context, when present, takes precedence over the OS
// thread context.
7464 if (GetFilterContext()!= NULL)
7466 pctx = GetFilterContext();
7470 #ifdef DACCESS_COMPILE
7473 pctx->ContextFlags = CONTEXT_FULL;
7475 _ASSERTE(this != GetThread());  // do not call GetThreadContext on the active thread
7477 BOOL ret = EEGetThreadContext(this, pctx);
// Failure fallback (elided): per-target fixup of the instruction pointer.
7482 pRD->ControlPC = pctx->Eip;
7483 pRD->PCTAddr = (TADDR)&(pctx->Eip);
7484 #elif defined(_TARGET_AMD64_)
7485 // nothing more to do here, on Win64 setting the IP to 0 is enough.
7486 #elif defined(_TARGET_ARM_)
7487 // nothing more to do here, on Win64 setting the IP to 0 is enough.
7489 PORTABILITY_ASSERT("NYI for platform Thread::InitRegDisplay");
7494 #endif // DACCESS_COMPILE
7498 FillRegDisplay( pRD, pctx );
// Populates pRD from pctx via the global FillRegDisplay, then (in
// DEBUG_REGDISPLAY builds, non-x86) stamps the owning thread on the
// REGDISPLAY and sanity-checks the SP against the thread's stack bounds.
7504 void Thread::FillRegDisplay(const PREGDISPLAY pRD, PT_CONTEXT pctx)
7506 WRAPPER_NO_CONTRACT;
7509 ::FillRegDisplay(pRD, pctx);
7511 #if defined(DEBUG_REGDISPLAY) && !defined(_TARGET_X86_)
7512 CONSISTENCY_CHECK(!pRD->_pThread || pRD->_pThread == this);
7513 pRD->_pThread = this;
7515 CheckRegDisplaySP(pRD);
7516 #endif // defined(DEBUG_REGDISPLAY) && !defined(_TARGET_X86_)
7520 #ifdef DEBUG_REGDISPLAY
// Debug check: asserts that a REGDISPLAY's SP lies within its owning thread's
// cached stack bounds (lower bound check skipped when stacks can move).
7522 void CheckRegDisplaySP (REGDISPLAY *pRD)
7524 if (pRD->SP && pRD->_pThread)
7526 #ifndef NO_FIXED_STACK_LIMIT
7527 _ASSERTE(PTR_VOID(pRD->SP) >= pRD->_pThread->GetCachedStackLimit());
7528 #endif // NO_FIXED_STACK_LIMIT
7529 _ASSERTE(PTR_VOID(pRD->SP) <  pRD->_pThread->GetCachedStackBase());
7537 // When a thread reaches a safe place, it will rendezvous back with us, via one of
7538 // the following trip functions:
// Rendezvous point reached by a thread at a safe place: processes any pending
// thread abort, and if the runtime wants the thread stopped, unhijacks it and
// pulses GC mode so a pending suspension can proceed.
7540 void CommonTripThread()
7542 #ifndef DACCESS_COMPILE
7549 Thread  *thread = GetThread();
7551 thread->HandleThreadAbort ();
7553 if (thread->CatchAtSafePoint())
7555 _ASSERTE(!ThreadStore::HoldingThreadStore(thread));
7556 #ifdef FEATURE_HIJACK
7557 thread->UnhijackThread();
7558 #endif // FEATURE_HIJACK
// Momentarily toggle GC mode so a pending suspend/GC can take the thread.
7561 thread->PulseGCMode();
7565 #endif // #ifndef DACCESS_COMPILE
7568 #ifndef DACCESS_COMPILE
// Installs (or clears, with NULL) the debugger filter context for this
// thread; semantically akin to pushing a Frame onto the Frame chain, so it
// must run in cooperative mode on the current thread (see preconditions).
7570 void Thread::SetFilterContext(CONTEXT *pContext)
7572 // SetFilterContext is like pushing a Frame onto the Frame chain.
7576 MODE_COOPERATIVE; // Absolutely must be in coop to coordinate w/ Runtime suspension.
7577 PRECONDITION(GetThread() == this); // must be on current thread.
7580 m_debuggerFilterContext = pContext;
// Returns the debugger filter context previously set by SetFilterContext,
// or NULL when none is installed.
7585 T_CONTEXT *Thread::GetFilterContext(void)
7587 LIMITED_METHOD_DAC_CONTRACT;
7589 return m_debuggerFilterContext;
7592 #ifndef DACCESS_COMPILE
7594 // @todo - eventually complete remove the CantStop count on the thread and use
7595 // the one in the PreDef block. For now, we increment both our thread counter,
7596 // and the FLS counter. Eventually we can remove our thread counter and only use
// Thread::SetDebugCantStop: increment (fCantStop == true) or decrement
// (fCantStop == false) this thread's "debugger can't stop here" count.
7598 void Thread::SetDebugCantStop(bool fCantStop)
7600 LIMITED_METHOD_CONTRACT;
7605 m_debuggerCantStop++;
7610 m_debuggerCantStop--;
7614 // @todo - remove this, we only read this from oop.
// Thread::GetDebugCantStop: true when at least one outstanding
// SetDebugCantStop(true) has not yet been balanced by a SetDebugCantStop(false).
7615 bool Thread::GetDebugCantStop(void)
7617 LIMITED_METHOD_CONTRACT;
7619 return m_debuggerCantStop != 0;
7623 //-----------------------------------------------------------------------------
7624 // Call w/a wrapper.
7625 // We've already transitioned AppDomains here. This just places a 1st-pass filter to sniff
7626 // for catch-handler found callbacks for the debugger.
7627 //-----------------------------------------------------------------------------
// Invokes fpCallback(args) inside a PAL_TRY whose filter
// (AppDomainTransitionExceptionFilter) notifies the debugger of
// catch-handler-found events during the first pass, then continues the search.
7628 void MakeADCallDebuggerWrapper(
7629 FPAPPDOMAINCALLBACK fpCallback,
7630 CtxTransitionBaseArgs * args,
7631 ContextTransitionFrame* pFrame)
7633 STATIC_CONTRACT_THROWS;
7634 STATIC_CONTRACT_GC_TRIGGERS;
7635 STATIC_CONTRACT_MODE_ANY;
// The transition frame's address serves as the potential catcher's stack
// address for the debugger notification.
7637 BYTE * pCatcherStackAddr = (BYTE*) pFrame;
// Package the callback and its arguments so the filter/handler can see them.
7639 struct Param : NotifyOfCHFFilterWrapperParam
7641 FPAPPDOMAINCALLBACK fpCallback;
7642 CtxTransitionBaseArgs *args;
7644 param.pFrame = pCatcherStackAddr;
7645 param.fpCallback = fpCallback;
// NOTE(review): "¶m" below appears to be a mis-encoded "&param" (HTML-entity
// damage in this excerpt) -- confirm against the pristine source.
7648 PAL_TRY(Param *, pParam, ¶m)
7650 pParam->fpCallback(pParam->args);
// The filter only sniffs for the debugger; it always continues the search,
// so the handler body is unreachable.
7652 PAL_EXCEPT_FILTER(AppDomainTransitionExceptionFilter)
7654 // Should never reach here b/c handler should always continue search.
7661 // Invoke a callback in another appdomain.
7662 // Caller should have checked that we're actually transitioning domains here.
// Enters the target domain's default context via EnterContextRestricted,
// invokes the callback (through MakeADCallDebuggerWrapper when a debugger is
// attached), captures any exception, returns to the original context, and
// re-raises a captured exception cross-context.
// NOTE(review): "TargetDomain" is referenced but not a visible parameter --
// presumably supplied by the enclosing macro context; confirm in full source.
7663 void MakeCallWithAppDomainTransition(
7665 FPAPPDOMAINCALLBACK fpCallback,
7666 CtxTransitionBaseArgs * args)
7668 DEBUG_ASSURE_NO_RETURN_BEGIN(MAKECALL)
7670 Thread* _ctx_trans_pThread = GetThread();
7671 TESTHOOKCALL(EnteringAppDomain((TargetDomain.m_dwId)));
// Hold the target domain (throws if it has been unloaded) until we have
// entered its context.
7672 AppDomainFromIDHolder pTargetDomain(TargetDomain, TRUE);
7673 pTargetDomain.ThrowIfUnloaded();
7674 _ASSERTE(_ctx_trans_pThread != NULL);
// Caller guarantees we are actually changing domains.
7675 _ASSERTE(_ctx_trans_pThread->GetDomain()->GetId()!= TargetDomain);
7677 bool _ctx_trans_fRaiseNeeded = false;
// NOTE(review): the trailing backslash below looks like a leftover macro line
// continuation from the macro this code was expanded from.
7678 Exception* _ctx_trans_pTargetDomainException=NULL; \
// Stack-allocated transition frame recording the context to return to.
7680 FrameWithCookie<ContextTransitionFrame> _ctx_trans_Frame;
7681 ContextTransitionFrame* _ctx_trans_pFrame = &_ctx_trans_Frame;
7683 _ctx_trans_pThread->EnterContextRestricted(
7684 pTargetDomain->GetDefaultContext(),
// Once entered, the holder can release its reference.
7687 pTargetDomain.Release();
7688 args->pCtxFrame = _ctx_trans_pFrame;
7689 TESTHOOKCALL(EnteredAppDomain((TargetDomain.m_dwId)));
7690 /* work around unreachable code warning */
7693 // Invoke the callback
7694 if (CORDebuggerAttached())
7696 // If a debugger is attached, do it through a wrapper that will sniff for CHF callbacks.
7697 MakeADCallDebuggerWrapper(fpCallback, args, GET_CTX_TRANSITION_FRAME());
7701 // If no debugger is attached, call directly.
// Exception path: remember the in-flight exception so it can be re-raised
// after we have returned to the original context.
7707 LOG((LF_EH|LF_APPDOMAIN, LL_INFO1000, "ENTER_DOMAIN(%s, %s, %d): exception in flight\n",
7708 __FUNCTION__, __FILE__, __LINE__));
7710 _ctx_trans_pTargetDomainException=EXTRACT_EXCEPTION();
7711 _ctx_trans_fRaiseNeeded = true;
7713 /* SwallowAllExceptions is fine because we don't get to this point */
7714 /* unless fRaiseNeeded = true or no exception was thrown */
7715 EX_END_CATCH(SwallowAllExceptions);
7716 TESTHOOKCALL(LeavingAppDomain((TargetDomain.m_dwId)));
7717 if (_ctx_trans_fRaiseNeeded)
7719 LOG((LF_EH, LL_INFO1000, "RaiseCrossContextException(%s, %s, %d)\n",
7720 __FUNCTION__, __FILE__, __LINE__));
// Re-raise the captured exception across the context boundary (no return).
7721 _ctx_trans_pThread->RaiseCrossContextException(_ctx_trans_pTargetDomainException,_ctx_trans_pFrame);
7724 LOG((LF_APPDOMAIN, LL_INFO1000, "LEAVE_DOMAIN(%s, %s, %d)\n",
7725 __FUNCTION__, __FILE__, __LINE__));
// Normal path: pop the transition frame and return to the original context.
7727 _ctx_trans_pThread->ReturnToContext(_ctx_trans_pFrame);
7729 #ifdef FEATURE_TESTHOOKS
7730 TESTHOOKCALL(LeftAppDomain(TargetDomain.m_dwId));
7733 DEBUG_ASSURE_NO_RETURN_END(MAKECALL)
// Thread::InitContext: one-time setup of a fresh thread's context/domain.
// Places the thread in the default domain's default context and notifies that
// domain of the thread's entry.  Both m_Context and m_pDomain must be unset.
7738 void Thread::InitContext()
7742 if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
7746 // this should only be called when initializing a thread
7747 _ASSERTE(m_Context == NULL);
7748 _ASSERTE(m_pDomain == NULL);
7749 GCX_COOP_NO_THREAD_BROKEN();
7750 m_Context = SystemDomain::System()->DefaultDomain()->GetDefaultContext();
7751 m_pDomain = m_Context->GetDomain();
7752 _ASSERTE(m_pDomain);
// Let the domain account for this thread (no transition frame at init time).
7753 m_pDomain->ThreadEnter(this, NULL);
// Thread::ClearContext: tear down this thread's context/domain association,
// notifying the domain of the thread's exit.  Inverse of InitContext.
7756 void Thread::ClearContext()
7760 if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
7764 // if one is null, both must be
7765 _ASSERTE(m_pDomain && m_Context || ! (m_pDomain && m_Context));
7770 m_pDomain->ThreadExit(this, NULL);
7772 // must set exposed context to null first otherwise object verification
7773 // checks will fail AV when m_Context is null
7775 #ifdef FEATURE_COMINTEROP
// Re-enable eager RCW cleanup for the next use of this Thread object.
7776 m_fDisableComObjectEagerCleanup = false;
7777 #endif //FEATURE_COMINTEROP
// Thread::DoContextCallBack: invoke pTarget(args) in the context pContext of
// appdomain appDomain.  When both source and target contexts are their
// domains' default contexts, this reduces to a plain appdomain transition
// (ENTER_DOMAIN_ID / END_DOMAIN_TRANSITION).
7782 void Thread::DoContextCallBack(ADID appDomain, Context *pContext, Context::ADCallBackFcnType pTarget, LPVOID args)
7784 //Do not deference pContext if it's not from the current appdomain
// Captured only for logging.
7787 TADDR espVal = (TADDR)GetCurrentSP();
7789 LOG((LF_APPDOMAIN, LL_INFO100, "Thread::DoADCallBack Calling %p at esp %p in [%d]\n",
7790 pTarget, espVal, appDomain.m_dwId));
// Caller must actually be requesting a context change.
7792 _ASSERTE(GetThread()->GetContext() != pContext);
7793 Thread* pThread = GetThread();
7795 // Get the default context for the current domain as well as for the
7796 // destination domain.
7797 AppDomain* pCurrDomain = pThread->GetContext()->GetDomain();
7798 Context* pCurrDefCtx = pCurrDomain->GetDefaultContext();
7799 BOOL bDefaultTargetCtx=FALSE;
// Hold the target domain just long enough to compare pContext against its
// default context (throws if the domain is already unloaded).
7802 AppDomainFromIDHolder ad(appDomain, TRUE);
7803 ad.ThrowIfUnloaded();
7804 bDefaultTargetCtx=(ad->GetDefaultContext()==pContext);
// default-to-default transition: a plain appdomain transition suffices.
7807 if (pCurrDefCtx == pThread->GetContext() && bDefaultTargetCtx)
7809 ENTER_DOMAIN_ID(appDomain);
7811 END_DOMAIN_TRANSITION;
7817 LOG((LF_APPDOMAIN, LL_INFO100, "Thread::DoADCallBack Done at esp %p\n", espVal));
// Thread::DoADCallBack (AppDomain* overload): invoke pTarget in pDomain,
// transitioning only if pDomain differs from the current domain.
// fSetupEHAtTransition selects whether EH handlers are pushed at the
// transition boundary (mandatory on FEATURE_PAL builds).  If a thread abort
// was caught inside and we crossed the unload boundary, the abort is turned
// into an AppDomainUnloadedException here.
7821 void Thread::DoADCallBack(AppDomain* pDomain , Context::ADCallBackFcnType pTarget, LPVOID args, DWORD dwADV,
7822 BOOL fSetupEHAtTransition /* = TRUE */)
// Captured only for logging.
7827 TADDR espVal = (TADDR)GetCurrentSP();
7829 LOG((LF_APPDOMAIN, LL_INFO100, "Thread::DoADCallBack Calling %p at esp %p in [%d]\n",
7830 pTarget, espVal, pDomain->GetId().m_dwId));
7832 Thread* pThread = GetThread();
7834 // Get the default context for the current domain as well as for the
7835 // destination domain.
7836 AppDomain* pCurrDomain = pThread->GetContext()->GetDomain();
7838 if (pCurrDomain!=pDomain)
7840 // use the target domain's default context as the target context
7841 // so that the actual call to a transparent proxy would enter the object into the correct context.
// Set when the abort should be converted to an unload exception below.
7843 BOOL fThrow = FALSE;
7846 // FEATURE_PAL must setup EH at AD transition - the option to omit the setup
7847 // is only for regular Windows builds.
7848 _ASSERTE(fSetupEHAtTransition);
7849 #endif // FEATURE_PAL
7851 LOG((LF_APPDOMAIN, LL_INFO10, "Thread::DoADCallBack - performing AD transition with%s EH at transition boundary.\n",
7852 (fSetupEHAtTransition == FALSE)?"out":""));
7854 if (fSetupEHAtTransition)
7856 ENTER_DOMAIN_PTR(pDomain,dwADV)
7860 // unloadBoundary is cleared by ReturnToContext, so get it now.
7861 Frame* unloadBoundaryFrame = pThread->GetUnloadBoundaryFrame();
7862 fThrow = pThread->ShouldChangeAbortToUnload(GET_CTX_TRANSITION_FRAME(), unloadBoundaryFrame);
7864 END_DOMAIN_TRANSITION;
// Same transition, but without pushing EH handlers at the boundary
// (Windows-only option; see assert above).
7869 ENTER_DOMAIN_PTR_NO_EH_AT_TRANSITION(pDomain,dwADV)
7873 // unloadBoundary is cleared by ReturnToContext, so get it now.
7874 Frame* unloadBoundaryFrame = pThread->GetUnloadBoundaryFrame();
7875 fThrow = pThread->ShouldChangeAbortToUnload(GET_CTX_TRANSITION_FRAME(), unloadBoundaryFrame);
7877 END_DOMAIN_TRANSITION_NO_EH_AT_TRANSITION;
7879 #endif // !FEATURE_PAL
7881 // if someone caught the abort before it got back out to the AD transition (like DispatchEx_xxx does)
7882 // then need to turn the abort into an unload, as they're gonna keep seeing it anyway
7885 LOG((LF_APPDOMAIN, LL_INFO10, "Thread::DoADCallBack turning abort into unload\n"));
7886 COMPlusThrow(kAppDomainUnloadedException, W("Remoting_AppDomainUnloaded_ThreadUnwound"));
7893 LOG((LF_APPDOMAIN, LL_INFO100, "Thread::DoADCallBack Done at esp %p\n", espVal));
// Thread::DoADCallBack (ADID overload): same as the AppDomain* overload above
// but addresses the target domain by ID (ENTER_DOMAIN_ID), which revalidates
// the domain at the transition.  Converts a caught thread abort into an
// AppDomainUnloadedException when the unload boundary was crossed.
7896 void Thread::DoADCallBack(ADID appDomainID , Context::ADCallBackFcnType pTarget, LPVOID args, BOOL fSetupEHAtTransition /* = TRUE */)
// Captured only for logging.
7901 TADDR espVal = (TADDR)GetCurrentSP();
7903 LOG((LF_APPDOMAIN, LL_INFO100, "Thread::DoADCallBack Calling %p at esp %p in [%d]\n",
7904 pTarget, espVal, appDomainID.m_dwId));
7906 Thread* pThread = GetThread();
7908 // Get the default context for the current domain as well as for the
7909 // destination domain.
7910 AppDomain* pCurrDomain = pThread->GetContext()->GetDomain();
7912 if (pCurrDomain->GetId()!=appDomainID)
7914 // use the target domain's default context as the target context
7915 // so that the actual call to a transparent proxy would enter the object into the correct context.
// Set when the abort should be converted to an unload exception below.
7917 BOOL fThrow = FALSE;
7920 // FEATURE_PAL must setup EH at AD transition - the option to omit the setup
7921 // is only for regular Windows builds.
7922 _ASSERTE(fSetupEHAtTransition);
7923 #endif // FEATURE_PAL
7925 LOG((LF_APPDOMAIN, LL_INFO10, "Thread::DoADCallBack - performing AD transition with%s EH at transition boundary.\n",
7926 (fSetupEHAtTransition == FALSE)?"out":""));
7928 if (fSetupEHAtTransition)
7930 ENTER_DOMAIN_ID(appDomainID)
7934 // unloadBoundary is cleared by ReturnToContext, so get it now.
7935 Frame* unloadBoundaryFrame = pThread->GetUnloadBoundaryFrame();
7936 fThrow = pThread->ShouldChangeAbortToUnload(GET_CTX_TRANSITION_FRAME(), unloadBoundaryFrame);
7938 END_DOMAIN_TRANSITION;
// Same transition without EH at the boundary (Windows-only option).
7943 ENTER_DOMAIN_ID_NO_EH_AT_TRANSITION(appDomainID)
7947 // unloadBoundary is cleared by ReturnToContext, so get it now.
7948 Frame* unloadBoundaryFrame = pThread->GetUnloadBoundaryFrame();
7949 fThrow = pThread->ShouldChangeAbortToUnload(GET_CTX_TRANSITION_FRAME(), unloadBoundaryFrame);
7951 END_DOMAIN_TRANSITION_NO_EH_AT_TRANSITION;
7953 #endif // !FEATURE_PAL
7955 // if someone caught the abort before it got back out to the AD transition (like DispatchEx_xxx does)
7956 // then need to turn the abort into an unload, as they're gonna keep seeing it anyway
7959 LOG((LF_APPDOMAIN, LL_INFO10, "Thread::DoADCallBack turning abort into unload\n"));
7960 COMPlusThrow(kAppDomainUnloadedException, W("Remoting_AppDomainUnloaded_ThreadUnwound"));
7967 LOG((LF_APPDOMAIN, LL_INFO100, "Thread::DoADCallBack Done at esp %p\n", espVal));
// Thread::EnterContextRestricted: switch this thread into pContext, recording
// the previous context (and, on a domain change, the lock count and last
// thrown object) in pFrame so ReturnToContext can restore them.  "Restricted"
// because entering a domain that is unloading is refused (throws
// AppDomainUnloadedException), unlike returning to one.
7970 void Thread::EnterContextRestricted(Context *pContext, ContextTransitionFrame *pFrame)
7979 _ASSERTE(GetThread() == this);
7980 _ASSERTE(pContext); // should never enter a null context
7981 _ASSERTE(m_Context); // should always have a current context
7983 AppDomain *pPrevDomain = m_pDomain;
7984 AppDomain *pDomain = pContext->GetDomain();
7985 // and it should always have an AD set
// Refuse to enter a domain this thread is barred from (e.g. it is unloading).
7988 if (m_pDomain != pDomain && !pDomain->CanThreadEnter(this))
7990 pFrame->SetReturnContext(NULL);
7991 COMPlusThrow(kAppDomainUnloadedException);
// Record where to return to.
7994 pFrame->SetReturnContext(m_Context);
7995 pFrame->SetReturnExecutionContext(NULL);
7997 if (pPrevDomain != pDomain)
// Save the caller's lock baseline; locks taken in the new domain are counted
// relative to the current total.
7999 pFrame->SetLockCount(m_dwBeginLockCount);
8000 m_dwBeginLockCount = m_dwLockCount;
// Same context: nothing to switch.
8003 if (m_Context == pContext) {
8004 _ASSERTE(m_Context->GetDomain() == pContext->GetDomain());
8008 LOG((LF_APPDOMAIN, LL_INFO1000, "%sThread::EnterContext from (%p) [%d] (count %d)\n",
8009 FinalizerThread::IsCurrentThreadFinalizer() ? "FT: " : "",
8010 m_Context, m_Context->GetDomain()->GetId().m_dwId,
8011 m_Context->GetDomain()->GetThreadEnterCount()));
8012 LOG((LF_APPDOMAIN, LL_INFO1000, " into (%p) [%d] (count %d)\n", pContext,
8013 pContext->GetDomain()->GetId().m_dwId,
8014 pContext->GetDomain()->GetThreadEnterCount()));
8016 #ifdef _DEBUG_ADUNLOAD
8017 printf("Thread::EnterContext %x from (%8.8x) [%d]\n", GetThreadId(), m_Context,
8018 m_Context ? m_Context->GetDomain()->GetId() : -1);
8019 printf(" into (%8.8x) [%d] %S\n", pContext,
8020 pContext->GetDomain()->GetId());
// Keep the debugger from stopping us mid-transition.
8023 CantStopHolder hCantStop;
8025 bool fChangedDomains = m_pDomain != pDomain;
8026 if (fChangedDomains)
8029 #ifdef FEATURE_STACK_PROBE
8030 if (pDomain == SystemDomain::System()->DefaultDomain() &&
8031 GetEEPolicy()->GetActionOnFailure(FAIL_StackOverflow) == eRudeUnloadAppDomain)
8033 // Make sure default domain does not see SO.
8034 // probe for our entry point amount and throw if not enough stack
8035 RetailStackProbe(ADJUST_PROBE(DEFAULT_ENTRY_PROBE_AMOUNT*2), this);
8041 STRESS_LOG1(LF_APPDOMAIN, LL_INFO100000, "Entering into ADID=%d\n", pDomain->GetId().m_dwId);
8045 // Store the last thrown object in the ContextTransitionFrame before we null it out
8046 // to prevent it from leaking into the domain we are transitioning into.
8049 pFrame->SetLastThrownObjectInParentContext(LastThrownObject());
8050 SafeSetLastThrownObject(NULL);
// The actual context switch.
8053 m_Context = pContext;
8056 #ifdef _DEBUG_ADUNLOAD
8057 printf("Thread::EnterContext %x,%8.8x push? %d current frame is %8.8x\n", GetThreadId(), this, 1, GetFrame());
8060 if (fChangedDomains)
// Account this thread in the new domain, and update usage/ETW bookkeeping.
8062 pDomain->ThreadEnter(this, pFrame);
8064 #ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
8067 // Update previous AppDomain's count of processor usage by threads executing within it.
8068 pPrevDomain->UpdateProcessorUsage(QueryThreadProcessorUsage());
8069 FireEtwThreadDomainEnter((ULONGLONG)this, (ULONGLONG)pDomain, GetClrInstanceId());
8071 #endif // FEATURE_APPDOMAIN_RESOURCE_MONITORING
8073 // NULL out the Thread's pointer to the current ThreadLocalBlock. On the next
8074 // access to thread static data, the Thread's pointer to the current ThreadLocalBlock
8075 // will be updated correctly.
8076 m_pThreadLocalBlock = NULL;
8078 m_pDomain = pDomain;
8079 SetAppDomain(m_pDomain);
8083 // main difference between EnterContext and ReturnToContext is that are allowed to return
8084 // into a domain that is unloading but cannot enter a domain that is unloading
// Thread::ReturnToContext: restore the context recorded in pFrame by
// EnterContextRestricted.  On a domain change it also: reports/escalates
// orphaned locks, restores lock counts and the last thrown object, resets a
// pending AD-unload abort when popping the unload boundary frame, performs
// domain exit bookkeeping, and applies EE policy if an abort left locks held.
8085 void Thread::ReturnToContext(ContextTransitionFrame *pFrame)
8092 _ASSERTE(GetThread() == this);
8094 Context *pReturnContext = pFrame->GetReturnContext();
8095 _ASSERTE(pReturnContext);
8099 AppDomain *pReturnDomain = pReturnContext->GetDomain();
8100 AppDomain* pCurrentDomain = m_pDomain;
8102 bool fChangedDomains = m_pDomain != pReturnDomain;
8104 if (fChangedDomains)
// Leaving a domain while still holding locks taken inside it orphans them.
8106 if (HasLockInCurrentDomain())
8108 if (GetAppDomain()->IsDefaultDomain() || // We should never orphan a lock in default domain.
8109 !IsRudeAbort()) // If rudeabort, managed backout may not be run.
8111 // One would like to assert that this case never occurs, but
8112 // a rude abort can easily leave unreachable locked objects,
8113 // which we have to allow.
8114 STRESS_LOG2(LF_SYNC, LL_INFO1000, "Locks are orphaned while exiting a domain (enter: %d, exit: %d)\n", m_dwBeginLockCount, m_dwLockCount);
8116 STRESS_LOG0 (LF_APPDOMAIN, LL_INFO10, "Thread::ReturnToContext Lock not released\n");
8120 AppDomain *pFromDomain = GetAppDomain();
8122 // There is a race when EE Thread for a new thread is allocated in the place of the old EE Thread.
8123 // The lock accounting will get confused if there are orphaned locks. Set the flag that allows us to relax few asserts.
8124 SetThreadStateNC(TSNC_UnbalancedLocks);
8125 pFromDomain->SetOrphanedLocks();
8127 if (!pFromDomain->IsDefaultDomain())
8129 // If a Thread orphaned a lock, we don't want a host to recycle the Thread object,
8130 // since the lock count is reset when the thread leaves this domain.
8131 SetThreadStateNC(TSNC_CannotRecycle);
8134 // It is a disaster if a lock leaks in default domain. We can never unload default domain.
8135 // _ASSERTE (!pFromDomain->IsDefaultDomain());
// Escalate per EE policy: possibly unload the offending domain or exit.
8136 EPolicyAction action = GetEEPolicy()->GetActionOnFailure(FAIL_OrphanedLock);
8139 case eUnloadAppDomain:
8140 if (!pFromDomain->IsDefaultDomain())
8142 pFromDomain->EnableADUnloadWorker(EEPolicy::ADU_Safe);
8145 case eRudeUnloadAppDomain:
8146 if (!pFromDomain->IsDefaultDomain())
8148 pFromDomain->EnableADUnloadWorker(EEPolicy::ADU_Rude);
8152 case eFastExitProcess:
8153 case eRudeExitProcess:
8154 case eDisableRuntime:
8155 GetEEPolicy()->HandleExitProcessFromEscalation(action,HOST_E_EXITPROCESS_ADUNLOAD);
// Restore the caller's lock accounting saved by EnterContextRestricted.
8162 m_dwLockCount = m_dwBeginLockCount;
8163 m_dwBeginLockCount = pFrame->GetLockCount();
// Same context: nothing to switch.
8167 if (m_Context == pReturnContext)
8169 _ASSERTE(m_Context->GetDomain() == pReturnContext->GetDomain());
8175 LOG((LF_APPDOMAIN, LL_INFO1000, "%sThread::ReturnToContext from (%p) [%d] (count %d)\n",
8176 FinalizerThread::IsCurrentThreadFinalizer() ? "FT: " : "",
8177 m_Context, m_Context->GetDomain()->GetId().m_dwId,
8178 m_Context->GetDomain()->GetThreadEnterCount()));
8179 LOG((LF_APPDOMAIN, LL_INFO1000, " into (%p) [%d] (count %d)\n", pReturnContext,
8180 pReturnContext->GetDomain()->GetId().m_dwId,
8181 pReturnContext->GetDomain()->GetThreadEnterCount()));
8183 #ifdef _DEBUG_ADUNLOAD
// NOTE(review): the printf argument lists below look garbled in this excerpt
// (unbalanced parentheses) -- debug-only code; confirm against pristine source.
8184 printf("Thread::ReturnToContext %x from (%p) [%d]\n", GetThreadId(), m_Context,
8185 m_Context->GetDomain()->GetId(),
8186 printf(" into (%p) [%d]\n", pReturnContext,
8187 pReturnContext->GetDomain()->GetId(),
8188 m_Context->GetDomain()->GetThreadEnterCount());
// Keep the debugger from stopping us mid-transition.
8191 CantStopHolder hCantStop;
8193 m_Context = pReturnContext;
8195 if (fChangedDomains)
8197 STRESS_LOG2(LF_APPDOMAIN, LL_INFO100000, "Returning from %d to %d\n", pADOnStack.m_dwId, pReturnContext->GetDomain()->GetId().m_dwId);
8199 _ASSERTE(pADOnStack == m_pDomain->GetId());
8202 //_ASSERTE(!fLinkFrame || pThread->GetFrame() == pFrame);
8206 // NULL out the Thread's pointer to the current ThreadLocalBlock. On the next
8207 // access to thread static data, the Thread's pointer to the current ThreadLocalBlock
8208 // will be updated correctly.
8209 m_pThreadLocalBlock = NULL;
8211 m_pDomain = pReturnDomain;
8212 SetAppDomain(pReturnDomain);
// Popping the unload boundary: any pending AD-unload abort is now satisfied.
8214 if (pFrame == m_pUnloadBoundaryFrame)
8216 m_pUnloadBoundaryFrame = NULL;
8217 if (IsAbortRequested())
8219 EEResetAbort(TAR_ADUnload);
8221 ResetBeginAbortedForADUnload();
8224 // Restore the last thrown object to what it was before the AD transition. Note that if
8225 // an exception was thrown out of the AD we transitioned into, it will be raised in
8226 // RaiseCrossContextException and the EH system will store it as the last thrown
8227 // object if it gets handled by an EX_CATCH.
8228 SafeSetLastThrownObject(pFrame->GetLastThrownObjectInParentContext());
8233 if (fChangedDomains)
8236 // Do this last so that thread is not labeled as out of the domain until all cleanup is done.
8237 ADID adid=pCurrentDomain->GetId();
8238 pCurrentDomain->ThreadExit(this, pFrame);
8240 #ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
8243 // Update the old AppDomain's count of processor usage by threads executing within it.
8244 pCurrentDomain->UpdateProcessorUsage(QueryThreadProcessorUsage());
8245 FireEtwThreadDomainEnter((ULONGLONG)this, (ULONGLONG)pReturnDomain, GetClrInstanceId());
8247 #endif // FEATURE_APPDOMAIN_RESOURCE_MONITORING
// An aborting thread returning with locks still held in the new (returned-to)
// domain triggers the same EE-policy escalation as orphaned locks above.
8250 if (fChangedDomains && IsAbortRequested() && HasLockInCurrentDomain())
8252 EPolicyAction action = GetEEPolicy()->GetActionOnFailure(FAIL_CriticalResource);
8253 // It is a disaster if a lock leaks in default domain. We can never unload default domain.
8254 // _ASSERTE (action == eThrowException || !pReturnDomain->IsDefaultDomain());
8257 case eUnloadAppDomain:
8258 if (!pReturnDomain->IsDefaultDomain())
8260 pReturnDomain->EnableADUnloadWorker(EEPolicy::ADU_Safe);
8263 case eRudeUnloadAppDomain:
8264 if (!pReturnDomain->IsDefaultDomain())
8266 pReturnDomain->EnableADUnloadWorker(EEPolicy::ADU_Rude);
8270 case eFastExitProcess:
8271 case eRudeExitProcess:
8272 case eDisableRuntime:
8273 GetEEPolicy()->HandleExitProcessFromEscalation(action,HOST_E_EXITPROCESS_ADUNLOAD);
8280 #ifdef _DEBUG_ADUNLOAD
8281 printf("Thread::ReturnToContext %x,%8.8x pop? %d current frame is %8.8x\n", GetThreadId(), this, 1, GetFrame());
// Thread::ReturnToContextAndThrow: return to the context recorded in pFrame,
// set *pContextSwitched, then throw the throwable built from pEx.  The
// exception is thus raised in the caller's (returned-to) context.
8288 void Thread::ReturnToContextAndThrow(ContextTransitionFrame* pFrame, EEException* pEx, BOOL* pContextSwitched)
8294 PRECONDITION(CheckPointer(pContextSwitched));
8297 #ifdef FEATURE_TESTHOOKS
// Capture the ID before switching so the test hook reports the domain we left.
8298 ADID adid=GetAppDomain()->GetId();
8300 ReturnToContext(pFrame);
8301 *pContextSwitched=TRUE;
8302 #ifdef FEATURE_TESTHOOKS
8303 TESTHOOKCALL(LeftAppDomain(adid.m_dwId));
8306 COMPlusThrow(CLRException::GetThrowableFromException(pEx));
// Thread::ReturnToContextAndOOM: return to the context recorded in pFrame and
// then raise out-of-memory in the returned-to context (OOM raise elided in
// this excerpt).
8309 void Thread::ReturnToContextAndOOM(ContextTransitionFrame* pFrame)
8318 #ifdef FEATURE_TESTHOOKS
// Capture the ID before switching so the test hook reports the domain we left.
8319 ADID adid=GetAppDomain()->GetId();
8322 ReturnToContext(pFrame);
8323 #ifdef FEATURE_TESTHOOKS
8324 TESTHOOKCALL(LeftAppDomain(adid.m_dwId));
// Thread::RaiseCrossContextException: re-raise an exception captured on the
// other side of a context transition.  A NULL pExOrig means "the last thrown
// object", represented by CLRLastThrownObjectException.  Never returns.
8331 void DECLSPEC_NORETURN Thread::RaiseCrossContextException(Exception* pExOrig, ContextTransitionFrame* pFrame)
8336 WRAPPER(GC_TRIGGERS);
8340 // pEx is NULL means that the exception is CLRLastThrownObjectException
8341 CLRLastThrownObjectException lastThrown;
8342 Exception* pException = pExOrig ? pExOrig : &lastThrown;
8343 COMPlusThrow(CLRException::GetThrowableFromException(pException));
// FindADCallbackType: state passed to StackWalkCallback_FindAD when scanning
// the stack for transitions into pSearchDomain.  fTargetTransition selects
// whether the walk stops at the most recent transition into the domain or
// continues to find the first (oldest) one; the matching frame lands in pFrame.
8347 struct FindADCallbackType {
8348 AppDomain *pSearchDomain;
8349 AppDomain *pPrevDomain;
8352 enum TargetTransition
8353 {fFirstTransitionInto, fMostRecentTransitionInto}
8356 FindADCallbackType() : pSearchDomain(NULL), pPrevDomain(NULL), pFrame(NULL)
8358 LIMITED_METHOD_CONTRACT;
// StackWalkCallback_FindAD: stack-walk callback that tracks appdomain
// transitions.  Whenever the walk crosses a frame whose return domain differs
// from the domain tracked so far, and the domain being left is the search
// target, the frame is recorded; for fMostRecentTransitionInto the walk then
// aborts, for fFirstTransitionInto it keeps going to find the oldest match.
8362 StackWalkAction StackWalkCallback_FindAD(CrawlFrame* pCF, void* data)
8370 FindADCallbackType *pData = (FindADCallbackType *)data;
8372 Frame *pFrame = pCF->GetFrame();
// Not a frame of interest (condition elided in this excerpt): keep walking.
8375 return SWA_CONTINUE;
8377 AppDomain *pReturnDomain = pFrame->GetReturnDomain();
// Ignore frames that do not change the domain.
8378 if (!pReturnDomain || pReturnDomain == pData->pPrevDomain)
8379 return SWA_CONTINUE;
8381 LOG((LF_APPDOMAIN, LL_INFO100, "StackWalkCallback_FindAD transition frame %8.8x into AD [%d]\n",
8382 pFrame, pReturnDomain->GetId().m_dwId));
8384 if (pData->pPrevDomain == pData->pSearchDomain) {
8386 // this is a transition into the domain we are unloading, so save it in case it is the first
8387 pData->pFrame = pFrame;
8388 if (pData->fTargetTransition == FindADCallbackType::fMostRecentTransitionInto)
8389 return SWA_ABORT; // only need to find last transition, so bail now
// Continue walking with the domain on the far side of this transition.
8392 pData->pPrevDomain = pReturnDomain;
8393 return SWA_CONTINUE;
8396 // This determines if a thread is running in the given domain at any point on the stack
// Thread::IsRunningIn: stack-walk for the most recent transition into pDomain.
// Returns the transition frame if found (NULL semantics elided in this
// excerpt).  Seeding pPrevDomain with the current domain means "currently
// running in pDomain" is itself detected as a transition.
8397 Frame *Thread::IsRunningIn(AppDomain *pDomain, int *count)
8405 FindADCallbackType fct;
8406 fct.pSearchDomain = pDomain;
8407 if (!fct.pSearchDomain)
8410 // set prev to current so if are currently running in the target domain,
8411 // we will detect the transition
8412 fct.pPrevDomain = m_pDomain;
8413 fct.fTargetTransition = FindADCallbackType::fMostRecentTransitionInto;
8416 // when this returns, if there is a transition into the AD, it will be in pFirstFrame
8417 StackWalkAction res;
8418 res = StackWalkFrames(StackWalkCallback_FindAD, (void*) &fct, ALLOW_ASYNC_STACK_WALK);
8424 // This finds the very first frame on the stack where the thread transitioned into the given domain
// Thread::GetFirstTransitionInto: like IsRunningIn, but walks the whole stack
// (fFirstTransitionInto) so the OLDEST transition frame into pDomain wins.
8425 Frame *Thread::GetFirstTransitionInto(AppDomain *pDomain, int *count)
8433 FindADCallbackType fct;
8434 fct.pSearchDomain = pDomain;
8435 // set prev to current so if are currently running in the target domain,
8436 // we will detect the transition
8437 fct.pPrevDomain = m_pDomain;
8438 fct.fTargetTransition = FindADCallbackType::fFirstTransitionInto;
8441 // when this returns, if there is a transition into the AD, it will be in pFirstFrame
8442 StackWalkAction res;
8443 res = StackWalkFrames(StackWalkCallback_FindAD, (void*) &fct, ALLOW_ASYNC_STACK_WALK);
8449 // Get outermost (oldest) AppDomain for this thread (not counting the default
8450 // domain every one starts in).
// Thread::GetInitialDomain: walk the Frame chain looking for
// ContextTransitionFrames; each one's return domain supersedes the previous
// answer, so after the loop pDomain holds the oldest domain entered.
8451 AppDomain *Thread::GetInitialDomain()
8459 AppDomain *pDomain = m_pDomain;
8460 AppDomain *pPrevDomain = NULL;
8461 Frame *pFrame = GetFrame();
8462 while (pFrame != FRAME_TOP)
// Identify context transition frames by vtable pointer.
8464 if (pFrame->GetVTablePtr() == ContextTransitionFrame::GetMethodFrameVPtr())
8467 pDomain = pPrevDomain;
8468 pPrevDomain = pFrame->GetReturnDomain();
8470 pFrame = pFrame->Next();
8475 #ifndef DACCESS_COMPILE
// Thread::SetUnloadBoundaryFrame: advance the unload-boundary marker to
// pFrame, but only if pFrame is higher (older) on the stack than the current
// marker -- the boundary only moves outward.  Caller must be the thread itself
// in cooperative mode, or hold the thread store lock.
8476 void Thread::SetUnloadBoundaryFrame(Frame *pFrame)
8478 LIMITED_METHOD_CONTRACT;
8479 _ASSERTE((this == GetThread() && PreemptiveGCDisabled()) ||
8480 ThreadStore::HoldingThreadStore());
// Stack grows down, so a numerically larger address is an older frame.
8481 if ((ULONG_PTR)m_pUnloadBoundaryFrame < (ULONG_PTR)pFrame)
8483 m_pUnloadBoundaryFrame = pFrame;
8487 ResetBeginAbortedForADUnload();
// Thread::ResetUnloadBoundaryFrame: clear the unload-boundary marker and the
// associated begin-aborted-for-AD-unload state.  Must run on the thread
// itself in cooperative mode.
8491 void Thread::ResetUnloadBoundaryFrame()
8493 LIMITED_METHOD_CONTRACT;
8494 _ASSERTE(this == GetThread() && PreemptiveGCDisabled());
8495 m_pUnloadBoundaryFrame=NULL;
8496 ResetBeginAbortedForADUnload();
// Thread::ShouldChangeAbortToUnload: decide whether a thread abort that has
// unwound past pFrame should be surfaced as an AppDomainUnloadedException.
// This applies only when pFrame is the unload boundary frame (set during
// unload processing); the abort is then reset with TAR_ADUnload.
8501 BOOL Thread::ShouldChangeAbortToUnload(Frame *pFrame, Frame *pUnloadBoundaryFrame)
8509 if (! pUnloadBoundaryFrame)
8510 pUnloadBoundaryFrame = GetUnloadBoundaryFrame();
8512 // turn the abort request into an AD unloaded exception when go past the boundary.
8513 if (pFrame != pUnloadBoundaryFrame)
8516 // Only time have an unloadboundaryframe is when have specifically marked that thread for aborting
8517 // during unload processing, so this won't trigger UnloadedException if have simply thrown a ThreadAbort
8518 // past an AD transition frame
8519 _ASSERTE (IsAbortRequested());
8521 EEResetAbort(TAR_ADUnload);
// TA_None means the abort was fully consumed by the reset above.
8523 if (m_AbortType == EEPolicy::TA_None)
// Thread::HaveExtraWorkForFinalizer: cheap poll for any of the non-finalizer
// housekeeping the finalizer thread performs in DoExtraWorkForFinalizer
// (overlapped-IO cleanup, timer flushes, code cache cleanup, dead/detached
// thread cleanup, appdomain unload work, dead-thread-triggered GC).
8533 BOOL Thread::HaveExtraWorkForFinalizer()
8535 LIMITED_METHOD_CONTRACT;
8537 return m_ThreadTasks
8538 || OverlappedDataObject::CleanupNeededFromGC()
8539 || ThreadpoolMgr::HaveTimerInfosToFlush()
8540 || ExecutionManager::IsCacheCleanupRequired()
8541 || Thread::CleanupNeededForFinalizedThread()
8542 || (m_DetachCount > 0)
8543 || AppDomain::HasWorkForFinalizerThread()
8544 || SystemDomain::System()->RequireAppDomainCleanup()
8545 || ThreadStore::s_pThreadStore->ShouldTriggerGCForDeadThreads();
// Thread::DoExtraWorkForFinalizer: perform the housekeeping advertised by
// HaveExtraWorkForFinalizer.  Runs only on the finalizer thread.
8548 void Thread::DoExtraWorkForFinalizer()
8556 _ASSERTE(GetThread() == this);
8557 _ASSERTE(this == FinalizerThread::GetFinalizerThread());
8559 #ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
// Ensure the finalizer thread is CoInitialized into the MTA when needed.
8560 if (RequiresCoInitialize())
8562 SetApartment(AS_InMTA, FALSE);
8564 #endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT
8566 if (AppDomain::HasWorkForFinalizerThread())
8568 AppDomain::ProcessUnloadDomainEventOnFinalizeThread()
8571 if (RequireSyncBlockCleanup())
8574 InteropSyncBlockInfo::FlushStandbyList();
8575 #endif // !FEATURE_PAL
8577 #ifdef FEATURE_COMINTEROP
8578 RCW::FlushStandbyList();
8579 #endif // FEATURE_COMINTEROP
8581 SyncBlockCache::GetSyncBlockCache()->CleanupSyncBlocks();
8583 if (SystemDomain::System()->RequireAppDomainCleanup())
8585 SystemDomain::System()->ProcessDelayedUnloadDomains();
// Reclaim Thread objects for detached/finalized threads.
8588 if(m_DetachCount > 0 || Thread::CleanupNeededForFinalizedThread())
8590 Thread::CleanupDetachedThreads();
// Only flush the code caches after a gen-1+ GC, when it is worthwhile.
8593 if(ExecutionManager::IsCacheCleanupRequired() && GCHeapUtilities::GetGCHeap()->GetCondemnedGeneration()>=1)
8595 ExecutionManager::ClearCaches();
8598 OverlappedDataObject::RequestCleanupFromGC();
8600 // If there were any TimerInfos waiting to be released, they'll get flushed now
8601 ThreadpoolMgr::FlushQueueOfTimerInfos();
8603 ThreadStore::s_pThreadStore->TriggerGCForDeadThreadsIfNecessary();
8607 // HELPERS FOR THE BASE OF A MANAGED THREAD, INCLUDING AD TRANSITION SUPPORT
8609 // We have numerous places where we start up a managed thread. This includes several places in the
8610 // ThreadPool, the 'new Thread(...).Start()' case, and the Finalizer. Try to factor the code so our
8611 // base exception handling behavior is consistent across those places. The resulting code is convoluted,
8612 // but it's better than the prior situation of each thread being on a different plan.
8614 // We need Middle & Outer methods for the usual problem of combining C++ & SEH.
8616 /* The effect of all this is that we get:
8618 Base of thread -- OS unhandled exception filter that we hook
8620 SEH handler from DispatchOuter
8621 C++ handler from DispatchMiddle
8623 And if there is an AppDomain transition before we call back to user code, we additionally get:
8625 AppDomain transition -- contains its own handlers to terminate the first pass
8626 and marshal the exception.
8628 SEH handler from DispatchOuter
8629 C++ handler from DispatchMiddle
8631 Regardless of whether or not there is an AppDomain transition, we then have:
8633 User code that obviously can throw.
8635 So if we don't have an AD transition, or we take a fault before we successfully transition the
8636 AppDomain, then the base-most DispatchOuter/Middle will deal with the exception. This may
8637 involve swallowing exceptions or it may involve Watson & debugger attach. It will always
8638 involve notifications to any AppDomain.UnhandledException event listeners.
8640 But if we did transition the AppDomain, then any Watson, debugger attach and UnhandledException
8641 events will occur in that AppDomain in the initial first pass. So we get a good debugging
8642 experience and we get notifications to the host that show which AppDomain is allowing exceptions
8643 to go unhandled (so perhaps it can be unloaded or otherwise dealt with).
8645 The trick is that if the exception goes unhandled at the process level, we would normally try
8646 to fire AppDomain events and display the faulting exception on the console from two more
8647 places. These are the base-most DispatchOuter/Middle pair and the hook of the OS unhandled
8648 exception handler at the base of the thread.
8650 This is redundant and messy. (There's no concern with getting a 2nd Watson because we only
8651 do one of these per process anyway). The solution for the base-most DispatchOuter/Middle is
8652 to use the ManagedThreadCallState.flags to control whether the exception has already been
8653 dealt with or not. These flags cause the ThreadBaseRedirectingFilter to either do normal
8654 "base of the thread" exception handling, or to ignore the exception because it has already
8655 been reported in the AppDomain we transitioned to.
8657 But turning off the reporting in the OS unhandled exception filter is harder. We don't want
8658 to flip a bit on the Thread to disable this, unless we can be sure we are only disabling
8659 something we already reported, and that this thread will never recover from that situation and
8660 start executing code again. Here's the normal nightmare scenario with SEH:
8662 1) exception of type A is thrown
8663 2) All the filters in the 1st pass say they don't want an A
8664 3) The exception gets all the way out and is considered unhandled. We report this "fact".
8665 4) Imagine we then set a bit that says this thread shouldn't report unhandled exceptions.
8666 5) The 2nd pass starts.
8667 6) Inside a finally, someone throws an exception of type B.
8668 7) A new 1st pass starts from the point of the throw, with a type B.
8669 8) Now a filter says "Yes, I will swallow exception B."
8670 9) We no longer have an unhandled exception, and execution continues merrily.
8672 This is an unavoidable consequence of the 2-pass model. If you report unhandled exceptions
8673 in the 1st pass (for good debugging), you might find that this was premature and you don't
8674 have an unhandled exception when you get to the 2nd pass.
8676 But it would not be optimal if in step 4 we set a bit that says we should suppress normal
8677 notifications and reporting on this thread, believing that the process will terminate.
8679 The solution is to recognize that the base OS unhandled exception filter runs in two modes.
8680 In the first mode, it operates as today and serves as our backstop. In the second mode
8681 it is fully redundant with the handlers pushed after the AppDomain transition, which are
8682 completely containing the exception to the AD that it occurred in (for purposes of reporting).
8683 So we just need a flag on the thread that says whether or not that set of handlers are pushed
8684 and functioning. That flag enables / disables the base exception reporting and is called
8685 TSNC_AppDomainContainUnhandled
// Flags controlling unhandled-exception reporting at the base of a managed
// thread: MTCSF_ContainToAppDomain keeps reporting inside the transitioned-to
// appdomain; MTCSF_SuppressDuplicate suppresses the redundant base-of-thread
// report (see the design comment above).
8690 enum ManagedThreadCallStateFlags
8693 MTCSF_ContainToAppDomain,
8694 MTCSF_SuppressDuplicate,
// ManagedThreadCallState: bundled arguments for the base of a managed thread
// (ManagedThreadBase_* helpers).  The target appdomain is addressed either by
// ID (bDomainIsAsID == TRUE, pAppDomainId valid) or by unsafe pointer
// (pUnsafeAppDomain valid); filterType selects the unhandled-exception filter
// location and flags control duplicate-report suppression.
8697 struct ManagedThreadCallState
8700 AppDomain* pUnsafeAppDomain;
8703 Context::ADCallBackFcnType pTarget;
8705 UnhandledExceptionLocation filterType;
8706 ManagedThreadCallStateFlags flags;
// True when pApp names the same domain this state targets, honoring the
// ID-vs-pointer representation.
8707 BOOL IsAppDomainEqual(AppDomain* pApp)
8709 LIMITED_METHOD_CONTRACT;
8710 return bDomainIsAsID?(pApp->GetId()==pAppDomainId):(pUnsafeAppDomain==pApp);
// Construct targeting a domain by ID.
8712 ManagedThreadCallState(ADID AppDomainId,Context::ADCallBackFcnType Target,LPVOID Args,
8713 UnhandledExceptionLocation FilterType, ManagedThreadCallStateFlags Flags):
8714 pAppDomainId(AppDomainId),
8715 pUnsafeAppDomain(NULL),
8716 bDomainIsAsID(TRUE),
8719 filterType(FilterType),
8722 LIMITED_METHOD_CONTRACT;
// Construct targeting a domain by (unsafe) pointer.
8725 ManagedThreadCallState(AppDomain* AppDomain,Context::ADCallBackFcnType Target,LPVOID Args,
8726 UnhandledExceptionLocation FilterType, ManagedThreadCallStateFlags Flags):
8727 pAppDomainId(ADID(0)),
8728 pUnsafeAppDomain(AppDomain),
8729 bDomainIsAsID(FALSE),
8732 filterType(FilterType),
8735 LIMITED_METHOD_CONTRACT;
// Re-target an existing state for a finalizer-thread callback.
8737 void InitForFinalizer(AppDomain* AppDomain,Context::ADCallBackFcnType Target,LPVOID Args)
8739 LIMITED_METHOD_CONTRACT;
8740 filterType=FinalizerThread;
8741 pUnsafeAppDomain=AppDomain;
8746 friend void ManagedThreadBase_NoADTransition(Context::ADCallBackFcnType pTarget,
8747 UnhandledExceptionLocation filterType);
8748 friend void ManagedThreadBase::FinalizerAppDomain(AppDomain* pAppDomain,
8749 Context::ADCallBackFcnType pTarget,
8751 ManagedThreadCallState *pTurnAround);
8754 // The following static helpers are outside of the ManagedThreadBase struct because I
8755 // don't want to change threads.h whenever I change the mechanism for how unhandled
8756 // exceptions works. The ManagedThreadBase struct is for the public exposure of the
8759 static void ManagedThreadBase_DispatchOuter(ManagedThreadCallState *pCallState);
8762 // Here's the tricky part. *IF and only IF* we took an AppDomain transition at the base, then we
8763 // now want to push another complete set of handlers above us. The reason is that we want the
8764 // Watson report and the unhandled exception event to occur in the target AppDomain. If we don't
8765 // do this apparently redundant push of handlers, then we will marshal back the exception to the
8766 // handlers on the Default AppDomain side. This will erase all the important exception state by
8767 // unwinding (catch and rethrow) in DoADCallBack. And it will cause all unhandled exceptions to
8768 // be reported from the Default AppDomain, which is annoying to any AppDomain.UnhandledException
8771 // So why not skip the handlers that are in the Default AppDomain and just push the ones after the
8772 // transition? Well, transitioning out of the Default AppDomain into the target AppDomain could
8773 // fail. We need handlers pushed for that case. And in that case it's perfectly reasonable to
8774 // report the problem as occurring in the Default AppDomain, which is what the base handlers will
// Callback run after the AppDomain transition performed by DispatchInner.
// Re-enters DispatchOuter one more time so a full set of handlers is pushed
// inside the target AppDomain, with flags set so any escaping exception is
// treated as unhandled here rather than propagated across the AD boundary.
8777 static void ManagedThreadBase_DispatchInCorrectAD(LPVOID args)
8787 ManagedThreadCallState *pCallState = (ManagedThreadCallState *) args;
8789 // Ensure we aren't going to infinitely recurse.
8790 _ASSERTE(pCallState->IsAppDomainEqual(GetThread()->GetDomain()));
8792 // And then go round one more time. But this time we want to ensure that the filter contains
8793 // any exceptions that aren't swallowed. These must be treated as unhandled, rather than
8794 // propagated through the AppDomain boundary in search of an outer handler. Otherwise we
8795 // will not get correct Watson behavior.
8796 pCallState->flags = MTCSF_ContainToAppDomain;
8797 ManagedThreadBase_DispatchOuter(pCallState);
// Restore the normal-base flag for any subsequent dispatch with this state.
8798 pCallState->flags = MTCSF_NormalBase;
// Innermost dispatch step: either performs the AppDomain transition (which
// re-enters DispatchOuter via ManagedThreadBase_DispatchInCorrectAD), or --
// if we are already in the right domain -- invokes the target callback
// directly.
8801 static void ManagedThreadBase_DispatchInner(ManagedThreadCallState *pCallState)
8812 Thread *pThread = GetThread();
8814 if (!pCallState->IsAppDomainEqual(pThread->GetDomain()))
8816 // On Win7 and later, AppDomain transitions at the threadbase will *not* have EH setup at transition boundary.
8817 // This implies that an unhandled exception from the base domain (i.e. AD in which the thread starts) will
8818 // not return to DefDomain but will continue to go up the stack with the thread still being in base domain.
8819 // We have a holder in ENTER_DOMAIN_*_NO_EH_AT_TRANSITION macro (ReturnToPreviousAppDomainHolder) that will
8820 // revert AD context at threadbase if an unwind is triggered after the exception has gone unhandled.
8822 // This also implies that there will be no exception object marshalling (and it may not be required after all)
8823 // as well and once the holder reverts the AD context, the LastThrownObject in Thread will be set to NULL.
// NOTE(review): the matching "#ifdef/#ifndef FEATURE_PAL" line for this
// #else/#endif pair is elided in this extraction.
8825 BOOL fSetupEHAtTransition = !(RunningOnWin7());
8826 #else // !FEATURE_PAL
8827 BOOL fSetupEHAtTransition = TRUE;
8828 #endif // !FEATURE_PAL
// Transition by ADID or by raw AppDomain pointer, depending on how the
// call state was constructed (the pointer form passes ADV_FINALIZER).
8830 if (pCallState->bDomainIsAsID)
8831 pThread->DoADCallBack(pCallState->pAppDomainId,
8832 ManagedThreadBase_DispatchInCorrectAD,
8833 pCallState, fSetupEHAtTransition);
8835 pThread->DoADCallBack(pCallState->pUnsafeAppDomain,
8836 ManagedThreadBase_DispatchInCorrectAD,
8837 pCallState, ADV_FINALIZER, fSetupEHAtTransition);
8841 // Since no AppDomain transition is necessary, we need no additional handlers pushed
8842 // *AFTER* the transition. We now have adequate handlers below us. Go ahead and
8843 // dispatch the call.
8844 (*pCallState->pTarget) (pCallState->args);
// Middle dispatch step: wraps DispatchInner in an EX_TRY/EX_CATCH so C++
// exceptions do not leak out of the thread base, and uses a C++ destructor
// (Cleanup) to restore the Frame chain and clear the contain-unhandled bit
// during the 2nd pass of an unwind.
8848 static void ManagedThreadBase_DispatchMiddle(ManagedThreadCallState *pCallState)
8850 STATIC_CONTRACT_GC_TRIGGERS;
8851 STATIC_CONTRACT_THROWS;
8852 STATIC_CONTRACT_MODE_COOPERATIVE;
8853 STATIC_CONTRACT_SO_TOLERANT;
8855 // We have the probe outside the EX_TRY below since corresponding EX_CATCH
8856 // also invokes SO_INTOLERANT code.
8857 BEGIN_SO_INTOLERANT_CODE(GetThread());
8861 // During an unwind, we have some cleanup:
8863 // 1) We should no longer suppress any unhandled exception reporting at the base
8864 // of the thread, because any handler that contained the exception to the AppDomain
8865 // where it occurred is now being removed from the stack.
8867 // 2) We need to unwind the Frame chain. We cannot do it when we get to the __except clause
8868 // because at this point we are in the 2nd phase and the stack has been popped. Any
8869 // stack crawling from another thread will see a frame chain in a popped region of stack.
8870 // Nor can we pop it in a filter, since this would destroy all the stack-walking information
8871 // we need to perform the 2nd pass. So doing it in a C++ destructor will ensure it happens
8872 // during the 2nd pass but before the stack is actually popped.
// NOTE(review): the "class Cleanup {" line and some member declarations are
// elided in this extraction; only the members/ctor/dtor bodies are visible.
8875 Frame *m_pEntryFrame;
// Capture the entry Frame so the destructor can restore it on unwind.
8879 Cleanup(Thread* pThread)
8881 m_pThread = pThread;
8882 m_pEntryFrame = pThread->m_pFrame;
// Destructor: runs during 2nd-pass unwind -- restore the Frame chain and
// re-enable unhandled-exception reporting at the base (see comment above).
8888 m_pThread->SetFrame(m_pEntryFrame);
8889 m_pThread->ResetThreadStateNC(Thread::TSNC_AppDomainContainUnhandled);
8893 Cleanup cleanup(GetThread());
8895 ManagedThreadBase_DispatchInner(pCallState);
8900 Exception *pException = GET_EXCEPTION();
8902 // RudeThreadAbort is a pre-allocated instance of ThreadAbort. So the following is sufficient.
8903 // For Whidbey, by default only swallow certain exceptions. If reverting back to Everett's
8904 // behavior (swallowing all unhandled exceptions), then swallow all unhandled exceptions.
8906 if (SwallowUnhandledExceptions() ||
8907 IsExceptionOfType(kThreadAbortException, pException) ||
8908 IsExceptionOfType(kAppDomainUnloadedException, pException))
8910 // Do nothing to swallow the exception
8914 // Setting up the unwind_and_continue_handler ensures that C++ exceptions do not leak out.
8915 // An example is when Thread1 in Default AppDomain creates AppDomain2, enters it, creates
8916 // another thread T2 and T2 throws OOM exception (that goes unhandled). At the transition
8917 // boundary, END_DOMAIN_TRANSITION will catch it and invoke RaiseCrossContextException
8918 // that will rethrow the OOM as a C++ exception.
8920 // Without unwind_and_continue_handler below, the exception will fly up the stack to
8921 // this point, where it will be rethrown and thus leak out.
8922 INSTALL_UNWIND_AND_CONTINUE_HANDLER;
8926 UNINSTALL_UNWIND_AND_CONTINUE_HANDLER;
8929 EX_END_CATCH(SwallowAllExceptions);
8931 END_SO_INTOLERANT_CODE;
// Parameter blob passed to the PAL_TRY filter in DispatchOuter.
// NOTE(review): these appear to be two alternative definitions selected by a
// preprocessor conditional whose #ifdef/#else/#endif lines are elided in this
// extraction (one plain, one derived from NotifyOfCHFFilterWrapperParam) --
// confirm against the full file. The "} TryParam;" closers are also elided.
8935 typedef struct Param
8937 ManagedThreadCallState * m_pCallState;
8939 Param(ManagedThreadCallState * pCallState, Frame * pFrame): m_pCallState(pCallState), m_pFrame(pFrame) {}
// Variant carrying the CHF-notification wrapper state as its base.
8942 typedef struct Param: public NotifyOfCHFFilterWrapperParam
8944 ManagedThreadCallState * m_pCallState;
8945 Param(ManagedThreadCallState * pCallState): m_pCallState(pCallState) {}
8948 // Dispatch to the appropriate filter, based on the active CallState.
// SEH filter installed by ManagedThreadBase_DispatchOuter. Invokes the
// AppDomain swallowing filter, notifies the debugger when the exception will
// be caught here, and updates the call-state flags / thread-state bits that
// suppress duplicate unhandled-exception processing.
8949 static LONG ThreadBaseRedirectingFilter(PEXCEPTION_POINTERS pExceptionInfo, LPVOID pParam)
8951 STATIC_CONTRACT_THROWS;
8952 STATIC_CONTRACT_GC_TRIGGERS;
8953 STATIC_CONTRACT_MODE_ANY;
8955 LONG (*ptrFilter) (PEXCEPTION_POINTERS, PVOID);
8957 TryParam * pRealParam = reinterpret_cast<TryParam *>(pParam);
8958 ManagedThreadCallState * _pCallState = pRealParam->m_pCallState;
8959 ManagedThreadCallStateFlags flags = _pCallState->flags;
// Fast path: UE processing already happened for this dispatch -- just mark
// the thread and keep searching so no duplicate reporting occurs.
8961 if (flags == MTCSF_SuppressDuplicate)
8963 LOG((LF_EH, LL_INFO100, "ThreadBaseRedirectingFilter: setting TSNC_AppDomainContainUnhandled\n"));
8964 GetThread()->SetThreadStateNC(Thread::TSNC_AppDomainContainUnhandled);
8965 return EXCEPTION_CONTINUE_SEARCH;
8969 BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return EXCEPTION_CONTINUE_SEARCH;);
8971 // This will invoke the swallowing filter. If that returns EXCEPTION_CONTINUE_SEARCH,
8972 // it will trigger unhandled exception processing.
8973 ptrFilter = ThreadBaseExceptionAppDomainFilter;
8975 // WARNING - ptrFilter may not return
8976 // This occurs when the debugger decides to intercept an exception and catch it in a frame closer
8977 // to the leaf than the one executing this filter
8978 ret = (*ptrFilter) (pExceptionInfo, _pCallState);
8980 // Although EXCEPTION_EXECUTE_HANDLER can also be returned in cases corresponding to
8981 // unhandled exceptions, all of those cases have already notified the debugger of an unhandled
8982 // exception which prevents a second notification indicating the exception was caught
8983 if (ret == EXCEPTION_EXECUTE_HANDLER)
8986 // WARNING - NotifyOfCHFFilterWrapper may not return
8987 // This occurs when the debugger decides to intercept an exception and catch it in a frame closer
8988 // to the leaf than the one executing this filter
8989 NotifyOfCHFFilterWrapper(pExceptionInfo, pRealParam);
8992 // If we are containing unhandled exceptions to the AppDomain we transitioned into, and the
8993 // exception is coming out, then this exception is going unhandled. We have already done
8994 // Watson and managed events, so suppress all filters below us. Otherwise we are swallowing
8995 // it and returning out of the AppDomain.
8996 if (flags == MTCSF_ContainToAppDomain)
8998 if(ret == EXCEPTION_CONTINUE_SEARCH)
9000 _pCallState->flags = MTCSF_SuppressDuplicate;
9002 else if(ret == EXCEPTION_EXECUTE_HANDLER)
9004 _pCallState->flags = MTCSF_NormalBase;
9006 // else if( EXCEPTION_CONTINUE_EXECUTION ) do nothing
9009 // Get the reference to the current thread..
9010 Thread *pCurThread = GetThread();
9011 _ASSERTE(pCurThread);
9013 if (flags == MTCSF_ContainToAppDomain)
9016 if (((ManagedThreadCallState *) _pCallState)->flags == MTCSF_SuppressDuplicate)
9018 // Set the flag that we have done unhandled exception processing
9019 // for this managed thread that started in a non-default domain
9020 LOG((LF_EH, LL_INFO100, "ThreadBaseRedirectingFilter: setting TSNC_AppDomainContainUnhandled\n"));
9021 pCurThread->SetThreadStateNC(Thread::TSNC_AppDomainContainUnhandled);
9026 _ASSERTE(flags == MTCSF_NormalBase);
9028 if(!IsSingleAppDomain())
9030 // This assert shouldn't be hit in CoreCLR since:
9032 // 1) It has no concept of managed entry point that is invoked by the shim. You can
9033 // only run managed code via hosting APIs that will run code in non-default domains.
9035 // 2) Managed threads cannot be created in DefaultDomain since no user code executes
9036 // in default domain.
9038 // So, if this is hit, something is not right!
9039 _ASSERTE(!"How come a managed thread in CoreCLR has suffered unhandled exception in DefaultDomain?");
9042 LOG((LF_EH, LL_INFO100, "ThreadBaseRedirectingFilter: setting TSNC_ProcessedUnhandledException\n"));
9045 // In the default domain, when an exception goes unhandled on a managed thread whose threadbase is in the VM (e.g. explicitly spawned threads,
9046 // ThreadPool threads, finalizer thread, etc), CLR can end up in the unhandled exception processing path twice.
9048 // The first attempt to perform UE processing happens at the managed thread base (via this function). When it completes,
9049 // we will set TSNC_ProcessedUnhandledException state against the thread to indicate that we have performed the unhandled exception processing.
9051 // On the desktop CLR, after the first attempt, we will return back to the OS with EXCEPTION_CONTINUE_SEARCH as unhandled exceptions cannot be swallowed. When the exception reaches
9052 // the native threadbase in the OS kernel, the OS will invoke the UEF registered for the process. This can result in CLR's UEF (COMUnhandledExceptionFilter)
9053 // getting invoked that will attempt to perform UE processing yet again for the same thread. To avoid this duplicate processing, we check the presence of
9054 // TSNC_ProcessedUnhandledException state on the thread and if present, we simply return back to the OS.
9056 // On desktop CoreCLR, we will only do UE processing once (at the managed threadbase) since no thread is created in default domain - all are created and executed in non-default domain.
9057 // As a result, we go via completely different codepath that prevents duplication of UE processing from happening, especially since desktop CoreCLR is targeted for SL and SL
9058 // always passes us a flag to swallow unhandled exceptions.
9060 // On CoreSys CoreCLR, the host can ask CoreCLR to run all code in the default domain. As a result, when we return from the first attempt to perform UE
9061 // processing, the call could return back with EXCEPTION_EXECUTE_HANDLER since, like desktop CoreCLR is instructed by SL host to swallow all unhandled exceptions,
9062 // CoreSys CoreCLR can also be instructed by its Phone host to swallow all unhandled exceptions. As a result, the exception dispatch will never continue to go upstack
9063 // to the native threadbase in the OS kernel and thus, there will never be a second attempt to perform UE processing. Hence, we don't, and shouldn't, need to set
9064 // TSNC_ProcessedUnhandledException state against the thread if we are in SingleAppDomain mode and have been asked to swallow the exception.
9066 // If we continue to set TSNC_ProcessedUnhandledException and a ThreadPool Thread A has an exception go unhandled, we will swallow it correctly for the first time.
9067 // The next time Thread A has an exception go unhandled, our UEF will see TSNC_ProcessedUnhandledException set and assume (incorrectly) UE processing has happened and
9068 // will fail to honor the host policy (e.g. swallow unhandled exception). Thus, the 2nd unhandled exception may end up crashing the app when it should not.
9070 if (IsSingleAppDomain() && (ret != EXCEPTION_EXECUTE_HANDLER))
9072 // Since we have already done unhandled exception processing for it, we don't want it
9073 // to happen again if our UEF gets invoked upon returning back to the OS.
9075 // Set the flag to indicate so.
9076 pCurThread->SetThreadStateNC(Thread::TSNC_ProcessedUnhandledException);
9081 END_SO_INTOLERANT_CODE;
// Outermost dispatch step: pushes a DebuggerU2MCatchHandlerFrame (so the
// debugger can raise CatchHandlerFound notifications), wraps DispatchMiddle
// in a PAL_TRY guarded by ThreadBaseRedirectingFilter, and performs
// post-exception cleanup (tracker popping, abort reset, AD-context revert on
// unhandled breakpoint exceptions).
// FIX(review): the argument of "args.pTryParam = ..." had been corrupted by
// HTML-entity mangling ("&para" + "m" collapsed to the pilcrow character);
// restored to "&param" to take the address of the local TryParam.
9085 static void ManagedThreadBase_DispatchOuter(ManagedThreadCallState *pCallState)
9087 STATIC_CONTRACT_GC_TRIGGERS;
9088 STATIC_CONTRACT_THROWS;
9089 STATIC_CONTRACT_MODE_COOPERATIVE;
9091 // HasStarted() must have already been performed by our caller
9092 _ASSERTE(GetThread() != NULL);
9094 Thread *pThread = GetThread();
9095 #ifdef WIN64EXCEPTIONS
9096 Frame *pFrame = pThread->m_pFrame;
9097 #endif // WIN64EXCEPTIONS
9099 // The sole purpose of having this frame is to tell the debugger that we have a catch handler here
9100 // which may swallow managed exceptions. The debugger needs this in order to send a
9101 // CatchHandlerFound (CHF) notification.
9102 FrameWithCookie<DebuggerU2MCatchHandlerFrame> catchFrame;
9104 TryParam param(pCallState);
9105 param.pFrame = &catchFrame;
// NOTE(review): the "struct TryArgs {" opener and some declarations are
// elided in this extraction; only the member lines are visible.
9109 TryParam *pTryParam;
9112 BOOL *pfHadException;
9114 #ifdef WIN64EXCEPTIONS
9116 #endif // WIN64EXCEPTIONS
9119 args.pTryParam = &param;
9120 args.pThread = pThread;
// Assume an exception until the try body runs to completion (cleared below).
9122 BOOL fHadException = TRUE;
9123 args.pfHadException = &fHadException;
9125 #ifdef WIN64EXCEPTIONS
9126 args.pFrame = pFrame;
9127 #endif // WIN64EXCEPTIONS
9129 PAL_TRY(TryArgs *, pArgs, &args)
9131 PAL_TRY(TryParam *, pParam, pArgs->pTryParam)
9133 ManagedThreadBase_DispatchMiddle(pParam->m_pCallState);
9135 PAL_EXCEPT_FILTER(ThreadBaseRedirectingFilter)
9137 // Note: one of our C++ exceptions will never reach this filter because they're always caught by
9138 // the EX_CATCH in ManagedThreadBase_DispatchMiddle().
9140 // If eCLRDeterminedPolicy, we only swallow for TA, RTA, and ADU exception.
9141 // For eHostDeterminedPolicy, we will swallow all the managed exception.
9142 #ifdef WIN64EXCEPTIONS
9143 // this must be done after the second pass has run, it does not
9144 // reference anything on the stack, so it is safe to run in an
9145 // SEH __except clause as well as a C++ catch clause.
9146 ExceptionTracker::PopTrackers(pArgs->pFrame);
9147 #endif // WIN64EXCEPTIONS
9149 // Fortunately, ThreadAbortExceptions are always
9150 if (pArgs->pThread->IsAbortRequested())
9151 pArgs->pThread->EEResetAbort(Thread::TAR_Thread);
9155 *(pArgs->pfHadException) = FALSE;
9159 // If we had a breakpoint exception that has gone unhandled,
9160 // then switch to the correct AD context. Its fine to do this
9163 // 1) We are in an unwind (this is a C++ destructor).
9164 // 2) SetFrame (below) does validation to be in the correct AD context. Thus,
9165 // this should be done before that.
9166 if (fHadException && (GetCurrentExceptionCode() == STATUS_BREAKPOINT))
9168 ReturnToPreviousAppDomain();
9176 // For the implementation, there are three variants of work possible:
9178 // 1. Establish the base of a managed thread, and switch to the correct AppDomain.
// Builds a normal-base call state keyed by ADID and runs the full dispatch.
9179 static void ManagedThreadBase_FullTransitionWithAD(ADID pAppDomain,
9180 Context::ADCallBackFcnType pTarget,
9182 UnhandledExceptionLocation filterType)
9192 ManagedThreadCallState CallState(pAppDomain, pTarget, args, filterType, MTCSF_NormalBase);
9193 ManagedThreadBase_DispatchOuter(&CallState);
9196 // 2. Establish the base of a managed thread, but the AppDomain transition must be
9197 // deferred until later.
// Runs the dispatch in the current AppDomain; the call state points at itself
// (args == &CallState) so it can later be handed to another AppDomain as the
// "turnaround" state (see ManagedThreadBase::FinalizerAppDomain).
9198 void ManagedThreadBase_NoADTransition(Context::ADCallBackFcnType pTarget,
9199 UnhandledExceptionLocation filterType)
9209 AppDomain *pAppDomain = GetAppDomain();
9211 ManagedThreadCallState CallState(pAppDomain, pTarget, NULL, filterType, MTCSF_NormalBase);
9213 // self-describing, to create a pTurnAround data for eventual delivery to a subsequent AppDomain
9215 CallState.args = &CallState;
9217 ManagedThreadBase_DispatchOuter(&CallState);
9222 // And here are the various exposed entrypoints for base thread behavior
9224 // The 'new Thread(...).Start()' case from COMSynchronizable kickoff thread worker
// Entry point for user-created managed threads; filter location: ManagedThread.
9225 void ManagedThreadBase::KickOff(ADID pAppDomain, Context::ADCallBackFcnType pTarget, LPVOID args)
9227 WRAPPER_NO_CONTRACT;
9228 ManagedThreadBase_FullTransitionWithAD(pAppDomain, pTarget, args, ManagedThread);
9231 // The IOCompletion, QueueUserWorkItem, AddTimer, RegisterWaitForSingleObject cases in the ThreadPool
// Entry point for thread-pool work; filter location: ThreadPoolThread.
9232 void ManagedThreadBase::ThreadPool(ADID pAppDomain, Context::ADCallBackFcnType pTarget, LPVOID args)
9234 WRAPPER_NO_CONTRACT;
9235 ManagedThreadBase_FullTransitionWithAD(pAppDomain, pTarget, args, ThreadPoolThread);
9238 // The Finalizer thread establishes exception handling at its base, but defers all the AppDomain
// transitions to FinalizerAppDomain below; no AD switch happens here.
9240 void ManagedThreadBase::FinalizerBase(Context::ADCallBackFcnType pTarget)
9242 WRAPPER_NO_CONTRACT;
9243 ManagedThreadBase_NoADTransition(pTarget, FinalizerThread);
// Re-targets the finalizer's turnaround call state at the given AppDomain and
// dispatches from the inner step (handlers are already established by
// FinalizerBase / ManagedThreadBase_NoADTransition).
9246 void ManagedThreadBase::FinalizerAppDomain(AppDomain *pAppDomain,
9247 Context::ADCallBackFcnType pTarget,
9249 ManagedThreadCallState *pTurnAround)
9251 WRAPPER_NO_CONTRACT;
9252 pTurnAround->InitForFinalizer(pAppDomain,pTarget,args);
9253 _ASSERTE(pTurnAround->flags == MTCSF_NormalBase);
9254 ManagedThreadBase_DispatchInner(pTurnAround);
9257 //+----------------------------------------------------------------------------
9259 // Method: Thread::GetStaticFieldAddress private
9261 // Synopsis: Get the address of the field relative to the current thread.
9262 // If an address has not been assigned yet then create one.
9264 //+----------------------------------------------------------------------------
9266 LPVOID Thread::GetStaticFieldAddress(FieldDesc *pFD)
9274 _ASSERTE(pFD != NULL);
9275 _ASSERTE(pFD->IsThreadStatic());
9276 _ASSERTE(!pFD->IsRVA());
9278 // for static field the MethodTable is exact even for generic classes
9279 MethodTable *pMT = pFD->GetEnclosingMethodTable();
9281 // We need to make sure that the class has been allocated, however
9282 // we should not call the class constructor
9283 ThreadStatics::GetTLM(pMT)->EnsureClassAllocated(pMT);
9285 PTR_BYTE base = NULL;
// GC-tracked statics (object refs and boxed value types) live in the GC
// statics block; everything else in the non-GC block.
9287 if (pFD->GetFieldType() == ELEMENT_TYPE_CLASS ||
9288 pFD->GetFieldType() == ELEMENT_TYPE_VALUETYPE)
9290 base = pMT->GetGCThreadStaticsBasePointer();
9294 base = pMT->GetNonGCThreadStaticsBasePointer();
9297 _ASSERTE(base != NULL);
9299 DWORD offset = pFD->GetOffset();
9300 _ASSERTE(offset <= FIELD_OFFSET_LAST_REAL_OFFSET);
9302 LPVOID result = (LPVOID)((PTR_BYTE)base + (DWORD)offset);
9304 // For value classes, the handle points at an OBJECTREF
9305 // which holds the boxed value class, so dereference and unbox.
9306 if (pFD->GetFieldType() == ELEMENT_TYPE_VALUETYPE)
9308 OBJECTREF obj = ObjectToOBJECTREF(*(Object**) result);
9309 result = obj->GetData();
9315 #endif // #ifndef DACCESS_COMPILE
9317 //+----------------------------------------------------------------------------
9319 // Method: Thread::GetStaticFieldAddrNoCreate private
9321 // Synopsis: Get the address of the field relative to the thread.
9322 // If an address has not been assigned, return NULL.
9323 // No creating is allowed.
9325 //+----------------------------------------------------------------------------
// DAC-friendly variant: read-only lookup (dac_cast / PTR_ types), never
// allocates the statics block.
9327 TADDR Thread::GetStaticFieldAddrNoCreate(FieldDesc *pFD, PTR_AppDomain pDomain)
9336 _ASSERTE(pFD != NULL);
9337 _ASSERTE(pFD->IsThreadStatic());
9339 // for static field the MethodTable is exact even for generic classes
9340 PTR_MethodTable pMT = pFD->GetEnclosingMethodTable();
9342 PTR_BYTE base = NULL;
9344 if (pFD->GetFieldType() == ELEMENT_TYPE_CLASS ||
9345 pFD->GetFieldType() == ELEMENT_TYPE_VALUETYPE)
9347 base = pMT->GetGCThreadStaticsBasePointer(dac_cast<PTR_Thread>(this), pDomain);
9351 base = pMT->GetNonGCThreadStaticsBasePointer(dac_cast<PTR_Thread>(this), pDomain);
9357 DWORD offset = pFD->GetOffset();
9358 _ASSERTE(offset <= FIELD_OFFSET_LAST_REAL_OFFSET);
9360 TADDR result = dac_cast<TADDR>(base) + (DWORD)offset;
9362 // For value classes, the handle points at an OBJECTREF
9363 // which holds the boxed value class, so dereference and unbox.
9364 if (pFD->IsByValue())
9366 _ASSERTE(result != NULL);
9367 PTR_Object obj = *PTR_UNCHECKED_OBJECTREF(result);
9370 result = dac_cast<TADDR>(obj->GetData());
9376 #ifndef DACCESS_COMPILE
9379 // NotifyFrameChainOfExceptionUnwind
9380 // -----------------------------------------------------------
9381 // This method will walk the Frame chain from pStartFrame to
9382 // the last frame that is below pvLimitSP and will call each
9383 // frame's ExceptionUnwind method. It will return the first
9384 // Frame that is above pvLimitSP.
9386 Frame * Thread::NotifyFrameChainOfExceptionUnwind(Frame* pStartFrame, LPVOID pvLimitSP)
9391 DISABLED(GC_TRIGGERS); // due to UnwindFrameChain from NOTRIGGER areas
9393 PRECONDITION(CheckPointer(pStartFrame));
9394 PRECONDITION(CheckPointer(pvLimitSP));
9402 // assert that the specified Thread's Frame chain actually
9403 // contains the start Frame.
// (Debug-only consistency check: walk from the thread's current frame to
// pStartFrame or FRAME_TOP.)
9406 while ((pFrame != pStartFrame) &&
9407 (pFrame != FRAME_TOP))
9409 pFrame = pFrame->Next();
9411 CONSISTENCY_CHECK_MSG(pFrame == pStartFrame, "pStartFrame is not on pThread's Frame chain!");
9414 pFrame = pStartFrame;
// Frames live on the stack, so pointer comparison against pvLimitSP bounds
// the walk to frames at or below the unwind limit.
9415 while (pFrame < pvLimitSP)
9417 CONSISTENCY_CHECK(pFrame != PTR_NULL);
9418 CONSISTENCY_CHECK((pFrame) > static_cast<Frame *>((LPVOID)GetCurrentSP()));
9419 pFrame->ExceptionUnwind();
9420 pFrame = pFrame->Next();
9423 // return the frame after the last one notified of the unwind
9427 //+----------------------------------------------------------------------------
9429 // Method: Thread::DeleteThreadStaticData private
9431 // Synopsis: Delete the static data for each appdomain that this thread
9435 //+----------------------------------------------------------------------------
9437 void Thread::DeleteThreadStaticData()
9445 // Deallocate the memory used by the table of ThreadLocalBlocks
9446 if (m_pTLBTable != NULL)
// Null out each slot; the per-slot free/delete lines are elided in this
// extraction.
9448 for (SIZE_T i = 0; i < m_TLBTableSize; ++i)
9450 ThreadLocalBlock * pTLB = m_pTLBTable[i];
9453 m_pTLBTable[i] = NULL;
9462 m_pThreadLocalBlock = NULL;
9466 //+----------------------------------------------------------------------------
9468 // Method: Thread::DeleteThreadStaticData protected
9470 // Synopsis: Delete the static data for the given appdomain. This is called
9471 // when the appdomain unloads.
9474 //+----------------------------------------------------------------------------
9476 void Thread::DeleteThreadStaticData(AppDomain *pDomain)
9484 // Look up the AppDomain index
9485 SIZE_T index = pDomain->GetIndex().m_dwIndex;
9487 ThreadLocalBlock * pTLB = NULL;
9489 // NULL out the pointer to the ThreadLocalBlock
9490 if (index < m_TLBTableSize)
9492 pTLB = m_pTLBTable[index];
9493 m_pTLBTable[index] = NULL;
9498 // Since the AppDomain is being unloaded anyway, all the memory used by
9499 // the TLB will be reclaimed, so we don't really have to call FreeTable()
// Lazily resolves the FieldDescs for Thread.CurrentCulture /
// CurrentUICulture and touches their thread-static addresses (forcing
// allocation of the statics block for the current thread).
9507 void Thread::InitCultureAccessors()
9515 OBJECTREF *pCurrentCulture = NULL;
9516 Thread *pThread = GetThread();
9520 if (managedThreadCurrentCulture == NULL) {
9521 managedThreadCurrentCulture = MscorlibBinder::GetField(FIELD__THREAD__CULTURE);
9522 pCurrentCulture = (OBJECTREF*)pThread->GetStaticFieldAddress(managedThreadCurrentCulture);
9525 if (managedThreadCurrentUICulture == NULL) {
9526 managedThreadCurrentUICulture = MscorlibBinder::GetField(FIELD__THREAD__UI_CULTURE);
9527 pCurrentCulture = (OBJECTREF*)pThread->GetStaticFieldAddress(managedThreadCurrentUICulture);
// Invokes the managed property getter identified by `id` on `pObject`
// (GC-protected for the duration) and returns the raw ARG_SLOT result.
9532 ARG_SLOT Thread::CallPropertyGet(BinderMethodID id, OBJECTREF pObject)
9547 GCPROTECT_BEGIN(pObject);
9548 MethodDescCallSite propGet(id, &pObject);
9550 // Set up the Stack.
9551 ARG_SLOT pNewArgs = ObjToArgSlot(pObject);
9553 // Make the actual call.
9554 retVal = propGet.Call_RetArgSlot(&pNewArgs);
// Invokes the managed property setter identified by `id` on `pObject`,
// passing `pValue`; both refs are GC-protected for the duration.
9560 ARG_SLOT Thread::CallPropertySet(BinderMethodID id, OBJECTREF pObject, OBJECTREF pValue)
9575 GCPROTECT_BEGIN(pObject);
9576 GCPROTECT_BEGIN(pValue);
9577 MethodDescCallSite propSet(id, &pObject);
9579 // Set up the Stack.
9580 ARG_SLOT pNewArgs[] = {
9581 ObjToArgSlot(pObject),
9582 ObjToArgSlot(pValue)
9585 // Make the actual call.
9586 retVal = propSet.Call_RetArgSlot(pNewArgs);
// Returns the thread's current (UI) culture object, populating the field
// via the managed getter if it has not been set yet. Must run in
// cooperative GC mode (asserted below).
9593 OBJECTREF Thread::GetCulture(BOOL bUICulture)
9604 _ASSERTE(PreemptiveGCDisabled());
9606 // This is the case when we're building mscorlib and haven't yet created
9607 // the system assembly.
9608 if (SystemDomain::System()->SystemAssembly()==NULL || g_fForbidEnterEE) {
9612 // Get the actual thread culture.
9613 OBJECTREF pCurThreadObject = GetExposedObject();
9614 _ASSERTE(pCurThreadObject!=NULL);
9616 THREADBASEREF pThreadBase = (THREADBASEREF)(pCurThreadObject);
9617 OBJECTREF pCurrentCulture = bUICulture ? pThreadBase->GetCurrentUICulture() : pThreadBase->GetCurrentUserCulture();
9619 if (pCurrentCulture==NULL) {
9620 GCPROTECT_BEGIN(pThreadBase);
// NOTE(review): elided lines likely branch on bUICulture here; only the
// UI-culture getter path is visible in this extraction.
9622 // Call the Getter for the CurrentUICulture. This will cause it to populate the field.
9623 ARG_SLOT retVal = CallPropertyGet(METHOD__THREAD__GET_UI_CULTURE,
9624 (OBJECTREF)pThreadBase);
9625 pCurrentCulture = ArgSlotToObj(retVal);
9627 //This is faster than calling the property, because this is what the call does anyway.
9628 pFD = MscorlibBinder::GetField(FIELD__CULTURE_INFO__CURRENT_CULTURE);
9631 pFD->CheckRunClassInitThrowing();
9633 pCurrentCulture = pFD->GetStaticOBJECTREF();
9634 _ASSERTE(pCurrentCulture!=NULL);
9639 return pCurrentCulture;
9644 // copy culture name into szBuffer and return length
// Returns the name of the PARENT of the thread's current (UI) culture.
// Falls back to "en" while mscorlib is still being built.
9645 int Thread::GetParentCultureName(__out_ecount(length) LPWSTR szBuffer, int length, BOOL bUICulture)
9654 // This is the case when we're building mscorlib and haven't yet created
9655 // the system assembly.
9656 if (SystemDomain::System()->SystemAssembly()==NULL) {
9657 const WCHAR *tempName = W("en");
9658 INT32 tempLength = (INT32)wcslen(tempName);
9659 _ASSERTE(length>=tempLength);
9660 memcpy(szBuffer, tempName, tempLength*sizeof(WCHAR));
9664 ARG_SLOT Result = 0;
9667 INT32 bufferLength=0;
9668 STRINGREF cultureName = NULL;
// NOTE(review): these two OBJECTREFs are members of a local gc-protect
// struct whose declaration line is elided (they are used as gc.* below).
9673 OBJECTREF pCurrentCulture;
9674 OBJECTREF pParentCulture;
9676 ZeroMemory(&gc, sizeof(gc));
9677 GCPROTECT_BEGIN(gc);
9679 gc.pCurrentCulture = GetCulture(bUICulture);
9680 if (gc.pCurrentCulture != NULL) {
9681 Result = CallPropertyGet(METHOD__CULTURE_INFO__GET_PARENT, gc.pCurrentCulture);
9685 gc.pParentCulture = (OBJECTREF)(ArgSlotToObj(Result));
9686 if (gc.pParentCulture != NULL)
9689 Result = CallPropertyGet(METHOD__CULTURE_INFO__GET_NAME, gc.pParentCulture);
9700 // Extract the data out of the String.
9701 cultureName = (STRINGREF)(ArgSlotToObj(Result));
9702 cultureName->RefInterpretGetStringValuesDangerousForGC((WCHAR**)&buffer, &bufferLength);
// Copy only if the caller's buffer can hold the name plus a terminator.
9704 if (bufferLength<length) {
9705 memcpy(szBuffer, buffer, bufferLength * sizeof (WCHAR));
9706 szBuffer[bufferLength]=0;
9707 retVal = bufferLength;
9716 // copy culture name into szBuffer and return length
// Returns the name of the thread's current (UI) culture.
// Falls back to "en-US" while mscorlib is still being built or when
// entering the EE is forbidden.
9717 int Thread::GetCultureName(__out_ecount(length) LPWSTR szBuffer, int length, BOOL bUICulture)
9726 // This is the case when we're building mscorlib and haven't yet created
9727 // the system assembly.
9728 if (SystemDomain::System()->SystemAssembly()==NULL || g_fForbidEnterEE) {
9729 const WCHAR *tempName = W("en-US");
9730 INT32 tempLength = (INT32)wcslen(tempName);
9731 _ASSERTE(length>=tempLength);
9732 memcpy(szBuffer, tempName, tempLength*sizeof(WCHAR));
9736 ARG_SLOT Result = 0;
9739 INT32 bufferLength=0;
9740 STRINGREF cultureName = NULL;
9744 OBJECTREF pCurrentCulture = NULL;
9745 GCPROTECT_BEGIN(pCurrentCulture)
9747 pCurrentCulture = GetCulture(bUICulture);
9748 if (pCurrentCulture != NULL)
9749 Result = CallPropertyGet(METHOD__CULTURE_INFO__GET_NAME, pCurrentCulture);
9757 // Extract the data out of the String.
9758 cultureName = (STRINGREF)(ArgSlotToObj(Result));
9759 cultureName->RefInterpretGetStringValuesDangerousForGC((WCHAR**)&buffer, &bufferLength);
// Copy only if the caller's buffer can hold the name plus a terminator.
9761 if (bufferLength<length) {
9762 memcpy(szBuffer, buffer, bufferLength * sizeof (WCHAR));
9763 szBuffer[bufferLength]=0;
9764 retVal = bufferLength;
// Non-throwing wrapper around Thread::GetCultureId; swallows all exceptions
// and returns LCID(-1) on failure.
9770 LCID GetThreadCultureIdNoThrow(Thread *pThread, BOOL bUICulture)
9780 LCID Result = LCID(-1);
9784 Result = pThread->GetCultureId(bUICulture);
9789 EX_END_CATCH (SwallowAllExceptions);
9791 return (INT32)Result;
9794 // Return a language identifier.
// Returns the LCID of the thread's current (UI) culture, or -1 when
// unavailable (mscorlib still building, EE entry forbidden, or
// FEATURE_USE_LCID not compiled in).
9795 LCID Thread::GetCultureId(BOOL bUICulture)
9804 // This is the case when we're building mscorlib and haven't yet created
9805 // the system assembly.
9806 if (SystemDomain::System()->SystemAssembly()==NULL || g_fForbidEnterEE) {
9810 LCID Result = (LCID) -1;
9812 #ifdef FEATURE_USE_LCID
9815 OBJECTREF pCurrentCulture = NULL;
9816 GCPROTECT_BEGIN(pCurrentCulture)
9818 pCurrentCulture = GetCulture(bUICulture);
9819 if (pCurrentCulture != NULL)
9820 Result = (LCID)CallPropertyGet(METHOD__CULTURE_INFO__GET_ID, pCurrentCulture);
// Sets this thread's culture from an LCID: converts the LCID to a managed
// CultureInfo and stores it via SetCulture.
9828 void Thread::SetCultureId(LCID lcid, BOOL bUICulture)
9839 OBJECTREF CultureObj = NULL;
9840 GCPROTECT_BEGIN(CultureObj)
9842 // Convert the LCID into a CultureInfo.
9843 GetCultureInfoForLCID(lcid, &CultureObj);
9845 // Set the newly created culture as the thread's culture.
9846 SetCulture(&CultureObj, bUICulture);
// Stores the given CultureInfo on the managed Thread object by invoking the
// appropriate (UI vs. non-UI) culture property setter.
9851 void Thread::SetCulture(OBJECTREF *CultureObj, BOOL bUICulture)
9860 // Retrieve the exposed thread object.
9861 OBJECTREF pCurThreadObject = GetExposedObject();
9862 _ASSERTE(pCurThreadObject!=NULL);
9864 // Set the culture property on the thread.
9865 THREADBASEREF pThreadBase = (THREADBASEREF)(pCurThreadObject);
9866 CallPropertySet(bUICulture
9867 ? METHOD__THREAD__SET_UI_CULTURE
9868 : METHOD__THREAD__SET_CULTURE,
9869 (OBJECTREF)pThreadBase, *CultureObj);
// Called during GC; clears m_fPromoted for a thread in preemptive mode with
// no frames on its Frame chain. NOTE(review): the line that sets m_fPromoted
// TRUE appears elided from this excerpt.
9872 void Thread::SetHasPromotedBytes ()
9882 _ASSERTE(GCHeapUtilities::IsGCInProgress() && IsGCThread ());
9884 if (!m_fPreemptiveGCDisabled)
9886 if (FRAME_TOP == GetFrame())
9887 m_fPromoted = FALSE;
// Returns whether the given thread (or, in the second visible form, the
// current OS thread) holds the thread store lock.
9891 BOOL ThreadStore::HoldingThreadStore(Thread *pThread)
9902 return (pThread == s_pThreadStore->m_HoldingThread);
9906 return (s_pThreadStore->m_holderthreadid.IsCurrentThread());
// Fiber-switch bookkeeping limits: number of retained records per slot and
// captured stack depth per record.
9913 int Thread::MaxThreadRecord = 20;
9914 int Thread::MaxStackDepth = 20;
9916 const int Thread::MaxThreadTrackInfo = Thread::ThreadTrackInfo_Max;
// Records a fiber/task-switch diagnostic entry of the given type: timestamp,
// OS thread id, and a captured stack trace, into the per-slot circular buffer
// m_pFiberInfo. No-ops if tracking is not enabled for this event type.
// NOTE(review): slot computation and PAL_TRY lines are elided in this excerpt.
9918 void Thread::AddFiberInfo(DWORD type)
9920 STATIC_CONTRACT_NOTHROW;
9921 STATIC_CONTRACT_GC_NOTRIGGER;
9922 STATIC_CONTRACT_MODE_ANY;
9923 STATIC_CONTRACT_SO_TOLERANT;
9927 if (m_pFiberInfo[0] == NULL) {
// Filter by the configured mask of event types worth saving.
9931 DWORD mask = g_pConfig->SaveThreadInfoMask();
9932 if ((mask & type) == 0)
9944 _ASSERTE (slot < ThreadTrackInfo_Max);
9946 // use try to force ebp frame.
// Claim the next record index in the circular buffer (wraps at MaxThreadRecord).
9948 ULONG index = FastInterlockIncrement((LONG*)&m_FiberInfoIndex[slot])-1;
9949 index %= MaxThreadRecord;
// Records are variable-sized: header plus MaxStackDepth stack slots.
9950 size_t unitBytes = sizeof(FiberSwitchInfo)-sizeof(size_t)+MaxStackDepth*sizeof(size_t);
9951 FiberSwitchInfo *pInfo = (FiberSwitchInfo*)((char*)m_pFiberInfo[slot] + index*unitBytes);
9952 pInfo->timeStamp = getTimeStamp();
9953 pInfo->threadID = GetCurrentThreadId();
9955 #ifdef FEATURE_HIJACK
9956 // We can't crawl the stack of a thread that currently has a hijack pending
9957 // (since the hijack routine won't be recognized by any code manager). So we
9958 // undo any hijack, the EE will re-attempt it later.
9959 // Stack crawl happens on the current thread, which may not be 'this' thread.
9960 Thread* pCurrentThread = GetThread();
9961 if (pCurrentThread != NULL && (pCurrentThread->m_State & TS_Hijacked))
9963 pCurrentThread->UnhijackThread();
// Skip 2 frames (this function and its caller) and zero-pad the remainder.
9967 int count = UtilCaptureStackBackTrace (2,MaxStackDepth,(PVOID*)pInfo->callStack,NULL);
9968 while (count < MaxStackDepth) {
9969 pInfo->callStack[count++] = 0;
9972 PAL_EXCEPT_NAKED (EXCEPTION_EXECUTE_HANDLER)
9976 #endif // !FEATURE_PAL
// Re-associates this Thread object with the current OS thread/fiber after a
// host task switch: restores TLS, app domain, OS thread id, thread handle,
// and notifies the profiler. Fails with HOST_E_INVALIDOPERATION if a Thread
// is already installed on this OS thread, or E_OUTOFMEMORY if TLS setup fails.
// NOTE(review): many lines (braces, GC-poll block, return) are elided here.
9981 HRESULT Thread::SwitchIn(HANDLE threadHandle)
9983 // can't have dynamic contracts because this method is going to mess with TLS
9984 STATIC_CONTRACT_NOTHROW;
9985 STATIC_CONTRACT_GC_NOTRIGGER;
9986 STATIC_CONTRACT_MODE_ANY;
9988 //can't do heap allocation in this method
9989 CantAllocHolder caHolder;
9991 // !!! Can not use the following line, since it uses an object which .dctor calls
9992 // !!! FLS_SETVALUE, and a new FLS is created after SwitchOut.
9993 // CANNOTTHROWCOMPLUSEXCEPTION();
9995 // Case Cookie to thread object and add to tls
9997 Thread *pThread = GetThread();
9998 // If this is hit, we need to understand.
9999 // Sometimes we see the assert but the memory does not match the assert.
10003 //_ASSERT(GetThread() == NULL);
// A Thread must not already be installed on this OS thread.
10006 if (GetThread() != NULL) {
10007 return HOST_E_INVALIDOPERATION;
10010 CExecutionEngine::SwitchIn();
10012 // !!! no contract for this class.
10013 // !!! We have not switched in tls block.
// RAII guard: if switch-in fails part-way, undo the TLS/app-domain setup.
10014 class EnsureTlsData
10020 EnsureTlsData(Thread* pThread){m_pThread = pThread; m_fNeedReset = TRUE;}
10026 SetAppDomain(NULL);
10027 CExecutionEngine::SwitchOut();
10030 void SuppressRelease()
10032 m_fNeedReset = FALSE;
10036 EnsureTlsData ensure(this);
10038 if (SetThread(this))
10040 Thread *pThread = GetThread();
10042 return E_OUTOFMEMORY;
10044 // !!! make sure that we switchin TLS so that FLS is available for Contract etc.
10046 // We redundantly keep the domain in its own TLS slot, for faster access from
10048 if (!SetAppDomain(m_pDomainAtTaskSwitch))
10050 return E_OUTOFMEMORY;
10053 CANNOTTHROWCOMPLUSEXCEPTION();
10055 // We switch out a fiber only if the fiber is in preemptive gc mode.
10056 _ASSERTE (!PreemptiveGCDisabled());
10060 // We have to be switched in on the same fiber
10061 _ASSERTE (GetCachedStackBase() == GetStackUpperBound());
10065 // only set the m_OSThreadId to bad food in Fiber mode
10066 m_OSThreadId = ::GetCurrentThreadId();
10067 #ifdef PROFILING_SUPPORTED
10068 // If a profiler is present, then notify the profiler that a
10069 // thread has been created.
10071 BEGIN_PIN_PROFILER(CORProfilerTrackThreads());
10072 g_profControlBlock.pProfInterface->ThreadAssignedToOSThread(
10073 (ThreadID)this, m_OSThreadId);
10074 END_PIN_PROFILER();
10076 #endif // PROFILING_SUPPORTED
10078 SetThreadHandle(threadHandle);
10080 #ifndef FEATURE_PAL
10081 m_pTEB = (struct _NT_TIB*)NtCurrentTeb();
10082 #endif // !FEATURE_PAL
// If a GC suspension is pending and we were in cooperative mode, cooperate now.
10085 if (g_TrapReturningThreads && m_fPreemptiveGCDisabled && this != ThreadSuspend::GetSuspensionThread()) {
10086 WorkingOnThreadContextHolder workingOnThreadContext(this);
10087 if (workingOnThreadContext.Acquired())
10089 HandledJITCase(TRUE);
10095 // For debugging purpose, we save callstack during task switch. On Win64, the callstack
10096 // is done within OS loader lock, and obtaining managed callstack may cause fiber switch.
10097 SetThreadStateNC(TSNC_InTaskSwitch);
10098 AddFiberInfo(ThreadTrackInfo_Schedule);
10099 ResetThreadStateNC(TSNC_InTaskSwitch);
// Success: keep the TLS/app-domain state installed.
10102 ensure.SuppressRelease();
// Public switch-out entry point. NOTE(review): the body is elided in this
// excerpt apart from the contract.
10111 HRESULT Thread::SwitchOut()
10113 LIMITED_METHOD_CONTRACT;
// Dissociates this Thread from the current OS thread/fiber for a host task
// switch: saves the app domain, poisons m_OSThreadId, notifies the profiler,
// parks the thread handle, and finally tears down TLS. TLS must be touched
// last because contracts use it. NOTE(review): several lines are elided.
10118 void Thread::InternalSwitchOut()
10120 INDEBUG( BOOL fNoTLS = (CExecutionEngine::CheckThreadStateNoCreate(0) == NULL));
10132 // Can't do heap allocation in this method.
10133 // We need to scope this holder because its destructor accesses FLS.
10134 CantAllocHolder caHolder;
10136 // !!! Can not use the following line, since it uses an object which .dctor calls
10137 // !!! FLS_SETVALUE, and a new FLS is created after SwitchOut.
10138 // CANNOTTHROWCOMPLUSEXCEPTION();
10140 _ASSERTE(GetThread() == this);
10142 _ASSERTE (!fNoTLS ||
10143 (CExecutionEngine::CheckThreadStateNoCreate(0) == NULL));
10146 // workaround wwl: for SQL reschedule
// NOTE(review): 'if (PreemptiveGCDisabled)' tests the member function
// pointer, not its result — looks like a latent bug kept as a workaround;
// confirm before changing.
10148 if (PreemptiveGCDisabled)
10153 _ASSERTE(!PreemptiveGCDisabled());
10156 // Can not assert here. If a mutex is orphaned, the thread will have ThreadAffinity.
10157 //_ASSERTE(!HasThreadAffinity());
10159 _ASSERTE (!fNoTLS ||
10160 (CExecutionEngine::CheckThreadStateNoCreate(0) == NULL));
10163 // For debugging purpose, we save callstack during task switch. On Win64, the callstack
10164 // is done within OS loader lock, and obtaining managed callstack may cause fiber switch.
10165 SetThreadStateNC(TSNC_InTaskSwitch);
10166 AddFiberInfo(ThreadTrackInfo_Schedule);
10167 ResetThreadStateNC(TSNC_InTaskSwitch);
10170 _ASSERTE (!fNoTLS ||
10171 (CExecutionEngine::CheckThreadStateNoCreate(0) == NULL));
// Remember the domain so SwitchIn can restore it on another OS thread.
10173 m_pDomainAtTaskSwitch = GetAppDomain();
10177 // only set the m_OSThreadId to bad food in Fiber mode
10178 m_OSThreadId = SWITCHED_OUT_FIBER_OSID;
10179 #ifdef PROFILING_SUPPORTED
10180 // If a profiler is present, then notify the profiler that a
10181 // thread has been created.
10183 BEGIN_PIN_PROFILER(CORProfilerTrackThreads());
10184 g_profControlBlock.pProfInterface->ThreadAssignedToOSThread(
10185 (ThreadID)this, m_OSThreadId);
10186 END_PIN_PROFILER();
10188 #endif // PROFILING_SUPPORTED
10191 _ASSERTE (!fNoTLS ||
10192 (CExecutionEngine::CheckThreadStateNoCreate(0) == NULL));
// Park the handle; spin (without Sleep, which is banned here) until no other
// thread is mid-use of the old handle.
10194 HANDLE hThread = GetThreadHandle();
10196 SetThreadHandle (SWITCHOUT_HANDLE_VALUE);
10197 while (m_dwThreadHandleBeingUsed > 0)
10199 // Another thread is using the handle now.
10201 // We can not call __SwitchToThread since we can not go back to host.
10203 #define Sleep(a) Dont_Use_Sleep(a)
10206 if (m_WeOwnThreadHandle && m_ThreadHandleForClose == INVALID_HANDLE_VALUE)
10208 m_ThreadHandleForClose = hThread;
10211 _ASSERTE (!fNoTLS ||
10212 (CExecutionEngine::CheckThreadStateNoCreate(0) == NULL));
10215 CExecutionEngine::SwitchOut();
10217 // We need to make sure that TLS are touched last here.
10218 // Contract uses TLS.
10220 SetAppDomain(NULL);
10222 _ASSERTE (!fNoTLS ||
10223 (CExecutionEngine::CheckThreadStateNoCreate(0) == NULL));
// Sums thread-pool completion counts across all threads: the shared overflow
// counter plus each thread's local count. Before the thread store exists,
// only the overflow counter is available.
10228 LONG Thread::GetTotalThreadPoolCompletionCount()
10238 if (g_fEEStarted) //make sure we actually have a thread store
10240 // make sure up-to-date thread-local counts are visible to us
10241 ::FlushProcessWriteBuffers();
10243 // enumerate all threads, summing their local counts.
10244 ThreadStoreLockHolder tsl;
10246 total = s_threadPoolCompletionCountOverflow.Load();
10248 Thread *pThread = NULL;
10249 while ((pThread = ThreadStore::GetAllThreadList(pThread, 0, 0)) != NULL)
10251 total += pThread->m_threadPoolCompletionCount;
// EE not started: counts from dead threads were folded into the overflow.
10256 total = s_threadPoolCompletionCountOverflow.Load();
// Resets the managed Thread object (culture, name) and returns the priority
// read from it; this overload switches to cooperative mode first (switch
// elided in this excerpt) and delegates to the InCoopMode variant.
10263 INT32 Thread::ResetManagedThreadObject(INT32 nPriority)
10272 return ResetManagedThreadObjectInCoopMode(nPriority);
// Must be called in cooperative GC mode: clears culture and name on the
// exposed managed Thread object and returns its current priority.
10275 INT32 Thread::ResetManagedThreadObjectInCoopMode(INT32 nPriority)
10285 THREADBASEREF pObject = (THREADBASEREF)ObjectFromHandle(m_ExposedObject);
10286 if (pObject != NULL)
10288 pObject->ResetCulture();
10289 pObject->ResetName();
10290 nPriority = pObject->GetPriority();
// Full reset for thread recycling: drops thread-static data and clears
// GC allocation/promotion bookkeeping.
10296 void Thread::FullResetThread()
10306 // We need to put this thread in COOPERATIVE GC first to solve race between AppDomain::Unload
10307 // and Thread::Reset. AppDomain::Unload does a full GC to collect all roots in one AppDomain.
10308 // ThreadStaticData used to be coupled with a managed array of objects in the managed Thread
10309 // object, however this is no longer the case.
10311 // TODO: Do we still need to put this thread into COOP mode?
10314 DeleteThreadStaticData();
10316 m_alloc_context.alloc_bytes = 0;
10317 m_fPromoted = FALSE;
// Returns whether a real (managed-object) reset is needed before returning
// this thread to the pool: TRUE if it is no longer background or its managed
// priority deviates from Normal. NOTE(review): return statements are elided.
10320 BOOL Thread::IsRealThreadPoolResetNeeded()
10331 if(!IsBackground())
10334 THREADBASEREF pObject = (THREADBASEREF)ObjectFromHandle(m_ExposedObject);
10336 if(pObject != NULL)
10338 INT32 nPriority = pObject->GetPriority();
10340 if(nPriority != ThreadNative::PRIORITY_NORMAL)
// Core reset logic shared by thread-pool recycling and the finalizer thread:
// optionally resets the managed Thread object, clears pending aborts, and
// restores background status and OS priority to the expected baseline.
10347 void Thread::InternalReset(BOOL fFull, BOOL fNotFinalizerThread, BOOL fThreadObjectResetNeeded, BOOL fResetAbort)
10351 if(!fNotFinalizerThread || fThreadObjectResetNeeded) {GC_TRIGGERS;SO_INTOLERANT;} else {GC_NOTRIGGER;SO_TOLERANT;}
10355 _ASSERTE (this == GetThread());
10359 INT32 nPriority = ThreadNative::PRIORITY_NORMAL;
// The finalizer thread runs at Highest by design.
10361 if (!fNotFinalizerThread && this == FinalizerThread::GetFinalizerThread())
10363 nPriority = ThreadNative::PRIORITY_HIGHEST;
10366 if(fThreadObjectResetNeeded)
10368 nPriority = ResetManagedThreadObject(nPriority);
10377 //m_MarshalAlloc.Collapse(NULL);
10379 if (fResetAbort && IsAbortRequested()) {
10380 UnmarkThreadForAbort(TAR_ALL);
10383 if (fResetAbort && IsAborted())
10386 if (IsThreadPoolThread() && fThreadObjectResetNeeded)
10388 SetBackground(TRUE);
// Only touch the OS priority if the managed priority had drifted.
10389 if (nPriority != ThreadNative::PRIORITY_NORMAL)
10391 SetThreadPriority(THREAD_PRIORITY_NORMAL);
10394 else if (!fNotFinalizerThread && this == FinalizerThread::GetFinalizerThread())
10396 SetBackground(TRUE);
10397 if (nPriority != ThreadNative::PRIORITY_HIGHEST)
10399 SetThreadPriority(THREAD_PRIORITY_HIGHEST);
// Host-facing entry point: fully resets this task for reuse by a host.
// Validates that it is called on the right thread in the right state, clears
// aborts/locks/exception trackers, then switches the task out. After
// InternalSwitchOut, GetThread() is NULL — hence static contracts only.
// NOTE(review): ErrExit label, hr declaration, and braces are elided here.
10404 HRESULT Thread::Reset(BOOL fFull)
10406 // !!! Can not use non-static contract here.
10407 // !!! Contract depends on Thread object for GC_TRIGGERS.
10408 // !!! At the end of this function, we call InternalSwitchOut,
10409 // !!! and then GetThread()=NULL, and dtor of contract does not work any more.
10410 STATIC_CONTRACT_NOTHROW;
10411 STATIC_CONTRACT_GC_TRIGGERS;
10412 STATIC_CONTRACT_ENTRY_POINT;
10414 if ( !g_fEEStarted)
10419 BEGIN_SO_INTOLERANT_CODE_NOPROBE;
10422 _ASSERTE (GetThread() == this);
10423 #ifdef _TARGET_X86_
10424 _ASSERTE (GetExceptionState()->GetContextRecord() == NULL);
// Must be invoked on the thread being reset.
10428 if (GetThread() != this)
10430 IfFailGo(E_UNEXPECTED);
10433 _ASSERTE (!PreemptiveGCDisabled());
10434 _ASSERTE (m_pFrame == FRAME_TOP);
10435 // A host should not recycle a CLRTask if the task is created by us through CreateNewThread.
10436 // We need to make Thread.Join work for this case.
10437 if ((m_StateNC & (TSNC_CLRCreatedThread | TSNC_CannotRecycle)) != 0)
10439 // Todo: wwl better returning code.
10440 IfFailGo(E_UNEXPECTED);
10443 #ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
// A thread that has CoInitialized cannot be safely recycled.
10444 if (IsCoInitialized())
10446 // The current thread has done CoInitialize
10447 IfFailGo(E_UNEXPECTED);
10452 AddFiberInfo(ThreadTrackInfo_Lifetime);
10455 SetThreadState(TS_TaskReset);
10457 if (IsAbortRequested())
10459 EEResetAbort(Thread::TAR_ALL);
10462 InternalReset(fFull);
10464 if (PreemptiveGCDisabled())
10466 EnablePreemptiveGC();
10471 #ifdef WIN64EXCEPTIONS
// Drop all exception trackers; (void*)-1 means "pop everything".
10472 ExceptionTracker::PopTrackers((void*)-1);
10473 #endif // WIN64EXCEPTIONS
10475 ResetThreadStateNC(TSNC_UnbalancedLocks);
10478 InternalSwitchOut();
10479 m_OSThreadId = SWITCHED_OUT_FIBER_OSID;
10484 END_SO_INTOLERANT_CODE_NOPROBE;
10486 #ifdef ENABLE_CONTRACTS_DATA
10487 // Decouple our cache from the Task.
10488 // Next time, the thread may be run on a different thread.
10491 m_pClrDebugState = NULL;
// Host-facing task exit: balances COM initialization if needed, poisons the
// OS thread id, and detaches the Thread. After DetachThread the Thread object
// may be deleted, so no member may be touched afterwards.
// NOTE(review): hr declaration, ErrExit label, and return are elided here.
10498 HRESULT Thread::ExitTask ()
10500 // !!! Can not use contract here.
10501 // !!! Contract depends on Thread object for GC_TRIGGERS.
10502 // !!! At the end of this function, we call InternalSwitchOut,
10503 // !!! and then GetThread()=NULL, and dtor of contract does not work any more.
10504 STATIC_CONTRACT_NOTHROW;
10505 STATIC_CONTRACT_GC_TRIGGERS;
10506 STATIC_CONTRACT_ENTRY_POINT;
10508 if ( !g_fEEStarted)
10513 // <TODO> We need to probe here, but can't introduce destructors etc.</TODO>
10514 BEGIN_CONTRACT_VIOLATION(SOToleranceViolation);
10516 //OnThreadTerminate(FALSE);
10517 _ASSERTE (this == GetThread());
10518 _ASSERTE (!PreemptiveGCDisabled());
10520 // Can not assert the following. SQL may call ExitTask after addref and abort a task.
10521 //_ASSERTE (m_UnmanagedRefCount == 0);
10522 if (this != GetThread())
10523 IfFailGo(HOST_E_INVALIDOPERATION);
10525 #ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
10526 if (IsCoInitialized())
10528 // This thread has used ole32. We need to balance CoInitialize call on this thread.
10529 // We also need to free any COM objects created on this thread.
10531 // If we don't do this work, ole32 is going to do the same during its DLL_THREAD_DETACH,
10532 // and may re-enter CLR.
10536 m_OSThreadId = SWITCHED_OUT_FIBER_OSID;
10537 hr = DetachThread(FALSE);
10538 // !!! Do not touch any field of Thread object. The Thread object is subject to delete
10539 // !!! after DetachThread call.
10542 END_CONTRACT_VIOLATION;
// Host-initiated safe abort of this thread; swallows any exception raised
// by UserAbort and guards against stack overflow.
10547 HRESULT Thread::Abort ()
10552 if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
10557 BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return COR_E_STACKOVERFLOW;);
10560 UserAbort(TAR_Thread, EEPolicy::TA_Safe, INFINITE, Thread::UAC_Host);
10565 EX_END_CATCH(SwallowAllExceptions);
10566 END_SO_INTOLERANT_CODE;
// Host-initiated rude abort: same shape as Abort but with TA_Rude policy.
10571 HRESULT Thread::RudeAbort()
10576 if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
10581 BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return COR_E_STACKOVERFLOW);
10585 UserAbort(TAR_Thread, EEPolicy::TA_Rude, INFINITE, Thread::UAC_Host);
10590 EX_END_CATCH(SwallowAllExceptions);
10592 END_SO_INTOLERANT_CODE;
// Reports whether the host should schedule this thread with priority: yes if
// it is in cooperative mode or is the finalizer thread.
10597 HRESULT Thread::NeedsPriorityScheduling(BOOL *pbNeedsPriorityScheduling)
10606 *pbNeedsPriorityScheduling = (m_fPreemptiveGCDisabled ||
10607 (g_fEEStarted && this == FinalizerThread::GetFinalizerThread()));
// Reports the number of locks this thread currently holds.
10612 HRESULT Thread::LocksHeld(SIZE_T *pLockCount)
10614 LIMITED_METHOD_CONTRACT;
10616 *pLockCount = m_dwLockCount;
// Stores the host-assigned task identifier. (Parameter name 'asked' is
// presumably a typo for 'taskid' — kept as-is; interface unchanged.)
10620 HRESULT Thread::SetTaskIdentifier(TASKID asked)
10622 LIMITED_METHOD_CONTRACT;
10624 // @todo: Should be check for uniqueness?
// Enters a region in which asynchronous thread aborts are deferred
// (reference counted via m_PreventAbort).
10629 HRESULT Thread::BeginPreventAsyncAbort()
10631 WRAPPER_NO_CONTRACT;
10636 FastInterlockIncrement((LONG*)&m_PreventAbort);
10640 AddFiberInfo(ThreadTrackInfo_Abort);
// Debug-only bookkeeping of nested disable-abort-check regions.
10642 FastInterlockIncrement((LONG*)&m_dwDisableAbortCheckCount);
10668 ULONG Thread::AddRef()
10670 WRAPPER_NO_CONTRACT;
10672 _ASSERTE(m_ExternalRefCount > 0);
10674 _ASSERTE (m_UnmanagedRefCount != (DWORD) -1);
10675 ULONG ref = FastInterlockIncrement((LONG*)&m_UnmanagedRefCount);
10678 AddFiberInfo(ThreadTrackInfo_Lifetime);
10683 ULONG Thread::Release()
10685 WRAPPER_NO_CONTRACT;
10686 SUPPORTS_DAC_HOST_ONLY;
10688 _ASSERTE (m_ExternalRefCount > 0);
10689 _ASSERTE (m_UnmanagedRefCount > 0);
10690 ULONG ref = FastInterlockDecrement((LONG*)&m_UnmanagedRefCount);
10692 AddFiberInfo(ThreadTrackInfo_Lifetime);
10697 HRESULT Thread::QueryInterface(REFIID riid, void **ppUnk)
10699 LIMITED_METHOD_CONTRACT;
10701 return E_NOINTERFACE;
10705 void Thread::SetupThreadForHost()
10715 _ASSERTE (GetThread() == this);
10716 CONTRACT_VIOLATION(SOToleranceViolation);
// Classifies the current thread by its TlsIdx_ThreadType flags into an
// ETaskType; threads with no special flag but with a Thread object are TT_USER.
// The if/else chain establishes precedence among overlapping flags.
10721 ETaskType GetCurrentTaskType()
10723 STATIC_CONTRACT_NOTHROW;
10724 STATIC_CONTRACT_GC_NOTRIGGER;
10725 STATIC_CONTRACT_SO_TOLERANT;
10727 ETaskType TaskType = TT_UNKNOWN;
10728 size_t type = (size_t)ClrFlsGetValue (TlsIdx_ThreadType);
10729 if (type & ThreadType_DbgHelper)
10731 TaskType = TT_DEBUGGERHELPER;
10733 else if (type & ThreadType_GC)
10737 else if (type & ThreadType_Finalizer)
10739 TaskType = TT_FINALIZER;
10741 else if (type & ThreadType_Timer)
10743 TaskType = TT_THREADPOOL_TIMER;
10745 else if (type & ThreadType_Gate)
10747 TaskType = TT_THREADPOOL_GATE;
10749 else if (type & ThreadType_Wait)
10751 TaskType = TT_THREADPOOL_WAIT;
10753 else if (type & ThreadType_ADUnloadHelper)
10755 TaskType = TT_ADUNLOAD;
10757 else if (type & ThreadType_Threadpool_IOCompletion)
10759 TaskType = TT_THREADPOOL_IOCOMPLETION;
10761 else if (type & ThreadType_Threadpool_Worker)
10763 TaskType = TT_THREADPOOL_WORKER;
10767 Thread *pThread = GetThread();
10770 TaskType = TT_USER;
// ---------------------------------------------------------------------------
// DeadlockAwareLock: a lock wrapper that records holder/blocker relationships
// (guarded by g_DeadlockAwareCrst) so that would-be deadlock cycles can be
// detected before blocking. Methods below are its out-of-line definitions.
// ---------------------------------------------------------------------------
10777 DeadlockAwareLock::DeadlockAwareLock(const char *description)
10778 : m_pHoldingThread(NULL)
10780 , m_description(description)
10783 LIMITED_METHOD_CONTRACT;
10786 DeadlockAwareLock::~DeadlockAwareLock()
10797 // Wait for another thread to leave its loop in DeadlockAwareLock::TryBeginEnterLock
10798 CrstHolder lock(&g_DeadlockAwareCrst);
// CHECK-style recursive deadlock detection: walks holder -> blocking-lock
// edges; fails with a descriptive message if pThread is found in the chain.
// Caller must hold g_DeadlockAwareCrst.
10801 CHECK DeadlockAwareLock::CheckDeadlock(Thread *pThread)
10805 PRECONDITION(g_DeadlockAwareCrst.OwnedByCurrentThread());
10811 // Note that this check is recursive in order to produce descriptive check failure messages.
10812 Thread *pHoldingThread = m_pHoldingThread.Load();
10813 if (pThread == pHoldingThread)
10815 CHECK_FAILF(("Lock %p (%s) is held by thread %d", this, m_description, pThread));
10818 if (pHoldingThread != NULL)
10820 DeadlockAwareLock *pBlockingLock = pHoldingThread->m_pBlockingLock.Load();
10821 if (pBlockingLock != NULL)
10823 CHECK_MSGF(pBlockingLock->CheckDeadlock(pThread),
10824 ("Deadlock: Lock %p (%s) is held by thread %d", this, m_description, pHoldingThread));
// Non-mutating query: would entering this lock from the current thread
// close a holder/blocker cycle? Walks the same chain as CheckDeadlock.
10831 BOOL DeadlockAwareLock::CanEnterLock()
10833 Thread * pThread = GetThread();
10835 CONSISTENCY_CHECK_MSG(pThread != NULL,
10836 "Cannot do deadlock detection on non-EE thread");
10837 CONSISTENCY_CHECK_MSG(pThread->m_pBlockingLock.Load() == NULL,
10838 "Cannot block on two locks at once");
10841 CrstHolder lock(&g_DeadlockAwareCrst);
10843 // Look for deadlocks
10844 DeadlockAwareLock *pLock = this;
10848 Thread * holdingThread = pLock->m_pHoldingThread;
// Cycle back to ourselves => deadlock.
10850 if (holdingThread == pThread)
10855 if (holdingThread == NULL)
10861 pLock = holdingThread->m_pBlockingLock;
10865 // Thread is running free
// Same walk as CanEnterLock, but on success additionally registers this
// lock as the current thread's blocking lock before it waits.
10874 BOOL DeadlockAwareLock::TryBeginEnterLock()
10883 Thread * pThread = GetThread();
10885 CONSISTENCY_CHECK_MSG(pThread != NULL,
10886 "Cannot do deadlock detection on non-EE thread");
10887 CONSISTENCY_CHECK_MSG(pThread->m_pBlockingLock.Load() == NULL,
10888 "Cannot block on two locks at once");
10891 CrstHolder lock(&g_DeadlockAwareCrst);
10893 // Look for deadlocks
10894 DeadlockAwareLock *pLock = this;
10898 Thread * holdingThread = pLock->m_pHoldingThread;
10900 if (holdingThread == pThread)
10905 if (holdingThread == NULL)
10911 pLock = holdingThread->m_pBlockingLock;
10915 // Thread is running free
10920 pThread->m_pBlockingLock = this;
// Asserting variant: CHECKs for a cycle, then registers the blocking lock.
10926 void DeadlockAwareLock::BeginEnterLock()
10935 Thread * pThread = GetThread();
10937 CONSISTENCY_CHECK_MSG(pThread != NULL,
10938 "Cannot do deadlock detection on non-EE thread");
10939 CONSISTENCY_CHECK_MSG(pThread->m_pBlockingLock.Load() == NULL,
10940 "Cannot block on two locks at once");
10943 CrstHolder lock(&g_DeadlockAwareCrst);
10945 // Look for deadlock loop
10946 CONSISTENCY_CHECK_MSG(CheckDeadlock(pThread), "Deadlock detected!");
10948 pThread->m_pBlockingLock = this;
// Transition from blocking to holding; safe without g_DeadlockAwareCrst
// per the comment below.
10952 void DeadlockAwareLock::EndEnterLock()
10961 Thread * pThread = GetThread();
10963 CONSISTENCY_CHECK(m_pHoldingThread.Load() == NULL || m_pHoldingThread.Load() == pThread);
10964 CONSISTENCY_CHECK(pThread->m_pBlockingLock.Load() == this);
10966 // No need to take a lock when going from blocking to holding. This
10967 // transition implies the lack of a deadlock that other threads can see.
10968 // (If they would see a deadlock after the transition, they would see
10969 // one before as well.)
10971 m_pHoldingThread = pThread;
// Releases the holder record when the underlying lock is exited.
10974 void DeadlockAwareLock::LeaveLock()
10983 CONSISTENCY_CHECK(m_pHoldingThread == GetThread());
10984 CONSISTENCY_CHECK(GetThread()->m_pBlockingLock.Load() == NULL);
10986 m_pHoldingThread = NULL;
10992 // Normally, any thread we operate on has a Thread block in its TLS. But there are
10993 // a few special threads we don't normally execute managed code on.
10995 // There is a scenario where we run managed code on such a thread, which is when the
10996 // DLL_THREAD_ATTACH notification of an (IJW?) module calls into managed code. This
10997 // is incredibly dangerous. If a GC is provoked, the system may have trouble performing
10998 // the GC because its threads aren't available yet.
// Fixed-size registry of "special" EE thread ids (debug diagnostics only).
10999 static DWORD SpecialEEThreads[10];
11000 static LONG cnt_SpecialEEThreads = 0;
// Registers the current OS thread as a special EE thread. The slot index is
// claimed atomically; overflow of the 10-entry table only asserts.
11002 void dbgOnly_IdentifySpecialEEThread()
11004 WRAPPER_NO_CONTRACT;
11006 LONG ourCount = FastInterlockIncrement(&cnt_SpecialEEThreads);
11008 _ASSERTE(ourCount < (LONG) NumItems(SpecialEEThreads));
11009 SpecialEEThreads[ourCount-1] = ::GetCurrentThreadId();
// Returns whether the current OS thread is one of the registered special
// threads, the debugger helper thread, or a thread with no Thread object.
// NOTE(review): the actual 'return TRUE/FALSE' lines are elided here.
11012 BOOL dbgOnly_IsSpecialEEThread()
11014 WRAPPER_NO_CONTRACT;
11016 DWORD ourId = ::GetCurrentThreadId();
11018 for (LONG i=0; i<cnt_SpecialEEThreads; i++)
11019 if (ourId == SpecialEEThreads[i])
11022 // If we have an EE thread doing helper thread duty, then it is temporarily
11024 #ifdef DEBUGGING_SUPPORTED
11025 if (g_pDebugInterface)
11027 //<TODO>We probably should use Thread::GetThreadId</TODO>
11028 DWORD helperID = g_pDebugInterface->GetHelperThreadID();
11029 if (helperID == ourId)
11034 //<TODO>Clean this up</TODO>
11035 if (GetThread() == NULL)
11045 // There is an MDA which can detect illegal reentrancy into the CLR. For instance, if you call managed
11046 // code from a native vectored exception handler, this might cause a reverse PInvoke to occur. But if the
11047 // exception was triggered from code that was executing in cooperative GC mode, we now have GC holes and
11048 // general corruption.
11049 #ifdef MDA_SUPPORTED
// Slow path: fires the Reentrancy MDA when the current thread re-entered the
// CLR while in cooperative GC mode. Kept NOINLINE so the fast path stays small.
11050 NOINLINE BOOL HasIllegalReentrancyRare()
11061 Thread *pThread = GetThread();
// Preemptive mode (or no Thread) means the reentrancy is legal.
11062 if (pThread == NULL || !pThread->PreemptiveGCDisabled())
11065 BEGIN_ENTRYPOINT_VOIDRET;
11066 MDA_TRIGGER_ASSISTANT(Reentrancy, ReportViolation());
11067 END_ENTRYPOINT_VOIDRET;
11072 // Actually fire the Reentrancy probe, if warranted.
// Fast path: only consults the rare path when the Reentrancy MDA is enabled.
11073 BOOL HasIllegalReentrancy()
11084 #ifdef MDA_SUPPORTED
11085 if (NULL == MDA_GET_ASSISTANT(Reentrancy))
11087 return HasIllegalReentrancyRare();
11090 #endif // MDA_SUPPORTED
11094 #endif // #ifndef DACCESS_COMPILE
11096 #ifdef DACCESS_COMPILE
// DAC: reports this STATIC_DATA structure's memory to the dump enumerator.
11099 STATIC_DATA::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
11101 WRAPPER_NO_CONTRACT;
11103 DAC_ENUM_STHIS(STATIC_DATA);
// DAC: enumerates the memory reachable from this Thread — its domain,
// context, filter context, last-thrown object, exception state, thread-local
// block, the explicit Frame chain, and finally the stack walk (via
// EnumMemoryRegionsWorker), swallowing enumeration failures.
11107 Thread::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
11109 WRAPPER_NO_CONTRACT;
// Full dumps only; mini/triage dumps skip domain and context payloads.
11112 if (flags != CLRDATA_ENUM_MEM_MINI && flags != CLRDATA_ENUM_MEM_TRIAGE)
11114 if (m_pDomain.IsValid())
11116 m_pDomain->EnumMemoryRegions(flags, true);
11119 if (m_Context.IsValid())
11121 m_Context->EnumMemoryRegions(flags);
11125 if (m_debuggerFilterContext.IsValid())
11127 m_debuggerFilterContext.EnumMem();
11130 OBJECTHANDLE_EnumMemoryRegions(m_LastThrownObjectHandle);
11132 m_ExceptionState.EnumChainMemoryRegions(flags);
11134 // Like the old thread static implementation, we only enumerate
11135 // the current TLB. Should we be enumerating all of the TLBs?
11136 if (m_pThreadLocalBlock.IsValid())
11137 m_pThreadLocalBlock->EnumMemoryRegions(flags);
11139 if (flags != CLRDATA_ENUM_MEM_MINI && flags != CLRDATA_ENUM_MEM_TRIAGE)
11143 // Allow all of the frames on the stack to enumerate
11147 PTR_Frame frame = m_pFrame;
11148 while (frame.IsValid() &&
11149 frame.GetAddr() != dac_cast<TADDR>(FRAME_TOP))
11151 frame->EnumMemoryRegions(flags);
11152 frame = frame->m_Next;
11157 // Try and do a stack trace and save information
11158 // for each part of the stack. This is very vulnerable
11159 // to memory problems so ignore all exceptions here.
11162 CATCH_ALL_EXCEPT_RETHROW_COR_E_OPERATIONCANCELLED
11164 EnumMemoryRegionsWorker(flags);
// DAC: walks this thread's stack with a StackFrameIterator and reports the
// memory needed for debugger stack walks — code around each call site, the
// frame's AppDomain (full dumps), funceval frame data, MethodDesc regions,
// and (WIN64EXCEPTIONS+FEATURE_PREJIT) unwind info for frameless methods.
// Aborts the walk on any sign of stack corruption (non-increasing, misaligned,
// or out-of-range SP).
// FIX(review): four occurrences of '&regDisp' had been corrupted into
// '®Disp' (HTML-entity mangling of "&reg"); restored below. No other
// code tokens changed. NOTE(review): many lines are elided in this excerpt.
11169 Thread::EnumMemoryRegionsWorker(CLRDataEnumMemoryFlags flags)
11171 WRAPPER_NO_CONTRACT;
11179 BOOL DacGetThreadContext(Thread* thread, T_CONTEXT* context);
11180 REGDISPLAY regDisp;
11181 StackFrameIterator frameIter;
11183 TADDR previousSP = 0; //start at zero; this allows first check to always succeed.
11186 // Init value. The Limit itself is not legal, so move one target pointer size to the smallest-magnitude
11188 currentSP = dac_cast<TADDR>(m_CacheStackLimit) + sizeof(TADDR);
// Prefer the debugger's filter context when one is set; otherwise read the
// context from the target.
11190 if (GetFilterContext())
11192 context = *GetFilterContext();
11196 DacGetThreadContext(this, &context);
11199 FillRegDisplay(&regDisp, &context);
11200 frameIter.Init(this, NULL, &regDisp, 0);
11201 while (frameIter.IsValid())
11204 // There are identical stack pointer checking semantics in code:ClrDataAccess::EnumMemWalkStackHelper
11205 // You ***MUST*** maintain identical semantics for both checks!
11208 // Before we continue, we should check to be sure we have a valid
11209 // stack pointer. This is to prevent stacks that are not walked
11211 // a) stack corruption bugs
11212 // b) bad stack walks
11213 // from continuing on indefinitely.
11215 // We will force SP to strictly increase.
11216 // this check can only happen for real stack frames (i.e. not for explicit frames that don't update the RegDisplay)
11217 // for ia64, SP may be equal, but in this case BSP must strictly decrease.
11218 // We will force SP to be properly aligned.
11219 // We will force SP to be in the correct range.
11221 if (frameIter.GetFrameState() == StackFrameIterator::SFITER_FRAMELESS_METHOD)
11223 // This check cannot be applied to explicit frames; they may not move the SP at all.
11224 // Also, a single function can push several on the stack at a time with no guarantees about
11225 // ordering so we can't check that the addresses of the explicit frames are monotonically increasing.
11226 // There is the potential that the walk will not terminate if a set of explicit frames reference
11227 // each other circularly. While we could choose a limit for the number of explicit frames allowed
11228 // in a row like the total stack size/pointer size, we have no known problems with this scenario.
11229 // Thus for now we ignore it.
11230 currentSP = (TADDR)GetRegdisplaySP(&regDisp);
11232 if (currentSP <= previousSP)
11234 _ASSERTE(!"Target stack has been corrupted, SP for current frame must be larger than previous frame.");
11239 // On windows desktop, the stack pointer should be a multiple
11240 // of pointer-size-aligned in the target address space
11241 if (currentSP % sizeof(TADDR) != 0)
11243 _ASSERTE(!"Target stack has been corrupted, SP must be aligned.");
11247 if (!IsAddressInStack(currentSP))
11249 _ASSERTE(!"Target stack has been corrupted, SP must in in the stack range.");
11253 // Enumerate the code around the call site to help debugger stack walking heuristics
11254 PCODE callEnd = GetControlPC(&regDisp);
11255 DacEnumCodeForStackwalk(callEnd);
11257 if (flags != CLRDATA_ENUM_MEM_MINI && flags != CLRDATA_ENUM_MEM_TRIAGE)
11259 if (frameIter.m_crawl.GetAppDomain())
11261 frameIter.m_crawl.GetAppDomain()->EnumMemoryRegions(flags, true);
11265 // To stackwalk through funceval frames, we need to be sure to preserve the
11266 // DebuggerModule's m_pRuntimeDomainFile. This is the only case that doesn't use the current
11267 // vmDomainFile in code:DacDbiInterfaceImpl::EnumerateInternalFrames. The following
11268 // code mimics that function.
11269 // Allow failure, since we want to continue attempting to walk the stack regardless of the outcome.
11272 if ((frameIter.GetFrameState() == StackFrameIterator::SFITER_FRAME_FUNCTION) ||
11273 (frameIter.GetFrameState() == StackFrameIterator::SFITER_SKIPPED_FRAME_FUNCTION))
11275 Frame * pFrame = frameIter.m_crawl.GetFrame();
11276 g_pDebugInterface->EnumMemoryRegionsIfFuncEvalFrame(flags, pFrame);
11279 EX_CATCH_RETHROW_ONLY_COR_E_OPERATIONCANCELLED
11281 MethodDesc* pMD = frameIter.m_crawl.GetFunction();
11284 pMD->EnumMemoryRegions(flags);
11285 #if defined(WIN64EXCEPTIONS) && defined(FEATURE_PREJIT)
11286 // Enumerate unwind info
11287 // Note that we don't do this based on the MethodDesc because in theory there isn't a 1:1 correspondence
11288 // between MethodDesc and code (and so unwind info, and even debug info). Eg., EnC creates new versions
11289 // of the code, but the MethodDesc always points at the latest version (which isn't necessarily
11290 // the one on the stack). In practice this is unlikely to be a problem since wanting a minidump
11291 // and making EnC edits are usually mutually exclusive.
11292 if (frameIter.m_crawl.IsFrameless())
11294 frameIter.m_crawl.GetJitManager()->EnumMemoryRegionsForMethodUnwindInfo(flags, frameIter.m_crawl.GetCodeInfo());
11296 #endif // defined(WIN64EXCEPTIONS) && defined(FEATURE_PREJIT)
11299 previousSP = currentSP;
11301 if (frameIter.Next() != SWA_CONTINUE)
// DAC: reports the ThreadStore pointer and structure, then walks the thread
// list enumerating each Thread's memory, tolerating per-thread failures so
// one corrupt thread does not stop enumeration of the rest.
11309 ThreadStore::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
11312 WRAPPER_NO_CONTRACT;
11314 // This will write out the context of the s_pThreadStore. ie
11315 // just the pointer
11317 s_pThreadStore.EnumMem();
11318 if (s_pThreadStore.IsValid())
11320 // write out the whole ThreadStore structure
11321 DacEnumHostDPtrMem(s_pThreadStore);
11323 // The thread list may be corrupt, so just
11324 // ignore exceptions during enumeration.
11327 Thread* thread = s_pThreadStore->m_ThreadList.GetHead();
// Cap the walk at the recorded count so a cyclic/corrupt list terminates.
11328 LONG dwNumThreads = s_pThreadStore->m_ThreadCount;
11330 for (LONG i = 0; (i < dwNumThreads) && (thread != NULL); i++)
11332 // Even if this thread is totally broken and we can't enum it, struggle on.
11333 // If we do not, we will leave this loop and not enum stack memory for any further threads.
11334 CATCH_ALL_EXCEPT_RETHROW_COR_E_OPERATIONCANCELLED(
11335 thread->EnumMemoryRegions(flags);
11337 thread = s_pThreadStore->m_ThreadList.GetNext(thread);
11340 EX_CATCH_RETHROW_ONLY_COR_E_OPERATIONCANCELLED
11347 #ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING
11348 // For the purposes of tracking resource usage we implement a simple cpu resource usage counter on each
11349 // thread. Every time QueryThreadProcessorUsage() is invoked it returns the amount of cpu time (a combination
11350 // of user and kernel mode time) used since the last call to QueryThreadProcessorUsage(). The result is in 100
11351 // nanosecond units.
// Returns the delta in CPU time since the previous call; the baseline is
// updated with an interlocked compare-exchange so concurrent callers do not
// double-report the same usage. NOTE(review): the early-return path after the
// GetThreadTimes failure printf is elided in this excerpt.
11352 ULONGLONG Thread::QueryThreadProcessorUsage()
11354 LIMITED_METHOD_CONTRACT;
11356 // Get current values for the amount of kernel and user time used by this thread over its entire lifetime.
11357 FILETIME sCreationTime, sExitTime, sKernelTime, sUserTime;
11358 HANDLE hThread = GetThreadHandle();
11359 BOOL fResult = GetThreadTimes(hThread,
// NOTE(review): %d with a DWORD error code is benign on Windows; handle print
// uses %p as expected.
11367 ULONG error = GetLastError();
11368 printf("GetThreadTimes failed: %d; handle is %p\n", error, hThread);
11374 // Combine the user and kernel times into a single value (FILETIME is just a structure representing an
11375 // unsigned int64 in two 32-bit pieces).
11376 _ASSERTE(sizeof(FILETIME) == sizeof(UINT64));
11377 ULONGLONG ullCurrentUsage = *(ULONGLONG*)&sKernelTime + *(ULONGLONG*)&sUserTime;
11379 // Store the current processor usage as the new baseline, and retrieve the previous usage.
11380 ULONGLONG ullPreviousUsage = VolatileLoad(&m_ullProcessorUsageBaseline);
// If another thread already advanced the baseline past us (or time appears
// to not advance), report zero-by-way-of-the-elided-return rather than a
// negative/duplicate delta.
11381 if (ullPreviousUsage >= ullCurrentUsage ||
11382 ullPreviousUsage != (ULONGLONG)InterlockedCompareExchange64(
11383 (LONGLONG*)&m_ullProcessorUsageBaseline,
11384 (LONGLONG)ullCurrentUsage,
11385 (LONGLONG)ullPreviousUsage))
11387 // another thread beat us to it, and already reported this usage.
11391 // The result is the difference between this value and the previous usage value.
11392 return ullCurrentUsage - ullPreviousUsage;
11394 #endif // FEATURE_APPDOMAIN_RESOURCE_MONITORING