return CORDBG_E_FUNC_EVAL_BAD_START_POINT;
}
- if (MethodDescBackpatchInfoTracker::IsLockOwnedByAnyThread())
- {
- // A thread may have suspended for the debugger while holding the slot backpatching lock while trying to enter
- // cooperative GC mode. If the FuncEval calls a method that is eligible for slot backpatching (virtual or interface
- // methods that are eligible for tiering), the FuncEval may deadlock on trying to acquire the same lock. Fail the
- // FuncEval to avoid the issue.
- return CORDBG_E_FUNC_EVAL_BAD_START_POINT;
- }
-
// Create a DebuggerEval to hold info about this eval while its in progress. Constructor copies the thread's
// CONTEXT.
DebuggerEval *pDE = new (interopsafe, nothrow) DebuggerEval(filterContext, pEvalInfo, fInException);
{
CodeVersionManager *codeVersionManager = appDomain->GetCodeVersionManager();
- MethodDescBackpatchInfoTracker::PollForDebuggerSuspension();
- MethodDescBackpatchInfoTracker::ConditionalLockHolder slotBackpatchLockHolder;
+ MethodDescBackpatchInfoTracker::ConditionalLockHolderForGCCoop slotBackpatchLockHolder;
// Backpatching entry point slots requires cooperative GC mode, see
// MethodDescBackpatchInfoTracker::Backpatch_Locked(). The code version manager's table lock is an unsafe lock that
TieredCompilationManager *tieredCompilationManager = GetAppDomain()->GetTieredCompilationManager();
bool scheduleTieringBackgroundWork = false;
{
- MethodDescBackpatchInfoTracker::PollForDebuggerSuspension();
- MethodDescBackpatchInfoTracker::ConditionalLockHolder slotBackpatchLockHolder;
+ MethodDescBackpatchInfoTracker::ConditionalLockHolderForGCCoop slotBackpatchLockHolder;
ThreadSuspend::SuspendEE(ThreadSuspend::SUSPEND_OTHER);
struct AutoRestartEE
do
{
bool mayHaveEntryPointSlotsToBackpatch = doPublish && pMethodDesc->MayHaveEntryPointSlotsToBackpatch();
- MethodDescBackpatchInfoTracker::ConditionalLockHolder slotBackpatchLockHolder(mayHaveEntryPointSlotsToBackpatch);
+ MethodDescBackpatchInfoTracker::ConditionalLockHolderForGCCoop slotBackpatchLockHolder(
+ mayHaveEntryPointSlotsToBackpatch);
// Try a faster check to see if we can avoid checking the currently active code version
// - For the default code version, if a profiler is attached it may be notified of JIT events and may request rejit
#endif // _DEBUG
+CrstBase::CrstAndForbidSuspendForDebuggerHolder::CrstAndForbidSuspendForDebuggerHolder(CrstBase *pCrst)
+ : m_pCrst(pCrst), m_pThreadForExitingForbidRegion(nullptr)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ }
+ CONTRACTL_END;
+
+ if (pCrst == nullptr)
+ {
+ return;
+ }
+
+ // Reentrant locks are currently not supported
+ _ASSERTE((pCrst->m_dwFlags & CRST_REENTRANCY) == 0);
+
+ Thread *pThread = GetThread();
+ _ASSERTE(pThread != nullptr);
+ if (pThread->IsInForbidSuspendForDebuggerRegion())
+ {
+ AcquireLock(pCrst);
+ return;
+ }
+
+ while (true)
+ {
+ // Enter the forbid region and make that state change visible to other threads (memory barrier) before checking for the
+ // TS_DebugSuspendPending state. The other interacting thread, in SysStartSuspendForDebug(), sets TS_DebugSuspendPending
+ // and makes that state change visible to other threads (memory barrier) before checking whether this thread is in
+ // the forbid region. This ensures that in race conditions where both threads update the respective state and make the
+ // state change visible to other threads, at least one of those threads will see the state change made by the other
+ // thread. If this thread sees the state change (TS_DebugSuspendPending), it will avoid entering the forbid region by
+ // exiting the lock and pulsing the GC mode to try suspending for the debugger. If SysStartSuspendForDebug() sees the
+ // state change (that this thread is in the forbid region), then it will flag this thread appropriately to sync for
+ // suspend, and the debugger will later identify this thread as synced after this thread leaves the forbid region.
+ //
+ // The forbid region could be entered after acquiring the lock, but an additional memory barrier would be necessary. It
+ // is entered before the lock just to make use of the implicit memory barrier from acquiring the lock. In any case, a
+ // prerequisite for entering a lock along with a forbid-suspend-for-debugger region is that the lock is not held for
+ // long, so that the thread can still suspend for the debugger in reasonable time.
+ pThread->EnterForbidSuspendForDebuggerRegion();
+ AcquireLock(pCrst); // implicit full memory barrier on all supported platforms
+
+ // This can be an opportunistic check (instead of a volatile load), because if the GC mode change below does not suspend
+ // for the debugger (which is also possible with a volatile load), it will just loop around and try again if necessary
+ if (!pThread->HasThreadStateOpportunistic(Thread::TS_DebugSuspendPending))
+ {
+ m_pThreadForExitingForbidRegion = pThread;
+ return;
+ }
+
+ // Cannot remain in the forbid region while a suspend for the debugger is pending, because there are likely to be
+ // subsequent changes to the GC mode inside the lock and this thread needs to suspend for the debugger in reasonable
+ // time. Exit the lock and the forbid region, and pulse the GC mode to suspend for the debugger.
+ ReleaseLock(pCrst);
+ pThread->ExitForbidSuspendForDebuggerRegion();
+ GCX_COOP();
+ }
+}
+
+CrstBase::CrstAndForbidSuspendForDebuggerHolder::~CrstAndForbidSuspendForDebuggerHolder()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ }
+ CONTRACTL_END;
+
+ if (m_pCrst == nullptr)
+ {
+ return;
+ }
+
+ ReleaseLock(m_pCrst);
+ if (m_pThreadForExitingForbidRegion != nullptr)
+ {
+ m_pThreadForExitingForbidRegion->ExitForbidSuspendForDebuggerRegion();
+ }
+}
+
#endif // !DACCESS_COMPILE
#ifdef TEST_DATA_CONSISTENCY
{
m_dwFlags = 0;
}
+
// ------------------------------- Holders ------------------------------
- public:
- //
- // CrstHolder is optimized for the common use that takes the lock in constructor
- // and releases it in destructor. Users that require all Holder features
- // can use CrstHolderWithState.
- //
+public:
+ //
+ // CrstHolder is optimized for the common use that takes the lock in constructor
+ // and releases it in destructor. Users that require all Holder features
+ // can use CrstHolderWithState.
+ //
class CrstHolder
{
CrstBase * m_pCrst;
// Generally, it's better to use a regular CrstHolder, and then use the Release() / Acquire() methods on it.
// This just exists to convert legacy OS Critical Section patterns over to holders.
typedef DacHolder<CrstBase *, CrstBase::ReleaseLock, CrstBase::AcquireLock, 0, CompareDefault> UnsafeCrstInverseHolder;
+
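+ // Acquires the Crst and, for the duration of the holder, keeps the thread in a forbid-suspend-for-debugger region so
+ // that the thread does not suspend for the debugger while holding the lock, which could otherwise deadlock a FuncEval
+ // that needs the same lock. Must be constructed in preemptive GC mode. For example (sketch, with a hypothetical lock):
+ //     {
+ //         CrstAndForbidSuspendForDebuggerHolder lockAndForbidSuspendHolder(&m_someCrst);
+ //         ... // may toggle GC mode here without suspending for the debugger
+ //     }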
+ class CrstAndForbidSuspendForDebuggerHolder
+ {
+ private:
+ CrstBase *m_pCrst;
+ Thread *m_pThreadForExitingForbidRegion;
+
+ public:
+ CrstAndForbidSuspendForDebuggerHolder(CrstBase *pCrst);
+ ~CrstAndForbidSuspendForDebuggerHolder();
+ };
};
typedef CrstBase::CrstHolder CrstHolder;
typedef CrstBase::CrstHolderWithState CrstHolderWithState;
+typedef CrstBase::CrstAndForbidSuspendForDebuggerHolder CrstAndForbidSuspendForDebuggerHolder;

// The CRST.
class Crst : public CrstBase
if (setTargetAfterAddingToHashTable)
{
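+ // Switch to preemptive GC mode to acquire the slot backpatching lock; the conditional lock holder below requires
+ // MODE_PREEMPTIVE (see its contract)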
+ GCX_PREEMP();
+
_ASSERTE(pMD->IsVersionableWithVtableSlotBackpatch());
PCODE temporaryEntryPoint = pMD->GetTemporaryEntryPoint();
- MethodDescBackpatchInfoTracker::ConditionalLockHolder slotBackpatchLockHolder;
+ MethodDescBackpatchInfoTracker::ConditionalLockHolderForGCPreemp slotBackpatchLockHolder;
// Set the funcptr stub's entry point to the current entry point inside the lock and after the funcptr stub is exposed,
// to synchronize with backpatching in MethodDesc::BackpatchEntryPointSlots()
{
WRAPPER_NO_CONTRACT;
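+ // The slot backpatching lock below is acquired in preemptive GC mode, as required by the lock holder's contract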
+ GCX_PREEMP();
+
LoaderAllocator *mdLoaderAllocator = GetLoaderAllocator();
- MethodDescBackpatchInfoTracker::ConditionalLockHolder slotBackpatchLockHolder;
+ MethodDescBackpatchInfoTracker::ConditionalLockHolderForGCCoop slotBackpatchLockHolder;
RecordAndBackpatchEntryPointSlot_Locked(
mdLoaderAllocator,
// MethodDescBackpatchInfoTracker
CrstStatic MethodDescBackpatchInfoTracker::s_lock;
-bool MethodDescBackpatchInfoTracker::s_isLocked = false;
#ifndef DACCESS_COMPILE
}
#endif // _DEBUG
-#ifndef DACCESS_COMPILE
-void MethodDescBackpatchInfoTracker::PollForDebuggerSuspension()
-{
- CONTRACTL
- {
- NOTHROW;
- GC_TRIGGERS;
- MODE_PREEMPTIVE;
- }
- CONTRACTL_END;
-
- _ASSERTE(!IsLockOwnedByCurrentThread());
-
- // If suspension is pending for the debugger, pulse the GC mode to suspend the thread here. Following this call, typically
- // the lock is acquired and the GC mode is changed, and suspending there would cause FuncEvals to fail (see
- // Debugger::FuncEvalSetup() at the reference to IsLockOwnedByAnyThread()). Since this thread is in preemptive mode, the
- // debugger may think it's already suspended and it would be unfortunate to suspend the thread with the lock held.
- Thread *thread = GetThread();
- _ASSERTE(thread != nullptr);
- if (thread->HasThreadState(Thread::TS_DebugSuspendPending))
- {
- GCX_COOP();
- }
-}
-#endif
-
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
{
private:
static CrstStatic s_lock;
- static bool s_isLocked;
class BackpatchInfoTrackerHashTraits : public NoRemoveDefaultCrossLoaderAllocatorHashTraits<MethodDesc *, UINT_PTR>
{
static bool IsLockOwnedByCurrentThread();
#endif
-#ifndef DACCESS_COMPILE
public:
- static bool IsLockOwnedByAnyThread()
+ // To be used when the thread will remain in preemptive GC mode while holding the lock
+ class ConditionalLockHolderForGCPreemp : private CrstHolderWithState
{
- LIMITED_METHOD_CONTRACT;
- return VolatileLoadWithoutBarrier(&s_isLocked);
- }
-
- static void PollForDebuggerSuspension();
-#endif
-
-public:
- class ConditionalLockHolder : private CrstHolderWithState
- {
- private:
- bool m_isLocked;
-
public:
- ConditionalLockHolder(bool acquireLock = true)
+ ConditionalLockHolderForGCPreemp(bool acquireLock = true)
: CrstHolderWithState(
#ifndef DACCESS_COMPILE
acquireLock ? &s_lock : nullptr
#else
nullptr
#endif
- ),
- m_isLocked(false)
+ )
{
- WRAPPER_NO_CONTRACT;
-
- #ifndef DACCESS_COMPILE
- if (acquireLock)
+ CONTRACTL
{
- _ASSERTE(IsLockOwnedByCurrentThread());
- _ASSERTE(!s_isLocked);
- m_isLocked = true;
- s_isLocked = true;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
}
- #endif
+ CONTRACTL_END;
}
- ~ConditionalLockHolder()
- {
- WRAPPER_NO_CONTRACT;
+ DISABLE_COPY(ConditionalLockHolderForGCPreemp);
+ };
- #ifndef DACCESS_COMPILE
- if (m_isLocked)
+#ifndef DACCESS_COMPILE
+public:
+ // To be used when the thread may enter cooperative GC mode while holding the lock. The thread enters a
+ // forbid-suspend-for-debugger region along with acquiring the lock, such that it would not suspend for the debugger while
+ // holding the lock, as that may otherwise cause a FuncEval to deadlock when trying to acquire the lock.
+ class ConditionalLockHolderForGCCoop : private CrstAndForbidSuspendForDebuggerHolder
+ {
+ public:
+ ConditionalLockHolderForGCCoop(bool acquireLock = true)
+ : CrstAndForbidSuspendForDebuggerHolder(acquireLock ? &s_lock : nullptr)
+ {
+ CONTRACTL
{
- _ASSERTE(IsLockOwnedByCurrentThread());
- _ASSERTE(s_isLocked);
- s_isLocked = false;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
}
- #endif
+ CONTRACTL_END;
}
- DISABLE_COPY(ConditionalLockHolder);
+ DISABLE_COPY(ConditionalLockHolderForGCCoop);
};
+#endif
public:
MethodDescBackpatchInfoTracker()
// Only take the lock if the method is versionable with vtable slot backpatch, for recording slots and synchronizing with
// backpatching slots
- MethodDescBackpatchInfoTracker::ConditionalLockHolder slotBackpatchLockHolder(isVersionableWithVtableSlotBackpatch);
+ MethodDescBackpatchInfoTracker::ConditionalLockHolderForGCCoop slotBackpatchLockHolder(
+ isVersionableWithVtableSlotBackpatch);
// Get the method entry point inside the lock above to synchronize with backpatching in
// MethodDesc::BackpatchEntryPointSlots()
SHash<CodeActivationBatchTraits>::Iterator endIter = mgrToCodeActivationBatch.End();
{
- MethodDescBackpatchInfoTracker::ConditionalLockHolder slotBackpatchLockHolder;
+ MethodDescBackpatchInfoTracker::ConditionalLockHolderForGCCoop slotBackpatchLockHolder;
for (SHash<CodeActivationBatchTraits>::Iterator iter = beginIter; iter != endIter; iter++)
{
m_DeserializationTracker = NULL;
m_currentPrepareCodeConfig = nullptr;
+ m_isInForbidSuspendForDebuggerRegion = false;
#ifdef _DEBUG
memset(dangerousObjRefs, 0, sizeof(dangerousObjRefs));
private:
PrepareCodeConfig *m_currentPrepareCodeConfig;
+
+#ifndef DACCESS_COMPILE
+public:
+ bool IsInForbidSuspendForDebuggerRegion() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_isInForbidSuspendForDebuggerRegion;
+ }
+
+ void EnterForbidSuspendForDebuggerRegion();
+ void ExitForbidSuspendForDebuggerRegion();
+#endif
+
+private:
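+ // Set while this thread must not be suspended for the debugger, e.g. while holding a lock that a FuncEval may also need
+ // (see CrstBase::CrstAndForbidSuspendForDebuggerHolder). Written only by the owning thread; also read during debugger
+ // suspension (SysStartSuspendForDebug() / SysSweepThreadsForDebug()).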
+ bool m_isInForbidSuspendForDebuggerRegion;
};
// End of class Thread
config->SetNextInSameThread(nullptr);
}
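+// The forbid-suspend-for-debugger flag is only written by the owning thread (asserted below). Other threads read it during
+// debugger suspension; see SysStartSuspendForDebug() and CrstBase::CrstAndForbidSuspendForDebuggerHolder for how the race
+// with a pending suspend is handled.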
+inline void Thread::EnterForbidSuspendForDebuggerRegion()
+{
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(this == GetThread());
+
+ _ASSERTE(!m_isInForbidSuspendForDebuggerRegion);
+ m_isInForbidSuspendForDebuggerRegion = true;
+}
+
+inline void Thread::ExitForbidSuspendForDebuggerRegion()
+{
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(this == GetThread());
+
+ _ASSERTE(m_isInForbidSuspendForDebuggerRegion);
+ m_isInForbidSuspendForDebuggerRegion = false;
+}
+
#ifdef HOST_WINDOWS
inline size_t Thread::GetOffsetOfThreadStatic(void* pThreadStatic)
{
// waiting for GC
_ASSERTE ((m_StateNC & Thread::TSNC_OwnsSpinLock) == 0);
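+ // A thread that owns the slot backpatching lock must be in a forbid-suspend-for-debugger region when changing GC mode;
+ // otherwise it could suspend for the debugger while holding the lock and deadlock a FuncEval that needs the same lock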
+ _ASSERTE(!MethodDescBackpatchInfoTracker::IsLockOwnedByCurrentThread() || IsInForbidSuspendForDebuggerRegion());
+
if (!GCHeapUtilities::IsGCHeapInitialized())
{
goto Exit;
// holding a spin lock in coop mode and transit to preemp mode will cause deadlock on GC
_ASSERTE ((m_StateNC & Thread::TSNC_OwnsSpinLock) == 0);
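+ // GC mode changes while holding the slot backpatching lock require a forbid-suspend-for-debugger region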
+ _ASSERTE(!MethodDescBackpatchInfoTracker::IsLockOwnedByCurrentThread() || IsInForbidSuspendForDebuggerRegion());
+
FastInterlockOr (&m_fPreemptiveGCDisabled, 0);
#if defined(STRESS_HEAP) && defined(_DEBUG)
// for GC, the fact that we are leaving the EE means that it no longer needs to
// suspend us. But if we are doing a non-GC suspend, we need to block now.
// Give the debugger precedence over user suspensions:
- while (m_State & TS_DebugSuspendPending)
+ while ((m_State & TS_DebugSuspendPending) && !IsInForbidSuspendForDebuggerRegion())
{
#ifdef DEBUGGING_SUPPORTED
// and the moment we enable TrapReturningThreads in MarkForSuspension. However,
// nothing bad happens if the thread has transitioned to preemptive before marking
// the thread for suspension; the thread will later be identified as Synced in
- // SysSweepThreadsForDebug
+ // SysSweepThreadsForDebug.
+ //
+ // If the thread transitions to preemptive mode and into a forbid-suspend-for-debugger
+ // region, SysSweepThreadsForDebug would similarly identify the thread as synced
+ // after it leaves the forbid region.
#else // DISABLE_THREADSUSPEND
// Resume the thread and let it run to a safe point
thread->ResumeThread();
// they attempt to re-enter they will trip.
thread->MarkForSuspension(TS_DebugSuspendPending);
+ if (
#ifdef DISABLE_THREADSUSPEND
- // There'a a race above between the moment we first check m_fPreemptiveGCDisabled
- // and the moment we enable TrapReturningThreads in MarkForSuspension. To account
- // for that we check whether the thread moved into cooperative mode, and if it had
- // we mark it as a DebugWillSync thread, that will be handled later in
- // SysSweepThreadsForDebug
- if (thread->m_fPreemptiveGCDisabled)
+ // There's a race above between the moment we first check m_fPreemptiveGCDisabled
+ // and the moment we enable TrapReturningThreads in MarkForSuspension. To account
+ // for that we check whether the thread moved into cooperative mode, and if it had
+ // we mark it as a DebugWillSync thread, that will be handled later in
+ // SysSweepThreadsForDebug.
+ thread->m_fPreemptiveGCDisabled ||
+#endif // DISABLE_THREADSUSPEND
+ // The thread may have been suspended in a forbid-suspend-for-debugger region, or
+ // before the state change to set TS_DebugSuspendPending is made visible to other
+ // threads, the thread may have transitioned into a forbid region. In either case,
+ // flag the thread as TS_DebugWillSync and let SysSweepThreadsForDebug later
+ // identify the thread as synced after the thread leaves the forbid region.
+ thread->IsInForbidSuspendForDebuggerRegion())
{
// Remember that this thread will be running to a safe point
FastInterlockIncrement(&m_DebugWillSyncCount);
thread->SetThreadState(TS_DebugWillSync);
}
-#else // DISABLE_THREADSUSPEND
+
+#ifndef DISABLE_THREADSUSPEND
if (str == STR_Success) {
thread->ResumeThread();
}
-#endif // DISABLE_THREADSUSPEND
+#endif // !DISABLE_THREADSUSPEND
LOG((LF_CORDB, LL_INFO1000,
"[0x%x] SUSPEND: gc enabled.\n", thread->GetThreadId()));
// this thread will happen after any earlier writes on a different
// thread.
FastInterlockOr(&thread->m_fPreemptiveGCDisabled, 0);
- if (!thread->m_fPreemptiveGCDisabled)
+ if (!thread->m_fPreemptiveGCDisabled && !thread->IsInForbidSuspendForDebuggerRegion())
{
- // If the thread toggled to preemptive mode, then it's synced.
+ // If the thread toggled to preemptive mode and is not in a
+ // forbid-suspend-for-debugger region, then it's synced.
goto Label_MarkThreadAsSynced;
}
else
else if (str == STR_SwitchedOut)
{
// The thread was switched b/c of fiber-mode stuff.
- if (!thread->m_fPreemptiveGCDisabled)
+ if (!thread->m_fPreemptiveGCDisabled && !thread->IsInForbidSuspendForDebuggerRegion())
{
goto Label_MarkThreadAsSynced;
}
}
else if (!thread->m_fPreemptiveGCDisabled)
{
- // If the thread toggled to preemptive mode, then it's synced.
+ // If the thread toggled to preemptive mode and is not in a
+ // forbid-suspend-for-debugger region, then it's synced.
// We can safely resume the thread here b/c it's in PreemptiveMode and the
// EE will trap anybody trying to re-enter cooperative. So letting it run free
// won't hurt the runtime.
+ //
+ // If the thread is in a forbid-suspend-for-debugger region, the thread needs to
+ // be resumed to give it a chance to leave the forbid region. The EE will also
+ // trap the thread if it tries to re-enter a forbid region.
_ASSERTE(str == STR_Success);
thread->ResumeThread();
- goto Label_MarkThreadAsSynced;
+ if (!thread->IsInForbidSuspendForDebuggerRegion())
+ {
+ goto Label_MarkThreadAsSynced;
+ }
+ else
+ {
+ continue;
+ }
}
#if defined(FEATURE_HIJACK) && !defined(TARGET_UNIX)
// If the thread is in jitted code, HandledJitCase will try to hijack it; and the hijack
COUNT_T methodCount = methodsPendingCounting->GetCount();
CodeVersionManager *codeVersionManager = GetAppDomain()->GetCodeVersionManager();
- MethodDescBackpatchInfoTracker::PollForDebuggerSuspension();
- MethodDescBackpatchInfoTracker::ConditionalLockHolder slotBackpatchLockHolder;
+ MethodDescBackpatchInfoTracker::ConditionalLockHolderForGCCoop slotBackpatchLockHolder;
// Backpatching entry point slots requires cooperative GC mode, see
// MethodDescBackpatchInfoTracker::Backpatch_Locked(). The code version manager's table lock is an unsafe lock that
HRESULT hr = S_OK;
{
bool mayHaveEntryPointSlotsToBackpatch = pMethod->MayHaveEntryPointSlotsToBackpatch();
- if (mayHaveEntryPointSlotsToBackpatch)
- {
- MethodDescBackpatchInfoTracker::PollForDebuggerSuspension();
- }
- MethodDescBackpatchInfoTracker::ConditionalLockHolder slotBackpatchLockHolder(mayHaveEntryPointSlotsToBackpatch);
+ MethodDescBackpatchInfoTracker::ConditionalLockHolderForGCCoop slotBackpatchLockHolder(
+ mayHaveEntryPointSlotsToBackpatch);
// Backpatching entry point slots requires cooperative GC mode, see
// MethodDescBackpatchInfoTracker::Backpatch_Locked(). The code version manager's table lock is an unsafe lock that
TADDR slot = holder->stub()->implTargetSlot(&slotType);
pMD->RecordAndBackpatchEntryPointSlot(m_loaderAllocator, slot, slotType);
- // RecordAndBackpatchEntryPointSlot() takes a lock that would exit and reenter cooperative GC mode
+ // RecordAndBackpatchEntryPointSlot() may exit and reenter cooperative GC mode
*pMayHaveReenteredCooperativeGCMode = true;
}
#endif
TADDR slot = holder->stub()->implTargetSlot(&slotType);
pMD->RecordAndBackpatchEntryPointSlot(m_loaderAllocator, slot, slotType);
- // RecordAndBackpatchEntryPointSlot() takes a lock that would exit and reenter cooperative GC mode
+ // RecordAndBackpatchEntryPointSlot() may exit and reenter cooperative GC mode
*pMayHaveReenteredCooperativeGCMode = true;
}
#endif
(TADDR)&e->target,
EntryPointSlots::SlotType_Normal);
- // RecordAndBackpatchEntryPointSlot() takes a lock that would exit and reenter cooperative GC mode
+ // RecordAndBackpatchEntryPointSlot() may exit and reenter cooperative GC mode
*pMayHaveReenteredCooperativeGCMode = true;
}
#endif