BOOL QCALLTYPE FooNative::Bar(int flags, LPCWSTR wszString, QCall::StringHandleOnStack retString)
{
// All QCalls should have QCALL_CONTRACT.
- // It is alias for THROWS; GC_TRIGGERS; MODE_PREEMPTIVE; SO_TOLERANT.
+ // It is alias for THROWS; GC_TRIGGERS; MODE_PREEMPTIVE.
QCALL_CONTRACT;
// Optionally, use QCALL_CHECK instead and the expanded form of the contract
// Yay!
ASSERT_NO_EE_LOCKS_HELD();
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
CLR_TO_PROFILER_ENTRYPOINT((LF_CORPROF,
// Yay!
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
PROFILER_TO_CLR_ENTRYPOINT_SYNC((LF_CORPROF,
* [2.10.1.5 LOADS_TYPE(loadlevel)](#2.10.1.5)
* [2.10.1.6 CAN_TAKE_LOCK / CANNOT_TAKE_LOCK](#2.10.1.6)
* [2.10.1.7 EE_THREAD_REQUIRED / EE_THREAD_NOT_REQUIRED](#2.10.1.7)
- * [2.10.1.8 SO_TOLERANT/SO_INTOLERANT](#2.10.1.8)
- * [2.10.1.9 PRECONDITION(expr)](#2.10.1.9)
- * [2.10.1.10 POSTCONDITION(expr)](#2.10.1.10)
+ * [2.10.1.8 PRECONDITION(expr)](#2.10.1.8)
+ * [2.10.1.9 POSTCONDITION(expr)](#2.10.1.9)
* [2.10.2 Is order important?](#2.10.2)
* [2.10.3 Using the right form of contract](#2.10.3)
* [2.10.4 When is it safe to use a runtime contract?](#2.10.4)
If the latter is true, it's generally best to push BEGIN/END_GETTHREAD_ALLOWED down the callee chain so all callers benefit.
-#### <a name="2.10.1.8"/>2.10.1.8 SO_TOLERANT/SO_INTOLERANT
-
-These are related to stack probes. SO_TOLERANT means the function is written in such a way that it is safe to throw a StackOverflow exception between any two instructions. It doesn't update global state, doesn't modify data structures, and doesn't call out to the operating system.
-
-If you don't specify SO_TOLERANT, the function is treated as SO_INTOLERANT.
-
-The CLR asserts if you invoke an SO_INTOLERANT function outside the scope of a stack probe. The probe's purpose is to check in advance if sufficient stack is available and trigger the SO exception before venturing into SO_INTOLERANT code.
-
-#### <a name="2.10.1.9"/>2.10.1.9 PRECONDITION(_expr_)
+#### <a name="2.10.1.8"/>2.10.1.8 PRECONDITION(_expr_)
This is pretty self-explanatory. It is basically an **_ASSERTE.** Both _ASSERTE's and PRECONDITIONS are used widely in the codebase. The expression can evaluate to either a Boolean or a Check.
-#### <a name="2.10.1.10"/>2.10.1.10 POSTCONDITION(_expr_)
+#### <a name="2.10.1.9"/>2.10.1.9 POSTCONDITION(_expr_)
This is an expression that's tested on a _normal_ function exit. It will not be tested if an exception is thrown out of the function. Postconditions can access the function's locals provided that the locals were declared at the top level scope of the function. C++ objects will not have been destructed yet.
`MscorsnLogging` | Enables strong name logging | `DWORD` | `INTERNAL` | `0` | REGUTIL_default
`NativeImageRequire` | | `DWORD` | `EXTERNAL` | `0` | REGUTIL_default
`NestedEhOom` | | `DWORD` | `INTERNAL` | `0` | REGUTIL_default
-`NO_SO_NOT_MAINLINE` | | `DWORD` | `EXTERNAL` | `0` | REGUTIL_default
`NoGuiOnAssert` | | `DWORD` | `INTERNAL` | `INTERNAL_NoGuiOnAssert_Default` | REGUTIL_default
`NoProcedureSplitting` | | `DWORD` | `EXTERNAL` | `0` | REGUTIL_default
`NoStringInterning` | Disallows string interning. I see no value in it anymore. | `DWORD` | `INTERNAL` | `1` | REGUTIL_default
// ==--==
#include "strike.h"
#include "util.h"
-#include "genericstackprobe.h"
/**********************************************************************\
* Routine Description: *
GC_NOTRIGGER;
THROWS;
MODE_COOPERATIVE;
- SO_TOLERANT;
}
CONTRACTL_END;
MethodTable* pElemMT)
{
STATIC_CONTRACT_MODE_COOPERATIVE;
- STATIC_CONTRACT_SO_INTOLERANT;
// Ensure that the array element type is fully loaded before executing its code
pElemMT->EnsureInstanceActive();
NOTHROW;
GC_NOTRIGGER;
MODE_COOPERATIVE;
- SO_TOLERANT;
PRECONDITION(pSrc != NULL);
PRECONDITION(pDest != NULL);
}
NOTHROW;
GC_NOTRIGGER;
MODE_COOPERATIVE;
- SO_TOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
MODE_COOPERATIVE;
- SO_TOLERANT;
PRECONDITION(pSrc != NULL);
PRECONDITION(srcIndex >= 0);
PRECONDITION(pDest != NULL);
NOTHROW;
GC_NOTRIGGER;
MODE_COOPERATIVE;
- SO_TOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
MODE_COOPERATIVE;
- SO_TOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
MODE_COOPERATIVE;
- SO_TOLERANT;
}
CONTRACTL_END;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
- SO_TOLERANT;
}CONTRACTL_END;
struct
// skip this, if required.
if (IsWatsonEnabled())
{
- BEGIN_SO_INTOLERANT_CODE(pThread);
if ((gc.refExceptionForWatsonBucketing == NULL) || !SetupWatsonBucketsForFailFast(gc.refExceptionForWatsonBucketing))
{
PTR_EHWatsonBucketTracker pUEWatsonBucketTracker = pThread->GetExceptionState()->GetUEWatsonBucketTracker();
pUEWatsonBucketTracker->ClearWatsonBucketDetails();
}
}
- END_SO_INTOLERANT_CODE;
}
#endif // !FEATURE_PAL
{
CONTRACTL
{
- SO_INTOLERANT;
GC_NOTRIGGER;
PRECONDITION(CheckPointer(pCodeInfo));
}
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
}
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
CONSTRUCTOR_CHECK;
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
}
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
DESTRUCTOR_CHECK;
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
}
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
}
{
CONTRACTL
{
- SO_NOT_MAINLINE;
THROWS; // from GetJitInfo
GC_NOTRIGGER;
MODE_ANY; // don't really care what mode we're in.
{
CONTRACTL
{
- SO_NOT_MAINLINE; // take Controller lock.
NOTHROW;
GC_NOTRIGGER;
}
{
CONTRACTL
{
- SO_NOT_MAINLINE;
THROWS; // from GetJitInfo
GC_NOTRIGGER;
MODE_ANY; // don't really care what mode we're in.
{
CONTRACTL
{
- SO_NOT_MAINLINE;
THROWS; // from GetJitInfo
GC_NOTRIGGER;
MODE_ANY; // don't really care what mode we're in.
{
CONTRACTL
{
- SO_NOT_MAINLINE;
THROWS;
GC_NOTRIGGER;
MODE_ANY; // don't really care what mode we're in.
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
}
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
}
{
CONTRACTL
{
- SO_NOT_MAINLINE;
// @todo - should this throw or not?
NOTHROW;
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
}
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
}
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
}
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
}
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
}
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
}
// This can only modify controller's internal state. Can't send managed debug events.
CONTRACTL
{
- SO_NOT_MAINLINE;
GC_NOTRIGGER;
NOTHROW;
MODE_ANY;
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
}
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER; // don't send IPC events
MODE_COOPERATIVE; // TriggerUnwind always is coop
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
}
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
}
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
}
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
}
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
}
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
SENDEVENT_CONTRACT_ITEMS;
}
{
CONTRACTL
{
- SO_NOT_MAINLINE;
GC_TRIGGERS;
NOTHROW;
}
{
CONTRACTL
{
- SO_INTOLERANT;
NOTHROW;
// If this exception is for the debugger, then we may trigger a GC.
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
// Patch skippers only operate on patches set in managed code. But the infrastructure may have
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
SENDEVENT_CONTRACT_ITEMS;
}
{
CONTRACTL
{
- SO_NOT_MAINLINE;
WRAPPER(THROWS);
GC_NOTRIGGER;
PRECONDITION(ThisIsHelperThreadWorker()); // Only help initializes a stepper.
{
CONTRACTL
{
- SO_NOT_MAINLINE;
THROWS; // from GetJitInfo
GC_NOTRIGGER; // don't send IPC events
MODE_COOPERATIVE; // TriggerUnwind always is coop
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
SENDEVENT_CONTRACT_ITEMS;
}
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
SENDEVENT_CONTRACT_ITEMS;
}
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
SENDEVENT_CONTRACT_ITEMS;
}
{
CONTRACTL
{
- SO_NOT_MAINLINE;
THROWS;
SENDEVENT_CONTRACT_ITEMS;
}
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
SENDEVENT_CONTRACT_ITEMS;
}
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
SENDEVENT_CONTRACT_ITEMS;
}
{
CONTRACTL
{
- SO_NOT_MAINLINE;
MAY_DO_HELPER_THREAD_DUTY_THROWS_CONTRACT;
MAY_DO_HELPER_THREAD_DUTY_GC_TRIGGERS_CONTRACT;
}
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
}
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
}
{
CONTRACTL
{
- SO_INTOLERANT;
WRAPPER(THROWS);
WRAPPER(GC_TRIGGERS);
CONSTRUCTOR_CHECK;
NOTHROW;
GC_NOTRIGGER;
DESTRUCTOR_CHECK;
- SO_INTOLERANT;
}
CONTRACTL_END;
{
CONTRACTL
{
- SO_INTOLERANT;
NOTHROW;
GC_NOTRIGGER;
}
{
CONTRACTL
{
- SO_INTOLERANT;
NOTHROW;
GC_NOTRIGGER;
}
{
CONTRACTL
{
- SO_INTOLERANT;
THROWS;
GC_NOTRIGGER;
}
{
CONTRACTL
{
- SO_INTOLERANT;
NOTHROW;
GC_NOTRIGGER;
}
{
CONTRACTL
{
- SO_INTOLERANT;
THROWS;
GC_NOTRIGGER;
}
{
CONTRACTL
{
- SO_INTOLERANT;
NOTHROW;
GC_NOTRIGGER;
}
{
CONTRACTL
{
- SO_INTOLERANT;
THROWS;
GC_TRIGGERS;
}
{
CONTRACTL
{
- SO_INTOLERANT;
THROWS;
GC_TRIGGERS;
}
{
CONTRACTL
{
- SO_NOT_MAINLINE;
THROWS;
GC_TRIGGERS;
}
{
CONTRACTL
{
- SO_INTOLERANT;
NOTHROW;
GC_NOTRIGGER;
PRECONDITION(ThisMaybeHelperThread());
{
CONTRACTL
{
- SO_INTOLERANT;
THROWS;
GC_NOTRIGGER;
PRECONDITION(ThreadHoldsLock()); // ensure we're serialized, requires GC_NOTRIGGER
{
CONTRACTL
{
- SO_INTOLERANT;
THROWS;
GC_NOTRIGGER;
PRECONDITION(ThisMaybeHelperThread());
{
CONTRACTL
{
- SO_INTOLERANT;
THROWS;
GC_NOTRIGGER;
PRECONDITION(ThisMaybeHelperThread());
{
CONTRACTL
{
- SO_INTOLERANT;
NOTHROW;
GC_TRIGGERS;
PRECONDITION(fp != NULL);
{
CONTRACTL
{
- SO_INTOLERANT;
NOTHROW;
GC_NOTRIGGER;
}
{
CONTRACTL
{
- SO_INTOLERANT;
THROWS;
GC_NOTRIGGER;
CONTRACTL
{
- SO_INTOLERANT;
THROWS;
PRECONDITION(!HasDebuggerDataLock());
PRECONDITION(newAddress != NULL);
{
CONTRACTL
{
- SO_INTOLERANT;
THROWS;
GC_NOTRIGGER;
}
{
CONTRACTL
{
- SO_INTOLERANT;
THROWS;
GC_NOTRIGGER;
PRECONDITION(!g_pDebugger->HasDebuggerDataLock());
{
CONTRACTL
{
- SO_INTOLERANT;
SUPPORTS_DAC;
THROWS;
GC_NOTRIGGER;
{
CONTRACTL
{
- SO_NOT_MAINLINE;
THROWS;
GC_TRIGGERS_FROM_GETJITINFO;
}
{
CONTRACTL
{
- SO_NOT_MAINLINE;
THROWS;
GC_NOTRIGGER;
}
{
CONTRACTL
{
- SO_INTOLERANT;
NOTHROW;
GC_NOTRIGGER;
SUPPORTS_DAC;
#ifndef DACCESS_COMPILE
CONTRACTL
{
- SO_INTOLERANT;
THROWS;
GC_TRIGGERS;
}
#ifndef DACCESS_COMPILE
CONTRACTL
{
- SO_INTOLERANT;
THROWS;
GC_TRIGGERS_FROM_GETJITINFO;
PRECONDITION(!ThisIsHelperThreadWorker());
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
PRECONDITION(CheckPointer(thread));
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
PRECONDITION(CheckPointer(dji));
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
PRECONDITION(CheckPointer(dji));
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
PRECONDITION(CheckPointer(pSig));
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
// @todo - convert this to throwing w/ holders. It will be cleaner.
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
PRECONDITION(CheckPointer(rgpVCs));
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
PRECONDITION(CheckPointer(pCtx));
CONTRACTL
{
- SO_NOT_MAINLINE;
THROWS;
CALLED_IN_DEBUGGERDATALOCK_HOLDER_SCOPE_MAY_GC_TRIGGERS_CONTRACT;
PRECONDITION(!djiNew || djiNew->m_nativeCodeVersion.GetMethodDesc() == fd);
{
CONTRACTL
{
- SO_NOT_MAINLINE;
THROWS;
CALLED_IN_DEBUGGERDATALOCK_HOLDER_SCOPE_MAY_GC_TRIGGERS_CONTRACT;
PRECONDITION(djiTo != NULL);
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
PRECONDITION(ThreadHoldsLock());
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
}
{
CONTRACTL
{
- SO_NOT_MAINLINE;
MAY_DO_HELPER_THREAD_DUTY_THROWS_CONTRACT;
MAY_DO_HELPER_THREAD_DUTY_GC_TRIGGERS_CONTRACT;
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
{
CONTRACTL
{
- SO_NOT_MAINLINE;
THROWS;
GC_NOTRIGGER;
}
{
CONTRACTL
{
- SO_NOT_MAINLINE;
THROWS;
GC_NOTRIGGER;
MODE_ANY;
CONTRACTL
{
- SO_TOLERANT;
NOTHROW;
// No clear GC_triggers semantics here. See DispatchNativeException.
{
THROWS;
GC_NOTRIGGER;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pModule));
}
CONTRACTL_END;
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
PRECONDITION(ThisIsHelperThreadWorker());
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_TRIGGERS; // from sending managed event
STATIC_CONTRACT_MODE_PREEMPTIVE; // we're in umanaged code.
- SO_NOT_MAINLINE_FUNCTION;
-
LOG((LF_CORDB, LL_INFO1000, "D::M2UHHW: Context=0x%p exception record=0x%p\n",
pContext, pExceptionRecord));
{
MAY_DO_HELPER_THREAD_DUTY_THROWS_CONTRACT;
MAY_DO_HELPER_THREAD_DUTY_GC_TRIGGERS_CONTRACT;
- SO_INTOLERANT;
}
CONTRACTL_END;
{
NOTHROW;
if (GetThread()) { GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
- SO_INTOLERANT;
}
CONTRACTL_END;
{
THROWS;
MAY_DO_HELPER_THREAD_DUTY_GC_TRIGGERS_CONTRACT;
- SO_NOT_MAINLINE;
PRECONDITION(ThisIsHelperThread());
MODE_ANY;
{
THROWS;
MAY_DO_HELPER_THREAD_DUTY_GC_TRIGGERS_CONTRACT;
- SO_NOT_MAINLINE;
PRECONDITION(ThisIsHelperThread());
MODE_ANY;
{
THROWS;
GC_NOTRIGGER;
- SO_INTOLERANT;
PRECONDITION(CheckPointer(m_pAppDomainCB));
}
{
NOTHROW;
GC_NOTRIGGER;
- SO_INTOLERANT;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
{
THROWS;
GC_NOTRIGGER;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_INTOLERANT;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_INTOLERANT;
MODE_ANY;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_INTOLERANT;
PRECONDITION(ThisMaybeHelperThread());
}
CONTRACTL_END;
{
CONTRACTL
{
- SO_NOT_MAINLINE;
THROWS;
WRAPPER(GC_TRIGGERS);
}
{
CONTRACTL
{
- SO_NOT_MAINLINE;
MAY_DO_HELPER_THREAD_DUTY_THROWS_CONTRACT;
MAY_DO_HELPER_THREAD_DUTY_GC_TRIGGERS_CONTRACT;
}
{
CONTRACTL
{
- SO_NOT_MAINLINE;
MAY_DO_HELPER_THREAD_DUTY_THROWS_CONTRACT;
MAY_DO_HELPER_THREAD_DUTY_GC_TRIGGERS_CONTRACT;
}
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
}
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
}
{
CONTRACTL
{
- SO_NOT_MAINLINE;
MAY_DO_HELPER_THREAD_DUTY_THROWS_CONTRACT;
MAY_DO_HELPER_THREAD_DUTY_GC_TRIGGERS_CONTRACT;
}
{
CONTRACTL
{
- SO_NOT_MAINLINE;
MAY_DO_HELPER_THREAD_DUTY_THROWS_CONTRACT;
MAY_DO_HELPER_THREAD_DUTY_GC_TRIGGERS_CONTRACT;
}
{
CONTRACTL
{
- SO_NOT_MAINLINE;
MAY_DO_HELPER_THREAD_DUTY_THROWS_CONTRACT;
MAY_DO_HELPER_THREAD_DUTY_GC_TRIGGERS_CONTRACT;
}
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACTL_END;
{
CONTRACTL
{
- SO_INTOLERANT;
NOTHROW;
GC_NOTRIGGER;
}
{
CONTRACTL
{
- SO_INTOLERANT;
NOTHROW;
GC_NOTRIGGER;
}
{
CONTRACTL
{
- SO_INTOLERANT;
NOTHROW;
GC_NOTRIGGER;
}
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
}
{
CONTRACTL
{
- SO_INTOLERANT;
NOTHROW;
GC_NOTRIGGER;
}
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
}
MODE_COOPERATIVE;
GC_TRIGGERS;
THROWS;
- SO_NOT_MAINLINE;
PRECONDITION(CheckPointer(pDE));
}
{
CONTRACTL
{
- SO_INTOLERANT;
NOTHROW;
GC_NOTRIGGER;
}
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
}
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
PRECONDITION(NULL != pfAccurate);
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
}
{
CONTRACTL
{
- SO_INTOLERANT;
NOTHROW;
GC_NOTRIGGER;
PRECONDITION(map != NULL);
{
CONTRACTL
{
- SO_INTOLERANT;
NOTHROW;
GC_NOTRIGGER;
PRECONDITION(ThisMaybeHelperThread());
{
CONTRACTL
{
- SO_INTOLERANT;
THROWS;
GC_NOTRIGGER;
PRECONDITION(CheckPointer(this));
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
}
{
CONTRACTL
{
- SO_INTOLERANT;
NOTHROW;
GC_NOTRIGGER;
DESTRUCTOR_CHECK;
{
CONTRACTL
{
- SO_INTOLERANT;
WRAPPER(THROWS);
WRAPPER(GC_TRIGGERS);
CONSTRUCTOR_CHECK;
{
CONTRACTL
{
- SO_INTOLERANT;
NOTHROW;
GC_NOTRIGGER;
}
{
CONTRACTL
{
- SO_INTOLERANT;
SUPPORTS_DAC;
NOTHROW;
GC_NOTRIGGER;
{
CONTRACTL
{
- SO_INTOLERANT;
THROWS;
GC_NOTRIGGER;
}
{
CONTRACTL
{
- SO_INTOLERANT;
THROWS;
GC_NOTRIGGER;
PRECONDITION(!g_pDebugger->HasDebuggerDataLock());
{
CONTRACTL
{
- SO_INTOLERANT;
NOTHROW;
GC_NOTRIGGER;
}
{
CONTRACTL
{
- SO_INTOLERANT;
NOTHROW;
GC_NOTRIGGER;
}
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
}
{
CONTRACTL
{
- SO_NOT_MAINLINE;
THROWS;
GC_NOTRIGGER;
}
{
CONTRACTL
{
- SO_NOT_MAINLINE;
THROWS;
GC_NOTRIGGER;
}
{
CONTRACTL
{
- SO_NOT_MAINLINE;
THROWS;
GC_NOTRIGGER;
}
{
CONTRACTL
{
- SO_INTOLERANT;
NOTHROW;
GC_NOTRIGGER;
}
CONTRACTL
{
- SO_INTOLERANT;
THROWS;
CALLED_IN_DEBUGGERDATALOCK_HOLDER_SCOPE_MAY_GC_TRIGGERS_CONTRACT;
PRECONDITION(!g_pDebugger->HasDebuggerDataLock());
#include <hosting.h>
#include "eemessagebox.h"
-#include "genericstackprobe.h"
#ifndef SM_REMOTESESSION
#define SM_REMOTESESSION 0x1000
{
CONTRACTL
{
- SO_INTOLERANT;
WRAPPER(THROWS);
GC_NOTRIGGER;
CONSTRUCTOR_CHECK;
{
CONTRACTL
{
- SO_INTOLERANT;
NOTHROW;
GC_NOTRIGGER;
DESTRUCTOR_CHECK;
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
}
{
CONTRACTL
{
- SO_INTOLERANT;
THROWS;
GC_NOTRIGGER;
}
{
CONTRACTL
{
- SO_INTOLERANT;
THROWS;
GC_NOTRIGGER;
PRECONDITION(!ThisIsHelperThreadWorker()); // initialized by main thread
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
}
{
CONTRACTL
{
- SO_INTOLERANT;
NOTHROW;
GC_NOTRIGGER;
{
CONTRACTL
{
- SO_INTOLERANT;
NOTHROW;
GC_TRIGGERS; // Debugger::SuspendComplete can trigger GC
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
if (g_pEEInterface->GetThread() != NULL) { GC_TRIGGERS; } else { GC_NOTRIGGER; }
PRECONDITION(ThisIsHelperThreadWorker());
CONTRACTL
{
- SO_INTOLERANT;
NOTHROW;
PRECONDITION(m_thread != NULL);
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
// We just wrap the instance method DebuggerRCThread::ThreadProc
WRAPPER_NO_CONTRACT;
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD_FORCE_SO();
-
ClrFlsSetThreadType(ThreadType_DbgHelper);
LOG((LF_CORDB, LL_EVERYTHING, "ThreadProcStatic called\n"));
DebuggerRCThread* t = (DebuggerRCThread*)g_pRCThread;
t->ThreadProc(); // this thread is local, go and become the helper
-
- END_SO_INTOLERANT_CODE;
return 0;
}
{
CONTRACTL
{
- SO_INTOLERANT;
NOTHROW;
GC_NOTRIGGER;
}
{
CONTRACTL
{
- SO_INTOLERANT;
NOTHROW;
GC_NOTRIGGER;
{
CONTRACTL
{
- SO_INTOLERANT;
NOTHROW;
GC_NOTRIGGER;
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER; // duh, we're in preemptive..
{
CONTRACTL
{
- SO_INTOLERANT;
NOTHROW;
GC_TRIGGERS;
#include "corsym.h"
#include "palclr.h"
#include "cor.h"
-#include "genericstackprobe.h"
// I'm not sure why this code uses these macros for memory management (they should at least be
// in-line functions). DELETE is a symbol defined in WinNt.h as an access-type. We're probably
pDocument->SetDocumentWriter(sdw);
// stack check needed to call back into utilcode
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD_FORCE_SO();
hr = m_pStringPool->AddStringW(wcsUrl, (UINT32 *)&UrlEntry);
- END_SO_INTOLERANT_CODE;
IfFailGo(hr);
pDocument->SetUrlEntry(UrlEntry);
ULONG32 sigLen;
sigLen = cSig;
- // stack check needed to call back into utilcode
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD_FORCE_SO();
// Copy the name.
hr = m_pStringPool->AddStringW(name, (UINT32 *)&NameEntry);
- END_SO_INTOLERANT_CODE;
IfFailGo(hr);
var->SetName(NameEntry);
var->SetSequence(sequence);
- // stack check needed to call back into utilcode
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD_FORCE_SO();
// Copy the name.
hr = m_pStringPool->AddStringW(name, (UINT32 *)&NameEntry);
- END_SO_INTOLERANT_CODE;
IfFailGo(hr);
var->SetName(NameEntry);
// the stringpool
if (V_VT(&value) == VT_BSTR)
{
- // stack check needed to call back into utilcode
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD_FORCE_SO();
// Copy the bstrValue.
hr = m_pStringPool->AddStringW(V_BSTR(&value), (UINT32 *)&ValueBstr);
- END_SO_INTOLERANT_CODE;
IfFailGo(hr);
V_BSTR(&value) = NULL;
}
con->SetValue(value, ValueBstr);
- // stack check needed to call back into utilcode
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD_FORCE_SO();
// Copy the name.
hr = m_pStringPool->AddStringW(name, (UINT32 *)&Name);
- END_SO_INTOLERANT_CODE;
IfFailGo(hr);
con->SetName(Name);
SymUsingNamespace *use;
IfNullGo( use = m_MethodInfo.m_usings.next());
- // stack check needed to call back into utilcode
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD_FORCE_SO();
// Copy the name.
hr = m_pStringPool->AddStringW(fullName, (UINT32 *)&Name);
- END_SO_INTOLERANT_CODE;
IfFailGo(hr);
use->SetName(Name);
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
PRECONDITION(CheckPointer(ppv, NULL_OK));
} CONTRACTL_END;
STDMETHODIMP_(ULONG) ComCallUnmarshal::AddRef(void)
{
LIMITED_METHOD_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
return 2;
}
STDMETHODIMP_(ULONG) ComCallUnmarshal::Release(void)
{
LIMITED_METHOD_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
return 1;
}
LPCLSID pclsid)
{
LIMITED_METHOD_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
// Marshal side only.
_ASSERTE(FALSE);
return E_NOTIMPL;
ULONG * pSize)
{
LIMITED_METHOD_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
// Marshal side only.
_ASSERTE(FALSE);
return E_NOTIMPL;
ULONG mshlflags)
{
LIMITED_METHOD_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
// Marshal side only.
_ASSERTE(FALSE);
return E_NOTIMPL;
CONTRACTL {
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;;
STATIC_CONTRACT_MODE_PREEMPTIVE;
PRECONDITION(CheckPointer(pStm));
PRECONDITION(CheckPointer(ppvObj));
ULONG mshlflags;
HRESULT hr = E_FAIL;
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return COR_E_STACKOVERFLOW);
// The marshal code added a reference to the object, but we return a
// reference to the object as well, so don't change the ref count on the
// success path. Need to release on error paths though (if we manage to
hr = pOldUnk->QueryInterface(riid, ppvObj);
}
ErrExit:
- ;
- END_SO_INTOLERANT_CODE;
return hr;
}
NOTHROW;
GC_NOTRIGGER;
STATIC_CONTRACT_MODE_PREEMPTIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pStm));
} CONTRACTL_END;
if (!pStm)
return E_POINTER;
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return COR_E_STACKOVERFLOW);
-
// Read the raw IP out of the marshalling stream. Do this first since we
// need to update the stream pointer even in case of failures.
hr = pStm->Read (&pUnk, sizeof (pUnk), &bytesRead);
pUnk->Release ();
ErrExit:
- ;
- END_SO_INTOLERANT_CODE;
return hr;
}
STDMETHODIMP ComCallUnmarshal::DisconnectObject (ULONG dwReserved)
{
LIMITED_METHOD_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
// Nothing we can (or need to) do here. The client is using a raw IP to
// access this server, so the server shouldn't go away until the client
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
PRECONDITION(CheckPointer(ppv));
} CONTRACTL_END;
STDMETHODIMP_(ULONG) CComCallUnmarshalFactory::AddRef(void)
{
LIMITED_METHOD_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
-
return 2;
}
STDMETHODIMP_(ULONG) CComCallUnmarshalFactory::Release(void)
{
LIMITED_METHOD_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
-
return 1;
}
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
PRECONDITION(CheckPointer(ppv));
} CONTRACTL_END;
STDMETHODIMP CComCallUnmarshalFactory::LockServer(BOOL fLock)
{
LIMITED_METHOD_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
-
return S_OK;
}
HRESULT ICeeFileGen::GenerateCeeFile (HCEEFILE ceeFile)
{
- SO_NOT_MAINLINE_FUNCTION;
-
TESTANDRETURNPOINTER(ceeFile);
CeeFileGenWriter *gen = reinterpret_cast<CeeFileGenWriter*>(ceeFile);
#define STATIC_CONTRACT_DEBUG_ONLY
#define STATIC_CONTRACT_NOTHROW
#define STATIC_CONTRACT_CAN_TAKE_LOCK
-#define STATIC_CONTRACT_SO_TOLERANT
#define STATIC_CONTRACT_GC_NOTRIGGER
#define STATIC_CONTRACT_MODE_COOPERATIVE
#define CONTRACTL
#define INSTANCE_CHECK
#define MODE_COOPERATIVE
#define MODE_ANY
-#define SO_INTOLERANT
-#define SO_TOLERANT
#define GC_TRIGGERS
#define GC_NOTRIGGER
#define CAN_TAKE_LOCK
// returns TRUE if the pointer is in one of the GC heaps.
bool GCHeap::IsHeapPointer (void* vpObject, bool small_heap_only)
{
- STATIC_CONTRACT_SO_TOLERANT;
-
// removed STATIC_CONTRACT_CAN_TAKE_LOCK here because find_segment
// no longer calls GCEvent::Wait which eventually takes a lock.
{
MODE_ANY;
}
- SO_INTOLERANT;
}
CONTRACTL_END;
{
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
- STATIC_CONTRACT_SO_TOLERANT;
STATIC_CONTRACT_MODE_COOPERATIVE;
STATIC_CONTRACT_DEBUG_ONLY;
{
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
- STATIC_CONTRACT_SO_TOLERANT;
STATIC_CONTRACT_MODE_COOPERATIVE;
STATIC_CONTRACT_DEBUG_ONLY;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
CAN_TAKE_LOCK; // because of TableFreeSingleHandleToCache
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
{
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
- STATIC_CONTRACT_SO_TOLERANT;
STATIC_CONTRACT_MODE_COOPERATIVE;
#if !defined(DACCESS_COMPILE) && defined(FEATURE_EVENT_TRACE)
{
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
- STATIC_CONTRACT_SO_TOLERANT;
STATIC_CONTRACT_MODE_COOPERATIVE;
// unwrap the objectref we were given
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_COOPERATIVE;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_COOPERATIVE;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
CAN_TAKE_LOCK; // because of TableCacheMissOnFree
}
CONTRACTL_END;
{
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
- STATIC_CONTRACT_SO_TOLERANT;
STATIC_CONTRACT_MODE_COOPERATIVE;
// fetch the table segment we are working in
{
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
- STATIC_CONTRACT_SO_TOLERANT;
STATIC_CONTRACT_MODE_COOPERATIVE;
UNREFERENCED_PARAMETER(pExtraInfo);
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_COOPERATIVE;
}
CONTRACTL_END;
g_pszExeFile = lpCmdLine[0];
#endif
- // ildasm does not need to be SO-robust.
- SO_NOT_MAINLINE_FUNCTION;
-
// SWI has requested that the exact form of the function call below be used. For details see http://swi/SWI%20Docs/Detecting%20Heap%20Corruption.doc
(void)HeapSetInformation(NULL, HeapEnableTerminationOnCorruption, NULL, 0);
void Set(DWORD index, PTR_VOID element)
{
WRAPPER_NO_CONTRACT;
- STATIC_CONTRACT_SO_INTOLERANT;
*GetPtr(index) = element;
}
void Init()
{
LIMITED_METHOD_CONTRACT;
- STATIC_CONTRACT_SO_INTOLERANT;
m_count = 0;
m_firstBlock.m_next = NULL;
void Destroy()
{
WRAPPER_NO_CONTRACT;
- STATIC_CONTRACT_SO_INTOLERANT;
Clear();
}
static Iterator Create(ArrayListBlock* block, DWORD remaining)
{
LIMITED_METHOD_DAC_CONTRACT;
- STATIC_CONTRACT_SO_INTOLERANT;
Iterator i;
i.m_block = block;
i.m_index = (DWORD) -1;
Iterator Iterate()
{
- STATIC_CONTRACT_SO_INTOLERANT;
WRAPPER_NO_CONTRACT;
return Iterator::Create((ArrayListBlock*)&m_firstBlock, m_count);
}
ConstIterator Iterate() const
{
- STATIC_CONTRACT_SO_INTOLERANT;
-
// Const cast is safe because ConstIterator does not expose any way to modify the block
ArrayListBlock *pFirstBlock = const_cast<ArrayListBlock *>(reinterpret_cast<const ArrayListBlock *>(&m_firstBlock));
return ConstIterator(pFirstBlock, m_count);
#ifndef DACCESS_COMPILE
ArrayList()
{
- STATIC_CONTRACT_SO_INTOLERANT;
WRAPPER_NO_CONTRACT;
Init();
}
~ArrayList()
{
- STATIC_CONTRACT_SO_INTOLERANT;
WRAPPER_NO_CONTRACT;
Destroy();
}
CONFIG_DWORD_INFO_EX(INTERNAL_MscorsnLogging, W("MscorsnLogging"), 0, "Enables strong name logging", CLRConfig::REGUTIL_default)
RETAIL_CONFIG_DWORD_INFO_EX(EXTERNAL_NativeImageRequire, W("NativeImageRequire"), 0, "", CLRConfig::REGUTIL_default)
CONFIG_DWORD_INFO_EX(INTERNAL_NestedEhOom, W("NestedEhOom"), 0, "", CLRConfig::REGUTIL_default)
-RETAIL_CONFIG_DWORD_INFO_EX(EXTERNAL_NO_SO_NOT_MAINLINE, W("NO_SO_NOT_MAINLINE"), 0, "", CLRConfig::REGUTIL_default)
#define INTERNAL_NoGuiOnAssert_Default 1
RETAIL_CONFIG_DWORD_INFO_EX(INTERNAL_NoGuiOnAssert, W("NoGuiOnAssert"), INTERNAL_NoGuiOnAssert_Default, "", CLRConfig::REGUTIL_default)
RETAIL_CONFIG_DWORD_INFO_EX(EXTERNAL_NoProcedureSplitting, W("NoProcedureSplitting"), 0, "", CLRConfig::REGUTIL_default)
STATIC_CONTRACT_GC_NOTRIGGER;
STATIC_CONTRACT_MODE_ANY;
STATIC_CONTRACT_CANNOT_TAKE_LOCK;
- STATIC_CONTRACT_SO_TOLERANT;
_ASSERTE(increment != 0);
{
BEGIN_PRESERVE_LAST_ERROR;
- ANNOTATION_VIOLATION(SOToleranceViolation);
-
IExecutionEngine * pEngine = GetExecutionEngine();
value = (size_t) pEngine->TLS_GetValue(slot);
STATIC_CONTRACT_GC_NOTRIGGER;
STATIC_CONTRACT_MODE_ANY;
STATIC_CONTRACT_CANNOT_TAKE_LOCK;
- STATIC_CONTRACT_SO_TOLERANT;
void **block = (*__ClrFlsGetBlock)();
if (block != NULL)
}
else
{
- ANNOTATION_VIOLATION(SOToleranceViolation);
-
void * value = GetExecutionEngine()->TLS_GetValue(slot);
return value;
}
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
STATIC_CONTRACT_MODE_ANY;
- STATIC_CONTRACT_SO_TOLERANT;
#ifdef _DEBUG
*pValue = ULongToPtr(0xcccccccc);
}
else
{
- ANNOTATION_VIOLATION(SOToleranceViolation);
BOOL result = GetExecutionEngine()->TLS_CheckValue(slot, pValue);
return result;
}
STATIC_CONTRACT_GC_NOTRIGGER;
STATIC_CONTRACT_MODE_ANY;
STATIC_CONTRACT_CANNOT_TAKE_LOCK;
- STATIC_CONTRACT_SO_TOLERANT;
- void **block = (*__ClrFlsGetBlock)();
- if (block != NULL)
+ void **block = (*__ClrFlsGetBlock)();
+ if (block != NULL)
{
block[slot] = pData;
}
{
BEGIN_PRESERVE_LAST_ERROR;
- ANNOTATION_VIOLATION(SOToleranceViolation);
GetExecutionEngine()->TLS_SetValue(slot, pData);
END_PRESERVE_LAST_ERROR;
// for stress log the rule is more restrict, we have to check the global counter too
extern BOOL IsInCantAllocStressLogRegion();
-#include "genericstackprobe.inl"
-
#endif
T Min(T v1, T v2)
{
STATIC_CONTRACT_LEAF;
- STATIC_CONTRACT_SO_TOLERANT;
return v1 < v2 ? v1 : v2;
}
T Max(T v1, T v2)
{
STATIC_CONTRACT_LEAF;
- STATIC_CONTRACT_SO_TOLERANT;
return v1 > v2 ? v1 : v2;
}
inline UINT AlignUp(UINT value, UINT alignment)
{
STATIC_CONTRACT_LEAF;
- STATIC_CONTRACT_SO_TOLERANT;
STATIC_CONTRACT_SUPPORTS_DAC;
return (value+alignment-1)&~(alignment-1);
}
inline ULONG AlignUp(ULONG value, UINT alignment)
{
STATIC_CONTRACT_LEAF;
- STATIC_CONTRACT_SO_TOLERANT;
STATIC_CONTRACT_SUPPORTS_DAC;
return (value+alignment-1)&~(alignment-1);
}
inline UINT64 AlignUp(UINT64 value, UINT alignment)
{
STATIC_CONTRACT_LEAF;
- STATIC_CONTRACT_SO_TOLERANT;
STATIC_CONTRACT_SUPPORTS_DAC;
return (value+alignment-1)&~(UINT64)(alignment-1);
}
inline UINT AlignDown(UINT value, UINT alignment)
{
STATIC_CONTRACT_LEAF;
- STATIC_CONTRACT_SO_TOLERANT;
STATIC_CONTRACT_SUPPORTS_DAC;
return (value&~(alignment-1));
}
inline ULONG AlignDown(ULONG value, UINT alignment)
{
STATIC_CONTRACT_LEAF;
- STATIC_CONTRACT_SO_TOLERANT;
STATIC_CONTRACT_SUPPORTS_DAC;
return (value&~(ULONG)(alignment-1));
}
inline UINT64 AlignDown(UINT64 value, UINT alignment)
{
STATIC_CONTRACT_LEAF;
- STATIC_CONTRACT_SO_TOLERANT;
STATIC_CONTRACT_SUPPORTS_DAC;
return (value&~(UINT64)(alignment-1));
}
inline UINT AlignmentTrim(UINT value, UINT alignment)
{
STATIC_CONTRACT_LEAF;
- STATIC_CONTRACT_SO_TOLERANT;
STATIC_CONTRACT_SUPPORTS_DAC;
return value&(alignment-1);
}
inline UINT AlignmentTrim(ULONG value, UINT alignment)
{
STATIC_CONTRACT_LEAF;
- STATIC_CONTRACT_SO_TOLERANT;
STATIC_CONTRACT_SUPPORTS_DAC;
return value&(alignment-1);
}
inline UINT AlignmentTrim(UINT64 value, UINT alignment)
{
STATIC_CONTRACT_LEAF;
- STATIC_CONTRACT_SO_TOLERANT;
STATIC_CONTRACT_SUPPORTS_DAC;
return ((UINT)value)&(alignment-1);
}
//
// LOADS_TYPE(level) the function promises not to load any types beyond "level"
//
-// SO_INTOLERANT the function cannot tolerate an SO at any point and must run behind an
-// an SO probe via BEGIN_SO_INTOLERANT_XXX. This is the default. We want most
-// of our code to run behind an SO probe. The only time you need to explicitly
-// mark something as SO_INTOLERANT is if the static analysis tool incorrectly
-// flags it as an entry point.
-// -or- SO_TOLERANT the function can tolerate an SO. It either does not update any global state
-// that needs to be cleaned up should a random SO occur, or it protects those
-// updates behind an SO probe.
-// -or- SO_NOT_MAINLINE the function is not hardened to SO and should never run on a managed thread
-// where we need to be hardened to SO. You can use this for functions that run
-// only for ngen or Win9X etc.
-//
// CAN_TAKE_LOCK the function has a code path that takes a lock
// _or_ (CAN_TAKE_LOCK and CANNOT_RETAKE_LOCK)
// the function has a code path that takes a lock, but never tries to reenter
// STATIC_CONTRACT_GCNOTRIGGER
// STATIC_CONTRACT_FAULT
// STATIC_CONTRACT_FORBID_FAULT
-// STATIC_CONTRACT_SO_INTOLERANT
-// STATIC_CONTRACT_SO_TOLERANT
-// STATIC_CONTRACT_SO_NOT_MAINLINE
// use to implement statically checkable contracts
// when runtime contracts cannot be used.
//
// GCViolation
// ModeViolation
// FaultViolation
-// SOToleranceViolation
// FaultNotFatal
// HostViolation
// LoadsTypeViolation
UINT m_GCForbidCount;
UINT m_maxLoadTypeLevel; // taken from enum ClassLoadLevel
BOOL m_allowGetThread; // TRUE if GetThread() is ok in this scope
-#ifdef FEATURE_STACK_PROBE //StackMarkerStack required only when SO infrastructure is enabled
- /* Used to validate backout stack consumption required for StackOverflow infrastructure */
- StackMarkerStack m_StackMarkerStack; // The stack of stack markers
-#endif
DbgStateLockState m_LockState;
public:
m_allowGetThread = TRUE; // By default, GetThread() is perfectly fine to call
-#ifdef FEATURE_STACK_PROBE
- m_StackMarkerStack.Init();
-#endif
-
m_LockState.SetStartingValues();
}
CONTRACT_BITMASK_RESET(CONTRACT_BITMASK_HOSTCALLS);
}
-#ifdef FEATURE_STACK_PROBE //SO contract functions only required when SO infrastructure is enabled
- //--//
- BOOL IsSOTolerant()
- {
- return CONTRACT_BITMASK_IS_SET(CONTRACT_BITMASK_SOTOLERANT);
- }
-
- void SetSOTolerance()
- {
- CONTRACT_BITMASK_SET(CONTRACT_BITMASK_SOTOLERANT);
- }
-
- BOOL SetSOTolerance(BOOL tolerance)
- {
- BOOL prevState = CONTRACT_BITMASK_IS_SET(CONTRACT_BITMASK_SOTOLERANT);
- CONTRACT_BITMASK_UPDATE(CONTRACT_BITMASK_SOTOLERANT,tolerance);
- return prevState;
- }
-
- void ResetSOTolerance()
- {
- CONTRACT_BITMASK_RESET(CONTRACT_BITMASK_SOTOLERANT);
- }
-
-#endif
-
//--//
BOOL IsDebugOnly()
{
CONTRACT_BITMASK_RESET(CONTRACT_BITMASK_DEBUGONLY);
}
- #ifdef FEATURE_STACK_PROBE
- //--//
- BOOL IsSONotMainline()
- {
- return CONTRACT_BITMASK_IS_SET(CONTRACT_BITMASK_SONOTMAINLINE);
- }
-
- void SetSONotMainline()
- {
- CONTRACT_BITMASK_SET(CONTRACT_BITMASK_SONOTMAINLINE);
- }
-
- BOOL SetSONotMainline(BOOL value)
- {
- BOOL prevState = CONTRACT_BITMASK_IS_SET(CONTRACT_BITMASK_SONOTMAINLINE);
- CONTRACT_BITMASK_UPDATE(CONTRACT_BITMASK_SONOTMAINLINE,value);
- return prevState;
- }
-
- void ResetSONotMainline()
- {
- CONTRACT_BITMASK_RESET(CONTRACT_BITMASK_SONOTMAINLINE);
- }
-#endif
-
//--//
BOOL IsGetThreadAllowed()
{
m_LockState.OnEnterCannotRetakeLockFunction();
}
-#ifdef FEATURE_STACK_PROBE //SO contract functions only required when SO infrastructure is enabled
- BOOL IsSOIntolerant()
- {
- return !IsSOTolerant();
- }
-
- BOOL BeginSOTolerant()
- {
- return SetSOTolerance(TRUE);
- }
-
- BOOL BeginSOIntolerant()
- {
- return SetSOTolerance(FALSE);
- }
-
-
- void CheckIfSOIntolerantOK(const char *szFunction, const char *szFile, int lineNum);
-
-
-
-
-
- //--//
-
- StackMarkerStack& GetStackMarkerStack()
- {
- return m_StackMarkerStack;
- }
-#endif
-
void CheckOkayToLock(__in_z const char *szFunction, __in_z const char *szFile, int lineNum); // Asserts if its not okay to lock
BOOL CheckOkayToLockNoAssert(); // Returns if OK to lock
void LockTaken(DbgStateLockType dbgStateLockType,
inline ClrDebugState *CheckClrDebugState()
{
STATIC_CONTRACT_LIMITED_METHOD;
- STATIC_CONTRACT_SO_TOLERANT;
ClrDebugState *ret = (ClrDebugState*)ClrFlsGetValue(TlsIdx_ClrDebugState);
return ret;
}
MODE_Preempt = 0x00000040,
MODE_Coop = 0x00000080,
- // The following are used to assert the type of global state update being done by the function.
- // This is used by the SO infrastructure to detect if we are probing properly. A CLR process will
- // run in one of two states: SO-tolerant or SO-intolerant. In SO-tolerant mode, an SO is OK and we
- // will not corrupt any global state. However, we cannot allow an SO to occur in SO-intolerant code
- // because we might end up with our global state being corrupted.
- //
- // When we enter the EE from any entry point, we will begin in SO-tolerant mode and must probe for sufficient
- // stack before entering SO-intolerant code. We will tell the differnce between SO-tolerant and SO-intolerant code
- // by contract annotations on that function: SO_TOLERANT and SO_INTOLERANT.
-
- // We enter the EE in SO_TOLERANT mode. All entry point functions into the EE must be marked as SO_TOLERANT and
- // and must probe before calling an SO-intolerant function. We have a static analsysis tool that ensures that every
- // entry point is tagged as SO_TOLERANT and that it probes before calling an SO_TOLERANT function.
-
- // By default, all unannotated functions in the EE are SO_INTOLERANT which means that they must run behind a probe.
- // Our contract checking will verify this at runtime. We only need to annotate a function explicilty as SO_INTOLERANT
- // to tell our static analysis tool that they are not entry points (if it can't find a caller for a function, it assumes that the
- // function is an entry point and should be marked SO_INTOLERANT.)
-
- SO_TOLERANCE_Mask = 0x00000300,
- SO_TOLERANT_No = 0x00000000, // the default.
- SO_TOLERANT_Yes = 0x00000100,
- SO_TOLERANCE_Disabled = 0x00000200,
-
DEBUG_ONLY_Yes = 0x00000400, // code runs under debug only
SO_MAINLINE_No = 0x00000800, // code is not part of our mainline SO scenario
LOADS_TYPE_Disabled = 0x00000000, // the default
ALL_Disabled = THROWS_Disabled|GC_Disabled|FAULT_Disabled|MODE_Disabled|LOADS_TYPE_Disabled|
- SO_TOLERANCE_Disabled|HOST_Disabled|EE_THREAD_Disabled|CAN_TAKE_LOCK_Disabled|CAN_RETAKE_LOCK_No_Disabled
+ HOST_Disabled|EE_THREAD_Disabled|CAN_TAKE_LOCK_Disabled|CAN_RETAKE_LOCK_No_Disabled
};
ModeViolation = 0x00000004, // suppress MODE_PREEMP and MODE_COOP tags in this scope
FaultViolation = 0x00000008, // suppress INJECT_FAULT assertions in this scope
FaultNotFatal = 0x00000010, // suppress INJECT_FAULT but not fault injection by harness
- SOToleranceViolation = 0x00000020, // suppress SO_TOLERANCE tags in this scope
LoadsTypeViolation = 0x00000040, // suppress LOADS_TYPE tags in this scope
TakesLockViolation = 0x00000080, // suppress CAN_TAKE_LOCK tags in this scope
HostViolation = 0x00000100, // suppress HOST_CALLS tags in this scope
#define NOTHROW do { STATIC_CONTRACT_NOTHROW; REQUEST_TEST(Contract::THROWS_No, Contract::THROWS_Disabled); } while(0) \
-#define ENTRY_POINT do { STATIC_CONTRACT_ENTRY_POINT; REQUEST_TEST(Contract::SO_TOLERANT_Yes, Contract::SO_TOLERANCE_Disabled); } while(0)
+#define ENTRY_POINT STATIC_CONTRACT_ENTRY_POINT
#define LOADS_TYPE(maxlevel) do { REQUEST_TEST( ((maxlevel) + 1) << Contract::LOADS_TYPE_Shift, Contract::LOADS_TYPE_Disabled ); } while(0)
-#define SO_TOLERANT do { STATIC_CONTRACT_SO_TOLERANT; REQUEST_TEST(Contract::SO_TOLERANT_Yes, Contract::SO_TOLERANCE_Disabled); } while(0)
-
-#define SO_INTOLERANT do { STATIC_CONTRACT_SO_INTOLERANT; REQUEST_TEST(Contract::SO_TOLERANT_No, Contract::SO_TOLERANCE_Disabled); } while(0)
-
-#define SO_NOT_MAINLINE do { STATIC_CONTRACT_SO_NOT_MAINLINE; REQUEST_TEST(Contract::SO_MAINLINE_No, 0); } while (0)
-
#define CAN_TAKE_LOCK do { STATIC_CONTRACT_CAN_TAKE_LOCK; REQUEST_TEST(Contract::CAN_TAKE_LOCK_Yes, Contract::CAN_TAKE_LOCK_Disabled); } while(0)
#define CANNOT_TAKE_LOCK do { STATIC_CONTRACT_CANNOT_TAKE_LOCK; REQUEST_TEST(Contract::CAN_TAKE_LOCK_No, Contract::CAN_TAKE_LOCK_Disabled); } while(0)
#define CANNOT_TAKE_LOCK
#define CANNOT_RETAKE_LOCK
#define LOADS_TYPE(maxlevel)
-#define SO_TOLERANT
-#define SO_INTOLERANT
-#define SO_NOT_MAINLINE
#define ENTRY_POINT
#ifdef _DEBUG
FORCEINLINE void EnterInternal(UINT_PTR violationMask)
{
_ASSERTE(0 == (violationMask & ~(ThrowsViolation | GCViolation | ModeViolation | FaultViolation |
- FaultNotFatal | SOToleranceViolation | HostViolation |
+ FaultNotFatal | HostViolation |
TakesLockViolation | LoadsTypeViolation)) ||
violationMask == AllViolation);
inline ClrDebugState *GetClrDebugState(BOOL fAlloc)
{
STATIC_CONTRACT_LIMITED_METHOD;
- STATIC_CONTRACT_SO_NOT_MAINLINE;
ClrDebugState *pState = CheckClrDebugState();
}
#endif // ENABLE_CONTRACTS_IMPL
-#ifdef FEATURE_STACK_PROBE
-
-#ifdef ENABLE_CONTRACTS_IMPL
-class SONotMainlineHolder
-{
- public:
- DEBUG_NOINLINE void Enter()
- {
- SCAN_SCOPE_BEGIN;
- STATIC_CONTRACT_SO_NOT_MAINLINE;
-
- m_pClrDebugState = GetClrDebugState();
- if (m_pClrDebugState)
- {
- m_oldSONotMainlineValue = m_pClrDebugState->IsSONotMainline();
- m_pClrDebugState->SetSONotMainline();
- }
- }
-
- DEBUG_NOINLINE void Leave()
- {
- SCAN_SCOPE_END;
-
- m_pClrDebugState = CheckClrDebugState();
- if (m_pClrDebugState)
- {
- m_pClrDebugState->SetSONotMainline( m_oldSONotMainlineValue );
- }
- }
-
- private:
- BOOL m_oldSONotMainlineValue;
- ClrDebugState *m_pClrDebugState;
-};
-
-#define ENTER_SO_NOT_MAINLINE_CODE \
- SONotMainlineHolder __soNotMainlineHolder; \
- __soNotMainlineHolder.Enter();
-
-#define LEAVE_SO_NOT_MAINLINE_CODE \
- __soNotMainlineHolder.Leave();
-
-
-class AutoCleanupSONotMainlineHolder : public SONotMainlineHolder
-{
- public:
- DEBUG_NOINLINE AutoCleanupSONotMainlineHolder()
- {
- SCAN_SCOPE_BEGIN;
- STATIC_CONTRACT_SO_NOT_MAINLINE;
-
- Enter();
- }
-
- DEBUG_NOINLINE ~AutoCleanupSONotMainlineHolder()
- {
- SCAN_SCOPE_END;
-
- Leave();
- }
-};
-
-#define SO_NOT_MAINLINE_FUNCTION \
- AutoCleanupSONotMainlineHolder __soNotMainlineHolder;
-
-#define SO_NOT_MAINLINE_REGION() \
- AutoCleanupSONotMainlineHolder __soNotMainlineHolder;
-
-#else // ENABLE_CONTRACTS_IMPL
-#define SO_NOT_MAINLINE_FUNCTION STATIC_CONTRACT_SO_NOT_MAINLINE
-#define SO_NOT_MAINLINE_REGION() STATIC_CONTRACT_SO_NOT_MAINLINE
-#define ENTER_SO_NOT_MAINLINE_CODE
-#define LEAVE_SO_NOT_MAINLINE_CODE
-#endif
-
-#else // FEATURE_STACK_PROBE
-
-#define SO_NOT_MAINLINE_FUNCTION
-#define SO_NOT_MAINLINE_REGION()
-#define ENTER_SO_NOT_MAINLINE_CODE
-#define LEAVE_SO_NOT_MAINLINE_CODE
-
-#endif // FEATURE_STACK_PROBE
-
#ifdef ENABLE_CONTRACTS_IMPL
class HostNoCallHolder
//
#define STANDARD_VM_CHECK \
- THROWS; \
- SO_INTOLERANT; \
+ THROWS;
#define STANDARD_VM_CONTRACT \
CONTRACTL \
#define STATIC_STANDARD_VM_CONTRACT \
STATIC_CONTRACT_THROWS; \
STATIC_CONTRACT_GC_TRIGGERS; \
- STATIC_CONTRACT_MODE_PREEMPTIVE; \
- STATIC_CONTRACT_SO_INTOLERANT
+ STATIC_CONTRACT_MODE_PREEMPTIVE;
#define AFTER_CONTRACTS
#include "volatile.h"
#ifdef ENABLE_CONTRACTS_IMPL
-#ifdef FEATURE_STACK_PROBE
-/* FLAG to turn on/off dynamic SO Contract checking */
-extern BOOL g_EnableDefaultRWValidation;
-
-/* Used to report any code with SO_NOT_MAINLINE being run in a test environment
- * with COMPLUS_NO_SO_NOT_MAINLINE enabled
- */
-void SONotMainlineViolation(const char *szFunction, const char *szFile, int lineNum);
-
-/* Wrapper over SOTolerantViolation(). Used to report SO_Intolerant functions being called
- * from SO_tolerant funcs without stack probing.
- */
-void SoTolerantViolationHelper(const char *szFunction,
- const char *szFile,
- int lineNum);
-#endif
-
-
inline void BaseContract::DoChecks(UINT testmask, __in_z const char *szFunction, __in_z const char *szFile, int lineNum)
{
STATIC_CONTRACT_DEBUG_ONLY;
m_pClrDebugState->SetDebugOnly();
}
-#ifdef FEATURE_STACK_PROBE //Dynamic SO contract checks only required when SO infrastructure is present.
-
- if (testmask & SO_MAINLINE_No)
- {
- static DWORD dwCheckNotMainline = -1;
-
- // Some tests should never hit an SO_NOT_MAINLINE contract
- if (dwCheckNotMainline == -1)
- dwCheckNotMainline = CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_NO_SO_NOT_MAINLINE);
-
-
- if (dwCheckNotMainline)
- {
- SONotMainlineViolation(m_contractStackRecord.m_szFunction,
- m_contractStackRecord.m_szFile,
- m_contractStackRecord.m_lineNum);
- }
-
- m_pClrDebugState->SetSONotMainline();
- }
-
-#endif // FEATURE_STACK_PROBE
-
switch (testmask & FAULT_Mask)
{
case FAULT_Forbid:
break;
}
-#ifdef FEATURE_STACK_PROBE
-
- switch (testmask & SO_TOLERANCE_Mask)
- {
- case SO_TOLERANT_No:
- if (g_EnableDefaultRWValidation)
- {
- m_pClrDebugState->CheckIfSOIntolerantOK(m_contractStackRecord.m_szFunction,
- m_contractStackRecord.m_szFile,
- m_contractStackRecord.m_lineNum);
- }
- break;
-
- case SO_TOLERANT_Yes:
- case SO_TOLERANCE_Disabled:
- // Nothing
- break;
-
- default:
- UNREACHABLE();
- }
-
-#endif // FEATURE_STACK_PROBE
-
if (testmask & CAN_RETAKE_LOCK_No)
{
m_pClrDebugState->OnEnterCannotRetakeLockFunction();
return m_pLockData;
}
-#ifdef FEATURE_STACK_PROBE
-// We don't want to allow functions that use holders to EX_TRY to be intolerant
-// code... if an exception were to occur, the holders and EX_CATCH/FINALLY would
-// have less than 1/4 clean up.
-inline void EnsureSOIntolerantOK(const char *szFunction,
- const char *szFile,
- int lineNum)
-{
- // We don't want to use a holder here, because a holder will
- // call EnsureSOIntolerantOK
-
- DWORD error = GetLastError();
- if (! g_EnableDefaultRWValidation)
- {
- SetLastError(error);
- return;
- }
- ClrDebugState *pClrDebugState = CheckClrDebugState();
- if (! pClrDebugState)
- {
- SetLastError(error);
- return;
- }
- pClrDebugState->CheckIfSOIntolerantOK(szFunction, szFile, lineNum);
- SetLastError(error);
-}
-
-inline void ClrDebugState::CheckIfSOIntolerantOK(const char *szFunction,
- const char *szFile,
- int lineNum)
-
-{
- // If we are an RW function on a managed thread, we must be in SO-intolerant mode. Ie. we must be behind a probe.
- if (IsSOIntolerant() || IsDebugOnly() || IsSONotMainline() || (m_violationmask & SOToleranceViolation) ||
- !g_fpShouldValidateSOToleranceOnThisThread || !g_fpShouldValidateSOToleranceOnThisThread())
- {
- return;
- }
- SoTolerantViolationHelper(szFunction, szFile, lineNum);
-}
-
-#endif
-
inline
void CONTRACT_ASSERT(const char *szElaboration,
UINT whichTest,
#define EXCEPTION_HIJACK 0xe0434f4e // 0xe0000000 | 'COM'+1
-#ifdef FEATURE_STACK_PROBE
-#define EXCEPTION_SOFTSO 0xe053534f // 0xe0000000 | 'SSO'
- // We can not throw internal C++ exception through managed frame.
- // At boundary, we will raise an exception with this error code
-#endif
-
#if defined(_DEBUG)
#define EXCEPTION_INTERNAL_ASSERT 0xe0584d4e // 0xe0000000 | 'XMN'
// An internal Assert will raise this exception when the config
#endif // FEATURE_PAL
// helper macro to make the vtables unique for DAC
-#define VPTR_UNIQUE(unique) virtual int MakeVTableUniqueForDAC() { STATIC_CONTRACT_SO_TOLERANT; return unique; }
+#define VPTR_UNIQUE(unique) virtual int MakeVTableUniqueForDAC() { return unique; }
#define VPTR_UNIQUE_BaseDomain (100000)
#define VPTR_UNIQUE_SystemDomain (VPTR_UNIQUE_BaseDomain + 1)
#define VPTR_UNIQUE_ComMethodFrame (VPTR_UNIQUE_SystemDomain + 1)
#ifndef __ENTRYPOINTS_h__
#define __ENTRYPOINTS_h__
-#define BEGIN_ENTRYPOINT_THROWS \
- BEGIN_SO_INTOLERANT_CODE(GetThread()) \
-
-
-#define END_ENTRYPOINT_THROWS \
- END_SO_INTOLERANT_CODE;
-
-#define BEGIN_ENTRYPOINT_THROWS_WITH_THREAD(____thread) \
- BEGIN_SO_INTOLERANT_CODE(____thread) \
-
-#define END_ENTRYPOINT_THROWS_WITH_THREAD \
- END_SO_INTOLERANT_CODE;
-
-#define BEGIN_ENTRYPOINT_NOTHROW_WITH_THREAD(___thread) \
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return COR_E_STACKOVERFLOW)
-
-#define END_ENTRYPOINT_NOTHROW_WITH_THREAD \
- END_SO_INTOLERANT_CODE;
-
-#define BEGIN_ENTRYPOINT_NOTHROW \
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return COR_E_STACKOVERFLOW)
-
-#define END_ENTRYPOINT_NOTHROW \
- END_SO_INTOLERANT_CODE;
-
-extern void (*g_fpHandleSoftStackOverflow)(BOOL fSkipDebugger);
-inline void FailedVoidEntryPoint()
-{
- if (g_fpHandleSoftStackOverflow)
- {
- g_fpHandleSoftStackOverflow(FALSE);
- }
-}
-#define BEGIN_ENTRYPOINT_VOIDRET \
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(FailedVoidEntryPoint();)
-
-#define END_ENTRYPOINT_VOIDRET \
- END_SO_INTOLERANT_CODE;
-
-#define BEGIN_CLEANUP_ENTRYPOINT \
- VALIDATE_BACKOUT_STACK_CONSUMPTION;
-
+#define BEGIN_ENTRYPOINT_THROWS
+#define END_ENTRYPOINT_THROWS
+#define BEGIN_ENTRYPOINT_THROWS_WITH_THREAD(____thread)
+#define END_ENTRYPOINT_THROWS_WITH_THREAD
+#define BEGIN_ENTRYPOINT_NOTHROW_WITH_THREAD(___thread)
+#define END_ENTRYPOINT_NOTHROW_WITH_THREAD
+#define BEGIN_ENTRYPOINT_NOTHROW
+#define END_ENTRYPOINT_NOTHROW
+#define BEGIN_ENTRYPOINT_VOIDRET
+#define END_ENTRYPOINT_VOIDRET
+#define BEGIN_CLEANUP_ENTRYPOINT
#define END_CLEANUP_ENTRYPOINT
#endif // __ENTRYPOINTS_h__
#include "winwrap.h"
#include "corerror.h"
#include "stresslog.h"
-#include "genericstackprobe.h"
#include "staticcontract.h"
#include "entrypoints.h"
bool IsCurrentExceptionSO();
// ---------------------------------------------------------------------------
-// Return TRUE if the current exception is hard( or soft) SO. Soft SO
-// is defined when the stack probing code is enabled (FEATURE_STACK_PROBE)
-// ---------------------------------------------------------------------------
-bool IsSOExceptionCode(DWORD exceptionCode);
-
-// ---------------------------------------------------------------------------
// Standard exception hierarchy & infrastructure for library code & EE
// ---------------------------------------------------------------------------
#endif
-#define HANDLE_SO_TOLERANCE_FOR_THROW
-
#define EX_THROW(_type, _args) \
{ \
FAULT_NOT_FATAL(); \
\
- HANDLE_SO_TOLERANCE_FOR_THROW; \
_type * ___pExForExThrow = new _type _args ; \
/* don't embed file names in retail to save space and avoid IP */ \
/* a findstr /n will allow you to locate it in a pinch */ \
{ \
FAULT_NOT_FATAL(); \
\
- HANDLE_SO_TOLERANCE_FOR_THROW; \
Exception *_inner2 = ExThrowWithInnerHelper(_inner); \
_type *___pExForExThrow = new _type _args ; \
___pExForExThrow->SetInnerException(_inner2); \
PAL_CPP_CATCH_ALL \
{ \
SCAN_EHMARKER_CATCH(); \
- VALIDATE_BACKOUT_STACK_CONSUMPTION; \
__defaultException_t __defaultException; \
CHECK::ResetAssert(); \
ExceptionHolder __pException(__state.m_pExceptionPtr); \
__state.m_pExceptionPtr = __pExceptionRaw; \
SCAN_EHMARKER_END_CATCH(); \
SCAN_IGNORE_THROW_MARKER; \
- VALIDATE_BACKOUT_STACK_CONSUMPTION; \
__defaultException_t __defaultException; \
CHECK::ResetAssert(); \
ExceptionHolder __pException(__state.m_pExceptionPtr); \
} \
SCAN_EHMARKER_END_CATCH(); \
} \
- EX_ENDTRY \
-
+ EX_ENDTRY
+
#define EX_ENDTRY \
- PAL_CPP_ENDTRY \
- if (__state.DidCatch()) \
- { \
- RESTORE_SO_TOLERANCE_STATE; \
- } \
- if (__state.DidCatchSO()) \
- { \
- HANDLE_STACKOVERFLOW_AFTER_CATCH; \
- }
+ PAL_CPP_ENDTRY
#define EX_RETHROW \
{ \
// exception. This will allow the stack to unwind point, and so we won't be jeopardizing a
// second stack overflow.
//===================================================================================
+#ifndef VM_NO_SO_INFRASTRUCTURE_CODE
+#define VM_NO_SO_INFRASTRUCTURE_CODE(x)
+#endif
+
#define EX_HOOK \
EX_CATCH \
{ \
if (!__state.DidCatchSO()) \
EX_RETHROW; \
EX_END_CATCH_FOR_HOOK; \
- SO_INFRASTRUCTURE_CODE(if (__state.DidCatchSO())) \
- SO_INFRASTRUCTURE_CODE(ThrowStackOverflow();) \
- } \
+ }
// ---------------------------------------------------------------------------
// Inline implementations. Pay no attention to that man behind the curtain.
inline Exception::HandlerState::HandlerState()
{
STATIC_CONTRACT_NOTHROW;
- STATIC_CONTRACT_SO_TOLERANT;
STATIC_CONTRACT_CANNOT_TAKE_LOCK;
STATIC_CONTRACT_SUPPORTS_DAC;
CONTRACTL
{
THROWS;
- SO_TOLERANT;
GC_NOTRIGGER;
}
CONTRACTL_END;
PCCOR_SIGNATURE pBegin, pEnd=NULL;
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(ThrowStackOverflow());
-
#ifdef __ILDASM__
ULONG L = (ULONG)(out->Size());
#endif
}
}
#endif
- END_SO_INTOLERANT_CODE;
return pEnd;
}
+++ /dev/null
-// Licensed to the .NET Foundation under one or more agreements.
-// The .NET Foundation licenses this file to you under the MIT license.
-// See the LICENSE file in the project root for more information.
-//
-
-//
-//-----------------------------------------------------------------------------
-// Generic Stack Probe Code
-// Used to setup stack guards and probes outside the VM tree
-//-----------------------------------------------------------------------------
-
-#ifndef __GENERICSTACKPROBE_h__
-#define __GENERICSTACKPROBE_h__
-
-#include "staticcontract.h"
-#include "predeftlsslot.h"
-
-#if defined(DISABLE_CONTRACTS)
-#undef FEATURE_STACK_PROBE
-#endif
-
-#if defined(FEATURE_STACK_PROBE)
-#ifdef _DEBUG
-#define STACK_GUARDS_DEBUG
-#else
-#define STACK_GUARDS_RELEASE
-#endif
-#endif
-
-#ifdef FEATURE_STACK_PROBE
-#define SO_INFRASTRUCTURE_CODE(x) x
-#define NO_SO_INFRASTRUCTURE_CODE_ASSERTE(x)
-#else
-#define SO_INFRASTRUCTURE_CODE(x)
-#define NO_SO_INFRASTRUCTURE_CODE_ASSERTE(x) _ASSERTE(x);
-#endif
-
-/* This macro is redefined in stackprobe.h
- * so that code expanded using this macro is present only for files
- * within VM directory. See StackProbe.h for more details
- */
-#define VM_NO_SO_INFRASTRUCTURE_CODE(x)
-
-// The types of stack validation we support in holders.
-enum HolderStackValidation
-{
- HSV_NoValidation,
- HSV_ValidateMinimumStackReq,
- HSV_ValidateNormalStackReq,
-};
-
-// Used to track transitions into the profiler
-#define REMOVE_STACK_GUARD_FOR_PROFILER_CALL \
- REMOVE_STACK_GUARD
-
-// For AMD64, the stack size is 4K, same as X86, but the pointer size is 64, so the
-// stack tends to grow a lot faster than X86.
-#ifdef _TARGET_AMD64_
-#define ADJUST_PROBE(n) (2 * (n))
-#else
-#define ADJUST_PROBE(n) (n)
-#endif
-
-#if defined(FEATURE_STACK_PROBE)
-
-#ifdef STACK_GUARDS_DEBUG // DAC and non-DAC - all data structures referenced in DAC'ized code
- // must be included so we can calculate layout. SO probes are not
- // active in the DAC but the SO probe structures contribute to layout
-
-
-// This class is used to place a marker upstack and verify that it was not overrun. It is
-// different from the full blown stack probes in that it does not chain with other probes or
-// test for stack overflow. Its sole purpose is to verify stack consumption.
-// It is effectively an implicit probe though, because we are guaranteeing that we have
-// enought stack to run and will not take an SO. So we enter SO-intolerant code when
-// we install one of these.
-
-class StackMarkerStack;
-struct ClrDebugState;
-
-class BaseStackMarker
-{
- friend StackMarkerStack;
-
- ClrDebugState *m_pDebugState;
- BOOL m_prevWasSOTolerant; // Were we SO-tolerant when we came in?
- BOOL m_fMarkerSet; // Has the marker been set?
- BOOL m_fTemporarilyDisabled;// Has the marker been temporarely disabled?
- BOOL m_fAddedToStack; // Has this BaseStackMarker been added to the stack of markers for the thread.
- float m_numPages;
- UINT_PTR *m_pMarker; // Pointer to where to put our marker cookie on the stack.
- BaseStackMarker*m_pPrevious;
- BOOL m_fProtectedStackPage;
- BOOL m_fAllowDisabling;
-
- BaseStackMarker() {}; // no default construction allowed
-
- // These should only be called by the ClrDebugState.
- void RareDisableMarker();
- void RareReEnableMarker();
-
- public:
- BaseStackMarker(float numPages, BOOL fAllowDisabling);
-
- // we have this so that the check of the global can be inlined
- // and we don't make the call to CheckMarker unless we need to.
- void CheckForBackoutViolation();
-
- void SetMarker(float numPages);
- void CheckMarker();
-
- void ProtectMarkerPageInDebugger();
- void UndoPageProtectionInDebugger();
-
-};
-
-class StackMarkerStack
-{
-public:
- // Since this is used from the ClrDebugState which can't have a default constructor,
- // we need to provide an Init method to intialize the instance instead of having a constructor.
- void Init()
- {
- m_pTopStackMarker = NULL;
- m_fDisabled = FALSE;
- }
-
- void PushStackMarker(BaseStackMarker *pStackMarker);
- BaseStackMarker *PopStackMarker();
-
- BOOL IsEmpty()
- {
- return (m_pTopStackMarker == NULL);
- }
- BOOL IsDisabled()
- {
- return m_fDisabled;
- }
-
- void RareDisableStackMarkers();
- void RareReEnableStackMarkers();
-
-private:
- BaseStackMarker *m_pTopStackMarker; // The top of the stack of stack markers for the current thread.
- BOOL m_fDisabled;
-};
-
-#endif // STACK_GUARDS_DEBUG
-
-#if !defined(DACCESS_COMPILE)
-
-// In debug builds, we redefine DEFAULT_ENTRY_PROBE_AMOUNT to a global static
-// so that we can tune the entry point probe size at runtime.
-#define DEFAULT_ENTRY_PROBE_SIZE 12
-#define DEFAULT_ENTRY_PROBE_AMOUNT DEFAULT_ENTRY_PROBE_SIZE
-
-#define BACKOUT_CODE_STACK_LIMIT 4.0
-#define HOLDER_CODE_NORMAL_STACK_LIMIT BACKOUT_CODE_STACK_LIMIT
-#define HOLDER_CODE_MINIMUM_STACK_LIMIT 0.25
-
-void DontCallDirectlyForceStackOverflow();
-void SOBackoutViolation(const char *szFunction, const char *szFile, int lineNum);
-typedef void *EEThreadHandle;
-class SOIntolerantTransitionHandler;
-extern bool g_StackProbingEnabled;
-extern void (*g_fpCheckForSOInSOIntolerantCode)();
-extern void (*g_fpSetSOIntolerantTransitionMarker)();
-extern BOOL (*g_fpDoProbe)(unsigned int n);
-extern void (*g_fpHandleSoftStackOverflow)(BOOL fSkipDebugger);
-
-// Once we enter SO-intolerant code, we can never take a hard SO as we will be
-// in an unknown state. SOIntolerantTransitionHandler is used to detect a hard SO in SO-intolerant
-// code and to raise a Fatal Error if one occurs.
-class SOIntolerantTransitionHandler
-{
-private:
- bool m_exceptionOccurred;
- void * m_pPreviousHandler;
-
-public:
- FORCEINLINE SOIntolerantTransitionHandler()
- {
- if (g_StackProbingEnabled)
- {
- CtorImpl();
- }
- }
-
- FORCEINLINE ~SOIntolerantTransitionHandler()
- {
- if (g_StackProbingEnabled)
- {
- DtorImpl();
- }
- }
-
- NOINLINE void CtorImpl();
- NOINLINE void DtorImpl();
-
- void SetNoException()
- {
- m_exceptionOccurred = false;
- }
-
- bool DidExceptionOccur()
- {
- return m_exceptionOccurred;
- }
-};
-
-
-extern void (*g_fpHandleStackOverflowAfterCatch)();
-void HandleStackOverflowAfterCatch();
-
-#if defined(STACK_GUARDS_DEBUG)
-
-#ifdef _WIN64
-#define STACK_COOKIE_VALUE 0x0123456789ABCDEF
-#define DISABLED_STACK_COOKIE_VALUE 0xDCDCDCDCDCDCDCDC
-#else
-#define STACK_COOKIE_VALUE 0x01234567
-#define DISABLED_STACK_COOKIE_VALUE 0xDCDCDCDC
-#endif
-
-// This allows us to adjust the probe amount at run-time in checked builds
-#undef DEFAULT_ENTRY_PROBE_AMOUNT
-#define DEFAULT_ENTRY_PROBE_AMOUNT g_EntryPointProbeAmount
-
-class BaseStackGuardGeneric;
-class BaseStackGuard;
-
-extern void (*g_fpRestoreCurrentStackGuard)(BOOL fDisabled);
-extern BOOL (*g_fp_BaseStackGuard_RequiresNStackPages)(BaseStackGuardGeneric *pGuard, unsigned int n, BOOL fThrowOnSO);
-extern void (*g_fp_BaseStackGuard_CheckStack)(BaseStackGuardGeneric *pGuard);
-extern BOOL (*g_fpCheckNStackPagesAvailable)(unsigned int n);
-extern BOOL g_ProtectStackPagesInDebugger;
-void RestoreSOToleranceState();
-void EnsureSOTolerant();
-
-extern BOOL g_EnableBackoutStackValidation;
-extern DWORD g_EntryPointProbeAmount;
-
-//-----------------------------------------------------------------------------
-// Check if a cookie is still at the given marker
-//-----------------------------------------------------------------------------
-inline BOOL IsMarkerOverrun(UINT_PTR *pMarker)
-{
- return (*pMarker != STACK_COOKIE_VALUE);
-}
-
-class AutoCleanupStackMarker : public BaseStackMarker
-{
-public:
- DEBUG_NOINLINE AutoCleanupStackMarker(float numPages) :
- BaseStackMarker(numPages, TRUE)
- {
- SCAN_SCOPE_BEGIN;
- ANNOTATION_FN_SO_INTOLERANT;
- }
-
- DEBUG_NOINLINE ~AutoCleanupStackMarker()
- {
- SCAN_SCOPE_END;
- CheckForBackoutViolation();
- }
-};
-
-#define VALIDATE_BACKOUT_STACK_CONSUMPTION \
- AutoCleanupStackMarker __stackMarker(ADJUST_PROBE(BACKOUT_CODE_STACK_LIMIT));
-
-#define VALIDATE_BACKOUT_STACK_CONSUMPTION_FOR(numPages) \
- AutoCleanupStackMarker __stackMarker(ADJUST_PROBE(numPages));
-
-#define UNSAFE_BEGIN_VALIDATE_BACKOUT_STACK_CONSUMPTION_NO_DISABLE \
- BaseStackMarker __stackMarkerNoDisable(ADJUST_PROBE(BACKOUT_CODE_STACK_LIMIT), FALSE);
-
-#define UNSAFE_BEGIN_VALIDATE_BACKOUT_STACK_CONSUMPTION_NO_DISABLE_FOR(numPages) \
- BaseStackMarker __stackMarkerNoDisable(ADJUST_PROBE(numPages), FALSE);
-
-#define UNSAFE_END_VALIDATE_BACKOUT_STACK_CONSUMPTION_NO_DISABLE \
- __stackMarkerNoDisable.CheckForBackoutViolation();
-
-#define VALIDATE_HOLDER_STACK_CONSUMPTION_FOR_TYPE(validationType) \
- _ASSERTE(validationType != HSV_NoValidation); \
- AutoCleanupStackMarker __stackMarker( \
- ADJUST_PROBE(validationType == HSV_ValidateNormalStackReq ? HOLDER_CODE_NORMAL_STACK_LIMIT : HOLDER_CODE_MINIMUM_STACK_LIMIT));
-
-class AutoCleanupDisableBackoutStackValidation
-{
- public:
- AutoCleanupDisableBackoutStackValidation();
- ~AutoCleanupDisableBackoutStackValidation();
-
-private:
- BOOL m_fAlreadyDisabled;
-
-};
-
-// This macros disables the backout stack validation in the current scope. It should
-// only be used in very rare situations. If you think you might have such a situation,
-// please talk to the stack overflow devs before using it.
-#define DISABLE_BACKOUT_STACK_VALIDATION \
- AutoCleanupDisableBackoutStackValidation __disableBacoutStackValidation;
-
-// In debug mode, we want to do a little more work on this transition to note the transition in the thread.
-class DebugSOIntolerantTransitionHandler : public SOIntolerantTransitionHandler
-{
- BOOL m_prevSOTolerantState;
- ClrDebugState* m_clrDebugState;
-
- public:
- DebugSOIntolerantTransitionHandler();
- ~DebugSOIntolerantTransitionHandler();
-};
-
-// This is the base class structure for our probe infrastructure. We declare it here
-// so that we can properly declare instances outside of the VM tree. But we only do the
-// probes when we have a managed thread.
-class BaseStackGuardGeneric
-{
-public:
- enum
- {
- cPartialInit, // Not yet intialized
- cInit, // Initialized and installed
- cUnwound, // Unwound on a normal path (used for debugging)
- cEHUnwound // Unwound on an exception path (used for debugging)
- } m_eInitialized;
-
- // *** Following fields must not move. The fault injection framework depends on them.
- BaseStackGuard *m_pPrevGuard; // Previous guard for this thread.
- UINT_PTR *m_pMarker; // Pointer to where to put our marker cookie on the stack.
- unsigned int m_numPages; // space needed, specified in number of pages
- BOOL m_isBoundaryGuard; // used to mark when we've left the EE
- BOOL m_fDisabled; // Used to enable/disable stack guard
-
-
- // *** End of fault injection-dependent fields
-
- // The following fields are really here to provide us with some nice debugging information.
- const char *m_szFunction;
- const char *m_szFile;
- unsigned int m_lineNum;
- const char *m_szNextFunction; // Name of the probe that came after us.
- const char *m_szNextFile;
- unsigned int m_nextLineNum;
- DWORD m_UniqueId;
- unsigned int m_depth; // How deep is this guard in the list of guards for this thread?
- BOOL m_fProtectedStackPage; // TRUE if we protected a stack page with PAGE_NOACCESS.
- BOOL m_fEHInProgress; // Is an EH in progress? This is cleared on a catch.
- BOOL m_exceptionOccurred; // Did an exception occur through this probe?
-
-protected:
- BaseStackGuardGeneric()
- {
- }
-
-public:
- BaseStackGuardGeneric(const char *szFunction, const char *szFile, unsigned int lineNum) :
- m_pPrevGuard(NULL), m_pMarker(NULL),
- m_szFunction(szFunction), m_szFile(szFile), m_lineNum(lineNum),
- m_szNextFunction(NULL), m_szNextFile(NULL), m_nextLineNum(0),
- m_fProtectedStackPage(FALSE), m_UniqueId(-1), m_numPages(0),
- m_eInitialized(cPartialInit), m_fDisabled(FALSE),
- m_isBoundaryGuard(FALSE),
- m_fEHInProgress(FALSE),
- m_exceptionOccurred(FALSE)
- {
- STATIC_CONTRACT_LEAF;
- }
-
- BOOL RequiresNStackPages(unsigned int n, BOOL fThrowOnSO = TRUE)
- {
- if (g_fp_BaseStackGuard_RequiresNStackPages == NULL)
- {
- return TRUE;
- }
- return g_fp_BaseStackGuard_RequiresNStackPages(this, n, fThrowOnSO);
- }
-
- BOOL RequiresNStackPagesThrowing(unsigned int n)
- {
- if (g_fp_BaseStackGuard_RequiresNStackPages == NULL)
- {
- return TRUE;
- }
- return g_fp_BaseStackGuard_RequiresNStackPages(this, n, TRUE);
- }
-
- BOOL RequiresNStackPagesNoThrow(unsigned int n)
- {
- if (g_fp_BaseStackGuard_RequiresNStackPages == NULL)
- {
- return TRUE;
- }
- return g_fp_BaseStackGuard_RequiresNStackPages(this, n, FALSE);
- }
-
- void CheckStack()
- {
- if (m_eInitialized == cInit)
- {
- g_fp_BaseStackGuard_CheckStack(this);
- }
- }
-
- void SetNoException()
- {
- m_exceptionOccurred = FALSE;
- }
-
- BOOL DidExceptionOccur()
- {
- return m_exceptionOccurred;
- }
-
- BOOL Enabled()
- {
- return !m_fDisabled;
- }
-
- void DisableGuard()
- {
- // As long as we don't have threads mucking with other thread's stack
- // guards, we don't need to synchronize this.
- m_fDisabled = TRUE;
- }
-
- void EnableGuard()
- {
- // As long as we don't have threads mucking with other thread's stack
- // guards, we don't need to synchronize this.
- m_fDisabled = FALSE;
- }
-
-
-};
-
-class StackGuardDisabler
-{
- BOOL m_fDisabledGuard;
-
-public:
- StackGuardDisabler();
- ~StackGuardDisabler();
- void NeverRestoreGuard();
-
-};
-
-
-
-// Derived version, add a dtor that automatically calls Check_Stack, move convenient, but can't use with SEH.
-class AutoCleanupStackGuardGeneric : public BaseStackGuardGeneric
-{
-protected:
- AutoCleanupStackGuardGeneric()
- {
- }
-
-public:
- AutoCleanupStackGuardGeneric(const char *szFunction, const char *szFile, unsigned int lineNum) :
- BaseStackGuardGeneric(szFunction, szFile, lineNum)
- {
- STATIC_CONTRACT_LEAF;
- }
-
- ~AutoCleanupStackGuardGeneric()
- {
- STATIC_CONTRACT_WRAPPER;
- CheckStack();
- }
-};
-
-
-// Used to remove stack guard... (kind of like a poor man's BEGIN_SO_TOLERANT
-#define REMOVE_STACK_GUARD \
- StackGuardDisabler __guardDisable;
-
-// Used to transition into intolerant code when handling a SO
-#define BEGIN_SO_INTOLERANT_CODE_NOPROBE \
- { \
- DebugSOIntolerantTransitionHandler __soIntolerantTransitionHandler; \
- /* work around unreachable code warning */ \
- if (true) \
- { \
- DEBUG_ASSURE_NO_RETURN_BEGIN(SO_INTOLERANT)
-
-#define END_SO_INTOLERANT_CODE_NOPROBE \
- ; \
- DEBUG_ASSURE_NO_RETURN_END(SO_INTOLERANT) \
- } \
- __soIntolerantTransitionHandler.SetNoException(); \
- } \
-
-
-
-#define BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(ActionOnSO) \
- { \
- AutoCleanupStackGuardGeneric stack_guard_XXX(__FUNCTION__, __FILE__, __LINE__); \
- if (! stack_guard_XXX.RequiresNStackPagesNoThrow(ADJUST_PROBE(g_EntryPointProbeAmount))) \
- { \
- ActionOnSO; \
- } \
- else \
- { \
- DebugSOIntolerantTransitionHandler __soIntolerantTransitionHandler; \
- ANNOTATION_SO_PROBE_BEGIN(DEFAULT_ENTRY_PROBE_AMOUNT); \
- if (true) \
- { \
- DEBUG_ASSURE_NO_RETURN_BEGIN(SO_INTOLERANT)
-
-
-#define END_SO_INTOLERANT_CODE \
- ; \
- DEBUG_ASSURE_NO_RETURN_END(SO_INTOLERANT) \
- } \
- ANNOTATION_SO_PROBE_END; \
- __soIntolerantTransitionHandler.SetNoException(); \
- stack_guard_XXX.SetNoException(); \
- } \
- } \
-
-
-#define BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD_FORCE_SO() \
- EnsureSOTolerant(); \
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(DontCallDirectlyForceStackOverflow()); \
-
-
-// Restores the SO-tolerance state and the marker for the current guard if any
-#define RESTORE_SO_TOLERANCE_STATE \
- RestoreSOToleranceState();
-
-#define HANDLE_STACKOVERFLOW_AFTER_CATCH \
- HandleStackOverflowAfterCatch()
-
-#elif defined(STACK_GUARDS_RELEASE)
-
-#define VALIDATE_BACKOUT_STACK_CONSUMPTION
-#define VALIDATE_BACKOUT_STACK_CONSUMPTION_FOR
-#define UNSAFE_BEGIN_VALIDATE_BACKOUT_STACK_CONSUMPTION_NO_DISABLE
-#define UNSAFE_BEGIN_VALIDATE_BACKOUT_STACK_CONSUMPTION_NO_DISABLE_FOR(numPages)
-#define UNSAFE_END_VALIDATE_BACKOUT_STACK_CONSUMPTION_NO_DISABLE
-#define VALIDATE_HOLDER_STACK_CONSUMPTION_FOR_TYPE(validationType)
-#define RESTORE_SO_TOLERANCE_STATE
-#define HANDLE_STACKOVERFLOW_AFTER_CATCH \
- HandleStackOverflowAfterCatch()
-#define DISABLE_BACKOUT_STACK_VALIDATION
-#define BACKOUT_STACK_VALIDATION_VIOLATION
-#define BEGIN_SO_INTOLERANT_CODE_NOPROBE
-#define END_SO_INTOLERANT_CODE_NOPROBE
-#define REMOVE_STACK_GUARD
-
-#define BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(ActionOnSO) \
- { \
- if (g_StackProbingEnabled && !g_fpDoProbe(ADJUST_PROBE(DEFAULT_ENTRY_PROBE_AMOUNT)))\
- { \
- ActionOnSO; \
- } else { \
- SOIntolerantTransitionHandler __soIntolerantTransitionHandler; \
- /* work around unreachable code warning */ \
- if (true) \
- { \
- DEBUG_ASSURE_NO_RETURN_BEGIN(SO_INTOLERANT)
-
-#define BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD_FORCE_SO() \
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(DontCallDirectlyForceStackOverflow()); \
-
-#define END_SO_INTOLERANT_CODE \
- ; \
- DEBUG_ASSURE_NO_RETURN_END(SO_INTOLERANT) \
- } \
- __soIntolerantTransitionHandler.SetNoException(); \
- } \
- }
-
-#endif
-
-#endif // !DACCESS_COMPILE
-#endif // FEATURE_STACK_PROBES
-
-// if the feature is off or we are compiling for DAC, disable all the probes
-#if !defined(FEATURE_STACK_PROBE) || defined(DACCESS_COMPILE)
-
-#define VALIDATE_BACKOUT_STACK_CONSUMPTION
-#define VALIDATE_BACKOUT_STACK_CONSUMPTION_FOR
-#define UNSAFE_BEGIN_VALIDATE_BACKOUT_STACK_CONSUMPTION_NO_DISABLE
-#define UNSAFE_BEGIN_VALIDATE_BACKOUT_STACK_CONSUMPTION_NO_DISABLE_FOR(numPages)
-#define UNSAFE_END_VALIDATE_BACKOUT_STACK_CONSUMPTION_NO_DISABLE
-#define VALIDATE_HOLDER_STACK_CONSUMPTION_FOR_TYPE(validationType)
-#define BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(ActionOnSO)
-#define BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD_FORCE_SO()
-#define END_SO_INTOLERANT_CODE
-#define RESTORE_SO_TOLERANCE_STATE
-
-#define HANDLE_STACKOVERFLOW_AFTER_CATCH
-
-#define DISABLE_BACKOUT_STACK_VALIDATION
-#define BACKOUT_STACK_VALIDATION_VIOLATION
-#define BEGIN_SO_INTOLERANT_CODE_NOPROBE
-#define END_SO_INTOLERANT_CODE_NOPROBE
-#define REMOVE_STACK_GUARD
-
-// Probe size is 0 as Stack Overflow probing is not enabled
-#define DEFAULT_ENTRY_PROBE_AMOUNT 0
-
-#define BACKOUT_CODE_STACK_LIMIT 0
-
-#endif //!FEATURE_STACK_PROBE || DACCESS_COMPILE
-
-#endif // __GENERICSTACKPROBE_h__
+++ /dev/null
-// Licensed to the .NET Foundation under one or more agreements.
-// The .NET Foundation licenses this file to you under the MIT license.
-// See the LICENSE file in the project root for more information.
-//
-
-//
-
-
-#ifndef _GENERICSTACKPROBE_INL_
-#define _GENERICSTACKPROBE_INL_
-
-#include "genericstackprobe.h"
-
-
-#endif // _GENERICSTACKPROBE_INL_
#include <wincrypt.h>
#include "cor.h"
-#include "genericstackprobe.h"
#include "staticcontract.h"
#include "volatile.h"
#include "palclr.h"
_NAME & operator=(_NAME const &);
#endif
+// The types of stack validation we support in holders.
+enum HolderStackValidation
+{
+ HSV_NoValidation,
+ HSV_ValidateMinimumStackReq,
+ HSV_ValidateNormalStackReq,
+};
#ifdef _DEBUG
HolderBase(TYPE value)
: m_value(value)
{
- // TODO: Find a way to enable this check.
- // We can have a holder in SO tolerant, then probe, then acquire a value. This works
- // because the dtor is guaranteed to run with enough stack.
- // EnsureSOIntolerantOK(__FUNCTION__, __FILE__, __LINE__);
-
#ifdef _DEBUG
m_pAutoExpVisibleValue = (const AutoExpVisibleValue *)(&m_value);
#endif //_DEBUG
if (VALIDATION_TYPE != HSV_NoValidation)
{
- VALIDATE_HOLDER_STACK_CONSUMPTION_FOR_TYPE(VALIDATION_TYPE);
this->DoRelease();
}
else
{
if (VALIDATION_TYPE != HSV_NoValidation)
{
- VALIDATE_HOLDER_STACK_CONSUMPTION_FOR_TYPE(VALIDATION_TYPE);
RELEASEF();
}
else
{
if (VALIDATION_TYPE != HSV_NoValidation)
{
- VALIDATE_HOLDER_STACK_CONSUMPTION_FOR_TYPE(VALIDATION_TYPE);
RELEASEF(m_value);
}
else
}
-
-
//-----------------------------------------------------------------------------
// Holder/Wrapper are the simplest way to define holders - they synthesizes a base class out of
// function pointers
// one that is already being done in BaseHolder & BaseWrapper. </TODO>
if (VALIDATION_TYPE != HSV_NoValidation)
{
- VALIDATE_HOLDER_STACK_CONSUMPTION_FOR_TYPE(VALIDATION_TYPE);
RELEASEF(this->m_value);
}
else
{
if (value)
{
- VALIDATE_HOLDER_STACK_CONSUMPTION_FOR_TYPE(HSV_ValidateNormalStackReq);
value->Release();
}
}
THROWS;
GC_NOTRIGGER;
INJECT_FAULT(ThrowOutOfMemory());
- SO_INTOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
FORBID_FAULT;
- SO_INTOLERANT;
}
CONTRACTL_END;
{
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
- STATIC_CONTRACT_SO_NOT_MAINLINE;
if (!Enabled())
return;
{
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
- STATIC_CONTRACT_SO_NOT_MAINLINE;
if (s_IBCLogMetaDataAccess != NULL)
s_IBCLogMetaDataAccess(address);
{
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
- STATIC_CONTRACT_SO_NOT_MAINLINE;
if (s_IBCLogMetaDataSearch != NULL && result != NULL)
s_IBCLogMetaDataSearch(result);
GC_NOTRIGGER;
INJECT_FAULT(ThrowOutOfMemory());
POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
- SO_INTOLERANT;
}
CONTRACT_END;
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
STATIC_CONTRACT_CANNOT_TAKE_LOCK;
- STATIC_CONTRACT_SO_TOLERANT;
#ifdef CROSSGEN_COMPILE
return TRUE;
#if defined(_DEBUG_IMPL) && !defined(JIT_BUILD) && !defined(JIT64_BUILD) && !defined(CROSS_COMPILE) && !defined(_TARGET_ARM_) // @ARMTODO: no contracts for speed
#define PAL_TRY_HANDLER_DBG_BEGIN \
BOOL ___oldOkayToThrowValue = FALSE; \
- SO_INFRASTRUCTURE_CODE(BOOL ___oldSOTolerantState = FALSE;) \
ClrDebugState *___pState = ::GetClrDebugState(); \
__try \
{ \
___oldOkayToThrowValue = ___pState->IsOkToThrow(); \
- SO_INFRASTRUCTURE_CODE(___oldSOTolerantState = ___pState->IsSOTolerant();) \
___pState->SetOkToThrow(); \
PAL_ENTER_THROWS_REGION;
// Special version that avoids touching the debug state after doing work in a DllMain for process or thread detach.
#define PAL_TRY_HANDLER_DBG_BEGIN_DLLMAIN(_reason) \
BOOL ___oldOkayToThrowValue = FALSE; \
- SO_INFRASTRUCTURE_CODE(BOOL ___oldSOTolerantState = FALSE;) \
ClrDebugState *___pState = NULL; \
if (_reason != DLL_PROCESS_ATTACH) \
___pState = CheckClrDebugState(); \
if (___pState) \
{ \
___oldOkayToThrowValue = ___pState->IsOkToThrow(); \
- SO_INFRASTRUCTURE_CODE(___oldSOTolerantState = ___pState->IsSOTolerant();) \
___pState->SetOkToThrow(); \
} \
if ((_reason == DLL_PROCESS_DETACH) || (_reason == DLL_THREAD_DETACH)) \
{ \
_ASSERTE(___pState == CheckClrDebugState()); \
___pState->SetOkToThrow( ___oldOkayToThrowValue ); \
- SO_INFRASTRUCTURE_CODE(___pState->SetSOTolerance( ___oldSOTolerantState );) \
} \
}
-#define PAL_ENDTRY_NAKED_DBG \
- if (__exHandled) \
- { \
- RESTORE_SO_TOLERANCE_STATE; \
- } \
-
+#define PAL_ENDTRY_NAKED_DBG
+
#else
#define PAL_TRY_HANDLER_DBG_BEGIN ANNOTATION_TRY_BEGIN;
#define PAL_TRY_HANDLER_DBG_BEGIN_DLLMAIN(_reason) ANNOTATION_TRY_BEGIN;
#if defined(_DEBUG_IMPL) && !defined(JIT_BUILD) && !defined(JIT64_BUILD) && !defined(_ARM_) // @ARMTODO
#define WIN_PAL_TRY_HANDLER_DBG_BEGIN \
BOOL ___oldOkayToThrowValue = FALSE; \
- BOOL ___oldSOTolerantState = FALSE; \
ClrDebugState *___pState = GetClrDebugState(); \
__try \
{ \
___oldOkayToThrowValue = ___pState->IsOkToThrow(); \
- ___oldSOTolerantState = ___pState->IsSOTolerant(); \
___pState->SetOkToThrow(TRUE); \
ANNOTATION_TRY_BEGIN;
if (___pState) \
{ \
___oldOkayToThrowValue = ___pState->IsOkToThrow(); \
- ___oldSOTolerantState = ___pState->IsSOTolerant(); \
___pState->SetOkToThrow(TRUE); \
} \
if ((_reason == DLL_PROCESS_DETACH) || (_reason == DLL_THREAD_DETACH)) \
} \
}
-#define WIN_PAL_ENDTRY_NAKED_DBG \
- if (__exHandled) \
- { \
- RESTORE_SO_TOLERANCE_STATE; \
- } \
-
+#define WIN_PAL_ENDTRY_NAKED_DBG
+
#else
#define WIN_PAL_TRY_HANDLER_DBG_BEGIN ANNOTATION_TRY_BEGIN;
#define WIN_PAL_TRY_HANDLER_DBG_BEGIN_DLLMAIN(_reason) ANNOTATION_TRY_BEGIN;
PRECONDITION(PEDecoder(mappedBase,fixedUp).CheckNTHeaders());
THROWS;
GC_NOTRIGGER;
- SO_TOLERANT;
SUPPORTS_DAC;
}
CONTRACTL_END;
PRECONDITION(HasNTHeaders());
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
SUPPORTS_DAC;
CANNOT_TAKE_LOCK;
SUPPORTS_DAC;
PRECONDITION(CheckNTHeaders());
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
}
CONTRACT_END;
NOTHROW;
GC_NOTRIGGER;
SUPPORTS_DAC;
- SO_TOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
SUPPORTS_DAC;
- SO_TOLERANT;
}
CONTRACTL_END;
PRECONDITION(CheckNTHeaders());
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
SUPPORTS_DAC;
- SO_TOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
POSTCONDITION(CheckPointer(RETVAL));
- SO_TOLERANT;
CANNOT_TAKE_LOCK;
SUPPORTS_DAC;
}
NOTHROW;
GC_NOTRIGGER;
POSTCONDITION(CheckPointer((void *)RETVAL, NULL_OK));
- SO_TOLERANT;
CANNOT_TAKE_LOCK;
SUPPORTS_DAC;
}
PRECONDITION(CheckDirectory(pDir, 0, NULL_OK));
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
SUPPORTS_DAC;
POSTCONDITION(CheckPointer((void *)RETVAL, NULL_OK));
CANNOT_TAKE_LOCK;
PRECONDITION(CheckPointer(pSize));
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
SUPPORTS_DAC;
POSTCONDITION(CheckPointer((void *)RETVAL, NULL_OK));
CANNOT_TAKE_LOCK;
NOTHROW;
GC_NOTRIGGER;
POSTCONDITION(CheckPointer((void *)RETVAL));
- SO_TOLERANT;
}
CONTRACT_END;
NOTHROW;
SUPPORTS_DAC;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACTL_END;
PRECONDITION(HasCorHeader());
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
SUPPORTS_DAC;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
SUPPORTS_DAC;
- SO_TOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
SUPPORTS_DAC;
- SO_TOLERANT;
}
CONTRACTL_END;
PRECONDITION(CheckNTHeaders());
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACTL_END;
PRECONDITION(CheckNTHeaders());
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACT_CHECK_END;
PRECONDITION(HasCorHeader());
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
POSTCONDITION(CheckPointer(RETVAL));
CANNOT_TAKE_LOCK;
SUPPORTS_DAC;
NOTHROW;
GC_NOTRIGGER;
POSTCONDITION(CheckPointer(RETVAL));
- SO_TOLERANT;
SUPPORTS_DAC;
CANNOT_TAKE_LOCK;
- SO_TOLERANT;
}
CONTRACT_END;
PRECONDITION(CheckNativeHeader());
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
CANNOT_TAKE_LOCK;
SUPPORTS_DAC;
}
NOTHROW;
GC_NOTRIGGER;
POSTCONDITION(CheckPointer(RETVAL));
- SO_TOLERANT;
CANNOT_TAKE_LOCK;
SUPPORTS_DAC;
}
GC_NOTRIGGER;
POSTCONDITION(CheckPointer(RETVAL));
CANNOT_TAKE_LOCK;
- SO_TOLERANT;
SUPPORTS_DAC;
}
CONTRACT_END;
NOTHROW;
GC_NOTRIGGER;
POSTCONDITION(CheckPointer(RETVAL));
- SO_TOLERANT;
CANNOT_TAKE_LOCK;
SUPPORTS_DAC;
}
NOTHROW;
GC_NOTRIGGER;
CANNOT_TAKE_LOCK;
- SO_TOLERANT;
SUPPORTS_DAC;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
POSTCONDITION(CheckPointer(RETVAL));
- SO_TOLERANT;
SUPPORTS_DAC;
CANNOT_TAKE_LOCK;
- SO_TOLERANT;
}
CONTRACT_END;
TlsIdx_PEXCEPTION_RECORD,
TlsIdx_PCONTEXT,
- TlsIdx_SOIntolerantTransitionHandler, // The thread is entering SO intolerant code. This one is used by
- // Thread::IsSOIntolerant to decide the SO mode of the thread.
MAX_PREDEFINED_TLS_SLOT
};
NOTHROW;
GC_NOTRIGGER;
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
SUPPORTS_DAC;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
CONTRACTL
{
NOTHROW;
- SO_TOLERANT;
GC_NOTRIGGER;
}
CONTRACTL_END;
POSTCONDITION(Equals(buffer, size));
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
SUPPORTS_DAC_HOST_ONLY;
}
CONTRACT_END;
{
NOTHROW;
DESTRUCTOR_CHECK;
- SO_TOLERANT;
GC_NOTRIGGER;
- SO_TOLERANT;
SUPPORTS_DAC_HOST_ONLY;
}
CONTRACT_END;
- VALIDATE_BACKOUT_STACK_CONSUMPTION;
if (IsAllocated())
{
INSTANCE_CHECK;
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
SUPPORTS_DAC;
}
CONTRACT_END;
PRECONDITION(compare.Check());
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACT_END;
PRECONDITION(CheckSize(size));
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACT_END;
POSTCONDITION(GetSize() == size);
POSTCONDITION(CheckInvariant(*this));
NOTHROW;
- SO_TOLERANT;
GC_NOTRIGGER;
SUPPORTS_DAC_HOST_ONLY;
}
{
PRECONDITION(CheckSize(allocation));
POSTCONDITION(CheckPointer(buffer));
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
SUPPORTS_DAC_HOST_ONLY;
}
CONTRACT_END;
- VALIDATE_BACKOUT_STACK_CONSUMPTION;
CONSISTENCY_CHECK(CheckBuffer(buffer, allocation));
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
CANNOT_TAKE_LOCK;
PRECONDITION(CheckPointer(buffer));
}
PRECONDITION((value & ~REPRESENTATION_MASK) == 0);
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
SUPPORTS_DAC_HOST_ONLY;
}
CONTRACT_END;
CONSTRUCTOR_CHECK;
POSTCONDITION(IsEmpty());
NOTHROW;
- SO_TOLERANT;
GC_NOTRIGGER;
}
CONTRACT_END;
RETURN;
#else
STATIC_CONTRACT_NOTHROW;
- STATIC_CONTRACT_SO_TOLERANT;
STATIC_CONTRACT_GC_NOTRIGGER;
STATIC_CONTRACT_SUPPORTS_DAC_HOST_ONLY;
#endif
NOTHROW;
GC_NOTRIGGER;
CANNOT_TAKE_LOCK;
- SO_TOLERANT;
SUPPORTS_DAC;
}
CONTRACTL_END;
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
STATIC_CONTRACT_CANNOT_TAKE_LOCK;
- STATIC_CONTRACT_SO_TOLERANT;
STATIC_CONTRACT_SUPPORTS_DAC;
#endif
GC_NOTRIGGER;
PRECONDITION(CheckPointer(this));
NOTHROW;
- SO_TOLERANT;
SUPPORTS_DAC;
}
SS_CONTRACT_END;
GC_NOTRIGGER;
PRECONDITION(CheckPointer(this));
NOTHROW;
- SO_TOLERANT;
}
SS_CONTRACT_END;
PRECONDITION(CheckSize(size));
SS_POSTCONDITION(CountToSize(RETVAL) == size);
NOTHROW;
- SO_TOLERANT;
SUPPORTS_DAC;
}
SS_CONTRACT_END;
{
STATIC_CONTRACT_GC_NOTRIGGER;
STATIC_CONTRACT_NOTHROW;
- STATIC_CONTRACT_SO_TOLERANT;
STATIC_CONTRACT_SUPPORTS_DAC;
return (GetSize() >> GetCharacterSizeShift());
#define ANNOTATION_IGNORE_LOCK __annotation(W("CAN_TAKE_LOCK"), W("CANNOT_TAKE_LOCK"), W("CONDITIONAL_EXEMPT"))
#define ANNOTATION_IGNORE_FAULT __annotation(W("FAULT"), W("FORBID_FAULT"), W("CONDITIONAL_EXEMPT"))
#define ANNOTATION_IGNORE_TRIGGER __annotation(W("GC_TRIGGERS"), W("GC_NOTRIGGER"), W("CONDITIONAL_EXEMPT"))
-#define ANNOTATION_IGNORE_SO __annotation(W("SO_TOLERANT"), W("SO_INTOLERANT"), W("CONDITIONAL_EXEMPT"))
#define ANNOTATION_VIOLATION(violationmask) __annotation(W("VIOLATION(") L#violationmask W(")"))
#define ANNOTATION_UNCHECKED(thecheck) __annotation(W("UNCHECKED(") L#thecheck W(")"))
#define ANNOTATION_FN_FORBID_FAULT __annotation(W("FORBID_FAULT ") SCAN_WIDEN(__FUNCTION__))
#define ANNOTATION_FN_GC_TRIGGERS __annotation(W("GC_TRIGGERS ") SCAN_WIDEN(__FUNCTION__))
#define ANNOTATION_FN_GC_NOTRIGGER __annotation(W("GC_NOTRIGGER ") SCAN_WIDEN(__FUNCTION__))
-#define ANNOTATION_FN_SO_TOLERANT __annotation(W("SO_TOLERANT ") SCAN_WIDEN(__FUNCTION__))
-#define ANNOTATION_FN_SO_INTOLERANT __annotation(W("SO_INTOLERANT ") SCAN_WIDEN(__FUNCTION__))
-#define ANNOTATION_FN_SO_NOT_MAINLINE __annotation(W("SO_NOT_MAINLINE ") SCAN_WIDEN(__FUNCTION__))
#define ANNOTATION_FN_MODE_COOPERATIVE __annotation(W("MODE_COOPERATIVE ") SCAN_WIDEN(__FUNCTION__))
#define ANNOTATION_FN_MODE_PREEMPTIVE __annotation(W("MODE_PREEMPTIVE ") SCAN_WIDEN(__FUNCTION__))
#define ANNOTATION_FN_MODE_ANY __annotation(W("MODE_ANY ") SCAN_WIDEN(__FUNCTION__))
#define ANNOTATION_IGNORE_LOCK { }
#define ANNOTATION_IGNORE_FAULT { }
#define ANNOTATION_IGNORE_TRIGGER { }
-#define ANNOTATION_IGNORE_SO { }
#define ANNOTATION_VIOLATION(violationmask) { }
#define ANNOTATION_UNCHECKED(thecheck) { }
#define ANNOTATION_FN_FORBID_FAULT { }
#define ANNOTATION_FN_GC_TRIGGERS { }
#define ANNOTATION_FN_GC_NOTRIGGER { }
-#define ANNOTATION_FN_SO_TOLERANT { }
-#define ANNOTATION_FN_SO_INTOLERANT { }
-#define ANNOTATION_FN_SO_NOT_MAINLINE { }
#define ANNOTATION_FN_MODE_COOPERATIVE { }
#define ANNOTATION_FN_MODE_PREEMPTIVE { }
#define ANNOTATION_FN_MODE_ANY { }
#define ANNOTATION_SO_PROBE_BEGIN(probeAmount) { }
#define ANNOTATION_SO_PROBE_END { }
-#define ANNOTATION_SO_TOLERANT { }
-#define ANNOTATION_SO_INTOLERANT { }
-#define ANNOTATION_SO_NOT_MAINLINE { }
-#define ANNOTATION_SO_NOT_MAINLINE_BEGIN { }
-#define ANNOTATION_SO_NOT_MAINLINE_END { }
#define ANNOTATION_ENTRY_POINT { }
#ifdef _DEBUG
#define ANNOTATION_DEBUG_ONLY { }
#define STATIC_CONTRACT_LIMITED_METHOD ANNOTATION_FN_LEAF
#define STATIC_CONTRACT_WRAPPER ANNOTATION_FN_WRAPPER
-#ifdef FEATURE_STACK_PROBE // Static SO contracts only required when SO Infrastructure code is present
-#define STATIC_CONTRACT_SO_INTOLERANT ANNOTATION_FN_SO_INTOLERANT
-#define STATIC_CONTRACT_SO_TOLERANT ANNOTATION_FN_SO_TOLERANT
-#define STATIC_CONTRACT_SO_NOT_MAINLINE ANNOTATION_FN_SO_NOT_MAINLINE
-
-#define STATIC_CONTRACT_ENTRY_POINT ANNOTATION_ENTRY_POINT; ANNOTATION_FN_SO_TOLERANT
-#else // FEATURE_STACK_PROBE
-#define STATIC_CONTRACT_SO_INTOLERANT
-#define STATIC_CONTRACT_SO_TOLERANT
-#define STATIC_CONTRACT_SO_NOT_MAINLINE
#define STATIC_CONTRACT_ENTRY_POINT
-#endif // FEATURE_STACK_PROBE
#ifdef _DEBUG
#define STATIC_CONTRACT_DEBUG_ONLY \
ANNOTATION_DEBUG_ONLY; \
STATIC_CONTRACT_CANNOT_TAKE_LOCK; \
- ANNOTATION_VIOLATION(TakesLockViolation); \
- ANNOTATION_FN_SO_NOT_MAINLINE;
+ ANNOTATION_VIOLATION(TakesLockViolation);
#else
#define STATIC_CONTRACT_DEBUG_ONLY
#endif
METHOD_CANNOT_BE_FOLDED_DEBUG;
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_GC_NOTRIGGER;
- STATIC_CONTRACT_SO_TOLERANT;
}
static void used()
#define STATIC_CONTRACT_THROWS_TERMINAL \
typedef StaticContract::ScanThrowMarkerTerminal ScanThrowMarker; if (0) ScanThrowMarker::used();
-#if defined(_DEBUG) && !defined(DACCESS_COMPILE) && defined(FEATURE_STACK_PROBE) && !defined(_TARGET_ARM_) // @ARMTODO
-extern void EnsureSOIntolerantOK(const char *szFunction, const char *szFile, int lineNum);
-
-extern BOOL (*g_fpShouldValidateSOToleranceOnThisThread)();
-
-// @todo Is there any checks we can do here?
-#define ENSURE_SHOULD_NOT_PROBE_FOR_SO
-
-#define CHECK_IF_SO_INTOLERANT_OK \
- EnsureSOIntolerantOK(__FUNCTION__, __FILE__, __LINE__);
-
-// Even if we can't have a full-blown contract, we can at least check
-// if its ok to run an SO-Intolerant function.
-#undef STATIC_CONTRACT_SO_INTOLERANT
-#define STATIC_CONTRACT_SO_INTOLERANT \
- ANNOTATION_FN_SO_INTOLERANT; \
- CHECK_IF_SO_INTOLERANT_OK;
-
-#undef STATIC_CONTRACT_SO_NOT_MAINLINE
-#define STATIC_CONTRACT_SO_NOT_MAINLINE \
- ENSURE_SHOULD_NOT_PROBE_FOR_SO \
- ANNOTATION_FN_SO_NOT_MAINLINE
-
-#else
-#define EnsureSOIntolerantOK(x,y,z)
-
-#endif
-
-
#ifdef _MSC_VER
#define SCAN_IGNORE_THROW typedef StaticContract::ScanThrowMarkerIgnore ScanThrowMarker; ANNOTATION_IGNORE_THROW
#define SCAN_IGNORE_LOCK ANNOTATION_IGNORE_LOCK
#define SCAN_IGNORE_FAULT ANNOTATION_IGNORE_FAULT
#define SCAN_IGNORE_TRIGGER ANNOTATION_IGNORE_TRIGGER
-#define SCAN_IGNORE_SO ANNOTATION_IGNORE_SO
#else
#define SCAN_IGNORE_THROW
#define SCAN_IGNORE_LOCK
#define SCAN_IGNORE_FAULT
#define SCAN_IGNORE_TRIGGER
-#define SCAN_IGNORE_SO
#endif
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
STATIC_CONTRACT_MODE_ANY;
- STATIC_CONTRACT_SO_TOLERANT;
ClrFlsSetValue (TlsIdx_ThreadType, (LPVOID)(((size_t)ClrFlsGetValue (TlsIdx_ThreadType)) |flag));
}
STATIC_CONTRACT_GC_NOTRIGGER;
STATIC_CONTRACT_MODE_ANY;
STATIC_CONTRACT_SUPPORTS_DAC;
- STATIC_CONTRACT_SO_TOLERANT;
#if !defined(DACCESS_COMPILE)
return IsGCSpecialThread () || IsSuspendEEThread ();
if (pmdEnum == NULL)
return;
- // This function may be called through RCW. When hosted, we have probed before this call, so the
- // following contract violation is OK.
- CONTRACT_VIOLATION(SOToleranceViolation);
HENUMInternal::DestroyEnum(pmdEnum);
END_CLEANUP_ENTRYPOINT;
} // RegMeta::CloseEnum
// Thus Release() is in a satellite lib.
ULONG RegMeta::Release()
{
- // This is called during cleanup. We can not fail this call by probing.
- // As long as we make sure the cleanup does not use too much space through
- // BEGIN_CLEANUP_ENTRYPOINT, we are OK.
- CONTRACT_VIOLATION (SOToleranceViolation);
BEGIN_CLEANUP_ENTRYPOINT;
#if defined(FEATURE_METADATA_IN_VM)
HRESULT hr = NOERROR;
RID rid;
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return COR_E_STACKOVERFLOW);
-
LOCKREAD();
if (!m_pStgdb->m_MiniMd.IsSorted(TBL_NestedClass) && !m_pStgdb->m_MiniMd.IsTableVirtualSorted(TBL_NestedClass))
}
ErrExit:
- END_SO_INTOLERANT_CODE;
return hr;
} // MDInternalRW::GetNestedClassProps
_ASSERTE(pDeltaMD);
_ASSERTE(ppv);
- // debugging-specific usages don't need SO hardening
- SO_NOT_MAINLINE_FUNCTION;
-
HRESULT hr = E_FAIL;
IMDInternalImportENC *pDeltaMDImport = NULL;
if (IsMinimalDelta())
return CLDB_E_INCOMPATIBLE;
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return COR_E_STACKOVERFLOW);
-
IfFailGo(m_StringHeap.MakeWritable());
IfFailGo(m_GuidHeap.MakeWritable());
IfFailGo(m_UserStringHeap.MakeWritable());
m_fIsReadOnly = false;
ErrExit:
- ;
- END_SO_INTOLERANT_CODE;
return hr;
} // CMiniMdRW::ConvertToRW
IMDCommon *pInternalROMDCommon = NULL;
MDFileFormat format;
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return COR_E_STACKOVERFLOW);
-
if (ppIUnk == NULL)
IfFailGo(E_INVALIDARG);
if ( pInternalROMDCommon )
pInternalROMDCommon->Release();
- END_SO_INTOLERANT_CODE;
-
return hr;
} // GetMDInternalInterface
#include "regdisp.h"
#include "stackframe.h"
#include "gms.h"
-#include "stackprobe.h"
#include "fcall.h"
#include "syncblk.h"
#include "gcdesc.h"
#include "domainfile.inl"
#include "clsload.inl"
#include "method.inl"
-#include "stackprobe.inl"
#include "syncblk.inl"
#include "threads.inl"
#include "eehash.inl"
{
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_FORBID_FAULT;
- STATIC_CONTRACT_SO_TOLERANT;
STATIC_CONTRACT_CANNOT_TAKE_LOCK;
SUPPORTS_DAC;
NOTHROW;
GC_NOTRIGGER;
PRECONDITION(CheckPointer(pCultureNames));
- SO_INTOLERANT;
}
CONTRACTL_END;
SPECIALIZED_VIOLATION(ModeViolation);
SPECIALIZED_VIOLATION(FaultViolation);
SPECIALIZED_VIOLATION(FaultNotFatal);
-SPECIALIZED_VIOLATION(SOToleranceViolation);
SPECIALIZED_VIOLATION(HostViolation);
SPECIALIZED_VIOLATION(TakesLockViolation);
SPECIALIZED_VIOLATION(LoadsTypeViolation);
SPECIALIZED_VIOLATION(ThrowsViolation|GCViolation);
SPECIALIZED_VIOLATION(ThrowsViolation|GCViolation|TakesLockViolation);
-SPECIALIZED_VIOLATION(ThrowsViolation|SOToleranceViolation);
SPECIALIZED_VIOLATION(ThrowsViolation|ModeViolation);
SPECIALIZED_VIOLATION(ThrowsViolation|FaultNotFatal);
SPECIALIZED_VIOLATION(ThrowsViolation|FaultViolation);
SPECIALIZED_VIOLATION(ThrowsViolation|TakesLockViolation);
-SPECIALIZED_VIOLATION(ThrowsViolation|FaultViolation|SOToleranceViolation);
SPECIALIZED_VIOLATION(ThrowsViolation|FaultViolation|TakesLockViolation);
SPECIALIZED_VIOLATION(ThrowsViolation|FaultViolation|GCViolation);
SPECIALIZED_VIOLATION(ThrowsViolation|FaultViolation|GCViolation|TakesLockViolation|LoadsTypeViolation);
SPECIALIZED_VIOLATION(ThrowsViolation|FaultViolation|GCViolation|ModeViolation);
SPECIALIZED_VIOLATION(ThrowsViolation|FaultViolation|GCViolation|ModeViolation|FaultNotFatal);
SPECIALIZED_VIOLATION(ThrowsViolation|FaultViolation|GCViolation|ModeViolation|FaultNotFatal|TakesLockViolation);
-SPECIALIZED_VIOLATION(GCViolation|SOToleranceViolation);
SPECIALIZED_VIOLATION(GCViolation|FaultViolation);
-SPECIALIZED_VIOLATION(GCViolation|FaultViolation|SOToleranceViolation);
-SPECIALIZED_VIOLATION(GCViolation|FaultViolation|ModeViolation|SOToleranceViolation);
-SPECIALIZED_VIOLATION(GCViolation|ModeViolation|SOToleranceViolation);
-SPECIALIZED_VIOLATION(GCViolation|ModeViolation|SOToleranceViolation|FaultNotFatal);
-SPECIALIZED_VIOLATION(GCViolation|ModeViolation|SOToleranceViolation|FaultNotFatal|TakesLockViolation);
+SPECIALIZED_VIOLATION(GCViolation|FaultNotFatal|ModeViolation);
SPECIALIZED_VIOLATION(GCViolation|FaultNotFatal|TakesLockViolation);
+SPECIALIZED_VIOLATION(GCViolation|FaultNotFatal|TakesLockViolation|ModeViolation);
+SPECIALIZED_VIOLATION(GCViolation|ModeViolation);
SPECIALIZED_VIOLATION(FaultViolation|FaultNotFatal);
SPECIALIZED_VIOLATION(FaultNotFatal|TakesLockViolation);
void CHECK::Trigger(LPCSTR reason)
{
- STATIC_CONTRACT_SO_NOT_MAINLINE;
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
void CHECK::Setup(LPCSTR message, LPCSTR condition, LPCSTR file, INT line)
{
- STATIC_CONTRACT_SO_NOT_MAINLINE;
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
STATIC_CONTRACT_SUPPORTS_DAC_HOST_ONLY;
LPCSTR CHECK::FormatMessage(LPCSTR messageFormat, ...)
{
- STATIC_CONTRACT_SO_NOT_MAINLINE;
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
{
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
- STATIC_CONTRACT_SO_NOT_MAINLINE;
// Make a copy of it.
StackScratchBuffer buffer;
NOTHROW;
GC_NOTRIGGER;
FORBID_FAULT;
- SO_INTOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
FORBID_FAULT;
- SO_TOLERANT; // Need this to be tolerant to stack overflows since REGUTIL::GetConfigDWORD was too. (This replaces calls to REGUTIL::GetConfigDWORD)
}
CONTRACTL_END;
//! So don't put in a runtime contract and don't invoke other functions in the CLR (not even _ASSERTE!)
STATIC_CONTRACT_NOTHROW;
- STATIC_CONTRACT_SO_TOLERANT;
STATIC_CONTRACT_SUPPORTS_DAC; // DAC can call in here since we initialize the SxS callbacks in ClrDataAccess::Initialize.
#ifdef DACCESS_COMPILE
// and has low perf impact.
static ClrDebugState gBadClrDebugState;
gBadClrDebugState.ViolationMaskSet( AllViolation );
- // SO_INFRASTRUCTURE_CODE() Macro to remove SO infrastructure code during build
- SO_INFRASTRUCTURE_CODE(gBadClrDebugState.BeginSOTolerant();)
gBadClrDebugState.SetOkToThrow();
ClrDebugState *pNewClrDebugState = NULL;
LPVOID ClrAllocInProcessHeapBootstrap (DWORD dwFlags, SIZE_T dwBytes)
{
- STATIC_CONTRACT_SO_INTOLERANT;
-
#if defined(SELF_NO_HOST)
static HANDLE hHeap = NULL;
BOOL ClrFreeInProcessHeapBootstrap (DWORD dwFlags, LPVOID lpMem)
{
- STATIC_CONTRACT_SO_INTOLERANT;
-
#if defined(SELF_NO_HOST)
static HANDLE hHeap = NULL;
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_GC_NOTRIGGER;
STATIC_CONTRACT_FAULT;
- STATIC_CONTRACT_SO_TOLERANT; // The memory allocation itself should be SO-tolerant. But we must protect the use of it.
STATIC_CONTRACT_SUPPORTS_DAC_HOST_ONLY;
void * result = ClrAllocInProcessHeap(0, S_SIZE_T(n));
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_GC_NOTRIGGER;
STATIC_CONTRACT_FAULT;
- STATIC_CONTRACT_SO_TOLERANT; // The memory allocation itself should be SO-tolerant. But we must protect the use of it.
STATIC_CONTRACT_SUPPORTS_DAC_HOST_ONLY;
void * result = ClrAllocInProcessHeap(0, S_SIZE_T(n));
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
STATIC_CONTRACT_FAULT;
- STATIC_CONTRACT_SO_TOLERANT; // The memory allocation itself should be SO-tolerant. But we must protect the use of it.
STATIC_CONTRACT_SUPPORTS_DAC_HOST_ONLY;
INCONTRACT(_ASSERTE(!ARE_FAULTS_FORBIDDEN()));
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
STATIC_CONTRACT_FAULT;
- STATIC_CONTRACT_SO_TOLERANT; // The memory allocation itself should be SO-tolerant. But we must protect the use of it.
STATIC_CONTRACT_SUPPORTS_DAC_HOST_ONLY;
INCONTRACT(_ASSERTE(!ARE_FAULTS_FORBIDDEN()));
{
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
- STATIC_CONTRACT_SO_TOLERANT; // The memory management routines should be SO-tolerant.
STATIC_CONTRACT_SUPPORTS_DAC_HOST_ONLY;
if (p != NULL)
{
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
- STATIC_CONTRACT_SO_TOLERANT; // The memory management routines should be SO-tolerant.
STATIC_CONTRACT_SUPPORTS_DAC_HOST_ONLY;
if (p != NULL)
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_GC_NOTRIGGER;
STATIC_CONTRACT_FAULT;
- STATIC_CONTRACT_SO_TOLERANT; // The memory management routines should be SO-tolerant.
HANDLE hExecutableHeap = ClrGetProcessExecutableHeap();
if (hExecutableHeap == NULL) {
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_GC_NOTRIGGER;
STATIC_CONTRACT_FAULT;
- STATIC_CONTRACT_SO_TOLERANT; // The memory management routines should be SO-tolerant.
HANDLE hExecutableHeap = ClrGetProcessExecutableHeap();
if (hExecutableHeap == NULL) {
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
STATIC_CONTRACT_FAULT;
- STATIC_CONTRACT_SO_TOLERANT; // The memory management routines should be SO-tolerant.
INCONTRACT(_ASSERTE(!ARE_FAULTS_FORBIDDEN()));
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
STATIC_CONTRACT_FAULT;
- STATIC_CONTRACT_SO_TOLERANT; // The memory management routines should be SO-tolerant.
INCONTRACT(_ASSERTE(!ARE_FAULTS_FORBIDDEN()));
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
STATIC_CONTRACT_CANNOT_TAKE_LOCK;
- STATIC_CONTRACT_SO_TOLERANT;
SUPPORTS_DAC_HOST_ONLY;
if (g_pExecutionEngine == NULL)
IEEMemoryManager * GetEEMemoryManager()
{
- STATIC_CONTRACT_SO_TOLERANT;
STATIC_CONTRACT_GC_NOTRIGGER;
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_CANNOT_TAKE_LOCK;
LPVOID *ClrFlsGetBlockGeneric()
{
WRAPPER_NO_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
return (LPVOID *) GetExecutionEngine()->TLS_GetDataBlock();
}
CONTRACTL
{
NOTHROW;
- SO_TOLERANT;
GC_NOTRIGGER;
SUPPORTS_DAC;
}
{
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
- STATIC_CONTRACT_SO_TOLERANT;
STATIC_CONTRACT_DEBUG_ONLY;
// Log asserts to the stress log. Note that we can't include the szExpr b/c that
DWORD dwAssertStacktrace = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_AssertStacktrace);
-#if !defined(DACCESS_COMPILE) && defined(FEATURE_STACK_PROBE)
- //global g_fpCheckNStackPagesAvailable is not present when SO infrastructure code is not present
- // Trying to get a stack trace if there is little stack available can cause a silent process
- // teardown, so only try to do this there is plenty of stack.
- if ((dwAssertStacktrace) != 0 && (g_fpCheckNStackPagesAvailable != NULL))
- {
- if (!g_fpCheckNStackPagesAvailable(12))
- {
- fConstrained = TRUE;
- }
- }
-#endif
-
LONG lAlreadyOwned = InterlockedExchange((LPLONG)&g_BufferLock, 1);
if (fConstrained || dwAssertStacktrace == 0 || lAlreadyOwned == 1)
{
{
GC_NOTRIGGER;
NOTHROW;
- SO_TOLERANT;
SUPPORTS_DAC_HOST_ONLY; // Exceptions aren't currently marshalled by DAC - just used in the host
}
CONTRACTL_END;
void DECLSPEC_NORETURN ThrowHR(HRESULT hr, SString const &msg)
{
- STATIC_CONTRACT_SO_TOLERANT;
WRAPPER_NO_CONTRACT;
STRESS_LOG1(LF_EH, LL_INFO100, "ThrowHR: HR = %x\n", hr);
void DECLSPEC_NORETURN ThrowWin32(DWORD err)
{
- STATIC_CONTRACT_SO_TOLERANT;
WRAPPER_NO_CONTRACT;
if (err == ERROR_NOT_ENOUGH_MEMORY)
{
{
THROWS;
GC_NOTRIGGER;
- SO_TOLERANT;
SUPPORTS_DAC;
}
CONTRACTL_END;
#include "corexcep.h"
-#ifdef FEATURE_STACK_PROBE
-void DECLSPEC_NORETURN ThrowStackOverflow()
-{
- CONTRACTL
- {
- // This should be throws... But it isn't because a SO doesn't technically
- // fall into the same THROW/NOTHROW conventions as the rest of the contract
- // infrastructure.
- NOTHROW;
-
- GC_NOTRIGGER;
- SO_TOLERANT;
- SUPPORTS_DAC;
- }
- CONTRACTL_END;
-
- //g_hrFatalError=COR_E_STACKOVERFLOW;
- PTR_INT32 p_ghrFatalError = dac_cast<PTR_INT32>(GVAL_ADDR(g_hrFatalError));
- _ASSERTE(p_ghrFatalError != NULL);
- *p_ghrFatalError = COR_E_STACKOVERFLOW;
-
-
- RaiseException(EXCEPTION_SOFTSO, 0, 0, NULL);
- UNREACHABLE();
-}
-#endif
-
void DECLSPEC_NORETURN ThrowMessage(LPCSTR string, ...)
{
CONTRACTL
{
THROWS;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACTL_END;
bool IsCurrentExceptionSO()
{
WRAPPER_NO_CONTRACT;
- DWORD exceptionCode = GetCurrentExceptionCode();
- return IsSOExceptionCode(exceptionCode);
-}
-
-bool IsSOExceptionCode(DWORD exceptionCode)
-{
- if (exceptionCode == STATUS_STACK_OVERFLOW
-#ifdef FEATURE_STACK_PROBE
- || exceptionCode == EXCEPTION_SOFTSO
-#endif
- )
- {
- return TRUE;
- }
- else
- return FALSE;
+ return GetCurrentExceptionCode() == STATUS_STACK_OVERFLOW;
}
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
STATIC_CONTRACT_FORBID_FAULT;
- STATIC_CONTRACT_SO_TOLERANT;
STATIC_CONTRACT_SUPPORTS_DAC;
_ASSERTE(IsInstanceTaggedSEHCode(dwExceptionCode));
+++ /dev/null
-// Licensed to the .NET Foundation under one or more agreements.
-// The .NET Foundation licenses this file to you under the MIT license.
-// See the LICENSE file in the project root for more information.
-//
-
-//
-//*****************************************************************************
-// genericstackprobe.cpp
-//
-// This contains code for generic SO stack probes outside the VM, where we don't have a thread object
-//
-//*****************************************************************************
-
-#include "stdafx.h" // Precompiled header key.
-#include "utilcode.h"
-#include "genericstackprobe.h"
-#include "log.h"
-
-#if defined(FEATURE_STACK_PROBE) && !defined(DACCESS_COMPILE)
-
-#ifdef ENABLE_CONTRACTS_IMPL
-BOOL g_EnableDefaultRWValidation = FALSE;
-#endif
-
-bool g_StackProbingEnabled;
-void (*g_fpCheckForSOInSOIntolerantCode)();
-void (*g_fpSetSOIntolerantTransitionMarker)();
-BOOL (*g_fpDoProbe)(unsigned int n);
-void (*g_fpHandleSoftStackOverflow)(BOOL fSkipDebugger);
-
-// This function is used for NO_THROW probes that have no error return path. In this
-// case, we'll just force a stack overflow exception. Do not call it directly - use
-// one of the FORCE_SO macros.
-void DontCallDirectlyForceStackOverflow()
-{
-#ifdef _PREFAST_
-#pragma warning(push)
-#pragma warning(disable:26001) // "Suppress PREFast warning about underflows"
-#endif
-
- UINT_PTR *sp = NULL;
- // we don't have access to GetCurrentSP from here, so just get an approximation
- sp = (UINT_PTR *)&sp;
- while (TRUE)
- {
- sp -= (GetOsPageSize() / sizeof(UINT_PTR));
- *sp = NULL;
- }
-
-#ifdef _PREFAST_
-#pragma warning(pop)
-#endif
-}
-
-void (*g_fpHandleStackOverflowAfterCatch)() = 0;
-
-// HandleStackOverflowAfterCatch
-//
-void HandleStackOverflowAfterCatch()
-{
- if (!g_fpHandleStackOverflowAfterCatch)
- {
- // If g_fpUnwindGuardChainTo has not been set, then we haven't called InitStackProbes
- // and we aren't probing, so bail.
- return;
- }
-
- // Reset the SO-tolerance state and restore the current guard
- g_fpHandleStackOverflowAfterCatch();
-}
-
-NOINLINE void SOIntolerantTransitionHandler::CtorImpl()
-{
- m_exceptionOccurred = true;
- m_pPreviousHandler = ClrFlsGetValue(TlsIdx_SOIntolerantTransitionHandler);
- g_fpSetSOIntolerantTransitionMarker();
-}
-
-NOINLINE void SOIntolerantTransitionHandler::DtorImpl()
-{
- // if we take a stack overflow exception in SO intolerant code, then we must
- // rip the process. We check this by determining if the SP is beyond the calculated
- // limit. Checking for the guard page being present is too much overhead during
- // exception handling (if you can believe that) and impacts perf.
-
- if (m_exceptionOccurred)
- {
- g_fpCheckForSOInSOIntolerantCode();
- }
-
- ClrFlsSetValue(TlsIdx_SOIntolerantTransitionHandler, m_pPreviousHandler);
-}
-
-#ifdef STACK_GUARDS_DEBUG
-
-// If this is TRUE, we'll make the stack page that we put our stack marker in PAGE_NOACCESS so that you get an AV
-// as soon as you go past the stack guard.
-BOOL g_ProtectStackPagesInDebugger = FALSE;
-
-// This is the smallest size backout probe for which we will try to do a virtual protect for debugging.
-// If this number is too small, the 1 page ganularity of VirtualProtect becomes a problem. This number
-// should be less than or equal to the default backout probe size.
-#define MINIMUM_PAGES_FOR_DEBUGGER_PROTECTION 4.0
-
-void (*g_fpRestoreCurrentStackGuard)(BOOL fDisabled) = 0;
-BOOL g_EnableBackoutStackValidation = FALSE;
-BOOL (*g_fpShouldValidateSOToleranceOnThisThread)() = 0;
-BOOL (*g_fp_BaseStackGuard_RequiresNStackPages)(BaseStackGuardGeneric *pGuard, unsigned int n, BOOL fThrowOnSO) = NULL;
-void (*g_fp_BaseStackGuard_CheckStack)(BaseStackGuardGeneric *pGuard) = NULL;
-BOOL (*g_fpCheckNStackPagesAvailable)(unsigned int n) = NULL;
-
-// Always initialize g_EntryPointProbeAmount to a valid value as there could be a race where a
-// function probes with g_EntryPointProbeAmount's value before it is initialized in InitStackProbes.
-DWORD g_EntryPointProbeAmount = DEFAULT_ENTRY_PROBE_SIZE;
-
-// RestoreSOToleranceState
-//
-// Restores the EE SO-tolerance state after a catch.
-
-void RestoreSOToleranceState()
-{
- if (!g_fpRestoreCurrentStackGuard)
- {
- // If g_fpUnwindGuardChainTo has not been set, then we haven't called InitStackProbes
- // and we aren't probing, so bail.
- return;
- }
-
- // Reset the SO-tolerance state and restore the current guard
- g_fpRestoreCurrentStackGuard(FALSE);
-}
-
-//
-// EnsureSOTolerant ASSERTS if we are not in an SO-tolerant mode
-//
-void EnsureSOTolerant()
-{
-#ifdef ENABLE_CONTRACTS_IMPL
- ClrDebugState *pClrDebugState = GetClrDebugState();
- _ASSERTE(! pClrDebugState || pClrDebugState->IsSOTolerant());
-#endif
-}
-
-DEBUG_NOINLINE DebugSOIntolerantTransitionHandler::DebugSOIntolerantTransitionHandler()
- : SOIntolerantTransitionHandler()
-{
- SCAN_SCOPE_BEGIN;
- // This CANNOT be a STATIC_CONTRACT_SO_INTOLERANT b/c that isn't
- // really just a static contract, it is actually calls EnsureSOIntolerantOK
- // as well. Instead we just use the annotation.
- ANNOTATION_FN_SO_INTOLERANT;
-#ifdef ENABLE_CONTRACTS_IMPL
- m_clrDebugState = GetClrDebugState();
- if (m_clrDebugState)
- {
- m_prevSOTolerantState = m_clrDebugState->BeginSOIntolerant();
- }
-#endif
-}
-
-DEBUG_NOINLINE DebugSOIntolerantTransitionHandler::~DebugSOIntolerantTransitionHandler()
-{
- SCAN_SCOPE_END;
-
- if (m_clrDebugState)
- {
- m_clrDebugState->SetSOTolerance(m_prevSOTolerantState);
- }
-}
-
-// This is effectively an implicit probe, because we are guaranteeing that we have
-// enought stack to run and will not take an SO. So we enter SO-intolerant code when
-// we install one of these.
-DEBUG_NOINLINE BaseStackMarker::BaseStackMarker(float numPages, BOOL fAllowDisabling)
- : m_prevWasSOTolerant(FALSE)
- , m_pDebugState(
-#ifdef ENABLE_CONTRACTS_IMPL
- CheckClrDebugState()
-#else
- NULL
-#endif
- )
- , m_fMarkerSet(FALSE)
- , m_fTemporarilyDisabled(FALSE), m_fAddedToStack(FALSE), m_pPrevious(NULL)
- , m_numPages(0.0), m_pMarker(NULL)
- , m_fProtectedStackPage(FALSE), m_fAllowDisabling(fAllowDisabling)
-{
- SCAN_SCOPE_BEGIN;
- // This CANNOT be a STATIC_CONTRACT_SO_INTOLERANT b/c that isn't
- // really just a static contract, it is actually calls EnsureSOIntolerantOK
- // as well. Instead we just use the annotation.
- ANNOTATION_FN_SO_INTOLERANT;
-
- {
- DEBUG_ONLY_REGION();
- // If backout stack validation isn't enabled then we are done.
- if (!g_EnableBackoutStackValidation)
- {
- return;
- }
-
- // If we can't talk to other markers then the markers could get in each others way
- if (!m_pDebugState)
- {
- return;
- }
-
- // Allow only the lowest marker to be active at any one time. Yes, this means that
- // the stack will only ever have one element in it. However having multiple markers
- // is problematic for debugging and conflicts with the VirtualProtect option. It
- // adds little value, in that small backout checks stop happening in exception
- // codepaths, but these get plenty of coverage in success cases and the lowest
- // placed marked is the one that could actually indicate a stack overflow.
- if (!m_pDebugState->GetStackMarkerStack().IsEmpty())
- {
- return;
- }
-
- // Switch the SO tolerance mode
- m_prevWasSOTolerant = m_pDebugState->SetSOTolerance(FALSE);
-
- // If we have less then numPages left before the end of the stack then there is
- // no point in adding a marker since we will take an SO anyway if we use too much
- // stack. Putting the marker is actually very bad since it artificially forces an
- // SO in cases where it wouldn't normally occur if we use less than num pages of stack.
- if (g_fpCheckNStackPagesAvailable &&
- !g_fpCheckNStackPagesAvailable(numPages < 1 ? 1 : (unsigned int)numPages))
- {
- return;
- }
-
- if (m_fAllowDisabling)
- {
- // Push ourselves on to the stack of stack markers on the CLR debug state.
- m_pDebugState->GetStackMarkerStack().PushStackMarker(this);
- m_fAddedToStack = TRUE;
- }
-
- // Set the actual stack guard marker if we have enough stack to do so.
- SetMarker(numPages);
-
- if (m_fMarkerSet && m_fAllowDisabling)
- {
- ProtectMarkerPageInDebugger();
- }
- }
-}
-
-// we have this so that the check of the global can be inlined
-// and we don't make the call to CheckMarker unless we need to.
-DEBUG_NOINLINE void BaseStackMarker::CheckForBackoutViolation()
-{
- SCAN_SCOPE_END;
-
- // If backout stack validation isn't enabled then we are done.
- if (!g_EnableBackoutStackValidation)
- {
- return;
- }
-
- {
- DEBUG_ONLY_REGION()
-
- // The marker should always be re-enabled at this point.
- CONSISTENCY_CHECK_MSG(!m_fTemporarilyDisabled, "The stack guard was disabled but not properly re-enabled. This is a bug somewhere in the code called after this marker has been set up.");
-
- if (!m_pDebugState || m_fTemporarilyDisabled)
- {
- return;
- }
-
- // Reset the SO tolerance of the thread.
- m_pDebugState->SetSOTolerance(m_prevWasSOTolerant);
-
- if (m_fAddedToStack)
- {
- // Pop ourselves off of the stack of stack markers on the CLR debug state.
- CONSISTENCY_CHECK(m_pDebugState != NULL);
- BaseStackMarker *pPopResult = m_pDebugState->GetStackMarkerStack().PopStackMarker();
-
- CONSISTENCY_CHECK_MSG(pPopResult == this, "The marker we pop off the stack should always be the current marker.");
- CONSISTENCY_CHECK_MSG(m_pPrevious == NULL, "PopStackMarker should reset the current marker's m_pPrevious field to NULL.");
- }
-
- // Not cancellable markers should only be checked when no cancellable markers are present.
- if (!m_fAllowDisabling && !(m_pDebugState->GetStackMarkerStack().IsEmpty()))
- {
- return;
- }
-
- if (m_fProtectedStackPage)
- {
- UndoPageProtectionInDebugger();
- }
-
- if (m_fMarkerSet)
- {
- // Check to see if we overwrote the stack guard marker.
- CheckMarker();
- }
- }
-}
-
-void BaseStackMarker::SetMarker(float numPages)
-{
- LIMITED_METHOD_CONTRACT;
- STATIC_CONTRACT_DEBUG_ONLY;
-
- m_numPages = numPages;
-
- // Use the address of the argument to get the current stack pointer. Note that this
- // won't be the exact SP; however it will be close enough.
- LPVOID pStack = &numPages;
-
- UINT_PTR *pMarker = (UINT_PTR*)pStack - (int)(GetOsPageSize() / sizeof(UINT_PTR) * m_numPages);
-
- // We might not have committed our stack yet, so allocate the number of pages
- // we need so that they will be commited and we won't AV when we try to set the mark.
- _alloca( (int)(GetOsPageSize() * m_numPages) );
- m_pMarker = pMarker;
- *m_pMarker = STACK_COOKIE_VALUE;
-
- m_fMarkerSet = TRUE;
-
-}
-
-void BaseStackMarker::RareDisableMarker()
-{
- LIMITED_METHOD_CONTRACT;
- STATIC_CONTRACT_DEBUG_ONLY;
-
- if (m_fProtectedStackPage)
- {
- UndoPageProtectionInDebugger();
- }
-
- m_fTemporarilyDisabled = TRUE;
-
- if (m_fMarkerSet)
- {
- *m_pMarker = DISABLED_STACK_COOKIE_VALUE;
- }
-}
-
-void BaseStackMarker::RareReEnableMarker()
-{
- LIMITED_METHOD_CONTRACT;
- STATIC_CONTRACT_DEBUG_ONLY;
-
- m_fTemporarilyDisabled = FALSE;
-
- if (m_fMarkerSet) {
- *m_pMarker = STACK_COOKIE_VALUE;
- }
-
- if (m_fProtectedStackPage)
- {
- ProtectMarkerPageInDebugger();
- }
-}
-
-//-----------------------------------------------------------------------------
-// Protect the page where we put the marker if a debugger is attached. That way, you get an AV right away
-// when you go past the stack guard when running under a debugger.
-//-----------------------------------------------------------------------------
-void BaseStackMarker::ProtectMarkerPageInDebugger()
-{
- WRAPPER_NO_CONTRACT;
- DEBUG_ONLY_FUNCTION;
-
- if (!g_ProtectStackPagesInDebugger)
- {
- return;
- }
-
- if (m_numPages < MINIMUM_PAGES_FOR_DEBUGGER_PROTECTION)
- {
- return;
- }
-
- DWORD flOldProtect;
-
- LOG((LF_EH, LL_INFO100000, "BSM::PMP: m_pMarker 0x%p, value 0x%p\n", m_pMarker, *m_pMarker));
-
- // We cannot call into host for VirtualProtect. EEVirtualProtect will try to restore previous
- // guard, but the location has been marked with PAGE_NOACCESS.
-#undef VirtualProtect
- BOOL fSuccess = ::VirtualProtect(m_pMarker, 1, PAGE_NOACCESS, &flOldProtect);
- _ASSERTE(fSuccess);
-
-#define VirtualProtect(lpAddress, dwSize, flNewProtect, lpflOldProtect) \
- Dont_Use_VirtualProtect(lpAddress, dwSize, flNewProtect, lpflOldProtect)
-
- m_fProtectedStackPage = fSuccess;
-}
-
-//-----------------------------------------------------------------------------
-// Remove page protection installed for this probe
-//-----------------------------------------------------------------------------
-void BaseStackMarker::UndoPageProtectionInDebugger()
-{
- WRAPPER_NO_CONTRACT;
- DEBUG_ONLY_FUNCTION;
-
- _ASSERTE(m_fProtectedStackPage);
- _ASSERTE(!m_fTemporarilyDisabled);
-
- DWORD flOldProtect;
- // EEVirtualProtect installs a BoundaryStackGuard. To avoid recursion, we call
- // into OS for VirtualProtect instead.
-#undef VirtualProtect
- BOOL fSuccess = ::VirtualProtect(m_pMarker, 1, PAGE_READWRITE, &flOldProtect);
- _ASSERTE(fSuccess);
-
- LOG((LF_EH, LL_INFO100000, "BSM::UMP m_pMarker 0x%p\n", m_pMarker));
-
-#define VirtualProtect(lpAddress, dwSize, flNewProtect, lpflOldProtect) \
- Dont_Use_VirtualProtect(lpAddress, dwSize, flNewProtect, lpflOldProtect)
-}
-
-void BaseStackMarker::CheckMarker()
-{
- WRAPPER_NO_CONTRACT;
- STATIC_CONTRACT_DEBUG_ONLY;
-
- if ( IsMarkerOverrun(m_pMarker) )
- {
- SOBackoutViolation(__FUNCTION__, __FILE__, __LINE__);
- }
-}
-
-AutoCleanupDisableBackoutStackValidation::AutoCleanupDisableBackoutStackValidation()
-{
- WRAPPER_NO_CONTRACT;
- STATIC_CONTRACT_DEBUG_ONLY;
-#ifdef ENABLE_CONTRACTS_IMPL
- m_fAlreadyDisabled = GetClrDebugState()->GetStackMarkerStack().IsDisabled();
- if (!m_fAlreadyDisabled)
- {
- GetClrDebugState()->GetStackMarkerStack().RareDisableStackMarkers();
- }
-#endif
-}
-
-AutoCleanupDisableBackoutStackValidation::~AutoCleanupDisableBackoutStackValidation()
-{
- WRAPPER_NO_CONTRACT;
- STATIC_CONTRACT_DEBUG_ONLY;
-
-#ifdef ENABLE_CONTRACTS_IMPL
- if (!m_fAlreadyDisabled)
- {
- GetClrDebugState()->GetStackMarkerStack().RareReEnableStackMarkers();
- }
-#endif
-}
-
-inline void StackMarkerStack::PushStackMarker(BaseStackMarker *pStackMarker)
-{
- LIMITED_METHOD_CONTRACT;
- STATIC_CONTRACT_DEBUG_ONLY;
-
- pStackMarker->m_pPrevious = m_pTopStackMarker;
- m_pTopStackMarker = pStackMarker;
-}
-
-BaseStackMarker *StackMarkerStack::PopStackMarker()
-{
- LIMITED_METHOD_CONTRACT;
- STATIC_CONTRACT_DEBUG_ONLY;
-
- BaseStackMarker *pOldTop = m_pTopStackMarker;
- m_pTopStackMarker = pOldTop->m_pPrevious;
- pOldTop->m_pPrevious = NULL;
- return pOldTop;
-}
-
-void StackMarkerStack::RareDisableStackMarkers()
-{
- WRAPPER_NO_CONTRACT;
- STATIC_CONTRACT_DEBUG_ONLY;
-
- // Walk up the stack of markers and disable them all.
- BaseStackMarker *pCurrentStackMarker = m_pTopStackMarker;
- while (pCurrentStackMarker)
- {
- pCurrentStackMarker->RareDisableMarker();
- pCurrentStackMarker = pCurrentStackMarker->m_pPrevious;
- }
- m_fDisabled = TRUE;
-}
-
-void StackMarkerStack::RareReEnableStackMarkers()
-{
- WRAPPER_NO_CONTRACT;
- STATIC_CONTRACT_DEBUG_ONLY;
-
- // Walk up the stack of markers and re-enable them all.
- BaseStackMarker *pCurrentStackMarker = m_pTopStackMarker;
- while (pCurrentStackMarker)
- {
- pCurrentStackMarker->RareReEnableMarker();
- pCurrentStackMarker = pCurrentStackMarker->m_pPrevious;
- }
- m_fDisabled = FALSE;
-}
-
-#endif // STACK_GUARDS_DEBUG
-
-#endif // FEATURE_STACK_PROBE && !DACCESS_COMPILE
NOTHROW;
FORBID_FAULT;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACTL_END
{
STATIC_CONTRACT_WRAPPER;
- ENTER_SO_NOT_MAINLINE_CODE;
-
#ifdef SELF_NO_HOST
if (TRUE)
#else //!SELF_NO_HOST
// Cannot acquire the required lock, as this would call back
// into the host. Eat the log message.
}
-
- LEAVE_SO_NOT_MAINLINE_CODE;
}
VOID LogSpew2(DWORD facility2, DWORD level, const char *fmt, ... )
{
STATIC_CONTRACT_WRAPPER;
- ENTER_SO_NOT_MAINLINE_CODE;
-
#ifdef SELF_NO_HOST
if (TRUE)
#else //!SELF_NO_HOST
// Cannot acquire the required lock, as this would call back
// into the host. Eat the log message.
}
-
- LEAVE_SO_NOT_MAINLINE_CODE;
}
VOID LogSpewAlways (const char *fmt, ... )
{
STATIC_CONTRACT_WRAPPER;
- ENTER_SO_NOT_MAINLINE_CODE;
-
#ifdef SELF_NO_HOST
if (TRUE)
#else //!SELF_NO_HOST
// Cannot acquire the required lock, as this would call back
// into the host. Eat the log message.
}
-
- LEAVE_SO_NOT_MAINLINE_CODE;
}
#endif // LOGGING
CONTRACTL
{
NOTHROW;
- SO_TOLERANT;
}
CONTRACTL_END;
HMODULE ret = NULL;
DWORD lastError;
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(SetLastError(COR_E_STACKOVERFLOW); return NULL;)
EX_TRY
{
lastError = GetLastError();
}
EX_CATCH_HRESULT(hr);
- END_SO_INTOLERANT_CODE
if (hr != S_OK)
{
CONTRACTL
{
NOTHROW;
- SO_TOLERANT;
}
CONTRACTL_END;
DWORD lastError;
HANDLE ret = INVALID_HANDLE_VALUE;
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(SetLastError(COR_E_STACKOVERFLOW); return NULL;)
-
EX_TRY
{
LongPathString path(LongPathString::Literal, lpFileName);
lastError = GetLastError();
}
EX_CATCH_HRESULT(hr);
- END_SO_INTOLERANT_CODE
if (hr != S_OK )
{
CONTRACTL
{
NOTHROW;
- SO_TOLERANT;
}
CONTRACTL_END;
BOOL ret = FALSE;
DWORD lastError;
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(SetLastError(COR_E_STACKOVERFLOW); return FALSE;)
-
EX_TRY
{
LongPathString path(LongPathString::Literal, lpFileName);
lastError = GetLastError();
}
EX_CATCH_HRESULT(hr);
- END_SO_INTOLERANT_CODE
if (hr != S_OK )
{
CONTRACTL
{
NOTHROW;
- SO_TOLERANT;
}
CONTRACTL_END;
DWORD ret = INVALID_FILE_ATTRIBUTES;
DWORD lastError;
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(SetLastError(COR_E_STACKOVERFLOW); return INVALID_FILE_ATTRIBUTES;)
-
EX_TRY
{
LongPathString path(LongPathString::Literal, lpFileName);
lastError = GetLastError();
}
EX_CATCH_HRESULT(hr);
- END_SO_INTOLERANT_CODE
if (hr != S_OK )
{
CONTRACTL
{
NOTHROW;
- SO_TOLERANT;
}
CONTRACTL_END;
BOOL ret = FALSE;
DWORD lastError;
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(SetLastError(COR_E_STACKOVERFLOW); return FALSE;)
-
EX_TRY
{
LongPathString path(LongPathString::Literal, lpFileName);
lastError = GetLastError();
}
EX_CATCH_HRESULT(hr);
- END_SO_INTOLERANT_CODE
if (hr != S_OK )
{
CONTRACTL
{
NOTHROW;
- SO_TOLERANT;
}
CONTRACTL_END;
BOOL ret = FALSE;
DWORD lastError;
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(SetLastError(COR_E_STACKOVERFLOW); return FALSE;)
-
EX_TRY
{
LongPathString path(LongPathString::Literal, lpFileName);
lastError = GetLastError();
}
EX_CATCH_HRESULT(hr);
- END_SO_INTOLERANT_CODE
if (hr != S_OK )
{
CONTRACTL
{
NOTHROW;
- SO_TOLERANT;
}
CONTRACTL_END;
BOOL ret = FALSE;
DWORD lastError;
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(SetLastError(COR_E_STACKOVERFLOW); return FALSE;)
-
EX_TRY
{
LongPathString Existingpath(LongPathString::Literal, lpExistingFileName);
lastError = GetLastError();
}
EX_CATCH_HRESULT(hr);
- END_SO_INTOLERANT_CODE
if (hr != S_OK )
{
CONTRACTL
{
NOTHROW;
- SO_TOLERANT;
}
CONTRACTL_END;
BOOL ret = FALSE;
DWORD lastError;
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(SetLastError(COR_E_STACKOVERFLOW); return FALSE;)
-
EX_TRY
{
LongPathString Existingpath(LongPathString::Literal, lpExistingFileName);
lastError = GetLastError();
}
EX_CATCH_HRESULT(hr);
- END_SO_INTOLERANT_CODE
if (hr != S_OK )
{
CONTRACTL
{
NOTHROW;
- SO_TOLERANT;
}
CONTRACTL_END;
DWORD ret = 0;
DWORD lastError;
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(SetLastError(COR_E_STACKOVERFLOW); return 0;)
-
EX_TRY
{
LongPathString Existingpath(LongPathString::Literal, lpPath);
lastError = GetLastError();
}
EX_CATCH_HRESULT(hr);
- END_SO_INTOLERANT_CODE
if (hr != S_OK)
{
CONTRACTL
{
NOTHROW;
- SO_TOLERANT;
}
CONTRACTL_END;
HRESULT hr = S_OK;
DWORD lastError;
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(SetLastError(COR_E_STACKOVERFLOW); return 0;)
-
EX_TRY
{
LongPathString longPath(LongPathString::Literal, lpszLongPath);
lastError = GetLastError();
}
EX_CATCH_HRESULT(hr);
- END_SO_INTOLERANT_CODE
if (hr != S_OK )
{
CONTRACTL
{
NOTHROW;
- SO_TOLERANT;
}
CONTRACTL_END;
HRESULT hr = S_OK;
DWORD lastError;
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(SetLastError(COR_E_STACKOVERFLOW); return 0;)
-
EX_TRY
{
LongPathString shortPath(LongPathString::Literal, lpszShortPath);
lastError = GetLastError();
}
EX_CATCH_HRESULT(hr);
- END_SO_INTOLERANT_CODE
if (hr != S_OK )
{
CONTRACTL
{
NOTHROW;
- SO_TOLERANT;
}
CONTRACTL_END;
BOOL ret = FALSE;
DWORD lastError;
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(SetLastError(COR_E_STACKOVERFLOW); return FALSE;)
-
EX_TRY
{
LongPathString path(LongPathString::Literal, lpPathName);
lastError = GetLastError();
}
EX_CATCH_HRESULT(hr);
- END_SO_INTOLERANT_CODE
if (hr != S_OK )
{
CONTRACTL
{
NOTHROW;
- SO_TOLERANT;
}
CONTRACTL_END;
BOOL ret = FALSE;
DWORD lastError;
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(SetLastError(COR_E_STACKOVERFLOW); return FALSE;)
-
EX_TRY
{
LongPathString path(LongPathString::Literal, lpPathName);
lastError = GetLastError();
}
EX_CATCH_HRESULT(hr);
- END_SO_INTOLERANT_CODE
if (hr != S_OK )
{
CONTRACTL
{
NOTHROW;
- SO_TOLERANT;
}
CONTRACTL_END;
DWORD ret = 0;
DWORD lastError;
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(SetLastError(COR_E_STACKOVERFLOW); return 0;)
-
EX_TRY
{
COUNT_T size = buffer.GetUnicodeAllocation() + 1;
buffer.CloseBuffer(ret);
}
EX_CATCH_HRESULT(hr);
- END_SO_INTOLERANT_CODE
if (hr != S_OK)
{
CONTRACTL
{
NOTHROW;
- SO_TOLERANT;
}
CONTRACTL_END;
UINT ret = 0;
DWORD lastError;
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(SetLastError(COR_E_STACKOVERFLOW); return 0;)
-
EX_TRY
{
//Change the behaviour in Redstone to retry
}
EX_CATCH_HRESULT(hr);
- END_SO_INTOLERANT_CODE
if (hr != S_OK)
{
CONTRACTL
{
NOTHROW;
- SO_TOLERANT;
}
CONTRACTL_END;
DWORD ret = 0;
DWORD lastError;
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(SetLastError(COR_E_STACKOVERFLOW); return 0;)
-
EX_TRY
{
//Change the behaviour in Redstone to retry
lpBuffer.CloseBuffer(ret);
}
EX_CATCH_HRESULT(hr);
- END_SO_INTOLERANT_CODE
if (hr != S_OK)
{
CONTRACTL
{
NOTHROW;
- SO_TOLERANT;
}
CONTRACTL_END;
DWORD ret = 0;
DWORD lastError;
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(SetLastError(COR_E_STACKOVERFLOW); return 0;)
-
EX_TRY
{
//Change the behaviour in Redstone to retry
lpBuffer.CloseBuffer(ret);
}
EX_CATCH_HRESULT(hr);
- END_SO_INTOLERANT_CODE
if (hr != S_OK)
{
CONTRACTL
{
NOTHROW;
- SO_TOLERANT;
}
CONTRACTL_END;
DWORD ret = 0;
DWORD lastError;
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(SetLastError(COR_E_STACKOVERFLOW); return 0;)
-
EX_TRY
{
lpBuffer.CloseBuffer(ret);
}
EX_CATCH_HRESULT(hr);
- END_SO_INTOLERANT_CODE
if (hr != S_OK)
{
CONTRACTL
{
NOTHROW;
- SO_TOLERANT;
}
CONTRACTL_END;
BOOL ret = FALSE;
DWORD lastError;
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(SetLastError(COR_E_STACKOVERFLOW); return FALSE;)
-
EX_TRY
{
LongPathString Existingpath(LongPathString::Literal, lpExistingFileName);
lastError = GetLastError();
}
EX_CATCH_HRESULT(hr);
- END_SO_INTOLERANT_CODE
if (hr != S_OK )
{
CONTRACTL
{
NOTHROW;
- SO_TOLERANT;
}
CONTRACTL_END;
BOOL ret = FALSE;
DWORD lastError;
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(SetLastError(COR_E_STACKOVERFLOW); return FALSE;)
-
EX_TRY
{
LongPathString Existingpath(LongPathString::Literal, lpExistingFileName);
lastError = GetLastError();
}
EX_CATCH_HRESULT(hr);
- END_SO_INTOLERANT_CODE
if (hr != S_OK )
{
CONTRACTL
{
NOTHROW;
- SO_TOLERANT;
}
CONTRACTL_END;
HANDLE ret = INVALID_HANDLE_VALUE;
DWORD lastError;
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(SetLastError(COR_E_STACKOVERFLOW); return FALSE;)
-
EX_TRY
{
LongPathString path(LongPathString::Literal, lpFileName);
lastError = GetLastError();
}
EX_CATCH_HRESULT(hr);
- END_SO_INTOLERANT_CODE
if (hr != S_OK )
{
GC_NOTRIGGER;
SUPPORTS_DAC;
PRECONDITION(HasContents());
- SO_TOLERANT;
}
CONTRACT_END;
GC_NOTRIGGER;
SUPPORTS_DAC;
PRECONDITION(HasContents());
- SO_TOLERANT;
}
CONTRACT_CHECK_END;
NOTHROW;
GC_NOTRIGGER;
SUPPORTS_DAC;
- SO_TOLERANT;
}
CONTRACT_CHECK_END;
NOTHROW;
GC_NOTRIGGER;
SUPPORTS_DAC;
- SO_TOLERANT;
}
CONTRACT_CHECK_END;
PRECONDITION(HasDirectoryEntry(entry));
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACT_CHECK_END;
NOTHROW;
GC_NOTRIGGER;
SUPPORTS_DAC;
- SO_TOLERANT;
}
CONTRACT_CHECK_END;
NOTHROW;
GC_NOTRIGGER;
SUPPORTS_DAC;
- SO_TOLERANT;
}
CONTRACT_CHECK_END;
NOTHROW;
GC_NOTRIGGER;
SUPPORTS_DAC;
- SO_TOLERANT;
}
CONTRACT_CHECK_END;
PRECONDITION(CheckNTHeaders());
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACT_CHECK_END;
PRECONDITION(CheckNTHeaders());
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACT_CHECK_END;
NOTHROW;
GC_NOTRIGGER;
POSTCONDITION(CheckRva(RETVAL));
- SO_TOLERANT;
}
CONTRACT_END;
NOTHROW;
GC_NOTRIGGER;
CANNOT_TAKE_LOCK;
- SO_TOLERANT;
POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
}
CONTRACT_END;
CANNOT_TAKE_LOCK;
POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
SUPPORTS_DAC;
- SO_TOLERANT;
}
CONTRACT_END;
GC_NOTRIGGER;
POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
SUPPORTS_DAC;
- SO_TOLERANT;
}
CONTRACT_END;
PRECONDITION(CheckRva(rva, NULL_OK));
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
CANNOT_TAKE_LOCK;
SUPPORTS_DAC;
}
NOTHROW;
GC_NOTRIGGER;
FORBID_FAULT;
- SO_TOLERANT;
SUPPORTS_DAC;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
CANNOT_TAKE_LOCK;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
SUPPORTS_DAC;
- SO_TOLERANT;
}
CONTRACT_CHECK_END;
NOTHROW;
GC_NOTRIGGER;
SUPPORTS_DAC;
- SO_TOLERANT;
}
CONTRACT_END;
NOTHROW;
GC_NOTRIGGER;
SUPPORTS_DAC;
- SO_TOLERANT;
}
CONTRACT_CHECK_END;
NOTHROW;
GC_NOTRIGGER;
SUPPORTS_DAC;
- SO_TOLERANT;
}
CONTRACTL_END;
SUPPORTS_DAC;
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACT_END;
SUPPORTS_DAC;
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACT_END;
SUPPORTS_DAC;
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACT_END;
#ifdef _DEBUG
STATIC_CONTRACT_NOTHROW; \
ANNOTATION_DEBUG_ONLY; \
- STATIC_CONTRACT_CANNOT_TAKE_LOCK; \
- ANNOTATION_FN_SO_NOT_MAINLINE;
+ STATIC_CONTRACT_CANNOT_TAKE_LOCK;
#endif
#if defined(DACCESS_COMPILE) || defined(FEATURE_PAL)
return TRUE;
#else
- CONTRACT_VIOLATION(SOToleranceViolation);
-
// Contracts in ConfigDWORD do WszLoadLibrary(MSCOREE_SHIM_W).
// This check prevents recursion.
if (wcsstr(lpFileName, MSCOREE_SHIM_W) != 0)
{
DISABLED(NOTHROW);
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACTL_END;
HRESULT retVal = E_OUTOFMEMORY;
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return COR_E_STACKOVERFLOW);
SString::Startup();
EX_TRY
{
}
EX_END_CATCH(SwallowAllExceptions);
- END_SO_INTOLERANT_CODE;
-
return retVal;
}
{
DISABLED(NOTHROW);
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACTL_END;
HRESULT retVal = E_OUTOFMEMORY;
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return COR_E_STACKOVERFLOW);
EX_TRY
{
SString::Startup();
retVal = E_OUTOFMEMORY;
}
EX_END_CATCH(SwallowAllExceptions);
- END_SO_INTOLERANT_CODE;
return retVal;
}
NOTHROW;
GC_NOTRIGGER;
FORBID_FAULT;
- SO_TOLERANT;
CANNOT_TAKE_LOCK;
}
CONTRACTL_END;
NewArrayHolder<WCHAR> ret = NULL;
HRESULT hr = S_OK;
DWORD Len;
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(SetLastError(COR_E_STACKOVERFLOW); return NULL;)
EX_TRY
{
PathString temp;
}
EX_CATCH_HRESULT(hr);
- END_SO_INTOLERANT_CODE
if (hr != S_OK)
{
NOTHROW;
GC_NOTRIGGER;
FORBID_FAULT;
- SO_TOLERANT;
CANNOT_TAKE_LOCK;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
FORBID_FAULT;
- SO_TOLERANT;
CANNOT_TAKE_LOCK;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
FORBID_FAULT;
- SO_TOLERANT;
CANNOT_TAKE_LOCK;
}
CONTRACTL_END;
POSTCONDITION(IsEmpty());
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
SUPPORTS_DAC_HOST_ONLY;
}
CONTRACT_END;
NOTHROW;
GC_NOTRIGGER;
FORBID_FAULT;
- SO_TOLERANT;
}
CONTRACTL_END;
return NULL;
}
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return NULL);
StressLogLockHolder lockh(theLog.lock, FALSE);
class NestedCaller
if (noFLSNow == FALSE && theLog.facilitiesToLog != 0)
msgs = CreateThreadStressLogHelper();
- END_SO_INTOLERANT_CODE;
-
return msgs;
}
NOTHROW;
GC_NOTRIGGER;
FORBID_FAULT;
- SO_INTOLERANT;
CANNOT_TAKE_LOCK;
}
CONTRACTL_END;
{
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_FORBID_FAULT;
- STATIC_CONTRACT_SO_TOLERANT;
// Asserts in this function cause infinite loops in the asserting mechanism.
// Just use debug breaks instead.
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
STATIC_CONTRACT_FORBID_FAULT;
- STATIC_CONTRACT_SO_TOLERANT;
STATIC_CONTRACT_SUPPORTS_DAC;
// Any stresslog LogMsg could theoretically create a new stress log and thus
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
STATIC_CONTRACT_CANNOT_TAKE_LOCK;
- STATIC_CONTRACT_SO_TOLERANT;
-
+
WinRTStatusEnum winRTStatus = WINRT_STATUS_UNSUPPORTED;
const WCHAR wszComBaseDll[] = W("\\combase.dll");
CONTRACTL
{
NOTHROW;
- SO_TOLERANT;
GC_NOTRIGGER;
}
CONTRACTL_END;
CONTRACTL
{
NOTHROW;
- SO_TOLERANT;
GC_NOTRIGGER;
}
CONTRACTL_END;
CONTRACTL
{
NOTHROW;
- SO_TOLERANT;
GC_NOTRIGGER;
}
CONTRACTL_END;
CONTRACTL
{
NOTHROW;
- SO_TOLERANT;
GC_NOTRIGGER;
}
CONTRACTL_END;
CONTRACTL
{
NOTHROW;
- SO_TOLERANT;
CANNOT_TAKE_LOCK;
}
CONTRACTL_END;
CONTRACTL
{
NOTHROW;
- SO_TOLERANT;
CANNOT_TAKE_LOCK;
}
CONTRACTL_END;
CONTRACTL
{
NOTHROW;
- SO_TOLERANT;
}
CONTRACTL_END;
HRESULT hr = S_OK; // Value returned.
BOOL bRepeat = TRUE; // MODOPT and MODREQ belong to the arg after them
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return COR_E_STACKOVERFLOW);
while(bRepeat)
{
bRepeat = FALSE;
} // switch (ulElementType)
} // end while(bRepeat)
ErrExit:
-
- END_SO_INTOLERANT_CODE;
return hr;
} // validateOneArg()
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
STATIC_CONTRACT_CANNOT_TAKE_LOCK;
- STATIC_CONTRACT_SO_TOLERANT;
BOOL fSupportedPlatform = FALSE;
OSVERSIONINFOEX sVer;
HWND hwnd = NULL;
- // We are already in a catastrophic situation so we can tolerate faults as well as SO & GC mode violations to keep going.
- CONTRACT_VIOLATION(FaultNotFatal | GCViolation | ModeViolation | SOToleranceViolation);
+ // We are already in a catastrophic situation so we can tolerate faults as well as GC mode violations to keep going.
+ CONTRACT_VIOLATION(FaultNotFatal | GCViolation | ModeViolation);
if (!ShouldDisplayMsgBoxOnCriticalFailure())
return IDABORT;
HWND hwnd = NULL;
- // We are already in a catastrophic situation so we can tolerate faults as well as SO & GC mode violations to keep going.
- CONTRACT_VIOLATION(FaultNotFatal | GCViolation | ModeViolation | SOToleranceViolation);
+ // We are already in a catastrophic situation so we can tolerate faults as well as GC mode violations to keep going.
+ CONTRACT_VIOLATION(FaultNotFatal | GCViolation | ModeViolation);
if (!ShouldDisplayMsgBoxOnCriticalFailure())
return IDABORT;
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_FORBID_FAULT;
STATIC_CONTRACT_CANNOT_TAKE_LOCK;
- STATIC_CONTRACT_SO_TOLERANT;
if (!g_fEnsureCharSetInfoInitialized)
{
CONTRACTL {
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
SUPPORTS_DAC;
} CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
SUPPORTS_DAC;
}
CONTRACTL_END;
CONTRACTL {
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
SUPPORTS_DAC;
} CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
SUPPORTS_DAC;
}
CONTRACTL_END;
CONTRACTL {
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
PRECONDITION(pCode != GetPreStubEntryPoint());
} CONTRACTL_END;
MODE_ANY;
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
PRECONDITION((offsetNum > 0) && (offsetNum < 20)); /* we only allow reasonable offsetNums 1..19 */
}
CONTRACTL_END;
Thread::ObjectRefFlush(CURRENT_THREAD);
#endif
- BEGIN_SO_INTOLERANT_CODE(CURRENT_THREAD);
-
_ASSERTE(IS_ALIGNED((size_t)pThunk, sizeof(INT64)));
FrameWithCookie<ExternalMethodFrame> frame(pTransitionBlock);
}
// Ready to return
-
- END_SO_INTOLERANT_CODE;
-
return pCode;
}
{
CONTRACTL
{
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
SUPPORTS_DAC;
{
CONTRACTL
{
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
SUPPORTS_DAC;
{
CONTRACTL
{
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
SUPPORTS_DAC;
{
CONTRACTL
{
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
SUPPORTS_DAC;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
SUPPORTS_DAC;
}
CONTRACTL_END;
THROWS;
GC_TRIGGERS;
MODE_ANY;
- SO_INTOLERANT;
INJECT_FAULT(COMPlusThrowOM(););
}
CONTRACTL_END;
if (!SystemDomain::IsUnderDomainLock() && !IsGCThread()) { MODE_COOPERATIVE;} else { DISABLED(MODE_ANY);}
#endif
GC_NOTRIGGER;
- SO_TOLERANT;
NOTHROW;
}
CONTRACTL_END;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
- SO_INTOLERANT;
INJECT_FAULT(COMPlusThrowOM(););
}
CONTRACTL_END;
StackWalkAction SystemDomain::CallersMethodCallback(CrawlFrame* pCf, VOID* data)
{
LIMITED_METHOD_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
MethodDesc *pFunc = pCf->GetFunction();
/* We asked to be called back only for functions */
void Invoke()
{
WRAPPER_NO_CONTRACT;
- STATIC_CONTRACT_SO_INTOLERANT;
SetupThread();
pThis->LoadDomainAssembly(pSpec, pFile, targetLevel);
}
}
{
- // This is not executed for SO exceptions so we need to disable the backout
- // stack validation to prevent false violations from being reported.
- DISABLE_BACKOUT_STACK_VALIDATION;
-
BOOL fFailure = PostBindResolveAssembly(pSpec, &NewSpec, ex->GetHR(), &pFailedSpec);
if (fFailure)
{
THROWS;
GC_TRIGGERS;
MODE_ANY;
- SO_INTOLERANT;
}
CONTRACTL_END;
CONTRACTL {
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
} CONTRACTL_END;
- { // SO tolerance exception for debug-only assertion.
- CONTRACT_VIOLATION(SOToleranceViolation);
+ {
CONSISTENCY_CHECK(GetDomainFile()->GetModule() == pMT->GetModuleForStatics());
}
{
CONTRACTL {
NOTHROW;
- SO_TOLERANT;
WRAPPER(GC_TRIGGERS);
PRECONDITION(pMT->GetDomain() == this);
} CONTRACTL_END;
PTR_MethodTable BaseDomain::LookupType(UINT32 id) {
CONTRACTL {
NOTHROW;
- SO_TOLERANT;
WRAPPER(GC_TRIGGERS);
CONSISTENCY_CHECK(id != TYPE_ID_THIS_CLASS);
} CONTRACTL_END;
CONTRACTL {
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
} CONTRACTL_END;
m_typeIDMap.RemoveTypes(pLoaderAllocator);
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_COOPERATIVE;
SUPPORTS_DAC;
}
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_COOPERATIVE;
SUPPORTS_DAC;
}
ADID GetId (void)
{
LIMITED_METHOD_DAC_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
return m_dwId;
}
virtual PTR_AppDomain AsAppDomain()
{
LIMITED_METHOD_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
_ASSERTE(!"Not an AppDomain");
return NULL;
}
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
UINT ArrayStubCache::Length(const BYTE *pRawStub)
{
LIMITED_METHOD_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
return ((ArrayOpScript*)pRawStub)->Length();
}
NewHolder<Assembly> pAssembly (new Assembly(pDomain, pFile, debuggerFlags, fIsCollectible));
- // If there are problems that arise from this call stack, we'll chew up a lot of stack
- // with the various EX_TRY/EX_HOOKs that we will encounter.
- INTERIOR_STACK_PROBE_FOR(GetThread(), DEFAULT_ENTRY_PROBE_SIZE);
#ifdef PROFILING_SUPPORTED
{
BEGIN_PIN_PROFILER(CORProfilerTrackAssemblyLoads());
EX_END_HOOK;
#endif
pAssembly.SuppressRelease();
- END_INTERIOR_STACK_PROBE;
return pAssembly;
} // Assembly::Create
NOTHROW;
GC_NOTRIGGER;
FORBID_FAULT;
- SO_TOLERANT;
SUPPORTS_DAC;
}
CONTRACT_END;
#include "interoputil.h"
#include "frames.h"
#include "typeparse.h"
-#include "stackprobe.h"
#include "appdomainnative.hpp"
#include "../binder/inc/clrprivbindercoreclr.h"
THROWS;
MODE_COOPERATIVE;
GC_TRIGGERS;
- SO_INTOLERANT;
PRECONDITION(IsProtectedByGCFrame (pAsmName));
}
CONTRACTL_END;
#include "memorypool.h"
#include "assemblyspecbase.h"
#include "domainfile.h"
-#include "genericstackprobe.h"
#include "holder.h"
class AppDomain;
{
THROWS;
GC_TRIGGERS;
- SO_TOLERANT;
INJECT_FAULT(ThrowOutOfMemory());
}
CONTRACTL_END;
void AssertMulticoreJitAllowedModule(PCODE pTarget)
{
- CONTRACTL
- {
- SO_NOT_MAINLINE;
- }
- CONTRACTL_END;
-
MethodDesc* pMethod = Entry2MethodDesc(pTarget, NULL);
Module * pModule = pMethod->GetModule_NoLogging();
CallDescrData * pCallDescrData,
BOOL fCriticalCall)
{
- STATIC_CONTRACT_SO_INTOLERANT;
-
#if defined(FEATURE_MULTICOREJIT) && defined(_DEBUG)
// For multicore JITting, background thread should not call managed code, except when calling system code (e.g. throwing managed exception)
#endif // 0
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_GC_TRIGGERS;
- STATIC_CONTRACT_SO_TOLERANT;
_ASSERTE(!NingenEnabled() && "You cannot invoke managed code inside the ngen compilation process.");
CURRENT_THREAD->HandleThreadAbort(); \
} \
} \
- BEGIN_SO_TOLERANT_CODE(CURRENT_THREAD); \
INSTALL_CALL_TO_MANAGED_EXCEPTION_HOLDER(); \
INSTALL_COMPLUS_EXCEPTION_HANDLER_NO_DECLARE();
#define END_CALL_TO_MANAGED() \
UNINSTALL_COMPLUS_EXCEPTION_HANDLER(); \
- END_SO_TOLERANT_CODE; \
}
/***********************************************************************/
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
SUPPORTS_DAC;
}
CONTRACTL_END;
PTR_Module ret = NULL;
- INTERIOR_STACK_PROBE_NOTHROW_CHECK_THREAD(DontCallDirectlyForceStackOverflow());
ret = Module::ComputePreferredZapModuleHelper( pDefinitionModule,
classInst,
methodInst );
- END_INTERIOR_STACK_PROBE;
return ret;
}
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
SUPPORTS_DAC;
}
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
SUPPORTS_DAC;
}
PTR_Module pRet=NULL;
- INTERIOR_STACK_PROBE_FOR_NOTHROW_CHECK_THREAD(10, NO_FORBIDGC_LOADER_USE_ThrowSO(););
-
if (pMT->IsArray())
{
TypeHandle elemTH = pMT->GetApproxArrayElementTypeHandle();
// then its loader module is simply the module containing its TypeDef
pRet= pMT->GetModule();
}
- END_INTERIOR_STACK_PROBE;
return pRet;
}
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
SUPPORTS_DAC;
}
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
SUPPORTS_DAC;
}
CONTRACT_END;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
SUPPORTS_DAC;
}
CONTRACT_END;
{
THROWS;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACTL_END
FORBID_FAULT;
MODE_ANY;
NOTHROW;
- SO_TOLERANT;
GC_NOTRIGGER;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
SUPPORTS_DAC;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
SUPPORTS_DAC;
PRECONDITION(MapIsCompressed());
}
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
SUPPORTS_DAC;
PRECONDITION(MapIsCompressed());
}
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
POSTCONDITION(CheckPointer(RETVAL));
}
CONTRACT_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
SUPPORTS_DAC;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
SUPPORTS_DAC;
- SO_TOLERANT;
}
CONTRACTL_END;
THROWS;
GC_TRIGGERS;
MODE_ANY;
- SO_INTOLERANT;
POSTCONDITION(CheckPointer(RETVAL));
SUPPORTS_DAC;
}
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_INTOLERANT;
POSTCONDITION(CheckPointer(RETVAL));
SUPPORTS_DAC;
}
}
CONTRACTL_END;
- //This is called from inside EEStartupHelper, so it breaks the SO rules. However, this is debug only
- //(and only supported for limited jit testing), so it's ok here.
- CONTRACT_VIOLATION(SOToleranceViolation);
-
//If the EE isn't started yet, it's not safe to jit. We fail in COM jitting a p/invoke.
if (!g_fEEStarted)
return;
GC_NOTRIGGER;
SUPPORTS_DAC;
CANNOT_TAKE_LOCK;
- SO_TOLERANT;
}
CONTRACT_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
SUPPORTS_DAC;
}
#include "util.hpp"
#include "shimload.h"
#include "comthreadpool.h"
-#include "stackprobe.h"
#include "posterror.h"
#include "virtualcallstub.h"
#include "strongnameinternal.h"
static BOOL WINAPI DbgCtrlCHandler(DWORD dwCtrlType)
{
WRAPPER_NO_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
#if defined(DEBUGGING_SUPPORTED)
// Note that if a managed-debugger is attached, it's actually attached with the native
{
THROWS;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
StackwalkCache::Init();
- // In coreclr, clrjit is compiled into it, but SO work in clrjit has not been done.
-#ifdef FEATURE_STACK_PROBE
- if (CLRHosted() && GetEEPolicy()->GetActionOnFailure(FAIL_StackOverflow) == eRudeUnloadAppDomain)
- {
- InitStackProbes();
- }
-#endif
-
// This isn't done as part of InitializeGarbageCollector() above because it
// requires write barriers to have been set up on x86, which happens as part
// of InitJITHelpers1.
ENTRY_POINT;
} CONTRACTL_END;
- CONTRACT_VIOLATION(GCViolation | ModeViolation | SOToleranceViolation);
+ CONTRACT_VIOLATION(GCViolation | ModeViolation);
if (g_fEEShutDown || !g_fEEStarted)
return;
SystemDomain::DetachEnd();
}
- TerminateStackProbes();
-
// Unregister our vectored exception and continue handlers from the OS.
// This will ensure that if any other DLL unload (after ours) has an exception,
// we wont attempt to process that exception (which could lead to various
//
DWORD WINAPI EEShutDownProcForSTAThread(LPVOID lpParameter)
{
- STATIC_CONTRACT_SO_INTOLERANT;;
-
-
ClrFlsSetThreadType(ThreadType_ShutdownHelper);
EEShutDownHelper(FALSE);
NOTHROW;
GC_TRIGGERS;
MODE_ANY;
- SO_TOLERANT; // we don't need to cleanup 'cus we're shutting down
PRECONDITION(g_fEEStarted);
} CONTRACTL_END;
return;
}
- // Stop stack probing and asserts right away. Once we're shutting down, we can do no more.
- // And we don't want to SO-protect anything at this point anyway. This really only has impact
- // on a debug build.
- TerminateStackProbes();
-
- // The process is shutting down. No need to check SO contract.
- SO_NOT_MAINLINE_FUNCTION;
-
// We only do the first part of the shutdown once.
static LONG OnlyOne = -1;
NOTHROW;
if (checkKind == LoaderLockCheck::ForMDA) { GC_TRIGGERS; } else { GC_NOTRIGGER; }; // because of the CustomerDebugProbe
MODE_ANY;
- SO_TOLERANT;
} CONTRACTL_END;
// If we are shutting down the runtime, then we cannot run code.
NOTHROW;
if (checkKind == LoaderLockCheck::ForMDA) { GC_TRIGGERS; } else { GC_NOTRIGGER; }; // because of the CustomerDebugProbe
MODE_ANY;
- SO_TOLERANT;
} CONTRACTL_END;
// Special-case the common success cases
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
}
CONTRACTL_END;
if (fFromThunk) THROWS; else NOTHROW;
WRAPPER(GC_TRIGGERS);
MODE_ANY;
- SO_TOLERANT;
} CONTRACTL_END;
// If we have a failure result, and we're called from a thunk,
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_TRIGGERS;
- // this runs at the top of a thread, SO is not a concern here...
- STATIC_CONTRACT_SO_NOT_MAINLINE;
-
-
// HRESULT hr;
// BEGIN_EXTERNAL_ENTRYPOINT(&hr);
// EE isn't spun up enough to use this macro
_ASSERTE(pParam->lpReserved || !g_fEEStarted);
g_fProcessDetach = TRUE;
-#if defined(ENABLE_CONTRACTS_IMPL) && defined(FEATURE_STACK_PROBE)
- // We are shutting down process. No need to check SO contract.
- // And it is impossible to enforce SO contract in global dtor, like ModIntPairList.
- g_EnableDefaultRWValidation = FALSE;
-#endif
-
if (g_fEEStarted)
{
// GetThread() may be set to NULL for Win9x during shutdown.
GC_NOTRIGGER;
MODE_ANY;
PRECONDITION(CheckPointer(pCultureNames));
- SO_INTOLERANT;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_INTOLERANT;;
} CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_INTOLERANT;;
} CONTRACTL_END;
_ASSERTE(sizeof(LocaleIDValue)/sizeof(WCHAR) >= LOCALE_NAME_MAX_LENGTH);
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
FORBID_FAULT;
- SO_TOLERANT;
}
CONTRACTL_END
NOTHROW;
GC_NOTRIGGER;
FORBID_FAULT;
- SO_TOLERANT;
}
CONTRACTL_END
GC_NOTRIGGER;
MODE_ANY;
SUPPORTS_DAC;
- SO_TOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
}
CONTRACTL_END;
#include "listlock.h"
#include "methodimpl.h"
#include "guidfromname.h"
-#include "stackprobe.h"
#include "encee.h"
#include "encee.h"
#include "comsynchronizable.h"
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(ppv, NULL_OK));
}
CONTRACTL_END;
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
PRECONDITION(m_cbRefCount > 0);
}
CONTRACTL_END;
{
HRESULT hr = S_OK;
#ifdef FEATURE_CORRUPTING_EXCEPTIONS
- // SetupForComCallHR uses "SO_INTOLERANT_CODE_NOTHROW" to setup the SO-Intolerant transition
- // for COM Interop. However, "SO_INTOLERANT_CODE_NOTHROW" expects that no exception can escape
- // through this boundary but all it does is (in addition to checking that no exception has escaped it)
- // do stack probing.
- //
- // However, Corrupting Exceptions [CE] can escape the COM Interop boundary. Thus, to address that scenario,
- // we use the macro below that uses BEGIN_SO_INTOLERANT_CODE_NOTHROW to do the equivalent of
- // SO_INTOLERANT_CODE_NOTHROW and yet allow for CEs to escape through. Since there will be a corresponding
- // END_SO_INTOLERANT_CODE, the call is splitted into two parts: the Begin and End (see below).
BeginSetupForComCallHRWithEscapingCorruptingExceptions();
#else // !FEATURE_CORRUPTING_EXCEPTIONS
SetupForComCallHR();
#endif // FEATURE_CORRUPTING_EXCEPTIONS
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
}
CONTRACTL_END;
HRESULT hr = S_OK;
#ifdef FEATURE_CORRUPTING_EXCEPTIONS
- // SetupForComCallHR uses "SO_INTOLERANT_CODE_NOTHROW" to setup the SO-Intolerant transition
- // for COM Interop. However, "SO_INTOLERANT_CODE_NOTHROW" expects that no exception can escape
- // through this boundary but all it does is (in addition to checking that no exception has escaped it)
- // do stack probing.
- //
- // However, Corrupting Exceptions [CE] can escape the COM Interop boundary. Thus, to address that scenario,
- // we use the macro below that uses BEGIN_SO_INTOLERANT_CODE_NOTHROW to do the equivalent of
- // SO_INTOLERANT_CODE_NOTHROW and yet allow for CEs to escape through. Since there will be a corresponding
- // END_SO_INTOLERANT_CODE, the call is splitted into two parts: the Begin and End (see below).
BeginSetupForComCallHRWithEscapingCorruptingExceptions();
#else // !FEATURE_CORRUPTING_EXCEPTIONS
SetupForComCallHR();
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
}
CONTRACTL_END;
HRESULT hr = S_OK;
#ifdef FEATURE_CORRUPTING_EXCEPTIONS
- // SetupForComCallHR uses "SO_INTOLERANT_CODE_NOTHROW" to setup the SO-Intolerant transition
- // for COM Interop. However, "SO_INTOLERANT_CODE_NOTHROW" expects that no exception can escape
- // through this boundary but all it does is (in addition to checking that no exception has escaped it)
- // do stack probing.
- //
- // However, Corrupting Exceptions [CE] can escape the COM Interop boundary. Thus, to address that scenario,
- // we use the macro below that uses BEGIN_SO_INTOLERANT_CODE_NOTHROW to do the equivalent of
- // SO_INTOLERANT_CODE_NOTHROW and yet allow for CEs to escape through. Since there will be a corresponding
- // END_SO_INTOLERANT_CODE, the call is splitted into two parts: the Begin and End (see below).
BeginSetupForComCallHRWithEscapingCorruptingExceptions();
#else // !FEATURE_CORRUPTING_EXCEPTIONS
SetupForComCallHR();
#endif // FEATURE_CORRUPTING_EXCEPTIONS
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
}
CONTRACTL_END;
GC_TRIGGERS;
MODE_ANY;
INJECT_FAULT(COMPlusThrowOM(););
- SO_TOLERANT;
}
CONTRACTL_END;
MethodTable* tempMT = NULL;
EX_TRY
{
- BEGIN_SO_INTOLERANT_CODE(pThread);
GCX_COOP();
tempMT = GetTypeForCLSID(m_ClsId);
- END_SO_INTOLERANT_CODE;
}
EX_CATCH
{
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
}
CONTRACTL_END;
#endif // FEATURE_CORRUPTING_EXCEPTIONS
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pMT));
PRECONDITION(CheckPointer(ppv, NULL_OK));
}
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(ppv, NULL_OK));
}
CONTRACTL_END;
{
Thread *pThread = GetThread();
- BEGIN_SO_INTOLERANT_CODE(pThread);
GCX_COOP();
pMT = GetTypeForCLSID(rclsid);
- END_SO_INTOLERANT_CODE;
}
// If we can't find the class based on the CLSID or if the registered managed
DISABLED(NOTHROW);
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(typeName, NULL_OK));
PRECONDITION(CheckPointer(ppv, NULL_OK));
}
virtual void UseKeys(__in_ecount(2) LPUTF8 *pKey1)
{
LIMITED_METHOD_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
SUPPORTS_DAC;
bReturn = (
{
CAN_TAKE_LOCK; // because of DestroyHandle
}
- SO_TOLERANT;
}
CONTRACTL_END;
if (throwable == NULL)
{
- // We need to disable the backout stack validation at this point since GetThrowable can
- // take arbitrarily large amounts of stack for different exception types; however we know
- // for a fact that we will never go through this code path if the exception is a stack
- // overflow exception since we already handled that case above with the pre-allocated SO exception.
- DISABLE_BACKOUT_STACK_VALIDATION;
-
class RestoreLastException
{
Thread *m_pThread;
}
{
- DISABLE_BACKOUT_STACK_VALIDATION;
if (throwable == NULL)
{
STRESS_LOG0(LF_EH, LL_INFO100, "CLRException::GetThrowable: We have failed to track exceptions accurately through the system.\n");
DISABLED(NOTHROW);
GC_TRIGGERS;
MODE_ANY;
- SO_TOLERANT;
}
CONTRACTL_END;
- HRESULT hr = E_FAIL;
-
- BEGIN_SO_INTOLERANT_CODE(GetThread());
-
-// Is it legal to switch to GCX_COOP in a SO_TOLERANT region?
GCX_COOP();
- hr = GetExceptionHResult(GetThrowable());
-
- END_SO_INTOLERANT_CODE;
-
- return hr;
+ return GetExceptionHResult(GetThrowable());
}
#ifdef FEATURE_COMINTEROP
GC_TRIGGERS;
NOTHROW;
MODE_ANY;
- SO_TOLERANT;
}
CONTRACTL_END;
GC_TRIGGERS;
THROWS;
MODE_ANY;
- SO_TOLERANT;
}
CONTRACTL_END;
// Not all codepaths expect to have it initialized (e.g. hosting APIs).
if (g_fComStarted)
{
- // We probe here for SO since GetThrowable and GetComIPFromObjectRef are SO intolerant
- BEGIN_SO_INTOLERANT_CODE(GetThread());
-
// Get errorinfo only when our SO probe succeeds
{
// Switch to coop mode since GetComIPFromObjectRef requires that
GCPROTECT_END();
}
-
- END_SO_INTOLERANT_CODE;
}
else
{
GC_NOTRIGGER;
MODE_COOPERATIVE;
FORBID_FAULT;
- SO_TOLERANT;
}
CONTRACTL_END;
GC_NOTRIGGER;
MODE_ANY;
FORBID_FAULT;
- SO_TOLERANT;
}
CONTRACTL_END;
GC_NOTRIGGER;
MODE_COOPERATIVE;
FORBID_FAULT;
- SO_TOLERANT;
}
CONTRACTL_END;
{
NOTHROW;
MODE_COOPERATIVE;
- SO_TOLERANT;
}
CONTRACTL_END;
{
FAULT_NOT_FATAL();
- BEGIN_SO_INTOLERANT_CODE(GetThread());
-
EXCEPTIONREF pOutOfMemory = (EXCEPTIONREF)AllocateObject(g_pOutOfMemoryExceptionClass);
pOutOfMemory->SetHResult(COR_E_OUTOFMEMORY);
pOutOfMemory->SetXCode(EXCEPTION_COMPLUS);
retVal = pOutOfMemory;
-
- END_SO_INTOLERANT_CODE;
}
EX_CATCH
{
GC_TRIGGERS;
NOTHROW;
MODE_COOPERATIVE;
- SO_INTOLERANT;
}
CONTRACTL_END;
GC_NOTRIGGER;
NOTHROW;
MODE_ANY;
- SO_TOLERANT;
}
CONTRACTL_END;
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
STATIC_CONTRACT_MODE_ANY;
- STATIC_CONTRACT_SO_TOLERANT;
if (m_pThread != NULL)
{
STATIC_CONTRACT_GC_NOTRIGGER;
STATIC_CONTRACT_MODE_ANY;
STATIC_CONTRACT_CANNOT_TAKE_LOCK;
- STATIC_CONTRACT_SO_TOLERANT;
bool fVMInitialized = g_fEEStarted?true:false;
Exception::HandlerState::SetupCatch(INDEBUG_COMMA(szFile) lineNum, fVMInitialized);
if (!DidCatchCxx())
{
- if (IsSOExceptionCode(exceptionCode))
+ if (exceptionCode == STATUS_STACK_OVERFLOW)
{
// Handle SO exception
//
STATIC_CONTRACT_GC_NOTRIGGER;
STATIC_CONTRACT_MODE_ANY;
STATIC_CONTRACT_CANNOT_TAKE_LOCK;
- STATIC_CONTRACT_SO_TOLERANT;
LOG((LF_EH, LL_INFO100, "EX_CATCH catch succeeded (CLRException::HandlerState)\n"));
GC_TRIGGERS;
THROWS;
MODE_ANY;
- SO_TOLERANT; // no risk of an SO after we've allocated the object here
}
CONTRACTL_END;
#ifndef _CLREX_H_
#define _CLREX_H_
+// BCL classnativelib includes <ex.h> first
+#ifndef VM_NO_SO_INFRASTRUCTURE_CODE
+#define VM_NO_SO_INFRASTRUCTURE_CODE(x) x
+#endif
+
#include <ex.h>
#include "runtimeexceptionkind.h"
#define GET_EXCEPTION() (__pException == NULL ? __defaultException.Validate() : __pException.GetValue())
#endif // _DEBUG
-// When we throw an exception, we need stay in SO-intolerant state and
-// probe for sufficient stack so that we don't SO during the processing.
-#undef HANDLE_SO_TOLERANCE_FOR_THROW
-#define HANDLE_SO_TOLERANCE_FOR_THROW STACK_PROBE_FOR_THROW(GetThread());
-
LONG CLRNoCatchHandler(EXCEPTION_POINTERS* pExceptionInfo, PVOID pv);
// Re-define the macro to add automatic restoration of the guard page to PAL_EXCEPT and PAL_EXCEPT_FILTER and
#undef EX_ENDTRY
#define EX_ENDTRY \
PAL_CPP_ENDTRY \
- SO_INFRASTRUCTURE_CODE(if (__state.DidCatch()) { RESTORE_SO_TOLERANCE_STATE; }) \
- SO_INFRASTRUCTURE_CODE(if (__state.DidCatchSO()) { HANDLE_STACKOVERFLOW_AFTER_CATCH; }) \
- NO_SO_INFRASTRUCTURE_CODE_ASSERTE(!__state.DidCatchSO()) \
+ _ASSERTE(!__state.DidCatchSO());
// CLRException::GetErrorInfo below invokes GetComIPFromObjectRef
} \
if (CURRENT_THREAD != NULL) \
{ \
- BEGIN_SO_INTOLERANT_CODE_NOTHROW(CURRENT_THREAD, *__phr = COR_E_STACKOVERFLOW); \
EX_TRY_THREAD(CURRENT_THREAD); \
{ \
#define END_EXTERNAL_ENTRYPOINT \
} \
EX_CATCH_HRESULT(*__phr); \
- END_SO_INTOLERANT_CODE; \
} \
} \
} \
*__phr = GET_EXCEPTION()->GetHR(); \
} \
EX_END_CATCH(RethrowCorruptingExceptionsEx(fCond)); \
- END_SO_INTOLERANT_CODE; \
} \
} \
} \
STATIC_CONTRACT_GC_NOTRIGGER;
STATIC_CONTRACT_MODE_ANY;
STATIC_CONTRACT_CANNOT_TAKE_LOCK;
- STATIC_CONTRACT_SO_TOLERANT;
m_pThread = pThread;
if (m_pThread == NULL)
STATIC_CONTRACT_GC_NOTRIGGER;
STATIC_CONTRACT_MODE_ANY;
STATIC_CONTRACT_CANNOT_TAKE_LOCK;
- STATIC_CONTRACT_SO_TOLERANT;
_ASSERTE(pThread != NULL);
m_pThread = pThread;
#include "typehash.h"
#include "comdelegate.h"
#include "array.h"
-#include "stackprobe.h"
#include "posterror.h"
#include "wrappers.h"
#include "generics.h"
MODE_ANY;
PRECONDITION(CheckPointer(pDefinitionModule, NULL_OK));
POSTCONDITION(CheckPointer(RETVAL));
- SO_INTOLERANT;
SUPPORTS_DAC;
}
CONTRACT_END
MODE_ANY;
PRECONDITION(CheckPointer(pDefinitionModule, NULL_OK));
POSTCONDITION(CheckPointer(RETVAL));
- SO_INTOLERANT;
}
CONTRACT_END
if (FORBIDGC_LOADER_USE_ENABLED()) GC_NOTRIGGER; else GC_TRIGGERS;
if (FORBIDGC_LOADER_USE_ENABLED()) FORBID_FAULT; else { INJECT_FAULT(COMPlusThrowOM()); }
if (FORBIDGC_LOADER_USE_ENABLED() || fLoadTypes != LoadTypes) { LOADS_TYPE(CLASS_LOAD_BEGIN); } else { LOADS_TYPE(level); }
- if (fLoadTypes == DontLoadTypes) SO_TOLERANT; else SO_INTOLERANT;
PRECONDITION(CheckPointer(pKey));
PRECONDITION(level > CLASS_LOAD_BEGIN && level <= CLASS_LOADED);
PRECONDITION(CheckPointer(pInstContext, NULL_OK));
if (typeHnd.GetLoadLevel() < level)
{
- INTERIOR_STACK_PROBE_CHECK_THREAD;
-
#ifdef FEATURE_PREJIT
if (typeHnd.GetLoadLevel() == CLASS_LOAD_UNRESTOREDTYPEKEY)
{
Module *pLoaderModule = ComputeLoaderModule(&typeKey);
pLoaderModule->GetClassLoader()->LoadTypeHandleForTypeKey(&typeKey, typeHnd, level);
}
-
- END_INTERIOR_STACK_PROBE;
}
#endif // DACCESS_COMPILE
} CONTRACT_END
TypeHandle typeHnd;
- INTERIOR_STACK_PROBE_NOTHROW_CHECK_THREAD(RETURN_FROM_INTERIOR_PROBE(TypeHandle()));
-
Module * pFoundModule = NULL;
mdToken FoundCl;
HashedTypeEntry foundEntry;
#endif // !DACCESS_COMPILE
}
- END_INTERIOR_STACK_PROBE;
RETURN typeHnd;
} // ClassLoader::LoadTypeHandleThrowing
RETURN(typeHnd);
}
- // We don't want to probe on any threads except for those with a managed thread. This function
- // can be called from the GC thread etc. so need to control how we probe.
- INTERIOR_STACK_PROBE_NOTHROW_CHECK_THREAD(goto Exit;);
-
IMDInternalImport *pInternalImport = pModule->GetMDImport();
#ifndef DACCESS_COMPILE
#endif // !DACCESS_COMPILE
}
-// If stack guards are disabled, then this label is unreferenced and produces a compile error.
-#if defined(FEATURE_STACK_PROBE) && !defined(DACCESS_COMPILE)
-Exit:
-#endif
-
#ifndef DACCESS_COMPILE
if ((fUninstantiated == FailIfUninstDefOrRef) && !typeHnd.IsNull() && typeHnd.IsGenericTypeDefinition())
{
}
#endif
;
- END_INTERIOR_STACK_PROBE;
RETURN(typeHnd);
}
GCX_PREEMP();
- // Type loading can be recursive. Probe for sufficient stack.
- //
- // Execution of the FINALLY in LoadTypeHandleForTypeKey_Body can eat
- // a lot of stack because LoadTypeHandleForTypeKey_Inner can rethrow
- // any non-SO exceptions that it takes, ensure that we have plenty
- // of stack before getting into it (>24 pages on AMD64, remember
- // that num pages probed is 2*N on AMD64).
- INTERIOR_STACK_PROBE_FOR(GetThread(),20);
-
#ifdef _DEBUG
if (LoggingOn(LF_CLASSLOADER, LL_INFO1000))
{
PushFinalLevels(typeHnd, targetLevel, pInstContext);
- END_INTERIOR_STACK_PROBE;
-
return typeHnd;
}
TypeHandle typeHnd = TypeHandle();
- // Type loading can be recursive. Probe for sufficient stack.
- INTERIOR_STACK_PROBE_FOR(GetThread(),8);
-
ClassLoadLevel currentLevel = CLASS_LOAD_BEGIN;
ClassLoadLevel targetLevelUnderLock = targetLevel < CLASS_DEPENDENCIES_LOADED ? targetLevel : (ClassLoadLevel) (CLASS_DEPENDENCIES_LOADED-1);
while (currentLevel < targetLevelUnderLock)
PushFinalLevels(typeHnd, targetLevel, pInstContext);
- END_INTERIOR_STACK_PROBE;
-
return typeHnd;
}
if (FORBIDGC_LOADER_USE_ENABLED()) GC_NOTRIGGER; else GC_TRIGGERS;
if (FORBIDGC_LOADER_USE_ENABLED()) FORBID_FAULT; else { INJECT_FAULT(COMPlusThrowOM()); }
if (FORBIDGC_LOADER_USE_ENABLED() || fLoadTypes != LoadTypes) { LOADS_TYPE(CLASS_LOAD_BEGIN); } else { LOADS_TYPE(level); }
- if (fLoadTypes == DontLoadTypes) SO_TOLERANT; else SO_INTOLERANT;
MODE_ANY;
SUPPORTS_DAC;
POSTCONDITION(CheckPointer(RETVAL, ((fLoadTypes == LoadTypes) ? NULL_NOT_OK : NULL_OK)));
MODE_ANY;
}
CONTRACT_END;
-
- // Recursive: CanAccess->CheckAccessMember->CanAccessClass->CanAccess
- INTERIOR_STACK_PROBE(GetThread());
AccessCheckOptions accessCheckOptionsNoThrow(accessCheckOptions, FALSE);
if (!canAccess)
{
BOOL fail = accessCheckOptions.FailOrThrow(pContext);
- RETURN_FROM_INTERIOR_PROBE(fail);
+ RETURN(fail);
}
}
- RETURN_FROM_INTERIOR_PROBE(TRUE);
-
- END_INTERIOR_STACK_PROBE;
+ RETURN(TRUE);
} // BOOL ClassLoader::CanAccess()
//******************************************************************************
{
CONTRACTL
{
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_ANY;
NOTHROW;
if (hostCallPreference == AllowHostCalls) { HOST_CALLS; } else { HOST_NOCALLS; }
GC_NOTRIGGER;
- SO_TOLERANT;
CAN_TAKE_LOCK;
} CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
CONTRACTL {
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
HOST_NOCALLS;
SUPPORTS_DAC;
} CONTRACTL_END;
CONTRACTL {
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
SUPPORTS_DAC;
} CONTRACTL_END;
CONTRACTL {
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
SUPPORTS_DAC;
} CONTRACTL_END;
CONTRACTL {
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
SUPPORTS_DAC;
} CONTRACTL_END;
CONTRACTL {
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
SUPPORTS_DAC;
} CONTRACTL_END;
CONTRACTL {
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
SUPPORTS_DAC;
} CONTRACTL_END;
CONTRACTL {
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
SUPPORTS_DAC;
} CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
FORBID_FAULT;
- SO_TOLERANT;
}
CONTRACTL_END
CONTRACTL {
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
} CONTRACTL_END;
if (currentPC == NULL)
CONTRACTL {
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
} CONTRACTL_END;
ReaderLockHolder rlh;
CONTRACTL {
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
} CONTRACTL_END;
#ifdef DACCESS_COMPILE
CONTRACTL {
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
} CONTRACTL_END;
// This may get called for arbitrary code addresses. Note that the lock is
NOTHROW;
HOST_NOCALLS;
GC_NOTRIGGER;
- SO_TOLERANT;
SUPPORTS_DAC;
} CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
STATIC_CONTRACT_HOST_CALLS;
SUPPORTS_DAC;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
STATIC_CONTRACT_HOST_CALLS;
SUPPORTS_DAC;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
SUPPORTS_DAC;
}
CONTRACTL_END;
EECodeInfo * pCodeInfo)
{
CONTRACTL {
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
SUPPORTS_DAC;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
int High)
{
CONTRACTL {
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
SUPPORTS_DAC;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
CONTRACTL {
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
SUPPORTS_DAC;
} CONTRACTL_END;
CONTRACTL {
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
SUPPORTS_DAC;
} CONTRACTL_END;
{
THROWS;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_NOT_MAINLINE;
PRECONDITION(CheckPointer(pComCallData));
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
}
CONTRACTL_END;
LONGLONG newRefCount;
- BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), goto NoLog );
StackSString ssMessage;
ComCallWrapper *pWrap = GetMainWrapper();
LogRefCount(pWrap, ssMessage, GET_EXT_COM_REF(newRefCount));
- END_SO_INTOLERANT_CODE;
return newRefCount;
-
-#ifdef FEATURE_STACK_PROBE // this code is unreachable if FEATURE_STACK_PROBE is not defined
-NoLog:
- // Decrement the ref count
- return ::InterlockedDecrement64(pRefCount);
-#endif // FEATURE_STACK_PROBE
}
MODE_ANY; \
NOTHROW; \
GC_NOTRIGGER; \
- SO_TOLERANT; \
POSTCONDITION(RETVAL == !!IsEqualGUID(iid, riid)); \
} \
CONTRACT_END; \
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
}
CONTRACTL_END;
GC_NOTRIGGER;
MODE_ANY;
POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
- SO_TOLERANT;
}
CONTRACT_END;
WRAPPER(GC_TRIGGERS);
MODE_COOPERATIVE;
PRECONDITION(CheckPointer(m_ppThis));
- SO_TOLERANT;
}
CONTRACT_END;
INSTANCE_CHECK;
POSTCONDITION(CheckPointer(RETVAL));
SUPPORTS_DAC;
- SO_TOLERANT;
}
CONTRACT_END;
WRAPPER(THROWS);
WRAPPER(GC_TRIGGERS);
MODE_COOPERATIVE;
- SO_TOLERANT;
}
CONTRACT_END;
MODE_ANY;
PRECONDITION(CheckPointer(pUnk));
POSTCONDITION(CheckPointer(RETVAL));
- SO_TOLERANT;
SUPPORTS_DAC;
}
CONTRACT_END;
SUPPORTS_DAC;
INSTANCE_CHECK;
POSTCONDITION(CheckPointer(RETVAL));
- SO_TOLERANT;
}
CONTRACT_END;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_TRIGGERS;
MODE_ANY;
- SO_TOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_TRIGGERS;
MODE_ANY;
- SO_TOLERANT;
}
CONTRACTL_END;
if (!CanRunManagedCode())
return;
- SO_INTOLERANT_CODE_NOTHROW(GetThread(), return; );
m_pWrap->Cleanup();
}
NOTHROW;
GC_TRIGGERS;
MODE_ANY;
- SO_TOLERANT;
}
CONTRACTL_END;
WRAPPER(THROWS);
WRAPPER(GC_TRIGGERS);
MODE_ANY;
- SO_TOLERANT;
INSTANCE_CHECK;
}
CONTRACTL_END;
WRAPPER(THROWS);
WRAPPER(GC_TRIGGERS);
MODE_ANY;
- SO_TOLERANT;
INSTANCE_CHECK;
PRECONDITION(CheckPointer(m_pSimpleWrapper));
}
PRECONDITION(CheckPointer(pUnk));
POSTCONDITION(CheckPointer(RETVAL));
SUPPORTS_DAC;
- SO_TOLERANT;
}
CONTRACT_END;
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(ppv, NULL_OK));
}
CONTRACTL_END;
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pIID, NULL_OK));
}
CONTRACTL_END;
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(ppCPC, NULL_OK));
}
CONTRACTL_END;
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pUnk, NULL_OK));
PRECONDITION(CheckPointer(pdwCookie, NULL_OK));
}
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(ppEnum, NULL_OK));
}
CONTRACTL_END;
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(ppv, NULL_OK));
}
CONTRACTL_END;
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(ppCP, NULL_OK));
PRECONDITION(CheckPointer(pcFetched, NULL_OK));
}
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(ppEnum, NULL_OK));
}
CONTRACTL_END;
{
NOTHROW;
MODE_PREEMPTIVE;
- SO_TOLERANT;
}
CONTRACTL_END;
{
NOTHROW;
MODE_PREEMPTIVE;
- SO_TOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(ppv, NULL_OK));
}
CONTRACTL_END;
{
NOTHROW;
GC_TRIGGERS;
- SO_TOLERANT;
MODE_PREEMPTIVE;
}
CONTRACTL_END;
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(rgcd, NULL_OK));
PRECONDITION(CheckPointer(pcFetched, NULL_OK));
}
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(ppEnum, NULL_OK));
}
CONTRACTL_END;
// Initialize the out parameters.
*ppEnum = NULL;
- // This should setup a SO_INTOLERANT region, why isn't it?
SetupForComCallHR();
ConnectionEnum *pConEnum = new(nothrow) ConnectionEnum(m_pConnectionPoint);
MODE_ANY;
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACTL_END;
DELEGATEREF innerDel = NULL;
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
}
CONTRACTL_END;
#include "regdisp.h"
#include "stackframe.h"
#include "gms.h"
-#include "stackprobe.h"
#include "fcall.h"
#include "syncblk.h"
#include "gcdesc.h"
#include "clsload.inl"
#include "domainfile.inl"
#include "method.inl"
-#include "stackprobe.inl"
#include "syncblk.inl"
#include "threads.inl"
#include "eehash.inl"
LPVOID pArgs)
{
STATIC_CONTRACT_MODE_COOPERATIVE;
- STATIC_CONTRACT_SO_INTOLERANT;
- HRESULT hrRetVal = E_UNEXPECTED;
- BEGIN_SO_TOLERANT_CODE(GetThread());
- hrRetVal = pfnCallback(pArgs);
- END_SO_TOLERANT_CODE;
- return hrRetVal;
+ return pfnCallback(pArgs);
}
HRESULT CEECompileInfo::MakeCrossDomainCallback(
{
THROWS;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
GC_TRIGGERS;
THROWS;
MODE_COOPERATIVE;
- SO_TOLERANT;
}
CONTRACTL_END;
pThread = GetThread();
_ASSERTE(pThread);
GCPROTECT_BEGIN(gc);
- BEGIN_SO_INTOLERANT_CODE(pThread);
gc.orDelegate = ObjectFromHandle(args->share->m_Threadable);
gc.orThreadStartArg = ObjectFromHandle(args->share->m_ThreadStartArg);
}
STRESS_LOG2(LF_SYNC, LL_INFO10, "Managed thread exiting normally for delegate %p Type %pT\n", OBJECTREFToObject(gc.orDelegate), (size_t) gc.orDelegate->GetMethodTable());
- END_SO_INTOLERANT_CODE;
GCPROTECT_END();
}
GC_TRIGGERS;
THROWS;
MODE_ANY;
- SO_TOLERANT;
}
CONTRACTL_END;
ULONG retVal = 0;
// Before we do anything else, get Setup so that we have a real thread.
- // Our thread isn't setup yet, so we can't use the standard probe
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return E_FAIL);
-
KickOffThread_Args args;
// don't have a separate var becuase this can be updated in the worker
args.share = (SharedState *) pass;
DestroyThread(pThread);
}
- END_SO_INTOLERANT_CODE;
-
return retVal;
}
FCALL_CONTRACT;
BOOL bRet = FALSE;
- HELPER_METHOD_FRAME_BEGIN_RET_0(); // Eventually calls BEGIN_SO_INTOLERANT_CODE_NOTHROW
+ HELPER_METHOD_FRAME_BEGIN_RET_0();
bRet = ThreadpoolMgr::SetMaxThreads(workerThreads,completionPortThreads);
HELPER_METHOD_FRAME_END();
FCALL_CONTRACT;
BOOL bRet = FALSE;
- HELPER_METHOD_FRAME_BEGIN_RET_0(); // Eventually calls BEGIN_SO_INTOLERANT_CODE_NOTHROW
+ HELPER_METHOD_FRAME_BEGIN_RET_0();
bRet = ThreadpoolMgr::SetMinThreads(workerThreads,completionPortThreads);
HELPER_METHOD_FRAME_END();
gc.waitObject = (WAITHANDLEREF) ObjectToOBJECTREF(waitObjectUNSAFE);
gc.state = (OBJECTREF) stateUNSAFE;
gc.registeredWaitObject = (OBJECTREF) registeredWaitObjectUNSAFE;
- HELPER_METHOD_FRAME_BEGIN_RET_PROTECT(gc); // Eventually calls BEGIN_SO_INTOLERANT_CODE_NOTHROW
+ HELPER_METHOD_FRAME_BEGIN_RET_PROTECT(gc);
if(gc.waitObject == NULL)
COMPlusThrow(kArgumentNullException);
BOOL retVal = false;
SAFEHANDLEREF refSH = (SAFEHANDLEREF) ObjectToOBJECTREF(objectToNotify);
- HELPER_METHOD_FRAME_BEGIN_RET_1(refSH); // Eventually calls BEGIN_SO_INTOLERANT_CODE_NOTHROW
+ HELPER_METHOD_FRAME_BEGIN_RET_1(refSH);
HANDLE hWait = (HANDLE) WaitHandle;
HANDLE hObjectToNotify = NULL;
{
FCALL_CONTRACT;
- HELPER_METHOD_FRAME_BEGIN_0(); // Eventually calls BEGIN_SO_INTOLERANT_CODE_NOTHROW
+ HELPER_METHOD_FRAME_BEGIN_0();
HANDLE hWait = (HANDLE)WaitHandle;
ThreadpoolMgr::WaitHandleCleanup(hWait);
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_GC_NOTRIGGER;
STATIC_CONTRACT_MODE_ANY;
- STATIC_CONTRACT_SO_TOLERANT;
-
}
VOID BindIoCompletionCallBack_Worker(LPVOID args)
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_GC_TRIGGERS;
STATIC_CONTRACT_MODE_ANY;
- STATIC_CONTRACT_SO_INTOLERANT;
DWORD ErrorCode = ((BindIoCompletion_Args *)args)->ErrorCode;
DWORD numBytesTransferred = ((BindIoCompletion_Args *)args)->numBytesTransferred;
THROWS;
MODE_ANY;
GC_TRIGGERS;
- SO_INTOLERANT;
}
CONTRACTL_END;
BOOL retVal = FALSE;
- HELPER_METHOD_FRAME_BEGIN_RET_0(); // Eventually calls BEGIN_SO_INTOLERANT_CODE_NOTHROW
+ HELPER_METHOD_FRAME_BEGIN_RET_0();
HANDLE hFile = (HANDLE) fileHandle;
DWORD errCode = 0;
BOOL res = FALSE;
- HELPER_METHOD_FRAME_BEGIN_RET_1(overlapped); // Eventually calls BEGIN_SO_INTOLERANT_CODE_NOTHROW
+ HELPER_METHOD_FRAME_BEGIN_RET_1(overlapped);
// OS doesn't signal handle, so do it here
lpOverlapped->Internal = 0;
THROWS;
MODE_ANY;
GC_TRIGGERS;
- SO_INTOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_TRIGGERS;
MODE_ANY;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pMD));
PRECONDITION(CheckPointer(pThread));
PRECONDITION(CORProfilerTrackTransitions());
// Do not add a CONTRACT here. We haven't set up SEH. We rely
// on HandleThreadAbort dealing with this situation properly.
- // @todo - We need to probe here, but can't introduce destructors etc.
- BEGIN_CONTRACT_VIOLATION(SOToleranceViolation);
-
-
// WARNING!!!!
// when we start executing here, we are actually in cooperative mode. But we
// haven't synchronized with the barrier to reentry yet. So we are in a highly
// should always be in coop mode here
_ASSERTE(pThread->PreemptiveGCDisabled());
- END_CONTRACT_VIOLATION;
-
// Note that this code does not handle rare signatures that do not return HRESULT properly
return hr;
PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonILStubWillNotThrow);
//
- // NOTE! We do not use BEGIN_CALL_TO_MANAGEDEX around this call because we stayed in the SO_TOLERANT
- // mode and COMToCLRDispatchHelper is responsible for pushing/popping the CPFH into the FS:0 chain.
+ // NOTE! We do not use BEGIN_CALL_TO_MANAGEDEX around this call because COMToCLRDispatchHelper is
+ // responsible for pushing/popping the CPFH into the FS:0 chain.
//
*pRetValOut = COMToCLRDispatchHelper(
NOTHROW;
GC_TRIGGERS;
MODE_COOPERATIVE;
- SO_TOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_TRIGGERS;
MODE_COOPERATIVE;
- SO_TOLERANT;
}
CONTRACTL_END;
bool fSuccess = true;
- BEGIN_SO_INTOLERANT_CODE_NOTHROW(pThread, { *pRetValOut = COR_E_STACKOVERFLOW; return false; } );
-
EX_TRY
{
*pObjectOut = AllocateObject(pMT);
}
EX_END_CATCH(SwallowAllExceptions);
- END_SO_INTOLERANT_CODE;
-
return fSuccess;
}
NOTHROW;
GC_TRIGGERS;
MODE_COOPERATIVE;
- SO_TOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_TRIGGERS;
MODE_COOPERATIVE;
- SO_TOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_TRIGGERS;
MODE_COOPERATIVE;
- SO_TOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_TRIGGERS;
MODE_COOPERATIVE;
- SO_TOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_TRIGGERS;
MODE_COOPERATIVE;
- SO_TOLERANT;
}
CONTRACTL_END;
return;
}
-void COMToCLRWorkerBody_SOIntolerant(Thread * pThread, ComMethodFrame * pFrame, ComCallWrapper * pWrap, UINT64 * pRetValOut)
-{
- STATIC_CONTRACT_THROWS; // THROWS due to END_SO_TOLERANT_CODE
- STATIC_CONTRACT_GC_TRIGGERS;
- STATIC_CONTRACT_MODE_COOPERATIVE;
- STATIC_CONTRACT_SO_INTOLERANT;
-
- BEGIN_SO_TOLERANT_CODE(pThread);
-
- COMToCLRWorkerBody(pThread, pFrame, pWrap, pRetValOut);
-
- END_SO_TOLERANT_CODE;
-}
-
//------------------------------------------------------------------
// UINT64 __stdcall COMToCLRWorker(Thread *pThread,
// ComMethodFrame* pFrame)
// to leave the MODE_ contract enabled on x86.
DISABLED(MODE_PREEMPTIVE);
#endif
- SO_TOLERANT;
PRECONDITION(CheckPointer(pFrame));
PRECONDITION(CheckPointer(pThread, NULL_OK));
}
HRESULT hrRetVal = S_OK;
- BEGIN_SO_INTOLERANT_CODE_NOTHROW(pThread, return COR_E_STACKOVERFLOW);
- // BEGIN_ENTRYPOINT_NOTHROW_WITH_THREAD(pThread);
-
IUnknown** pip = (IUnknown **)pFrame->GetPointerToArguments();
IUnknown* pUnk = (IUnknown *)*pip;
_ASSERTE(pUnk != NULL);
LOG((LF_STUBS, LL_INFO1000000, "FieldCallWorker leave\n"));
- END_SO_INTOLERANT_CODE;
- //END_ENTRYPOINT_NOTHROW_WITH_THREAD;
-
return hrRetVal;
}
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
PRECONDITION(IsFieldCall());
}
CONTRACT_END;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
PRECONDITION(IsMethodCall());
}
CONTRACT_END;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
PRECONDITION(!IsFieldCall());
PRECONDITION(CheckPointer(m_pMD));
POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
PRECONDITION(!IsFieldCall());
POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
SUPPORTS_DAC;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
PRECONDITION(IsFieldCall());
PRECONDITION(CheckPointer(m_pFD));
POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
PRECONDITION(IsMethodCall());
PRECONDITION(CheckPointer(m_pMD));
}
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
PRECONDITION(m_flags & enum_NativeInfoInitialized);
SUPPORTS_DAC;
}
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
PRECONDITION(IsMethodCall());
PRECONDITION(CheckPointer(m_pMD));
}
THROWS;
MODE_COOPERATIVE;
DISABLED(GC_TRIGGERS); // can't use this in an FCALL because we're in forbid gc mode until we setup a H_M_F.
- SO_TOLERANT;
}
CONTRACTL_END;
THROWS;
MODE_COOPERATIVE;
DISABLED(GC_TRIGGERS); // can't use this in an FCALL because we're in forbid gc mode until we setup a H_M_F.
- SO_TOLERANT;
}
CONTRACTL_END;
FORCEINLINE UINT64 GCInterface::InterlockedAdd (UINT64 *pAugend, UINT64 addend) {
WRAPPER_NO_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
UINT64 oldMemValue;
UINT64 newMemValue;
FORCEINLINE UINT64 GCInterface::InterlockedSub(UINT64 *pMinuend, UINT64 subtrahend) {
WRAPPER_NO_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
UINT64 oldMemValue;
UINT64 newMemValue;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
} CONTRACTL_END;
_ASSERTE(mt != NULL);
NOTHROW;
GC_NOTRIGGER;
MODE_COOPERATIVE;
- SO_TOLERANT;
} CONTRACTL_END;
INT32 hashCode = 0;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
} CONTRACTL_END;
PCODE actual = pMT->GetRestoredSlot(slot);
void AcquireSafeHandleFromWaitHandle(WAITHANDLEREF wh)
{
CONTRACTL {
- THROWS;
- GC_TRIGGERS;
- SO_INTOLERANT;
+ THROWS;
+ GC_TRIGGERS;
MODE_COOPERATIVE;
PRECONDITION(wh != NULL);
} CONTRACTL_END;
void ReleaseSafeHandleFromWaitHandle(WAITHANDLEREF wh)
{
CONTRACTL {
- THROWS;
- GC_TRIGGERS;
- SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
MODE_COOPERATIVE;
PRECONDITION(wh != NULL);
} CONTRACTL_END;
{
CONTRACTL {
NOTHROW;
- SO_TOLERANT;
PRECONDITION(CheckPointer(GetThread()));
if (GetThread()->PreemptiveGCDisabled()) { GC_NOTRIGGER; } else { GC_TRIGGERS; }
} CONTRACTL_END;
{
CONTRACTL {
NOTHROW;
- SO_TOLERANT;
PRECONDITION(CheckPointer(GetThread()));
if (GetThread()->PreemptiveGCDisabled()) { GC_NOTRIGGER; } else { GC_TRIGGERS; }
PRECONDITION(id <= TypeIDProvider::MAX_TYPE_ID);
CONTRACTL {
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
} CONTRACTL_END;
// Take the lock
THROWS;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
INJECT_FAULT(COMPlusThrowOM());
PRECONDITION(m_nextID != 0);
PRECONDITION(m_incSize != 0);
THROWS;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
INJECT_FAULT(COMPlusThrowOM());
PRECONDITION(m_nextFatID != 0);
PRECONDITION(m_incSize != 0);
void * cookie)
{
STATIC_CONTRACT_THROWS;
- STATIC_CONTRACT_SO_INTOLERANT;
- HRESULT hr = S_OK;
-
- BEGIN_SO_TOLERANT_CODE(GetThread());
- hr = pCallback(cookie);
- END_SO_TOLERANT_CODE;
-
- return hr;
+ return pCallback(cookie);
}
HRESULT CorHost2::ExecuteInAppDomain(DWORD dwAppDomainId,
ENTER_DOMAIN_ID(ADID(dwAppDomainId))
{
// We are calling an unmanaged function pointer, either an unmanaged function, or a marshaled out delegate.
- // The thread should be in preemptive mode, and SO_Tolerant.
+ // The thread should be in preemptive mode.
GCX_PREEMP();
hr=ExecuteInAppDomainHelper (pCallback, cookie);
}
STDMETHODIMP CorHost2::UnloadAppDomain2(DWORD dwDomainId, BOOL fWaitUntilDone, int *pLatchedExitCode)
{
WRAPPER_NO_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
if (!m_fStarted)
return HOST_E_INVALIDOPERATION;
{
WRAPPER(THROWS);
WRAPPER(GC_TRIGGERS);
- SO_TOLERANT;
}
CONTRACTL_END;
return InterlockedIncrement(&m_cRef);
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT; // no global state updates that need guarding.
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACTL_END;
HRESULT hr = S_OK;
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return COR_E_STACKOVERFLOW; );
CorHost2 *pCorHost = new (nothrow) CorHost2();
if (!pCorHost)
{
if (FAILED(hr))
delete pCorHost;
}
- END_SO_INTOLERANT_CODE;
return (hr);
}
virtual ULONG STDMETHODCALLTYPE AddRef(void)
{
LIMITED_METHOD_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
return 1;
}
virtual ULONG STDMETHODCALLTYPE Release(void)
{
LIMITED_METHOD_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
return 1;
}
void **ppvObject)
{
LIMITED_METHOD_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
if (riid != IID_ICLRPolicyManager && riid != IID_IUnknown)
return (E_NOINTERFACE);
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT; // no global state updates
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACTL_END;
HRESULT STDMETHODCALLTYPE CExecutionEngine::QueryInterface(REFIID id, void **pInterface)
{
LIMITED_METHOD_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
if (!pInterface)
return E_POINTER;
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_MODE_ANY;
STATIC_CONTRACT_CANNOT_TAKE_LOCK;
- STATIC_CONTRACT_SO_TOLERANT;
//<TODO> @TODO: Decide on an exception strategy for all the DLLs of the CLR, and then
// enable all the exceptions out of this method.</TODO>
goto LError;
}
memset (pTlsInfo, 0, sizeof(ClrTlsInfo));
- // We save the last intolerant marker on stack in this slot.
- // -1 is the larget unsigned number, and therefore our marker is always smaller than it.
- pTlsInfo->data[TlsIdx_SOIntolerantTransitionHandler] = (void*)(-1);
}
if (!fInTls && pTlsInfo)
STATIC_CONTRACT_GC_NOTRIGGER;
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_MODE_ANY;
- STATIC_CONTRACT_SO_TOLERANT;
// !!! This function is called during Thread::SwitchIn and SwitchOut
// !!! It is extremely important that while executing this function, we will not
{
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_GC_NOTRIGGER;
- STATIC_CONTRACT_SO_TOLERANT;
STATIC_CONTRACT_MODE_ANY;
#ifdef STRESS_LOG
VOID STDMETHODCALLTYPE CExecutionEngine::TLS_AssociateCallback(DWORD slot, PTLS_CALLBACK_FUNCTION callback)
{
WRAPPER_NO_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
CheckThreadState(slot);
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_MODE_ANY;
STATIC_CONTRACT_CANNOT_TAKE_LOCK;
- STATIC_CONTRACT_SO_TOLERANT;
return CExecutionEngine::GetTlsData();
}
LPVOID STDMETHODCALLTYPE CExecutionEngine::TLS_GetValue(DWORD slot)
{
WRAPPER_NO_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
return EETlsGetValue(slot);
}
BOOL STDMETHODCALLTYPE CExecutionEngine::TLS_CheckValue(DWORD slot, LPVOID * pValue)
{
WRAPPER_NO_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
return EETlsCheckValue(slot, pValue);
}
VOID STDMETHODCALLTYPE CExecutionEngine::TLS_SetValue(DWORD slot, LPVOID pData)
{
WRAPPER_NO_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
EETlsSetValue(slot,pData);
}
VOID STDMETHODCALLTYPE CExecutionEngine::TLS_ThreadDetaching()
{
WRAPPER_NO_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
CExecutionEngine::ThreadDetaching(NULL);
}
void STDMETHODCALLTYPE CExecutionEngine::DestroyLock(CRITSEC_COOKIE cookie)
{
WRAPPER_NO_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
::EEDeleteCriticalSection(cookie);
}
void STDMETHODCALLTYPE CExecutionEngine::AcquireLock(CRITSEC_COOKIE cookie)
{
WRAPPER_NO_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
- BEGIN_SO_INTOLERANT_CODE(GetThread());
::EEEnterCriticalSection(cookie);
- END_SO_INTOLERANT_CODE;
}
void STDMETHODCALLTYPE CExecutionEngine::ReleaseLock(CRITSEC_COOKIE cookie)
{
WRAPPER_NO_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
- BEGIN_SO_INTOLERANT_CODE(GetThread());
::EELeaveCriticalSection(cookie);
- END_SO_INTOLERANT_CODE;
}
// Locking routines supplied by the EE to the other DLLs of the CLR. In a _DEBUG
void STDMETHODCALLTYPE CExecutionEngine::CloseEvent(EVENT_COOKIE event)
{
WRAPPER_NO_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
if (event) {
CLREvent *pEvent = CookieToCLREvent(event);
pEvent->CloseEvent();
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
BOOL bAlertable)
{
WRAPPER_NO_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
if (event) {
CLREvent *pEvent = CookieToCLREvent(event);
return pEvent->Wait(dwMilliseconds,bAlertable);
DWORD dwMilliseconds)
{
STATIC_CONTRACT_WRAPPER;
- STATIC_CONTRACT_SO_TOLERANT;
return ::WaitForSingleObject(handle,dwMilliseconds);
}
static inline SEMAPHORE_COOKIE CLRSemaphoreToCookie(CLRSemaphore * pSemaphore)
{
LIMITED_METHOD_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
_ASSERTE((((uintptr_t) pSemaphore) & POISON_BITS) == 0);
#ifdef _DEBUG
THROWS;
MODE_ANY;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
BOOL bAlertable)
{
WRAPPER_NO_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
CLRSemaphore *pSemaphore = CookieToCLRSemaphore(semaphore);
return pSemaphore->Wait(dwMilliseconds,bAlertable);
}
NOTHROW;
MODE_ANY;
GC_NOTRIGGER;
- SO_TOLERANT; // we catch any erros and free the allocated memory
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_INTOLERANT;
MODE_ANY;
}
CONTRACTL_END;
DWORD STDMETHODCALLTYPE CExecutionEngine::ClrSleepEx(DWORD dwMilliseconds, BOOL bAlertable)
{
WRAPPER_NO_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
-
return EESleepEx(dwMilliseconds,bAlertable);
}
#define ClrSleepEx EESleepEx
BOOL STDMETHODCALLTYPE CExecutionEngine::ClrAllocationDisallowed()
{
WRAPPER_NO_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
return EEAllocationDisallowed();
}
#define ClrAllocationDisallowed EEAllocationDisallowed
DWORD flProtect)
{
WRAPPER_NO_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
return EEVirtualAlloc(lpAddress, dwSize, flAllocationType, flProtect);
}
#define ClrVirtualAlloc EEVirtualAlloc
DWORD dwFreeType)
{
WRAPPER_NO_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
return EEVirtualFree(lpAddress, dwSize, dwFreeType);
}
#define ClrVirtualFree EEVirtualFree
SIZE_T dwLength)
{
WRAPPER_NO_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
return EEVirtualQuery(lpAddress, lpBuffer, dwLength);
}
#define ClrVirtualQuery EEVirtualQuery
PDWORD lpflOldProtect)
{
WRAPPER_NO_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
// Get the UEF installation details - we will use these to validate
// that the calls to ClrVirtualProtect are not going to affect the UEF.
HANDLE STDMETHODCALLTYPE CExecutionEngine::ClrGetProcessHeap()
{
WRAPPER_NO_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
return EEGetProcessHeap();
}
#define ClrGetProcessHeap EEGetProcessHeap
HANDLE STDMETHODCALLTYPE CExecutionEngine::ClrGetProcessExecutableHeap()
{
WRAPPER_NO_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
return EEGetProcessExecutableHeap();
}
#define ClrGetProcessExecutableHeap EEGetProcessExecutableHeap
SIZE_T dwMaximumSize)
{
WRAPPER_NO_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
return EEHeapCreate(flOptions, dwInitialSize, dwMaximumSize);
}
#define ClrHeapCreate EEHeapCreate
BOOL STDMETHODCALLTYPE CExecutionEngine::ClrHeapDestroy(HANDLE hHeap)
{
WRAPPER_NO_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
return EEHeapDestroy(hHeap);
}
#define ClrHeapDestroy EEHeapDestroy
SIZE_T dwBytes)
{
WRAPPER_NO_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
-
- // We need to guarentee a very small stack consumption in allocating. And we can't allow
- // an SO to happen while calling into the host. This will force a hard SO which is OK because
- // we shouldn't ever get this close inside the EE in SO-intolerant code, so this should
- // only fail if we call directly in from outside the EE, such as the JIT.
- MINIMAL_STACK_PROBE_CHECK_THREAD(GetThread());
return EEHeapAlloc(hHeap, dwFlags, dwBytes);
}
LPVOID lpMem)
{
WRAPPER_NO_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
return EEHeapFree(hHeap, dwFlags, lpMem);
}
#define ClrHeapFree EEHeapFree
LPCVOID lpMem)
{
WRAPPER_NO_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
return EEHeapValidate(hHeap, dwFlags, lpMem);
}
#define ClrHeapValidate EEHeapValidate
void CExecutionEngine::GetLastThrownObjectExceptionFromThread(void **ppvException)
{
WRAPPER_NO_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
// Cast to our real type.
Exception **ppException = reinterpret_cast<Exception**>(ppvException);
inline ~CrstHolder()
{
WRAPPER_NO_CONTRACT;
-
- VALIDATE_HOLDER_STACK_CONSUMPTION_FOR_TYPE(HSV_ValidateMinimumStackReq);
ReleaseLock(m_pCrst);
}
};
int inherited = 0;
int allowMultiple = 1;
- BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), FCThrowVoid(kStackOverflowException));
{
CustomAttributeParser ca(pData, cData);
*pInherited = namedArgs[inherited].val.boolean == TRUE;
*pAllowMultiple = namedArgs[allowMultiple].val.boolean == TRUE;
}
- END_SO_INTOLERANT_CODE;
}
FCIMPLEND
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_COOPERATIVE;
}
CONTRACTL_END;
int __cdecl DataImage::rvaInfoVectorEntryCmp(const void* a_, const void* b_)
{
LIMITED_METHOD_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
DataImage::RvaInfoStructure *a = (DataImage::RvaInfoStructure *)a_;
DataImage::RvaInfoStructure *b = (DataImage::RvaInfoStructure *)b_;
int rvaComparisonResult = (int)(a->rva - b->rva);
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
- SO_INTOLERANT;
}
CONTRACTL_END;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
- SO_INTOLERANT;
}
CONTRACTL_END;
THROWS;
GC_TRIGGERS;
MODE_ANY;
- SO_INTOLERANT;
POSTCONDITION(CheckPointer(RETVAL));
}
CONTRACT_END;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
- SO_INTOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_INTOLERANT;
}
CONTRACTL_END;
THROWS;
GC_TRIGGERS;
MODE_ANY;
- SO_INTOLERANT;
POSTCONDITION(CheckPointer(RETVAL));
}
CONTRACT_END;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
SUPPORTS_DAC;
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACTL_END;
Thread *pThread;
pThread = GetThread();
_ASSERTE(pThread);
- INTERIOR_STACK_PROBE_FOR(pThread, 8);
switch (level)
{
UNREACHABLE();
}
- END_INTERIOR_STACK_PROBE;
-
#ifdef FEATURE_MULTICOREJIT
{
Module * pModule = GetModule();
NOTHROW;
GC_TRIGGERS;
MODE_ANY;
- SO_INTOLERANT;
}
CONTRACTL_END;
inline Module* DomainFile::GetCurrentModule()
{
LIMITED_METHOD_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
SUPPORTS_DAC;
return m_pModule;
inline Assembly* DomainAssembly::GetCurrentAssembly()
{
LIMITED_METHOD_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
return m_pAssembly;
}
inline Assembly* DomainAssembly::GetAssembly()
{
LIMITED_METHOD_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
- {
- // CheckLoadLevel() is SO_INTOLERANT. However, this is only done in
- // debug for the consistency check, so we can accept the SO violation.
- CONTRACT_VIOLATION(SOToleranceViolation);
- CONSISTENCY_CHECK(CheckLoadLevel(FILE_LOAD_ALLOCATE));
- }
+ CONSISTENCY_CHECK(CheckLoadLevel(FILE_LOAD_ALLOCATE));
return m_pAssembly;
}
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_INTOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
- SO_INTOLERANT;
INJECT_FAULT(COMPlusThrowOM(););
PRECONDITION(CheckPointer(securityControlFlags));
PRECONDITION(CheckPointer(typeOwner));
{
THROWS;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
GC_NOTRIGGER;
MODE_ANY;
HOST_NOCALLS;
- SO_TOLERANT;
SUPPORTS_DAC;
}
CONTRACTL_END;
THROWS;
GC_TRIGGERS;
MODE_ANY;
- SO_TOLERANT;
PRECONDITION(pMD->IsFCall());
}
CONTRACTL_END;
{
CONTRACTL
{
- SO_TOLERANT; // STATIC_CONTRACT_DEBUG_ONLY
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pValue));
PRECONDITION(CheckPointer(pKey));
} CONTRACT_END;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT; // TODO: Verify this does not do anything that would make it so_intolerant
PRECONDITION(CheckPointer(pValue));
PRECONDITION(CheckPointer(pKey));
} CONTRACT_END;
ConfigStringHashtable* table = iter.Next();
if(table != NULL)
{
- BEGIN_SO_INTOLERANT_CODE_NOTHROW(pThread, RETURN E_FAIL;)
pair = table->Lookup(pKey);
- END_SO_INTOLERANT_CODE
if(pair != NULL)
{
*pValue = pair->value;
table != NULL;
table = iter.Next())
{
- BEGIN_SO_INTOLERANT_CODE_NOTHROW(pThread, RETURN E_FAIL;)
pair = table->Lookup(pKey);
- END_SO_INTOLERANT_CODE
if(pair != NULL)
{
*pValue = pair->value;
table != NULL;
table = iter.Previous())
{
- BEGIN_SO_INTOLERANT_CODE_NOTHROW(pThread, RETURN E_FAIL;)
pair = table->Lookup(pKey);
- END_SO_INTOLERANT_CODE
if(pair != NULL)
{
*pValue = pair->value;
GC_NOTRIGGER;
// MODE_ANY;
FORBID_FAULT;
- SO_TOLERANT;
} CONTRACTL_END;
pEnd = &(pList->m_pElement);
GC_NOTRIGGER;
// MODE_ANY;
FORBID_FAULT;
- SO_TOLERANT;
POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
} CONTRACT_END;
NOTHROW;
GC_NOTRIGGER;
FORBID_FAULT;
- // MODE_ANY;
- SO_TOLERANT;
+ // MODE_ANY;
POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
} CONTRACT_END;
SCAN_IGNORE_FAULT; // due to the contract checking logic itself.
SCAN_IGNORE_TRIGGER;
SCAN_IGNORE_LOCK;
- SCAN_IGNORE_SO;
// Many of the checks below result in calls to GetThread()
// that work just fine if GetThread() returns NULL, so temporarily
#define EECONTRACT_H_
#include "contract.h"
-#include "stackprobe.h"
// --------------------------------------------------------------------------------
// EECONTRACT is an extension of the lower level CONTRACT macros to include some
THROWS; \
GC_TRIGGERS; \
MODE_PREEMPTIVE; \
- SO_INTOLERANT; \
INJECT_FAULT(COMPlusThrowOM();); \
#endif // EECONTRACT_H_
{
CONTRACTL
{
- SO_INTOLERANT;
NOTHROW;
GC_NOTRIGGER;
}
{
CONTRACT(Frame *)
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
PRECONDITION(CheckPointer(pCF));
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
PRECONDITION(CheckPointer(pThread));
{
CONTRACTL
{
- SO_NOT_MAINLINE;
THROWS;
GC_NOTRIGGER;
PRECONDITION(CheckPointer(o));
{
CONTRACTL
{
- SO_NOT_MAINLINE;
THROWS;
GC_NOTRIGGER;
PRECONDITION(CheckPointer(pMT));
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
PRECONDITION(CheckPointer(so));
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
PRECONDITION(CheckPointer(so));
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
}
{
CONTRACTL
{
- SO_NOT_MAINLINE;
THROWS; // From CreateHandle
GC_NOTRIGGER;
PRECONDITION(CheckPointer(pAppDomain));
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
}
{
CONTRACTL
{
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
PRECONDITION(CheckPointer(pThread));
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
}
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
DISABLED(GC_TRIGGERS); // Called by unmanaged threads.
}
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
}
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
PRECONDITION(CheckPointer(pRuntimeThread));
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
PRECONDITION(CheckPointer(pRuntimeThread));
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
PRECONDITION(CheckPointer(thread));
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
}
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
}
{
CONTRACTL
{
- SO_INTOLERANT;
NOTHROW;
GC_NOTRIGGER;
PRECONDITION(CheckPointer(pCold));
{
CONTRACTL
{
- SO_INTOLERANT;
NOTHROW;
DISABLED(GC_TRIGGERS); // Disabled because disabled in RareDisablePreemptiveGC()
}
{
CONTRACTL
{
- SO_INTOLERANT;
NOTHROW;
DISABLED(GC_TRIGGERS); // Disabled because disabled in RareEnablePreemptiveGC()
}
{
CONTRACTL
{
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
}
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
PRECONDITION(CheckPointer(pFD));
{
CONTRACTL
{
- SO_NOT_MAINLINE;
THROWS;
GC_TRIGGERS;
}
{
CONTRACTL
{
- SO_NOT_MAINLINE;
THROWS;
GC_TRIGGERS;
}
{
CONTRACTL
{
- SO_NOT_MAINLINE;
THROWS;
GC_TRIGGERS;
}
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
PRECONDITION(CheckPointer(pModule));
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
PRECONDITION(CheckPointer(pModule));
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
}
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
}
LOG((LF_ENC, LL_INFO100, "EncApplyChanges\n"));
CONTRACTL
{
- SO_NOT_MAINLINE;
DISABLED(THROWS);
DISABLED(GC_TRIGGERS);
PRECONDITION(CheckPointer(pModule));
{
CONTRACTL
{
- SO_NOT_MAINLINE;
DISABLED(THROWS);
DISABLED(GC_TRIGGERS);
PRECONDITION(CheckPointer(pModule));
{
CONTRACTL
{
- SO_NOT_MAINLINE;
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
PRECONDITION(CheckPointer(pCF));
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
}
#ifndef DACCESS_COMPILE
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
}
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
}
{
CONTRACTL
{
- SO_NOT_MAINLINE;
THROWS;
DISABLED(GC_TRIGGERS); // This is not a bug - the debugger can call this on an un-managed thread.
PRECONDITION(CheckPointer(frame));
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_TRIGGERS;
PRECONDITION(CheckPointer(stubManager));
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
PRECONDITION(CheckPointer(thread));
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
PRECONDITION(CheckPointer(thread));
{
CONTRACTL
{
- SO_INTOLERANT;
NOTHROW;
GC_NOTRIGGER;
PRECONDITION(CheckPointer(pTLSIndex));
{
CONTRACTL
{
- SO_NOT_MAINLINE;
THROWS;
GC_NOTRIGGER;
}
{
CONTRACTL
{
- SO_NOT_MAINLINE;
THROWS;
GC_TRIGGERS;
}
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
PRECONDITION(CheckPointer(pThread));
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
}
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
PRECONDITION(CheckPointer(pThread));
{
THROWS;
GC_TRIGGERS;
- SO_INTOLERANT;
MODE_ANY;
}
CONTRACTL_END;
{
THROWS;
GC_TRIGGERS;
- SO_INTOLERANT;
MODE_ANY;
}
CONTRACTL_END;
WRAPPER(THROWS);
WRAPPER(GC_NOTRIGGER);
FORBID_FAULT;
- SO_TOLERANT;
SUPPORTS_DAC;
}
CONTRACTL_END
#ifdef MODE_COOPERATIVE // This header file sees contract.h, not eecontract.h - what a kludge!
MODE_COOPERATIVE;
#endif
- SO_TOLERANT;
}
CONTRACTL_END
WRAPPER(THROWS);
WRAPPER(GC_NOTRIGGER);
FORBID_FAULT;
- SO_TOLERANT;
SUPPORTS_DAC;
}
CONTRACTL_END
WRAPPER(THROWS);
WRAPPER(GC_NOTRIGGER);
FORBID_FAULT;
- SO_TOLERANT;
SUPPORTS_DAC;
}
CONTRACTL_END
#ifdef MODE_COOPERATIVE // This header file sees contract.h, not eecontract.h - what a kludge!
MODE_COOPERATIVE;
#endif
- SO_TOLERANT;
}
CONTRACTL_END
{
CONTRACTL
{
- SO_TOLERANT;
MODE_ANY;
GC_NOTRIGGER;
NOTHROW;
{
CONTRACTL
{
- SO_TOLERANT;
MODE_ANY;
GC_NOTRIGGER;
NOTHROW;
THROWS;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
}
CONTRACTL_END;
void SafeExitProcess(UINT exitCode, BOOL fAbort = FALSE, ShutdownCompleteAction sca = SCA_ExitProcessWhenShutdownComplete)
{
- // The process is shutting down. No need to check SO contract.
- SO_NOT_MAINLINE_FUNCTION;
STRESS_LOG2(LF_SYNC, LL_INFO10, "SafeExitProcess: exitCode = %d, fAbort = %d\n", exitCode, fAbort);
CONTRACTL
{
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
PerformResourceConstraintAction(pThread, action, HOST_E_EXITPROCESS_OUTOFMEMORY, TRUE);
}
-#ifdef FEATURE_STACK_PROBE
-//---------------------------------------------------------------------------------------
-//
-// IsSOTolerant - Is the current thread in SO Tolerant region?
-//
-// Arguments:
-// pLimitFrame: the limit of search for frames
-//
-// Return Value:
-// TRUE if in SO tolerant region.
-// FALSE if in SO intolerant region.
-//
-// Note:
-// We walk our frame chain to decide. If HelperMethodFrame is seen first, we are in tolerant
-// region. If EnterSOIntolerantCodeFrame is seen first, we are in intolerant region.
-//
-BOOL Thread::IsSOTolerant(void * pLimitFrame)
-{
- LIMITED_METHOD_CONTRACT;
-
- Frame *pFrame = GetFrame();
- void* pSOIntolerantMarker = ClrFlsGetValue(TlsIdx_SOIntolerantTransitionHandler);
- if (pSOIntolerantMarker == FRAME_TOP)
- {
- // We have not set a marker for intolerant transition yet.
- return TRUE;
- }
- while (pFrame != FRAME_TOP && pFrame < pLimitFrame)
- {
- Frame::ETransitionType type = pFrame->GetTransitionType();
- if (pFrame > pSOIntolerantMarker)
- {
- return FALSE;
- }
- else if (type == Frame::TT_M2U || type == Frame::TT_InternalCall ||
- // We can not call HelperMethodFrame::GetFunction on SO since the call
- // may need to call into host. This is why we check for TT_InternalCall first.
- pFrame->GetFunction() != NULL)
- {
- return TRUE;
- }
- pFrame = pFrame->Next();
- }
-
- if (pFrame == FRAME_TOP)
- // We walked to the end of chain, but the thread has one IntolerantMarker on stack decided from
- // the check above while loop.
- return FALSE;
- else
- return TRUE;
-}
-
-#endif
-
//---------------------------------------------------------------------------------------
//
// EEPolicy::HandleStackOverflow - Handle stack overflow according to policy
// 3. If stack overflows in SO intolerant region, the process is killed as soon as the exception is seen by our vector handler, or
// our managed exception handler.
//
-// If SO Probing code is disabled (by FEATURE_STACK_PROBE not defined) then the process
-// is terminated if there is StackOverflow as all clr code will be considered SO Intolerant.
+// The process is terminated if there is a StackOverflow, as all CLR code is considered SO-intolerant.
void EEPolicy::HandleStackOverflow(StackOverflowDetector detector, void * pLimitFrame)
{
WRAPPER_NO_CONTRACT;
return;
}
-#ifdef FEATURE_STACK_PROBE
-
- // We only process SO once at
- // 1. VectoredExceptionHandler if SO in mscorwks
- // 2. managed exception handler
- // 3. SO_Tolerant transition handler
- if (pThread->HasThreadStateNC(Thread::TSNC_SOWorkNeeded) &&
- detector != SOD_UnmanagedFrameHandler)
- {
- return;
- }
-#endif
-
-#ifdef FEATURE_STACK_PROBE
- BOOL fInSoTolerant = pThread->IsSOTolerant(pLimitFrame);
-#else
- BOOL fInSoTolerant = false;
-#endif
-
EXCEPTION_POINTERS exceptionInfo;
GetCurrentExceptionPointers(&exceptionInfo);
_ASSERTE(exceptionInfo.ExceptionRecord);
-#ifdef FEATURE_STACK_PROBE
- DWORD exceptionCode = exceptionInfo.ExceptionRecord->ExceptionCode;
-
- AppDomain *pCurrentDomain = ::GetAppDomain();
- BOOL fInDefaultDomain = (pCurrentDomain == SystemDomain::System()->DefaultDomain());
- BOOL fInCLR = IsIPInModule(g_pMSCorEE, (PCODE)GetIP(exceptionInfo.ContextRecord));
-
- if (exceptionCode == EXCEPTION_SOFTSO)
- {
- // Our probe detects a thread does not have enough stack. But we have not trashed the process
- // state yet.
- fInSoTolerant = TRUE;
- }
- else
- {
- _ASSERTE (exceptionCode == STATUS_STACK_OVERFLOW);
-
- switch (detector)
- {
- case SOD_ManagedFrameHandler:
- if (!pThread->PreemptiveGCDisabled() && !fInCLR && fInSoTolerant)
- {
- // Managed exception handler detects SO, but the thread is in preemptive GC mode,
- // and the IP is outside CLR. This means we are inside a PINVOKE call.
- fInSoTolerant = FALSE;
- }
- break;
-
- case SOD_UnmanagedFrameHandler:
- break;
-
- case SOD_SOIntolerantTransitor:
- fInSoTolerant = FALSE;
- break;
-
- case SOD_SOTolerantTransitor:
- if (!fInCLR)
- {
- // If SO happens outside of CLR, and it is not detected by managed frame handler,
- // it is fatal
- fInSoTolerant = FALSE;
- }
- break;
-
- default:
- _ASSERTE(!"should not get here");
- }
-
- if (fInDefaultDomain)
- {
- // StackOverflow in default domain is fatal
- fInSoTolerant = FALSE;
- }
- }
-
-#endif // FEATURE_STACK_PROBE
-
- ProcessSOEventForHost(&exceptionInfo, fInSoTolerant);
+ ProcessSOEventForHost(&exceptionInfo, false /* fInSoTolerant */);
-#ifdef FEATURE_STACK_PROBE
- if (!CLRHosted() || GetEEPolicy()->GetActionOnFailure(FAIL_StackOverflow) != eRudeUnloadAppDomain)
- {
- // For security reason, it is not safe to continue execution if stack overflow happens
- // unless a host tells us to do something different.
- EEPolicy::HandleFatalStackOverflow(&exceptionInfo);
- }
-#endif
-
- if (!fInSoTolerant)
- {
- EEPolicy::HandleFatalStackOverflow(&exceptionInfo);
- }
-#ifdef FEATURE_STACK_PROBE
- else
- {
- // EnableADUnloadWorker is SO_Intolerant.
- // But here we know that if we have only one page, we will only update states of the Domain.
- CONTRACT_VIOLATION(SOToleranceViolation);
-
- pThread->PrepareThreadForSOWork();
-
- pThread->MarkThreadForAbort(
- (Thread::ThreadAbortRequester)(Thread::TAR_Thread|Thread::TAR_StackOverflow),
- EEPolicy::TA_Rude);
-
- pThread->SetSOWorkNeeded();
- }
-#endif
+ EEPolicy::HandleFatalStackOverflow(&exceptionInfo);
}
EXCEPTION_POINTERS g_SOExceptionPointers = {&g_SOExceptionRecord, NULL};
-#ifdef FEATURE_STACK_PROBE
-// This function may be called on a thread before debugger is notified of the thread, like in
-// ManagedThreadBase_DispatchMiddle. Currently we can not notify managed debugger, because
-// RS requires that notification is sent first.
-void EEPolicy::HandleSoftStackOverflow(BOOL fSkipDebugger)
-{
- WRAPPER_NO_CONTRACT;
-
- // If we trigger a SO while handling the soft stack overflow,
- // we'll rip the process
- BEGIN_SO_INTOLERANT_CODE_NOPROBE;
-
- AppDomain *pCurrentDomain = ::GetAppDomain();
-
- if (GetEEPolicy()->GetActionOnFailure(FAIL_StackOverflow) != eRudeUnloadAppDomain ||
- pCurrentDomain == SystemDomain::System()->DefaultDomain())
- {
- // We may not be able to build a context on stack
- ProcessSOEventForHost(NULL, FALSE);
-
-
- EEPolicy::HandleFatalStackOverflow(&g_SOExceptionPointers, fSkipDebugger);
- }
- //else if (pCurrentDomain == SystemDomain::System()->DefaultDomain())
- //{
- // We hit soft SO in Default domain, but default domain can not be unloaded.
- // Soft SO can happen in default domain, eg. GetResourceString, or EnsureGrantSetSerialized.
- // So the caller is going to throw a managed exception.
- // RaiseException(EXCEPTION_SOFTSO, 0, 0, NULL);
- //}
- else
- {
- Thread* pThread = GetThread();
-
- // We are leaving VM boundary, either entering managed code, or entering
- // non-VM unmanaged code.
- // We should not throw internal C++ exception. Instead we throw an exception
- // with EXCEPTION_SOFTSO code.
- RaiseException(EXCEPTION_SOFTSO, 0, 0, NULL);
- }
-
- END_SO_INTOLERANT_CODE_NOPROBE;
-
-}
-
-void EEPolicy::HandleStackOverflowAfterCatch()
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- SO_TOLERANT;
- MODE_ANY;
- }
- CONTRACTL_END;
-
-#ifdef STACK_GUARDS_DEBUG
- BaseStackGuard::RestoreCurrentGuard(FALSE);
-#endif
- Thread *pThread = GetThread();
- pThread->RestoreGuardPage();
- pThread->FinishSOWork();
-}
-#endif
-
-
//---------------------------------------------------------------------------------------
// HandleExitProcess is used to shutdown the runtime, based on policy previously set,
// then to exit the process. Note, however, that the process will not exit if
{
THROWS;
GC_TRIGGERS;
- SO_INTOLERANT;
MODE_ANY;
}
CONTRACTL_END;
{
// This is fatal error. We do not care about SO mode any more.
// All of the code from here on out is robust to any failures in any API's that are called.
- CONTRACT_VIOLATION(GCViolation | ModeViolation | SOToleranceViolation | FaultNotFatal | TakesLockViolation);
+ CONTRACT_VIOLATION(GCViolation | ModeViolation | FaultNotFatal | TakesLockViolation);
WRAPPER_NO_CONTRACT;
{
// This is fatal error. We do not care about SO mode any more.
// All of the code from here on out is robust to any failures in any API's that are called.
- CONTRACT_VIOLATION(GCViolation | ModeViolation | SOToleranceViolation | FaultNotFatal | TakesLockViolation);
+ CONTRACT_VIOLATION(GCViolation | ModeViolation | FaultNotFatal | TakesLockViolation);
// Setting g_fFatalErrorOccuredOnGCThread allows code to avoid attempting to make GC mode transitions which could
// If you use MODE_ANY, you must comment why you don't want an exact mode.
// CAN_TAKE_LOCK
// ASSERT_NO_EE_LOCKS_HELD()
-// SO_NOT_MAINLINE
// Note that the preferred contracts in this file are DIFFERENT than the preferred
// contracts for proftoeeinterfaceimpl.cpp.
//
return S_OK; \
}
-// Least common denominator for the callback wrappers. Logs, removes stack
-// guard (REMOVE_STACK_GUARD_FOR_PROFILER_CALL), records in EE Thread object that
-// we're in a callback, and asserts that we're allowed to issue callbacks for the
+// Least common denominator for the callback wrappers. Logs, records in EE Thread object
+// that we're in a callback, and asserts that we're allowed to issue callbacks for the
// specified ThreadID (i.e., no ThreadDestroyed callback has been issued for the
// ThreadID).
//
CHECK_PROFILER_STATUS(ee2pFlags); \
LOG(logParams); \
_ASSERTE(m_pCallback2 != NULL); \
- REMOVE_STACK_GUARD_FOR_PROFILER_CALL; \
/* Normally, set COR_PRF_CALLBACKSTATE_INCALLBACK | */ \
/* COR_PRF_CALLBACKSTATE_IN_TRIGGERS_SCOPE in the callback state, but omit */ \
/* COR_PRF_CALLBACKSTATE_IN_TRIGGERS_SCOPE if we're in a GC_NOTRIGGERS callback */ \
// which takes locks.
CAN_TAKE_LOCK;
- SO_NOT_MAINLINE;
} CONTRACTL_END;
_ASSERTE(pClsid != NULL);
CAN_TAKE_LOCK;
MODE_PREEMPTIVE;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Always called before Thread created.
_ASSERTE(GetThreadNULLOk() == NULL);
- // We'll be calling into the profiler to create its ICorProfilerCallback*
- // implementation
- REMOVE_STACK_GUARD_FOR_PROFILER_CALL;
-
// Try and CoCreate the registered profiler
ReleaseHolder<ICorProfilerCallback2> pCallback2;
HModuleHolder hmodProfilerDLL;
{
if (m_pCallback2 != NULL)
{
- REMOVE_STACK_GUARD_FOR_PROFILER_CALL;
m_pCallback2->Release();
m_pCallback2 = NULL;
}
if (fIsV4Profiler)
{
- REMOVE_STACK_GUARD_FOR_PROFILER_CALL;
m_pCallback3->Release();
m_pCallback3 = NULL;
}
if (m_pCallback4 != NULL)
{
- REMOVE_STACK_GUARD_FOR_PROFILER_CALL;
m_pCallback4->Release();
m_pCallback4 = NULL;
}
if (m_pCallback5 != NULL)
{
- REMOVE_STACK_GUARD_FOR_PROFILER_CALL;
m_pCallback5->Release();
m_pCallback5 = NULL;
}
if (m_pCallback6 != NULL)
{
- REMOVE_STACK_GUARD_FOR_PROFILER_CALL;
m_pCallback6->Release();
m_pCallback6 = NULL;
}
if (m_pCallback7 != NULL)
{
- REMOVE_STACK_GUARD_FOR_PROFILER_CALL;
m_pCallback7->Release();
m_pCallback7 = NULL;
}
if (m_pCallback8 != NULL)
{
- REMOVE_STACK_GUARD_FOR_PROFILER_CALL;
m_pCallback8->Release();
m_pCallback8 = NULL;
}
if (m_pCallback9 != NULL)
{
- REMOVE_STACK_GUARD_FOR_PROFILER_CALL;
m_pCallback9->Release();
m_pCallback9 = NULL;
}
// ListLockEntry typically held during this callback (thanks to
// MethodTable::DoRunClassInitThrowing).
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_INTOLERANT;
MODE_ANY;
}
CONTRACTL_END;
MODE_ANY;
EE_THREAD_NOT_REQUIRED;
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
MODE_ANY;
EE_THREAD_NOT_REQUIRED;
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
MODE_ANY;
EE_THREAD_NOT_REQUIRED;
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
MODE_ANY;
EE_THREAD_NOT_REQUIRED;
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
MODE_ANY;
EE_THREAD_NOT_REQUIRED;
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Yay!
ASSERT_NO_EE_LOCKS_HELD();
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Yay!
ASSERT_NO_EE_LOCKS_HELD();
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Yay!
ASSERT_NO_EE_LOCKS_HELD();
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Yay!
ASSERT_NO_EE_LOCKS_HELD();
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Thread store lock is typically held during this callback
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Yay!
ASSERT_NO_EE_LOCKS_HELD();
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Yay!
ASSERT_NO_EE_LOCKS_HELD();
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Yay!
ASSERT_NO_EE_LOCKS_HELD();
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// The JIT / MethodDesc code likely hold locks while this callback is made
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// The JIT / MethodDesc code likely hold locks while this callback is made
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
GC_TRIGGERS;
MODE_COOPERATIVE; // RuntimeMethodHandle::Destroy (the caller) moves from QCALL to GCX_COOP
CAN_TAKE_LOCK;
- SO_TOLERANT;
}
CONTRACTL_END;
// The JIT / MethodDesc code likely hold locks while this callback is made
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// The JIT / MethodDesc code likely hold locks while this callback is made
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// The JIT / MethodDesc code likely hold locks while this callback is made
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// The JIT / MethodDesc code likely hold locks while this callback is made
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// The JIT / MethodDesc code likely hold locks while this callback is made
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// The JIT / MethodDesc code likely hold locks while this callback is made
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// The ReJIT code holds a lock while this callback is made
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// ReJit holds a lock as well as possibly others...
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Yay!
CAN_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Yay!
ASSERT_NO_EE_LOCKS_HELD();
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Yay!
ASSERT_NO_EE_LOCKS_HELD();
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Yay!
ASSERT_NO_EE_LOCKS_HELD();
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Yay!
ASSERT_NO_EE_LOCKS_HELD();
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Yay!
ASSERT_NO_EE_LOCKS_HELD();
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Yay!
CAN_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// UnresolvedClassLock typically held during this callback
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// UnresolvedClassLock typically held during this callback
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// exception is thrown, and EEClass::Destruct is called from the catch clause
// inside ClassLoader::CreateTypeHandleForTypeDefThrowing.
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Locks can be held when this is called. See comment in ClassUnloadStarted
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Yay!
ASSERT_NO_EE_LOCKS_HELD();
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Yay!
ASSERT_NO_EE_LOCKS_HELD();
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Yay!
ASSERT_NO_EE_LOCKS_HELD();
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Yay!
ASSERT_NO_EE_LOCKS_HELD();
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Yay!
ASSERT_NO_EE_LOCKS_HELD();
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Yay!
ASSERT_NO_EE_LOCKS_HELD();
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Yay!
ASSERT_NO_EE_LOCKS_HELD();
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Yay!
ASSERT_NO_EE_LOCKS_HELD();
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Yay!
ASSERT_NO_EE_LOCKS_HELD();
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Yay!
ASSERT_NO_EE_LOCKS_HELD();
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Yay!
ASSERT_NO_EE_LOCKS_HELD();
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Yay!
ASSERT_NO_EE_LOCKS_HELD();
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Yay!
ASSERT_NO_EE_LOCKS_HELD();
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Yay!
ASSERT_NO_EE_LOCKS_HELD();
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Yay!
ASSERT_NO_EE_LOCKS_HELD();
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Yay!
ASSERT_NO_EE_LOCKS_HELD();
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Yay!
ASSERT_NO_EE_LOCKS_HELD();
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Yay!
ASSERT_NO_EE_LOCKS_HELD();
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Yay!
ASSERT_NO_EE_LOCKS_HELD();
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Yay!
ASSERT_NO_EE_LOCKS_HELD();
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Yay!
ASSERT_NO_EE_LOCKS_HELD();
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Yay!
ASSERT_NO_EE_LOCKS_HELD();
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Yay!
ASSERT_NO_EE_LOCKS_HELD();
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Yay!
ASSERT_NO_EE_LOCKS_HELD();
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Thread store lock is typically held during this callback
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Thread store lock is typically held during this callback
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Thread store lock is typically held during this callback
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Thread store lock is typically held during this callback
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Thread store lock is typically held during this callback
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Thread store lock is typically held during this callback
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Remaining essentials from our entrypoint macros with kEE2PNoTrigger flag
SetCallbackStateFlagsHolder csf(COR_PRF_CALLBACKSTATE_INCALLBACK);
- REMOVE_STACK_GUARD_FOR_PROFILER_CALL;
_ASSERTE(m_pCallback2 != NULL);
{
// Thread store lock is typically held during this callback
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Remaining essentials from our entrypoint macros with kEE2PNoTrigger flag
SetCallbackStateFlagsHolder csf(COR_PRF_CALLBACKSTATE_INCALLBACK);
- REMOVE_STACK_GUARD_FOR_PROFILER_CALL;
_ASSERTE(m_pCallback2 != NULL);
{
// Yay!
ASSERT_NO_EE_LOCKS_HELD();
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Yay!
ASSERT_NO_EE_LOCKS_HELD();
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Yay!
ASSERT_NO_EE_LOCKS_HELD();
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Yay!
ASSERT_NO_EE_LOCKS_HELD();
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Yay!
ASSERT_NO_EE_LOCKS_HELD();
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Yay!
ASSERT_NO_EE_LOCKS_HELD();
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Yay!
ASSERT_NO_EE_LOCKS_HELD();
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Yay!
ASSERT_NO_EE_LOCKS_HELD();
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// CrstAppDomainHandleTable can be held while this is called
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Thread store lock normally held during this callback
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Thread store lock normally held during this callback
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Thread store lock normally held during this callback
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Thread store lock normally held during this callback
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Thread store lock normally held during this callback
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Thread store lock normally held during this callback
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// CrstAppDomainHandleTable can be held during this callback
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Thread store lock is typically held during this callback
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Thread store lock normally held during this callback
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Thread store lock normally held during this callback
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// ProfilingAPIUtility::s_csStatus is held while this callback is issued.
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Yay!
CAN_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
MODE_COOPERATIVE;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
{
THROWS;
GC_NOTRIGGER;
- SO_INTOLERANT;
MODE_ANY;
}
CONTRACTL_END;
{
THROWS;
GC_NOTRIGGER;
- SO_INTOLERANT;
MODE_ANY;
}
CONTRACTL_END;
{
THROWS;
GC_NOTRIGGER;
- SO_INTOLERANT;
MODE_ANY;
}
CONTRACTL_END;
{
THROWS;
GC_NOTRIGGER;
- SO_INTOLERANT;
MODE_ANY;
}
CONTRACTL_END;
{
THROWS;
GC_TRIGGERS;
- SO_INTOLERANT;
MODE_ANY;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
}
CONTRACTL_END;
MODE_ANY;
CAN_TAKE_LOCK;
STATIC_CONTRACT_FAULT;
- SO_NOT_MAINLINE;
} CONTRACTL_END;
// Mark that we are the special ETWRundown thread. Currently all this does
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
CONTRACTL {
THROWS;
GC_NOTRIGGER;
- SO_NOT_MAINLINE;
} CONTRACTL_END;
Module *pModule = NULL;
{
THROWS;
GC_NOTRIGGER;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
CONTRACTL {
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
SUPPORTS_DAC;
PRECONDITION(CheckPointer(pExceptionRecord));
} CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
MODE_COOPERATIVE;
- SO_TOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
MODE_COOPERATIVE;
- SO_TOLERANT;
}
CONTRACTL_END;
// We should throw c++ exception instead.
ThrowOutOfMemory();
}
-#ifdef FEATURE_STACK_PROBE
- else if (throwable == CLRException::GetPreallocatedStackOverflowException())
- {
- ThrowStackOverflow();
- }
-#else
_ASSERTE(throwable != CLRException::GetPreallocatedStackOverflowException());
-#endif
#ifdef FEATURE_CORRUPTING_EXCEPTIONS
if (!g_pConfig->LegacyCorruptedStateExceptionsPolicy())
RaiseException(code, flags, argCount, args);
}
- // Probe for sufficient stack.
- PUSH_STACK_PROBE_FOR_THROW(pParam->pThread);
-
-#ifndef STACK_GUARDS_DEBUG
// This needs to be both here and inside the handler below
// enable preemptive mode before call into OS
GCX_PREEMP_NO_DTOR();
// In non-debug, we can just raise the exception once we've probed.
RaiseException(code, flags, argCount, args);
-
-#else
- // In a debug build, we need to unwind our probe structure off the stack.
- BaseStackGuard *pThrowGuard = NULL;
- // Stach away the address of the guard we just pushed above in PUSH_STACK_PROBE_FOR_THROW
- SAVE_ADDRESS_OF_STACK_PROBE_FOR_THROW(pThrowGuard);
-
- // Add the stack guard reference to the structure below so that it can be accessed within
- // PAL_TRY as well
- struct ParamInner
- {
- ULONG code;
- ULONG flags;
- ULONG argCount;
- ULONG_PTR *args;
- BaseStackGuard *pGuard;
- } param;
- param.code = code;
- param.flags = flags;
- param.argCount = argCount;
- param.args = args;
- param.pGuard = pThrowGuard;
-
- PAL_TRY(ParamInner *, pParam, ¶m)
- {
- // enable preemptive mode before call into OS
- GCX_PREEMP_NO_DTOR();
-
- RaiseException(pParam->code, pParam->flags, pParam->argCount, pParam->args);
-
- // We never return from RaiseException, so shouldn't have to call SetNoException.
- // However, in the debugger we can, and if we don't call SetNoException we get
- // a short-circuit return assert.
- RESET_EXCEPTION_FROM_STACK_PROBE_FOR_THROW(pParam->pGuard);
- }
- PAL_FINALLY
- {
- // pop the guard that we pushed above in PUSH_STACK_PROBE_FOR_THROW
- POP_STACK_PROBE_FOR_THROW(pThrowGuard);
- }
- PAL_ENDTRY
-#endif
}
PAL_EXCEPT_FILTER (RaiseExceptionFilter)
{
// We should throw c++ exception instead.
ThrowOutOfMemory();
}
-#ifdef FEATURE_STACK_PROBE
- else if (throwable == CLRException::GetPreallocatedStackOverflowException())
- {
- ThrowStackOverflow();
- }
-#else
_ASSERTE(throwable != CLRException::GetPreallocatedStackOverflowException());
-#endif
// TODO: Do we need to install COMPlusFrameHandler here?
INSTALL_COMPLUS_EXCEPTION_HANDLER();
gc.key = key;
gc.ret = NULL;
- // The standard probe isn't good enough here. It's possible that we only have ~14 pages of stack
- // left. By the time we transition to the default domain and start fetching this resource string,
- // another 12 page probe could fail.
- // This failing probe would cause us to unload the default appdomain, which would cause us
- // to take down the process.
-
- // Instead, let's probe for a lots more stack to make sure that doesn' happen.
-
- // We need to have enough stack to survive 2 more probes... the original entrypoint back
- // into mscorwks after we go into managed code, and a "large" probe that protects the GC
-
- INTERIOR_STACK_PROBE_FOR(GetThread(), DEFAULT_ENTRY_PROBE_AMOUNT * 2);
GCPROTECT_BEGIN(gc);
MethodDescCallSite getResourceStringLocal(METHOD__ENVIRONMENT__GET_RESOURCE_STRING_LOCAL);
GCPROTECT_END();
- END_INTERIOR_STACK_PROBE;
-
-
return gc.ret;
}
{
NOTHROW;
GC_TRIGGERS;
- SO_TOLERANT;
}
CONTRACTL_END;
GC_NOTRIGGER;
MODE_ANY;
FORBID_FAULT;
- SO_TOLERANT;
}
CONTRACTL_END;
GC_NOTRIGGER;
MODE_ANY;
FORBID_FAULT;
- SO_TOLERANT;
}
CONTRACTL_END;
NOTHROW;
DISABLED(GC_TRIGGERS); // some Frames' ExceptionUnwind methods trigger :(
MODE_ANY;
- SO_TOLERANT;
}
CONTRACTL_END;
- // @todo - Remove this and add a hard SO probe as can't throw from here.
- CONTRACT_VIOLATION(SOToleranceViolation);
-
Frame* pFrame = pThread->m_pFrame;
if (pFrame < pvLimitSP)
{
BOOL IsUncatchable(OBJECTREF *pThrowable)
{
CONTRACTL {
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_COOPERATIVE;
BOOL IsStackOverflowException(Thread* pThread, EXCEPTION_RECORD* pExceptionRecord)
{
- if (IsSOExceptionCode(pExceptionRecord->ExceptionCode))
+ if (pExceptionRecord->ExceptionCode == STATUS_STACK_OVERFLOW)
{
return true;
}
void DECLSPEC_NORETURN RaiseDeadLockException()
{
STATIC_CONTRACT_THROWS;
- STATIC_CONTRACT_SO_TOLERANT;
// Disable the "initialization of static local vars is no thread safe" error
#ifdef _MSC_VER
GC_NOTRIGGER;
MODE_ANY;
FORBID_FAULT;
- SO_TOLERANT;
}
CONTRACTL_END;
GC_NOTRIGGER;
MODE_ANY;
FORBID_FAULT;
- SO_TOLERANT;
}
CONTRACTL_END;
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_GC_TRIGGERS;
STATIC_CONTRACT_MODE_ANY;
- // We don't need to be SO-robust for an unhandled exception
- SO_NOT_MAINLINE_FUNCTION;
LOG((LF_EH, LL_INFO100, "InternalUnhandledExceptionFilter: at sp %p.\n", GetCurrentSP()));
THROWS;
GC_TRIGGERS;
MODE_ANY;
- SO_TOLERANT;
}
CONTRACTL_END;
LONG ret = -1;
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return EXCEPTION_CONTINUE_SEARCH;);
-
// Invoke the UEF worker to perform unhandled exception processing
ret = InternalUnhandledExceptionFilter_Worker (pExceptionInfo);
LOG((LF_EH, LL_INFO100, "EntryPointFilter: setting TSNC_ProcessedUnhandledException\n"));
pThread->SetThreadStateNC(Thread::TSNC_ProcessedUnhandledException);
}
-
-
- END_SO_INTOLERANT_CODE;
return ret;
}
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_GC_TRIGGERS;
STATIC_CONTRACT_MODE_ANY;
- // We don't need to be SO-robust for an unhandled exception
- SO_NOT_MAINLINE_FUNCTION;
LONG retVal = EXCEPTION_CONTINUE_SEARCH;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
}
CONTRACTL_END;
// Question: Should we also check for (pThread->m_PreventAsync == 0)
-#if !defined(WIN64EXCEPTIONS) && defined(FEATURE_STACK_PROBE)
- // On Win64, this function is called by our exception handling code which has probed.
- // But on X86, this is called from JIT code directly. We probe here so that
- // we can restore the state of the thread below.
- if (GetEEPolicy()->GetActionOnFailure(FAIL_StackOverflow) == eRudeUnloadAppDomain)
- {
- // In case of SO, we will skip the managed code.
- CONTRACT_VIOLATION(ThrowsViolation);
- RetailStackProbe(ADJUST_PROBE(DEFAULT_ENTRY_PROBE_AMOUNT), pThread);
- }
-#endif // !WIN64EXCEPTIONS && FEATURE_STACK_PROBE
-
pThread->SetThrowControlForThread(Thread::InducedThreadRedirectAtEndOfCatch);
if (!pThread->ReadyForAbort())
{
GC_NOTRIGGER;
MODE_ANY;
FORBID_FAULT;
- SO_TOLERANT;
}
CONTRACTL_END;
GC_NOTRIGGER;
MODE_ANY;
FORBID_FAULT;
- SO_TOLERANT;
}
CONTRACTL_END;
GC_TRIGGERS;
MODE_COOPERATIVE;
FORBID_FAULT;
- SO_TOLERANT;
}
CONTRACTL_END;
{
EX_TRY
{
- // We need to disable the backout stack validation at this point since CreateThrowable can
- // take arbitrarily large amounts of stack for different exception types; however we know
- // for a fact that we will never go through this code path if the exception is a stack
- // overflow exception since we already handled that case above with the pre-allocated SO exception.
- DISABLE_BACKOUT_STACK_VALIDATION;
-
FAULT_NOT_FATAL();
ThreadPreventAsyncHolder preventAsync;
LIMITED_METHOD_CONTRACT;
#ifdef DEBUGGING_SUPPORTED
- SO_NOT_MAINLINE_FUNCTION;
#ifdef _TARGET_ARM_
// On ARM we don't have any reliable hardware support for single stepping so it is emulated in software.
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
}
#endif // defined(WIN64EXCEPTIONS) && defined(FEATURE_HIJACK)
- if (IsSOExceptionCode(pExceptionInfo->ExceptionRecord->ExceptionCode))
+ if (pExceptionInfo->ExceptionRecord->ExceptionCode == STATUS_STACK_OVERFLOW)
{
//
// Not an Out-of-memory situation, so no need for a forbid fault region here
LONG retVal = 0;
-#ifdef FEATURE_STACK_PROBE
- // See if we've got enough stack to handle this exception
-
- // There isn't much stack left to attempt to report an exception. Let's trigger a hard
- // SO, so we clear the guard page and give us at least another page of stack to work with.
-
- if (pThread && !pThread->IsStackSpaceAvailable(ADJUST_PROBE(1)))
- {
- DontCallDirectlyForceStackOverflow();
- }
-#endif // FEATURE_STACK_PROBE
-
// We can't probe here, because we won't return from the CLRVectoredExceptionHandlerPhase2
// on WIN64
//
BOOL fExternalException = FALSE;
- BEGIN_SO_INTOLERANT_CODE_NOPROBE;
-
{
// ExecutionManager::IsManagedCode takes a spinlock. Since we're in the middle of throwing,
// we'll allow the lock, even if a caller didn't expect it.
!IsIPInModule(g_pMSCorEE, GetIP(pExceptionInfo->ContextRecord)));
}
- END_SO_INTOLERANT_CODE_NOPROBE;
-
if (fExternalException)
{
// The breakpoint was not ours. Someone else can handle it. (Or if not, we'll get it again as
PCODE ip = (PCODE)GetIP(pContext);
if (IsIPInModule(g_pMSCorEE, ip) || IsIPInModule(GCHeapUtilities::GetGCModule(), ip))
{
- CONTRACT_VIOLATION(ThrowsViolation|FaultViolation|SOToleranceViolation);
+ CONTRACT_VIOLATION(ThrowsViolation|FaultViolation);
//
// If you're debugging, set the debugger to catch first-chance AV's, then simply hit F5 or
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_TRIGGERS;
STATIC_CONTRACT_MODE_ANY;
- STATIC_CONTRACT_SO_TOLERANT;
Thread* pThread = GetThread();
if (!NingenEnabled())
{
CONTRACT_VIOLATION(ThrowsViolation);
- BEGIN_SO_INTOLERANT_CODE(pThread);
// Call CLRException::GetThrowableFromException to force us to retrieve the THROWABLE
// while we are still within the context of the catch block. This will help diagnose
// cases where the last thrown object is NULL.
OBJECTREF orThrowable = CLRException::GetThrowableFromException(pException);
CONSISTENCY_CHECK(orThrowable != NULL);
- END_SO_INTOLERANT_CODE;
}
#endif
}
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_GC_TRIGGERS;
STATIC_CONTRACT_MODE_ANY;
- STATIC_CONTRACT_SO_TOLERANT;
-
- // We really should probe before switching to cooperative mode, although there's no chance
- // we'll SO in doing that as we've just caught an exception. We can't probe just
- // yet though, because we want to avoid reprobing on an SO exception and we need to switch
- // to cooperative to check the throwable for an SO as well as the pException object (as the
- // pException could be a LastThrownObjectException.) Blech.
- CONTRACT_VIOLATION(SOToleranceViolation);
GCX_COOP();
}
else if (orThrowable->GetMethodTable() == g_pStackOverflowExceptionClass)
{
-#ifdef FEATURE_STACK_PROBE
- EEPolicy::HandleSoftStackOverflow();
-#else
/* The parameters of the function do not matter here */
EEPolicy::HandleStackOverflow(SOD_UnmanagedFrameHandler, NULL);
-#endif
}
}
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
}
CONTRACTL_END;
if (CExecutionEngine::CheckThreadStateNoCreate(TlsIdx_PEXCEPTION_RECORD))
{
BOOL fSave = TRUE;
- if (!IsSOExceptionCode(pRecord->ExceptionCode))
+ if (pRecord->ExceptionCode != STATUS_STACK_OVERFLOW)
{
DWORD dwLastExceptionCode = (DWORD)(SIZE_T) (ClrFlsGetValue(TlsIdx_EXCEPTION_CODE));
- if (IsSOExceptionCode(dwLastExceptionCode))
+ if (dwLastExceptionCode == STATUS_STACK_OVERFLOW)
{
PEXCEPTION_RECORD lastRecord =
static_cast<PEXCEPTION_RECORD> (ClrFlsGetValue(TlsIdx_PEXCEPTION_RECORD));
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
SUPPORTS_DAC;
}
CONTRACTL_END;
THROWS;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
SUPPORTS_DAC;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
PRECONDITION(IsWatsonEnabled());
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
PRECONDITION(IsWatsonEnabled());
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
PRECONDITION(IsWatsonEnabled());
}
CONTRACTL_END;
GC_NOTRIGGER;
MODE_COOPERATIVE;
NOTHROW;
- SO_TOLERANT;
PRECONDITION(GetThread() != NULL);
PRECONDITION(oThrowable != NULL);
}
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
MODE_COOPERATIVE;
- SO_TOLERANT;
PRECONDITION(oThrowable != NULL);
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
PRECONDITION(!fIsFirstPass); // This method should only be called during an unwind
PRECONDITION(pCurThread != NULL);
}
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
PRECONDITION(GetThread() != NULL);
}
CONTRACTL_END;
DISABLED(GC_NOTRIGGER); // Must sanitize first pass handling to enable this
CANNOT_TAKE_LOCK;
MODE_ANY;
- SO_TOLERANT;
SUPPORTS_DAC;
}
CONTRACTL_END;
#endif // FEATURE_COMINTEROP
-
-#ifdef FEATURE_STACK_PROBE
-//==========================================================================
-// Throw a StackOverflowError
-//==========================================================================
-VOID DECLSPEC_NORETURN RealCOMPlusThrowSO()
-{
- CONTRACTL
- {
- // This should be throws... But it isn't because a SO doesn't technically
- // fall into the same THROW/NOTHROW conventions as the rest of the contract
- // infrastructure.
- NOTHROW;
-
- DISABLED(GC_NOTRIGGER); // Must sanitize first pass handling to enable this
- SO_TOLERANT;
- MODE_ANY;
- }
- CONTRACTL_END;
-
- // We only use BreakOnSO if we are in debug mode, so we'll only checking if the
- // _DEBUG flag is set.
-#ifdef _DEBUG
- static int breakOnSO = -1;
-
- if (breakOnSO == -1)
- breakOnSO = CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_BreakOnSO);
-
- if (breakOnSO != 0)
- {
- _ASSERTE(!"SO occurred");
- }
-#endif
-
- ThrowStackOverflow();
-}
-#endif
-
//==========================================================================
// Throw an InvalidCastException
//==========================================================================
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
- SO_INTOLERANT;
} CONTRACTL_END;
Module *pModuleTypeFrom = thCastFrom.GetModule();
// we don't handle the SO.
if (!(dwExceptionFlags & EXCEPTION_UNWINDING))
{
- if (IsSOExceptionCode(pExceptionRecord->ExceptionCode))
+ if (pExceptionRecord->ExceptionCode == STATUS_STACK_OVERFLOW)
{
// We don't need to unwind the frame chain here because we have backstop
// personality routines at the U2M boundary to do that. They are
FastInterlockAnd (&pThread->m_fPreemptiveGCDisabled, 0);
return ExceptionContinueSearch;
}
- else
- {
-#ifdef FEATURE_STACK_PROBE
- if (GetEEPolicy()->GetActionOnFailure(FAIL_StackOverflow) == eRudeUnloadAppDomain)
- {
- RetailStackProbe(static_cast<unsigned int>(ADJUST_PROBE(BACKOUT_CODE_STACK_LIMIT)), pThread);
- }
-#endif
- }
}
else
{
// look at our saved exception code.
exceptionCode = GetCurrentExceptionCode();
- if (IsSOExceptionCode(exceptionCode))
+ if (exceptionCode == STATUS_STACK_OVERFLOW)
{
return ExceptionContinueSearch;
}
}
- BEGIN_CONTRACT_VIOLATION(SOToleranceViolation);
-
StackFrame sf((UINT_PTR)MemoryStackFp);
// It is a breakpoint; is it from the runtime or managed code?
PCODE ip = GetIP(pContextRecord); // IP of the fault.
- BOOL fExternalException = FALSE;
-
- BEGIN_SO_INTOLERANT_CODE_NOPROBE;
+ BOOL fExternalException;
fExternalException = (!ExecutionManager::IsManagedCode(ip) &&
!IsIPInModule(g_pMSCorEE, ip));
- END_SO_INTOLERANT_CODE_NOPROBE;
-
if (fExternalException)
{
// The breakpoint was not ours. Someone else can handle it. (Or if not, we'll get it again as
// SO-tolerant mode before we do so.
RestoreSOToleranceState();
#endif
- RESET_CONTRACT_VIOLATION();
+
ExceptionTracker::ResumeExecution(pContextRecord,
NULL
);
GCX_PREEMP_NO_DTOR();
}
- END_CONTRACT_VIOLATION;
-
SetLastError(dwLastError);
return returnDisposition;
MODE_COOPERATIVE;
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACTL_END;
}
-#ifdef FEATURE_STACK_PROBE
- // Don't call a handler if we're within a certain distance of the end of the stack. Could end up here via probe, in
- // which case guard page is intact, or via hard SO, in which case guard page won't be. So don't check for presence of
- // guard page, just check for sufficient space on stack.
- if ( IsStackOverflowException()
- && !pThread->CanResetStackTo((void*)sf.SP))
- {
- EH_LOG((LL_INFO100, " STACKOVERFLOW: IGNOREFRAME: stack frame too close to guard page: sf.SP: %p\n", sf.SP));
- }
- else
-#endif // FEATURE_STACK_PROBE
{
IJitManager* pJitMan = pcfThisFrame->GetJitManager();
const METHODTOKEN& MethToken = pcfThisFrame->GetMethodToken();
return ReturnStatus;
}
-// <64bit_And_Arm_Specific>
-
-// For funclets, add support for unwinding frame chain during SO. These definitions will be automatically picked up by
-// BEGIN_SO_TOLERANT_CODE/END_SO_TOLERANT_CODE usage in ExceptionTracker::CallHandler below.
-//
-// This is required since funclet invocation is the only case of calling managed code from VM that is not wrapped by
-// assembly helper with associated personality routine. The personality routine will invoke CleanupForSecondPass to
-// release exception trackers and unwind frame chain.
-//
-// We need to do the same work as CleanupForSecondPass for funclet invocation in the face of SO. Thus, we redefine OPTIONAL_SO_CLEANUP_UNWIND
-// below. This will perform frame chain unwind inside the "__finally" block that is part of the END_SO_TOLERANT_CODE macro only in the face
-// of an SO.
-//
-// The second part of work, releasing exception trackers, is done inside the "__except" block also part of the END_SO_TOLERANT_CODE by invoking
-// ClearExceptionStateAfterSO.
-//
-// </64bit_And_Arm_Specific>
-
#undef OPTIONAL_SO_CLEANUP_UNWIND
#define OPTIONAL_SO_CLEANUP_UNWIND(pThread, pFrame) if (pThread->GetFrame() < pFrame) { UnwindFrameChain(pThread, pFrame); }
throwable = PossiblyUnwrapThrowable(pThread->GetThrowable(), pMD->GetAssembly());
- // We probe for stack space before attempting to call a filter, finally, or catch clause. The path from
- // here to the actual managed code is very short. We must probe, however, because the JIT does not generate a
- // probe for us upon entry to the handler. This probe ensures we have enough stack space to actually make it
- // into the managed code.
- //
- // Incase a SO happens, this macro will also unwind the frame chain before continuing to dispatch the SO
- // upstack (look at the macro implementation for details).
- BEGIN_SO_TOLERANT_CODE(pThread);
-
// Stores the current SP and BSP, which will be the caller SP and BSP for the funclet.
// Note that we are making the assumption here that the SP and BSP don't change from this point
// forward until we actually make the call to the funclet. If it's not the case then we will need
this->m_EHClauseInfo.SetManagedCodeEntered(FALSE);
- END_SO_TOLERANT_CODE;
-
// The first parameter specifies whether we want to make callbacks before (true) or after (false)
// calling the handler.
MakeCallbacksRelatedToHandler(false, pThread, pMD, pEHClause, uHandlerStartPC, sf);
//
// Thus, if we see that we are here for SO in the 2nd pass, then
// we shouldn't attempt to create a throwable.
- if ((!fIsFirstPass) && (IsSOExceptionCode(pExceptionRecord->ExceptionCode)))
+ if ((!fIsFirstPass) && (pExceptionRecord->ExceptionCode == STATUS_STACK_OVERFLOW))
{
fCreateThrowableForCurrentPass = false;
}
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
}
CONTRACTL_END;
GC_NOTRIGGER;
NOTHROW;
MODE_ANY;
- SO_TOLERANT;
}
CONTRACTL_END;
Thread* pThread = GetThread();
CONTEXT *pNewContext = NULL;
- VALIDATE_BACKOUT_STACK_CONSUMPTION;
-
if (FirstCallToHandler(pDispatcherContext, &pNewContext))
{
//
{
CONTEXT* pNewContext = NULL;
- VALIDATE_BACKOUT_STACK_CONSUMPTION;
-
- // Our backout validation should ensure that we don't SO here.
- BEGIN_CONTRACT_VIOLATION(SOToleranceViolation);
-
if (FirstCallToHandler(pDispatcherContext, &pNewContext))
{
//
FixupDispatcherContext(pDispatcherContext, pNewContext, pContextRecord);
- END_CONTRACT_VIOLATION;
-
// Returning ExceptionCollidedUnwind will cause the OS to take our new context record
// and dispatcher context and restart the exception dispatching on this call frame,
// which is exactly the behavior we want in order to restore our thread's unwindability
return ExceptionContinueSearch;
}
- bool fIsSO =
- IsSOExceptionCode(pExceptionRecord->ExceptionCode);
-
- VALIDATE_BACKOUT_STACK_CONSUMPTION;
+ bool fIsSO = pExceptionRecord->ExceptionCode == STATUS_STACK_OVERFLOW;
if (IS_UNWINDING(pExceptionRecord->ExceptionFlags))
{
pThread->DisablePreemptiveGC();
}
}
- // The VALIDATE_BACKOUT_STACK_CONSUMPTION makes sure that this function does not use stack more than backout limit.
- CONTRACT_VIOLATION(SOToleranceViolation);
CleanUpForSecondPass(pThread, fIsSO, (void*)MemoryStackFp, (void*)MemoryStackFp);
}
Thread* pThread = GetThread();
_ASSERTE(pThread);
- if (IsSOExceptionCode(pExceptionRecord->ExceptionCode))
+ if (pExceptionRecord->ExceptionCode == STATUS_STACK_OVERFLOW)
{
if (IS_UNWINDING(pExceptionRecord->ExceptionFlags))
{
pContextRecord,
pDispatcherContext);
- // Our backout validation should ensure that we don't SO here. Add a
- // backout validation here.
- BEGIN_CONTRACT_VIOLATION(SOToleranceViolation);
-
if (retVal == ExceptionContinueSearch)
{
GCX_PREEMP_NO_DTOR();
}
- END_CONTRACT_VIOLATION;
-
return retVal;
}
GC_NOTRIGGER;
NOTHROW;
MODE_ANY;
- SO_TOLERANT;
}
CONTRACTL_END;
pContextRecord,
pDispatcherContext->ContextRecord);
- VALIDATE_BACKOUT_STACK_CONSUMPTION;
-
CONTEXT *pRedirectedContext = GetCONTEXTFromRedirectedStubStackFrame(pDispatcherContext);
FixupDispatcherContext(pDispatcherContext, pRedirectedContext, pContextRecord);
class Exception;
VOID DECLSPEC_NORETURN RealCOMPlusThrowOM();
-VOID DECLSPEC_NORETURN RealCOMPlusThrowSO();
#include <excepcpu.h>
-#include "stackprobe.h"
-
-
//==========================================================================
// Macros to allow catching exceptions from within the EE. These are lightweight
#define INSTALL_UNWIND_AND_CONTINUE_HANDLER \
INSTALL_UNWIND_AND_CONTINUE_HANDLER_NO_PROBE \
/* The purpose of the INSTALL_UNWIND_AND_CONTINUE_HANDLER is to translate an exception to a managed */ \
- /* exception before it hits managed code. The transition to SO_INTOLERANT code does not logically belong here. */ \
- /* However, we don't want to miss any probe points and the intersection between a probe point and installing */ \
- /* an INSTALL_UNWIND_AND_CONTINUE_HANDLER is very high. The probes are very cheap, so we can tolerate */ \
- /* those few places where we are probing and don't need to. */ \
- /* Ideally, we would instead have an encompassing ENTER_SO_INTOLERANT_CODE macro that would */ \
- /* include INSTALL_UNWIND_AND_CONTINUE_HANDLER */ \
- BEGIN_SO_INTOLERANT_CODE(GET_THREAD());
+ /* exception before it hits managed code. */
// Optimized version for helper method frame. Avoids redundant GetThread() calls.
#define INSTALL_UNWIND_AND_CONTINUE_HANDLER_FOR_HMF(pHelperFrame) \
SCAN_EHMARKER(); \
if (true) PAL_CPP_TRY { \
SCAN_EHMARKER_TRY(); \
- DEBUG_ASSURE_NO_RETURN_BEGIN(IUACH); \
- BEGIN_SO_INTOLERANT_CODE(GET_THREAD());
+ DEBUG_ASSURE_NO_RETURN_BEGIN(IUACH);
#define UNINSTALL_UNWIND_AND_CONTINUE_HANDLER_NO_PROBE \
DEBUG_ASSURE_NO_RETURN_END(IUACH) \
} \
#define UNINSTALL_UNWIND_AND_CONTINUE_HANDLER \
- END_SO_INTOLERANT_CODE; \
- UNINSTALL_UNWIND_AND_CONTINUE_HANDLER_NO_PROBE; \
+ UNINSTALL_UNWIND_AND_CONTINUE_HANDLER_NO_PROBE;
#endif // DACCESS_COMPILE || CROSSGEN_COMPILE
#define COMPlusThrowHR if(THROWLOG() && 0) { } else RealCOMPlusThrowHR
#define COMPlusThrowWin32 if(THROWLOG() && 0) { } else RealCOMPlusThrowWin32
#define COMPlusThrowOM if(THROWLOG() && 0) { } else RealCOMPlusThrowOM
-#ifdef FEATURE_STACK_PROBE
-#define COMPlusThrowSO if(THROWLOG() && 0) { } else RealCOMPlusThrowSO
-#endif
#define COMPlusThrowArithmetic if(THROWLOG() && 0) { } else RealCOMPlusThrowArithmetic
#define COMPlusThrowArgumentNull if(THROWLOG() && 0) { } else RealCOMPlusThrowArgumentNull
#define COMPlusThrowArgumentOutOfRange if(THROWLOG() && 0) { } else RealCOMPlusThrowArgumentOutOfRange
#endif
#define COMPlusThrowWin32 RealCOMPlusThrowWin32
#define COMPlusThrowOM RealCOMPlusThrowOM
-#ifdef FEATURE_STACK_PROBE
-#define COMPlusThrowSO RealCOMPlusThrowSO
-#endif
#define COMPlusThrowArithmetic RealCOMPlusThrowArithmetic
#define COMPlusThrowArgumentNull RealCOMPlusThrowArgumentNull
#define COMPlusThrowArgumentOutOfRange RealCOMPlusThrowArgumentOutOfRange
{ \
MAKE_CURRENT_THREAD_AVAILABLE(); \
BEGIN_GCX_ASSERT_PREEMP; \
- BEGIN_SO_INTOLERANT_CODE(CURRENT_THREAD); \
CoopTransitionHolder __CoopTransition(CURRENT_THREAD); \
DEBUG_ASSURE_NO_RETURN_BEGIN(COOP_TRANSITION)
#define COOPERATIVE_TRANSITION_END() \
DEBUG_ASSURE_NO_RETURN_END(COOP_TRANSITION) \
__CoopTransition.SuppressRelease(); \
- END_SO_INTOLERANT_CODE; \
END_GCX_ASSERT_PREEMP; \
}
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
}
CONTRACTL_END;
GC_NOTRIGGER;
if (GetThread() != NULL) MODE_COOPERATIVE; else MODE_ANY;
FORBID_FAULT;
- SO_TOLERANT;
}
CONTRACTL_END;
GC_NOTRIGGER;
MODE_ANY;
FORBID_FAULT;
- SO_TOLERANT;
}
CONTRACTL_END;
NOTHROW; // This function does not throw.
GC_NOTRIGGER;
if (GetThread() != NULL) MODE_COOPERATIVE; else MODE_ANY;
- SO_TOLERANT;
}
CONTRACTL_END;
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
STATIC_CONTRACT_FORBID_FAULT;
- STATIC_CONTRACT_SO_TOLERANT;
_ASSERTE(pCER != NULL);
m_ExceptionCode = pCER->ExceptionCode;
MODE_COOPERATIVE;
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACTL_END;
if ((throwable == NULL) || CLRException::IsPreallocatedExceptionObject(throwable)) NOTHROW; else THROWS; // From CreateHandle
GC_NOTRIGGER;
if (throwable == NULL) MODE_ANY; else MODE_COOPERATIVE;
- SO_TOLERANT;
}
CONTRACTL_END;
}
else
{
- BEGIN_SO_INTOLERANT_CODE(GetThread());
- {
- AppDomain* pDomain = GetMyThread()->GetDomain();
- PREFIX_ASSUME(pDomain != NULL);
- hNewThrowable = pDomain->CreateHandle(throwable);
- }
- END_SO_INTOLERANT_CODE;
+ AppDomain* pDomain = GetMyThread()->GetDomain();
+ PREFIX_ASSUME(pDomain != NULL);
+ hNewThrowable = pDomain->CreateHandle(throwable);
}
#ifdef WIN64EXCEPTIONS
THROWS;
GC_TRIGGERS;
MODE_ANY;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pMD));
PRECONDITION(CheckPointer(pData, NULL_OK));
PRECONDITION(pMD->GetMethodTable() != NULL);
// that we won't trigger without having setup a frame.
// STATIC_CONTRACT_TRIGGER
STATIC_CONTRACT_GC_NOTRIGGER;
- STATIC_CONTRACT_SO_TOLERANT; // function probes before it does any work
// side effect the compiler can't remove
if (FC_NO_TAILCALL != 1)
// that we won't trigger without having setup a frame.
// STATIC_CONTRACT_TRIGGER
STATIC_CONTRACT_GC_NOTRIGGER;
- STATIC_CONTRACT_SO_TOLERANT; // function probes before it does any work
// side effect the compiler can't remove
if (FC_NO_TAILCALL != 1)
// This isn't strictly true... But the guarantee that we make here is
// that we won't trigger without having setup a frame.
UNCHECKED(GC_NOTRIGGER);
- SO_TOLERANT; // function probes before it does any work
} CONTRACTL_END;
FC_CAN_TRIGGER_GC();
#include "gms.h"
#include "runtimeexceptionkind.h"
#include "debugreturn.h"
-#include "stackprobe.h"
//==============================================================================================
// These macros defeat compiler optimizations that might mix nonvolatile
#define HELPER_METHOD_FRAME_BEGIN_EX_NOTHROW(ret, helperFrame, gcpoll, allowGC, probeFailExpr) \
HELPER_METHOD_FRAME_BEGIN_EX_BODY(ret, helperFrame, gcpoll, allowGC) \
/* <TODO>TODO TURN THIS ON!!! </TODO> */ \
- /* gcpoll; */ \
- BEGIN_SO_INTOLERANT_CODE_NOTHROW(GET_THREAD(), probeFailExpr);
+ /* gcpoll; */
// The while(__helperframe.RestoreState() needs a bit of explanation.
HELPER_METHOD_FRAME_END_EX_BODY(gcpoll,allowGC);
#define HELPER_METHOD_FRAME_END_EX_NOTHROW(gcpoll,allowGC) \
- END_SO_INTOLERANT_CODE; \
HELPER_METHOD_FRAME_END_EX_BODY(gcpoll,allowGC);
#define HELPER_METHOD_FRAME_BEGIN_ATTRIB(attribs) \
{ \
Thread *_pThread = GetThread(); \
Thread::ObjectRefFlush(_pThread); \
- /*_ASSERTE (_pThread->IsSOTolerant() ||*/ \
- /* _pThread->HasThreadStateNC(Thread::TSNC_DisableSOCheckInHCALL)); */ \
} \
FCallCheck __fCallCheck(__FILE__, __LINE__); \
FCALL_TRANSITION_BEGIN(); \
#define FCALL_CHECK \
THROWS; \
DISABLED(GC_TRIGGERS); /* FCALLS with HELPER frames have issues with GC_TRIGGERS */ \
- MODE_COOPERATIVE; \
- SO_TOLERANT
+ MODE_COOPERATIVE;
//
// FCALL_CONTRACT should be the following shortcut:
// Since there is very little value in having runtime contracts in FCalls, FCALL_CONTRACT is defined as static contract only for performance reasons.
//
#define FCALL_CONTRACT \
- STATIC_CONTRACT_SO_TOLERANT; \
STATIC_CONTRACT_THROWS; \
/* FCALLS are a special case contract wise, they are "NOTRIGGER, unless you setup a frame" */ \
STATIC_CONTRACT_GC_NOTRIGGER; \
GC_NOTRIGGER;
MODE_ANY;
FORBID_FAULT;
- SO_TOLERANT;
}
CONTRACTL_END
);
// == FailIfNotLoaded, can also assert that the thing is restored
- TypeHandle th = NULL;
-
- BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), return NULL);
- {
- th = sig.GetLastTypeHandleThrowing(ClassLoader::DontLoadTypes, level, dropGenericArgumentLevel);
- }
- END_SO_INTOLERANT_CODE;
-
- return th;
+ return sig.GetLastTypeHandleThrowing(ClassLoader::DontLoadTypes, level, dropGenericArgumentLevel);
}
#else //simplified version
TypeHandle FieldDesc::LookupFieldTypeHandle(ClassLoadLevel level, BOOL dropGenericArgumentLevel)
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY; // Needed by profiler and server GC
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
GC_NOTRIGGER;
MODE_ANY;
FORBID_FAULT;
- SO_TOLERANT;
PRECONDITION(IsStatic());
PRECONDITION(GetEnclosingMethodTable()->IsRestored_NoLogging());
}
PTR_VOID retVal = NULL;
- // BEGIN_SO_INTOLERANT_CODE will throw if we don't have enough stack
- // and GetStaticAddressHandle has no failure semantics, so we need
- // to just do the SO policy (e.g. rip the appdomain or process).
- CONTRACT_VIOLATION(ThrowsViolation)
-
#ifdef DACCESS_COMPILE
DacNotImpl();
#else
- BEGIN_SO_INTOLERANT_CODE(GetThread());
{
GCX_COOP();
// This routine doesn't have a failure semantic - but Resolve*Field(...) does.
CONTRACT_VIOLATION(ThrowsViolation|FaultViolation|GCViolation); //B#25680 (Fix Enc violations)
retVal = (void *)(pModule->ResolveOrAllocateField(NULL, pFD));
}
- END_SO_INTOLERANT_CODE;
#endif // !DACCESS_COMPILE
return retVal;
}
NOTHROW;
GC_NOTRIGGER;
MODE_COOPERATIVE;
- SO_TOLERANT;
PRECONDITION(!IsEnCNew());
- SO_TOLERANT;
SUPPORTS_DAC;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
MODE_COOPERATIVE;
- SO_TOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
}
CONTRACTL_END
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
}
CONTRACTL_END
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
}
CONTRACTL_END
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
PRECONDITION(!th.IsNull());
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
}
CONTRACT_END;
{
THROWS;
GC_TRIGGERS;
- SO_INTOLERANT;
INJECT_FAULT(ThrowOutOfMemory(););
}
CONTRACTL_END
NOTHROW;
GC_NOTRIGGER;
MODE_COOPERATIVE;
- SO_TOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
MODE_COOPERATIVE;
- SO_TOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
MODE_COOPERATIVE;
- SO_TOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
MODE_COOPERATIVE;
- SO_TOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
MODE_COOPERATIVE;
- SO_TOLERANT;
}
CONTRACTL_END;
CONTRACTL {
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
} CONTRACTL_END;
MethodDesc * pMD = m_pMD;
NOTHROW;
GC_NOTRIGGER;
MODE_COOPERATIVE;
- SO_TOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
MODE_COOPERATIVE;
- SO_TOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
MODE_COOPERATIVE;
- SO_TOLERANT;
}
CONTRACTL_END;
if (m_Attribs & FRAME_ATTR_NO_THREAD_ABORT) NOTHROW; else THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
- SO_TOLERANT;
} CONTRACTL_END;
//
if (m_Attribs & FRAME_ATTR_NO_THREAD_ABORT) NOTHROW; else THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
- SO_TOLERANT;
} CONTRACTL_END;
Thread * pThread = m_pThread;
if (m_Attribs & FRAME_ATTR_NO_THREAD_ABORT) NOTHROW; else THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
- SO_TOLERANT;
} CONTRACTL_END;
if (!(m_Attribs & FRAME_ATTR_NO_THREAD_ABORT))
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
- SO_TOLERANT;
} CONTRACTL_END;
m_pThread->HandleThreadAbort();
CONTRACTL {
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
if ((hostCallPreference == AllowHostCalls) && !m_MachState.isValid()) { HOST_CALLS; } else { HOST_NOCALLS; }
SUPPORTS_DAC;
} CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
MODE_COOPERATIVE; // Frame MethodDesc should be always updated in cooperative mode to avoid racing with GC stackwalk
- SO_TOLERANT;
}
CONTRACTL_END;
BOOL OnGcCoverageInterrupt(PCONTEXT regs)
{
- SO_NOT_MAINLINE_FUNCTION;
-
// So that you can set counted breakpoint easily;
GCcoverCount++;
forceStack[0]= ®s; // This is so I can see it fastchecked
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_COOPERATIVE;
}
CONTRACTL_END;
GC_NOTRIGGER;
MODE_ANY;
CAN_TAKE_LOCK;
- SO_TOLERANT;
}
CONTRACTL_END;
GC_NOTRIGGER;
MODE_ANY;
CAN_TAKE_LOCK;
- SO_TOLERANT;
}
CONTRACTL_END;
Object *retVal = NULL;
CheckObjectSize(size);
- // We don't want to throw an SO during the GC, so make sure we have plenty
- // of stack before calling in.
- INTERIOR_STACK_PROBE_FOR(GetThread(), static_cast<unsigned>(DEFAULT_ENTRY_PROBE_AMOUNT * 1.5));
if (GCHeapUtilities::UseThreadAllocationContexts())
{
gc_alloc_context *threadContext = GetThreadAllocContext();
ThrowOutOfMemory();
}
- END_INTERIOR_STACK_PROBE;
return retVal;
}
Object *retVal = NULL;
CheckObjectSize(size);
- // We don't want to throw an SO during the GC, so make sure we have plenty
- // of stack before calling in.
- INTERIOR_STACK_PROBE_FOR(GetThread(), static_cast<unsigned>(DEFAULT_ENTRY_PROBE_AMOUNT * 1.5));
if (GCHeapUtilities::UseThreadAllocationContexts())
{
gc_alloc_context *threadContext = GetThreadAllocContext();
ThrowOutOfMemory();
}
- END_INTERIOR_STACK_PROBE;
return retVal;
}
#endif // FEATURE_64BIT_ALIGNMENT
Object *retVal = NULL;
CheckObjectSize(size);
- // We don't want to throw an SO during the GC, so make sure we have plenty
- // of stack before calling in.
- INTERIOR_STACK_PROBE_FOR(GetThread(), static_cast<unsigned>(DEFAULT_ENTRY_PROBE_AMOUNT * 1.5));
retVal = GCHeapUtilities::GetGCHeap()->AllocLHeap(size, flags);
if (!retVal)
ThrowOutOfMemory();
}
- END_INTERIOR_STACK_PROBE;
return retVal;
}
}
else
{
- // Since we're about to *really* recurse, probe for stack.
- // @todo: is the default amount really correct?
- _ASSERTE(GetThread());
- INTERIOR_STACK_PROBE(GetThread());
-
TypeHandle subArrayType = pArrayMT->GetApproxArrayElementTypeHandle();
for (UINT32 i = 0; i < cElements; i++)
{
iholder.Release();
- END_INTERIOR_STACK_PROBE
-
orArray = (ArrayBase *) OBJECTREFToObject(outerArray);
}
} // GcStressPolicy::~InhibitHolder()
MODE_COOPERATIVE; // returns an objref without pinning it => cooperative
} CONTRACTL_END;
-#ifdef _DEBUG
- // fastPrimitiveArrayAllocator is called by VM and managed code. If called from managed code, we
- // make sure that the thread is in SOTolerantState.
-#ifdef FEATURE_STACK_PROBE
- Thread::DisableSOCheckInHCALL disableSOCheckInHCALL;
-#endif // FEATURE_STACK_PROBE
-#endif // _DEBUG
return OBJECTREF( HCCALL2(fastPrimitiveArrayAllocator, type, cElements) );
}
// typehandle for every object in the heap.
TypeHandle ArrayType = ClassLoader::LoadArrayTypeThrowing(ElementType);
-#ifdef _DEBUG
- // fastObjectArrayAllocator is called by VM and managed code. If called from managed code, we
- // make sure that the thread is in SOTolerantState.
-#ifdef FEATURE_STACK_PROBE
- Thread::DisableSOCheckInHCALL disableSOCheckInHCALL;
-#endif // FEATURE_STACK_PROBE
-#endif // _DEBUG
return OBJECTREF( HCCALL2(fastObjectArrayAllocator, ArrayType.AsArray()->GetTemplateMethodTable(), cElements));
}
MODE_COOPERATIVE; // returns an objref without pinning it => cooperative
} CONTRACTL_END;
-#ifdef _DEBUG
- // fastStringAllocator is called by VM and managed code. If called from managed code, we
- // make sure that the thread is in SOTolerantState.
-#ifdef FEATURE_STACK_PROBE
- Thread::DisableSOCheckInHCALL disableSOCheckInHCALL;
-#endif // FEATURE_STACK_PROBE
-#endif // _DEBUG
return STRINGREF(HCCALL1(fastStringAllocator, cchStringLength));
}
STATIC_CONTRACT_MODE_COOPERATIVE;
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
- STATIC_CONTRACT_SO_TOLERANT;
// if the dst is outside of the heap (unboxed value classes) then we
// simply exit
STATIC_CONTRACT_MODE_COOPERATIVE;
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
- STATIC_CONTRACT_SO_TOLERANT;
*dst = ref;
#include "eeconfig.h"
#include "generics.h"
#include "genericdict.h"
-#include "stackprobe.h"
#include "typestring.h"
#include "typekey.h"
#include "dumpcommon.h"
ThrowHR(COR_E_OVERFLOW);
TypeHandle ret = TypeHandle();
- DECLARE_INTERIOR_STACK_PROBE;
-#ifndef DACCESS_COMPILE
- if ((dwAllocSize/GetOsPageSize()+1) >= 2)
- {
- DO_INTERIOR_STACK_PROBE_FOR_NOTHROW_CHECK_THREAD((10+dwAllocSize/GetOsPageSize()+1), NO_FORBIDGC_LOADER_USE_ThrowSO(););
- }
-#endif // DACCESS_COMPILE
TypeHandle *repInst = (TypeHandle*) _alloca(dwAllocSize);
for (DWORD i = 0; i < ntypars; i++)
TypeKey canonKey(pTypeKey->GetModule(), pTypeKey->GetTypeToken(), Instantiation(repInst, ntypars));
ret = ClassLoader::LoadConstructedTypeThrowing(&canonKey, fLoadTypes, level);
- END_INTERIOR_STACK_PROBE;
RETURN(ret);
}
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
CANNOT_TAKE_LOCK;
PRECONDITION(CheckPointer(pRepMethod));
SUPPORTS_DAC;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
CANNOT_TAKE_LOCK;
PRECONDITION(CheckPointer(pRepMethod));
SUPPORTS_DAC;
{
DISABLED(THROWS); // This is not a bug, we cannot decide, since the function ptr called may be either.
DISABLED(GC_NOTRIGGER); // This is not a bug, we cannot decide, since the function ptr called may be either.
- SO_TOLERANT;
}
CONTRACTL_END;
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_CANNOT_TAKE_LOCK;
STATIC_CONTRACT_GC_NOTRIGGER;
- STATIC_CONTRACT_SO_TOLERANT;
STATIC_CONTRACT_HOST_CALLS;
METHOD_CANNOT_BE_FOLDED_DEBUG;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACTL_END;
// Note: this can be called a little early for real contracts, so we use static contracts instead.
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
- STATIC_CONTRACT_SO_TOLERANT;
- {
- return GetProcessHeap();
- }
+ return GetProcessHeap();
}
#define GetProcessHeap() Dont_Use_GetProcessHeap()
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACTL_END;
LPVOID EEHeapAlloc(HANDLE hHeap, DWORD dwFlags, SIZE_T dwBytes)
{
STATIC_CONTRACT_NOTHROW;
- STATIC_CONTRACT_SO_INTOLERANT;
#ifdef FAILPOINTS_ENABLED
if (RFS_HashStack ())
LPVOID EEHeapAllocInProcessHeap(DWORD dwFlags, SIZE_T dwBytes)
{
WRAPPER_NO_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
#ifdef _DEBUG
// Check whether (indispensable) implicit casting in ClrAllocInProcessHeapBootstrap is safe.
static HANDLE ProcessHeap = NULL;
- // We need to guarentee a very small stack consumption in allocating. And we can't allow
- // an SO to happen while calling into the host. This will force a hard SO which is OK because
- // we shouldn't ever get this close inside the EE in SO-intolerant code, so this should
- // only fail if we call directly in from outside the EE, such as the JIT.
- MINIMAL_STACK_PROBE_CHECK_THREAD(GetThread());
-
if (ProcessHeap == NULL)
ProcessHeap = EEGetProcessHeap();
{
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
- STATIC_CONTRACT_SO_TOLERANT;
-
- // @todo - Need a backout validation here.
- CONTRACT_VIOLATION(SOToleranceViolation);
-
BOOL retVal = FALSE;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
static FastFreeInProcessHeapFunc pFunc = EEHeapFreeInProcessHeap;
#endif
- // Take a look at comment in EEHeapFree and EEHeapAllocInProcessHeap, obviously someone
- // needs to take a little time to think more about this code.
- //CONTRACT_VIOLATION(SOToleranceViolation);
-
static HANDLE ProcessHeap = NULL;
if (ProcessHeap == NULL)
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
PRECONDITION(dwSleepMSec < 10000 || GetThread() == NULL || !GetThread()->PreemptiveGCDisabled());
}
CONTRACTL_END;
{
NOTHROW;
WRAPPER(GC_NOTRIGGER);
- SO_TOLERANT;
}
CONTRACTL_END;
- VALIDATE_BACKOUT_STACK_CONSUMPTION;
-
Crst *pCrst = CookieToCrst(cookie);
_ASSERTE(pCrst);
{
WRAPPER(THROWS);
WRAPPER(GC_TRIGGERS);
- SO_INTOLERANT;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_INTOLERANT;
}
CONTRACTL_END;
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_MODE_ANY;
STATIC_CONTRACT_CANNOT_TAKE_LOCK;
- STATIC_CONTRACT_SO_TOLERANT;
//
// @todo: we don't want TlsGetValue to throw, but CheckThreadState throws right now. Either modify
STATIC_CONTRACT_GC_NOTRIGGER;
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_MODE_ANY;
- STATIC_CONTRACT_SO_TOLERANT;
//
// @todo: we don't want TlsGetValue to throw, but CheckThreadState throws right now. Either modify
STATIC_CONTRACT_GC_NOTRIGGER;
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_MODE_ANY;
- STATIC_CONTRACT_SO_TOLERANT;
void **pTlsData = CExecutionEngine::CheckThreadState(slot);
_ASSERTE(tempVal.dwCPUType);
#ifdef _DEBUG
- {
- SO_NOT_MAINLINE_REGION();
-
/* Set Family+Model+Stepping string (eg., x690 for Banias, or xF30 for P4 Prescott)
* instead of Family only
*/
assert((configCpuFamily & 0xFFF) == configCpuFamily);
tempVal.dwCPUType = (tempVal.dwCPUType & 0xFFFF0000) | configCpuFamily;
}
- }
#endif
tempVal.dwFeatures = GetSpecificCpuFeaturesAsm(&tempVal.dwExtendedFeatures); // written in ASM & doesn't participate in contracts
#ifdef _DEBUG
- {
- SO_NOT_MAINLINE_REGION();
-
/* Set the 32-bit feature mask
*/
{
tempVal.dwFeatures = configCpuFeatures;
}
- }
#endif
val = *cpuInfo = tempVal;
GC_NOTRIGGER;
PRECONDITION(CORDebuggerAttached());
PRECONDITION(CheckPointer(pAddr));
- SO_TOLERANT;
} CONTRACT_END;
// Ordering is because x86 is little-endien.
CONTRACTL {
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
PRECONDITION(pCode != NULL);
PRECONDITION(pCode != GetPreStubEntryPoint());
} CONTRACTL_END;
LOG((LF_EH, LL_INFO1000, "COMPlusAfterUnwind: going to: pFunc:%#X, pStack:%#X\n",
tct.pFunc, tct.pStack));
- // TODO: UnwindFrames ends up calling into StackWalkFrames which is SO_INTOLERANT
- // as is UnwindFrames, etc... Should we make COMPlusAfterUnwind SO_INTOLERANT???
- ANNOTATION_VIOLATION(SOToleranceViolation);
-
UnwindFrames(pThread, &tct);
#ifdef DEBUGGING_SUPPORTED
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_GC_TRIGGERS;
STATIC_CONTRACT_MODE_COOPERATIVE;
- STATIC_CONTRACT_SO_TOLERANT;
#ifdef _DEBUG
static int breakOnFirstPass = -1;
#ifdef FEATURE_CORRUPTING_EXCEPTIONS
{
- BEGIN_SO_INTOLERANT_CODE(GetThread());
// Setup the state in current exception tracker indicating the corruption severity
// of the active exception.
CEHelper::SetupCorruptionSeverityForActiveException(bRethrownException, bNestedException,
// Failfast if exception indicates corrupted process state
if (pExInfo->GetCorruptionSeverity() == ProcessCorrupting)
EEPOLICY_HANDLE_FATAL_ERROR(exceptionCode);
-
- END_SO_INTOLERANT_CODE;
}
#endif // FEATURE_CORRUPTING_EXCEPTIONS
if (bRethrownException || bNestedException)
{
_ASSERTE(pExInfo->m_pPrevNestedInfo != NULL);
-
- BEGIN_SO_INTOLERANT_CODE(GetThread());
SetStateForWatsonBucketing(bRethrownException, pExInfo->GetPreviousExceptionTracker()->GetThrowableAsHandle());
- END_SO_INTOLERANT_CODE;
}
#ifdef DEBUGGING_SUPPORTED
Thread *pThread = GetThread();
if ((pExceptionRecord->ExceptionFlags & (EXCEPTION_UNWINDING | EXCEPTION_EXIT_UNWIND)) == 0)
{
- if (IsSOExceptionCode(pExceptionRecord->ExceptionCode))
+ if (pExceptionRecord->ExceptionCode == STATUS_STACK_OVERFLOW)
{
EEPolicy::HandleStackOverflow(SOD_ManagedFrameHandler, (void*)pEstablisherFrame);
return ExceptionContinueSearch;
}
- else
- {
-#ifdef FEATURE_STACK_PROBE
- if (GetEEPolicy()->GetActionOnFailure(FAIL_StackOverflow) == eRudeUnloadAppDomain)
- {
- RetailStackProbe(static_cast<unsigned int>(ADJUST_PROBE(BACKOUT_CODE_STACK_LIMIT)), pThread);
- }
-#endif
- }
}
else
{
exceptionCode = GetCurrentExceptionCode();
}
- if (IsSOExceptionCode(exceptionCode))
+ if (exceptionCode == STATUS_STACK_OVERFLOW)
{
// We saved the context during the first pass in case the stack overflow exception is
// unhandled and Watson dump code needs it. Now we are in the second pass, therefore
}
}
- // <TODO> . We need to probe here, but can't introduce destructors etc. </TODO>
- BEGIN_CONTRACT_VIOLATION(SOToleranceViolation);
-
if (pExceptionRecord->ExceptionFlags & (EXCEPTION_UNWINDING | EXCEPTION_EXIT_UNWIND))
{
retVal = CPFH_UnwindHandler(pExceptionRecord,
}
- END_CONTRACT_VIOLATION;
-
return retVal;
} // COMPlusFrameHandler()
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_GC_TRIGGERS;
STATIC_CONTRACT_MODE_COOPERATIVE;
- STATIC_CONTRACT_SO_INTOLERANT;
LOG((LF_EH, LL_INFO1000, "COMPlusPEndCatch:called with "
"pThread:0x%x\n",pThread));
void* esp = NULL;
- // @todo . We need to probe in the EH code, but can't introduce destructors etc.
- BEGIN_CONTRACT_VIOLATION(SOToleranceViolation);
-
// Notify the profiler that the catcher has finished running
// IL stubs don't contain catch blocks so inability to perform this check does not matter.
// if (!pFunc->IsILStub())
pThread->SyncManagedExceptionState(fIsDebuggerHelperThread);
LOG((LF_EH, LL_INFO1000, "COMPlusPEndCatch: esp=%p\n", esp));
-
- END_CONTRACT_VIOLATION;
return esp;
}
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_GC_TRIGGERS;
STATIC_CONTRACT_MODE_COOPERATIVE;
- STATIC_CONTRACT_SO_INTOLERANT;
ETW::ExceptionLog::ExceptionCatchEnd();
ETW::ExceptionLog::ExceptionThrownEnd();
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
STATIC_CONTRACT_MODE_COOPERATIVE;
- STATIC_CONTRACT_SO_TOLERANT;
_ASSERTE(pExInfo && context);
// No CONTRACT here, because we can't run the risk of it pushing any SEH into the current method.
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
- STATIC_CONTRACT_SO_TOLERANT;
PEXCEPTION_REGISTRATION_RECORD pEHR = GetCurrentSEHRecord();
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_GC_TRIGGERS;
STATIC_CONTRACT_MODE_COOPERATIVE;
- STATIC_CONTRACT_SO_INTOLERANT;
int retVal = EXCEPTION_CONTINUE_SEARCH;
EXCEPTION_DISPOSITION retval = ExceptionContinueSearch;
- BEGIN_CONTRACT_VIOLATION(SOToleranceViolation);
-
// We must forward to the COMPlusFrameHandler. This will unwind the Frame Chain up to here, and also leave the
// preemptive GC mode set correctly.
retval = EXCEPTION_HANDLER_FWD(COMPlusFrameHandler);
pFrame->Pop(pThread);
}
- END_CONTRACT_VIOLATION;
-
return retval;
}
CONTRACTL {
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
SUPPORTS_DAC;
} CONTRACTL_END;
CONTRACTL {
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
SUPPORTS_DAC;
} CONTRACTL_END;
#ifdef _DEBUG
extern "C" void STDCALL WriteBarrierAssert(BYTE* ptr, Object* obj)
{
- STATIC_CONTRACT_SO_TOLERANT;
WRAPPER_NO_CONTRACT;
static BOOL fVerifyHeap = -1;
__declspec(naked) void F_CALL_CONV JIT_Stelem_Ref(PtrArray* array, unsigned idx, Object* val)
{
- STATIC_CONTRACT_SO_TOLERANT;
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_GC_TRIGGERS;
extern "C" __declspec(naked) Object* F_CALL_CONV JIT_IsInstanceOfClass(MethodTable *pMT, Object *pObject)
{
- STATIC_CONTRACT_SO_TOLERANT;
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_GC_TRIGGERS;
extern "C" __declspec(naked) Object* F_CALL_CONV JIT_ChkCastClass(MethodTable *pMT, Object *pObject)
{
- STATIC_CONTRACT_SO_TOLERANT;
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_GC_TRIGGERS;
extern "C" __declspec(naked) Object* F_CALL_CONV JIT_ChkCastClassSpecial(MethodTable *pMT, Object *pObject)
{
- STATIC_CONTRACT_SO_TOLERANT;
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_GC_TRIGGERS;
CONTRACTL {
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
} CONTRACTL_END;
pThread->UnhijackThread();
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
- SO_INTOLERANT;
} CONTRACTL_END;
return OBJECTREFToObject(AllocateArrayEx(pArrayMT,
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
- SO_INTOLERANT;
} CONTRACTL_END;
return OBJECTREFToObject( AllocatePrimitiveArray(type, cElements, FALSE) );
void IBCLogger::LogAccessThreadSafeHelper(const void * p, pfnIBCAccessCallback callback)
{
WRAPPER_NO_CONTRACT;
- SO_NOT_MAINLINE_FUNCTION;
CONTRACT_VIOLATION( HostViolation );
/* For the Global Class we may see p == NULL */
THROWS;
GC_NOTRIGGER;
MODE_ANY;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
ThreadLocalIBCInfo::ThreadLocalIBCInfo()
{
LIMITED_METHOD_CONTRACT;
- SO_NOT_MAINLINE_FUNCTION;
m_fCallbackFailed = false;
m_fProcessingDelayedList = false;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
THROWS;
GC_NOTRIGGER;
MODE_ANY;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
THROWS;
GC_NOTRIGGER;
MODE_ANY;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
GC_NOTRIGGER;
MODE_ANY;
CAN_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_NOT_MAINLINE;
PRECONDITION(g_IBCLogger.InstrEnabled());
}
CONTRACTL_END;
void IBCLogger::LogMethodDescAccessHelper(const MethodDesc *pMD)
{
WRAPPER_NO_CONTRACT;
- SO_NOT_MAINLINE_FUNCTION;
LogMethodAccessHelper(pMD, ReadMethodDesc);
}
void IBCLogger::LogMethodDescWriteAccessHelper(MethodDesc *pMD)
{
WRAPPER_NO_CONTRACT;
- SO_NOT_MAINLINE_FUNCTION;
LogMethodAccessHelper(pMD, ReadMethodDesc);
LogMethodAccessHelper(pMD, WriteMethodDesc);
void IBCLogger::LogMethodPrecodeAccessHelper(MethodDesc *pMD)
{
WRAPPER_NO_CONTRACT;
- SO_NOT_MAINLINE_FUNCTION;
LogMethodAccessHelper(pMD, ReadMethodPrecode);
}
void IBCLogger::LogMethodPrecodeWriteAccessHelper(MethodDesc *pMD)
{
WRAPPER_NO_CONTRACT;
- SO_NOT_MAINLINE_FUNCTION;
LogMethodAccessHelper(pMD, ReadMethodPrecode);
LogMethodAccessHelper(pMD, WriteMethodPrecode);
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_NOT_MAINLINE;
PRECONDITION(g_IBCLogger.InstrEnabled());
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_NOT_MAINLINE;
PRECONDITION(g_IBCLogger.InstrEnabled());
}
CONTRACTL_END;
void IBCLogger::LogMethodGCInfoAccessHelper(MethodDesc *pMD)
{
WRAPPER_NO_CONTRACT;
- SO_NOT_MAINLINE_FUNCTION;
_ASSERTE(InstrEnabled());
void IBCLogger::LogMethodTableAccessHelper(MethodTable const * pMT)
{
WRAPPER_NO_CONTRACT;
- SO_NOT_MAINLINE_FUNCTION;
LogTypeAccessHelper(pMT, ReadMethodTable);
}
void IBCLogger::LogTypeMethodTableAccessHelper(const TypeHandle *th)
{
WRAPPER_NO_CONTRACT;
- SO_NOT_MAINLINE_FUNCTION;
LogTypeAccessHelper(*th, ReadMethodTable);
}
void IBCLogger::LogTypeMethodTableWriteableAccessHelper(const TypeHandle *th)
{
WRAPPER_NO_CONTRACT;
- SO_NOT_MAINLINE_FUNCTION;
LogTypeAccessHelper(*th, ReadTypeDesc);
LogTypeAccessHelper(*th, WriteTypeDesc);
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_NOT_MAINLINE;
PRECONDITION(g_IBCLogger.InstrEnabled());
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
void IBCLogger::LogMethodTableWriteableDataAccessHelper(MethodTable const * pMT)
{
WRAPPER_NO_CONTRACT;
- SO_NOT_MAINLINE_FUNCTION;
LogTypeAccessHelper(pMT, ReadMethodTable);
LogTypeAccessHelper(pMT, ReadMethodTableWriteableData);
void IBCLogger::LogMethodTableWriteableDataWriteAccessHelper(MethodTable *pMT)
{
WRAPPER_NO_CONTRACT;
- SO_NOT_MAINLINE_FUNCTION;
LogTypeAccessHelper(pMT, ReadMethodTable);
LogTypeAccessHelper(pMT, WriteMethodTableWriteableData);
void IBCLogger::LogMethodTableNonVirtualSlotsAccessHelper(MethodTable const * pMT)
{
WRAPPER_NO_CONTRACT;
- SO_NOT_MAINLINE_FUNCTION;
LogTypeAccessHelper(pMT, ReadMethodTable);
LogTypeAccessHelper(pMT, ReadNonVirtualSlots);
void IBCLogger::LogEEClassAndMethodTableAccessHelper(MethodTable * pMT)
{
WRAPPER_NO_CONTRACT;
- SO_NOT_MAINLINE_FUNCTION;
if (pMT == NULL)
return;
void IBCLogger::LogEEClassCOWTableAccessHelper(MethodTable * pMT)
{
WRAPPER_NO_CONTRACT;
- SO_NOT_MAINLINE_FUNCTION;
if (pMT == NULL)
return;
void IBCLogger::LogFieldDescsAccessHelper(FieldDesc * pFD)
{
WRAPPER_NO_CONTRACT;
- SO_NOT_MAINLINE_FUNCTION;
MethodTable * pMT = pFD->GetApproxEnclosingMethodTable_NoLogging();
void IBCLogger::LogDispatchMapAccessHelper(MethodTable *pMT)
{
WRAPPER_NO_CONTRACT;
- SO_NOT_MAINLINE_FUNCTION;
LogTypeAccessHelper(pMT, ReadMethodTable);
LogTypeAccessHelper(pMT, ReadDispatchMap);
void IBCLogger::LogDispatchTableAccessHelper(MethodTable *pMT)
{
WRAPPER_NO_CONTRACT;
- SO_NOT_MAINLINE_FUNCTION;
LogTypeAccessHelper(pMT, ReadMethodTable);
LogTypeAccessHelper(pMT, ReadDispatchMap);
void IBCLogger::LogDispatchTableSlotAccessHelper(DispatchSlot *pDS)
{
WRAPPER_NO_CONTRACT;
- SO_NOT_MAINLINE_FUNCTION;
if (pDS->IsNull())
return;
void IBCLogger::LogFieldMarshalersReadAccessHelper(MethodTable * pMT)
{
WRAPPER_NO_CONTRACT;
- SO_NOT_MAINLINE_FUNCTION;
if (pMT == NULL)
return;
void IBCLogger::LogCCtorInfoReadAccessHelper(MethodTable *pMT)
{
WRAPPER_NO_CONTRACT;
- SO_NOT_MAINLINE_FUNCTION;
LogTypeAccessHelper(pMT, ReadCCtorInfo);
}
void IBCLogger::LogTypeHashTableAccessHelper(const TypeHandle *th)
{
WRAPPER_NO_CONTRACT;
- SO_NOT_MAINLINE_FUNCTION;
LogTypeAccessHelper(*th, ReadTypeHashTable);
}
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_NOT_MAINLINE;
PRECONDITION(g_IBCLogger.InstrEnabled());
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_NOT_MAINLINE;
PRECONDITION(g_IBCLogger.InstrEnabled());
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_NOT_MAINLINE;
PRECONDITION(g_IBCLogger.InstrEnabled());
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_NOT_MAINLINE;
PRECONDITION(g_IBCLogger.InstrEnabled());
}
CONTRACTL_END;
void IBCLogger::LogRidMapAccessHelper( RidMapLogData data )
{
WRAPPER_NO_CONTRACT;
- SO_NOT_MAINLINE_FUNCTION;
data.First()->LogTokenAccess( data.Second(), RidMap );
}
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_NOT_MAINLINE;
PRECONDITION(g_IBCLogger.InstrEnabled());
}
CONTRACTL_END;
{
NOTHROW;
MODE_ANY;
- SO_TOLERANT;
}
CONTRACTL_END;
#ifndef CROSSGEN_COMPILE
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return;)
-
#ifdef MDA_SUPPORTED
// Report the exception that was thrown.
if (pProbe)
LogInterop(W("An exception occurred during release"));
LogInteropLeak(pUnk);
#endif // FEATURE_COMINTEROP
-
- END_SO_INTOLERANT_CODE;
#endif // CROSSGEN_COMPILE
}
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pUnk, NULL_OK));
} CONTRACTL_END;
NOTHROW;
GC_TRIGGERS;
MODE_ANY;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pUnk, NULL_OK));
} CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pInternalImport));
}
CONTRACTL_END;
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_TRIGGERS;
STATIC_CONTRACT_MODE_ANY;
- STATIC_CONTRACT_SO_TOLERANT;
_ASSERTE(pUnk);
_ASSERTE(pResUnk);
GCX_PREEMP_NO_DTOR_HAVE_THREAD(pThread);
BEGIN_CONTRACT_VIOLATION(ThrowsViolation); // message pump could happen, so arbitrary managed code could run
- BEGIN_SO_TOLERANT_CODE(pThread);
struct Param { HRESULT * const hr; IUnknown** const pUnk; REFIID riid; IUnknown*** const pResUnk; } param = { &hr, &pUnk, riid, &pResUnk };
#define PAL_TRY_ARG(argName) (*(pParam->argName))
#undef PAL_TRY_ARG
#undef PAL_TRY_REFARG
- END_SO_TOLERANT_CODE;
END_CONTRACT_VIOLATION;
LOG((LF_INTEROP, LL_EVERYTHING, hr == S_OK ? "QI Succeeded\n" : "QI Failed\n"));
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_TRIGGERS;
STATIC_CONTRACT_MODE_PREEMPTIVE;
- STATIC_CONTRACT_SO_TOLERANT;
_ASSERTE(pUnk);
_ASSERTE(pResUnk);
HRESULT hr = E_FAIL;
BEGIN_CONTRACT_VIOLATION(ThrowsViolation); // message pump could happen, so arbitrary managed code could run
- BEGIN_SO_TOLERANT_CODE(pThread);
struct Param { HRESULT * const hr; IUnknown** const pUnk; REFIID riid; IUnknown*** const pResUnk; } param = { &hr, &pUnk, riid, &pResUnk };
#define PAL_TRY_ARG(argName) (*(pParam->argName))
#undef PAL_TRY_ARG
#undef PAL_TRY_REFARG
- END_SO_TOLERANT_CODE;
END_CONTRACT_VIOLATION;
-
LOG((LF_INTEROP, LL_EVERYTHING, hr == S_OK ? "QI Succeeded\n" : "QI Failed\n"));
// Ensure if the QI returned ok that it actually set a pointer.
NOTHROW;
GC_TRIGGERS;
MODE_ANY;
- SO_TOLERANT;
PRECONDITION(g_fEEStarted);
PRECONDITION(GetThread() != NULL); // Should always be inside BEGIN_EXTERNAL_ENTRYPOINT
}
GCX_COOP();
EX_TRY
{
- BEGIN_SO_INTOLERANT_CODE(GetThread());
-
EnsureComStarted(fCoInitCurrentThread);
-
- END_SO_INTOLERANT_CODE;
}
EX_CATCH_HRESULT(hr);
}
NOTHROW;
GC_TRIGGERS;
MODE_ANY;
- SO_TOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
}
CONTRACTL_END;
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_GC_TRIGGERS;
STATIC_CONTRACT_MODE_ANY;
- STATIC_CONTRACT_SO_INTOLERANT;
_ASSERTE(pDispEx != NULL);
PAL_TRY(Param *, pParam, ¶m)
{
- BEGIN_SO_TOLERANT_CODE(GetThread());
-
pParam->hr = pParam->pDispEx->InvokeEx(pParam->MemberID,
pParam->lcid,
pParam->flags,
pParam->pVarResult,
pParam->pExcepInfo,
pParam->pspCaller);
-
- END_SO_TOLERANT_CODE;
}
PAL_EXCEPT_FILTER(CallOutFilter)
{
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_GC_TRIGGERS;
STATIC_CONTRACT_MODE_ANY;
- STATIC_CONTRACT_SO_INTOLERANT;
_ASSERTE(pDisp != NULL);
PAL_TRY(Param *, pParam, ¶m)
{
- BEGIN_SO_TOLERANT_CODE(GetThread());
-
pParam->hr = pParam->pDisp->Invoke(pParam->MemberID,
pParam->riid,
pParam->lcid,
pParam->pVarResult,
pParam->pExcepInfo,
pParam->piArgErr);
-
- END_SO_TOLERANT_CODE;
}
PAL_EXCEPT_FILTER(CallOutFilter)
{
GC_TRIGGERS;
MODE_ANY;
PRECONDITION(CheckPointer(pItf));
- SO_TOLERANT;
}
CONTRACTL_END;
GC_NOTRIGGER;
MODE_ANY;
PRECONDITION(CheckPointer(pItf, NULL_OK));
- SO_TOLERANT;
}
CONTRACTL_END;
GC_NOTRIGGER;
FORBID_FAULT;
MODE_ANY;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pUnk));
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pUnk, NULL_OK));
}
CONTRACT_END;
bool Interpreter::MethodHandlesException(OBJECTREF orThrowable)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
bool Interpreter::SearchForCoveringFinally()
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_ANY;
void Interpreter::LdArg(int argNum)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
void Interpreter::LdArgA(int argNum)
{
CONTRACTL {
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_COOPERATIVE;
void Interpreter::StArg(int argNum)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
void Interpreter::LdLocA(int locNum)
{
CONTRACTL {
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_COOPERATIVE;
void Interpreter::LdIcon(INT32 c)
{
CONTRACTL {
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_COOPERATIVE;
void Interpreter::LdR4con(INT32 c)
{
CONTRACTL {
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_COOPERATIVE;
void Interpreter::LdLcon(INT64 c)
{
CONTRACTL {
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_COOPERATIVE;
void Interpreter::LdR8con(INT64 c)
{
CONTRACTL {
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_COOPERATIVE;
void Interpreter::LdNull()
{
CONTRACTL {
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_COOPERATIVE;
void Interpreter::BinaryArithOp()
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
void Interpreter::BinaryArithOvfOp()
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
void Interpreter::BinaryArithOvfOpWork(T val1, T val2)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
void Interpreter::BinaryIntOp()
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
void Interpreter::ShiftOp()
{
CONTRACTL {
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_COOPERATIVE;
void Interpreter::Neg()
{
CONTRACTL {
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_COOPERATIVE;
void Interpreter::Not()
{
CONTRACTL {
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_COOPERATIVE;
void Interpreter::Conv()
{
CONTRACTL {
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_COOPERATIVE;
void Interpreter::ConvRUn()
{
CONTRACTL {
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_COOPERATIVE;
void Interpreter::ConvOvf()
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
void Interpreter::ConvOvfUn()
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
void Interpreter::LdObj()
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
void Interpreter::LdObjValueClassWork(CORINFO_CLASS_HANDLE valueClsHnd, unsigned ind, void* src)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
CORINFO_CLASS_HANDLE Interpreter::GetTypeFromToken(BYTE* codePtr, CorInfoTokenKind tokKind InterpTracingArg(ResolveTokenKind rtk))
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
void Interpreter::CpObj()
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
void Interpreter::StObj()
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
void Interpreter::InitObj()
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
void Interpreter::LdStr()
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
{
#if INTERP_DYNAMIC_CONTRACTS
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
} CONTRACTL_END;
#else
// Dynamic contract occupies too much stack.
- STATIC_CONTRACT_SO_TOLERANT;
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_GC_TRIGGERS;
STATIC_CONTRACT_MODE_COOPERATIVE;
void Interpreter::NewArr()
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
void Interpreter::IsInst()
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
void Interpreter::CastClass()
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
void Interpreter::LocAlloc()
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
void Interpreter::MkRefany()
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
void Interpreter::RefanyType()
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
CorInfoType Interpreter::GetTypeForPrimitiveValueClass(CORINFO_CLASS_HANDLE clsHnd)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
void Interpreter::RefanyVal()
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
void Interpreter::CkFinite()
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
void Interpreter::LdToken()
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
void Interpreter::LdFtn()
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
void Interpreter::LdVirtFtn()
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
void Interpreter::Sizeof()
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
void Interpreter::CompareOp()
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
INT32 Interpreter::CompareOpRes(unsigned op1idx)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
void Interpreter::BrOnComparison()
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
void Interpreter::LdFld(FieldDesc* fldIn)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
void Interpreter::LdFldA()
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
void Interpreter::StFld()
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
bool Interpreter::StaticFldAddrWork(CORINFO_ACCESS_FLAGS accessFlgs, /*out (byref)*/void** pStaticFieldAddr, /*out*/InterpreterType* pit, /*out*/UINT* pFldSize, /*out*/bool* pManagedMem)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
void Interpreter::LdSFld()
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
void Interpreter::LdSFldA()
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
void Interpreter::StSFld()
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
void Interpreter::LdElemWithType()
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
void Interpreter::StElemWithType()
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
void Interpreter::LdElem()
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
void Interpreter::StElem()
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
void Interpreter::InitBlk()
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
void Interpreter::CpBlk()
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
void Interpreter::Box()
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
void Interpreter::BoxStructRefAt(unsigned ind, CORINFO_CLASS_HANDLE valCls)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
void Interpreter::Unbox()
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
void Interpreter::Throw()
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
void Interpreter::Rethrow()
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
void Interpreter::UnboxAny()
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
void Interpreter::LdLen()
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
{
#if INTERP_DYNAMIC_CONTRACTS
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
} CONTRACTL_END;
#else
// Dynamic contract occupies too much stack.
- STATIC_CONTRACT_SO_TOLERANT;
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_GC_TRIGGERS;
STATIC_CONTRACT_MODE_COOPERATIVE;
{
#if INTERP_DYNAMIC_CONTRACTS
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
} CONTRACTL_END;
#else
// Dynamic contract occupies too much stack.
- STATIC_CONTRACT_SO_TOLERANT;
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_GC_TRIGGERS;
STATIC_CONTRACT_MODE_COOPERATIVE;
{
#if INTERP_DYNAMIC_CONTRACTS
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
} CONTRACTL_END;
#else
// Dynamic contract occupies too much stack.
- STATIC_CONTRACT_SO_TOLERANT;
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_GC_TRIGGERS;
STATIC_CONTRACT_MODE_COOPERATIVE;
bool Interpreter::IsDeadSimpleGetter(CEEInfo* info, MethodDesc* pMD, size_t* offsetOfLd)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_ANY;
void Interpreter::DoStringLength()
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
void Interpreter::DoStringGetChar()
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
void Interpreter::DoGetTypeFromHandle()
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
void Interpreter::DoByReferenceCtor()
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
void Interpreter::DoByReferenceValue()
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
void Interpreter::DoSIMDHwAccelerated()
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
void Interpreter::RecordConstrainedCall()
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
void Interpreter::ThrowDivideByZero()
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
void Interpreter::ThrowSysArithException()
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
void Interpreter::ThrowNullPointerException()
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
void Interpreter::ThrowOverflowException()
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
void Interpreter::ThrowArrayBoundsException()
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
void Interpreter::ThrowInvalidCastException()
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
void Interpreter::ThrowStackOverflow()
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
{
#if 0
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_NOTRIGGER;
} CONTRACTL_END;
{
#if 0
CONTRACTL {
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
} CONTRACTL_END;
MethodDesc* Interpreter::InterpretationStubToMethodInfo(PCODE addr)
{
CONTRACTL {
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
} CONTRACTL_END;
{
#if 0
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_NOTRIGGER;
} CONTRACTL_END;
{
#if 0
CONTRACTL {
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
} CONTRACTL_END;
InterpreterMethodInfo* Interpreter::MethodHandleToInterpreterMethInfoPtr(CORINFO_METHOD_HANDLE md)
{
CONTRACTL {
- SO_TOLERANT;
NOTHROW;
GC_TRIGGERS;
} CONTRACTL_END;
const char* eeGetMethodFullName(CEEInfo* info, CORINFO_METHOD_HANDLE hnd, const char** clsName)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_ANY;
inline void Interpreter::LdFromMemAddr(void* addr, InterpreterType tp)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_NOTRIGGER;
MODE_COOPERATIVE;
inline void Interpreter::LdLoc(int locNum)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
void Interpreter::StLoc(int locNum)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
void Interpreter::StToLocalMemAddr(void* addr, InterpreterType tp)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
#include "ecall.h"
#include "generics.h"
#include "typestring.h"
-#include "stackprobe.h"
#include "typedesc.h"
#include "genericdict.h"
#include "array.h"
NOTHROW;
GC_NOTRIGGER;
MODE_COOPERATIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pObject));
PRECONDITION(pObject->GetMethodTable()->IsArray());
PRECONDITION(toTypeHnd.IsArray());
NOTHROW;
GC_NOTRIGGER;
MODE_COOPERATIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pObject));
PRECONDITION(pObject->GetMethodTable()->IsArray());
PRECONDITION(pInterfaceMT->IsInterface());
NOTHROW;
GC_NOTRIGGER;
MODE_COOPERATIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pObject));
} CONTRACTL_END;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
- SO_INTOLERANT;
} CONTRACTL_END;
STRINGREF result;
{
FCALL_CONTRACT;
- // This "violation" isn't a really a violation.
- // We are calling a assembly helper that can't have an SO Tolerance contract
- CONTRACT_VIOLATION(SOToleranceViolation);
/* Make no assumptions about the current machine state */
ResetCurrentContext();
MODE_ANY;
WRAPPER(GC_TRIGGERS);
WRAPPER(THROWS);
- SO_NOT_MAINLINE; // If process is coming down, SO probe is not going to do much good
} CONTRACTL_END;
LOG((LF_ALWAYS, LL_FATALERROR, "Unsafe buffer security check failure: Buffer overrun detected"));
HCIMPL0(void, JIT_DbgIsJustMyCode)
{
FCALL_CONTRACT;
- SO_NOT_MAINLINE_FUNCTION;
// We need to get both the ip of the managed function this probe is in
// (which will be our return address) and the frame pointer for that
{
CONTRACTL
{
- SO_TOLERANT;
NOTHROW;
GC_TRIGGERS;
} CONTRACTL_END;
// Can't have a regular contract because we would never pop it
// We only throw a stack overflow if needed, and we can't handle
// a GC because the incoming parameters are totally unprotected.
- STATIC_CONTRACT_SO_TOLERANT;
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_GC_NOTRIGGER;
STATIC_CONTRACT_MODE_COOPERATIVE
{
THROWS;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
#include "ecall.h"
#include "generics.h"
#include "typestring.h"
-#include "stackprobe.h"
#include "typedesc.h"
#include "genericdict.h"
#include "array.h"
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
PRECONDITION(context != NULL);
}
{
THROWS;
GC_TRIGGERS;
- SO_INTOLERANT;
}
CONTRACTL_END;
CORINFO_CLASS_HANDLE CEEInfo::getTokenTypeAsHandle (CORINFO_RESOLVED_TOKEN * pResolvedToken)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
size_t FQNameCapacity)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
CorInfoCanSkipVerificationResult CEEInfo::canSkipMethodVerification(CORINFO_METHOD_HANDLE ftnHnd)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
CORINFO_MODULE_HANDLE moduleHnd)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
mdToken metaTOK)
{
CONTRACTL {
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
mdToken metaTOK)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
CorInfoHelpFunc CEEInfo::getLazyStringLiteralHelper(CORINFO_MODULE_HANDLE handle)
{
CONTRACTL {
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_PREEMPTIVE;
void CEEInfo::resolveToken(/* IN, OUT */ CORINFO_RESOLVED_TOKEN * pResolvedToken)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
CONTRACTL {
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
} CONTRACTL_END;
CONTRACTL {
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
} CONTRACTL_END;
bool CEEInfo::tryResolveToken(CORINFO_RESOLVED_TOKEN* resolvedToken)
{
// No dynamic contract here because SEH is used
- STATIC_CONTRACT_SO_TOLERANT;
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_GC_TRIGGERS;
STATIC_CONTRACT_MODE_PREEMPTIVE;
)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
bool CEEInfo::isFieldStatic(CORINFO_FIELD_HANDLE fldHnd)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
CORINFO_SIG_INFO * sigRet)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
CORINFO_SIG_INFO * sigRet)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
CORINFO_CLASS_HANDLE clsHnd)
{
CONTRACTL {
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_PREEMPTIVE;
CORINFO_CLASS_HANDLE clsHnd)
{
CONTRACTL{
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_PREEMPTIVE;
BOOL CEEInfo::canAllocateOnStack(CORINFO_CLASS_HANDLE clsHnd)
{
CONTRACTL{
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_PREEMPTIVE;
unsigned CEEInfo::getClassAlignmentRequirement(CORINFO_CLASS_HANDLE type, BOOL fDoubleAlignHint)
{
CONTRACTL {
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_PREEMPTIVE;
CEEInfo::getFieldInClass(CORINFO_CLASS_HANDLE clsHnd, INT num)
{
CONTRACTL {
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_PREEMPTIVE;
CEEInfo::getMethodDefFromMethod(CORINFO_METHOD_HANDLE hMethod)
{
CONTRACTL {
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_PREEMPTIVE;
BOOL fOptional)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
unsigned CEEInfo::getClassGClayout (CORINFO_CLASS_HANDLE clsHnd, BYTE* gcPtrs)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
/*OUT*/ SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR* structPassInRegDescPtr)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
unsigned CEEInfo::getClassNumInstanceFields (CORINFO_CLASS_HANDLE clsHnd)
{
CONTRACTL {
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_PREEMPTIVE;
CorInfoType CEEInfo::asCorInfoType (CORINFO_CLASS_HANDLE clsHnd)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
CORINFO_LOOKUP_KIND CEEInfo::getLocationOfThisType(CORINFO_METHOD_HANDLE context)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
DelegateCtorArgs *pCtorData)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
void CEEInfo::MethodCompileComplete(CORINFO_METHOD_HANDLE methHnd)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
CORINFO_GENERICHANDLE_RESULT *pResult)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
const char* CEEInfo::getClassName (CORINFO_CLASS_HANDLE clsHnd)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
const char* CEEInfo::getHelperName (CorInfoHelpFunc ftnNum)
{
CONTRACTL {
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_PREEMPTIVE;
BOOL fAssembly)
{
CONTRACTL {
- SO_TOLERANT;
MODE_PREEMPTIVE;
THROWS;
GC_TRIGGERS;
CORINFO_MODULE_HANDLE CEEInfo::getClassModule(CORINFO_CLASS_HANDLE clsHnd)
{
CONTRACTL {
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_PREEMPTIVE;
CORINFO_ASSEMBLY_HANDLE CEEInfo::getModuleAssembly(CORINFO_MODULE_HANDLE modHnd)
{
CONTRACTL {
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_PREEMPTIVE;
const char* CEEInfo::getAssemblyName(CORINFO_ASSEMBLY_HANDLE asmHnd)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
void* CEEInfo::LongLifetimeMalloc(size_t sz)
{
CONTRACTL {
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_PREEMPTIVE;
void CEEInfo::LongLifetimeFree(void* obj)
{
CONTRACTL {
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_PREEMPTIVE;
size_t CEEInfo::getClassModuleIdForStatics(CORINFO_CLASS_HANDLE clsHnd, CORINFO_MODULE_HANDLE *pModuleHandle, void **ppIndirection)
{
CONTRACTL {
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_PREEMPTIVE;
BOOL CEEInfo::isValueClass(CORINFO_CLASS_HANDLE clsHnd)
{
CONTRACTL {
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_PREEMPTIVE;
CorInfoInlineTypeCheck CEEInfo::canInlineTypeCheck(CORINFO_CLASS_HANDLE clsHnd, CorInfoInlineTypeCheckSource source)
{
CONTRACTL {
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_PREEMPTIVE;
BOOL CEEInfo::canInlineTypeCheckWithObjectVTable (CORINFO_CLASS_HANDLE clsHnd)
{
CONTRACTL {
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_PREEMPTIVE;
DWORD CEEInfo::getClassAttribs (CORINFO_CLASS_HANDLE clsHnd)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
BOOL CEEInfo::isStructRequiringStackAllocRetBuf(CORINFO_CLASS_HANDLE clsHnd)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
BOOL speculative)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
void CEEInfo::classMustBeLoadedBeforeCodeIsRun (CORINFO_CLASS_HANDLE typeToLoadHnd)
{
CONTRACTL {
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_PREEMPTIVE;
void CEEInfo::methodMustBeLoadedBeforeCodeIsRun (CORINFO_METHOD_HANDLE methHnd)
{
CONTRACTL {
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_PREEMPTIVE;
CORINFO_METHOD_HANDLE CEEInfo::mapMethodDeclToMethodImpl(CORINFO_METHOD_HANDLE methHnd)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
CORINFO_CLASS_HANDLE CEEInfo::getBuiltinClass(CorInfoClassId classId)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
CORINFO_CLASS_HANDLE clsHnd)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
CORINFO_CLASS_HANDLE clsHnd)
{
CONTRACTL{
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_PREEMPTIVE;
void CEEInfo::getGSCookie(GSCookie * pCookieVal, GSCookie ** ppCookieVal)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
CORINFO_CLASS_HANDLE parent)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
CORINFO_CLASS_HANDLE cls2)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
CORINFO_CLASS_HANDLE toClass)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
CORINFO_CLASS_HANDLE cls2)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
CORINFO_CLASS_HANDLE cls2)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
CORINFO_CLASS_HANDLE cls)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
BOOL CEEInfo::satisfiesClassConstraints(CORINFO_CLASS_HANDLE cls)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
BOOL CEEInfo::isSDArray(CORINFO_CLASS_HANDLE cls)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
unsigned CEEInfo::getArrayRank(CORINFO_CLASS_HANDLE cls)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
CORINFO_CALL_INFO *pResult /*out */)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
void CEEInfo::ThrowExceptionForHelper(const CORINFO_HELPER_DESC * throwHelper)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
BOOL CEEInfo::isRIDClassDomainID(CORINFO_CLASS_HANDLE cls)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
void **ppIndirection)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
CorInfoHelpFunc CEEInfo::getNewHelper(CORINFO_RESOLVED_TOKEN * pResolvedToken, CORINFO_METHOD_HANDLE callerHandle, bool * pHasSideEffects)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
CorInfoHelpFunc CEEInfo::getNewArrHelper (CORINFO_CLASS_HANDLE arrayClsHnd)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
CorInfoHelpFunc CEEInfo::getCastingHelper(CORINFO_RESOLVED_TOKEN * pResolvedToken, bool fThrowing)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
CorInfoHelpFunc CEEInfo::getSharedCCtorHelper(CORINFO_CLASS_HANDLE clsHnd)
{
CONTRACTL {
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_PREEMPTIVE;
CorInfoHelpFunc CEEInfo::getBoxHelper(CORINFO_CLASS_HANDLE clsHnd)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
CorInfoHelpFunc CEEInfo::getSecurityPrologHelper(CORINFO_METHOD_HANDLE ftn)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
void **ppIndirection)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
unsigned CEEInfo::getMethodHash (CORINFO_METHOD_HANDLE ftnHnd)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
const char* CEEInfo::getMethodName (CORINFO_METHOD_HANDLE ftnHnd, const char** scopeName)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
const char* CEEInfo::getMethodNameFromMetadata(CORINFO_METHOD_HANDLE ftnHnd, const char** className, const char** namespaceName, const char **enclosingClassName)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
const char* CEEInfo::getClassNameFromMetadata(CORINFO_CLASS_HANDLE cls, const char** namespaceName)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
CORINFO_CLASS_HANDLE CEEInfo::getTypeInstantiationArgument(CORINFO_CLASS_HANDLE cls, unsigned index)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
DWORD CEEInfo::getMethodAttribs (CORINFO_METHOD_HANDLE ftn)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
CorInfoMethodRuntimeFlags attribs)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
CORINFO_METHOD_INFO * methInfo)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
DWORD* pRestrictions)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
{
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_GC_TRIGGERS;
- STATIC_CONTRACT_SO_TOLERANT;
JIT_TO_EE_TRANSITION();
BOOL *pfHasCircularMethodConstraints)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
CEEInfo::isInstantiationOfVerifiedGeneric(CORINFO_METHOD_HANDLE hMethod)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
bool fIsTailPrefix)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
{
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_GC_TRIGGERS;
- STATIC_CONTRACT_SO_TOLERANT;
JIT_TO_EE_TRANSITION();
CORINFO_EH_CLAUSE* clause)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
CORINFO_CLASS_HANDLE owner)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
CORINFO_METHOD_HANDLE methodHnd)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
CORINFO_MODULE_HANDLE CEEInfo::getMethodModule (CORINFO_METHOD_HANDLE methodHnd)
{
CONTRACTL {
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_PREEMPTIVE;
bool * pMustExpand)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
bool CEEInfo::isInSIMDModule(CORINFO_CLASS_HANDLE classHnd)
{
CONTRACTL {
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_PREEMPTIVE;
bool * isRelative)
{
CONTRACTL {
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_PREEMPTIVE;
CORINFO_CONTEXT_HANDLE ownerType)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
bool* requiresInstMethodTableArg)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
CORINFO_CLASS_HANDLE CEEInfo::getDefaultEqualityComparerClass(CORINFO_CLASS_HANDLE elemType)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
CORINFO_CLASS_HANDLE CEEInfo::getDefaultEqualityComparerClassHelper(CORINFO_CLASS_HANDLE elemType)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
CORINFO_ACCESS_FLAGS accessFlags)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
CORINFO_CONST_LOOKUP * pResult)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
const char* CEEInfo::getFieldName (CORINFO_FIELD_HANDLE fieldHnd, const char** scopeName)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
CORINFO_CLASS_HANDLE CEEInfo::getFieldClass (CORINFO_FIELD_HANDLE fieldHnd)
{
CONTRACTL {
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_PREEMPTIVE;
CORINFO_CLASS_HANDLE owner)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
unsigned CEEInfo::getFieldOffset (CORINFO_FIELD_HANDLE fieldHnd)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
bool CEEInfo::isWriteBarrierHelperRequired(CORINFO_FIELD_HANDLE field)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
DWORD CEEInfo::getFieldThreadLocalStoreID(CORINFO_FIELD_HANDLE fieldHnd, void **ppIndirection)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
void *CEEInfo::allocateArray(ULONG cBytes)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
void CEEInfo::freeArray(void *array)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
ICorDebugInfo::BoundaryTypes *implicitBoundaries)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
bool *extendOthers)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
CORINFO_ARG_LIST_HANDLE CEEInfo::getArgNext(CORINFO_ARG_LIST_HANDLE args)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
CorInfoType CEEInfo::getHFAType(CORINFO_CLASS_HANDLE hClass)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
CorInfoUnmanagedCallConv CEEInfo::getUnmanagedCallConv(CORINFO_METHOD_HANDLE method)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
BOOL CEEInfo::pInvokeMarshalingRequired(CORINFO_METHOD_HANDLE method, CORINFO_SIG_INFO* callSiteSig)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
CORINFO_METHOD_HANDLE method)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
BOOL* pfIsOpenDelegate)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
void **ppIndirection)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
void **ppIndirection)
{
CONTRACTL {
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_PREEMPTIVE;
CORINFO_JUST_MY_CODE_HANDLE**ppIndirection)
{
CONTRACTL {
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_PREEMPTIVE;
void CEEInfo::getEEInfo(CORINFO_EE_INFO *pEEInfoOut)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
LPCWSTR CEEInfo::getJitTimeLogFilename()
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
DWORD CEEInfo::getThreadTLSIndex(void **ppIndirection)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
const void * CEEInfo::getInlinedCallFrameVptr(void **ppIndirection)
{
CONTRACTL {
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_PREEMPTIVE;
LONG * CEEInfo::getAddrOfCaptureThreadGlobal(void **ppIndirection)
{
CONTRACTL {
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_PREEMPTIVE;
HRESULT CEEInfo::GetErrorHRESULT(struct _EXCEPTION_POINTERS *pExceptionPointers)
{
CONTRACTL {
- SO_TOLERANT;
NOTHROW;
GC_TRIGGERS;
MODE_ANY;
ULONG CEEInfo::GetErrorMessage(__inout_ecount(bufferLength) LPWSTR buffer, ULONG bufferLength)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
LONG EEFilterException(struct _EXCEPTION_POINTERS *pExceptionPointers, void *unused)
{
CONTRACTL {
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
} CONTRACTL_END;
JIT_TO_EE_TRANSITION_LEAF();
- VALIDATE_BACKOUT_STACK_CONSUMPTION;
-
unsigned code = pExceptionPointers->ExceptionRecord->ExceptionCode;
#ifdef _DEBUG
void CEEInfo::HandleException(struct _EXCEPTION_POINTERS *pExceptionPointers)
{
CONTRACTL {
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
} CONTRACTL_END;
HRESULT result)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
void **ppIndirection)
{
CONTRACTL {
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_PREEMPTIVE;
void **ppIndirection)
{
CONTRACTL {
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_PREEMPTIVE;
void **ppIndirection)
{
CONTRACTL {
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_PREEMPTIVE;
void **ppIndirection)
{
CONTRACTL {
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_PREEMPTIVE;
DWORD CEEInfo::getJitFlags(CORJIT_FLAGS* jitFlags, DWORD sizeInBytes)
{
CONTRACTL {
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_PREEMPTIVE;
// No dynamic contract here because SEH is used
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_GC_TRIGGERS;
- STATIC_CONTRACT_SO_TOLERANT;
STATIC_CONTRACT_MODE_PREEMPTIVE;
// NOTE: the lack of JIT/EE transition markers in this method is intentional. Any
IEEMemoryManager* CEEInfo::getMemoryManager()
{
CONTRACTL {
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_PREEMPTIVE;
/*********************************************************************/
int CEEInfo::doAssert(const char* szFile, int iLine, const char* szExpr)
{
- STATIC_CONTRACT_SO_TOLERANT;
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_GC_TRIGGERS;
STATIC_CONTRACT_MODE_PREEMPTIVE;
void CEEInfo::reportFatalError(CorJitResult result)
{
- STATIC_CONTRACT_SO_TOLERANT;
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_GC_TRIGGERS;
STATIC_CONTRACT_MODE_PREEMPTIVE;
BOOL CEEInfo::logMsg(unsigned level, const char* fmt, va_list args)
{
- STATIC_CONTRACT_SO_TOLERANT;
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_GC_TRIGGERS;
STATIC_CONTRACT_MODE_PREEMPTIVE;
void ** ppIndirection) /* OUT */
{
CONTRACTL {
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_PREEMPTIVE;
BOOL *pbIndirectedHandles)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
ICorDebugInfo::OffsetMapping *pMap)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
void CEEJitInfo::setVars(CORINFO_METHOD_HANDLE ftn, ULONG32 cVars, ICorDebugInfo::NativeVarInfo *vars)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
void CEEJitInfo::CompressDebugInfo()
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
{
#ifdef WIN64EXCEPTIONS
CONTRACTL {
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_PREEMPTIVE;
{
#ifdef WIN64EXCEPTIONS
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
INT32 addlDelta)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
WORD CEEJitInfo::getRelocTypeHint(void * target)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
void CEEJitInfo::getModuleNativeEntryPointRange(void** pStart, void** pEnd)
{
CONTRACTL {
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_PREEMPTIVE;
void **ppValue)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
InfoAccessType CEEJitInfo::emptyStringLiteral(void ** ppValue)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
void **ppIndirection)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
bool* pIsSpeculative)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
void **ppIndirection)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
void * CEEJitInfo::allocGCInfo (size_t size)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
unsigned cEH)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
const CORINFO_EH_CLAUSE* clause)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
CORINFO_EH_CLAUSE* clause) /* OUT */
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_GC_TRIGGERS;
STATIC_CONTRACT_MODE_PREEMPTIVE;
- STATIC_CONTRACT_SO_INTOLERANT;
SString namespaceOrClassName, methodName, methodSignature;
// Fire an ETW event to mark the beginning of JIT'ing
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_GC_TRIGGERS;
STATIC_CONTRACT_MODE_PREEMPTIVE;
- STATIC_CONTRACT_SO_INTOLERANT;
CorJitResult ret = CORJIT_SKIPPED; // Note that CORJIT_SKIPPED is an error exit status code
-
comp->setJitFlags(jitFlags);
#ifdef FEATURE_STACK_SAMPLING
- // SO_INTOLERANT due to init affecting global state.
static ConfigDWORD s_stackSamplingEnabled;
bool samplingEnabled = (s_stackSamplingEnabled.val(CLRConfig::UNSUPPORTED_StackSamplingEnabled) != 0);
#endif
- BEGIN_SO_TOLERANT_CODE(GetThread());
-
-
#if defined(ALLOW_SXS_JIT) && !defined(CROSSGEN_COMPILE)
if (FAILED(ret) && jitMgr->m_alternateJit
#ifdef FEATURE_STACK_SAMPLING
}
#endif
- END_SO_TOLERANT_CODE;
-
return ret;
}
{
THROWS;
GC_NOTRIGGER;
- SO_INTOLERANT;
MODE_ANY;
}
CONTRACTL_END;
CorInfoHelperTailCallSpecialHandling flags)
{
CONTRACTL {
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
void **ppIndirection)
{
CONTRACTL{
- SO_TOLERANT;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
CONTRACTL {
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
} CONTRACTL_END;
Init(codeAddress, ExecutionManager::GetScanFlags());
CONTRACTL {
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
} CONTRACTL_END;
m_codeAddress = codeAddress;
void ResetForJitRetry()
{
CONTRACTL {
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
} CONTRACTL_END;
BOOL HasLock()
{
WRAPPER_NO_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
return(m_Crst.OwnedByCurrentThread());
}
#endif
CONTRACTL
{
THROWS;
- SO_INTOLERANT;
MODE_ANY;
}
CONTRACTL_END;
CONTRACTL
{
THROWS;
- SO_INTOLERANT;
MODE_ANY;
}
CONTRACTL_END;
CONTRACTL
{
THROWS;
- SO_INTOLERANT;
MODE_ANY;
}
CONTRACTL_END;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_INTOLERANT;
}
CONTRACTL_END;
// List of LoaderAllocators being deleted
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_INTOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
MODE_COOPERATIVE;
- SO_TOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_TRIGGERS;
MODE_ANY;
- SO_INTOLERANT;
} CONTRACTL_END;
if (m_fTerminated)
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
} CONTRACTL_END;
#ifdef FAT_DISPATCH_TOKENS
// exceptions and just return an invalid token, since this is
EX_TRY
{
- BEGIN_SO_INTOLERANT_CODE(GetThread());
SimpleReadLockHolder rlock(m_pFatTokenSetLock);
if (m_pFatTokenSet != NULL)
{
DispatchTokenFat key(typeId, slotNumber);
pFat = m_pFatTokenSet->Lookup(&key);
}
- END_SO_INTOLERANT_CODE;
}
EX_CATCH
{
THROWS;
GC_TRIGGERS;
MODE_ANY;
- SO_INTOLERANT;
}
CONTRACTL_END;
HRESULT hr = S_OK;
Object *pRetVal = NULL;
- BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), FCThrow(kStackOverflowException));
-
IMDInternalImport *_pScope = pScope;
MDDefaultValue value;
*pCorElementType = (UINT32)value.m_bType;
*pLength = (INT32)value.m_cbSize;
ErrExit:
- END_SO_INTOLERANT_CODE;
-
if (FAILED(hr))
{
FCThrow(kBadImageFormatException);
HRESULT hr = S_OK;
IMDInternalImport *_pScope = pScope;
- BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), FCThrowVoid(kStackOverflowException));
IfFailGo(_pScope->GetCustomAttributeProps(cv, ptkType));
IfFailGo(_pScope->GetCustomAttributeAsBlob(cv, (const void **)&ppBlob->m_array, (ULONG *)&ppBlob->m_count));
ErrExit:
- END_SO_INTOLERANT_CODE;
-
if (FAILED(hr))
{
FCThrowVoid(kBadImageFormatException);
HRESULT hr = S_OK;
- BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), FCThrowVoid(kStackOverflowException));
{
IMDInternalImport *_pScope = pScope;
}
}
ErrExit:
- END_SO_INTOLERANT_CODE;
-
if (FAILED(hr))
{
FCThrowVoid(kBadImageFormatException);
MD_CLASS_LAYOUT layout;
BOOL retVal = FALSE;
- BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), FCThrow(kStackOverflowException));
IfFailGo(_pScope->GetClassLayoutInit(td, &layout));
ULONG cFieldOffset;
}
}
ErrExit:
- END_SO_INTOLERANT_CODE;
-
if (FAILED(hr))
{
FCThrow(kBadImageFormatException);
IMDInternalImport *_pScope = pScope;
BOOL bHasExtendedChars;
- BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), FCThrowVoid(kStackOverflowException));
hr = _pScope->GetUserString(tk, pCount, &bHasExtendedChars, (LPCWSTR *)pszName);
- END_SO_INTOLERANT_CODE;
-
if (FAILED(hr))
{
FCThrowVoid(kBadImageFormatException);
HRESULT hr = S_OK;
IMDInternalImport *_pScope = pScope;
- BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), FCThrowVoid(kStackOverflowException));
if (TypeFromToken(tk) == mdtMethodDef)
{
hr = _pScope->GetNameOfMethodDef(tk, pszName);
{
hr = E_FAIL;
}
- END_SO_INTOLERANT_CODE;
if (FAILED(hr))
{
IMDInternalImport *_pScope = pScope;
LPCSTR szName = NULL;
- BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), FCThrowVoid(kStackOverflowException));
hr = _pScope->GetNameOfTypeDef(tk, &szName, pszName);
- END_SO_INTOLERANT_CODE;
if (FAILED(hr))
{
HRESULT hr;
IMDInternalImport *_pScope = pScope;
- BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), FCThrowVoid(kStackOverflowException));
hr = _pScope->GetGenericParamProps(tk, NULL, pAttributes, NULL, NULL, NULL);
- END_SO_INTOLERANT_CODE;
-
if (FAILED(hr))
{
FCThrowVoid(kBadImageFormatException);
HRESULT hr;
IMDInternalImport *_pScope = pScope;
- BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), FCThrowVoid(kStackOverflowException));
hr = _pScope->GetEventProps(tk, pszName, (DWORD*)pdwEventFlags, NULL);
- END_SO_INTOLERANT_CODE;
-
if (FAILED(hr))
{
FCThrowVoid(kBadImageFormatException);
IMDInternalImport *_pScope = pScope;
mdModule tkModule;
- BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), FCThrowVoid(kStackOverflowException));
hr = _pScope->GetPinvokeMap(tk, pMappingFlags, pszImportName, &tkModule);
if (FAILED(hr))
{
{
hr = _pScope->GetModuleRefProps(tkModule, pszImportDll);
}
- END_SO_INTOLERANT_CODE;
if (FAILED(hr))
{
IMDInternalImport *_pScope = pScope;
USHORT usSequence = 0;
- BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), FCThrowVoid(kStackOverflowException));
-
// Is this a valid token?
if (_pScope->IsValidToken((mdParamDef)tk))
{
hr = COR_E_BADIMAGEFORMAT;
}
*pSequence = (INT32) usSequence;
- END_SO_INTOLERANT_CODE;
if (FAILED(hr))
{
HRESULT hr;
IMDInternalImport *_pScope = pScope;
- BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), FCThrowVoid(kStackOverflowException));
hr = _pScope->GetFieldDefProps(tk, (DWORD *)pdwFieldFlags);
- END_SO_INTOLERANT_CODE;
-
if (FAILED(hr))
{
FCThrowVoid(kBadImageFormatException);
HRESULT hr;
IMDInternalImport *_pScope = pScope;
- BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), FCThrowVoid(kStackOverflowException));
hr = _pScope->GetPropertyProps(tk, pszName, (DWORD*)pdwPropertyFlags, (PCCOR_SIGNATURE*)&ppValue->m_array, (ULONG*)&ppValue->m_count);
- END_SO_INTOLERANT_CODE;
-
if (FAILED(hr))
{
FCThrowVoid(kBadImageFormatException);
HRESULT hr;
IMDInternalImport *_pScope = pScope;
- BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), FCThrowVoid(kStackOverflowException));
hr = _pScope->GetFieldMarshal(tk, (PCCOR_SIGNATURE *)&ppValue->m_array, (ULONG *)&ppValue->m_count);
if (hr == CLDB_E_RECORD_NOTFOUND)
{
ppValue->m_count = 0;
hr = S_OK;
}
- END_SO_INTOLERANT_CODE;
if (FAILED(hr))
{
HRESULT hr;
IMDInternalImport *_pScope = pScope;
- BEGIN_SO_INTOLERANT_CODE(GetThread())
hr = _pScope->GetSigOfMethodDef(tk, (ULONG*)&ppValue->m_count, (PCCOR_SIGNATURE *)&ppValue->m_array);
- END_SO_INTOLERANT_CODE;
-
if (FAILED(hr))
{
FCThrowVoid(kBadImageFormatException);
HRESULT hr;
IMDInternalImport *_pScope = pScope;
- BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), FCThrowVoid(kStackOverflowException));
hr = _pScope->GetSigFromToken(tk, (ULONG*)&ppValue->m_count, (PCCOR_SIGNATURE *)&(ppValue->m_array));
- END_SO_INTOLERANT_CODE;
-
if (FAILED(hr))
{
FCThrowVoid(kBadImageFormatException);
HRESULT hr;
IMDInternalImport *_pScope = pScope;
- BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), FCThrowVoid(kStackOverflowException));
hr = _pScope->GetSigOfFieldDef(tk, (ULONG*)&ppValue->m_count, (PCCOR_SIGNATURE *)&ppValue->m_array);
- END_SO_INTOLERANT_CODE;
-
if (FAILED(hr))
{
FCThrowVoid(kBadImageFormatException);
HRESULT hr;
IMDInternalImport *_pScope = pScope;
-
- BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), FCThrowVoid(kStackOverflowException));
switch (TypeFromToken(tk))
{
break;
}
- END_SO_INTOLERANT_CODE;
-
if (FAILED(hr))
{
FCThrowVoid(kBadImageFormatException);
HRESULT hr;
LPCSTR szName;
- BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), FCThrowVoid(kStackOverflowException));
IMDInternalImport *_pScope = pScope;
hr = _pScope->GetScopeProps(&szName, pmvid);
- END_SO_INTOLERANT_CODE;
-
if (FAILED(hr))
{
FCThrowVoid(kBadImageFormatException);
IMDInternalImport *_pScope = pScope;
LPCSTR szName_Ignore;
- BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), FCThrowVoid(kStackOverflowException));
hr = _pScope->GetNameAndSigOfMemberRef(mr, (PCCOR_SIGNATURE*)&ppvSigBlob->m_array, (ULONG*)&ppvSigBlob->m_count, &szName_Ignore);
- END_SO_INTOLERANT_CODE;
-
if (FAILED(hr))
{
FCThrowVoid(kBadImageFormatException);
NOTHROW; // Used by reverse COM IL stubs, so we must not throw exceptions back to COM
DISABLED(GC_TRIGGERS); // FCALLS with HELPER frames have issues with GC_TRIGGERS
MODE_COOPERATIVE;
- SO_TOLERANT;
} CONTRACTL_END;
int retVal = 0;
NOTHROW; // Used by reverse COM IL stubs, so we must not throw exceptions back to COM
DISABLED(GC_TRIGGERS); // FCALLS with HELPER frames have issues with GC_TRIGGERS
MODE_COOPERATIVE;
- SO_TOLERANT;
} CONTRACTL_END;
int retVal = 0;
private: // <xs:*>
void DefineSchema() { WRAPPER_NO_CONTRACT; m_tos = m_schemaRootFactory.Create(); }
- void DefineSchemaEnd() { CONTRACTL {NOTHROW; GC_NOTRIGGER; SO_TOLERANT; MODE_ANY; PRECONDITION(m_stack.GetDepth() == 0); } CONTRACTL_END; }
+ void DefineSchemaEnd() { CONTRACTL {NOTHROW; GC_NOTRIGGER; MODE_ANY; PRECONDITION(m_stack.GetDepth() == 0); } CONTRACTL_END; }
void AddElement(MdaElemDeclDef name) { WRAPPER_NO_CONTRACT; Push(CreateDeclDef(name, &m_elementFactory)); Push(m_complexTypeFactory.Create()); }
void AddElementRefType(MdaElemDeclDef name, MdaElemDeclDef type) { WRAPPER_NO_CONTRACT; AddTerminal(CreateDeclDef(name, &m_elementRefTypeFactory)->InitRef(GetDef(type))); }
void AddElementAny(MdaElemDeclDef name) { WRAPPER_NO_CONTRACT; AddTerminal(CreateDeclDef(name, &m_elementAnyFactory)); }
////
-// Why is ANYTHING in here marked SO_TOLERANT?? Presumably some of them are called from managed code????
-
-
//
// MdaFramework
//
THROWS;
GC_TRIGGERS;
MODE_ANY;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
THROWS;
GC_TRIGGERS;
MODE_ANY;
- SO_INTOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_TRIGGERS;
MODE_COOPERATIVE;
- SO_TOLERANT;
}
CONTRACTL_END;
- BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), return);
-
EX_TRY
{
GCHeapUtilities::GetGCHeap()->GarbageCollect();
// Caller cannot take exceptions.
}
EX_END_CATCH(SwallowAllExceptions);
-
- END_SO_INTOLERANT_CODE;
}
//
THROWS;
GC_TRIGGERS;
MODE_ANY;
- SO_INTOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_INTOLERANT;
PRECONDITION(CheckPointer(pEntryThunk));
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_INTOLERANT;
PRECONDITION((index >= 0) && (index < m_size));
PRECONDITION(CheckPointer(m_pList));
}
NOTHROW;
GC_TRIGGERS;
MODE_COOPERATIVE;
- SO_INTOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_TRIGGERS;
MODE_ANY;
- SO_INTOLERANT;
}
CONTRACTL_END;
THROWS;
GC_TRIGGERS;
MODE_ANY;
- SO_INTOLERANT;
}
CONTRACTL_END;
THROWS;
GC_TRIGGERS;
MODE_ANY;
- SO_INTOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_TRIGGERS;
MODE_ANY;
- SO_INTOLERANT;
}
CONTRACTL_END;
THROWS;
GC_TRIGGERS;
MODE_ANY;
- SO_INTOLERANT;
}
CONTRACTL_END;
THROWS;
GC_TRIGGERS;
MODE_ANY;
- SO_INTOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pData));
}
CONTRACTL_END;
THROWS;
GC_TRIGGERS;
MODE_ANY;
- SO_INTOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_TRIGGERS;
MODE_ANY;
- SO_INTOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_TRIGGERS;
MODE_ANY;
- SO_INTOLERANT;
}
CONTRACTL_END;
THROWS;
GC_TRIGGERS;
MODE_ANY;
- SO_TOLERANT;
}
CONTRACTL_END;
- BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), return);
-
MdaXmlElement* pXml;
MdaXmlMessage msg(this->AsMdaAssistant(), TRUE, &pXml);
msg.SendMessagef(MDARC_DLLMAIN_RETURNS_FALSE);
-
- END_SO_INTOLERANT_CODE;
}
//
THROWS;
GC_TRIGGERS;
MODE_ANY;
- SO_INTOLERANT;
}
CONTRACTL_END;
{ \
THROWS; \
GC_TRIGGERS; \
- SO_TOLERANT; \
MODE_ANY; \
} \
CONTRACTL_END; \
}
CONTRACTL_END;
-// TODO: CONTRACT_VIOLATION(SOToleranceViolation);
-
m_entries = PInvokeTable;
m_entryCount = sizeof(PInvokeTable) / sizeof(pinvoke_entry);
m_bJustMyCode = pXmlInput->GetAttributeValueAsBool(MdaAttrDecl(JustMyCode));
THROWS;
GC_TRIGGERS;
MODE_ANY;
- SO_TOLERANT;
}
CONTRACTL_END;
BOOL bFoundSomething = FALSE;
- BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), return FALSE);
-
SString moduleNameFullPath, moduleName;
ClrGetModuleFileNameNoThrow(hmod,moduleNameFullPath);
// Strip any path info
}
}
- END_SO_INTOLERANT_CODE;
-
return bFoundSomething;
}
THROWS;
GC_TRIGGERS;
MODE_ANY;
- SO_TOLERANT;
}
CONTRACTL_END;
// Is the overlapped pointer in the gc heap?
if (pOverlapped != NULL)
{
- // If a stack overflow occurs, we would just want to continue and
- // return the function pointer as expected.
- BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), return pEntry->m_realFunction);
-
BOOL fHeapPointer;
{
pEntry->m_functionName,
pEntry->m_moduleName);
}
-
- END_SO_INTOLERANT_CODE;
}
return pEntry->m_realFunction;
THROWS;
GC_TRIGGERS;
MODE_ANY;
- SO_TOLERANT;
}
CONTRACTL_END;
THROWS;
GC_TRIGGERS;
MODE_ANY;
- SO_INTOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_TRIGGERS;
MODE_ANY;
- SO_INTOLERANT;
}
CONTRACTL_END;
THROWS;
GC_TRIGGERS;
MODE_ANY;
- SO_TOLERANT;
}
CONTRACTL_END;
if (!bStackImbalance)
return;
- BEGIN_SO_INTOLERANT_CODE(GetThread());
-
MdaXmlElement* pXml;
MdaXmlMessage msg(this->AsMdaAssistant(), TRUE, &pXml);
MdaXmlElement* pMethod = pXml->AddChild(MdaElemDecl(Method));
StackSString sszMethodName;
msg.SendMessagef(MDARC_PINVOKE_SIGNATURE_MISMATCH, AsMdaAssistant()->ToString(sszMethodName, pSICookie->m_pMD).GetUnicode());
-
- END_SO_INTOLERANT_CODE;
}
#endif
THROWS;
GC_TRIGGERS;
MODE_ANY;
- SO_INTOLERANT;
}
CONTRACTL_END;
THROWS;
GC_TRIGGERS;
MODE_ANY;
- SO_INTOLERANT;
}
CONTRACTL_END;
THROWS;
GC_TRIGGERS;
MODE_ANY;
- SO_INTOLERANT;
}
CONTRACTL_END;
THROWS;
GC_TRIGGERS;
MODE_ANY;
- SO_INTOLERANT;
}
CONTRACTL_END;
THROWS;
GC_TRIGGERS;
MODE_ANY;
- SO_INTOLERANT;
}
CONTRACTL_END;
THROWS;
GC_TRIGGERS;
MODE_ANY;
- SO_INTOLERANT;
}
CONTRACTL_END;
THROWS;
GC_TRIGGERS;
MODE_ANY;
- SO_INTOLERANT;
}
CONTRACTL_END;
THROWS;
GC_TRIGGERS;
MODE_ANY;
- SO_INTOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_TRIGGERS;
MODE_ANY;
- SO_INTOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_TRIGGERS;
MODE_ANY;
- SO_INTOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_TRIGGERS;
MODE_ANY;
- SO_INTOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_TRIGGERS;
MODE_ANY;
- SO_INTOLERANT;
}
CONTRACTL_END;
THROWS;
GC_TRIGGERS;
MODE_ANY;
- SO_INTOLERANT;
}
CONTRACTL_END;
THROWS;
GC_TRIGGERS;
MODE_ANY;
- SO_INTOLERANT;
PRECONDITION(CheckPointer(pFM));
}
CONTRACTL_END;
THROWS;
GC_TRIGGERS;
MODE_ANY;
- SO_INTOLERANT;
}
CONTRACTL_END;
THROWS;
GC_NOTRIGGER;
MODE_ANY;
- SO_INTOLERANT;
}
CONTRACTL_END;
THROWS;
GC_TRIGGERS;
MODE_ANY;
- SO_INTOLERANT;
}
CONTRACTL_END;
THROWS;
GC_TRIGGERS;
MODE_ANY;
- SO_INTOLERANT;
}
CONTRACTL_END;
THROWS;
GC_NOTRIGGER;
MODE_ANY;
- SO_INTOLERANT;
INJECT_FAULT(COMPlusThrowOM());
}
CONTRACTL_END;
NOTHROW;
GC_TRIGGERS;
MODE_ANY;
- // Called from SO_TOLERANT CODE
- SO_TOLERANT;
}
CONTRACTL_END;
- BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), return);
-
EX_TRY
{
MdaXmlElement* pXml;
// Caller cannot take exceptions.
}
EX_END_CATCH(SwallowAllExceptions);
-
- END_SO_INTOLERANT_CODE;
}
NOTHROW;
GC_TRIGGERS;
MODE_ANY;
- SO_TOLERANT;
}
CONTRACTL_END;
- BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), return);
-
EX_TRY
{
MdaXmlElement* pXml;
// Caller cannot take exceptions.
}
EX_END_CATCH(SwallowAllExceptions);
-
- END_SO_INTOLERANT_CODE;
}
//
NOTHROW;
GC_TRIGGERS;
MODE_ANY;
- SO_INTOLERANT;
}
CONTRACTL_END;
THROWS;
GC_TRIGGERS;
MODE_ANY;
- SO_INTOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_TRIGGERS;
MODE_ANY;
- SO_INTOLERANT;
}
CONTRACTL_END;
THROWS;
GC_TRIGGERS;
MODE_ANY;
- SO_INTOLERANT;
}
CONTRACTL_END;
THROWS;
GC_TRIGGERS;
MODE_ANY;
- SO_INTOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_TRIGGERS;
MODE_ANY;
- SO_INTOLERANT;
}
CONTRACTL_END;
THROWS;
GC_TRIGGERS;
MODE_ANY;
- SO_INTOLERANT;
}
CONTRACTL_END;
THROWS;
GC_TRIGGERS;
MODE_ANY;
- SO_INTOLERANT;
}
CONTRACTL_END;
THROWS;
GC_TRIGGERS;
MODE_ANY;
- SO_INTOLERANT;
}
CONTRACTL_END;
THROWS;
GC_TRIGGERS;
MODE_ANY;
- SO_INTOLERANT;
}
CONTRACTL_END;
THROWS;
GC_TRIGGERS;
MODE_ANY;
- SO_INTOLERANT;
}
CONTRACTL_END;
THROWS;
GC_TRIGGERS;
MODE_ANY;
- SO_INTOLERANT;
}
CONTRACTL_END;
GC_TRIGGERS;
MODE_ANY;
DEBUG_ONLY;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
PRECONDITION(CheckPointer(pValidationResult->m_pViolatingElement));
GC_TRIGGERS;
MODE_ANY;
DEBUG_ONLY;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
GC_TRIGGERS;
MODE_ANY;
DEBUG_ONLY;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
#include "dllimportcallback.h"
#include "listlock.h"
#include "methodimpl.h"
-#include "stackprobe.h"
#include "encee.h"
#include "comsynchronizable.h"
#include "customattribute.h"
GC_NOTRIGGER;
FORBID_FAULT;
SUPPORTS_DAC;
- SO_TOLERANT;
}
CONTRACTL_END
if (FORBIDGC_LOADER_USE_ENABLED()) NOTHROW; else THROWS; // MethodImpl::FindMethodDesc can throw.
GC_NOTRIGGER;
FORBID_FAULT;
- SO_TOLERANT;
SUPPORTS_DAC;
}CONTRACTL_END;
{
// Get the metadata string name for this method
LPCUTF8 result = NULL;
-
- // This probes only if we have a thread, in which case it is OK to throw the SO.
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(COMPlusThrowSO());
-
+
if (FAILED(GetMDImport()->GetNameOfMethodDef(GetMemberDef(), &result)))
{
result = NULL;
}
-
- END_SO_INTOLERANT_CODE;
-
+
return(result);
}
}
NOTHROW;
GC_NOTRIGGER;
FORBID_FAULT;
- SO_TOLERANT;
SUPPORTS_DAC;
}
CONTRACTL_END
NOTHROW;
GC_NOTRIGGER;
FORBID_FAULT;
- SO_TOLERANT;
SUPPORTS_DAC;
}
CONTRACTL_END
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
SUPPORTS_DAC;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
SUPPORTS_DAC;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
FORBID_FAULT;
- SO_TOLERANT;
CANNOT_TAKE_LOCK;
SUPPORTS_DAC;
}
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
FORBID_FAULT;
- SO_TOLERANT;
SUPPORTS_DAC;
}
CONTRACTL_END
if (FORBIDGC_LOADER_USE_ENABLED()) NOTHROW; else THROWS;
GC_NOTRIGGER;
FORBID_FAULT;
- SO_TOLERANT;
}
CONTRACTL_END
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
}
CONTRACTL_END
NOTHROW;
GC_NOTRIGGER;
FORBID_FAULT;
- SO_TOLERANT;
SUPPORTS_DAC;
}
CONTRACTL_END
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
STATIC_CONTRACT_FORBID_FAULT;
- STATIC_CONTRACT_SO_TOLERANT;
SUPPORTS_DAC;
g_IBCLogger.LogMethodDescAccess(this);
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
STATIC_CONTRACT_FORBID_FAULT;
- STATIC_CONTRACT_SO_TOLERANT;
SUPPORTS_DAC;
MethodTable* pMT = GetMethodDescChunk()->GetMethodTable();
NOTHROW;
GC_NOTRIGGER;
FORBID_FAULT;
- SO_TOLERANT;
POSTCONDITION(CheckPointer(RETVAL));
}
CONTRACT_END
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
{
GC_NOTRIGGER;
NOTHROW;
- SO_TOLERANT;
}
CONTRACT_END;
//*******************************************************************************
BOOL MethodDesc::IsRestored_NoLogging()
{
- STATIC_CONTRACT_SO_TOLERANT;
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
STATIC_CONTRACT_FORBID_FAULT;
BOOL MethodDesc::IsRestored()
{
- STATIC_CONTRACT_SO_TOLERANT;
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
STATIC_CONTRACT_FORBID_FAULT;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
{
THROWS;
GC_TRIGGERS;
- SO_INTOLERANT;
}
CONTRACTL_END;
{
THROWS;
GC_TRIGGERS;
- SO_INTOLERANT;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
SUPPORTS_DAC;
}
GC_NOTRIGGER;
NOTHROW;
PRECONDITION(IsLCGMethod());
- SO_TOLERANT;
}
CONTRACTL_END;
GC_NOTRIGGER;
NOTHROW;
PRECONDITION(IsILStub());
- SO_TOLERANT;
}
CONTRACTL_END;
GC_NOTRIGGER;
NOTHROW;
PRECONDITION(IsDynamicMethod());
- SO_TOLERANT;
SUPPORTS_DAC;
}
CONTRACTL_END;
#include "listlock.h"
#include "methodimpl.h"
#include "guidfromname.h"
-#include "stackprobe.h"
#include "encee.h"
#include "encee.h"
#include "comsynchronizable.h"
THROWS;
GC_TRIGGERS;
MODE_ANY;
- SO_TOLERANT; // we are called from MethodTable::CanCastToClass
}
CONTRACTL_END;
return (GetApproxArrayElementTypeHandle().IsEquivalentTo(pOtherMT->GetApproxArrayElementTypeHandle() COMMA_INDEBUG(&newVisited)));
}
- BOOL bResult = FALSE;
-
- BEGIN_SO_INTOLERANT_CODE(GetThread());
- bResult = IsEquivalentTo_WorkerInner(pOtherMT COMMA_INDEBUG(&newVisited));
- END_SO_INTOLERANT_CODE;
-
- return bResult;
+ return IsEquivalentTo_WorkerInner(pOtherMT COMMA_INDEBUG(&newVisited));
}
//==========================================================================================
THROWS;
GC_TRIGGERS;
MODE_ANY;
- SO_INTOLERANT;
LOADS_TYPE(CLASS_DEPENDENCIES_LOADED);
}
CONTRACTL_END;
GC_NOTRIGGER;
MODE_ANY;
INSTANCE_CHECK;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pTargetMT));
PRECONDITION(pTargetMT->IsInterface());
PRECONDITION(!pTargetMT->HasVariance());
GC_NOTRIGGER;
MODE_ANY;
INSTANCE_CHECK;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pTargetMT));
PRECONDITION(pTargetMT->IsInterface());
PRECONDITION(IsRestored_NoLogging());
GC_NOTRIGGER;
MODE_ANY;
INSTANCE_CHECK;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pTargetMT));
PRECONDITION(!pTargetMT->IsArray());
PRECONDITION(!pTargetMT->IsInterface());
THROWS;
MODE_ANY;
GC_TRIGGERS;
- SO_INTOLERANT;
}
CONTRACTL_END;
{
THROWS;
GC_TRIGGERS;
- SO_TOLERANT;
PRECONDITION(pInterface->IsInterface()); // class we are looking up should be an interface
}
CONTRACTL_END;
{
THROWS;
GC_TRIGGERS;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
{
THROWS;
GC_TRIGGERS;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
THROWS;
GC_TRIGGERS;
MODE_ANY;
- SO_TOLERANT;
}
CONTRACTL_END;
Thread *pThread;
pThread = GetThread();
_ASSERTE(pThread);
- INTERIOR_STACK_PROBE_FOR(pThread, 8);
AppDomain *pDomain = GetAppDomain();
g_IBCLogger.LogMethodTableAccess(this);
Exit:
;
- END_INTERIOR_STACK_PROBE;
}
//==========================================================================================
{
THROWS;
GC_TRIGGERS;
- SO_TOLERANT;
INJECT_FAULT(COMPlusThrowOM());
PRECONDITION(IsFullyLoaded());
}
CONTRACTL_END;
{ // Debug-only code causes SO volation, so add exception.
- CONTRACT_VIOLATION(SOToleranceViolation);
CONSISTENCY_CHECK(CheckActivated());
}
{
THROWS;
GC_TRIGGERS;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_GC_TRIGGERS;
STATIC_CONTRACT_MODE_COOPERATIVE;
- STATIC_CONTRACT_SO_INTOLERANT;
BEGIN_CALL_TO_MANAGEDEX(fCriticalCall ? EEToManagedCriticalCall : EEToManagedDefault);
{
THROWS;
GC_TRIGGERS;
- SO_INTOLERANT;
}
CONTRACTL_END;
}
- BEGIN_SO_INTOLERANT_CODE(GetThread());
// First ensure that we're loaded to just below CLASS_DEPENDENCIES_LOADED
ClassLoader::EnsureLoaded(this, (ClassLoadLevel) (level-1));
_ASSERTE(th.IsTypeDesc() && th.IsArray());
_ASSERTE(!(level == CLASS_LOADED && !th.IsFullyLoaded()));
}
-
- END_SO_INTOLERANT_CODE;
-
#endif //!DACCESS_COMPILE
} //MethodTable::DoFullyLoad
DispatchSlot MethodTable::FindDispatchSlot(UINT32 typeID, UINT32 slotNumber, BOOL throwOnConflict)
{
WRAPPER_NO_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
DispatchSlot implSlot(NULL);
FindDispatchImpl(typeID, slotNumber, &implSlot, throwOnConflict);
return implSlot;
{
THROWS;
GC_TRIGGERS;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
{
GC_NOTRIGGER;
NOTHROW;
- SO_TOLERANT;
POSTCONDITION(CheckPointer(RETVAL, NULL_NOT_OK));
POSTCONDITION(RETVAL->m_pDebugMethodTable.IsNull() || // We must be in BuildMethdTableThrowing()
RETVAL->SanityCheck());
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pWhichParent));
PRECONDITION(IsRestored_NoLogging());
PRECONDITION(pWhichParent->IsRestored_NoLogging());
CONTRACTL {
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pWhichParent));
PRECONDITION(IsRestored_NoLogging());
PRECONDITION(pWhichParent->IsRestored_NoLogging());
CONTRACTL {
THROWS;
GC_NOTRIGGER;
- SO_TOLERANT;
} CONTRACTL_END;
if (s_pMethodDataCache == NULL)
{
if (HasInstantiation())
{
- // This is going to go recursive, so we need to use an interior stack probe
-
- INTERIOR_STACK_PROBE(GetThread());
{
Instantiation inst = GetInstantiation();
for (DWORD i = 0; i < inst.GetNumArgs(); i++)
}
}
}
- END_INTERIOR_STACK_PROBE;
}
}
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
SUPPORTS_DAC;
} CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
SUPPORTS_DAC;
} CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
PCODE GetSlot(UINT32 slotNumber)
{
WRAPPER_NO_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
CONSISTENCY_CHECK(slotNumber < GetNumVtableSlots());
TADDR pSlot = GetSlotPtrRaw(slotNumber);
TADDR GetSlotPtrRaw(UINT32 slotNum)
{
WRAPPER_NO_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
CONSISTENCY_CHECK(slotNum < GetNumVtableSlots());
if (slotNum < GetNumVirtuals())
TADDR GetSlotPtr(UINT32 slotNum)
{
WRAPPER_NO_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
// Slots in NGened images are relative pointers
CONSISTENCY_CHECK(!IsZapped());
// See JIT_IsInstanceOfInterface
inline BOOL InstanceRequiresNonTrivialInterfaceCast()
{
- STATIC_CONTRACT_SO_TOLERANT;
LIMITED_METHOD_CONTRACT;
return GetFlag(enum_flag_NonTrivialInterfaceCast);
inline DPTR(TYPE) GETTER() \
{ \
LIMITED_METHOD_CONTRACT; \
- STATIC_CONTRACT_SO_TOLERANT; \
_ASSERTE(Has##NAME()); \
return dac_cast<DPTR(TYPE)>(dac_cast<TADDR>(this) + GetOffsetOfOptionalMember(OptionalMember_##NAME)); \
}
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_COOPERATIVE;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_COOPERATIVE;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_COOPERATIVE;
}
CONTRACTL_END;
GC_NOTRIGGER;
MODE_ANY;
INSTANCE_CHECK;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pTargetMT));
PRECONDITION(!pTargetMT->IsArray());
}
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
SUPPORTS_DAC;
}
CONTRACTL_END
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
PRECONDITION(pInterface->IsInterface()); // class we are looking up should be an interface
}
CONTRACTL_END;
{
STANDARD_VM_CONTRACT;
- INTERIOR_STACK_PROBE(GetThread());
-
// Expand interfaces in superclasses first. Interfaces inherited from parents
// must have identical indexes as in the parent.
bmtRTType * pParentOfParent = pParentType->GetParentType();
// Restore parent's substitution
pParentType->SetSubstitution(parentSubstitution);
-
- END_INTERIOR_STACK_PROBE;
} // MethodTableBuilder::ExpandApproxInheritedInterfaces
//*******************************************************************************
bmtInternal->pType = new (GetStackingAllocator())
bmtMDType(pParent, pModule, cl, bmtGenericsInfo->typeContext);
- // put the interior stack probe after all the stack-allocted goop above. We check compare our this pointer to the SP on
- // the dtor to determine if we are being called on an EH path or not.
- INTERIOR_STACK_PROBE_FOR(GetThread(), 8);
-
// If not NULL, it means there are some by-value fields, and this contains an entry for each inst
#ifdef _DEBUG
_ASSERTE(pComputedPZM == Module::GetPreferredZapModuleForMethodTable(pMT));
#endif // FEATURE_PREJIT
- END_INTERIOR_STACK_PROBE;
-
return GetHalfBakedMethodTable();
} // MethodTableBuilder::BuildMethodTableThrowing
#ifdef _PREFAST_
MethodTable * pMT = NULL;
Thread * pThread = GetThread();
- BEGIN_SO_INTOLERANT_CODE_FOR(pThread, DefaultEntryProbeAmount() * 2)
MethodTable * pParentMethodTable = NULL;
SigPointer parentInst;
parentInst,
(WORD)cInterfaces);
- END_SO_INTOLERANT_CODE;
RETURN(TypeHandle(pMT));
} // ClassLoader::CreateTypeHandleForTypeDefThrowing
{
THROWS;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
static OverlappedDataObject* GetOverlapped(LPOVERLAPPED nativeOverlapped)
{
LIMITED_METHOD_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
-
+
_ASSERTE (nativeOverlapped != NULL);
return (OverlappedDataObject*)OBJECTREFToObject(ObjectFromHandle(((NATIVEOVERLAPPED_AND_HANDLE*)nativeOverlapped)->m_handle));
}
static OverlappedDataObject* GetOverlappedForTracing(LPOVERLAPPED nativeOverlapped)
{
LIMITED_METHOD_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
_ASSERTE(nativeOverlapped != NULL);
return *(OverlappedDataObject**)(((NATIVEOVERLAPPED_AND_HANDLE*)nativeOverlapped)->m_handle);
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
}
CONTRACTL_END;
MODE_COOPERATIVE;
THROWS;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACTL_END
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_COOPERATIVE;
}
CONTRACTL_END;
}
CONTRACTL_END;
- /*_ASSERTE(GetThread()->IsSOTolerant());*/
SetAppDomain(pDomain);
}
#endif
{
THROWS;
GC_NOTRIGGER;
- SO_INTOLERANT;
INJECT_FAULT(COMPlusThrowOM(););
PRECONDITION(CheckPointer(pDomain));
}
{
NOTHROW;
GC_NOTRIGGER;
- SO_INTOLERANT;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_COOPERATIVE;
}
CONTRACTL_END;
STATIC_CONTRACT_GC_NOTRIGGER;
STATIC_CONTRACT_FORBID_FAULT;
STATIC_CONTRACT_MODE_COOPERATIVE;
- STATIC_CONTRACT_SO_TOLERANT;
OBJECTREF* location;
OBJECTREF o;
PRECONDITION(CheckPointer(strAChars));
PRECONDITION(CheckPointer(strBChars));
PRECONDITION(CheckPointer(result));
- SO_TOLERANT;
} CONTRACTL_END;
WCHAR *strAStart = strAChars;
==============================================================================*/
BOOL StringObject::HasTrailByte() {
WRAPPER_NO_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
SyncBlock * pSyncBlock = PassiveGetSyncBlock();
if(pSyncBlock != NULL)
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
STATIC_CONTRACT_GC_NOTRIGGER;
STATIC_CONTRACT_FORBID_FAULT;
- STATIC_CONTRACT_VIOLATION(SOToleranceViolation);
-
m_asObj = (Object*)POISONC;
Thread::ObjectRefNew(this);
}
STATIC_CONTRACT_MODE_COOPERATIVE;
STATIC_CONTRACT_FORBID_FAULT;
- STATIC_CONTRACT_VIOLATION(SOToleranceViolation);
-
VALIDATEOBJECT(objref.m_asObj);
// !!! If this assert is fired, there are two possibilities:
STATIC_CONTRACT_GC_NOTRIGGER;
STATIC_CONTRACT_FORBID_FAULT;
- STATIC_CONTRACT_VIOLATION(SOToleranceViolation);
-
//_ASSERTE(nul == 0);
m_asObj = (Object*)nul;
if( m_asObj != NULL)
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
STATIC_CONTRACT_FORBID_FAULT;
- STATIC_CONTRACT_SO_TOLERANT;
if (!(((*(BYTE**)&dest) < g_lowest_address ) ||
((*(BYTE**)&dest) >= g_highest_address)))
{
THROWS;
GC_TRIGGERS;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
THROWS;
GC_TRIGGERS;
MODE_COOPERATIVE;
- SO_TOLERANT;
}
CONTRACTL_END;
Nullable* dest = (Nullable*) destPtr;
NOTHROW;
GC_NOTRIGGER;
MODE_COOPERATIVE;
- SO_TOLERANT;
}
CONTRACTL_END;
Nullable* dest = (Nullable*) destPtr;
NOTHROW;
GC_NOTRIGGER;
MODE_COOPERATIVE;
- SO_TOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
MODE_COOPERATIVE;
- SO_TOLERANT;
}
CONTRACTL_END;
Nullable* dest = (Nullable*) destPtr;
GC_NOTRIGGER;
NOTHROW;
MODE_COOPERATIVE;
- SO_TOLERANT;
}
CONTRACTL_END;
GC_NOTRIGGER;
NOTHROW;
MODE_COOPERATIVE;
- SO_TOLERANT;
}
CONTRACTL_END;
GC_NOTRIGGER;
NOTHROW;
MODE_COOPERATIVE;
- SO_TOLERANT;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_COOPERATIVE;
}
CONTRACTL_END;
NOTHROW;
MODE_COOPERATIVE;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACTL_END;
NOTHROW;
MODE_COOPERATIVE;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACTL_END;
NOTHROW;
MODE_COOPERATIVE;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACTL_END;
NOTHROW;
MODE_COOPERATIVE;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_COOPERATIVE;
}
CONTRACTL_END;
FORCEINLINE bool Object::TryEnterObjMonitorSpinHelper()
{
CONTRACTL{
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_COOPERATIVE;
NOTHROW;
GC_NOTRIGGER;
FORBID_FAULT;
- SO_TOLERANT;
SUPPORTS_DAC;
}
CONTRACTL_END
// if we have allocated an array object of type T then the ArrayTypeDesc
// for T[] is available and restored
- // @todo This should be turned into a probe with a hard SO when we have one
- // See also: ArrayBase::SetArrayMethodTable, ArrayBase::SetArrayMethodTableForLargeObject and MethodTable::DoFullyLoad
- CONTRACT_VIOLATION(SOToleranceViolation);
// == FailIfNotLoadedOrNotRestored
TypeHandle arrayType = ClassLoader::LoadArrayTypeThrowing(pMT->GetApproxArrayElementTypeHandle(), kind, rank, ClassLoader::DontLoadTypes);
CONSISTENCY_CHECK(!arrayType.IsNull());
// type is stored in the array or not
inline TypeHandle ArrayBase::GetArrayElementTypeHandle() const
{
- STATIC_CONTRACT_SO_TOLERANT;
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
STATIC_CONTRACT_FORBID_FAULT;
NOTHROW;
GC_NOTRIGGER;
FORBID_FAULT;
- SO_TOLERANT;
SUPPORTS_DAC;
}
CONTRACTL_END
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
}
CONTRACT_END;
NOTHROW;
GC_TRIGGERS;
MODE_ANY;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pType));
}
CONTRACTL_END;
void SafeVariantClearHelper(_Inout_ VARIANT* pVar)
{
- STATIC_CONTRACT_SO_INTOLERANT;
WRAPPER_NO_CONTRACT;
- BEGIN_SO_TOLERANT_CODE(GetThread());
VariantClear(pVar);
- END_SO_TOLERANT_CODE;
}
class OutOfMemoryException;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_INTOLERANT;
}
CONTRACTL_END;
GC_NOTRIGGER;
MODE_ANY;
SUPPORTS_DAC;
- SO_TOLERANT;
}
CONTRACTL_END;
INSTANCE_CHECK;
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACT_CHECK_END;
{
THROWS;
GC_NOTRIGGER;
- SO_INTOLERANT;
MODE_ANY;
}
CONTRACTL_END;
THROWS;
GC_TRIGGERS;
MODE_ANY;
- SO_INTOLERANT;
INJECT_FAULT(COMPlusThrowOM(););
}
CONTRACTL_END;
THROWS;
GC_NOTRIGGER;
MODE_ANY;
- SO_INTOLERANT;
INJECT_FAULT(COMPlusThrowOM(););
}
CONTRACTL_END;
NOTHROW;
CANNOT_TAKE_LOCK;
MODE_ANY;
- SO_TOLERANT;
SUPPORTS_DAC;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_INTOLERANT;
}
CONTRACT_END;
*/
WRAPPER(GC_TRIGGERS);
MODE_ANY;
CAN_TAKE_LOCK;
- SO_INTOLERANT;
}
CONTRACTL_END;
inline BOOL PEFile::IsILOnly()
{
- STATIC_CONTRACT_SO_TOLERANT;
WRAPPER_NO_CONTRACT;
SUPPORTS_DAC;
{
BOOL retVal = FALSE;
- BEGIN_SO_INTOLERANT_CODE(GetThread());
-
//don't want to touch the IL image unless we already have
ReleaseHolder<PEImage> pNativeImage = GetNativeImageWithRef();
if (pNativeImage)
retVal = pNativeImage->IsNativeILILOnly();
}
- END_SO_INTOLERANT_CODE;
-
return retVal;
}
#endif // DACCESS_COMPILE
NOTHROW;
GC_NOTRIGGER;
FORBID_FAULT;
- SO_TOLERANT;
SUPPORTS_DAC;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
CANNOT_TAKE_LOCK;
SUPPORTS_DAC;
}
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
CANNOT_TAKE_LOCK;
SUPPORTS_DAC;
}
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
SUPPORTS_DAC;
}
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
CANNOT_TAKE_LOCK;
SUPPORTS_DAC;
}
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
SUPPORTS_DAC;
}
CONTRACT_END;
THROWS;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
POSTCONDITION(CheckStartup());
INJECT_FAULT(COMPlusThrowOM(););
}
if (CheckStartup())
RETURN;
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(COMPlusThrowSO());
-
s_hashLock.Init(CrstPEImage, (CrstFlags)(CRST_REENTRANCY|CRST_TAKEN_DURING_SHUTDOWN));
LockOwner lock = { &s_hashLock, IsOwnerOfCrst };
s_Images = ::new PtrHashMap;
#else // FEATURE_USE_LCID
g_lcid = NULL; // invariant
#endif //FEATURE_USE_LCID
- END_SO_INTOLERANT_CODE;
RETURN;
}
PTR_PEImageLayout pRetVal;
#ifndef DACCESS_COMPILE
- BEGIN_SO_INTOLERANT_CODE(GetThread());
// First attempt to find an existing layout matching imageLayoutMask. If that fails,
// and the caller has asked us to create layouts if needed, then try again passing
// the create flag to GetLayoutInternal. We need this to be synchronized, but the common
SimpleWriteLockHolder lock(m_pLayoutLock);
pRetVal = GetLayoutInternal(imageLayoutMask,flags);
}
- END_SO_INTOLERANT_CODE;
-
+
return pRetVal;
#else
NOTHROW;
GC_NOTRIGGER;
FORBID_FAULT;
- SO_TOLERANT;
SUPPORTS_DAC;
}
CONTRACTL_END;
inline const SString &PEImage::GetPath()
{
LIMITED_METHOD_DAC_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
return m_path;
}
inline void PEImage::SetModuleFileNameHintForDAC()
{
LIMITED_METHOD_DAC_CONTRACT;
- STATIC_CONTRACT_SO_INTOLERANT;
// Grab module name only for triage dumps where full paths are excluded
// because may contain PII data.
{
PRECONDITION(CheckPointer((PEImageLayout *)mapping));
PRECONDITION(CheckPointer((PEImageLayout *)(base<<1),NULL_OK));
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
CONTRACTL {
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
SUPPORTS_DAC;
} CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
CONTRACTL {
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
} CONTRACTL_END;
PCODE pTarget = NULL;
{
NOTHROW;
GC_NOTRIGGER;
- SO_INTOLERANT;
MODE_PREEMPTIVE;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_INTOLERANT;
MODE_ANY;
}
CONTRACTL_END;
GC_TRIGGERS;
MODE_PREEMPTIVE;
CAN_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
GC_TRIGGERS;
MODE_PREEMPTIVE;
CAN_TAKE_LOCK;
-
- // This is the entrypoint into the EE by a trigger process. As such, this
- // is profiling-specific and not considered mainline EE code.
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// reader lock to prevent things from changing while reading...
CAN_TAKE_LOCK;
- SO_NOT_MAINLINE;
} CONTRACTL_END;
EEJitManager::CodeHeapIterator heapIterator;
MODE_ANY;
CAN_TAKE_LOCK;
// (See comments in code:ProfToEEInterfaceImpl::EnumModules for info about contracts.)
-
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
CAN_TAKE_LOCK;
// (See comments in code:ProfToEEInterfaceImpl::EnumModules for info about contracts.)
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
GC_TRIGGERS;
MODE_ANY;
CAN_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
GC_TRIGGERS;
MODE_ANY;
CAN_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
GC_NOTRIGGER;
MODE_ANY;
CAN_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
GC_NOTRIGGER;
MODE_ANY;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
{
THROWS;
GC_TRIGGERS;
- SO_INTOLERANT;
MODE_ANY;
}
CONTRACTL_END;
FORBID_FAULT;
MODE_ANY;
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
FORBID_FAULT;
MODE_ANY;
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// GC_NOTRIGGER
// MODE_ANY
// CANNOT_TAKE_LOCK
-// SO_NOT_MAINLINE
// (EE_THREAD_(NOT)_REQUIRED are unenforced and are thus optional. If you wish
// to specify these, EE_THREAD_NOT_REQUIRED is preferred.)
// Note that the preferred contracts in this file are DIFFERENT than the preferred
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
// Yay!
MODE_ANY;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_INTOLERANT;
MODE_ANY;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_INTOLERANT;
MODE_ANY;
}
CONTRACTL_END;
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Yay!
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Yay!
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Yay!
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Yay!
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Yay!
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Yay!
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
CAN_TAKE_LOCK;
CANNOT_RETAKE_LOCK;
- SO_NOT_MAINLINE;
// If this is called asynchronously (from a hijacked thread, as with F1), it must not re-enter the
// host (SQL). Corners will be cut to ensure this is the case
MODE_ANY;
EE_THREAD_NOT_REQUIRED;
CAN_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// this function was called.
CANNOT_RETAKE_LOCK;
- SO_NOT_MAINLINE;
// If this is called asynchronously (from a hijacked thread, as with F1), it must not re-enter the
// host (SQL). Corners will be cut to ensure this is the case
// Grabbing the rejitid requires entering the rejit manager's hash table & lock,
CAN_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// PEFile::GetRWImporter and GetReadablePublicMetaDataInterface take locks
CAN_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// appropriate jit manager.
CAN_TAKE_LOCK;
- SO_NOT_MAINLINE;
// If this is called asynchronously (from a hijacked thread, as with F1), it must not re-enter the
// host (SQL). Corners will be cut to ensure this is the case
// (See locking contract comment in GetCodeInfoHelper.)
CANNOT_RETAKE_LOCK;
- SO_NOT_MAINLINE;
// If this is called asynchronously (from a hijacked thread, as with F1), it must not re-enter the
// host (SQL). Corners will be cut to ensure this is the case
// (See locking contract comment in GetCodeInfoHelper.)
CANNOT_RETAKE_LOCK;
- SO_NOT_MAINLINE;
// If this is called asynchronously (from a hijacked thread, as with F1), it must not re-enter the
// host (SQL). Corners will be cut to ensure this is the case
// We need to access the rejitmanager, which means taking locks
CAN_TAKE_LOCK;
- SO_NOT_MAINLINE;
PRECONDITION(CheckPointer(pcCodeInfos, NULL_OK));
PRECONDITION(CheckPointer(codeInfos, NULL_OK));
// Yay!
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Yay!
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_NOT_MAINLINE;
MODE_ANY;
}
CONTRACTL_END;
// Yay!
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Yay!
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Yay!
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Yay!
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Yay!
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// FieldDesc::GetStaticAddress takes a lock
CAN_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// FieldDesc::GetStaticAddress & FieldDesc::GetBaseInDomain take locks
CAN_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Yay!
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Yay!
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Yay!
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// (See comment above GC_TRIGGERS.)
CAN_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Yay!
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Yay!
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
PRECONDITION(CheckPointer(pParentClassId, NULL_OK));
PRECONDITION(CheckPointer(pModuleId, NULL_OK));
// Yay!
EE_THREAD_NOT_REQUIRED;
- SO_NOT_MAINLINE;
PRECONDITION(CheckPointer((Module *)moduleId, NULL_OK));
PRECONDITION(CheckPointer(ppBaseLoadAddress, NULL_OK));
// Yay!
EE_THREAD_NOT_REQUIRED;
- SO_NOT_MAINLINE;
PRECONDITION(CheckPointer((Module *)moduleId, NULL_OK));
PRECONDITION(CheckPointer(ppBaseLoadAddress, NULL_OK));
// GetReadablePublicMetaDataInterface take locks
CAN_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// PEFile::CheckLoaded & Module::GetDynamicIL both take a lock
CAN_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// (see GC_TRIGGERS comment)
CAN_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Module::SetDynamicIL & PEFile::CheckLoaded & PEFile::GetEmitter take locks
CAN_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Debugger::SetILInstrumentedCodeMap takes a lock when it calls Debugger::GetOrCreateMethodInfo
CAN_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// mother of all locks: the thread store lock.
CAN_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Yay!
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Yay!
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Yay!
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Debugger::AcquireDebuggerLock
CAN_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Debugger::AcquireDebuggerLock
CAN_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Object::GetTypeHandle takes a lock
CAN_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// ClassLoader::LoadTypeDefOrRefThrowing takes a lock
CAN_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// ClassLoader::LoadGenericInstantiationThrowing takes a lock
CAN_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Yay!
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// MethodDesc::FindOrCreateAssociatedMethodDesc enters a Crst
CAN_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// AppDomain::GetFriendlyNameForDebugger takes a lock
CAN_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// PEAssembly::GetSimpleName() enters a lock via use of the metadata interface
CAN_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Yay!
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Yay!
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// this function was called.
CANNOT_RETAKE_LOCK;
- SO_NOT_MAINLINE;
PRECONDITION(CheckPointer(pClassId, NULL_OK));
PRECONDITION(CheckPointer(pModuleId, NULL_OK));
// this function was called.
CANNOT_RETAKE_LOCK;
- SO_NOT_MAINLINE;
PRECONDITION(CheckPointer(isDynamic, NULL_OK));
}
// Grabbing the rejitid requires entering the rejit manager's hash table & lock,
CAN_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// this function was called.
CANNOT_RETAKE_LOCK;
- SO_NOT_MAINLINE;
PRECONDITION(CheckPointer(pModuleId, NULL_OK));
PRECONDITION(CheckPointer(ppvSig, NULL_OK));
EE_THREAD_NOT_REQUIRED;
CAN_TAKE_LOCK;
- SO_NOT_MAINLINE;
PRECONDITION(CheckPointer(pcCodeStartAddresses, NULL_OK));
PRECONDITION(CheckPointer(codeStartAddresses, NULL_OK));
MODE_ANY;
CAN_TAKE_LOCK;
- SO_NOT_MAINLINE;
PRECONDITION(CheckPointer(pcMap, NULL_OK));
PRECONDITION(CheckPointer(map, NULL_OK));
EE_THREAD_NOT_REQUIRED;
CAN_TAKE_LOCK;
- SO_NOT_MAINLINE;
PRECONDITION(CheckPointer(pcCodeInfos, NULL_OK));
PRECONDITION(CheckPointer(codeInfos, NULL_OK));
// Yay!
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
PRECONDITION(CheckPointer(pBufferLengthOffset, NULL_OK));
PRECONDITION(CheckPointer(pStringLengthOffset, NULL_OK));
// Yay!
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
PRECONDITION(CheckPointer(pStringLengthOffset, NULL_OK));
PRECONDITION(CheckPointer(pBufferOffset, NULL_OK));
// Yay!
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
PRECONDITION(CheckPointer(pBufferLengthOffset, NULL_OK));
PRECONDITION(CheckPointer(pStringLengthOffset, NULL_OK));
// Yay!
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
PRECONDITION(CheckPointer(rFieldOffset, NULL_OK));
PRECONDITION(CheckPointer(pcFieldOffset));
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW; // throw is RIGHT out... the throw at minimum allocates the thrown object which we *must* not do
GC_NOTRIGGER; // the stack is not necessarily crawlable at this state !!!) we must not induce a GC
}
// this function was called.
CANNOT_RETAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Yay!
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
PRECONDITION(CheckPointer(pcObjectRanges));
PRECONDITION(cObjectRanges <= 0 || ranges != NULL);
// Yay!
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
PRECONDITION(CheckPointer(pinfo));
}
// Yay!
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
PRECONDITION(objectId != NULL);
PRECONDITION(CheckPointer(range));
// The rejit tables use a lock
CAN_TAKE_LOCK;
- SO_NOT_MAINLINE;
PRECONDITION(CheckPointer(pcReJitIds, NULL_OK));
PRECONDITION(CheckPointer(reJitIds, NULL_OK));
// We need to suspend the runtime, this takes a lot of locks!
CAN_TAKE_LOCK;
- SO_NOT_MAINLINE;
PRECONDITION(CheckPointer(moduleIds, NULL_OK));
PRECONDITION(CheckPointer(methodIds, NULL_OK));
// The rejit manager requires a lock to iterate through methods to revert
CAN_TAKE_LOCK;
- SO_NOT_MAINLINE;
PRECONDITION(CheckPointer(moduleIds, NULL_OK));
PRECONDITION(CheckPointer(methodIds, NULL_OK));
// the JIT data structures.
CAN_TAKE_LOCK;
- SO_NOT_MAINLINE;
PRECONDITION(CheckPointer(ppEnum, NULL_OK));
// Gathering RejitIDs also takes a lock.
CAN_TAKE_LOCK;
- SO_NOT_MAINLINE;
PRECONDITION(CheckPointer(ppEnum, NULL_OK));
// (See comment above GC_TRIGGERS.)
CAN_TAKE_LOCK;
- SO_NOT_MAINLINE;
PRECONDITION(CheckPointer(ppEnum, NULL_OK));
// Yay!
CANNOT_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Crst is used in ProfilingAPIDetach::RequestProfilerDetach
CAN_TAKE_LOCK;
-
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// ProfileArgIterator::ProfileArgIterator may take locks
CAN_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// ProfilingGetFunctionEnter3Info may take locks
CAN_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// ProfileArgIterator::ProfileArgIterator may take locks
CAN_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// ProfilingGetFunctionLeave3Info may take locks
CAN_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// ProfileArgIterator::ProfileArgIterator may take locks
CAN_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// ProfilingGetFunctionTailcall3Info may take locks
CAN_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Need to acquire the thread store lock
CAN_TAKE_LOCK;
- SO_NOT_MAINLINE;
PRECONDITION(CheckPointer(ppEnum, NULL_OK));
// May take thread store lock and OS APIs may also take locks
CAN_TAKE_LOCK;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
NOTHROW;
GC_TRIGGERS;
MODE_ANY;
- SO_NOT_MAINLINE;
CAN_TAKE_LOCK;
PRECONDITION(CheckPointer(ppEnum));
}
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
}
CONTRACTL_END;
{
FunctionIDOrClientID functionIDOrClientID;
functionIDOrClientID.clientID = clientData;
- REMOVE_STACK_GUARD_FOR_PROFILER_CALL;
g_profControlBlock.pProfInterface->GetEnter3WithInfoHook()(
functionIDOrClientID,
(COR_PRF_ELT_INFO)&eltInfo);
//
if (CORProfilerELT2FastPathEnterEnabled())
{
- REMOVE_STACK_GUARD_FOR_PROFILER_CALL;
g_profControlBlock.pProfInterface->GetEnter2Hook()(
functionId,
clientData,
HRESULT hr = ProfilingGetFunctionEnter3Info(functionId, (COR_PRF_ELT_INFO)&eltInfo, &frameInfo, &ulArgInfoSize, pArgumentInfo);
_ASSERTE(hr == S_OK);
- REMOVE_STACK_GUARD_FOR_PROFILER_CALL;
g_profControlBlock.pProfInterface->GetEnter2Hook()(functionId, clientData, frameInfo, pArgumentInfo);
goto LExit;
// Everett ELT
//
{
- REMOVE_STACK_GUARD_FOR_PROFILER_CALL;
g_profControlBlock.pProfInterface->GetEnterHook()((FunctionID)clientData);
}
{
FunctionIDOrClientID functionIDOrClientID;
functionIDOrClientID.clientID = clientData;
- REMOVE_STACK_GUARD_FOR_PROFILER_CALL;
g_profControlBlock.pProfInterface->GetLeave3WithInfoHook()(
functionIDOrClientID,
(COR_PRF_ELT_INFO)&eltInfo);
//
if (CORProfilerELT2FastPathLeaveEnabled())
{
- REMOVE_STACK_GUARD_FOR_PROFILER_CALL;
g_profControlBlock.pProfInterface->GetLeave2Hook()(
functionId,
clientData,
HRESULT hr = ProfilingGetFunctionLeave3Info(functionId, (COR_PRF_ELT_INFO)&eltInfo, &frameInfo, &argumentRange);
_ASSERTE(hr == S_OK);
- REMOVE_STACK_GUARD_FOR_PROFILER_CALL;
g_profControlBlock.pProfInterface->GetLeave2Hook()(functionId, clientData, frameInfo, &argumentRange);
goto LExit;
}
// Everett ELT
//
{
- REMOVE_STACK_GUARD_FOR_PROFILER_CALL;
g_profControlBlock.pProfInterface->GetLeaveHook()((FunctionID)clientData);
}
{
FunctionIDOrClientID functionIDOrClientID;
functionIDOrClientID.clientID = clientData;
- REMOVE_STACK_GUARD_FOR_PROFILER_CALL;
g_profControlBlock.pProfInterface->GetTailcall3WithInfoHook()(
functionIDOrClientID,
(COR_PRF_ELT_INFO)&eltInfo);
//
if (CORProfilerELT2FastPathTailcallEnabled())
{
- REMOVE_STACK_GUARD_FOR_PROFILER_CALL;
g_profControlBlock.pProfInterface->GetTailcall2Hook()(
functionId,
clientData,
HRESULT hr = ProfilingGetFunctionTailcall3Info(functionId, (COR_PRF_ELT_INFO)&eltInfo, &frameInfo);
_ASSERTE(hr == S_OK);
- REMOVE_STACK_GUARD_FOR_PROFILER_CALL;
g_profControlBlock.pProfInterface->GetTailcall2Hook()(functionId, clientData, frameInfo);
goto LExit;
}
//
// Everett ELT
//
- {
- REMOVE_STACK_GUARD_FOR_PROFILER_CALL;
- g_profControlBlock.pProfInterface->GetTailcallHook()((FunctionID)clientData);
- }
+ g_profControlBlock.pProfInterface->GetTailcallHook()((FunctionID)clientData);
LExit:
MODE_ANY;
CANNOT_TAKE_LOCK;
EE_THREAD_NOT_REQUIRED;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
//
// BOOL QCALLTYPE FooNative::Bar(int flags, LPCWSTR wszString, QCall::StringHandleOnStack retString)
// {
-// // All QCalls should have QCALL_CONTRACT. It is alias for THROWS; GC_TRIGGERS; MODE_PREEMPTIVE; SO_TOLERANT.
+// // All QCalls should have QCALL_CONTRACT. It is alias for THROWS; GC_TRIGGERS; MODE_PREEMPTIVE.
// QCALL_CONTRACT;
//
// // Optionally, use QCALL_CHECK instead and the expanded form of the contract if you want to specify preconditions:
UNINSTALL_UNWIND_AND_CONTINUE_HANDLER \
UNINSTALL_MANAGED_EXCEPTION_DISPATCHER
-#define BEGIN_QCALL_SO_TOLERANT \
- INSTALL_MANAGED_EXCEPTION_DISPATCHER \
- INSTALL_UNWIND_AND_CONTINUE_HANDLER_NO_PROBE
-
-#define END_QCALL_SO_TOLERANT \
- UNINSTALL_UNWIND_AND_CONTINUE_HANDLER_NO_PROBE \
- UNINSTALL_MANAGED_EXCEPTION_DISPATCHER
-
-
#define QCALL_CHECK \
THROWS; \
GC_TRIGGERS; \
MODE_PREEMPTIVE; \
- SO_TOLERANT; \
#define QCALL_CONTRACT CONTRACTL { QCALL_CHECK; } CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
MODE_COOPERATIVE;
- SO_TOLERANT;
}
CONTRACTL_END;
{
GC_NOTRIGGER;
NOTHROW;
- SO_TOLERANT;
SUPPORTS_DAC;
}
CONTRACTL_END;
{
GC_NOTRIGGER;
NOTHROW;
- SO_TOLERANT;
SUPPORTS_DAC;
}
CONTRACTL_END;
{
GC_NOTRIGGER;
NOTHROW;
- SO_TOLERANT;
SUPPORTS_DAC;
}
CONTRACTL_END;
{
GC_NOTRIGGER;
NOTHROW;
- SO_INTOLERANT;
SUPPORTS_DAC;
PRECONDITION(!m_availableTypesHashtable.IsNull());
}
{
GC_NOTRIGGER;
NOTHROW;
- SO_TOLERANT;
SUPPORTS_DAC;
PRECONDITION(TypeFromToken(mdType) == mdtTypeDef || TypeFromToken(mdType) == mdtTypeRef || TypeFromToken(mdType) == mdtExportedType);
}
{
GC_NOTRIGGER;
NOTHROW;
- SO_TOLERANT;
SUPPORTS_DAC;
PRECONDITION(TypeFromToken(mdType) == mdtTypeDef || TypeFromToken(mdType) == mdtTypeRef || TypeFromToken(mdType) == mdtExportedType);
}
{
GC_NOTRIGGER;
NOTHROW;
- SO_TOLERANT;
SUPPORTS_DAC;
PRECONDITION(TypeFromToken(mdToken1) == mdtTypeDef || TypeFromToken(mdToken1) == mdtTypeRef || TypeFromToken(mdToken1) == mdtExportedType);
PRECONDITION(TypeFromToken(mdToken2) == mdtTypeDef || TypeFromToken(mdToken2) == mdtExportedType);
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
}
}
DoneCreateInstance:
+ ;
HELPER_METHOD_FRAME_END();
return OBJECTREFToObject(rv);
}
Thread * pThread = GET_THREAD();
- // Make sure we have enough room on the stack for this. Note that we will need the stack amount twice - once to build the stack
- // and second time to actually make the call.
- INTERIOR_STACK_PROBE_FOR(pThread, 1 + static_cast<UINT>((2 * nAllocaSize) / GetOsPageSize()) + static_cast<UINT>(HOLDER_CODE_NORMAL_STACK_LIMIT));
-
LPBYTE pAlloc = (LPBYTE)_alloca(nAllocaSize);
LPBYTE pTransitionBlock = pAlloc + TransitionBlock::GetNegSpaceSize();
if (pProtectValueClassFrame != NULL)
pProtectValueClassFrame->Pop(pThread);
- END_INTERIOR_STACK_PROBE;
}
Done:
+ ;
HELPER_METHOD_FRAME_END();
return OBJECTREFToObject(gc.retVal);
}
PAL_ENDTRY;
-#ifdef FEATURE_STACK_PROBE
- if (param.fStackOverflow)
- COMPlusThrowSO();
-#else
//This will not be set as clr to managed transition code will terminate the
//process if there is an SO before SODetectionFilter() is called.
_ASSERTE(!param.fStackOverflow);
-#endif
}
//
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
POSTCONDITION(CheckPointer(RETVAL));
}
CONTRACT_END;
{
THROWS;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACT_END;
#include "codeman.h"
#include "corhlpr.h"
#include "jitinterface.h"
-#include "stackprobe.h"
#include "eeconfig.h"
#include "eehash.h"
#include "interoputil.h"
if (refType == NULL)
FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));
- BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), FCThrow(kStackOverflowException));
-
result = refType->GetType().GetModule();
- END_SO_INTOLERANT_CODE;
-
FC_RETURN_MODULE_OBJECT(result, refType);
}
FCIMPLEND
MethodTable* pMT = NULL;
mdTypeDef tkTypeDef = mdTokenNil;
- BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), FCThrow(kStackOverflowException));
if (typeHandle.IsTypeDesc()) {
if (typeHandle.IsGenericVariable()) {
}
Exit:
- END_SO_INTOLERANT_CODE;
-
if (fThrowException)
{
FCThrowRes(reKind, argName);
if (!pMethod)
FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));
- INT32 retVal = 0;
- BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), FCThrow(kStackOverflowException));
- retVal = (INT32)pMethod->GetAttrs();
- END_SO_INTOLERANT_CODE;
- return retVal;
+ return (INT32)pMethod->GetAttrs();
}
FCIMPLEND
if (IsNilToken(pMethod->GetMemberDef()))
return attributes;
- BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), FCThrow(kStackOverflowException));
- {
- attributes = (INT32)pMethod->GetImplAttrs();
- }
- END_SO_INTOLERANT_CODE;
-
- return attributes;
+ return (INT32)pMethod->GetImplAttrs();
}
FCIMPLEND
}
CONTRACTL_END;
- BOOL ret = FALSE;
- BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), FCThrow(kStackOverflowException));
- ret = (BOOL)pMethod->IsClassConstructorOrCtor();
- END_SO_INTOLERANT_CODE;
+ BOOL ret = (BOOL)pMethod->IsClassConstructorOrCtor();
FC_RETURN_BOOL(ret);
}
FCIMPLEND
if (!pField)
FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));
- INT32 ret = 0;
- BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), FCThrow(kStackOverflowException));
- ret = (INT32)pField->GetAttributes();
- END_SO_INTOLERANT_CODE;
- return ret;
+ return (INT32)pField->GetAttributes();
}
FCIMPLEND
THROWS;
MODE_COOPERATIVE;
INSTANCE_CHECK;
- SO_TOLERANT;
} CONTRACTL_END;
_ASSERTE(IsFullyInitialized());
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
} CONTRACTL_END;
ctx->nbit_total[0] = ctx->nbit_total[1] = 0;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
} CONTRACTL_END;
const BYTE *fresh_data = msg;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
} CONTRACTL_END;
DWORDC nbit0 = ctx->nbit_total[0];
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
} CONTRACTL_END;
m_fFinalized = FALSE;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
} CONTRACTL_END;
if (m_fFinalized)
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
} CONTRACTL_END;
if (m_fFinalized)
}
else
{
- // This function is recursive, so it must have an interior probe
- INTERIOR_STACK_PROBE_FOR_NOTHROW_CHECK_THREAD(10, NO_FORBIDGC_LOADER_USE_ThrowSO(););
-
#ifdef _DEBUG_IMPL
// This verifies that we won't try and load a type
// if FORBIDGC_LOADER_USE_ENABLED is true.
if (!ClrSafeInt<DWORD>::multiply(ntypars, sizeof(TypeHandle), dwAllocaSize))
ThrowHR(COR_E_OVERFLOW);
- if ((dwAllocaSize/GetOsPageSize()+1) >= 2)
- {
- DO_INTERIOR_STACK_PROBE_FOR_NOTHROW_CHECK_THREAD((10+dwAllocaSize/GetOsPageSize()+1), NO_FORBIDGC_LOADER_USE_ThrowSO(););
- }
TypeHandle *thisinst = (TypeHandle*) _alloca(dwAllocaSize);
// Finally we gather up the type arguments themselves, loading at the level specified for generic arguments
{
ThrowHR(COR_E_OVERFLOW);
}
-
- if ((cAllocaSize/GetOsPageSize()+1) >= 2)
- {
- DO_INTERIOR_STACK_PROBE_FOR_NOTHROW_CHECK_THREAD((10+cAllocaSize/GetOsPageSize()+1), NO_FORBIDGC_LOADER_USE_ThrowSO(););
- }
TypeHandle *retAndArgTypes = (TypeHandle*) _alloca(cAllocaSize);
bool fReturnTypeOrParameterNotLoaded = false;
THROW_BAD_FORMAT(BFA_BAD_COMPLUS_SIG, pOrigModule);
}
- END_INTERIOR_STACK_PROBE;
}
RETURN thRet;
if (FORBIDGC_LOADER_USE_ENABLED()) GC_NOTRIGGER; else GC_TRIGGERS;
if (FORBIDGC_LOADER_USE_ENABLED()) FORBID_FAULT; else { INJECT_FAULT(COMPlusThrowOM()); }
MODE_ANY;
- SO_TOLERANT;
SUPPORTS_DAC;
}
CONTRACTL_END
if (type == ELEMENT_TYPE_VALUETYPE)
{
- BEGIN_SO_INTOLERANT_CODE(GetThread());
{
// Everett C++ compiler can generate a TypeRef with RS=0
// without respective TypeDef for unmanaged valuetypes,
if (pthValueType != NULL)
*pthValueType = th;
}
- END_SO_INTOLERANT_CODE;
}
return(type);
GC_NOTRIGGER;
FORBID_FAULT;
MODE_ANY;
- SO_TOLERANT;
SUPPORTS_DAC;
}
CONTRACTL_END
PRECONDITION(PeekElemTypeClosed(NULL, pTypeContext) == ELEMENT_TYPE_VALUETYPE);
FORBID_FAULT;
MODE_ANY;
- SO_TOLERANT;
}
CONTRACTL_END
NOTHROW;
MODE_ANY;
GC_NOTRIGGER;
- SO_INTOLERANT;
}
CONTRACTL_END;
THROWS;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
} CONTRACTL_END;
- BEGIN_SO_INTOLERANT_CODE(GetThread());
if ( (--m_nRefCount) == 0 )
delete this;
- END_SO_INTOLERANT_CODE;
-
+
return m_nRefCount;
}
HRESULT STDMETHODCALLTYPE QueryInterface( REFIID rid, void **ppUnk ) {
WRAPPER_NO_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
if ( ppUnk == NULL ) {
return E_INVALIDARG;
}
BYTE data[]) // really a const struct _IMAGE_DEBUG_DIRECTORY *
{
LIMITED_METHOD_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
return S_OK;
}
DISABLED(THROWS); // need to rewrite spin locks to no-throw.
GC_NOTRIGGER;
CAN_TAKE_LOCK;
- SO_TOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
CAN_TAKE_LOCK;
- SO_TOLERANT;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
CAN_TAKE_LOCK;
- SO_TOLERANT;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
CONTRACTL {
THROWS;
GC_NOTRIGGER;
- SO_TOLERANT;
} CONTRACTL_END;
#ifdef _DEBUG
THROWS;
GC_TRIGGERS;
MODE_ANY;
- SO_TOLERANT;
INJECT_FAULT(ThrowOutOfMemory());
PRECONDITION(m_CheckpointDepth > 0);
POSTCONDITION(CheckPointer(RETVAL));
THROWS;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
INJECT_FAULT(ThrowOutOfMemory());
PRECONDITION(m_CheckpointDepth > 0);
POSTCONDITION(CheckPointer(RETVAL));
{
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_FAULT;
- STATIC_CONTRACT_SO_TOLERANT;
#ifdef _WIN64
// size_t's too big on 64-bit platforms so we check for overflow
{
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_FAULT;
- STATIC_CONTRACT_SO_TOLERANT;
#ifdef _WIN64
// size_t's too big on 64-bit platforms so we check for overflow
{
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_FAULT;
- STATIC_CONTRACT_SO_TOLERANT;
#ifdef _WIN64
// size_t's too big on 64-bit platforms so we check for overflow
{
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_FAULT;
- STATIC_CONTRACT_SO_TOLERANT;
#ifdef _WIN64
// size_t's too big on 64-bit platforms so we check for overflow
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
INJECT_FAULT(CONTRACT_RETURN NULL;);
PRECONDITION(m_CheckpointDepth > 0);
POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
// Is the request too large for the current block?
if (n > m_BytesLeft)
{
- bool allocatedNewBlock = false;
-
- BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), RETURN NULL);
- allocatedNewBlock = AllocNewBlockForBytes(n);
- END_SO_INTOLERANT_CODE;
-
- if (!allocatedNewBlock)
+ if (!AllocNewBlockForBytes(n))
{
RETURN NULL;
}
+++ /dev/null
-// Licensed to the .NET Foundation under one or more agreements.
-// The .NET Foundation licenses this file to you under the MIT license.
-// See the LICENSE file in the project root for more information.
-//
-
-//
-//-----------------------------------------------------------------------------
-// StackProbe.cpp
-//-----------------------------------------------------------------------------
-
-
-#include "common.h"
-#include "stackprobe.h"
-
-
-#ifdef FEATURE_STACK_PROBE
-
-
-// SOTolerantBoundaryFilter is called when an exception in SO-tolerant code arrives
-// at the boundary back into SO-intolerant code.
-//
-// If we are running in an environment where we must be hardened to SO, then we must
-// catch the exception if there is not enough space to run our backout code (the stuff in the
-// EX_CATCH clauses). We also cannot let a hard SO propogate into SO-intolerant code, because
-// we rip the process if that happens (we have no way to tell that the SO is ok.)
-int SOTolerantBoundaryFilter(EXCEPTION_POINTERS *pExceptionInfo, DWORD * pdwSOTolerantFlags)
-{
- Thread *pThread = GetThread();
- _ASSERTE(pThread);
- _ASSERTE(pdwSOTolerantFlags != NULL);
- _ASSERTE(!((*pdwSOTolerantFlags) & BSTC_TRIGGERING_UNWIND_FOR_SO));
-
- SaveCurrentExceptionInfo(pExceptionInfo->ExceptionRecord, pExceptionInfo->ContextRecord);
-
- NTSTATUS exceptionCode = pExceptionInfo->ExceptionRecord->ExceptionCode;
-
- // We must always handle a hard SO
- if (IsSOExceptionCode(exceptionCode))
- {
- if (exceptionCode == EXCEPTION_SOFTSO)
- {
- *pdwSOTolerantFlags |= BSTC_IS_SOFT_SO;
- }
- *pdwSOTolerantFlags |= BSTC_IS_SO;
-
- if (!CLRHosted() || pThread == NULL || GetEEPolicy()->GetActionOnFailure(FAIL_StackOverflow) != eRudeUnloadAppDomain)
- {
- // For security reason, it is not safe to continue execution if stack overflow happens
- // unless a host tells us to do something different.
- EEPolicy::HandleFatalStackOverflow(pExceptionInfo);
- }
-
- /* If there is a SO_INTOLERANT region above this */
- /* we should have processed it already in SOIntolerantTransitionHandler */
- EEPolicy::HandleStackOverflow(SOD_SOTolerantTransitor, FRAME_TOP);
-
- *pdwSOTolerantFlags |= BSTC_TRIGGERING_UNWIND_FOR_SO;
-
- return EXCEPTION_EXECUTE_HANDLER;
- }
-
- // Make sure we have enough stack to run our backout code. If not,
- // catch the exception.
- if (! pThread->IsStackSpaceAvailable(ADJUST_PROBE(BACKOUT_CODE_STACK_LIMIT)))
- {
- *pdwSOTolerantFlags |= BSTC_TRIGGERING_UNWIND_FOR_SO;
- return EXCEPTION_EXECUTE_HANDLER;
- }
-
-
- return EXCEPTION_CONTINUE_SEARCH;
-}
-
-void SOTolerantCode_RecoverStack(DWORD dwFlags)
-{
- CONTRACTL
- {
- THROWS;
- GC_TRIGGERS;
- MODE_ANY;
- SO_TOLERANT;
- }
- CONTRACTL_END;
-
- Thread * pThread = GetThread();
- if (!(dwFlags & BSTC_IS_SOFT_SO))
- {
- pThread->RestoreGuardPage();
- }
- if (dwFlags & BSTC_IS_SO)
- {
- if (!pThread->PreemptiveGCDisabled())
- {
- pThread->DisablePreemptiveGC();
- }
- }
- COMPlusThrowSO();
-}
-
-void SOTolerantCode_ExceptBody(DWORD * pdwFlags, Frame * pSafeForSOFrame)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- MODE_ANY;
- SO_TOLERANT;
- }
- CONTRACTL_END;
-
- // do nothing here. Get our stack back post-catch and then throw a new exception
- *pdwFlags |= BSTC_RECOVER_STACK;
- if (*pdwFlags & BSTC_IS_SO)
- {
- // If this assertion fires, then it means that we have not unwound the frame chain
- Thread * pThread = GetThread();
- _ASSERTE(pSafeForSOFrame == pThread->GetFrame());
- pThread->ClearExceptionStateAfterSO(pSafeForSOFrame);
- }
-}
-
-//
-// ReportStackOverflow is called when our probe infrastructure detects that there
-// is insufficient stack to perform the operation.
-//
-
-void ReportStackOverflow()
-{
- WRAPPER_NO_CONTRACT;
-
- _ASSERTE(IsStackProbingEnabled());
-
- Thread *pThread = GetThread();
-
- if (pThread != NULL)
- {
- // We don't want an SO to happen while we are trying to throw this one. So check if there
- // is enough space left to handle an exception (this translates to check that we have stack
- // space left equivalent to the soft guard region). If not, then remove the guard page by
- // forcing a hard SO. This effectively turns the SO into a boundary SO.
-
- // We should only ever get in this situation on a probe from managed code. From within the EE,
- // we will never let our probe point get this close. Either way, we'd rip the process if a hard
- // SO occurred.
-
- UINT_PTR stackGuarantee = pThread->GetStackGuarantee();
-
- // We expect the stackGuarantee to be a multiple of the page size for
- // the call to IsStackSpaceAvailable.
- _ASSERTE(stackGuarantee%GetOsPageSize() == 0);
- if (pThread->IsStackSpaceAvailable(static_cast<float>(stackGuarantee)/GetOsPageSize()))
- {
- COMPlusThrowSO();
- }
-
- // If there isn't much stack left to attempt to report a soft stack overflow, let's trigger a hard
- // SO, so we clear the guard page and give us at least another page of stack to work with.
-
- if (!pThread->IsStackSpaceAvailable(ADJUST_PROBE(1)))
- {
- DontCallDirectlyForceStackOverflow();
- }
- }
-
- RaiseException(EXCEPTION_SOFTSO, 0, 0, NULL);
-}
-
-void CheckForSOInSOIntolerantCode()
-{
- Thread *pThread = GetThreadNULLOk();
- if (pThread == NULL)
- {
- return;
- }
- // We use the location of frames to decide SO mode. But during exception,
- // we may not unwind some frames, for example: TPMethodFrame, therefore
- // it is not safe to apply this check.
- //_ASSERTE(!pThread->IsSOTolerant(FRAME_TOP));
- if (! pThread->IsSPBeyondLimit())
- {
- return;
- }
- EEPolicy::HandleStackOverflow(SOD_SOIntolerantTransitor, FRAME_TOP);
- _ASSERTE (!"Can not reach here");
-}
-
-//---------------------------------------------------------------------------------------
-//
-// SetSOIntolerantTransitionMarker: Use the current frame as our marker for intolerant transition.
-//
-// Arguments:
-// None.
-//
-// Return Value:
-// None.
-//
-// Note:
-// SO mode is determined by what is on stack. If we see our intolerant transtion first, we are in SO.
-// Because compiler lays object in a function at random stack location, the address of our intolerant
-// transition object SOIntolerantTransitionHandler may be before the HelperMethodFrame. Therefore, we
-// can not use the address of the handlers. Instead we use the current top frame.
-//
-void SetSOIntolerantTransitionMarker()
-{
- LIMITED_METHOD_CONTRACT;
-
- Thread *pThread = GetThreadNULLOk();
- if (pThread == NULL)
- {
- return;
- }
- Frame *pFrame = pThread->GetFrame();
-
- //
- // Check to see if the Frame chain is corrupt
- // This can happen when unmanaged code calls back to managed code
- //
- if (pFrame != FRAME_TOP)
- {
- // SafeGetGCCookiePtr examines the value of the vtable pointer
- // and makes sure that it is a legal Frame subtype.
- // It returns NULL when we have an illegal (i.e. corrupt) vtable value.
- //
- if (!Frame::HasValidVTablePtr(pFrame))
- DoJITFailFast();
- }
-
- // We use pFrame - 1 as our marker so that IntolerantTransitionHandler is seen before
- // a transition frame.
- ClrFlsSetValue(TlsIdx_SOIntolerantTransitionHandler, (void*)(((size_t)pFrame)-1));
-
- _ASSERTE(!pThread->IsSOTolerant(FRAME_TOP));
-}
-
-BOOL RetailStackProbeNoThrowNoThread(unsigned int n)
-{
- STATIC_CONTRACT_NOTHROW;
- STATIC_CONTRACT_GC_NOTRIGGER;
- STATIC_CONTRACT_SO_TOLERANT;
- STATIC_CONTRACT_MODE_ANY;
-
- BEGIN_GETTHREAD_ALLOWED;
- Thread *pThread = GetThread();
-
- if (!pThread)
- {
- // we only probe on managed threads
- return TRUE;
- }
- return RetailStackProbeNoThrow(n, pThread);
- END_GETTHREAD_ALLOWED;
-}
-
-// This functions are used by the stack probe infrastucture that is outside the VM
-// tree. It needs to call into the VM code in order to probe properly.
-void InitStackProbesRetail()
-{
- LIMITED_METHOD_CONTRACT;
- g_fpCheckForSOInSOIntolerantCode = CheckForSOInSOIntolerantCode;
- g_fpSetSOIntolerantTransitionMarker = SetSOIntolerantTransitionMarker;
- g_fpDoProbe = RetailStackProbeNoThrowNoThread;
- g_fpHandleSoftStackOverflow = EEPolicy::HandleSoftStackOverflow;
-
- g_StackProbingEnabled = g_pConfig->ProbeForStackOverflow() != 0;
-}
-
-// Shared by both the nothrow and throwing version. FORCEINLINE into both to avoid the call overhead.
-FORCEINLINE BOOL RetailStackProbeHelper(unsigned int n, Thread *pThread)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- SO_TOLERANT;
- MODE_ANY;
- }
- CONTRACTL_END;
-
- UINT_PTR probeLimit;
-
- // @TODO - Need to devise a probe that doesn't require the thread object
- if (pThread == NULL)
- {
- UINT_PTR stackLimit = (UINT_PTR)Thread::GetStackLowerBound();
- probeLimit = Thread::GetLastNormalStackAddress(stackLimit);
- }
- else
- {
- probeLimit = pThread->GetProbeLimit();
- }
- UINT_PTR probeAddress = (UINT_PTR)(&pThread) - (n * GetOsPageSize());
-
- // If the address we want to probe to is beyond the precalculated limit we fail
- // Note that we don't check for stack probing being disabled. This is encoded in
- // the value returned from GetProbeLimit, which will be 0 if probing is disabled.
- if (probeAddress < probeLimit)
- {
-#if 0
- // @todo : remove this when iexplore, W3WP.EXE and friends allocate 512K instead
- // of 256K for their stack.
- if (((char *)(pThread->GetCachedStackBase()) - (char *)(pThread->GetCachedStackLimit())) < 0x41000)
- {
- return true;
- }
-#endif
- return FALSE;
- }
-
- return TRUE;
-}
-
-BOOL RetailStackProbeNoThrowWorker(unsigned int n, Thread *pThread)
-{
- WRAPPER_NO_CONTRACT;
- return RetailStackProbeHelper(n, pThread);
-}
-
-void RetailStackProbeWorker(unsigned int n, Thread *pThread)
-{
- STATIC_CONTRACT_THROWS;
- STATIC_CONTRACT_GC_NOTRIGGER;
- STATIC_CONTRACT_SO_TOLERANT;
-
- if (RetailStackProbeHelper(n, pThread))
- {
- return;
- }
- ReportStackOverflow();
-}
-
-void DefaultRetailStackProbeWorker(Thread *pThread)
-{
- STATIC_CONTRACT_THROWS;
- STATIC_CONTRACT_GC_NOTRIGGER;
- STATIC_CONTRACT_SO_TOLERANT;
-
- if (RetailStackProbeHelper(ADJUST_PROBE(DEFAULT_ENTRY_PROBE_AMOUNT), pThread))
- {
- return;
- }
- ReportStackOverflow();
-}
-
-#endif // FEATURE_STACK_PROBE
-
-#ifdef STACK_GUARDS_DEBUG
-
-DWORD g_InteriorProbeAmount = DEFAULT_INTERIOR_PROBE_AMOUNT;
-
-DWORD g_CurrentStackGuardTlsIdx = (DWORD) -1;
-DWORD g_UniqueId = 0;
-
-// If this has a non-zero value, we'll mark only those pages whose probe line number matches. This allows us
-// to turn protection on only for a specific probe so that can find multiple w/o having to rebuild. Otherwise
-// can never get past that first AV in the debugger.
-unsigned int g_ProtectStackPagesInDebuggerForProbeAtLine = 0;
-
-// These two are used to the amount probed for at a particular line number
-unsigned int g_UpdateProbeAtLine = 0;
-SString* g_pUpdateProbeAtLineInFile = NULL;
-unsigned int g_UpdateProbeAtLineAmount = 0;
-
-// If this is TRUE, we'll break in the debugger if we try to probe during the handling of a
-// probe-induced stack overflow.
-BOOL g_BreakOnProbeDuringSO = FALSE;
-
-// If this is TRUE, probe cookie validation via assertion is enabled
-// disable assertions on debug build. The stack consumption is different enough
-// that we'll always be getting spurious failures.
-BOOL g_probeAssertOnOverrun = FALSE;
-
-// SO logging pollutes the EH logging space and vice-versa. The SOLogger class
-// allows us to turn SO logging on separately and only produce SO logging, or
-// to allow both.
-#undef LOG
-#define LOG(x) s_SOLogger.LogSpew x
-
-class SOLogger {
-
- enum SOLogStyle {
- SO_LOGGING_NONE, // No SO logging
- SO_LOGGING_SEPARATE_LOG, // Log SO to separate file
- SO_LOGGING_STANDARD_LOG // Log SO to standard log
- };
-
- SOLogStyle m_SOLogStyle;
- FILE *m_SOLoggerFile;
-
-public:
- SOLogger();
- ~SOLogger();
-
- void Initialize();
-
- void LogSpew(DWORD facility, DWORD level, const char *fmt, ... );
-};
-
-static SOLogger s_SOLogger;
-
-SOLogger::SOLogger()
- : m_SOLogStyle(SO_LOGGING_NONE), m_SOLoggerFile(NULL)
-{
-}
-
-void SOLogger::Initialize()
-{
- WRAPPER_NO_CONTRACT;
-
- DWORD SOLogger = REGUTIL::GetConfigDWORD_DontUse_(CLRConfig::INTERNAL_SOLogger, SO_LOGGING_NONE);
- if (SOLogger == SO_LOGGING_SEPARATE_LOG)
- {
- m_SOLogStyle = SO_LOGGING_SEPARATE_LOG;
- int ec = fopen_s(&m_SOLoggerFile, "SOLogSpewFile.log", "w");
- _ASSERTE(SUCCEEDED(ec));
- }
- else if (SOLogger == SO_LOGGING_STANDARD_LOG)
- {
- m_SOLogStyle = SO_LOGGING_STANDARD_LOG;
- }
- else if (SOLogger == SO_LOGGING_NONE)
- {
- m_SOLogStyle = SO_LOGGING_NONE;
- }
- else
- {
- _ASSERTE(!"Invalid SOLogger value");
- }
-}
-
-SOLogger::~SOLogger()
-{
- LIMITED_METHOD_CONTRACT;
- if (m_SOLoggerFile != NULL)
- {
- fclose(m_SOLoggerFile);
- }
-}
-
-void SOLogger::LogSpew(DWORD facility, DWORD level, const char *fmt, ... )
-{
- STATIC_CONTRACT_WRAPPER;
-
- if (m_SOLogStyle == SO_LOGGING_NONE)
- {
- return;
- }
-
- va_list args;
- va_start(args, fmt);
- if (m_SOLogStyle == SO_LOGGING_SEPARATE_LOG)
- {
- vfprintf(m_SOLoggerFile, fmt, args);
- }
- else if (LoggingEnabled())
- {
- LogSpewValist (facility, level, fmt, args);
- }
- va_end(args);
-}
-
-#define MORE_INFO_STRING \
- "\nPlease open a bug against the feature owner.\n" \
- "\nFor details about this feature, see, in a CLR enlistment, src\\ndp\\clr\\doc\\OtherDevDocs\\untriaged\\clrdev_web\\SO Guide for CLR Developers.doc\n"
-
-
-// The following are used to support the SO-injection framework
-HMODULE BaseStackGuard::m_hProbeCallBack = 0;
-BaseStackGuard::ProbeCallbackType BaseStackGuard::m_pfnProbeCallback = NULL;
-
-//
-// ShouldValidateSOToleranceOnThisThread determines if we should check for SO_Tolerance on this
-// thread.
-//
-// If it is a thread we care about, then we will assert if it calls an SO-intolerant function
-// outside of a probe
-//
-BOOL ShouldValidateSOToleranceOnThisThread()
-{
- LIMITED_METHOD_CONTRACT;
-
- if (g_StackProbingEnabled == false || g_fEEShutDown == TRUE)
- {
- return FALSE;
- }
-
- BEGIN_GETTHREAD_ALLOWED;
- Thread *pThread = GetThread();
- if (pThread == NULL || ShouldProbeOnThisThread() == FALSE)
- {
- return FALSE;
- }
-
- // We only want to probe on managed threads that have IL on the stack behind them. But
- // there's not an easy way to check for that, so we use whether or not we own the thread and
- // whether or not a stack guard is in place.
- //
- // If we don't own the thread, then just make sure that we didn't get here by leaving the EE and coming
- // back in. (In which case we would have installed a probe and the GetCurrentStackGuard is non-NULL).
- // We are only probing on managed threads, but we want to avoid asserting for cases where an unmanaged
- // app starts the EE (thereby creating a managed thread), and runs completely unmanaged, but uses some of the CLR's
- // infrastructure, such as Crsts.
- if (pThread->DoWeOwn() == FALSE && pThread->GetCurrentStackGuard() == NULL)
- {
- return FALSE;
- }
-
- if (! IsHandleNullUnchecked(pThread->GetThrowableAsHandle()))
- {
- return FALSE;
- }
-
- return TRUE;
- END_GETTHREAD_ALLOWED;
-}
-
-
-BOOL BaseStackGuard_RequiresNStackPages(BaseStackGuardGeneric *pGuard, unsigned int n, BOOL fThrowOnSO)
-{
- return ((BaseStackGuard*)pGuard)->RequiresNStackPages(n, fThrowOnSO);
-}
-
-void BaseStackGuard_CheckStack(BaseStackGuardGeneric *pGuard)
-{
- WRAPPER_NO_CONTRACT;
- ((BaseStackGuard*)pGuard)->CheckStack();
-}
-
-BOOL CheckNStackPagesAvailable(unsigned int n)
-{
- STATIC_CONTRACT_NOTHROW;
- STATIC_CONTRACT_GC_NOTRIGGER;
- STATIC_CONTRACT_SO_TOLERANT;
- STATIC_CONTRACT_MODE_ANY;
-
- BEGIN_GETTHREAD_ALLOWED;
- Thread *pThread = GetThread();
-
- // If we don't have a managed thread object, we assume that we have the requested
- // number of pages available.
- if (!pThread)
- return TRUE;
-
- _ASSERTE(FitsIn<float>(n));
- return pThread->IsStackSpaceAvailable(static_cast<float>(n));
- END_GETTHREAD_ALLOWED;
-}
-
-void InitStackProbes()
-{
- WRAPPER_NO_CONTRACT;
-
- g_CurrentStackGuardTlsIdx = TlsIdx_StackProbe;
-
- s_SOLogger.Initialize();
-
- // If we're in a debugger, and if the config word below is set, then we'll go ahead and protect stack pages
- // when we're run under a debugger.
- //if (IsDebuggerPresent())
- //{
- if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_SOEnableStackProtectionInDebugger) == 1)
- {
- g_ProtectStackPagesInDebugger = TRUE;
- }
- g_ProtectStackPagesInDebuggerForProbeAtLine =
- CLRConfig::GetConfigValue(CLRConfig::INTERNAL_SOEnableStackProtectionInDebuggerForProbeAtLine);
-
- g_UpdateProbeAtLine = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_SOUpdateProbeAtLine);
- g_UpdateProbeAtLineAmount = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_SOUpdateProbeAtLineAmount);
- LPWSTR wszUpdateProbeAtLineInFile = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_SOUpdateProbeAtLineInFile);
- g_pUpdateProbeAtLineInFile = new SString(wszUpdateProbeAtLineInFile);
- g_pUpdateProbeAtLineInFile->Normalize();
-
- if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_SOBreakOnProbeDuringSO) == 1)
- {
- g_BreakOnProbeDuringSO = TRUE;
- }
- //}
-
- // Never let g_EntryPointProbeAmount get set to an invalid value of <= 0 to avoid races in places that might be
- // about to probe as we set it.
- BOOL entryPointProbeAmount = REGUTIL::GetConfigDWORD_DontUse_(CLRConfig::INTERNAL_SOEntryPointProbe, g_EntryPointProbeAmount);
- if (entryPointProbeAmount > 0)
- {
- g_EntryPointProbeAmount = entryPointProbeAmount;
- }
-
- BOOL interiorProbeAmount = REGUTIL::GetConfigDWORD_DontUse_(CLRConfig::INTERNAL_SOInteriorProbe, g_InteriorProbeAmount);
- if (interiorProbeAmount > 0)
- {
- g_InteriorProbeAmount = interiorProbeAmount;
- }
-
- BOOL enableBackoutStackValidation = REGUTIL::GetConfigDWORD_DontUse_(CLRConfig::INTERNAL_SOEnableBackoutStackValidation, FALSE);
-
- g_EnableDefaultRWValidation = 1;
-
- BOOL enableDefaultRWValidation = REGUTIL::GetConfigDWORD_DontUse_(CLRConfig::INTERNAL_SOEnableDefaultRWValidation, g_EnableDefaultRWValidation);
-
-
-
- // put this first because it will cause probe validation via contract otherwise
- g_probeAssertOnOverrun = REGUTIL::GetConfigDWORD_DontUse_(CLRConfig::INTERNAL_SOProbeAssertOnOverrun, g_probeAssertOnOverrun);
-
- BaseStackGuard::InitProbeReportingToFaultInjectionFramework();
-
- g_EnableBackoutStackValidation = enableBackoutStackValidation;
-
- g_EnableDefaultRWValidation = enableDefaultRWValidation;
-
- g_fpShouldValidateSOToleranceOnThisThread = ShouldValidateSOToleranceOnThisThread;
-
- g_fpRestoreCurrentStackGuard = BaseStackGuard::RestoreCurrentGuard;
- g_fpHandleStackOverflowAfterCatch = EEPolicy::HandleStackOverflowAfterCatch;
-
-
- g_fp_BaseStackGuard_RequiresNStackPages = BaseStackGuard_RequiresNStackPages;
- g_fp_BaseStackGuard_CheckStack = BaseStackGuard_CheckStack;
-
- g_fpCheckNStackPagesAvailable = CheckNStackPagesAvailable;
-
- InitStackProbesRetail();
-
-}
-
-void CloseSOTolerantViolationFile();
-
-//
-// This function is called when the EE is shutting down and we want to stop
-// doing stack probing. Don't clear the g_CurrentStackGuardTlsIdx field though,
-// because there may still be other threads in the process of probing and
-// they'll AV if we pull the g_CurrentStackGuardTlsIdx out from under them.
-void TerminateStackProbes()
-{
- WRAPPER_NO_CONTRACT;
-
-
- CloseSOTolerantViolationFile();
-
- // Don't actually shut down the SO infrastructure. We've got multiple threads
- // racing around in the runtime, and they can be left in an inconsisent state
- // if we flip this off.
-
- return;
-#if 0
- // Yank the stack guard on this thread
- StackGuardDisabler __guardDisable;
- __guardDisable.NeverRestoreGuard();
-
- // Clear out the current guard in case we terminate and its cleanup code
- // does not get to run.
- BaseStackGuard::SetCurrentGuard(NULL);
-
- g_StackProbingEnabled = false;
- g_EnableBackoutStackValidation = FALSE;
- g_fpShouldValidateSOToleranceOnThisThread = NULL;
-#endif
-}
-
-//-----------------------------------------------------------------------------
-// Error handling when we go past a stack guard.
-// We have different messages to more aggressively diagnose the problem
-//-----------------------------------------------------------------------------
-
-// Called by Check_Stack when we overwrite the cookie
-void BaseStackGuard::HandleOverwrittenThisStackGuard(__in_z char *stackID)
-{
- LIMITED_METHOD_CONTRACT;
-
- if (! g_probeAssertOnOverrun)
- {
- return;
- }
-
- ClrDebugState *pState = GetClrDebugState();
- _ASSERTE(pState);
- if (pState->IsSONotMainline())
- {
- return;
- }
-
- // This prevents infinite loops in this function if we call something that probes.
- // Must do it after the check for pState->IsSONotMainline() to give the first invocation
- // a chance to run.
- SO_NOT_MAINLINE_FUNCTION;
-
- // This fires at a closing Check_Stack.
- // The cookie set by Requires_?K_stack was overwritten. We detected that at
- // the closing call to check_stack.
-
- // To fix, increase the guard size at the specified ip.
- //
- // A debugging trick: If you can set a breakpoint at the opening Requires_?K_Stack
- // macro for this instance, you can step in and see where the cookie is actually
- // placed. Then, place a breakpoint that triggers when (DWORD*) 0xYYYYYYYY changes.
- // Continue execution. The breakpoint will fire exactly when the cookie is over-written.
- char buff[1024];
- buff[0] = '\0';
-
- sprintf_s(buff, COUNTOF(buff),
- "STACK GUARD VIOLATION\n"
- "The%s stack guard installed in %s at \"%s\" @ %d requested %d pages of stack.\n"
- "\nIf this is easily reproduced, please rerun the test under the debugger with the\n"
- "DWORD environment variable COMPlus_SOEnableStackProtectionInDebugger\n"
- "set to 1. This will cause an AV at the point of overrun.\n"
- "Attach the stack trace at that point to the bug in addition to this assert."
- MORE_INFO_STRING, stackID ? stackID : "",
- m_szFunction, m_szFile, m_lineNum, m_numPages);
-
- LOG((LF_EH, LL_INFO100000, "%s", buff));
-
- DbgAssertDialog((char *)m_szFile, m_lineNum, buff);
-
-}
-
-void BaseStackGuard::HandleOverwrittenPreviousStackGuard(int probeShortFall, __in_z char *stackID)
-{
- LIMITED_METHOD_CONTRACT;
-
- if (! g_probeAssertOnOverrun)
- {
- return;
- }
-
- ClrDebugState *pState = GetClrDebugState();
- _ASSERTE(pState);
- if (pState->IsSONotMainline())
- {
- return;
- }
-
- // This prevents infinite loops in this function if we call something that probes.
- // Must do it after the check for pState->IsSONotMainline() to give the first invocation
- // a chance to run.
- SO_NOT_MAINLINE_FUNCTION;
-
- // This fires at an opening Requires_?K_Stack
- // We detected that we were already passed our parent's stack guard. So this guard is
- // ok, but our parent's guard is too small. Note that if this test was removed,
- // the failure would be detected by our parent's closing Check_Stack. But if we detect it
- // here, we have more information.
- //
- // We can see how many bytes short our parent is and adjust it properly.
- char buff[2048];
- buff[0] = '\0';
-
- // We don't come in here unless we have a previous guard.
- _ASSERTE(m_pPrevGuard != NULL);
-
- sprintf_s(buff, COUNTOF(buff),
- "STACK GUARD VIOLATION\n"
- " The%s stack guard being installed in %s at \"%s\" @ %d is already in violation of the previous stack guard.\n"
- " The previous guard was installed in %s at \"%s\" @ %d and requested %d pages of stack.\n"
- "The stack requested by the previous guard is at least %d pages (%d bytes) short.\n"
- MORE_INFO_STRING, stackID ? stackID : "", m_szFunction, m_szFile, m_lineNum,
- m_pPrevGuard->m_szFunction, m_pPrevGuard->m_szFile, m_pPrevGuard->m_lineNum, m_pPrevGuard->m_numPages,
- probeShortFall/GetOsPageSize() + (probeShortFall%GetOsPageSize() ? 1 : 0), probeShortFall);
-
- LOG((LF_EH, LL_INFO100000, "%s", buff));
-
- DbgAssertDialog((char *)m_szFile, m_lineNum, buff);
-}
-
-void BaseStackGuard::HandleOverwrittenCurrentStackGuard(void *pGuard, int shortFall, __in_z char *stackID)
-{
- ( (BaseStackGuard *)pGuard)->HandleOverwrittenCurrentStackGuard(shortFall, stackID);
-}
-
-void BaseStackGuard::HandleOverwrittenCurrentStackGuard(int probeShortFall, __in_z char *stackID)
-{
- DEBUG_ONLY_FUNCTION;
-
- if (! g_probeAssertOnOverrun)
- {
- return;
- }
-
- // This fires during probe invariant validation.
- // We detected that our current stack was already past the current probe depth. Note that if this
- // test were removed, the failure should be detected the current guard's closing Check_Stack.
- // But if we detect it here, we have more information.
- //
- // We can see how many bytes short the guard is and adjust it properly.
- char buff[2048];
- buff[0] = '\0';
-
- sprintf_s(buff, COUNTOF(buff),
- "STACK GUARD VIOLATION\n\n"
- "The%s stack guard installed in %s at \"%s\" @ %d has been violated\n\n"
- "The guard requested %d pages of stack and is at least %d pages (%d bytes) short.\n"
- MORE_INFO_STRING, stackID ? stackID : "", m_szFunction, m_szFile, m_lineNum, m_numPages,
- probeShortFall/GetOsPageSize() + (probeShortFall%GetOsPageSize() ? 1 : 0), probeShortFall);
-
- LOG((LF_EH, LL_INFO100000, buff));
-
- DbgAssertDialog((char *)m_szFile, m_lineNum, buff);
-}
-
-//-----------------------------------------------------------------------------
-// Function to do the actual touching of memory during probing, so we can have
-// a good approximation of the address we should be overflowing at.
-//-----------------------------------------------------------------------------
-static __declspec(noinline) void PlaceMarker(UINT_PTR *pMarker)
-{
- LIMITED_METHOD_CONTRACT;
- *pMarker = STACK_COOKIE_VALUE;
-}
-
-
-StackGuardDisabler::StackGuardDisabler()
-{
- LIMITED_METHOD_CONTRACT;
- BaseStackGuard *pGuard = BaseStackGuard::GetCurrentGuard();
-
- if (pGuard == NULL || !BaseStackGuard::IsProbeGuard(pGuard) || !pGuard->Enabled())
- {
- // If there's no guard or its a boundary guard, there's nothing to do
- m_fDisabledGuard = FALSE;
- return;
- }
-
- // If the guard is currently enabled, then we'll need to change the page protection
- pGuard->UndoPageProtectionInDebugger();
- pGuard->DisableGuard();
- m_fDisabledGuard = TRUE;
-}// StackGuardDisabler
-
-void StackGuardDisabler::NeverRestoreGuard()
-{
- m_fDisabledGuard = FALSE;
-}
-
-StackGuardDisabler::~StackGuardDisabler()
-{
- WRAPPER_NO_CONTRACT;
- if (m_fDisabledGuard)
- {
- BaseStackGuard::RestoreCurrentGuard(TRUE);
- }
-}// ~StackProbeDisabler
-
-//-----------------------------------------------------------------------------
-// BaseStackGuard::RestoreCurrentGuard
-//
-// Function to restore the current marker's cookie after an EH.
-//
-// During an exception, we cannot restore stack guard cookies as we unwind our stack guards
-// because the stack has not been unwound and we might corrupt it. So we just pop off our
-// guards as we go and deal with restoring the cookie after the exception.
-// There are two cases:
-//
-// 1) the exception is caught outside the EE
-// 2) the exception is caught in the EE
-//
-// Case 1: If we catch the exception outside the EE, then the boundary guard that we installed before
-// leaving the EE will still be intact, so we have no work to do.
-//
-// Case 2: If we caught the exception in the EE, then on EX_END_CATCH, after we have unwound the stack, we need to
-// restore the cookie for the topmost stack guard. That is what RestoreCurrentGuard does.
-//
-//-----------------------------------------------------------------------------
-void BaseStackGuard::RestoreCurrentGuard(BOOL fWasDisabled)
-{
- if (!IsStackProbingEnabled())
- {
- // nothing to do
- return;
- }
-
- LPVOID pSP = (LPVOID)GetCurrentSP();
- BaseStackGuard *pGuard = GetCurrentGuard();
-
- if (pGuard == NULL || !IsProbeGuard(pGuard))
- {
- // If there's no guard or its a boundary guard, there's nothing to do
- // Just set state to SO-tolerant and quit.
- GetClrDebugState()->SetSOTolerance();
- return;
- }
-
- if (reinterpret_cast<LPVOID>(pGuard->m_pMarker) > pSP)
- {
- // We have caught an exception while processing an exception. So can't restore the marker and must
- // wait until the catcher of the original exception handles it.
- if (!IsBackoutCalledForEH((BYTE *)(pGuard), static_cast<BYTE *>(pSP)))
- {
- // verfiy that really are processing an exception. We could have some false positives here, but in
- // general this is a good check.
- _ASSERTE(!"After an exception was caught, we couldn't restore the marker because it is greater than the SP\n"
- "This should only happen if we caught a nested exception when already processing an exception, but"
- " the distance between the SP and the probe does not indicate an exception is in flight.");
- }
- return;
- }
-
- // Reset the SO-tolerance state
-
- // We should never get here with a guard beyond the current SP
- _ASSERTE(reinterpret_cast<LPVOID>(pGuard) > pSP);
-
- LOG((LF_EH, LL_INFO100000, "BSG::RSG: G: %p D: %d \n", pGuard, pGuard->m_depth));
-
- // If we have EX_TRY {EX_TRY {...}EX_CATCH{...}EX_END_CATCH}EX_CATCH{...}EX_END_CATCH,
- // the inner EX_END_CATCH will mark the current guard protected. When we reach the
- // outer EX_END_CATCH, we will AV when placing marker.
- pGuard->UndoPageProtectionInDebugger();
- if (fWasDisabled)
- pGuard->EnableGuard();
- // Replace the marker for the current guard
- PlaceMarker(pGuard->m_pMarker);
-
- // Protect marker page in debugger if we need it
- pGuard->ProtectMarkerPageInDebugger();
- GetClrDebugState()->ResetSOTolerance();
- pGuard->m_fEHInProgress = FALSE;
-}
-
-//-----------------------------------------------------------------------------
-// This places a marker outside the bounds of a probe. We don't want to use
-// PlaceMarker because that is how we detect if a proper SO was triggered (via
-// StackProbeContainsIP
-//-----------------------------------------------------------------------------
-static __declspec(noinline) void PlaceMarkerBeyondProbe(UINT_PTR *pMarker)
-{
- *pMarker = STACK_COOKIE_VALUE;
-}
-
-//---------------------------------------------------------------------------------------------
-// Determine if we should check integrity of previous cookie. Only check if the previous was a probe guard.
-//---------------------------------------------------------------------------------------------
-inline BOOL BaseStackGuard::ShouldCheckPreviousCookieIntegrity()
-{
- WRAPPER_NO_CONTRACT;
- if (m_pPrevGuard == NULL ||
- IsBoundaryGuard(m_pPrevGuard) ||
- m_pPrevGuard->m_pMarker==NULL ||
- m_pPrevGuard->m_fEHInProgress ||
- !m_pPrevGuard->Enabled())
- {
- return FALSE;
- }
- return TRUE;
-}
-
-//---------------------------------------------------------------------------------------------
-// Determine if we should check integrity of this cookie.
-//---------------------------------------------------------------------------------------------
-inline BOOL BaseStackGuard::ShouldCheckThisCookieIntegrity()
-{
- WRAPPER_NO_CONTRACT;
- // We only need to check if this is a probe guard and it has a non-null marker.
- // Anything else, we don't care about.
- return IsProbeGuard(this) && m_pMarker != NULL && Enabled();
-}
-
-BOOL BaseStackGuard::RequiresNStackPages(unsigned int n, BOOL fThrowsOnSO)
-{
- WRAPPER_NO_CONTRACT;
-
- return RequiresNStackPagesInternal(n, fThrowsOnSO);
-}
-
-BOOL BaseStackGuard::RequiresNStackPagesThrowing(unsigned int n)
-{
-// STATIC_CONTRACT_NOTHROW;
- STATIC_CONTRACT_THROWS;
- STATIC_CONTRACT_MODE_ANY;
- STATIC_CONTRACT_SO_TOLERANT;
- STATIC_CONTRACT_GC_NOTRIGGER;
-
- return RequiresNStackPagesInternal(n, TRUE);
-}
-
-BOOL BaseStackGuard::RequiresNStackPagesNoThrow(unsigned int n)
-{
- STATIC_CONTRACT_NOTHROW;
- STATIC_CONTRACT_MODE_ANY;
- STATIC_CONTRACT_SO_TOLERANT;
- STATIC_CONTRACT_GC_NOTRIGGER;
-
- return RequiresNStackPagesInternal(n, FALSE);
-}
-
-//-----------------------------------------------------------------------------
-// Place guard in stack.
-//-----------------------------------------------------------------------------
-BOOL BaseStackGuard::RequiresNStackPagesInternal(unsigned int n, BOOL fThrowOnSO)
-{
- CONTRACTL
- {
- DISABLED(THROWS);
- GC_NOTRIGGER;
- MODE_ANY;
- SO_TOLERANT;
- }
- CONTRACTL_END;
-
- BOOL fRet;
-
- // Temporarily initialize the exception occurred flag
- m_exceptionOccurred = FALSE;
-
- // Code below checks if there's a Thread, and exits immediately if not.
- // So the rest of the function rightly assumes there is a Thread
- BEGIN_GETTHREAD_ALLOWED;
-
- // only probe on managed threads. No thread, no probe.
- if (! IsStackProbingEnabled() || GetThread() == NULL)
- {
- return TRUE;
- }
-
- // Don't try to probe if we are checking backout and there are active backout markers on
- // the stack to avoid collision
- if (g_EnableBackoutStackValidation)
- {
- if ((!(GetClrDebugState()->GetStackMarkerStack().IsEmpty()))
- && (!(GetClrDebugState()->GetStackMarkerStack().IsDisabled())))
- {
- return TRUE;
- }
- }
-
- if (n <= 1)
- {
- // Our calculation below doesn't handle 1-page probes.
- _ASSERTE(!"RequiresNStackPages called with a probe amount less than 2");
- }
-
- // Retrieve the current stack pointer which will be used to calculate the marker.
- LPVOID pStack = (LPVOID)GetCurrentSP();
-
- // Setup some helpful debugging information. Get our caller's ip. This is useful for debugging (so we can see
- // when the previous guard was set).
- m_UniqueId = g_UniqueId++;
- m_numPages = n;
-
- // Get the address of the last few bytes on the penultimate page we probed for. This is slightly early than the probe point,
- // but gives us more conservatism in our overrun checking. ("Last" here means the bytes with the smallest address.)
- m_pMarker = ((UINT_PTR*)pStack) - (GetOsPageSize() / sizeof(UINT_PTR) * (n-1));
- m_pMarker = (UINT_PTR*)((UINT_PTR)m_pMarker & ~(GetOsPageSize() - 1));
-
- // Grab the previous guard, if any, and update our depth.
- m_pPrevGuard = GetCurrentGuard();
-
- if (m_pPrevGuard == NULL)
- {
- m_depth = 0;
- }
- else
- {
- // If we've already got a probe in place that exceeds the reach of this one, then
- // don't install this one. This avoids problems where we've installed an entry point
- // probe and then called into a function that happens to do an interior probe. If we
- // install the interior probe, then we effectively lose our deep entry point probe
- // and end up with probe overrun violations. Check for it being a probe guard
- // because boundary guards will always have 0 markers and we'd never probe
- // after a boundary guard otherwise.
- if (IsProbeGuard(m_pPrevGuard) && m_pPrevGuard->m_pMarker < m_pMarker)
- {
- return TRUE;
- }
- m_depth = m_pPrevGuard->m_depth + 1;
-
- // We need to undo the page protection that we setup when we put the previous guard in place so we don't
- // trip over it with this guard. Also, track that we came next.
- if (IsProbeGuard(m_pPrevGuard) && m_pPrevGuard->m_pMarker != NULL)
- {
- m_pPrevGuard->UndoPageProtectionInDebugger();
- m_pPrevGuard->m_szNextFunction = m_szFunction;
- m_pPrevGuard->m_szNextFile = m_szFile;
- m_pPrevGuard->m_nextLineNum = m_lineNum;
- }
- }
-
- if (ShouldCheckPreviousCookieIntegrity())
- {
- UINT_PTR *approxSP = (UINT_PTR*)GetCurrentSP();
- if (approxSP <= m_pPrevGuard->m_pMarker)
- {
- UINT_PTR uProbeShortFall = (char*)m_pPrevGuard->m_pMarker - (char*)approxSP;
- _ASSERTE(FitsIn<int>(uProbeShortFall));
- HandleOverwrittenPreviousStackGuard(static_cast<int>(uProbeShortFall), NULL);
- }
- }
-
- m_eInitialized = cPartialInit;
-
- fRet = DoProbe(m_numPages, fThrowOnSO);
- END_GETTHREAD_ALLOWED;
- return fRet;
-}
-
-BOOL BaseStackGuard::DoProbe(unsigned int n, BOOL fThrowOnSO)
-{
- CONTRACTL
- {
- DISABLED(THROWS);
- MODE_ANY;
- WRAPPER(GC_TRIGGERS);
- SO_TOLERANT;
- }
- CONTRACTL_END;
-
- if (! IsStackProbingEnabled() || m_eInitialized != cPartialInit)
- {
- return TRUE;
- }
-
- LOG((LF_EH, LL_INFO100000, "BSG::DP: %d pages, depth %d, probe 0x%p, fcn %s, prev 0x%p\n",
- m_numPages, m_depth, this, this->m_szFunction, m_pPrevGuard));
-
- // For cases where have a separate call to DoProbe, make sure the probe amounts match.
- _ASSERTE(n == m_numPages);
-
- // We'll probe for 12 pages + 4 for cleanup.... we'll just put our marker at the 12 page point.
- unsigned int nPagesToProbe = n + static_cast<unsigned int>(ADJUST_PROBE(BACKOUT_CODE_STACK_LIMIT));
-
- Thread *pThread = GetThread();
-
- // We already checked in RequiresNPagesStack that we've got a thread. But ASSERT just to
- // be sure.
- _ASSERTE(pThread);
-
- // Check if we have enough space left in the stack
- if (fThrowOnSO)
- {
- RetailStackProbe(nPagesToProbe, pThread);
- }
- else if (! RetailStackProbeNoThrow(nPagesToProbe, pThread))
- {
- return FALSE;
- }
-
- // The fault injection framework will tell us when it want to inject
- // an SO. If it returns TRUE, then inject an SO depending on the fThrowOnSO flag
- if (ReportProbeToFaultInjectionFramework() == TRUE)
- {
- if (fThrowOnSO)
- {
- COMPlusThrowSO();
- }
- // return probe failure (ie SO) if not in a throwing probe
- return FALSE;
- }
-
- LOG((LF_EH, LL_INFO100000, "BSG::DP: pushing to 0x%p\n", m_pMarker));
-
- // See if we're able to get a TLS slot to mark our guard page
- HRESULT hr = PrepGuard();
-
- // Since we can be here only with a valid managed thread object,
- // it will already have its TLS setup. Thus, accessing TLS in PrepGuard
- // call above shouldn't fail.
- _ASSERTE(SUCCEEDED(hr));
-
- // make sure the guard page is beyond the marker page, otherwise we could AV or when the guard
- // page moves up, it could wipe out our debugger page protection
- UINT_PTR *sp = (UINT_PTR*)GetCurrentSP();
- while (sp >= m_pMarker)
- {
- sp -= (GetOsPageSize() / sizeof(UINT_PTR));
- *sp = NULL;
- }
-
- // Write the cookie onto the stack.
- PlaceMarker(m_pMarker);
-
- // We'll protect the page where we put the marker if a debugger is attached. That way, you get an AV right away
- // when you go past the guard when running under a debugger.
- ProtectMarkerPageInDebugger();
-
- // Mark that we're initialized (and didn't get interupted from an exception)
- m_eInitialized = cInit;
-
- // Initialize the exception occurred flag
- m_exceptionOccurred = TRUE;
-
- // setup flag to tell if we're unwinding due to an exception
- m_fEHInProgress = FALSE;
-
- // By this point, everything is working, so go ahead and hook up.
- SetCurrentGuard(this);
-
- return TRUE;
-}
-
-
-//-----------------------------------------------------------------------------
-// PopGuardForEH
-//
-// If we are being popped during an EH unwind, our cookie is likely corrupt so we can't check it.
-// So just pop ourselves off the stack and return. We will restore the markers
-// after we've caught the exception.
-//
-// We also set the EHInProgress bit on the previous guard to indicate that the
-// current guard was unwound during EH and couldn't restore the previous guard's
-// cookie.
-//
-// Also need to clear the protection bit as go down because it will no
-// longer be protected.
-//-----------------------------------------------------------------------------
-void BaseStackGuard::PopGuardForEH()
-{
- LIMITED_METHOD_CONTRACT;
- // If we've protected this page, undo the protection
- UndoPageProtectionInDebugger();
-
- if (m_pPrevGuard)
- {
- m_pPrevGuard->m_fEHInProgress = TRUE;
-
- // Indicate that we haven't reprotected the previous guard
- m_pPrevGuard->m_fProtectedStackPage = FALSE;
- }
- // Mark it as unwound for EH. This is for debugging purposes only so we
- // know how it was popped.
- m_eInitialized = cEHUnwound;
- SetCurrentGuard(m_pPrevGuard);
-}
-
-//-----------------------------------------------------------------------------
-// Check guard in stack
-// This must be called 1:1 with RequiresNPagesStack, else:
-// - the function's stack cookie isn't restored
-// - the stack chain in TLS gets out of wack.
-//-----------------------------------------------------------------------------
-void BaseStackGuard::CheckStack()
-{
- WRAPPER_NO_CONTRACT;
-
- if (! IsStackProbingEnabled() || m_eInitialized != cInit)
- {
- return;
- }
-
- // If we are being popped during an EH unwind, our cookie is likely corrupt so we can't check it.
- // So just pop ourselves off the stack and return. We will restore the markers
- // after we've caught the exception.
- if (DidExceptionOccur())
- {
- // We may not be the topmost in the stack, but we'd better not be called when we've already
- // unwound the stack past this guy.
- _ASSERTE(GetCurrentGuard() <= this);
-
- // Make sure that if we didn't get to the END_SO_INTOLERANT_CODE that the stack usage
- // indicates an exception. This is only a rough check - we might miss some cases where the
- // stack grew a lot between construction and descrution of the guard. However, it will
- // catch most short-circuits.
- if (!IsBackoutCalledForEH((BYTE *)(this), static_cast<BYTE *>((LPVOID)GetCurrentSP())))
- {
- _ASSERTE(!"Short-circuit of END_SO_INTOLERANT_CODE detected. You cannot short-cirtuit return from an SO-intolerant region");
- }
-
- LOG((LF_EH, LL_INFO100000, "BSG::CS on EH path sp 0x %p popping probe 0x%p depth %d \n", GetCurrentSP(), this, m_depth));
- PopGuardForEH();
- return;
- }
-
- LOG((LF_EH, LL_INFO100000, "BSG::CS checking probe 0x%p depth %d \n", this, m_depth));
-
- // if we aren't being unwound during EH, then we shouldn't have our EHInProgress bit set. That
- // means we caught the exception in the EE and didn't call RestoreGuard or we missed a SO-tolerant
- // transition out of the EE and the exception occurred above us.
- _ASSERTE(m_fEHInProgress == FALSE);
-
- // we should only ever be popping ourselves if we are not on the EH unwind path
- _ASSERTE(GetCurrentGuard() == this);
-
- // Can have 0-sized probes for cases where have an entry that is small enough not to need a probe. But still
- // need to put something in place for the boundary probe assertions to work properly. So just remove it and
- // don't do any cookie checking.
- if (m_numPages == 0)
- {
- // Just unhook our guard from the chain. We're done. 0-page probes don't have anything preceding them.
- ResetCurrentGuard(m_pPrevGuard);
- return;
- }
-
- // We need to undo the page protection that we setup when we put the guard in place.
- UndoPageProtectionInDebugger();
-
- CheckMarkerIntegrity();
-
- RestorePreviousGuard();
-}
-
-void BaseStackGuard::CheckMarkerIntegrity()
-{
- LIMITED_METHOD_CONTRACT;
-
- if (m_pMarker == 0)
- {
- return;
- }
-
- // Make sure our cookie is still on the stack where it belongs.
- if (ShouldCheckThisCookieIntegrity() && IsMarkerOverrun(m_pMarker))
- {
- HandleOverwrittenThisStackGuard(NULL);
- }
-}
-
-
-void BaseStackGuard::RestorePreviousGuard()
-{
- WRAPPER_NO_CONTRACT;
-
- if (! IsProbeGuard(m_pPrevGuard) || !m_pPrevGuard->Enabled())
- {
- LOG((LF_EH, LL_INFO100000, "BSG::RPG depth %d, probe 0x%p, prev 0x%p not probe\n",
- m_depth, this, m_pPrevGuard));
- // Unhook our guard from the chain.
- ResetCurrentGuard(m_pPrevGuard);
- return;
- }
-
- if (m_pPrevGuard->m_fEHInProgress)
- {
- // If the marker was lost during exception processing, we cannot restore it and it will be restored on the catch.
- // This can happen if we were partway through an EH unwind and then called something that probed. We'll have unwound our
- // probe guards but won't have been able to put the cookie back, and we're still in that same position.
- LOG((LF_EH, LL_INFO100000, "BSG::RPG depth %d, probe 0x%p, EH in progress, not resetting prev 0x%p\n",
- m_depth, this, m_pPrevGuard));
- // Unhook our guard from the chain.
- ResetCurrentGuard(m_pPrevGuard);
- return;
- }
-
- if (m_pPrevGuard->m_pMarker == NULL)
- {
- // Previous guard had no marker.
- // We're done, so just unhook ourselves from the chain and leave.
- ResetCurrentGuard(m_pPrevGuard);
- }
-
- // Restore last cookie, so that our previous guard will be able to properly check whether it gets overwritten. Note:
- // we don't restore the previous cookie if we overwrote it with this guard. Doing so, by definition, corrupts the
- // stack. Its better to have the previous guard report the over-write.
- PlaceMarker(m_pPrevGuard->m_pMarker);
- LOG((LF_EH, LL_INFO100000, "BSG::RPG depth %d, probe 0x%p "
- "for prev 0x%p at 0x%p in %s\n",
- m_depth, this, m_pPrevGuard, m_pPrevGuard->m_pMarker, m_pPrevGuard->m_szFunction));
- // And, of course, restore the previous guard's page protection (if it had done any.)
- if (m_pPrevGuard->m_fProtectedStackPage)
- {
- m_pPrevGuard->ProtectMarkerPageInDebugger();
- }
-
- // Mark it as unwound on normal path. This is for debugging purposes only so we
- // know how it was popped.
- m_eInitialized = cUnwound;
-
- // Unhook our guard from the chain.
- ResetCurrentGuard(m_pPrevGuard);
-}
-
-void BaseStackGuard::ProtectMarkerPageInDebugger(void *pGuard)
-{
- ((BaseStackGuard *)pGuard)->ProtectMarkerPageInDebugger();
-}
-
-//-----------------------------------------------------------------------------
-// Protect the page where we put the marker if a debugger is attached. That way, you get an AV right away
-// when you go past the guard when running under a debugger.
-//-----------------------------------------------------------------------------
-void BaseStackGuard::ProtectMarkerPageInDebugger()
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- SO_TOLERANT;
- MODE_ANY;
- }
- CONTRACTL_END;
-
- DEBUG_ONLY_FUNCTION;
-
- if (! (g_ProtectStackPagesInDebugger || g_ProtectStackPagesInDebuggerForProbeAtLine))
- {
- return;
- }
-
- DWORD flOldProtect;
-
- LOG((LF_EH, LL_INFO100000, "BSG::PMP: m_pMarker 0x%p, value 0x%p\n", m_pMarker, *m_pMarker));
-
- // We cannot call into host for VirtualProtect. EEVirtualProtect will try to restore previous
- // guard, but the location has been marked with PAGE_NOACCESS.
-#undef VirtualProtect
- BOOL fSuccess = ::VirtualProtect(m_pMarker, 1, PAGE_NOACCESS, &flOldProtect);
- _ASSERTE(fSuccess);
-
-#define VirtualProtect(lpAddress, dwSize, flNewProtect, lpflOldProtect) \
- Dont_Use_VirtualProtect(lpAddress, dwSize, flNewProtect, lpflOldProtect)
-
- m_fProtectedStackPage = fSuccess;
-}
-
-
-void BaseStackGuard::UndoPageProtectionInDebugger(void *pGuard)
-{
- ((BaseStackGuard *)pGuard)->UndoPageProtectionInDebugger();
-}
-
-//-----------------------------------------------------------------------------
-// Remove page protection installed for this probe
-//-----------------------------------------------------------------------------
-void BaseStackGuard::UndoPageProtectionInDebugger()
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- SO_TOLERANT;
- MODE_ANY;
- }
- CONTRACTL_END;
-
- DEBUG_ONLY_FUNCTION;
-
- if (!m_fProtectedStackPage)
- {
- return;
- }
-
- _ASSERTE(IsProbeGuard());
-
- DWORD flOldProtect;
- // EEVirtualProtect installs a BoundaryStackGuard. To avoid recursion, we call
- // into OS for VirtualProtect instead.
-#undef VirtualProtect
- BOOL fSuccess = ::VirtualProtect(m_pMarker, 1, PAGE_READWRITE, &flOldProtect);
- _ASSERTE(fSuccess);
-
- LOG((LF_EH, LL_INFO100000, "BSG::UMP m_pMarker 0x%p\n", m_pMarker));
- // Frankly, if we had protected the stack page, then we shouldn't have gone past the guard, right? :)
- _ASSERTE(!Enabled() || !IsMarkerOverrun(m_pMarker));
-
-#define VirtualProtect(lpAddress, dwSize, flNewProtect, lpflOldProtect) \
- Dont_Use_VirtualProtect(lpAddress, dwSize, flNewProtect, lpflOldProtect)
-}
-
-void BaseStackGuard::InitProbeReportingToFaultInjectionFramework()
-{
- WRAPPER_NO_CONTRACT;
-
- if (! g_pConfig->ShouldInjectFault(INJECTFAULT_SO))
- {
- return;
- }
-
- m_hProbeCallBack = CLRLoadLibrary(MAKEDLLNAME_W(W("FaultHostingLayer")));
- if (!m_hProbeCallBack) {
- fprintf(stderr, "StackProbing: Failed to load " MAKEDLLNAME_A("FaultHostingLayer") ". LastErr=%d\n",
- GetLastError());
- return;
- }
-
- m_pfnProbeCallback = (ProbeCallbackType)GetProcAddress(m_hProbeCallBack, "StackProbeCallback");
- if (!m_pfnProbeCallback) {
- fprintf(stderr, "StackProbing: Couldn't find StackProbeCallback() in FaultHostingLayer\n");
- return;
- }
-}
-
-// The fault injection framework will return TRUE if we should
-// inject an SO at the point of the current probe.
-BOOL BaseStackGuard::ReportProbeToFaultInjectionFramework()
-{
- STATIC_CONTRACT_NOTHROW;
- STATIC_CONTRACT_GC_NOTRIGGER;
- STATIC_CONTRACT_SO_TOLERANT;
- STATIC_CONTRACT_MODE_ANY;
-
- if (! g_pConfig->ShouldInjectFault(INJECTFAULT_SO) || ! m_pfnProbeCallback)
- {
- return FALSE;
- }
-
- // FORBIDGC_LOADER_USE_ENABLED says we are only doing a minimal amount of work and will not
- // update global state (just read it.) Code running in this state cannot tolerate a fault injection.
- if (FORBIDGC_LOADER_USE_ENABLED())
- {
- return FALSE;
- }
-
- // For codepaths that are not mainline or are debug only, we don't care about fault injection because
- // taking an SO here won't matter (or can't happen). However, we'd like to still probe on those paths
- // just to give us more conservative probe coverage, so we still do the probe, just not the fault injection.
- ClrDebugState *pDebugState = GetClrDebugState();
- if (pDebugState && pDebugState->IsSONotMainline() || pDebugState->IsDebugOnly())
- {
- return FALSE;
- }
-
-
- // Faults injected into the default domain are process fatal. Probing is still going to occur
- // but we never trigger fault injection.
- {
- //Attempting to figure out if we are in the default domain will trigger SO probes so
- // we temporarily mark ourselves SONotMainline during the check to prevent recursive probes
- SO_NOT_MAINLINE_REGION();
- Thread *pThread = GetThreadNULLOk();
- if (pThread && pThread->GetDomain(TRUE)->IsDefaultDomain())
- {
- return FALSE;
- }
- }
-
- return m_pfnProbeCallback(m_lineNum, m_szFile);
-}
-
-void BaseStackGuard::SetCurrentGuard(BaseStackGuard* pGuard)
-{
- WRAPPER_NO_CONTRACT;
-
- ClrFlsSetValue(g_CurrentStackGuardTlsIdx, pGuard);
-
- Thread * pThread = GetThreadNULLOk();
- if (pThread)
- {
- // For faster access, store the guard in the thread object, if available
- pThread->SetCurrentStackGuard(pGuard);
- }
-}
-
-// Reset the current guard state back to this one's
-void BaseStackGuard::ResetCurrentGuard(BaseStackGuard* pGuard)
-{
- WRAPPER_NO_CONTRACT;
-
- SetCurrentGuard(pGuard);
-}
-
-// This puts a boundary probe in the list when we leave the EE
-DEBUG_NOINLINE void BoundaryStackGuard::Push()
-{
- SCAN_SCOPE_BEGIN;
- ANNOTATION_FN_SO_TOLERANT;
-
- if (! IsStackProbingEnabled())
- {
- return;
- }
-
-
- m_isBoundaryGuard = TRUE;
- m_pPrevGuard = GetCurrentGuard();
-
- if (m_pPrevGuard)
- {
- // @todo can remove the check for IsProbeGuard when have all the probes in place
- if (IsProbeGuard(m_pPrevGuard))
- {
- // ensure that the previous probe was sufficiently large
- if (ShouldCheckPreviousCookieIntegrity())
- {
- // Grab an approximation of our current stack pointer.
- void *approxStackPointer = (LPVOID)GetCurrentSP();
-
- if (((UINT_PTR*) approxStackPointer <= m_pPrevGuard->Marker()))
- {
- UINT_PTR uProbeShortFall = (char*)m_pPrevGuard->Marker() - (char*)this;
- _ASSERTE(FitsIn<int>(uProbeShortFall));
- HandleOverwrittenPreviousStackGuard(static_cast<int>(uProbeShortFall), NULL);
- }
- }
- m_pPrevGuard->UndoPageProtectionInDebugger(); // undo previuos guard's page protection
- m_pPrevGuard->m_szNextFunction = m_szFunction; // track that we came next
- m_pPrevGuard->m_szNextFile = m_szFile;
- m_pPrevGuard->m_nextLineNum= m_lineNum;
- }
- m_depth = m_pPrevGuard->Depth(); // don't increment, but record so can transfer to next probe
- }
- LOG((LF_EH, LL_INFO100000, "BNSG::PS probe 0x%p, depth %d, prev 0x%p in %s\n",
- this, m_depth, m_pPrevGuard, m_pPrevGuard ? m_pPrevGuard->FunctionName() : NULL));
-
- // See if we're able to get a TLS slot to mark our guard page. If not, this will just be an unitialized
- // guard. This generally happens in callbacks to the host before the EE infrastructure is set up on
- // the thread, so there won't be interesting probes to protect anyway.
- if (FAILED(PrepGuard()))
- {
- return;
- }
-
- // Mark that we're initialized (and didn't get interupted from an exception)
- m_eInitialized = cInit;
-
- // setup flag to tell if we're unwinding due to an exception
- m_exceptionOccurred = TRUE;
-
- SetCurrentGuard(this);
-}
-
-
-
-// Pop the boundary probe and reset the original probe's cookie when
-// return into the EE
-DEBUG_NOINLINE void BoundaryStackGuard::Pop()
-{
- SCAN_SCOPE_END;
-
- if (! IsStackProbingEnabled() || m_eInitialized != cInit)
- {
- return;
- }
-
- // If we are being popped during an EH unwind, we cannot restore the probe cookie because it will
- // corrupt the stack. So just pop ourselves off the stack and return. We will restore the markers
- // after we've caught the exception.
- if (DidExceptionOccur())
- {
- // We may not be the topmost in the stack, but we'd better not be called when we've already
- // unwound the stack past this guy.
- _ASSERTE(GetCurrentGuard() <= this);
-
- // Make sure that if we didn't get to the END_SO_TOLERANT_CODE that the stack usage
- // indicates an exception. This is only a rough check - we might miss some cases where the
- // stack grew a lot between construction and descrution of the guard. However, it will
- // catch most short-circuits.
- if (!IsBackoutCalledForEH((BYTE *)(this), static_cast<BYTE *>((LPVOID)GetCurrentSP())))
- {
- _ASSERTE(!"Short-circuit of END_SO_TOLERANT_CODE detected. You cannot short-cirtuit return from an SO-tolerant region");
- }
-
- LOG((LF_EH, LL_INFO100000, "BNSG::PP popping on EH path 0x%p depth %d \n", this, m_depth));
- PopGuardForEH();
- return;
- }
-
- LOG((LF_EH, LL_INFO100000, "BNSG::PP 0x%p depth %d restoring CK at 0x%p "
- " probe 0x%p in %s\n",
- this, m_depth, (!IsProbeGuard(m_pPrevGuard) ? 0 : m_pPrevGuard->Marker()),
- m_pPrevGuard, m_pPrevGuard ? m_pPrevGuard->FunctionName() : NULL));
-
- // we should only ever be popping ourselves
- _ASSERTE(GetCurrentGuard() == this);
-
- RestorePreviousGuard();
-}
-
-
-//
-// IsBackoutCalledForEH
-//
-// Uses heuristics to determines whether the backout code is being called on an EH path or
-// not based on the original SP and the SP when the backout code is called.
-//
-// origSP: The SP when the mainline code was called. For example, the SP of a ctor or code in a try block
-//
-// backoutSP: The SP when the backout code is called.
-//
-// Returns: boolean indicating whether or not the backout code is being called on an EH path.
-//
-BOOL IsBackoutCalledForEH(BYTE *origSP,
- BYTE *backoutSP)
-{
- // We need to determine if we are being called in the normal or exception path. (Sure would be
- // nice if the CRT would tell us.) We use the stack pointer to determine this. On the normal path
- // the stack pointer should be not far from the this pointer, whereas on the exception path it
- // will typically be a lot higher up the stack. We will make the following assumptions:
- //
- // 1) on EH path the OS has to push a context onto the stack. So the SP will be increased by
- // at least the size of a context when calling a destructor through EH path.
- //
- // 2) the CRT will use minimal stack space to call a destructor. This is assumed to be less
- // than the size of a context.
- //
- // Caveats:
- //
- // 1) If there is less than a context on the stack on the EH path, we will miss the fact that
- // an exception occurred
- //
- // 2) If the CRT uses near the size of a context before calling the destructor in the normal case,
- // we will assume we've got an exception and ASSERT.
- //
- // So if we arrive at our backout code and the SP is more than the size of a context beyond the original SP,
- // we assume we are on an EH path.
- //
- return (origSP - sizeof(CONTEXT)) > backoutSP;
-
-}
-
-
-DebugSOIntolerantTransitionHandlerBeginOnly::DebugSOIntolerantTransitionHandlerBeginOnly(EEThreadHandle thread)
-{
- SCAN_SCOPE_BEGIN;
- ANNOTATION_FN_SO_INTOLERANT;
-
- // save the SP so that we can check if the dtor is being called with a much bigger one
- m_ctorSP = (char *)GetCurrentSP();
- m_clrDebugState = GetClrDebugState();
- m_prevSOTolerantState = m_clrDebugState->BeginSOIntolerant();
-}
-
-DebugSOIntolerantTransitionHandlerBeginOnly::~DebugSOIntolerantTransitionHandlerBeginOnly()
-{
- SCAN_SCOPE_END;
-
- // A DebugSOIntolerantTransitionHandlerBeginOnly is instantiated only for cases where we will not see
- // an exception. So the desctructor should never be called on an exception path. This will check if
- // we are handling an exception and raise an assert if so.
-
- //
- // We need to determine if we are being called in the normal or exception path. (Sure would be
- // nice if the CRT would tell us.) We use the stack pointer to determine this. On the normal path
- // the stack pointer should be not far from the this pointer, whereas on the exception path it
- // will typically be a lot higher up the stack. We will make the following assumptions:
- //
- // 1) on EH path the OS has to push a context onto the stack. So the SP will be increased by
- // at least the size of a context when calling a destructor through EH path.
- //
- // 2) the CRT will use minimal stack space to call a destructor. This is assumed to be less
- // than the size of a context.
- //
- // Caveats:
- //
- // 1) If there is less than a context on the stack on the EH path, we will miss the fact that
- // an exception occurred
- //
- // 2) If the CRT uses near the size of a context before calling the destructor in the normal case,
- // we will assume we've got an exception and ASSERT.
- //
- // So if we arrive at our destructor and the SP is within the size of a context beyond the SP when
- // we called the ctor, we assume we are on normal path.
- if ((m_ctorSP - sizeof(CONTEXT)) > (LPVOID)GetCurrentSP())
- {
- _ASSERTE(!"An exception cannot leak through a SO_INTOLERANT_CODE_NOTHROW boundary");
- }
-
- m_clrDebugState->SetSOTolerance(m_prevSOTolerantState);
-}
-#endif // STACK_GUARDS_DEBUG
-
-#if defined(FEATURE_STACK_PROBE) && defined(_DEBUG)
-
-#undef __STACKPROBE_inl__
-
-#define INCLUDE_RETAIL_STACK_PROBE
-
-#include "stackprobe.inl"
-
-#endif // defined(FEATURE_STACK_PROBE) && defined(_DEBUG)
-
-#if 0 //FEATURE_FUSION_FAST_CLOSURE - was too buggy at the end of Dev10, not used since then. Delete it after Dev12 if it is still not fixed and used.
-
-#ifdef FEATURE_STACK_PROBE
-// This is a helper that fusion (CFastAssemblyBindingClosure) uses to
-// do an interior stack probe.
-HRESULT InteriorStackProbeNothrowCheckThread()
-{
- STATIC_CONTRACT_NOTHROW;
- STATIC_CONTRACT_GC_NOTRIGGER;
- STATIC_CONTRACT_MODE_ANY;
-
- HRESULT hr = S_OK;
- INTERIOR_STACK_PROBE_NOTHROW_CHECK_THREAD(hr = E_OUTOFMEMORY;);
- END_INTERIOR_STACK_PROBE;
-
- return hr;
-}
-#endif
-
-#endif //0 - FEATURE_FUSION_FAST_CLOSURE
+++ /dev/null
-// Licensed to the .NET Foundation under one or more agreements.
-// The .NET Foundation licenses this file to you under the MIT license.
-// See the LICENSE file in the project root for more information.
-//
-
-//
-//-----------------------------------------------------------------------------
-// Stack Probe Header
-// Used to setup stack guards
-//-----------------------------------------------------------------------------
-
-#ifndef __STACKPROBE_h__
-#define __STACKPROBE_h__
-
-//-----------------------------------------------------------------------------
-// Stack Guards.
-//
-// The idea is to force stack overflows to occur at convenient spots.
-// * Fire at RequiresNPagesStack (beggining of func) if this functions locals
-// cause overflow. Note that in a debug mode, initing the locals to garbage
-// will cause the overflow before this macro is executed.
-//
-// * Fire at CheckStack (end of func) if either our nested function calls
-// cause or use of _alloca cause the stack overflow. Note that this macro
-// is debug only, so release builds won't catch on this
-//
-// Some comments:
-// - Stack grows *down*,
-// - Ideally, all funcs would have EBP frame and we'd use EBP instead of ESP,
-// however, we use the 'this' ptr to get the stack ptr, since the guard
-// is declared on the stack.
-//
-// Comments about inlining assembly w/ Macros:
-// - Must use cstyle comments /* ... */
-// - No semi colons, need __asm keyword at the start of each line
-//-----------------------------------------------------------------------------
-
-//-----------------------------------------------------------------------------
-// *How* to use stack guards.
-//
-// See, in a CLR enlistment, src\ndp\clr\doc\OtherDevDocs\untriaged\clrdev_web\
-//
-//-----------------------------------------------------------------------------
-
-//-----------------------------------------------------------------------------
-// Stack guards have 3 compiler states:
-//#define FEATURE_STACK_PROBE
-// (All) All stack guard code is completely removed by the preprocessor if
-// not defined. This is used for CoreCLR.
-//
-//#define STACK_GUARDS_DEBUG
-// (DEBUG) Full stack guard debugging including cookies, tracking ips, and
-// chaining. More heavy weight, recommended for a debug build only
-//
-//#define STACK_GUARDS_RELEASE
-// (RELEASE) Light stack guard code. For golden builds. Forces Stack Overflow
-// to happen at "convenient" times. No debugging help.
-//-----------------------------------------------------------------------------
-
-#include "genericstackprobe.h"
-#include "utilcode.h"
-
-/* defining VM_NO_SO_INFRASTRUCTURE_CODE for VM code
- * This macro can be used to have code which will be present
- * only for code inside VM directory when SO infrastructure code is not built.
- * Eg. Currently it is used in macro EX_END_HOOK.
- * For VM code EX_HOOK calls CLREXception::HandleState::SetupCatch().
- * When Stack guards are disabled we will tear down the process in
- * CLREXception::HandleState::SetupCatch() if there is a StackOverflow.
- * So we should not reach EX_END_HOOK when there is StackOverflow.
- * This change cannot be done for all other code because
- * CLREXception::HandleState::SetupCatch() is not called rather
- * EXception::HandleState::SetupCatch() is called which is a nop.
- */
-
-#ifndef FEATURE_STACK_PROBE
-#undef VM_NO_SO_INFRASTRUCTURE_CODE
-#define VM_NO_SO_INFRASTRUCTURE_CODE(x) x
-#endif
-
-
-#ifdef FEATURE_STACK_PROBE
-
-#define DEFAULT_INTERIOR_PROBE_AMOUNT 4
-
-#define MINIMUM_STACK_REQUIREMENT (0.25)
-
-BOOL IsBackoutCalledForEH(BYTE *origSP, BYTE *backoutSP);
-
-//=============================================================================
-// Common code
-//=============================================================================
-// Release version of the probe function
-BOOL RetailStackProbeNoThrow(unsigned int n, Thread *pThread);
-BOOL RetailStackProbeNoThrowWorker(unsigned int n, Thread *pThread);
-void RetailStackProbe(unsigned int n, Thread *pThread);
-void RetailStackProbeWorker(unsigned int n, Thread *pThread);
-void ReportStackOverflow();
-
-// Retail stack probe with default amount is the most common stack probe. Create
-// a dedicated method for it to reduce code size.
-void DefaultRetailStackProbeWorker(Thread * pThread);
-
-void RetailStackProbe(unsigned int n);
-
-BOOL ShouldProbeOnThisThread();
-
-int SOTolerantBoundaryFilter(EXCEPTION_POINTERS *pExceptionInfo, DWORD * pdwSOTolerantFlags);
-void SOTolerantCode_RecoverStack(DWORD dwFlags);
-void SOTolerantCode_ExceptBody(DWORD * pdwFlags, Frame * pSafeForSOFrame);
-
-#endif
-
-#if defined(FEATURE_STACK_PROBE) && !defined(DACCESS_COMPILE)
-
-inline bool IsStackProbingEnabled()
-{
- LIMITED_METHOD_CONTRACT;
- return g_StackProbingEnabled;
-}
-
-//=============================================================================
-// DEBUG
-//=============================================================================
-#if defined(STACK_GUARDS_DEBUG)
-
-#include "common.h"
-
-class BaseStackGuard;
-
-//-----------------------------------------------------------------------------
-// Need to chain together stack guard address for nested functions
-// Use a TLS slot to store the head of the chain
-//-----------------------------------------------------------------------------
-extern DWORD g_CurrentStackGuardTlsIdx;
-
-//-----------------------------------------------------------------------------
-// Class
-//-----------------------------------------------------------------------------
-
-// Base version - has no ctor/dtor, so we can use it with SEH
-//
-// *** Don't declare any members here. Put them in BaseStackGuardGeneric.
-// We downcast directly from the base to the derived, using the knowledge
-// that the base class and the derived class are identical for members.
-//
-class BaseStackGuard : public BaseStackGuardGeneric
-{
-protected:
- BaseStackGuard()
- {
- _ASSERTE(!"No default construction allowed");
- }
-
-public:
- BaseStackGuard(const char *szFunction, const char *szFile, unsigned int lineNum) :
- BaseStackGuardGeneric(szFunction, szFile, lineNum)
- {
- STATIC_CONTRACT_LEAF;
- }
-
- UINT_PTR *Marker() { return m_pMarker; }
-
- unsigned int Depth() { return m_depth; }
-
- const char *FunctionName() { return m_szFunction; }
-
- BOOL IsProbeGuard()
- {
- return (m_isBoundaryGuard == FALSE);
- }
-
- BOOL IsBoundaryGuard()
- {
- return (m_isBoundaryGuard == TRUE);
- }
-
- inline BOOL ShouldCheckPreviousCookieIntegrity();
- inline BOOL ShouldCheckThisCookieIntegrity();
-
- BOOL RequiresNStackPages(unsigned int n, BOOL fThrowOnSO = TRUE);
- BOOL RequiresNStackPagesThrowing(unsigned int n);
- BOOL RequiresNStackPagesNoThrow(unsigned int n);
-private:
- BOOL RequiresNStackPagesInternal(unsigned int n, BOOL fThrowOnSO = TRUE);
-public:
- BOOL DoProbe(unsigned int n, BOOL fThrowOnSO);
- void CheckStack();
-
- static void RestoreCurrentGuard(BOOL fWasDisabled = FALSE);
- void PopGuardForEH();
-
- // Different error messages for the different times we detemine there's a problem.
- void HandleOverwrittenThisStackGuard(__in_z char *stackID);
- void HandleOverwrittenPreviousStackGuard(int shortFall, __in_z char *stackID);
- void HandleOverwrittenCurrentStackGuard(int shortFall, __in_z char *stackID);
- static void HandleOverwrittenCurrentStackGuard(void *pGuard, int shortFall, __in_z char *stackID);
-
- void CheckMarkerIntegrity();
- void RestorePreviousGuard();
- void ProtectMarkerPageInDebugger();
- void UndoPageProtectionInDebugger();
- static void ProtectMarkerPageInDebugger(void *pGuard);
- static void UndoPageProtectionInDebugger(void *pGuard);
-
- inline HRESULT PrepGuard()
- {
- WRAPPER_NO_CONTRACT;
-
- // See if it has already been prepped...
- if (ClrFlsGetValue(g_CurrentStackGuardTlsIdx) != NULL)
- return S_OK;
-
- // Let's see if we'll be able to put in a guard page
- ClrFlsSetValue(g_CurrentStackGuardTlsIdx,
-(void*)-1);
-
- if (ClrFlsGetValue(g_CurrentStackGuardTlsIdx) != (void*)-1)
- return E_OUTOFMEMORY;
-
- return S_OK;
-
- }
-
- inline static BaseStackGuard* GetCurrentGuard()
- {
- WRAPPER_NO_CONTRACT;
- if (g_CurrentStackGuardTlsIdx != -1)
- return (BaseStackGuard*) ClrFlsGetValue(g_CurrentStackGuardTlsIdx);
- else
- return NULL;
- }
-
- inline static BOOL IsGuard(BaseStackGuard *probe)
- {
- return (probe != NULL);
- }
- static void SetCurrentGuard(BaseStackGuard* pGuard);
- static void ResetCurrentGuard(BaseStackGuard* pGuard);
-
- inline static BOOL IsProbeGuard(BaseStackGuard *probe)
- {
- LIMITED_METHOD_CONTRACT;
- return (IsGuard(probe) != NULL && probe->IsProbeGuard());
- }
-
- inline static BOOL IsBoundaryGuard(BaseStackGuard *probe)
- {
- LIMITED_METHOD_CONTRACT;
- return (IsGuard(probe) != NULL && probe->IsBoundaryGuard());
- }
-
- static void InitProbeReportingToFaultInjectionFramework();
- BOOL ReportProbeToFaultInjectionFramework();
-
- static void Terminate();
-
-
- static HMODULE m_hProbeCallBack;
- typedef BOOL (*ProbeCallbackType)(unsigned, const char *);
- static ProbeCallbackType m_pfnProbeCallback;
-
-};
-
-
-// Derived version, add a dtor that automatically calls Check_Stack, move convenient, but can't use with SEH.
-class AutoCleanupStackGuard : public BaseStackGuard
-{
-protected:
- AutoCleanupStackGuard()
- {
- _ASSERTE(!"No default construction allowed");
- }
-
-public:
- DEBUG_NOINLINE AutoCleanupStackGuard(const char *szFunction, const char *szFile, unsigned int lineNum) :
- BaseStackGuard(szFunction, szFile, lineNum)
- {
- SCAN_SCOPE_BEGIN;
- // This CANNOT be a STATIC_CONTRACT_SO_INTOLERANT b/c that isn't
- // really just a static contract, it is actually calls EnsureSOIntolerantOK
- // as well. Instead we just use the annotation.
- ANNOTATION_FN_SO_INTOLERANT;
- }
-
- DEBUG_NOINLINE ~AutoCleanupStackGuard()
- {
- SCAN_SCOPE_END;
- CheckStack();
- }
-};
-
-class DebugSOIntolerantTransitionHandlerBeginOnly
-{
- BOOL m_prevSOTolerantState;
- ClrDebugState* m_clrDebugState;
- char *m_ctorSP;
-
- public:
- DEBUG_NOINLINE DebugSOIntolerantTransitionHandlerBeginOnly(EEThreadHandle thread);
- DEBUG_NOINLINE ~DebugSOIntolerantTransitionHandlerBeginOnly();
-};
-
-
-
-extern DWORD g_InteriorProbeAmount;
-
-//=============================================================================
-// Macros for transition into SO_INTOLERANT code
-//=============================================================================
-
-FORCEINLINE DWORD DefaultEntryProbeAmount() { return g_EntryPointProbeAmount; }
-
-#define BEGIN_SO_INTOLERANT_CODE(pThread) \
- BEGIN_SO_INTOLERANT_CODE_FOR(pThread, g_EntryPointProbeAmount) \
-
-#define BEGIN_SO_INTOLERANT_CODE_FOR(pThread, n) \
- { \
- /*_ASSERTE(pThread); */ \
- AutoCleanupStackGuard stack_guard_XXX(__FUNCTION__, __FILE__, __LINE__); \
- stack_guard_XXX.RequiresNStackPagesThrowing(ADJUST_PROBE(n)); \
- /* work around unreachable code warning */ \
- if (true) \
- { \
- DebugSOIntolerantTransitionHandler __soIntolerantTransitionHandler; \
- ANNOTATION_SO_PROBE_BEGIN(DEFAULT_ENTRY_PROBE_AMOUNT); \
- /* work around unreachable code warning */ \
- if (true) \
- { \
- DEBUG_ASSURE_NO_RETURN_BEGIN(SO_INTOLERANT)
-
-#define BEGIN_SO_INTOLERANT_CODE_NOTHROW(pThread, ActionOnSO) \
- { \
- /*_ASSERTE(pThread || IsGCSpecialThread());*/ \
- AutoCleanupStackGuard stack_guard_XXX(__FUNCTION__, __FILE__, __LINE__); \
- if (! stack_guard_XXX.RequiresNStackPagesNoThrow(ADJUST_PROBE(g_EntryPointProbeAmount)))\
- { \
- stack_guard_XXX.SetNoException(); \
- ActionOnSO; \
- } \
- /* work around unreachable code warning */ \
- else \
- { \
- DebugSOIntolerantTransitionHandler __soIntolerantTransitionHandler; \
- ANNOTATION_SO_PROBE_BEGIN(DEFAULT_ENTRY_PROBE_AMOUNT); \
- /* work around unreachable code warning */ \
- if (true) \
- { \
- DEBUG_ASSURE_NO_RETURN_BEGIN(SO_INTOLERANT)
-
-
-// This is defined just for using in the InternalSetupForComCall macro which
-// doesn't have a corresponding end macro because no exception will pass through it
-// It should not be used in any situation where an exception could pass through
-// the transition.
-#define SO_INTOLERANT_CODE_NOTHROW(pThread, ActionOnSO) \
- AutoCleanupStackGuard stack_guard_XXX(__FUNCTION__, __FILE__, __LINE__); \
- if (! stack_guard_XXX.RequiresNStackPagesNoThrow(ADJUST_PROBE(g_EntryPointProbeAmount)))\
- { \
- ActionOnSO; \
- } \
- stack_guard_XXX.SetNoException(); \
- DebugSOIntolerantTransitionHandlerBeginOnly __soIntolerantTransitionHandler(pThread); \
- ANNOTATION_SO_PROBE_BEGIN(DEFAULT_ENTRY_PROBE_AMOUNT);
-
-
-// For some codepaths used during the handling of an SO, we need to guarantee a
-// minimal stack consumption to avoid an SO on that codepath. These are typically host
-// APIS such as allocation. The host is going to use < 1/4 page, so make sure
-// we have that amount before calling. Then use the BACKOUT_VALIDATION to ensure
-// that we don't overrun it. We call ReportStackOverflow, which will generate a hard
-// SO if we have less than a page left.
-
-#define MINIMAL_STACK_PROBE_CHECK_THREAD(pThread) \
- if (IsStackProbingEnabled()) \
- { \
- Thread *__pThread = pThread; \
- if (__pThread && ! __pThread->IsStackSpaceAvailable(MINIMUM_STACK_REQUIREMENT)) \
- { \
- ReportStackOverflow(); \
- } \
- } \
- CONTRACT_VIOLATION(SOToleranceViolation);
-
-// We don't use the DebugSOIntolerantTransitionHandler here because we don't need to transition into
-// SO-intolerant code. We're already there. We also don't need to annotate as having probed,
-// because this only matters for entry point functions.
-// We have a way to separate the declaration from the actual probing for cases where need
-// to do a test, such as IsGCThread(), to decide if should probe.
-#define DECLARE_INTERIOR_STACK_PROBE \
- { \
- AutoCleanupStackGuard stack_guard_XXX(__FUNCTION__, __FILE__, __LINE__);\
- DEBUG_ASSURE_NO_RETURN_BEGIN(STACK_PROBE)
-
-
-// A function containing an interior probe is implicilty SO-Intolerant because we
-// assume that it is not behind a probe. So confirm that we are in the correct state.
-#define DO_INTERIOR_STACK_PROBE_FOR(pThread, n) \
- _ASSERTE(pThread != NULL); \
- stack_guard_XXX.RequiresNStackPagesThrowing(ADJUST_PROBE(n)); \
- EnsureSOIntolerantOK(__FUNCTION__, __FILE__, __LINE__);
-
-#define DO_INTERIOR_STACK_PROBE_FOR_CHECK_THREAD(n) \
- if (ShouldProbeOnThisThread()) \
- { \
- DO_INTERIOR_STACK_PROBE_FOR(GetThread(), g_InteriorProbeAmount); \
- }
-
-// A function containing an interior probe is implicilty SO-Intolerant because we
-// assume that it is not behind a probe. So confirm that we are in the correct state.
-#define DO_INTERIOR_STACK_PROBE_FOR_NOTHROW(pThread, n, actionOnSO) \
- _ASSERTE(pThread != NULL); \
- if (! stack_guard_XXX.RequiresNStackPagesNoThrow(ADJUST_PROBE(n))) \
- { \
- stack_guard_XXX.SetNoException(); \
- actionOnSO; \
- } \
- EnsureSOIntolerantOK(__FUNCTION__, __FILE__, __LINE__);
-
-#define DO_INTERIOR_STACK_PROBE_FOR_NOTHROW_CHECK_THREAD(n, actionOnSO) \
- if (ShouldProbeOnThisThread()) \
- { \
- DO_INTERIOR_STACK_PROBE_FOR_NOTHROW(GetThread(), n, actionOnSO); \
- }
-
-
-#define INTERIOR_STACK_PROBE_FOR(pThread, n) \
- DECLARE_INTERIOR_STACK_PROBE; \
- DO_INTERIOR_STACK_PROBE_FOR(pThread, n)
-
-#define INTERIOR_STACK_PROBE_FOR_CHECK_THREAD(n) \
- DECLARE_INTERIOR_STACK_PROBE; \
- DO_INTERIOR_STACK_PROBE_FOR_CHECK_THREAD(n)
-
-#define INTERIOR_STACK_PROBE_FOR_NOTHROW(pThread, n, ActionOnSO) \
- DECLARE_INTERIOR_STACK_PROBE; \
- DO_INTERIOR_STACK_PROBE_FOR_NOTHROW(pThread, n, ActionOnSO)
-
-#define INTERIOR_STACK_PROBE_FOR_NOTHROW_CHECK_THREAD(n, ActionOnSO) \
- DECLARE_INTERIOR_STACK_PROBE; \
- DO_INTERIOR_STACK_PROBE_FOR_NOTHROW_CHECK_THREAD(n, ActionOnSO)
-
-
-#define INTERIOR_STACK_PROBE(pThread) \
- INTERIOR_STACK_PROBE_FOR(pThread, g_InteriorProbeAmount)
-
-#define INTERIOR_STACK_PROBE_CHECK_THREAD \
- INTERIOR_STACK_PROBE_FOR_CHECK_THREAD(g_InteriorProbeAmount)
-
-#define INTERIOR_STACK_PROBE_NOTHROW(pThread, ActionOnSO) \
- INTERIOR_STACK_PROBE_FOR_NOTHROW(pThread, g_InteriorProbeAmount, ActionOnSO)
-
-#define INTERIOR_STACK_PROBE_NOTHROW_CHECK_THREAD(ActionOnSO) \
- INTERIOR_STACK_PROBE_FOR_NOTHROW_CHECK_THREAD(g_InteriorProbeAmount, ActionOnSO)
-
-
-#define END_INTERIOR_STACK_PROBE \
- DEBUG_ASSURE_NO_RETURN_END(STACK_PROBE) \
- stack_guard_XXX.SetNoException(); \
- }
-
-#define RETURN_FROM_INTERIOR_PROBE(x) \
- DEBUG_OK_TO_RETURN_BEGIN(STACK_PROBE) \
- stack_guard_XXX.SetNoException(); \
- RETURN(x); \
- DEBUG_OK_TO_RETURN_END(STACK_PROBE)
-
-
-// This is used for EH code where we are about to throw.
-// To avoid taking an SO during EH processing, want to include it in our probe limits
-// So we will just do a big probe and then throw.
-#define STACK_PROBE_FOR_THROW(pThread) \
- AutoCleanupStackGuard stack_guard_XXX(__FUNCTION__, __FILE__, __LINE__); \
- if (pThread != NULL) \
- { \
- DO_INTERIOR_STACK_PROBE_FOR(pThread, ADJUST_PROBE(DEFAULT_ENTRY_PROBE_AMOUNT)); \
- }
-
-// This is used for throws where we cannot use a dtor-based probe.
-#define PUSH_STACK_PROBE_FOR_THROW(pThread) \
- BaseStackGuard stack_guard_XXX(__FUNCTION__, __FILE__, __LINE__); \
- stack_guard_XXX.RequiresNStackPagesThrowing(ADJUST_PROBE(g_EntryPointProbeAmount));
-
-#define SAVE_ADDRESS_OF_STACK_PROBE_FOR_THROW(pGuard) \
- pGuard = &stack_guard_XXX;
-
-#define RESET_EXCEPTION_FROM_STACK_PROBE_FOR_THROW(pGuard) \
- pGuard->SetNoException ();
-
-#define POP_STACK_PROBE_FOR_THROW(pGuard) \
- pGuard->CheckStack();
-
-//=============================================================================
-// Macros for transition into SO_TOLERANT code
-//=============================================================================
-// @todo : put this assert in when all probes are in place.
-// _ASSERTE(! pThread->IsSOTolerant());
-
-//*********************************************************************************
-
-// A boundary stack guard is pushed onto the probe stack when we leave the EE and
-// popped when we return. It is used for 1) restoring the original probe's cookie
-// when we return, as managed code could trash it and 2) marking a boundary so that
-// we know not to check for over-written probes before it when install a real probe.
-//
-class BoundaryStackGuard : public BaseStackGuard
-{
-protected:
- BoundaryStackGuard()
- {
- LIMITED_METHOD_CONTRACT;
-
- _ASSERTE(!"No default construction allowed");
- }
-
-public:
- DEBUG_NOINLINE BoundaryStackGuard(const char *szFunction, const char *szFile, unsigned int lineNum)
- : BaseStackGuard(szFunction, szFile, lineNum)
- {
- SCAN_SCOPE_BEGIN;
- ANNOTATION_FN_SO_TOLERANT;
-
- m_isBoundaryGuard = TRUE;
- }
-
- DEBUG_NOINLINE void Push();
- DEBUG_NOINLINE void Pop();
-
- DEBUG_NOINLINE void SetNoExceptionNoPop()
- {
- SCAN_SCOPE_END;
- SetNoException();
- }
-
-};
-
-// Derived version, add a dtor that automatically calls Pop, more convenient, but can't use with SEH.
-class AutoCleanupBoundaryStackGuard : public BoundaryStackGuard
-{
-protected:
- AutoCleanupBoundaryStackGuard()
- {
- _ASSERTE(!"No default construction allowed");
- }
-
-public:
- DEBUG_NOINLINE AutoCleanupBoundaryStackGuard(const char *szFunction, const char *szFile, unsigned int lineNum) :
- BoundaryStackGuard(szFunction, szFile, lineNum)
- {
- SCAN_SCOPE_BEGIN;
- ANNOTATION_FN_SO_TOLERANT;
- }
-
- DEBUG_NOINLINE ~AutoCleanupBoundaryStackGuard()
- {
- SCAN_SCOPE_END;
- Pop();
- }
-};
-
-
-class DebugSOTolerantTransitionHandler
-{
- BOOL m_prevSOTolerantState;
- ClrDebugState* m_clrDebugState;
-
- public:
- void EnterSOTolerantCode(Thread *pThread);
- void ReturnFromSOTolerantCode();
-};
-
-class AutoCleanupDebugSOTolerantTransitionHandler : DebugSOTolerantTransitionHandler
-{
- BOOL m_prevSOTolerantState;
- ClrDebugState* m_clrDebugState;
-
- public:
- DEBUG_NOINLINE AutoCleanupDebugSOTolerantTransitionHandler(Thread *pThread)
- {
- SCAN_SCOPE_BEGIN;
- ANNOTATION_FN_SO_INTOLERANT;
-
- EnterSOTolerantCode(pThread);
- }
- DEBUG_NOINLINE ~AutoCleanupDebugSOTolerantTransitionHandler()
- {
- SCAN_SCOPE_END;
-
- ReturnFromSOTolerantCode();
- }
-};
-
-
-// When we enter SO-tolerant code, we
-// 1) probe to make sure that we will have enough stack to run our backout code. We don't
-// need to check that the cookie was overrun because we only care that we had enough stack.
-// But we do anyway, to pop off the guard.s
-// The backout code infrastcture ensures that we stay below the BACKOUT_CODE_STACK_LIMIT.
-// 2) Install a boundary guard, which will preserve our cookie and prevent spurious checks if
-// we call back into the EE.
-// 3) Formally transition into SO-tolerant code so that we can make sure we are probing if we call
-// back into the EE.
-//
-
-#undef OPTIONAL_SO_CLEANUP_UNWIND
-#define OPTIONAL_SO_CLEANUP_UNWIND(pThread, pFrame)
-
-#define BSTC_RECOVER_STACK 0x1
-#define BSTC_IS_SO 0x2
-#define BSTC_IS_SOFT_SO 0x4
-#define BSTC_TRIGGERING_UNWIND_FOR_SO 0x8
-
-#define BEGIN_SO_TOLERANT_CODE(pThread) \
- { /* add an outer scope so that we'll restore our state as soon as we return */ \
- Thread * const __pThread = pThread; \
- DWORD __dwFlags = 0; \
- Frame * __pSafeForSOFrame = __pThread ? __pThread->GetFrame() : NULL; \
- SCAN_BLOCKMARKER(); \
- SCAN_BLOCKMARKER_MARK(); \
- BoundaryStackGuard boundary_guard_XXX(__FUNCTION__, __FILE__, __LINE__); \
- boundary_guard_XXX.Push(); \
- DebugSOTolerantTransitionHandler __soTolerantTransitionHandler; \
- __soTolerantTransitionHandler.EnterSOTolerantCode(__pThread); \
- __try \
- { \
- SCAN_EHMARKER(); \
- __try \
- { \
- SCAN_EHMARKER_TRY(); \
- DEBUG_ASSURE_NO_RETURN_BEGIN(STACK_PROBE) \
- __try \
- {
-
-
-// We need to catch any hard SO that comes through in order to get our stack back and make sure that we can run our backout code.
-// Also can't allow a hard SO to propogate into SO-intolerant code, as we can't tell where it came from and would have to rip the process.
-// So install a filter and catch hard SO and rethrow a C++ SO. Note that we don't check the host policy here it only applies to exceptions
-// that will leak back into managed code.
-#define END_SO_TOLERANT_CODE \
- } \
- __finally \
- { \
- STATIC_CONTRACT_SO_TOLERANT; \
- if (__dwFlags & BSTC_TRIGGERING_UNWIND_FOR_SO) \
- { \
- OPTIONAL_SO_CLEANUP_UNWIND(__pThread, __pSafeForSOFrame) \
- } \
- } \
- DEBUG_ASSURE_NO_RETURN_END(STACK_PROBE) \
- boundary_guard_XXX.SetNoException(); \
- SCAN_EHMARKER_END_TRY(); \
- } \
- __except(SOTolerantBoundaryFilter(GetExceptionInformation(), &__dwFlags)) \
- { \
- SCAN_EHMARKER_CATCH(); \
- __soTolerantTransitionHandler.ReturnFromSOTolerantCode(); \
- SOTolerantCode_ExceptBody(&__dwFlags, __pSafeForSOFrame); \
- SCAN_EHMARKER_END_CATCH(); \
- } \
- /* This will correctly set the annotation back to SOIntolerant if needed */ \
- SCAN_BLOCKMARKER_USE(); \
- if (__dwFlags & BSTC_RECOVER_STACK) \
- { \
- SOTolerantCode_RecoverStack(__dwFlags); \
- } \
- } \
- __finally \
- { \
- __soTolerantTransitionHandler.ReturnFromSOTolerantCode(); \
- boundary_guard_XXX.Pop(); \
- } \
- /* This is actually attached to the SCAN_BLOCKMARKER_USE() in the try scope */ \
- /* but should hopefully chain the right annotations for a call to a __finally */ \
- SCAN_BLOCKMARKER_END_USE(); \
- }
-
-extern unsigned __int64 getTimeStamp();
-
-INDEBUG(void AddHostCallsStaticMarker();)
-
-// This is used for calling into host
-// We only need to install the boundary guard, and transition into SO-tolerant code.
-#define BEGIN_SO_TOLERANT_CODE_CALLING_HOST(pThread) \
- { \
- ULONGLONG __entryTime = 0; \
- __int64 __entryTimeStamp = 0; \
- _ASSERTE(CanThisThreadCallIntoHost()); \
- _ASSERTE((pThread == NULL) || \
- (pThread->GetClrDebugState() == NULL) || \
- ((pThread->GetClrDebugState()->ViolationMask() & \
- (HostViolation|BadDebugState)) != 0) || \
- (pThread->GetClrDebugState()->IsHostCaller())); \
- INDEBUG(AddHostCallsStaticMarker();) \
- _ASSERTE(pThread == NULL || !pThread->IsInForbidSuspendRegion()); \
- { \
- AutoCleanupBoundaryStackGuard boundary_guard_XXX(__FUNCTION__, __FILE__, __LINE__); \
- boundary_guard_XXX.Push(); \
- AutoCleanupDebugSOTolerantTransitionHandler __soTolerantTransitionHandler(pThread); \
- DEBUG_ASSURE_NO_RETURN_BEGIN(STACK_PROBE); \
-
-#define END_SO_TOLERANT_CODE_CALLING_HOST \
- DEBUG_ASSURE_NO_RETURN_END(STACK_PROBE) \
- boundary_guard_XXX.SetNoExceptionNoPop(); \
- } \
- }
-
-//-----------------------------------------------------------------------------
-// Startup & Shutdown stack guard subsystem
-//-----------------------------------------------------------------------------
-void InitStackProbes();
-void TerminateStackProbes();
-
-#elif defined(STACK_GUARDS_RELEASE)
-//=============================================================================
-// Release - really streamlined,
-//=============================================================================
-
-void InitStackProbesRetail();
-inline void InitStackProbes()
-{
- InitStackProbesRetail();
-}
-
-inline void TerminateStackProbes()
-{
- LIMITED_METHOD_CONTRACT;
-}
-
-
-//=============================================================================
-// Macros for transition into SO_INTOLERANT code
-//=============================================================================
-
-FORCEINLINE DWORD DefaultEntryProbeAmount() { return DEFAULT_ENTRY_PROBE_AMOUNT; }
-
-#define BEGIN_SO_INTOLERANT_CODE(pThread) \
-{ \
- if (IsStackProbingEnabled()) DefaultRetailStackProbeWorker(pThread); \
- /* match with the else used in other macros */ \
- if (true) { \
- SOIntolerantTransitionHandler __soIntolerantTransitionHandler; \
- /* work around unreachable code warning */ \
- if (true) { \
- DEBUG_ASSURE_NO_RETURN_BEGIN(SO_INTOLERANT)
-
-#define BEGIN_SO_INTOLERANT_CODE_FOR(pThread, n) \
-{ \
- if (IsStackProbingEnabled()) RetailStackProbeWorker(ADJUST_PROBE(n), pThread); \
- /* match with the else used in other macros */ \
- if (true) { \
- SOIntolerantTransitionHandler __soIntolerantTransitionHandler; \
- /* work around unreachable code warning */ \
- if (true) { \
- DEBUG_ASSURE_NO_RETURN_BEGIN(SO_INTOLERANT)
-
-#define BEGIN_SO_INTOLERANT_CODE_NOTHROW(pThread, ActionOnSO) \
-{ \
- if (IsStackProbingEnabled() && !RetailStackProbeNoThrowWorker(ADJUST_PROBE(DEFAULT_ENTRY_PROBE_AMOUNT), pThread)) \
- { \
- ActionOnSO; \
- } else { \
- SOIntolerantTransitionHandler __soIntolerantTransitionHandler; \
- /* work around unreachable code warning */ \
- if (true) { \
- DEBUG_ASSURE_NO_RETURN_BEGIN(SO_INTOLERANT)
-
-
-// This is defined just for using in the InternalSetupForComCall macro which
-// doesn't have a corresponding end macro because no exception will pass through it
-// It should not be used in any situation where an exception could pass through
-// the transition.
-#define SO_INTOLERANT_CODE_NOTHROW(pThread, ActionOnSO) \
- if (IsStackProbingEnabled() && !RetailStackProbeNoThrowWorker(ADJUST_PROBE(DEFAULT_ENTRY_PROBE_AMOUNT), pThread)) \
- { \
- ActionOnSO; \
- } \
-
-#define MINIMAL_STACK_PROBE_CHECK_THREAD(pThread) \
- if (IsStackProbingEnabled()) \
- { \
- Thread *__pThread = pThread; \
- if (__pThread && ! __pThread->IsStackSpaceAvailable(MINIMUM_STACK_REQUIREMENT)) \
- { \
- ReportStackOverflow(); \
- } \
- }
-
-#define DECLARE_INTERIOR_STACK_PROBE
-
-
-#define DO_INTERIOR_STACK_PROBE_FOR(pThread, n) \
- if (IsStackProbingEnabled()) \
- { \
- RetailStackProbeWorker(ADJUST_PROBE(n), pThread); \
- }
-
-#define DO_INTERIOR_STACK_PROBE_FOR_CHECK_THREAD(n) \
- if (IsStackProbingEnabled() && ShouldProbeOnThisThread()) \
- { \
- RetailStackProbeWorker(ADJUST_PROBE(n), GetThread()); \
- }
-
-#define DO_INTERIOR_STACK_PROBE_FOR_NOTHROW(pThread, n, ActionOnSO) \
- if (IsStackProbingEnabled()) \
- { \
- if (!RetailStackProbeNoThrowWorker(ADJUST_PROBE(n), pThread)) \
- { \
- ActionOnSO; \
- } \
- }
-
-#define DO_INTERIOR_STACK_PROBE_FOR_NOTHROW_CHECK_THREAD(n, ActionOnSO) \
- if (IsStackProbingEnabled() && ShouldProbeOnThisThread()) \
- { \
- if (!RetailStackProbeNoThrowWorker(ADJUST_PROBE(n), GetThread())) \
- { \
- ActionOnSO; \
- } \
- }
-
-
-#define INTERIOR_STACK_PROBE_FOR(pThread, n) \
- DECLARE_INTERIOR_STACK_PROBE; \
- DO_INTERIOR_STACK_PROBE_FOR(pThread, n)
-
-#define INTERIOR_STACK_PROBE_FOR_CHECK_THREAD(n) \
- DECLARE_INTERIOR_STACK_PROBE; \
- DO_INTERIOR_STACK_PROBE_FOR_CHECK_THREAD(n)
-
-#define INTERIOR_STACK_PROBE_FOR_NOTHROW(pThread, n, ActionOnSO) \
- DECLARE_INTERIOR_STACK_PROBE; \
- DO_INTERIOR_STACK_PROBE_FOR_NOTHROW(pThread, n, ActionOnSO)
-
-#define INTERIOR_STACK_PROBE_FOR_NOTHROW_CHECK_THREAD(n, ActionOnSO) \
- DECLARE_INTERIOR_STACK_PROBE; \
- DO_INTERIOR_STACK_PROBE_FOR_NOTHROW_CHECK_THREAD(n, ActionOnSO)
-
-
-#define INTERIOR_STACK_PROBE(pThread) \
- INTERIOR_STACK_PROBE_FOR(pThread, DEFAULT_INTERIOR_PROBE_AMOUNT)
-
-#define INTERIOR_STACK_PROBE_CHECK_THREAD \
- INTERIOR_STACK_PROBE_FOR_CHECK_THREAD(DEFAULT_INTERIOR_PROBE_AMOUNT)
-
-#define INTERIOR_STACK_PROBE_NOTHROW(pThread, ActionOnSO) \
- INTERIOR_STACK_PROBE_FOR_NOTHROW(pThread, DEFAULT_INTERIOR_PROBE_AMOUNT, ActionOnSO)
-
-#define INTERIOR_STACK_PROBE_NOTHROW_CHECK_THREAD(ActionOnSO) \
- INTERIOR_STACK_PROBE_FOR_NOTHROW_CHECK_THREAD(DEFAULT_INTERIOR_PROBE_AMOUNT, ActionOnSO)
-
-
-#define END_INTERIOR_STACK_PROBE
-
-#define RETURN_FROM_INTERIOR_PROBE(x) RETURN(x)
-
-
-// This is used for EH code where we are about to throw
-// To avoid taking an SO during EH processing, want to include it in our probe limits
-// So we will just do a big probe and then throw.
-#define STACK_PROBE_FOR_THROW(pThread) \
- if (pThread != NULL) \
- { \
- RetailStackProbe(ADJUST_PROBE(DEFAULT_ENTRY_PROBE_AMOUNT), pThread); \
- } \
-
-#define PUSH_STACK_PROBE_FOR_THROW(pThread) \
- RetailStackProbe(ADJUST_PROBE(DEFAULT_ENTRY_PROBE_AMOUNT), pThread);
-
-#define SAVE_ADDRESS_OF_STACK_PROBE_FOR_THROW(pGuard)
-
-#define POP_STACK_PROBE_FOR_THROW(pGuard)
-
-
-//=============================================================================
-// Macros for transition into SO_TOLERANT code
-//=============================================================================
-
-#undef OPTIONAL_SO_CLEANUP_UNWIND
-#define OPTIONAL_SO_CLEANUP_UNWIND(pThread, pFrame)
-
-#define BSTC_RECOVER_STACK 0x1
-#define BSTC_IS_SO 0x2
-#define BSTC_IS_SOFT_SO 0x4
-#define BSTC_TRIGGERING_UNWIND_FOR_SO 0x8
-
-
-#define BEGIN_SO_TOLERANT_CODE(pThread) \
-{ \
- Thread * __pThread = pThread; \
- DWORD __dwFlags = 0; \
- Frame * __pSafeForSOFrame = __pThread ? __pThread->GetFrame() : NULL; \
- SCAN_BLOCKMARKER(); \
- SCAN_BLOCKMARKER_MARK(); \
- SCAN_EHMARKER(); \
- __try \
- { \
- SCAN_EHMARKER_TRY() \
- __try \
- {
-
-// We need to catch any hard SO that comes through in order to get our stack back and make sure that we can run our backout code.
-// Also can't allow a hard SO to propogate into SO-intolerant code, as we can't tell where it came from and would have to rip the process.
-// So install a filter and catch hard SO and rethrow a C++ SO.
-#define END_SO_TOLERANT_CODE \
- } \
- __finally \
- { \
- STATIC_CONTRACT_SO_TOLERANT; \
- if (__dwFlags & BSTC_TRIGGERING_UNWIND_FOR_SO) \
- { \
- OPTIONAL_SO_CLEANUP_UNWIND(__pThread, __pSafeForSOFrame) \
- } \
- } \
- SCAN_EHMARKER_END_TRY(); \
- } \
- __except(SOTolerantBoundaryFilter(GetExceptionInformation(), &__dwFlags)) \
- { \
- SCAN_EHMARKER_CATCH(); \
- SOTolerantCode_ExceptBody(&__dwFlags, __pSafeForSOFrame); \
- SCAN_EHMARKER_END_CATCH(); \
- } \
- SCAN_BLOCKMARKER_USE(); \
- if (__dwFlags & BSTC_RECOVER_STACK) \
- { \
- SOTolerantCode_RecoverStack(__dwFlags); \
- } \
- SCAN_BLOCKMARKER_END_USE(); \
-}
-
-#define BEGIN_SO_TOLERANT_CODE_CALLING_HOST(pThread) \
- { \
-
-#define END_SO_TOLERANT_CODE_CALLING_HOST \
- }
-
-#endif
-
-#else // FEATURE_STACK_PROBE && !DACCESS_COMPILE
-
-inline void InitStackProbes()
-{
- LIMITED_METHOD_CONTRACT;
-}
-
-inline void TerminateStackProbes()
-{
- LIMITED_METHOD_CONTRACT;
-}
-
-#define BEGIN_SO_INTOLERANT_CODE(pThread)
-#define BEGIN_SO_INTOLERANT_CODE_FOR(pThread, n)
-#define BEGIN_SO_INTOLERANT_CODE_NOTHROW(pThread, ActionOnSO)
-#define SO_INTOLERANT_CODE_NOTHROW(pThread, ActionOnSO)
-#define MINIMAL_STACK_PROBE_CHECK_THREAD(pThread)
-
-#define DECLARE_INTERIOR_STACK_PROBE
-
-#define DO_INTERIOR_STACK_PROBE_FOR(pThread, n)
-#define DO_INTERIOR_STACK_PROBE_FOR_CHECK_THREAD(n)
-#define DO_INTERIOR_STACK_PROBE_FOR_NOTHROW(pThread, n, ActionOnSO)
-#define DO_INTERIOR_STACK_PROBE_FOR_NOTHROW_CHECK_THREAD(n, ActionOnSO)
-
-#define INTERIOR_STACK_PROBE_FOR(pThread, n)
-#define INTERIOR_STACK_PROBE_FOR_CHECK_THREAD(n)
-#define INTERIOR_STACK_PROBE_FOR_NOTHROW(pThread, n, ActionOnSO)
-#define INTERIOR_STACK_PROBE_FOR_NOTHROW_CHECK_THREAD(n, ActionOnSO)
-
-#define INTERIOR_STACK_PROBE(pThread)
-#define INTERIOR_STACK_PROBE_CHECK_THREAD
-#define INTERIOR_STACK_PROBE_NOTHROW(pThread, ActionOnSO)
-#define INTERIOR_STACK_PROBE_NOTHROW_CHECK_THREAD(ActionOnSO)
-
-#define END_INTERIOR_STACK_PROBE
-#define RETURN_FROM_INTERIOR_PROBE(x) RETURN(x)
-
-#define STACK_PROBE_FOR_THROW(pThread)
-#define PUSH_STACK_PROBE_FOR_THROW(pThread)
-#define SAVE_ADDRESS_OF_STACK_PROBE_FOR_THROW(pGuard)
-#define POP_STACK_PROBE_FOR_THROW(pGuard)
-
-#define BEGIN_SO_TOLERANT_CODE(pThread)
-#define END_SO_TOLERANT_CODE
-#define RETURN_FROM_SO_TOLERANT_CODE_HAS_CATCH
-#define BEGIN_SO_TOLERANT_CODE_CALLING_HOST(pThread) \
- _ASSERTE(CanThisThreadCallIntoHost());
-
-#define END_SO_TOLERANT_CODE_CALLING_HOST
-
-#endif // FEATURE_STACK_PROBE && !DACCESS_COMPILE
-
-#endif // __STACKPROBE_h__
+++ /dev/null
-// Licensed to the .NET Foundation under one or more agreements.
-// The .NET Foundation licenses this file to you under the MIT license.
-// See the LICENSE file in the project root for more information.
-
-//
-
-//
-// ==--==
-//
-
-//
-//-----------------------------------------------------------------------------
-// Stack Probe Header for inline functions
-// Used to setup stack guards
-//-----------------------------------------------------------------------------
-#ifndef __STACKPROBE_inl__
-#define __STACKPROBE_inl__
-
-#include "stackprobe.h"
-#include "common.h"
-
-#if defined(FEATURE_STACK_PROBE) && !defined(DACCESS_COMPILE)
-
-// want to inline in retail, but out of line into stackprobe.cpp in debug
-#if !defined(_DEBUG) || defined(INCLUDE_RETAIL_STACK_PROBE)
-
-#ifndef _DEBUG
-#define INLINE_NONDEBUG_ONLY FORCEINLINE
-#else
-#define INLINE_NONDEBUG_ONLY
-#endif
-
-INLINE_NONDEBUG_ONLY BOOL ShouldProbeOnThisThread()
-{
- // we only want to probe on user threads, not any of our special threads
- return GetCurrentTaskType() == TT_USER;
-}
-
-#if defined(_DEBUG) && defined(STACK_GUARDS_DEBUG)
-
-DEBUG_NOINLINE void DebugSOTolerantTransitionHandler::EnterSOTolerantCode(Thread *pThread)
-{
- SCAN_SCOPE_BEGIN;
- ANNOTATION_FN_SO_TOLERANT;
-
- if (pThread)
- {
- m_clrDebugState = pThread->GetClrDebugState();
- }
- else
- {
- m_clrDebugState = GetClrDebugState();
- }
- if (m_clrDebugState)
- m_prevSOTolerantState = m_clrDebugState->BeginSOTolerant();
-}
-
-DEBUG_NOINLINE void DebugSOTolerantTransitionHandler::ReturnFromSOTolerantCode()
-{
- SCAN_SCOPE_END;
-
- if (m_clrDebugState)
- m_clrDebugState->SetSOTolerance(m_prevSOTolerantState);
-}
-
-#endif
-
-// Keep the main body out of line to keep code size down.
-NOINLINE BOOL RetailStackProbeNoThrowWorker(unsigned int n, Thread *pThread);
-NOINLINE void RetailStackProbeWorker(unsigned int n, Thread *pThread);
-
-INLINE_NONDEBUG_ONLY
-BOOL RetailStackProbeNoThrow(unsigned int n, Thread *pThread)
-{
- STATIC_CONTRACT_NOTHROW;
- STATIC_CONTRACT_GC_NOTRIGGER;
- STATIC_CONTRACT_SO_TOLERANT;
-
-#ifdef STACK_GUARDS_RELEASE
- if(!IsStackProbingEnabled())
- {
- return TRUE;
- }
-#endif
-
- return RetailStackProbeNoThrowWorker(n, pThread);
-}
-
-INLINE_NONDEBUG_ONLY
-void RetailStackProbe(unsigned int n, Thread *pThread)
-{
- STATIC_CONTRACT_THROWS;
- STATIC_CONTRACT_GC_NOTRIGGER;
- STATIC_CONTRACT_SO_TOLERANT;
-
-#ifdef STACK_GUARDS_RELEASE
- if(!IsStackProbingEnabled())
- {
- return;
- }
-#endif
-
- if (RetailStackProbeNoThrowWorker(n, pThread))
- {
- return;
- }
- ReportStackOverflow();
-}
-
-INLINE_NONDEBUG_ONLY
-void RetailStackProbe(unsigned int n)
-{
- STATIC_CONTRACT_THROWS;
- STATIC_CONTRACT_GC_NOTRIGGER;
- STATIC_CONTRACT_SO_TOLERANT;
-
-#ifdef STACK_GUARDS_RELEASE
- if(!IsStackProbingEnabled())
- {
- return;
- }
-#endif
-
- if (RetailStackProbeNoThrowWorker(n, GetThread()))
- {
- return;
- }
- ReportStackOverflow();
-}
-
-#endif
-#endif
-
-
-#endif // __STACKPROBE_inl__
{
THROWS;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
THROWS;
GC_TRIGGERS;
MODE_ANY;
- SO_INTOLERANT;
}
CONTRACTL_END;
return;
}
- BEGIN_SO_INTOLERANT_CODE(m_pThread);
-
// User asked us to sample after certain time.
m_pThread->UserSleep(m_nSampleAfter);
// TODO: Measure time to JIT using CycleTimer and subtract from the time we sleep every time.
m_pThread->UserSleep(m_nSampleEvery);
}
-
- END_SO_INTOLERANT_CODE;
}
// Find the most frequent method in the samples and JIT them.
#include "eetwain.h"
#include "codeman.h"
#include "eeconfig.h"
-#include "stackprobe.h"
#include "dbginterface.h"
#include "generics.h"
#ifdef FEATURE_INTERPRETER
MethodDesc* CrawlFrame::GetFunction()
{
LIMITED_METHOD_DAC_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
if (pFunc != NULL)
{
return pFunc;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
SUPPORTS_DAC;
}
GC_NOTRIGGER;
PRECONDITION(GetControlPC(pRD) == GetIP(pRD->pCurrentContext));
- SO_TOLERANT;
}
CONTRACTL_END;
GC_NOTRIGGER;
PRECONDITION(CheckPointer(pContext, NULL_NOT_OK));
PRECONDITION(CheckPointer(pContextPointers, NULL_OK));
- SO_TOLERANT;
SUPPORTS_DAC;
}
CONTRACTL_END;
PRECONDITION(CheckPointer(pContext, NULL_NOT_OK));
PRECONDITION(CheckPointer(pContextPointers, NULL_OK));
PRECONDITION(CheckPointer(pFunctionEntry, NULL_OK));
- SO_TOLERANT;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACTL_END;
// that any C++ destructors pushed in this function will never execute, and it means that this function can
// never have a dynamic contract.
STATIC_CONTRACT_WRAPPER;
- STATIC_CONTRACT_SO_INTOLERANT;
SCAN_IGNORE_THROW; // see contract above
SCAN_IGNORE_TRIGGER; // see contract above
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
SUPPORTS_DAC;
} CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
SUPPORTS_DAC;
}
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
SUPPORTS_DAC;
}
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pUnk));
PRECONDITION(IsInProcCCWTearOff(pUnk));
PRECONDITION(CheckPointer(ppv, NULL_OK));
GC_TRIGGERS;
MODE_PREEMPTIVE;
PRECONDITION(CheckPointer(pUnk));
- SO_TOLERANT;
}
CONTRACTL_END;
GC_TRIGGERS;
MODE_PREEMPTIVE;
PRECONDITION(CheckPointer(pUnk));
- SO_TOLERANT;
}
CONTRACTL_END;
GC_TRIGGERS;
MODE_PREEMPTIVE;
PRECONDITION(CheckPointer(pUnk));
- SO_TOLERANT;
}
CONTRACTL_END;
GC_TRIGGERS;
MODE_PREEMPTIVE;
PRECONDITION(CheckPointer(pUnk));
- SO_TOLERANT;
}
CONTRACTL_END;
MODE_PREEMPTIVE;
PRECONDITION(CheckPointer(pUnk));
PRECONDITION(IsSimpleTearOff(pUnk));
- SO_TOLERANT;
}
CONTRACTL_END;
MODE_PREEMPTIVE;
PRECONDITION(CheckPointer(pUnk));
PRECONDITION(IsSimpleTearOff(pUnk));
- SO_TOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pPropertyProvider));
PRECONDITION(IsSimpleTearOff(pPropertyProvider));
PRECONDITION(CheckPointer(ppProperty, NULL_OK));
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pPropertyProvider));
PRECONDITION(IsSimpleTearOff(pPropertyProvider));
PRECONDITION(CheckPointer(ppProperty, NULL_OK));
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pPropertyProvider));
PRECONDITION(IsSimpleTearOff(pPropertyProvider));
PRECONDITION(CheckPointer(phstrStringRepresentation, NULL_OK));
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pPropertyProvider));
PRECONDITION(IsSimpleTearOff(pPropertyProvider));
PRECONDITION(CheckPointer(pTypeIdentifier));
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pStringable));
PRECONDITION(IsSimpleTearOff(pStringable));
PRECONDITION(CheckPointer(pResult, NULL_OK));
NOTHROW;
GC_TRIGGERS;
MODE_ANY;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pWrap));
PRECONDITION(CheckPointer(pThread));
}
BOOL IsCurrentDomainValid(ComCallWrapper* pWrap)
{
- CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_ANY; SO_TOLERANT; } CONTRACTL_END;
+ CONTRACTL { NOTHROW; GC_TRIGGERS; MODE_ANY; } CONTRACTL_END;
return IsCurrentDomainValid(pWrap, GetThread());
}
DISABLED(NOTHROW);
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pWrap));
PRECONDITION(CheckPointer(pTarget));
PRECONDITION(CheckPointer(pArgs));
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(ptr));
}
CONTRACTL_END;
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pUnk));
PRECONDITION(CheckPointer(ppv, NULL_OK));
}
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pUnk));
PRECONDITION(CheckPointer(ppv, NULL_OK));
}
ULONG __stdcall Unknown_AddRef(IUnknown* pUnk)
{
// Ensure the Thread is available for contracts and other users of the Thread, but don't do any of
- // the other "entering managed code" work like going to SO_INTOLERANT or checking for reentrancy.
+ // the other "entering managed code" work like checking for reentrancy.
// We don't really need to "enter" the runtime to do an interlocked increment on a refcount, so
// all of that stuff should be isolated to rare paths here.
SetupThreadForComCall(-1);
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- ENTRY_POINT; // implies SO_TOLERANT
+ ENTRY_POINT;
}
CONTRACTL_END;
ULONG __stdcall Unknown_Release(IUnknown* pUnk)
{
// Ensure the Thread is available for contracts and other users of the Thread, but don't do any of
- // the other "entering managed code" work like going to SO_INTOLERANT or checking for reentrancy.
+ // the other "entering managed code" work like checking for reentrancy.
// We don't really need to "enter" the runtime to do an interlocked decrement on a refcount, so
// all of that stuff should be isolated to rare paths here.
SetupThreadForComCall(-1);
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- ENTRY_POINT; // implies SO_TOLERANT
+ ENTRY_POINT;
}
CONTRACTL_END;
ULONG __stdcall Unknown_AddRefInner(IUnknown* pUnk)
{
// Ensure the Thread is available for contracts and other users of the Thread, but don't do any of
- // the other "entering managed code" work like going to SO_INTOLERANT or checking for reentrancy.
+ // the other "entering managed code" work like checking for reentrancy.
// We don't really need to "enter" the runtime to do an interlocked increment on a refcount, so
// all of that stuff should be isolated to rare paths here.
SetupThreadForComCall(-1);
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- ENTRY_POINT; // implies SO_TOLERANT
+ ENTRY_POINT;
}
CONTRACTL_END;
ULONG __stdcall Unknown_ReleaseInner(IUnknown* pUnk)
{
// Ensure the Thread is available for contracts and other users of the Thread, but don't do any of
- // the other "entering managed code" work like going to SO_INTOLERANT or checking for reentrancy.
+ // the other "entering managed code" work like checking for reentrancy.
// We don't really need to "enter" the runtime to do an interlocked decrement on a refcount, so
// all of that stuff should be isolated to rare paths here.
SetupThreadForComCall(-1);
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- ENTRY_POINT; // implies SO_TOLERANT
+ ENTRY_POINT;
}
CONTRACTL_END;
ULONG __stdcall Unknown_AddRefSpecial(IUnknown* pUnk)
{
// Ensure the Thread is available for contracts and other users of the Thread, but don't do any of
- // the other "entering managed code" work like going to SO_INTOLERANT or checking for reentrancy.
+ // the other "entering managed code" work like checking for reentrancy.
// We don't really need to "enter" the runtime to do an interlocked increment on a refcount, so
// all of that stuff should be isolated to rare paths here.
SetupThreadForComCall(-1);
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- ENTRY_POINT; // implies SO_TOLERANT
+ ENTRY_POINT;
}
CONTRACTL_END;
ULONG __stdcall Unknown_ReleaseSpecial(IUnknown* pUnk)
{
// Ensure the Thread is available for contracts and other users of the Thread, but don't do any of
- // the other "entering managed code" work like going to SO_INTOLERANT or checking for reentrancy.
+ // the other "entering managed code" work like checking for reentrancy.
// We don't really need to "enter" the runtime to do an interlocked decrement on a refcount, so
// all of that stuff should be isolated to rare paths here.
SetupThreadForComCall(-1);
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- ENTRY_POINT; // implies SO_TOLERANT
+ ENTRY_POINT;
}
CONTRACTL_END;
SetupForComCallHRNoCheckCanRunManagedCode();
WRAPPER_NO_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
HRESULT hr = S_OK;
if (!CanRunManagedCode(LoaderLockCheck::ForCorrectness))
SetupForComCallDWORDNoCheckCanRunManagedCode();
WRAPPER_NO_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
// <TODO>Address this violation in context of bug 27409</TODO>
CONTRACT_VIOLATION(GCViolation);
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pUnk));
PRECONDITION(CheckPointer(ppTI, NULL_OK));
}
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pUnk));
}
CONTRACTL_END;
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pUnk));
PRECONDITION(CheckPointer(pbstrDescription, NULL_OK));
}
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pUnk));
PRECONDITION(CheckPointer(pguid, NULL_OK));
}
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pUnk));
PRECONDITION(CheckPointer(pdwHelpCtxt, NULL_OK));
}
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pUnk));
PRECONDITION(CheckPointer(pbstrHelpFile, NULL_OK));
}
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pUnk));
PRECONDITION(CheckPointer(pbstrSource, NULL_OK));
}
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pDisp));
PRECONDITION(CheckPointer(pctinfo, NULL_OK));
}
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pDisp));
PRECONDITION(CheckPointer(pptinfo, NULL_OK));
}
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pDisp));
PRECONDITION(CheckPointer(rgszNames, NULL_OK));
PRECONDITION(CheckPointer(rgdispid, NULL_OK));
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pDisp));
PRECONDITION(CheckPointer(rgszNames, NULL_OK));
PRECONDITION(CheckPointer(rgdispid, NULL_OK));
HRESULT hrRetVal = S_OK;
#ifdef FEATURE_CORRUPTING_EXCEPTIONS
- // SetupForComCallHR uses "SO_INTOLERANT_CODE_NOTHROW" to setup the SO-Intolerant transition
- // for COM Interop. However, "SO_INTOLERANT_CODE_NOTHROW" expects that no exception can escape
- // through this boundary but all it does is (in addition to checking that no exception has escaped it)
- // do stack probing.
- //
- // However, Corrupting Exceptions [CE] can escape the COM Interop boundary. Thus, to address that scenario,
- // we use the macro below that uses BEGIN_SO_INTOLERANT_CODE_NOTHROW to do the equivalent of
- // SO_INTOLERANT_CODE_NOTHROW and yet allow for CEs to escape through. Since there will be a corresponding
- // END_SO_INTOLERANT_CODE, the call is splitted into two parts: the Begin and End (see below).
BeginSetupForComCallHRWithEscapingCorruptingExceptions();
#else // !FEATURE_CORRUPTING_EXCEPTIONS
SetupForComCallHR();
THROWS; // Dispatch_Invoke_CallBack can throw
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pDisp));
PRECONDITION(CheckPointer(pdispparams, NULL_OK));
PRECONDITION(CheckPointer(pvarResult, NULL_OK));
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pDisp));
PRECONDITION(CheckPointer(pdispparams, NULL_OK));
PRECONDITION(CheckPointer(pvarResult, NULL_OK));
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pDisp));
PRECONDITION(CheckPointer(pctinfo, NULL_OK));
}
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pDisp));
PRECONDITION(CheckPointer(pptinfo, NULL_OK));
}
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pDisp));
PRECONDITION(CheckPointer(rgszNames, NULL_OK));
PRECONDITION(CheckPointer(rgdispid, NULL_OK));
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pDisp));
PRECONDITION(CheckPointer(pdispparams, NULL_OK));
PRECONDITION(CheckPointer(pvarResult, NULL_OK));
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pDisp));
}
CONTRACTL_END;
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pDisp));
}
CONTRACTL_END;
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pDisp));
PRECONDITION(CheckPointer(pbstrName, NULL_OK));
}
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pDisp));
PRECONDITION(CheckPointer(pid, NULL_OK));
}
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pDisp));
PRECONDITION(CheckPointer(pgrfdex, NULL_OK));
}
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pDisp));
PRECONDITION(CheckPointer(ppunk, NULL_OK));
}
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pDisp));
PRECONDITION(CheckPointer(pid, NULL_OK));
}
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pDisp));
PRECONDITION(CheckPointer(pdp, NULL_OK));
PRECONDITION(CheckPointer(pVarRes, NULL_OK));
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pInsp));
}
CONTRACTL_END;
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pInsp));
}
CONTRACTL_END;
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pInsp));
}
CONTRACTL_END;
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pRefSrc));
}
CONTRACTL_END;
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pMarsh));
PRECONDITION(CheckPointer(pv, NULL_OK));
PRECONDITION(CheckPointer(pvDestContext, NULL_OK));
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pMarsh));
PRECONDITION(CheckPointer(pv, NULL_OK));
PRECONDITION(CheckPointer(pvDestContext, NULL_OK));
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pMarsh));
PRECONDITION(CheckPointer(pv, NULL_OK));
PRECONDITION(CheckPointer(pvDestContext, NULL_OK));
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pMarsh));
PRECONDITION(CheckPointer(pStm, NULL_OK));
PRECONDITION(CheckPointer(ppvObj, NULL_OK));
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pMarsh));
PRECONDITION(CheckPointer(pStm, NULL_OK));
}
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pMarsh));
}
CONTRACTL_END;
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pUnk));
PRECONDITION(CheckPointer(ppEnum, NULL_OK));
}
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pUnk));
PRECONDITION(CheckPointer(ppCP, NULL_OK));
}
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pUnk));
PRECONDITION(CheckPointer(pdwSupportedOptions, NULL_OK));
PRECONDITION(CheckPointer(pdwEnabledOptions, NULL_OK));
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pUnk));
}
CONTRACTL_END;
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pPropertyProvider));
}
CONTRACTL_END;
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pPropertyProvider));
}
CONTRACTL_END;
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pPropertyProvider));
}
CONTRACTL_END;
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pPropertyProvider));
}
CONTRACTL_END;
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pStringable));
}
CONTRACTL_END;
// We do not need to hook with host here
SetupForComCallDWORDNoHostNotif();
- WRAPPER_NO_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
+ WRAPPER_NO_CONTRACT;
return ICCW_AddRefFromJupiter(pUnk);
}
// We do not need to hook with host here
SetupForComCallDWORDNoHostNotif();
- WRAPPER_NO_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
+ WRAPPER_NO_CONTRACT;
return ICCW_ReleaseFromJupiter(pUnk);
}
// as we are most likely in the middle of a GC
SetupForComCallHRNoHostNotifNoCheckCanRunManagedCode();
- WRAPPER_NO_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
+ WRAPPER_NO_CONTRACT;
return ICCW_Peg(pUnk);
}
// as we are most likely in the middle of a GC
SetupForComCallHRNoHostNotifNoCheckCanRunManagedCode();
- WRAPPER_NO_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
+ WRAPPER_NO_CONTRACT;
return ICCW_Unpeg(pUnk);
}
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
}
CONTRACTL_END;
{
QCALL_CONTRACT;
- BEGIN_QCALL_SO_TOLERANT;
+ BEGIN_QCALL;
ULONG cbRef = SafeReleasePreemp(pUnk);
LogInteropRelease(pUnk, cbRef, "InterfaceMarshalerBase::ClearNative: In/Out release");
- END_QCALL_SO_TOLERANT;
+ END_QCALL;
}
#include <optdefault.h>
NOTHROW;
GC_NOTRIGGER;
FORBID_FAULT;
- SO_TOLERANT;
}
CONTRACTL_END
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACTL_END;
{
THROWS;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACTL_END;
{
THROWS;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACTL_END;
{
THROWS;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACTL_END;
{
THROWS;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACTL_END;
{
THROWS;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACTL_END;
{
THROWS;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACTL_END;
{
THROWS;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACTL_END;
{
THROWS;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACTL_END;
{
THROWS;
GC_NOTRIGGER;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pExternalAddress));
}
{
THROWS;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACTL_END;
{
THROWS;
GC_NOTRIGGER;
- SO_TOLERANT;
} CONTRACTL_END;
if (! ClrSafeInt<SHORT>::addition(m_stackSize, FrameSizeIncrement, m_stackSize))
{
THROWS;
GC_NOTRIGGER;
- SO_TOLERANT;
} CONTRACTL_END;
_ASSERTE(Op < sizeof(UnwindOpExtraSlotTable));
NOTHROW;
GC_NOTRIGGER;
FORBID_FAULT;
- SO_TOLERANT;
}
CONTRACTL_END
NOTHROW;
CAN_TAKE_LOCK; // CheckIsStub_Internal can enter SimpleRWLock
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACTL_END;
return FALSE;
}
- CONTRACT_VIOLATION(SOToleranceViolation);
- // @todo : this might not have a thread
- // BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), return FALSE);
-
struct Param
{
BOOL fIsStub;
#ifdef DACCESS_COMPILE
PAL_ENDTRY
#else
- EX_END_CATCH(SwallowAllExceptions);
-#endif
+ EX_END_CATCH(SwallowAllExceptions);
+#endif
- //END_SO_INTOLERANT_CODE;
-
return param.fIsStub;
}
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
// The addr is in unmanaged code. Used for Step-in from managed to native.
void InitForUnmanaged(PCODE addr)
{
- STATIC_CONTRACT_SO_TOLERANT;
this->type = TRACE_UNMANAGED;
this->address = addr;
this->stubManager = NULL;
// The addr is inside jitted code (eg, there's a JitManaged that will claim it)
void InitForManaged(PCODE addr)
{
- STATIC_CONTRACT_SO_TOLERANT;
this->type = TRACE_MANAGED;
this->address = addr;
this->stubManager = NULL;
// Initialize for an unmanaged entry stub.
void InitForUnmanagedStub(PCODE addr)
{
- STATIC_CONTRACT_SO_TOLERANT;
this->type = TRACE_ENTRY_STUB;
this->address = addr;
this->stubManager = NULL;
// Initialize for a stub.
void InitForStub(PCODE addr)
{
- STATIC_CONTRACT_SO_TOLERANT;
this->type = TRACE_STUB;
this->address = addr;
this->stubManager = NULL;
// call pStubManager->TraceManager() to get the next TraceDestination.
void InitForManagerPush(PCODE addr, StubManager * pStubManager)
{
- STATIC_CONTRACT_SO_TOLERANT;
this->type = TRACE_MGR_PUSH;
this->address = addr;
this->stubManager = pStubManager;
AwareLock::EnterHelperResult ObjHeader::EnterObjMonitorHelperSpin(Thread* pCurThread)
{
CONTRACTL{
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_COOPERATIVE;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
#ifndef DACCESS_COMPILE
if (!IsGCSpecialThread ()) {MODE_COOPERATIVE;} else {MODE_ANY;}
#endif
{
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
- STATIC_CONTRACT_SO_TOLERANT;
STATIC_CONTRACT_SUPPORTS_DAC;
ADIndex indx = GetRawAppDomainIndex();
{
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_GC_NOTRIGGER;
- STATIC_CONTRACT_SO_TOLERANT;
STATIC_CONTRACT_MODE_COOPERATIVE;
-
+
DWORD bits = GetBits ();
Object * obj = GetBaseObject ();
BOOL bVerifyMore = g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_SYNCBLK;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
SUPPORTS_DAC;
POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
}
FORCEINLINE bool AwareLock::TryEnterHelper(Thread* pCurThread)
{
CONTRACTL{
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
FORCEINLINE AwareLock::EnterHelperResult AwareLock::TryEnterBeforeSpinLoopHelper(Thread *pCurThread)
{
CONTRACTL{
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
FORCEINLINE AwareLock::EnterHelperResult AwareLock::TryEnterInsideSpinLoopHelper(Thread *pCurThread)
{
CONTRACTL{
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
FORCEINLINE bool AwareLock::TryEnterAfterSpinLoopHelper(Thread *pCurThread)
{
CONTRACTL{
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
FORCEINLINE AwareLock::EnterHelperResult ObjHeader::EnterObjMonitorHelper(Thread* pCurThread)
{
CONTRACTL{
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_COOPERATIVE;
FORCEINLINE AwareLock::LeaveHelperAction AwareLock::LeaveHelper(Thread* pCurThread)
{
CONTRACTL {
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
FORCEINLINE AwareLock::LeaveHelperAction ObjHeader::LeaveObjMonitorHelper(Thread* pCurThread)
{
CONTRACTL {
- SO_TOLERANT;
NOTHROW;
GC_NOTRIGGER;
MODE_COOPERATIVE;
{
THROWS;
GC_NOTRIGGER;
- SO_TOLERANT;
// disallow creation of Crst before EE starts
// Can not assert here. ASP.Net uses our Threadpool before EE is started.
PRECONDITION((m_handle == INVALID_HANDLE_VALUE));
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
// disallow creation of Crst before EE starts
// Can not assert here. ASP.Net uses our Threadpool before EE is started.
PRECONDITION((m_handle == INVALID_HANDLE_VALUE));
{
THROWS;
GC_NOTRIGGER;
- SO_TOLERANT;
// disallow creation of Crst before EE starts
// Can not assert here. ASP.Net uses our Threadpool before EE is started.
PRECONDITION((m_handle == INVALID_HANDLE_VALUE));
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
// disallow creation of Crst before EE starts
// Can not assert here. ASP.Net uses our Threadpool before EE is started.
PRECONDITION((m_handle == INVALID_HANDLE_VALUE));
{
NOTHROW;
if (IsInDeadlockDetection()) {GC_TRIGGERS;} else {GC_NOTRIGGER;}
- SO_TOLERANT;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
PRECONDITION((m_handle != INVALID_HANDLE_VALUE));
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
PRECONDITION((m_handle != INVALID_HANDLE_VALUE));
}
CONTRACTL_END;
static DWORD CLREventWaitHelper2(HANDLE handle, DWORD dwMilliseconds, BOOL alertable)
{
STATIC_CONTRACT_THROWS;
- STATIC_CONTRACT_SO_TOLERANT;
-
+
return WaitForSingleObjectEx(handle,dwMilliseconds,alertable);
}
static DWORD CLREventWaitHelper(HANDLE handle, DWORD dwMilliseconds, BOOL alertable)
{
STATIC_CONTRACT_NOTHROW;
- STATIC_CONTRACT_SO_TOLERANT;
-
+
struct Param
{
HANDLE handle;
{
DISABLED(GC_TRIGGERS);
}
- SO_TOLERANT;
PRECONDITION(m_handle != INVALID_HANDLE_VALUE); // Handle has to be valid
}
CONTRACTL_END;
{
if (pThread && alertable) {
DWORD dwRet = WAIT_FAILED;
- BEGIN_SO_INTOLERANT_CODE_NOTHROW (pThread, return WAIT_FAILED;);
dwRet = pThread->DoAppropriateWait(1, &m_handle, FALSE, dwMilliseconds,
mode,
syncState);
- END_SO_INTOLERANT_CODE;
return dwRet;
}
else {
{
THROWS;
GC_NOTRIGGER;
- SO_TOLERANT;
PRECONDITION(m_handle == INVALID_HANDLE_VALUE);
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
PRECONDITION(m_handle != INVALID_HANDLE_VALUE);
}
CONTRACTL_END;
{
DISABLED(GC_TRIGGERS);
}
- SO_TOLERANT;
PRECONDITION(m_handle != INVALID_HANDLE_VALUE); // Invalid to have invalid handle
}
CONTRACTL_END;
{
THROWS;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACTL_END;
{
THROWS;
GC_NOTRIGGER;
- SO_TOLERANT;
PRECONDITION(m_handle == INVALID_HANDLE_VALUE && m_handle != NULL);
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
PRECONDITION(m_handle != INVALID_HANDLE_VALUE && m_handle != NULL);
}
CONTRACTL_END;
NOTHROW;
MODE_ANY;
GC_NOTRIGGER;
- SO_TOLERANT; //Its ok for tis function to fail.
}
CONTRACTL_END;
CONTRACTL_END;
GCX_COOP();
- BEGIN_SO_INTOLERANT_CODE(pThread);
//
// NOTE: there is a potential race between the time we retrieve the app
// We should have released all locks.
_ASSERTE(g_fEEShutDown || pThread->m_dwLockCount == 0 || pThread->m_fRudeAborted);
- END_SO_INTOLERANT_CODE;
-
*foundWork = true;
}
else
{
CONTRACTL {
NOTHROW;
- SO_TOLERANT;
if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
}
CONTRACTL_END;
CONTRACTL {
THROWS;
if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
- SO_TOLERANT;
}
CONTRACTL_END;
if ((pThread = GetThread()) != NULL)
return pThread;
-#ifdef FEATURE_STACK_PROBE
- RetailStackProbe(ADJUST_PROBE(DEFAULT_ENTRY_PROBE_AMOUNT), NULL);
-#endif //FEATURE_STACK_PROBE
-
- CONTRACT_VIOLATION(SOToleranceViolation);
-
// For interop debugging, we must mark that we're in a can't-stop region
// b.c we may take Crsts here that may block the helper thread.
// We're especially fragile here b/c we don't have a Thread object yet
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
- // @todo . We need to probe here, but can't introduce destructors etc.
- BEGIN_CONTRACT_VIOLATION(SOToleranceViolation);
-
// Clear any outstanding stale EH state that maybe still active on the thread.
#ifdef WIN64EXCEPTIONS
ExceptionTracker::PopTrackers((void*)-1);
ThreadStore::CheckForEEShutdown();
}
- END_CONTRACT_VIOLATION;
-
HANDLE hThread = GetThreadHandle();
SetThreadHandle (SWITCHOUT_HANDLE_VALUE);
while (m_dwThreadHandleBeingUsed > 0)
CONTRACTL {
NOTHROW;
DISABLED(GC_NOTRIGGER);
- SO_TOLERANT;
}
CONTRACTL_END;
- // @todo need a probe that tolerates not having a thread setup at all
- CONTRACT_VIOLATION(SOToleranceViolation);
-
_ASSERTE(!m_fPreemptiveGCDisabled); // can't use PreemptiveGCDisabled() here
// This is cheating a little. There is a pathway here from SetupThread, but only
{
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_GC_TRIGGERS;
- STATIC_CONTRACT_SO_INTOLERANT;
STATIC_CONTRACT_MODE_PREEMPTIVE;
_ASSERTE(GetThread() == this);
- BEGIN_SO_TOLERANT_CODE(this);
- // BEGIN_SO_TOLERANT_CODE wraps a __try/__except around this call, so if the OS were to allow
- // an exception to leak through to us, we'll catch it.
::CoUninitialize();
- END_SO_TOLERANT_CODE;
-
}// BaseCoUninitialize
#ifdef FEATURE_COMINTEROP
{
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_GC_TRIGGERS;
- STATIC_CONTRACT_SO_INTOLERANT;
STATIC_CONTRACT_MODE_PREEMPTIVE;
_ASSERTE(WinRTSupported());
_ASSERTE(GetThread() == this);
_ASSERTE(IsWinRTInitialized());
- BEGIN_SO_TOLERANT_CODE(this);
RoUninitialize();
- END_SO_TOLERANT_CODE;
}
#endif // FEATURE_COMINTEROP
// The true contract for GC trigger should be the following. But this puts a very strong restriction
// on contract for functions that call EnablePreemptiveGC.
//if (GetThread() && !ThreadStore::HoldingThreadStore(GetThread())) {GC_TRIGGERS;} else {GC_NOTRIGGER;}
- STATIC_CONTRACT_SO_INTOLERANT;
STATIC_CONTRACT_GC_TRIGGERS;
DWORD flags = 0;
_ASSERTE (g_fEEShutDown);
DWORD lastError = 0;
- BEGIN_SO_TOLERANT_CODE(pThread);
// If we're going to pump, we cannot use WAIT_ALL. That's because the wait would
// only be satisfied if a message arrives while the handles are signalled. If we
lastError = ::GetLastError();
- END_SO_TOLERANT_CODE;
-
- // END_SO_TOLERANT_CODE overwrites lasterror. Let's reset it.
::SetLastError(lastError);
return dwReturn;
#endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT
-DWORD WaitForMultipleObjectsEx_SO_TOLERANT (DWORD nCount, HANDLE *lpHandles, BOOL bWaitAll,DWORD dwMilliseconds, BOOL bAlertable)
-{
- STATIC_CONTRACT_SO_INTOLERANT;
-
- DWORD dwRet = WAIT_FAILED;
- DWORD lastError = 0;
-
- BEGIN_SO_TOLERANT_CODE (GetThread ());
- dwRet = ::WaitForMultipleObjectsEx (nCount, lpHandles, bWaitAll, dwMilliseconds, bAlertable);
- lastError = ::GetLastError();
- END_SO_TOLERANT_CODE;
-
- // END_SO_TOLERANT_CODE overwrites lasterror. Let's reset it.
- ::SetLastError(lastError);
- return dwRet;
-}
-
//--------------------------------------------------------------------
// Do appropriate wait based on apartment state (STA or MTA)
DWORD Thread::DoAppropriateAptStateWait(int numWaiters, HANDLE* pHandles, BOOL bWaitAll,
CONTRACTL {
THROWS;
GC_TRIGGERS;
- SO_INTOLERANT;
}
CONTRACTL_END;
}
#endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT
- return WaitForMultipleObjectsEx_SO_TOLERANT(numWaiters, pHandles, bWaitAll, timeout, alertable);
+ return WaitForMultipleObjectsEx(numWaiters, pHandles, bWaitAll, timeout, alertable);
}
// A helper called by our two flavors of DoAppropriateWaitWorker
CONTRACTL {
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACTL_END;
if ((throwable == NULL) || CLRException::IsPreallocatedExceptionObject(throwable)) NOTHROW; else THROWS; // From CreateHandle
GC_NOTRIGGER;
if (throwable == NULL) MODE_ANY; else MODE_COOPERATIVE;
- SO_TOLERANT;
}
CONTRACTL_END;
}
else
{
- BEGIN_SO_INTOLERANT_CODE(GetThread());
- {
- m_LastThrownObjectHandle = GetDomain()->CreateHandle(throwable);
- }
- END_SO_INTOLERANT_CODE;
+ m_LastThrownObjectHandle = GetDomain()->CreateHandle(throwable);
}
_ASSERTE(m_LastThrownObjectHandle != NULL);
NOTHROW;
GC_NOTRIGGER;
MODE_COOPERATIVE;
- SO_TOLERANT;
CANNOT_TAKE_LOCK;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
if (throwable == NULL) MODE_ANY; else MODE_COOPERATIVE;
- SO_TOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
if (throwable == NULL) MODE_ANY; else MODE_COOPERATIVE;
- SO_TOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
MODE_COOPERATIVE;
- SO_TOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
MODE_COOPERATIVE;
- SO_INTOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_TRIGGERS;
MODE_ANY;
- SO_TOLERANT;
}
CONTRACTL_END;
CONTRACTL {
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACTL_END;
SUPPORTS_DAC;
CONTRACTL {
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACTL_END;
SUPPORTS_DAC;
CONTRACTL {
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
SUPPORTS_DAC;
}
CONTRACTL_END;
{
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_GC_TRIGGERS;
- STATIC_CONTRACT_SO_TOLERANT;
// If we're waiting for shutdown, we don't want to abort/interrupt this thread
if (HasThreadStateNC(Thread::TSNC_BlockedForShutdown))
return;
- BEGIN_SO_INTOLERANT_CODE(this);
-
if ((m_UserInterrupt & TI_Abort) != 0)
{
// If the thread is waiting for AD unload to finish, and the thread is interrupted,
COMPlusThrow(kThreadInterruptedException);
}
- END_SO_INTOLERANT_CODE;
}
#ifdef _DEBUG
CONTRACTL {
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACTL_END;
void Thread::ObjectRefFlush(Thread* thread)
{
+        // This is debug-only code, so there is no need to validate.
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_ENTRY_POINT;
- BEGIN_PRESERVE_LAST_ERROR;
-
- // The constructor and destructor of AutoCleanupSONotMainlineHolder (allocated by SO_NOT_MAINLINE_FUNCTION below)
- // may trash the last error, so we need to save and restore last error here. Also, we need to add a scope here
- // because we can't let the destructor run after we call SetLastError().
- {
- // this is debug only code, so no need to validate
- STATIC_CONTRACT_NOTHROW;
- STATIC_CONTRACT_GC_NOTRIGGER;
- STATIC_CONTRACT_ENTRY_POINT;
-
- _ASSERTE(thread->PreemptiveGCDisabled()); // Should have been in managed code
- memset(thread->dangerousObjRefs, 0, sizeof(thread->dangerousObjRefs));
- thread->m_allObjRefEntriesBad = FALSE;
- CLEANSTACKFORFASTGCSTRESS ();
- }
-
- END_PRESERVE_LAST_ERROR;
+ _ASSERTE(thread->PreemptiveGCDisabled()); // Should have been in managed code
+ memset(thread->dangerousObjRefs, 0, sizeof(thread->dangerousObjRefs));
+ thread->m_allObjRefEntriesBad = FALSE;
+ CLEANSTACKFORFASTGCSTRESS ();
}
#endif
{
NOTHROW;
GC_NOTRIGGER;
- SO_NOT_MAINLINE;
}
CONTRACTL_END;
// Called during fiber switch. Can not have non-static contract.
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
- STATIC_CONTRACT_SO_TOLERANT;
#ifndef FEATURE_PAL
MEMORY_BASIC_INFORMATION lowerBoundMemInfo;
// Called during fiber switch. Can not have non-static contract.
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
- STATIC_CONTRACT_SO_TOLERANT;
return ClrTeb::GetStackBase();
}
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACTL_END;
{
WRAPPER(NOTHROW);
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_INTOLERANT;
CANNOT_TAKE_LOCK;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_INTOLERANT;
CANNOT_TAKE_LOCK;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
CANNOT_TAKE_LOCK;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
CANNOT_TAKE_LOCK;
}
CONTRACTL_END;
}
-#ifdef FEATURE_STACK_PROBE
-/*
- * CanResetStackTo
- *
- * Given a target stack pointer, this function will tell us whether or not we could restore the guard page if we
- * unwound the stack that far.
- *
- * Parameters:
- * stackPointer -- stack pointer that we want to try to reset the thread's stack up to.
- *
- * Returns:
- * TRUE if there's enough room to reset the stack, false otherwise.
- */
-BOOL Thread::CanResetStackTo(LPCVOID stackPointer)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- SO_TOLERANT;
- }
- CONTRACTL_END;
-
- // How much space between the given stack pointer and the first guard page?
- //
- // This must be signed since the stack pointer might be in the guard region,
- // which is at a lower address than GetLastNormalStackAddress will return.
- INT_PTR iStackSpaceLeft = (INT_PTR)stackPointer - GetLastNormalStackAddress();
-
- // We need to have enough space to call back into the EE from the handler, so we use the twice the entry point amount.
- // We need enough to do work and enough that partway through that work we won't probe and COMPlusThrowSO.
-
- const INT_PTR iStackSizeThreshold = (ADJUST_PROBE(DEFAULT_ENTRY_PROBE_AMOUNT * 2) * GetOsPageSize());
-
- if (iStackSpaceLeft > iStackSizeThreshold)
- {
- return TRUE;
- }
- else
- {
- return FALSE;
- }
-}
-
-/*
- * IsStackSpaceAvailable
- *
- * Given a number of stack pages, this function will tell us whether or not we have that much space
- * before the top of the stack. If we are in the guard region we must be already handling an SO,
- * so we report how much space is left in the guard region
- *
- * Parameters:
- * numPages -- the number of pages that we need. This can be a fractional amount.
- *
- * Returns:
- * TRUE if there's that many pages of stack available
- */
-BOOL Thread::IsStackSpaceAvailable(float numPages)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- SO_TOLERANT;
- }
- CONTRACTL_END;
-
- // How much space between the current stack pointer and the first guard page?
- //
- // This must be signed since the stack pointer might be in the guard region,
- // which is at a lower address than GetLastNormalStackAddress will return.
- float iStackSpaceLeft = static_cast<float>((INT_PTR)GetCurrentSP() - (INT_PTR)GetLastNormalStackAddress());
-
- // If we have access to the stack guarantee (either in the guard region or we've tripped the guard page), then
- // use that.
- if ((iStackSpaceLeft/GetOsPageSize()) < numPages && !DetermineIfGuardPagePresent())
- {
- UINT_PTR stackGuarantee = GetStackGuarantee();
- // GetLastNormalStackAddress actually returns the 2nd to last stack page on the stack. We'll add that to our available
- // amount of stack, in addition to any sort of stack guarantee we might have.
- //
- // All these values are OS supplied, and will never overflow. (If they do, that means the stack is on the order
- // over GB, which isn't possible.
- iStackSpaceLeft += stackGuarantee + GetOsPageSize();
- }
- if ((iStackSpaceLeft/GetOsPageSize()) < numPages)
- {
- return FALSE;
- }
-
- return TRUE;
-}
-
-#endif // FEATURE_STACK_PROBE
-
/*
* GetStackGuarantee
*
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
CANNOT_TAKE_LOCK;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
CANNOT_TAKE_LOCK;
}
CONTRACTL_END;
- // Need a hard SO probe here.
- CONTRACT_VIOLATION(SOToleranceViolation);
-
BOOL bStackGuarded = DetermineIfGuardPagePresent();
// If the guard page is still there, then just return.
STATIC_CONTRACT_GC_TRIGGERS;
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_MODE_COOPERATIVE;
- STATIC_CONTRACT_SO_TOLERANT;
-
- // We have the probe outside the EX_TRY below since corresponding EX_CATCH
- // also invokes SO_INTOLERANT code.
- BEGIN_SO_INTOLERANT_CODE(GetThread());
EX_TRY_CPP_ONLY
{
}
}
EX_END_CATCH(SwallowAllExceptions);
-
- END_SO_INTOLERANT_CODE;
}
/*
}
LONG ret = -1;
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return EXCEPTION_CONTINUE_SEARCH;);
// This will invoke the swallowing filter. If that returns EXCEPTION_CONTINUE_SEARCH,
// it will trigger unhandled exception processing.
}
}
-
- END_SO_INTOLERANT_CODE;
return ret;
}
CONTRACTL {
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
MODE_COOPERATIVE;
- SO_TOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
MODE_COOPERATIVE;
- SO_TOLERANT;
}
CONTRACTL_END;
{
CONTRACTL {
NOTHROW;
- if(!fNotFinalizerThread || fThreadObjectResetNeeded) {GC_TRIGGERS;SO_INTOLERANT;} else {GC_NOTRIGGER;SO_TOLERANT;}
+ if(!fNotFinalizerThread || fThreadObjectResetNeeded) {GC_TRIGGERS;} else {GC_NOTRIGGER;}
}
CONTRACTL_END;
{
NOTHROW;
if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
- SO_TOLERANT;
}
CONTRACTL_END;
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return COR_E_STACKOVERFLOW;);
EX_TRY
{
UserAbort(TAR_Thread, EEPolicy::TA_Safe, INFINITE, Thread::UAC_Host);
{
}
EX_END_CATCH(SwallowAllExceptions);
- END_SO_INTOLERANT_CODE;
return S_OK;
}
{
NOTHROW;
if (GetThread()) {GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
- SO_TOLERANT;
}
CONTRACTL_END;
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return COR_E_STACKOVERFLOW);
-
EX_TRY
{
UserAbort(TAR_Thread, EEPolicy::TA_Rude, INFINITE, Thread::UAC_Host);
}
EX_END_CATCH(SwallowAllExceptions);
- END_SO_INTOLERANT_CODE;
-
return S_OK;
}
CONTRACTL {
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACTL_END;
{
THROWS;
GC_TRIGGERS;
- SO_TOLERANT;
}
CONTRACTL_END;
_ASSERTE (GetThread() == this);
- CONTRACT_VIOLATION(SOToleranceViolation);
-
}
{
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
- STATIC_CONTRACT_SO_TOLERANT;
ETaskType TaskType = TT_UNKNOWN;
size_t type = (size_t)ClrFlsGetValue (TlsIdx_ThreadType);
return (m_State & TS_Detached);
}
-#ifdef FEATURE_STACK_PROBE
-//---------------------------------------------------------------------------------------
-//
-// IsSOTolerant - Is the current thread in SO Tolerant region?
-//
-// Arguments:
-// pLimitFrame: the limit of search for frames
-//
-// Return Value:
-// TRUE if in SO tolerant region.
-// FALSE if in SO intolerant region.
-//
-// Note:
-// We walk our frame chain to decide. If HelperMethodFrame is seen first, we are in tolerant
-// region. If EnterSOIntolerantCodeFrame is seen first, we are in intolerant region.
-//
- BOOL IsSOTolerant(void * pLimitFrame);
-#endif
-
#ifdef _DEBUG
class DisableSOCheckInHCALL
{
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
SUPPORTS_DAC;
}
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
SUPPORTS_DAC;
}
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_COOPERATIVE;
}
CONTRACTL_END;
DWORD GetThreadId()
{
- STATIC_CONTRACT_SO_TOLERANT;
LIMITED_METHOD_DAC_CONTRACT;
_ASSERTE(m_ThreadId != UNINITIALIZED_THREADID);
return m_ThreadId;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
// stack overflow exception.
BOOL DetermineIfGuardPagePresent();
-#ifdef FEATURE_STACK_PROBE
- // CanResetStackTo will return TRUE if the given stack pointer is far enough away from the guard page to proper
- // restore the guard page with RestoreGuardPage.
- BOOL CanResetStackTo(LPCVOID stackPointer);
-
- // IsStackSpaceAvailable will return true if there are the given number of stack pages available on the stack.
- BOOL IsStackSpaceAvailable(float numPages);
-
-#endif
-
// Returns the amount of stack available after an SO but before the OS rips the process.
static UINT_PTR GetStackGuarantee();
#endif // defined(GCCOVER_TOLERATE_SPURIOUS_AV)
#endif // HAVE_GCCOVER
-#if defined(_DEBUG) && defined(FEATURE_STACK_PROBE)
- class ::BaseStackGuard;
-private:
- // This field is used for debugging purposes to allow easy access to the stack guard
- // chain and also in SO-tolerance checking to quickly determine if a guard is in place.
- BaseStackGuard *m_pCurrentStackGuard;
-
-public:
- BaseStackGuard *GetCurrentStackGuard()
- {
- LIMITED_METHOD_CONTRACT;
- return m_pCurrentStackGuard;
- }
-
- void SetCurrentStackGuard(BaseStackGuard *pGuard)
- {
- LIMITED_METHOD_CONTRACT;
- m_pCurrentStackGuard = pGuard;
- }
-#endif
-
private:
BOOL m_fCompletionPortDrained;
public:
{
THROWS;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
#endif // _DEBUG_IMPL
#endif // DACCESS_COMPILE
-#ifdef FEATURE_STACK_PROBE
-#ifdef _DEBUG_IMPL
-inline void NO_FORBIDGC_LOADER_USE_ThrowSO()
-{
- WRAPPER_NO_CONTRACT;
- if (FORBIDGC_LOADER_USE_ENABLED())
- {
- //if you hitting this assert maybe a failure was injected at the place
- // it won't occur in a real-world scenario, see VSW 397871
- // then again maybe it 's a bug at the place FORBIDGC_LOADER_USE_ENABLED was set
- _ASSERTE(!"Unexpected SO, please read the comment");
- }
- else
- COMPlusThrowSO();
-}
-#else
-inline void NO_FORBIDGC_LOADER_USE_ThrowSO()
-{
- COMPlusThrowSO();
-}
-#endif
-#endif
-
// There is an MDA which can detect illegal reentrancy into the CLR. For instance, if you call managed
// code from a native vectored exception handler, this might cause a reverse PInvoke to occur. But if the
// exception was triggered from code that was executing in cooperative GC mode, we now have GC holes and
CONTRACTL {
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACTL_END;
CONTRACTL {
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACTL_END;
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
SUPPORTS_DAC;
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
}
{
CONTRACTL
{
- SO_NOT_MAINLINE;
NOTHROW;
GC_NOTRIGGER;
}
CONTRACTL {
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
SUPPORTS_DAC;
}
CONTRACTL_END;
inline void Thread::FinishSOWork()
{
WRAPPER_NO_CONTRACT;
-#ifdef FEATURE_STACK_PROBE
- if (HasThreadStateNC(TSNC_SOWorkNeeded))
- {
- ResetThreadStateNC(TSNC_SOWorkNeeded);
- }
-#else
_ASSERTE(!HasThreadStateNC(TSNC_SOWorkNeeded));
-#endif
}
#ifdef FEATURE_COMINTEROP
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_INTOLERANT;
MODE_COOPERATIVE;
}
CONTRACTL_END;
CONTRACTL {
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
} CONTRACTL_END;
if (pMT->IsDynamicStatics())
{
THROWS;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
SUPPORTS_DAC;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
SUPPORTS_DAC;
}
CONTRACTL_END;
{
THROWS;
GC_NOTRIGGER;
- SO_INTOLERANT;
MODE_ANY;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
CONTRACTL {
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACTL_END;
return TRUE;
}
- // This needs the probe with GenerateHardSO
- CONTRACT_VIOLATION(SOToleranceViolation);
-
if (GetThread() == this && HasThreadStateNC (TSNC_PreparingAbort) && !IsRudeAbort() )
{
STRESS_LOG0(LF_APPDOMAIN, LL_INFO10, "in Thread::ReadyForAbort PreparingAbort\n");
CONTRACTL {
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACTL_END;
CONTRACTL {
NOTHROW;
- SO_TOLERANT;
DISABLED(GC_TRIGGERS); // I think this is actually wrong: prevents a p->c->p mode switch inside a NOTRIGGER region.
}
CONTRACTL_END;
- CONTRACT_VIOLATION(SOToleranceViolation);
-
if (IsAtProcessExit())
{
goto Exit;
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_GC_TRIGGERS;
- STATIC_CONTRACT_SO_TOLERANT;
- BEGIN_SO_INTOLERANT_CODE(this);
TESTHOOKCALL(AppDomainCanBeUnloaded(GetDomain()->GetId().m_dwId,FALSE));
// It's possible we could go through here if we hit a hard SO and MC++ has called back
RaiseTheExceptionInternalOnly(exceptObj, FALSE);
}
- END_SO_INTOLERANT_CODE;
END_PRESERVE_LAST_ERROR;
}
case eRudeExitProcess:
case eDisableRuntime:
{
- // We're about to exit the process, if we take an SO here we'll just exit faster right???
- CONTRACT_VIOLATION(SOToleranceViolation);
-
GetEEPolicy()->NotifyHostOnDefaultAction(OPR_ThreadRudeAbortInCriticalRegion,action);
GetEEPolicy()->HandleExitProcessFromEscalation(action,HOST_E_EXITPROCESS_ADUNLOAD);
}
CONTRACTL {
NOTHROW;
DISABLED(GC_TRIGGERS); // I think this is actually wrong: prevents a p->c->p mode switch inside a NOTRIGGER region.
- SO_TOLERANT;
}
CONTRACTL_END;
// @todo - Needs a hard SO probe
- CONTRACT_VIOLATION(GCViolation|FaultViolation|SOToleranceViolation);
+ CONTRACT_VIOLATION(GCViolation|FaultViolation);
// If we have already received our PROCESS_DETACH during shutdown, there is only one thread in the
// process and no coordination is necessary.
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
}
CONTRACTL_END;
Thread *pThread = GetThread();
_ASSERTE(pThread);
-#ifdef FEATURE_STACK_PROBE
- if (GetEEPolicy()->GetActionOnFailure(FAIL_StackOverflow) == eRudeUnloadAppDomain)
- {
- RetailStackProbe(ADJUST_PROBE(DEFAULT_ENTRY_PROBE_AMOUNT), pThread);
- }
-#endif
-
- BEGIN_CONTRACT_VIOLATION(SOToleranceViolation);
-
// Get the saved context
CONTEXT *pCtx = pThread->GetSavedRedirectContext();
_ASSERTE(pCtx);
}
#endif // _TARGET_X86_
-
- END_CONTRACT_VIOLATION;
-
}
//****************************************************************************************
_ASSERTE(pThread->PreemptiveGCDisabled());
-#ifdef FEATURE_STACK_PROBE
- if (GetEEPolicy()->GetActionOnFailure(FAIL_StackOverflow) == eRudeUnloadAppDomain)
- {
- RetailStackProbe(ADJUST_PROBE(DEFAULT_ENTRY_PROBE_AMOUNT), pThread);
- }
-#endif
-
// Check if we can start abort
// We use InducedThreadRedirect as a marker to tell stackwalker that a thread is redirected from JIT code.
// This is to distinguish a thread is in Preemptive mode and in JIT code.
CONTRACTL {
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
CANNOT_TAKE_LOCK;
}
CONTRACTL_END;
CONTRACTL{
THROWS;
GC_TRIGGERS;
- SO_TOLERANT;
}
CONTRACTL_END;
#ifdef HIJACK_NONINTERRUPTIBLE_THREADS
Thread *thread = GetThread();
-#ifdef FEATURE_STACK_PROBE
- if (GetEEPolicy()->GetActionOnFailure(FAIL_StackOverflow) == eRudeUnloadAppDomain)
- {
- // Make sure default domain does not see SO.
- // probe for our entry point amount and throw if not enough stack
- RetailStackProbe(ADJUST_PROBE(DEFAULT_ENTRY_PROBE_AMOUNT), thread);
- }
-#endif // FEATURE_STACK_PROBE
-
- CONTRACT_VIOLATION(SOToleranceViolation);
-
thread->ResetThreadState(Thread::TS_Hijacked);
// Fix up our caller's stack, so it can resume from the hijack correctly
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
}
~CHrefOfTIHash()
{
- CONTRACTL { NOTHROW; SO_TOLERANT; } CONTRACTL_END;
+ CONTRACTL { NOTHROW; } CONTRACTL_END;
Clear();
}
void SigTypeContext::InitTypeContext(MethodDesc *md, Instantiation exactClassInst, Instantiation exactMethodInst, SigTypeContext *pRes)
{
LIMITED_METHOD_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
MethodTable *pMT = md->GetMethodTable();
if (pMT->IsArray())
NOTHROW;
GC_NOTRIGGER;
FORBID_FAULT;
- SO_TOLERANT;
SUPPORTS_DAC;
PRECONDITION(CheckPointer(md));
NOTHROW;
GC_NOTRIGGER;
FORBID_FAULT;
- SO_TOLERANT;
SUPPORTS_DAC;
PRECONDITION(CheckPointer(md));
NOTHROW;
GC_NOTRIGGER;
FORBID_FAULT;
- SO_TOLERANT;
PRECONDITION(CheckPointer(md));
} CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
FORBID_FAULT;
- SO_TOLERANT;
PRECONDITION(CheckPointer(declaringType, NULL_OK));
PRECONDITION(CheckPointer(pFD));
NOTHROW;
GC_NOTRIGGER;
FORBID_FAULT;
- SO_TOLERANT;
} CONTRACTL_END;
if (th.IsNull())
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
#include "compile.h"
#endif
#include "array.h"
-#include "stackprobe.h"
-
#ifndef DACCESS_COMPILE
#ifdef _DEBUG
_ASSERTE(GetInternalCorElementType() == ELEMENT_TYPE_FNPTR);
PTR_FnPtrTypeDesc asFnPtr = dac_cast<PTR_FnPtrTypeDesc>(this);
- BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), fFail = TRUE );
if (!fFail)
{
- retVal = ClassLoader::ComputeLoaderModuleForFunctionPointer(asFnPtr->GetRetAndArgTypesPointer(), asFnPtr->GetNumArgs()+1);
+ retVal = ClassLoader::ComputeLoaderModuleForFunctionPointer(asFnPtr->GetRetAndArgTypesPointer(), asFnPtr->GetNumArgs()+1);
}
- END_SO_INTOLERANT_CODE;
return retVal;
}
}
NOTHROW;
GC_NOTRIGGER;
FORBID_FAULT;
- SO_TOLERANT;
SUPPORTS_DAC;
// Function pointer types belong to no module
//PRECONDITION(GetInternalCorElementType() != ELEMENT_TYPE_FNPTR);
NOTHROW;
GC_NOTRIGGER;
FORBID_FAULT;
- SO_TOLERANT;
}
CONTRACTL_END
NOTHROW;
GC_NOTRIGGER;
FORBID_FAULT;
- SO_TOLERANT;
}
CONTRACTL_END
THROWS;
GC_TRIGGERS;
MODE_ANY;
- SO_TOLERANT;
}
CONTRACTL_END;
, m_pCCWTemplate(NULL)
#endif // FEATURE_COMINTEROP
{
- STATIC_CONTRACT_SO_TOLERANT;
WRAPPER_NO_CONTRACT;
INDEBUG(Verify());
}
void TypeHandle::NormalizeUnsharedArrayMT()
{
WRAPPER_NO_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT; // @TODO: This is probably incorrect
if (IsNull() || IsTypeDesc())
return;
CorElementType kind = AsMethodTable()->GetInternalCorElementType();
unsigned rank = AsMethodTable()->GetRank();
- // @todo This should be turned into a probe with a hard SO when we have one
- CONTRACT_VIOLATION(SOToleranceViolation);
// == FailIfNotLoadedOrNotRestored
TypeHandle arrayType = ClassLoader::LoadArrayTypeThrowing( elemType,
kind,
SUPPORTS_DAC;
Module* returnValue = NULL;
-
- INTERIOR_STACK_PROBE_NOTHROW_CHECK_THREAD(goto Exit;);
-
+
if (IsGenericVariable())
{
PTR_TypeVarTypeDesc pTyVar = dac_cast<PTR_TypeVarTypeDesc>(AsTypeDesc());
returnValue = GetMethodTable()->GetDefiningModuleForOpenType();
}
Exit:
- ;
- END_INTERIOR_STACK_PROBE;
return returnValue;
}
BOOL TypeHandle::ContainsGenericVariables(BOOL methodOnly /*=FALSE*/) const
{
- STATIC_CONTRACT_SO_TOLERANT;
STATIC_CONTRACT_NOTHROW;
SUPPORTS_DAC;
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
STATIC_CONTRACT_FORBID_FAULT;
- STATIC_CONTRACT_SO_INTOLERANT;
STATIC_CONTRACT_SUPPORTS_DAC;
if (IsTypeDesc())
{
THROWS;
GC_NOTRIGGER;
- SO_TOLERANT;
INJECT_FAULT(COMPlusThrowOM(););
}
CONTRACTL_END
- INTERIOR_STACK_PROBE_NOTHROW_CHECK_THREAD(goto Exit;);
- {
-
if (IsTypeDesc())
{
AsTypeDesc()->GetName(result);
- goto Exit;
+ return;
}
AsMethodTable()->_GetFullyQualifiedNameForClass(result);
Instantiation inst = GetInstantiation();
if (!inst.IsEmpty())
TypeString::AppendInst(result, inst);
- }
-Exit:
- ;
- END_INTERIOR_STACK_PROBE;
}
TypeHandle TypeHandle::GetParent() const
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
STATIC_CONTRACT_FORBID_FAULT;
- STATIC_CONTRACT_SO_TOLERANT;
if (IsTypeDesc())
return(AsTypeDesc()->GetParent());
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
FORBID_FAULT;
}
THROWS;
GC_TRIGGERS;
MODE_ANY;
- SO_INTOLERANT;
INJECT_FAULT(COMPlusThrowOM());
}
Instantiation typicalInst;
SigTypeContext typeContext;
TypeHandle thParent;
-
- INTERIOR_STACK_PROBE_CHECK_THREAD;
//TODO: cache (positive?) result in methodtable using, say, enum_flag2_UNUSEDxxx
if (!thParent.IsNull() && !thParent.SatisfiesClassConstraints())
{
- returnValue = FALSE;
- goto Exit;
+ return FALSE;
}
if (!HasInstantiation())
{
- returnValue = TRUE;
- goto Exit;
+ return TRUE;
}
classInst = GetInstantiation();
typicalInst = thCanonical.GetInstantiation();
SigTypeContext::InitTypeContext(*this, &typeContext);
-
+
for (DWORD i = 0; i < classInst.GetNumArgs(); i++)
- {
+ {
TypeHandle thArg = classInst[i];
_ASSERTE(!thArg.IsNull());
if (!tyvar->SatisfiesConstraints(&typeContext, thArg))
{
- returnValue = FALSE;
- goto Exit;
+ return FALSE;
}
+ }
- }
- returnValue = TRUE;
-Exit:
- ;
- END_INTERIOR_STACK_PROBE;
-
- return returnValue;
+ return TRUE;
}
TypeKey TypeHandle::GetTypeKey() const
{
LIMITED_METHOD_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
PRECONDITION(!IsGenericVariable());
if (IsTypeDesc())
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
STATIC_CONTRACT_FORBID_FAULT;
- STATIC_CONTRACT_SO_TOLERANT;
SUPPORTS_DAC;
STATIC_CONTRACT_CANNOT_TAKE_LOCK;
THROWS;
GC_TRIGGERS;
MODE_ANY;
- SO_TOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
PRECONDITION(CheckPointer(t));
PRECONDITION(!t.IsEncodedFixup());
SUPPORTS_DAC;
DWORD retVal = 0;
- INTERIOR_STACK_PROBE_NOTHROW_CHECK_THREAD(goto Exit;);
-
if (t.HasTypeParam())
{
retVal = HashParamType(level, t.GetInternalCorElementType(), t.GetTypeParam());
}
else
retVal = HashPossiblyInstantiatedType(level, t.GetCl(), Instantiation());
-
-#if defined(FEATURE_STACK_PROBE) && !defined(DACCESS_COMPILE)
-Exit:
- ;
-#endif
- END_INTERIOR_STACK_PROBE;
return retVal;
}
{
NOTHROW;
GC_NOTRIGGER;
- SO_INTOLERANT;
MODE_ANY;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
SUPPORTS_DAC;
}
#include "typeparse.h"
#include "typestring.h"
#include "assemblynative.hpp"
-#include "stackprobe.h"
#include "fstring.h"
//
{
CONTRACTL
{
- SO_TOLERANT;
WRAPPER(THROWS);
}CONTRACTL_END;
return E_INVALIDARG;
HRESULT hr = S_OK;
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return COR_E_STACKOVERFLOW);
*ppTypeName = NULL;
*pError = (DWORD)-1;
}
}
- END_SO_INTOLERANT_CODE;
-
return hr;
}
HRESULT hr = S_OK;
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return COR_E_STACKOVERFLOW);
-
ITypeNameBuilder* pTypeNameBuilder = new (nothrow) TypeNameBuilderWrapper();
if (pTypeNameBuilder)
hr = E_OUTOFMEMORY;
}
- END_SO_INTOLERANT_CODE;
-
return hr;
}
PRECONDITION(!bGenericArguments & !bSignature &! bAssemblySpec);
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return pBuf);
- {
TypeNameBuilder tnb(pBuf);
for (COUNT_T i = 0; i < m_names.GetCount(); i ++)
tnb.AddName(m_names[i]->GetUnicode());
- }
- END_SO_INTOLERANT_CODE;
return pBuf;
}
{
THROWS;
GC_TRIGGERS;
- SO_TOLERANT;
}
CONTRACTL_END;
- VALIDATE_BACKOUT_STACK_CONSUMPTION;
m_count--;
{
THROWS;
GC_TRIGGERS;
- SO_TOLERANT;
}
CONTRACTL_END;
- VALIDATE_BACKOUT_STACK_CONSUMPTION;
for(COUNT_T i = 0; i < m_genericArguments.GetCount(); i ++)
m_genericArguments[i]->Release();
{
CONTRACTL
{
- SO_TOLERANT;
WRAPPER(THROWS);
}CONTRACTL_END;
if (!bszName)
return E_INVALIDARG;
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return COR_E_STACKOVERFLOW);
- {
for (COUNT_T i = 0; i < m_names.GetCount(); i ++)
bszName[i] = SysAllocString(m_names[i]->GetUnicode());
- }
- END_SO_INTOLERANT_CODE;
return hr;
}
{
CONTRACTL
{
- SO_TOLERANT;
WRAPPER(THROWS);
}CONTRACTL_END;
if (pszAssemblyName == NULL)
return E_INVALIDARG;
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return COR_E_STACKOVERFLOW);
- {
*pszAssemblyName = SysAllocString(m_assembly.GetUnicode());
- }
- END_SO_INTOLERANT_CODE;
-
if (*pszAssemblyName == NULL)
- hr= E_OUTOFMEMORY;
+ hr = E_OUTOFMEMORY;
return hr;
}
if (pKeepAlive == NULL)
pAsmRef = NULL;
- //requires a lot of space
- DECLARE_INTERIOR_STACK_PROBE;
- // This function is recursive, so it must have an interior probe
- if (bThrowIfNotFound)
- {
- DO_INTERIOR_STACK_PROBE_FOR_CHECK_THREAD(12);
- }
- else
- {
- DO_INTERIOR_STACK_PROBE_FOR_NOTHROW_CHECK_THREAD(12, goto Exit;);
- }
-
// An explicit assembly has been specified so look for the type there
if (!GetAssembly()->IsEmpty())
{
Exit:
;
- END_INTERIOR_STACK_PROBE;
-
GCPROTECT_END();
RETURN th;
CONTRACTL
{
NOTHROW;
- SO_TOLERANT;
}
CONTRACTL_END;
- VALIDATE_BACKOUT_STACK_CONSUMPTION;
if (m_next)
delete m_next;
HRESULT hr = S_OK;
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return COR_E_STACKOVERFLOW);
-
m_parseState = ParseStateASSEMSPEC;
if (szAssemblySpec && *szAssemblySpec)
hr = S_OK;
}
- END_SO_INTOLERANT_CODE;
-
return hr;
}
}
CONTRACTL_END;
- CONTRACT_VIOLATION(SOToleranceViolation);
-
if (m_pStr)
{
m_pStr->Clear();
}
CONTRACT_END
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(COMPlusThrowSO());
{
TypeNameBuilder tnb(&ss, TypeNameBuilder::ParseStateNAME);
AppendTypeDef(tnb, pImport, td, format);
}
- END_SO_INTOLERANT_CODE;
RETURN;
}
}
CONTRACT_END
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(COMPlusThrowSO());
{
TypeNameBuilder tnb(&ss, TypeNameBuilder::ParseStateNAME);
if ((format & FormatAngleBrackets) != 0)
tnb.SetUseAngleBracketsForGenerics(TRUE);
AppendInst(tnb, inst, format);
}
- END_SO_INTOLERANT_CODE;
RETURN;
}
THROWS;
}
CONTRACT_END
-
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(COMPlusThrowSO());
+
{
TypeNameBuilder tnb(&ss);
if ((format & FormatAngleBrackets) != 0)
tnb.SetUseAngleBracketsForGenerics(TRUE);
AppendType(tnb, ty, typeInstantiation, format);
}
- END_SO_INTOLERANT_CODE;
RETURN;
}
}
CONTRACT_END
- INTERIOR_STACK_PROBE_FOR_CHECK_THREAD(10);
-
BOOL bToString = (format & (FormatNamespace|FormatFullInst|FormatAssembly)) == FormatNamespace;
// It's null!
tnb.AddAssemblySpec(pAssemblyName.GetUnicode());
- }
-
- END_INTERIOR_STACK_PROBE;
-
+ }
- RETURN;
+ RETURN;
}
void TypeString::AppendMethod(SString& s, MethodDesc *pMD, Instantiation typeInstantiation, const DWORD format)
}
CONTRACTL_END
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(COMPlusThrowSO());
{
TypeHandle th;
}
}
}
- END_SO_INTOLERANT_CODE;
}
void TypeString::AppendField(SString& s, FieldDesc *pFD, Instantiation typeInstantiation, const DWORD format /* = FormatNamespace */)
}
CONTRACTL_END;
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(COMPlusThrowSO());
{
TypeHandle th(pFD->GetApproxEnclosingMethodTable());
AppendType(s, th, typeInstantiation, format);
s.AppendUTF8(NAMESPACE_SEPARATOR_STR);
s.AppendUTF8(pFD->GetName());
}
- END_SO_INTOLERANT_CODE;
}
#ifdef _DEBUG
NOTHROW;
PRECONDITION(CheckPointer(t));
PRECONDITION(ss.Check());
- SO_NOT_MAINLINE;
}
CONTRACTL_END
NOTHROW;
PRECONDITION(CheckPointer(pTypeKey));
PRECONDITION(ss.Check());
- SO_NOT_MAINLINE;
}
CONTRACTL_END
THROWS;
if (format & (FormatAssembly|FormatFullInst)) GC_TRIGGERS; else GC_NOTRIGGER;
PRECONDITION(CheckPointer(pTypeKey));
- SO_INTOLERANT;
}
CONTRACT_END
PRECONDITION(CheckPointer(pTypeKey));
}
CONTRACT_END
-
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(COMPlusThrowSO());
+
{
TypeNameBuilder tnb(&ss);
AppendTypeKey(tnb, pTypeKey, format);
}
- END_SO_INTOLERANT_CODE;
RETURN;
}
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- SO_TOLERANT;
SUPPORTS_DAC_HOST_ONLY;
}
CONTRACTL_END;
LONG ref = 0;
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return COR_E_STACKOVERFLOW);
-
ref = InterlockedDecrement(&m_ref);
if (ref == 0)
delete this;
- END_SO_INTOLERANT_CODE;
-
return ref;
}
CONTRACTL_END;
HRESULT hr;
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return COR_E_STACKOVERFLOW);
hr = m_tnb.OpenGenericArguments();
- END_SO_INTOLERANT_CODE;
return hr;
}
CONTRACTL_END;
HRESULT hr;
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return COR_E_STACKOVERFLOW);
hr = m_tnb.CloseGenericArguments();
- END_SO_INTOLERANT_CODE;
return hr;
}
CONTRACTL_END;
HRESULT hr;
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return COR_E_STACKOVERFLOW);
hr = m_tnb.OpenGenericArgument();
- END_SO_INTOLERANT_CODE;
return hr;
}
CONTRACTL_END;
HRESULT hr;
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return COR_E_STACKOVERFLOW);
hr = m_tnb.CloseGenericArgument();
- END_SO_INTOLERANT_CODE;
return hr;
}
}
CONTRACTL_END;
- HRESULT hr;
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return COR_E_STACKOVERFLOW);
- hr = m_tnb.AddName(szName);
- END_SO_INTOLERANT_CODE;
- return hr;
+ return m_tnb.AddName(szName);
}
HRESULT STDMETHODCALLTYPE TypeNameBuilderWrapper::AddPointer()
}
CONTRACTL_END;
- HRESULT hr;
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return COR_E_STACKOVERFLOW);
- hr = m_tnb.AddPointer();
- END_SO_INTOLERANT_CODE;
- return hr;
+ return m_tnb.AddPointer();
}
HRESULT STDMETHODCALLTYPE TypeNameBuilderWrapper::AddByRef()
}
CONTRACTL_END;
- HRESULT hr;
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return COR_E_STACKOVERFLOW);
- hr = m_tnb.AddByRef();
- END_SO_INTOLERANT_CODE;
- return hr;
+ return m_tnb.AddByRef();
}
HRESULT STDMETHODCALLTYPE TypeNameBuilderWrapper::AddSzArray()
}
CONTRACTL_END;
- HRESULT hr;
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return COR_E_STACKOVERFLOW);
- hr = m_tnb.AddSzArray();
- END_SO_INTOLERANT_CODE;
- return hr;
+ return m_tnb.AddSzArray();
}
HRESULT STDMETHODCALLTYPE TypeNameBuilderWrapper::AddArray(DWORD rank)
}
CONTRACTL_END;
- HRESULT hr;
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return COR_E_STACKOVERFLOW);
- hr = m_tnb.AddArray(rank);
- END_SO_INTOLERANT_CODE;
- return hr;
+ return m_tnb.AddArray(rank);
}
HRESULT STDMETHODCALLTYPE TypeNameBuilderWrapper::AddAssemblySpec(LPCWSTR szAssemblySpec)
}
CONTRACTL_END;
- HRESULT hr;
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return COR_E_STACKOVERFLOW);
- hr = m_tnb.AddAssemblySpec(szAssemblySpec);
- END_SO_INTOLERANT_CODE;
- return hr;
+ return m_tnb.AddAssemblySpec(szAssemblySpec);
}
HRESULT STDMETHODCALLTYPE TypeNameBuilderWrapper::ToString(BSTR* pszStringRepresentation)
{
WRAPPER_NO_CONTRACT;
- HRESULT hr;
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return COR_E_STACKOVERFLOW);
- hr = m_tnb.ToString(pszStringRepresentation);
- END_SO_INTOLERANT_CODE;
- return hr;
+ return m_tnb.ToString(pszStringRepresentation);
}
HRESULT STDMETHODCALLTYPE TypeNameBuilderWrapper::Clear()
}
CONTRACTL_END;
- HRESULT hr;
- BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return COR_E_STACKOVERFLOW);
- hr = m_tnb.Clear();
- END_SO_INTOLERANT_CODE;
- return hr;
+ return m_tnb.Clear();
}
{
THROWS;
GC_NOTRIGGER;
- SO_TOLERANT; // So long as we cleanup the heap when we're done, all the memory goes with it
INJECT_FAULT(COMPlusThrowOM(););
} CONTRACTL_END;
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
STATIC_CONTRACT_FORBID_FAULT;
- STATIC_CONTRACT_SO_TOLERANT;
HMODULE hMod = WszGetModuleHandle(lpModuleFileName);
return hMod;
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
STATIC_CONTRACT_FORBID_FAULT;
- STATIC_CONTRACT_SO_TOLERANT;
HMODULE hMod = WszGetModuleHandle(NULL);
return hMod;
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_TRIGGERS;
STATIC_CONTRACT_FAULT;
- STATIC_CONTRACT_SO_TOLERANT;
HMODULE hMod;
UINT last = SetErrorMode(SEM_NOOPENFILEERRORBOX|SEM_FAILCRITICALERRORS);
DWORD dwLastError = 0;
HMODULE hmod = 0;
- // This method should be marked "throws" due to the probe here.
- STATIC_CONTRACT_VIOLATION(ThrowsViolation);
-
- BEGIN_SO_TOLERANT_CODE(GetThread());
hmod = CLRLoadLibraryWorker(lpLibFileName, &dwLastError);
- END_SO_TOLERANT_CODE;
SetLastError(dwLastError);
return hmod;
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_TRIGGERS;
STATIC_CONTRACT_FAULT;
- STATIC_CONTRACT_SO_TOLERANT;
HMODULE hMod;
UINT last = SetErrorMode(SEM_NOOPENFILEERRORBOX|SEM_FAILCRITICALERRORS);
DWORD lastError = ERROR_SUCCESS;
HMODULE hmod = NULL;
- BEGIN_SO_TOLERANT_CODE(GetThread());
hmod = CLRLoadLibraryExWorker(lpLibFileName, hFile, dwFlags, &lastError);
- END_SO_TOLERANT_CODE;
-
+
SetLastError(lastError);
return hmod;
}
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_TRIGGERS;
STATIC_CONTRACT_FORBID_FAULT;
- STATIC_CONTRACT_SO_TOLERANT;
return FreeLibrary(hModule);
}
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_TRIGGERS;
STATIC_CONTRACT_FORBID_FAULT;
- STATIC_CONTRACT_SO_TOLERANT;
// This is no-return
FreeLibraryAndExitThread(hModule,dwExitCode);
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
STATIC_CONTRACT_MODE_ANY;
- STATIC_CONTRACT_SO_TOLERANT;
struct Param
{
{
NOTHROW;
GC_NOTRIGGER;
- SO_INTOLERANT;
MODE_ANY;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_INTOLERANT;
MODE_PREEMPTIVE;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_INTOLERANT;
MODE_PREEMPTIVE;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_INTOLERANT;
MODE_PREEMPTIVE;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_INTOLERANT;
MODE_PREEMPTIVE;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_INTOLERANT;
MODE_PREEMPTIVE;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_INTOLERANT;
MODE_COOPERATIVE;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_INTOLERANT;
MODE_COOPERATIVE;
}
CONTRACTL_END;
// can be called on a COOP thread and it has a GC_NOTRIGGER contract.
// We should use the AD unload thread to call this function on.
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACTL_END;
Thread *pThread = NULL;
CONTRACT_VIOLATION(GCViolation);
- // I am returning TRUE here so the caller will NOT enable
- // ARM - if we can't take the thread store lock, something
- // is already kind of messed up so no need to proceed with
- // enabling ARM.
- BEGIN_SO_INTOLERANT_CODE_NOTHROW(GetThread(), return TRUE);
// Take the thread store lock while we enumerate threads.
ThreadStoreLockHolder tsl ;
continue;
pThread->QueryThreadProcessorUsage();
}
-
- END_SO_INTOLERANT_CODE;
}
g_fEnableARM = TRUE;
}
#define InternalSetupForComCall(CannotEnterRetVal, OOMRetVal, SORetVal, CheckCanRunManagedCode) \
SetupThreadForComCall(OOMRetVal); \
if (CheckCanRunManagedCode && !CanRunManagedCode()) \
- return CannotEnterRetVal; \
-SO_INTOLERANT_CODE_NOTHROW(CURRENT_THREAD, return SORetVal)
+ return CannotEnterRetVal;
#define SetupForComCallHRNoHostNotif() InternalSetupForComCall(HOST_E_CLRNOTAVAILABLE, E_OUTOFMEMORY, COR_E_STACKOVERFLOW, true)
#define SetupForComCallHRNoHostNotifNoCheckCanRunManagedCode() InternalSetupForComCall(HOST_E_CLRNOTAVAILABLE, E_OUTOFMEMORY, COR_E_STACKOVERFLOW, false)
if (CheckCanRunManagedCode && !CanRunManagedCode()) \
return CannotEnterRetVal; \
SetupThreadForComCall(OOMRetVal); \
-BEGIN_SO_INTOLERANT_CODE_NOTHROW(CURRENT_THREAD, SORetVal) \
#define BeginSetupForComCallHRWithEscapingCorruptingExceptions() \
HRESULT __hr = S_OK; \
#define EndSetupForComCallHRWithEscapingCorruptingExceptions() \
} \
-END_SO_INTOLERANT_CODE; \
\
if (FAILED(__hr)) \
{ \
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
PRECONDITION(CheckPointer(pdwHashCode));
}
NOTHROW;
GC_NOTRIGGER;
FORBID_FAULT;
- SO_TOLERANT;
} CONTRACTL_END
#ifndef DACCESS_COMPILE
// LockedRangeList::IsInRangeWorker
// VirtualCallStubManager::isDispatchingStub
//
- CONTRACT_VIOLATION(SOToleranceViolation);
kind = pCur->getStubKind(stubAddress, usePredictStubKind);
if (kind != SK_UNKNOWN)
{
NOTHROW;
GC_NOTRIGGER;
FORBID_FAULT;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pElem));
} CONTRACTL_END;
- // @todo - Remove this when have a probe that generates a hard SO.
- CONTRACT_VIOLATION(SOToleranceViolation);
g_resolveCache->PromoteChainEntry(pElem);
return pElem;
}
INJECT_FAULT(COMPlusThrowOM(););
PRECONDITION(CheckPointer(pTransitionBlock));
MODE_COOPERATIVE;
- SO_TOLERANT;
} CONTRACTL_END;
MAKE_CURRENT_THREAD_AVAILABLE();
#ifndef _TARGET_X86_
if (flags & SDF_ResolvePromoteChain)
{
- BEGIN_SO_INTOLERANT_CODE(CURRENT_THREAD);
-
ResolveCacheElem * pElem = (ResolveCacheElem *)token;
g_resolveCache->PromoteChainEntry(pElem);
target = (PCODE) pElem->target;
pMgr->BackPatchWorker(&callSite);
}
- END_SO_INTOLERANT_CODE;
-
return target;
}
#endif
MODE_COOPERATIVE;
PRECONDITION(CheckPointer(pMT));
POSTCONDITION(CheckPointer(RETVAL));
- SO_TOLERANT;
} CONTRACT_END;
// This is called when trying to create a HelperMethodFrame, which means there are
CONTRACTL {
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
PRECONDITION(CheckPointer(pMT));
} CONTRACTL_END
{
if (value)
{
- BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
value->Release();
- END_SO_TOLERANT_CODE_CALLING_HOST;
-
}
}
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_NOTRIGGER;
MODE_COOPERATIVE;
- SO_TOLERANT;
}
CONTRACTL_END;
MaxWorkerThreads != 0 &&
MaxIOCompletionThreads != 0)
{
- BEGIN_SO_INTOLERANT_CODE(GetThread());
-
if (GetForceMaxWorkerThreadsValue() == 0)
{
MaxLimitTotalWorkerThreads = min(MaxWorkerThreads, (DWORD)ThreadCounter::MaxPossibleCount);
}
}
- END_SO_INTOLERANT_CODE;
-
MaxLimitTotalCPThreads = min(MaxIOCompletionThreads, (DWORD)ThreadCounter::MaxPossibleCount);
result = TRUE;
MinWorkerThreads <= (DWORD) MaxLimitTotalWorkerThreads &&
MinIOCompletionThreads <= (DWORD) MaxLimitTotalCPThreads)
{
- BEGIN_SO_INTOLERANT_CODE(GetThread());
-
if (GetForceMinWorkerThreadsValue() == 0)
{
MinLimitTotalWorkerThreads = max(1, min(MinWorkerThreads, (DWORD)ThreadCounter::MaxPossibleCount));
}
}
- END_SO_INTOLERANT_CODE;
-
MinLimitTotalCPThreads = max(1, min(MinIOCompletionThreads, (DWORD)ThreadCounter::MaxPossibleCount));
init_result = TRUE;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
NOTHROW;
MODE_ANY;
GC_TRIGGERS;
- SO_INTOLERANT;
}
CONTRACTL_END;
NOTHROW;
MODE_ANY;
GC_TRIGGERS;
- SO_INTOLERANT;
}
CONTRACTL_END;
{
NOTHROW;
GC_NOTRIGGER;
- SO_TOLERANT;
MODE_ANY;
}
CONTRACTL_END;
DWORD WINAPI ThreadpoolMgr::intermediateThreadProc(PVOID arg)
{
WRAPPER_NO_CONTRACT;
- STATIC_CONTRACT_SO_INTOLERANT;
offset_counter++;
if (offset_counter * offset_multiplier > (int)GetOsPageSize())
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_INTOLERANT;
}
CONTRACTL_END;
void ThreadpoolMgr::InsertNewWaitForSelf(WaitInfo* pArgs)
{
WRAPPER_NO_CONTRACT;
- STATIC_CONTRACT_SO_INTOLERANT;
WaitInfo* waitInfo = pArgs;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
}
CONTRACTL_END;
return 0;
}
- BEGIN_SO_INTOLERANT_CODE(pThread); // we probe at the top of the thread so we can safely call anything below here.
{
// wait threads never die. (Why?)
for (;;)
}
}
}
- END_SO_INTOLERANT_CODE;
//This is unreachable...so no return required.
}
THROWS;
MODE_PREEMPTIVE;
GC_TRIGGERS;
- SO_TOLERANT;
}
CONTRACTL_END;
}
}
- BEGIN_SO_INTOLERANT_CODE_NOTHROW(pThread, return ERROR_STACK_OVERFLOW);
{
AsyncCallback * asyncCallback = (AsyncCallback*) pArgs;
((WAITORTIMERCALLBACKFUNC) waitInfo->Callback)
( waitInfo->Context, asyncCallback->waitTimedOut != FALSE);
}
- END_SO_INTOLERANT_CODE;
return ERROR_SUCCESS;
}
void ThreadpoolMgr::DeregisterWait(WaitInfo* pArgs)
{
-
WRAPPER_NO_CONTRACT;
- STATIC_CONTRACT_SO_INTOLERANT;
WaitInfo* waitInfo = pArgs;
THROWS;
if (GetThread()) { MODE_PREEMPTIVE;} else { DISABLED(MODE_ANY);}
if (GetThread()) { GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
- SO_INTOLERANT;
}
CONTRACTL_END;
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_GC_NOTRIGGER;
STATIC_CONTRACT_MODE_ANY;
- STATIC_CONTRACT_SO_TOLERANT;
LPOVERLAPPED lpOverlapped=NULL;
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
STATIC_CONTRACT_MODE_ANY;
- STATIC_CONTRACT_SO_TOLERANT;
_ASSERTE(pThread);
GC_NOTRIGGER;
NOTHROW;
MODE_ANY;
- SO_TOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_INTOLERANT;
}
CONTRACTL_END;
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_GC_TRIGGERS; // due to SetApartment
STATIC_CONTRACT_MODE_PREEMPTIVE;
- STATIC_CONTRACT_SO_INTOLERANT;
/* cannot use contract because of SEH
CONTRACTL
{
MODE_ANY;
}
CONTRACTL_END;
- STATIC_CONTRACT_SO_INTOLERANT;
_ASSERTE(pArg);
TimerInfo * timerInfo = pArg;
THROWS;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
}
CONTRACTL_END;
}
}
- BEGIN_SO_INTOLERANT_CODE(pThread);
{
TimerInfo* timerInfo = (TimerInfo*) pArgs;
((WAITORTIMERCALLBACKFUNC) timerInfo->Function) (timerInfo->Context, TRUE) ;
DeleteTimer(timerInfo);
}
}
- END_SO_INTOLERANT_CODE;
return ERROR_SUCCESS;
}
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_INTOLERANT;
}
CONTRACTL_END;
NOTHROW;
GC_TRIGGERS;
MODE_ANY;
- SO_TOLERANT;
} CONTRACTL_END;
SafeRelease((IUnknown*)value);
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- SO_TOLERANT;
} CONTRACTL_END;
SafeReleasePreemp((IUnknown*)value);
NOTHROW;
GC_NOTRIGGER;
FORBID_FAULT;
- SO_INTOLERANT;
}
CONTRACTL_END;
CompileStatus result = NOT_COMPILED;
- // This is an entry point into the JIT which can call back into the VM. There are methods in the
- // JIT that will swallow exceptions and only the VM guarentees that exceptions caught or swallowed
- // with restore the debug state of the stack guards. So it is necessary to ensure that the status
- // is restored on return from the call into the JIT, which this light-weight transition macro
- // will do.
- REMOVE_STACK_GUARD;
-
CORINFO_MODULE_HANDLE module;
// We only compile IL_STUBs from the current assembly
#ifdef ALLOW_SXS_JIT_NGEN
if (m_zapper->m_alternateJit)
{
- REMOVE_STACK_GUARD;
-
res = m_zapper->m_alternateJit->compileMethod( this,
&m_currentMethodInfo,
CORJIT_FLAGS::CORJIT_FLAG_CALL_GETJITFLAGS,
if (FAILED(res))
{
- REMOVE_STACK_GUARD;
-
ICorJitCompiler * pCompiler = m_zapper->m_pJitCompiler;
res = pCompiler->compileMethod(this,
&m_currentMethodInfo,
BEGIN_ENTRYPOINT_NOTHROW;
EX_TRY
{
- REMOVE_STACK_GUARD;
((DomainCallback *) pvArgs)->doCallback();
}
EX_CATCH_HRESULT(hr);