ASM_OFFSET( 0, 0, Thread, m_rgbAllocContextBuffer)
ASM_OFFSET( 28, 38, Thread, m_ThreadStateFlags)
ASM_OFFSET( 2c, 40, Thread, m_pTransitionFrame)
-ASM_OFFSET( 30, 48, Thread, m_pHackPInvokeTunnel)
+ASM_OFFSET( 30, 48, Thread, m_pDeferredTransitionFrame)
ASM_OFFSET( 40, 68, Thread, m_ppvHijackedReturnAddressLocation)
ASM_OFFSET( 44, 70, Thread, m_pvHijackedReturnAddress)
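// (Editorial note: in this offsets table the two leading values appear to be the
// field's hex offset on 32-bit and 64-bit targets, respectively; the rename above
// keeps both offsets unchanged.)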
#ifdef HOST_64BIT
{
uintptr_t faultingIP = palContext->GetIp();
- ICodeManager * pCodeManager = GetRuntimeInstance()->FindCodeManagerByAddress((PTR_VOID)faultingIP);
+ ICodeManager * pCodeManager = GetRuntimeInstance()->GetCodeManagerForAddress((PTR_VOID)faultingIP);
bool translateToManagedException = false;
if (pCodeManager != NULL)
{
uintptr_t faultingIP = pExPtrs->ContextRecord->GetIp();
- ICodeManager * pCodeManager = GetRuntimeInstance()->FindCodeManagerByAddress((PTR_VOID)faultingIP);
+ ICodeManager * pCodeManager = GetRuntimeInstance()->GetCodeManagerForAddress((PTR_VOID)faultingIP);
bool translateToManagedException = false;
if (pCodeManager != NULL)
{
Thread * pCurThread = ThreadStore::GetCurrentThread();
- pCurThread->SetupHackPInvokeTunnel();
+ pCurThread->DeferTransitionFrame();
pCurThread->DisablePreemptiveMode();
ASSERT(!pCurThread->IsDoNotTriggerGcSet());
Thread * pCurThread = ThreadStore::GetCurrentThread();
- pCurThread->SetupHackPInvokeTunnel();
+ pCurThread->DeferTransitionFrame();
pCurThread->DisablePreemptiveMode();
int64_t ret = GCHeapUtilities::GetGCHeap()->GetTotalBytesInUse();
Thread *pCurThread = ThreadStore::GetCurrentThread();
ASSERT(!pCurThread->IsCurrentThreadInCooperativeMode());
- pCurThread->SetupHackPInvokeTunnel();
+ pCurThread->DeferTransitionFrame();
pCurThread->DisablePreemptiveMode();
int result = GCHeapUtilities::GetGCHeap()->StartNoGCRegion(totalSize, hasLohSize, lohSize, disallowFullBlockingGC);
{
Thread* pThread = ThreadStore::GetCurrentThread();
- pThread->SetupHackPInvokeTunnel();
+ pThread->DeferTransitionFrame();
pThread->DisablePreemptiveMode();
ASSERT(!pThread->IsDoNotTriggerGcSet());
{
Thread* pThread = ThreadStore::GetCurrentThread();
- pThread->SetupHackPInvokeTunnel();
+ pThread->DeferTransitionFrame();
pThread->DisablePreemptiveMode();
ASSERT(!pThread->IsDoNotTriggerGcSet());
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
-#pragma once
-#define ICODEMANAGER_INCLUDED
+#pragma once
+#include <rhbinder.h>
// TODO: Debugger/DAC support (look for TODO: JIT)
#define GC_CALL_INTERIOR 0x1
#define GC_CALL_PINNED 0x2
-#define GC_CALL_CHECK_APP_DOMAIN 0x4
-#define GC_CALL_STATIC 0x8
typedef void (*GCEnumCallback)(
void * hCallback, // callback data
};
#ifdef TARGET_ARM64
+// Verify that we can use bitwise shifts to convert from GCRefKind to PInvokeTransitionFrameFlags and back
+C_ASSERT(PTFF_X0_IS_GCREF == ((uint64_t)GCRK_Object << 32));
+C_ASSERT(PTFF_X0_IS_BYREF == ((uint64_t)GCRK_Byref << 32));
+C_ASSERT(PTFF_X1_IS_GCREF == ((uint64_t)GCRK_Scalar_Obj << 32));
+C_ASSERT(PTFF_X1_IS_BYREF == ((uint64_t)GCRK_Scalar_Byref << 32));
+
+inline uint64_t ReturnKindToTransitionFrameFlags(GCRefKind returnKind)
+{
+ if (returnKind == GCRK_Scalar)
+ return 0;
+
+ return PTFF_SAVE_X0 | PTFF_SAVE_X1 | ((uint64_t)returnKind << 32);
+}
+
+inline GCRefKind TransitionFrameFlagsToReturnKind(uint64_t transFrameFlags)
+{
+ GCRefKind returnKind = (GCRefKind)((transFrameFlags & (PTFF_X0_IS_GCREF | PTFF_X0_IS_BYREF | PTFF_X1_IS_GCREF | PTFF_X1_IS_BYREF)) >> 32);
+ ASSERT((returnKind == GCRK_Scalar) || ((transFrameFlags & PTFF_SAVE_X0) && (transFrameFlags & PTFF_SAVE_X1)));
+ return returnKind;
+}
+
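// Editorial sketch: a round trip through the two helpers above, assuming the
// enum values implied by the C_ASSERTs (e.g. GCRK_Object == 1, which makes
// PTFF_X0_IS_GCREF == 1ull << 32):
//
//   uint64_t flags = ReturnKindToTransitionFrameFlags(GCRK_Object);
//   // flags == PTFF_SAVE_X0 | PTFF_SAVE_X1 | PTFF_X0_IS_GCREF
//   ASSERT(TransitionFrameFlagsToReturnKind(flags) == GCRK_Object);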
// Extract individual GCRefKind components from a composite return kind
inline GCRefKind ExtractReg0ReturnKind(GCRefKind returnKind)
{
virtual bool UnwindStackFrame(MethodInfo * pMethodInfo,
REGDISPLAY * pRegisterSet, // in/out
- PTR_VOID * ppPreviousTransitionFrame) = 0; // out
+ PInvokeTransitionFrame** ppPreviousTransitionFrame) = 0; // out
virtual uintptr_t GetConservativeUpperBoundForOutgoingArgs(MethodInfo * pMethodInfo,
REGDISPLAY * pRegisterSet) = 0;
COOP_PINVOKE_HELPER(HANDLE, RhGetOSModuleFromPointer, (PTR_VOID pPointerVal))
{
- ICodeManager * pCodeManager = GetRuntimeInstance()->FindCodeManagerByAddress(pPointerVal);
+ ICodeManager * pCodeManager = GetRuntimeInstance()->GetCodeManagerForAddress(pPointerVal);
if (pCodeManager != NULL)
return (HANDLE)pCodeManager->GetOsModuleHandle();
{
// This must be called via p/invoke rather than RuntimeImport to make the stack crawlable.
- ThreadStore::GetCurrentThread()->SetupHackPInvokeTunnel();
+ ThreadStore::GetCurrentThread()->DeferTransitionFrame();
return RhpCalculateStackTraceWorker(pOutputBuffer, outputBufferLength, pAddressInCurrentFrame);
}
PTR_UInt8 RuntimeInstance::FindMethodStartAddress(PTR_VOID ControlPC)
{
- ICodeManager * pCodeManager = FindCodeManagerByAddress(ControlPC);
+ ICodeManager * pCodeManager = GetCodeManagerForAddress(ControlPC);
MethodInfo methodInfo;
if (pCodeManager != NULL && pCodeManager->FindMethodInfo(ControlPC, &methodInfo))
{
return NULL;
}
-ICodeManager * RuntimeInstance::FindCodeManagerByAddress(PTR_VOID pvAddress)
+// WARNING: This method is called by suspension while one thread is interrupted
+// in a random location, possibly holding random locks.
+// It is unsafe to use blocking APIs or allocate in this method.
+// Please ensure that all methods called by this one also have this warning.
+bool RuntimeInstance::IsManaged(PTR_VOID pvAddress)
{
- ReaderWriterLock::ReadHolder read(&m_ModuleListLock);
+ return (dac_cast<TADDR>(pvAddress) - dac_cast<TADDR>(m_pvManagedCodeStartRange) < m_cbManagedCodeRange);
+}
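// Editorial note: the single unsigned comparison above is a full range check.
// If pvAddress lies below m_pvManagedCodeStartRange, the subtraction wraps to a
// value larger than m_cbManagedCodeRange, so one compare covers both bounds and
// keeps this path branch-light for the suspension scenario described above.
// An equivalent, illustrative spelling with explicit bounds:
//
//   TADDR addr  = dac_cast<TADDR>(pvAddress);
//   TADDR start = dac_cast<TADDR>(m_pvManagedCodeStartRange);
//   return (addr >= start) && (addr - start < m_cbManagedCodeRange);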
- // TODO: ICodeManager support in DAC
-#ifndef DACCESS_COMPILE
- for (CodeManagerEntry * pEntry = m_CodeManagerList.GetHead(); pEntry != NULL; pEntry = pEntry->m_pNext)
+ICodeManager * RuntimeInstance::GetCodeManagerForAddress(PTR_VOID pvAddress)
+{
+ if (!IsManaged(pvAddress))
{
- if (dac_cast<TADDR>(pvAddress) - dac_cast<TADDR>(pEntry->m_pvStartRange) < pEntry->m_cbRange)
- return pEntry->m_pCodeManager;
+ return NULL;
}
-#endif
- return NULL;
+ return m_CodeManager;
}
#ifndef DACCESS_COMPILE
ICodeManager * RuntimeInstance::FindCodeManagerForClasslibFunction(PTR_VOID address)
{
// Try looking up the code manager assuming the address is for code first. This is expected to be most common.
- ICodeManager * pCodeManager = FindCodeManagerByAddress(address);
+ ICodeManager * pCodeManager = GetCodeManagerForAddress(address);
if (pCodeManager != NULL)
return pCodeManager;
PTR_UInt8 RuntimeInstance::GetTargetOfUnboxingAndInstantiatingStub(PTR_VOID ControlPC)
{
- ICodeManager * pCodeManager = FindCodeManagerByAddress(ControlPC);
+ ICodeManager * pCodeManager = GetCodeManagerForAddress(ControlPC);
if (pCodeManager != NULL)
{
PTR_UInt8 pData = (PTR_UInt8)pCodeManager->GetAssociatedData(ControlPC);
GPTR_IMPL_INIT(RuntimeInstance, g_pTheRuntimeInstance, NULL);
+// WARNING: This method is called by suspension while one thread is interrupted
+// in a random location, possibly holding random locks.
+// It is unsafe to use blocking APIs or allocate in this method.
+// Please ensure that all methods called by this one also have this warning.
PTR_RuntimeInstance GetRuntimeInstance()
{
return g_pTheRuntimeInstance;
}
}
-void RuntimeInstance::SetLoopHijackFlags(uint32_t flag)
-{
- for (TypeManagerList::Iterator iter = m_TypeManagerList.Begin(); iter != m_TypeManagerList.End(); iter++)
- {
- iter->m_pTypeManager->SetLoopHijackFlag(flag);
- }
-}
-
RuntimeInstance::OsModuleList* RuntimeInstance::GetOsModuleList()
{
return dac_cast<DPTR(OsModuleList)>(dac_cast<TADDR>(this) + offsetof(RuntimeInstance, m_OsModuleList));
ReaderWriterLock& RuntimeInstance::GetTypeManagerLock()
{
- return m_ModuleListLock;
+ return m_TypeManagerLock;
}
#ifndef DACCESS_COMPILE
RuntimeInstance::RuntimeInstance() :
m_pThreadStore(NULL),
+ m_CodeManager(NULL),
m_conservativeStackReportingEnabled(false),
m_pUnboxingStubsRegion(NULL)
{
m_conservativeStackReportingEnabled = true;
}
-bool RuntimeInstance::RegisterCodeManager(ICodeManager * pCodeManager, PTR_VOID pvStartRange, uint32_t cbRange)
+void RuntimeInstance::RegisterCodeManager(ICodeManager * pCodeManager, PTR_VOID pvStartRange, uint32_t cbRange)
{
- CodeManagerEntry * pEntry = new (nothrow) CodeManagerEntry();
- if (NULL == pEntry)
- return false;
+ _ASSERTE(m_CodeManager == NULL);
+ _ASSERTE(pCodeManager != NULL);
- pEntry->m_pvStartRange = pvStartRange;
- pEntry->m_cbRange = cbRange;
- pEntry->m_pCodeManager = pCodeManager;
-
- {
- ReaderWriterLock::WriteHolder write(&m_ModuleListLock);
-
- m_CodeManagerList.PushHead(pEntry);
- }
-
- return true;
-}
-
-void RuntimeInstance::UnregisterCodeManager(ICodeManager * pCodeManager)
-{
- CodeManagerEntry * pEntry = NULL;
-
- {
- ReaderWriterLock::WriteHolder write(&m_ModuleListLock);
-
- for (CodeManagerList::Iterator i = m_CodeManagerList.Begin(), end = m_CodeManagerList.End(); i != end; i++)
- {
- if (i->m_pCodeManager == pCodeManager)
- {
- pEntry = *i;
-
- m_CodeManagerList.Remove(i);
- break;
- }
- }
- }
-
- ASSERT(pEntry != NULL);
- delete pEntry;
-}
-
-extern "C" bool __stdcall RegisterCodeManager(ICodeManager * pCodeManager, PTR_VOID pvStartRange, uint32_t cbRange)
-{
- return GetRuntimeInstance()->RegisterCodeManager(pCodeManager, pvStartRange, cbRange);
+ m_CodeManager = pCodeManager;
+ m_pvManagedCodeStartRange = pvStartRange;
+ m_cbManagedCodeRange = cbRange;
}
-extern "C" void __stdcall UnregisterCodeManager(ICodeManager * pCodeManager)
+extern "C" void __stdcall RegisterCodeManager(ICodeManager * pCodeManager, PTR_VOID pvStartRange, uint32_t cbRange)
{
- return GetRuntimeInstance()->UnregisterCodeManager(pCodeManager);
+ GetRuntimeInstance()->RegisterCodeManager(pCodeManager, pvStartRange, cbRange);
}
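// Editorial note: with a single code manager, registration is one-shot and
// cannot fail; the module init paths below now invoke it unconditionally, e.g.:
//
//   RegisterCodeManager(pUnixNativeCodeManager, pvManagedCodeStartRange, cbManagedCodeRange);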
bool RuntimeInstance::RegisterUnboxingStubs(PTR_VOID pvStartRange, uint32_t cbRange)
pEntry->m_pTypeManager = pTypeManager;
{
- ReaderWriterLock::WriteHolder write(&m_ModuleListLock);
+ ReaderWriterLock::WriteHolder write(&m_TypeManagerLock);
m_TypeManagerList.PushHead(pEntry);
}
PTR_ThreadStore m_pThreadStore;
HANDLE m_hPalInstance; // this is the HANDLE passed into DllMain
- ReaderWriterLock m_ModuleListLock;
+ ReaderWriterLock m_TypeManagerLock;
public:
struct OsModuleEntry;
private:
OsModuleList m_OsModuleList;
- struct CodeManagerEntry;
- typedef DPTR(CodeManagerEntry) PTR_CodeManagerEntry;
+ ICodeManager* m_CodeManager;
- struct CodeManagerEntry
- {
- PTR_CodeManagerEntry m_pNext;
- PTR_VOID m_pvStartRange;
- uint32_t m_cbRange;
- ICodeManager * m_pCodeManager;
- };
-
- typedef SList<CodeManagerEntry> CodeManagerList;
- CodeManagerList m_CodeManagerList;
+ // We support only one code manager for now, so we just record its code range.
+ void* m_pvManagedCodeStartRange;
+ uint32_t m_cbManagedCodeRange;
public:
struct TypeManagerEntry
void EnableConservativeStackReporting();
bool IsConservativeStackReportingEnabled() { return m_conservativeStackReportingEnabled; }
- bool RegisterCodeManager(ICodeManager * pCodeManager, PTR_VOID pvStartRange, uint32_t cbRange);
- void UnregisterCodeManager(ICodeManager * pCodeManager);
+ void RegisterCodeManager(ICodeManager * pCodeManager, PTR_VOID pvStartRange, uint32_t cbRange);
- ICodeManager * FindCodeManagerByAddress(PTR_VOID ControlPC);
+ ICodeManager * GetCodeManagerForAddress(PTR_VOID ControlPC);
PTR_VOID GetClasslibFunctionFromCodeAddress(PTR_VOID address, ClasslibFunctionId functionId);
bool RegisterTypeManager(TypeManager * pTypeManager);
bool RegisterUnboxingStubs(PTR_VOID pvStartRange, uint32_t cbRange);
bool IsUnboxingStub(uint8_t* pCode);
+ bool IsManaged(PTR_VOID pvAddress);
+
static bool Initialize(HANDLE hPalInstance);
void Destroy();
bool ShouldHijackCallsiteForGcStress(uintptr_t CallsiteIP);
bool ShouldHijackLoopForGcStress(uintptr_t CallsiteIP);
- void SetLoopHijackFlags(uint32_t flag);
};
typedef DPTR(RuntimeInstance) PTR_RuntimeInstance;
#define FAILFAST_OR_DAC_FAIL_UNCONDITIONALLY(msg) { ASSERT_UNCONDITIONALLY(msg); RhFailFast(); }
#endif
-PTR_PInvokeTransitionFrame GetPInvokeTransitionFrame(PTR_VOID pTransitionFrame)
-{
- return static_cast<PTR_PInvokeTransitionFrame>(pTransitionFrame);
-}
-
-StackFrameIterator::StackFrameIterator(Thread * pThreadToWalk, PTR_VOID pInitialTransitionFrame)
+StackFrameIterator::StackFrameIterator(Thread * pThreadToWalk, PInvokeTransitionFrame* pInitialTransitionFrame)
{
STRESS_LOG0(LF_STACKWALK, LL_INFO10000, "----Init---- [ GC ]\n");
ASSERT(!pThreadToWalk->DangerousCrossThreadIsHijacked());
- InternalInit(pThreadToWalk, GetPInvokeTransitionFrame(pInitialTransitionFrame), GcStackWalkFlags);
+ InternalInit(pThreadToWalk, pInitialTransitionFrame, GcStackWalkFlags);
PrepareToYieldFrame();
}
// NOTE: When the PC is in an assembly thunk, this function will unwind to the next managed
// frame and may publish a conservative stack range (if and only if any of the unwound
// thunks report a conservative range).
-void StackFrameIterator::InternalInit(Thread * pThreadToWalk, PTR_PInvokeTransitionFrame pFrame, uint32_t dwFlags)
+void StackFrameIterator::InternalInit(Thread * pThreadToWalk, PInvokeTransitionFrame* pFrame, uint32_t dwFlags)
{
// EH stackwalks are always required to unwind non-volatile floating point state. This
// state is never carried by PInvokeTransitionFrames, implying that they can never be used
if (category == InManagedCode)
{
- ASSERT(m_pInstance->FindCodeManagerByAddress(m_ControlPC));
+ ASSERT(m_pInstance->IsManaged(m_ControlPC));
}
else if (IsNonEHThunk(category))
{
UnwindNonEHThunkSequence();
- ASSERT(m_pInstance->FindCodeManagerByAddress(m_ControlPC));
+ ASSERT(m_pInstance->IsManaged(m_ControlPC));
}
else
{
{
STRESS_LOG0(LF_STACKWALK, LL_INFO10000, "----Init---- [ StackTrace ]\n");
Thread * pThreadToWalk = ThreadStore::GetCurrentThread();
- PTR_VOID pFrame = pThreadToWalk->GetTransitionFrameForStackTrace();
- InternalInit(pThreadToWalk, GetPInvokeTransitionFrame(pFrame), StackTraceStackWalkFlags);
+ PInvokeTransitionFrame* pFrame = pThreadToWalk->GetTransitionFrameForStackTrace();
+ InternalInit(pThreadToWalk, pFrame, StackTraceStackWalkFlags);
PrepareToYieldFrame();
}
// This codepath is used by the hijack stackwalk and we can get arbitrary ControlPCs from there. If this
// context has a non-managed control PC, then we're done.
- if (!m_pInstance->FindCodeManagerByAddress(dac_cast<PTR_VOID>(pCtx->GetIp())))
+ if (!m_pInstance->IsManaged(dac_cast<PTR_VOID>(pCtx->GetIp())))
return;
//
// We expect to be called by the runtime's C# EH implementation, and since this function's notion of how
// to unwind through the stub is brittle relative to the stub itself, we want to check as soon as we can.
- ASSERT(m_pInstance->FindCodeManagerByAddress(m_ControlPC) && "unwind from funclet invoke stub failed");
+ ASSERT(m_pInstance->IsManaged(m_ControlPC) && "unwind from funclet invoke stub failed");
#endif // defined(USE_PORTABLE_HELPERS)
}
// We expect the throw site to be in managed code, and since this function's notion of how to unwind
// through the stub is brittle relative to the stub itself, we want to check as soon as we can.
- ASSERT(m_pInstance->FindCodeManagerByAddress(m_ControlPC) && "unwind from throw site stub failed");
+ ASSERT(m_pInstance->IsManaged(m_ControlPC) && "unwind from throw site stub failed");
#endif // defined(USE_PORTABLE_HELPERS)
}
uintptr_t DEBUG_preUnwindSP = m_RegDisplay.GetSP();
#endif
- PTR_VOID pPreviousTransitionFrame;
+ PInvokeTransitionFrame* pPreviousTransitionFrame;
FAILFAST_OR_DAC_FAIL(GetCodeManager()->UnwindStackFrame(&m_methodInfo, &m_RegDisplay, &pPreviousTransitionFrame));
bool doingFuncletUnwind = GetCodeManager()->IsFunclet(&m_methodInfo);
// will unwind through the thunk and back to the nearest managed frame, and therefore may
// see a conservative range reported by one of the thunks encountered during this "nested"
// unwind.
- InternalInit(m_pThread, GetPInvokeTransitionFrame(pPreviousTransitionFrame), GcStackWalkFlags);
- ASSERT(m_pInstance->FindCodeManagerByAddress(m_ControlPC));
+ InternalInit(m_pThread, pPreviousTransitionFrame, GcStackWalkFlags);
+ ASSERT(m_pInstance->IsManaged(m_ControlPC));
}
m_dwFlags |= UnwoundReversePInvoke;
}
// from the iterator with the one and only exception being cases where a managed frame must
// be skipped due to funclet collapsing.
- ASSERT(m_pInstance->FindCodeManagerByAddress(m_ControlPC));
+ ASSERT(m_pInstance->IsManaged(m_ControlPC));
if (collapsingTargetFrame != NULL)
{
if (!IsValid())
return;
- ASSERT(m_pInstance->FindCodeManagerByAddress(m_ControlPC));
+ ASSERT(m_pInstance->IsManaged(m_ControlPC));
if (m_dwFlags & ApplyReturnAddressAdjustment)
{
// Assume that the caller is likely to be in the same module
if (m_pCodeManager == NULL || !m_pCodeManager->FindMethodInfo(m_ControlPC, &m_methodInfo))
{
- m_pCodeManager = dac_cast<PTR_ICodeManager>(m_pInstance->FindCodeManagerByAddress(m_ControlPC));
+ m_pCodeManager = dac_cast<PTR_ICodeManager>(m_pInstance->GetCodeManagerForAddress(m_ControlPC));
FAILFAST_OR_DAC_FAIL(m_pCodeManager);
FAILFAST_OR_DAC_FAIL(m_pCodeManager->FindMethodInfo(m_ControlPC, &m_methodInfo));
if (category == InThrowSiteThunk)
return true;
- return (NULL != GetRuntimeInstance()->FindCodeManagerByAddress(pvAddress));
+ return GetRuntimeInstance()->IsManaged(pvAddress);
}
// Support for conservatively reporting GC references in a stack range. This is used when managed methods with
public:
StackFrameIterator() {}
- StackFrameIterator(Thread * pThreadToWalk, PTR_VOID pInitialTransitionFrame);
+ StackFrameIterator(Thread * pThreadToWalk, PInvokeTransitionFrame* pInitialTransitionFrame);
StackFrameIterator(Thread * pThreadToWalk, PTR_PAL_LIMITED_CONTEXT pCtx);
m_pThreadStaticsDataSection = (uint8_t*)GetModuleSection(ReadyToRunSectionType::ThreadStaticRegion, &length);
m_pThreadStaticsGCInfo = (StaticGcDesc*)GetModuleSection(ReadyToRunSectionType::ThreadStaticGCDescRegion, &length);
m_pTlsIndex = (uint32_t*)GetModuleSection(ReadyToRunSectionType::ThreadStaticIndex, &length);
- m_pLoopHijackFlag = (uint32_t*)GetModuleSection(ReadyToRunSectionType::LoopHijackFlag, &length);
m_pDispatchMapTable = (DispatchMap **)GetModuleSection(ReadyToRunSectionType::InterfaceDispatchTable, &length);
}
uint32_t* m_pTlsIndex; // Pointer to TLS index if this module uses thread statics
void** m_pClasslibFunctions;
uint32_t m_nClasslibFunctions;
- uint32_t* m_pLoopHijackFlag;
TypeManager(HANDLE osModule, ReadyToRunHeader * pHeader, void** pClasslibFunctions, uint32_t nClasslibFunctions);
HANDLE GetOsModuleHandle();
void* GetClasslibFunction(ClasslibFunctionId functionId);
uint32_t* GetPointerToTlsIndex() { return m_pTlsIndex; }
- void SetLoopHijackFlag(uint32_t flag) { if (m_pLoopHijackFlag != nullptr) *m_pLoopHijackFlag = flag; }
private:
movdqa [rsp + 20h], xmm0
; link the frame into the Thread
- mov [threadReg + OFFSETOF__Thread__m_pHackPInvokeTunnel], trashReg
+ mov [threadReg + OFFSETOF__Thread__m_pDeferredTransitionFrame], trashReg
endm
;;
test dword ptr [rbx + OFFSETOF__Thread__m_ThreadStateFlags], TSF_SuppressGcStress + TSF_DoNotTriggerGc
jnz @F
- mov rcx, [rbx + OFFSETOF__Thread__m_pHackPInvokeTunnel]
+ mov rcx, [rbx + OFFSETOF__Thread__m_pDeferredTransitionFrame]
call RhpWaitForGCNoAbort
@@:
mov rbx, rdx
WaitForGCCompletion
- mov rax, [rbx + OFFSETOF__Thread__m_pHackPInvokeTunnel]
+ mov rax, [rbx + OFFSETOF__Thread__m_pDeferredTransitionFrame]
test dword ptr [rax + OFFSETOF__PInvokeTransitionFrame__m_Flags], PTFF_THREAD_ABORT
jnz Abort
POP_PROBE_FRAME 0
mov [rsp + 20h], rax ; save return address into PInvokeTransitionFrame
; Early out if GC stress is currently suppressed. Do this after we have computed the real address to
- ; return to but before we link the transition frame onto m_pHackPInvokeTunnel (because hitting this
+ ; return to but before we link the transition frame onto m_pDeferredTransitionFrame (because hitting this
; condition implies we're running restricted callouts during a GC itself and we could end up
; overwriting a co-op frame set by the code that caused the GC in the first place, e.g. a GC.Collect
; call).
; link the frame into the Thread
lea rcx, [rsp + sizeof_OutgoingScratchSpace] ; rcx <- PInvokeTransitionFrame*
- mov [rsi + OFFSETOF__Thread__m_pHackPInvokeTunnel], rcx
+ mov [rsi + OFFSETOF__Thread__m_pDeferredTransitionFrame], rcx
;;
;; Unhijack this thread, if necessary.
; Perform the rest of the PInvokeTransitionFrame initialization.
INIT_PROBE_FRAME $__PPF_ThreadReg, $trashReg, $BITMASK, PROBE_FRAME_SIZE
- str sp, [$__PPF_ThreadReg, #OFFSETOF__Thread__m_pHackPInvokeTunnel]
+ str sp, [$__PPF_ThreadReg, #OFFSETOF__Thread__m_pDeferredTransitionFrame]
MEND
; Simple macro to use when PROLOG_PROBE_FRAME was used to set up and initialize the prolog and
tst r2, #TSF_SuppressGcStress__OR__TSF_DoNotTriggerGC
bne %ft0
- ldr r2, [r4, #OFFSETOF__Thread__m_pHackPInvokeTunnel]
+ ldr r2, [r4, #OFFSETOF__Thread__m_pDeferredTransitionFrame]
bl RhpWaitForGCNoAbort
0
MEND
; TRASHES r1
INIT_PROBE_FRAME r2, r1, #PROBE_SAVE_FLAGS_R0_IS_GCREF, (PROBE_FRAME_SIZE + 8)
- str sp, [r2, #OFFSETOF__Thread__m_pHackPInvokeTunnel]
+ str sp, [r2, #OFFSETOF__Thread__m_pDeferredTransitionFrame]
MEND
;;
; Perform the rest of the PInvokeTransitionFrame initialization.
INIT_PROBE_FRAME $__PPF_ThreadReg, $trashReg, $savedRegsMask, $gcFlags, PROBE_FRAME_SIZE
mov $trashReg, sp
- str $trashReg, [$__PPF_ThreadReg, #OFFSETOF__Thread__m_pHackPInvokeTunnel]
+ str $trashReg, [$__PPF_ThreadReg, #OFFSETOF__Thread__m_pDeferredTransitionFrame]
MEND
; Simple macro to use when PROLOG_PROBE_FRAME was used to set up and initialize the prolog and
tst w2, #TSF_SuppressGcStress__OR__TSF_DoNotTriggerGC
bne %ft0
- ldr x9, [x4, #OFFSETOF__Thread__m_pHackPInvokeTunnel]
+ ldr x9, [x4, #OFFSETOF__Thread__m_pDeferredTransitionFrame]
bl RhpWaitForGCNoAbort
0
MEND
// pTransitionFrame - transition frame to make the stack crawlable
// Returns a pointer to the object allocated or NULL on failure.
-COOP_PINVOKE_HELPER(void*, RhpGcAlloc, (MethodTable* pEEType, uint32_t uFlags, uintptr_t numElements, void* pTransitionFrame))
+COOP_PINVOKE_HELPER(void*, RhpGcAlloc, (MethodTable* pEEType, uint32_t uFlags, uintptr_t numElements, PInvokeTransitionFrame* pTransitionFrame))
{
Thread* pThread = ThreadStore::GetCurrentThread();
- pThread->SetCurrentThreadPInvokeTunnelForGcAlloc(pTransitionFrame);
+ pThread->SetDeferredTransitionFrame(pTransitionFrame);
return GcAllocInternal(pEEType, uFlags, numElements, pThread);
}
continue;
#if !defined (ISOLATED_HEAPS)
- // @TODO: it is very bizarre that this IsThreadUsingAllocationContextHeap takes a copy of the
- // allocation context instead of a reference or a pointer to it. This seems very wasteful given how
- // large the alloc_context is.
if (!GCHeapUtilities::GetGCHeap()->IsThreadUsingAllocationContextHeap(pThread->GetAllocContext(),
sc->thread_number))
{
void PromoteCarefully(PTR_PTR_Object obj, uint32_t flags, EnumGcRefCallbackFunc * fnGcEnumRef, EnumGcRefScanContext * pSc)
{
//
- // Sanity check that the flags contain only these three values
+ // Sanity check that the flags contain only these values
//
- assert((flags & ~(GC_CALL_INTERIOR|GC_CALL_PINNED|GC_CALL_CHECK_APP_DOMAIN)) == 0);
+ assert((flags & ~(GC_CALL_INTERIOR|GC_CALL_PINNED)) == 0);
//
// Sanity check that GC_CALL_INTERIOR FLAG is set
void GcEnumObject(PTR_PTR_Object ppObj, uint32_t flags, EnumGcRefCallbackFunc * fnGcEnumRef, EnumGcRefScanContext * pSc)
{
//
- // Sanity check that the flags contain only these three values
+ // Sanity check that the flags contain only these values
//
- assert((flags & ~(GC_CALL_INTERIOR|GC_CALL_PINNED|GC_CALL_CHECK_APP_DOMAIN)) == 0);
+ assert((flags & ~(GC_CALL_INTERIOR|GC_CALL_PINNED)) == 0);
// for interior pointers, we optimize the case in which
// it points into the current threads stack area
;; All other registers trashed
;;
StressGC macro
- mov [ebx + OFFSETOF__Thread__m_pHackPInvokeTunnel], esp
+ mov [ebx + OFFSETOF__Thread__m_pDeferredTransitionFrame], esp
call REDHAWKGCINTERFACE__STRESSGC
endm
ThreadStaticOffsetRegion = 208,
ThreadStaticGCDescRegion = 209,
ThreadStaticIndex = 210,
- LoopHijackFlag = 211,
+ // 211 is unused - it previously held LoopHijackFlag
ImportAddressTables = 212,
// Sections 300 - 399 are reserved for RhFindBlob backwards compatibility
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
+#pragma once
+
//
// This header contains binder-generated data structures that the runtime consumes.
//
PTFF_THREAD_ABORT = 0x0000001000000000, // indicates that ThreadAbortException should be thrown when returning from the transition
};
-// TODO: Consider moving the PInvokeTransitionFrameFlags definition to a separate file to simplify header dependencies
-#ifdef ICODEMANAGER_INCLUDED
-// Verify that we can use bitwise shifts to convert from GCRefKind to PInvokeTransitionFrameFlags and back
-C_ASSERT(PTFF_X0_IS_GCREF == ((uint64_t)GCRK_Object << 32));
-C_ASSERT(PTFF_X0_IS_BYREF == ((uint64_t)GCRK_Byref << 32));
-C_ASSERT(PTFF_X1_IS_GCREF == ((uint64_t)GCRK_Scalar_Obj << 32));
-C_ASSERT(PTFF_X1_IS_BYREF == ((uint64_t)GCRK_Scalar_Byref << 32));
-
-inline uint64_t ReturnKindToTransitionFrameFlags(GCRefKind returnKind)
-{
- if (returnKind == GCRK_Scalar)
- return 0;
- return PTFF_SAVE_X0 | PTFF_SAVE_X1 | ((uint64_t)returnKind << 32);
-}
-
-inline GCRefKind TransitionFrameFlagsToReturnKind(uint64_t transFrameFlags)
-{
- GCRefKind returnKind = (GCRefKind)((transFrameFlags & (PTFF_X0_IS_GCREF | PTFF_X0_IS_BYREF | PTFF_X1_IS_GCREF | PTFF_X1_IS_BYREF)) >> 32);
- ASSERT((returnKind == GCRK_Scalar) || ((transFrameFlags & PTFF_SAVE_X0) && (transFrameFlags & PTFF_SAVE_X1)));
- return returnKind;
-}
-#endif // ICODEMANAGER_INCLUDED
#else // TARGET_ARM
enum PInvokeTransitionFrameFlags
{
SubSectionDesc subSection[/*subSectionCount*/1];
// UINT32 hotRVAofColdMethod[/*coldMethodCount*/];
};
+
/* create a new thread stress log buffer associated with pThread */
ThreadStressLog* StressLog::CreateThreadStressLog(Thread * pThread) {
-
if (theLog.facilitiesToLog == 0)
return NULL;
return msgs;
}
- //if we are not allowed to allocate stress log, we should not even try to take the lock
- if (pThread->IsInCantAllocStressLogRegion())
- {
- return NULL;
- }
-
// if it looks like we won't be allowed to allocate a new chunk, exit early
if (VolatileLoad(&theLog.deadCount) == 0 && !AllowNewChunk (0))
{
bool StressLog::AllowNewChunk (long numChunksInCurThread)
{
Thread* pCurrentThread = ThreadStore::RawGetCurrentThread();
- if (pCurrentThread->IsInCantAllocStressLogRegion())
- {
- return FALSE;
- }
_ASSERTE (numChunksInCurThread <= VolatileLoad(&theLog.totalChunk));
uint32_t perThreadLimit = theLog.MaxSizePerThread;
#endif // _MSC_VER
#endif //!DACCESS_COMPILE
-PTR_VOID Thread::GetTransitionFrame()
+PInvokeTransitionFrame* Thread::GetTransitionFrame()
{
if (ThreadStore::GetSuspendingThread() == this)
{
- // This thread is in cooperative mode, so we grab the transition frame
- // from the 'tunnel' location, which will have the frame from the most
+ // This thread is in cooperative mode, so we grab the deferred transition frame,
+ // which holds the frame from the most
// recent 'cooperative pinvoke' transition that brought us here.
- ASSERT(m_pHackPInvokeTunnel != NULL);
- return m_pHackPInvokeTunnel;
+ ASSERT(m_pDeferredTransitionFrame != NULL);
+ return m_pDeferredTransitionFrame;
}
ASSERT(m_pCachedTransitionFrame != NULL);
#ifndef DACCESS_COMPILE
-PTR_VOID Thread::GetTransitionFrameForStackTrace()
+PInvokeTransitionFrame* Thread::GetTransitionFrameForStackTrace()
{
ASSERT_MSG(ThreadStore::GetSuspendingThread() == NULL, "Not allowed when suspended for GC.");
ASSERT_MSG(this == ThreadStore::GetCurrentThread(), "Only supported for current thread.");
ASSERT(Thread::IsCurrentThreadInCooperativeMode());
- ASSERT(m_pHackPInvokeTunnel != NULL);
- return m_pHackPInvokeTunnel;
+ ASSERT(m_pDeferredTransitionFrame != NULL);
+ return m_pDeferredTransitionFrame;
}
void Thread::WaitForSuspend()
GetThreadStore()->WaitForSuspendComplete();
}
-void Thread::WaitForGC(void * pTransitionFrame)
+void Thread::WaitForGC(PInvokeTransitionFrame* pTransitionFrame)
{
ASSERT(!IsDoNotTriggerGcSet());
//
// Returns true if it successfully cached the transition frame (i.e. the thread was in unmanaged code).
// Returns false otherwise.
-//
+//
+// WARNING: This method is called by suspension while one thread is interrupted
+// in a random location, possibly holding random locks.
+// It is unsafe to use blocking APIs or allocate in this method.
+// Please ensure that all methods called by this one also have this warning.
bool Thread::CacheTransitionFrameForSuspend()
{
if (m_pCachedTransitionFrame != NULL)
return true;
- PTR_VOID temp = m_pTransitionFrame; // volatile read
+ PInvokeTransitionFrame* temp = m_pTransitionFrame; // volatile read
if (temp == NULL)
return false;
void Thread::ResetCachedTransitionFrame()
{
- // @TODO: I don't understand this assert because ResumeAllThreads is clearly written
- // to be reseting other threads' cached transition frames.
-
- //ASSERT((ThreadStore::GetCurrentThreadIfAvailable() == this) ||
- // (m_pCachedTransitionFrame != NULL));
m_pCachedTransitionFrame = NULL;
}
// This function simulates a PInvoke transition using a frame pointer from somewhere further up the stack that
-// was passed in via the m_pHackPInvokeTunnel field. It is used to allow us to grandfather-in the set of GC
+// was passed in via the m_pDeferredTransitionFrame field. It is used to allow us to grandfather-in the set of GC
// code that runs in cooperative mode without having to rewrite it in managed code. The result is that the
// code that calls into this special mode must spill preserved registers as if it's going to PInvoke, but
-// record its transition frame pointer in m_pHackPInvokeTunnel and leave the thread in the cooperative
+// record its transition frame pointer in m_pDeferredTransitionFrame and leave the thread in the cooperative
// mode. Later on, when this function is called, we effect the state transition to 'unmanaged' using the
// previously setup transition frame.
void Thread::EnablePreemptiveMode()
{
ASSERT(ThreadStore::GetCurrentThread() == this);
#if !defined(HOST_WASM)
- ASSERT(m_pHackPInvokeTunnel != NULL);
+ ASSERT(m_pDeferredTransitionFrame != NULL);
#endif
Unhijack();
// ORDERING -- this write must occur before checking the trap
- m_pTransitionFrame = m_pHackPInvokeTunnel;
+ m_pTransitionFrame = m_pDeferredTransitionFrame;
// We need to prevent compiler reordering between above write and below read. Both the read and the write
// are volatile, so it's possible that the particular semantic for volatile that MSVC provides is enough,
if (ThreadStore::IsTrapThreadsRequested() && (this != ThreadStore::GetSuspendingThread()))
{
- WaitForGC(m_pHackPInvokeTunnel);
+ WaitForGC(m_pDeferredTransitionFrame);
}
}
#endif // !DACCESS_COMPILE
//When debugging we might be trying to enumerate with or without a transition frame
//on top of the stack. If there is one use it, otherwise the debugger provides a set of initial registers
//to use.
- PTR_VOID pTransitionFrame = GetTransitionFrame();
+ PInvokeTransitionFrame* pTransitionFrame = GetTransitionFrame();
if(pTransitionFrame != NULL)
{
- StackFrameIterator frameIterator(this, GetTransitionFrame());
+ StackFrameIterator frameIterator(this, pTransitionFrame);
GcScanRootsWorker(&GcScanRootsCallbackWrapper, &callbackDataWrapper, frameIterator);
}
else
PTR_VOID pLowerBound = dac_cast<PTR_VOID>(frameIterator.GetRegisterSet()->GetSP());
// Transition frame may contain callee saved registers that need to be reported as well
- PTR_VOID pTransitionFrame = GetTransitionFrame();
+ PInvokeTransitionFrame* pTransitionFrame = GetTransitionFrame();
ASSERT(pTransitionFrame != NULL);
if (pTransitionFrame < pLowerBound)
pLowerBound = pTransitionFrame;
// requires THREAD_SUSPEND_RESUME / THREAD_GET_CONTEXT / THREAD_SET_CONTEXT permissions
Thread* pCurrentThread = ThreadStore::GetCurrentThread();
- pCurrentThread->EnterCantAllocRegion();
uint32_t result = PalHijack(m_hPalThread, HijackCallback, this);
- pCurrentThread->LeaveCantAllocRegion();
return result == 0;
-
}
UInt32_BOOL Thread::HijackCallback(HANDLE /*hThread*/, PAL_LIMITED_CONTEXT* pThreadContext, void* pCallbackContext)
{
Thread* pThread = (Thread*) pCallbackContext;
- //
- // WARNING: The hijack operation will take a read lock on the RuntimeInstance's module list.
- // (This is done to find a Module based on an IP.) Therefore, if the thread we've just
- // suspended owns the write lock on the module list, we'll deadlock with it when we try to
- // take the read lock below. So we must attempt a non-blocking acquire of the read lock
- // early and fail the hijack if we can't get it. This will cause us to simply retry later.
- //
- if (GetRuntimeInstance()->m_ModuleListLock.DangerousTryPulseReadLock())
+ // We have a thread stopped, but we do not know exactly where.
+ // It could be in a system call, or in our own runtime while holding locks.
+ // The current thread must not block or allocate while we determine whether the stopped location is in managed code.
+ if (pThread->CacheTransitionFrameForSuspend())
{
- if (pThread->CacheTransitionFrameForSuspend())
- {
- // IMPORTANT: GetThreadContext should not be trusted arbitrarily. We are careful here to recheck
- // the thread's state flag that indicates whether or not it has made it to unmanaged code. If
- // it has reached unmanaged code (even our own wait helper routines), then we cannot trust the
- // context returned by it. This is due to various races that occur updating the reported context
- // during syscalls.
- return TRUE;
- }
- else
- {
- return pThread->InternalHijack(pThreadContext, NormalHijackTargets) ? TRUE : FALSE;
- }
+ // This thread has already made it to preemptive mode (it has posted a transition frame),
+ // so we do not need to hijack it.
+ return true;
+ }
+
+ if (!GetRuntimeInstance()->IsManaged((PTR_VOID)pThreadContext->IP))
+ {
+ // The thread is running in cooperative mode, but not in managed code;
+ // we cannot hijack it here, so fail and let the suspender retry.
+ return false;
}
- return FALSE;
+ // TODO: attempt to redirect
+
+ return pThread->InternalHijack(pThreadContext, NormalHijackTargets);
}
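// Editorial summary of the callback's contract, per the comments above:
//   returns true  - the thread already posted a transition frame (it is in
//                   preemptive mode) or the return-address hijack was installed;
//   returns false - the thread is stopped in cooperative mode outside managed
//                   code, so the suspending thread simply retries later.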
#ifdef FEATURE_GC_STRESS
else
pvReturnAddress = *ppvReturnAddressLocation;
- ASSERT(NULL != GetRuntimeInstance()->FindCodeManagerByAddress(pvReturnAddress));
+ ASSERT(GetRuntimeInstance()->IsManaged(pvReturnAddress));
return pvReturnAddress;
}
# endif
#endif // HOST_64BIT
-#define TOP_OF_STACK_MARKER ((PTR_VOID)(uintptr_t)(intptr_t)-1)
+#define TOP_OF_STACK_MARKER ((PInvokeTransitionFrame*)(ptrdiff_t)-1)
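// Editorial note: TOP_OF_STACK_MARKER is a sentinel, never dereferenced; the
// (ptrdiff_t)-1 cast yields an all-ones pointer on both 32- and 64-bit targets:
//
//   PInvokeTransitionFrame* top = TOP_OF_STACK_MARKER;
//   ASSERT((uintptr_t)top == ~(uintptr_t)0);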
#define DYNAMIC_TYPE_TLS_OFFSET_FLAG 0x80000000
PTR_PAL_LIMITED_CONTEXT m_pExContext;
PTR_Object m_exception; // actual object reference, specially reported by GcScanRootsWorker
ExKind m_kind;
- uint8_t m_passNumber;
- uint32_t m_idxCurClause;
+ uint8_t m_passNumber;
+ uint32_t m_idxCurClause;
StackFrameIterator m_frameIter;
volatile void* m_notifyDebuggerSP;
};
struct ThreadBuffer
{
- uint8_t m_rgbAllocContextBuffer[SIZEOF_ALLOC_CONTEXT];
- uint32_t volatile m_ThreadStateFlags; // see Thread::ThreadStateFlags enum
+ uint8_t m_rgbAllocContextBuffer[SIZEOF_ALLOC_CONTEXT];
+ uint32_t volatile m_ThreadStateFlags; // see Thread::ThreadStateFlags enum
#if DACCESS_COMPILE
- PTR_VOID m_pTransitionFrame;
+ PInvokeTransitionFrame* m_pTransitionFrame;
#else
- PTR_VOID volatile m_pTransitionFrame;
+ PInvokeTransitionFrame* volatile m_pTransitionFrame;
#endif
- PTR_VOID m_pHackPInvokeTunnel; // see Thread::EnablePreemptiveMode
- PTR_VOID m_pCachedTransitionFrame;
+ PInvokeTransitionFrame* m_pDeferredTransitionFrame; // see Thread::EnablePreemptiveMode
+ PInvokeTransitionFrame* m_pCachedTransitionFrame;
PTR_Thread m_pNext; // used by ThreadStore's SList<Thread>
HANDLE m_hPalThread; // WARNING: this may legitimately be INVALID_HANDLE_VALUE
void ** m_ppvHijackedReturnAddressLocation;
PTR_VOID m_pStackLow;
PTR_VOID m_pStackHigh;
PTR_UInt8 m_pTEB; // Pointer to OS TEB structure for this thread
- uint64_t m_uPalThreadIdForLogging; // @TODO: likely debug-only
+ uint64_t m_uPalThreadIdForLogging; // @TODO: likely debug-only
EEThreadId m_threadId;
PTR_VOID m_pThreadStressLog; // pointer to head of thread's StressLogChunks
- uint32_t m_cantAlloc;
#ifdef FEATURE_GC_STRESS
- uint32_t m_uRand; // current per-thread random number
+ uint32_t m_uRand; // current per-thread random number
#endif // FEATURE_GC_STRESS
// Thread Statics Storage for dynamic types
struct ReversePInvokeFrame
{
- void* m_savedPInvokeTransitionFrame;
+ PInvokeTransitionFrame* m_savedPInvokeTransitionFrame;
Thread* m_savedThread;
};
//
// SyncState members
//
- PTR_VOID GetTransitionFrame();
+ PInvokeTransitionFrame* GetTransitionFrame();
void GcScanRootsWorker(void * pfnEnumCallback, void * pvCallbackData, StackFrameIterator & sfIter);
#ifndef DACCESS_COMPILE
void SetThreadStressLog(void * ptsl);
#endif // DACCESS_COMPILE
- void EnterCantAllocRegion();
- void LeaveCantAllocRegion();
- bool IsInCantAllocStressLogRegion();
#ifdef FEATURE_GC_STRESS
void SetRandomSeed(uint32_t seed);
uint32_t NextRand();
bool IsCurrentThreadInCooperativeMode();
- PTR_VOID GetTransitionFrameForStackTrace();
+ PInvokeTransitionFrame* GetTransitionFrameForStackTrace();
void * GetCurrentThreadPInvokeReturnAddress();
static bool IsHijackTarget(void * address);
void EnablePreemptiveMode();
void DisablePreemptiveMode();
- // Set the m_pHackPInvokeTunnel field for GC allocation helpers that setup transition frame
+ // Set the m_pDeferredTransitionFrame field for GC allocation helpers that set up the transition frame
// in assembly code. Do not use anywhere else.
- void SetCurrentThreadPInvokeTunnelForGcAlloc(void * pTransitionFrame);
+ void SetDeferredTransitionFrame(PInvokeTransitionFrame* pTransitionFrame);
- // Setup the m_pHackPInvokeTunnel field for GC helpers entered via regular PInvoke.
+ // Set up the m_pDeferredTransitionFrame field for GC helpers entered via regular PInvoke.
// Do not use anywhere else.
- void SetupHackPInvokeTunnel();
+ void DeferTransitionFrame();
//
// GC support APIs - do not use except from GC itself
// Managed/unmanaged interop transitions support APIs
//
void WaitForSuspend();
- void WaitForGC(void * pTransitionFrame);
+ void WaitForGC(PInvokeTransitionFrame* pTransitionFrame);
void ReversePInvokeAttachOrTrapThread(ReversePInvokeFrame * pFrame);
// The .NET Foundation licenses this file to you under the MIT license.
#ifndef DACCESS_COMPILE
-inline void Thread::SetCurrentThreadPInvokeTunnelForGcAlloc(void * pTransitionFrame)
+inline void Thread::SetDeferredTransitionFrame(PInvokeTransitionFrame* pTransitionFrame)
{
ASSERT(ThreadStore::GetCurrentThread() == this);
ASSERT(Thread::IsCurrentThreadInCooperativeMode());
- m_pHackPInvokeTunnel = pTransitionFrame;
+ m_pDeferredTransitionFrame = pTransitionFrame;
}
-inline void Thread::SetupHackPInvokeTunnel()
+inline void Thread::DeferTransitionFrame()
{
ASSERT(ThreadStore::GetCurrentThread() == this);
ASSERT(!Thread::IsCurrentThreadInCooperativeMode());
- m_pHackPInvokeTunnel = m_pTransitionFrame;
+ m_pDeferredTransitionFrame = m_pTransitionFrame;
}
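// Editorial note: the intended pairing, as seen at the call sites updated in
// this change (GC helpers entered via regular PInvoke):
//
//   Thread* pCurThread = ThreadStore::GetCurrentThread();
//   pCurThread->DeferTransitionFrame();   // snapshot m_pTransitionFrame while still in preemptive mode
//   pCurThread->DisablePreemptiveMode();  // then switch to cooperative mode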
#endif // DACCESS_COMPILE
{
return m_pThreadStressLog;
}
-
-inline void Thread::EnterCantAllocRegion()
-{
- m_cantAlloc++;
-}
-
-inline void Thread::LeaveCantAllocRegion()
-{
- m_cantAlloc--;
-}
-
-inline bool Thread::IsInCantAllocStressLogRegion()
-{
- return m_cantAlloc != 0;
-}
// set the global trap for pinvoke leave and return
RhpTrapThreads |= (uint32_t)TrapThreadsFlags::TrapThreads;
- // Set each module's loop hijack flag
- GetRuntimeInstance()->SetLoopHijackFlags(RhpTrapThreads);
-
// Our lock-free algorithm depends on flushing write buffers of all processors running RH code. The
// reason for this is that we essentially implement Dekker's algorithm, which requires write ordering.
PalFlushProcessWriteBuffers();
if (!pTargetThread->CacheTransitionFrameForSuspend())
{
- // We drive all threads to preemptive mode by hijacking them with both a
- // return-address hijack and loop hijacks.
+ // We drive all threads to preemptive mode by hijacking them with a return-address hijack.
keepWaiting = true;
pTargetThread->Hijack();
}
RhpTrapThreads &= ~(uint32_t)TrapThreadsFlags::TrapThreads;
- // Reset module's hijackLoops flag
- GetRuntimeInstance()->SetLoopHijackFlags(0);
-
RhpSuspendingThread = NULL;
if (waitForGCEvent)
{
{ 0 }, // m_rgbAllocContextBuffer
Thread::TSF_Unknown, // m_ThreadStateFlags
TOP_OF_STACK_MARKER, // m_pTransitionFrame
- TOP_OF_STACK_MARKER, // m_pHackPInvokeTunnel
+ TOP_OF_STACK_MARKER, // m_pDeferredTransitionFrame
0, // m_pCachedTransitionFrame
0, // m_pNext
INVALID_HANDLE_VALUE, // m_hPalThread
bool UnixNativeCodeManager::UnwindStackFrame(MethodInfo * pMethodInfo,
REGDISPLAY * pRegisterSet, // in/out
- PTR_VOID * ppPreviousTransitionFrame) // out
+ PInvokeTransitionFrame** ppPreviousTransitionFrame) // out
{
UnixNativeMethodInfo * pNativeMethodInfo = (UnixNativeMethodInfo *)pMethodInfo;
{
basePointer = dac_cast<TADDR>(pRegisterSet->GetFP());
}
- *ppPreviousTransitionFrame = *(void**)(basePointer + slot);
+
+ *ppPreviousTransitionFrame = *(PInvokeTransitionFrame**)(basePointer + slot);
return true;
}
return dac_cast<PTR_VOID>(p + *dac_cast<PTR_Int32>(p));
}
-extern "C" bool RegisterCodeManager(ICodeManager * pCodeManager, PTR_VOID pvStartRange, uint32_t cbRange);
-extern "C" void UnregisterCodeManager(ICodeManager * pCodeManager);
+extern "C" void RegisterCodeManager(ICodeManager * pCodeManager, PTR_VOID pvStartRange, uint32_t cbRange);
extern "C" bool RegisterUnboxingStubs(PTR_VOID pvStartRange, uint32_t cbRange);
extern "C"
if (pUnixNativeCodeManager == nullptr)
return false;
- if (!RegisterCodeManager(pUnixNativeCodeManager, pvManagedCodeStartRange, cbManagedCodeRange))
- return false;
+ RegisterCodeManager(pUnixNativeCodeManager, pvManagedCodeStartRange, cbManagedCodeRange);
if (!RegisterUnboxingStubs(pvUnboxingStubsStartRange, cbUnboxingStubsRange))
{
- UnregisterCodeManager(pUnixNativeCodeManager);
return false;
}
bool UnwindStackFrame(MethodInfo * pMethodInfo,
REGDISPLAY * pRegisterSet, // in/out
- PTR_VOID * ppPreviousTransitionFrame); // out
+ PInvokeTransitionFrame** ppPreviousTransitionFrame); // out
uintptr_t GetConservativeUpperBoundForOutgoingArgs(MethodInfo * pMethodInfo,
REGDISPLAY * pRegisterSet);
bool CoffNativeCodeManager::UnwindStackFrame(MethodInfo * pMethodInfo,
REGDISPLAY * pRegisterSet, // in/out
- PTR_VOID * ppPreviousTransitionFrame) // out
+ PInvokeTransitionFrame** ppPreviousTransitionFrame) // out
{
CoffNativeMethodInfo * pNativeMethodInfo = (CoffNativeMethodInfo *)pMethodInfo;
{
basePointer = dac_cast<TADDR>(pRegisterSet->GetFP());
}
- *ppPreviousTransitionFrame = *(void**)(basePointer + slot);
+
+ *ppPreviousTransitionFrame = *(PInvokeTransitionFrame**)(basePointer + slot);
return true;
}
return dac_cast<PTR_VOID>(m_moduleBase + dataRVA);
}
-extern "C" bool __stdcall RegisterCodeManager(ICodeManager * pCodeManager, PTR_VOID pvStartRange, uint32_t cbRange);
-extern "C" void __stdcall UnregisterCodeManager(ICodeManager * pCodeManager);
+extern "C" void __stdcall RegisterCodeManager(ICodeManager * pCodeManager, PTR_VOID pvStartRange, uint32_t cbRange);
extern "C" bool __stdcall RegisterUnboxingStubs(PTR_VOID pvStartRange, uint32_t cbRange);
extern "C"
if (pCoffNativeCodeManager == nullptr)
return false;
- if (!RegisterCodeManager(pCoffNativeCodeManager, pvManagedCodeStartRange, cbManagedCodeRange))
- return false;
+ RegisterCodeManager(pCoffNativeCodeManager, pvManagedCodeStartRange, cbManagedCodeRange);
if (!RegisterUnboxingStubs(pvUnboxingStubsStartRange, cbUnboxingStubsRange))
{
- UnregisterCodeManager(pCoffNativeCodeManager);
return false;
}
bool UnwindStackFrame(MethodInfo * pMethodInfo,
REGDISPLAY * pRegisterSet, // in/out
- PTR_VOID * ppPreviousTransitionFrame); // out
+ PInvokeTransitionFrame** ppPreviousTransitionFrame); // out
uintptr_t GetConservativeUpperBoundForOutgoingArgs(MethodInfo * pMethodInfo,
REGDISPLAY * pRegisterSet);
// The .NET Foundation licenses this file to you under the MIT license.
using System.Diagnostics;
+using System.Runtime.ConstrainedExecution;
namespace System.Runtime.InteropServices
{
- public abstract partial class SafeHandle
+ public abstract partial class SafeHandle : CriticalFinalizerObject, IDisposable
{
// The handle cannot be closed until we are sure that no other objects might
// be using it. In the case of finalization, there may be other objects in
// finalization cycle, but should be released in the next.
//
// This has the effect of delaying cleanup for much longer than would have
- // happened on the CLR. This also means that we may not close some handles
+ // happened on the CLR, which is an observable behavior change.
+ // This also means that we may not close some handles
// at shutdown, since there may not be another finalization cycle to run
// the delayed finalizer. If either of these ends up being a problem, we should
- // consider adding more control over finalization order to MRT (or, better,
- // turning over control of finalization ordering to System.Private.CoreLib).
+ // consider implementing MethodTable::HasCriticalFinalizer.
+ // The same applies to `CriticalHandle`.
private sealed class DelayedFinalizer
{
ThreadStaticOffsetRegion = 208,
ThreadStaticGCDescRegion = 209,
ThreadStaticIndex = 210,
- LoopHijackFlag = 211,
+ // 211 is unused - it previously held LoopHijackFlag
ImportAddressTables = 212,
ModuleInitializerList = 213,