1 // Licensed to the .NET Foundation under one or more agreements.
2 // The .NET Foundation licenses this file to you under the MIT license.
3 // See the LICENSE file in the project root for more information.
10 #ifdef WIN64EXCEPTIONS
11 #include "exceptionhandling.h"
12 #include "dbginterface.h"
13 #include "asmconstants.h"
14 #include "eetoprofinterfacewrapper.inl"
15 #include "eedbginterfaceimpl.inl"
16 #include "eventtrace.h"
17 #include "virtualcallstub.h"
20 #if defined(_TARGET_X86_)
21 #define USE_CURRENT_CONTEXT_IN_FILTER
22 #endif // _TARGET_X86_
24 #if defined(_TARGET_ARM_) || defined(_TARGET_ARM64_)
25 // ARM/ARM64 uses Caller-SP to locate PSPSym in the funclet frame.
26 #define USE_CALLER_SP_IN_FUNCLET
27 #endif // _TARGET_ARM_ || _TARGET_ARM64_
29 #if defined(_TARGET_ARM_) || defined(_TARGET_ARM64_) || defined(_TARGET_X86_)
30 #define ADJUST_PC_UNWOUND_TO_CALL
31 #define STACK_RANGE_BOUNDS_ARE_CALLER_SP
32 #define USE_FUNCLET_CALL_HELPER
33 // For ARM/ARM64, EstablisherFrame is Caller-SP (SP just before executing call instruction).
34 // This has been confirmed by AaronGi from the kernel team for Windows.
36 // For x86/Linux, RtlVirtualUnwind sets EstablisherFrame as Caller-SP.
37 #define ESTABLISHER_FRAME_ADDRESS_IS_CALLER_SP
38 #endif // _TARGET_ARM_ || _TARGET_ARM64_ || _TARGET_X86_
42 ClrUnwindEx(EXCEPTION_RECORD* pExceptionRecord,
45 UINT_PTR TargetFrameSp);
46 #endif // !FEATURE_PAL
48 #ifdef USE_CURRENT_CONTEXT_IN_FILTER
49 inline void CaptureNonvolatileRegisters(PKNONVOLATILE_CONTEXT pNonvolatileContext, PCONTEXT pContext)
51 #define CALLEE_SAVED_REGISTER(reg) pNonvolatileContext->reg = pContext->reg;
52 ENUM_CALLEE_SAVED_REGISTERS();
53 #undef CALLEE_SAVED_REGISTER
56 inline void RestoreNonvolatileRegisters(PCONTEXT pContext, PKNONVOLATILE_CONTEXT pNonvolatileContext)
58 #define CALLEE_SAVED_REGISTER(reg) pContext->reg = pNonvolatileContext->reg;
59 ENUM_CALLEE_SAVED_REGISTERS();
60 #undef CALLEE_SAVED_REGISTER
63 inline void RestoreNonvolatileRegisterPointers(PT_KNONVOLATILE_CONTEXT_POINTERS pContextPointers, PKNONVOLATILE_CONTEXT pNonvolatileContext)
65 #define CALLEE_SAVED_REGISTER(reg) pContextPointers->reg = &pNonvolatileContext->reg;
66 ENUM_CALLEE_SAVED_REGISTERS();
67 #undef CALLEE_SAVED_REGISTER
70 #ifndef DACCESS_COMPILE
72 // o Functions and funclets are tightly associated. In fact, they are laid out in contiguous memory.
73 // They also present some interesting issues with respect to EH because we will see callstacks with
74 // both functions and funclets, but need to logically treat them as the original single IL function
77 // o All funclets are ripped out of line from the main function. Finally clause are pulled out of
78 // line and replaced by calls to the funclets. Catch clauses, however, are simply pulled out of
79 // line. !!!This causes a loss of nesting information in clause offsets.!!! A canonical example of
80 // two different functions which look identical due to clause removal is as shown in the code
81 // snippets below. The reason they look identical in the face of out-of-line funclets is that the
82 // region bounds for the "try A" region collapse and become identical to the region bounds for
83 // region "try B". This will look identical to the region information for Bar because Bar must
84 // have a separate entry for each catch clause, both of which will have the same try-region bounds.
86 // void Foo() void Bar()
105 // O The solution is to duplicate all clauses that logically cover the funclet in its parent
106 // method, but with the try-region covering the entire out-of-line funclet code range. This will
107 // differentiate the canonical example above because the CatchB funclet will have a try-clause
108 // covering it whose associated handler is CatchA. In Bar, there is no such duplication of any clauses.
110 // o The behavior of the personality routine depends upon the JIT to properly order the clauses from
111 // inside-out. This allows us to properly handle a situation where our control PC is covered by clauses
112 // that should not be considered because a more nested clause will catch the exception and resume within
113 // the scope of the outer clauses.
115 // o This sort of clause duplication for funclets should be done for all clause types, not just catches.
116 // Unfortunately, I cannot articulate why at the moment.
119 void DumpClauses(IJitManager* pJitMan, const METHODTOKEN& MethToken, UINT_PTR uMethodStartPC, UINT_PTR dwControlPc);
120 static void DoEHLog(DWORD lvl, __in_z const char *fmt, ...);
121 #define EH_LOG(expr) { DoEHLog expr ; }
126 TrackerAllocator g_theTrackerAllocator;
127 uint32_t g_exceptionCount;
129 bool FixNonvolatileRegisters(UINT_PTR uOriginalSP,
131 CONTEXT* pContextRecord,
135 void FixContext(PCONTEXT pContextRecord)
137 #define FIXUPREG(reg, value) \
139 STRESS_LOG2(LF_GCROOTS, LL_INFO100, "Updating " #reg " %p to %p\n", \
140 pContextRecord->reg, \
142 pContextRecord->reg = (value); \
146 size_t resumeSp = EECodeManager::GetResumeSp(pContextRecord);
147 FIXUPREG(Esp, resumeSp);
148 #endif // _TARGET_X86_
153 MethodDesc * GetUserMethodForILStub(Thread * pThread, UINT_PTR uStubSP, MethodDesc * pILStubMD, Frame ** ppFrameOut);
156 BOOL HandleHardwareException(PAL_SEHException* ex);
157 BOOL IsSafeToHandleHardwareException(PCONTEXT contextRecord, PEXCEPTION_RECORD exceptionRecord);
158 #endif // FEATURE_PAL
160 static ExceptionTracker* GetTrackerMemory()
170 return g_theTrackerAllocator.GetTrackerMemory();
173 void FreeTrackerMemory(ExceptionTracker* pTracker, TrackerMemoryType mem)
183 if (mem & memManaged)
185 pTracker->ReleaseResources();
188 if (mem & memUnmanaged)
190 g_theTrackerAllocator.FreeTrackerMemory(pTracker);
194 static inline void UpdatePerformanceMetrics(CrawlFrame *pcfThisFrame, BOOL bIsRethrownException, BOOL bIsNewException)
199 // Fire an exception thrown ETW event when an exception occurs
200 ETW::ExceptionLog::ExceptionThrown(pcfThisFrame, bIsRethrownException, bIsNewException);
204 static LONG volatile g_termination_triggered = 0;
206 void HandleTerminationRequest(int terminationExitCode)
208 // We set a non-zero exit code to indicate the process didn't terminate cleanly.
209 // This value can be changed by the user by setting Environment.ExitCode in the
210 // ProcessExit event. We only start termination on the first SIGTERM signal
211 // to ensure we don't overwrite an exit code already set in ProcessExit.
212 if (InterlockedCompareExchange(&g_termination_triggered, 1, 0) == 0)
214 SetLatchedExitCode(terminationExitCode);
216 ForceEEShutdown(SCA_ExitProcessWhenShutdownComplete);
221 void InitializeExceptionHandling()
223 EH_LOG((LL_INFO100, "InitializeExceptionHandling(): ExceptionTracker size: 0x%x bytes\n", sizeof(ExceptionTracker)));
225 InitSavedExceptionInfo();
227 CLRAddVectoredHandlers();
229 g_theTrackerAllocator.Init();
231 // Initialize the lock used for synchronizing access to the stacktrace in the exception object
232 g_StackTraceArrayLock.Init(LOCK_TYPE_DEFAULT, TRUE);
235 // Register handler of hardware exceptions like null reference in PAL
236 PAL_SetHardwareExceptionHandler(HandleHardwareException, IsSafeToHandleHardwareException);
238 // Register handler for determining whether the specified IP has code that is a GC marker for GCCover
239 PAL_SetGetGcMarkerExceptionCode(GetGcMarkerExceptionCode);
241 // Register handler for termination requests (e.g. SIGTERM)
242 PAL_SetTerminationRequestHandler(HandleTerminationRequest);
243 #endif // FEATURE_PAL
246 struct UpdateObjectRefInResumeContextCallbackState
249 Frame *pHighestFrameWithRegisters;
250 TADDR uResumeFrameFP;
251 TADDR uICFCalleeSavedFP;
259 // Stack unwind callback for UpdateObjectRefInResumeContext().
260 StackWalkAction UpdateObjectRefInResumeContextCallback(CrawlFrame* pCF, LPVOID pData)
270 UpdateObjectRefInResumeContextCallbackState *pState = (UpdateObjectRefInResumeContextCallbackState*)pData;
271 CONTEXT* pSrcContext = pCF->GetRegisterSet()->pCurrentContext;
273 INDEBUG(pState->nFrames++);
275 // Check to see if we have reached the resume frame.
276 if (pCF->IsFrameless())
278 // At this point, we are trying to find the managed frame containing the catch handler to be invoked.
279 // This is done by comparing the SP of the managed frame for which this callback was invoked with the
280 // SP the OS passed to our personality routine for the current managed frame. If they match, then we have
281 // reached the target frame.
283 // It is possible that a managed frame may execute a PInvoke after performing a stackalloc:
285 // 1) The ARM JIT will always inline the PInvoke in the managed frame, whether or not the frame
286 // contains EH. As a result, the ICF will live in the same frame which performs stackalloc.
288 // 2) JIT64 will only inline the PInvoke in the managed frame if the frame *does not* contain EH. If it does,
289 // then pinvoke will be performed via an ILStub and thus, stackalloc will be performed in a frame different
290 // from the one (ILStub) that contains the ICF.
292 // Thus, for the scenario where the catch handler lives in the frame that performed stackalloc, in case of
293 // ARM JIT, the SP returned by the OS will be the SP *after* the stackalloc has happened. However,
294 // the stackwalker will invoke this callback with the CrawlFrameSP that was initialized at the time ICF was setup, i.e.,
295 // it will be the SP after the prolog has executed (refer to InlinedCallFrame::UpdateRegDisplay).
297 // Thus, checking only the SP will not work for this scenario when using the ARM JIT.
299 // To address this case, the callback data also contains the frame pointer (FP) passed by the OS. This will
300 // be the value that is saved in the "CalleeSavedFP" field of the InlinedCallFrame during ICF
301 // initialization. When the stackwalker sees an ICF and invokes this callback, we copy the value of "CalleeSavedFP" in the data
302 // structure passed to this callback.
304 // Later, when the stackwalker invokes the callback for the managed frame containing the ICF, and the check
305 // for SP comaprison fails, we will compare the FP value we got from the ICF with the FP value the OS passed
306 // to us. If they match, then we have reached the resume frame.
308 // Note: This problem/scenario is not applicable to JIT64 since it does not perform pinvoke inlining if the
309 // method containing pinvoke also contains EH. Thus, the SP check will never fail for it.
310 if (pState->uResumeSP == GetSP(pSrcContext))
312 INDEBUG(pState->fFound = true);
317 // Perform the FP check, as explained above.
318 if ((pState->uICFCalleeSavedFP !=0) && (pState->uICFCalleeSavedFP == pState->uResumeFrameFP))
320 // FP from ICF is the one that was also copied to the FP register in InlinedCallFrame::UpdateRegDisplay.
321 _ASSERTE(pState->uICFCalleeSavedFP == GetFP(pSrcContext));
323 INDEBUG(pState->fFound = true);
328 // Reset the ICF FP in callback data
329 pState->uICFCalleeSavedFP = 0;
333 Frame *pFrame = pCF->GetFrame();
335 if (pFrame->NeedsUpdateRegDisplay())
337 CONSISTENCY_CHECK(pFrame >= pState->pHighestFrameWithRegisters);
338 pState->pHighestFrameWithRegisters = pFrame;
340 // Is this an InlinedCallFrame?
341 if (pFrame->GetVTablePtr() == InlinedCallFrame::GetMethodFrameVPtr())
343 // If we are here, then ICF is expected to be active.
344 _ASSERTE(InlinedCallFrame::FrameHasActiveCall(pFrame));
346 // Copy the CalleeSavedFP to the data structure that is passed this callback
347 // by the stackwalker. This is the value of frame pointer when ICF is setup
348 // in a managed frame.
350 // Setting this value here is based upon the assumption (which holds true on X64 and ARM) that
351 // the stackwalker invokes the callback for explicit frames before their
352 // container/corresponding managed frame.
353 pState->uICFCalleeSavedFP = ((PTR_InlinedCallFrame)pFrame)->GetCalleeSavedFP();
357 // For any other frame, simply reset uICFCalleeSavedFP field
358 pState->uICFCalleeSavedFP = 0;
368 // Locates the locations of the nonvolatile registers. This will be used to
369 // retrieve the latest values of the object references before we resume
370 // execution from an exception.
373 bool ExceptionTracker::FindNonvolatileRegisterPointers(Thread* pThread, UINT_PTR uOriginalSP, REGDISPLAY* pRegDisplay, TADDR uResumeFrameFP)
384 // Find the highest frame below the resume frame that will update the
385 // REGDISPLAY. A normal StackWalkFrames will RtlVirtualUnwind through all
386 // managed frames on the stack, so this avoids some unnecessary work. The
387 // frame we find will have all of the nonvolatile registers/other state
388 // needed to start a managed unwind from that point.
390 Frame *pHighestFrameWithRegisters = NULL;
391 Frame *pFrame = pThread->GetFrame();
393 while ((UINT_PTR)pFrame < uOriginalSP)
395 if (pFrame->NeedsUpdateRegDisplay())
396 pHighestFrameWithRegisters = pFrame;
398 pFrame = pFrame->Next();
402 // Do a stack walk from this frame. This may find a higher frame within
403 // the resume frame (ex. inlined pinvoke frame). This will also update
404 // the REGDISPLAY pointers if any intervening managed frames saved
405 // nonvolatile registers.
408 UpdateObjectRefInResumeContextCallbackState state;
410 state.uResumeSP = uOriginalSP;
411 state.uResumeFrameFP = uResumeFrameFP;
412 state.uICFCalleeSavedFP = 0;
413 state.pHighestFrameWithRegisters = pHighestFrameWithRegisters;
415 INDEBUG(state.nFrames = 0);
416 INDEBUG(state.fFound = false);
418 pThread->StackWalkFramesEx(pRegDisplay, &UpdateObjectRefInResumeContextCallback, &state, 0, pHighestFrameWithRegisters);
420 // For managed exceptions, we should at least find a HelperMethodFrame (the one we put in IL_Throw()).
421 // For native exceptions such as AV's, we should at least find the FaultingExceptionFrame.
422 // If we don't find anything, then we must have hit an SO when we are trying to erect an HMF.
423 // Bail out in such situations.
425 // Note that pinvoke frames may be inlined in a managed method, so we cannot use the child SP (a.k.a. the current SP)
426 // to check for explicit frames "higher" on the stack ("higher" here means closer to the leaf frame). The stackwalker
427 // knows how to deal with inlined pinvoke frames, and it issues callbacks for them before issuing the callback for the
428 // containing managed method. So we have to do this check after we are done with the stackwalk.
429 pHighestFrameWithRegisters = state.pHighestFrameWithRegisters;
430 if (pHighestFrameWithRegisters == NULL)
435 CONSISTENCY_CHECK(state.nFrames);
436 CONSISTENCY_CHECK(state.fFound);
437 CONSISTENCY_CHECK(NULL != pHighestFrameWithRegisters);
440 // Now the REGDISPLAY has been unwound to the resume frame. The
441 // nonvolatile registers will either point into pHighestFrameWithRegisters,
442 // an inlined pinvoke frame, or into calling managed frames.
450 void ExceptionTracker::UpdateNonvolatileRegisters(CONTEXT *pContextRecord, REGDISPLAY *pRegDisplay, bool fAborting)
452 CONTEXT* pAbortContext = NULL;
455 pAbortContext = GetThread()->GetAbortContext();
459 #define HANDLE_NULL_CONTEXT_POINTER _ASSERTE(false)
461 #define HANDLE_NULL_CONTEXT_POINTER
462 #endif // FEATURE_PAL
464 #define UPDATEREG(reg) \
466 if (pRegDisplay->pCurrentContextPointers->reg != NULL) \
468 STRESS_LOG3(LF_GCROOTS, LL_INFO100, "Updating " #reg " %p to %p from %p\n", \
469 pContextRecord->reg, \
470 *pRegDisplay->pCurrentContextPointers->reg, \
471 pRegDisplay->pCurrentContextPointers->reg); \
472 pContextRecord->reg = *pRegDisplay->pCurrentContextPointers->reg; \
476 HANDLE_NULL_CONTEXT_POINTER; \
480 pAbortContext->reg = pContextRecord->reg; \
485 #if defined(_TARGET_X86_)
492 #elif defined(_TARGET_AMD64_)
496 #ifndef UNIX_AMD64_ABI
505 #elif defined(_TARGET_ARM_)
516 #elif defined(_TARGET_ARM64_)
531 PORTABILITY_ASSERT("ExceptionTracker::UpdateNonvolatileRegisters");
539 #define DebugLogExceptionRecord(pExceptionRecord)
541 #define LOG_FLAG(name) \
544 LOG((LF_EH, LL_INFO100, "" #name " ")); \
547 void DebugLogExceptionRecord(EXCEPTION_RECORD* pExceptionRecord)
549 ULONG flags = pExceptionRecord->ExceptionFlags;
551 EH_LOG((LL_INFO100, ">>exr: %p, code: %08x, addr: %p, flags: 0x%02x ", pExceptionRecord, pExceptionRecord->ExceptionCode, pExceptionRecord->ExceptionAddress, flags));
553 LOG_FLAG(EXCEPTION_NONCONTINUABLE);
554 LOG_FLAG(EXCEPTION_UNWINDING);
555 LOG_FLAG(EXCEPTION_EXIT_UNWIND);
556 LOG_FLAG(EXCEPTION_STACK_INVALID);
557 LOG_FLAG(EXCEPTION_NESTED_CALL);
558 LOG_FLAG(EXCEPTION_TARGET_UNWIND);
559 LOG_FLAG(EXCEPTION_COLLIDED_UNWIND);
561 LOG((LF_EH, LL_INFO100, "\n"));
565 LPCSTR DebugGetExceptionDispositionName(EXCEPTION_DISPOSITION disp)
570 case ExceptionContinueExecution: return "ExceptionContinueExecution";
571 case ExceptionContinueSearch: return "ExceptionContinueSearch";
572 case ExceptionNestedException: return "ExceptionNestedException";
573 case ExceptionCollidedUnwind: return "ExceptionCollidedUnwind";
575 UNREACHABLE_MSG("Invalid EXCEPTION_DISPOSITION!");
580 bool ExceptionTracker::IsStackOverflowException()
582 if (m_pThread->GetThrowableAsHandle() == g_pPreallocatedStackOverflowException)
590 UINT_PTR ExceptionTracker::CallCatchHandler(CONTEXT* pContextRecord, bool* pfAborting /*= NULL*/)
598 PRECONDITION(CheckPointer(pContextRecord, NULL_OK));
602 UINT_PTR uResumePC = 0;
603 ULONG_PTR ulRelOffset;
604 StackFrame sfStackFp = m_sfResumeStackFrame;
605 Thread* pThread = m_pThread;
606 MethodDesc* pMD = m_pMethodDescOfCatcher;
607 bool fIntercepted = false;
609 ThreadExceptionState* pExState = pThread->GetExceptionState();
611 #if defined(DEBUGGING_SUPPORTED)
613 // If the exception is intercepted, use the information stored in the DebuggerExState to resume the
614 // exception instead of calling the catch clause (there may not even be one).
615 if (pExState->GetFlags()->DebuggerInterceptInfo())
617 _ASSERTE(pMD != NULL);
619 // retrieve the interception information
620 pExState->GetDebuggerState()->GetDebuggerInterceptInfo(NULL, NULL, (PBYTE*)&(sfStackFp.SP), &ulRelOffset, NULL);
622 PCODE pStartAddress = pMD->GetNativeCode();
624 EECodeInfo codeInfo(pStartAddress);
625 _ASSERTE(codeInfo.IsValid());
627 // Note that the value returned for ulRelOffset is actually the offset,
628 // so we need to adjust it to get the actual IP.
629 _ASSERTE(FitsIn<DWORD>(ulRelOffset));
630 uResumePC = codeInfo.GetJitManager()->GetCodeAddressForRelOffset(codeInfo.GetMethodToken(), static_cast<DWORD>(ulRelOffset));
632 // Either we haven't set m_uResumeStackFrame (for unhandled managed exceptions), or we have set it
633 // and it equals to MemoryStackFp.
634 _ASSERTE(m_sfResumeStackFrame.IsNull() || m_sfResumeStackFrame == sfStackFp);
638 #endif // DEBUGGING_SUPPORTED
640 _ASSERTE(!sfStackFp.IsNull());
642 m_sfResumeStackFrame.Clear();
643 m_pMethodDescOfCatcher = NULL;
645 _ASSERTE(pContextRecord);
650 EH_LOG((LL_INFO100, " calling catch at 0x%p\n", m_uCatchToCallPC));
652 // do not call the catch clause if the exception is intercepted
655 _ASSERTE(m_uCatchToCallPC != 0 && m_pClauseForCatchToken != NULL);
656 uResumePC = CallHandler(m_uCatchToCallPC, sfStackFp, &m_ClauseForCatch, pMD, Catch X86_ARG(pContextRecord) ARM_ARG(pContextRecord) ARM64_ARG(pContextRecord));
660 // Since the exception has been intercepted and we could resuming execution at any
661 // user-specified arbitary location, reset the EH clause index and EstablisherFrame
662 // we may have saved for addressing any potential ThreadAbort raise.
664 // This is done since the saved EH clause index is related to the catch block executed,
665 // which does not happen in interception. As user specifies where we resume execution,
666 // we let that behaviour override the index and pretend as if we have no index available.
667 m_dwIndexClauseForCatch = 0;
668 m_sfEstablisherOfActualHandlerFrame.Clear();
669 m_sfCallerOfActualHandlerFrame.Clear();
672 EH_LOG((LL_INFO100, " resume address should be 0x%p\n", uResumePC));
675 // Our tracker may have gone away at this point, don't reference it.
678 return FinishSecondPass(pThread, uResumePC, sfStackFp, pContextRecord, this, pfAborting);
682 UINT_PTR ExceptionTracker::FinishSecondPass(
686 CONTEXT* pContextRecord,
687 ExceptionTracker* pTracker,
688 bool* pfAborting /*= NULL*/)
695 PRECONDITION(CheckPointer(pThread, NULL_NOT_OK));
696 PRECONDITION(CheckPointer((void*)uResumePC, NULL_NOT_OK));
697 PRECONDITION(CheckPointer(pContextRecord, NULL_OK));
701 // Between the time when we pop the ExceptionTracker for the current exception and the time
702 // when we actually resume execution, it is unsafe to start a funclet-skipping stackwalk.
703 // So we set a flag here to indicate that we are in this time window. The only user of this
704 // information right now is the profiler.
705 ThreadExceptionFlagHolder tefHolder(ThreadExceptionState::TEF_InconsistentExceptionState);
707 #ifdef DEBUGGING_SUPPORTED
708 // This must be done before we pop the trackers.
709 BOOL fIntercepted = pThread->GetExceptionState()->GetFlags()->DebuggerInterceptInfo();
710 #endif // DEBUGGING_SUPPORTED
712 // Since we may [re]raise ThreadAbort post the catch block execution,
713 // save the index, and Establisher, of the EH clause corresponding to the handler
714 // we just executed before we release the tracker. This will be used to ensure that reraise
715 // proceeds forward and not get stuck in a loop. Refer to
716 // ExceptionTracker::ProcessManagedCallFrame for details.
717 DWORD ehClauseCurrentHandlerIndex = pTracker->GetCatchHandlerExceptionClauseIndex();
718 StackFrame sfEstablisherOfActualHandlerFrame = pTracker->GetEstablisherOfActualHandlingFrame();
720 EH_LOG((LL_INFO100, "second pass finished\n"));
721 EH_LOG((LL_INFO100, "cleaning up ExceptionTracker state\n"));
723 // Release the exception trackers till the current (specified) frame.
724 ExceptionTracker::PopTrackers(sf, true);
726 // This will set the last thrown to be either null if we have handled all the exceptions in the nested chain or
727 // to whatever the current exception is.
729 // In a case when we're nested inside another catch block, the domain in which we're executing may not be the
730 // same as the one the domain of the throwable that was just made the current throwable above. Therefore, we
731 // make a special effort to preserve the domain of the throwable as we update the the last thrown object.
733 // If an exception is active, we dont want to reset the LastThrownObject to NULL as the active exception
734 // might be represented by a tracker created in the second pass (refer to
735 // CEHelper::SetupCorruptionSeverityForActiveExceptionInUnwindPass to understand how exception trackers can be
736 // created in the 2nd pass on 64bit) that does not have a throwable attached to it. Thus, if this exception
737 // is caught in the VM and it attempts to get the LastThrownObject using GET_THROWABLE macro, then it should be available.
739 // But, if the active exception tracker remains consistent in the 2nd pass (which will happen if the exception is caught
740 // in managed code), then the call to SafeUpdateLastThrownObject below will automatically update the LTO as per the
742 if (!pThread->GetExceptionState()->IsExceptionInProgress())
744 pThread->SafeSetLastThrownObject(NULL);
747 // Sync managed exception state, for the managed thread, based upon any active exception tracker
748 pThread->SyncManagedExceptionState(false);
751 // If we are aborting, we should not resume execution. Instead, we raise another
752 // exception. However, we do this by resuming execution at our thread redirecter
753 // function (RedirectForThrowControl), which is the same process we use for async
754 // thread stops. This redirecter function will cover the stack frame and register
755 // stack frame and then throw an exception. When we first see the exception thrown
756 // by this redirecter, we fixup the context for the thread stackwalk by copying
757 // pThread->m_OSContext into the dispatcher context and restarting the exception
758 // dispatch. As a result, we need to save off the "correct" resume context before
759 // we resume so the exception processing can work properly after redirect. A side
760 // benefit of this mechanism is that it makes synchronous and async thread abort
761 // use exactly the same codepaths.
763 UINT_PTR uAbortAddr = 0;
765 #if defined(DEBUGGING_SUPPORTED)
766 // Don't honour thread abort requests at this time for intercepted exceptions.
772 #endif // !DEBUGGING_SUPPORTED
774 CopyOSContext(pThread->m_OSContext, pContextRecord);
775 SetIP(pThread->m_OSContext, (PCODE)uResumePC);
776 uAbortAddr = (UINT_PTR)COMPlusCheckForAbort(uResumePC);
781 if (pfAborting != NULL)
786 EH_LOG((LL_INFO100, "thread abort in progress, resuming thread under control...\n"));
788 // We are aborting, so keep the reference to the current EH clause index.
789 // We will use this when the exception is reraised and we begin commencing
790 // exception dispatch. This is done in ExceptionTracker::ProcessOSExceptionNotification.
792 // The "if" condition below can be false if the exception has been intercepted (refer to
793 // ExceptionTracker::CallCatchHandler for details)
794 if ((ehClauseCurrentHandlerIndex > 0) && (!sfEstablisherOfActualHandlerFrame.IsNull()))
796 pThread->m_dwIndexClauseForCatch = ehClauseCurrentHandlerIndex;
797 pThread->m_sfEstablisherOfActualHandlerFrame = sfEstablisherOfActualHandlerFrame;
800 CONSISTENCY_CHECK(CheckPointer(pContextRecord));
802 STRESS_LOG1(LF_EH, LL_INFO10, "resume under control: ip: %p\n", uResumePC);
804 #ifdef _TARGET_AMD64_
805 pContextRecord->Rcx = uResumePC;
806 #elif defined(_TARGET_ARM_) || defined(_TARGET_ARM64_)
807 // On ARM & ARM64, we save off the original PC in Lr. This is the same as done
808 // in HandleManagedFault for H/W generated exceptions.
809 pContextRecord->Lr = uResumePC;
812 uResumePC = uAbortAddr;
815 CONSISTENCY_CHECK(pThread->DetermineIfGuardPagePresent());
817 EH_LOG((LL_INFO100, "FinishSecondPass complete, uResumePC = %p, current SP = %p\n", uResumePC, GetCurrentSP()));
821 // On CoreARM, the MemoryStackFp is ULONG when passed by RtlDispatchException,
822 // unlike its 64bit counterparts.
823 EXTERN_C EXCEPTION_DISPOSITION
824 ProcessCLRException(IN PEXCEPTION_RECORD pExceptionRecord
825 WIN64_ARG(IN ULONG64 MemoryStackFp)
826 NOT_WIN64_ARG(IN ULONG MemoryStackFp),
827 IN OUT PCONTEXT pContextRecord,
828 IN OUT PDISPATCHER_CONTEXT pDispatcherContext
832 // This method doesn't always return, so it will leave its
833 // state on the thread if using dynamic contracts.
835 STATIC_CONTRACT_MODE_ANY;
836 STATIC_CONTRACT_GC_TRIGGERS;
837 STATIC_CONTRACT_THROWS;
839 // We must preserve this so that GCStress=4 eh processing doesnt kill last error.
840 DWORD dwLastError = GetLastError();
842 EXCEPTION_DISPOSITION returnDisposition = ExceptionContinueSearch;
844 STRESS_LOG5(LF_EH, LL_INFO10, "Processing exception at establisher=%p, ip=%p disp->cxr: %p, sp: %p, cxr @ exception: %p\n",
845 MemoryStackFp, pDispatcherContext->ControlPc,
846 pDispatcherContext->ContextRecord,
847 GetSP(pDispatcherContext->ContextRecord), pContextRecord);
848 AMD64_ONLY(STRESS_LOG3(LF_EH, LL_INFO10, " rbx=%p, rsi=%p, rdi=%p\n", pContextRecord->Rbx, pContextRecord->Rsi, pContextRecord->Rdi));
850 // sample flags early on because we may change pExceptionRecord below
851 // if we are seeing a STATUS_UNWIND_CONSOLIDATE
852 DWORD dwExceptionFlags = pExceptionRecord->ExceptionFlags;
853 Thread* pThread = GetThread();
855 // Stack Overflow is handled specially by the CLR EH mechanism. In fact
856 // there are cases where we aren't in managed code, but aren't quite in
857 // known unmanaged code yet either...
859 // These "boundary code" cases include:
860 // - in JIT helper methods which don't have a frame
861 // - in JIT helper methods before/during frame setup
862 // - in FCALL before/during frame setup
864 // In those cases on x86 we take special care to start our unwind looking
865 // for a handler which is below the last explicit frame which has been
866 // established on the stack as it can't reliably crawl the stack frames
868 // NOTE: see code in the CLRVectoredExceptionHandler() routine.
870 // From the perspective of the EH subsystem, we can handle unwind correctly
871 // even without erecting a transition frame on WIN64. However, since the GC
872 // uses the stackwalker to update object references, and since the stackwalker
873 // relies on transition frame, we still cannot let an exception be handled
874 // by an unprotected managed frame.
876 // This code below checks to see if a SO has occurred outside of managed code.
877 // If it has, and if we don't have a transition frame higher up the stack, then
878 // we don't handle the SO.
879 if (!(dwExceptionFlags & EXCEPTION_UNWINDING))
881 if (pExceptionRecord->ExceptionCode == STATUS_STACK_OVERFLOW)
883 // We don't need to unwind the frame chain here because we have backstop
884 // personality routines at the U2M boundary to handle do that. They are
885 // the personality routines of CallDescrWorker() and UMThunkStubCommon().
887 // See VSW 471619 for more information.
889 // We should be in cooperative mode if we are going to handle the SO.
890 // We track SO state for the thread.
891 EEPolicy::HandleStackOverflow(SOD_ManagedFrameHandler, (void*)MemoryStackFp);
892 FastInterlockAnd (&pThread->m_fPreemptiveGCDisabled, 0);
893 return ExceptionContinueSearch;
898 DWORD exceptionCode = pExceptionRecord->ExceptionCode;
900 if ((NTSTATUS)exceptionCode == STATUS_UNWIND)
901 // If exceptionCode is STATUS_UNWIND, RtlUnwind is called with a NULL ExceptionRecord,
902 // therefore OS uses a faked ExceptionRecord with STATUS_UNWIND code. Then we need to
903 // look at our saved exception code.
904 exceptionCode = GetCurrentExceptionCode();
906 if (exceptionCode == STATUS_STACK_OVERFLOW)
908 return ExceptionContinueSearch;
912 StackFrame sf((UINT_PTR)MemoryStackFp);
917 // Update the current establisher frame
918 if (dwExceptionFlags & EXCEPTION_UNWINDING)
920 ExceptionTracker *pCurrentTracker = pThread->GetExceptionState()->GetCurrentExceptionTracker();
921 if (pCurrentTracker != NULL)
923 pCurrentTracker->SetCurrentEstablisherFrame(sf);
928 Thread::ObjectRefFlush(pThread);
934 // begin Early Processing
937 #ifndef USE_REDIRECT_FOR_GCSTRESS
938 if (IsGcMarker(pContextRecord, pExceptionRecord))
940 returnDisposition = ExceptionContinueExecution;
943 #endif // !USE_REDIRECT_FOR_GCSTRESS
945 EH_LOG((LL_INFO100, "..................................................................................\n"));
946 EH_LOG((LL_INFO100, "ProcessCLRException enter, sp = 0x%p, ControlPc = 0x%p\n", MemoryStackFp, pDispatcherContext->ControlPc));
947 DebugLogExceptionRecord(pExceptionRecord);
949 if (STATUS_UNWIND_CONSOLIDATE == pExceptionRecord->ExceptionCode)
951 EH_LOG((LL_INFO100, "STATUS_UNWIND_CONSOLIDATE, retrieving stored exception record\n"));
952 _ASSERTE(pExceptionRecord->NumberParameters >= 7);
953 pExceptionRecord = (EXCEPTION_RECORD*)pExceptionRecord->ExceptionInformation[6];
954 DebugLogExceptionRecord(pExceptionRecord);
957 CONSISTENCY_CHECK_MSG(!DebugIsEECxxException(pExceptionRecord), "EE C++ Exception leaked into managed code!!\n");
960 // end Early Processing (tm) -- we're now into really processing an exception for managed code
963 if (!(dwExceptionFlags & EXCEPTION_UNWINDING))
965 // If the exception is a breakpoint, but outside of the runtime or managed code,
966 // let it go. It is not ours, so someone else will handle it, or we'll see
967 // it again as an unhandled exception.
968 if ((pExceptionRecord->ExceptionCode == STATUS_BREAKPOINT) ||
969 (pExceptionRecord->ExceptionCode == STATUS_SINGLE_STEP))
971 // It is a breakpoint; is it from the runtime or managed code?
972 PCODE ip = GetIP(pContextRecord); // IP of the fault.
974 BOOL fExternalException;
976 fExternalException = (!ExecutionManager::IsManagedCode(ip) &&
977 !IsIPInModule(g_pMSCorEE, ip));
979 if (fExternalException)
981 // The breakpoint was not ours. Someone else can handle it. (Or if not, we'll get it again as
982 // an unhandled exception.)
983 returnDisposition = ExceptionContinueSearch;
990 BOOL bAsynchronousThreadStop = IsThreadHijackedForThreadStop(pThread, pExceptionRecord);
992 // we already fixed the context in HijackHandler, so let's
993 // just clear the thread state.
994 pThread->ResetThrowControlForThread();
996 ExceptionTracker::StackTraceState STState;
998 ExceptionTracker* pTracker = ExceptionTracker::GetOrCreateTracker(
999 pDispatcherContext->ControlPc,
1003 bAsynchronousThreadStop,
1004 !(dwExceptionFlags & EXCEPTION_UNWINDING),
1007 #ifdef FEATURE_CORRUPTING_EXCEPTIONS
1008 // Only setup the Corruption Severity in the first pass
1009 if (!(dwExceptionFlags & EXCEPTION_UNWINDING))
1011 // Switch to COOP mode
1014 if (pTracker && pTracker->GetThrowable() != NULL)
1016 // Setup the state in current exception tracker indicating the corruption severity
1017 // of the active exception.
1018 CEHelper::SetupCorruptionSeverityForActiveException((STState == ExceptionTracker::STS_FirstRethrowFrame), (pTracker->GetPreviousExceptionTracker() != NULL),
1019 CEHelper::ShouldTreatActiveExceptionAsNonCorrupting());
1022 // Failfast if exception indicates corrupted process state
1023 if (pTracker->GetCorruptionSeverity() == ProcessCorrupting)
1025 OBJECTREF oThrowable = NULL;
1028 GCPROTECT_BEGIN(oThrowable);
1029 oThrowable = pTracker->GetThrowable();
1030 if (oThrowable != NULL)
1034 GetExceptionMessage(oThrowable, message);
1039 EX_END_CATCH(SwallowAllExceptions);
1043 EEPOLICY_HANDLE_FATAL_ERROR_WITH_MESSAGE(pExceptionRecord->ExceptionCode, (LPCWSTR)message);
1046 #endif // FEATURE_CORRUPTING_EXCEPTIONS
1049 // Switch to COOP mode since we are going to work
1052 if (pTracker->GetThrowable() != NULL)
1054 BOOL fIsThrownExceptionAV = FALSE;
1055 OBJECTREF oThrowable = NULL;
1056 GCPROTECT_BEGIN(oThrowable);
1057 oThrowable = pTracker->GetThrowable();
1059 // Check if we are dealing with AV or not and if we are,
1060 // ensure that this is a real AV and not managed AV exception
1061 if ((pExceptionRecord->ExceptionCode == STATUS_ACCESS_VIOLATION) &&
1062 (MscorlibBinder::GetException(kAccessViolationException) == oThrowable->GetMethodTable()))
1064 // Its an AV - set the flag
1065 fIsThrownExceptionAV = TRUE;
1070 // Did we get an AV?
1071 if (fIsThrownExceptionAV == TRUE)
1073 // Get the escalation policy action for handling AV
1074 EPolicyAction actionAV = GetEEPolicy()->GetActionOnFailure(FAIL_AccessViolation);
1076 // Valid actions are: eNoAction (default behviour) or eRudeExitProcess
1077 _ASSERTE(((actionAV == eNoAction) || (actionAV == eRudeExitProcess)));
1078 if (actionAV == eRudeExitProcess)
1080 LOG((LF_EH, LL_INFO100, "ProcessCLRException: AccessViolation handler found and doing RudeExitProcess due to escalation policy (eRudeExitProcess)\n"));
1082 // EEPolicy::HandleFatalError will help us RudeExit the process.
1083 // RudeExitProcess due to AV is to prevent a security risk - we are ripping
1084 // at the boundary, without looking for the handlers.
1085 EEPOLICY_HANDLE_FATAL_ERROR(COR_E_SECURITY);
1091 #ifndef FEATURE_PAL // Watson is on Windows only
1092 // Setup bucketing details for nested exceptions (rethrow and non-rethrow) only if we are in the first pass
1093 if (!(dwExceptionFlags & EXCEPTION_UNWINDING))
1095 ExceptionTracker *pPrevEHTracker = pTracker->GetPreviousExceptionTracker();
1096 if (pPrevEHTracker != NULL)
1098 SetStateForWatsonBucketing((STState == ExceptionTracker::STS_FirstRethrowFrame), pPrevEHTracker->GetThrowableAsHandle());
1101 #endif //!FEATURE_PAL
1103 CLRUnwindStatus status;
1105 #ifdef USE_PER_FRAME_PINVOKE_INIT
1106 // Refer to comment in ProcessOSExceptionNotification about ICF and codegen difference.
1107 InlinedCallFrame *pICFSetAsLimitFrame = NULL;
1108 #endif // USE_PER_FRAME_PINVOKE_INIT
1110 status = pTracker->ProcessOSExceptionNotification(
1118 #ifdef USE_PER_FRAME_PINVOKE_INIT
1119 , (PVOID)pICFSetAsLimitFrame
1120 #endif // USE_PER_FRAME_PINVOKE_INIT
1123 if (FirstPassComplete == status)
1125 EH_LOG((LL_INFO100, "first pass finished, found handler, TargetFrameSp = %p\n",
1126 pDispatcherContext->EstablisherFrame));
1128 SetLastError(dwLastError);
1132 // At this point (the end of the 1st pass) we don't know where
1133 // we are going to resume to. So, we pass in an address, which
1134 // lies in NULL pointer partition of the memory, as the target IP.
1136 // Once we reach the target frame in the second pass unwind, we call
1137 // the catch funclet that caused us to resume execution and it
1138 // tells us where we are resuming to. At that point, we patch
1139 // the context record with the resume IP and RtlUnwind2 finishes
1140 // by restoring our context at the right spot.
1142 // If we are unable to set the resume PC for some reason, then
1143 // the OS will try to resume at the NULL partition address and the
1144 // attempt will fail due to AV, resulting in failfast, helping us
1145 // isolate problems in patching the IP.
1147 ClrUnwindEx(pExceptionRecord,
1149 INVALID_RESUME_ADDRESS,
1150 pDispatcherContext->EstablisherFrame);
1157 // On Unix, we will return ExceptionStackUnwind back to the custom
1158 // exception dispatch system. When it sees this disposition, it will
1159 // know that we want to handle the exception and will commence unwind
1160 // via the custom unwinder.
1161 return ExceptionStackUnwind;
1163 #endif // FEATURE_PAL
1165 else if (SecondPassComplete == status)
1167 bool fAborting = false;
1168 UINT_PTR uResumePC = (UINT_PTR)-1;
1169 UINT_PTR uOriginalSP = GetSP(pContextRecord);
1171 Frame* pLimitFrame = pTracker->GetLimitFrame();
1173 pDispatcherContext->ContextRecord = pContextRecord;
1175 // We may be in COOP mode at this point - the indefinite switch was done
1176 // in ExceptionTracker::ProcessManagedCallFrame.
1178 // However, if a finally was invoked non-exceptionally and raised an exception
1179 // that was caught in its parent method, unwind will result in invoking any applicable termination
1180 // handlers in the finally funclet and thus, also switching the mode to COOP indefinitely.
1182 // Since the catch block to be executed will lie in the parent method,
1183 // we will skip frames till we reach the parent and in the process, switch back to PREEMP mode
1184 // as control goes back to the OS.
1186 // Upon reaching the target of unwind, we wont call ExceptionTracker::ProcessManagedCallFrame (since any
1187 // handlers in finally or surrounding it will be invoked when we unwind finally funclet). Thus,
1188 // we may not be in COOP mode.
1190 // Since CallCatchHandler expects to be in COOP mode, perform the switch here.
1192 uResumePC = pTracker->CallCatchHandler(pContextRecord, &fAborting);
1196 // GC must NOT occur after the handler has returned until
1197 // we resume at the new address because the stackwalker
1198 // EnumGcRefs would try and report things as live from the
1199 // try body, that were probably reported dead from the
1202 // GC must NOT occur once the frames have been popped because
1203 // the values in the unwound CONTEXT are not GC-protected.
1207 CONSISTENCY_CHECK((UINT_PTR)-1 != uResumePC);
1209 // Ensure we are not resuming to the invalid target IP we had set at the end of
1211 _ASSERTE_MSG(INVALID_RESUME_ADDRESS != uResumePC, "CallCatchHandler returned invalid resume PC!");
1214 // CallCatchHandler freed the tracker.
1216 INDEBUG(pTracker = (ExceptionTracker*)POISONC);
1218 // Note that we should only fail to fix up for SO.
1219 bool fFixedUp = FixNonvolatileRegisters(uOriginalSP, pThread, pContextRecord, fAborting);
1220 _ASSERTE(fFixedUp || (pExceptionRecord->ExceptionCode == STATUS_STACK_OVERFLOW));
1223 CONSISTENCY_CHECK(pLimitFrame > dac_cast<PTR_VOID>(GetSP(pContextRecord)));
1224 #ifdef USE_PER_FRAME_PINVOKE_INIT
1225 if (pICFSetAsLimitFrame != NULL)
1227 _ASSERTE(pICFSetAsLimitFrame == pLimitFrame);
1229 // Mark the ICF as inactive (by setting the return address as NULL).
1230 // It will be marked as active at the next PInvoke callsite.
1232 // This ensures that any stackwalk post the catch handler but before
1233 // the next pinvoke callsite does not see the frame as active.
1234 pICFSetAsLimitFrame->Reset();
1236 #endif // USE_PER_FRAME_PINVOKE_INIT
1238 pThread->SetFrame(pLimitFrame);
1240 FixContext(pContextRecord);
1242 SetIP(pContextRecord, (PCODE)uResumePC);
1245 #ifdef STACK_GUARDS_DEBUG
1246 // We are transitioning back to managed code, so ensure that we are in
1247 // SO-tolerant mode before we do so.
1248 RestoreSOToleranceState();
1251 ExceptionTracker::ResumeExecution(pContextRecord,
1260 EH_LOG((LL_INFO100, "returning %s\n", DebugGetExceptionDispositionName(returnDisposition)));
1261 CONSISTENCY_CHECK( !((dwExceptionFlags & EXCEPTION_TARGET_UNWIND) && (ExceptionContinueSearch == returnDisposition)));
1263 if ((ExceptionContinueSearch == returnDisposition))
1265 GCX_PREEMP_NO_DTOR();
1268 SetLastError(dwLastError);
1270 return returnDisposition;
1273 // When we hit a native exception such as an AV in managed code, we put up a FaultingExceptionFrame which saves all the
1274 // non-volatile registers. The GC may update these registers if they contain object references. However, the CONTEXT
1275 // with which we are going to resume execution doesn't have these updated values. Thus, we need to fix up the non-volatile
1276 // registers in the CONTEXT with the updated ones stored in the FaultingExceptionFrame. To do so properly, we need
1277 // to perform a full stackwalk.
1278 bool FixNonvolatileRegisters(UINT_PTR uOriginalSP,
1280 CONTEXT* pContextRecord,
1294 // Ctor will initialize it to NULL
1297 pThread->FillRegDisplay(®disp, &_ctx);
1299 bool fFound = ExceptionTracker::FindNonvolatileRegisterPointers(pThread, uOriginalSP, ®disp, GetFP(pContextRecord));
1307 // GC must NOT occur once the frames have been popped because
1308 // the values in the unwound CONTEXT are not GC-protected.
1312 ExceptionTracker::UpdateNonvolatileRegisters(pContextRecord, ®disp, fAborting);
1322 void ExceptionTracker::InitializeCrawlFrameForExplicitFrame(CrawlFrame* pcfThisFrame, Frame* pFrame, MethodDesc *pMD)
1330 PRECONDITION(pFrame != FRAME_TOP);
1334 INDEBUG(memset(pcfThisFrame, 0xCC, sizeof(*pcfThisFrame)));
1336 pcfThisFrame->isFrameless = false;
1337 pcfThisFrame->pFrame = pFrame;
1338 pcfThisFrame->pFunc = pFrame->GetFunction();
1340 if (pFrame->GetVTablePtr() == InlinedCallFrame::GetMethodFrameVPtr() &&
1341 !InlinedCallFrame::FrameHasActiveCall(pFrame))
1343 // Inactive ICFs in IL stubs contain the true interop MethodDesc which must be
1344 // reported in the stack trace.
1345 if (pMD->IsILStub() && pMD->AsDynamicMethodDesc()->HasMDContextArg())
1347 // Report interop MethodDesc
1348 pcfThisFrame->pFunc = ((InlinedCallFrame *)pFrame)->GetActualInteropMethodDesc();
1349 _ASSERTE(pcfThisFrame->pFunc != NULL);
1350 _ASSERTE(pcfThisFrame->pFunc->SanityCheck());
1354 pcfThisFrame->pFirstGSCookie = NULL;
1355 pcfThisFrame->pCurGSCookie = NULL;
1358 // This method will initialize the RegDisplay in the CrawlFrame with the correct state for current and caller context
1359 // See the long description of contexts and their validity in ExceptionTracker::InitializeCrawlFrame for details.
1360 void ExceptionTracker::InitializeCurrentContextForCrawlFrame(CrawlFrame* pcfThisFrame, PT_DISPATCHER_CONTEXT pDispatcherContext, StackFrame sfEstablisherFrame)
1367 PRECONDITION(IsInFirstPass());
1371 if (IsInFirstPass())
1373 REGDISPLAY *pRD = pcfThisFrame->pRD;
1375 #ifndef USE_CURRENT_CONTEXT_IN_FILTER
1376 INDEBUG(memset(pRD->pCurrentContext, 0xCC, sizeof(*(pRD->pCurrentContext))));
1377 // Ensure that clients can tell the current context isn't valid.
1378 SetIP(pRD->pCurrentContext, 0);
1379 #else // !USE_CURRENT_CONTEXT_IN_FILTER
1380 RestoreNonvolatileRegisters(pRD->pCurrentContext, pDispatcherContext->CurrentNonVolatileContextRecord);
1381 RestoreNonvolatileRegisterPointers(pRD->pCurrentContextPointers, pDispatcherContext->CurrentNonVolatileContextRecord);
1382 #endif // USE_CURRENT_CONTEXT_IN_FILTER
1384 *(pRD->pCallerContext) = *(pDispatcherContext->ContextRecord);
1385 pRD->IsCallerContextValid = TRUE;
1387 pRD->SP = sfEstablisherFrame.SP;
1388 pRD->ControlPC = pDispatcherContext->ControlPc;
1390 #ifdef ESTABLISHER_FRAME_ADDRESS_IS_CALLER_SP
1391 pcfThisFrame->pRD->IsCallerSPValid = TRUE;
1393 // Assert our first pass assumptions for the Arm/Arm64
1394 _ASSERTE(sfEstablisherFrame.SP == GetSP(pDispatcherContext->ContextRecord));
1395 #endif // ESTABLISHER_FRAME_ADDRESS_IS_CALLER_SP
1399 EH_LOG((LL_INFO100, "ExceptionTracker::InitializeCurrentContextForCrawlFrame: DispatcherContext->ControlPC = %p; IP in DispatcherContext->ContextRecord = %p.\n",
1400 pDispatcherContext->ControlPc, GetIP(pDispatcherContext->ContextRecord)));
1404 void ExceptionTracker::InitializeCrawlFrame(CrawlFrame* pcfThisFrame, Thread* pThread, StackFrame sf, REGDISPLAY* pRD,
1405 PDISPATCHER_CONTEXT pDispatcherContext, DWORD_PTR ControlPCForEHSearch,
1406 UINT_PTR* puMethodStartPC,
1407 ExceptionTracker *pCurrentTracker)
1417 INDEBUG(memset(pcfThisFrame, 0xCC, sizeof(*pcfThisFrame)));
1418 pcfThisFrame->pRD = pRD;
1420 #ifdef FEATURE_INTERPRETER
1421 pcfThisFrame->pFrame = NULL;
1422 #endif // FEATURE_INTERPRETER
1424 // Initialize the RegDisplay from DC->ContextRecord. DC->ControlPC always contains the IP
1425 // in the frame for which the personality routine was invoked.
1429 // During 1st pass, DC->ContextRecord contains the context of the caller of the frame for which personality
1430 // routine was invoked. On the other hand, in the 2nd pass, it contains the context of the frame for which
1431 // personality routine was invoked.
1437 // In the first pass on ARM & ARM64:
1439 // 1) EstablisherFrame (passed as 'sf' to this method) represents the SP at the time
1440 // the current managed method was invoked and thus, is the SP of the caller. This is
1441 // the value of DispatcherContext->EstablisherFrame as well.
1442 // 2) DispatcherContext->ControlPC is the pc in the current managed method for which personality
1443 // routine has been invoked.
1444 // 3) DispatcherContext->ContextRecord contains the context record of the caller (and thus, IP
1445 // in the caller). Most of the times, these values will be distinct. However, recursion
1446 // may result in them being the same (case "run2" of baseservices\Regression\V1\Threads\functional\CS_TryFinally.exe
1447 // is an example). In such a case, we ensure that EstablisherFrame value is the same as
1448 // the SP in DispatcherContext->ContextRecord (which is (1) above).
1450 // In second pass on ARM & ARM64:
1452 // 1) EstablisherFrame (passed as 'sf' to this method) represents the SP at the time
1453 // the current managed method was invoked and thus, is the SP of the caller. This is
1454 // the value of DispatcherContext->EstablisherFrame as well.
1455 // 2) DispatcherContext->ControlPC is the pc in the current managed method for which personality
1456 // routine has been invoked.
1457 // 3) DispatcherContext->ContextRecord contains the context record of the current managed method
1458 // for which the personality routine is invoked.
1461 pThread->InitRegDisplay(pcfThisFrame->pRD, pDispatcherContext->ContextRecord, true);
1463 bool fAdjustRegdisplayControlPC = false;
1465 // The "if" check below is trying to determine when we have a valid current context in DC->ContextRecord and whether, or not,
1466 // RegDisplay needs to be fixed up to set SP and ControlPC to have the values for the current frame for which personality routine
1469 // We do this based upon the current pass for the exception tracker as this will also handle the case when current frame
1470 // and its caller have the same return address (i.e. ControlPc). This can happen in cases when, due to certain JIT optimizations, the following callstack
1474 // Could get transformed to the one below when B is inlined in the first (left-most) A resulting in:
1478 // In this case, during 1st pass, when personality routine is invoked for the second A, DC->ControlPc could have the same
1479 // value as DC->ContextRecord->Rip even though the DC->ContextRecord actually represents caller context (of first A).
1480 // As a result, we will not initialize the value of SP and controlPC in RegDisplay for the current frame (frame for
1481 // which personality routine was invoked - second A in the optimized scenario above) resulting in frame specific lookup (e.g.
1482 // GenericArgType) to happen incorrectly (against first A).
1484 // Thus, we should always use the pass identification in ExceptionTracker to determine when we need to perform the fixup below.
1485 if (pCurrentTracker->IsInFirstPass())
1487 pCurrentTracker->InitializeCurrentContextForCrawlFrame(pcfThisFrame, pDispatcherContext, sf);
1491 #if defined(_TARGET_ARM_) || defined(_TARGET_ARM64_)
1492 // See the comment above the call to InitRegDisplay for this assertion.
1493 _ASSERTE(pDispatcherContext->ControlPc == GetIP(pDispatcherContext->ContextRecord));
1494 #endif // _TARGET_ARM_ || _TARGET_ARM64_
1496 #ifdef ESTABLISHER_FRAME_ADDRESS_IS_CALLER_SP
1497 // Simply setup the callerSP during the second pass in the caller context.
1498 // This is used in setting up the "EnclosingClauseCallerSP" in ExceptionTracker::ProcessManagedCallFrame
1499 // when the termination handlers are invoked.
1500 ::SetSP(pcfThisFrame->pRD->pCallerContext, sf.SP);
1501 pcfThisFrame->pRD->IsCallerSPValid = TRUE;
1502 #endif // ESTABLISHER_FRAME_ADDRESS_IS_CALLER_SP
1505 #ifdef ADJUST_PC_UNWOUND_TO_CALL
1506 // Further below, we will adjust the ControlPC based upon whether we are at a callsite or not.
1507 // We need to do this for "RegDisplay.ControlPC" field as well so that when data structures like
1508 // EECodeInfo initialize themselves using this field, they will have the correct absolute value
1509 // that is in sync with the "relOffset" we calculate below.
1511 // However, we do this *only* when "ControlPCForEHSearch" is the same as "DispatcherContext->ControlPC",
1512 // indicating we are not using the thread-abort reraise loop prevention logic.
1514 if (pDispatcherContext->ControlPc == ControlPCForEHSearch)
1516 // Since DispatcherContext->ControlPc is used to initialize the
1517 // RegDisplay.ControlPC field, assert that it is the same
1518 // as the ControlPC we are going to use to initialize the CrawlFrame
1520 _ASSERTE(pcfThisFrame->pRD->ControlPC == ControlPCForEHSearch);
1521 fAdjustRegdisplayControlPC = true;
1524 #endif // ADJUST_PC_UNWOUND_TO_CALL
1526 #if defined(_TARGET_ARM_)
1527 // Remove the Thumb bit
1528 ControlPCForEHSearch = ThumbCodeToDataPointer<DWORD_PTR, DWORD_PTR>(ControlPCForEHSearch);
1531 #ifdef ADJUST_PC_UNWOUND_TO_CALL
1532 // If the OS indicated that the IP is a callsite, then adjust the ControlPC by decrementing it
1533 // by two. This is done because unwinding at callsite will make ControlPC point to the
1534 // instruction post the callsite. If a protected region ends "at" the callsite, then
1535 // not doing this adjustment will result in a one-off error that can result in us not finding
1538 // For async exceptions (e.g. AV), this will be false.
1540 // We decrement by two to be in accordance with how the kernel does as well.
1541 if (pDispatcherContext->ControlPcIsUnwound)
1543 ControlPCForEHSearch -= STACKWALK_CONTROLPC_ADJUST_OFFSET;
1544 if (fAdjustRegdisplayControlPC == true)
1546 // Once the check above is removed, the assignment below should
1547 // be done unconditionally.
1548 pcfThisFrame->pRD->ControlPC = ControlPCForEHSearch;
1549 // On ARM & ARM64, the IP is either at the callsite (post the adjustment above)
1550 // or at the instruction at which async exception took place.
1551 pcfThisFrame->isIPadjusted = true;
1554 #endif // ADJUST_PC_UNWOUND_TO_CALL
1556 pcfThisFrame->codeInfo.Init(ControlPCForEHSearch);
1558 if (pcfThisFrame->codeInfo.IsValid())
1560 pcfThisFrame->isFrameless = true;
1561 pcfThisFrame->pFunc = pcfThisFrame->codeInfo.GetMethodDesc();
1563 *puMethodStartPC = pcfThisFrame->codeInfo.GetStartAddress();
1567 pcfThisFrame->isFrameless = false;
1568 pcfThisFrame->pFunc = NULL;
1570 *puMethodStartPC = NULL;
1573 pcfThisFrame->pThread = pThread;
1574 pcfThisFrame->hasFaulted = false;
1576 Frame* pTopFrame = pThread->GetFrame();
1577 pcfThisFrame->isIPadjusted = (FRAME_TOP != pTopFrame) && (pTopFrame->GetVTablePtr() != FaultingExceptionFrame::GetMethodFrameVPtr());
1578 if (pcfThisFrame->isFrameless && (pcfThisFrame->isIPadjusted == false) && (pcfThisFrame->GetRelOffset() == 0))
1580 // If we are here, then either a hardware generated exception happened at the first instruction
1581 // of a managed method an exception was thrown at that location.
1583 // Adjusting IP in such a case will lead us into unknown code - it could be native code or some
1584 // other JITted code.
1586 // Hence, we will flag that the IP is already adjusted.
1587 pcfThisFrame->isIPadjusted = true;
1589 EH_LOG((LL_INFO100, "ExceptionTracker::InitializeCrawlFrame: Exception at offset zero of the method (MethodDesc %p); setting IP as adjusted.\n",
1590 pcfThisFrame->pFunc));
1593 pcfThisFrame->pFirstGSCookie = NULL;
1594 pcfThisFrame->pCurGSCookie = NULL;
1596 pcfThisFrame->isFilterFuncletCached = FALSE;
1599 bool ExceptionTracker::UpdateScannedStackRange(StackFrame sf, bool fIsFirstPass)
1603 // Since this function will modify the scanned stack range, which is also accessed during the GC stackwalk,
1604 // we invoke it in COOP mode so that that access to the range is synchronized.
1612 // collapse trackers if a nested exception passes a previous exception
1615 HandleNestedExceptionEscape(sf, fIsFirstPass);
1618 // update stack bounds
1620 BOOL fUnwindingToFindResumeFrame = m_ExceptionFlags.UnwindingToFindResumeFrame();
1622 if (m_ScannedStackRange.Contains(sf))
1624 // If we're unwinding to find the resume frame and we're examining the topmost previously scanned frame,
1625 // then we can't ignore it because we could resume here due to an escaped nested exception.
1626 if (!fUnwindingToFindResumeFrame || (m_ScannedStackRange.GetUpperBound() != sf))
1628 // been there, done that.
1629 EH_LOG((LL_INFO100, " IGNOREFRAME: This frame has been processed already\n"));
1635 if (sf < m_ScannedStackRange.GetLowerBound())
1637 m_ScannedStackRange.ExtendLowerBound(sf);
1640 if (sf > m_ScannedStackRange.GetUpperBound())
1642 m_ScannedStackRange.ExtendUpperBound(sf);
1645 DebugLogTrackerRanges(" C");
1651 void CheckForRudeAbort(Thread* pThread, bool fIsFirstPass)
1653 if (fIsFirstPass && pThread->IsRudeAbort())
1656 OBJECTREF rudeAbortThrowable = CLRException::GetPreallocatedRudeThreadAbortException();
1657 if (pThread->GetThrowable() != rudeAbortThrowable)
1659 pThread->SafeSetThrowables(rudeAbortThrowable);
1662 if (!pThread->IsRudeAbortInitiated())
1664 pThread->PreWorkForThreadAbort();
1669 void ExceptionTracker::FirstPassIsComplete()
1671 m_ExceptionFlags.ResetUnwindingToFindResumeFrame();
1672 m_pSkipToParentFunctionMD = NULL;
1675 void ExceptionTracker::SecondPassIsComplete(MethodDesc* pMD, StackFrame sfResumeStackFrame)
1677 EH_LOG((LL_INFO100, " second pass unwind completed\n"));
1679 m_pMethodDescOfCatcher = pMD;
1680 m_sfResumeStackFrame = sfResumeStackFrame;
1683 CLRUnwindStatus ExceptionTracker::ProcessOSExceptionNotification(
1684 PEXCEPTION_RECORD pExceptionRecord,
1685 PCONTEXT pContextRecord,
1686 PDISPATCHER_CONTEXT pDispatcherContext,
1687 DWORD dwExceptionFlags,
1690 StackTraceState STState
1691 #ifdef USE_PER_FRAME_PINVOKE_INIT
1692 , PVOID pICFSetAsLimitFrame
1693 #endif // USE_PER_FRAME_PINVOKE_INIT
1704 CLRUnwindStatus status = UnwindPending;
1706 CrawlFrame cfThisFrame;
1708 UINT_PTR uMethodStartPC;
1711 DWORD_PTR ControlPc = pDispatcherContext->ControlPc;
1713 ExceptionTracker::InitializeCrawlFrame(&cfThisFrame, pThread, sf, ®disp, pDispatcherContext, ControlPc, &uMethodStartPC, this);
1715 #ifndef ESTABLISHER_FRAME_ADDRESS_IS_CALLER_SP
1716 uCallerSP = EECodeManager::GetCallerSp(cfThisFrame.pRD);
1717 #else // !ESTABLISHER_FRAME_ADDRESS_IS_CALLER_SP
1719 #endif // ESTABLISHER_FRAME_ADDRESS_IS_CALLER_SP
1721 EH_LOG((LL_INFO100, "ProcessCrawlFrame: PSP: " FMT_ADDR " EstablisherFrame: " FMT_ADDR "\n", DBG_ADDR(uCallerSP), DBG_ADDR(sf.SP)));
1723 bool fIsFirstPass = !(dwExceptionFlags & EXCEPTION_UNWINDING);
1724 bool fTargetUnwind = !!(dwExceptionFlags & EXCEPTION_TARGET_UNWIND);
1726 // If a thread abort was raised after a catch block's execution, we would have saved
1727 // the index and EstablisherFrame of the EH clause corresponding to the handler that executed.
1728 // Fetch that locally and reset the state against the thread if we are in the unwind pass.
1730 // It should be kept in mind that by the virtue of copying the information below, we will
1731 // have it available for the first frame seen during the unwind pass (which will be the
1732 // frame where ThreadAbort was raised after the catch block) for us to skip any termination
1733 // handlers that may be present prior to the EH clause whose index we saved.
1734 DWORD dwTACatchHandlerClauseIndex = pThread->m_dwIndexClauseForCatch;
1735 StackFrame sfEstablisherOfActualHandlerFrame = pThread->m_sfEstablisherOfActualHandlerFrame;
1738 pThread->m_dwIndexClauseForCatch = 0;
1739 pThread->m_sfEstablisherOfActualHandlerFrame.Clear();
1742 bool fProcessThisFrame = false;
1743 bool fCrawlFrameIsDirty = false;
1745 // <GC_FUNCLET_REFERENCE_REPORTING>
1747 // Refer to the detailed comment in ExceptionTracker::ProcessManagedCallFrame for more context.
1748 // In summary, if we have reached the target of the unwind, then we need to fix CallerSP (for
1749 // GC reference reporting) if we have been asked to.
1751 // This will be done only when we reach the frame that is handling the exception.
1753 // </GC_FUNCLET_REFERENCE_REPORTING>
1754 if (fTargetUnwind && (m_fFixupCallerSPForGCReporting == true))
1756 m_fFixupCallerSPForGCReporting = false;
1757 this->m_EnclosingClauseInfoForGCReporting.SetEnclosingClauseCallerSP(uCallerSP);
1760 #ifdef USE_PER_FRAME_PINVOKE_INIT
1761 // Refer to detailed comment below.
1762 PTR_Frame pICFForUnwindTarget = NULL;
1763 #endif // USE_PER_FRAME_PINVOKE_INIT
1765 CheckForRudeAbort(pThread, fIsFirstPass);
1767 bool fIsFrameLess = cfThisFrame.IsFrameless();
1768 GSCookie* pGSCookie = NULL;
1769 bool fSetLastUnwoundEstablisherFrame = false;
1772 // process any frame since the last frame we've seen
1775 GCX_COOP_THREAD_EXISTS(pThread);
1777 // UpdateScannedStackRange needs to be invoked in COOP mode since
1778 // the stack range can also be accessed during GC stackwalk.
1779 fProcessThisFrame = UpdateScannedStackRange(sf, fIsFirstPass);
1781 MethodDesc *pMD = cfThisFrame.GetFunction();
1783 Frame* pFrame = GetLimitFrame(); // next frame to process
1784 if (pFrame != FRAME_TOP)
1786 // The following function call sets the GS cookie pointers and checks the cookie.
1787 cfThisFrame.SetCurGSCookie(Frame::SafeGetGSCookiePtr(pFrame));
1790 while (((UINT_PTR)pFrame) < uCallerSP)
1792 #ifdef USE_PER_FRAME_PINVOKE_INIT
1793 // InlinedCallFrames (ICF) are allocated, initialized and linked to the Frame chain
1794 // by the code generated by the JIT for a method containing a PInvoke.
1796 // On X64, JIT generates code to dynamically link and unlink the ICF around
1797 // each PInvoke call. On ARM, on the other hand, JIT's codegen, in context of ICF,
1798 // is more inline with X86 and thus, it links in the ICF at the start of the method
1799 // and unlinks it towards the method end. Thus, ICF is present on the Frame chain
1800 // at any given point so long as the method containing the PInvoke is on the stack.
1802 // Now, if the method containing ICF catches an exception, we will reset the Frame chain
1803 // with the LimitFrame, that is computed below, after the catch handler returns. Since this
1804 // computation is done relative to the CallerSP (on both X64 and ARM), we will end up
1805 // removing the ICF from the Frame chain as that will always be below (stack growing down)
1806 // the CallerSP since it lives in the stack space of the current managed frame.
1808 // As a result, if there is another PInvoke call after the catch block, it will expect
1809 // the ICF to be present and without one, execution will go south.
1811 // To account for this ICF codegen difference, in the EH system we check if the current
1812 // Frame is an ICF or not. If it is and lies inside the current managed method, we
1813 // keep a reference to it and reset the LimitFrame to this saved reference before we
1814 // return back to invoke the catch handler.
1816 // Thus, if there is another PInvoke call post the catch handler, it will find ICF as expected.
1818 // This is based upon the following assumptions:
1820 // 1) There will be no other explicit Frame inserted above the ICF inside the
1821 // managed method containing ICF. That is, ICF is the top-most explicit frame
1822 // in the managed method (and thus, lies in the current managed frame).
1824 // 2) There is only one ICF per managed method containing one (or more) PInvoke(s).
1826 // 3) We only do this if the current frame is the one handling the exception. This is to
1827 // address the scenario of keeping any ICF from frames lower in the stack active.
1829 // 4) The ExceptionUnwind method of the ICF is a no-op. As noted above, we save a reference
1830 // to the ICF and yet continue to process the frame chain. During unwind, this implies
1831 // that we will end up invoking the ExceptionUnwind methods of all frames that lie
1832 // below the caller SP of the managed frame handling the exception. And since the handling
1833 // managed frame contains an ICF, it will be the topmost frame that will lie
1834 // below the callerSP for which we will invoke ExceptionUnwind.
1836 // Thus, ICF::ExceptionUnwind should not do anything significant. If any of these assumptions
1837 // break, then the next best thing will be to make the JIT link/unlink the frame dynamically.
1839 // If the current method executing is from precompiled ReadyToRun code, then the above is no longer
1840 // applicable because each PInvoke is wrapped by calls to the JIT_PInvokeBegin and JIT_PInvokeEnd
1841 // helpers, which push and pop the ICF to the current thread. Unlike jitted code, the ICF is not
1842 // linked during the method prolog, and unlinked at the epilog (it looks more like the X64 case).
1843 // In that case, we need to unlink the ICF during unwinding here.
1845 if (fTargetUnwind && (pFrame->GetVTablePtr() == InlinedCallFrame::GetMethodFrameVPtr()))
1847 PTR_InlinedCallFrame pICF = (PTR_InlinedCallFrame)pFrame;
1848 // Does it live inside the current managed method? It will iff:
1850 // 1) ICF address is higher than the current frame's SP (which we get from DispatcherContext), AND
1851 // 2) ICF address is below callerSP.
1852 if ((GetSP(pDispatcherContext->ContextRecord) < (TADDR)pICF) &&
1853 ((UINT_PTR)pICF < uCallerSP))
1855 pICFForUnwindTarget = pFrame;
1857 // When unwinding an exception in ReadyToRun, the JIT_PInvokeEnd helper which unlinks the ICF from
1858 // the thread will be skipped. This is because unlike jitted code, each pinvoke is wrapped by calls
1859 // to the JIT_PInvokeBegin and JIT_PInvokeEnd helpers, which push and pop the ICF on the thread. The
1860 // ICF is not linked at the method prolog and unlined at the epilog when running R2R code. Since the
1861 // JIT_PInvokeEnd helper will be skipped, we need to unlink the ICF here. If the executing method
1862 // has another pinovoke, it will re-link the ICF again when the JIT_PInvokeBegin helper is called
1864 if (ExecutionManager::IsReadyToRunCode(((InlinedCallFrame*)pFrame)->m_pCallerReturnAddress))
1866 pICFForUnwindTarget = pICFForUnwindTarget->Next();
1870 #endif // USE_PER_FRAME_PINVOKE_INIT
1872 cfThisFrame.CheckGSCookies();
1874 if (fProcessThisFrame)
1876 ExceptionTracker::InitializeCrawlFrameForExplicitFrame(&cfThisFrame, pFrame, pMD);
1877 fCrawlFrameIsDirty = true;
1879 status = ProcessExplicitFrame(
1884 cfThisFrame.CheckGSCookies();
1890 // notify Frame of unwind
1892 pFrame->ExceptionUnwind();
1894 // If we have not yet set the initial explicit frame processed by this tracker, then
1896 if (m_pInitialExplicitFrame == NULL)
1898 m_pInitialExplicitFrame = pFrame;
1902 pFrame = pFrame->Next();
1903 m_pLimitFrame = pFrame;
1905 if (UnwindPending != status)
1911 if (fCrawlFrameIsDirty)
1913 // If crawlframe is dirty, it implies that it got modified as part of explicit frame processing. Thus, we shall
1914 // reinitialize it here.
1915 ExceptionTracker::InitializeCrawlFrame(&cfThisFrame, pThread, sf, ®disp, pDispatcherContext, ControlPc, &uMethodStartPC, this);
1920 pGSCookie = (GSCookie*)cfThisFrame.GetCodeManager()->GetGSCookieAddr(cfThisFrame.pRD,
1921 &cfThisFrame.codeInfo,
1922 &cfThisFrame.codeManState);
1925 // The following function call sets the GS cookie pointers and checks the cookie.
1926 cfThisFrame.SetCurGSCookie(pGSCookie);
1929 status = HandleFunclets(&fProcessThisFrame, fIsFirstPass,
1930 cfThisFrame.GetFunction(), cfThisFrame.IsFunclet(), sf);
1933 if ((!fIsFirstPass) && (!fProcessThisFrame))
1935 // If we are unwinding and not processing the current frame, it implies that
1936 // this frame has been unwound for one of the following reasons:
1938 // 1) We have already seen it due to nested exception processing, OR
1939 // 2) We are skipping frames to find a funclet's parent and thus, its been already
1942 // If the current frame is NOT the target of unwind, update the last unwound
1943 // establisher frame. We don't do this for "target of unwind" since it has the catch handler, for a
1944 // duplicate EH clause reported in the funclet, that needs to be invoked and thus, may have valid
1945 // references to report for GC reporting.
1947 // If we are not skipping the managed frame, then LastUnwoundEstablisherFrame will be updated later in this method,
1948 // just before we return back to our caller.
1951 SetLastUnwoundEstablisherFrame(sf);
1952 fSetLastUnwoundEstablisherFrame = true;
1956 // GCX_COOP_THREAD_EXISTS ends here and we may switch to preemp mode now (if applicable).
1960 // now process managed call frame if needed
1964 if (fProcessThisFrame)
1966 status = ProcessManagedCallFrame(
1969 StackFrame::FromEstablisherFrame(pDispatcherContext->EstablisherFrame),
1974 dwTACatchHandlerClauseIndex,
1975 sfEstablisherOfActualHandlerFrame);
1979 cfThisFrame.CheckGSCookies();
1983 if (fTargetUnwind && (UnwindPending == status))
1985 SecondPassIsComplete(cfThisFrame.GetFunction(), sf);
1986 status = SecondPassComplete;
1992 // If we are unwinding and have returned successfully from unwinding the frame, then mark it as the last unwound frame for the current
1993 // exception. We don't do this if the frame is target of unwind (i.e. handling the exception) since catch block invocation may have references to be
1994 // reported (if a GC happens during catch block invocation).
1996 // If an exception escapes out of a funclet (this is only possible for fault/finally/catch clauses), then we will not return here.
1997 // Since this implies that the funclet no longer has any valid references to report, we will need to set the LastUnwoundEstablisherFrame
1998 // close to the point we detect the exception has escaped the funclet. This is done in ExceptionTracker::CallHandler and marks the
1999 // frame that invoked (and thus, contained) the funclet as the LastUnwoundEstablisherFrame.
2001 // Note: Do no add any GC triggering code between the return from ProcessManagedCallFrame and setting of the LastUnwoundEstablisherFrame
2002 if ((!fIsFirstPass) && (!fTargetUnwind) && (!fSetLastUnwoundEstablisherFrame))
2005 SetLastUnwoundEstablisherFrame(sf);
2008 if (FirstPassComplete == status)
2010 FirstPassIsComplete();
2013 if (fTargetUnwind && (status == SecondPassComplete))
2015 #ifdef USE_PER_FRAME_PINVOKE_INIT
2016 // If we have got a ICF to set as the LimitFrame, do that now.
2017 // The Frame chain is still intact and would be updated using
2018 // the LimitFrame (done after the catch handler returns).
2020 // NOTE: This should be done as the last thing before we return
2021 // back to invoke the catch handler.
2022 if (pICFForUnwindTarget != NULL)
2024 m_pLimitFrame = pICFForUnwindTarget;
2025 pICFSetAsLimitFrame = (PVOID)pICFForUnwindTarget;
2027 #endif // USE_PER_FRAME_PINVOKE_INIT
2029 // Since second pass is complete and we have reached
2030 // the frame containing the catch funclet, reset the enclosing
2031 // clause SP for the catch funclet, if applicable, to be the CallerSP of the
2034 // Refer to the detailed comment about this code
2035 // in ExceptionTracker::ProcessManagedCallFrame.
2036 if (m_fResetEnclosingClauseSPForCatchFunclet)
2038 #ifdef ESTABLISHER_FRAME_ADDRESS_IS_CALLER_SP
2039 // DispatcherContext->EstablisherFrame's value
2040 // represents the CallerSP of the current frame.
2041 UINT_PTR EnclosingClauseCallerSP = (UINT_PTR)pDispatcherContext->EstablisherFrame;
2042 #else // ESTABLISHER_FRAME_ADDRESS_IS_CALLER_SP
2043 // Extract the CallerSP from RegDisplay
2044 REGDISPLAY *pRD = cfThisFrame.GetRegisterSet();
2045 _ASSERTE(pRD->IsCallerContextValid || pRD->IsCallerSPValid);
2046 UINT_PTR EnclosingClauseCallerSP = (UINT_PTR)GetSP(pRD->pCallerContext);
2047 #endif // !ESTABLISHER_FRAME_ADDRESS_IS_CALLER_SP
2048 m_EnclosingClauseInfo = EnclosingClauseInfo(false, cfThisFrame.GetRelOffset(), EnclosingClauseCallerSP);
2050 m_fResetEnclosingClauseSPForCatchFunclet = FALSE;
2053 // If we are unwinding and the exception was not caught in managed code and we have reached the
2054 // topmost frame we saw in the first pass, then reset thread abort state if this is the last managed
2055 // code personality routine on the stack.
2056 if ((fIsFirstPass == false) && (this->GetTopmostStackFrameFromFirstPass() == sf) && (GetCatchToCallPC() == NULL))
2058 ExceptionTracker::ResetThreadAbortStatus(pThread, &cfThisFrame, sf);
2062 // fill in the out parameter
2068 void ExceptionTracker::DebugLogTrackerRanges(__in_z const char *pszTag)
2079 Thread* pThread = GetThread();
2080 ExceptionTracker* pTracker = pThread ? pThread->GetExceptionState()->m_pCurrentTracker : NULL;
2086 EH_LOG((LL_INFO100, "%s:|%02d| %p: (%p %p) %s\n", pszTag, i, pTracker, pTracker->m_ScannedStackRange.GetLowerBound().SP, pTracker->m_ScannedStackRange.GetUpperBound().SP,
2087 pTracker->IsInFirstPass() ? "1st pass" : "2nd pass"
2089 pTracker = pTracker->m_pPrevNestedInfo;
2096 bool ExceptionTracker::HandleNestedExceptionEscape(StackFrame sf, bool fIsFirstPass)
2100 // Since this function can modify the scanned stack range, which is also accessed during the GC stackwalk,
2101 // we invoke it in COOP mode so that that access to the range is synchronized.
2108 bool fResult = false;
2110 DebugLogTrackerRanges(" A");
2112 ExceptionTracker* pPreviousTracker = m_pPrevNestedInfo;
2114 while (pPreviousTracker && pPreviousTracker->m_ScannedStackRange.IsSupersededBy(sf))
2117 // If the previous tracker (representing exception E1 and whose scanned stack range is superseded by the current frame)
2118 // is in the first pass AND current tracker (representing exceptio E2) has not seen the current frame AND we are here,
2119 // it implies that we had a nested exception while the previous tracker was in the first pass.
2121 // This can happen in the following scenarios:
2123 // 1) An exception escapes a managed filter (which are invoked in the first pass). However,
2124 // that is not possible since any exception escaping them is swallowed by the runtime.
2125 // If someone does longjmp from within the filter, then that is illegal and unsupported.
2127 // 2) While processing an exception (E1), either us or native code caught it, triggering unwind. However, before the
2128 // first managed frame was processed for unwind, another native frame (below the first managed frame on the stack)
2129 // did a longjmp to go past us or raised another exception from one of their termination handlers.
2131 // Thus, we will never get a chance to switch our tracker for E1 to 2nd pass (which would be done when
2132 // ExceptionTracker::GetOrCreateTracker will be invoked for the first managed frame) since the longjmp, or the
2133 // new-exception would result in a new tracker being setup.
2135 // Below is an example of such a case that does longjmp
2136 // ----------------------------------------------------
2138 // NativeA (does setjmp) -> ManagedFunc -> NativeB
2141 // NativeB could be implemented as:
2143 // __try { // raise exception } __finally { longjmp(jmp1, 1); }
2145 // "jmp1" is the jmp_buf setup by NativeA by calling setjmp.
2147 // ManagedFunc could be implemented as:
2150 // try { NativeB(); }
2151 // finally { Console.WriteLine("Finally in ManagedFunc"); }
2153 // catch(Exception ex} { Console.WriteLine("Caught"); }
2156 // In case of nested exception, we combine the stack range (see below) since we have already seen those frames
2157 // in the specified pass for the previous tracker. However, in the example above, the current tracker (in 2nd pass)
2158 // has not see the frames which the previous tracker (which is in the first pass) has seen.
2160 // On a similar note, the __finally in the example above could also do a "throw 1;". In such a case, we would expect
2161 // that the catch in ManagedFunc would catch the exception (since "throw 1;" would be represented as SEHException in
2162 // the runtime). However, during first pass, when the exception enters ManagedFunc, the current tracker would not have
2163 // processed the ManagedFunc frame, while the previous tracker (for E1) would have. If we proceed to combine the stack
2164 // ranges, we will omit examining the catch clause in ManagedFunc.
2166 // Thus, we cannot combine the stack range yet and must let each frame, already scanned by the previous
2167 // tracker, be also processed by the current (longjmp) tracker if not already done.
2169 // Note: This is not a concern if the previous tracker (for exception E1) is in the second pass since any escaping exception (E2)
2170 // would come out of a finally/fault funclet and the runtime's funclet skipping logic will deal with it correctly.
2172 if (pPreviousTracker->IsInFirstPass() && (!this->m_ScannedStackRange.Contains(sf)))
2174 // Allow all stackframes seen by previous tracker to be seen by the current
2176 if (sf <= pPreviousTracker->m_ScannedStackRange.GetUpperBound())
2178 EH_LOG((LL_INFO100, " - not updating current tracker bounds for escaped exception since\n"));
2179 EH_LOG((LL_INFO100, " - active tracker (%p; %s) has not seen the current frame [", this, this->IsInFirstPass()?"FirstPass":"SecondPass"));
2180 EH_LOG((LL_INFO100, " - SP = %p", sf.SP));
2181 EH_LOG((LL_INFO100, "]\n"));
2182 EH_LOG((LL_INFO100, " - which the previous (%p) tracker has processed.\n", pPreviousTracker));
2187 EH_LOG((LL_INFO100, " nested exception ESCAPED\n"));
2188 EH_LOG((LL_INFO100, " - updating current tracker stack bounds\n"));
2189 m_ScannedStackRange.CombineWith(sf, &pPreviousTracker->m_ScannedStackRange);
2192 // Only the topmost tracker can be in the first pass.
2194 // (Except in the case where we have an exception thrown in a filter,
2195 // which should never escape the filter, and thus, will never supersede
2196 // the previous exception. This is why we cannot walk the entire list
2197 // of trackers to assert that they're all in the right mode.)
2199 // CONSISTENCY_CHECK(!pPreviousTracker->IsInFirstPass());
2201 // If our modes don't match, don't actually delete the supersceded exception.
2202 // If we did, we would lose valueable state on which frames have been scanned
2203 // on the second pass if an exception is thrown during the 2nd pass.
2205 // Advance the current tracker pointer now, since it may be deleted below.
2206 pPreviousTracker = pPreviousTracker->m_pPrevNestedInfo;
2211 // During unwind, at each frame we collapse exception trackers only once i.e. there cannot be multiple
2212 // exception trackers that are collapsed at each frame. Store the information of collapsed exception
2213 // tracker in current tracker to be able to find the parent frame when nested exception escapes.
2214 m_csfEHClauseOfCollapsedTracker = m_pPrevNestedInfo->m_EHClauseInfo.GetCallerStackFrameForEHClause();
2215 m_EnclosingClauseInfoOfCollapsedTracker = m_pPrevNestedInfo->m_EnclosingClauseInfoForGCReporting;
2217 EH_LOG((LL_INFO100, " - removing previous tracker\n"));
2219 ExceptionTracker* pTrackerToFree = m_pPrevNestedInfo;
2220 m_pPrevNestedInfo = pTrackerToFree->m_pPrevNestedInfo;
2222 #if defined(DEBUGGING_SUPPORTED)
2223 if (g_pDebugInterface != NULL)
2225 g_pDebugInterface->DeleteInterceptContext(pTrackerToFree->m_DebuggerExState.GetDebuggerInterceptContext());
2227 #endif // DEBUGGING_SUPPORTED
2229 CONSISTENCY_CHECK(pTrackerToFree->IsValid());
2230 FreeTrackerMemory(pTrackerToFree, memBoth);
2233 DebugLogTrackerRanges(" B");
2239 CLRUnwindStatus ExceptionTracker::ProcessExplicitFrame(
2240 CrawlFrame* pcfThisFrame,
2243 StackTraceState& STState
2251 PRECONDITION(!pcfThisFrame->IsFrameless());
2252 PRECONDITION(pcfThisFrame->GetFrame() != FRAME_TOP);
2256 Frame* pFrame = pcfThisFrame->GetFrame();
2258 EH_LOG((LL_INFO100, " [ ProcessExplicitFrame: pFrame: " FMT_ADDR " pMD: " FMT_ADDR " %s PASS ]\n", DBG_ADDR(pFrame), DBG_ADDR(pFrame->GetFunction()), fIsFirstPass ? "FIRST" : "SECOND"));
2260 if (FRAME_TOP == pFrame)
2265 if (!m_ExceptionFlags.UnwindingToFindResumeFrame())
2268 // update our exception stacktrace
2271 BOOL bReplaceStack = FALSE;
2272 BOOL bSkipLastElement = FALSE;
2274 if (STS_FirstRethrowFrame == STState)
2276 bSkipLastElement = TRUE;
2279 if (STS_NewException == STState)
2281 bReplaceStack = TRUE;
2284 // Normally, we need to notify the profiler in two cases:
2285 // 1) a brand new exception is thrown, and
2286 // 2) an exception is rethrown.
2287 // However, in this case, if the explicit frame doesn't correspond to a MD, we don't set STState to STS_Append,
2288 // so the next managed call frame we process will give another ExceptionThrown() callback to the profiler.
2289 // So we give the callback below, only in the case when we append to the stack trace.
2291 MethodDesc* pMD = pcfThisFrame->GetFunction();
2294 Thread* pThread = m_pThread;
2299 // notify profiler of new/rethrown exception
2301 if (bSkipLastElement || bReplaceStack)
2304 EEToProfilerExceptionInterfaceWrapper::ExceptionThrown(pThread);
2305 UpdatePerformanceMetrics(pcfThisFrame, bSkipLastElement, bReplaceStack);
2309 // Update stack trace
2311 m_StackTraceInfo.AppendElement(CanAllocateMemory(), NULL, sf.SP, pMD, pcfThisFrame);
2312 m_StackTraceInfo.SaveStackTrace(CanAllocateMemory(), m_hThrowable, bReplaceStack, bSkipLastElement);
2315 // make callback to debugger and/or profiler
2317 #if defined(DEBUGGING_SUPPORTED)
2318 if (ExceptionTracker::NotifyDebuggerOfStub(pThread, sf, pFrame))
2320 // Deliver the FirstChanceNotification after the debugger, if not already delivered.
2321 if (!this->DeliveredFirstChanceNotification())
2323 ExceptionNotifications::DeliverFirstChanceNotification();
2326 #endif // DEBUGGING_SUPPORTED
2328 STState = STS_Append;
2334 return UnwindPending;
2337 CLRUnwindStatus ExceptionTracker::HandleFunclets(bool* pfProcessThisFrame, bool fIsFirstPass,
2338 MethodDesc * pMD, bool fFunclet, StackFrame sf)
2348 BOOL fUnwindingToFindResumeFrame = m_ExceptionFlags.UnwindingToFindResumeFrame();
2351 // handle out-of-line finallys
2354 // In the second pass, we always want to execute this code.
2355 // In the first pass, we only execute this code if we are not unwinding to find the resume frame.
2356 // We do this to avoid calling the same filter more than once. Search for "UnwindingToFindResumeFrame"
2357 // to find a more elaborate comment in ProcessManagedCallFrame().
2359 // If we are in the first pass and we are unwinding to find the resume frame, then make sure the flag is cleared.
2360 if (fIsFirstPass && fUnwindingToFindResumeFrame)
2362 m_pSkipToParentFunctionMD = NULL;
2367 // this 'skip to parent function MD' code only seems to be needed
2368 // in the case where we call a finally funclet from the normal
2369 // execution codepath. Is there a better way to achieve the same
2370 // goal? Also, will recursion break us in any corner cases?
2371 // [ThrowInFinallyNestedInTryTest]
2372 // [GoryManagedPresentTest]
2376 // this was done for AMD64, but i don't understand why AMD64 needed the workaround..
2377 // (the workaround is the "double call on parent method" part.)
2381 // If we encounter a funclet, we need to skip all call frames up
2382 // to and including its parent method call frame. The reason
2383 // behind this is that a funclet is logically part of the parent
2384 // method has all the clauses that covered its logical location
2385 // in the parent covering its body.
2387 if (((UINT_PTR)m_pSkipToParentFunctionMD) & 1)
2389 EH_LOG((LL_INFO100, " IGNOREFRAME: SKIPTOPARENT: skipping to parent\n"));
2390 *pfProcessThisFrame = false;
2391 if ((((UINT_PTR)pMD) == (((UINT_PTR)m_pSkipToParentFunctionMD) & ~((UINT_PTR)1))) && !fFunclet)
2393 EH_LOG((LL_INFO100, " SKIPTOPARENT: found parent for funclet pMD = %p, sf.SP = %p, will stop skipping frames\n", pMD, sf.SP));
2394 _ASSERTE(0 == (((UINT_PTR)sf.SP) & 1));
2395 m_pSkipToParentFunctionMD = (MethodDesc*)sf.SP;
2397 _ASSERTE(!fUnwindingToFindResumeFrame);
2402 EH_LOG((LL_INFO100, " SKIPTOPARENT: found funclet pMD = %p, will start skipping frames\n", pMD));
2403 _ASSERTE(0 == (((UINT_PTR)pMD) & 1));
2404 m_pSkipToParentFunctionMD = (MethodDesc*)(((UINT_PTR)pMD) | 1);
2408 if (sf.SP == ((UINT_PTR)m_pSkipToParentFunctionMD))
2410 EH_LOG((LL_INFO100, " IGNOREFRAME: SKIPTOPARENT: got double call on parent method\n"));
2411 *pfProcessThisFrame = false;
2413 else if (m_pSkipToParentFunctionMD && (sf.SP > ((UINT_PTR)m_pSkipToParentFunctionMD)))
2415 EH_LOG((LL_INFO100, " SKIPTOPARENT: went past parent method\n"));
2416 m_pSkipToParentFunctionMD = NULL;
2421 return UnwindPending;
2424 CLRUnwindStatus ExceptionTracker::ProcessManagedCallFrame(
2425 CrawlFrame* pcfThisFrame,
2427 StackFrame sfEstablisherFrame,
2428 EXCEPTION_RECORD* pExceptionRecord,
2429 StackTraceState STState,
2430 UINT_PTR uMethodStartPC,
2431 DWORD dwExceptionFlags,
2432 DWORD dwTACatchHandlerClauseIndex,
2433 StackFrame sfEstablisherOfActualHandlerFrame
2441 PRECONDITION(pcfThisFrame->IsFrameless());
2445 UINT_PTR uControlPC = (UINT_PTR)GetControlPC(pcfThisFrame->GetRegisterSet());
2446 CLRUnwindStatus ReturnStatus = UnwindPending;
2448 MethodDesc* pMD = pcfThisFrame->GetFunction();
2450 bool fIsFirstPass = !(dwExceptionFlags & EXCEPTION_UNWINDING);
2451 bool fIsFunclet = pcfThisFrame->IsFunclet();
2453 CONSISTENCY_CHECK(IsValid());
2454 CONSISTENCY_CHECK(ThrowableIsValid() || !fIsFirstPass);
2455 CONSISTENCY_CHECK(pMD != 0);
2457 EH_LOG((LL_INFO100, " [ ProcessManagedCallFrame this=%p, %s PASS ]\n", this, (fIsFirstPass ? "FIRST" : "SECOND")));
2459 EH_LOG((LL_INFO100, " [ method: %s%s, %s ]\n",
2460 (fIsFunclet ? "FUNCLET of " : ""),
2461 pMD->m_pszDebugMethodName, pMD->m_pszDebugClassName));
2463 Thread *pThread = GetThread();
2466 INDEBUG( DumpClauses(pcfThisFrame->GetJitManager(), pcfThisFrame->GetMethodToken(), uMethodStartPC, uControlPC) );
2468 bool fIsILStub = pMD->IsILStub();
2469 bool fGiveDebuggerAndProfilerNotification = !fIsILStub;
2470 BOOL fUnwindingToFindResumeFrame = m_ExceptionFlags.UnwindingToFindResumeFrame();
2472 bool fIgnoreThisFrame = false;
2473 bool fProcessThisFrameToFindResumeFrameOnly = false;
2475 MethodDesc * pUserMDForILStub = NULL;
2476 Frame * pILStubFrame = NULL;
2477 if (fIsILStub && !fIsFunclet) // only make this callback on the main method body of IL stubs
2478 pUserMDForILStub = GetUserMethodForILStub(pThread, sf.SP, pMD, &pILStubFrame);
2480 #ifdef FEATURE_CORRUPTING_EXCEPTIONS
2481 BOOL fCanMethodHandleException = TRUE;
2482 CorruptionSeverity currentSeverity = NotCorrupting;
2484 // Switch to COOP mode since we are going to request throwable
2487 // We must defer to the MethodDesc of the user method instead of the IL stub
2488 // itself because the user can specify the policy on a per-method basis and
2489 // that won't be reflected via the IL stub's MethodDesc.
2490 MethodDesc * pMDWithCEAttribute = (pUserMDForILStub != NULL) ? pUserMDForILStub : pMD;
2492 // Check if the exception can be delivered to the method? It will check if the exception
2493 // is a CE or not. If it is, it will check if the method can process it or not.
2494 currentSeverity = pThread->GetExceptionState()->GetCurrentExceptionTracker()->GetCorruptionSeverity();
2495 fCanMethodHandleException = CEHelper::CanMethodHandleException(currentSeverity, pMDWithCEAttribute);
2497 #endif // FEATURE_CORRUPTING_EXCEPTIONS
2499 // Doing rude abort. Skip all non-constrained execution region code.
2500 // When rude abort is initiated, we cannot intercept any exceptions.
2501 if ((pThread->IsRudeAbortInitiated() && !pThread->IsWithinCer(pcfThisFrame)))
2503 // If we are unwinding to find the real resume frame, then we cannot ignore frames yet.
2504 // We need to make sure we find the correct resume frame before starting to ignore frames.
2505 if (fUnwindingToFindResumeFrame)
2507 fProcessThisFrameToFindResumeFrameOnly = true;
2511 EH_LOG((LL_INFO100, " IGNOREFRAME: rude abort/CE\n"));
2512 fIgnoreThisFrame = true;
2517 // BEGIN resume frame processing code
2519 // Often times, we'll run into the situation where the actual resume call frame
2520 // is not the same call frame that we see the catch clause in. The reason for this
2521 // is that catch clauses get duplicated down to cover funclet code ranges. When we
2522 // see a catch clause covering our control PC, but it is marked as a duplicate, we
2523 // need to continue to unwind until we find the same clause that isn't marked as a
2524 // duplicate. This will be the correct resume frame.
2526 // We actually achieve this skipping by observing that if we are catching at a
2527 // duplicated clause, all the call frames we should be skipping have already been
2528 // processed by a previous exception dispatch. So if we allow the unwind to
2529 // continue, we will immediately bump into the ExceptionTracker for the previous
2530 // dispatch, and our resume frame will be the last frame seen by that Tracker.
2532 // Note that we will have visited all the EH clauses for a particular method when we
2533 // see its first funclet (the funclet which is closest to the leaf). We need to make
2534 // sure we don't process any EH clause again when we see other funclets or the parent
2535 // method until we get to the real resume frame. The real resume frame may be another
2536 // funclet, which is why we can't blindly skip all funclets until we see the parent
2539 // If the exception is handled by the method, then UnwindingToFindResumeFrame takes
2540 // care of the skipping. We basically skip everything when we are unwinding to find
2541 // the resume frame. If the exception is not handled by the method, then we skip all the
2542 // funclets until we get to the parent method. The logic to handle this is in
2543 // HandleFunclets(). In the first pass, HandleFunclets() only kicks
2544 // in if we are not unwinding to find the resume frame.
2546 // Then on the second pass, we need to process frames up to the initial place where
2547 // we saw the catch clause, which means upto and including part of the resume stack
2548 // frame. Then we need to skip the call frames up to the real resume stack frame
2551 // In the second pass, we have the same problem with skipping funclets as in the first
2552 // pass. However, in this case, we know exactly which frame is our target unwind frame
2553 // (EXCEPTION_TARGET_UNWIND will be set). So we blindly unwind until we see the parent
2554 // method, or until the target unwind frame.
2555 PTR_EXCEPTION_CLAUSE_TOKEN pLimitClauseToken = NULL;
2556 if (!fIgnoreThisFrame && !fIsFirstPass && !m_sfResumeStackFrame.IsNull() && (sf >= m_sfResumeStackFrame))
2558 EH_LOG((LL_INFO100, " RESUMEFRAME: sf is %p and m_sfResumeStackFrame: %p\n", sf.SP, m_sfResumeStackFrame.SP));
2559 EH_LOG((LL_INFO100, " RESUMEFRAME: %s initial resume frame: %p\n", (sf == m_sfResumeStackFrame) ? "REACHED" : "PASSED" , m_sfResumeStackFrame.SP));
2561 // process this frame to call handlers
2562 EH_LOG((LL_INFO100, " RESUMEFRAME: Found last frame to process finallys in, need to process only part of call frame\n"));
2563 EH_LOG((LL_INFO100, " RESUMEFRAME: Limit clause token: %p\n", m_pClauseForCatchToken));
2564 pLimitClauseToken = m_pClauseForCatchToken;
2566 // The limit clause is the same as the clause we're catching at. It is used
2567 // as the last clause we process in the "inital resume frame". Anything further
2568 // down the list of clauses is skipped along with all call frames up to the actual
2570 CONSISTENCY_CHECK_MSG(sf == m_sfResumeStackFrame, "Passed initial resume frame and fIgnoreThisFrame wasn't set!");
2573 // END resume frame code
2576 if (!fIgnoreThisFrame)
2578 BOOL fFoundHandler = FALSE;
2579 DWORD_PTR dwHandlerStartPC = NULL;
2581 BOOL bReplaceStack = FALSE;
2582 BOOL bSkipLastElement = FALSE;
2583 bool fUnwindFinished = false;
2585 if (STS_FirstRethrowFrame == STState)
2587 bSkipLastElement = TRUE;
2590 if (STS_NewException == STState)
2592 bReplaceStack = TRUE;
2595 // We need to notify the profiler on the first pass in two cases:
2596 // 1) a brand new exception is thrown, and
2597 // 2) an exception is rethrown.
2598 if (fIsFirstPass && (bSkipLastElement || bReplaceStack))
2601 EEToProfilerExceptionInterfaceWrapper::ExceptionThrown(pThread);
2602 UpdatePerformanceMetrics(pcfThisFrame, bSkipLastElement, bReplaceStack);
2605 if (!fUnwindingToFindResumeFrame)
2608 // update our exception stacktrace, ignoring IL stubs
2610 if (fIsFirstPass && !pMD->IsILStub())
2614 m_StackTraceInfo.AppendElement(CanAllocateMemory(), uControlPC, sf.SP, pMD, pcfThisFrame);
2615 m_StackTraceInfo.SaveStackTrace(CanAllocateMemory(), m_hThrowable, bReplaceStack, bSkipLastElement);
2619 // make callback to debugger and/or profiler
2621 if (fGiveDebuggerAndProfilerNotification)
2625 EEToProfilerExceptionInterfaceWrapper::ExceptionSearchFunctionEnter(pMD);
2627 // Notfiy the debugger that we are on the first pass for a managed exception.
2628 // Note that this callback is made for every managed frame.
2629 EEToDebuggerExceptionInterfaceWrapper::FirstChanceManagedException(pThread, uControlPC, sf.SP);
2631 #if defined(DEBUGGING_SUPPORTED)
2632 _ASSERTE(this == pThread->GetExceptionState()->m_pCurrentTracker);
2634 // check if the current exception has been intercepted.
2635 if (m_ExceptionFlags.DebuggerInterceptInfo())
2637 // According to the x86 implementation, we don't need to call the ExceptionSearchFunctionLeave()
2638 // profiler callback.
2639 StackFrame sfInterceptStackFrame;
2640 m_DebuggerExState.GetDebuggerInterceptInfo(NULL, NULL,
2641 reinterpret_cast<PBYTE *>(&(sfInterceptStackFrame.SP)),
2644 // Save the target unwind frame just like we do when we find a catch clause.
2645 m_sfResumeStackFrame = sfInterceptStackFrame;
2646 ReturnStatus = FirstPassComplete;
2649 #endif // DEBUGGING_SUPPORTED
2651 // Attempt to deliver the first chance notification to the AD only *AFTER* the debugger
2652 // has done that, provided we have not already delivered it.
2653 if (!this->DeliveredFirstChanceNotification())
2655 ExceptionNotifications::DeliverFirstChanceNotification();
2660 #if defined(DEBUGGING_SUPPORTED)
2661 _ASSERTE(this == pThread->GetExceptionState()->m_pCurrentTracker);
2663 // check if the exception is intercepted.
2664 if (m_ExceptionFlags.DebuggerInterceptInfo())
2666 MethodDesc* pInterceptMD = NULL;
2667 StackFrame sfInterceptStackFrame;
2669 // check if we have reached the interception point yet
2670 m_DebuggerExState.GetDebuggerInterceptInfo(&pInterceptMD, NULL,
2671 reinterpret_cast<PBYTE *>(&(sfInterceptStackFrame.SP)),
2674 // If the exception has gone unhandled in the first pass, we wouldn't have a chance
2675 // to set the target unwind frame. Check for this case now.
2676 if (m_sfResumeStackFrame.IsNull())
2678 m_sfResumeStackFrame = sfInterceptStackFrame;
2680 _ASSERTE(m_sfResumeStackFrame == sfInterceptStackFrame);
2682 if ((pInterceptMD == pMD) &&
2683 (sfInterceptStackFrame == sf))
2685 // If we have reached the stack frame at which the exception is intercepted,
2686 // then finish the second pass prematurely.
2687 SecondPassIsComplete(pMD, sf);
2688 ReturnStatus = SecondPassComplete;
2692 #endif // DEBUGGING_SUPPORTED
2694 // According to the x86 implementation, we don't need to call the ExceptionUnwindFunctionEnter()
2695 // profiler callback when an exception is intercepted.
2696 EEToProfilerExceptionInterfaceWrapper::ExceptionUnwindFunctionEnter(pMD);
2703 IJitManager* pJitMan = pcfThisFrame->GetJitManager();
2704 const METHODTOKEN& MethToken = pcfThisFrame->GetMethodToken();
2706 EH_CLAUSE_ENUMERATOR EnumState;
2709 #ifdef FEATURE_CORRUPTING_EXCEPTIONS
2710 // The method cannot handle the exception (e.g. cannot handle the CE), then simply bail out
2711 // without examining the EH clauses in it.
2712 if (!fCanMethodHandleException)
2714 LOG((LF_EH, LL_INFO100, "ProcessManagedCallFrame - CEHelper decided not to look for exception handlers in the method(MD:%p).\n", pMD));
2716 // Set the flag to skip this frame since the CE cannot be delivered
2717 _ASSERTE(currentSeverity == ProcessCorrupting);
2719 // Force EHClause count to be zero
2723 #endif // FEATURE_CORRUPTING_EXCEPTIONS
2725 EHCount = pJitMan->InitializeEHEnumeration(MethToken, &EnumState);
2731 // For a method that may have nested funclets, it is possible that a reference may be
2732 // dead at the point where control flow left the method but may become active once
2733 // a funclet is executed.
2735 // Upon returning from the funclet but before the next funclet is invoked, a GC
2736 // may happen if we are in preemptive mode. Since the GC stackwalk will commence
2737 // at the original IP at which control left the method, it can result in the reference
2738 // not being updated (since it was dead at the point control left the method) if the object
2739 // is moved during GC.
2741 // To address this, we will indefinitely switch to COOP mode while enumerating, and invoking,
2744 // This switch is also required for another scenario: we may be in unwind phase and the current frame
2745 // may not have any termination handlers to be invoked (i.e. it may have zero EH clauses applicable to
2746 // the unwind phase). If we do not switch to COOP mode for such a frame, we could remain in preemp mode.
2747 // Upon returning back from ProcessOSExceptionNotification in ProcessCLRException, when we attempt to
2748 // switch to COOP mode to update the LastUnwoundEstablisherFrame, we could get blocked due to an
2749 // active GC, prior to peforming the update.
2751 // In this case, if the GC stackwalk encounters the current frame and attempts to check if it has been
2752 // unwound by an exception, then while it has been unwound (especially since it had no termination handlers)
2753 // logically, it will not figure out as unwound and thus, GC stackwalk would attempt to report references from
2754 // it, which is incorrect.
2756 // Thus, when unwinding, we will always switch to COOP mode indefinitely, irrespective of whether
2757 // the frame has EH clauses to be processed or not.
2760 // We will also forbid any GC to happen between successive funclet invocations.
2761 // This will be automatically undone when the contract goes off the stack as the method
2762 // returns back to its caller.
2766 for (unsigned i = 0; i < EHCount; i++)
2768 EE_ILEXCEPTION_CLAUSE EHClause;
2769 PTR_EXCEPTION_CLAUSE_TOKEN pEHClauseToken = pJitMan->GetNextEHClause(&EnumState, &EHClause);
2771 EH_LOG((LL_INFO100, " considering %s clause [%x,%x), ControlPc is %s clause (offset %x)",
2772 (IsFault(&EHClause) ? "fault" :
2773 (IsFinally(&EHClause) ? "finally" :
2774 (IsFilterHandler(&EHClause) ? "filter" :
2775 (IsTypedHandler(&EHClause) ? "typed" : "unknown")))),
2776 EHClause.TryStartPC,
2778 (ClauseCoversPC(&EHClause, pcfThisFrame->GetRelOffset()) ? "inside" : "outside"),
2779 pcfThisFrame->GetRelOffset()
2782 LOG((LF_EH, LL_INFO100, "\n"));
2784 // If we have a valid EstablisherFrame for the managed frame where
2785 // ThreadAbort was raised after the catch block, then see if we
2786 // have reached that frame during the exception dispatch. If we
2787 // have, then proceed to skip applicable EH clauses.
2788 if ((!sfEstablisherOfActualHandlerFrame.IsNull()) && (sfEstablisherFrame == sfEstablisherOfActualHandlerFrame))
2790 // We should have a valid index of the EH clause (corresponding to a catch block) after
2791 // which thread abort was raised?
2792 _ASSERTE(dwTACatchHandlerClauseIndex > 0);
2794 // Since we have the index, check if the current EH clause index
2795 // is less then saved index. If it is, then it implies that
2796 // we are evaluating clauses that lie "before" the EH clause
2797 // for the catch block "after" which thread abort was raised.
2799 // Since ThreadAbort has to make forward progress, we will
2800 // skip evaluating any such EH clauses. Two things can happen:
2802 // 1) We will find clauses representing handlers beyond the
2803 // catch block after which ThreadAbort was raised. Since this is
2804 // what we want, we evaluate them.
2806 // 2) There wont be any more clauses implying that the catch block
2807 // after which the exception was raised was the outermost
2808 // handler in the method. Thus, the exception will escape out,
2809 // which is semantically the correct thing to happen.
2811 // The premise of this check is based upon a JIT compiler's implementation
2812 // detail: when it generates EH clauses, JIT compiler will order them from
2813 // top->bottom (when reading a method) and inside->out when reading nested
2816 // This assumption is not new since the basic EH type-matching is reliant
2817 // on this very assumption. However, now we have one more candidate that
2818 // gets to rely on it.
2820 // Eventually, this enables forward progress of thread abort exception.
2821 if (i <= (dwTACatchHandlerClauseIndex -1))
2823 EH_LOG((LL_INFO100, " skipping the evaluation of EH clause (index=%d) since we cannot process an exception in a handler\n", i));
2824 EH_LOG((LL_INFO100, " that exists prior to the one (index=%d) after which ThreadAbort was [re]raised.\n", dwTACatchHandlerClauseIndex));
2831 // see comment above where we set pLimitClauseToken
2832 if (pEHClauseToken == pLimitClauseToken)
2834 EH_LOG((LL_INFO100, " found limit clause, stopping clause enumeration\n"));
2836 // <GC_FUNCLET_REFERENCE_REPORTING>
2838 // If we are here, the exception has been identified to be handled by a duplicate catch clause
2839 // that is protecting the current funclet. The call to SetEnclosingClauseInfo (below)
2840 // will setup the CallerSP (for GC reference reporting) to be the SP of the
2841 // of the caller of current funclet (where the exception has happened, or is escaping from).
2843 // However, we need the CallerSP to be set as the SP of the caller of the
2844 // actual frame that will contain (and invoke) the catch handler corresponding to
2845 // the duplicate clause. But that isn't available right now and we can only know
2846 // once we unwind upstack to reach the target frame.
2848 // Thus, upon reaching the target frame and before invoking the catch handler,
2849 // we will fix up the CallerSP (for GC reporting) to be that of the caller of the
2850 // target frame that will be invoking the actual catch handler.
2852 // </GC_FUNCLET_REFERENCE_REPORTING>
2854 // for catch clauses
2855 SetEnclosingClauseInfo(fIsFunclet,
2856 pcfThisFrame->GetRelOffset(),
2857 GetSP(pcfThisFrame->GetRegisterSet()->pCallerContext));
2858 fUnwindFinished = true;
2859 m_fFixupCallerSPForGCReporting = true;
2863 BOOL fTermHandler = IsFaultOrFinally(&EHClause);
2864 fFoundHandler = FALSE;
2866 if (( fIsFirstPass && fTermHandler) ||
2867 (!fIsFirstPass && !fTermHandler))
2872 if (ClauseCoversPC(&EHClause, pcfThisFrame->GetRelOffset()))
2874 EH_LOG((LL_INFO100, " clause covers ControlPC\n"));
2876 dwHandlerStartPC = pJitMan->GetCodeAddressForRelOffset(MethToken, EHClause.HandlerStartPC);
2878 if (fUnwindingToFindResumeFrame)
2880 CONSISTENCY_CHECK(fIsFirstPass);
2883 // m_pClauseForCatchToken can only be NULL for continuable exceptions, but we should never
2884 // get here if we are handling continuable exceptions. fUnwindingToFindResumeFrame is
2885 // only true at the end of the first pass.
2886 _ASSERTE(m_pClauseForCatchToken != NULL);
2888 // handlers match and not duplicate?
2889 EH_LOG((LL_INFO100, " RESUMEFRAME: catch handler: [%x,%x], this handler: [%x,%x] %s\n",
2890 m_ClauseForCatch.HandlerStartPC,
2891 m_ClauseForCatch.HandlerEndPC,
2892 EHClause.HandlerStartPC,
2893 EHClause.HandlerEndPC,
2894 IsDuplicateClause(&EHClause) ? "[duplicate]" : ""));
2896 if ((m_ClauseForCatch.HandlerStartPC == EHClause.HandlerStartPC) &&
2897 (m_ClauseForCatch.HandlerEndPC == EHClause.HandlerEndPC))
2899 EH_LOG((LL_INFO100, " RESUMEFRAME: found clause with same handler as catch\n"));
2900 if (!IsDuplicateClause(&EHClause))
2902 CONSISTENCY_CHECK(fIsFirstPass);
2904 if (fProcessThisFrameToFindResumeFrameOnly)
2906 EH_LOG((LL_INFO100, " RESUMEFRAME: identified real resume frame, \
2907 but rude thread abort is initiated: %p\n", sf.SP));
2909 // We have found the real resume frame. However, rude thread abort
2910 // has been initiated. Thus, we need to continue the first pass
2911 // as if we have not found a handler yet. To do so, we need to
2912 // reset all the information we have saved when we find the handler.
2913 m_ExceptionFlags.ResetUnwindingToFindResumeFrame();
2915 m_uCatchToCallPC = NULL;
2916 m_pClauseForCatchToken = NULL;
2918 m_sfResumeStackFrame.Clear();
2919 ReturnStatus = UnwindPending;
2923 EH_LOG((LL_INFO100, " RESUMEFRAME: identified real resume frame: %p\n", sf.SP));
2925 // Save off the index and the EstablisherFrame of the EH clause of the non-duplicate handler
2926 // that decided to handle the exception. We may need it
2927 // if a ThreadAbort is raised after the catch block
2929 m_dwIndexClauseForCatch = i + 1;
2930 m_sfEstablisherOfActualHandlerFrame = sfEstablisherFrame;
2931 #ifndef ESTABLISHER_FRAME_ADDRESS_IS_CALLER_SP
2932 m_sfCallerOfActualHandlerFrame = EECodeManager::GetCallerSp(pcfThisFrame->pRD);
2933 #else // !ESTABLISHER_FRAME_ADDRESS_IS_CALLER_SP
2934 // On ARM & ARM64, the EstablisherFrame is the value of SP at the time a function was called and before it's prolog
2935 // executed. Effectively, it is the SP of the caller.
2936 m_sfCallerOfActualHandlerFrame = sfEstablisherFrame.SP;
2937 #endif // ESTABLISHER_FRAME_ADDRESS_IS_CALLER_SP
2939 ReturnStatus = FirstPassComplete;
2946 else if (IsFilterHandler(&EHClause))
2948 DWORD_PTR dwResult = EXCEPTION_CONTINUE_SEARCH;
2949 DWORD_PTR dwFilterStartPC;
2951 dwFilterStartPC = pJitMan->GetCodeAddressForRelOffset(MethToken, EHClause.FilterOffset);
2953 EH_LOG((LL_INFO100, " calling filter\n"));
2955 // @todo : If user code throws a StackOveflowException and we have plenty of stack,
2956 // we probably don't want to be so strict in not calling handlers.
2957 if (! IsStackOverflowException())
2959 // Save the current EHClause Index and Establisher of the clause post which
2960 // ThreadAbort was raised. This is done an exception handled inside a filter
2961 // reset the state that was setup before the filter was invoked.
2963 // We dont have to do this for finally/fault clauses since they execute
2964 // in the second pass and by that time, we have already skipped the required
2965 // EH clauses in the applicable stackframe.
2966 DWORD dwPreFilterTACatchHandlerClauseIndex = dwTACatchHandlerClauseIndex;
2967 StackFrame sfPreFilterEstablisherOfActualHandlerFrame = sfEstablisherOfActualHandlerFrame;
2971 // We want to call filters even if the thread is aborting, so suppress abort
2972 // checks while the filter runs.
2973 ThreadPreventAsyncHolder preventAbort(TRUE);
2975 // for filter clauses
2976 SetEnclosingClauseInfo(fIsFunclet,
2977 pcfThisFrame->GetRelOffset(),
2978 GetSP(pcfThisFrame->GetRegisterSet()->pCallerContext));
2979 #ifdef USE_FUNCLET_CALL_HELPER
2980 // On ARM & ARM64, the OS passes us the CallerSP for the frame for which personality routine has been invoked.
2981 // Since IL filters are invoked in the first pass, we pass this CallerSP to the filter funclet which will
2982 // then lookup the actual frame pointer value using it since we dont have a frame pointer to pass to it
2985 // Assert our invariants (we had set them up in InitializeCrawlFrame):
2986 REGDISPLAY *pCurRegDisplay = pcfThisFrame->GetRegisterSet();
2988 CONTEXT *pContext = NULL;
2989 #ifndef USE_CURRENT_CONTEXT_IN_FILTER
2990 // 1) In first pass, we dont have a valid current context IP
2991 _ASSERTE(GetIP(pCurRegDisplay->pCurrentContext) == 0);
2992 pContext = pCurRegDisplay->pCallerContext;
2994 pContext = pCurRegDisplay->pCurrentContext;
2995 #endif // !USE_CURRENT_CONTEXT_IN_FILTER
2996 #ifdef USE_CALLER_SP_IN_FUNCLET
2997 // 2) Our caller context and caller SP are valid
2998 _ASSERTE(pCurRegDisplay->IsCallerContextValid && pCurRegDisplay->IsCallerSPValid);
2999 // 3) CallerSP is intact
3000 _ASSERTE(GetSP(pCurRegDisplay->pCallerContext) == GetRegdisplaySP(pCurRegDisplay));
3001 #endif // USE_CALLER_SP_IN_FUNCLET
3002 #endif // USE_FUNCLET_CALL_HELPER
3004 // CallHandler expects to be in COOP mode.
3006 dwResult = CallHandler(dwFilterStartPC, sf, &EHClause, pMD, Filter X86_ARG(pContext) ARM_ARG(pContext) ARM64_ARG(pContext));
3011 // We had an exception in filter invocation that remained unhandled.
3013 // Sync managed exception state, for the managed thread, based upon the active exception tracker.
3014 pThread->SyncManagedExceptionState(false);
3016 // we've returned from the filter abruptly, now out of managed code
3017 m_EHClauseInfo.SetManagedCodeEntered(FALSE);
3019 EH_LOG((LL_INFO100, " filter threw an exception\n"));
3022 EEToProfilerExceptionInterfaceWrapper::ExceptionSearchFilterLeave();
3023 m_EHClauseInfo.ResetInfo();
3027 EX_END_CATCH(SwallowAllExceptions);
3029 // Reset the EH clause Index and Establisher of the TA reraise clause
3030 pThread->m_dwIndexClauseForCatch = dwPreFilterTACatchHandlerClauseIndex;
3031 pThread->m_sfEstablisherOfActualHandlerFrame = sfPreFilterEstablisherOfActualHandlerFrame;
3033 if (pThread->IsRudeAbortInitiated() && !pThread->IsWithinCer(pcfThisFrame))
3035 EH_LOG((LL_INFO100, " IGNOREFRAME: rude abort\n"));
3041 EH_LOG((LL_INFO100, " STACKOVERFLOW: filter not called due to lack of guard page\n"));
3045 if (EXCEPTION_EXECUTE_HANDLER == dwResult)
3047 fFoundHandler = TRUE;
3049 else if (EXCEPTION_CONTINUE_SEARCH != dwResult)
3052 // Behavior is undefined according to the spec. Let's not execute the handler.
3055 EH_LOG((LL_INFO100, " filter returned %s\n", (fFoundHandler ? "EXCEPTION_EXECUTE_HANDLER" : "EXCEPTION_CONTINUE_SEARCH")));
3057 else if (IsTypedHandler(&EHClause))
3061 TypeHandle thrownType = TypeHandle();
3062 OBJECTREF oThrowable = m_pThread->GetThrowable();
3063 if (oThrowable != NULL)
3065 oThrowable = PossiblyUnwrapThrowable(oThrowable, pcfThisFrame->GetAssembly());
3066 thrownType = oThrowable->GetTrueTypeHandle();
3069 if (!thrownType.IsNull())
3071 if (EHClause.ClassToken == mdTypeRefNil)
3073 // this is a catch(...)
3074 fFoundHandler = TRUE;
3081 typeHnd = pJitMan->ResolveEHClause(&EHClause, pcfThisFrame);
3083 EX_CATCH_EX(Exception)
3086 GET_EXCEPTION()->GetMessage(msg);
3087 msg.Insert(msg.Begin(), W("Cannot resolve EH clause:\n"));
3088 EEPOLICY_HANDLE_FATAL_ERROR_WITH_MESSAGE(COR_E_FAILFAST, msg.GetUnicode());
3090 EX_END_CATCH(RethrowTransientExceptions);
3093 " clause type = %s\n",
3094 (!typeHnd.IsNull() ? typeHnd.GetMethodTable()->GetDebugClassName()
3095 : "<couldn't resolve>")));
3097 " thrown type = %s\n",
3098 thrownType.GetMethodTable()->GetDebugClassName()));
3100 fFoundHandler = !typeHnd.IsNull() && ExceptionIsOfRightType(typeHnd, thrownType);
3106 _ASSERTE(fTermHandler);
3107 fFoundHandler = TRUE;
3114 _ASSERTE(IsFilterHandler(&EHClause) || IsTypedHandler(&EHClause));
3116 EH_LOG((LL_INFO100, " found catch at 0x%p, sp = 0x%p\n", dwHandlerStartPC, sf.SP));
3117 m_uCatchToCallPC = dwHandlerStartPC;
3118 m_pClauseForCatchToken = pEHClauseToken;
3119 m_ClauseForCatch = EHClause;
3121 m_sfResumeStackFrame = sf;
3123 #if defined(DEBUGGING_SUPPORTED) || defined(PROFILING_SUPPORTED)
3125 // notify the debugger and profiler
3127 if (fGiveDebuggerAndProfilerNotification)
3129 EEToProfilerExceptionInterfaceWrapper::ExceptionSearchCatcherFound(pMD);
3135 // NotifyOfCHFFilter has two behaviors
3136 // * Notifify debugger, get interception info and unwind (function will not return)
3137 // In this case, m_sfResumeStackFrame is expected to be NULL or the frame of interception.
3138 // We NULL it out because we get the interception event after this point.
3139 // * Notifify debugger and return.
3140 // In this case the normal EH proceeds and we need to reset m_sfResumeStackFrame to the sf catch handler.
3141 // TODO: remove this call and try to report the IL catch handler in the IL stub itself.
3142 m_sfResumeStackFrame.Clear();
3143 EEToDebuggerExceptionInterfaceWrapper::NotifyOfCHFFilter((EXCEPTION_POINTERS*)&m_ptrs, pILStubFrame);
3144 m_sfResumeStackFrame = sf;
3148 // We don't need to do anything special for continuable exceptions after calling
3149 // this callback. We are going to start unwinding anyway.
3150 EEToDebuggerExceptionInterfaceWrapper::FirstChanceManagedExceptionCatcherFound(pThread, pMD, (TADDR) uMethodStartPC, sf.SP,
3154 // If the exception is intercepted, then the target unwind frame may not be the
3155 // stack frame we are currently processing, so clear it now. We'll set it
3156 // later in second pass.
3157 if (pThread->GetExceptionState()->GetFlags()->DebuggerInterceptInfo())
3159 m_sfResumeStackFrame.Clear();
3161 #endif //defined(DEBUGGING_SUPPORTED) || defined(PROFILING_SUPPORTED)
3164 // BEGIN resume frame code
3166 EH_LOG((LL_INFO100, " RESUMEFRAME: initial resume stack frame: %p\n", sf.SP));
3168 if (IsDuplicateClause(&EHClause))
3170 EH_LOG((LL_INFO100, " RESUMEFRAME: need to unwind to find real resume frame\n"));
3171 m_ExceptionFlags.SetUnwindingToFindResumeFrame();
3173 // This is a duplicate catch funclet. As a result, we will continue to let the
3174 // exception dispatch proceed upstack to find the actual frame where the
3177 // At the same time, we also need to save the CallerSP of the frame containing
3178 // the catch funclet (like we do for other funclets). If the current frame
3179 // represents a funclet that was invoked by JITted code, then we will save
3180 // the caller SP of the current frame when we see it during the 2nd pass -
3181 // refer to the use of "pLimitClauseToken" in the code above.
3183 // However, that is not the callerSP of the frame containing the catch funclet
3184 // as the actual frame containing the funclet (and where it will be executed)
3185 // is the one that will be the target of unwind during the first pass.
3187 // To correctly get that, we will determine if the current frame is a funclet
3188 // and if it was invoked from JITted code. If this is true, then current frame
3189 // represents a finally funclet invoked non-exceptionally (from its parent frame
3190 // or yet another funclet). In such a case, we will set a flag indicating that
3191 // we need to reset the enclosing clause SP for the catch funclet and later,
3192 // when 2nd pass reaches the actual frame containing the catch funclet to be
3193 // executed, we will update the enclosing clause SP if the
3194 // "m_fResetEnclosingClauseSPForCatchFunclet" flag is set, just prior to
3195 // invoking the catch funclet.
3198 REGDISPLAY* pCurRegDisplay = pcfThisFrame->GetRegisterSet();
3199 _ASSERTE(pCurRegDisplay->IsCallerContextValid);
3200 TADDR adrReturnAddressFromFunclet = PCODEToPINSTR(GetIP(pCurRegDisplay->pCallerContext)) - STACKWALK_CONTROLPC_ADJUST_OFFSET;
3201 m_fResetEnclosingClauseSPForCatchFunclet = ExecutionManager::IsManagedCode(adrReturnAddressFromFunclet);
3204 ReturnStatus = UnwindPending;
3208 EH_LOG((LL_INFO100, " RESUMEFRAME: no extra unwinding required, real resume frame: %p\n", sf.SP));
3210 // Save off the index and the EstablisherFrame of the EH clause of the non-duplicate handler
3211 // that decided to handle the exception. We may need it
3212 // if a ThreadAbort is raised after the catch block
3214 m_dwIndexClauseForCatch = i + 1;
3215 m_sfEstablisherOfActualHandlerFrame = sfEstablisherFrame;
3217 #ifndef ESTABLISHER_FRAME_ADDRESS_IS_CALLER_SP
3218 m_sfCallerOfActualHandlerFrame = EECodeManager::GetCallerSp(pcfThisFrame->pRD);
3219 #else // !ESTABLISHER_FRAME_ADDRESS_IS_CALLER_SP
3220 m_sfCallerOfActualHandlerFrame = sfEstablisherFrame.SP;
3221 #endif // ESTABLISHER_FRAME_ADDRESS_IS_CALLER_SP
3223 // END resume frame code
3226 ReturnStatus = FirstPassComplete;
3231 EH_LOG((LL_INFO100, " found finally/fault at 0x%p\n", dwHandlerStartPC));
3232 _ASSERTE(fTermHandler);
3234 // @todo : If user code throws a StackOveflowException and we have plenty of stack,
3235 // we probably don't want to be so strict in not calling handlers.
3236 if (!IsStackOverflowException())
3240 // for finally clauses
3241 SetEnclosingClauseInfo(fIsFunclet,
3242 pcfThisFrame->GetRelOffset(),
3243 GetSP(pcfThisFrame->GetRegisterSet()->pCallerContext));
3245 // We have switched to indefinite COOP mode just before this loop started.
3246 // Since we also forbid GC during second pass, disable it now since
3247 // invocation of managed code can result in a GC.
3249 dwStatus = CallHandler(dwHandlerStartPC, sf, &EHClause, pMD, FaultFinally X86_ARG(pcfThisFrame->GetRegisterSet()->pCurrentContext) ARM_ARG(pcfThisFrame->GetRegisterSet()->pCurrentContext) ARM64_ARG(pcfThisFrame->GetRegisterSet()->pCurrentContext));
3251 // Once we return from a funclet, forbid GC again (refer to comment before start of the loop for details)
3256 EH_LOG((LL_INFO100, " STACKOVERFLOW: finally not called due to lack of guard page\n"));
3261 // will continue to find next fault/finally in this call frame
3264 } // if fFoundHandler
3265 } // if clause covers PC
3266 } // foreach eh clause
3267 } // if stack frame is far enough away from guard page
3270 // notify the profiler
3272 if (fGiveDebuggerAndProfilerNotification)
3276 if (!fUnwindingToFindResumeFrame)
3278 EEToProfilerExceptionInterfaceWrapper::ExceptionSearchFunctionLeave(pMD);
3283 if (!fUnwindFinished)
3285 EEToProfilerExceptionInterfaceWrapper::ExceptionUnwindFunctionLeave(pMD);
3289 } // fIgnoreThisFrame
3292 return ReturnStatus;
3295 #undef OPTIONAL_SO_CLEANUP_UNWIND
3297 #define OPTIONAL_SO_CLEANUP_UNWIND(pThread, pFrame) if (pThread->GetFrame() < pFrame) { UnwindFrameChain(pThread, pFrame); }
3299 typedef DWORD_PTR (HandlerFn)(UINT_PTR uStackFrame, Object* pExceptionObj);
3301 #ifdef USE_FUNCLET_CALL_HELPER
3302 // This is an assembly helper that enables us to call into EH funclets.
3303 EXTERN_C DWORD_PTR STDCALL CallEHFunclet(Object *pThrowable, UINT_PTR pFuncletToInvoke, UINT_PTR *pFirstNonVolReg, UINT_PTR *pFuncletCallerSP);
3305 // This is an assembly helper that enables us to call into EH filter funclets.
3306 EXTERN_C DWORD_PTR STDCALL CallEHFilterFunclet(Object *pThrowable, TADDR CallerSP, UINT_PTR pFuncletToInvoke, UINT_PTR *pFuncletCallerSP);
3308 static inline UINT_PTR CastHandlerFn(HandlerFn *pfnHandler)
3311 return DataPointerToThumbCode<UINT_PTR, HandlerFn *>(pfnHandler);
3313 return (UINT_PTR)pfnHandler;
3317 static inline UINT_PTR *GetFirstNonVolatileRegisterAddress(PCONTEXT pContextRecord)
3319 #if defined(_TARGET_ARM_)
3320 return (UINT_PTR*)&(pContextRecord->R4);
3321 #elif defined(_TARGET_ARM64_)
3322 return (UINT_PTR*)&(pContextRecord->X19);
3323 #elif defined(_TARGET_X86_)
3324 return (UINT_PTR*)&(pContextRecord->Edi);
3326 PORTABILITY_ASSERT("GetFirstNonVolatileRegisterAddress");
3331 static inline TADDR GetFrameRestoreBase(PCONTEXT pContextRecord)
3333 #if defined(_TARGET_ARM_) || defined(_TARGET_ARM64_)
3334 return GetSP(pContextRecord);
3335 #elif defined(_TARGET_X86_)
3336 return pContextRecord->Ebp;
3338 PORTABILITY_ASSERT("GetFrameRestoreBase");
3343 #endif // USE_FUNCLET_CALL_HELPER
3345 DWORD_PTR ExceptionTracker::CallHandler(
3346 UINT_PTR uHandlerStartPC,
3348 EE_ILEXCEPTION_CLAUSE* pEHClause,
3350 EHFuncletType funcletType
3351 X86_ARG(PCONTEXT pContextRecord)
3352 ARM_ARG(PCONTEXT pContextRecord)
3353 ARM64_ARG(PCONTEXT pContextRecord)
3356 STATIC_CONTRACT_THROWS;
3357 STATIC_CONTRACT_GC_TRIGGERS;
3358 STATIC_CONTRACT_MODE_COOPERATIVE;
3360 DWORD_PTR dwResumePC;
3361 OBJECTREF throwable;
3362 HandlerFn* pfnHandler = (HandlerFn*)uHandlerStartPC;
3364 EH_LOG((LL_INFO100, " calling handler at 0x%p, sp = 0x%p\n", uHandlerStartPC, sf.SP));
3366 Thread* pThread = GetThread();
3368 // The first parameter specifies whether we want to make callbacks before (true) or after (false)
3369 // calling the handler.
3370 MakeCallbacksRelatedToHandler(true, pThread, pMD, pEHClause, uHandlerStartPC, sf);
3372 _ASSERTE(pThread->DetermineIfGuardPagePresent());
3374 throwable = PossiblyUnwrapThrowable(pThread->GetThrowable(), pMD->GetAssembly());
3376 // Stores the current SP and BSP, which will be the caller SP and BSP for the funclet.
3377 // Note that we are making the assumption here that the SP and BSP don't change from this point
3378 // forward until we actually make the call to the funclet. If it's not the case then we will need
3379 // some sort of assembly wrappers to help us out.
3380 CallerStackFrame csfFunclet = CallerStackFrame((UINT_PTR)GetCurrentSP());
3381 this->m_EHClauseInfo.SetManagedCodeEntered(TRUE);
3382 this->m_EHClauseInfo.SetCallerStackFrame(csfFunclet);
3386 case EHFuncletType::Filter:
3387 ETW::ExceptionLog::ExceptionFilterBegin(pMD, (PVOID)uHandlerStartPC);
3389 case EHFuncletType::FaultFinally:
3390 ETW::ExceptionLog::ExceptionFinallyBegin(pMD, (PVOID)uHandlerStartPC);
3392 case EHFuncletType::Catch:
3393 ETW::ExceptionLog::ExceptionCatchBegin(pMD, (PVOID)uHandlerStartPC);
3397 #ifdef USE_FUNCLET_CALL_HELPER
3398 // Invoke the funclet. We pass throwable only when invoking the catch block.
3399 // Since the actual caller of the funclet is the assembly helper, pass the reference
3400 // to the CallerStackFrame instance so that it can be updated.
3401 CallerStackFrame* pCallerStackFrame = this->m_EHClauseInfo.GetCallerStackFrameForEHClauseReference();
3402 UINT_PTR *pFuncletCallerSP = &(pCallerStackFrame->SP);
3403 if (funcletType != EHFuncletType::Filter)
3405 dwResumePC = CallEHFunclet((funcletType == EHFuncletType::Catch)?OBJECTREFToObject(throwable):(Object *)NULL,
3406 CastHandlerFn(pfnHandler),
3407 GetFirstNonVolatileRegisterAddress(pContextRecord),
3412 // For invoking IL filter funclet, we pass the CallerSP to the funclet using which
3413 // it will retrieve the framepointer for accessing the locals in the parent
3415 dwResumePC = CallEHFilterFunclet(OBJECTREFToObject(throwable),
3416 GetFrameRestoreBase(pContextRecord),
3417 CastHandlerFn(pfnHandler),
3420 #else // USE_FUNCLET_CALL_HELPER
3422 // Invoke the funclet.
3424 dwResumePC = pfnHandler(sf.SP, OBJECTREFToObject(throwable));
3425 #endif // !USE_FUNCLET_CALL_HELPER
3429 case EHFuncletType::Filter:
3430 ETW::ExceptionLog::ExceptionFilterEnd();
3432 case EHFuncletType::FaultFinally:
3433 ETW::ExceptionLog::ExceptionFinallyEnd();
3435 case EHFuncletType::Catch:
3436 ETW::ExceptionLog::ExceptionCatchEnd();
3437 ETW::ExceptionLog::ExceptionThrownEnd();
3441 this->m_EHClauseInfo.SetManagedCodeEntered(FALSE);
3443 // The first parameter specifies whether we want to make callbacks before (true) or after (false)
3444 // calling the handler.
3445 MakeCallbacksRelatedToHandler(false, pThread, pMD, pEHClause, uHandlerStartPC, sf);
3450 #undef OPTIONAL_SO_CLEANUP_UNWIND
3451 #define OPTIONAL_SO_CLEANUP_UNWIND(pThread, pFrame)
3455 // this must be done after the second pass has run, it does not
3456 // reference anything on the stack, so it is safe to run in an
3457 // SEH __except clause as well as a C++ catch clause.
3460 void ExceptionTracker::PopTrackers(
3472 StackFrame sf((UINT_PTR)pStackFrameSP);
3474 // Only call into PopTrackers if we have a managed thread and we have an exception progress.
3475 // Otherwise, the call below (to PopTrackers) is a noop. If this ever changes, then this short-circuit needs to be fixed.
3476 Thread *pCurThread = GetThread();
3477 if ((pCurThread != NULL) && (pCurThread->GetExceptionState()->IsExceptionInProgress()))
3479 // Refer to the comment around ExceptionTracker::HasFrameBeenUnwoundByAnyActiveException
3480 // for details on the usage of this COOP switch.
3483 PopTrackers(sf, false);
3488 // during the second pass, an exception might escape out to
3489 // unmanaged code where it is swallowed (or potentially rethrown).
3490 // The current tracker is abandoned in this case, and if a rethrow
3491 // does happen in unmanaged code, this is unfortunately treated as
3492 // a brand new exception. This is unavoidable because if two
3493 // exceptions escape out to unmanaged code in this manner, a subsequent
3494 // rethrow cannot be disambiguated as corresponding to the nested vs.
3495 // the original exception.
3496 void ExceptionTracker::PopTrackerIfEscaping(
3508 Thread* pThread = GetThread();
3509 ThreadExceptionState* pExState = pThread->GetExceptionState();
3510 ExceptionTracker* pTracker = pExState->m_pCurrentTracker;
3511 CONSISTENCY_CHECK((NULL == pTracker) || pTracker->IsValid());
3513 // If we are resuming in managed code (albeit further up the stack) we will still need this
3514 // tracker. Otherwise we are either propagating into unmanaged code -- with the rethrow
3515 // issues mentioned above -- or we are going unhandled.
3517 // Note that we don't distinguish unmanaged code in the EE vs. unmanaged code outside the
3518 // EE. We could use the types of the Frames above us to make this distinction. Without
3519 // this, the technique of EX_TRY/EX_CATCH/EX_RETHROW inside the EE will lose its tracker
3520 // and have to rely on LastThrownObject in the rethrow. Along the same lines, unhandled
3521 // exceptions only have access to LastThrownObject.
3523 // There may not be a current tracker if, for instance, UMThunk has dispatched into managed
3524 // code via CallDescr. In that case, CallDescr may pop the tracker, leaving UMThunk with
3527 if (pTracker && pTracker->m_sfResumeStackFrame.IsNull())
3529 StackFrame sf((UINT_PTR)pStackPointer);
3530 StackFrame sfTopMostStackFrameFromFirstPass = pTracker->GetTopmostStackFrameFromFirstPass();
3532 // Refer to the comment around ExceptionTracker::HasFrameBeenUnwoundByAnyActiveException
3533 // for details on the usage of this COOP switch.
3535 ExceptionTracker::PopTrackers(sf, true);
3541 void ExceptionTracker::PopTrackers(
3542 StackFrame sfResumeFrame,
3548 // Refer to the comment around ExceptionTracker::HasFrameBeenUnwoundByAnyActiveException
3549 // for details on the mode being COOP here.
3556 Thread* pThread = GetThread();
3557 ExceptionTracker* pTracker = (pThread ? pThread->GetExceptionState()->m_pCurrentTracker : NULL);
3561 // This method is a no-op when there is no managed Thread object. We detect such a case and short circuit out in ExceptionTrackers::PopTrackers.
3562 // If this ever changes, then please revisit that method and fix it up appropriately.
3564 // If this tracker does not have valid stack ranges and it is in the first pass,
3565 // then we came here likely when the tracker was being setup
3566 // and an exception took place.
3568 // In such a case, we will not pop off the tracker
3569 if (pTracker && pTracker->m_ScannedStackRange.IsEmpty() && pTracker->IsInFirstPass())
3571 // skip any others with empty ranges...
3574 pTracker = pTracker->m_pPrevNestedInfo;
3576 while (pTracker && pTracker->m_ScannedStackRange.IsEmpty());
3578 // pTracker is now the first non-empty one, make sure it doesn't need popping
3579 // if it does, then someone let an exception propagate out of the exception dispatch code
3581 _ASSERTE(!pTracker || (pTracker->m_ScannedStackRange.GetUpperBound() > sfResumeFrame));
3585 #if defined(DEBUGGING_SUPPORTED)
3586 DWORD_PTR dwInterceptStackFrame = 0;
3588 // This method may be called on an unmanaged thread, in which case no interception can be done.
3591 ThreadExceptionState* pExState = pThread->GetExceptionState();
3593 // If the exception is intercepted, then pop trackers according to the stack frame at which
3594 // the exception is intercepted. We must retrieve the frame pointer before we start popping trackers.
3595 if (pExState->GetFlags()->DebuggerInterceptInfo())
3597 pExState->GetDebuggerState()->GetDebuggerInterceptInfo(NULL, NULL, (PBYTE*)&dwInterceptStackFrame,
3601 #endif // DEBUGGING_SUPPORTED
3606 // When we are about to pop off a tracker, it should
3607 // have a stack range setup.
3608 // It is not true on PAL where the scanned stack range needs to
3609 // be reset after unwinding a sequence of native frames.
3610 _ASSERTE(!pTracker->m_ScannedStackRange.IsEmpty());
3611 #endif // FEATURE_PAL
3613 ExceptionTracker* pPrev = pTracker->m_pPrevNestedInfo;
3616 // with new tracker collapsing code, we will only ever pop one of these at a time
3617 // at the end of the 2nd pass. However, CLRException::HandlerState::SetupCatch
3618 // still uses this function and we still need to revisit how it interacts with
3619 // ExceptionTrackers
3622 if ((fPopWhenEqual && (pTracker->m_ScannedStackRange.GetUpperBound() == sfResumeFrame)) ||
3623 (pTracker->m_ScannedStackRange.GetUpperBound() < sfResumeFrame))
3625 #if defined(DEBUGGING_SUPPORTED)
3626 if (g_pDebugInterface != NULL)
3628 if (pTracker->m_ScannedStackRange.GetUpperBound().SP < dwInterceptStackFrame)
3630 g_pDebugInterface->DeleteInterceptContext(pTracker->m_DebuggerExState.GetDebuggerInterceptContext());
3634 _ASSERTE(dwInterceptStackFrame == 0 ||
3635 ( dwInterceptStackFrame == sfResumeFrame.SP &&
3636 dwInterceptStackFrame == pTracker->m_ScannedStackRange.GetUpperBound().SP ));
3639 #endif // DEBUGGING_SUPPORTED
3641 ExceptionTracker* pTrackerToFree = pTracker;
3642 EH_LOG((LL_INFO100, "Unlinking ExceptionTracker object 0x%p, thread = 0x%p\n", pTrackerToFree, pTrackerToFree->m_pThread));
3643 CONSISTENCY_CHECK(pTracker->IsValid());
3646 // free managed tracker resources causing notification -- do this before unlinking the tracker
3647 // this is necessary so that we know an exception is still in flight while we give the notification
3648 FreeTrackerMemory(pTrackerToFree, memManaged);
3650 // unlink the tracker from the thread
3651 pThread->GetExceptionState()->m_pCurrentTracker = pTracker;
3652 CONSISTENCY_CHECK((NULL == pTracker) || pTracker->IsValid());
3654 // free unmanaged tracker resources
3655 FreeTrackerMemory(pTrackerToFree, memUnmanaged);
3666 ExceptionTracker* ExceptionTracker::GetOrCreateTracker(
3669 EXCEPTION_RECORD* pExceptionRecord,
3670 CONTEXT* pContextRecord,
3671 BOOL bAsynchronousThreadStop,
3673 StackTraceState* pStackTraceState
3676 CONTRACT(ExceptionTracker*)
3681 PRECONDITION(CheckPointer(pStackTraceState));
3682 POSTCONDITION(CheckPointer(RETVAL));
3686 Thread* pThread = GetThread();
3687 ThreadExceptionState* pExState = pThread->GetExceptionState();
3688 ExceptionTracker* pTracker = pExState->m_pCurrentTracker;
3689 CONSISTENCY_CHECK((NULL == pTracker) || (pTracker->IsValid()));
3691 bool fCreateNewTracker = false;
3692 bool fIsRethrow = false;
3693 bool fTransitionFromSecondToFirstPass = false;
3695 // Initialize the out parameter.
3696 *pStackTraceState = STS_Append;
3698 if (NULL != pTracker)
3700 fTransitionFromSecondToFirstPass = fIsFirstPass && !pTracker->IsInFirstPass();
3703 // We don't check this on PAL where the scanned stack range needs to
3704 // be reset after unwinding a sequence of native frames.
3705 CONSISTENCY_CHECK(!pTracker->m_ScannedStackRange.IsEmpty());
3706 #endif // FEATURE_PAL
3708 if (pTracker->m_ExceptionFlags.IsRethrown())
3710 EH_LOG((LL_INFO100, ">>continued processing of RETHROWN exception\n"));
3711 // this is the first time we've seen a rethrown exception, reuse the tracker and reset some state
3713 fCreateNewTracker = true;
3717 if ((pTracker->m_ptrs.ExceptionRecord != pExceptionRecord) && fIsFirstPass)
3719 EH_LOG((LL_INFO100, ">>NEW exception (exception records do not match)\n"));
3720 fCreateNewTracker = true;
3723 if (sf >= pTracker->m_ScannedStackRange.GetUpperBound())
3725 // We can't have a transition from 1st pass to 2nd pass in this case.
3726 _ASSERTE( ( sf == pTracker->m_ScannedStackRange.GetUpperBound() ) ||
3727 ( fIsFirstPass || !pTracker->IsInFirstPass() ) );
3729 if (fTransitionFromSecondToFirstPass)
3731 // We just transition from 2nd pass to 1st pass without knowing it.
3732 // This means that some unmanaged frame outside of the EE catches the previous exception,
3733 // so we should trash the current tracker and create a new one.
3734 EH_LOG((LL_INFO100, ">>NEW exception (the previous second pass finishes at some unmanaged frame outside of the EE)\n"));
3737 ExceptionTracker::PopTrackers(sf, false);
3740 fCreateNewTracker = true;
3744 EH_LOG((LL_INFO100, ">>continued processing of PREVIOUS exception\n"));
3745 // previously seen exception, reuse the tracker
3747 *pStackTraceState = STS_Append;
3751 if (pTracker->m_ScannedStackRange.Contains(sf))
3753 EH_LOG((LL_INFO100, ">>continued processing of PREVIOUS exception (revisiting previously processed frames)\n"));
3758 EH_LOG((LL_INFO100, ">>new NESTED exception\n"));
3759 fCreateNewTracker = true;
3764 EH_LOG((LL_INFO100, ">>NEW exception\n"));
3765 fCreateNewTracker = true;
3768 if (fCreateNewTracker)
3771 if (STATUS_STACK_OVERFLOW == pExceptionRecord->ExceptionCode)
3773 CONSISTENCY_CHECK(pExceptionRecord->NumberParameters >= 2);
3774 UINT_PTR uFaultAddress = pExceptionRecord->ExceptionInformation[1];
3775 UINT_PTR uStackLimit = (UINT_PTR)pThread->GetCachedStackLimit();
3777 EH_LOG((LL_INFO100, "STATUS_STACK_OVERFLOW accessing address %p %s\n",
3780 UINT_PTR uDispatchStackAvailable;
3782 uDispatchStackAvailable = uFaultAddress - uStackLimit - HARD_GUARD_REGION_SIZE;
3784 EH_LOG((LL_INFO100, "%x bytes available for SO processing\n", uDispatchStackAvailable));
3786 else if ((IsComPlusException(pExceptionRecord)) &&
3787 (pThread->GetThrowableAsHandle() == g_pPreallocatedStackOverflowException))
3789 EH_LOG((LL_INFO100, "STACKOVERFLOW: StackOverflowException manually thrown\n"));
3793 ExceptionTracker* pNewTracker;
3795 pNewTracker = GetTrackerMemory();
3798 if (NULL != pExState->m_OOMTracker.m_pThread)
3800 // Fatal error: we spun and could not allocate another tracker
3801 // and our existing emergency tracker is in use.
3802 EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE);
3805 pNewTracker = &pExState->m_OOMTracker;
3808 new (pNewTracker) ExceptionTracker(ControlPc,
3812 CONSISTENCY_CHECK(pNewTracker->IsValid());
3813 CONSISTENCY_CHECK(pThread == pNewTracker->m_pThread);
3815 EH_LOG((LL_INFO100, "___________________________________________\n"));
3816 EH_LOG((LL_INFO100, "creating new tracker object 0x%p, thread = 0x%p\n", pNewTracker, pThread));
3820 // We always create a throwable in the first pass when we first see an exception.
3822 // On 64bit, every time the exception passes beyond a boundary (e.g. RPInvoke call, or CallDescrWorker call),
3823 // the exception trackers that were created below (stack growing down) that boundary are released, during the 2nd pass,
3824 // if the exception was not caught in managed code. This is because the catcher is in native code and managed exception
3825 // data structures are for use of VM only when the exception is caught in managed code. Also, passing by such
3826 // boundaries is our only opportunity to release such internal structures and not leak the memory.
3828 // However, in certain case, release of exception trackers at each boundary can prove to be a bit aggressive.
3829 // Take the example below where "VM" prefix refers to a VM frame and "M" prefix refers to a managed frame on the stack.
3831 // VM1 -> M1 - VM2 - (via RPinvoke) -> M2
3833 // Let M2 throw E2 that remains unhandled in managed code (i.e. M1 also does not catch it) but is caught in VM1.
3834 // Note that the acting of throwing an exception also sets it as the LastThrownObject (LTO) against the thread.
3836 // Since this is native code (as mentioned in the comments above, there is no distinction made between VM native
3837 // code and external native code) that caught the exception, when the unwind goes past the "Reverse Pinvoke" boundary,
3838 // its personality routine will release the tracker for E2. Thus, only the LTO (which is off the Thread object and not
3839 // the exception tracker) is indicative of type of the last exception thrown.
3841 // As the unwind goes up the stack, we come across M1 and, since the original tracker was released, we create a new
3842 // tracker in the 2nd pass that does not contain details like the active exception object. A managed finally executes in M1
3843 // that throws and catches E1 inside the finally block. Thus, LTO is updated to indicate E1 as the last exception thrown.
3844 // When the exception is caught in VM1 and VM attempts to get LTO, it gets E1, which is incorrect as it was handled within the finally.
3845 // Semantically, it should have got E2 as the LTO.
3847 // To address, this we will *also* create a throwable during second pass for most exceptions
3848 // since most of them have had the corresponding first pass. If we are processing
3849 // an exception's second pass, we would have processed its first pass as well and thus, already
3850 // created a throwable that would be setup as the LastThrownObject (LTO) against the Thread.
3852 // The only exception to this rule is the longjump - this exception only has second pass
3853 // Thus, if we are in second pass and exception in question is longjump, then do not create a throwable.
3855 // In the case of the scenario above, when we attempt to create a new exception tracker, during the unwind,
3856 // for M1, we will also setup E2 as the throwable in the tracker. As a result, when the finally in M1 throws
3857 // and catches the exception, the LTO is correctly updated against the thread (see SafeUpdateLastThrownObject)
3858 // and thus, when VM requests for the LTO, it gets E2 as expected.
3859 bool fCreateThrowableForCurrentPass = true;
3860 if (pExceptionRecord->ExceptionCode == STATUS_LONGJUMP)
3862 // Long jump is only in second pass of exception dispatch
3863 _ASSERTE(!fIsFirstPass);
3864 fCreateThrowableForCurrentPass = false;
3867 // When dealing with SQL Hosting like scenario, a real SO
3868 // may be caught in native code. As a result, CRT will perform
3869 // STATUS_UNWIND_CONSOLIDATE that will result in replacing
3870 // the exception record in ProcessCLRException. This replaced
3871 // exception record will point to the exception record for original
3872 // SO for which we will not have created a throwable in the first pass
3873 // due to the SO-specific early exit code in ProcessCLRException.
3875 // Thus, if we see that we are here for SO in the 2nd pass, then
3876 // we shouldn't attempt to create a throwable.
3877 if ((!fIsFirstPass) && (pExceptionRecord->ExceptionCode == STATUS_STACK_OVERFLOW))
3879 fCreateThrowableForCurrentPass = false;
3883 if ((!fIsFirstPass) && (fCreateThrowableForCurrentPass == true))
3885 // We should have a LTO available if we are creating
3886 // a throwable during second pass.
3887 _ASSERTE(pThread->LastThrownObjectHandle() != NULL);
3891 bool fCreateThrowable = (fCreateThrowableForCurrentPass || (bAsynchronousThreadStop && !pThread->IsAsyncPrevented()));
3892 OBJECTREF oThrowable = NULL;
3894 if (fCreateThrowable)
3898 oThrowable = ObjectFromHandle(pTracker->m_hThrowable);
3902 // this can take a nested exception
3903 oThrowable = CreateThrowable(pExceptionRecord, bAsynchronousThreadStop);
3907 GCX_FORBID(); // we haven't protected oThrowable
3909 if (pExState->m_pCurrentTracker != pNewTracker) // OOM can make this false
3911 pNewTracker->m_pPrevNestedInfo = pExState->m_pCurrentTracker;
3912 pTracker = pNewTracker;
3913 pThread->GetExceptionState()->m_pCurrentTracker = pTracker;
3916 if (fCreateThrowable)
3918 CONSISTENCY_CHECK(oThrowable != NULL);
3919 CONSISTENCY_CHECK(NULL == pTracker->m_hThrowable);
3921 pThread->SafeSetThrowables(oThrowable);
3923 if (pTracker->CanAllocateMemory())
3925 pTracker->m_StackTraceInfo.AllocateStackTrace();
3928 INDEBUG(oThrowable = NULL);
3932 *pStackTraceState = STS_FirstRethrowFrame;
3936 *pStackTraceState = STS_NewException;
3939 _ASSERTE(pTracker->m_pLimitFrame == NULL);
3940 pTracker->ResetLimitFrame();
3946 // Refer to the comment around ExceptionTracker::HasFrameBeenUnwoundByAnyActiveException
3947 // for details on the usage of this COOP switch.
3950 if (pTracker->IsInFirstPass())
3952 CONSISTENCY_CHECK_MSG(fCreateNewTracker || pTracker->m_ScannedStackRange.Contains(sf),
3953 "Tracker did not receive a first pass!");
3955 // Save the topmost StackFrame the tracker saw in the first pass before we reset the
3956 // scanned stack range.
3957 pTracker->m_sfFirstPassTopmostFrame = pTracker->m_ScannedStackRange.GetUpperBound();
3959 // We have to detect this transition because otherwise we break when unmanaged code
3960 // catches our exceptions.
3961 EH_LOG((LL_INFO100, ">>tracker transitioned to second pass\n"));
3962 pTracker->m_ScannedStackRange.Reset();
3964 pTracker->m_ExceptionFlags.SetUnwindHasStarted();
3965 if (pTracker->m_ExceptionFlags.UnwindingToFindResumeFrame())
3967 // UnwindingToFindResumeFrame means that in the first pass, we determine that a method
3968 // catches the exception, but the method frame we are inspecting is a funclet method frame
3969 // and is not the correct frame to resume execution. We need to resume to the correct
3970 // method frame before starting the second pass. The correct method frame is most likely
3971 // the parent method frame, but it can also be another funclet method frame.
3973 // If the exception transitions from first pass to second pass before we find the parent
3974 // method frame, there is only one possibility: some other thread has initiated a rude
3975 // abort on the current thread, causing us to skip processing of all method frames.
3976 _ASSERTE(pThread->IsRudeAbortInitiated());
3978 // Lean on the safe side and just reset everything unconditionally.
3979 pTracker->FirstPassIsComplete();
3981 EEToDebuggerExceptionInterfaceWrapper::ManagedExceptionUnwindBegin(pThread);
3983 pTracker->ResetLimitFrame();
3987 // In the second pass, there's a possibility that UMThunkUnwindFrameChainHandler() has
3988 // popped some frames off the frame chain underneath us. Check for this case here.
3989 if (pTracker->m_pLimitFrame < pThread->GetFrame())
3991 pTracker->ResetLimitFrame();
3996 #ifdef FEATURE_CORRUPTING_EXCEPTIONS
3997 if (fCreateNewTracker)
3999 // Exception tracker should be in the 2nd pass right now
4000 _ASSERTE(!pTracker->IsInFirstPass());
4002 // The corruption severity of a newly created tracker is NotSet
4003 _ASSERTE(pTracker->GetCorruptionSeverity() == NotSet);
4005 // See comment in CEHelper::SetupCorruptionSeverityForActiveExceptionInUnwindPass for details
4006 CEHelper::SetupCorruptionSeverityForActiveExceptionInUnwindPass(pThread, pTracker, FALSE, pExceptionRecord->ExceptionCode);
4008 #endif // FEATURE_CORRUPTING_EXCEPTIONS
4011 _ASSERTE(pTracker->m_pLimitFrame >= pThread->GetFrame());
4016 void ExceptionTracker::ResetLimitFrame()
4018 WRAPPER_NO_CONTRACT;
4020 m_pLimitFrame = m_pThread->GetFrame();
4025 void ExceptionTracker::ResumeExecution(
4026 CONTEXT* pContextRecord,
4027 EXCEPTION_RECORD* pExceptionRecord
4031 // This method never returns, so it will leave its
4032 // state on the thread if useing dynamic contracts.
4034 STATIC_CONTRACT_MODE_COOPERATIVE;
4035 STATIC_CONTRACT_GC_NOTRIGGER;
4036 STATIC_CONTRACT_NOTHROW;
4038 AMD64_ONLY(STRESS_LOG4(LF_GCROOTS, LL_INFO100, "Resuming after exception at %p, rbx=%p, rsi=%p, rdi=%p\n",
4039 GetIP(pContextRecord),
4040 pContextRecord->Rbx,
4041 pContextRecord->Rsi,
4042 pContextRecord->Rdi));
4044 EH_LOG((LL_INFO100, "resuming execution at 0x%p\n", GetIP(pContextRecord)));
4045 EH_LOG((LL_INFO100, "^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n"));
4047 RtlRestoreContext(pContextRecord, pExceptionRecord);
4057 OBJECTREF ExceptionTracker::CreateThrowable(
4058 PEXCEPTION_RECORD pExceptionRecord,
4059 BOOL bAsynchronousThreadStop
4070 OBJECTREF oThrowable = NULL;
4071 Thread* pThread = GetThread();
4074 if ((!bAsynchronousThreadStop) && IsComPlusException(pExceptionRecord))
4076 oThrowable = pThread->LastThrownObject();
4080 oThrowable = CreateCOMPlusExceptionObject(pThread, pExceptionRecord, bAsynchronousThreadStop);
4088 BOOL ExceptionTracker::ClauseCoversPC(
4089 EE_ILEXCEPTION_CLAUSE* pEHClause,
4093 // TryStartPC and TryEndPC are offsets relative to the start
4094 // of the method so we can just compare them to the offset returned
4095 // by JitCodeToMethodInfo.
4097 return ((pEHClause->TryStartPC <= dwOffset) && (dwOffset < pEHClause->TryEndPC));
4100 #if defined(DEBUGGING_SUPPORTED)
4101 BOOL ExceptionTracker::NotifyDebuggerOfStub(Thread* pThread, StackFrame sf, Frame* pCurrentFrame)
4103 LIMITED_METHOD_CONTRACT;
4105 BOOL fDeliveredFirstChanceNotification = FALSE;
4108 // Remove this once SIS is fully enabled.
4110 extern bool g_EnableSIS;
4114 _ASSERTE(GetThread() == pThread);
4118 // For debugger, we may want to notify 1st chance exceptions if they're coming out of a stub.
4119 // We recognize stubs as Frames with a M2U transition type. The debugger's stackwalker also
4120 // recognizes these frames and publishes ICorDebugInternalFrames in the stackwalk. It's
4121 // important to use pFrame as the stack address so that the Exception callback matches up
4122 // w/ the ICorDebugInternlFrame stack range.
4123 if (CORDebuggerAttached())
4125 if (pCurrentFrame->GetTransitionType() == Frame::TT_M2U)
4127 // Use -1 for the backing store pointer whenever we use the address of a frame as the stack pointer.
4128 EEToDebuggerExceptionInterfaceWrapper::FirstChanceManagedException(pThread,
4130 (SIZE_T)pCurrentFrame);
4131 fDeliveredFirstChanceNotification = TRUE;
4136 return fDeliveredFirstChanceNotification;
4139 bool ExceptionTracker::IsFilterStartOffset(EE_ILEXCEPTION_CLAUSE* pEHClause, DWORD_PTR dwHandlerStartPC)
4141 EECodeInfo codeInfo((PCODE)dwHandlerStartPC);
4142 _ASSERTE(codeInfo.IsValid());
4144 return pEHClause->FilterOffset == codeInfo.GetRelOffset();
4147 void ExceptionTracker::MakeCallbacksRelatedToHandler(
4148 bool fBeforeCallingHandler,
4151 EE_ILEXCEPTION_CLAUSE* pEHClause,
4152 DWORD_PTR dwHandlerStartPC,
4156 // Here we need to make an extra check for filter handlers because we could be calling the catch handler
4157 // associated with a filter handler and yet the EH clause we have saved is for the filter handler.
4158 BOOL fIsFilterHandler = IsFilterHandler(pEHClause) && ExceptionTracker::IsFilterStartOffset(pEHClause, dwHandlerStartPC);
4159 BOOL fIsFaultOrFinallyHandler = IsFaultOrFinally(pEHClause);
4161 if (fBeforeCallingHandler)
4163 StackFrame sfToStore = sf;
4164 if ((this->m_pPrevNestedInfo != NULL) &&
4165 (this->m_pPrevNestedInfo->m_EnclosingClauseInfo == this->m_EnclosingClauseInfo))
4167 // If this is a nested exception which has the same enclosing clause as the previous exception,
4168 // we should just propagate the clause info from the previous exception.
4169 sfToStore = this->m_pPrevNestedInfo->m_EHClauseInfo.GetStackFrameForEHClause();
4171 m_EHClauseInfo.SetInfo(COR_PRF_CLAUSE_NONE, (UINT_PTR)dwHandlerStartPC, sfToStore);
4173 if (pMD->IsILStub())
4178 if (fIsFilterHandler)
4180 m_EHClauseInfo.SetEHClauseType(COR_PRF_CLAUSE_FILTER);
4181 EEToDebuggerExceptionInterfaceWrapper::ExceptionFilter(pMD, (TADDR) dwHandlerStartPC, pEHClause->FilterOffset, (BYTE*)sf.SP);
4183 EEToProfilerExceptionInterfaceWrapper::ExceptionSearchFilterEnter(pMD);
4187 EEToDebuggerExceptionInterfaceWrapper::ExceptionHandle(pMD, (TADDR) dwHandlerStartPC, pEHClause->HandlerStartPC, (BYTE*)sf.SP);
4189 if (fIsFaultOrFinallyHandler)
4191 m_EHClauseInfo.SetEHClauseType(COR_PRF_CLAUSE_FINALLY);
4192 EEToProfilerExceptionInterfaceWrapper::ExceptionUnwindFinallyEnter(pMD);
4196 m_EHClauseInfo.SetEHClauseType(COR_PRF_CLAUSE_CATCH);
4197 EEToProfilerExceptionInterfaceWrapper::ExceptionCatcherEnter(pThread, pMD);
4199 DACNotify::DoExceptionCatcherEnterNotification(pMD, pEHClause->HandlerStartPC);
4205 if (pMD->IsILStub())
4210 if (fIsFilterHandler)
4212 EEToProfilerExceptionInterfaceWrapper::ExceptionSearchFilterLeave();
4216 if (fIsFaultOrFinallyHandler)
4218 EEToProfilerExceptionInterfaceWrapper::ExceptionUnwindFinallyLeave();
4222 EEToProfilerExceptionInterfaceWrapper::ExceptionCatcherLeave();
4225 m_EHClauseInfo.ResetInfo();
4229 #ifdef DEBUGGER_EXCEPTION_INTERCEPTION_SUPPORTED
4230 //---------------------------------------------------------------------------------------
4232 // This function is called by DefaultCatchHandler() to intercept an exception and start an unwind.
4235 // pCurrentEstablisherFrame - unused on WIN64
4236 // pExceptionRecord - EXCEPTION_RECORD of the exception being intercepted
4239 // ExceptionContinueSearch if the exception cannot be intercepted
4242 // If the exception is intercepted, this function never returns.
4245 EXCEPTION_DISPOSITION ClrDebuggerDoUnwindAndIntercept(X86_FIRST_ARG(EXCEPTION_REGISTRATION_RECORD* pCurrentEstablisherFrame)
4246 EXCEPTION_RECORD* pExceptionRecord)
4248 if (!CheckThreadExceptionStateForInterception())
4250 return ExceptionContinueSearch;
4253 Thread* pThread = GetThread();
4254 ThreadExceptionState* pExState = pThread->GetExceptionState();
4256 UINT_PTR uInterceptStackFrame = 0;
4258 pExState->GetDebuggerState()->GetDebuggerInterceptInfo(NULL, NULL,
4259 (PBYTE*)&uInterceptStackFrame,
4262 ClrUnwindEx(pExceptionRecord, (UINT_PTR)pThread, INVALID_RESUME_ADDRESS, uInterceptStackFrame);
4266 #endif // DEBUGGER_EXCEPTION_INTERCEPTION_SUPPORTED
4267 #endif // DEBUGGING_SUPPORTED
4270 inline bool ExceptionTracker::IsValid()
4272 bool fRetVal = false;
4276 Thread* pThisThread = GetThread();
4277 if (m_pThread == pThisThread)
4285 EX_END_CATCH(SwallowAllExceptions);
4289 EH_LOG((LL_ERROR, "ExceptionTracker::IsValid() failed! this = 0x%p\n", this));
4294 BOOL ExceptionTracker::ThrowableIsValid()
4297 CONSISTENCY_CHECK(IsValid());
4299 BOOL isValid = FALSE;
4302 isValid = (m_pThread->GetThrowable() != NULL);
4308 UINT_PTR ExceptionTracker::DebugComputeNestingLevel()
4310 UINT_PTR uNestingLevel = 0;
4311 Thread* pThread = GetThread();
4315 ExceptionTracker* pTracker;
4316 pTracker = pThread->GetExceptionState()->m_pCurrentTracker;
4321 pTracker = pTracker->m_pPrevNestedInfo;
4325 return uNestingLevel;
4327 void DumpClauses(IJitManager* pJitMan, const METHODTOKEN& MethToken, UINT_PTR uMethodStartPC, UINT_PTR dwControlPc)
4329 EH_CLAUSE_ENUMERATOR EnumState;
4332 EH_LOG((LL_INFO1000, " | uMethodStartPC: %p, ControlPc at offset %x\n", uMethodStartPC, dwControlPc - uMethodStartPC));
4334 EHCount = pJitMan->InitializeEHEnumeration(MethToken, &EnumState);
4335 for (unsigned i = 0; i < EHCount; i++)
4337 EE_ILEXCEPTION_CLAUSE EHClause;
4338 pJitMan->GetNextEHClause(&EnumState, &EHClause);
4340 EH_LOG((LL_INFO1000, " | %s clause [%x, %x], handler: [%x, %x] %s",
4341 (IsFault(&EHClause) ? "fault" :
4342 (IsFinally(&EHClause) ? "finally" :
4343 (IsFilterHandler(&EHClause) ? "filter" :
4344 (IsTypedHandler(&EHClause) ? "typed" : "unknown")))),
4345 EHClause.TryStartPC , // + uMethodStartPC,
4346 EHClause.TryEndPC , // + uMethodStartPC,
4347 EHClause.HandlerStartPC , // + uMethodStartPC,
4348 EHClause.HandlerEndPC , // + uMethodStartPC
4349 (IsDuplicateClause(&EHClause) ? "[duplicate]" : "")
4352 if (IsFilterHandler(&EHClause))
4354 LOG((LF_EH, LL_INFO1000, " filter: [%x, ...]",
4355 EHClause.FilterOffset));// + uMethodStartPC
4358 LOG((LF_EH, LL_INFO1000, "\n"));
4363 #define STACK_ALLOC_ARRAY(numElements, type) \
4364 ((type *)_alloca((numElements)*(sizeof(type))))
4366 static void DoEHLog(
4368 __in_z const char *fmt,
4372 if (!LoggingOn(LF_EH, lvl))
4376 va_start(args, fmt);
4378 UINT_PTR nestinglevel = ExceptionTracker::DebugComputeNestingLevel();
4381 _ASSERTE(FitsIn<UINT_PTR>(2 * nestinglevel));
4382 UINT_PTR cch = 2 * nestinglevel;
4383 char* pPadding = STACK_ALLOC_ARRAY(cch + 1, char);
4384 memset(pPadding, '.', cch);
4387 LOG((LF_EH, lvl, pPadding));
4390 LogSpewValist(LF_EH, lvl, fmt, args);
4397 //---------------------------------------------------------------------------------------
4399 // This functions performs an unwind procedure for a managed exception. The stack is unwound
4400 // until the target frame is reached. For each frame we use its PC value to find
4401 // a handler using information that has been built by JIT.
4404 // ex - the PAL_SEHException representing the managed exception
4405 // unwindStartContext - the context that the unwind should start at. Either the original exception
4406 // context (when the exception didn't cross native frames) or the first managed
4407 // frame after crossing native frames.
4409 VOID UnwindManagedExceptionPass2(PAL_SEHException& ex, CONTEXT* unwindStartContext)
4413 EXCEPTION_DISPOSITION disposition;
4414 CONTEXT* currentFrameContext;
4415 CONTEXT* callerFrameContext;
4416 CONTEXT contextStorage;
4417 DISPATCHER_CONTEXT dispatcherContext;
4418 EECodeInfo codeInfo;
4419 UINT_PTR establisherFrame = NULL;
4422 // Indicate that we are performing second pass.
4423 ex.GetExceptionRecord()->ExceptionFlags = EXCEPTION_UNWINDING;
4425 currentFrameContext = unwindStartContext;
4426 callerFrameContext = &contextStorage;
4428 memset(&dispatcherContext, 0, sizeof(DISPATCHER_CONTEXT));
4429 disposition = ExceptionContinueSearch;
4433 controlPc = GetIP(currentFrameContext);
4435 codeInfo.Init(controlPc);
4437 dispatcherContext.FunctionEntry = codeInfo.GetFunctionEntry();
4438 dispatcherContext.ControlPc = controlPc;
4439 dispatcherContext.ImageBase = codeInfo.GetModuleBase();
4440 #ifdef ADJUST_PC_UNWOUND_TO_CALL
4441 dispatcherContext.ControlPcIsUnwound = !!(currentFrameContext->ContextFlags & CONTEXT_UNWOUND_TO_CALL);
4443 // Check whether we have a function table entry for the current controlPC.
4444 // If yes, then call RtlVirtualUnwind to get the establisher frame pointer.
4445 if (dispatcherContext.FunctionEntry != NULL)
4447 // Create a copy of the current context because we don't want
4448 // the current context record to be updated by RtlVirtualUnwind.
4449 memcpy(callerFrameContext, currentFrameContext, sizeof(CONTEXT));
4450 RtlVirtualUnwind(UNW_FLAG_EHANDLER,
4451 dispatcherContext.ImageBase,
4452 dispatcherContext.ControlPc,
4453 dispatcherContext.FunctionEntry,
4459 // Make sure that the establisher frame pointer is within stack boundaries
4460 // and we did not go below that target frame.
4461 // TODO: make sure the establisher frame is properly aligned.
4462 if (!Thread::IsAddressInCurrentStack((void*)establisherFrame) || establisherFrame > ex.TargetFrameSp)
4464 // TODO: add better error handling
4468 dispatcherContext.EstablisherFrame = establisherFrame;
4469 dispatcherContext.ContextRecord = currentFrameContext;
4471 EXCEPTION_RECORD* exceptionRecord = ex.GetExceptionRecord();
4473 if (establisherFrame == ex.TargetFrameSp)
4475 // We have reached the frame that will handle the exception.
4476 ex.GetExceptionRecord()->ExceptionFlags |= EXCEPTION_TARGET_UNWIND;
4477 ExceptionTracker* pTracker = GetThread()->GetExceptionState()->GetCurrentExceptionTracker();
4478 pTracker->TakeExceptionPointersOwnership(&ex);
4481 // Perform unwinding of the current frame
4482 disposition = ProcessCLRException(exceptionRecord,
4484 currentFrameContext,
4485 &dispatcherContext);
4487 if (disposition == ExceptionContinueSearch)
4489 // Exception handler not found. Try the parent frame.
4490 CONTEXT* temp = currentFrameContext;
4491 currentFrameContext = callerFrameContext;
4492 callerFrameContext = temp;
4501 Thread::VirtualUnwindLeafCallFrame(currentFrameContext);
4504 controlPc = GetIP(currentFrameContext);
4505 sp = (PVOID)GetSP(currentFrameContext);
4507 // Check whether we are crossing managed-to-native boundary
4508 if (!ExecutionManager::IsManagedCode(controlPc))
4510 // Return back to the UnwindManagedExceptionPass1 and let it unwind the native frames
4513 // Pop all frames that are below the block of native frames and that would be
4514 // in the unwound part of the stack when UnwindManagedExceptionPass2 is resumed
4515 // at the next managed frame.
4517 UnwindFrameChain(GetThread(), sp);
4518 // We are going to reclaim the stack range that was scanned by the exception tracker
4519 // until now. We need to reset the explicit frames range so that if GC fires before
4520 // we recreate the tracker at the first managed frame after unwinding the native
4521 // frames, it doesn't attempt to scan the reclaimed stack range.
4522 // We also need to reset the scanned stack range since the scanned frames will be
4523 // obsolete after the unwind of the native frames completes.
4524 ExceptionTracker* pTracker = GetThread()->GetExceptionState()->GetCurrentExceptionTracker();
4525 pTracker->CleanupBeforeNativeFramesUnwind();
4528 // Now we need to unwind the native frames until we reach managed frames again or the exception is
4529 // handled in the native code.
4530 STRESS_LOG2(LF_EH, LL_INFO100, "Unwinding native frames starting at IP = %p, SP = %p \n", controlPc, sp);
4531 PAL_ThrowExceptionFromContext(currentFrameContext, &ex);
4535 } while (Thread::IsAddressInCurrentStack(sp) && (establisherFrame != ex.TargetFrameSp));
4537 _ASSERTE(!"UnwindManagedExceptionPass2: Unwinding failed. Reached the end of the stack");
4538 EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE);
4541 //---------------------------------------------------------------------------------------
4543 // This functions performs dispatching of a managed exception.
4544 // It tries to find an exception handler by examining each frame in the call stack.
4545 // The search is started from the managed frame caused the exception to be thrown.
4546 // For each frame we use its PC value to find a handler using information that
4547 // has been built by JIT. If an exception handler is found then this function initiates
4548 // the second pass to unwind the stack and execute the handler.
4551 // ex - a PAL_SEHException that stores information about the managed
4552 // exception that needs to be dispatched.
4553 // frameContext - the context of the first managed frame of the exception call stack
4555 VOID DECLSPEC_NORETURN UnwindManagedExceptionPass1(PAL_SEHException& ex, CONTEXT* frameContext)
4557 CONTEXT unwindStartContext;
4558 EXCEPTION_DISPOSITION disposition;
4559 DISPATCHER_CONTEXT dispatcherContext;
4560 EECodeInfo codeInfo;
4562 UINT_PTR establisherFrame = NULL;
4565 #ifdef FEATURE_HIJACK
4566 GetThread()->UnhijackThread();
4569 controlPc = GetIP(frameContext);
4570 unwindStartContext = *frameContext;
4572 if (!ExecutionManager::IsManagedCode(GetIP(ex.GetContextRecord())))
4574 // This is the first time we see the managed exception, set its context to the managed frame that has caused
4575 // the exception to be thrown
4576 *ex.GetContextRecord() = *frameContext;
4577 ex.GetExceptionRecord()->ExceptionAddress = (VOID*)controlPc;
4580 ex.GetExceptionRecord()->ExceptionFlags = 0;
4582 memset(&dispatcherContext, 0, sizeof(DISPATCHER_CONTEXT));
4583 disposition = ExceptionContinueSearch;
4587 codeInfo.Init(controlPc);
4588 dispatcherContext.FunctionEntry = codeInfo.GetFunctionEntry();
4589 dispatcherContext.ControlPc = controlPc;
4590 dispatcherContext.ImageBase = codeInfo.GetModuleBase();
4591 #ifdef ADJUST_PC_UNWOUND_TO_CALL
4592 dispatcherContext.ControlPcIsUnwound = !!(frameContext->ContextFlags & CONTEXT_UNWOUND_TO_CALL);
4595 // Check whether we have a function table entry for the current controlPC.
4596 // If yes, then call RtlVirtualUnwind to get the establisher frame pointer
4597 // and then check whether an exception handler exists for the frame.
4598 if (dispatcherContext.FunctionEntry != NULL)
4600 #ifdef USE_CURRENT_CONTEXT_IN_FILTER
4601 KNONVOLATILE_CONTEXT currentNonVolatileContext;
4602 CaptureNonvolatileRegisters(¤tNonVolatileContext, frameContext);
4603 #endif // USE_CURRENT_CONTEXT_IN_FILTER
4605 RtlVirtualUnwind(UNW_FLAG_EHANDLER,
4606 dispatcherContext.ImageBase,
4607 dispatcherContext.ControlPc,
4608 dispatcherContext.FunctionEntry,
4614 // Make sure that the establisher frame pointer is within stack boundaries.
4615 // TODO: make sure the establisher frame is properly aligned.
4616 if (!Thread::IsAddressInCurrentStack((void*)establisherFrame))
4618 // TODO: add better error handling
4622 dispatcherContext.EstablisherFrame = establisherFrame;
4623 #ifdef USE_CURRENT_CONTEXT_IN_FILTER
4624 dispatcherContext.CurrentNonVolatileContextRecord = ¤tNonVolatileContext;
4625 #endif // USE_CURRENT_CONTEXT_IN_FILTER
4626 dispatcherContext.ContextRecord = frameContext;
4628 // Find exception handler in the current frame
4629 disposition = ProcessCLRException(ex.GetExceptionRecord(),
4631 ex.GetContextRecord(),
4632 &dispatcherContext);
4634 if (disposition == ExceptionContinueSearch)
4636 // Exception handler not found. Try the parent frame.
4637 controlPc = GetIP(frameContext);
4639 else if (disposition == ExceptionStackUnwind)
4641 // The first pass is complete. We have found the frame that
4642 // will handle the exception. Start the second pass.
4643 ex.TargetFrameSp = establisherFrame;
4644 UnwindManagedExceptionPass2(ex, &unwindStartContext);
4648 // TODO: This needs to implemented. Make it fail for now.
4654 controlPc = Thread::VirtualUnwindLeafCallFrame(frameContext);
4657 // Check whether we are crossing managed-to-native boundary
4658 while (!ExecutionManager::IsManagedCode(controlPc))
4660 #ifdef VSD_STUB_CAN_THROW_AV
4661 if (IsIPinVirtualStub(controlPc))
4663 AdjustContextForVirtualStub(NULL, frameContext);
4664 controlPc = GetIP(frameContext);
4667 #endif // VSD_STUB_CAN_THROW_AV
4669 UINT_PTR sp = GetSP(frameContext);
4671 BOOL success = PAL_VirtualUnwind(frameContext, NULL);
4674 _ASSERTE(!"UnwindManagedExceptionPass1: PAL_VirtualUnwind failed");
4675 EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE);
4678 controlPc = GetIP(frameContext);
4680 STRESS_LOG2(LF_EH, LL_INFO100, "Processing exception at native frame: IP = %p, SP = %p \n", controlPc, sp);
4684 if (!GetThread()->HasThreadStateNC(Thread::TSNC_ProcessedUnhandledException))
4686 LONG disposition = InternalUnhandledExceptionFilter_Worker(&ex.ExceptionPointers);
4687 _ASSERTE(disposition == EXCEPTION_CONTINUE_SEARCH);
4689 TerminateProcess(GetCurrentProcess(), 1);
4693 UINT_PTR parentSp = GetSP(frameContext);
4695 // Find all holders on this frame that are in scopes embedded in each other and call their filters.
4696 NativeExceptionHolderBase* holder = nullptr;
4697 while ((holder = NativeExceptionHolderBase::FindNextHolder(holder, (void*)sp, (void*)parentSp)) != nullptr)
4699 EXCEPTION_DISPOSITION disposition = holder->InvokeFilter(ex);
4700 if (disposition == EXCEPTION_EXECUTE_HANDLER)
4703 STRESS_LOG1(LF_EH, LL_INFO100, "First pass finished, found native handler, TargetFrameSp = %p\n", sp);
4705 ex.TargetFrameSp = sp;
4706 UnwindManagedExceptionPass2(ex, &unwindStartContext);
4710 // The EXCEPTION_CONTINUE_EXECUTION is not supported and should never be returned by a filter
4711 _ASSERTE(disposition == EXCEPTION_CONTINUE_SEARCH);
4715 } while (Thread::IsAddressInCurrentStack((void*)GetSP(frameContext)));
4717 _ASSERTE(!"UnwindManagedExceptionPass1: Failed to find a handler. Reached the end of the stack");
4718 EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE);
4722 VOID DECLSPEC_NORETURN DispatchManagedException(PAL_SEHException& ex, bool isHardwareException)
4728 // Unwind the context to the first managed frame
4729 CONTEXT frameContext;
4731 // If the exception is hardware exceptions, we use the exception's context record directly
4732 if (isHardwareException)
4734 frameContext = *ex.GetContextRecord();
4738 RtlCaptureContext(&frameContext);
4739 UINT_PTR currentSP = GetSP(&frameContext);
4741 if (Thread::VirtualUnwindToFirstManagedCallFrame(&frameContext) == 0)
4743 // There are no managed frames on the stack, so we need to continue unwinding using C++ exception
4748 UINT_PTR firstManagedFrameSP = GetSP(&frameContext);
4750 // Check if there is any exception holder in the skipped frames. If there is one, we need to unwind them
4751 // using the C++ handling. This is a special case when the UNINSTALL_MANAGED_EXCEPTION_DISPATCHER was
4752 // not at the managed to native boundary.
4753 if (NativeExceptionHolderBase::FindNextHolder(nullptr, (void*)currentSP, (void*)firstManagedFrameSP) != nullptr)
4759 if (ex.IsFirstPass())
4761 UnwindManagedExceptionPass1(ex, &frameContext);
4765 // This is a continuation of pass 2 after native frames unwinding.
4766 UnwindManagedExceptionPass2(ex, &frameContext);
4770 catch (PAL_SEHException& ex2)
4772 isHardwareException = false;
4773 ex = std::move(ex2);
4779 // Ensure that the corruption severity is set for exceptions that didn't pass through managed frames
4780 // yet and so there is no exception tracker.
4781 if (ex.IsFirstPass())
4783 // Get the thread and the thread exception state - they must exist at this point
4784 Thread *pCurThread = GetThread();
4785 _ASSERTE(pCurThread != NULL);
4787 ThreadExceptionState * pCurTES = pCurThread->GetExceptionState();
4788 _ASSERTE(pCurTES != NULL);
4790 #ifdef FEATURE_CORRUPTING_EXCEPTIONS
4791 ExceptionTracker* pEHTracker = pCurTES->GetCurrentExceptionTracker();
4792 if (pEHTracker == NULL)
4794 CorruptionSeverity severity = NotCorrupting;
4795 if (CEHelper::IsProcessCorruptedStateException(ex.GetExceptionRecord()->ExceptionCode))
4797 severity = ProcessCorrupting;
4800 pCurTES->SetLastActiveExceptionCorruptionSeverity(severity);
4802 #endif // FEATURE_CORRUPTING_EXCEPTIONS
4805 throw std::move(ex);
4808 #if defined(_TARGET_AMD64_) || defined(_TARGET_X86_)
4812 GetRegisterAddressByIndex
4814 Get address of a register in a context
4817 PCONTEXT pContext : context containing the registers
4818 UINT index : index of the register (Rax=0 .. R15=15)
4821 Pointer to the context member represeting the register
4823 VOID* GetRegisterAddressByIndex(PCONTEXT pContext, UINT index)
4825 return getRegAddr(index, pContext);
4830 GetRegisterValueByIndex
4832 Get value of a register in a context
4835 PCONTEXT pContext : context containing the registers
4836 UINT index : index of the register (Rax=0 .. R15=15)
4839 Value of the context member represeting the register
4841 DWORD64 GetRegisterValueByIndex(PCONTEXT pContext, UINT index)
4843 _ASSERTE(index < 16);
4844 return *(DWORD64*)GetRegisterAddressByIndex(pContext, index);
4849 GetModRMOperandValue
4851 Get value of an instruction operand represented by the ModR/M field
4854 BYTE rex : REX prefix, 0 if there was none
4855 BYTE* ip : instruction pointer pointing to the ModR/M field
4856 PCONTEXT pContext : context containing the registers
4857 bool is8Bit : true if the operand size is 8 bit
4858 bool hasOpSizePrefix : true if the instruction has op size prefix (0x66)
4861 Value of the context member represeting the register
4863 DWORD64 GetModRMOperandValue(BYTE rex, BYTE* ip, PCONTEXT pContext, bool is8Bit, bool hasOpSizePrefix)
4867 BYTE rex_b = (rex & 0x1); // high bit to modrm r/m field or SIB base field
4868 BYTE rex_x = (rex & 0x2) >> 1; // high bit to sib index field
4869 BYTE rex_r = (rex & 0x4) >> 2; // high bit to modrm reg field
4870 BYTE rex_w = (rex & 0x8) >> 3; // 1 = 64 bit operand size, 0 = operand size determined by hasOpSizePrefix
4874 _ASSERTE(modrm != 0);
4876 BYTE mod = (modrm & 0xC0) >> 6;
4877 BYTE reg = (modrm & 0x38) >> 3;
4878 BYTE rm = (modrm & 0x07);
4880 reg |= (rex_r << 3);
4881 BYTE rmIndex = rm | (rex_b << 3);
4883 // 8 bit idiv without the REX prefix uses registers AH, CH, DH, BH for rm 4..8
4884 // which is an exception from the regular register indexes.
4885 bool isAhChDhBh = is8Bit && (rex == 0) && (rm >= 4);
4887 // See: Tables A-15,16,17 in AMD Dev Manual 3 for information
4888 // about how the ModRM/SIB/REX bytes interact.
4895 if (rm == 4) // we have an SIB byte following
4898 // Get values from the SIB byte
4904 BYTE ss = (sib & 0xC0) >> 6;
4905 BYTE index = (sib & 0x38) >> 3;
4906 BYTE base = (sib & 0x07);
4908 index |= (rex_x << 3);
4909 base |= (rex_b << 3);
4912 // Get starting value
4914 if ((mod == 0) && (base == 5))
4920 result = GetRegisterValueByIndex(pContext, base);
4924 // Add in the [index]
4928 result += GetRegisterValueByIndex(pContext, index) << ss;
4932 // Finally add in the offset
4938 result += *((INT32*)ip);
4943 result += *((INT8*)ip);
4947 result += *((INT32*)ip);
4954 // Get the value we need from the register.
4957 // Check for RIP-relative addressing mode for AMD64
4958 // Check for Displacement only addressing mode for x86
4959 if ((mod == 0) && (rm == 5))
4961 #if defined(_TARGET_AMD64_)
4962 result = (DWORD64)ip + sizeof(INT32) + *(INT32*)ip;
4964 result = (DWORD64)(*(DWORD*)ip);
4965 #endif // _TARGET_AMD64_
4969 result = GetRegisterValueByIndex(pContext, rmIndex);
4973 result += *((INT8*)ip);
4977 result += *((INT32*)ip);
4986 // The operand is stored in a register.
4989 // 8 bit idiv without the REX prefix uses registers AH, CH, DH or BH for rm 4..8.
4990 // So we shift the register index to get the real register index.
4994 result = (DWORD64)GetRegisterAddressByIndex(pContext, rmIndex);
4998 // Move one byte higher to get an address of the AH, CH, DH or BH
5007 // Now dereference thru the result to get the resulting value.
5011 result = *((BYTE*)result);
5013 else if (rex_w != 0)
5015 result = *((DWORD64*)result);
5017 else if (hasOpSizePrefix)
5019 result = *((USHORT*)result);
5023 result = *((UINT32*)result);
5033 Skip all prefixes until the instruction code or the REX prefix is found
5036 BYTE** ip : Pointer to the current instruction pointer. Updated
5037 as the function walks the codes.
5038 bool* hasOpSizePrefix : Pointer to bool, on exit set to true if a op size prefix
5042 Code of the REX prefix or the instruction code after the prefixes.
5044 BYTE SkipPrefixes(BYTE **ip, bool* hasOpSizePrefix)
5046 *hasOpSizePrefix = false;
5050 BYTE code = *(*ip)++;
5054 case 0x66: // Operand-Size
5055 *hasOpSizePrefix = true;
5058 // Segment overrides
5067 case 0x67: // Address-Size
5072 // String REP prefixes
5073 case 0xf2: // REPNE/REPNZ
5078 // Return address of the nonprefix code
5086 IsDivByZeroAnIntegerOverflow
5088 Check if a division by zero exception is in fact a division overflow. The
5089 x64 processor generate the same exception in both cases for the IDIV / DIV
5090 instruction. So we need to decode the instruction argument and check
5091 whether it was zero or not.
5094 PCONTEXT pContext : context containing the registers
5095 PEXCEPTION_RECORD pExRecord : exception record of the exception
5098 true if the division error was an overflow
5100 bool IsDivByZeroAnIntegerOverflow(PCONTEXT pContext)
5102 BYTE * ip = (BYTE *)GetIP(pContext);
5104 bool hasOpSizePrefix = false;
5106 BYTE code = SkipPrefixes(&ip, &hasOpSizePrefix);
5108 // The REX prefix must directly preceed the instruction code
5109 if ((code & 0xF0) == 0x40)
5115 DWORD64 divisor = 0;
5117 // Check if the instruction is IDIV or DIV. The instruction code includes the three
5118 // 'reg' bits in the ModRM byte. These are 7 for IDIV and 6 for DIV
5119 BYTE regBits = (*ip & 0x38) >> 3;
5120 if ((code == 0xF7 || code == 0xF6) && (regBits == 7 || regBits == 6))
5122 bool is8Bit = (code == 0xF6);
5123 divisor = GetModRMOperandValue(rex, ip, pContext, is8Bit, hasOpSizePrefix);
5127 _ASSERTE(!"Invalid instruction (expected IDIV or DIV)");
5130 // If the division operand is zero, it was division by zero. Otherwise the failure
5131 // must have been an overflow.
5132 return divisor != 0;
5134 #endif // _TARGET_AMD64_ || _TARGET_X86_
5136 BOOL IsSafeToCallExecutionManager()
5138 Thread *pThread = GetThread();
5140 // It is safe to call the ExecutionManager::IsManagedCode only if the current thread is in
5141 // the cooperative mode. Otherwise ExecutionManager::IsManagedCode could deadlock if
5142 // the exception happened when the thread was holding the ExecutionManager's writer lock.
5143 // When the thread is in preemptive mode, we know for sure that it is not executing managed code.
5144 // Unfortunately, when running GC stress mode that invokes GC after every jitted or NGENed
5145 // instruction, we need to relax that to enable instrumentation of PInvoke stubs that switch to
5146 // preemptive GC mode at some point.
5147 return ((pThread != NULL) && pThread->PreemptiveGCDisabled()) ||
5148 GCStress<cfg_instr_jit>::IsEnabled() ||
5149 GCStress<cfg_instr_ngen>::IsEnabled();
5152 BOOL IsSafeToHandleHardwareException(PCONTEXT contextRecord, PEXCEPTION_RECORD exceptionRecord)
5154 PCODE controlPc = GetIP(contextRecord);
5155 return g_fEEStarted && (
5156 exceptionRecord->ExceptionCode == STATUS_BREAKPOINT ||
5157 exceptionRecord->ExceptionCode == STATUS_SINGLE_STEP ||
5158 (IsSafeToCallExecutionManager() && ExecutionManager::IsManagedCode(controlPc)) ||
5159 #ifdef VSD_STUB_CAN_THROW_AV
5160 IsIPinVirtualStub(controlPc) || // access violation comes from DispatchStub of Interface call
5161 #endif // VSD_STUB_CAN_THROW_AV
5162 IsIPInMarkedJitHelper(controlPc));
5165 #ifdef FEATURE_EMULATE_SINGLESTEP
5166 static inline BOOL HandleSingleStep(PCONTEXT pContext, PEXCEPTION_RECORD pExceptionRecord, Thread *pThread)
5168 // On ARM we don't have any reliable hardware support for single stepping so it is emulated in software.
5169 // The implementation will end up throwing an EXCEPTION_BREAKPOINT rather than an EXCEPTION_SINGLE_STEP
5170 // and leaves other aspects of the thread context in an invalid state. Therefore we use this opportunity
5171 // to fixup the state before any other part of the system uses it (we do it here since only the debugger
5172 // uses single step functionality).
5174 // First ask the emulation itself whether this exception occurred while single stepping was enabled. If so
5175 // it will fix up the context to be consistent again and return true. If so and the exception was
5176 // EXCEPTION_BREAKPOINT then we translate it to EXCEPTION_SINGLE_STEP (otherwise we leave it be, e.g. the
5177 // instruction stepped caused an access violation).
5178 if (pThread->HandleSingleStep(pContext, pExceptionRecord->ExceptionCode) && (pExceptionRecord->ExceptionCode == EXCEPTION_BREAKPOINT))
5180 pExceptionRecord->ExceptionCode = EXCEPTION_SINGLE_STEP;
5181 pExceptionRecord->ExceptionAddress = (void *)GetIP(pContext);
5186 #endif // FEATURE_EMULATE_SINGLESTEP
5188 BOOL HandleHardwareException(PAL_SEHException* ex)
5190 _ASSERTE(IsSafeToHandleHardwareException(ex->GetContextRecord(), ex->GetExceptionRecord()));
5192 if (ex->GetExceptionRecord()->ExceptionCode != STATUS_BREAKPOINT && ex->GetExceptionRecord()->ExceptionCode != STATUS_SINGLE_STEP)
5194 // A hardware exception is handled only if it happened in a jitted code or
5195 // in one of the JIT helper functions (JIT_MemSet, ...)
5196 PCODE controlPc = GetIP(ex->GetContextRecord());
5197 if (ExecutionManager::IsManagedCode(controlPc) && IsGcMarker(ex->GetContextRecord(), ex->GetExceptionRecord()))
5199 // Exception was handled, let the signal handler return to the exception context. Some registers in the context can
5200 // have been modified by the GC.
5204 #if defined(_TARGET_AMD64_) || defined(_TARGET_X86_)
5205 // It is possible that an overflow was mapped to a divide-by-zero exception.
5206 // This happens when we try to divide the maximum negative value of a
5207 // signed integer with -1.
5209 // Thus, we will attempt to decode the instruction @ RIP to determine if that
5210 // is the case using the faulting context.
5211 if ((ex->GetExceptionRecord()->ExceptionCode == EXCEPTION_INT_DIVIDE_BY_ZERO) &&
5212 IsDivByZeroAnIntegerOverflow(ex->GetContextRecord()))
5214 // The exception was an integer overflow, so augment the exception code.
5215 ex->GetExceptionRecord()->ExceptionCode = EXCEPTION_INT_OVERFLOW;
5217 #endif // _TARGET_AMD64_ || _TARGET_X86_
5219 // Create frame necessary for the exception handling
5220 FrameWithCookie<FaultingExceptionFrame> fef;
5221 *((&fef)->GetGSCookiePtr()) = GetProcessGSCookie();
5223 GCX_COOP(); // Must be cooperative to modify frame chain.
5224 if (IsIPInMarkedJitHelper(controlPc))
5226 // For JIT helpers, we need to set the frame to point to the
5227 // managed code that called the helper, otherwise the stack
5228 // walker would skip all the managed frames upto the next
5230 PAL_VirtualUnwind(ex->GetContextRecord(), NULL);
5231 ex->GetExceptionRecord()->ExceptionAddress = (PVOID)GetIP(ex->GetContextRecord());
5233 #ifdef VSD_STUB_CAN_THROW_AV
5234 else if (IsIPinVirtualStub(controlPc))
5236 AdjustContextForVirtualStub(ex->GetExceptionRecord(), ex->GetContextRecord());
5238 #endif // VSD_STUB_CAN_THROW_AV
5239 fef.InitAndLink(ex->GetContextRecord());
5242 DispatchManagedException(*ex, true /* isHardwareException */);
5247 // This is a breakpoint or single step stop, we report it to the debugger.
5248 Thread *pThread = GetThread();
5249 if (pThread != NULL && g_pDebugInterface != NULL)
5251 // On ARM and ARM64 Linux exception point to the break instruction.
5252 // See https://static.docs.arm.com/ddi0487/db/DDI0487D_b_armv8_arm.pdf#page=6916&zoom=100,0,152
5253 // at aarch64/exceptions/debug/AArch64.SoftwareBreakpoint
5254 // However, the rest of the code expects that it points to an instruction after the break.
5255 #if defined(__linux__) && (defined(_TARGET_ARM_) || defined(_TARGET_ARM64_))
5256 if (ex->GetExceptionRecord()->ExceptionCode == STATUS_BREAKPOINT)
5258 SetIP(ex->GetContextRecord(), GetIP(ex->GetContextRecord()) + CORDbg_BREAK_INSTRUCTION_SIZE);
5259 ex->GetExceptionRecord()->ExceptionAddress = (void *)GetIP(ex->GetContextRecord());
5263 #ifdef FEATURE_EMULATE_SINGLESTEP
5264 HandleSingleStep(ex->GetContextRecord(), ex->GetExceptionRecord(), pThread);
5266 if (ex->GetExceptionRecord()->ExceptionCode == STATUS_BREAKPOINT)
5268 // If this is breakpoint context, it is set up to point to an instruction after the break instruction.
5269 // But debugger expects to see context that points to the break instruction, that's why we correct it.
5270 SetIP(ex->GetContextRecord(), GetIP(ex->GetContextRecord()) - CORDbg_BREAK_INSTRUCTION_SIZE);
5271 ex->GetExceptionRecord()->ExceptionAddress = (void *)GetIP(ex->GetContextRecord());
5274 if (g_pDebugInterface->FirstChanceNativeException(ex->GetExceptionRecord(),
5275 ex->GetContextRecord(),
5276 ex->GetExceptionRecord()->ExceptionCode,
5279 // Exception was handled, let the signal handler return to the exception context. Some registers in the context can
5280 // have been modified by the debugger.
5289 #endif // FEATURE_PAL
5292 void ClrUnwindEx(EXCEPTION_RECORD* pExceptionRecord, UINT_PTR ReturnValue, UINT_PTR TargetIP, UINT_PTR TargetFrameSp)
5294 PVOID TargetFrame = (PVOID)TargetFrameSp;
5297 RtlUnwindEx(TargetFrame,
5300 (PVOID)ReturnValue, // ReturnValue
5302 NULL); // HistoryTable
5307 #endif // !FEATURE_PAL
5309 void TrackerAllocator::Init()
5311 void* pvFirstPage = (void*)new BYTE[TRACKER_ALLOCATOR_PAGE_SIZE];
5313 ZeroMemory(pvFirstPage, TRACKER_ALLOCATOR_PAGE_SIZE);
5315 m_pFirstPage = (Page*)pvFirstPage;
5317 _ASSERTE(NULL == m_pFirstPage->m_header.m_pNext);
5318 _ASSERTE(0 == m_pFirstPage->m_header.m_idxFirstFree);
5320 m_pCrst = new Crst(CrstException, CRST_UNSAFE_ANYMODE);
5322 EH_LOG((LL_INFO100, "TrackerAllocator::Init() succeeded..\n"));
5325 void TrackerAllocator::Terminate()
5327 Page* pPage = m_pFirstPage;
5331 Page* pDeleteMe = pPage;
5332 pPage = pPage->m_header.m_pNext;
5333 delete [] pDeleteMe;
5338 ExceptionTracker* TrackerAllocator::GetTrackerMemory()
5340 CONTRACT(ExceptionTracker*)
5345 POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
5349 _ASSERTE(NULL != m_pFirstPage);
5351 Page* pPage = m_pFirstPage;
5353 ExceptionTracker* pTracker = NULL;
5355 for (int i = 0; i < TRACKER_ALLOCATOR_MAX_OOM_SPINS; i++)
5357 { // open lock scope
5358 CrstHolder ch(m_pCrst);
5363 for (idx = 0; idx < NUM_TRACKERS_PER_PAGE; idx++)
5365 pTracker = &(pPage->m_rgTrackers[idx]);
5366 if (pTracker->m_pThread == NULL)
5372 if (idx < NUM_TRACKERS_PER_PAGE)
5378 if (NULL == pPage->m_header.m_pNext)
5380 Page* pNewPage = (Page*) new (nothrow) BYTE[TRACKER_ALLOCATOR_PAGE_SIZE];
5384 STRESS_LOG0(LF_EH, LL_INFO10, "TrackerAllocator: allocated page\n");
5385 pPage->m_header.m_pNext = pNewPage;
5386 ZeroMemory(pPage->m_header.m_pNext, TRACKER_ALLOCATOR_PAGE_SIZE);
5390 STRESS_LOG0(LF_EH, LL_WARNING, "TrackerAllocator: failed to allocate a page\n");
5395 pPage = pPage->m_header.m_pNext;
5401 Thread* pThread = GetThread();
5402 _ASSERTE(NULL != pPage);
5403 ZeroMemory(pTracker, sizeof(*pTracker));
5404 pTracker->m_pThread = pThread;
5405 EH_LOG((LL_INFO100, "TrackerAllocator: allocating tracker 0x%p, thread = 0x%p\n", pTracker, pTracker->m_pThread));
5411 // We could not allocate a new page of memory. This is a fatal error if it happens twice (nested)
5412 // on the same thread because we have only one m_OOMTracker. We will spin hoping for another thread
5413 // to give back to the pool or for the allocation to succeed.
5416 ClrSleepEx(TRACKER_ALLOCATOR_OOM_SPIN_DELAY, FALSE);
5417 STRESS_LOG1(LF_EH, LL_WARNING, "TrackerAllocator: retry #%d\n", i);
5423 void TrackerAllocator::FreeTrackerMemory(ExceptionTracker* pTracker)
5433 // mark this entry as free
5434 EH_LOG((LL_INFO100, "TrackerAllocator: freeing tracker 0x%p, thread = 0x%p\n", pTracker, pTracker->m_pThread));
5435 CONSISTENCY_CHECK(pTracker->IsValid());
5436 FastInterlockExchangePointer(&(pTracker->m_pThread), NULL);
5440 // This is Windows specific implementation as it is based upon the notion of collided unwind that is specific
5441 // to Windows 64bit.
5443 // If pContext is not NULL, then this function copies pContext to pDispatcherContext->ContextRecord. If pContext
5444 // is NULL, then this function assumes that pDispatcherContext->ContextRecord has already been fixed up. In any
5445 // case, this function then starts to update the various fields in pDispatcherContext.
5447 // In order to redirect the unwind, the OS requires us to provide a personality routine for the code at the
5448 // new context we are providing. If RtlVirtualUnwind can't determine the personality routine and using
5449 // the default managed code personality routine isn't appropriate (maybe you aren't returning to managed code)
5450 // specify pUnwindPersonalityRoutine. For instance the debugger uses this to unwind from ExceptionHijack back
5451 // to RaiseException in win32 and specifies an empty personality routine. For more details about this
5452 // see the comments in the code below.
5455 // AMD64 is more "advanced", in that the DISPATCHER_CONTEXT contains a field for the TargetIp. So we don't have
5456 // to use the control PC in pDispatcherContext->ContextRecord to indicate the target IP for the unwind. However,
5457 // this also means that pDispatcherContext->ContextRecord is expected to be consistent.
5458 // </AMD64-specific>
5460 // For more information, refer to vctools\crt\crtw32\misc\{ia64|amd64}\chandler.c for __C_specific_handler() and
5461 // nt\base\ntos\rtl\{ia64|amd64}\exdsptch.c for RtlUnwindEx().
5462 void FixupDispatcherContext(DISPATCHER_CONTEXT* pDispatcherContext, CONTEXT* pContext, LPVOID originalControlPC, PEXCEPTION_ROUTINE pUnwindPersonalityRoutine)
5466 STRESS_LOG1(LF_EH, LL_INFO10, "FDC: pContext: %p\n", pContext);
5467 CopyOSContext(pDispatcherContext->ContextRecord, pContext);
5470 pDispatcherContext->ControlPc = (UINT_PTR) GetIP(pDispatcherContext->ContextRecord);
5472 #if defined(_TARGET_ARM_) || defined(_TARGET_ARM64_)
5473 // Since this routine is used to fixup contexts for async exceptions,
5474 // clear the CONTEXT_UNWOUND_TO_CALL flag since, semantically, frames
5475 // where such exceptions have happened do not have callsites. On a similar
5476 // note, also clear out the ControlPcIsUnwound field. Post discussion with
5477 // AaronGi from the kernel team, it's safe for us to have both of these
5480 // The OS will pick this up with the rest of the DispatcherContext state
5481 // when it processes collided unwind and thus, when our managed personality
5482 // routine is invoked, ExceptionTracker::InitializeCrawlFrame will adjust
5483 // ControlPC correctly.
5484 pDispatcherContext->ContextRecord->ContextFlags &= ~CONTEXT_UNWOUND_TO_CALL;
5485 pDispatcherContext->ControlPcIsUnwound = FALSE;
5487 // Also, clear out the debug-registers flag so that when this context is used by the
5488 // OS, it does not end up setting bogus access breakpoints. The kernel team will also
5489 // be fixing it at their end, in their implementation of collided unwind.
5490 pDispatcherContext->ContextRecord->ContextFlags &= ~CONTEXT_DEBUG_REGISTERS;
5493 // But keep the architecture flag set (its part of CONTEXT_DEBUG_REGISTERS)
5494 pDispatcherContext->ContextRecord->ContextFlags |= CONTEXT_ARM;
5495 #else // _TARGET_ARM64_
5496 // But keep the architecture flag set (its part of CONTEXT_DEBUG_REGISTERS)
5497 pDispatcherContext->ContextRecord->ContextFlags |= CONTEXT_ARM64;
5498 #endif // _TARGET_ARM_
5500 #endif // _TARGET_ARM_ || _TARGET_ARM64_
5502 INDEBUG(pDispatcherContext->FunctionEntry = (PT_RUNTIME_FUNCTION)INVALID_POINTER_CD);
5503 INDEBUG(pDispatcherContext->ImageBase = INVALID_POINTER_CD);
5505 pDispatcherContext->FunctionEntry = RtlLookupFunctionEntry(pDispatcherContext->ControlPc,
5506 &(pDispatcherContext->ImageBase),
5510 _ASSERTE(((PT_RUNTIME_FUNCTION)INVALID_POINTER_CD) != pDispatcherContext->FunctionEntry);
5511 _ASSERTE(INVALID_POINTER_CD != pDispatcherContext->ImageBase);
5514 // need to find the establisher frame by virtually unwinding
5516 CONTEXT tempContext;
5519 CopyOSContext(&tempContext, pDispatcherContext->ContextRecord);
5521 // RtlVirtualUnwind returns the language specific handler for the ControlPC in question
5522 // on ARM and AMD64.
5523 pDispatcherContext->LanguageHandler = RtlVirtualUnwind(
5524 NULL, // HandlerType
5525 pDispatcherContext->ImageBase,
5526 pDispatcherContext->ControlPc,
5527 pDispatcherContext->FunctionEntry,
5530 &(pDispatcherContext->EstablisherFrame),
5533 pDispatcherContext->HandlerData = NULL;
5534 pDispatcherContext->HistoryTable = NULL;
5537 // Why does the OS consider it invalid to have a NULL personality routine (or, why does
5538 // the OS assume that DispatcherContext returned from ExceptionCollidedUnwind will always
5539 // have a valid personality routine)?
5542 // We force the OS to pickup the DispatcherContext (that we fixed above) by returning
5543 // ExceptionCollidedUnwind. Per Dave Cutler, the only entity which is allowed to return
5544 // this exception disposition is the personality routine of the assembly helper which is used
5545 // to invoke the user (stack-based) personality routines. For such invocations made by the
5546 // OS assembly helper, the DispatcherContext it saves before invoking the user personality routine
5547 // will always have a valid personality routine reference and thus, when a real collided unwind happens
5548 // and this exception disposition is returned, OS exception dispatch will have a valid personality routine
5551 // By using this exception disposition to make the OS walk stacks we broke (for async exceptions), we are
5552 // simply abusing the semantic of this disposition. However, since we must use it, we should also check
5553 // that we are returning a valid personality routine reference back to the OS.
5554 if(pDispatcherContext->LanguageHandler == NULL)
5556 if (pUnwindPersonalityRoutine != NULL)
5558 pDispatcherContext->LanguageHandler = pUnwindPersonalityRoutine;
5562 // We would be here only for fixing up context for an async exception in managed code.
5563 // This implies that we should have got a personality routine returned from the call to
5564 // RtlVirtualUnwind above.
5566 // However, if the ControlPC happened to be in the prolog or epilog of a managed method,
5567 // then RtlVirtualUnwind will always return NULL. We cannot return this NULL back to the
5568 // OS as it is an invalid value which the OS does not expect (and attempting to do so will
5569 // result in the kernel exception dispatch going haywire).
5571 // We should be in jitted code
5572 TADDR adrRedirectedIP = PCODEToPINSTR(pDispatcherContext->ControlPc);
5573 _ASSERTE(ExecutionManager::IsManagedCode(adrRedirectedIP));
5576 // Set the personality routine to be returned as the one which is conventionally
5577 // invoked for exception dispatch.
5578 pDispatcherContext->LanguageHandler = (PEXCEPTION_ROUTINE)GetEEFuncEntryPoint(ProcessCLRException);
5579 STRESS_LOG1(LF_EH, LL_INFO10, "FDC: ControlPC was in prolog/epilog, so setting DC->LanguageHandler to %p\n", pDispatcherContext->LanguageHandler);
5583 _ASSERTE(pDispatcherContext->LanguageHandler != NULL);
5587 // See the comment above for the overloaded version of this function.
5588 void FixupDispatcherContext(DISPATCHER_CONTEXT* pDispatcherContext, CONTEXT* pContext, CONTEXT* pOriginalContext, PEXCEPTION_ROUTINE pUnwindPersonalityRoutine = NULL)
5590 _ASSERTE(pOriginalContext != NULL);
5591 FixupDispatcherContext(pDispatcherContext, pContext, (LPVOID)::GetIP(pOriginalContext), pUnwindPersonalityRoutine);
5595 BOOL FirstCallToHandler (
5596 DISPATCHER_CONTEXT *pDispatcherContext,
5597 CONTEXT **ppContextRecord)
5607 FaultingExceptionFrame *pFrame = GetFrameFromRedirectedStubStackFrame(pDispatcherContext);
5609 BOOL *pfFilterExecuted = pFrame->GetFilterExecutedFlag();
5610 BOOL fFilterExecuted = *pfFilterExecuted;
5612 STRESS_LOG4(LF_EH, LL_INFO10, "FirstCallToHandler: Fixing exception context for redirect stub, sp %p, establisher %p, flag %p -> %u\n",
5613 GetSP(pDispatcherContext->ContextRecord),
5614 pDispatcherContext->EstablisherFrame,
5618 *ppContextRecord = pFrame->GetExceptionContext();
5619 *pfFilterExecuted = TRUE;
5621 return !fFilterExecuted;
5625 EXTERN_C EXCEPTION_DISPOSITION
5626 HijackHandler(IN PEXCEPTION_RECORD pExceptionRecord
5627 WIN64_ARG(IN ULONG64 MemoryStackFp)
5628 NOT_WIN64_ARG(IN ULONG MemoryStackFp),
5629 IN OUT PCONTEXT pContextRecord,
5630 IN OUT PDISPATCHER_CONTEXT pDispatcherContext
5641 STRESS_LOG4(LF_EH, LL_INFO10, "HijackHandler: establisher: %p, disp->cxr: %p, sp %p, cxr @ exception: %p\n",
5642 pDispatcherContext->EstablisherFrame,
5643 pDispatcherContext->ContextRecord,
5644 GetSP(pDispatcherContext->ContextRecord),
5647 Thread* pThread = GetThread();
5648 CONTEXT *pNewContext = NULL;
5650 if (FirstCallToHandler(pDispatcherContext, &pNewContext))
5653 // We've pushed a Frame, but it is not initialized yet, so we
5654 // must not be in preemptive mode
5656 CONSISTENCY_CHECK(pThread->PreemptiveGCDisabled());
5659 // AdjustContextForThreadStop will reset the ThrowControlForThread state
5660 // on the thread, but we don't want to do that just yet. We need that
5661 // information in our personality routine, so we will reset it back to
5662 // InducedThreadStop and then clear it in our personality routine.
5664 CONSISTENCY_CHECK(IsThreadHijackedForThreadStop(pThread, pExceptionRecord));
5665 AdjustContextForThreadStop(pThread, pNewContext);
5666 pThread->SetThrowControlForThread(Thread::InducedThreadStop);
5669 FixupDispatcherContext(pDispatcherContext, pNewContext, pContextRecord);
5671 STRESS_LOG4(LF_EH, LL_INFO10, "HijackHandler: new establisher: %p, disp->cxr: %p, new ip: %p, new sp: %p\n",
5672 pDispatcherContext->EstablisherFrame,
5673 pDispatcherContext->ContextRecord,
5674 GetIP(pDispatcherContext->ContextRecord),
5675 GetSP(pDispatcherContext->ContextRecord));
5677 // Returning ExceptionCollidedUnwind will cause the OS to take our new context record
5678 // and dispatcher context and restart the exception dispatching on this call frame,
5679 // which is exactly the behavior we want in order to restore our thread's unwindability
5680 // (which was broken when we whacked the IP to get control over the thread)
5681 return ExceptionCollidedUnwind;
5685 EXTERN_C VOID FixContextForFaultingExceptionFrame (
5686 EXCEPTION_RECORD* pExceptionRecord,
5687 CONTEXT *pContextRecord);
5689 EXTERN_C EXCEPTION_DISPOSITION
5690 FixContextHandler(IN PEXCEPTION_RECORD pExceptionRecord
5691 WIN64_ARG(IN ULONG64 MemoryStackFp)
5692 NOT_WIN64_ARG(IN ULONG MemoryStackFp),
5693 IN OUT PCONTEXT pContextRecord,
5694 IN OUT PDISPATCHER_CONTEXT pDispatcherContext
5697 CONTEXT* pNewContext = NULL;
5699 if (FirstCallToHandler(pDispatcherContext, &pNewContext))
5702 // We've pushed a Frame, but it is not initialized yet, so we
5703 // must not be in preemptive mode
5705 CONSISTENCY_CHECK(GetThread()->PreemptiveGCDisabled());
5707 FixContextForFaultingExceptionFrame(pExceptionRecord, pNewContext);
5710 FixupDispatcherContext(pDispatcherContext, pNewContext, pContextRecord);
5712 // Returning ExceptionCollidedUnwind will cause the OS to take our new context record
5713 // and dispatcher context and restart the exception dispatching on this call frame,
5714 // which is exactly the behavior we want in order to restore our thread's unwindability
5715 // (which was broken when we whacked the IP to get control over the thread)
5716 return ExceptionCollidedUnwind;
5718 #endif // !FEATURE_PAL
5721 // IsSafeToUnwindFrameChain:
5723 // pThread the Thread* being unwound
5724 // MemoryStackFpForFrameChain the stack limit to unwind the Frames
5726 // FALSE if the value MemoryStackFpForFrameChain falls between a M2U transition frame
5727 // and its corresponding managed method stack pointer
5730 // If the managed method will *NOT* be unwound by the current exception
5731 // pass we have an error: with no Frame on the stack to report it, the
5732 // managed method will not be included in the next stack walk.
5733 // An example of running into this issue was DDBug 1133, where
5734 // TransparentProxyStubIA64 had a personality routine that removed a
5735 // transition frame. As a consequence the managed method did not
5736 // participate in the stack walk until the exception handler was called. At
5737 // that time the stack walking code was able to see the managed method again
5738 // but by this time all references from this managed method were stale.
5739 BOOL IsSafeToUnwindFrameChain(Thread* pThread, LPVOID MemoryStackFpForFrameChain)
5741 // Look for the last Frame to be removed that marks a managed-to-unmanaged transition
5742 Frame* pLastFrameOfInterest = FRAME_TOP;
5743 for (Frame* pf = pThread->m_pFrame; pf < MemoryStackFpForFrameChain; pf = pf->PtrNextFrame())
5745 PCODE retAddr = pf->GetReturnAddress();
5746 if (retAddr != NULL && ExecutionManager::IsManagedCode(retAddr))
5748 pLastFrameOfInterest = pf;
5752 // If there is none it's safe to remove all these Frames
5753 if (pLastFrameOfInterest == FRAME_TOP)
5758 // Otherwise "unwind" to managed method
5763 FillRegDisplay(&rd, &ctx);
5764 pLastFrameOfInterest->UpdateRegDisplay(&rd);
5766 // We're safe only if the managed method will be unwound also
5767 LPVOID managedSP = dac_cast<PTR_VOID>(GetRegdisplaySP(&rd));
5769 if (managedSP < MemoryStackFpForFrameChain)
5782 void CleanUpForSecondPass(Thread* pThread, bool fIsSO, LPVOID MemoryStackFpForFrameChain, LPVOID MemoryStackFp)
5784 WRAPPER_NO_CONTRACT;
5786 EH_LOG((LL_INFO100, "Exception is going into unmanaged code, unwinding frame chain to %p\n", MemoryStackFpForFrameChain));
5788 // On AMD64 the establisher pointer is the live stack pointer, but on
5789 // IA64 and ARM it's the caller's stack pointer. It makes no difference, since there
5790 // is no Frame anywhere in CallDescrWorker's region of stack.
5792 // First make sure that unwinding the frame chain does not remove any transition frames
5793 // that report managed methods that will not be unwound.
5794 // If this assert fires it's probably the personality routine of some assembly code that
5795 // incorrectly removed a transition frame (more details in IsSafeToUnwindFrameChain)
5796 // [Do not perform the IsSafeToUnwindFrameChain() check in the SO case, since
5797 // IsSafeToUnwindFrameChain() requires a large amount of stack space.]
5798 _ASSERTE(fIsSO || IsSafeToUnwindFrameChain(pThread, (Frame*)MemoryStackFpForFrameChain));
5800 UnwindFrameChain(pThread, (Frame*)MemoryStackFpForFrameChain);
5802 // Only pop the trackers if this is not an SO. It's not safe to pop the trackers during EH for an SO.
5803 // Instead, we rely on the END_SO_TOLERANT_CODE macro to call ClearExceptionStateAfterSO(). Of course,
5804 // we may leak in the UMThunkStubCommon() case where we don't have this macro lower on the stack
5805 // (stack grows up).
5808 ExceptionTracker::PopTrackerIfEscaping((void*)MemoryStackFp);
5814 // This is a personality routine for TheUMEntryPrestub and UMThunkStub Unix asm stubs.
5815 // An exception propagating through these stubs is an unhandled exception.
5816 // This function dumps managed stack trace and terminates the current process.
5817 EXTERN_C _Unwind_Reason_Code
5818 UnhandledExceptionHandlerUnix(
5820 IN _Unwind_Action action,
5821 IN uint64_t exceptionClass,
5822 IN struct _Unwind_Exception *exception,
5823 IN struct _Unwind_Context *context
5826 // Unhandled exception happened, so dump the managed stack trace and terminate the process
5828 DefaultCatchHandler(NULL /*pExceptionInfo*/, NULL /*Throwable*/, TRUE /*useLastThrownObject*/,
5829 TRUE /*isTerminating*/, FALSE /*isThreadBaseFIlter*/, FALSE /*sendAppDomainEvents*/);
5831 EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE);
5832 return _URC_FATAL_PHASE1_ERROR;
5835 #else // FEATURE_PAL
5837 EXTERN_C EXCEPTION_DISPOSITION
5838 UMThunkUnwindFrameChainHandler(IN PEXCEPTION_RECORD pExceptionRecord
5839 WIN64_ARG(IN ULONG64 MemoryStackFp)
5840 NOT_WIN64_ARG(IN ULONG MemoryStackFp),
5841 IN OUT PCONTEXT pContextRecord,
5842 IN OUT PDISPATCHER_CONTEXT pDispatcherContext
5845 Thread* pThread = GetThread();
5846 if (pThread == NULL) {
5847 return ExceptionContinueSearch;
5850 bool fIsSO = pExceptionRecord->ExceptionCode == STATUS_STACK_OVERFLOW;
5852 if (IS_UNWINDING(pExceptionRecord->ExceptionFlags))
5856 if (!pThread->PreemptiveGCDisabled())
5858 pThread->DisablePreemptiveGC();
5861 CleanUpForSecondPass(pThread, fIsSO, (void*)MemoryStackFp, (void*)MemoryStackFp);
5864 // The asm stub put us into COOP mode, but we're about to scan unmanaged call frames
5865 // so unmanaged filters/handlers/etc can run and we must be in PREEMP mode for that.
5866 if (pThread->PreemptiveGCDisabled())
5870 // We don't have stack to do full-version EnablePreemptiveGC.
5871 FastInterlockAnd (&pThread->m_fPreemptiveGCDisabled, 0);
5875 pThread->EnablePreemptiveGC();
5879 return ExceptionContinueSearch;
5882 EXTERN_C EXCEPTION_DISPOSITION
5883 UMEntryPrestubUnwindFrameChainHandler(
5884 IN PEXCEPTION_RECORD pExceptionRecord
5885 WIN64_ARG(IN ULONG64 MemoryStackFp)
5886 NOT_WIN64_ARG(IN ULONG MemoryStackFp),
5887 IN OUT PCONTEXT pContextRecord,
5888 IN OUT PDISPATCHER_CONTEXT pDispatcherContext
5891 EXCEPTION_DISPOSITION disposition = UMThunkUnwindFrameChainHandler(
5901 EXTERN_C EXCEPTION_DISPOSITION
5902 UMThunkStubUnwindFrameChainHandler(
5903 IN PEXCEPTION_RECORD pExceptionRecord
5904 WIN64_ARG(IN ULONG64 MemoryStackFp)
5905 NOT_WIN64_ARG(IN ULONG MemoryStackFp),
5906 IN OUT PCONTEXT pContextRecord,
5907 IN OUT PDISPATCHER_CONTEXT pDispatcherContext
5912 // If the exception is escaping the last CLR personality routine on the stack,
5913 // then state a flag on the thread to indicate so.
5915 // We check for thread object since this function is the personality routine of the UMThunk
5916 // and we can landup here even when thread creation (within the thunk) fails.
5917 if (GetThread() != NULL)
5919 SetReversePInvokeEscapingUnhandledExceptionStatus(IS_UNWINDING(pExceptionRecord->ExceptionFlags),
5925 EXCEPTION_DISPOSITION disposition = UMThunkUnwindFrameChainHandler(
5936 // This is the personality routine setup for the assembly helper (CallDescrWorker) that calls into
5938 EXTERN_C EXCEPTION_DISPOSITION
5939 CallDescrWorkerUnwindFrameChainHandler(IN PEXCEPTION_RECORD pExceptionRecord
5940 WIN64_ARG(IN ULONG64 MemoryStackFp)
5941 NOT_WIN64_ARG(IN ULONG MemoryStackFp),
5942 IN OUT PCONTEXT pContextRecord,
5943 IN OUT PDISPATCHER_CONTEXT pDispatcherContext
5947 Thread* pThread = GetThread();
5950 if (pExceptionRecord->ExceptionCode == STATUS_STACK_OVERFLOW)
5952 if (IS_UNWINDING(pExceptionRecord->ExceptionFlags))
5955 CleanUpForSecondPass(pThread, true, (void*)MemoryStackFp, (void*)MemoryStackFp);
5958 FastInterlockAnd (&pThread->m_fPreemptiveGCDisabled, 0);
5959 // We'll let the SO infrastructure handle this exception... at that point, we
5960 // know that we'll have enough stack to do it.
5961 return ExceptionContinueSearch;
5964 EXCEPTION_DISPOSITION retVal = ProcessCLRException(pExceptionRecord,
5967 pDispatcherContext);
5969 if (retVal == ExceptionContinueSearch)
5972 if (IS_UNWINDING(pExceptionRecord->ExceptionFlags))
5974 CleanUpForSecondPass(pThread, false, (void*)MemoryStackFp, (void*)MemoryStackFp);
5977 // We're scanning out from CallDescr and potentially through the EE and out to unmanaged.
5978 // So switch to preemptive mode.
5979 GCX_PREEMP_NO_DTOR();
5985 #endif // FEATURE_PAL
5987 #ifdef FEATURE_COMINTEROP
5988 EXTERN_C EXCEPTION_DISPOSITION
5989 ReverseComUnwindFrameChainHandler(IN PEXCEPTION_RECORD pExceptionRecord
5990 WIN64_ARG(IN ULONG64 MemoryStackFp)
5991 NOT_WIN64_ARG(IN ULONG MemoryStackFp),
5992 IN OUT PCONTEXT pContextRecord,
5993 IN OUT PDISPATCHER_CONTEXT pDispatcherContext
5996 if (IS_UNWINDING(pExceptionRecord->ExceptionFlags))
5998 ComMethodFrame::DoSecondPassHandlerCleanup(GetThread()->GetFrame());
6000 return ExceptionContinueSearch;
6002 #endif // FEATURE_COMINTEROP
6005 EXTERN_C EXCEPTION_DISPOSITION
6006 FixRedirectContextHandler(
6007 IN PEXCEPTION_RECORD pExceptionRecord
6008 WIN64_ARG(IN ULONG64 MemoryStackFp)
6009 NOT_WIN64_ARG(IN ULONG MemoryStackFp),
6010 IN OUT PCONTEXT pContextRecord,
6011 IN OUT PDISPATCHER_CONTEXT pDispatcherContext
6022 STRESS_LOG4(LF_EH, LL_INFO10, "FixRedirectContextHandler: sp %p, establisher %p, cxr: %p, disp cxr: %p\n",
6023 GetSP(pDispatcherContext->ContextRecord),
6024 pDispatcherContext->EstablisherFrame,
6026 pDispatcherContext->ContextRecord);
6028 CONTEXT *pRedirectedContext = GetCONTEXTFromRedirectedStubStackFrame(pDispatcherContext);
6030 FixupDispatcherContext(pDispatcherContext, pRedirectedContext, pContextRecord);
6032 // Returning ExceptionCollidedUnwind will cause the OS to take our new context record
6033 // and dispatcher context and restart the exception dispatching on this call frame,
6034 // which is exactly the behavior we want in order to restore our thread's unwindability
6035 // (which was broken when we whacked the IP to get control over the thread)
6036 return ExceptionCollidedUnwind;
6038 #endif // !FEATURE_PAL
6039 #endif // DACCESS_COMPILE
6041 void ExceptionTracker::StackRange::Reset()
6043 LIMITED_METHOD_CONTRACT;
6045 m_sfLowBound.SetMaxVal();
6046 m_sfHighBound.Clear();
6049 bool ExceptionTracker::StackRange::IsEmpty()
6051 LIMITED_METHOD_CONTRACT;
6052 return (m_sfLowBound.IsMaxVal() &&
6053 m_sfHighBound.IsNull());
6056 bool ExceptionTracker::StackRange::IsSupersededBy(StackFrame sf)
6058 LIMITED_METHOD_CONTRACT;
6059 CONSISTENCY_CHECK(IsConsistent());
6061 return (sf >= m_sfLowBound);
6064 void ExceptionTracker::StackRange::CombineWith(StackFrame sfCurrent, StackRange* pPreviousRange)
6066 LIMITED_METHOD_CONTRACT;
6068 if ((pPreviousRange->m_sfHighBound < sfCurrent) && IsEmpty())
6070 // This case comes from an unusual situation. It is possible for a new nested tracker to start its
6071 // first pass at a higher SP than any previously scanned frame in the previous "enclosing" tracker.
6072 // Typically this doesn't happen because the ProcessCLRException callback is made multiple times for
6073 // the frame where the nesting first occurs and that will ensure that the stack range of the new
6074 // nested exception is extended to contain the scan range of the previous tracker's scan. However,
6075 // if the exception dispatch calls a C++ handler (e.g. a finally) and then that handler tries to
6076 // reverse-pinvoke into the runtime, AND we trigger an exception (e.g. ThreadAbort)
6077 // before we reach another managed frame (which would have the CLR personality
6078 // routine associated with it), the first callback to ProcessCLRException for this new exception
6079 // will occur on a frame that has never been seen before by the current tracker.
6081 // So in this case, we'll see a sfCurrent that is larger than the previous tracker's high bound and
6082 // we'll have an empty scan range for the current tracker. And we'll just need to pre-init the
6083 // scanned stack range for the new tracker to the previous tracker's range. This maintains the
6084 // invariant that the scanned range for nested trackers completely cover the scanned range of their
6085 // previous tracker once they "escape" the previous tracker.
6086 STRESS_LOG3(LF_EH, LL_INFO100,
6087 "Initializing current StackRange with previous tracker's StackRange. sfCurrent: %p, prev low: %p, prev high: %p\n",
6088 sfCurrent.SP, pPreviousRange->m_sfLowBound.SP, pPreviousRange->m_sfHighBound.SP);
6090 *this = *pPreviousRange;
6095 // When the current range is empty, copy the low bound too. Otherwise a degenerate range would get
6096 // created and tests for stack frame in the stack range would always fail.
6097 // TODO: Check if we could enable it for non-PAL as well.
6100 m_sfLowBound = pPreviousRange->m_sfLowBound;
6102 #endif // FEATURE_PAL
6103 m_sfHighBound = pPreviousRange->m_sfHighBound;
6107 bool ExceptionTracker::StackRange::Contains(StackFrame sf)
6109 LIMITED_METHOD_CONTRACT;
6110 CONSISTENCY_CHECK(IsConsistent());
6112 return ((m_sfLowBound <= sf) &&
6113 (sf <= m_sfHighBound));
6116 void ExceptionTracker::StackRange::ExtendUpperBound(StackFrame sf)
6118 LIMITED_METHOD_CONTRACT;
6119 CONSISTENCY_CHECK(IsConsistent());
6120 CONSISTENCY_CHECK(sf > m_sfHighBound);
6125 void ExceptionTracker::StackRange::ExtendLowerBound(StackFrame sf)
6127 LIMITED_METHOD_CONTRACT;
6128 CONSISTENCY_CHECK(IsConsistent());
6129 CONSISTENCY_CHECK(sf < m_sfLowBound);
6134 void ExceptionTracker::StackRange::TrimLowerBound(StackFrame sf)
6136 LIMITED_METHOD_CONTRACT;
6137 CONSISTENCY_CHECK(IsConsistent());
6138 CONSISTENCY_CHECK(sf >= m_sfLowBound);
6143 StackFrame ExceptionTracker::StackRange::GetLowerBound()
6145 LIMITED_METHOD_CONTRACT;
6146 CONSISTENCY_CHECK(IsConsistent());
6148 return m_sfLowBound;
6151 StackFrame ExceptionTracker::StackRange::GetUpperBound()
6153 LIMITED_METHOD_CONTRACT;
6154 CONSISTENCY_CHECK(IsConsistent());
6156 return m_sfHighBound;
6160 bool ExceptionTracker::StackRange::IsDisjointWithAndLowerThan(StackRange* pOtherRange)
6162 CONSISTENCY_CHECK(IsConsistent());
6163 CONSISTENCY_CHECK(pOtherRange->IsConsistent());
6165 return m_sfHighBound < pOtherRange->m_sfLowBound;
6172 bool ExceptionTracker::StackRange::IsConsistent()
6174 LIMITED_METHOD_CONTRACT;
6175 if (m_sfLowBound.IsMaxVal() ||
6176 m_sfHighBound.IsNull())
6181 if (m_sfLowBound <= m_sfHighBound)
6186 LOG((LF_EH, LL_ERROR, "sp: low: %p high: %p\n", m_sfLowBound.SP, m_sfHighBound.SP));
6192 // Determine if the given StackFrame is in the stack region unwound by the specified ExceptionTracker.
6193 // This is used by the stackwalker to skip funclets. Refer to the calls to this method in StackWalkFramesEx()
6194 // for more information.
6196 // Effectively, this will make the stackwalker skip all the frames until it reaches the frame
6197 // containing the funclet. Details of the skipping logic are described in the method implementation.
6200 bool ExceptionTracker::IsInStackRegionUnwoundBySpecifiedException(CrawlFrame * pCF, PTR_ExceptionTracker pExceptionTracker)
6202 LIMITED_METHOD_CONTRACT;
6204 _ASSERTE(pCF != NULL);
6206 // The tracker must be in the second pass, and its stack range must not be empty.
6207 if ( (pExceptionTracker == NULL) ||
6208 pExceptionTracker->IsInFirstPass() ||
6209 pExceptionTracker->m_ScannedStackRange.IsEmpty())
6214 CallerStackFrame csfToCheck;
6215 if (pCF->IsFrameless())
6217 csfToCheck = CallerStackFrame::FromRegDisplay(pCF->GetRegisterSet());
6221 csfToCheck = CallerStackFrame((UINT_PTR)pCF->GetFrame());
6224 StackFrame sfLowerBound = pExceptionTracker->m_ScannedStackRange.GetLowerBound();
6225 StackFrame sfUpperBound = pExceptionTracker->m_ScannedStackRange.GetUpperBound();
6228 // Let's take an example callstack that grows from left->right:
6230 // M5 (50) -> M4 (40) -> M3 (30) -> M2 (20) -> M1 (10) ->throw
6232 // These are all managed frames, where M1 throws and the exception is caught
6233 // in M4. The numbers in the brackets are the values of the stack pointer after
6234 // the prolog is executed (or, in case of dynamic allocation, its SP after
6235 // dynamic allocation) and will be the SP at the time the callee function
6238 // When the stackwalker is asked to skip funclets during the stackwalk,
6239 // it will skip all the frames on the stack until it reaches the frame
6240 // containing the funclet after it has identified the funclet from
6241 // which the skipping of frames needs to commence.
6243 // At such a point, the exception tracker's scanned stack range's
6244 // lowerbound will correspond to the frame that had the exception
6245 // and the upper bound will correspond to the frame that had the funclet.
6246 // For scenarios like security stackwalk that may be triggered out of a
6247 // funclet (e.g. a catch block), skipping funclets and frames in this fashion
6248 // is expected to lead us to the parent frame containing the funclet as it
6249 // will contain an object of interest (e.g. security descriptor).
6251 // The check below ensures that we skip the frames from the one that
6252 // had exception to the one that is the callee of the method containing
6253 // the funclet of interest. In the example above, this would mean skipping
6256 // We use CallerSP of a given CrawlFrame to perform such a skip. On AMD64,
6257 // the first frame where CallerSP will be greater than SP of the frame
6258 // itself will be when we reach the lowest frame itself (i.e. M1). On a similar
6259 // note, the only time when CallerSP of a given CrawlFrame will be equal to the
6260 // upper bound is when we reach the callee of the frame containing the funclet.
6261 // Thus, our check for the skip range is done by the following clause:
6263 // if ((sfLowerBound < csfToCheck) && (csfToCheck <= sfUpperBound))
6265 // On ARM and ARM64, while the lower and upper bounds are populated using the Establisher
6266 // frame given by the OS during exception dispatch, they actually correspond to the
6267 // SP of the caller of a given frame, instead of being the SP of the given frame.
6268 // Thus, in the example, we will have lowerBound as 20 (corresponding to M1) and
6269 // upperBound as 50 (corresponding to M4 which contains the catch funclet).
6271 // Thus, to skip frames on ARM and ARM64 until we reach the frame containing funclet of
6272 // interest, the skipping will done by the following clause:
6274 // if ((sfLowerBound <= csfToCheck) && (csfToCheck < sfUpperBound))
6276 // The first time when CallerSP of a given CrawlFrame will be the same as lowerBound
6277 // is when we will reach the first frame to be skipped. Likewise, last frame whose
6278 // CallerSP will be less than the upperBound will be the callee of the frame
6279 // containing the funclet. When CallerSP is equal to the upperBound, we have reached
6280 // the frame containing the funclet and DO NOT want to skip it. Hence, "<"
6281 // in the 2nd part of the clause.
6283 // Remember that sfLowerBound and sfUpperBound are in the "OS format".
6284 // Refer to the comment for CallerStackFrame for more information.
6285 #ifndef STACK_RANGE_BOUNDS_ARE_CALLER_SP
6286 if ((sfLowerBound < csfToCheck) && (csfToCheck <= sfUpperBound))
6287 #else // !STACK_RANGE_BOUNDS_ARE_CALLER_SP
6288 if ((sfLowerBound <= csfToCheck) && (csfToCheck < sfUpperBound))
6289 #endif // STACK_RANGE_BOUNDS_ARE_CALLER_SP
6299 // Returns a bool indicating if the specified CrawlFrame has been unwound by the active exception.
6300 bool ExceptionTracker::IsInStackRegionUnwoundByCurrentException(CrawlFrame * pCF)
6302 LIMITED_METHOD_CONTRACT;
6304 Thread * pThread = pCF->pThread;
6305 PTR_ExceptionTracker pCurrentTracker = pThread->GetExceptionState()->GetCurrentExceptionTracker();
6306 return ExceptionTracker::IsInStackRegionUnwoundBySpecifiedException(pCF, pCurrentTracker);
6311 // Returns a bool indicating if the specified CrawlFrame has been unwound by any active (e.g. nested) exceptions.
6313 // This method uses various fields of the ExceptionTracker data structure to do its work. Since this code runs on the thread
6314 // performing the GC stackwalk, it must be ensured that these fields are not updated on another thread in parallel. Thus,
6315 // any access to the fields in question that may result in updating them should happen in COOP mode. This provides a high-level
6316 // synchronization with the GC thread since when GC stackwalk is active, attempt to enter COOP mode will result in the thread blocking
6317 // and thus, attempts to update such fields will be synchronized.
6319 // Currently, the following fields are used below:
6321 // m_ExceptionFlags, m_ScannedStackRange, m_sfCurrentEstablisherFrame, m_sfLastUnwoundEstablisherFrame,
6322 // m_pInitialExplicitFrame, m_pLimitFrame, m_pPrevNestedInfo.
6324 bool ExceptionTracker::HasFrameBeenUnwoundByAnyActiveException(CrawlFrame * pCF)
6326 LIMITED_METHOD_CONTRACT;
6328 _ASSERTE(pCF != NULL);
6330 // Enumerate all (nested) exception trackers and see if any of them has unwound the
6331 // specified CrawlFrame.
6332 Thread * pTargetThread = pCF->pThread;
6333 PTR_ExceptionTracker pTopTracker = pTargetThread->GetExceptionState()->GetCurrentExceptionTracker();
6334 PTR_ExceptionTracker pCurrentTracker = pTopTracker;
6336 bool fHasFrameBeenUnwound = false;
6338 while (pCurrentTracker != NULL)
6340 bool fSkipCurrentTracker = false;
6342 // The tracker must be in the second pass, and its stack range must not be empty.
6343 if (pCurrentTracker->IsInFirstPass() ||
6344 pCurrentTracker->m_ScannedStackRange.IsEmpty())
6346 fSkipCurrentTracker = true;
6349 if (!fSkipCurrentTracker)
6351 CallerStackFrame csfToCheck;
6352 bool fFrameless = false;
6353 if (pCF->IsFrameless())
6355 csfToCheck = CallerStackFrame::FromRegDisplay(pCF->GetRegisterSet());
6360 csfToCheck = CallerStackFrame((UINT_PTR)pCF->GetFrame());
6363 STRESS_LOG4(LF_EH|LF_GCROOTS, LL_INFO100, "CrawlFrame (%p): Frameless: %s %s: %p\n",
6364 pCF, fFrameless ? "Yes" : "No", fFrameless ? "CallerSP" : "Address", csfToCheck.SP);
6366 StackFrame sfLowerBound = pCurrentTracker->m_ScannedStackRange.GetLowerBound();
6367 StackFrame sfUpperBound = pCurrentTracker->m_ScannedStackRange.GetUpperBound();
6368 StackFrame sfCurrentEstablisherFrame = pCurrentTracker->GetCurrentEstablisherFrame();
6369 StackFrame sfLastUnwoundEstablisherFrame = pCurrentTracker->GetLastUnwoundEstablisherFrame();
6371 STRESS_LOG4(LF_EH|LF_GCROOTS, LL_INFO100, "LowerBound/UpperBound/CurrentEstablisherFrame/LastUnwoundManagedFrame: %p/%p/%p/%p\n",
6372 sfLowerBound.SP, sfUpperBound.SP, sfCurrentEstablisherFrame.SP, sfLastUnwoundEstablisherFrame.SP);
6374 // Refer to the detailed comment in ExceptionTracker::IsInStackRegionUnwoundBySpecifiedException on the nature
6377 #ifndef STACK_RANGE_BOUNDS_ARE_CALLER_SP
6378 if ((sfLowerBound < csfToCheck) && (csfToCheck <= sfUpperBound))
6379 #else // !STACK_RANGE_BOUNDS_ARE_CALLER_SP
6380 if ((sfLowerBound <= csfToCheck) && (csfToCheck < sfUpperBound))
6381 #endif // STACK_RANGE_BOUNDS_ARE_CALLER_SP
6383 fHasFrameBeenUnwound = true;
6388 // The frame in question was not found to be covered by the scanned stack range of the exception tracker.
6389 // If the frame is managed, then it is possible that it forms the upper bound of the scanned stack range.
6391 // The scanned stack range is updated by our personality routine once ExceptionTracker::ProcessOSExceptionNotification is invoked.
6392 // However, it is possible that we have unwound a frame and returned back to the OS (in preemptive mode) and:
6394 // 1) Either our personality routine has been invoked for the subsequent upstack managed frame but it has not yet got a chance to update
6395 // the scanned stack range, OR
6396 // 2) We have simply returned to the kernel exception dispatch and yet to be invoked for a subsequent frame.
6398 // In such a window, if we have been asked to check if the frame forming the upper bound of the scanned stack range has been unwound, or not,
6399 // then do the needful validations.
6401 // This is applicable to managed frames only.
6404 #ifndef STACK_RANGE_BOUNDS_ARE_CALLER_SP
6405 // On X64, if the SP of the managed frame indicates that the frame is forming the upper bound,
6408 // For case (1) above, sfCurrentEstablisherFrame will be the same as the callerSP of the managed frame.
6409 // For case (2) above, sfLastUnwoundEstablisherFrame would be the same as the managed frame's SP (or upper bound)
6411 // For these scenarios, the frame is considered unwound.
6413 // For most cases which satisfy above condition GetRegdisplaySP(pCF->GetRegisterSet()) will be equal to sfUpperBound.SP.
6414 // However, frames where Sp is modified after prolog ( eg. localloc) this might not be the case. For those scenarios,
6415 // we need to check if sfUpperBound.SP is in between GetRegdisplaySP(pCF->GetRegisterSet()) & callerSp.
6416 if (GetRegdisplaySP(pCF->GetRegisterSet()) <= sfUpperBound.SP && sfUpperBound < csfToCheck)
6418 if (csfToCheck == sfCurrentEstablisherFrame)
6420 fHasFrameBeenUnwound = true;
6423 else if (sfUpperBound == sfLastUnwoundEstablisherFrame)
6425 fHasFrameBeenUnwound = true;
6429 #else // !STACK_RANGE_BOUNDS_ARE_CALLER_SP
6430 // On ARM, if the callerSP of the managed frame is the same as upper bound, then:
6432 // For case (1), sfCurrentEstablisherFrame will be above the callerSP of the managed frame (since EstablisherFrame is the caller SP for a given frame on ARM)
6433 // For case (2), upper bound will be the same as LastUnwoundEstablisherFrame.
6435 // For these scenarios, the frame is considered unwound.
6436 if (sfUpperBound == csfToCheck)
6438 if (csfToCheck < sfCurrentEstablisherFrame)
6440 fHasFrameBeenUnwound = true;
6443 else if (sfLastUnwoundEstablisherFrame == sfUpperBound)
6445 fHasFrameBeenUnwound = true;
6449 #endif // STACK_RANGE_BOUNDS_ARE_CALLER_SP
6452 // The frame in question does not appear in the current tracker's scanned stack range (of managed frames).
6453 // If the frame is an explicit frame, then check if it equal to (or greater) than the initial explicit frame
6454 // of the tracker. We can do this equality comparison because explicit frames are stack allocated.
6456 // Do keep in mind that InitialExplicitFrame is only set in the 2nd (unwind) pass, which works
6457 // fine for the purpose of this method since it operates on exception trackers in the second pass only.
6460 PTR_Frame pInitialExplicitFrame = pCurrentTracker->GetInitialExplicitFrame();
6461 PTR_Frame pLimitFrame = pCurrentTracker->GetLimitFrame();
6463 #if !defined(DACCESS_COMPILE)
6464 STRESS_LOG2(LF_EH|LF_GCROOTS, LL_INFO100, "InitialExplicitFrame: %p, LimitFrame: %p\n", pInitialExplicitFrame, pLimitFrame);
6465 #endif // !defined(DACCESS_COMPILE)
6467 // Ideally, we would like to perform a comparison check to determine if the
6468 // frame has been unwound. This, however, is based upon the premise that
6469 // each explicit frame that is added to the frame chain is at a lower
6470 // address than this predecessor.
6472 // This works for frames across function calls but if we have multiple
6473 // explicit frames in the same function, then the compiler is free to
6474 // assign an address it deems fit. Thus, its totally possible for a
6475 // frame at the head of the frame chain to be at a higher address than
6476 // its predecessor. This has been observed to be true with VC++ compiler
6477 // in the CLR ret build.
6479 // To address this, we loop starting from the InitialExplicitFrame until we reach
6480 // the LimitFrame. Since all frames starting from the InitialExplicitFrame, and prior
6481 // to the LimitFrame, have been unwound, we break out of the loop if we find
6482 // the frame we are looking for, setting a flag indicating that the frame in question
6485 /*if ((sfInitialExplicitFrame <= csfToCheck) && (csfToCheck < sfLimitFrame))
6487 // The explicit frame falls in the range of explicit frames unwound by this tracker.
6488 fHasFrameBeenUnwound = true;
6492 // The pInitialExplicitFrame can be NULL on Unix right after we've unwound a sequence
6493 // of native frames in the second pass of exception unwinding, since the pInitialExplicitFrame
6494 // is cleared to make sure that it doesn't point to a frame that was destroyed during the
6495 // native frames unwinding. At that point, the csfToCheck could not have been unwound,
6496 // so we don't need to do any check.
6497 if (pInitialExplicitFrame != NULL)
6499 PTR_Frame pFrameToCheck = (PTR_Frame)csfToCheck.SP;
6500 PTR_Frame pCurrentFrame = pInitialExplicitFrame;
6503 while((pCurrentFrame != FRAME_TOP) && (pCurrentFrame != pLimitFrame))
6505 if (pCurrentFrame == pFrameToCheck)
6507 fHasFrameBeenUnwound = true;
6511 pCurrentFrame = pCurrentFrame->PtrNextFrame();
6515 if (fHasFrameBeenUnwound == true)
6523 // Move to the next (previous) tracker
6524 pCurrentTracker = pCurrentTracker->GetPreviousExceptionTracker();
6527 if (fHasFrameBeenUnwound)
6528 STRESS_LOG0(LF_EH|LF_GCROOTS, LL_INFO100, "Has already been unwound\n");
6530 return fHasFrameBeenUnwound;
6533 //---------------------------------------------------------------------------------------
6535 // Given the CrawlFrame of the current frame, return a StackFrame representing the current frame.
6536 // This StackFrame should only be used in a check to see if the current frame is the parent method frame
6537 // of a particular funclet. Don't use the returned StackFrame in any other way except to pass it back to
6538 // ExceptionTracker::IsUnwoundToTargetParentFrame(). The comparison logic is very platform-dependent.
6541 // pCF - the CrawlFrame for the current frame
6544 // Return a StackFrame for parent frame check
6547 // Don't use the returned StackFrame in any other way.
6551 StackFrame ExceptionTracker::GetStackFrameForParentCheck(CrawlFrame * pCF)
6553 WRAPPER_NO_CONTRACT;
6555 StackFrame sfResult;
6557 // Returns the CrawlFrame's caller's SP - this is used to determine if we have
6558 // reached the intended CrawlFrame in question (or not).
6560 // sfParent is returned by the EH subsystem, which uses the OS format, i.e. the initial SP before
6561 // any dynamic stack allocation. The stackwalker uses the current SP, i.e. the SP after all
6562 // dynamic stack allocations. Thus, we cannot do an equality check. Instead, we get the
6563 // CallerStackFrame, which is the caller SP.
6564 sfResult = (StackFrame)CallerStackFrame::FromRegDisplay(pCF->GetRegisterSet());
6569 //---------------------------------------------------------------------------------------
6571 // Given the StackFrame of a parent method frame, determine if we have unwound to it during stackwalking yet.
6572 // The StackFrame should be the return value of one of the FindParentStackFrameFor*() functions.
6573 // Refer to the comment for UnwindStackFrame for more information.
6576 // pCF - the CrawlFrame of the current frame
6577 // sfParent - the StackFrame of the target parent method frame,
6578 // returned by one of the FindParentStackFrameFor*() functions
6581 // whether we have unwound to the target parent method frame
6585 bool ExceptionTracker::IsUnwoundToTargetParentFrame(CrawlFrame * pCF, StackFrame sfParent)
6592 PRECONDITION( CheckPointer(pCF, NULL_NOT_OK) );
6593 PRECONDITION( pCF->IsFrameless() );
6594 PRECONDITION( pCF->GetRegisterSet()->IsCallerContextValid || pCF->GetRegisterSet()->IsCallerSPValid );
6598 StackFrame sfToCheck = GetStackFrameForParentCheck(pCF);
6599 return IsUnwoundToTargetParentFrame(sfToCheck, sfParent);
6603 bool ExceptionTracker::IsUnwoundToTargetParentFrame(StackFrame sfToCheck, StackFrame sfParent)
6605 LIMITED_METHOD_CONTRACT;
6607 return (sfParent == sfToCheck);
6610 // Given the CrawlFrame for a funclet frame, return the frame pointer of the enclosing funclet frame.
6611 // For filter funclet frames and normal method frames, this function returns a NULL StackFrame.
6614 // It is not valid to call this function on an arbitrary funclet. You have to be doing a full stackwalk from
6615 // the leaf frame and skipping method frames as indicated by the return value of this function. This function
6616 // relies on the ExceptionTrackers, which are collapsed in the second pass when a nested exception escapes.
6617 // When this happens, we'll lose information on the funclet represented by the collapsed tracker.
6621 // StackFrame.IsNull() - no skipping is necessary
6622 // StackFrame.IsMaxVal() - skip one frame and then ask again
6623 // Anything else - skip to the method frame indicated by the return value and ask again
6626 StackFrame ExceptionTracker::FindParentStackFrameForStackWalk(CrawlFrame* pCF, bool fForGCReporting /*= false */)
6628 WRAPPER_NO_CONTRACT;
6630 // We should never skip filter funclets. However, if we are stackwalking for GC reference
6631 // reporting, then we need to get the stackframe of the parent frame (where the filter was
6632 // invoked from) so that when we reach it, we can indicate that the filter has already
6633 // performed the reporting.
6635 // Thus, for GC reporting purposes, get filter's parent frame.
6636 if (pCF->IsFilterFunclet() && (!fForGCReporting))
6638 return StackFrame();
6642 return FindParentStackFrameHelper(pCF, NULL, NULL, NULL, fForGCReporting);
6646 // Given the CrawlFrame for a filter funclet frame, return the frame pointer of the parent method frame.
6647 // It also returns the relative offset and the caller SP of the parent method frame.
6650 // The same warning for FindParentStackFrameForStackWalk() also applies here. Moreoever, although
6651 // this function seems to be more convenient, it may potentially trigger a full stackwalk! Do not
6652 // call this unless you know absolutely what you are doing. In most cases FindParentStackFrameForStackWalk()
6653 // is what you need.
6657 // StackFrame.IsNull() - no skipping is necessary
6658 // Anything else - the StackFrame of the parent method frame
6661 StackFrame ExceptionTracker::FindParentStackFrameEx(CrawlFrame* pCF,
6662 DWORD* pParentOffset,
6663 UINT_PTR* pParentCallerSP)
6670 PRECONDITION( pCF != NULL );
6671 PRECONDITION( pCF->IsFilterFunclet() );
6675 bool fRealParent = false;
6676 StackFrame sfResult = ExceptionTracker::FindParentStackFrameHelper(pCF, &fRealParent, pParentOffset, pParentCallerSP);
6680 // If the enclosing method is the parent method, then we are done.
6685 // Otherwise we need to do a full stackwalk to find the parent method frame.
6686 // This should only happen if we are calling a filter inside a funclet.
6687 return ExceptionTracker::RareFindParentStackFrame(pCF, pParentOffset, pParentCallerSP);
6692 StackFrame ExceptionTracker::GetCallerSPOfParentOfNonExceptionallyInvokedFunclet(CrawlFrame *pCF)
6699 PRECONDITION(pCF != NULL);
6700 PRECONDITION(pCF->IsFunclet() && (!pCF->IsFilterFunclet()));
6704 PREGDISPLAY pRD = pCF->GetRegisterSet();
6706 // Ensure that the caller Context is valid.
6707 _ASSERTE(pRD->IsCallerContextValid);
6709 // Make a copy of the caller context
6710 T_CONTEXT tempContext;
6711 CopyOSContext(&tempContext, pRD->pCallerContext);
6713 // Now unwind it to get the context of the caller's caller.
6714 EECodeInfo codeInfo(dac_cast<PCODE>(GetIP(pRD->pCallerContext)));
6715 Thread::VirtualUnwindCallFrame(&tempContext, NULL, &codeInfo);
6717 StackFrame sfRetVal = StackFrame((UINT_PTR)(GetSP(&tempContext)));
6718 _ASSERTE(!sfRetVal.IsNull() && !sfRetVal.IsMaxVal());
6724 StackFrame ExceptionTracker::FindParentStackFrameHelper(CrawlFrame* pCF,
6726 DWORD* pParentOffset,
6727 UINT_PTR* pParentCallerSP,
6728 bool fForGCReporting /* = false */)
6735 PRECONDITION( pCF != NULL );
6736 PRECONDITION( pCF->IsFunclet() );
6737 PRECONDITION( CheckPointer(pfRealParent, NULL_OK) );
6738 PRECONDITION( CheckPointer(pParentOffset, NULL_OK) );
6739 PRECONDITION( CheckPointer(pParentCallerSP, NULL_OK) );
6743 StackFrame sfResult;
6744 REGDISPLAY* pRegDisplay = pCF->GetRegisterSet();
6746 // At this point, we need a valid caller SP and the CallerStackFrame::FromRegDisplay
6747 // asserts that the RegDisplay contains one.
6748 CallerStackFrame csfCurrent = CallerStackFrame::FromRegDisplay(pRegDisplay);
6749 ExceptionTracker *pCurrentTracker = NULL;
6750 bool fIsFilterFunclet = pCF->IsFilterFunclet();
6752 // We can't do this on an unmanaged thread.
6753 Thread* pThread = pCF->pThread;
6754 if (pThread == NULL)
6756 _ASSERTE(!"FindParentStackFrame() called on an unmanaged thread");
6760 // Check for out-of-line finally funclets. Filter funclets can't be out-of-line.
6761 if (!fIsFilterFunclet)
6763 if (pRegDisplay->IsCallerContextValid)
6765 PCODE callerIP = dac_cast<PCODE>(GetIP(pRegDisplay->pCallerContext));
6766 BOOL fIsCallerInVM = FALSE;
6768 // Check if the caller IP is in mscorwks. If it is not, then it is an out-of-line finally.
6769 // Normally, the caller of a finally is ExceptionTracker::CallHandler().
6771 fIsCallerInVM = !ExecutionManager::IsManagedCode(callerIP);
6773 #if defined(DACCESS_COMPILE)
6774 HMODULE_TGT hEE = DacGlobalBase();
6775 #else // !DACCESS_COMPILE
6776 HMODULE_TGT hEE = g_pMSCorEE;
6777 #endif // !DACCESS_COMPILE
6778 fIsCallerInVM = IsIPInModule(hEE, callerIP);
6779 #endif // FEATURE_PAL
6783 if (!fForGCReporting)
6785 sfResult.SetMaxVal();
6790 // We have run into a non-exceptionally invoked finally funclet (aka out-of-line finally funclet).
6791 // Since these funclets are invoked from JITted code, we will not find their EnclosingClauseCallerSP
6792 // in an exception tracker as one does not exist (remember, these funclets are invoked "non"-exceptionally).
6794 // At this point, the caller context is that of the parent frame of the funclet. All we need is the CallerSP
6795 // of that parent. We leverage a helper function that will perform an unwind against the caller context
6796 // and return us the SP (of the caller of the funclet's parent).
6797 StackFrame sfCallerSPOfFuncletParent = ExceptionTracker::GetCallerSPOfParentOfNonExceptionallyInvokedFunclet(pCF);
6798 return sfCallerSPOfFuncletParent;
6804 for (pCurrentTracker = pThread->GetExceptionState()->m_pCurrentTracker;
6805 pCurrentTracker != NULL;
6806 pCurrentTracker = pCurrentTracker->m_pPrevNestedInfo)
6808 // Check if the tracker has just been created.
6809 if (pCurrentTracker->m_ScannedStackRange.IsEmpty())
6814 // Since the current frame is a non-filter funclet, determine if its caller is the same one
6815 // as was saved against the exception tracker before the funclet was invoked in ExceptionTracker::CallHandler.
6816 CallerStackFrame csfFunclet = pCurrentTracker->m_EHClauseInfo.GetCallerStackFrameForEHClause();
6817 if (csfCurrent == csfFunclet)
6819 // The EnclosingClauseCallerSP is initialized in ExceptionTracker::ProcessManagedCallFrame, just before
6820 // invoking the funclets. Basically, we are using the SP of the caller of the frame containing the funclet
6821 // to determine if we have reached the frame containing the funclet.
6822 EnclosingClauseInfo srcEnclosingClause = (fForGCReporting) ? pCurrentTracker->m_EnclosingClauseInfoForGCReporting
6823 : pCurrentTracker->m_EnclosingClauseInfo;
6824 sfResult = (StackFrame)(CallerStackFrame(srcEnclosingClause.GetEnclosingClauseCallerSP()));
6826 // Check whether the tracker has called any funclet yet.
6827 if (sfResult.IsNull())
6832 // Set the relevant information.
6833 if (pfRealParent != NULL)
6835 *pfRealParent = !srcEnclosingClause.EnclosingClauseIsFunclet();
6837 if (pParentOffset != NULL)
6839 *pParentOffset = srcEnclosingClause.GetEnclosingClauseOffset();
6841 if (pParentCallerSP != NULL)
6843 *pParentCallerSP = srcEnclosingClause.GetEnclosingClauseCallerSP();
6848 // Check if this tracker was collapsed with another tracker and if caller of funclet clause for collapsed exception tracker matches.
6849 else if (fForGCReporting && !(pCurrentTracker->m_csfEHClauseOfCollapsedTracker.IsNull()) && csfCurrent == pCurrentTracker->m_csfEHClauseOfCollapsedTracker)
6851 EnclosingClauseInfo srcEnclosingClause = pCurrentTracker->m_EnclosingClauseInfoOfCollapsedTracker;
6852 sfResult = (StackFrame)(CallerStackFrame(srcEnclosingClause.GetEnclosingClauseCallerSP()));
6854 _ASSERTE(!sfResult.IsNull());
6863 STRESS_LOG3(LF_EH|LF_GCROOTS, LL_INFO100, "Returning 0x%p as the parent stack frame for %s 0x%p\n",
6864 sfResult.SP, fIsFilterFunclet ? "filter funclet" : "funclet", csfCurrent.SP);
6869 struct RareFindParentStackFrameCallbackState
6871 StackFrame m_sfTarget;
6872 StackFrame m_sfParent;
6873 bool m_fFoundTarget;
6874 DWORD m_dwParentOffset;
6875 UINT_PTR m_uParentCallerSP;
6878 // This is the callback for the stackwalk to get the parent stack frame for a filter funclet.
6881 StackWalkAction ExceptionTracker::RareFindParentStackFrameCallback(CrawlFrame* pCF, LPVOID pData)
6891 RareFindParentStackFrameCallbackState* pState = (RareFindParentStackFrameCallbackState*)pData;
6893 // In all cases, we don't care about explicit frame.
6894 if (!pCF->IsFrameless())
6896 return SWA_CONTINUE;
6899 REGDISPLAY* pRegDisplay = pCF->GetRegisterSet();
6900 StackFrame sfCurrent = StackFrame::FromRegDisplay(pRegDisplay);
6902 // Check if we have reached the target already.
6903 if (!pState->m_fFoundTarget)
6905 if (sfCurrent != pState->m_sfTarget)
6907 return SWA_CONTINUE;
6910 pState->m_fFoundTarget = true;
6913 // We hae reached the target, now do the normal frames skipping.
6914 if (!pState->m_sfParent.IsNull())
6916 if (pState->m_sfParent.IsMaxVal() || IsUnwoundToTargetParentFrame(pCF, pState->m_sfParent))
6918 // We have reached the specified method frame to skip to.
6919 // Now clear the flag and ask again.
6920 pState->m_sfParent.Clear();
6924 if (pState->m_sfParent.IsNull() && pCF->IsFunclet())
6926 pState->m_sfParent = ExceptionTracker::FindParentStackFrameHelper(pCF, NULL, NULL, NULL);
6929 // If we still need to skip, then continue the stackwalk.
6930 if (!pState->m_sfParent.IsNull())
6932 return SWA_CONTINUE;
6935 // At this point, we are done.
6936 pState->m_sfParent = ExceptionTracker::GetStackFrameForParentCheck(pCF);
6937 pState->m_dwParentOffset = pCF->GetRelOffset();
6939 _ASSERTE(pRegDisplay->IsCallerContextValid);
6940 pState->m_uParentCallerSP = GetSP(pRegDisplay->pCallerContext);
6946 StackFrame ExceptionTracker::RareFindParentStackFrame(CrawlFrame* pCF,
6947 DWORD* pParentOffset,
6948 UINT_PTR* pParentCallerSP)
6955 PRECONDITION( pCF != NULL );
6956 PRECONDITION( pCF->IsFunclet() );
6957 PRECONDITION( CheckPointer(pParentOffset, NULL_OK) );
6958 PRECONDITION( CheckPointer(pParentCallerSP, NULL_OK) );
6962 Thread* pThread = pCF->pThread;
6964 RareFindParentStackFrameCallbackState state;
6965 state.m_sfParent.Clear();
6966 state.m_sfTarget = StackFrame::FromRegDisplay(pCF->GetRegisterSet());
6967 state.m_fFoundTarget = false;
6969 PTR_Frame pFrame = pCF->pFrame;
6972 CopyRegDisplay((const PREGDISPLAY)pCF->GetRegisterSet(), &rd, &ctx);
6974 pThread->StackWalkFramesEx(&rd, &ExceptionTracker::RareFindParentStackFrameCallback, &state, 0, pFrame);
6976 if (pParentOffset != NULL)
6978 *pParentOffset = state.m_dwParentOffset;
6980 if (pParentCallerSP != NULL)
6982 *pParentCallerSP = state.m_uParentCallerSP;
6984 return state.m_sfParent;
6987 ExceptionTracker::StackRange::StackRange()
6989 WRAPPER_NO_CONTRACT;
6991 #ifndef DACCESS_COMPILE
6993 #endif // DACCESS_COMPILE
6996 ExceptionTracker::EnclosingClauseInfo::EnclosingClauseInfo()
6998 LIMITED_METHOD_CONTRACT;
7000 m_fEnclosingClauseIsFunclet = false;
7001 m_dwEnclosingClauseOffset = 0;
7002 m_uEnclosingClauseCallerSP = 0;
7005 ExceptionTracker::EnclosingClauseInfo::EnclosingClauseInfo(bool fEnclosingClauseIsFunclet,
7006 DWORD dwEnclosingClauseOffset,
7007 UINT_PTR uEnclosingClauseCallerSP)
7009 LIMITED_METHOD_CONTRACT;
7011 m_fEnclosingClauseIsFunclet = fEnclosingClauseIsFunclet;
7012 m_dwEnclosingClauseOffset = dwEnclosingClauseOffset;
7013 m_uEnclosingClauseCallerSP = uEnclosingClauseCallerSP;
7016 bool ExceptionTracker::EnclosingClauseInfo::EnclosingClauseIsFunclet()
7018 LIMITED_METHOD_CONTRACT;
7019 return m_fEnclosingClauseIsFunclet;
7022 DWORD ExceptionTracker::EnclosingClauseInfo::GetEnclosingClauseOffset()
7024 LIMITED_METHOD_CONTRACT;
7025 return m_dwEnclosingClauseOffset;
7028 UINT_PTR ExceptionTracker::EnclosingClauseInfo::GetEnclosingClauseCallerSP()
7030 LIMITED_METHOD_CONTRACT;
7031 return m_uEnclosingClauseCallerSP;
7034 void ExceptionTracker::EnclosingClauseInfo::SetEnclosingClauseCallerSP(UINT_PTR callerSP)
7036 LIMITED_METHOD_CONTRACT;
7037 m_uEnclosingClauseCallerSP = callerSP;
7040 bool ExceptionTracker::EnclosingClauseInfo::operator==(const EnclosingClauseInfo & rhs)
7042 LIMITED_METHOD_CONTRACT;
7045 return ((this->m_fEnclosingClauseIsFunclet == rhs.m_fEnclosingClauseIsFunclet) &&
7046 (this->m_dwEnclosingClauseOffset == rhs.m_dwEnclosingClauseOffset) &&
7047 (this->m_uEnclosingClauseCallerSP == rhs.m_uEnclosingClauseCallerSP));
7050 void ExceptionTracker::ReleaseResources()
7052 #ifndef DACCESS_COMPILE
7055 if (!CLRException::IsPreallocatedExceptionHandle(m_hThrowable))
7057 DestroyHandle(m_hThrowable);
7059 m_hThrowable = NULL;
7061 m_StackTraceInfo.FreeStackTrace();
7064 // Clear any held Watson Bucketing details
7065 GetWatsonBucketTracker()->ClearWatsonBucketDetails();
7066 #else // !FEATURE_PAL
7067 if (m_fOwnsExceptionPointers)
7069 PAL_FreeExceptionRecords(m_ptrs.ExceptionRecord, m_ptrs.ContextRecord);
7070 m_fOwnsExceptionPointers = FALSE;
7072 #endif // !FEATURE_PAL
7073 #endif // DACCESS_COMPILE
7076 void ExceptionTracker::SetEnclosingClauseInfo(bool fEnclosingClauseIsFunclet,
7077 DWORD dwEnclosingClauseOffset,
7078 UINT_PTR uEnclosingClauseCallerSP)
7080 // Preserve the details of the current frame for GC reporting before
7081 // we apply the nested exception logic below.
7082 this->m_EnclosingClauseInfoForGCReporting = EnclosingClauseInfo(fEnclosingClauseIsFunclet,
7083 dwEnclosingClauseOffset,
7084 uEnclosingClauseCallerSP);
7085 if (this->m_pPrevNestedInfo != NULL)
7087 PTR_ExceptionTracker pPrevTracker = this->m_pPrevNestedInfo;
7088 CallerStackFrame csfPrevEHClause = pPrevTracker->m_EHClauseInfo.GetCallerStackFrameForEHClause();
7090 // Just propagate the information if this is a nested exception.
7091 if (csfPrevEHClause.SP == uEnclosingClauseCallerSP)
7093 this->m_EnclosingClauseInfo = pPrevTracker->m_EnclosingClauseInfo;
7098 this->m_EnclosingClauseInfo = EnclosingClauseInfo(fEnclosingClauseIsFunclet,
7099 dwEnclosingClauseOffset,
7100 uEnclosingClauseCallerSP);
7104 #ifdef DACCESS_COMPILE
7105 void ExceptionTracker::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
7107 // ExInfo is embedded so don't enum 'this'.
7108 OBJECTHANDLE_EnumMemoryRegions(m_hThrowable);
7109 m_ptrs.ExceptionRecord.EnumMem();
7110 m_ptrs.ContextRecord.EnumMem();
7112 #endif // DACCESS_COMPILE
7114 #ifndef DACCESS_COMPILE
7115 // This is a thin wrapper around ResetThreadAbortState. Its primarily used to
7116 // instantiate CrawlFrame, when required, for walking the stack on IA64.
7118 // The "when required" part are the set of conditions checked prior to the call to
7119 // this method in ExceptionTracker::ProcessOSExceptionNotification (and asserted in
7120 // ResetThreadabortState).
7122 // Also, since CrawlFrame ctor is protected, it can only be instantiated by friend
7123 // types (which ExceptionTracker is).
7126 void ExceptionTracker::ResetThreadAbortStatus(PTR_Thread pThread, CrawlFrame *pCf, StackFrame sfCurrentStackFrame)
7133 PRECONDITION(pThread != NULL);
7134 PRECONDITION(pCf != NULL);
7135 PRECONDITION(!sfCurrentStackFrame.IsNull());
7139 if (pThread->IsAbortRequested())
7141 ResetThreadAbortState(pThread, pCf, sfCurrentStackFrame);
7144 #endif //!DACCESS_COMPILE
7146 #endif // WIN64EXCEPTIONS