1 // Licensed to the .NET Foundation under one or more agreements.
2 // The .NET Foundation licenses this file to you under the MIT license.
3 // See the LICENSE file in the project root for more information.
13 Implementation of MACH exception API functions.
17 #include "pal/dbgmsg.h"
18 SET_DEFAULT_DEBUG_CHANNEL(EXCEPT); // some headers have code with asserts, so do this first
20 #include "pal/thread.hpp"
21 #include "pal/seh.hpp"
22 #include "pal/palinternal.h"
23 #if HAVE_MACH_EXCEPTIONS
24 #include "machexception.h"
25 #include "pal/critsect.h"
26 #include "pal/debug.h"
28 #include "pal/utils.h"
29 #include "pal/context.h"
30 #include "pal/malloc.hpp"
31 #include "pal/process.h"
32 #include "pal/virtual.h"
33 #include "pal/map.hpp"
34 #include "pal/environ.h"
36 #include "machmessage.h"
43 #include <mach-o/loader.h>
45 using namespace CorUnix;
47 // The port we use to handle exceptions and to set the thread context
48 mach_port_t s_ExceptionPort;

// Whether debugging support has been initialized; in non-SXS builds this
// gates hooking of breakpoint exceptions (see GetExceptionMask below).
50 static BOOL s_DebugInitialized = FALSE;

// Copy of the flags passed at PAL initialization; consulted to decide whether
// debugger exception support (PAL_INITIALIZE_DEBUGGER_EXCEPTIONS) was requested.
52 static DWORD s_PalInitializeFlags = 0;

// Name of the environment variable that overrides the default MachExceptionMode.
54 static const char * PAL_MACH_EXCEPTION_MODE = "PAL_MachExceptionMode";
56 // This struct is used to track the threads that need to have an exception forwarded
57 // to the next thread level port in the chain (if exists). An entry is added by the
58 // faulting thread sending a special message to the exception thread, which saves it on a
59 // list that is searched when the restarted exception notification is received again.
60 struct ForwardedException
62 ForwardedException *m_next;        // next entry in the singly linked list
64 exception_type_t ExceptionType;    // Mach exception type being forwarded
65 CPalThread *PalThread;             // PAL thread the forwarded exception belongs to
68 // The singly linked list and enumerator for the ForwardException struct
69 struct ForwardedExceptionList
// Head of the list and the node visited just before Current; m_previous is
// maintained during enumeration so the Current node can be unlinked in O(1).
72 ForwardedException *m_head;
73 ForwardedException *m_previous;
// Cursor of the enumeration.
76 ForwardedException *Current;
78 ForwardedExceptionList()
// End-of-list test for the enumerator.
92 return Current == NULL;
// Advance the cursor to the next node.
98 Current = Current->m_next;
// Prepend an entry to the list.
101 void Add(ForwardedException *item)
103 item->m_next = m_head;
// Unlink the Current node: when there is no previous node we are removing
// the head, otherwise splice around Current.
109 if (m_previous == NULL)
111 m_head = Current->m_next;
115 m_previous->m_next = Current->m_next;
124 enum MachExceptionMode
126 // special value to indicate we've not initialized yet
127 MachException_Uninitialized = -1,
129 // These can be combined with bitwise OR to incrementally turn off
130 // functionality for diagnostics purposes.
132 // In practice, the following values are probably useful:
133 // 1: Don't turn illegal instructions into SEH exceptions.
134 // On Intel, stack misalignment usually shows up as an
135 // illegal instruction. PAL client code shouldn't
136 // expect to see any of these, so this option should
137 // always be safe to set.
138 // 2: Don't listen for breakpoint exceptions. This makes an
139 // SEH-based debugger (i.e., managed debugger) unusable,
140 // but you may need this option if you find that native
141 // breakpoints you set in PAL-dependent code don't work
142 // (causing hangs or crashes in the native debugger).
143 // 3: Combination of the above.
144 // This is the typical setting for development
145 // (unless you're working on the managed debugger).
146 // 7: In addition to the above, don't turn bad accesses and
147 // arithmetic exceptions into SEH.
148 // This is the typical setting for stress.
// Individual suppression bits (each removes a group of exceptions from the
// mask returned by GetExceptionMask).
149 MachException_SuppressIllegal = 1,
150 MachException_SuppressDebugging = 2,
151 MachException_SuppressManaged = 4,
153 // Default value to use if environment variable not set.
154 MachException_Default = 0,
161 Returns the mach exception mask for the exceptions to hook for a thread.
// The mode is computed lazily on first call and cached in a function-local
// static: start from the default, then let the environment variable override.
170 static MachExceptionMode exMode = MachException_Uninitialized;
172 if (exMode == MachException_Uninitialized)
174 exMode = MachException_Default;
176 char* exceptionSettings = EnvironGetenv(PAL_MACH_EXCEPTION_MODE);
177 if (exceptionSettings)
179 exMode = (MachExceptionMode)atoi(exceptionSettings);
180 free(exceptionSettings);
// When a native debugger is attached, suppress our breakpoint/single-step
// handling so the debugger receives those exceptions instead of the PAL.
184 if (PAL_IsDebuggerPresent())
186 exMode = MachException_SuppressDebugging;
// Translate the mode into the exception mask: each group of exceptions is
// included unless its suppression bit is set.
191 exception_mask_t machExceptionMask = 0;
192 if (!(exMode & MachException_SuppressIllegal))
194 machExceptionMask |= PAL_EXC_ILLEGAL_MASK;
// Debugging exceptions are only hooked if debugger exception support was
// requested at PAL initialization.
196 if (!(exMode & MachException_SuppressDebugging) && (s_PalInitializeFlags & PAL_INITIALIZE_DEBUGGER_EXCEPTIONS))
198 #ifdef FEATURE_PAL_SXS
199 // Always hook exception ports for breakpoint exceptions.
200 // The reason is that we don't know when a managed debugger
201 // will attach, so we have to be prepared. We don't want
202 // to later go through the thread list and hook exception
203 // ports for exactly those threads that currently are in
205 machExceptionMask |= PAL_EXC_DEBUGGING_MASK;
206 #else // FEATURE_PAL_SXS
207 if (s_DebugInitialized)
209 machExceptionMask |= PAL_EXC_DEBUGGING_MASK;
211 #endif // FEATURE_PAL_SXS
213 if (!(exMode & MachException_SuppressManaged))
215 machExceptionMask |= PAL_EXC_MANAGED_MASK;
218 return machExceptionMask;
221 #ifdef FEATURE_PAL_SXS
225 CPalThread::EnableMachExceptions
227 Hook Mach exceptions, i.e., call thread_swap_exception_ports
228 to replace the thread's current exception ports with our own.
229 The previously active exception ports are saved. Called when
230 this thread enters a region of code that depends on this PAL.
233 ERROR_SUCCESS, if enabling succeeded
234 an error code, otherwise
236 PAL_ERROR CorUnix::CPalThread::EnableMachExceptions()
238 TRACE("%08X: Enter()\n", (unsigned int)(size_t)this);
// A zero mask means nothing is hooked (everything suppressed); skip the swap.
240 exception_mask_t machExceptionMask = GetExceptionMask();
241 if (machExceptionMask != 0)
244 // verify that the arrays we've allocated to hold saved exception ports
245 // are the right size.
// Branch-free population count of the bits set in PAL_EXC_ALL_MASK: each step
// sums adjacent 1/2/4/8/16-bit groups, leaving the total number of set bits.
246 exception_mask_t countBits = PAL_EXC_ALL_MASK;
247 countBits = ((countBits & 0xAAAAAAAA) >> 1) + (countBits & 0x55555555);
248 countBits = ((countBits & 0xCCCCCCCC) >> 2) + (countBits & 0x33333333);
249 countBits = ((countBits & 0xF0F0F0F0) >> 4) + (countBits & 0x0F0F0F0F);
250 countBits = ((countBits & 0xFF00FF00) >> 8) + (countBits & 0x00FF00FF);
251 countBits = ((countBits & 0xFFFF0000) >> 16) + (countBits & 0x0000FFFF);
252 if (countBits != static_cast<exception_mask_t>(CThreadMachExceptionHandlers::s_nPortsMax))
254 ASSERT("s_nPortsMax is %u, but needs to be %u\n",
255 CThreadMachExceptionHandlers::s_nPortsMax, countBits);
259 NONPAL_TRACE("Enabling handlers for thread %08x exception mask %08x exception port %08x\n",
260 GetMachPortSelf(), machExceptionMask, s_ExceptionPort);
262 CThreadMachExceptionHandlers *pSavedHandlers = GetSavedMachHandlers();
264 // Swap current handlers into temporary storage first. That's because it's possible (even likely) that
265 // some or all of the handlers might still be ours. In those cases we don't want to overwrite the
266 // chain-back entries with these useless self-references.
267 kern_return_t machret;
268 kern_return_t machretDeallocate;
269 thread_port_t thread = mach_thread_self();
271 machret = thread_swap_exception_ports(
275 EXCEPTION_DEFAULT | MACH_EXCEPTION_CODES,
277 pSavedHandlers->m_masks,
278 &pSavedHandlers->m_nPorts,
279 pSavedHandlers->m_handlers,
280 pSavedHandlers->m_behaviors,
281 pSavedHandlers->m_flavors);
// mach_thread_self() returns a port right that must be released, regardless
// of whether the swap itself succeeded.
283 machretDeallocate = mach_port_deallocate(mach_task_self(), thread);
284 CHECK_MACH("mach_port_deallocate", machretDeallocate);
286 if (machret != KERN_SUCCESS)
288 ASSERT("thread_swap_exception_ports failed: %d %s\n", machret, mach_error_string(machret));
289 return UTIL_MachErrorToPalError(machret);
// Trace the saved (chain-back) handlers; none of them should be our own port.
293 NONPAL_TRACE("EnableMachExceptions: THREAD PORT count %d\n", pSavedHandlers->m_nPorts);
294 for (mach_msg_type_number_t i = 0; i < pSavedHandlers->m_nPorts; i++)
296 _ASSERTE(pSavedHandlers->m_handlers[i] != s_ExceptionPort);
297 NONPAL_TRACE("EnableMachExceptions: THREAD PORT mask %08x handler: %08x behavior %08x flavor %u\n",
298 pSavedHandlers->m_masks[i],
299 pSavedHandlers->m_handlers[i],
300 pSavedHandlers->m_behaviors[i],
301 pSavedHandlers->m_flavors[i]);
305 return ERROR_SUCCESS;
310 CPalThread::DisableMachExceptions
312 Unhook Mach exceptions, i.e., call thread_set_exception_ports
313 to restore the thread's exception ports with those we saved
314 in EnableMachExceptions. Called when this thread leaves a
315 region of code that depends on this PAL.
318 ERROR_SUCCESS, if disabling succeeded
319 an error code, otherwise
321 PAL_ERROR CorUnix::CPalThread::DisableMachExceptions()
323 TRACE("%08X: Leave()\n", (unsigned int)(size_t)this);
325 PAL_ERROR palError = NO_ERROR;
327 // We only store exceptions when we're installing exceptions.
// If nothing was hooked (mask is zero) there is nothing to restore.
328 if (0 == GetExceptionMask())
331 // Get the handlers to restore.
332 CThreadMachExceptionHandlers *savedPorts = GetSavedMachHandlers();
// Restore each saved chain-back handler, one entry at a time.
334 kern_return_t MachRet = KERN_SUCCESS;
335 for (int i = 0; i < savedPorts->m_nPorts; i++)
337 // If no handler was ever set, thread_swap_exception_ports returns
338 // MACH_PORT_NULL for the handler and zero values for behavior
339 // and flavor. Unfortunately, the latter are invalid even for
340 // MACH_PORT_NULL when you use thread_set_exception_ports.
341 exception_behavior_t behavior = savedPorts->m_behaviors[i] ? savedPorts->m_behaviors[i] : EXCEPTION_DEFAULT;
342 thread_state_flavor_t flavor = savedPorts->m_flavors[i] ? savedPorts->m_flavors[i] : MACHINE_THREAD_STATE;
343 thread_port_t thread = mach_thread_self();
344 MachRet = thread_set_exception_ports(thread,
345 savedPorts->m_masks[i],
346 savedPorts->m_handlers[i],
// Release the port right obtained from mach_thread_self() above.
350 kern_return_t MachRetDeallocate = mach_port_deallocate(mach_task_self(), thread);
351 CHECK_MACH("mach_port_deallocate", MachRetDeallocate);
353 if (MachRet != KERN_SUCCESS)
357 if (MachRet != KERN_SUCCESS)
359 ASSERT("thread_set_exception_ports failed: %d\n", MachRet);
360 palError = UTIL_MachErrorToPalError(MachRet);
366 #else // FEATURE_PAL_SXS
370 SEHEnableMachExceptions
372 Enable SEH-related stuff related to mach exceptions
377 TRUE if enabling succeeded
// Non-SXS variant: hooks exception ports task-wide (task_set_exception_ports)
// rather than per-thread as the SXS build does.
380 BOOL SEHEnableMachExceptions()
382 exception_mask_t machExceptionMask = GetExceptionMask();
// A zero mask means everything is suppressed; nothing to hook.
383 if (machExceptionMask != 0)
385 kern_return_t MachRet;
386 MachRet = task_set_exception_ports(mach_task_self(),
390 MACHINE_THREAD_STATE);
392 if (MachRet != KERN_SUCCESS)
394 ASSERT("task_set_exception_ports failed: %d\n", MachRet);
395 UTIL_SetLastErrorFromMach(MachRet);
404 SEHDisableMachExceptions
406 Disable SEH-related stuff related to mach exceptions
411 TRUE if disabling succeeded
// Mirror of SEHEnableMachExceptions: resets the task-wide exception ports
// (presumably back to MACH_PORT_NULL -- the port argument line is not shown
// here; confirm against the full source).
414 BOOL SEHDisableMachExceptions()
416 exception_mask_t machExceptionMask = GetExceptionMask();
417 if (machExceptionMask != 0)
419 kern_return_t MachRet;
420 MachRet = task_set_exception_ports(mach_task_self(),
424 MACHINE_THREAD_STATE);
426 if (MachRet != KERN_SUCCESS)
428 ASSERT("task_set_exception_ports failed: %d\n", MachRet);
429 UTIL_SetLastErrorFromMach(MachRet);
436 #endif // FEATURE_PAL_SXS
// Runs on the faulting thread (after HijackFaultingThread redirected it here)
// to convert the Mach exception into an SEH exception and dispatch it.
438 #if !defined(_AMD64_)
440 void PAL_DispatchException(PCONTEXT pContext, PEXCEPTION_RECORD pExRecord, MachExceptionInfo *pMachExceptionInfo)
441 #else // defined(_AMD64_)
443 // Since HijackFaultingThread pushed the context, exception record and info on the stack, we need to adjust the
444 // signature of PAL_DispatchException such that the corresponding arguments are considered to be on the stack
445 // per GCC64 calling convention rules. Hence, the first 6 dummy arguments (corresponding to RDI, RSI, RDX,RCX, R8, R9).
447 void PAL_DispatchException(DWORD64 dwRDI, DWORD64 dwRSI, DWORD64 dwRDX, DWORD64 dwRCX, DWORD64 dwR8, DWORD64 dwR9, PCONTEXT pContext, PEXCEPTION_RECORD pExRecord, MachExceptionInfo *pMachExceptionInfo)
448 #endif // !defined(_AMD64_)
450 CPalThread *pThread = InternalGetCurrentThread();
453 if (!pThread->IsInPal())
455 // It's now possible to observe system exceptions in code running outside the PAL (as the result of a
456 // p/invoke since we no longer revert our Mach exception ports in this case). In that scenario we need
457 // to re-enter the PAL now as the exception signals the end of the p/invoke.
458 PAL_Reenter(PAL_BoundaryBottom);
460 #endif // FEATURE_PAL_SXS
// Copy the on-stack records into heap allocations so they survive independent
// of this frame; PAL_SEHException below takes ownership of both.
462 CONTEXT *contextRecord;
463 EXCEPTION_RECORD *exceptionRecord;
464 AllocateExceptionRecords(&exceptionRecord, &contextRecord);
466 *contextRecord = *pContext;
467 *exceptionRecord = *pExRecord;
469 contextRecord->ContextFlags |= CONTEXT_EXCEPTION_ACTIVE;
470 bool continueExecution;
473 // The exception object takes ownership of the exceptionRecord and contextRecord
474 PAL_SEHException exception(exceptionRecord, contextRecord);
476 TRACE("PAL_DispatchException(EC %08x EA %p)\n", pExRecord->ExceptionCode, pExRecord->ExceptionAddress);
478 continueExecution = SEHProcessException(&exception);
479 if (continueExecution)
481 // Make a copy of the exception records so that we can free them before restoring the context
482 *pContext = *contextRecord;
483 *pExRecord = *exceptionRecord;
486 // The exception records are destroyed by the PAL_SEHException destructor now.
// Handled: resume the faulting thread at the (possibly updated) context.
489 if (continueExecution)
491 RtlRestoreContext(pContext, pExRecord);
// Unhandled: ask the exception thread to forward the exception to the
// next handler in the saved chain, then wait to be hijacked again.
494 // Send the forward request to the exception thread to process
495 MachMessage sSendMessage;
496 sSendMessage.SendForwardException(s_ExceptionPort, pMachExceptionInfo, pThread);
498 // Spin wait until this thread is hijacked by the exception thread
// Assembly helper that provides the fake frame HijackFaultingThread builds;
// the offset locates the instruction after its fake "call".
505 #if defined(_X86_) || defined(_AMD64_)
506 extern "C" void PAL_DispatchExceptionWrapper();
507 extern "C" int PAL_DispatchExceptionReturnOffset;
508 #endif // _X86_ || _AMD64_
514 Sets up an ExceptionRecord from an exception message
517 exceptionInfo - exception info to build the exception record
518 pExceptionRecord - exception record to setup
522 BuildExceptionRecord(
523 MachExceptionInfo& exceptionInfo, // [in] exception info
524 EXCEPTION_RECORD *pExceptionRecord) // [out] Used to return exception parameters
526 memset(pExceptionRecord, 0, sizeof(EXCEPTION_RECORD));
// Default/fallback mapping when the Mach exception doesn't translate to a
// more specific Win32 code.
528 DWORD exceptionCode = EXCEPTION_ILLEGAL_INSTRUCTION;
530 switch(exceptionInfo.ExceptionType)
532 // Could not access memory. subcode contains the bad memory address.
534 if (exceptionInfo.SubcodeCount != 2)
536 NONPAL_RETAIL_ASSERT("Got an unexpected subcode");
537 exceptionCode = EXCEPTION_ILLEGAL_INSTRUCTION;
541 exceptionCode = EXCEPTION_ACCESS_VIOLATION;
// AV parameters follow the Win32 convention: [0] = access type, [1] = address.
543 pExceptionRecord->NumberParameters = 2;
544 pExceptionRecord->ExceptionInformation[0] = 0;
545 pExceptionRecord->ExceptionInformation[1] = exceptionInfo.Subcodes[1];
546 NONPAL_TRACE("subcodes[1] = %llx\n", exceptionInfo.Subcodes[1]);
550 // Instruction failed. Illegal or undefined instruction or operand.
551 case EXC_BAD_INSTRUCTION :
552 // TODO: Identify privileged instruction. Need to get the thread state and read the machine code. May
553 // be better to do this in the place that calls SEHProcessException, similar to how it's done on Linux.
554 exceptionCode = EXCEPTION_ILLEGAL_INSTRUCTION;
557 // Arithmetic exception; exact nature of exception is in subcode field.
559 if (exceptionInfo.SubcodeCount != 2)
561 NONPAL_RETAIL_ASSERT("Got an unexpected subcode");
562 exceptionCode = EXCEPTION_ILLEGAL_INSTRUCTION;
// Map the architecture-specific arithmetic subcode to a Win32 code.
566 switch (exceptionInfo.Subcodes[0])
568 #if defined(_X86_) || defined(_AMD64_)
570 exceptionCode = EXCEPTION_INT_DIVIDE_BY_ZERO;
573 exceptionCode = EXCEPTION_INT_OVERFLOW;
575 case EXC_I386_EXTOVR:
576 exceptionCode = EXCEPTION_FLT_OVERFLOW;
579 exceptionCode = EXCEPTION_ARRAY_BOUNDS_EXCEEDED;
582 #error Trap code to exception mapping not defined for this architecture
585 exceptionCode = EXCEPTION_ILLEGAL_INSTRUCTION;
592 #if defined(_X86_) || defined(_AMD64_)
593 exceptionCode = EXCEPTION_ILLEGAL_INSTRUCTION;
596 #error Trap code to exception mapping not defined for this architecture
599 // Trace, breakpoint, etc. Details in subcode field.
601 #if defined(_X86_) || defined(_AMD64_)
602 if (exceptionInfo.Subcodes[0] == EXC_I386_SGL)
604 exceptionCode = EXCEPTION_SINGLE_STEP;
606 else if (exceptionInfo.Subcodes[0] == EXC_I386_BPT)
608 exceptionCode = EXCEPTION_BREAKPOINT;
611 #error Trap code to exception mapping not defined for this architecture
615 WARN("unexpected subcode %d for EXC_BREAKPOINT", exceptionInfo.Subcodes[0]);
616 exceptionCode = EXCEPTION_BREAKPOINT;
621 // System call requested. Details in subcode field.
623 exceptionCode = EXCEPTION_ILLEGAL_INSTRUCTION;
626 // System call with a number in the Mach call range requested. Details in subcode field.
627 case EXC_MACH_SYSCALL:
628 exceptionCode = EXCEPTION_ILLEGAL_INSTRUCTION;
632 NONPAL_ASSERT("Got unknown trap code %d\n", exceptionInfo.ExceptionType);
636 pExceptionRecord->ExceptionCode = exceptionCode;
// Maps a Mach exception_type_t to its symbolic name, for tracing/diagnostics.
// (The function's signature line is not visible in this view; only the
// parameter and the switch-style return statements are.)
642 exception_type_t exception
648 return "EXC_BAD_ACCESS";
650 case EXC_BAD_INSTRUCTION:
651 return "EXC_BAD_INSTRUCTION";
654 return "EXC_ARITHMETIC";
657 return "EXC_SOFTWARE";
660 return "EXC_BREAKPOINT";
663 return "EXC_SYSCALL";
665 case EXC_MACH_SYSCALL:
666 return "EXC_MACH_SYSCALL";
// Unknown exception types assert (non-fatal) and fall through to a sentinel.
669 NONPAL_ASSERT("Got unknown trap code %d\n", exception);
672 return "INVALID CODE";
680 Sets the faulting thread up to return to PAL_DispatchException with an
681 ExceptionRecord and thread CONTEXT.
684 thread - thread the exception happened
685 task - task the exception happened
686 message - exception message
// Runs on the exception-handling thread, NOT the faulting thread: it builds a
// fake PAL_DispatchExceptionWrapper frame on the faulting thread's stack and
// rewrites its PC/SP via thread_set_state so it "returns" into
// PAL_DispatchException.
693 HijackFaultingThread(
694 mach_port_t thread, // [in] thread the exception happened on
695 mach_port_t task, // [in] task the exception happened on
696 MachMessage& message) // [in] exception message
698 MachExceptionInfo exceptionInfo(thread, message);
699 EXCEPTION_RECORD exceptionRecord;
700 CONTEXT threadContext;
701 kern_return_t machret;
703 // Fill in the exception record from the exception info
704 BuildExceptionRecord(exceptionInfo, &exceptionRecord);
// Convert the captured Mach thread/float state into a Win32 CONTEXT.
707 threadContext.ContextFlags = CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS;
709 threadContext.ContextFlags = CONTEXT_FLOATING_POINT;
711 CONTEXT_GetThreadContextFromThreadState(x86_FLOAT_STATE, (thread_state_t)&exceptionInfo.FloatState, &threadContext);
713 threadContext.ContextFlags |= CONTEXT_CONTROL | CONTEXT_INTEGER | CONTEXT_SEGMENTS;
714 CONTEXT_GetThreadContextFromThreadState(x86_THREAD_STATE, (thread_state_t)&exceptionInfo.ThreadState, &threadContext);
716 #if defined(CORECLR) && (defined(_X86_) || defined(_AMD64_))
717 // For CoreCLR we look more deeply at access violations to determine whether they're the result of a stack
718 // overflow. If so we'll terminate the process immediately (the current default policy of the CoreCLR EE).
719 // Otherwise we'll either A/V ourselves trying to set up the SEH exception record and context on the
720 // target thread's stack (unlike Windows there's no extra stack reservation to guarantee this can be done)
721 // or, and this the case we're trying to avoid, it's possible we'll succeed and the runtime will go ahead
722 // and process the SO like it was a simple AV. Since the runtime doesn't currently implement stack probing
723 // on non-Windows platforms, this could lead to data corruption (we have SO intolerant code in the runtime
724 // which manipulates global state under the assumption that an SO cannot occur due to a prior stack
727 // Determining whether an AV is really an SO is not quite straightforward. We can get stack bounds
728 // information from pthreads but (a) we only have the target Mach thread port and no way to map to a
729 // pthread easily and (b) the pthread functions lie about the bounds on the main thread.
731 // Instead we inspect the target thread SP we just retrieved above and compare it with the AV address. If
732 // they both lie in the same page or the SP is at a higher address than the AV but in the same VM region,
733 // then we'll consider the AV to be an SO. Note that we can't assume that SP will be in the same page as
734 // the AV on an SO, even though we force GCC to generate stack probes on stack extension (-fstack-check).
735 // That's because GCC currently generates the probe *before* altering SP. Since a given stack extension can
736 // involve multiple pages and GCC generates all the required probes before updating SP in a single
737 // operation, the faulting probe can be at an address that is far removed from the thread's current value
740 // In the case where the AV and SP aren't in the same or adjacent pages we check if the first page
741 // following the faulting address belongs in the same VM region as the current value of SP. Since all pages
742 // in a VM region have the same attributes this check eliminates the possibility that there's another guard
743 // page in the range between the fault and the SP, effectively establishing that the AV occurred in the
744 // guard page associated with the stack associated with the SP.
746 // We are assuming here that thread stacks are always allocated in a single VM region. I've seen no
747 // evidence thus far that this is not the case (and the mere fact we rely on Mach apis already puts us on
748 // brittle ground anyway).
750 // (a) SP always marks the current limit of the stack (in that all valid stack accesses will be of
751 // the form [SP + delta]). The Mac x86 ABI appears to guarantee this (or rather it does not
752 // guarantee that stack slots below SP will not be invalidated by asynchronous events such as
753 // interrupts, which mostly amounts to the same thing for user mode code). Note that the Mac PPC
754 // ABI does allow some (constrained) access below SP, but we're not currently supporting this
756 // (b) All code will extend the stack "carefully" (by which we mean that stack extensions of more
757 // than one page in size will touch at least one byte in each intervening page (in decreasing
758 // address order), to guarantee that the guard page is hit before memory beyond the guard page is
759 // corrupted). Our managed jits always generate code which does this as does MSVC. GCC, however,
760 // does not do this by default. We have to explicitly provide the -fstack-check compiler option
761 // to enable the behavior.
762 #if (defined(_X86_) || defined(_AMD64_)) && defined(__APPLE__)
763 if (exceptionRecord.ExceptionCode == EXCEPTION_ACCESS_VIOLATION)
765 // Assume this AV isn't an SO to begin with.
766 bool fIsStackOverflow = false;
768 // Calculate the page base addresses for the fault and the faulting thread's SP.
769 int cbPage = getpagesize();
770 char *pFaultPage = (char*)(exceptionRecord.ExceptionInformation[1] & ~(cbPage - 1));
772 char *pStackTopPage = (char*)(threadContext.Esp & ~(cbPage - 1));
773 #elif defined(_AMD64_)
774 char *pStackTopPage = (char*)(threadContext.Rsp & ~(cbPage - 1));
777 if (pFaultPage == pStackTopPage || pFaultPage == (pStackTopPage - cbPage))
779 // The easy case is when the AV occurred in the same or adjacent page as the stack pointer.
780 fIsStackOverflow = true;
782 else if (pFaultPage < pStackTopPage)
784 // Calculate the address of the page immediately following the fault and check that it
785 // lies in the same VM region as the stack pointer.
786 vm_address_t vm_address;
788 vm_region_flavor_t vm_flavor;
789 mach_msg_type_number_t infoCnt;
791 vm_region_basic_info_data_64_t info;
792 infoCnt = VM_REGION_BASIC_INFO_COUNT_64;
793 vm_flavor = VM_REGION_BASIC_INFO_64;
795 vm_region_basic_info_data_t info;
796 infoCnt = VM_REGION_BASIC_INFO_COUNT;
797 vm_flavor = VM_REGION_BASIC_INFO;
799 mach_port_t object_name;
801 vm_address = (vm_address_t)(pFaultPage + cbPage);
804 machret = vm_region_64(
812 (vm_region_info_t)&info,
816 CHECK_MACH("vm_region", machret);
817 #elif defined(_AMD64_)
818 CHECK_MACH("vm_region_64", machret);
821 // If vm_region updated the address we gave it then that address was not part of a region at all
822 // (and so this cannot be an SO). Otherwise check that the ESP lies in the region returned.
823 char *pRegionStart = (char*)vm_address;
824 char *pRegionEnd = (char*)vm_address + vm_size;
825 if (pRegionStart == (pFaultPage + cbPage) && pStackTopPage < pRegionEnd)
826 fIsStackOverflow = true;
830 if (!fIsStackOverflow)
832 // Check if we can read pointer sized bytes below the target thread's stack pointer.
833 // If we are unable to, then it implies we have run into SO.
834 void **targetSP = (void **)threadContext.Rsp;
835 vm_address_t targetAddr = (mach_vm_address_t)(targetSP);
836 targetAddr -= sizeof(void *);
837 vm_size_t vm_size = sizeof(void *);
// NOTE(review): 'arr' is the scratch destination buffer for this probe read;
// its declaration is not visible in this view -- confirm against full source.
839 vm_size_t data_count = 8;
840 machret = vm_read_overwrite(mach_task_self(), targetAddr, vm_size, (pointer_t)arr, &data_count);
// A failed read just below SP means we hit the guard page: treat as SO.
841 if (machret == KERN_INVALID_ADDRESS)
843 fIsStackOverflow = true;
848 if (fIsStackOverflow)
850 // We have a stack overflow. Abort the process immediately. It would be nice to let the VM do this
851 // but the Windows mechanism (where a stack overflow SEH exception is delivered on the faulting
852 // thread) will not work most of the time since non-Windows OSs don't keep a reserve stack
853 // extension allocated for this purpose.
855 // TODO: Once our event reporting story is further along we probably want to report something
856 // here. If our runtime policy for SO ever changes (the most likely candidate being "unload
857 // appdomain on SO) then we'll have to do something more complex here, probably involving a
858 // handshake with the runtime in order to report the SO without attempting to extend the faulting
859 // thread's stack any further. Note that we cannot call most PAL functions from the context of
860 // this thread since we're not a PAL thread.
862 write(STDERR_FILENO, StackOverflowMessage, sizeof(StackOverflowMessage) - 1);
866 #else // (_X86_ || _AMD64_) && __APPLE__
867 #error Platform not supported for correct stack overflow handling
868 #endif // (_X86_ || _AMD64_) && __APPLE__
869 #endif // CORECLR && _X86_
// ----- 32-bit (x86) hijack path -----
872 NONPAL_ASSERTE(exceptionInfo.ThreadState.tsh.flavor == x86_THREAD_STATE32);
874 // Make a copy of the thread state because the one in exceptionInfo needs to be preserved to restore
875 // the state if the exception is forwarded.
876 x86_thread_state32_t ts32 = exceptionInfo.ThreadState.uts.ts32;
878 // If we're in single step mode, disable it since we're going to call PAL_DispatchException
879 if (exceptionRecord.ExceptionCode == EXCEPTION_SINGLE_STEP)
881 ts32.eflags &= ~EFL_TF;
884 exceptionRecord.ExceptionFlags = EXCEPTION_IS_SIGNAL;
885 exceptionRecord.ExceptionRecord = NULL;
886 exceptionRecord.ExceptionAddress = (void *)ts32.eip;
888 void **FramePointer = (void **)ts32.esp;
// Push the faulting EIP as if the faulting code had called the wrapper.
890 *--FramePointer = (void *)ts32.eip;
892 // Construct a stack frame for a pretend activation of the function
893 // PAL_DispatchExceptionWrapper that serves only to make the stack
894 // correctly unwindable by the system exception unwinder.
895 // PAL_DispatchExceptionWrapper has an ebp frame, its local variables
896 // are the context and exception record, and it has just "called"
897 // PAL_DispatchException.
898 *--FramePointer = (void *)ts32.ebp;
899 ts32.ebp = (unsigned)FramePointer;
901 // Put the context on the stack
902 FramePointer = (void **)((ULONG_PTR)FramePointer - sizeof(CONTEXT));
903 // Make sure it's aligned - CONTEXT has 8-byte alignment
904 FramePointer = (void **)((ULONG_PTR)FramePointer - ((ULONG_PTR)FramePointer % 8));
905 CONTEXT *pContext = (CONTEXT *)FramePointer;
906 *pContext = threadContext;
908 // Put the exception record on the stack
909 FramePointer = (void **)((ULONG_PTR)FramePointer - sizeof(EXCEPTION_RECORD));
910 EXCEPTION_RECORD *pExceptionRecord = (EXCEPTION_RECORD *)FramePointer;
911 *pExceptionRecord = exceptionRecord;
913 FramePointer = (void **)((ULONG_PTR)FramePointer - sizeof(MachExceptionInfo));
914 MachExceptionInfo *pMachExceptionInfo = (MachExceptionInfo *)FramePointer;
915 *pMachExceptionInfo = exceptionInfo;
917 // Push arguments to PAL_DispatchException
918 FramePointer = (void **)((ULONG_PTR)FramePointer - 3 * sizeof(void *));
920 // Make sure it's aligned - ABI requires 16-byte alignment
921 FramePointer = (void **)((ULONG_PTR)FramePointer - ((ULONG_PTR)FramePointer % 16));
922 FramePointer[0] = pContext;
923 FramePointer[1] = pExceptionRecord;
924 FramePointer[2] = pMachExceptionInfo;
926 // Place the return address to right after the fake call in PAL_DispatchExceptionWrapper
927 FramePointer[-1] = (void *)((ULONG_PTR)PAL_DispatchExceptionWrapper + PAL_DispatchExceptionReturnOffset);
929 // Make the instruction register point to DispatchException
930 ts32.eip = (unsigned)PAL_DispatchException;
931 ts32.esp = (unsigned)&FramePointer[-1]; // skip return address
933 // Now set the thread state for the faulting thread so that PAL_DispatchException executes next
934 machret = thread_set_state(thread, x86_THREAD_STATE32, (thread_state_t)&ts32, x86_THREAD_STATE32_COUNT);
935 CHECK_MACH("thread_set_state(thread)", machret);
936 #elif defined(_AMD64_)
// ----- 64-bit (AMD64) hijack path: same scheme with 64-bit registers -----
937 NONPAL_ASSERTE(exceptionInfo.ThreadState.tsh.flavor == x86_THREAD_STATE64);
939 // Make a copy of the thread state because the one in exceptionInfo needs to be preserved to restore
940 // the state if the exception is forwarded.
941 x86_thread_state64_t ts64 = exceptionInfo.ThreadState.uts.ts64;
943 // If we're in single step mode, disable it since we're going to call PAL_DispatchException
944 if (exceptionRecord.ExceptionCode == EXCEPTION_SINGLE_STEP)
946 ts64.__rflags &= ~EFL_TF;
949 exceptionRecord.ExceptionFlags = EXCEPTION_IS_SIGNAL;
950 exceptionRecord.ExceptionRecord = NULL;
951 exceptionRecord.ExceptionAddress = (void *)ts64.__rip;
953 void **FramePointer = (void **)ts64.__rsp;
// Push the faulting RIP as if the faulting code had called the wrapper.
955 *--FramePointer = (void *)ts64.__rip;
957 // Construct a stack frame for a pretend activation of the function
958 // PAL_DispatchExceptionWrapper that serves only to make the stack
959 // correctly unwindable by the system exception unwinder.
960 // PAL_DispatchExceptionWrapper has an ebp frame, its local variables
961 // are the context and exception record, and it has just "called"
962 // PAL_DispatchException.
963 *--FramePointer = (void *)ts64.__rbp;
964 ts64.__rbp = (SIZE_T)FramePointer;
966 // Put the context on the stack
967 FramePointer = (void **)((ULONG_PTR)FramePointer - sizeof(CONTEXT));
968 // Make sure it's aligned - CONTEXT has 16-byte alignment
969 FramePointer = (void **)((ULONG_PTR)FramePointer - ((ULONG_PTR)FramePointer % 16));
970 CONTEXT *pContext = (CONTEXT *)FramePointer;
971 *pContext = threadContext;
973 // Put the exception record on the stack
974 FramePointer = (void **)((ULONG_PTR)FramePointer - sizeof(EXCEPTION_RECORD));
975 EXCEPTION_RECORD *pExceptionRecord = (EXCEPTION_RECORD *)FramePointer;
976 *pExceptionRecord = exceptionRecord;
978 FramePointer = (void **)((ULONG_PTR)FramePointer - sizeof(MachExceptionInfo));
979 MachExceptionInfo *pMachExceptionInfo = (MachExceptionInfo *)FramePointer;
980 *pMachExceptionInfo = exceptionInfo;
982 // Push arguments to PAL_DispatchException
983 FramePointer = (void **)((ULONG_PTR)FramePointer - 3 * sizeof(void *));
985 // Make sure it's aligned - ABI requires 16-byte alignment
986 FramePointer = (void **)((ULONG_PTR)FramePointer - ((ULONG_PTR)FramePointer % 16));
987 FramePointer[0] = pContext;
988 FramePointer[1] = pExceptionRecord;
989 FramePointer[2] = pMachExceptionInfo;
991 // Place the return address to right after the fake call in PAL_DispatchExceptionWrapper
992 FramePointer[-1] = (void *)((ULONG_PTR)PAL_DispatchExceptionWrapper + PAL_DispatchExceptionReturnOffset);
994 // Make the instruction register point to DispatchException
995 ts64.__rip = (SIZE_T)PAL_DispatchException;
996 ts64.__rsp = (SIZE_T)&FramePointer[-1]; // skip return address
998 // Now set the thread state for the faulting thread so that PAL_DispatchException executes next
999 machret = thread_set_state(thread, x86_THREAD_STATE64, (thread_state_t)&ts64, x86_THREAD_STATE64_COUNT);
1000 CHECK_MACH("thread_set_state(thread)", machret);
1002 #error HijackFaultingThread not defined for this architecture
1010 Suspend the specified thread.
1013 thread - mach thread port
// Suspends the thread and aborts any in-kernel operation so the thread's user
// state can be safely inspected/modified; retries (resume + suspend again) if
// the kernel operation could not be aborted at this point.
1020 SuspendMachThread(thread_act_t thread)
1022 kern_return_t machret;
1026 machret = thread_suspend(thread);
1027 CHECK_MACH("thread_suspend", machret);
1029 // Ensure that if the thread was running in the kernel, the kernel operation
1030 // is safely aborted so that it can be restarted later.
1031 machret = thread_abort_safely(thread);
1032 if (machret == KERN_SUCCESS)
1037 // The thread was running in the kernel executing a non-atomic operation
1038 // that cannot be restarted, so we need to resume the thread and retry
1039 machret = thread_resume(thread);
1040 CHECK_MACH("thread_resume", machret);
1048 Entry point for the thread that listens for exceptions occurring in any other thread.
1050 #ifdef FEATURE_PAL_SXS
1051 NOTE: This thread is not a PAL thread, and it must not be one. If it was,
1052 exceptions on this thread would be delivered to the port this thread itself
1055 In particular, if another thread overflows its stack, the exception handling
1056 thread receives a message. It will try to create a PAL_DispatchException
1057 frame on the faulting thread, which will likely fault. If the exception
1058 processing thread is not a PAL thread, the process gets terminated with a
1059 bus error; if the exception processing thread was a PAL thread, we would see
1060 a hang (since no thread is listening for the exception message that gets sent).
1061 Of the two ugly behaviors, the bus error is definitely favorable.
1063 This means: no printf, no TRACE, no PAL allocation, no ExitProcess,
1064 no LastError in this function and its helpers. To report fatal failure,
1065 use NONPAL_RETAIL_ASSERT.
1066 #endif // FEATURE_PAL_SXS
1069 void *args - not used
/*++
Function :
    SEHExceptionThread

    Entry point for the dedicated thread that listens on s_ExceptionPort and
    services incoming messages forever:
      - set-thread-context requests (sent by MachSetThreadContext),
      - exception notifications for faulting threads, which are hijacked to
        run PAL_DispatchException, forwarded to a previously saved handler,
        or failed back to the kernel with KERN_FAILURE, and
      - forward-exception requests, which restore the faulting thread's
        original state and record it in feList so the restarted notification
        can be matched up when it arrives again.

Parameters :
    void *args - not used

    NOTE: this is not a PAL thread (see the comment block above), so no PAL
    allocation, TRACE, or LastError may be used here.
--*/
SEHExceptionThread(void *args)
    ForwardedExceptionList feList;  // threads whose restarted notification must be forwarded
    MachMessage sReplyOrForward;    // buffer for replies/forwards we send
    MachMessage sMessage;           // buffer for the incoming message
    kern_return_t machret;
    thread_act_t thread;

    // Loop processing incoming messages forever.

    // Receive the next message.
    sMessage.Receive(s_ExceptionPort);

    NONPAL_TRACE("Received message %s (%08x) from (remote) %08x to (local) %08x\n",
        sMessage.GetMessageTypeName(),
        sMessage.GetMessageType(),
        sMessage.GetRemotePort(),
        sMessage.GetLocalPort());

    if (sMessage.IsSetThreadRequest())
    // Handle a request to set the thread context for the specified target thread.
    thread = sMessage.GetThreadContext(&sContext);

    // Suspend the target thread while its register state is rewritten.
    SuspendMachThread(thread);

    machret = CONTEXT_SetThreadContextOnPort(thread, &sContext);
    CHECK_MACH("CONTEXT_SetThreadContextOnPort", machret);

    machret = thread_resume(thread);
    CHECK_MACH("thread_resume", machret);
    else if (sMessage.IsExceptionNotification())
    // This is a notification of an exception occurring on another thread.
    exception_type_t exceptionType = sMessage.GetException();
    thread = sMessage.GetThread();

    // Tracing only: dump the exception details and the faulting thread's state.
    if (NONPAL_TRACE_ENABLED)
    NONPAL_TRACE("ExceptionNotification %s (%u) thread %08x flavor %u\n",
        GetExceptionString(exceptionType),
        sMessage.GetThreadStateFlavor());

    int subcode_count = sMessage.GetExceptionCodeCount();
    for (int i = 0; i < subcode_count; i++)
        NONPAL_TRACE("ExceptionNotification subcode[%d] = %llx\n", i, sMessage.GetExceptionCode(i));

    x86_thread_state64_t threadStateActual;
    unsigned int count = sizeof(threadStateActual) / sizeof(unsigned);
    machret = thread_get_state(thread, x86_THREAD_STATE64, (thread_state_t)&threadStateActual, &count);
    CHECK_MACH("thread_get_state", machret);

    NONPAL_TRACE("ExceptionNotification actual rip %016llx rsp %016llx rbp %016llx rax %016llx r15 %016llx eflags %08llx\n",
        threadStateActual.__rip,
        threadStateActual.__rsp,
        threadStateActual.__rbp,
        threadStateActual.__rax,
        threadStateActual.__r15,
        threadStateActual.__rflags);

    x86_exception_state64_t threadExceptionState;
    unsigned int ehStateCount = sizeof(threadExceptionState) / sizeof(unsigned);
    machret = thread_get_state(thread, x86_EXCEPTION_STATE64, (thread_state_t)&threadExceptionState, &ehStateCount);
    CHECK_MACH("thread_get_state", machret);

    NONPAL_TRACE("ExceptionNotification trapno %04x cpu %04x err %08x faultAddr %016llx\n",
        threadExceptionState.__trapno,
        threadExceptionState.__cpu,
        threadExceptionState.__err,
        threadExceptionState.__faultvaddr);

    // Search feList for an entry recorded for this thread by an earlier
    // forward-exception request.
    bool feFound = false;

    while (!feList.IsEOL())
    // Prune entries whose thread port has died since the entry was added.
    mach_port_type_t ePortType;
    if (mach_port_type(mach_task_self(), feList.Current->Thread, &ePortType) != KERN_SUCCESS || (ePortType & MACH_PORT_TYPE_DEAD_NAME))
    NONPAL_TRACE("Forwarded exception: invalid thread port %08x\n", feList.Current->Thread);

    // Unlink and delete the forwarded exception instance
    if (feList.Current->Thread == thread)
    bool isSameException = feList.Current->ExceptionType == exceptionType;

    // Locate the record of previously installed handlers that the target thread keeps.
    CThreadMachExceptionHandlers *pHandlers = feList.Current->PalThread->GetSavedMachHandlers();

    // Unlink and delete the forwarded exception instance

    // Check if the current exception type matches the forwarded one and whether
    // there's a handler for the particular exception we've been handed.
    MachExceptionHandler sHandler;
    if (isSameException && pHandlers->GetHandler(exceptionType, &sHandler))
    NONPAL_TRACE("ForwardNotification thread %08x to handler %08x\n", thread, sHandler.m_handler);
    sReplyOrForward.ForwardNotification(&sHandler, sMessage);
    // No matching handler: fail the notification back to the kernel.
    NONPAL_TRACE("ReplyToNotification KERN_FAILURE thread %08x port %08x sameException %d\n",
        thread, sMessage.GetRemotePort(), isSameException);
    sReplyOrForward.ReplyToNotification(sMessage, KERN_FAILURE);
    // Default path: hijack the faulting thread so it runs PAL_DispatchException.
    NONPAL_TRACE("HijackFaultingThread thread %08x\n", thread);
    HijackFaultingThread(thread, mach_task_self(), sMessage);

    // Send the result of handling the exception back in a reply.
    NONPAL_TRACE("ReplyToNotification KERN_SUCCESS thread %08x port %08x\n", thread, sMessage.GetRemotePort());
    sReplyOrForward.ReplyToNotification(sMessage, KERN_SUCCESS);
    else if (sMessage.IsForwardExceptionRequest())
    thread = sMessage.GetThread();

    NONPAL_TRACE("ForwardExceptionRequest for thread %08x\n", thread);

    // Suspend the faulting thread.
    SuspendMachThread(thread);

    // Set the context back to the original faulting state.
    MachExceptionInfo *pExceptionInfo = sMessage.GetExceptionInfo();
    pExceptionInfo->RestoreState(thread);

    // Allocate a forwarded exception entry (plain malloc: PAL allocation is
    // forbidden on this thread — see the NOTE in the function header).
    ForwardedException *pfe = (ForwardedException *)malloc(sizeof(ForwardedException));
    NONPAL_RETAIL_ASSERT("Exception thread ran out of memory to track forwarded exception notifications");

    // Save the forwarded exception entry away for the restarted exception message
    pfe->Thread = thread;
    pfe->ExceptionType = pExceptionInfo->ExceptionType;
    pfe->PalThread = sMessage.GetPalThread();

    // Now let the thread run at the original exception context to restart the exception
    NONPAL_TRACE("ForwardExceptionRequest resuming thread %08x exception type %08x\n", thread, pfe->ExceptionType);
    machret = thread_resume(thread);
    CHECK_MACH("thread_resume", machret);
    NONPAL_RETAIL_ASSERT("Unknown message type: %u", sMessage.GetMessageType());
1252 MachExceptionInfo constructor
1254 Saves the exception info from the exception notification message and
1255 the current thread state.
1258 thread - thread port to restore
1259 message - exception message
/*++
Function :
    MachExceptionInfo constructor

    Saves the exception info from the exception notification message together
    with the faulting thread's full register state (general purpose, floating
    point and debug), so the state can later be re-applied by RestoreState.

Parameters :
    thread  - thread port to capture state from
    message - exception notification message
--*/
MachExceptionInfo::MachExceptionInfo(mach_port_t thread, MachMessage& message)
    kern_return_t machret;

    ExceptionType = message.GetException();
    SubcodeCount = message.GetExceptionCodeCount();
    // Subcodes can hold at most 2 entries — assumes the kernel never sends
    // more for the exception types we register for (TODO confirm against the
    // MachExceptionInfo declaration in machexception.h).
    NONPAL_RETAIL_ASSERTE(SubcodeCount >= 0 && SubcodeCount <= 2);

    for (int i = 0; i < SubcodeCount; i++)
        Subcodes[i] = message.GetExceptionCode(i);

    // Capture the three register-state flavors that RestoreState restores.
    mach_msg_type_number_t count = x86_THREAD_STATE_COUNT;
    machret = thread_get_state(thread, x86_THREAD_STATE, (thread_state_t)&ThreadState, &count);
    CHECK_MACH("thread_get_state", machret);

    count = x86_FLOAT_STATE_COUNT;
    machret = thread_get_state(thread, x86_FLOAT_STATE, (thread_state_t)&FloatState, &count);
    CHECK_MACH("thread_get_state(float)", machret);

    count = x86_DEBUG_STATE_COUNT;
    machret = thread_get_state(thread, x86_DEBUG_STATE, (thread_state_t)&DebugState, &count);
    CHECK_MACH("thread_get_state(debug)", machret);
1290 MachExceptionInfo::RestoreState
1292 Restore the thread to the saved exception info state.
1295 thread - thread port to restore
/*++
Function :
    MachExceptionInfo::RestoreState

    Restore the thread to the register state saved by the constructor
    (general purpose, floating point and debug state).

Parameters :
    thread - thread port to restore
--*/
void MachExceptionInfo::RestoreState(mach_port_t thread)
    // If we are restarting a breakpoint, we need to bump the IP back one to
    // point at the actual int 3 instruction.
    if (ExceptionType == EXC_BREAKPOINT)
    if (Subcodes[0] == EXC_I386_BPT)
    ThreadState.uts.ts32.eip--;
#elif defined(_AMD64_)
    ThreadState.uts.ts64.__rip--;
#error Platform not supported

    // Re-apply the three captured register-state flavors.
    kern_return_t machret = thread_set_state(thread, x86_THREAD_STATE, (thread_state_t)&ThreadState, x86_THREAD_STATE_COUNT);
    CHECK_MACH("thread_set_state(thread)", machret);

    machret = thread_set_state(thread, x86_FLOAT_STATE, (thread_state_t)&FloatState, x86_FLOAT_STATE_COUNT);
    CHECK_MACH("thread_set_state(float)", machret);

    machret = thread_set_state(thread, x86_DEBUG_STATE, (thread_state_t)&DebugState, x86_DEBUG_STATE_COUNT);
    CHECK_MACH("thread_set_state(debug)", machret);
1329 MachSetThreadContext
1331 Sets the context of the current thread by sending a notification
1332 to the exception thread.
1335 lpContext - the CONTEXT to set the current thread
/*++
Function :
    MachSetThreadContext

    Sets the context of the current thread by sending a set-thread request
    to the exception-handling thread, which rewrites our register state
    while we are suspended.

Parameters :
    lpContext - the CONTEXT to apply to the current thread
--*/
MachSetThreadContext(CONTEXT *lpContext)
    // We need to send a message to the worker thread so that it can set our thread context.
    MachMessage sRequest;
    sRequest.SendSetThread(s_ExceptionPort, lpContext);

    // Make sure we don't do anything further on this thread — the exception
    // thread is about to replace our context. (NOTE(review): the statement
    // this comment guards is elided from this view; confirm it prevents
    // execution from continuing here.)
1358 SEHInitializeMachExceptions
1360 Initialize all SEH-related stuff related to mach exceptions
1362 flags - PAL_INITIALIZE flags
1365 TRUE if SEH support initialization succeeded
/*++
Function :
    SEHInitializeMachExceptions

    Initialize all SEH-related setup for Mach exceptions: allocate the
    exception port (s_ExceptionPort), insert a send right for it into the
    task, start the exception-processing thread (SEHExceptionThread), and
    make SIGPIPE non-fatal.

Parameters :
    flags - PAL_INITIALIZE flags

Return value :
    TRUE if SEH support initialization succeeded
--*/
SEHInitializeMachExceptions(DWORD flags)
    pthread_t exception_thread;
    kern_return_t machret;

    s_PalInitializeFlags = flags;

    // Allocate a mach port that will listen in on exceptions
    machret = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &s_ExceptionPort);
    if (machret != KERN_SUCCESS)
    ASSERT("mach_port_allocate failed: %d\n", machret);
    UTIL_SetLastErrorFromMach(machret);

    // Insert the send right into the task
    machret = mach_port_insert_right(mach_task_self(), s_ExceptionPort, s_ExceptionPort, MACH_MSG_TYPE_MAKE_SEND);
    if (machret != KERN_SUCCESS)
    ASSERT("mach_port_insert_right failed: %d\n", machret);
    UTIL_SetLastErrorFromMach(machret);

    // Create the thread that will listen to the exceptions for all threads
    int createret = pthread_create(&exception_thread, NULL, SEHExceptionThread, NULL);
    ERROR("pthread_create failed, error is %d (%s)\n", createret, strerror(createret));
    SetLastError(ERROR_NOT_ENOUGH_MEMORY);

    // Tracing only: dump any exception ports already registered on the task.
    if (NONPAL_TRACE_ENABLED)
    CThreadMachExceptionHandlers taskHandlers;
    machret = task_get_exception_ports(mach_task_self(),
        taskHandlers.m_masks,
        &taskHandlers.m_nPorts,
        taskHandlers.m_handlers,
        taskHandlers.m_behaviors,
        taskHandlers.m_flavors);

    if (machret == KERN_SUCCESS)
    NONPAL_TRACE("SEHInitializeMachExceptions: TASK PORT count %d\n", taskHandlers.m_nPorts);
    for (mach_msg_type_number_t i = 0; i < taskHandlers.m_nPorts; i++)
    NONPAL_TRACE("SEHInitializeMachExceptions: TASK PORT mask %08x handler: %08x behavior %08x flavor %u\n",
        taskHandlers.m_masks[i],
        taskHandlers.m_handlers[i],
        taskHandlers.m_behaviors[i],
        taskHandlers.m_flavors[i]);
    NONPAL_TRACE("SEHInitializeMachExceptions: task_get_exception_ports FAILED %d %s\n", machret, mach_error_string(machret));

#ifndef FEATURE_PAL_SXS
    // Non side-by-side build: register our exception ports up front.
    if (!SEHEnableMachExceptions())
#endif // !FEATURE_PAL_SXS

    // Tell the system to ignore SIGPIPE signals rather than use the default
    // behavior of terminating the process. Ignoring SIGPIPE will cause
    // calls that would otherwise raise that signal to return EPIPE instead.
    // The PAL expects EPIPE from those functions and won't handle a
    // SIGPIPE signal.
    signal(SIGPIPE, SIG_IGN);
1454 MachExceptionInitializeDebug
1456 Initialize the mach exception handlers necessary for a managed debugger
/*++
Function :
    MachExceptionInitializeDebug

    Initialize the mach exception handlers necessary for a managed debugger.
    Idempotent: guarded by s_DebugInitialized so the task ports are only
    registered once.
--*/
void MachExceptionInitializeDebug(void)
    if (s_DebugInitialized == FALSE)
#ifndef FEATURE_PAL_SXS
    kern_return_t MachRet;
    MachRet = task_set_exception_ports(mach_task_self(),
        PAL_EXC_DEBUGGING_MASK,
        MACHINE_THREAD_STATE);
    if (MachRet != KERN_SUCCESS)
    ASSERT("task_set_exception_ports failed: %d\n", MachRet);
    // Cannot run a managed debugger without the exception ports — give up.
    TerminateProcess(GetCurrentProcess(), (UINT)(-1));
#endif // !FEATURE_PAL_SXS
    s_DebugInitialized = TRUE;
1485 SEHCleanupExceptionPort
1487 Restore default exception port handler
1489 (no parameters, no return value)
1492 During PAL_Terminate, we reach a point where SEH isn't possible any more
1493 (handle manager is off, etc.). Past that point, we can't avoid crashing on an exception.
/*++
Function :
    SEHCleanupExceptionPort

    Restore the default exception port handlers and mark debug exception
    support as uninitialized. Called during PAL shutdown (see the note above).
--*/
SEHCleanupExceptionPort(void)
    TRACE("Restoring default exception ports\n");
#ifndef FEATURE_PAL_SXS
    SEHDisableMachExceptions();
#endif // !FEATURE_PAL_SXS
    s_DebugInitialized = FALSE;
// Runs on a thread hijacked by InjectActivationInternal: invokes the
// registered activation function (if any) with the captured CONTEXT, then
// resumes the interrupted execution by restoring that context.
ActivationHandler(CONTEXT* context)
    if (g_activationFunction != NULL)
    g_activationFunction(context);

    // Resume at the point where the activation was injected.
    RtlRestoreContext(context, NULL);
1519 extern "C" void ActivationHandlerWrapper();
1520 extern "C" int ActivationHandlerReturnOffset;
1524 InjectActivationInternal
1526 Sets up the specified thread to call the ActivationHandler.
1529 pThread - PAL thread instance
1535 InjectActivationInternal(CPalThread* pThread)
1539 mach_port_t threadPort = pThread->GetMachPortSelf();
1540 kern_return_t MachRet = thread_suspend(threadPort);
1541 palError = (MachRet == KERN_SUCCESS) ? NO_ERROR : ERROR_GEN_FAILURE;
1543 if (palError == NO_ERROR)
1545 mach_msg_type_number_t count;
1547 x86_exception_state64_t ExceptionState;
1548 count = x86_EXCEPTION_STATE64_COUNT;
1549 MachRet = thread_get_state(threadPort,
1550 x86_EXCEPTION_STATE64,
1551 (thread_state_t)&ExceptionState,
1553 _ASSERT_MSG(MachRet == KERN_SUCCESS, "thread_get_state for x86_EXCEPTION_STATE64\n");
1555 // Inject the activation only if the thread doesn't have a pending hardware exception
1556 static const int MaxHardwareExceptionVector = 31;
1557 if (ExceptionState.__trapno > MaxHardwareExceptionVector)
1559 x86_thread_state64_t ThreadState;
1560 count = x86_THREAD_STATE64_COUNT;
1561 MachRet = thread_get_state(threadPort,
1563 (thread_state_t)&ThreadState,
1565 _ASSERT_MSG(MachRet == KERN_SUCCESS, "thread_get_state for x86_THREAD_STATE64\n");
1567 if ((g_safeActivationCheckFunction != NULL) && g_safeActivationCheckFunction(ThreadState.__rip, /* checkingCurrentThread */ FALSE))
1569 // TODO: it would be nice to preserve the red zone in case a jitter would want to use it
1570 // Do we really care about unwinding through the wrapper?
1571 size_t* sp = (size_t*)ThreadState.__rsp;
1572 *(--sp) = ThreadState.__rip;
1573 *(--sp) = ThreadState.__rbp;
1574 size_t rbpAddress = (size_t)sp;
1575 size_t contextAddress = (((size_t)sp) - sizeof(CONTEXT)) & ~15;
1576 size_t returnAddressAddress = contextAddress - sizeof(size_t);
1577 *(size_t*)(returnAddressAddress) = ActivationHandlerReturnOffset + (size_t)ActivationHandlerWrapper;
1579 // Fill in the context in the helper frame with the full context of the suspended thread.
1580 // The ActivationHandler will use the context to resume the execution of the thread
1581 // after the activation function returns.
1582 CONTEXT *pContext = (CONTEXT *)contextAddress;
1583 pContext->ContextFlags = CONTEXT_FULL | CONTEXT_SEGMENTS;
1584 MachRet = CONTEXT_GetThreadContextFromPort(threadPort, pContext);
1585 _ASSERT_MSG(MachRet == KERN_SUCCESS, "CONTEXT_GetThreadContextFromPort\n");
1587 // Make the instruction register point to ActivationHandler
1588 ThreadState.__rip = (size_t)ActivationHandler;
1589 ThreadState.__rsp = returnAddressAddress;
1590 ThreadState.__rbp = rbpAddress;
1591 ThreadState.__rdi = contextAddress;
1593 MachRet = thread_set_state(threadPort,
1595 (thread_state_t)&ThreadState,
1597 _ASSERT_MSG(MachRet == KERN_SUCCESS, "thread_set_state\n");
1601 MachRet = thread_resume(threadPort);
1602 palError = (MachRet == ERROR_SUCCESS) ? NO_ERROR : ERROR_GEN_FAILURE;
1606 printf("Suspension failed with error 0x%x\n", palError);
1612 #endif // HAVE_MACH_EXCEPTIONS