1 // Licensed to the .NET Foundation under one or more agreements.
2 // The .NET Foundation licenses this file to you under the MIT license.
3 // See the LICENSE file in the project root for more information.
13 Implementation of MACH exception API functions.
17 #include "pal/thread.hpp"
18 #include "pal/seh.hpp"
19 #include "pal/palinternal.h"
20 #if HAVE_MACH_EXCEPTIONS
21 #include "machexception.h"
22 #include "pal/dbgmsg.h"
23 #include "pal/critsect.h"
24 #include "pal/debug.h"
26 #include "pal/utils.h"
27 #include "pal/context.h"
28 #include "pal/malloc.hpp"
29 #include "pal/process.h"
30 #include "pal/virtual.h"
31 #include "pal/map.hpp"
32 #include "pal/environ.h"
34 #include "machmessage.h"
41 #include <mach-o/loader.h>
43 using namespace CorUnix;
45 SET_DEFAULT_DEBUG_CHANNEL(EXCEPT);
47 // The port we use to handle exceptions and to set the thread context
// Receive right serviced by the exception-handling thread; faulting threads
// also direct SetThreadContext and forward-exception requests here
// (see SendForwardException below).
48 mach_port_t s_ExceptionPort;
// NOTE(review): appears to gate breakpoint-exception hooking in the
// non-FEATURE_PAL_SXS build of GetExceptionMask — where it is set is not
// visible in this file; confirm.
50 static BOOL s_DebugInitialized = FALSE;
// Name of the environment variable whose integer value supplies the
// MachExceptionMode suppression flags (read once in GetExceptionMask).
52 static const char * PAL_MACH_EXCEPTION_MODE = "PAL_MachExceptionMode";
54 // This struct is used to track the threads that need to have an exception forwarded
55 // to the next thread level port in the chain (if exists). An entry is added by the
56 // faulting sending a special message to the exception thread which saves it on an
57 // list that is searched when the restarted exception notification is received again.
58 struct ForwardedException
// Link to the next entry in the singly linked list (managed by
// ForwardedExceptionList below).
60 ForwardedException *m_next;
// Mach exception type (EXC_*) being forwarded.
62 exception_type_t ExceptionType;
// PAL thread on which the exception occurred.
63 CPalThread *PalThread;
66 // The singly linked list and enumerator for the ForwardException struct
67 struct ForwardedExceptionList
// Head of the list; NULL when the list is empty.
70 ForwardedException *m_head;
// Node preceding Current during enumeration; NULL while Current is the head.
71 ForwardedException *m_previous;
// Enumeration cursor.
74 ForwardedException *Current;
76 ForwardedExceptionList()
// Enumeration is finished once the cursor runs off the end of the list.
90 return Current == NULL;
// Advance the cursor to the next node.
96 Current = Current->m_next;
// Push a new entry onto the front of the list.
99 void Add(ForwardedException *item)
101 item->m_next = m_head;
// Unlink Current: if it is the head node, re-point the head...
107 if (m_previous == NULL)
109 m_head = Current->m_next;
// ...otherwise splice it out after its predecessor.
113 m_previous->m_next = Current->m_next;
122 enum MachExceptionMode
124 // special value to indicate we've not initialized yet
125 MachException_Uninitialized = -1,
127 // These can be combined with bitwise OR to incrementally turn off
128 // functionality for diagnostics purposes.
130 // In practice, the following values are probably useful:
131 // 1: Don't turn illegal instructions into SEH exceptions.
132 // On Intel, stack misalignment usually shows up as an
133 // illegal instruction. PAL client code shouldn't
134 // expect to see any of these, so this option should
135 // always be safe to set.
136 // 2: Don't listen for breakpoint exceptions. This makes an
137 // SEH-based debugger (i.e., managed debugger) unusable,
138 // but you may need this option if you find that native
139 // breakpoints you set in PAL-dependent code don't work
140 // (causing hangs or crashes in the native debugger).
141 // 3: Combination of the above.
142 // This is the typical setting for development
143 // (unless you're working on the managed debugger).
144 // 7: In addition to the above, don't turn bad accesses and
145 // arithmetic exceptions into SEH.
146 // This is the typical setting for stress.
// Each flag suppresses one class of exceptions from being hooked
// (see GetExceptionMask, which translates these into PAL_EXC_*_MASK bits).
147 MachException_SuppressIllegal = 1,
148 MachException_SuppressDebugging = 2,
149 MachException_SuppressManaged = 4,
151 // Default value to use if environment variable not set.
152 MachException_Default = 0,
159 Returns the mach exception mask for the exceptions to hook for a thread.
// The mode is computed once (lazily, on first call) from the
// PAL_MachExceptionMode environment variable and cached in a function-local
// static for all subsequent calls.
168 static MachExceptionMode exMode = MachException_Uninitialized;
170 if (exMode == MachException_Uninitialized)
172 exMode = MachException_Default;
174 char* exceptionSettings = EnvironGetenv(PAL_MACH_EXCEPTION_MODE);
175 if (exceptionSettings)
// Environment variable overrides the default; its value is the raw
// MachExceptionMode bit flags as a decimal integer.
177 exMode = (MachExceptionMode)atoi(exceptionSettings);
178 InternalFree(exceptionSettings);
// No environment setting: if a native debugger is attached, leave
// breakpoint/single-step exceptions to it rather than hooking them.
182 if (PAL_IsDebuggerPresent())
184 exMode = MachException_SuppressDebugging;
// Translate the suppression flags into a mask of exception classes to hook.
189 exception_mask_t machExceptionMask = 0;
190 if (!(exMode & MachException_SuppressIllegal))
192 machExceptionMask |= PAL_EXC_ILLEGAL_MASK;
194 if (!(exMode & MachException_SuppressDebugging))
196 #ifdef FEATURE_PAL_SXS
197 // Always hook exception ports for breakpoint exceptions.
198 // The reason is that we don't know when a managed debugger
199 // will attach, so we have to be prepared. We don't want
200 // to later go through the thread list and hook exception
201 // ports for exactly those threads that currently are in
203 machExceptionMask |= PAL_EXC_DEBUGGING_MASK;
204 #else // FEATURE_PAL_SXS
205 if (s_DebugInitialized)
207 machExceptionMask |= PAL_EXC_DEBUGGING_MASK;
209 #endif // FEATURE_PAL_SXS
211 if (!(exMode & MachException_SuppressManaged))
213 machExceptionMask |= PAL_EXC_MANAGED_MASK;
216 return machExceptionMask;
219 #ifdef FEATURE_PAL_SXS
223 CPalThread::EnableMachExceptions
225 Hook Mach exceptions, i.e., call thread_swap_exception_ports
226 to replace the thread's current exception ports with our own.
227 The previously active exception ports are saved. Called when
228 this thread enters a region of code that depends on this PAL.
231 ERROR_SUCCESS, if enabling succeeded
232 an error code, otherwise
234 PAL_ERROR CorUnix::CPalThread::EnableMachExceptions()
236 TRACE("%08X: Enter()\n", (unsigned int)(size_t)this);
238 exception_mask_t machExceptionMask = GetExceptionMask();
239 if (machExceptionMask != 0)
242 // verify that the arrays we've allocated to hold saved exception ports
243 // are the right size.
244 exception_mask_t countBits = PAL_EXC_ALL_MASK;
// Parallel popcount of PAL_EXC_ALL_MASK: one saved-port slot is needed per
// exception bit that can be hooked.
245 countBits = ((countBits & 0xAAAAAAAA) >> 1) + (countBits & 0x55555555);
246 countBits = ((countBits & 0xCCCCCCCC) >> 2) + (countBits & 0x33333333);
247 countBits = ((countBits & 0xF0F0F0F0) >> 4) + (countBits & 0x0F0F0F0F);
248 countBits = ((countBits & 0xFF00FF00) >> 8) + (countBits & 0x00FF00FF);
249 countBits = ((countBits & 0xFFFF0000) >> 16) + (countBits & 0x0000FFFF);
250 if (countBits != static_cast<exception_mask_t>(CThreadMachExceptionHandlers::s_nPortsMax))
252 ASSERT("s_nPortsMax is %u, but needs to be %u\n",
253 CThreadMachExceptionHandlers::s_nPortsMax, countBits);
257 NONPAL_TRACE("Enabling handlers for thread %08x exception mask %08x exception port %08x\n",
258 GetMachPortSelf(), machExceptionMask, s_ExceptionPort);
260 CThreadMachExceptionHandlers *pSavedHandlers = GetSavedMachHandlers();
262 // Swap current handlers into temporary storage first. That's because it's possible (even likely) that
263 // some or all of the handlers might still be ours. In those cases we don't want to overwrite the
264 // chain-back entries with these useless self-references.
265 kern_return_t machret;
266 kern_return_t machretDeallocate;
// mach_thread_self() returns a new send right that must be deallocated
// below to avoid leaking a port reference.
267 thread_port_t thread = mach_thread_self();
269 machret = thread_swap_exception_ports(
273 EXCEPTION_DEFAULT | MACH_EXCEPTION_CODES,
275 pSavedHandlers->m_masks,
276 &pSavedHandlers->m_nPorts,
277 pSavedHandlers->m_handlers,
278 pSavedHandlers->m_behaviors,
279 pSavedHandlers->m_flavors);
281 machretDeallocate = mach_port_deallocate(mach_task_self(), thread);
282 CHECK_MACH("mach_port_deallocate", machretDeallocate);
284 if (machret != KERN_SUCCESS)
286 ASSERT("thread_swap_exception_ports failed: %d %s\n", machret, mach_error_string(machret));
287 return UTIL_MachErrorToPalError(machret);
// Trace the saved (previous) handler set for diagnostics; none of the
// saved handlers should be our own exception port.
291 NONPAL_TRACE("EnableMachExceptions: THREAD PORT count %d\n", pSavedHandlers->m_nPorts);
292 for (mach_msg_type_number_t i = 0; i < pSavedHandlers->m_nPorts; i++)
294 _ASSERTE(pSavedHandlers->m_handlers[i] != s_ExceptionPort);
295 NONPAL_TRACE("EnableMachExceptions: THREAD PORT mask %08x handler: %08x behavior %08x flavor %u\n",
296 pSavedHandlers->m_masks[i],
297 pSavedHandlers->m_handlers[i],
298 pSavedHandlers->m_behaviors[i],
299 pSavedHandlers->m_flavors[i]);
303 return ERROR_SUCCESS;
308 CPalThread::DisableMachExceptions
310 Unhook Mach exceptions, i.e., call thread_set_exception_ports
311 to restore the thread's exception ports with those we saved
312 in EnableMachExceptions. Called when this thread leaves a
313 region of code that depends on this PAL.
316 ERROR_SUCCESS, if disabling succeeded
317 an error code, otherwise
319 PAL_ERROR CorUnix::CPalThread::DisableMachExceptions()
321 TRACE("%08X: Leave()\n", (unsigned int)(size_t)this);
323 PAL_ERROR palError = NO_ERROR;
325 // We only store exceptions when we're installing exceptions.
326 if (0 == GetExceptionMask())
329 // Get the handlers to restore.
330 CThreadMachExceptionHandlers *savedPorts = GetSavedMachHandlers();
332 kern_return_t MachRet = KERN_SUCCESS;
// Restore each saved handler entry individually.
333 for (int i = 0; i < savedPorts->m_nPorts; i++)
335 // If no handler was ever set, thread_swap_exception_ports returns
336 // MACH_PORT_NULL for the handler and zero values for behavior
337 // and flavor. Unfortunately, the latter are invalid even for
338 // MACH_PORT_NULL when you use thread_set_exception_ports.
339 exception_behavior_t behavior = savedPorts->m_behaviors[i] ? savedPorts->m_behaviors[i] : EXCEPTION_DEFAULT;
340 thread_state_flavor_t flavor = savedPorts->m_flavors[i] ? savedPorts->m_flavors[i] : MACHINE_THREAD_STATE;
// mach_thread_self() allocates a send right; deallocated just below.
341 thread_port_t thread = mach_thread_self();
342 MachRet = thread_set_exception_ports(thread,
343 savedPorts->m_masks[i],
344 savedPorts->m_handlers[i],
348 kern_return_t MachRetDeallocate = mach_port_deallocate(mach_task_self(), thread);
349 CHECK_MACH("mach_port_deallocate", MachRetDeallocate);
351 if (MachRet != KERN_SUCCESS)
355 if (MachRet != KERN_SUCCESS)
357 ASSERT("thread_set_exception_ports failed: %d\n", MachRet);
358 palError = UTIL_MachErrorToPalError(MachRet);
364 #else // FEATURE_PAL_SXS
368 SEHEnableMachExceptions
370 Enable SEH-related stuff related to mach exceptions
375 TRUE if enabling succeeded
// Non-SXS build: hook at task granularity rather than per-thread.
378 BOOL SEHEnableMachExceptions()
380 exception_mask_t machExceptionMask = GetExceptionMask();
381 if (machExceptionMask != 0)
383 kern_return_t MachRet;
// Route the masked exceptions for the whole task to s_ExceptionPort.
384 MachRet = task_set_exception_ports(mach_task_self(),
388 MACHINE_THREAD_STATE);
390 if (MachRet != KERN_SUCCESS)
392 ASSERT("task_set_exception_ports failed: %d\n", MachRet);
// Record the Mach failure as the PAL last error before returning.
393 UTIL_SetLastErrorFromMach(MachRet);
402 SEHDisableMachExceptions
404 Disable SEH-related stuff related to mach exceptions
409 TRUE if disabling succeeded
// Non-SXS build counterpart to SEHEnableMachExceptions: resets the
// task-level exception ports for the masked exceptions.
412 BOOL SEHDisableMachExceptions()
414 exception_mask_t machExceptionMask = GetExceptionMask();
415 if (machExceptionMask != 0)
417 kern_return_t MachRet;
418 MachRet = task_set_exception_ports(mach_task_self(),
422 MACHINE_THREAD_STATE);
424 if (MachRet != KERN_SUCCESS)
426 ASSERT("task_set_exception_ports failed: %d\n", MachRet);
// Record the Mach failure as the PAL last error before returning.
427 UTIL_SetLastErrorFromMach(MachRet);
434 #endif // FEATURE_PAL_SXS
// Runs on the faulting thread (its state was redirected here by
// HijackFaultingThread): delivers the exception to SEH, then asks the
// exception thread to forward/restore via a Mach message.
436 #if !defined(_AMD64_)
438 void PAL_DispatchException(PCONTEXT pContext, PEXCEPTION_RECORD pExRecord, MachExceptionInfo *pMachExceptionInfo)
439 #else // defined(_AMD64_)
441 // Since HijackFaultingThread pushed the context, exception record and info on the stack, we need to adjust the
442 // signature of PAL_DispatchException such that the corresponding arguments are considered to be on the stack
443 // per GCC64 calling convention rules. Hence, the first 6 dummy arguments (corresponding to RDI, RSI, RDX,RCX, R8, R9).
445 void PAL_DispatchException(DWORD64 dwRDI, DWORD64 dwRSI, DWORD64 dwRDX, DWORD64 dwRCX, DWORD64 dwR8, DWORD64 dwR9, PCONTEXT pContext, PEXCEPTION_RECORD pExRecord, MachExceptionInfo *pMachExceptionInfo)
446 #endif // !defined(_AMD64_)
448 CPalThread *pThread = InternalGetCurrentThread();
451 if (!pThread->IsInPal())
453 // It's now possible to observe system exceptions in code running outside the PAL (as the result of a
454 // p/invoke since we no longer revert our Mach exception ports in this case). In that scenario we need
455 // to re-enter the PAL now as the exception signals the end of the p/invoke.
456 PAL_Reenter(PAL_BoundaryBottom);
458 #endif // FEATURE_PAL_SXS
// Package the record and context in the Windows-style EXCEPTION_POINTERS
// shape expected by SEHProcessException.
460 EXCEPTION_POINTERS pointers;
461 pointers.ExceptionRecord = pExRecord;
462 pointers.ContextRecord = pContext;
464 TRACE("PAL_DispatchException(EC %08x EA %p)\n", pExRecord->ExceptionCode, pExRecord->ExceptionAddress);
465 SEHProcessException(&pointers);
467 // Send the forward request to the exception thread to process
468 MachMessage sSendMessage;
469 sSendMessage.SendForwardException(s_ExceptionPort, pMachExceptionInfo, pThread);
471 // Spin wait until this thread is hijacked by the exception thread
// Assembly helper and offset used by HijackFaultingThread to fake a call
// frame so the system unwinder can walk through the dispatch.
478 #if defined(_X86_) || defined(_AMD64_)
479 extern "C" void PAL_DispatchExceptionWrapper();
480 extern "C" int PAL_DispatchExceptionReturnOffset;
481 #endif // _X86_ || _AMD64_
487 Sets up an ExceptionRecord from an exception message
490 exceptionInfo - exception info to build the exception record
491 pExceptionRecord - exception record to setup
495 BuildExceptionRecord(
496 MachExceptionInfo& exceptionInfo, // [in] exception info
497 EXCEPTION_RECORD *pExceptionRecord) // [out] Used to return exception parameters
// Start from a zeroed record; only code/parameters are filled in here.
499 memset(pExceptionRecord, 0, sizeof(EXCEPTION_RECORD));
// Fallback code if the Mach exception cannot be mapped more precisely.
501 DWORD exceptionCode = EXCEPTION_ILLEGAL_INSTRUCTION;
// Map the Mach exception type (and its subcodes) to a Win32 SEH code.
503 switch(exceptionInfo.ExceptionType)
505 // Could not access memory. subcode contains the bad memory address.
507 if (exceptionInfo.SubcodeCount != 2)
509 NONPAL_RETAIL_ASSERT("Got an unexpected subcode");
510 exceptionCode = EXCEPTION_ILLEGAL_INSTRUCTION;
514 exceptionCode = EXCEPTION_ACCESS_VIOLATION;
// ExceptionInformation[0] = read/write flag (0 here), [1] = fault address,
// matching the Win32 EXCEPTION_ACCESS_VIOLATION convention.
516 pExceptionRecord->NumberParameters = 2;
517 pExceptionRecord->ExceptionInformation[0] = 0;
518 pExceptionRecord->ExceptionInformation[1] = exceptionInfo.Subcodes[1];
519 NONPAL_TRACE("subcodes[1] = %llx\n", exceptionInfo.Subcodes[1]);
523 // Instruction failed. Illegal or undefined instruction or operand.
524 case EXC_BAD_INSTRUCTION :
525 // TODO: Identify privileged instruction. Need to get the thread state and read the machine code. May
526 // be better to do this in the place that calls SEHProcessException, similar to how it's done on Linux.
527 exceptionCode = EXCEPTION_ILLEGAL_INSTRUCTION;
530 // Arithmetic exception; exact nature of exception is in subcode field.
532 if (exceptionInfo.SubcodeCount != 2)
534 NONPAL_RETAIL_ASSERT("Got an unexpected subcode");
535 exceptionCode = EXCEPTION_ILLEGAL_INSTRUCTION;
// Subcode[0] identifies the specific arithmetic fault on x86/amd64.
539 switch (exceptionInfo.Subcodes[0])
541 #if defined(_X86_) || defined(_AMD64_)
543 exceptionCode = EXCEPTION_INT_DIVIDE_BY_ZERO;
546 exceptionCode = EXCEPTION_INT_OVERFLOW;
548 case EXC_I386_EXTOVR:
549 exceptionCode = EXCEPTION_FLT_OVERFLOW;
552 exceptionCode = EXCEPTION_ARRAY_BOUNDS_EXCEEDED;
555 #error Trap code to exception mapping not defined for this architecture
558 exceptionCode = EXCEPTION_ILLEGAL_INSTRUCTION;
565 #if defined(_X86_) || defined(_AMD64_)
566 exceptionCode = EXCEPTION_ILLEGAL_INSTRUCTION;
569 #error Trap code to exception mapping not defined for this architecture
572 // Trace, breakpoint, etc. Details in subcode field.
574 #if defined(_X86_) || defined(_AMD64_)
575 if (exceptionInfo.Subcodes[0] == EXC_I386_SGL)
577 exceptionCode = EXCEPTION_SINGLE_STEP;
579 else if (exceptionInfo.Subcodes[0] == EXC_I386_BPT)
581 exceptionCode = EXCEPTION_BREAKPOINT;
584 #error Trap code to exception mapping not defined for this architecture
// Unknown breakpoint subcode: warn but still report a breakpoint.
588 WARN("unexpected subcode %d for EXC_BREAKPOINT", exceptionInfo.Subcodes[0]);
589 exceptionCode = EXCEPTION_BREAKPOINT;
594 // System call requested. Details in subcode field.
596 exceptionCode = EXCEPTION_ILLEGAL_INSTRUCTION;
599 // System call with a number in the Mach call range requested. Details in subcode field.
600 case EXC_MACH_SYSCALL:
601 exceptionCode = EXCEPTION_ILLEGAL_INSTRUCTION;
605 NONPAL_ASSERT("Got unknown trap code %d\n", exceptionInfo.ExceptionType);
609 pExceptionRecord->ExceptionCode = exceptionCode;
// Maps a Mach exception_type_t to its symbolic name for tracing; returns
// "INVALID CODE" (after asserting) for unrecognized values.
615 exception_type_t exception
621 return "EXC_BAD_ACCESS";
623 case EXC_BAD_INSTRUCTION:
624 return "EXC_BAD_INSTRUCTION";
627 return "EXC_ARITHMETIC";
630 return "EXC_SOFTWARE";
633 return "EXC_BREAKPOINT";
636 return "EXC_SYSCALL";
638 case EXC_MACH_SYSCALL:
639 return "EXC_MACH_SYSCALL";
642 NONPAL_ASSERT("Got unknown trap code %d\n", exception);
645 return "INVALID CODE";
653 Sets the faulting thread up to return to PAL_DispatchException with an
654 ExceptionRecord and thread CONTEXT.
657 thread - thread the exception happened
658 task - task the exception happened
659 message - exception message
// Runs on the exception-handling thread (not a PAL thread): builds the SEH
// record/context, detects stack overflow, then rewrites the faulting
// thread's registers/stack so it resumes in PAL_DispatchException.
666 HijackFaultingThread(
667 mach_port_t thread, // [in] thread the exception happened on
668 mach_port_t task, // [in] task the exception happened on
669 MachMessage& message) // [in] exception message
671 MachExceptionInfo exceptionInfo(thread, message);
672 EXCEPTION_RECORD exceptionRecord;
673 CONTEXT threadContext;
674 kern_return_t machret;
676 // Fill in the exception record from the exception info
677 BuildExceptionRecord(exceptionInfo, &exceptionRecord);
// Build a CONTEXT for the faulting thread from the captured Mach state.
680 threadContext.ContextFlags = CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS;
682 threadContext.ContextFlags = CONTEXT_FLOATING_POINT;
684 CONTEXT_GetThreadContextFromThreadState(x86_FLOAT_STATE, (thread_state_t)&exceptionInfo.FloatState, &threadContext);
686 threadContext.ContextFlags |= CONTEXT_CONTROL | CONTEXT_INTEGER | CONTEXT_SEGMENTS;
687 CONTEXT_GetThreadContextFromThreadState(x86_THREAD_STATE, (thread_state_t)&exceptionInfo.ThreadState, &threadContext);
689 #if defined(CORECLR) && (defined(_X86_) || defined(_AMD64_))
690 // For CoreCLR we look more deeply at access violations to determine whether they're the result of a stack
691 // overflow. If so we'll terminate the process immediately (the current default policy of the CoreCLR EE).
692 // Otherwise we'll either A/V ourselves trying to set up the SEH exception record and context on the
693 // target thread's stack (unlike Windows there's no extra stack reservation to guarantee this can be done)
694 // or, and this the case we're trying to avoid, it's possible we'll succeed and the runtime will go ahead
695 // and process the SO like it was a simple AV. Since the runtime doesn't currently implement stack probing
696 // on non-Windows platforms, this could lead to data corruption (we have SO intolerant code in the runtime
697 // which manipulates global state under the assumption that an SO cannot occur due to a prior stack
700 // Determining whether an AV is really an SO is not quite straightforward. We can get stack bounds
701 // information from pthreads but (a) we only have the target Mach thread port and no way to map to a
702 // pthread easily and (b) the pthread functions lie about the bounds on the main thread.
704 // Instead we inspect the target thread SP we just retrieved above and compare it with the AV address. If
705 // they both lie in the same page or the SP is at a higher address than the AV but in the same VM region,
706 // then we'll consider the AV to be an SO. Note that we can't assume that SP will be in the same page as
707 // the AV on an SO, even though we force GCC to generate stack probes on stack extension (-fstack-check).
708 // That's because GCC currently generates the probe *before* altering SP. Since a given stack extension can
709 // involve multiple pages and GCC generates all the required probes before updating SP in a single
710 // operation, the faulting probe can be at an address that is far removed from the thread's current value
713 // In the case where the AV and SP aren't in the same or adjacent pages we check if the first page
714 // following the faulting address belongs in the same VM region as the current value of SP. Since all pages
715 // in a VM region have the same attributes this check eliminates the possibility that there's another guard
716 // page in the range between the fault and the SP, effectively establishing that the AV occurred in the
717 // guard page associated with the stack associated with the SP.
719 // We are assuming here that thread stacks are always allocated in a single VM region. I've seen no
720 // evidence thus far that this is not the case (and the mere fact we rely on Mach apis already puts us on
721 // brittle ground anyway).
723 // (a) SP always marks the current limit of the stack (in that all valid stack accesses will be of
724 // the form [SP + delta]). The Mac x86 ABI appears to guarantee this (or rather it does not
725 // guarantee that stack slots below SP will not be invalidated by asynchronous events such as
726 // interrupts, which mostly amounts to the same thing for user mode code). Note that the Mac PPC
727 // ABI does allow some (constrained) access below SP, but we're not currently supporting this
729 // (b) All code will extend the stack "carefully" (by which we mean that stack extensions of more
730 // than one page in size will touch at least one byte in each intervening page (in decreasing
731 // address order), to guarantee that the guard page is hit before memory beyond the guard page is
732 // corrupted). Our managed jits always generate code which does this as does MSVC. GCC, however,
733 // does not do this by default. We have to explicitly provide the -fstack-check compiler option
734 // to enable the behavior.
735 #if (defined(_X86_) || defined(_AMD64_)) && defined(__APPLE__)
736 if (exceptionRecord.ExceptionCode == EXCEPTION_ACCESS_VIOLATION)
738 // Assume this AV isn't an SO to begin with.
739 bool fIsStackOverflow = false;
741 // Calculate the page base addresses for the fault and the faulting thread's SP.
742 int cbPage = getpagesize();
743 char *pFaultPage = (char*)(exceptionRecord.ExceptionInformation[1] & ~(cbPage - 1));
745 char *pStackTopPage = (char*)(threadContext.Esp & ~(cbPage - 1));
746 #elif defined(_AMD64_)
747 char *pStackTopPage = (char*)(threadContext.Rsp & ~(cbPage - 1));
750 if (pFaultPage == pStackTopPage || pFaultPage == (pStackTopPage - cbPage))
752 // The easy case is when the AV occurred in the same or adjacent page as the stack pointer.
753 fIsStackOverflow = true;
755 else if (pFaultPage < pStackTopPage)
757 // Calculate the address of the page immediately following the fault and check that it
758 // lies in the same VM region as the stack pointer.
759 vm_address_t vm_address;
761 vm_region_flavor_t vm_flavor;
762 mach_msg_type_number_t infoCnt;
764 vm_region_basic_info_data_64_t info;
765 infoCnt = VM_REGION_BASIC_INFO_COUNT_64;
766 vm_flavor = VM_REGION_BASIC_INFO_64;
768 vm_region_basic_info_data_t info;
769 infoCnt = VM_REGION_BASIC_INFO_COUNT;
770 vm_flavor = VM_REGION_BASIC_INFO;
772 mach_port_t object_name;
774 vm_address = (vm_address_t)(pFaultPage + cbPage);
777 machret = vm_region_64(
785 (vm_region_info_t)&info,
789 CHECK_MACH("vm_region", machret);
790 #elif defined(_AMD64_)
791 CHECK_MACH("vm_region_64", machret);
794 // If vm_region updated the address we gave it then that address was not part of a region at all
795 // (and so this cannot be an SO). Otherwise check that the ESP lies in the region returned.
796 char *pRegionStart = (char*)vm_address;
797 char *pRegionEnd = (char*)vm_address + vm_size;
798 if (pRegionStart == (pFaultPage + cbPage) && pStackTopPage < pRegionEnd)
799 fIsStackOverflow = true;
803 if (!fIsStackOverflow)
805 // Check if we can read pointer sized bytes below the target thread's stack pointer.
806 // If we are unable to, then it implies we have run into SO.
807 void **targetSP = (void **)threadContext.Rsp;
808 vm_address_t targetAddr = (mach_vm_address_t)(targetSP);
809 targetAddr -= sizeof(void *);
810 vm_size_t vm_size = sizeof(void *);
812 vm_size_t data_count = 8;
813 machret = vm_read_overwrite(mach_task_self(), targetAddr, vm_size, (pointer_t)arr, &data_count);
814 if (machret == KERN_INVALID_ADDRESS)
816 fIsStackOverflow = true;
821 if (fIsStackOverflow)
823 // We have a stack overflow. Abort the process immediately. It would be nice to let the VM do this
824 // but the Windows mechanism (where a stack overflow SEH exception is delivered on the faulting
825 // thread) will not work most of the time since non-Windows OSs don't keep a reserve stack
826 // extension allocated for this purpose.
828 // TODO: Once our event reporting story is further along we probably want to report something
829 // here. If our runtime policy for SO ever changes (the most likely candidate being "unload
830 // appdomain on SO) then we'll have to do something more complex here, probably involving a
831 // handshake with the runtime in order to report the SO without attempting to extend the faulting
832 // thread's stack any further. Note that we cannot call most PAL functions from the context of
833 // this thread since we're not a PAL thread.
835 write(STDERR_FILENO, StackOverflowMessage, sizeof(StackOverflowMessage) - 1);
839 #else // (_X86_ || _AMD64_) && __APPLE__
840 #error Platform not supported for correct stack overflow handling
841 #endif // (_X86_ || _AMD64_) && __APPLE__
842 #endif // CORECLR && _X86_
// x86 (32-bit) hijack: redirect the faulting thread to PAL_DispatchException.
845 NONPAL_ASSERTE(exceptionInfo.ThreadState.tsh.flavor == x86_THREAD_STATE32);
847 // Make a copy of the thread state because the one in exceptionInfo needs to be preserved to restore
848 // the state if the exception is forwarded.
849 x86_thread_state32_t ts32 = exceptionInfo.ThreadState.uts.ts32;
851 // If we're in single step mode, disable it since we're going to call PAL_DispatchException
852 if (exceptionRecord.ExceptionCode == EXCEPTION_SINGLE_STEP)
854 ts32.eflags &= ~EFL_TF;
857 exceptionRecord.ExceptionFlags = EXCEPTION_IS_SIGNAL;
858 exceptionRecord.ExceptionRecord = NULL;
859 exceptionRecord.ExceptionAddress = (void *)ts32.eip;
// Build the fake frame directly on the faulting thread's own stack.
861 void **FramePointer = (void **)ts32.esp;
863 *--FramePointer = (void *)ts32.eip;
865 // Construct a stack frame for a pretend activation of the function
866 // PAL_DispatchExceptionWrapper that serves only to make the stack
867 // correctly unwindable by the system exception unwinder.
868 // PAL_DispatchExceptionWrapper has an ebp frame, its local variables
869 // are the context and exception record, and it has just "called"
870 // PAL_DispatchException.
871 *--FramePointer = (void *)ts32.ebp;
872 ts32.ebp = (unsigned)FramePointer;
874 // Put the context on the stack
875 FramePointer = (void **)((ULONG_PTR)FramePointer - sizeof(CONTEXT));
876 // Make sure it's aligned - CONTEXT has 8-byte alignment
877 FramePointer = (void **)((ULONG_PTR)FramePointer - ((ULONG_PTR)FramePointer % 8));
878 CONTEXT *pContext = (CONTEXT *)FramePointer;
879 *pContext = threadContext;
881 // Put the exception record on the stack
882 FramePointer = (void **)((ULONG_PTR)FramePointer - sizeof(EXCEPTION_RECORD));
883 EXCEPTION_RECORD *pExceptionRecord = (EXCEPTION_RECORD *)FramePointer;
884 *pExceptionRecord = exceptionRecord;
886 FramePointer = (void **)((ULONG_PTR)FramePointer - sizeof(MachExceptionInfo));
887 MachExceptionInfo *pMachExceptionInfo = (MachExceptionInfo *)FramePointer;
888 *pMachExceptionInfo = exceptionInfo;
890 // Push arguments to PAL_DispatchException
891 FramePointer = (void **)((ULONG_PTR)FramePointer - 3 * sizeof(void *));
893 // Make sure it's aligned - ABI requires 16-byte alignment
894 FramePointer = (void **)((ULONG_PTR)FramePointer - ((ULONG_PTR)FramePointer % 16));
895 FramePointer[0] = pContext;
896 FramePointer[1] = pExceptionRecord;
897 FramePointer[2] = pMachExceptionInfo;
899 // Place the return address to right after the fake call in PAL_DispatchExceptionWrapper
900 FramePointer[-1] = (void *)((ULONG_PTR)PAL_DispatchExceptionWrapper + PAL_DispatchExceptionReturnOffset);
902 // Make the instruction register point to DispatchException
903 ts32.eip = (unsigned)PAL_DispatchException;
904 ts32.esp = (unsigned)&FramePointer[-1]; // skip return address
906 // Now set the thread state for the faulting thread so that PAL_DispatchException executes next
907 machret = thread_set_state(thread, x86_THREAD_STATE32, (thread_state_t)&ts32, x86_THREAD_STATE32_COUNT);
908 CHECK_MACH("thread_set_state(thread)", machret);
909 #elif defined(_AMD64_)
// AMD64 hijack: same scheme as x86 above, using the 64-bit thread state.
910 NONPAL_ASSERTE(exceptionInfo.ThreadState.tsh.flavor == x86_THREAD_STATE64);
912 // Make a copy of the thread state because the one in exceptionInfo needs to be preserved to restore
913 // the state if the exception is forwarded.
914 x86_thread_state64_t ts64 = exceptionInfo.ThreadState.uts.ts64;
916 // If we're in single step mode, disable it since we're going to call PAL_DispatchException
917 if (exceptionRecord.ExceptionCode == EXCEPTION_SINGLE_STEP)
919 ts64.__rflags &= ~EFL_TF;
922 exceptionRecord.ExceptionFlags = EXCEPTION_IS_SIGNAL;
923 exceptionRecord.ExceptionRecord = NULL;
924 exceptionRecord.ExceptionAddress = (void *)ts64.__rip;
926 void **FramePointer = (void **)ts64.__rsp;
928 *--FramePointer = (void *)ts64.__rip;
930 // Construct a stack frame for a pretend activation of the function
931 // PAL_DispatchExceptionWrapper that serves only to make the stack
932 // correctly unwindable by the system exception unwinder.
933 // PAL_DispatchExceptionWrapper has an ebp frame, its local variables
934 // are the context and exception record, and it has just "called"
935 // PAL_DispatchException.
936 *--FramePointer = (void *)ts64.__rbp;
937 ts64.__rbp = (SIZE_T)FramePointer;
939 // Put the context on the stack
940 FramePointer = (void **)((ULONG_PTR)FramePointer - sizeof(CONTEXT));
941 // Make sure it's aligned - CONTEXT has 16-byte alignment
942 FramePointer = (void **)((ULONG_PTR)FramePointer - ((ULONG_PTR)FramePointer % 16));
943 CONTEXT *pContext = (CONTEXT *)FramePointer;
944 *pContext = threadContext;
946 // Put the exception record on the stack
947 FramePointer = (void **)((ULONG_PTR)FramePointer - sizeof(EXCEPTION_RECORD));
948 EXCEPTION_RECORD *pExceptionRecord = (EXCEPTION_RECORD *)FramePointer;
949 *pExceptionRecord = exceptionRecord;
951 FramePointer = (void **)((ULONG_PTR)FramePointer - sizeof(MachExceptionInfo));
952 MachExceptionInfo *pMachExceptionInfo = (MachExceptionInfo *)FramePointer;
953 *pMachExceptionInfo = exceptionInfo;
955 // Push arguments to PAL_DispatchException
956 FramePointer = (void **)((ULONG_PTR)FramePointer - 3 * sizeof(void *));
958 // Make sure it's aligned - ABI requires 16-byte alignment
959 FramePointer = (void **)((ULONG_PTR)FramePointer - ((ULONG_PTR)FramePointer % 16));
960 FramePointer[0] = pContext;
961 FramePointer[1] = pExceptionRecord;
962 FramePointer[2] = pMachExceptionInfo;
964 // Place the return address to right after the fake call in PAL_DispatchExceptionWrapper
965 FramePointer[-1] = (void *)((ULONG_PTR)PAL_DispatchExceptionWrapper + PAL_DispatchExceptionReturnOffset);
967 // Make the instruction register point to DispatchException
968 ts64.__rip = (SIZE_T)PAL_DispatchException;
969 ts64.__rsp = (SIZE_T)&FramePointer[-1]; // skip return address
971 // Now set the thread state for the faulting thread so that PAL_DispatchException executes next
972 machret = thread_set_state(thread, x86_THREAD_STATE64, (thread_state_t)&ts64, x86_THREAD_STATE64_COUNT);
973 CHECK_MACH("thread_set_state(thread)", machret);
975 #error HijackFaultingThread not defined for this architecture
983 Suspend the specified thread.
986 thread - mach thread port
// Suspends the target thread and aborts any interruptible kernel operation
// it may be executing; retries (resume + suspend) when the kernel operation
// cannot be safely aborted yet.
993 SuspendMachThread(thread_act_t thread)
995 kern_return_t machret;
999 machret = thread_suspend(thread);
1000 CHECK_MACH("thread_suspend", machret);
1002 // Ensure that if the thread was running in the kernel, the kernel operation
1003 // is safely aborted so that it can be restarted later.
1004 machret = thread_abort_safely(thread);
1005 if (machret == KERN_SUCCESS)
1010 // The thread was running in the kernel executing a non-atomic operation
1011 // that cannot be restarted, so we need to resume the thread and retry
1012 machret = thread_resume(thread);
1013 CHECK_MACH("thread_resume", machret);
1021 Entry point for the thread that will listen for exception in any other thread.
1023 #ifdef FEATURE_PAL_SXS
1024 NOTE: This thread is not a PAL thread, and it must not be one. If it was,
1025 exceptions on this thread would be delivered to the port this thread itself
1028 In particular, if another thread overflows its stack, the exception handling
1029 thread receives a message. It will try to create a PAL_DispatchException
1030 frame on the faulting thread, which will likely fault. If the exception
1031 processing thread is not a PAL thread, the process gets terminated with a
1032 bus error; if the exception processing thread was a PAL thread, we would see
1033 a hang (since no thread is listening for the exception message that gets sent).
1034 Of the two ugly behaviors, the bus error is definitely favorable.
1036 This means: no printf, no TRACE, no PAL allocation, no ExitProcess,
1037 no LastError in this function and its helpers. To report fatal failure,
1038 use NONPAL_RETAIL_ASSERT.
1039 #endif // FEATURE_PAL_SXS
1042 void *args - not used
// Dedicated message loop for s_ExceptionPort. It services three kinds of
// messages: set-thread-context requests, exception notifications from faulting
// threads, and requests to forward an exception to previously-installed
// (non-PAL) handlers.
1048 SEHExceptionThread(void *args)
1050 ForwardedExceptionList feList;
1051 MachMessage sReplyOrForward;
1052 MachMessage sMessage;
1053 kern_return_t machret;
1054 thread_act_t thread;
1056 // Loop processing incoming messages forever.
1059 // Receive the next message.
1060 sMessage.Receive(s_ExceptionPort);
1062 NONPAL_TRACE("Received message %s (%08x) from (remote) %08x to (local) %08x\n",
1063 sMessage.GetMessageTypeName(),
1064 sMessage.GetMessageType(),
1065 sMessage.GetRemotePort(),
1066 sMessage.GetLocalPort());
1068 if (sMessage.IsSetThreadRequest())
1070 // Handle a request to set the thread context for the specified target thread.
1072 thread = sMessage.GetThreadContext(&sContext);
1074 // Suspend the target thread
1075 SuspendMachThread(thread);
1077 machret = CONTEXT_SetThreadContextOnPort(thread, &sContext);
1078 CHECK_MACH("CONTEXT_SetThreadContextOnPort", machret);
1080 machret = thread_resume(thread);
1081 CHECK_MACH("thread_resume", machret);
1083 else if (sMessage.IsExceptionNotification())
1085 // This is a notification of an exception occurring on another thread.
1086 exception_type_t exceptionType = sMessage.GetException();
1087 thread = sMessage.GetThread();
1090 if (NONPAL_TRACE_ENABLED)
1092 NONPAL_TRACE("ExceptionNotification %s (%u) thread %08x flavor %u\n",
1093 GetExceptionString(exceptionType),
1096 sMessage.GetThreadStateFlavor());
1098 int subcode_count = sMessage.GetExceptionCodeCount();
1099 for (int i = 0; i < subcode_count; i++)
1100 NONPAL_TRACE("ExceptionNotification subcode[%d] = %llx\n", i, sMessage.GetExceptionCode(i));
1102 x86_thread_state64_t threadStateActual;
1103 unsigned int count = sizeof(threadStateActual) / sizeof(unsigned);
1104 machret = thread_get_state(thread, x86_THREAD_STATE64, (thread_state_t)&threadStateActual, &count);
1105 CHECK_MACH("thread_get_state", machret);
1107 NONPAL_TRACE("ExceptionNotification actual rip %016llx rsp %016llx rbp %016llx rax %016llx r15 %016llx eflags %08llx\n",
1108 threadStateActual.__rip,
1109 threadStateActual.__rsp,
1110 threadStateActual.__rbp,
1111 threadStateActual.__rax,
1112 threadStateActual.__r15,
1113 threadStateActual.__rflags);
1115 x86_exception_state64_t threadExceptionState;
1116 unsigned int ehStateCount = sizeof(threadExceptionState) / sizeof(unsigned);
1117 machret = thread_get_state(thread, x86_EXCEPTION_STATE64, (thread_state_t)&threadExceptionState, &ehStateCount);
1118 CHECK_MACH("thread_get_state", machret);
1120 NONPAL_TRACE("ExceptionNotification trapno %04x cpu %04x err %08x faultAddr %016llx\n",
1121 threadExceptionState.__trapno,
1122 threadExceptionState.__cpu,
1123 threadExceptionState.__err,
1124 threadExceptionState.__faultvaddr);
// Scan the forwarded-exception list: if this notification is the restarted
// exception for a thread we previously forwarded, hand it to the saved
// handler (or fail the notification) instead of hijacking the thread again.
1128 bool feFound = false;
1131 while (!feList.IsEOL())
1133 mach_port_type_t ePortType;
// Prune entries whose thread port has died — the thread is gone and its
// forwarded exception will never be restarted.
1134 if (mach_port_type(mach_task_self(), feList.Current->Thread, &ePortType) != KERN_SUCCESS || (ePortType & MACH_PORT_TYPE_DEAD_NAME))
1136 NONPAL_TRACE("Forwarded exception: invalid thread port %08x\n", feList.Current->Thread);
1138 // Unlink and delete the forwarded exception instance
1143 if (feList.Current->Thread == thread)
1145 bool isSameException = feList.Current->ExceptionType == exceptionType;
1148 // Locate the record of previously installed handlers that the target thread keeps.
1149 CThreadMachExceptionHandlers *pHandlers = feList.Current->PalThread->GetSavedMachHandlers();
1151 // Unlink and delete the forwarded exception instance
1154 // Check if the current exception type matches the forwarded one and whether
1155 // there's a handler for the particular exception we've been handed.
1156 MachExceptionHandler sHandler;
1157 if (isSameException && pHandlers->GetHandler(exceptionType, &sHandler))
1159 NONPAL_TRACE("ForwardNotification thread %08x to handler %08x\n", thread, sHandler.m_handler);
1160 sReplyOrForward.ForwardNotification(&sHandler, sMessage);
1164 NONPAL_TRACE("ReplyToNotification KERN_FAILURE thread %08x port %08x sameException %d\n",
1165 thread, sMessage.GetRemotePort(), isSameException);
1166 sReplyOrForward.ReplyToNotification(sMessage, KERN_FAILURE);
// Normal (non-forwarded) path: rewrite the faulting thread's context so it
// runs the PAL exception dispatch when resumed, then acknowledge the message.
1177 NONPAL_TRACE("HijackFaultingThread thread %08x\n", thread);
1178 HijackFaultingThread(thread, mach_task_self(), sMessage);
1180 // Send the result of handling the exception back in a reply.
1181 NONPAL_TRACE("ReplyToNotification KERN_SUCCESS thread %08x port %08x\n", thread, sMessage.GetRemotePort());
1182 sReplyOrForward.ReplyToNotification(sMessage, KERN_SUCCESS);
1185 else if (sMessage.IsForwardExceptionRequest())
1187 thread = sMessage.GetThread();
1189 NONPAL_TRACE("ForwardExceptionRequest for thread %08x\n", thread);
1191 // Suspend the faulting thread.
1192 SuspendMachThread(thread);
1194 // Set the context back to the original faulting state.
1195 MachExceptionInfo *pExceptionInfo = sMessage.GetExceptionInfo();
1196 pExceptionInfo->RestoreState(thread);
1198 // Allocate a forwarded exception entry
// (plain malloc, not PAL allocation — see the no-PAL rule in the header comment)
1199 ForwardedException *pfe = (ForwardedException *)malloc(sizeof(ForwardedException));
1202 NONPAL_RETAIL_ASSERT("Exception thread ran out of memory to track forwarded exception notifications");
1205 // Save the forwarded exception entry away for the restarted exception message
1206 pfe->Thread = thread;
1207 pfe->ExceptionType = pExceptionInfo->ExceptionType;
1208 pfe->PalThread = sMessage.GetPalThread();
1211 // Now let the thread run at the original exception context to restart the exception
1212 NONPAL_TRACE("ForwardExceptionRequest resuming thread %08x exception type %08x\n", thread, pfe->ExceptionType);
1213 machret = thread_resume(thread);
1214 CHECK_MACH("thread_resume", machret);
1218 NONPAL_RETAIL_ASSERT("Unknown message type: %u", sMessage.GetMessageType());
1225 MachExceptionInfo constructor
1227 Saves the exception info from the exception notification message and
1228 the current thread state.
1231 thread - thread port to capture state from
1232 message - exception message
// Snapshots everything needed to later restart the exception at its original
// point: the exception type and subcodes from the message, plus the thread's
// general-purpose, floating-point, and debug register state.
1237 MachExceptionInfo::MachExceptionInfo(mach_port_t thread, MachMessage& message)
1239 kern_return_t machret;
1241 ExceptionType = message.GetException();
1242 SubcodeCount = message.GetExceptionCodeCount();
// Subcodes[] holds at most 2 entries; anything else indicates a malformed message.
1243 NONPAL_RETAIL_ASSERTE(SubcodeCount >= 0 && SubcodeCount <= 2);
1245 for (int i = 0; i < SubcodeCount; i++)
1246 Subcodes[i] = message.GetExceptionCode(i);
1248 mach_msg_type_number_t count = x86_THREAD_STATE_COUNT;
1249 machret = thread_get_state(thread, x86_THREAD_STATE, (thread_state_t)&ThreadState, &count);
1250 CHECK_MACH("thread_get_state", machret);
1252 count = x86_FLOAT_STATE_COUNT;
1253 machret = thread_get_state(thread, x86_FLOAT_STATE, (thread_state_t)&FloatState, &count);
1254 CHECK_MACH("thread_get_state(float)", machret);
1256 count = x86_DEBUG_STATE_COUNT;
1257 machret = thread_get_state(thread, x86_DEBUG_STATE, (thread_state_t)&DebugState, &count);
1258 CHECK_MACH("thread_get_state(debug)", machret);
1263 MachExceptionInfo::RestoreState
1265 Restore the thread to the saved exception info state.
1268 thread - thread port to restore
// Writes the saved thread/float/debug register state back to the target
// thread so that, when resumed, it re-executes from the original exception
// point and the exception is raised again.
1273 void MachExceptionInfo::RestoreState(mach_port_t thread)
1275 // If we are restarting a breakpoint, we need to bump the IP back one to
1276 // point at the actual int 3 instruction (the trap reports the address of
1277 // the instruction *after* the int 3).
1277 if (ExceptionType == EXC_BREAKPOINT)
1279 if (Subcodes[0] == EXC_I386_BPT)
1282 ThreadState.uts.ts32.eip--;
1283 #elif defined(_AMD64_)
1284 ThreadState.uts.ts64.__rip--;
1286 #error Platform not supported
1290 kern_return_t machret = thread_set_state(thread, x86_THREAD_STATE, (thread_state_t)&ThreadState, x86_THREAD_STATE_COUNT);
1291 CHECK_MACH("thread_set_state(thread)", machret);
1293 machret = thread_set_state(thread, x86_FLOAT_STATE, (thread_state_t)&FloatState, x86_FLOAT_STATE_COUNT);
1294 CHECK_MACH("thread_set_state(float)", machret);
1296 machret = thread_set_state(thread, x86_DEBUG_STATE, (thread_state_t)&DebugState, x86_DEBUG_STATE_COUNT);
1297 CHECK_MACH("thread_set_state(debug)", machret);
1302 MachSetThreadContext
1304 Sets the context of the current thread by sending a notification
1305 to the exception thread.
1308 lpContext - the CONTEXT to set the current thread
// A thread cannot safely rewrite its own Mach thread state, so it asks the
// exception thread (via s_ExceptionPort) to do it on its behalf.
1315 MachSetThreadContext(CONTEXT *lpContext)
1317 // We need to send a message to the worker thread so that it can set our thread context.
1318 MachMessage sRequest;
1319 sRequest.SendSetThread(s_ExceptionPort, lpContext);
1321 // Make sure we don't do anything further on this thread: the exception
// thread will overwrite our context, so control is not expected to proceed
// past this point — NOTE(review): the wait/spin that follows is elided here;
// confirm in the full source.
1330 SEHInitializeMachExceptions
1332 Initialize all SEH-related stuff related to mach exceptions
1337 TRUE if SEH support initialization succeeded
// Creates the process-wide exception port (receive + send rights), starts the
// dedicated SEHExceptionThread message loop, optionally dumps the task's
// existing exception ports for tracing, and suppresses SIGPIPE.
1341 SEHInitializeMachExceptions(void)
1343 pthread_t exception_thread;
1344 kern_return_t machret;
1346 // Allocate a mach port that will listen in on exceptions
1347 machret = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &s_ExceptionPort);
1348 if (machret != KERN_SUCCESS)
1350 ASSERT("mach_port_allocate failed: %d\n", machret);
1351 UTIL_SetLastErrorFromMach(machret);
1355 // Insert the send right into the task
1356 machret = mach_port_insert_right(mach_task_self(), s_ExceptionPort, s_ExceptionPort, MACH_MSG_TYPE_MAKE_SEND);
1357 if (machret != KERN_SUCCESS)
1359 ASSERT("mach_port_insert_right failed: %d\n", machret);
1360 UTIL_SetLastErrorFromMach(machret);
1364 // Create the thread that will listen to the exception for all threads
// (deliberately a raw pthread, not a PAL thread — see SEHExceptionThread's
// header comment for why it must not be a PAL thread)
1365 int createret = pthread_create(&exception_thread, NULL, SEHExceptionThread, NULL);
1368 ERROR("pthread_create failed, error is %d (%s)\n", createret, strerror(createret));
1369 SetLastError(ERROR_NOT_ENOUGH_MEMORY);
// Diagnostic only: log the exception ports already registered on the task.
1374 if (NONPAL_TRACE_ENABLED)
1376 CThreadMachExceptionHandlers taskHandlers;
1377 machret = task_get_exception_ports(mach_task_self(),
1379 taskHandlers.m_masks,
1380 &taskHandlers.m_nPorts,
1381 taskHandlers.m_handlers,
1382 taskHandlers.m_behaviors,
1383 taskHandlers.m_flavors);
1385 if (machret == KERN_SUCCESS)
1387 NONPAL_TRACE("SEHInitializeMachExceptions: TASK PORT count %d\n", taskHandlers.m_nPorts);
1388 for (mach_msg_type_number_t i = 0; i < taskHandlers.m_nPorts; i++)
1390 NONPAL_TRACE("SEHInitializeMachExceptions: TASK PORT mask %08x handler: %08x behavior %08x flavor %u\n",
1391 taskHandlers.m_masks[i],
1392 taskHandlers.m_handlers[i],
1393 taskHandlers.m_behaviors[i],
1394 taskHandlers.m_flavors[i]);
1399 NONPAL_TRACE("SEHInitializeMachExceptions: task_get_exception_ports FAILED %d %s\n", machret, mach_error_string(machret));
1404 #ifndef FEATURE_PAL_SXS
1405 if (!SEHEnableMachExceptions())
1409 #endif // !FEATURE_PAL_SXS
1411 // Tell the system to ignore SIGPIPE signals rather than use the default
1412 // behavior of terminating the process. Ignoring SIGPIPE will cause
1413 // calls that would otherwise raise that signal to return EPIPE instead.
1414 // The PAL expects EPIPE from those functions and won't handle a
1416 signal(SIGPIPE, SIG_IGN);
1424 MachExceptionInitializeDebug
1426 Initialize the mach exception handlers necessary for a managed debugger
// Idempotent: registers the debugging exception mask on the task's exception
// port only once per process (guarded by s_DebugInitialized). In the non-SXS
// build, failure to register is fatal — the debugger cannot function without it.
1432 void MachExceptionInitializeDebug(void)
1434 if (s_DebugInitialized == FALSE)
1436 #ifndef FEATURE_PAL_SXS
1437 kern_return_t MachRet;
1438 MachRet = task_set_exception_ports(mach_task_self(),
1439 PAL_EXC_DEBUGGING_MASK,
1442 MACHINE_THREAD_STATE);
1443 if (MachRet != KERN_SUCCESS)
1445 ASSERT("task_set_exception_ports failed: %d\n", MachRet);
1446 TerminateProcess(GetCurrentProcess(), (UINT)(-1));
1448 #endif // !FEATURE_PAL_SXS
1449 s_DebugInitialized = TRUE;
1455 SEHCleanupExceptionPort
1457 Restore default exception port handler
1459 (no parameters, no return value)
1462 During PAL_Terminate, we reach a point where SEH isn't possible any more
1463 (handle manager is off, etc). Past that point, we can't avoid crashing on
// Tears down PAL exception handling at shutdown and resets the debug-init
// flag so MachExceptionInitializeDebug could run again if re-initialized.
1467 SEHCleanupExceptionPort(void)
1469 TRACE("Restoring default exception ports\n");
1470 #ifndef FEATURE_PAL_SXS
1471 SEHDisableMachExceptions();
1472 #endif // !FEATURE_PAL_SXS
1473 s_DebugInitialized = FALSE;
// Runs on a hijacked thread (see InjectActivationInternal): invokes the
// registered activation function with the captured CONTEXT, then restores
// that context so the thread resumes exactly where it was interrupted.
// RtlRestoreContext does not return.
1478 ActivationHandler(CONTEXT* context)
1480 if (g_activationFunction != NULL)
1482 g_activationFunction(context);
1485 RtlRestoreContext(context, NULL);
// Assembly wrapper (and the offset of its return label) used by
// InjectActivationInternal as the fake return address for the injected frame.
1489 extern "C" void ActivationHandlerWrapper();
1490 extern "C" int ActivationHandlerReturnOffset;
1494 InjectActivationInternal
1496 Sets up the specified thread to call the ActivationHandler.
1499 pThread - PAL thread instance
// Suspends the target thread, and — only if it has no pending hardware
// exception and its IP is at a point the runtime deems safe — builds a fake
// frame on its stack (saved rip/rbp, a 16-byte-aligned CONTEXT snapshot, and
// ActivationHandlerWrapper's return label as return address), redirects rip
// to ActivationHandler with rdi pointing at the CONTEXT, then resumes it.
1505 InjectActivationInternal(CPalThread* pThread)
1509 mach_port_t threadPort = pThread->GetMachPortSelf();
1510 kern_return_t MachRet = thread_suspend(threadPort);
1511 palError = (MachRet == KERN_SUCCESS) ? NO_ERROR : ERROR_GEN_FAILURE;
1513 if (palError == NO_ERROR)
1515 mach_msg_type_number_t count;
1517 x86_exception_state64_t ExceptionState;
1518 count = x86_EXCEPTION_STATE64_COUNT;
1519 MachRet = thread_get_state(threadPort,
1520 x86_EXCEPTION_STATE64,
1521 (thread_state_t)&ExceptionState,
1523 _ASSERT_MSG(MachRet == KERN_SUCCESS, "thread_get_state for x86_EXCEPTION_STATE64\n");
1525 // Inject the activation only if the thread doesn't have a pending hardware exception
// (trapno <= 31 means a hardware exception vector is pending; hijacking then
// would corrupt the in-flight exception dispatch)
1526 static const int MaxHardwareExceptionVector = 31;
1527 if (ExceptionState.__trapno > MaxHardwareExceptionVector)
1529 x86_thread_state64_t ThreadState;
1530 count = x86_THREAD_STATE64_COUNT;
1531 MachRet = thread_get_state(threadPort,
1533 (thread_state_t)&ThreadState,
1535 _ASSERT_MSG(MachRet == KERN_SUCCESS, "thread_get_state for x86_THREAD_STATE64\n");
1537 if ((g_safeActivationCheckFunction != NULL) && g_safeActivationCheckFunction(ThreadState.__rip, /* checkingCurrentThread */ FALSE))
1539 // TODO: it would be nice to preserve the red zone in case a jitter would want to use it
1540 // Do we really care about unwinding through the wrapper?
1541 size_t* sp = (size_t*)ThreadState.__rsp;
1542 *(--sp) = ThreadState.__rip;
1543 *(--sp) = ThreadState.__rbp;
1544 size_t rbpAddress = (size_t)sp;
// Reserve space for the CONTEXT below the saved registers, 16-byte aligned
// as required by the x64 ABI, with the fake return address just below it.
1545 size_t contextAddress = (((size_t)sp) - sizeof(CONTEXT)) & ~15;
1546 size_t returnAddressAddress = contextAddress - sizeof(size_t);
1547 *(size_t*)(returnAddressAddress) = ActivationHandlerReturnOffset + (size_t)ActivationHandlerWrapper;
1549 // Fill in the context in the helper frame with the full context of the suspended thread.
1550 // The ActivationHandler will use the context to resume the execution of the thread
1551 // after the activation function returns.
1552 CONTEXT *pContext = (CONTEXT *)contextAddress;
1553 pContext->ContextFlags = CONTEXT_FULL | CONTEXT_SEGMENTS;
1554 MachRet = CONTEXT_GetThreadContextFromPort(threadPort, pContext);
1555 _ASSERT_MSG(MachRet == KERN_SUCCESS, "CONTEXT_GetThreadContextFromPort\n");
1557 // Make the instruction register point to ActivationHandler
// (rdi carries the CONTEXT* as the first argument per the System V AMD64 ABI)
1558 ThreadState.__rip = (size_t)ActivationHandler;
1559 ThreadState.__rsp = returnAddressAddress;
1560 ThreadState.__rbp = rbpAddress;
1561 ThreadState.__rdi = contextAddress;
1563 MachRet = thread_set_state(threadPort,
1565 (thread_state_t)&ThreadState,
1567 _ASSERT_MSG(MachRet == KERN_SUCCESS, "thread_set_state\n");
1571 MachRet = thread_resume(threadPort);
// Fix: compare the kern_return_t against KERN_SUCCESS (Mach constant), not
// ERROR_SUCCESS (Win32 constant). Both are 0, so behavior is unchanged, but
// this matches the suspend check above and uses the correct constant domain.
1572 palError = (MachRet == KERN_SUCCESS) ? NO_ERROR : ERROR_GEN_FAILURE;
1576 printf("Suspension failed with error 0x%x\n", palError);
1582 #endif // HAVE_MACH_EXCEPTIONS