1 // Licensed to the .NET Foundation under one or more agreements.
2 // The .NET Foundation licenses this file to you under the MIT license.
3 // See the LICENSE file in the project root for more information.
15 Implementation of MACH exception API functions.
21 #include "pal/thread.hpp"
22 #include "pal/seh.hpp"
23 #include "pal/palinternal.h"
24 #if HAVE_MACH_EXCEPTIONS
25 #include "machexception.h"
26 #include "pal/dbgmsg.h"
27 #include "pal/critsect.h"
28 #include "pal/debug.h"
30 #include "pal/utils.h"
31 #include "pal/context.h"
32 #include "pal/malloc.hpp"
33 #include "pal/process.h"
34 #include "pal/virtual.h"
35 #include "pal/map.hpp"
37 #include "machmessage.h"
44 #include <mach-o/loader.h>
46 using namespace CorUnix;
48 SET_DEFAULT_DEBUG_CHANNEL(EXCEPT);
// Forward declaration: passes an exception we did not handle on to the
// previously registered (chained-back) Mach exception handler.
50 void ForwardMachException(CPalThread *pThread, MachMessage *pMessage);
52 // The port we use to handle exceptions and to set the thread context
53 mach_port_t s_ExceptionPort;
// Set once debug exception support has been initialized; consulted by
// GetExceptionMask() in non-SXS builds before hooking breakpoint exceptions.
// NOTE(review): the code that sets this flag is not visible here — confirm.
55 static BOOL s_DebugInitialized = FALSE;
// Name of the environment variable that overrides MachException_Default
// (see the MachExceptionMode enum below for the meaning of its value).
57 static const char * PAL_MACH_EXCEPTION_MODE = "PAL_MachExceptionMode";
// Bit flags that control which classes of Mach exceptions the PAL hooks.
// Read once from the PAL_MachExceptionMode environment variable by
// GetExceptionMask(); each set bit *suppresses* a class of hooking.
59 enum MachExceptionMode
61 // special value to indicate we've not initialized yet
62 MachException_Uninitialized = -1,
64 // These can be combined with bitwise OR to incrementally turn off
65 // functionality for diagnostics purposes.
67 // In practice, the following values are probably useful:
68 // 1: Don't turn illegal instructions into SEH exceptions.
69 //    On Intel, stack misalignment usually shows up as an
70 //    illegal instruction.  PAL client code shouldn't
71 //    expect to see any of these, so this option should
72 //    always be safe to set.
73 // 2: Don't listen for breakpoint exceptions.  This makes an
74 //    SEH-based debugger (i.e., managed debugger) unusable,
75 //    but you may need this option if you find that native
76 //    breakpoints you set in PAL-dependent code don't work
77 //    (causing hangs or crashes in the native debugger).
78 // 3: Combination of the above.
79 //    This is the typical setting for development
80 //    (unless you're working on the managed debugger).
81 // 7: In addition to the above, don't turn bad accesses and
82 //    arithmetic exceptions into SEH.
83 //    This is the typical setting for stress.
84 MachException_SuppressIllegal = 1,
85 MachException_SuppressDebugging = 2,
86 MachException_SuppressManaged = 4,
88 // Default value to use if environment variable not set.
89 MachException_Default = 0,
// Returns the Mach exception_mask_t describing which exception classes the
// PAL should hook for the current process.  The MachExceptionMode is computed
// once (cached in a function-local static) from the PAL_MachExceptionMode
// environment variable; each un-suppressed mode bit contributes the
// corresponding PAL_EXC_*_MASK bits to the result.
92 static exception_mask_t GetExceptionMask()
94 static MachExceptionMode exMode = MachException_Uninitialized;
96 if (exMode == MachException_Uninitialized)
98 exMode = MachException_Default;
100 const char * exceptionSettings = getenv(PAL_MACH_EXCEPTION_MODE);
101 if (exceptionSettings)
103 exMode = (MachExceptionMode)atoi(exceptionSettings);
// No environment override: if a native debugger is attached, leave
// breakpoint/single-step exceptions to it rather than hooking them.
107 if (PAL_IsDebuggerPresent())
109 exMode = MachException_SuppressDebugging;
114 exception_mask_t machExceptionMask = 0;
115 if (!(exMode & MachException_SuppressIllegal))
117 machExceptionMask |= PAL_EXC_ILLEGAL_MASK;
119 if (!(exMode & MachException_SuppressDebugging))
121 #ifdef FEATURE_PAL_SXS
122 // Always hook exception ports for breakpoint exceptions.
123 // The reason is that we don't know when a managed debugger
124 // will attach, so we have to be prepared. We don't want
125 // to later go through the thread list and hook exception
126 // ports for exactly those threads that currently are in
128 machExceptionMask |= PAL_EXC_DEBUGGING_MASK;
129 #else // FEATURE_PAL_SXS
130 if (s_DebugInitialized)
132 machExceptionMask |= PAL_EXC_DEBUGGING_MASK;
134 #endif // FEATURE_PAL_SXS
136 if (!(exMode & MachException_SuppressManaged))
138 machExceptionMask |= PAL_EXC_MANAGED_MASK;
141 return machExceptionMask;
// NONPAL_RETAIL_ASSERT(...): reports a formatted failure message (printf +
// dialog) even in retail builds; usable from threads that are not PAL
// threads.  CHECK_MACH(function, machret): asserts that a Mach call named
// `function` returned KERN_SUCCESS, reporting mach_error_string() otherwise.
// (No comments are inserted between the lines below: each ends in a `\`
// line-continuation and an interleaved comment would truncate the macro.)
144 #define NONPAL_RETAIL_ASSERT(...) \
147 PAL_EnterHolder enterHolder; \
148 PAL_printf(__VA_ARGS__); \
149 PAL_DisplayDialogFormatted("NON-PAL ASSERT", __VA_ARGS__); \
155 #define CHECK_MACH(function, machret) \
156 if (machret != KERN_SUCCESS) \
158 NONPAL_RETAIL_ASSERT(function " failed: %08X: %s\n", machret, mach_error_string(machret)); \
161 #ifdef FEATURE_PAL_SXS
165 CPalThread::EnableMachExceptions
167 Hook Mach exceptions, i.e., call thread_swap_exception_ports
168 to replace the thread's current exception ports with our own.
169 The previously active exception ports are saved. Called when
170 this thread enters a region of code that depends on this PAL.
173 ERROR_SUCCESS, if enabling succeeded
174 an error code, otherwise
176 PAL_ERROR CorUnix::CPalThread::EnableMachExceptions()
178 TRACE("%08X: Enter()\n", (unsigned int)(size_t)this);
180 exception_mask_t machExceptionMask = GetExceptionMask();
181 if (machExceptionMask != 0)
184 // verify that the arrays we've allocated to hold saved exception ports
185 // are the right size.
// The five shift-and-add steps below compute a population count (number of
// set bits) of PAL_EXC_ALL_MASK without a loop; one saved-port slot is
// needed per hooked exception type.
186 exception_mask_t countBits = PAL_EXC_ALL_MASK;
187 countBits = ((countBits & 0xAAAAAAAA) >> 1) + (countBits & 0x55555555);
188 countBits = ((countBits & 0xCCCCCCCC) >> 2) + (countBits & 0x33333333);
189 countBits = ((countBits & 0xF0F0F0F0) >> 4) + (countBits & 0x0F0F0F0F);
190 countBits = ((countBits & 0xFF00FF00) >> 8) + (countBits & 0x00FF00FF);
191 countBits = ((countBits & 0xFFFF0000) >> 16) + (countBits & 0x0000FFFF);
192 if (countBits != static_cast<exception_mask_t>(
193 CThreadMachExceptionHandlerNode::s_nPortsMax))
195 ASSERT("s_nPortsMax is %u, but needs to be %u\n",
196 CThreadMachExceptionHandlerNode::s_nPortsMax, countBits);
200 // We store a set of previous handlers and register an exception port that is unique to both (to help
201 // us get the correct chain-back semantics in as many scenarios as possible). The following call tells
202 // us which we should do.
203 CThreadMachExceptionHandlerNode *pSavedHandlers = m_sMachExceptionHandlers.GetNodeForInitialization();
204 NONPAL_TRACE("Enabling handlers for thread %08X exception port %08X\n", GetMachPortSelf(), s_ExceptionPort);
206 // Swap current handlers into temporary storage first. That's because it's possible (even likely) that
207 // some or all of the handlers might still be ours. In those cases we don't want to overwrite the
208 // chain-back entries with these useless self-references.
209 kern_return_t MachRet;
210 mach_msg_type_number_t oldCount = CThreadMachExceptionHandlerNode::s_nPortsMax;
211 exception_mask_t rgMasks[CThreadMachExceptionHandlerNode::s_nPortsMax];
212 exception_handler_t rgHandlers[CThreadMachExceptionHandlerNode::s_nPortsMax];
213 exception_behavior_t rgBehaviors[CThreadMachExceptionHandlerNode::s_nPortsMax];
214 thread_state_flavor_t rgFlavors[CThreadMachExceptionHandlerNode::s_nPortsMax];
215 thread_port_t thread = mach_thread_self();
216 exception_behavior_t excepBehavior = EXCEPTION_STATE_IDENTITY;
// Install our port for all hooked exceptions and receive the previous
// registrations (masks/handlers/behaviors/flavors) into the rg* arrays.
218 MachRet = thread_swap_exception_ports(thread,
222 MACHINE_THREAD_STATE,
// mach_thread_self() returned a new port right; release it now that the
// swap call is done to avoid leaking a reference.
229 kern_return_t MachRetDeallocate = mach_port_deallocate(mach_task_self(), thread);
230 CHECK_MACH("mach_port_deallocate", MachRetDeallocate);
232 if (MachRet != KERN_SUCCESS)
234 ASSERT("thread_swap_exception_ports failed: %d\n", MachRet);
235 return UTIL_MachErrorToPalError(MachRet);
238 // Scan through the returned handlers looking for those that are ours.
239 for (mach_msg_type_number_t i = 0; i < oldCount; i++)
241 if (rgHandlers[i] == s_ExceptionPort)
243 // We were already registered for the exceptions indicated by rgMasks[i]. Look through each
244 // exception (set bit in the mask) separately, checking whether we previously had a (non-CLR)
245 // registration for that handle.
246 for (size_t j = 0; j < (sizeof(exception_mask_t) * 8); j++)
248 // Skip unset bits (exceptions not covered by this entry).
249 exception_mask_t bmException = rgMasks[i] & (1 << j);
250 if (bmException == 0)
253 // Find record in the previous data that covers this exception.
254 bool fFoundPreviousHandler = false;
255 for (int k = 0; k < pSavedHandlers->m_nPorts; k++)
257 // Skip records for different exceptions.
258 if (!(pSavedHandlers->m_masks[k] & bmException))
261 // Found one. By definition it shouldn't be one of our handlers.
262 if (pSavedHandlers->m_handlers[k] == s_ExceptionPort)
263 ASSERT("Stored our own handlers in Mach exception chain-back info.\n");
265 // We need to replicate the handling details back into our temporary data in place of
266 // the CLR record. There are several things that can happen:
267 // 1) One of the other entries has the same handler, behavior and flavor (for a
268 //    different set of exceptions). We could merge the data for this exception into
269 //    that record (set another bit in the masks array entry).
270 // 2) This was the only exception in the current entry (only one bit was set in the
271 //    mask) and we can simply re-use this entry (overwrite the handler, behavior and
273 // 3) Multiple exceptions were covered by this entry. In this case we should add a new
274 //    entry covering just the current exception. We're guaranteed to have space to do
275 //    this since we allocated enough entries to cover one exception per-entry and we
276 //    have at least one entry with two or more exceptions (this one).
277 // It turns out we can ignore case 1 (which involves complicating our logic still
278 // further) since we have no requirement to tightly pack all the entries for the same
279 // handler/behavior/flavor (like thread_swap_exception_ports does). We're perfectly
280 // happy having six entries for six exceptions handled by identical handlers rather
281 // than a single entry with six bits set in the exception mask.
282 if (rgMasks[i] == bmException)
284 // Entry was only for this exception. Simply overwrite handler/behavior and flavor
285 // with the stored values.
286 rgHandlers[i] = pSavedHandlers->m_handlers[k];
287 rgBehaviors[i] = pSavedHandlers->m_behaviors[k];
288 rgFlavors[i] = pSavedHandlers->m_flavors[k];
292 // More than one exception handled by this record. Store the old data in a new
293 // cell of the temporary data and remove the exception from the old cell.
294 if ((int)oldCount == CThreadMachExceptionHandlerNode::s_nPortsMax)
295 ASSERT("Ran out of space to expand exception handlers. This shouldn't happen.\n");
297 rgMasks[oldCount] = bmException;
298 rgHandlers[oldCount] = pSavedHandlers->m_handlers[k];
299 rgBehaviors[oldCount] = pSavedHandlers->m_behaviors[k];
300 rgFlavors[oldCount] = pSavedHandlers->m_flavors[k];
302 // The old cell no longer describes this exception.
303 rgMasks[i] &= ~bmException;
309 fFoundPreviousHandler = true;
313 // If we didn't find a match then we still don't want to record our own handler. Just
314 // reset the bit in the masks value (implicitly recording that we have no-chain back entry
315 // for this exception).
316 if (!fFoundPreviousHandler)
317 rgMasks[i] &= ~bmException;
322 // We've cleaned any mention of our own handlers from the data. It's safe to persist it.
323 pSavedHandlers->m_nPorts = oldCount;
324 memcpy(pSavedHandlers->m_masks, rgMasks, sizeof(rgMasks));
325 memcpy(pSavedHandlers->m_handlers, rgHandlers, sizeof(rgHandlers));
326 memcpy(pSavedHandlers->m_behaviors, rgBehaviors, sizeof(rgBehaviors));
327 memcpy(pSavedHandlers->m_flavors, rgFlavors, sizeof(rgFlavors));
329 return ERROR_SUCCESS;
334 CPalThread::DisableMachExceptions
336 Unhook Mach exceptions, i.e., call thread_set_exception_ports
337 to restore the thread's exception ports with those we saved
338 in EnableMachExceptions. Called when this thread leaves a
339 region of code that depends on this PAL.
342 ERROR_SUCCESS, if disabling succeeded
343 an error code, otherwise
345 PAL_ERROR CorUnix::CPalThread::DisableMachExceptions()
347 TRACE("%08X: Leave()\n", (unsigned int)(size_t)this);
349 PAL_ERROR palError = NO_ERROR;
351 // We only store exceptions when we're installing exceptions.
// Nothing was hooked by EnableMachExceptions, so there is nothing to restore.
352 if (0 == GetExceptionMask())
355 // Get the handlers to restore. It isn't really as simple as this. We keep two sets of handlers (which
356 // improves our ability to chain correctly in more scenarios) but this means we can encounter dilemmas
357 // where we've recorded two different handlers for the same port and can only re-register one of them
358 // (with a very high chance that it does not chain to the other). I don't believe it matters much today:
359 // in the absence of CoreCLR shutdown we don't throw away our thread context until a thread dies (in fact
360 // usually a bit later than this). Hopefully by the time this changes we'll have a better design for
361 // hardware exception handling overall.
362 CThreadMachExceptionHandlerNode *savedPorts = m_sMachExceptionHandlers.GetNodeForCleanup();
364 kern_return_t MachRet = KERN_SUCCESS;
365 for (int i = 0; i < savedPorts->m_nPorts; i++)
367 // If no handler was ever set, thread_swap_exception_ports returns
368 // MACH_PORT_NULL for the handler and zero values for behavior
369 // and flavor. Unfortunately, the latter are invalid even for
370 // MACH_PORT_NULL when you use thread_set_exception_ports.
371 exception_behavior_t behavior =
372 savedPorts->m_behaviors[i] ? savedPorts->m_behaviors[i] : EXCEPTION_DEFAULT;
373 thread_state_flavor_t flavor =
374 savedPorts->m_flavors[i] ? savedPorts->m_flavors[i] : MACHINE_THREAD_STATE;
375 thread_port_t thread = mach_thread_self();
// Re-register the previously saved handler for this mask entry.
376 MachRet = thread_set_exception_ports(thread,
377 savedPorts->m_masks[i],
378 savedPorts->m_handlers[i],
// Release the extra port reference obtained via mach_thread_self().
381 kern_return_t MachRetDeallocate = mach_port_deallocate(mach_task_self(), thread);
382 CHECK_MACH("mach_port_deallocate", MachRetDeallocate);
384 if (MachRet != KERN_SUCCESS)
388 if (MachRet != KERN_SUCCESS)
390 ASSERT("thread_set_exception_ports failed: %d\n", MachRet);
391 palError = UTIL_MachErrorToPalError(MachRet);
397 #else // FEATURE_PAL_SXS
401 SEHEnableMachExceptions
403 Enable SEH-related stuff related to mach exceptions
408 TRUE if enabling succeeded
// Non-SXS variant: hooks exception ports once for the whole task rather
// than per-thread (compare CPalThread::EnableMachExceptions above).
411 BOOL SEHEnableMachExceptions()
413 exception_mask_t machExceptionMask = GetExceptionMask();
414 if (machExceptionMask != 0)
416 kern_return_t MachRet;
417 MachRet = task_set_exception_ports(mach_task_self(),
421 MACHINE_THREAD_STATE);
423 if (MachRet != KERN_SUCCESS)
425 ASSERT("task_set_exception_ports failed: %d\n", MachRet);
// Translate the Mach error into the PAL's last-error before failing.
426 UTIL_SetLastErrorFromMach(MachRet);
435 SEHDisableMachExceptions
437 Disable SEH-related stuff related to mach exceptions
442 TRUE if enabling succeeded
// Non-SXS variant: resets the task-wide exception ports.
// NOTE(review): the banner above says "enabling"; it means "disabling" —
// it appears to have been copied from SEHEnableMachExceptions.
445 BOOL SEHDisableMachExceptions()
447 exception_mask_t machExceptionMask = GetExceptionMask();
448 if (machExceptionMask != 0)
450 kern_return_t MachRet;
451 MachRet = task_set_exception_ports(mach_task_self(),
455 MACHINE_THREAD_STATE);
457 if (MachRet != KERN_SUCCESS)
459 ASSERT("task_set_exception_ports failed: %d\n", MachRet);
460 UTIL_SetLastErrorFromMach(MachRet);
467 #endif // FEATURE_PAL_SXS
// PAL_DispatchException: runs on the faulting thread after
// HijackFaultingThread has redirected it here with a CONTEXT, an
// EXCEPTION_RECORD and the original Mach message placed on its stack.
// Feeds the exception into SEH processing and, if unhandled, forwards it
// to the chained-back Mach handler.
469 #if !defined(_AMD64_)
470 void PAL_DispatchException(PCONTEXT pContext, PEXCEPTION_RECORD pExRecord, MachMessage *pMessage)
471 #else // defined(_AMD64_)
473 // Since HijackFaultingThread pushed the context, exception record and mach exception message on the stack,
474 // we need to adjust the signature of PAL_DispatchException such that the corresponding arguments are considered
475 // to be on the stack per GCC64 calling convention rules. Hence, the first 6 dummy arguments (corresponding to RDI,
476 // RSI, RDX,RCX, R8, R9).
477 void PAL_DispatchException(DWORD64 dwRDI, DWORD64 dwRSI, DWORD64 dwRDX, DWORD64 dwRCX, DWORD64 dwR8, DWORD64 dwR9, PCONTEXT pContext, PEXCEPTION_RECORD pExRecord, MachMessage *pMessage)
478 #endif // !defined(_AMD64_)
480 CPalThread *pThread = InternalGetCurrentThread();
483 if (!pThread->IsInPal())
485 // It's now possible to observe system exceptions in code running outside the PAL (as the result of a
486 // p/invoke since we no longer revert our Mach exception ports in this case). In that scenario we need
487 // to re-enter the PAL now as the exception signals the end of the p/invoke.
488 PAL_Reenter(PAL_BoundaryBottom);
490 #endif // FEATURE_PAL_SXS
492 EXCEPTION_POINTERS pointers;
493 pointers.ExceptionRecord = pExRecord;
494 pointers.ContextRecord = pContext;
496 TRACE("PAL_DispatchException(EC %08x EA %p)\n", pExRecord->ExceptionCode, pExRecord->ExceptionAddress);
497 SEHProcessException(&pointers);
499 // Chain the exception to the next PAL
500 ForwardMachException(pThread, pMessage);
// Assembly helper that provides the fake "caller" frame HijackFaultingThread
// builds, plus the offset of the instruction following its fake call (used
// as the hijacked thread's return address).
503 #if defined(_X86_) || defined(_AMD64_)
504 extern "C" void PAL_DispatchExceptionWrapper();
505 extern "C" int PAL_DispatchExceptionReturnOffset;
506 #endif // _X86_ || _AMD64_
510 ExceptionRecordFromMessage
512 Setups up an ExceptionRecord from an exception message
515 message - exception message to build the exception record
516 pExceptionRecord - exception record to setup
// Maps a Mach exception type (EXC_*) plus its subcodes to the closest
// Win32 EXCEPTION_* code; defaults to EXCEPTION_ILLEGAL_INSTRUCTION for
// anything that has no better mapping.
519 ExceptionRecordFromMessage(
520 MachMessage &message, // [in] exception message
521 EXCEPTION_RECORD *pExceptionRecord) // [out] Used to return exception parameters
523 exception_type_t exception = message.GetException();
524 MACH_EH_TYPE(exception_data_type_t) subcodes[2];
525 mach_msg_type_number_t subcode_count;
527 subcode_count = message.GetExceptionCodeCount();
528 if (subcode_count < 0 || subcode_count > 2)
529 NONPAL_RETAIL_ASSERT("Bad exception subcode count: %d", subcode_count);
531 for (int i = 0; i < subcode_count; i++)
532 subcodes[i] = message.GetExceptionCode(i);
534 memset(pExceptionRecord, 0, sizeof(EXCEPTION_RECORD));
536 DWORD exceptionCode = EXCEPTION_ILLEGAL_INSTRUCTION;
540 // Could not access memory. subcode contains the bad memory address.
542 if (subcode_count != 2)
544 NONPAL_RETAIL_ASSERT("Got an unexpected subcode");
545 exceptionCode = EXCEPTION_ILLEGAL_INSTRUCTION;
549 exceptionCode = EXCEPTION_ACCESS_VIOLATION;
// ExceptionInformation[1] carries the faulting address, matching the
// Win32 access-violation record layout.
551 pExceptionRecord->NumberParameters = 2;
552 pExceptionRecord->ExceptionInformation[0] = 0;
553 pExceptionRecord->ExceptionInformation[1] = subcodes[1];
554 NONPAL_TRACE("subcodes[1] = %llx\n", subcodes[1]);
558 // Instruction failed. Illegal or undefined instruction or operand.
559 case EXC_BAD_INSTRUCTION :
560 // TODO: Identify privileged instruction. Need to get the thread state and read the machine code. May
561 // be better to do this in the place that calls SEHProcessException, similar to how it's done on Linux.
562 exceptionCode = EXCEPTION_ILLEGAL_INSTRUCTION;
565 // Arithmetic exception; exact nature of exception is in subcode field.
567 if (subcode_count != 2)
569 NONPAL_RETAIL_ASSERT("Got an unexpected subcode");
570 exceptionCode = EXCEPTION_ILLEGAL_INSTRUCTION;
576 #if defined(_X86_) || defined(_AMD64_)
578 exceptionCode = EXCEPTION_INT_DIVIDE_BY_ZERO;
581 exceptionCode = EXCEPTION_INT_OVERFLOW;
583 case EXC_I386_EXTOVR:
584 exceptionCode = EXCEPTION_FLT_OVERFLOW;
587 exceptionCode = EXCEPTION_ARRAY_BOUNDS_EXCEEDED;
590 #error Trap code to exception mapping not defined for this architecture
593 exceptionCode = EXCEPTION_ILLEGAL_INSTRUCTION;
600 #if defined(_X86_) || defined(_AMD64_)
601 exceptionCode = EXCEPTION_ILLEGAL_INSTRUCTION;
604 #error Trap code to exception mapping not defined for this architecture
607 // Trace, breakpoint, etc. Details in subcode field.
609 #if defined(_X86_) || defined(_AMD64_)
610 if (subcodes[0] == EXC_I386_SGL)
612 exceptionCode = EXCEPTION_SINGLE_STEP;
614 else if (subcodes[0] == EXC_I386_BPT)
616 exceptionCode = EXCEPTION_BREAKPOINT;
619 #error Trap code to exception mapping not defined for this architecture
623 WARN("unexpected subcode %d for EXC_BREAKPOINT", subcodes[0]);
624 exceptionCode = EXCEPTION_BREAKPOINT;
629 // System call requested. Details in subcode field.
631 exceptionCode = EXCEPTION_ILLEGAL_INSTRUCTION;
634 // System call with a number in the Mach call range requested. Details in subcode field.
635 case EXC_MACH_SYSCALL:
636 exceptionCode = EXCEPTION_ILLEGAL_INSTRUCTION;
640 ASSERT("Got unknown trap code %d\n", exception);
644 pExceptionRecord->ExceptionCode = exceptionCode;
// Switch body mapping an exception_type_t to its symbolic EXC_* name for
// diagnostic output; returns "INVALID CODE" (after asserting) for any
// unrecognized value.
// NOTE(review): the function's name/signature line is not visible in this
// listing — presumably a debug-trace helper; confirm against full source.
650 exception_type_t exception
656 return "EXC_BAD_ACCESS";
658 case EXC_BAD_INSTRUCTION:
659 return "EXC_BAD_INSTRUCTION";
662 return "EXC_ARITHMETIC";
665 return "EXC_SOFTWARE";
668 return "EXC_BREAKPOINT";
671 return "EXC_SYSCALL";
673 case EXC_MACH_SYSCALL:
674 return "EXC_MACH_SYSCALL";
677 ASSERT("Got unknown trap code %d\n", exception);
680 return "INVALID CODE";
688 Sets the faulting thread up to return to PAL_DispatchException with an
689 ExceptionRecord, thread CONTEXT and the exception MachMessage.
692 thread - thread the exception happened
693 task - task the exception happened
694 message - exception message
// Runs on the exception-handling thread (not the faulting thread): it
// rewrites the faulting thread's register state so that, when resumed, the
// thread appears to have called PAL_DispatchException from a fake
// PAL_DispatchExceptionWrapper frame, with the CONTEXT, EXCEPTION_RECORD
// and MachMessage copied onto the faulting thread's own stack.
702 HijackFaultingThread(
703 mach_port_t thread, // [in] thread the exception happened on
704 mach_port_t task, // [in] task the exception happened on
705 MachMessage &message) // [in] exception message
707 thread_state_flavor_t threadStateFlavor;
708 x86_thread_state_t threadState;
709 EXCEPTION_RECORD exceptionRecord;
710 CONTEXT threadContext;
711 kern_return_t machret;
714 // Fill in the exception record from the exception message
715 ExceptionRecordFromMessage(message, &exceptionRecord);
717 // Get the thread state from the exception message and convert the count of bytes into
719 threadStateFlavor = message.GetThreadStateFlavor();
720 count = message.GetThreadState(threadStateFlavor, (thread_state_t)&threadState, thread) / sizeof(natural_t);
723 threadContext.ContextFlags = CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS
725 threadContext.ContextFlags = CONTEXT_FLOATING_POINT;
727 // Get just the floating point registers directly from the thread because the message context is only
728 // the general registers.
729 machret = CONTEXT_GetThreadContextFromPort(thread, &threadContext);
730 CHECK_MACH("CONTEXT_GetThreadContextFromPort", machret);
732 // Now get the rest of the registers from the exception message. Don't save/restore the debug registers
733 // because loading them on OS X causes a privileged instruction fault. The "DE" in CR4 is set.
734 threadContext.ContextFlags |= CONTEXT_CONTROL | CONTEXT_INTEGER | CONTEXT_SEGMENTS;
735 CONTEXT_GetThreadContextFromThreadState(threadStateFlavor, (thread_state_t)&threadState, &threadContext);
737 #if defined(CORECLR) && (defined(_X86_) || defined(_AMD64_))
738 // For CoreCLR we look more deeply at access violations to determine whether they're the result of a stack
739 // overflow. If so we'll terminate the process immediately (the current default policy of the CoreCLR EE).
740 // Otherwise we'll either A/V ourselves trying to set up the SEH exception record and context on the
741 // target thread's stack (unlike Windows there's no extra stack reservation to guarantee this can be done)
742 // or, and this the case we're trying to avoid, it's possible we'll succeed and the runtime will go ahead
743 // and process the SO like it was a simple AV. Since the runtime doesn't currently implement stack probing
744 // on non-Windows platforms, this could lead to data corruption (we have SO intolerant code in the runtime
745 // which manipulates global state under the assumption that an SO cannot occur due to a prior stack
748 // Determining whether an AV is really an SO is not quite straightforward. We can get stack bounds
749 // information from pthreads but (a) we only have the target Mach thread port and no way to map to a
750 // pthread easily and (b) the pthread functions lie about the bounds on the main thread.
752 // Instead we inspect the target thread SP we just retrieved above and compare it with the AV address. If
753 // they both lie in the same page or the SP is at a higher address than the AV but in the same VM region,
754 // then we'll consider the AV to be an SO. Note that we can't assume that SP will be in the same page as
755 // the AV on an SO, even though we force GCC to generate stack probes on stack extension (-fstack-check).
756 // That's because GCC currently generates the probe *before* altering SP. Since a given stack extension can
757 // involve multiple pages and GCC generates all the required probes before updating SP in a single
758 // operation, the faulting probe can be at an address that is far removed from the thread's current value
761 // In the case where the AV and SP aren't in the same or adjacent pages we check if the first page
762 // following the faulting address belongs in the same VM region as the current value of SP. Since all pages
763 // in a VM region have the same attributes this check eliminates the possibility that there's another guard
764 // page in the range between the fault and the SP, effectively establishing that the AV occurred in the
765 // guard page associated with the stack associated with the SP.
767 // We are assuming here that thread stacks are always allocated in a single VM region. I've seen no
768 // evidence thus far that this is not the case (and the mere fact we rely on Mach apis already puts us on
769 // brittle ground anyway).
771 // (a) SP always marks the current limit of the stack (in that all valid stack accesses will be of
772 //     the form [SP + delta]). The Mac x86 ABI appears to guarantee this (or rather it does not
773 //     guarantee that stack slots below SP will not be invalidated by asynchronous events such as
774 //     interrupts, which mostly amounts to the same thing for user mode code). Note that the Mac PPC
775 //     ABI does allow some (constrained) access below SP, but we're not currently supporting this
777 // (b) All code will extend the stack "carefully" (by which we mean that stack extensions of more
778 //     than one page in size will touch at least one byte in each intervening page (in decreasing
779 //     address order), to guarantee that the guard page is hit before memory beyond the guard page is
780 //     corrupted). Our managed jits always generate code which does this as does MSVC. GCC, however,
781 //     does not do this by default. We have to explicitly provide the -fstack-check compiler option
782 //     to enable the behavior.
783 #if (defined(_X86_) || defined(_AMD64_)) && defined(__APPLE__)
784 if (exceptionRecord.ExceptionCode == EXCEPTION_ACCESS_VIOLATION)
786 // Assume this AV isn't an SO to begin with.
787 bool fIsStackOverflow = false;
789 // Calculate the page base addresses for the fault and the faulting thread's SP.
790 int cbPage = getpagesize();
791 char *pFaultPage = (char*)(exceptionRecord.ExceptionInformation[1] & ~(cbPage - 1));
793 char *pStackTopPage = (char*)(threadContext.Esp & ~(cbPage - 1));
794 #elif defined(_AMD64_)
795 char *pStackTopPage = (char*)(threadContext.Rsp & ~(cbPage - 1));
798 if (pFaultPage == pStackTopPage || pFaultPage == (pStackTopPage - cbPage))
800 // The easy case is when the AV occurred in the same or adjacent page as the stack pointer.
801 fIsStackOverflow = true;
803 else if (pFaultPage < pStackTopPage)
805 // Calculate the address of the page immediately following the fault and check that it
806 // lies in the same VM region as the stack pointer.
807 vm_address_t vm_address;
809 vm_region_flavor_t vm_flavor;
810 mach_msg_type_number_t infoCnt;
812 vm_region_basic_info_data_64_t info;
813 infoCnt = VM_REGION_BASIC_INFO_COUNT_64;
814 vm_flavor = VM_REGION_BASIC_INFO_64;
816 vm_region_basic_info_data_t info;
817 infoCnt = VM_REGION_BASIC_INFO_COUNT;
818 vm_flavor = VM_REGION_BASIC_INFO;
820 mach_port_t object_name;
822 vm_address = (vm_address_t)(pFaultPage + cbPage);
825 machret = vm_region_64(
833 (vm_region_info_t)&info,
837 CHECK_MACH("vm_region", machret);
838 #elif defined(_AMD64_)
839 CHECK_MACH("vm_region_64", machret);
842 // If vm_region updated the address we gave it then that address was not part of a region at all
843 // (and so this cannot be an SO). Otherwise check that the ESP lies in the region returned.
844 char *pRegionStart = (char*)vm_address;
845 char *pRegionEnd = (char*)vm_address + vm_size;
846 if (pRegionStart == (pFaultPage + cbPage) && pStackTopPage < pRegionEnd)
847 fIsStackOverflow = true;
851 if (!fIsStackOverflow)
853 // Check if we can read pointer sized bytes below the target thread's stack pointer.
854 // If we are unable to, then it implies we have run into SO.
855 void **targetSP = (void **)threadState.uts.ts64.__rsp;
856 vm_address_t targetAddr = (mach_vm_address_t)(targetSP);
857 targetAddr -= sizeof(void *);
858 vm_size_t vm_size = sizeof(void *);
860 vm_size_t data_count = 8;
861 machret = vm_read_overwrite(mach_task_self(), targetAddr, vm_size, (pointer_t)arr, &data_count);
862 if (machret == KERN_INVALID_ADDRESS)
864 fIsStackOverflow = true;
869 if (fIsStackOverflow)
871 // We have a stack overflow. Abort the process immediately. It would be nice to let the VM do this
872 // but the Windows mechanism (where a stack overflow SEH exception is delivered on the faulting
873 // thread) will not work most of the time since non-Windows OSs don't keep a reserve stack
874 // extension allocated for this purpose.
876 // TODO: Once our event reporting story is further along we probably want to report something
877 // here. If our runtime policy for SO ever changes (the most likely candidate being "unload
878 // appdomain on SO) then we'll have to do something more complex here, probably involving a
879 // handshake with the runtime in order to report the SO without attempting to extend the faulting
880 // thread's stack any further. Note that we cannot call most PAL functions from the context of
881 // this thread since we're not a PAL thread.
883 write(STDERR_FILENO, StackOverflowMessage, sizeof(StackOverflowMessage) - 1);
887 #else // (_X86_ || _AMD64_) && __APPLE__
888 #error Platform not supported for correct stack overflow handling
889 #endif // (_X86_ || _AMD64_) && __APPLE__
890 #endif // CORECLR && _X86_
// ----- x86 (32-bit) hijack path -----
893 _ASSERTE((threadStateFlavor == x86_THREAD_STATE32) || ((threadStateFlavor == x86_THREAD_STATE) && (threadState.tsh.flavor == x86_THREAD_STATE32)));
895 // If we're in single step mode, disable it since we're going to call PAL_DispatchException
896 if (exceptionRecord.ExceptionCode == EXCEPTION_SINGLE_STEP)
898 threadState.uts.ts32.eflags &= ~EFL_TF;
901 exceptionRecord.ExceptionFlags = EXCEPTION_IS_SIGNAL;
902 exceptionRecord.ExceptionRecord = NULL;
903 exceptionRecord.ExceptionAddress = (void *)threadContext.Eip;
905 void **FramePointer = (void **)threadState.uts.ts32.esp;
// Push the faulting EIP so the fake frame chains back to the fault site.
907 *--FramePointer = (void *)((ULONG_PTR)threadState.uts.ts32.eip);
909 // Construct a stack frame for a pretend activation of the function
910 // PAL_DispatchExceptionWrapper that serves only to make the stack
911 // correctly unwindable by the system exception unwinder.
912 // PAL_DispatchExceptionWrapper has an ebp frame, its local variables
913 // are the context and exception record, and it has just "called"
914 // PAL_DispatchException.
915 *--FramePointer = (void *)threadState.uts.ts32.ebp;
916 threadState.uts.ts32.ebp = (unsigned)FramePointer;
918 // Put the context on the stack
919 FramePointer = (void **)((ULONG_PTR)FramePointer - sizeof(CONTEXT));
920 // Make sure it's aligned - CONTEXT has 8-byte alignment
921 FramePointer = (void **)((ULONG_PTR)FramePointer - ((ULONG_PTR)FramePointer % 8));
922 CONTEXT *pContext = (CONTEXT *)FramePointer;
923 *pContext = threadContext;
925 // Put the exception record on the stack
926 FramePointer = (void **)((ULONG_PTR)FramePointer - sizeof(EXCEPTION_RECORD));
927 EXCEPTION_RECORD *pExceptionRecord = (EXCEPTION_RECORD *)FramePointer;
928 *pExceptionRecord = exceptionRecord;
930 // Put the exception message on the stack
931 FramePointer = (void **)((ULONG_PTR)FramePointer - sizeof(MachMessage));
932 MachMessage *pMessage = (MachMessage *)FramePointer;
933 pMessage->InitializeFrom(message);
935 // Push arguments to PAL_DispatchException
936 FramePointer = (void **)((ULONG_PTR)FramePointer - 3 * sizeof(void *));
938 // Make sure it's aligned - ABI requires 16-byte alignment
939 FramePointer = (void **)((ULONG_PTR)FramePointer - ((ULONG_PTR)FramePointer % 16));
940 FramePointer[0] = pContext;
941 FramePointer[1] = pExceptionRecord;
942 FramePointer[2] = pMessage;
944 // Place the return address to right after the fake call in PAL_DispatchExceptionWrapper
945 FramePointer[-1] = (void *)((ULONG_PTR)PAL_DispatchExceptionWrapper + PAL_DispatchExceptionReturnOffset);
947 // Make the instruction register point to DispatchException
948 threadState.uts.ts32.eip = (unsigned)PAL_DispatchException;
949 threadState.uts.ts32.esp = (unsigned)&FramePointer[-1]; // skip return address
// ----- AMD64 hijack path (mirrors the x86 logic with 64-bit registers) -----
950 #elif defined(_AMD64_)
951 _ASSERTE((threadStateFlavor == x86_THREAD_STATE64) || ((threadStateFlavor == x86_THREAD_STATE) && (threadState.tsh.flavor == x86_THREAD_STATE64)));
953 // If we're in single step mode, disable it since we're going to call PAL_DispatchException
954 if (exceptionRecord.ExceptionCode == EXCEPTION_SINGLE_STEP)
956 threadState.uts.ts64.__rflags &= ~EFL_TF;
959 exceptionRecord.ExceptionFlags = EXCEPTION_IS_SIGNAL;
960 exceptionRecord.ExceptionRecord = NULL;
961 exceptionRecord.ExceptionAddress = (void *)threadContext.Rip;
963 void **FramePointer = (void **)threadState.uts.ts64.__rsp;
965 *--FramePointer = (void *)((ULONG_PTR)threadState.uts.ts64.__rip);
967 // Construct a stack frame for a pretend activation of the function
968 // PAL_DispatchExceptionWrapper that serves only to make the stack
969 // correctly unwindable by the system exception unwinder.
970 // PAL_DispatchExceptionWrapper has an ebp frame, its local variables
971 // are the context and exception record, and it has just "called"
972 // PAL_DispatchException.
973 *--FramePointer = (void *)threadState.uts.ts64.__rbp;
974 threadState.uts.ts64.__rbp = (SIZE_T)FramePointer;
976 // Put the context on the stack
977 FramePointer = (void **)((ULONG_PTR)FramePointer - sizeof(CONTEXT));
978 // Make sure it's aligned - CONTEXT has 16-byte alignment
979 FramePointer = (void **)((ULONG_PTR)FramePointer - ((ULONG_PTR)FramePointer % 16));
980 CONTEXT *pContext = (CONTEXT *)FramePointer;
981 *pContext = threadContext;
983 // Put the exception record on the stack
984 FramePointer = (void **)((ULONG_PTR)FramePointer - sizeof(EXCEPTION_RECORD));
985 EXCEPTION_RECORD *pExceptionRecord = (EXCEPTION_RECORD *)FramePointer;
986 *pExceptionRecord = exceptionRecord;
988 // Put the exception message on the stack
989 FramePointer = (void **)((ULONG_PTR)FramePointer - sizeof(MachMessage));
990 MachMessage *pMessage = (MachMessage *)FramePointer;
991 pMessage->InitializeFrom(message);
993 // Push arguments to PAL_DispatchException
994 FramePointer = (void **)((ULONG_PTR)FramePointer - 3 * sizeof(void *));
996 // Make sure it's aligned - ABI requires 16-byte alignment
997 FramePointer = (void **)((ULONG_PTR)FramePointer - ((ULONG_PTR)FramePointer % 16));
998 FramePointer[0] = pContext;
999 FramePointer[1] = pExceptionRecord;
1000 FramePointer[2] = pMessage;
1002 // Place the return address to right after the fake call in PAL_DispatchExceptionWrapper
1003 FramePointer[-1] = (void *)((ULONG_PTR)PAL_DispatchExceptionWrapper + PAL_DispatchExceptionReturnOffset);
1005 // Make the instruction register point to DispatchException
1006 threadState.uts.ts64.__rip = (SIZE_T)PAL_DispatchException;
1007 threadState.uts.ts64.__rsp = (SIZE_T)&FramePointer[-1]; // skip return address
1009 #error HijackFaultingThread not defined for this architecture
1012 // Now set the thread state for the faulting thread so that PAL_DispatchException executes next
1013 machret = thread_set_state(thread, threadStateFlavor, (thread_state_t)&threadState, count);
1014 CHECK_MACH("thread_set_state", machret);
Entry point for the thread that will listen for exceptions raised in any other thread.
1023 #ifdef FEATURE_PAL_SXS
1024 NOTE: This thread is not a PAL thread, and it must not be one. If it was,
exceptions on this thread would be delivered to the port this thread itself is listening on.
1028 In particular, if another thread overflows its stack, the exception handling
1029 thread receives a message. It will try to create a PAL_DispatchException
1030 frame on the faulting thread, which will likely fault. If the exception
1031 processing thread is not a PAL thread, the process gets terminated with a
1032 bus error; if the exception processing thread was a PAL thread, we would see
1033 a hang (since no thread is listening for the exception message that gets sent).
1034 Of the two ugly behaviors, the bus error is definitely favorable.
1036 This means: no printf, no TRACE, no PAL allocation, no ExitProcess,
1037 no LastError in this function and its helpers. To report fatal failure,
1038 use NONPAL_RETAIL_ASSERT.
1039 #endif // FEATURE_PAL_SXS
1042 void *args - not used
1047 void *SEHExceptionThread(void *args)
1049 MachMessage sMessage;
1050 MachMessage sReplyOrForward;
1052 kern_return_t machret;
1053 thread_act_t hThread;
1055 // Loop processing incoming messages forever.
1058 // Receive the next message.
1059 sMessage.Receive(s_ExceptionPort);
1061 NONPAL_TRACE("Received message %s from %08x to %08x\n",
1062 sMessage.GetMessageTypeName(),
1063 sMessage.GetRemotePort(),
1064 sMessage.GetLocalPort());
1066 if (sMessage.IsSetThreadRequest())
1068 // Handle a request to set the thread context for the specified target thread.
1070 hThread = sMessage.GetThreadContext(&sContext);
1074 machret = thread_suspend(hThread);
1075 CHECK_MACH("thread_suspend", machret);
1077 // Ensure that if the thread was running in the kernel, the kernel operation
1078 // is safely aborted so that it can be restarted later.
1079 machret = thread_abort_safely(hThread);
1080 if (machret == KERN_SUCCESS)
1085 // The thread was running in the kernel executing a non-atomic operation
1086 // that cannot be restarted, so we need to resume the thread and retry
1087 machret = thread_resume(hThread);
1088 CHECK_MACH("thread_resume", machret);
1091 machret = CONTEXT_SetThreadContextOnPort(hThread, &sContext);
1092 CHECK_MACH("CONTEXT_SetThreadContextOnPort", machret);
1094 machret = thread_resume(hThread);
1095 CHECK_MACH("thread_resume", machret);
1097 else if (sMessage.IsExceptionNotification())
1099 // This is a notification of an exception occurring on another thread.
1100 hThread = sMessage.GetThread();
1102 NONPAL_TRACE("Notification is for exception %u (%s) on thread port %08x flavor %d\n",
1103 sMessage.GetException(),
1104 GetExceptionString(sMessage.GetException()),
1106 sMessage.GetThreadStateFlavor());
1108 HijackFaultingThread(hThread, mach_task_self(), sMessage);
1110 // Send the result of handling the exception back in a reply.
1111 sReplyOrForward.ReplyToNotification(&sMessage, KERN_SUCCESS);
1113 else if (sMessage.IsExceptionReply())
1115 NONPAL_TRACE("Exception reply - ignored\n");
1119 NONPAL_RETAIL_ASSERT("Unknown message type: %u", sMessage.GetMessageType());
1124 void ForwardMachException(CPalThread *pThread, MachMessage *pMessage)
1126 thread_act_t hThread = pThread->GetMachPortSelf();
1127 MachMessage sReplyOrForward;
1129 // Locate the record of previously installed handlers that the target thread keeps.
1130 CorUnix::CThreadMachExceptionHandlers *pHandlers = pThread->GetSavedMachHandlers();
1132 // Check whether there's even a handler for the particular exception we've been handed.
1133 CorUnix::MachExceptionHandler sHandler;
1134 if (pHandlers->GetHandler(pMessage->GetException(), &sHandler))
1136 NONPAL_TRACE("Forward request to port %08x\n", sHandler.m_handler);
1138 // Forward the notification
1139 sReplyOrForward.ForwardNotification(&sHandler, pMessage);
1141 // Spin wait until this thread is hijacked or the process is aborted.
1149 // There's no previous handler to forward this notification to.
1150 NONPAL_TRACE("Unhandled exception and no chain-back - aborting process\n");
1156 void MachSetThreadContext(CONTEXT *lpContext)
1158 // We need to send a message to the worker thread so that it can set our thread context
1159 // It is responsible for deallocating the thread port.
1160 MachMessage sRequest;
1161 sRequest.SendSetThread(s_ExceptionPort, mach_thread_self(), lpContext);
1163 // Make sure we don't do anything
1172 SEHInitializeMachExceptions
Initialize all SEH support related to Mach exceptions
1179 TRUE if SEH support initialization succeeded
1182 BOOL SEHInitializeMachExceptions (void)
1184 kern_return_t MachRet;
1186 pthread_t exception_thread;
1188 // Allocate a mach port that will listen in on exceptions
1189 MachRet = mach_port_allocate(mach_task_self(),
1190 MACH_PORT_RIGHT_RECEIVE,
1193 if (MachRet != KERN_SUCCESS)
1195 ASSERT("mach_port_allocate failed: %d\n", MachRet);
1196 UTIL_SetLastErrorFromMach(MachRet);
1200 // Insert the send right into the task
1201 MachRet = mach_port_insert_right(mach_task_self(),
1204 MACH_MSG_TYPE_MAKE_SEND);
1206 if (MachRet != KERN_SUCCESS)
1208 ASSERT("mach_port_insert_right failed: %d\n", MachRet);
1209 UTIL_SetLastErrorFromMach(MachRet);
1213 // Create the thread that will listen to the exception for all threads
1214 CreateRet = pthread_create(&exception_thread, NULL, SEHExceptionThread, NULL);
1216 if ( CreateRet != 0 )
1218 ERROR("pthread_create failed, error is %d (%s)\n", CreateRet, strerror(CreateRet));
1219 SetLastError(ERROR_NOT_ENOUGH_MEMORY);
1223 #ifndef FEATURE_PAL_SXS
1224 if (!SEHEnableMachExceptions())
1228 #endif // !FEATURE_PAL_SXS
1230 // Tell the system to ignore SIGPIPE signals rather than use the default
1231 // behavior of terminating the process. Ignoring SIGPIPE will cause
1232 // calls that would otherwise raise that signal to return EPIPE instead.
1233 // The PAL expects EPIPE from those functions and won't handle a
1235 signal(SIGPIPE, SIG_IGN);
1243 MachExceptionInitializeDebug
1245 Initialize the mach exception handlers necessary for a managed debugger
1251 void MachExceptionInitializeDebug(void)
1253 if (s_DebugInitialized == FALSE)
1255 #ifndef FEATURE_PAL_SXS
1256 kern_return_t MachRet;
1257 MachRet = task_set_exception_ports(mach_task_self(),
1258 PAL_EXC_DEBUGGING_MASK,
1261 MACHINE_THREAD_STATE);
1262 if (MachRet != KERN_SUCCESS)
1264 ASSERT("task_set_exception_ports failed: %d\n", MachRet);
1265 TerminateProcess(GetCurrentProcess(), (UINT)(-1));
1267 #endif // !FEATURE_PAL_SXS
1268 s_DebugInitialized = TRUE;
1274 SEHCleanupExceptionPort
1276 Restore default exception port handler
1278 (no parameters, no return value)
1281 During PAL_Terminate, we reach a point where SEH isn't possible any more
(handle manager is off, etc). Past that point, we can't avoid crashing on an exception.
1285 void SEHCleanupExceptionPort(void)
1287 TRACE("Restoring default exception ports\n");
1288 #ifndef FEATURE_PAL_SXS
1289 SEHDisableMachExceptions();
1290 #endif // !FEATURE_PAL_SXS
1291 s_DebugInitialized = FALSE;
1294 extern "C" void ActivationHandler(CONTEXT* context)
1296 if (g_activationFunction != NULL)
1298 g_activationFunction(context);
1301 RtlRestoreContext(context, NULL);
1305 extern "C" void ActivationHandlerWrapper();
1306 extern "C" int ActivationHandlerReturnOffset;
1308 PAL_ERROR InjectActivationInternal(CPalThread* pThread)
1312 mach_port_t threadPort = pThread->GetMachPortSelf();
1313 kern_return_t MachRet = thread_suspend(threadPort);
1314 palError = (MachRet == KERN_SUCCESS) ? NO_ERROR : ERROR_GEN_FAILURE;
1316 if (palError == NO_ERROR)
1318 mach_msg_type_number_t count;
1320 x86_exception_state64_t ExceptionState;
1321 count = x86_EXCEPTION_STATE64_COUNT;
1322 MachRet = thread_get_state(threadPort,
1323 x86_EXCEPTION_STATE64,
1324 (thread_state_t)&ExceptionState,
1326 _ASSERT_MSG(MachRet == KERN_SUCCESS, "thread_get_state for x86_EXCEPTION_STATE64\n");
1328 // Inject the activation only if the thread doesn't have a pending hardware exception
1329 static const int MaxHardwareExceptionVector = 31;
1330 if (ExceptionState.__trapno > MaxHardwareExceptionVector)
1332 x86_thread_state64_t ThreadState;
1333 count = x86_THREAD_STATE64_COUNT;
1334 MachRet = thread_get_state(threadPort,
1336 (thread_state_t)&ThreadState,
1338 _ASSERT_MSG(MachRet == KERN_SUCCESS, "thread_get_state for x86_THREAD_STATE64\n");
1340 if ((g_safeActivationCheckFunction != NULL) && g_safeActivationCheckFunction(ThreadState.__rip))
1342 // TODO: it would be nice to preserve the red zone in case a jitter would want to use it
1343 // Do we really care about unwinding through the wrapper?
1344 size_t* sp = (size_t*)ThreadState.__rsp;
1345 *(--sp) = ThreadState.__rip;
1346 *(--sp) = ThreadState.__rbp;
1347 size_t rbpAddress = (size_t)sp;
1348 size_t contextAddress = (((size_t)sp) - sizeof(CONTEXT)) & ~15;
1349 size_t returnAddressAddress = contextAddress - sizeof(size_t);
1350 *(size_t*)(returnAddressAddress) = ActivationHandlerReturnOffset + (size_t)ActivationHandlerWrapper;
1352 // Fill in the context in the helper frame with the full context of the suspended thread.
1353 // The ActivationHandler will use the context to resume the execution of the thread
1354 // after the activation function returns.
1355 CONTEXT *pContext = (CONTEXT *)contextAddress;
1356 pContext->ContextFlags = CONTEXT_FULL | CONTEXT_SEGMENTS;
1357 MachRet = CONTEXT_GetThreadContextFromPort(threadPort, pContext);
1358 _ASSERT_MSG(MachRet == KERN_SUCCESS, "CONTEXT_GetThreadContextFromPort\n");
1360 // Make the instruction register point to ActivationHandler
1361 ThreadState.__rip = (size_t)ActivationHandler;
1362 ThreadState.__rsp = returnAddressAddress;
1363 ThreadState.__rbp = rbpAddress;
1364 ThreadState.__rdi = contextAddress;
1366 MachRet = thread_set_state(threadPort,
1368 (thread_state_t)&ThreadState,
1370 _ASSERT_MSG(MachRet == KERN_SUCCESS, "thread_set_state\n");
1374 MachRet = thread_resume(threadPort);
1375 palError = (MachRet == ERROR_SUCCESS) ? NO_ERROR : ERROR_GEN_FAILURE;
1379 printf("Suspension failed with error 0x%x\n", palError);
1385 #endif // HAVE_MACH_EXCEPTIONS