add_definitions(-DFEATURE_DATABREAKPOINT)
endif(CLR_CMAKE_TARGET_ARCH_AMD64 OR CLR_CMAKE_TARGET_ARCH_I386)
endif(WIN32)
+
+if(CLR_CMAKE_PLATFORM_DARWIN)
+ add_definitions(-DFEATURE_WRITEBARRIER_COPY)
+endif(CLR_CMAKE_PLATFORM_DARWIN)
#endif //HAVE_VM_ALLOCATE
+BOOL IsRunningOnMojaveHardenedRuntime();
+
#ifdef __cplusplus
}
#endif // __cplusplus
_ASSERTE(pPEBaseAddress != NULL);
PAL_ERROR palError = NO_ERROR;
- LPVOID pvBaseAddress = NULL;
-
off_t adjust = offset & (GetVirtualPageSize() - 1);
+ LPVOID pvBaseAddress = static_cast<char *>(addr) - adjust;
- pvBaseAddress = mmap(static_cast<char *>(addr) - adjust, len + adjust, prot, flags, fd, offset - adjust);
- if (MAP_FAILED == pvBaseAddress)
+#ifdef __APPLE__
+ if ((prot & PROT_EXEC) != 0 && IsRunningOnMojaveHardenedRuntime())
{
- ERROR_(LOADER)( "mmap failed with code %d: %s.\n", errno, strerror( errno ) );
- palError = FILEGetLastErrorFromErrno();
+ // Mojave hardened runtime doesn't allow executable mappings of a file. So we have to create an
+ // anonymous mapping and read the file contents into it instead.
+
+ // Set the requested mapping with forced PROT_WRITE to ensure data from the file can be read there,
+ // read the data in and finally remove the forced PROT_WRITE
+ if ((mprotect(pvBaseAddress, len + adjust, prot | PROT_WRITE) == -1) ||
+ (pread(fd, pvBaseAddress, len + adjust, offset - adjust) == -1) ||
+ (((prot & PROT_WRITE) == 0) && mprotect(pvBaseAddress, len + adjust, prot) == -1))
+ {
+ palError = FILEGetLastErrorFromErrno();
+ }
}
else
+#endif
+ {
+ pvBaseAddress = mmap(static_cast<char *>(addr) - adjust, len + adjust, prot, flags, fd, offset - adjust);
+ if (MAP_FAILED == pvBaseAddress)
+ {
+ ERROR_(LOADER)( "mmap failed with code %d: %s.\n", errno, strerror( errno ) );
+ palError = FILEGetLastErrorFromErrno();
+ }
+ }
+
+ if (NO_ERROR == palError)
{
palError = MAPRecordMapping(pMappingObject, pPEBaseAddress, pvBaseAddress, len, prot);
if (NO_ERROR != palError)
#endif // FEATURE_ENABLE_NO_ADDRESS_SPACE_RANDOMIZATION
// MAC64 requires we pass MAP_SHARED (or MAP_PRIVATE) flags - otherwise, the call fails.
// Refer to mmap documentation at http://www.manpagez.com/man/2/mmap/ for details.
- loadedBase = mmap(usedBaseAddr, virtualSize, PROT_NONE, MAP_ANON|MAP_PRIVATE, -1, 0);
+ int mapFlags = MAP_ANON|MAP_PRIVATE;
+#ifdef __APPLE__
+ if (IsRunningOnMojaveHardenedRuntime())
+ {
+ mapFlags |= MAP_JIT;
+ }
+#endif // __APPLE__
+ loadedBase = mmap(usedBaseAddr, virtualSize, PROT_NONE, mapFlags, -1, 0);
}
if (MAP_FAILED == loadedBase)
if (pRetVal == NULL)
{
// Try to reserve memory from the OS
+ if ((flProtect & 0xff) == PAGE_EXECUTE_READWRITE)
+ {
+ flAllocationType |= MEM_RESERVE_EXECUTABLE;
+ }
pRetVal = ReserveVirtualMemory(pthrCurrent, (LPVOID)StartBoundary, MemSize, flAllocationType);
}
// Most platforms will only commit memory if it is dirtied,
// so this should not consume too much swap space.
- int mmapFlags = 0;
-
-#if HAVE_VM_ALLOCATE
- // Allocate with vm_allocate first, then map at the fixed address.
- int result = vm_allocate(mach_task_self(),
- &StartBoundary,
- MemSize,
- ((LPVOID) StartBoundary != nullptr) ? FALSE : TRUE);
-
- if (result != KERN_SUCCESS)
- {
- ERROR("vm_allocate failed to allocated the requested region!\n");
- pthrCurrent->SetLastError(ERROR_INVALID_ADDRESS);
- return nullptr;
- }
-
- mmapFlags |= MAP_FIXED;
-#endif // HAVE_VM_ALLOCATE
+ int mmapFlags = MAP_ANON | MAP_PRIVATE;
if ((fAllocationType & MEM_LARGE_PAGES) != 0)
{
#endif
}
- mmapFlags |= MAP_ANON | MAP_PRIVATE;
+#ifdef __APPLE__
+ if ((fAllocationType & MEM_RESERVE_EXECUTABLE) && IsRunningOnMojaveHardenedRuntime())
+ {
+ mmapFlags |= MAP_JIT;
+ }
+#endif
LPVOID pRetVal = mmap((LPVOID) StartBoundary,
MemSize,
{
ERROR( "Failed due to insufficient memory.\n" );
-#if HAVE_VM_ALLOCATE
- vm_deallocate(mach_task_self(), StartBoundary, MemSize);
-#endif // HAVE_VM_ALLOCATE
-
pthrCurrent->SetLastError(ERROR_NOT_ENOUGH_MEMORY);
return nullptr;
}
// Do actual memory reservation.
do
{
- m_startAddress = ReserveVirtualMemory(pthrCurrent, (void*)preferredStartAddress, sizeOfAllocation, 0 /* fAllocationType */);
+ m_startAddress = ReserveVirtualMemory(pthrCurrent, (void*)preferredStartAddress, sizeOfAllocation, MEM_RESERVE_EXECUTABLE);
if (m_startAddress != nullptr)
{
break;
// - The code heap allocator for the JIT can allocate from this address space. Beyond this reservation, one can use
// the COMPlus_CodeHeapReserveForJumpStubs environment variable to reserve space for jump stubs.
sizeOfAllocation = MaxExecutableMemorySize;
- m_startAddress = ReserveVirtualMemory(pthrCurrent, nullptr, sizeOfAllocation, 0 /* fAllocationType */);
+ m_startAddress = ReserveVirtualMemory(pthrCurrent, nullptr, sizeOfAllocation, MEM_RESERVE_EXECUTABLE);
if (m_startAddress == nullptr)
{
return;
#include <mach/message.h>
#endif //HAVE_VM_ALLOCATE
+#include <sys/mman.h>
+
#include "pal/utils.h"
#include "pal/file.h"
}
#endif //HAVE_VM_ALLOCATE
+#ifdef __APPLE__
+
+/*++
+Function:
+  IsRunningOnMojaveHardenedRuntime() - Test if the current process is running on Mojave hardened runtime
+
+  The hardened runtime forbids plain RWX mappings (mmap fails with EACCES)
+  unless the MAP_JIT flag is used together with the allow-jit entitlement.
+  The function probes for exactly that combination and caches the result.
+--*/
+BOOL IsRunningOnMojaveHardenedRuntime()
+{
+    // -1 = not yet detected, 0 = FALSE, 1 = TRUE. The detection below is
+    // idempotent, so a benign race where two threads both probe before the
+    // cache is set is harmless; 'volatile' only prevents the flag from being
+    // cached in a register across the check.
+    static volatile int isRunningOnMojaveHardenedRuntime = -1;
+
+    if (isRunningOnMojaveHardenedRuntime == -1)
+    {
+        BOOL mhrDetected = FALSE;
+        int pageSize = sysconf(_SC_PAGE_SIZE);
+        // Try to map a page with read-write-execute protection. It should fail on Mojave hardened runtime.
+        void* testPage = mmap(NULL, pageSize, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+        if (testPage == MAP_FAILED && (errno == EACCES))
+        {
+            // The mapping has failed with EACCES, check if making the same mapping with MAP_JIT flag works
+            testPage = mmap(NULL, pageSize, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_ANONYMOUS | MAP_PRIVATE | MAP_JIT, -1, 0);
+            if (testPage != MAP_FAILED)
+            {
+                mhrDetected = TRUE;
+            }
+        }
+
+        // Release the probe page regardless of which of the two mmap calls
+        // produced it; testPage is still MAP_FAILED when both failed.
+        if (testPage != MAP_FAILED)
+        {
+            munmap(testPage, pageSize);
+        }
+
+        isRunningOnMojaveHardenedRuntime = (int)mhrDetected;
+    }
+
+    return (BOOL)isRunningOnMojaveHardenedRuntime;
+}
+
+#endif // __APPLE__
ret
LEAF_END_MARKED JIT_ByRefWriteBarrier, _TEXT
+#ifdef FEATURE_WRITEBARRIER_COPY
+ // When JIT_WriteBarrier is copied into an allocated page, the JIT_Stelem_Ref and its
+ // helpers use this global variable to jump to it. This variable is set in InitThreadManager.
+ .global _JIT_WriteBarrier_Loc
+ .zerofill __DATA,__common,_JIT_WriteBarrier_Loc,8,3
+#endif // FEATURE_WRITEBARRIER_COPY
+
// TODO: put definition for this in asmconstants.h
#define CanCast 1
mov rsi, rdx
// JIT_WriteBarrier(Object** dst, Object* src)
+#ifdef FEATURE_WRITEBARRIER_COPY
+ jmp [rip + C_FUNC(JIT_WriteBarrier_Loc)]
+#else
jmp C_FUNC(JIT_WriteBarrier)
+#endif
LOCAL_LABEL(AssigningNull):
// write barrier is not needed for assignment of NULL references
mov rsi, rdx
// JIT_WriteBarrier(Object** dst, Object* src)
+#ifdef FEATURE_WRITEBARRIER_COPY
+ jmp [rip + C_FUNC(JIT_WriteBarrier_Loc)]
+#else
jmp C_FUNC(JIT_WriteBarrier)
+#endif
LOCAL_LABEL(NeedCheck):
jmp C_FUNC(JIT_Stelem_Ref__ArrayStoreCheck_Helper)
RESET_FRAME_WITH_RBP
// JIT_WriteBarrier(Object** dst, Object* src)
+#ifdef FEATURE_WRITEBARRIER_COPY
+ jmp [rip + C_FUNC(JIT_WriteBarrier_Loc)]
+#else
jmp C_FUNC(JIT_WriteBarrier)
+#endif
LEAF_END JIT_Stelem_Ref__ArrayStoreCheck_Helper, _TEXT
// the label should always come after the entrypoint for this function
_ASSERTE_ALL_BUILDS("clr/src/VM/AMD64/JITinterfaceAMD64.cpp", (LPBYTE)label > (LPBYTE)base);
- return ((LPBYTE)GetEEFuncEntryPoint(JIT_WriteBarrier) + ((LPBYTE)GetEEFuncEntryPoint(label) - (LPBYTE)GetEEFuncEntryPoint(base) + offset));
+ return (GetWriteBarrierCodeLocation((void*)JIT_WriteBarrier) + ((LPBYTE)GetEEFuncEntryPoint(label) - (LPBYTE)GetEEFuncEntryPoint(base) + offset));
}
+
int WriteBarrierManager::ChangeWriteBarrierTo(WriteBarrierType newWriteBarrier, bool isRuntimeSuspended)
{
GCX_MAYBE_COOP_NO_THREAD_BROKEN((!isRuntimeSuspended && GetThread() != NULL));
// the memcpy must come before the switch statement because the asserts inside the switch
// are actually looking into the JIT_WriteBarrier buffer
- memcpy((PVOID)JIT_WriteBarrier, (LPVOID)GetCurrentWriteBarrierCode(), GetCurrentWriteBarrierSize());
+ memcpy(GetWriteBarrierCodeLocation((void*)JIT_WriteBarrier), (LPVOID)GetCurrentWriteBarrierCode(), GetCurrentWriteBarrierSize());
switch (newWriteBarrier)
{
void FlushWriteBarrierInstructionCache()
{
- FlushInstructionCache(GetCurrentProcess(), (PVOID)JIT_WriteBarrier, g_WriteBarrierManager.GetCurrentWriteBarrierSize());
+ FlushInstructionCache(GetCurrentProcess(), GetWriteBarrierCodeLocation((PVOID)JIT_WriteBarrier), g_WriteBarrierManager.GetCurrentWriteBarrierSize());
}
#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
GSCookie * pGSCookiePtr = GetProcessGSCookiePtr();
+#ifdef FEATURE_PAL
+ // On Unix, the GS cookie is stored in a read only data segment
+ DWORD newProtection = PAGE_READWRITE;
+#else // FEATURE_PAL
+ DWORD newProtection = PAGE_EXECUTE_READWRITE;
+#endif // !FEATURE_PAL
+
DWORD oldProtection;
- if(!ClrVirtualProtect((LPVOID)pGSCookiePtr, sizeof(GSCookie), PAGE_EXECUTE_READWRITE, &oldProtection))
+ if(!ClrVirtualProtect((LPVOID)pGSCookiePtr, sizeof(GSCookie), newProtection, &oldProtection))
{
ThrowLastError();
}
+#ifdef FEATURE_PAL
+ // PAL layer is unable to extract old protection for regions that were not allocated using VirtualAlloc
+ oldProtection = PAGE_READONLY;
+#endif // FEATURE_PAL
+
#ifndef FEATURE_PAL
// The GSCookie cannot be in a writeable page
assert(((oldProtection & (PAGE_READWRITE|PAGE_WRITECOPY|PAGE_EXECUTE_READWRITE|
{
WRAPPER_NO_CONTRACT;
+ PCODE ip = GetIP(pContext);
+
+#ifdef FEATURE_WRITEBARRIER_COPY
+ if (IsIPInWriteBarrierCodeCopy(ip))
+ {
+ // Pretend we were executing the barrier function at its original location so that the unwinder can unwind the frame
+ ip = AdjustWriteBarrierIP(ip);
+ SetIP(pContext, ip);
+ }
+#endif // FEATURE_WRITEBARRIER_COPY
+
#ifdef FEATURE_DATABREAKPOINT
// If pExceptionRecord is null, it means it is called from EEDbgInterfaceImpl::AdjustContextForWriteBarrierForDebugger()
if (pExceptionRecord == nullptr)
{
- PCODE ip = GetIP(pContext);
#if defined(_TARGET_X86_)
bool withinWriteBarrierGroup = ((ip >= (PCODE) JIT_WriteBarrierGroup) && (ip <= (PCODE) JIT_WriteBarrierGroup_End));
bool withinPatchedWriteBarrierGroup = ((ip >= (PCODE) JIT_PatchedWriteBarrierGroup) && (ip <= (PCODE) JIT_PatchedWriteBarrierGroup_End));
}
#endif // VSD_STUB_CAN_THROW_AV
+#ifdef FEATURE_WRITEBARRIER_COPY
+ if (IsIPInWriteBarrierCodeCopy(controlPc))
+ {
+ // Pretend we were executing the barrier function at its original location so that the unwinder can unwind the frame
+ controlPc = AdjustWriteBarrierIP(controlPc);
+ SetIP(frameContext, controlPc);
+ }
+#endif // FEATURE_WRITEBARRIER_COPY
+
UINT_PTR sp = GetSP(frameContext);
BOOL success = PAL_VirtualUnwind(frameContext, NULL);
BOOL IsSafeToHandleHardwareException(PCONTEXT contextRecord, PEXCEPTION_RECORD exceptionRecord)
{
PCODE controlPc = GetIP(contextRecord);
+
+#ifdef FEATURE_WRITEBARRIER_COPY
+ if (IsIPInWriteBarrierCodeCopy(controlPc))
+ {
+ // Pretend we were executing the barrier function at its original location
+ controlPc = AdjustWriteBarrierIP(controlPc);
+ }
+#endif // FEATURE_WRITEBARRIER_COPY
+
return g_fEEStarted && (
exceptionRecord->ExceptionCode == STATUS_BREAKPOINT ||
exceptionRecord->ExceptionCode == STATUS_SINGLE_STEP ||
*((&fef)->GetGSCookiePtr()) = GetProcessGSCookie();
{
GCX_COOP(); // Must be cooperative to modify frame chain.
+
+#ifdef FEATURE_WRITEBARRIER_COPY
+ if (IsIPInWriteBarrierCodeCopy(controlPc))
+ {
+ // Pretend we were executing the barrier function at its original location so that the unwinder can unwind the frame
+ controlPc = AdjustWriteBarrierIP(controlPc);
+ SetIP(ex->GetContextRecord(), controlPc);
+ }
+#endif // FEATURE_WRITEBARRIER_COPY
+
if (IsIPInMarkedJitHelper(controlPc))
{
// For JIT helpers, we need to set the frame to point to the
// get our caller's PSP, or our caller's caller's SP.
while (!ExecutionManager::IsManagedCode(uControlPc))
{
+#ifdef FEATURE_WRITEBARRIER_COPY
+ if (IsIPInWriteBarrierCodeCopy(uControlPc))
+ {
+ // Pretend we were executing the barrier function at its original location so that the unwinder can unwind the frame
+ uControlPc = AdjustWriteBarrierIP(uControlPc);
+ SetIP(pContext, uControlPc);
+ }
+#endif // FEATURE_WRITEBARRIER_COPY
+
#ifndef FEATURE_PAL
uControlPc = VirtualUnwindCallFrame(pContext);
#else // !FEATURE_PAL
extern "C" void STDCALL JIT_PatchedCodeStart();
extern "C" void STDCALL JIT_PatchedCodeLast();
+#ifdef FEATURE_WRITEBARRIER_COPY
+
+// Base of the executable page holding a copy of the patchable write barrier
+// code (the [JIT_PatchedCodeStart, JIT_PatchedCodeLast) range). Allocated and
+// filled in InitThreadManager; NULL until then.
+static void* s_barrierCopy = NULL;
+
+// Translate the address of a symbol inside the original patched-code range
+// into the corresponding address inside the writable/executable copy.
+BYTE* GetWriteBarrierCodeLocation(VOID* barrier)
+{
+    return (BYTE*)s_barrierCopy + ((BYTE*)barrier - (BYTE*)JIT_PatchedCodeStart);
+}
+
+// Returns TRUE when controlPc lies inside the copied barrier code region.
+// NOTE(review): assumes s_barrierCopy is already initialized; if called before
+// InitThreadManager, s_barrierCopy is NULL and the range check degenerates -
+// confirm all callers run after startup.
+BOOL IsIPInWriteBarrierCodeCopy(PCODE controlPc)
+{
+    return (s_barrierCopy <= (void*)controlPc && (void*)controlPc < ((BYTE*)s_barrierCopy + ((BYTE*)JIT_PatchedCodeLast - (BYTE*)JIT_PatchedCodeStart)));
+}
+
+// Map an IP inside the barrier copy back to the equivalent IP in the original
+// barrier code, where unwind info exists.
+PCODE AdjustWriteBarrierIP(PCODE controlPc)
+{
+    _ASSERTE(IsIPInWriteBarrierCodeCopy(controlPc));
+
+    // Pretend we were executing the barrier function at its original location so that the unwinder can unwind the frame
+    return (PCODE)JIT_PatchedCodeStart + (controlPc - (PCODE)s_barrierCopy);
+}
+
+#endif // FEATURE_WRITEBARRIER_COPY
+
+// Location of the JIT_WriteBarrier copy; the assembly helpers (JIT_Stelem_Ref
+// and friends) jump to it indirectly through this variable.
+extern "C" void *JIT_WriteBarrier_Loc;
+
//---------------------------------------------------------------------------
// One-time initialization. Called during Dll initialization. So
// be careful what you do in here!
// If you hit this assert on retail build, there is most likely problem with BBT script.
_ASSERTE_ALL_BUILDS("clr/src/VM/threads.cpp", (BYTE*)JIT_PatchedCodeLast - (BYTE*)JIT_PatchedCodeStart < (ptrdiff_t)GetOsPageSize());
+#ifdef FEATURE_WRITEBARRIER_COPY
+ s_barrierCopy = ClrVirtualAlloc(NULL, g_SystemInfo.dwAllocationGranularity, MEM_COMMIT, PAGE_EXECUTE_READWRITE);
+ if (s_barrierCopy == NULL)
+ {
+ _ASSERTE(!"ClrVirtualAlloc of GC barrier code page failed");
+ COMPlusThrowWin32();
+ }
+
+ memcpy(s_barrierCopy, (BYTE*)JIT_PatchedCodeStart, (BYTE*)JIT_PatchedCodeLast - (BYTE*)JIT_PatchedCodeStart);
+
+ // Store the JIT_WriteBarrier copy location to a global variable so that the JIT_Stelem_Ref and its helpers
+ // can jump to it.
+ JIT_WriteBarrier_Loc = GetWriteBarrierCodeLocation((void*)JIT_WriteBarrier);
+
+ SetJitHelperFunction(CORINFO_HELP_ASSIGN_REF, GetWriteBarrierCodeLocation((void*)JIT_WriteBarrier));
+#else // FEATURE_WRITEBARRIER_COPY
+
// I am using virtual protect to cover the entire range that this code falls in.
//
_ASSERTE(!"ClrVirtualProtect of code page failed");
COMPlusThrowWin32();
}
+#endif // FEATURE_WRITEBARRIER_COPY
#ifndef FEATURE_PAL
_ASSERTE(GetThread() == NULL);
BOOL Debug_IsLockedViaThreadSuspension();
+#ifdef FEATURE_WRITEBARRIER_COPY
+
+BYTE* GetWriteBarrierCodeLocation(VOID* barrier);
+BOOL IsIPInWriteBarrierCodeCopy(PCODE controlPc);
+PCODE AdjustWriteBarrierIP(PCODE controlPc);
+
+#else // FEATURE_WRITEBARRIER_COPY
+
+#define GetWriteBarrierCodeLocation(barrier) ((BYTE*)(barrier))
+
+#endif // FEATURE_WRITEBARRIER_COPY
+
#endif //__threads_h__