Port to Release/3.1 - enabling MHR support (#26803)
author Jan Vorlicek <janvorli@microsoft.com>
Fri, 27 Sep 2019 08:47:27 +0000 (10:47 +0200)
committer GitHub <noreply@github.com>
Fri, 27 Sep 2019 08:47:27 +0000 (10:47 +0200)
* Move JIT_WriteBarrier, which is modified at runtime, to dynamically
allocated memory instead of making a page in libcoreclr.dylib RWX (the
copy/IP-translation scheme is sketched below, after the MAP_JIT probe).
* Fix the JIT_Stelem_Ref calls to JIT_WriteBarrier.
* Update the PAL to add a MEM_JIT flag for allocations and reservations of
executable memory (see the hardened-runtime detection sketch below).
* Update the native runtime in the EH and stack unwinding areas so that it
can unwind from the write barrier copy. That code has no unwind info, so
without special handling the runtime would not be able to unwind from it.
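
For reference, the MEM_JIT support boils down to the probe below: a plain RWX
anonymous mapping is attempted first, and the hardened runtime is assumed when that
fails with EACCES but the same mapping succeeds with MAP_JIT. This is a minimal
standalone sketch of what the new IsRunningOnMojaveHardenedRuntime in
src/pal/src/misc/utils.cpp does; the helper name IsHardenedRuntime and the main
driver are illustrative only, and it assumes an Apple target whose <sys/mman.h>
defines MAP_JIT.

    // Sketch: probe whether RWX anonymous mappings require MAP_JIT (hardened runtime).
    #include <sys/mman.h>
    #include <unistd.h>
    #include <errno.h>
    #include <stdio.h>

    static int IsHardenedRuntime(void)
    {
        size_t pageSize = (size_t)sysconf(_SC_PAGE_SIZE);

        // Under the hardened runtime a plain RWX anonymous mapping fails with EACCES...
        void* p = mmap(NULL, pageSize, PROT_READ | PROT_WRITE | PROT_EXEC,
                       MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
        if (p != MAP_FAILED)
        {
            munmap(p, pageSize);
            return 0;
        }
        if (errno != EACCES)
        {
            return 0;
        }

        // ...but succeeds once MAP_JIT is added.
        p = mmap(NULL, pageSize, PROT_READ | PROT_WRITE | PROT_EXEC,
                 MAP_ANONYMOUS | MAP_PRIVATE | MAP_JIT, -1, 0);
        if (p == MAP_FAILED)
        {
            return 0;
        }
        munmap(p, pageSize);
        return 1;
    }

    int main(void)
    {
        printf("hardened runtime: %s\n", IsHardenedRuntime() ? "yes" : "no");
        return 0;
    }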

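The write barrier relocation itself follows the scheme below: the code between
JIT_PatchedCodeStart and JIT_PatchedCodeLast is copied into an executable allocation,
callers jump through a pointer to the copy, and any instruction pointer that faults or
is unwound inside the copy is translated back to the original image, where unwind info
exists. The sketch uses hypothetical names (PatchedCode, ToCopy, IsIPInCopy, AdjustIP)
and plain malloc'd memory in place of the executable allocation; it only mirrors the
arithmetic of GetWriteBarrierCodeLocation, IsIPInWriteBarrierCodeCopy and
AdjustWriteBarrierIP from the threads.cpp hunk.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    // Stand-ins for the patched-code region in the image and its runtime copy.
    static uint8_t PatchedCode[64];
    #define PatchedCodeStart (PatchedCode)
    #define PatchedCodeLast  (PatchedCode + sizeof(PatchedCode))
    static uint8_t* g_barrierCopy;   // real code uses executable (MAP_JIT) memory

    // Address of a barrier inside the copy, given its address in the original image.
    static uint8_t* ToCopy(const uint8_t* addrInImage)
    {
        return g_barrierCopy + (addrInImage - PatchedCodeStart);
    }

    // True if an instruction pointer lies inside the copied region.
    static int IsIPInCopy(uintptr_t ip)
    {
        size_t size = (size_t)(PatchedCodeLast - PatchedCodeStart);
        return ip >= (uintptr_t)g_barrierCopy && ip < (uintptr_t)g_barrierCopy + size;
    }

    // Map an IP in the copy back to the original code so the unwinder finds unwind info.
    static uintptr_t AdjustIP(uintptr_t ip)
    {
        return (uintptr_t)PatchedCodeStart + (ip - (uintptr_t)g_barrierCopy);
    }

    int main(void)
    {
        g_barrierCopy = malloc(sizeof(PatchedCode));
        memcpy(g_barrierCopy, PatchedCodeStart, sizeof(PatchedCode));

        const uint8_t* barrier = PatchedCodeStart + 16;      // pretend a barrier starts here
        uintptr_t ipInCopy = (uintptr_t)ToCopy(barrier) + 3; // fault 3 bytes into the copy

        printf("in copy: %d, adjusted back ok: %d\n",
               IsIPInCopy(ipInCopy),
               AdjustIP(ipInCopy) == (uintptr_t)barrier + 3);
        free(g_barrierCopy);
        return 0;
    }
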
13 files changed:
clrdefinitions.cmake
src/pal/src/include/pal/utils.h
src/pal/src/map/map.cpp
src/pal/src/map/virtual.cpp
src/pal/src/misc/utils.cpp
src/vm/amd64/jithelpers_fast.S
src/vm/amd64/jitinterfaceamd64.cpp
src/vm/ceemain.cpp
src/vm/excep.cpp
src/vm/exceptionhandling.cpp
src/vm/stackwalk.cpp
src/vm/threads.cpp
src/vm/threads.h

diff --git a/clrdefinitions.cmake b/clrdefinitions.cmake
index 22c9c59..12dd736 100644
@@ -241,3 +241,7 @@ if(WIN32)
     add_definitions(-DFEATURE_DATABREAKPOINT)
   endif(CLR_CMAKE_TARGET_ARCH_AMD64 OR CLR_CMAKE_TARGET_ARCH_I386)
 endif(WIN32)
+
+if(CLR_CMAKE_PLATFORM_DARWIN)
+  add_definitions(-DFEATURE_WRITEBARRIER_COPY)
+endif(CLR_CMAKE_PLATFORM_DARWIN)
diff --git a/src/pal/src/include/pal/utils.h b/src/pal/src/include/pal/utils.h
index f381d95..115cf06 100644
@@ -182,6 +182,8 @@ void UTIL_SetLastErrorFromMach(kern_return_t MachReturn);
 
 #endif //HAVE_VM_ALLOCATE
 
+BOOL IsRunningOnMojaveHardenedRuntime();
+
 #ifdef __cplusplus
 }
 #endif // __cplusplus
diff --git a/src/pal/src/map/map.cpp b/src/pal/src/map/map.cpp
index f6a15f2..a8d8af2 100644
@@ -2157,17 +2157,36 @@ MAPmmapAndRecord(
     _ASSERTE(pPEBaseAddress != NULL);
 
     PAL_ERROR palError = NO_ERROR;
-    LPVOID pvBaseAddress = NULL;
-
     off_t adjust = offset & (GetVirtualPageSize() - 1);
+    LPVOID pvBaseAddress = static_cast<char *>(addr) - adjust;
 
-    pvBaseAddress = mmap(static_cast<char *>(addr) - adjust, len + adjust, prot, flags, fd, offset - adjust);
-    if (MAP_FAILED == pvBaseAddress)
+#ifdef __APPLE__
+    if ((prot & PROT_EXEC) != 0 && IsRunningOnMojaveHardenedRuntime())
     {
-        ERROR_(LOADER)( "mmap failed with code %d: %s.\n", errno, strerror( errno ) );
-        palError = FILEGetLastErrorFromErrno();
+        // Mojave hardened runtime doesn't allow executable mappings of a file. So we have to create an 
+        // anonymous mapping and read the file contents into it instead.
+
+        // Set the requested mapping with forced PROT_WRITE to ensure data from the file can be read there,
+        // read the data in and finally remove the forced PROT_WRITE
+        if ((mprotect(pvBaseAddress, len + adjust, prot | PROT_WRITE) == -1) ||
+            (pread(fd, pvBaseAddress, len + adjust, offset - adjust) == -1) ||
+            (((prot & PROT_WRITE) == 0) && mprotect(pvBaseAddress, len + adjust, prot) == -1))
+        {
+            palError = FILEGetLastErrorFromErrno();
+        }
     }
     else
+#endif
+    {
+        pvBaseAddress = mmap(static_cast<char *>(addr) - adjust, len + adjust, prot, flags, fd, offset - adjust);
+        if (MAP_FAILED == pvBaseAddress)
+        {
+            ERROR_(LOADER)( "mmap failed with code %d: %s.\n", errno, strerror( errno ) );
+            palError = FILEGetLastErrorFromErrno();
+        }
+    }
+
+    if (NO_ERROR == palError)
     {
         palError = MAPRecordMapping(pMappingObject, pPEBaseAddress, pvBaseAddress, len, prot);
         if (NO_ERROR != palError)
@@ -2359,7 +2378,14 @@ void * MAPMapPEFile(HANDLE hFile)
 #endif // FEATURE_ENABLE_NO_ADDRESS_SPACE_RANDOMIZATION
         // MAC64 requires we pass MAP_SHARED (or MAP_PRIVATE) flags - otherwise, the call fails.
         // Refer to mmap documentation at http://www.manpagez.com/man/2/mmap/ for details.
-        loadedBase = mmap(usedBaseAddr, virtualSize, PROT_NONE, MAP_ANON|MAP_PRIVATE, -1, 0);
+        int mapFlags = MAP_ANON|MAP_PRIVATE;
+#ifdef __APPLE__
+        if (IsRunningOnMojaveHardenedRuntime())
+        {
+            mapFlags |= MAP_JIT;
+        }
+#endif // __APPLE__
+        loadedBase = mmap(usedBaseAddr, virtualSize, PROT_NONE, mapFlags, -1, 0);
     }
 
     if (MAP_FAILED == loadedBase)
diff --git a/src/pal/src/map/virtual.cpp b/src/pal/src/map/virtual.cpp
index ff5cde9..ca27b73 100644
@@ -916,6 +916,10 @@ static LPVOID VIRTUALReserveMemory(
     if (pRetVal == NULL)
     {
         // Try to reserve memory from the OS
+        if ((flProtect & 0xff) == PAGE_EXECUTE_READWRITE)
+        {
+             flAllocationType |= MEM_RESERVE_EXECUTABLE;
+        }
         pRetVal = ReserveVirtualMemory(pthrCurrent, (LPVOID)StartBoundary, MemSize, flAllocationType);
     }
 
@@ -969,24 +973,7 @@ static LPVOID ReserveVirtualMemory(
 
     // Most platforms will only commit memory if it is dirtied,
     // so this should not consume too much swap space.
-    int mmapFlags = 0;
-
-#if HAVE_VM_ALLOCATE
-    // Allocate with vm_allocate first, then map at the fixed address.
-    int result = vm_allocate(mach_task_self(),
-                             &StartBoundary,
-                             MemSize,
-                             ((LPVOID) StartBoundary != nullptr) ? FALSE : TRUE);
-
-    if (result != KERN_SUCCESS)
-    {
-        ERROR("vm_allocate failed to allocated the requested region!\n");
-        pthrCurrent->SetLastError(ERROR_INVALID_ADDRESS);
-        return nullptr;
-    }
-
-    mmapFlags |= MAP_FIXED;
-#endif // HAVE_VM_ALLOCATE
+    int mmapFlags = MAP_ANON | MAP_PRIVATE;
 
     if ((fAllocationType & MEM_LARGE_PAGES) != 0)
     {
@@ -1001,7 +988,12 @@ static LPVOID ReserveVirtualMemory(
 #endif
     }
 
-    mmapFlags |= MAP_ANON | MAP_PRIVATE;
+#ifdef __APPLE__
+    if ((fAllocationType & MEM_RESERVE_EXECUTABLE) && IsRunningOnMojaveHardenedRuntime())
+    {
+        mmapFlags |= MAP_JIT;
+    }
+#endif
 
     LPVOID pRetVal = mmap((LPVOID) StartBoundary,
                           MemSize,
@@ -1014,10 +1006,6 @@ static LPVOID ReserveVirtualMemory(
     {
         ERROR( "Failed due to insufficient memory.\n" );
 
-#if HAVE_VM_ALLOCATE
-        vm_deallocate(mach_task_self(), StartBoundary, MemSize);
-#endif // HAVE_VM_ALLOCATE
-
         pthrCurrent->SetLastError(ERROR_NOT_ENOUGH_MEMORY);
         return nullptr;
     }
@@ -2160,7 +2148,7 @@ void ExecutableMemoryAllocator::TryReserveInitialMemory()
     // Do actual memory reservation.
     do
     {
-        m_startAddress = ReserveVirtualMemory(pthrCurrent, (void*)preferredStartAddress, sizeOfAllocation, 0 /* fAllocationType */);
+        m_startAddress = ReserveVirtualMemory(pthrCurrent, (void*)preferredStartAddress, sizeOfAllocation, MEM_RESERVE_EXECUTABLE);
         if (m_startAddress != nullptr)
         {
             break;
@@ -2190,7 +2178,7 @@ void ExecutableMemoryAllocator::TryReserveInitialMemory()
         //   - The code heap allocator for the JIT can allocate from this address space. Beyond this reservation, one can use
         //     the COMPlus_CodeHeapReserveForJumpStubs environment variable to reserve space for jump stubs.
         sizeOfAllocation = MaxExecutableMemorySize;
-        m_startAddress = ReserveVirtualMemory(pthrCurrent, nullptr, sizeOfAllocation, 0 /* fAllocationType */);
+        m_startAddress = ReserveVirtualMemory(pthrCurrent, nullptr, sizeOfAllocation, MEM_RESERVE_EXECUTABLE);
         if (m_startAddress == nullptr)
         {
             return;
diff --git a/src/pal/src/misc/utils.cpp b/src/pal/src/misc/utils.cpp
index 4eefd74..f1d1236 100644
@@ -26,6 +26,8 @@ SET_DEFAULT_DEBUG_CHANNEL(MISC); // some headers have code with asserts, so do t
 #include <mach/message.h>
 #endif //HAVE_VM_ALLOCATE
 
+#include <sys/mman.h>
+
 #include "pal/utils.h"
 #include "pal/file.h"
 
@@ -323,3 +325,41 @@ void UTIL_SetLastErrorFromMach(kern_return_t MachReturn)
 }
 #endif //HAVE_VM_ALLOCATE
 
+#ifdef __APPLE__
+
+/*++
+Function:
+  IsRunningOnMojaveHardenedRuntime() - Test if the current process is running on Mojave hardened runtime
+--*/
+BOOL IsRunningOnMojaveHardenedRuntime()
+{
+    static volatile int isRunningOnMojaveHardenedRuntime = -1;
+
+    if (isRunningOnMojaveHardenedRuntime == -1)
+    {
+        BOOL mhrDetected = FALSE;
+        int pageSize = sysconf(_SC_PAGE_SIZE);
+        // Try to map a page with read-write-execute protection. It should fail on Mojave hardened runtime.
+        void* testPage = mmap(NULL, pageSize, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+        if (testPage == MAP_FAILED && (errno == EACCES))
+        {
+            // The mapping has failed with EACCES, check if making the same mapping with MAP_JIT flag works
+            testPage = mmap(NULL, pageSize, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_ANONYMOUS | MAP_PRIVATE | MAP_JIT, -1, 0);
+            if (testPage != MAP_FAILED)
+            {
+                mhrDetected = TRUE;
+            }
+        }
+
+        if (testPage != MAP_FAILED)
+        {
+            munmap(testPage, pageSize);
+        }
+
+        isRunningOnMojaveHardenedRuntime = (int)mhrDetected;
+    }
+
+    return (BOOL)isRunningOnMojaveHardenedRuntime;
+}
+
+#endif // __APPLE__
diff --git a/src/vm/amd64/jithelpers_fast.S b/src/vm/amd64/jithelpers_fast.S
index 9503333..b81ee37 100644
@@ -37,11 +37,17 @@ LEAF_ENTRY JIT_CheckedWriteBarrier, _TEXT
         .byte 0x72, 0x0e
         PREPARE_EXTERNAL_VAR g_highest_address, rax
         cmp     rdi, [rax]
+
+#ifdef FEATURE_WRITEBARRIER_COPY
+        // jnb     NotInHeap
+        .byte 0x73, 0x06
+        jmp     [rip + C_FUNC(JIT_WriteBarrier_Loc)]
+#else
         // jnb     NotInHeap
         .byte 0x73, 0x02
-        
-        // call C_FUNC(JIT_WriteBarrier)
+        // jmp C_FUNC(JIT_WriteBarrier)
         .byte 0xeb, 0x05
+#endif
 
     NotInHeap:
         // See comment above about possible AV
@@ -388,6 +394,13 @@ LEAF_ENTRY JIT_ByRefWriteBarrier, _TEXT
         ret
 LEAF_END_MARKED JIT_ByRefWriteBarrier, _TEXT
 
+#ifdef FEATURE_WRITEBARRIER_COPY
+        // When JIT_WriteBarrier is copied into an allocated page, the JIT_Stelem_Ref and its
+        // helpers use this global variable to jump to it. This variable is set in InitThreadManager.
+        .global _JIT_WriteBarrier_Loc
+        .zerofill __DATA,__common,_JIT_WriteBarrier_Loc,8,3
+#endif // FEATURE_WRITEBARRIER_COPY
+
 // TODO: put definition for this in asmconstants.h
 #define CanCast 1
 
@@ -424,7 +437,11 @@ LEAF_ENTRY JIT_Stelem_Ref, _TEXT
         mov     rsi, rdx
 
         // JIT_WriteBarrier(Object** dst, Object* src)
+#ifdef FEATURE_WRITEBARRIER_COPY
+        jmp     [rip + C_FUNC(JIT_WriteBarrier_Loc)]
+#else
         jmp     C_FUNC(JIT_WriteBarrier)
+#endif
 
     LOCAL_LABEL(AssigningNull):
         // write barrier is not needed for assignment of NULL references
@@ -478,7 +495,11 @@ LEAF_ENTRY JIT_Stelem_Ref__ObjIsInstanceOfNoGC_Helper, _TEXT
         mov     rsi, rdx
 
         // JIT_WriteBarrier(Object** dst, Object* src)
+#ifdef FEATURE_WRITEBARRIER_COPY
+        jmp     [rip + C_FUNC(JIT_WriteBarrier_Loc)]
+#else
         jmp     C_FUNC(JIT_WriteBarrier)
+#endif
 
     LOCAL_LABEL(NeedCheck):
         jmp     C_FUNC(JIT_Stelem_Ref__ArrayStoreCheck_Helper)
@@ -510,5 +531,9 @@ LEAF_ENTRY JIT_Stelem_Ref__ArrayStoreCheck_Helper, _TEXT
         RESET_FRAME_WITH_RBP
         
         // JIT_WriteBarrier(Object** dst, Object* src)
+#ifdef FEATURE_WRITEBARRIER_COPY
+        jmp     [rip + C_FUNC(JIT_WriteBarrier_Loc)]
+#else
         jmp     C_FUNC(JIT_WriteBarrier)
+#endif
 LEAF_END JIT_Stelem_Ref__ArrayStoreCheck_Helper, _TEXT
diff --git a/src/vm/amd64/jitinterfaceamd64.cpp b/src/vm/amd64/jitinterfaceamd64.cpp
index bffec81..d6de7dc 100644
@@ -275,9 +275,10 @@ PBYTE WriteBarrierManager::CalculatePatchLocation(LPVOID base, LPVOID label, int
     // the label should always come after the entrypoint for this function
     _ASSERTE_ALL_BUILDS("clr/src/VM/AMD64/JITinterfaceAMD64.cpp", (LPBYTE)label > (LPBYTE)base);
 
-    return ((LPBYTE)GetEEFuncEntryPoint(JIT_WriteBarrier) + ((LPBYTE)GetEEFuncEntryPoint(label) - (LPBYTE)GetEEFuncEntryPoint(base) + offset));
+    return (GetWriteBarrierCodeLocation((void*)JIT_WriteBarrier) + ((LPBYTE)GetEEFuncEntryPoint(label) - (LPBYTE)GetEEFuncEntryPoint(base) + offset));
 }
 
+
 int WriteBarrierManager::ChangeWriteBarrierTo(WriteBarrierType newWriteBarrier, bool isRuntimeSuspended)
 {
     GCX_MAYBE_COOP_NO_THREAD_BROKEN((!isRuntimeSuspended && GetThread() != NULL));
@@ -293,7 +294,7 @@ int WriteBarrierManager::ChangeWriteBarrierTo(WriteBarrierType newWriteBarrier,
 
     // the memcpy must come before the switch statement because the asserts inside the switch
     // are actually looking into the JIT_WriteBarrier buffer
-    memcpy((PVOID)JIT_WriteBarrier, (LPVOID)GetCurrentWriteBarrierCode(), GetCurrentWriteBarrierSize());
+    memcpy(GetWriteBarrierCodeLocation((void*)JIT_WriteBarrier), (LPVOID)GetCurrentWriteBarrierCode(), GetCurrentWriteBarrierSize());
     
     switch (newWriteBarrier)
     {
@@ -722,7 +723,7 @@ int StompWriteBarrierResize(bool isRuntimeSuspended, bool bReqUpperBoundsCheck)
 
 void FlushWriteBarrierInstructionCache()
 {
-    FlushInstructionCache(GetCurrentProcess(), (PVOID)JIT_WriteBarrier, g_WriteBarrierManager.GetCurrentWriteBarrierSize());
+    FlushInstructionCache(GetCurrentProcess(), GetWriteBarrierCodeLocation((PVOID)JIT_WriteBarrier), g_WriteBarrierManager.GetCurrentWriteBarrierSize());
 }
 
 #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
diff --git a/src/vm/ceemain.cpp b/src/vm/ceemain.cpp
index 5086937..1c5d4f8 100644
@@ -510,12 +510,24 @@ void InitGSCookie()
 
     GSCookie * pGSCookiePtr = GetProcessGSCookiePtr();
 
+#ifdef FEATURE_PAL
+    // On Unix, the GS cookie is stored in a read only data segment
+    DWORD newProtection = PAGE_READWRITE;
+#else // FEATURE_PAL
+    DWORD newProtection = PAGE_EXECUTE_READWRITE;
+#endif // !FEATURE_PAL
+
     DWORD oldProtection;
-    if(!ClrVirtualProtect((LPVOID)pGSCookiePtr, sizeof(GSCookie), PAGE_EXECUTE_READWRITE, &oldProtection))
+    if(!ClrVirtualProtect((LPVOID)pGSCookiePtr, sizeof(GSCookie), newProtection, &oldProtection))
     {
         ThrowLastError();
     }
 
+#ifdef FEATURE_PAL
+    // PAL layer is unable to extract old protection for regions that were not allocated using VirtualAlloc
+    oldProtection = PAGE_READONLY;
+#endif // FEATURE_PAL
+
 #ifndef FEATURE_PAL
     // The GSCookie cannot be in a writeable page
     assert(((oldProtection & (PAGE_READWRITE|PAGE_WRITECOPY|PAGE_EXECUTE_READWRITE|
diff --git a/src/vm/excep.cpp b/src/vm/excep.cpp
index e5c73d3..61eb7b5 100644
@@ -6792,6 +6792,17 @@ AdjustContextForWriteBarrier(
 {
     WRAPPER_NO_CONTRACT;
 
+    PCODE ip = GetIP(pContext);
+
+#ifdef FEATURE_WRITEBARRIER_COPY
+    if (IsIPInWriteBarrierCodeCopy(ip))
+    {
+        // Pretend we were executing the barrier function at its original location so that the unwinder can unwind the frame
+        ip = AdjustWriteBarrierIP(ip);
+        SetIP(pContext, ip);
+    }
+#endif // FEATURE_WRITEBARRIER_COPY
+
 #ifdef FEATURE_DATABREAKPOINT
 
     // If pExceptionRecord is null, it means it is called from EEDbgInterfaceImpl::AdjustContextForWriteBarrierForDebugger()
@@ -6802,7 +6813,6 @@ AdjustContextForWriteBarrier(
 
     if (pExceptionRecord == nullptr)
     {
-        PCODE ip = GetIP(pContext);
 #if defined(_TARGET_X86_)
         bool withinWriteBarrierGroup = ((ip >= (PCODE) JIT_WriteBarrierGroup) && (ip <= (PCODE) JIT_WriteBarrierGroup_End));
         bool withinPatchedWriteBarrierGroup = ((ip >= (PCODE) JIT_PatchedWriteBarrierGroup) && (ip <= (PCODE) JIT_PatchedWriteBarrierGroup_End));
diff --git a/src/vm/exceptionhandling.cpp b/src/vm/exceptionhandling.cpp
index ba50f71..9cdb8a5 100644
@@ -4666,6 +4666,15 @@ VOID DECLSPEC_NORETURN UnwindManagedExceptionPass1(PAL_SEHException& ex, CONTEXT
             }
 #endif // VSD_STUB_CAN_THROW_AV
 
+#ifdef FEATURE_WRITEBARRIER_COPY            
+            if (IsIPInWriteBarrierCodeCopy(controlPc))
+            {
+                // Pretend we were executing the barrier function at its original location so that the unwinder can unwind the frame
+                controlPc = AdjustWriteBarrierIP(controlPc);
+                SetIP(frameContext, controlPc);
+            }
+#endif // FEATURE_WRITEBARRIER_COPY
+
             UINT_PTR sp = GetSP(frameContext);
 
             BOOL success = PAL_VirtualUnwind(frameContext, NULL);
@@ -5152,6 +5161,15 @@ BOOL IsSafeToCallExecutionManager()
 BOOL IsSafeToHandleHardwareException(PCONTEXT contextRecord, PEXCEPTION_RECORD exceptionRecord)
 {
     PCODE controlPc = GetIP(contextRecord);
+
+#ifdef FEATURE_WRITEBARRIER_COPY
+    if (IsIPInWriteBarrierCodeCopy(controlPc))
+    {
+        // Pretend we were executing the barrier function at its original location
+        controlPc = AdjustWriteBarrierIP(controlPc);
+    }
+#endif // FEATURE_WRITEBARRIER_COPY
+
     return g_fEEStarted && (
         exceptionRecord->ExceptionCode == STATUS_BREAKPOINT || 
         exceptionRecord->ExceptionCode == STATUS_SINGLE_STEP ||
@@ -5221,6 +5239,16 @@ BOOL HandleHardwareException(PAL_SEHException* ex)
         *((&fef)->GetGSCookiePtr()) = GetProcessGSCookie();
         {
             GCX_COOP();     // Must be cooperative to modify frame chain.
+
+#ifdef FEATURE_WRITEBARRIER_COPY
+            if (IsIPInWriteBarrierCodeCopy(controlPc))
+            {
+                // Pretend we were executing the barrier function at its original location so that the unwinder can unwind the frame
+                controlPc = AdjustWriteBarrierIP(controlPc);
+                SetIP(ex->GetContextRecord(), controlPc);
+            }
+#endif // FEATURE_WRITEBARRIER_COPY
+
             if (IsIPInMarkedJitHelper(controlPc))
             {
                 // For JIT helpers, we need to set the frame to point to the
diff --git a/src/vm/stackwalk.cpp b/src/vm/stackwalk.cpp
index f0ad195..a2cf484 100644
@@ -766,6 +766,15 @@ UINT_PTR Thread::VirtualUnwindToFirstManagedCallFrame(T_CONTEXT* pContext)
     // get our caller's PSP, or our caller's caller's SP.
     while (!ExecutionManager::IsManagedCode(uControlPc))
     {
+#ifdef FEATURE_WRITEBARRIER_COPY        
+        if (IsIPInWriteBarrierCodeCopy(uControlPc))
+        {
+            // Pretend we were executing the barrier function at its original location so that the unwinder can unwind the frame
+            uControlPc = AdjustWriteBarrierIP(uControlPc);
+            SetIP(pContext, uControlPc);
+        }
+#endif // FEATURE_WRITEBARRIER_COPY
+
 #ifndef FEATURE_PAL
         uControlPc = VirtualUnwindCallFrame(pContext);
 #else // !FEATURE_PAL
diff --git a/src/vm/threads.cpp b/src/vm/threads.cpp
index bde5cf8..931c208 100644
@@ -1046,6 +1046,32 @@ DWORD_PTR Thread::OBJREF_HASH = OBJREF_TABSIZE;
 extern "C" void STDCALL JIT_PatchedCodeStart();
 extern "C" void STDCALL JIT_PatchedCodeLast();
 
+#ifdef FEATURE_WRITEBARRIER_COPY
+
+static void* s_barrierCopy = NULL;
+
+BYTE* GetWriteBarrierCodeLocation(VOID* barrier)
+{
+    return (BYTE*)s_barrierCopy + ((BYTE*)barrier - (BYTE*)JIT_PatchedCodeStart);
+}
+
+BOOL IsIPInWriteBarrierCodeCopy(PCODE controlPc)
+{
+    return (s_barrierCopy <= (void*)controlPc && (void*)controlPc < ((BYTE*)s_barrierCopy + ((BYTE*)JIT_PatchedCodeLast - (BYTE*)JIT_PatchedCodeStart)));
+}
+
+PCODE AdjustWriteBarrierIP(PCODE controlPc)
+{
+    _ASSERTE(IsIPInWriteBarrierCodeCopy(controlPc));
+
+    // Pretend we were executing the barrier function at its original location so that the unwinder can unwind the frame
+    return (PCODE)JIT_PatchedCodeStart + (controlPc - (PCODE)s_barrierCopy);
+}
+
+#endif // FEATURE_WRITEBARRIER_COPY
+
+extern "C" void *JIT_WriteBarrier_Loc;
+
 //---------------------------------------------------------------------------
 // One-time initialization. Called during Dll initialization. So
 // be careful what you do in here!
@@ -1064,6 +1090,23 @@ void InitThreadManager()
     // If you hit this assert on retail build, there is most likely problem with BBT script.
     _ASSERTE_ALL_BUILDS("clr/src/VM/threads.cpp", (BYTE*)JIT_PatchedCodeLast - (BYTE*)JIT_PatchedCodeStart < (ptrdiff_t)GetOsPageSize());
 
+#ifdef FEATURE_WRITEBARRIER_COPY
+    s_barrierCopy = ClrVirtualAlloc(NULL, g_SystemInfo.dwAllocationGranularity, MEM_COMMIT, PAGE_EXECUTE_READWRITE);
+    if (s_barrierCopy == NULL)
+    {
+        _ASSERTE(!"ClrVirtualAlloc of GC barrier code page failed");
+        COMPlusThrowWin32();
+    }
+
+    memcpy(s_barrierCopy, (BYTE*)JIT_PatchedCodeStart, (BYTE*)JIT_PatchedCodeLast - (BYTE*)JIT_PatchedCodeStart);
+
+    // Store the JIT_WriteBarrier copy location to a global variable so that the JIT_Stelem_Ref and its helpers
+    // can jump to it.
+    JIT_WriteBarrier_Loc = GetWriteBarrierCodeLocation((void*)JIT_WriteBarrier);
+
+    SetJitHelperFunction(CORINFO_HELP_ASSIGN_REF, GetWriteBarrierCodeLocation((void*)JIT_WriteBarrier));
+#else // FEATURE_WRITEBARRIER_COPY
+
     // I am using virtual protect to cover the entire range that this code falls in.
     // 
 
@@ -1077,6 +1120,7 @@ void InitThreadManager()
         _ASSERTE(!"ClrVirtualProtect of code page failed");
         COMPlusThrowWin32();
     }
+#endif // FEATURE_WRITEBARRIER_COPY
 
 #ifndef FEATURE_PAL
     _ASSERTE(GetThread() == NULL);
diff --git a/src/vm/threads.h b/src/vm/threads.h
index 5b2e398..e6ed1df 100644
@@ -6765,4 +6765,16 @@ private:
 
 BOOL Debug_IsLockedViaThreadSuspension();
 
+#ifdef FEATURE_WRITEBARRIER_COPY
+
+BYTE* GetWriteBarrierCodeLocation(VOID* barrier);
+BOOL IsIPInWriteBarrierCodeCopy(PCODE controlPc);
+PCODE AdjustWriteBarrierIP(PCODE controlPc);
+
+#else // FEATURE_WRITEBARRIER_COPY
+
+#define GetWriteBarrierCodeLocation(barrier) ((BYTE*)(barrier))
+
+#endif // FEATURE_WRITEBARRIER_COPY
+
 #endif //__threads_h__