1 // Licensed to the .NET Foundation under one or more agreements.
2 // The .NET Foundation licenses this file to you under the MIT license.
3 // See the LICENSE file in the project root for more information.
4 // codeman.cpp - a management class for handling multiple code managers
5 //
6
7 //
8
9 #include "common.h"
10 #include "jitinterface.h"
11 #include "corjit.h"
12 #include "jithost.h"
13 #include "eetwain.h"
14 #include "eeconfig.h"
15 #include "excep.h"
16 #include "appdomain.hpp"
17 #include "codeman.h"
18 #include "nibblemapmacros.h"
19 #include "generics.h"
20 #include "dynamicmethod.h"
21 #include "eemessagebox.h"
22 #include "eventtrace.h"
23 #include "threadsuspend.h"
24
25 #include "exceptionhandling.h"
26
27 #include "rtlfunctions.h"
28
29 #include "jitperf.h"
30 #include "shimload.h"
31 #include "debuginfostore.h"
32 #include "strsafe.h"
33
34 #include "configuration.h"
35
36 #ifdef _WIN64
37 #define CHECK_DUPLICATED_STRUCT_LAYOUTS
38 #include "../debug/daccess/fntableaccess.h"
39 #endif // _WIN64
40
41 #define MAX_M_ALLOCATED         (16 * 1024)
42
43 // Default number of jump stubs in a jump stub block
44 #define DEFAULT_JUMPSTUBS_PER_BLOCK  32
45
46 SPTR_IMPL(EECodeManager, ExecutionManager, m_pDefaultCodeMan);
47
48 SPTR_IMPL(EEJitManager, ExecutionManager, m_pEEJitManager);
49 #ifdef FEATURE_PREJIT
50 SPTR_IMPL(NativeImageJitManager, ExecutionManager, m_pNativeImageJitManager);
51 #endif
52 #ifdef FEATURE_READYTORUN
53 SPTR_IMPL(ReadyToRunJitManager, ExecutionManager, m_pReadyToRunJitManager);
54 #endif
55
56 #ifndef DACCESS_COMPILE
57 Volatile<RangeSection *> ExecutionManager::m_CodeRangeList = NULL;
58 Volatile<LONG> ExecutionManager::m_dwReaderCount = 0;
59 Volatile<LONG> ExecutionManager::m_dwWriterLock = 0;
60 #else
61 SPTR_IMPL(RangeSection, ExecutionManager, m_CodeRangeList);
62 SVAL_IMPL(LONG, ExecutionManager, m_dwReaderCount);
63 SVAL_IMPL(LONG, ExecutionManager, m_dwWriterLock);
64 #endif
65
66 #ifndef DACCESS_COMPILE
67
68 CrstStatic ExecutionManager::m_JumpStubCrst;
69 CrstStatic ExecutionManager::m_RangeCrst;
70
71 unsigned   ExecutionManager::m_normal_JumpStubLookup;
72 unsigned   ExecutionManager::m_normal_JumpStubUnique;
73 unsigned   ExecutionManager::m_normal_JumpStubBlockAllocCount;
74 unsigned   ExecutionManager::m_normal_JumpStubBlockFullCount;
75
76 unsigned   ExecutionManager::m_LCG_JumpStubLookup;
77 unsigned   ExecutionManager::m_LCG_JumpStubUnique;
78 unsigned   ExecutionManager::m_LCG_JumpStubBlockAllocCount;
79 unsigned   ExecutionManager::m_LCG_JumpStubBlockFullCount;
80
81 #endif // DACCESS_COMPILE
82
83 #if defined(_TARGET_AMD64_) && !defined(DACCESS_COMPILE) // We don't do this on ARM, just AMD64
84
85 // Support for new style unwind information (to allow OS to stack crawl JIT compiled code).
86
87 typedef NTSTATUS (WINAPI* RtlAddGrowableFunctionTableFnPtr) (
88         PVOID *DynamicTable, PRUNTIME_FUNCTION FunctionTable, ULONG EntryCount, 
89         ULONG MaximumEntryCount, ULONG_PTR rangeStart, ULONG_PTR rangeEnd);
90 typedef VOID (WINAPI* RtlGrowFunctionTableFnPtr) (PVOID DynamicTable, ULONG NewEntryCount); 
91 typedef VOID (WINAPI* RtlDeleteGrowableFunctionTableFnPtr) (PVOID DynamicTable);
92
93 // OS entry points (only exist on Win8 and above)
94 static RtlAddGrowableFunctionTableFnPtr pRtlAddGrowableFunctionTable;
95 static RtlGrowFunctionTableFnPtr pRtlGrowFunctionTable;
96 static RtlDeleteGrowableFunctionTableFnPtr pRtlDeleteGrowableFunctionTable;
97 static Volatile<bool> RtlUnwindFtnsInited;
98
99 // statics for UnwindInfoTable 
100 Crst* UnwindInfoTable::s_pUnwindInfoTableLock = NULL;
101 Volatile<bool>      UnwindInfoTable::s_publishingActive = false;
102
103
104 #if _DEBUG
105 // Fake functions on Win7 checked build to exercise the code paths; they are no-ops
106 NTSTATUS WINAPI FakeRtlAddGrowableFunctionTable (
107         PVOID *DynamicTable, PT_RUNTIME_FUNCTION FunctionTable, ULONG EntryCount, 
108         ULONG MaximumEntryCount, ULONG_PTR rangeStart, ULONG_PTR rangeEnd) { *DynamicTable = (PVOID) 1; return 0; }
109 VOID WINAPI FakeRtlGrowFunctionTable (PVOID DynamicTable, ULONG NewEntryCount) { }
110 VOID WINAPI FakeRtlDeleteGrowableFunctionTable (PVOID DynamicTable) {}
111 #endif
112
113 /****************************************************************************/
114 // Initialize the entry points for the new Win8 unwind info publishing functions.
115 // Returns true if the initialization is successful (the functions exist).
116
117 bool InitUnwindFtns()
118 {
119     CONTRACTL {
120         NOTHROW;
121     } CONTRACTL_END;
122
123 #ifndef FEATURE_PAL
124     if (!RtlUnwindFtnsInited)
125     {
126         HINSTANCE hNtdll = WszGetModuleHandle(W("ntdll.dll"));
127         if (hNtdll != NULL) 
128         {
129             void* growFunctionTable = GetProcAddress(hNtdll, "RtlGrowFunctionTable");
130             void* deleteGrowableFunctionTable = GetProcAddress(hNtdll, "RtlDeleteGrowableFunctionTable");
131             void* addGrowableFunctionTable = GetProcAddress(hNtdll, "RtlAddGrowableFunctionTable");
132
133             // All or nothing; AddGrowableFunctionTable is last (marker)
134             if (growFunctionTable != NULL && 
135                 deleteGrowableFunctionTable != NULL && 
136                 addGrowableFunctionTable != NULL)
137             {
138                 pRtlGrowFunctionTable = (RtlGrowFunctionTableFnPtr) growFunctionTable;
139                 pRtlDeleteGrowableFunctionTable = (RtlDeleteGrowableFunctionTableFnPtr) deleteGrowableFunctionTable;
140                 pRtlAddGrowableFunctionTable = (RtlAddGrowableFunctionTableFnPtr) addGrowableFunctionTable;
141             } 
142             // Don't call FreeLibrary(hNtdll) because GetModuleHandle did *NOT* increment the reference count!
143         }
144         else 
145         {
146 #if _DEBUG
147             pRtlGrowFunctionTable = FakeRtlGrowFunctionTable;
148             pRtlDeleteGrowableFunctionTable = FakeRtlDeleteGrowableFunctionTable;
149             pRtlAddGrowableFunctionTable = FakeRtlAddGrowableFunctionTable;
150 #endif
151         }
152         RtlUnwindFtnsInited = true;
153     }
154     return (pRtlAddGrowableFunctionTable != NULL);
155 #else // !FEATURE_PAL
156     return false;
157 #endif // !FEATURE_PAL
158 }
159
160 /****************************************************************************/
161 UnwindInfoTable::UnwindInfoTable(ULONG_PTR rangeStart, ULONG_PTR rangeEnd, ULONG size)
162 {
163     STANDARD_VM_CONTRACT;
164     _ASSERTE(s_pUnwindInfoTableLock->OwnedByCurrentThread());
165     _ASSERTE((rangeEnd - rangeStart) <= 0x7FFFFFFF);
166
167     cTableCurCount = 0;
168     cTableMaxCount = size;
169     cDeletedEntries = 0;
170     iRangeStart = rangeStart;
171     iRangeEnd = rangeEnd;
172     hHandle = NULL;
173     pTable = new T_RUNTIME_FUNCTION[cTableMaxCount];
174 }
175
176 /****************************************************************************/
177 UnwindInfoTable::~UnwindInfoTable()
178 {
179     CONTRACTL {
180         NOTHROW;
181         GC_NOTRIGGER;   
182     } CONTRACTL_END;
183     _ASSERTE(s_publishingActive);
184
185     // We do this lock free because too many places still want no-trigger.   It should be OK.
186     // It would be cleaner if we could take the lock (if we did not have to be GC_NOTRIGGER)
187     UnRegister();
188     delete[] pTable;
189 }
190
191 /*****************************************************************************/
192 void UnwindInfoTable::Register()
193 {
194     _ASSERTE(s_pUnwindInfoTableLock->OwnedByCurrentThread());
195     EX_TRY
196     {
197         hHandle = NULL;
198         NTSTATUS ret = pRtlAddGrowableFunctionTable(&hHandle, pTable, cTableCurCount, cTableMaxCount, iRangeStart, iRangeEnd);    
199         if (ret != STATUS_SUCCESS)  
200         {
201             _ASSERTE(!"Failed to publish UnwindInfo (ignorable)");
202             hHandle = NULL;
203             STRESS_LOG3(LF_JIT, LL_ERROR, "UnwindInfoTable::Register ERROR %x creating table [%p, %p]\n", ret, iRangeStart, iRangeEnd);
204         }
205         else  
206         {
207             STRESS_LOG3(LF_JIT, LL_INFO100, "UnwindInfoTable::Register Handle: %p [%p, %p]\n", hHandle, iRangeStart, iRangeEnd);
208         }
209     }
210     EX_CATCH
211     {
212         hHandle = NULL;
213         STRESS_LOG2(LF_JIT, LL_ERROR, "UnwindInfoTable::Register Exception while creating table [%p, %p]\n",
214             iRangeStart, iRangeEnd);
215         _ASSERTE(!"Failed to publish UnwindInfo (ignorable)");
216     }
217     EX_END_CATCH(SwallowAllExceptions)
218 }
219
220 /*****************************************************************************/
221 void UnwindInfoTable::UnRegister()
222 {
223     PVOID handle = hHandle;
224     hHandle = 0;
225     if (handle != 0) 
226     {
227         STRESS_LOG3(LF_JIT, LL_INFO100, "UnwindInfoTable::UnRegister Handle: %p [%p, %p]\n", handle, iRangeStart, iRangeEnd);
228         pRtlDeleteGrowableFunctionTable(handle);
229     }
230 }
231
232 /*****************************************************************************/
233 // Add 'data' to the unwind info table whose address is pointed at by 'unwindInfoPtr'
234 //
235 /* static */ 
236 void UnwindInfoTable::AddToUnwindInfoTable(UnwindInfoTable** unwindInfoPtr, PT_RUNTIME_FUNCTION data,
237                                           TADDR rangeStart, TADDR rangeEnd)    
238 {
239     CONTRACTL
240     {
241         THROWS;
242         GC_TRIGGERS;
243     }
244     CONTRACTL_END;
245     _ASSERTE(data->BeginAddress <= RUNTIME_FUNCTION__EndAddress(data, rangeStart));
246     _ASSERTE(RUNTIME_FUNCTION__EndAddress(data, rangeStart) <=  (rangeEnd-rangeStart));
247     _ASSERTE(unwindInfoPtr != NULL);
248
249     if (!s_publishingActive)
250         return;
251
252     CrstHolder ch(s_pUnwindInfoTableLock);
253
254     UnwindInfoTable* unwindInfo = *unwindInfoPtr; 
255     // Was the original table null? If so, lazily initialize.
256     if (unwindInfo == NULL) 
257     {
258         // We could choose the average method size estimate dynamically based on past experience;
259         // for now, 128 is the estimated size of an average method, so we can reasonably predict
260         // how many RUNTIME_FUNCTION entries are in each chunk we allocate.
261
262         ULONG size = (ULONG) ((rangeEnd - rangeStart) / 128) + 1;
263
264         // To ensure we test the growing logic, make the size much smaller in debug code.
265         INDEBUG(size = size / 4 + 1);
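        // Worked example (illustrative): a 64 KB range gives (65536 / 128) + 1 = 513 entries
        // up front; debug builds then shrink that to 513 / 4 + 1 = 129 to exercise the growing logic.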
266         unwindInfo = (PTR_UnwindInfoTable)new UnwindInfoTable(rangeStart, rangeEnd, size);
267         unwindInfo->Register();
268         *unwindInfoPtr = unwindInfo;
269     }
270     _ASSERTE(unwindInfo != NULL);        // If new had failed, we would have thrown OOM
271     _ASSERTE(unwindInfo->cTableCurCount <= unwindInfo->cTableMaxCount);
272     _ASSERTE(unwindInfo->iRangeStart == rangeStart);
273     _ASSERTE(unwindInfo->iRangeEnd == rangeEnd);
274
275     // Means we had a failure publishing to the OS, in this case we give up
276     if (unwindInfo->hHandle == NULL)
277         return;
278
279     // Check for the fast path: we are adding to the end of an UnwindInfoTable with space
280     if (unwindInfo->cTableCurCount < unwindInfo->cTableMaxCount)
281     {
282         if (unwindInfo->cTableCurCount == 0 ||
283             unwindInfo->pTable[unwindInfo->cTableCurCount-1].BeginAddress < data->BeginAddress)
284         {
285             // Yeah, we can simply add to the end of table and we are done!
286             unwindInfo->pTable[unwindInfo->cTableCurCount] = *data;
287             unwindInfo->cTableCurCount++;
288
289             // Add to the function table
290             pRtlGrowFunctionTable(unwindInfo->hHandle, unwindInfo->cTableCurCount);
291
292             STRESS_LOG5(LF_JIT, LL_INFO1000, "AddToUnwindTable Handle: %p [%p, %p] ADDING 0x%xp TO END, now 0x%x entries\n",
293                 unwindInfo->hHandle, unwindInfo->iRangeStart, unwindInfo->iRangeEnd, 
294                 data->BeginAddress, unwindInfo->cTableCurCount);
295             return;
296         }
297     }
298
299     // OK, we need to reallocate the table and reregister.  First figure out our 'desiredSpace'.
300     // We could imagine being much more efficient for 'bulk' updates, but we don't try
301     // because we assume that this is rare and we want to keep the code simple
302
303     int usedSpace = unwindInfo->cTableCurCount - unwindInfo->cDeletedEntries;
304     int desiredSpace = usedSpace * 5 / 4 + 1;        // Increase by 20%
305     // Be more aggressive if we used all of our space:
306     if (usedSpace == unwindInfo->cTableMaxCount)
307         desiredSpace = usedSpace * 3 / 2 + 1;        // Increase by 50%
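    // For example (illustrative): with 100 live entries and free slots remaining we grow to
    // 100 * 5 / 4 + 1 = 126 slots; if every slot was live we grow to 100 * 3 / 2 + 1 = 151.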
308
309     STRESS_LOG7(LF_JIT, LL_INFO100, "AddToUnwindTable Handle: %p [%p, %p] SLOW Realloc Cnt 0x%x Max 0x%x NewMax 0x%x, Adding %x\n",
310         unwindInfo->hHandle, unwindInfo->iRangeStart, unwindInfo->iRangeEnd, 
311         unwindInfo->cTableCurCount, unwindInfo->cTableMaxCount, desiredSpace, data->BeginAddress);
312
313     UnwindInfoTable* newTab = new UnwindInfoTable(unwindInfo->iRangeStart, unwindInfo->iRangeEnd, desiredSpace);
314
315     // Copy in the entries, removing deleted entries and adding the new entry wherever it belongs
316     int toIdx = 0;
317     bool inserted = false;    // Have we inserted 'data' into the table
318     for(ULONG fromIdx = 0; fromIdx < unwindInfo->cTableCurCount; fromIdx++)
319     {
320         if (!inserted && data->BeginAddress < unwindInfo->pTable[fromIdx].BeginAddress)
321         {
322             STRESS_LOG1(LF_JIT, LL_INFO100, "AddToUnwindTable Inserted at MID position 0x%x\n", toIdx);
323             newTab->pTable[toIdx++] = *data;
324             inserted = true;
325         }
326         if (unwindInfo->pTable[fromIdx].UnwindData != 0)        // A 'non-deleted' entry
327             newTab->pTable[toIdx++] = unwindInfo->pTable[fromIdx];
328     }
329     if (!inserted) 
330     {
331         STRESS_LOG1(LF_JIT, LL_INFO100, "AddToUnwindTable Inserted at END position 0x%x\n", toIdx);
332         newTab->pTable[toIdx++] = *data;
333     }
334     newTab->cTableCurCount = toIdx;
335     STRESS_LOG2(LF_JIT, LL_INFO100, "AddToUnwindTable New size 0x%x max 0x%x\n",
336         newTab->cTableCurCount, newTab->cTableMaxCount);
337     _ASSERTE(newTab->cTableCurCount <= newTab->cTableMaxCount);
338
339     // Unregister the old table
340     *unwindInfoPtr = 0;
341     unwindInfo->UnRegister();
342
343     // Note that there is a short time when we are not publishing...
344
345     // Register the new table
346     newTab->Register();
347     *unwindInfoPtr = newTab;
348     
349     delete unwindInfo;
350 }
351         
352 /*****************************************************************************/
353 /* static */ void UnwindInfoTable::RemoveFromUnwindInfoTable(UnwindInfoTable** unwindInfoPtr, TADDR baseAddress, TADDR entryPoint)
354 {
355     CONTRACTL {
356         NOTHROW;
357         GC_TRIGGERS;
358     } CONTRACTL_END;
359     _ASSERTE(unwindInfoPtr != NULL);
360
361     if (!s_publishingActive)
362         return;
363     CrstHolder ch(s_pUnwindInfoTableLock);
364
365     UnwindInfoTable* unwindInfo = *unwindInfoPtr;
366     if (unwindInfo != NULL)
367     {
368         DWORD relativeEntryPoint = (DWORD)(entryPoint - baseAddress);
369         STRESS_LOG3(LF_JIT, LL_INFO100, "RemoveFromUnwindInfoTable Removing %p BaseAddress %p rel %x\n",
370             entryPoint, baseAddress, relativeEntryPoint);
371         for(ULONG i = 0; i < unwindInfo->cTableCurCount; i++)
372         {
373             if (unwindInfo->pTable[i].BeginAddress <= relativeEntryPoint && 
374                 relativeEntryPoint < RUNTIME_FUNCTION__EndAddress(&unwindInfo->pTable[i], unwindInfo->iRangeStart))
375             {
376                 if (unwindInfo->pTable[i].UnwindData != 0)
377                     unwindInfo->cDeletedEntries++;
378                 unwindInfo->pTable[i].UnwindData = 0;        // Mark the entry for deletion
379                 STRESS_LOG1(LF_JIT, LL_INFO100, "RemoveFromUnwindInfoTable Removed entry 0x%x\n", i);
380                 return;
381             }
382         }
383     }
384     STRESS_LOG2(LF_JIT, LL_WARNING, "RemoveFromUnwindInfoTable COULD NOT FIND %p BaseAddress %p\n",
385         entryPoint, baseAddress);
386 }
387
388 /****************************************************************************/
389 // Publish the stack unwind data 'unwindInfo', which is relative to 'baseAddress',
390 // to the operating system in a way ETW stack tracing can use.
391
392 /* static */ void UnwindInfoTable::PublishUnwindInfoForMethod(TADDR baseAddress, PT_RUNTIME_FUNCTION unwindInfo, int unwindInfoCount)
393 {
394     STANDARD_VM_CONTRACT;
395     if (!s_publishingActive)
396         return;
397
398     TADDR entry = baseAddress + unwindInfo->BeginAddress;
399     RangeSection * pRS = ExecutionManager::FindCodeRange(entry, ExecutionManager::GetScanFlags());
400     _ASSERTE(pRS != NULL);
401     if (pRS != NULL)
402     {
403         for(int i = 0; i < unwindInfoCount; i++)
404             AddToUnwindInfoTable(&pRS->pUnwindInfoTable, &unwindInfo[i], pRS->LowAddress, pRS->HighAddress);
405     }
406 }
407     
408 /*****************************************************************************/
409 /* static */ void UnwindInfoTable::UnpublishUnwindInfoForMethod(TADDR entryPoint)
410 {
411     CONTRACTL {
412         NOTHROW;
413         GC_TRIGGERS;
414     } CONTRACTL_END;
415     if (!s_publishingActive)
416         return;
417
418     RangeSection * pRS = ExecutionManager::FindCodeRange(entryPoint, ExecutionManager::GetScanFlags());
419     _ASSERTE(pRS != NULL);
420     if (pRS != NULL)
421     {
422         _ASSERTE(pRS->pjit->GetCodeType() == (miManaged | miIL));
423         if (pRS->pjit->GetCodeType() == (miManaged | miIL))
424         {
425             // This cast is justified because only EEJitManager's have the code type above.
426             EEJitManager* pJitMgr = (EEJitManager*)(pRS->pjit);
427             CodeHeader * pHeader = pJitMgr->GetCodeHeaderFromStartAddress(entryPoint);
428             for(ULONG i = 0; i < pHeader->GetNumberOfUnwindInfos(); i++)
429                 RemoveFromUnwindInfoTable(&pRS->pUnwindInfoTable, pRS->LowAddress, pRS->LowAddress + pHeader->GetUnwindInfo(i)->BeginAddress);
430         }
431     }
432 }
433
434 #ifdef STUBLINKER_GENERATES_UNWIND_INFO
435 extern StubUnwindInfoHeapSegment *g_StubHeapSegments;
436 #endif // STUBLINKER_GENERATES_UNWIND_INFO
437
438 extern CrstStatic g_StubUnwindInfoHeapSegmentsCrst;
439 /*****************************************************************************/
440 // Publish all existing JIT compiled methods by iterating through the code heap
441 // Note that because we need to keep the entries in order we have to hold 
442 // s_pUnwindInfoTableLock so that all entries get inserted in the correct order.
443 // (we rely on heapIterator walking the methods in a heap section in order).
444
445 /* static */ void UnwindInfoTable::PublishUnwindInfoForExistingMethods() 
446 {
447     STANDARD_VM_CONTRACT;
448     {
449         // CodeHeapIterator holds the m_CodeHeapCritSec, which ensures code heaps don't get deallocated while being walked
450         EEJitManager::CodeHeapIterator heapIterator(NULL, NULL);
451
452         // Currently m_CodeHeapCritSec is given the CRST_UNSAFE_ANYMODE flag which allows it to be taken in a GC_NOTRIGGER
453         // region but also disallows GC_TRIGGERS.  We need GC_TRIGGERS because we take another lock.   Ideally we would
454         // fix m_CodeHeapCritSec to not have the CRST_UNSAFE_ANYMODE flag, but I have currently reached my threshold for fixing
455         // contracts.
456         CONTRACT_VIOLATION(GCViolation);
457
458         while(heapIterator.Next())
459         {
460             MethodDesc *pMD = heapIterator.GetMethod();
461             if(pMD)
462             { 
463                 PCODE methodEntry =(PCODE) heapIterator.GetMethodCode();
464                 RangeSection * pRS = ExecutionManager::FindCodeRange(methodEntry, ExecutionManager::GetScanFlags());
465                 _ASSERTE(pRS != NULL);
466                 _ASSERTE(pRS->pjit->GetCodeType() == (miManaged | miIL));
467                 if (pRS != NULL && pRS->pjit->GetCodeType() == (miManaged | miIL))
468                 {
469                     // This cast is justified because only EEJitManager's have the code type above.
470                     EEJitManager* pJitMgr = (EEJitManager*)(pRS->pjit);
471                     CodeHeader * pHeader = pJitMgr->GetCodeHeaderFromStartAddress(methodEntry);
472                     int unwindInfoCount = pHeader->GetNumberOfUnwindInfos();
473                     for(int i = 0; i < unwindInfoCount; i++)
474                         AddToUnwindInfoTable(&pRS->pUnwindInfoTable, pHeader->GetUnwindInfo(i), pRS->LowAddress, pRS->HighAddress);
475                 }
476             }
477         }
478     }
479
480 #ifdef STUBLINKER_GENERATES_UNWIND_INFO
481     // Enumerate all existing stubs
482     CrstHolder crst(&g_StubUnwindInfoHeapSegmentsCrst);
483     for (StubUnwindInfoHeapSegment* pStubHeapSegment = g_StubHeapSegments; pStubHeapSegment; pStubHeapSegment = pStubHeapSegment->pNext)
484     {
485         // The stubs are in reverse order, so we reverse them to get memory order
486         CQuickArrayList<StubUnwindInfoHeader*> list;
487         for (StubUnwindInfoHeader *pHeader = pStubHeapSegment->pUnwindHeaderList; pHeader; pHeader = pHeader->pNext)
488             list.Push(pHeader);
489
490         for(int i = (int) list.Size()-1; i >= 0; --i)
491         {
492             StubUnwindInfoHeader *pHeader = list[i];
493             AddToUnwindInfoTable(&pStubHeapSegment->pUnwindInfoTable, &pHeader->FunctionEntry, 
494                 (TADDR) pStubHeapSegment->pbBaseAddress, (TADDR) pStubHeapSegment->pbBaseAddress + pStubHeapSegment->cbSegment);
495         }
496     }
497 #endif // STUBLINKER_GENERATES_UNWIND_INFO
498 }
499
500 /*****************************************************************************/
501 // turn on the publishing of unwind info.  Called when the ETW rundown provider
502 // is turned on. 
503
504 /* static */ void UnwindInfoTable::PublishUnwindInfo(bool publishExisting)
505 {
506     CONTRACTL {
507         NOTHROW;
508         GC_TRIGGERS;
509     } CONTRACTL_END;
510
511     if (s_publishingActive)
512         return;
513
514     // If we don't have the APIs we need, give up
515     if (!InitUnwindFtns())
516         return;
517
518     EX_TRY
519     {
520         // Create the lock 
521         Crst* newCrst = new Crst(CrstUnwindInfoTableLock);
522         if (InterlockedCompareExchangeT(&s_pUnwindInfoTableLock, newCrst, NULL) == NULL)
523         {
524             s_publishingActive = true;
525             if (publishExisting)
526                 PublishUnwindInfoForExistingMethods();
527         }
528         else 
529             delete newCrst;    // we were in a race and failed, throw away the Crst we made.
530
531     } EX_CATCH {
532         STRESS_LOG1(LF_JIT, LL_ERROR, "Exception happened when doing unwind Info rundown. EIP of last AV = %p\n", g_LastAccessViolationEIP);
533         _ASSERTE(!"Exception thrown while publishing 'catchup' ETW unwind information");
534         s_publishingActive = false;     // Try to minimize damage.  
535     } EX_END_CATCH(SwallowAllExceptions);
536 }
537
538 #endif // defined(_TARGET_AMD64_) && !defined(DACCESS_COMPILE)
539
540 /*-----------------------------------------------------------------------------
541  This is a listing of which methods use which synchronization mechanism
542  in the EEJitManager.
543 //-----------------------------------------------------------------------------
544
545 Setters of EEJitManager::m_CodeHeapCritSec
546 -----------------------------------------------
547 allocCode
548 allocGCInfo
549 allocEHInfo
550 allocJumpStubBlock
551 ResolveEHClause
552 RemoveJitData
553 Unload
554 ReleaseReferenceToHeap
555 JitCodeToMethodInfo
556
557
558 Need EEJitManager::m_CodeHeapCritSec to be set
559 -----------------------------------------------
560 NewCodeHeap
561 allocCodeRaw
562 GetCodeHeapList
563 GetCodeHeap
564 RemoveCodeHeapFromDomainList
565 DeleteCodeHeap
566 AddRangeToJitHeapCache
567 DeleteJitHeapCache
568
569 */
570
571
572 #if !defined(DACCESS_COMPILE)
573 EEJitManager::CodeHeapIterator::CodeHeapIterator(BaseDomain *pDomainFilter, LoaderAllocator *pLoaderAllocatorFilter)
574     : m_lockHolder(&(ExecutionManager::GetEEJitManager()->m_CodeHeapCritSec)), m_Iterator(NULL, 0, NULL, 0)
575 {
576     CONTRACTL
577     {
578         NOTHROW;
579         GC_NOTRIGGER;
580         MODE_ANY;
581     }
582     CONTRACTL_END;
583
584     m_pHeapList = NULL;
585     m_pDomain = pDomainFilter;
586     m_pLoaderAllocator = pLoaderAllocatorFilter;
587     m_pHeapList = ExecutionManager::GetEEJitManager()->GetCodeHeapList();
588     if(m_pHeapList)
589         new (&m_Iterator) MethodSectionIterator((const void *)m_pHeapList->mapBase, (COUNT_T)m_pHeapList->maxCodeHeapSize, m_pHeapList->pHdrMap, (COUNT_T)HEAP2MAPSIZE(ROUND_UP_TO_PAGE(m_pHeapList->maxCodeHeapSize)));
590 };
591
592 EEJitManager::CodeHeapIterator::~CodeHeapIterator()
593 {
594     CONTRACTL
595     {
596         NOTHROW;
597         GC_NOTRIGGER;
598         MODE_ANY;
599     }
600     CONTRACTL_END;
601 }
602
603 BOOL EEJitManager::CodeHeapIterator::Next()
604 {
605     CONTRACTL
606     {
607         NOTHROW;
608         GC_NOTRIGGER;
609         MODE_ANY;
610     }
611     CONTRACTL_END;
612
613     if(!m_pHeapList)
614         return FALSE;
615
616     while(1)
617     {
618         if(!m_Iterator.Next())
619         {
620             m_pHeapList = m_pHeapList->GetNext();
621             if(!m_pHeapList)
622                 return FALSE;
623             new (&m_Iterator) MethodSectionIterator((const void *)m_pHeapList->mapBase, (COUNT_T)m_pHeapList->maxCodeHeapSize, m_pHeapList->pHdrMap, (COUNT_T)HEAP2MAPSIZE(ROUND_UP_TO_PAGE(m_pHeapList->maxCodeHeapSize)));
624         }
625         else
626         {
627             BYTE * code = m_Iterator.GetMethodCode();
628             CodeHeader * pHdr = (CodeHeader *)(code - sizeof(CodeHeader));
629             m_pCurrent = !pHdr->IsStubCodeBlock() ? pHdr->GetMethodDesc() : NULL;
630             if (m_pDomain && m_pCurrent)
631             {
632                 BaseDomain *pCurrentBaseDomain = m_pCurrent->GetDomain();
633                 if(pCurrentBaseDomain != m_pDomain)
634                     continue;
635             }
636
637             // LoaderAllocator filter
638             if (m_pLoaderAllocator && m_pCurrent)
639             {
640                 LoaderAllocator *pCurrentLoaderAllocator = m_pCurrent->GetLoaderAllocatorForCode();
641                 if(pCurrentLoaderAllocator != m_pLoaderAllocator)
642                     continue;
643             }
644
645             return TRUE;
646         }
647     }
648 }
649 #endif // !DACCESS_COMPILE
650
651 #ifndef DACCESS_COMPILE
652
653 //---------------------------------------------------------------------------------------
654 //
655 // ReaderLockHolder::ReaderLockHolder takes the reader lock, checks for the writer lock 
656 // and either aborts if the writer lock is held, or yields until the writer lock is released,
657 // keeping the reader lock.  This is normally called in the constructor for the
658 // ReaderLockHolder.
659 // 
660 // The writer cannot be taken if there are any readers. The WriterLockHolder functions take the
661 // writer lock and check for any readers. If there are any, the WriterLockHolder functions
662 // release the writer and yield to wait for the readers to be done.
663
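// A minimal sketch of the protocol implemented below (illustrative only; it omits the
// debugger/allocator bookkeeping and the HostCallPreference early-out):
//
//   Reader:                                    Writer:
//     increment m_dwReaderCount                  increment m_dwWriterLock
//     while (m_dwWriterLock != 0) yield          if (m_dwReaderCount != 0):
//                                                    decrement m_dwWriterLock, yield, retry
//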
664 ExecutionManager::ReaderLockHolder::ReaderLockHolder(HostCallPreference hostCallPreference /*=AllowHostCalls*/)
665 {
666     CONTRACTL {
667         NOTHROW;
668         if (hostCallPreference == AllowHostCalls) { HOST_CALLS; } else { HOST_NOCALLS; }
669         GC_NOTRIGGER;
670         SO_TOLERANT;
671         CAN_TAKE_LOCK;
672     } CONTRACTL_END;
673
674     IncCantAllocCount();
675
676     FastInterlockIncrement(&m_dwReaderCount);
677
678     EE_LOCK_TAKEN(GetPtrForLockContract());
679
680     if (VolatileLoad(&m_dwWriterLock) != 0)
681     {
682         if (hostCallPreference != AllowHostCalls)
683         {
684             // Rats, writer lock is held. Gotta bail. Since the reader count was already
685             // incremented, we're technically still blocking writers at the moment. But
686             // the holder who called us is about to call DecrementReader in its
687             // destructor and unblock writers.
688             return;
689         }
690
691         YIELD_WHILE ((VolatileLoad(&m_dwWriterLock) != 0));
692     }
693 }
694
695 //---------------------------------------------------------------------------------------
696 //
697 // See code:ExecutionManager::ReaderLockHolder::ReaderLockHolder. This just decrements the reader count. 
698
699 ExecutionManager::ReaderLockHolder::~ReaderLockHolder()
700 {
701     CONTRACTL
702     {
703         NOTHROW;
704         GC_NOTRIGGER;
705         SO_TOLERANT;
706         MODE_ANY;
707     }
708     CONTRACTL_END;
709
710     FastInterlockDecrement(&m_dwReaderCount);
711     DecCantAllocCount();
712
713     EE_LOCK_RELEASED(GetPtrForLockContract());
714 }
715
716 //---------------------------------------------------------------------------------------
717 //
718 // Returns whether the reader lock is acquired
719
720 BOOL ExecutionManager::ReaderLockHolder::Acquired()
721 {
722     LIMITED_METHOD_CONTRACT;
723     return VolatileLoad(&m_dwWriterLock) == 0;
724 }
725
726 ExecutionManager::WriterLockHolder::WriterLockHolder()
727 {
728     CONTRACTL {
729         NOTHROW;
730         GC_NOTRIGGER;
731         CAN_TAKE_LOCK;
732     } CONTRACTL_END;
733
734     _ASSERTE(m_dwWriterLock == 0);
735
736     // Signal to a debugger that this thread cannot stop now
737     IncCantStopCount();
738
739     IncCantAllocCount();
740
741     DWORD dwSwitchCount = 0;
742     while (TRUE)
743     {
744         // While this thread holds the writer lock, we must not try to suspend it
745         // or allow a profiler to walk its stack
746         Thread::IncForbidSuspendThread();
747
748         FastInterlockIncrement(&m_dwWriterLock);
749         if (m_dwReaderCount == 0)
750             break;
751         FastInterlockDecrement(&m_dwWriterLock);
752
753         // Before we loop and retry, it's safe to suspend or hijack and inspect
754         // this thread
755         Thread::DecForbidSuspendThread();
756
757         __SwitchToThread(0, ++dwSwitchCount);
758     }
759     EE_LOCK_TAKEN(GetPtrForLockContract());
760 }
761
762 ExecutionManager::WriterLockHolder::~WriterLockHolder()
763 {
764     LIMITED_METHOD_CONTRACT;
765     
766     FastInterlockDecrement(&m_dwWriterLock);
767
768     // Writer lock released, so it's safe again for this thread to be
769     // suspended or have its stack walked by a profiler
770     Thread::DecForbidSuspendThread();
771
772     DecCantAllocCount();
773
774     // Signal to a debugger that it's again safe to stop this thread
775     DecCantStopCount();
776
777     EE_LOCK_RELEASED(GetPtrForLockContract());
778 }
779
780 #else
781
782 // For DAC builds, we only care whether the writer lock is held.
783 // If it is, we will assume the locked data is in an inconsistent 
784 // state and throw. We never actually take the lock.
785 // Note: Throws
786 ExecutionManager::ReaderLockHolder::ReaderLockHolder(HostCallPreference hostCallPreference /*=AllowHostCalls*/)
787 {
788     SUPPORTS_DAC;
789
790     if (m_dwWriterLock != 0) 
791     {
792         ThrowHR(CORDBG_E_PROCESS_NOT_SYNCHRONIZED); 
793     }
794 }
795
796 ExecutionManager::ReaderLockHolder::~ReaderLockHolder()
797 {
798 }
799
800 #endif // DACCESS_COMPILE
801
802 /*-----------------------------------------------------------------------------
803  This is a listing of which methods use which synchronization mechanism
804  in the ExecutionManager
805 //-----------------------------------------------------------------------------
806
807 ==============================================================================
808 ExecutionManager::ReaderLockHolder and ExecutionManager::WriterLockHolder
809 Protects the callers of ExecutionManager::GetRangeSection from heap deletions
810 while walking RangeSections.  You need to take a reader lock before reading the
811 value of m_CodeRangeList, and hold it while walking the lists.
812
813 Uses ReaderLockHolder (allows multiple readers with no writers)
814 -----------------------------------------
815 ExecutionManager::FindCodeRange
816 ExecutionManager::FindZapModule
817 ExecutionManager::EnumMemoryRegions
818
819 Uses WriterLockHolder (allows single writer and no readers)
820 -----------------------------------------
821 ExecutionManager::AddRangeHelper
822 ExecutionManager::DeleteRangeHelper
823
824 */
825
826 //-----------------------------------------------------------------------------
827
828 #if defined(_TARGET_ARM_) || defined(_TARGET_ARM64_)
829 #define EXCEPTION_DATA_SUPPORTS_FUNCTION_FRAGMENTS
830 #endif
831
832 #if defined(EXCEPTION_DATA_SUPPORTS_FUNCTION_FRAGMENTS)
833 // Function fragments can be used for Hot/Cold splitting, for expressing large functions, or for 'ShrinkWrapping', which
834 // delays saving and restoring some callee-saved registers until later inside the body of the method.
835 // (It's assumed that the JIT will not emit any ShrinkWrapping-style methods.)
836 // For these cases multiple RUNTIME_FUNCTION entries (a.k.a. function fragments) are used to define
837 // all the regions of the function or funclet. One of these function fragments covers the beginning of the function/funclet,
838 // including the prolog section, and is referred to as the 'Host Record'.
839 // This function returns TRUE if the inspected RUNTIME_FUNCTION entry is NOT a host record.
840
841 BOOL IsFunctionFragment(TADDR baseAddress, PTR_RUNTIME_FUNCTION pFunctionEntry)
842 {
843     LIMITED_METHOD_DAC_CONTRACT;
844
845     _ASSERTE((pFunctionEntry->UnwindData & 3) == 0);   // The unwind data must be an RVA; we don't support packed unwind format
846     DWORD unwindHeader = *(PTR_DWORD)(baseAddress + pFunctionEntry->UnwindData);
847     _ASSERTE((0 == ((unwindHeader >> 18) & 3)) || !"unknown unwind data format, version != 0");
848 #if defined(_TARGET_ARM_)
849
850     // On ARM, it's assumed that the prolog is always at the beginning of the function and cannot be split.
851     // Given that, there are 4 possible ways to fragment a function:
852     // 1. Prolog only: 
853     // 2. Prolog and some epilogs: 
854     // 3. Epilogs only: 
855     // 4. No Prolog or epilog
856     //
857     // Function fragments describing 1 & 2 are host records; 3 & 4 are not.
858     // For 3 & 4, the .xdata record's F bit is set to 1, clearly marking what is NOT a host record.
859
860     _ASSERTE((pFunctionEntry->BeginAddress & THUMB_CODE) == THUMB_CODE);   // Sanity check: it's a thumb address
861     DWORD Fbit = (unwindHeader >> 22) & 0x1;    // F "fragment" bit
862     return (Fbit == 1);
863 #elif defined(_TARGET_ARM64_)
864
865     // ARM64 is a little bit more flexible, in the sense that it supports partial prologs. However, only one of the
866     // prolog regions is allowed to alter SP, and that's the Host Record. Partial prologs are used in ShrinkWrapping
867     // scenarios, which are not supported, hence we don't need to worry about them. Discarding partial prologs
868     // simplifies identifying a host record a lot.
869     // 
870     // 1. Prolog only: The host record. Epilog Count and E bit are all 0.
871     // 2. Prolog and some epilogs: The host record with accompanying epilog-only records
872     // 3. Epilogs only: First unwind code is Phantom prolog (Starting with an end_c, indicating an empty prolog)
873     // 4. No prologs or epilogs: First unwind code is Phantom prolog  (Starting with an end_c, indicating an empty prolog)
874     //
875
876     int EpilogCount = (int)(unwindHeader >> 22) & 0x1F;
877     int CodeWords = unwindHeader >> 27;
878     PTR_DWORD pUnwindCodes = (PTR_DWORD)(baseAddress + pFunctionEntry->UnwindData);
879     // Skip header.
880     pUnwindCodes++;
881
882     // Skip extended header.
883     if ((CodeWords == 0) && (EpilogCount == 0))
884     {
885         EpilogCount = (*pUnwindCodes) & 0xFFFF;
886         pUnwindCodes++;
887     }
888
889     // Skip epilog scopes.
890     BOOL Ebit = (unwindHeader >> 21) & 0x1;
891     if (!Ebit && (EpilogCount != 0))
892     {
893         // EpilogCount is the number of epilog scopes defined right after the unwindHeader
894         pUnwindCodes += EpilogCount;
895     }
896
897     return ((*pUnwindCodes & 0xFF) == 0xE5);
898 #else
899     PORTABILITY_ASSERT("IsFunctionFragment - NYI on this platform");
900 #endif
901 }
902
903 #endif // EXCEPTION_DATA_SUPPORTS_FUNCTION_FRAGMENTS
904
905
906 #ifndef DACCESS_COMPILE
907
908 //**********************************************************************************
909 //  IJitManager
910 //**********************************************************************************
911 IJitManager::IJitManager()
912 {
913     LIMITED_METHOD_CONTRACT;
914
915     m_runtimeSupport   = ExecutionManager::GetDefaultCodeManager();
916 }
917
918 #endif // #ifndef DACCESS_COMPILE
919
920 // When we unload an appdomain, we need to make sure that any threads that are crawling through
921 // our heap or rangelist are out. For cooperative-mode threads, we know that they will have
922 // been stopped when we suspend the EE so they won't be touching an element that is about to be deleted.
923 // However, preemptive-mode threads could be stalled right on top of the element we want
924 // to delete, so we need to make them take the reader lock and wait for them to drain.
925 ExecutionManager::ScanFlag ExecutionManager::GetScanFlags()
926 {
927     CONTRACTL {
928         NOTHROW;
929         GC_NOTRIGGER;
930         SO_TOLERANT;
931         HOST_NOCALLS;
932         SUPPORTS_DAC;
933     } CONTRACTL_END;
934
935 #if !defined(DACCESS_COMPILE) && !defined(CROSSGEN_COMPILE)
936     BEGIN_GETTHREAD_ALLOWED;
937
938     Thread *pThread = GetThread();
939
940     if (!pThread)
941         return ScanNoReaderLock;
942
943     // If this thread is hijacked by a profiler and crawling its own stack,
944     // we do need to take the lock
945     if (pThread->GetProfilerFilterContext() != NULL)
946         return ScanReaderLock;
947     
948     if (pThread->PreemptiveGCDisabled() || (pThread == ThreadSuspend::GetSuspensionThread()))
949         return ScanNoReaderLock;
950
951     END_GETTHREAD_ALLOWED;
952
953     return ScanReaderLock;
954 #else
955     return ScanNoReaderLock;
956 #endif
957 }
958
959 #ifdef DACCESS_COMPILE
960
961 void IJitManager::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
962 {
963     DAC_ENUM_VTHIS();
964     if (m_runtimeSupport.IsValid())
965     {
966         m_runtimeSupport->EnumMemoryRegions(flags);
967     }
968 }
969
970 #endif // #ifdef DACCESS_COMPILE
971
972 #if defined(WIN64EXCEPTIONS)
973
974 PTR_VOID GetUnwindDataBlob(TADDR moduleBase, PTR_RUNTIME_FUNCTION pRuntimeFunction, /* out */ SIZE_T * pSize)
975 {
976     LIMITED_METHOD_CONTRACT;
977
978 #if defined(_TARGET_AMD64_)
979     PTR_UNWIND_INFO pUnwindInfo(dac_cast<PTR_UNWIND_INFO>(moduleBase + RUNTIME_FUNCTION__GetUnwindInfoAddress(pRuntimeFunction)));
980
981     *pSize = ALIGN_UP(offsetof(UNWIND_INFO, UnwindCode) +
982         sizeof(UNWIND_CODE) * pUnwindInfo->CountOfUnwindCodes +
983         sizeof(ULONG) /* personality routine is always present */,
984             sizeof(DWORD));
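    // For example (illustrative): with CountOfUnwindCodes == 3 this computes
    // 4 (fixed header) + 3 * 2 (unwind codes) + 4 (personality routine RVA) = 14,
    // rounded up to DWORD alignment = 16 bytes.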
985
986     return pUnwindInfo;
987
988 #elif defined(_TARGET_ARM_)
989
990     // if this function uses packed unwind data then at least one of the two least significant bits
991     // will be non-zero.  if this is the case then there will be no xdata record to enumerate.
992     _ASSERTE((pRuntimeFunction->UnwindData & 0x3) == 0);
993
994     // compute the size of the unwind info
995     PTR_ULONG xdata = dac_cast<PTR_ULONG>(pRuntimeFunction->UnwindData + moduleBase);
996
997     ULONG epilogScopes = 0;
998     ULONG unwindWords = 0;
999     ULONG size = 0;
1000
1001     if ((xdata[0] >> 23) != 0)
1002     {
1003         size = 4;
1004         epilogScopes = (xdata[0] >> 23) & 0x1f;
1005         unwindWords = (xdata[0] >> 28) & 0x0f;
1006     }
1007     else
1008     {
1009         size = 8;
1010         epilogScopes = xdata[1] & 0xffff;
1011         unwindWords = (xdata[1] >> 16) & 0xff;
1012     }
1013
1014     if (!(xdata[0] & (1 << 21)))
1015         size += 4 * epilogScopes;
1016
1017     size += 4 * unwindWords;
1018
1019     _ASSERTE(xdata[0] & (1 << 20)); // personality routine should always be present
1020     size += 4;
1021
1022     *pSize = size;
1023     return xdata;
1024
1025 #elif defined(_TARGET_ARM64_)
1026     // if this function uses packed unwind data then at least one of the two least significant bits
1027     // will be non-zero.  if this is the case then there will be no xdata record to enumerate.
1028     _ASSERTE((pRuntimeFunction->UnwindData & 0x3) == 0);
1029
1030     // compute the size of the unwind info
1031     PTR_ULONG xdata    = dac_cast<PTR_ULONG>(pRuntimeFunction->UnwindData + moduleBase);
1032     ULONG epilogScopes = 0;
1033     ULONG unwindWords  = 0;
1034     ULONG size = 0;
1035
1036     // If both the Epilog Count and the Code Words fields are non-zero,
1037     // the epilog scope and unwind word info is given by a 1-word header;
1038     // otherwise it is given by a 2-word header.
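    // For example (illustrative): a one-word header with CodeWords == 2, EpilogCount == 1,
    // and the E bit clear yields size = 4 + 4 * 1 (epilog scopes) + 4 * 2 (unwind words)
    // + 4 (handler RVA) = 20 bytes.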
1039     if ((xdata[0] >> 27) != 0) 
1040     {
1041         size = 4;
1042         epilogScopes = (xdata[0] >> 22) & 0x1f;
1043         unwindWords = (xdata[0] >> 27) & 0x0f;
1044     }
1045     else 
1046     {
1047         size = 8;
1048         epilogScopes = xdata[1] & 0xffff;
1049         unwindWords = (xdata[1] >> 16) & 0xff;
1050     }
1051
1052     if (!(xdata[0] & (1 << 21))) 
1053         size += 4 * epilogScopes;
1054
1055     size += 4 * unwindWords;
1056
1057     _ASSERTE(xdata[0] & (1 << 20)); // personality routine should always be present
1058     size += 4;                      // exception handler RVA
1059
1060     *pSize = size;
1061     return xdata;
1062
1063
1064 #else
1065     PORTABILITY_ASSERT("GetUnwindDataBlob");
1066     return NULL;
1067 #endif
1068 }
1069
1070 // GetFuncletStartAddress returns the starting address of the function or funclet indicated by the EECodeInfo address.
1071 TADDR IJitManager::GetFuncletStartAddress(EECodeInfo * pCodeInfo)
1072 {
1073     PTR_RUNTIME_FUNCTION pFunctionEntry = pCodeInfo->GetFunctionEntry();
1074
1075 #ifdef _TARGET_AMD64_
1076     _ASSERTE((pFunctionEntry->UnwindData & RUNTIME_FUNCTION_INDIRECT) == 0);
1077 #endif
1078
1079     TADDR baseAddress = pCodeInfo->GetModuleBase();
1080     TADDR funcletStartAddress = baseAddress + RUNTIME_FUNCTION__BeginAddress(pFunctionEntry);
1081
1082 #if defined(EXCEPTION_DATA_SUPPORTS_FUNCTION_FRAGMENTS)
1083     // Is the RUNTIME_FUNCTION a fragment? If so, we need to walk backwards until we find the first
1084     // non-fragment RUNTIME_FUNCTION, and use that one. This happens when we have very large functions
1085     // and multiple RUNTIME_FUNCTION entries per function or funclet. However, all but the first will
1086     // have the "F" bit set in the unwind data, indicating a fragment (with phantom prolog unwind codes).
1087
1088     for (;;)
1089     {
1090         if (!IsFunctionFragment(baseAddress, pFunctionEntry))
1091         {
1092             // This is not a fragment; we're done
1093             break;
1094         }
1095
1096         // We found a fragment. Walk backwards in the RUNTIME_FUNCTION array until we find a non-fragment.
1097         // We're guaranteed to find one, because we require that a fragment live in a function or funclet
1098         // that has a prolog, which will have non-fragment .xdata.
1099         --pFunctionEntry;
1100
1101         funcletStartAddress = baseAddress + RUNTIME_FUNCTION__BeginAddress(pFunctionEntry);
1102     }
1103 #endif // EXCEPTION_DATA_SUPPORTS_FUNCTION_FRAGMENTS
1104
1105     return funcletStartAddress;
1106 }
1107
1108 BOOL IJitManager::IsFunclet(EECodeInfo * pCodeInfo)
1109 {
1110     CONTRACTL {
1111         NOTHROW;
1112         GC_NOTRIGGER;
1113         MODE_ANY;
1114     }
1115     CONTRACTL_END;
1116
1117     TADDR funcletStartAddress = GetFuncletStartAddress(pCodeInfo);
1118     TADDR methodStartAddress = pCodeInfo->GetStartAddress();
1119
1120     return (funcletStartAddress != methodStartAddress);
1121 }
1122
1123 BOOL IJitManager::IsFilterFunclet(EECodeInfo * pCodeInfo)
1124 {
1125     CONTRACTL {
1126         NOTHROW;
1127         GC_NOTRIGGER;
1128         MODE_ANY;
1129     }
1130     CONTRACTL_END;
1131
1132     if (!pCodeInfo->IsFunclet())
1133         return FALSE;
1134
1135     TADDR funcletStartAddress = GetFuncletStartAddress(pCodeInfo);
1136
1137     // This assumes no hot/cold splitting for funclets
1138
1139     _ASSERTE(FitsInU4(pCodeInfo->GetCodeAddress() - funcletStartAddress));
1140     DWORD relOffsetWithinFunclet = static_cast<DWORD>(pCodeInfo->GetCodeAddress() - funcletStartAddress);
1141
1142     _ASSERTE(pCodeInfo->GetRelOffset() >= relOffsetWithinFunclet);
1143     DWORD funcletStartOffset = pCodeInfo->GetRelOffset() - relOffsetWithinFunclet;
1144
1145     EH_CLAUSE_ENUMERATOR pEnumState;
1146     unsigned EHCount = InitializeEHEnumeration(pCodeInfo->GetMethodToken(), &pEnumState);
1147     _ASSERTE(EHCount > 0);
1148
1149     EE_ILEXCEPTION_CLAUSE EHClause;
1150     for (ULONG i = 0; i < EHCount; i++)
1151     {
1152         GetNextEHClause(&pEnumState, &EHClause);
1153
1154         // Duplicate clauses are always listed at the end, so when we hit a duplicate clause,
1155         // we have already visited all of the normal clauses.
1156         if (IsDuplicateClause(&EHClause))
1157         {
1158             break;
1159         }
1160
1161         if (IsFilterHandler(&EHClause))
1162         {
1163             if (EHClause.FilterOffset == funcletStartOffset)
1164             {
1165                 return true;
1166             }
1167         }
1168     }
1169
1170     return false;
1171 }
1172
1173 #else // WIN64EXCEPTIONS
1174
1175 PTR_VOID GetUnwindDataBlob(TADDR moduleBase, PTR_RUNTIME_FUNCTION pRuntimeFunction, /* out */ SIZE_T * pSize)
1176 {
1177     *pSize = 0;
1178     return dac_cast<PTR_VOID>(pRuntimeFunction->UnwindData + moduleBase);
1179 }
1180
1181 #endif // WIN64EXCEPTIONS
1182
1183
1184 #ifndef CROSSGEN_COMPILE
1185
1186 #ifndef DACCESS_COMPILE
1187
1188 //**********************************************************************************
1189 //  EEJitManager
1190 //**********************************************************************************
1191
1192 EEJitManager::EEJitManager()
1193     :
1194     // CRST_DEBUGGER_THREAD - We take this lock on debugger thread during EnC add method, among other things
1195     // CRST_TAKEN_DURING_SHUTDOWN - We take this lock during shutdown if ETW is on (to do rundown)
1196     m_CodeHeapCritSec( CrstSingleUseLock,
1197                         CrstFlags(CRST_UNSAFE_ANYMODE|CRST_DEBUGGER_THREAD|CRST_TAKEN_DURING_SHUTDOWN)),
1198     m_CPUCompileFlags(),
1199     m_EHClauseCritSec( CrstSingleUseLock )
1200 {
1201     CONTRACTL {
1202         THROWS;
1203         GC_NOTRIGGER;
1204     } CONTRACTL_END;
1205
1206     m_pCodeHeap = NULL;
1207     m_jit = NULL;
1208     m_JITCompiler      = NULL;
1209 #ifdef _TARGET_AMD64_
1210     m_pEmergencyJumpStubReserveList = NULL;
1211 #endif
1212 #if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
1213     m_JITCompilerOther = NULL;
1214 #endif
1215     m_fLegacyJitUsed   = FALSE;
1216
1217 #ifdef ALLOW_SXS_JIT
1218     m_alternateJit     = NULL;
1219     m_AltJITCompiler   = NULL;
1220     m_AltJITRequired   = false;
1221 #endif
1222
1223     m_cleanupList = NULL;
1224 }
1225
1226 #if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
1227
1228 bool DoesOSSupportAVX()
1229 {
1230     LIMITED_METHOD_CONTRACT;
1231
1232 #ifndef FEATURE_PAL
1233     // On Windows we have an API (GetEnabledXStateFeatures) to check if AVX is supported
1234     typedef DWORD64 (WINAPI *PGETENABLEDXSTATEFEATURES)();
1235     PGETENABLEDXSTATEFEATURES pfnGetEnabledXStateFeatures = NULL;
1236     
1237     HMODULE hMod = WszLoadLibraryEx(WINDOWS_KERNEL32_DLLNAME_W, NULL, LOAD_LIBRARY_SEARCH_SYSTEM32);
1238     if(hMod == NULL)
1239         return FALSE;
1240         
1241     pfnGetEnabledXStateFeatures = (PGETENABLEDXSTATEFEATURES)GetProcAddress(hMod, "GetEnabledXStateFeatures");
1242         
1243     if (pfnGetEnabledXStateFeatures == NULL)
1244     {
1245         return FALSE;
1246     }
1247     
1248     DWORD64 FeatureMask = pfnGetEnabledXStateFeatures();
1249     if ((FeatureMask & XSTATE_MASK_AVX) == 0)
1250     {
1251         return FALSE;
1252     }
1253 #endif // !FEATURE_PAL
1254
1255     return TRUE;
1256 }
1257
1258 #endif // defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
1259
1260 void EEJitManager::SetCpuInfo()
1261 {
1262     LIMITED_METHOD_CONTRACT;
1263
1264     //
1265     // NOTE: This function needs to be kept in sync with Zapper::CompileAssembly()
1266     //
1267
1268     CORJIT_FLAGS CPUCompileFlags;
1269
1270 #if defined(_TARGET_X86_)
1271     // NOTE: if you're adding any flags here, you probably should also be doing it
1272     // for ngen (zapper.cpp)
1273     CORINFO_CPU cpuInfo;
1274     GetSpecificCpuInfo(&cpuInfo);
1275
1276     switch (CPU_X86_FAMILY(cpuInfo.dwCPUType))
1277     {
1278     case CPU_X86_PENTIUM_4:
1279         CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_TARGET_P4);
1280         break;
1281     default:
1282         break;
1283     }
1284
1285     if (CPU_X86_USE_CMOV(cpuInfo.dwFeatures))
1286     {
1287         CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_USE_CMOV);
1288         CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_USE_FCOMI);
1289     }
1290
1291     if (CPU_X86_USE_SSE2(cpuInfo.dwFeatures))
1292     {
1293         CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_USE_SSE2);
1294     }
1295 #endif // _TARGET_X86_
1296
1297 #if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
1298     unsigned char buffer[16];
1299     DWORD maxCpuId = getcpuid(0, buffer);
1300     if (maxCpuId >= 1)
1301     {
1302         // getcpuid executes cpuid with eax set to its first argument, and ecx cleared.
1303         // It returns the resulting eax in buffer[0-3], ebx in buffer[4-7], ecx in buffer[8-11],
1304         // and edx in buffer[12-15].
1305         // We will set the following flags:
1306         // CORJIT_FLAG_USE_SSE3_4 if the following feature bits are set (input EAX of 1)
1307         //    SSE3 - ECX bit 0     (buffer[8]  & 0x01)
1308         //    SSSE3 - ECX bit 9    (buffer[9]  & 0x02)
1309         //    SSE4.1 - ECX bit 19  (buffer[10] & 0x08)
1310         //    SSE4.2 - ECX bit 20  (buffer[10] & 0x10)
1311         // CORJIT_FLAG_USE_AVX if the following feature bits are set (input EAX of 1), and xmmYmmStateSupport returns 1:
1312         //    OSXSAVE - ECX bit 27 (buffer[11] & 0x08)
1313         //    AVX - ECX bit 28     (buffer[11] & 0x10)
1314         // CORJIT_FLAG_USE_AVX2 if the following feature bit is set (input EAX of 0x07 and input ECX of 0):
1315         //    AVX2 - EBX bit 5     (buffer[4]  & 0x20)
1316         // CORJIT_FLAG_USE_AVX_512 is not currently set, but defined so that it can be used in future without
1317         // synchronously updating VM and JIT.
1318         (void) getcpuid(1, buffer);
1319         // If SSE2 is not enabled, there is no point in checking the rest.
1320         // SSE2 is bit 26 of EDX   (buffer[15] & 0x04)
1321         // TODO: Determine whether we should break out the various SSE options further.
1322         if ((buffer[15] & 0x04) != 0)               // SSE2
1323         {
1324             if (((buffer[8]  & 0x01) != 0) &&       // SSE3
1325                 ((buffer[9]  & 0x02) != 0) &&       // SSSE3
1326                 ((buffer[10] & 0x08) != 0) &&       // SSE4.1
1327                 ((buffer[10] & 0x10) != 0))         // SSE4.2
1328             {
1329                 CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_USE_SSE3_4);
1330             }
1331             if ((buffer[11] & 0x18) == 0x18)
1332             {
1333                 if(DoesOSSupportAVX())
1334                 {
1335                     if (xmmYmmStateSupport() == 1)
1336                     {
1337                         CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_USE_AVX);
1338                         if (maxCpuId >= 0x07)
1339                         {
1340                             (void) getextcpuid(0, 0x07, buffer);
1341                             if ((buffer[4]  & 0x20) != 0)
1342                             {
1343                                 CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_USE_AVX2);
1344                             }
1345                         }
1346                     }
1347                 }
1348             }
1349             static ConfigDWORD fFeatureSIMD;
1350             if (fFeatureSIMD.val(CLRConfig::EXTERNAL_FeatureSIMD) != 0)
1351             {
1352                 CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_FEATURE_SIMD);
1353             }
1354         }
1355     }
1356 #endif // defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
1357
1358     m_CPUCompileFlags = CPUCompileFlags;
1359 }
1360
1361 // Define some data that we can use to get a better idea of what happened when we get a Watson dump that indicates the JIT failed to load.
1362 // This will be used and updated by the JIT loading and initialization functions, and the data written will be captured in a Watson dump.
1363
1364 enum JIT_LOAD_JIT_ID
1365 {
1366     JIT_LOAD_MAIN = 500,    // The "main" JIT. Normally, this is named "clrjit.dll". Start at a number that is somewhat uncommon (i.e., not zero or 1) to help distinguish from garbage, in process dumps.
1367     JIT_LOAD_LEGACY,        // The "legacy" JIT. Normally, this is named "compatjit.dll". This applies to AMD64 on Windows desktop, or x86 on Windows .NET Core.
1368     JIT_LOAD_ALTJIT         // An "altjit". By default, named "protojit.dll". Used both internally, as well as externally for JIT CTP builds.
1369 };
1370
1371 enum JIT_LOAD_STATUS
1372 {
1373     JIT_LOAD_STATUS_STARTING = 1001,                   // The JIT load process is starting. Start at a number that is somewhat uncommon (i.e., not zero or 1) to help distinguish from garbage, in process dumps.
1374     JIT_LOAD_STATUS_DONE_LOAD,                         // LoadLibrary of the JIT dll succeeded.
1375     JIT_LOAD_STATUS_DONE_GET_SXSJITSTARTUP,            // GetProcAddress for "sxsJitStartup" succeeded.
1376     JIT_LOAD_STATUS_DONE_CALL_SXSJITSTARTUP,           // Calling sxsJitStartup() succeeded.
1377     JIT_LOAD_STATUS_DONE_GET_JITSTARTUP,               // GetProcAddress for "jitStartup" succeeded.
1378     JIT_LOAD_STATUS_DONE_CALL_JITSTARTUP,              // Calling jitStartup() succeeded.
1379     JIT_LOAD_STATUS_DONE_GET_GETJIT,                   // GetProcAddress for "getJit" succeeded.
1380     JIT_LOAD_STATUS_DONE_CALL_GETJIT,                  // Calling getJit() succeeded.
1381     JIT_LOAD_STATUS_DONE_CALL_GETVERSIONIDENTIFIER,    // Calling ICorJitCompiler::getVersionIdentifier() succeeded.
1382     JIT_LOAD_STATUS_DONE_VERSION_CHECK,                // The JIT-EE version identifier check succeeded.
1383     JIT_LOAD_STATUS_DONE,                              // The JIT load is complete, and successful.
1384 };
1385
1386 struct JIT_LOAD_DATA
1387 {
1388     JIT_LOAD_JIT_ID     jld_id;         // Which JIT are we currently loading?
1389     JIT_LOAD_STATUS     jld_status;     // The current load status of a JIT load attempt.
1390     HRESULT             jld_hr;         // If the JIT load fails, the last jld_status will be JIT_LOAD_STATUS_STARTING.
1391                                         //   In that case, this will contain the HRESULT returned by LoadLibrary.
1392                                         //   Otherwise, this will be S_OK (which is zero).
1393 };
1394
1395 // Here's the global data for JIT load and initialization state.
1396 JIT_LOAD_DATA g_JitLoadData;
1397
1398 #if !defined(FEATURE_MERGE_JIT_AND_ENGINE)
1399
1400 // Global that holds the path to custom JIT location
1401 extern "C" LPCWSTR g_CLRJITPath = nullptr;
1402
1403 #endif // !defined(FEATURE_MERGE_JIT_AND_ENGINE)
1404
1405
1406 // LoadAndInitializeJIT: load the JIT dll into the process, and initialize it (call the UtilCode initialization function,
1407 // check the JIT-EE interface GUID, etc.)
1408 //
1409 // Parameters:
1410 //
1411 // pwzJitName        - The filename of the JIT .dll file to load. E.g., "altjit.dll".
1412 // phJit             - On return, *phJit is the Windows module handle of the loaded JIT dll. It will be NULL if the load failed.
1413 // ppICorJitCompiler - On return, *ppICorJitCompiler is the ICorJitCompiler* returned by the JIT's getJit() entrypoint.
1414 //                     It is NULL if the JIT returns a NULL interface pointer, or if the JIT-EE interface GUID is mismatched.
1415 //                     Note that if the given JIT is loaded, but the interface is mismatched, then *phJit will be legal and non-NULL
1416 //                     even though *ppICorJitCompiler is NULL. This allows the caller to unload the JIT dll, if necessary
1417 //                     (nobody does this today).
1418 // pJitLoadData      - Pointer to a structure that we update as we load and initialize the JIT to indicate how far we've gotten. This
1419 //                     is used to help understand problems we see with JIT loading that come in via Watson dumps. Since we don't throw
1420 //                     an exception immediately upon failure, we can lose information about what the failure was if we don't store this
1421 //                     information in a way that persists into a process dump.
1422 // 
1423
1424 static void LoadAndInitializeJIT(LPCWSTR pwzJitName, OUT HINSTANCE* phJit, OUT ICorJitCompiler** ppICorJitCompiler, IN OUT JIT_LOAD_DATA* pJitLoadData)
1425 {
1426     STANDARD_VM_CONTRACT;
1427
1428     _ASSERTE(phJit != NULL);
1429     _ASSERTE(ppICorJitCompiler != NULL);
1430     _ASSERTE(pJitLoadData != NULL);
1431
1432     pJitLoadData->jld_status = JIT_LOAD_STATUS_STARTING;
1433     pJitLoadData->jld_hr     = S_OK;
1434
1435     *phJit = NULL;
1436     *ppICorJitCompiler = NULL;
1437
1438     HRESULT hr = E_FAIL;
1439
1440     PathString CoreClrFolderHolder;
1441     extern HINSTANCE g_hThisInst;
1442     bool havePath = false;
1443
1444 #if !defined(FEATURE_MERGE_JIT_AND_ENGINE)
1445     if (g_CLRJITPath != nullptr)
1446     {
1447         // If we have been asked to load a specific JIT binary, load from that path.
1448         // The main JIT load will use exactly that name because pwzJitName will have
1449         // been computed as the last component of g_CLRJITPath by ExecutionManager::GetJitName().
1450         // Non-primary JIT names (such as compatjit or altjit) will be loaded from the
1451         // same directory.
1452         // (Ideally, g_CLRJITPath would just be the JIT path without the filename component,
1453         // but that's not how the JIT_PATH variable was originally defined.)
1454         CoreClrFolderHolder.Set(g_CLRJITPath);
1455         havePath = true;
1456     }
1457     else 
1458 #endif // !defined(FEATURE_MERGE_JIT_AND_ENGINE)
1459     if (WszGetModuleFileName(g_hThisInst, CoreClrFolderHolder))
1460     {
1461         // Load JIT from next to CoreCLR binary
1462         havePath = true;
1463     }
1464
1465     if (havePath && !CoreClrFolderHolder.IsEmpty())
1466     {
1467         SString::Iterator iter = CoreClrFolderHolder.End();
1468         BOOL findSep = CoreClrFolderHolder.FindBack(iter, DIRECTORY_SEPARATOR_CHAR_W);
1469         if (findSep)
1470         {
1471             SString sJitName(pwzJitName);
1472             CoreClrFolderHolder.Replace(iter + 1, CoreClrFolderHolder.End() - (iter + 1), sJitName);
1473
1474             *phJit = CLRLoadLibrary(CoreClrFolderHolder.GetUnicode());
1475             if (*phJit != NULL)
1476             {
1477                 hr = S_OK;
1478             }
1479         }
1480     }
1481
1482
1483     if (SUCCEEDED(hr))
1484     {
1485         pJitLoadData->jld_status = JIT_LOAD_STATUS_DONE_LOAD;
1486
1487         EX_TRY
1488         {
1489             bool fContinueToLoadJIT = false;
1490             // For CoreCLR, we never use "sxsJitStartup" as that is Desktop utilcode initialization
1491             // specific. Thus, assume it always succeeded and continue on to load the JIT.
1492             fContinueToLoadJIT = true;
1493
1494             if (fContinueToLoadJIT)
1495             {
1496                 typedef void (__stdcall* pjitStartup)(ICorJitHost*);
1497                 pjitStartup jitStartupFn = (pjitStartup) GetProcAddress(*phJit, "jitStartup");
1498
1499                 if (jitStartupFn)
1500                 {
1501                     pJitLoadData->jld_status = JIT_LOAD_STATUS_DONE_GET_JITSTARTUP;
1502
1503                     (*jitStartupFn)(JitHost::getJitHost());
1504
1505                     pJitLoadData->jld_status = JIT_LOAD_STATUS_DONE_CALL_JITSTARTUP;
1506                 }
1507
1508                 typedef ICorJitCompiler* (__stdcall* pGetJitFn)();
1509                 pGetJitFn getJitFn = (pGetJitFn) GetProcAddress(*phJit, "getJit");
1510
1511                 if (getJitFn)
1512                 {
1513                     pJitLoadData->jld_status = JIT_LOAD_STATUS_DONE_GET_GETJIT;
1514
1515                     ICorJitCompiler* pICorJitCompiler = (*getJitFn)();
1516                     if (pICorJitCompiler != NULL)
1517                     {
1518                         pJitLoadData->jld_status = JIT_LOAD_STATUS_DONE_CALL_GETJIT;
1519
1520                         GUID versionId;
1521                         memset(&versionId, 0, sizeof(GUID));
1522                         pICorJitCompiler->getVersionIdentifier(&versionId);
1523
1524                         pJitLoadData->jld_status = JIT_LOAD_STATUS_DONE_CALL_GETVERSIONIDENTIFIER;
1525
1526                         if (memcmp(&versionId, &JITEEVersionIdentifier, sizeof(GUID)) == 0)
1527                         {
1528                             pJitLoadData->jld_status = JIT_LOAD_STATUS_DONE_VERSION_CHECK;
1529
1530                             // The JIT has loaded and passed the version identifier test, so publish the JIT interface to the caller.
1531                             *ppICorJitCompiler = pICorJitCompiler;
1532
1533                             // The JIT is completely loaded and initialized now.
1534                             pJitLoadData->jld_status = JIT_LOAD_STATUS_DONE;
1535                         }
1536                         else
1537                         {
1538                             // Mismatched version ID. Fail the load.
1539                             LOG((LF_JIT, LL_FATALERROR, "LoadAndInitializeJIT: mismatched JIT version identifier in %S\n", pwzJitName));
1540                         }
1541                     }
1542                     else
1543                     {
1544                         LOG((LF_JIT, LL_FATALERROR, "LoadAndInitializeJIT: failed to get ICorJitCompiler in %S\n", pwzJitName));
1545                     }
1546                 }
1547                 else
1548                 {
1549                     LOG((LF_JIT, LL_FATALERROR, "LoadAndInitializeJIT: failed to find 'getJit' entrypoint in %S\n", pwzJitName));
1550                 }
1551             }
1552             else
1553             {
1554                 LOG((LF_JIT, LL_FATALERROR, "LoadAndInitializeJIT: failed to find 'sxsJitStartup' entrypoint in %S\n", pwzJitName));
1555             }
1556         }
1557         EX_CATCH
1558         {
1559             LOG((LF_JIT, LL_FATALERROR, "LoadAndInitializeJIT: caught an exception trying to initialize %S\n", pwzJitName));
1560         }
1561         EX_END_CATCH(SwallowAllExceptions)
1562     }
1563     else
1564     {
1565         pJitLoadData->jld_hr = hr;
1566         LOG((LF_JIT, LL_FATALERROR, "LoadAndInitializeJIT: failed to load %S, hr=0x%08x\n", pwzJitName, hr));
1567     }
1568 }
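// A minimal sketch of the call shape (mirrors the main-JIT load in LoadJIT()
// below; the local names here are hypothetical):
//     HINSTANCE hJit = NULL;
//     ICorJitCompiler* pJit = NULL;
//     g_JitLoadData.jld_id = JIT_LOAD_MAIN;
//     LoadAndInitializeJIT(ExecutionManager::GetJitName(), &hJit, &pJit, &g_JitLoadData);
//     // pJit == NULL here means the load, getJit(), or the version check failed;
//     // g_JitLoadData records how far the attempt got.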
1569
1570 #ifdef FEATURE_MERGE_JIT_AND_ENGINE
1571 EXTERN_C void __stdcall jitStartup(ICorJitHost* host);
1572 EXTERN_C ICorJitCompiler* __stdcall getJit();
1573 #endif // FEATURE_MERGE_JIT_AND_ENGINE
1574
1575 // Set this to the result of LoadJIT as a courtesy to code:CorCompileGetRuntimeDll
1576 extern HMODULE s_ngenCompilerDll;
1577
1578 BOOL EEJitManager::LoadJIT()
1579 {
1580     STANDARD_VM_CONTRACT;
1581
1582     // If the JIT is already loaded, don't take the lock.
1583     if (IsJitLoaded())
1584         return TRUE;
1585
1586     // Abuse m_EHClauseCritSec to ensure that the JIT is loaded on one thread only 
1587     CrstHolder chRead(&m_EHClauseCritSec);
1588
1589     // Did someone load the JIT before we got the lock?
1590     if (IsJitLoaded())
1591         return TRUE;
1592
1593     SetCpuInfo();
1594
1595     ICorJitCompiler* newJitCompiler = NULL;
1596
1597 #ifdef FEATURE_MERGE_JIT_AND_ENGINE
1598
1599     EX_TRY
1600     {
1601         jitStartup(JitHost::getJitHost());
1602
1603         newJitCompiler = getJit();
1604
1605         // We don't need to call getVersionIdentifier(), since the JIT is linked together with the VM.
1606     }
1607     EX_CATCH
1608     {
1609     }
1610     EX_END_CATCH(SwallowAllExceptions)
1611
1612 #else // !FEATURE_MERGE_JIT_AND_ENGINE
1613
1614     m_JITCompiler = NULL;
1615 #if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
1616     m_JITCompilerOther = NULL;
1617 #endif
1618
1619     g_JitLoadData.jld_id = JIT_LOAD_MAIN;
1620     LoadAndInitializeJIT(ExecutionManager::GetJitName(), &m_JITCompiler, &newJitCompiler, &g_JitLoadData);
1621
1622     // Set as a courtesy to code:CorCompileGetRuntimeDll
1623     s_ngenCompilerDll = m_JITCompiler;
1624
1625 #if defined(_TARGET_X86_)
1626     // If COMPlus_UseLegacyJit=1, then we fall back to compatjit.dll.
1627     //
1628     // This fallback mechanism was introduced for Visual Studio "14" Preview, when JIT64 (the legacy JIT) was replaced with
1629 // RyuJIT. It was desired to provide a fallback mechanism in case compatibility problems (or other bugs)
1630     // were discovered by customers. Setting this COMPLUS variable to 1 does not affect NGEN: existing NGEN images continue
1631     // to be used, and all subsequent NGEN compilations continue to use the new JIT.
1632     //
1633     // If this is a compilation process, then we don't allow specifying a fallback JIT. This is a case where, when NGEN'ing,
1634     // we sometimes need to JIT some things (such as when we are NGEN'ing mscorlib). In that case, we want to use exactly
1635     // the same JIT as NGEN uses. And NGEN doesn't follow the COMPlus_UseLegacyJit=1 switch -- it always uses clrjit.dll.
1636     //
1637     // Note that we always load and initialize the default JIT. This is to handle cases where obfuscators rely on
1638     // LoadLibrary("clrjit.dll") returning the module handle of the JIT, and then they call GetProcAddress("getJit") to get
1639     // the EE-JIT interface. They also do this without also calling sxsJitStartup()!
1640     //
1641     // In addition, for reasons related to servicing, we only use RyuJIT when the registry value UseRyuJIT (type DWORD), under
1642     // key HKLM\SOFTWARE\Microsoft\.NETFramework, is set to 1. Otherwise, we fall back to JIT64. Note that if this value
1643     // is set, we also must use JIT64 for all NGEN compilations as well.
1644     //
1645     // See the document "RyuJIT Compatibility Fallback Specification.docx" for details.
1646     //
1647     // For .NET Core 1.2, RyuJIT for x86 is the primary jit (clrjit.dll) and JIT32 for x86 is the fallback, legacy JIT (compatjit.dll).
1648 // Thus, the COMPlus_UseLegacyJit=1 mechanism has been enabled for x86 CoreCLR. This scenario does not have the UseRyuJIT
1649     // registry key, nor the AppX binder mode.
1650
1651     bool fUseRyuJit = true;
1652
1653     if ((!IsCompilationProcess() || !fUseRyuJit) &&     // Use RyuJIT for all NGEN, unless we're falling back to JIT64 for everything.
1654         (newJitCompiler != nullptr))    // the main JIT must successfully load before we try loading the fallback JIT
1655     {
1656         BOOL fUsingCompatJit = FALSE;
1657
1658         if (!fUseRyuJit)
1659         {
1660             fUsingCompatJit = TRUE;
1661         }
1662
1663         if (!fUsingCompatJit)
1664         {
1665             DWORD useLegacyJit = Configuration::GetKnobBooleanValue(W("System.JIT.UseWindowsX86CoreLegacyJit"), CLRConfig::EXTERNAL_UseWindowsX86CoreLegacyJit);
1666             if (useLegacyJit == 1)
1667             {
1668                 fUsingCompatJit = TRUE;
1669             }
1670         }
1671
1672 #if defined(FEATURE_APPX_BINDER)
1673         if (!fUsingCompatJit)
1674         {
1675             // AppX applications don't have a .config file for per-app configuration. So, we allow the placement of a single
1676             // distinguished file, "UseLegacyJit.txt" in the root of the app's package to indicate that the app should fall
1677             // back to JIT64. This same file is also used to prevent this app from participating in AutoNgen.
1678             if (AppX::IsAppXProcess())
1679             {
1680                 WCHAR szPathName[MAX_LONGPATH];
1681                 UINT32 cchPathName = MAX_LONGPATH;
1682                 if (AppX::FindFileInCurrentPackage(L"UseLegacyJit.txt", &cchPathName, szPathName, PACKAGE_FILTER_HEAD) == S_OK)
1683                 {
1684                     fUsingCompatJit = TRUE;
1685                 }
1686             }
1687         }
1688 #endif // FEATURE_APPX_BINDER
1689
1690         if (fUsingCompatJit)
1691         {
1692             // Now, load the compat jit and initialize it.
1693
1694             LPCWSTR pwzJitName = MAKEDLLNAME_W(W("compatjit"));
1695
1696             // Note: if the compatjit fails to load, we ignore it, and continue to use the main JIT for
1697             // everything. You can imagine a policy where if the user requests the compatjit, and we fail
1698             // to load it, that we fail noisily. We don't do that currently.
1699             ICorJitCompiler* fallbackICorJitCompiler;
1700             g_JitLoadData.jld_id = JIT_LOAD_LEGACY;
1701             LoadAndInitializeJIT(pwzJitName, &m_JITCompilerOther, &fallbackICorJitCompiler, &g_JitLoadData);
1702             if (fallbackICorJitCompiler != nullptr)
1703             {
1704                 // Tell the main JIT to fall back to the "fallback" JIT compiler, in case some
1705                 // obfuscator tries to directly call the main JIT's getJit() function.
1706                 newJitCompiler->setRealJit(fallbackICorJitCompiler);
1707
1708                 // Now, the compat JIT will be used.
1709                 m_fLegacyJitUsed = TRUE;
1710             }
1711         }
1712     }
1713 #endif // defined(_TARGET_X86_)
1714
1715 #endif // !FEATURE_MERGE_JIT_AND_ENGINE
1716
1717 #ifdef ALLOW_SXS_JIT
1718
1719     // Do not load altjit.dll unless COMPlus_AltJit is set.
1720     // Even if the main JIT fails to load, if the user asks for an altjit we try to load it.
1721     // This allows us to display load error messages for loading altjit.
1722
1723     ICorJitCompiler* newAltJitCompiler = NULL;
1724
1725     LPWSTR altJitConfig;
1726     IfFailThrow(CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_AltJit, &altJitConfig));
1727
1728     m_AltJITCompiler = NULL;
1729
1730     if (altJitConfig != NULL)
1731     {
1732         // Load the altjit into the system.
1733         // Note: altJitName must be declared as a const otherwise assigning the string
1734         // constructed by MAKEDLLNAME_W() to altJitName will cause a build break on Unix.
1735         LPCWSTR altJitName;
1736         IfFailThrow(CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_AltJitName, (LPWSTR*)&altJitName));
1737
1738         if (altJitName == NULL)
1739         {
1740             altJitName = MAKEDLLNAME_W(W("protojit"));
1741         }
1742
1743         g_JitLoadData.jld_id = JIT_LOAD_ALTJIT;
1744         LoadAndInitializeJIT(altJitName, &m_AltJITCompiler, &newAltJitCompiler, &g_JitLoadData);
1745     }
1746
1747 #endif // ALLOW_SXS_JIT 
1748
1749     // Publish the compilers.
1750
1751 #ifdef ALLOW_SXS_JIT
1752     m_AltJITRequired = (altJitConfig != NULL);
1753     m_alternateJit = newAltJitCompiler;
1754 #endif // ALLOW_SXS_JIT
1755
1756     m_jit = newJitCompiler;
1757
1758     // Failing to load the main JIT is a failure.
1759     // If the user requested an altjit and we failed to load an altjit, that is also a failure.
1760     // In either failure case, we'll rip down the VM, so there's no need to clean up (unload) any JIT that did load successfully.
1761     return IsJitLoaded();
1762 }
1763
1764 #ifndef CROSSGEN_COMPILE
1765 //**************************************************************************
1766
1767 CodeFragmentHeap::CodeFragmentHeap(LoaderAllocator * pAllocator, StubCodeBlockKind kind)
1768     : m_pAllocator(pAllocator), m_pFreeBlocks(NULL), m_kind(kind),
1769     // CRST_DEBUGGER_THREAD - We take this lock on the debugger thread during EnC add method
1770     m_CritSec(CrstCodeFragmentHeap, CrstFlags(CRST_UNSAFE_ANYMODE | CRST_DEBUGGER_THREAD))
1771 {
1772     WRAPPER_NO_CONTRACT;
1773 }
1774
1775 void CodeFragmentHeap::AddBlock(VOID * pMem, size_t dwSize)
1776 {
1777     LIMITED_METHOD_CONTRACT;
1778     FreeBlock * pBlock = (FreeBlock *)pMem;
1779     pBlock->m_pNext = m_pFreeBlocks;
1780     pBlock->m_dwSize = dwSize;
1781     m_pFreeBlocks = pBlock;
1782 }
1783
1784 void CodeFragmentHeap::RemoveBlock(FreeBlock ** ppBlock)
1785 {
1786     LIMITED_METHOD_CONTRACT;
1787     FreeBlock * pBlock = *ppBlock;
1788     *ppBlock = pBlock->m_pNext;
1789     ZeroMemory(pBlock, sizeof(FreeBlock));
1790 }
1791
1792 TaggedMemAllocPtr CodeFragmentHeap::RealAllocAlignedMem(size_t  dwRequestedSize
1793                     ,unsigned  dwAlignment
1794 #ifdef _DEBUG
1795                     ,__in __in_z const char *szFile
1796                     ,int  lineNum
1797 #endif
1798                     )
1799 {
1800     CrstHolder ch(&m_CritSec);
1801
1802     dwRequestedSize = ALIGN_UP(dwRequestedSize, sizeof(TADDR));
1803
1804     if (dwRequestedSize < sizeof(FreeBlock))
1805         dwRequestedSize = sizeof(FreeBlock);
1806
1807     // We will try to batch up allocation of small blocks into one large allocation
1808 #define SMALL_BLOCK_THRESHOLD 0x100
1809     SIZE_T nFreeSmallBlocks = 0;
1810
1811     FreeBlock ** ppBestFit = NULL;
1812     FreeBlock ** ppFreeBlock = &m_pFreeBlocks;
1813     while (*ppFreeBlock != NULL)
1814     {
1815         FreeBlock * pFreeBlock = *ppFreeBlock;
1816         if (((BYTE *)pFreeBlock + pFreeBlock->m_dwSize) - (BYTE *)ALIGN_UP(pFreeBlock, dwAlignment) >= (SSIZE_T)dwRequestedSize)
1817         {
1818             if (ppBestFit == NULL || pFreeBlock->m_dwSize < (*ppBestFit)->m_dwSize)
1819                 ppBestFit = ppFreeBlock;
1820         }
1821         else
1822         {
1823             if (pFreeBlock->m_dwSize < SMALL_BLOCK_THRESHOLD)
1824                 nFreeSmallBlocks++;
1825         }
1826         ppFreeBlock = &(*ppFreeBlock)->m_pNext;
1827     }
1828
1829     VOID * pMem;
1830     SIZE_T dwSize;
1831     if (ppBestFit != NULL)
1832     {
1833         pMem = *ppBestFit;
1834         dwSize = (*ppBestFit)->m_dwSize;
1835
1836         RemoveBlock(ppBestFit);
1837     }
1838     else
1839     {
1840         dwSize = dwRequestedSize;
1841         if (dwSize < SMALL_BLOCK_THRESHOLD)
1842             dwSize = 4 * SMALL_BLOCK_THRESHOLD;
1843         pMem = ExecutionManager::GetEEJitManager()->allocCodeFragmentBlock(dwSize, dwAlignment, m_pAllocator, m_kind);
1844     }
1845
1846     SIZE_T dwExtra = (BYTE *)ALIGN_UP(pMem, dwAlignment) - (BYTE *)pMem;
1847     _ASSERTE(dwSize >= dwExtra + dwRequestedSize);
1848     SIZE_T dwRemaining = dwSize - (dwExtra + dwRequestedSize);
1849
1850     // Avoid accumulation of too many small blocks. The more small free blocks we have, the more picky we are going to be about adding new ones.
1851     if ((dwRemaining >= max(sizeof(FreeBlock), sizeof(StubPrecode)) + (SMALL_BLOCK_THRESHOLD / 0x10) * nFreeSmallBlocks) || (dwRemaining >= SMALL_BLOCK_THRESHOLD))
1852     {
1853         AddBlock((BYTE *)pMem + dwExtra + dwRequestedSize, dwRemaining);
1854         dwSize -= dwRemaining;
1855     }
1856
1857     TaggedMemAllocPtr tmap;
1858     tmap.m_pMem             = pMem;
1859     tmap.m_dwRequestedSize  = dwSize;
1860     tmap.m_pHeap            = this;
1861     tmap.m_dwExtra          = dwExtra;
1862 #ifdef _DEBUG
1863     tmap.m_szFile           = szFile;
1864     tmap.m_lineNum          = lineNum;
1865 #endif
1866     return tmap;
1867 }
1868
1869 void CodeFragmentHeap::RealBackoutMem(void *pMem
1870                     , size_t dwSize
1871 #ifdef _DEBUG
1872                     , __in __in_z const char *szFile
1873                     , int lineNum
1874                     , __in __in_z const char *szAllocFile
1875                     , int allocLineNum
1876 #endif
1877                     )
1878 {
1879     CrstHolder ch(&m_CritSec);
1880
1881     _ASSERTE(dwSize >= sizeof(FreeBlock));
1882
1883     ZeroMemory((BYTE *)pMem, dwSize);
1884
1885     //
1886     // Try to coalesce blocks if possible
1887     //
1888     FreeBlock ** ppFreeBlock = &m_pFreeBlocks;
1889     while (*ppFreeBlock != NULL)
1890     {
1891         FreeBlock * pFreeBlock = *ppFreeBlock;
1892
1893         if ((BYTE *)pFreeBlock == (BYTE *)pMem + dwSize)
1894         {
1895             // pMem = pMem;
1896             dwSize += pFreeBlock->m_dwSize;
1897             RemoveBlock(ppFreeBlock);
1898             continue;
1899         }
1900         else
1901         if ((BYTE *)pFreeBlock + pFreeBlock->m_dwSize == (BYTE *)pMem)
1902         {
1903             pMem = pFreeBlock;
1904             dwSize += pFreeBlock->m_dwSize;
1905             RemoveBlock(ppFreeBlock);
1906             continue;
1907         }
1908
1909         ppFreeBlock = &(*ppFreeBlock)->m_pNext;
1910     }
1911
1912     AddBlock(pMem, dwSize);
1913 }
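// Coalescing sketch (hypothetical addresses): if the free list holds a block at
// 0x3000 of size 0x100 and RealBackoutMem(0x2F00, 0x100) is called, the first
// branch above absorbs the 0x3000 block into the backed-out region, so AddBlock()
// publishes a single free block [0x2F00, 0x3100) instead of two adjacent fragments.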
1914 #endif // !CROSSGEN_COMPILE
1915
1916 //**************************************************************************
1917
1918 LoaderCodeHeap::LoaderCodeHeap(size_t * pPrivatePCLBytes)
1919     : m_LoaderHeap(pPrivatePCLBytes,
1920                    0,                       // RangeList *pRangeList
1921                    TRUE),                   // BOOL fMakeExecutable
1922     m_cbMinNextPad(0)
1923 {
1924     WRAPPER_NO_CONTRACT;
1925 }
1926
1927 void ThrowOutOfMemoryWithinRange()
1928 {
1929     CONTRACTL {
1930         THROWS;
1931         GC_NOTRIGGER;
1932     } CONTRACTL_END;
1933
1934     // Allow breaking into debugger or terminating the process when this exception occurs
1935     switch (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_BreakOnOutOfMemoryWithinRange))
1936     {
1937     case 1:
1938         DebugBreak();
1939         break;
1940     case 2:
1941         EEPOLICY_HANDLE_FATAL_ERROR(COR_E_OUTOFMEMORY);
1942         break;   
1943     default:
1944         break;
1945     }
1946
1947     EX_THROW(EEMessageException, (kOutOfMemoryException, IDS_EE_OUT_OF_MEMORY_WITHIN_RANGE));
1948 }
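// Illustrative usage (the COMPlus_* spelling is an assumed mapping; CLRConfig
// knobs are conventionally surfaced as COMPlus_* environment variables):
//     set COMPlus_BreakOnOutOfMemoryWithinRange=1   // DebugBreak() before throwing
//     set COMPlus_BreakOnOutOfMemoryWithinRange=2   // fail fast with COR_E_OUTOFMEMORY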
1949
1950 #ifdef _TARGET_AMD64_
1951 BYTE * EEJitManager::AllocateFromEmergencyJumpStubReserve(const BYTE * loAddr, const BYTE * hiAddr, SIZE_T * pReserveSize)
1952 {
1953     CONTRACTL {
1954         NOTHROW;
1955         GC_NOTRIGGER;
1956         PRECONDITION(m_CodeHeapCritSec.OwnedByCurrentThread());
1957     } CONTRACTL_END;
1958
1959     for (EmergencyJumpStubReserve ** ppPrev = &m_pEmergencyJumpStubReserveList; *ppPrev != NULL; ppPrev = &(*ppPrev)->m_pNext)
1960     {
1961         EmergencyJumpStubReserve * pList = *ppPrev;
1962
1963         if (loAddr <= pList->m_ptr &&
1964             pList->m_ptr + pList->m_size < hiAddr)
1965         {
1966             *ppPrev = pList->m_pNext;
1967
1968             BYTE * pBlock = pList->m_ptr;
1969             *pReserveSize = pList->m_size;
1970
1971             delete pList;
1972
1973             return pBlock;
1974         }
1975     }
1976
1977     return NULL;
1978 }
1979
1980 VOID EEJitManager::EnsureJumpStubReserve(BYTE * pImageBase, SIZE_T imageSize, SIZE_T reserveSize)
1981 {
1982     CONTRACTL {
1983         THROWS;
1984         GC_NOTRIGGER;
1985     } CONTRACTL_END;
1986
1987     CrstHolder ch(&m_CodeHeapCritSec);
1988
1989     BYTE * loAddr = pImageBase + imageSize + INT32_MIN;
1990     if (loAddr > pImageBase) loAddr = NULL; // overflow
1991
1992     BYTE * hiAddr = pImageBase + INT32_MAX;
1993     if (hiAddr < pImageBase) hiAddr = (BYTE *)UINT64_MAX; // overflow
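    // Worked example (hypothetical addresses): with pImageBase = 0x00007FF800000000
    // and imageSize = 0x01000000, the window is
    //     [pImageBase + imageSize - 2GB, pImageBase + 2GB)
    // so a jump stub placed anywhere inside it is rel32-reachable from every call
    // site in the image: the displacement always fits in INT32_MIN..INT32_MAX.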
1994
1995     for (EmergencyJumpStubReserve * pList = m_pEmergencyJumpStubReserveList; pList != NULL; pList = pList->m_pNext)
1996     {
1997         if (loAddr <= pList->m_ptr &&
1998             pList->m_ptr + pList->m_size < hiAddr)
1999         {
2000             SIZE_T used = min(reserveSize, pList->m_free);
2001             pList->m_free -= used;
2002
2003             reserveSize -= used;
2004             if (reserveSize == 0)
2005                 return;
2006         }        
2007     }
2008
2009     // Try several different strategies - the most efficient one first
2010     int allocMode = 0;
2011
2012     // Try to reserve at least 16MB at a time
2013     SIZE_T allocChunk = max(ALIGN_UP(reserveSize, VIRTUAL_ALLOC_RESERVE_GRANULARITY), 16*1024*1024);
2014
2015     while (reserveSize > 0)
2016     {
2017         NewHolder<EmergencyJumpStubReserve> pNewReserve(new EmergencyJumpStubReserve());
2018
2019         for (;;)
2020         {
2021             BYTE * loAddrCurrent = loAddr;
2022             BYTE * hiAddrCurrent = hiAddr;
2023
2024             switch (allocMode)
2025             {
2026             case 0:
2027                 // First, try to allocate towards the center of the allowed range. It is more likely to 
2028                 // satisfy subsequent reservations.
2029                 loAddrCurrent = loAddr + (hiAddr - loAddr) / 8;
2030                 hiAddrCurrent = hiAddr - (hiAddr - loAddr) / 8;
2031                 break;
2032             case 1:
2033                 // Try the whole allowed range
2034                 break;
2035             case 2:
2036                 // If the large allocation failed, retry with small chunk size
2037                 allocChunk = VIRTUAL_ALLOC_RESERVE_GRANULARITY;
2038                 break;
2039             default:
2040                 return; // Unable to allocate the reserve - give up
2041             }
2042
2043             pNewReserve->m_ptr = ClrVirtualAllocWithinRange(loAddrCurrent, hiAddrCurrent,
2044                                                allocChunk, MEM_RESERVE, PAGE_NOACCESS);
2045
2046             if (pNewReserve->m_ptr != NULL)
2047                 break;
2048
2049             // Retry with the next allocation strategy
2050             allocMode++;
2051         }
2052
2053         SIZE_T used = min(allocChunk, reserveSize);
2054         reserveSize -= used;
2055
2056         pNewReserve->m_size = allocChunk;
2057         pNewReserve->m_free = allocChunk - used;
2058
2059         // Add it to the list
2060         pNewReserve->m_pNext = m_pEmergencyJumpStubReserveList;
2061         m_pEmergencyJumpStubReserveList = pNewReserve.Extract();
2062     }
2063 }
2064 #endif // _TARGET_AMD64_
2065
2066 HeapList* LoaderCodeHeap::CreateCodeHeap(CodeHeapRequestInfo *pInfo, LoaderHeap *pJitMetaHeap)
2067 {
2068     CONTRACT(HeapList *) {
2069         THROWS;
2070         GC_NOTRIGGER;
2071         POSTCONDITION(CheckPointer(RETVAL));
2072     } CONTRACT_END;
2073
2074     size_t * pPrivatePCLBytes   = NULL;
2075     size_t   reserveSize        = pInfo->getReserveSize();
2076     size_t   initialRequestSize = pInfo->getRequestSize();
2077     const BYTE *   loAddr       = pInfo->m_loAddr;
2078     const BYTE *   hiAddr       = pInfo->m_hiAddr;
2079
2080     // Make sure that what we are reserving will fit inside a DWORD
2081     if (reserveSize != (DWORD) reserveSize)
2082     {
2083         _ASSERTE(!"reserveSize does not fit in a DWORD");
2084         EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE);
2085     }
2086
2087 #ifdef ENABLE_PERF_COUNTERS
2088     pPrivatePCLBytes   = &(GetPerfCounters().m_Loading.cbLoaderHeapSize);
2089 #endif
2090
2091     LOG((LF_JIT, LL_INFO100,
2092          "Request new LoaderCodeHeap::CreateCodeHeap(%08x, %08x, for loader allocator" FMT_ADDR "in" FMT_ADDR ".." FMT_ADDR ")\n",
2093          (DWORD) reserveSize, (DWORD) initialRequestSize, DBG_ADDR(pInfo->m_pAllocator), DBG_ADDR(loAddr), DBG_ADDR(hiAddr)
2094                                 ));
2095
2096     NewHolder<LoaderCodeHeap> pCodeHeap(new LoaderCodeHeap(pPrivatePCLBytes));
2097
2098     BYTE * pBaseAddr = NULL;
2099     DWORD dwSizeAcquiredFromInitialBlock = 0;
2100     bool fAllocatedFromEmergencyJumpStubReserve = false;
2101
2102     pBaseAddr = (BYTE *)pInfo->m_pAllocator->GetCodeHeapInitialBlock(loAddr, hiAddr, (DWORD)initialRequestSize, &dwSizeAcquiredFromInitialBlock);
2103     if (pBaseAddr != NULL)
2104     {
2105         pCodeHeap->m_LoaderHeap.SetReservedRegion(pBaseAddr, dwSizeAcquiredFromInitialBlock, FALSE);
2106     }
2107     else
2108     {
2109         if (loAddr != NULL || hiAddr != NULL)
2110         {
2111             pBaseAddr = ClrVirtualAllocWithinRange(loAddr, hiAddr,
2112                                                    reserveSize, MEM_RESERVE, PAGE_NOACCESS);
2113
2114             if (!pBaseAddr)
2115             {
2116 #ifdef _TARGET_AMD64_
2117                 pBaseAddr = ExecutionManager::GetEEJitManager()->AllocateFromEmergencyJumpStubReserve(loAddr, hiAddr, &reserveSize);
2118                 if (!pBaseAddr)
2119                     ThrowOutOfMemoryWithinRange();
2120                 fAllocatedFromEmergencyJumpStubReserve = true;                
2121 #else
2122                 ThrowOutOfMemoryWithinRange();
2123 #endif // _TARGET_AMD64_
2124             }
2125         }
2126         else
2127         {
2128             pBaseAddr = ClrVirtualAllocExecutable(reserveSize, MEM_RESERVE, PAGE_NOACCESS);
2129             if (!pBaseAddr)
2130                 ThrowOutOfMemory();
2131         }
2132         pCodeHeap->m_LoaderHeap.SetReservedRegion(pBaseAddr, reserveSize, TRUE);
2133     }
2134
2135
2136     // this first allocation is critical as it correctly sets up the loader heap info
2137     HeapList *pHp = (HeapList*)pCodeHeap->m_LoaderHeap.AllocMem(sizeof(HeapList));
2138
2139     pHp->pHeap = pCodeHeap;
2140
2141     size_t heapSize = pCodeHeap->m_LoaderHeap.GetReservedBytesFree();
2142     size_t nibbleMapSize = HEAP2MAPSIZE(ROUND_UP_TO_PAGE(heapSize));
2143
2144     pHp->startAddress    = (TADDR)pHp + sizeof(HeapList);
2145
2146     pHp->endAddress      = pHp->startAddress;
2147     pHp->maxCodeHeapSize = heapSize;
2148
2149     _ASSERTE(heapSize >= initialRequestSize);
2150
2151     // We do not need to memset this memory, since ClrVirtualAlloc() guarantees that the memory is zero.
2152     // Furthermore, if we avoid writing to it, these pages don't come into our working set
2153
2154     pHp->bFull           = fAllocatedFromEmergencyJumpStubReserve;
2155     pHp->bFullForJumpStubs = false;
2156
2157     pHp->cBlocks         = 0;
2158
2159     pHp->mapBase         = ROUND_DOWN_TO_PAGE(pHp->startAddress);  // round down to the next lower page boundary
2160     pHp->pHdrMap         = (DWORD*)(void*)pJitMetaHeap->AllocMem(S_SIZE_T(nibbleMapSize));
2161
2162     LOG((LF_JIT, LL_INFO100,
2163          "Created new CodeHeap(" FMT_ADDR ".." FMT_ADDR ")\n",
2164          DBG_ADDR(pHp->startAddress), DBG_ADDR(pHp->startAddress+pHp->maxCodeHeapSize)
2165          ));
2166
2167 #ifdef _WIN64
2168     emitJump((LPBYTE)pHp->CLRPersonalityRoutine, (void *)ProcessCLRException);
2169 #endif
2170
2171     pCodeHeap.SuppressRelease();
2172     RETURN pHp;
2173 }
2174
2175 void * LoaderCodeHeap::AllocMemForCode_NoThrow(size_t header, size_t size, DWORD alignment)
2176 {
2177     CONTRACTL {
2178         NOTHROW;
2179         GC_NOTRIGGER;
2180     } CONTRACTL_END;
2181
2182     if (m_cbMinNextPad > (SSIZE_T)header) header = m_cbMinNextPad;
2183
2184     void * p = m_LoaderHeap.AllocMemForCode_NoThrow(header, size, alignment);
2185     if (p == NULL)
2186         return NULL;
2187
2188     // If the next allocation would have started in the same nibble map entry, allocate extra space to prevent it from happening
2189     // Note that m_cbMinNextPad can be negative
2190     m_cbMinNextPad = ALIGN_UP((SIZE_T)p + 1, BYTES_PER_BUCKET) - ((SIZE_T)p + size);
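    // Example with hypothetical values (BYTES_PER_BUCKET = 0x20): for p = 0x1000 and
    // size = 0x27, ALIGN_UP(0x1001, 0x20) = 0x1020 while p + size = 0x1027, so
    // m_cbMinNextPad = -7. A non-positive pad requests no extra header space; a
    // positive pad pushes the next allocation's start into the following bucket.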
2191
2192     return p;
2193 }
2194
2195 void CodeHeapRequestInfo::Init()
2196 {
2197     CONTRACTL {
2198         NOTHROW;
2199         GC_NOTRIGGER;
2200         PRECONDITION((m_hiAddr == 0) ||
2201                      ((m_loAddr < m_hiAddr) &&
2202                       ((m_loAddr + m_requestSize) < m_hiAddr)));
2203     } CONTRACTL_END;
2204
2205     if (m_pAllocator == NULL)
2206         m_pAllocator = m_pMD->GetLoaderAllocatorForCode();
2207     m_isDynamicDomain = (m_pMD != NULL) ? m_pMD->IsLCGMethod() : false;
2208     m_isCollectible = m_pAllocator->IsCollectible() ? true : false;
2209 }
2210
2211 #ifdef WIN64EXCEPTIONS
2212
2213 #ifdef _WIN64
2214 extern "C" PT_RUNTIME_FUNCTION GetRuntimeFunctionCallback(IN ULONG64   ControlPc,
2215                                                         IN PVOID     Context)
2216 #else
2217 extern "C" PT_RUNTIME_FUNCTION GetRuntimeFunctionCallback(IN ULONG     ControlPc,
2218                                                         IN PVOID     Context)
2219 #endif
2220 {
2221     WRAPPER_NO_CONTRACT;
2222
2223     PT_RUNTIME_FUNCTION prf = NULL;
2224
2225     // We must preserve this so that GCStress=4 EH processing doesn't kill the last error.
2226     BEGIN_PRESERVE_LAST_ERROR;
2227
2228 #ifdef ENABLE_CONTRACTS
2229     // See comment in code:Thread::SwitchIn and SwitchOut.
2230     Thread *pThread = GetThread();
2231     if (!(pThread && pThread->HasThreadStateNC(Thread::TSNC_InTaskSwitch)))
2232     {
2233
2234     // Some 64-bit OOM tests use the hosting interface to re-enter the CLR via
2235     // RtlVirtualUnwind to track unique stacks at each failure point. RtlVirtualUnwind can
2236     // result in the EEJitManager taking a reader lock. This, in turn, results in a
2237     // CANNOT_TAKE_LOCK contract violation if a CANNOT_TAKE_LOCK function were on the stack
2238     // at the time. While it's theoretically possible for "real" hosts also to re-enter the
2239     // CLR via RtlVirtualUnwind, generally they don't, and we'd actually like to catch a real
2240     // host causing such a contract violation. Therefore, we'd like to suppress such contract
2241     // asserts when these OOM tests are running, but continue to enforce the contracts by
2242     // default. This function returns whether to suppress locking violations.
2243     CONDITIONAL_CONTRACT_VIOLATION(
2244         TakesLockViolation, 
2245         g_pConfig->SuppressLockViolationsOnReentryFromOS());
2246 #endif // ENABLE_CONTRACTS
2247
2248     EECodeInfo codeInfo((PCODE)ControlPc);
2249     if (codeInfo.IsValid())
2250         prf = codeInfo.GetFunctionEntry();
2251
2252     LOG((LF_EH, LL_INFO1000000, "GetRuntimeFunctionCallback(%p) returned %p\n", ControlPc, prf));
2253
2254 #ifdef ENABLE_CONTRACTS
2255     }
2256 #endif // ENABLE_CONTRACTS
2257
2258     END_PRESERVE_LAST_ERROR;
2259
2260     return  prf;
2261 }
2262 #endif // WIN64EXCEPTIONS
2263
2264 HeapList* EEJitManager::NewCodeHeap(CodeHeapRequestInfo *pInfo, DomainCodeHeapList *pADHeapList)
2265 {
2266     CONTRACT(HeapList *) {
2267         THROWS;
2268         GC_NOTRIGGER;
2269         PRECONDITION(m_CodeHeapCritSec.OwnedByCurrentThread());
2270         POSTCONDITION(CheckPointer(RETVAL));
2271     } CONTRACT_END;
2272
2273     size_t initialRequestSize = pInfo->getRequestSize();
2274     size_t minReserveSize = VIRTUAL_ALLOC_RESERVE_GRANULARITY; //     ( 64 KB)           
2275
2276 #ifdef _WIN64
2277     if (pInfo->m_hiAddr == 0)
2278     {
2279         if (pADHeapList->m_CodeHeapList.Count() > CODE_HEAP_SIZE_INCREASE_THRESHOLD)
2280         {
2281             minReserveSize *= 4; // Increase the code heap size to 256 KB for workloads with a lot of code.
2282         }
2283
2284         // For non-DynamicDomains that don't have a loAddr/hiAddr range
2285         // we bump up the reserve size for the 64-bit platforms
2286         if (!pInfo->IsDynamicDomain())
2287         {
2288             minReserveSize *= 4; // CodeHeaps are larger on AMD64 (256 KB to 1024 KB)
2289         }
2290     }
2291 #endif
2292
2293     // <BUGNUM> VSW 433293 </BUGNUM>
2294     // SETUP_NEW_BLOCK reserves the first sizeof(LoaderHeapBlock) bytes for LoaderHeapBlock.
2295     // In other words, the first m_pAllocPtr starts at sizeof(LoaderHeapBlock) bytes
2296     // after the allocated memory. Therefore, we need to take it into account.
2297     size_t requestAndHeadersSize = sizeof(LoaderHeapBlock) + sizeof(HeapList) + initialRequestSize;
2298
2299     size_t reserveSize = requestAndHeadersSize;
2300     if (reserveSize < minReserveSize)
2301         reserveSize = minReserveSize;
2302     reserveSize = ALIGN_UP(reserveSize, VIRTUAL_ALLOC_RESERVE_GRANULARITY);
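    // Example (hypothetical request): an initialRequestSize of 0x2000 becomes
    // sizeof(LoaderHeapBlock) + sizeof(HeapList) + 0x2000, is raised to
    // minReserveSize (64 KB here, possibly scaled up above on _WIN64), and is then
    // aligned to the VirtualAlloc reserve granularity, so many small methods can
    // share a single reservation.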
2303
2304     pInfo->setReserveSize(reserveSize);
2305
2306     HeapList *pHp = NULL;
2307
2308     DWORD flags = RangeSection::RANGE_SECTION_CODEHEAP;
2309
2310     if (pInfo->IsDynamicDomain())
2311     {
2312         flags |= RangeSection::RANGE_SECTION_COLLECTIBLE;
2313         pHp = HostCodeHeap::CreateCodeHeap(pInfo, this);
2314     }
2315     else
2316     {
2317         LoaderHeap *pJitMetaHeap = pADHeapList->m_pAllocator->GetLowFrequencyHeap();
2318
2319         if (pInfo->IsCollectible())
2320             flags |= RangeSection::RANGE_SECTION_COLLECTIBLE;
2321
2322         pHp = LoaderCodeHeap::CreateCodeHeap(pInfo, pJitMetaHeap);
2323     }
2324
2325     _ASSERTE (pHp != NULL);
2326     _ASSERTE (pHp->maxCodeHeapSize >= initialRequestSize);
2327
2328     pHp->SetNext(GetCodeHeapList());
2329
2330     EX_TRY
2331     {
2332         TADDR pStartRange = (TADDR) pHp;
2333         TADDR pEndRange = (TADDR) &((BYTE*)pHp->startAddress)[pHp->maxCodeHeapSize];
2334
2335         ExecutionManager::AddCodeRange(pStartRange,
2336                                        pEndRange,
2337                                        this,
2338                                        (RangeSection::RangeSectionFlags)flags,
2339                                        pHp);
2340         //
2341         // add a table to cover each range in the range list
2342         //
2343         InstallEEFunctionTable(
2344                   (PVOID)pStartRange,   // this is just an ID that gets passed to RtlDeleteFunctionTable;
2345                   (PVOID)pStartRange,
2346                   (ULONG)((ULONG64)pEndRange - (ULONG64)pStartRange),
2347                   GetRuntimeFunctionCallback,
2348                   this,
2349                   DYNFNTABLE_JIT);
2350     }
2351     EX_CATCH
2352     {
2353         // If we failed to alloc memory in ExecutionManager::AddCodeRange()
2354         // then we will delete the LoaderHeap that we allocated
2355
2356         // pHp is allocated in pHeap, so only need to delete the LoaderHeap itself
2357         delete pHp->pHeap;
2358
2359         pHp = NULL;
2360     }
2361     EX_END_CATCH(SwallowAllExceptions)
2362
2363     if (pHp == NULL)
2364     {
2365         ThrowOutOfMemory();
2366     }
2367
2368     m_pCodeHeap = pHp;
2369
2370     HeapList **ppHeapList = pADHeapList->m_CodeHeapList.AppendThrowing();
2371     *ppHeapList = pHp;
2372
2373     RETURN(pHp);
2374 }
2375
2376 void* EEJitManager::allocCodeRaw(CodeHeapRequestInfo *pInfo,
2377                                  size_t header, size_t blockSize, unsigned align, 
2378                                  HeapList ** ppCodeHeap /* Writeback, Can be null */ )
2379 {
2380     CONTRACT(void *) {
2381         THROWS;
2382         GC_NOTRIGGER;
2383         PRECONDITION(m_CodeHeapCritSec.OwnedByCurrentThread());
2384         POSTCONDITION(CheckPointer(RETVAL));
2385     } CONTRACT_END;
2386
2387     pInfo->setRequestSize(header+blockSize+(align-1));
2388
2389     // Initialize the writeback value to NULL if a non-NULL pointer was provided
2390     if (ppCodeHeap)
2391         *ppCodeHeap = NULL;
2392
2393     void *      mem       = NULL;
2394
2395     bool bForJumpStubs = (pInfo->m_loAddr != 0) || (pInfo->m_hiAddr != 0);
2396     bool bUseCachedDynamicCodeHeap = pInfo->IsDynamicDomain();
2397
2398     HeapList * pCodeHeap;
2399
2400     for (;;)
2401     {
2402         // Avoid going through the full list in the common case - try to use the most recently used codeheap
2403         if (bUseCachedDynamicCodeHeap)
2404         {
2405             pCodeHeap = (HeapList *)pInfo->m_pAllocator->m_pLastUsedDynamicCodeHeap;
2406             pInfo->m_pAllocator->m_pLastUsedDynamicCodeHeap = NULL;
2407         }
2408         else
2409         {
2410             pCodeHeap = (HeapList *)pInfo->m_pAllocator->m_pLastUsedCodeHeap;
2411             pInfo->m_pAllocator->m_pLastUsedCodeHeap = NULL;
2412         }
2413
2414
2415         // If we will use a cached code heap for jump stubs, ensure that the code heap meets the loAddr and hiAddr constraints
2416         if (bForJumpStubs && pCodeHeap && !CanUseCodeHeap(pInfo, pCodeHeap))
2417         {
2418             pCodeHeap = NULL;
2419         }
2420
2421         // If we don't have a cached code heap or can't use it, get a code heap
2422         if (pCodeHeap == NULL)
2423         {
2424             pCodeHeap = GetCodeHeap(pInfo);
2425             if (pCodeHeap == NULL)
2426                 break;
2427         }
2428
2429 #ifdef _WIN64
2430         if (!bForJumpStubs)
2431         {
2432             //
2433             // Keep a small reserve at the end of the codeheap for jump stubs. It should reduce the
2434             // chance that we won't be able to allocate a jump stub because of a lack of suitable address space.
2435             //
2436             // It is not a perfect solution. Ideally, we would be able to either ensure that jump stub
2437             // allocation won't fail or handle jump stub allocation gracefully (see DevDiv #381823 and 
2438             // related bugs for details).
2439             //
2440             static ConfigDWORD configCodeHeapReserveForJumpStubs;
2441             int percentReserveForJumpStubs = configCodeHeapReserveForJumpStubs.val(CLRConfig::INTERNAL_CodeHeapReserveForJumpStubs);
2442
2443             size_t reserveForJumpStubs = percentReserveForJumpStubs * (pCodeHeap->maxCodeHeapSize / 100);
2444
2445             size_t minReserveForJumpStubs = sizeof(CodeHeader) +
2446                 sizeof(JumpStubBlockHeader) + (size_t) DEFAULT_JUMPSTUBS_PER_BLOCK * BACK_TO_BACK_JUMP_ALLOCATE_SIZE +
2447                 CODE_SIZE_ALIGN + BYTES_PER_BUCKET;
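            // Example (hypothetical 2% knob value): for a 1 MB code heap,
            // reserveForJumpStubs = 2 * (0x100000 / 100), about 20 KB, which stays
            // free at the end of the heap; the reserve is skipped entirely when it
            // could not even hold one block of DEFAULT_JUMPSTUBS_PER_BLOCK stubs.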
2448
2449             // Reserve only if the size can fit a cluster of jump stubs
2450             if (reserveForJumpStubs > minReserveForJumpStubs)
2451             {
2452                 size_t occupiedSize = pCodeHeap->endAddress - pCodeHeap->startAddress;
2453
2454                 if (occupiedSize + pInfo->getRequestSize() + reserveForJumpStubs > pCodeHeap->maxCodeHeapSize)
2455                 {
2456                     pCodeHeap->SetHeapFull();
2457                     continue;
2458                 }
2459             }
2460         }
2461 #endif
2462
2463         mem = (pCodeHeap->pHeap)->AllocMemForCode_NoThrow(header, blockSize, align);
2464         if (mem != NULL)
2465             break;
2466
2467         // The current heap couldn't handle our request. Mark it as full.
2468         if (bForJumpStubs)
2469             pCodeHeap->SetHeapFullForJumpStubs();
2470         else
2471             pCodeHeap->SetHeapFull();
2472     }
2473
2474     if (mem == NULL)
2475     {
2476         // Let us create a new heap.
2477
2478         DomainCodeHeapList *pList = GetCodeHeapList(pInfo->m_pMD, pInfo->m_pAllocator);
2479         if (pList == NULL)
2480         {
2481             // not found so need to create the first one
2482             pList = CreateCodeHeapList(pInfo);
2483             _ASSERTE(pList == GetCodeHeapList(pInfo->m_pMD, pInfo->m_pAllocator));
2484         }
2485         _ASSERTE(pList);
2486
2487         pCodeHeap = NewCodeHeap(pInfo, pList);
2488         _ASSERTE(pCodeHeap);
2489
2490         mem = (pCodeHeap->pHeap)->AllocMemForCode_NoThrow(header, blockSize, align);
2491         if (mem == NULL)
2492             ThrowOutOfMemory();
2493         _ASSERTE(mem);
2494     }
2495     
2496     if (bUseCachedDynamicCodeHeap)
2497     {
2498         pInfo->m_pAllocator->m_pLastUsedDynamicCodeHeap = pCodeHeap;
2499     }
2500     else
2501     {
2502         pInfo->m_pAllocator->m_pLastUsedCodeHeap = pCodeHeap;
2503     }
2504
2505
2506     // Record the pCodeHeap value into ppCodeHeap, if a non-NULL pointer was provided
2507     if (ppCodeHeap)
2508         *ppCodeHeap = pCodeHeap;
2509
2510     _ASSERTE((TADDR)mem >= pCodeHeap->startAddress);
2511
2512     if (((TADDR) mem)+blockSize > (TADDR)pCodeHeap->endAddress)
2513     {
2514         // Update the CodeHeap endAddress
2515         pCodeHeap->endAddress = (TADDR)mem+blockSize;
2516     }
2517
2518     RETURN(mem);
2519 }
2520
2521 CodeHeader* EEJitManager::allocCode(MethodDesc* pMD, size_t blockSize, CorJitAllocMemFlag flag
2522 #ifdef WIN64EXCEPTIONS
2523                                     , UINT nUnwindInfos
2524                                     , TADDR * pModuleBase
2525 #endif
2526                                     )
2527 {
2528     CONTRACT(CodeHeader *) {
2529         THROWS;
2530         GC_NOTRIGGER;
2531         POSTCONDITION(CheckPointer(RETVAL));
2532     } CONTRACT_END;
2533
2534     //
2535     // Alignment
2536     //
2537
2538     unsigned alignment = CODE_SIZE_ALIGN;
2539     
2540     if ((flag & CORJIT_ALLOCMEM_FLG_16BYTE_ALIGN) != 0) 
2541     {
2542         alignment = max(alignment, 16);
2543     }
2544     
2545 #if defined(_TARGET_X86_)
2546     // when not optimizing for code size, 8-byte align the method entry point, so that
2547     // the JIT can in turn 8-byte align the loop entry headers.
2548     // 
2549     // when ReJIT is enabled, 8-byte-align the method entry point so that we may use an
2550     // 8-byte interlocked operation to atomically poke the top most bytes (e.g., to
2551     // redirect the rejit jmp-stamp at the top of the method from the prestub to the
2552     // rejitted code, or to reinstate original code on a revert).
2553     else if ((g_pConfig->GenOptimizeType() != OPT_SIZE) ||
2554         ReJitManager::IsReJITEnabled())
2555     {
2556         alignment = max(alignment, 8);
2557     }
2558 #endif
2559
2560     //
2561     // Compute header layout
2562     //
2563
2564     SIZE_T totalSize = blockSize;
2565
2566 #if defined(USE_INDIRECT_CODEHEADER)
2567     SIZE_T realHeaderSize = offsetof(RealCodeHeader, unwindInfos[0]) + (sizeof(T_RUNTIME_FUNCTION) * nUnwindInfos); 
2568
2569     // if this is a LCG method then we will be allocating the RealCodeHeader
2570     // following the code so that the code block can be removed easily by 
2571     // the LCG code heap.
2572     if (pMD->IsLCGMethod())
2573     {
2574         totalSize = ALIGN_UP(totalSize, sizeof(void*)) + realHeaderSize;
2575         static_assert_no_msg(CODE_SIZE_ALIGN >= sizeof(void*));
2576     }
2577 #endif  // USE_INDIRECT_CODEHEADER
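    // Layout sketch for the LCG case above (proportions illustrative):
    //     [CodeHeader][code ... padded to pointer alignment][RealCodeHeader + unwind infos]
    // Keeping the RealCodeHeader in the same block lets the LCG code heap free
    // everything for the method with a single deallocation.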
2578
2579     CodeHeader * pCodeHdr = NULL;
2580
2581     CodeHeapRequestInfo requestInfo(pMD);
2582
2583     // Scope the lock
2584     {
2585         CrstHolder ch(&m_CodeHeapCritSec);
2586
2587         HeapList *pCodeHeap = NULL;
2588
2589         TADDR pCode = (TADDR) allocCodeRaw(&requestInfo, sizeof(CodeHeader), totalSize, alignment, &pCodeHeap);
2590
2591         _ASSERTE(pCodeHeap);
2592
2593         if (pMD->IsLCGMethod())
2594         {
2595             pMD->AsDynamicMethodDesc()->GetLCGMethodResolver()->m_recordCodePointer = (void*) pCode;
2596         }
2597
2598         _ASSERTE(IS_ALIGNED(pCode, alignment));
2599
2600         JIT_PERF_UPDATE_X86_CODE_SIZE(totalSize);
2601
2602         // Initialize the CodeHeader *BEFORE* we publish this code range via the nibble
2603         // map so that we don't have to harden readers against uninitialized data.
2604         // However, because we hold the lock, this initialization should be fast and cheap!
2605
2606         pCodeHdr = ((CodeHeader *)pCode) - 1;
2607
2608 #ifdef USE_INDIRECT_CODEHEADER
2609         if (pMD->IsLCGMethod())
2610         {
2611             pCodeHdr->SetRealCodeHeader((BYTE*)pCode + ALIGN_UP(blockSize, sizeof(void*)));
2612         }
2613         else
2614         {
2615             // TODO: think about the CodeHeap carrying around a RealCodeHeader chunking mechanism
2616             //
2617             // allocate the real header in the low frequency heap
2618             BYTE* pRealHeader = (BYTE*)(void*)pMD->GetLoaderAllocator()->GetLowFrequencyHeap()->AllocMem(S_SIZE_T(realHeaderSize));
2619             pCodeHdr->SetRealCodeHeader(pRealHeader);
2620         }
2621 #endif
2622
2623         pCodeHdr->SetDebugInfo(NULL);
2624         pCodeHdr->SetEHInfo(NULL);
2625         pCodeHdr->SetGCInfo(NULL);
2626         pCodeHdr->SetMethodDesc(pMD);
2627 #ifdef WIN64EXCEPTIONS
2628         pCodeHdr->SetNumberOfUnwindInfos(nUnwindInfos);
2629         *pModuleBase = (TADDR)pCodeHeap;
2630 #endif
2631
2632         NibbleMapSet(pCodeHeap, pCode, TRUE);
2633     }
2634
2635     RETURN(pCodeHdr);
2636 }
2637
2638 EEJitManager::DomainCodeHeapList *EEJitManager::GetCodeHeapList(MethodDesc *pMD, LoaderAllocator *pAllocator, BOOL fDynamicOnly)
2639 {
2640     CONTRACTL {
2641         NOTHROW;
2642         GC_NOTRIGGER;
2643         PRECONDITION(m_CodeHeapCritSec.OwnedByCurrentThread());
2644     } CONTRACTL_END;
2645
2646     DomainCodeHeapList *pList = NULL;
2647     DomainCodeHeapList **ppList = NULL;
2648     int count = 0;
2649
2650     // get the appropriate list of heaps
2651     // pMD is NULL for NGen modules during Module::LoadTokenTables
2652     if (fDynamicOnly || (pMD != NULL && pMD->IsLCGMethod()))
2653     {
2654         ppList = m_DynamicDomainCodeHeaps.Table();
2655         count = m_DynamicDomainCodeHeaps.Count();
2656     }
2657     else
2658     {
2659         ppList = m_DomainCodeHeaps.Table();
2660         count = m_DomainCodeHeaps.Count();
2661     }
2662
2663     // this is a virtual call - pull it out of the loop
2664     BOOL fCanUnload = pAllocator->CanUnload();
2665
2666     // look for a DomainCodeHeapList
2667     for (int i=0; i < count; i++)
2668     {
2669         if (ppList[i]->m_pAllocator == pAllocator ||
2670             (!fCanUnload && !ppList[i]->m_pAllocator->CanUnload()))
2671         {
2672             pList = ppList[i];
2673             break;
2674         }
2675     }
2676     return pList;
2677 }
2678
2679 HeapList* EEJitManager::GetCodeHeap(CodeHeapRequestInfo *pInfo)
2680 {
2681     CONTRACT(HeapList *) {
2682         NOTHROW;
2683         GC_NOTRIGGER;
2684         PRECONDITION(m_CodeHeapCritSec.OwnedByCurrentThread());
2685     } CONTRACT_END;
2686
2687     HeapList *pResult = NULL;
2688
2689     _ASSERTE(pInfo->m_pAllocator != NULL);
2690
2691     // loop through the m_DomainCodeHeaps to find the AppDomain
2692     // if not found, then create it
2693     DomainCodeHeapList *pList = GetCodeHeapList(pInfo->m_pMD, pInfo->m_pAllocator);
2694     if (pList)
2695     {
2696         // Set pResult to the largest non-full HeapList
2697         // that also satisfies the [loAddr..hiAddr] constraint
2698         for (int i=0; i < pList->m_CodeHeapList.Count(); i++)
2699         {
2700             HeapList *pCurrent   = pList->m_CodeHeapList[i];
2701
2702             // Validate that the code heap can be used for the current request
2703             if(CanUseCodeHeap(pInfo, pCurrent))
2704             {
2705                 if (pResult == NULL)
2706                 {
2707                     // pCurrent is the first (and possibly only) heap that would satisfy the request
2708                     pResult = pCurrent;
2709                 }
2710                 // We use the initial creation size as a discriminator (i.e., largest heap)
2711                 else if (pResult->maxCodeHeapSize < pCurrent->maxCodeHeapSize)
2712                 {
2713                     pResult = pCurrent;
2714                 }
2715             }
2716         }
2717     }
2718
2719     RETURN (pResult);
2720 }
2721
2722 bool EEJitManager::CanUseCodeHeap(CodeHeapRequestInfo *pInfo, HeapList *pCodeHeap)
2723 {
2724     CONTRACTL {
2725         NOTHROW;
2726         GC_NOTRIGGER;
2727         PRECONDITION(m_CodeHeapCritSec.OwnedByCurrentThread());
2728     } CONTRACTL_END;
2729
2730     bool retVal = false;
2731
2732     if ((pInfo->m_loAddr == 0) && (pInfo->m_hiAddr == 0))
2733     {
2734         if (!pCodeHeap->IsHeapFull())
2735         {
2736             // We have no constraint, so this non-empty heap will be able to satisfy our request
2737             if (pInfo->IsDynamicDomain())
2738             {
2739                 retVal = true;
2740             }
2741             else
2742             {
2743                 BYTE * lastAddr = (BYTE *) pCodeHeap->startAddress + pCodeHeap->maxCodeHeapSize;
2744
2745                 BYTE * loRequestAddr  = (BYTE *) pCodeHeap->endAddress;
2746                 BYTE * hiRequestAddr  = loRequestAddr + pInfo->getRequestSize() + BYTES_PER_BUCKET;
2747                 if (hiRequestAddr <= lastAddr)
2748                 {
2749                     retVal = true;
2750                 }
2751             }
2752         }
2753     }
2754     else
2755     {
2756         if (!pCodeHeap->IsHeapFullForJumpStubs())
2757         {
2758             // We also check to see if an allocation in this heap would satisfy
2759             // the [loAddr..hiAddr] requirement
2760             
2761             // Calculate the byte range that can ever be returned by
2762             // an allocation in this HeapList element
2763             //
2764             BYTE * firstAddr      = (BYTE *) pCodeHeap->startAddress;
2765             BYTE * lastAddr       = (BYTE *) pCodeHeap->startAddress + pCodeHeap->maxCodeHeapSize;
2766
2767             _ASSERTE(pCodeHeap->startAddress <= pCodeHeap->endAddress);
2768             _ASSERTE(firstAddr <= lastAddr);
2769             
2770             if (pInfo->IsDynamicDomain())
2771             {
2772                 // We check to see if every allocation in this heap
2773                 // will satisfy the [loAddr..hiAddr] requirement.
2774                 //
2775                 // Dynamic domains use a free list allocator, 
2776                 // thus we can receive any address in the range
2777                 // when calling AllocMemory with a DynamicDomain
2778             
2779                 // [firstaddr .. lastAddr] must be entirely within
2780                 // [pInfo->m_loAddr .. pInfo->m_hiAddr]
2781                 //
2782                 if ((pInfo->m_loAddr <= firstAddr)   &&
2783                     (lastAddr        <= pInfo->m_hiAddr))
2784                 {
2785                     // This heap will always satisfy our constraint
2786                     retVal = true;
2787                 }
2788             }
2789             else // non-DynamicDomain
2790             {
2791                 // Calculate the byte range that would be allocated for the
2792                 // next allocation request into [loRequestAddr..hiRequestAddr]
2793                 //
2794                 BYTE * loRequestAddr  = (BYTE *) pCodeHeap->endAddress;
2795                 BYTE * hiRequestAddr  = loRequestAddr + pInfo->getRequestSize() + BYTES_PER_BUCKET;
2796                 _ASSERTE(loRequestAddr <= hiRequestAddr);
2797
2798                 // loRequestAddr and hiRequestAddr must be entirely within
2799                 // [pInfo->m_loAddr .. pInfo->m_hiAddr]
2800                 // additionally hiRequestAddr must also be less than
2801                 // or equal to lastAddr
2802                 //
2803                 if ((pInfo->m_loAddr <= loRequestAddr)   &&
2804                     (hiRequestAddr   <= pInfo->m_hiAddr) &&
2805                     (hiRequestAddr   <= lastAddr))
2806                 {
2807                    // This heap will be able to satisfy our constraint
2808                    retVal = true;
2809                 }
2810             }
2811        }
2812    }
2813
2814    return retVal; 
2815 }
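// Example (hypothetical jump-stub request): with [m_loAddr, m_hiAddr] spanning
// roughly target +/- 2GB, a non-dynamic heap qualifies only when its next
// allocation window [endAddress, endAddress + requestSize + BYTES_PER_BUCKET)
// fits inside both that range and the heap's own reserved region.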
2816
2817 EEJitManager::DomainCodeHeapList * EEJitManager::CreateCodeHeapList(CodeHeapRequestInfo *pInfo)
2818 {
2819     CONTRACTL {
2820         THROWS;
2821         GC_NOTRIGGER;
2822         PRECONDITION(m_CodeHeapCritSec.OwnedByCurrentThread());
2823     } CONTRACTL_END;
2824
2825     NewHolder<DomainCodeHeapList> pNewList(new DomainCodeHeapList());
2826     pNewList->m_pAllocator = pInfo->m_pAllocator;
2827
2828     DomainCodeHeapList **ppList = NULL;
2829     if (pInfo->IsDynamicDomain())
2830         ppList = m_DynamicDomainCodeHeaps.AppendThrowing();
2831     else
2832         ppList = m_DomainCodeHeaps.AppendThrowing();
2833     *ppList = pNewList;
2834
2835     return pNewList.Extract();
2836 }
2837
2838 LoaderHeap *EEJitManager::GetJitMetaHeap(MethodDesc *pMD)
2839 {
2840     CONTRACTL {
2841         NOTHROW;
2842         GC_NOTRIGGER;
2843     } CONTRACTL_END;
2844
2845     LoaderAllocator *pAllocator = pMD->GetLoaderAllocator();
2846     _ASSERTE(pAllocator);
2847
2848     return pAllocator->GetLowFrequencyHeap();
2849 }
2850
2851 BYTE* EEJitManager::allocGCInfo(CodeHeader* pCodeHeader, DWORD blockSize, size_t * pAllocationSize)
2852 {
2853     CONTRACTL {
2854         THROWS;
2855         GC_NOTRIGGER;
2856     } CONTRACTL_END;
2857
2858     MethodDesc* pMD = pCodeHeader->GetMethodDesc();
2859     // Sadly, for lightweight code generation (LCG) we need the check in here. We should change GetJitMetaHeap.
2860     if (pMD->IsLCGMethod()) 
2861     {
2862         CrstHolder ch(&m_CodeHeapCritSec);
2863         pCodeHeader->SetGCInfo((BYTE*)(void*)pMD->AsDynamicMethodDesc()->GetResolver()->GetJitMetaHeap()->New(blockSize));
2864     }
2865     else
2866     {
2867         pCodeHeader->SetGCInfo((BYTE*) (void*)GetJitMetaHeap(pMD)->AllocMem(S_SIZE_T(blockSize)));
2868     }
2869     _ASSERTE(pCodeHeader->GetGCInfo()); // AllocMem throws if there's not enough memory
2870     JIT_PERF_UPDATE_X86_CODE_SIZE(blockSize);
2871
2872     * pAllocationSize = blockSize;  // Store the allocation size so we can back it out later.
2873     
2874     return(pCodeHeader->GetGCInfo());
2875 }
2876
2877 void* EEJitManager::allocEHInfoRaw(CodeHeader* pCodeHeader, DWORD blockSize, size_t * pAllocationSize)
2878 {
2879     CONTRACTL {
2880         THROWS;
2881         GC_NOTRIGGER;
2882     } CONTRACTL_END;
2883
2884     MethodDesc* pMD = pCodeHeader->GetMethodDesc();
2885     void * mem = NULL;
2886
2887     // Sadly, for lightweight code generation (LCG) we need the check in here. We should change GetJitMetaHeap.
2888     if (pMD->IsLCGMethod()) 
2889     {
2890         CrstHolder ch(&m_CodeHeapCritSec);
2891         mem = (void*)pMD->AsDynamicMethodDesc()->GetResolver()->GetJitMetaHeap()->New(blockSize);
2892     }
2893     else 
2894     {
2895         mem = (void*)GetJitMetaHeap(pMD)->AllocMem(S_SIZE_T(blockSize));
2896     }
2897     _ASSERTE(mem);   // AllocMem throws if there's not enough memory
2898
2899     JIT_PERF_UPDATE_X86_CODE_SIZE(blockSize);
2900
2901     * pAllocationSize = blockSize; // Store the allocation size so we can back it out later.
2902     
2903     return(mem);
2904 }
2905
2906
2907 EE_ILEXCEPTION* EEJitManager::allocEHInfo(CodeHeader* pCodeHeader, unsigned numClauses, size_t * pAllocationSize)
2908 {
2909     CONTRACTL {
2910         THROWS;
2911         GC_NOTRIGGER;
2912     } CONTRACTL_END;
2913
2914     // Note - pCodeHeader->phdrJitEHInfo - sizeof(size_t) contains the number of EH clauses
2915
2916     DWORD temp =  EE_ILEXCEPTION::Size(numClauses);
2917     DWORD blockSize = 0;
2918     if (!ClrSafeInt<DWORD>::addition(temp, sizeof(size_t), blockSize))
2919         COMPlusThrowOM();
2920
2921     BYTE *EHInfo = (BYTE*)allocEHInfoRaw(pCodeHeader, blockSize, pAllocationSize);
2922
2923     pCodeHeader->SetEHInfo((EE_ILEXCEPTION*) (EHInfo + sizeof(size_t)));
2924     pCodeHeader->GetEHInfo()->Init(numClauses);
2925     *((size_t *)EHInfo) = numClauses;
2926     return(pCodeHeader->GetEHInfo());
2927 }
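// For reference, the layout allocEHInfo produces (the clause count is stashed
// in a size_t immediately in front of the pointer stored in the CodeHeader):
//
//   raw block:  [ size_t numClauses ][ EE_ILEXCEPTION header ][ clause 0 ]...[ clause N-1 ]
//               ^EHInfo              ^pCodeHeader->GetEHInfo()
//
// This is why InitializeEHEnumeration below reads the count back from
// GetEHInfo() - sizeof(size_t).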
2928
2929 JumpStubBlockHeader *  EEJitManager::allocJumpStubBlock(MethodDesc* pMD, DWORD numJumps, 
2930                                                         BYTE * loAddr, BYTE * hiAddr,
2931                                                         LoaderAllocator *pLoaderAllocator)
2932 {
2933     CONTRACT(JumpStubBlockHeader *) {
2934         THROWS;
2935         GC_NOTRIGGER;
2936         PRECONDITION(loAddr < hiAddr);
2937         PRECONDITION(pLoaderAllocator != NULL);
2938         POSTCONDITION(CheckPointer(RETVAL));
2939     } CONTRACT_END;
2940
2941     _ASSERTE((sizeof(JumpStubBlockHeader) % CODE_SIZE_ALIGN) == 0);
2942     _ASSERTE(numJumps < MAX_M_ALLOCATED);
2943
2944     size_t blockSize = sizeof(JumpStubBlockHeader) + (size_t) numJumps * BACK_TO_BACK_JUMP_ALLOCATE_SIZE;
2945
2946     HeapList *pCodeHeap = NULL;
2947     CodeHeapRequestInfo    requestInfo(pMD, pLoaderAllocator, loAddr, hiAddr);
2948
2949     TADDR                  mem;
2950     JumpStubBlockHeader *  pBlock;
2951
2952     // Scope the lock
2953     {
2954         CrstHolder ch(&m_CodeHeapCritSec);
2955
2956         mem = (TADDR) allocCodeRaw(&requestInfo, sizeof(TADDR), blockSize, CODE_SIZE_ALIGN, &pCodeHeap);
2957
2958         // CodeHeader comes immediately before the block
2959         CodeHeader * pCodeHdr = (CodeHeader *) (mem - sizeof(CodeHeader));
2960         pCodeHdr->SetStubCodeBlockKind(STUB_CODE_BLOCK_JUMPSTUB);
2961
2962         NibbleMapSet(pCodeHeap, mem, TRUE);
2963
2964         pBlock = (JumpStubBlockHeader *)mem;
2965
2966         _ASSERTE(IS_ALIGNED(pBlock, CODE_SIZE_ALIGN));
2967     
2968         JIT_PERF_UPDATE_X86_CODE_SIZE(blockSize);
2969     }
2970
2971     pBlock->m_next            = NULL;
2972     pBlock->m_used            = 0;
2973     pBlock->m_allocated       = numJumps;
2974     if (pMD && pMD->IsLCGMethod())
2975         pBlock->SetHostCodeHeap(static_cast<HostCodeHeap*>(pCodeHeap->pHeap));
2976     else
2977         pBlock->SetLoaderAllocator(pLoaderAllocator);
2978
2979     LOG((LF_JIT, LL_INFO1000, "Allocated new JumpStubBlockHeader for %d stubs at" FMT_ADDR " in loader allocator " FMT_ADDR "\n",
2980          numJumps, DBG_ADDR(pBlock) , DBG_ADDR(pLoaderAllocator) ));
2981
2982     RETURN(pBlock);
2983 }
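// Resulting layout of a jump stub block (sketch): the CodeHeader placed in
// front is what lets FindMethodCode/GetStubCodeBlockKind classify this range
// as a stub block rather than a managed method.
//
//   [ CodeHeader, kind = STUB_CODE_BLOCK_JUMPSTUB ][ JumpStubBlockHeader ][ numJumps * BACK_TO_BACK_JUMP_ALLOCATE_SIZE ]
//                                                  ^mem / pBlock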
2984
2985 void * EEJitManager::allocCodeFragmentBlock(size_t blockSize, unsigned alignment, LoaderAllocator *pLoaderAllocator, StubCodeBlockKind kind)
2986 {
2987     CONTRACT(void *) {
2988         THROWS;
2989         GC_NOTRIGGER;
2990         PRECONDITION(pLoaderAllocator != NULL);
2991         POSTCONDITION(CheckPointer(RETVAL));
2992     } CONTRACT_END;
2993
2994     HeapList *pCodeHeap = NULL;
2995     CodeHeapRequestInfo    requestInfo(NULL, pLoaderAllocator, NULL, NULL);
2996
2997     TADDR                  mem;
2998
2999     // Scope the lock
3000     {
3001         CrstHolder ch(&m_CodeHeapCritSec);
3002
3003         mem = (TADDR) allocCodeRaw(&requestInfo, sizeof(CodeHeader), blockSize, alignment, &pCodeHeap);
3004
3005         // CodeHeader comes immediately before the block
3006         CodeHeader * pCodeHdr = (CodeHeader *) (mem - sizeof(CodeHeader));
3007         pCodeHdr->SetStubCodeBlockKind(kind);
3008
3009         NibbleMapSet(pCodeHeap, (TADDR)mem, TRUE);
3010     }
3011
3012     RETURN((void *)mem);
3013 }
3014
3015 #endif // !DACCESS_COMPILE
3016
3017
3018 GCInfoToken EEJitManager::GetGCInfoToken(const METHODTOKEN& MethodToken)
3019 {
3020     CONTRACTL {
3021         NOTHROW;
3022         GC_NOTRIGGER;
3023         HOST_NOCALLS;
3024         SUPPORTS_DAC;
3025     } CONTRACTL_END;
3026
3027     // The JIT-ed code always has the current version of GCInfo 
3028     return{ GetCodeHeader(MethodToken)->GetGCInfo(), GCINFO_VERSION };
3029 }
3030
3031 // creates an enumeration and returns the number of EH clauses
3032 unsigned EEJitManager::InitializeEHEnumeration(const METHODTOKEN& MethodToken, EH_CLAUSE_ENUMERATOR* pEnumState)
3033 {
3034     LIMITED_METHOD_CONTRACT;
3035     EE_ILEXCEPTION * EHInfo = GetCodeHeader(MethodToken)->GetEHInfo();
3036
3037     pEnumState->iCurrentPos = 0;     // since the EH info is not compressed, the clause number is used to do the enumeration
3038     pEnumState->pExceptionClauseArray = NULL;
3039
3040     if (!EHInfo)
3041         return 0;
3042
3043     pEnumState->pExceptionClauseArray = dac_cast<TADDR>(EHInfo->EHClause(0));
3044     return *(dac_cast<PTR_unsigned>(dac_cast<TADDR>(EHInfo) - sizeof(size_t)));
3045 }
3046
3047 PTR_EXCEPTION_CLAUSE_TOKEN EEJitManager::GetNextEHClause(EH_CLAUSE_ENUMERATOR* pEnumState,
3048                               EE_ILEXCEPTION_CLAUSE* pEHClauseOut)
3049 {
3050     CONTRACTL {
3051         NOTHROW;
3052         GC_NOTRIGGER;
3053     } CONTRACTL_END;
3054
3055     unsigned iCurrentPos = pEnumState->iCurrentPos;
3056     pEnumState->iCurrentPos++;
3057
3058     EE_ILEXCEPTION_CLAUSE* pClause = &(dac_cast<PTR_EE_ILEXCEPTION_CLAUSE>(pEnumState->pExceptionClauseArray)[iCurrentPos]);
3059     *pEHClauseOut = *pClause;
3060     return dac_cast<PTR_EXCEPTION_CLAUSE_TOKEN>(pClause);
3061 }
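// A hypothetical caller of the two routines above (sketch only, not compiled;
// pJitMan, methodToken, and the fields a caller would inspect are assumed for
// illustration):
#if 0
EH_CLAUSE_ENUMERATOR enumState;
unsigned count = pJitMan->InitializeEHEnumeration(methodToken, &enumState);
for (unsigned i = 0; i < count; i++)
{
    EE_ILEXCEPTION_CLAUSE clause;
    pJitMan->GetNextEHClause(&enumState, &clause);
    // ... inspect the clause, e.g. its flags and try/handler offsets ...
}
#endif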
3062
3063 #ifndef DACCESS_COMPILE
3064 TypeHandle EEJitManager::ResolveEHClause(EE_ILEXCEPTION_CLAUSE* pEHClause,
3065                                          CrawlFrame *pCf)
3066 {
3067     // We don't want to use a runtime contract here since this codepath is used during
3068     // the processing of a hard SO. Contracts use a significant amount of stack
3069     // which we can't afford for those cases.
3070     STATIC_CONTRACT_THROWS;
3071     STATIC_CONTRACT_GC_TRIGGERS;
3072
3073     _ASSERTE(NULL != pCf);
3074     _ASSERTE(NULL != pEHClause);
3075     _ASSERTE(IsTypedHandler(pEHClause));
3076     
3077
3078     TypeHandle typeHnd = TypeHandle();
3079     mdToken typeTok = mdTokenNil;
3080
3081     {
3082         CrstHolder chRead(&m_EHClauseCritSec);
3083         if (HasCachedTypeHandle(pEHClause))
3084         {
3085             typeHnd = TypeHandle::FromPtr(pEHClause->TypeHandle);
3086         }
3087         else
3088         {
3089             typeTok = pEHClause->ClassToken;
3090         }
3091     }
3092
3093     if (!typeHnd.IsNull())
3094     {
3095         return typeHnd;
3096     }
3097     
3098     MethodDesc* pMD = pCf->GetFunction();
3099     Module* pModule = pMD->GetModule();
3100     PREFIX_ASSUME(pModule != NULL);
3101
3102     SigTypeContext typeContext(pMD);
3103     VarKind k = hasNoVars;
3104     
3105     // In the vast majority of cases the code under the "if" below
3106     // will not be executed.
3107     //
3108     // First grab the representative instantiations.  For code
3109     // shared by multiple generic instantiations these are the
3110     // canonical (representative) instantiation.
3111     if (TypeFromToken(typeTok) == mdtTypeSpec)
3112     {
3113         PCCOR_SIGNATURE pSig;
3114         ULONG cSig;
3115         IfFailThrow(pModule->GetMDImport()->GetTypeSpecFromToken(typeTok, &pSig, &cSig));
3116         
3117         SigPointer psig(pSig, cSig);
3118         k = psig.IsPolyType(&typeContext);
3119         
3120         // Grab the active class and method instantiation.  This exact instantiation is only
3121         // needed in the corner case of "generic" exception catching in shared
3122         // generic code.  We don't need the exact instantiation if the token
3123         // doesn't contain E_T_VAR or E_T_MVAR.
3124         if ((k & hasSharableVarsMask) != 0)
3125         {
3126             Instantiation classInst;
3127             Instantiation methodInst;
3128             pCf->GetExactGenericInstantiations(&classInst, &methodInst);
3129             SigTypeContext::InitTypeContext(pMD,classInst, methodInst,&typeContext);
3130         }
3131     }
3132
3133     typeHnd = ClassLoader::LoadTypeDefOrRefOrSpecThrowing(pModule, typeTok, &typeContext, 
3134                                                           ClassLoader::ReturnNullIfNotFound);
3135
3136     // If the type (pModule,typeTok) was not loaded or not
3137     // restored then the exception object won't have this type, because an
3138     // object of this type has not been allocated.    
3139     if (typeHnd.IsNull())
3140         return typeHnd;
3141
3142     // We can cache any exception specification except:
3143     //   - If the type contains type variables in generic code,
3144     //     e.g. catch E<T> where T is a type variable.
3145     // We CANNOT cache E<T> in non-shared instantiations of generic code because
3146     // there is only one EHClause cache for the IL, shared across all instantiations.
3147     //
3148     if((k & hasAnyVarsMask) == 0)
3149     {
3150         CrstHolder chWrite(&m_EHClauseCritSec);
3151     
3152         // Note another thread might have beaten us to it ...
3153         if (!HasCachedTypeHandle(pEHClause))
3154         {
3155             // We should never cache a NULL typeHnd.
3156             _ASSERTE(!typeHnd.IsNull());
3157             pEHClause->TypeHandle = typeHnd.AsPtr();
3158             SetHasCachedTypeHandle(pEHClause);            
3159         }
3160         else
3161         {
3162             // If we raced in here with another thread and got held up on the lock, then we just need to return the
3163             // type handle that the other thread put into the clause.
3164             // The typeHnd we found and the typeHnd the racing thread found should always be the same
3165             _ASSERTE(typeHnd.AsPtr() == pEHClause->TypeHandle);
3166             typeHnd = TypeHandle::FromPtr(pEHClause->TypeHandle);
3167         }
3168     }
3169     return typeHnd;
3170 }
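// The publication step above is a classic first-writer-wins cache under a
// lock; reduced to its essentials it looks like this (sketch only, not
// compiled):
#if 0
TypeHandle CacheOrReadBack(EE_ILEXCEPTION_CLAUSE* pClause, TypeHandle resolved)
{
    CrstHolder lock(&m_EHClauseCritSec);
    if (!HasCachedTypeHandle(pClause))
    {
        pClause->TypeHandle = resolved.AsPtr();   // we won the race
        SetHasCachedTypeHandle(pClause);
        return resolved;
    }
    // A racing thread published first; both threads must have resolved the
    // same type, so read back what it stored.
    return TypeHandle::FromPtr(pClause->TypeHandle);
}
#endif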
3171
3172 void EEJitManager::RemoveJitData (CodeHeader * pCHdr, size_t GCinfo_len, size_t EHinfo_len)
3173 {
3174     CONTRACTL {
3175         NOTHROW;
3176         GC_TRIGGERS;
3177     } CONTRACTL_END;
3178
3179     MethodDesc* pMD = pCHdr->GetMethodDesc();
3180
3181     if (pMD->IsLCGMethod()) {
3182
3183         void * codeStart = (pCHdr + 1);
3184
3185         {
3186             CrstHolder ch(&m_CodeHeapCritSec);
3187
3188             LCGMethodResolver * pResolver = pMD->AsDynamicMethodDesc()->GetLCGMethodResolver();
3189
3190             // Clear the pointer only if it matches what we are about to free. There may be cases where the JIT is reentered and
3191             // the method is JITed multiple times.
3192             if (pResolver->m_recordCodePointer == codeStart)
3193                 pResolver->m_recordCodePointer = NULL;
3194         }
3195
3196 #if defined(_TARGET_AMD64_)
3197         // Remove the unwind information (if applicable)
3198         UnwindInfoTable::UnpublishUnwindInfoForMethod((TADDR)codeStart);
3199 #endif // defined(_TARGET_AMD64_)
3200
3201         HostCodeHeap* pHeap = HostCodeHeap::GetCodeHeap((TADDR)codeStart);
3202         FreeCodeMemory(pHeap, codeStart);
3203
3204         // We are leaking GCInfo and EHInfo. They will be freed once the dynamic method is destroyed.
3205
3206         return;
3207     }
3208
3209     {
3210         CrstHolder ch(&m_CodeHeapCritSec);
3211
3212         HeapList *pHp = GetCodeHeapList();
3213
3214         while (pHp && ((pHp->startAddress > (TADDR)pCHdr) ||
3215                         (pHp->endAddress < (TADDR)pCHdr + sizeof(CodeHeader))))
3216         {
3217             pHp = pHp->GetNext();
3218         }
3219
3220         _ASSERTE(pHp && pHp->pHdrMap);
3221         _ASSERTE(pHp && pHp->cBlocks);
3222
3223         // Better to just return than AV?
3224         if (pHp == NULL)
3225             return;
3226
3227         NibbleMapSet(pHp, (TADDR)(pCHdr + 1), FALSE);
3228     }
3229
3230     // Back out the GCInfo
3231     if (GCinfo_len > 0) {
3232         GetJitMetaHeap(pMD)->BackoutMem(pCHdr->GetGCInfo(), GCinfo_len);
3233     }
3234     
3235     // Back out the EHInfo
3236     BYTE *EHInfo = (BYTE *)pCHdr->GetEHInfo();
3237     if (EHInfo) {
3238         EHInfo -= sizeof(size_t);
3239
3240         _ASSERTE(EHinfo_len>0);
3241         GetJitMetaHeap(pMD)->BackoutMem(EHInfo, EHinfo_len);
3242     }  
3243
3244     // <TODO>
3245     // TODO: Although we have backed out the GCInfo and EHInfo, we haven't actually backed out the
3246     //       code buffer itself. As a result, we might leak the CodeHeap if jitting fails after
3247     //       the code buffer is allocated.
3248     //
3249     //       However, it appears non-trivial to fix this.
3250     //       Here are some of the reasons:
3251     //       (1) AllocCode calls into AllocCodeRaw to allocate the code buffer in the CodeHeap. The exact size
3252     //           of the code buffer is not known until the alignment is calculated deep on the stack.
3253     //       (2) AllocCodeRaw is called in 3 different places. We might need to remember the 
3254     //           information for these places.
3255     //       (3) AllocCodeRaw might create a new CodeHeap. We should remember exactly which 
3256     //           CodeHeap is used to allocate the code buffer.
3257     //
3258     //       Fortunately, this is not a severe leak since the CodeHeap will be reclaimed on appdomain unload.
3259     //
3260     // </TODO>
3261     return;
3262 }
3263
3264 // appdomain is being unloaded, so delete any data associated with it. We have to do this in two stages.
3265 // On the first stage, we remove the elements from the list. On the second stage, which occurs after a GC
3266 // we know that only threads who were in preemptive mode prior to the GC could possibly still be looking
3267 // at an element that is about to be deleted. All such threads are guarded with a reader count, so if the
3268 // count is 0, we can safely delete, otherwise we must add to the cleanup list to be deleted later. We know
3269 // there can only be one unload at a time, so we can use a single var to hold the unlinked, but not deleted,
3270 // elements.
3271 void EEJitManager::Unload(LoaderAllocator *pAllocator)
3272 {
3273     CONTRACTL {
3274         NOTHROW;
3275         GC_NOTRIGGER;
3276     } CONTRACTL_END;
3277
3278     CrstHolder ch(&m_CodeHeapCritSec);
3279
3280     DomainCodeHeapList **ppList = m_DomainCodeHeaps.Table();
3281     int count = m_DomainCodeHeaps.Count();
3282
3283     for (int i=0; i < count; i++) {
3284         if (ppList[i]->m_pAllocator== pAllocator) {
3285             DomainCodeHeapList *pList = ppList[i];
3286             m_DomainCodeHeaps.DeleteByIndex(i);
3287
3288             // pHeapList is allocated in pHeap, so we only need to delete the CodeHeap itself
3289             count = pList->m_CodeHeapList.Count();
3290             for (i=0; i < count; i++) {
3291                 HeapList *pHeapList = pList->m_CodeHeapList[i];
3292                 DeleteCodeHeap(pHeapList);
3293             }
3294
3295             // It is OK to delete here, as anyone accessing the DomainCodeHeapList structure holds the critical section.
3296             delete pList;
3297
3298             break;
3299         }
3300     }
3301     ppList = m_DynamicDomainCodeHeaps.Table();
3302     count = m_DynamicDomainCodeHeaps.Count();
3303     for (int i=0; i < count; i++) {
3304         if (ppList[i]->m_pAllocator== pAllocator) {
3305             DomainCodeHeapList *pList = ppList[i];
3306             m_DynamicDomainCodeHeaps.DeleteByIndex(i);
3307
3308             // pHeapList is allocated in pHeap, so we only need to delete the CodeHeap itself
3309             count = pList->m_CodeHeapList.Count();
3310             for (i=0; i < count; i++) {
3311                 HeapList *pHeapList = pList->m_CodeHeapList[i];
3312                 // m_DynamicDomainCodeHeaps should only contain HostCodeHeap.
3313                 RemoveFromCleanupList(static_cast<HostCodeHeap*>(pHeapList->pHeap));
3314                 DeleteCodeHeap(pHeapList);
3315             }
3316
3317             // It is OK to delete here, as anyone accessing the DomainCodeHeapList structure holds the critical section.
3318             delete pList;
3319
3320             break;
3321         }
3322     }
3323
3324     ResetCodeAllocHint();
3325 }
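// The reader-count protocol described in the comment before Unload, in
// outline (sketch only, not compiled; the names are illustrative):
#if 0
void UnlinkThenReclaim(Element* e)
{
    Unlink(e);                  // stage 1: no new reader can find e
    // ... a GC intervenes; only threads that were in preemptive mode before
    //     the GC could still hold a stale pointer to e, and each of those
    //     holds a reader count ...
    if (ReaderCount() == 0)
        delete e;               // stage 2a: provably unreachable, free now
    else
        AddToCleanupList(e);    // stage 2b: defer until the readers drain
}
#endif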
3326
3327 EEJitManager::DomainCodeHeapList::DomainCodeHeapList()
3328 {
3329     LIMITED_METHOD_CONTRACT;
3330     m_pAllocator = NULL;
3331 }
3332
3333 EEJitManager::DomainCodeHeapList::~DomainCodeHeapList()
3334 {
3335     LIMITED_METHOD_CONTRACT;
3336 }
3337
3338 void EEJitManager::RemoveCodeHeapFromDomainList(CodeHeap *pHeap, LoaderAllocator *pAllocator)
3339 {
3340     CONTRACTL {
3341         NOTHROW;
3342         GC_NOTRIGGER;
3343         PRECONDITION(m_CodeHeapCritSec.OwnedByCurrentThread());
3344     } CONTRACTL_END;
3345
3346     // get the AppDomain heap list for pAllocator in m_DynamicDomainCodeHeaps
3347     DomainCodeHeapList *pList = GetCodeHeapList(NULL, pAllocator, TRUE);
3348
3349     // go through the heaps and find and remove pHeap
3350     int count = pList->m_CodeHeapList.Count();
3351     for (int i = 0; i < count; i++) {
3352         HeapList *pHeapList = pList->m_CodeHeapList[i];
3353         if (pHeapList->pHeap == pHeap) {
3354             // found the heap to remove. If this is the only heap we remove the whole DomainCodeHeapList
3355             // otherwise we just remove this heap
3356             if (count == 1) {
3357                 m_DynamicDomainCodeHeaps.Delete(pList);
3358                 delete pList;
3359             }
3360             else
3361                 pList->m_CodeHeapList.Delete(i);
3362
3363             // if this heaplist is cached in the loader allocator, we must clear it
3364             if (pAllocator->m_pLastUsedDynamicCodeHeap == ((void *) pHeapList))
3365             {
3366                 pAllocator->m_pLastUsedDynamicCodeHeap = NULL;
3367             }
3368
3369             break;
3370         }
3371     }
3372 }
3373
3374 void EEJitManager::FreeCodeMemory(HostCodeHeap *pCodeHeap, void * codeStart)
3375 {
3376     CONTRACTL
3377     {
3378         NOTHROW;
3379         GC_NOTRIGGER;
3380     }
3381     CONTRACTL_END;
3382
3383     CrstHolder ch(&m_CodeHeapCritSec);
3384
3385     // FreeCodeMemory is only supported on LCG methods,
3386     // so pCodeHeap can only be a HostCodeHeap.
3387
3388     // clean up the NibbleMap
3389     NibbleMapSet(pCodeHeap->m_pHeapList, (TADDR)codeStart, FALSE);
3390
3391     // The caller of this method doesn't call HostCodeHeap->FreeMemForCode
3392     // directly because the operation should be protected by m_CodeHeapCritSec.
3393     pCodeHeap->FreeMemForCode(codeStart);
3394 }
3395
3396 void ExecutionManager::CleanupCodeHeaps()
3397 {
3398     CONTRACTL
3399     {
3400         NOTHROW;
3401         GC_NOTRIGGER;
3402     }
3403     CONTRACTL_END;
3404
3405     _ASSERTE (g_fProcessDetach || (GCHeapUtilities::IsGCInProgress()  && ::IsGCThread()));
3406
3407     GetEEJitManager()->CleanupCodeHeaps();
3408 }
3409
3410 void EEJitManager::CleanupCodeHeaps()
3411 {
3412     CONTRACTL
3413     {
3414         NOTHROW;
3415         GC_NOTRIGGER;
3416     }
3417     CONTRACTL_END;
3418
3419     _ASSERTE (g_fProcessDetach || (GCHeapUtilities::IsGCInProgress() && ::IsGCThread()));
3420
3421     // Quick out: don't even take the lock if we have no cleanup to do.
3422     // This is important because ETW takes the CodeHeapLock when it is doing
3423     // rundown, and if there are many JIT compiled methods, this can take a while.
3424     // Because cleanup is called synchronously before a GC, this means GCs get
3425     // blocked while ETW is doing rundown. By not taking the lock we avoid
3426     // this stall most of the time; since cleanup is rare and ETW rundown is rare,
3427     // the likelihood of both together is very rare.
3428     if (m_cleanupList == NULL)
3429         return;
3430
3431     CrstHolder ch(&m_CodeHeapCritSec);
3432
3433     if (m_cleanupList == NULL)
3434         return;
3435
3436     HostCodeHeap *pHeap = m_cleanupList;
3437     m_cleanupList = NULL;
3438
3439     while (pHeap)
3440     {
3441         HostCodeHeap *pNextHeap = pHeap->m_pNextHeapToRelease;
3442
3443         DWORD allocCount = pHeap->m_AllocationCount;
3444         if (allocCount == 0)
3445         {
3446             LOG((LF_BCL, LL_INFO100, "Level2 - Destroying CodeHeap [0x%p, vt(0x%x)] - ref count 0\n", pHeap, *(size_t*)pHeap));
3447             RemoveCodeHeapFromDomainList(pHeap, pHeap->m_pAllocator);
3448             DeleteCodeHeap(pHeap->m_pHeapList);
3449         }
3450         else
3451         {
3452             LOG((LF_BCL, LL_INFO100, "Level2 - Restoring CodeHeap [0x%p, vt(0x%x)] - ref count %d\n", pHeap, *(size_t*)pHeap, allocCount));
3453         }
3454         pHeap = pNextHeap;
3455     }
3456 }
3457
3458 void EEJitManager::RemoveFromCleanupList(HostCodeHeap *pCodeHeap)
3459 {
3460     CONTRACTL {
3461         NOTHROW;
3462         GC_NOTRIGGER;
3463         PRECONDITION(m_CodeHeapCritSec.OwnedByCurrentThread());
3464     } CONTRACTL_END;
3465
3466     HostCodeHeap *pHeap = m_cleanupList;
3467     HostCodeHeap *pPrevHeap = NULL;
3468     while (pHeap)
3469     {
3470         if (pHeap == pCodeHeap)
3471         {
3472             if (pPrevHeap)
3473             {
3474                 // remove current heap from list
3475                 pPrevHeap->m_pNextHeapToRelease = pHeap->m_pNextHeapToRelease;
3476             }
3477             else
3478             {
3479                 m_cleanupList = pHeap->m_pNextHeapToRelease;
3480             }
3481             break;
3482         }
3483         pPrevHeap = pHeap;
3484         pHeap = pHeap->m_pNextHeapToRelease;
3485     }
3486 }
3487
3488 void EEJitManager::AddToCleanupList(HostCodeHeap *pCodeHeap)
3489 {
3490     CONTRACTL {
3491         NOTHROW;
3492         GC_NOTRIGGER;
3493         PRECONDITION(m_CodeHeapCritSec.OwnedByCurrentThread());
3494     } CONTRACTL_END;
3495
3496     // It may happen that the heap's ref count goes to 0 and, later on, before the heap is destroyed, it gets reused
3497     // for another dynamic method.
3498     // It's then possible that the ref count reaches 0 multiple times. If so, we simply don't add it again.
3499     // Also, on cleanup we check that the ref count is actually 0.
3500     HostCodeHeap *pHeap = m_cleanupList;
3501     while (pHeap)
3502     {
3503         if (pHeap == pCodeHeap)
3504         {
3505             LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap [0x%p, vt(0x%x)] - Already in list\n", pCodeHeap, *(size_t*)pCodeHeap));
3506             break;
3507         }
3508         pHeap = pHeap->m_pNextHeapToRelease;
3509     }
3510     if (pHeap == NULL)
3511     {
3512         pCodeHeap->m_pNextHeapToRelease = m_cleanupList;
3513         m_cleanupList = pCodeHeap;
3514         LOG((LF_BCL, LL_INFO100, "Level2 - CodeHeap [0x%p, vt(0x%x)] - ref count %d - Adding to cleanup list\n", pCodeHeap, *(size_t*)pCodeHeap, pCodeHeap->m_AllocationCount));
3515     }
3516 }
3517
3518 void EEJitManager::DeleteCodeHeap(HeapList *pHeapList)
3519 {
3520     CONTRACTL {
3521         NOTHROW;
3522         GC_NOTRIGGER;
3523         PRECONDITION(m_CodeHeapCritSec.OwnedByCurrentThread());
3524     } CONTRACTL_END;
3525
3526     HeapList *pHp = GetCodeHeapList();
3527     if (pHp == pHeapList)
3528         m_pCodeHeap = pHp->GetNext();
3529     else
3530     {
3531         HeapList *pHpNext = pHp->GetNext();
3532         
3533         while (pHpNext != pHeapList)
3534         {
3535             pHp = pHpNext;
3536             _ASSERTE(pHp != NULL);  // should always find the HeapList
3537             pHpNext = pHp->GetNext();
3538         }
3539         pHp->SetNext(pHeapList->GetNext());
3540     }
3541
3542     DeleteEEFunctionTable((PVOID)pHeapList);
3543
3544     ExecutionManager::DeleteRange((TADDR)pHeapList);
3545
3546     LOG((LF_JIT, LL_INFO100, "DeleteCodeHeap start" FMT_ADDR "end" FMT_ADDR "\n",
3547                               (const BYTE*)pHeapList->startAddress, 
3548                               (const BYTE*)pHeapList->endAddress     ));
3549
3550     // pHeapList is allocated in pHeap, so we only need to delete the CodeHeap itself.
3551     // !!! For SoC, the compiler inserts code that writes a special cookie at pHeapList->pHeap after the delete operator, at least in debug builds.
3552     // !!! Since pHeapList is deleted at the same time as pHeap, this would cause an AV.
3553     // delete pHeapList->pHeap;
3554     CodeHeap* pHeap = pHeapList->pHeap;
3555     delete pHeap;
3556 }
3557
3558 #endif // #ifndef DACCESS_COMPILE
3559
3560 static CodeHeader * GetCodeHeaderFromDebugInfoRequest(const DebugInfoRequest & request)
3561 {
3562     CONTRACTL {
3563         NOTHROW;
3564         GC_NOTRIGGER;
3565         SUPPORTS_DAC;
3566     } CONTRACTL_END;
3567     
3568     TADDR address = (TADDR) request.GetStartAddress();
3569     _ASSERTE(address != NULL);
3570
3571     CodeHeader * pHeader = dac_cast<PTR_CodeHeader>(address & ~3) - 1;
3572     _ASSERTE(pHeader != NULL);    
3573
3574     return pHeader;
3575 }
3576
3577 //-----------------------------------------------------------------------------
3578 // Get vars from Jit Store
3579 //-----------------------------------------------------------------------------
3580 BOOL EEJitManager::GetBoundariesAndVars(
3581         const DebugInfoRequest & request,
3582         IN FP_IDS_NEW fpNew, IN void * pNewData,
3583         OUT ULONG32 * pcMap, 
3584         OUT ICorDebugInfo::OffsetMapping **ppMap,
3585         OUT ULONG32 * pcVars, 
3586         OUT ICorDebugInfo::NativeVarInfo **ppVars)
3587 {
3588     CONTRACTL {
3589         THROWS;       // on OOM.
3590         GC_NOTRIGGER; // getting vars shouldn't trigger
3591         SUPPORTS_DAC;
3592     } CONTRACTL_END;
3593
3594     CodeHeader * pHdr = GetCodeHeaderFromDebugInfoRequest(request);
3595     _ASSERTE(pHdr != NULL);
3596
3597     PTR_BYTE pDebugInfo = pHdr->GetDebugInfo();
3598     
3599     // No header created, which means no jit information is available.
3600     if (pDebugInfo == NULL)
3601         return FALSE;
3602
3603     // Uncompress. This allocates memory and may throw.
3604     CompressDebugInfo::RestoreBoundariesAndVars(
3605         fpNew, pNewData, // allocators
3606         pDebugInfo,      // input
3607         pcMap, ppMap,
3608         pcVars, ppVars); // output
3609
3610     return TRUE;
3611 }
3612
3613 #ifdef DACCESS_COMPILE
3614 void CodeHeader::EnumMemoryRegions(CLRDataEnumMemoryFlags flags, IJitManager* pJitMan)
3615 {
3616     CONTRACTL
3617     {
3618         NOTHROW;
3619         GC_NOTRIGGER;
3620         SUPPORTS_DAC;
3621     }
3622     CONTRACTL_END;    
3623
3624     DAC_ENUM_DTHIS();
3625     
3626 #ifdef USE_INDIRECT_CODEHEADER
3627     this->pRealCodeHeader.EnumMem();
3628 #endif // USE_INDIRECT_CODEHEADER
3629
3630     if (this->GetDebugInfo() != NULL)
3631     {
3632         CompressDebugInfo::EnumMemoryRegions(flags, this->GetDebugInfo());
3633     }
3634 }
3635
3636 //-----------------------------------------------------------------------------
3637 // Enumerate for minidumps.
3638 //-----------------------------------------------------------------------------
3639 void EEJitManager::EnumMemoryRegionsForMethodDebugInfo(CLRDataEnumMemoryFlags flags, MethodDesc * pMD)
3640 {
3641     CONTRACTL
3642     {
3643         NOTHROW;
3644         GC_NOTRIGGER;
3645         SUPPORTS_DAC;
3646     }
3647     CONTRACTL_END;
3648
3649     DebugInfoRequest request;
3650     PCODE addrCode = pMD->GetNativeCode();
3651     request.InitFromStartingAddr(pMD, addrCode);
3652
3653     CodeHeader * pHeader = GetCodeHeaderFromDebugInfoRequest(request);
3654
3655     pHeader->EnumMemoryRegions(flags, NULL);
3656 }
3657 #endif // DACCESS_COMPILE
3658
3659 PCODE EEJitManager::GetCodeAddressForRelOffset(const METHODTOKEN& MethodToken, DWORD relOffset)
3660 {
3661     WRAPPER_NO_CONTRACT;
3662
3663     CodeHeader * pHeader = GetCodeHeader(MethodToken);
3664     return pHeader->GetCodeStartAddress() + relOffset;
3665 }
3666
3667 BOOL EEJitManager::JitCodeToMethodInfo(
3668         RangeSection * pRangeSection,
3669         PCODE currentPC,
3670         MethodDesc ** ppMethodDesc,
3671         EECodeInfo * pCodeInfo)
3672 {
3673     CONTRACTL {
3674         NOTHROW;
3675         GC_NOTRIGGER;
3676         SO_TOLERANT;
3677         SUPPORTS_DAC;
3678     } CONTRACTL_END;
3679
3680     _ASSERTE(pRangeSection != NULL);
3681
3682     TADDR start = dac_cast<PTR_EEJitManager>(pRangeSection->pjit)->FindMethodCode(pRangeSection, currentPC);
3683     if (start == NULL)
3684         return FALSE;
3685
3686     CodeHeader * pCHdr = PTR_CodeHeader(start - sizeof(CodeHeader));
3687     if (pCHdr->IsStubCodeBlock())
3688         return FALSE;
3689
3690     _ASSERTE(pCHdr->GetMethodDesc()->SanityCheck());
3691
3692     if (pCodeInfo)
3693     {
3694         pCodeInfo->m_methodToken = METHODTOKEN(pRangeSection, dac_cast<TADDR>(pCHdr));
3695
3696         // This can be counted on for jitted code. For NGEN code, in the case
3697         // where we have hot/cold splitting, this isn't valid and we would need to
3698         // take the cold code into account.
3699         pCodeInfo->m_relOffset = (DWORD)(PCODEToPINSTR(currentPC) - pCHdr->GetCodeStartAddress());
3700
3701 #ifdef WIN64EXCEPTIONS
3702         // Computed lazily by code:EEJitManager::LazyGetFunctionEntry
3703         pCodeInfo->m_pFunctionEntry = NULL;
3704 #endif
3705     }
3706
3707     if (ppMethodDesc)
3708     {
3709         *ppMethodDesc = pCHdr->GetMethodDesc();
3710     }
3711     return TRUE;
3712 }
3713
3714 StubCodeBlockKind EEJitManager::GetStubCodeBlockKind(RangeSection * pRangeSection, PCODE currentPC)
3715 {
3716     CONTRACTL {
3717         NOTHROW;
3718         GC_NOTRIGGER;
3719         SO_TOLERANT;
3720         SUPPORTS_DAC;
3721     } CONTRACTL_END;
3722
3723     TADDR start = dac_cast<PTR_EEJitManager>(pRangeSection->pjit)->FindMethodCode(pRangeSection, currentPC);
3724     if (start == NULL)
3725         return STUB_CODE_BLOCK_NOCODE;
3726     CodeHeader * pCHdr = PTR_CodeHeader(start - sizeof(CodeHeader));
3727     return pCHdr->IsStubCodeBlock() ? pCHdr->GetStubCodeBlockKind() : STUB_CODE_BLOCK_MANAGED;
3728 }
3729
3730 TADDR EEJitManager::FindMethodCode(PCODE currentPC)
3731 {
3732     CONTRACTL {
3733         NOTHROW;
3734         GC_NOTRIGGER;
3735         SO_TOLERANT;
3736         SUPPORTS_DAC;
3737     } CONTRACTL_END;
3738
3739     RangeSection * pRS = ExecutionManager::FindCodeRange(currentPC, ExecutionManager::GetScanFlags());
3740     if (pRS == NULL || (pRS->flags & RangeSection::RANGE_SECTION_CODEHEAP) == 0)
3741         return STUB_CODE_BLOCK_NOCODE;
3742     return dac_cast<PTR_EEJitManager>(pRS->pjit)->FindMethodCode(pRS, currentPC);
3743 }
3744
3745 // Finds the header corresponding to the code at offset "delta".
3746 // Returns NULL if there is no header for the given "delta"
3747
3748 TADDR EEJitManager::FindMethodCode(RangeSection * pRangeSection, PCODE currentPC)
3749 {
3750     LIMITED_METHOD_DAC_CONTRACT;
3751
3752     _ASSERTE(pRangeSection != NULL);
3753
3754     HeapList *pHp = dac_cast<PTR_HeapList>(pRangeSection->pHeapListOrZapModule);
3755
3756     if ((currentPC < pHp->startAddress) ||
3757         (currentPC > pHp->endAddress))
3758     {
3759         return NULL;
3760     }
3761
3762     TADDR base = pHp->mapBase;
3763     TADDR delta = currentPC - base;
3764     PTR_DWORD pMap = pHp->pHdrMap;
3765     PTR_DWORD pMapStart = pMap;
3766
3767     DWORD tmp;
3768
3769     size_t startPos = ADDR2POS(delta);  // align to 32-byte buckets
3770                                         // ( == index into the array of nibbles)
3771     DWORD  offset   = ADDR2OFFS(delta); // this is the offset inside the bucket + 1
3772
3773     _ASSERTE(offset == (offset & NIBBLE_MASK));
3774
3775     pMap += (startPos >> LOG2_NIBBLES_PER_DWORD); // points to the proper DWORD of the map
3776
3777     // get DWORD and shift down our nibble
3778
3779     PREFIX_ASSUME(pMap != NULL);
3780     tmp = VolatileLoadWithoutBarrier<DWORD>(pMap) >> POS2SHIFTCOUNT(startPos);
3781
3782     if ((tmp & NIBBLE_MASK) && ((tmp & NIBBLE_MASK) <= offset) )
3783     {
3784         return base + POSOFF2ADDR(startPos, tmp & NIBBLE_MASK);
3785     }
3786
3787     // Is there a header in the remainder of the DWORD ?
3788     tmp = tmp >> NIBBLE_SIZE;
3789
3790     if (tmp)
3791     {
3792         startPos--;
3793         while (!(tmp & NIBBLE_MASK))
3794         {
3795             tmp = tmp >> NIBBLE_SIZE;
3796             startPos--;
3797         }
3798         return base + POSOFF2ADDR(startPos, tmp & NIBBLE_MASK);
3799     }
3800
3801     // We skipped the remainder of the DWORD,
3802     // so we must set startPos to the highest position of
3803     // previous DWORD, unless we are already on the first DWORD
3804
3805     if (startPos < NIBBLES_PER_DWORD)
3806         return NULL;
3807
3808     startPos = ((startPos >> LOG2_NIBBLES_PER_DWORD) << LOG2_NIBBLES_PER_DWORD) - 1;
3809
3810     // Skip "headerless" DWORDS
3811
3812     while (pMapStart < pMap && 0 == (tmp = VolatileLoadWithoutBarrier<DWORD>(--pMap)))
3813     {
3814         startPos -= NIBBLES_PER_DWORD;
3815     }
3816
3817     // This helps to catch degenerate error cases. This relies on the fact that
3818     // startPos cannot ever be bigger than MAX_UINT
3819     if (((INT_PTR)startPos) < 0)
3820         return NULL;
3821
3822     // Find the nibble with the header in the DWORD
3823
3824     while (startPos && !(tmp & NIBBLE_MASK))
3825     {
3826         tmp = tmp >> NIBBLE_SIZE;
3827         startPos--;
3828     }
3829
3830     if (startPos == 0 && tmp == 0)
3831         return NULL;
3832
3833     return base + POSOFF2ADDR(startPos, tmp & NIBBLE_MASK);
3834 }
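// Worked example of the lookup above (assuming the 32-byte-bucket,
// 4-byte-granule encoding defined in nibblemapmacros.h): suppose a method
// header was set at heap offset 0x48. Its bucket is ADDR2POS(0x48) = 2 and
// its stored nibble value is ADDR2OFFS(0x48) = (8 >> 2) + 1 = 3. A query for
// a PC at offset 0x50 computes startPos = 2 and offset = 5; the stored nibble
// 3 is non-zero and <= 5, so the first check fires and the method start is
// recovered as base + POSOFF2ADDR(2, 3) = base + (2 << 5) + ((3 - 1) << 2)
// = base + 0x48. A query landing before any header in its DWORD instead falls
// through to the backward scan over earlier nibbles and DWORDs above.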
3835
3836 #if !defined(DACCESS_COMPILE)
3837 void EEJitManager::NibbleMapSet(HeapList * pHp, TADDR pCode, BOOL bSet) 
3838 {
3839     CONTRACTL {
3840         NOTHROW;
3841         GC_NOTRIGGER;
3842     } CONTRACTL_END;
3843
3844     // Currently all callers to this method ensure EEJitManager::m_CodeHeapCritSec
3845     // is held.
3846     _ASSERTE(m_CodeHeapCritSec.OwnedByCurrentThread());
3847
3848     _ASSERTE(pCode >= pHp->mapBase);
3849
3850     size_t delta = pCode - pHp->mapBase;
3851
3852     size_t pos  = ADDR2POS(delta); 
3853     DWORD value = bSet?ADDR2OFFS(delta):0;
3854
3855     DWORD index = (DWORD) (pos >> LOG2_NIBBLES_PER_DWORD);
3856     DWORD mask  = ~((DWORD) HIGHEST_NIBBLE_MASK >> ((pos & NIBBLES_PER_DWORD_MASK) << LOG2_NIBBLE_SIZE));
3857
3858     value = value << POS2SHIFTCOUNT(pos);
3859
3860     PTR_DWORD pMap = pHp->pHdrMap;
3861
3862     // Assert that we don't overwrite an existing offset
3863     // (either this is a reset, or the nibble is empty).
3864     _ASSERTE(!value || !((*(pMap+index))& ~mask));
3865
3866     // It is important for this update to be atomic. Synchronization would be required with FindMethodCode otherwise.
3867     *(pMap+index) = ((*(pMap+index))&mask)|value;
3868
3869     pHp->cBlocks += (bSet ? 1 : -1);
3870 }
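// Worked example of the mask arithmetic above (assuming the highest-nibble-
// first packing in nibblemapmacros.h): for pos = 2 the target is the third-
// highest nibble of its DWORD, so mask = ~(0xF0000000 >> 8) = ~0x00F00000,
// and the new value is shifted left by POS2SHIFTCOUNT(2) = 20 before being
// merged in. Folding the clear and the set into the single aligned DWORD
// store above is what lets FindMethodCode read the map concurrently without
// taking a lock.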
3871 #endif // !DACCESS_COMPILE
3872
3873 #if defined(WIN64EXCEPTIONS)
3874 PTR_RUNTIME_FUNCTION EEJitManager::LazyGetFunctionEntry(EECodeInfo * pCodeInfo)
3875 {
3876     CONTRACTL {
3877         NOTHROW;
3878         GC_NOTRIGGER;
3879         SO_TOLERANT;
3880         SUPPORTS_DAC;
3881     } CONTRACTL_END;
3882
3883     if (!pCodeInfo->IsValid())
3884     {
3885         return NULL;
3886     }
3887
3888     CodeHeader * pHeader = GetCodeHeader(pCodeInfo->GetMethodToken());
3889
3890     DWORD address = RUNTIME_FUNCTION__BeginAddress(pHeader->GetUnwindInfo(0)) + pCodeInfo->GetRelOffset();
3891
3892     // We need the module base address to calculate the end address of a function from the functionEntry.
3893     // Thus, save it off right now.
3894     TADDR baseAddress = pCodeInfo->GetModuleBase();
3895
3896     // NOTE: We could binary search here, if it would be helpful (e.g., large number of funclets)
3897     for (UINT iUnwindInfo = 0; iUnwindInfo < pHeader->GetNumberOfUnwindInfos(); iUnwindInfo++)
3898     {
3899         PTR_RUNTIME_FUNCTION pFunctionEntry = pHeader->GetUnwindInfo(iUnwindInfo);
3900
3901         if (RUNTIME_FUNCTION__BeginAddress(pFunctionEntry) <= address && address < RUNTIME_FUNCTION__EndAddress(pFunctionEntry, baseAddress))
3902         {
3903             return pFunctionEntry;
3904         }
3905     }
3906
3907     return NULL;
3908 }
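// If the linear scan above ever became hot, the binary search suggested in
// the NOTE might look like this (sketch only, not compiled; it assumes the
// unwind infos are sorted by begin address, which the NOTE implies):
#if 0
UINT lo = 0, hi = pHeader->GetNumberOfUnwindInfos();
while (lo < hi)
{
    UINT mid = lo + (hi - lo) / 2;
    PTR_RUNTIME_FUNCTION pFunc = pHeader->GetUnwindInfo(mid);
    if (address < RUNTIME_FUNCTION__BeginAddress(pFunc))
        hi = mid;                // address is entirely below this entry
    else if (address >= RUNTIME_FUNCTION__EndAddress(pFunc, baseAddress))
        lo = mid + 1;            // address is entirely above this entry
    else
        return pFunc;            // address falls inside [begin, end)
}
return NULL;
#endif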
3909
3910 DWORD EEJitManager::GetFuncletStartOffsets(const METHODTOKEN& MethodToken, DWORD* pStartFuncletOffsets, DWORD dwLength)
3911 {
3912     CONTRACTL
3913     {
3914         NOTHROW;
3915         GC_NOTRIGGER;
3916     }
3917     CONTRACTL_END;
3918
3919     CodeHeader * pCH = GetCodeHeader(MethodToken);
3920     TADDR moduleBase = JitTokenToModuleBase(MethodToken);
3921
3922     _ASSERTE(pCH->GetNumberOfUnwindInfos() >= 1);
3923
3924     DWORD parentBeginRva = RUNTIME_FUNCTION__BeginAddress(pCH->GetUnwindInfo(0));
3925
3926     DWORD nFunclets = 0;
3927     for (COUNT_T iUnwindInfo = 1; iUnwindInfo < pCH->GetNumberOfUnwindInfos(); iUnwindInfo++)
3928     {
3929         PTR_RUNTIME_FUNCTION pFunctionEntry = pCH->GetUnwindInfo(iUnwindInfo);
3930
3931 #if defined(EXCEPTION_DATA_SUPPORTS_FUNCTION_FRAGMENTS)
3932         if (IsFunctionFragment(moduleBase, pFunctionEntry))
3933         {
3934             // This is a fragment (not the funclet beginning); skip it
3935             continue;
3936         }
3937 #endif // EXCEPTION_DATA_SUPPORTS_FUNCTION_FRAGMENTS
3938
3939         DWORD funcletBeginRva = RUNTIME_FUNCTION__BeginAddress(pFunctionEntry);
3940         DWORD relParentOffsetToFunclet = funcletBeginRva - parentBeginRva;
3941
3942         if (nFunclets < dwLength)
3943             pStartFuncletOffsets[nFunclets] = relParentOffsetToFunclet;
3944         nFunclets++;
3945     }
3946
3947     return nFunclets;
3948 }
3949
3950 #if defined(DACCESS_COMPILE)
3951 // This function is basically like RtlLookupFunctionEntry(), except that it works with DAC 
3952 // to read the function entries out of process.  Also, it can only look up function entries
3953 // inside mscorwks.dll, since DAC doesn't know anything about other unmanaged DLLs.
3954 void GetUnmanagedStackWalkInfo(IN  ULONG64   ControlPc,
3955                                OUT UINT_PTR* pModuleBase,
3956                                OUT UINT_PTR* pFuncEntry)
3957 {
3958     WRAPPER_NO_CONTRACT;
3959
3960     if (pModuleBase)
3961     {
3962         *pModuleBase = NULL;
3963     }
3964
3965     if (pFuncEntry)
3966     {
3967         *pFuncEntry = NULL;
3968     }
3969
3970     PEDecoder peDecoder(DacGlobalBase());
3971
3972     SIZE_T baseAddr = dac_cast<TADDR>(peDecoder.GetBase());
3973     SIZE_T cbSize   = (SIZE_T)peDecoder.GetVirtualSize();
3974
3975     // Check if the control PC is inside mscorwks.
3976     if ( (baseAddr <= ControlPc) && 
3977          (ControlPc < (baseAddr + cbSize))
3978        )
3979     {
3980         if (pModuleBase)
3981         {
3982             *pModuleBase = baseAddr;
3983         }
3984
3985         if (pFuncEntry)
3986         {
3987             // Check if there is a static function table.
3988             COUNT_T cbSize = 0;
3989             TADDR   pExceptionDir = peDecoder.GetDirectoryEntryData(IMAGE_DIRECTORY_ENTRY_EXCEPTION, &cbSize);
3990
3991             if (pExceptionDir != NULL)
3992             {
3993                 // Do a binary search on the static function table of mscorwks.dll.
3994                 HRESULT hr = E_FAIL;
3995                 TADDR   taFuncEntry;
3996                 T_RUNTIME_FUNCTION functionEntry;
3997
3998                 DWORD dwLow  = 0;
3999                 DWORD dwHigh = cbSize / sizeof(T_RUNTIME_FUNCTION);
4000                 DWORD dwMid  = 0;
4001
4002                 while (dwLow <= dwHigh)
4003                 {
4004                     dwMid = (dwLow + dwHigh) >> 1;
4005                     taFuncEntry = pExceptionDir + dwMid * sizeof(T_RUNTIME_FUNCTION);
4006                     hr = DacReadAll(taFuncEntry, &functionEntry, sizeof(functionEntry), false);
4007                     if (FAILED(hr))
4008                     {
4009                         return;
4010                     }
4011
4012                     if (ControlPc < baseAddr + functionEntry.BeginAddress)
4013                     {
4014                         dwHigh = dwMid - 1;
4015                     }
4016                     else if (ControlPc >= baseAddr + RUNTIME_FUNCTION__EndAddress(&functionEntry, baseAddr))
4017                     {
4018                         dwLow = dwMid + 1;
4019                     }
4020                     else
4021                     {
4022                         _ASSERTE(pFuncEntry);
4023                         *pFuncEntry = (UINT_PTR)(T_RUNTIME_FUNCTION*)PTR_RUNTIME_FUNCTION(taFuncEntry);
4024                         break;
4025                     }
4026                 }
4027
4028                 if (dwLow > dwHigh)
4029                 {
4030                     _ASSERTE(*pFuncEntry == NULL);
4031                 }
4032             }
4033         }
4034     }
4035 }
4036 #endif // DACCESS_COMPILE
4037
4038 extern "C" void GetRuntimeStackWalkInfo(IN  ULONG64   ControlPc,
4039                                         OUT UINT_PTR* pModuleBase,
4040                                         OUT UINT_PTR* pFuncEntry)
4041 {
4042
4043     WRAPPER_NO_CONTRACT;
4044
4045     BEGIN_PRESERVE_LAST_ERROR;
4046
4047     BEGIN_ENTRYPOINT_VOIDRET;
4048
4049     if (pModuleBase)
4050         *pModuleBase = NULL;
4051     if (pFuncEntry)
4052         *pFuncEntry = NULL;
4053
4054     EECodeInfo codeInfo((PCODE)ControlPc);
4055     if (!codeInfo.IsValid())
4056     {
4057 #if defined(DACCESS_COMPILE)
4058         GetUnmanagedStackWalkInfo(ControlPc, pModuleBase, pFuncEntry);
4059 #endif // DACCESS_COMPILE
4060         goto Exit;
4061     }
4062
4063     if (pModuleBase)
4064     {
4065         *pModuleBase = (UINT_PTR)codeInfo.GetModuleBase();
4066     }
4067
4068     if (pFuncEntry)
4069     {
4070         *pFuncEntry = (UINT_PTR)(PT_RUNTIME_FUNCTION)codeInfo.GetFunctionEntry();
4071     }
4072
4073 Exit:
4074     END_ENTRYPOINT_VOIDRET;
4075
4076     END_PRESERVE_LAST_ERROR;
4077 }
4078 #endif // WIN64EXCEPTIONS
4079
4080 #ifdef DACCESS_COMPILE
4081
4082 void EEJitManager::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
4083 {
4084     IJitManager::EnumMemoryRegions(flags);
4085
4086     //
4087     // Save all of the code heaps.
4088     //
4089
4090     HeapList* heap;
4091
4092     for (heap = m_pCodeHeap; heap; heap = heap->GetNext())
4093     {
4094         DacEnumHostDPtrMem(heap);
4095
4096         if (heap->pHeap.IsValid())
4097         {
4098             heap->pHeap->EnumMemoryRegions(flags);
4099         }
4100
4101         DacEnumMemoryRegion(heap->startAddress, (ULONG32)
4102                             (heap->endAddress - heap->startAddress));
4103
4104         if (heap->pHdrMap.IsValid())
4105         {
4106             ULONG32 nibbleMapSize = (ULONG32)
4107                 HEAP2MAPSIZE(ROUND_UP_TO_PAGE(heap->maxCodeHeapSize));
4108             DacEnumMemoryRegion(dac_cast<TADDR>(heap->pHdrMap), nibbleMapSize);
4109         }
4110     }
4111 }
4112 #endif // #ifdef DACCESS_COMPILE
4113
4114 #else // CROSSGEN_COMPILE
4115 // stub for compilation
4116 BOOL EEJitManager::JitCodeToMethodInfo(RangeSection * pRangeSection,
4117     PCODE currentPC,
4118     MethodDesc ** ppMethodDesc,
4119     EECodeInfo * pCodeInfo)
4120 {
4121     _ASSERTE(FALSE);
4122     return FALSE;
4123 }
4124 #endif // !CROSSGEN_COMPILE
4125
4126
4127 #ifndef DACCESS_COMPILE
4128
4129 //*******************************************************
4130 // Execution Manager
4131 //*******************************************************
4132
4133 // Init statics
4134 void ExecutionManager::Init()
4135 {
4136     CONTRACTL {
4137         THROWS;
4138         GC_NOTRIGGER;
4139     } CONTRACTL_END;
4140
4141     m_JumpStubCrst.Init(CrstJumpStubCache, CrstFlags(CRST_UNSAFE_ANYMODE|CRST_DEBUGGER_THREAD));
4142
4143     m_RangeCrst.Init(CrstExecuteManRangeLock, CRST_UNSAFE_ANYMODE);
4144
4145     m_pDefaultCodeMan = new EECodeManager();
4146
4147 #ifndef CROSSGEN_COMPILE
4148     m_pEEJitManager = new EEJitManager();
4149 #endif
4150 #ifdef FEATURE_PREJIT
4151     m_pNativeImageJitManager = new NativeImageJitManager();
4152 #endif
4153
4154 #ifdef FEATURE_READYTORUN
4155     m_pReadyToRunJitManager = new ReadyToRunJitManager();
4156 #endif
4157 }
4158
4159 #endif // #ifndef DACCESS_COMPILE
4160
4161 //**************************************************************************
4162 RangeSection * 
4163 ExecutionManager::FindCodeRange(PCODE currentPC, ScanFlag scanFlag)
4164 {
4165     CONTRACTL {
4166         NOTHROW;
4167         GC_NOTRIGGER;
4168         SO_TOLERANT;
4169         SUPPORTS_DAC;
4170     } CONTRACTL_END;
4171
4172     if (currentPC == NULL)
4173         return NULL;
4174
4175     if (scanFlag == ScanReaderLock)
4176         return FindCodeRangeWithLock(currentPC);
4177
4178     return GetRangeSection(currentPC);
4179 }
4180
4181 //**************************************************************************
4182 NOINLINE // Make sure that the slow path with lock won't affect the fast path
4183 RangeSection *
4184 ExecutionManager::FindCodeRangeWithLock(PCODE currentPC)
4185 {
4186     CONTRACTL {
4187         NOTHROW;
4188         GC_NOTRIGGER;
4189         SO_TOLERANT;
4190         SUPPORTS_DAC;
4191     } CONTRACTL_END;
4192
4193     ReaderLockHolder rlh;
4194     return GetRangeSection(currentPC);
4195 }
4196
4197 //**************************************************************************
4198 MethodDesc * ExecutionManager::GetCodeMethodDesc(PCODE currentPC)
4199 {
4200     CONTRACTL
4201     {
4202         NOTHROW;
4203         GC_NOTRIGGER;
4204         FORBID_FAULT;
4205         SO_TOLERANT;
4206     }
4207     CONTRACTL_END
4208
4209     EECodeInfo codeInfo(currentPC);
4210     if (!codeInfo.IsValid())
4211         return NULL;
4212     return codeInfo.GetMethodDesc();
4213 }
4214
4215 //**************************************************************************
4216 BOOL ExecutionManager::IsManagedCode(PCODE currentPC)
4217 {
4218     CONTRACTL {
4219         NOTHROW;
4220         GC_NOTRIGGER;
4221         SO_TOLERANT;
4222     } CONTRACTL_END;
4223
4224     if (currentPC == NULL)
4225         return FALSE;
4226
4227     if (GetScanFlags() == ScanReaderLock) 
4228         return IsManagedCodeWithLock(currentPC);
4229
4230     return IsManagedCodeWorker(currentPC);
4231 }
4232
4233 //**************************************************************************
4234 NOINLINE // Make sure that the slow path with lock won't affect the fast path
4235 BOOL ExecutionManager::IsManagedCodeWithLock(PCODE currentPC)
4236 {
4237     CONTRACTL {
4238         NOTHROW;
4239         GC_NOTRIGGER;
4240         SO_TOLERANT;
4241     } CONTRACTL_END;
4242
4243     ReaderLockHolder rlh;
4244     return IsManagedCodeWorker(currentPC);
4245 }
4246
4247 //**************************************************************************
4248 BOOL ExecutionManager::IsManagedCode(PCODE currentPC, HostCallPreference hostCallPreference /*=AllowHostCalls*/, BOOL *pfFailedReaderLock /*=NULL*/)
4249 {
4250     CONTRACTL {
4251         NOTHROW;
4252         GC_NOTRIGGER;
4253         SO_TOLERANT;
4254     } CONTRACTL_END;
4255
4256 #ifdef DACCESS_COMPILE
4257     return IsManagedCode(currentPC);
4258 #else
4259     if (hostCallPreference == AllowHostCalls)
4260     {
4261         return IsManagedCode(currentPC);
4262     }
4263
4264     ReaderLockHolder rlh(hostCallPreference);
4265     if (!rlh.Acquired())
4266     {
4267         _ASSERTE(pfFailedReaderLock != NULL);
4268         *pfFailedReaderLock = TRUE;
4269         return FALSE;
4270     }
4271
4272     return IsManagedCodeWorker(currentPC);
4273 #endif
4274 }
4275
4276 //**************************************************************************
4277 // Assumes that the ExecutionManager reader/writer lock is taken or that 
4278 // it is safe not to take it.
4279 BOOL ExecutionManager::IsManagedCodeWorker(PCODE currentPC)
4280 {
4281     CONTRACTL {
4282         NOTHROW;
4283         GC_NOTRIGGER;
4284         SO_TOLERANT;
4285     } CONTRACTL_END;
4286
4287     // This may get called for arbitrary code addresses. Note that the lock is
4288     // taken over the call to JitCodeToMethodInfo too so that nobody pulls out 
4289     // the range section from underneath us.
4290
4291     RangeSection * pRS = GetRangeSection(currentPC);
4292     if (pRS == NULL)
4293         return FALSE;
4294
4295     if (pRS->flags & RangeSection::RANGE_SECTION_CODEHEAP)
4296     {
4297 #ifndef CROSSGEN_COMPILE
4298         // Typically if we find a Jit Manager we are inside a managed method,
4299         // but we could also be in a stub, so we check for that
4300         // as well; we don't consider stubs to be real managed code.
4301         TADDR start = dac_cast<PTR_EEJitManager>(pRS->pjit)->FindMethodCode(pRS, currentPC);
4302         if (start == NULL)
4303             return FALSE;
4304         CodeHeader * pCHdr = PTR_CodeHeader(start - sizeof(CodeHeader));
4305         if (!pCHdr->IsStubCodeBlock())
4306             return TRUE;
4307 #endif
4308     }
4309 #ifdef FEATURE_READYTORUN
4310     else
4311     if (pRS->flags & RangeSection::RANGE_SECTION_READYTORUN)
4312     {
4313         if (dac_cast<PTR_ReadyToRunJitManager>(pRS->pjit)->JitCodeToMethodInfo(pRS, currentPC, NULL, NULL))
4314             return TRUE;
4315     }
4316 #endif
4317     else
4318     {
4319 #ifdef FEATURE_PREJIT
4320         // Check that we are in the range with true managed code. We don't
4321         // consider jump stubs or precodes to be real managed code.
4322
4323         Module * pModule = dac_cast<PTR_Module>(pRS->pHeapListOrZapModule);
4324
4325         NGenLayoutInfo * pLayoutInfo = pModule->GetNGenLayoutInfo();
4326
4327         if (pLayoutInfo->m_CodeSections[0].IsInRange(currentPC) ||
4328             pLayoutInfo->m_CodeSections[1].IsInRange(currentPC) ||
4329             pLayoutInfo->m_CodeSections[2].IsInRange(currentPC))
4330         {
4331             return TRUE;
4332         }
4333 #endif
4334     }
4335
4336     return FALSE;
4337 }
4338
4339 #ifndef DACCESS_COMPILE
4340
4341 //**************************************************************************
4342 // Clear the caches for all JITs loaded.
4343 //
4344 void ExecutionManager::ClearCaches( void )
4345 {
4346     CONTRACTL {
4347         NOTHROW;
4348         GC_NOTRIGGER;
4349     } CONTRACTL_END;
4350
4351     GetEEJitManager()->ClearCache();
4352 }
4353
4354 //**************************************************************************
4355 // Check if caches for any JITs loaded need to be cleaned
4356 //
4357 BOOL ExecutionManager::IsCacheCleanupRequired( void )
4358 {
4359     CONTRACTL {
4360         NOTHROW;
4361         GC_NOTRIGGER;
4362     } CONTRACTL_END;
4363
4364     return GetEEJitManager()->IsCacheCleanupRequired();
4365 }
4366
4367 #ifndef FEATURE_MERGE_JIT_AND_ENGINE
4368 /*********************************************************************/
4369 // This static method returns the name of the jit dll
4370 //
4371 LPCWSTR ExecutionManager::GetJitName()
4372 {
4373     STANDARD_VM_CONTRACT;
4374
4375     LPCWSTR  pwzJitName = NULL;
4376
4377 #if !defined(CROSSGEN_COMPILE)
4378     if (g_CLRJITPath != nullptr)
4379     {
4380         const wchar_t* p = wcsrchr(g_CLRJITPath, DIRECTORY_SEPARATOR_CHAR_W);
4381         if (p != nullptr)
4382         {
4383             pwzJitName = p + 1; // Return just the filename, not the directory name
4384         }
4385         else
4386         {
4387             pwzJitName = g_CLRJITPath;
4388         }
4389     }
4390 #endif // !defined(CROSSGEN_COMPILE)
4391     
4392     if (NULL == pwzJitName)
4393     {
4394         pwzJitName = MAKEDLLNAME_W(W("clrjit"));
4395     }
4396
4397     return pwzJitName;
4398 }
4399 #endif // !FEATURE_MERGE_JIT_AND_ENGINE
4400
4401 #endif // #ifndef DACCESS_COMPILE
4402
4403 RangeSection* ExecutionManager::GetRangeSection(TADDR addr)
4404 {
4405     CONTRACTL {
4406         NOTHROW;
4407         HOST_NOCALLS;
4408         GC_NOTRIGGER;
4409         SO_TOLERANT;
4410         SUPPORTS_DAC;
4411     } CONTRACTL_END;
4412
4413     RangeSection * pHead = m_CodeRangeList;
4414
4415     if (pHead == NULL)
4416     {
4417         return NULL;
4418     }
4419
4420     RangeSection *pCurr = pHead;
4421     RangeSection *pLast = NULL;
4422
4423 #ifndef DACCESS_COMPILE
4424     RangeSection *pLastUsedRS = (pCurr != NULL) ? pCurr->pLastUsed : NULL;
4425
4426     if (pLastUsedRS != NULL)
4427     {
4428         // positive case
4429         if ((addr >= pLastUsedRS->LowAddress) &&
4430             (addr <  pLastUsedRS->HighAddress)   )
4431         {
4432             return pLastUsedRS;
4433         }
4434
4435         RangeSection * pNextAfterLastUsedRS = pLastUsedRS->pnext;
4436
4437         // negative case
4438         if ((addr <  pLastUsedRS->LowAddress) &&
4439             (pNextAfterLastUsedRS == NULL || addr >= pNextAfterLastUsedRS->HighAddress))
4440         {
4441             return NULL;
4442         }
4443     }
4444 #endif
4445
4446     while (pCurr != NULL)
4447     {
4448         // See if addr is in [pCurr->LowAddress .. pCurr->HighAddress)
4449         if (pCurr->LowAddress <= addr)
4450         {
4451             // Since we are sorted, once pCurr->HighAddress is less than addr
4452             // then all subsequent ones will also be lower, so we are done.
4453             if (addr >= pCurr->HighAddress)
4454             {
4455                 // we'll return NULL and put pLast into pLastUsed
4456                 pCurr = NULL;
4457             }
4458             else
4459             {
4460                 // addr must be in [pCurr->LowAddress .. pCurr->HighAddress)
4461                 _ASSERTE((pCurr->LowAddress <= addr) && (addr < pCurr->HighAddress));
4462
4463                 // Found the matching RangeSection
4464                 // we'll return pCurr and put it into pLastUsed
4465                 pLast = pCurr;
4466             }
4467
4468             break;
4469         }
4470         pLast = pCurr;
4471         pCurr = pCurr->pnext;
4472     }
4473
4474 #ifndef DACCESS_COMPILE
4475     // Cache pLast as pLastUsed in the head node,
4476     // unless we are on an MP system with many CPUs
4477     // where this sort of caching actually diminishes scaling during server GC
4478     // due to many processors writing to a common location
4479     if (g_SystemInfo.dwNumberOfProcessors < 4 || !GCHeapUtilities::IsServerHeap() || !GCHeapUtilities::IsGCInProgress())
4480         pHead->pLastUsed = pLast;
4481 #endif
4482
4483     return pCurr;
4484 }
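// Illustrative sketch (hypothetical helper, not used by the runtime): the walk
// above relies on each RangeSection covering a half-open address interval, so
// membership in a single node reduces to:
//
//     static bool RangeSectionContains(const RangeSection * pRS, TADDR addr)
//     {
//         return (pRS->LowAddress <= addr) && (addr < pRS->HighAddress);
//     }
//
// Because the list is sorted by LowAddress in descending order, the first node
// encountered with LowAddress <= addr is the only possible match.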
4485
4486 RangeSection* ExecutionManager::GetRangeSectionAndPrev(RangeSection *pHead, TADDR addr, RangeSection** ppPrev)
4487 {
4488     WRAPPER_NO_CONTRACT;
4489
4490     RangeSection *pCurr;
4491     RangeSection *pPrev;
4492     RangeSection *result = NULL;
4493
4494     for (pPrev = NULL,  pCurr = pHead; 
4495          pCurr != NULL; 
4496          pPrev = pCurr, pCurr = pCurr->pnext)
4497     {
4498         // See if addr is in [pCurr->LowAddress .. pCurr->HighAddress)
4499         if (pCurr->LowAddress > addr)
4500             continue;
4501
4502         if (addr >= pCurr->HighAddress)
4503             break;
4504         
4505         // addr must be in [pCurr->LowAddress .. pCurr->HighAddress)
4506         _ASSERTE((pCurr->LowAddress <= addr) && (addr < pCurr->HighAddress));
4507     
4508         // Found the matching RangeSection
4509         result = pCurr;
4510
4511         // Write back pPrev to ppPrev if it is non-null
4512         if (ppPrev != NULL)
4513             *ppPrev = pPrev;
4514
4515         break;
4516     }
4517     
4518     // If we failed to find a match write NULL to ppPrev if it is non-null
4519     if ((ppPrev != NULL) && (result == NULL))
4520     {
4521         *ppPrev = NULL;
4522     }
4523
4524     return result;
4525 }
4526
4527 /* static */
4528 PTR_Module ExecutionManager::FindZapModule(TADDR currentData)
4529 {
4530     CONTRACTL
4531     {
4532         NOTHROW;
4533         GC_NOTRIGGER;
4534         SO_TOLERANT;
4535         MODE_ANY;
4536         STATIC_CONTRACT_HOST_CALLS;
4537         SUPPORTS_DAC;
4538     }
4539     CONTRACTL_END;
4540
4541     ReaderLockHolder rlh;
4542
4543     RangeSection * pRS = GetRangeSection(currentData);
4544     if (pRS == NULL)
4545         return NULL;
4546
4547     if (pRS->flags & RangeSection::RANGE_SECTION_CODEHEAP)
4548         return NULL;
4549
4550 #ifdef FEATURE_READYTORUN
4551     if (pRS->flags & RangeSection::RANGE_SECTION_READYTORUN)
4552         return NULL;
4553 #endif
4554
4555     return dac_cast<PTR_Module>(pRS->pHeapListOrZapModule);
4556 }
4557
4558 /* static */
4559 PTR_Module ExecutionManager::FindReadyToRunModule(TADDR currentData)
4560 {
4561     CONTRACTL
4562     {
4563         NOTHROW;
4564         GC_NOTRIGGER;
4565         SO_TOLERANT;
4566         MODE_ANY;
4567         STATIC_CONTRACT_HOST_CALLS;
4568         SUPPORTS_DAC;
4569     }
4570     CONTRACTL_END;
4571
4572 #ifdef FEATURE_READYTORUN
4573     ReaderLockHolder rlh;
4574
4575     RangeSection * pRS = GetRangeSection(currentData);
4576     if (pRS == NULL)
4577         return NULL;
4578
4579     if (pRS->flags & RangeSection::RANGE_SECTION_CODEHEAP)
4580         return NULL;
4581
4582     if (pRS->flags & RangeSection::RANGE_SECTION_READYTORUN)
4583         return dac_cast<PTR_Module>(pRS->pHeapListOrZapModule);
4584
4585     return NULL;
4586 #else
4587     return NULL;
4588 #endif
4589 }
4590
4591
4592 /* static */
4593 PTR_Module ExecutionManager::FindModuleForGCRefMap(TADDR currentData)
4594 {
4595     CONTRACTL
4596     {
4597         NOTHROW;
4598         GC_NOTRIGGER;
4599         SO_TOLERANT;
4600         SUPPORTS_DAC;
4601     }
4602     CONTRACTL_END;
4603
4604     RangeSection * pRS = FindCodeRange(currentData, ExecutionManager::GetScanFlags());
4605     if (pRS == NULL)
4606         return NULL;
4607
4608     if (pRS->flags & RangeSection::RANGE_SECTION_CODEHEAP)
4609         return NULL;
4610
4611 #ifdef FEATURE_READYTORUN
4612     // RANGE_SECTION_READYTORUN is intentionally not filtered out here
4613 #endif
4614
4615     return dac_cast<PTR_Module>(pRS->pHeapListOrZapModule);
4616 }
4617
4618 #ifndef DACCESS_COMPILE
4619
4620 /* NGenMem depends on this entrypoint */
4621 NOINLINE 
4622 void ExecutionManager::AddCodeRange(TADDR          pStartRange,
4623                                     TADDR          pEndRange,
4624                                     IJitManager *  pJit,
4625                                     RangeSection::RangeSectionFlags flags,
4626                                     void *         pHp)
4627 {
4628     CONTRACTL {
4629         THROWS;
4630         GC_NOTRIGGER;
4631         PRECONDITION(CheckPointer(pJit));
4632         PRECONDITION(CheckPointer(pHp));
4633     } CONTRACTL_END;
4634
4635     AddRangeHelper(pStartRange,
4636                    pEndRange,
4637                    pJit,
4638                    flags,
4639                    dac_cast<TADDR>(pHp));
4640 }
4641
4642 #ifdef FEATURE_PREJIT
4643
4644 void ExecutionManager::AddNativeImageRange(TADDR StartRange, 
4645                                            SIZE_T Size, 
4646                                            Module * pModule)
4647 {
4648     CONTRACTL {
4649         THROWS;
4650         GC_NOTRIGGER;
4651         PRECONDITION(CheckPointer(pModule));
4652     } CONTRACTL_END;
4653
4654     AddRangeHelper(StartRange,
4655                    StartRange + Size,
4656                    GetNativeImageJitManager(),
4657                    RangeSection::RANGE_SECTION_NONE,
4658                    dac_cast<TADDR>(pModule));
4659 }
4660 #endif
4661
4662 void ExecutionManager::AddRangeHelper(TADDR          pStartRange,
4663                                       TADDR          pEndRange,
4664                                       IJitManager *  pJit,
4665                                       RangeSection::RangeSectionFlags flags,
4666                                       TADDR          pHeapListOrZapModule)
4667 {
4668     CONTRACTL {
4669         THROWS;
4670         GC_NOTRIGGER;
4671         HOST_CALLS;
4672         PRECONDITION(pStartRange < pEndRange);
4673         PRECONDITION(pHeapListOrZapModule != NULL);
4674     } CONTRACTL_END;
4675
4676     RangeSection *pnewrange = new RangeSection;
4677
4678     _ASSERTE(pEndRange > pStartRange);
4679
4680     pnewrange->LowAddress  = pStartRange;
4681     pnewrange->HighAddress = pEndRange;
4682     pnewrange->pjit        = pJit;
4683     pnewrange->pnext       = NULL;
4684     pnewrange->flags       = flags;
4685     pnewrange->pLastUsed   = NULL;
4686     pnewrange->pHeapListOrZapModule = pHeapListOrZapModule;
4687 #if defined(_TARGET_AMD64_)
4688     pnewrange->pUnwindInfoTable = NULL;
4689 #endif // defined(_TARGET_AMD64_)
4690     {
4691         CrstHolder ch(&m_RangeCrst); // Acquire the Crst before linking in a new RangeList
4692
4693         RangeSection * current  = m_CodeRangeList;
4694         RangeSection * previous = NULL;
4695
4696         if (current != NULL)
4697         {
4698             while (true)
4699             {
4700                 // Sort addresses top down so that more recently created ranges
4701                 // will populate the top of the list
4702                 if (pnewrange->LowAddress > current->LowAddress)
4703                 {
4704                     // Assert that the new range does not overlap the current one
4705                     _ASSERTE(pnewrange->LowAddress >= current->HighAddress);
4706                     pnewrange->pnext = current;
4707
4708                     if (previous == NULL) // insert new head
4709                     {
4710                         m_CodeRangeList = pnewrange;
4711                     }
4712                     else
4713                     { // insert in the middle
4714                         previous->pnext  = pnewrange;
4715                     }
4716                     break;
4717                 }
4718
4719                 RangeSection * next = current->pnext;
4720                 if (next == NULL) // insert at end of list
4721                 {
4722                     current->pnext = pnewrange;
4723                     break;
4724                 }
4725
4726                 // Continue walking the RangeSection list
4727                 previous = current;
4728                 current  = next;
4729             }
4730         }
4731         else
4732         {
4733             m_CodeRangeList = pnewrange;
4734         }
4735     }
4736 }
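// Illustrative sketch (hypothetical debug checker, not part of the runtime):
// the insertion above maintains the invariant that the list is sorted by
// LowAddress in descending order with no overlapping ranges, which the lookup
// in GetRangeSection depends on:
//
//     static void AssertRangeListSorted(RangeSection * pHead)
//     {
//         for (RangeSection * p = pHead; (p != NULL) && (p->pnext != NULL); p = p->pnext)
//         {
//             // Each newer (higher) range must begin at or above the end of the next one
//             _ASSERTE(p->LowAddress >= p->pnext->HighAddress);
//         }
//     }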
4737
4738 // Deletes a single range starting at pStartRange
4739 void ExecutionManager::DeleteRange(TADDR pStartRange)
4740 {
4741     CONTRACTL {
4742         NOTHROW; // If this becomes throwing, then revisit the queuing of deletes below.
4743         GC_NOTRIGGER;
4744     } CONTRACTL_END;
4745
4746     RangeSection *pCurr = NULL;
4747     {
4748         // Acquire the Crst before unlinking a RangeList.
4749         // NOTE: The Crst must be acquired BEFORE we grab the writer lock, as the
4750         // writer lock forces us into a forbid suspend thread region, and it's illegal
4751         // to enter a Crst after the forbid suspend thread region is entered
4752         CrstHolder ch(&m_RangeCrst);
4753
4754         // Acquire the WriterLock and prevent any readers from walking the RangeList.
4755         // This also forces us to enter a forbid suspend thread region, to prevent
4756         // hijacking profilers from grabbing this thread and walking it (the walk may
4757         // require the reader lock, which would cause a deadlock).
4758         WriterLockHolder wlh;
4759
4760         RangeSection *pPrev = NULL;
4761
4762         pCurr = GetRangeSectionAndPrev(m_CodeRangeList, pStartRange, &pPrev);
4763
4764         // pCurr points at the Range that needs to be unlinked from the RangeList
4765         if (pCurr != NULL)
4766         {
4767
4768             // If pPrev is NULL then the head of this list is to be deleted
4769             if (pPrev == NULL)
4770             {
4771                 m_CodeRangeList = pCurr->pnext;          
4772             }
4773             else
4774             {
4775                 _ASSERT(pPrev->pnext == pCurr);
4776
4777                 pPrev->pnext = pCurr->pnext;
4778             }
4779
4780             // Clear the cache pLastUsed in the head node (if any)
4781             RangeSection * head = m_CodeRangeList;
4782             if (head != NULL)
4783             {
4784                 head->pLastUsed = NULL;
4785             }
4786
4787             //
4788             // Cannot delete pCurr here because we own the WriterLock and if this is
4789             // a hosted scenario then the hosting api callback cannot occur in a forbid
4790             // suspend region, which the writer lock is.
4791             //
4792         }
4793     }
4794
4795     //
4796     // Now delete the node
4797     //
4798     if (pCurr != NULL)
4799     {
4800 #if defined(_TARGET_AMD64_)
4801         if (pCurr->pUnwindInfoTable != 0)
4802             delete pCurr->pUnwindInfoTable;
4803 #endif // defined(_TARGET_AMD64_)
4804         delete pCurr;
4805     }
4806 }
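// Summary of the lock ordering used by DeleteRange above (as described in its
// comments): the Crst must come first, the writer lock second, and the actual
// delete must wait until both are released:
//
//     CrstHolder ch(&m_RangeCrst);   // 1. range-list mutation lock
//     WriterLockHolder wlh;          // 2. writer lock (enters a forbid-suspend region)
//     /* unlink the node */          // 3. no Crst acquire and no delete in here
//     // 4. 'delete' only runs after both holders have been released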
4807
4808 #endif // #ifndef DACCESS_COMPILE
4809
4810 #ifdef DACCESS_COMPILE
4811
4812 void ExecutionManager::EnumRangeList(RangeSection* list,
4813                                      CLRDataEnumMemoryFlags flags)
4814 {
4815     while (list != NULL) 
4816     {
4817         // If we can't read the target memory, stop immediately so we don't work
4818         // with broken data.
4819         if (!DacEnumMemoryRegion(dac_cast<TADDR>(list), sizeof(*list)))
4820             break;
4821
4822         if (list->pjit.IsValid())
4823         {
4824             list->pjit->EnumMemoryRegions(flags);
4825         }
4826
4827         if (!(list->flags & RangeSection::RANGE_SECTION_CODEHEAP))
4828         {
4829             PTR_Module pModule = dac_cast<PTR_Module>(list->pHeapListOrZapModule);
4830
4831             if (pModule.IsValid())
4832             {
4833                 pModule->EnumMemoryRegions(flags, true);
4834             }
4835         }
4836
4837         list = list->pnext;
4838 #if defined (_DEBUG)
4839         // Test hook: when testing on debug builds, we want an easy way to test that the while loop
4840         // correctly terminates in the face of ridiculous stuff from the target.
4841         if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_DumpGeneration_IntentionallyCorruptDataFromTarget) == 1)
4842         {
4843             // Force us to struggle on with something bad.
4844             if (list == NULL)
4845             {
4846                 list = (RangeSection *)&flags;
4847             }
4848         }
4849 #endif // (_DEBUG)
4850
4851     }
4852 }
4853
4854 void ExecutionManager::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
4855 {
4856     STATIC_CONTRACT_HOST_CALLS;
4857
4858     ReaderLockHolder rlh;
4859
4860     //
4861     // Report the global data portions.
4862     //
4863
4864     m_CodeRangeList.EnumMem();
4865     m_pDefaultCodeMan.EnumMem();
4866
4867     //
4868     // Walk structures and report.
4869     //
4870
4871     if (m_CodeRangeList.IsValid())
4872     {
4873         EnumRangeList(m_CodeRangeList, flags);
4874     }
4875 }
4876 #endif // #ifdef DACCESS_COMPILE
4877
4878 #if !defined(DACCESS_COMPILE) && !defined(CROSSGEN_COMPILE)
4879
4880 void ExecutionManager::Unload(LoaderAllocator *pLoaderAllocator)
4881 {
4882     CONTRACTL {
4883         NOTHROW;
4884         GC_NOTRIGGER;
4885     } CONTRACTL_END;
4886
4887     // a size of 0 is a signal to Nirvana to flush the entire cache 
4888     FlushInstructionCache(GetCurrentProcess(),0,0);
4889
4890     /* StackwalkCacheEntry::EIP is an address into code. Since we are
4891     unloading the code, we need to invalidate the cache. Otherwise,
4892     it's possible that another appdomain might generate code at the very
4893     same address, and we might incorrectly think that the old
4894     StackwalkCacheEntry corresponds to it. So flush the cache.
4895     */
4896     StackwalkCache::Invalidate(pLoaderAllocator);
4897
4898     JumpStubCache * pJumpStubCache = (JumpStubCache *)pLoaderAllocator->m_pJumpStubCache;
4899     if (pJumpStubCache != NULL)
4900     {
4901         delete pJumpStubCache;
4902         pLoaderAllocator->m_pJumpStubCache = NULL;
4903     }
4904
4905     GetEEJitManager()->Unload(pLoaderAllocator);
4906 }
4907
4908 // This method is used by the JIT and the runtime for PreStubs. It will return
4909 // the address of a short jump thunk that will jump to the 'target' address.
4910 // It is only needed when the target architecture has a preferred call instruction
4911 // that doesn't actually span the full address space.  This is true for x64 where
4912 // the preferred call instruction is a 32-bit pc-rel call instruction. 
4913 // (This is also true on ARM64, but it is not true for x86.)
4914 //
4915 // For these architectures, in JITed code and in the prestub, we encode direct calls
4916 // using the preferred call instruction and we also try to ensure that the JITed
4917 // code is within the 32-bit pc-rel range of clr.dll to allow direct JIT helper calls.
4918 //
4919 // When the call target is too far away to encode using the preferred call instruction,
4920 // we will create a short code thunk that unconditionally jumps to the target address.
4921 // We call this jump thunk a "jumpStub" in the CLR code.
4922 // We have the requirement that the "jumpStub" that we create on demand be usable by
4923 // the preferred call instruction; this requires that on x64 the location in memory
4924 // where we create the "jumpStub" be within the 32-bit pc-rel range of the call that
4925 // needs it.
4926 //
4927 // The arguments to this method:
4928 //  pMD    - the MethodDesc for the current managed method in JITed code
4929 //           or for the target method for a PreStub
4930 //           It is required if calling from or to a dynamic method (LCG method)
4931 //  target - The call target address (this is the address that was too far to encode)
4932 //  loAddr
4933 //  hiAddr - The range of the address that we must place the jumpStub in, so that it
4934 //           can be used to encode the preferred call instruction.
4935 //  pLoaderAllocator 
4936 //         - The LoaderAllocator to use for allocations; this can be null.
4937 //           When it is null, then the pMD must be valid and is used to obtain
4938 //           the allocator.
4939 //
4940 // This method will either locate and return an existing jumpStub thunk that can be 
4941 // reused for this request because it meets all of the necessary requirements,
4942 // or it will allocate memory in the required region and create a new jumpStub that
4943 // meets all of the requirements necessary.
4944 //
4945 // Note that for dynamic methods (LCG methods) we cannot share the jumpStubs between
4946 // different methods. This is because we allow for the unloading (reclaiming) of
4947 // individual dynamic methods. And we associate the jumpStub memory allocated with 
4948 // the dynamic method that requested the jumpStub.
4949 //
4950
4951 PCODE ExecutionManager::jumpStub(MethodDesc* pMD, PCODE target,
4952                                  BYTE * loAddr,   BYTE * hiAddr,
4953                                  LoaderAllocator *pLoaderAllocator)
4954 {
4955     CONTRACT(PCODE) {
4956         THROWS;
4957         GC_NOTRIGGER;
4958         MODE_ANY;
4959         PRECONDITION(pLoaderAllocator != NULL || pMD != NULL);
4960         PRECONDITION(loAddr < hiAddr);
4961         POSTCONDITION(RETVAL != NULL);
4962     } CONTRACT_END;
4963
4964     PCODE jumpStub = NULL;
4965
4966     if (pLoaderAllocator == NULL)
4967         pLoaderAllocator = pMD->GetLoaderAllocatorForCode();
4968     _ASSERTE(pLoaderAllocator != NULL);
4969
4970     bool isLCG = pMD && pMD->IsLCGMethod();
4971
4972     CrstHolder ch(&m_JumpStubCrst);
4973
4974     JumpStubCache * pJumpStubCache = (JumpStubCache *)pLoaderAllocator->m_pJumpStubCache;
4975     if (pJumpStubCache == NULL)
4976     {
4977         pJumpStubCache = new JumpStubCache();
4978         pLoaderAllocator->m_pJumpStubCache = pJumpStubCache;
4979     }
4980
4981     if (isLCG)
4982     {
4983         // Increment counter of LCG jump stub lookup attempts
4984         m_LCG_JumpStubLookup++;
4985     }
4986     else
4987     {
4988         // Increment counter of normal jump stub lookup attempts
4989         m_normal_JumpStubLookup++;
4990     }
4991
4992     // search for a matching jumpstub in the jumpStubCache 
4993     //
4994     for (JumpStubTable::KeyIterator i = pJumpStubCache->m_Table.Begin(target), 
4995         end = pJumpStubCache->m_Table.End(target); i != end; i++)
4996     {
4997         jumpStub = i->m_jumpStub;
4998
4999         _ASSERTE(jumpStub != NULL);
5000
5001         // Is the matching entry within the requested range?
5002         if (((TADDR)loAddr <= jumpStub) && (jumpStub <= (TADDR)hiAddr))
5003         {
5004             RETURN(jumpStub);
5005         }
5006     }
5007
5008     // If we get here we need to create a new jump stub
5009     // add or change the jump stub table to point at the new one
5010     jumpStub = getNextJumpStub(pMD, target, loAddr, hiAddr, pLoaderAllocator);    // this statement can throw
5011
5012     _ASSERTE(((TADDR)loAddr <= jumpStub) && (jumpStub <= (TADDR)hiAddr));
5013
5014     LOG((LF_JIT, LL_INFO10000, "Add JumpStub to" FMT_ADDR "at" FMT_ADDR "\n",
5015             DBG_ADDR(target), DBG_ADDR(jumpStub) ));
5016
5017     RETURN(jumpStub);
5018 }
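// Illustrative sketch of what a jumpStub is (the AMD64 thunk shape shown here
// is an assumption; the actual bytes are produced by emitBackToBackJump for
// the target architecture): a tiny thunk carrying the full 64-bit target so
// that a 32-bit pc-rel call can reach it indirectly:
//
//     mov rax, <target>     ; 48 B8 <imm64>
//     jmp rax               ; FF E0
//
// The thunk must itself be allocated inside [loAddr, hiAddr], which is why the
// cache lookup above filters candidate stubs by that range.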
5019
5020 PCODE ExecutionManager::getNextJumpStub(MethodDesc* pMD, PCODE target,
5021                                         BYTE * loAddr, BYTE * hiAddr, LoaderAllocator *pLoaderAllocator)
5022 {
5023     CONTRACT(PCODE) {
5024         THROWS;
5025         GC_NOTRIGGER;
5026         PRECONDITION(pLoaderAllocator != NULL);
5027         PRECONDITION(m_JumpStubCrst.OwnedByCurrentThread());
5028         POSTCONDITION(RETVAL != NULL);
5029     } CONTRACT_END;
5030
5031     BYTE *                 jumpStub = NULL;
5032     bool                   isLCG    = pMD && pMD->IsLCGMethod();
5033     JumpStubBlockHeader ** ppHead   = isLCG ? &(pMD->AsDynamicMethodDesc()->GetLCGMethodResolver()->m_jumpStubBlock) : &(((JumpStubCache *)(pLoaderAllocator->m_pJumpStubCache))->m_pBlocks);
5034     JumpStubBlockHeader *  curBlock = *ppHead;
5035     
5036     // allocate a new jumpstub from 'curBlock' if it is not fully allocated
5037     //
5038     while (curBlock)
5039     {
5040         _ASSERTE(pLoaderAllocator == (isLCG ? curBlock->GetHostCodeHeap()->GetAllocator() : curBlock->GetLoaderAllocator()));
5041
5042         if (curBlock->m_used < curBlock->m_allocated)
5043         {
5044             jumpStub = (BYTE *) curBlock + sizeof(JumpStubBlockHeader) + ((size_t) curBlock->m_used * BACK_TO_BACK_JUMP_ALLOCATE_SIZE);
5045
5046             if ((loAddr <= jumpStub) && (jumpStub <= hiAddr))
5047             {
5048                 // We will update curBlock->m_used at "DONE"
5049                 goto DONE;
5050             }
5051         }
5052
5053         curBlock = curBlock->m_next;
5054     }
5055
5056     // If we get here then we need to allocate a new JumpStubBlock
5057
5058     if (isLCG)
5059     {
5060         // Increment counter of LCG jump stub block allocations
5061         m_LCG_JumpStubBlockAllocCount++;
5062     }
5063     else
5064     {
5065         // Increment counter of normal jump stub block allocations
5066         m_normal_JumpStubBlockAllocCount++;
5067     }
5068
5069     // allocJumpStubBlock will allocate from the LoaderCodeHeap for normal methods and HostCodeHeap for LCG methods
5070     // this can throw an OM exception
5071     curBlock = ExecutionManager::GetEEJitManager()->allocJumpStubBlock(pMD, DEFAULT_JUMPSTUBS_PER_BLOCK, loAddr, hiAddr, pLoaderAllocator);
5072
5073     jumpStub = (BYTE *) curBlock + sizeof(JumpStubBlockHeader) + ((size_t) curBlock->m_used * BACK_TO_BACK_JUMP_ALLOCATE_SIZE);
5074
5075     _ASSERTE((loAddr <= jumpStub) && (jumpStub <= hiAddr));
5076
5077     curBlock->m_next = *ppHead;
5078     *ppHead = curBlock;
5079
5080 DONE:
5081
5082     _ASSERTE((curBlock->m_used < curBlock->m_allocated));
5083
5084 #ifdef _TARGET_ARM64_
5085     // 8-byte alignment is required on ARM64
5086     _ASSERTE(((UINT_PTR)jumpStub & 7) == 0);
5087 #endif
5088
5089     emitBackToBackJump(jumpStub, (void*) target);
5090
5091     if (isLCG)
5092     {
5093         // Always get a new jump stub for an LCG method.
5094         // We don't share jump stubs among different LCG methods so that the jump stubs used
5095         // by every LCG method can be cleaned up individually.
5096         // There is not much benefit to sharing jump stubs within one LCG method anyway.
5097     }
5098     else
5099     {
5100         JumpStubCache * pJumpStubCache = (JumpStubCache *)pLoaderAllocator->m_pJumpStubCache;
5101         _ASSERTE(pJumpStubCache != NULL);
5102
5103         JumpStubEntry entry;
5104
5105         entry.m_target = target;
5106         entry.m_jumpStub = (PCODE)jumpStub;
5107
5108         pJumpStubCache->m_Table.Add(entry);
5109     }
5110
5111     curBlock->m_used++;    // record that we have used up one more jumpStub in the block
5112
5113     // Every time we create a new jumpStub thunk one of these counters is incremented
5114     if (isLCG)
5115     {
5116         // Increment counter of LCG unique jump stubs
5117         m_LCG_JumpStubUnique++;
5118     }
5119     else
5120     {
5121         // Increment counter of normal unique jump stubs 
5122         m_normal_JumpStubUnique++;
5123     }
5124
5125     // Is the 'curBlock' now completely full?
5126     if (curBlock->m_used == curBlock->m_allocated)
5127     {
5128         if (isLCG)
5129         {
5130             // Increment counter of LCG jump stub blocks that are full
5131             m_LCG_JumpStubBlockFullCount++;
5132
5133             // Log this "LCG JumpStubBlock filled" along with the four counter values
5134             STRESS_LOG4(LF_JIT, LL_INFO1000, "LCG JumpStubBlock filled - (%u, %u, %u, %u)\n",
5135                         m_LCG_JumpStubLookup, m_LCG_JumpStubUnique, 
5136                         m_LCG_JumpStubBlockAllocCount, m_LCG_JumpStubBlockFullCount);
5137         }
5138         else
5139         {
5140             // Increment counter of normal jump stub blocks that are full
5141             m_normal_JumpStubBlockFullCount++;
5142
5143             // Log this "normal JumpStubBlock filled" along with the four counter values
5144             STRESS_LOG4(LF_JIT, LL_INFO1000, "Normal JumpStubBlock filled - (%u, %u, %u, %u)\n",
5145                         m_normal_JumpStubLookup, m_normal_JumpStubUnique, 
5146                         m_normal_JumpStubBlockAllocCount, m_normal_JumpStubBlockFullCount);
5147
5148             if ((m_LCG_JumpStubLookup > 0) && ((m_normal_JumpStubBlockFullCount % 5) == 1))
5149             {
5150                 // Every 5th occurrence of the above we also
5151                 // log "LCG JumpStubBlock status" along with the four counter values
5152                 STRESS_LOG4(LF_JIT, LL_INFO1000, "LCG JumpStubBlock status - (%u, %u, %u, %u)\n",
5153                             m_LCG_JumpStubLookup, m_LCG_JumpStubUnique, 
5154                             m_LCG_JumpStubBlockAllocCount, m_LCG_JumpStubBlockFullCount);
5155             }
5156         }
5157     }
5158     
5159     RETURN((PCODE)jumpStub);
5160 }
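// Illustrative arithmetic (derived from the computation above): the i-th stub
// in a JumpStubBlockHeader lives at
//
//     stub(i) = (BYTE *)block + sizeof(JumpStubBlockHeader)
//                             + i * BACK_TO_BACK_JUMP_ALLOCATE_SIZE;
//
// so a block allocated with DEFAULT_JUMPSTUBS_PER_BLOCK (32) entries hands out
// stubs until m_used reaches m_allocated and a new block must be allocated.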
5161 #endif // !DACCESS_COMPILE && !CROSSGEN_COMPILE
5162
5163 #ifdef FEATURE_PREJIT
5164 //***************************************************************************************
5165 //***************************************************************************************
5166
5167 #ifndef DACCESS_COMPILE
5168
5169 NativeImageJitManager::NativeImageJitManager()
5170 {
5171     WRAPPER_NO_CONTRACT;
5172 }
5173
5174 #endif // #ifndef DACCESS_COMPILE
5175
5176 GCInfoToken NativeImageJitManager::GetGCInfoToken(const METHODTOKEN& MethodToken)
5177 {
5178     CONTRACTL {
5179         NOTHROW;
5180         GC_NOTRIGGER;
5181         HOST_NOCALLS;
5182         SUPPORTS_DAC;
5183     } CONTRACTL_END;
5184
5185     PTR_RUNTIME_FUNCTION pRuntimeFunction = dac_cast<PTR_RUNTIME_FUNCTION>(MethodToken.m_pCodeHeader);
5186     TADDR baseAddress = JitTokenToModuleBase(MethodToken);
5187
5188 #ifndef DACCESS_COMPILE
5189     if (g_IBCLogger.InstrEnabled())
5190     {
5191         PTR_NGenLayoutInfo pNgenLayout = JitTokenToZapModule(MethodToken)->GetNGenLayoutInfo();
5192         PTR_MethodDesc pMD = NativeUnwindInfoLookupTable::GetMethodDesc(pNgenLayout, pRuntimeFunction, baseAddress);
5193         g_IBCLogger.LogMethodGCInfoAccess(pMD);
5194     }
5195 #endif
5196
5197     SIZE_T nUnwindDataSize;
5198     PTR_VOID pUnwindData = GetUnwindDataBlob(baseAddress, pRuntimeFunction, &nUnwindDataSize);
5199
5200     // GCInfo immediately follows the unwind data
5201     // GCInfo from an NGEN-ed image is always the current version
5202     return { dac_cast<PTR_BYTE>(pUnwindData) + nUnwindDataSize, GCINFO_VERSION };
5203 }
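// Illustrative layout (as implied by the pointer arithmetic above): in an
// NGEN-ed image the unwind blob and the GC info for a method are laid out
// back to back:
//
//     [ unwind data : nUnwindDataSize bytes ][ GC info ... ]
//     ^ pUnwindData                          ^ GCInfoToken.Info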
5204
5205 unsigned NativeImageJitManager::InitializeEHEnumeration(const METHODTOKEN& MethodToken, EH_CLAUSE_ENUMERATOR* pEnumState)
5206 {
5207     CONTRACTL {
5208         NOTHROW;
5209         GC_NOTRIGGER;
5210     } CONTRACTL_END;
5211
5212     NGenLayoutInfo * pNgenLayout = JitTokenToZapModule(MethodToken)->GetNGenLayoutInfo();
5213
5214     // Early out if the method doesn't have the EH info bit set.
5215     if (!NativeUnwindInfoLookupTable::HasExceptionInfo(pNgenLayout, PTR_RUNTIME_FUNCTION(MethodToken.m_pCodeHeader)))
5216         return 0;
5217
5218     PTR_CORCOMPILE_EXCEPTION_LOOKUP_TABLE pExceptionLookupTable = dac_cast<PTR_CORCOMPILE_EXCEPTION_LOOKUP_TABLE>(pNgenLayout->m_ExceptionInfoLookupTable.StartAddress());
5219     _ASSERTE(pExceptionLookupTable != NULL);
5220
5221     SIZE_T size = pNgenLayout->m_ExceptionInfoLookupTable.Size();
5222     COUNT_T numLookupTableEntries = (COUNT_T)(size / sizeof(CORCOMPILE_EXCEPTION_LOOKUP_TABLE_ENTRY));
5223     // at least 2 entries (1 valid entry + 1 sentinel entry)
5224     _ASSERTE(numLookupTableEntries >= 2);
5225     
5226     DWORD methodStartRVA = (DWORD)(JitTokenToStartAddress(MethodToken) - JitTokenToModuleBase(MethodToken));
5227
5228     COUNT_T ehInfoSize = 0;
5229     DWORD exceptionInfoRVA = NativeExceptionInfoLookupTable::LookupExceptionInfoRVAForMethod(pExceptionLookupTable,
5230                                                                   numLookupTableEntries,
5231                                                                   methodStartRVA, 
5232                                                                   &ehInfoSize);
5233     if (exceptionInfoRVA == 0)
5234         return 0;
5235
5236     pEnumState->iCurrentPos = 0;
5237     pEnumState->pExceptionClauseArray = JitTokenToModuleBase(MethodToken) + exceptionInfoRVA;
5238
5239     return ehInfoSize / sizeof(CORCOMPILE_EXCEPTION_CLAUSE);
5240 }
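// Illustrative arithmetic (hypothetical sizes): if the lookup above yields
// ehInfoSize == 3 * sizeof(CORCOMPILE_EXCEPTION_CLAUSE), the enumerator is
// initialized to walk exactly
//
//     ehInfoSize / sizeof(CORCOMPILE_EXCEPTION_CLAUSE)   // == 3
//
// clauses out of the per-method clause array located at exceptionInfoRVA.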
5241
5242 PTR_EXCEPTION_CLAUSE_TOKEN NativeImageJitManager::GetNextEHClause(EH_CLAUSE_ENUMERATOR* pEnumState,
5243                               EE_ILEXCEPTION_CLAUSE* pEHClauseOut)
5244 {
5245     CONTRACTL {
5246         NOTHROW;
5247         GC_NOTRIGGER;
5248     } CONTRACTL_END;
5249
5250     unsigned iCurrentPos = pEnumState->iCurrentPos;
5251     pEnumState->iCurrentPos++;
5252
5253     CORCOMPILE_EXCEPTION_CLAUSE* pClause = &(dac_cast<PTR_CORCOMPILE_EXCEPTION_CLAUSE>(pEnumState->pExceptionClauseArray)[iCurrentPos]);
5254
5255     // Copy to the input parameter; this is a nice abstraction for the future:
5256     // if we want to compress the Clause encoding, we can do so without affecting the call sites
5257     pEHClauseOut->TryStartPC = pClause->TryStartPC; 
5258     pEHClauseOut->TryEndPC = pClause->TryEndPC; 
5259     pEHClauseOut->HandlerStartPC = pClause->HandlerStartPC; 
5260     pEHClauseOut->HandlerEndPC = pClause->HandlerEndPC;
5261     pEHClauseOut->Flags = pClause->Flags;
5262     pEHClauseOut->FilterOffset = pClause->FilterOffset;
5263
5264     return dac_cast<PTR_EXCEPTION_CLAUSE_TOKEN>(pClause);
5265 }
5266
5267 #ifndef DACCESS_COMPILE
5268
5269 TypeHandle NativeImageJitManager::ResolveEHClause(EE_ILEXCEPTION_CLAUSE* pEHClause,
5270                                               CrawlFrame* pCf)
5271 {
5272     CONTRACTL {
5273         THROWS;
5274         GC_TRIGGERS;
5275     } CONTRACTL_END;
5276
5277     _ASSERTE(NULL != pCf);
5278     _ASSERTE(NULL != pEHClause);
5279     _ASSERTE(IsTypedHandler(pEHClause));
5280
5281     MethodDesc *pMD = PTR_MethodDesc(pCf->GetFunction());
5282
5283     _ASSERTE(pMD != NULL);
5284
5285     Module* pModule = pMD->GetModule();
5286     PREFIX_ASSUME(pModule != NULL);
5287
5288     SigTypeContext typeContext(pMD);
5289     VarKind k = hasNoVars;    
5290             
5291     mdToken typeTok = pEHClause->ClassToken;
5292
5293     // In the vast majority of cases the code under the "if" below
5294     // will not be executed.
5295     //
5296     // First grab the representative instantiations.  For code
5297     // shared by multiple generic instantiations these are the
5298     // canonical (representative) instantiation.
5299     if (TypeFromToken(typeTok) == mdtTypeSpec)
5300     {
5301         PCCOR_SIGNATURE pSig;
5302         ULONG cSig;
5303         IfFailThrow(pModule->GetMDImport()->GetTypeSpecFromToken(typeTok, &pSig, &cSig));
5304         
5305         SigPointer psig(pSig, cSig);
5306         k = psig.IsPolyType(&typeContext);
5307
5308         // Grab the active class and method instantiation.  This exact instantiation is only
5309         // needed in the corner case of "generic" exception catching in shared
5310         // generic code.  We don't need the exact instantiation if the token
5311         // doesn't contain E_T_VAR or E_T_MVAR.        
5312         if ((k & hasSharableVarsMask) != 0)
5313         {
5314             Instantiation classInst;
5315             Instantiation methodInst;
5316             pCf->GetExactGenericInstantiations(&classInst,&methodInst);
5317             SigTypeContext::InitTypeContext(pMD,classInst, methodInst,&typeContext);
5318         }
5319     }
5320
5321     return ClassLoader::LoadTypeDefOrRefOrSpecThrowing(pModule, typeTok, &typeContext, 
5322                                                           ClassLoader::ReturnNullIfNotFound);
5323 }
5324
5325 #endif // #ifndef DACCESS_COMPILE
5326
5327 //-----------------------------------------------------------------------------
5328 // Ngen info manager
5329 //-----------------------------------------------------------------------------
5330 BOOL NativeImageJitManager::GetBoundariesAndVars(
5331         const DebugInfoRequest & request,
5332         IN FP_IDS_NEW fpNew, IN void * pNewData,
5333         OUT ULONG32 * pcMap, 
5334         OUT ICorDebugInfo::OffsetMapping **ppMap,
5335         OUT ULONG32 * pcVars, 
5336         OUT ICorDebugInfo::NativeVarInfo **ppVars)
5337 {
5338     CONTRACTL {
5339         THROWS;       // on OOM.
5340         GC_NOTRIGGER; // getting vars shouldn't trigger
5341         SUPPORTS_DAC;
5342     } CONTRACTL_END;
5343
5344     // We want the module that the code is instantiated in, not necessarily the one
5345     // that it was declared in. This only matters for ngen-generics.        
5346     MethodDesc * pMD = request.GetMD();
5347     Module * pModule = pMD->GetZapModule();
5348     PREFIX_ASSUME(pModule != NULL);
5349
5350     PTR_BYTE pDebugInfo = pModule->GetNativeDebugInfo(pMD);
5351
5352     // No header created, which means no jit information is available.
5353     if (pDebugInfo == NULL)
5354         return FALSE;
5355
5356     // Uncompress. This allocates memory and may throw.
5357     CompressDebugInfo::RestoreBoundariesAndVars(
5358         fpNew, pNewData, // allocators
5359         pDebugInfo,      // input
5360         pcMap, ppMap,
5361         pcVars, ppVars); // output
5362
5363     return TRUE;
5364 }
5365
5366 #ifdef DACCESS_COMPILE
5367 //
5368 // Need to write out debug info
5369 //
5370 void NativeImageJitManager::EnumMemoryRegionsForMethodDebugInfo(CLRDataEnumMemoryFlags flags, MethodDesc * pMD)
5371 {
5372     SUPPORTS_DAC;
5373
5374     Module * pModule = pMD->GetZapModule();
5375     PREFIX_ASSUME(pModule != NULL);
5376     PTR_BYTE pDebugInfo = pModule->GetNativeDebugInfo(pMD);
5377
5378     if (pDebugInfo != NULL)
5379     {
5380         CompressDebugInfo::EnumMemoryRegions(flags, pDebugInfo);
5381     }
5382 }
5383 #endif
5384
5385 PCODE NativeImageJitManager::GetCodeAddressForRelOffset(const METHODTOKEN& MethodToken, DWORD relOffset)
5386 {
5387     WRAPPER_NO_CONTRACT;
5388
5389     MethodRegionInfo methodRegionInfo;
5390     JitTokenToMethodRegionInfo(MethodToken, &methodRegionInfo);
5391
5392     if (relOffset < methodRegionInfo.hotSize)
5393         return methodRegionInfo.hotStartAddress + relOffset;
5394
5395     SIZE_T coldOffset = relOffset - methodRegionInfo.hotSize;
5396     _ASSERTE(coldOffset < methodRegionInfo.coldSize);
5397     return methodRegionInfo.coldStartAddress + coldOffset;
5398 }
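// Illustrative example (hypothetical sizes): with hotSize == 0x100, a
// relOffset of 0x40 maps to hotStartAddress + 0x40, while a relOffset of
// 0x120 lands in the cold region at coldStartAddress + (0x120 - 0x100).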
5399
5400 BOOL NativeImageJitManager::JitCodeToMethodInfo(RangeSection * pRangeSection,
5401                                             PCODE        currentPC,
5402                                             MethodDesc** ppMethodDesc,
5403                                             EECodeInfo * pCodeInfo)
5404 {
5405     CONTRACTL {
5406         SO_TOLERANT;
5407         NOTHROW;
5408         GC_NOTRIGGER;
5409         SUPPORTS_DAC;
5410     } CONTRACTL_END;
5411
5412     TADDR currentInstr = PCODEToPINSTR(currentPC);
5413
5414     Module * pModule = dac_cast<PTR_Module>(pRangeSection->pHeapListOrZapModule);
5415
5416     NGenLayoutInfo * pLayoutInfo = pModule->GetNGenLayoutInfo();
5417     DWORD iRange = 0;
5418
5419     if (pLayoutInfo->m_CodeSections[0].IsInRange(currentInstr))
5420     {
5421         iRange = 0;
5422     }
5423     else
5424     if (pLayoutInfo->m_CodeSections[1].IsInRange(currentInstr))
5425     {
5426         iRange = 1;
5427     }
5428     else
5429     if (pLayoutInfo->m_CodeSections[2].IsInRange(currentInstr))
5430     {
5431         iRange = 2;
5432     }
5433     else
5434     {
5435         return FALSE;
5436     }
5437
5438     TADDR ImageBase = pRangeSection->LowAddress;
5439
5440     DWORD RelativePc = (DWORD)(currentInstr - ImageBase);
5441
5442     PTR_RUNTIME_FUNCTION FunctionEntry;
5443
5444     if (iRange == 2)
5445     {
5446         int ColdMethodIndex = NativeUnwindInfoLookupTable::LookupUnwindInfoForMethod(RelativePc,
5447                                                                                pLayoutInfo->m_pRuntimeFunctions[2],
5448                                                                                0,
5449                                                                                pLayoutInfo->m_nRuntimeFunctions[2] - 1);
5450
5451         if (ColdMethodIndex < 0)
5452             return FALSE;
5453
5454 #ifdef WIN64EXCEPTIONS
5455         // Save the raw entry
5456         int RawColdMethodIndex = ColdMethodIndex;
5457
5458         PTR_CORCOMPILE_COLD_METHOD_ENTRY pColdCodeMap = pLayoutInfo->m_ColdCodeMap;
5459
5460         while (pColdCodeMap[ColdMethodIndex].mainFunctionEntryRVA == 0)
5461             ColdMethodIndex--;
5462
5463         FunctionEntry = dac_cast<PTR_RUNTIME_FUNCTION>(ImageBase + pColdCodeMap[ColdMethodIndex].mainFunctionEntryRVA);
5464 #else
5465         DWORD ColdUnwindData = pLayoutInfo->m_pRuntimeFunctions[2][ColdMethodIndex].UnwindData;
5466         _ASSERTE((ColdUnwindData & RUNTIME_FUNCTION_INDIRECT) != 0);
5467         FunctionEntry = dac_cast<PTR_RUNTIME_FUNCTION>(ImageBase + (ColdUnwindData & ~RUNTIME_FUNCTION_INDIRECT));
5468 #endif
5469
5470         if (ppMethodDesc)
5471         {
5472             DWORD methodDescRVA;
5473
5474             COUNT_T iIndex = (COUNT_T)(FunctionEntry - pLayoutInfo->m_pRuntimeFunctions[0]);
5475             if (iIndex >= pLayoutInfo->m_nRuntimeFunctions[0])
5476             {
5477                 iIndex = (COUNT_T)(FunctionEntry - pLayoutInfo->m_pRuntimeFunctions[1]);
5478                 _ASSERTE(iIndex < pLayoutInfo->m_nRuntimeFunctions[1]);
5479                 methodDescRVA = pLayoutInfo->m_MethodDescs[1][iIndex];
5480             }
5481             else
5482             {
5483                 methodDescRVA = pLayoutInfo->m_MethodDescs[0][iIndex];
5484             }
5485             _ASSERTE(methodDescRVA != NULL);
5486
5487             // Note that the MethodDesc does not have to be restored. (It happens when we are called
5488             // from SetupGcCoverageForNativeMethod.)
5489             *ppMethodDesc = PTR_MethodDesc((methodDescRVA & ~HAS_EXCEPTION_INFO_MASK) + ImageBase);
5490         }
5491
5492         if (pCodeInfo)
5493         {
5494             PTR_RUNTIME_FUNCTION ColdFunctionTable = pLayoutInfo->m_pRuntimeFunctions[2];
5495
5496             PTR_RUNTIME_FUNCTION ColdFunctionEntry =  ColdFunctionTable + ColdMethodIndex;
5497             DWORD coldCodeOffset = (DWORD)(RelativePc - RUNTIME_FUNCTION__BeginAddress(ColdFunctionEntry));
5498             pCodeInfo->m_relOffset = pLayoutInfo->m_ColdCodeMap[ColdMethodIndex].hotCodeSize + coldCodeOffset;
5499
5500             // We are using RUNTIME_FUNCTION as METHODTOKEN
5501             pCodeInfo->m_methodToken = METHODTOKEN(pRangeSection, dac_cast<TADDR>(FunctionEntry));
5502
5503 #ifdef WIN64EXCEPTIONS
5504             PTR_RUNTIME_FUNCTION RawColdFunctionEntry = ColdFunctionTable + RawColdMethodIndex;
5505 #ifdef _TARGET_AMD64_
5506             if ((RawColdFunctionEntry->UnwindData & RUNTIME_FUNCTION_INDIRECT) != 0)
5507             {
5508                 RawColdFunctionEntry = PTR_RUNTIME_FUNCTION(ImageBase + (RawColdFunctionEntry->UnwindData & ~RUNTIME_FUNCTION_INDIRECT));
5509             }
5510 #endif // _TARGET_AMD64_
5511             pCodeInfo->m_pFunctionEntry = RawColdFunctionEntry;
5512 #endif
5513         }
5514     }
5515     else
5516     {
5517         PTR_DWORD pRuntimeFunctionLookupTable = dac_cast<PTR_DWORD>(pLayoutInfo->m_UnwindInfoLookupTable[iRange]);
5518
5519         _ASSERTE(pRuntimeFunctionLookupTable != NULL);
5520
5521         DWORD RelativeToCodeStart = (DWORD)(currentInstr - dac_cast<TADDR>(pLayoutInfo->m_CodeSections[iRange].StartAddress()));
5522         COUNT_T iStrideIndex = RelativeToCodeStart / RUNTIME_FUNCTION_LOOKUP_STRIDE;
5523
5524         // The lookup table may not be big enough to cover the entire code range if there was padding inserted during NGen image layout.
5525         // The last lookup table entry covers the rest of the code range in this case.
5526         if (iStrideIndex >= pLayoutInfo->m_UnwindInfoLookupTableEntryCount[iRange])
5527             iStrideIndex = pLayoutInfo->m_UnwindInfoLookupTableEntryCount[iRange] - 1;
5528
5529         int Low = pRuntimeFunctionLookupTable[iStrideIndex];
5530         int High = pRuntimeFunctionLookupTable[iStrideIndex+1];
5531
5532         PTR_RUNTIME_FUNCTION FunctionTable = pLayoutInfo->m_pRuntimeFunctions[iRange];
5533         PTR_DWORD pMethodDescs = pLayoutInfo->m_MethodDescs[iRange];
5534
5535         int MethodIndex = NativeUnwindInfoLookupTable::LookupUnwindInfoForMethod(RelativePc,
5536                                                                                FunctionTable,
5537                                                                                Low,
5538                                                                                High);
5539
5540         if (MethodIndex < 0)
5541             return FALSE;
5542
5543 #ifdef WIN64EXCEPTIONS
5544         // Save the raw entry
5545         PTR_RUNTIME_FUNCTION RawFunctionEntry = FunctionTable + MethodIndex;
5546
5547         // Skip funclets to get the method desc
5548         while (pMethodDescs[MethodIndex] == 0)
5549             MethodIndex--;
5550 #endif
5551
5552         FunctionEntry = FunctionTable + MethodIndex;
5553
5554         if (ppMethodDesc)
5555         {
5556             DWORD methodDescRVA = pMethodDescs[MethodIndex];
5557             _ASSERTE(methodDescRVA != NULL);
5558
5559             // Note that the MethodDesc does not have to be restored. (It happens when we are called
5560             // from SetupGcCoverageForNativeMethod.)
5561             *ppMethodDesc = PTR_MethodDesc((methodDescRVA & ~HAS_EXCEPTION_INFO_MASK) + ImageBase);
5562
5563             // We are likely executing the code already or going to execute it soon. However, there are a few cases like
5564             // code:MethodTable::GetMethodDescForSlot where it is not the case. Log the code access here to prevent these
5565             // cases from touching cold code maps.
5566             g_IBCLogger.LogMethodCodeAccess(*ppMethodDesc);
5567         }
5568
5569         // Get the function entry that corresponds to the real method desc.
5570         _ASSERTE((RelativePc >= RUNTIME_FUNCTION__BeginAddress(FunctionEntry)));
5571     
5572         if (pCodeInfo)
5573         {
5574             pCodeInfo->m_relOffset = (DWORD)
5575                 (RelativePc - RUNTIME_FUNCTION__BeginAddress(FunctionEntry));
5576
5577             // We are using RUNTIME_FUNCTION as METHODTOKEN
5578             pCodeInfo->m_methodToken = METHODTOKEN(pRangeSection, dac_cast<TADDR>(FunctionEntry));
5579
5580 #ifdef WIN64EXCEPTIONS
5581             AMD64_ONLY(_ASSERTE((RawFunctionEntry->UnwindData & RUNTIME_FUNCTION_INDIRECT) == 0));
5582             pCodeInfo->m_pFunctionEntry = RawFunctionEntry;
5583 #endif
5584         }
5585     }
5586
5587     return TRUE;
5588 }
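// Illustrative example of the stride lookup above (stride value hypothetical):
// if RUNTIME_FUNCTION_LOOKUP_STRIDE were 0x2000, a code offset of 0x5000 into
// the section would give
//
//     iStrideIndex = 0x5000 / 0x2000 = 2
//
// and the binary search over the RUNTIME_FUNCTION table would be narrowed to
// the index range [pRuntimeFunctionLookupTable[2],
// pRuntimeFunctionLookupTable[3]] instead of the whole table.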
5589
5590 #if defined(WIN64EXCEPTIONS)
5591 PTR_RUNTIME_FUNCTION NativeImageJitManager::LazyGetFunctionEntry(EECodeInfo * pCodeInfo)
5592 {
5593     CONTRACTL {
5594         NOTHROW;
5595         GC_NOTRIGGER;
5596     } CONTRACTL_END;
5597
5598     if (!pCodeInfo->IsValid())
5599     {
5600         return NULL;
5601     }
5602
5603     // code:NativeImageJitManager::JitCodeToMethodInfo computes PTR_RUNTIME_FUNCTION eagerly. This path is only 
5604     // reachable via EECodeInfo::GetMainFunctionInfo, and so we can just return the main entry.
5605     _ASSERTE(pCodeInfo->GetRelOffset() == 0);
5606
5607     return dac_cast<PTR_RUNTIME_FUNCTION>(pCodeInfo->GetMethodToken().m_pCodeHeader);
5608 }
5609
5610 TADDR NativeImageJitManager::GetFuncletStartAddress(EECodeInfo * pCodeInfo)
5611 {
5612     LIMITED_METHOD_DAC_CONTRACT;
5613
5614 #if defined(_TARGET_ARM_) || defined(_TARGET_ARM64_)
5615     NGenLayoutInfo * pLayoutInfo = JitTokenToZapModule(pCodeInfo->GetMethodToken())->GetNGenLayoutInfo();
5616
5617     if (pLayoutInfo->m_CodeSections[2].IsInRange(pCodeInfo->GetCodeAddress()))
5618     {
5619         // If the address is in the cold section, then we assume it is cold main function
5620         // code, NOT a funclet. So, don't do the backward walk: just return the start address
5621         // of the main function.
5622         // @ARMTODO: Handle hot/cold splitting with EH funclets
5623         return pCodeInfo->GetStartAddress();
5624     }
5625 #endif
5626
5627     return IJitManager::GetFuncletStartAddress(pCodeInfo);
5628 }
5629
5630 static void GetFuncletStartOffsetsHelper(PCODE pCodeStart, SIZE_T size, SIZE_T ofsAdj,
5631                                          PTR_RUNTIME_FUNCTION pFunctionEntry, TADDR moduleBase,
5632                                          DWORD * pnFunclets, DWORD* pStartFuncletOffsets, DWORD dwLength)
5633 {
5634     _ASSERTE(FitsInU4((pCodeStart + size) - moduleBase));
5635     DWORD endAddress = (DWORD)((pCodeStart + size) - moduleBase);
5636
5637     // Entries are sorted and terminated by sentinel value (DWORD)-1
5638     for ( ; RUNTIME_FUNCTION__BeginAddress(pFunctionEntry) < endAddress; pFunctionEntry++)
5639     {
5640 #ifdef _TARGET_AMD64_
5641         _ASSERTE((pFunctionEntry->UnwindData & RUNTIME_FUNCTION_INDIRECT) == 0);
5642 #endif
5643
5644 #if defined(EXCEPTION_DATA_SUPPORTS_FUNCTION_FRAGMENTS)
5645         if (IsFunctionFragment(moduleBase, pFunctionEntry))
5646         {
5647             // This is a fragment (not the funclet beginning); skip it
5648             continue;
5649         }
5650 #endif // EXCEPTION_DATA_SUPPORTS_FUNCTION_FRAGMENTS
5651
5652         if (*pnFunclets < dwLength)
5653         {
5654             TADDR funcletStartAddress = (moduleBase + RUNTIME_FUNCTION__BeginAddress(pFunctionEntry)) + ofsAdj;
5655             _ASSERTE(FitsInU4(funcletStartAddress - pCodeStart));
5656             pStartFuncletOffsets[*pnFunclets] = (DWORD)(funcletStartAddress - pCodeStart);
5657         }
5658         (*pnFunclets)++;
5659     }
5660 }
5661
5662 DWORD NativeImageJitManager::GetFuncletStartOffsets(const METHODTOKEN& MethodToken, DWORD* pStartFuncletOffsets, DWORD dwLength)
5663 {
5664     CONTRACTL
5665     {
5666         NOTHROW;
5667         GC_NOTRIGGER;
5668     }
5669     CONTRACTL_END;
5670
5671     PTR_RUNTIME_FUNCTION pFirstFuncletFunctionEntry = dac_cast<PTR_RUNTIME_FUNCTION>(MethodToken.m_pCodeHeader) + 1;
5672     
5673     TADDR moduleBase = JitTokenToModuleBase(MethodToken);
5674     DWORD nFunclets = 0;
5675     MethodRegionInfo regionInfo;
5676     JitTokenToMethodRegionInfo(MethodToken, &regionInfo);
5677
5678     // pFirstFuncletFunctionEntry will work for ARM when passed to GetFuncletStartOffsetsHelper()
5679     // even if it is a fragment of the main body and not a RUNTIME_FUNCTION for the beginning
5680     // of the first hot funclet, because GetFuncletStartOffsetsHelper() will skip all the function
5681     // fragments until the first funclet, if any, is found.
5682
5683     GetFuncletStartOffsetsHelper(regionInfo.hotStartAddress, regionInfo.hotSize, 0,
5684         pFirstFuncletFunctionEntry, moduleBase,
5685         &nFunclets, pStartFuncletOffsets, dwLength);
5686
5687     // There are no funclets in cold section on ARM yet
5688     // @ARMTODO: support hot/cold splitting in functions with EH
5689 #if !defined(_TARGET_ARM_) && !defined(_TARGET_ARM64_)
5690     if (regionInfo.coldSize != 0)
5691     {
5692         NGenLayoutInfo * pLayoutInfo = JitTokenToZapModule(MethodToken)->GetNGenLayoutInfo();
5693     
5694         int iColdMethodIndex = NativeUnwindInfoLookupTable::LookupUnwindInfoForMethod(
5695                                                     (DWORD)(regionInfo.coldStartAddress - moduleBase),
5696                                                     pLayoutInfo->m_pRuntimeFunctions[2],
5697                                                     0,
5698                                                     pLayoutInfo->m_nRuntimeFunctions[2] - 1);
5699
5700         PTR_RUNTIME_FUNCTION pFunctionEntry = pLayoutInfo->m_pRuntimeFunctions[2] + iColdMethodIndex;
5701
5702         _ASSERTE(regionInfo.coldStartAddress == moduleBase + RUNTIME_FUNCTION__BeginAddress(pFunctionEntry));
5703
5704 #ifdef _TARGET_AMD64_
5705         // Skip cold part of the method body
5706         if ((pFunctionEntry->UnwindData & RUNTIME_FUNCTION_INDIRECT) != 0)
5707             pFunctionEntry++;
5708 #endif
5709
5710         GetFuncletStartOffsetsHelper(regionInfo.coldStartAddress, regionInfo.coldSize, regionInfo.hotSize,
5711             pFunctionEntry, moduleBase,
5712             &nFunclets, pStartFuncletOffsets, dwLength);
5713     }
5714 #endif // !_TARGET_ARM_ && !_TARGET_ARM64_
5715
5716     return nFunclets;
5717 }
5718
5719 BOOL NativeImageJitManager::IsFilterFunclet(EECodeInfo * pCodeInfo)
5720 {
5721     CONTRACTL {
5722         NOTHROW;
5723         GC_NOTRIGGER;
5724         MODE_ANY;
5725     }
5726     CONTRACTL_END;
5727
5728     if (!pCodeInfo->IsFunclet())
5729         return FALSE;
5730
5731     //
5732     // The generic IsFilterFunclet implementation touches the exception handling tables.
5733     // That is bad for the working set because it is sometimes called during GC stackwalks.
5734     // The optimized version for native images does not touch exception handling tables.
5735     //
5736
5737     NGenLayoutInfo * pLayoutInfo = JitTokenToZapModule(pCodeInfo->GetMethodToken())->GetNGenLayoutInfo();
5738
5739     SIZE_T size;
5740     PTR_VOID pUnwindData = GetUnwindDataBlob(pCodeInfo->GetModuleBase(), pCodeInfo->GetFunctionEntry(), &size);
5741     _ASSERTE(pUnwindData != NULL);
5742
5743     // Personality routine is always the last element of the unwind data
5744     DWORD rvaPersonalityRoutine = *(dac_cast<PTR_DWORD>(dac_cast<TADDR>(pUnwindData) + size) - 1);
5745
5746     BOOL fRet = (pLayoutInfo->m_rvaFilterPersonalityRoutine == rvaPersonalityRoutine);
5747
5748     // Verify that the optimized implementation is in sync with the slow implementation
5749     _ASSERTE(fRet == IJitManager::IsFilterFunclet(pCodeInfo));
5750
5751     return fRet;
5752 }
5753
5754 #endif  // WIN64EXCEPTIONS
5755  
5756 StubCodeBlockKind NativeImageJitManager::GetStubCodeBlockKind(RangeSection * pRangeSection, PCODE currentPC)
5757 {
5758     CONTRACTL
5759     {
5760         NOTHROW;
5761         GC_NOTRIGGER;
5762         SO_TOLERANT;
5763         MODE_ANY;
5764     }
5765     CONTRACTL_END;
5766
5767     Module * pZapModule = dac_cast<PTR_Module>(pRangeSection->pHeapListOrZapModule);
5768
5769     if (pZapModule->IsZappedPrecode(currentPC))
5770     {
5771         return STUB_CODE_BLOCK_PRECODE;
5772     }
5773
5774     NGenLayoutInfo * pLayoutInfo = pZapModule->GetNGenLayoutInfo();
5775     _ASSERTE(pLayoutInfo != NULL);
5776
5777     if (pLayoutInfo->m_JumpStubs.IsInRange(currentPC))
5778     {
5779         return STUB_CODE_BLOCK_JUMPSTUB;
5780     }
5781
5782     if (pLayoutInfo->m_StubLinkStubs.IsInRange(currentPC))
5783     {
5784         return STUB_CODE_BLOCK_STUBLINK;
5785     }
5786
5787     if (pLayoutInfo->m_VirtualMethodThunks.IsInRange(currentPC))
5788     {
5789         return STUB_CODE_BLOCK_VIRTUAL_METHOD_THUNK;
5790     }
5791
5792     if (pLayoutInfo->m_ExternalMethodThunks.IsInRange(currentPC))
5793     {
5794         return STUB_CODE_BLOCK_EXTERNAL_METHOD_THUNK;
5795     }
5796
5797     return STUB_CODE_BLOCK_UNKNOWN;
5798 }
5799
5800 PTR_Module NativeImageJitManager::JitTokenToZapModule(const METHODTOKEN& MethodToken)
5801 {
5802     LIMITED_METHOD_DAC_CONTRACT;
5803     return dac_cast<PTR_Module>(MethodToken.m_pRangeSection->pHeapListOrZapModule);
5804 }
5805 void NativeImageJitManager::JitTokenToMethodRegionInfo(const METHODTOKEN& MethodToken, 
5806                                                    MethodRegionInfo * methodRegionInfo)
5807 {
5808     CONTRACTL {
5809         NOTHROW;
5810         GC_NOTRIGGER;
5811         SUPPORTS_DAC;
5812     } CONTRACTL_END;
5813
5814     _ASSERTE(methodRegionInfo != NULL);
5815
5816     //
5817     // Initialize methodRegionInfo assuming that the method is entirely hot.  This is the common
5818     // case (either the binary is not procedure split or the current method is all hot).  We can
5819     // adjust these values later if necessary.
5820     //
5821
5822     methodRegionInfo->hotStartAddress  = JitTokenToStartAddress(MethodToken);
5823     methodRegionInfo->hotSize          = GetCodeManager()->GetFunctionSize(GetGCInfoToken(MethodToken));
5824     methodRegionInfo->coldStartAddress = 0;
5825     methodRegionInfo->coldSize         = 0;
5826
5827     RangeSection *rangeSection = MethodToken.m_pRangeSection;
5828     PREFIX_ASSUME(rangeSection != NULL);
5829
5830     Module * pModule = dac_cast<PTR_Module>(rangeSection->pHeapListOrZapModule);
5831
5832     NGenLayoutInfo * pLayoutInfo = pModule->GetNGenLayoutInfo();
5833
5834     //
5835     // If this module is not procedure split, then we're done.
5836     //
5837     if (pLayoutInfo->m_CodeSections[2].Size() == 0)
5838         return;
5839
5840     //
5841     // Perform a binary search in the cold range section until we find our method
5842     //
5843
5844     TADDR ImageBase = rangeSection->LowAddress;
5845
5846     int Low = 0;
5847     int High = pLayoutInfo->m_nRuntimeFunctions[2] - 1;
5848
5849     PTR_RUNTIME_FUNCTION pRuntimeFunctionTable = pLayoutInfo->m_pRuntimeFunctions[2];
5850     PTR_CORCOMPILE_COLD_METHOD_ENTRY pColdCodeMap = pLayoutInfo->m_ColdCodeMap;
5851
5852     while (Low <= High)
5853     {
5854         int Middle = Low + (High - Low) / 2;
5855
5856         int ColdMethodIndex = Middle;
5857
5858         PTR_RUNTIME_FUNCTION FunctionEntry;
5859
5860 #ifdef WIN64EXCEPTIONS
5861         while (pColdCodeMap[ColdMethodIndex].mainFunctionEntryRVA == 0)
5862             ColdMethodIndex--;
5863
5864         FunctionEntry = dac_cast<PTR_RUNTIME_FUNCTION>(ImageBase + pColdCodeMap[ColdMethodIndex].mainFunctionEntryRVA);
5865 #else
5866         DWORD ColdUnwindData = pRuntimeFunctionTable[ColdMethodIndex].UnwindData;
5867         _ASSERTE((ColdUnwindData & RUNTIME_FUNCTION_INDIRECT) != 0);
5868         FunctionEntry = dac_cast<PTR_RUNTIME_FUNCTION>(ImageBase + (ColdUnwindData & ~RUNTIME_FUNCTION_INDIRECT));
5869 #endif
5870
5871         if (FunctionEntry == dac_cast<PTR_RUNTIME_FUNCTION>(MethodToken.m_pCodeHeader))
5872         {
5873             PTR_RUNTIME_FUNCTION ColdFunctionEntry = pRuntimeFunctionTable + ColdMethodIndex;
5874
5875             methodRegionInfo->coldStartAddress = ImageBase + RUNTIME_FUNCTION__BeginAddress(ColdFunctionEntry);
5876
5877             //
5878             // At this point methodRegionInfo->hotSize is set to the total size of
5879             // the method obtained from the GC info (we set that in the init code above).
5880             // Use that and the cold code map's hotCodeSize to compute the hot and cold code sizes.
5881             //
5882
5883             ULONG hotCodeSize = pColdCodeMap[ColdMethodIndex].hotCodeSize;
5884
5885             methodRegionInfo->coldSize         = methodRegionInfo->hotSize - hotCodeSize;
5886             methodRegionInfo->hotSize          = hotCodeSize;
5887
5888             return;
5889         }
5890         else if (FunctionEntry < dac_cast<PTR_RUNTIME_FUNCTION>(MethodToken.m_pCodeHeader))
5891         {
5892             Low = Middle + 1;
5893         }
5894         else
5895         {
5896             // Use ColdMethodIndex to take advantage of entries skipped while looking for the method start
5897             High = ColdMethodIndex - 1;
5898         }
5899     }
5900
5901     //
5902     // We didn't find it.  Therefore this method doesn't have a cold section.
5903     //
5904
5905     return;
5906 }
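
// Illustrative sketch (comments only, not compiled): the hot/cold arithmetic
// above in isolation. The numbers are hypothetical; only the relationship is
// real. GetFunctionSize() reports the method's total native code size, and the
// cold code map records how much of it is hot:
//
//     SIZE_T totalSize   = 0x240;                    // from the GC info
//     ULONG  hotCodeSize = 0x180;                    // pColdCodeMap entry
//     SIZE_T coldSize    = totalSize - hotCodeSize;  // 0xC0
//     // hot region:  [hotStartAddress,  hotStartAddress  + 0x180)
//     // cold region: [coldStartAddress, coldStartAddress + 0xC0)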
5907
5908 #ifdef DACCESS_COMPILE
5909
5910 void NativeImageJitManager::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
5911 {
5912     IJitManager::EnumMemoryRegions(flags);
5913 }
5914
5915 #if defined(WIN64EXCEPTIONS)
5916
5917 // 
5918 // To locate an entry in the function entry table (the program exceptions data directory), the debugger
5919 // performs a binary search over the table.  This function reports the entries that are encountered in the
5920 // binary search.
5921 // 
5922 // Parameters:
5923 //   pRtf: The target function table entry to be located
5924 //   pNativeLayout: A pointer to the loaded native layout for the module containing pRtf
5925 //   
5926 static void EnumRuntimeFunctionEntriesToFindEntry(PTR_RUNTIME_FUNCTION pRtf, PTR_PEImageLayout pNativeLayout)
5927 {
5928     pRtf.EnumMem();
5929
5930     if (pNativeLayout == NULL)
5931     {
5932         return;
5933     }
5934
5935     IMAGE_DATA_DIRECTORY * pProgramExceptionsDirectory = pNativeLayout->GetDirectoryEntry(IMAGE_DIRECTORY_ENTRY_EXCEPTION);
5936     if (!pProgramExceptionsDirectory || 
5937         (pProgramExceptionsDirectory->Size == 0) ||
5938         (pProgramExceptionsDirectory->Size % sizeof(T_RUNTIME_FUNCTION) != 0))
5939     {
5940         // Program exceptions directory is malformed
5941         return;
5942     }
5943
5944     PTR_BYTE moduleBase(pNativeLayout->GetBase());
5945     PTR_RUNTIME_FUNCTION firstFunctionEntry(moduleBase + pProgramExceptionsDirectory->VirtualAddress);
5946
5947     if (pRtf < firstFunctionEntry ||
5948         ((dac_cast<TADDR>(pRtf) - dac_cast<TADDR>(firstFunctionEntry)) % sizeof(T_RUNTIME_FUNCTION) != 0))
5949     {
5950         // Program exceptions directory is malformed
5951         return;
5952     }
5953     
5954 // Review conversion of size_t to ULONG.
5955 #if defined(_MSC_VER)
5956 #pragma warning(push)
5957 #pragma warning(disable:4267)
5958 #endif // defined(_MSC_VER)
5959
5960     ULONG indexToLocate = pRtf - firstFunctionEntry;
5961
5962 #if defined(_MSC_VER)   
5963 #pragma warning(pop)
5964 #endif // defined(_MSC_VER)
5965
5966     ULONG low = 0; // index in the function entry table of low end of search range
5967     ULONG high = (pProgramExceptionsDirectory->Size)/sizeof(T_RUNTIME_FUNCTION) - 1; // index of high end of search range
5968     ULONG mid = (low + high) / 2; // index of entry to be compared
5969
5970     if (indexToLocate > high)
5971     {
5972         return;
5973     }
5974
5975     while (indexToLocate != mid)
5976     {
5977         PTR_RUNTIME_FUNCTION functionEntry = firstFunctionEntry + mid;
5978         functionEntry.EnumMem();
5979         if (indexToLocate > mid)
5980         {
5981             low = mid + 1;
5982         }
5983         else
5984         {
5985             high = mid - 1;
5986         }
5987         mid = (low + high) / 2;
5988         _ASSERTE(low <= mid && mid <= high);
5989     }
5990 }
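
// Illustrative sketch (comments only): the enumeration above mirrors an
// ordinary binary search, reporting exactly the entries a debugger would
// compare on its way to indexToLocate. A hypothetical standalone equivalent:
//
//     ULONG low = 0, high = count - 1, mid = (low + high) / 2;
//     while (indexToLocate != mid)
//     {
//         Report(firstFunctionEntry + mid);    // EnumMem() in the real code
//         if (indexToLocate > mid) low = mid + 1; else high = mid - 1;
//         mid = (low + high) / 2;
//     }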
5991
5992 //
5993 // EnumMemoryRegionsForMethodUnwindInfo - enumerate the memory necessary to read the unwind info for the
5994 // specified method.
5995 // 
5996 // Note that in theory, a dump generation library could save the unwind information itself without help
5997 // from us, since it's stored in the image in the standard function table layout for Win64.  However, 
5998 // dump-generation libraries assume that the image will be available at debug time, and if the image 
5999 // isn't available then it is acceptable for stackwalking to break.  For ngen images (which are created 
6000 // on the client), it usually isn't possible to have the image available at debug time, and so for minidumps
6001 // we must explicitly ensure the unwind information is saved into the dump. 
6002 // 
6003 // Arguments:
6004 //     flags - EnumMem flags
6005 //     pCodeInfo - EECodeInfo for the method in question
6006 //     
6007 void NativeImageJitManager::EnumMemoryRegionsForMethodUnwindInfo(CLRDataEnumMemoryFlags flags, EECodeInfo * pCodeInfo)
6008 {
6009     // Get the RUNTIME_FUNCTION entry for this method 
6010     PTR_RUNTIME_FUNCTION pRtf = pCodeInfo->GetFunctionEntry();
6011
6012     if (pRtf==NULL)
6013     {
6014         return;
6015     }
6016
6017     // Enumerate the function entry and other entries needed to locate it in the program exceptions directory
6018     Module * pModule = JitTokenToZapModule(pCodeInfo->GetMethodToken());
6019     EnumRuntimeFunctionEntriesToFindEntry(pRtf, pModule->GetFile()->GetLoadedNative());
6020
6021     SIZE_T size;
6022     PTR_VOID pUnwindData = GetUnwindDataBlob(pCodeInfo->GetModuleBase(), pRtf, &size);
6023     if (pUnwindData != NULL)
6024         DacEnumMemoryRegion(PTR_TO_TADDR(pUnwindData), size); 
6025 }
6026
6027 #endif //WIN64EXCEPTIONS
6028 #endif // #ifdef DACCESS_COMPILE
6029
6030 // Return start of exception info for a method, or 0 if the method has no EH info
6031 DWORD NativeExceptionInfoLookupTable::LookupExceptionInfoRVAForMethod(PTR_CORCOMPILE_EXCEPTION_LOOKUP_TABLE pExceptionLookupTable,
6032                                                                               COUNT_T numLookupEntries,
6033                                                                               DWORD methodStartRVA,
6034                                                                               COUNT_T* pSize)
6035 {
6036     CONTRACTL {
6037         NOTHROW;
6038         GC_NOTRIGGER;
6039         SUPPORTS_DAC;
6040     } CONTRACTL_END;
6041
6042     _ASSERTE(pExceptionLookupTable != NULL);
6043
6044     COUNT_T start = 0;
6045     COUNT_T end = numLookupEntries - 2;
6046
6047     // The last entry in the lookup table (at index numLookupEntries-1) is a sentinel entry.
6048     // The sentinel entry makes it possible to determine the number of EH clauses for the last real table entry.
6049     _ASSERTE(pExceptionLookupTable->ExceptionLookupEntry(numLookupEntries-1)->MethodStartRVA == (DWORD)-1);
6050
6051     // Binary search the lookup table
6052     // Linear search is faster once we get down to a small number of entries.
6053     while (end - start > 10)
6054     {
6055         COUNT_T middle = start + (end - start) / 2;
6056
6057         _ASSERTE(start < middle && middle < end);
6058
6059         DWORD rva = pExceptionLookupTable->ExceptionLookupEntry(middle)->MethodStartRVA;
6060
6061         if (methodStartRVA < rva)
6062         {
6063             end = middle - 1;
6064         }
6065         else 
6066         {
6067             start = middle;
6068         }
6069     }
6070
6071     for (COUNT_T i = start; i <= end; ++i)
6072     {
6073         DWORD rva = pExceptionLookupTable->ExceptionLookupEntry(i)->MethodStartRVA;
6074         if (methodStartRVA  == rva)
6075         {
6076             CORCOMPILE_EXCEPTION_LOOKUP_TABLE_ENTRY *pEntry = pExceptionLookupTable->ExceptionLookupEntry(i);
6077
6078             // Compute the size of this method's EH info from the next entry's ExceptionInfoRVA
6079             CORCOMPILE_EXCEPTION_LOOKUP_TABLE_ENTRY * pNextEntry = pExceptionLookupTable->ExceptionLookupEntry(i + 1);
6080             *pSize = pNextEntry->ExceptionInfoRVA - pEntry->ExceptionInfoRVA;
6081
6082             return pEntry->ExceptionInfoRVA;
6083         }
6084     }
6085
6086     // Not found
6087     return 0;
6088 }
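
// Illustrative sketch (comments only) of the lookup table consumed above; the
// RVAs are hypothetical. The sentinel's MethodStartRVA of (DWORD)-1 lets the
// EH info size of the last real entry be computed the same way as any other:
//
//     index  MethodStartRVA  ExceptionInfoRVA
//     0      0x1000          0x8000
//     1      0x1400          0x8020    // EH info size of entry 0 = 0x20
//     2      0xFFFFFFFF      0x8050    // sentinel; size of entry 1 = 0x30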
6089
6090 int NativeUnwindInfoLookupTable::LookupUnwindInfoForMethod(DWORD RelativePc,
6091                                                            PTR_RUNTIME_FUNCTION pRuntimeFunctionTable,
6092                                                            int Low,
6093                                                            int High)
6094 {
6095     CONTRACTL {
6096         SO_TOLERANT;
6097         NOTHROW;
6098         GC_NOTRIGGER;
6099         SUPPORTS_DAC;
6100     } CONTRACTL_END;
6101
6102
6103 #ifdef _TARGET_ARM_
6104     RelativePc |= THUMB_CODE;
6105 #endif 
6106
6107     // Entries are sorted and terminated by sentinel value (DWORD)-1
6108
6109     // Binary search the RUNTIME_FUNCTION table
6110     // Use linear search once we get down to a small number of elements
6111     // to avoid binary search overhead.
6112     while (High - Low > 10) 
6113     {
6114        int Middle = Low + (High - Low) / 2;
6115
6116        PTR_RUNTIME_FUNCTION pFunctionEntry = pRuntimeFunctionTable + Middle;
6117        if (RelativePc < pFunctionEntry->BeginAddress) 
6118        {
6119            High = Middle - 1;
6120        } 
6121        else 
6122        {
6123            Low = Middle;
6124        }
6125     }
6126
6127     for (int i = Low; i <= High; ++i)
6128     {
6129         // This is safe because the entries are terminated by the sentinel value (DWORD)-1
6130         PTR_RUNTIME_FUNCTION pNextFunctionEntry = pRuntimeFunctionTable + (i + 1);
6131
6132         if (RelativePc < pNextFunctionEntry->BeginAddress)
6133         {
6134             PTR_RUNTIME_FUNCTION pFunctionEntry = pRuntimeFunctionTable + i;
6135             if (RelativePc >= pFunctionEntry->BeginAddress)
6136             {
6137                 return i;
6138             }
6139             break;
6140         }
6141     }
6142
6143     return -1;
6144 }
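
// Illustrative usage sketch (comments only); `pc`, `table` and `count` are
// hypothetical. Callers pass a module-relative PC and the inclusive index
// range of the table, as code:ReadyToRunJitManager::JitCodeToMethodInfo does:
//
//     int i = NativeUnwindInfoLookupTable::LookupUnwindInfoForMethod(
//                 pc, table, 0, count - 1);
//     if (i >= 0)
//     {
//         PTR_RUNTIME_FUNCTION pFunc = table + i;   // entry covering pc
//     }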
6145
6146 BOOL NativeUnwindInfoLookupTable::HasExceptionInfo(NGenLayoutInfo * pNgenLayout, PTR_RUNTIME_FUNCTION pMainRuntimeFunction)
6147 {
6148     LIMITED_METHOD_DAC_CONTRACT;
6149     DWORD methodDescRVA = NativeUnwindInfoLookupTable::GetMethodDescRVA(pNgenLayout, pMainRuntimeFunction);
6150     return (methodDescRVA & HAS_EXCEPTION_INFO_MASK);
6151 }
6152
6153 PTR_MethodDesc NativeUnwindInfoLookupTable::GetMethodDesc(NGenLayoutInfo * pNgenLayout, PTR_RUNTIME_FUNCTION pMainRuntimeFunction, TADDR moduleBase)
6154 {
6155     LIMITED_METHOD_DAC_CONTRACT;
6156     DWORD methodDescRVA = NativeUnwindInfoLookupTable::GetMethodDescRVA(pNgenLayout, pMainRuntimeFunction);
6157     return PTR_MethodDesc((methodDescRVA & ~HAS_EXCEPTION_INFO_MASK) + moduleBase);
6158 }
6159
6160 DWORD NativeUnwindInfoLookupTable::GetMethodDescRVA(NGenLayoutInfo * pNgenLayout, PTR_RUNTIME_FUNCTION pMainRuntimeFunction)
6161 {
6162     LIMITED_METHOD_DAC_CONTRACT;
6163
6164     COUNT_T iIndex = (COUNT_T)(pMainRuntimeFunction - pNgenLayout->m_pRuntimeFunctions[0]);
6165     DWORD rva = 0;
6166     if (iIndex >= pNgenLayout->m_nRuntimeFunctions[0])
6167     {
6168         iIndex = (COUNT_T)(pMainRuntimeFunction - pNgenLayout->m_pRuntimeFunctions[1]);
6169         _ASSERTE(iIndex < pNgenLayout->m_nRuntimeFunctions[1]);
6170         rva = pNgenLayout->m_MethodDescs[1][iIndex];
6171     }
6172     else
6173     {
6174         rva = pNgenLayout->m_MethodDescs[0][iIndex];
6175     }
6176     _ASSERTE(rva != 0);
6177
6178     return rva;
6179 }
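
// Illustrative sketch (comments only): the stored DWORD packs the MethodDesc
// RVA together with a has-exception-info flag in the low bit
// (HAS_EXCEPTION_INFO_MASK), so decoding is plain masking, as the two
// accessors above show:
//
//     DWORD packed      = methodDescRVA;                        // as stored
//     BOOL  hasEHInfo   = (packed & HAS_EXCEPTION_INFO_MASK) != 0;
//     TADDR pMethodDesc = moduleBase + (packed & ~HAS_EXCEPTION_INFO_MASK);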
6180
6181 #endif // FEATURE_PREJIT
6182
6183 #ifndef DACCESS_COMPILE
6184
6185 //-----------------------------------------------------------------------------
6186
6187
6188 // Nirvana Support
6189
6190 MethodDesc* __stdcall Nirvana_FindMethodDesc(PCODE ptr, BYTE*& hotStartAddress, size_t& hotSize, BYTE*& coldStartAddress, size_t& coldSize)
6191 {
6192     EECodeInfo codeInfo(ptr);
6193     if (!codeInfo.IsValid())
6194         return NULL;
6195
6196     IJitManager::MethodRegionInfo methodRegionInfo;
6197     codeInfo.GetMethodRegionInfo(&methodRegionInfo);
6198
6199     hotStartAddress  = (BYTE*)methodRegionInfo.hotStartAddress;
6200     hotSize          = methodRegionInfo.hotSize;
6201     coldStartAddress = (BYTE*)methodRegionInfo.coldStartAddress;
6202     coldSize         = methodRegionInfo.coldSize;
6203
6204     return codeInfo.GetMethodDesc();
6205 }
6206
6207
6208 bool Nirvana_GetMethodInfo(MethodDesc * pMD, BYTE*& hotStartAddress, size_t& hotSize, BYTE*& coldStartAddress, size_t& coldSize)
6209 {
6210     EECodeInfo codeInfo(pMD->GetNativeCode());
6211     if (!codeInfo.IsValid())
6212         return false;
6213
6214     IJitManager::MethodRegionInfo methodRegionInfo;
6215     codeInfo.GetMethodRegionInfo(&methodRegionInfo);
6216
6217     hotStartAddress  = (BYTE*)methodRegionInfo.hotStartAddress;
6218     hotSize          = methodRegionInfo.hotSize;
6219     coldStartAddress = (BYTE*)methodRegionInfo.coldStartAddress;
6220     coldSize         = methodRegionInfo.coldSize;
6221
6222     return true;
6223 }
6224
6225
6226 #include "sigformat.h"
6227
6228 __forceinline bool Nirvana_PrintMethodDescWorker(__in_ecount(iBuffer) char * szBuffer, size_t iBuffer, MethodDesc * pMD, const char * pSigString)
6229 {
6230     if (iBuffer == 0) 
6231         return false;
6232
6233     szBuffer[0] = '\0';
6234     pSigString = strchr(pSigString, ' ');
6235
6236     if (pSigString == NULL)
6237         return false;
6238
6239     ++pSigString;
6240
6241     LPCUTF8 pNamespace;
6242     LPCUTF8 pClassName = pMD->GetMethodTable()->GetFullyQualifiedNameInfo(&pNamespace);
6243
6244     if (pClassName == NULL)
6245         return false;
6246
6247     if (*pNamespace != 0)
6248     {
6249         if (_snprintf_s(szBuffer, iBuffer, _TRUNCATE, "%s.%s.%s", pNamespace, pClassName, pSigString) == -1)
6250             return false;
6251     }
6252     else
6253     {
6254         if (_snprintf_s(szBuffer, iBuffer, _TRUNCATE, "%s.%s", pClassName, pSigString) == -1)
6255             return false;
6256     }
6257
6258     _ASSERTE(szBuffer[0] != '\0');
6259
6260     return true;
6261 }
6262
6263 bool __stdcall Nirvana_PrintMethodDesc(__in_ecount(iBuffer) char * szBuffer, size_t iBuffer, MethodDesc * pMD)
6264 {
6265     bool fResult = false;
6266
6267     EX_TRY
6268     {
6269         NewHolder<SigFormat> pSig = new SigFormat(pMD, NULL, false);
6270         fResult = Nirvana_PrintMethodDescWorker(szBuffer, iBuffer, pMD, pSig->GetCString());
6271     }
6272     EX_CATCH
6273     {
6274         fResult = false;
6275     }
6276     EX_END_CATCH(SwallowAllExceptions)
6277     
6278     return fResult;
6279 }
6280
6281
6282 // Nirvana_Dummy() is a dummy function that is exported privately by ordinal only.
6283 // The sole purpose of this function is to reference Nirvana_FindMethodDesc(),
6284 // Nirvana_GetMethodInfo(), and Nirvana_PrintMethodDesc() so that they are not
6285 // inlined or removed by the compiler or the linker.
6286
6287 DWORD __stdcall Nirvana_Dummy()
6288 {
6289     LIMITED_METHOD_CONTRACT;
6290     void * funcs[] = { 
6291         (void*)Nirvana_FindMethodDesc,
6292         (void*)Nirvana_GetMethodInfo,
6293         (void*)Nirvana_PrintMethodDesc 
6294     };
6295
6296     size_t n = sizeof(funcs) / sizeof(funcs[0]);
6297
6298     size_t sum = 0;
6299     for (size_t i = 0; i < n; ++i)
6300         sum += (size_t)funcs[i];
6301
6302     return (DWORD)sum;
6303 }
6304
6305
6306 #endif // #ifndef DACCESS_COMPILE
6307
6308
6309 #ifdef FEATURE_PREJIT
6310
6311 MethodIterator::MethodIterator(PTR_Module pModule, MethodIteratorOptions mio)
6312 {
6313     CONTRACTL
6314     {
6315         THROWS;
6316         GC_NOTRIGGER;
6317     } CONTRACTL_END;
6318
6319     Init(pModule, pModule->GetNativeImage(),  mio);
6320 }
6321
6322 MethodIterator::MethodIterator(PTR_Module pModule, PEDecoder * pPEDecoder, MethodIteratorOptions mio)
6323 {
6324     CONTRACTL
6325     {
6326         THROWS;
6327         GC_NOTRIGGER;
6328     } CONTRACTL_END;
6329
6330     Init(pModule, pPEDecoder,  mio);
6331 }
6332
6333 void MethodIterator::Init(PTR_Module pModule, PEDecoder * pPEDecoder, MethodIteratorOptions mio)
6334 {
6335     CONTRACTL
6336     {
6337         THROWS;
6338         GC_NOTRIGGER;
6339     } CONTRACTL_END;
6340
6341     m_ModuleBase = dac_cast<TADDR>(pPEDecoder->GetBase());
6342
6343     methodIteratorOptions = mio;
6344
6345     m_pNgenLayout = pModule->GetNGenLayoutInfo();
6346
6347     m_fHotMethodsDone = FALSE;
6348     m_CurrentRuntimeFunctionIndex = -1;
6349     m_CurrentColdRuntimeFunctionIndex = 0;
6350 }
6351
6352 BOOL MethodIterator::Next()
6353 {
6354     CONTRACTL {
6355         NOTHROW;
6356         GC_NOTRIGGER;
6357     } CONTRACTL_END;
6358
6359     m_CurrentRuntimeFunctionIndex ++;
6360
6361     if (!m_fHotMethodsDone)
6362     {
6363         // Iterate the hot methods
6364         if (methodIteratorOptions & Hot)
6365         {
6366 #ifdef WIN64EXCEPTIONS
6367             // Skip to the next method,
6368             // stepping over method fragments and funclets.
6369             while (m_CurrentRuntimeFunctionIndex < m_pNgenLayout->m_nRuntimeFunctions[0])
6370             {
6371                 if (m_pNgenLayout->m_MethodDescs[0][m_CurrentRuntimeFunctionIndex] != 0)
6372                     return TRUE;
6373                 m_CurrentRuntimeFunctionIndex++;
6374             }
6375 #else
6376             if (m_CurrentRuntimeFunctionIndex < m_pNgenLayout->m_nRuntimeFunctions[0])
6377                 return TRUE;
6378 #endif
6379         }
6380         m_CurrentRuntimeFunctionIndex = 0;
6381         m_fHotMethodsDone = TRUE;
6382     }
6383
6384     if (methodIteratorOptions & Unprofiled)
6385     {
6386 #ifdef WIN64EXCEPTIONS
6387         // Skip to the next method,
6388         // stepping over method fragments and funclets.
6389         while (m_CurrentRuntimeFunctionIndex < m_pNgenLayout->m_nRuntimeFunctions[1])
6390         {
6391             if (m_pNgenLayout->m_MethodDescs[1][m_CurrentRuntimeFunctionIndex] != 0)
6392                 return TRUE;
6393             m_CurrentRuntimeFunctionIndex++;
6394         }
6395 #else
6396         if (m_CurrentRuntimeFunctionIndex < m_pNgenLayout->m_nRuntimeFunctions[1])
6397             return TRUE;
6398 #endif
6399     }
6400
6401     return FALSE;
6402 }
6403
6404 PTR_MethodDesc MethodIterator::GetMethodDesc()
6405 {
6406     CONTRACTL
6407     {
6408         NOTHROW;
6409         GC_NOTRIGGER;
6410     } 
6411     CONTRACTL_END;
6412
6413     return NativeUnwindInfoLookupTable::GetMethodDesc(m_pNgenLayout, GetRuntimeFunction(), m_ModuleBase);
6414 }
6415
6416 GCInfoToken MethodIterator::GetGCInfoToken()
6417 {
6418     LIMITED_METHOD_CONTRACT;
6419
6420     // get the gc info from the RT function
6421     SIZE_T size;
6422     PTR_VOID pUnwindData = GetUnwindDataBlob(m_ModuleBase, GetRuntimeFunction(), &size);
6423     PTR_VOID gcInfo = (PTR_VOID)((PTR_BYTE)pUnwindData + size);
6424     // MethodIterator is used to iterate over the methods of an NGen image,
6425     // so the GC info version is always the current one.
6426     return { gcInfo, GCINFO_VERSION };
6427 }
6428
6429 TADDR MethodIterator::GetMethodStartAddress()
6430 {
6431     LIMITED_METHOD_CONTRACT;
6432
6433     return m_ModuleBase + RUNTIME_FUNCTION__BeginAddress(GetRuntimeFunction());
6434 }
6435
6436 TADDR MethodIterator::GetMethodColdStartAddress()
6437 {
6438     LIMITED_METHOD_CONTRACT;
6439
6440     PTR_RUNTIME_FUNCTION CurrentFunctionEntry = GetRuntimeFunction();
6441
6442     //
6443     // Catch up with hot code
6444     //
6445     for ( ; m_CurrentColdRuntimeFunctionIndex < m_pNgenLayout->m_nRuntimeFunctions[2]; m_CurrentColdRuntimeFunctionIndex++)
6446     {
6447         PTR_RUNTIME_FUNCTION ColdFunctionEntry = m_pNgenLayout->m_pRuntimeFunctions[2] + m_CurrentColdRuntimeFunctionIndex;
6448
6449         PTR_RUNTIME_FUNCTION FunctionEntry;
6450
6451 #ifdef WIN64EXCEPTIONS
6452         DWORD MainFunctionEntryRVA = m_pNgenLayout->m_ColdCodeMap[m_CurrentColdRuntimeFunctionIndex].mainFunctionEntryRVA;
6453
6454         if (MainFunctionEntryRVA == 0)
6455             continue;
6456
6457         FunctionEntry = dac_cast<PTR_RUNTIME_FUNCTION>(m_ModuleBase + MainFunctionEntryRVA);
6458 #else
6459         DWORD ColdUnwindData = ColdFunctionEntry->UnwindData;
6460         _ASSERTE((ColdUnwindData & RUNTIME_FUNCTION_INDIRECT) != 0);
6461         FunctionEntry = dac_cast<PTR_RUNTIME_FUNCTION>(m_ModuleBase + (ColdUnwindData & ~RUNTIME_FUNCTION_INDIRECT));
6462 #endif
6463
6464         if (CurrentFunctionEntry == FunctionEntry)
6465         {
6466             // we found a match
6467             return m_ModuleBase + RUNTIME_FUNCTION__BeginAddress(ColdFunctionEntry);
6468         }
6469         else if (CurrentFunctionEntry < FunctionEntry)
6471         {
6472             // method does not have cold code
6473             return NULL;
6474         }
6475     }
6476
6477     return NULL;
6478 }
6479
6480 PTR_RUNTIME_FUNCTION MethodIterator::GetRuntimeFunction()
6481 {
6482     LIMITED_METHOD_DAC_CONTRACT;
6483     _ASSERTE(m_CurrentRuntimeFunctionIndex >= 0);
6484     _ASSERTE(m_CurrentRuntimeFunctionIndex < (m_fHotMethodsDone ? m_pNgenLayout->m_nRuntimeFunctions[1] : m_pNgenLayout->m_nRuntimeFunctions[0]));
6485     return (m_fHotMethodsDone ? m_pNgenLayout->m_pRuntimeFunctions[1] : m_pNgenLayout->m_pRuntimeFunctions[0]) + m_CurrentRuntimeFunctionIndex;
6486 }
6487
6488 ULONG MethodIterator::GetHotCodeSize()
6489 {
6490     LIMITED_METHOD_CONTRACT;
6491     _ASSERTE(GetMethodColdStartAddress() != NULL);
6492     return m_pNgenLayout->m_ColdCodeMap[m_CurrentColdRuntimeFunctionIndex].hotCodeSize;
6493 }
6494
6495 void MethodIterator::GetMethodRegionInfo(IJitManager::MethodRegionInfo *methodRegionInfo)
6496 {
6497     CONTRACTL {
6498         NOTHROW;
6499         GC_NOTRIGGER;
6500     } CONTRACTL_END;
6501
6502     methodRegionInfo->hotStartAddress  = GetMethodStartAddress();
6503     methodRegionInfo->coldStartAddress = GetMethodColdStartAddress();
6504     GCInfoToken gcInfoToken = GetGCInfoToken();
6505     methodRegionInfo->hotSize          = ExecutionManager::GetNativeImageJitManager()->GetCodeManager()->GetFunctionSize(gcInfoToken);
6506     methodRegionInfo->coldSize         = 0;
6507
6508     if (methodRegionInfo->coldStartAddress != NULL)
6509     {
6510         //
6511         // At this point methodRegionInfo->hotSize is set to the total size of
6512         // the method obtained from the GC info (we set that in the init code above).
6513         // Use that and pCMH->hotCodeSize to compute the hot and cold code sizes.
6514         //
6515
6516         ULONG hotCodeSize = GetHotCodeSize();
6517
6518         methodRegionInfo->coldSize         = methodRegionInfo->hotSize - hotCodeSize;
6519         methodRegionInfo->hotSize          = hotCodeSize;
6520     }
6521 }
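
// Illustrative usage sketch (comments only), assuming a module with a loaded
// native image; the option combination shown is hypothetical:
//
//     MethodIterator it(pModule, (MethodIteratorOptions)(Hot | Unprofiled));
//     while (it.Next())
//     {
//         PTR_MethodDesc pMD = it.GetMethodDesc();
//         IJitManager::MethodRegionInfo info;
//         it.GetMethodRegionInfo(&info);
//         // info now describes pMD's hot (and any cold) native code
//     }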
6522
6523 #endif // FEATURE_PREJIT
6524
6525
6526
6527 #ifdef FEATURE_READYTORUN
6528
6529 //***************************************************************************************
6530 //***************************************************************************************
6531
6532 #ifndef DACCESS_COMPILE
6533
6534 ReadyToRunJitManager::ReadyToRunJitManager()
6535 {
6536     WRAPPER_NO_CONTRACT;
6537 }
6538
6539 #endif // #ifndef DACCESS_COMPILE
6540
6541 ReadyToRunInfo * ReadyToRunJitManager::JitTokenToReadyToRunInfo(const METHODTOKEN& MethodToken)
6542 {
6543     CONTRACTL {
6544         NOTHROW;
6545         GC_NOTRIGGER;
6546         HOST_NOCALLS;
6547         SUPPORTS_DAC;
6548     } CONTRACTL_END;
6549
6550     return dac_cast<PTR_Module>(MethodToken.m_pRangeSection->pHeapListOrZapModule)->GetReadyToRunInfo();
6551 }
6552
6553 UINT32 ReadyToRunJitManager::JitTokenToGCInfoVersion(const METHODTOKEN& MethodToken)
6554 {
6555     CONTRACTL{
6556         NOTHROW;
6557         GC_NOTRIGGER;
6558         HOST_NOCALLS;
6559         SUPPORTS_DAC;
6560     } CONTRACTL_END;
6561
6562     READYTORUN_HEADER * header = JitTokenToReadyToRunInfo(MethodToken)->GetImage()->GetReadyToRunHeader();
6563
6564     return GCInfoToken::ReadyToRunVersionToGcInfoVersion(header->MajorVersion);
6565 }
6566
6567 PTR_RUNTIME_FUNCTION ReadyToRunJitManager::JitTokenToRuntimeFunction(const METHODTOKEN& MethodToken)
6568 {
6569     CONTRACTL {
6570         NOTHROW;
6571         GC_NOTRIGGER;
6572         HOST_NOCALLS;
6573         SUPPORTS_DAC;
6574     } CONTRACTL_END;
6575
6576     return dac_cast<PTR_RUNTIME_FUNCTION>(MethodToken.m_pCodeHeader);
6577 }
6578
6579 TADDR ReadyToRunJitManager::JitTokenToStartAddress(const METHODTOKEN& MethodToken)
6580 {
6581     CONTRACTL {
6582         NOTHROW;
6583         GC_NOTRIGGER;
6584         HOST_NOCALLS;
6585         SUPPORTS_DAC;
6586     } CONTRACTL_END;
6587
6588     return JitTokenToModuleBase(MethodToken) + 
6589         RUNTIME_FUNCTION__BeginAddress(dac_cast<PTR_RUNTIME_FUNCTION>(MethodToken.m_pCodeHeader));
6590 }
6591
6592 GCInfoToken ReadyToRunJitManager::GetGCInfoToken(const METHODTOKEN& MethodToken)
6593 {
6594     CONTRACTL {
6595         NOTHROW;
6596         GC_NOTRIGGER;
6597         HOST_NOCALLS;
6598         SUPPORTS_DAC;
6599     } CONTRACTL_END;
6600
6601     PTR_RUNTIME_FUNCTION pRuntimeFunction = JitTokenToRuntimeFunction(MethodToken);
6602     TADDR baseAddress = JitTokenToModuleBase(MethodToken);
6603
6604 #ifndef DACCESS_COMPILE
6605     if (g_IBCLogger.InstrEnabled())
6606     {
6607         ReadyToRunInfo * pInfo = JitTokenToReadyToRunInfo(MethodToken);
6608         MethodDesc * pMD = pInfo->GetMethodDescForEntryPoint(JitTokenToStartAddress(MethodToken));
6609         g_IBCLogger.LogMethodGCInfoAccess(pMD);
6610     }
6611 #endif
6612
6613     SIZE_T nUnwindDataSize;
6614     PTR_VOID pUnwindData = GetUnwindDataBlob(baseAddress, pRuntimeFunction, &nUnwindDataSize);
6615
6616     // The GC info immediately follows the unwind data
6617     PTR_BYTE gcInfo = dac_cast<PTR_BYTE>(pUnwindData) + nUnwindDataSize;
6618     UINT32 gcInfoVersion = JitTokenToGCInfoVersion(MethodToken);
6619
6620     return { gcInfo, gcInfoVersion };
6621 }
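
// Illustrative layout sketch (comments only) of the blob walked above: the
// unwind data and the GC info for a method are stored back to back, so the
// token's GC info pointer is simply the end of the unwind data:
//
//     [ unwind data blob, nUnwindDataSize bytes ]  <- GetUnwindDataBlob()
//     [ GC info ]                                  <- returned GCInfoToken,
//                                                     versioned from the R2R
//                                                     header's MajorVersion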
6622
6623 unsigned ReadyToRunJitManager::InitializeEHEnumeration(const METHODTOKEN& MethodToken, EH_CLAUSE_ENUMERATOR* pEnumState)
6624 {
6625     CONTRACTL {
6626         NOTHROW;
6627         GC_NOTRIGGER;
6628     } CONTRACTL_END;
6629
6630     ReadyToRunInfo * pReadyToRunInfo = JitTokenToReadyToRunInfo(MethodToken);
6631
6632     IMAGE_DATA_DIRECTORY * pExceptionInfoDir = pReadyToRunInfo->FindSection(READYTORUN_SECTION_EXCEPTION_INFO);
6633     if (pExceptionInfoDir == NULL)
6634         return 0;
6635
6636     PEImageLayout * pLayout = pReadyToRunInfo->GetImage();
6637
6638     PTR_CORCOMPILE_EXCEPTION_LOOKUP_TABLE pExceptionLookupTable = dac_cast<PTR_CORCOMPILE_EXCEPTION_LOOKUP_TABLE>(pLayout->GetRvaData(pExceptionInfoDir->VirtualAddress));
6639
6640     COUNT_T numLookupTableEntries = (COUNT_T)(pExceptionInfoDir->Size / sizeof(CORCOMPILE_EXCEPTION_LOOKUP_TABLE_ENTRY));
6641     // There are at least 2 entries (1 valid entry + 1 sentinel entry)
6642     _ASSERTE(numLookupTableEntries >= 2);
6643
6644     DWORD methodStartRVA = (DWORD)(JitTokenToStartAddress(MethodToken) - JitTokenToModuleBase(MethodToken));
6645
6646     COUNT_T ehInfoSize = 0;
6647     DWORD exceptionInfoRVA = NativeExceptionInfoLookupTable::LookupExceptionInfoRVAForMethod(pExceptionLookupTable,
6648                                                                   numLookupTableEntries,
6649                                                                   methodStartRVA, 
6650                                                                   &ehInfoSize);
6651     if (exceptionInfoRVA == 0)
6652         return 0;
6653
6654     pEnumState->iCurrentPos = 0;
6655     pEnumState->pExceptionClauseArray = JitTokenToModuleBase(MethodToken) + exceptionInfoRVA;
6656
6657     return ehInfoSize / sizeof(CORCOMPILE_EXCEPTION_CLAUSE);
6658 }
6659
6660 PTR_EXCEPTION_CLAUSE_TOKEN ReadyToRunJitManager::GetNextEHClause(EH_CLAUSE_ENUMERATOR* pEnumState,
6661                               EE_ILEXCEPTION_CLAUSE* pEHClauseOut)
6662 {
6663     CONTRACTL {
6664         NOTHROW;
6665         GC_NOTRIGGER;
6666     } CONTRACTL_END;
6667
6668     unsigned iCurrentPos = pEnumState->iCurrentPos;
6669     pEnumState->iCurrentPos++;
6670
6671     CORCOMPILE_EXCEPTION_CLAUSE* pClause = &(dac_cast<PTR_CORCOMPILE_EXCEPTION_CLAUSE>(pEnumState->pExceptionClauseArray)[iCurrentPos]);
6672
6673     // Copy to the output parameter. This is a nice abstraction for the future:
6674     // if we want to compress the clause encoding, we can do so without affecting the call sites.
6675     pEHClauseOut->TryStartPC = pClause->TryStartPC; 
6676     pEHClauseOut->TryEndPC = pClause->TryEndPC; 
6677     pEHClauseOut->HandlerStartPC = pClause->HandlerStartPC; 
6678     pEHClauseOut->HandlerEndPC = pClause->HandlerEndPC;
6679     pEHClauseOut->Flags = pClause->Flags;
6680     pEHClauseOut->FilterOffset = pClause->FilterOffset;
6681
6682     return dac_cast<PTR_EXCEPTION_CLAUSE_TOKEN>(pClause);
6683 }
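
// Illustrative usage sketch (comments only): the two functions above pair up
// as a cursor over a method's EH clauses; `pJitMan` and `methodToken` are
// hypothetical.
//
//     EH_CLAUSE_ENUMERATOR enumState;
//     unsigned count = pJitMan->InitializeEHEnumeration(methodToken, &enumState);
//     for (unsigned i = 0; i < count; i++)
//     {
//         EE_ILEXCEPTION_CLAUSE clause;
//         pJitMan->GetNextEHClause(&enumState, &clause);
//         // clause.TryStartPC .. clause.HandlerEndPC describe one clause
//     }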
6684
6685 StubCodeBlockKind ReadyToRunJitManager::GetStubCodeBlockKind(RangeSection * pRangeSection, PCODE currentPC)
6686 {
6687     CONTRACTL
6688     {
6689         NOTHROW;
6690         GC_NOTRIGGER;
6691         SO_TOLERANT;
6692         MODE_ANY;
6693     }
6694     CONTRACTL_END;
6695
6696     DWORD rva = (DWORD)(currentPC - pRangeSection->LowAddress);
6697
6698     ReadyToRunInfo * pReadyToRunInfo = dac_cast<PTR_Module>(pRangeSection->pHeapListOrZapModule)->GetReadyToRunInfo();
6699
6700     IMAGE_DATA_DIRECTORY * pDelayLoadMethodCallThunksDir = pReadyToRunInfo->FindSection(READYTORUN_SECTION_DELAYLOAD_METHODCALL_THUNKS);
6701     if (pDelayLoadMethodCallThunksDir != NULL)
6702     {
6703         if (pDelayLoadMethodCallThunksDir->VirtualAddress <= rva 
6704                 && rva < pDelayLoadMethodCallThunksDir->VirtualAddress + pDelayLoadMethodCallThunksDir->Size)
6705             return STUB_CODE_BLOCK_METHOD_CALL_THUNK;
6706     }
6707
6708     return STUB_CODE_BLOCK_UNKNOWN;
6709 }
6710
6711 #ifndef DACCESS_COMPILE
6712
6713 TypeHandle ReadyToRunJitManager::ResolveEHClause(EE_ILEXCEPTION_CLAUSE* pEHClause,
6714                                               CrawlFrame* pCf)
6715 {
6716     CONTRACTL {
6717         THROWS;
6718         GC_TRIGGERS;
6719     } CONTRACTL_END;
6720
6721     _ASSERTE(NULL != pCf);
6722     _ASSERTE(NULL != pEHClause);
6723     _ASSERTE(IsTypedHandler(pEHClause));
6724     
6725     MethodDesc *pMD = PTR_MethodDesc(pCf->GetFunction());
6726
6727     _ASSERTE(pMD != NULL);
6728
6729     Module* pModule = pMD->GetModule();
6730     PREFIX_ASSUME(pModule != NULL);
6731
6732     SigTypeContext typeContext(pMD);
6733     VarKind k = hasNoVars;
6734
6735     mdToken typeTok = pEHClause->ClassToken;
6736
6737     // In the vast majority of cases the code under the "if" below
6738     // will not be executed.
6739     //
6740     // First grab the representative instantiations.  For code
6741     // shared by multiple generic instantiations these are the
6742     // canonical (representative) instantiation.
6743     if (TypeFromToken(typeTok) == mdtTypeSpec)
6744     {
6745         PCCOR_SIGNATURE pSig;
6746         ULONG cSig;
6747         IfFailThrow(pModule->GetMDImport()->GetTypeSpecFromToken(typeTok, &pSig, &cSig));
6748         
6749         SigPointer psig(pSig, cSig);
6750         k = psig.IsPolyType(&typeContext);
6751
6752         // Grab the active class and method instantiation.  This exact instantiation is only
6753         // needed in the corner case of "generic" exception catching in shared
6754         // generic code.  We don't need the exact instantiation if the token
6755         // doesn't contain E_T_VAR or E_T_MVAR.        
6756         if ((k & hasSharableVarsMask) != 0)
6757         {
6758             Instantiation classInst;
6759             Instantiation methodInst;
6760             pCf->GetExactGenericInstantiations(&classInst,&methodInst);
6761             SigTypeContext::InitTypeContext(pMD,classInst, methodInst,&typeContext);
6762         }
6763     }
6764
6765     return ClassLoader::LoadTypeDefOrRefOrSpecThrowing(pModule, typeTok, &typeContext, 
6766                                                           ClassLoader::ReturnNullIfNotFound);
6767 }
6768
6769 #endif // #ifndef DACCESS_COMPILE
6770
6771 //-----------------------------------------------------------------------------
6772 // Ngen info manager
6773 //-----------------------------------------------------------------------------
6774 BOOL ReadyToRunJitManager::GetBoundariesAndVars(
6775         const DebugInfoRequest & request,
6776         IN FP_IDS_NEW fpNew, IN void * pNewData,
6777         OUT ULONG32 * pcMap, 
6778         OUT ICorDebugInfo::OffsetMapping **ppMap,
6779         OUT ULONG32 * pcVars, 
6780         OUT ICorDebugInfo::NativeVarInfo **ppVars)
6781 {
6782     CONTRACTL {
6783         THROWS;       // on OOM.
6784         GC_NOTRIGGER; // getting vars shouldn't trigger
6785         SUPPORTS_DAC;
6786     } CONTRACTL_END;
6787
6788     EECodeInfo codeInfo(request.GetStartAddress());
6789     if (!codeInfo.IsValid())
6790         return FALSE;
6791
6792     ReadyToRunInfo * pReadyToRunInfo = JitTokenToReadyToRunInfo(codeInfo.GetMethodToken());
6793     PTR_RUNTIME_FUNCTION pRuntimeFunction = JitTokenToRuntimeFunction(codeInfo.GetMethodToken());
6794
6795     PTR_BYTE pDebugInfo = pReadyToRunInfo->GetDebugInfo(pRuntimeFunction);
6796     if (pDebugInfo == NULL)
6797         return FALSE;
6798
6799     // Uncompress. This allocates memory and may throw.
6800     CompressDebugInfo::RestoreBoundariesAndVars(
6801         fpNew, pNewData, // allocators
6802         pDebugInfo,      // input
6803         pcMap, ppMap,
6804         pcVars, ppVars); // output
6805
6806     return TRUE;
6807 }
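
// Illustrative note (comments only): fpNew/pNewData form the allocator
// callback used by the debug info store -- the caller supplies the allocation
// policy so the same decode path can serve different consumers. A
// hypothetical caller:
//
//     ULONG32 cMap, cVars;
//     ICorDebugInfo::OffsetMapping * pMap;
//     ICorDebugInfo::NativeVarInfo * pVars;
//     if (pJitMan->GetBoundariesAndVars(request, fpNew, pNewData,
//                                       &cMap, &pMap, &cVars, &pVars))
//     {
//         // cMap boundary mappings and cVars native variable infos
//     }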
6808
6809 #ifdef DACCESS_COMPILE
6810 //
6811 // Need to write out debug info
6812 //
6813 void ReadyToRunJitManager::EnumMemoryRegionsForMethodDebugInfo(CLRDataEnumMemoryFlags flags, MethodDesc * pMD)
6814 {
6815     SUPPORTS_DAC;
6816
6817     EECodeInfo codeInfo(pMD->GetNativeCode());
6818     if (!codeInfo.IsValid())
6819         return;
6820
6821     ReadyToRunInfo * pReadyToRunInfo = JitTokenToReadyToRunInfo(codeInfo.GetMethodToken());
6822     PTR_RUNTIME_FUNCTION pRuntimeFunction = JitTokenToRuntimeFunction(codeInfo.GetMethodToken());
6823
6824     PTR_BYTE pDebugInfo = pReadyToRunInfo->GetDebugInfo(pRuntimeFunction);
6825     if (pDebugInfo == NULL)
6826         return;
6827
6828     CompressDebugInfo::EnumMemoryRegions(flags, pDebugInfo);
6829 }
6830 #endif
6831
6832 PCODE ReadyToRunJitManager::GetCodeAddressForRelOffset(const METHODTOKEN& MethodToken, DWORD relOffset)
6833 {
6834     WRAPPER_NO_CONTRACT;
6835
6836     MethodRegionInfo methodRegionInfo;
6837     JitTokenToMethodRegionInfo(MethodToken, &methodRegionInfo);
6838
6839     if (relOffset < methodRegionInfo.hotSize)
6840         return methodRegionInfo.hotStartAddress + relOffset;
6841
6842     SIZE_T coldOffset = relOffset - methodRegionInfo.hotSize;
6843     _ASSERTE(coldOffset < methodRegionInfo.coldSize);
6844     return methodRegionInfo.coldStartAddress + coldOffset;
6845 }
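
// Illustrative arithmetic (comments only) with hypothetical sizes: for
// hotSize = 0x100 and coldSize = 0x40, relOffset 0x20 resolves to
// hotStartAddress + 0x20, while relOffset 0x120 resolves to
// coldStartAddress + 0x20, since offsets at or past hotSize index into the
// cold region.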
6846
6847 BOOL ReadyToRunJitManager::JitCodeToMethodInfo(RangeSection * pRangeSection,
6848                                             PCODE currentPC,
6849                                             MethodDesc** ppMethodDesc,
6850                                             OUT EECodeInfo * pCodeInfo)
6851 {
6852     CONTRACTL {
6853         NOTHROW;
6854         GC_NOTRIGGER;
6855         SO_TOLERANT;
6856         SUPPORTS_DAC;
6857     } CONTRACTL_END;
6858
6859     // READYTORUN: FUTURE: Hot-cold splitting
6860
6861     TADDR currentInstr = PCODEToPINSTR(currentPC);
6862
6863     TADDR ImageBase = pRangeSection->LowAddress;
6864
6865     DWORD RelativePc = (DWORD)(currentInstr - ImageBase);
6866
6867     Module * pModule = dac_cast<PTR_Module>(pRangeSection->pHeapListOrZapModule);
6868     ReadyToRunInfo * pInfo = pModule->GetReadyToRunInfo();
6869
6870     COUNT_T nRuntimeFunctions = pInfo->m_nRuntimeFunctions;
6871     PTR_RUNTIME_FUNCTION pRuntimeFunctions = pInfo->m_pRuntimeFunctions;
6872
6873     int MethodIndex = NativeUnwindInfoLookupTable::LookupUnwindInfoForMethod(RelativePc,
6874                                                                              pRuntimeFunctions,
6875                                                                              0,
6876                                                                              nRuntimeFunctions - 1);
6877
6878     if (MethodIndex < 0)
6879         return FALSE;
6880
6881 #ifdef WIN64EXCEPTIONS
6882     // Save the raw entry
6883     PTR_RUNTIME_FUNCTION RawFunctionEntry = pRuntimeFunctions + MethodIndex;
6884
6885     MethodDesc *pMethodDesc;
6886     while ((pMethodDesc = pInfo->GetMethodDescForEntryPoint(ImageBase + RUNTIME_FUNCTION__BeginAddress(pRuntimeFunctions + MethodIndex))) == NULL)
6887         MethodIndex--;
6888 #endif
6889
6890     PTR_RUNTIME_FUNCTION FunctionEntry = pRuntimeFunctions + MethodIndex;
6891
6892     if (ppMethodDesc)
6893     {
6894 #ifdef WIN64EXCEPTIONS
6895         *ppMethodDesc = pMethodDesc;
6896 #else
6897         *ppMethodDesc = pInfo->GetMethodDescForEntryPoint(ImageBase + RUNTIME_FUNCTION__BeginAddress(FunctionEntry));
6898 #endif
6899         _ASSERTE(*ppMethodDesc != NULL);
6900     }
6901
6902     if (pCodeInfo)
6903     {
6904         pCodeInfo->m_relOffset = (DWORD)
6905             (RelativePc - RUNTIME_FUNCTION__BeginAddress(FunctionEntry));
6906
6907         // We are using RUNTIME_FUNCTION as METHODTOKEN
6908         pCodeInfo->m_methodToken = METHODTOKEN(pRangeSection, dac_cast<TADDR>(FunctionEntry));
6909
6910 #ifdef WIN64EXCEPTIONS
6911         AMD64_ONLY(_ASSERTE((RawFunctionEntry->UnwindData & RUNTIME_FUNCTION_INDIRECT) == 0));
6912         pCodeInfo->m_pFunctionEntry = RawFunctionEntry;
6913 #endif
6914     }
6915
6916     return TRUE;
6917 }
6918
6919 #if defined(WIN64EXCEPTIONS)
6920 PTR_RUNTIME_FUNCTION ReadyToRunJitManager::LazyGetFunctionEntry(EECodeInfo * pCodeInfo)
6921 {
6922     CONTRACTL {
6923         NOTHROW;
6924         GC_NOTRIGGER;
6925     } CONTRACTL_END;
6926
6927     if (!pCodeInfo->IsValid())
6928     {
6929         return NULL;
6930     }
6931
6932     // code:ReadyToRunJitManager::JitCodeToMethodInfo computes PTR_RUNTIME_FUNCTION eagerly. This path is only 
6933     // reachable via EECodeInfo::GetMainFunctionInfo, and so we can just return the main entry.
6934     _ASSERTE(pCodeInfo->GetRelOffset() == 0);
6935
6936     return dac_cast<PTR_RUNTIME_FUNCTION>(pCodeInfo->GetMethodToken().m_pCodeHeader);
6937 }
6938
6939 TADDR ReadyToRunJitManager::GetFuncletStartAddress(EECodeInfo * pCodeInfo)
6940 {
6941     LIMITED_METHOD_DAC_CONTRACT;
6942
6943     // READYTORUN: FUTURE: Hot-cold splitting
6944
6945     return IJitManager::GetFuncletStartAddress(pCodeInfo);
6946 }
6947
6948 DWORD ReadyToRunJitManager::GetFuncletStartOffsets(const METHODTOKEN& MethodToken, DWORD* pStartFuncletOffsets, DWORD dwLength)
6949 {
6950     PTR_RUNTIME_FUNCTION pFirstFuncletFunctionEntry = dac_cast<PTR_RUNTIME_FUNCTION>(MethodToken.m_pCodeHeader) + 1;
6951     
6952     TADDR moduleBase = JitTokenToModuleBase(MethodToken);
6953     DWORD nFunclets = 0;
6954     MethodRegionInfo regionInfo;
6955     JitTokenToMethodRegionInfo(MethodToken, &regionInfo);
6956
6957     // pFirstFuncletFunctionEntry will work for ARM when passed to GetFuncletStartOffsetsHelper()
6958     // even if it is a fragment of the main body and not a RUNTIME_FUNCTION for the beginning
6959     // of the first hot funclet, because GetFuncletStartOffsetsHelper() will skip all the function
6960     // fragments until the first funclet, if any, is found.
6961
6962     GetFuncletStartOffsetsHelper(regionInfo.hotStartAddress, regionInfo.hotSize, 0,
6963         pFirstFuncletFunctionEntry, moduleBase,
6964         &nFunclets, pStartFuncletOffsets, dwLength);
6965
6966     // READYTORUN: FUTURE: Hot/cold splitting
6967
6968     return nFunclets;
6969 }
6970
6971 BOOL ReadyToRunJitManager::IsFilterFunclet(EECodeInfo * pCodeInfo)
6972 {
6973     CONTRACTL {
6974         NOTHROW;
6975         GC_NOTRIGGER;
6976         MODE_ANY;
6977     }
6978     CONTRACTL_END;
6979
6980     if (!pCodeInfo->IsFunclet())
6981         return FALSE;
6982
6983     // Get address of the personality routine for the function being queried.
6984     SIZE_T size;
6985     PTR_VOID pUnwindData = GetUnwindDataBlob(pCodeInfo->GetModuleBase(), pCodeInfo->GetFunctionEntry(), &size);
6986     _ASSERTE(pUnwindData != NULL);
6987
6988     // Personality routine is always the last element of the unwind data
6989     DWORD rvaPersonalityRoutine = *(dac_cast<PTR_DWORD>(dac_cast<TADDR>(pUnwindData) + size) - 1);
6990
6991     // Get the personality routine for the first function in the module, which is guaranteed not to be a funclet.
6992     ReadyToRunInfo * pInfo = JitTokenToReadyToRunInfo(pCodeInfo->GetMethodToken());
6993     if (pInfo->m_nRuntimeFunctions == 0)
6994         return FALSE;
6995
6996     PTR_VOID pFirstUnwindData = GetUnwindDataBlob(pCodeInfo->GetModuleBase(), pInfo->m_pRuntimeFunctions, &size);
6997     _ASSERTE(pFirstUnwindData != NULL);
6998     DWORD rvaFirstPersonalityRoutine = *(dac_cast<PTR_DWORD>(dac_cast<TADDR>(pFirstUnwindData) + size) - 1);
6999
7000     // Compare the two personality routines. If they are different, then the current function is a filter funclet.
7001     BOOL fRet = (rvaPersonalityRoutine != rvaFirstPersonalityRoutine);
7002
7003     // Verify that the optimized implementation is in sync with the slow implementation
7004     _ASSERTE(fRet == IJitManager::IsFilterFunclet(pCodeInfo));
7005
7006     return fRet;
7007 }
7008
7009 #endif  // WIN64EXCEPTIONS
7010
7011 void ReadyToRunJitManager::JitTokenToMethodRegionInfo(const METHODTOKEN& MethodToken, 
7012                                                    MethodRegionInfo * methodRegionInfo)
7013 {
7014     CONTRACTL {
7015         NOTHROW;
7016         GC_NOTRIGGER;
7017         HOST_NOCALLS;
7018         SUPPORTS_DAC;
7019         PRECONDITION(methodRegionInfo != NULL);
7020     } CONTRACTL_END;
7021
7022     // READYTORUN: FUTURE: Hot-cold splitting
7023
7024     methodRegionInfo->hotStartAddress  = JitTokenToStartAddress(MethodToken);
7025     methodRegionInfo->hotSize          = GetCodeManager()->GetFunctionSize(GetGCInfoToken(MethodToken));
7026     methodRegionInfo->coldStartAddress = 0;
7027     methodRegionInfo->coldSize         = 0;
7028 }
7029
7030 #ifdef DACCESS_COMPILE
7031
7032 void ReadyToRunJitManager::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
7033 {
7034     IJitManager::EnumMemoryRegions(flags);
7035 }
7036
7037 #if defined(WIN64EXCEPTIONS)
7038
7039 //
7040 // EnumMemoryRegionsForMethodUnwindInfo - enumerate the memory necessary to read the unwind info for the
7041 // specified method.
7042 // 
7043 // Note that in theory, a dump generation library could save the unwind information itself without help
7044 // from us, since it's stored in the image in the standard function table layout for Win64.  However, 
7045 // dump-generation libraries assume that the image will be available at debug time, and if the image 
7046 // isn't available then it is acceptable for stackwalking to break.  For ngen images (which are created 
7047 // on the client), it usually isn't possible to have the image available at debug time, and so for minidumps
7048 // we must explicitly ensure the unwind information is saved into the dump. 
7049 // 
7050 // Arguments:
7051 //     flags - EnumMem flags
7052 //     pCodeInfo - EECodeInfo for the method in question
7053 //     
7054 void ReadyToRunJitManager::EnumMemoryRegionsForMethodUnwindInfo(CLRDataEnumMemoryFlags flags, EECodeInfo * pCodeInfo)
7055 {
7056     // Get the RUNTIME_FUNCTION entry for this method 
7057     PTR_RUNTIME_FUNCTION pRtf = pCodeInfo->GetFunctionEntry();
7058
7059     if (pRtf==NULL)
7060     {
7061         return;
7062     }
7063
7064     // Enumerate the function entry and other entries needed to locate it in the program exceptions directory
7065     ReadyToRunInfo * pReadyToRunInfo = JitTokenToReadyToRunInfo(pCodeInfo->GetMethodToken());
7066     EnumRuntimeFunctionEntriesToFindEntry(pRtf, pReadyToRunInfo->GetImage());
7067
7068     SIZE_T size;
7069     PTR_VOID pUnwindData = GetUnwindDataBlob(pCodeInfo->GetModuleBase(), pRtf, &size);
7070     if (pUnwindData != NULL)
7071         DacEnumMemoryRegion(PTR_TO_TADDR(pUnwindData), size); 
7072 }
7073
7074 #endif //WIN64EXCEPTIONS
7075 #endif // #ifdef DACCESS_COMPILE
7076
7077 #endif // FEATURE_READYTORUN