bd66bad8cd630ac95a57b37c8e1593bbe61e4a7e
[platform/upstream/dotnet/runtime.git] / src / coreclr / vm / stublink.cpp
1 // Licensed to the .NET Foundation under one or more agreements.
2 // The .NET Foundation licenses this file to you under the MIT license.
3 //
4 // stublink.cpp
5 //
6
7
8
9 #include "common.h"
10
11 #include "threads.h"
12 #include "excep.h"
13 #include "stublink.h"
14 #include "stubgen.h"
15 #include "stublink.inl"
16
17 #include "rtlfunctions.h"
18
19 #define S_BYTEPTR(x)    S_SIZE_T((SIZE_T)(x))
20
21 #ifndef DACCESS_COMPILE
22
23
//************************************************************************
// CodeElement
//
// There are two types of CodeElements: CodeRuns (a stream of uninterpreted
// code bytes) and LabelRefs (an instruction containing
// a fixup.)
//
// CodeElements are singly linked (m_next) in *reverse* emission order:
// StubLinker::AppendCodeElement pushes each new element onto the head
// of the list.
//************************************************************************
struct CodeElement
{
    enum CodeElementType {
        kCodeRun  = 0,
        kLabelRef = 1,
    };


    CodeElementType     m_type;  // kCodeRun or kLabelRef
    CodeElement        *m_next;  // ptr to next CodeElement

    // Used as workspace during Link(): holds the offset relative to
    // the start of the final stub.
    UINT                m_globaloffset;
    // Workspace during Link(): offset of this element's out-of-line data
    // relative to the start of the stub's data block (see CalculateSize).
    UINT                m_dataoffset;
};
47
48
//************************************************************************
// CodeRun: A run of uninterrupted code bytes.
//************************************************************************

// Deliberately tiny in _DEBUG so that the run-spilling logic in
// StubLinker::EmitBytes gets exercised by ordinary stubs.
#ifdef _DEBUG
#define CODERUNSIZE 3
#else
#define CODERUNSIZE 32
#endif

struct CodeRun : public CodeElement
{
    UINT    m_numcodebytes;       // how many bytes are actually used
    BYTE    m_codebytes[CODERUNSIZE];   // fixed-capacity byte buffer; overflow starts a new CodeRun
};
64
//************************************************************************
// LabelRef: An instruction containing an embedded label reference
//************************************************************************
struct LabelRef : public CodeElement
{
    // provides platform-specific information about the instruction
    InstructionFormat    *m_pInstructionFormat;

    // a variation code (interpretation is specific to the InstructionFormat)
    //  typically used to customize an instruction (e.g. with a condition
    //  code.)
    UINT                 m_variationCode;

    // The label this instruction refers to; resolved to an offset or
    // absolute address during Link().
    CodeLabel           *m_target;

    // Workspace during the link phase: the currently chosen encoding size
    // for this reference (one of the InstructionFormat's m_allowedSizes
    // bits; grown in CalculateSize until the target is reachable).
    UINT                 m_refsize;


    // Pointer to next LabelRef
    LabelRef            *m_nextLabelRef;
};
88
89
90 //************************************************************************
91 // IntermediateUnwindInfo
92 //************************************************************************
93
94 #ifdef STUBLINKER_GENERATES_UNWIND_INFO
95
96
#ifdef TARGET_AMD64
// List of unwind operations, queued in StubLinker::m_pUnwindInfoList.
// Each node records where in the emitted code (pCodeRun + LocalOffset)
// the corresponding unwind codes apply.
struct IntermediateUnwindInfo
{
    IntermediateUnwindInfo *pNext;      // next queued unwind operation
    CodeRun *pCodeRun;                  // code run containing the instruction
    UINT LocalOffset;                   // offset of the instruction within pCodeRun
    UNWIND_CODE rgUnwindCode[1];    // variable length, depends on first entry's UnwindOp
};
#endif // TARGET_AMD64
107
108
// Global list of heap segments that contain stub unwind info, protected
// by g_StubUnwindInfoHeapSegmentsCrst.
StubUnwindInfoHeapSegment *g_StubHeapSegments;
CrstStatic g_StubUnwindInfoHeapSegmentsCrst;
#ifdef _DEBUG  // for unit test
void *__DEBUG__g_StubHeapSegments = &g_StubHeapSegments;
#endif
114
115
116 //
117 // Callback registered via RtlInstallFunctionTableCallback.  Called by
118 // RtlpLookupDynamicFunctionEntry to locate RUNTIME_FUNCTION entry for a PC
119 // found within a portion of a heap that contains stub code.
120 //
121 T_RUNTIME_FUNCTION*
122 FindStubFunctionEntry (
123    BIT64_ONLY(IN ULONG64    ControlPc)
124     NOT_BIT64(IN ULONG      ControlPc),
125               IN PVOID      Context
126     )
127 {
128     CONTRACTL
129     {
130         NOTHROW;
131         GC_NOTRIGGER;
132         FORBID_FAULT;
133     }
134     CONTRACTL_END
135
136     CONSISTENCY_CHECK(DYNFNTABLE_STUB == IdentifyDynamicFunctionTableTypeFromContext(Context));
137
138     StubUnwindInfoHeapSegment *pStubHeapSegment = (StubUnwindInfoHeapSegment*)DecodeDynamicFunctionTableContext(Context);
139
140     //
141     // The RUNTIME_FUNCTION entry contains ULONG offsets relative to the
142     // segment base.  Stub::EmitUnwindInfo ensures that this cast is valid.
143     //
144     ULONG RelativeAddress = (ULONG)((BYTE*)ControlPc - pStubHeapSegment->pbBaseAddress);
145
146     LOG((LF_STUBS, LL_INFO100000, "ControlPc %p, RelativeAddress 0x%x, pStubHeapSegment %p, pStubHeapSegment->pbBaseAddress %p\n",
147             ControlPc,
148             RelativeAddress,
149             pStubHeapSegment,
150             pStubHeapSegment->pbBaseAddress));
151
152     //
153     // Search this segment's list of stubs for an entry that includes the
154     // segment-relative offset.
155     //
156     for (StubUnwindInfoHeader *pHeader = pStubHeapSegment->pUnwindHeaderList;
157          pHeader;
158          pHeader = pHeader->pNext)
159     {
160         // The entry points are in increasing address order.
161         if (RelativeAddress >= RUNTIME_FUNCTION__BeginAddress(&pHeader->FunctionEntry))
162         {
163             T_RUNTIME_FUNCTION *pCurFunction = &pHeader->FunctionEntry;
164             T_RUNTIME_FUNCTION *pPrevFunction = NULL;
165
166             LOG((LF_STUBS, LL_INFO100000, "pCurFunction %p, pCurFunction->BeginAddress 0x%x, pCurFunction->EndAddress 0x%x\n",
167                     pCurFunction,
168                     RUNTIME_FUNCTION__BeginAddress(pCurFunction),
169                     RUNTIME_FUNCTION__EndAddress(pCurFunction, (TADDR)pStubHeapSegment->pbBaseAddress)));
170
171             CONSISTENCY_CHECK((RUNTIME_FUNCTION__EndAddress(pCurFunction, (TADDR)pStubHeapSegment->pbBaseAddress) > RUNTIME_FUNCTION__BeginAddress(pCurFunction)));
172             CONSISTENCY_CHECK((!pPrevFunction || RUNTIME_FUNCTION__EndAddress(pPrevFunction, (TADDR)pStubHeapSegment->pbBaseAddress) <= RUNTIME_FUNCTION__BeginAddress(pCurFunction)));
173
174             // The entry points are in increasing address order.  They're
175             // also contiguous, so after we're sure it's after the start of
176             // the first function (checked above), we only need to test
177             // the end address.
178             if (RelativeAddress < RUNTIME_FUNCTION__EndAddress(pCurFunction, (TADDR)pStubHeapSegment->pbBaseAddress))
179             {
180                 CONSISTENCY_CHECK((RelativeAddress >= RUNTIME_FUNCTION__BeginAddress(pCurFunction)));
181
182                 return pCurFunction;
183             }
184         }
185     }
186
187     //
188     // Return NULL to indicate that there is no RUNTIME_FUNCTION/unwind
189     // information for this offset.
190     //
191     return NULL;
192 }
193
194
// Called once per reserved page region of a LoaderHeap being torn down.
// Removes (and frees) every StubUnwindInfoHeapSegment whose address range
// overlaps [pvAllocationBase, pvAllocationBase + cbReserved), after
// unregistering its function table with the OS.
// Returns false so the caller keeps enumerating remaining regions.
bool UnregisterUnwindInfoInLoaderHeapCallback (PVOID pvArgs, PVOID pvAllocationBase, SIZE_T cbReserved)
{
    CONTRACTL
    {
        NOTHROW;
        GC_TRIGGERS;
    }
    CONTRACTL_END;

    //
    // There may be multiple StubUnwindInfoHeapSegment's associated with a region.
    //

    LOG((LF_STUBS, LL_INFO1000, "Looking for stub unwind info for LoaderHeap segment %p size %p\n", pvAllocationBase, cbReserved));

    // Serialize against concurrent mutation of the global segment list.
    CrstHolder crst(&g_StubUnwindInfoHeapSegmentsCrst);

    StubUnwindInfoHeapSegment *pStubHeapSegment;
    // Walk with a pointer-to-link so a matching node can be unlinked in place.
    for (StubUnwindInfoHeapSegment **ppPrevStubHeapSegment = &g_StubHeapSegments;
            (pStubHeapSegment = *ppPrevStubHeapSegment); )
    {
        LOG((LF_STUBS, LL_INFO10000, "    have unwind info for address %p size %p\n", pStubHeapSegment->pbBaseAddress, pStubHeapSegment->cbSegment));

        // If heap region ends before stub segment
        if ((BYTE*)pvAllocationBase + cbReserved <= pStubHeapSegment->pbBaseAddress)
        {
            // The list is ordered, so address range is between segments
            break;
        }

        // The given heap segment base address may fall within a prereserved
        // region that was given to the heap when the heap was constructed, so
        // pvAllocationBase may be > pbBaseAddress.  Also, there could be
        // multiple segments for each heap region, so pvAllocationBase may be
        // < pbBaseAddress.  So...there is no meaningful relationship between
        // pvAllocationBase and pbBaseAddress.

        // If heap region starts before end of stub segment
        if ((BYTE*)pvAllocationBase < pStubHeapSegment->pbBaseAddress + pStubHeapSegment->cbSegment)
        {
            // Overlap implies the stub segment must be fully contained in
            // the heap region being released.
            _ASSERTE((BYTE*)pvAllocationBase + cbReserved <= pStubHeapSegment->pbBaseAddress + pStubHeapSegment->cbSegment);

            // Unregister the OS function table before freeing the segment.
            DeleteEEFunctionTable(pStubHeapSegment);
#ifdef TARGET_AMD64
            if (pStubHeapSegment->pUnwindInfoTable != 0)
                delete pStubHeapSegment->pUnwindInfoTable;
#endif
            // Unlink and free; ppPrevStubHeapSegment stays put and now
            // points at the successor, which the loop examines next.
            *ppPrevStubHeapSegment = pStubHeapSegment->pNext;

            delete pStubHeapSegment;
        }
        else
        {
            // No overlap; advance to the next link.
            ppPrevStubHeapSegment = &pStubHeapSegment->pNext;
        }
    }

    return false; // Keep enumerating
}
254
255
// Tears down all stub unwind info registered for pHeap's page regions.
// Must be called before the heap's memory is released so the OS function
// tables never reference freed stub code.
VOID UnregisterUnwindInfoInLoaderHeap (UnlockedLoaderHeap *pHeap)
{
    CONTRACTL
    {
        NOTHROW;
        GC_TRIGGERS;
        PRECONDITION(pHeap->m_fPermitStubsWithUnwindInfo);
    }
    CONTRACTL_END;

    // Visit every reserved region of the heap; the callback removes any
    // overlapping StubUnwindInfoHeapSegment entries.
    pHeap->EnumPageRegions(&UnregisterUnwindInfoInLoaderHeapCallback, NULL /* pvArgs */);

#ifdef _DEBUG
    // Debug-only marker checked to ensure this teardown actually ran.
    pHeap->m_fStubUnwindInfoUnregistered = TRUE;
#endif // _DEBUG
}
272
273
// Holds stubs whose allocation failed to produce usable unwind info
// (see StubLinker::Link's retry loop).  Keeping them alive "reserves"
// their address ranges so the retry allocation lands elsewhere; the
// destructor releases every parked stub.
class StubUnwindInfoSegmentBoundaryReservationList
{
    // Intrusive singly-linked list node overlaid on the memory
    // immediately following each parked Stub object.
    struct ReservationList
    {
        ReservationList *pNext;

        // The node lives directly after the Stub header in memory.
        static ReservationList *FromStub (Stub *pStub)
        {
            return (ReservationList*)(pStub+1);
        }

        // Inverse of FromStub: recover the Stub this node is overlaid on.
        Stub *GetStub ()
        {
            return (Stub*)this - 1;
        }
    };

    ReservationList *m_pList;   // head of the parked-stub list (NULL when empty)

public:

    StubUnwindInfoSegmentBoundaryReservationList ()
    {
        LIMITED_METHOD_CONTRACT;

        m_pList = NULL;
    }

    // Releases (DecRef's) every parked stub, freeing the reserved ranges.
    ~StubUnwindInfoSegmentBoundaryReservationList ()
    {
        LIMITED_METHOD_CONTRACT;

        ReservationList *pList = m_pList;
        while (pList)
        {
            ReservationList *pNext = pList->pNext;

            // Stub memory is executable; map a writable view to mutate it.
            ExecutableWriterHolder<Stub> stubWriterHolder(pList->GetStub(), sizeof(Stub));
            stubWriterHolder.GetRW()->DecRef();

            pList = pNext;
        }
    }

    // Parks pStub on the list; caller must not release it separately.
    void AddStub (Stub *pStub)
    {
        LIMITED_METHOD_CONTRACT;

        ReservationList *pList = ReservationList::FromStub(pStub);

        // The node memory is in the executable stub allocation, so it
        // too must be written through a writable mapping.
        ExecutableWriterHolder<ReservationList> listWriterHolder(pList, sizeof(ReservationList));
        listWriterHolder.GetRW()->pNext = m_pList;
        m_pList = pList;
    }
};
329
330
331 #endif // STUBLINKER_GENERATES_UNWIND_INFO
332
333
334 //************************************************************************
335 // StubLinker
336 //************************************************************************
337
//---------------------------------------------------------------
// Construction: initializes an empty linker.  All code-element,
// label, and label-ref lists start empty and are populated by the
// Emit* methods; Link() consumes them to produce the final Stub.
//---------------------------------------------------------------
StubLinker::StubLinker()
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    m_pCodeElements     = NULL;
    m_pFirstCodeLabel   = NULL;
    m_pFirstLabelRef    = NULL;
    m_pPatchLabel       = NULL;
    m_pTargetMethod     = NULL;
    m_stackSize         = 0;
    m_fDataOnly         = FALSE;
#ifdef TARGET_ARM
    // ARM prolog-description state (no prolog emitted yet).
    m_fProlog           = FALSE;
    m_cCalleeSavedRegs  = 0;
    m_cbStackFrame      = 0;
    m_fPushArgRegs      = FALSE;
#endif
#ifdef STUBLINKER_GENERATES_UNWIND_INFO
#ifdef _DEBUG
    m_pUnwindInfoCheckLabel = NULL;
#endif
#ifdef TARGET_AMD64
    // No queued unwind operations yet (see IntermediateUnwindInfo).
    m_pUnwindInfoList   = NULL;
    m_nUnwindSlots      = 0;
    m_fHaveFramePointer = FALSE;
#endif
#ifdef TARGET_ARM64
    // ARM64 prolog-description state (no prolog emitted yet).
    m_fProlog           = FALSE;
    m_cIntRegArgs       = 0;
    m_cVecRegArgs       = 0;
    m_cCalleeSavedRegs  = 0;
    m_cbStackSpace      = 0;
#endif
#endif // STUBLINKER_GENERATES_UNWIND_INFO
}
381
382
383
384 //---------------------------------------------------------------
385 // Append code bytes.
386 //---------------------------------------------------------------
387 VOID StubLinker::EmitBytes(const BYTE *pBytes, UINT numBytes)
388 {
389     CONTRACTL
390     {
391         THROWS;
392         GC_NOTRIGGER;
393     }
394     CONTRACTL_END;
395
396     CodeElement *pLastCodeElement = GetLastCodeElement();
397     while (numBytes != 0) {
398
399         if (pLastCodeElement != NULL &&
400             pLastCodeElement->m_type == CodeElement::kCodeRun) {
401             CodeRun *pCodeRun = (CodeRun*)pLastCodeElement;
402             UINT numbytessrc  = numBytes;
403             UINT numbytesdst  = CODERUNSIZE - pCodeRun->m_numcodebytes;
404             if (numbytesdst <= numbytessrc) {
405                 CopyMemory(&(pCodeRun->m_codebytes[pCodeRun->m_numcodebytes]),
406                            pBytes,
407                            numbytesdst);
408                 pCodeRun->m_numcodebytes = CODERUNSIZE;
409                 pLastCodeElement = NULL;
410                 pBytes += numbytesdst;
411                 numBytes -= numbytesdst;
412             } else {
413                 CopyMemory(&(pCodeRun->m_codebytes[pCodeRun->m_numcodebytes]),
414                            pBytes,
415                            numbytessrc);
416                 pCodeRun->m_numcodebytes += numbytessrc;
417                 pBytes += numbytessrc;
418                 numBytes = 0;
419             }
420
421         } else {
422             pLastCodeElement = AppendNewEmptyCodeRun();
423         }
424     }
425 }
426
427
428 //---------------------------------------------------------------
429 // Append code bytes.
430 //---------------------------------------------------------------
431 VOID StubLinker::Emit8 (unsigned __int8  val)
432 {
433     CONTRACTL
434     {
435         THROWS;
436         GC_NOTRIGGER;
437     }
438     CONTRACTL_END;
439
440     CodeRun *pCodeRun = GetLastCodeRunIfAny();
441     if (pCodeRun && (CODERUNSIZE - pCodeRun->m_numcodebytes) >= sizeof(val)) {
442         *((unsigned __int8 *)(pCodeRun->m_codebytes + pCodeRun->m_numcodebytes)) = val;
443         pCodeRun->m_numcodebytes += sizeof(val);
444     } else {
445         EmitBytes((BYTE*)&val, sizeof(val));
446     }
447 }
448
449 //---------------------------------------------------------------
450 // Append code bytes.
451 //---------------------------------------------------------------
452 VOID StubLinker::Emit16(unsigned __int16 val)
453 {
454     CONTRACTL
455     {
456         THROWS;
457         GC_NOTRIGGER;
458     }
459     CONTRACTL_END;
460
461     CodeRun *pCodeRun = GetLastCodeRunIfAny();
462     if (pCodeRun && (CODERUNSIZE - pCodeRun->m_numcodebytes) >= sizeof(val)) {
463         SET_UNALIGNED_16(pCodeRun->m_codebytes + pCodeRun->m_numcodebytes, val);
464         pCodeRun->m_numcodebytes += sizeof(val);
465     } else {
466         EmitBytes((BYTE*)&val, sizeof(val));
467     }
468 }
469
470 //---------------------------------------------------------------
471 // Append code bytes.
472 //---------------------------------------------------------------
473 VOID StubLinker::Emit32(unsigned __int32 val)
474 {
475     CONTRACTL
476     {
477         THROWS;
478         GC_NOTRIGGER;
479     }
480     CONTRACTL_END;
481
482     CodeRun *pCodeRun = GetLastCodeRunIfAny();
483     if (pCodeRun && (CODERUNSIZE - pCodeRun->m_numcodebytes) >= sizeof(val)) {
484         SET_UNALIGNED_32(pCodeRun->m_codebytes + pCodeRun->m_numcodebytes,  val);
485         pCodeRun->m_numcodebytes += sizeof(val);
486     } else {
487         EmitBytes((BYTE*)&val, sizeof(val));
488     }
489 }
490
491 //---------------------------------------------------------------
492 // Append code bytes.
493 //---------------------------------------------------------------
494 VOID StubLinker::Emit64(unsigned __int64 val)
495 {
496     CONTRACTL
497     {
498         THROWS;
499         GC_NOTRIGGER;
500     }
501     CONTRACTL_END;
502
503     CodeRun *pCodeRun = GetLastCodeRunIfAny();
504     if (pCodeRun && (CODERUNSIZE - pCodeRun->m_numcodebytes) >= sizeof(val)) {
505         SET_UNALIGNED_64(pCodeRun->m_codebytes + pCodeRun->m_numcodebytes, val);
506         pCodeRun->m_numcodebytes += sizeof(val);
507     } else {
508         EmitBytes((BYTE*)&val, sizeof(val));
509     }
510 }
511
512 //---------------------------------------------------------------
513 // Append pointer value.
514 //---------------------------------------------------------------
515 VOID StubLinker::EmitPtr(const VOID *val)
516 {
517     CONTRACTL
518     {
519         THROWS;
520         GC_NOTRIGGER;
521     }
522     CONTRACTL_END;
523
524     CodeRun *pCodeRun = GetLastCodeRunIfAny();
525     if (pCodeRun && (CODERUNSIZE - pCodeRun->m_numcodebytes) >= sizeof(val)) {
526         SET_UNALIGNED_PTR(pCodeRun->m_codebytes + pCodeRun->m_numcodebytes, (UINT_PTR)val);
527         pCodeRun->m_numcodebytes += sizeof(val);
528     } else {
529         EmitBytes((BYTE*)&val, sizeof(val));
530     }
531 }
532
533
534 //---------------------------------------------------------------
535 // Create a new undefined label. Label must be assigned to a code
536 // location using EmitLabel() prior to final linking.
537 // Throws COM+ exception on failure.
538 //---------------------------------------------------------------
539 CodeLabel* StubLinker::NewCodeLabel()
540 {
541     CONTRACTL
542     {
543         THROWS;
544         GC_NOTRIGGER;
545     }
546     CONTRACTL_END;
547
548     CodeLabel *pCodeLabel = (CodeLabel*)(m_quickHeap.Alloc(sizeof(CodeLabel)));
549     _ASSERTE(pCodeLabel); // QuickHeap throws exceptions rather than returning NULL
550     pCodeLabel->m_next       = m_pFirstCodeLabel;
551     pCodeLabel->m_fExternal  = FALSE;
552     pCodeLabel->m_fAbsolute = FALSE;
553     pCodeLabel->i.m_pCodeRun = NULL;
554     m_pFirstCodeLabel = pCodeLabel;
555     return pCodeLabel;
556
557
558 }
559
// Create a new undefined label whose references are resolved as an
// absolute address rather than a relative offset.  Like NewCodeLabel,
// it must still be bound with EmitLabel() before linking.
CodeLabel* StubLinker::NewAbsoluteCodeLabel()
{
    CONTRACTL
    {
        THROWS;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    CodeLabel *pCodeLabel = NewCodeLabel();
    pCodeLabel->m_fAbsolute = TRUE;
    return pCodeLabel;
}
573
574
575 //---------------------------------------------------------------
576 // Sets the label to point to the current "instruction pointer".
577 // It is invalid to call EmitLabel() twice on
578 // the same label.
579 //---------------------------------------------------------------
580 VOID StubLinker::EmitLabel(CodeLabel* pCodeLabel)
581 {
582     CONTRACTL
583     {
584         THROWS;
585         GC_NOTRIGGER;
586     }
587     CONTRACTL_END;
588
589     _ASSERTE(!(pCodeLabel->m_fExternal));       //can't emit an external label
590     _ASSERTE(pCodeLabel->i.m_pCodeRun == NULL);  //must only emit label once
591     CodeRun *pLastCodeRun = GetLastCodeRunIfAny();
592     if (!pLastCodeRun) {
593         pLastCodeRun = AppendNewEmptyCodeRun();
594     }
595     pCodeLabel->i.m_pCodeRun    = pLastCodeRun;
596     pCodeLabel->i.m_localOffset = pLastCodeRun->m_numcodebytes;
597 }
598
599
//---------------------------------------------------------------
// Combines NewCodeLabel() and EmitLabel() for convenience:
// creates a label already bound to the current emission point.
// Throws COM+ exception on failure.
//---------------------------------------------------------------
CodeLabel* StubLinker::EmitNewCodeLabel()
{
    CONTRACTL
    {
        THROWS;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    CodeLabel* label = NewCodeLabel();
    EmitLabel(label);
    return label;
}
617
618
//---------------------------------------------------------------
// Creates & emits the patch offset label for the stub, recorded
// in m_pPatchLabel.
//---------------------------------------------------------------
VOID StubLinker::EmitPatchLabel()
{
    CONTRACTL
    {
        THROWS;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    //
    // Note that it's OK to have re-emit the patch label,
    // just use the later one.
    //

    m_pPatchLabel = EmitNewCodeLabel();
}
638
639 //---------------------------------------------------------------
640 // Returns final location of label as an offset from the start
641 // of the stub. Can only be called after linkage.
642 //---------------------------------------------------------------
643 UINT32 StubLinker::GetLabelOffset(CodeLabel *pLabel)
644 {
645     CONTRACTL
646     {
647         NOTHROW;
648         GC_NOTRIGGER;
649     }
650     CONTRACTL_END;
651
652     _ASSERTE(!(pLabel->m_fExternal));
653     return pLabel->i.m_localOffset + pLabel->i.m_pCodeRun->m_globaloffset;
654 }
655
656
657 //---------------------------------------------------------------
658 // Create a new label to an external address.
659 // Throws COM+ exception on failure.
660 //---------------------------------------------------------------
661 CodeLabel* StubLinker::NewExternalCodeLabel(LPVOID pExternalAddress)
662 {
663     CONTRACTL
664     {
665         THROWS;
666         GC_NOTRIGGER;
667
668         PRECONDITION(CheckPointer(pExternalAddress));
669     }
670     CONTRACTL_END;
671
672     CodeLabel *pCodeLabel = (CodeLabel*)(m_quickHeap.Alloc(sizeof(CodeLabel)));
673     _ASSERTE(pCodeLabel); // QuickHeap throws exceptions rather than returning NULL
674     pCodeLabel->m_next       = m_pFirstCodeLabel;
675     pCodeLabel->m_fExternal          = TRUE;
676     pCodeLabel->m_fAbsolute  = FALSE;
677     pCodeLabel->e.m_pExternalAddress = pExternalAddress;
678     m_pFirstCodeLabel = pCodeLabel;
679     return pCodeLabel;
680 }
681
//---------------------------------------------------------------
// Set the target method for Instantiating stubs.
// Stored in m_pTargetMethod; pMD must be non-NULL.
//---------------------------------------------------------------
void StubLinker::SetTargetMethod(PTR_MethodDesc pMD)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        PRECONDITION(pMD != NULL);
    }
    CONTRACTL_END;
    m_pTargetMethod = pMD;
}
696
697
698 //---------------------------------------------------------------
699 // Append an instruction containing a reference to a label.
700 //
701 //      target          - the label being referenced.
702 //      instructionFormat         - a platform-specific InstructionFormat object
703 //                        that gives properties about the reference.
704 //      variationCode   - uninterpreted data passed to the pInstructionFormat methods.
705 //---------------------------------------------------------------
706 VOID StubLinker::EmitLabelRef(CodeLabel* target, const InstructionFormat & instructionFormat, UINT variationCode)
707 {
708     CONTRACTL
709     {
710         THROWS;
711         GC_NOTRIGGER;
712     }
713     CONTRACTL_END;
714
715     LabelRef *pLabelRef = (LabelRef *)(m_quickHeap.Alloc(sizeof(LabelRef)));
716     _ASSERTE(pLabelRef);      // m_quickHeap throws an exception rather than returning NULL
717     pLabelRef->m_type               = LabelRef::kLabelRef;
718     pLabelRef->m_pInstructionFormat = (InstructionFormat*)&instructionFormat;
719     pLabelRef->m_variationCode      = variationCode;
720     pLabelRef->m_target             = target;
721
722     pLabelRef->m_nextLabelRef = m_pFirstLabelRef;
723     m_pFirstLabelRef = pLabelRef;
724
725     AppendCodeElement(pLabelRef);
726
727
728 }
729
730
731
732
733
734 //---------------------------------------------------------------
735 // Internal helper routine.
736 //---------------------------------------------------------------
737 CodeRun *StubLinker::GetLastCodeRunIfAny()
738 {
739     CONTRACTL
740     {
741         NOTHROW;
742         GC_NOTRIGGER;
743     }
744     CONTRACTL_END;
745
746     CodeElement *pLastCodeElem = GetLastCodeElement();
747     if (pLastCodeElem == NULL || pLastCodeElem->m_type != CodeElement::kCodeRun) {
748         return NULL;
749     } else {
750         return (CodeRun*)pLastCodeElem;
751     }
752 }
753
754
755 //---------------------------------------------------------------
756 // Internal helper routine.
757 //---------------------------------------------------------------
758 CodeRun *StubLinker::AppendNewEmptyCodeRun()
759 {
760     CONTRACTL
761     {
762         THROWS;
763         GC_NOTRIGGER;
764     }
765     CONTRACTL_END;
766
767     CodeRun *pNewCodeRun = (CodeRun*)(m_quickHeap.Alloc(sizeof(CodeRun)));
768     _ASSERTE(pNewCodeRun); // QuickHeap throws exceptions rather than returning NULL
769     pNewCodeRun->m_type = CodeElement::kCodeRun;
770     pNewCodeRun->m_numcodebytes = 0;
771     AppendCodeElement(pNewCodeRun);
772     return pNewCodeRun;
773
774 }
775
//---------------------------------------------------------------
// Internal helper routine: pushes pCodeElement onto the head of
// m_pCodeElements.  Note the list is therefore in *reverse*
// emission order; Link()/CalculateSize compensate for this.
//---------------------------------------------------------------
VOID StubLinker::AppendCodeElement(CodeElement *pCodeElement)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    pCodeElement->m_next = m_pCodeElements;
    m_pCodeElements = pCodeElement;
}
791
792
793
794 //---------------------------------------------------------------
795 // Is the current LabelRef's size big enough to reach the target?
796 //---------------------------------------------------------------
797 static BOOL LabelCanReach(LabelRef *pLabelRef)
798 {
799     CONTRACTL
800     {
801         NOTHROW;
802         GC_NOTRIGGER;
803     }
804     CONTRACTL_END;
805
806     InstructionFormat *pIF  = pLabelRef->m_pInstructionFormat;
807
808     if (pLabelRef->m_target->m_fExternal)
809     {
810         return pLabelRef->m_pInstructionFormat->CanReach(
811                 pLabelRef->m_refsize, pLabelRef->m_variationCode, TRUE, (INT_PTR)pLabelRef->m_target->e.m_pExternalAddress);
812     }
813     else
814     {
815         UINT targetglobaloffset = pLabelRef->m_target->i.m_pCodeRun->m_globaloffset +
816                                   pLabelRef->m_target->i.m_localOffset;
817         UINT srcglobaloffset = pLabelRef->m_globaloffset +
818                                pIF->GetHotSpotOffset(pLabelRef->m_refsize,
819                                                      pLabelRef->m_variationCode);
820         INT offset = (INT)(targetglobaloffset - srcglobaloffset);
821
822         return pLabelRef->m_pInstructionFormat->CanReach(
823             pLabelRef->m_refsize, pLabelRef->m_variationCode, FALSE, offset);
824     }
825 }
826
//---------------------------------------------------------------
// Generate the actual stub. The returned stub has a refcount of 1.
// No other methods (other than the destructor) should be called
// after calling Link().
//
// Throws COM+ exception on failure.
//---------------------------------------------------------------
Stub *StubLinker::Link(LoaderHeap *pHeap, DWORD flags)
{
    STANDARD_VM_CONTRACT;

    // CalculateSize resolves all label-ref sizes and returns the total
    // code size; globalsize receives the code-only portion.
    int globalsize = 0;
    int size = CalculateSize(&globalsize);

    _ASSERTE(!pHeap || pHeap->IsExecutable());

    StubHolder<Stub> pStub;

#ifdef STUBLINKER_GENERATES_UNWIND_INFO
    // When unwind info is generated, EmitStub can reject an allocation
    // (presumably one that straddles an unwind-info segment boundary —
    // see the reservation-list class name; confirm in EmitStub).  Failed
    // stubs are parked on ReservedStubs so the retry cannot get the same
    // address back; they are released when ReservedStubs destructs.
    StubUnwindInfoSegmentBoundaryReservationList ReservedStubs;

    for (;;)
#endif
    {
        pStub = Stub::NewStub(
                pHeap,
                size,
                flags
#ifdef STUBLINKER_GENERATES_UNWIND_INFO
                , UnwindInfoSize(globalsize)
#endif
                );
        ASSERT(pStub != NULL);

        bool fSuccess = EmitStub(pStub, globalsize, size, pHeap);

#ifdef STUBLINKER_GENERATES_UNWIND_INFO
        if (fSuccess)
        {
            break;
        }
        else
        {
            // Park the rejected stub and loop to allocate a new one.
            ReservedStubs.AddStub(pStub);
            pStub.SuppressRelease();
        }
#else
        CONSISTENCY_CHECK_MSG(fSuccess, ("EmitStub should always return true"));
#endif
    }

    // Transfer ownership (refcount 1) to the caller.
    return pStub.Extract();
}
880
881 int StubLinker::CalculateSize(int* pGlobalSize)
882 {
883     CONTRACTL
884     {
885         NOTHROW;
886         GC_NOTRIGGER;
887     }
888     CONTRACTL_END;
889
890     _ASSERTE(pGlobalSize);
891
892 #if defined(_DEBUG) && defined(STUBLINKER_GENERATES_UNWIND_INFO)
893     if (m_pUnwindInfoCheckLabel)
894     {
895         EmitLabel(m_pUnwindInfoCheckLabel);
896         EmitUnwindInfoCheckSubfunction();
897         m_pUnwindInfoCheckLabel = NULL;
898     }
899 #endif
900
901 #ifdef _DEBUG
902     // Don't want any undefined labels
903     for (CodeLabel *pCodeLabel = m_pFirstCodeLabel;
904          pCodeLabel != NULL;
905          pCodeLabel = pCodeLabel->m_next) {
906         if ((!(pCodeLabel->m_fExternal)) && pCodeLabel->i.m_pCodeRun == NULL) {
907             _ASSERTE(!"Forgot to define a label before asking StubLinker to link.");
908         }
909     }
910 #endif //_DEBUG
911
912     //-------------------------------------------------------------------
913     // Tentatively set all of the labelref sizes to their smallest possible
914     // value.
915     //-------------------------------------------------------------------
916     for (LabelRef *pLabelRef = m_pFirstLabelRef;
917          pLabelRef != NULL;
918          pLabelRef = pLabelRef->m_nextLabelRef) {
919
920         for (UINT bitmask = 1; bitmask <= InstructionFormat::kMax; bitmask = bitmask << 1) {
921             if (pLabelRef->m_pInstructionFormat->m_allowedSizes & bitmask) {
922                 pLabelRef->m_refsize = bitmask;
923                 break;
924             }
925         }
926
927     }
928
929     UINT globalsize;
930     UINT datasize;
931     BOOL fSomethingChanged;
932     do {
933         fSomethingChanged = FALSE;
934
935
936         // Layout each code element.
937         globalsize = 0;
938         datasize = 0;
939         CodeElement *pCodeElem;
940         for (pCodeElem = m_pCodeElements; pCodeElem; pCodeElem = pCodeElem->m_next) {
941
942             switch (pCodeElem->m_type) {
943                 case CodeElement::kCodeRun:
944                     globalsize += ((CodeRun*)pCodeElem)->m_numcodebytes;
945                     break;
946
947                 case CodeElement::kLabelRef: {
948                     LabelRef *pLabelRef = (LabelRef*)pCodeElem;
949                     globalsize += pLabelRef->m_pInstructionFormat->GetSizeOfInstruction( pLabelRef->m_refsize,
950                                                                                          pLabelRef->m_variationCode );
951                     datasize += pLabelRef->m_pInstructionFormat->GetSizeOfData( pLabelRef->m_refsize,
952                                                                                          pLabelRef->m_variationCode );
953                     }
954                     break;
955
956                 default:
957                     _ASSERTE(0);
958             }
959
960             // Record a temporary global offset; this is actually
961             // wrong by a fixed value. We'll fix up after we know the
962             // size of the entire stub.
963             pCodeElem->m_globaloffset = 0 - globalsize;
964
965             // also record the data offset. Note the link-list we walk is in
966             // *reverse* order so we visit the last instruction first
967             // so what we record now is in fact the offset from the *end* of
968             // the data block. We fix it up later.
969             pCodeElem->m_dataoffset = 0 - datasize;
970         }
971
972         // Now fix up the global offsets.
973         for (pCodeElem = m_pCodeElements; pCodeElem; pCodeElem = pCodeElem->m_next) {
974             pCodeElem->m_globaloffset += globalsize;
975             pCodeElem->m_dataoffset += datasize;
976         }
977
978
979         // Now, iterate thru the LabelRef's and check if any of them
980         // have to be resized.
981         for (LabelRef *pLabelRef = m_pFirstLabelRef;
982              pLabelRef != NULL;
983              pLabelRef = pLabelRef->m_nextLabelRef) {
984
985
986             if (!LabelCanReach(pLabelRef)) {
987                 fSomethingChanged = TRUE;
988
989                 UINT bitmask = pLabelRef->m_refsize << 1;
990                 // Find the next largest size.
991                 // (we could be smarter about this and eliminate intermediate
992                 // sizes based on the tentative offset.)
993                 for (; bitmask <= InstructionFormat::kMax; bitmask = bitmask << 1) {
994                     if (pLabelRef->m_pInstructionFormat->m_allowedSizes & bitmask) {
995                         pLabelRef->m_refsize = bitmask;
996                         break;
997                     }
998                 }
999 #ifdef _DEBUG
1000                 if (bitmask > InstructionFormat::kMax) {
1001                     // CANNOT REACH target even with kMax
1002                     _ASSERTE(!"Stub instruction cannot reach target: must choose a different instruction!");
1003                 }
1004 #endif
1005             }
1006         }
1007
1008
1009     } while (fSomethingChanged); // Keep iterating until all LabelRef's can reach
1010
1011
1012     // We now have the correct layout write out the stub.
1013
1014     // Compute stub code+data size after aligning data correctly
1015     if(globalsize % DATA_ALIGNMENT)
1016         globalsize += (DATA_ALIGNMENT - (globalsize % DATA_ALIGNMENT));
1017
1018     *pGlobalSize = globalsize;
1019     return globalsize + datasize;
1020 }
1021
//---------------------------------------------------------------------
// Writes the linked code elements into the stub's blob and resolves
// every label reference now that the final layout is known.
//
//   pStub      - the stub whose blob receives the code (executable mapping)
//   globalsize - size in bytes of the code area; the data area follows it
//   totalSize  - combined code+data size, used to map the writable view
//   pHeap      - loader heap hosting the stub; passed through to
//                EmitUnwindInfo when unwind info is generated
//
// Returns false only when EmitUnwindInfo asks the caller to retry with a
// different allocation; true otherwise.
//---------------------------------------------------------------------
bool StubLinker::EmitStub(Stub* pStub, int globalsize, int totalSize, LoaderHeap* pHeap)
{
    STANDARD_VM_CONTRACT;

    // Executable (RX) view of the code; used for computing final addresses.
    BYTE *pCode = (BYTE*)(pStub->GetBlob());

    // Map a writable (RW) view of the stub; all stores go through it.
    ExecutableWriterHolder<Stub> stubWriterHolder(pStub, sizeof(Stub) + totalSize);
    Stub *pStubRW = stubWriterHolder.GetRW();

    BYTE *pCodeRW = (BYTE*)(pStubRW->GetBlob());
    BYTE *pDataRW = pCodeRW+globalsize; // start of data area
    {
        // Tracks the highest code offset actually written, so any tail
        // padding up to globalsize can be zero-filled afterwards.
        int lastCodeOffset = 0;

        // Write out each code element.
        for (CodeElement* pCodeElem = m_pCodeElements; pCodeElem; pCodeElem = pCodeElem->m_next) {
            int currOffset = 0;

            switch (pCodeElem->m_type) {
                // Raw byte run: copy verbatim at its assigned offset.
                case CodeElement::kCodeRun:
                    CopyMemory(pCodeRW + pCodeElem->m_globaloffset,
                               ((CodeRun*)pCodeElem)->m_codebytes,
                               ((CodeRun*)pCodeElem)->m_numcodebytes);
                    currOffset = pCodeElem->m_globaloffset + ((CodeRun *)pCodeElem)->m_numcodebytes;
                    break;

                // Label reference: compute the fixup value and let the
                // instruction format emit the final instruction bytes.
                case CodeElement::kLabelRef: {
                    LabelRef *pLabelRef = (LabelRef*)pCodeElem;
                    InstructionFormat *pIF  = pLabelRef->m_pInstructionFormat;
                    __int64 fixupval;

                    // Relative fixups are measured from the instruction's
                    // "hot spot" (format-defined), not its first byte.
                    LPBYTE srcglobaladdr = pCode +
                                           pLabelRef->m_globaloffset +
                                           pIF->GetHotSpotOffset(pLabelRef->m_refsize,
                                                                 pLabelRef->m_variationCode);
                    // Target is either a label inside this stub or an
                    // external address recorded on the label.
                    LPBYTE targetglobaladdr;
                    if (!(pLabelRef->m_target->m_fExternal)) {
                        targetglobaladdr = pCode +
                                           pLabelRef->m_target->i.m_pCodeRun->m_globaloffset +
                                           pLabelRef->m_target->i.m_localOffset;
                    } else {
                        targetglobaladdr = (LPBYTE)(pLabelRef->m_target->e.m_pExternalAddress);
                    }
                    // Absolute labels embed the raw address; otherwise the
                    // fixup is the displacement from the hot spot.
                    if ((pLabelRef->m_target->m_fAbsolute)) {
                        fixupval = (__int64)(size_t)targetglobaladdr;
                    } else
                        fixupval = (__int64)(targetglobaladdr - srcglobaladdr);

                    // Note: final (RX) and writable (RW) addresses are both
                    // passed so the format can emit via RW while encoding
                    // addresses relative to RX.
                    pLabelRef->m_pInstructionFormat->EmitInstruction(
                        pLabelRef->m_refsize,
                        fixupval,
                        pCode + pCodeElem->m_globaloffset,
                        pCodeRW + pCodeElem->m_globaloffset,
                        pLabelRef->m_variationCode,
                        pDataRW + pCodeElem->m_dataoffset);

                    currOffset =
                        pCodeElem->m_globaloffset +
                        pLabelRef->m_pInstructionFormat->GetSizeOfInstruction( pLabelRef->m_refsize,
                                                                               pLabelRef->m_variationCode );
                    }
                    break;

                default:
                    _ASSERTE(0);
            }
            lastCodeOffset = (currOffset > lastCodeOffset) ? currOffset : lastCodeOffset;
        }

        // Fill in zeros at the end, if necessary
        if (lastCodeOffset < globalsize)
            ZeroMemory(pCodeRW + lastCodeOffset, globalsize - lastCodeOffset);
    }

    // Set additional stub data.
    // - Fill in the target method for the Instantiating stub.
    //
    // - Fill in patch offset, if we have one
    //      Note that these offsets are relative to the start of the stub,
    //      not the code, so you'll have to add sizeof(Stub) to get to the
    //      right spot.
    if (pStubRW->IsInstantiatingStub())
    {
        _ASSERTE(m_pTargetMethod != NULL);
        _ASSERTE(m_pPatchLabel == NULL);
        pStubRW->SetInstantiatedMethodDesc(m_pTargetMethod);

        LOG((LF_CORDB, LL_INFO100, "SL::ES: InstantiatedMethod fd:0x%x\n",
            pStub->GetInstantiatedMethodDesc()));
    }
    else if (m_pPatchLabel != NULL)
    {
        // The patch offset must fit in the USHORT field on the stub.
        UINT32 uLabelOffset = GetLabelOffset(m_pPatchLabel);
        _ASSERTE(FitsIn<USHORT>(uLabelOffset));
        pStubRW->SetPatchOffset(static_cast<USHORT>(uLabelOffset));

        LOG((LF_CORDB, LL_INFO100, "SL::ES: patch offset:0x%x\n",
            pStub->GetPatchOffset()));
    }

#ifdef STUBLINKER_GENERATES_UNWIND_INFO
    if (pStub->HasUnwindInfo())
    {
        // A false return asks the caller to retry with another allocation
        // (see the 2gb-boundary discussion in EmitUnwindInfo).
        if (!EmitUnwindInfo(pStub, pStubRW, globalsize, pHeap))
            return false;
    }
#endif // STUBLINKER_GENERATES_UNWIND_INFO

    // Freshly written code must be flushed from the data caches before
    // it can be executed.
    if (!m_fDataOnly)
    {
        FlushInstructionCache(GetCurrentProcess(), pCode, globalsize);
    }

    _ASSERTE(m_fDataOnly || DbgIsExecutable(pCode, globalsize));

    return true;
}
1139
1140
1141 #ifdef STUBLINKER_GENERATES_UNWIND_INFO
1142 #if defined(TARGET_AMD64)
1143
1144 // See RtlVirtualUnwind in base\ntos\rtl\amd64\exdsptch.c
1145
// Verify that the kRAX..kR15 register numbering matches the order of the
// ULONG64 integer-register fields in the AMD64 CONTEXT structure (relative
// to Rax), so a register number can be used directly as an index into that
// part of the CONTEXT.
static_assert_no_msg(kRAX == (offsetof(CONTEXT, Rax) - offsetof(CONTEXT, Rax)) / sizeof(ULONG64));
static_assert_no_msg(kRCX == (offsetof(CONTEXT, Rcx) - offsetof(CONTEXT, Rax)) / sizeof(ULONG64));
static_assert_no_msg(kRDX == (offsetof(CONTEXT, Rdx) - offsetof(CONTEXT, Rax)) / sizeof(ULONG64));
static_assert_no_msg(kRBX == (offsetof(CONTEXT, Rbx) - offsetof(CONTEXT, Rax)) / sizeof(ULONG64));
static_assert_no_msg(kRBP == (offsetof(CONTEXT, Rbp) - offsetof(CONTEXT, Rax)) / sizeof(ULONG64));
static_assert_no_msg(kRSI == (offsetof(CONTEXT, Rsi) - offsetof(CONTEXT, Rax)) / sizeof(ULONG64));
static_assert_no_msg(kRDI == (offsetof(CONTEXT, Rdi) - offsetof(CONTEXT, Rax)) / sizeof(ULONG64));
static_assert_no_msg(kR8  == (offsetof(CONTEXT, R8 ) - offsetof(CONTEXT, Rax)) / sizeof(ULONG64));
static_assert_no_msg(kR9  == (offsetof(CONTEXT, R9 ) - offsetof(CONTEXT, Rax)) / sizeof(ULONG64));
static_assert_no_msg(kR10 == (offsetof(CONTEXT, R10) - offsetof(CONTEXT, Rax)) / sizeof(ULONG64));
static_assert_no_msg(kR11 == (offsetof(CONTEXT, R11) - offsetof(CONTEXT, Rax)) / sizeof(ULONG64));
static_assert_no_msg(kR12 == (offsetof(CONTEXT, R12) - offsetof(CONTEXT, Rax)) / sizeof(ULONG64));
static_assert_no_msg(kR13 == (offsetof(CONTEXT, R13) - offsetof(CONTEXT, Rax)) / sizeof(ULONG64));
static_assert_no_msg(kR14 == (offsetof(CONTEXT, R14) - offsetof(CONTEXT, Rax)) / sizeof(ULONG64));
static_assert_no_msg(kR15 == (offsetof(CONTEXT, R15) - offsetof(CONTEXT, Rax)) / sizeof(ULONG64));
1161
// Records an unwind code for a nonvolatile register saved at an
// SP-relative offset.
//
//   reg              - register number (kRAX..kR15; see the static_asserts
//                      above for the numbering)
//   SPRelativeOffset - byte offset from RSP at which the register was saved
VOID StubLinker::UnwindSavedReg (UCHAR reg, ULONG SPRelativeOffset)
{
    // UWOP_SAVE_NONVOL encodes the offset scaled down by 8.
    USHORT FrameOffset = (USHORT)(SPRelativeOffset / 8);

    // NOTE(review): this compares the /8-scaled value against the UNSCALED
    // offset, so the compact UWOP_SAVE_NONVOL form is only taken when
    // SPRelativeOffset == 0; every other save falls through to the FAR
    // form. The FAR encoding is still valid per the x64 unwind spec, just
    // larger -- confirm whether the intended test was
    // (ULONG)FrameOffset * 8 == SPRelativeOffset.
    if ((ULONG)FrameOffset == SPRelativeOffset)
    {
        UNWIND_CODE *pUnwindCode = AllocUnwindInfo(UWOP_SAVE_NONVOL);
        pUnwindCode->OpInfo = reg;
        pUnwindCode[1].FrameOffset = FrameOffset;
    }
    else
    {
        // UWOP_SAVE_NONVOL_FAR carries the full unscaled 32-bit offset in
        // two extra slots, low word first.
        UNWIND_CODE *pUnwindCode = AllocUnwindInfo(UWOP_SAVE_NONVOL_FAR);
        pUnwindCode->OpInfo = reg;
        pUnwindCode[1].FrameOffset = (USHORT)SPRelativeOffset;
        pUnwindCode[2].FrameOffset = (USHORT)(SPRelativeOffset >> 16);
    }
}
1180
1181 VOID StubLinker::UnwindPushedReg (UCHAR reg)
1182 {
1183     m_stackSize += sizeof(void*);
1184
1185     if (m_fHaveFramePointer)
1186         return;
1187
1188     UNWIND_CODE *pUnwindCode = AllocUnwindInfo(UWOP_PUSH_NONVOL);
1189     pUnwindCode->OpInfo = reg;
1190 }
1191
// Records an unwind code for a fixed stack allocation (sub rsp, #n) and
// accumulates the stub's total stack size.
//
//   FrameSizeIncrement - bytes added to the frame; the compact encoding
//                        expects a multiple of 8 in the range 8..128
//
// Throws ArithmeticException if the running stack-size total overflows.
VOID StubLinker::UnwindAllocStack (SHORT FrameSizeIncrement)
{
    CONTRACTL
    {
        THROWS;
        GC_NOTRIGGER;
    } CONTRACTL_END;

    // Checked accumulation: m_stackSize is kept as a SHORT, so guard
    // against overflow explicitly.
    if (! ClrSafeInt<SHORT>::addition(m_stackSize, FrameSizeIncrement, m_stackSize))
        COMPlusThrowArithmetic();

    // With a frame pointer established, stack allocations need no unwind
    // codes -- the unwinder recovers RSP from the frame pointer.
    if (m_fHaveFramePointer)
        return;

    // UWOP_ALLOC_SMALL encodes sizes 8..128 as (size - 8) / 8 in OpInfo.
    UCHAR OpInfo = (UCHAR)((FrameSizeIncrement - 8) / 8);

    if (OpInfo*8 + 8 == FrameSizeIncrement)
    {
        UNWIND_CODE *pUnwindCode = AllocUnwindInfo(UWOP_ALLOC_SMALL);
        pUnwindCode->OpInfo = OpInfo;
    }
    else
    {
        USHORT FrameOffset = (USHORT)FrameSizeIncrement;
        // NOTE(review): FrameSizeIncrement is a SHORT, so any non-negative
        // value always fits in the USHORT and fNeedExtraSlot stays false;
        // the three-slot (OpInfo==1) form appears unreachable for valid
        // inputs -- confirm.
        bool fNeedExtraSlot = ((ULONG)FrameOffset != (ULONG)FrameSizeIncrement);

        UNWIND_CODE *pUnwindCode = AllocUnwindInfo(UWOP_ALLOC_LARGE, fNeedExtraSlot ? 1 : 0);

        pUnwindCode->OpInfo = fNeedExtraSlot ? 1 : 0;

        // NOTE(review): the x64 unwind spec stores size/8 in the next slot
        // when UWOP_ALLOC_LARGE has OpInfo==0, but the unscaled size is
        // stored here -- verify against the consumers of this encoding.
        pUnwindCode[1].FrameOffset = FrameOffset;

        if (fNeedExtraSlot)
            pUnwindCode[2].FrameOffset = (USHORT)(FrameSizeIncrement >> 16);
    }
}
1228
1229 VOID StubLinker::UnwindSetFramePointer (UCHAR reg)
1230 {
1231     _ASSERTE(!m_fHaveFramePointer);
1232
1233     UNWIND_CODE *pUnwindCode = AllocUnwindInfo(UWOP_SET_FPREG);
1234     pUnwindCode->OpInfo = reg;
1235
1236     m_fHaveFramePointer = TRUE;
1237 }
1238
1239 UNWIND_CODE *StubLinker::AllocUnwindInfo (UCHAR Op, UCHAR nExtraSlots /*= 0*/)
1240 {
1241     CONTRACTL
1242     {
1243         THROWS;
1244         GC_NOTRIGGER;
1245     } CONTRACTL_END;
1246
1247     _ASSERTE(Op < sizeof(UnwindOpExtraSlotTable));
1248
1249     UCHAR nSlotsAlloc = UnwindOpExtraSlotTable[Op] + nExtraSlots;
1250
1251     IntermediateUnwindInfo *pUnwindInfo = (IntermediateUnwindInfo*)m_quickHeap.Alloc(  sizeof(IntermediateUnwindInfo)
1252                                                                                      + nSlotsAlloc * sizeof(UNWIND_CODE));
1253     m_nUnwindSlots += 1 + nSlotsAlloc;
1254
1255     pUnwindInfo->pNext = m_pUnwindInfoList;
1256                          m_pUnwindInfoList = pUnwindInfo;
1257
1258     UNWIND_CODE *pUnwindCode = &pUnwindInfo->rgUnwindCode[0];
1259
1260     pUnwindCode->UnwindOp = Op;
1261
1262     CodeRun *pCodeRun = GetLastCodeRunIfAny();
1263     _ASSERTE(pCodeRun != NULL);
1264
1265     pUnwindInfo->pCodeRun = pCodeRun;
1266     pUnwindInfo->LocalOffset = pCodeRun->m_numcodebytes;
1267
1268     EmitUnwindInfoCheck();
1269
1270     return pUnwindCode;
1271 }
1272 #endif // defined(TARGET_AMD64)
1273
// In/out parameter block for FindBlockCallback: pCode is the address to
// locate; on success the other two fields describe the page region that
// contains it.
struct FindBlockArgs
{
    BYTE *pCode;        // [in]  address to search for
    BYTE *pBlockBase;   // [out] base of the region containing pCode
    SIZE_T cbBlockSize; // [out] reserved size of that region
};
1280
1281 bool FindBlockCallback (PTR_VOID pvArgs, PTR_VOID pvAllocationBase, SIZE_T cbReserved)
1282 {
1283     CONTRACTL
1284     {
1285         NOTHROW;
1286         GC_TRIGGERS;
1287     }
1288     CONTRACTL_END;
1289
1290     FindBlockArgs* pArgs = (FindBlockArgs*)pvArgs;
1291     if (pArgs->pCode >= pvAllocationBase && (pArgs->pCode < ((BYTE *)pvAllocationBase + cbReserved)))
1292     {
1293         pArgs->pBlockBase = (BYTE*)pvAllocationBase;
1294         pArgs->cbBlockSize = cbReserved;
1295         return true;
1296     }
1297
1298     return false;
1299 }
1300
1301 bool StubLinker::EmitUnwindInfo(Stub* pStubRX, Stub* pStubRW, int globalsize, LoaderHeap* pHeap)
1302 {
1303     STANDARD_VM_CONTRACT;
1304
1305     BYTE *pCode = (BYTE*)(pStubRX->GetEntryPoint());
1306
1307     //
1308     // Determine the lower bound of the address space containing the stub.
1309     //
1310
1311     FindBlockArgs findBlockArgs;
1312     findBlockArgs.pCode = pCode;
1313     findBlockArgs.pBlockBase = NULL;
1314
1315     pHeap->EnumPageRegions(&FindBlockCallback, &findBlockArgs);
1316
1317     if (findBlockArgs.pBlockBase == NULL)
1318     {
1319         // REVISIT_TODO better exception
1320         COMPlusThrowOM();
1321     }
1322
1323     BYTE *pbRegionBaseAddress = findBlockArgs.pBlockBase;
1324
1325 #ifdef _DEBUG
1326     static SIZE_T MaxSegmentSize = -1;
1327     if (MaxSegmentSize == (SIZE_T)-1)
1328         MaxSegmentSize = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_MaxStubUnwindInfoSegmentSize, DYNAMIC_FUNCTION_TABLE_MAX_RANGE);
1329 #else
1330     const SIZE_T MaxSegmentSize = DYNAMIC_FUNCTION_TABLE_MAX_RANGE;
1331 #endif
1332
1333     //
1334     // The RUNTIME_FUNCTION offsets are ULONGs.  If the region size is >
1335     // UINT32_MAX, then we'll shift the base address to the next 4gb and
1336     // register a separate function table.
1337     //
1338     // But...RtlInstallFunctionTableCallback has a 2gb restriction...so
1339     // make that INT32_MAX.
1340     //
1341
1342     StubUnwindInfoHeader *pHeader = pStubRW->GetUnwindInfoHeader();
1343     _ASSERTE(IS_ALIGNED(pHeader, sizeof(void*)));
1344
1345     BYTE *pbBaseAddress = pbRegionBaseAddress;
1346
1347     while ((size_t)((BYTE*)pHeader - pbBaseAddress) > MaxSegmentSize)
1348     {
1349         pbBaseAddress += MaxSegmentSize;
1350     }
1351
1352     //
1353     // If the unwind info/code straddle a 2gb boundary, then we're stuck.
1354     // Rather than add a lot more bit twiddling code to deal with this
1355     // exceptionally rare case, we'll signal the caller to keep this allocation
1356     // temporarily and allocate another.  This repeats until we eventually get
1357     // an allocation that doesn't straddle a 2gb boundary.  Afterwards the old
1358     // allocations are freed.
1359     //
1360
1361     if ((size_t)(pCode + globalsize - pbBaseAddress) > MaxSegmentSize)
1362     {
1363         return false;
1364     }
1365
1366     // Ensure that the first RUNTIME_FUNCTION struct ends up pointer aligned,
1367     // so that the StubUnwindInfoHeader struct is aligned.  UNWIND_INFO
1368     // includes one UNWIND_CODE.
1369     _ASSERTE(IS_ALIGNED(pStubRX, sizeof(void*)));
1370     _ASSERTE(0 == (offsetof(StubUnwindInfoHeader, FunctionEntry) % sizeof(void*)));
1371
1372     StubUnwindInfoHeader * pUnwindInfoHeader = pStubRW->GetUnwindInfoHeader();
1373
1374 #ifdef TARGET_AMD64
1375
1376     UNWIND_CODE *pDestUnwindCode = &pUnwindInfoHeader->UnwindInfo.UnwindCode[0];
1377 #ifdef _DEBUG
1378     UNWIND_CODE *pDestUnwindCodeLimit = (UNWIND_CODE*)pStubRW->GetUnwindInfoHeaderSuffix();
1379 #endif
1380
1381     UINT FrameRegister = 0;
1382
1383     //
1384     // Resolve the unwind operation offsets, and fill in the UNWIND_INFO and
1385     // RUNTIME_FUNCTION structs preceding the stub.  The unwind codes are recorded
1386     // in decreasing address order.
1387     //
1388
1389     for (IntermediateUnwindInfo *pUnwindInfoList = m_pUnwindInfoList; pUnwindInfoList != NULL; pUnwindInfoList = pUnwindInfoList->pNext)
1390     {
1391         UNWIND_CODE *pUnwindCode = &pUnwindInfoList->rgUnwindCode[0];
1392         UCHAR op = pUnwindCode[0].UnwindOp;
1393
1394         if (UWOP_SET_FPREG == op)
1395         {
1396             FrameRegister = pUnwindCode[0].OpInfo;
1397         }
1398
1399         //
1400         // Compute number of slots used by this encoding.
1401         //
1402
1403         UINT nSlots;
1404
1405         if (UWOP_ALLOC_LARGE == op)
1406         {
1407             nSlots = 2 + pUnwindCode[0].OpInfo;
1408         }
1409         else
1410         {
1411             _ASSERTE(UnwindOpExtraSlotTable[op] != (UCHAR)-1);
1412             nSlots = 1 + UnwindOpExtraSlotTable[op];
1413         }
1414
1415         //
1416         // Compute offset and ensure that it will fit in the encoding.
1417         //
1418
1419         SIZE_T CodeOffset =   pUnwindInfoList->pCodeRun->m_globaloffset
1420                             + pUnwindInfoList->LocalOffset;
1421
1422         if (CodeOffset != (SIZE_T)(UCHAR)CodeOffset)
1423         {
1424             // REVISIT_TODO better exception
1425             COMPlusThrowOM();
1426         }
1427
1428         //
1429         // Copy the encoding data, overwrite the new offset, and advance
1430         // to the next encoding.
1431         //
1432
1433         _ASSERTE(pDestUnwindCode + nSlots <= pDestUnwindCodeLimit);
1434
1435         CopyMemory(pDestUnwindCode, pUnwindCode, nSlots * sizeof(UNWIND_CODE));
1436
1437         pDestUnwindCode->CodeOffset = (UCHAR)CodeOffset;
1438
1439         pDestUnwindCode += nSlots;
1440     }
1441
1442     //
1443     // Fill in the UNWIND_INFO struct
1444     //
1445     UNWIND_INFO *pUnwindInfo = &pUnwindInfoHeader->UnwindInfo;
1446     _ASSERTE(IS_ALIGNED(pUnwindInfo, sizeof(ULONG)));
1447
1448     // PrologueSize may be 0 if all unwind directives at offset 0.
1449     SIZE_T PrologueSize =   m_pUnwindInfoList->pCodeRun->m_globaloffset
1450                             + m_pUnwindInfoList->LocalOffset;
1451
1452     UINT nEntryPointSlots = m_nUnwindSlots;
1453
1454     if (   PrologueSize != (SIZE_T)(UCHAR)PrologueSize
1455         || nEntryPointSlots > UCHAR_MAX)
1456     {
1457         // REVISIT_TODO better exception
1458         COMPlusThrowOM();
1459     }
1460
1461     _ASSERTE(nEntryPointSlots);
1462
1463     pUnwindInfo->Version = 1;
1464     pUnwindInfo->Flags = 0;
1465     pUnwindInfo->SizeOfProlog = (UCHAR)PrologueSize;
1466     pUnwindInfo->CountOfUnwindCodes = (UCHAR)nEntryPointSlots;
1467     pUnwindInfo->FrameRegister = FrameRegister;
1468     pUnwindInfo->FrameOffset = 0;
1469
1470     //
1471     // Fill in the RUNTIME_FUNCTION struct for this prologue.
1472     //
1473     PT_RUNTIME_FUNCTION pCurFunction = &pUnwindInfoHeader->FunctionEntry;
1474     _ASSERTE(IS_ALIGNED(pCurFunction, sizeof(ULONG)));
1475
1476     S_UINT32 sBeginAddress = S_BYTEPTR(pCode) - S_BYTEPTR(pbBaseAddress);
1477     if (sBeginAddress.IsOverflow())
1478         COMPlusThrowArithmetic();
1479     pCurFunction->BeginAddress = sBeginAddress.Value();
1480
1481     S_UINT32 sEndAddress = S_BYTEPTR(pCode) + S_BYTEPTR(globalsize) - S_BYTEPTR(pbBaseAddress);
1482     if (sEndAddress.IsOverflow())
1483         COMPlusThrowArithmetic();
1484     pCurFunction->EndAddress = sEndAddress.Value();
1485
1486     S_UINT32 sTemp = S_BYTEPTR(pUnwindInfo) - S_BYTEPTR(pbBaseAddress);
1487     if (sTemp.IsOverflow())
1488         COMPlusThrowArithmetic();
1489     RUNTIME_FUNCTION__SetUnwindInfoAddress(pCurFunction, sTemp.Value());
1490 #elif defined(TARGET_ARM)
1491     //
1492     // Fill in the RUNTIME_FUNCTION struct for this prologue.
1493     //
1494     UNWIND_INFO *pUnwindInfo = &pUnwindInfoHeader->UnwindInfo;
1495
1496     PT_RUNTIME_FUNCTION pCurFunction = &pUnwindInfoHeader->FunctionEntry;
1497     _ASSERTE(IS_ALIGNED(pCurFunction, sizeof(ULONG)));
1498
1499     S_UINT32 sBeginAddress = S_BYTEPTR(pCode) - S_BYTEPTR(pbBaseAddress);
1500     if (sBeginAddress.IsOverflow())
1501         COMPlusThrowArithmetic();
1502     RUNTIME_FUNCTION__SetBeginAddress(pCurFunction, sBeginAddress.Value());
1503
1504     S_UINT32 sTemp = S_BYTEPTR(pUnwindInfo) - S_BYTEPTR(pbBaseAddress);
1505     if (sTemp.IsOverflow())
1506         COMPlusThrowArithmetic();
1507     RUNTIME_FUNCTION__SetUnwindInfoAddress(pCurFunction, sTemp.Value());
1508
1509     //Get the exact function Length. Cannot use globalsize as it is explicitly made to be
1510     // 4 byte aligned
1511     CodeRun *pLastCodeElem = GetLastCodeRunIfAny();
1512     _ASSERTE(pLastCodeElem != NULL);
1513
1514     int functionLength = pLastCodeElem->m_numcodebytes + pLastCodeElem->m_globaloffset;
1515
1516     // cannot encode functionLength greater than (2 * 0xFFFFF)
1517     if (functionLength > 2 * 0xFFFFF)
1518         COMPlusThrowArithmetic();
1519
1520     _ASSERTE(functionLength <= globalsize);
1521
1522     BYTE * pUnwindCodes = (BYTE *)pUnwindInfo + sizeof(DWORD);
1523
1524     // Not emitting compact unwind info as there are very few (4) dynamic stubs with unwind info.
1525     // Benefit of the optimization does not outweigh the cost of adding the code for it.
1526
1527     //UnwindInfo for prolog
1528     if (m_cbStackFrame != 0)
1529     {
1530         if(m_cbStackFrame < 512)
1531         {
1532             *pUnwindCodes++ = (BYTE)0xF8;                     // 16-bit sub/add sp,#x
1533             *pUnwindCodes++ = (BYTE)(m_cbStackFrame >> 18);
1534             *pUnwindCodes++ = (BYTE)(m_cbStackFrame >> 10);
1535             *pUnwindCodes++ = (BYTE)(m_cbStackFrame >> 2);
1536         }
1537         else
1538         {
1539             *pUnwindCodes++ = (BYTE)0xFA;                     // 32-bit sub/add sp,#x
1540             *pUnwindCodes++ = (BYTE)(m_cbStackFrame >> 18);
1541             *pUnwindCodes++ = (BYTE)(m_cbStackFrame >> 10);
1542             *pUnwindCodes++ = (BYTE)(m_cbStackFrame >> 2);
1543         }
1544
1545         if(m_cbStackFrame >= 4096)
1546         {
1547             // r4 register is used as param to checkStack function and must have been saved in prolog
1548             _ASSERTE(m_cCalleeSavedRegs > 0);
1549             *pUnwindCodes++ = (BYTE)0xFB; // nop 16 bit for bl r12
1550             *pUnwindCodes++ = (BYTE)0xFC; // nop 32 bit for movt r12, checkStack
1551             *pUnwindCodes++ = (BYTE)0xFC; // nop 32 bit for movw r12, checkStack
1552
1553             // Ensure that mov r4, m_cbStackFrame fits in a 32-bit instruction
1554             if(m_cbStackFrame > 65535)
1555                 COMPlusThrow(kNotSupportedException);
1556             *pUnwindCodes++ = (BYTE)0xFC; // nop 32 bit for mov r4, m_cbStackFrame
1557         }
1558     }
1559
1560     // Unwind info generated will be incorrect when m_cCalleeSavedRegs = 0.
1561     // The unwind code will say that the size of push/pop instruction
1562     // size is 16bits when actually the opcode generated by
1563     // ThumbEmitPop & ThumbEMitPush will be 32bits.
1564     // Currently no stubs has m_cCalleeSavedRegs as 0
1565     // therefore just adding the assert.
1566     _ASSERTE(m_cCalleeSavedRegs > 0);
1567
1568     if (m_cCalleeSavedRegs <= 4)
1569     {
1570         *pUnwindCodes++ = (BYTE)(0xD4 + (m_cCalleeSavedRegs - 1)); // push/pop {r4-rX}
1571     }
1572     else
1573     {
1574         _ASSERTE(m_cCalleeSavedRegs <= 8);
1575         *pUnwindCodes++ = (BYTE)(0xDC + (m_cCalleeSavedRegs - 5)); // push/pop {r4-rX}
1576     }
1577
1578     if (m_fPushArgRegs)
1579     {
1580         *pUnwindCodes++ = (BYTE)0x04; // push {r0-r3} / add sp,#16
1581         *pUnwindCodes++ = (BYTE)0xFD; // bx lr
1582     }
1583     else
1584     {
1585         *pUnwindCodes++ = (BYTE)0xFF; // end
1586     }
1587
1588     ptrdiff_t epilogUnwindCodeIndex = 0;
1589
1590     //epilog differs from prolog
1591     if(m_cbStackFrame >= 4096)
1592     {
1593         //Index of the first unwind code of the epilog
1594         epilogUnwindCodeIndex = pUnwindCodes - (BYTE *)pUnwindInfo - sizeof(DWORD);
1595
1596         *pUnwindCodes++ = (BYTE)0xF8;                     // sub/add sp,#x
1597         *pUnwindCodes++ = (BYTE)(m_cbStackFrame >> 18);
1598         *pUnwindCodes++ = (BYTE)(m_cbStackFrame >> 10);
1599         *pUnwindCodes++ = (BYTE)(m_cbStackFrame >> 2);
1600
1601         if (m_cCalleeSavedRegs <= 4)
1602         {
1603             *pUnwindCodes++ = (BYTE)(0xD4 + (m_cCalleeSavedRegs - 1)); // push/pop {r4-rX}
1604         }
1605         else
1606         {
1607             *pUnwindCodes++ = (BYTE)(0xDC + (m_cCalleeSavedRegs - 5)); // push/pop {r4-rX}
1608         }
1609
1610         if (m_fPushArgRegs)
1611         {
1612             *pUnwindCodes++ = (BYTE)0x04; // push {r0-r3} / add sp,#16
1613             *pUnwindCodes++ = (BYTE)0xFD; // bx lr
1614         }
1615         else
1616         {
1617             *pUnwindCodes++ = (BYTE)0xFF; // end
1618         }
1619
1620     }
1621
1622     // Number of 32-bit unwind codes
1623     size_t codeWordsCount = (ALIGN_UP((size_t)pUnwindCodes, sizeof(void*)) - (size_t)pUnwindInfo - sizeof(DWORD))/4;
1624
1625     _ASSERTE(epilogUnwindCodeIndex < 32);
1626
1627     //Check that MAX_UNWIND_CODE_WORDS is sufficient to store all unwind Codes
1628     _ASSERTE(codeWordsCount <= MAX_UNWIND_CODE_WORDS);
1629
1630     *(DWORD *)pUnwindInfo =
1631         ((functionLength) / 2) |
1632         (1 << 21) |
1633         ((int)epilogUnwindCodeIndex << 23)|
1634         ((int)codeWordsCount << 28);
1635
1636 #elif defined(TARGET_ARM64)
1637     if (!m_fProlog)
1638     {
1639         // If EmitProlog isn't called. This is a leaf function which doesn't need any unwindInfo
1640         T_RUNTIME_FUNCTION *pCurFunction = NULL;
1641     }
1642     else
1643     {
1644
1645         //
1646         // Fill in the RUNTIME_FUNCTION struct for this prologue.
1647         //
1648         UNWIND_INFO *pUnwindInfo = &(pUnwindInfoHeader->UnwindInfo);
1649
1650         T_RUNTIME_FUNCTION *pCurFunction = &(pUnwindInfoHeader->FunctionEntry);
1651
1652         _ASSERTE(IS_ALIGNED(pCurFunction, sizeof(void*)));
1653
1654         S_UINT32 sBeginAddress = S_BYTEPTR(pCode) - S_BYTEPTR(pbBaseAddress);
1655         if (sBeginAddress.IsOverflow())
1656             COMPlusThrowArithmetic();
1657
1658         S_UINT32 sTemp = S_BYTEPTR(pUnwindInfo) - S_BYTEPTR(pbBaseAddress);
1659         if (sTemp.IsOverflow())
1660             COMPlusThrowArithmetic();
1661
1662         RUNTIME_FUNCTION__SetBeginAddress(pCurFunction, sBeginAddress.Value());
1663         RUNTIME_FUNCTION__SetUnwindInfoAddress(pCurFunction, sTemp.Value());
1664
1665         CodeRun *pLastCodeElem = GetLastCodeRunIfAny();
1666         _ASSERTE(pLastCodeElem != NULL);
1667
1668         int functionLength = pLastCodeElem->m_numcodebytes + pLastCodeElem->m_globaloffset;
1669
1670         // .xdata has 18 bits for function length and it is to store the total length of the function in bytes, divided by 4
1671         // If the function is larger than 1M, then multiple pdata and xdata records must be used, which we don't support right now.
1672         if (functionLength > 4 * 0x3FFFF)
1673             COMPlusThrowArithmetic();
1674
1675         _ASSERTE(functionLength <= globalsize);
1676
1677         // No support for extended code words and/or extended epilog.
1678         // ASSERTION: first 10 bits of the pUnwindInfo, which holds the #codewords and #epilogcount, cannot be 0
1679         // And no space for exception scope data also means that no support for exceptions for the stubs
1680         // generated with this stublinker.
1681         BYTE * pUnwindCodes = (BYTE *)pUnwindInfo + sizeof(DWORD);
1682
1683
1684         // Emitting the unwind codes:
1685         // The unwind codes are emitted in Epilog order.
1686         //
1687         // 6. Integer argument registers
1688         // Although we might be saving the argument registers in the prolog we don't need
1689         // to report them to the OS. (they are not expressible anyways)
1690
1691         // 5. Floating point argument registers:
1692         // Similar to Integer argument registers, no reporting
1693         //
1694
1695         // 4. Set the frame pointer
1696         // ASSUMPTION: none of the Stubs generated with this stublinker change SP value outside of epilog and prolog
1697         // when that is the case we can skip reporting setting up the frame pointer
1698
1699         // With skiping Step #4, #5 and #6 Prolog and Epilog becomes reversible. so they can share the unwind codes
1700         int epilogUnwindCodeIndex = 0;
1701
1702         unsigned cStackFrameSizeInQWORDs = GetStackFrameSize()/16;
1703         // 3. Store FP/LR
1704         // save_fplr
1705         *pUnwindCodes++ = (BYTE)(0x40 | (m_cbStackSpace>>3));
1706
1707         // 2. Callee-saved registers
1708         //
1709         if (m_cCalleeSavedRegs > 0)
1710         {
1711             unsigned offset = 2 + m_cbStackSpace/8; // 2 is for fp,lr
1712             if ((m_cCalleeSavedRegs %2) ==1)
1713             {
1714                 // save_reg
1715                 *pUnwindCodes++ = (BYTE) (0xD0 | ((m_cCalleeSavedRegs-1)>>2));
1716                 *pUnwindCodes++ = (BYTE) ((BYTE)((m_cCalleeSavedRegs-1) << 6) | ((offset + m_cCalleeSavedRegs - 1) & 0x3F));
1717             }
1718             for (int i=(m_cCalleeSavedRegs/2)*2-2; i>=0; i-=2)
1719             {
1720                 if (i!=0)
1721                 {
1722                     // save_next
1723                     *pUnwindCodes++ = 0xE6;
1724                 }
1725                 else
1726                 {
1727                     // save_regp
1728                     *pUnwindCodes++ = 0xC8;
1729                     *pUnwindCodes++ = (BYTE)(offset & 0x3F);
1730                 }
1731             }
1732         }
1733
1734         // 1. SP Relocation
1735         //
1736         // EmitProlog is supposed to reject frames larger than 504 bytes.
1737         // Assert that here.
1738         _ASSERTE(cStackFrameSizeInQWORDs <= 0x3F);
1739         if (cStackFrameSizeInQWORDs <= 0x1F)
1740         {
1741             // alloc_s
1742             *pUnwindCodes++ = (BYTE)(cStackFrameSizeInQWORDs);
1743         }
1744         else
1745         {
1746             // alloc_m
1747             *pUnwindCodes++ = (BYTE)(0xC0 | (cStackFrameSizeInQWORDs >> 8));
1748             *pUnwindCodes++ = (BYTE)(cStackFrameSizeInQWORDs);
1749         }
1750
1751         // End
1752         *pUnwindCodes++ = 0xE4;
1753
1754         // Number of 32-bit unwind codes
1755         int codeWordsCount = (int)(ALIGN_UP((size_t)pUnwindCodes, sizeof(DWORD)) - (size_t)pUnwindInfo - sizeof(DWORD))/4;
1756
1757         //Check that MAX_UNWIND_CODE_WORDS is sufficient to store all unwind Codes
1758         _ASSERTE(codeWordsCount <= MAX_UNWIND_CODE_WORDS);
1759
1760         *(DWORD *)pUnwindInfo =
1761             ((functionLength) / 4) |
1762             (1 << 21) |     // E bit
1763             (epilogUnwindCodeIndex << 22)|
1764             (codeWordsCount << 27);
1765     } // end else (!m_fProlog)
1766 #else
1767     PORTABILITY_ASSERT("StubLinker::EmitUnwindInfo");
1768     T_RUNTIME_FUNCTION *pCurFunction = NULL;
1769 #endif
1770
1771     //
1772     // Get a StubUnwindInfoHeapSegment for this base address
1773     //
1774
1775     CrstHolder crst(&g_StubUnwindInfoHeapSegmentsCrst);
1776
1777     StubUnwindInfoHeapSegment *pStubHeapSegment;
1778     StubUnwindInfoHeapSegment **ppPrevStubHeapSegment;
1779     for (ppPrevStubHeapSegment = &g_StubHeapSegments;
1780          (pStubHeapSegment = *ppPrevStubHeapSegment);
1781          (ppPrevStubHeapSegment = &pStubHeapSegment->pNext))
1782     {
1783         if (pbBaseAddress < pStubHeapSegment->pbBaseAddress)
1784         {
1785             // The list is ordered, so address is between segments
1786             pStubHeapSegment = NULL;
1787             break;
1788         }
1789
1790         if (pbBaseAddress == pStubHeapSegment->pbBaseAddress)
1791         {
1792             // Found an existing segment
1793             break;
1794         }
1795     }
1796
1797     if (!pStubHeapSegment)
1798     {
1799         //
1800         // RtlInstallFunctionTableCallback will only accept a ULONG for the
1801         // region size.  We've already checked above that the RUNTIME_FUNCTION
1802         // offsets will work relative to pbBaseAddress.
1803         //
1804
1805         SIZE_T cbSegment = findBlockArgs.cbBlockSize;
1806
1807         if (cbSegment > MaxSegmentSize)
1808             cbSegment = MaxSegmentSize;
1809
1810         NewHolder<StubUnwindInfoHeapSegment> pNewStubHeapSegment = new StubUnwindInfoHeapSegment();
1811
1812
1813         pNewStubHeapSegment->pbBaseAddress = pbBaseAddress;
1814         pNewStubHeapSegment->cbSegment = cbSegment;
1815         pNewStubHeapSegment->pUnwindHeaderList = NULL;
1816 #ifdef TARGET_AMD64
1817         pNewStubHeapSegment->pUnwindInfoTable = NULL;
1818 #endif
1819
1820         // Insert the new stub into list
1821         pNewStubHeapSegment->pNext = *ppPrevStubHeapSegment;
1822         *ppPrevStubHeapSegment = pNewStubHeapSegment;
1823         pNewStubHeapSegment.SuppressRelease();
1824
1825         // Use new segment for the stub
1826         pStubHeapSegment = pNewStubHeapSegment;
1827
1828         InstallEEFunctionTable(
1829                 pNewStubHeapSegment,
1830                 pbBaseAddress,
1831                 (ULONG)cbSegment,
1832                 &FindStubFunctionEntry,
1833                 pNewStubHeapSegment,
1834                 DYNFNTABLE_STUB);
1835     }
1836
1837     //
1838     // Link the new stub into the segment.
1839     //
1840
1841     pHeader->pNext = pStubHeapSegment->pUnwindHeaderList;
1842                      pStubHeapSegment->pUnwindHeaderList = pHeader;
1843
1844 #ifdef TARGET_AMD64
1845     // Publish Unwind info to ETW stack crawler
1846     UnwindInfoTable::AddToUnwindInfoTable(
1847         &pStubHeapSegment->pUnwindInfoTable, pCurFunction,
1848         (TADDR) pStubHeapSegment->pbBaseAddress,
1849         (TADDR) pStubHeapSegment->pbBaseAddress + pStubHeapSegment->cbSegment);
1850 #endif
1851
1852 #ifdef _DEBUG
1853     _ASSERTE(pHeader->IsRegistered());
1854     _ASSERTE(   &pHeader->FunctionEntry
1855              == FindStubFunctionEntry((ULONG64)pCode,                  EncodeDynamicFunctionTableContext(pStubHeapSegment, DYNFNTABLE_STUB)));
1856 #endif
1857
1858     return true;
1859 }
1860 #endif // STUBLINKER_GENERATES_UNWIND_INFO
1861
1862 #ifdef TARGET_ARM
1863 void StubLinker::DescribeProlog(UINT cCalleeSavedRegs, UINT cbStackFrame, BOOL fPushArgRegs)
1864 {
1865     m_fProlog = TRUE;
1866     m_cCalleeSavedRegs = cCalleeSavedRegs;
1867     m_cbStackFrame = cbStackFrame;
1868     m_fPushArgRegs = fPushArgRegs;
1869 }
1870 #elif defined(TARGET_ARM64)
1871 void StubLinker::DescribeProlog(UINT cIntRegArgs, UINT cVecRegArgs, UINT cCalleeSavedRegs, UINT cbStackSpace)
1872 {
1873     m_fProlog               = TRUE;
1874     m_cIntRegArgs           = cIntRegArgs;
1875     m_cVecRegArgs           = cVecRegArgs;
1876     m_cCalleeSavedRegs      = cCalleeSavedRegs;
1877     m_cbStackSpace          = cbStackSpace;
1878 }
1879
1880 UINT StubLinker::GetSavedRegArgsOffset()
1881 {
1882     _ASSERTE(m_fProlog);
1883     // This is the offset from SP
1884     // We're assuming that the stublinker will push the arg registers to the bottom of the stack frame
1885     return m_cbStackSpace +  (2+ m_cCalleeSavedRegs)*sizeof(void*); // 2 is for FP and LR
1886 }
1887
1888 UINT StubLinker::GetStackFrameSize()
1889 {
1890     _ASSERTE(m_fProlog);
1891     return m_cbStackSpace + (2 + m_cCalleeSavedRegs + m_cIntRegArgs + m_cVecRegArgs)*sizeof(void*);
1892 }
1893
1894 #endif // ifdef TARGET_ARM, elif defined(TARGET_ARM64)
1895
1896 #endif // #ifndef DACCESS_COMPILE
1897
1898 #ifndef DACCESS_COMPILE
1899
// Redeclare the Stub type here and assert its size.
// The size assertion is done here (rather than in stublink.h) because of
// where CODE_SIZE_ALIGN is defined - it is not included in all places
// where stublink.h is consumed. Stub instances may be followed directly
// by code (see Stub::NewStub), so sizeof(Stub) must be a multiple of the
// code alignment.
class Stub;
static_assert_no_msg((sizeof(Stub) % CODE_SIZE_ALIGN) == 0);
1906
//-------------------------------------------------------------------
// Inc the refcount.
//-------------------------------------------------------------------
VOID Stub::IncRef()
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    // Debug-only guard: DeleteStub stamps kFreedStub, so this catches
    // attempts to revive a stub that has already been destroyed.
    _ASSERTE(m_signature == kUsedStub);
    InterlockedIncrement((LONG*)&m_refcount);
}
1922
//-------------------------------------------------------------------
// Dec the refcount.
//
// Returns TRUE if this call released the last reference - the stub is
// destroyed and must not be touched again. Returns FALSE otherwise.
//-------------------------------------------------------------------
BOOL Stub::DecRef()
{
    CONTRACTL
    {
        NOTHROW;
        GC_TRIGGERS;   // DeleteStub below is GC_TRIGGERS
    }
    CONTRACTL_END;

    _ASSERTE(m_signature == kUsedStub);
    int count = InterlockedDecrement((LONG*)&m_refcount);
    if (count <= 0) {
        DeleteStub();
        return TRUE;
    }
    return FALSE;
}
1943
//-------------------------------------------------------------------
// Destroys the stub: unregisters any unwind info from the global
// segment list and, for stubs not allocated on a loader heap, frees
// the backing memory. Reached when the refcount drops to zero
// (see DecRef).
//-------------------------------------------------------------------
VOID Stub::DeleteStub()
{
    CONTRACTL
    {
        NOTHROW;
        GC_TRIGGERS;
    }
    CONTRACTL_END;

#ifdef STUBLINKER_GENERATES_UNWIND_INFO
    if (HasUnwindInfo())
    {
        StubUnwindInfoHeader *pHeader = GetUnwindInfoHeader();

        //
        // Check if the stub has been linked into a StubUnwindInfoHeapSegment.
        //
        if (pHeader->IsRegistered())
        {
            CrstHolder crst(&g_StubUnwindInfoHeapSegmentsCrst);

            //
            // Find the segment containing the stub.
            //
            StubUnwindInfoHeapSegment **ppPrevSegment = &g_StubHeapSegments;
            StubUnwindInfoHeapSegment *pSegment = *ppPrevSegment;

            if (pSegment)
            {
                PBYTE pbCode = (PBYTE)GetEntryPointInternal();
#ifdef TARGET_AMD64
                UnwindInfoTable::RemoveFromUnwindInfoTable(&pSegment->pUnwindInfoTable,
                    (TADDR) pSegment->pbBaseAddress, (TADDR) pbCode);
#endif
                // Advance until the *next* segment would start past this
                // stub's code; pSegment then covers the stub.
                for (StubUnwindInfoHeapSegment *pNextSegment = pSegment->pNext;
                     pNextSegment;
                     ppPrevSegment = &pSegment->pNext, pSegment = pNextSegment, pNextSegment = pSegment->pNext)
                {
                    // The segments are sorted by pbBaseAddress.
                    if (pbCode < pNextSegment->pbBaseAddress)
                        break;
                }
            }

            // The stub was marked as registered, so a segment should exist.
            _ASSERTE(pSegment);

            if (pSegment)
            {

                //
                // Find this stub's location in the segment's list.
                //
                StubUnwindInfoHeader *pCurHeader;
                StubUnwindInfoHeader **ppPrevHeaderList;
                for (ppPrevHeaderList = &pSegment->pUnwindHeaderList;
                     (pCurHeader = *ppPrevHeaderList);
                     (ppPrevHeaderList = &pCurHeader->pNext))
                {
                    if (pHeader == pCurHeader)
                        break;
                }

                // The stub was marked as registered, so we should find it in the segment's list.
                _ASSERTE(pCurHeader);

                if (pCurHeader)
                {
                    //
                    // Remove the stub from the segment's list.
                    //
                    *ppPrevHeaderList = pHeader->pNext;

                    //
                    // If the segment's list is now empty, delete the segment.
                    //
                    if (!pSegment->pUnwindHeaderList)
                    {
                        DeleteEEFunctionTable(pSegment);
#ifdef TARGET_AMD64
                        if (pSegment->pUnwindInfoTable != 0)
                            delete pSegment->pUnwindInfoTable;
#endif
                        *ppPrevSegment = pSegment->pNext;
                        delete pSegment;
                    }
                }
            }
        }
    }
#endif

    // Only stubs allocated with plain operator new (no LoaderHeap) own
    // their memory; LoaderHeap-backed stubs are released with the heap.
    if ((m_numCodeBytesAndFlags & LOADER_HEAP_BIT) == 0)
    {
#ifdef _DEBUG
        m_signature = kFreedStub;
        FillMemory(this+1, GetNumCodeBytes(), 0xcc);
#endif

        delete [] (BYTE*)GetAllocationBase();
    }
}
2046
//-------------------------------------------------------------------
// Returns the start of the memory block this Stub was allocated in.
// The Stub instance may be preceded by an unwind-info header and/or
// alignment padding, so the allocation base can be lower than `this`
// (see the layout comment in Stub::NewStub).
//-------------------------------------------------------------------
TADDR Stub::GetAllocationBase()
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        FORBID_FAULT;
    }
    CONTRACTL_END

    TADDR info = dac_cast<TADDR>(this);
    SIZE_T cbPrefix = 0;

#ifdef STUBLINKER_GENERATES_UNWIND_INFO
    if (HasUnwindInfo())
    {
        // The unwind info precedes the Stub; its suffix (which records the
        // unwind info size) sits immediately before the Stub instance.
        StubUnwindInfoHeaderSuffix *pSuffix =
            PTR_StubUnwindInfoHeaderSuffix(info - cbPrefix -
                                           sizeof(*pSuffix));

        cbPrefix += StubUnwindInfoHeader::ComputeAlignedSize(pSuffix->nUnwindInfoSize);
    }
#endif // STUBLINKER_GENERATES_UNWIND_INFO

    if (!HasExternalEntryPoint())
    {
        // Code follows the Stub and must be CODE_SIZE_ALIGN aligned, so
        // account for any padding inserted in front of the Stub instance.
        cbPrefix = ALIGN_UP(cbPrefix + sizeof(Stub), CODE_SIZE_ALIGN) - sizeof(Stub);
    }

    return info - cbPrefix;
}
2078
2079 Stub* Stub::NewStub(PTR_VOID pCode, DWORD flags)
2080 {
2081     CONTRACTL
2082     {
2083         THROWS;
2084         GC_NOTRIGGER;
2085     }
2086     CONTRACTL_END;
2087
2088     Stub* pStub = NewStub(NULL, 0, flags | NEWSTUB_FL_EXTERNAL);
2089
2090     // Passing NEWSTUB_FL_EXTERNAL requests the stub struct be
2091     // expanded in size by a single pointer. Insert the code point at this
2092     // location.
2093     *(PTR_VOID *)(pStub + 1) = pCode;
2094
2095     return pStub;
2096 }
2097
//-------------------------------------------------------------------
// Stub allocation done here.
//-------------------------------------------------------------------
/*static*/ Stub* Stub::NewStub(
        LoaderHeap *pHeap,
        UINT numCodeBytes,
        DWORD flags
#ifdef STUBLINKER_GENERATES_UNWIND_INFO
        , UINT nUnwindInfoSize
#endif
        )
{
    CONTRACTL
    {
        THROWS;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    // The memory layout of the allocated memory for the Stub instance is as follows:
    //  Offset:
    //  - 0
    //      optional: unwind info - see nUnwindInfoSize usage.
    //  - stubPayloadOffset
    //      Stub instance
    //      optional: external pointer | padding + code
    size_t stubPayloadOffset = 0;
    S_SIZE_T size = S_SIZE_T(sizeof(Stub));

#ifdef STUBLINKER_GENERATES_UNWIND_INFO
    _ASSERTE(!nUnwindInfoSize || !pHeap || pHeap->m_fPermitStubsWithUnwindInfo);

    if (nUnwindInfoSize != 0)
    {
        // The Unwind info precedes the Stub itself.
        stubPayloadOffset = StubUnwindInfoHeader::ComputeAlignedSize(nUnwindInfoSize);
        size += stubPayloadOffset;
    }
#endif // STUBLINKER_GENERATES_UNWIND_INFO

    if (flags & NEWSTUB_FL_EXTERNAL)
    {
        // External stubs carry no code of their own - just one trailing
        // pointer to the entry point (see NewStub(PTR_VOID, DWORD)).
        _ASSERTE(pHeap == NULL);
        _ASSERTE(numCodeBytes == 0);
        size += sizeof(PTR_PCODE);
    }
    else
    {
        // Code follows the Stub instance and must be suitably aligned.
        size.AlignUp(CODE_SIZE_ALIGN);
        size += numCodeBytes;
    }

    // S_SIZE_T has tracked overflow across the additions above.
    if (size.IsOverflow())
        COMPlusThrowArithmetic();

    size_t totalSize = size.Value();

    BYTE *pBlock;
    if (pHeap == NULL)
    {
        pBlock = new BYTE[totalSize];
    }
    else
    {
        TaggedMemAllocPtr ptr = pHeap->AllocAlignedMem(totalSize, CODE_SIZE_ALIGN);
        pBlock = (BYTE*)(void*)ptr;
        // Record the provenance so DeleteStub knows not to delete[] it.
        flags |= NEWSTUB_FL_LOADERHEAP;
    }

    _ASSERTE((stubPayloadOffset % CODE_SIZE_ALIGN) == 0);
    Stub* pStubRX = (Stub*)(pBlock + stubPayloadOffset);
    Stub* pStubRW;
    ExecutableWriterHolderNoLog<Stub> stubWriterHolder;

    if (pHeap == NULL)
    {
        // Plain heap memory is writable as-is.
        pStubRW = pStubRX;
    }
    else
    {
        // Loader heap memory may be execute-only; map a writable view
        // for the duration of the initialization below.
        stubWriterHolder.AssignExecutableWriterHolder(pStubRX, sizeof(Stub));
        pStubRW = stubWriterHolder.GetRW();
    }
    pStubRW->SetupStub(
            numCodeBytes,
            flags
#ifdef STUBLINKER_GENERATES_UNWIND_INFO
            , nUnwindInfoSize
#endif
            );

    _ASSERTE((BYTE *)pStubRX->GetAllocationBase() == pBlock);

    return pStubRX;
}
2193
//-------------------------------------------------------------------
// Initializes a freshly allocated Stub: records the code byte count,
// translates the public NEWSTUB_FL_* flags into the packed bits of
// m_numCodeBytesAndFlags, sets the initial refcount to 1, and
// initializes the unwind info header when one was allocated.
//-------------------------------------------------------------------
void Stub::SetupStub(int numCodeBytes, DWORD flags
#ifdef STUBLINKER_GENERATES_UNWIND_INFO
                     , UINT nUnwindInfoSize
#endif
                     )
{
    CONTRACTL
    {
        THROWS;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

#ifdef _DEBUG
    m_signature = kUsedStub;
#ifdef HOST_64BIT
    m_pad_code_bytes1 = 0;
    m_pad_code_bytes2 = 0;
    m_pad_code_bytes3 = 0;
#endif
#endif

    // The size and the flags share m_numCodeBytesAndFlags; reject sizes
    // that would collide with the flag bits.
    if (((DWORD)numCodeBytes) >= MAX_CODEBYTES)
        COMPlusThrowHR(COR_E_OVERFLOW);

    m_numCodeBytesAndFlags = numCodeBytes;

    // The creator holds the first reference.
    m_refcount = 1;
    m_data = {};

    if (flags != NEWSTUB_FL_NONE)
    {
        // Map each public NEWSTUB_FL_* flag onto its packed bit.
        if((flags & NEWSTUB_FL_LOADERHEAP) != 0)
            m_numCodeBytesAndFlags |= LOADER_HEAP_BIT;
        if((flags & NEWSTUB_FL_MULTICAST) != 0)
            m_numCodeBytesAndFlags |= MULTICAST_DELEGATE_BIT;
        if ((flags & NEWSTUB_FL_EXTERNAL) != 0)
            m_numCodeBytesAndFlags |= EXTERNAL_ENTRY_BIT;
        if ((flags & NEWSTUB_FL_INSTANTIATING_METHOD) != 0)
            m_numCodeBytesAndFlags |= INSTANTIATING_STUB_BIT;
        if ((flags & NEWSTUB_FL_THUNK) != 0)
            m_numCodeBytesAndFlags |= THUNK_BIT;
    }

#ifdef STUBLINKER_GENERATES_UNWIND_INFO
    if (nUnwindInfoSize)
    {
        m_numCodeBytesAndFlags |= UNWIND_INFO_BIT;

        // Record the unwind info size in the suffix that immediately
        // precedes this Stub, then initialize the header itself.
        StubUnwindInfoHeaderSuffix * pSuffix = GetUnwindInfoHeaderSuffix();
        pSuffix->nUnwindInfoSize = (BYTE)nUnwindInfoSize;

        StubUnwindInfoHeader * pHeader = GetUnwindInfoHeader();
        pHeader->Init();
    }
#endif
}
2251
//-------------------------------------------------------------------
// One-time init
//-------------------------------------------------------------------
/*static*/ void Stub::Init()
{
    CONTRACTL
    {
        THROWS;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

#ifdef STUBLINKER_GENERATES_UNWIND_INFO
    // Initialize the lock guarding the global list of stub unwind info
    // heap segments (g_StubHeapSegments).
    g_StubUnwindInfoHeapSegmentsCrst.Init(CrstStubUnwindInfoHeapSegments);
#endif
}
2268
2269 //-------------------------------------------------------------------
2270 // Constructor
2271 //-------------------------------------------------------------------
2272 ArgBasedStubCache::ArgBasedStubCache(UINT fixedSlots)
2273         : m_numFixedSlots(fixedSlots),
2274           m_crst(CrstArgBasedStubCache)
2275 {
2276     WRAPPER_NO_CONTRACT;
2277
2278     m_aStub = new Stub * [m_numFixedSlots];
2279     _ASSERTE(m_aStub != NULL);
2280
2281     for (unsigned __int32 i = 0; i < m_numFixedSlots; i++) {
2282         m_aStub[i] = NULL;
2283     }
2284     m_pSlotEntries = NULL;
2285 }
2286
2287
2288 //-------------------------------------------------------------------
2289 // Destructor
2290 //-------------------------------------------------------------------
2291 ArgBasedStubCache::~ArgBasedStubCache()
2292 {
2293     CONTRACTL
2294     {
2295         NOTHROW;
2296         GC_NOTRIGGER;
2297     }
2298     CONTRACTL_END;
2299
2300     for (unsigned __int32 i = 0; i < m_numFixedSlots; i++) {
2301         Stub *pStub = m_aStub[i];
2302         if (pStub) {
2303             pStub->DecRef();
2304         }
2305     }
2306     // a size of 0 is a signal to Nirvana to flush the entire cache
2307     // not sure if this is needed, but should have no CLR perf impact since size is 0.
2308     FlushInstructionCache(GetCurrentProcess(),0,0);
2309
2310     SlotEntry **ppSlotEntry = &m_pSlotEntries;
2311     SlotEntry *pCur;
2312     while (NULL != (pCur = *ppSlotEntry)) {
2313         Stub *pStub = pCur->m_pStub;
2314         pStub->DecRef();
2315         *ppSlotEntry = pCur->m_pNext;
2316         delete pCur;
2317     }
2318     delete [] m_aStub;
2319 }
2320
2321
2322
2323 //-------------------------------------------------------------------
2324 // Queries/retrieves a previously cached stub.
2325 //
2326 // If there is no stub corresponding to the given index,
2327 //   this function returns NULL.
2328 //
2329 // Otherwise, this function returns the stub after
2330 //   incrementing its refcount.
2331 //-------------------------------------------------------------------
2332 Stub *ArgBasedStubCache::GetStub(UINT_PTR key)
2333 {
2334     CONTRACTL
2335     {
2336         NOTHROW;
2337         GC_TRIGGERS;
2338         MODE_ANY;
2339     }
2340     CONTRACTL_END;
2341
2342     Stub *pStub;
2343
2344     CrstHolder ch(&m_crst);
2345
2346     if (key < m_numFixedSlots) {
2347         pStub = m_aStub[key];
2348     } else {
2349         pStub = NULL;
2350         for (SlotEntry *pSlotEntry = m_pSlotEntries;
2351              pSlotEntry != NULL;
2352              pSlotEntry = pSlotEntry->m_pNext) {
2353
2354             if (pSlotEntry->m_key == key) {
2355                 pStub = pSlotEntry->m_pStub;
2356                 break;
2357             }
2358         }
2359     }
2360     if (pStub) {
2361         ExecutableWriterHolder<Stub> stubWriterHolder(pStub, sizeof(Stub));
2362         stubWriterHolder.GetRW()->IncRef();
2363     }
2364     return pStub;
2365 }
2366
2367
2368 //-------------------------------------------------------------------
2369 // Tries to associate a stub with a given index. This association
2370 // may fail because some other thread may have beaten you to it
2371 // just before you make the call.
2372 //
2373 // If the association succeeds, "pStub" is installed, and it is
2374 // returned back to the caller. The stub's refcount is incremented
2375 // twice (one to reflect the cache's ownership, and one to reflect
2376 // the caller's ownership.)
2377 //
2378 // If the association fails because another stub is already installed,
2379 // then the incumbent stub is returned to the caller and its refcount
2380 // is incremented once (to reflect the caller's ownership.)
2381 //
2382 // If the association fails due to lack of memory, NULL is returned
2383 // and no one's refcount changes.
2384 //
2385 // This routine is intended to be called like this:
2386 //
2387 //    Stub *pCandidate = MakeStub();  // after this, pCandidate's rc is 1
2388 //    Stub *pWinner = cache->SetStub(idx, pCandidate);
2389 //    pCandidate->DecRef();
2390 //    pCandidate = 0xcccccccc;     // must not use pCandidate again.
2391 //    if (!pWinner) {
2392 //          OutOfMemoryError;
2393 //    }
//    // If the association succeeded, pWinner's refcount is 2 and so
//    // is pCandidate's (because it *is* pWinner).
2396 //    // If the association failed, pWinner's refcount is still 2
2397 //    // and pCandidate got destroyed by the last DecRef().
2398 //    // Either way, pWinner is now the official index holder. It
2399 //    // has a refcount of 2 (one for the cache's ownership, and
2400 //    // one belonging to this code.)
2401 //-------------------------------------------------------------------
2402 Stub* ArgBasedStubCache::AttemptToSetStub(UINT_PTR key, Stub *pStub)
2403 {
2404     CONTRACTL
2405     {
2406         THROWS;
2407         GC_TRIGGERS;
2408         MODE_ANY;
2409     }
2410     CONTRACTL_END;
2411
2412     CrstHolder ch(&m_crst);
2413
2414     bool incRefForCache = false;
2415
2416     if (key < m_numFixedSlots) {
2417         if (m_aStub[key]) {
2418             pStub = m_aStub[key];
2419         } else {
2420             m_aStub[key] = pStub;
2421             incRefForCache = true;
2422         }
2423     } else {
2424         SlotEntry *pSlotEntry;
2425         for (pSlotEntry = m_pSlotEntries;
2426              pSlotEntry != NULL;
2427              pSlotEntry = pSlotEntry->m_pNext) {
2428
2429             if (pSlotEntry->m_key == key) {
2430                 pStub = pSlotEntry->m_pStub;
2431                 break;
2432             }
2433         }
2434         if (!pSlotEntry) {
2435             pSlotEntry = new SlotEntry;
2436             pSlotEntry->m_pStub = pStub;
2437             incRefForCache = true;
2438             pSlotEntry->m_key = key;
2439             pSlotEntry->m_pNext = m_pSlotEntries;
2440             m_pSlotEntries = pSlotEntry;
2441         }
2442     }
2443     if (pStub) {
2444         ExecutableWriterHolder<Stub> stubWriterHolder(pStub, sizeof(Stub));
2445         if (incRefForCache)
2446         {
2447             stubWriterHolder.GetRW()->IncRef();   // IncRef on cache's behalf
2448         }
2449         stubWriterHolder.GetRW()->IncRef();  // IncRef because we're returning it to caller
2450     }
2451     return pStub;
2452 }
2453
2454
2455
2456 #ifdef _DEBUG
// Diagnostic dump: prints every cached stub (fixed and dynamic slots)
// with its entry point and refcount.
VOID ArgBasedStubCache::Dump()
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
    }
    CONTRACTL_END;

    printf("--------------------------------------------------------------\n");
    printf("ArgBasedStubCache dump (%u fixed entries):\n", m_numFixedSlots);
    for (UINT32 i = 0; i < m_numFixedSlots; i++) {

        printf("  Fixed slot %u: ", (ULONG)i);
        Stub *pStub = m_aStub[i];
        if (!pStub) {
            printf("empty\n");
        } else {
            // NOTE(review): reads the ULONG immediately preceding the entry
            // point as the refcount - this assumes m_refcount is the last
            // field before the code bytes; verify against the Stub layout
            // in stublink.h.
            printf("%zxh   - refcount is %u\n",
                   (size_t)(pStub->GetEntryPoint()),
                   (ULONG)( *( ( ((ULONG*)(pStub->GetEntryPoint())) - 1))));
        }
    }

    for (SlotEntry *pSlotEntry = m_pSlotEntries;
         pSlotEntry != NULL;
         pSlotEntry = pSlotEntry->m_pNext) {

        printf("  Dyna. slot %u: ", (ULONG)(pSlotEntry->m_key));
        Stub *pStub = pSlotEntry->m_pStub;
        // Same layout-dependent refcount peek as above.
        printf("%zxh   - refcount is %u\n",
               (size_t)(pStub->GetEntryPoint()),
               (ULONG)( *( ( ((ULONG*)(pStub->GetEntryPoint())) - 1))));

    }


    printf("--------------------------------------------------------------\n");
}
2498 #endif
2499
2500 #endif // #ifndef DACCESS_COMPILE
2501