1 // Licensed to the .NET Foundation under one or more agreements.
2 // The .NET Foundation licenses this file to you under the MIT license.
3 // See the LICENSE file in the project root for more information.
8 #include "jitinterface.h"
15 #include "virtualcallstub.h"
21 #include "float.h" // for isnan
22 #include "dbginterface.h"
23 #include "dllimport.h"
24 #include "gcheaputilities.h"
25 #include "comdelegate.h"
26 #include "jitperf.h" // to track jit perf
28 #include "eeprofinterfaces.h"
31 // Included for referencing __report_gsfailure
33 #endif // !FEATURE_PAL
35 #ifdef PROFILING_SUPPORTED
36 #include "proftoeeinterfaceimpl.h"
40 #include "typestring.h"
42 #include "genericdict.h"
44 #include "debuginfostore.h"
46 #include "threadstatics.h"
54 #endif // HAVE_GCCOVER
56 #include "runtimehandles.h"
58 //========================================================================
60 // This file contains implementation of all JIT helpers. The helpers are
61 // divided into following categories:
63 // INTEGER ARITHMETIC HELPERS
64 // FLOATING POINT HELPERS
65 // INSTANCE FIELD HELPERS
66 // STATIC FIELD HELPERS
67 // SHARED STATIC FIELD HELPERS
72 // VALUETYPE/BYREF HELPERS
76 // DEBUGGER/PROFILER HELPERS
80 //========================================================================
84 //========================================================================
86 // INTEGER ARITHMETIC HELPERS
88 //========================================================================
90 #include <optsmallperfcritical.h>
// Full 64-bit product of two unsigned 32-bit values. Both operands are
// truncated to 32 bits and widened before the multiply, so the result is
// always the exact 64-bit product of the low halves.
#define Mul32x32To64(a, b) (((UINT64)(UINT32)(a)) * ((UINT64)(UINT32)(b)))

// Upper 32 bits of a 64-bit value.
#define Hi32Bits(a) ((UINT32)(((UINT64)(a)) >> 32))

// True iff a 64-bit signed value fits in a 32-bit signed int: the high half
// must equal the sign-extension of the low half (compiles into one 32-bit compare).
#define Is32BitSigned(a) (Hi32Bits(a) == Hi32Bits((INT64)(INT32)(a)))
108 // helper function to shift the result by 32-bits
// Returns a UINT64 whose upper 32 bits are x and lower 32 bits are zero,
// i.e. the value ((UINT64)x) << 32, but computed without a 64-bit shift
// because that shift was lowered to a slow multiply by 2^32 (see below).
110 inline UINT64 ShiftToHi32Bits(UINT32 x)
112     // The shift compiles into slow multiplication by 2^32! VSWhidbey 360736
113     // return ((UINT64)x) << 32;
// NOTE(review): the actual implementation lines are not visible here;
// presumably the value is assembled via a 2 x 32-bit struct/union — confirm.
121 #if !defined(_TARGET_X86_) || defined(FEATURE_PAL)
122 /*********************************************************************/
// 64-bit signed multiply helper. Fast path: when both operands have zero
// high halves (i.e. both are non-negative and fit in 32 unsigned bits),
// use the cheap 32x32->64 multiply; otherwise do the full 64-bit multiply.
123 HCIMPL2_VV(INT64, JIT_LMul, INT64 val1, INT64 val2)
127     UINT32 val1High = Hi32Bits(val1);
128     UINT32 val2High = Hi32Bits(val2);
130     if ((val1High == 0) && (val2High == 0))
131         return Mul32x32To64(val1, val2);
133     return (val1 * val2);
136 #endif // !_TARGET_X86_ || FEATURE_PAL
138 /*********************************************************************/
// 64-bit signed multiply with overflow detection: converts to unsigned
// magnitudes, multiplies via 32-bit halves, and throws OverflowException
// if any partial product carries past bit 63 or spills into the sign bit.
139 HCIMPL2_VV(INT64, JIT_LMulOvf, INT64 val1, INT64 val2)
143     // This short-cut does not actually help since the multiplication
144     // of two 32-bit signed ints compiles into the call to a slow helper
145     // if (Is32BitSigned(val1) && Is32BitSigned(val2))
146     //     return (INT64)(INT32)val1 * (INT64)(INT32)val2;
// Debug-only cross-check value (wrapping product) used by the assert below.
148     INDEBUG(INT64 expected = val1 * val2;)
151     // Remember the sign of the result
// XOR of the high words: the sign bit of 'sign' is set iff exactly one
// operand is negative, i.e. iff the true result should be negative.
152     INT32 sign = Hi32Bits(val1) ^ Hi32Bits(val2);
154     // Convert to unsigned multiplication
// NOTE(review): -val1 leaves INT64_MIN unchanged; that magnitude still
// trips the high-bits overflow checks below for any multiplier > 1 — confirm.
155     if (val1 < 0) val1 = -val1;
156     if (val2 < 0) val2 = -val2;
158     // Get the upper 32 bits of the numbers
159     UINT32 val1High = Hi32Bits(val1);
160     UINT32 val2High = Hi32Bits(val2);
165     // Compute the 'middle' bits of the long multiplication
166     valMid = Mul32x32To64(val2High, val1);
171     // Compute the 'middle' bits of the long multiplication
172     valMid = Mul32x32To64(val1High, val2);
175     // See if any bits after bit 32 are set
// If the middle partial product itself exceeds 32 bits, the final result
// cannot fit in 64 bits (overflow path, not visible in this excerpt).
176     if (Hi32Bits(valMid) != 0)
179     ret = Mul32x32To64(val1, val2) + ShiftToHi32Bits((UINT32)(valMid));
181     // check for overflow
// The addition above overflowed iff the resulting high word wrapped below
// the middle bits that were added in.
182     if (Hi32Bits(ret) < (UINT32)valMid)
186     // have we spilled into the sign bit?
192     // have we spilled into the sign bit?
196     _ASSERTE(ret == expected);
200     FCThrow(kOverflowException);
204 /*********************************************************************/
// 64-bit unsigned multiply with overflow detection. Same halves-based
// scheme as JIT_LMulOvf but without the sign handling; throws
// OverflowException when the true product does not fit in 64 bits.
205 HCIMPL2_VV(UINT64, JIT_ULMulOvf, UINT64 val1, UINT64 val2)
// Debug-only cross-check value (wrapping product) used by the assert below.
209     INDEBUG(UINT64 expected = val1 * val2;)
212     // Get the upper 32 bits of the numbers
213     UINT32 val1High = Hi32Bits(val1);
214     UINT32 val2High = Hi32Bits(val2);
// Fast path (guard elided in this excerpt): when both high halves are zero
// the 32x32->64 product can never overflow.
220     return Mul32x32To64(val1, val2);
221     // Compute the 'middle' bits of the long multiplication
222     valMid = Mul32x32To64(val2High, val1);
227     // Compute the 'middle' bits of the long multiplication
228     valMid = Mul32x32To64(val1High, val2);
231     // See if any bits after bit 32 are set
// Middle partial product wider than 32 bits => result cannot fit in 64 bits.
232     if (Hi32Bits(valMid) != 0)
235     ret = Mul32x32To64(val1, val2) + ShiftToHi32Bits((UINT32)(valMid));
237     // check for overflow
// The addition overflowed iff the high word wrapped below the added middle bits.
238     if (Hi32Bits(ret) < (UINT32)valMid)
241     _ASSERTE(ret == expected);
245     FCThrow(kOverflowException);
249 /*********************************************************************/
// 32-bit signed division. Throws DivideByZeroException for divisor == 0
// and OverflowException for INT32_MIN / -1 (the one quotient that does
// not fit in 32 bits).
250 HCIMPL2(INT32, JIT_Div, INT32 dividend, INT32 divisor)
254     RuntimeExceptionKind ehKind;
// Single unsigned compare screens both special divisors at once:
// (UINT32)(divisor + 1) <= 1 holds exactly when divisor is -1 or 0.
256     if (((UINT32) (divisor + 1)) <= 1)  // Unsigned test for divisor in [-1 .. 0]
260             ehKind = kDivideByZeroException;
263         else if (divisor == -1)
265             if (dividend == _I32_MIN)
267                 ehKind = kOverflowException;
274     return(dividend / divisor);
281 /*********************************************************************/
// 32-bit signed remainder. Same special-case screening as JIT_Div:
// divisor 0 -> DivideByZeroException, INT32_MIN % -1 -> OverflowException.
282 HCIMPL2(INT32, JIT_Mod, INT32 dividend, INT32 divisor)
286     RuntimeExceptionKind ehKind;
288     if (((UINT32) (divisor + 1)) <= 1)  // Unsigned test for divisor in [-1 .. 0]
292             ehKind = kDivideByZeroException;
295         else if (divisor == -1)
297             if (dividend == _I32_MIN)
299                 ehKind = kOverflowException;
306     return(dividend % divisor);
313 /*********************************************************************/
// 32-bit unsigned division: only divisor == 0 is exceptional.
314 HCIMPL2(UINT32, JIT_UDiv, UINT32 dividend, UINT32 divisor)
319         FCThrow(kDivideByZeroException);
321     return(dividend / divisor);
325 /*********************************************************************/
// 32-bit unsigned remainder: only divisor == 0 is exceptional.
326 HCIMPL2(UINT32, JIT_UMod, UINT32 dividend, UINT32 divisor)
331         FCThrow(kDivideByZeroException);
333     return(dividend % divisor);
337 /*********************************************************************/
// 64-bit signed division. Exceptional cases (divide-by-zero, and
// INT64_MIN / -1 overflow) are only possible when the divisor fits in
// 32 bits, so the screening happens inside the Is32BitSigned(divisor)
// branch; when both operands fit in 32 bits a cheaper 32-bit divide is used.
338 HCIMPL2_VV(INT64, JIT_LDiv, INT64 dividend, INT64 divisor)
342     RuntimeExceptionKind ehKind;
344     if (Is32BitSigned(divisor))
346         if ((INT32)divisor == 0)
348             ehKind = kDivideByZeroException;
352         if ((INT32)divisor == -1)
354             if ((UINT64) dividend == UI64(0x8000000000000000))
356                 ehKind = kOverflowException;
362         // Check for -ive or +ive numbers in the range -2**31 to 2**31
363         if (Is32BitSigned(dividend))
364             return((INT32)dividend / (INT32)divisor);
367     // For all other combinations fallback to int64 div.
368     return(dividend / divisor);
375 /*********************************************************************/
// 64-bit signed remainder; mirrors JIT_LDiv's special-case screening and
// 32-bit fast path.
376 HCIMPL2_VV(INT64, JIT_LMod, INT64 dividend, INT64 divisor)
380     RuntimeExceptionKind ehKind;
382     if (Is32BitSigned(divisor))
384         if ((INT32)divisor == 0)
386             ehKind = kDivideByZeroException;
390         if ((INT32)divisor == -1)
392             // <TODO>TODO, we really should remove this as it lengthens the code path
393             // and the spec really says that it should not throw an exception. </TODO>
394             if ((UINT64) dividend == UI64(0x8000000000000000))
396                 ehKind = kOverflowException;
402         // Check for -ive or +ive numbers in the range -2**31 to 2**31
403         if (Is32BitSigned(dividend))
404             return((INT32)dividend % (INT32)divisor);
407     // For all other combinations fallback to int64 div.
408     return(dividend % divisor);
415 /*********************************************************************/
// 64-bit unsigned division: only divisor == 0 throws; 32-bit fast path
// when both operands have zero high halves.
416 HCIMPL2_VV(UINT64, JIT_ULDiv, UINT64 dividend, UINT64 divisor)
420     if (Hi32Bits(divisor) == 0)
422         if ((UINT32)(divisor) == 0)
423             FCThrow(kDivideByZeroException);
425         if (Hi32Bits(dividend) == 0)
426             return((UINT32)dividend / (UINT32)divisor);
429     return(dividend / divisor);
433 /*********************************************************************/
// 64-bit unsigned remainder; mirrors JIT_ULDiv.
434 HCIMPL2_VV(UINT64, JIT_ULMod, UINT64 dividend, UINT64 divisor)
438     if (Hi32Bits(divisor) == 0)
440         if ((UINT32)(divisor) == 0)
441             FCThrow(kDivideByZeroException);
443         if (Hi32Bits(dividend) == 0)
444             return((UINT32)dividend % (UINT32)divisor);
447     return(dividend % divisor);
451 #if !defined(BIT64) && !defined(_TARGET_X86_)
452 /*********************************************************************/
// 64-bit shift helpers for 32-bit non-x86 targets that lack native 64-bit
// shifts. The shift count is masked to 0..63, matching the ECMA-335
// behavior the JIT expects for shl/shr/shr.un on 64-bit operands.
453 HCIMPL2_VV(UINT64, JIT_LLsh, UINT64 num, int shift)
456     return num << (shift & 0x3F);
460 /*********************************************************************/
// Arithmetic (sign-propagating) right shift.
461 HCIMPL2_VV(INT64, JIT_LRsh, INT64 num, int shift)
464     return num >> (shift & 0x3F);
468 /*********************************************************************/
// Logical (zero-filling) right shift.
469 HCIMPL2_VV(UINT64, JIT_LRsz, UINT64 num, int shift)
472     return num >> (shift & 0x3F);
475 #endif // !BIT64 && !_TARGET_X86_
477 #include <optdefault.h>
480 //========================================================================
482 // FLOATING POINT HELPERS
484 //========================================================================
486 #include <optsmallperfcritical.h>
488 /*********************************************************************/
// Convert unsigned 64-bit to double. The hardware conversion is signed,
// so first convert as signed; when the top bit was set the signed value
// is negative and we correct it by adding 2^64 (guard elided here).
490 HCIMPL1_V(double, JIT_ULng2Dbl, UINT64 val)
494     double conv = (double) ((INT64) val);
496         conv += (4294967296.0 * 4294967296.0);  // add 2^64
502 /*********************************************************************/
503 // needed for ARM and RyuJIT-x86
// Signed 64-bit to double conversion helper (body elided in this excerpt).
504 HCIMPL1_V(double, JIT_Lng2Dbl, INT64 val)
511 //--------------------------------------------------------------------------
// Dispatch modf/modff by float type so BankersRound can split any ftype
// into integer and fractional parts with one spelling.
512 template <class ftype>
513 ftype modftype(ftype value, ftype *iptr);
514 template <> float modftype(float value, float *iptr) { return modff(value, iptr); }
515 template <> double modftype(double value, double *iptr) { return modf(value, iptr); }
517 // round to nearest, round to even if tied
// IEEE 754 round-half-to-even: exact .5 fractions round toward the even
// neighbor; everything else rounds to the closest integer.
518 template <class ftype>
519 ftype BankersRound(ftype value)
// Negative inputs are handled by rounding the magnitude and negating.
521     if (value < 0.0) return -BankersRound <ftype> (-value);
524     modftype( value, &integerPart );
526     // if decimal part is exactly .5
527     if ((value -(integerPart +0.5)) == 0.0)
530 #if defined(_TARGET_ARM_) && defined(FEATURE_CORESYSTEM)
531         // @ARMTODO: On ARM when building on CoreSystem (where we link against the system CRT) an attempt to
532         // use fmod(float, float) fails to link (apparently this is converted to a reference to fmodf, which
533         // is not included in the system CRT). Use the double version instead.
// Tie case, integer part already even: keep it (return elided here).
534         if (fmod(double(integerPart), double(2.0)) == 0.0)
537         if (fmod(ftype(integerPart), ftype(2.0)) == 0.0)
541         // Else return the nearest even integer
542         return (ftype)_copysign(ceil(fabs(value+0.5)),
546     // Otherwise round to closest
547     return (ftype)_copysign(floor(fabs(value)+0.5),
552 /*********************************************************************/
553 // round double to nearest int (as double)
// Thin wrapper over BankersRound<double> (IEEE round-half-to-even).
554 HCIMPL1_V(double, JIT_DoubleRound, double val)
557     return BankersRound(val);
561 /*********************************************************************/
562 // round float to nearest int (as float)
// Thin wrapper over BankersRound<float>.
563 HCIMPL1_V(float, JIT_FloatRound, float val)
566     return BankersRound(val);
570 /*********************************************************************/
571 // Call fast Dbl2Lng conversion - used by functions below
// On targets with a helper-based conversion this forwards to JIT_Dbl2Lng;
// the plain C++ cast path below is for targets where the cast is cheap
// (the #if selecting between them is elided in this excerpt).
572 FORCEINLINE INT64 FastDbl2Lng(double val)
576     return HCCALL1_V(JIT_Dbl2Lng, val);
579     return((__int64) val);
583 /*********************************************************************/
// Checked double -> UINT32 conversion; throws OverflowException when the
// value is outside (-1, 2^32) or is NaN.
584 HCIMPL1_V(UINT32, JIT_Dbl2UIntOvf, double val)
588     // Note that this expression also works properly for val = NaN case
// (every comparison against NaN is false, so NaN falls through to the throw)
589     if (val > -1.0 && val < 4294967296.0)
590         return((UINT32)FastDbl2Lng(val));
592     FCThrow(kOverflowException);
596 /*********************************************************************/
// Unchecked double -> UINT64 conversion. Values >= 2^63 cannot go through
// the signed conversion directly, so 2^63 is subtracted first and the bit
// pattern restored afterwards (branch selecting the paths elided here).
597 HCIMPL1_V(UINT64, JIT_Dbl2ULng, double val)
601     const double two63  = 2147483648.0 * 4294967296.0;
604         ret = FastDbl2Lng(val);
607         // subtract 0x8000000000000000, do the convert then add it back again
608         ret = FastDbl2Lng(val - two63) + I64(0x8000000000000000);
614 /*********************************************************************/
// Checked double -> UINT64 conversion; throws OverflowException for values
// outside (-1, 2^64) or NaN, otherwise converts as JIT_Dbl2ULng does.
615 HCIMPL1_V(UINT64, JIT_Dbl2ULngOvf, double val)
619     const double two64  = 4294967296.0 * 4294967296.0;
620     // Note that this expression also works properly for val = NaN case
621     if (val > -1.0 && val < two64) {
622         const double two63  = 2147483648.0 * 4294967296.0;
625             ret = FastDbl2Lng(val);
628             // subtract 0x8000000000000000, do the convert then add it back again
629             ret = FastDbl2Lng(val - two63) + I64(0x8000000000000000);
632         // since no overflow can occur, the value always has to be within 1
// Debug sanity check: converting the result back must land within 1.0 of val.
633         double roundTripVal = HCCALL1_V(JIT_ULng2Dbl, ret);
634         _ASSERTE(val - 1.0 <= roundTripVal && roundTripVal <= val + 1.0);
639     FCThrow(kOverflowException);
644 #if !defined(_TARGET_X86_) || defined(FEATURE_PAL)
// Unchecked double -> INT64 conversion (body elided in this excerpt).
646 HCIMPL1_V(INT64, JIT_Dbl2Lng, double val)
// Checked double -> INT32 conversion; throws OverflowException outside
// (-2^31 - 1, 2^31), a range test that also rejects NaN.
654 HCIMPL1_V(int, JIT_Dbl2IntOvf, double val)
658     const double two31 = 2147483648.0;
660         // Note that this expression also works properly for val = NaN case
661     if (val > -two31 - 1 && val < two31)
664     FCThrow(kOverflowException);
// Checked double -> INT64 conversion.
668 HCIMPL1_V(INT64, JIT_Dbl2LngOvf, double val)
672     const double two63  = 2147483648.0 * 4294967296.0;
674     // Note that this expression also works properly for val = NaN case
675     // We need to compare with the very next double to two63. 0x402 is epsilon to get us there.
676     if (val > -two63 - 0x402 && val < two63)
679     FCThrow(kOverflowException);
// Floating-point remainder helpers implementing ECMA-335 'rem' semantics,
// which differ from plain fmod for infinite operands (see comments below).
683 HCIMPL2_VV(float, JIT_FltRem, float dividend, float divisor)
688     // From the ECMA standard:
690     //     If [divisor] is zero or [dividend] is infinity
691     //         the result is NaN.
692     //     If [divisor] is infinity,
693     //         the result is [dividend] (negated for -infinity***).
695     // ***"negated for -infinity" has been removed from the spec
698     if (divisor==0 || !_finite(dividend))
// Produce a NaN by reinterpreting the canonical 32-bit NaN bit pattern.
700         UINT32 NaN = CLR_NAN_32;
701         return *(float *)(&NaN);
// Infinite divisor (and finite dividend): result is the dividend unchanged.
703     else if (!_finite(divisor) && !_isnan(divisor))
709     // COMPILER BUG WITH FMODF() + /Oi, USE FMOD() INSTEAD
710     return fmodf(dividend,divisor);
712     return (float)fmod((double)dividend,(double)divisor);
717 HCIMPL2_VV(double, JIT_DblRem, double dividend, double divisor)
722     // From the ECMA standard:
724     //     If [divisor] is zero or [dividend] is infinity
725     //         the result is NaN.
726     //     If [divisor] is infinity,
727     //         the result is [dividend] (negated for -infinity***).
729     // ***"negated for -infinity" has been removed from the spec
731     if (divisor==0 || !_finite(dividend))
// Produce a NaN by reinterpreting the canonical 64-bit NaN bit pattern.
733         UINT64 NaN = CLR_NAN_64;
734         return *(double *)(&NaN);
736     else if (!_finite(divisor) && !_isnan(divisor))
741     return(fmod(dividend,divisor));
745 #endif // !_TARGET_X86_ || FEATURE_PAL
747 #include <optdefault.h>
750 //========================================================================
752 // INSTANCE FIELD HELPERS
754 //========================================================================
756 /*********************************************************************/
757 // Returns the address of the field in the object (This is an interior
758 // pointer and the caller has to use it appropriately). obj can be
759 // either a reference or a byref
// Slow path: sets up a helper-method frame (GC can run), throws
// NullReferenceException for a null object, then asks the FieldDesc
// for the field address.
760 HCIMPL2(void*, JIT_GetFieldAddr_Framed, Object *obj, FieldDesc* pFD)
764         PRECONDITION(CheckPointer(pFD));
767     void * fldAddr = NULL;
768     OBJECTREF objRef = ObjectToOBJECTREF(obj);
770     HELPER_METHOD_FRAME_BEGIN_RET_1(objRef);
773         COMPlusThrow(kNullReferenceException);
776     fldAddr = pFD->GetAddress(OBJECTREFToObject(objRef));
778     HELPER_METHOD_FRAME_END();
784 #include <optsmallperfcritical.h>
// Fast path: no frame. Falls back to the framed helper when the object is
// null, IBC instrumentation is active, or the field was added by
// Edit-and-Continue (such fields are not at a fixed in-object offset).
785 HCIMPL2(void*, JIT_GetFieldAddr, Object *obj, FieldDesc* pFD)
789         PRECONDITION(CheckPointer(pFD));
792     if (obj == NULL || g_IBCLogger.InstrEnabled() || pFD->IsEnCNew())
795         return HCCALL2(JIT_GetFieldAddr_Framed, obj, pFD);
798     return pFD->GetAddressGuaranteedInHeap(obj);
801 #include <optdefault.h>
803 /*********************************************************************/
804 #define HCallAssert(cache, target)  // suppressed to avoid ambiguous cast errors caused by use of template
// Framed (slow-path) primitive-field getter, templated over the field's
// primitive type. Sets up a helper frame, throws NullReferenceException
// for a null object, and reads the field via FieldDesc::GetInstanceField.
805 template <typename FIELDTYPE>
806 NOINLINE HCIMPL2(FIELDTYPE, JIT_GetField_Framed, Object *obj, FieldDesc *pFD)
813     // This is an instance field helper
814     _ASSERTE(!pFD->IsStatic());
816     OBJECTREF objRef = ObjectToOBJECTREF(obj);
818     HELPER_METHOD_FRAME_BEGIN_RET_1(objRef);
820         COMPlusThrow(kNullReferenceException);
821     pFD->GetInstanceField(objRef, &value);
822     HELPER_METHOD_POLL();
823     HELPER_METHOD_FRAME_END();
829 /*********************************************************************/
830 #include <optsmallperfcritical.h>
// Fast-path primitive-field getters, one per width. Each one: falls back
// to the framed template helper when the object is null, IBC logging is
// on, or the field is EnC-added; otherwise does a volatile load straight
// from the object, which must be in the GC heap.
832 HCIMPL2(INT8, JIT_GetField8, Object *obj, FieldDesc *pFD)
836     if (obj == NULL || g_IBCLogger.InstrEnabled() || pFD->IsEnCNew())
839         return HCCALL2(JIT_GetField_Framed<INT8>, obj, pFD);
842     INT8 val = VolatileLoad<INT8>((INT8*)pFD->GetAddressGuaranteedInHeap(obj));
848 HCIMPL2(INT16, JIT_GetField16, Object *obj, FieldDesc *pFD)
852     if (obj == NULL || g_IBCLogger.InstrEnabled() || pFD->IsEnCNew())
855         return HCCALL2(JIT_GetField_Framed<INT16>, obj, pFD);
858     INT16 val = VolatileLoad<INT16>((INT16*)pFD->GetAddressGuaranteedInHeap(obj));
864 HCIMPL2(INT32, JIT_GetField32, Object *obj, FieldDesc *pFD)
868     if (obj == NULL || g_IBCLogger.InstrEnabled() || pFD->IsEnCNew())
871         return HCCALL2(JIT_GetField_Framed<INT32>, obj, pFD);
874     INT32 val = VolatileLoad<INT32>((INT32*)pFD->GetAddressGuaranteedInHeap(obj));
880 HCIMPL2(INT64, JIT_GetField64, Object *obj, FieldDesc *pFD)
884     if (obj == NULL || g_IBCLogger.InstrEnabled() || pFD->IsEnCNew())
887         return HCCALL2(JIT_GetField_Framed<INT64>, obj, pFD);
890     INT64 val = VolatileLoad<INT64>((INT64*)pFD->GetAddressGuaranteedInHeap(obj));
// Float/double variants load through same-width integers so the value is
// moved as raw bits rather than through FP registers.
896 HCIMPL2(FLOAT, JIT_GetFieldFloat, Object *obj, FieldDesc *pFD)
900     if (obj == NULL || g_IBCLogger.InstrEnabled() || pFD->IsEnCNew())
903         return HCCALL2(JIT_GetField_Framed<FLOAT>, obj, pFD);
907     (INT32&)val = VolatileLoad<INT32>((INT32*)pFD->GetAddressGuaranteedInHeap(obj));
913 HCIMPL2(DOUBLE, JIT_GetFieldDouble, Object *obj, FieldDesc *pFD)
917     if (obj == NULL || g_IBCLogger.InstrEnabled() || pFD->IsEnCNew())
920         return HCCALL2(JIT_GetField_Framed<DOUBLE>, obj, pFD);
924     (INT64&)val = VolatileLoad<INT64>((INT64*)pFD->GetAddressGuaranteedInHeap(obj));
930 #include <optdefault.h>
932 /*********************************************************************/
933 #define HCallAssert(cache, target)  // suppressed to avoid ambiguous cast errors caused by use of template
// Framed (slow-path) primitive-field setter, templated over the field's
// primitive type; mirror image of JIT_GetField_Framed.
934 template <typename FIELDTYPE>
935 NOINLINE HCIMPL3(VOID, JIT_SetField_Framed, Object *obj, FieldDesc* pFD, FIELDTYPE val)
940     // This is an instance field helper
941     _ASSERTE(!pFD->IsStatic());
943     OBJECTREF objRef = ObjectToOBJECTREF(obj);
945     HELPER_METHOD_FRAME_BEGIN_1(objRef);
947         COMPlusThrow(kNullReferenceException);
948     pFD->SetInstanceField(objRef, &val);
949     HELPER_METHOD_POLL();
950     HELPER_METHOD_FRAME_END();
954 /*********************************************************************/
955 #include <optsmallperfcritical.h>
// Fast-path primitive-field setters, one per width; same fallback
// conditions as the getters, volatile store into the in-heap object.
957 HCIMPL3(VOID, JIT_SetField8, Object *obj, FieldDesc *pFD, INT8 val)
961     if (obj == NULL || g_IBCLogger.InstrEnabled() || pFD->IsEnCNew())
964         return HCCALL3(JIT_SetField_Framed<INT8>, obj, pFD, val);
967     VolatileStore<INT8>((INT8*)pFD->GetAddressGuaranteedInHeap(obj), val);
972 HCIMPL3(VOID, JIT_SetField16, Object *obj, FieldDesc *pFD, INT16 val)
976     if (obj == NULL || g_IBCLogger.InstrEnabled() || pFD->IsEnCNew())
979         return HCCALL3(JIT_SetField_Framed<INT16>, obj, pFD, val);
982     VolatileStore<INT16>((INT16*)pFD->GetAddressGuaranteedInHeap(obj), val);
987 HCIMPL3(VOID, JIT_SetField32, Object *obj, FieldDesc *pFD, INT32 val)
991     if (obj == NULL || g_IBCLogger.InstrEnabled() || pFD->IsEnCNew())
994         return HCCALL3(JIT_SetField_Framed<INT32>, obj, pFD, val);
997     VolatileStore<INT32>((INT32*)pFD->GetAddressGuaranteedInHeap(obj), val);
1002 HCIMPL3(VOID, JIT_SetField64, Object *obj, FieldDesc *pFD, INT64 val)
1006     if (obj == NULL || g_IBCLogger.InstrEnabled() || pFD->IsEnCNew())
1009         return HCCALL3(JIT_SetField_Framed<INT64>, obj, pFD, val);
1012     VolatileStore<INT64>((INT64*)pFD->GetAddressGuaranteedInHeap(obj), val);
// Float/double variants store the raw bit pattern via same-width integers.
1017 HCIMPL3(VOID, JIT_SetFieldFloat, Object *obj, FieldDesc *pFD, FLOAT val)
1021     if (obj == NULL || g_IBCLogger.InstrEnabled() || pFD->IsEnCNew())
1024         return HCCALL3(JIT_SetField_Framed<FLOAT>, obj, pFD, val);
1027     VolatileStore<INT32>((INT32*)pFD->GetAddressGuaranteedInHeap(obj), (INT32&)val);
1032 HCIMPL3(VOID, JIT_SetFieldDouble, Object *obj, FieldDesc *pFD, DOUBLE val)
1036     if (obj == NULL || g_IBCLogger.InstrEnabled() || pFD->IsEnCNew())
1039         return HCCALL3(JIT_SetField_Framed<DOUBLE>, obj, pFD, val);
1042     VolatileStore<INT64>((INT64*)pFD->GetAddressGuaranteedInHeap(obj), (INT64&)val);
1047 #include <optdefault.h>
1049 /*********************************************************************/
// Framed (slow-path) getter for object-reference fields. Both objRef and
// the fetched value are protected by the helper frame so GC can relocate them.
1050 HCIMPL2(Object*, JIT_GetFieldObj_Framed, Object *obj, FieldDesc *pFD)
1054         PRECONDITION(!pFD->IsStatic());
1055         PRECONDITION(!pFD->IsPrimitive() && !pFD->IsByValue());  // Assert that we are called only for objects
1058     OBJECTREF objRef = ObjectToOBJECTREF(obj);
1059     OBJECTREF val = NULL;
1061     HELPER_METHOD_FRAME_BEGIN_RET_2(objRef, val);        // Set up a frame
1063         COMPlusThrow(kNullReferenceException);
1064     pFD->GetInstanceField(objRef, &val);
1065     HELPER_METHOD_POLL();
1066     HELPER_METHOD_FRAME_END();
1068     return OBJECTREFToObject(val);
1072 #include <optsmallperfcritical.h>
// Fast path for object-reference fields: volatile load of the reference,
// then a GC poll before returning it.
1073 HCIMPL2(Object*, JIT_GetFieldObj, Object *obj, FieldDesc *pFD)
1077         PRECONDITION(!pFD->IsStatic());
1078         PRECONDITION(!pFD->IsPrimitive() && !pFD->IsByValue());  // Assert that we are called only for objects
1081     if (obj == NULL || g_IBCLogger.InstrEnabled() || pFD->IsEnCNew())
1084         return HCCALL2(JIT_GetFieldObj_Framed, obj, pFD);
1087     void * address = pFD->GetAddressGuaranteedInHeap(obj);
1088     OBJECTREF val = ObjectToOBJECTREF(VolatileLoad((Object **)address));
1090     FC_GC_POLL_AND_RETURN_OBJREF(val);
1093 #include <optdefault.h>
1095 /*********************************************************************/
// Framed (slow-path) setter for object-reference fields.
1096 HCIMPL3(VOID, JIT_SetFieldObj_Framed, Object *obj, FieldDesc *pFD, Object *value)
1100         PRECONDITION(!pFD->IsStatic());
1101         PRECONDITION(!pFD->IsPrimitive() && !pFD->IsByValue());  // Assert that we are called only for objects
1104     OBJECTREF objRef = ObjectToOBJECTREF(obj);
1105     OBJECTREF val = ObjectToOBJECTREF(value);
1107     HELPER_METHOD_FRAME_BEGIN_2(objRef, val);
1109         COMPlusThrow(kNullReferenceException);
1110     pFD->SetInstanceField(objRef, &val);
1111     HELPER_METHOD_POLL();
1112     HELPER_METHOD_FRAME_END();
1116 #include <optsmallperfcritical.h>
// Fast path: SetObjectReference performs the store together with the GC
// write barrier required for reference fields.
1117 HCIMPL3(VOID, JIT_SetFieldObj, Object *obj, FieldDesc *pFD, Object *value)
1121         PRECONDITION(!pFD->IsStatic());
1122         PRECONDITION(!pFD->IsPrimitive() && !pFD->IsByValue());  // Assert that we are called only for objects
1125     if (obj == NULL || g_IBCLogger.InstrEnabled() || pFD->IsEnCNew())
1128         return HCCALL3(JIT_SetFieldObj_Framed, obj, pFD, value);
1131     void * address = pFD->GetAddressGuaranteedInHeap(obj);
1132     SetObjectReference((OBJECTREF*)address, ObjectToOBJECTREF(value));
1136 #include <optdefault.h>
1138 /*********************************************************************/
// Framed (slow-path) getter for value-type (struct) fields: copies the
// struct out of the object into the caller-supplied return buffer.
1139 HCIMPL4(VOID, JIT_GetFieldStruct_Framed, LPVOID retBuff, Object *obj, FieldDesc *pFD, MethodTable *pFieldMT)
1143     // This may be a cross context field access. Setup a frame as we will
1144     // transition to managed code later
1146     // This is an instance field helper
1147     _ASSERTE(!pFD->IsStatic());
1149     // Assert that we are not called for objects or primitive types
1150     _ASSERTE(!pFD->IsPrimitive());
1152     OBJECTREF objRef = ObjectToOBJECTREF(obj);
1154     HELPER_METHOD_FRAME_BEGIN_1(objRef);        // Set up a frame
1157         COMPlusThrow(kNullReferenceException);
1159     // Try an unwrap operation in case that we are not being called
1160     // in the same context as the server.
1161     // If that is the case then GetObjectFromProxy will return
1162     // the server object.
1163     BOOL fRemoted = FALSE;
// CopyValueClass copies the struct field-by-field, honoring any GC
// references it contains (write barriers handled inside).
1168         void * pAddr = pFD->GetAddress(OBJECTREFToObject(objRef));
1169         CopyValueClass(retBuff, pAddr, pFieldMT);
1172     HELPER_METHOD_FRAME_END();          // Tear down the frame
1176 #include <optsmallperfcritical.h>
// Fast path: copy the struct directly out of the in-heap object.
1177 HCIMPL4(VOID, JIT_GetFieldStruct, LPVOID retBuff, Object *obj, FieldDesc *pFD, MethodTable *pFieldMT)
1181     _ASSERTE(pFieldMT->IsValueType());
1183     if (obj == NULL || g_IBCLogger.InstrEnabled() || pFD->IsEnCNew())
1186         return HCCALL4(JIT_GetFieldStruct_Framed, retBuff, obj, pFD, pFieldMT);
1189     void * pAddr = pFD->GetAddressGuaranteedInHeap(obj);
1190     CopyValueClass(retBuff, pAddr, pFieldMT);
1193 #include <optdefault.h>
1195 /*********************************************************************/
// Framed (slow-path) setter for value-type (struct) fields: copies the
// struct from valuePtr into the object.
1196 HCIMPL4(VOID, JIT_SetFieldStruct_Framed, Object *obj, FieldDesc *pFD, MethodTable *pFieldMT, LPVOID valuePtr)
1200     // Assert that we are not called for objects or primitive types
1201     _ASSERTE(!pFD->IsPrimitive());
1203     OBJECTREF objRef = ObjectToOBJECTREF(obj);
1205     // This may be a cross context field access. Setup a frame as we will
1206     // transition to managed code later
1208     HELPER_METHOD_FRAME_BEGIN_1(objRef);        // Set up a frame
1211         COMPlusThrow(kNullReferenceException);
1213     // Try an unwrap operation in case that we are not being called
1214     // in the same context as the server.
1215     // If that is the case then GetObjectFromProxy will return
1216     // the server object.
1217     BOOL fRemoted = FALSE;
1222         void * pAddr = pFD->GetAddress(OBJECTREFToObject(objRef));
1223         CopyValueClass(pAddr, valuePtr, pFieldMT);
1226     HELPER_METHOD_FRAME_END();          // Tear down the frame
1230 #include <optsmallperfcritical.h>
// Fast path: copy the struct straight into the in-heap object.
1231 HCIMPL4(VOID, JIT_SetFieldStruct, Object *obj, FieldDesc *pFD, MethodTable *pFieldMT, LPVOID valuePtr)
1235     _ASSERTE(pFieldMT->IsValueType());
1237     if (obj == NULL || g_IBCLogger.InstrEnabled() || pFD->IsEnCNew())
1240         return HCCALL4(JIT_SetFieldStruct_Framed, obj, pFD, pFieldMT, valuePtr);
1243     void * pAddr = pFD->GetAddressGuaranteedInHeap(obj);
1244     CopyValueClass(pAddr, valuePtr, pFieldMT);
1247 #include <optdefault.h>
1251 //========================================================================
1253 // STATIC FIELD HELPERS
1255 //========================================================================
1259 // Slow helper to tailcall from the fast one
// Runs the class constructor (and restores the type if needed) under a
// helper frame; only reached when the fast path saw the class uninitialized.
1260 NOINLINE HCIMPL1(void, JIT_InitClass_Framed, MethodTable* pMT)
1264     HELPER_METHOD_FRAME_BEGIN_0();
1266     // We don't want to be calling JIT_InitClass at all for perf reasons
1267     // on the Global Class <Module> as the Class loading logic ensures that we
1268     // already have initialized the Global Class <Module>
1269     CONSISTENCY_CHECK(!pMT->IsGlobalClass());
1271     pMT->CheckRestore();
1272     pMT->CheckRunClassInitThrowing();
1274     HELPER_METHOD_FRAME_END();
1279 /*************************************************************/
1280 #include <optsmallperfcritical.h>
// Ensure-class-initialized helper emitted by the JIT before static
// accesses. Fast path is a single initialized-bit check; the framed
// helper above does the real work.
1281 HCIMPL1(void, JIT_InitClass, CORINFO_CLASS_HANDLE typeHnd_)
1285     TypeHandle typeHnd(typeHnd_);
1286     MethodTable *pMT = typeHnd.AsMethodTable();
1287     _ASSERTE(!pMT->IsClassPreInited());
1289     if (pMT->GetDomainLocalModule()->IsClassInitialized(pMT))
1292     // Tailcall to the slow helper
1294     HCCALL1(JIT_InitClass_Framed, pMT);
1297 #include <optdefault.h>
1299 /*************************************************************/
// Initialize a generic instantiation. When the method's owning type is a
// shared ("canonical") instantiation, the exact instantiated MethodTable
// is first loaded from the method's exact class instantiation; then the
// type is restored, activated, and its cctor run.
1300 HCIMPL2(void, JIT_InitInstantiatedClass, CORINFO_CLASS_HANDLE typeHnd_, CORINFO_METHOD_HANDLE methHnd_)
1304         PRECONDITION(methHnd_ != NULL);
1307     HELPER_METHOD_FRAME_BEGIN_NOPOLL();    // Set up a frame
1309     MethodTable * pMT = (MethodTable*) typeHnd_;
1310     MethodDesc *  pMD = (MethodDesc*)  methHnd_;
1312     MethodTable * pTemplateMT = pMD->GetMethodTable();
1313     if (pTemplateMT->IsSharedByGenericInstantiations())
1315         pMT = ClassLoader::LoadGenericInstantiationThrowing(pTemplateMT->GetModule(),
1316                                                             pTemplateMT->GetCl(),
1317                                                             pMD->GetExactClassInstantiation(pMT)).AsMethodTable();
1324     pMT->CheckRestore();
1325     pMT->EnsureInstanceActive();
1326     pMT->CheckRunClassInitThrowing();
1327     HELPER_METHOD_FRAME_END();
1332 //========================================================================
1334 // SHARED STATIC FIELD HELPERS
1336 //========================================================================
1338 #include <optsmallperfcritical.h>
// Portable fast helper for the non-GC statics base of a shared class.
// Resolves the DomainLocalModule either directly (pointer-sized ID) or via
// the app domain's module table (encoded index), returns the precomputed
// base when the class is already initialized, and otherwise tailcalls the
// framed helper that runs the cctor.
1340 HCIMPL2(void*, JIT_GetSharedNonGCStaticBase_Portable, SIZE_T moduleDomainID, DWORD dwClassDomainID)
1344     DomainLocalModule *pLocalModule = NULL;
1346     if (!Module::IsEncodedModuleIndex(moduleDomainID))
1347         pLocalModule = (DomainLocalModule *) moduleDomainID;
1350         DomainLocalBlock *pLocalBlock = GetAppDomain()->GetDomainLocalBlock();
1351         pLocalModule = pLocalBlock->GetModuleSlot(Module::IDToIndex(moduleDomainID));
1354     // If type doesn't have a class constructor, the contents of this if statement may
1355     // still get executed.  JIT_GetSharedNonGCStaticBaseNoCtor should be used in this case.
1356     if (pLocalModule->IsPrecomputedClassInitialized(dwClassDomainID))
1358         return (void*)pLocalModule->GetPrecomputedNonGCStaticsBasePointer();
1361     // Tailcall to the slow helper
1363     return HCCALL2(JIT_GetSharedNonGCStaticBase_Helper, pLocalModule, dwClassDomainID);
1367 // No constructor version of JIT_GetSharedNonGCStaticBase.  Does not check if class has
1368 // been initialized.
// Used by the JIT when the type is known to have no cctor to run, so the
// initialized-bit check and slow path can be skipped entirely.
1369 HCIMPL1(void*, JIT_GetSharedNonGCStaticBaseNoCtor_Portable, SIZE_T moduleDomainID)
1373     DomainLocalModule *pLocalModule = NULL;
1375     if (!Module::IsEncodedModuleIndex(moduleDomainID))
1376         pLocalModule = (DomainLocalModule *) moduleDomainID;
1379         DomainLocalBlock *pLocalBlock = GetAppDomain()->GetDomainLocalBlock();
1380         pLocalModule = pLocalBlock->GetModuleSlot(Module::IDToIndex(moduleDomainID));
1383     return (void*)pLocalModule->GetPrecomputedNonGCStaticsBasePointer();
// GC-statics twin of JIT_GetSharedNonGCStaticBase_Portable: same module
// resolution and initialized-check, but returns the GC statics base.
1387 HCIMPL2(void*, JIT_GetSharedGCStaticBase_Portable, SIZE_T moduleDomainID, DWORD dwClassDomainID)
1391     DomainLocalModule *pLocalModule = NULL;
1393     if (!Module::IsEncodedModuleIndex(moduleDomainID))
1394         pLocalModule = (DomainLocalModule *) moduleDomainID;
1397         DomainLocalBlock *pLocalBlock = GetAppDomain()->GetDomainLocalBlock();
1398         pLocalModule = pLocalBlock->GetModuleSlot(Module::IDToIndex(moduleDomainID));
1401     // If type doesn't have a class constructor, the contents of this if statement may
1402     // still get executed.  JIT_GetSharedGCStaticBaseNoCtor should be used in this case.
1403     if (pLocalModule->IsPrecomputedClassInitialized(dwClassDomainID))
1405         return (void*)pLocalModule->GetPrecomputedGCStaticsBasePointer();
1408     // Tailcall to the slow helper
1410     return HCCALL2(JIT_GetSharedGCStaticBase_Helper, pLocalModule, dwClassDomainID);
1414 // No constructor version of JIT_GetSharedGCStaticBase.  Does not check if class has been initialized.
// Used when the type has no cctor; skips the initialized check entirely.
1416 HCIMPL1(void*, JIT_GetSharedGCStaticBaseNoCtor_Portable, SIZE_T moduleDomainID)
1420     DomainLocalModule *pLocalModule = NULL;
1422     if (!Module::IsEncodedModuleIndex(moduleDomainID))
1423         pLocalModule = (DomainLocalModule *) moduleDomainID;
1426         DomainLocalBlock *pLocalBlock = GetAppDomain()->GetDomainLocalBlock();
1427         pLocalModule = pLocalBlock->GetModuleSlot(Module::IDToIndex(moduleDomainID));
1430     return (void*)pLocalModule->GetPrecomputedGCStaticsBasePointer();
1434 #include <optdefault.h>
1437 // The following two functions can be tail called from platform dependent versions of
1438 // JIT_GetSharedGCStaticBase and JIT_GetShareNonGCStaticBase
// Slow path: under a helper frame, look up the MethodTable for the class
// and run its cctor if needed, then return the non-GC statics base.
1439 HCIMPL2(void*, JIT_GetSharedNonGCStaticBase_Helper, DomainLocalModule *pLocalModule, DWORD dwClassDomainID)
1443     HELPER_METHOD_FRAME_BEGIN_RET_0();
1445     // Obtain Method table
1446     MethodTable * pMT = pLocalModule->GetMethodTableFromClassDomainID(dwClassDomainID);
1448     PREFIX_ASSUME(pMT != NULL);
1449     pMT->CheckRunClassInitThrowing();
1450     HELPER_METHOD_FRAME_END();
1452     return (void*)pLocalModule->GetPrecomputedNonGCStaticsBasePointer();
// GC-statics twin of JIT_GetSharedNonGCStaticBase_Helper: run the cctor
// under a frame, then return the GC statics base.
1456 HCIMPL2(void*, JIT_GetSharedGCStaticBase_Helper, DomainLocalModule *pLocalModule, DWORD dwClassDomainID)
1460     HELPER_METHOD_FRAME_BEGIN_RET_0();
1462     // Obtain Method table
1463     MethodTable * pMT = pLocalModule->GetMethodTableFromClassDomainID(dwClassDomainID);
1465     PREFIX_ASSUME(pMT != NULL);
1466     pMT->CheckRunClassInitThrowing();
1467     HELPER_METHOD_FRAME_END();
1469     return (void*)pLocalModule->GetPrecomputedGCStaticsBasePointer();
1473 /*********************************************************************/
1474 // Slow helper to tail call from the fast one
// Framed slow path for dynamic (e.g. generic-instantiation) classes: resolves
// the MethodTable from the dynamic class ID, runs the cctor if needed, and
// returns the dynamic entry's non-GC statics base pointer.
1475 HCIMPL2(void*, JIT_GetSharedNonGCStaticBaseDynamicClass_Helper, DomainLocalModule *pLocalModule, DWORD dwDynamicClassDomainID)
1479 void* result = NULL;
1481 HELPER_METHOD_FRAME_BEGIN_RET_0();
1483 MethodTable *pMT = pLocalModule->GetDomainFile()->GetModule()->GetDynamicClassMT(dwDynamicClassDomainID);
// Run the class constructor if needed; throws on failure.
1486 pMT->CheckRunClassInitThrowing();
1488 result = (void*)pLocalModule->GetDynamicEntryNonGCStaticsBasePointer(dwDynamicClassDomainID, pMT->GetLoaderAllocator());
1489 HELPER_METHOD_FRAME_END();
1495 /*************************************************************/
1496 #include <optsmallperfcritical.h>
// Fast path for non-GC statics of a dynamic class: if the dynamic class info
// is already initialized, return the base pointer directly; otherwise tail
// call the framed helper above.
1497 HCIMPL2(void*, JIT_GetSharedNonGCStaticBaseDynamicClass, SIZE_T moduleDomainID, DWORD dwDynamicClassDomainID)
1501 DomainLocalModule *pLocalModule;
// Decode moduleDomainID: direct pointer vs. encoded module index.
1503 if (!Module::IsEncodedModuleIndex(moduleDomainID))
1504 pLocalModule = (DomainLocalModule *) moduleDomainID;
1507 DomainLocalBlock *pLocalBlock = GetAppDomain()->GetDomainLocalBlock();
1508 pLocalModule = pLocalBlock->GetModuleSlot(Module::IDToIndex(moduleDomainID));
1511 DomainLocalModule::PTR_DynamicClassInfo pLocalInfo = pLocalModule->GetDynamicClassInfoIfInitialized(dwDynamicClassDomainID);
1512 if (pLocalInfo != NULL)
// Macro returns the cached base pointer for the already-initialized class.
1515 GET_DYNAMICENTRY_NONGCSTATICS_BASEPOINTER(pLocalModule->GetDomainFile()->GetModule()->GetLoaderAllocator(),
1522 // Tailcall to the slow helper
1524 return HCCALL2(JIT_GetSharedNonGCStaticBaseDynamicClass_Helper, pLocalModule, dwDynamicClassDomainID);
1527 #include <optdefault.h>
1529 /*************************************************************/
1530 // Slow helper to tail call from the fast one
// Framed slow path that only triggers class initialization for a dynamic
// class — no statics base pointer is returned.
1531 HCIMPL2(void, JIT_ClassInitDynamicClass_Helper, DomainLocalModule *pLocalModule, DWORD dwDynamicClassDomainID)
1535 HELPER_METHOD_FRAME_BEGIN_0();
1537 MethodTable *pMT = pLocalModule->GetDomainFile()->GetModule()->GetDynamicClassMT(dwDynamicClassDomainID);
// Run the class constructor if needed; throws on failure.
1540 pMT->CheckRunClassInitThrowing();
1542 HELPER_METHOD_FRAME_END();
1548 #include <optsmallperfcritical.h>
// Fast path for dynamic-class initialization: if the class is already marked
// initialized this is a no-op; otherwise tail call the framed helper above.
1549 HCIMPL2(void, JIT_ClassInitDynamicClass, SIZE_T moduleDomainID, DWORD dwDynamicClassDomainID)
1553 DomainLocalModule *pLocalModule;
// Decode moduleDomainID: direct pointer vs. encoded module index.
1555 if (!Module::IsEncodedModuleIndex(moduleDomainID))
1556 pLocalModule = (DomainLocalModule *) moduleDomainID;
1559 DomainLocalBlock *pLocalBlock = GetAppDomain()->GetDomainLocalBlock();
1560 pLocalModule = pLocalBlock->GetModuleSlot(Module::IDToIndex(moduleDomainID));
1563 DomainLocalModule::PTR_DynamicClassInfo pLocalInfo = pLocalModule->GetDynamicClassInfoIfInitialized(dwDynamicClassDomainID);
// Already initialized: nothing to do (early return in the elided branch body).
1564 if (pLocalInfo != NULL)
1569 // Tailcall to the slow helper
1571 return HCCALL2(JIT_ClassInitDynamicClass_Helper, pLocalModule, dwDynamicClassDomainID);
1574 #include <optdefault.h>
1576 /*************************************************************/
1577 // Slow helper to tail call from the fast one
// Framed slow path for GC statics of a dynamic class: resolves the
// MethodTable, runs the cctor if needed, returns the dynamic entry's GC
// statics base pointer.
1578 HCIMPL2(void*, JIT_GetSharedGCStaticBaseDynamicClass_Helper, DomainLocalModule *pLocalModule, DWORD dwDynamicClassDomainID)
1582 void* result = NULL;
1584 HELPER_METHOD_FRAME_BEGIN_RET_0();
1586 MethodTable *pMT = pLocalModule->GetDomainFile()->GetModule()->GetDynamicClassMT(dwDynamicClassDomainID);
// Run the class constructor if needed; throws on failure.
1589 pMT->CheckRunClassInitThrowing();
1591 result = (void*)pLocalModule->GetDynamicEntryGCStaticsBasePointer(dwDynamicClassDomainID, pMT->GetLoaderAllocator());
1592 HELPER_METHOD_FRAME_END();
1598 /*************************************************************/
1599 #include <optsmallperfcritical.h>
// Fast path for GC statics of a dynamic class; mirrors the non-GC variant
// above, tail calling the framed helper when the class is not yet initialized.
1600 HCIMPL2(void*, JIT_GetSharedGCStaticBaseDynamicClass, SIZE_T moduleDomainID, DWORD dwDynamicClassDomainID)
1604 DomainLocalModule *pLocalModule;
// Decode moduleDomainID: direct pointer vs. encoded module index.
1606 if (!Module::IsEncodedModuleIndex(moduleDomainID))
1607 pLocalModule = (DomainLocalModule *) moduleDomainID;
1610 DomainLocalBlock *pLocalBlock = GetAppDomain()->GetDomainLocalBlock();
1611 pLocalModule = pLocalBlock->GetModuleSlot(Module::IDToIndex(moduleDomainID));
1614 DomainLocalModule::PTR_DynamicClassInfo pLocalInfo = pLocalModule->GetDynamicClassInfoIfInitialized(dwDynamicClassDomainID);
1615 if (pLocalInfo != NULL)
// Macro returns the cached GC statics base pointer for the initialized class.
1618 GET_DYNAMICENTRY_GCSTATICS_BASEPOINTER(pLocalModule->GetDomainFile()->GetModule()->GetLoaderAllocator(),
1625 // Tailcall to the slow helper
1627 return HCCALL2(JIT_GetSharedGCStaticBaseDynamicClass_Helper, pLocalModule, dwDynamicClassDomainID);
1630 #include <optdefault.h>
1632 /*********************************************************************/
1633 // Slow helper to tail call from the fast one
// Framed slow path for generics GC statics: restores the MethodTable if
// necessary, runs the cctor, and returns the GC statics base pointer.
// NOINLINE keeps this cold path out of the fast helper's code.
1634 NOINLINE HCIMPL1(void*, JIT_GetGenericsGCStaticBase_Framed, MethodTable *pMT)
1638 PRECONDITION(CheckPointer(pMT));
1639 PRECONDITION(pMT->HasGenericsStaticsInfo());
1644 HELPER_METHOD_FRAME_BEGIN_RET_0();
// Ensure the (possibly NGEN'd) MethodTable is fully restored before use.
1646 pMT->CheckRestore();
1648 pMT->CheckRunClassInitThrowing();
1650 base = (void*) pMT->GetGCStaticsBasePointer();
1651 CONSISTENCY_CHECK(base != NULL);
1653 HELPER_METHOD_FRAME_END();
1659 /*********************************************************************/
1660 #include <optsmallperfcritical.h>
// Fast path for generics GC statics: looks up the dynamic class info via the
// generic's statics module/ID and returns the cached base pointer when the
// class is already initialized; otherwise tail calls the framed helper.
1661 HCIMPL1(void*, JIT_GetGenericsGCStaticBase, MethodTable *pMT)
1665 PRECONDITION(CheckPointer(pMT));
1666 PRECONDITION(pMT->HasGenericsStaticsInfo());
1669 DWORD dwDynamicClassDomainID;
// Generic instantiations store statics under a (module, dynamic class ID) pair.
1670 PTR_Module pModuleForStatics = pMT->GetGenericsStaticsModuleAndID(&dwDynamicClassDomainID);
1672 DomainLocalModule *pLocalModule = pModuleForStatics->GetDomainLocalModule();
1673 _ASSERTE(pLocalModule);
1675 DomainLocalModule::PTR_DynamicClassInfo pLocalInfo = pLocalModule->GetDynamicClassInfoIfInitialized(dwDynamicClassDomainID);
1676 if (pLocalInfo != NULL)
1679 GET_DYNAMICENTRY_GCSTATICS_BASEPOINTER(pMT->GetLoaderAllocator(),
1686 // Tailcall to the slow helper
1688 return HCCALL1(JIT_GetGenericsGCStaticBase_Framed, pMT);
1691 #include <optdefault.h>
1693 /*********************************************************************/
1694 // Slow helper to tail call from the fast one
// Framed slow path for generics non-GC statics: restores the MethodTable,
// runs the cctor, and returns the non-GC statics base pointer.
1695 NOINLINE HCIMPL1(void*, JIT_GetGenericsNonGCStaticBase_Framed, MethodTable *pMT)
1699 PRECONDITION(CheckPointer(pMT));
1700 PRECONDITION(pMT->HasGenericsStaticsInfo());
1705 HELPER_METHOD_FRAME_BEGIN_RET_0();
1707 pMT->CheckRestore();
1709 // If pMT refers to a method table that requires some initialization work,
1710 // then pMT cannot to a method table that is shared by generic instantiations,
1711 // because method tables that are shared by generic instantiations do not have
1712 // a base for statics to live in.
1713 _ASSERTE(pMT->IsClassPreInited() || !pMT->IsSharedByGenericInstantiations());
1715 pMT->CheckRunClassInitThrowing();
1717 // We could just return null here instead of returning base when this helper is called just to trigger the cctor
1718 base = (void*) pMT->GetNonGCStaticsBasePointer();
1720 HELPER_METHOD_FRAME_END();
1726 /*********************************************************************/
1727 #include <optsmallperfcritical.h>
// Fast path for generics non-GC statics; mirrors JIT_GetGenericsGCStaticBase
// but returns the non-GC base pointer.
1728 HCIMPL1(void*, JIT_GetGenericsNonGCStaticBase, MethodTable *pMT)
1732 PRECONDITION(CheckPointer(pMT));
1733 PRECONDITION(pMT->HasGenericsStaticsInfo());
1736 // This fast path will typically always be taken once the slow framed path below
1737 // has executed once. Sometimes the slow path will be executed more than once,
1738 // e.g. if static fields are accessed during the call to CheckRunClassInitThrowing()
1739 // in the slow path.
1741 DWORD dwDynamicClassDomainID;
1742 PTR_Module pModuleForStatics = pMT->GetGenericsStaticsModuleAndID(&dwDynamicClassDomainID);
1744 DomainLocalModule *pLocalModule = pModuleForStatics->GetDomainLocalModule();
1745 _ASSERTE(pLocalModule);
1747 DomainLocalModule::PTR_DynamicClassInfo pLocalInfo = pLocalModule->GetDynamicClassInfoIfInitialized(dwDynamicClassDomainID);
1748 if (pLocalInfo != NULL)
1751 GET_DYNAMICENTRY_NONGCSTATICS_BASEPOINTER(pMT->GetLoaderAllocator(),
1758 // Tailcall to the slow helper
1760 return HCCALL1(JIT_GetGenericsNonGCStaticBase_Framed, pMT);
1763 #include <optdefault.h>
1766 //========================================================================
1768 // THREAD STATIC FIELD HELPERS
1770 //========================================================================
1773 // *** These framed helpers get called if allocation needs to occur or
1774 // if the class constructor needs to run
// Framed thread-statics slow path: allocates/locates the ThreadLocalModule
// for pMT's module, runs the cctor if needed, and returns the thread-local
// non-GC statics base pointer for the current thread.
1776 HCIMPL1(void*, JIT_GetNonGCThreadStaticBase_Helper, MethodTable * pMT)
1780 PRECONDITION(CheckPointer(pMT));
1785 HELPER_METHOD_FRAME_BEGIN_RET_0();
1787 // For generics, we need to call CheckRestore() for some reason
1788 if (pMT->HasGenericsStaticsInfo())
1789 pMT->CheckRestore();
// GetTLM allocates the ThreadLocalModule for this thread if it doesn't exist yet.
1792 ThreadLocalModule * pThreadLocalModule = ThreadStatics::GetTLM(pMT);
1793 _ASSERTE(pThreadLocalModule != NULL);
1795 // Check if the class constructor needs to be run
1796 pThreadLocalModule->CheckRunClassInitThrowing(pMT);
1798 // Lookup the non-GC statics base pointer
1799 base = (void*) pMT->GetNonGCThreadStaticsBasePointer();
1800 CONSISTENCY_CHECK(base != NULL);
1802 HELPER_METHOD_FRAME_END();
// Framed thread-statics slow path for GC statics; identical to the non-GC
// variant above except for the base pointer returned.
1808 HCIMPL1(void*, JIT_GetGCThreadStaticBase_Helper, MethodTable * pMT)
1812 PRECONDITION(CheckPointer(pMT));
1817 HELPER_METHOD_FRAME_BEGIN_RET_0();
1819 // For generics, we need to call CheckRestore() for some reason
1820 if (pMT->HasGenericsStaticsInfo())
1821 pMT->CheckRestore();
// GetTLM allocates the ThreadLocalModule for this thread if needed.
1824 ThreadLocalModule * pThreadLocalModule = ThreadStatics::GetTLM(pMT);
1825 _ASSERTE(pThreadLocalModule != NULL);
1827 // Check if the class constructor needs to be run
1828 pThreadLocalModule->CheckRunClassInitThrowing(pMT);
1830 // Lookup the GC statics base pointer
1831 base = (void*) pMT->GetGCThreadStaticsBasePointer();
1832 CONSISTENCY_CHECK(base != NULL);
1834 HELPER_METHOD_FRAME_END();
1841 // *** This helper corresponds to both CORINFO_HELP_GETSHARED_NONGCTHREADSTATIC_BASE and
1842 // CORINFO_HELP_GETSHARED_NONGCTHREADSTATIC_BASE_NOCTOR. Even though we always check
1843 // if the class constructor has been run, we have a separate helper ID for the "no ctor"
1844 // version because it allows the JIT to do some reordering that otherwise wouldn't be possible.
1847 #include <optsmallperfcritical.h>
// Fast path for shared non-GC thread statics: if the thread's TLM exists and
// the class is initialized, return the precomputed base directly; otherwise
// resolve the MethodTable and tail call the framed helper.
1848 HCIMPL2(void*, JIT_GetSharedNonGCThreadStaticBase, SIZE_T moduleDomainID, DWORD dwClassDomainID)
1852 // Get the ModuleIndex
1854 (Module::IsEncodedModuleIndex(moduleDomainID)) ?
1855 Module::IDToIndex(moduleDomainID) :
1856 ((DomainLocalModule *)moduleDomainID)->GetModuleIndex();
1858 // Get the relevant ThreadLocalModule
// GetTLMIfExists does not allocate — a NULL result routes to the slow path.
1859 ThreadLocalModule * pThreadLocalModule = ThreadStatics::GetTLMIfExists(index);
1861 // If the TLM has been allocated and the class has been marked as initialized,
1862 // get the pointer to the non-GC statics base and return
1863 if (pThreadLocalModule != NULL && pThreadLocalModule->IsPrecomputedClassInitialized(dwClassDomainID))
1864 return (void*)pThreadLocalModule->GetPrecomputedNonGCStaticsBasePointer();
1866 // If the TLM was not allocated or if the class was not marked as initialized
1867 // then we have to go through the slow path
1869 // Get the DomainLocalModule
1870 DomainLocalModule *pDomainLocalModule =
1871 (Module::IsEncodedModuleIndex(moduleDomainID)) ?
1872 GetAppDomain()->GetDomainLocalBlock()->GetModuleSlot(Module::IDToIndex(moduleDomainID)) :
1873 (DomainLocalModule *) moduleDomainID;
1875 // Obtain the MethodTable
1876 MethodTable * pMT = pDomainLocalModule->GetMethodTableFromClassDomainID(dwClassDomainID);
1877 _ASSERTE(!pMT->HasGenericsStaticsInfo());
1880 return HCCALL1(JIT_GetNonGCThreadStaticBase_Helper, pMT);
1883 #include <optdefault.h>
1885 // *** This helper corresponds to both CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE and
1886 // CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE_NOCTOR. Even though we always check
1887 // if the class constructor has been run, we have a separate helper ID for the "no ctor"
1888 // version because it allows the JIT to do some reordering that otherwise wouldn't be possible.
1891 #include <optsmallperfcritical.h>
// Fast path for shared GC thread statics; mirrors the non-GC variant above.
1892 HCIMPL2(void*, JIT_GetSharedGCThreadStaticBase, SIZE_T moduleDomainID, DWORD dwClassDomainID)
1896 // Get the ModuleIndex
1898 (Module::IsEncodedModuleIndex(moduleDomainID)) ?
1899 Module::IDToIndex(moduleDomainID) :
1900 ((DomainLocalModule *)moduleDomainID)->GetModuleIndex();
1902 // Get the relevant ThreadLocalModule
// GetTLMIfExists does not allocate — a NULL result routes to the slow path.
1903 ThreadLocalModule * pThreadLocalModule = ThreadStatics::GetTLMIfExists(index);
1905 // If the TLM has been allocated and the class has been marked as initialized,
1906 // get the pointer to the GC statics base and return
1907 if (pThreadLocalModule != NULL && pThreadLocalModule->IsPrecomputedClassInitialized(dwClassDomainID))
1908 return (void*)pThreadLocalModule->GetPrecomputedGCStaticsBasePointer();
1910 // If the TLM was not allocated or if the class was not marked as initialized
1911 // then we have to go through the slow path
1913 // Get the DomainLocalModule
1914 DomainLocalModule *pDomainLocalModule =
1915 (Module::IsEncodedModuleIndex(moduleDomainID)) ?
1916 GetAppDomain()->GetDomainLocalBlock()->GetModuleSlot(Module::IDToIndex(moduleDomainID)) :
1917 (DomainLocalModule *) moduleDomainID;
1919 // Obtain the MethodTable
1920 MethodTable * pMT = pDomainLocalModule->GetMethodTableFromClassDomainID(dwClassDomainID);
1921 _ASSERTE(!pMT->HasGenericsStaticsInfo());
1924 return HCCALL1(JIT_GetGCThreadStaticBase_Helper, pMT);
1927 #include <optdefault.h>
1929 // *** This helper corresponds to CORINFO_HELP_GETSHARED_NONGCTHREADSTATIC_BASE_DYNAMICCLASS
1931 #include <optsmallperfcritical.h>
// Fast path for non-GC thread statics of a dynamic class: consult the current
// thread's TLM for an initialized dynamic class entry, else tail call the
// framed helper with the resolved MethodTable.
1932 HCIMPL2(void*, JIT_GetSharedNonGCThreadStaticBaseDynamicClass, SIZE_T moduleDomainID, DWORD dwDynamicClassDomainID)
1936 // Obtain the DomainLocalModule
1937 DomainLocalModule *pDomainLocalModule =
1938 (Module::IsEncodedModuleIndex(moduleDomainID)) ?
1939 GetAppDomain()->GetDomainLocalBlock()->GetModuleSlot(Module::IDToIndex(moduleDomainID)) :
1940 (DomainLocalModule *)moduleDomainID;
1942 // Get the ModuleIndex
1943 ModuleIndex index = pDomainLocalModule->GetModuleIndex();
1945 // Get the relevant ThreadLocalModule
1946 ThreadLocalModule * pThreadLocalModule = ThreadStatics::GetTLMIfExists(index);
1948 // If the TLM has been allocated and the class has been marked as initialized,
1949 // get the pointer to the non-GC statics base and return
1950 if (pThreadLocalModule != NULL)
1952 ThreadLocalModule::PTR_DynamicClassInfo pLocalInfo = pThreadLocalModule->GetDynamicClassInfoIfInitialized(dwDynamicClassDomainID);
1953 if (pLocalInfo != NULL)
1956 GET_DYNAMICENTRY_NONGCTHREADSTATICS_BASEPOINTER(pDomainLocalModule->GetDomainFile()->GetModule()->GetLoaderAllocator(),
1963 // If the TLM was not allocated or if the class was not marked as initialized
1964 // then we have to go through the slow path
1966 // Obtain the Module
1967 Module * pModule = pDomainLocalModule->GetDomainFile()->GetModule();
1969 // Obtain the MethodTable
1970 MethodTable * pMT = pModule->GetDynamicClassMT(dwDynamicClassDomainID);
1971 _ASSERTE(pMT != NULL);
1972 _ASSERTE(!pMT->IsSharedByGenericInstantiations());
1974 // Tailcall to the slow helper
1977 return HCCALL1(JIT_GetNonGCThreadStaticBase_Helper, pMT);
1981 #include <optdefault.h>
1983 // *** This helper corresponds to CORINFO_HELP_GETSHARED_GCTHREADSTATIC_BASE_DYNAMICCLASS
1985 #include <optsmallperfcritical.h>
// Fast path for GC thread statics of a dynamic class; mirrors the non-GC
// variant above.
1986 HCIMPL2(void*, JIT_GetSharedGCThreadStaticBaseDynamicClass, SIZE_T moduleDomainID, DWORD dwDynamicClassDomainID)
1990 // Obtain the DomainLocalModule
1991 DomainLocalModule *pDomainLocalModule =
1992 (Module::IsEncodedModuleIndex(moduleDomainID)) ?
1993 GetAppDomain()->GetDomainLocalBlock()->GetModuleSlot(Module::IDToIndex(moduleDomainID)) :
1994 (DomainLocalModule *)moduleDomainID;
1996 // Get the ModuleIndex
1997 ModuleIndex index = pDomainLocalModule->GetModuleIndex();
1999 // Get the relevant ThreadLocalModule
2000 ThreadLocalModule * pThreadLocalModule = ThreadStatics::GetTLMIfExists(index);
2002 // If the TLM has been allocated and the class has been marked as initialized,
2003 // get the pointer to the GC statics base and return
2004 if (pThreadLocalModule != NULL)
2006 ThreadLocalModule::PTR_DynamicClassInfo pLocalInfo = pThreadLocalModule->GetDynamicClassInfoIfInitialized(dwDynamicClassDomainID);
2007 if (pLocalInfo != NULL)
2010 GET_DYNAMICENTRY_GCTHREADSTATICS_BASEPOINTER(pDomainLocalModule->GetDomainFile()->GetModule()->GetLoaderAllocator(),
2018 // If the TLM was not allocated or if the class was not marked as initialized
2019 // then we have to go through the slow path
2021 // Obtain the Module
2022 Module * pModule = pDomainLocalModule->GetDomainFile()->GetModule();
2024 // Obtain the MethodTable
2025 MethodTable * pMT = pModule->GetDynamicClassMT(dwDynamicClassDomainID);
2026 _ASSERTE(pMT != NULL);
2027 _ASSERTE(!pMT->IsSharedByGenericInstantiations());
2029 // Tailcall to the slow helper
2031 return HCCALL1(JIT_GetGCThreadStaticBase_Helper, pMT);
2034 #include <optdefault.h>
2036 // *** This helper corresponds to CORINFO_HELP_GETGENERICS_NONGCTHREADSTATIC_BASE
2038 #include <optsmallperfcritical.h>
// Fast path for generics non-GC thread statics: looks up the thread's TLM via
// the generic's statics module and dynamic class ID; falls back to the framed
// thread-statics helper when not yet allocated/initialized.
2039 HCIMPL1(void*, JIT_GetGenericsNonGCThreadStaticBase, MethodTable *pMT)
2043 PRECONDITION(CheckPointer(pMT));
2044 PRECONDITION(pMT->HasGenericsStaticsInfo());
2047 // This fast path will typically always be taken once the slow framed path below
2048 // has executed once. Sometimes the slow path will be executed more than once,
2049 // e.g. if static fields are accessed during the call to CheckRunClassInitThrowing()
2050 // in the slow path.
2052 // Get the Module and dynamic class ID
2053 DWORD dwDynamicClassDomainID;
2054 PTR_Module pModule = pMT->GetGenericsStaticsModuleAndID(&dwDynamicClassDomainID);
2057 ModuleIndex index = pModule->GetModuleIndex();
2059 // Get the relevant ThreadLocalModule
2060 ThreadLocalModule * pThreadLocalModule = ThreadStatics::GetTLMIfExists(index);
2062 // If the TLM has been allocated and the class has been marked as initialized,
2063 // get the pointer to the non-GC statics base and return
2064 if (pThreadLocalModule != NULL)
2066 ThreadLocalModule::PTR_DynamicClassInfo pLocalInfo = pThreadLocalModule->GetDynamicClassInfoIfInitialized(dwDynamicClassDomainID);
2067 if (pLocalInfo != NULL)
2070 GET_DYNAMICENTRY_NONGCSTATICS_BASEPOINTER(pMT->GetLoaderAllocator(),
2078 // If the TLM was not allocated or if the class was not marked as initialized
2079 // then we have to go through the slow path
2081 // Tailcall to the slow helper
2083 return HCCALL1(JIT_GetNonGCThreadStaticBase_Helper, pMT);
2086 #include <optdefault.h>
2088 // *** This helper corresponds to CORINFO_HELP_GETGENERICS_GCTHREADSTATIC_BASE
2090 #include <optsmallperfcritical.h>
// Fast path for generics GC thread statics; mirrors the non-GC variant above.
2091 HCIMPL1(void*, JIT_GetGenericsGCThreadStaticBase, MethodTable *pMT)
2095 PRECONDITION(CheckPointer(pMT));
2096 PRECONDITION(pMT->HasGenericsStaticsInfo());
2099 // This fast path will typically always be taken once the slow framed path below
2100 // has executed once. Sometimes the slow path will be executed more than once,
2101 // e.g. if static fields are accessed during the call to CheckRunClassInitThrowing()
2102 // in the slow path.
2104 // Get the Module and dynamic class ID
2105 DWORD dwDynamicClassDomainID;
2106 PTR_Module pModule = pMT->GetGenericsStaticsModuleAndID(&dwDynamicClassDomainID);
2109 ModuleIndex index = pModule->GetModuleIndex();
2111 // Get the relevant ThreadLocalModule
2112 ThreadLocalModule * pThreadLocalModule = ThreadStatics::GetTLMIfExists(index);
2114 // If the TLM has been allocated and the class has been marked as initialized,
2115 // get the pointer to the GC statics base and return
2116 if (pThreadLocalModule != NULL)
2118 ThreadLocalModule::PTR_DynamicClassInfo pLocalInfo = pThreadLocalModule->GetDynamicClassInfoIfInitialized(dwDynamicClassDomainID);
2119 if (pLocalInfo != NULL)
2122 GET_DYNAMICENTRY_GCTHREADSTATICS_BASEPOINTER(pMT->GetLoaderAllocator(),
2130 // If the TLM was not allocated or if the class was not marked as initialized
2131 // then we have to go through the slow path
2133 // Tailcall to the slow helper
2135 return HCCALL1(JIT_GetGCThreadStaticBase_Helper, pMT);
2138 #include <optdefault.h>
2140 //========================================================================
2142 // STATIC FIELD DYNAMIC HELPERS
2144 //========================================================================
2146 #include <optsmallperfcritical.h>
// Dynamic static-field-address helper: invokes the recorded statics-base
// helper with its two captured arguments and adds the field's offset.
2147 HCIMPL1_RAW(TADDR, JIT_StaticFieldAddress_Dynamic, StaticFieldAddressArgs * pArgs)
2151 TADDR base = HCCALL2(pArgs->staticBaseHelper, pArgs->arg0, pArgs->arg1);
2152 return base + pArgs->offset;
2155 #include <optdefault.h>
2157 #include <optsmallperfcritical.h>
// Like JIT_StaticFieldAddress_Dynamic, but for boxed value-type statics: the
// slot holds an object reference, so dereference it and skip the object
// header to reach the unboxed field data.
2158 HCIMPL1_RAW(TADDR, JIT_StaticFieldAddressUnbox_Dynamic, StaticFieldAddressArgs * pArgs)
2162 TADDR base = HCCALL2(pArgs->staticBaseHelper, pArgs->arg0, pArgs->arg1);
2163 return *(TADDR *)(base + pArgs->offset) + Object::GetOffsetOfFirstField();
2166 #include <optdefault.h>
2168 //========================================================================
2172 //========================================================================
2174 // pObject MUST be an instance of an array.
// GC-free array cast test: decides whether the array object pObject can be
// cast to the array type toTypeHnd without triggering a GC. Returns
// CanCast / CannotCast, or (via the TypeDesc path) possibly MaybeCast when a
// definitive answer needs the framed slow path.
2175 TypeHandle::CastResult ArrayIsInstanceOfNoGC(Object *pObject, TypeHandle toTypeHnd)
2181 PRECONDITION(CheckPointer(pObject));
2182 PRECONDITION(pObject->GetMethodTable()->IsArray());
2183 PRECONDITION(toTypeHnd.IsArray());
2186 ArrayBase *pArray = (ArrayBase*) pObject;
2187 ArrayTypeDesc *toArrayType = toTypeHnd.AsArray();
2189 // GetRank touches EEClass. Try to avoid it for SZArrays.
2190 if (toArrayType->GetInternalCorElementType() == ELEMENT_TYPE_SZARRAY)
2192 if (pArray->GetMethodTable()->IsMultiDimArray())
2193 return TypeHandle::CannotCast;
2197 if (pArray->GetRank() != toArrayType->GetRank())
2198 return TypeHandle::CannotCast;
2200 _ASSERTE(pArray->GetRank() == toArrayType->GetRank());
2202 // ArrayBase::GetTypeHandle consults the loader tables to find the
2203 // exact type handle for an array object. This can be disproportionately slow - but after
2204 // all, why should we need to go looking up hash tables just to do a cast test?
2206 // Thus we can always special-case the casting logic to avoid fetching this
2207 // exact type handle. Here we have only done so for one
2208 // particular case, i.e. when we are trying to cast to an array type where
2209 // there is an exact match between the rank, kind and element type of the two
2210 // array types. This happens when, for example, assigning an int32[] into an int32[][].
2213 TypeHandle elementTypeHandle = pArray->GetArrayElementTypeHandle();
2214 TypeHandle toElementTypeHandle = toArrayType->GetArrayElementTypeHandle();
// Exact element-type match with matching rank/kind: trivially castable.
2216 if (elementTypeHandle == toElementTypeHandle)
2217 return TypeHandle::CanCast;
2219 // By this point we know that toArrayType->GetInternalCorElementType matches the element type of the Array object
2220 // so we can use a faster constructor to create the TypeDesc. (It so happens that ArrayTypeDescs derives from ParamTypeDesc
2221 // and can be created as identical in a slightly faster way with the following set of parameters.)
// Stack-allocated temporary TypeDesc standing in for the source array's type.
2222 ParamTypeDesc arrayType(toArrayType->GetInternalCorElementType(), pArray->GetMethodTable(), elementTypeHandle);
2223 return arrayType.CanCastToNoGC(toTypeHnd);
2226 // pObject MUST be an instance of an array.
// GC-free test of whether an array object supports one of the "bizarre" array
// interfaces (IList<T>, IReadOnlyList<T>, etc.). Returns MaybeCast when the
// interface isn't loaded far enough to answer without the framed slow path.
2227 TypeHandle::CastResult ArrayObjSupportsBizarreInterfaceNoGC(Object *pObject, MethodTable * pInterfaceMT)
2233 PRECONDITION(CheckPointer(pObject));
2234 PRECONDITION(pObject->GetMethodTable()->IsArray());
2235 PRECONDITION(pInterfaceMT->IsInterface());
2238 ArrayBase *pArray = (ArrayBase*) pObject;
2240 // IList<T> & IReadOnlyList<T> only supported for SZ_ARRAYS
2241 if (pArray->GetMethodTable()->IsMultiDimArray())
2242 return TypeHandle::CannotCast;
// Interface not restored to CLASS_DEPENDENCIES_LOADED: we cannot inspect it here.
2245 if (pInterfaceMT->GetLoadLevel() < CLASS_DEPENDENCIES_LOADED)
2246 if (!pInterfaceMT->HasInstantiation())
2247 return TypeHandle::CannotCast;
2248 // The slow path will take care of restoring the interface
2249 return TypeHandle::MaybeCast;
2252 if (!IsImplicitInterfaceOfSZArray(pInterfaceMT))
2253 return TypeHandle::CannotCast;
// Variance check on the element type vs. the interface's type argument.
2255 return TypeDesc::CanCastParamNoGC(pArray->GetArrayElementTypeHandle(), pInterfaceMT->GetInstantiation()[0]);
// GC-free general isinst test: handles exact match, COM/ICastable deferral,
// arrays, interfaces, Object/Array bases, TypeDescs, and Nullable<T> before
// falling through to the MethodTable cast check. MaybeCast means "ask the
// framed ObjIsInstanceOf".
2258 TypeHandle::CastResult STDCALL ObjIsInstanceOfNoGC(Object *pObject, TypeHandle toTypeHnd)
2264 PRECONDITION(CheckPointer(pObject));
2268 MethodTable *pMT = pObject->GetMethodTable();
2270 // Quick exact match first
2271 if (TypeHandle(pMT) == toTypeHnd)
2272 return TypeHandle::CanCast;
// COM objects and ICastable implementers may dynamically support extra
// interfaces; only the GC-triggering slow path can decide.
2274 if ((toTypeHnd.IsInterface() && ( pMT->IsComObjectType() || pMT->IsICastable())))
2276 return TypeHandle::MaybeCast;
2281 if (toTypeHnd.IsArray())
2282 return ArrayIsInstanceOfNoGC(pObject, toTypeHnd);
2284 if (toTypeHnd.IsInterface())
2286 MethodTable * pInterfaceMT = toTypeHnd.AsMethodTable();
2287 if (pInterfaceMT->HasInstantiation())
2288 return ArrayObjSupportsBizarreInterfaceNoGC(pObject, pInterfaceMT);
2289 return pMT->ImplementsInterface(pInterfaceMT) ? TypeHandle::CanCast : TypeHandle::CannotCast;
// Arrays are castable to System.Object and System.Array only.
2292 if (toTypeHnd == TypeHandle(g_pObjectClass) || toTypeHnd == TypeHandle(g_pArrayClass))
2293 return TypeHandle::CanCast;
2295 return TypeHandle::CannotCast;
2298 if (toTypeHnd.IsTypeDesc())
2299 return TypeHandle::CannotCast;
2301 // allow an object of type T to be cast to Nullable<T> (they have the same representation)
2302 if (Nullable::IsNullableForTypeNoGC(toTypeHnd, pMT))
2303 return TypeHandle::CanCast;
2305 return pMT->CanCastToClassOrInterfaceNoGC(toTypeHnd.AsMethodTable());
// Framed (GC-triggering) general isinst/castclass test. Protects the object,
// consults CanCastTo, then handles COM interop, Nullable<T>, and ICastable.
// When throwCastException is TRUE, a failed cast raises InvalidCastException
// (or the exception produced by the ICastable callback) instead of returning.
2308 BOOL ObjIsInstanceOf(Object *pObject, TypeHandle toTypeHnd, BOOL throwCastException)
2314 PRECONDITION(CheckPointer(pObject));
2319 OBJECTREF obj = ObjectToOBJECTREF(pObject);
2321 GCPROTECT_BEGIN(obj);
2323 TypeHandle fromTypeHnd = obj->GetTypeHandle();
2325 // If we are trying to cast a proxy we need to delegate to remoting
2326 // services which will determine whether the proxy and the type are compatible.
2327 // Start by doing a quick static cast check to see if the type information captured in
2328 // the metadata indicates that the cast is legal.
2329 if (fromTypeHnd.CanCastTo(toTypeHnd))
2334 #ifdef FEATURE_COMINTEROP
2335 // If we are casting a COM object from interface then we need to do a check to see
2336 // if it implements the interface.
2337 if (toTypeHnd.IsInterface() && fromTypeHnd.GetMethodTable()->IsComObjectType())
2339 fCast = ComObject::SupportsInterface(obj, toTypeHnd.AsMethodTable());
2342 #endif // FEATURE_COMINTEROP
2343 if (Nullable::IsNullableForType(toTypeHnd, obj->GetMethodTable()))
2345 // allow an object of type T to be cast to Nullable<T> (they have the same representation)
2348 #ifdef FEATURE_ICASTABLE
2349 // If type implements ICastable interface we give it a chance to tell us if it can be casted
2351 else if (toTypeHnd.IsInterface() && fromTypeHnd.GetMethodTable()->IsICastable())
2353 // Make actuall call to ICastableHelpers.IsInstanceOfInterface(obj, interfaceTypeObj, out exception)
2354 OBJECTREF exception = NULL;
2355 GCPROTECT_BEGIN(exception);
2357 PREPARE_NONVIRTUAL_CALLSITE(METHOD__ICASTABLEHELPERS__ISINSTANCEOF);
2359 OBJECTREF managedType = toTypeHnd.GetManagedClassObject(); //GC triggers
2361 DECLARE_ARGHOLDER_ARRAY(args, 3);
2362 args[ARGNUM_0] = OBJECTREF_TO_ARGHOLDER(obj);
2363 args[ARGNUM_1] = OBJECTREF_TO_ARGHOLDER(managedType);
2364 args[ARGNUM_2] = PTR_TO_ARGHOLDER(&exception);
2366 CALL_MANAGED_METHOD(fCast, BOOL, args);
2367 INDEBUG(managedType = NULL); // managedType isn't protected during the call
// Prefer the user-supplied exception from the ICastable callback over the
// generic InvalidCastException when the cast fails and throwing was requested.
2369 if (!fCast && throwCastException && exception != NULL)
2371 RealCOMPlusThrow(exception);
2373 GCPROTECT_END(); //exception
2375 #endif // FEATURE_ICASTABLE
2377 if (!fCast && throwCastException)
2379 COMPlusThrowInvalidCastException(&obj, toTypeHnd);
2382 GCPROTECT_END(); // obj
2388 // This optimization is intended for all non-framed casting helpers
2391 #include <optsmallperfcritical.h>
// Portable castclass for non-interface, non-array classes: walks the parent
// MethodTable chain looking for pTargetMT; on miss (or equivalence/special
// cases) tail calls the framed JITutil_ChkCastAny which throws on failure.
// A NULL object trivially succeeds (castclass of null yields null).
2393 HCIMPL2(Object*, JIT_ChkCastClass_Portable, MethodTable* pTargetMT, Object* pObject)
2398 // casts pObject to type pMT
2401 if (NULL == pObject)
2406 PTR_VOID pMT = pObject->GetMethodTable();
2409 if (pMT == pTargetMT)
// Climb to the parent MethodTable (possibly via an indirection for NGEN).
2412 pMT = MethodTable::GetParentMethodTableOrIndirection(pMT);
2416 return HCCALL2(JITutil_ChkCastAny, CORINFO_CLASS_HANDLE(pTargetMT), pObject);
2421 // This helper assumes that the check for the trivial cases has been inlined by the JIT.
// Variant of JIT_ChkCastClass_Portable used when the JIT has already inlined
// the null check and the exact-match check: starts the parent walk one level up.
2423 HCIMPL2(Object*, JIT_ChkCastClassSpecial_Portable, MethodTable* pTargetMT, Object* pObject)
2427 // This assumes that the check for the trivial cases has been inlined by the JIT.
2428 PRECONDITION(pObject != NULL);
2429 PRECONDITION(pObject->GetMethodTable() != pTargetMT);
2432 PTR_VOID pMT = MethodTable::GetParentMethodTableOrIndirection(pObject->GetMethodTable());
2436 if (pMT == pTargetMT)
2439 pMT = MethodTable::GetParentMethodTableOrIndirection(pMT);
2443 return HCCALL2(JITutil_ChkCastAny, CORINFO_CLASS_HANDLE(pTargetMT), pObject);
// Portable isinst for classes: parent-chain walk like ChkCast, but a miss
// returns NULL instead of throwing; only type-equivalent types need the
// framed JITutil_IsInstanceOfAny fallback.
2447 HCIMPL2(Object*, JIT_IsInstanceOfClass_Portable, MethodTable* pTargetMT, Object* pObject)
2452 // casts pObject to type pMT
2455 if (NULL == pObject)
2460 PTR_VOID pMT = pObject->GetMethodTable();
2463 if (pMT == pTargetMT)
2466 pMT = MethodTable::GetParentMethodTableOrIndirection(pMT);
// Without type equivalence the parent walk is definitive (elided branch
// returns NULL here); otherwise fall through to the slow helper.
2469 if (!pObject->GetMethodTable()->HasTypeEquivalence())
2475 return HCCALL2(JITutil_IsInstanceOfAny, CORINFO_CLASS_HANDLE(pTargetMT), pObject);
// Portable castclass for interfaces: inline interface-map check; on miss tail
// call the framed JITutil_ChkCastInterface which throws on failure.
2479 HCIMPL2(Object*, JIT_ChkCastInterface_Portable, MethodTable *pInterfaceMT, Object* pObject)
2483 PRECONDITION(pInterfaceMT->IsInterface());
2486 if (NULL == pObject)
2491 if (pObject->GetMethodTable()->ImplementsInterfaceInline(pInterfaceMT))
2497 return HCCALL2(JITutil_ChkCastInterface, pInterfaceMT, pObject);
// Portable isinst for interfaces: inline interface-map check; only objects
// needing a non-trivial cast (arrays, COM, ICastable, variance) go to the
// framed JITutil_IsInstanceOfInterface — all others fail fast (elided branch).
2501 HCIMPL2(Object*, JIT_IsInstanceOfInterface_Portable, MethodTable *pInterfaceMT, Object* pObject)
2505 PRECONDITION(pInterfaceMT->IsInterface());
2508 if (NULL == pObject)
2513 if (pObject->GetMethodTable()->ImplementsInterfaceInline(pInterfaceMT))
2518 if (!pObject->GetMethodTable()->InstanceRequiresNonTrivialInterfaceCast())
2524 return HCCALL2(JITutil_IsInstanceOfInterface, pInterfaceMT, pObject);
// Castclass to an array type: tries the GC-free array test first; CanCast
// succeeds inline (elided branch), anything else goes to the framed
// JITutil_ChkCastAny, which throws if the cast is truly invalid.
2528 HCIMPL2(Object *, JIT_ChkCastArray, CORINFO_CLASS_HANDLE type, Object *pObject)
2532 PRECONDITION(TypeHandle(type).IsArray());
2535 if (pObject == NULL)
2540 OBJECTREF refObj = ObjectToOBJECTREF(pObject);
2541 VALIDATEOBJECTREF(refObj);
// A non-array object can never cast to an array type.
2543 TypeHandle::CastResult result = refObj->GetMethodTable()->IsArray() ?
2544 ArrayIsInstanceOfNoGC(pObject, TypeHandle(type)) : TypeHandle::CannotCast;
2546 if (result == TypeHandle::CanCast)
2552 Object* pRet = HCCALL2(JITutil_ChkCastAny, type, pObject);
2553 // Make sure that the fast helper have not lied
2554 _ASSERTE(result != TypeHandle::CannotCast);
// Isinst to an array type: GC-free test decides CanCast/CannotCast inline
// (elided branch bodies return the object or NULL); MaybeCast falls through
// to the framed JITutil_IsInstanceOfAny.
2560 HCIMPL2(Object *, JIT_IsInstanceOfArray, CORINFO_CLASS_HANDLE type, Object *pObject)
2564 PRECONDITION(TypeHandle(type).IsArray());
2567 if (pObject == NULL)
2572 OBJECTREF refObj = ObjectToOBJECTREF(pObject);
2573 VALIDATEOBJECTREF(refObj);
2574 MethodTable *pMT = refObj->GetMethodTable();
2576 if (!pMT->IsArray())
2578 // We know that the clsHnd is an array so check the object. If it is not an array return null
2583 switch (ArrayIsInstanceOfNoGC(pObject, TypeHandle(type))) {
2584 case TypeHandle::CanCast:
2586 case TypeHandle::CannotCast:
2589 // fall through to the slow helper
2595 return HCCALL2(JITutil_IsInstanceOfAny, type, pObject);
2599 /*********************************************************************/
2600 // IsInstanceOf test used for unusual cases (naked type parameters, variant generic types)
2601 // Unlike the IsInstanceOfInterface, IsInstanceOfClass, and IsInstanceOfArray functions,
2602 // this test must deal with all kinds of type tests
// General-purpose isinst (naked type parameters, variant generics, ...):
// the GC-free test answers most cases inline (elided case bodies); MaybeCast
// falls through to the framed slow helper.
2603 HCIMPL2(Object *, JIT_IsInstanceOfAny, CORINFO_CLASS_HANDLE type, Object* obj)
2612 switch (ObjIsInstanceOfNoGC(obj, TypeHandle(type))) {
2613 case TypeHandle::CanCast:
2615 case TypeHandle::CannotCast:
2618 // fall through to the slow helper
2623 return HCCALL2(JITutil_IsInstanceOfAny, type, obj);
2627 // ChkCast test used for unusual cases (naked type parameters, variant generic types)
2628 // Unlike the ChkCastInterface, ChkCastClass, and ChkCastArray functions,
2629 // this test must deal with all kinds of type tests
// General-purpose castclass: GC-free test succeeds inline (elided branch);
// otherwise call the framed helper, which throws if the cast is invalid —
// so CannotCast from the fast path would imply the assert below fires.
2630 HCIMPL2(Object *, JIT_ChkCastAny, CORINFO_CLASS_HANDLE type, Object *obj)
2639 TypeHandle::CastResult result = ObjIsInstanceOfNoGC(obj, TypeHandle(type));
2641 if (result == TypeHandle::CanCast)
2647 Object* pRet = HCCALL2(JITutil_ChkCastAny, type, obj);
2648 // Make sure that the fast helper have not lied
2649 _ASSERTE(result != TypeHandle::CannotCast);
// Backup "isinst" path for interface targets. NOINLINE keeps this off the
// frameless fast path. For arrays it retries with the array-specific
// "bizarre interface" no-GC check before giving up and taking the framed
// JITutil_IsInstanceOfAny slow path.
2655 NOINLINE HCIMPL2(Object *, JITutil_IsInstanceOfInterface, MethodTable *pInterfaceMT, Object* obj)
2659 if (obj->GetMethodTable()->IsArray())
2661 switch (ArrayObjSupportsBizarreInterfaceNoGC(obj, pInterfaceMT)) {
2662 case TypeHandle::CanCast:
2664 case TypeHandle::CannotCast:
2667 // fall through to the slow helper
2673 return HCCALL2(JITutil_IsInstanceOfAny, CORINFO_CLASS_HANDLE(pInterfaceMT), obj);
// Backup "castclass" path for interface targets; mirrors
// JITutil_IsInstanceOfInterface above but delegates failures to the throwing
// JITutil_ChkCastAny helper.
2678 NOINLINE HCIMPL2(Object *, JITutil_ChkCastInterface, MethodTable *pInterfaceMT, Object *obj)
2682 if (obj->GetMethodTable()->IsArray())
2684 if (ArrayObjSupportsBizarreInterfaceNoGC(obj, pInterfaceMT) == TypeHandle::CanCast)
2691 return HCCALL2(JITutil_ChkCastAny, CORINFO_CLASS_HANDLE(pInterfaceMT), obj)
2696 #include <optdefault.h>
// Framed slow path for "castclass": sets up a helper method frame so the full
// (GC-allowed) ObjIsInstanceOf test can run; it throws InvalidCastException
// itself when the cast is impossible, so a FALSE return is unreachable.
2702 NOINLINE HCIMPL2(Object *, JITutil_ChkCastAny, CORINFO_CLASS_HANDLE type, Object *obj)
2706 // This case should be handled by frameless helper
2707 _ASSERTE(obj != NULL);
2709 OBJECTREF oref = ObjectToOBJECTREF (obj);
2710 VALIDATEOBJECTREF(oref);
2712 TypeHandle clsHnd(type);
2714 HELPER_METHOD_FRAME_BEGIN_RET_1(oref);
// TRUE = throw on failure; that is why the branch body is UNREACHABLE().
2715 if (!ObjIsInstanceOf(OBJECTREFToObject(oref), clsHnd, TRUE))
2717 UNREACHABLE(); //ObjIsInstanceOf will throw if cast can't be done
2719 HELPER_METHOD_FRAME_END();
2721 return OBJECTREFToObject(oref);
// Framed slow path for "isinst": unlike the ChkCast variant above this does
// not throw on mismatch — it simply returns the (possibly NULL-ed) reference.
2725 NOINLINE HCIMPL2(Object *, JITutil_IsInstanceOfAny, CORINFO_CLASS_HANDLE type, Object *obj)
2729 // This case should be handled by frameless helper
2730 _ASSERTE(obj != NULL);
2732 OBJECTREF oref = ObjectToOBJECTREF (obj);
2733 VALIDATEOBJECTREF(oref);
2735 TypeHandle clsHnd(type);
2737 HELPER_METHOD_FRAME_BEGIN_RET_1(oref);
2738 if (!ObjIsInstanceOf(OBJECTREFToObject(oref), clsHnd))
2740 HELPER_METHOD_FRAME_END();
2742 return OBJECTREFToObject(oref);
2748 //========================================================================
2750 // ALLOCATION HELPERS
2752 //========================================================================
2754 #include <optsmallperfcritical.h>
2756 //*************************************************************
2757 // Allocation fast path for typical objects
// Portable multiprocessor fast path for object allocation: bump-allocates from
// the current thread's GC allocation context. If the object does not fit in
// the remaining quantum, tail-calls the framed JIT_New slow helper.
2759 HCIMPL1(Object*, JIT_NewS_MP_FastPortable, CORINFO_CLASS_HANDLE typeHnd_)
2765 _ASSERTE(GCHeapUtilities::UseThreadAllocationContexts());
2767 // This is typically the only call in the fast path. Making the call early seems to be better, as it allows the compiler
2768 // to use volatile registers for intermediate values. This reduces the number of push/pop instructions and eliminates
2769 // some reshuffling of intermediate values into nonvolatile registers around the call.
2770 Thread *thread = GetThread();
2772 TypeHandle typeHandle(typeHnd_);
2773 _ASSERTE(!typeHandle.IsTypeDesc());
2774 MethodTable *methodTable = typeHandle.AsMethodTable();
// Base size is the full instance size for non-array objects; the JIT only
// routes types through this helper when that size is valid to bump-allocate.
2776 SIZE_T size = methodTable->GetBaseSize();
2777 _ASSERTE(size % DATA_ALIGNMENT == 0);
2779 gc_alloc_context *allocContext = thread->GetAllocContext();
2780 BYTE *allocPtr = allocContext->alloc_ptr;
2781 _ASSERTE(allocPtr <= allocContext->alloc_limit);
// Compare against the remaining space (limit - ptr) to avoid pointer overflow.
2782 if (size > static_cast<SIZE_T>(allocContext->alloc_limit - allocPtr))
2786 allocContext->alloc_ptr = allocPtr + size;
2788 _ASSERTE(allocPtr != nullptr);
2789 Object *object = reinterpret_cast<Object *>(allocPtr);
2790 _ASSERTE(object->HasEmptySyncBlockInfo());
2791 object->SetMethodTable(methodTable);
2796 // Tail call to the slow helper
2798 return HCCALL1(JIT_New, typeHnd_);
2802 #include <optdefault.h>
2804 /*************************************************************/
// Framed slow-path object allocator: sets up a helper method frame and calls
// the general AllocateObject (which may trigger a GC). Never used for arrays.
2805 HCIMPL1(Object*, JIT_New, CORINFO_CLASS_HANDLE typeHnd_)
2809 OBJECTREF newobj = NULL;
2810 HELPER_METHOD_FRAME_BEGIN_RET_0(); // Set up a frame
2812 TypeHandle typeHnd(typeHnd_);
2814 _ASSERTE(!typeHnd.IsTypeDesc()); // we never use this helper for arrays
2815 MethodTable *pMT = typeHnd.AsMethodTable();
2816 _ASSERTE(pMT->IsRestored_NoLogging());
// Under fast GC stress, suppress the stress heap for this allocation.
2819 if (g_pConfig->FastGCStressLevel()) {
2820 GetThread()->DisableStressHeap();
2824 newobj = AllocateObject(pMT);
2826 HELPER_METHOD_FRAME_END();
2827 return(OBJECTREFToObject(newobj));
2833 //========================================================================
2837 //========================================================================
2839 #include <optsmallperfcritical.h>
2841 //*************************************************************
2842 // Allocation fast path for typical objects
// Portable fast path for string allocation: bump-allocates from the thread's
// allocation context. Overlarge or non-fitting requests tail-call the framed
// FramedAllocateString slow helper.
2844 HCIMPL1(StringObject*, AllocateString_MP_FastPortable, DWORD stringLength)
2850 _ASSERTE(GCHeapUtilities::UseThreadAllocationContexts());
2852 // Instead of doing elaborate overflow checks, we just limit the number of elements. This will avoid all overflow
2853 // problems, as well as making sure big string objects are correctly allocated in the big object heap.
2854 if (stringLength >= (LARGE_OBJECT_SIZE - 256) / sizeof(WCHAR))
2859 // This is typically the only call in the fast path. Making the call early seems to be better, as it allows the compiler
2860 // to use volatile registers for intermediate values. This reduces the number of push/pop instructions and eliminates
2861 // some reshuffling of intermediate values into nonvolatile registers around the call.
2862 Thread *thread = GetThread();
2864 SIZE_T totalSize = StringObject::GetSize(stringLength);
2866 // The method table's base size includes space for a terminating null character
2867 _ASSERTE(totalSize >= g_pStringClass->GetBaseSize());
2868 _ASSERTE((totalSize - g_pStringClass->GetBaseSize()) / sizeof(WCHAR) == stringLength);
// Round up to the allocation alignment the GC expects.
2870 SIZE_T alignedTotalSize = ALIGN_UP(totalSize, DATA_ALIGNMENT);
2871 _ASSERTE(alignedTotalSize >= totalSize);
2872 totalSize = alignedTotalSize;
2874 gc_alloc_context *allocContext = thread->GetAllocContext();
2875 BYTE *allocPtr = allocContext->alloc_ptr;
2876 _ASSERTE(allocPtr <= allocContext->alloc_limit);
2877 if (totalSize > static_cast<SIZE_T>(allocContext->alloc_limit - allocPtr))
2881 allocContext->alloc_ptr = allocPtr + totalSize;
2883 _ASSERTE(allocPtr != nullptr);
2884 StringObject *stringObject = reinterpret_cast<StringObject *>(allocPtr);
2885 stringObject->SetMethodTable(g_pStringClass);
2886 stringObject->SetStringLength(stringLength);
// Freshly bump-allocated memory is zeroed, so the terminator is already there.
2887 _ASSERTE(stringObject->GetBuffer()[stringLength] == W('\0'));
2889 return stringObject;
2892 // Tail call to the slow helper
2894 return HCCALL1(FramedAllocateString, stringLength);
2898 #ifdef FEATURE_UTF8STRING
// UTF-8 string analogue of AllocateString_MP_FastPortable: one byte per
// element instead of sizeof(WCHAR). Falls back to FramedAllocateUtf8String.
2899 HCIMPL1(Utf8StringObject*, AllocateUtf8String_MP_FastPortable, DWORD stringLength)
2905 _ASSERTE(GCHeapUtilities::UseThreadAllocationContexts());
2907 // Instead of doing elaborate overflow checks, we just limit the number of elements. This will avoid all overflow
2908 // problems, as well as making sure big string objects are correctly allocated in the big object heap.
2909 if (stringLength >= LARGE_OBJECT_SIZE - 256)
2914 // This is typically the only call in the fast path. Making the call early seems to be better, as it allows the compiler
2915 // to use volatile registers for intermediate values. This reduces the number of push/pop instructions and eliminates
2916 // some reshuffling of intermediate values into nonvolatile registers around the call.
2917 Thread *thread = GetThread();
2919 SIZE_T totalSize = Utf8StringObject::GetSize(stringLength);
2921 // The method table's base size includes space for a terminating null character
2922 _ASSERTE(totalSize >= g_pUtf8StringClass->GetBaseSize());
2923 _ASSERTE(totalSize - g_pUtf8StringClass->GetBaseSize() == stringLength);
2925 SIZE_T alignedTotalSize = ALIGN_UP(totalSize, DATA_ALIGNMENT);
2926 _ASSERTE(alignedTotalSize >= totalSize);
2927 totalSize = alignedTotalSize;
2929 gc_alloc_context *allocContext = thread->GetAllocContext();
2930 BYTE *allocPtr = allocContext->alloc_ptr;
2931 _ASSERTE(allocPtr <= allocContext->alloc_limit);
2932 if (totalSize > static_cast<SIZE_T>(allocContext->alloc_limit - allocPtr))
2936 allocContext->alloc_ptr = allocPtr + totalSize;
2938 _ASSERTE(allocPtr != nullptr);
2939 Utf8StringObject *stringObject = reinterpret_cast<Utf8StringObject *>(allocPtr);
2940 stringObject->SetMethodTable(g_pUtf8StringClass);
2941 stringObject->SetLength(stringLength);
2943 return stringObject;
2946 // Tail call to the slow helper
2948 return HCCALL1(FramedAllocateUtf8String, stringLength);
2951 #endif // FEATURE_UTF8STRING
2953 #include <optdefault.h>
2955 /*********************************************************************/
2956 /* We don't use HCIMPL macros because this is not a real helper call */
2957 /* This function just needs mangled arguments like a helper call */
// Raw (frameless) string allocator for runtime-internal callers; uses the
// HCIMPL mangling only so its signature matches the helper-call convention.
2959 HCIMPL1_RAW(StringObject*, UnframedAllocateString, DWORD stringLength)
2961 // This isn't _really_ an FCALL and therefore shouldn't have the
2962 // SO_TOLERANT part of the FCALL_CONTRACT b/c it is not entered
2963 // from managed code.
2971 result = SlowAllocateString(stringLength);
2973 return((StringObject*) OBJECTREFToObject(result));
// Framed slow-path string allocator, target of the fast-path tail calls above.
2977 HCIMPL1(StringObject*, FramedAllocateString, DWORD stringLength)
2981 STRINGREF result = NULL;
2982 HELPER_METHOD_FRAME_BEGIN_RET_0(); // Set up a frame
2984 result = SlowAllocateString(stringLength);
2986 HELPER_METHOD_FRAME_END();
2987 return((StringObject*) OBJECTREFToObject(result));
2991 #ifdef FEATURE_UTF8STRING
// Framed slow-path UTF-8 string allocator (FEATURE_UTF8STRING builds only).
2992 HCIMPL1(Utf8StringObject*, FramedAllocateUtf8String, DWORD stringLength)
2996 UTF8STRINGREF result = NULL;
2997 HELPER_METHOD_FRAME_BEGIN_RET_0(); // Set up a frame
2999 result = SlowAllocateUtf8String(stringLength);
3001 HELPER_METHOD_FRAME_END();
3002 return((Utf8StringObject*) OBJECTREFToObject(result));
3005 #endif // FEATURE_UTF8STRING
3007 /*********************************************************************/
// Resolves a string-literal metadata token to the interned string's object
// handle for the given module, honoring per-module no-interning policy for
// ngen images (FEATURE_PREJIT builds).
3008 OBJECTHANDLE ConstructStringLiteral(CORINFO_MODULE_HANDLE scopeHnd, mdToken metaTok)
3016 _ASSERTE(TypeFromToken(metaTok) == mdtString);
3018 Module* module = GetModule(scopeHnd);
3021 // If our module is ngenned and we're calling this API, it means that we're not going through
3022 // the fixup mechanism for strings. This can happen 2 ways:
3024 // a) Lazy string object construction: This happens when JIT decides that initializing a
3025 // string via fixup on method entry is very expensive. This is normally done for strings
3026 // that appear in rarely executed blocks, such as throw blocks.
3028 // b) The ngen image isn't complete (it's missing classes), therefore we're jitting methods.
3030 // If we went ahead and called ResolveStringRef directly, we would be breaking the per module
3031 // interning we're guaranteeing, so we will have to detect the case and handle it appropriately.
3032 #ifdef FEATURE_PREJIT
3033 if (module->HasNativeImage() && module->IsNoStringInterning())
// The final bool selects the no-interning resolution path.
3035 return module->ResolveStringRef(metaTok, module->GetAssembly()->Parent(), true);
3038 return module->ResolveStringRef(metaTok, module->GetAssembly()->Parent(), false);
3041 /*********************************************************************/
// JIT helper for the "ldstr" opcode: materializes the string literal named by
// (rid, scope) and returns the object pointer.
3042 HCIMPL2(Object *, JIT_StrCns, unsigned rid, CORINFO_MODULE_HANDLE scopeHnd)
3046 OBJECTHANDLE hndStr = 0;
3048 HELPER_METHOD_FRAME_BEGIN_RET_0();
3050 // Retrieve the handle to the COM+ string object.
3051 hndStr = ConstructStringLiteral(scopeHnd, RidToToken(rid, mdtString));
3052 HELPER_METHOD_FRAME_END();
3054 // Don't use ObjectFromHandle; this isn't a real handle
3055 return *(Object**)hndStr;
3060 //========================================================================
3064 //========================================================================
3066 #include <optsmallperfcritical.h>
3068 //*************************************************************
3069 // Array allocation fast path for arrays of value type elements
// Portable fast path for single-dimension arrays of value-type elements:
// bump-allocates from the thread's allocation context. Anything outside the
// conservative size bound tail-calls the framed JIT_NewArr1 slow helper.
3071 HCIMPL2(Object*, JIT_NewArr1VC_MP_FastPortable, CORINFO_CLASS_HANDLE arrayMT, INT_PTR size)
3077 _ASSERTE(GCHeapUtilities::UseThreadAllocationContexts());
3079 // Do a conservative check here. This is to avoid overflow while doing the calculations. We don't
3080 // have to worry about "large" objects, since the allocation quantum is never big enough for
3081 // LARGE_OBJECT_SIZE.
3083 // For Value Classes, this needs to be 2^16 - slack (2^32 / max component size),
3084 // The slack includes the size for the array header and round-up ; for alignment. Use 256 for the
3085 // slack value out of laziness.
// Negative sizes also land here: the cast makes them huge unsigned values.
3086 SIZE_T componentCount = static_cast<SIZE_T>(size);
3087 if (componentCount >= static_cast<SIZE_T>(65535 - 256))
3092 // This is typically the only call in the fast path. Making the call early seems to be better, as it allows the compiler
3093 // to use volatile registers for intermediate values. This reduces the number of push/pop instructions and eliminates
3094 // some reshuffling of intermediate values into nonvolatile registers around the call.
3095 Thread *thread = GetThread();
3097 MethodTable *pArrayMT = (MethodTable *)arrayMT;
3099 _ASSERTE(pArrayMT->HasComponentSize());
3100 SIZE_T componentSize = pArrayMT->RawGetComponentSize();
3101 SIZE_T totalSize = componentCount * componentSize;
3102 _ASSERTE(totalSize / componentSize == componentCount);
3104 SIZE_T baseSize = pArrayMT->GetBaseSize();
3105 totalSize += baseSize;
3106 _ASSERTE(totalSize >= baseSize);
3108 SIZE_T alignedTotalSize = ALIGN_UP(totalSize, DATA_ALIGNMENT);
3109 _ASSERTE(alignedTotalSize >= totalSize);
3110 totalSize = alignedTotalSize;
3112 gc_alloc_context *allocContext = thread->GetAllocContext();
3113 BYTE *allocPtr = allocContext->alloc_ptr;
3114 _ASSERTE(allocPtr <= allocContext->alloc_limit);
3115 if (totalSize > static_cast<SIZE_T>(allocContext->alloc_limit - allocPtr))
3119 allocContext->alloc_ptr = allocPtr + totalSize;
3121 _ASSERTE(allocPtr != nullptr);
3122 ArrayBase *array = reinterpret_cast<ArrayBase *>(allocPtr);
3123 array->SetArrayMethodTable(pArrayMT);
3124 _ASSERTE(static_cast<DWORD>(componentCount) == componentCount);
3125 array->m_NumComponents = static_cast<DWORD>(componentCount);
3130 // Tail call to the slow helper
3132 return HCCALL2(JIT_NewArr1, arrayMT, size);
3136 //*************************************************************
3137 // Array allocation fast path for arrays of object elements
// Portable fast path for single-dimension arrays of object references.
// Component size is sizeof(void*) by construction, so the total size needs no
// extra ALIGN_UP (asserted below). Falls back to JIT_NewArr1.
3139 HCIMPL2(Object*, JIT_NewArr1OBJ_MP_FastPortable, CORINFO_CLASS_HANDLE arrayMT, INT_PTR size)
3145 _ASSERTE(GCHeapUtilities::UseThreadAllocationContexts());
3147 // Make sure that the total size cannot reach LARGE_OBJECT_SIZE, which also allows us to avoid overflow checks. The
3148 // "256" slack is to cover the array header size and round-up, using a constant value here out of laziness.
3149 SIZE_T componentCount = static_cast<SIZE_T>(size);
3150 if (componentCount >= static_cast<SIZE_T>((LARGE_OBJECT_SIZE - 256) / sizeof(void *)))
3155 // This is typically the only call in the fast path. Making the call early seems to be better, as it allows the compiler
3156 // to use volatile registers for intermediate values. This reduces the number of push/pop instructions and eliminates
3157 // some reshuffling of intermediate values into nonvolatile registers around the call.
3158 Thread *thread = GetThread();
3160 SIZE_T totalSize = componentCount * sizeof(void *);
3161 _ASSERTE(totalSize / sizeof(void *) == componentCount);
3163 MethodTable *pArrayMT = (MethodTable *)arrayMT;
3165 SIZE_T baseSize = pArrayMT->GetBaseSize();
3166 totalSize += baseSize;
3167 _ASSERTE(totalSize >= baseSize);
// Pointer-sized components keep the size naturally aligned already.
3169 _ASSERTE(ALIGN_UP(totalSize, DATA_ALIGNMENT) == totalSize);
3171 gc_alloc_context *allocContext = thread->GetAllocContext();
3172 BYTE *allocPtr = allocContext->alloc_ptr;
3173 _ASSERTE(allocPtr <= allocContext->alloc_limit);
3174 if (totalSize > static_cast<SIZE_T>(allocContext->alloc_limit - allocPtr))
3178 allocContext->alloc_ptr = allocPtr + totalSize;
3180 _ASSERTE(allocPtr != nullptr);
3181 ArrayBase *array = reinterpret_cast<ArrayBase *>(allocPtr);
3182 array->SetArrayMethodTable(pArrayMT);
3183 _ASSERTE(static_cast<DWORD>(componentCount) == componentCount);
3184 array->m_NumComponents = static_cast<DWORD>(componentCount);
3189 // Tail call to the slow helper
3191 return HCCALL2(JIT_NewArr1, arrayMT, size);
3195 //*************************************************************
3196 // R2R-specific array allocation wrapper that extracts array method table from ArrayTypeDesc
// ReadyToRun wrapper: R2R code passes an ArrayTypeDesc handle rather than the
// array method table, so extract the template MethodTable and forward.
3198 HCIMPL2(Object*, JIT_NewArr1_R2R, CORINFO_CLASS_HANDLE arrayTypeHnd_, INT_PTR size)
3202 TypeHandle arrayTypeHandle(arrayTypeHnd_);
3203 ArrayTypeDesc *pArrayTypeDesc = arrayTypeHandle.AsArray();
3204 MethodTable *pArrayMT = pArrayTypeDesc->GetTemplateMethodTable();
3207 return HCCALL2(JIT_NewArr1, (CORINFO_CLASS_HANDLE)pArrayMT, size);
3211 #include <optdefault.h>
3213 /*************************************************************/
// Framed slow-path allocator for single-dimension (SZ) arrays. Validates the
// element count, then takes a primitive-element fast route when possible or
// the general AllocateArrayEx route otherwise.
3214 HCIMPL2(Object*, JIT_NewArr1, CORINFO_CLASS_HANDLE arrayMT, INT_PTR size)
3218 OBJECTREF newArray = NULL;
3220 HELPER_METHOD_FRAME_BEGIN_RET_0(); // Set up a frame
3222 MethodTable *pArrayMT = (MethodTable *)arrayMT;
3224 _ASSERTE(pArrayMT->IsFullyLoaded());
3225 _ASSERTE(pArrayMT->IsArray());
3226 _ASSERTE(!pArrayMT->IsMultiDimArray());
// Negative element counts overflow.
3229 COMPlusThrow(kOverflowException);
3232 // Even though ECMA allows using a native int as the argument to newarr instruction
3233 // (therefore size is INT_PTR), ArrayBase::m_NumComponents is 32-bit, so even on 64-bit
3234 // platforms we can't create an array whose size exceeds 32 bits.
3236 EX_THROW(EEMessageException, (kOverflowException, IDS_EE_ARRAY_DIMENSIONS_EXCEEDED));
3240 // is this a primitive type?
3243 CorElementType elemType = pArrayMT->GetArrayElementType();
3245 if (CorTypeInfo::IsPrimitiveType(elemType)
3246 #ifdef FEATURE_64BIT_ALIGNMENT
3247 // On platforms where 64-bit types require 64-bit alignment and don't obtain it naturally force us
3248 // through the slow path where this will be handled.
3249 && (elemType != ELEMENT_TYPE_I8)
3250 && (elemType != ELEMENT_TYPE_U8)
3251 && (elemType != ELEMENT_TYPE_R8)
3256 if (g_pConfig->FastGCStressLevel()) {
3257 GetThread()->DisableStressHeap();
3261 // Disallow the creation of void[] (an array of System.Void)
3262 if (elemType == ELEMENT_TYPE_VOID)
3263 COMPlusThrow(kArgumentException);
3265 BOOL bAllocateInLargeHeap = FALSE;
3266 #ifdef FEATURE_DOUBLE_ALIGNMENT_HINT
// Large double arrays go to the large object heap so they get 8-byte alignment.
3267 if ((elemType == ELEMENT_TYPE_R8) &&
3268 (static_cast<DWORD>(size) >= g_pConfig->GetDoubleArrayToLargeObjectHeapThreshold()))
3270 STRESS_LOG1(LF_GC, LL_INFO10, "Allocating double array of size %d to large object heap\n", size);
3271 bAllocateInLargeHeap = TRUE;
// Lazily populate the cached SZARRAY type handle for this primitive element type.
3275 if (g_pPredefinedArrayTypes[elemType] == NULL)
3277 TypeHandle elemTypeHnd = TypeHandle(MscorlibBinder::GetElementType(elemType));
3279 g_pPredefinedArrayTypes[elemType] = ClassLoader::LoadArrayTypeThrowing(elemTypeHnd, ELEMENT_TYPE_SZARRAY, 0).AsArray();
3282 newArray = FastAllocatePrimitiveArray(pArrayMT, static_cast<DWORD>(size), bAllocateInLargeHeap);
3287 if (g_pConfig->FastGCStressLevel()) {
3288 GetThread()->DisableStressHeap();
3291 INT32 size32 = (INT32)size;
3292 newArray = AllocateArrayEx(pArrayMT, &size32, 1);
3295 HELPER_METHOD_FRAME_END();
3297 return(OBJECTREFToObject(newArray));
3301 /*********************************************************************
3302 // Allocate a multi-dimensional array
// Shared worker for multi-dimensional array allocation: gathers the va_list
// of INT32 dimension arguments into a forward-ordered array and calls
// AllocateArrayEx. One path reverses in place, the other copies via _alloca;
// which path runs is platform-dependent (the #if lines are not visible here).
3304 OBJECTREF allocNewMDArr(TypeHandle typeHnd, unsigned dwNumArgs, va_list args)
3310 PRECONDITION(dwNumArgs > 0);
3313 // Get the arguments in the right order
3318 fwdArgList = (INT32*)args;
3320 // reverse the order
3321 INT32* p = fwdArgList;
3322 INT32* q = fwdArgList + (dwNumArgs-1);
3325 INT32 t = *p; *p = *q; *q = t;
3329 // create an array where fwdArgList[0] == arg[0] ...
3330 fwdArgList = (INT32*) _alloca(dwNumArgs * sizeof(INT32));
3331 for (unsigned i = 0; i < dwNumArgs; i++)
3333 fwdArgList[i] = va_arg(args, INT32);
3337 return AllocateArrayEx(typeHnd, fwdArgList, dwNumArgs);
3340 /*********************************************************************
3341 // Allocate a multi-dimensional array with lower bounds specified.
3342 // The caller pushes both sizes AND/OR bounds for every dimension
// Varargs entry point for "newobj" on multi-dimensional array constructors;
// the caller pushes sizes and/or lower bounds for every dimension.
3345 HCIMPL2VA(Object*, JIT_NewMDArr, CORINFO_CLASS_HANDLE classHnd, unsigned dwNumArgs)
3350 HELPER_METHOD_FRAME_BEGIN_RET_1(ret); // Set up a frame
3352 TypeHandle typeHnd(classHnd);
3353 typeHnd.CheckRestore();
3354 _ASSERTE(typeHnd.GetMethodTable()->IsArray());
3356 va_list dimsAndBounds;
3357 va_start(dimsAndBounds, dwNumArgs);
3359 ret = allocNewMDArr(typeHnd, dwNumArgs, dimsAndBounds);
3360 va_end(dimsAndBounds);
3362 HELPER_METHOD_FRAME_END();
3363 return OBJECTREFToObject(ret);
3367 /*************************************************************/
// Non-varargs variant of JIT_NewMDArr: the dimension arguments arrive as a
// ready-made INT32 array instead of a va_list.
3368 HCIMPL3(Object*, JIT_NewMDArrNonVarArg, CORINFO_CLASS_HANDLE classHnd, unsigned dwNumArgs, INT32 * pArgList)
3373 HELPER_METHOD_FRAME_BEGIN_RET_1(ret); // Set up a frame
3375 TypeHandle typeHnd(classHnd);
3376 typeHnd.CheckRestore();
3377 _ASSERTE(typeHnd.GetMethodTable()->IsArray());
3379 ret = AllocateArrayEx(typeHnd, pArgList, dwNumArgs);
3381 HELPER_METHOD_FRAME_END();
3382 return OBJECTREFToObject(ret);
3386 /*************************************************************/
3387 /* returns '&array[idx], after doing all the proper checks */
3389 #include <optsmallperfcritical.h>
// "ldelema" helper for reference-type elements: returns &array[idx] after
// null, bounds, and exact element-type checks; the failure cases record the
// exception kind to raise (the throw path is below the visible lines).
3390 HCIMPL3(void*, JIT_Ldelema_Ref, PtrArray* array, unsigned idx, CORINFO_CLASS_HANDLE type)
3394 RuntimeExceptionKind except;
3395 // This has been carefully arranged to ensure that in the common
3396 // case the branches are predicted properly (fall through),
3397 // and that we don't spill registers unnecessarily etc.
3399 if (idx < array->GetNumComponents())
3400 if (array->GetArrayElementTypeHandle() == TypeHandle(type))
3401 return(&array->m_Array[idx]);
3403 except = kArrayTypeMismatchException;
3405 except = kIndexOutOfRangeException;
3407 except = kNullReferenceException;
3412 #include <optdefault.h>
3414 //===========================================================================
3415 // This routine is called if the Array store needs a frame constructed
3416 // in order to do the array check. It should only be called from
3417 // the array store check helpers.
// Framed array-store covariance check, called only from the store-check
// helpers when the frameless test could not decide. Throws
// ArrayTypeMismatchException when the element is not assignable.
3419 HCIMPL2(LPVOID, ArrayStoreCheck, Object** pElement, PtrArray** pArray)
3423 HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_2(Frame::FRAME_ATTR_EXACT_DEPTH|Frame::FRAME_ATTR_CAPTURE_DEPTH_2, *pElement, *pArray);
3425 GCStress<cfg_any, EeconfigFastGcSPolicy>::MaybeTrigger();
3427 if (!ObjIsInstanceOf(*pElement, (*pArray)->GetArrayElementTypeHandle()))
3428 COMPlusThrow(kArrayTypeMismatchException);
3430 HELPER_METHOD_FRAME_END();
3432 return (LPVOID)0; // Used to aid epilog walker
3436 /****************************************************************************/
3437 /* assigns 'val to 'array[idx], after doing all the proper checks */
// "stelem.ref" helper: stores 'val' into array[idx] after null/bounds checks
// and the covariance check, going through the GC write barrier for non-null
// values and a plain clear for null.
3439 HCIMPL3(void, JIT_Stelem_Ref_Portable, PtrArray* array, unsigned idx, Object *val)
3445 FCThrowVoid(kNullReferenceException);
3447 if (idx >= array->GetNumComponents())
3449 FCThrowVoid(kIndexOutOfRangeException);
3454 MethodTable *valMT = val->GetMethodTable();
3455 TypeHandle arrayElemTH = array->GetArrayElementTypeHandle();
// Exact type match or object[] target needs no further check; otherwise try
// the GC-free test and, if inconclusive, the framed ArrayStoreCheck.
3457 if (arrayElemTH != TypeHandle(valMT) && arrayElemTH != TypeHandle(g_pObjectClass))
3459 TypeHandle::CastResult result = ObjIsInstanceOfNoGC(val, arrayElemTH);
3460 if (result != TypeHandle::CanCast)
3462 // FCALL_CONTRACT increase ForbidGC count. Normally, HELPER_METHOD_FRAME macros decrease the count.
3463 // But to avoid perf hit, we manually decrease the count here before calling another HCCALL.
3466 if (HCCALL2(ArrayStoreCheck,(Object**)&val, (PtrArray**)&array) != NULL)
3468 // This return is never executed. It helps epilog walker to find its way out.
3474 #ifdef _TARGET_ARM64_
3475 SetObjectReference((OBJECTREF*)&array->m_Array[idx], ObjectToOBJECTREF(val));
3477 // The performance gain of the optimized JIT_Stelem_Ref in
3478 // jitinterfacex86.cpp is mainly due to calling JIT_WriteBarrier
3479 // By calling write barrier directly here,
3480 // we can avoid translating in-line assembly from MSVC to gcc
3481 // while keeping most of the performance gain.
3482 HCCALL2(JIT_WriteBarrier, (Object **)&array->m_Array[idx], val);
3488 // no need to go through write-barrier for NULL
3489 ClearObjectReference(&array->m_Array[idx]);
3496 //========================================================================
3498 // VALUETYPE/BYREF HELPERS
3500 //========================================================================
3502 /*************************************************************/
// "box" helper: allocates a boxed copy of the value at 'unboxedData'.
// The interior pointer is GC-protected across the poll/allocation.
3503 HCIMPL2(Object*, JIT_Box, CORINFO_CLASS_HANDLE type, void* unboxedData)
3507 // <TODO>TODO: if we care, we could do a fast trial allocation
3508 // and avoid the building the frame most times</TODO>
3509 OBJECTREF newobj = NULL;
3510 HELPER_METHOD_FRAME_BEGIN_RET_NOPOLL(); // Set up a frame
3511 GCPROTECT_BEGININTERIOR(unboxedData);
3512 HELPER_METHOD_POLL();
3514 TypeHandle clsHnd(type);
3516 _ASSERTE(!clsHnd.IsTypeDesc()); // we never use this helper for arrays
3518 MethodTable *pMT = clsHnd.AsMethodTable();
3520 pMT->CheckRestore();
3522 // You can only box valuetypes
3523 if (!pMT->IsValueType())
3524 COMPlusThrow(kInvalidCastException, W("Arg_ObjObj"));
3527 if (g_pConfig->FastGCStressLevel()) {
3528 GetThread()->DisableStressHeap();
3532 newobj = pMT->FastBox(&unboxedData);
3535 HELPER_METHOD_FRAME_END();
3536 return(OBJECTREFToObject(newobj));
3540 /*************************************************************/
// Framed fallback for Nullable<T> unbox: handles the cases Nullable::UnBoxNoGC
// could not (e.g. type equivalence) and throws InvalidCastException on failure.
3541 NOINLINE HCIMPL3(VOID, JIT_Unbox_Nullable_Framed, void * destPtr, MethodTable* typeMT, OBJECTREF objRef)
3545 HELPER_METHOD_FRAME_BEGIN_1(objRef);
3546 if (!Nullable::UnBox(destPtr, objRef, typeMT))
3548 COMPlusThrowInvalidCastException(&objRef, TypeHandle(typeMT));
3550 HELPER_METHOD_FRAME_END();
3554 /*************************************************************/
// "unbox" helper for Nullable<T> targets: writes the unboxed value into
// destPtr. Tries the no-GC exact-match path first, then the framed helper.
3555 HCIMPL3(VOID, JIT_Unbox_Nullable, void * destPtr, CORINFO_CLASS_HANDLE type, Object* obj)
3559 TypeHandle typeHnd(type);
3560 _ASSERTE(Nullable::IsNullableType(typeHnd));
3562 MethodTable* typeMT = typeHnd.AsMethodTable();
3564 OBJECTREF objRef = ObjectToOBJECTREF(obj);
3566 if (Nullable::UnBoxNoGC(destPtr, objRef, typeMT))
3568 // exact match (type equivalence not needed)
3572 // Fall back to a framed helper that handles type equivalence.
3574 HCCALL3(JIT_Unbox_Nullable_Framed, destPtr, typeMT, objRef);
3578 /*************************************************************/
3579 /* framed helper that handles full-blown type equivalence */
// Least-common unbox path: full type-equivalence comparison under a helper
// frame. Returns a pointer to the boxed data on success, throws otherwise.
3580 NOINLINE HCIMPL2(LPVOID, JIT_Unbox_Helper_Framed, CORINFO_CLASS_HANDLE type, Object* obj)
3584 LPVOID result = NULL;
3586 OBJECTREF objRef = ObjectToOBJECTREF(obj);
3587 HELPER_METHOD_FRAME_BEGIN_RET_1(objRef);
3588 if (TypeHandle(type).IsEquivalentTo(objRef->GetTypeHandle()))
3590 // the structures are equivalent
3591 result = objRef->GetData();
3595 COMPlusThrowInvalidCastException(&objRef, TypeHandle(type));
3597 HELPER_METHOD_FRAME_END();
3603 /*************************************************************/
3604 /* the uncommon case for the helper below (allowing enums to be unboxed
3605 as their underlying type */
// Uncommon unbox path, kept out of JIT_Unbox so the common exact-match case
// stays register-lean: permits unboxing an enum as its underlying primitive
// (and vice versa) when the CorElementTypes agree; anything rarer goes to the
// framed type-equivalence helper.
3606 LPVOID __fastcall JIT_Unbox_Helper(CORINFO_CLASS_HANDLE type, Object* obj)
3610 TypeHandle typeHnd(type);
3612 CorElementType type1 = typeHnd.GetInternalCorElementType();
3614 // we allow enums and their primitive type to be interchangeable
3616 MethodTable* pMT2 = obj->GetMethodTable();
3617 CorElementType type2 = pMT2->GetInternalCorElementType();
3620 MethodTable* pMT1 = typeHnd.GetMethodTable();
3621 if (pMT1 && (pMT1->IsEnum() || pMT1->IsTruePrimitive()) &&
3622 (pMT2->IsEnum() || pMT2->IsTruePrimitive()))
3624 _ASSERTE(CorTypeInfo::IsPrimitiveType_NoThrow(type1));
3625 return(obj->GetData());
3629 // Even less common cases (type equivalence) go to a framed helper.
3631 return HCCALL2(JIT_Unbox_Helper_Framed, type, obj);
3634 /*************************************************************/
// "unbox" helper for non-Nullable value types: returns a pointer to the boxed
// payload when the object's method table matches exactly; the enum/primitive
// and equivalence cases are pushed out of line into JIT_Unbox_Helper.
3635 HCIMPL2(LPVOID, JIT_Unbox, CORINFO_CLASS_HANDLE type, Object* obj)
3639 TypeHandle typeHnd(type);
3640 VALIDATEOBJECT(obj);
3641 _ASSERTE(!typeHnd.IsTypeDesc()); // value classes are always unshared
3643 // This has been tuned so that branch predictions are good
3644 // (fall through for forward branches) for the common case
3646 if (obj->GetMethodTable() == typeHnd.AsMethodTable())
3647 return(obj->GetData());
3649 // Stuff the uncommon case into a helper so that
3650 // its register needs don't cause spills that effect
3651 // the common case above.
3652 return JIT_Unbox_Helper(type, obj);
3656 FCThrow(kNullReferenceException);
3660 /*************************************************************/
// "refanyval" helper: extracts the data pointer from a TypedReference,
// requiring the embedded type to match the requested type exactly.
3661 HCIMPL2_IV(LPVOID, JIT_GetRefAny, CORINFO_CLASS_HANDLE type, TypedByRef typedByRef)
3665 TypeHandle clsHnd(type);
3667 // <TODO>@TODO right now we check for precisely the correct type.
3668 // do we want to allow inheritance? (watch out since value
3669 // classes inherit from object but do not normal object layout).</TODO>
3670 if (clsHnd != typedByRef.type) {
3671 FCThrow(kInvalidCastException);
3674 return(typedByRef.data);
3679 //========================================================================
3683 //========================================================================
3685 /***********************************************************************/
3686 // JIT_GenericHandle and its cache
3688 // Perform a "polytypic" operation related to shared generic code at runtime, possibly filling in an entry in
3689 // either a generic dictionary cache associated with a descriptor or placing an entry in the global
3690 // JitGenericHandle cache.
3692 // A polytypic operation is one such as
3694 // * castclass List<T>
3695 // where the code being executed is shared generic code. In these cases the outcome of the operation depends
3696 // on the exact value for T, which is acquired from a dynamic parameter.
3698 // The actual operation always boils down to finding a "handle" (TypeHandle, MethodDesc, call address,
3699 // dispatch stub address etc.) based on some static information (passed as tokens) and on the exact runtime
3700 // type context (passed as one or two parameters classHnd and methodHnd).
3702 // The static information specifies which polytypic operation (and thus which kind of handle) we're
3705 // The dynamic information (the type context, i.e. the exact instantiation of class and method type
3706 // parameters is specified in one of two ways:
3707 // * If classHnd is null then the methodHnd should be an exact method descriptor wrapping shared code that
3708 // satisfies SharedByGenericMethodInstantiations().
3711 // * We may be running the shared code for a generic method instantiation C::m<object>. The methodHnd
3712 // will carry the exact instantiation, e.g. C::m<string>
3714 // * If classHnd is non-null (e.g. a type D<exact>) then:
3715 // * methodHnd will indicate the representative code being run (which will be
3716 // !SharedByGenericMethodInstantiations but will be SharedByGenericClassInstantiations). Let's say
3717 // this code is C<repr>::m().
3718 // * the type D will be a descendent of type C. In particular D<exact> will relate to some type C<exact'>
3719 // where C<repr> is the representative instantiation of C<exact'>
3720 // * the relevant dictionary will be the one attached to C<exact'>.
3722 // The JitGenericHandleCache is a global data structure shared across all application domains. It is only
3723 // used if generic dictionaries have overflowed. It is flushed each time an application domain is unloaded.
// Key for the global JitGenericHandleCache (see the comment block above).
// Two flavors share the three data slots: a JIT_GenericHandle entry keyed by
// (classHnd, methodHnd, signature), and a JIT_VirtualFunctionPointer entry
// keyed by (objectType MT, classHnd, methodHnd); the low bit of
// m_pDomainAndType distinguishes them.
3725 struct JitGenericHandleCacheKey
// Constructor for JIT_GenericHandle entries (type bit = 0).
3727 JitGenericHandleCacheKey(CORINFO_CLASS_HANDLE classHnd, CORINFO_METHOD_HANDLE methodHnd, void *signature, BaseDomain* pDomain=NULL)
3729 LIMITED_METHOD_CONTRACT;
3730 m_Data1 = (size_t)classHnd;
3731 m_Data2 = (size_t)methodHnd;
3732 m_Data3 = (size_t)signature;
// BaseDomain pointers are at least 2-aligned, so the low bit is free for the tag.
3733 m_pDomainAndType = 0 | (size_t)pDomain;
// Constructor for JIT_VirtualFunctionPointer entries (type bit = 1).
3736 JitGenericHandleCacheKey(MethodTable* pMT, CORINFO_CLASS_HANDLE classHnd, CORINFO_METHOD_HANDLE methodHnd, BaseDomain* pDomain=NULL)
3738 LIMITED_METHOD_CONTRACT;
3739 m_Data1 = (size_t)pMT;
3740 m_Data2 = (size_t)classHnd;
3741 m_Data3 = (size_t)methodHnd;
3742 m_pDomainAndType = 1 | (size_t)pDomain;
3745 size_t GetType() const
3747 LIMITED_METHOD_CONTRACT;
3748 return (m_pDomainAndType & 1);
3751 BaseDomain* GetDomain() const
3753 LIMITED_METHOD_CONTRACT;
3754 return (BaseDomain*)(m_pDomainAndType & ~1);
3761 size_t m_pDomainAndType; // Which domain the entry belongs to. Not actually part of the key.
3762 // Used only so we can scrape the table on AppDomain termination.
3763 // NULL appdomain means that the entry should be scratched
3764 // on any appdomain unload.
3766 // The lowest bit is used to indicate the type of the entry:
3767 // 0 - JIT_GenericHandle entry
3768 // 1 - JIT_VirtualFunctionPointer entry
// EEHashTable traits for the generic-handle overflow cache: defines entry
// allocation/teardown, key equality, and hashing over JitGenericHandleCacheKey.
3771 class JitGenericHandleCacheTraits
// Allocates an entry with the key copied inline after the EEHashEntry header.
// Uses new(nothrow): returns NULL on OOM (caller must tolerate failure).
3774     static EEHashEntry_t *AllocateEntry(const JitGenericHandleCacheKey *pKey, BOOL bDeepCopy, AllocationHeap pHeap = 0)
3776         LIMITED_METHOD_CONTRACT;
3777         EEHashEntry_t *pEntry = (EEHashEntry_t *) new (nothrow) BYTE[SIZEOF_EEHASH_ENTRY + sizeof(JitGenericHandleCacheKey)];
3780         *((JitGenericHandleCacheKey*)pEntry->Key) = *pKey;
// Frees an entry allocated by AllocateEntry (matching delete[] of the BYTE buffer).
3784     static void DeleteEntry(EEHashEntry_t *pEntry, AllocationHeap pHeap = 0)
3786         LIMITED_METHOD_CONTRACT;
3787         delete [] (BYTE*)pEntry;
// Keys match when all three data words and the entry kind agree; the domain
// only participates when the probe key specifies one (NULL matches any domain).
3790     static BOOL CompareKeys(EEHashEntry_t *pEntry, const JitGenericHandleCacheKey *e2)
3792         LIMITED_METHOD_CONTRACT;
3793         const JitGenericHandleCacheKey *e1 = (const JitGenericHandleCacheKey*)&pEntry->Key;
3794         return (e1->m_Data1 == e2->m_Data1) && (e1->m_Data2 == e2->m_Data2) && (e1->m_Data3 == e2->m_Data3) &&
3795             (e1->GetType() == e2->GetType()) &&
3796             // Any domain will work if the lookup key does not specify it
3797             ((e2->GetDomain() == NULL) || (e1->GetDomain() == e2->GetDomain()));
// Mixes the three key words with rotations so pointer-valued keys don't
// collide on their low bits. The domain is deliberately not hashed (it is
// not part of the key — see CompareKeys).
3800     static DWORD Hash(const JitGenericHandleCacheKey *k)
3802         LIMITED_METHOD_CONTRACT;
3803         return (DWORD)k->m_Data1 + _rotl((DWORD)k->m_Data2,5) + _rotr((DWORD)k->m_Data3,5);
// Returns the key stored inline in the entry.
3806     static const JitGenericHandleCacheKey *GetKey(EEHashEntry_t *pEntry)
3808         LIMITED_METHOD_CONTRACT;
3809         return (const JitGenericHandleCacheKey*)&pEntry->Key;
// Process-wide overflow cache, shared across all appdomains. Lazily created;
// writers must hold g_pJitGenericHandleCacheCrst (readers use the speculative
// GetValueSpeculative path without the lock — see the fast helpers below).
3813 typedef EEHashTable<const JitGenericHandleCacheKey *, JitGenericHandleCacheTraits, FALSE> JitGenericHandleCache;
3815 JitGenericHandleCache *g_pJitGenericHandleCache = NULL; //cache of calls to JIT_GenericHandle
3816 CrstStatic g_pJitGenericHandleCacheCrst;
// Inserts (key -> datum) into the global overflow cache under the cache lock.
// Insertion is best-effort: a pre-existing entry wins, and OOM during insert
// is swallowed — the cache is purely an optimization, so failure to cache is
// never an error.
3818 void AddToGenericHandleCache(JitGenericHandleCacheKey* pKey, HashDatum datum)
3824         PRECONDITION(CheckPointer(pKey));
3825         PRECONDITION(CheckPointer(datum));
3832         CrstHolder lock(&g_pJitGenericHandleCacheCrst);
// Re-check under the lock so a racing inserter's value is kept rather than overwritten.
3835         if (!g_pJitGenericHandleCache->GetValue(pKey,&entry))
3836             g_pJitGenericHandleCache->InsertValue(pKey,datum);
3841     EX_END_CATCH(SwallowAllExceptions)  // Swallow OOM
// Flushes overflow-cache entries owned by the given appdomain (plus any entry
// with no domain affinity). Called during AppDomain unload; expected to run
// while the EE is suspended, so the lock is only for the thread-safety checks.
3845 void ClearJitGenericHandleCache(AppDomain *pDomain)
3853     // We call this on every AppDomain unload, because entries in the cache might include
3854     // pointers into the AppDomain being unloaded. We would prefer to
3855     // only flush entries that are no longer valid, but the entries don't yet contain
3856     // enough information to do that.  However everything in the cache can be found again by calling
3857     // loader functions, and the total number of entries in the cache is typically very small (indeed
3858     // normally the cache is not used at all - it is only used when the generic dictionaries overflow).
3859     if (g_pJitGenericHandleCache)
3861         // It's not necessary to take the lock here because this function should only be called when EE is suspended,
3862         // the lock is only taken to fulfill the threadsafety check and to be consistent. If the lock becomes a problem, we
3863         // could put it in a "ifdef _DEBUG" block
3864         CrstHolder lock(&g_pJitGenericHandleCacheCrst);
3865         EEHashTableIteration iter;
3866         g_pJitGenericHandleCache->IterateStart(&iter);
3867         BOOL keepGoing = g_pJitGenericHandleCache->IterateNext(&iter);
3870             const JitGenericHandleCacheKey *key = g_pJitGenericHandleCache->IterateGetKey(&iter);
3871             BaseDomain* pKeyDomain = key->GetDomain();
// Delete when: the entry belongs to the unloading domain, has no domain
// affinity (NULL), or we are NGen'ing (fake loader domains make every
// entry potentially stale).
3872             if (pKeyDomain == pDomain || pKeyDomain == NULL
3873                 // We compute fake domain for types during NGen (see code:ClassLoader::ComputeLoaderModule).
3874                 // To avoid stale handles, we need to clear the cache unconditionally during NGen.
3875                 || IsCompilationProcess())
3877                 // Advance the iterator before we delete!!  See notes in EEHash.h
3878                 keepGoing = g_pJitGenericHandleCache->IterateNext(&iter);
3879                 g_pJitGenericHandleCache->DeleteValue(key);
3883                 keepGoing = g_pJitGenericHandleCache->IterateNext(&iter);
3889 // Factored out most of the body of JIT_GenericHandle so it could be called easily from the CER reliability code to pre-populate the
// Slow path of the generic-handle helpers: decodes the signature, normalizes
// the class to the exact declaring super-type, consults the overflow cache,
// and otherwise populates the dictionary slot via Dictionary::PopulateEntry.
// pModule is non-NULL only for R2R, in which case dictionaryIndexAndSlot packs
// the dictionary index in its upper 16 bits.
3891 CORINFO_GENERIC_HANDLE JIT_GenericHandleWorker(MethodDesc * pMD, MethodTable * pMT, LPVOID signature, DWORD dictionaryIndexAndSlot, Module* pModule)
3898     MethodTable * pDeclaringMT = NULL;
3902         ULONG dictionaryIndex = 0;
3904         if (pModule != NULL)
3907             // Only in R2R mode are the module, dictionary index and dictionary slot provided as an input
3908             _ASSERTE(dictionaryIndexAndSlot != (DWORD)-1);
3909             _ASSERT(ExecutionManager::FindReadyToRunModule(dac_cast<TADDR>(signature)) == pModule);
// Upper 16 bits carry the dictionary index; lower bits carry the slot.
3911             dictionaryIndex = (dictionaryIndexAndSlot >> 16);
// Non-R2R: the dictionary index is encoded at the front of the signature blob.
3915             SigPointer ptr((PCCOR_SIGNATURE)signature);
3917             ULONG kind; // DictionaryEntryKind
3918             IfFailThrow(ptr.GetData(&kind));
3920             // We need to normalize the class passed in (if any) for reliability purposes. That's because preparation of a code region that
3921             // contains these handle lookups depends on being able to predict exactly which lookups are required (so we can pre-cache the
3922             // answers and remove any possibility of failure at runtime). This is hard to do if the lookup (in this case the lookup of the
3923             // dictionary overflow cache) is keyed off the somewhat arbitrary type of the instance on which the call is made (we'd need to
3924             // prepare for every possible derived type of the type containing the method). So instead we have to locate the exactly
3925             // instantiated (non-shared) super-type of the class passed in.
3927             _ASSERTE(dictionaryIndexAndSlot == (DWORD)-1);
3928             IfFailThrow(ptr.GetData(&dictionaryIndex));
// Walk up the parent chain until the declaring type owns the requested
// dictionary index (parents with fewer dictionaries are skipped).
3934             MethodTable * pParentMT = pDeclaringMT->GetParentMethodTable();
3935             if (pParentMT->GetNumDicts() <= dictionaryIndex)
3937             pDeclaringMT = pParentMT;
3940         if (pDeclaringMT != pMT)
3942             JitGenericHandleCacheKey key((CORINFO_CLASS_HANDLE)pDeclaringMT, NULL, signature);
3944             if (g_pJitGenericHandleCache->GetValue(&key,&res))
3946                 // Add the denormalized key for faster lookup next time. This is not a critical entry - no need
3947                 // to specify appdomain affinity.
3948                 JitGenericHandleCacheKey denormKey((CORINFO_CLASS_HANDLE)pMT, NULL, signature);
3949                 AddToGenericHandleCache(&denormKey, res);
3950                 return (CORINFO_GENERIC_HANDLE) (DictionaryEntry) res;
// Cache miss: compute the entry and write it into the dictionary slot.
3955     DictionaryEntry * pSlot;
3956     CORINFO_GENERIC_HANDLE result = (CORINFO_GENERIC_HANDLE)Dictionary::PopulateEntry(pMD, pDeclaringMT, signature, FALSE, &pSlot, dictionaryIndexAndSlot, pModule);
3960         // If we've overflowed the dictionary write the result to the cache.
// Pick the domain affinity from whichever entity owns the dictionary
// (declaring type for class dictionaries, method for method dictionaries).
3961         BaseDomain *pDictDomain = NULL;
3965             pDictDomain = pDeclaringMT->GetDomain();
3969             pDictDomain = pMD->GetDomain();
3972         // Add the normalized key (pDeclaringMT) here so that future lookups of any
3973         // inherited types are faster next time rather than just for this specific pMT.
3974         JitGenericHandleCacheKey key((CORINFO_CLASS_HANDLE)pDeclaringMT, (CORINFO_METHOD_HANDLE)pMD, signature, pDictDomain);
3975         AddToGenericHandleCache(&key, (HashDatum)result);
3979 } // JIT_GenericHandleWorker
3981 /*********************************************************************/
3982 // slow helper to tail call from the fast one
// Framed (GC-visible) slow path shared by all JIT_GenericHandle* fast helpers.
// Exactly one of classHnd/methodHnd is non-NULL (see preconditions); it erects
// a helper-method frame and delegates to JIT_GenericHandleWorker.
3983 NOINLINE HCIMPL5(CORINFO_GENERIC_HANDLE, JIT_GenericHandle_Framed,
3984         CORINFO_CLASS_HANDLE classHnd,
3985         CORINFO_METHOD_HANDLE methodHnd,
3987         DWORD dictionaryIndexAndSlot,
3988         CORINFO_MODULE_HANDLE moduleHnd)
3992         PRECONDITION(classHnd != NULL || methodHnd != NULL);
3993         PRECONDITION(classHnd == NULL || methodHnd == NULL);
3996     // Result is a generic handle (in fact, a CORINFO_CLASS_HANDLE, CORINFO_METHOD_HANDLE, or a code pointer)
3997     CORINFO_GENERIC_HANDLE result = NULL;
3999     MethodDesc * pMD = GetMethod(methodHnd);
4000     MethodTable * pMT = TypeHandle(classHnd).AsMethodTable();
4001     Module * pModule = GetModule(moduleHnd);
4004     HELPER_METHOD_FRAME_BEGIN_RET_0();
4006     result = JIT_GenericHandleWorker(pMD, pMT, signature, dictionaryIndexAndSlot, pModule);
4008     HELPER_METHOD_FRAME_END();
4010     _ASSERTE(result != NULL);
4012     // Return the handle
4017 /*********************************************************************/
4018 #include <optsmallperfcritical.h>
// Fast helper: resolve a generic handle for shared generic-method code.
// Lock-free speculative probe of the overflow cache; on miss, tail-calls the
// framed slow helper (dictionary index/slot unknown here, hence -1/NULL).
4019 HCIMPL2(CORINFO_GENERIC_HANDLE, JIT_GenericHandleMethod, CORINFO_METHOD_HANDLE  methodHnd, LPVOID signature)
4023         PRECONDITION(CheckPointer(methodHnd));
4024         PRECONDITION(GetMethod(methodHnd)->IsRestored());
4025         PRECONDITION(CheckPointer(signature));
4028     JitGenericHandleCacheKey key(NULL, methodHnd, signature);
4030     if (g_pJitGenericHandleCache->GetValueSpeculative(&key,&res))
4031         return (CORINFO_GENERIC_HANDLE) (DictionaryEntry) res;
4033     // Tailcall to the slow helper
4035     return HCCALL5(JIT_GenericHandle_Framed, NULL, methodHnd, signature, -1, NULL);
// R2R variant of JIT_GenericHandleMethod: the signature, dictionary
// index/slot, and module come packaged in a GenericHandleArgs blob.
4039 HCIMPL2(CORINFO_GENERIC_HANDLE, JIT_GenericHandleMethodWithSlotAndModule, CORINFO_METHOD_HANDLE  methodHnd, GenericHandleArgs * pArgs)
4043         PRECONDITION(CheckPointer(methodHnd));
4044         PRECONDITION(GetMethod(methodHnd)->IsRestored());
4045         PRECONDITION(CheckPointer(pArgs));
4048     JitGenericHandleCacheKey key(NULL, methodHnd, pArgs->signature);
4050     if (g_pJitGenericHandleCache->GetValueSpeculative(&key, &res))
4051         return (CORINFO_GENERIC_HANDLE)(DictionaryEntry)res;
4053     // Tailcall to the slow helper
4055     return HCCALL5(JIT_GenericHandle_Framed, NULL, methodHnd, pArgs->signature, pArgs->dictionaryIndexAndSlot, pArgs->module);
4058 #include <optdefault.h>
4060 /*********************************************************************/
// Same as JIT_GenericHandleMethod but additionally records an IBC access to
// the method desc for profile-guided layout (IBC logging builds).
4061 HCIMPL2(CORINFO_GENERIC_HANDLE, JIT_GenericHandleMethodLogging, CORINFO_METHOD_HANDLE  methodHnd, LPVOID signature)
4065         PRECONDITION(CheckPointer(methodHnd));
4066         PRECONDITION(GetMethod(methodHnd)->IsRestored());
4067         PRECONDITION(CheckPointer(signature));
4070     g_IBCLogger.LogMethodDescAccess(GetMethod(methodHnd));
4072     JitGenericHandleCacheKey key(NULL, methodHnd, signature);
4074     if (g_pJitGenericHandleCache->GetValueSpeculative(&key,&res))
4075         return (CORINFO_GENERIC_HANDLE) (DictionaryEntry) res;
4077     // Tailcall to the slow helper
4079     return HCCALL5(JIT_GenericHandle_Framed, NULL, methodHnd, signature, -1, NULL);
4083 /*********************************************************************/
4084 #include <optsmallperfcritical.h>
// Fast helper: resolve a generic handle for shared generic-class code,
// keyed by the class handle. Mirrors JIT_GenericHandleMethod.
4085 HCIMPL2(CORINFO_GENERIC_HANDLE, JIT_GenericHandleClass, CORINFO_CLASS_HANDLE classHnd, LPVOID signature)
4089         PRECONDITION(CheckPointer(classHnd));
4090         PRECONDITION(TypeHandle(classHnd).IsRestored());
4091         PRECONDITION(CheckPointer(signature));
4094     JitGenericHandleCacheKey key(classHnd, NULL, signature);
4096     if (g_pJitGenericHandleCache->GetValueSpeculative(&key,&res))
4097         return (CORINFO_GENERIC_HANDLE) (DictionaryEntry) res;
4099     // Tailcall to the slow helper
4101     return HCCALL5(JIT_GenericHandle_Framed, classHnd, NULL, signature, -1, NULL);
// R2R variant of JIT_GenericHandleClass: signature, dictionary index/slot
// and module are supplied via a GenericHandleArgs blob.
4105 HCIMPL2(CORINFO_GENERIC_HANDLE, JIT_GenericHandleClassWithSlotAndModule, CORINFO_CLASS_HANDLE classHnd, GenericHandleArgs * pArgs)
4109         PRECONDITION(CheckPointer(classHnd));
4110         PRECONDITION(TypeHandle(classHnd).IsRestored());
4111         PRECONDITION(CheckPointer(pArgs));
4114     JitGenericHandleCacheKey key(classHnd, NULL, pArgs->signature);
4116     if (g_pJitGenericHandleCache->GetValueSpeculative(&key, &res))
4117         return (CORINFO_GENERIC_HANDLE)(DictionaryEntry)res;
4119     // Tailcall to the slow helper
4121     return HCCALL5(JIT_GenericHandle_Framed, classHnd, NULL, pArgs->signature, pArgs->dictionaryIndexAndSlot, pArgs->module);
4124 #include <optdefault.h>
4126 /*********************************************************************/
// Same as JIT_GenericHandleClass but additionally records an IBC access to
// the method table (profile-guided layout builds).
4127 HCIMPL2(CORINFO_GENERIC_HANDLE, JIT_GenericHandleClassLogging, CORINFO_CLASS_HANDLE classHnd, LPVOID signature)
4131         PRECONDITION(CheckPointer(classHnd));
4132         PRECONDITION(TypeHandle(classHnd).IsRestored());
4133         PRECONDITION(CheckPointer(signature));
4136     g_IBCLogger.LogMethodTableAccess((MethodTable *)classHnd);
4138     JitGenericHandleCacheKey key(classHnd, NULL, signature);
4140     if (g_pJitGenericHandleCache->GetValueSpeculative(&key,&res))
4141         return (CORINFO_GENERIC_HANDLE) (DictionaryEntry) res;
4143     // Tailcall to the slow helper
4145     return HCCALL5(JIT_GenericHandle_Framed, classHnd, NULL, signature, -1, NULL);
4149 /*********************************************************************/
4150 // Resolve a virtual method at run-time, either because of
4151 // aggressive backpatching or because the call is to a generic
4152 // method which is itself virtual.
4154 // classHnd is the actual run-time type for the call is made.
4155 // methodHnd is the exact (instantiated) method descriptor corresponding to the
4156 // static method signature (i.e. might be for a superclass of classHnd)
4158 // slow helper to tail call from the fast one
// Framed slow path for ldvirtftn/generic virtual dispatch: resolves the
// target code address for (object type, classHnd, methodHnd), then caches
// the answer in the overflow cache keyed by the object's MethodTable.
// Throws NullReferenceException for a null 'this'.
4159 NOINLINE HCIMPL3(CORINFO_MethodPtr, JIT_VirtualFunctionPointer_Framed, Object * objectUNSAFE,
4160                                                        CORINFO_CLASS_HANDLE classHnd,
4161                                                        CORINFO_METHOD_HANDLE methodHnd)
4165     // The address of the method that's returned.
4166     CORINFO_MethodPtr     addr = NULL;
4168     OBJECTREF objRef = ObjectToOBJECTREF(objectUNSAFE);
4170     HELPER_METHOD_FRAME_BEGIN_RET_1(objRef);    // Set up a frame
4173         COMPlusThrow(kNullReferenceException);
4175     // This is the static method descriptor describing the call.
4176     // It is not the destination of the call, which we must compute.
4177     MethodDesc* pStaticMD = (MethodDesc*) methodHnd;
4178     TypeHandle staticTH(classHnd);
4180     pStaticMD->CheckRestore();
4182     // MDIL: If IL specifies callvirt/ldvirtftn it remains a "virtual" instruction
4183     // even if the target is an instance method at MDIL generation time because
4184     // we want to keep MDIL as resilient as IL. Right now we can end up here with
4185     // non-virtual generic methods called from a "shared generic code".
4186     // As soon as this deficiency is fixed in the binder we can get rid of this test.
4187     if (!pStaticMD->IsVtableMethod())
// Non-virtual target: no dispatch needed, take the direct entry point.
4189         addr = (CORINFO_MethodPtr) pStaticMD->GetMultiCallableAddrOfCode();
4194         // This is the new way of resolving a virtual call, including generic virtual methods.
4195         // The code is now also used by reflection, remoting etc.
4196         addr = (CORINFO_MethodPtr) pStaticMD->GetMultiCallableAddrOfVirtualizedCode(&objRef, staticTH);
4199         // This is not a critical entry - no need to specify appdomain affinity
4200         JitGenericHandleCacheKey key(objRef->GetMethodTable(), classHnd, methodHnd);
4201         AddToGenericHandleCache(&key, (HashDatum)addr);
4204     HELPER_METHOD_FRAME_END();
// Helper for ldtoken on a field: stores the managed RuntimeFieldHandle-related
// object for 'field' into *destPtr (GC-safe write via SetObjectReference).
4210 HCIMPL2(VOID, JIT_GetRuntimeFieldHandle, Object ** destPtr, CORINFO_FIELD_HANDLE field)
4214     HELPER_METHOD_FRAME_BEGIN_0();
4216     FieldDesc *pField = (FieldDesc *)field;
4217     SetObjectReference((OBJECTREF*) destPtr,
4218                        pField->GetStubFieldInfo());
4220     HELPER_METHOD_FRAME_END();
// Returns (rather than stores) the managed stub object for a field handle.
// Same underlying lookup as JIT_GetRuntimeFieldHandle.
4224 HCIMPL1(Object*, JIT_GetRuntimeFieldStub, CORINFO_FIELD_HANDLE field)
4228     OBJECTREF stubRuntimeField = NULL;
4230     HELPER_METHOD_FRAME_BEGIN_RET_0();    // Set up a frame
4232     FieldDesc *pField = (FieldDesc *)field;
4233     stubRuntimeField = (OBJECTREF)pField->GetStubFieldInfo();
4235     HELPER_METHOD_FRAME_END();
4237     return (OBJECTREFToObject(stubRuntimeField));
// Helper for ldtoken on a method: stores the managed stub-method-info object
// for 'method' into *destPtr (GC-safe write via SetObjectReference).
4241 HCIMPL2(VOID, JIT_GetRuntimeMethodHandle, Object ** destPtr, CORINFO_METHOD_HANDLE method)
4245     HELPER_METHOD_FRAME_BEGIN_0();
4247     MethodDesc *pMethod = (MethodDesc *)method;
4248     SetObjectReference((OBJECTREF*) destPtr,
4249                        pMethod->GetStubMethodInfo());
4251     HELPER_METHOD_FRAME_END();
// Returns (rather than stores) the managed stub object for a method handle.
// Same underlying lookup as JIT_GetRuntimeMethodHandle.
4255 HCIMPL1(Object*, JIT_GetRuntimeMethodStub, CORINFO_METHOD_HANDLE method)
4259     OBJECTREF stubRuntimeMethod = NULL;
4261     HELPER_METHOD_FRAME_BEGIN_RET_0();    // Set up a frame
4263     MethodDesc *pMethod = (MethodDesc *)method;
4264     stubRuntimeMethod = (OBJECTREF)pMethod->GetStubMethodInfo();
4266     HELPER_METHOD_FRAME_END();
4268     return (OBJECTREFToObject(stubRuntimeMethod));
// Helper for ldtoken on a type: stores the managed RuntimeType object for
// 'type' into *destPtr. Fast path: a non-TypeDesc handle whose managed class
// object already exists needs no frame; otherwise a frame is erected and the
// object is (possibly) allocated.
4272 HCIMPL2(VOID, JIT_GetRuntimeTypeHandle, Object ** destPtr, CORINFO_CLASS_HANDLE type)
4276     TypeHandle typeHnd(type);
4278     if (!typeHnd.IsTypeDesc())
4280         // Most common... and fastest case
4281         OBJECTREF typePtr = typeHnd.AsMethodTable()->GetManagedClassObjectIfExists();
4282         if (typePtr != NULL)
4284             SetObjectReference((OBJECTREF*) destPtr,
4290     HELPER_METHOD_FRAME_BEGIN_0();
4292     SetObjectReference((OBJECTREF*) destPtr,
4293                        typeHnd.GetManagedClassObject());
4295     HELPER_METHOD_FRAME_END();
// Framed slow path of JIT_GetRuntimeType: handles TypeDesc/array handles and
// first-time creation of the managed RuntimeType object.
4300 NOINLINE HCIMPL1(Object*, JIT_GetRuntimeType_Framed, CORINFO_CLASS_HANDLE type)
4304     TypeHandle typeHandle(type);
4306     // Array/other type handle case.
4307     OBJECTREF refType = typeHandle.GetManagedClassObjectFast();
4308     if (refType == NULL)
// Not yet created: erect a frame (allocation may GC) and build it.
4310         HELPER_METHOD_FRAME_BEGIN_RET_1(refType);
4311         refType = typeHandle.GetManagedClassObject();
4312         HELPER_METHOD_FRAME_END();
4315     return OBJECTREFToObject(refType);
4319 #include <optsmallperfcritical.h>
// Fast helper returning the managed RuntimeType object for a class handle.
// Frameless fast path when the object already exists; otherwise tail-calls
// the framed helper above.
4320 HCIMPL1(Object*, JIT_GetRuntimeType, CORINFO_CLASS_HANDLE type)
4324     TypeHandle typeHnd(type);
4326     if (!typeHnd.IsTypeDesc())
4328         // Most common... and fastest case
4329         OBJECTREF typePtr = typeHnd.AsMethodTable()->GetManagedClassObjectIfExists();
4330         if (typePtr != NULL)
4332             return OBJECTREFToObject(typePtr);
4337     return HCCALL1(JIT_GetRuntimeType_Framed, type);
// Null-tolerant wrapper over JIT_GetRuntimeType (non-null handles are
// forwarded unchanged; the null handling is in the elided lines above the call).
4341 HCIMPL1(Object*, JIT_GetRuntimeType_MaybeNull, CORINFO_CLASS_HANDLE type)
4349     return HCCALL1(JIT_GetRuntimeType, type);
4352 #include <optdefault.h>
4354 /*********************************************************************/
4355 #include <optsmallperfcritical.h>
// Fast helper for ldvirtftn/generic virtual calls: probes the overflow cache
// keyed by the object's exact MethodTable; on miss, tail-calls the framed
// resolver (which also performs the null-'this' check).
4356 HCIMPL3(CORINFO_MethodPtr, JIT_VirtualFunctionPointer, Object * objectUNSAFE,
4357                                                        CORINFO_CLASS_HANDLE classHnd,
4358                                                        CORINFO_METHOD_HANDLE methodHnd)
4362     OBJECTREF objRef = ObjectToOBJECTREF(objectUNSAFE);
4366         JitGenericHandleCacheKey key(objRef->GetMethodTable(), classHnd, methodHnd);
4368         if (g_pJitGenericHandleCache->GetValueSpeculative(&key,&res))
4369             return (CORINFO_GENERIC_HANDLE)res;
4372     // Tailcall to the slow helper
4374     return HCCALL3(JIT_VirtualFunctionPointer_Framed, OBJECTREFToObject(objRef), classHnd, methodHnd);
// Variant of JIT_VirtualFunctionPointer where class/method handles arrive in
// a VirtualFunctionPointerArgs blob (used when the JIT passes args indirectly).
4378 HCIMPL2(CORINFO_MethodPtr, JIT_VirtualFunctionPointer_Dynamic, Object * objectUNSAFE, VirtualFunctionPointerArgs * pArgs)
4382     OBJECTREF objRef = ObjectToOBJECTREF(objectUNSAFE);
4386         JitGenericHandleCacheKey key(objRef->GetMethodTable(), pArgs->classHnd, pArgs->methodHnd);
4388         if (g_pJitGenericHandleCache->GetValueSpeculative(&key,&res))
4389             return (CORINFO_GENERIC_HANDLE)res;
4392     // Tailcall to the slow helper
4394     return HCCALL3(JIT_VirtualFunctionPointer_Framed, OBJECTREFToObject(objRef), pArgs->classHnd, pArgs->methodHnd);
4398 #include <optdefault.h>
4400 // Helper for synchronized static methods in shared generics code
4401 #include <optsmallperfcritical.h>
// Returns the (exact, non-shared) method table of the method's owning class,
// used as the lock target for synchronized static methods.
4402 HCIMPL1(CORINFO_CLASS_HANDLE, JIT_GetClassFromMethodParam, CORINFO_METHOD_HANDLE methHnd_)
4405         PRECONDITION(methHnd_ != NULL);
4408     MethodDesc *  pMD = (MethodDesc*)  methHnd_;
4410     MethodTable * pMT = pMD->GetMethodTable();
// Shared instantiations must have been resolved to an exact type before here.
4411     _ASSERTE(!pMT->IsSharedByGenericInstantiations());
4413     return((CORINFO_CLASS_HANDLE)pMT);
4415 #include <optdefault.h>
4419 //========================================================================
4423 //========================================================================
4425 /*********************************************************************/
// Framed slow path for Monitor.Enter helpers: throws on null, pulses GC mode
// if a safepoint is pending, takes the object monitor (blocking), and records
// success through pbLockTaken when the caller supplied one.
4426 NOINLINE static void JIT_MonEnter_Helper(Object* obj, BYTE* pbLockTaken, LPVOID __me)
4428     FC_INNER_PROLOG_NO_ME_SETUP();
4430     OBJECTREF objRef = ObjectToOBJECTREF(obj);
4432     // Monitor helpers are used as both hcalls and fcalls, thus we need exact depth.
4433     HELPER_METHOD_FRAME_BEGIN_ATTRIB_1(Frame::FRAME_ATTR_EXACT_DEPTH|Frame::FRAME_ATTR_CAPTURE_DEPTH_2, objRef);
4436         COMPlusThrow(kArgumentNullException);
// pbLockTaken may point into the managed caller's frame; keep it reported to the GC.
4438     GCPROTECT_BEGININTERIOR(pbLockTaken);
4441     Thread *pThread = GetThread();
4442     DWORD lockCount = pThread->m_dwLockCount;
4444     if (GET_THREAD()->CatchAtSafePointOpportunistic())
4446         GET_THREAD()->PulseGCMode();
4448     objRef->EnterObjMonitor();
// Sanity: a fresh acquisition bumps the thread lock count; a recursive one leaves it unchanged.
4449     _ASSERTE ((objRef->GetSyncBlock()->GetMonitor()->GetRecursionLevel() == 1 && pThread->m_dwLockCount == lockCount + 1) ||
4450               pThread->m_dwLockCount == lockCount);
4451     if (pbLockTaken != 0) *pbLockTaken = 1;
4454     HELPER_METHOD_FRAME_END();
4459 /*********************************************************************/
4460 #include <optsmallperfcritical.h>
// Portable Monitor.Enter fast path (worker form with optional lock-taken flag):
// frameless spin attempt first, framed helper on failure.
4462 HCIMPL_MONHELPER(JIT_MonEnterWorker_Portable, Object* obj)
4466     if (obj != nullptr && obj->TryEnterObjMonitorSpinHelper())
4468         MONHELPER_STATE(*pbLockTaken = 1);
4472     FC_INNER_RETURN_VOID(JIT_MonEnter_Helper(obj, MONHELPER_ARG, GetEEFuncEntryPointMacro(JIT_MonEnter)));
// Portable Monitor.Enter fast path without a lock-taken flag.
4476 HCIMPL1(void, JIT_MonEnter_Portable, Object* obj)
4480     if (obj != nullptr && obj->TryEnterObjMonitorSpinHelper())
4485     FC_INNER_RETURN_VOID(JIT_MonEnter_Helper(obj, NULL, GetEEFuncEntryPointMacro(JIT_MonEnter)));
// Reliable Monitor.Enter: caller always supplies pbLockTaken so acquisition
// is observable even if a thread abort interrupts the slow path.
4489 HCIMPL2(void, JIT_MonReliableEnter_Portable, Object* obj, BYTE* pbLockTaken)
4493     if (obj != nullptr && obj->TryEnterObjMonitorSpinHelper())
4499     FC_INNER_RETURN_VOID(JIT_MonEnter_Helper(obj, pbLockTaken, GetEEFuncEntryPointMacro(JIT_MonReliableEnter)));
4503 #include <optdefault.h>
4506 /*********************************************************************/
// Framed slow path for Monitor.TryEnter with timeout: validates arguments
// (null object, timeout < -1), then attempts the timed acquire and reports
// the outcome through *pbLockTaken.
4507 NOINLINE static void JIT_MonTryEnter_Helper(Object* obj, INT32 timeOut, BYTE* pbLockTaken)
4509     FC_INNER_PROLOG(JIT_MonTryEnter);
4511     OBJECTREF objRef = ObjectToOBJECTREF(obj);
4513     // Monitor helpers are used as both hcalls and fcalls, thus we need exact depth.
4514     HELPER_METHOD_FRAME_BEGIN_ATTRIB_1(Frame::FRAME_ATTR_EXACT_DEPTH|Frame::FRAME_ATTR_CAPTURE_DEPTH_2, objRef);
4517         COMPlusThrow(kArgumentNullException);
4520         COMPlusThrow(kArgumentOutOfRangeException);
4522     GCPROTECT_BEGININTERIOR(pbLockTaken);
4524     if (GET_THREAD()->CatchAtSafePointOpportunistic())
4526         GET_THREAD()->PulseGCMode();
4529     BOOL result = objRef->TryEnterObjMonitor(timeOut);
4530     *pbLockTaken = result != FALSE;
4533     HELPER_METHOD_FRAME_END();
4538 #include <optsmallperfcritical.h>
// Portable Monitor.TryEnter fast path: frameless attempt (plus a bounded spin
// on contention) before falling back to the framed helper. Any pending
// safepoint forces the framed path so GC is not delayed.
4539 HCIMPL3(void, JIT_MonTryEnter_Portable, Object* obj, INT32 timeOut, BYTE* pbLockTaken)
4543     AwareLock::EnterHelperResult result;
4544     Thread * pCurThread;
4548         goto FramedLockHelper;
4553         goto FramedLockHelper;
4556     pCurThread = GetThread();
4558     if (pCurThread->CatchAtSafePointOpportunistic())
4560         goto FramedLockHelper;
4563     result = obj->EnterObjMonitorHelper(pCurThread);
4564     if (result == AwareLock::EnterHelperResult_Entered)
4569     if (result == AwareLock::EnterHelperResult_Contention)
// Contended: spin briefly before giving up and erecting a frame.
4576         result = obj->EnterObjMonitorHelperSpin(pCurThread);
4577         if (result == AwareLock::EnterHelperResult_Entered)
4585     FC_INNER_RETURN_VOID(JIT_MonTryEnter_Helper(obj, timeOut, pbLockTaken));
4588 #include <optdefault.h>
4590 /*********************************************************************/
// Framed slow path for Monitor.Exit: throws on null or on releasing a monitor
// the thread doesn't own, clears the lock-taken flag, and honors any pending
// thread abort on the way out (the frame itself is NO_THREAD_ABORT).
4591 NOINLINE static void JIT_MonExit_Helper(Object* obj, BYTE* pbLockTaken)
4593     FC_INNER_PROLOG(JIT_MonExit);
4595     OBJECTREF objRef = ObjectToOBJECTREF(obj);
4597     // Monitor helpers are used as both hcalls and fcalls, thus we need exact depth.
4598     HELPER_METHOD_FRAME_BEGIN_ATTRIB_1(Frame::FRAME_ATTR_NO_THREAD_ABORT|Frame::FRAME_ATTR_EXACT_DEPTH|Frame::FRAME_ATTR_CAPTURE_DEPTH_2, objRef);
4601         COMPlusThrow(kArgumentNullException);
4603     if (!objRef->LeaveObjMonitor())
4604         COMPlusThrow(kSynchronizationLockException);
4606     if (pbLockTaken != 0) *pbLockTaken = 0;
4608     TESTHOOKCALL(AppDomainCanBeUnloaded(DefaultADID,FALSE));
4610     if (GET_THREAD()->IsAbortRequested()) {
4611         GET_THREAD()->HandleThreadAbort();
4614     HELPER_METHOD_FRAME_END();
// Framed completion for Monitor.Exit when the frameless release already
// dropped the lock but a waiter must be signaled. Also honors pending
// thread aborts after signaling.
4619 NOINLINE static void JIT_MonExit_Signal(Object* obj)
4621     FC_INNER_PROLOG(JIT_MonExit);
4623     OBJECTREF objRef = ObjectToOBJECTREF(obj);
4625     // Monitor helpers are used as both hcalls and fcalls, thus we need exact depth.
4626     HELPER_METHOD_FRAME_BEGIN_ATTRIB_1(Frame::FRAME_ATTR_NO_THREAD_ABORT|Frame::FRAME_ATTR_EXACT_DEPTH|Frame::FRAME_ATTR_CAPTURE_DEPTH_2, objRef);
4629     SyncBlock *psb = objRef->PassiveGetSyncBlock();
4631     psb->QuickGetMonitor()->Signal();
4633     TESTHOOKCALL(AppDomainCanBeUnloaded(DefaultADID,FALSE));
4635     if (GET_THREAD()->IsAbortRequested()) {
4636         GET_THREAD()->HandleThreadAbort();
4639     HELPER_METHOD_FRAME_END();
4644 #include <optsmallperfcritical.h>
// Portable Monitor.Exit fast path: frameless release when possible; dispatch
// to the Signal helper when a waiter needs waking, or the full helper for
// errors/contention.
4645 FCIMPL1(void, JIT_MonExit_Portable, Object* obj)
4649     AwareLock::LeaveHelperAction action;
4653         goto FramedLockHelper;
4656     // Handle the simple case without erecting helper frame
4657     action = obj->LeaveObjMonitorHelper(GetThread());
4658     if (action == AwareLock::LeaveHelperAction_None)
4662     if (action == AwareLock::LeaveHelperAction_Signal)
4664         FC_INNER_RETURN_VOID(JIT_MonExit_Signal(obj));
4668     FC_INNER_RETURN_VOID(JIT_MonExit_Helper(obj, NULL));
// Worker form of Monitor.Exit with a lock-taken flag: a clear flag means the
// matching Enter never succeeded, so the exit is a no-op.
4672 HCIMPL_MONHELPER(JIT_MonExitWorker_Portable, Object* obj)
4676     MONHELPER_STATE(_ASSERTE(pbLockTaken != NULL));
4677     MONHELPER_STATE(if (*pbLockTaken == 0) return;)
4679     AwareLock::LeaveHelperAction action;
4683         goto FramedLockHelper;
4686     // Handle the simple case without erecting helper frame
4687     action = obj->LeaveObjMonitorHelper(GetThread());
4688     if (action == AwareLock::LeaveHelperAction_None)
4690         MONHELPER_STATE(*pbLockTaken = 0;)
4693     if (action == AwareLock::LeaveHelperAction_Signal)
4695         MONHELPER_STATE(*pbLockTaken = 0;)
4696         FC_INNER_RETURN_VOID(JIT_MonExit_Signal(obj));
4700     FC_INNER_RETURN_VOID(JIT_MonExit_Helper(obj, MONHELPER_ARG));
4703 #include <optdefault.h>
4705 /*********************************************************************/
// Framed slow path for entering the class-level AwareLock used by
// synchronized static methods.
4706 NOINLINE static void JIT_MonEnterStatic_Helper(AwareLock *lock, BYTE* pbLockTaken)
4708     // The following makes sure that Monitor.Enter shows up on thread abort
4709     // stack walks (otherwise Monitor.Enter called within a CER can block a
4710     // thread abort indefinitely). Setting the __me internal variable (normally
4711     // only set for fcalls) will cause the helper frame below to be able to
4712     // backtranslate into the method desc for the Monitor.Enter fcall.
4713     FC_INNER_PROLOG(JIT_MonEnter);
4715     // Monitor helpers are used as both hcalls and fcalls, thus we need exact depth.
4716     HELPER_METHOD_FRAME_BEGIN_ATTRIB_NOPOLL(Frame::FRAME_ATTR_EXACT_DEPTH|Frame::FRAME_ATTR_CAPTURE_DEPTH_2);
4718     MONHELPER_STATE(*pbLockTaken = 1;)
4719     HELPER_METHOD_FRAME_END_POLL();
4724 #include <optsmallperfcritical.h>
// Portable fast path for synchronized static methods: frameless TryEnterHelper
// on the class AwareLock, framed helper on failure or pending safepoint.
4725 HCIMPL_MONHELPER(JIT_MonEnterStatic_Portable, AwareLock *lock)
4731     MONHELPER_STATE(_ASSERTE(pbLockTaken != NULL && *pbLockTaken == 0));
4733     Thread *pCurThread = GetThread();
4735     if (pCurThread->CatchAtSafePointOpportunistic())
4737         goto FramedLockHelper;
4740     if (lock->TryEnterHelper(pCurThread))
4742 #if defined(_DEBUG) && defined(TRACK_SYNC)
4743         // The best place to grab this is from the ECall frame
4744         Frame * pFrame = pCurThread->GetFrame();
4745         int caller = (pFrame && pFrame != FRAME_TOP ? (int) pFrame->GetReturnAddress() : -1);
4746         pCurThread->m_pTrackSync->EnterSync(caller, lock);
4749         MONHELPER_STATE(*pbLockTaken = 1;)
4754     FC_INNER_RETURN_VOID(JIT_MonEnterStatic_Helper(lock, MONHELPER_ARG));
4757 #include <optdefault.h>
4759 /*********************************************************************/
// Framed slow path for releasing a synchronized-static AwareLock: throws
// SynchronizationLockException on an unbalanced exit, clears the lock-taken
// flag, and honors a pending thread abort afterwards.
4760 NOINLINE static void JIT_MonExitStatic_Helper(AwareLock *lock, BYTE* pbLockTaken)
4762     FC_INNER_PROLOG(JIT_MonExit);
4764     HELPER_METHOD_FRAME_BEGIN_ATTRIB(Frame::FRAME_ATTR_NO_THREAD_ABORT|Frame::FRAME_ATTR_EXACT_DEPTH|Frame::FRAME_ATTR_CAPTURE_DEPTH_2);
4766     // Error, yield or contention
4768         COMPlusThrow(kSynchronizationLockException);
4769     MONHELPER_STATE(*pbLockTaken = 0;)
4771     TESTHOOKCALL(AppDomainCanBeUnloaded(DefaultADID,FALSE));
4772     if (GET_THREAD()->IsAbortRequested()) {
4773         GET_THREAD()->HandleThreadAbort();
4776     HELPER_METHOD_FRAME_END();
// Framed completion for the static-lock exit when a waiter must be signaled
// after the frameless release.
4781 NOINLINE static void JIT_MonExitStatic_Signal(AwareLock *lock)
4783     FC_INNER_PROLOG(JIT_MonExit);
4785     HELPER_METHOD_FRAME_BEGIN_ATTRIB(Frame::FRAME_ATTR_NO_THREAD_ABORT|Frame::FRAME_ATTR_EXACT_DEPTH|Frame::FRAME_ATTR_CAPTURE_DEPTH_2);
4789     TESTHOOKCALL(AppDomainCanBeUnloaded(DefaultADID,FALSE));
4790     if (GET_THREAD()->IsAbortRequested()) {
4791         GET_THREAD()->HandleThreadAbort();
4794     HELPER_METHOD_FRAME_END();
4799 #include <optsmallperfcritical.h>
// Portable fast path for releasing a synchronized-static AwareLock; mirrors
// JIT_MonExitWorker_Portable but operates on the lock directly.
4800 HCIMPL_MONHELPER(JIT_MonExitStatic_Portable, AwareLock *lock)
4806     MONHELPER_STATE(_ASSERTE(pbLockTaken != NULL));
4807     MONHELPER_STATE(if (*pbLockTaken == 0) return;)
4809     // Handle the simple case without erecting helper frame
4810     AwareLock::LeaveHelperAction action = lock->LeaveHelper(GetThread());
4811     if (action == AwareLock::LeaveHelperAction_None)
4813         MONHELPER_STATE(*pbLockTaken = 0;)
4817     if (action == AwareLock::LeaveHelperAction_Signal)
4819         MONHELPER_STATE(*pbLockTaken = 0;)
4820         FC_INNER_RETURN_VOID(JIT_MonExitStatic_Signal(lock));
4823     FC_INNER_RETURN_VOID(JIT_MonExitStatic_Helper(lock, MONHELPER_ARG));
4826 #include <optdefault.h>
// Returns the AwareLock monitor for a type's managed class object — the lock
// object used by synchronized static methods. May allocate the class object,
// hence the helper frame.
4828 HCIMPL1(void *, JIT_GetSyncFromClassHandle, CORINFO_CLASS_HANDLE typeHnd_)
4831         PRECONDITION(typeHnd_ != NULL);
4834     void * result = NULL;
4836     HELPER_METHOD_FRAME_BEGIN_RET_NOPOLL();    // Set up a frame
4838     TypeHandle typeHnd(typeHnd_);
4839     MethodTable *pMT = typeHnd.AsMethodTable();
4841     OBJECTREF ref = pMT->GetManagedClassObject();
4844     result = (void*)ref->GetSyncBlock()->GetMonitor();
4846     HELPER_METHOD_FRAME_END();
4853 //========================================================================
4855 // EXCEPTION HELPERS
4857 //========================================================================
4859 // In general, we want to use COMPlusThrow to throw exceptions. However,
4860 // the IL_Throw helper is a special case. Here, we're called from
4861 // managed code. We have a guarantee that the first FS:0 handler
4862 // is our COMPlusFrameHandler. We could call COMPlusThrow(), which pushes
4863 // another handler, but there is a significant (10% on JGFExceptionBench)
4864 // performance gain if we avoid this by calling RaiseTheException()
4868 /*************************************************************/
// Implements the IL 'throw' instruction: wraps non-Exception objects,
// resets/clears the stack trace as appropriate, and raises the exception via
// RaiseTheExceptionInternalOnly (see the performance note in the comments above).
4870 HCIMPL1(void, IL_Throw,  Object* obj)
4874     /* Make no assumptions about the current machine state */
4875     ResetCurrentContext();
4877     FC_GC_POLL_NOT_NEEDED();    // throws always open up for GC
4879     HELPER_METHOD_FRAME_BEGIN_ATTRIB_NOPOLL(Frame::FRAME_ATTR_EXCEPTION);    // Set up a frame
4881     OBJECTREF oref = ObjectToOBJECTREF(obj);
4883 #if defined(_DEBUG) && defined(_TARGET_X86_)
4884     __helperframe.InsureInit(false, NULL);
4885     g_ExceptionEIP = (LPVOID)__helperframe.GetReturnAddress();
4886 #endif // defined(_DEBUG) && defined(_TARGET_X86_)
// Throwing a null reference raises NullReferenceException per ECMA-335.
4890         COMPlusThrow(kNullReferenceException);
// Objects not derived from System.Exception get wrapped so the EH machinery
// always deals with an Exception-derived throwable.
4892     if (!IsException(oref->GetMethodTable()))
4894         GCPROTECT_BEGIN(oref);
4896         WrapNonCompliantException(&oref);
4901     {   // We know that the object derives from System.Exception
4903         // If the flag indicating ForeignExceptionRaise has been set,
4904         // then do not clear the "_stackTrace" field of the exception object.
4905         if (GetThread()->GetExceptionState()->IsRaisingForeignException())
4907             ((EXCEPTIONREF)oref)->SetStackTraceString(NULL);
4911             ((EXCEPTIONREF)oref)->ClearStackTracePreservingRemoteStackTrace();
4915 #ifdef FEATURE_CORRUPTING_EXCEPTIONS
4916     if (!g_pConfig->LegacyCorruptedStateExceptionsPolicy())
4918         // Within the VM, we could have thrown and caught a managed exception. This is done by
4919         // RaiseTheException that will flag that exception's corruption severity to be used
4920         // incase it leaks out to managed code.
4922         // If it does not leak out, but ends up calling into managed code that throws,
4923         // we will come here. In such a case, simply reset the corruption-severity
4924         // since we want the exception being thrown to have its correct severity set
4925         // when CLR's managed code exception handler sets it.
4927         ThreadExceptionState *pExState = GetThread()->GetExceptionState();
4928         pExState->SetLastActiveExceptionCorruptionSeverity(NotSet);
4930 #endif // FEATURE_CORRUPTING_EXCEPTIONS
4932     RaiseTheExceptionInternalOnly(oref, FALSE);
4934     HELPER_METHOD_FRAME_END();
4938 /*************************************************************/
// IL_Rethrow: JIT helper backing the IL 'rethrow' opcode. Re-raises the
// thread's current throwable; a rethrow with no throwable in flight is
// invalid IL and is reported as InvalidProgramException.
4940 HCIMPL0(void, IL_Rethrow)
4944 FC_GC_POLL_NOT_NEEDED(); // throws always open up for GC
4946 HELPER_METHOD_FRAME_BEGIN_ATTRIB_NOPOLL(Frame::FRAME_ATTR_EXCEPTION); // Set up a frame
4948 OBJECTREF throwable = GetThread()->GetThrowable();
4949 if (throwable != NULL)
4951 RaiseTheExceptionInternalOnly(throwable, TRUE); // TRUE => this is a rethrow
4955 // This can only be the result of bad IL (or some internal EE failure).
4956 _ASSERTE(!"No throwable on rethrow");
4957 RealCOMPlusThrow(kInvalidProgramException, (UINT)IDS_EE_RETHROW_NOT_ALLOWED);
4960 HELPER_METHOD_FRAME_END();
4964 /*********************************************************************/
// JIT_RngChkFail: throws IndexOutOfRangeException when a JIT-inserted
// array range check fails.
4965 HCIMPL0(void, JIT_RngChkFail)
4969 /* Make no assumptions about the current machine state */
4970 ResetCurrentContext();
4972 FC_GC_POLL_NOT_NEEDED(); // throws always open up for GC
4974 HELPER_METHOD_FRAME_BEGIN_ATTRIB_NOPOLL(Frame::FRAME_ATTR_EXCEPTION); // Set up a frame
4976 COMPlusThrow(kIndexOutOfRangeException);
4978 HELPER_METHOD_FRAME_END();
4982 /*********************************************************************/
// JIT_ThrowArgumentException: throws ArgumentException on behalf of jitted code.
4983 HCIMPL0(void, JIT_ThrowArgumentException)
4987 /* Make no assumptions about the current machine state */
4988 ResetCurrentContext();
4990 FC_GC_POLL_NOT_NEEDED(); // throws always open up for GC
4992 HELPER_METHOD_FRAME_BEGIN_ATTRIB_NOPOLL(Frame::FRAME_ATTR_EXCEPTION); // Set up a frame
4994 COMPlusThrow(kArgumentException);
4996 HELPER_METHOD_FRAME_END();
5000 /*********************************************************************/
// JIT_ThrowArgumentOutOfRangeException: throws ArgumentOutOfRangeException
// on behalf of jitted code.
5001 HCIMPL0(void, JIT_ThrowArgumentOutOfRangeException)
5005 /* Make no assumptions about the current machine state */
5006 ResetCurrentContext();
5008 FC_GC_POLL_NOT_NEEDED(); // throws always open up for GC
5010 HELPER_METHOD_FRAME_BEGIN_ATTRIB_NOPOLL(Frame::FRAME_ATTR_EXCEPTION); // Set up a frame
5012 COMPlusThrow(kArgumentOutOfRangeException);
5014 HELPER_METHOD_FRAME_END();
5018 /*********************************************************************/
// JIT_ThrowNotImplementedException: throws NotImplementedException
// on behalf of jitted code.
5019 HCIMPL0(void, JIT_ThrowNotImplementedException)
5023 /* Make no assumptions about the current machine state */
5024 ResetCurrentContext();
5026 FC_GC_POLL_NOT_NEEDED(); // throws always open up for GC
5028 HELPER_METHOD_FRAME_BEGIN_ATTRIB_NOPOLL(Frame::FRAME_ATTR_EXCEPTION); // Set up a frame
5030 COMPlusThrow(kNotImplementedException);
5032 HELPER_METHOD_FRAME_END();
5036 /*********************************************************************/
// JIT_ThrowPlatformNotSupportedException: throws PlatformNotSupportedException
// on behalf of jitted code.
5037 HCIMPL0(void, JIT_ThrowPlatformNotSupportedException)
5041 /* Make no assumptions about the current machine state */
5042 ResetCurrentContext();
5044 FC_GC_POLL_NOT_NEEDED(); // throws always open up for GC
5046 HELPER_METHOD_FRAME_BEGIN_ATTRIB_NOPOLL(Frame::FRAME_ATTR_EXCEPTION); // Set up a frame
5048 COMPlusThrow(kPlatformNotSupportedException);
5050 HELPER_METHOD_FRAME_END();
5054 /*********************************************************************/
// JIT_ThrowTypeNotSupportedException: throws NotSupportedException with the
// Arg_TypeNotSupported resource message on behalf of jitted code.
5055 HCIMPL0(void, JIT_ThrowTypeNotSupportedException)
5059 /* Make no assumptions about the current machine state */
5060 ResetCurrentContext();
5062 FC_GC_POLL_NOT_NEEDED(); // throws always open up for GC
5064 HELPER_METHOD_FRAME_BEGIN_ATTRIB_NOPOLL(Frame::FRAME_ATTR_EXCEPTION); // Set up a frame
5066 COMPlusThrow(kNotSupportedException, W("Arg_TypeNotSupported"));
5068 HELPER_METHOD_FRAME_END();
5072 /*********************************************************************/
// JIT_Overflow: throws OverflowException for checked arithmetic
// (IL add.ovf/mul.ovf/conv.ovf etc.) on behalf of jitted code.
5073 HCIMPL0(void, JIT_Overflow)
5077 /* Make no assumptions about the current machine state */
5078 ResetCurrentContext();
5080 FC_GC_POLL_NOT_NEEDED(); // throws always open up for GC
5082 HELPER_METHOD_FRAME_BEGIN_ATTRIB_NOPOLL(Frame::FRAME_ATTR_EXCEPTION); // Set up a frame
5084 COMPlusThrow(kOverflowException);
5086 HELPER_METHOD_FRAME_END();
5090 /*********************************************************************/
// JIT_ThrowDivZero: throws DivideByZeroException on behalf of jitted code.
5091 HCIMPL0(void, JIT_ThrowDivZero)
5095 /* Make no assumptions about the current machine state */
5096 ResetCurrentContext();
5098 FC_GC_POLL_NOT_NEEDED(); // throws always open up for GC
5100 HELPER_METHOD_FRAME_BEGIN_ATTRIB_NOPOLL(Frame::FRAME_ATTR_EXCEPTION); // Set up a frame
5102 COMPlusThrow(kDivideByZeroException);
5104 HELPER_METHOD_FRAME_END();
5108 /*********************************************************************/
// JIT_ThrowNullRef: throws NullReferenceException on behalf of jitted code.
5109 HCIMPL0(void, JIT_ThrowNullRef)
5113 /* Make no assumptions about the current machine state */
5114 ResetCurrentContext();
5116 FC_GC_POLL_NOT_NEEDED(); // throws always open up for GC
5118 HELPER_METHOD_FRAME_BEGIN_ATTRIB_NOPOLL(Frame::FRAME_ATTR_EXCEPTION); // Set up a frame
5120 COMPlusThrow(kNullReferenceException);
5122 HELPER_METHOD_FRAME_END();
5126 /*********************************************************************/
// IL_VerificationError: throws VerificationException when unverifiable IL
// is executed. The ilOffset parameter identifies the offending IL offset
// but is not otherwise consumed in the visible body.
5127 HCIMPL1(void, IL_VerificationError, int ilOffset)
5131 FC_GC_POLL_NOT_NEEDED(); // throws always open up for GC
5132 HELPER_METHOD_FRAME_BEGIN_ATTRIB_NOPOLL(Frame::FRAME_ATTR_EXCEPTION); // Set up a frame
5134 COMPlusThrow(kVerificationException);
5136 HELPER_METHOD_FRAME_END();
5140 /*********************************************************************/
// JIT_SecurityUnmanagedCodeException: throws SecurityException. The class
// handle parameter is not consumed in the visible body.
5141 HCIMPL1(void, JIT_SecurityUnmanagedCodeException, CORINFO_CLASS_HANDLE typeHnd_)
5145 FC_GC_POLL_NOT_NEEDED(); // throws always open up for GC
5147 HELPER_METHOD_FRAME_BEGIN_ATTRIB_NOPOLL(Frame::FRAME_ATTR_EXCEPTION); // Set up a frame
5149 COMPlusThrow(kSecurityException);
5151 HELPER_METHOD_FRAME_END();
5155 /*********************************************************************/
// Maps a CORINFO_* exception index (from corinfo.h) to the VM's
// RuntimeExceptionKind via a constant lookup table. The _ASSERTEs below
// spot-check that the table order stays in sync with the corinfo.h enum.
// NOTE(review): several table initializers (e.g. kOverflowException,
// kRankException, kArgumentException) are elided from this view but are
// referenced by the asserts — the full table has CORINFO_Exception_Count entries.
5156 static RuntimeExceptionKind MapCorInfoExceptionToRuntimeExceptionKind(unsigned exceptNum)
5158 LIMITED_METHOD_CONTRACT;
5160 static const RuntimeExceptionKind map[CORINFO_Exception_Count] =
5162 kNullReferenceException,
5163 kDivideByZeroException,
5164 kInvalidCastException,
5165 kIndexOutOfRangeException,
5167 kSynchronizationLockException,
5168 kArrayTypeMismatchException,
5170 kArgumentNullException,
5174 // spot check of the array above
5175 _ASSERTE(map[CORINFO_NullReferenceException] == kNullReferenceException);
5176 _ASSERTE(map[CORINFO_DivideByZeroException] == kDivideByZeroException);
5177 _ASSERTE(map[CORINFO_IndexOutOfRangeException] == kIndexOutOfRangeException);
5178 _ASSERTE(map[CORINFO_OverflowException] == kOverflowException);
5179 _ASSERTE(map[CORINFO_SynchronizationLockException] == kSynchronizationLockException);
5180 _ASSERTE(map[CORINFO_ArrayTypeMismatchException] == kArrayTypeMismatchException);
5181 _ASSERTE(map[CORINFO_RankException] == kRankException);
5182 _ASSERTE(map[CORINFO_ArgumentNullException] == kArgumentNullException);
5183 _ASSERTE(map[CORINFO_ArgumentException] == kArgumentException);
5185 PREFIX_ASSUME(exceptNum < CORINFO_Exception_Count);
5186 return map[exceptNum];
5189 /*********************************************************************/
// JIT_InternalThrow: throws the managed exception corresponding to a
// CORINFO_* exception index supplied by the JIT.
5190 HCIMPL1(void, JIT_InternalThrow, unsigned exceptNum)
5194 FC_GC_POLL_NOT_NEEDED(); // throws always open up for GC
5196 HELPER_METHOD_FRAME_BEGIN_ATTRIB_NOPOLL(Frame::FRAME_ATTR_EXACT_DEPTH);
5197 COMPlusThrow(MapCorInfoExceptionToRuntimeExceptionKind(exceptNum));
5198 HELPER_METHOD_FRAME_END();
5202 /*********************************************************************/
// JIT_InternalThrowFromHelper: like JIT_InternalThrow, but used when the
// throw originates one frame deeper (inside another helper); the
// CAPTURE_DEPTH_2 attribute accounts for the extra frame when walking back
// to managed code.
5203 HCIMPL1(void*, JIT_InternalThrowFromHelper, unsigned exceptNum)
5207 FC_GC_POLL_NOT_NEEDED(); // throws always open up for GC
5208 HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_NOPOLL(Frame::FRAME_ATTR_CAPTURE_DEPTH_2|Frame::FRAME_ATTR_EXACT_DEPTH);
5209 COMPlusThrow(MapCorInfoExceptionToRuntimeExceptionKind(exceptNum));
5210 HELPER_METHOD_FRAME_END();
5215 #ifndef STATUS_STACK_BUFFER_OVERRUN // Not defined yet in CESDK includes
5216 # define STATUS_STACK_BUFFER_OVERRUN ((NTSTATUS)0xC0000409L)
5219 /*********************************************************************
5220 * Kill process without using any potentially corrupted data:
5221 * o Do not throw an exception
5222 * o Do not call any indirect/virtual functions
5223 * o Do not depend on any global data
5225 * This function is used by the security checks for unsafe buffers (VC's -GS checks)
// DoJITFailFast: terminates the process after a /GS (unsafe-buffer) security
// check failure. Deliberately avoids throwing, virtual calls, and global
// data (see header comment above) since the stack may be corrupt.
// On Windows it delegates to the CRT's __report_gsfailure; on FEATURE_PAL
// it fires an ETW FailFast event (if enabled) and calls TerminateProcess.
// NOTE(review): the #if/#else structure here is partially elided in this
// view — confirm the FEATURE_PAL branch boundaries against the full source.
5228 void DoJITFailFast ()
5232 WRAPPER(GC_TRIGGERS);
5236 LOG((LF_ALWAYS, LL_FATALERROR, "Unsafe buffer security check failure: Buffer overrun detected"));
5239 if (g_pConfig->fAssertOnFailFast())
5240 _ASSERTE(!"About to FailFast. set ComPlus_AssertOnFailFast=0 if this is expected");
5244 // Use the function provided by the C runtime.
5246 // Ideally, this function is called directly from managed code so
5247 // that the address of the managed function will be included in the
5248 // error log. However, this function is also used by the stackwalker.
5249 // To keep things simple, we just call it from here.
5250 #if defined(_TARGET_X86_)
5251 __report_gsfailure();
5252 #else // !defined(_TARGET_X86_)
5253 // On AMD64/IA64/ARM, we need to pass a stack cookie, which will be saved in the context record
5254 // that is used to raise the buffer-overrun exception by __report_gsfailure.
5255 __report_gsfailure((ULONG_PTR)0);
5256 #endif // defined(_TARGET_X86_)
5257 #else // FEATURE_PAL
5258 if(ETW_EVENT_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context, FailFast))
5260 // Fire an ETW FailFast event
5261 FireEtwFailFast(W("Unsafe buffer security check failure: Buffer overrun detected"),
5262 (const PVOID)GetThread()->GetFrame()->GetIP(),
5263 STATUS_STACK_BUFFER_OVERRUN,
5264 COR_E_EXECUTIONENGINE,
5265 GetClrInstanceId());
5268 TerminateProcess(GetCurrentProcess(), STATUS_STACK_BUFFER_OVERRUN);
5269 #endif // !FEATURE_PAL
// JIT_FailFast: FCall wrapper invoked by jitted code on a /GS check failure;
// body (elided from this view) presumably forwards to DoJITFailFast — confirm.
5272 HCIMPL0(void, JIT_FailFast)
// JIT_ThrowMethodAccessException: throws MethodAccessException when 'caller'
// is not permitted to call 'callee' (accessibility violation detected at JIT time).
5279 HCIMPL2(void, JIT_ThrowMethodAccessException, CORINFO_METHOD_HANDLE caller, CORINFO_METHOD_HANDLE callee)
5283 FC_GC_POLL_NOT_NEEDED(); // throws always open up for GC
5285 HELPER_METHOD_FRAME_BEGIN_ATTRIB_NOPOLL(Frame::FRAME_ATTR_EXCEPTION); // Set up a frame
5287 MethodDesc* pCallerMD = GetMethod(caller);
5289 _ASSERTE(pCallerMD != NULL);
5290 StaticAccessCheckContext accessContext(pCallerMD);
5292 ThrowMethodAccessException(&accessContext, GetMethod(callee));
5294 HELPER_METHOD_FRAME_END();
// JIT_ThrowFieldAccessException: throws FieldAccessException when 'caller'
// is not permitted to access field 'callee'.
5298 HCIMPL2(void, JIT_ThrowFieldAccessException, CORINFO_METHOD_HANDLE caller, CORINFO_FIELD_HANDLE callee)
5302 FC_GC_POLL_NOT_NEEDED(); // throws always open up for GC
5304 HELPER_METHOD_FRAME_BEGIN_ATTRIB_NOPOLL(Frame::FRAME_ATTR_EXCEPTION); // Set up a frame
5306 MethodDesc* pCallerMD = GetMethod(caller);
5308 _ASSERTE(pCallerMD != NULL);
5309 StaticAccessCheckContext accessContext(pCallerMD);
5311 ThrowFieldAccessException(&accessContext, reinterpret_cast<FieldDesc *>(callee));
5313 HELPER_METHOD_FRAME_END();
// JIT_ThrowClassAccessException: throws TypeAccessException when 'caller'
// is not permitted to access type 'callee'.
5317 HCIMPL2(void, JIT_ThrowClassAccessException, CORINFO_METHOD_HANDLE caller, CORINFO_CLASS_HANDLE callee)
5321 FC_GC_POLL_NOT_NEEDED(); // throws always open up for GC
5323 HELPER_METHOD_FRAME_BEGIN_ATTRIB_NOPOLL(Frame::FRAME_ATTR_EXCEPTION); // Set up a frame
5325 MethodDesc* pCallerMD = GetMethod(caller);
5327 _ASSERTE(pCallerMD != NULL);
5328 StaticAccessCheckContext accessContext(pCallerMD);
5330 ThrowTypeAccessException(&accessContext, TypeHandle(callee).GetMethodTable());
5332 HELPER_METHOD_FRAME_END();
5336 //========================================================================
5340 //========================================================================
// Legacy CAS (Code Access Security) runtime-check helper entry points.
// Bodies are elided from this view; given the removal of CAS enforcement
// in CoreCLR these are presumably no-op or trivially-unreachable stubs —
// confirm against the full source before relying on that.
5342 HCIMPL2(void, JIT_DelegateSecurityCheck, CORINFO_CLASS_HANDLE delegateHnd, CORINFO_METHOD_HANDLE calleeMethodHnd)
5348 HCIMPL4(void, JIT_MethodAccessCheck, CORINFO_METHOD_HANDLE callerMethodHnd, CORINFO_METHOD_HANDLE calleeMethodHnd, CORINFO_CLASS_HANDLE calleeTypeHnd, CorInfoSecurityRuntimeChecks check)
5354 HCIMPL3(void, JIT_FieldAccessCheck, CORINFO_METHOD_HANDLE callerMethodHnd, CORINFO_FIELD_HANDLE calleeFieldHnd, CorInfoSecurityRuntimeChecks check)
5360 HCIMPL3(void, JIT_ClassAccessCheck, CORINFO_METHOD_HANDLE callerMethodHnd, CORINFO_CLASS_HANDLE calleeClassHnd, CorInfoSecurityRuntimeChecks check)
5366 HCIMPL2(void, JIT_Security_Prolog, CORINFO_METHOD_HANDLE methHnd_, OBJECTREF* ppFrameSecDesc)
5372 HCIMPL2(void, JIT_Security_Prolog_Framed, CORINFO_METHOD_HANDLE methHnd_, OBJECTREF* ppFrameSecDesc)
5378 HCIMPL1(void, JIT_VerificationRuntimeCheck, CORINFO_METHOD_HANDLE methHnd_)
5385 //========================================================================
5387 // DEBUGGER/PROFILER HELPERS
5389 //========================================================================
5391 /*********************************************************************/
5392 // JIT_UserBreakpoint
5393 // Called by the JIT whenever a cee_break instruction should be executed.
5394 // This ensures that enough info will be pushed onto the stack so that
5395 // we can continue from the exception w/o having special code elsewhere.
5396 // Body of function is written by debugger team
5399 // <TODO> make sure this actually gets called by all JITters</TODO>
5400 // Note: this code is duplicated in the ecall in VM\DebugDebugger:Break,
5401 // so propagate changes there
// JIT_UserBreakpoint: implements the IL 'break' opcode by calling the
// managed Diagnostics.Debugger.BreakCanThrow method under a DebuggerExitFrame,
// so the debugger sees a well-formed stack at the breakpoint.
5403 HCIMPL0(void, JIT_UserBreakpoint)
5407 HELPER_METHOD_FRAME_BEGIN_NOPOLL(); // Set up a frame
5409 #ifdef DEBUGGING_SUPPORTED
5410 FrameWithCookie<DebuggerExitFrame> __def;
5412 MethodDescCallSite breakCanThrow(METHOD__DEBUGGER__BREAK_CAN_THROW);
5414 // Call Diagnostic.Debugger.BreakCanThrow instead. This will make us demand
5415 // UnmanagedCode permission if debugger is not attached.
5417 breakCanThrow.Call((ARG_SLOT*)NULL);
5420 #else // !DEBUGGING_SUPPORTED
5421 _ASSERTE(!"JIT_UserBreakpoint called, but debugging support is not available in this build.");
5422 #endif // !DEBUGGING_SUPPORTED
5424 HELPER_METHOD_FRAME_END_POLL();
5428 #if defined(_MSC_VER)
5429 // VC++ Compiler intrinsic.
5430 extern "C" void * _ReturnAddress(void);
5433 /*********************************************************************/
5434 // Callback for Just-My-Code probe
5435 // Probe looks like:
5436 // if (*pFlag != 0) call JIT_DbgIsJustMyCode
5437 // So this is only called if the flag (obtained by GetJMCFlagAddr) is
5439 // Body of this function is maintained by the debugger people.
// JIT_DbgIsJustMyCode: callback for the Just-My-Code probe (see comment
// above). Captures the managed caller's IP via _ReturnAddress() and notifies
// the debugger through g_pDebugInterface->OnMethodEnter.
5440 HCIMPL0(void, JIT_DbgIsJustMyCode)
5444 // We need to get both the ip of the managed function this probe is in
5445 // (which will be our return address) and the frame pointer for that
5446 // function (since we can't get it later because we're pushing unmanaged
5447 // frames on the stack).
5451 // In order for the return address to be correct, we must NOT call any
5452 // function before calling _ReturnAddress().
5454 ip = _ReturnAddress();
5456 _ASSERTE(ip != NULL);
5458 // Call into debugger proper
5459 g_pDebugInterface->OnMethodEnter(ip);
// Default stub installed for the profiler enter/leave/tailcall JIT helpers
// when the profiler supplies no hook (see SetEnterLeaveFunctionHooksForJit
// below). Body is elided from this view; presumably an empty no-op — confirm.
5465 #if !(defined(_TARGET_X86_) || defined(_WIN64))
5466 void JIT_ProfilerEnterLeaveTailcallStub(UINT_PTR ProfilerHandle)
5470 #endif // !(_TARGET_X86_ || _WIN64)
5472 #ifdef PROFILING_SUPPORTED
5474 //---------------------------------------------------------------------------------------
5476 // Sets the profiler's enter/leave/tailcall hooks into the JIT's dynamic helper
5480 // pFuncEnter - Enter hook
5481 // pFuncLeave - Leave hook
5482 // pFuncTailcall - Tailcall hook
5484 // For each hook parameter, if NULL is passed in, that will cause the JIT
5485 // to insert calls to its default stub replacement for that hook, which
5489 // HRESULT indicating success or failure
5492 // On IA64, this will allocate space for stubs to update GP, and that
5493 // allocation may take locks and may throw on failure. Callers be warned.
// Installs the profiler's enter/leave/tailcall hooks into the JIT's dynamic
// helper table (see doc comment above). A NULL hook parameter installs
// JIT_ProfilerEnterLeaveTailcallStub as the default for that slot.
5496 HRESULT EEToProfInterfaceImpl::SetEnterLeaveFunctionHooksForJit(FunctionEnter3 * pFuncEnter,
5497 FunctionLeave3 * pFuncLeave,
5498 FunctionTailcall3 * pFuncTailcall)
5505 SetJitHelperFunction(
5506 CORINFO_HELP_PROF_FCN_ENTER,
5507 (pFuncEnter == NULL) ?
5508 reinterpret_cast<void *>(JIT_ProfilerEnterLeaveTailcallStub) :
5509 reinterpret_cast<void *>(pFuncEnter));
5511 SetJitHelperFunction(
5512 CORINFO_HELP_PROF_FCN_LEAVE,
5513 (pFuncLeave == NULL) ?
5514 reinterpret_cast<void *>(JIT_ProfilerEnterLeaveTailcallStub) :
5515 reinterpret_cast<void *>(pFuncLeave));
5517 SetJitHelperFunction(
5518 CORINFO_HELP_PROF_FCN_TAILCALL,
5519 (pFuncTailcall == NULL) ?
5520 reinterpret_cast<void *>(JIT_ProfilerEnterLeaveTailcallStub) :
5521 reinterpret_cast<void *>(pFuncTailcall));
5525 #endif // PROFILING_SUPPORTED
5527 /*************************************************************/
// JIT_LogMethodEnter: IBC (instrumented build) profiling helper — records
// an access to the given method's code in the IBC logger on method entry.
5528 HCIMPL1(void, JIT_LogMethodEnter, CORINFO_METHOD_HANDLE methHnd_)
5532 // Record an access to this method desc
5535 HELPER_METHOD_FRAME_BEGIN_NOPOLL();
5537 g_IBCLogger.LogMethodCodeAccess(GetMethod(methHnd_));
5539 HELPER_METHOD_FRAME_END_POLL();
5545 //========================================================================
5549 //========================================================================
5551 /*************************************************************/
// JIT_StructWriteBarrier: copies a value class from src to dest with proper
// GC write barriers, using the MethodTable derived from typeHnd_ to locate
// the embedded object references.
5552 HCIMPL3(VOID, JIT_StructWriteBarrier, void *dest, void* src, CORINFO_CLASS_HANDLE typeHnd_)
5556 TypeHandle typeHnd(typeHnd_);
5557 MethodTable *pMT = typeHnd.AsMethodTable();
5559 HELPER_METHOD_FRAME_BEGIN_NOPOLL(); // Set up a frame
5560 CopyValueClass(dest, src, pMT);
5561 HELPER_METHOD_FRAME_END_POLL();
5566 /*************************************************************/
// JIT_PollGC: GC poll inserted by the JIT at safe points. If a suspension is
// pending for this thread, sets up a frame and trips the thread via
// CommonTripThread; otherwise returns immediately. The GC_ON_TRANSITIONS
// toggling suppresses GC-stress-on-transition while inside the trip.
5567 HCIMPL0(VOID, JIT_PollGC)
5571 FC_GC_POLL_NOT_NEEDED();
5573 Thread *thread = GetThread();
5574 if (thread->CatchAtSafePointOpportunistic()) // Does someone want this thread stopped?
5576 HELPER_METHOD_FRAME_BEGIN_NOPOLL(); // Set up a frame
5578 BOOL GCOnTransition = FALSE;
5579 if (g_pConfig->FastGCStressLevel()) {
5580 GCOnTransition = GC_ON_TRANSITIONS (FALSE);
5583 CommonTripThread(); // Indicate we are at a GC safe point
5585 if (g_pConfig->FastGCStressLevel()) {
5586 GC_ON_TRANSITIONS (GCOnTransition);
5589 HELPER_METHOD_FRAME_END();
5595 /*************************************************************/
5596 // This helper is similar to JIT_RareDisableHelper, but has more operations
5597 // tailored to the post-pinvoke operations.
5598 extern "C" FCDECL0(VOID, JIT_PInvokeEndRarePath);
// JIT_PInvokeEndRarePath: slow path run when returning from an inlined
// P/Invoke and the thread must do extra work: re-enter cooperative GC mode,
// honor any pending thread abort, then pop the InlinedCallFrame. The whole
// body preserves the Win32 last-error value across these operations so the
// P/Invoke's GetLastError result is not clobbered.
5600 HCIMPL0(void, JIT_PInvokeEndRarePath)
5602 BEGIN_PRESERVE_LAST_ERROR;
5606 Thread *thread = GetThread();
5608 // We need to disable the implicit FORBID GC region that exists inside an FCALL
5609 // in order to call RareDisablePreemptiveGC().
5610 FC_CAN_TRIGGER_GC();
5611 thread->RareDisablePreemptiveGC();
5612 FC_CAN_TRIGGER_GC_END();
5614 FC_GC_POLL_NOT_NEEDED();
5616 HELPER_METHOD_FRAME_BEGIN_NOPOLL(); // Set up a frame
5617 thread->HandleThreadAbort();
5618 HELPER_METHOD_FRAME_END();
5620 InlinedCallFrame* frame = (InlinedCallFrame*)thread->m_pFrame;
5622 thread->m_pFrame->Pop(thread);
5624 END_PRESERVE_LAST_ERROR;
5628 /*************************************************************/
5629 // For an inlined N/Direct call (and possibly for other places that need this service)
5630 // we have noticed that the returning thread should trap for one reason or another.
5631 // ECall sets up the frame.
5633 extern "C" FCDECL0(VOID, JIT_RareDisableHelper);
5635 #if defined(_TARGET_ARM_) || defined(_TARGET_AMD64_)
5636 // The JIT expects this helper to preserve the return value on AMD64 and ARM. We should eventually
5637 // switch other platforms to the same convention since it produces smaller code.
5638 extern "C" FCDECL0(VOID, JIT_RareDisableHelperWorker);
// JIT_RareDisableHelper: rare path taken when re-entering cooperative GC
// mode after an inlined P/Invoke and the thread has pending work (suspension
// or abort). On AMD64/ARM the JIT requires the helper to preserve the return
// value, so an asm JIT_RareDisableHelper thunk calls this Worker variant
// (interior of the Worker body is elided in this view).
5640 HCIMPL0(void, JIT_RareDisableHelperWorker)
5642 HCIMPL0(void, JIT_RareDisableHelper)
5645 // We do this here (before we set up a frame), because the following scenario
5646 // We are in the process of doing an inlined pinvoke. Since we are in preemptive
5647 // mode, the thread is allowed to continue. The thread continues and gets a context
5648 // switch just after it has cleared the preemptive mode bit but before it gets
5649 // to this helper. When we do our stack crawl now, we think this thread is
5650 // in cooperative mode (and believed that it was suspended in the SuspendEE), so
5651 // we do a getthreadcontext (on the unsuspended thread!) and get an EIP in jitted code.
5652 // and proceed. Assume the crawl of jitted frames is proceeding on the other thread
5653 // when this thread wakes up and sets up a frame. Eventually the other thread
5654 // runs out of jitted frames and sees the frame we just established. This causes
5655 // an assert in the stack crawling code. If this assert is ignored, however, we
5656 // will end up scanning the jitted frames twice, which will lead to GC holes
5658 // <TODO>TODO: It would be MUCH more robust if we should remember which threads
5659 // we suspended in the SuspendEE, and only even consider using EIP if it was suspended
5660 // in the first phase.
5663 BEGIN_PRESERVE_LAST_ERROR;
5667 Thread *thread = GetThread();
5669 // We need to disable the implicit FORBID GC region that exists inside an FCALL
5670 // in order to call RareDisablePreemptiveGC().
5671 FC_CAN_TRIGGER_GC();
5672 thread->RareDisablePreemptiveGC();
5673 FC_CAN_TRIGGER_GC_END();
5675 FC_GC_POLL_NOT_NEEDED();
5677 HELPER_METHOD_FRAME_BEGIN_NOPOLL(); // Set up a frame
5678 thread->HandleThreadAbort();
5679 HELPER_METHOD_FRAME_END();
5681 END_PRESERVE_LAST_ERROR;
5685 /*********************************************************************/
5686 // This is called by the JIT after every instruction in fully interruptible
5687 // code to make certain our GC tracking is OK
// JIT_StressGC_NOP: no-op target that JIT_StressGC (below) redirects return
// addresses to; body (elided from this view) is presumably empty — confirm.
5688 HCIMPL0(VOID, JIT_StressGC_NOP)
// JIT_StressGC: GC-stress helper called after every instruction in fully
// interruptible code; forces a garbage collection, then (in the historically
// disabled x86 block below) patches the caller's call-site so subsequent
// executions call JIT_StressGC_NOP instead of collecting again.
5695 HCIMPL0(VOID, JIT_StressGC)
5700 HELPER_METHOD_FRAME_BEGIN_0(); // Set up a frame
5702 bool fSkipGC = false;
5705 GCHeapUtilities::GetGCHeap()->GarbageCollect();
5707 // <TODO>@TODO: the following ifdef is in error, but if corrected the
5708 // compiler complains about the *__ms->pRetAddr() saying machine state
5709 // doesn't allow -></TODO>
5711 // Get the machine state, (from HELPER_METHOD_FRAME_BEGIN)
5712 // and wack our return address to a nop function
5713 BYTE* retInstrs = ((BYTE*) *__ms->pRetAddr()) - 4;
5714 _ASSERTE(retInstrs[-1] == 0xE8); // it is a call instruction
5715 // Wack it to point to the JITStressGCNop instead
// BUGFIX: the original line closed the FastInterlockExchange call after its
// first argument — `FastInterlockExchange((LONG*) retInstrs), (LONG) ...);` —
// which is a syntax error (latent only because the guarding ifdef above is
// wrong, leaving this block uncompiled). The stray ')' is removed so the
// call passes both arguments.
5716 FastInterlockExchange((LONG*) retInstrs, (LONG) JIT_StressGC_NOP);
5719 HELPER_METHOD_FRAME_END();
// JIT_GetCurrentManagedThreadId: returns the managed thread id of the
// current thread (backs Environment.CurrentManagedThreadId); no frame is
// needed since nothing here can trigger a GC.
5726 HCIMPL0(INT32, JIT_GetCurrentManagedThreadId)
5730 FC_GC_POLL_NOT_NEEDED();
5732 Thread * pThread = GetThread();
5733 return pThread->GetThreadId();
5738 /*********************************************************************/
5739 /* we don't use HCIMPL macros because we don't want the overhead even in debug mode */
// JIT_CheckObj: debug-only object validation — verifies the object's
// MethodTable (tolerating AVs during the read) and breaks into the debugger
// on corruption. Uses the _RAW variant to avoid HCIMPL overhead (see comment
// above).
5741 HCIMPL1_RAW(Object*, JIT_CheckObj, Object* obj)
5746 MethodTable* pMT = obj->GetMethodTable();
5747 if (!pMT->ValidateWithPossibleAV()) {
5748 _ASSERTE(!"Bad Method Table");
5749 FreeBuildDebugBreak();
// loopChoice backs the loop-cloning debug mode described below: the JIT
// emits code that increments this location and branches on odd/even to pick
// the original vs. cloned loop.
5756 static int loopChoice = 0;
5758 // This function supports a JIT mode in which we're debugging the mechanism for loop cloning.
5759 // We want to clone loops, then make a semi-random choice, on each execution of the loop,
5760 // whether to run the original loop or the cloned copy. We do this by incrementing the contents
5761 // of a memory location, and testing whether the result is odd or even. The "loopChoice" variable
5762 // above provides that memory location, and this JIT helper merely informs the JIT of the address of
// JIT_LoopCloneChoiceAddr: returns &loopChoice to the JIT (body elided
// from this view).
5764 HCIMPL0(void*, JIT_LoopCloneChoiceAddr)
5774 // Prints a message that loop cloning optimization has occurred.
// JIT_DebugLogLoopCloning: debug helper that prints a trace line each time
// the loop-cloning optimization fires (interior guards elided in this view).
5775 HCIMPL0(void, JIT_DebugLogLoopCloning)
5782 printf(">> Logging loop cloning optimization\n");
5787 //========================================================================
5791 //========================================================================
5795 /**********************************************************************/
5796 /* Fills out portions of an InlinedCallFrame for JIT64 */
5797 /* The idea here is to allocate and initialize the frame only once, */
5798 /* regardless of how many PInvokes there are in the method */
// JIT_InitPInvokeFrame: one-time initialization of a method's InlinedCallFrame
// for inlined P/Invokes (see comment above) — stores the stub secret arg and
// links the frame after the thread's current top frame. Returns the current
// Thread so the caller can cache it.
5799 Thread * __stdcall JIT_InitPInvokeFrame(InlinedCallFrame *pFrame, PTR_VOID StubSecretArg)
5807 Thread *pThread = GetThread();
5809 // The JIT messed up and is initializing a frame that is already live on the stack?!?!?!?!
5810 _ASSERTE(pFrame != pThread->GetFrame());
5813 pFrame->m_StubSecretArg = StubSecretArg;
5814 pFrame->m_Next = pThread->GetFrame();
5821 EXTERN_C void JIT_PInvokeBegin(InlinedCallFrame* pFrame);
5822 EXTERN_C void JIT_PInvokeEnd(InlinedCallFrame* pFrame);
5824 //========================================================================
5826 // JIT HELPERS IMPLEMENTED AS FCALLS
5828 //========================================================================
5831 // This function is used from the FCallMemcpy for GC polling
// FCallMemCpy_GCPoll: GC poll used by FCallMemcpy — only raises a helper
// frame (an expensive operation) when a suspension is actually pending.
5832 EXTERN_C VOID FCallMemCpy_GCPoll()
5834 FC_INNER_PROLOG(FCallMemcpy);
5836 Thread *thread = GetThread();
5837 // CommonTripThread does this check, but doing this to avoid raising the frames
5838 if (thread->CatchAtSafePointOpportunistic())
5840 HELPER_METHOD_FRAME_BEGIN_0();
5842 HELPER_METHOD_FRAME_END();
5847 #endif // _TARGET_ARM_
5849 //========================================================================
5851 // JIT HELPERS INITIALIZATION
5853 //========================================================================
5855 // verify consistency of jithelpers.h and corinfo.h
// X-macro trick: jithelpers.h is included repeatedly with different
// JITHELPER/DYNAMICJITHELPER/HELPERDEF definitions to (1) static-assert the
// helper enum matches corinfo.h and (2) build the static and dynamic helper
// function tables below.
5856 enum __CorInfoHelpFunc {
5857 #define JITHELPER(code, pfnHelper, sig) __##code,
5858 #include "jithelpers.h"
5860 #define JITHELPER(code, pfnHelper, sig) C_ASSERT((int)__##code == (int)code);
5861 #include "jithelpers.h"
// Debug builds record the helper name alongside the pointer; retail only the pointer.
5864 #define HELPERDEF(code, lpv, sig) { (LPVOID)(lpv), #code },
5866 #define HELPERDEF(code, lpv, sig) { (LPVOID)(lpv) },
5869 // static helpers - constant array
5870 const VMHELPDEF hlpFuncTable[CORINFO_HELP_COUNT] =
// Dynamic helpers store 1 + DYNAMIC_##code as a tag; resolved at runtime
// through hlpDynamicFuncTable.
5872 #define JITHELPER(code, pfnHelper, sig) HELPERDEF(code, pfnHelper,sig)
5873 #define DYNAMICJITHELPER(code, pfnHelper,sig) HELPERDEF(code, 1 + DYNAMIC_##code, sig)
5874 #include "jithelpers.h"
5877 // dynamic helpers - filled in at runtime
5878 VMHELPDEF hlpDynamicFuncTable[DYNAMIC_CORINFO_HELP_COUNT] =
5880 #define JITHELPER(code, pfnHelper, sig)
5881 #define DYNAMICJITHELPER(code, pfnHelper, sig) HELPERDEF(DYNAMIC_ ## code, pfnHelper, sig)
5882 #include "jithelpers.h"
// Debug-only per-helper invocation counters (x86/amd64 Windows only).
5885 #if defined(_DEBUG) && (defined(_TARGET_AMD64_) || defined(_TARGET_X86_)) && !defined(FEATURE_PAL)
5886 #define HELPERCOUNTDEF(lpv) { (LPVOID)(lpv), NULL, 0 },
5888 VMHELPCOUNTDEF hlpFuncCountTable[CORINFO_HELP_COUNT+1] =
5890 #define JITHELPER(code, pfnHelper, sig) HELPERCOUNTDEF(pfnHelper)
5891 #define DYNAMICJITHELPER(code, pfnHelper, sig) HELPERCOUNTDEF(1 + DYNAMIC_##code)
5892 #include "jithelpers.h"
5896 // Set the JIT helper function in the helper table
5897 // Handles the case where the function does not reside in mscorwks.dll
// _SetJitHelperFunction: installs pFunc as the implementation of the given
// dynamic JIT helper slot (see comment above re: functions outside mscorwks).
5899 void _SetJitHelperFunction(DynamicCorInfoHelpFunc ftnNum, void * pFunc)
5906 _ASSERTE(ftnNum < DYNAMIC_CORINFO_HELP_COUNT);
5908 LOG((LF_JIT, LL_INFO1000000, "Setting JIT dynamic helper %3d (%s) to %p\n",
5909 ftnNum, hlpDynamicFuncTable[ftnNum].name, pFunc));
5911 hlpDynamicFuncTable[ftnNum].pfnHelper = (void *) pFunc;
5914 /*********************************************************************/
5915 // Initialize the part of the JIT helpers that require much of the
5916 // EE infrastructure to be in place.
5917 /*********************************************************************/
5918 void InitJITHelpers2()
5920 STANDARD_VM_CONTRACT;
// Late-phase JIT helper init: wires up the per-arch PInvoke-frame init stub,
// the GetThread FCall, helper-call logging, and the generic-handle cache
// (a JitGenericHandleCache guarded by g_pJitGenericHandleCacheCrst).
5922 #if defined(_TARGET_X86_) || defined(_TARGET_ARM_)
5923 SetJitHelperFunction(CORINFO_HELP_INIT_PINVOKE_FRAME, (void *)GenerateInitPInvokeFrameHelper()->GetEntryPoint());
5924 #endif // _TARGET_X86_ || _TARGET_ARM_
5926 ECall::DynamicallyAssignFCallImpl(GetEEFuncEntryPoint(GetThread), ECall::InternalGetCurrentThread);
5928 InitJitHelperLogging();
5930 g_pJitGenericHandleCacheCrst.Init(CrstJitGenericHandleCache, CRST_UNSAFE_COOPGC);
5932 // Allocate and initialize the table
5933 NewHolder <JitGenericHandleCache> tempGenericHandleCache (new JitGenericHandleCache());
5934 LockOwner sLock = {&g_pJitGenericHandleCacheCrst, IsOwnerOfCrst};
5935 if (!tempGenericHandleCache->Init(59, &sLock)) // 59 = initial hash bucket count
5937 g_pJitGenericHandleCache = tempGenericHandleCache.Extract(); // holder releases ownership on success
5940 #if defined(_TARGET_AMD64_) || defined(_TARGET_ARM_)
// DoCopy: final step of the RtlVirtualUnwind-based tail-call helper. Copies
// the staged argument/frame bytes (pvTempStack) down onto the target SP,
// relocating the CONTEXT record first if the copy would overwrite it, links
// the new TailCallFrame into the thread's frame chain only after the bytes
// are in place (so stack walks never see garbage), then restores the context
// to jump into the tail-call target. NOINLINE so its stack usage is isolated
// from the caller. This function never returns.
5942 NOINLINE void DoCopy(CONTEXT * ctx, void * pvTempStack, size_t cbTempStack, Thread * pThread, Frame * pNewFrame)
5944 // We need to ensure that copying pvTempStack onto our stack will not in
5945 // *ANY* way trash the context record (or our pointer to it) that we need
5946 // in order to restore context
5947 _ASSERTE((DWORD_PTR)&ctx + sizeof(ctx) < (DWORD_PTR)GetSP(ctx));
5950 if ((DWORD_PTR)ctx + sizeof(*ctx) > (DWORD_PTR)GetSP(ctx))
5952 // The context record is in danger, copy it down
5953 _ASSERTE((DWORD_PTR)&ctx2 + sizeof(ctx2) < (DWORD_PTR)GetSP(ctx))
5956 // Clear any context that we didn't copy...
5957 ctx2.ContextFlags &= CONTEXT_ALL;
5961 _ASSERTE((DWORD_PTR)ctx + sizeof(*ctx) <= (DWORD_PTR)GetSP(ctx));
5963 // DevDiv 189140 - use memmove because source and dest might overlap.
5964 memmove((void*)GetSP(ctx), pvTempStack, cbTempStack);
5966 if (pNewFrame != NULL)
5968 // Now that the memmove above is complete, pNewFrame is actually pointing at a
5969 // TailCallFrame, and not garbage. So it's safe to add pNewFrame to the Frame
5971 _ASSERTE(pThread != NULL);
5972 pThread->SetFrame(pNewFrame);
5975 RtlRestoreContext(ctx, NULL);
5979 // Mostly Architecture-agnostic RtlVirtualUnwind-based tail call helper...
5981 // Can't use HCIMPL macro because it requires unwind, and this method *NEVER* unwinds.
5984 #define INVOKE_COPY_ARGS_HELPER(helperFunc, arg1, arg2, arg3, arg4) ((pfnCopyArgs)helperFunc)(arg1, arg2, arg3, arg4)
5985 void F_CALL_VA_CONV JIT_TailCall(PCODE copyArgs, PCODE target, ...)
5987 // Can't have a regular contract because we would never pop it
// NOTE(review): this is the tail portion of the AMD64/ARM JIT_TailCall helper;
// the entry point, signature (vararg: copyArgs thunk, target PCODE, args...),
// and prologue are above this excerpt. Several interior lines are elided in
// this listing. High level: unwind to the caller's caller, build (or reuse) a
// TailCallFrame describing the outgoing arguments, copy the arguments into
// their final stack slots (double-copying via a temp buffer when the new
// frame would overwrite the incoming argument area), then restore the
// captured-and-edited context so execution resumes at `target` as if the
// caller's caller had called it directly.
5988 // We only throw a stack overflow if needed, and we can't handle
5989 // a GC because the incoming parameters are totally unprotected.
5990 STATIC_CONTRACT_THROWS;
5991 STATIC_CONTRACT_GC_NOTRIGGER;
5992 STATIC_CONTRACT_MODE_COOPERATIVE
5996 Thread *pThread = GetThread();
5998 #ifdef FEATURE_HIJACK
5999 // We can't crawl the stack of a thread that currently has a hijack pending
6000 // (since the hijack routine won't be recognized by any code manager). So we
6001 // undo any hijack, the EE will re-attempt it later.
6002 pThread->UnhijackThread();
6005 ULONG_PTR establisherFrame = 0;
6006 PVOID handlerData = NULL;
6009 // Unwind back to our caller in managed code
6010 static PT_RUNTIME_FUNCTION my_pdata;
6011 static ULONG_PTR my_imagebase;
6013 ctx.ContextFlags = CONTEXT_ALL;
6014 RtlCaptureContext(&ctx);
// Lazily cache this helper's own unwind info. my_imagebase doubles as the
// "initialized" flag: my_pdata is written first and the base is published
// last via the interlocked exchange, so a racing reader that sees a non-zero
// base also sees a valid my_pdata.
6016 if (!VolatileLoadWithoutBarrier(&my_imagebase)) {
6017 ULONG_PTR imagebase = 0;
6018 my_pdata = RtlLookupFunctionEntry(GetIP(&ctx), &imagebase, NULL);
6019 InterlockedExchangeT(&my_imagebase, imagebase);
// First unwind: step out of JIT_TailCall itself, back into our managed caller.
6022 RtlVirtualUnwind(UNW_FLAG_NHANDLER, my_imagebase, GetIP(&ctx), my_pdata, &ctx, &handlerData,
6023 &establisherFrame, NULL);
6025 EECodeInfo codeInfo(GetIP(&ctx));
6027 // Now unwind back to our caller's caller
6028 establisherFrame = 0;
6029 RtlVirtualUnwind(UNW_FLAG_NHANDLER, codeInfo.GetModuleBase(), GetIP(&ctx), codeInfo.GetFunctionEntry(), &ctx, &handlerData,
6030 &establisherFrame, NULL);
6034 // Compute the space needed for arguments
// First pass over the varargs (NULL destination) just measures the argument
// area and retrieves the GC layout pointer; nothing is copied yet.
6035 va_start(args, target);
6037 ULONG_PTR pGCLayout = 0;
6038 size_t cbArgArea = INVOKE_COPY_ARGS_HELPER(copyArgs, args, NULL, NULL, (size_t)&pGCLayout);
6042 // reset (in case the helper walked them)
6043 va_start(args, target);
6045 // Fake call frame (if needed)
6046 size_t cbCopyFrame = 0;
6047 bool fCopyDown = false;
6048 BYTE rgFrameBuffer[sizeof(FrameWithCookie<TailCallFrame>)];
6049 Frame * pNewFrame = NULL;
// Per-architecture stack constants: x64 'call' pushes an 8-byte return
// address and requires 16-byte alignment; ARM pushes nothing (LR) and
// requires 8-byte alignment.
6051 #if defined(_TARGET_AMD64_)
6052 # define STACK_ADJUST_FOR_RETURN_ADDRESS (sizeof(void*))
6053 # define STACK_ALIGN_MASK (0xF)
6054 #elif defined(_TARGET_ARM_)
6055 # define STACK_ADJUST_FOR_RETURN_ADDRESS (0)
6056 # define STACK_ALIGN_MASK (0x7)
6058 #error "Unknown tail call architecture"
6061 // figure out if we can re-use an existing TailCallHelperStub
6062 // or if we need to create a new one.
// If the unwound return address is the TailCallHelperStub's, our caller was
// itself tail-called, so a TailCallFrame already sits on this stack and can
// be recycled instead of pushing a second one.
6063 if ((void*)GetIP(&ctx) == JIT_TailCallHelperStub_ReturnAddress) {
6064 TailCallFrame * pCurrentFrame = TailCallFrame::GetFrameFromContext(&ctx);
6065 _ASSERTE(pThread->GetFrame() == pCurrentFrame);
6066 // The caller was tail called, so we can re-use that frame
6067 // See if we need to enlarge the ArgArea
6068 // This can potentially enlarge cbArgArea to the size of the
6069 // existing TailCallFrame.
6070 const size_t endOfFrame = (size_t)pCurrentFrame - (size_t)sizeof(GSCookie);
6071 size_t cbOldArgArea = (endOfFrame - GetSP(&ctx));
6072 if (cbOldArgArea >= cbArgArea) {
6073 cbArgArea = cbOldArgArea;
6076 SetSP(&ctx, (endOfFrame - cbArgArea));
6080 // Reset the GCLayout
// The recycled frame must report the NEW method's argument GC layout to the
// GC stackwalker, not the old one's.
6081 pCurrentFrame->SetGCLayout((TADDR)pGCLayout);
6083 // We're jumping to the new method, not calling it
6084 // so make room for the return address that the 'call'
6085 // would have pushed.
6086 SetSP(&ctx, GetSP(&ctx) - STACK_ADJUST_FOR_RETURN_ADDRESS);
6089 // Create a fake fixed frame as if the new method was called by
6090 // TailCallHelperStub asm stub and did an
6091 // alloca, then called the target method.
// The frame is first constructed in a local buffer; it is memcpy'd to its
// real stack location later (see the "frame (includes TailCallFrame)" copy
// below), because that location may still hold live incoming arguments.
6092 cbCopyFrame = sizeof(rgFrameBuffer);
6093 FrameWithCookie<TailCallFrame> * CookieFrame = new (rgFrameBuffer) FrameWithCookie<TailCallFrame>(&ctx, pThread);
6094 TailCallFrame * tailCallFrame = &*CookieFrame;
6096 tailCallFrame->SetGCLayout((TADDR)pGCLayout);
6097 pNewFrame = TailCallFrame::AdjustContextForTailCallHelperStub(&ctx, cbArgArea, pThread);
6100 // Eventually, we'll add pNewFrame to our frame chain, but don't do it yet. It's
6101 // pointing to the place on the stack where the TailCallFrame contents WILL be,
6102 // but aren't there yet. In order to keep the stack walkable by profilers, wait
6103 // until the contents are moved over properly (inside DoCopy), and then add
6104 // pNewFrame onto the frame chain.
6107 // The stack should be properly aligned, modulo the pushed return
6108 // address (at least on x64)
6109 _ASSERTE((GetSP(&ctx) & STACK_ALIGN_MASK) == STACK_ADJUST_FOR_RETURN_ADDRESS);
6111 // Set the target pointer so we land there when we restore the context
6112 SetIP(&ctx, (PCODE)target);
6114 // Begin creating the new stack frame and copying arguments
6115 size_t cbTempStack = cbCopyFrame + cbArgArea + STACK_ADJUST_FOR_RETURN_ADDRESS;
6117 // If we're going to have to overwrite some of our incoming argument slots
6118 // then do a double-copy, first to temporary copy below us on the stack and
6119 // then back up to the real stack.
// Fast path: if the incoming varargs lie entirely below the new SP, the new
// frame can be built directly in place (no overlap, no second copy).
6121 if (!fCopyDown && (((ULONG_PTR)args + cbArgArea) < GetSP(&ctx))) {
6124 // After this our stack may no longer be walkable by the debugger!!!
6127 pvTempStack = (void*)GetSP(&ctx);
6132 // Need to align properly for a return address (if it goes on the stack)
6135 // _alloca produces 16-byte aligned buffers, but the return address,
6136 // where our buffer 'starts' is off by 8, so make sure our buffer is
6139 pvTempStack = (BYTE*)_alloca(cbTempStack + STACK_ADJUST_FOR_RETURN_ADDRESS) + STACK_ADJUST_FOR_RETURN_ADDRESS;
6142 _ASSERTE(((size_t)pvTempStack & STACK_ALIGN_MASK) == STACK_ADJUST_FOR_RETURN_ADDRESS);
6144 // Start creating the new stack (bottom up)
6145 BYTE * pbTempStackFill = (BYTE*)pvTempStack;
// Layout being assembled, lowest address first:
//   [fake return address into TailCallHelperStub] [argument area] [frame copy]
6147 if (STACK_ADJUST_FOR_RETURN_ADDRESS > 0) {
6148 *((PVOID*)pbTempStackFill) = (PVOID)JIT_TailCallHelperStub_ReturnAddress; // return address
6149 pbTempStackFill += STACK_ADJUST_FOR_RETURN_ADDRESS;
// Second pass over the varargs: this time actually copy them into the
// (possibly temporary) argument area.
6153 INVOKE_COPY_ARGS_HELPER(copyArgs, args, &ctx, (DWORD_PTR*)pbTempStackFill, cbArgArea);
6157 pbTempStackFill += cbArgArea;
6159 // frame (includes TailCallFrame)
6160 if (cbCopyFrame > 0) {
6161 _ASSERTE(cbCopyFrame == sizeof(rgFrameBuffer));
6162 memcpy(pbTempStackFill, rgFrameBuffer, cbCopyFrame);
6163 pbTempStackFill += cbCopyFrame;
6166 // If this fires, check the math above, because we copied more than we should have
6167 _ASSERTE((size_t)((pbTempStackFill - (BYTE*)pvTempStack)) == cbTempStack);
6169 // If this fires, it means we messed up the math and we're about to overwrite
6170 // some of our locals which would be bad because we still need them to call
6171 // RtlRestoreContext and pop the contract...
6172 _ASSERTE(fCopyDown || ((DWORD_PTR)&ctx + sizeof(ctx) < (DWORD_PTR)GetSP(&ctx)));
6175 // We've created a dummy stack below our frame and now we overwrite
6176 // our own real stack.
6179 // After this our stack may no longer be walkable by the debugger!!!
6182 // This does the copy, adds pNewFrame to the frame chain, and calls RtlRestoreContext
// Double-copy path: DoCopy moves the staged buffer up over our own frame and
// never returns (it restores the edited context itself).
6183 DoCopy(&ctx, pvTempStack, cbTempStack, pThread, pNewFrame);
// In-place path: the frame was built directly at its final location, so just
// resume at the target. This call does not return.
6186 RtlRestoreContext(&ctx, NULL);
6188 #undef STACK_ADJUST_FOR_RETURN_ADDRESS
6189 #undef STACK_ALIGN_MASK
6191 #else // !FEATURE_PAL
6192 PORTABILITY_ASSERT("TODO: Implement JIT_TailCall for PAL");
6193 #endif // !FEATURE_PAL
6197 #endif // _TARGET_AMD64_ || _TARGET_ARM_
6199 //========================================================================
6201 // JIT HELPERS LOGGING
6203 //========================================================================
6205 #if defined(_DEBUG) && (defined(_TARGET_AMD64_) || defined(_TARGET_X86_)) && !defined(FEATURE_PAL)
6206 // *****************************************************************************
6207 // JitHelperLogging usage:
6209 // COMPlus_HardPrejitEnabled=0
6211 // This allows us to instrument even ngen'd image calls to JIT helpers.
6212 // Remember to clear the key after ngen-ing and before actually running
6213 // the app you want to log.
6216 // COMPlus_JitHelperLogging=1
6217 // COMPlus_LogEnable=1
6218 // COMPlus_LogLevel=1
6219 // COMPlus_LogToFile=1
6221 // 3) Run the app that you want to log; Results will be in COMPLUS.LOG(.X)
6223 // 4) JitHelperLogging=2 and JitHelperLogging=3 result in different output
6224 // as per code in WriteJitHelperCountToSTRESSLOG() below.
6225 // *****************************************************************************
// Dumps the per-helper invocation counts accumulated by the logging thunks
// (see InitJitHelperLogging) to the stress log. The output format is selected
// by COMPlus_JitHelperLogging: 1 = CSV list, 2 = padded table, 3 = CSV list
// with each helper's address range. No-op when the config value is 0.
// NOTE(review): some interior lines (contract, braces, case labels/breaks,
// buffer cleanup, loop increment) are elided from this listing.
6226 void WriteJitHelperCountToSTRESSLOG()
6235 int jitHelperLoggingLevel = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_JitHelperLogging);
6236 if (jitHelperLoggingLevel != 0)
6238 DWORD logFacility, logLevel;
6240 logFacility = LF_ALL; //LF_ALL/LL_ALWAYS is okay here only because this logging would normally
6241 logLevel = LL_ALWAYS; // never be turned on at all (used only for performance measurements)
// Column at which the count is printed in the table format (mode 2).
6243 const int countPos = 60;
6245 STRESS_LOG0(logFacility, logLevel, "Writing Jit Helper COUNT table to log\n");
// Walk the parallel count table (one VMHELPCOUNTDEF per CORINFO helper).
6247 VMHELPCOUNTDEF* hlpFuncCount = hlpFuncCountTable;
6248 while(hlpFuncCount < (hlpFuncCountTable + CORINFO_HELP_COUNT))
6253 name = hlpFuncCount->helperName;
6254 count = hlpFuncCount->count;
6257 switch (jitHelperLoggingLevel)
6260 // This will print a comma separated list:
6261 // CORINFO_XXX_HELPER, 10
6262 // CORINFO_YYYY_HELPER, 11
6263 STRESS_LOG2(logFacility, logLevel, "%s, %d\n", name, count);
6267 // This will print a table like:
6268 // CORINFO_XXX_HELPER 10
6269 // CORINFO_YYYY_HELPER 11
6270 if (hlpFuncCount->helperName != NULL)
6271 nameLen = (int)strlen(name);
6273 nameLen = (int)strlen("(null)");
6275 if (nameLen < countPos)
// Build a padding string so the count lands in column countPos.
6277 char* buffer = new char[(countPos - nameLen) + 1];
6278 memset(buffer, (int)' ', (countPos-nameLen));
6279 buffer[(countPos - nameLen)] = '\0';
6280 STRESS_LOG3(logFacility, logLevel, "%s%s %d\n", name, buffer, count);
// Name too long to pad — fall back to a single separating space.
6284 STRESS_LOG2(logFacility, logLevel, "%s %d\n", name, count);
6289 // This will print out the counts and the address range of the helper (if we know it)
6290 // CORINFO_XXX_HELPER, 10, (0x12345678 -> 0x12345778)
6291 // CORINFO_YYYY_HELPER, 11, (0x00011234 -> 0x00012234)
// helperSize is 0 when no unwind info was available (see InitJitHelperLogging),
// in which case the range degenerates to a single address.
6292 STRESS_LOG4(logFacility, logLevel, "%s, %d, (0x%p -> 0x%p)\n", name, count, hlpFuncCount->pfnRealHelper, ((LPBYTE)hlpFuncCount->pfnRealHelper + hlpFuncCount->helperSize));
6296 STRESS_LOG1(logFacility, logLevel, "Unsupported JitHelperLogging mode (%d)\n", jitHelperLoggingLevel);
6304 // This will do the work to instrument the JIT helper table.
// When COMPlus_JitHelperLogging is set, replaces every entry in the static
// hlpFuncTable with a generated counting thunk that increments the matching
// VMHELPCOUNTDEF before jumping to the real helper. Entries whose pfnHelper
// is a small dynamic-helper index (value-1 <= DYNAMIC_CORINFO_HELP_COUNT)
// are instrumented through hlpDynamicFuncTable instead. Also records each
// helper's name/address/size so WriteJitHelperCountToSTRESSLOG can report
// address ranges. Called at startup (STANDARD_VM_CONTRACT).
// NOTE(review): interior lines (braces, else keywords, _PREFAST_ #ifdefs,
// loop increments) are elided from this listing.
6305 void InitJitHelperLogging()
6307 STANDARD_VM_CONTRACT;
6309 if ((CLRConfig::GetConfigValue(CLRConfig::INTERNAL_JitHelperLogging) != 0))
// Sanity-check the runtime image's PE headers; on x86 pNT is also needed
// below to bound the module's address range.
6313 IMAGE_DOS_HEADER *pDOS = (IMAGE_DOS_HEADER *)g_pMSCorEE;
6314 _ASSERTE(pDOS->e_magic == VAL16(IMAGE_DOS_SIGNATURE) && pDOS->e_lfanew != 0);
6316 IMAGE_NT_HEADERS *pNT = (IMAGE_NT_HEADERS*)((LPBYTE)g_pMSCorEE + VAL32(pDOS->e_lfanew));
// 64-bit vs 32-bit optional-header validation (the two asserts are under
// opposite target #ifdefs in the full source).
6318 _ASSERTE(pNT->Signature == VAL32(IMAGE_NT_SIGNATURE)
6319 && pNT->FileHeader.SizeOfOptionalHeader == VAL16(sizeof(IMAGE_OPTIONAL_HEADER64))
6320 && pNT->OptionalHeader.Magic == VAL16(IMAGE_NT_OPTIONAL_HDR_MAGIC) );
6322 _ASSERTE(pNT->Signature == VAL32(IMAGE_NT_SIGNATURE)
6323 && pNT->FileHeader.SizeOfOptionalHeader == VAL16(sizeof(IMAGE_OPTIONAL_HEADER32))
6324 && pNT->OptionalHeader.Magic == VAL16(IMAGE_NT_OPTIONAL_HDR_MAGIC) );
6326 #endif // _TARGET_X86_
6328 // Make the static hlpFuncTable read/write for purposes of writing the logging thunks
6330 if (!ClrVirtualProtect((LPVOID)hlpFuncTable, (sizeof(VMHELPDEF) * CORINFO_HELP_COUNT), PAGE_EXECUTE_READWRITE, &dwOldProtect))
// Thunks are emitted into the global loader heap so they live for the
// process lifetime (they are never freed).
6335 LoaderHeap* pHeap = SystemDomain::GetGlobalLoaderAllocator()->GetStubHeap();
6337 // iterate through the jit helper tables replacing helpers with logging thunks
6339 // NOTE: if NGEN'd images were NGEN'd with hard binding on then static helper
6340 // calls will NOT be instrumented.
6341 VMHELPDEF* hlpFunc = const_cast<VMHELPDEF*>(hlpFuncTable);
6342 VMHELPCOUNTDEF* hlpFuncCount = hlpFuncCountTable;
6343 while(hlpFunc < (hlpFuncTable + CORINFO_HELP_COUNT))
6345 if (hlpFunc->pfnHelper != NULL)
6348 CPUSTUBLINKER* pSl = &sl;
// Distinguish a real helper address from a 1-based dynamic-helper index
// smuggled through pfnHelper: small values (value-1 within the dynamic
// table) select an entry in hlpDynamicFuncTable instead.
6350 if (((size_t)hlpFunc->pfnHelper - 1) > DYNAMIC_CORINFO_HELP_COUNT)
6352 // While we're here initialize the table of VMHELPCOUNTDEF
6353 // guys with info about this helper
6354 hlpFuncCount->pfnRealHelper = hlpFunc->pfnHelper;
6355 hlpFuncCount->helperName = hlpFunc->name;
6356 hlpFuncCount->count = 0;
6357 #ifdef _TARGET_AMD64_
// On AMD64, derive the helper's code size from its unwind function entry.
6358 ULONGLONG uImageBase;
6359 PT_RUNTIME_FUNCTION pFunctionEntry;
6360 pFunctionEntry = RtlLookupFunctionEntry((ULONGLONG)hlpFunc->pfnHelper, &uImageBase, NULL);
6362 if (pFunctionEntry != NULL)
6364 _ASSERTE((uImageBase + pFunctionEntry->BeginAddress) == (ULONGLONG)hlpFunc->pfnHelper);
6365 hlpFuncCount->helperSize = pFunctionEntry->EndAddress - pFunctionEntry->BeginAddress;
6369 hlpFuncCount->helperSize = 0;
6371 #else // _TARGET_X86_
6372 // How do I get this for x86?
6373 hlpFuncCount->helperSize = 0;
6374 #endif // _TARGET_AMD64_
// Emit the counting thunk and swap it into the table in place of the
// real helper; the thunk forwards to pfnRealHelper after counting.
6376 pSl->EmitJITHelperLoggingThunk(GetEEFuncEntryPoint(hlpFunc->pfnHelper), (LPVOID)hlpFuncCount);
6377 Stub* pStub = pSl->Link(pHeap);
6378 hlpFunc->pfnHelper = (void*)pStub->GetEntryPoint();
// Dynamic-helper branch: pfnHelper-1 is an index into hlpDynamicFuncTable.
6382 _ASSERTE(((size_t)hlpFunc->pfnHelper - 1) >= 0 &&
6383 ((size_t)hlpFunc->pfnHelper - 1) < COUNTOF(hlpDynamicFuncTable));
6384 VMHELPDEF* dynamicHlpFunc = &hlpDynamicFuncTable[((size_t)hlpFunc->pfnHelper - 1)];
6386 // While we're here initialize the table of VMHELPCOUNTDEF
6387 // guys with info about this helper. There is only one table
6388 // for the count dudes that contains info about both dynamic
6389 // and static helpers.
6392 #pragma warning(push)
6393 #pragma warning(disable:26001) // "Bounds checked above"
6394 #endif /*_PREFAST_ */
6395 hlpFuncCount->pfnRealHelper = dynamicHlpFunc->pfnHelper;
6396 hlpFuncCount->helperName = dynamicHlpFunc->name;
6397 hlpFuncCount->count = 0;
6399 #pragma warning(pop)
6400 #endif /*_PREFAST_*/
6402 #ifdef _TARGET_AMD64_
6403 ULONGLONG uImageBase;
6404 PT_RUNTIME_FUNCTION pFunctionEntry;
6405 pFunctionEntry = RtlLookupFunctionEntry((ULONGLONG)hlpFunc->pfnHelper, &uImageBase, NULL);
6407 if (pFunctionEntry != NULL)
6409 _ASSERTE((uImageBase + pFunctionEntry->BeginAddress) == (ULONGLONG)hlpFunc->pfnHelper);
6410 hlpFuncCount->helperSize = pFunctionEntry->EndAddress - pFunctionEntry->BeginAddress;
6414 // if we can't get a function entry for this we'll just pretend the size is 0
6415 hlpFuncCount->helperSize = 0;
6417 #else // _TARGET_X86_
6418 // Is the address in mscoree.dll at all? (All helpers are in
6420 if (dynamicHlpFunc->pfnHelper >= (LPBYTE*)g_pMSCorEE && dynamicHlpFunc->pfnHelper < (LPBYTE*)g_pMSCorEE + VAL32(pNT->OptionalHeader.SizeOfImage))
6422 // See note above. How do I get the size on x86 for a static method?
6423 hlpFuncCount->helperSize = 0;
// Outside the module image => the dynamic helper is itself a generated
// Stub; recover its size from the Stub header (then exclude the header).
6427 Stub::RecoverStubAndSize((TADDR)dynamicHlpFunc->pfnHelper, (DWORD*)&hlpFuncCount->helperSize);
6428 hlpFuncCount->helperSize -= sizeof(Stub);
6431 #endif // _TARGET_AMD64_
6433 pSl->EmitJITHelperLoggingThunk(GetEEFuncEntryPoint(dynamicHlpFunc->pfnHelper), (LPVOID)hlpFuncCount);
6434 Stub* pStub = pSl->Link(pHeap);
6435 dynamicHlpFunc->pfnHelper = (void*)pStub->GetEntryPoint();
6443 // Restore original access rights to the static hlpFuncTable
6444 ClrVirtualProtect((LPVOID)hlpFuncTable, (sizeof(VMHELPDEF) * CORINFO_HELP_COUNT), dwOldProtect, &dwOldProtect);
6449 #endif // _DEBUG && (_TARGET_AMD64_ || _TARGET_X86_)