1 // Licensed to the .NET Foundation under one or more agreements.
2 // The .NET Foundation licenses this file to you under the MIT license.
3 // See the LICENSE file in the project root for more information.
8 /*============================================================
10 ** File: COMUtilNative
14 ** Purpose: A dumping ground for classes which aren't large
15 ** enough to get their own file in the EE.
19 ===========================================================*/
24 #include "comutilnative.h"
30 #include "gcheaputilities.h"
32 #include "invokeutil.h"
34 #include "typestring.h"
36 #include "finalizerthread.h"
37 #include "threadsuspend.h"
39 #ifdef FEATURE_COMINTEROP
40 #include "comcallablewrapper.h"
42 #endif // FEATURE_COMINTEROP
44 #include "arraynative.inl"
/*===================================IsDigit====================================
**Returns a bool indicating whether the character passed in represents a
**valid digit for the given radix; on success *result receives its value.
==============================================================================*/
bool IsDigit(WCHAR c, int radix, int *result)
PRECONDITION(CheckPointer(result));
// '0'..'9' map directly to their numeric value.
*result = DIGIT_TO_INT(c);
else if (c>='A' && c<='Z') {
//+10 is necessary because A is actually 10, etc.
else if (c>='a' && c<='z') {
//+10 is necessary because a is actually 10, etc.
// Only accept the character if its value is legal in the requested radix.
if ((*result >=0) && (*result < radix))
// Parses the leading run of decimal digits in wstr (at most 'length'
// characters) and returns their integer value; parsing stops at the first
// character IsDigit rejects. No overflow checking is performed here.
INT32 wtoi(__in_ecount(length) WCHAR* wstr, DWORD length)
PRECONDITION(CheckPointer(wstr));
PRECONDITION(length >= 0);
// Accumulate left-to-right: result = result*10 + next digit value.
while ( (i < length) && (IsDigit(wstr[i], 10 ,&value)) ) {
//Read all of the digits and convert to a number
result = result*10 + value;
// Returns TRUE when the given exception is one of the runtime's preallocated
// exception objects (e.g. the preallocated OOM), which managed code must
// treat as immutable.
FCIMPL1(FC_BOOL_RET, ExceptionNative::IsImmutableAgileException, Object* pExceptionUNSAFE)
ASSERT(pExceptionUNSAFE != NULL);
OBJECTREF pException = (OBJECTREF) pExceptionUNSAFE;
// The preallocated exception objects may be used from multiple AppDomains
// and therefore must remain immutable from the application's perspective.
FC_RETURN_BOOL(CLRException::IsPreallocatedExceptionObject(pException));
// This FCall sets a flag against the thread exception state to indicate to
// IL_Throw and the StackTraceInfo implementation to account for the fact
// that we have restored a foreign exception dispatch details.
// Refer to the respective methods for details on how they use this flag.
FCIMPL0(VOID, ExceptionNative::PrepareForForeignExceptionRaise)
// TES = per-thread exception state for the current thread.
PTR_ThreadExceptionState pCurTES = GetThread()->GetExceptionState();
// Set a flag against the TES to indicate this is a foreign exception raise.
pCurTES->SetRaisingForeignException();
// Given an exception object, this method will extract the stacktrace and dynamic method array and set them up for return to the caller.
FCIMPL3(VOID, ExceptionNative::GetStackTracesDeepCopy, Object* pExceptionObjectUnsafe, Object **pStackTraceUnsafe, Object **pDynamicMethodsUnsafe);
// All pointers must be supplied by the managed caller.
ASSERT(pExceptionObjectUnsafe != NULL);
ASSERT(pStackTraceUnsafe != NULL);
ASSERT(pDynamicMethodsUnsafe != NULL);
// GC-protected locals: the source stack trace + its deep copy, the exception
// reference, and the dynamic-method arrays (Managed Resolvers) + their copy.
StackTraceArray stackTrace;
StackTraceArray stackTraceCopy;
EXCEPTIONREF refException;
PTRARRAYREF dynamicMethodsArray; // Object array of Managed Resolvers
PTRARRAYREF dynamicMethodsArrayCopy; // Copy of the object array of Managed Resolvers
ZeroMemory(&gc, sizeof(gc));
// GC protect the array reference
HELPER_METHOD_FRAME_BEGIN_PROTECT(gc);
// Get the exception object reference
gc.refException = (EXCEPTIONREF)(ObjectToOBJECTREF(pExceptionObjectUnsafe));
// Fetch the stacktrace details from the exception under a lock
gc.refException->GetStackTrace(gc.stackTrace, &gc.dynamicMethodsArray);
bool fHaveStackTrace = false;
bool fHaveDynamicMethodArray = false;
if ((unsigned)gc.stackTrace.Size() > 0)
// Deepcopy the array
gc.stackTraceCopy.CopyFrom(gc.stackTrace);
fHaveStackTrace = true;
if (gc.dynamicMethodsArray != NULL)
// Get the number of elements in the dynamic methods array
unsigned cOrigDynamic = gc.dynamicMethodsArray->GetNumComponents();
// ..and allocate a new array. This can trigger GC or throw under OOM.
gc.dynamicMethodsArrayCopy = (PTRARRAYREF)AllocateObjectArray(cOrigDynamic, g_pObjectClass);
// Deepcopy references to the new array we just allocated
memmoveGCRefs(gc.dynamicMethodsArrayCopy->GetDataPtr(), gc.dynamicMethodsArray->GetDataPtr(),
cOrigDynamic * sizeof(Object *));
fHaveDynamicMethodArray = true;
// Return the deep copies (or NULL when the exception had none) to the caller.
*pStackTraceUnsafe = fHaveStackTrace?OBJECTREFToObject(gc.stackTraceCopy.Get()):NULL;
*pDynamicMethodsUnsafe = fHaveDynamicMethodArray?OBJECTREFToObject(gc.dynamicMethodsArrayCopy):NULL;
HELPER_METHOD_FRAME_END();
// Given an exception object and deep copied instances of a stacktrace and/or dynamic method array, this method will set the latter in the exception object instance.
FCIMPL3(VOID, ExceptionNative::SaveStackTracesFromDeepCopy, Object* pExceptionObjectUnsafe, Object *pStackTraceUnsafe, Object *pDynamicMethodsUnsafe);
ASSERT(pExceptionObjectUnsafe != NULL);
// GC-protected locals for the incoming stack trace and dynamic method array.
StackTraceArray stackTrace;
EXCEPTIONREF refException;
PTRARRAYREF dynamicMethodsArray; // Object array of Managed Resolvers
ZeroMemory(&gc, sizeof(gc));
// GC protect the array reference
HELPER_METHOD_FRAME_BEGIN_PROTECT(gc);
// Get the exception object reference
gc.refException = (EXCEPTIONREF)(ObjectToOBJECTREF(pExceptionObjectUnsafe));
if (pStackTraceUnsafe != NULL)
// Copy the stacktrace
StackTraceArray stackTraceArray((I1ARRAYREF)ObjectToOBJECTREF(pStackTraceUnsafe));
gc.stackTrace.Swap(stackTraceArray);
gc.dynamicMethodsArray = NULL;
if (pDynamicMethodsUnsafe != NULL)
gc.dynamicMethodsArray = (PTRARRAYREF)ObjectToOBJECTREF(pDynamicMethodsUnsafe);
// If there is no stacktrace, then there cannot be any dynamic method array. Thus,
// save stacktrace only when we have it.
if (gc.stackTrace.Size() > 0)
// Save the stacktrace details in the exception under a lock
gc.refException->SetStackTrace(gc.stackTrace, gc.dynamicMethodsArray);
gc.refException->SetNullStackTrace();
HELPER_METHOD_FRAME_END();
// This method performs a deep copy of the stack trace array.
FCIMPL1(Object*, ExceptionNative::CopyStackTrace, Object* pStackTraceUNSAFE)
ASSERT(pStackTraceUNSAFE != NULL);
// GC-protected pair: the source stack trace and its deep copy.
StackTraceArray stackTrace;
StackTraceArray stackTraceCopy;
_gc(I1ARRAYREF refStackTrace)
: stackTrace(refStackTrace)
_gc gc((I1ARRAYREF)(ObjectToOBJECTREF(pStackTraceUNSAFE)));
// GC protect the array reference
HELPER_METHOD_FRAME_BEGIN_RET_PROTECT(gc);
// Deepcopy the array
gc.stackTraceCopy.CopyFrom(gc.stackTrace);
HELPER_METHOD_FRAME_END();
// Hand the copy back as a raw Object* for the FCall return.
return OBJECTREFToObject(gc.stackTraceCopy.Get());
// This method performs a deep copy of the dynamic method array.
FCIMPL1(Object*, ExceptionNative::CopyDynamicMethods, Object* pDynamicMethodsUNSAFE)
ASSERT(pDynamicMethodsUNSAFE != NULL);
// GC-protected locals: the source array and its shallow-element/deep-array copy.
PTRARRAYREF dynamicMethodsArray; // Object array of Managed Resolvers
PTRARRAYREF dynamicMethodsArrayCopy; // Copy of the object array of Managed Resolvers
ZeroMemory(&gc, sizeof(gc));
HELPER_METHOD_FRAME_BEGIN_RET_PROTECT(gc);
gc.dynamicMethodsArray = (PTRARRAYREF)(ObjectToOBJECTREF(pDynamicMethodsUNSAFE));
// Get the number of elements in the array
unsigned cOrigDynamic = gc.dynamicMethodsArray->GetNumComponents();
// ..and allocate a new array. This can trigger GC or throw under OOM.
gc.dynamicMethodsArrayCopy = (PTRARRAYREF)AllocateObjectArray(cOrigDynamic, g_pObjectClass);
// Copy references to the new array we just allocated
memmoveGCRefs(gc.dynamicMethodsArrayCopy->GetDataPtr(), gc.dynamicMethodsArray->GetDataPtr(),
cOrigDynamic * sizeof(Object *));
HELPER_METHOD_FRAME_END();
return OBJECTREFToObject(gc.dynamicMethodsArrayCopy);
// Allocates and returns a BSTR copy of the managed string's characters.
// Caller owns the returned BSTR (SysFreeString).
BSTR BStrFromString(STRINGREF s)
// Grab the raw character buffer/length; "DangerousForGC" because the buffer
// is only valid while no GC moves the string.
s->RefInterpretGetStringValuesDangerousForGC(&wz, &cch);
bstr = SysAllocString(wz);
// Builds the IErrorInfo description BSTR for a managed exception:
// Exception.Message, or the exception class name when Message is empty.
// Caller owns the returned BSTR.
static BSTR GetExceptionDescription(OBJECTREF objException)
PRECONDITION( IsException(objException->GetMethodTable()) );
BSTR bstrDescription;
STRINGREF MessageString = NULL;
GCPROTECT_BEGIN(MessageString)
GCPROTECT_BEGIN(objException)
// read Exception.Message property
MethodDescCallSite getMessage(METHOD__EXCEPTION__GET_MESSAGE, &objException);
ARG_SLOT GetMessageArgs[] = { ObjToArgSlot(objException)};
MessageString = getMessage.Call_RetSTRINGREF(GetMessageArgs);
// if the message string is empty then use the exception classname.
if (MessageString == NULL || MessageString->GetStringLength() == 0) {
MethodDescCallSite getClassName(METHOD__EXCEPTION__GET_CLASS_NAME, &objException);
ARG_SLOT GetClassNameArgs[] = { ObjToArgSlot(objException)};
MessageString = getClassName.Call_RetSTRINGREF(GetClassNameArgs);
_ASSERTE(MessageString != NULL && MessageString->GetStringLength() != 0);
// Allocate the description BSTR.
int DescriptionLen = MessageString->GetStringLength();
bstrDescription = SysAllocStringLen(MessageString->GetBuffer(), DescriptionLen);
return bstrDescription;
// Returns Exception.Source as a freshly allocated BSTR (caller owns it).
static BSTR GetExceptionSource(OBJECTREF objException)
PRECONDITION( IsException(objException->GetMethodTable()) );
GCPROTECT_BEGIN(objException)
// read Exception.Source property
MethodDescCallSite getSource(METHOD__EXCEPTION__GET_SOURCE, &objException);
ARG_SLOT GetSourceArgs[] = { ObjToArgSlot(objException)};
refRetVal = getSource.Call_RetSTRINGREF(GetSourceArgs);
// Convert the managed string (possibly NULL) into a BSTR for COM callers.
return BStrFromString(refRetVal);
// Extracts Exception.HelpLink into *pbstrHelpFile and, when the link ends in
// "#<number>", splits that numeric suffix off into *pdwHelpContext and trims
// it from the help-file string.
static void GetExceptionHelp(OBJECTREF objException, BSTR *pbstrHelpFile, DWORD *pdwHelpContext)
INJECT_FAULT(COMPlusThrowOM());
PRECONDITION(IsException(objException->GetMethodTable()));
PRECONDITION(CheckPointer(pbstrHelpFile));
PRECONDITION(CheckPointer(pdwHelpContext));
GCPROTECT_BEGIN(objException);
// read Exception.HelpLink property
MethodDescCallSite getHelpLink(METHOD__EXCEPTION__GET_HELP_LINK, &objException);
ARG_SLOT GetHelpLinkArgs[] = { ObjToArgSlot(objException)};
*pbstrHelpFile = BStrFromString(getHelpLink.Call_RetSTRINGREF(GetHelpLinkArgs));
// parse the help file to check for the presence of helpcontext
int len = SysStringLen(*pbstrHelpFile);
WCHAR *pwstr = *pbstrHelpFile;
BOOL fFoundPound = FALSE;
// Scan backwards for the last '#' separator.
for (pos = len - 1; pos >= 0; pos--) {
if (pwstr[pos] == W('#')) {
// State for validating the text after '#': it must be optional whitespace,
// a run of digits, then optional trailing whitespace only.
int NumberStartPos = -1;
BOOL bNumberStarted = FALSE;
BOOL bNumberFinished = FALSE;
BOOL bInvalidDigitsFound = FALSE;
_ASSERTE(pwstr[pos] == W('#'));
// Check to see if the string to the right of the pound a valid number.
for (pos++; pos < len; pos++) {
if (bNumberFinished) {
// Anything non-whitespace after the digits ended invalidates the suffix.
if (!COMCharacter::nativeIsWhiteSpace(pwstr[pos])) {
bInvalidDigitsFound = TRUE;
else if (bNumberStarted) {
// Whitespace terminates the digit run; non-digit non-space is invalid.
if (COMCharacter::nativeIsWhiteSpace(pwstr[pos])) {
bNumberFinished = TRUE;
else if (!COMCharacter::nativeIsDigit(pwstr[pos])) {
bInvalidDigitsFound = TRUE;
// Still looking for the first digit.
if (COMCharacter::nativeIsDigit(pwstr[pos])) {
NumberStartPos = pos;
bNumberStarted = TRUE;
else if (!COMCharacter::nativeIsWhiteSpace(pwstr[pos])) {
bInvalidDigitsFound = TRUE;
if (bNumberStarted && !bInvalidDigitsFound) {
// Grab the help context and remove it from the help file.
*pdwHelpContext = (DWORD)wtoi(&pwstr[NumberStartPos], len - NumberStartPos);
// Allocate a new help file string of the right length.
BSTR strOld = *pbstrHelpFile;
*pbstrHelpFile = SysAllocStringLen(strOld, PoundPos);
SysFreeString(strOld);
// Fills pED with the COM error information (HRESULT, description, source,
// help file + help context) derived from the managed exception.
// NOTE: caller cleans up any partially initialized BSTRs in pED
void ExceptionNative::GetExceptionData(OBJECTREF objException, ExceptionData *pED)
PRECONDITION(IsException(objException->GetMethodTable()));
PRECONDITION(CheckPointer(pED));
// Start from a zeroed struct so partially-filled state is well defined.
ZeroMemory(pED, sizeof(ExceptionData));
GCPROTECT_BEGIN(objException);
pED->hr = GetExceptionHResult(objException);
pED->bstrDescription = GetExceptionDescription(objException);
pED->bstrSource = GetExceptionSource(objException);
GetExceptionHelp(objException, &pED->bstrHelpFile, &pED->dwHelpContext);
537 #ifdef FEATURE_COMINTEROP
// IErrorInfo plumbing for CCWs: each accessor below forwards to the
// corresponding exception helper, using the wrapped managed object.
HRESULT SimpleComCallWrapper::IErrorInfo_hr()
return GetExceptionHResult(this->GetObjectRef());
// IErrorInfo::GetDescription — caller owns the returned BSTR.
BSTR SimpleComCallWrapper::IErrorInfo_bstrDescription()
return GetExceptionDescription(this->GetObjectRef());
// IErrorInfo::GetSource — caller owns the returned BSTR.
BSTR SimpleComCallWrapper::IErrorInfo_bstrSource()
return GetExceptionSource(this->GetObjectRef());
// IErrorInfo::GetHelpFile — help context portion is discarded here.
BSTR SimpleComCallWrapper::IErrorInfo_bstrHelpFile()
GetExceptionHelp(this->GetObjectRef(), &bstrHelpFile, &dwHelpContext);
// IErrorInfo::GetHelpContext — help file string is freed, only the id kept.
DWORD SimpleComCallWrapper::IErrorInfo_dwHelpContext()
GetExceptionHelp(this->GetObjectRef(), &bstrHelpFile, &dwHelpContext);
SysFreeString(bstrHelpFile);
return dwHelpContext;
// IErrorInfo::GetGUID.
GUID SimpleComCallWrapper::IErrorInfo_guid()
LIMITED_METHOD_CONTRACT;
582 #endif // FEATURE_COMINTEROP
// Returns the native EXCEPTION_POINTERS for the exception currently in
// flight on this thread, or NULL when no exception is in progress.
FCIMPL0(EXCEPTION_POINTERS*, ExceptionNative::GetExceptionPointers)
EXCEPTION_POINTERS* retVal = NULL;
Thread *pThread = GetThread();
if (pThread->IsExceptionInProgress())
retVal = pThread->GetExceptionState()->GetExceptionPointers();
// Returns the SEH exception code of the in-flight exception (0-default when
// no exception is in progress — see retVal initialization above this view).
FCIMPL0(INT32, ExceptionNative::GetExceptionCode)
Thread *pThread = GetThread();
if (pThread->IsExceptionInProgress())
retVal = pThread->GetExceptionState()->GetExceptionCode();
// Returns the process-wide count of exceptions thrown so far.
extern uint32_t g_exceptionCount;
FCIMPL0(UINT32, ExceptionNative::GetExceptionCount)
return g_exceptionCount;
// Loads a canned exception message from the CLR's unmanaged resources,
// falling back to a hard-coded English string if the load fails.
//
// This must be implemented as an FCALL because managed code cannot
// swallow a thread abort exception without resetting the abort,
// which we don't want to do. Additionally, we can run into deadlocks
// if we use the ResourceManager to do resource lookups - it requires
// taking managed locks when initializing Globalization & Security,
// but a thread abort on a separate thread initializing those same
// systems would also do a resource lookup via the ResourceManager.
// We've deadlocked in CompareInfo.GetCompareInfo &
// Environment.GetResourceString. It's not practical to take all of
// our locks within CER's to avoid this problem - just use the CLR's
// unmanaged resources.
void QCALLTYPE ExceptionNative::GetMessageFromNativeResources(ExceptionMessageKind kind, QCall::StringHandleOnStack retMesg)
const WCHAR * wszFallbackString = NULL;
// ThreadAbort: localized resource, else hard-coded English fallback.
hr = buffer.LoadResourceAndReturnHR(CCompRC::Error, IDS_EE_THREAD_ABORT);
wszFallbackString = W("Thread was being aborted.");
case ThreadInterrupted:
hr = buffer.LoadResourceAndReturnHR(CCompRC::Error, IDS_EE_THREAD_INTERRUPTED);
wszFallbackString = W("Thread was interrupted from a waiting state.");
// OutOfMemory: same pattern.
hr = buffer.LoadResourceAndReturnHR(CCompRC::Error, IDS_EE_OUT_OF_MEMORY);
wszFallbackString = W("Insufficient memory to continue the execution of the program.");
_ASSERTE(!"Unknown ExceptionMessageKind value!");
// Resource load failed: log the HRESULT and return the English fallback.
STRESS_LOG1(LF_BCL, LL_ALWAYS, "LoadResource error: %x", hr);
_ASSERTE(wszFallbackString != NULL);
retMesg.Set(wszFallbackString);
// This method copies from one primitive array to another based
// upon an offset into each and a byte count.
FCIMPL5(VOID, Buffer::BlockCopy, ArrayBase *src, int srcOffset, ArrayBase *dst, int dstOffset, int count)
// Verify that both the src and dst are Arrays of primitive
// <TODO>@TODO: We need to check for booleans</TODO>
if (src==NULL || dst==NULL)
FCThrowArgumentNullVoid((src==NULL) ? W("src") : W("dst"));
// Byte lengths of each array, computed below.
SIZE_T srcLen, dstLen;
// Use specialized fast path for byte arrays because it is what Buffer::BlockCopy is
// typically used for.
MethodTable * pByteArrayMT = g_pByteArrayMT;
_ASSERTE(pByteArrayMT != NULL);
// Optimization: If src is a byte array, we can
// simply set srcLen to GetNumComponents, without having
// to call GetComponentSize or verifying GetArrayElementType
if (src->GetMethodTable() == pByteArrayMT)
srcLen = src->GetNumComponents();
srcLen = src->GetNumComponents() * src->GetComponentSize();
// We only want to allow arrays of primitives, no Objects.
const CorElementType srcET = src->GetArrayElementType();
if (!CorTypeInfo::IsPrimitiveType_NoThrow(srcET))
FCThrowArgumentVoid(W("src"), W("Arg_MustBePrimArray"));
// Optimization: If copying to/from the same array, then
// we know that dstLen and srcLen must be the same.
else if (dst->GetMethodTable() == pByteArrayMT)
dstLen = dst->GetNumComponents();
dstLen = dst->GetNumComponents() * dst->GetComponentSize();
if (dst->GetMethodTable() != src->GetMethodTable())
// dst has a different element type than src; validate it is primitive too.
const CorElementType dstET = dst->GetArrayElementType();
if (!CorTypeInfo::IsPrimitiveType_NoThrow(dstET))
FCThrowArgumentVoid(W("dst"), W("Arg_MustBePrimArray"));
// Reject negative offsets/count, naming the first offending argument.
if (srcOffset < 0 || dstOffset < 0 || count < 0) {
const wchar_t* str = W("srcOffset");
if (dstOffset < 0) str = W("dstOffset");
if (count < 0) str = W("count");
FCThrowArgumentOutOfRangeVoid(str, W("ArgumentOutOfRange_NeedNonNegNum"));
// SIZE_T arithmetic avoids int overflow in the offset+count range checks.
if (srcLen < (SIZE_T)srcOffset + (SIZE_T)count || dstLen < (SIZE_T)dstOffset + (SIZE_T)count) {
FCThrowArgumentVoid(NULL, W("Argument_InvalidOffLen"));
PTR_BYTE srcPtr = src->GetDataPtr() + srcOffset;
PTR_BYTE dstPtr = dst->GetDataPtr() + dstOffset;
// memmove handles overlapping ranges; skip the call entirely for no-ops.
if ((srcPtr != dstPtr) && (count > 0)) {
memmove(dstPtr, srcPtr, count);
// Zeroes 'length' bytes starting at dst. On x86/amd64 large blocks are
// manually aligned first to dodge a slow rep-stosb path (see comment below).
void QCALLTYPE MemoryNative::Clear(void *dst, size_t length)
#if defined(_X86_) || defined(_AMD64_)
// memset ends up calling rep stosb if the hardware claims to support it efficiently. rep stosb is up to 2x slower
// on misaligned blocks. Workaround this issue by aligning the blocks passed to memset upfront.
// Zero the (possibly misaligned) head of the block...
*((uint64_t*)dst + 1) = 0;
*((uint64_t*)dst + 2) = 0;
*((uint64_t*)dst + 3) = 0;
// ...and the (possibly misaligned) tail...
void* end = (uint8_t*)dst + length;
*((uint64_t*)end - 1) = 0;
*((uint64_t*)end - 2) = 0;
*((uint64_t*)end - 3) = 0;
*((uint64_t*)end - 4) = 0;
// ...then shrink [dst, dst+length) to a 32-byte-aligned middle for memset.
dst = ALIGN_UP((uint8_t*)dst + 1, 32);
length = ALIGN_DOWN((uint8_t*)end - 1, 32) - (uint8_t*)dst;
memset(dst, 0, length);
// Moves byteCount bytes from src to dst, emitting GC write barriers for any
// object references in the range.
FCIMPL3(VOID, MemoryNative::BulkMoveWithWriteBarrier, void *dst, void *src, size_t byteCount)
InlinedMemmoveGCRefsHelper(dst, src, byteCount);
// Plain (non-GC-aware) memory move; dst/src may overlap.
void QCALLTYPE Buffer::MemMove(void *dst, void *src, size_t length)
memmove(dst, src, length);
// Returns a bool to indicate if the array is of primitive types or not.
FCIMPL1(FC_BOOL_RET, Buffer::IsPrimitiveTypeArray, ArrayBase *arrayUNSAFE)
_ASSERTE(arrayUNSAFE != NULL);
// Check the type from the contained element's handle
TypeHandle elementTH = arrayUNSAFE->GetArrayElementTypeHandle();
BOOL fIsPrimitiveTypeArray = CorTypeInfo::IsPrimitiveType_NoThrow(elementTH.GetVerifierCorElementType());
FC_RETURN_BOOL(fIsPrimitiveTypeArray);
// Returns the length in bytes of an array containing
// primitive type elements
FCIMPL1(INT32, Buffer::ByteLength, ArrayBase* arrayUNSAFE)
_ASSERTE(arrayUNSAFE != NULL);
// Byte length = element count * element size (SIZE_T to avoid overflow).
SIZE_T iRetVal = arrayUNSAFE->GetNumComponents() * arrayUNSAFE->GetComponentSize();
// This API is exposed both as Buffer.ByteLength and also used indirectly in argument
// checks for Buffer.GetByte/SetByte.
// If somebody called Get/SetByte on 2GB+ arrays, there is a decent chance that
// the computation of the index has overflowed. Thus we intentionally always
// throw on 2GB+ arrays in Get/SetByte argument checks (even for indices <2GB)
// to prevent people from running into a trap silently.
if (iRetVal > INT32_MAX)
FCThrow(kOverflowException);
return (INT32)iRetVal;
// Static state for GCInterface's memory-pressure bookkeeping.
MethodDesc *GCInterface::m_pCacheMethod=NULL;
// Current accumulated memory pressure and the threshold that triggers a GC.
UINT64 GCInterface::m_ulMemPressure = 0;
UINT64 GCInterface::m_ulThreshold = MIN_GC_MEMORYPRESSURE_THRESHOLD;
INT32 GCInterface::m_gc_counts[3] = {0,0,0};
// Serializes updates to the pressure counters below.
CrstStatic GCInterface::m_MemoryPressureLock;
UINT64 GCInterface::m_addPressure[NEW_PRESSURE_COUNT] = {0, 0, 0, 0}; // history of memory pressure additions
UINT64 GCInterface::m_remPressure[NEW_PRESSURE_COUNT] = {0, 0, 0, 0}; // history of memory pressure removals
// incremented after a gen2 GC has been detected,
// (m_iteration % NEW_PRESSURE_COUNT) is used as an index into m_addPressure and m_remPressure
UINT GCInterface::m_iteration = 0;
// Thin FCall wrappers that forward directly to the GC heap interface.
// Fills the callers' out-params with the GC's current memory statistics.
FCIMPL6(void, GCInterface::GetMemoryInfo, UINT64* highMemLoadThreshold, UINT64* totalAvailableMemoryBytes, UINT64* lastRecordedMemLoadBytes, UINT32* lastRecordedMemLoadPct, size_t* lastRecordedHeapSizeBytes, size_t* lastRecordedFragmentationBytes)
FC_GC_POLL_NOT_NEEDED();
return GCHeapUtilities::GetGCHeap()->GetMemoryInfo(highMemLoadThreshold, totalAvailableMemoryBytes,
lastRecordedMemLoadBytes, lastRecordedMemLoadPct,
lastRecordedHeapSizeBytes, lastRecordedFragmentationBytes);
// Reads the current GC latency mode (GCSettings.LatencyMode getter).
FCIMPL0(int, GCInterface::GetGcLatencyMode)
FC_GC_POLL_NOT_NEEDED();
int result = (INT32)GCHeapUtilities::GetGCHeap()->GetGcLatencyMode();
// Sets the GC latency mode; returns the GC's status code.
FCIMPL1(int, GCInterface::SetGcLatencyMode, int newLatencyMode)
FC_GC_POLL_NOT_NEEDED();
return GCHeapUtilities::GetGCHeap()->SetGcLatencyMode(newLatencyMode);
// Reads the large-object-heap compaction mode.
FCIMPL0(int, GCInterface::GetLOHCompactionMode)
FC_GC_POLL_NOT_NEEDED();
int result = (INT32)GCHeapUtilities::GetGCHeap()->GetLOHCompactionMode();
// Sets the large-object-heap compaction mode.
FCIMPL1(void, GCInterface::SetLOHCompactionMode, int newLOHCompactionyMode)
FC_GC_POLL_NOT_NEEDED();
GCHeapUtilities::GetGCHeap()->SetLOHCompactionMode(newLOHCompactionyMode);
// Registers for full-GC notifications at the given gen2/LOH thresholds.
FCIMPL2(FC_BOOL_RET, GCInterface::RegisterForFullGCNotification, UINT32 gen2Percentage, UINT32 lohPercentage)
FC_GC_POLL_NOT_NEEDED();
FC_RETURN_BOOL(GCHeapUtilities::GetGCHeap()->RegisterForFullGCNotification(gen2Percentage, lohPercentage));
// Cancels a previously registered full-GC notification.
FCIMPL0(FC_BOOL_RET, GCInterface::CancelFullGCNotification)
FC_GC_POLL_NOT_NEEDED();
FC_RETURN_BOOL(GCHeapUtilities::GetGCHeap()->CancelFullGCNotification());
// Blocks until the GC signals a full GC is approaching (or the timeout
// elapses). -1 maps to an infinite wait.
FCIMPL1(int, GCInterface::WaitForFullGCApproach, int millisecondsTimeout)
DISABLED(GC_TRIGGERS); // can't use this in an FCALL because we're in forbid gc mode until we setup a H_M_F.
//We don't need to check the top end because the GC will take care of that.
HELPER_METHOD_FRAME_BEGIN_RET_0();
DWORD dwMilliseconds = ((millisecondsTimeout == -1) ? INFINITE : millisecondsTimeout);
result = GCHeapUtilities::GetGCHeap()->WaitForFullGCApproach(dwMilliseconds);
HELPER_METHOD_FRAME_END();
// Blocks until the GC signals a full GC has completed (or the timeout
// elapses). -1 maps to an infinite wait.
FCIMPL1(int, GCInterface::WaitForFullGCComplete, int millisecondsTimeout)
DISABLED(GC_TRIGGERS); // can't use this in an FCALL because we're in forbid gc mode until we setup a H_M_F.
//We don't need to check the top end because the GC will take care of that.
HELPER_METHOD_FRAME_BEGIN_RET_0();
DWORD dwMilliseconds = ((millisecondsTimeout == -1) ? INFINITE : millisecondsTimeout);
result = GCHeapUtilities::GetGCHeap()->WaitForFullGCComplete(dwMilliseconds);
HELPER_METHOD_FRAME_END();
/*================================GetGeneration=================================
**Action: Returns the generation in which args->obj is found.
**Returns: The generation in which args->obj is found.
**Arguments: args->obj -- The object to locate.
**Exceptions: ArgumentException if args->obj is null.
==============================================================================*/
FCIMPL1(int, GCInterface::GetGeneration, Object* objUNSAFE)
if (objUNSAFE == NULL)
FCThrowArgumentNull(W("obj"));
int result = (INT32)GCHeapUtilities::GetGCHeap()->WhichGeneration(objUNSAFE);
/*================================GetSegmentSize================================
**Action: Returns the maximum GC heap segment size
**Returns: The maximum segment size of either the normal heap or the large object heap, whichever is bigger
==============================================================================*/
FCIMPL0(UINT64, GCInterface::GetSegmentSize)
IGCHeap * pGC = GCHeapUtilities::GetGCHeap();
size_t segment_size = pGC->GetValidSegmentSize(false);
size_t large_segment_size = pGC->GetValidSegmentSize(true);
_ASSERTE(segment_size < SIZE_T_MAX && large_segment_size < SIZE_T_MAX);
// Report the larger of the two segment sizes.
if (segment_size < large_segment_size)
segment_size = large_segment_size;
return (UINT64) segment_size;
/*================================CollectionCount=================================
**Action: Returns the number of collections for this generation since the beginning of the life of the process
**Returns: The collection count.
**Arguments: args->generation -- The generation
**Exceptions: Argument exception if args->generation is < 0 or > GetMaxGeneration();
==============================================================================*/
FCIMPL2(int, GCInterface::CollectionCount, INT32 generation, INT32 getSpecialGCCount)
//We've already checked this in GC.cs, so we'll just assert it here.
_ASSERTE(generation >= 0);
//We don't need to check the top end because the GC will take care of that.
int result = (INT32)GCHeapUtilities::GetGCHeap()->CollectionCount(generation, getSpecialGCCount);
// Enters a no-GC region of (at least) totalSize bytes; returns the GC's
// status code. lohSize is honored only when lohSizeKnown is TRUE.
int QCALLTYPE GCInterface::StartNoGCRegion(INT64 totalSize, BOOL lohSizeKnown, INT64 lohSize, BOOL disallowFullBlockingGC)
retVal = GCHeapUtilities::GetGCHeap()->StartNoGCRegion((ULONGLONG)totalSize,
!!disallowFullBlockingGC);
// Exits a previously started no-GC region; returns the GC's status code.
int QCALLTYPE GCInterface::EndNoGCRegion()
retVal = GCHeapUtilities::GetGCHeap()->EndNoGCRegion();
/*===============================GetGenerationWR================================
**Action: Returns the generation in which the object pointed to by a WeakReference is found.
**Arguments: args->handle -- the OBJECTHANDLE to the object which we're locating.
**Exceptions: ArgumentException if handle points to an object which is not accessible.
==============================================================================*/
FCIMPL1(int, GCInterface::GetGenerationWR, LPVOID handle)
HELPER_METHOD_FRAME_BEGIN_RET_0();
// Resolve the weak handle; a collected target yields NULL and throws below.
temp = ObjectFromHandle((OBJECTHANDLE) handle);
COMPlusThrowArgumentNull(W("weak handle"));
iRetVal = (INT32)GCHeapUtilities::GetGCHeap()->WhichGeneration(OBJECTREFToObject(temp));
HELPER_METHOD_FRAME_END();
// Returns the percentage of time spent in the most recent GC.
FCIMPL0(int, GCInterface::GetLastGCPercentTimeInGC)
return GCHeapUtilities::GetGCHeap()->GetLastGCPercentTimeInGC();
// Returns the size of the given generation as of the last GC.
FCIMPL1(UINT64, GCInterface::GetGenerationSize, int gen)
return (UINT64)(GCHeapUtilities::GetGCHeap()->GetLastGCGenerationSize(gen));
/*================================GetTotalMemory================================
**Action: Returns the total number of bytes in use
**Returns: The total number of bytes in use
==============================================================================*/
INT64 QCALLTYPE GCInterface::GetTotalMemory()
iRetVal = (INT64) GCHeapUtilities::GetGCHeap()->GetTotalBytesInUse();
/*==============================Collect=========================================
**Action: Collects all generations <= args->generation
**Arguments: args->generation: The maximum generation to collect
**Exceptions: Argument exception if args->generation is < 0 or > GetMaxGeneration();
==============================================================================*/
void QCALLTYPE GCInterface::Collect(INT32 generation, INT32 mode)
//We've already checked this in GC.cs, so we'll just assert it here.
_ASSERTE(generation >= -1);
//We don't need to check the top end because the GC will take care of that.
GCHeapUtilities::GetGCHeap()->GarbageCollect(generation, false, mode);
/*==========================WaitForPendingFinalizers============================
**Action: Run all Finalizers that haven't been run.
==============================================================================*/
void QCALLTYPE GCInterface::WaitForPendingFinalizers()
FinalizerThread::FinalizerThreadWait();
/*===============================GetMaxGeneration===============================
**Action: Returns the largest GC generation
**Returns: The largest GC Generation
==============================================================================*/
FCIMPL0(int, GCInterface::GetMaxGeneration)
return(INT32)GCHeapUtilities::GetGCHeap()->GetMaxGeneration();
/*===============================GetAllocatedBytesForCurrentThread===============================
**Action: Computes the allocated bytes so far on the current thread
**Returns: The allocated bytes so far on the current thread
==============================================================================*/
FCIMPL0(INT64, GCInterface::GetAllocatedBytesForCurrentThread)
INT64 currentAllocated = 0;
Thread *pThread = GetThread();
gc_alloc_context* ac = pThread->GetAllocContext();
// Total allocated = committed SOH + LOH bytes, minus the still-unused
// portion of the current allocation context (alloc_limit - alloc_ptr).
currentAllocated = ac->alloc_bytes + ac->alloc_bytes_loh - (ac->alloc_limit - ac->alloc_ptr);
return currentAllocated;
/*===============================AllocateNewArray===============================
**Action: Allocates a new array object. Allows passing extra flags
**Returns: The allocated array.
**Arguments: elementTypeHandle -> type of the element,
** length -> number of elements,
** zeroingOptional -> whether caller prefers to skip clearing the content of the array, if possible.
**Exceptions: IDS_EE_ARRAY_DIMENSIONS_EXCEEDED when size is too large. OOM if can't allocate.
==============================================================================*/
FCIMPL3(Object*, GCInterface::AllocateNewArray, void* arrayTypeHandle, INT32 length, CLR_BOOL zeroingOptional)
PRECONDITION(length >= 0);
OBJECTREF pRet = NULL;
TypeHandle arrayType = TypeHandle::FromPtr(arrayTypeHandle);
HELPER_METHOD_FRAME_BEGIN_RET_0();
// GC_ALLOC_ZEROING_OPTIONAL lets the GC skip clearing when it can.
pRet = AllocateSzArray(arrayType, length, zeroingOptional ? GC_ALLOC_ZEROING_OPTIONAL : GC_ALLOC_NO_FLAGS);
HELPER_METHOD_FRAME_END();
return OBJECTREFToObject(pRet);
// Returns the total bytes allocated process-wide. The imprecise path uses a
// monotonic high-watermark; the precise path suspends the EE to subtract each
// thread's unused allocation-context space.
FCIMPL1(INT64, GCInterface::GetTotalAllocatedBytes, CLR_BOOL precise)
#ifdef _TARGET_64BIT_
// 64-bit loads are atomic; read directly.
uint64_t unused_bytes = Thread::dead_threads_non_alloc_bytes;
// As it could be noticed we read 64bit values that may be concurrently updated.
// Such reads are not guaranteed to be atomic on 32bit so extra care should be taken.
uint64_t unused_bytes = FastInterlockCompareExchangeLong((LONG64*)& Thread::dead_threads_non_alloc_bytes, 0, 0);
uint64_t allocated_bytes = GCHeapUtilities::GetGCHeap()->GetTotalAllocatedBytes() - unused_bytes;
// highest reported allocated_bytes. We do not want to report a value less than that even if unused_bytes has increased.
static uint64_t high_watermark;
uint64_t current_high = high_watermark;
// CAS loop: advance the watermark only if it has not moved underneath us.
while (allocated_bytes > current_high)
uint64_t orig = FastInterlockCompareExchangeLong((LONG64*)& high_watermark, allocated_bytes, current_high);
if (orig == current_high)
return allocated_bytes;
current_high = orig;
return current_high;
HELPER_METHOD_FRAME_BEGIN_RET_0();
// We need to suspend/restart the EE to get each thread's
// non-allocated memory from their allocation contexts
ThreadSuspend::SuspendEE(ThreadSuspend::SUSPEND_OTHER);
allocated = GCHeapUtilities::GetGCHeap()->GetTotalAllocatedBytes() - Thread::dead_threads_non_alloc_bytes;
// Subtract each live thread's unused allocation-context space.
for (Thread *pThread = ThreadStore::GetThreadList(NULL); pThread; pThread = ThreadStore::GetThreadList(pThread))
gc_alloc_context* ac = pThread->GetAllocContext();
allocated -= ac->alloc_limit - ac->alloc_ptr;
ThreadSuspend::RestartEE(FALSE, TRUE);
HELPER_METHOD_FRAME_END();
1317 #ifdef FEATURE_BASICFREEZE
1319 /*===============================RegisterFrozenSegment===============================
1320 **Action: Registers the frozen segment
1321 **Returns: segment_handle
1322 **Arguments: args-> pointer to section, size of section
1324 ==============================================================================*/
// QCall: registers a caller-provided memory section as a frozen (non-collected)
// GC segment and returns the opaque segment handle (as void*).
// pSection/sizeSection must be non-null/non-zero (asserted below).
1325 void* QCALLTYPE GCInterface::RegisterFrozenSegment(void* pSection, SIZE_T sizeSection)
1329 void* retVal = nullptr;
1333 _ASSERTE(pSection != nullptr);
1334 _ASSERTE(sizeSection > 0);
// Describe the section to the GC: first object starts past the object header,
// and the whole section is treated as already allocated/committed/reserved.
1338 segment_info seginfo;
1339 seginfo.pvMem = pSection;
1340 seginfo.ibFirstObject = sizeof(ObjHeader);
1341 seginfo.ibAllocated = sizeSection;
1342 seginfo.ibCommit = seginfo.ibAllocated;
1343 seginfo.ibReserved = seginfo.ibAllocated;
1345 retVal = (void*)GCHeapUtilities::GetGCHeap()->RegisterFrozenSegment(&seginfo);
1352 /*===============================UnregisterFrozenSegment===============================
1353 **Action: Unregisters the frozen segment
1355 **Arguments: args-> segment handle
1357 ==============================================================================*/
// QCall: unregisters a frozen segment previously returned by RegisterFrozenSegment.
// The raw void* handle is cast back to the GC's segment_handle type.
1358 void QCALLTYPE GCInterface::UnregisterFrozenSegment(void* segment)
1364 _ASSERTE(segment != nullptr);
1368 GCHeapUtilities::GetGCHeap()->UnregisterFrozenSegment((segment_handle)segment);
1373 #endif // FEATURE_BASICFREEZE
1375 /*==============================SuppressFinalize================================
1376 **Action: Indicate that an object's finalizer should not be run by the system
1377 **Arguments: Object of interest
1379 ==============================================================================*/
// FCall backing GC.SuppressFinalize: marks obj so the finalizer thread skips it.
// No-op (early return, elided line) when the type has no finalizer at all.
1380 FCIMPL1(void, GCInterface::SuppressFinalize, Object *obj)
1384 // Checked by the caller
1385 _ASSERTE(obj != NULL);
1387 if (!obj->GetMethodTable ()->HasFinalizer())
1390 GCHeapUtilities::GetGCHeap()->SetFinalizationRun(obj);
1396 /*============================ReRegisterForFinalize==============================
1397 **Action: Indicate that an object's finalizer should be run by the system.
1398 **Arguments: Object of interest
1400 ==============================================================================*/
// FCall backing GC.ReRegisterForFinalize: re-queues obj for finalization.
// Only meaningful for finalizable types; the elided branch inside the frame
// presumably throws OOM when RegisterForFinalization fails — confirm in full source.
1401 FCIMPL1(void, GCInterface::ReRegisterForFinalize, Object *obj)
1405 // Checked by the caller
1406 _ASSERTE(obj != NULL);
1408 if (obj->GetMethodTable()->HasFinalizer())
// Helper frame protects obj: registration may trigger a GC/allocation.
1410 HELPER_METHOD_FRAME_BEGIN_1(obj);
1411 if (!GCHeapUtilities::GetGCHeap()->RegisterForFinalization(-1, obj))
1415 HELPER_METHOD_FRAME_END();
// Lock-free saturating 64-bit add: *pAugend += addend, clamped to UINT64_MAX on
// overflow, retried via compare-exchange until no other writer intervenes.
// Returns the new value (return statement elided in this capture).
1420 FORCEINLINE UINT64 GCInterface::InterlockedAdd (UINT64 *pAugend, UINT64 addend) {
1421 WRAPPER_NO_CONTRACT;
1427 oldMemValue = *pAugend;
1428 newMemValue = oldMemValue + addend;
1430 // check for overflow
1431 if (newMemValue < oldMemValue)
1433 newMemValue = UINT64_MAX;
// Retry if another thread changed *pAugend between our read and the CAS.
1435 } while (InterlockedCompareExchange64((LONGLONG*) pAugend, (LONGLONG) newMemValue, (LONGLONG) oldMemValue) != (LONGLONG) oldMemValue);
// Lock-free saturating 64-bit subtract: *pMinuend -= subtrahend, clamped at 0 on
// underflow (the clamp assignment line is elided in this capture), CAS-retried.
1440 FORCEINLINE UINT64 GCInterface::InterlockedSub(UINT64 *pMinuend, UINT64 subtrahend) {
1441 WRAPPER_NO_CONTRACT;
1447 oldMemValue = *pMinuend;
1448 newMemValue = oldMemValue - subtrahend;
1450 // check for underflow
1451 if (newMemValue > oldMemValue)
1454 } while (InterlockedCompareExchange64((LONGLONG*) pMinuend, (LONGLONG) newMemValue, (LONGLONG) oldMemValue) != (LONGLONG) oldMemValue);
// QCall entry point for GC.AddMemoryPressure; thin wrapper that establishes the
// QCall frame (BEGIN/END_QCALL elided here) and forwards to AddMemoryPressure.
1459 void QCALLTYPE GCInterface::_AddMemoryPressure(UINT64 bytesAllocated)
1463 // AddMemoryPressure could cause a GC, so we need a frame
1465 AddMemoryPressure(bytesAllocated);
// Classic AddMemoryPressure: accumulates unmanaged-memory pressure and triggers a
// GC once the running total crosses a threshold that grows both linearly
// (threshold + 8*bytes) and geometrically (total * 1.1), taking the max of the two.
1469 void GCInterface::AddMemoryPressure(UINT64 bytesAllocated)
1479 SendEtwAddMemoryPressureEvent(bytesAllocated);
// Saturating atomic add of the new pressure into the running total.
1481 UINT64 newMemValue = InterlockedAdd(&m_ulMemPressure, bytesAllocated);
1483 if (newMemValue > m_ulThreshold)
1485 INT32 gen_collect = 0;
// Serialize threshold recomputation and collection-count bookkeeping.
1488 CrstHolder holder(&m_MemoryPressureLock);
1490 // to avoid collecting too often, take the max threshold of the linear and geometric growth
1494 UINT64 bytesAllocatedMax = (UINT64_MAX - m_ulThreshold) / 8;
1496 if (bytesAllocated >= bytesAllocatedMax) // overflow check
1498 addMethod = UINT64_MAX;
1502 addMethod = m_ulThreshold + bytesAllocated * 8;
1505 multMethod = newMemValue + newMemValue / 10;
1506 if (multMethod < newMemValue) // overflow check
1508 multMethod = UINT64_MAX;
1511 m_ulThreshold = (addMethod > multMethod) ? addMethod : multMethod;
// Pick the oldest generation whose collection rate (scaled by RELATIVE_GC_RATIO)
// outpaces the next generation's — collect one generation higher than that.
1512 for (int i = 0; i <= 1; i++)
1514 if ((GCHeapUtilities::GetGCHeap()->CollectionCount(i) / RELATIVE_GC_RATIO) > GCHeapUtilities::GetGCHeap()->CollectionCount(i + 1))
1516 gen_collect = i + 1;
1522 PREFIX_ASSUME(gen_collect <= 2);
// Skip the collection if that generation was already collected since we last looked.
1524 if ((gen_collect == 0) || (m_gc_counts[gen_collect] == GCHeapUtilities::GetGCHeap()->CollectionCount(gen_collect)))
1526 GarbageCollectModeAny(gen_collect);
// Snapshot per-generation collection counts for the staleness check above.
1529 for (int i = 0; i < 3; i++)
1531 m_gc_counts [i] = GCHeapUtilities::GetGCHeap()->CollectionCount(i);
1537 const unsigned MIN_MEMORYPRESSURE_BUDGET = 4 * 1024 * 1024; // 4 MB
1539 const unsigned MIN_MEMORYPRESSURE_BUDGET = 3 * 1024 * 1024; // 3 MB
1542 const unsigned MAX_MEMORYPRESSURE_RATIO = 10; // 40 MB or 30 MB
1545 // Resets pressure accounting after a gen2 GC has occurred.
// If a gen2 GC happened since the last snapshot, refresh the per-generation
// collection counts and rotate to a fresh slot in the circular pressure buffers
// (the m_iteration increment between lines 1556 and 1561 is elided here).
1546 void GCInterface::CheckCollectionCount()
1548 LIMITED_METHOD_CONTRACT;
1550 IGCHeap * pHeap = GCHeapUtilities::GetGCHeap();
1552 if (m_gc_counts[2] != pHeap->CollectionCount(2))
1554 for (int i = 0; i < 3; i++)
1556 m_gc_counts[i] = pHeap->CollectionCount(i);
1561 UINT p = m_iteration % NEW_PRESSURE_COUNT;
1563 m_addPressure[p] = 0; // new pressure will be accumulated here
1564 m_remPressure[p] = 0;
1569 // New AddMemoryPressure implementation (used by RCW and the CLRServicesImpl class)
1571 // 1. Less sensitive than the original implementation (start budget 3 MB)
1572 // 2. Focuses more on newly added memory pressure
1573 // 3. Budget adjusted by effectiveness of last 3 triggered GC (add / remove ratio, max 10x)
1574 // 4. Budget maxed with 30% of current managed GC size
1575 // 5. If Gen2 GC is happening naturally, ignore past pressure
1577 // Here's a brief description of the ideal algorithm for Add/Remove memory pressure:
1578 // Do a GC when (HeapStart < X * MemPressureGrowth) where
1579 // - HeapStart is GC Heap size after doing the last GC
1580 // - MemPressureGrowth is the net of Add and Remove since the last GC
1581 // - X is proportional to our guess of the unmanaged memory death rate per GC interval,
1582 // and would be calculated based on historic data using standard exponential approximation:
1583 // Xnew = UMDeath/UMTotal * 0.5 + Xprev
// New-style AddMemoryPressure (see the algorithm description above this function
// in the file): accumulates pressure into a 4-slot circular buffer, scales the GC
// trigger budget by recent add/remove effectiveness and current heap size, and
// rate-limits gen2 collections to ~20% of the GC duty cycle.
1585 void GCInterface::NewAddMemoryPressure(UINT64 bytesAllocated)
1595 CheckCollectionCount();
1597 UINT p = m_iteration % NEW_PRESSURE_COUNT;
1599 UINT64 newMemValue = InterlockedAdd(&m_addPressure[p], bytesAllocated);
1601 static_assert(NEW_PRESSURE_COUNT == 4, "NewAddMemoryPressure contains unrolled loops which depend on NEW_PRESSURE_COUNT");
// Sum of the three *previous* slots (current slot p is excluded by subtraction).
1603 UINT64 add = m_addPressure[0] + m_addPressure[1] + m_addPressure[2] + m_addPressure[3] - m_addPressure[p];
1604 UINT64 rem = m_remPressure[0] + m_remPressure[1] + m_remPressure[2] + m_remPressure[3] - m_remPressure[p];
1606 STRESS_LOG4(LF_GCINFO, LL_INFO10000, "AMP Add: %I64u => added=%I64u total_added=%I64u total_removed=%I64u",
1607 bytesAllocated, newMemValue, add, rem);
1609 SendEtwAddMemoryPressureEvent(bytesAllocated);
1611 if (newMemValue >= MIN_MEMORYPRESSURE_BUDGET)
1613 UINT64 budget = MIN_MEMORYPRESSURE_BUDGET;
1615 if (m_iteration >= NEW_PRESSURE_COUNT) // wait until we have enough data points
1617 // Adjust according to effectiveness of GC
1618 // Scale budget according to past m_addPressure / m_remPressure ratio
1619 if (add >= rem * MAX_MEMORYPRESSURE_RATIO)
// Cap the scale-up at MAX_MEMORYPRESSURE_RATIO (10x).
1621 budget = MIN_MEMORYPRESSURE_BUDGET * MAX_MEMORYPRESSURE_RATIO;
1625 CONSISTENCY_CHECK(rem != 0);
1627 // Avoid overflow by calculating addPressure / remPressure as fixed point (1 = 1024)
1628 budget = (add * 1024 / rem) * budget / 1024;
1632 // If still over budget, check current managed heap size
1633 if (newMemValue >= budget)
1635 IGCHeap *pGCHeap = GCHeapUtilities::GetGCHeap();
// Budget is floored at one third of the current managed heap size
// (the budget = heapOver3 assignment is elided in this capture).
1636 UINT64 heapOver3 = pGCHeap->GetCurrentObjSize() / 3;
1638 if (budget < heapOver3) // Max
1643 if (newMemValue >= budget)
1645 // last check - if we would exceed 20% of GC "duty cycle", do not trigger GC at this time
1646 if ((pGCHeap->GetNow() - pGCHeap->GetLastGCStartTime(2)) > (pGCHeap->GetLastGCDuration(2) * 5))
1648 STRESS_LOG6(LF_GCINFO, LL_INFO10000, "AMP Budget: pressure=%I64u ? budget=%I64u (total_added=%I64u, total_removed=%I64u, mng_heap=%I64u) pos=%d",
1649 newMemValue, budget, add, rem, heapOver3 * 3, m_iteration);
// Force a full (gen2) collection, then rotate the pressure buffers.
1651 GarbageCollectModeAny(2);
1653 CheckCollectionCount();
// QCall entry point for GC.RemoveMemoryPressure; establishes the QCall frame
// (BEGIN/END_QCALL elided here) and forwards to RemoveMemoryPressure.
1660 void QCALLTYPE GCInterface::_RemoveMemoryPressure(UINT64 bytesAllocated)
1665 RemoveMemoryPressure(bytesAllocated);
// Classic RemoveMemoryPressure: subtracts released unmanaged pressure and shrinks
// the GC-trigger threshold, mirroring AddMemoryPressure's growth: the new threshold
// is the min of linear shrink (threshold - 4*bytes) and geometric shrink
// (threshold * 0.95), floored at MIN_GC_MEMORYPRESSURE_THRESHOLD.
1669 void GCInterface::RemoveMemoryPressure(UINT64 bytesAllocated)
1679 SendEtwRemoveMemoryPressureEvent(bytesAllocated);
// Saturating atomic subtract from the running pressure total.
1681 UINT64 newMemValue = InterlockedSub(&m_ulMemPressure, bytesAllocated);
1683 UINT64 bytesAllocatedMax = (m_ulThreshold / 4);
1685 UINT64 multMethod = (m_ulThreshold - m_ulThreshold / 20); // can never underflow
1686 if (bytesAllocated >= bytesAllocatedMax) // protect against underflow
1688 m_ulThreshold = MIN_GC_MEMORYPRESSURE_THRESHOLD;
1693 addMethod = m_ulThreshold - bytesAllocated * 4;
1696 new_th = (addMethod < multMethod) ? addMethod : multMethod;
1698 if (newMemValue <= new_th)
// Serialize threshold updates with AddMemoryPressure.
1701 CrstHolder holder(&m_MemoryPressureLock);
1702 if (new_th > MIN_GC_MEMORYPRESSURE_THRESHOLD)
1703 m_ulThreshold = new_th;
1705 m_ulThreshold = MIN_GC_MEMORYPRESSURE_THRESHOLD;
// Re-snapshot collection counts so the add path sees fresh staleness data.
1707 for (int i = 0; i < 3; i++)
1709 m_gc_counts [i] = GCHeapUtilities::GetGCHeap()->CollectionCount(i);
// New-style RemoveMemoryPressure: records released pressure into the current slot
// of the circular buffer; NewAddMemoryPressure consumes it when scaling its budget.
1714 void GCInterface::NewRemoveMemoryPressure(UINT64 bytesAllocated)
1724 CheckCollectionCount();
1726 UINT p = m_iteration % NEW_PRESSURE_COUNT;
1728 SendEtwRemoveMemoryPressureEvent(bytesAllocated);
1730 InterlockedAdd(&m_remPressure[p], bytesAllocated);
1732 STRESS_LOG2(LF_GCINFO, LL_INFO10000, "AMP Remove: %I64u => removed=%I64u",
1733 bytesAllocated, m_remPressure[p]);
// Fires the ETW IncreaseMemoryPressure event for diagnostics/tracing.
1736 inline void GCInterface::SendEtwAddMemoryPressureEvent(UINT64 bytesAllocated)
1746 FireEtwIncreaseMemoryPressure(bytesAllocated, GetClrInstanceId());
1749 // Out-of-line helper to avoid EH prolog/epilog in functions that otherwise don't throw.
// Fires the ETW DecreaseMemoryPressure event; any exception from ETW is swallowed
// (EX_TRY opener elided in this capture) so tracing failures never surface to callers.
1750 NOINLINE void GCInterface::SendEtwRemoveMemoryPressureEvent(UINT64 bytesAllocated)
1762 FireEtwDecreaseMemoryPressure(bytesAllocated, GetClrInstanceId());
1768 EX_END_CATCH(SwallowAllExceptions)
1771 // Out-of-line helper to avoid EH prolog/epilog in functions that otherwise don't throw.
// Triggers a non-blocking collection of the given generation.
1772 NOINLINE void GCInterface::GarbageCollectModeAny(int generation)
1783 GCHeapUtilities::GetGCHeap()->GarbageCollect(generation, false, collection_non_blocking);
1790 #include <optsmallperfcritical.h>
// Atomic 32-bit exchange: stores value into *location, returns the previous value.
// Null location throws a managed NullReferenceException (FCall convention).
1792 FCIMPL2(INT32,COMInterlocked::Exchange, INT32 *location, INT32 value)
1796 if( NULL == location) {
1797 FCThrow(kNullReferenceException);
1800 return FastInterlockExchange((LONG *) location, value);
// Atomic 64-bit exchange; null location throws NullReferenceException.
1804 FCIMPL2_IV(INT64,COMInterlocked::Exchange64, INT64 *location, INT64 value)
1808 if( NULL == location) {
1809 FCThrow(kNullReferenceException);
1812 return FastInterlockExchangeLong((INT64 *) location, value);
// Atomic pointer-sized exchange (no write barrier — object variants handle that).
1816 FCIMPL2(LPVOID,COMInterlocked::ExchangePointer, LPVOID *location, LPVOID value)
1820 if( NULL == location) {
1821 FCThrow(kNullReferenceException);
1825 return FastInterlockExchangePointer(location, value);
// Atomic 32-bit compare-exchange: writes value iff *location == comparand;
// always returns the original value at *location.
1829 FCIMPL3(INT32, COMInterlocked::CompareExchange, INT32* location, INT32 value, INT32 comparand)
1833 if( NULL == location) {
1834 FCThrow(kNullReferenceException);
1837 return FastInterlockCompareExchange((LONG*)location, value, comparand);
// Compare-exchange that also reports success through *succeeded
// (the *succeeded = true assignment is elided in this capture).
1841 FCIMPL4(INT32, COMInterlocked::CompareExchangeReliableResult, INT32* location, INT32 value, INT32 comparand, CLR_BOOL* succeeded)
1845 if( NULL == location) {
1846 FCThrow(kNullReferenceException);
1849 INT32 result = FastInterlockCompareExchange((LONG*)location, value, comparand);
1850 if (result == comparand)
// Atomic 64-bit compare-exchange; returns the original value at *location.
1857 FCIMPL3_IVV(INT64, COMInterlocked::CompareExchange64, INT64* location, INT64 value, INT64 comparand)
1861 if( NULL == location) {
1862 FCThrow(kNullReferenceException);
1865 return FastInterlockCompareExchangeLong((INT64*)location, value, comparand);
// Atomic pointer-sized compare-exchange (no write barrier — see CompareExchangeObject).
1869 FCIMPL3(LPVOID,COMInterlocked::CompareExchangePointer, LPVOID *location, LPVOID value, LPVOID comparand)
1873 if( NULL == location) {
1874 FCThrow(kNullReferenceException);
1878 return FastInterlockCompareExchangePointer(location, value, comparand);
// Atomic float exchange, implemented by punning the 32-bit payload through LONG.
1882 FCIMPL2_IV(float,COMInterlocked::ExchangeFloat, float *location, float value)
1886 if( NULL == location) {
1887 FCThrow(kNullReferenceException);
1890 LONG ret = FastInterlockExchange((LONG *) location, *(LONG*)&value);
1891 return *(float*)&ret;
// Atomic double exchange via 64-bit type punning.
1895 FCIMPL2_IV(double,COMInterlocked::ExchangeDouble, double *location, double value)
1899 if( NULL == location) {
1900 FCThrow(kNullReferenceException);
1904 INT64 ret = FastInterlockExchangeLong((INT64 *) location, *(INT64*)&value);
1905 return *(double*)&ret;
// Atomic float compare-exchange via 32-bit type punning; note the bit-level
// comparison means NaN payloads and -0.0/+0.0 compare by representation, not value.
1909 FCIMPL3_IVV(float,COMInterlocked::CompareExchangeFloat, float *location, float value, float comparand)
1913 if( NULL == location) {
1914 FCThrow(kNullReferenceException);
1917 LONG ret = (LONG)FastInterlockCompareExchange((LONG*) location, *(LONG*)&value, *(LONG*)&comparand);
1918 return *(float*)&ret;
// Atomic double compare-exchange via 64-bit type punning (bitwise comparison).
1922 FCIMPL3_IVV(double,COMInterlocked::CompareExchangeDouble, double *location, double value, double comparand)
1926 if( NULL == location) {
1927 FCThrow(kNullReferenceException);
1930 INT64 ret = (INT64)FastInterlockCompareExchangeLong((INT64*) location, *(INT64*)&value, *(INT64*)&comparand);
1931 return *(double*)&ret;
// Atomic object-reference exchange. Unlike ExchangePointer, this notifies the
// GC: ObjectRefAssign bookkeeping plus a write barrier for the stored reference.
1935 FCIMPL2(LPVOID,COMInterlocked::ExchangeObject, LPVOID*location, LPVOID value)
1939 if( NULL == location) {
1940 FCThrow(kNullReferenceException);
1943 LPVOID ret = FastInterlockExchangePointer(location, value);
1945 Thread::ObjectRefAssign((OBJECTREF *)location);
// Write barrier: tell the GC a reference was written to *location.
1947 ErectWriteBarrier((OBJECTREF*) location, ObjectToOBJECTREF((Object*) value));
// Atomic object-reference compare-exchange. The write barrier runs only when the
// exchange actually happened (ret == comparand); returns the original reference.
1952 FCIMPL3(LPVOID,COMInterlocked::CompareExchangeObject, LPVOID *location, LPVOID value, LPVOID comparand)
1956 if( NULL == location) {
1957 FCThrow(kNullReferenceException);
1960 // <TODO>@todo: only set ref if is updated</TODO>
1961 LPVOID ret = FastInterlockCompareExchangePointer(location, value, comparand);
1962 if (ret == comparand) {
1964 Thread::ObjectRefAssign((OBJECTREF *)location);
1966 ErectWriteBarrier((OBJECTREF*) location, ObjectToOBJECTREF((Object*) value));
// Atomic 32-bit fetch-and-add; returns the value *before* the addition.
1972 FCIMPL2(INT32,COMInterlocked::ExchangeAdd32, INT32 *location, INT32 value)
1976 if( NULL == location) {
1977 FCThrow(kNullReferenceException);
1980 return FastInterlockExchangeAdd((LONG *) location, value);
// Atomic 64-bit fetch-and-add; returns the value *before* the addition.
1984 FCIMPL2_IV(INT64,COMInterlocked::ExchangeAdd64, INT64 *location, INT64 value)
1988 if( NULL == location) {
1989 FCThrow(kNullReferenceException);
1992 return FastInterlockExchangeAddLong((INT64 *) location, value);
// FCall for Interlocked.MemoryBarrier (body elided in this capture;
// presumably issues a full fence — confirm against full source).
1996 FCIMPL0(void, COMInterlocked::FCMemoryBarrier)
2005 #include <optdefault.h>
// QCall for Interlocked.MemoryBarrierProcessWide: forces a barrier on every
// thread in the process via the OS FlushProcessWriteBuffers primitive.
2007 void QCALLTYPE COMInterlocked::MemoryBarrierProcessWide()
2011 FlushProcessWriteBuffers();
2014 static BOOL HasOverriddenMethod(MethodTable* mt, MethodTable* classMT, WORD methodSlot)
2022 _ASSERTE(mt != NULL);
2023 _ASSERTE(classMT != NULL);
2024 _ASSERTE(methodSlot != 0);
2026 PCODE actual = mt->GetRestoredSlot(methodSlot);
2027 PCODE base = classMT->GetRestoredSlot(methodSlot);
2034 if (!classMT->IsZapped())
2036 // If mscorlib is JITed, the slots can be patched and thus we need to compare the actual MethodDescs
2037 // to detect match reliably
2038 if (MethodTable::GetMethodDescForSlotAddress(actual) == MethodTable::GetMethodDescForSlotAddress(base))
// Decides (and caches on the MethodTable) whether a value type's Equals/GetHashCode
// can be computed by raw bit comparison: requires no GC pointers, tight packing,
// no float/double fields anywhere, and no user override of ValueType.Equals or
// ValueType.GetHashCode on the type or (recursively) any struct field.
2047 static BOOL CanCompareBitsOrUseFastGetHashCode(MethodTable* mt)
2056 _ASSERTE(mt != NULL);
// Fast path: answer already computed and cached on the MethodTable.
2058 if (mt->HasCheckedCanCompareBitsOrUseFastGetHashCode())
2060 return mt->CanCompareBitsOrUseFastGetHashCode();
// GC references and padding bytes make a memcmp meaningless — bail out.
2063 if (mt->ContainsPointers()
2064 || mt->IsNotTightlyPacked())
2066 mt->SetHasCheckedCanCompareBitsOrUseFastGetHashCode();
2070 MethodTable* valueTypeMT = MscorlibBinder::GetClass(CLASS__VALUE_TYPE);
2071 WORD slotEquals = MscorlibBinder::GetMethod(METHOD__VALUE_TYPE__EQUALS)->GetSlot();
2072 WORD slotGetHashCode = MscorlibBinder::GetMethod(METHOD__VALUE_TYPE__GET_HASH_CODE)->GetSlot();
2074 // Check the input type.
2075 if (HasOverriddenMethod(mt, valueTypeMT, slotEquals)
2076 || HasOverriddenMethod(mt, valueTypeMT, slotGetHashCode))
2078 mt->SetHasCheckedCanCompareBitsOrUseFastGetHashCode();
2080 // If overridden Equals or GetHashCode found, stop searching further.
2084 BOOL canCompareBitsOrUseFastGetHashCode = TRUE;
2086 // The type itself did not override Equals or GetHashCode, go for its fields.
2087 ApproxFieldDescIterator iter = ApproxFieldDescIterator(mt, ApproxFieldDescIterator::INSTANCE_FIELDS);
2088 for (FieldDesc* pField = iter.Next(); pField != NULL; pField = iter.Next())
2090 if (pField->GetFieldType() == ELEMENT_TYPE_VALUETYPE)
2092 // Check current field type.
// Recursive check: a nested struct field disqualifies the outer type too.
2093 MethodTable* fieldMethodTable = pField->GetApproxFieldTypeHandleThrowing().GetMethodTable();
2094 if (!CanCompareBitsOrUseFastGetHashCode(fieldMethodTable))
2096 canCompareBitsOrUseFastGetHashCode = FALSE;
2100 else if (pField->GetFieldType() == ELEMENT_TYPE_R8
2101 || pField->GetFieldType() == ELEMENT_TYPE_R4)
2103 // We have double/single field, cannot compare in fast path.
// (e.g. +0.0 == -0.0 but their bits differ; NaN != NaN with identical bits.)
2104 canCompareBitsOrUseFastGetHashCode = FALSE;
2109 // We've gone through all instance fields. It's time to cache the result.
2110 // Note SetCanCompareBitsOrUseFastGetHashCode(BOOL) ensures the checked flag
2111 // and canCompare flag being set atomically to avoid race.
2112 mt->SetCanCompareBitsOrUseFastGetHashCode(canCompareBitsOrUseFastGetHashCode);
2114 return canCompareBitsOrUseFastGetHashCode;
// Out-of-line slow path for ValueTypeHelper::CanCompareBits: sets up a helper
// frame (GC-protecting objRef) and runs the throwing/allocating analysis.
2117 NOINLINE static FC_BOOL_RET CanCompareBitsHelper(MethodTable* mt, OBJECTREF objRef)
2119 FC_INNER_PROLOG(ValueTypeHelper::CanCompareBits);
2121 _ASSERTE(mt != NULL);
2122 _ASSERTE(objRef != NULL);
2126 HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_1(Frame::FRAME_ATTR_EXACT_DEPTH|Frame::FRAME_ATTR_CAPTURE_DEPTH_2, objRef);
2128 ret = CanCompareBitsOrUseFastGetHashCode(mt);
2130 HELPER_METHOD_FRAME_END();
2133 FC_RETURN_BOOL(ret);
2136 // Return true if the valuetype does not contain pointer, is tightly packed,
2137 // does not have floating point number field and does not override Equals method.
// FCall: fast path reads the cached answer off the MethodTable; otherwise defers
// to the frame-establishing helper above.
2138 FCIMPL1(FC_BOOL_RET, ValueTypeHelper::CanCompareBits, Object* obj)
2142 _ASSERTE(obj != NULL);
2143 MethodTable* mt = obj->GetMethodTable();
2145 if (mt->HasCheckedCanCompareBitsOrUseFastGetHashCode())
2147 FC_RETURN_BOOL(mt->CanCompareBitsOrUseFastGetHashCode());
2150 OBJECTREF objRef(obj);
2152 FC_INNER_RETURN(FC_BOOL_RET, CanCompareBitsHelper(mt, objRef));
// FCall: bitwise equality of two boxed value types via memcmp of their payloads.
// Only valid when CanCompareBits returned true (callers assert no GC pointers,
// equal sizes).
2156 FCIMPL2(FC_BOOL_RET, ValueTypeHelper::FastEqualsCheck, Object* obj1, Object* obj2)
2160 _ASSERTE(obj1 != NULL);
2161 _ASSERTE(obj2 != NULL);
2162 _ASSERTE(!obj1->GetMethodTable()->ContainsPointers());
2163 _ASSERTE(obj1->GetSize() == obj2->GetSize());
2165 TypeHandle pTh = obj1->GetTypeHandle();
2167 FC_RETURN_BOOL(memcmp(obj1->GetData(),obj2->GetData(),pTh.GetSize()) == 0);
// Fast hash for bit-comparable structs: XOR the instance data one INT32 at a time.
// Any trailing bytes past the last full INT32 are ignored by the division below.
2171 static INT32 FastGetValueTypeHashCodeHelper(MethodTable *mt, void *pObjRef)
2181 INT32 *pObj = (INT32*)pObjRef;
2183 // this is a struct with no refs and no "strange" offsets, just go through the obj and xor the bits
2184 INT32 size = mt->GetNumInstanceFieldBytes();
2185 for (INT32 i = 0; i < (INT32)(size / sizeof(INT32)); i++)
2186 hashCode ^= *pObj++;
// Slow-path value-type hash: mirrors the historical managed behavior of hashing
// only the *first* usable field — an object ref's GetHashCode, Double/Single's
// GetHashCode, the raw bits of a primitive, or a recursive hash of a nested
// struct. pObjRef is an interior pointer into the (possibly boxed) instance and
// is GC-protected for the duration.
2191 static INT32 RegularGetValueTypeHashCode(MethodTable *mt, void *pObjRef)
2202 GCPROTECT_BEGININTERIOR(pObjRef);
2204 BOOL canUseFastGetHashCodeHelper = FALSE;
2205 if (mt->HasCheckedCanCompareBitsOrUseFastGetHashCode())
2207 canUseFastGetHashCodeHelper = mt->CanCompareBitsOrUseFastGetHashCode();
2211 canUseFastGetHashCodeHelper = CanCompareBitsOrUseFastGetHashCode(mt);
2214 // While we shouldn't get here directly from ValueTypeHelper::GetHashCode, if we recurse we need to
2215 // be able to handle getting the hashcode for an embedded structure whose hashcode is computed by the fast path.
2216 if (canUseFastGetHashCodeHelper)
2218 hashCode = FastGetValueTypeHashCodeHelper(mt, pObjRef);
2222 // it's looking ugly so we'll use the old behavior in managed code. Grab the first non-null
2223 // field and return its hash code or 'it' as hash code
2224 // <TODO> Note that the old behavior has already been broken for value types
2225 // that is qualified for CanUseFastGetHashCodeHelper. So maybe we should
2226 // change the implementation here to use all fields instead of just the 1st one.
2229 // <TODO> check this approximation - we may be losing exact type information </TODO>
2230 ApproxFieldDescIterator fdIterator(mt, ApproxFieldDescIterator::INSTANCE_FIELDS);
2233 while ((field = fdIterator.Next()) != NULL)
2235 _ASSERTE(!field->IsRVA());
2236 if (field->IsObjRef())
2238 // if we get an object reference we get the hash code out of that
2239 if (*(Object**)((BYTE *)pObjRef + field->GetOffsetUnsafe()) != NULL)
// Virtual call to the referenced object's GetHashCode.
2241 PREPARE_SIMPLE_VIRTUAL_CALLSITE(METHOD__OBJECT__GET_HASH_CODE, (*(Object**)((BYTE *)pObjRef + field->GetOffsetUnsafe())));
2242 DECLARE_ARGHOLDER_ARRAY(args, 1);
2243 args[ARGNUM_0] = PTR_TO_ARGHOLDER(*(Object**)((BYTE *)pObjRef + field->GetOffsetUnsafe()));
2244 CALL_MANAGED_METHOD(hashCode, INT32, args);
2248 // null object reference, try next
2254 CorElementType fieldType = field->GetFieldType();
2255 if (fieldType == ELEMENT_TYPE_R8)
// Double.GetHashCode so +0.0/-0.0 and NaN hash per managed semantics.
2257 PREPARE_NONVIRTUAL_CALLSITE(METHOD__DOUBLE__GET_HASH_CODE);
2258 DECLARE_ARGHOLDER_ARRAY(args, 1);
2259 args[ARGNUM_0] = PTR_TO_ARGHOLDER(((BYTE *)pObjRef + field->GetOffsetUnsafe()));
2260 CALL_MANAGED_METHOD(hashCode, INT32, args);
2262 else if (fieldType == ELEMENT_TYPE_R4)
2264 PREPARE_NONVIRTUAL_CALLSITE(METHOD__SINGLE__GET_HASH_CODE);
2265 DECLARE_ARGHOLDER_ARRAY(args, 1);
2266 args[ARGNUM_0] = PTR_TO_ARGHOLDER(((BYTE *)pObjRef + field->GetOffsetUnsafe()));
2267 CALL_MANAGED_METHOD(hashCode, INT32, args);
2269 else if (fieldType != ELEMENT_TYPE_VALUETYPE)
// Primitive field: XOR its raw bits one INT32 at a time.
2271 UINT fieldSize = field->LoadSize();
2272 INT32 *pValue = (INT32*)((BYTE *)pObjRef + field->GetOffsetUnsafe());
2273 for (INT32 j = 0; j < (INT32)(fieldSize / sizeof(INT32)); j++)
2274 hashCode ^= *pValue++;
2278 // got another value type. Get the type
2279 TypeHandle fieldTH = field->GetFieldTypeHandleThrowing();
2280 _ASSERTE(!fieldTH.IsNull());
// Recurse into the nested struct at its offset within this instance.
2281 hashCode = RegularGetValueTypeHashCode(fieldTH.GetMethodTable(), (BYTE *)pObjRef + field->GetOffsetUnsafe());
2293 // The default implementation of GetHashCode() for all value types.
2294 // Note that this implementation reveals the value of the fields.
2295 // So if the value type contains any sensitive information it should
2296 // implement its own GetHashCode().
// FCall: hash = f(typeID) XOR hash(instance data), using the fast bitwise helper
// when the type qualifies, else the field-walking slow path. Helper frames are
// erected only around the operations that can allocate/throw.
2297 FCIMPL1(INT32, ValueTypeHelper::GetHashCode, Object* objUNSAFE)
2301 if (objUNSAFE == NULL)
2302 FCThrow(kNullReferenceException);
2304 OBJECTREF obj = ObjectToOBJECTREF(objUNSAFE);
2305 VALIDATEOBJECTREF(obj);
2308 MethodTable *pMT = objUNSAFE->GetMethodTable();
2310 // We don't want to expose the method table pointer in the hash code
2311 // Let's use the typeID instead.
2312 UINT32 typeID = pMT->LookupTypeID();
2313 if (typeID == TypeIDProvider::INVALID_TYPE_ID)
2315 // If the typeID has yet to be generated, fall back to GetTypeID
2316 // This only needs to be done once per MethodTable
2317 HELPER_METHOD_FRAME_BEGIN_RET_1(obj);
2318 typeID = pMT->GetTypeID();
2319 HELPER_METHOD_FRAME_END();
2322 // To get less colliding and more evenly distributed hash codes,
2323 // we munge the class index with two big prime numbers
2324 hashCode = typeID * 711650207 + 2506965631U;
2326 BOOL canUseFastGetHashCodeHelper = FALSE;
2327 if (pMT->HasCheckedCanCompareBitsOrUseFastGetHashCode())
2329 canUseFastGetHashCodeHelper = pMT->CanCompareBitsOrUseFastGetHashCode();
2333 HELPER_METHOD_FRAME_BEGIN_RET_1(obj);
2334 canUseFastGetHashCodeHelper = CanCompareBitsOrUseFastGetHashCode(pMT);
2335 HELPER_METHOD_FRAME_END();
2338 if (canUseFastGetHashCodeHelper)
// Fast path: XOR of the unboxed payload, no frame needed (no GC transitions).
2340 hashCode ^= FastGetValueTypeHashCodeHelper(pMT, obj->UnBox());
2344 HELPER_METHOD_FRAME_BEGIN_RET_1(obj);
2345 hashCode ^= RegularGetValueTypeHashCode(pMT, obj->UnBox());
2346 HELPER_METHOD_FRAME_END();
2353 static LONG s_dwSeed;
// FCall: hashes a raw pointer without revealing absolute addresses — subtracts a
// process-global seed captured (via CAS) from the first non-null pointer hashed,
// so only relative addresses leak. The zero-checks around the lazy init are
// elided in this capture.
2355 FCIMPL1(INT32, ValueTypeHelper::GetHashCodeOfPtr, LPVOID ptr)
2359 INT32 hashCode = (INT32)((INT64)(ptr));
2366 DWORD dwSeed = s_dwSeed;
2368 // Initialize s_dwSeed lazily
2371 // We use the first non-0 pointer as the seed, all hashcodes will be based off that.
2372 // This is to make sure that we only reveal relative memory addresses and never absolute ones.
// CAS from 0 so exactly one thread's value becomes the process-wide seed.
2374 InterlockedCompareExchange(&s_dwSeed, dwSeed, 0);
2377 _ASSERTE(dwSeed != 0);
2379 return hashCode - dwSeed;
2383 static MethodTable * g_pStreamMT;
2384 static WORD g_slotBeginRead, g_slotEndRead;
2385 static WORD g_slotBeginWrite, g_slotEndWrite;
// Returns whether pMT overrides the Stream virtual occupying the given slot,
// comparing installed slot code addresses against the cached Stream MethodTable
// (early-out and returns elided in this capture; same pattern as HasOverriddenMethod).
2387 static bool HasOverriddenStreamMethod(MethodTable * pMT, WORD slot)
2395 PCODE actual = pMT->GetRestoredSlot(slot);
2396 PCODE base = g_pStreamMT->GetRestoredSlot(slot);
2400 if (!g_pStreamMT->IsZapped())
2402 // If mscorlib is JITed, the slots can be patched and thus we need to compare the actual MethodDescs
2403 // to detect match reliably
2404 if (MethodTable::GetMethodDescForSlotAddress(actual) == MethodTable::GetMethodDescForSlotAddress(base))
// FCall: does this Stream-derived object override BeginRead or EndRead?
// Used so Stream.ReadAsync can take the fast path when neither is overridden.
// The early FC_RETURN_BOOL(TRUE) presumably guards a null stream (its condition
// line is elided in this capture — confirm against full source).
2411 FCIMPL1(FC_BOOL_RET, StreamNative::HasOverriddenBeginEndRead, Object *stream)
2416 FC_RETURN_BOOL(TRUE);
// Lazily cache the Stream MethodTable and the two vtable slot numbers.
2418 if (g_pStreamMT == NULL || g_slotBeginRead == 0 || g_slotEndRead == 0)
2420 HELPER_METHOD_FRAME_BEGIN_RET_1(stream);
2421 g_pStreamMT = MscorlibBinder::GetClass(CLASS__STREAM);
2422 g_slotBeginRead = MscorlibBinder::GetMethod(METHOD__STREAM__BEGIN_READ)->GetSlot();
2423 g_slotEndRead = MscorlibBinder::GetMethod(METHOD__STREAM__END_READ)->GetSlot();
2424 HELPER_METHOD_FRAME_END();
2427 MethodTable * pMT = stream->GetMethodTable();
2429 FC_RETURN_BOOL(HasOverriddenStreamMethod(pMT, g_slotBeginRead) || HasOverriddenStreamMethod(pMT, g_slotEndRead));
2433 FCIMPL1(FC_BOOL_RET, StreamNative::HasOverriddenBeginEndWrite, Object *stream)
2438 FC_RETURN_BOOL(TRUE);
2440 if (g_pStreamMT == NULL || g_slotBeginWrite == 0 || g_slotEndWrite == 0)
2442 HELPER_METHOD_FRAME_BEGIN_RET_1(stream);
2443 g_pStreamMT = MscorlibBinder::GetClass(CLASS__STREAM);
2444 g_slotBeginWrite = MscorlibBinder::GetMethod(METHOD__STREAM__BEGIN_WRITE)->GetSlot();
2445 g_slotEndWrite = MscorlibBinder::GetMethod(METHOD__STREAM__END_WRITE)->GetSlot();
2446 HELPER_METHOD_FRAME_END();
2449 MethodTable * pMT = stream->GetMethodTable();
2451 FC_RETURN_BOOL(HasOverriddenStreamMethod(pMT, g_slotBeginWrite) || HasOverriddenStreamMethod(pMT, g_slotEndWrite));