From: David Mason
Date: Sat, 11 Aug 2018 17:11:49 +0000 (-0700)
Subject: [local gc] disable feature redhawk (#17769)
X-Git-Tag: accepted/tizen/unified/20190422.045933~1465
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=d4738d1e9e9b50d00023eb4caca225812cabe285;p=platform%2Fupstream%2Fcoreclr.git

[local gc] disable feature redhawk (#17769)
---

diff --git a/src/gc/CMakeLists.txt b/src/gc/CMakeLists.txt
index caecdba..7709304 100644
--- a/src/gc/CMakeLists.txt
+++ b/src/gc/CMakeLists.txt
@@ -11,9 +11,6 @@ remove_definitions(-DSTRESS_HEAP)
 # https://github.com/dotnet/coreclr/issues/11519
 remove_definitions(-DWRITE_BARRIER_CHECK)
 
-# https://github.com/dotnet/coreclr/issues/14701
-add_definitions(-DFEATURE_REDHAWK)
-
 set( GC_SOURCES
   gceventstatus.cpp
   gcconfig.cpp
diff --git a/src/gc/env/gcenv.base.h b/src/gc/env/gcenv.base.h
index b695abc..8693bbe 100644
--- a/src/gc/env/gcenv.base.h
+++ b/src/gc/env/gcenv.base.h
@@ -378,7 +378,6 @@ typedef struct _PROCESSOR_NUMBER {
 #define WRAPPER(_contract)
 #define DISABLED(_contract)
 #define INJECT_FAULT(_expr)
-#define INJECTFAULT_HANDLETABLE 0x1
 #define INJECTFAULT_GCHEAP 0x2
 #define FAULT_NOT_FATAL()
 #define BEGIN_DEBUG_ONLY_CODE
diff --git a/src/gc/gc.cpp b/src/gc/gc.cpp
index 08a1050..31715cb 100644
--- a/src/gc/gc.cpp
+++ b/src/gc/gc.cpp
@@ -3874,7 +3874,7 @@ class CObjectHeader : public Object
 {
 public:
 
-#ifdef FEATURE_REDHAWK
+#if defined(FEATURE_REDHAWK) || defined(BUILD_AS_STANDALONE)
     // The GC expects the following methods that are provided by the Object class in the CLR but not provided
     // by Redhawk's version of Object.
     uint32_t GetNumComponents()
@@ -5051,19 +5051,13 @@ public:
         if (!GCToOSInterface::CanGetCurrentProcessorNumber())
         {
             n_sniff_buffers = n_heaps*2+1;
-            size_t sniff_buf_size = 0;
-#ifdef FEATURE_REDHAWK
-            size_t n_cache_lines = 1 + n_heaps*n_sniff_buffers + 1;
-            sniff_buf_size = n_cache_lines * HS_CACHE_LINE_SIZE;
-#else
-            S_SIZE_T safe_sniff_buf_size = S_SIZE_T(1 + n_heaps*n_sniff_buffers + 1);
-            safe_sniff_buf_size *= HS_CACHE_LINE_SIZE;
-            if (safe_sniff_buf_size.IsOverflow())
+            size_t n_cache_lines = 1 + n_heaps * n_sniff_buffers + 1;
+            size_t sniff_buf_size = n_cache_lines * HS_CACHE_LINE_SIZE;
+            if (sniff_buf_size / HS_CACHE_LINE_SIZE != n_cache_lines) // check for overlow
             {
                 return FALSE;
             }
-            sniff_buf_size = safe_sniff_buf_size.Value();
-#endif //FEATURE_REDHAWK
+
             sniff_buffer = new (nothrow) uint8_t[sniff_buf_size];
             if (sniff_buffer == 0)
                 return FALSE;
@@ -5463,7 +5457,7 @@ void gc_heap::gc_thread_function ()
 
 bool virtual_alloc_commit_for_heap(void* addr, size_t size, int h_number)
 {
-#if defined(MULTIPLE_HEAPS) && !defined(FEATURE_REDHAWK) && !defined(FEATURE_PAL)
+#if defined(MULTIPLE_HEAPS) && !defined(FEATURE_REDHAWK) && !defined(FEATURE_PAL) && !defined(BUILD_AS_STANDALONE)
     // Currently there is no way for us to specific the numa node to allocate on via hosting interfaces to
     // a host. This will need to be added later.
 #if !defined(FEATURE_CORECLR)
@@ -9011,7 +9005,7 @@ inline size_t my_get_size (Object* ob)
 #ifdef COLLECTIBLE_CLASS
 #define contain_pointers_or_collectible(i) header(i)->ContainsPointersOrCollectible()
-#define get_class_object(i) method_table(i)->GetLoaderAllocatorObjectForGC()
+#define get_class_object(i) GCToEEInterface::GetLoaderAllocatorObjectForGC((Object *)i)
 #define is_collectible(i) method_table(i)->Collectible()
 #else //COLLECTIBLE_CLASS
 #define contain_pointers_or_collectible(i) header(i)->ContainsPointers()
@@ -16305,6 +16299,7 @@ void gc_heap::update_collection_counts ()
 inline
 BOOL AnalyzeSurvivorsRequested(int condemnedGeneration)
 {
+#ifndef BUILD_AS_STANDALONE
     // Is the list active?
     GcNotifications gn(g_pGcNotificationTable);
     if (gn.IsActive())
@@ -16315,11 +16310,13 @@ BOOL AnalyzeSurvivorsRequested(int condemnedGeneration)
             return TRUE;
         }
     }
+#endif // BUILD_AS_STANDALONE
     return FALSE;
 }
 
 void DACNotifyGcMarkEnd(int condemnedGeneration)
 {
+#ifndef BUILD_AS_STANDALONE
     // Is the list active?
     GcNotifications gn(g_pGcNotificationTable);
     if (gn.IsActive())
@@ -16330,6 +16327,7 @@ void DACNotifyGcMarkEnd(int condemnedGeneration)
             DACNotify::DoGCNotification(gea);
         }
     }
+#endif // BUILD_AS_STANDALONE
 }
 #endif // HEAP_ANALYZE
 
@@ -34215,10 +34213,6 @@ GCHeap::AllocAlign8Common(void* _hp, alloc_context* acontext, size_t size, uint3
 #endif //COUNT_CYCLES
 #endif //TRACE_GC
 
-#ifndef FEATURE_REDHAWK
-    GCStress<gc_on_alloc>::MaybeTrigger(acontext);
-#endif // FEATURE_REDHAWK
-
     if (size < LARGE_OBJECT_SIZE)
     {
 #ifdef TRACE_GC
@@ -34395,10 +34389,6 @@ GCHeap::Alloc(gc_alloc_context* context, size_t size, uint32_t flags REQD_ALIGN_
     }
 #endif //MULTIPLE_HEAPS
 
-#ifndef FEATURE_REDHAWK
-    GCStress<gc_on_alloc>::MaybeTrigger(acontext);
-#endif // FEATURE_REDHAWK
-
 #ifdef MULTIPLE_HEAPS
     gc_heap* hp = acontext->get_alloc_heap()->pGenGCHeap;
 #else
diff --git a/src/gc/gcpriv.h b/src/gc/gcpriv.h
index 9b40b58..0cb72ec 100644
--- a/src/gc/gcpriv.h
+++ b/src/gc/gcpriv.h
@@ -275,7 +275,7 @@ void GCLog (const char *fmt, ... );
 #define dprintf(l,x)
 #endif //TRACE_GC
 
-#ifndef FEATURE_REDHAWK
+#if !defined(FEATURE_REDHAWK) && !defined(BUILD_AS_STANDALONE)
 #undef assert
 #define assert _ASSERTE
 #undef ASSERT
diff --git a/src/gc/handletable.cpp b/src/gc/handletable.cpp
index fde5cff..0c05715 100644
--- a/src/gc/handletable.cpp
+++ b/src/gc/handletable.cpp
@@ -301,14 +301,6 @@ OBJECTHANDLE HndCreateHandle(HHANDLETABLE hTable, uint32_t uType, OBJECTREF obje
     }
     CONTRACTL_END;
 
-#if defined( _DEBUG) && !defined(FEATURE_REDHAWK)
-    if (g_pConfig->ShouldInjectFault(INJECTFAULT_HANDLETABLE))
-    {
-        FAULT_NOT_FATAL();
-        return NULL;
-    }
-#endif // _DEBUG && !FEATURE_REDHAWK
-
     // If we are creating a variable-strength handle, verify that the
     // requested variable handle type is valid.
     _ASSERTE(uType != HNDTYPE_VARIABLE || IS_VALID_VHT_VALUE(lExtraInfo));
diff --git a/src/gc/handletablecore.cpp b/src/gc/handletablecore.cpp
index 8fbdbe9..8c0be42 100644
--- a/src/gc/handletablecore.cpp
+++ b/src/gc/handletablecore.cpp
@@ -510,14 +510,6 @@ BOOL SegmentInitialize(TableSegment *pSegment, HandleTable *pTable)
     // we want to commit enough for the header PLUS some handles
     uint32_t dwCommit = HANDLE_HEADER_SIZE;
 
-#ifndef FEATURE_REDHAWK // todo: implement SafeInt
-    // Prefast overflow sanity check the addition
-    if (!ClrSafeInt<uint32_t>::addition(dwCommit, OS_PAGE_SIZE, dwCommit))
-    {
-        return FALSE;
-    }
-#endif // !FEATURE_REDHAWK
-
     // Round down to the dwPageSize
     dwCommit &= ~(OS_PAGE_SIZE - 1);
diff --git a/src/gc/handletablescan.cpp b/src/gc/handletablescan.cpp
index fb08d37..6a39a00 100644
--- a/src/gc/handletablescan.cpp
+++ b/src/gc/handletablescan.cpp
@@ -891,7 +891,7 @@ void CALLBACK BlockResetAgeMapForBlocks(TableSegment *pSegment, uint32_t uBlock,
 
 static void VerifyObject(_UNCHECKED_OBJECTREF from, _UNCHECKED_OBJECTREF obj)
 {
-#ifdef FEATURE_REDHAWK
+#if defined(FEATURE_REDHAWK) || defined(BUILD_AS_STANDALONE)
     UNREFERENCED_PARAMETER(from);
     MethodTable* pMT = (MethodTable*)(obj->GetGCSafeMethodTable());
     pMT->SanityCheck();
diff --git a/src/gc/objecthandle.cpp b/src/gc/objecthandle.cpp
index 2e26476..c2af23a 100644
--- a/src/gc/objecthandle.cpp
+++ b/src/gc/objecthandle.cpp
@@ -469,7 +469,7 @@ void CALLBACK ScanPointerForProfilerAndETW(_UNCHECKED_OBJECTREF *pObjRef, uintpt
 #endif
         break;
 
-#if defined(FEATURE_COMINTEROP) && !defined(FEATURE_REDHAWK)
+#if defined(FEATURE_COMINTEROP) && !defined(FEATURE_REDHAWK) && !defined(BUILD_AS_STANDALONE)
     case HNDTYPE_REFCOUNTED:
         rootFlags |= kEtwGCRootFlagsRefCounted;
         if (*pRef != NULL)
         {
@@ -776,8 +776,7 @@ bool Ref_InitializeHandleTableBucket(HandleTableBucket* bucket, void* context)
 
     // No free slot.
     // Let's create a new node
-    NewHolder<HandleTableMap> newMap;
-    newMap = new (nothrow) HandleTableMap;
+    HandleTableMap *newMap = new (nothrow) HandleTableMap;
     if (!newMap)
     {
         return false;
@@ -786,17 +785,16 @@ bool Ref_InitializeHandleTableBucket(HandleTableBucket* bucket, void* context)
     newMap->pBuckets = new (nothrow) HandleTableBucket * [ INITIAL_HANDLE_TABLE_ARRAY_SIZE ];
     if (!newMap->pBuckets)
     {
+        delete newMap;
         return false;
     }
 
-    newMap.SuppressRelease();
-
     newMap->dwMaxIndex = last->dwMaxIndex + INITIAL_HANDLE_TABLE_ARRAY_SIZE;
     newMap->pNext = NULL;
     ZeroMemory(newMap->pBuckets,
                INITIAL_HANDLE_TABLE_ARRAY_SIZE * sizeof (HandleTableBucket *));
 
-    if (Interlocked::CompareExchangePointer(&last->pNext, newMap.GetValue(), NULL) != NULL)
+    if (Interlocked::CompareExchangePointer(&last->pNext, newMap, NULL) != NULL)
     {
         // This thread loses.
         delete [] newMap->pBuckets;
diff --git a/src/vm/eeconfig.h b/src/vm/eeconfig.h
index bcd767b..60d8b70 100644
--- a/src/vm/eeconfig.h
+++ b/src/vm/eeconfig.h
@@ -827,7 +827,6 @@ public:
     DWORD GetHostTestThreadAbort() const {LIMITED_METHOD_CONTRACT; return testThreadAbort;}
 
 #define INJECTFAULT_LOADERHEAP 0x1
-#define INJECTFAULT_HANDLETABLE 0x1
 #define INJECTFAULT_GCHEAP 0x2
 #define INJECTFAULT_SO 0x4
 #define INJECTFAULT_GMHEAP 0x8
diff --git a/src/vm/gcenv.ee.cpp b/src/vm/gcenv.ee.cpp
index 7f9d269..581d7fc 100644
--- a/src/vm/gcenv.ee.cpp
+++ b/src/vm/gcenv.ee.cpp
@@ -357,7 +357,7 @@ uint8_t* GCToEEInterface::GetLoaderAllocatorObjectForGC(Object* pObject)
     }
     CONTRACTL_END;
 
-    return pObject->GetMethodTable()->GetLoaderAllocatorObjectForGC();
+    return pObject->GetGCSafeMethodTable()->GetLoaderAllocatorObjectForGC();
 }
 
 bool GCToEEInterface::IsPreemptiveGCDisabled()
diff --git a/src/vm/gchelpers.cpp b/src/vm/gchelpers.cpp
index 4684ede..b4f018f 100644
--- a/src/vm/gchelpers.cpp
+++ b/src/vm/gchelpers.cpp
@@ -238,12 +238,16 @@ inline Object* Alloc(size_t size, BOOL bFinalize, BOOL bContainsPointers )
     INTERIOR_STACK_PROBE_FOR(GetThread(), static_cast<unsigned>(DEFAULT_ENTRY_PROBE_AMOUNT * 1.5));
     if (GCHeapUtilities::UseThreadAllocationContexts())
     {
-        retVal = GCHeapUtilities::GetGCHeap()->Alloc(GetThreadAllocContext(), size, flags);
+        gc_alloc_context *threadContext = GetThreadAllocContext();
+        GCStress<gc_on_alloc>::MaybeTrigger(threadContext);
+        retVal = GCHeapUtilities::GetGCHeap()->Alloc(threadContext, size, flags);
     }
     else
     {
         GlobalAllocLockHolder holder(&g_global_alloc_lock);
-        retVal = GCHeapUtilities::GetGCHeap()->Alloc(&g_global_alloc_context, size, flags);
+        gc_alloc_context *globalContext = &g_global_alloc_context;
+        GCStress<gc_on_alloc>::MaybeTrigger(globalContext);
+        retVal = GCHeapUtilities::GetGCHeap()->Alloc(globalContext, size, flags);
     }
 
@@ -279,12 +283,16 @@ inline Object* AllocAlign8(size_t size, BOOL bFinalize, BOOL bContainsPointers,
     INTERIOR_STACK_PROBE_FOR(GetThread(), static_cast<unsigned>(DEFAULT_ENTRY_PROBE_AMOUNT * 1.5));
     if (GCHeapUtilities::UseThreadAllocationContexts())
    {
-        retVal = GCHeapUtilities::GetGCHeap()->AllocAlign8(GetThreadAllocContext(), size, flags);
+        gc_alloc_context *threadContext = GetThreadAllocContext();
+        GCStress<gc_on_alloc>::MaybeTrigger(threadContext);
+        retVal = GCHeapUtilities::GetGCHeap()->AllocAlign8(threadContext, size, flags);
     }
     else
     {
         GlobalAllocLockHolder holder(&g_global_alloc_lock);
-        retVal = GCHeapUtilities::GetGCHeap()->AllocAlign8(&g_global_alloc_context, size, flags);
+        gc_alloc_context *globalContext = &g_global_alloc_context;
+        GCStress<gc_on_alloc>::MaybeTrigger(globalContext);
+        retVal = GCHeapUtilities::GetGCHeap()->AllocAlign8(globalContext, size, flags);
     }
 
     if (!retVal)
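The sniff-buffer hunk in gc.cpp replaces the EE-only S_SIZE_T safe-integer type with a plain multiplication followed by a divide-back test, since the standalone GC build can no longer pull in that helper. Below is a minimal sketch of that overflow-check pattern, written against the standard library only; checked_mul is an illustrative name, not a function from the patch.

    #include <cstddef>

    // Multiply a and b, detecting overflow by dividing the product back out.
    // This mirrors the sniff_buf_size / HS_CACHE_LINE_SIZE check in the patch.
    static bool checked_mul(size_t a, size_t b, size_t* out)
    {
        size_t product = a * b;            // unsigned arithmetic wraps on overflow
        if (a != 0 && product / a != b)    // a wrapped product fails the divide-back test
        {
            return false;
        }
        *out = product;
        return true;
    }

The divide-back test works because unsigned overflow wraps modulo SIZE_MAX + 1: if a * b exceeded the range, the wrapped product divided by a can no longer reproduce b.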
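Two related hunks route loader-allocator lookups through the EE interface: gc.cpp's get_class_object macro now calls GCToEEInterface::GetLoaderAllocatorObjectForGC, and the EE-side implementation in gcenv.ee.cpp switches from GetMethodTable to GetGCSafeMethodTable, which masks off the mark bits a GC in progress may have set in the object's MethodTable word. A rough sketch of that masking idea follows; MethodTableStub and kGCFlagMask are assumptions for illustration, not the CLR's real types or constants.

    #include <cstdint>

    struct MethodTableStub { /* placeholder for the real MethodTable */ };

    // During a GC the low bits of an object's MethodTable word can carry
    // mark/pin flags, so they must be cleared before the pointer is used.
    static MethodTableStub* GetGCSafeMethodTable(uintptr_t methodTableWord)
    {
        constexpr uintptr_t kGCFlagMask = 0x3;   // assumed flag bits, for illustration only
        return reinterpret_cast<MethodTableStub*>(methodTableWord & ~kGCFlagMask);
    }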
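In objecthandle.cpp the patch drops the EE's NewHolder wrapper and handles the error path by hand: allocate with new (nothrow), delete the node explicitly if the nested array allocation fails, and publish the node with a compare-exchange. A self-contained sketch of that shape, using hypothetical Node/AppendNode names and std::atomic in place of the CLR's Interlocked helpers:

    #include <atomic>
    #include <cstddef>
    #include <cstring>
    #include <new>

    // Illustrative stand-ins for HandleTableMap and its bucket array.
    struct Node
    {
        void** slots;
        Node*  next;
    };

    // Append a new node to a lock-free singly linked list, cleaning up manually
    // on failure instead of relying on a holder type.
    static bool AppendNode(std::atomic<Node*>& tail_next, size_t slot_count)
    {
        Node* node = new (std::nothrow) Node;
        if (!node)
            return false;

        node->slots = new (std::nothrow) void*[slot_count];
        if (!node->slots)
        {
            delete node;                   // explicit cleanup replaces holder/SuppressRelease
            return false;
        }

        std::memset(node->slots, 0, slot_count * sizeof(void*));
        node->next = nullptr;

        Node* expected = nullptr;
        if (!tail_next.compare_exchange_strong(expected, node))
        {
            // Another thread published its node first; discard ours.
            delete[] node->slots;
            delete node;
            return false;
        }
        return true;
    }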
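Finally, the gc.cpp hunks delete the GC-stress trigger from GCHeap::Alloc and GCHeap::AllocAlign8Common, and the gchelpers.cpp hunks re-add it on the VM side, just before the call into the GC interface, so a standalone GC binary carries no EE-only stress machinery. A sketch of that call shape under assumed names (IGCAlloc, AllocContext, MaybeTriggerStress, and AllocateObject are illustrative, not the CLR's interfaces):

    #include <cstddef>

    struct AllocContext { char* ptr; char* limit; };

    // Opaque allocator interface standing in for the GC's exported Alloc entry point.
    struct IGCAlloc
    {
        virtual void* Alloc(AllocContext* ctx, size_t size, unsigned flags) = 0;
        virtual ~IGCAlloc() = default;
    };

    static void MaybeTriggerStress(AllocContext* /*ctx*/)
    {
        // In the real runtime this is the GCStress trigger; here it is a stub.
    }

    static void* AllocateObject(IGCAlloc* gc, AllocContext* ctx, size_t size, unsigned flags)
    {
        MaybeTriggerStress(ctx);             // runtime-side policy runs before entering the GC
        return gc->Alloc(ctx, size, flags);  // the GC itself stays free of EE-only dependencies
    }

The design point is the same one the patch makes: anything the GC cannot reasonably own in a standalone build (fault injection, stress triggers, DAC notifications) either moves behind GCToEEInterface or is compiled out under BUILD_AS_STANDALONE.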