# https://github.com/dotnet/coreclr/issues/11519
remove_definitions(-DWRITE_BARRIER_CHECK)
-# https://github.com/dotnet/coreclr/issues/14701
-add_definitions(-DFEATURE_REDHAWK)
-
set( GC_SOURCES
gceventstatus.cpp
gcconfig.cpp
#define WRAPPER(_contract)
#define DISABLED(_contract)
#define INJECT_FAULT(_expr)
-#define INJECTFAULT_HANDLETABLE 0x1
#define INJECTFAULT_GCHEAP 0x2
#define FAULT_NOT_FATAL()
#define BEGIN_DEBUG_ONLY_CODE
{
public:
-#ifdef FEATURE_REDHAWK
+#if defined(FEATURE_REDHAWK) || defined(BUILD_AS_STANDALONE)
// The GC expects the following methods that are provided by the Object class in the CLR but not provided
// by Redhawk's version of Object.
uint32_t GetNumComponents()
if (!GCToOSInterface::CanGetCurrentProcessorNumber())
{
n_sniff_buffers = n_heaps*2+1;
- size_t sniff_buf_size = 0;
-#ifdef FEATURE_REDHAWK
- size_t n_cache_lines = 1 + n_heaps*n_sniff_buffers + 1;
- sniff_buf_size = n_cache_lines * HS_CACHE_LINE_SIZE;
-#else
- S_SIZE_T safe_sniff_buf_size = S_SIZE_T(1 + n_heaps*n_sniff_buffers + 1);
- safe_sniff_buf_size *= HS_CACHE_LINE_SIZE;
- if (safe_sniff_buf_size.IsOverflow())
+ size_t n_cache_lines = 1 + n_heaps * n_sniff_buffers + 1;
+ size_t sniff_buf_size = n_cache_lines * HS_CACHE_LINE_SIZE;
+ if (sniff_buf_size / HS_CACHE_LINE_SIZE != n_cache_lines) // check for overflow
{
return FALSE;
}
- sniff_buf_size = safe_sniff_buf_size.Value();
-#endif //FEATURE_REDHAWK
+
sniff_buffer = new (nothrow) uint8_t[sniff_buf_size];
if (sniff_buffer == 0)
return FALSE;
bool virtual_alloc_commit_for_heap(void* addr, size_t size, int h_number)
{
-#if defined(MULTIPLE_HEAPS) && !defined(FEATURE_REDHAWK) && !defined(FEATURE_PAL)
+#if defined(MULTIPLE_HEAPS) && !defined(FEATURE_REDHAWK) && !defined(FEATURE_PAL) && !defined(BUILD_AS_STANDALONE)
// Currently there is no way for us to specific the numa node to allocate on via hosting interfaces to
// a host. This will need to be added later.
#if !defined(FEATURE_CORECLR)
#ifdef COLLECTIBLE_CLASS
#define contain_pointers_or_collectible(i) header(i)->ContainsPointersOrCollectible()
-#define get_class_object(i) method_table(i)->GetLoaderAllocatorObjectForGC()
+#define get_class_object(i) GCToEEInterface::GetLoaderAllocatorObjectForGC((Object *)i)
#define is_collectible(i) method_table(i)->Collectible()
#else //COLLECTIBLE_CLASS
#define contain_pointers_or_collectible(i) header(i)->ContainsPointers()
inline
BOOL AnalyzeSurvivorsRequested(int condemnedGeneration)
{
+#ifndef BUILD_AS_STANDALONE
// Is the list active?
GcNotifications gn(g_pGcNotificationTable);
if (gn.IsActive())
return TRUE;
}
}
+#endif // BUILD_AS_STANDALONE
return FALSE;
}
void DACNotifyGcMarkEnd(int condemnedGeneration)
{
+#ifndef BUILD_AS_STANDALONE
// Is the list active?
GcNotifications gn(g_pGcNotificationTable);
if (gn.IsActive())
DACNotify::DoGCNotification(gea);
}
}
+#endif // BUILD_AS_STANDALONE
}
#endif // HEAP_ANALYZE
#endif //COUNT_CYCLES
#endif //TRACE_GC
-#ifndef FEATURE_REDHAWK
- GCStress<gc_on_alloc>::MaybeTrigger(acontext);
-#endif // FEATURE_REDHAWK
-
if (size < LARGE_OBJECT_SIZE)
{
#ifdef TRACE_GC
}
#endif //MULTIPLE_HEAPS
-#ifndef FEATURE_REDHAWK
- GCStress<gc_on_alloc>::MaybeTrigger(acontext);
-#endif // FEATURE_REDHAWK
-
#ifdef MULTIPLE_HEAPS
gc_heap* hp = acontext->get_alloc_heap()->pGenGCHeap;
#else
#define dprintf(l,x)
#endif //TRACE_GC
-#ifndef FEATURE_REDHAWK
+#if !defined(FEATURE_REDHAWK) && !defined(BUILD_AS_STANDALONE)
#undef assert
#define assert _ASSERTE
#undef ASSERT
}
CONTRACTL_END;
-#if defined( _DEBUG) && !defined(FEATURE_REDHAWK)
- if (g_pConfig->ShouldInjectFault(INJECTFAULT_HANDLETABLE))
- {
- FAULT_NOT_FATAL();
- return NULL;
- }
-#endif // _DEBUG && !FEATURE_REDHAWK
-
// If we are creating a variable-strength handle, verify that the
// requested variable handle type is valid.
_ASSERTE(uType != HNDTYPE_VARIABLE || IS_VALID_VHT_VALUE(lExtraInfo));
// we want to commit enough for the header PLUS some handles
uint32_t dwCommit = HANDLE_HEADER_SIZE;
-#ifndef FEATURE_REDHAWK // todo: implement SafeInt
- // Prefast overflow sanity check the addition
- if (!ClrSafeInt<uint32_t>::addition(dwCommit, OS_PAGE_SIZE, dwCommit))
- {
- return FALSE;
- }
-#endif // !FEATURE_REDHAWK
-
// Round down to the dwPageSize
dwCommit &= ~(OS_PAGE_SIZE - 1);
static void VerifyObject(_UNCHECKED_OBJECTREF from, _UNCHECKED_OBJECTREF obj)
{
-#ifdef FEATURE_REDHAWK
+#if defined(FEATURE_REDHAWK) || defined(BUILD_AS_STANDALONE)
UNREFERENCED_PARAMETER(from);
MethodTable* pMT = (MethodTable*)(obj->GetGCSafeMethodTable());
pMT->SanityCheck();
#endif
break;
-#if defined(FEATURE_COMINTEROP) && !defined(FEATURE_REDHAWK)
+#if defined(FEATURE_COMINTEROP) && !defined(FEATURE_REDHAWK) && !defined(BUILD_AS_STANDALONE)
case HNDTYPE_REFCOUNTED:
rootFlags |= kEtwGCRootFlagsRefCounted;
if (*pRef != NULL)
// No free slot.
// Let's create a new node
- NewHolder<HandleTableMap> newMap;
- newMap = new (nothrow) HandleTableMap;
+ HandleTableMap *newMap = new (nothrow) HandleTableMap;
if (!newMap)
{
return false;
newMap->pBuckets = new (nothrow) HandleTableBucket * [ INITIAL_HANDLE_TABLE_ARRAY_SIZE ];
if (!newMap->pBuckets)
{
+ delete newMap;
return false;
}
- newMap.SuppressRelease();
-
newMap->dwMaxIndex = last->dwMaxIndex + INITIAL_HANDLE_TABLE_ARRAY_SIZE;
newMap->pNext = NULL;
ZeroMemory(newMap->pBuckets,
INITIAL_HANDLE_TABLE_ARRAY_SIZE * sizeof (HandleTableBucket *));
- if (Interlocked::CompareExchangePointer(&last->pNext, newMap.GetValue(), NULL) != NULL)
+ if (Interlocked::CompareExchangePointer(&last->pNext, newMap, NULL) != NULL)
{
// This thread loses.
delete [] newMap->pBuckets;
DWORD GetHostTestThreadAbort() const {LIMITED_METHOD_CONTRACT; return testThreadAbort;}
#define INJECTFAULT_LOADERHEAP 0x1
-#define INJECTFAULT_HANDLETABLE 0x1
#define INJECTFAULT_GCHEAP 0x2
#define INJECTFAULT_SO 0x4
#define INJECTFAULT_GMHEAP 0x8
}
CONTRACTL_END;
- return pObject->GetMethodTable()->GetLoaderAllocatorObjectForGC();
+ return pObject->GetGCSafeMethodTable()->GetLoaderAllocatorObjectForGC();
}
bool GCToEEInterface::IsPreemptiveGCDisabled()
INTERIOR_STACK_PROBE_FOR(GetThread(), static_cast<unsigned>(DEFAULT_ENTRY_PROBE_AMOUNT * 1.5));
if (GCHeapUtilities::UseThreadAllocationContexts())
{
- retVal = GCHeapUtilities::GetGCHeap()->Alloc(GetThreadAllocContext(), size, flags);
+ gc_alloc_context *threadContext = GetThreadAllocContext();
+ GCStress<gc_on_alloc>::MaybeTrigger(threadContext);
+ retVal = GCHeapUtilities::GetGCHeap()->Alloc(threadContext, size, flags);
}
else
{
GlobalAllocLockHolder holder(&g_global_alloc_lock);
- retVal = GCHeapUtilities::GetGCHeap()->Alloc(&g_global_alloc_context, size, flags);
+ gc_alloc_context *globalContext = &g_global_alloc_context;
+ GCStress<gc_on_alloc>::MaybeTrigger(globalContext);
+ retVal = GCHeapUtilities::GetGCHeap()->Alloc(globalContext, size, flags);
}
INTERIOR_STACK_PROBE_FOR(GetThread(), static_cast<unsigned>(DEFAULT_ENTRY_PROBE_AMOUNT * 1.5));
if (GCHeapUtilities::UseThreadAllocationContexts())
{
- retVal = GCHeapUtilities::GetGCHeap()->AllocAlign8(GetThreadAllocContext(), size, flags);
+ gc_alloc_context *threadContext = GetThreadAllocContext();
+ GCStress<gc_on_alloc>::MaybeTrigger(threadContext);
+ retVal = GCHeapUtilities::GetGCHeap()->AllocAlign8(threadContext, size, flags);
}
else
{
GlobalAllocLockHolder holder(&g_global_alloc_lock);
- retVal = GCHeapUtilities::GetGCHeap()->AllocAlign8(&g_global_alloc_context, size, flags);
+ gc_alloc_context *globalContext = &g_global_alloc_context;
+ GCStress<gc_on_alloc>::MaybeTrigger(globalContext);
+ retVal = GCHeapUtilities::GetGCHeap()->AllocAlign8(globalContext, size, flags);
}
if (!retVal)