BOOL operator!=(const ADIndex& ad) const { return m_dwIndex != ad.m_dwIndex; }
};
-class AppDomain
-{
-public:
- ADIndex GetIndex() { return ADIndex(RH_DEFAULT_DOMAIN_ID); }
- BOOL IsRudeUnload() { return FALSE; }
- BOOL NoAccessToHandleTable() { return FALSE; }
- void DecNumSizedRefHandles() {}
-};
-
-class SystemDomain
-{
-public:
- static SystemDomain *System() { return NULL; }
- static AppDomain *GetAppDomainAtIndex(ADIndex /*index*/) { return (AppDomain *)-1; }
- static AppDomain *AppDomainBeingUnloaded() { return NULL; }
- AppDomain *DefaultDomain() { return NULL; }
- DWORD GetTotalNumSizedRefHandles() { return 0; }
-};
-
-class NumaNodeInfo
-{
-public:
- static bool CanEnableGCNumaAware()
- {
- // [LOCALGC TODO] enable NUMA node support
- return false;
- }
-
- static void GetGroupForProcessor(uint16_t processor_number, uint16_t * group_number, uint16_t * group_processor_number)
- {
- // [LOCALGC TODO] enable NUMA node support
- assert(!"should not be called");
- }
-
- static bool GetNumaProcessorNodeEx(PPROCESSOR_NUMBER proc_no, uint16_t * node_no)
- {
- // [LOCALGC TODO] enable NUMA node support
- assert(!"should not be called");
- return false;
- }
-};
-
-class CPUGroupInfo
-{
-public:
- static bool CanEnableGCCPUGroups()
- {
- // [LOCALGC TODO] enable CPU group support
- return false;
- }
-
- static uint32_t GetNumActiveProcessors()
- {
- // [LOCALGC TODO] enable CPU group support
- assert(!"should not be called");
- return 0;
- }
-
- static void GetGroupForProcessor(uint16_t processor_number, uint16_t * group_number, uint16_t * group_processor_number)
- {
- // [LOCALGC TODO] enable CPU group support
- assert(!"should not be called");
- }
-};
-
-
#endif // __GCENV_BASE_INCLUDED__
static void EnableFinalization(bool foundFinalizers);
static void HandleFatalError(unsigned int exitCode);
- static bool ShouldFinalizeObjectForUnload(AppDomain* pDomain, Object* obj);
+ static bool ShouldFinalizeObjectForUnload(void* pDomain, Object* obj);
static bool ForceFullGCToBeBlocking();
static bool EagerFinalized(Object* obj);
static MethodTable* GetFreeObjectMethodTable();
static void WalkAsyncPinnedForPromotion(Object* object, ScanContext* sc, promote_func* callback);
static void WalkAsyncPinned(Object* object, void* context, void(*callback)(Object*, Object*, void*));
static IGCToCLREventSink* EventSink();
+
+ static uint32_t GetDefaultDomainIndex();
+ static void *GetAppDomainAtIndex(uint32_t appDomainIndex);
+ static bool AppDomainCanAccessHandleTable(uint32_t appDomainID);
+ static uint32_t GetIndexOfAppDomainBeingUnloaded();
+ static uint32_t GetTotalNumSizedRefHandles();
+ static bool AppDomainIsRudeUnload(void *appDomain);
};
#endif // __GCENV_EE_H__
#undef Sleep
#endif // Sleep
+#define NUMA_NODE_UNDEFINED UINT32_MAX
+
// Critical section used by the GC
class CLRCriticalSection
{
// size - size of the virtual memory range
// Return:
// true if it has succeeded, false if it has failed
- static bool VirtualCommit(void *address, size_t size);
+ static bool VirtualCommit(void *address, size_t size, uint32_t node = NUMA_NODE_UNDEFINED);
// Decommit virtual memory range.
// Parameters:
// Return:
// Number of processors on the machine
static uint32_t GetTotalProcessorCount();
+
+ // Is NUMA support available
+ static bool CanEnableGCNumaAware();
+
+ // Gets the NUMA node for the processor
+ static bool GetNumaProcessorNode(PPROCESSOR_NUMBER proc_no, uint16_t *node_no);
+
+ // Are CPU groups enabled
+ static bool CanEnableGCCPUGroups();
+
+ // Get the CPU group for the specified processor
+ static void GetGroupForProcessor(uint16_t processor_number, uint16_t* group_number, uint16_t* group_processor_number);
+
};
#endif // __GCENV_OS_H__
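For orientation, a caller-side sketch of the extended commit path. Illustrative only: commit_segment_on_node is a hypothetical helper, and it assumes reserve/release entry points of roughly this shape exist on GCToOSInterface alongside the VirtualCommit declared above.

    // Hypothetical helper showing the intended use of the new node parameter.
    void* commit_segment_on_node(size_t size, uint32_t numa_node)
    {
        // Commit only works on an already-reserved range, so reserve first.
        void* addr = GCToOSInterface::VirtualReserve(size, 0, VirtualReserveFlags::None);
        if (addr == nullptr)
            return nullptr;

        // NUMA_NODE_UNDEFINED (the default argument) selects the plain commit path.
        uint32_t node = GCToOSInterface::CanEnableGCNumaAware()
            ? numa_node
            : NUMA_NODE_UNDEFINED;

        if (!GCToOSInterface::VirtualCommit(addr, size, node))
        {
            GCToOSInterface::VirtualRelease(addr, size);
            return nullptr;
        }
        return addr;
    }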
struct GCSystemInfo
{
- uint32_t dwNumberOfProcessors;
- uint32_t dwPageSize;
- uint32_t dwAllocationGranularity;
+ uint32_t dwNumberOfProcessors;
+ uint32_t dwPageSize;
+ uint32_t dwAllocationGranularity;
};
typedef void * HANDLE;
//cannot enable GC NUMA awareness; force all heaps onto
//one NUMA node by filling the array with all 0s
- if (!NumaNodeInfo::CanEnableGCNumaAware())
+ if (!GCToOSInterface::CanEnableGCNumaAware())
memset(heap_no_to_numa_node, 0, sizeof (heap_no_to_numa_node));
return TRUE;
affinity->Processor = GCThreadAffinity::None;
uint16_t gn, gpn;
- CPUGroupInfo::GetGroupForProcessor((uint16_t)heap_number, &gn, &gpn);
+ GCToOSInterface::GetGroupForProcessor((uint16_t)heap_number, &gn, &gpn);
int bit_number = 0;
for (uintptr_t mask = 1; mask !=0; mask <<=1)
affinity->Group = gn;
heap_select::set_cpu_group_for_heap(heap_number, gn);
heap_select::set_group_proc_for_heap(heap_number, gpn);
- if (NumaNodeInfo::CanEnableGCNumaAware())
+ if (GCToOSInterface::CanEnableGCNumaAware())
{
PROCESSOR_NUMBER proc_no;
proc_no.Group = gn;
proc_no.Reserved = 0;
uint16_t node_no = 0;
- if (NumaNodeInfo::GetNumaProcessorNodeEx(&proc_no, &node_no))
+ if (GCToOSInterface::GetNumaProcessorNode(&proc_no, &node_no))
heap_select::set_numa_node_for_heap(heap_number, node_no);
}
else
dprintf (3, ("Using processor %d for heap %d", proc_number, heap_number));
affinity->Processor = proc_number;
heap_select::set_proc_no_for_heap(heap_number, proc_number);
- if (NumaNodeInfo::CanEnableGCNumaAware())
+ if (GCToOSInterface::CanEnableGCNumaAware())
{
uint16_t node_no = 0;
PROCESSOR_NUMBER proc_no;
proc_no.Group = 0;
proc_no.Number = (uint8_t)proc_number;
proc_no.Reserved = 0;
- if (NumaNodeInfo::GetNumaProcessorNodeEx(&proc_no, &node_no))
+ if (GCToOSInterface::GetNumaProcessorNode(&proc_no, &node_no))
{
heap_select::set_numa_node_for_heap(heap_number, node_no);
}
bool virtual_alloc_commit_for_heap(void* addr, size_t size, int h_number)
{
-#if defined(MULTIPLE_HEAPS) && !defined(FEATURE_REDHAWK) && !defined(FEATURE_PAL) && !defined(BUILD_AS_STANDALONE)
+#if defined(MULTIPLE_HEAPS) && !defined(FEATURE_REDHAWK)
    // Currently there is no way for us to specify the NUMA node to allocate on via hosting
    // interfaces to a host. This will need to be added later.
#if !defined(FEATURE_CORECLR)
if (!CLRMemoryHosted())
#endif
{
- if (NumaNodeInfo::CanEnableGCNumaAware())
+ if (GCToOSInterface::CanEnableGCNumaAware())
{
uint32_t numa_node = heap_select::find_numa_node_from_heap_no(h_number);
- void * ret = NumaNodeInfo::VirtualAllocExNuma(GetCurrentProcess(), addr, size,
- MEM_COMMIT, PAGE_READWRITE, numa_node);
- if (ret != NULL)
+ if (GCToOSInterface::VirtualCommit(addr, size, numa_node))
return true;
}
}
org_hp->alloc_context_count--;
max_hp->alloc_context_count++;
acontext->set_alloc_heap(GCHeap::GetHeap(max_hp->heap_number));
- if (CPUGroupInfo::CanEnableGCCPUGroups())
+ if (GCToOSInterface::CanEnableGCCPUGroups())
{ //only set ideal processor when max_hp and org_hp are in the same cpu
//group. DO NOT MOVE THREADS ACROSS CPU GROUPS
uint16_t org_gn = heap_select::find_cpu_group_from_heap_no(org_hp->heap_number);
{
#endif //MULTIPLE_HEAPS
- num_sizedrefs = SystemDomain::System()->GetTotalNumSizedRefHandles();
+ num_sizedrefs = GCToEEInterface::GetTotalNumSizedRefHandles();
#ifdef MULTIPLE_HEAPS
// We are about to set affinity for GC threads. It is a good place to set up NUMA and
// CPU groups because the process mask, processor number, and group number are all
// readily available.
- if (CPUGroupInfo::CanEnableGCCPUGroups())
+ if (GCToOSInterface::CanEnableGCCPUGroups())
set_thread_group_affinity_for_heap(heap->heap_number, &affinity);
else
set_thread_affinity_mask_for_heap(heap->heap_number, &affinity);
#endif //WRITE_WATCH
#endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
- num_sizedrefs = SystemDomain::System()->GetTotalNumSizedRefHandles();
+ num_sizedrefs = GCToEEInterface::GetTotalNumSizedRefHandles();
// this c_write is not really necessary because restart_vm
// has an instruction that will flush the cpu cache (interlocked
gc_heap::gc_thread_no_affinitize_p = true;
uint32_t nhp_from_config = static_cast<uint32_t>(GCConfig::GetHeapCount());
- // GetGCProcessCpuCount only returns up to 64 procs.
- uint32_t nhp_from_process = CPUGroupInfo::CanEnableGCCPUGroups() ?
- CPUGroupInfo::GetNumActiveProcessors():
- GCToOSInterface::GetCurrentProcessCpuCount();
+
+ uint32_t nhp_from_process = GCToOSInterface::GetCurrentProcessCpuCount();
uint32_t nhp = ((nhp_from_config == 0) ? nhp_from_process :
(min (nhp_from_config, nhp_from_process)));
#endif //MULTIPLE_HEAPS
}
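Worked example of the heap-count selection above: with GCHeapCount unset (nhp_from_config == 0) in a process with 16 usable CPUs, nhp = 16; with GCHeapCount=4 it is min(4, 16) = 4; and a configured count larger than the CPU count is clamped to the CPU count.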
-bool GCHeap::FinalizeAppDomain(AppDomain *pDomain, bool fRunFinalizers)
+bool GCHeap::FinalizeAppDomain(void *pDomain, bool fRunFinalizers)
{
#ifdef MULTIPLE_HEAPS
bool foundp = false;
}
BOOL
-CFinalize::FinalizeSegForAppDomain (AppDomain *pDomain,
+CFinalize::FinalizeSegForAppDomain (void *pDomain,
BOOL fRunFinalizers,
unsigned int Seg)
{
}
else
{
- if (pDomain->IsRudeUnload())
+ if (GCToEEInterface::AppDomainIsRudeUnload(pDomain))
{
MoveItem (i, Seg, FreeList);
}
}
bool
-CFinalize::FinalizeAppDomain (AppDomain *pDomain, bool fRunFinalizers)
+CFinalize::FinalizeAppDomain (void *pDomain, bool fRunFinalizers)
{
bool finalizedFound = false;
"Specifies if you want to turn on logging in GC") \
BOOL_CONFIG(ConfigLogEnabled, "GCConfigLogEnabled", false, \
"Specifies the name of the GC config log file") \
+ BOOL_CONFIG(GCNumaAware, "GCNumaAware", true, "Enables NUMA-aware allocations in the GC") \
+ BOOL_CONFIG(GCCpuGroup, "GCCpuGroup", false, "Enables CPU groups in the GC") \
INT_CONFIG(HeapVerifyLevel, "HeapVerify", HEAPVERIFY_NONE, \
"When set verifies the integrity of the managed heap on entry and exit of each GC") \
INT_CONFIG(LOHCompactionMode, "GCLOHCompact", 0, "Specifies the LOH compaction mode") \
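Each entry in this table surfaces as a generated GCConfig getter, which is how the initialization code added elsewhere in this change reads the two new switches. A minimal consumption sketch (getter names follow from the BOOL_CONFIG entries above):

    // Honor the new knobs before doing any platform probing; see
    // InitNumaNodeInfo and InitCPUGroupInfo later in this change.
    if (!GCConfig::GetGCNumaAware())
        return;   // leave NUMA support disabled
    if (!GCConfig::GetGCCpuGroup())
        return;   // leave CPU group support disabled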
g_theGCToCLR->HandleFatalError(exitCode);
}
-inline bool GCToEEInterface::ShouldFinalizeObjectForUnload(AppDomain* pDomain, Object* obj)
+inline bool GCToEEInterface::ShouldFinalizeObjectForUnload(void* pDomain, Object* obj)
{
assert(g_theGCToCLR != nullptr);
return g_theGCToCLR->ShouldFinalizeObjectForUnload(pDomain, obj);
return g_theGCToCLR->EventSink();
}
+inline uint32_t GCToEEInterface::GetDefaultDomainIndex()
+{
+ assert(g_theGCToCLR != nullptr);
+ return g_theGCToCLR->GetDefaultDomainIndex();
+}
+
+inline void *GCToEEInterface::GetAppDomainAtIndex(uint32_t appDomainIndex)
+{
+ assert(g_theGCToCLR != nullptr);
+ return g_theGCToCLR->GetAppDomainAtIndex(appDomainIndex);
+}
+
+inline bool GCToEEInterface::AppDomainCanAccessHandleTable(uint32_t appDomainID)
+{
+ assert(g_theGCToCLR != nullptr);
+ return g_theGCToCLR->AppDomainCanAccessHandleTable(appDomainID);
+}
+
+inline uint32_t GCToEEInterface::GetIndexOfAppDomainBeingUnloaded()
+{
+ assert(g_theGCToCLR != nullptr);
+ return g_theGCToCLR->GetIndexOfAppDomainBeingUnloaded();
+}
+
+inline uint32_t GCToEEInterface::GetTotalNumSizedRefHandles()
+{
+ assert(g_theGCToCLR != nullptr);
+ return g_theGCToCLR->GetTotalNumSizedRefHandles();
+}
+
+inline bool GCToEEInterface::AppDomainIsRudeUnload(void *appDomain)
+{
+ assert(g_theGCToCLR != nullptr);
+ return g_theGCToCLR->AppDomainIsRudeUnload(appDomain);
+}
+
#endif // __GCTOENV_EE_STANDALONE_INL__
PER_HEAP_ISOLATED size_t GetFinalizablePromotedCount();
void SetFinalizeQueueForShutdown(bool fHasLock);
- bool FinalizeAppDomain(AppDomain *pDomain, bool fRunFinalizers);
+ bool FinalizeAppDomain(void *pDomain, bool fRunFinalizers);
bool ShouldRestartFinalizerWatchDog();
void DiagWalkObject (Object* obj, walk_fn fn, void* context);
// Asks the EE if it wants a particular object to be finalized when unloading
// an app domain.
virtual
- bool ShouldFinalizeObjectForUnload(AppDomain* pDomain, Object* obj) = 0;
+ bool ShouldFinalizeObjectForUnload(void* pDomain, Object* obj) = 0;
// Offers the EE the option to finalize the given object eagerly, i.e.
// not on the finalizer thread but on the current thread. The
// Returns an IGCToCLREventSink instance that can be used to fire events.
virtual
IGCToCLREventSink* EventSink() = 0;
+
+ virtual
+ uint32_t GetDefaultDomainIndex() = 0;
+
+ virtual
+ void *GetAppDomainAtIndex(uint32_t appDomainIndex) = 0;
+
+ virtual
+ uint32_t GetIndexOfAppDomainBeingUnloaded() = 0;
+
+ virtual
+ bool AppDomainCanAccessHandleTable(uint32_t appDomainID) = 0;
+
+ virtual
+ uint32_t GetTotalNumSizedRefHandles() = 0;
+
+ virtual
+ bool AppDomainIsRudeUnload(void *appDomain) = 0;
};
#endif // _GCINTERFACE_EE_H_
*/
// Finalizes an app domain by finalizing objects within that app domain.
- virtual bool FinalizeAppDomain(AppDomain* pDomain, bool fRunFinalizers) = 0;
+ virtual bool FinalizeAppDomain(void* pDomain, bool fRunFinalizers) = 0;
// Finalizes all registered objects for shutdown, even if they are still reachable.
virtual void SetFinalizeQueueForShutdown(bool fHasLock) = 0;
// Initialize GCConfig before anything else - initialization of our
// various components may want to query the current configuration.
GCConfig::Initialize();
+
if (!GCToOSInterface::Initialize())
{
return E_FAIL;
}
- BOOL FinalizeSegForAppDomain (AppDomain *pDomain,
+ BOOL FinalizeSegForAppDomain (void *pDomain,
BOOL fRunFinalizers,
unsigned int Seg);
void DiscardNonCriticalObjects();
//Methods used by the app domain unloading call to finalize objects in an app domain
- bool FinalizeAppDomain (AppDomain *pDomain, bool fRunFinalizers);
+ bool FinalizeAppDomain (void *pDomain, bool fRunFinalizers);
void CheckFinalizerObjects();
BEGIN_DEBUG_ONLY_CODE;
VALIDATEOBJECTREF (objref);
- AppDomain *pDomain = SystemDomain::GetAppDomainAtIndex(appDomainIndex);
-
- // Access to a handle in unloaded domain is not allowed
- _ASSERTE(pDomain != NULL);
- _ASSERTE(!pDomain->NoAccessToHandleTable());
+#ifndef DACCESS_COMPILE
+ _ASSERTE(GCToEEInterface::AppDomainCanAccessHandleTable(appDomainIndex.m_dwIndex));
+#endif // DACCESS_COMPILE
END_DEBUG_ONLY_CODE;
}
VALIDATEOBJECTREF (objref);
- AppDomain *pDomain = SystemDomain::GetAppDomainAtIndex(appDomainIndex);
-
- // Access to a handle in unloaded domain is not allowed
- _ASSERTE(pDomain != NULL);
- _ASSERTE(!pDomain->NoAccessToHandleTable());
-
+#ifndef DACCESS_COMPILE
+ _ASSERTE(GCToEEInterface::AppDomainCanAccessHandleTable(appDomainIndex.m_dwIndex));
+#endif // DACCESS_COMPILE
END_DEBUG_ONLY_CODE;
}
#else
BEGIN_DEBUG_ONLY_CODE;
ADIndex id = HndGetHandleADIndex(handle);
- AppDomain *pUnloadingDomain = SystemDomain::AppDomainBeingUnloaded();
- if (!pUnloadingDomain || pUnloadingDomain->GetIndex() != id)
+ ADIndex unloadingDomain(GCToEEInterface::GetIndexOfAppDomainBeingUnloaded());
+ if (unloadingDomain != id)
{
return;
}
- if (!pUnloadingDomain->NoAccessToHandleTable())
+ if (GCToEEInterface::AppDomainCanAccessHandleTable(unloadingDomain.m_dwIndex))
{
return;
}
{
uint32_t hndType = HandleFetchType(handle);
ADIndex appDomainIndex = HndGetHandleADIndex(handle);
- AppDomain* pAppDomain = SystemDomain::GetAppDomainAtIndex(appDomainIndex);
+ void* pAppDomain = GCToEEInterface::GetAppDomainAtIndex(appDomainIndex.m_dwIndex);
uint32_t generation = value != 0 ? g_theGCHeap->WhichGeneration(value) : 0;
FIRE_EVENT(SetGCHandle, (void *)handle, (void *)value, hndType, generation, (uint64_t)pAppDomain);
FIRE_EVENT(PrvSetGCHandle, (void *) handle, (void *)value, hndType, generation, (uint64_t)pAppDomain);
// to this structure as our closure's context pointer.
struct ClosureCapture
{
- AppDomain* pAppDomain;
+ void* pAppDomain;
Object* overlapped;
};
}
CONTRACTL_END;
- _ASSERTE (pTargetTable->uADIndex == SystemDomain::System()->DefaultDomain()->GetIndex()); // must be for default domain
+ _ASSERTE (pTargetTable->uADIndex == ADIndex(GCToEEInterface::GetDefaultDomainIndex())); // must be for default domain
BOOL fGotException = FALSE;
TableSegment *pSegment = pTable->pSegmentList;
if (!IsServerHeap())
return 1;
-#ifdef FEATURE_REDHAWK
return GCToOSInterface::GetCurrentProcessCpuCount();
-#else
- return (CPUGroupInfo::CanEnableGCCPUGroups() ? CPUGroupInfo::GetNumActiveProcessors() :
- GCToOSInterface::GetCurrentProcessCpuCount());
-#endif
}
class HandleTableBucketHolder
abort();
}
-bool GCToEEInterface::ShouldFinalizeObjectForUnload(AppDomain* pDomain, Object* obj)
+bool GCToEEInterface::ShouldFinalizeObjectForUnload(void* pDomain, Object* obj)
{
return true;
}
void GCToEEInterface::WalkAsyncPinned(Object* object, void* context, void (*callback)(Object*, Object*, void*))
{
}
+
+uint32_t GCToEEInterface::GetDefaultDomainIndex()
+{
+ return UINT32_MAX; // the sample EE has no app domains
+}
+
+void *GCToEEInterface::GetAppDomainAtIndex(uint32_t appDomainIndex)
+{
+ return nullptr;
+}
+
+bool GCToEEInterface::AppDomainCanAccessHandleTable(uint32_t appDomainID)
+{
+ return false;
+}
+
+uint32_t GCToEEInterface::GetIndexOfAppDomainBeingUnloaded()
+{
+ return UINT32_MAX; // no app domain is ever being unloaded here
+}
+
+uint32_t GCToEEInterface::GetTotalNumSizedRefHandles()
+{
+ return 0; // the sample EE creates no sized ref handles
+}
+
+bool GCToEEInterface::AppDomainIsRudeUnload(void *appDomain)
+{
+ return false;
+}
GCSTRESS_INSTR_NGEN = 8, // GC on every allowable NGEN instr
GCSTRESS_UNIQUE = 16, // GC only on a unique stack trace
};
-
- int GetHeapVerifyLevel() { return 0; }
- bool IsHeapVerifyEnabled() { return GetHeapVerifyLevel() != 0; }
-
- GCStressFlags GetGCStressLevel() const { return GCSTRESS_NONE; }
- bool IsGCStressMix() const { return false; }
-
- int GetGCtraceStart() const { return 0; }
- int GetGCtraceEnd() const { return 0; }//1000000000; }
- int GetGCtraceFac() const { return 0; }
- int GetGCprnLvl() const { return 0; }
- bool IsGCBreakOnOOMEnabled() const { return false; }
- int GetGCgen0size() const { return 0; }
- int GetSegmentSize() const { return 0; }
- int GetGCconcurrent() const { return 1; }
- int GetGCLatencyMode() const { return 1; }
- int GetGCForceCompact() const { return 0; }
- int GetGCRetainVM() const { return 0; }
- int GetGCTrimCommit() const { return 0; }
- int GetGCLOHCompactionMode() const { return 0; }
-
- bool GetGCConservative() const { return true; }
};
-extern EEConfig * g_pConfig;
-
#include "etmdummy.h"
#define ETW_EVENT_ENABLED(e,f) false
set(GC_PAL_SOURCES
gcenv.unix.cpp
events.cpp
- cgroup.cpp)
+ cgroup.cpp
+ cpuinfo.cpp)
add_library(gc_unix STATIC ${GC_PAL_SOURCES} ${VERSION_FILE_PATH})
#cmakedefine01 HAVE_PTHREAD_THREADID_NP
#cmakedefine01 HAVE_PTHREAD_GETTHREADID_NP
#cmakedefine01 HAVE_SCHED_GETCPU
+#cmakedefine01 HAVE_NUMA_H
+#cmakedefine01 HAVE_VM_ALLOCATE
#cmakedefine01 HAVE_PTHREAD_CONDATTR_SETCLOCK
#cmakedefine01 HAVE_MACH_ABSOLUTE_TIME
#cmakedefine01 HAVE_SCHED_GETAFFINITY
check_include_files(sys/time.h HAVE_SYS_TIME_H)
check_include_files(sys/mman.h HAVE_SYS_MMAN_H)
+check_include_files(numa.h HAVE_NUMA_H)
+check_function_exists(vm_allocate HAVE_VM_ALLOCATE)
check_cxx_source_compiles("
#include <pthread.h>
#include <stdint.h>
// size - size of the virtual memory range
// Return:
// true if it has succeeded, false if it has failed
-bool GCToOSInterface::VirtualCommit(void* address, size_t size)
+bool GCToOSInterface::VirtualCommit(void* address, size_t size, uint32_t node)
{
+ assert(node == NUMA_NODE_UNDEFINED && "Numa allocation is not ported to local GC on unix yet");
return mprotect(address, size, PROT_WRITE | PROT_READ) == 0;
}
return g_logicalCpuCount;
}
+bool GCToOSInterface::CanEnableGCNumaAware()
+{
+ return false;
+}
+
+bool GCToOSInterface::GetNumaProcessorNode(PPROCESSOR_NUMBER proc_no, uint16_t *node_no)
+{
+ assert(!"Numa has not been ported to local GC for unix");
+ return false;
+}
+
+bool GCToOSInterface::CanEnableGCCPUGroups()
+{
+ return false;
+}
+
+void GCToOSInterface::GetGroupForProcessor(uint16_t processor_number, uint16_t* group_number, uint16_t* group_processor_number)
+{
+ assert(!"CpuGroup has not been ported to local GC for unix");
+}
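Until that port happens, here is a sketch of what a node-aware commit could look like on Linux. This is illustrative only, assuming libnuma is available (the HAVE_NUMA_H probe added in this change) and the binary links against -lnuma; VirtualCommitOnNode is a hypothetical stand-in, not the implementation:

    #include <sys/mman.h>
    #if HAVE_NUMA_H
    #include <numa.h>
    #endif

    bool VirtualCommitOnNode(void* address, size_t size, uint32_t node)
    {
        if (mprotect(address, size, PROT_WRITE | PROT_READ) != 0)
            return false;
    #if HAVE_NUMA_H
        if (node != NUMA_NODE_UNDEFINED && numa_available() != -1)
        {
            // Prefer physical pages from the requested node; the pages
            // are actually placed there on first touch.
            numa_tonode_memory(address, size, (int)node);
        }
    #endif
        return true;
    }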
// Initialize the critical section
void CLRCriticalSection::Initialize()
#include "env/gcenv.structs.h"
#include "env/gcenv.base.h"
#include "env/gcenv.os.h"
+#include "env/gcenv.ee.h"
#include "env/gcenv.windows.inl"
#include "env/volatile.h"
+#include "gcconfig.h"
GCSystemInfo g_SystemInfo;
namespace {
+static bool g_fEnableGCNumaAware;
+
+struct CPU_Group_Info
+{
+ WORD nr_active; // at most 64
+ WORD reserved[1];
+ WORD begin;
+ WORD end;
+ DWORD_PTR active_mask;
+ DWORD groupWeight;
+ DWORD activeThreadWeight;
+};
+
+static bool g_fEnableGCCPUGroups;
+static bool g_fHadSingleProcessorAtStartup;
+static DWORD g_nGroups;
+static DWORD g_nProcessors;
+static CPU_Group_Info *g_CPUGroupInfoArray;
+
+void InitNumaNodeInfo()
+{
+ ULONG highest = 0;
+
+ g_fEnableGCNumaAware = false;
+
+ if (!GCConfig::GetGCNumaAware())
+ return;
+
+ // bail out if we failed to query the highest NUMA node number, or there is only one node
+ if (!GetNumaHighestNodeNumber(&highest) || (highest == 0))
+ return;
+
+ g_fEnableGCNumaAware = true;
+ return;
+}
+
+#if (defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_))
+// Calculate greatest common divisor
+DWORD GCD(DWORD u, DWORD v)
+{
+ while (v != 0)
+ {
+ DWORD dwTemp = v;
+ v = u % v;
+ u = dwTemp;
+ }
+
+ return u;
+}
+
+// Calculate least common multiple
+DWORD LCM(DWORD u, DWORD v)
+{
+ return u / GCD(u, v) * v;
+}
+#endif
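To make the weight computation in InitCPUGroupInfoArray concrete: with two groups of 64 and 48 active logical processors, dwWeight = LCM(64, 48) = 192, giving groupWeight = 192/64 = 3 for the first group and 192/48 = 4 for the second. Every group then satisfies groupWeight * nr_active == 192, so per-group thread load can be compared on a single scale.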
+
+bool InitCPUGroupInfoArray()
+{
+#if (defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_))
+ BYTE *bBuffer = NULL;
+ SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX *pSLPIEx = NULL;
+ SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX *pRecord = NULL;
+ DWORD cbSLPIEx = 0;
+ DWORD byteOffset = 0;
+ DWORD dwNumElements = 0;
+ DWORD dwWeight = 1;
+
+ // The first call passes a null buffer: it is expected to fail with
+ // ERROR_INSUFFICIENT_BUFFER and report the required size in cbSLPIEx.
+ if (GetLogicalProcessorInformationEx(RelationGroup, pSLPIEx, &cbSLPIEx) &&
+ GetLastError() != ERROR_INSUFFICIENT_BUFFER)
+ return false;
+
+ assert(cbSLPIEx);
+
+ // Bail out if the buffer allocation fails
+ bBuffer = new (std::nothrow) BYTE[ cbSLPIEx ];
+ if (bBuffer == NULL)
+ return false;
+
+ pSLPIEx = (SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX *)bBuffer;
+ if (!GetLogicalProcessorInformationEx(RelationGroup, pSLPIEx, &cbSLPIEx))
+ {
+ delete[] bBuffer;
+ return false;
+ }
+
+ pRecord = pSLPIEx;
+ while (byteOffset < cbSLPIEx)
+ {
+ if (pRecord->Relationship == RelationGroup)
+ {
+ g_nGroups = pRecord->Group.ActiveGroupCount;
+ break;
+ }
+ byteOffset += pRecord->Size;
+ pRecord = (SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX *)(bBuffer + byteOffset);
+ }
+
+ g_CPUGroupInfoArray = new (std::nothrow) CPU_Group_Info[g_nGroups];
+ if (g_CPUGroupInfoArray == NULL)
+ {
+ delete[] bBuffer;
+ return false;
+ }
+
+ for (DWORD i = 0; i < g_nGroups; i++)
+ {
+ g_CPUGroupInfoArray[i].nr_active = (WORD)pRecord->Group.GroupInfo[i].ActiveProcessorCount;
+ g_CPUGroupInfoArray[i].active_mask = pRecord->Group.GroupInfo[i].ActiveProcessorMask;
+ g_nProcessors += g_CPUGroupInfoArray[i].nr_active;
+ dwWeight = LCM(dwWeight, (DWORD)g_CPUGroupInfoArray[i].nr_active);
+ }
+
+ // The number of threads per group that can be supported will depend on the number of CPU groups
+ // and the number of LPs within each processor group. For example, when the number of LPs in
+ // CPU groups is the same and is 64, the number of threads per group before weight overflow
+ // would be 2^32/2^6 = 2^26 (64M threads)
+ for (DWORD i = 0; i < g_nGroups; i++)
+ {
+ g_CPUGroupInfoArray[i].groupWeight = dwWeight / (DWORD)g_CPUGroupInfoArray[i].nr_active;
+ g_CPUGroupInfoArray[i].activeThreadWeight = 0;
+ }
+
+ delete[] bBuffer; // the group info has been copied out; free the raw buffer
+ return true;
+#else
+ return false;
+#endif
+}
+
+bool InitCPUGroupInfoRange()
+{
+#if (defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_))
+ WORD begin = 0;
+ WORD nr_proc = 0;
+
+ for (WORD i = 0; i < g_nGroups; i++)
+ {
+ nr_proc += g_CPUGroupInfoArray[i].nr_active;
+ g_CPUGroupInfoArray[i].begin = begin;
+ g_CPUGroupInfoArray[i].end = nr_proc - 1;
+ begin = nr_proc;
+ }
+
+ return true;
+#else
+ return false;
+#endif
+}
+
+void InitCPUGroupInfo()
+{
+ g_fEnableGCCPUGroups = false;
+
+#if (defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_))
+ if (!GCConfig::GetGCCpuGroup())
+ return;
+
+ if (!InitCPUGroupInfoArray())
+ return;
+
+ if (!InitCPUGroupInfoRange())
+ return;
+
+ // only enable CPU groups if more than one group exists
+ g_fEnableGCCPUGroups = g_nGroups > 1;
+#endif // _TARGET_AMD64_ || _TARGET_ARM64_
+
+ // Determine if the process is affinitized to a single processor (or if the system has a single processor)
+ DWORD_PTR processAffinityMask, systemAffinityMask;
+ if (::GetProcessAffinityMask(::GetCurrentProcess(), &processAffinityMask, &systemAffinityMask))
+ {
+ processAffinityMask &= systemAffinityMask;
+ if (processAffinityMask != 0 && // only one CPU group is involved
+ (processAffinityMask & (processAffinityMask - 1)) == 0) // only one bit is set
+ {
+ g_fHadSingleProcessorAtStartup = true;
+ }
+ }
+}
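The single-bit test above is the standard power-of-two check: x & (x - 1) clears the lowest set bit, so the result is zero exactly when one bit is set. For example, 0b01000000 & 0b00111111 == 0 (a single processor), while 0b01100000 & 0b01011111 == 0b01000000 != 0 (more than one).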
+
void GetProcessMemoryLoad(LPMEMORYSTATUSEX pMSEX)
{
pMSEX->dwLength = sizeof(MEMORYSTATUSEX);
assert(systemInfo.dwPageSize == 0x1000);
+ InitNumaNodeInfo();
+ InitCPUGroupInfo();
+
return true;
}
// size - size of the virtual memory range
// Return:
// true if it has succeeded, false if it has failed
-bool GCToOSInterface::VirtualCommit(void* address, size_t size)
+bool GCToOSInterface::VirtualCommit(void* address, size_t size, uint32_t node)
{
- return ::VirtualAlloc(address, size, MEM_COMMIT, PAGE_READWRITE) != nullptr;
+ if (node == NUMA_NODE_UNDEFINED)
+ {
+ return ::VirtualAlloc(address, size, MEM_COMMIT, PAGE_READWRITE) != nullptr;
+ }
+ else
+ {
+ assert(g_fEnableGCNumaAware);
+ return ::VirtualAllocExNuma(::GetCurrentProcess(), address, size, MEM_COMMIT, PAGE_READWRITE, node) != nullptr;
+ }
}
// Decommit virtual memory range.
return ::GetTickCount();
}
+// Gets the total number of processors on the machine, not taking
+// into account current process affinity.
+// Return:
+// Number of processors on the machine
+uint32_t GCToOSInterface::GetTotalProcessorCount()
+{
+ if (CanEnableGCCPUGroups())
+ {
+ return g_nProcessors;
+ }
+ else
+ {
+ return g_SystemInfo.dwNumberOfProcessors;
+ }
+}
+
+bool GCToOSInterface::CanEnableGCNumaAware()
+{
+ return g_fEnableGCNumaAware;
+}
+
+bool GCToOSInterface::GetNumaProcessorNode(PPROCESSOR_NUMBER proc_no, uint16_t *node_no)
+{
+ assert(g_fEnableGCNumaAware);
+ return ::GetNumaProcessorNodeEx(proc_no, node_no) != FALSE;
+}
+
+bool GCToOSInterface::CanEnableGCCPUGroups()
+{
+ return g_fEnableGCCPUGroups;
+}
+
+void GCToOSInterface::GetGroupForProcessor(uint16_t processor_number, uint16_t* group_number, uint16_t* group_processor_number)
+{
+ assert(g_fEnableGCCPUGroups);
+
+#if !defined(FEATURE_REDHAWK) && (defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_))
+ WORD bTemp = 0;
+ WORD bDiff = processor_number - bTemp;
+
+ for (WORD i=0; i < g_nGroups; i++)
+ {
+ bTemp += g_CPUGroupInfoArray[i].nr_active;
+ if (bTemp > processor_number)
+ {
+ *group_number = i;
+ *group_processor_number = bDiff;
+ break;
+ }
+ bDiff = processor_number - bTemp;
+ }
+#else
+ *group_number = 0;
+ *group_processor_number = 0;
+#endif
+}
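A quick trace of the lookup above, assuming two groups with 64 active processors each: for processor_number = 70, iteration 0 advances bTemp to 64 (not greater than 70) and sets bDiff = 70 - 64 = 6; iteration 1 advances bTemp to 128, which is greater than 70, so the function reports group 1, processor 6 within that group.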
+
// Parameters of the GC thread stub
struct GCThreadStubParam
{
return 0;
}
-// Gets the total number of processors on the machine, not taking
-// into account current process affinity.
-// Return:
-// Number of processors on the machine
-uint32_t GCToOSInterface::GetTotalProcessorCount()
-{
- return g_SystemInfo.dwNumberOfProcessors;
-}
-
// Initialize the critical section
void CLRCriticalSection::Initialize()
{
m_impl = event.release();
return true;
}
-
/*static*/ NumaNodeInfo::PVAExN NumaNodeInfo::m_pVirtualAllocExNuma = NULL;
/*static*/ LPVOID NumaNodeInfo::VirtualAllocExNuma(HANDLE hProc, LPVOID lpAddr, SIZE_T dwSize,
- DWORD allocType, DWORD prot, DWORD node)
+ DWORD allocType, DWORD prot, DWORD node)
{
return (*m_pVirtualAllocExNuma)(hProc, lpAddr, dwSize, allocType, prot, node);
}
#if !defined(FEATURE_REDHAWK)
//check for numa support if multiple heaps are used
ULONG highest = 0;
-
+
if (CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_GCNumaAware) == 0)
return FALSE;
EEPOLICY_HANDLE_FATAL_ERROR(exitCode);
}
-bool GCToEEInterface::ShouldFinalizeObjectForUnload(AppDomain* pDomain, Object* obj)
+bool GCToEEInterface::ShouldFinalizeObjectForUnload(void* pDomain, Object* obj)
{
// CoreCLR does not have appdomains, so this code path is dead. Other runtimes may
// choose to inspect the object being finalized here.
return &g_gcToClrEventSink;
}
+
+uint32_t GCToEEInterface::GetDefaultDomainIndex()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return SystemDomain::System()->DefaultDomain()->GetIndex().m_dwIndex;
+}
+
+void *GCToEEInterface::GetAppDomainAtIndex(uint32_t appDomainIndex)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ ADIndex index(appDomainIndex);
+ return static_cast<void *>(SystemDomain::GetAppDomainAtIndex(index));
+}
+
+bool GCToEEInterface::AppDomainCanAccessHandleTable(uint32_t appDomainID)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ ADIndex index(appDomainID);
+ AppDomain *pDomain = SystemDomain::GetAppDomainAtIndex(index);
+ return (pDomain != NULL) && !pDomain->NoAccessToHandleTable();
+}
+
+uint32_t GCToEEInterface::GetIndexOfAppDomainBeingUnloaded()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return SystemDomain::IndexOfAppDomainBeingUnloaded().m_dwIndex;
+}
+
+uint32_t GCToEEInterface::GetTotalNumSizedRefHandles()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return SystemDomain::System()->GetTotalNumSizedRefHandles();
+}
+
+bool GCToEEInterface::AppDomainIsRudeUnload(void *appDomain)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ AppDomain *realPtr = static_cast<AppDomain *>(appDomain);
+ return realPtr->IsRudeUnload() != FALSE;
+}
+
void EnableFinalization(bool foundFinalizers);
void HandleFatalError(unsigned int exitCode);
- bool ShouldFinalizeObjectForUnload(AppDomain* pDomain, Object* obj);
+ bool ShouldFinalizeObjectForUnload(void* pDomain, Object* obj);
bool ForceFullGCToBeBlocking();
bool EagerFinalized(Object* obj);
MethodTable* GetFreeObjectMethodTable();
void WalkAsyncPinnedForPromotion(Object* object, ScanContext* sc, promote_func* callback);
void WalkAsyncPinned(Object* object, void* context, void(*callback)(Object*, Object*, void*));
IGCToCLREventSink* EventSink();
+
+ uint32_t GetDefaultDomainIndex();
+ void *GetAppDomainAtIndex(uint32_t appDomainIndex);
+ bool AppDomainCanAccessHandleTable(uint32_t appDomainID);
+ uint32_t GetIndexOfAppDomainBeingUnloaded();
+ uint32_t GetTotalNumSizedRefHandles();
+ bool AppDomainIsRudeUnload(void *appDomain);
};
} // namespace standalone
// size - size of the virtual memory range
// Return:
// true if it has succeeded, false if it has failed
-bool GCToOSInterface::VirtualCommit(void* address, size_t size)
+bool GCToOSInterface::VirtualCommit(void* address, size_t size, uint32_t node)
{
LIMITED_METHOD_CONTRACT;
- return ::ClrVirtualAlloc(address, size, MEM_COMMIT, PAGE_READWRITE) != NULL;
+ if (node == NUMA_NODE_UNDEFINED)
+ {
+ return ::ClrVirtualAlloc(address, size, MEM_COMMIT, PAGE_READWRITE) != NULL;
+ }
+ else
+ {
+ return NumaNodeInfo::VirtualAllocExNuma(::GetCurrentProcess(), address, size, MEM_COMMIT, PAGE_READWRITE, node) != NULL;
+ }
}
// Decommit virtual memory range.
{
LIMITED_METHOD_CONTRACT;
- return g_SystemInfo.dwNumberOfProcessors;
+ if (CPUGroupInfo::CanEnableGCCPUGroups())
+ {
+ return CPUGroupInfo::GetNumActiveProcessors();
+ }
+ else
+ {
+ return g_SystemInfo.dwNumberOfProcessors;
+ }
+}
+
+bool GCToOSInterface::CanEnableGCNumaAware()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return NumaNodeInfo::CanEnableGCNumaAware() != FALSE;
+}
+
+bool GCToOSInterface::GetNumaProcessorNode(PPROCESSOR_NUMBER proc_no, uint16_t *node_no)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return NumaNodeInfo::GetNumaProcessorNodeEx(proc_no, node_no) != FALSE;
+}
+
+bool GCToOSInterface::CanEnableGCCPUGroups()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return CPUGroupInfo::CanEnableGCCPUGroups() != FALSE;
+}
+
+void GCToOSInterface::GetGroupForProcessor(uint16_t processor_number, uint16_t* group_number, uint16_t* group_processor_number)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ CPUGroupInfo::GetGroupForProcessor(processor_number, group_number, group_processor_number);
}
// Initialize the critical section