Remove NumaNodeInfo, CPUGroupInfo, AppDomain, SystemDomain, and EEConfig stubs from...
author     David Mason <davmason@microsoft.com>
Sat, 25 Aug 2018 19:24:35 +0000 (12:24 -0700)
committer  GitHub <noreply@github.com>
Sat, 25 Aug 2018 19:24:35 +0000 (12:24 -0700)
* Switch NumaNodeInfo and CPUGroupInfo to the interface

* Remove AppDomain/SystemDomain stubs

* remove EEConfig methods

* Port numa code to the coreclr side

* add numa back to PAL and standalone builds

* enable numa for PAL/Standalone builds, and fix BOOL warnings

* remove unused defines, and fix linux build errors

* building on windows

* about to delete numa work from unix and want a backup

* add stubs for unix numa/cpugroup

* Code review feedback

* Code review feedback
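
The bullets above describe replacing direct calls to the removed NumaNodeInfo/CPUGroupInfo stubs with queries through GCToOSInterface (and routing app-domain queries through GCToEEInterface). A minimal sketch of the new call pattern, assuming only the declarations this commit adds to gcenv.os.h — the helper name and the group-0 processor mapping are illustrative, not part of the diff:

    // Sketch: commit a range on the NUMA node owning a given processor,
    // falling back to a plain commit when NUMA awareness is unavailable.
    #include "env/gcenv.os.h"

    bool commit_on_node_for_proc(void* addr, size_t size, uint16_t proc)
    {
        uint32_t node = NUMA_NODE_UNDEFINED;

        if (GCToOSInterface::CanEnableGCNumaAware())
        {
            PROCESSOR_NUMBER proc_no;
            proc_no.Group    = 0;             // single-group case, as in set_thread_affinity_mask_for_heap
            proc_no.Number   = (uint8_t)proc;
            proc_no.Reserved = 0;

            uint16_t node_no;
            if (GCToOSInterface::GetNumaProcessorNode(&proc_no, &node_no))
                node = node_no;
        }

        // VirtualCommit now takes a node; NUMA_NODE_UNDEFINED means "no preference".
        return GCToOSInterface::VirtualCommit(addr, size, node);
    }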

26 files changed:
src/gc/env/gcenv.base.h
src/gc/env/gcenv.ee.h
src/gc/env/gcenv.os.h
src/gc/env/gcenv.structs.h
src/gc/gc.cpp
src/gc/gcconfig.h
src/gc/gcenv.ee.standalone.inl
src/gc/gcimpl.h
src/gc/gcinterface.ee.h
src/gc/gcinterface.h
src/gc/gcload.cpp
src/gc/gcpriv.h
src/gc/handletable.cpp
src/gc/handletablecore.cpp
src/gc/objecthandle.cpp
src/gc/sample/gcenv.ee.cpp
src/gc/sample/gcenv.h
src/gc/unix/CMakeLists.txt
src/gc/unix/config.h.in
src/gc/unix/configure.cmake
src/gc/unix/gcenv.unix.cpp
src/gc/windows/gcenv.windows.cpp
src/utilcode/util.cpp
src/vm/gcenv.ee.cpp
src/vm/gcenv.ee.h
src/vm/gcenv.os.cpp

index 8693bbe..15a81d7 100644 (file)
@@ -489,70 +489,4 @@ struct ADIndex
     BOOL operator!=(const ADIndex& ad) const { return m_dwIndex != ad.m_dwIndex; }
 };
 
-class AppDomain
-{
-public:
-    ADIndex GetIndex() { return ADIndex(RH_DEFAULT_DOMAIN_ID); }
-    BOOL IsRudeUnload() { return FALSE; }
-    BOOL NoAccessToHandleTable() { return FALSE; }
-    void DecNumSizedRefHandles() {}
-};
-
-class SystemDomain
-{
-public:
-    static SystemDomain *System() { return NULL; }
-    static AppDomain *GetAppDomainAtIndex(ADIndex /*index*/) { return (AppDomain *)-1; }
-    static AppDomain *AppDomainBeingUnloaded() { return NULL; }
-    AppDomain *DefaultDomain() { return NULL; }
-    DWORD GetTotalNumSizedRefHandles() { return 0; }
-};
-
-class NumaNodeInfo
-{
-public:
-    static bool CanEnableGCNumaAware()
-    {
-        // [LOCALGC TODO] enable NUMA node support
-        return false;
-    }
-
-    static void GetGroupForProcessor(uint16_t processor_number, uint16_t * group_number, uint16_t * group_processor_number)
-    {
-        // [LOCALGC TODO] enable NUMA node support
-        assert(!"should not be called");
-    }
-
-    static bool GetNumaProcessorNodeEx(PPROCESSOR_NUMBER proc_no, uint16_t * node_no)
-    {
-        // [LOCALGC TODO] enable NUMA node support
-        assert(!"should not be called");
-        return false;
-    }
-};
-
-class CPUGroupInfo
-{
-public:
-    static bool CanEnableGCCPUGroups()
-    {
-        // [LOCALGC TODO] enable CPU group support
-        return false;
-    }
-
-    static uint32_t GetNumActiveProcessors()
-    {
-        // [LOCALGC TODO] enable CPU group support
-        assert(!"should not be called");
-        return 0;
-    }
-
-    static void GetGroupForProcessor(uint16_t processor_number, uint16_t * group_number, uint16_t * group_processor_number)
-    {
-        // [LOCALGC TODO] enable CPU group support
-        assert(!"should not be called");
-    }
-};
-
-
 #endif // __GCENV_BASE_INCLUDED__
index ec79877..ebe3046 100644 (file)
@@ -71,7 +71,7 @@ public:
     static void EnableFinalization(bool foundFinalizers);
 
     static void HandleFatalError(unsigned int exitCode);
-    static bool ShouldFinalizeObjectForUnload(AppDomain* pDomain, Object* obj);
+    static bool ShouldFinalizeObjectForUnload(void* pDomain, Object* obj);
     static bool ForceFullGCToBeBlocking();
     static bool EagerFinalized(Object* obj);
     static MethodTable* GetFreeObjectMethodTable();
@@ -85,6 +85,13 @@ public:
     static void WalkAsyncPinnedForPromotion(Object* object, ScanContext* sc, promote_func* callback);
     static void WalkAsyncPinned(Object* object, void* context, void(*callback)(Object*, Object*, void*));
     static IGCToCLREventSink* EventSink();
+
+    static uint32_t GetDefaultDomainIndex();
+    static void *GetAppDomainAtIndex(uint32_t appDomainIndex);
+    static bool AppDomainCanAccessHandleTable(uint32_t appDomainID);
+    static uint32_t GetIndexOfAppDomainBeingUnloaded();
+    static uint32_t GetTotalNumSizedRefHandles();
+    static bool AppDomainIsRudeUnload(void *appDomain);
 };
 
 #endif // __GCENV_EE_H__
index 41e46f8..35515de 100644 (file)
@@ -18,6 +18,8 @@
 #undef Sleep
 #endif // Sleep
 
+#define NUMA_NODE_UNDEFINED UINT32_MAX
+
 // Critical section used by the GC
 class CLRCriticalSection
 {
@@ -194,7 +196,7 @@ public:
     //  size    - size of the virtual memory range
     // Return:
     //  true if it has succeeded, false if it has failed
-    static bool VirtualCommit(void *address, size_t size);
+    static bool VirtualCommit(void *address, size_t size, uint32_t node = NUMA_NODE_UNDEFINED);
 
     // Decomit virtual memory range.
     // Parameters:
@@ -391,6 +393,19 @@ public:
     // Return:
     //  Number of processors on the machine
     static uint32_t GetTotalProcessorCount();
+
+    // Is NUMA support available
+    static bool CanEnableGCNumaAware();
+
+    // Gets the NUMA node for the processor
+    static bool GetNumaProcessorNode(PPROCESSOR_NUMBER proc_no, uint16_t *node_no);
+
+    // Are CPU groups enabled
+    static bool CanEnableGCCPUGroups();
+
+    // Get the CPU group for the specified processor
+    static void GetGroupForProcessor(uint16_t processor_number, uint16_t* group_number, uint16_t* group_processor_number);
+
 };
 
 #endif // __GCENV_OS_H__
index bb503e3..4f51ad0 100644 (file)
@@ -9,9 +9,9 @@
 
 struct GCSystemInfo
 {
-    uint32_t dwNumberOfProcessors;
-    uint32_t dwPageSize;
-    uint32_t dwAllocationGranularity;
+    uint32_t        dwNumberOfProcessors;
+    uint32_t        dwPageSize;
+    uint32_t        dwAllocationGranularity;
 };
 
 typedef void * HANDLE;
index 31715cb..edd7d07 100644 (file)
@@ -5066,7 +5066,7 @@ public:
 
         //can not enable gc numa aware, force all heaps to be in
         //one numa node by filling the array with all 0s
-        if (!NumaNodeInfo::CanEnableGCNumaAware())
+        if (!GCToOSInterface::CanEnableGCNumaAware())
             memset(heap_no_to_numa_node, 0, sizeof (heap_no_to_numa_node)); 
 
         return TRUE;
@@ -5262,7 +5262,7 @@ void set_thread_group_affinity_for_heap(int heap_number, GCThreadAffinity* affin
     affinity->Processor = GCThreadAffinity::None;
 
     uint16_t gn, gpn;
-    CPUGroupInfo::GetGroupForProcessor((uint16_t)heap_number, &gn, &gpn);
+    GCToOSInterface::GetGroupForProcessor((uint16_t)heap_number, &gn, &gpn);
 
     int bit_number = 0;
     for (uintptr_t mask = 1; mask !=0; mask <<=1) 
@@ -5274,7 +5274,7 @@ void set_thread_group_affinity_for_heap(int heap_number, GCThreadAffinity* affin
             affinity->Group = gn;
             heap_select::set_cpu_group_for_heap(heap_number, gn);
             heap_select::set_group_proc_for_heap(heap_number, gpn);
-            if (NumaNodeInfo::CanEnableGCNumaAware())
+            if (GCToOSInterface::CanEnableGCNumaAware())
             {  
                 PROCESSOR_NUMBER proc_no;
                 proc_no.Group    = gn;
@@ -5282,7 +5282,7 @@ void set_thread_group_affinity_for_heap(int heap_number, GCThreadAffinity* affin
                 proc_no.Reserved = 0;
 
                 uint16_t node_no = 0;
-                if (NumaNodeInfo::GetNumaProcessorNodeEx(&proc_no, &node_no))
+                if (GCToOSInterface::GetNumaProcessorNode(&proc_no, &node_no))
                     heap_select::set_numa_node_for_heap(heap_number, node_no);
             }
             else
@@ -5315,14 +5315,14 @@ void set_thread_affinity_mask_for_heap(int heap_number, GCThreadAffinity* affini
                     dprintf (3, ("Using processor %d for heap %d", proc_number, heap_number));
                     affinity->Processor = proc_number;
                     heap_select::set_proc_no_for_heap(heap_number, proc_number);
-                    if (NumaNodeInfo::CanEnableGCNumaAware())
+                    if (GCToOSInterface::CanEnableGCNumaAware())
                     {
                         uint16_t node_no = 0;
                         PROCESSOR_NUMBER proc_no;
                         proc_no.Group = 0;
                         proc_no.Number = (uint8_t)proc_number;
                         proc_no.Reserved = 0;
-                        if (NumaNodeInfo::GetNumaProcessorNodeEx(&proc_no, &node_no))
+                        if (GCToOSInterface::GetNumaProcessorNode(&proc_no, &node_no))
                         {
                             heap_select::set_numa_node_for_heap(heap_number, node_no);
                         }
@@ -5457,19 +5457,17 @@ void gc_heap::gc_thread_function ()
 
 bool virtual_alloc_commit_for_heap(void* addr, size_t size, int h_number)
 {
-#if defined(MULTIPLE_HEAPS) && !defined(FEATURE_REDHAWK) && !defined(FEATURE_PAL) && !defined(BUILD_AS_STANDALONE)
+#if defined(MULTIPLE_HEAPS) && !defined(FEATURE_REDHAWK)
     // Currently there is no way for us to specific the numa node to allocate on via hosting interfaces to
     // a host. This will need to be added later.
 #if !defined(FEATURE_CORECLR)
     if (!CLRMemoryHosted())
 #endif
     {
-        if (NumaNodeInfo::CanEnableGCNumaAware())
+        if (GCToOSInterface::CanEnableGCNumaAware())
         {
             uint32_t numa_node = heap_select::find_numa_node_from_heap_no(h_number);
-            void * ret = NumaNodeInfo::VirtualAllocExNuma(GetCurrentProcess(), addr, size, 
-                                                          MEM_COMMIT, PAGE_READWRITE, numa_node);
-            if (ret != NULL)
+            if (GCToOSInterface::VirtualCommit(addr, size, numa_node))
                 return true;
         }
     }
@@ -13343,7 +13341,7 @@ try_again:
                     org_hp->alloc_context_count--;
                     max_hp->alloc_context_count++;
                     acontext->set_alloc_heap(GCHeap::GetHeap(max_hp->heap_number));
-                    if (CPUGroupInfo::CanEnableGCCPUGroups())
+                    if (GCToOSInterface::CanEnableGCCPUGroups())
                     {   //only set ideal processor when max_hp and org_hp are in the same cpu
                         //group. DO NOT MOVE THREADS ACROSS CPU GROUPS
                         uint16_t org_gn = heap_select::find_cpu_group_from_heap_no(org_hp->heap_number);
@@ -19548,7 +19546,7 @@ void gc_heap::mark_phase (int condemned_gen_number, BOOL mark_only_p)
     {
 #endif //MULTIPLE_HEAPS
 
-        num_sizedrefs = SystemDomain::System()->GetTotalNumSizedRefHandles();
+        num_sizedrefs = GCToEEInterface::GetTotalNumSizedRefHandles();
 
 #ifdef MULTIPLE_HEAPS
 
@@ -24914,7 +24912,7 @@ void gc_heap::gc_thread_stub (void* arg)
         // We are about to set affinity for GC threads. It is a good place to set up NUMA and
         // CPU groups because the process mask, processor number, and group number are all
         // readily available.
-        if (CPUGroupInfo::CanEnableGCCPUGroups())
+        if (GCToOSInterface::CanEnableGCCPUGroups())
             set_thread_group_affinity_for_heap(heap->heap_number, &affinity);
         else
             set_thread_affinity_mask_for_heap(heap->heap_number, &affinity);
@@ -25707,7 +25705,7 @@ void gc_heap::background_mark_phase ()
 #endif //WRITE_WATCH
 #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
 
-            num_sizedrefs = SystemDomain::System()->GetTotalNumSizedRefHandles();
+            num_sizedrefs = GCToEEInterface::GetTotalNumSizedRefHandles();
 
             // this c_write is not really necessary because restart_vm
             // has an instruction that will flush the cpu cache (interlocked
@@ -33498,10 +33496,8 @@ HRESULT GCHeap::Initialize ()
         gc_heap::gc_thread_no_affinitize_p = true;
 
     uint32_t nhp_from_config = static_cast<uint32_t>(GCConfig::GetHeapCount());
-    // GetGCProcessCpuCount only returns up to 64 procs.
-    uint32_t nhp_from_process = CPUGroupInfo::CanEnableGCCPUGroups() ?
-                                CPUGroupInfo::GetNumActiveProcessors():
-                                GCToOSInterface::GetCurrentProcessCpuCount();
+    
+    uint32_t nhp_from_process = GCToOSInterface::GetCurrentProcessCpuCount();
 
     uint32_t nhp = ((nhp_from_config == 0) ? nhp_from_process :
                                              (min (nhp_from_config, nhp_from_process)));
@@ -35615,7 +35611,7 @@ size_t GCHeap::GetFinalizablePromotedCount()
 #endif //MULTIPLE_HEAPS
 }
 
-bool GCHeap::FinalizeAppDomain(AppDomain *pDomain, bool fRunFinalizers)
+bool GCHeap::FinalizeAppDomain(void *pDomain, bool fRunFinalizers)
 {
 #ifdef MULTIPLE_HEAPS
     bool foundp = false;
@@ -35937,7 +35933,7 @@ CFinalize::GetNumberFinalizableObjects()
 }
 
 BOOL
-CFinalize::FinalizeSegForAppDomain (AppDomain *pDomain, 
+CFinalize::FinalizeSegForAppDomain (void *pDomain, 
                                     BOOL fRunFinalizers, 
                                     unsigned int Seg)
 {
@@ -35980,7 +35976,7 @@ CFinalize::FinalizeSegForAppDomain (AppDomain *pDomain,
             }
             else
             {
-                if (pDomain->IsRudeUnload())
+                if (GCToEEInterface::AppDomainIsRudeUnload(pDomain))
                 {
                     MoveItem (i, Seg, FreeList);
                 }
@@ -35997,7 +35993,7 @@ CFinalize::FinalizeSegForAppDomain (AppDomain *pDomain,
 }
 
 bool
-CFinalize::FinalizeAppDomain (AppDomain *pDomain, bool fRunFinalizers)
+CFinalize::FinalizeAppDomain (void *pDomain, bool fRunFinalizers)
 {
     bool finalizedFound = false;
 
index 811e6f9..ea44a09 100644 (file)
@@ -72,6 +72,8 @@ public:
       "Specifies if you want to turn on logging in GC")                                        \
   BOOL_CONFIG(ConfigLogEnabled, "GCConfigLogEnabled", false,                                   \
       "Specifies the name of the GC config log file")                                          \
+  BOOL_CONFIG(GCNumaAware,   "GCNumaAware", true, "Enables numa allocations in the GC")        \
+  BOOL_CONFIG(GCCpuGroup,    "GCCpuGroup", false, "Enables CPU groups in the GC")              \
   INT_CONFIG(HeapVerifyLevel, "HeapVerify", HEAPVERIFY_NONE,                                   \
       "When set verifies the integrity of the managed heap on entry and exit of each GC")      \
   INT_CONFIG(LOHCompactionMode, "GCLOHCompact", 0, "Specifies the LOH compaction mode")        \
index 4fc8ca6..1aca1dc 100644 (file)
@@ -191,7 +191,7 @@ inline void GCToEEInterface::HandleFatalError(unsigned int exitCode)
     g_theGCToCLR->HandleFatalError(exitCode);
 }
 
-inline bool GCToEEInterface::ShouldFinalizeObjectForUnload(AppDomain* pDomain, Object* obj)
+inline bool GCToEEInterface::ShouldFinalizeObjectForUnload(void* pDomain, Object* obj)
 {
     assert(g_theGCToCLR != nullptr);
     return g_theGCToCLR->ShouldFinalizeObjectForUnload(pDomain, obj);
@@ -275,4 +275,40 @@ inline IGCToCLREventSink* GCToEEInterface::EventSink()
     return g_theGCToCLR->EventSink();
 }
 
+inline uint32_t GCToEEInterface::GetDefaultDomainIndex()
+{
+    assert(g_theGCToCLR != nullptr);
+    return g_theGCToCLR->GetDefaultDomainIndex();   
+}
+
+inline void *GCToEEInterface::GetAppDomainAtIndex(uint32_t appDomainIndex)
+{
+    assert(g_theGCToCLR != nullptr);
+    return g_theGCToCLR->GetAppDomainAtIndex(appDomainIndex);
+}
+
+inline bool GCToEEInterface::AppDomainCanAccessHandleTable(uint32_t appDomainID)
+{
+    assert(g_theGCToCLR != nullptr);
+    return g_theGCToCLR->AppDomainCanAccessHandleTable(appDomainID);
+}
+
+inline uint32_t GCToEEInterface::GetIndexOfAppDomainBeingUnloaded()
+{
+    assert(g_theGCToCLR != nullptr);
+    return g_theGCToCLR->GetIndexOfAppDomainBeingUnloaded();
+}
+
+inline uint32_t GCToEEInterface::GetTotalNumSizedRefHandles()
+{
+    assert(g_theGCToCLR != nullptr);
+    return g_theGCToCLR->GetTotalNumSizedRefHandles();
+}
+
+inline bool GCToEEInterface::AppDomainIsRudeUnload(void *appDomain)
+{
+    assert(g_theGCToCLR != nullptr);
+    return g_theGCToCLR->AppDomainIsRudeUnload(appDomain);
+}
+
 #endif // __GCTOENV_EE_STANDALONE_INL__
index 67f906a..fe85964 100644 (file)
@@ -209,7 +209,7 @@ public:
     PER_HEAP_ISOLATED size_t GetFinalizablePromotedCount();
 
     void SetFinalizeQueueForShutdown(bool fHasLock);
-    bool FinalizeAppDomain(AppDomain *pDomain, bool fRunFinalizers);
+    bool FinalizeAppDomain(void *pDomain, bool fRunFinalizers);
     bool ShouldRestartFinalizerWatchDog();
 
     void DiagWalkObject (Object* obj, walk_fn fn, void* context);
index 82d8934..ae887e6 100644 (file)
@@ -319,7 +319,7 @@ public:
     // Asks the EE if it wants a particular object to be finalized when unloading
     // an app domain.
     virtual
-    bool ShouldFinalizeObjectForUnload(AppDomain* pDomain, Object* obj) = 0;
+    bool ShouldFinalizeObjectForUnload(void* pDomain, Object* obj) = 0;
 
     // Offers the EE the option to finalize the given object eagerly, i.e.
     // not on the finalizer thread but on the current thread. The
@@ -409,6 +409,24 @@ public:
     // Returns an IGCToCLREventSink instance that can be used to fire events.
     virtual
     IGCToCLREventSink* EventSink() = 0;
+
+    virtual
+    uint32_t GetDefaultDomainIndex() = 0;
+
+    virtual
+    void *GetAppDomainAtIndex(uint32_t appDomainIndex) = 0;
+
+    virtual
+    uint32_t GetIndexOfAppDomainBeingUnloaded() = 0;
+
+    virtual
+    bool AppDomainCanAccessHandleTable(uint32_t appDomainID) = 0;
+
+    virtual
+    uint32_t GetTotalNumSizedRefHandles() = 0;
+
+    virtual
+    bool AppDomainIsRudeUnload(void *appDomain) = 0;
 };
 
 #endif // _GCINTERFACE_EE_H_
index 58482b8..55c755d 100644 (file)
@@ -581,7 +581,7 @@ public:
     */
 
     // Finalizes an app domain by finalizing objects within that app domain.
-    virtual bool FinalizeAppDomain(AppDomain* pDomain, bool fRunFinalizers) = 0;
+    virtual bool FinalizeAppDomain(void* pDomain, bool fRunFinalizers) = 0;
 
     // Finalizes all registered objects for shutdown, even if they are still reachable.
     virtual void SetFinalizeQueueForShutdown(bool fHasLock) = 0;
index 21eedb2..2d157c8 100644 (file)
@@ -72,6 +72,7 @@ GC_Initialize(
     // Initialize GCConfig before anything else - initialization of our
     // various components may want to query the current configuration.
     GCConfig::Initialize();
+
     if (!GCToOSInterface::Initialize())
     {
         return E_FAIL;
index 0cb72ec..c2f7356 100644 (file)
@@ -3750,7 +3750,7 @@ private:
 
     }
 
-    BOOL FinalizeSegForAppDomain (AppDomain *pDomain, 
+    BOOL FinalizeSegForAppDomain (void *pDomain, 
                                   BOOL fRunFinalizers, 
                                   unsigned int Seg);
 
@@ -3774,7 +3774,7 @@ public:
     void DiscardNonCriticalObjects();
 
     //Methods used by the app domain unloading call to finalize objects in an app domain
-    bool FinalizeAppDomain (AppDomain *pDomain, bool fRunFinalizers);
+    bool FinalizeAppDomain (void *pDomain, bool fRunFinalizers);
 
     void CheckFinalizerObjects();
 
index 0c05715..13fb196 100644 (file)
@@ -363,11 +363,9 @@ void ValidateFetchObjrefForHandle(OBJECTREF objref, ADIndex appDomainIndex)
     BEGIN_DEBUG_ONLY_CODE;
     VALIDATEOBJECTREF (objref);
 
-    AppDomain *pDomain = SystemDomain::GetAppDomainAtIndex(appDomainIndex);
-
-    // Access to a handle in unloaded domain is not allowed
-    _ASSERTE(pDomain != NULL);
-    _ASSERTE(!pDomain->NoAccessToHandleTable());
+#ifndef DACCESS_COMPILE
+    _ASSERTE(GCToEEInterface::AppDomainCanAccessHandleTable(appDomainIndex.m_dwIndex));
+#endif // DACCESS_COMPILE
 
     END_DEBUG_ONLY_CODE;
 }
@@ -384,12 +382,9 @@ void ValidateAssignObjrefForHandle(OBJECTREF objref, ADIndex appDomainIndex)
 
     VALIDATEOBJECTREF (objref);
 
-    AppDomain *pDomain = SystemDomain::GetAppDomainAtIndex(appDomainIndex);
-
-    // Access to a handle in unloaded domain is not allowed
-    _ASSERTE(pDomain != NULL);
-    _ASSERTE(!pDomain->NoAccessToHandleTable());
-
+#ifndef DACCESS_COMPILE
+    _ASSERTE(GCToEEInterface::AppDomainCanAccessHandleTable(appDomainIndex.m_dwIndex));
+#endif // DACCESS_COMPILE
     END_DEBUG_ONLY_CODE;
 }
 
@@ -407,12 +402,12 @@ void ValidateAppDomainForHandle(OBJECTHANDLE handle)
 #else
     BEGIN_DEBUG_ONLY_CODE;
     ADIndex id = HndGetHandleADIndex(handle);
-    AppDomain *pUnloadingDomain = SystemDomain::AppDomainBeingUnloaded();
-    if (!pUnloadingDomain || pUnloadingDomain->GetIndex() != id)
+    ADIndex unloadingDomain(GCToEEInterface::GetIndexOfAppDomainBeingUnloaded());
+    if (unloadingDomain != id)
     {
         return;
     }
-    if (!pUnloadingDomain->NoAccessToHandleTable())
+    if (GCToEEInterface::AppDomainCanAccessHandleTable(unloadingDomain.m_dwIndex))
     {
         return;
     }
@@ -604,7 +599,7 @@ void HndLogSetEvent(OBJECTHANDLE handle, _UNCHECKED_OBJECTREF value)
     {
         uint32_t hndType = HandleFetchType(handle);
         ADIndex appDomainIndex = HndGetHandleADIndex(handle);   
-        AppDomain* pAppDomain = SystemDomain::GetAppDomainAtIndex(appDomainIndex);
+        void* pAppDomain = GCToEEInterface::GetAppDomainAtIndex(appDomainIndex.m_dwIndex);
         uint32_t generation = value != 0 ? g_theGCHeap->WhichGeneration(value) : 0;
         FIRE_EVENT(SetGCHandle, (void *)handle, (void *)value, hndType, generation, (uint64_t)pAppDomain);
         FIRE_EVENT(PrvSetGCHandle, (void *) handle, (void *)value, hndType, generation, (uint64_t)pAppDomain);
@@ -620,7 +615,7 @@ void HndLogSetEvent(OBJECTHANDLE handle, _UNCHECKED_OBJECTREF value)
             // to this structure as our closure's context pointer.
             struct ClosureCapture
             {
-                AppDomain* pAppDomain;
+                void* pAppDomain;
                 Object* overlapped;
             };
 
index 8c0be42..0189438 100644 (file)
@@ -1041,7 +1041,7 @@ void TableRelocateAsyncPinHandles(HandleTable *pTable,
     }
     CONTRACTL_END;
 
-    _ASSERTE (pTargetTable->uADIndex == SystemDomain::System()->DefaultDomain()->GetIndex());  // must be for default domain
+    _ASSERTE (pTargetTable->uADIndex == ADIndex(GCToEEInterface::GetDefaultDomainIndex()));  // must be for default domain
 
     BOOL fGotException = FALSE;
     TableSegment *pSegment = pTable->pSegmentList;
index c2af23a..09460d2 100644 (file)
@@ -541,12 +541,7 @@ int getNumberOfSlots()
     if (!IsServerHeap())
         return 1;
 
-#ifdef FEATURE_REDHAWK
     return GCToOSInterface::GetCurrentProcessCpuCount();
-#else
-    return (CPUGroupInfo::CanEnableGCCPUGroups() ? CPUGroupInfo::GetNumActiveProcessors() :
-                                                   GCToOSInterface::GetCurrentProcessCpuCount());
-#endif
 }
 
 class HandleTableBucketHolder
index 0311c0e..a705ae2 100644 (file)
@@ -278,7 +278,7 @@ void GCToEEInterface::HandleFatalError(unsigned int exitCode)
     abort();
 }
 
-bool GCToEEInterface::ShouldFinalizeObjectForUnload(AppDomain* pDomain, Object* obj)
+bool GCToEEInterface::ShouldFinalizeObjectForUnload(void* pDomain, Object* obj)
 {
     return true;
 }
@@ -341,3 +341,33 @@ void GCToEEInterface::WalkAsyncPinnedForPromotion(Object* object, ScanContext* s
 void GCToEEInterface::WalkAsyncPinned(Object* object, void* context, void (*callback)(Object*, Object*, void*))
 {
 }
+
+uint32_t GCToEEInterface::GetDefaultDomainIndex()
+{
+    return -1;
+}
+
+void *GCToEEInterface::GetAppDomainAtIndex(uint32_t appDomainIndex)
+{
+    return nullptr;
+}
+
+bool GCToEEInterface::AppDomainCanAccessHandleTable(uint32_t appDomainID)
+{
+    return false;
+}
+
+uint32_t GCToEEInterface::GetIndexOfAppDomainBeingUnloaded()
+{
+    return -1;
+}
+
+uint32_t GCToEEInterface::GetTotalNumSizedRefHandles()
+{
+    return -1;
+}
+
+bool GCToEEInterface::AppDomainIsRudeUnload(void *appDomain)
+{
+    return false;
+}
index 012ab44..4dc2da0 100644 (file)
@@ -167,32 +167,8 @@ public:
         GCSTRESS_INSTR_NGEN = 8,    // GC on every allowable NGEN instr
         GCSTRESS_UNIQUE = 16,   // GC only on a unique stack trace
     };
-
-    int     GetHeapVerifyLevel() { return 0; }
-    bool    IsHeapVerifyEnabled() { return GetHeapVerifyLevel() != 0; }
-
-    GCStressFlags GetGCStressLevel()        const { return GCSTRESS_NONE; }
-    bool    IsGCStressMix()                 const { return false; }
-
-    int     GetGCtraceStart()               const { return 0; }
-    int     GetGCtraceEnd()               const { return 0; }//1000000000; }
-    int     GetGCtraceFac()               const { return 0; }
-    int     GetGCprnLvl()               const { return 0; }
-    bool    IsGCBreakOnOOMEnabled()         const { return false; }
-    int     GetGCgen0size()               const { return 0; }
-    int     GetSegmentSize()               const { return 0; }
-    int     GetGCconcurrent()               const { return 1; }
-    int     GetGCLatencyMode()              const { return 1; }
-    int     GetGCForceCompact()             const { return 0; }
-    int     GetGCRetainVM()                const { return 0; }
-    int     GetGCTrimCommit()               const { return 0; }
-    int     GetGCLOHCompactionMode()        const { return 0; }
-
-    bool    GetGCConservative()             const { return true; }
 };
 
-extern EEConfig * g_pConfig;
-
 #include "etmdummy.h"
 #define ETW_EVENT_ENABLED(e,f) false
 
index 1025810..fbb94fd 100644 (file)
@@ -7,6 +7,7 @@ include(configure.cmake)
 set(GC_PAL_SOURCES
     gcenv.unix.cpp
     events.cpp
-    cgroup.cpp)
+    cgroup.cpp
+    cpuinfo.cpp)
 
 add_library(gc_unix STATIC ${GC_PAL_SOURCES} ${VERSION_FILE_PATH})
index 3a56be9..a4a59b6 100644 (file)
@@ -10,6 +10,8 @@
 #cmakedefine01 HAVE_PTHREAD_THREADID_NP
 #cmakedefine01 HAVE_PTHREAD_GETTHREADID_NP
 #cmakedefine01 HAVE_SCHED_GETCPU
+#cmakedefine01 HAVE_NUMA_H
+#cmakedefine01 HAVE_VM_ALLOCATE
 #cmakedefine01 HAVE_PTHREAD_CONDATTR_SETCLOCK
 #cmakedefine01 HAVE_MACH_ABSOLUTE_TIME
 #cmakedefine01 HAVE_SCHED_GETAFFINITY
index b118232..c2d6afe 100644 (file)
@@ -1,5 +1,7 @@
 check_include_files(sys/time.h HAVE_SYS_TIME_H)
 check_include_files(sys/mman.h HAVE_SYS_MMAN_H)
+check_include_files(numa.h HAVE_NUMA_H)
+check_function_exists(vm_allocate HAVE_VM_ALLOCATE)
 check_cxx_source_compiles("
     #include <pthread.h>
     #include <stdint.h>
index a1e1296..23a4935 100644 (file)
@@ -319,8 +319,9 @@ bool GCToOSInterface::VirtualRelease(void* address, size_t size)
 //  size    - size of the virtual memory range
 // Return:
 //  true if it has succeeded, false if it has failed
-bool GCToOSInterface::VirtualCommit(void* address, size_t size)
+bool GCToOSInterface::VirtualCommit(void* address, size_t size, uint32_t node)
 {
+    assert(node == NUMA_NODE_UNDEFINED && "Numa allocation is not ported to local GC on unix yet");
     return mprotect(address, size, PROT_WRITE | PROT_READ) == 0;
 }
 
@@ -697,6 +698,26 @@ uint32_t GCToOSInterface::GetTotalProcessorCount()
     return g_logicalCpuCount;
 }
 
+bool GCToOSInterface::CanEnableGCNumaAware()
+{
+    return false;
+}
+
+bool GCToOSInterface::GetNumaProcessorNode(PPROCESSOR_NUMBER proc_no, uint16_t *node_no)
+{
+    assert(!"Numa has not been ported to local GC for unix");
+    return false;
+}
+
+bool GCToOSInterface::CanEnableGCCPUGroups()
+{
+    return false;
+}
+
+void GCToOSInterface::GetGroupForProcessor(uint16_t processor_number, uint16_t* group_number, uint16_t* group_processor_number)
+{
+    assert(!"CpuGroup has not been ported to local GC for unix");
+}
 
 // Initialize the critical section
 void CLRCriticalSection::Initialize()
index 85bcd85..4be5ec5 100644 (file)
 #include "env/gcenv.structs.h"
 #include "env/gcenv.base.h"
 #include "env/gcenv.os.h"
+#include "env/gcenv.ee.h"
 #include "env/gcenv.windows.inl"
 #include "env/volatile.h"
+#include "gcconfig.h"
 
 GCSystemInfo g_SystemInfo;
 
@@ -30,6 +32,187 @@ typedef BOOL (WINAPI *PQUERY_INFORMATION_JOB_OBJECT)(HANDLE jobHandle, JOBOBJECT
 
 namespace {
 
+static bool g_fEnableGCNumaAware;
+
+struct CPU_Group_Info 
+{
+    WORD    nr_active;  // at most 64
+    WORD    reserved[1];
+    WORD    begin;
+    WORD    end;
+    DWORD_PTR   active_mask;
+    DWORD   groupWeight;
+    DWORD   activeThreadWeight;
+};
+
+static bool g_fEnableGCCPUGroups;
+static bool g_fHadSingleProcessorAtStartup;
+static DWORD  g_nGroups;
+static DWORD g_nProcessors;
+static CPU_Group_Info *g_CPUGroupInfoArray;
+
+void InitNumaNodeInfo()
+{
+    ULONG highest = 0;
+    
+    g_fEnableGCNumaAware = false;
+
+    if (!GCConfig::GetGCNumaAware())
+        return;
+
+    // fail to get the highest numa node number
+    if (!GetNumaHighestNodeNumber(&highest) || (highest == 0))
+        return;
+
+    g_fEnableGCNumaAware = true;
+    return;
+}
+
+#if (defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_))
+// Calculate greatest common divisor
+DWORD GCD(DWORD u, DWORD v)
+{
+    while (v != 0)
+    {
+        DWORD dwTemp = v;
+        v = u % v;
+        u = dwTemp;
+    }
+
+    return u;
+}
+
+// Calculate least common multiple
+DWORD LCM(DWORD u, DWORD v)
+{
+    return u / GCD(u, v) * v;
+}
+#endif
+
+bool InitCPUGroupInfoArray()
+{
+#if (defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_))
+    BYTE *bBuffer = NULL;
+    SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX *pSLPIEx = NULL;
+    SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX *pRecord = NULL;
+    DWORD cbSLPIEx = 0;
+    DWORD byteOffset = 0;
+    DWORD dwNumElements = 0;
+    DWORD dwWeight = 1;
+
+    if (GetLogicalProcessorInformationEx(RelationGroup, pSLPIEx, &cbSLPIEx) &&
+                      GetLastError() != ERROR_INSUFFICIENT_BUFFER)
+        return false;
+
+    assert(cbSLPIEx);
+
+    // Fail to allocate buffer
+    bBuffer = new (std::nothrow) BYTE[ cbSLPIEx ];
+    if (bBuffer == NULL)
+        return false;
+
+    pSLPIEx = (SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX *)bBuffer;
+    if (!GetLogicalProcessorInformationEx(RelationGroup, pSLPIEx, &cbSLPIEx))
+    {
+        delete[] bBuffer;
+        return false;
+    }
+
+    pRecord = pSLPIEx;
+    while (byteOffset < cbSLPIEx)
+    {
+        if (pRecord->Relationship == RelationGroup)
+        {
+            g_nGroups = pRecord->Group.ActiveGroupCount;
+            break;
+        }
+        byteOffset += pRecord->Size;
+        pRecord = (SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX *)(bBuffer + byteOffset);
+    }
+
+    g_CPUGroupInfoArray = new (std::nothrow) CPU_Group_Info[g_nGroups];
+    if (g_CPUGroupInfoArray == NULL) 
+    {
+        delete[] bBuffer;
+        return false;
+    }
+
+    for (DWORD i = 0; i < g_nGroups; i++)
+    {
+        g_CPUGroupInfoArray[i].nr_active   = (WORD)pRecord->Group.GroupInfo[i].ActiveProcessorCount;
+        g_CPUGroupInfoArray[i].active_mask = pRecord->Group.GroupInfo[i].ActiveProcessorMask;
+        g_nProcessors += g_CPUGroupInfoArray[i].nr_active;
+        dwWeight = LCM(dwWeight, (DWORD)g_CPUGroupInfoArray[i].nr_active);
+    }
+
+    // The number of threads per group that can be supported will depend on the number of CPU groups
+    // and the number of LPs within each processor group. For example, when the number of LPs in
+    // CPU groups is the same and is 64, the number of threads per group before weight overflow
+    // would be 2^32/2^6 = 2^26 (64M threads)
+    for (DWORD i = 0; i < g_nGroups; i++)
+    {
+        g_CPUGroupInfoArray[i].groupWeight = dwWeight / (DWORD)g_CPUGroupInfoArray[i].nr_active;
+        g_CPUGroupInfoArray[i].activeThreadWeight = 0;
+    }
+
+    delete[] bBuffer;  // done with it; free it
+    return true;
+#else
+    return false;
+#endif
+}
+
+bool InitCPUGroupInfoRange()
+{
+#if (defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_))
+    WORD begin   = 0;
+    WORD nr_proc = 0;
+
+    for (WORD i = 0; i < g_nGroups; i++) 
+    {
+        nr_proc += g_CPUGroupInfoArray[i].nr_active;
+        g_CPUGroupInfoArray[i].begin = begin;
+        g_CPUGroupInfoArray[i].end   = nr_proc - 1;
+        begin = nr_proc;
+    }
+
+    return true;
+#else
+    return false;
+#endif
+}
+
+void InitCPUGroupInfo()
+{
+    g_fEnableGCCPUGroups = false;
+
+#if (defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_))
+    if (!GCConfig::GetGCCpuGroup())
+        return;
+
+    if (!InitCPUGroupInfoArray())
+        return;
+
+    if (!InitCPUGroupInfoRange())
+        return;
+
+    // only enable CPU groups if more than one group exists
+    g_fEnableGCCPUGroups = g_nGroups > 1;
+#endif // _TARGET_AMD64_ || _TARGET_ARM64_
+
+    // Determine if the process is affinitized to a single processor (or if the system has a single processor)
+    DWORD_PTR processAffinityMask, systemAffinityMask;
+    if (::GetProcessAffinityMask(::GetCurrentProcess(), &processAffinityMask, &systemAffinityMask))
+    {
+        processAffinityMask &= systemAffinityMask;
+        if (processAffinityMask != 0 && // only one CPU group is involved
+            (processAffinityMask & (processAffinityMask - 1)) == 0) // only one bit is set
+        {
+            g_fHadSingleProcessorAtStartup = true;
+        }
+    }
+}
+
 void GetProcessMemoryLoad(LPMEMORYSTATUSEX pMSEX)
 {
     pMSEX->dwLength = sizeof(MEMORYSTATUSEX);
@@ -177,6 +360,9 @@ bool GCToOSInterface::Initialize()
 
     assert(systemInfo.dwPageSize == 0x1000);
 
+    InitNumaNodeInfo();
+    InitCPUGroupInfo();
+
     return true;
 }
 
@@ -320,9 +506,17 @@ bool GCToOSInterface::VirtualRelease(void* address, size_t size)
 //  size    - size of the virtual memory range
 // Return:
 //  true if it has succeeded, false if it has failed
-bool GCToOSInterface::VirtualCommit(void* address, size_t size)
+bool GCToOSInterface::VirtualCommit(void* address, size_t size, uint32_t node)
 {
-    return ::VirtualAlloc(address, size, MEM_COMMIT, PAGE_READWRITE) != nullptr;
+    if (node == NUMA_NODE_UNDEFINED)
+    {
+        return ::VirtualAlloc(address, size, MEM_COMMIT, PAGE_READWRITE) != nullptr;
+    }
+    else
+    {
+        assert(g_fEnableGCNumaAware);
+        return ::VirtualAllocExNuma(::GetCurrentProcess(), address, size, MEM_COMMIT, PAGE_READWRITE, node) != nullptr;
+    }
 }
 
 // Decomit virtual memory range.
@@ -623,6 +817,63 @@ uint32_t GCToOSInterface::GetLowPrecisionTimeStamp()
     return ::GetTickCount();
 }
 
+// Gets the total number of processors on the machine, not taking
+// into account current process affinity.
+// Return:
+//  Number of processors on the machine
+uint32_t GCToOSInterface::GetTotalProcessorCount()
+{
+    if (CanEnableGCCPUGroups())
+    {
+        return g_nProcessors;
+    }
+    else
+    {
+        return g_SystemInfo.dwNumberOfProcessors;
+    }
+}
+bool GCToOSInterface::CanEnableGCNumaAware()
+{
+    return g_fEnableGCNumaAware;
+}
+
+bool GCToOSInterface::GetNumaProcessorNode(PPROCESSOR_NUMBER proc_no, uint16_t *node_no)
+{
+    assert(g_fEnableGCNumaAware);
+    return ::GetNumaProcessorNodeEx(proc_no, node_no) != FALSE;
+}
+
+bool GCToOSInterface::CanEnableGCCPUGroups()
+{
+    return g_fEnableGCCPUGroups;
+}
+
+void GCToOSInterface::GetGroupForProcessor(uint16_t processor_number, uint16_t* group_number, uint16_t* group_processor_number)
+{
+    assert(g_fEnableGCCPUGroups);
+
+#if !defined(FEATURE_REDHAWK) && (defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_))
+    WORD bTemp = 0;
+    WORD bDiff = processor_number - bTemp;
+
+    for (WORD i=0; i < g_nGroups; i++)
+    {
+        bTemp += g_CPUGroupInfoArray[i].nr_active;
+        if (bTemp > processor_number)
+        {
+            *group_number = i;
+            *group_processor_number = bDiff;
+            break;
+        }
+        bDiff = processor_number - bTemp;
+    }
+#else
+    *group_number = 0;
+    *group_processor_number = 0;
+#endif
+}
+
 // Parameters of the GC thread stub
 struct GCThreadStubParam
 {
@@ -644,15 +895,6 @@ static DWORD GCThreadStub(void* param)
     return 0;
 }
 
-// Gets the total number of processors on the machine, not taking
-// into account current process affinity.
-// Return:
-//  Number of processors on the machine
-uint32_t GCToOSInterface::GetTotalProcessorCount()
-{
-    return g_SystemInfo.dwNumberOfProcessors;
-}
-
 // Initialize the critical section
 void CLRCriticalSection::Initialize()
 {
@@ -817,4 +1059,3 @@ bool GCEvent::CreateOSManualEventNoThrow(bool initialState)
     m_impl = event.release();
     return true;
 }
-
index d1dfac3..6a48e0c 100644 (file)
@@ -748,7 +748,7 @@ BYTE * ClrVirtualAllocWithinRange(const BYTE *pMinAddr,
 /*static*/ NumaNodeInfo::PVAExN NumaNodeInfo::m_pVirtualAllocExNuma = NULL;
 
 /*static*/ LPVOID NumaNodeInfo::VirtualAllocExNuma(HANDLE hProc, LPVOID lpAddr, SIZE_T dwSize,
-                                    DWORD allocType, DWORD prot, DWORD node)
+                         DWORD allocType, DWORD prot, DWORD node)
 {
     return (*m_pVirtualAllocExNuma)(hProc, lpAddr, dwSize, allocType, prot, node);
 }
@@ -766,7 +766,7 @@ BYTE * ClrVirtualAllocWithinRange(const BYTE *pMinAddr,
 #if !defined(FEATURE_REDHAWK)
     //check for numa support if multiple heaps are used
     ULONG highest = 0;
-       
+    
     if (CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_GCNumaAware) == 0)
         return FALSE;
 
index 581d7fc..cdc3a68 100644 (file)
@@ -994,7 +994,7 @@ void GCToEEInterface::HandleFatalError(unsigned int exitCode)
     EEPOLICY_HANDLE_FATAL_ERROR(exitCode);
 }
 
-bool GCToEEInterface::ShouldFinalizeObjectForUnload(AppDomain* pDomain, Object* obj)
+bool GCToEEInterface::ShouldFinalizeObjectForUnload(void* pDomain, Object* obj)
 {
     // CoreCLR does not have appdomains, so this code path is dead. Other runtimes may
     // choose to inspect the object being finalized here.
@@ -1429,3 +1429,51 @@ IGCToCLREventSink* GCToEEInterface::EventSink()
 
     return &g_gcToClrEventSink;
 }
+
+uint32_t GCToEEInterface::GetDefaultDomainIndex()
+{
+    LIMITED_METHOD_CONTRACT;
+
+    return SystemDomain::System()->DefaultDomain()->GetIndex().m_dwIndex;
+}
+
+void *GCToEEInterface::GetAppDomainAtIndex(uint32_t appDomainIndex)
+{
+    LIMITED_METHOD_CONTRACT;
+
+    ADIndex index(appDomainIndex);
+    return static_cast<void *>(SystemDomain::GetAppDomainAtIndex(index));
+}
+
+bool GCToEEInterface::AppDomainCanAccessHandleTable(uint32_t appDomainID)
+{
+    LIMITED_METHOD_CONTRACT;
+
+    ADIndex index(appDomainID);
+    AppDomain *pDomain = SystemDomain::GetAppDomainAtIndex(index);
+    return (pDomain != NULL) && !pDomain->NoAccessToHandleTable();
+}
+
+uint32_t GCToEEInterface::GetIndexOfAppDomainBeingUnloaded()
+{
+    LIMITED_METHOD_CONTRACT;
+
+    return SystemDomain::IndexOfAppDomainBeingUnloaded().m_dwIndex;
+}
+
+uint32_t GCToEEInterface::GetTotalNumSizedRefHandles()
+{
+    LIMITED_METHOD_CONTRACT;
+
+    return SystemDomain::System()->GetTotalNumSizedRefHandles();
+}
+
+
+bool GCToEEInterface::AppDomainIsRudeUnload(void *appDomain)
+{
+    LIMITED_METHOD_CONTRACT;
+
+    AppDomain *realPtr = static_cast<AppDomain *>(appDomain);
+    return realPtr->IsRudeUnload() != FALSE;
+}
+
index 0dbf70d..ca32cec 100644 (file)
@@ -49,7 +49,7 @@ public:
 
     void EnableFinalization(bool foundFinalizers);
     void HandleFatalError(unsigned int exitCode);
-    bool ShouldFinalizeObjectForUnload(AppDomain* pDomain, Object* obj);
+    bool ShouldFinalizeObjectForUnload(void* pDomain, Object* obj);
     bool ForceFullGCToBeBlocking();
     bool EagerFinalized(Object* obj);
     MethodTable* GetFreeObjectMethodTable();
@@ -63,6 +63,13 @@ public:
     void WalkAsyncPinnedForPromotion(Object* object, ScanContext* sc, promote_func* callback);
     void WalkAsyncPinned(Object* object, void* context, void(*callback)(Object*, Object*, void*));
     IGCToCLREventSink* EventSink();
+
+    uint32_t GetDefaultDomainIndex();
+    void *GetAppDomainAtIndex(uint32_t appDomainIndex);
+    bool AppDomainCanAccessHandleTable(uint32_t appDomainID);
+    uint32_t GetIndexOfAppDomainBeingUnloaded();
+    uint32_t GetTotalNumSizedRefHandles();
+    bool AppDomainIsRudeUnload(void *appDomain);
 };
 
 } // namespace standalone
index 99e9ff6..114cbba 100644 (file)
@@ -211,11 +211,18 @@ bool GCToOSInterface::VirtualRelease(void* address, size_t size)
 //  size    - size of the virtual memory range
 // Return:
 //  true if it has succeeded, false if it has failed
-bool GCToOSInterface::VirtualCommit(void* address, size_t size)
+bool GCToOSInterface::VirtualCommit(void* address, size_t size, uint32_t node)
 {
     LIMITED_METHOD_CONTRACT;
 
-    return ::ClrVirtualAlloc(address, size, MEM_COMMIT, PAGE_READWRITE) != NULL;
+    if (node == NUMA_NODE_UNDEFINED)
+    {
+        return ::ClrVirtualAlloc(address, size, MEM_COMMIT, PAGE_READWRITE) != NULL;
+    }
+    else
+    {
+        return NumaNodeInfo::VirtualAllocExNuma(::GetCurrentProcess(), address, size, MEM_COMMIT, PAGE_READWRITE, node) != NULL;
+    }
 }
 
 // Decomit virtual memory range.
@@ -708,7 +715,42 @@ uint32_t GCToOSInterface::GetTotalProcessorCount()
 {
     LIMITED_METHOD_CONTRACT;
 
-    return g_SystemInfo.dwNumberOfProcessors;
+    if (CPUGroupInfo::CanEnableGCCPUGroups())
+    {
+        return CPUGroupInfo::GetNumActiveProcessors();
+    }
+    else
+    {
+        return g_SystemInfo.dwNumberOfProcessors;
+    }
+}
+
+bool GCToOSInterface::CanEnableGCNumaAware()
+{
+    LIMITED_METHOD_CONTRACT;
+
+    return NumaNodeInfo::CanEnableGCNumaAware() != FALSE;
+}
+
+bool GCToOSInterface::GetNumaProcessorNode(PPROCESSOR_NUMBER proc_no, uint16_t *node_no)
+{
+    LIMITED_METHOD_CONTRACT;
+
+    return NumaNodeInfo::GetNumaProcessorNodeEx(proc_no, node_no) != FALSE;
+}
+
+bool GCToOSInterface::CanEnableGCCPUGroups()
+{
+    LIMITED_METHOD_CONTRACT;
+
+    return CPUGroupInfo::CanEnableGCCPUGroups() != FALSE;
+}
+
+void GCToOSInterface::GetGroupForProcessor(uint16_t processor_number, uint16_t* group_number, uint16_t* group_processor_number)
+{
+    LIMITED_METHOD_CONTRACT;
+
+    return CPUGroupInfo::GetGroupForProcessor(processor_number, group_number, group_processor_number);
 }
 
 // Initialize the critical section