set (GC_LINK_LIBRARIES
${STATIC_MT_CRT_LIB}
${STATIC_MT_VCRT_LIB}
- kernel32.lib)
+ kernel32.lib
+ advapi32.lib)
else()
set (GC_LINK_LIBRARIES)
endif(WIN32)
// true if it has succeeded, false if it has failed
static bool VirtualCommit(void *address, size_t size, uint16_t node = NUMA_NODE_UNDEFINED);
+ // Reserve and Commit virtual memory range for Large Pages
+ // Parameters:
+ // size - size of the virtual memory range
+ // Return:
+ // Address of the allocated memory
+ static void* VirtualReserveAndCommitLargePages(size_t size);
+
// Decomit virtual memory range.
// Parameters:
// address - starting virtual address
#endif //USE_INTROSORT
void* virtual_alloc (size_t size);
+void* virtual_alloc (size_t size, bool use_large_pages_p);
void virtual_free (void* add, size_t size);
/* per heap static initialization */
size_t gc_heap::eph_gen_starts_size = 0;
heap_segment* gc_heap::segment_standby_list;
+size_t gc_heap::use_large_pages_p = 0;
size_t gc_heap::last_gc_index = 0;
#ifdef SEG_MAPPING_TABLE
size_t gc_heap::min_segment_size = 0;
initial_memory_details memory_details;
-BOOL reserve_initial_memory (size_t normal_size, size_t large_size, size_t num_heaps)
+BOOL reserve_initial_memory (size_t normal_size, size_t large_size, size_t num_heaps, bool use_large_pages_p)
{
BOOL reserve_success = FALSE;
size_t requestedMemory = memory_details.block_count * (normal_size + large_size);
- uint8_t* allatonce_block = (uint8_t*)virtual_alloc (requestedMemory);
+ uint8_t* allatonce_block = (uint8_t*)virtual_alloc (requestedMemory, use_large_pages_p);
if (allatonce_block)
{
g_gc_lowest_address = allatonce_block;
// try to allocate 2 blocks
uint8_t* b1 = 0;
uint8_t* b2 = 0;
- b1 = (uint8_t*)virtual_alloc (memory_details.block_count * normal_size);
+ b1 = (uint8_t*)virtual_alloc (memory_details.block_count * normal_size, use_large_pages_p);
if (b1)
{
- b2 = (uint8_t*)virtual_alloc (memory_details.block_count * large_size);
+ b2 = (uint8_t*)virtual_alloc (memory_details.block_count * large_size, use_large_pages_p);
if (b2)
{
memory_details.allocation_pattern = initial_memory_details::TWO_STAGE;
memory_details.block_size_normal :
memory_details.block_size_large);
current_block->memory_base =
- (uint8_t*)virtual_alloc (block_size);
+ (uint8_t*)virtual_alloc (block_size, use_large_pages_p);
if (current_block->memory_base == 0)
{
// Free the blocks that we've allocated so far
void* virtual_alloc (size_t size)
{
+ return virtual_alloc(size, false);
+}
+
+void* virtual_alloc (size_t size, bool use_large_pages_p)
+{
size_t requested_size = size;
if ((gc_heap::reserved_memory_limit - gc_heap::reserved_memory) < requested_size)
flags = VirtualReserveFlags::WriteWatch;
}
#endif // !FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
- void* prgmem = GCToOSInterface::VirtualReserve (requested_size, card_size * card_word_width, flags);
+
+ void* prgmem = use_large_pages_p ? GCToOSInterface::VirtualReserveAndCommitLargePages(requested_size) : GCToOSInterface::VirtualReserve(requested_size, card_size * card_word_width, flags);
void *aligned_mem = prgmem;
// We don't want (prgmem + size) to be right at the end of the address space
}
// If it's a valid heap number it means it's commiting for memory on the GC heap.
- bool commit_succeeded_p = ((h_number >= 0) ?
- virtual_alloc_commit_for_heap (address, size, h_number) :
- GCToOSInterface::VirtualCommit(address, size));
+ // In addition if large pages is enabled, we set commit_succeeded_p to true because memory is already committed.
+ bool commit_succeeded_p = ((h_number >= 0) ? (use_large_pages_p ? true :
+ virtual_alloc_commit_for_heap (address, size, h_number)) :
+ GCToOSInterface::VirtualCommit(address, size));
if (!commit_succeeded_p && heap_hard_limit)
{
heap_segment_mem (new_segment) = start;
heap_segment_used (new_segment) = start;
heap_segment_reserved (new_segment) = new_pages + size;
- heap_segment_committed (new_segment) = new_pages + initial_commit;
+ heap_segment_committed (new_segment) = (use_large_pages_p ? heap_segment_reserved(new_segment) : (new_pages + initial_commit));
init_heap_segment (new_segment);
dprintf (2, ("Creating heap segment %Ix", (size_t)new_segment));
return new_segment;
void gc_heap::decommit_heap_segment_pages (heap_segment* seg,
size_t extra_space)
{
+ if (use_large_pages_p)
+ return;
uint8_t* page_start = align_on_page (heap_segment_allocated(seg));
size_t size = heap_segment_committed (seg) - page_start;
extra_space = align_on_page (extra_space);
block_count = 1;
#endif //MULTIPLE_HEAPS
+ use_large_pages_p = false;
+
if (heap_hard_limit)
{
check_commit_cs.Initialize();
+ use_large_pages_p = GCConfig::GetGCLargePages();
}
- if (!reserve_initial_memory(segment_size,heap_size,block_count))
+ if (!reserve_initial_memory(segment_size,heap_size,block_count,use_large_pages_p))
return E_OUTOFMEMORY;
#ifdef CARD_BUNDLE
"Specifies the name of the GC config log file") \
BOOL_CONFIG(GCNumaAware, "GCNumaAware", true, "Enables numa allocations in the GC") \
BOOL_CONFIG(GCCpuGroup, "GCCpuGroup", false, "Enables CPU groups in the GC") \
+ BOOL_CONFIG(GCLargePages, "GCLargePages", false, "Enables using Large Pages in the GC") \
INT_CONFIG(HeapVerifyLevel, "HeapVerify", HEAPVERIFY_NONE, \
"When set verifies the integrity of the managed heap on entry and exit of each GC") \
INT_CONFIG(LOHCompactionMode, "GCLOHCompact", 0, "Specifies the LOH compaction mode") \
PER_HEAP_ISOLATED
size_t current_total_committed_gc_own;
+ // This is if large pages should be used.
+ PER_HEAP_ISOLATED
+ size_t use_large_pages_p;
+
PER_HEAP_ISOLATED
size_t last_gc_index;
)
if(WIN32)
+ set (GC_LINK_LIBRARIES
+ ${STATIC_MT_CRT_LIB}
+ ${STATIC_MT_VCRT_LIB}
+ kernel32.lib
+ # advapi32.lib supplies the token/privilege APIs (OpenProcessToken,
+ # AdjustTokenPrivileges, LookupPrivilegeValueW) used by large page support.
+ advapi32.lib)
+endif(WIN32)
+
+if(WIN32)
list(APPEND SOURCES
../windows/gcenv.windows.cpp)
add_definitions(-DUNICODE=1)
_add_executable(gcsample
${SOURCES}
)
+
+if(WIN32)
+ target_link_libraries(gcsample ${GC_LINK_LIBRARIES})
+endif()
\ No newline at end of file
#cmakedefine01 HAVE_SYS_MMAN_H
#cmakedefine01 HAVE_PTHREAD_THREADID_NP
#cmakedefine01 HAVE_PTHREAD_GETTHREADID_NP
+#cmakedefine01 HAVE_MAP_HUGETLB
#cmakedefine01 HAVE_SCHED_GETCPU
#cmakedefine01 HAVE_NUMA_H
#cmakedefine01 HAVE_VM_ALLOCATE
}
" HAVE_PTHREAD_GETTHREADID_NP)
+# Detect whether mmap supports the Linux MAP_HUGETLB flag (huge/large pages);
+# result is exposed to the code as the HAVE_MAP_HUGETLB cmakedefine.
+check_cxx_source_compiles("
+ #include <sys/mman.h>
+
+ int main()
+ {
+ return MAP_HUGETLB;
+ }
+ " HAVE_MAP_HUGETLB)
+
check_cxx_source_runs("
#include <sched.h>
// flags - flags to control special settings like write watching
// Return:
// Starting virtual address of the reserved range
-void* GCToOSInterface::VirtualReserve(size_t size, size_t alignment, uint32_t flags)
+static void* VirtualReserveInner(size_t size, size_t alignment, uint32_t flags, uint32_t hugePagesFlag = 0)
{
assert(!(flags & VirtualReserveFlags::WriteWatch) && "WriteWatch not supported on Unix");
if (alignment == 0)
}
size_t alignedSize = size + (alignment - OS_PAGE_SIZE);
- void * pRetVal = mmap(nullptr, alignedSize, PROT_NONE, MAP_ANON | MAP_PRIVATE, -1, 0);
+ void * pRetVal = mmap(nullptr, alignedSize, PROT_NONE, MAP_ANON | MAP_PRIVATE | hugePagesFlag, -1, 0);
if (pRetVal != NULL)
{
return pRetVal;
}
+// Reserve virtual memory range.
+// Parameters:
+//  size - size of the virtual memory range
+//  alignment - requested memory alignment, 0 means no specific alignment requested
+//  flags - flags to control special settings like write watching
+// Return:
+//  Starting virtual address of the reserved range
+void* GCToOSInterface::VirtualReserve(size_t size, size_t alignment, uint32_t flags)
+{
+    // Plain (non large page) reservation: delegate to the shared worker and
+    // leave its huge-pages mmap flag at the default of 0.
+    return VirtualReserveInner(size, alignment, flags);
+}
+
// Release virtual memory range previously reserved using VirtualReserve
// Parameters:
// address - starting virtual address
return (ret == 0);
}
+// Reserve and commit a virtual memory range suitable for large pages.
+// Parameters:
+//  size - size of the virtual memory range
+// Return:
+//  Starting virtual address of the committed range, or nullptr on failure
+void* GCToOSInterface::VirtualReserveAndCommitLargePages(size_t size)
+{
+#if HAVE_MAP_HUGETLB
+    uint32_t largePagesFlag = MAP_HUGETLB;
+#else
+    // No huge page support detected at configure time; fall back to a plain
+    // reservation so the allocation can still succeed.
+    uint32_t largePagesFlag = 0;
+#endif
+
+    void* pRetVal = VirtualReserveInner(size, OS_PAGE_SIZE, 0, largePagesFlag);
+    if (pRetVal == nullptr)
+    {
+        return nullptr;
+    }
+
+    if (!VirtualCommit(pRetVal, size, NUMA_NODE_UNDEFINED))
+    {
+        // Don't leak the reservation when the commit fails.
+        VirtualRelease(pRetVal, size);
+        return nullptr;
+    }
+
+    return pRetVal;
+}
+
// Commit virtual memory range. It must be part of a range reserved using VirtualReserve.
// Parameters:
// address - starting virtual address
// memory on the machine/in the container, we need to restrict by the VM.
static bool g_UseRestrictedVirtualMemory = false;
+static bool g_SeLockMemoryPrivilegeAcquired = false;
+
static AffinitySet g_processAffinitySet;
typedef BOOL (WINAPI *PIS_PROCESS_IN_JOB)(HANDLE processHandle, HANDLE jobHandle, BOOL* result);
}
#endif
+// Enable the SeLockMemoryPrivilege on the current process token, which is
+// required for VirtualAlloc with MEM_LARGE_PAGES to succeed.
+// Return:
+//  true if the privilege was enabled, false otherwise
+bool InitLargePagesPrivilege()
+{
+    TOKEN_PRIVILEGES tp;
+    LUID luid;
+    if (!LookupPrivilegeValueW(nullptr, SE_LOCK_MEMORY_NAME, &luid))
+    {
+        return false;
+    }
+
+    tp.PrivilegeCount = 1;
+    tp.Privileges[0].Luid = luid;
+    tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
+
+    HANDLE token;
+    if (!OpenProcessToken(::GetCurrentProcess(), TOKEN_ADJUST_PRIVILEGES, &token))
+    {
+        return false;
+    }
+
+    BOOL retVal = AdjustTokenPrivileges(token, FALSE, &tp, 0, nullptr, 0);
+    // Capture the error before CloseHandle can clobber it.
+    // AdjustTokenPrivileges can return success while still failing to enable
+    // the privilege, setting the last error to ERROR_NOT_ALL_ASSIGNED.
+    DWORD gls = GetLastError();
+    CloseHandle(token);
+
+    if (!retVal)
+    {
+        return false;
+    }
+
+    // Any nonzero last error (e.g. ERROR_NOT_ALL_ASSIGNED) means the
+    // privilege was not actually enabled.
+    if (gls != 0)
+    {
+        return false;
+    }
+
+    return true;
+}
+
bool InitCPUGroupInfoArray()
{
#if (defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_))
return !!::VirtualFree(address, 0, MEM_RELEASE);
}
+// Reserve and commit a virtual memory range backed by large pages.
+// Parameters:
+//  size - size of the virtual memory range
+// Return:
+//  Starting virtual address of the committed range, or nullptr on failure
+void* GCToOSInterface::VirtualReserveAndCommitLargePages(size_t size)
+{
+    // Large page allocations require the SeLockMemoryPrivilege; enable it
+    // once per process and remember the result.
+    if (!g_SeLockMemoryPrivilegeAcquired)
+    {
+        if (!InitLargePagesPrivilege())
+        {
+            return nullptr;
+        }
+
+        g_SeLockMemoryPrivilegeAcquired = true;
+    }
+
+    // VirtualAlloc with MEM_LARGE_PAGES requires the size to be a multiple
+    // of the large page minimum, so round up.
+    SIZE_T largePageMinimum = GetLargePageMinimum();
+    size = (size + (largePageMinimum - 1)) & ~(largePageMinimum - 1);
+
+    return ::VirtualAlloc(nullptr, size, MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES, PAGE_READWRITE);
+}
+
// Commit virtual memory range. It must be part of a range reserved using VirtualReserve.
// Parameters:
// address - starting virtual address
RETAIL_CONFIG_DWORD_INFO_DIRECT_ACCESS(EXTERNAL_GCHeapHardLimit, W("GCHeapHardLimit"), "Specifies the maximum commit size for the GC heap")
RETAIL_CONFIG_DWORD_INFO_DIRECT_ACCESS(EXTERNAL_GCHeapHardLimitPercent, W("GCHeapHardLimitPercent"), "Specifies the GC heap usage as a percentage of the total memory")
RETAIL_CONFIG_STRING_INFO(EXTERNAL_GCHeapAffinitizeRanges, W("GCHeapAffinitizeRanges"), "Specifies list of processors for Server GC threads. The format is a comma separated list of processor numbers or ranges of processor numbers. Example: 1,3,5,7-9,12")
+RETAIL_CONFIG_DWORD_INFO_DIRECT_ACCESS(EXTERNAL_GCLargePages, W("GCLargePages"), "Specifies whether large pages should be used when a heap hard limit is set")
///
/// IBC
#define MEM_MAPPED 0x40000
#define MEM_TOP_DOWN 0x100000
#define MEM_WRITE_WATCH 0x200000
+#define MEM_LARGE_PAGES 0x20000000
#define MEM_RESERVE_EXECUTABLE 0x40000000 // reserve memory using executable memory allocator
PALIMPORT
uint16_t GetCombinedValue() { return m_groupProc; }
};
+#if !defined(FEATURE_PAL)
+
+// Set once the SeLockMemoryPrivilege (required for large page allocations)
+// has been successfully enabled for this process.
+static bool g_SeLockMemoryPrivilegeAcquired = false;
+
+// Enable the SeLockMemoryPrivilege on the current process token, which is
+// required for VirtualAlloc with MEM_LARGE_PAGES to succeed.
+// NOTE(review): this duplicates the helper in the GC's Windows environment —
+// consider sharing one implementation; confirm against the build layout.
+// Return:
+//  true if the privilege was enabled, false otherwise
+bool InitLargePagesPrivilege()
+{
+    TOKEN_PRIVILEGES tp;
+    LUID luid;
+    if (!LookupPrivilegeValueW(nullptr, SE_LOCK_MEMORY_NAME, &luid))
+    {
+        return false;
+    }
+
+    tp.PrivilegeCount = 1;
+    tp.Privileges[0].Luid = luid;
+    tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
+
+    HANDLE token;
+    if (!OpenProcessToken(::GetCurrentProcess(), TOKEN_ADJUST_PRIVILEGES, &token))
+    {
+        return false;
+    }
+
+    BOOL retVal = AdjustTokenPrivileges(token, FALSE, &tp, 0, nullptr, 0);
+    // Capture the error before CloseHandle can clobber it.
+    // AdjustTokenPrivileges can return success while still failing to enable
+    // the privilege, setting the last error to ERROR_NOT_ALL_ASSIGNED.
+    DWORD gls = GetLastError();
+    CloseHandle(token);
+
+    if (!retVal)
+    {
+        return false;
+    }
+
+    // Any nonzero last error (e.g. ERROR_NOT_ALL_ASSIGNED) means the
+    // privilege was not actually enabled.
+    if (gls != 0)
+    {
+        return false;
+    }
+
+    return true;
+}
+
+#endif // !FEATURE_PAL
+
// Initialize the interface implementation
// Return:
// true if it has succeeded, false if it has failed
return !!::ClrVirtualFree(address, 0, MEM_RELEASE);
}
+// Reserve and commit a virtual memory range backed by large pages.
+// Parameters:
+//  size - size of the virtual memory range
+// Return:
+//  Starting virtual address of the committed range, or nullptr on failure
+void* GCToOSInterface::VirtualReserveAndCommitLargePages(size_t size)
+{
+    LIMITED_METHOD_CONTRACT;
+
+#if !defined(FEATURE_PAL)
+    // Large page allocations require the SeLockMemoryPrivilege; enable it
+    // once per process and remember the result.
+    if (!g_SeLockMemoryPrivilegeAcquired)
+    {
+        if (!InitLargePagesPrivilege())
+        {
+            return nullptr;
+        }
+
+        g_SeLockMemoryPrivilegeAcquired = true;
+    }
+
+    // VirtualAlloc with MEM_LARGE_PAGES requires the size to be a multiple
+    // of the large page minimum, so round up. On the PAL, MEM_LARGE_PAGES is
+    // handled by the PAL implementation and no privilege/rounding is needed.
+    SIZE_T largePageMinimum = GetLargePageMinimum();
+    size = (size + (largePageMinimum - 1)) & ~(largePageMinimum - 1);
+#endif
+
+    return ::ClrVirtualAlloc(nullptr, size, MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES, PAGE_READWRITE);
+}
+
// Commit virtual memory range. It must be part of a range reserved using VirtualReserve.
// Parameters:
// address - starting virtual address