1 // Licensed to the .NET Foundation under one or more agreements.
2 // The .NET Foundation licenses this file to you under the MIT license.
3 // See the LICENSE file in the project root for more information.
15 #include "gcenv.structs.h"
16 #include "gcenv.base.h"
18 #include "gcenv.unix.inl"
24 #error "sys/time.h required by GC PAL for the time being"
25 #endif // HAVE_SYS_TIME_H
30 #error "sys/mman.h required by GC PAL"
31 #endif // HAVE_SYS_MMAN_H
34 #include <sys/syscall.h> // __NR_membarrier
35 // Ensure __NR_membarrier is defined for portable builds.
36 # if !defined(__NR_membarrier)
37 # if defined(__amd64__)
38 # define __NR_membarrier 324
39 # elif defined(__i386__)
40 # define __NR_membarrier 375
41 # elif defined(__arm__)
42 # define __NR_membarrier 389
43 # elif defined(__aarch64__)
44 # define __NR_membarrier 283
46 # error Unknown architecture
51 #include <time.h> // nanosleep
52 #include <sched.h> // sched_yield
54 #include <unistd.h> // sysconf
64 // List of all functions from the numa library that are used
65 #define FOR_ALL_NUMA_FUNCTIONS \
66 PER_FUNCTION_BLOCK(mbind) \
67 PER_FUNCTION_BLOCK(numa_available) \
68 PER_FUNCTION_BLOCK(numa_max_node) \
69 PER_FUNCTION_BLOCK(numa_node_of_cpu)
71 // Declare pointers to all the used numa functions
72 #define PER_FUNCTION_BLOCK(fn) extern decltype(fn)* fn##_ptr;
73 FOR_ALL_NUMA_FUNCTIONS
74 #undef PER_FUNCTION_BLOCK
76 // Redefine all calls to numa functions as calls through pointers that are set
77 // to the functions of libnuma in the initialization.
78 #define mbind(...) mbind_ptr(__VA_ARGS__)
79 #define numa_available() numa_available_ptr()
80 #define numa_max_node() numa_max_node_ptr()
81 #define numa_node_of_cpu(...) numa_node_of_cpu_ptr(__VA_ARGS__)
85 #if defined(_ARM_) || defined(_ARM64_)
86 #define SYSCONF_GET_NUMPROCS _SC_NPROCESSORS_CONF
88 #define SYSCONF_GET_NUMPROCS _SC_NPROCESSORS_ONLN
91 // The cached number of logical CPUs observed.
92 static uint32_t g_logicalCpuCount = 0;
94 // The cached number of CPUs available for the current process.
95 static uint32_t g_currentProcessCpuCount = 0;
98 // Helper membarrier function
100 #ifdef __NR_membarrier
101 # define membarrier(...) syscall(__NR_membarrier, __VA_ARGS__)
103 # define membarrier(...) -ENOSYS
108 MEMBARRIER_CMD_QUERY = 0,
109 MEMBARRIER_CMD_GLOBAL = (1 << 0),
110 MEMBARRIER_CMD_GLOBAL_EXPEDITED = (1 << 1),
111 MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED = (1 << 2),
112 MEMBARRIER_CMD_PRIVATE_EXPEDITED = (1 << 3),
113 MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED = (1 << 4),
114 MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE = (1 << 5),
115 MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE = (1 << 6)
119 // Tracks if the OS supports FlushProcessWriteBuffers using membarrier
121 static int s_flushUsingMemBarrier = 0;
123 // Helper memory page used by the FlushProcessWriteBuffers
124 static uint8_t* g_helperPage = 0;
126 // Mutex to make the FlushProcessWriteBuffersMutex thread safe
127 static pthread_mutex_t g_flushProcessWriteBuffersMutex;
129 size_t GetRestrictedPhysicalMemoryLimit();
130 bool GetPhysicalMemoryUsed(size_t* val);
131 bool GetCpuLimit(uint32_t* val);
133 static size_t g_RestrictedPhysicalMemoryLimit = 0;
135 uint32_t g_pageSizeUnixInl = 0;
137 AffinitySet g_processAffinitySet;
140 typedef cpuset_t cpu_set_t;
143 // The highest NUMA node available
144 int g_highestNumaNode = 0;
146 bool g_numaAvailable = false;
148 void* g_numaHandle = nullptr;
151 #define PER_FUNCTION_BLOCK(fn) decltype(fn)* fn##_ptr;
152 FOR_ALL_NUMA_FUNCTIONS
153 #undef PER_FUNCTION_BLOCK
154 #endif // HAVE_NUMA_H
157 // Initialize data structures for getting and setting thread affinities to processors and
158 // querying NUMA related processor information.
159 // On systems with no NUMA support, it behaves as if there was a single NUMA node with
160 // a single group of processors.
161 void NUMASupportInitialize()
// Try the unversioned library name first, then fall back to the SONAME-versioned one.
164 g_numaHandle = dlopen("libnuma.so", RTLD_LAZY);
165 if (g_numaHandle == 0)
167 g_numaHandle = dlopen("libnuma.so.1", RTLD_LAZY);
169 if (g_numaHandle != 0)
// Probe lookup; the result is intentionally ignored (only used to touch the library).
171 dlsym(g_numaHandle, "numa_allocate_cpumask");
// Resolve every function listed in FOR_ALL_NUMA_FUNCTIONS into its *_ptr variable;
// a missing symbol is treated as fatal (abort), since the redirection macros above
// would otherwise call through a null pointer.
172 #define PER_FUNCTION_BLOCK(fn) \
173 fn##_ptr = (decltype(fn)*)dlsym(g_numaHandle, #fn); \
174 if (fn##_ptr == NULL) { fprintf(stderr, "Cannot get symbol " #fn " from libnuma\n"); abort(); }
175 FOR_ALL_NUMA_FUNCTIONS
176 #undef PER_FUNCTION_BLOCK
// numa_available() == -1 means the kernel has no NUMA support; unload the library.
178 if (numa_available() == -1)
180 dlclose(g_numaHandle);
184 g_numaAvailable = true;
185 g_highestNumaNode = numa_max_node();
188 #endif // HAVE_NUMA_H
// Without NUMA, pretend there is a single node 0.
189 if (!g_numaAvailable)
192 g_highestNumaNode = 0;
196 // Cleanup of the NUMA support data structures
197 void NUMASupportCleanup()
// Release the dynamically loaded libnuma handle acquired in NUMASupportInitialize.
// NOTE(review): guard condition appears elided in this excerpt — presumably only
// executed when the handle was successfully opened; confirm against full source.
202 dlclose(g_numaHandle);
204 #endif // HAVE_NUMA_H
207 // Initialize the interface implementation
// Return:
209 // true if it has succeeded, false if it has failed
210 bool GCToOSInterface::Initialize()
// Cache the OS page size, defaulting to 4KB if sysconf fails.
212 int pageSize = sysconf( _SC_PAGE_SIZE );
214 g_pageSizeUnixInl = uint32_t((pageSize > 0) ? pageSize : 0x1000);
216 // Calculate and cache the number of processors on this machine
217 int cpuCount = sysconf(SYSCONF_GET_NUMPROCS);
223 g_logicalCpuCount = cpuCount;
226 // support for FlushProcessWriteBuffers
229 assert(s_flushUsingMemBarrier == 0);
231 // Starting with Linux kernel 4.14, process memory barriers can be generated
232 // using MEMBARRIER_CMD_PRIVATE_EXPEDITED.
233 int mask = membarrier(MEMBARRIER_CMD_QUERY, 0);
235 mask & MEMBARRIER_CMD_PRIVATE_EXPEDITED &&
236 // Register intent to use the private expedited command.
237 membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0) == 0)
239 s_flushUsingMemBarrier = TRUE;
// Fallback path: a helper page whose protection is toggled to force IPIs.
243 assert(g_helperPage == 0);
245 g_helperPage = static_cast<uint8_t*>(mmap(0, OS_PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0));
247 if (g_helperPage == MAP_FAILED)
252 // Verify that the g_helperPage is really aligned to the OS page size
253 assert((((size_t)g_helperPage) & (OS_PAGE_SIZE - 1)) == 0);
255 // Locking the page ensures that it stays in memory during the two mprotect
256 // calls in the FlushProcessWriteBuffers below. If the page was unmapped between
257 // those calls, they would not have the expected effect of generating IPI.
258 int status = mlock(g_helperPage, OS_PAGE_SIZE);
265 status = pthread_mutex_init(&g_flushProcessWriteBuffersMutex, NULL);
// On mutex-init failure the locked page is released again (error path).
268 munlock(g_helperPage, OS_PAGE_SIZE);
273 #if HAVE_MACH_ABSOLUTE_TIME
274 kern_return_t machRet;
275 if ((machRet = mach_timebase_info(&g_TimebaseInfo)) != KERN_SUCCESS)
279 #endif // HAVE_MACH_ABSOLUTE_TIME
// Compute the process CPU count and affinity set from the scheduler affinity mask.
283 #if HAVE_SCHED_GETAFFINITY
285 g_currentProcessCpuCount = 0;
288 int st = sched_getaffinity(0, sizeof(cpu_set_t), &cpuSet);
292 for (size_t i = 0; i < g_logicalCpuCount; i++)
294 if (CPU_ISSET(i, &cpuSet))
296 g_currentProcessCpuCount++;
297 g_processAffinitySet.Add(i);
303 // We should not get any of the errors that the sched_getaffinity can return since none
304 // of them applies for the current thread, so this is an unexpected kind of failure.
308 #else // HAVE_SCHED_GETAFFINITY
// No affinity API: assume all logical CPUs are available to the process.
310 g_currentProcessCpuCount = g_logicalCpuCount;
312 for (size_t i = 0; i < g_logicalCpuCount; i++)
314 g_processAffinitySet.Add(i);
317 #endif // HAVE_SCHED_GETAFFINITY
319 NUMASupportInitialize();
324 // Shutdown the interface implementation
// Releases the resources acquired in Initialize: the locked helper page,
// the flush mutex, and the NUMA support structures.
325 void GCToOSInterface::Shutdown()
327 int ret = munlock(g_helperPage, OS_PAGE_SIZE);
329 ret = pthread_mutex_destroy(&g_flushProcessWriteBuffersMutex);
332 munmap(g_helperPage, OS_PAGE_SIZE);
335 NUMASupportCleanup();
338 // Get numeric id of the current thread if possible on the
339 // current platform. It is intended for logging purposes only.
// Return:
341 // Numeric id of the current thread, as best we can retrieve it.
342 uint64_t GCToOSInterface::GetCurrentThreadIdForLogging()
344 #if defined(__linux__)
345 return (uint64_t)syscall(SYS_gettid);
346 #elif HAVE_PTHREAD_GETTHREADID_NP
347 return (uint64_t)pthread_getthreadid_np();
348 #elif HAVE_PTHREAD_THREADID_NP
349 unsigned long long tid;
350 pthread_threadid_np(pthread_self(), &tid);
351 return (uint64_t)tid;
353 // Fallback in case we don't know how to get integer thread id on the current platform
354 return (uint64_t)pthread_self();
358 // Get the process ID of the process.
// NOTE(review): body elided in this excerpt — presumably returns getpid(); confirm.
359 uint32_t GCToOSInterface::GetCurrentProcessId()
364 // Set ideal processor for the current thread
// Parameters:
366 // srcProcNo - processor number the thread currently runs on
367 // dstProcNo - processor number the thread should be migrated to
// Return:
369 // true if it has succeeded, false if it has failed
// There is no "ideal processor" concept on Unix; hard-affinitize to the destination instead.
370 bool GCToOSInterface::SetCurrentThreadIdealAffinity(uint16_t srcProcNo, uint16_t dstProcNo)
372 return GCToOSInterface::SetThreadAffinity(dstProcNo);
375 // Get the number of the current processor
376 uint32_t GCToOSInterface::GetCurrentProcessorNumber()
378 #if HAVE_SCHED_GETCPU
379 int processorNumber = sched_getcpu();
// sched_getcpu is not expected to fail here; callers should gate on
// CanGetCurrentProcessorNumber() before calling.
380 assert(processorNumber != -1);
381 return processorNumber;
387 // Check if the OS supports getting current processor number
// HAVE_SCHED_GETCPU is a 0/1 configure-time macro, used directly as the result.
388 bool GCToOSInterface::CanGetCurrentProcessorNumber()
390 return HAVE_SCHED_GETCPU;
393 // Flush write buffers of processors that are executing threads of the current process
394 void GCToOSInterface::FlushProcessWriteBuffers()
// Preferred path: the membarrier syscall (registered in Initialize) issues a
// process-wide memory barrier on all cores running this process's threads.
396 if (s_flushUsingMemBarrier)
398 int status = membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0);
399 assert(status == 0 && "Failed to flush using membarrier");
// Fallback path: serialize on a mutex and toggle the helper page's protection,
// which forces the kernel to send IPIs / flush TLBs on all processors.
403 int status = pthread_mutex_lock(&g_flushProcessWriteBuffersMutex);
404 assert(status == 0 && "Failed to lock the flushProcessWriteBuffersMutex lock");
406 // Changing a helper memory page protection from read / write to no access
407 // causes the OS to issue IPI to flush TLBs on all processors. This also
408 // results in flushing the processor buffers.
409 status = mprotect(g_helperPage, OS_PAGE_SIZE, PROT_READ | PROT_WRITE);
410 assert(status == 0 && "Failed to change helper page protection to read / write");
412 // Ensure that the page is dirty before we change the protection so that
413 // we prevent the OS from skipping the global TLB flush.
414 __sync_add_and_fetch((size_t*)g_helperPage, 1);
416 status = mprotect(g_helperPage, OS_PAGE_SIZE, PROT_NONE);
417 assert(status == 0 && "Failed to change helper page protection to no access");
419 status = pthread_mutex_unlock(&g_flushProcessWriteBuffersMutex);
420 assert(status == 0 && "Failed to unlock the flushProcessWriteBuffersMutex lock");
424 // Break into a debugger. Uses a compiler intrinsic if one is available,
425 // otherwise raises a SIGTRAP.
426 void GCToOSInterface::DebugBreak()
428 // __has_builtin is only defined by clang. GCC doesn't have a debug
429 // trap intrinsic anyway.
430 #ifndef __has_builtin
431 #define __has_builtin(x) 0
432 #endif // __has_builtin
434 #if __has_builtin(__builtin_debugtrap)
435 __builtin_debugtrap();
441 // Causes the calling thread to sleep for the specified number of milliseconds
// Parameters:
443 // sleepMSec - time to sleep before switching to another thread
444 void GCToOSInterface::Sleep(uint32_t sleepMSec)
// Split the millisecond request into the seconds/nanoseconds timespec fields.
452 requested.tv_sec = sleepMSec / tccSecondsToMilliSeconds;
453 requested.tv_nsec = (sleepMSec - requested.tv_sec * tccSecondsToMilliSeconds) * tccMilliSecondsToNanoSeconds;
// Retry with the remaining time if interrupted by a signal.
// NOTE(review): nanosleep returns -1 on failure and sets errno to EINTR; comparing
// the *return value* to EINTR looks wrong (should be `== -1 && errno == EINTR`) —
// confirm against the full source / nanosleep(2).
456 while (nanosleep(&requested, &remaining) == EINTR)
458 requested = remaining;
462 // Causes the calling thread to yield execution to another thread that is ready to run on the current processor.
// Parameters:
464 // switchCount - number of times the YieldThread was called in a loop
// (unused on Unix; the parameter exists to match the interface.)
465 void GCToOSInterface::YieldThread(uint32_t switchCount)
467 int ret = sched_yield();
469 // sched_yield never fails on Linux, unclear about other OSes
473 // Reserve virtual memory range.
// Parameters:
475 // size - size of the virtual memory range
476 // alignment - requested memory alignment, 0 means no specific alignment requested
477 // flags - flags to control special settings like write watching
// hugePagesFlag - extra mmap flag (e.g. MAP_HUGETLB) or 0
// Return:
479 // Starting virtual address of the reserved range
480 static void* VirtualReserveInner(size_t size, size_t alignment, uint32_t flags, uint32_t hugePagesFlag = 0)
482 assert(!(flags & VirtualReserveFlags::WriteWatch) && "WriteWatch not supported on Unix");
// Default alignment is the OS page size (mmap's natural alignment).
485 alignment = OS_PAGE_SIZE;
// Over-reserve so an aligned sub-range of `size` bytes is guaranteed to exist,
// then trim the unaligned head and tail with munmap.
488 size_t alignedSize = size + (alignment - OS_PAGE_SIZE);
489 void * pRetVal = mmap(nullptr, alignedSize, PROT_NONE, MAP_ANON | MAP_PRIVATE | hugePagesFlag, -1, 0);
// Round the returned address up to the requested alignment.
493 void * pAlignedRetVal = (void *)(((size_t)pRetVal + (alignment - 1)) & ~(alignment - 1));
494 size_t startPadding = (size_t)pAlignedRetVal - (size_t)pRetVal;
495 if (startPadding != 0)
497 int ret = munmap(pRetVal, startPadding);
501 size_t endPadding = alignedSize - (startPadding + size);
504 int ret = munmap((void *)((size_t)pAlignedRetVal + size), endPadding);
508 pRetVal = pAlignedRetVal;
514 // Reserve virtual memory range.
// Parameters:
516 // size - size of the virtual memory range
517 // alignment - requested memory alignment, 0 means no specific alignment requested
518 // flags - flags to control special settings like write watching
// Return:
520 // Starting virtual address of the reserved range
// Thin wrapper over VirtualReserveInner with no huge-pages flag.
521 void* GCToOSInterface::VirtualReserve(size_t size, size_t alignment, uint32_t flags)
523 return VirtualReserveInner(size, alignment, flags);
526 // Release virtual memory range previously reserved using VirtualReserve
// Parameters:
528 // address - starting virtual address
529 // size - size of the virtual memory range
// Return:
531 // true if it has succeeded, false if it has failed
532 bool GCToOSInterface::VirtualRelease(void* address, size_t size)
534 int ret = munmap(address, size);
539 // Commit virtual memory range.
// Parameters:
541 // size - size of the virtual memory range
// Return:
543 // Starting virtual address of the committed range
// Uses MAP_HUGETLB where available so the range is backed by large pages.
544 void* GCToOSInterface::VirtualReserveAndCommitLargePages(size_t size)
547 uint32_t largePagesFlag = MAP_HUGETLB;
549 uint32_t largePagesFlag = 0;
552 void* pRetVal = VirtualReserveInner(size, OS_PAGE_SIZE, 0, largePagesFlag);
// Commit immediately; no NUMA node preference is applied here.
553 if (VirtualCommit(pRetVal, size, NUMA_NODE_UNDEFINED))
561 // Commit virtual memory range. It must be part of a range reserved using VirtualReserve.
// Parameters:
563 // address - starting virtual address
564 // size - size of the virtual memory range
// node - preferred NUMA node, or NUMA_NODE_UNDEFINED for no preference
// Return:
566 // true if it has succeeded, false if it has failed
567 bool GCToOSInterface::VirtualCommit(void* address, size_t size, uint16_t node)
// "Commit" on Unix means making reserved (PROT_NONE) pages readable/writable.
569 bool success = mprotect(address, size, PROT_WRITE | PROT_READ) == 0;
// Optionally bind the committed pages to the preferred NUMA node.
572 if (success && g_numaAvailable && (node != NUMA_NODE_UNDEFINED))
574 if ((int)node <= g_highestNumaNode)
576 int usedNodeMaskBits = g_highestNumaNode + 1;
// NOTE(review): this arithmetic divides a bit count by sizeof(unsigned long)
// (bytes, not bits), and the bit index below masks with sizeof-1 rather than
// (bits-1). It works for node counts up to sizeof(unsigned long) but looks
// wrong for larger systems — confirm against the full source / mbind(2).
577 int nodeMaskLength = (usedNodeMaskBits + sizeof(unsigned long) - 1) / sizeof(unsigned long);
578 unsigned long nodeMask[nodeMaskLength];
579 memset(nodeMask, 0, sizeof(nodeMask));
581 int index = node / sizeof(unsigned long);
582 nodeMask[index] = ((unsigned long)1) << (node & (sizeof(unsigned long) - 1));
584 int st = mbind(address, size, MPOL_PREFERRED, nodeMask, usedNodeMaskBits, 0);
586 // If the mbind fails, we still return the allocated memory since the node is just a hint
589 #endif // HAVE_NUMA_H
594 // Decommit virtual memory range.
// Parameters:
596 // address - starting virtual address
597 // size - size of the virtual memory range
// Return:
599 // true if it has succeeded, false if it has failed
600 bool GCToOSInterface::VirtualDecommit(void* address, size_t size)
602 // TODO: This can fail, however the GC does not handle the failure gracefully
603 // Explicitly calling mmap instead of mprotect here makes it
604 // that much more clear to the operating system that we no
605 // longer need these pages. Also, GC depends on re-committed pages to
// NOTE(review): mmap reports failure as MAP_FAILED ((void*)-1), not NULL, so
// `!= NULL` would return true even on failure — confirm against mmap(2).
607 return mmap(address, size, PROT_NONE, MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0) != NULL;
610 // Reset virtual memory range. Indicates that data in the memory range specified by address and size is no
611 // longer of interest, but it should not be decommitted.
// Parameters:
613 // address - starting virtual address
614 // size - size of the virtual memory range
615 // unlock - true if the memory range should also be unlocked
// Return:
617 // true if it has succeeded, false if it has failed
618 bool GCToOSInterface::VirtualReset(void * address, size_t size, bool unlock)
622 // Try to use MADV_FREE if supported. It tells the kernel that the application doesn't
623 // need the pages in the range. Freeing the pages can be delayed until a memory pressure
// occurs.
625 st = madvise(address, size, MADV_FREE);
629 // In case the MADV_FREE is not supported, use MADV_DONTNEED
630 st = madvise(address, size, MADV_DONTNEED);
636 // Check if the OS supports write watching
// NOTE(review): body elided in this excerpt — write watch is unsupported on Unix
// per the asserts below, so this presumably returns false; confirm.
637 bool GCToOSInterface::SupportsWriteWatch()
642 // Reset the write tracking state for the specified virtual memory range.
// Parameters:
644 // address - starting virtual address
645 // size - size of the virtual memory range
// Unsupported on Unix; reaching this is a programming error.
646 void GCToOSInterface::ResetWriteWatch(void* address, size_t size)
648 assert(!"should never call ResetWriteWatch on Unix");
651 // Retrieve addresses of the pages that are written to in a region of virtual memory
// Parameters:
653 // resetState - true indicates to reset the write tracking state
654 // address - starting virtual address
655 // size - size of the virtual memory range
656 // pageAddresses - buffer that receives an array of page addresses in the memory region
657 // pageAddressesCount - on input, size of the lpAddresses array, in array elements
658 // on output, the number of page addresses that are returned in the array.
// Return:
660 // true if it has succeeded, false if it has failed
// Unsupported on Unix; reaching this is a programming error.
661 bool GCToOSInterface::GetWriteWatch(bool resetState, void* address, size_t size, void** pageAddresses, uintptr_t* pageAddressesCount)
663 assert(!"should never call GetWriteWatch on Unix");
667 // Get size of the largest cache on the processor die
// Parameters:
669 // trueSize - true to return true cache size, false to return scaled up size based on
670 // the processor architecture
// Return: the cache size (body elided in this excerpt; detection not yet implemented).
673 size_t GCToOSInterface::GetCacheSizePerLogicalCpu(bool trueSize)
675 // TODO(segilles) processor detection
679 // Sets the calling thread's affinity to only run on the processor specified
// Parameters:
681 // procNo - The requested processor for the calling thread.
// Return:
683 // true if setting the affinity was successful, false otherwise.
684 bool GCToOSInterface::SetThreadAffinity(uint16_t procNo)
686 #if HAVE_PTHREAD_GETAFFINITY_NP
// Build a single-CPU set and pin the current thread to it.
689 CPU_SET((int)procNo, &cpuSet);
691 int st = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuSet);
695 #else // HAVE_PTHREAD_GETAFFINITY_NP
696 // There is no API to manage thread affinity, so let's ignore the request
698 #endif // HAVE_PTHREAD_GETAFFINITY_NP
701 // Boosts the calling thread's thread priority to a level higher than the default
// Return:
706 // true if the priority boost was successful, false otherwise.
// Not implemented on Unix yet (see TODO below).
707 bool GCToOSInterface::BoostThreadPriority()
709 // [LOCALGC TODO] Thread priority for unix
713 // Set the set of processors enabled for GC threads for the current process based on config specified affinity mask and set
// Parameters:
715 // configAffinityMask - mask specified by the GCHeapAffinitizeMask config
716 // configAffinitySet - affinity set specified by the GCHeapAffinitizeRanges config
// Return:
718 // set of enabled processors
// Note: configAffinityMask is unused on this path; only the set form is applied here.
719 const AffinitySet* GCToOSInterface::SetGCThreadsAffinitySet(uintptr_t configAffinityMask, const AffinitySet* configAffinitySet)
721 if (!configAffinitySet->IsEmpty())
723 // Update the process affinity set using the configured set
// (i.e. intersect the process set with the configured set by removing
// processors that are not in the configured set).
724 for (size_t i = 0; i < MAX_SUPPORTED_CPUS; i++)
726 if (g_processAffinitySet.Contains(i) && !configAffinitySet->Contains(i))
728 g_processAffinitySet.Remove(i);
733 return &g_processAffinitySet;
736 // Get number of processors assigned to the current process
// Return:
738 // The number of processors
// Returns the value cached by Initialize from the process affinity mask.
739 uint32_t GCToOSInterface::GetCurrentProcessCpuCount()
741 return g_currentProcessCpuCount;
744 // Return the size of the user-mode portion of the virtual address space of this process.
// Return:
746 // non zero if it has succeeded, 0 if it has failed
747 size_t GCToOSInterface::GetVirtualMemoryLimit()
750 // There is no API to get the total virtual address space size on
751 // Unix, so we use a constant value representing 128TB, which is
752 // the approximate size of total user virtual address space on
753 // the currently supported Unix systems.
754 static const uint64_t _128TB = (1ull << 47);
761 // Get the physical memory that this process can use.
// Return:
763 // non zero if it has succeeded, 0 if it has failed
// Remarks:
765 // If a process runs with a restricted memory limit, it returns the limit. If there's no limit
766 // specified, it returns amount of actual physical memory.
767 uint64_t GCToOSInterface::GetPhysicalMemoryLimit(bool* is_restricted)
769 size_t restricted_limit;
771 *is_restricted = false;
773 // The limit was not cached
774 if (g_RestrictedPhysicalMemoryLimit == 0)
// Query (e.g. cgroup) limit once and publish it for subsequent calls.
776 restricted_limit = GetRestrictedPhysicalMemoryLimit();
777 VolatileStore(&g_RestrictedPhysicalMemoryLimit, restricted_limit);
779 restricted_limit = g_RestrictedPhysicalMemoryLimit;
// SIZE_T_MAX is the sentinel for "no restriction found".
781 if (restricted_limit != 0 && restricted_limit != SIZE_T_MAX)
784 *is_restricted = true;
785 return restricted_limit;
// No restriction: report total physical memory = page count * page size.
788 long pages = sysconf(_SC_PHYS_PAGES);
794 long pageSize = sysconf(_SC_PAGE_SIZE);
800 return pages * pageSize;
// Get global memory status.
// Parameters (all optional; pass nullptr for values not needed):
805 // memory_load - A number between 0 and 100 that specifies the approximate percentage of physical memory
806 // that is in use (0 indicates no memory use and 100 indicates full memory use).
807 // available_physical - The amount of physical memory currently available, in bytes.
808 // available_page_file - The maximum amount of memory the current process can commit, in bytes.
809 void GCToOSInterface::GetMemoryStatus(uint32_t* memory_load, uint64_t* available_physical, uint64_t* available_page_file)
811 if (memory_load != nullptr || available_physical != nullptr)
813 uint64_t total = GetPhysicalMemoryLimit();
815 uint64_t available = 0;
819 // Get the physical memory in use - from it, we can get the physical memory available.
820 // We do this only when we have the total physical memory available.
821 if (total > 0 && GetPhysicalMemoryUsed(&used))
823 available = total > used ? total-used : 0;
824 load = (uint32_t)(((float)used * 100) / (float)total);
827 if (memory_load != nullptr)
829 if (available_physical != nullptr)
830 *available_physical = available;
// Page-file size is not tracked on Unix; always reported as 0.
833 if (available_page_file != nullptr)
834 *available_page_file = 0;
837 // Get a high precision performance counter
// Return: current counter value in microsecond units (see QueryPerformanceFrequency).
840 int64_t GCToOSInterface::QueryPerformanceCounter()
842 // TODO: This is not a particularly efficient implementation - we certainly could
843 // do much more specific platform-dependent versions if we find that this method
844 // runs hot. However, most likely it does not.
846 if (gettimeofday(&tv, NULL) == -1)
848 assert(!"gettimeofday() failed");
849 // TODO (segilles) unconditional asserts
// Convert seconds + microseconds into a single microsecond count.
852 return (int64_t) tv.tv_sec * (int64_t) tccSecondsToMicroSeconds + (int64_t) tv.tv_usec;
855 // Get a frequency of the high precision performance counter
// Return:
857 // The counter frequency
858 int64_t GCToOSInterface::QueryPerformanceFrequency()
860 // The counter frequency of gettimeofday is in microseconds.
861 return tccSecondsToMicroSeconds;
864 // Get a time stamp with a low precision
// Return:
866 // Time stamp in milliseconds
867 uint32_t GCToOSInterface::GetLowPrecisionTimeStamp()
869 // TODO(segilles) this is pretty naive, we can do better
872 if (gettimeofday(&tv, NULL) == 0)
// Convert seconds + microseconds into milliseconds.
874 retval = (tv.tv_sec * tccSecondsToMilliSeconds) + (tv.tv_usec / tccMilliSecondsToMicroSeconds);
878 assert(!"gettimeofday() failed\n");
884 // Gets the total number of processors on the machine, not taking
885 // into account current process affinity.
// Return:
887 // Number of processors on the machine
888 uint32_t GCToOSInterface::GetTotalProcessorCount()
890 // Calculated in GCToOSInterface::Initialize using
891 // sysconf(_SC_NPROCESSORS_ONLN)
892 return g_logicalCpuCount;
// Is NUMA support available (libnuma loaded and the kernel reports NUMA)?
895 bool GCToOSInterface::CanEnableGCNumaAware()
897 return g_numaAvailable;
900 // Get processor number and optionally its NUMA node number for the specified heap number
// Parameters:
902 // heap_number - heap number to get the result for
903 // proc_no - set to the selected processor number
904 // node_no - set to the NUMA node of the selected processor or to NUMA_NODE_UNDEFINED
// Return:
906 // true if it succeeded
907 bool GCToOSInterface::GetProcessorForHeap(uint16_t heap_number, uint16_t* proc_no, uint16_t* node_no)
909 bool success = false;
// Walk the affinitized processors in order; the heap_number-th one is the match.
911 uint16_t availableProcNumber = 0;
912 for (size_t procNumber = 0; procNumber < g_logicalCpuCount; procNumber++)
914 if (g_processAffinitySet.Contains(procNumber))
916 if (availableProcNumber == heap_number)
918 *proc_no = procNumber;
// Resolve the processor's NUMA node when NUMA is available; a negative
// result from numa_node_of_cpu means the node is unknown.
920 if (GCToOSInterface::CanEnableGCNumaAware())
922 int result = numa_node_of_cpu(procNumber);
923 *node_no = (result >= 0) ? (uint16_t)result : NUMA_NODE_UNDEFINED;
926 #endif // HAVE_NUMA_H
928 *node_no = NUMA_NODE_UNDEFINED;
934 availableProcNumber++;
941 // Parse the config string describing affinitization ranges and update the passed in affinitySet accordingly
// Parameters:
943 // config_string - string describing the affinitization range, platform specific
944 // start_index - the range start index extracted from the config_string
945 // end_index - the range end index extracted from the config_string, equal to the start_index if only an index and not a range was passed in
// Return:
947 // true if the configString was successfully parsed, false if it was not correct
948 bool GCToOSInterface::ParseGCHeapAffinitizeRangesEntry(const char** config_string, size_t* start_index, size_t* end_index)
950 return ParseIndexOrRange(config_string, start_index, end_index);
953 // Initialize the critical section
// Backed by a default (non-recursive) pthread mutex.
954 void CLRCriticalSection::Initialize()
956 int st = pthread_mutex_init(&m_cs.mutex, NULL);
960 // Destroy the critical section
// The mutex must be unlocked when this is called.
961 void CLRCriticalSection::Destroy()
963 int st = pthread_mutex_destroy(&m_cs.mutex);
967 // Enter the critical section. Blocks until the section can be entered.
968 void CLRCriticalSection::Enter()
970 pthread_mutex_lock(&m_cs.mutex);
973 // Leave the critical section
// Must be called by the thread that currently holds the lock.
974 void CLRCriticalSection::Leave()
976 pthread_mutex_unlock(&m_cs.mutex);