From 5daacc185b6b20962652fd740cb6b3cd6431d13c Mon Sep 17 00:00:00 2001
From: fanyang-mono
Date: Fri, 17 Apr 2020 10:05:43 -0400
Subject: [PATCH] Pass void* to the init, cleanup and get_time functions to
 accommodate different clock ID types on different OSes, and move the
 profiler-specific functions back

---
 src/mono/mono/mini/mini-posix.c   | 137 +++++++++++++++++++++++++++--
 src/mono/mono/mini/mini-runtime.c |   7 +-
 src/mono/mono/utils/mono-time.c   | 175 +++++---------------------------------
 src/mono/mono/utils/mono-time.h   |  10 +--
 4 files changed, 158 insertions(+), 171 deletions(-)

diff --git a/src/mono/mono/mini/mini-posix.c b/src/mono/mono/mini/mini-posix.c
index 8441ede..fc681e8 100644
--- a/src/mono/mono/mini/mini-posix.c
+++ b/src/mono/mono/mini/mini-posix.c
@@ -514,6 +514,131 @@ mono_runtime_cleanup_handlers (void)
 
 #ifdef HAVE_PROFILER_SIGNAL
 
+static volatile gint32 sampling_thread_running;
+
+#ifdef HOST_DARWIN
+
+static clock_serv_t sampling_clock;
+
+void
+clock_init_for_profiler (MonoProfilerSampleMode mode)
+{
+}
+
+static void
+clock_sleep_ns_abs (guint64 ns_abs)
+{
+	kern_return_t ret;
+	mach_timespec_t then, remain_unused;
+
+	then.tv_sec = ns_abs / 1000000000;
+	then.tv_nsec = ns_abs % 1000000000;
+
+	do {
+		ret = clock_sleep (sampling_clock, TIME_ABSOLUTE, then, &remain_unused);
+
+		if (ret != KERN_SUCCESS && ret != KERN_ABORTED)
+			g_error ("%s: clock_sleep () returned %d", __func__, ret);
+	} while (ret == KERN_ABORTED && mono_atomic_load_i32 (&sampling_thread_running));
+}
+
+#else
+
+static clockid_t sampling_clock;
+
+void
+clock_init_for_profiler (MonoProfilerSampleMode mode)
+{
+	switch (mode) {
+	case MONO_PROFILER_SAMPLE_MODE_PROCESS: {
+		/*
+		 * If we don't have clock_nanosleep (), measuring the process time
+		 * makes very little sense as we can only use nanosleep () to sleep on
+		 * real time.
+		 */
+#if defined(HAVE_CLOCK_NANOSLEEP) && !defined(__PASE__)
+		struct timespec ts = { 0 };
+
+		/*
+		 * Some systems (e.g. Windows Subsystem for Linux) declare the
+		 * CLOCK_PROCESS_CPUTIME_ID clock but don't actually support it. For
+		 * those systems, we fall back to CLOCK_MONOTONIC if we get EINVAL.
+		 */
+		if (clock_nanosleep (CLOCK_PROCESS_CPUTIME_ID, TIMER_ABSTIME, &ts, NULL) != EINVAL) {
+			sampling_clock = CLOCK_PROCESS_CPUTIME_ID;
+			break;
+		}
+#endif
+
+		// fallthrough
+	}
+	case MONO_PROFILER_SAMPLE_MODE_REAL: sampling_clock = CLOCK_MONOTONIC; break;
+	default: g_assert_not_reached (); break;
+	}
+}
+
+static void
+clock_sleep_ns_abs (guint64 ns_abs)
+{
+#if defined(HAVE_CLOCK_NANOSLEEP) && !defined(__PASE__)
+	int ret;
+	struct timespec then;
+
+	then.tv_sec = ns_abs / 1000000000;
+	then.tv_nsec = ns_abs % 1000000000;
+
+	do {
+		ret = clock_nanosleep (sampling_clock, TIMER_ABSTIME, &then, NULL);
+
+		if (ret != 0 && ret != EINTR)
+			g_error ("%s: clock_nanosleep () returned %d", __func__, ret);
+	} while (ret == EINTR && mono_atomic_load_i32 (&sampling_thread_running));
+#else
+	int ret;
+	gint64 diff;
+	struct timespec req;
+
+	/*
+	 * What follows is a crude attempt at emulating clock_nanosleep () on OSs
+	 * which don't provide it (e.g. FreeBSD).
+	 *
+	 * The problem with nanosleep () is that if it is interrupted by a signal,
+	 * time will drift as a result of having to restart the call after the
+	 * signal handler has finished. For this reason, we avoid using the rem
+	 * argument of nanosleep (). Instead, before every nanosleep () call, we
+	 * check if enough time has passed to satisfy the sleep request. If yes, we
+	 * simply return. If not, we calculate the difference and do another sleep.
+	 *
+	 * This should reduce the amount of drift that happens because we account
+	 * for the time spent executing the signal handler, which nanosleep () is
+	 * not guaranteed to do for the rem argument.
+	 *
+	 * The downside to this approach is that it is slightly expensive: We have
+	 * to make an extra system call to retrieve the current time whenever we're
+	 * going to restart a nanosleep () call. This is unlikely to be a problem
+	 * in practice since the sampling thread won't be receiving many signals in
+	 * the first place (it's a tools thread, so no STW), and because typical
+	 * sleep periods for the thread are many orders of magnitude bigger than
+	 * the time it takes to actually perform that system call (just a few
+	 * nanoseconds).
+	 */
+	do {
+		diff = (gint64) ns_abs - (gint64) mono_clock_get_time_ns (&sampling_clock);
+
+		if (diff <= 0)
+			break;
+
+		req.tv_sec = diff / 1000000000;
+		req.tv_nsec = diff % 1000000000;
+
+		if ((ret = nanosleep (&req, NULL)) == -1 && errno != EINTR)
+			g_error ("%s: nanosleep () returned -1, errno = %d", __func__, errno);
+	} while (ret == -1 && mono_atomic_load_i32 (&sampling_thread_running));
+#endif
+}
+
+#endif
+
 static int profiler_signal;
 static volatile gint32 sampling_thread_exiting;
 static MonoOSEvent sampling_thread_exited;
@@ -566,17 +691,17 @@ init:
 		goto init;
 	}
 
-	mono_clock_init ();
-	mono_clock_init_for_profiler (mode);
+	mono_clock_init (&sampling_clock);
+	clock_init_for_profiler (mode);
 
-	for (guint64 sleep = mono_clock_get_time_ns (); mono_atomic_load_i32 (&sampling_thread_running); mono_clock_sleep_ns_abs (sleep)) {
+	for (guint64 sleep = mono_clock_get_time_ns (&sampling_clock); mono_atomic_load_i32 (&sampling_thread_running); clock_sleep_ns_abs (sleep)) {
 		uint32_t freq;
 		MonoProfilerSampleMode new_mode;
 
 		mono_profiler_get_sample_mode (NULL, &new_mode, &freq);
 
 		if (new_mode != mode) {
-			mono_clock_cleanup ();
+			mono_clock_cleanup (&sampling_clock);
 			goto init;
 		}
 
@@ -598,7 +723,7 @@ init:
 			}
 		} FOREACH_THREAD_SAFE_END
 	}
 
-	mono_clock_cleanup ();
+	mono_clock_cleanup (&sampling_clock);
 
 done:
 	mono_atomic_store_i32 (&sampling_thread_exiting, 1);
@@ -628,7 +753,7 @@ mono_runtime_shutdown_stat_profiler (void)
 	 * the sleep progresses very slowly as a result of the low CPU activity.
 	 *
 	 * We fix this by repeatedly sending the profiler signal to the sampler
-	 * thread in order to interrupt the sleep. mono_clock_sleep_ns_abs () will check
+	 * thread in order to interrupt the sleep. clock_sleep_ns_abs () will check
 	 * sampling_thread_running upon an interrupt and return immediately if it's
 	 * zero. profiler_signal_handler () has a special case to ignore the signal
 	 * for the sampler thread.
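
The fallback branch above emulates clock_nanosleep () by re-checking an absolute deadline before every nanosleep () call, so time spent in signal handlers does not push the wake-up time back. A minimal, self-contained sketch of that pattern (illustrative only: the now_ns () and sleep_until_ns () helpers are not part of this patch, and CLOCK_MONOTONIC is assumed):

#include <errno.h>
#include <stdint.h>
#include <time.h>

/* Current CLOCK_MONOTONIC time in nanoseconds (illustrative helper). */
static uint64_t
now_ns (void)
{
	struct timespec ts;
	clock_gettime (CLOCK_MONOTONIC, &ts);
	return (uint64_t) ts.tv_sec * 1000000000 + (uint64_t) ts.tv_nsec;
}

/* Sleep until the absolute deadline ns_abs. After every interruption the
 * remaining time is recomputed from the clock instead of trusting the rem
 * argument of nanosleep (), which keeps the deadline from drifting. */
static void
sleep_until_ns (uint64_t ns_abs)
{
	for (;;) {
		int64_t diff = (int64_t) ns_abs - (int64_t) now_ns ();
		struct timespec req;

		if (diff <= 0)
			return;

		req.tv_sec = diff / 1000000000;
		req.tv_nsec = diff % 1000000000;

		if (nanosleep (&req, NULL) == 0)
			return;

		if (errno != EINTR)
			return; /* a real error; a caller would report it */
	}
}
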
diff --git a/src/mono/mono/mini/mini-runtime.c b/src/mono/mono/mini/mini-runtime.c
index e051e70..b2ddbd6 100644
--- a/src/mono/mono/mini/mini-runtime.c
+++ b/src/mono/mono/mini/mini-runtime.c
@@ -1935,6 +1935,7 @@ static FILE *perf_dump_file;
 static mono_mutex_t perf_dump_mutex;
 static void *perf_dump_mmap_addr = MAP_FAILED;
 static guint32 perf_dump_pid;
+static clockid_t clock_id = CLOCK_MONOTONIC;
 
 enum {
 	JIT_DUMP_MAGIC = 0x4A695444,
@@ -2021,7 +2022,7 @@ add_file_header_info (FileHeader *header)
 	header->elf_mach = ELF_MACHINE;
 	header->pad1 = 0;
 	header->pid = perf_dump_pid;
-	header->timestamp = mono_clock_get_time_ns ();
+	header->timestamp = mono_clock_get_time_ns (&clock_id);
 	header->flags = 0;
 }
 
@@ -2047,7 +2048,7 @@ mono_emit_jit_dump (MonoJitInfo *jinfo, gpointer code)
 
 		// TODO: write debugInfo and unwindInfo immediately before the JitCodeLoadRecord (while lock is held).
 
-		record.header.timestamp = mono_clock_get_time_ns ();
+		record.header.timestamp = mono_clock_get_time_ns (&clock_id);
 
 		fwrite (&record, sizeof (record), 1, perf_dump_file);
 		fwrite (jinfo->d.method->name, nameLen + 1, 1, perf_dump_file);
@@ -2061,7 +2062,7 @@ static void
 add_basic_JitCodeLoadRecord_info (JitCodeLoadRecord *record)
 {
 	record->header.id = JIT_CODE_LOAD;
-	record->header.timestamp = mono_clock_get_time_ns ();
+	record->header.timestamp = mono_clock_get_time_ns (&clock_id);
 	record->pid = perf_dump_pid;
 	record->tid = syscall (SYS_gettid);
 }
diff --git a/src/mono/mono/utils/mono-time.c b/src/mono/mono/utils/mono-time.c
index e1ff12f..0de660e 100644
--- a/src/mono/mono/utils/mono-time.c
+++ b/src/mono/mono/utils/mono-time.c
@@ -235,219 +235,84 @@ mono_100ns_datetime_from_timeval (struct timeval tv)
 
 #endif
 
-volatile gint32 sampling_thread_running;
-
-#ifdef HOST_DARWIN
-
-static clock_serv_t sampling_clock_service;
+#if defined(HOST_DARWIN) || defined(HOST_LINUX)
 
 void
-mono_clock_init (void)
+mono_clock_init (void *clk_id)
 {
+#ifdef HOST_DARWIN
 	kern_return_t ret;
 
 	do {
-		ret = host_get_clock_service (mach_host_self (), SYSTEM_CLOCK, &sampling_clock_service);
+		ret = host_get_clock_service (mach_host_self (), SYSTEM_CLOCK, (clock_serv_t *)clk_id);
 	} while (ret == KERN_ABORTED);
 
 	if (ret != KERN_SUCCESS)
 		g_error ("%s: host_get_clock_service () returned %d", __func__, ret);
+#endif
 }
 
 void
-mono_clock_init_for_profiler (MonoProfilerSampleMode mode)
-{
-}
-
-void
-mono_clock_cleanup (void)
+mono_clock_cleanup (void *clk_id)
 {
+#ifdef HOST_DARWIN
 	kern_return_t ret;
 
 	do {
-		ret = mach_port_deallocate (mach_task_self (), sampling_clock_service);
+		ret = mach_port_deallocate (mach_task_self (), *(clock_serv_t *)clk_id);
 	} while (ret == KERN_ABORTED);
 
 	if (ret != KERN_SUCCESS)
 		g_error ("%s: mach_port_deallocate () returned %d", __func__, ret);
+#endif
 }
 
 guint64
-mono_clock_get_time_ns (void)
+mono_clock_get_time_ns (void *clk_id)
 {
+#ifdef HOST_DARWIN
+
+	kern_return_t ret;
 	mach_timespec_t mach_ts;
 
 	do {
-		ret = clock_get_time (sampling_clock_service, &mach_ts);
+		ret = clock_get_time (*(clock_serv_t *)clk_id, &mach_ts);
 	} while (ret == KERN_ABORTED);
 
 	if (ret != KERN_SUCCESS)
 		g_error ("%s: clock_get_time () returned %d", __func__, ret);
 
 	return ((guint64) mach_ts.tv_sec * 1000000000) + (guint64) mach_ts.tv_nsec;
-}
-
-void
-mono_clock_sleep_ns_abs (guint64 ns_abs)
-{
-	kern_return_t ret;
-	mach_timespec_t then, remain_unused;
-
-	then.tv_sec = ns_abs / 1000000000;
-	then.tv_nsec = ns_abs % 1000000000;
-
-	do {
-		ret = clock_sleep (sampling_clock_service, TIME_ABSOLUTE, then, &remain_unused);
-
-		if (ret != KERN_SUCCESS && ret != KERN_ABORTED)
-			g_error ("%s: clock_sleep () returned %d", __func__, ret);
-	} while (ret == KERN_ABORTED && mono_atomic_load_i32 (&sampling_thread_running));
-}
-
-#elif defined(HOST_LINUX)
-
-static clockid_t sampling_posix_clock;
-
-void
-mono_clock_init (void)
-{
-}
-
-void
-mono_clock_init_for_profiler (MonoProfilerSampleMode mode)
-{
-	switch (mode) {
-	case MONO_PROFILER_SAMPLE_MODE_PROCESS: {
-		/*
-		 * If we don't have clock_nanosleep (), measuring the process time
-		 * makes very little sense as we can only use nanosleep () to sleep on
-		 * real time.
-		 */
-#if defined(HAVE_CLOCK_NANOSLEEP) && !defined(__PASE__)
-		struct timespec ts = { 0 };
-
-		/*
-		 * Some systems (e.g. Windows Subsystem for Linux) declare the
-		 * CLOCK_PROCESS_CPUTIME_ID clock but don't actually support it. For
-		 * those systems, we fall back to CLOCK_MONOTONIC if we get EINVAL.
-		 */
-		if (clock_nanosleep (CLOCK_PROCESS_CPUTIME_ID, TIMER_ABSTIME, &ts, NULL) != EINVAL) {
-			sampling_posix_clock = CLOCK_PROCESS_CPUTIME_ID;
-			break;
-		}
-#endif
-
-		// fallthrough
-	}
-	case MONO_PROFILER_SAMPLE_MODE_REAL: sampling_posix_clock = CLOCK_MONOTONIC; break;
-	default: g_assert_not_reached (); break;
-	}
-}
-
-void
-mono_clock_cleanup (void)
-{
-}
-
-guint64
-mono_clock_get_time_ns (void)
-{
+
+#else
+
 	struct timespec ts;
 
-	if (clock_gettime (sampling_posix_clock, &ts) == -1)
+	if (clock_gettime (*(clockid_t *)clk_id, &ts) == -1)
 		g_error ("%s: clock_gettime () returned -1, errno = %d", __func__, errno);
 
 	return ((guint64) ts.tv_sec * 1000000000) + (guint64) ts.tv_nsec;
-}
-
-void
-mono_clock_sleep_ns_abs (guint64 ns_abs)
-{
-#if defined(HAVE_CLOCK_NANOSLEEP) && !defined(__PASE__)
-	int ret;
-	struct timespec then;
-
-	then.tv_sec = ns_abs / 1000000000;
-	then.tv_nsec = ns_abs % 1000000000;
-
-	do {
-		ret = clock_nanosleep (sampling_posix_clock, TIMER_ABSTIME, &then, NULL);
-
-		if (ret != 0 && ret != EINTR)
-			g_error ("%s: clock_nanosleep () returned %d", __func__, ret);
-	} while (ret == EINTR && mono_atomic_load_i32 (&sampling_thread_running));
-#else
-	int ret;
-	gint64 diff;
-	struct timespec req;
-
-	/*
-	 * What follows is a crude attempt at emulating clock_nanosleep () on OSs
-	 * which don't provide it (e.g. FreeBSD).
-	 *
-	 * The problem with nanosleep () is that if it is interrupted by a signal,
-	 * time will drift as a result of having to restart the call after the
-	 * signal handler has finished. For this reason, we avoid using the rem
-	 * argument of nanosleep (). Instead, before every nanosleep () call, we
-	 * check if enough time has passed to satisfy the sleep request. If yes, we
-	 * simply return. If not, we calculate the difference and do another sleep.
-	 *
-	 * This should reduce the amount of drift that happens because we account
-	 * for the time spent executing the signal handler, which nanosleep () is
-	 * not guaranteed to do for the rem argument.
-	 *
-	 * The downside to this approach is that it is slightly expensive: We have
-	 * to make an extra system call to retrieve the current time whenever we're
-	 * going to restart a nanosleep () call. This is unlikely to be a problem
-	 * in practice since the sampling thread won't be receiving many signals in
-	 * the first place (it's a tools thread, so no STW), and because typical
-	 * sleep periods for the thread are many orders of magnitude bigger than
-	 * the time it takes to actually perform that system call (just a few
-	 * nanoseconds).
-	 */
-	do {
-		diff = (gint64) ns_abs - (gint64) clock_get_time_ns ();
-
-		if (diff <= 0)
-			break;
-
-		req.tv_sec = diff / 1000000000;
-		req.tv_nsec = diff % 1000000000;
-
-		if ((ret = nanosleep (&req, NULL)) == -1 && errno != EINTR)
-			g_error ("%s: nanosleep () returned -1, errno = %d", __func__, errno);
-	} while (ret == -1 && mono_atomic_load_i32 (&sampling_thread_running));
 #endif
 }
 
 #else
 
 void
-mono_clock_init (void)
+mono_clock_init (void *clk_id)
 {
 }
 
 void
-mono_clock_init_for_profiler (MonoProfilerSampleMode mode)
-{
-}
-
-void
-mono_clock_cleanup (void)
+mono_clock_cleanup (void *clk_id)
 {
 }
 
 guint64
-mono_clock_get_time_ns (void)
+mono_clock_get_time_ns (void *clk_id)
 {
 	// TODO: need to implement time stamp function for PC
 	return 0;
 }
 
-void
-mono_clock_sleep_ns_abs (guint64 ns_abs)
-{
-}
-
 #endif
diff --git a/src/mono/mono/utils/mono-time.h b/src/mono/mono/utils/mono-time.h
index b5cd8b3..65ea0f2 100644
--- a/src/mono/mono/utils/mono-time.h
+++ b/src/mono/mono/utils/mono-time.h
@@ -7,7 +7,6 @@
 #include
 #include
-#include
 #ifdef HAVE_SYS_TIME_H
 #include
 #endif
 
@@ -30,12 +29,9 @@ gint64 mono_100ns_datetime (void);
 gint64 mono_100ns_datetime_from_timeval (struct timeval tv);
 #endif
 
-extern volatile gint32 sampling_thread_running;
-void mono_clock_init (void);
-void mono_clock_init_for_profiler (MonoProfilerSampleMode mode);
-void mono_clock_cleanup (void);
-guint64 mono_clock_get_time_ns (void);
-void mono_clock_sleep_ns_abs (guint64 ns_abs);
+void mono_clock_init (void *clk_id);
+void mono_clock_cleanup (void *clk_id);
+guint64 mono_clock_get_time_ns (void *clk_id);
 
 /* Stopwatch class for internal runtime use */
 typedef struct {
-- 
2.7.4
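
The header change above reduces the public surface to three functions that take the clock ID by address, so each platform can keep its own clock ID type behind a void *. A short sketch of the calling pattern, mirroring the jitdump usage in mini-runtime.c (illustrative only: example_clock_id and example_timestamp_ns are not part of the patch, and a Linux build where the clock ID is a clockid_t is assumed):

#include <glib.h>
#include <time.h>

/* Prototypes as declared in mono/utils/mono-time.h after this patch. */
void    mono_clock_init (void *clk_id);
void    mono_clock_cleanup (void *clk_id);
guint64 mono_clock_get_time_ns (void *clk_id);

/* On Linux the clock ID is a clockid_t; a Darwin caller would keep a
 * clock_serv_t here instead and pass its address through the same API. */
static clockid_t example_clock_id = CLOCK_MONOTONIC;

static guint64
example_timestamp_ns (void)
{
	/* The address of the platform-specific clock ID is passed as void *;
	 * the implementation casts it back to the concrete type it needs. */
	return mono_clock_get_time_ns (&example_clock_id);
}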