uint64_t thread_id;
uintptr_t thread_ip;
uint32_t payload_data;
+ bool async_frame;
} EventPipeSampleProfileData;
// Rundown flags.
static
gboolean
-eventpipe_walk_managed_stack_for_thread_func (
+eventpipe_walk_managed_stack_for_thread (
MonoStackFrameInfo *frame,
MonoContext *ctx,
- void *data)
+ void *data,
+ bool *async_frame)
{
EP_ASSERT (frame != NULL);
EP_ASSERT (data != NULL);
case FRAME_TYPE_INTERP:
if (!frame->ji)
return FALSE;
+ *async_frame |= frame->ji->async;
MonoMethod *method = frame->ji->async ? NULL : frame->actual_method;
if (method && !m_method_is_wrapper (method))
ep_stack_contents_append ((EventPipeStackContents *)data, (uintptr_t)((uint8_t*)frame->ji->code_start + frame->native_offset), method);
+ else if (!method && frame->ji->async && !frame->ji->is_trampoline)
+ ep_stack_contents_append ((EventPipeStackContents *)data, (uintptr_t)((uint8_t*)frame->ji->code_start), method);
return ep_stack_contents_get_length ((EventPipeStackContents *)data) >= EP_MAX_STACK_DEPTH;
default:
- EP_UNREACHABLE ("eventpipe_walk_managed_stack_for_thread_func");
+ EP_UNREACHABLE ("eventpipe_walk_managed_stack_for_thread");
return FALSE;
}
}
+// Thin wrapper that preserves the original three-argument stack-walk
+// callback signature (frame, ctx, data). It forwards to
+// eventpipe_walk_managed_stack_for_thread and deliberately discards the
+// reported async_frame flag — callers using this entry point do not need
+// to know whether an async frame was encountered during the walk.
static
gboolean
+eventpipe_walk_managed_stack_for_thread_func (
+	MonoStackFrameInfo *frame,
+	MonoContext *ctx,
+	void *data)
+{
+	bool async_frame = FALSE;
+	return eventpipe_walk_managed_stack_for_thread (frame, ctx, data, &async_frame);
+}
+
+static
+gboolean
eventpipe_sample_profiler_walk_managed_stack_for_thread_func (
MonoStackFrameInfo *frame,
MonoContext *ctx,
sample_data->payload_data = EP_SAMPLE_PROFILER_SAMPLE_TYPE_MANAGED;
}
- return eventpipe_walk_managed_stack_for_thread_func (frame, ctx, &sample_data->stack_contents);
+ return eventpipe_walk_managed_stack_for_thread (frame, ctx, &sample_data->stack_contents, &sample_data->async_frame);
}
static
uint32_t filtered_thread_count = 0;
uint32_t sampled_thread_count = 0;
- mono_stop_world (MONO_THREAD_INFO_FLAGS_NO_GC | MONO_THREAD_INFO_FLAGS_NO_SAMPLE);
+ mono_stop_world (MONO_THREAD_INFO_FLAGS_NO_GC);
+
+ gboolean async_context = mono_thread_info_is_async_context ();
+ mono_thread_info_set_is_async_context (TRUE);
// Record all info needed in sample events while runtime is suspended, must be async safe.
FOREACH_THREAD_SAFE_EXCLUDE (thread_info, MONO_THREAD_INFO_FLAGS_NO_GC | MONO_THREAD_INFO_FLAGS_NO_SAMPLE) {
data->thread_id = ep_rt_thread_id_t_to_uint64_t (mono_thread_info_get_tid (thread_info));
data->thread_ip = (uintptr_t)MONO_CONTEXT_GET_IP (&thread_state->ctx);
data->payload_data = EP_SAMPLE_PROFILER_SAMPLE_TYPE_ERROR;
+ data->async_frame = FALSE;
ep_stack_contents_reset (&data->stack_contents);
mono_get_eh_callbacks ()->mono_walk_stack_with_state (eventpipe_sample_profiler_walk_managed_stack_for_thread_func, thread_state, MONO_UNWIND_SIGNAL_SAFE, data);
sampled_thread_count++;
filtered_thread_count++;
} FOREACH_THREAD_SAFE_END
- mono_restart_world (MONO_THREAD_INFO_FLAGS_NO_GC | MONO_THREAD_INFO_FLAGS_NO_SAMPLE);
+ mono_thread_info_set_is_async_context (async_context);
+ mono_restart_world (MONO_THREAD_INFO_FLAGS_NO_GC);
// Fire sample event for threads. Must be done after runtime is resumed since it's not async safe.
// Since we can't keep thread info around after runtime as been suspended, use an empty
for (uint32_t i = 0; i < sampled_thread_count; ++i) {
EventPipeSampleProfileData *data = &g_array_index (_ep_rt_mono_sampled_thread_callstacks, EventPipeSampleProfileData, i);
if (data->payload_data != EP_SAMPLE_PROFILER_SAMPLE_TYPE_ERROR && ep_stack_contents_get_length(&data->stack_contents) > 0) {
+			// If the walk saw an async frame, make sure every sampled frame is registered
+			// in the regular JIT info table before the profile event is written.
+			// TODO: An async frame can contain wrapper methods (no way to check during stackwalk);
+			// we could skip writing the profile event for this specific stackwalk, or clean up
+			// stack_frames before writing the profile event.
+			if (data->async_frame) {
+				// NOTE: use a distinct index name — the enclosing loop already declares
+				// a uint32_t i (see the for over sampled_thread_count), and reusing i
+				// here shadows it and trips -Wshadow.
+				for (int frame_index = 0; frame_index < data->stack_contents.next_available_frame; ++frame_index)
+					mono_jit_info_table_find_internal ((gpointer)data->stack_contents.stack_frames [frame_index], TRUE, FALSE);
+			}
mono_thread_info_set_tid (&adapter, ep_rt_uint64_t_to_thread_id_t (data->thread_id));
ep_write_sample_profile_event (sampling_thread, sampling_event, &adapter, &data->stack_contents, (uint8_t *)&data->payload_data, sizeof (data->payload_data));
}
#define ROUND_DOWN(VALUE,SIZE) ((VALUE) & ~((SIZE) - 1))
-typedef struct {
-	int method_index;
+// Target number of method indexes per hash bucket of an AOT module's
+// async JIT info table (bucket count = nmethods / JIT_INFO_MAP_BUCKET_SIZE + 1).
+#define JIT_INFO_MAP_BUCKET_SIZE 32
+
+typedef struct _JitInfoMap JitInfoMap;
+// Chain node of the bucketed hash map from method_index to its MonoJitInfo.
+// Entries are published with CAS + release fences and traversed with acquire
+// fences, so async (signal-safe) readers can look up JIT info without locks.
+struct _JitInfoMap {
 	MonoJitInfo *jinfo;
+	JitInfoMap *next;
+	int method_index;
+};
#define GOT_INITIALIZING 1
#define GOT_INITIALIZED 2
gpointer *globals;
MonoDl *sofile;
- JitInfoMap *async_jit_info_table;
+ JitInfoMap **async_jit_info_table;
mono_mutex_t mutex;
};
p += mono_seq_point_info_read (&seq_points, p, FALSE);
- // FIXME: Call a function in seq-points.c
- // FIXME:
- MonoJitMemoryManager *jit_mm = get_default_jit_mm ();
- jit_mm_lock (jit_mm);
- /* This could be set already since this function can be called more than once for the same method */
- if (!g_hash_table_lookup (jit_mm->seq_points, method))
- g_hash_table_insert (jit_mm->seq_points, method, seq_points);
- else
- mono_seq_point_info_free (seq_points);
- jit_mm_unlock (jit_mm);
+ if (!async) {
+ // FIXME: Call a function in seq-points.c
+ // FIXME:
+ MonoJitMemoryManager *jit_mm = get_default_jit_mm ();
+ jit_mm_lock (jit_mm);
+ /* This could be set already since this function can be called more than once for the same method */
+ if (!g_hash_table_lookup (jit_mm->seq_points, method))
+ g_hash_table_insert (jit_mm->seq_points, method, seq_points);
+ else
+ mono_seq_point_info_free (seq_points);
+ jit_mm_unlock (jit_mm);
+ }
jinfo->seq_points = seq_points;
}
p += map_size;
}
- if (amodule != m_class_get_image (jinfo->d.method->klass)->aot_module) {
+ if (amodule != m_class_get_image (jinfo->d.method->klass)->aot_module && !async) {
mono_aot_lock ();
if (!ji_to_amodule)
ji_to_amodule = g_hash_table_new (NULL, NULL);
int nmethods;
gpointer *methods;
guint8 *code1, *code2;
- int methods_len, i;
+ int methods_len;
gboolean async;
gpointer orig_addr;
/* In async mode, jinfo is not added to the normal jit info table, so have to cache it ourselves */
if (async) {
- JitInfoMap *table = amodule->async_jit_info_table;
- int len;
-
+ JitInfoMap **table = amodule->async_jit_info_table;
+ LOAD_ACQUIRE_FENCE;
if (table) {
- len = table [0].method_index;
- for (i = 1; i < len; ++i) {
- if (table [i].method_index == method_index)
- return table [i].jinfo;
+ int buckets = (amodule->info.nmethods / JIT_INFO_MAP_BUCKET_SIZE) + 1;
+ JitInfoMap *current_item = table [method_index % buckets];
+ LOAD_ACQUIRE_FENCE;
+ while (current_item) {
+ if (current_item->method_index == method_index)
+ return current_item->jinfo;
+ current_item = current_item->next;
+ LOAD_ACQUIRE_FENCE;
}
}
}
g_assert ((guint8*)addr >= (guint8*)jinfo->code_start);
- /* Add it to the normal JitInfo tables */
if (async) {
- JitInfoMap *old_table, *new_table;
- int len;
+ /* Add it to the async JitInfo tables */
+ JitInfoMap **current_table, **new_table;
+ JitInfoMap *current_item, *new_item;
+ int buckets = (amodule->info.nmethods / JIT_INFO_MAP_BUCKET_SIZE) + 1;
+
+ for (;;) {
+ current_table = amodule->async_jit_info_table;
+ LOAD_ACQUIRE_FENCE;
+ if (current_table)
+ break;
- /*
- * Use a simple inmutable table with linear search to cache async jit info entries.
- * This assumes that the number of entries is small.
- */
- while (TRUE) {
- /* Copy the table, adding a new entry at the end */
- old_table = amodule->async_jit_info_table;
- if (old_table)
- len = old_table[0].method_index;
- else
- len = 1;
- new_table = (JitInfoMap *)alloc0_jit_info_data (mem_manager, (len + 1) * sizeof (JitInfoMap), async);
- if (old_table)
- memcpy (new_table, old_table, len * sizeof (JitInfoMap));
- new_table [0].method_index = len + 1;
- new_table [len].method_index = method_index;
- new_table [len].jinfo = jinfo;
- /* Publish it */
- mono_memory_barrier ();
- if (mono_atomic_cas_ptr ((volatile gpointer *)&amodule->async_jit_info_table, new_table, old_table) == old_table)
+ new_table = alloc0_jit_info_data (mem_manager, buckets * sizeof (JitInfoMap*), async);
+ STORE_RELEASE_FENCE;
+ if (mono_atomic_cas_ptr ((volatile gpointer *)&amodule->async_jit_info_table, new_table, current_table) == current_table)
+ break;
+ }
+
+ new_item = alloc0_jit_info_data (mem_manager, sizeof (JitInfoMap), async);
+ new_item->method_index = method_index;
+ new_item->jinfo = jinfo;
+
+ for (;;) {
+ current_item = amodule->async_jit_info_table [method_index % buckets];
+ LOAD_ACQUIRE_FENCE;
+ new_item->next = current_item;
+ STORE_RELEASE_FENCE;
+ if (mono_atomic_cas_ptr ((volatile gpointer *)&amodule->async_jit_info_table [method_index % buckets], new_item, current_item) == current_item)
break;
}
} else {
+ /* Add it to the normal JitInfo tables */
mono_jit_info_table_add (jinfo);
}