From 2d123e1fd1cdbb86bf15586a6867b8ac5b9b19c8 Mon Sep 17 00:00:00 2001
From: Hyunggi Lee
Date: Wed, 22 Feb 2023 12:13:48 +0900
Subject: [PATCH] tcg: use QTree instead of GTree

qemu-user can hang in a multi-threaded fork. One common reason is that
when creating a TB, between fork and exec we manipulate a GTree whose
memory allocator (GSlice) is not fork-safe.

Although POSIX does not mandate it, the system's allocator (e.g.
tcmalloc, libc malloc) is probably fork-safe.

Fix some of these hangs by using QTree, which uses the system's
allocator regardless of the Glib version that we used at configuration
time.

Tested with the test program in the original bug report.

Fixes: #285
Change-Id: I61bbe16ee5b639615a54f5afad0fa084c63c9b42
Signed-off-by: Emilio Cota
---
 accel/tcg/translate-all.c | 17 +++++++++--------
 tcg/tcg.c                 | 21 +++++++++++----------
 2 files changed, 20 insertions(+), 18 deletions(-)

diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index 4572b49..c54d5e4 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -19,6 +19,7 @@
 
 #include "qemu/osdep.h"
 #include "qemu/units.h"
+#include "qemu/qtree.h"
 #include "qemu-common.h"
 
 #define NO_CPU_IO_DEFS
@@ -158,7 +159,7 @@ struct page_entry {
  * See also: page_collection_lock().
  */
 struct page_collection {
-    GTree *tree;
+    QTree *tree;
     struct page_entry *max;
 };
 
@@ -772,7 +773,7 @@ static bool page_trylock_add(struct page_collection *set, tb_page_addr_t addr)
     struct page_entry *pe;
     PageDesc *pd;
 
-    pe = g_tree_lookup(set->tree, &index);
+    pe = q_tree_lookup(set->tree, &index);
     if (pe) {
         return false;
     }
@@ -783,7 +784,7 @@ static bool page_trylock_add(struct page_collection *set, tb_page_addr_t addr)
     }
 
     pe = page_entry_new(pd, index);
-    g_tree_insert(set->tree, &pe->index, pe);
+    q_tree_insert(set->tree, &pe->index, pe);
 
     /*
      * If this is either (1) the first insertion or (2) a page whose index
@@ -830,13 +831,13 @@ page_collection_lock(tb_page_addr_t start, tb_page_addr_t end)
     end >>= TARGET_PAGE_BITS;
     g_assert(start <= end);
 
-    set->tree = g_tree_new_full(tb_page_addr_cmp, NULL, NULL,
+    set->tree = q_tree_new_full(tb_page_addr_cmp, NULL, NULL,
                                 page_entry_destroy);
     set->max = NULL;
     assert_no_pages_locked();
 
  retry:
-    g_tree_foreach(set->tree, page_entry_lock, NULL);
+    q_tree_foreach(set->tree, page_entry_lock, NULL);
 
     for (index = start; index <= end; index++) {
         TranslationBlock *tb;
@@ -847,7 +848,7 @@ page_collection_lock(tb_page_addr_t start, tb_page_addr_t end)
             continue;
         }
         if (page_trylock_add(set, index << TARGET_PAGE_BITS)) {
-            g_tree_foreach(set->tree, page_entry_unlock, NULL);
+            q_tree_foreach(set->tree, page_entry_unlock, NULL);
             goto retry;
         }
         assert_page_locked(pd);
@@ -856,7 +857,7 @@ page_collection_lock(tb_page_addr_t start, tb_page_addr_t end)
                 (tb->page_addr[1] != -1 && page_trylock_add(set, tb->page_addr[1]))) {
                 /* drop all locks, and reacquire in order */
-                g_tree_foreach(set->tree, page_entry_unlock, NULL);
+                q_tree_foreach(set->tree, page_entry_unlock, NULL);
                 goto retry;
             }
         }
     }
@@ -867,7 +868,7 @@ page_collection_lock(tb_page_addr_t start, tb_page_addr_t end)
 
 void page_collection_unlock(struct page_collection *set)
 {
     /* entries are unlocked and freed via page_entry_destroy */
-    g_tree_destroy(set->tree);
+    q_tree_destroy(set->tree);
     g_free(set);
 }
 
diff --git a/tcg/tcg.c b/tcg/tcg.c
index 43c6cf8..a7a94cc 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -35,6 +35,7 @@
 #include "qemu/host-utils.h"
 #include "qemu/qemu-print.h"
 #include "qemu/timer.h"
+#include "qemu/qtree.h"
 
 /* Note: the long term plan is to reduce the dependencies on the QEMU
    CPU definitions. Currently they are used for qemu_ld/st
@@ -163,7 +164,7 @@ TCGv_env cpu_env = 0;
 
 struct tcg_region_tree {
     QemuMutex lock;
-    GTree *tree;
+    QTree *tree;
     /* padding to avoid false sharing is computed at run-time */
 };
 
@@ -396,7 +397,7 @@ static void tcg_region_trees_init(void)
         struct tcg_region_tree *rt = region_trees + i * tree_size;
 
         qemu_mutex_init(&rt->lock);
-        rt->tree = g_tree_new(tb_tc_cmp);
+        rt->tree = q_tree_new(tb_tc_cmp);
     }
 }
 
@@ -423,7 +424,7 @@ void tcg_tb_insert(TranslationBlock *tb)
     struct tcg_region_tree *rt = tc_ptr_to_region_tree(tb->tc.ptr);
 
     qemu_mutex_lock(&rt->lock);
-    g_tree_insert(rt->tree, &tb->tc, tb);
+    q_tree_insert(rt->tree, &tb->tc, tb);
     qemu_mutex_unlock(&rt->lock);
 }
 
@@ -432,7 +433,7 @@ void tcg_tb_remove(TranslationBlock *tb)
     struct tcg_region_tree *rt = tc_ptr_to_region_tree(tb->tc.ptr);
 
     qemu_mutex_lock(&rt->lock);
-    g_tree_remove(rt->tree, &tb->tc);
+    q_tree_remove(rt->tree, &tb->tc);
     qemu_mutex_unlock(&rt->lock);
 }
 
@@ -448,7 +449,7 @@ TranslationBlock *tcg_tb_lookup(uintptr_t tc_ptr)
     struct tb_tc s = { .ptr = (void *)tc_ptr };
 
     qemu_mutex_lock(&rt->lock);
-    tb = g_tree_lookup(rt->tree, &s);
+    tb = q_tree_lookup(rt->tree, &s);
     qemu_mutex_unlock(&rt->lock);
     return tb;
 }
@@ -483,7 +484,7 @@ void tcg_tb_foreach(GTraverseFunc func, gpointer user_data)
     for (i = 0; i < region.n; i++) {
         struct tcg_region_tree *rt = region_trees + i * tree_size;
 
-        g_tree_foreach(rt->tree, func, user_data);
+        q_tree_foreach(rt->tree, func, user_data);
     }
     tcg_region_tree_unlock_all();
 }
@@ -497,7 +498,7 @@ size_t tcg_nb_tbs(void)
     for (i = 0; i < region.n; i++) {
         struct tcg_region_tree *rt = region_trees + i * tree_size;
 
-        nb_tbs += g_tree_nnodes(rt->tree);
+        nb_tbs += q_tree_nnodes(rt->tree);
     }
     tcg_region_tree_unlock_all();
     return nb_tbs;
@@ -519,10 +520,10 @@ static void tcg_region_tree_reset_all(void)
     for (i = 0; i < region.n; i++) {
         struct tcg_region_tree *rt = region_trees + i * tree_size;
 
-        g_tree_foreach(rt->tree, tcg_region_tree_traverse, NULL);
+        q_tree_foreach(rt->tree, tcg_region_tree_traverse, NULL);
         /* Increment the refcount first so that destroy acts as a reset */
-        g_tree_ref(rt->tree);
-        g_tree_destroy(rt->tree);
+        q_tree_ref(rt->tree);
+        q_tree_destroy(rt->tree);
     }
     tcg_region_tree_unlock_all();
 }
-- 
2.7.4
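A note on the failure mode, with a hypothetical sketch (this is not the
reproducer from the bug report; the program, the helper name churn, and the
iteration counts below are made up for illustration): in a multi-threaded
process, fork() can happen while another thread holds an allocator-internal
lock. Only the forking thread exists in the child, so if the allocator does
not reinitialize its locks across fork (glibc malloc does, via its fork
handlers; GSlice does not), the child's first allocation can block forever.
That is the hang that switching the tree to QTree, and therefore to the
system allocator, is meant to avoid.

/* Hypothetical sketch of the hazard pattern; build with: cc -O2 -pthread */
#include <pthread.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/wait.h>

static void *churn(void *arg)
{
    (void)arg;
    /* Keep allocator locks busy so fork() often lands while one is held. */
    for (;;) {
        free(malloc(64));
    }
    return NULL;
}

int main(void)
{
    pthread_t tid;
    pthread_create(&tid, NULL, churn, NULL);

    for (int i = 0; i < 1000; i++) {
        pid_t pid = fork();
        if (pid == 0) {
            /*
             * Child: only the forking thread survives.  Allocating here is
             * exactly the "manipulate a tree between fork and exec" case;
             * it deadlocks if the allocator's lock was held at fork time
             * and the allocator has no fork handlers (the GSlice case).
             */
            free(malloc(64));
            _exit(0);
        }
        waitpid(pid, NULL, 0);
    }
    return 0;
}

With glibc malloc this sketch normally completes, precisely because glibc's
fork handlers make it fork-safe; the hang appears when the allocation path
between fork and exec goes through an allocator without such handlers, which
is why the patch routes the TB trees through the system allocator.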