#include "qemu/osdep.h"
#include "qemu/units.h"
+#include "qemu/qtree.h"
#include "qemu-common.h"
#define NO_CPU_IO_DEFS
* See also: page_collection_lock().
*/
struct page_collection {
- GTree *tree;
+ QTree *tree;
struct page_entry *max;
};
struct page_entry *pe;
PageDesc *pd;
- pe = g_tree_lookup(set->tree, &index);
+ pe = q_tree_lookup(set->tree, &index);
if (pe) {
return false;
}
}
pe = page_entry_new(pd, index);
- g_tree_insert(set->tree, &pe->index, pe);
+ q_tree_insert(set->tree, &pe->index, pe);
/*
* If this is either (1) the first insertion or (2) a page whose index
end >>= TARGET_PAGE_BITS;
g_assert(start <= end);
- set->tree = g_tree_new_full(tb_page_addr_cmp, NULL, NULL,
+ set->tree = q_tree_new_full(tb_page_addr_cmp, NULL, NULL,
page_entry_destroy);
set->max = NULL;
assert_no_pages_locked();
retry:
- g_tree_foreach(set->tree, page_entry_lock, NULL);
+ q_tree_foreach(set->tree, page_entry_lock, NULL);
for (index = start; index <= end; index++) {
TranslationBlock *tb;
continue;
}
if (page_trylock_add(set, index << TARGET_PAGE_BITS)) {
- g_tree_foreach(set->tree, page_entry_unlock, NULL);
+ q_tree_foreach(set->tree, page_entry_unlock, NULL);
goto retry;
}
assert_page_locked(pd);
(tb->page_addr[1] != -1 &&
page_trylock_add(set, tb->page_addr[1]))) {
/* drop all locks, and reacquire in order */
- g_tree_foreach(set->tree, page_entry_unlock, NULL);
+ q_tree_foreach(set->tree, page_entry_unlock, NULL);
goto retry;
}
}
void page_collection_unlock(struct page_collection *set)
{
/* entries are unlocked and freed via page_entry_destroy */
- g_tree_destroy(set->tree);
+ q_tree_destroy(set->tree);
g_free(set);
}
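Illustrative only, not part of the patch: the page_collection hunks above rely on QTree being a drop-in replacement for GTree, including the destroy-notify behaviour of q_tree_new_full() and q_tree_destroy(). A minimal standalone sketch of that usage pattern, assuming the q_tree_* signatures mirror their g_tree_* counterparts; key_cmp(), Entry and entry_free() are hypothetical stand-ins for tb_page_addr_cmp(), struct page_entry and page_entry_destroy():

#include "qemu/osdep.h"
#include "qemu/qtree.h"

/* hypothetical comparator, with the GCompareDataFunc shape q_tree_new_full() expects */
static gint key_cmp(gconstpointer ap, gconstpointer bp, gpointer udata)
{
    const unsigned long a = *(const unsigned long *)ap;
    const unsigned long b = *(const unsigned long *)bp;

    return a < b ? -1 : a > b ? 1 : 0;
}

/* hypothetical stand-in for struct page_entry */
typedef struct {
    unsigned long index;
} Entry;

/* hypothetical value destructor, playing the role of page_entry_destroy() */
static void entry_free(gpointer value)
{
    g_free(value);
}

static void page_set_sketch(void)
{
    Entry *e = g_new0(Entry, 1);
    QTree *tree;

    /* as in page_collection_lock(): no key-compare data, no key destructor,
     * values released through the destroy notify */
    tree = q_tree_new_full(key_cmp, NULL, NULL, entry_free);

    e->index = 0x1000;
    q_tree_insert(tree, &e->index, e);        /* key points into the value, as with page_entry */
    g_assert(q_tree_lookup(tree, &e->index) == e);

    /* as in page_collection_unlock(): entries are freed via entry_free() */
    q_tree_destroy(tree);
}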
#include "qemu/host-utils.h"
#include "qemu/qemu-print.h"
#include "qemu/timer.h"
+#include "qemu/qtree.h"
/* Note: the long term plan is to reduce the dependencies on the QEMU
CPU definitions. Currently they are used for qemu_ld/st
struct tcg_region_tree {
QemuMutex lock;
- GTree *tree;
+ QTree *tree;
/* padding to avoid false sharing is computed at run-time */
};
struct tcg_region_tree *rt = region_trees + i * tree_size;
qemu_mutex_init(&rt->lock);
- rt->tree = g_tree_new(tb_tc_cmp);
+ rt->tree = q_tree_new(tb_tc_cmp);
}
}
struct tcg_region_tree *rt = tc_ptr_to_region_tree(tb->tc.ptr);
qemu_mutex_lock(&rt->lock);
- g_tree_insert(rt->tree, &tb->tc, tb);
+ q_tree_insert(rt->tree, &tb->tc, tb);
qemu_mutex_unlock(&rt->lock);
}
struct tcg_region_tree *rt = tc_ptr_to_region_tree(tb->tc.ptr);
qemu_mutex_lock(&rt->lock);
- g_tree_remove(rt->tree, &tb->tc);
+ q_tree_remove(rt->tree, &tb->tc);
qemu_mutex_unlock(&rt->lock);
}
struct tb_tc s = { .ptr = (void *)tc_ptr };
qemu_mutex_lock(&rt->lock);
- tb = g_tree_lookup(rt->tree, &s);
+ tb = q_tree_lookup(rt->tree, &s);
qemu_mutex_unlock(&rt->lock);
return tb;
}
for (i = 0; i < region.n; i++) {
struct tcg_region_tree *rt = region_trees + i * tree_size;
- g_tree_foreach(rt->tree, func, user_data);
+ q_tree_foreach(rt->tree, func, user_data);
}
tcg_region_tree_unlock_all();
}
for (i = 0; i < region.n; i++) {
struct tcg_region_tree *rt = region_trees + i * tree_size;
- nb_tbs += g_tree_nnodes(rt->tree);
+ nb_tbs += q_tree_nnodes(rt->tree);
}
tcg_region_tree_unlock_all();
return nb_tbs;
for (i = 0; i < region.n; i++) {
struct tcg_region_tree *rt = region_trees + i * tree_size;
- g_tree_foreach(rt->tree, tcg_region_tree_traverse, NULL);
+ q_tree_foreach(rt->tree, tcg_region_tree_traverse, NULL);
/* Increment the refcount first so that destroy acts as a reset */
- g_tree_ref(rt->tree);
- g_tree_destroy(rt->tree);
+ q_tree_ref(rt->tree);
+ q_tree_destroy(rt->tree);
}
tcg_region_tree_unlock_all();
}
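Also illustrative rather than part of the patch: the region-tree hunks additionally depend on GTree-style reference counting, which QTree is assumed to preserve, so that taking a reference before q_tree_destroy() empties the tree instead of freeing it (the "destroy acts as a reset" comment above). A small sketch of that idiom with a hypothetical int_cmp() comparator:

#include "qemu/osdep.h"
#include "qemu/qtree.h"

/* hypothetical two-argument comparator, as expected by q_tree_new() */
static gint int_cmp(gconstpointer ap, gconstpointer bp)
{
    const int a = *(const int *)ap;
    const int b = *(const int *)bp;

    return a < b ? -1 : a > b ? 1 : 0;
}

static void region_tree_sketch(void)
{
    QTree *tree = q_tree_new(int_cmp);
    int key = 42;

    q_tree_insert(tree, &key, &key);
    g_assert(q_tree_nnodes(tree) == 1);

    /* reset: bump the refcount so destroy only empties the tree */
    q_tree_ref(tree);
    q_tree_destroy(tree);
    g_assert(q_tree_nnodes(tree) == 0);

    q_tree_destroy(tree);    /* drops the last reference and frees the tree */
}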