unsigned item_size,
unsigned num_items)
{
- mtx_init(&parent->mutex, mtx_plain);
+ simple_mtx_init(&parent->mutex, mtx_plain);
parent->element_size = ALIGN_POT(sizeof(struct slab_element_header) + item_size,
sizeof(intptr_t));
parent->num_elements = num_items;
/**
 * Destroy a parent slab pool.
 *
 * Releases the parent's mutex — the counterpart to the simple_mtx_init()
 * performed when the parent pool was created. NOTE(review): presumably all
 * child pools attached to this parent must be destroyed first — confirm
 * against the child-pool teardown path.
 *
 * \param parent  parent pool previously initialized by the creation routine
 */
void
slab_destroy_parent(struct slab_parent_pool *parent)
{
   simple_mtx_destroy(&parent->mutex);
}
/**
if (!pool->parent)
return; /* the slab probably wasn't even created */
- mtx_lock(&pool->parent->mutex);
+ simple_mtx_lock(&pool->parent->mutex);
while (pool->pages) {
struct slab_page_header *page = pool->pages;
slab_free_orphaned(elt);
}
- mtx_unlock(&pool->parent->mutex);
+ simple_mtx_unlock(&pool->parent->mutex);
while (pool->free) {
struct slab_element_header *elt = pool->free;
/* First, collect elements that belong to us but were freed from a
* different child pool.
*/
- mtx_lock(&pool->parent->mutex);
+ simple_mtx_lock(&pool->parent->mutex);
pool->free = pool->migrated;
pool->migrated = NULL;
- mtx_unlock(&pool->parent->mutex);
+ simple_mtx_unlock(&pool->parent->mutex);
/* Now allocate a new page. */
if (!pool->free && !slab_add_new_page(pool))
/* The slow case: migration or an orphaned page. */
if (pool->parent)
- mtx_lock(&pool->parent->mutex);
+ simple_mtx_lock(&pool->parent->mutex);
/* Note: we _must_ re-read elt->owner here because the owning child pool
* may have been destroyed by another thread in the meantime.
elt->next = owner->migrated;
owner->migrated = elt;
if (pool->parent)
- mtx_unlock(&pool->parent->mutex);
+ simple_mtx_unlock(&pool->parent->mutex);
} else {
if (pool->parent)
- mtx_unlock(&pool->parent->mutex);
+ simple_mtx_unlock(&pool->parent->mutex);
slab_free_orphaned(elt);
}