ws->num_mapped_buffers--;
}
+ simple_mtx_destroy(&bo->lock);
FREE(bo);
}
if (r)
goto error_va_map;
+ simple_mtx_init(&bo->lock, mtx_plain);
pipe_reference_init(&bo->base.reference, 1);
bo->base.alignment = alignment;
bo->base.usage = 0;
for (unsigned i = 0; i < slab->base.num_entries; ++i) {
struct amdgpu_winsys_bo *bo = &slab->entries[i];
+ simple_mtx_init(&bo->lock, mtx_plain);
bo->base.alignment = entry_size;
bo->base.usage = slab->buffer->base.usage;
bo->base.size = entry_size;
{
struct amdgpu_slab *slab = amdgpu_slab(pslab);
- for (unsigned i = 0; i < slab->base.num_entries; ++i)
+ for (unsigned i = 0; i < slab->base.num_entries; ++i) {
amdgpu_bo_remove_fences(&slab->entries[i]);
+ simple_mtx_destroy(&slab->entries[i].lock);
+ }
FREE(slab->entries);
amdgpu_winsys_bo_reference(&slab->buffer, NULL);
}
amdgpu_va_range_free(bo->u.sparse.va_handle);
- simple_mtx_destroy(&bo->u.sparse.commit_lock);
FREE(bo->u.sparse.commitments);
+ simple_mtx_destroy(&bo->lock);
FREE(bo);
}
if (!bo)
return NULL;
+ simple_mtx_init(&bo->lock, mtx_plain);
pipe_reference_init(&bo->base.reference, 1);
bo->base.alignment = RADEON_SPARSE_PAGE_SIZE;
bo->base.size = size;
if (!bo->u.sparse.commitments)
goto error_alloc_commitments;
- simple_mtx_init(&bo->u.sparse.commit_lock, mtx_plain);
LIST_INITHEAD(&bo->u.sparse.backing);
/* For simplicity, we always map a multiple of the page size. */
error_va_map:
amdgpu_va_range_free(bo->u.sparse.va_handle);
error_va_alloc:
- simple_mtx_destroy(&bo->u.sparse.commit_lock);
FREE(bo->u.sparse.commitments);
error_alloc_commitments:
+ simple_mtx_destroy(&bo->lock);
FREE(bo);
return NULL;
}
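
For readers unfamiliar with the pattern, here is a minimal standalone sketch of the lock lifecycle this patch moves to: the per-object mutex is initialized right after allocation and destroyed on every exit path, including the error gotos. It uses C11 mtx_t in place of Mesa's simple_mtx wrapper, and the demo_* names are invented for illustration; they are not winsys API.

#include <stdlib.h>
#include <threads.h>

/* Illustrative stand-in for a winsys buffer object. */
struct demo_bo {
   mtx_t lock;        /* per-object lock, as in bo->lock */
   void *commitments; /* stands in for bo->u.sparse.commitments */
};

static struct demo_bo *demo_bo_create(size_t num_pages)
{
   struct demo_bo *bo = calloc(1, sizeof(*bo));
   if (!bo)
      return NULL;

   /* Init the lock right after allocation, mirroring
    * simple_mtx_init(&bo->lock, mtx_plain). */
   if (mtx_init(&bo->lock, mtx_plain) != thrd_success) {
      free(bo);
      return NULL;
   }

   bo->commitments = calloc(num_pages, 1);
   if (!bo->commitments)
      goto error_alloc_commitments;

   return bo;

error_alloc_commitments:
   /* Every error path after init must destroy the lock. */
   mtx_destroy(&bo->lock);
   free(bo);
   return NULL;
}

static void demo_bo_destroy(struct demo_bo *bo)
{
   free(bo->commitments);
   mtx_destroy(&bo->lock);
   free(bo);
}
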
va_page = offset / RADEON_SPARSE_PAGE_SIZE;
end_va_page = va_page + DIV_ROUND_UP(size, RADEON_SPARSE_PAGE_SIZE);
- simple_mtx_lock(&bo->u.sparse.commit_lock);
+ simple_mtx_lock(&bo->lock);
#if DEBUG_SPARSE_COMMITS
sparse_dump(bo, __func__);
}
out:
- simple_mtx_unlock(&bo->u.sparse.commit_lock);
+ simple_mtx_unlock(&bo->lock);
return ok;
}
initial |= RADEON_DOMAIN_GTT;
/* Initialize the structure. */
+ simple_mtx_init(&bo->lock, mtx_plain);
pipe_reference_init(&bo->base.reference, 1);
bo->base.alignment = info.phys_alignment;
bo->bo = result.buf_handle;
/* Initialize it. */
pipe_reference_init(&bo->base.reference, 1);
+ simple_mtx_init(&bo->lock, mtx_plain);
bo->bo = buf_handle;
bo->base.alignment = 0;
bo->base.size = size;
/* We delay adding the backing buffers until we really have to. However,
* we cannot delay accounting for memory use.
*/
- simple_mtx_lock(&bo->u.sparse.commit_lock);
+ simple_mtx_lock(&bo->lock);
list_for_each_entry(struct amdgpu_sparse_backing, backing, &bo->u.sparse.backing, list) {
if (bo->initial_domain & RADEON_DOMAIN_VRAM)
acs->main.base.used_vram += backing->bo->base.size;
else if (bo->initial_domain & RADEON_DOMAIN_GTT)
acs->main.base.used_gart += backing->bo->base.size;
}
- simple_mtx_unlock(&bo->u.sparse.commit_lock);
+ simple_mtx_unlock(&bo->lock);
return idx;
}
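
A hedged sketch of the accounting idea in the hunk above, again with C11 threads standing in for simple_mtx and invented demo_* names: the backing list is walked while holding the per-BO lock, and each backing buffer's size is charged to the counter matching the BO's initial domain.

#include <threads.h>

enum demo_domain { DEMO_DOMAIN_VRAM = 1, DEMO_DOMAIN_GTT = 2 };

struct demo_backing {
   struct demo_backing *next;
   unsigned long size;
};

struct demo_sparse_bo {
   mtx_t lock;
   enum demo_domain initial_domain;
   struct demo_backing *backing; /* singly linked list for brevity */
};

struct demo_cs {
   unsigned long used_vram;
   unsigned long used_gart;
};

/* Charge the sparse BO's backing memory to the CS counters while
 * holding the per-BO lock, mirroring the hunk above. */
static void demo_account_backing(struct demo_cs *cs, struct demo_sparse_bo *bo)
{
   mtx_lock(&bo->lock);
   for (struct demo_backing *b = bo->backing; b; b = b->next) {
      if (bo->initial_domain & DEMO_DOMAIN_VRAM)
         cs->used_vram += b->size;
      else if (bo->initial_domain & DEMO_DOMAIN_GTT)
         cs->used_gart += b->size;
   }
   mtx_unlock(&bo->lock);
}
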
struct amdgpu_cs_buffer *buffer = &cs->sparse_buffers[i];
struct amdgpu_winsys_bo *bo = buffer->bo;
- simple_mtx_lock(&bo->u.sparse.commit_lock);
+ simple_mtx_lock(&bo->lock);
list_for_each_entry(struct amdgpu_sparse_backing, backing, &bo->u.sparse.backing, list) {
/* We can directly add the buffer here, because we know that each
 * backing buffer occurs only once.
 */
int idx = amdgpu_do_add_real_buffer(cs, backing->bo);
if (idx < 0) {
fprintf(stderr, "%s: failed to add buffer\n", __FUNCTION__);
- simple_mtx_unlock(&bo->u.sparse.commit_lock);
+ simple_mtx_unlock(&bo->lock);
return false;
}
p_atomic_inc(&backing->bo->num_active_ioctls);
}
- simple_mtx_unlock(&bo->u.sparse.commit_lock);
+ simple_mtx_unlock(&bo->lock);
}
return true;
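
Finally, a sketch of the traversal-with-failure-path pattern shown in the last hunk: the lock is taken before walking the backing list and must be released both on the early return and on success. As before, C11 threads and demo_* names are placeholders, not the real winsys types.

#include <stdbool.h>
#include <stdio.h>
#include <threads.h>

struct demo_backing {
   struct demo_backing *next;
   unsigned size;
};

struct demo_sparse_bo {
   mtx_t lock;
   struct demo_backing *backing; /* singly linked list for brevity */
};

/* Illustrative counterpart of amdgpu_do_add_real_buffer(). */
static int demo_add_real_buffer(struct demo_backing *backing)
{
   (void)backing;
   return 0; /* pretend the buffer was added at index 0 */
}

static bool demo_add_backing_buffers(struct demo_sparse_bo *bo)
{
   mtx_lock(&bo->lock);

   for (struct demo_backing *b = bo->backing; b; b = b->next) {
      int idx = demo_add_real_buffer(b);
      if (idx < 0) {
         fprintf(stderr, "%s: failed to add buffer\n", __func__);
         mtx_unlock(&bo->lock); /* unlock on the early-return path too */
         return false;
      }
   }

   mtx_unlock(&bo->lock);
   return true;
}
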