return false;
}
- simple_mtx_lock(&bo->lock);
- bool is_shared = bo->is_shared;
- simple_mtx_unlock(&bo->lock);
+ bool is_shared = false;
+ if (bo->bo) {
+ simple_mtx_lock(&bo->lock);
+ is_shared = bo->u.real.is_shared;
+ simple_mtx_unlock(&bo->lock);
+ }
if (is_shared) {
/* We can't use user fences for shared buffers, because user fences
bo->base.placement = initial;
bo->base.usage = flags;
bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);
- bo->is_shared = true;
+ bo->u.real.is_shared = true;
if (bo->base.placement & RADEON_DOMAIN_VRAM)
ws->allocated_vram += align64(bo->base.size, ws->info.gart_page_size);
whandle->handle = bo->u.real.kms_handle;
simple_mtx_lock(&bo->lock);
- bool is_shared = bo->is_shared;
+ bool is_shared = bo->u.real.is_shared;
simple_mtx_unlock(&bo->lock);
if (is_shared)
simple_mtx_unlock(&ws->bo_export_table_lock);
simple_mtx_lock(&bo->lock);
- bo->is_shared = true;
+ bo->u.real.is_shared = true;
simple_mtx_unlock(&bo->lock);
return true;
}
void *cpu_ptr; /* for user_ptr and permanent maps */
uint32_t kms_handle;
int map_count;
+
+ /* Whether buffer_get_handle or buffer_from_handle has been called;
+ * this can only transition from false to true. Protected by lock.
+ */
+ bool is_shared;
} real;
struct {
struct pb_slab_entry entry;
bool is_user_ptr;
bool use_reusable_pool;
- /* Whether buffer_get_handle or buffer_from_handle has been called,
- * it can only transition from false to true. Protected by lock.
- */
- bool is_shared;
-
uint32_t unique_id;
uint64_t va;
simple_mtx_t lock;