/* unref all used resources */
set_foreach(batch->resources, entry) {
- struct pipe_resource *pres = (struct pipe_resource *)entry->key;
- pipe_resource_reference(&pres, NULL);
+ struct zink_resource_object *obj = (struct zink_resource_object *)entry->key;
+ zink_resource_object_reference(screen, &obj, NULL);
_mesa_set_remove(batch->resources, entry);
}
uint32_t check_mask = (ZINK_RESOURCE_ACCESS_READ | ZINK_RESOURCE_ACCESS_WRITE) << batch->batch_id;
if (!(uses_check & check_mask)) {
bool found = false;
- _mesa_set_search_and_add(batch->resources, res, &found);
+ _mesa_set_search_and_add(batch->resources, res->obj, &found);
if (!found) {
- pipe_reference(NULL, &res->base.reference);
+ pipe_reference(NULL, &res->obj->reference);
batch->resource_size += res->obj->size;
if (stencil) {
- pipe_reference(NULL, &stencil->base.reference);
+ pipe_reference(NULL, &stencil->obj->reference);
batch->resource_size += stencil->obj->size;
}
}
/* the batch_uses value for this batch is guaranteed to not be in use now because
* zink_reset_batch() waits on the fence and removes access before resetting
*/
- res->batch_uses[batch->batch_id] |= mask;
+ res->obj->batch_uses[batch->batch_id] |= mask;
if (stencil)
- stencil->batch_uses[batch->batch_id] |= mask;
+ stencil->obj->batch_uses[batch->batch_id] |= mask;
batch->has_work = true;
return batch_to_flush;
static uint32_t
calc_descriptor_state_hash_ubo(struct zink_context *ctx, struct zink_shader *zs, enum pipe_shader_type shader, int i, int idx, uint32_t hash)
{
- hash = XXH32(&ctx->ubos[shader][idx].buffer, sizeof(void*), hash);
+ struct zink_resource *res = zink_resource(ctx->ubos[shader][idx].buffer);
+ struct zink_resource_object *obj = res ? res->obj : NULL;
+ hash = XXH32(&obj, sizeof(void*), hash);
void *hash_data = &ctx->ubos[shader][idx].buffer_size;
size_t data_size = sizeof(unsigned);
hash = XXH32(hash_data, data_size, hash);
static uint32_t
calc_descriptor_state_hash_ssbo(struct zink_context *ctx, struct zink_shader *zs, enum pipe_shader_type shader, int i, int idx, uint32_t hash)
{
- void *hash_data = &ctx->ssbos[shader][idx];
- size_t data_size = sizeof(struct pipe_shader_buffer);
- return XXH32(hash_data, data_size, hash);
+ /* hash the backing zink_resource_object pointer rather than the whole
+  * pipe_shader_buffer, so descriptor states compare equal whenever the
+  * underlying Vulkan object is the same
+  */
+ struct zink_resource *res = zink_resource(ctx->ssbos[shader][idx].buffer);
+ struct zink_resource_object *obj = res ? res->obj : NULL;
+ hash = XXH32(&obj, sizeof(void*), hash);
+ if (obj) {
+ /* offset/size only contribute when a buffer is actually bound */
+ struct pipe_shader_buffer *ssbo = &ctx->ssbos[shader][idx];
+ hash = XXH32(&ssbo->buffer_offset, sizeof(ssbo->buffer_offset), hash);
+ hash = XXH32(&ssbo->buffer_size, sizeof(ssbo->buffer_size), hash);
+ }
+ return hash;
}
static void
for (unsigned k = 0; k < zs->bindings[ZINK_DESCRIPTOR_TYPE_IMAGE][i].size; k++) {
if (!ctx->image_views[shader][idx + k].base.resource) {
- VkDescriptorImageInfo null_info = {VK_NULL_HANDLE, VK_NULL_HANDLE, VK_IMAGE_LAYOUT_UNDEFINED};
+ VkDescriptorImageInfo null_info = {0};
hash_data = &null_info;
data_size = sizeof(VkDescriptorImageInfo);
hash = XXH32(hash_data, data_size, hash);
struct zink_descriptor_set *alloc = ralloc_array(pool, struct zink_descriptor_set, bucket_size);
assert(alloc);
unsigned num_resources = pool->num_resources;
- struct zink_resource **resources = rzalloc_array(pool, struct zink_resource*, num_resources * bucket_size);
- assert(resources);
+ struct zink_resource_object **res_objs = rzalloc_array(pool, struct zink_resource_object*, num_resources * bucket_size);
+ assert(res_objs);
void **samplers = NULL;
if (type == ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW) {
samplers = rzalloc_array(pool, void*, num_resources * bucket_size);
zds->num_resources = num_resources;
#endif
if (type == ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW) {
- zds->sampler_views = (struct zink_sampler_view**)&resources[i * pool->key.num_descriptors];
+ zds->sampler_views = (struct zink_sampler_view**)&res_objs[i * pool->key.num_descriptors];
zds->sampler_states = (struct zink_sampler_state**)&samplers[i * pool->key.num_descriptors];
} else
- zds->resources = (struct zink_resource**)&resources[i * pool->key.num_descriptors];
+ zds->res_objs = (struct zink_resource_object**)&res_objs[i * pool->key.num_descriptors];
zds->desc_set = desc_set[i];
if (i > 0)
util_dynarray_append(&pool->alloc_desc_sets, struct zink_descriptor_set *, zds);
void
zink_resource_desc_set_add(struct zink_resource *res, struct zink_descriptor_set *zds, unsigned idx)
{
- desc_set_ref_add(zds, &res->desc_set_refs, (void**)&zds->resources[idx], res);
+ /* register the descriptor set's slot against the resource *object* so the
+  * ref survives the pipe_resource wrapper's lifetime; res may be NULL here
+  * (see the !cache_hit caller) — presumably desc_set_ref_add tolerates a
+  * NULL refs list in that case (NOTE(review): confirm against its definition)
+  */
+ desc_set_ref_add(zds, res ? &res->obj->desc_set_refs : NULL, (void**)&zds->res_objs[idx], res ? res->obj : NULL);
}
void
unsigned num_resources;
#endif
union {
- struct zink_resource **resources;
+ struct zink_resource_object **res_objs;
struct zink_image_view **image_views;
struct {
struct zink_sampler_view **sampler_views;
* hash table on every resource with the associated descriptor sets that then needs to be iterated through
* whenever a resource is destroyed
*/
- assert(!cache_hit || zds->resources[i] == res);
+ assert(!cache_hit || zds->res_objs[i] == (res ? res->obj : NULL));
if (!cache_hit)
zink_resource_desc_set_add(res, zds, i);
}
/* the fence needs its own reference to ensure it can safely access lifetime-dependent
* resource members
*/
- struct pipe_resource *r = NULL, *pres = (struct pipe_resource *)entry->key;
- pipe_resource_reference(&r, pres);
- util_dynarray_append(&fence->resources, struct pipe_resource*, pres);
+ struct zink_resource_object *obj = (struct zink_resource_object *)entry->key;
+ pipe_reference(NULL, &obj->reference);
+ util_dynarray_append(&fence->resources, struct zink_resource_object*, obj);
}
fence->deferred_ctx = NULL;
fence->submitted = true;
}
+/* clear this fence's batch-usage slot on the resource object after the fence
+ * completes; uses p_atomic_set because batch_uses is read concurrently
+ * (see zink_get_resource_usage's p_atomic_read)
+ */
static inline void
-fence_remove_resource_access(struct zink_fence *fence, struct zink_resource *res)
+fence_remove_resource_access(struct zink_fence *fence, struct zink_resource_object *obj)
{
- p_atomic_set(&res->batch_uses[fence->batch_id], 0);
+ p_atomic_set(&obj->batch_uses[fence->batch_id], 0);
}
bool
zink_prune_queries(screen, fence);
/* unref all used resources */
- util_dynarray_foreach(&fence->resources, struct pipe_resource*, pres) {
- struct zink_resource *stencil, *res = zink_resource(*pres);
- fence_remove_resource_access(fence, res);
-
- /* we still hold a ref, so this doesn't need to be atomic */
- zink_get_depth_stencil_resources((struct pipe_resource*)res, NULL, &stencil);
- if (stencil)
- fence_remove_resource_access(fence, stencil);
- pipe_resource_reference(pres, NULL);
+ util_dynarray_foreach(&fence->resources, struct zink_resource_object*, obj) {
+ fence_remove_resource_access(fence, *obj);
+
+ zink_resource_object_reference(screen, obj, NULL);
}
util_dynarray_clear(&fence->resources);
fence->submitted = false;
#include "drm-uapi/drm_fourcc.h"
#endif
+/* debug_reference_descriptor callback used by zink_resource_object_reference
+ * for refcount debugging/tracing
+ */
+void
+debug_describe_zink_resource_object(char *buf, const struct zink_resource_object *ptr)
+{
+ sprintf(buf, "zink_resource_object");
+}
+
+
static void
resource_sync_writes_from_batch_usage(struct zink_context *ctx, uint32_t batch_uses)
{
vkFreeMemory(screen->dev, obj->mem, NULL);
}
-static void
-resource_object_destroy(struct zink_screen *screen, struct zink_resource_object *obj)
+/* final teardown when an object's refcount reaches zero: destroy the Vulkan
+ * handle, invalidate any descriptor-set references that still point at this
+ * object, then cache-or-free the backing memory and the struct itself;
+ * now non-static so the reference helper in the header can call it
+ */
+void
+zink_destroy_resource_object(struct zink_screen *screen, struct zink_resource_object *obj)
{
if (obj->is_buffer)
vkDestroyBuffer(screen->dev, obj->buffer, NULL);
else
vkDestroyImage(screen->dev, obj->image, NULL);
+ zink_descriptor_set_refs_clear(&obj->desc_set_refs, obj);
cache_or_free_mem(screen, obj);
FREE(obj);
}
if (pres->target == PIPE_BUFFER)
util_range_destroy(&res->valid_buffer_range);
- zink_descriptor_set_refs_clear(&res->desc_set_refs, res);
- resource_object_destroy(screen, res->obj);
+ zink_resource_object_reference(screen, &res->obj, NULL);
FREE(res);
}
VkMemoryRequirements reqs = {};
VkMemoryPropertyFlags flags;
+ pipe_reference_init(&obj->reference, 1);
+ util_dynarray_init(&obj->desc_set_refs.refs, NULL);
if (templ->target == PIPE_BUFFER) {
VkBufferCreateInfo bci = {};
bci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
&res->dt_stride);
}
- util_dynarray_init(&res->desc_set_refs.refs, NULL);
return &res->base;
}
zink_get_resource_usage(struct zink_resource *res)
{
uint32_t batch_uses = 0;
- for (unsigned i = 0; i < ARRAY_SIZE(res->batch_uses); i++)
- batch_uses |= p_atomic_read(&res->batch_uses[i]) << i;
+ for (unsigned i = 0; i < ARRAY_SIZE(res->obj->batch_uses); i++)
+ batch_uses |= p_atomic_read(&res->obj->batch_uses[i]) << i;
return batch_uses;
}
VkDeviceSize offset, size;
unsigned persistent_maps; //if nonzero, requires vkFlushMappedMemoryRanges during batch use
+ struct zink_descriptor_refs desc_set_refs;
+
+ /* this has to be atomic for fence access, so we can't use a bitmask and make everything neat */
+ uint8_t batch_uses[5]; //ZINK_NUM_BATCHES
bool is_buffer;
bool host_visible;
};
struct sw_displaytarget *dt;
unsigned dt_stride;
-
- struct zink_descriptor_refs desc_set_refs;
-
- /* this has to be atomic for fence access, so we can't use a bitmask and make everything neat */
- uint8_t batch_uses[5]; //ZINK_NUM_BATCHES
};
struct zink_transfer {
void
zink_resource_desc_set_add(struct zink_resource *res, struct zink_descriptor_set *zds, unsigned idx);
+
+
+void
+zink_destroy_resource_object(struct zink_screen *screen, struct zink_resource_object *resource_object);
+
+void
+debug_describe_zink_resource_object(char *buf, const struct zink_resource_object *ptr);
+
+/* point *dst at src, taking a reference on src and dropping the old reference
+ * held through *dst; destroys the old object if its refcount hits zero.
+ * Either dst or src may be NULL (src == NULL simply releases *dst).
+ */
+static inline void
+zink_resource_object_reference(struct zink_screen *screen,
+                               struct zink_resource_object **dst,
+                               struct zink_resource_object *src)
+{
+   struct zink_resource_object *old_dst = dst ? *dst : NULL;
+
+   /* guard src: evaluating &src->reference on a NULL src is undefined
+    * behavior, and callers routinely pass src == NULL to unref;
+    * pipe_reference_described accepts NULL for either side
+    */
+   if (pipe_reference_described(old_dst ? &old_dst->reference : NULL,
+                                src ? &src->reference : NULL,
+                                (debug_reference_descriptor)debug_describe_zink_resource_object))
+      zink_destroy_resource_object(screen, old_dst);
+   if (dst) *dst = src;
+}
+
#endif