};
static inline void
-read_descriptor_resource(struct zink_descriptor_resource *resource, struct zink_resource *res)
+/* Record 'res' in the given slot as a read-only descriptor reference and
+ * bump *num_resources so the caller's per-type slot index stays in sync
+ * with the resources[] array it indexes into. 'res' may be NULL when the
+ * nullDescriptor feature is used.
+ */
+read_descriptor_resource(struct zink_descriptor_resource *resource, struct zink_resource *res, unsigned *num_resources)
{
resource->res = res;
resource->write = false;
+ (*num_resources)++;
}
static inline void
-write_descriptor_resource(struct zink_descriptor_resource *resource, struct zink_resource *res)
+/* Record 'res' in the given slot as a writable descriptor reference and
+ * bump *num_resources so the caller's per-type slot index stays in sync
+ * with the resources[] array it indexes into. The write flag is later used
+ * to pick read-vs-write batch references for the resource.
+ */
+write_descriptor_resource(struct zink_descriptor_resource *resource, struct zink_resource *res, unsigned *num_resources)
{
resource->res = res;
resource->write = true;
+ (*num_resources)++;
}
static void
unsigned num_buffer_info[ZINK_DESCRIPTOR_TYPES] = {0};
unsigned num_image_info[ZINK_DESCRIPTOR_TYPES] = {0};
unsigned num_surface_refs = 0;
+ unsigned num_resources[ZINK_DESCRIPTOR_TYPES] = {0};
struct zink_shader **stages;
struct {
uint32_t binding;
shader->bindings[h][j].type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) {
assert(ctx->ubos[stage][index].buffer_size <= screen->info.props.limits.maxUniformBufferRange);
struct zink_resource *res = zink_resource(ctx->ubos[stage][index].buffer);
- assert(num_wds[h] < num_bindings);
assert(!res || ctx->ubos[stage][index].buffer_size > 0);
assert(!res || ctx->ubos[stage][index].buffer);
- read_descriptor_resource(&resources[h][num_wds[h]], res);
+ assert(num_resources[h] < num_bindings);
+ read_descriptor_resource(&resources[h][num_resources[h]], res, &num_resources[h]);
assert(num_buffer_info[h] < num_bindings);
buffer_infos[h][num_buffer_info[h]].buffer = res ? res->buffer :
(screen->info.rb2_feats.nullDescriptor ?
assert(ctx->ssbos[stage][index].buffer_size <= screen->info.props.limits.maxStorageBufferRange);
assert(num_buffer_info[h] < num_bindings);
unsigned flag = VK_ACCESS_SHADER_READ_BIT;
- assert(num_wds[h] < num_bindings);
+ assert(num_resources[h] < num_bindings);
if (ctx->writable_ssbos[stage] & (1 << index)) {
- write_descriptor_resource(&resources[h][num_wds[h]], res);
+ write_descriptor_resource(&resources[h][num_resources[h]], res, &num_resources[h]);
flag |= VK_ACCESS_SHADER_WRITE_BIT;
} else {
- read_descriptor_resource(&resources[h][num_wds[h]], res);
+ read_descriptor_resource(&resources[h][num_resources[h]], res, &num_resources[h]);
}
add_transition(res, 0, flag, stage, &transitions[num_transitions], &num_transitions, ht);
buffer_infos[h][num_buffer_info[h]].buffer = res->buffer;
buffer_infos[h][num_buffer_info[h]].range = ctx->ssbos[stage][index].buffer_size;
} else {
assert(screen->info.rb2_feats.nullDescriptor);
- read_descriptor_resource(&resources[h][num_wds[h]], res);
+ read_descriptor_resource(&resources[h][num_resources[h]], res, &num_resources[h]);
buffer_infos[h][num_buffer_info[h]].buffer = VK_NULL_HANDLE;
buffer_infos[h][num_buffer_info[h]].offset = 0;
buffer_infos[h][num_buffer_info[h]].range = VK_WHOLE_SIZE;
sampler = ctx->samplers[stage][index + k];
}
add_transition(res, layout, VK_ACCESS_SHADER_READ_BIT, stage, &transitions[num_transitions], &num_transitions, ht);
- assert(num_wds[h] < num_bindings);
- read_descriptor_resource(&resources[h][num_wds[h]], res);
+ assert(num_resources[h] < num_bindings);
+ read_descriptor_resource(&resources[h][num_resources[h]], res, &num_resources[h]);
}
break;
case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
if (image_view->base.access & PIPE_IMAGE_ACCESS_WRITE)
flags |= VK_ACCESS_SHADER_WRITE_BIT;
add_transition(res, layout, flags, stage, &transitions[num_transitions], &num_transitions, ht);
- assert(num_wds[h] < num_bindings);
+ assert(num_resources[h] < num_bindings);
if (image_view->base.access & PIPE_IMAGE_ACCESS_WRITE)
- write_descriptor_resource(&resources[h][num_wds[h]], res);
+ write_descriptor_resource(&resources[h][num_resources[h]], res, &num_resources[h]);
else
- read_descriptor_resource(&resources[h][num_wds[h]], res);
+ read_descriptor_resource(&resources[h][num_resources[h]], res, &num_resources[h]);
}
break;
default:
* the results of this codepath are undefined in ARB_texture_buffer_object spec
*/
assert(screen->info.rb2_feats.nullDescriptor);
- assert(num_wds[h] < num_bindings);
- read_descriptor_resource(&resources[h][num_wds[h]], res);
+ assert(num_resources[h] < num_bindings);
+ read_descriptor_resource(&resources[h][num_resources[h]], res, &num_resources[h]);
switch (shader->bindings[h][j].type) {
case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
wds[h][num_wds[h]].pImageInfo = image_infos[h] + num_image_info[h];
++num_image_info[h];
}
+ assert(num_transitions <= num_bindings);
+ assert(num_resources[h] <= num_bindings);
}
}
wds[h][num_wds[h]].descriptorCount = shader->bindings[h][j].size;
wds[h][num_wds[h]].descriptorType = shader->bindings[h][j].type;
++num_wds[h];
+ assert(num_transitions <= num_bindings);
+ assert(num_resources[h] <= num_bindings);
}
}
}
if (!zds[h])
continue;
assert(zds[h]->desc_set);
- for (int i = 0; i < num_wds[h]; ++i) {
+ for (int i = 0; i < num_wds[h]; ++i)
wds[h][i].dstSet = zds[h]->desc_set;
+ for (int i = 0; i < num_resources[h]; ++i) {
+ assert(num_resources[h] <= zink_program_num_bindings_typed(pg, h, is_compute));
+ assert(num_resources[h] <= zds[h]->num_resources);
+
struct zink_resource *res = resources[h][i].res;
if (res) {
need_flush |= zink_batch_reference_resource_rw(batch, res, resources[h][i].write) == check_flush_id;
static struct zink_descriptor_set *
-allocate_desc_set(struct zink_screen *screen, struct zink_program *pg, enum zink_descriptor_type type, unsigned descs_used)
+allocate_desc_set(struct zink_screen *screen, struct zink_program *pg, enum zink_descriptor_type type, unsigned descs_used, bool is_compute)
{
VkDescriptorSetAllocateInfo dsai;
#define DESC_BUCKET_FACTOR 10
struct zink_descriptor_set *alloc = ralloc_array(pg, struct zink_descriptor_set, bucket_size);
assert(alloc);
- struct zink_resource **resources = rzalloc_array(pg, struct zink_resource*, pg->num_descriptors[type] * bucket_size);
+ unsigned num_resources = zink_program_num_bindings_typed(pg, type, is_compute);
+ struct zink_resource **resources = rzalloc_array(pg, struct zink_resource*, num_resources * bucket_size);
assert(resources);
for (unsigned i = 0; i < bucket_size; i ++) {
struct zink_descriptor_set *zds = &alloc[i];
zds->hash = 0;
zds->invalid = true;
zds->type = type;
+#ifndef NDEBUG
+ zds->num_resources = num_resources;
+#endif
zds->resources = &resources[i * pg->num_descriptors[type]];
zds->desc_set = desc_set[i];
if (i > 0)
}
}
- zds = allocate_desc_set(screen, pg, type, descs_used);
+ zds = allocate_desc_set(screen, pg, type, descs_used, is_compute);
out:
zds->hash = hash;
populate_zds_key(ctx, type, is_compute, &zds->key);