static bool
write_descriptors(struct zink_context *ctx, struct zink_descriptor_set *zds, unsigned num_wds, VkWriteDescriptorSet *wds,
- bool is_compute, bool cache_hit)
+ bool is_compute, bool cache_hit, bool need_resource_refs)
{
bool need_flush = false;
struct zink_batch *batch = is_compute ? &ctx->compute_batch : zink_curr_batch(ctx);
for (int i = 0; zds->pool->key.num_descriptors && i < util_dynarray_num_elements(&zds->barriers, struct zink_descriptor_barrier); ++i) {
struct zink_descriptor_barrier *barrier = util_dynarray_element(&zds->barriers, struct zink_descriptor_barrier, i);
- need_flush |= zink_batch_reference_resource_rw(batch, barrier->res, zink_resource_access_is_write(barrier->access)) == check_flush_id;
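+ /* take a batch reference on the barrier resource (and check whether a flush is
+  * required) only when this set's resource refs must be (re)applied or when both
+  * a gfx and a compute program are currently bound
+  */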
+ if (need_resource_refs || (ctx->curr_compute && ctx->curr_program))
+ need_flush |= zink_batch_reference_resource_rw(batch, barrier->res, zink_resource_access_is_write(barrier->access)) == check_flush_id;
zink_resource_barrier(ctx, NULL, barrier->res,
barrier->layout, barrier->access, barrier->stage);
}
static bool
update_ubo_descriptors(struct zink_context *ctx, struct zink_descriptor_set *zds,
- bool is_compute, bool cache_hit, uint32_t *dynamic_offsets, unsigned *dynamic_offset_idx)
+ bool is_compute, bool cache_hit, bool need_resource_refs,
+ uint32_t *dynamic_offsets, unsigned *dynamic_offset_idx)
{
struct zink_program *pg = is_compute ? (struct zink_program *)ctx->curr_compute : (struct zink_program *)ctx->curr_program;
struct zink_screen *screen = zink_screen(ctx->base.screen);
dynamic_offsets[i] = dynamic_buffers[i].offset;
*dynamic_offset_idx = dynamic_offset_count;
- return write_descriptors(ctx, zds, num_wds, wds, is_compute, cache_hit);
+ return write_descriptors(ctx, zds, num_wds, wds, is_compute, cache_hit, need_resource_refs);
}
static bool
update_ssbo_descriptors(struct zink_context *ctx, struct zink_descriptor_set *zds,
- bool is_compute, bool cache_hit)
+ bool is_compute, bool cache_hit, bool need_resource_refs)
{
struct zink_program *pg = is_compute ? (struct zink_program *)ctx->curr_compute : (struct zink_program *)ctx->curr_program;
ASSERTED struct zink_screen *screen = zink_screen(ctx->base.screen);
else
stages = &ctx->gfx_stages[0];
- for (int i = 0; i < num_stages; i++) {
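+ /* on a cache hit the descriptor writes for this set already exist, so only
+  * re-walk the stages when batch resource references still need to be taken
+  */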
+ for (int i = 0; (!cache_hit || need_resource_refs) && i < num_stages; i++) {
struct zink_shader *shader = stages[i];
if (!shader)
continue;
}
}
_mesa_set_destroy(ht, NULL);
- return write_descriptors(ctx, zds, num_wds, wds, is_compute, cache_hit);
+ return write_descriptors(ctx, zds, num_wds, wds, is_compute, cache_hit, need_resource_refs);
}
static void
static bool
update_sampler_descriptors(struct zink_context *ctx, struct zink_descriptor_set *zds,
- bool is_compute, bool cache_hit)
+ bool is_compute, bool cache_hit, bool need_resource_refs)
{
struct zink_program *pg = is_compute ? (struct zink_program *)ctx->curr_compute : (struct zink_program *)ctx->curr_program;
struct zink_screen *screen = zink_screen(ctx->base.screen);
else
stages = &ctx->gfx_stages[0];
- for (int i = 0; i < num_stages; i++) {
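+ /* same gating as the ssbo path: cached sets skip the walk unless refs are still needed */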
+ for (int i = 0; (!cache_hit || need_resource_refs) && i < num_stages; i++) {
struct zink_shader *shader = stages[i];
if (!shader)
continue;
}
}
_mesa_set_destroy(ht, NULL);
- return write_descriptors(ctx, zds, num_wds, wds, is_compute, cache_hit);
+ return write_descriptors(ctx, zds, num_wds, wds, is_compute, cache_hit, need_resource_refs);
}
static bool
update_image_descriptors(struct zink_context *ctx, struct zink_descriptor_set *zds,
- bool is_compute, bool cache_hit)
+ bool is_compute, bool cache_hit, bool need_resource_refs)
{
struct zink_program *pg = is_compute ? (struct zink_program *)ctx->curr_compute : (struct zink_program *)ctx->curr_program;
struct zink_screen *screen = zink_screen(ctx->base.screen);
else
stages = &ctx->gfx_stages[0];
- for (int i = 0; i < num_stages; i++) {
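+ /* same gating for image descriptors */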
+ for (int i = 0; (!cache_hit || need_resource_refs) && i < num_stages; i++) {
struct zink_shader *shader = stages[i];
if (!shader)
continue;
}
}
_mesa_set_destroy(ht, NULL);
- return write_descriptors(ctx, zds, num_wds, wds, is_compute, cache_hit);
+ return write_descriptors(ctx, zds, num_wds, wds, is_compute, cache_hit, need_resource_refs);
}
static void
zink_context_update_descriptor_states(ctx, is_compute);
bool cache_hit[ZINK_DESCRIPTOR_TYPES];
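+ /* track, per descriptor type, whether the set's resources still need to be
+  * referenced by the batch (filled in by zink_descriptor_set_get below)
+  */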
+ bool need_resource_refs[ZINK_DESCRIPTOR_TYPES];
struct zink_descriptor_set *zds[ZINK_DESCRIPTOR_TYPES];
for (int h = 0; h < ZINK_DESCRIPTOR_TYPES; h++) {
if (pg->pool[h])
- zds[h] = zink_descriptor_set_get(ctx, h, is_compute, &cache_hit[h]);
+ zds[h] = zink_descriptor_set_get(ctx, h, is_compute, &cache_hit[h], &need_resource_refs[h]);
else
zds[h] = NULL;
}
bool need_flush = false;
if (zds[ZINK_DESCRIPTOR_TYPE_UBO])
need_flush |= update_ubo_descriptors(ctx, zds[ZINK_DESCRIPTOR_TYPE_UBO],
- is_compute, cache_hit[ZINK_DESCRIPTOR_TYPE_UBO], dynamic_offsets, &dynamic_offset_idx);
+ is_compute, cache_hit[ZINK_DESCRIPTOR_TYPE_UBO],
+ need_resource_refs[ZINK_DESCRIPTOR_TYPE_UBO], dynamic_offsets, &dynamic_offset_idx);
if (zds[ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW])
need_flush |= update_sampler_descriptors(ctx, zds[ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW],
- is_compute, cache_hit[ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW]);
+ is_compute, cache_hit[ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW],
+ need_resource_refs[ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW]);
if (zds[ZINK_DESCRIPTOR_TYPE_SSBO])
need_flush |= update_ssbo_descriptors(ctx, zds[ZINK_DESCRIPTOR_TYPE_SSBO],
- is_compute, cache_hit[ZINK_DESCRIPTOR_TYPE_SSBO]);
+ is_compute, cache_hit[ZINK_DESCRIPTOR_TYPE_SSBO],
+ need_resource_refs[ZINK_DESCRIPTOR_TYPE_SSBO]);
if (zds[ZINK_DESCRIPTOR_TYPE_IMAGE])
need_flush |= update_image_descriptors(ctx, zds[ZINK_DESCRIPTOR_TYPE_IMAGE],
- is_compute, cache_hit[ZINK_DESCRIPTOR_TYPE_IMAGE]);
+ is_compute, cache_hit[ZINK_DESCRIPTOR_TYPE_IMAGE],
+ need_resource_refs[ZINK_DESCRIPTOR_TYPE_IMAGE]);
for (unsigned h = 0; h < ZINK_DESCRIPTOR_TYPES; h++) {
if (zds[h]) {