zink_screen(ctx->base.screen)->buffer_barrier(ctx, new_res, VK_ACCESS_UNIFORM_READ_BIT,
new_res->gfx_barrier);
zink_batch_resource_usage_set(&ctx->batch, new_res, false, true);
- new_res->obj->unordered_read = false;
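+ /* a driver-internal unordered blit must not demote this read to ordered access */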
+ if (!ctx->unordered_blitting)
+    new_res->obj->unordered_read = false;
}
update |= ctx->ubos[shader][index].buffer_offset != offset ||
!!res != !!buffer || (res && res->obj->buffer != new_res->obj->buffer) ||
unsigned modified_bits = u_bit_consecutive(start_slot, count);
unsigned old_writable_mask = ctx->writable_ssbos[p_stage];
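+ /* ssbo binds can never originate from a driver-internal unordered blit */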
+ assert(!ctx->unordered_blitting);
ctx->writable_ssbos[p_stage] &= ~modified_bits;
ctx->writable_ssbos[p_stage] |= writable_bitmask << start_slot;
struct zink_screen *screen = zink_screen(pctx->screen);
bool update = false;
bool is_compute = shader_type == MESA_SHADER_COMPUTE;
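+ /* image binds can never originate from a driver-internal unordered blit */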
+ assert(!ctx->unordered_blitting);
for (unsigned i = 0; i < count; i++) {
struct zink_image_view *a = &ctx->image_views[shader_type][start_slot + i];
const struct pipe_image_view *b = images ? &images[i] : NULL;
}
}
res->sampler_binds[shader_type] |= BITFIELD_BIT(start_slot + i);
- res->obj->unordered_read = false;
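+ /* sampler views bound for an internal unordered blit keep their unordered access */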
+ if (!ctx->unordered_blitting)
+    res->obj->unordered_read = false;
} else if (a) {
unbind_samplerview(ctx, shader_type, start_slot + i);
update = true;
src->obj->unordered_read = unordered_exec;
if (dst)
dst->obj->unordered_write = unordered_exec;
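+ /* end any active renderpass both for ordered recording and for internal unordered blits */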
+ if (!unordered_exec || ctx->unordered_blitting)
+    zink_batch_no_rp(ctx);
if (unordered_exec) {
ctx->batch.state->has_barriers = true;
return ctx->batch.state->barrier_cmdbuf;
}
- zink_batch_no_rp(ctx);
return ctx->batch.state->cmdbuf;
}
stage |= VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT;
}
zink_screen(ctx->base.screen)->buffer_barrier(ctx, res, access, stage);
- res->obj->unordered_read = false;
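+ /* indirect buffer reads issued by an internal blit stay unordered */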
+ if (!ctx->unordered_blitting)
+    res->obj->unordered_read = false;
}
}
{
struct zink_resource *res = zink_resource(pres);
zink_screen(ctx->base.screen)->buffer_barrier(ctx, res, flags, pipeline);
- res->obj->unordered_read = false;
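+ /* likewise: only force ordered access outside of internal blits */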
+ if (!ctx->unordered_blitting)
+    res->obj->unordered_read = false;
}
ALWAYS_INLINE static void
struct zink_resource *res = zink_resource(t->base.buffer);
zink_screen(ctx->base.screen)->buffer_barrier(ctx, res,
VK_ACCESS_TRANSFORM_FEEDBACK_WRITE_BIT_EXT, VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT);
- res->obj->unordered_read = res->obj->unordered_write = false;
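+ /* xfb access state is left untouched while an internal blit is recording */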
+ if (!ctx->unordered_blitting)
+    res->obj->unordered_read = res->obj->unordered_write = false;
}
}
}
zink_screen(ctx->base.screen)->buffer_barrier(ctx, res,
VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_READ_BIT_EXT,
VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT);
- res->obj->unordered_read = false;
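+ /* counter buffer reads stay unordered during internal blits */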
+ if (!ctx->unordered_blitting)
+    res->obj->unordered_read = false;
}
zink_query_update_gs_states(ctx);
struct zink_resource *res = zink_resource(t->counter_buffer);
t->stride = ctx->last_vertex_stage->sinfo.so_info.stride[i] * sizeof(uint32_t);
zink_batch_reference_resource_rw(batch, res, true);
- res->obj->unordered_read = res->obj->unordered_write = false;
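+ /* only mark ordered access when this isn't an internal blit */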
+ if (!ctx->unordered_blitting)
+    res->obj->unordered_read = res->obj->unordered_write = false;
if (t->counter_buffer_valid) {
counter_buffers[i] = res->obj->buffer;
counter_buffer_offsets[i] = t->counter_buffer_offset;
struct zink_resource *res = zink_resource(vstate->input.vbuffer.buffer.resource);
zink_screen(ctx->base.screen)->buffer_barrier(ctx, res, VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT,
VK_PIPELINE_STAGE_VERTEX_INPUT_BIT);
- res->obj->unordered_read = false;
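+ /* vertex buffer reads from an internal blit stay unordered */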
+ if (!ctx->unordered_blitting)
+    res->obj->unordered_read = false;
struct zink_vertex_elements_hw_state *hw_state = ctx->gfx_pipeline_state.element_state;
ctx->gfx_pipeline_state.element_state = &((struct zink_vertex_state*)vstate)->velems.hw_state;