if (zink_resource_image_needs_barrier(res, VK_IMAGE_LAYOUT_GENERAL, 0, 0) &&
zink_resource_image_needs_barrier(res, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 0, 0))
- zink_resource_image_barrier(ctx, NULL, res, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 0, 0);
+ zink_resource_image_barrier(ctx, res, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 0, 0);
zink_batch_reference_resource_rw(batch, res, true);
VKCTX(CmdClearColorImage)(batch->state->cmdbuf, res->obj->image, res->layout, &color, 1, &range);
}
if (zink_resource_image_needs_barrier(res, VK_IMAGE_LAYOUT_GENERAL, 0, 0) &&
zink_resource_image_needs_barrier(res, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 0, 0))
- zink_resource_image_barrier(ctx, NULL, res, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 0, 0);
+ zink_resource_image_barrier(ctx, res, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 0, 0);
zink_batch_reference_resource_rw(batch, res, true);
VKCTX(CmdClearDepthStencilImage)(batch->state->cmdbuf, res->obj->image, res->layout, &zs_value, 1, &range);
}
ctx_vb->stride = vb->stride;
ctx_vb->buffer_offset = vb->buffer_offset;
/* always barrier before possible rebind */
- zink_resource_buffer_barrier(ctx, NULL, res, VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT,
+ zink_resource_buffer_barrier(ctx, res, VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT,
VK_PIPELINE_STAGE_VERTEX_INPUT_BIT);
set_vertex_buffer_clamped(ctx, start_slot + i);
zink_batch_resource_usage_set(&ctx->batch, res, false);
VkPipelineStageFlags pipeline;
VkImageLayout layout = zink_render_pass_attachment_get_barrier_info(ctx->gfx_pipeline_state.render_pass,
i, &pipeline, &access);
- zink_resource_image_barrier(ctx, NULL, res, layout, access, pipeline);
+ zink_resource_image_barrier(ctx, res, layout, access, pipeline);
return surf->image_view;
}
if (changed && unlikely(res->obj->needs_zs_evaluate))
/* have to flush zs eval while the sample location data still exists,
* so just throw some random barrier */
- zink_resource_image_barrier(ctx, NULL, res, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
+ zink_resource_image_barrier(ctx, res, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
VK_ACCESS_SHADER_READ_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT);
}
/* renderpass changes if the number or types of attachments change */
}
void
-zink_resource_image_barrier(struct zink_context *ctx, struct zink_batch *batch, struct zink_resource *res,
+zink_resource_image_barrier(struct zink_context *ctx, struct zink_resource *res,
VkImageLayout new_layout, VkAccessFlags flags, VkPipelineStageFlags pipeline)
{
VkImageMemoryBarrier imb;
}
void
-zink_resource_buffer_barrier(struct zink_context *ctx, struct zink_batch *batch, struct zink_resource *res, VkAccessFlags flags, VkPipelineStageFlags pipeline)
+zink_resource_buffer_barrier(struct zink_context *ctx, struct zink_resource *res, VkAccessFlags flags, VkPipelineStageFlags pipeline)
{
VkMemoryBarrier bmb;
if (!pipeline)
zink_batch_reference_resource_rw(batch, src, false);
zink_batch_reference_resource_rw(batch, dst, true);
util_range_add(&dst->base.b, &dst->valid_buffer_range, dst_offset, dst_offset + size);
- zink_resource_buffer_barrier(ctx, batch, src, VK_ACCESS_TRANSFER_READ_BIT, 0);
- zink_resource_buffer_barrier(ctx, batch, dst, VK_ACCESS_TRANSFER_WRITE_BIT, 0);
+ zink_resource_buffer_barrier(ctx, src, VK_ACCESS_TRANSFER_READ_BIT, 0);
+ zink_resource_buffer_barrier(ctx, dst, VK_ACCESS_TRANSFER_WRITE_BIT, 0);
VKCTX(CmdCopyBuffer)(batch->state->cmdbuf, src->obj->buffer, dst->obj->buffer, 1, &region);
}
bool buf2img = buf == src;
if (buf2img) {
- zink_resource_image_barrier(ctx, batch, img, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 0, 0);
- zink_resource_buffer_barrier(ctx, batch, buf, VK_ACCESS_TRANSFER_READ_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT);
+ zink_resource_image_barrier(ctx, img, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 0, 0);
+ zink_resource_buffer_barrier(ctx, buf, VK_ACCESS_TRANSFER_READ_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT);
} else {
- zink_resource_image_barrier(ctx, batch, img, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, 0, 0);
- zink_resource_buffer_barrier(ctx, batch, buf, VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT);
+ zink_resource_image_barrier(ctx, img, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, 0, 0);
+ zink_resource_buffer_barrier(ctx, buf, VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT);
util_range_add(&dst->base.b, &dst->valid_buffer_range, dstx, dstx + src_box->width);
}
ctx->vertex_buffers_dirty = ctx->gfx_pipeline_state.vertex_buffers_enabled_mask > 0;
ctx->dirty_so_targets = ctx->num_so_targets > 0;
if (ctx->num_so_targets)
- zink_resource_buffer_barrier(ctx, NULL, zink_resource(ctx->dummy_xfb_buffer),
+ zink_resource_buffer_barrier(ctx, zink_resource(ctx->dummy_xfb_buffer),
VK_ACCESS_TRANSFORM_FEEDBACK_WRITE_BIT_EXT, VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT);
for (unsigned shader = PIPE_SHADER_VERTEX; shader < PIPE_SHADER_TYPES; shader++) {
for (unsigned slot = 0; slot < ctx->di.num_ubos[shader]; slot++) {
zink_resource_access_is_write(VkAccessFlags flags);
void
-zink_resource_buffer_barrier(struct zink_context *ctx, struct zink_batch *batch, struct zink_resource *res, VkAccessFlags flags, VkPipelineStageFlags pipeline);
+zink_resource_buffer_barrier(struct zink_context *ctx, struct zink_resource *res, VkAccessFlags flags, VkPipelineStageFlags pipeline);
void
zink_fake_buffer_barrier(struct zink_resource *res, VkAccessFlags flags, VkPipelineStageFlags pipeline);
bool
zink_resource_image_needs_barrier(struct zink_resource *res, VkImageLayout new_layout, VkAccessFlags flags, VkPipelineStageFlags pipeline);
bool
zink_resource_image_barrier_init(VkImageMemoryBarrier *imb, struct zink_resource *res, VkImageLayout new_layout, VkAccessFlags flags, VkPipelineStageFlags pipeline);
void
-zink_resource_image_barrier(struct zink_context *ctx, struct zink_batch *batch, struct zink_resource *res,
+zink_resource_image_barrier(struct zink_context *ctx, struct zink_resource *res,
VkImageLayout new_layout, VkAccessFlags flags, VkPipelineStageFlags pipeline);
bool
continue;
struct zink_resource *res = zink_resource(t->counter_buffer);
if (t->counter_buffer_valid)
- zink_resource_buffer_barrier(ctx, NULL, res, VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_READ_BIT_EXT,
+ zink_resource_buffer_barrier(ctx, res, VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_READ_BIT_EXT,
VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT);
else
- zink_resource_buffer_barrier(ctx, NULL, res, VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT,
+ zink_resource_buffer_barrier(ctx, res, VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT,
VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT);
}
ctx->xfb_barrier = false;
*
* - 20.3.1. Drawing Transform Feedback
*/
- zink_resource_buffer_barrier(ctx, NULL, res, VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT,
+ zink_resource_buffer_barrier(ctx, res, VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT,
VK_PIPELINE_STAGE_VERTEX_INPUT_BIT);
}
/* resource has been rebound */
t->counter_buffer_valid = false;
buffers[i] = res->obj->buffer;
- zink_resource_buffer_barrier(ctx, NULL, zink_resource(t->base.buffer),
+ zink_resource_buffer_barrier(ctx, zink_resource(t->base.buffer),
VK_ACCESS_TRANSFORM_FEEDBACK_WRITE_BIT_EXT, VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT);
zink_batch_reference_resource_rw(batch, res, true);
buffer_offsets[i] = t->base.buffer_offset;
check_buffer_barrier(struct zink_context *ctx, struct pipe_resource *pres, VkAccessFlags flags, VkPipelineStageFlags pipeline)
{
struct zink_resource *res = zink_resource(pres);
- zink_resource_buffer_barrier(ctx, NULL, res, flags, pipeline);
+ zink_resource_buffer_barrier(ctx, res, flags, pipeline);
}
ALWAYS_INLINE static void
}
}
if (res->base.b.target == PIPE_BUFFER)
- zink_resource_buffer_barrier(ctx, NULL, res, access, pipeline);
+ zink_resource_buffer_barrier(ctx, res, access, pipeline);
else {
VkImageLayout layout = zink_descriptor_util_image_layout_eval(res, is_compute);
if (layout != res->layout)
- zink_resource_image_barrier(ctx, NULL, res, layout, access, pipeline);
+ zink_resource_image_barrier(ctx, res, layout, access, pipeline);
}
/* always barrier on draw if this resource has either multiple image write binds or
* image write binds and image read binds
zink_batch_no_rp(ctx);
/* if it's a single query that doesn't need special handling, we can copy it and be done */
zink_batch_reference_resource_rw(batch, res, true);
- zink_resource_buffer_barrier(ctx, batch, res, VK_ACCESS_TRANSFER_WRITE_BIT, 0);
+ zink_resource_buffer_barrier(ctx, res, VK_ACCESS_TRANSFER_WRITE_BIT, 0);
util_range_add(&res->base.b, &res->valid_buffer_range, offset, offset + result_size);
assert(query_id < NUM_QUERIES);
VKCTX(CmdCopyQueryPoolResults)(batch->state->cmdbuf, pool, query_id, num_results, res->obj->buffer,
res->base.b.bind |= PIPE_BIND_SHADER_IMAGE;
} else {
zink_fb_clears_apply_region(ctx, &res->base.b, (struct u_rect){0, res->base.b.width0, 0, res->base.b.height0});
- zink_resource_image_barrier(ctx, NULL, res, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, 0, 0);
+ zink_resource_image_barrier(ctx, res, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, 0, 0);
res->base.b.bind |= PIPE_BIND_SHADER_IMAGE;
struct zink_resource_object *old_obj = res->obj;
struct zink_resource_object *new_obj = resource_object_create(screen, &res->base.b, NULL, &res->optimal_tiling, res->modifiers, res->modifiers_count);
* VK_IMAGE_LAYOUT_GENERAL. And since this isn't a present-related
* operation, VK_IMAGE_LAYOUT_GENERAL seems most appropriate.
*/
- zink_resource_image_barrier(ctx, NULL, src,
+ zink_resource_image_barrier(ctx, src,
VK_IMAGE_LAYOUT_GENERAL,
VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_TRANSFER_WRITE_BIT,
VK_PIPELINE_STAGE_TRANSFER_BIT);
} else {
- zink_resource_image_barrier(ctx, NULL, src,
+ zink_resource_image_barrier(ctx, src,
VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
VK_ACCESS_TRANSFER_READ_BIT,
VK_PIPELINE_STAGE_TRANSFER_BIT);
- zink_resource_image_barrier(ctx, NULL, dst,
+ zink_resource_image_barrier(ctx, dst,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
VK_ACCESS_TRANSFER_WRITE_BIT,
VK_PIPELINE_STAGE_TRANSFER_BIT);