set_foreach(bs->surfaces, entry) {
struct zink_surface *surf = (struct zink_surface *)entry->key;
- zink_batch_usage_unset(&surf->batch_uses, !!bs->fence.is_compute, bs->fence.batch_id);
+ zink_batch_usage_unset(&surf->batch_uses, bs->fence.batch_id);
zink_surface_reference(screen, &surf, NULL);
_mesa_set_remove(bs->surfaces, entry);
}
set_foreach(bs->bufferviews, entry) {
struct zink_buffer_view *buffer_view = (struct zink_buffer_view *)entry->key;
- zink_batch_usage_unset(&buffer_view->batch_uses, !!bs->fence.is_compute, bs->fence.batch_id);
+ zink_batch_usage_unset(&buffer_view->batch_uses, bs->fence.batch_id);
zink_buffer_view_reference(screen, &buffer_view, NULL);
_mesa_set_remove(bs->bufferviews, entry);
}
set_foreach(bs->desc_sets, entry) {
struct zink_descriptor_set *zds = (void*)entry->key;
- zink_batch_usage_unset(&zds->batch_uses, !!bs->fence.is_compute, bs->fence.batch_id);
+ zink_batch_usage_unset(&zds->batch_uses, bs->fence.batch_id);
/* reset descriptor pools when no bs is using this program to avoid
* having some inactive program hogging a billion descriptors
*/
}
set_foreach(bs->programs, entry) {
- if (bs->fence.is_compute) {
- struct zink_compute_program *comp = (struct zink_compute_program*)entry->key;
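+ /* programs now record their own type; the batch no longer implies gfx vs compute */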
+ struct zink_program *pg = (struct zink_program*)entry->key;
+ if (pg->is_compute) {
+ struct zink_compute_program *comp = (struct zink_compute_program*)pg;
bool in_use = comp == ctx->curr_compute;
if (zink_compute_program_reference(screen, &comp, NULL) && in_use)
ctx->curr_compute = NULL;
} else {
- struct zink_gfx_program *prog = (struct zink_gfx_program*)entry->key;
+ struct zink_gfx_program *prog = (struct zink_gfx_program*)pg;
bool in_use = prog == ctx->curr_program;
if (zink_gfx_program_reference(screen, &prog, NULL) && in_use)
ctx->curr_program = NULL;
bs->flush_res = NULL;
bs->descs_used = 0;
- ctx->resource_size[bs->fence.is_compute] -= bs->resource_size;
+ ctx->resource_size -= bs->resource_size;
bs->resource_size = 0;
}
}
void
-zink_batch_reset_all(struct zink_context *ctx, enum zink_queue queue)
+zink_batch_reset_all(struct zink_context *ctx)
{
- hash_table_foreach(&ctx->batch_states[queue], entry) {
+ hash_table_foreach(&ctx->batch_states, entry) {
struct zink_batch_state *bs = entry->data;
zink_reset_batch_state(ctx, bs);
- _mesa_hash_table_remove(&ctx->batch_states[queue], entry);
- util_dynarray_append(&ctx->free_batch_states[queue], struct zink_batch_state *, bs);
+ _mesa_hash_table_remove(&ctx->batch_states, entry);
+ util_dynarray_append(&ctx->free_batch_states, struct zink_batch_state *, bs);
}
}
}
static struct zink_batch_state *
-create_batch_state(struct zink_context *ctx, enum zink_queue queue)
+create_batch_state(struct zink_context *ctx)
{
struct zink_screen *screen = zink_screen(ctx->base.screen);
struct zink_batch_state *bs = rzalloc(NULL, struct zink_batch_state);
/* this destroys the batch state on failure */
return NULL;
- bs->fence.is_compute = queue == ZINK_QUEUE_COMPUTE;
-
return bs;
fail:
zink_batch_state_destroy(screen, bs);
{
struct zink_batch_state *bs = NULL;
- if (util_dynarray_num_elements(&ctx->free_batch_states[batch->queue], struct zink_batch_state*))
- bs = util_dynarray_pop(&ctx->free_batch_states[batch->queue], struct zink_batch_state*);
+ if (util_dynarray_num_elements(&ctx->free_batch_states, struct zink_batch_state*))
+ bs = util_dynarray_pop(&ctx->free_batch_states, struct zink_batch_state*);
if (!bs) {
- struct hash_entry *he = _mesa_hash_table_random_entry(&ctx->batch_states[batch->queue], find_unused_state);
+ struct hash_entry *he = _mesa_hash_table_random_entry(&ctx->batch_states, find_unused_state);
if (he) { //there may not be any entries available
bs = he->data;
- _mesa_hash_table_remove(&ctx->batch_states[batch->queue], he);
+ _mesa_hash_table_remove(&ctx->batch_states, he);
}
}
if (bs)
if (!batch->state) {
/* this is batch init, so create a few more states for later use */
for (int i = 0; i < 3; i++) {
- struct zink_batch_state *state = create_batch_state(ctx, batch->queue);
- util_dynarray_append(&ctx->free_batch_states[batch->queue], struct zink_batch_state *, state);
+ struct zink_batch_state *state = create_batch_state(ctx);
+ util_dynarray_append(&ctx->free_batch_states, struct zink_batch_state *, state);
}
}
- bs = create_batch_state(ctx, batch->queue);
+ bs = create_batch_state(ctx);
}
batch->state = bs;
}
debug_printf("vkBeginCommandBuffer failed\n");
batch->state->fence.batch_id = ctx->curr_batch;
- if (ctx->last_fence[batch->queue]) {
- struct zink_batch_state *last_state = zink_batch_state(ctx->last_fence[batch->queue]);
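+ /* a single queue means a single fence chain for tracking the previous batch */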
+ if (ctx->last_fence) {
+ struct zink_batch_state *last_state = zink_batch_state(ctx->last_fence);
batch->last_batch_id = last_state->fence.batch_id;
}
if (!ctx->queries_disabled)
}
}
- ctx->last_fence[batch->queue] = &batch->state->fence;
- _mesa_hash_table_insert_pre_hashed(&ctx->batch_states[batch->queue], batch->state->fence.batch_id, (void*)(uintptr_t)batch->state->fence.batch_id, batch->state);
- ctx->resource_size[batch->queue] += batch->state->resource_size;
+ ctx->last_fence = &batch->state->fence;
+ _mesa_hash_table_insert_pre_hashed(&ctx->batch_states, batch->state->fence.batch_id, (void*)(uintptr_t)batch->state->fence.batch_id, batch->state);
+ ctx->resource_size += batch->state->resource_size;
}
-/* returns a queue based on whether a resource
- has usage on a different queue than 'batch' belongs to
- */
-enum zink_queue
+void
zink_batch_reference_resource_rw(struct zink_batch *batch, struct zink_resource *res, bool write)
{
- enum zink_queue batch_to_flush = 0;
-
/* u_transfer_helper unrefs the stencil buffer when the depth buffer is unrefed,
* so we add an extra ref here to the stencil buffer to compensate
*/
zink_get_depth_stencil_resources((struct pipe_resource*)res, NULL, &stencil);
- if (batch->queue == ZINK_QUEUE_COMPUTE) {
- if ((write && zink_resource_has_usage(res, ZINK_RESOURCE_ACCESS_RW, ZINK_QUEUE_GFX)) ||
- (!write && zink_resource_has_usage(res, ZINK_RESOURCE_ACCESS_WRITE, ZINK_QUEUE_GFX)))
- batch_to_flush = ZINK_QUEUE_GFX;
- } else {
- if ((write && zink_resource_has_usage(res, ZINK_RESOURCE_ACCESS_READ, ZINK_QUEUE_COMPUTE)) ||
- zink_resource_has_usage(res, ZINK_RESOURCE_ACCESS_WRITE, ZINK_QUEUE_COMPUTE))
- batch_to_flush = ZINK_QUEUE_COMPUTE;
- }
-
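+ /* no cross-queue usage is possible anymore, so there is never a queue to flush */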
/* if the resource already has usage of any sort set for this batch, we can skip hashing */
- if (!zink_batch_usage_matches(&res->obj->reads, batch->queue, batch->state->fence.batch_id) &&
- !zink_batch_usage_matches(&res->obj->writes, batch->queue, batch->state->fence.batch_id)) {
+ if (!zink_batch_usage_matches(&res->obj->reads, batch->state->fence.batch_id) &&
+ !zink_batch_usage_matches(&res->obj->writes, batch->state->fence.batch_id)) {
bool found = false;
_mesa_set_search_and_add(batch->state->fence.resources, res->obj, &found);
if (!found) {
pipe_reference(NULL, &res->obj->reference);
- if (!batch->last_batch_id || !zink_batch_usage_matches(&res->obj->reads, batch->queue, batch->last_batch_id))
+ if (!batch->last_batch_id || !zink_batch_usage_matches(&res->obj->reads, batch->last_batch_id))
/* only add resource usage if it's "new" usage, though this only checks the most recent usage
* and not all pending usages
*/
batch->state->resource_size += res->obj->size;
if (stencil) {
pipe_reference(NULL, &stencil->obj->reference);
- if (!batch->last_batch_id || !zink_batch_usage_matches(&stencil->obj->reads, batch->queue, batch->last_batch_id))
+ if (!batch->last_batch_id || !zink_batch_usage_matches(&stencil->obj->reads, batch->last_batch_id))
batch->state->resource_size += stencil->obj->size;
}
}
}
if (write) {
if (stencil)
- zink_batch_usage_set(&stencil->obj->writes, batch->queue, batch->state->fence.batch_id);
- zink_batch_usage_set(&res->obj->writes, batch->queue, batch->state->fence.batch_id);
+ zink_batch_usage_set(&stencil->obj->writes, batch->state->fence.batch_id);
+ zink_batch_usage_set(&res->obj->writes, batch->state->fence.batch_id);
} else {
if (stencil)
- zink_batch_usage_set(&stencil->obj->reads, batch->queue, batch->state->fence.batch_id);
- zink_batch_usage_set(&res->obj->reads, batch->queue, batch->state->fence.batch_id);
+ zink_batch_usage_set(&stencil->obj->reads, batch->state->fence.batch_id);
+ zink_batch_usage_set(&res->obj->reads, batch->state->fence.batch_id);
}
/* multiple array entries are fine */
if (res->obj->persistent_maps)
util_dynarray_append(&batch->state->persistent_resources, struct zink_resource*, res);
batch->has_work = true;
- return batch_to_flush;
}
static bool
ptr_add_usage(struct zink_batch *batch, struct set *s, void *ptr, struct zink_batch_usage *u)
{
bool found = false;
- if (zink_batch_usage_matches(u, batch->queue, batch->state->fence.batch_id))
+ if (zink_batch_usage_matches(u, batch->state->fence.batch_id))
return false;
_mesa_set_search_and_add(s, ptr, &found);
assert(!found);
- zink_batch_usage_set(u, batch->queue, batch->state->fence.batch_id);
+ zink_batch_usage_set(u, batch->state->fence.batch_id);
return true;
}
}
void
-zink_batch_usage_set(struct zink_batch_usage *u, enum zink_queue queue, uint32_t batch_id)
+zink_batch_usage_set(struct zink_batch_usage *u, uint32_t batch_id)
{
- if (queue == ZINK_QUEUE_ANY) {
- p_atomic_set(&u->usage[ZINK_QUEUE_GFX], batch_id);
- p_atomic_set(&u->usage[ZINK_QUEUE_COMPUTE], batch_id);
- } else
- p_atomic_set(&u->usage[queue], batch_id);
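+ /* one queue, one atomic usage slot */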
+ p_atomic_set(&u->usage, batch_id);
}
bool
-zink_batch_usage_matches(struct zink_batch_usage *u, enum zink_queue queue, uint32_t batch_id)
+zink_batch_usage_matches(struct zink_batch_usage *u, uint32_t batch_id)
{
- if (queue < ZINK_QUEUE_ANY) {
- uint32_t usage = p_atomic_read(&u->usage[queue]);
- return usage == batch_id;
- }
- for (unsigned i = 0; i < ZINK_QUEUE_ANY; i++) {
- uint32_t usage = p_atomic_read(&u->usage[queue]);
- if (usage == batch_id)
- return true;
- }
- return false;
+ uint32_t usage = p_atomic_read(&u->usage);
+ return usage == batch_id;
}
bool
zink_batch_usage_exists(struct zink_batch_usage *u)
{
- uint32_t usage = p_atomic_read(&u->usage[ZINK_QUEUE_GFX]);
- if (usage)
- return true;
- usage = p_atomic_read(&u->usage[ZINK_QUEUE_COMPUTE]);
+ uint32_t usage = p_atomic_read(&u->usage);
return !!usage;
}
struct zink_sampler_view;
struct zink_surface;
-enum zink_queue {
- ZINK_QUEUE_GFX,
- ZINK_QUEUE_COMPUTE,
- ZINK_QUEUE_ANY,
-};
-
struct zink_batch_usage {
/* this has to be atomic for fence access, so we can't use a bitmask and make everything neat */
- uint32_t usage[2]; //gfx, compute
+ uint32_t usage;
};
struct zink_batch_state {
struct zink_batch {
struct zink_batch_state *state;
- enum zink_queue queue;
uint32_t last_batch_id;
zink_clear_batch_state(struct zink_context *ctx, struct zink_batch_state *bs);
void
-zink_batch_reset_all(struct zink_context *ctx, enum zink_queue queue);
+zink_batch_reset_all(struct zink_context *ctx);
void
zink_batch_state_destroy(struct zink_screen *screen, struct zink_batch_state *bs);
void
zink_end_batch(struct zink_context *ctx, struct zink_batch *batch);
-enum zink_queue
+void
zink_batch_reference_resource_rw(struct zink_batch *batch,
struct zink_resource *res,
bool write);
zink_batch_add_desc_set(struct zink_batch *batch, struct zink_descriptor_set *zds);
void
-zink_batch_usage_set(struct zink_batch_usage *u, enum zink_queue queue, uint32_t batch_id);
+zink_batch_usage_set(struct zink_batch_usage *u, uint32_t batch_id);
bool
-zink_batch_usage_matches(struct zink_batch_usage *u, enum zink_queue queue, uint32_t batch_id);
+zink_batch_usage_matches(struct zink_batch_usage *u, uint32_t batch_id);
bool
zink_batch_usage_exists(struct zink_batch_usage *u);
static inline void
-zink_batch_usage_unset(struct zink_batch_usage *u, enum zink_queue queue, uint32_t batch_id)
+zink_batch_usage_unset(struct zink_batch_usage *u, uint32_t batch_id)
{
- p_atomic_cmpxchg(&u->usage[queue], batch_id, 0);
+ p_atomic_cmpxchg(&u->usage, batch_id, 0);
}
#endif
{
struct zink_context *ctx = zink_context(pctx);
struct pipe_framebuffer_state *fb = &ctx->fb_state;
- struct zink_batch *batch = zink_batch_g(ctx);
+ struct zink_batch *batch = &ctx->batch;
bool needs_rp = false;
if (scissor_state) {
struct pipe_screen *pscreen = pctx->screen;
struct u_rect region = zink_rect_from_box(box);
bool needs_rp = !zink_blit_region_fills(region, pres->width0, pres->height0) || ctx->render_condition_active;
- struct zink_batch *batch = zink_batch_g(ctx);
+ struct zink_batch *batch = &ctx->batch;
struct pipe_surface *surf = NULL;
if (res->aspect & VK_IMAGE_ASPECT_COLOR_BIT) {
if (!zink_fb_clear_enabled(ctx, i))
return;
if (zink_resource(pres)->aspect == VK_IMAGE_ASPECT_COLOR_BIT) {
- assert(!zink_batch_g(ctx)->in_rp);
+ assert(!ctx->batch.in_rp);
if (zink_fb_clear_needs_explicit(fb_clear) || !check_3d_layers(ctx->fb_state.cbufs[i]))
/* this will automatically trigger all the clears */
zink_batch_rp(ctx);
zink_fb_clear_reset(ctx, i);
return;
} else {
- assert(!zink_batch_g(ctx)->in_rp);
+ assert(!ctx->batch.in_rp);
if (zink_fb_clear_needs_explicit(fb_clear) || !check_3d_layers(ctx->fb_state.zsbuf))
/* this will automatically trigger all the clears */
zink_batch_rp(ctx);
for (unsigned i = 0; i < ARRAY_SIZE(ctx->null_buffers); i++)
pipe_resource_reference(&ctx->null_buffers[i], NULL);
- for (unsigned i = 0; i < ZINK_QUEUE_ANY; i++) {
- struct zink_fence *fence = zink_fence(&ctx->batches[i].state);
- zink_clear_batch_state(ctx, ctx->batches[i].state);
- zink_fence_reference(zink_screen(pctx->screen), &fence, NULL);
- hash_table_foreach(&ctx->batch_states[i], entry) {
- fence = entry->data;
- zink_clear_batch_state(ctx, entry->data);
- zink_fence_reference(zink_screen(pctx->screen), &fence, NULL);
- }
- util_dynarray_foreach(&ctx->free_batch_states[i], struct zink_batch_state*, bs) {
- fence = zink_fence(*bs);
- zink_clear_batch_state(ctx, *bs);
- zink_fence_reference(zink_screen(pctx->screen), &fence, NULL);
- }
+ struct zink_fence *fence = zink_fence(&ctx->batch.state);
+ zink_clear_batch_state(ctx, ctx->batch.state);
+ zink_fence_reference(screen, &fence, NULL);
+ hash_table_foreach(&ctx->batch_states, entry) {
+ fence = entry->data;
+ zink_clear_batch_state(ctx, entry->data);
+ zink_fence_reference(screen, &fence, NULL);
+ }
+ util_dynarray_foreach(&ctx->free_batch_states, struct zink_batch_state*, bs) {
+ fence = zink_fence(*bs);
+ zink_clear_batch_state(ctx, *bs);
+ zink_fence_reference(screen, &fence, NULL);
}
if (ctx->framebuffer) {
void *sampler_state)
{
struct zink_sampler_state *sampler = sampler_state;
- struct zink_batch *batch = zink_batch_g(zink_context(pctx));
+ struct zink_batch *batch = &zink_context(pctx)->batch;
zink_descriptor_set_refs_clear(&sampler->desc_set_refs, sampler_state);
util_dynarray_append(&batch->state->zombie_samplers, VkSampler,
sampler->sampler);
void
zink_begin_render_pass(struct zink_context *ctx, struct zink_batch *batch)
{
- assert(batch == zink_batch_g(ctx));
-
setup_framebuffer(ctx);
assert(ctx->gfx_pipeline_state.render_pass);
struct pipe_framebuffer_state *fb_state = &ctx->fb_state;
}
static void
-flush_batch(struct zink_context *ctx, enum zink_queue queue)
+flush_batch(struct zink_context *ctx)
{
- struct zink_batch *batch = zink_batch_queue(ctx, queue);
- if (queue == ZINK_QUEUE_GFX)
- zink_end_render_pass(ctx, batch);
+ struct zink_batch *batch = &ctx->batch;
+ zink_end_render_pass(ctx, batch);
zink_end_batch(ctx, batch);
incr_curr_batch(ctx);
zink_start_batch(ctx, batch);
- if (queue == ZINK_QUEUE_GFX && zink_screen(ctx->base.screen)->info.have_EXT_transform_feedback && ctx->num_so_targets)
+ if (zink_screen(ctx->base.screen)->info.have_EXT_transform_feedback && ctx->num_so_targets)
ctx->dirty_so_targets = true;
}
struct zink_batch *
zink_batch_rp(struct zink_context *ctx)
{
- struct zink_batch *batch = zink_batch_g(ctx);
+ struct zink_batch *batch = &ctx->batch;
if (!batch->in_rp) {
zink_begin_render_pass(ctx, batch);
assert(ctx->framebuffer && ctx->framebuffer->rp);
struct zink_batch *
zink_batch_no_rp(struct zink_context *ctx)
{
- struct zink_batch *batch = zink_batch_g(ctx);
+ struct zink_batch *batch = &ctx->batch;
zink_end_render_pass(ctx, batch);
assert(!batch->in_rp);
return batch;
}
void
-zink_flush_queue(struct zink_context *ctx, enum zink_queue queue)
+zink_flush_queue(struct zink_context *ctx)
{
- flush_batch(ctx, queue);
+ flush_batch(ctx);
}
static void
if (!zink_resource_image_needs_barrier(res, new_layout, flags, pipeline))
return;
/* only barrier if we're changing layout or doing something besides read -> read */
- if (!batch) {
- if (pipeline == VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT)
- batch = zink_batch_c(ctx);
- else
- batch = zink_batch_no_rp(ctx);
- }
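+ /* barriers always land on the unified batch, which must be outside a render pass */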
+ batch = zink_batch_no_rp(ctx);
assert(!batch->in_rp);
VkImageSubresourceRange isr = {
res->aspect,
if (!zink_resource_buffer_needs_barrier(res, flags, pipeline))
return;
/* only barrier if we're changing layout or doing something besides read -> read */
- if (!batch) {
- if (pipeline == VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT)
- batch = zink_batch_c(ctx);
- else
- batch = zink_batch_no_rp(ctx);
- }
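+ /* same as for image barriers: use the unified batch outside any render pass */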
+ batch = zink_batch_no_rp(ctx);
assert(!batch->in_rp);
VkBufferMemoryBarrier bmb = {
VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
{
struct zink_context *ctx = zink_context(pctx);
bool deferred = flags & PIPE_FLUSH_DEFERRED;
- struct zink_batch *batch = zink_batch_g(ctx);
+ struct zink_batch *batch = &ctx->batch;
struct zink_fence *fence = &batch->state->fence;
if (!deferred && ctx->clears_enabled) {
if (zink_screen(pctx->screen)->needs_mesa_flush_wsi && ctx->fb_state.cbufs[0])
batch->state->flush_res = zink_resource(ctx->fb_state.cbufs[0]->texture);
}
- flush_batch(ctx, ZINK_QUEUE_GFX);
+ flush_batch(ctx);
}
if (!pfence)
return;
if (deferred && !batch->has_work) {
- fence = ctx->last_fence[ZINK_QUEUE_GFX];
+ fence = ctx->last_fence;
}
zink_fence_reference(zink_screen(pctx->screen),
(struct zink_fence **)pfence,
}
void
-zink_maybe_flush_or_stall(struct zink_context *ctx, enum zink_queue queue)
+zink_maybe_flush_or_stall(struct zink_context *ctx)
{
struct zink_screen *screen = zink_screen(ctx->base.screen);
/* flush anytime our total batch memory usage is potentially >= 1/10 of total system memory */
- if (zink_batch_queue(ctx, queue)->state->resource_size >= screen->total_mem / 10)
- flush_batch(ctx, queue);
+ if (ctx->batch.state->resource_size >= screen->total_mem / 10)
+ flush_batch(ctx);
- if (ctx->resource_size[queue] >= screen->total_mem / 10) {
- zink_fence_finish(zink_screen(ctx->base.screen), &ctx->base, ctx->last_fence[queue], PIPE_TIMEOUT_INFINITE);
- zink_batch_reset_all(ctx, queue);
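+ /* totals from already-submitted batches are still too high: stall on the last fence and recycle every batch state */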
+ if (ctx->resource_size >= screen->total_mem / 10) {
+ zink_fence_finish(zink_screen(ctx->base.screen), &ctx->base, ctx->last_fence, PIPE_TIMEOUT_INFINITE);
+ zink_batch_reset_all(ctx);
}
}
{
struct zink_context *ctx = zink_context(pctx);
- if (zink_batch_g(ctx)->has_work)
+ if (ctx->batch.has_work)
pctx->flush(pctx, NULL, PIPE_FLUSH_HINT_FINISH);
- if (ctx->last_fence[ZINK_QUEUE_GFX])
- zink_fence_finish(zink_screen(pctx->screen), pctx, ctx->last_fence[ZINK_QUEUE_GFX], PIPE_TIMEOUT_INFINITE);
+ if (ctx->last_fence)
+ zink_fence_finish(zink_screen(pctx->screen), pctx, ctx->last_fence, PIPE_TIMEOUT_INFINITE);
}
void
-zink_wait_on_batch(struct zink_context *ctx, enum zink_queue queue, uint32_t batch_id)
+zink_wait_on_batch(struct zink_context *ctx, uint32_t batch_id)
{
- struct zink_batch_state *bs = zink_batch_queue(ctx, queue)->state;
+ struct zink_batch_state *bs = ctx->batch.state;
assert(bs);
if (!batch_id || bs->fence.batch_id == batch_id)
/* not submitted yet */
- flush_batch(ctx, queue);
+ flush_batch(ctx);
struct zink_fence *fence;
- assert(batch_id || ctx->last_fence[queue]);
- if (ctx->last_fence[queue] && (!batch_id || batch_id == zink_batch_state(ctx->last_fence[queue])->fence.batch_id))
- fence = ctx->last_fence[queue];
+ assert(batch_id || ctx->last_fence);
+ if (ctx->last_fence && (!batch_id || batch_id == zink_batch_state(ctx->last_fence)->fence.batch_id))
+ fence = ctx->last_fence;
else {
- struct hash_entry *he = _mesa_hash_table_search_pre_hashed(&ctx->batch_states[queue], batch_id, (void*)(uintptr_t)batch_id);
+ struct hash_entry *he = _mesa_hash_table_search_pre_hashed(&ctx->batch_states, batch_id, (void*)(uintptr_t)batch_id);
if (!he) {
- util_dynarray_foreach(&ctx->free_batch_states[queue], struct zink_batch_state*, bs) {
+ util_dynarray_foreach(&ctx->free_batch_states, struct zink_batch_state*, bs) {
if ((*bs)->fence.batch_id == batch_id)
return;
}
- if (ctx->last_fence[queue] && ctx->last_fence[queue]->batch_id > batch_id)
+ if (ctx->last_fence && ctx->last_fence->batch_id > batch_id)
/* already completed */
return;
unreachable("should've found batch state");
zink_texture_barrier(struct pipe_context *pctx, unsigned flags)
{
struct zink_context *ctx = zink_context(pctx);
- if (zink_batch_g(ctx)->has_work)
+ if (ctx->batch.has_work)
pctx->flush(pctx, NULL, 0);
- zink_flush_queue(ctx, ZINK_QUEUE_COMPUTE);
}
static void
b.srcAccessMask = sflags;
b.dstAccessMask = dflags;
- struct zink_batch *batch = zink_batch_g(ctx);
+ struct zink_batch *batch = &ctx->batch;
if (batch->has_work) {
zink_end_render_pass(ctx, batch);
/* this should be the only call needed */
vkCmdPipelineBarrier(batch->state->cmdbuf, src, dst, 0, 0, &b, 0, NULL, 0, NULL);
- flush_batch(ctx, ZINK_QUEUE_GFX);
- }
- batch = zink_batch_c(ctx);
- if (batch->has_work) {
- /* this should be the only call needed */
- vkCmdPipelineBarrier(batch->state->cmdbuf, src, dst, 0, 0, &b, 0, NULL, 0, NULL);
- zink_end_batch(ctx, batch);
- zink_start_batch(ctx, batch);
+ flush_batch(ctx);
}
}
}
}
-static void
-init_batch(struct zink_context *ctx, enum zink_queue queue)
-{
- struct zink_batch *batch = zink_batch_queue(ctx, queue);
- batch->queue = queue;
- zink_start_batch(ctx, batch);
-}
-
struct pipe_context *
zink_context_create(struct pipe_screen *pscreen, void *priv, unsigned flags)
{
zink_context_resource_init(&ctx->base);
zink_context_query_init(&ctx->base);
- for (unsigned i = 0; i < ZINK_QUEUE_ANY; i++) {
- util_dynarray_init(&ctx->free_batch_states[i], ctx);
- _mesa_hash_table_init(&ctx->batch_states[i], ctx, NULL, _mesa_key_pointer_equal);
- }
+ util_dynarray_init(&ctx->free_batch_states, ctx);
+ _mesa_hash_table_init(&ctx->batch_states, ctx, NULL, _mesa_key_pointer_equal);
ctx->gfx_pipeline_state.have_EXT_extended_dynamic_state = screen->info.have_EXT_extended_dynamic_state;
goto fail;
incr_curr_batch(ctx);
- init_batch(ctx, ZINK_QUEUE_GFX);
- if (!zink_batch_g(ctx)->state)
- goto fail;
-
- init_batch(ctx, ZINK_QUEUE_COMPUTE);
- if (!zink_batch_c(ctx)->state)
+ zink_start_batch(ctx, &ctx->batch);
+ if (!ctx->batch.state)
goto fail;
vkGetDeviceQueue(screen->dev, screen->gfx_queue, 0, &ctx->queue);
bool is_device_lost;
uint32_t curr_batch; //the current batch id
- struct zink_batch batches[2]; //gfx, compute
- struct zink_fence *last_fence[2]; //gfx, compute; the last command buffer submitted
- VkQueue queue;
- struct hash_table batch_states[2]; //gfx, compute; submitted batch states
- struct util_dynarray free_batch_states[2]; //gfx, compute; unused batch states
- VkDeviceSize resource_size[2]; //gfx, compute; the accumulated size of resources in submitted buffers
+ struct zink_batch batch;
+ struct zink_fence *last_fence; //the last command buffer submitted
+ VkQueue queue; //gfx+compute
+ struct hash_table batch_states; //submitted batch states
+ struct util_dynarray free_batch_states; //unused batch states
+ VkDeviceSize resource_size; //the accumulated size of resources in submitted buffers
struct pipe_constant_buffer ubos[PIPE_SHADER_TYPES][PIPE_MAX_CONSTANT_BUFFERS];
struct pipe_shader_buffer ssbos[PIPE_SHADER_TYPES][PIPE_MAX_SHADER_BUFFERS];
return ctx->clears_enabled & (PIPE_CLEAR_COLOR0 << idx);
}
-static inline struct zink_batch *
-zink_batch_queue(struct zink_context *ctx, enum zink_queue queue_type)
-{
- assert(queue_type < ARRAY_SIZE(ctx->batches));
- return &ctx->batches[queue_type];
-}
-
-static inline struct zink_batch *
-zink_batch_g(struct zink_context *ctx)
-{
- return &ctx->batches[ZINK_QUEUE_GFX];
-}
-
-static inline struct zink_batch *
-zink_batch_c(struct zink_context *ctx)
-{
- return &ctx->batches[ZINK_QUEUE_COMPUTE];
-}
-
struct zink_batch *
zink_batch_rp(struct zink_context *ctx);
zink_fence_wait(struct pipe_context *ctx);
void
-zink_wait_on_batch(struct zink_context *ctx, enum zink_queue queue, uint32_t batch_id);
+zink_wait_on_batch(struct zink_context *ctx, uint32_t batch_id);
void
-zink_flush_queue(struct zink_context *ctx, enum zink_queue queue);
+zink_flush_queue(struct zink_context *ctx);
void
-zink_maybe_flush_or_stall(struct zink_context *ctx, enum zink_queue queue);
+zink_maybe_flush_or_stall(struct zink_context *ctx);
bool
zink_resource_access_is_write(VkAccessFlags flags);
pipe_reference_init(&zds->reference, 1);
zds->pool = pool;
zds->hash = 0;
- zds->batch_uses.usage[0] = zds->batch_uses.usage[1] = 0;
+ zds->batch_uses.usage = 0;
zds->invalid = true;
zds->punted = zds->recycled = false;
if (num_resources) {
struct zink_descriptor_set *zds;
struct zink_screen *screen = zink_screen(ctx->base.screen);
struct zink_program *pg = is_compute ? (struct zink_program *)ctx->curr_compute : (struct zink_program *)ctx->curr_program;
- struct zink_batch *batch = is_compute ? zink_batch_c(ctx) : zink_batch_g(ctx);
+ struct zink_batch *batch = &ctx->batch;
struct zink_descriptor_pool *pool = pg->pool[type];
unsigned descs_used = 1;
assert(type < ZINK_DESCRIPTOR_TYPES);
{
struct zink_context *ctx = zink_context(pctx);
struct zink_screen *screen = zink_screen(pctx->screen);
- struct zink_batch *batch = zink_batch_g(ctx);
+ struct zink_batch *batch = &ctx->batch;
VkBuffer buffers[PIPE_MAX_SO_OUTPUTS] = {};
VkDeviceSize buffer_offsets[PIPE_MAX_SO_OUTPUTS] = {};
VkDeviceSize buffer_sizes[PIPE_MAX_SO_OUTPUTS] = {};
return *binding_a - *binding_b;
}
-static bool
+static void
write_descriptors(struct zink_context *ctx, struct zink_descriptor_set *zds, unsigned num_wds, VkWriteDescriptorSet *wds,
- bool is_compute, bool cache_hit, bool need_resource_refs)
+ bool cache_hit, bool need_resource_refs)
{
- bool need_flush = false;
- struct zink_batch *batch = is_compute ? zink_batch_c(ctx) : zink_batch_g(ctx);
+ struct zink_batch *batch = &ctx->batch;
struct zink_screen *screen = zink_screen(ctx->base.screen);
assert(zds->desc_set);
- enum zink_queue check_flush_id = is_compute ? ZINK_QUEUE_GFX : ZINK_QUEUE_COMPUTE;
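+ /* descriptor updates can no longer force a flush of the other queue, hence the void return */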
if (!cache_hit && num_wds)
vkUpdateDescriptorSets(screen->dev, num_wds, wds, 0, NULL);
for (int i = 0; zds->pool->key.num_descriptors && i < util_dynarray_num_elements(&zds->barriers, struct zink_descriptor_barrier); ++i) {
struct zink_descriptor_barrier *barrier = util_dynarray_element(&zds->barriers, struct zink_descriptor_barrier, i);
if (need_resource_refs || (ctx->curr_compute && ctx->curr_program))
- need_flush |= zink_batch_reference_resource_rw(batch, barrier->res, zink_resource_access_is_write(barrier->access)) == check_flush_id;
+ zink_batch_reference_resource_rw(batch, barrier->res, zink_resource_access_is_write(barrier->access));
zink_resource_barrier(ctx, NULL, barrier->res,
barrier->layout, barrier->access, barrier->stage);
}
-
- return need_flush;
}
static unsigned
return num_wds + 1;
}
-static bool
+static void
update_ubo_descriptors(struct zink_context *ctx, struct zink_descriptor_set *zds,
bool is_compute, bool cache_hit, bool need_resource_refs,
uint32_t *dynamic_offsets, unsigned *dynamic_offset_idx)
dynamic_offsets[i] = dynamic_buffers[i].offset;
*dynamic_offset_idx = dynamic_offset_count;
- return write_descriptors(ctx, zds, num_wds, wds, is_compute, cache_hit, need_resource_refs);
+ write_descriptors(ctx, zds, num_wds, wds, cache_hit, need_resource_refs);
}
-static bool
+static void
update_ssbo_descriptors(struct zink_context *ctx, struct zink_descriptor_set *zds,
bool is_compute, bool cache_hit, bool need_resource_refs)
{
}
}
_mesa_set_destroy(ht, NULL);
- return write_descriptors(ctx, zds, num_wds, wds, is_compute, cache_hit, need_resource_refs);
+ write_descriptors(ctx, zds, num_wds, wds, cache_hit, need_resource_refs);
}
static void
}
}
-static bool
+static void
update_sampler_descriptors(struct zink_context *ctx, struct zink_descriptor_set *zds,
bool is_compute, bool cache_hit, bool need_resource_refs)
{
desc_set_sampler_add(ctx, zds, sampler_view, sampler, num_resources++,
zink_shader_descriptor_is_buffer(shader, ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW, j),
cache_hit);
- struct zink_batch *batch = is_compute ? zink_batch_c(ctx) : zink_batch_g(ctx);
+ struct zink_batch *batch = &ctx->batch;
if (sampler_view)
zink_batch_reference_sampler_view(batch, sampler_view);
if (sampler)
/* this only tracks the most recent usage for now */
- zink_batch_usage_set(&sampler->batch_uses, batch->queue, batch->state->fence.batch_id);
+ zink_batch_usage_set(&sampler->batch_uses, batch->state->fence.batch_id);
}
assert(num_wds < num_descriptors);
}
}
_mesa_set_destroy(ht, NULL);
- return write_descriptors(ctx, zds, num_wds, wds, is_compute, cache_hit, need_resource_refs);
+ write_descriptors(ctx, zds, num_wds, wds, cache_hit, need_resource_refs);
}
-static bool
+static void
update_image_descriptors(struct zink_context *ctx, struct zink_descriptor_set *zds,
bool is_compute, bool cache_hit, bool need_resource_refs)
{
&num_buffer_info, &buffer_views[num_buffer_info],
NULL, imageview, bufferview, !k);
- struct zink_batch *batch = is_compute ? zink_batch_c(ctx) : zink_batch_g(ctx);
+ struct zink_batch *batch = &ctx->batch;
if (res)
zink_batch_reference_image_view(batch, image_view);
}
}
}
_mesa_set_destroy(ht, NULL);
- return write_descriptors(ctx, zds, num_wds, wds, is_compute, cache_hit, need_resource_refs);
+ write_descriptors(ctx, zds, num_wds, wds, cache_hit, need_resource_refs);
}
static void
else
zds[h] = NULL;
}
- struct zink_batch *batch = is_compute ? zink_batch_c(ctx) : zink_batch_g(ctx);
+ struct zink_batch *batch = &ctx->batch;
zink_batch_reference_program(batch, pg);
uint32_t dynamic_offsets[PIPE_MAX_CONSTANT_BUFFERS];
unsigned dynamic_offset_idx = 0;
- bool need_flush = false;
if (zds[ZINK_DESCRIPTOR_TYPE_UBO])
- need_flush |= update_ubo_descriptors(ctx, zds[ZINK_DESCRIPTOR_TYPE_UBO],
+ update_ubo_descriptors(ctx, zds[ZINK_DESCRIPTOR_TYPE_UBO],
is_compute, cache_hit[ZINK_DESCRIPTOR_TYPE_UBO],
need_resource_refs[ZINK_DESCRIPTOR_TYPE_UBO], dynamic_offsets, &dynamic_offset_idx);
if (zds[ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW])
- need_flush |= update_sampler_descriptors(ctx, zds[ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW],
+ update_sampler_descriptors(ctx, zds[ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW],
is_compute, cache_hit[ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW],
need_resource_refs[ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW]);
if (zds[ZINK_DESCRIPTOR_TYPE_SSBO])
- need_flush |= update_ssbo_descriptors(ctx, zds[ZINK_DESCRIPTOR_TYPE_SSBO],
+ update_ssbo_descriptors(ctx, zds[ZINK_DESCRIPTOR_TYPE_SSBO],
is_compute, cache_hit[ZINK_DESCRIPTOR_TYPE_SSBO],
need_resource_refs[ZINK_DESCRIPTOR_TYPE_SSBO]);
if (zds[ZINK_DESCRIPTOR_TYPE_IMAGE])
- need_flush |= update_image_descriptors(ctx, zds[ZINK_DESCRIPTOR_TYPE_IMAGE],
+ update_image_descriptors(ctx, zds[ZINK_DESCRIPTOR_TYPE_IMAGE],
is_compute, cache_hit[ZINK_DESCRIPTOR_TYPE_IMAGE],
need_resource_refs[ZINK_DESCRIPTOR_TYPE_IMAGE]);
zds[h]->pool->type == ZINK_DESCRIPTOR_TYPE_UBO ? dynamic_offset_idx : 0, dynamic_offsets);
}
}
- if (!need_flush)
- return;
-
- if (is_compute)
- /* flush gfx batch */
- ctx->base.flush(&ctx->base, NULL, PIPE_FLUSH_HINT_FINISH);
- else {
- /* flush compute batch */
- zink_flush_queue(ctx, ZINK_QUEUE_COMPUTE);
- }
}
static bool
bool need_index_buffer_unref = false;
/* check memory usage and flush/stall as needed to avoid oom */
- zink_maybe_flush_or_stall(ctx, ZINK_QUEUE_GFX);
+ zink_maybe_flush_or_stall(ctx);
if (dinfo->primitive_restart && !restart_supported(dinfo->mode)) {
util_draw_vbo_without_prim_restart(pctx, dinfo, dindirect, &draws[0]);
{
struct zink_context *ctx = zink_context(pctx);
struct zink_screen *screen = zink_screen(pctx->screen);
- struct zink_batch *batch = zink_batch_c(ctx);
+ struct zink_batch *batch = &ctx->batch;
/* check memory usage and flush/stall as needed to avoid oom */
- zink_maybe_flush_or_stall(ctx, ZINK_QUEUE_COMPUTE);
+ zink_maybe_flush_or_stall(ctx);
struct zink_compute_program *comp_program = get_compute_program(ctx);
if (!comp_program)
/* unref all used resources */
set_foreach(fence->resources, entry) {
struct zink_resource_object *obj = (struct zink_resource_object *)entry->key;
- zink_batch_usage_unset(&obj->reads, !!fence->is_compute, fence->batch_id);
- zink_batch_usage_unset(&obj->writes, !!fence->is_compute, fence->batch_id);
+ zink_batch_usage_unset(&obj->reads, fence->batch_id);
+ zink_batch_usage_unset(&obj->writes, fence->batch_id);
zink_resource_object_reference(screen, &obj, NULL);
_mesa_set_remove(fence->resources, entry);
}
uint64_t timeout_ns)
{
if (pctx && fence->deferred_ctx == pctx) {
- zink_batch_g(zink_context(pctx))->has_work = true;
+ zink_context(pctx)->batch.has_work = true;
/* this must be the current batch */
pctx->flush(pctx, NULL, 0);
}
return;
if (fence->deferred_ctx) {
- zink_batch_g(zink_context(pctx))->has_work = true;
+ zink_context(pctx)->batch.has_work = true;
/* this must be the current batch */
pctx->flush(pctx, NULL, 0);
}
uint32_t batch_id;
struct set *resources; /* resources need access removed asap, so they're on the fence */
bool submitted;
- bool is_compute;
};
static inline struct zink_fence *
goto fail;
pipe_reference_init(&comp->base.reference, 1);
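+ /* tag compute programs so shared batch-state cleanup can tell them apart from gfx */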
+ comp->base.is_compute = true;
if (!ctx->curr_compute || !ctx->curr_compute->shader_cache) {
/* TODO: cs shader keys placeholder for now */
struct zink_program {
struct pipe_reference reference;
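+ /* set once at creation; distinguishes gfx and compute programs now that both share a batch */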
+ bool is_compute;
struct zink_descriptor_pool *pool[ZINK_DESCRIPTOR_TYPES];
struct zink_descriptor_set *last_set[ZINK_DESCRIPTOR_TYPES];
}
static bool
-is_cs_query(struct zink_query *query)
-{
- return query->type == PIPE_QUERY_PIPELINE_STATISTICS_SINGLE && query->index == PIPE_STAT_QUERY_CS_INVOCATIONS;
-}
-
-static bool
is_time_query(struct zink_query *query)
{
return query->type == PIPE_QUERY_TIMESTAMP || query->type == PIPE_QUERY_TIME_ELAPSED;
return query->type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE || query->type == PIPE_QUERY_SO_OVERFLOW_PREDICATE;
}
-static struct zink_batch *
-get_batch_for_query(struct zink_context *ctx, struct zink_query *query, bool no_rp)
-{
- if (query && is_cs_query(query))
- return zink_batch_c(ctx);
- if (no_rp)
- return zink_batch_no_rp(ctx);
- return zink_batch_g(ctx);
-}
-
static struct pipe_query *
zink_create_query(struct pipe_context *pctx,
unsigned query_type, unsigned index)
}
}
}
- struct zink_batch *batch = get_batch_for_query(zink_context(pctx), query, true);
+ struct zink_batch *batch = &zink_context(pctx)->batch;
batch->has_work = true;
vkCmdResetQueryPool(batch->state->cmdbuf, query->query_pool, 0, query->num_queries);
if (query->type == PIPE_QUERY_PRIMITIVES_GENERATED)
unsigned result_size = result_type <= PIPE_QUERY_TYPE_U32 ? sizeof(uint32_t) : sizeof(uint64_t);
struct zink_query *query = (struct zink_query*)pquery;
union pipe_query_result result;
- if (zink_batch_usage_matches(&query->batch_id, ZINK_QUEUE_GFX, zink_batch_g(ctx)->state->fence.batch_id))
+ if (zink_batch_usage_matches(&query->batch_id, ctx->curr_batch))
pctx->flush(pctx, NULL, PIPE_FLUSH_HINT_FINISH);
- else if (is_cs_query(query))
- zink_flush_queue(ctx, ZINK_QUEUE_COMPUTE);
bool success = get_query_result(pctx, pquery, wait, &result);
if (!success) {
copy_results_to_buffer(struct zink_context *ctx, struct zink_query *query, struct zink_resource *res, unsigned offset, int num_results, VkQueryResultFlags flags)
{
unsigned query_id = query->last_start;
- struct zink_batch *batch = get_batch_for_query(ctx, query, true);
+ struct zink_batch *batch = &ctx->batch;
unsigned base_result_size = (flags & VK_QUERY_RESULT_64_BIT) ? sizeof(uint64_t) : sizeof(uint32_t);
unsigned result_size = base_result_size * num_results;
if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)
result_size += base_result_size;
- if (is_cs_query(query)) {
- if (zink_resource_has_usage(res, ZINK_RESOURCE_ACCESS_WRITE, ZINK_QUEUE_GFX))
- ctx->base.flush(&ctx->base, NULL, PIPE_FLUSH_HINT_FINISH);
- }
/* if it's a single query that doesn't need special handling, we can copy it and be done */
zink_batch_reference_resource_rw(batch, res, true);
zink_resource_buffer_barrier(ctx, batch, res, VK_ACCESS_TRANSFER_WRITE_BIT, 0);
vkCmdCopyQueryPoolResults(batch->state->cmdbuf, query->query_pool, query_id, num_results, res->obj->buffer,
offset, 0, flags);
/* this is required for compute batch sync and will be removed later */
- if (is_cs_query(query))
- zink_flush_queue(ctx, ZINK_QUEUE_COMPUTE);
- else
- ctx->base.flush(&ctx->base, NULL, PIPE_FLUSH_HINT_FINISH);
+ zink_flush_queue(ctx);
}
*
* - vkCmdResetQueryPool spec
*/
- batch = get_batch_for_query(ctx, q, true);
+ batch = zink_batch_no_rp(ctx);
if (q->type != PIPE_QUERY_TIMESTAMP)
get_query_result(&ctx->base, (struct pipe_query*)q, false, &q->accumulated_result);
if (needs_stats_list(q))
list_addtail(&q->stats_list, &ctx->primitives_generated_queries);
p_atomic_inc(&q->fences);
- zink_batch_usage_set(&q->batch_id, batch->queue, batch->state->fence.batch_id);
+ zink_batch_usage_set(&q->batch_id, batch->state->fence.batch_id);
_mesa_set_add(batch->state->active_queries, q);
}
{
struct zink_query *query = (struct zink_query *)q;
struct zink_context *ctx = zink_context(pctx);
- struct zink_batch *batch = get_batch_for_query(ctx, query, false);
+ struct zink_batch *batch = &ctx->batch;
query->last_start = query->curr_query;
if (is_time_query(q)) {
vkCmdWriteTimestamp(batch->state->cmdbuf, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
q->query_pool, q->curr_query);
- zink_batch_usage_set(&q->batch_id, batch->queue, batch->state->fence.batch_id);
+ zink_batch_usage_set(&q->batch_id, batch->state->fence.batch_id);
} else if (q->type == PIPE_QUERY_PRIMITIVES_EMITTED ||
q->type == PIPE_QUERY_PRIMITIVES_GENERATED ||
q->type == PIPE_QUERY_SO_OVERFLOW_PREDICATE)
{
struct zink_context *ctx = zink_context(pctx);
struct zink_query *query = (struct zink_query *)q;
- struct zink_batch *batch = get_batch_for_query(ctx, query, false);
+ struct zink_batch *batch = &ctx->batch;
if (needs_stats_list(query))
list_delinit(&query->stats_list);
{
struct zink_query *query = (void*)q;
struct zink_context *ctx = zink_context(pctx);
- enum zink_queue queue = is_cs_query(query) ? ZINK_QUEUE_COMPUTE : ZINK_QUEUE_GFX;
- uint32_t batch_id = p_atomic_read(&query->batch_id.usage[queue]);
+ uint32_t batch_id = p_atomic_read(&query->batch_id.usage);
if (wait)
- zink_wait_on_batch(ctx, queue, batch_id);
+ zink_wait_on_batch(ctx, batch_id);
else if (batch_id == ctx->curr_batch)
- zink_flush_queue(ctx, queue);
+ zink_flush_queue(ctx);
return get_query_result(pctx, q, wait, result);
}
struct zink_context *ctx = zink_context(pctx);
ctx->queries_disabled = !enable;
- struct zink_batch *batch = zink_batch_g(ctx);
+ struct zink_batch *batch = &ctx->batch;
if (ctx->queries_disabled)
zink_suspend_queries(ctx, batch);
else
if (query->type != PIPE_QUERY_PRIMITIVES_GENERATED &&
!is_so_overflow_query(query)) {
copy_results_to_buffer(ctx, query, res, 0, num_results, flags);
- batch = zink_batch_g(ctx);
+ batch = &ctx->batch;
} else {
/* these need special handling */
force_cpu_read(ctx, pquery, true, PIPE_QUERY_TYPE_U32, pres, 0);
- batch = zink_batch_g(ctx);
+ batch = &ctx->batch;
zink_batch_reference_resource_rw(batch, res, false);
}
if (fences) {
struct pipe_resource *staging = pipe_buffer_create(pctx->screen, 0, PIPE_USAGE_STAGING, result_size * 2);
copy_results_to_buffer(ctx, query, zink_resource(staging), 0, 1, size_flags | VK_QUERY_RESULT_WITH_AVAILABILITY_BIT | VK_QUERY_RESULT_PARTIAL_BIT);
- zink_copy_buffer(ctx, get_batch_for_query(ctx, query, true), res, zink_resource(staging), offset, result_size, result_size);
+ zink_copy_buffer(ctx, &ctx->batch, res, zink_resource(staging), offset, result_size, result_size);
pipe_resource_reference(&staging, NULL);
} else {
uint64_t u64[2] = {0};
}
static uint32_t
-get_resource_usage(struct zink_resource *res, enum zink_queue queue)
+get_resource_usage(struct zink_resource *res)
{
- assert(queue < 2);
- uint32_t reads = p_atomic_read(&res->obj->reads.usage[queue]);
- uint32_t writes = p_atomic_read(&res->obj->writes.usage[queue]);
+ uint32_t reads = p_atomic_read(&res->obj->reads.usage);
+ uint32_t writes = p_atomic_read(&res->obj->writes.usage);
uint32_t batch_uses = 0;
if (reads)
- batch_uses |= ZINK_RESOURCE_ACCESS_READ << queue;
+ batch_uses |= ZINK_RESOURCE_ACCESS_READ;
if (writes)
- batch_uses |= ZINK_RESOURCE_ACCESS_WRITE << queue;
- return batch_uses;
-}
-
-static uint32_t
-get_all_resource_usage(struct zink_resource *res)
-{
- uint32_t batch_uses = 0;
- for (unsigned i = 0; i < ZINK_QUEUE_ANY; i++)
- batch_uses |= get_resource_usage(res, i);
+ batch_uses |= ZINK_RESOURCE_ACCESS_WRITE;
return batch_uses;
}
static void
-resource_sync_reads_from_compute(struct zink_context *ctx, struct zink_resource *res)
+resource_sync_reads(struct zink_context *ctx, struct zink_resource *res)
{
- uint32_t reads = p_atomic_read(&res->obj->reads.usage[ZINK_QUEUE_COMPUTE]);
+ uint32_t reads = p_atomic_read(&res->obj->reads.usage);
assert(reads);
- zink_wait_on_batch(ctx, ZINK_QUEUE_COMPUTE, reads);
+ zink_wait_on_batch(ctx, reads);
}
static void
resource_sync_writes_from_batch_usage(struct zink_context *ctx, struct zink_resource *res)
{
- uint32_t writes[2];
- for (int i = 0; i < ZINK_QUEUE_ANY; i++)
- writes[i] = p_atomic_read(&res->obj->writes.usage[i]);
-
- enum zink_queue queue = writes[0] < writes[1] ? ZINK_QUEUE_COMPUTE : ZINK_QUEUE_GFX;
- /* sync lower id first */
- if (writes[!queue])
- zink_wait_on_batch(ctx, !queue, writes[!queue]);
- zink_wait_on_batch(ctx, queue, writes[queue]);
+ uint32_t writes = p_atomic_read(&res->obj->writes.usage);
+
+ zink_wait_on_batch(ctx, writes);
}
static uint32_t
res->bind_history &= ~ZINK_RESOURCE_USAGE_STREAMOUT;
util_range_set_empty(&res->valid_buffer_range);
- if (!get_all_resource_usage(res))
+ if (!get_resource_usage(res))
return;
struct zink_resource_object *old_obj = res->obj;
}
bool
-zink_resource_has_usage(struct zink_resource *res, enum zink_resource_access usage, enum zink_queue queue)
+zink_resource_has_usage(struct zink_resource *res, enum zink_resource_access usage)
{
- uint32_t batch_uses = get_all_resource_usage(res);
- switch (queue) {
- case ZINK_QUEUE_COMPUTE:
- return batch_uses & (usage << ZINK_QUEUE_COMPUTE);
- case ZINK_QUEUE_GFX:
- return batch_uses & (usage << ZINK_QUEUE_GFX);
- case ZINK_QUEUE_ANY:
- return batch_uses & ((usage << ZINK_QUEUE_GFX) | (usage << ZINK_QUEUE_COMPUTE));
- default:
- break;
- }
- unreachable("unknown queue type");
- return false;
+ uint32_t batch_uses = get_resource_usage(res);
+ return batch_uses & usage;
}
static void *
}
if (util_ranges_intersect(&res->valid_buffer_range, box->x, box->x + box->width)) {
/* special case compute reads since they aren't handled by zink_fence_wait() */
- if (usage & PIPE_MAP_WRITE && zink_resource_has_usage(res, ZINK_RESOURCE_ACCESS_READ, ZINK_QUEUE_COMPUTE))
- resource_sync_reads_from_compute(ctx, res);
- if (usage & PIPE_MAP_READ && zink_resource_has_usage(res, ZINK_RESOURCE_ACCESS_WRITE, ZINK_QUEUE_ANY))
+ if (usage & PIPE_MAP_WRITE && zink_resource_has_usage(res, ZINK_RESOURCE_ACCESS_READ))
+ resource_sync_reads(ctx, res);
+ if (usage & PIPE_MAP_READ && zink_resource_has_usage(res, ZINK_RESOURCE_ACCESS_WRITE))
resource_sync_writes_from_batch_usage(ctx, res);
- else if (usage & PIPE_MAP_WRITE && zink_resource_has_usage(res, ZINK_RESOURCE_ACCESS_RW, ZINK_QUEUE_ANY)) {
+ else if (usage & PIPE_MAP_WRITE && zink_resource_has_usage(res, ZINK_RESOURCE_ACCESS_RW)) {
/* need to wait for all rendering to finish
* TODO: optimize/fix this to be much less obtrusive
* mesa/mesa#2966
if (usage & PIPE_MAP_READ) {
/* TODO: can probably just do a full cs copy if it's already in a cs batch */
- if (zink_resource_has_usage(res, ZINK_RESOURCE_ACCESS_WRITE, ZINK_QUEUE_COMPUTE))
+ if (zink_resource_has_usage(res, ZINK_RESOURCE_ACCESS_WRITE))
/* don't actually have to stall here, only ensure batch is submitted */
- zink_flush_queue(ctx, ZINK_QUEUE_COMPUTE);
+ zink_flush_queue(ctx);
struct zink_context *ctx = zink_context(pctx);
zink_transfer_copy_bufimage(ctx, staging_res, res, trans);
/* need to wait for rendering to finish */
assert(!res->optimal_tiling);
/* special case compute reads since they aren't handled by zink_fence_wait() */
- if (zink_resource_has_usage(res, ZINK_RESOURCE_ACCESS_READ, ZINK_QUEUE_COMPUTE))
- resource_sync_reads_from_compute(ctx, res);
- if (zink_resource_has_usage(res, ZINK_RESOURCE_ACCESS_RW, ZINK_QUEUE_ANY)) {
+ if (zink_resource_has_usage(res, ZINK_RESOURCE_ACCESS_READ))
+ resource_sync_reads(ctx, res);
+ if (zink_resource_has_usage(res, ZINK_RESOURCE_ACCESS_RW)) {
if (usage & PIPE_MAP_READ)
resource_sync_writes_from_batch_usage(ctx, res);
else
if (trans->base.usage & PIPE_MAP_WRITE) {
if (trans->staging_res) {
struct zink_resource *staging_res = zink_resource(trans->staging_res);
- if (zink_resource_has_usage(res, ZINK_RESOURCE_ACCESS_WRITE, ZINK_QUEUE_COMPUTE))
+ if (zink_resource_has_usage(res, ZINK_RESOURCE_ACCESS_WRITE))
/* don't actually have to stall here, only ensure batch is submitted */
- zink_flush_queue(ctx, ZINK_QUEUE_COMPUTE);
+ zink_flush_queue(ctx);
if (ptrans->resource->target == PIPE_BUFFER)
zink_copy_buffer(ctx, NULL, res, staging_res, box->x, box->x, box->width);
zink_resource_setup_transfer_layouts(struct zink_context *ctx, struct zink_resource *src, struct zink_resource *dst);
bool
-zink_resource_has_usage(struct zink_resource *res, enum zink_resource_access usage, enum zink_queue queue);
+zink_resource_has_usage(struct zink_resource *res, enum zink_resource_access usage);
bool
zink_resource_has_usage_for_id(struct zink_resource *res, uint32_t id);