zink: unify gfx and compute batches
author     Mike Blumenkrantz <michael.blumenkrantz@gmail.com>
           Wed, 25 Nov 2020 22:05:58 +0000 (17:05 -0500)
committer  Marge Bot <eric+marge@anholt.net>
           Wed, 24 Mar 2021 03:26:20 +0000 (03:26 +0000)
now that batches aren't limited and flushing is less costly, there's no
reason to keep these separate

the primary changes here are removing the zink_queue enum and collapsing the
related arrays that used it as an index (e.g., zink_batch_usage) into single
members

remaining future work includes removing synchronization flushes that are no
longer necessary and (eventually) removing batch params from a number of
functions, since there is now only ever a single batch

Reviewed-by: Dave Airlie <airlied@redhat.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/9765>

14 files changed:
src/gallium/drivers/zink/zink_batch.c
src/gallium/drivers/zink/zink_batch.h
src/gallium/drivers/zink/zink_clear.c
src/gallium/drivers/zink/zink_context.c
src/gallium/drivers/zink/zink_context.h
src/gallium/drivers/zink/zink_descriptors.c
src/gallium/drivers/zink/zink_draw.c
src/gallium/drivers/zink/zink_fence.c
src/gallium/drivers/zink/zink_fence.h
src/gallium/drivers/zink/zink_program.c
src/gallium/drivers/zink/zink_program.h
src/gallium/drivers/zink/zink_query.c
src/gallium/drivers/zink/zink_resource.c
src/gallium/drivers/zink/zink_resource.h

diff --git a/src/gallium/drivers/zink/zink_batch.c b/src/gallium/drivers/zink/zink_batch.c
index dbb24d9..81a5577 100644
@@ -31,13 +31,13 @@ zink_reset_batch_state(struct zink_context *ctx, struct zink_batch_state *bs)
 
    set_foreach(bs->surfaces, entry) {
       struct zink_surface *surf = (struct zink_surface *)entry->key;
-      zink_batch_usage_unset(&surf->batch_uses, !!bs->fence.is_compute, bs->fence.batch_id);
+      zink_batch_usage_unset(&surf->batch_uses, bs->fence.batch_id);
       zink_surface_reference(screen, &surf, NULL);
       _mesa_set_remove(bs->surfaces, entry);
    }
    set_foreach(bs->bufferviews, entry) {
       struct zink_buffer_view *buffer_view = (struct zink_buffer_view *)entry->key;
-      zink_batch_usage_unset(&buffer_view->batch_uses, !!bs->fence.is_compute, bs->fence.batch_id);
+      zink_batch_usage_unset(&buffer_view->batch_uses, bs->fence.batch_id);
       zink_buffer_view_reference(screen, &buffer_view, NULL);
       _mesa_set_remove(bs->bufferviews, entry);
    }
@@ -50,7 +50,7 @@ zink_reset_batch_state(struct zink_context *ctx, struct zink_batch_state *bs)
 
    set_foreach(bs->desc_sets, entry) {
       struct zink_descriptor_set *zds = (void*)entry->key;
-      zink_batch_usage_unset(&zds->batch_uses, !!bs->fence.is_compute, bs->fence.batch_id);
+      zink_batch_usage_unset(&zds->batch_uses, bs->fence.batch_id);
       /* reset descriptor pools when no bs is using this program to avoid
        * having some inactive program hogging a billion descriptors
        */
@@ -60,13 +60,14 @@ zink_reset_batch_state(struct zink_context *ctx, struct zink_batch_state *bs)
    }
 
    set_foreach(bs->programs, entry) {
-      if (bs->fence.is_compute) {
-         struct zink_compute_program *comp = (struct zink_compute_program*)entry->key;
+      struct zink_program *pg = (struct zink_program*)entry->key;
+      if (pg->is_compute) {
+         struct zink_compute_program *comp = (struct zink_compute_program*)pg;
          bool in_use = comp == ctx->curr_compute;
          if (zink_compute_program_reference(screen, &comp, NULL) && in_use)
             ctx->curr_compute = NULL;
       } else {
-         struct zink_gfx_program *prog = (struct zink_gfx_program*)entry->key;
+         struct zink_gfx_program *prog = (struct zink_gfx_program*)pg;
          bool in_use = prog == ctx->curr_program;
          if (zink_gfx_program_reference(screen, &prog, NULL) && in_use)
             ctx->curr_program = NULL;
@@ -83,7 +84,7 @@ zink_reset_batch_state(struct zink_context *ctx, struct zink_batch_state *bs)
    bs->flush_res = NULL;
 
    bs->descs_used = 0;
-   ctx->resource_size[bs->fence.is_compute] -= bs->resource_size;
+   ctx->resource_size -= bs->resource_size;
    bs->resource_size = 0;
 }
 
@@ -94,13 +95,13 @@ zink_clear_batch_state(struct zink_context *ctx, struct zink_batch_state *bs)
 }
 
 void
-zink_batch_reset_all(struct zink_context *ctx, enum zink_queue queue)
+zink_batch_reset_all(struct zink_context *ctx)
 {
-   hash_table_foreach(&ctx->batch_states[queue], entry) {
+   hash_table_foreach(&ctx->batch_states, entry) {
       struct zink_batch_state *bs = entry->data;
       zink_reset_batch_state(ctx, bs);
-      _mesa_hash_table_remove(&ctx->batch_states[queue], entry);
-      util_dynarray_append(&ctx->free_batch_states[queue], struct zink_batch_state *, bs);
+      _mesa_hash_table_remove(&ctx->batch_states, entry);
+      util_dynarray_append(&ctx->free_batch_states, struct zink_batch_state *, bs);
    }
 }
 
@@ -126,7 +127,7 @@ zink_batch_state_destroy(struct zink_screen *screen, struct zink_batch_state *bs
 }
 
 static struct zink_batch_state *
-create_batch_state(struct zink_context *ctx, enum zink_queue queue)
+create_batch_state(struct zink_context *ctx)
 {
    struct zink_screen *screen = zink_screen(ctx->base.screen);
    struct zink_batch_state *bs = rzalloc(NULL, struct zink_batch_state);
@@ -165,8 +166,6 @@ create_batch_state(struct zink_context *ctx, enum zink_queue queue)
       /* this destroys the batch state on failure */
       return NULL;
 
-   bs->fence.is_compute = queue == ZINK_QUEUE_COMPUTE;
-
    return bs;
 fail:
    zink_batch_state_destroy(screen, bs);
@@ -187,13 +186,13 @@ init_batch_state(struct zink_context *ctx, struct zink_batch *batch)
 {
    struct zink_batch_state *bs = NULL;
 
-   if (util_dynarray_num_elements(&ctx->free_batch_states[batch->queue], struct zink_batch_state*))
-      bs = util_dynarray_pop(&ctx->free_batch_states[batch->queue], struct zink_batch_state*);
+   if (util_dynarray_num_elements(&ctx->free_batch_states, struct zink_batch_state*))
+      bs = util_dynarray_pop(&ctx->free_batch_states, struct zink_batch_state*);
    if (!bs) {
-      struct hash_entry *he = _mesa_hash_table_random_entry(&ctx->batch_states[batch->queue], find_unused_state);
+      struct hash_entry *he = _mesa_hash_table_random_entry(&ctx->batch_states, find_unused_state);
       if (he) { //there may not be any entries available
          bs = he->data;
-         _mesa_hash_table_remove(&ctx->batch_states[batch->queue], he);
+         _mesa_hash_table_remove(&ctx->batch_states, he);
       }
    }
    if (bs)
@@ -202,11 +201,11 @@ init_batch_state(struct zink_context *ctx, struct zink_batch *batch)
       if (!batch->state) {
          /* this is batch init, so create a few more states for later use */
          for (int i = 0; i < 3; i++) {
-            struct zink_batch_state *state = create_batch_state(ctx, batch->queue);
-            util_dynarray_append(&ctx->free_batch_states[batch->queue], struct zink_batch_state *, state);
+            struct zink_batch_state *state = create_batch_state(ctx);
+            util_dynarray_append(&ctx->free_batch_states, struct zink_batch_state *, state);
          }
       }
-      bs = create_batch_state(ctx, batch->queue);
+      bs = create_batch_state(ctx);
    }
    batch->state = bs;
 }
@@ -239,8 +238,8 @@ zink_start_batch(struct zink_context *ctx, struct zink_batch *batch)
       debug_printf("vkBeginCommandBuffer failed\n");
 
    batch->state->fence.batch_id = ctx->curr_batch;
-   if (ctx->last_fence[batch->queue]) {
-      struct zink_batch_state *last_state = zink_batch_state(ctx->last_fence[batch->queue]);
+   if (ctx->last_fence) {
+      struct zink_batch_state *last_state = zink_batch_state(ctx->last_fence);
       batch->last_batch_id = last_state->fence.batch_id;
    }
    if (!ctx->queries_disabled)
@@ -302,19 +301,14 @@ zink_end_batch(struct zink_context *ctx, struct zink_batch *batch)
       }
    }
 
-   ctx->last_fence[batch->queue] = &batch->state->fence;
-   _mesa_hash_table_insert_pre_hashed(&ctx->batch_states[batch->queue], batch->state->fence.batch_id, (void*)(uintptr_t)batch->state->fence.batch_id, batch->state);
-   ctx->resource_size[batch->queue] += batch->state->resource_size;
+   ctx->last_fence = &batch->state->fence;
+   _mesa_hash_table_insert_pre_hashed(&ctx->batch_states, batch->state->fence.batch_id, (void*)(uintptr_t)batch->state->fence.batch_id, batch->state);
+   ctx->resource_size += batch->state->resource_size;
 }
 
-/* returns a queue based on whether a resource
-   has usage on a different queue than 'batch' belongs to
- */
-enum zink_queue
+void
 zink_batch_reference_resource_rw(struct zink_batch *batch, struct zink_resource *res, bool write)
 {
-   enum zink_queue batch_to_flush = 0;
-
    /* u_transfer_helper unrefs the stencil buffer when the depth buffer is unrefed,
     * so we add an extra ref here to the stencil buffer to compensate
     */
@@ -322,61 +316,50 @@ zink_batch_reference_resource_rw(struct zink_batch *batch, struct zink_resource
 
    zink_get_depth_stencil_resources((struct pipe_resource*)res, NULL, &stencil);
 
-   if (batch->queue == ZINK_QUEUE_COMPUTE) {
-      if ((write && zink_resource_has_usage(res, ZINK_RESOURCE_ACCESS_RW, ZINK_QUEUE_GFX)) ||
-          (!write && zink_resource_has_usage(res, ZINK_RESOURCE_ACCESS_WRITE, ZINK_QUEUE_GFX)))
-         batch_to_flush = ZINK_QUEUE_GFX;
-   } else {
-      if ((write && zink_resource_has_usage(res, ZINK_RESOURCE_ACCESS_READ, ZINK_QUEUE_COMPUTE)) ||
-          zink_resource_has_usage(res, ZINK_RESOURCE_ACCESS_WRITE, ZINK_QUEUE_COMPUTE))
-         batch_to_flush = ZINK_QUEUE_COMPUTE;
-   }
-
    /* if the resource already has usage of any sort set for this batch, we can skip hashing */
-   if (!zink_batch_usage_matches(&res->obj->reads, batch->queue, batch->state->fence.batch_id) &&
-       !zink_batch_usage_matches(&res->obj->writes, batch->queue, batch->state->fence.batch_id)) {
+   if (!zink_batch_usage_matches(&res->obj->reads, batch->state->fence.batch_id) &&
+       !zink_batch_usage_matches(&res->obj->writes, batch->state->fence.batch_id)) {
       bool found = false;
       _mesa_set_search_and_add(batch->state->fence.resources, res->obj, &found);
       if (!found) {
          pipe_reference(NULL, &res->obj->reference);
-         if (!batch->last_batch_id || !zink_batch_usage_matches(&res->obj->reads, batch->queue, batch->last_batch_id))
+         if (!batch->last_batch_id || !zink_batch_usage_matches(&res->obj->reads, batch->last_batch_id))
             /* only add resource usage if it's "new" usage, though this only checks the most recent usage
              * and not all pending usages
              */
             batch->state->resource_size += res->obj->size;
          if (stencil) {
             pipe_reference(NULL, &stencil->obj->reference);
-            if (!batch->last_batch_id || !zink_batch_usage_matches(&stencil->obj->reads, batch->queue, batch->last_batch_id))
+            if (!batch->last_batch_id || !zink_batch_usage_matches(&stencil->obj->reads, batch->last_batch_id))
                batch->state->resource_size += stencil->obj->size;
          }
       }
    }
    if (write) {
       if (stencil)
-         zink_batch_usage_set(&stencil->obj->writes, batch->queue, batch->state->fence.batch_id);
-      zink_batch_usage_set(&res->obj->writes, batch->queue, batch->state->fence.batch_id);
+         zink_batch_usage_set(&stencil->obj->writes, batch->state->fence.batch_id);
+      zink_batch_usage_set(&res->obj->writes, batch->state->fence.batch_id);
    } else {
       if (stencil)
-         zink_batch_usage_set(&stencil->obj->reads, batch->queue, batch->state->fence.batch_id);
-      zink_batch_usage_set(&res->obj->reads, batch->queue, batch->state->fence.batch_id);
+         zink_batch_usage_set(&stencil->obj->reads, batch->state->fence.batch_id);
+      zink_batch_usage_set(&res->obj->reads, batch->state->fence.batch_id);
    }
    /* multiple array entries are fine */
    if (res->obj->persistent_maps)
       util_dynarray_append(&batch->state->persistent_resources, struct zink_resource*, res);
 
    batch->has_work = true;
-   return batch_to_flush;
 }
 
 static bool
 ptr_add_usage(struct zink_batch *batch, struct set *s, void *ptr, struct zink_batch_usage *u)
 {
    bool found = false;
-   if (zink_batch_usage_matches(u, batch->queue, batch->state->fence.batch_id))
+   if (zink_batch_usage_matches(u, batch->state->fence.batch_id))
       return false;
    _mesa_set_search_and_add(s, ptr, &found);
    assert(!found);
-   zink_batch_usage_set(u, batch->queue, batch->state->fence.batch_id);
+   zink_batch_usage_set(u, batch->state->fence.batch_id);
    return true;
 }
 
@@ -450,36 +433,21 @@ zink_batch_reference_image_view(struct zink_batch *batch,
 }
 
 void
-zink_batch_usage_set(struct zink_batch_usage *u, enum zink_queue queue, uint32_t batch_id)
+zink_batch_usage_set(struct zink_batch_usage *u, uint32_t batch_id)
 {
-   if (queue == ZINK_QUEUE_ANY) {
-      p_atomic_set(&u->usage[ZINK_QUEUE_GFX], batch_id);
-      p_atomic_set(&u->usage[ZINK_QUEUE_COMPUTE], batch_id);
-   } else
-      p_atomic_set(&u->usage[queue], batch_id);
+   p_atomic_set(&u->usage, batch_id);
 }
 
 bool
-zink_batch_usage_matches(struct zink_batch_usage *u, enum zink_queue queue, uint32_t batch_id)
+zink_batch_usage_matches(struct zink_batch_usage *u, uint32_t batch_id)
 {
-   if (queue < ZINK_QUEUE_ANY) {
-      uint32_t usage = p_atomic_read(&u->usage[queue]);
-      return usage == batch_id;
-   }
-   for (unsigned i = 0; i < ZINK_QUEUE_ANY; i++) {
-      uint32_t usage = p_atomic_read(&u->usage[i]);
-      if (usage == batch_id)
-         return true;
-   }
-   return false;
+   uint32_t usage = p_atomic_read(&u->usage);
+   return usage == batch_id;
 }
 
 bool
 zink_batch_usage_exists(struct zink_batch_usage *u)
 {
-   uint32_t usage = p_atomic_read(&u->usage[ZINK_QUEUE_GFX]);
-   if (usage)
-      return true;
-   usage = p_atomic_read(&u->usage[ZINK_QUEUE_COMPUTE]);
+   uint32_t usage = p_atomic_read(&u->usage);
    return !!usage;
 }
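
the semantics of the unified usage helpers above are easy to model outside the
driver; what follows is a minimal standalone sketch using C11 atomics in place
of mesa's p_atomic macros, where the struct, helper names, and main() are
illustrative stand-ins rather than driver code:

   #include <assert.h>
   #include <stdatomic.h>
   #include <stdbool.h>
   #include <stdint.h>

   /* stand-in for struct zink_batch_usage after this commit */
   struct batch_usage {
      atomic_uint_least32_t usage;
   };

   static void usage_set(struct batch_usage *u, uint32_t batch_id)
   {
      atomic_store(&u->usage, batch_id);          /* ~p_atomic_set */
   }

   static bool usage_matches(struct batch_usage *u, uint32_t batch_id)
   {
      return atomic_load(&u->usage) == batch_id;  /* ~p_atomic_read */
   }

   static void usage_unset(struct batch_usage *u, uint32_t batch_id)
   {
      uint_least32_t expected = batch_id;
      /* ~p_atomic_cmpxchg: only clear if this batch is still the recorded user */
      atomic_compare_exchange_strong(&u->usage, &expected, 0);
   }

   int main(void)
   {
      struct batch_usage u = {0};
      usage_set(&u, 42);              /* batch 42 starts using the object */
      assert(usage_matches(&u, 42));
      usage_unset(&u, 41);            /* stale batch id: no effect */
      assert(usage_matches(&u, 42));
      usage_unset(&u, 42);            /* fence for batch 42 signaled */
      assert(!usage_matches(&u, 42));
      return 0;
   }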
diff --git a/src/gallium/drivers/zink/zink_batch.h b/src/gallium/drivers/zink/zink_batch.h
index 4dac487..3e763b7 100644
@@ -44,15 +44,9 @@ struct zink_resource;
 struct zink_sampler_view;
 struct zink_surface;
 
-enum zink_queue {
-   ZINK_QUEUE_GFX,
-   ZINK_QUEUE_COMPUTE,
-   ZINK_QUEUE_ANY,
-};
-
 struct zink_batch_usage {
    /* this has to be atomic for fence access, so we can't use a bitmask and make everything neat */
-   uint32_t usage[2]; //gfx, compute
+   uint32_t usage;
 };
 
 struct zink_batch_state {
@@ -81,7 +75,6 @@ struct zink_batch_state {
 
 struct zink_batch {
    struct zink_batch_state *state;
-   enum zink_queue queue;
 
    uint32_t last_batch_id;
 
@@ -103,7 +96,7 @@ void
 zink_clear_batch_state(struct zink_context *ctx, struct zink_batch_state *bs);
 
 void
-zink_batch_reset_all(struct zink_context *ctx, enum zink_queue queue);
+zink_batch_reset_all(struct zink_context *ctx);
 
 void
 zink_batch_state_destroy(struct zink_screen *screen, struct zink_batch_state *bs);
@@ -122,7 +115,7 @@ zink_start_batch(struct zink_context *ctx, struct zink_batch *batch);
 void
 zink_end_batch(struct zink_context *ctx, struct zink_batch *batch);
 
-enum zink_queue
+void
 zink_batch_reference_resource_rw(struct zink_batch *batch,
                                  struct zink_resource *res,
                                  bool write);
@@ -148,15 +141,15 @@ bool
 zink_batch_add_desc_set(struct zink_batch *batch, struct zink_descriptor_set *zds);
 
 void
-zink_batch_usage_set(struct zink_batch_usage *u, enum zink_queue queue, uint32_t batch_id);
+zink_batch_usage_set(struct zink_batch_usage *u, uint32_t batch_id);
 bool
-zink_batch_usage_matches(struct zink_batch_usage *u, enum zink_queue queue, uint32_t batch_id);
+zink_batch_usage_matches(struct zink_batch_usage *u, uint32_t batch_id);
 bool
 zink_batch_usage_exists(struct zink_batch_usage *u);
 
 static inline void
-zink_batch_usage_unset(struct zink_batch_usage *u, enum zink_queue queue, uint32_t batch_id)
+zink_batch_usage_unset(struct zink_batch_usage *u, uint32_t batch_id)
 {
-   p_atomic_cmpxchg(&u->usage[queue], batch_id, 0);
+   p_atomic_cmpxchg(&u->usage, batch_id, 0);
 }
 #endif
diff --git a/src/gallium/drivers/zink/zink_clear.c b/src/gallium/drivers/zink/zink_clear.c
index 01cd0d1..e1a6dd3 100644
@@ -192,7 +192,7 @@ zink_clear(struct pipe_context *pctx,
 {
    struct zink_context *ctx = zink_context(pctx);
    struct pipe_framebuffer_state *fb = &ctx->fb_state;
-   struct zink_batch *batch = zink_batch_g(ctx);
+   struct zink_batch *batch = &ctx->batch;
    bool needs_rp = false;
 
    if (scissor_state) {
@@ -364,7 +364,7 @@ zink_clear_texture(struct pipe_context *pctx,
    struct pipe_screen *pscreen = pctx->screen;
    struct u_rect region = zink_rect_from_box(box);
    bool needs_rp = !zink_blit_region_fills(region, pres->width0, pres->height0) || ctx->render_condition_active;
-   struct zink_batch *batch = zink_batch_g(ctx);
+   struct zink_batch *batch = &ctx->batch;
    struct pipe_surface *surf = NULL;
 
    if (res->aspect & VK_IMAGE_ASPECT_COLOR_BIT) {
@@ -452,7 +452,7 @@ fb_clears_apply_internal(struct zink_context *ctx, struct pipe_resource *pres, i
    if (!zink_fb_clear_enabled(ctx, i))
       return;
    if (zink_resource(pres)->aspect == VK_IMAGE_ASPECT_COLOR_BIT) {
-      assert(!zink_batch_g(ctx)->in_rp);
+      assert(!ctx->batch.in_rp);
       if (zink_fb_clear_needs_explicit(fb_clear) || !check_3d_layers(ctx->fb_state.cbufs[i]))
          /* this will automatically trigger all the clears */
          zink_batch_rp(ctx);
@@ -469,7 +469,7 @@ fb_clears_apply_internal(struct zink_context *ctx, struct pipe_resource *pres, i
       zink_fb_clear_reset(ctx, i);
       return;
    } else {
-      assert(!zink_batch_g(ctx)->in_rp);
+      assert(!ctx->batch.in_rp);
       if (zink_fb_clear_needs_explicit(fb_clear) || !check_3d_layers(ctx->fb_state.zsbuf))
          /* this will automatically trigger all the clears */
          zink_batch_rp(ctx);
diff --git a/src/gallium/drivers/zink/zink_context.c b/src/gallium/drivers/zink/zink_context.c
index d835c27..2a908b6 100644
@@ -300,20 +300,18 @@ zink_context_destroy(struct pipe_context *pctx)
    for (unsigned i = 0; i < ARRAY_SIZE(ctx->null_buffers); i++)
       pipe_resource_reference(&ctx->null_buffers[i], NULL);
 
-   for (unsigned i = 0; i < ZINK_QUEUE_ANY; i++) {
-      struct zink_fence *fence = zink_fence(&ctx->batches[i].state);
-      zink_clear_batch_state(ctx, ctx->batches[i].state);
-      zink_fence_reference(zink_screen(pctx->screen), &fence, NULL);
-      hash_table_foreach(&ctx->batch_states[i], entry) {
-         fence = entry->data;
-         zink_clear_batch_state(ctx, entry->data);
-         zink_fence_reference(zink_screen(pctx->screen), &fence, NULL);
-      }
-      util_dynarray_foreach(&ctx->free_batch_states[i], struct zink_batch_state*, bs) {
-         fence = zink_fence(*bs);
-         zink_clear_batch_state(ctx, *bs);
-         zink_fence_reference(zink_screen(pctx->screen), &fence, NULL);
-      }
+   struct zink_fence *fence = zink_fence(&ctx->batch.state);
+   zink_clear_batch_state(ctx, ctx->batch.state);
+   zink_fence_reference(screen, &fence, NULL);
+   hash_table_foreach(&ctx->batch_states, entry) {
+      fence = entry->data;
+      zink_clear_batch_state(ctx, entry->data);
+      zink_fence_reference(screen, &fence, NULL);
+   }
+   util_dynarray_foreach(&ctx->free_batch_states, struct zink_batch_state*, bs) {
+      fence = zink_fence(*bs);
+      zink_clear_batch_state(ctx, *bs);
+      zink_fence_reference(screen, &fence, NULL);
    }
 
    if (ctx->framebuffer) {
@@ -519,7 +517,7 @@ zink_delete_sampler_state(struct pipe_context *pctx,
                           void *sampler_state)
 {
    struct zink_sampler_state *sampler = sampler_state;
-   struct zink_batch *batch = zink_batch_g(zink_context(pctx));
+   struct zink_batch *batch = &zink_context(pctx)->batch;
    zink_descriptor_set_refs_clear(&sampler->desc_set_refs, sampler_state);
    util_dynarray_append(&batch->state->zombie_samplers, VkSampler,
                         sampler->sampler);
@@ -1250,8 +1248,6 @@ setup_framebuffer(struct zink_context *ctx)
 void
 zink_begin_render_pass(struct zink_context *ctx, struct zink_batch *batch)
 {
-   assert(batch == zink_batch_g(ctx));
-
    setup_framebuffer(ctx);
    assert(ctx->gfx_pipeline_state.render_pass);
    struct pipe_framebuffer_state *fb_state = &ctx->fb_state;
@@ -1332,24 +1328,23 @@ zink_end_render_pass(struct zink_context *ctx, struct zink_batch *batch)
 }
 
 static void
-flush_batch(struct zink_context *ctx, enum zink_queue queue)
+flush_batch(struct zink_context *ctx)
 {
-   struct zink_batch *batch = zink_batch_queue(ctx, queue);
-   if (queue == ZINK_QUEUE_GFX)
-      zink_end_render_pass(ctx, batch);
+   struct zink_batch *batch = &ctx->batch;
+   zink_end_render_pass(ctx, batch);
    zink_end_batch(ctx, batch);
 
    incr_curr_batch(ctx);
 
    zink_start_batch(ctx, batch);
-   if (queue == ZINK_QUEUE_GFX && zink_screen(ctx->base.screen)->info.have_EXT_transform_feedback && ctx->num_so_targets)
+   if (zink_screen(ctx->base.screen)->info.have_EXT_transform_feedback && ctx->num_so_targets)
       ctx->dirty_so_targets = true;
 }
 
 struct zink_batch *
 zink_batch_rp(struct zink_context *ctx)
 {
-   struct zink_batch *batch = zink_batch_g(ctx);
+   struct zink_batch *batch = &ctx->batch;
    if (!batch->in_rp) {
       zink_begin_render_pass(ctx, batch);
       assert(ctx->framebuffer && ctx->framebuffer->rp);
@@ -1360,16 +1355,16 @@ zink_batch_rp(struct zink_context *ctx)
 struct zink_batch *
 zink_batch_no_rp(struct zink_context *ctx)
 {
-   struct zink_batch *batch = zink_batch_g(ctx);
+   struct zink_batch *batch = &ctx->batch;
    zink_end_render_pass(ctx, batch);
    assert(!batch->in_rp);
    return batch;
 }
 
 void
-zink_flush_queue(struct zink_context *ctx, enum zink_queue queue)
+zink_flush_queue(struct zink_context *ctx)
 {
-   flush_batch(ctx, queue);
+   flush_batch(ctx);
 }
 
 static void
@@ -1600,12 +1595,7 @@ zink_resource_image_barrier(struct zink_context *ctx, struct zink_batch *batch,
    if (!zink_resource_image_needs_barrier(res, new_layout, flags, pipeline))
       return;
    /* only barrier if we're changing layout or doing something besides read -> read */
-   if (!batch) {
-      if (pipeline == VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT)
-         batch = zink_batch_c(ctx);
-      else
-         batch = zink_batch_no_rp(ctx);
-   }
+   batch = zink_batch_no_rp(ctx);
    assert(!batch->in_rp);
    VkImageSubresourceRange isr = {
       res->aspect,
@@ -1698,12 +1688,7 @@ zink_resource_buffer_barrier(struct zink_context *ctx, struct zink_batch *batch,
    if (!zink_resource_buffer_needs_barrier(res, flags, pipeline))
       return;
    /* only barrier if we're changing layout or doing something besides read -> read */
-   if (!batch) {
-      if (pipeline == VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT)
-         batch = zink_batch_c(ctx);
-      else
-         batch = zink_batch_no_rp(ctx);
-   }
+   batch = zink_batch_no_rp(ctx);
    assert(!batch->in_rp);
    VkBufferMemoryBarrier bmb = {
       VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,
@@ -1780,7 +1765,7 @@ zink_flush(struct pipe_context *pctx,
 {
    struct zink_context *ctx = zink_context(pctx);
    bool deferred = flags & PIPE_FLUSH_DEFERRED;
-   struct zink_batch *batch = zink_batch_g(ctx);
+   struct zink_batch *batch = &ctx->batch;
    struct zink_fence *fence = &batch->state->fence;
 
    if (!deferred && ctx->clears_enabled) {
@@ -1801,13 +1786,13 @@ zink_flush(struct pipe_context *pctx,
          if (zink_screen(pctx->screen)->needs_mesa_flush_wsi && ctx->fb_state.cbufs[0])
             batch->state->flush_res = zink_resource(ctx->fb_state.cbufs[0]->texture);
       }
-      flush_batch(ctx, ZINK_QUEUE_GFX);
+      flush_batch(ctx);
    }
 
    if (!pfence)
       return;
    if (deferred && !batch->has_work) {
-      fence = ctx->last_fence[ZINK_QUEUE_GFX];
+      fence = ctx->last_fence;
    }
    zink_fence_reference(zink_screen(pctx->screen),
                         (struct zink_fence **)pfence,
@@ -1824,16 +1809,16 @@ zink_flush(struct pipe_context *pctx,
 }
 
 void
-zink_maybe_flush_or_stall(struct zink_context *ctx, enum zink_queue queue)
+zink_maybe_flush_or_stall(struct zink_context *ctx)
 {
    struct zink_screen *screen = zink_screen(ctx->base.screen);
    /* flush anytime our total batch memory usage is potentially >= 1/10 of total system memory */
-   if (zink_batch_queue(ctx, queue)->state->resource_size >= screen->total_mem / 10)
-      flush_batch(ctx, queue);
+   if (ctx->batch.state->resource_size >= screen->total_mem / 10)
+      flush_batch(ctx);
 
-   if (ctx->resource_size[queue] >= screen->total_mem / 10) {
-      zink_fence_finish(zink_screen(ctx->base.screen), &ctx->base, ctx->last_fence[queue], PIPE_TIMEOUT_INFINITE);
-      zink_batch_reset_all(ctx, queue);
+   if (ctx->resource_size >= screen->total_mem / 10) {
+      zink_fence_finish(zink_screen(ctx->base.screen), &ctx->base, ctx->last_fence, PIPE_TIMEOUT_INFINITE);
+      zink_batch_reset_all(ctx);
    }
 }
 
@@ -1842,34 +1827,34 @@ zink_fence_wait(struct pipe_context *pctx)
 {
    struct zink_context *ctx = zink_context(pctx);
 
-   if (zink_batch_g(ctx)->has_work)
+   if (ctx->batch.has_work)
       pctx->flush(pctx, NULL, PIPE_FLUSH_HINT_FINISH);
-   if (ctx->last_fence[ZINK_QUEUE_GFX])
-      zink_fence_finish(zink_screen(pctx->screen), pctx, ctx->last_fence[ZINK_QUEUE_GFX], PIPE_TIMEOUT_INFINITE);
+   if (ctx->last_fence)
+      zink_fence_finish(zink_screen(pctx->screen), pctx, ctx->last_fence, PIPE_TIMEOUT_INFINITE);
 }
 
 void
-zink_wait_on_batch(struct zink_context *ctx, enum zink_queue queue, uint32_t batch_id)
+zink_wait_on_batch(struct zink_context *ctx, uint32_t batch_id)
 {
-   struct zink_batch_state *bs = zink_batch_queue(ctx, queue)->state;
+   struct zink_batch_state *bs = ctx->batch.state;
    assert(bs);
    if (!batch_id || bs->fence.batch_id == batch_id)
       /* not submitted yet */
-      flush_batch(ctx, queue);
+      flush_batch(ctx);
 
    struct zink_fence *fence;
 
-   assert(batch_id || ctx->last_fence[queue]);
-   if (ctx->last_fence[queue] && (!batch_id || batch_id == zink_batch_state(ctx->last_fence[queue])->fence.batch_id))
-      fence = ctx->last_fence[queue];
+   assert(batch_id || ctx->last_fence);
+   if (ctx->last_fence && (!batch_id || batch_id == zink_batch_state(ctx->last_fence)->fence.batch_id))
+      fence = ctx->last_fence;
    else {
-      struct hash_entry *he = _mesa_hash_table_search_pre_hashed(&ctx->batch_states[queue], batch_id, (void*)(uintptr_t)batch_id);
+      struct hash_entry *he = _mesa_hash_table_search_pre_hashed(&ctx->batch_states, batch_id, (void*)(uintptr_t)batch_id);
       if (!he) {
-        util_dynarray_foreach(&ctx->free_batch_states[queue], struct zink_batch_state*, bs) {
+        util_dynarray_foreach(&ctx->free_batch_states, struct zink_batch_state*, bs) {
            if ((*bs)->fence.batch_id == batch_id)
               return;
         }
-        if (ctx->last_fence[queue] && ctx->last_fence[queue]->batch_id > batch_id)
+        if (ctx->last_fence && ctx->last_fence->batch_id > batch_id)
            /* already completed */
            return;
         unreachable("should've found batch state");
@@ -1884,9 +1869,9 @@ static void
 zink_texture_barrier(struct pipe_context *pctx, unsigned flags)
 {
    struct zink_context *ctx = zink_context(pctx);
-   if (zink_batch_g(ctx)->has_work)
+   if (ctx->batch.has_work)
       pctx->flush(pctx, NULL, 0);
-   zink_flush_queue(ctx, ZINK_QUEUE_COMPUTE);
+   zink_flush_queue(ctx);
 }
 
 static void
@@ -2002,20 +1987,13 @@ zink_memory_barrier(struct pipe_context *pctx, unsigned flags)
    b.srcAccessMask = sflags;
    b.dstAccessMask = dflags;
 
-   struct zink_batch *batch = zink_batch_g(ctx);
+   struct zink_batch *batch = &ctx->batch;
    if (batch->has_work) {
       zink_end_render_pass(ctx, batch);
 
       /* this should be the only call needed */
       vkCmdPipelineBarrier(batch->state->cmdbuf, src, dst, 0, 0, &b, 0, NULL, 0, NULL);
-      flush_batch(ctx, ZINK_QUEUE_GFX);
-   }
-   batch = zink_batch_c(ctx);
-   if (batch->has_work) {
-      /* this should be the only call needed */
-      vkCmdPipelineBarrier(batch->state->cmdbuf, src, dst, 0, 0, &b, 0, NULL, 0, NULL);
-      zink_end_batch(ctx, batch);
-      zink_start_batch(ctx, batch);
+      flush_batch(ctx);
    }
 }
 
@@ -2334,14 +2312,6 @@ zink_resource_rebind(struct zink_context *ctx, struct zink_resource *res)
    }
 }
 
-static void
-init_batch(struct zink_context *ctx, enum zink_queue queue)
-{
-   struct zink_batch *batch = zink_batch_queue(ctx, queue);
-   batch->queue = queue;
-   zink_start_batch(ctx, batch);
-}
-
 struct pipe_context *
 zink_context_create(struct pipe_screen *pscreen, void *priv, unsigned flags)
 {
@@ -2409,10 +2379,8 @@ zink_context_create(struct pipe_screen *pscreen, void *priv, unsigned flags)
    zink_context_resource_init(&ctx->base);
    zink_context_query_init(&ctx->base);
 
-   for (unsigned i = 0; i < ZINK_QUEUE_ANY; i++) {
-      util_dynarray_init(&ctx->free_batch_states[i], ctx);
-      _mesa_hash_table_init(&ctx->batch_states[i], ctx, NULL, _mesa_key_pointer_equal);
-   }
+   util_dynarray_init(&ctx->free_batch_states, ctx);
+   _mesa_hash_table_init(&ctx->batch_states, ctx, NULL, _mesa_key_pointer_equal);
 
    ctx->gfx_pipeline_state.have_EXT_extended_dynamic_state = screen->info.have_EXT_extended_dynamic_state;
 
@@ -2440,12 +2408,8 @@ zink_context_create(struct pipe_screen *pscreen, void *priv, unsigned flags)
       goto fail;
 
    incr_curr_batch(ctx);
-   init_batch(ctx, ZINK_QUEUE_GFX);
-   if (!zink_batch_g(ctx)->state)
-      goto fail;
-
-   init_batch(ctx, ZINK_QUEUE_COMPUTE);
-   if (!zink_batch_c(ctx)->state)
+   zink_start_batch(ctx, &ctx->batch);
+   if (!ctx->batch.state)
       goto fail;
 
    vkGetDeviceQueue(screen->dev, screen->gfx_queue, 0, &ctx->queue);
diff --git a/src/gallium/drivers/zink/zink_context.h b/src/gallium/drivers/zink/zink_context.h
index 84f0dc3..67d7c4c 100644
@@ -140,12 +140,12 @@ struct zink_context {
    bool is_device_lost;
 
    uint32_t curr_batch; //the current batch id
-   struct zink_batch batches[2]; //gfx, compute
-   struct zink_fence *last_fence[2]; //gfx, compute; the last command buffer submitted
-   VkQueue queue;
-   struct hash_table batch_states[2]; //gfx, compute; submitted batch states
-   struct util_dynarray free_batch_states[2]; //gfx, compute; unused batch states
-   VkDeviceSize resource_size[2]; //gfx, compute; the accumulated size of resources in submitted buffers
+   struct zink_batch batch;
+   struct zink_fence *last_fence; //the last command buffer submitted
+   VkQueue queue; //gfx+compute
+   struct hash_table batch_states; //submitted batch states
+   struct util_dynarray free_batch_states; //unused batch states
+   VkDeviceSize resource_size; //the accumulated size of resources in submitted buffers
 
    struct pipe_constant_buffer ubos[PIPE_SHADER_TYPES][PIPE_MAX_CONSTANT_BUFFERS];
    struct pipe_shader_buffer ssbos[PIPE_SHADER_TYPES][PIPE_MAX_SHADER_BUFFERS];
@@ -238,25 +238,6 @@ zink_fb_clear_enabled(const struct zink_context *ctx, unsigned idx)
    return ctx->clears_enabled & (PIPE_CLEAR_COLOR0 << idx);
 }
 
-static inline struct zink_batch *
-zink_batch_queue(struct zink_context *ctx, enum zink_queue queue_type)
-{
-   assert(queue_type < ARRAY_SIZE(ctx->batches));
-   return &ctx->batches[queue_type];
-}
-
-static inline struct zink_batch *
-zink_batch_g(struct zink_context *ctx)
-{
-   return &ctx->batches[ZINK_QUEUE_GFX];
-}
-
-static inline struct zink_batch *
-zink_batch_c(struct zink_context *ctx)
-{
-   return &ctx->batches[ZINK_QUEUE_COMPUTE];
-}
-
 struct zink_batch *
 zink_batch_rp(struct zink_context *ctx);
 
@@ -267,13 +248,13 @@ void
 zink_fence_wait(struct pipe_context *ctx);
 
 void
-zink_wait_on_batch(struct zink_context *ctx, enum zink_queue queue, uint32_t batch_id);
+zink_wait_on_batch(struct zink_context *ctx, uint32_t batch_id);
 
 void
-zink_flush_queue(struct zink_context *ctx, enum zink_queue queue);
+zink_flush_queue(struct zink_context *ctx);
 
 void
-zink_maybe_flush_or_stall(struct zink_context *ctx, enum zink_queue queue);
+zink_maybe_flush_or_stall(struct zink_context *ctx);
 
 bool
 zink_resource_access_is_write(VkAccessFlags flags);
diff --git a/src/gallium/drivers/zink/zink_descriptors.c b/src/gallium/drivers/zink/zink_descriptors.c
index 8d73589..2fcf785 100644
@@ -232,7 +232,7 @@ allocate_desc_set(struct zink_screen *screen, struct zink_program *pg, enum zink
       pipe_reference_init(&zds->reference, 1);
       zds->pool = pool;
       zds->hash = 0;
-      zds->batch_uses.usage[0] = zds->batch_uses.usage[1] = 0;
+      zds->batch_uses.usage = 0;
       zds->invalid = true;
       zds->punted = zds->recycled = false;
       if (num_resources) {
@@ -296,7 +296,7 @@ zink_descriptor_set_get(struct zink_context *ctx,
    struct zink_descriptor_set *zds;
    struct zink_screen *screen = zink_screen(ctx->base.screen);
    struct zink_program *pg = is_compute ? (struct zink_program *)ctx->curr_compute : (struct zink_program *)ctx->curr_program;
-   struct zink_batch *batch = is_compute ? zink_batch_c(ctx) : zink_batch_g(ctx);
+   struct zink_batch *batch = &ctx->batch;
    struct zink_descriptor_pool *pool = pg->pool[type];
    unsigned descs_used = 1;
    assert(type < ZINK_DESCRIPTOR_TYPES);
diff --git a/src/gallium/drivers/zink/zink_draw.c b/src/gallium/drivers/zink/zink_draw.c
index a79db5e..84f4644 100644
@@ -118,7 +118,7 @@ zink_emit_stream_output_targets(struct pipe_context *pctx)
 {
    struct zink_context *ctx = zink_context(pctx);
    struct zink_screen *screen = zink_screen(pctx->screen);
-   struct zink_batch *batch = zink_batch_g(ctx);
+   struct zink_batch *batch = &ctx->batch;
    VkBuffer buffers[PIPE_MAX_SO_OUTPUTS] = {};
    VkDeviceSize buffer_offsets[PIPE_MAX_SO_OUTPUTS] = {};
    VkDeviceSize buffer_sizes[PIPE_MAX_SO_OUTPUTS] = {};
@@ -337,15 +337,13 @@ cmp_dynamic_offset_binding(const void *a, const void *b)
    return *binding_a - *binding_b;
 }
 
-static bool
+static void
 write_descriptors(struct zink_context *ctx, struct zink_descriptor_set *zds, unsigned num_wds, VkWriteDescriptorSet *wds,
-                 bool is_compute, bool cache_hit, bool need_resource_refs)
+                 bool cache_hit, bool need_resource_refs)
 {
-   bool need_flush = false;
-   struct zink_batch *batch = is_compute ? zink_batch_c(ctx) : zink_batch_g(ctx);
+   struct zink_batch *batch = &ctx->batch;
    struct zink_screen *screen = zink_screen(ctx->base.screen);
    assert(zds->desc_set);
-   enum zink_queue check_flush_id = is_compute ? ZINK_QUEUE_GFX : ZINK_QUEUE_COMPUTE;
 
    if (!cache_hit && num_wds)
       vkUpdateDescriptorSets(screen->dev, num_wds, wds, 0, NULL);
@@ -353,12 +351,10 @@ write_descriptors(struct zink_context *ctx, struct zink_descriptor_set *zds, uns
    for (int i = 0; zds->pool->key.num_descriptors && i < util_dynarray_num_elements(&zds->barriers, struct zink_descriptor_barrier); ++i) {
       struct zink_descriptor_barrier *barrier = util_dynarray_element(&zds->barriers, struct zink_descriptor_barrier, i);
       if (need_resource_refs || (ctx->curr_compute && ctx->curr_program))
-         need_flush |= zink_batch_reference_resource_rw(batch, barrier->res, zink_resource_access_is_write(barrier->access)) == check_flush_id;
+         zink_batch_reference_resource_rw(batch, barrier->res, zink_resource_access_is_write(barrier->access));
       zink_resource_barrier(ctx, NULL, barrier->res,
                             barrier->layout, barrier->access, barrier->stage);
    }
-
-   return need_flush;
 }
 
 static unsigned
@@ -374,7 +370,7 @@ init_write_descriptor(struct zink_shader *shader, struct zink_descriptor_set *zd
     return num_wds + 1;
 }
 
-static bool
+static void
 update_ubo_descriptors(struct zink_context *ctx, struct zink_descriptor_set *zds,
                        bool is_compute, bool cache_hit, bool need_resource_refs,
                        uint32_t *dynamic_offsets, unsigned *dynamic_offset_idx)
@@ -457,10 +453,10 @@ update_ubo_descriptors(struct zink_context *ctx, struct zink_descriptor_set *zds
       dynamic_offsets[i] = dynamic_buffers[i].offset;
    *dynamic_offset_idx = dynamic_offset_count;
 
-   return write_descriptors(ctx, zds, num_wds, wds, is_compute, cache_hit, need_resource_refs);
+   write_descriptors(ctx, zds, num_wds, wds, cache_hit, need_resource_refs);
 }
 
-static bool
+static void
 update_ssbo_descriptors(struct zink_context *ctx, struct zink_descriptor_set *zds,
                         bool is_compute, bool cache_hit, bool need_resource_refs)
 {
@@ -523,7 +519,7 @@ update_ssbo_descriptors(struct zink_context *ctx, struct zink_descriptor_set *zd
       }
    }
    _mesa_set_destroy(ht, NULL);
-   return write_descriptors(ctx, zds, num_wds, wds, is_compute, cache_hit, need_resource_refs);
+   write_descriptors(ctx, zds, num_wds, wds, cache_hit, need_resource_refs);
 }
 
 static void
@@ -575,7 +571,7 @@ handle_image_descriptor(struct zink_screen *screen, struct zink_resource *res, e
      }
 }
 
-static bool
+static void
 update_sampler_descriptors(struct zink_context *ctx, struct zink_descriptor_set *zds,
                            bool is_compute, bool cache_hit, bool need_resource_refs)
 {
@@ -644,12 +640,12 @@ update_sampler_descriptors(struct zink_context *ctx, struct zink_descriptor_set
             desc_set_sampler_add(ctx, zds, sampler_view, sampler, num_resources++,
                                  zink_shader_descriptor_is_buffer(shader, ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW, j),
                                  cache_hit);
-            struct zink_batch *batch = is_compute ? zink_batch_c(ctx) : zink_batch_g(ctx);
+            struct zink_batch *batch = &ctx->batch;
             if (sampler_view)
                zink_batch_reference_sampler_view(batch, sampler_view);
             if (sampler)
                /* this only tracks the most recent usage for now */
-               zink_batch_usage_set(&sampler->batch_uses, batch->queue, batch->state->fence.batch_id);
+               zink_batch_usage_set(&sampler->batch_uses, batch->state->fence.batch_id);
          }
          assert(num_wds < num_descriptors);
 
@@ -657,10 +653,10 @@ update_sampler_descriptors(struct zink_context *ctx, struct zink_descriptor_set
       }
    }
    _mesa_set_destroy(ht, NULL);
-   return write_descriptors(ctx, zds, num_wds, wds, is_compute, cache_hit, need_resource_refs);
+   write_descriptors(ctx, zds, num_wds, wds, cache_hit, need_resource_refs);
 }
 
-static bool
+static void
 update_image_descriptors(struct zink_context *ctx, struct zink_descriptor_set *zds,
                          bool is_compute, bool cache_hit, bool need_resource_refs)
 {
@@ -734,7 +730,7 @@ update_image_descriptors(struct zink_context *ctx, struct zink_descriptor_set *z
                                     &num_buffer_info, &buffer_views[num_buffer_info],
                                     NULL, imageview, bufferview, !k);
 
-            struct zink_batch *batch = is_compute ? zink_batch_c(ctx) : zink_batch_g(ctx);
+            struct zink_batch *batch = &ctx->batch;
             if (res)
                zink_batch_reference_image_view(batch, image_view);
          }
@@ -744,7 +740,7 @@ update_image_descriptors(struct zink_context *ctx, struct zink_descriptor_set *z
       }
    }
    _mesa_set_destroy(ht, NULL);
-   return write_descriptors(ctx, zds, num_wds, wds, is_compute, cache_hit, need_resource_refs);
+   write_descriptors(ctx, zds, num_wds, wds, cache_hit, need_resource_refs);
 }
 
 static void
@@ -762,27 +758,26 @@ update_descriptors(struct zink_context *ctx, struct zink_screen *screen, bool is
       else
          zds[h] = NULL;
    }
-   struct zink_batch *batch = is_compute ? zink_batch_c(ctx) : zink_batch_g(ctx);
+   struct zink_batch *batch = &ctx->batch;
    zink_batch_reference_program(batch, pg);
 
    uint32_t dynamic_offsets[PIPE_MAX_CONSTANT_BUFFERS];
    unsigned dynamic_offset_idx = 0;
 
-   bool need_flush = false;
    if (zds[ZINK_DESCRIPTOR_TYPE_UBO])
-      need_flush |= update_ubo_descriptors(ctx, zds[ZINK_DESCRIPTOR_TYPE_UBO],
+      update_ubo_descriptors(ctx, zds[ZINK_DESCRIPTOR_TYPE_UBO],
                                            is_compute, cache_hit[ZINK_DESCRIPTOR_TYPE_UBO],
                                            need_resource_refs[ZINK_DESCRIPTOR_TYPE_UBO], dynamic_offsets, &dynamic_offset_idx);
    if (zds[ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW])
-      need_flush |= update_sampler_descriptors(ctx, zds[ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW],
+      update_sampler_descriptors(ctx, zds[ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW],
                                                is_compute, cache_hit[ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW],
                                                need_resource_refs[ZINK_DESCRIPTOR_TYPE_SAMPLER_VIEW]);
    if (zds[ZINK_DESCRIPTOR_TYPE_SSBO])
-      need_flush |= update_ssbo_descriptors(ctx, zds[ZINK_DESCRIPTOR_TYPE_SSBO],
+      update_ssbo_descriptors(ctx, zds[ZINK_DESCRIPTOR_TYPE_SSBO],
                                                is_compute, cache_hit[ZINK_DESCRIPTOR_TYPE_SSBO],
                                                need_resource_refs[ZINK_DESCRIPTOR_TYPE_SSBO]);
    if (zds[ZINK_DESCRIPTOR_TYPE_IMAGE])
-      need_flush |= update_image_descriptors(ctx, zds[ZINK_DESCRIPTOR_TYPE_IMAGE],
+      update_image_descriptors(ctx, zds[ZINK_DESCRIPTOR_TYPE_IMAGE],
                                                is_compute, cache_hit[ZINK_DESCRIPTOR_TYPE_IMAGE],
                                                need_resource_refs[ZINK_DESCRIPTOR_TYPE_IMAGE]);
 
@@ -793,16 +788,6 @@ update_descriptors(struct zink_context *ctx, struct zink_screen *screen, bool is
                                  zds[h]->pool->type == ZINK_DESCRIPTOR_TYPE_UBO ? dynamic_offset_idx : 0, dynamic_offsets);
       }
    }
-   if (!need_flush)
-      return;
-
-   if (is_compute)
-      /* flush gfx batch */
-      ctx->base.flush(&ctx->base, NULL, PIPE_FLUSH_HINT_FINISH);
-   else {
-      /* flush compute batch */
-      zink_flush_queue(ctx, ZINK_QUEUE_COMPUTE);
-   }
 }
 
 static bool
@@ -864,7 +849,7 @@ zink_draw_vbo(struct pipe_context *pctx,
    bool need_index_buffer_unref = false;
 
    /* check memory usage and flush/stall as needed to avoid oom */
-   zink_maybe_flush_or_stall(ctx, ZINK_QUEUE_GFX);
+   zink_maybe_flush_or_stall(ctx);
 
    if (dinfo->primitive_restart && !restart_supported(dinfo->mode)) {
        util_draw_vbo_without_prim_restart(pctx, dinfo, dindirect, &draws[0]);
@@ -1175,10 +1160,10 @@ zink_launch_grid(struct pipe_context *pctx, const struct pipe_grid_info *info)
 {
    struct zink_context *ctx = zink_context(pctx);
    struct zink_screen *screen = zink_screen(pctx->screen);
-   struct zink_batch *batch = zink_batch_c(ctx);
+   struct zink_batch *batch = &ctx->batch;
 
    /* check memory usage and flush/stall as needed to avoid oom */
-   zink_maybe_flush_or_stall(ctx, ZINK_QUEUE_COMPUTE);
+   zink_maybe_flush_or_stall(ctx);
 
    struct zink_compute_program *comp_program = get_compute_program(ctx);
    if (!comp_program)
diff --git a/src/gallium/drivers/zink/zink_fence.c b/src/gallium/drivers/zink/zink_fence.c
index 3464b65..6d98371 100644
@@ -38,8 +38,8 @@ zink_fence_clear_resources(struct zink_screen *screen, struct zink_fence *fence)
    /* unref all used resources */
    set_foreach(fence->resources, entry) {
       struct zink_resource_object *obj = (struct zink_resource_object *)entry->key;
-      zink_batch_usage_unset(&obj->reads, !!fence->is_compute, fence->batch_id);
-      zink_batch_usage_unset(&obj->writes, !!fence->is_compute, fence->batch_id);
+      zink_batch_usage_unset(&obj->reads, fence->batch_id);
+      zink_batch_usage_unset(&obj->writes, fence->batch_id);
       zink_resource_object_reference(screen, &obj, NULL);
       _mesa_set_remove(fence->resources, entry);
    }
@@ -108,7 +108,7 @@ zink_fence_finish(struct zink_screen *screen, struct pipe_context *pctx, struct
                   uint64_t timeout_ns)
 {
    if (pctx && fence->deferred_ctx == pctx) {
-      zink_batch_g(zink_context(pctx))->has_work = true;
+      zink_context(pctx)->batch.has_work = true;
       /* this must be the current batch */
       pctx->flush(pctx, NULL, 0);
    }
@@ -146,7 +146,7 @@ zink_fence_server_sync(struct pipe_context *pctx, struct pipe_fence_handle *pfen
       return;
 
    if (fence->deferred_ctx) {
-      zink_batch_g(zink_context(pctx))->has_work = true;
+      zink_context(pctx)->batch.has_work = true;
       /* this must be the current batch */
       pctx->flush(pctx, NULL, 0);
    }
diff --git a/src/gallium/drivers/zink/zink_fence.h b/src/gallium/drivers/zink/zink_fence.h
index afa82ac..07d7dac 100644
@@ -42,7 +42,6 @@ struct zink_fence {
    uint32_t batch_id;
    struct set *resources; /* resources need access removed asap, so they're on the fence */
    bool submitted;
-   bool is_compute;
 };
 
 static inline struct zink_fence *
diff --git a/src/gallium/drivers/zink/zink_program.c b/src/gallium/drivers/zink/zink_program.c
index 638a74d..f9a4af1 100644
@@ -622,6 +622,7 @@ zink_create_compute_program(struct zink_context *ctx, struct zink_shader *shader
       goto fail;
 
    pipe_reference_init(&comp->base.reference, 1);
+   comp->base.is_compute = true;
 
    if (!ctx->curr_compute || !ctx->curr_compute->shader_cache) {
       /* TODO: cs shader keys placeholder for now */
diff --git a/src/gallium/drivers/zink/zink_program.h b/src/gallium/drivers/zink/zink_program.h
index 7f30e3f..c0cad91 100644
@@ -69,6 +69,7 @@ struct zink_shader_cache {
 
 struct zink_program {
    struct pipe_reference reference;
+   bool is_compute;
 
    struct zink_descriptor_pool *pool[ZINK_DESCRIPTOR_TYPES];
    struct zink_descriptor_set *last_set[ZINK_DESCRIPTOR_TYPES];
diff --git a/src/gallium/drivers/zink/zink_query.c b/src/gallium/drivers/zink/zink_query.c
index 1bf924a..e33d727 100644
@@ -116,12 +116,6 @@ needs_stats_list(struct zink_query *query)
 }
 
 static bool
-is_cs_query(struct zink_query *query)
-{
-   return query->type == PIPE_QUERY_PIPELINE_STATISTICS_SINGLE && query->index == PIPE_STAT_QUERY_CS_INVOCATIONS;
-}
-
-static bool
 is_time_query(struct zink_query *query)
 {
    return query->type == PIPE_QUERY_TIMESTAMP || query->type == PIPE_QUERY_TIME_ELAPSED;
@@ -133,16 +127,6 @@ is_so_overflow_query(struct zink_query *query)
    return query->type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE || query->type == PIPE_QUERY_SO_OVERFLOW_PREDICATE;
 }
 
-static struct zink_batch *
-get_batch_for_query(struct zink_context *ctx, struct zink_query *query, bool no_rp)
-{
-   if (query && is_cs_query(query))
-      return zink_batch_c(ctx);
-   if (no_rp)
-      return zink_batch_no_rp(ctx);
-   return zink_batch_g(ctx);
-}
-
 static struct pipe_query *
 zink_create_query(struct pipe_context *pctx,
                   unsigned query_type, unsigned index)
@@ -202,7 +186,7 @@ zink_create_query(struct pipe_context *pctx,
          }
       }
    }
-   struct zink_batch *batch = get_batch_for_query(zink_context(pctx), query, true);
+   struct zink_batch *batch = &zink_context(pctx)->batch;
    batch->has_work = true;
    vkCmdResetQueryPool(batch->state->cmdbuf, query->query_pool, 0, query->num_queries);
    if (query->type == PIPE_QUERY_PRIMITIVES_GENERATED)
@@ -406,10 +390,8 @@ force_cpu_read(struct zink_context *ctx, struct pipe_query *pquery, bool wait, e
    unsigned result_size = result_type <= PIPE_QUERY_TYPE_U32 ? sizeof(uint32_t) : sizeof(uint64_t);
    struct zink_query *query = (struct zink_query*)pquery;
    union pipe_query_result result;
-   if (zink_batch_usage_matches(&query->batch_id, ZINK_QUEUE_GFX, zink_batch_g(ctx)->state->fence.batch_id))
+   if (zink_batch_usage_matches(&query->batch_id, ctx->curr_batch))
       pctx->flush(pctx, NULL, PIPE_FLUSH_HINT_FINISH);
-   else if (is_cs_query(query))
-      zink_flush_queue(ctx, ZINK_QUEUE_COMPUTE);
 
    bool success = get_query_result(pctx, pquery, wait, &result);
    if (!success) {
@@ -443,15 +425,11 @@ static void
 copy_results_to_buffer(struct zink_context *ctx, struct zink_query *query, struct zink_resource *res, unsigned offset, int num_results, VkQueryResultFlags flags)
 {
    unsigned query_id = query->last_start;
-   struct zink_batch *batch = get_batch_for_query(ctx, query, true);
+   struct zink_batch *batch = &ctx->batch;
    unsigned base_result_size = (flags & VK_QUERY_RESULT_64_BIT) ? sizeof(uint64_t) : sizeof(uint32_t);
    unsigned result_size = base_result_size * num_results;
    if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)
       result_size += base_result_size;
-   if (is_cs_query(query)) {
-      if (zink_resource_has_usage(res, ZINK_RESOURCE_ACCESS_WRITE, ZINK_QUEUE_GFX))
-         ctx->base.flush(&ctx->base, NULL, PIPE_FLUSH_HINT_FINISH);
-   }
    /* if it's a single query that doesn't need special handling, we can copy it and be done */
    zink_batch_reference_resource_rw(batch, res, true);
    zink_resource_buffer_barrier(ctx, batch, res, VK_ACCESS_TRANSFER_WRITE_BIT, 0);
@@ -459,10 +437,7 @@ copy_results_to_buffer(struct zink_context *ctx, struct zink_query *query, struc
    vkCmdCopyQueryPoolResults(batch->state->cmdbuf, query->query_pool, query_id, num_results, res->obj->buffer,
                              offset, 0, flags);
    /* this is required for compute batch sync and will be removed later */
-   if (is_cs_query(query))
-      zink_flush_queue(ctx, ZINK_QUEUE_COMPUTE);
-   else
-      ctx->base.flush(&ctx->base, NULL, PIPE_FLUSH_HINT_FINISH);
+   zink_flush_queue(ctx);
 
 }
 
@@ -473,7 +448,7 @@ reset_pool(struct zink_context *ctx, struct zink_batch *batch, struct zink_query
     *
     * - vkCmdResetQueryPool spec
     */
-   batch = get_batch_for_query(ctx, q, true);
+   zink_batch_no_rp(ctx);
 
    if (q->type != PIPE_QUERY_TIMESTAMP)
       get_query_result(&ctx->base, (struct pipe_query*)q, false, &q->accumulated_result);
@@ -535,7 +510,7 @@ begin_query(struct zink_context *ctx, struct zink_batch *batch, struct zink_quer
    if (needs_stats_list(q))
       list_addtail(&q->stats_list, &ctx->primitives_generated_queries);
    p_atomic_inc(&q->fences);
-   zink_batch_usage_set(&q->batch_id, batch->queue, batch->state->fence.batch_id);
+   zink_batch_usage_set(&q->batch_id, batch->state->fence.batch_id);
    _mesa_set_add(batch->state->active_queries, q);
 }
 
@@ -545,7 +520,7 @@ zink_begin_query(struct pipe_context *pctx,
 {
    struct zink_query *query = (struct zink_query *)q;
    struct zink_context *ctx = zink_context(pctx);
-   struct zink_batch *batch = get_batch_for_query(ctx, query, false);
+   struct zink_batch *batch = &ctx->batch;
 
    query->last_start = query->curr_query;
 
@@ -565,7 +540,7 @@ end_query(struct zink_context *ctx, struct zink_batch *batch, struct zink_query
    if (is_time_query(q)) {
       vkCmdWriteTimestamp(batch->state->cmdbuf, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
                           q->query_pool, q->curr_query);
-      zink_batch_usage_set(&q->batch_id, batch->queue, batch->state->fence.batch_id);
+      zink_batch_usage_set(&q->batch_id, batch->state->fence.batch_id);
    } else if (q->type == PIPE_QUERY_PRIMITIVES_EMITTED ||
             q->type == PIPE_QUERY_PRIMITIVES_GENERATED ||
             q->type == PIPE_QUERY_SO_OVERFLOW_PREDICATE)
@@ -594,7 +569,7 @@ zink_end_query(struct pipe_context *pctx,
 {
    struct zink_context *ctx = zink_context(pctx);
    struct zink_query *query = (struct zink_query *)q;
-   struct zink_batch *batch = get_batch_for_query(ctx, query, false);
+   struct zink_batch *batch = &ctx->batch;
 
    if (needs_stats_list(query))
       list_delinit(&query->stats_list);
@@ -612,13 +587,12 @@ zink_get_query_result(struct pipe_context *pctx,
 {
    struct zink_query *query = (void*)q;
    struct zink_context *ctx = zink_context(pctx);
-   enum zink_queue queue = is_cs_query(query) ? ZINK_QUEUE_COMPUTE : ZINK_QUEUE_GFX;
-   uint32_t batch_id = p_atomic_read(&query->batch_id.usage[queue]);
+   uint32_t batch_id = p_atomic_read(&query->batch_id.usage);
 
    if (wait)
-      zink_wait_on_batch(ctx, queue, batch_id);
+      zink_wait_on_batch(ctx, batch_id);
    else if (batch_id == ctx->curr_batch)
-      zink_flush_queue(ctx, queue);
+      zink_flush_queue(ctx);
 
    return get_query_result(pctx, q, wait, result);
 }
@@ -667,7 +641,7 @@ zink_set_active_query_state(struct pipe_context *pctx, bool enable)
    struct zink_context *ctx = zink_context(pctx);
    ctx->queries_disabled = !enable;
 
-   struct zink_batch *batch = zink_batch_g(ctx);
+   struct zink_batch *batch = &ctx->batch;
    if (ctx->queries_disabled)
       zink_suspend_queries(ctx, batch);
    else
@@ -717,11 +691,11 @@ zink_render_condition(struct pipe_context *pctx,
    if (query->type != PIPE_QUERY_PRIMITIVES_GENERATED &&
        !is_so_overflow_query(query)) {
       copy_results_to_buffer(ctx, query, res, 0, num_results, flags);
-      batch = zink_batch_g(ctx);
+      batch = &ctx->batch;
    } else {
       /* these need special handling */
       force_cpu_read(ctx, pquery, true, PIPE_QUERY_TYPE_U32, pres, 0);
-      batch = zink_batch_g(ctx);
+      batch = &ctx->batch;
       zink_batch_reference_resource_rw(batch, res, false);
    }
 
@@ -769,7 +743,7 @@ zink_get_query_result_resource(struct pipe_context *pctx,
       if (fences) {
          struct pipe_resource *staging = pipe_buffer_create(pctx->screen, 0, PIPE_USAGE_STAGING, result_size * 2);
          copy_results_to_buffer(ctx, query, zink_resource(staging), 0, 1, size_flags | VK_QUERY_RESULT_WITH_AVAILABILITY_BIT | VK_QUERY_RESULT_PARTIAL_BIT);
-         zink_copy_buffer(ctx, get_batch_for_query(ctx, query, true), res, zink_resource(staging), offset, result_size, result_size);
+         zink_copy_buffer(ctx, &ctx->batch, res, zink_resource(staging), offset, result_size, result_size);
          pipe_resource_reference(&staging, NULL);
       } else {
          uint64_t u64[2] = {0};
diff --git a/src/gallium/drivers/zink/zink_resource.c b/src/gallium/drivers/zink/zink_resource.c
index 2005555..35206c7 100644
@@ -54,48 +54,32 @@ debug_describe_zink_resource_object(char *buf, const struct zink_resource_object
 }
 
 static uint32_t
-get_resource_usage(struct zink_resource *res, enum zink_queue queue)
+get_resource_usage(struct zink_resource *res)
 {
-   assert(queue < 2);
-   uint32_t reads = p_atomic_read(&res->obj->reads.usage[queue]);
-   uint32_t writes = p_atomic_read(&res->obj->writes.usage[queue]);
+   uint32_t reads = p_atomic_read(&res->obj->reads.usage);
+   uint32_t writes = p_atomic_read(&res->obj->writes.usage);
    uint32_t batch_uses = 0;
    if (reads)
-      batch_uses |= ZINK_RESOURCE_ACCESS_READ << queue;
+      batch_uses |= ZINK_RESOURCE_ACCESS_READ;
    if (writes)
-      batch_uses |= ZINK_RESOURCE_ACCESS_WRITE << queue;
-   return batch_uses;
-}
-
-static uint32_t
-get_all_resource_usage(struct zink_resource *res)
-{
-   uint32_t batch_uses = 0;
-   for (unsigned i = 0; i < ZINK_QUEUE_ANY; i++)
-      batch_uses |= get_resource_usage(res, i);
+      batch_uses |= ZINK_RESOURCE_ACCESS_WRITE;
    return batch_uses;
 }
 
 static void
-resource_sync_reads_from_compute(struct zink_context *ctx, struct zink_resource *res)
+resource_sync_reads(struct zink_context *ctx, struct zink_resource *res)
 {
-   uint32_t reads = p_atomic_read(&res->obj->reads.usage[ZINK_QUEUE_COMPUTE]);
+   uint32_t reads = p_atomic_read(&res->obj->reads.usage);
    assert(reads);
-   zink_wait_on_batch(ctx, ZINK_QUEUE_COMPUTE, reads);
+   zink_wait_on_batch(ctx, reads);
 }
 
 static void
 resource_sync_writes_from_batch_usage(struct zink_context *ctx, struct zink_resource *res)
 {
-   uint32_t writes[2];
-   for (int i = 0; i < ZINK_QUEUE_ANY; i++)
-      writes[i] = p_atomic_read(&res->obj->writes.usage[i]);
-
-   enum zink_queue queue = writes[0] < writes[1] ? ZINK_QUEUE_COMPUTE : ZINK_QUEUE_GFX;
-   /* sync lower id first */
-   if (writes[!queue])
-      zink_wait_on_batch(ctx, !queue, writes[!queue]);
-   zink_wait_on_batch(ctx, queue, writes[queue]);
+   uint32_t writes = p_atomic_read(&res->obj->writes.usage);
+
+   zink_wait_on_batch(ctx, writes);
 }
 
 static uint32_t
@@ -600,7 +584,7 @@ zink_resource_invalidate(struct pipe_context *pctx, struct pipe_resource *pres)
    res->bind_history &= ~ZINK_RESOURCE_USAGE_STREAMOUT;
 
    util_range_set_empty(&res->valid_buffer_range);
-   if (!get_all_resource_usage(res))
+   if (!get_resource_usage(res))
       return;
 
    struct zink_resource_object *old_obj = res->obj;
@@ -638,21 +622,10 @@ zink_transfer_copy_bufimage(struct zink_context *ctx,
 }
 
 bool
-zink_resource_has_usage(struct zink_resource *res, enum zink_resource_access usage, enum zink_queue queue)
+zink_resource_has_usage(struct zink_resource *res, enum zink_resource_access usage)
 {
-   uint32_t batch_uses = get_all_resource_usage(res);
-   switch (queue) {
-   case ZINK_QUEUE_COMPUTE:
-      return batch_uses & (usage << ZINK_QUEUE_COMPUTE);
-   case ZINK_QUEUE_GFX:
-      return batch_uses & (usage << ZINK_QUEUE_GFX);
-   case ZINK_QUEUE_ANY:
-      return batch_uses & ((usage << ZINK_QUEUE_GFX) | (usage << ZINK_QUEUE_COMPUTE));
-   default:
-      break;
-   }
-   unreachable("unknown queue type");
-   return false;
+   uint32_t batch_uses = get_resource_usage(res);
+   return batch_uses & usage;
 }
 
 static void *
@@ -673,11 +646,11 @@ buffer_transfer_map(struct zink_context *ctx, struct zink_resource *res, unsigne
       }
       if (util_ranges_intersect(&res->valid_buffer_range, box->x, box->x + box->width)) {
          /* special case compute reads since they aren't handled by zink_fence_wait() */
-         if (usage & PIPE_MAP_WRITE && zink_resource_has_usage(res, ZINK_RESOURCE_ACCESS_READ, ZINK_QUEUE_COMPUTE))
-            resource_sync_reads_from_compute(ctx, res);
-         if (usage & PIPE_MAP_READ && zink_resource_has_usage(res, ZINK_RESOURCE_ACCESS_WRITE, ZINK_QUEUE_ANY))
+         if (usage & PIPE_MAP_WRITE && zink_resource_has_usage(res, ZINK_RESOURCE_ACCESS_READ))
+            resource_sync_reads(ctx, res);
+         if (usage & PIPE_MAP_READ && zink_resource_has_usage(res, ZINK_RESOURCE_ACCESS_WRITE))
             resource_sync_writes_from_batch_usage(ctx, res);
-         else if (usage & PIPE_MAP_WRITE && zink_resource_has_usage(res, ZINK_RESOURCE_ACCESS_RW, ZINK_QUEUE_ANY)) {
+         else if (usage & PIPE_MAP_WRITE && zink_resource_has_usage(res, ZINK_RESOURCE_ACCESS_RW)) {
             /* need to wait for all rendering to finish
              * TODO: optimize/fix this to be much less obtrusive
              * mesa/mesa#2966
@@ -787,9 +760,9 @@ zink_transfer_map(struct pipe_context *pctx,
 
          if (usage & PIPE_MAP_READ) {
             /* TODO: can probably just do a full cs copy if it's already in a cs batch */
-            if (zink_resource_has_usage(res, ZINK_RESOURCE_ACCESS_WRITE, ZINK_QUEUE_COMPUTE))
+            if (zink_resource_has_usage(res, ZINK_RESOURCE_ACCESS_WRITE))
                /* don't actually have to stall here, only ensure batch is submitted */
-               zink_flush_queue(ctx, ZINK_QUEUE_COMPUTE);
+               zink_flush_queue(ctx);
             struct zink_context *ctx = zink_context(pctx);
             zink_transfer_copy_bufimage(ctx, staging_res, res, trans);
             /* need to wait for rendering to finish */
@@ -807,9 +780,10 @@ zink_transfer_map(struct pipe_context *pctx,
          assert(!res->optimal_tiling);
 
          /* special case compute reads since they aren't handled by zink_fence_wait() */
-         if (zink_resource_has_usage(res, ZINK_RESOURCE_ACCESS_READ, ZINK_QUEUE_COMPUTE))
-            resource_sync_reads_from_compute(ctx, res);
-         if (zink_resource_has_usage(res, ZINK_RESOURCE_ACCESS_RW, ZINK_QUEUE_ANY)) {
+            /* special case compute reads since they aren't handled by zink_fence_wait() */
+         if (zink_resource_has_usage(res, ZINK_RESOURCE_ACCESS_READ))
+            resource_sync_reads(ctx, res);
+         if (zink_resource_has_usage(res, ZINK_RESOURCE_ACCESS_RW)) {
             if (usage & PIPE_MAP_READ)
                resource_sync_writes_from_batch_usage(ctx, res);
             else
@@ -866,9 +840,9 @@ zink_transfer_flush_region(struct pipe_context *pctx,
    if (trans->base.usage & PIPE_MAP_WRITE) {
       if (trans->staging_res) {
          struct zink_resource *staging_res = zink_resource(trans->staging_res);
-         if (zink_resource_has_usage(res, ZINK_RESOURCE_ACCESS_WRITE, ZINK_QUEUE_COMPUTE))
+         if (zink_resource_has_usage(res, ZINK_RESOURCE_ACCESS_WRITE))
             /* don't actually have to stall here, only ensure batch is submitted */
-            zink_flush_queue(ctx, ZINK_QUEUE_COMPUTE);
+            zink_flush_queue(ctx);
 
          if (ptrans->resource->target == PIPE_BUFFER)
             zink_copy_buffer(ctx, NULL, res, staging_res, box->x, box->x, box->width);
diff --git a/src/gallium/drivers/zink/zink_resource.h b/src/gallium/drivers/zink/zink_resource.h
index 4fd6f9b..c71d732 100644
@@ -125,7 +125,7 @@ void
 zink_resource_setup_transfer_layouts(struct zink_context *ctx, struct zink_resource *src, struct zink_resource *dst);
 
 bool
-zink_resource_has_usage(struct zink_resource *res, enum zink_resource_access usage, enum zink_queue queue);
+zink_resource_has_usage(struct zink_resource *res, enum zink_resource_access usage);
 
 bool
 zink_resource_has_usage_for_id(struct zink_resource *res, uint32_t id);