freedreno: Remove FD_STAGE_* in favor of a "disable_all" flag.
authorEric Anholt <eric@anholt.net>
Thu, 28 Jan 2021 21:29:15 +0000 (13:29 -0800)
committerMarge Bot <eric+marge@anholt.net>
Wed, 10 Feb 2021 03:46:25 +0000 (03:46 +0000)
The only paths are "prepare queries for rendering" and "finish all
queries"; everything else is handled by set_active_query_state().

Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/8873>
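For illustration only (not part of the change): a self-contained toy model of
the new flow, using hypothetical stand-in types (toy_query, toy_context) rather
than the real freedreno structs. It sketches how the single "disable_all" bool
replaces the FD_STAGE_* tracking: the backend hook is called with false before
rendering (fd_batch_update_queries) and with true when the batch is finished
(fd_batch_finish_queries), and each active query is paused or resumed
accordingly.

/* Toy model (not Mesa code): toy_query/toy_context are hypothetical
 * stand-ins.  A single "disable_all" bool replaces the old FD_STAGE_*
 * enum when deciding whether each active query should be running. */
#include <stdbool.h>
#include <stdio.h>

struct toy_query {
   bool always;    /* providers that stay active even with queries disabled */
   bool running;
};

struct toy_context {
   struct toy_query *queries[8];
   int num_queries;
   bool active_queries;         /* mirrors set_active_query_state() */
   bool update_active_queries;  /* set when the active-query list changed */
};

/* Stand-in for ctx->query_update_batch(batch, disable_all): */
static void
toy_query_update_batch(struct toy_context *ctx, bool disable_all)
{
   /* Nothing to do unless we must stop everything or the list changed: */
   if (!disable_all && !ctx->update_active_queries)
      return;

   for (int i = 0; i < ctx->num_queries; i++) {
      struct toy_query *q = ctx->queries[i];
      bool now_active = !disable_all &&
            (ctx->active_queries || q->always);

      if (q->running && !now_active)
         printf("pause query %d\n", i);   /* end the sample period */
      else if (!q->running && now_active)
         printf("resume query %d\n", i);  /* start a new sample period */
      q->running = now_active;
   }

   ctx->update_active_queries = false;
}

int
main(void)
{
   struct toy_query occlusion = { .always = false, .running = false };
   struct toy_context ctx = {
      .queries = { &occlusion }, .num_queries = 1,
      .active_queries = true, .update_active_queries = true,
   };

   toy_query_update_batch(&ctx, false);  /* fd_batch_update_queries(): resume */
   toy_query_update_batch(&ctx, true);   /* fd_batch_finish_queries(): pause */
   return 0;
}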

16 files changed:
src/gallium/drivers/freedreno/a2xx/fd2_query.c
src/gallium/drivers/freedreno/a3xx/fd3_query.c
src/gallium/drivers/freedreno/a4xx/fd4_query.c
src/gallium/drivers/freedreno/a5xx/fd5_query.c
src/gallium/drivers/freedreno/a6xx/fd6_blitter.c
src/gallium/drivers/freedreno/a6xx/fd6_query.c
src/gallium/drivers/freedreno/freedreno_batch.c
src/gallium/drivers/freedreno/freedreno_batch.h
src/gallium/drivers/freedreno/freedreno_blitter.c
src/gallium/drivers/freedreno/freedreno_context.h
src/gallium/drivers/freedreno/freedreno_draw.c
src/gallium/drivers/freedreno/freedreno_query_acc.c
src/gallium/drivers/freedreno/freedreno_query_acc.h
src/gallium/drivers/freedreno/freedreno_query_hw.c
src/gallium/drivers/freedreno/freedreno_query_hw.h
src/gallium/drivers/freedreno/freedreno_state.c

index f56390a..9cdcfee 100644 (file)
@@ -238,7 +238,7 @@ fd2_query_context_init(struct pipe_context *pctx)
        struct fd_context *ctx = fd_context(pctx);
 
        ctx->create_query = fd_acc_create_query;
-       ctx->query_set_stage = fd_acc_query_set_stage;
+       ctx->query_update_batch = fd_acc_query_update_batch;
 
        pctx->create_batch_query = fd2_create_batch_query;
 }
index 78cc31a..2273907 100644 (file)
@@ -141,7 +141,7 @@ void fd3_query_context_init(struct pipe_context *pctx)
        ctx->create_query = fd_hw_create_query;
        ctx->query_prepare = fd_hw_query_prepare;
        ctx->query_prepare_tile = fd_hw_query_prepare_tile;
-       ctx->query_set_stage = fd_hw_query_set_stage;
+       ctx->query_update_batch = fd_hw_query_update_batch;
 
        fd_hw_query_register_provider(pctx, &occlusion_counter);
        fd_hw_query_register_provider(pctx, &occlusion_predicate);
index 7cd9733..3c8ddce 100644 (file)
@@ -285,7 +285,7 @@ void fd4_query_context_init(struct pipe_context *pctx)
        ctx->create_query = fd_hw_create_query;
        ctx->query_prepare = fd_hw_query_prepare;
        ctx->query_prepare_tile = fd_hw_query_prepare_tile;
-       ctx->query_set_stage = fd_hw_query_set_stage;
+       ctx->query_update_batch = fd_hw_query_update_batch;
 
        fd_hw_query_register_provider(pctx, &occlusion_counter);
        fd_hw_query_register_provider(pctx, &occlusion_predicate);
index 2830e7b..2f61c17 100644 (file)
@@ -450,7 +450,7 @@ fd5_query_context_init(struct pipe_context *pctx)
        struct fd_context *ctx = fd_context(pctx);
 
        ctx->create_query = fd_acc_create_query;
-       ctx->query_set_stage = fd_acc_query_set_stage;
+       ctx->query_update_batch = fd_acc_query_update_batch;
 
        pctx->create_batch_query = fd5_create_batch_query;
 
index 6ea48b0..aa4f4e4 100644 (file)
@@ -885,7 +885,7 @@ handle_rgba_blit(struct fd_context *ctx, const struct pipe_blit_info *info)
         */
        fd_fence_ref(&ctx->last_fence, NULL);
 
-       fd_batch_set_stage(batch, FD_STAGE_BLIT);
+       fd_batch_update_queries(batch);
 
        emit_setup(batch);
 
@@ -918,6 +918,11 @@ handle_rgba_blit(struct fd_context *ctx, const struct pipe_blit_info *info)
        fd_batch_flush(batch);
        fd_batch_reference(&batch, NULL);
 
+       /* Acc query state will have been dirtied by our fd_batch_update_queries, so
+        * the ctx->batch may need to turn its queries back on.
+        */
+       ctx->update_active_queries = true;
+
        return true;
 }
 
index 21c4b0e..b5e978d 100644 (file)
@@ -643,7 +643,7 @@ fd6_query_context_init(struct pipe_context *pctx)
        struct fd_context *ctx = fd_context(pctx);
 
        ctx->create_query = fd_acc_create_query;
-       ctx->query_set_stage = fd_acc_query_set_stage;
+       ctx->query_update_batch = fd_acc_query_update_batch;
 
        ctx->record_timestamp = record_timestamp;
        ctx->ts_to_ns = ticks_to_ns;
index b2b5871..ceceb19 100644 (file)
@@ -91,7 +91,6 @@ batch_init(struct fd_batch *batch)
        batch->num_bins_per_pipe = 0;
        batch->prim_strm_bits = 0;
        batch->draw_strm_bits = 0;
-       batch->stage = FD_STAGE_NULL;
 
        fd_reset_wfi(batch);
 
@@ -343,7 +342,7 @@ batch_flush(struct fd_batch *batch)
        /* close out the draw cmds by making sure any active queries are
         * paused:
         */
-       fd_batch_set_stage(batch, FD_STAGE_NULL);
+       fd_batch_finish_queries(batch);
 
        batch_flush_reset_dependencies(batch, true);
 
index ba2bac6..be146f2 100644 (file)
@@ -213,11 +213,6 @@ struct fd_batch {
        /* which sample providers are currently enabled in the batch: */
        uint32_t query_providers_active;
 
-       /* tracking for current stage, to know when to start/stop
-        * any active queries:
-        */
-       enum fd_render_stage stage;
-
        /* list of samples in current batch: */
        struct util_dynarray samples;
 
@@ -331,15 +326,24 @@ fd_batch_lock_submit(struct fd_batch *batch)
        return ret;
 }
 
-static inline void
-fd_batch_set_stage(struct fd_batch *batch, enum fd_render_stage stage)
+/* Since we reorder batches and can pause/resume queries (notably for disabling
+ * queries during some meta operations), we update the current query state for
+ * the batch before each draw.
+ */
+static inline void fd_batch_update_queries(struct fd_batch *batch)
 {
        struct fd_context *ctx = batch->ctx;
 
-       if (ctx->query_set_stage)
-               ctx->query_set_stage(batch, stage);
+       if (ctx->query_update_batch)
+               ctx->query_update_batch(batch, false);
+}
+
+static inline void fd_batch_finish_queries(struct fd_batch *batch)
+{
+       struct fd_context *ctx = batch->ctx;
 
-       batch->stage = stage;
+       if (ctx->query_update_batch)
+               ctx->query_update_batch(batch, true);
 }
 
 static inline void
index bda41ff..dcdef47 100644 (file)
@@ -77,8 +77,7 @@ default_src_texture(struct pipe_sampler_view *src_templ,
 }
 
 static void
-fd_blitter_pipe_begin(struct fd_context *ctx, bool render_cond, bool discard,
-               enum fd_render_stage stage)
+fd_blitter_pipe_begin(struct fd_context *ctx, bool render_cond, bool discard)
 {
        fd_fence_ref(&ctx->last_fence, NULL);
 
@@ -112,7 +111,7 @@ fd_blitter_pipe_begin(struct fd_context *ctx, bool render_cond, bool discard,
                        ctx->cond_query, ctx->cond_cond, ctx->cond_mode);
 
        if (ctx->batch)
-               fd_batch_set_stage(ctx->batch, stage);
+               fd_batch_update_queries(ctx->batch);
 
        ctx->in_discard_blit = discard;
 }
@@ -140,7 +139,7 @@ fd_blitter_blit(struct fd_context *ctx, const struct pipe_blit_info *info)
                                info->dst.box.height, info->dst.box.depth);
        }
 
-       fd_blitter_pipe_begin(ctx, info->render_condition_enable, discard, FD_STAGE_BLIT);
+       fd_blitter_pipe_begin(ctx, info->render_condition_enable, discard);
 
        /* Initialize the surface. */
        default_dst_texture(&dst_templ, dst, info->dst.level,
@@ -181,7 +180,7 @@ fd_blitter_clear(struct pipe_context *pctx, unsigned buffers,
        /* Note: don't use discard=true, if there was something to
         * discard, that would have been already handled in fd_clear().
         */
-       fd_blitter_pipe_begin(ctx, false, false, FD_STAGE_CLEAR);
+       fd_blitter_pipe_begin(ctx, false, false);
 
        util_blitter_common_clear_setup(blitter, pfb->width, pfb->height,
                        buffers, NULL, NULL);
@@ -313,7 +312,7 @@ fd_blitter_pipe_copy_region(struct fd_context *ctx,
                return false;
 
        /* TODO we could discard if dst box covers dst level fully.. */
-       fd_blitter_pipe_begin(ctx, false, false, FD_STAGE_BLIT);
+       fd_blitter_pipe_begin(ctx, false, false);
        util_blitter_copy_texture(ctx->blitter,
                        dst, dst_level, dstx, dsty, dstz,
                        src, src_level, src_box);
index aaade07..80e444d 100644 (file)
@@ -172,24 +172,6 @@ enum fd_dirty_shader_state {
        FD_DIRTY_SHADER_IMAGE = BIT(4),
 };
 
-/* Bitmask of stages in rendering that a particular query is active.
- * Queries will be automatically started/stopped (generating additional
- * fd_hw_sample_period's) on entrance/exit from stages that are
- * applicable to the query.
- *
- * NOTE: set the stage to NULL at end of IB to ensure no query is still
- * active.  Things aren't going to work out the way you want if a query
- * is active across IB's (or between tile IB and draw IB)
- */
-enum fd_render_stage {
-       FD_STAGE_NULL     = 0x00,
-       FD_STAGE_DRAW     = 0x01,
-       FD_STAGE_CLEAR    = 0x02,
-       /* used for driver internal draws (ie. util_blitter_blit()): */
-       FD_STAGE_BLIT     = 0x04,
-       FD_STAGE_ALL      = 0xff,
-};
-
 #define MAX_HW_SAMPLE_PROVIDERS 7
 struct fd_hw_sample_provider;
 struct fd_hw_sample;
@@ -241,8 +223,8 @@ struct fd_context {
        struct list_head acc_active_queries;
        /*@}*/
 
-       /* Whether we need to walk the acc_active_queries next fd_set_stage() to
-        * update active queries (even if stage doesn't change).
+       /* Whether we need to recheck the active_queries list at the next
+        * fd_batch_update_queries().
         */
        bool update_active_queries;
 
@@ -436,7 +418,7 @@ struct fd_context {
        void (*query_prepare)(struct fd_batch *batch, uint32_t num_tiles);
        void (*query_prepare_tile)(struct fd_batch *batch, uint32_t n,
                        struct fd_ringbuffer *ring);
-       void (*query_set_stage)(struct fd_batch *batch, enum fd_render_stage stage);
+       void (*query_update_batch)(struct fd_batch *batch, bool disable_all);
 
        /* blitter: */
        bool (*blit)(struct fd_context *ctx, const struct pipe_blit_info *info);
index bd2d469..fb7524c 100644 (file)
@@ -197,7 +197,7 @@ batch_draw_tracking(struct fd_batch *batch, const struct pipe_draw_info *info,
        /* NOTE: needs to be before resource_written(batch->query_buf), otherwise
         * query_buf may not be created yet.
         */
-       fd_batch_set_stage(batch, FD_STAGE_DRAW);
+       fd_batch_update_queries(batch);
 
        /*
         * Figure out the buffers/features we need:
@@ -473,7 +473,7 @@ fd_clear(struct pipe_context *pctx, unsigned buffers,
        bool fallback = true;
 
        if (ctx->clear) {
-               fd_batch_set_stage(batch, FD_STAGE_CLEAR);
+               fd_batch_update_queries(batch);
 
                if (ctx->clear(ctx, buffers, color, depth, stencil)) {
                        if (fd_mesa_debug & FD_DBG_DCLEAR)
index b06de30..a046b71 100644 (file)
@@ -235,16 +235,16 @@ fd_acc_create_query(struct fd_context *ctx, unsigned query_type,
  * batch reordering).
  */
 void
-fd_acc_query_set_stage(struct fd_batch *batch, enum fd_render_stage stage)
+fd_acc_query_update_batch(struct fd_batch *batch, bool disable_all)
 {
        struct fd_context *ctx = batch->ctx;
 
-       if (stage != batch->stage || ctx->update_active_queries) {
+       if (disable_all || ctx->update_active_queries) {
                struct fd_acc_query *aq;
                LIST_FOR_EACH_ENTRY(aq, &ctx->acc_active_queries, node) {
                        bool batch_change = aq->batch != batch;
                        bool was_active = aq->batch != NULL;
-                       bool now_active = stage != FD_STAGE_NULL &&
+                       bool now_active = !disable_all &&
                                (ctx->active_queries || aq->provider->always);
 
                        if (was_active && (!now_active || batch_change))
index 2092aba..be34d2e 100644 (file)
@@ -106,7 +106,7 @@ struct fd_query * fd_acc_create_query(struct fd_context *ctx, unsigned query_typ
        unsigned index);
 struct fd_query * fd_acc_create_query2(struct fd_context *ctx, unsigned query_type,
                unsigned index, const struct fd_acc_sample_provider *provider);
-void fd_acc_query_set_stage(struct fd_batch *batch, enum fd_render_stage stage);
+void fd_acc_query_update_batch(struct fd_batch *batch, bool disable_all);
 void fd_acc_query_register_provider(struct pipe_context *pctx,
                const struct fd_acc_sample_provider *provider);
 
index 2f1606a..f7d5644 100644 (file)
@@ -385,15 +385,15 @@ fd_hw_query_prepare_tile(struct fd_batch *batch, uint32_t n,
 }
 
 void
-fd_hw_query_set_stage(struct fd_batch *batch, enum fd_render_stage stage)
+fd_hw_query_update_batch(struct fd_batch *batch, bool disable_all)
 {
        struct fd_context *ctx = batch->ctx;
 
-       if (stage != batch->stage || ctx->update_active_queries) {
+       if (disable_all || ctx->update_active_queries) {
                struct fd_hw_query *hq;
                LIST_FOR_EACH_ENTRY(hq, &batch->ctx->hw_active_queries, list) {
                        bool was_active = query_active_in_batch(batch, hq);
-                       bool now_active = stage != FD_STAGE_NULL &&
+                       bool now_active = !disable_all &&
                                (ctx->active_queries || hq->provider->always);
 
                        if (now_active && !was_active)
index a6b702e..092aaeb 100644 (file)
@@ -144,7 +144,7 @@ void __fd_hw_sample_destroy(struct fd_context *ctx, struct fd_hw_sample *samp);
 void fd_hw_query_prepare(struct fd_batch *batch, uint32_t num_tiles);
 void fd_hw_query_prepare_tile(struct fd_batch *batch, uint32_t n,
                struct fd_ringbuffer *ring);
-void fd_hw_query_set_stage(struct fd_batch *batch, enum fd_render_stage stage);
+void fd_hw_query_update_batch(struct fd_batch *batch, bool end_batch);
 void fd_hw_query_enable(struct fd_batch *batch, struct fd_ringbuffer *ring);
 void fd_hw_query_register_provider(struct pipe_context *pctx,
                const struct fd_hw_sample_provider *provider);
index 617adf9..577555f 100644 (file)
@@ -251,7 +251,7 @@ fd_set_framebuffer_state(struct pipe_context *pctx,
                fd_batch_reference(&old_batch, ctx->batch);
 
                if (likely(old_batch))
-                       fd_batch_set_stage(old_batch, FD_STAGE_NULL);
+                       fd_batch_finish_queries(old_batch);
 
                fd_batch_reference(&ctx->batch, NULL);
                fd_context_all_dirty(ctx);