struct fd_context *ctx = fd_context(pctx);
ctx->create_query = fd_acc_create_query;
- ctx->query_set_stage = fd_acc_query_set_stage;
+ ctx->query_update_batch = fd_acc_query_update_batch;
pctx->create_batch_query = fd2_create_batch_query;
}
ctx->create_query = fd_hw_create_query;
ctx->query_prepare = fd_hw_query_prepare;
ctx->query_prepare_tile = fd_hw_query_prepare_tile;
- ctx->query_set_stage = fd_hw_query_set_stage;
+ ctx->query_update_batch = fd_hw_query_update_batch;
fd_hw_query_register_provider(pctx, &occlusion_counter);
fd_hw_query_register_provider(pctx, &occlusion_predicate);
ctx->create_query = fd_hw_create_query;
ctx->query_prepare = fd_hw_query_prepare;
ctx->query_prepare_tile = fd_hw_query_prepare_tile;
- ctx->query_set_stage = fd_hw_query_set_stage;
+ ctx->query_update_batch = fd_hw_query_update_batch;
fd_hw_query_register_provider(pctx, &occlusion_counter);
fd_hw_query_register_provider(pctx, &occlusion_predicate);
struct fd_context *ctx = fd_context(pctx);
ctx->create_query = fd_acc_create_query;
- ctx->query_set_stage = fd_acc_query_set_stage;
+ ctx->query_update_batch = fd_acc_query_update_batch;
pctx->create_batch_query = fd5_create_batch_query;
*/
fd_fence_ref(&ctx->last_fence, NULL);
- fd_batch_set_stage(batch, FD_STAGE_BLIT);
+ fd_batch_update_queries(batch);
emit_setup(batch);
fd_batch_flush(batch);
fd_batch_reference(&batch, NULL);
+ /* Acc query state will have been dirtied by our fd_batch_update_queries, so
+ * the ctx->batch may need to turn its queries back on.
+ */
+ ctx->update_active_queries = true;
+
return true;
}
struct fd_context *ctx = fd_context(pctx);
ctx->create_query = fd_acc_create_query;
- ctx->query_set_stage = fd_acc_query_set_stage;
+ ctx->query_update_batch = fd_acc_query_update_batch;
ctx->record_timestamp = record_timestamp;
ctx->ts_to_ns = ticks_to_ns;
batch->num_bins_per_pipe = 0;
batch->prim_strm_bits = 0;
batch->draw_strm_bits = 0;
- batch->stage = FD_STAGE_NULL;
fd_reset_wfi(batch);
/* close out the draw cmds by making sure any active queries are
* paused:
*/
- fd_batch_set_stage(batch, FD_STAGE_NULL);
+ fd_batch_finish_queries(batch);
batch_flush_reset_dependencies(batch, true);
/* which sample providers are currently enabled in the batch: */
uint32_t query_providers_active;
- /* tracking for current stage, to know when to start/stop
- * any active queries:
- */
- enum fd_render_stage stage;
-
/* list of samples in current batch: */
struct util_dynarray samples;
return ret;
}
-static inline void
-fd_batch_set_stage(struct fd_batch *batch, enum fd_render_stage stage)
+/* Since we reorder batches and can pause/resume queries (notably for disabling
+ * queries during some meta operations), we update the current query state for
+ * the batch before each draw.
+ */
+static inline void fd_batch_update_queries(struct fd_batch *batch)
{
struct fd_context *ctx = batch->ctx;
- if (ctx->query_set_stage)
- ctx->query_set_stage(batch, stage);
+ if (ctx->query_update_batch)
+ ctx->query_update_batch(batch, false);
+}
+
+static inline void fd_batch_finish_queries(struct fd_batch *batch)
+{
+ struct fd_context *ctx = batch->ctx;
- batch->stage = stage;
+ if (ctx->query_update_batch)
+ ctx->query_update_batch(batch, true);
}
static inline void
}
static void
-fd_blitter_pipe_begin(struct fd_context *ctx, bool render_cond, bool discard,
- enum fd_render_stage stage)
+fd_blitter_pipe_begin(struct fd_context *ctx, bool render_cond, bool discard)
{
fd_fence_ref(&ctx->last_fence, NULL);
ctx->cond_query, ctx->cond_cond, ctx->cond_mode);
if (ctx->batch)
- fd_batch_set_stage(ctx->batch, stage);
+ fd_batch_update_queries(ctx->batch);
ctx->in_discard_blit = discard;
}
info->dst.box.height, info->dst.box.depth);
}
- fd_blitter_pipe_begin(ctx, info->render_condition_enable, discard, FD_STAGE_BLIT);
+ fd_blitter_pipe_begin(ctx, info->render_condition_enable, discard);
/* Initialize the surface. */
default_dst_texture(&dst_templ, dst, info->dst.level,
/* Note: don't use discard=true, if there was something to
* discard, that would have been already handled in fd_clear().
*/
- fd_blitter_pipe_begin(ctx, false, false, FD_STAGE_CLEAR);
+ fd_blitter_pipe_begin(ctx, false, false);
util_blitter_common_clear_setup(blitter, pfb->width, pfb->height,
buffers, NULL, NULL);
return false;
/* TODO we could discard if dst box covers dst level fully.. */
- fd_blitter_pipe_begin(ctx, false, false, FD_STAGE_BLIT);
+ fd_blitter_pipe_begin(ctx, false, false);
util_blitter_copy_texture(ctx->blitter,
dst, dst_level, dstx, dsty, dstz,
src, src_level, src_box);
FD_DIRTY_SHADER_IMAGE = BIT(4),
};
-/* Bitmask of stages in rendering that a particular query is active.
- * Queries will be automatically started/stopped (generating additional
- * fd_hw_sample_period's) on entrance/exit from stages that are
- * applicable to the query.
- *
- * NOTE: set the stage to NULL at end of IB to ensure no query is still
- * active. Things aren't going to work out the way you want if a query
- * is active across IB's (or between tile IB and draw IB)
- */
-enum fd_render_stage {
- FD_STAGE_NULL = 0x00,
- FD_STAGE_DRAW = 0x01,
- FD_STAGE_CLEAR = 0x02,
- /* used for driver internal draws (ie. util_blitter_blit()): */
- FD_STAGE_BLIT = 0x04,
- FD_STAGE_ALL = 0xff,
-};
-
#define MAX_HW_SAMPLE_PROVIDERS 7
struct fd_hw_sample_provider;
struct fd_hw_sample;
struct list_head acc_active_queries;
/*@}*/
- /* Whether we need to walk the acc_active_queries next fd_set_stage() to
- * update active queries (even if stage doesn't change).
+ /* Whether we need to recheck the active_queries list at the next
+ * fd_batch_update_queries().
*/
bool update_active_queries;
void (*query_prepare)(struct fd_batch *batch, uint32_t num_tiles);
void (*query_prepare_tile)(struct fd_batch *batch, uint32_t n,
struct fd_ringbuffer *ring);
- void (*query_set_stage)(struct fd_batch *batch, enum fd_render_stage stage);
+ void (*query_update_batch)(struct fd_batch *batch, bool disable_all);
/* blitter: */
bool (*blit)(struct fd_context *ctx, const struct pipe_blit_info *info);
/* NOTE: needs to be before resource_written(batch->query_buf), otherwise
* query_buf may not be created yet.
*/
- fd_batch_set_stage(batch, FD_STAGE_DRAW);
+ fd_batch_update_queries(batch);
/*
* Figure out the buffers/features we need:
bool fallback = true;
if (ctx->clear) {
- fd_batch_set_stage(batch, FD_STAGE_CLEAR);
+ fd_batch_update_queries(batch);
if (ctx->clear(ctx, buffers, color, depth, stencil)) {
if (fd_mesa_debug & FD_DBG_DCLEAR)
* batch reordering).
*/
void
-fd_acc_query_set_stage(struct fd_batch *batch, enum fd_render_stage stage)
+fd_acc_query_update_batch(struct fd_batch *batch, bool disable_all)
{
struct fd_context *ctx = batch->ctx;
- if (stage != batch->stage || ctx->update_active_queries) {
+ if (disable_all || ctx->update_active_queries) {
struct fd_acc_query *aq;
LIST_FOR_EACH_ENTRY(aq, &ctx->acc_active_queries, node) {
bool batch_change = aq->batch != batch;
bool was_active = aq->batch != NULL;
- bool now_active = stage != FD_STAGE_NULL &&
+ bool now_active = !disable_all &&
(ctx->active_queries || aq->provider->always);
if (was_active && (!now_active || batch_change))
unsigned index);
struct fd_query * fd_acc_create_query2(struct fd_context *ctx, unsigned query_type,
unsigned index, const struct fd_acc_sample_provider *provider);
-void fd_acc_query_set_stage(struct fd_batch *batch, enum fd_render_stage stage);
+void fd_acc_query_update_batch(struct fd_batch *batch, bool disable_all);
void fd_acc_query_register_provider(struct pipe_context *pctx,
const struct fd_acc_sample_provider *provider);
}
void
-fd_hw_query_set_stage(struct fd_batch *batch, enum fd_render_stage stage)
+fd_hw_query_update_batch(struct fd_batch *batch, bool disable_all)
{
struct fd_context *ctx = batch->ctx;
- if (stage != batch->stage || ctx->update_active_queries) {
+ if (disable_all || ctx->update_active_queries) {
struct fd_hw_query *hq;
LIST_FOR_EACH_ENTRY(hq, &batch->ctx->hw_active_queries, list) {
bool was_active = query_active_in_batch(batch, hq);
- bool now_active = stage != FD_STAGE_NULL &&
+ bool now_active = !disable_all &&
(ctx->active_queries || hq->provider->always);
if (now_active && !was_active)
void fd_hw_query_prepare(struct fd_batch *batch, uint32_t num_tiles);
void fd_hw_query_prepare_tile(struct fd_batch *batch, uint32_t n,
		struct fd_ringbuffer *ring);
-void fd_hw_query_set_stage(struct fd_batch *batch, enum fd_render_stage stage);
+/* Start/stop hw queries as needed for the batch; disable_all pauses any
+ * active queries (used when finishing the batch).
+ */
+void fd_hw_query_update_batch(struct fd_batch *batch, bool disable_all);
void fd_hw_query_enable(struct fd_batch *batch, struct fd_ringbuffer *ring);
void fd_hw_query_register_provider(struct pipe_context *pctx,
		const struct fd_hw_sample_provider *provider);
fd_batch_reference(&old_batch, ctx->batch);
if (likely(old_batch))
- fd_batch_set_stage(old_batch, FD_STAGE_NULL);
+ fd_batch_finish_queries(old_batch);
fd_batch_reference(&ctx->batch, NULL);
fd_context_all_dirty(ctx);