}
fd_resource(info->dst.resource)->valid = true;
- batch->needs_flush = true;
+ fd_batch_needs_flush(batch);
fd_batch_flush(batch);
fd_batch_reference(&batch, NULL);
ASSERTED bool ret = fd_batch_lock_submit(batch);
assert(ret);
- /* Clearing last_fence must come after the batch dependency tracking
- * (resource_read()/resource_write()), as that can trigger a flush,
- * re-populating last_fence
+ /* Marking the batch as needing flush must come after the batch
+ * dependency tracking (resource_read()/resource_write()), as that
+ * can trigger a flush, re-populating the last_fence that
+ * fd_batch_needs_flush() clears
*/
- fd_fence_ref(&ctx->last_fence, NULL);
+ fd_batch_needs_flush(batch);
fd_batch_update_queries(batch);
fd_batch_unlock_submit(batch);
fd_resource(info->dst.resource)->valid = true;
- batch->needs_flush = true;
fd_batch_flush(batch);
fd_batch_reference(&batch, NULL);
#include "util/u_trace.h"
#include "freedreno_context.h"
+#include "freedreno_fence.h"
#include "freedreno_util.h"
#ifdef DEBUG
return ret;
}
+/**
+ * Mark the batch as having something worth flushing (rendering, blit, query,
+ * etc)
+ */
+static inline void
+fd_batch_needs_flush(struct fd_batch *batch)
+{
+ batch->needs_flush = true;
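+ /* Drop the cached last_fence, since it no longer covers the new work;
+ * a subsequent flush will re-populate it.
+ */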
+ fd_fence_ref(&batch->ctx->last_fence, NULL);
+}
+
/* Since we reorder batches and can pause/resume queries (notably for disabling
* queries during some meta operations), we update the current query state for
* the batch before each draw.
fd_blitter_pipe_begin(struct fd_context *ctx, bool render_cond,
bool discard) assert_dt
{
- fd_fence_ref(&ctx->last_fence, NULL);
-
util_blitter_save_vertex_buffer_slot(ctx->blitter, ctx->vtx.vertexbuf.vb);
util_blitter_save_vertex_elements(ctx->blitter, ctx->vtx.vtx);
util_blitter_save_vertex_shader(ctx->blitter, ctx->prog.vs);
.max_index = 1,
.instance_count = MAX2(1, pfb->layers),
};
- struct pipe_draw_start_count_bias draw = {
+ struct pipe_draw_start_count_bias draw = {
.count = 2,
};
- pctx->draw_vbo(pctx, &info, 0, NULL, &draw, 1);
+
+ pctx->draw_vbo(pctx, &info, 0, NULL, &draw, 1);
/* We expect that this should not have triggered a change in pfb: */
assert(util_framebuffer_state_equal(pfb, &ctx->framebuffer));
struct fd_batch *batch = fd_context_batch_locked(ctx);
- ctx->batch->needs_flush = true;
+ fd_batch_needs_flush(batch);
if (ctx->screen->gpu_id >= 500) {
fd_emit_string5(batch->draw, string, len);
batch->back_blit = ctx->in_shadow;
batch->num_draws++;
- /* Clearing last_fence must come after the batch dependency tracking
- * (resource_read()/resource_written()), as that can trigger a flush,
- * re-populating last_fence
+ /* Marking the batch as needing flush must come after the batch
+ * dependency tracking (resource_read()/resource_written()), as that
+ * can trigger a flush, re-populating the last_fence that
+ * fd_batch_needs_flush() clears
*/
- fd_fence_ref(&ctx->last_fence, NULL);
+ fd_batch_needs_flush(batch);
struct pipe_framebuffer_state *pfb = &batch->framebuffer;
DBG("%p: %ux%u num_draws=%u (%s/%s)", batch, pfb->width, pfb->height,
batch->cost += ctx->draw_cost;
for (unsigned i = 0; i < num_draws; i++) {
- if (ctx->draw_vbo(ctx, info, drawid_offset, indirect, &draws[i], index_offset))
- batch->needs_flush = true;
+ ctx->draw_vbo(ctx, info, drawid_offset, indirect, &draws[i], index_offset);
batch->num_vertices += draws[i].count * info->instance_count;
}
batch->invalidated |= cleared_buffers;
batch->resolve |= buffers;
- batch->needs_flush = true;
fd_screen_lock(ctx->screen);
assert(ctx->batch == batch);
}
- /* Clearing last_fence must come after the batch dependency tracking
- * (resource_read()/resource_written()), as that can trigger a flush,
- * re-populating last_fence
+ /* Marking the batch as needing flush must come after the batch
+ * dependency tracking (resource_read()/resource_written()), as that
+ * can trigger a flush, re-populating the last_fence that
+ * fd_batch_needs_flush() clears
*/
- fd_fence_ref(&ctx->last_fence, NULL);
+ fd_batch_needs_flush(batch);
struct pipe_framebuffer_state *pfb = &batch->framebuffer;
DBG("%p: %x %ux%u depth=%f, stencil=%u (%s/%s)", batch, buffers, pfb->width,
info->block[0], info->block[1], info->block[2],
info->grid[0], info->grid[1], info->grid[2]);
- batch->needs_flush = true;
+ fd_batch_needs_flush(batch);
ctx->launch_grid(ctx, info);
fd_batch_flush(batch);
if (batch) {
assert(!fence->batch);
fence->batch = batch;
- batch->needs_flush = true;
+ fd_batch_needs_flush(batch);
} else {
fence->batch = NULL;
ctx->hw_sample_providers[idx]->get_sample(batch, ring);
fd_hw_sample_reference(ctx, &batch->sample_cache[idx], new_samp);
util_dynarray_append(&batch->samples, struct fd_hw_sample *, new_samp);
- batch->needs_flush = true;
+ fd_batch_needs_flush(batch);
}
fd_hw_sample_reference(ctx, &samp, batch->sample_cache[idx]);