* just hard coded. If we start exposing more countables than we
* have counters, we will need to be more clever.
*/
- struct fd_batch *batch = fd_context_batch_locked(ctx);
+ struct fd_batch *batch = fd_context_batch(ctx);
fd_wfi(batch, ring);
OUT_PKT0(ring, REG_A4XX_CP_PERFCTR_CP_SEL_0, 1);
OUT_RING(ring, CP_ALWAYS_COUNT);
- fd_batch_unlock_submit(batch);
fd_batch_reference(&batch, NULL);
}
fd_batch_resource_write(batch, rsc);
fd_screen_unlock(ctx->screen);
- ASSERTED bool ret = fd_batch_lock_submit(batch);
- assert(ret);
+ assert(!batch->flushed);
/* Marking the batch as needing flush must come after the batch
* dependency tracking (resource_read()/resource_write()), as that
fd_wfi(batch, batch->draw);
fd6_cache_inv(batch, batch->draw);
- fd_batch_unlock_submit(batch);
-
fd_batch_flush(batch);
fd_batch_reference(&batch, NULL);
fd_screen_unlock(ctx->screen);
- ASSERTED bool ret = fd_batch_lock_submit(batch);
- assert(ret);
+ assert(!batch->flushed);
/* Marking the batch as needing flush must come after the batch
* dependency tracking (resource_read()/resource_write()), as that
fd_wfi(batch, batch->draw);
fd6_cache_inv(batch, batch->draw);
- fd_batch_unlock_submit(batch);
-
fd_batch_flush(batch);
fd_batch_reference(&batch, NULL);
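(Illustration only, not part of the patch: the ordering described by the "Marking the batch as needing flush must come after the batch dependency tracking" comment in the two hunks above reduces to roughly the following caller shape, using only helpers that appear elsewhere in this diff.)

   /* Sketch, not part of the patch: dependency tracking, then needs_flush,
    * then cmdstream emission, then flush. */
   fd_screen_lock(ctx->screen);
   fd_batch_resource_write(batch, rsc);   /* dependency tracking under the screen lock */
   fd_screen_unlock(ctx->screen);

   assert(!batch->flushed);               /* replaces the old fd_batch_lock_submit()/assert(ret) pair */
   fd_batch_needs_flush(batch);           /* only after resource_read()/resource_write() */

   /* ... emit query/draw cmdstream into batch->draw ... */
   fd_wfi(batch, batch->draw);
   fd6_cache_inv(batch, batch->draw);

   fd_batch_flush(batch);
   fd_batch_reference(&batch, NULL);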
batch->ctx = ctx;
batch->nondraw = nondraw;
- simple_mtx_init(&batch->submit_lock, mtx_plain);
-
batch->resources =
_mesa_set_create(NULL, _mesa_hash_pointer, _mesa_key_pointer_equal);
util_copy_framebuffer_state(&batch->framebuffer, NULL);
batch_fini(batch);
- simple_mtx_destroy(&batch->submit_lock);
-
free(batch->key);
free(batch);
fd_screen_lock(ctx->screen);
{
DBG("%p: needs_flush=%d", batch, batch->needs_flush);
- if (!fd_batch_lock_submit(batch))
+ if (batch->flushed)
return;
batch->needs_flush = false;
assert(batch->reference.count > 0);
cleanup_submit(batch);
- fd_batch_unlock_submit(batch);
}
/* NOTE: could drop the last ref to batch
struct fd_context *ctx;
- /* emit_lock serializes cmdstream emission and flush. Acquire before
- * screen->lock.
- */
- simple_mtx_t submit_lock;
-
/* do we need to mem2gmem before rendering. We don't, if for example,
* there was a glClear() that invalidated the entire previous buffer
* contents. Keep track of which buffer(s) are cleared, or needs
*ptr = batch;
}
-static inline void
-fd_batch_unlock_submit(struct fd_batch *batch)
-{
- simple_mtx_unlock(&batch->submit_lock);
-}
-
-/**
- * Returns true if emit-lock was acquired, false if failed to acquire lock,
- * ie. batch already flushed.
- */
-static inline bool MUST_CHECK
-fd_batch_lock_submit(struct fd_batch *batch)
-{
- simple_mtx_lock(&batch->submit_lock);
- bool ret = !batch->flushed;
- if (!ret)
- fd_batch_unlock_submit(batch);
- return ret;
-}
-
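(For context, a sketch that is not part of the patch: before this change, cmdstream emission had to hold the submit lock, and fd_batch_lock_submit() could fail if the batch had already been flushed. One of the caller shapes being removed, as seen in the batch_flush hunk above, was roughly:)

   /* Pre-patch caller shape removed by this change, sketched for reference: */
   if (!fd_batch_lock_submit(batch))
      return;                             /* batch already flushed, nothing to do */
   /* ... emit cmdstream / flush work with the submit lock held ... */
   fd_batch_unlock_submit(batch);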
/**
* Mark the batch as having something worth flushing (rendering, blit, query,
* etc)
if (!ctx->batch)
return;
- struct fd_batch *batch = fd_context_batch_locked(ctx);
+ struct fd_batch *batch = fd_context_batch(ctx);
fd_batch_needs_flush(batch);
fd_emit_string(batch->draw, string, len);
}
- fd_batch_unlock_submit(batch);
fd_batch_reference(&batch, NULL);
}
}
/**
- * Return a locked reference to the current batch. A batch with emit
- * lock held is protected against flushing while the lock is held.
- * The emit-lock should be acquired before screen-lock. The emit-lock
- * should be held while emitting cmdstream.
- */
-struct fd_batch *
-fd_context_batch_locked(struct fd_context *ctx)
-{
- struct fd_batch *batch = NULL;
-
- while (!batch) {
- batch = fd_context_batch(ctx);
- if (!fd_batch_lock_submit(batch)) {
- fd_batch_reference(&batch, NULL);
- }
- }
-
- return batch;
-}
-
-/**
* Return a reference to the current non-draw (compute/blit) batch.
*/
struct fd_batch *
batch_draw_tracking(batch, info, indirect);
- while (unlikely(!fd_batch_lock_submit(batch))) {
+ while (unlikely(batch->flushed)) {
/* The current batch was flushed in batch_draw_tracking()
* so start anew. We know this won't happen a second time
* since we are dealing with a fresh batch:
assert(!batch->flushed);
- fd_batch_unlock_submit(batch);
fd_batch_check_size(batch);
fd_batch_reference(&batch, NULL);
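(A sketch, not part of the patch, of the retry that the loop above, and the matching one in the clear path below, performs once dependency tracking has flushed the current batch. The loop body is abbreviated in the hunk, so the exact statements here are an assumption based on the "start anew" comment.)

   /* Assumed shape of the retry loop, for illustration only: */
   struct fd_batch *batch = fd_context_batch(ctx);
   batch_draw_tracking(batch, info, indirect);
   while (unlikely(batch->flushed)) {
      /* tracking flushed the current batch, so start anew with a fresh one */
      fd_batch_reference(&batch, NULL);
      batch = fd_context_batch(ctx);
      batch_draw_tracking(batch, info, indirect);
   }
   assert(!batch->flushed);               /* a fresh batch cannot already be flushed */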
batch_clear_tracking(batch, buffers);
- while (unlikely(!fd_batch_lock_submit(batch))) {
+ while (unlikely(batch->flushed)) {
/* The current batch was flushed in batch_clear_tracking()
* so start anew. We know this won't happen a second time
* since we are dealing with a fresh batch:
assert(!batch->flushed);
- fd_batch_unlock_submit(batch);
-
if (fallback) {
fd_blitter_clear(pctx, buffers, color, depth, stencil);
}
* need to just emit the capture at this moment.
*/
if (skip_begin_query(q->type)) {
- struct fd_batch *batch = fd_context_batch_locked(ctx);
+ struct fd_batch *batch = fd_context_batch(ctx);
fd_acc_query_resume(aq, batch);
- fd_batch_unlock_submit(batch);
fd_batch_reference(&batch, NULL);
}
}
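(Illustrative recap, not part of the patch: with fd_context_batch_locked() gone, the query paths above and below all take a plain reference from fd_context_batch() and rely on the batch not being flushed, instead of holding a lock across emission. Roughly:)

   /* Post-patch pattern used by the query hunks, sketched for reference: */
   struct fd_batch *batch = fd_context_batch(ctx);
   assert(!batch->flushed);
   /* ... emit into batch->draw ... */
   fd_batch_reference(&batch, NULL);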
list_delinit(&aq->node);
/* mark the result available: */
- struct fd_batch *batch = fd_context_batch_locked(ctx);
+ struct fd_batch *batch = fd_context_batch(ctx);
struct fd_ringbuffer *ring = batch->draw;
struct fd_resource *rsc = fd_resource(aq->prsc);
OUT_RING(ring, 0); /* high 32b */
}
- fd_batch_unlock_submit(batch);
fd_batch_reference(&batch, NULL);
}
{
struct fd_acc_query *aq = fd_acc_query(q);
const struct fd_acc_sample_provider *p = aq->provider;
- struct fd_batch *batch = fd_context_batch_locked(ctx);
+ struct fd_batch *batch = fd_context_batch(ctx);
assert(ctx->screen->gen >= 5);
p->result_resource(aq, ring, result_type, index, dst, offset);
}
- fd_batch_unlock_submit(batch);
-
/* If we are told to wait for results, then we need to flush. For an IMR
* this would just be a wait on the GPU, but the expectation is that draws
* following this one see the results of the query, which means we need to
static void
fd_hw_begin_query(struct fd_context *ctx, struct fd_query *q) assert_dt
{
- struct fd_batch *batch = fd_context_batch_locked(ctx);
+ struct fd_batch *batch = fd_context_batch(ctx);
struct fd_hw_query *hq = fd_hw_query(q);
DBG("%p", q);
assert(list_is_empty(&hq->list));
list_addtail(&hq->list, &ctx->hw_active_queries);
- fd_batch_unlock_submit(batch);
fd_batch_reference(&batch, NULL);
}
static void
fd_hw_end_query(struct fd_context *ctx, struct fd_query *q) assert_dt
{
- struct fd_batch *batch = fd_context_batch_locked(ctx);
+ struct fd_batch *batch = fd_context_batch(ctx);
struct fd_hw_query *hq = fd_hw_query(q);
DBG("%p", q);
/* remove from active list: */
list_delinit(&hq->list);
- fd_batch_unlock_submit(batch);
fd_batch_reference(&batch, NULL);
}