return false;
}
- batch = fd_bc_alloc_batch(&ctx->screen->batch_cache, ctx, true);
+ batch = fd_bc_alloc_batch(ctx, true);
fd_batch_update_queries(batch);
fd6_validate_format(ctx, src, info->src.format);
fd6_validate_format(ctx, dst, info->dst.format);
- batch = fd_bc_alloc_batch(&ctx->screen->batch_cache, ctx, true);
+ batch = fd_bc_alloc_batch(ctx, true);
fd_screen_lock(ctx->screen);
}
static void
-bc_flush(struct fd_batch_cache *cache, struct fd_context *ctx,
- bool deferred) assert_dt
+bc_flush(struct fd_context *ctx, bool deferred) assert_dt
{
+ struct fd_batch_cache *cache = &ctx->screen->batch_cache;
+
/* fd_batch_flush() (and fd_batch_add_dep() which calls it indirectly)
* can cause batches to be unref'd and freed under our feet, so grab
* a reference to all the batches we need up-front.
}
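/* Illustrative sketch, not part of the patch: the take-references-up-front
 * pattern the comment above describes.  foreach_batch_in_cache() is a
 * hypothetical iterator standing in for the real batch-cache walk, and
 * fd_batch_reference_locked() is assumed to be the lock-held reference
 * variant; the reference/flush split is the actual idea.
 */
static void
sketch_flush_all(struct fd_context *ctx)
{
   struct fd_batch *batches[32] = {NULL};
   unsigned n = 0;

   /* Take refs under the screen lock so no batch is freed mid-walk: */
   fd_screen_lock(ctx->screen);
   foreach_batch_in_cache (batch, ctx)
      fd_batch_reference_locked(&batches[n++], batch);
   fd_screen_unlock(ctx->screen);

   /* Now it is safe to flush; a flush may drop other batches' last refs: */
   for (unsigned i = 0; i < n; i++) {
      fd_batch_flush(batches[i]);
      fd_batch_reference(&batches[i], NULL);
   }
}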
void
-fd_bc_flush(struct fd_batch_cache *cache, struct fd_context *ctx)
+fd_bc_flush(struct fd_context *ctx)
{
- bc_flush(cache, ctx, false);
+ bc_flush(ctx, false);
}
/* A deferred flush doesn't actually flush, but it marks every other
 * batch associated with the context as dependent on the current
 * batch, so when the current batch gets flushed, all the batches
 * that came before also get flushed.
 */
void
-fd_bc_flush_deferred(struct fd_batch_cache *cache, struct fd_context *ctx)
+fd_bc_flush_deferred(struct fd_context *ctx)
{
- bc_flush(cache, ctx, true);
+ bc_flush(ctx, true);
}
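/* Illustrative sketch, not part of the patch: what the deferred flag in
 * bc_flush() above amounts to.  foreach_batch_in_cache() is again a
 * hypothetical iterator; fd_batch_add_dep() is the dependency helper the
 * earlier comment mentions.
 */
static void
sketch_bc_flush(struct fd_context *ctx, bool deferred)
{
   if (deferred) {
      /* Don't flush now: make the current batch depend on every other
       * batch, so they all get flushed once the current batch does.
       */
      foreach_batch_in_cache (batch, ctx) {
         if (batch != ctx->batch)
            fd_batch_add_dep(ctx->batch, batch);
      }
   } else {
      foreach_batch_in_cache (batch, ctx)
         fd_batch_flush(batch);
   }
}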
void
-fd_bc_dump(struct fd_screen *screen, const char *fmt, ...)
+fd_bc_dump(struct fd_context *ctx, const char *fmt, ...)
{
- struct fd_batch_cache *cache = &screen->batch_cache;
+ struct fd_batch_cache *cache = &ctx->screen->batch_cache;
if (!FD_DBG(MSGS))
return;
- fd_screen_lock(screen);
+ fd_screen_lock(ctx->screen);
va_list ap;
va_start(ap, fmt);
printf("----\n");
- fd_screen_unlock(screen);
+ fd_screen_unlock(ctx->screen);
}
void
}
struct fd_batch *
-fd_bc_alloc_batch(struct fd_batch_cache *cache, struct fd_context *ctx,
- bool nondraw)
+fd_bc_alloc_batch(struct fd_context *ctx, bool nondraw)
{
+ struct fd_batch_cache *cache = &ctx->screen->batch_cache;
struct fd_batch *batch;
/* For normal draw batches, pctx->set_framebuffer_state() handles
}
static struct fd_batch *
-batch_from_key(struct fd_batch_cache *cache, struct fd_batch_key *key,
- struct fd_context *ctx) assert_dt
+batch_from_key(struct fd_context *ctx, struct fd_batch_key *key) assert_dt
{
+ struct fd_batch_cache *cache = &ctx->screen->batch_cache;
struct fd_batch *batch = NULL;
uint32_t hash = fd_batch_key_hash(key);
struct hash_entry *entry =
}
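/* Illustrative sketch, not part of the patch: the lookup half of
 * batch_from_key() above.  The cache->ht member name and the miss path
 * are assumptions; the pre-hashed search/insert helpers reuse the hash
 * already computed with fd_batch_key_hash().  The real code also takes
 * references, which this sketch omits.
 */
static struct fd_batch *
sketch_batch_from_key(struct fd_context *ctx, struct fd_batch_key *key)
{
   struct fd_batch_cache *cache = &ctx->screen->batch_cache;
   uint32_t hash = fd_batch_key_hash(key);
   struct hash_entry *entry =
      _mesa_hash_table_search_pre_hashed(cache->ht, hash, key);

   if (entry) {
      free(key);              /* hit: the key is no longer needed */
      return entry->data;     /* reuse the cached draw batch */
   }

   /* miss: allocate a fresh draw batch and remember it under this key */
   struct fd_batch *batch = fd_bc_alloc_batch(ctx, false);
   _mesa_hash_table_insert_pre_hashed(cache->ht, hash, key, batch);
   return batch;
}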
struct fd_batch *
-fd_batch_from_fb(struct fd_batch_cache *cache, struct fd_context *ctx,
+fd_batch_from_fb(struct fd_context *ctx,
const struct pipe_framebuffer_state *pfb)
{
unsigned idx = 0, n = pfb->nr_cbufs + (pfb->zsbuf ? 1 : 0);
key->num_surfs = idx;
fd_screen_lock(ctx->screen);
- struct fd_batch *batch = batch_from_key(cache, key, ctx);
+ struct fd_batch *batch = batch_from_key(ctx, key);
fd_screen_unlock(ctx->screen);
return batch;
void fd_bc_init(struct fd_batch_cache *cache);
void fd_bc_fini(struct fd_batch_cache *cache);
-void fd_bc_flush(struct fd_batch_cache *cache,
- struct fd_context *ctx) assert_dt;
-void fd_bc_flush_deferred(struct fd_batch_cache *cache,
- struct fd_context *ctx) assert_dt;
-void fd_bc_dump(struct fd_screen *screen, const char *fmt, ...)
+void fd_bc_flush(struct fd_context *ctx) assert_dt;
+void fd_bc_flush_deferred(struct fd_context *ctx) assert_dt;
+void fd_bc_dump(struct fd_context *ctx, const char *fmt, ...)
_util_printf_format(2, 3);
void fd_bc_invalidate_context(struct fd_context *ctx);
void fd_bc_invalidate_batch(struct fd_batch *batch, bool destroy);
void fd_bc_invalidate_resource(struct fd_resource *rsc, bool destroy);
-struct fd_batch *fd_bc_alloc_batch(struct fd_batch_cache *cache,
- struct fd_context *ctx,
+struct fd_batch *fd_bc_alloc_batch(struct fd_context *ctx,
bool nondraw) assert_dt;
struct fd_batch *
-fd_batch_from_fb(struct fd_batch_cache *cache, struct fd_context *ctx,
+fd_batch_from_fb(struct fd_context *ctx,
const struct pipe_framebuffer_state *pfb) assert_dt;
#endif /* FREEDRENO_BATCH_CACHE_H_ */
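/* Usage sketch, not part of the patch: with the batch-cache argument
 * dropped, callers only need the context; the cache is recovered from
 * ctx->screen->batch_cache inside each helper.  example_usage() itself
 * is a hypothetical caller; every function it calls is declared above.
 */
static void
example_usage(struct fd_context *ctx)
{
   struct fd_batch *nondraw = fd_bc_alloc_batch(ctx, true);
   struct fd_batch *draw = fd_batch_from_fb(ctx, &ctx->framebuffer);

   fd_bc_dump(ctx, "%p: batches:\n", ctx);
   fd_bc_flush(ctx);

   fd_batch_reference(&nondraw, NULL);
   fd_batch_reference(&draw, NULL);
}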
if (fencep && !batch) {
batch = fd_context_batch(ctx);
} else if (!batch) {
- fd_bc_dump(ctx->screen, "%p: NULL batch, remaining:\n", ctx);
+ fd_bc_dump(ctx, "%p: NULL batch, remaining:\n", ctx);
return;
}
if (ctx->last_fence) {
fd_fence_repopulate(*fencep, ctx->last_fence);
fd_fence_ref(&fence, *fencep);
- fd_bc_dump(ctx->screen, "%p: (deferred) reuse last_fence, remaining:\n", ctx);
+ fd_bc_dump(ctx, "%p: (deferred) reuse last_fence, remaining:\n", ctx);
goto out;
}
*/
if (ctx->last_fence) {
fd_fence_ref(&fence, ctx->last_fence);
- fd_bc_dump(ctx->screen, "%p: reuse last_fence, remaining:\n", ctx);
+ fd_bc_dump(ctx, "%p: reuse last_fence, remaining:\n", ctx);
goto out;
}
if (flags & PIPE_FLUSH_FENCE_FD)
fence->submit_fence.use_fence_fd = true;
- fd_bc_dump(ctx->screen, "%p: flushing %p<%u>, flags=0x%x, pending:\n", ctx,
+ fd_bc_dump(ctx, "%p: flushing %p<%u>, flags=0x%x, pending:\n", ctx,
batch, batch->seqno, flags);
/* If we get here, we need to flush for a fence, even if there is
if (!ctx->screen->reorder) {
fd_batch_flush(batch);
} else if (flags & PIPE_FLUSH_DEFERRED) {
- fd_bc_flush_deferred(&ctx->screen->batch_cache, ctx);
+ fd_bc_flush_deferred(ctx);
} else {
- fd_bc_flush(&ctx->screen->batch_cache, ctx);
+ fd_bc_flush(ctx);
}
- fd_bc_dump(ctx->screen, "%p: remaining:\n", ctx);
+ fd_bc_dump(ctx, "%p: remaining:\n", ctx);
out:
if (fencep)
if (unlikely(!batch)) {
batch =
- fd_batch_from_fb(&ctx->screen->batch_cache, ctx, &ctx->framebuffer);
+ fd_batch_from_fb(ctx, &ctx->framebuffer);
util_copy_framebuffer_state(&batch->framebuffer, &ctx->framebuffer);
fd_batch_reference(&ctx->batch, batch);
fd_context_all_dirty(ctx);
&ctx->shaderbuf[PIPE_SHADER_COMPUTE];
struct fd_batch *batch, *save_batch = NULL;
- batch = fd_bc_alloc_batch(&ctx->screen->batch_cache, ctx, true);
+ batch = fd_bc_alloc_batch(ctx, true);
fd_batch_reference(&save_batch, ctx->batch);
fd_batch_reference(&ctx->batch, batch);
fd_context_all_dirty(ctx);