fd6_blitter_init(pctx);
fd6_ctx->border_color_uploader = u_upload_create(pctx, 4096, 0,
- PIPE_USAGE_STREAM, 0);
+ PIPE_USAGE_STREAM, 0);
- return pctx;
+ return fd_context_init_tc(pctx, flags);
}
struct fd_context *ctx = fd_context(data);
struct fd6_program_state *state = CALLOC_STRUCT(fd6_program_state);
+ tc_assert_driver_thread(ctx->tc);
+
/* if we have streamout, use full VS in binning pass, as the
* binning pass VS will have outputs on other than position/psize
* stripped out:
fd6_validate_format(struct fd_context *ctx, struct fd_resource *rsc,
enum pipe_format format)
{
+ tc_assert_driver_thread(ctx->tc);
+
if (!rsc->layout.ubwc)
return;
}
}
+/* NOTE: this can be called from either the driver thread or the frontend
+ * thread, depending on where the last unref comes from.
+ */
static void
fd6_sampler_view_destroy(struct pipe_context *pctx,
struct pipe_sampler_view *_view)
{
struct fd_batch *batch = NULL;
+ tc_assert_driver_thread(ctx->tc);
+
fd_batch_reference(&batch, ctx->batch);
if (unlikely(!batch)) {
pctx->destroy(pctx);
return NULL;
}
+
+/* Optionally wrap the context in a u_threaded_context.  Returns the
+ * threaded context on success, or the original pctx when threading is
+ * not requested (or not applicable); either way the caller must use the
+ * returned pipe_context from this point on.
+ */
+struct pipe_context *
+fd_context_init_tc(struct pipe_context *pctx, unsigned flags)
+{
+   struct fd_context *ctx = fd_context(pctx);
+
+   /* Only thread when the state tracker explicitly opts in. */
+   if (!(flags & PIPE_CONTEXT_PREFER_THREADED))
+      return pctx;
+
+   /* Clover (compute-only) is unsupported. */
+   if (flags & PIPE_CONTEXT_COMPUTE_ONLY)
+      return pctx;
+
+   /* ctx->tc is filled in by threaded_context_create so the driver can
+    * later assert which thread it is running on (tc_assert_driver_thread).
+    */
+   struct pipe_context *tc = threaded_context_create(pctx,
+         &ctx->screen->transfer_pool,
+         fd_replace_buffer_storage,
+         NULL, // TODO fd_create_fence for async flush
+         &ctx->tc);
+
+   /* Cap the amount of mapped-but-not-yet-flushed memory tc may hold to
+    * 1/16 of total RAM; skipped if the query fails or tc creation fell
+    * back to the unthreaded context.
+    */
+   uint64_t total_ram;
+   if (tc && tc != pctx && os_get_total_physical_memory(&total_ram)) {
+      ((struct threaded_context *) tc)->bytes_mapped_limit = total_ram / 16;
+   }
+
+   return tc;
+}
struct fd_context {
struct pipe_context base;
+ struct threaded_context *tc;
+
struct list_head node; /* node in screen->context_list */
/* We currently need to serialize emitting GMEM batches, because of
struct pipe_context * fd_context_init(struct fd_context *ctx,
struct pipe_screen *pscreen, const uint8_t *primtypes,
void *priv, unsigned flags);
+struct pipe_context * fd_context_init_tc(struct pipe_context *pctx, unsigned flags);
void fd_context_destroy(struct pipe_context *pctx) assert_dt;
int ret;
if (pending(rsc, false)) {
+ assert(!q->base.flushed);
+ tc_assert_driver_thread(ctx->tc);
+
/* piglit spec@arb_occlusion_query@occlusion_query_conform
* test, and silly apps perhaps, get stuck in a loop trying
* to get query result forever with wait==false.. we don't
}
if (rsc->track->write_batch) {
+ tc_assert_driver_thread(ctx->tc);
fd_context_access_begin(ctx);
fd_batch_flush(rsc->track->write_batch);
fd_context_access_end(ctx);
struct fd_resource *rsc = fd_resource(period->end->prsc);
if (pending(rsc, false)) {
+ assert(!q->base.flushed);
+ tc_assert_driver_thread(ctx->tc);
+
/* piglit spec@arb_occlusion_query@occlusion_query_conform
* test, and silly apps perhaps, get stuck in a loop trying
* to get query result forever with wait==false.. we don't
struct fd_resource *rsc = fd_resource(start->prsc);
if (rsc->track->write_batch) {
+ tc_assert_driver_thread(ctx->tc);
fd_context_access_begin(ctx);
fd_batch_flush(rsc->track->write_batch);
fd_context_access_end(ctx);
void
fd_resource_uncompress(struct fd_context *ctx, struct fd_resource *rsc)
{
+ tc_assert_driver_thread(ctx->tc);
+
bool success =
fd_try_shadow_resource(ctx, rsc, 0, NULL, FD_FORMAT_MOD_QCOM_TILED);
char *buf;
int ret = 0;
+ tc_assert_driver_thread(ctx->tc);
+
/* we always need a staging texture for tiled buffers:
*
* TODO we might sometimes want to *also* shadow the resource to avoid