Add a specialized agx_batch for compute commands, queued to the CDM instead
of the VDM used for graphics. A sentinel framebuffer width identifies the
compute batch.
Signed-off-by: Alyssa Rosenzweig <alyssa@rosenzweig.io>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/21062>
unsigned batch_idx = agx_batch_idx(batch);
BITSET_SET(ctx->batches.active, batch_idx);
- agx_batch_init_state(batch);
+ if (key->width != AGX_COMPUTE_BATCH_WIDTH)
+ agx_batch_init_state(batch);
}
void
return ctx->batch;
}
+/* Return the context's batch for compute dispatches, creating one if needed.
+ * The batch is keyed by a pipe_framebuffer_state whose width is the
+ * AGX_COMPUTE_BATCH_WIDTH sentinel, guaranteeing it never aliases a graphics
+ * batch (real framebuffers cannot reach that width).
+ */
+struct agx_batch *
+agx_get_compute_batch(struct agx_context *ctx)
+{
+   /* NOTE(review): dirties all tracked state before switching batches —
+    * presumably because the new active batch has none of it emitted yet;
+    * confirm against agx_dirty_all's contract.
+    */
+   agx_dirty_all(ctx);
+
+   /* Sentinel width marks this key as the compute batch */
+   struct pipe_framebuffer_state key = {.width = AGX_COMPUTE_BATCH_WIDTH};
+   ctx->batch = agx_get_batch_for_framebuffer(ctx, &key);
+   return ctx->batch;
+}
+
void
agx_flush_all(struct agx_context *ctx, const char *reason)
{
bool agx_any_batch_uses_resource(struct agx_context *ctx,
struct agx_resource *rsrc);
+/* The maximum framebuffer dimension is 16384, so any larger width is invalid
+ * for graphics. We therefore use the maximum uint16_t value as a sentinel
+ * width identifying the compute batch, guaranteeing compute batches never
+ * mix with graphics batches. This is a hack, but it works.
+ */
+#define AGX_COMPUTE_BATCH_WIDTH 0xFFFF
+
struct agx_batch *agx_get_batch(struct agx_context *ctx);
+struct agx_batch *agx_get_compute_batch(struct agx_context *ctx);
void agx_batch_cleanup(struct agx_context *ctx, struct agx_batch *batch);
/* Blit shaders */