}
}
+bool
+agx_any_batch_uses_resource(struct agx_context *ctx, struct agx_resource *rsrc)
+{
+ unsigned idx;
+ foreach_batch(ctx, idx) {
+ struct agx_batch *batch = &ctx->batches.slots[idx];
+
+ if (agx_batch_uses_bo(batch, rsrc->bo))
+ return true;
+ }
+
+ return false;
+}
+
void
agx_flush_readers(struct agx_context *ctx, struct agx_resource *rsrc, const char *reason)
{
if (usage & PIPE_MAP_UNSYNCHRONIZED)
return;
+ /* Both writing and reading need writers flushed */
agx_flush_writer(ctx, rsrc, "Unsynchronized transfer");
- if (usage & PIPE_MAP_WRITE) {
- /* Try to shadow the resource to avoid a flush */
- if ((usage & PIPE_MAP_DISCARD_WHOLE_RESOURCE) && agx_shadow(ctx, rsrc))
- return;
+ /* Additionally, writing needs readers flushed */
+ if (!(usage & PIPE_MAP_WRITE))
+ return;
- /* Otherwise, we need to flush */
- agx_flush_readers(ctx, rsrc, "Unsynchronized write");
- }
+ /* If there are no readers, we're done. We check at the start to
+ * avoid expensive shadowing paths or duplicated checks in this happy path.
+ */
+ if (!agx_any_batch_uses_resource(ctx, rsrc))
+ return;
+
+ /* There are readers. Try to shadow the resource to avoid a flush */
+ if ((usage & PIPE_MAP_DISCARD_WHOLE_RESOURCE) && agx_shadow(ctx, rsrc))
+ return;
+
+ /* Otherwise, we need to flush */
+ agx_flush_readers(ctx, rsrc, "Unsynchronized write");
}
void agx_batch_reads(struct agx_batch *batch, struct agx_resource *rsrc);
void agx_batch_writes(struct agx_batch *batch, struct agx_resource *rsrc);
+bool agx_any_batch_uses_resource(struct agx_context *ctx, struct agx_resource *rsrc);
+
struct agx_batch *agx_get_batch(struct agx_context *ctx);
void agx_batch_cleanup(struct agx_context *ctx, struct agx_batch *batch);