batch->occlusion_buffer.cpu = NULL;
batch->occlusion_buffer.gpu = 0;
- /* There is no more writer for anything we wrote recorded on this context */
- hash_table_foreach(ctx->writer, ent) {
- if (ent->data == batch)
- _mesa_hash_table_remove(ctx->writer, ent);
- }
-
int handle;
AGX_BATCH_FOREACH_BO_HANDLE(batch, handle) {
+ /* There is no more writer on this context for anything we wrote */
+ struct agx_batch *writer = agx_writer_get(ctx, handle);
+ assert((writer == NULL || writer == batch) &&
+ "cannot read while another batch writes");
+
+ agx_writer_remove(ctx, handle);
agx_bo_unreference(agx_lookup_bo(dev, handle));
}
agx_flush_writer_except(struct agx_context *ctx, struct agx_resource *rsrc,
struct agx_batch *except, const char *reason)
{
- struct hash_entry *ent = _mesa_hash_table_search(ctx->writer, rsrc);
+ struct agx_batch *writer = agx_writer_get(ctx, rsrc->bo->handle);
- if (ent && ent->data != except) {
+ if (writer && writer != except) {
perf_debug_ctx(ctx, "Flush writer due to: %s\n", reason);
- agx_flush_batch(ctx, ent->data);
+ agx_flush_batch(ctx, writer);
}
}
agx_batch_writes(struct agx_batch *batch, struct agx_resource *rsrc)
{
struct agx_context *ctx = batch->ctx;
- struct hash_entry *ent = _mesa_hash_table_search(ctx->writer, rsrc);
+ struct agx_batch *writer = agx_writer_get(ctx, rsrc->bo->handle);
agx_flush_readers_except(ctx, rsrc, batch, "Write from other batch");
/* Nothing to do if we're already writing */
- if (ent && ent->data == batch)
+ if (writer == batch)
return;
/* Hazard: writer-after-write, write-after-read */
- if (ent)
+ if (writer)
agx_flush_writer(ctx, rsrc, "Multiple writers");
/* Write is strictly stronger than a read */
agx_batch_reads(batch, rsrc);
/* We are now the new writer */
- assert(!_mesa_hash_table_search(ctx->writer, rsrc));
- _mesa_hash_table_insert(ctx->writer, rsrc, batch);
+ agx_writer_add(ctx, agx_batch_idx(batch), rsrc->bo->handle);
}
/*
struct blitter_context *blitter;
- /* Map of agx_resource to agx_batch that writes that resource */
- struct hash_table *writer;
+ /* Map of GEM handle to (batch index + 1) that (conservatively) writes that
+ * BO, or 0 if no writer.
+ */
+ struct util_dynarray writer;
struct agx_meta_cache meta;
};
+/* Record the batch with index @batch_index as the writer of GEM @handle.
+ * Each table entry stores (batch index + 1), with 0 meaning "no writer",
+ * so freshly zeroed entries are already in the "no writer" state.
+ * NOTE(review): assumes ctx->writer is a util_dynarray of uint8_t sized in
+ * elements == bytes — confirm at context-create/destroy sites.
+ */
+static void
+agx_writer_add(struct agx_context *ctx, uint8_t batch_index, unsigned handle)
+{
+ assert(batch_index < AGX_MAX_BATCHES && "invariant");
+ static_assert(AGX_MAX_BATCHES < 0xFF, "no overflow on addition");
+
+ /* If we need to grow, double the capacity so insertion is amortized O(1). */
+ if (unlikely(handle >= ctx->writer.size)) {
+ unsigned new_size =
+ MAX2(ctx->writer.capacity * 2, util_next_power_of_two(handle + 1));
+ unsigned grow = new_size - ctx->writer.size;
+
+ /* Zero-fill the newly exposed entries: 0 encodes "no writer" */
+ memset(util_dynarray_grow(&ctx->writer, uint8_t, grow), 0,
+ grow * sizeof(uint8_t));
+ }
+
+ /* There is now room */
+ uint8_t *value = util_dynarray_element(&ctx->writer, uint8_t, handle);
+ assert((*value) == 0 && "there should be no existing writer");
+ *value = batch_index + 1;
+}
+
+/* Look up the batch that writes GEM @handle, or NULL if there is no
+ * writer. Handles beyond the end of the table have never had a writer
+ * recorded, so they implicitly map to NULL.
+ */
+static struct agx_batch *
+agx_writer_get(struct agx_context *ctx, unsigned handle)
+{
+ if (handle >= ctx->writer.size)
+ return NULL;
+
+ uint8_t value = *util_dynarray_element(&ctx->writer, uint8_t, handle);
+
+ /* Decode the (batch index + 1) encoding; 0 means "no writer" */
+ if (value > 0)
+ return &ctx->batches.slots[value - 1];
+ else
+ return NULL;
+}
+
+/* Forget any recorded writer for GEM @handle. Out-of-range handles have
+ * no table entry (hence no writer), so there is nothing to clear.
+ */
+static void
+agx_writer_remove(struct agx_context *ctx, unsigned handle)
+{
+ if (handle >= ctx->writer.size)
+ return;
+
+ uint8_t *value = util_dynarray_element(&ctx->writer, uint8_t, handle);
+ *value = 0;
+}
+
static inline struct agx_context *
agx_context(struct pipe_context *pctx)
{