freedreno: hold batch references when flushing
author	Rob Clark <robdclark@gmail.com>
Tue, 17 Jul 2018 13:40:23 +0000 (09:40 -0400)
committer	Rob Clark <robdclark@gmail.com>
Tue, 17 Jul 2018 15:00:00 +0000 (11:00 -0400)
It is possible for batches to be freed under our feet when flushing, so
it is best to hold a reference to all of them up-front.

Signed-off-by: Rob Clark <robdclark@gmail.com>
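
An illustrative note (not part of the patch): the non-deferred path of the
new bc_flush() boils down to the take-refs-first pattern sketched below.
The helper name flush_batches_sketch is made up for illustration only;
every function and macro it calls appears in the diff that follows.

static void
flush_batches_sketch(struct fd_batch_cache *cache, struct fd_context *ctx)
{
        struct fd_batch *batches[ARRAY_SIZE(cache->batches)] = {0};
        struct fd_batch *batch;
        unsigned n = 0;

        /* take references while holding the context lock, so nothing can
         * free these batches once the lock is dropped:
         */
        fd_context_lock(ctx);
        foreach_batch(batch, cache, cache->batch_mask) {
                if (batch->ctx == ctx)
                        fd_batch_reference_locked(&batches[n++], batch);
        }
        fd_context_unlock(ctx);

        /* flushing can free *other* batches, but ours stay alive: */
        for (unsigned i = 0; i < n; i++)
                fd_batch_flush(batches[i], false, false);

        /* drop our references only after all the flushing is done: */
        for (unsigned i = 0; i < n; i++)
                fd_batch_reference(&batches[i], NULL);
}
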
src/gallium/drivers/freedreno/freedreno_batch_cache.c

index 07dc1a9..c4640a7 100644
@@ -124,35 +124,54 @@ fd_bc_fini(struct fd_batch_cache *cache)
        _mesa_hash_table_destroy(cache->ht, NULL);
 }
 
-void
-fd_bc_flush(struct fd_batch_cache *cache, struct fd_context *ctx)
+static void
+bc_flush(struct fd_batch_cache *cache, struct fd_context *ctx, bool deferred)
 {
-       struct hash_entry *entry;
-       struct fd_batch *last_batch = NULL;
+       /* fd_batch_flush() (and fd_batch_add_dep() which calls it indirectly)
+        * can cause batches to be unref'd and freed under our feet, so grab
+        * a reference to all the batches we need up-front.
+        */
+       struct fd_batch *batches[ARRAY_SIZE(cache->batches)] = {0};
+       struct fd_batch *batch;
+       unsigned n = 0;
 
-       mtx_lock(&ctx->screen->lock);
+       fd_context_lock(ctx);
 
-       hash_table_foreach(cache->ht, entry) {
-               struct fd_batch *batch = NULL;
-               /* hold a reference since we can drop screen->lock: */
-               fd_batch_reference_locked(&batch, (struct fd_batch *)entry->data);
+       foreach_batch(batch, cache, cache->batch_mask) {
                if (batch->ctx == ctx) {
-                       mtx_unlock(&ctx->screen->lock);
-                       fd_batch_reference(&last_batch, batch);
-                       fd_batch_flush(batch, false, false);
-                       mtx_lock(&ctx->screen->lock);
+                       fd_batch_reference_locked(&batches[n++], batch);
                }
-               fd_batch_reference_locked(&batch, NULL);
        }
 
-       mtx_unlock(&ctx->screen->lock);
+       if (deferred) {
+               struct fd_batch *current_batch = ctx->batch;
+
+               for (unsigned i = 0; i < n; i++) {
+                       if (batches[i] != current_batch) {
+                               fd_batch_add_dep(current_batch, batches[i]);
+                       }
+               }
+
+               fd_context_unlock(ctx);
+       } else {
+               fd_context_unlock(ctx);
+
+               for (unsigned i = 0; i < n; i++) {
+                       fd_batch_flush(batches[i], false, false);
+               }
+       }
 
-       if (last_batch) {
-               fd_batch_sync(last_batch);
-               fd_batch_reference(&last_batch, NULL);
+       for (unsigned i = 0; i < n; i++) {
+               fd_batch_reference(&batches[i], NULL);
        }
 }
 
+void
+fd_bc_flush(struct fd_batch_cache *cache, struct fd_context *ctx)
+{
+       bc_flush(cache, ctx, false);
+}
+
 /* deferred flush doesn't actually flush, but it marks every other
  * batch associated with the context as dependent on the current
  * batch.  So when the current batch gets flushed, all other batches
@@ -161,20 +180,7 @@ fd_bc_flush(struct fd_batch_cache *cache, struct fd_context *ctx)
 void
 fd_bc_flush_deferred(struct fd_batch_cache *cache, struct fd_context *ctx)
 {
-       struct fd_batch *current_batch = ctx->batch;
-       struct hash_entry *entry;
-
-       mtx_lock(&ctx->screen->lock);
-
-       hash_table_foreach(cache->ht, entry) {
-               struct fd_batch *batch = entry->data;
-               if (batch == current_batch)
-                       continue;
-               if (batch->ctx == ctx)
-                       fd_batch_add_dep(current_batch, batch);
-       }
-
-       mtx_unlock(&ctx->screen->lock);
+       bc_flush(cache, ctx, true);
 }
 
 void