i965: Don't grow batch/state buffer on every emit after an overflow.
author Kenneth Graunke <kenneth@whitecape.org>
Tue, 28 Nov 2017 16:20:39 +0000 (08:20 -0800)
committer Kenneth Graunke <kenneth@whitecape.org>
Thu, 30 Nov 2017 01:30:35 +0000 (17:30 -0800)
Once we reach the intended size of the buffer (BATCH_SZ or STATE_SZ), we
try to flush.  If we're not allowed to flush, we resort to growing the
buffer so that there's space for the data we need to emit.
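A condensed sketch of the old decision, simplified from the batch hunk
below (map bookkeeping and asserts omitted):

   /* Old logic: BATCH_SZ gates both the flush and the grow. */
   if (batch_used + sz >= BATCH_SZ) {
      if (!batch->no_wrap)
         intel_batchbuffer_flush(brw);
      else
         grow_buffer(...);   /* runs even if bo->size already exceeds BATCH_SZ */
   }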

We accidentally got the threshold wrong.  The first non-wrappable call
beyond (e.g.) STATE_SZ would grow the buffer to floor(1.5 * STATE_SZ).
The next call would see that we were still beyond STATE_SZ and think we
needed to grow a second time, when the buffer was already large enough.
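Concretely, assuming an illustrative STATE_SZ of 64kB: the first
non-wrappable emission past 64kB grows the state BO to 96kB (1.5x).  If
the next emission lands at, say, offset 70kB, the old
"offset + size >= STATE_SZ" check fires again and grows the BO to
144kB, even though the 96kB buffer still had 26kB free.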

We still want to flush when we hit STATE_SZ, but for growing, we should
use the actual size of the buffer as the threshold.  This way, we only
grow when actually necessary.
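The resulting control flow, condensed from the hunks below:

   /* New logic: flush at the nominal size; grow only when the BO
    * backing the buffer is actually out of room.
    */
   if (batch_used + sz >= BATCH_SZ && !batch->no_wrap)
      intel_batchbuffer_flush(brw);
   else if (batch_used + sz >= batch->bo->size)
      grow_buffer(...);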

v2: Simplify the control flow (suggested by Jordan)

Fixes: 2dfc119f22f257082ab0 "i965: Grow the batch/state buffers if we need space and can't flush."
Reviewed-by: Jordan Justen <jordan.l.justen@intel.com>
src/mesa/drivers/dri/i965/intel_batchbuffer.c

index 337bb65..80b02a7 100644
@@ -365,17 +365,15 @@ intel_batchbuffer_require_space(struct brw_context *brw, GLuint sz,
    }
 
    const unsigned batch_used = USED_BATCH(*batch) * 4;
-   if (batch_used + sz >= BATCH_SZ) {
-      if (!batch->no_wrap) {
-         intel_batchbuffer_flush(brw);
-      } else {
-         const unsigned new_size =
-            MIN2(batch->bo->size + batch->bo->size / 2, MAX_BATCH_SIZE);
-         grow_buffer(brw, &batch->bo, &batch->map, &batch->batch_cpu_map,
-                     batch_used, new_size);
-         batch->map_next = (void *) batch->map + batch_used;
-         assert(batch_used + sz < batch->bo->size);
-      }
+   if (batch_used + sz >= BATCH_SZ && !batch->no_wrap) {
+      intel_batchbuffer_flush(brw);
+   } else if (batch_used + sz >= batch->bo->size) {
+      const unsigned new_size =
+         MIN2(batch->bo->size + batch->bo->size / 2, MAX_BATCH_SIZE);
+      grow_buffer(brw, &batch->bo, &batch->map, &batch->batch_cpu_map,
+                  batch_used, new_size);
+      batch->map_next = (void *) batch->map + batch_used;
+      assert(batch_used + sz < batch->bo->size);
    }
 
    /* The intel_batchbuffer_flush() calls above might have changed
@@ -1066,18 +1064,16 @@ brw_state_batch(struct brw_context *brw,
 
    uint32_t offset = ALIGN(batch->state_used, alignment);
 
-   if (offset + size >= STATE_SZ) {
-      if (!batch->no_wrap) {
-         intel_batchbuffer_flush(brw);
-         offset = ALIGN(batch->state_used, alignment);
-      } else {
-         const unsigned new_size =
-            MIN2(batch->state_bo->size + batch->state_bo->size / 2,
-                 MAX_STATE_SIZE);
-         grow_buffer(brw, &batch->state_bo, &batch->state_map,
-                     &batch->state_cpu_map, batch->state_used, new_size);
-         assert(offset + size < batch->state_bo->size);
-      }
+   if (offset + size >= STATE_SZ && !batch->no_wrap) {
+      intel_batchbuffer_flush(brw);
+      offset = ALIGN(batch->state_used, alignment);
+   } else if (offset + size >= batch->state_bo->size) {
+      const unsigned new_size =
+         MIN2(batch->state_bo->size + batch->state_bo->size / 2,
+              MAX_STATE_SIZE);
+      grow_buffer(brw, &batch->state_bo, &batch->state_map,
+                  &batch->state_cpu_map, batch->state_used, new_size);
+      assert(offset + size < batch->state_bo->size);
    }
 
    if (unlikely(INTEL_DEBUG & DEBUG_BATCH)) {