util_queue_fence_init(&glthread->batches[i].fence);
}
glthread->next_batch = &glthread->batches[glthread->next];
+ glthread->used = 0;
glthread->enabled = true;
glthread->stats.queue = &glthread->queue;
if (!glthread->enabled)
return;
- struct glthread_batch *next = glthread->next_batch;
- if (!next->used)
+ if (!glthread->used)
return;
/* Pin threads regularly to the same Zen CCX that the main thread is
}
}
+ struct glthread_batch *next = glthread->next_batch;
+
/* Debug: execute the batch immediately from this thread.
*
* Note that glthread_unmarshal_batch() changes the dispatch table so we'll
return;
}
- p_atomic_add(&glthread->stats.num_offloaded_items, next->used);
+ p_atomic_add(&glthread->stats.num_offloaded_items, glthread->used);
+ next->used = glthread->used;
util_queue_add_job(&glthread->queue, next, &next->fence,
glthread_unmarshal_batch, NULL, 0);
glthread->last = glthread->next;
glthread->next = (glthread->next + 1) % MARSHAL_MAX_BATCHES;
glthread->next_batch = &glthread->batches[glthread->next];
+ glthread->used = 0;
}
/**
synced = true;
}
- if (next->used) {
- p_atomic_add(&glthread->stats.num_direct_items, next->used);
+ if (glthread->used) {
+ p_atomic_add(&glthread->stats.num_direct_items, glthread->used);
+ next->used = glthread->used;
+ glthread->used = 0;
/* Since glthread_unmarshal_batch changes the dispatch to direct,
* restore it after it's done.
/** The worker thread will access the context with this. */
struct gl_context *ctx;
- /** Amount of data used by batch commands, in bytes. */
+ /**
+ * Amount of data used by batch commands, in bytes.
+ * While the batch is being filled, this stays 0: glthread::used holds the
+ * running total and is copied into this field when the batch is submitted.
+ */
int used;
/** Data contained in the command buffer. */
/** Index of the batch being filled and about to be submitted. */
unsigned next;
+ /** Amount of data filled in next_batch, in bytes. */
+ int used;
+
/** Upload buffer. */
struct gl_buffer_object *upload_buffer;
uint8_t *upload_ptr;
int size)
{
struct glthread_state *glthread = &ctx->GLThread;
- struct glthread_batch *next = glthread->next_batch;
- struct marshal_cmd_base *cmd_base;
- if (unlikely(next->used + size > MARSHAL_MAX_CMD_SIZE)) {
+ if (unlikely(glthread->used + size > MARSHAL_MAX_CMD_SIZE))
_mesa_glthread_flush_batch(ctx);
- next = glthread->next_batch;
- }
+ struct glthread_batch *next = glthread->next_batch;
const int aligned_size = align(size, 8);
- cmd_base = (struct marshal_cmd_base *)&next->buffer[next->used];
- next->used += aligned_size;
+ struct marshal_cmd_base *cmd_base =
+ (struct marshal_cmd_base *)&next->buffer[glthread->used];
+ glthread->used += aligned_size;
cmd_base->cmd_id = cmd_id;
cmd_base->cmd_size = aligned_size;
return cmd_base;