* thread's comm truncating the interesting part of the
* process name.
*/
- util_queue_init(&msm_dev->submit_queue, "sq", 8, 1, 0);
+ util_queue_init(&msm_dev->submit_queue, "sq", 8, 1, 0, NULL);
}
dev->bo_size = sizeof(struct msm_bo);
}
static void
-msm_submit_sp_flush_execute(void *job, int thread_index)
+msm_submit_sp_flush_execute(void *job, void *gdata, int thread_index)
{
struct fd_submit *submit = job;
struct msm_submit_sp *msm_submit = to_msm_submit_sp(submit);
}
static void
-msm_submit_sp_flush_cleanup(void *job, int thread_index)
+msm_submit_sp_flush_cleanup(void *job, void *gdata, int thread_index)
{
struct fd_submit *submit = job;
fd_submit_del(submit);
offsetof(struct pipe_draw_info, min_index)
static void
-tc_batch_execute(void *job, UNUSED int thread_index)
+tc_batch_execute(void *job, UNUSED void *gdata, int thread_index)
{
struct tc_batch *batch = job;
struct pipe_context *pipe = batch->tc->pipe;
if (next->num_total_slots) {
p_atomic_add(&tc->num_direct_slots, next->num_total_slots);
tc->bytes_mapped_estimate = 0;
- tc_batch_execute(next, 0);
+ tc_batch_execute(next, NULL, 0);
tc_begin_next_buffer_list(tc);
synced = true;
}
* from the queue before being executed, so keep one tc_batch slot for that
* execution. Also, keep one unused slot for an unflushed batch.
*/
- if (!util_queue_init(&tc->queue, "gdrv", TC_MAX_BATCHES - 2, 1, 0))
+ if (!util_queue_init(&tc->queue, "gdrv", TC_MAX_BATCHES - 2, 1, 0, NULL))
goto fail;
for (unsigned i = 0; i < TC_MAX_BATCHES; i++) {
bool ret = util_queue_init(&utctx->queue, "traceq", 256, 1,
UTIL_QUEUE_INIT_USE_MINIMUM_PRIORITY |
- UTIL_QUEUE_INIT_RESIZE_IF_FULL);
+ UTIL_QUEUE_INIT_RESIZE_IF_FULL, NULL);
assert(ret);
if (!ret)
#endif
static void
-process_chunk(void *job, int thread_index)
+process_chunk(void *job, void *gdata, int thread_index)
{
struct u_trace_chunk *chunk = job;
struct u_trace_context *utctx = chunk->utctx;
}
static void
-cleanup_chunk(void *job, int thread_index)
+cleanup_chunk(void *job, void *gdata, int thread_index)
{
ralloc_free(job);
}
}
static void
-create_initial_variants_async(void *job, int thread_index)
+create_initial_variants_async(void *job, void *gdata, int thread_index)
{
struct ir3_shader_state *hwcso = job;
struct pipe_debug_callback debug = {};
}
static void
-create_initial_compute_variants_async(void *job, int thread_index)
+create_initial_compute_variants_async(void *job, void *gdata, int thread_index)
{
struct ir3_shader_state *hwcso = job;
struct ir3_shader *shader = hwcso->shader;
util_queue_init(&screen->compile_queue, "ir3q", 64, num_threads,
UTIL_QUEUE_INIT_RESIZE_IF_FULL |
- UTIL_QUEUE_INIT_SET_FULL_THREAD_AFFINITY);
+ UTIL_QUEUE_INIT_SET_FULL_THREAD_AFFINITY, NULL);
pscreen->finalize_nir = ir3_screen_finalize_nir;
pscreen->set_max_shader_compiler_threads =
}
/* Asynchronous compute shader compilation. */
-static void si_create_compute_state_async(void *job, int thread_index)
+static void si_create_compute_state_async(void *job, void *gdata, int thread_index)
{
struct si_compute *program = (struct si_compute *)job;
struct si_shader_selector *sel = &program->sel;
if (!util_queue_init(
&sscreen->shader_compiler_queue, "sh", 64, num_comp_hi_threads,
- UTIL_QUEUE_INIT_RESIZE_IF_FULL | UTIL_QUEUE_INIT_SET_FULL_THREAD_AFFINITY)) {
+ UTIL_QUEUE_INIT_RESIZE_IF_FULL | UTIL_QUEUE_INIT_SET_FULL_THREAD_AFFINITY, NULL)) {
si_destroy_shader_cache(sscreen);
FREE(sscreen);
glsl_type_singleton_decref();
if (!util_queue_init(&sscreen->shader_compiler_queue_low_priority, "shlo", 64,
num_comp_lo_threads,
UTIL_QUEUE_INIT_RESIZE_IF_FULL | UTIL_QUEUE_INIT_SET_FULL_THREAD_AFFINITY |
- UTIL_QUEUE_INIT_USE_MINIMUM_PRIORITY)) {
+ UTIL_QUEUE_INIT_USE_MINIMUM_PRIORITY, NULL)) {
si_destroy_shader_cache(sscreen);
FREE(sscreen);
glsl_type_singleton_decref();
si_shader_init_pm4_state(sscreen, shader);
}
-static void si_build_shader_variant_low_priority(void *job, int thread_index)
+static void si_build_shader_variant_low_priority(void *job, void *gdata, int thread_index)
{
struct si_shader *shader = (struct si_shader *)job;
* si_shader_selector initialization. Since it can be done asynchronously,
* there is no way to report compile failures to applications.
*/
-static void si_init_shader_selector_async(void *job, int thread_index)
+static void si_init_shader_selector_async(void *job, void *gdata, int thread_index)
{
struct si_shader_selector *sel = (struct si_shader_selector *)job;
struct si_screen *sscreen = sel->screen;
batch->last_batch_id = last_state->fence.batch_id;
} else {
if (zink_screen(ctx->base.screen)->threaded)
- util_queue_init(&batch->flush_queue, "zfq", 8, 1, UTIL_QUEUE_INIT_RESIZE_IF_FULL);
+ util_queue_init(&batch->flush_queue, "zfq", 8, 1, UTIL_QUEUE_INIT_RESIZE_IF_FULL, NULL);
}
if (!ctx->queries_disabled)
zink_resume_queries(ctx, batch);
}
static void
-post_submit(void *data, int thread_index)
+post_submit(void *data, void *gdata, int thread_index)
{
struct zink_batch_state *bs = data;
}
static void
-submit_queue(void *data, int thread_index)
+submit_queue(void *data, void *gdata, int thread_index)
{
struct zink_batch_state *bs = data;
VkSubmitInfo si = {0};
submit_queue, post_submit, 0);
} else {
batch->state->queue = screen->queue;
- submit_queue(batch->state, 0);
- post_submit(batch->state, 0);
+ submit_queue(batch->state, NULL, 0);
+ post_submit(batch->state, NULL, 0);
}
}
return true;
}
-static void amdgpu_cs_submit_ib(void *job, int thread_index)
+static void amdgpu_cs_submit_ib(void *job, void *gdata, int thread_index)
{
struct amdgpu_cs *acs = (struct amdgpu_cs*)job;
struct amdgpu_winsys *ws = acs->ws;
(void) simple_mtx_init(&aws->bo_export_table_lock, mtx_plain);
if (!util_queue_init(&aws->cs_queue, "cs", 8, 1,
- UTIL_QUEUE_INIT_RESIZE_IF_FULL)) {
+ UTIL_QUEUE_INIT_RESIZE_IF_FULL, NULL)) {
amdgpu_winsys_destroy(&ws->base);
simple_mtx_unlock(&dev_tab_mutex);
return NULL;
return cs->csc->num_relocs;
}
-void radeon_drm_cs_emit_ioctl_oneshot(void *job, int thread_index)
+void radeon_drm_cs_emit_ioctl_oneshot(void *job, void *gdata, int thread_index)
{
struct radeon_cs_context *csc = ((struct radeon_drm_cs*)job)->cst;
unsigned i;
if (!(flags & PIPE_FLUSH_ASYNC))
radeon_drm_cs_sync_flush(rcs);
} else {
- radeon_drm_cs_emit_ioctl_oneshot(cs, 0);
+ radeon_drm_cs_emit_ioctl_oneshot(cs, NULL, 0);
}
} else {
radeon_cs_context_cleanup(cs->cst);
void radeon_drm_cs_sync_flush(struct radeon_cmdbuf *rcs);
void radeon_drm_cs_init_functions(struct radeon_drm_winsys *ws);
-void radeon_drm_cs_emit_ioctl_oneshot(void *job, int thread_index);
+void radeon_drm_cs_emit_ioctl_oneshot(void *job, void *gdata, int thread_index);
#endif
ws->info.pte_fragment_size = 64 * 1024; /* GPUVM page size */
if (ws->num_cpus > 1 && debug_get_option_thread())
- util_queue_init(&ws->cs_queue, "rcs", 8, 1, 0);
+ util_queue_init(&ws->cs_queue, "rcs", 8, 1, 0, NULL);
/* Create the screen at the end. The winsys must be initialized
* completely.
static void
-glthread_unmarshal_batch(void *job, int thread_index)
+glthread_unmarshal_batch(void *job, void *gdata, int thread_index)
{
struct glthread_batch *batch = (struct glthread_batch*)job;
struct gl_context *ctx = batch->ctx;
}
static void
-glthread_thread_initialization(void *job, int thread_index)
+glthread_thread_initialization(void *job, void *gdata, int thread_index)
{
struct gl_context *ctx = (struct gl_context*)job;
assert(!glthread->enabled);
if (!util_queue_init(&glthread->queue, "gl", MARSHAL_MAX_BATCHES - 2,
- 1, 0)) {
+ 1, 0, NULL)) {
return;
}
* need to restore it when it returns.
*/
if (false) {
- glthread_unmarshal_batch(next, 0);
+ glthread_unmarshal_batch(next, NULL, 0);
_glapi_set_dispatch(ctx->CurrentClientDispatch);
return;
}
* restore it after it's done.
*/
struct _glapi_table *dispatch = _glapi_get_dispatch();
- glthread_unmarshal_batch(next, 0);
+ glthread_unmarshal_batch(next, NULL, 0);
_glapi_set_dispatch(dispatch);
/* It's not a sync because we don't enqueue partial batches, but
if (!util_queue_init(&cache->cache_queue, "disk$", 32, 4,
UTIL_QUEUE_INIT_RESIZE_IF_FULL |
UTIL_QUEUE_INIT_USE_MINIMUM_PRIORITY |
- UTIL_QUEUE_INIT_SET_FULL_THREAD_AFFINITY))
+ UTIL_QUEUE_INIT_SET_FULL_THREAD_AFFINITY, NULL))
goto fail;
cache->path_init_failed = false;
}
static void
-destroy_put_job(void *job, int thread_index)
+destroy_put_job(void *job, void *gdata, int thread_index)
{
if (job) {
struct disk_cache_put_job *dc_job = (struct disk_cache_put_job *) job;
}
static void
-destroy_put_job_nocopy(void *job, int thread_index)
+destroy_put_job_nocopy(void *job, void *gdata, int thread_index)
{
struct disk_cache_put_job *dc_job = (struct disk_cache_put_job *) job;
free(dc_job->data);
- destroy_put_job(job, thread_index);
+ destroy_put_job(job, gdata, thread_index);
}
static void
-cache_put(void *job, int thread_index)
+cache_put(void *job, void *gdata, int thread_index)
{
assert(job);
mtx_unlock(&queue->lock);
if (job.job) {
- job.execute(job.job, thread_index);
+ job.execute(job.job, job.global_data, thread_index);
if (job.fence)
util_queue_fence_signal(job.fence);
if (job.cleanup)
- job.cleanup(job.job, thread_index);
+ job.cleanup(job.job, job.global_data, thread_index);
}
}
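
With the hunk above, the worker thread forwards the queue's global_data to both the execute and the cleanup callback, and the drop path further down does the same with thread_index == -1. A minimal sketch of a cleanup callback that relies on this (the pool structure and helper names here are hypothetical, purely for illustration):

/* Hypothetical cleanup callback: gdata is the queue's global_data. */
static void
return_job_to_pool(void *job, void *gdata, int thread_index)
{
   struct job_pool *pool = gdata;
   /* thread_index is -1 when the job was dropped before executing */
   job_pool_release(pool, job);
}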
const char *name,
unsigned max_jobs,
unsigned num_threads,
- unsigned flags)
+ unsigned flags,
+ void *global_data)
{
unsigned i;
queue->max_threads = num_threads;
queue->num_threads = num_threads;
queue->max_jobs = max_jobs;
+ queue->global_data = global_data;
queue->jobs = (struct util_queue_job*)
calloc(max_jobs, sizeof(struct util_queue_job));
ptr = &queue->jobs[queue->write_idx];
assert(ptr->job == NULL);
ptr->job = job;
+ ptr->global_data = queue->global_data;
ptr->fence = fence;
ptr->execute = execute;
ptr->cleanup = cleanup;
i = (i + 1) % queue->max_jobs) {
if (queue->jobs[i].fence == fence) {
if (queue->jobs[i].cleanup)
- queue->jobs[i].cleanup(queue->jobs[i].job, -1);
+ queue->jobs[i].cleanup(queue->jobs[i].job, queue->global_data, -1);
/* Just clear it. The threads will treat as a no-op job. */
memset(&queue->jobs[i], 0, sizeof(queue->jobs[i]));
}
static void
-util_queue_finish_execute(void *data, int num_thread)
+util_queue_finish_execute(void *data, void *gdata, int num_thread)
{
util_barrier *barrier = data;
util_barrier_wait(barrier);
return _util_queue_fence_wait_timeout(fence, abs_timeout);
}
-typedef void (*util_queue_execute_func)(void *job, int thread_index);
+typedef void (*util_queue_execute_func)(void *job, void *gdata, int thread_index);
struct util_queue_job {
void *job;
+ void *global_data;
size_t job_size;
struct util_queue_fence *fence;
util_queue_execute_func execute;
int write_idx, read_idx; /* ring buffer pointers */
size_t total_jobs_size; /* memory use of all jobs in the queue */
struct util_queue_job *jobs;
+ void *global_data;
/* for cleanup at exit(), protected by exit_mutex */
struct list_head head;
const char *name,
unsigned max_jobs,
unsigned num_threads,
- unsigned flags);
+ unsigned flags,
+ void *global_data);
void util_queue_destroy(struct util_queue *queue);
/* optional cleanup callback is called after fence is signaled: */
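
Taken together, a minimal usage sketch of the new parameter, assuming the declarations above (the my_screen struct and the callback/function names are hypothetical; only util_queue_init() and util_queue_add_job() are the real API):

struct my_screen {
   struct util_queue queue;
   /* state shared by every compile job */
};

static void
compile_one_shader(void *job, void *gdata, int thread_index)
{
   /* gdata is the global_data pointer given to util_queue_init() */
   struct my_screen *screen = gdata;
   /* ... compile the shader described by 'job' using shared screen state ... */
}

static bool
init_compile_queue(struct my_screen *screen)
{
   /* The last argument is stored in queue->global_data and forwarded to
    * every execute/cleanup callback, so it no longer has to be copied
    * into each job.
    */
   return util_queue_init(&screen->queue, "compq", 64, 1, 0, screen);
}

A job queued with util_queue_add_job(&screen->queue, job, &fence, compile_one_shader, NULL, 0) is then invoked as compile_one_shader(job, screen, thread_index).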