u_vector_add() doesn't keep the returned pointers valid.
After the initial size allocated in u_vector_init() is reached it will
allocate a bigger buffer, copy data from the old buffer to the new
one and free the old buffer, making all the previous pointers returned
by u_vector_add() invalid and crashing the application when trying to
access them.
This is reproduced when running
dEQP-VK.synchronization.signal_order.timeline_semaphore.* on DG2 SKUs
that have 4 CCS engines, when INTEL_COMPUTE_CLASS=1 is set and, of
course, the perfetto build is enabled.
To fix this issue, move the storage/allocation of
struct intel_ds_queue into struct anv_queue/iris_batch and use a
struct list_head to maintain the chain of intel_ds_queue entries
belonging to the intel_ds_device.
This allows us to append or remove queues dynamically in the future if
necessary.
Fixes:
e760c5b37be9 ("anv: add perfetto source")
Reviewed-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Signed-off-by: José Roberto de Souza <jose.souza@intel.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/20977>
}
- uint64_t start_ts = intel_ds_begin_submit(batch->ds);
- uint64_t submission_id = batch->ds->submission_id;
+ uint64_t start_ts = intel_ds_begin_submit(&batch->ds);
+ uint64_t submission_id = batch->ds.submission_id;
int ret = submit_batch(batch);
- intel_ds_end_submit(batch->ds, start_ts);
+ intel_ds_end_submit(&batch->ds, start_ts);
/* When batch submission fails, our end-of-batch syncobj remains
* unsignalled, and in fact is not even considered submitted.
struct u_trace trace;
/** Batch wrapper structure for perfetto */
- struct intel_ds_queue *ds;
+ struct intel_ds_queue ds;
};
void iris_init_batches(struct iris_context *ice, int priority);
void iris_utrace_flush(struct iris_batch *batch, uint64_t submission_id)
{
struct intel_ds_flush_data *flush_data = malloc(sizeof(*flush_data));
- intel_ds_flush_data_init(flush_data, batch->ds, submission_id);
+ intel_ds_flush_data_init(flush_data, &batch->ds, submission_id);
u_trace_flush(&batch->trace, flush_data, false);
}
iris_utrace_delete_flush_data);
for (int i = 0; i < IRIS_BATCH_COUNT; i++) {
- ice->batches[i].ds =
- intel_ds_device_add_queue(&ice->ds, "%s",
- iris_batch_name_to_string(i));
+ intel_ds_device_init_queue(&ice->ds, &ice->batches[i].ds, "%s",
+ iris_batch_name_to_string(i));
}
}
send_descriptors(IntelRenderpassDataSource::TraceContext &ctx,
struct intel_ds_device *device)
{
- struct intel_ds_queue *queue;
-
PERFETTO_LOG("Sending renderstage descriptors");
device->event_id = 0;
device->current_app_event_iid = device->start_app_event_iids;
- u_vector_foreach(queue, &device->queues) {
+ list_for_each_entry_safe(struct intel_ds_queue, queue, &device->queues, link) {
for (uint32_t s = 0; s < ARRAY_SIZE(queue->stages); s++) {
queue->stages[s].start_ns[0] = 0;
}
}
/* Emit all the IID picked at device/queue creation. */
- u_vector_foreach(queue, &device->queues) {
+ list_for_each_entry_safe(struct intel_ds_queue, queue, &device->queues, link) {
for (unsigned s = 0; s < INTEL_DS_QUEUE_STAGE_N_STAGES; s++) {
{
/* We put the stage number in there so that all rows are order
device->info = *devinfo;
device->iid = get_iid();
device->api = api;
- u_vector_init(&device->queues, 4, sizeof(struct intel_ds_queue));
+ list_inithead(&device->queues);
/* Reserve iids for the application generated events */
device->start_app_event_iids = 1ull << 32;
{
u_trace_context_fini(&device->trace_context);
_mesa_hash_table_destroy(device->app_events, NULL);
- u_vector_finish(&device->queues);
}
struct intel_ds_queue *
-intel_ds_device_add_queue(struct intel_ds_device *device,
- const char *fmt_name,
- ...)
+intel_ds_device_init_queue(struct intel_ds_device *device,
+ struct intel_ds_queue *queue,
+ const char *fmt_name,
+ ...)
{
- struct intel_ds_queue *queue =
- (struct intel_ds_queue *) u_vector_add(&device->queues);
va_list ap;
memset(queue, 0, sizeof(*queue));
queue->stages[s].stage_iid = get_iid();
}
+ list_add(&queue->link, &device->queues);
+
return queue;
}
struct u_trace_context trace_context;
/* List of intel_ds_queue */
- struct u_vector queues;
+ struct list_head queues;
};
struct intel_ds_stage {
};
struct intel_ds_queue {
+ struct list_head link;
+
/* Device this queue belongs to */
struct intel_ds_device *device;
enum intel_ds_api api);
void intel_ds_device_fini(struct intel_ds_device *device);
-struct intel_ds_queue *intel_ds_device_add_queue(struct intel_ds_device *device,
- const char *fmt_name,
- ...);
+struct intel_ds_queue *
+intel_ds_device_init_queue(struct intel_ds_device *device,
+ struct intel_ds_queue *queue,
+ const char *fmt_name,
+ ...);
void intel_ds_flush_data_init(struct intel_ds_flush_data *data,
struct intel_ds_queue *queue,
return VK_SUCCESS;
}
- uint64_t start_ts = intel_ds_begin_submit(queue->ds);
+ uint64_t start_ts = intel_ds_begin_submit(&queue->ds);
pthread_mutex_lock(&device->mutex);
result = anv_queue_submit_locked(queue, submit);
/* Take submission ID under lock */
pthread_mutex_unlock(&device->mutex);
- intel_ds_end_submit(queue->ds, start_ts);
+ intel_ds_end_submit(&queue->ds, start_ts);
return result;
}
/** Synchronization object for debug purposes (DEBUG_SYNC) */
struct vk_sync *sync;
- struct intel_ds_queue * ds;
+ struct intel_ds_queue ds;
};
struct nir_xfb_info;
if (!flush)
return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
- intel_ds_flush_data_init(&flush->ds, queue->ds, queue->ds->submission_id);
+ intel_ds_flush_data_init(&flush->ds, &queue->ds, queue->ds.submission_id);
result = vk_sync_create(&device->vk, &device->physical->sync_syncobj_type,
0, 0, &flush->sync);
for (uint32_t q = 0; q < device->queue_count; q++) {
struct anv_queue *queue = &device->queues[q];
- queue->ds =
- intel_ds_device_add_queue(&device->ds, "%s%u",
- intel_engines_class_to_string(queue->family->engine_class),
- queue->vk.index_in_family);
+ intel_ds_device_init_queue(&device->ds, &queue->ds, "%s%u",
+ intel_engines_class_to_string(queue->family->engine_class),
+ queue->vk.index_in_family);
}
}
return VK_SUCCESS;
}
- uint64_t start_ts = intel_ds_begin_submit(queue->ds);
+ uint64_t start_ts = intel_ds_begin_submit(&queue->ds);
pthread_mutex_lock(&device->mutex);
result = anv_queue_submit_locked(queue, submit);
/* Take submission ID under lock */
pthread_mutex_unlock(&device->mutex);
- intel_ds_end_submit(queue->ds, start_ts);
+ intel_ds_end_submit(&queue->ds, start_ts);
return result;
}
/** Synchronization object for debug purposes (DEBUG_SYNC) */
struct vk_sync *sync;
- struct intel_ds_queue * ds;
+ struct intel_ds_queue ds;
};
struct nir_xfb_info;
if (!flush)
return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
- intel_ds_flush_data_init(&flush->ds, queue->ds, queue->ds->submission_id);
+ intel_ds_flush_data_init(&flush->ds, &queue->ds, queue->ds.submission_id);
result = vk_sync_create(&device->vk, &device->physical->sync_syncobj_type,
0, 0, &flush->sync);
for (uint32_t q = 0; q < device->queue_count; q++) {
struct anv_queue *queue = &device->queues[q];
- queue->ds =
- intel_ds_device_add_queue(&device->ds, "%s%u",
- intel_engines_class_to_string(queue->family->engine_class),
- queue->vk.index_in_family);
+ intel_ds_device_init_queue(&device->ds, &queue->ds, "%s%u",
+ intel_engines_class_to_string(queue->family->engine_class),
+ queue->vk.index_in_family);
}
}