size_t cs_size;
/*
- * Submit cs to the virtual sync queue identified by sync_queue_index. The
- * virtual queue is assumed to be associated with the physical VkQueue
- * identified by vk_queue_id. After the execution completes on the
- * VkQueue, the virtual sync queue is signaled.
+ * Submit cs to the timeline identified by ring_idx. A timeline is
+ * typically associated with a physical VkQueue and bound to the ring_idx
+ * during VkQueue creation. After execution completes on the VkQueue, the
+ * timeline sync point is signaled.
*
- * sync_queue_index must be less than max_sync_queue_count.
- *
- * vk_queue_id specifies the object id of a VkQueue.
- *
- * When sync_queue_cpu is true, it specifies the special CPU sync queue,
- * and sync_queue_index/vk_queue_id are ignored. TODO revisit this later
+ * ring_idx 0 is reserved for the context-specific CPU timeline. Sync
+ * points on the CPU timeline are signaled immediately after command
+ * processing by the renderer.
*/
- uint32_t sync_queue_index;
- bool sync_queue_cpu;
+ uint32_t ring_idx;
+
+ /* TODO remove once vtest supports multiple timelines */
vn_object_id vk_queue_id;
/* syncs to update when the virtual sync queue is signaled */
&(const struct vn_renderer_submit_batch){
.cs_data = cs_data,
.cs_size = cs_size,
- .sync_queue_cpu = true,
+ .ring_idx = 0, /* CPU ring */
.syncs = &sync,
.sync_values = &(const uint64_t){ 1 },
.sync_count = 1,
&(const struct vn_renderer_submit_batch){
.cs_data = cs_data,
.cs_size = cs_size,
+ .ring_idx = 0, /* CPU ring */
},
.batch_count = 1,
};
static int
sim_submit(struct virtgpu *gpu, const struct vn_renderer_submit *submit)
{
+ const bool use_ring_idx = gpu->base.info.supports_multiple_timelines;
+
/* TODO replace submit->bos by submit->gem_handles to avoid malloc/loop */
uint32_t *gem_handles = NULL;
if (submit->bo_count) {
const struct vn_renderer_submit_batch *batch = &submit->batches[i];
struct drm_virtgpu_execbuffer args = {
- .flags = batch->sync_count ? VIRTGPU_EXECBUF_FENCE_FD_OUT : 0,
+ .flags = (batch->sync_count ? VIRTGPU_EXECBUF_FENCE_FD_OUT : 0) |
+ (use_ring_idx ? VIRTGPU_EXECBUF_RING_IDX : 0),
.size = batch->cs_size,
.command = (uintptr_t)batch->cs_data,
.bo_handles = (uintptr_t)gem_handles,
.num_bo_handles = submit->bo_count,
+ .ring_idx = (use_ring_idx ? batch->ring_idx : 0),
};
ret = drmIoctl(gpu->fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &args);
if (batch->sync_count) {
ret = sim_submit_signal_syncs(gpu, args.fence_fd, batch->syncs,
batch->sync_values, batch->sync_count,
- batch->sync_queue_cpu);
+ batch->ring_idx == 0);
close(args.fence_fd);
if (ret)
break;
.sync_offset = sync_offset / sizeof(uint32_t),
.sync_count = batch->sync_count,
};
- if (!batch->sync_queue_cpu) {
+ if (vtest->base.info.supports_multiple_timelines) {
dst.flags = VCMD_SUBMIT_CMD2_FLAG_SYNC_QUEUE;
- dst.sync_queue_index = batch->sync_queue_index;
+ dst.sync_queue_index = batch->ring_idx;
dst.sync_queue_id = batch->vk_queue_id;
}
vtest_write(vtest, &dst, sizeof(dst));