{
vn_object_base_init(&queue->base, VK_OBJECT_TYPE_QUEUE, &dev->base);
+ VkDeviceQueueTimelineInfoMESA timeline_info;
+ const struct vn_renderer_info *renderer_info =
+ &dev->instance->renderer->info;
+ if (renderer_info->supports_multiple_timelines) {
+ int ring_idx = vn_instance_acquire_ring_idx(dev->instance);
+ if (ring_idx < 0) {
+ vn_log(dev->instance,
+ "failed to bind VkQueue to a renderer sync queue");
+ return VK_ERROR_INITIALIZATION_FAILED;
+ }
+ queue->ring_idx = (uint32_t)ring_idx;
+
+ timeline_info = (VkDeviceQueueTimelineInfoMESA){
+ .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_TIMELINE_INFO_MESA,
+ .ringIdx = queue->ring_idx,
+ };
+ }
+
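+ /* device_queue_info chains timeline_info via pNext, so timeline_info is
+ * declared at function scope above and must stay alive until the
+ * vn_async_vkGetDeviceQueue2 call below.
+ */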
+ const VkDeviceQueueInfo2 device_queue_info = {
+ .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,
+ .pNext =
+ renderer_info->supports_multiple_timelines ? &timeline_info : NULL,
+ .flags = queue_info->flags,
+ .queueFamilyIndex = queue_info->queueFamilyIndex,
+ .queueIndex = queue_index,
+ };
+
VkQueue queue_handle = vn_queue_to_handle(queue);
- vn_async_vkGetDeviceQueue2(
- dev->instance, vn_device_to_handle(dev),
- &(VkDeviceQueueInfo2){
- .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,
- .flags = queue_info->flags,
- .queueFamilyIndex = queue_info->queueFamilyIndex,
- .queueIndex = queue_index,
- },
- &queue_handle);
+ vn_async_vkGetDeviceQueue2(dev->instance, vn_device_to_handle(dev),
+ &device_queue_info, &queue_handle);
queue->device = dev;
queue->family = queue_info->queueFamilyIndex;
*/
vn_async_vkDestroyDevice(dev->instance, device, NULL);
+ /* We must emit vn_async_vkDestroyDevice above before releasing the bound
+ * ring_idx values. Otherwise, another thread could acquire a ring_idx
+ * while it is still bound to this device's queues in the renderer.
+ */
+ if (dev->instance->renderer->info.supports_multiple_timelines) {
+ for (uint32_t i = 0; i < dev->queue_count; i++) {
+ vn_instance_release_ring_idx(dev->instance, dev->queues[i].ring_idx);
+ }
+ }
+
vk_free(alloc, dev->queues);
vn_device_base_fini(&dev->base);
return vn_error(NULL, result);
}
+ /* ring_idx = 0 reserved for CPU timeline */
+ instance->ring_idx_used_mask = 0x1;
+
mtx_init(&instance->physical_device.mutex, mtx_plain);
mtx_init(&instance->cs_shmem.mutex, mtx_plain);
+ mtx_init(&instance->ring_idx_mutex, mtx_plain);
if (!vn_icd_supports_api_version(
instance->base.base.app_info.api_version)) {
vn_renderer_destroy(instance->renderer, alloc);
mtx_destroy(&instance->physical_device.mutex);
+ mtx_destroy(&instance->ring_idx_mutex);
mtx_destroy(&instance->cs_shmem.mutex);
vn_instance_base_fini(&instance->base);
vk_free(alloc, instance->physical_device.groups);
}
mtx_destroy(&instance->physical_device.mutex);
+ mtx_destroy(&instance->ring_idx_mutex);
vn_call_vkDestroyInstance(instance, _instance, NULL);
struct vn_renderer_shmem_pool reply_shmem_pool;
+ mtx_t ring_idx_mutex;
+ /* bit i is set when ring i is bound to a queue; at most 64 rings */
+ uint64_t ring_idx_used_mask;
+
/* XXX staged features to be merged to core venus protocol */
VkVenusExperimentalFeatures100000MESA experimental;
return shmem;
}
+static inline int
+vn_instance_acquire_ring_idx(struct vn_instance *instance)
+{
+ mtx_lock(&instance->ring_idx_mutex);
+ /* ffsll returns the 1-based position of the least significant set bit,
+ * or 0 when there is none, so this yields the lowest vacant ring index,
+ * or -1 when all 64 bits of the mask are already set.
+ */
+ int ring_idx = ffsll(~instance->ring_idx_used_mask) - 1;
+ if (ring_idx >= (int)instance->renderer->info.max_sync_queue_count)
+ ring_idx = -1;
+ if (ring_idx > 0)
+ instance->ring_idx_used_mask |= (1ULL << (uint32_t)ring_idx);
+ mtx_unlock(&instance->ring_idx_mutex);
+
+ assert(ring_idx); /* ring 0 is dedicated to the CPU timeline */
+
+ /* returns -1 when no ring is vacant */
+ return ring_idx;
+}
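+
+/* A worked example of the acquire/release mask arithmetic (a sketch; the
+ * value max_sync_queue_count == 4 is assumed for illustration only):
+ *   initial          used_mask = 0b0001  (ring 0 = CPU timeline)
+ *   acquire -> 1     used_mask = 0b0011  (ffsll(~0b0001) - 1 == 1)
+ *   acquire -> 2     used_mask = 0b0111
+ *   release(1)       used_mask = 0b0101
+ *   acquire -> 1     used_mask = 0b0111  (lowest vacant bit is reused)
+ *   acquire -> 3     used_mask = 0b1111
+ *   acquire -> -1    (no vacant ring below max_sync_queue_count)
+ */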
+
+static inline void
+vn_instance_release_ring_idx(struct vn_instance *instance, uint32_t ring_idx)
+{
+ assert(ring_idx > 0); /* ring 0 (CPU timeline) is never released */
+
+ mtx_lock(&instance->ring_idx_mutex);
+ assert(instance->ring_idx_used_mask & (1ULL << ring_idx));
+ instance->ring_idx_used_mask &= ~(1ULL << ring_idx);
+ mtx_unlock(&instance->ring_idx_mutex);
+}
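+
+/* Note: every successful acquire during queue initialization is paired with
+ * exactly one release during device destruction (see the hunks above), so
+ * used_mask drops back to 0x1 once all devices of the instance are
+ * destroyed.
+ */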
+
#endif /* VN_INSTANCE_H */