2 * Copyright 2019 Google LLC
3 * SPDX-License-Identifier: MIT
5 * based in part on anv and radv which are:
6 * Copyright © 2015 Intel Corporation
7 * Copyright © 2016 Red Hat.
8 * Copyright © 2016 Bas Nieuwenhuizen
11 #include "vn_device.h"
13 #include "venus-protocol/vn_protocol_driver_device.h"
15 #include "vn_android.h"
16 #include "vn_instance.h"
17 #include "vn_physical_device.h"
/* Tear down a vn_queue: destroy the per-queue wait fence (created in
 * vn_queue_init) when one exists, then finalize the object base.
 *
 * NOTE(review): this excerpt is missing lines (function braces and the
 * trailing allocator argument of vn_DestroyFence) — confirm against the
 * complete file.
 */
23 vn_queue_fini(struct vn_queue *queue)
/* Skip destruction if fence creation never succeeded (handle left null). */
25 if (queue->wait_fence != VK_NULL_HANDLE) {
26 vn_DestroyFence(vn_device_to_handle(queue->device), queue->wait_fence,
29 vn_object_base_fini(&queue->base);
/* Initialize one vn_queue slot: bind it to the renderer-side queue and
 * create a driver-internal fence used for queue waits.
 *
 * NOTE(review): lines are missing from this excerpt (the queue_index
 * parameter declaration, struct-initializer closers, the pNext ternary arms,
 * and the function's return) — verify against the complete file.
 */
33 vn_queue_init(struct vn_device *dev,
34 struct vn_queue *queue,
35 const VkDeviceQueueCreateInfo *queue_info,
38 vn_object_base_init(&queue->base, VK_OBJECT_TYPE_QUEUE, &dev->base);
/* Asynchronously resolve the renderer queue for this (family, flags, index)
 * tuple; the reply is not needed before the handle is first used. */
40 VkQueue queue_handle = vn_queue_to_handle(queue);
41 vn_async_vkGetDeviceQueue2(
42 dev->instance, vn_device_to_handle(dev),
43 &(VkDeviceQueueInfo2){
44 .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,
45 .flags = queue_info->flags,
46 .queueFamilyIndex = queue_info->queueFamilyIndex,
47 .queueIndex = queue_index,
/* Cache identification so later submits need not consult create info. */
52 queue->family = queue_info->queueFamilyIndex;
53 queue->index = queue_index;
54 queue->flags = queue_info->flags;
/* Internal wait fence; exported as a sync_fd only when the experimental
 * global-fencing feature is reported by the renderer (pNext chain below
 * appears to select export_fence_info conditionally — arms not visible). */
56 const VkExportFenceCreateInfo export_fence_info = {
57 .sType = VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO,
59 .handleTypes = VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT,
61 const VkFenceCreateInfo fence_info = {
62 .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,
63 .pNext = dev->instance->experimental.globalFencing == VK_TRUE
68 VkResult result = vn_CreateFence(vn_device_to_handle(dev), &fence_info,
69 NULL, &queue->wait_fence);
70 if (result != VK_SUCCESS)
/* Allocate and initialize the device's queue array from the create info.
 *
 * On any per-queue init failure, previously initialized queues are finalized
 * and the array is freed before returning the error.
 *
 * NOTE(review): lines are missing here — in particular a `count = 0;` reset
 * before the second loop is presumably present in the full file (otherwise
 * `&queues[count]` at line 99 would index past the array); confirm.
 */
77 vn_device_init_queues(struct vn_device *dev,
78 const VkDeviceCreateInfo *create_info)
80 const VkAllocationCallbacks *alloc = &dev->base.base.alloc;
/* First pass: total queue count across all VkDeviceQueueCreateInfo entries. */
83 for (uint32_t i = 0; i < create_info->queueCreateInfoCount; i++)
84 count += create_info->pQueueCreateInfos[i].queueCount;
/* Zeroed so partially-initialized entries are safe to vn_queue_fini. */
86 struct vn_queue *queues =
87 vk_zalloc(alloc, sizeof(*queues) * count, VN_DEFAULT_ALIGN,
88 VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
90 return VK_ERROR_OUT_OF_HOST_MEMORY;
/* Second pass: initialize each queue; `count` appears reused as a running
 * index into `queues` (reset not visible in this excerpt). */
93 for (uint32_t i = 0; i < create_info->queueCreateInfoCount; i++) {
96 const VkDeviceQueueCreateInfo *queue_info =
97 &create_info->pQueueCreateInfos[i];
98 for (uint32_t j = 0; j < queue_info->queueCount; j++) {
99 result = vn_queue_init(dev, &queues[count], queue_info, j);
100 if (result != VK_SUCCESS) {
/* Unwind: finalize every queue initialized so far, then free the array. */
101 for (uint32_t k = 0; k < count; k++)
102 vn_queue_fini(&queues[k]);
103 vk_free(alloc, queues);
112 dev->queues = queues;
113 dev->queue_count = count;
/* Linear search: report whether `name` is present in the `exts` string array
 * (strcmp equality). Return statements are not visible in this excerpt —
 * presumably returns true on match, false otherwise; confirm. */
119 find_extension_names(const char *const *exts,
123 for (uint32_t i = 0; i < ext_count; i++) {
124 if (!strcmp(exts[i], name))
/* Build a merged extension-name list: the app's `exts` minus anything in
 * `block_exts`, plus any `extra_exts` the app did not already request.
 * The merged array is allocated with `alloc` (command scope) and returned
 * via `out_exts`; element strings are borrowed, not copied — their lifetime
 * is the caller's.
 *
 * NOTE(review): the out-count parameter, the allocation-failure return, and
 * the final out-assignment lines are missing from this excerpt — confirm
 * against the complete file.
 */
131 merge_extension_names(const char *const *exts,
133 const char *const *extra_exts,
134 uint32_t extra_count,
135 const char *const *block_exts,
136 uint32_t block_count,
137 const VkAllocationCallbacks *alloc,
138 const char *const **out_exts,
/* Worst case: every app ext kept and every extra ext appended. */
141 const char **merged =
142 vk_alloc(alloc, sizeof(*merged) * (ext_count + extra_count),
143 VN_DEFAULT_ALIGN, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
/* Keep app-requested extensions not on the block list. */
148 for (uint32_t i = 0; i < ext_count; i++) {
149 if (!find_extension_names(block_exts, block_count, exts[i]))
150 merged[count++] = exts[i];
/* Append extras the app did not already request (extra/block must not
 * overlap per the caller's contract). */
152 for (uint32_t i = 0; i < extra_count; i++) {
153 if (!find_extension_names(exts, ext_count, extra_exts[i]))
154 merged[count++] = extra_exts[i];
/* Adjust the app's VkDeviceCreateInfo before forwarding it to the renderer:
 * extensions the renderer needs for WSI/AHB emulation are added (extra_exts)
 * and extensions implemented natively by this driver are stripped
 * (block_exts). Returns the original info when no change is needed, or
 * `local_info` with a freshly merged extension list (caller frees it).
 *
 * NOTE(review): several lines are missing from this excerpt (the `has_wsi`
 * declaration at ~177, switch `break`s, and both return statements) —
 * confirm against the complete file.
 */
162 static const VkDeviceCreateInfo *
163 vn_device_fix_create_info(const struct vn_device *dev,
164 const VkDeviceCreateInfo *dev_info,
165 const VkAllocationCallbacks *alloc,
166 VkDeviceCreateInfo *local_info)
168 const struct vn_physical_device *physical_dev = dev->physical_device;
169 const struct vk_device_extension_table *app_exts =
170 &dev->base.base.enabled_extensions;
171 /* extra_exts and block_exts must not overlap */
172 const char *extra_exts[16];
173 const char *block_exts[16];
174 uint32_t extra_count = 0;
175 uint32_t block_count = 0;
177 /* fix for WSI (treat AHB as WSI extension for simplicity) */
179 app_exts->KHR_swapchain || app_exts->ANDROID_native_buffer ||
180 app_exts->ANDROID_external_memory_android_hardware_buffer;
182 /* KHR_swapchain may be advertised without the renderer support for
183 * EXT_image_drm_format_modifier
/* Pull in renderer-side DRM-modifier support the app did not request. */
185 if (!app_exts->EXT_image_drm_format_modifier &&
186 physical_dev->renderer_extensions.EXT_image_drm_format_modifier) {
187 extra_exts[extra_count++] =
188 VK_EXT_IMAGE_DRM_FORMAT_MODIFIER_EXTENSION_NAME;
/* EXT_image_drm_format_modifier depends on KHR_image_format_list before
 * Vulkan 1.2 promoted it to core. */
190 if (physical_dev->renderer_version < VK_API_VERSION_1_2 &&
191 !app_exts->KHR_image_format_list) {
192 extra_exts[extra_count++] =
193 VK_KHR_IMAGE_FORMAT_LIST_EXTENSION_NAME;
197 /* XXX KHR_swapchain may be advertised without the renderer support for
198 * EXT_queue_family_foreign
200 if (!app_exts->EXT_queue_family_foreign &&
201 physical_dev->renderer_extensions.EXT_queue_family_foreign) {
202 extra_exts[extra_count++] =
203 VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME;
/* Swapchain-family extensions are implemented by this driver, not the
 * renderer, so they must not be forwarded. */
206 if (app_exts->KHR_swapchain) {
207 /* see vn_physical_device_get_native_extensions */
208 block_exts[block_count++] = VK_KHR_SWAPCHAIN_EXTENSION_NAME;
209 block_exts[block_count++] =
210 VK_KHR_SWAPCHAIN_MUTABLE_FORMAT_EXTENSION_NAME;
211 block_exts[block_count++] =
212 VK_KHR_INCREMENTAL_PRESENT_EXTENSION_NAME;
215 if (app_exts->ANDROID_native_buffer)
216 block_exts[block_count++] = VK_ANDROID_NATIVE_BUFFER_EXTENSION_NAME;
218 if (app_exts->ANDROID_external_memory_android_hardware_buffer) {
219 block_exts[block_count++] =
220 VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME;
/* Map the renderer's preferred external-memory handle type to the matching
 * renderer-side extension when external memory / WSI is in play. */
224 if (app_exts->KHR_external_memory_fd ||
225 app_exts->EXT_external_memory_dma_buf || has_wsi) {
226 switch (physical_dev->external_memory.renderer_handle_type) {
227 case VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT:
228 if (!app_exts->EXT_external_memory_dma_buf) {
229 extra_exts[extra_count++] =
230 VK_EXT_EXTERNAL_MEMORY_DMA_BUF_EXTENSION_NAME;
233 case VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT:
234 if (!app_exts->KHR_external_memory_fd) {
235 extra_exts[extra_count++] =
236 VK_KHR_EXTERNAL_MEMORY_FD_EXTENSION_NAME;
240 /* TODO other handle types */
/* Fixed-size scratch arrays; asserts guard against future additions
 * overflowing the 16-entry capacity. */
245 assert(extra_count <= ARRAY_SIZE(extra_exts));
246 assert(block_count <= ARRAY_SIZE(block_exts));
/* Nothing to add, and nothing to strip (or no app list to strip from):
 * forward the app's info unmodified. */
248 if (!extra_count && (!block_count || !dev_info->enabledExtensionCount))
251 *local_info = *dev_info;
252 if (!merge_extension_names(dev_info->ppEnabledExtensionNames,
253 dev_info->enabledExtensionCount, extra_exts,
254 extra_count, block_exts, block_count, alloc,
255 &local_info->ppEnabledExtensionNames,
256 &local_info->enabledExtensionCount))
/* Device-level initialization: create the renderer-side device (with a
 * possibly fixed-up create info), then queues, memory pools, and the buffer
 * cache. Uses goto-style unwinding on failure.
 *
 * NOTE(review): lines are missing from this excerpt (the `result`
 * declaration, the NULL check on vn_device_fix_create_info's return, the
 * success return, and the out_* labels around line 317) — confirm against
 * the complete file.
 */
263 vn_device_init(struct vn_device *dev,
264 struct vn_physical_device *physical_dev,
265 const VkDeviceCreateInfo *create_info,
266 const VkAllocationCallbacks *alloc)
268 struct vn_instance *instance = physical_dev->instance;
269 VkPhysicalDevice physical_dev_handle =
270 vn_physical_device_to_handle(physical_dev);
271 VkDevice dev_handle = vn_device_to_handle(dev);
272 VkDeviceCreateInfo local_create_info;
275 dev->instance = instance;
276 dev->physical_device = physical_dev;
277 dev->renderer = instance->renderer;
/* May return dev_info unchanged or local_create_info with a merged
 * extension list (freed below after vkCreateDevice). */
280 vn_device_fix_create_info(dev, create_info, alloc, &local_create_info);
282 return VK_ERROR_OUT_OF_HOST_MEMORY;
284 result = vn_call_vkCreateDevice(instance, physical_dev_handle, create_info,
287 /* free the fixed extensions here since no longer needed below */
288 if (create_info == &local_create_info)
289 vk_free(alloc, (void *)create_info->ppEnabledExtensionNames);
291 if (result != VK_SUCCESS)
294 result = vn_device_init_queues(dev, create_info);
295 if (result != VK_SUCCESS)
296 goto out_destroy_device;
298 for (uint32_t i = 0; i < ARRAY_SIZE(dev->memory_pools); i++) {
299 struct vn_device_memory_pool *pool = &dev->memory_pools[i];
300 mtx_init(&pool->mutex, mtx_plain);
303 result = vn_buffer_cache_init(dev);
304 if (result != VK_SUCCESS)
305 goto out_memory_pool_fini;
/* Unwind paths: finalize pools, queues, then the renderer device. */
309 out_memory_pool_fini:
310 for (uint32_t i = 0; i < ARRAY_SIZE(dev->memory_pools); i++)
311 vn_device_memory_pool_fini(dev, i);
313 for (uint32_t i = 0; i < dev->queue_count; i++)
314 vn_queue_fini(&dev->queues[i]);
315 vk_free(alloc, dev->queues);
318 vn_call_vkDestroyDevice(instance, dev_handle, NULL);
/* vkCreateDevice entrypoint: allocate the vn_device, set up its dispatch
 * tables (driver entrypoints take precedence over WSI ones), initialize the
 * common base, then run the driver-side init. Errors free everything and
 * report through vn_error.
 *
 * NOTE(review): lines are missing from this excerpt (vk_free on the failure
 * paths around lines 350/357 and the final return) — confirm against the
 * complete file.
 */
324 vn_CreateDevice(VkPhysicalDevice physicalDevice,
325 const VkDeviceCreateInfo *pCreateInfo,
326 const VkAllocationCallbacks *pAllocator,
329 struct vn_physical_device *physical_dev =
330 vn_physical_device_from_handle(physicalDevice);
331 struct vn_instance *instance = physical_dev->instance;
/* Per spec, fall back to the instance allocator when none is provided. */
332 const VkAllocationCallbacks *alloc =
333 pAllocator ? pAllocator : &instance->base.base.alloc;
334 struct vn_device *dev;
337 dev = vk_zalloc(alloc, sizeof(*dev), VN_DEFAULT_ALIGN,
338 VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
340 return vn_error(instance, VK_ERROR_OUT_OF_HOST_MEMORY);
342 struct vk_device_dispatch_table dispatch_table;
343 vk_device_dispatch_table_from_entrypoints(&dispatch_table,
344 &vn_device_entrypoints, true);
345 vk_device_dispatch_table_from_entrypoints(&dispatch_table,
346 &wsi_device_entrypoints, false);
347 result = vn_device_base_init(&dev->base, &physical_dev->base,
348 &dispatch_table, pCreateInfo, alloc);
349 if (result != VK_SUCCESS) {
351 return vn_error(instance, result);
354 result = vn_device_init(dev, physical_dev, pCreateInfo, alloc);
355 if (result != VK_SUCCESS) {
356 vn_device_base_fini(&dev->base);
358 return vn_error(instance, result);
361 *pDevice = vn_device_to_handle(dev);
/* vkDestroyDevice entrypoint: tear down in reverse init order — buffer
 * cache, memory pools, queues — then destroy the renderer device before
 * freeing driver memory.
 *
 * NOTE(review): the NULL-device early return (~line 373) and the final
 * vk_free of `dev` are not visible in this excerpt — confirm against the
 * complete file.
 */
367 vn_DestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator)
369 struct vn_device *dev = vn_device_from_handle(device);
370 const VkAllocationCallbacks *alloc =
371 pAllocator ? pAllocator : &dev->base.base.alloc;
376 vn_buffer_cache_fini(dev);
378 for (uint32_t i = 0; i < ARRAY_SIZE(dev->memory_pools); i++)
379 vn_device_memory_pool_fini(dev, i);
381 for (uint32_t i = 0; i < dev->queue_count; i++)
382 vn_queue_fini(&dev->queues[i]);
384 /* We must emit vkDestroyDevice before freeing dev->queues. Otherwise,
385 * another thread might reuse their object ids while they still refer to
386 * the queues in the renderer.
388 vn_async_vkDestroyDevice(dev->instance, device, NULL);
390 vk_free(alloc, dev->queues);
392 vn_device_base_fini(&dev->base);
/* vkGetDeviceProcAddr entrypoint: delegate lookup to the common dispatch
 * helper on the device base. */
397 vn_GetDeviceProcAddr(VkDevice device, const char *pName)
399 struct vn_device *dev = vn_device_from_handle(device);
400 return vk_device_get_proc_addr(&dev->base.base, pName);
/* vkGetDeviceGroupPeerMemoryFeatures entrypoint: forwarded synchronously to
 * the renderer on every call (see TODO about caching at device creation).
 *
 * NOTE(review): the VkDevice/heapIndex parameter lines are missing from this
 * excerpt — confirm against the complete file. */
404 vn_GetDeviceGroupPeerMemoryFeatures(
407 uint32_t localDeviceIndex,
408 uint32_t remoteDeviceIndex,
409 VkPeerMemoryFeatureFlags *pPeerMemoryFeatures)
411 struct vn_device *dev = vn_device_from_handle(device);
413 /* TODO get and cache the values in vkCreateDevice */
414 vn_call_vkGetDeviceGroupPeerMemoryFeatures(
415 dev->instance, device, heapIndex, localDeviceIndex, remoteDeviceIndex,
416 pPeerMemoryFeatures);
/* vkDeviceWaitIdle entrypoint: implemented as a wait on every queue in
 * turn, failing fast on the first non-success result.
 *
 * NOTE(review): the success return (~line 431) and closing brace are not
 * visible in this excerpt. */
420 vn_DeviceWaitIdle(VkDevice device)
423 struct vn_device *dev = vn_device_from_handle(device);
425 for (uint32_t i = 0; i < dev->queue_count; i++) {
426 struct vn_queue *queue = &dev->queues[i];
427 VkResult result = vn_QueueWaitIdle(vn_queue_to_handle(queue));
428 if (result != VK_SUCCESS)
429 return vn_error(dev->instance, result);
436 vn_GetCalibratedTimestampsEXT(
438 uint32_t timestampCount,
439 const VkCalibratedTimestampInfoEXT *pTimestampInfos,
440 uint64_t *pTimestamps,
441 uint64_t *pMaxDeviation)
443 struct vn_device *dev = vn_device_from_handle(device);
445 return vn_call_vkGetCalibratedTimestampsEXT(
446 dev->instance, device, timestampCount, pTimestampInfos, pTimestamps,