venus: fix a missing mtx_destroy in vn_device_init
[platform/upstream/mesa.git] / src / virtio / vulkan / vn_device.c
1 /*
2  * Copyright 2019 Google LLC
3  * SPDX-License-Identifier: MIT
4  *
5  * based in part on anv and radv which are:
6  * Copyright © 2015 Intel Corporation
7  * Copyright © 2016 Red Hat.
8  * Copyright © 2016 Bas Nieuwenhuizen
9  */
10
11 #include "vn_device.h"
12
13 #include "venus-protocol/vn_protocol_driver_device.h"
14
15 #include "vn_android.h"
16 #include "vn_instance.h"
17 #include "vn_physical_device.h"
18 #include "vn_queue.h"
19
20 /* device commands */
21
22 static void
23 vn_queue_fini(struct vn_queue *queue)
24 {
25    if (queue->wait_fence != VK_NULL_HANDLE) {
26       vn_DestroyFence(vn_device_to_handle(queue->device), queue->wait_fence,
27                       NULL);
28    }
29    vn_object_base_fini(&queue->base);
30 }
31
32 static VkResult
33 vn_queue_init(struct vn_device *dev,
34               struct vn_queue *queue,
35               const VkDeviceQueueCreateInfo *queue_info,
36               uint32_t queue_index)
37 {
38    vn_object_base_init(&queue->base, VK_OBJECT_TYPE_QUEUE, &dev->base);
39
40    VkQueue queue_handle = vn_queue_to_handle(queue);
41    vn_async_vkGetDeviceQueue2(
42       dev->instance, vn_device_to_handle(dev),
43       &(VkDeviceQueueInfo2){
44          .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,
45          .flags = queue_info->flags,
46          .queueFamilyIndex = queue_info->queueFamilyIndex,
47          .queueIndex = queue_index,
48       },
49       &queue_handle);
50
51    queue->device = dev;
52    queue->family = queue_info->queueFamilyIndex;
53    queue->index = queue_index;
54    queue->flags = queue_info->flags;
55
56    const VkExportFenceCreateInfo export_fence_info = {
57       .sType = VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO,
58       .pNext = NULL,
59       .handleTypes = VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT,
60    };
61    const VkFenceCreateInfo fence_info = {
62       .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,
63       .pNext = dev->instance->experimental.globalFencing == VK_TRUE
64                   ? &export_fence_info
65                   : NULL,
66       .flags = 0,
67    };
68    VkResult result = vn_CreateFence(vn_device_to_handle(dev), &fence_info,
69                                     NULL, &queue->wait_fence);
70    if (result != VK_SUCCESS)
71       return result;
72
73    return VK_SUCCESS;
74 }
75
76 static VkResult
77 vn_device_init_queues(struct vn_device *dev,
78                       const VkDeviceCreateInfo *create_info)
79 {
80    const VkAllocationCallbacks *alloc = &dev->base.base.alloc;
81
82    uint32_t count = 0;
83    for (uint32_t i = 0; i < create_info->queueCreateInfoCount; i++)
84       count += create_info->pQueueCreateInfos[i].queueCount;
85
86    struct vn_queue *queues =
87       vk_zalloc(alloc, sizeof(*queues) * count, VN_DEFAULT_ALIGN,
88                 VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
89    if (!queues)
90       return VK_ERROR_OUT_OF_HOST_MEMORY;
91
92    count = 0;
93    for (uint32_t i = 0; i < create_info->queueCreateInfoCount; i++) {
94       VkResult result;
95
96       const VkDeviceQueueCreateInfo *queue_info =
97          &create_info->pQueueCreateInfos[i];
98       for (uint32_t j = 0; j < queue_info->queueCount; j++) {
99          result = vn_queue_init(dev, &queues[count], queue_info, j);
100          if (result != VK_SUCCESS) {
101             for (uint32_t k = 0; k < count; k++)
102                vn_queue_fini(&queues[k]);
103             vk_free(alloc, queues);
104
105             return result;
106          }
107
108          count++;
109       }
110    }
111
112    dev->queues = queues;
113    dev->queue_count = count;
114
115    return VK_SUCCESS;
116 }
117
/* Return true when name appears (exact match) in the ext_count-long array
 * exts.  Linear scan: extension lists are short.
 */
static bool
find_extension_names(const char *const *exts,
                     uint32_t ext_count,
                     const char *name)
{
   const char *const *const end = exts + ext_count;
   for (const char *const *iter = exts; iter != end; iter++) {
      if (strcmp(*iter, name) == 0)
         return true;
   }
   return false;
}
129
130 static bool
131 merge_extension_names(const char *const *exts,
132                       uint32_t ext_count,
133                       const char *const *extra_exts,
134                       uint32_t extra_count,
135                       const char *const *block_exts,
136                       uint32_t block_count,
137                       const VkAllocationCallbacks *alloc,
138                       const char *const **out_exts,
139                       uint32_t *out_count)
140 {
141    const char **merged =
142       vk_alloc(alloc, sizeof(*merged) * (ext_count + extra_count),
143                VN_DEFAULT_ALIGN, VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
144    if (!merged)
145       return false;
146
147    uint32_t count = 0;
148    for (uint32_t i = 0; i < ext_count; i++) {
149       if (!find_extension_names(block_exts, block_count, exts[i]))
150          merged[count++] = exts[i];
151    }
152    for (uint32_t i = 0; i < extra_count; i++) {
153       if (!find_extension_names(exts, ext_count, extra_exts[i]))
154          merged[count++] = extra_exts[i];
155    }
156
157    *out_exts = merged;
158    *out_count = count;
159    return true;
160 }
161
/* Rewrite the app's extension list before forwarding device creation to the
 * renderer: extensions implemented natively by venus (WSI, AHB) are blocked
 * from the renderer, while renderer-side extensions venus itself depends on
 * are force-enabled.
 *
 * Returns dev_info unchanged when no fixup is needed, local_info when the
 * list was rewritten (its ppEnabledExtensionNames is then heap-allocated
 * from alloc and must be freed by the caller), or NULL on allocation
 * failure.
 */
static const VkDeviceCreateInfo *
vn_device_fix_create_info(const struct vn_device *dev,
                          const VkDeviceCreateInfo *dev_info,
                          const VkAllocationCallbacks *alloc,
                          VkDeviceCreateInfo *local_info)
{
   const struct vn_physical_device *physical_dev = dev->physical_device;
   const struct vk_device_extension_table *app_exts =
      &dev->base.base.enabled_extensions;
   /* extra_exts and block_exts must not overlap */
   const char *extra_exts[16];
   const char *block_exts[16];
   uint32_t extra_count = 0;
   uint32_t block_count = 0;

   /* fix for WSI (treat AHB as WSI extension for simplicity) */
   const bool has_wsi =
      app_exts->KHR_swapchain || app_exts->ANDROID_native_buffer ||
      app_exts->ANDROID_external_memory_android_hardware_buffer;
   if (has_wsi) {
      /* KHR_swapchain may be advertised without the renderer support for
       * EXT_image_drm_format_modifier
       */
      if (!app_exts->EXT_image_drm_format_modifier &&
          physical_dev->renderer_extensions.EXT_image_drm_format_modifier) {
         extra_exts[extra_count++] =
            VK_EXT_IMAGE_DRM_FORMAT_MODIFIER_EXTENSION_NAME;

         /* EXT_image_drm_format_modifier requires KHR_image_format_list,
          * which is core in 1.2; enable it explicitly on older renderers
          */
         if (physical_dev->renderer_version < VK_API_VERSION_1_2 &&
             !app_exts->KHR_image_format_list) {
            extra_exts[extra_count++] =
               VK_KHR_IMAGE_FORMAT_LIST_EXTENSION_NAME;
         }
      }

      /* XXX KHR_swapchain may be advertised without the renderer support for
       * EXT_queue_family_foreign
       */
      if (!app_exts->EXT_queue_family_foreign &&
          physical_dev->renderer_extensions.EXT_queue_family_foreign) {
         extra_exts[extra_count++] =
            VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME;
      }

      if (app_exts->KHR_swapchain) {
         /* see vn_physical_device_get_native_extensions */
         block_exts[block_count++] = VK_KHR_SWAPCHAIN_EXTENSION_NAME;
         block_exts[block_count++] =
            VK_KHR_SWAPCHAIN_MUTABLE_FORMAT_EXTENSION_NAME;
         block_exts[block_count++] =
            VK_KHR_INCREMENTAL_PRESENT_EXTENSION_NAME;
      }

      if (app_exts->ANDROID_native_buffer)
         block_exts[block_count++] = VK_ANDROID_NATIVE_BUFFER_EXTENSION_NAME;

      if (app_exts->ANDROID_external_memory_android_hardware_buffer) {
         block_exts[block_count++] =
            VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME;
      }
   }

   /* enable the fd-based external-memory extensions matching the renderer's
    * handle type; the DMA_BUF case intentionally falls through since
    * EXT_external_memory_dma_buf depends on KHR_external_memory_fd
    */
   if (app_exts->KHR_external_memory_fd ||
       app_exts->EXT_external_memory_dma_buf || has_wsi) {
      switch (physical_dev->external_memory.renderer_handle_type) {
      case VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT:
         if (!app_exts->EXT_external_memory_dma_buf) {
            extra_exts[extra_count++] =
               VK_EXT_EXTERNAL_MEMORY_DMA_BUF_EXTENSION_NAME;
         }
         FALLTHROUGH;
      case VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT:
         if (!app_exts->KHR_external_memory_fd) {
            extra_exts[extra_count++] =
               VK_KHR_EXTERNAL_MEMORY_FD_EXTENSION_NAME;
         }
         break;
      default:
         /* TODO other handle types */
         break;
      }
   }

   /* debug-only overflow checks: the fixed-size arrays above must be grown
    * if more entries are ever added
    */
   assert(extra_count <= ARRAY_SIZE(extra_exts));
   assert(block_count <= ARRAY_SIZE(block_exts));

   /* nothing to add, and nothing enabled that needs blocking */
   if (!extra_count && (!block_count || !dev_info->enabledExtensionCount))
      return dev_info;

   /* shallow-copy dev_info, then replace only the extension list */
   *local_info = *dev_info;
   if (!merge_extension_names(dev_info->ppEnabledExtensionNames,
                              dev_info->enabledExtensionCount, extra_exts,
                              extra_count, block_exts, block_count, alloc,
                              &local_info->ppEnabledExtensionNames,
                              &local_info->enabledExtensionCount))
      return NULL;

   return local_info;
}
261
/* Initialize dev: create the renderer-side device, then the queues, memory
 * pool mutexes, and the buffer cache.  On failure everything created so far
 * is unwound in reverse order via the goto chain below.
 */
static VkResult
vn_device_init(struct vn_device *dev,
               struct vn_physical_device *physical_dev,
               const VkDeviceCreateInfo *create_info,
               const VkAllocationCallbacks *alloc)
{
   struct vn_instance *instance = physical_dev->instance;
   VkPhysicalDevice physical_dev_handle =
      vn_physical_device_to_handle(physical_dev);
   VkDevice dev_handle = vn_device_to_handle(dev);
   VkDeviceCreateInfo local_create_info;
   VkResult result;

   dev->instance = instance;
   dev->physical_device = physical_dev;
   dev->renderer = instance->renderer;

   /* may redirect create_info to local_create_info with a rewritten,
    * heap-allocated extension list; NULL means allocation failure
    */
   create_info =
      vn_device_fix_create_info(dev, create_info, alloc, &local_create_info);
   if (!create_info)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   result = vn_call_vkCreateDevice(instance, physical_dev_handle, create_info,
                                   NULL, &dev_handle);

   /* free the fixed extensions here since no longer needed below */
   if (create_info == &local_create_info)
      vk_free(alloc, (void *)create_info->ppEnabledExtensionNames);

   if (result != VK_SUCCESS)
      return result;

   /* note: only ppEnabledExtensionNames was freed above; the queue create
    * infos still point at the app's original arrays
    */
   result = vn_device_init_queues(dev, create_info);
   if (result != VK_SUCCESS)
      goto out_destroy_device;

   for (uint32_t i = 0; i < ARRAY_SIZE(dev->memory_pools); i++) {
      struct vn_device_memory_pool *pool = &dev->memory_pools[i];
      mtx_init(&pool->mutex, mtx_plain);
   }

   result = vn_buffer_cache_init(dev);
   if (result != VK_SUCCESS)
      goto out_memory_pool_fini;

   return VK_SUCCESS;

out_memory_pool_fini:
   /* assumed to also destroy pool->mutex initialized above — see
    * vn_device_memory_pool_fini (vn_device_memory.c); TODO confirm
    */
   for (uint32_t i = 0; i < ARRAY_SIZE(dev->memory_pools); i++)
      vn_device_memory_pool_fini(dev, i);

   for (uint32_t i = 0; i < dev->queue_count; i++)
      vn_queue_fini(&dev->queues[i]);
   vk_free(alloc, dev->queues);

out_destroy_device:
   vn_call_vkDestroyDevice(instance, dev_handle, NULL);

   return result;
}
322
323 VkResult
324 vn_CreateDevice(VkPhysicalDevice physicalDevice,
325                 const VkDeviceCreateInfo *pCreateInfo,
326                 const VkAllocationCallbacks *pAllocator,
327                 VkDevice *pDevice)
328 {
329    struct vn_physical_device *physical_dev =
330       vn_physical_device_from_handle(physicalDevice);
331    struct vn_instance *instance = physical_dev->instance;
332    const VkAllocationCallbacks *alloc =
333       pAllocator ? pAllocator : &instance->base.base.alloc;
334    struct vn_device *dev;
335    VkResult result;
336
337    dev = vk_zalloc(alloc, sizeof(*dev), VN_DEFAULT_ALIGN,
338                    VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
339    if (!dev)
340       return vn_error(instance, VK_ERROR_OUT_OF_HOST_MEMORY);
341
342    struct vk_device_dispatch_table dispatch_table;
343    vk_device_dispatch_table_from_entrypoints(&dispatch_table,
344                                              &vn_device_entrypoints, true);
345    vk_device_dispatch_table_from_entrypoints(&dispatch_table,
346                                              &wsi_device_entrypoints, false);
347    result = vn_device_base_init(&dev->base, &physical_dev->base,
348                                 &dispatch_table, pCreateInfo, alloc);
349    if (result != VK_SUCCESS) {
350       vk_free(alloc, dev);
351       return vn_error(instance, result);
352    }
353
354    result = vn_device_init(dev, physical_dev, pCreateInfo, alloc);
355    if (result != VK_SUCCESS) {
356       vn_device_base_fini(&dev->base);
357       vk_free(alloc, dev);
358       return vn_error(instance, result);
359    }
360
361    *pDevice = vn_device_to_handle(dev);
362
363    return VK_SUCCESS;
364 }
365
366 void
367 vn_DestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator)
368 {
369    struct vn_device *dev = vn_device_from_handle(device);
370    const VkAllocationCallbacks *alloc =
371       pAllocator ? pAllocator : &dev->base.base.alloc;
372
373    if (!dev)
374       return;
375
376    vn_buffer_cache_fini(dev);
377
378    for (uint32_t i = 0; i < ARRAY_SIZE(dev->memory_pools); i++)
379       vn_device_memory_pool_fini(dev, i);
380
381    for (uint32_t i = 0; i < dev->queue_count; i++)
382       vn_queue_fini(&dev->queues[i]);
383
384    /* We must emit vkDestroyDevice before freeing dev->queues.  Otherwise,
385     * another thread might reuse their object ids while they still refer to
386     * the queues in the renderer.
387     */
388    vn_async_vkDestroyDevice(dev->instance, device, NULL);
389
390    vk_free(alloc, dev->queues);
391
392    vn_device_base_fini(&dev->base);
393    vk_free(alloc, dev);
394 }
395
396 PFN_vkVoidFunction
397 vn_GetDeviceProcAddr(VkDevice device, const char *pName)
398 {
399    struct vn_device *dev = vn_device_from_handle(device);
400    return vk_device_get_proc_addr(&dev->base.base, pName);
401 }
402
403 void
404 vn_GetDeviceGroupPeerMemoryFeatures(
405    VkDevice device,
406    uint32_t heapIndex,
407    uint32_t localDeviceIndex,
408    uint32_t remoteDeviceIndex,
409    VkPeerMemoryFeatureFlags *pPeerMemoryFeatures)
410 {
411    struct vn_device *dev = vn_device_from_handle(device);
412
413    /* TODO get and cache the values in vkCreateDevice */
414    vn_call_vkGetDeviceGroupPeerMemoryFeatures(
415       dev->instance, device, heapIndex, localDeviceIndex, remoteDeviceIndex,
416       pPeerMemoryFeatures);
417 }
418
419 VkResult
420 vn_DeviceWaitIdle(VkDevice device)
421 {
422    VN_TRACE_FUNC();
423    struct vn_device *dev = vn_device_from_handle(device);
424
425    for (uint32_t i = 0; i < dev->queue_count; i++) {
426       struct vn_queue *queue = &dev->queues[i];
427       VkResult result = vn_QueueWaitIdle(vn_queue_to_handle(queue));
428       if (result != VK_SUCCESS)
429          return vn_error(dev->instance, result);
430    }
431
432    return VK_SUCCESS;
433 }
434
435 VkResult
436 vn_GetCalibratedTimestampsEXT(
437    VkDevice device,
438    uint32_t timestampCount,
439    const VkCalibratedTimestampInfoEXT *pTimestampInfos,
440    uint64_t *pTimestamps,
441    uint64_t *pMaxDeviation)
442 {
443    struct vn_device *dev = vn_device_from_handle(device);
444
445    return vn_call_vkGetCalibratedTimestampsEXT(
446       dev->instance, device, timestampCount, pTimestampInfos, pTimestamps,
447       pMaxDeviation);
448 }