From 3033681f93bc023fa8e61c0c1b766411fb9ccdef Mon Sep 17 00:00:00 2001
From: Dave Airlie
Date: Mon, 30 May 2022 15:32:43 +1000
Subject: [PATCH] nvk: some boilerplate for descriptor sets

Part-of:
---
 src/nouveau/vulkan/nvk_descriptor_set.c | 94 +++++++++++++++++++++++++++++++++
 src/nouveau/vulkan/nvk_descriptor_set.h |  7 +++
 2 files changed, 101 insertions(+)

diff --git a/src/nouveau/vulkan/nvk_descriptor_set.c b/src/nouveau/vulkan/nvk_descriptor_set.c
index 9bc1140..f677162 100644
--- a/src/nouveau/vulkan/nvk_descriptor_set.c
+++ b/src/nouveau/vulkan/nvk_descriptor_set.c
@@ -166,9 +166,20 @@ VKAPI_ATTR void VKAPI_CALL nvk_UpdateDescriptorSets(
 }
 
 static void
+nvk_descriptor_set_destroy(struct nvk_device *device, struct nvk_descriptor_pool *pool,
+                           struct nvk_descriptor_set *set, bool free_bo)
+{
+   vk_object_base_finish(&set->base);
+   vk_free2(&device->vk.alloc, NULL, set);
+}
+
+static void
 nvk_destroy_descriptor_pool(struct nvk_device *device, const VkAllocationCallbacks *pAllocator,
                             struct nvk_descriptor_pool *pool)
 {
+   for (int i = 0; i < pool->entry_count; ++i) {
+      nvk_descriptor_set_destroy(device, pool, pool->entries[i].set, false);
+   }
    vk_object_base_finish(&pool->base);
    vk_free2(&device->vk.alloc, pAllocator, pool);
 }
@@ -187,10 +198,78 @@ nvk_CreateDescriptorPool(VkDevice _device,
    if (!pool)
       return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
    vk_object_base_init(&device->vk, &pool->base, VK_OBJECT_TYPE_DESCRIPTOR_POOL);
+
+   pool->max_entry_count = pCreateInfo->maxSets;
+
    *pDescriptorPool = nvk_descriptor_pool_to_handle(pool);
    return VK_SUCCESS;
 }
 
+static VkResult
+nvk_descriptor_set_create(struct nvk_device *device, struct nvk_descriptor_pool *pool,
+                          struct nvk_descriptor_set_layout *layout, const uint32_t *variable_count,
+                          struct nvk_descriptor_set **out_set)
+{
+   struct nvk_descriptor_set *set;
+   uint32_t mem_size = sizeof(struct nvk_descriptor_set);
+   set = vk_zalloc2(&device->vk.alloc, NULL, mem_size, 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+   if (!set)
+      return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
+
+   vk_object_base_init(&device->vk, &set->base, VK_OBJECT_TYPE_DESCRIPTOR_SET);
+
+   *out_set = set;
+   return VK_SUCCESS;
+}
+
+VKAPI_ATTR VkResult VKAPI_CALL
+nvk_AllocateDescriptorSets(VkDevice _device,
+                           const VkDescriptorSetAllocateInfo *pAllocateInfo,
+                           VkDescriptorSet *pDescriptorSets)
+{
+   VK_FROM_HANDLE(nvk_device, device, _device);
+   VK_FROM_HANDLE(nvk_descriptor_pool, pool, pAllocateInfo->descriptorPool);
+
+   VkResult result = VK_SUCCESS;
+   uint32_t i;
+
+   struct nvk_descriptor_set *set = NULL;
+
+   /* allocate a set of buffers for each shader to contain descriptors */
+   for (i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
+      VK_FROM_HANDLE(nvk_descriptor_set_layout, layout, pAllocateInfo->pSetLayouts[i]);
+      const uint32_t *variable_count = NULL;
+      result = nvk_descriptor_set_create(device, pool, layout, variable_count, &set);
+      if (result != VK_SUCCESS)
+         break;
+
+      pDescriptorSets[i] = nvk_descriptor_set_to_handle(set);
+   }
+
+   if (result != VK_SUCCESS) {
+      nvk_FreeDescriptorSets(_device, pAllocateInfo->descriptorPool, i, pDescriptorSets);
+      for (i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
+         pDescriptorSets[i] = VK_NULL_HANDLE;
+      }
+   }
+   return result;
+}
+
+VKAPI_ATTR VkResult VKAPI_CALL
+nvk_FreeDescriptorSets(VkDevice _device, VkDescriptorPool descriptorPool, uint32_t count,
+                       const VkDescriptorSet *pDescriptorSets)
+{
+   VK_FROM_HANDLE(nvk_device, device, _device);
+   VK_FROM_HANDLE(nvk_descriptor_pool, pool, descriptorPool);
+
+   for (uint32_t i = 0; i < count; i++) {
+      VK_FROM_HANDLE(nvk_descriptor_set, set, pDescriptorSets[i]);
+
+      if (set)
+         nvk_descriptor_set_destroy(device, pool, set, true);
+   }
+   return VK_SUCCESS;
+}
 
 VKAPI_ATTR void VKAPI_CALL
 nvk_DestroyDescriptorPool(VkDevice _device, VkDescriptorPool _pool,
@@ -201,3 +280,18 @@ nvk_DestroyDescriptorPool(VkDevice _device, VkDescriptorPool _pool,
 
    nvk_destroy_descriptor_pool(device, pAllocator, pool);
 }
+
+VKAPI_ATTR VkResult VKAPI_CALL
+nvk_ResetDescriptorPool(VkDevice _device, VkDescriptorPool descriptorPool,
+                        VkDescriptorPoolResetFlags flags)
+{
+   VK_FROM_HANDLE(nvk_device, device, _device);
+   VK_FROM_HANDLE(nvk_descriptor_pool, pool, descriptorPool);
+
+   for (int i = 0; i < pool->entry_count; ++i) {
+      nvk_descriptor_set_destroy(device, pool, pool->entries[i].set, false);
+   }
+   pool->entry_count = 0;
+
+   return VK_SUCCESS;
+}
diff --git a/src/nouveau/vulkan/nvk_descriptor_set.h b/src/nouveau/vulkan/nvk_descriptor_set.h
index f895ee1..d424252 100644
--- a/src/nouveau/vulkan/nvk_descriptor_set.h
+++ b/src/nouveau/vulkan/nvk_descriptor_set.h
@@ -27,8 +27,15 @@ struct nvk_descriptor_set {
    void *map;
 };
 
+struct nvk_descriptor_pool_entry {
+   struct nvk_descriptor_set *set;
+};
+
 struct nvk_descriptor_pool {
    struct vk_object_base base;
+   uint32_t entry_count;
+   uint32_t max_entry_count;
+   struct nvk_descriptor_pool_entry entries[0];
 };
 
 VK_DEFINE_NONDISP_HANDLE_CASTS(nvk_descriptor_pool, base, VkDescriptorPool,
-- 
2.7.4