return sorted_bindings;
}
+/* Computes the worst-case descriptor size and alignment needed to back any
+ * descriptor type in a VK_VALVE_mutable_descriptor_type list: the mutable
+ * descriptor slot must be large/aligned enough for every type it may hold,
+ * so we take the max over the list.
+ *
+ * Returns false (leaving the outputs untouched) if the list contains a type
+ * that RADV cannot store in a mutable descriptor (anything outside the
+ * buffer/texel-buffer/image cases below, e.g. combined image samplers or
+ * inline uniform blocks).
+ *
+ * NOTE(review): an empty list (descriptorTypeCount == 0) returns true with
+ * *out_size == 0 and *out_align == 0; the CreateDescriptorSetLayout caller
+ * asserts both are nonzero, so this relies on the spec/app never providing
+ * an empty list for a binding — TODO confirm against the VALVE extension
+ * spec's rules for empty type lists. */
+static bool radv_mutable_descriptor_type_size_alignment(const VkMutableDescriptorTypeListVALVE *list,
+                                                        uint64_t *out_size, uint64_t *out_align)
+{
+	uint32_t max_size = 0;
+	uint32_t max_align = 0;
+
+	for (uint32_t i = 0; i < list->descriptorTypeCount; i++) {
+		uint32_t size = 0;
+		uint32_t align = 0;
+
+		switch (list->pDescriptorTypes[i]) {
+		/* Buffer and texel-buffer descriptors are a 16-byte record in
+		 * RADV's descriptor layout (see the non-mutable cases in
+		 * radv_CreateDescriptorSetLayout, which use size 16 / align 16). */
+		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
+		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
+		case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
+		case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
+			size = 16;
+			align = 16;
+			break;
+		/* Image descriptors occupy 64 bytes (matching the 64-byte
+		 * per-descriptor accounting used for images in the pool-size
+		 * computation) but only require 32-byte alignment. */
+		case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
+		case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
+			size = 64;
+			align = 32;
+			break;
+		default:
+			/* Type not representable as a mutable descriptor. */
+			return false;
+		}
+
+		max_size = MAX2(max_size, size);
+		max_align = MAX2(max_align, align);
+	}
+
+	*out_size = max_size;
+	*out_align = max_align;
+	return true;
+}
+
VkResult radv_CreateDescriptorSetLayout(
VkDevice _device,
const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO);
const VkDescriptorSetLayoutBindingFlagsCreateInfo *variable_flags =
vk_find_struct_const(pCreateInfo->pNext, DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO);
+ const VkMutableDescriptorTypeCreateInfoVALVE *mutable_info =
+ vk_find_struct_const(pCreateInfo->pNext, MUTABLE_DESCRIPTOR_TYPE_CREATE_INFO_VALVE);
uint32_t num_bindings = 0;
uint32_t immutable_sampler_count = 0;
set_layout->binding[b].size = 16;
alignment = 16;
break;
+ case VK_DESCRIPTOR_TYPE_MUTABLE_VALVE: {
+ uint64_t mutable_size = 0, mutable_align = 0;
+ radv_mutable_descriptor_type_size_alignment(&mutable_info->pMutableDescriptorTypeLists[j],
+ &mutable_size, &mutable_align);
+ assert(mutable_size && mutable_align);
+ set_layout->binding[b].size = mutable_size;
+ alignment = mutable_align;
+ break;
+ }
case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT:
alignment = 16;
set_layout->binding[b].size = descriptor_count;
vk_find_struct_const(pCreateInfo->pNext, DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO);
VkDescriptorSetVariableDescriptorCountLayoutSupport *variable_count =
vk_find_struct((void*)pCreateInfo->pNext, DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT);
+ const VkMutableDescriptorTypeCreateInfoVALVE *mutable_info =
+ vk_find_struct_const(pCreateInfo->pNext, MUTABLE_DESCRIPTOR_TYPE_CREATE_INFO_VALVE);
if (variable_count) {
variable_count->maxVariableDescriptorCount = 0;
}
descriptor_size = descriptor_count;
descriptor_count = 1;
break;
+ case VK_DESCRIPTOR_TYPE_MUTABLE_VALVE:
+ if (!radv_mutable_descriptor_type_size_alignment(&mutable_info->pMutableDescriptorTypeLists[i],
+ &descriptor_size, &descriptor_alignment)) {
+ supported = false;
+ }
+ break;
default:
break;
}
if (pool->current_offset + layout_size <= pool->size) {
set->bo = pool->bo;
set->mapped_ptr = (uint32_t*)(pool->mapped_ptr + pool->current_offset);
- set->va = radv_buffer_get_va(set->bo) + pool->current_offset;
+ set->va = set->bo ? (radv_buffer_get_va(set->bo) + pool->current_offset) : 0;
if (!pool->host_memory_base) {
pool->entries[pool->entry_count].offset = pool->current_offset;
pool->entries[pool->entry_count].size = layout_size;
}
set->bo = pool->bo;
set->mapped_ptr = (uint32_t*)(pool->mapped_ptr + offset);
- set->va = radv_buffer_get_va(set->bo) + offset;
+ set->va = set->bo ? (radv_buffer_get_va(set->bo) + offset) : 0;
memmove(&pool->entries[index + 1], &pool->entries[index],
sizeof(pool->entries[0]) * (pool->entry_count - index));
pool->entries[index].offset = offset;
if (pool->bo)
device->ws->buffer_destroy(pool->bo);
+ if (pool->host_bo)
+ vk_free2(&device->vk.alloc, pAllocator, pool->host_bo);
vk_object_base_finish(&pool->base);
vk_free2(&device->vk.alloc, pAllocator, pool);
uint64_t size = sizeof(struct radv_descriptor_pool);
uint64_t bo_size = 0, bo_count = 0, range_count = 0;
+ const VkMutableDescriptorTypeCreateInfoVALVE *mutable_info =
+ vk_find_struct_const(pCreateInfo->pNext, MUTABLE_DESCRIPTOR_TYPE_CREATE_INFO_VALVE);
+
vk_foreach_struct(ext, pCreateInfo->pNext) {
switch (ext->sType) {
case VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO_EXT: {
case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
bo_size += 64 * pCreateInfo->pPoolSizes[i].descriptorCount;
break;
+ case VK_DESCRIPTOR_TYPE_MUTABLE_VALVE:
+ /* Per spec, if a mutable descriptor type list is provided for the pool entry, we
+ * allocate enough memory to hold any subset of that list.
+ * If there is no mutable descriptor type list available,
+ * we must allocate enough for any supported mutable descriptor type, i.e. 64 bytes. */
+ if (mutable_info && i < mutable_info->mutableDescriptorTypeListCount) {
+ uint64_t mutable_size, mutable_alignment;
+ if (radv_mutable_descriptor_type_size_alignment(&mutable_info->pMutableDescriptorTypeLists[i],
+ &mutable_size, &mutable_alignment)) {
+ bo_size += mutable_size * pCreateInfo->pPoolSizes[i].descriptorCount;
+ }
+ } else {
+ bo_size += 64 * pCreateInfo->pPoolSizes[i].descriptorCount;
+ }
+ break;
case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
bo_size += 96 * pCreateInfo->pPoolSizes[i].descriptorCount;
break;
}
if (bo_size) {
- pool->bo = device->ws->buffer_create(device->ws, bo_size, 32,
- RADEON_DOMAIN_VRAM,
- RADEON_FLAG_NO_INTERPROCESS_SHARING |
- RADEON_FLAG_READ_ONLY |
- RADEON_FLAG_32BIT,
- RADV_BO_PRIORITY_DESCRIPTOR);
- if (!pool->bo) {
- radv_destroy_descriptor_pool(device, pAllocator, pool);
- return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
- }
- pool->mapped_ptr = (uint8_t*)device->ws->buffer_map(pool->bo);
- if (!pool->mapped_ptr) {
- radv_destroy_descriptor_pool(device, pAllocator, pool);
- return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
+ if (!(pCreateInfo->flags & VK_DESCRIPTOR_POOL_CREATE_HOST_ONLY_BIT_VALVE)) {
+ pool->bo = device->ws->buffer_create(device->ws, bo_size, 32,
+ RADEON_DOMAIN_VRAM,
+ RADEON_FLAG_NO_INTERPROCESS_SHARING |
+ RADEON_FLAG_READ_ONLY |
+ RADEON_FLAG_32BIT,
+ RADV_BO_PRIORITY_DESCRIPTOR);
+ if (!pool->bo) {
+ radv_destroy_descriptor_pool(device, pAllocator, pool);
+ return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
+ }
+ pool->mapped_ptr = (uint8_t*)device->ws->buffer_map(pool->bo);
+ if (!pool->mapped_ptr) {
+ radv_destroy_descriptor_pool(device, pAllocator, pool);
+ return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
+ }
+ } else {
+ pool->host_bo = vk_alloc2(&device->vk.alloc, pAllocator, bo_size, 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (!pool->host_bo) {
+ radv_destroy_descriptor_pool(device, pAllocator, pool);
+ return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
+ }
+ pool->mapped_ptr = pool->host_bo;
}
}
pool->size = bo_size;
dst_buffer_list += dst_binding_layout->buffer_offset;
dst_buffer_list += copyset->dstArrayElement;
+ /* In case of copies between mutable descriptor types
+ * and non-mutable descriptor types. */
+ size_t copy_size = MIN2(src_binding_layout->size, dst_binding_layout->size);
+
for (j = 0; j < copyset->descriptorCount; ++j) {
switch (src_binding_layout->type) {
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
break;
}
default:
- memcpy(dst_ptr, src_ptr, src_binding_layout->size);
+ memcpy(dst_ptr, src_ptr, copy_size);
}
src_ptr += src_binding_layout->size / 4;
dst_ptr += dst_binding_layout->size / 4;