uint32_t write_count,
const VkWriteDescriptorSet *writes)
{
- assert(layout->descriptor_buffer_size < sizeof(push_set->data));
+ assert(layout->non_variable_descriptor_buffer_size < sizeof(push_set->data));
struct nvk_descriptor_set set = {
.layout = layout,
.bo_size = sizeof(push_set->data),
nvk_descriptor_set_create(struct nvk_device *device,
struct nvk_descriptor_pool *pool,
struct nvk_descriptor_set_layout *layout,
- const uint32_t *variable_count,
+ uint32_t variable_count,
struct nvk_descriptor_set **out_set)
{
struct nvk_descriptor_set *set;
if (pool->entry_count == pool->max_entry_count)
return VK_ERROR_OUT_OF_POOL_MEMORY;
- set->bo_size = layout->descriptor_buffer_size;
- if (layout->descriptor_buffer_size > 0) {
+ set->bo_size = layout->non_variable_descriptor_buffer_size;
+
+ if (layout->binding_count > 0 &&
+ (layout->binding[layout->binding_count - 1].flags &
+ VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT_EXT)) {
+ uint32_t stride = layout->binding[layout->binding_count-1].stride;
+ set->bo_size += stride * variable_count;
+ }
+
+ if (set->bo_size > 0) {
if (pool->current_offset + set->bo_size > pool->size)
return VK_ERROR_OUT_OF_POOL_MEMORY;
if (layout->binding[b].immutable_samplers == NULL)
continue;
- for (uint32_t j = 0; j < layout->binding[b].array_size; j++)
+ uint32_t array_size = layout->binding[b].array_size;
+ if (layout->binding[b].flags &
+ VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT_EXT)
+ array_size = variable_count;
+
+ for (uint32_t j = 0; j < array_size; j++)
write_image_view_desc(set, NULL, b, j, layout->binding[b].type);
}
struct nvk_descriptor_set *set = NULL;
+ const VkDescriptorSetVariableDescriptorCountAllocateInfo *var_desc_count =
+ vk_find_struct_const(pAllocateInfo->pNext,
+ DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO);
+
/* Allocate one descriptor set (and its descriptor buffer space) per
 * requested layout in pAllocateInfo->pSetLayouts.
 */
for (i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
VK_FROM_HANDLE(nvk_descriptor_set_layout, layout,
pAllocateInfo->pSetLayouts[i]);
- const uint32_t *variable_count = NULL;
+ /* If descriptorSetCount is zero or this structure is not included in
+ * the pNext chain, then the variable lengths are considered to be zero.
+ */
+ const uint32_t variable_count =
+ var_desc_count && var_desc_count->descriptorSetCount > 0 ?
+ var_desc_count->pDescriptorCounts[i] : 0;
+
result = nvk_descriptor_set_create(device, pool, layout,
variable_count, &set);
if (result != VK_SUCCESS)
if (stride > 0) {
assert(stride <= UINT8_MAX);
assert(util_is_power_of_two_nonzero(align));
+
buffer_size = ALIGN_POT(buffer_size, align);
layout->binding[b].offset = buffer_size;
layout->binding[b].stride = stride;
- buffer_size += stride * binding->descriptorCount;
+
+ if (layout->binding[b].flags &
+ VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT) {
+ /* From the Vulkan 1.3.256 spec:
+ *
+ * VUID-VkDescriptorSetLayoutBindingFlagsCreateInfo-pBindingFlags-03004
+ * "If an element of pBindingFlags includes
+ * VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT, then
+ * all other elements of
+ * VkDescriptorSetLayoutCreateInfo::pBindings must have a
+ * smaller value of binding"
+ *
+ * In other words, it has to be the last binding.
+ */
+ assert(b == num_bindings - 1);
+      } else {
+         /* Fixed-count binding: reserve its descriptor storage up front in
+          * the layout's non-variable buffer size.  For the variable-count
+          * binding (always the last one, per the assert above) the storage
+          * is instead sized at descriptor-set allocation time from the
+          * requested variable count.
+          */
+         buffer_size += stride * binding->descriptorCount;
+      }
}
}
- layout->descriptor_buffer_size = buffer_size;
+ layout->non_variable_descriptor_buffer_size = buffer_size;
layout->dynamic_buffer_count = dynamic_buffer_count;
struct mesa_sha1 sha1_ctx;
_mesa_sha1_init(&sha1_ctx);
#define SHA1_UPDATE_VALUE(x) _mesa_sha1_update(&sha1_ctx, &(x), sizeof(x));
- SHA1_UPDATE_VALUE(layout->descriptor_buffer_size);
+ SHA1_UPDATE_VALUE(layout->non_variable_descriptor_buffer_size);
SHA1_UPDATE_VALUE(layout->dynamic_buffer_count);
SHA1_UPDATE_VALUE(layout->binding_count);