VK_KHR_get_physical_device_properties2 DONE (anv, lvp, radv, tu, v3dv, vn)
VK_KHR_maintenance1 DONE (anv, lvp, radv, tu, v3dv, vn)
VK_KHR_maintenance2 DONE (anv, lvp, radv, tu, v3dv, vn)
- VK_KHR_maintenance3 DONE (anv, lvp, radv, tu, vn)
+ VK_KHR_maintenance3 DONE (anv, lvp, radv, tu, v3dv, vn)
VK_KHR_multiview DONE (anv, lvp, radv, tu, vn)
VK_KHR_relaxed_block_layout DONE (anv, lvp, radv, tu, vn)
VK_KHR_sampler_ycbcr_conversion DONE (anv, radv, tu, vn)
}
}
+/*
+ * Returns the size, in bytes, of the largest descriptor representation we
+ * store in a descriptor pool BO (the maximum of the sampler, combined
+ * image/sampler and sampled image descriptor structs).
+ */
+uint32_t
+v3dv_max_descriptor_bo_size(void)
+{
+   return MAX3(sizeof(struct v3dv_sampler_descriptor),
+               sizeof(struct v3dv_combined_image_sampler_descriptor),
+               sizeof(struct v3dv_sampled_image_descriptor));
+}
+
/*
* For a given descriptor defined by the descriptor_set it belongs, its
* binding layout, and array_index, it returns the map region assigned to it
}
}
}
+
+/*
+ * Implements vkGetDescriptorSetLayoutSupport: reports whether a descriptor
+ * set layout can be created, by checking that the accumulated host-side and
+ * BO-side storage for all bindings fits in a 32-bit size (our allocations
+ * are 32-bit limited).
+ */
+void
+v3dv_GetDescriptorSetLayoutSupport(
+   VkDevice device,
+   const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
+   VkDescriptorSetLayoutSupport *pSupport)
+{
+   /* vk_create_sorted_bindings allocates a sorted copy of the bindings that
+    * we must free before returning. On allocation failure we conservatively
+    * report the layout as unsupported.
+    */
+   VkDescriptorSetLayoutBinding *bindings = NULL;
+   VkResult result = vk_create_sorted_bindings(
+      pCreateInfo->pBindings, pCreateInfo->bindingCount, &bindings);
+   if (result != VK_SUCCESS) {
+      pSupport->supported = false;
+      return;
+   }
+
+   bool supported = true;
+
+   /* Host storage: one struct v3dv_descriptor per descriptor, plus the
+    * fixed struct v3dv_descriptor_set header. BO storage: per-type size
+    * from descriptor_bo_size(), which may be 0 for types with no BO data.
+    */
+   uint32_t desc_host_size = sizeof(struct v3dv_descriptor);
+   uint32_t host_size = sizeof(struct v3dv_descriptor_set);
+   uint32_t bo_size = 0;
+   for (uint32_t i = 0; i < pCreateInfo->bindingCount; i++) {
+      const VkDescriptorSetLayoutBinding *binding = bindings + i;
+
+      /* Reject if host_size + descriptorCount * desc_host_size would
+       * overflow uint32_t (division form avoids overflowing the check
+       * itself).
+       */
+      if ((UINT32_MAX - host_size) / desc_host_size < binding->descriptorCount) {
+         supported = false;
+         break;
+      }
+
+      /* Same overflow guard for the BO size; only meaningful when this
+       * descriptor type actually consumes BO storage (desc_bo_size > 0,
+       * which also avoids a division by zero).
+       */
+      uint32_t desc_bo_size = descriptor_bo_size(binding->descriptorType);
+      if (desc_bo_size > 0 &&
+          (UINT32_MAX - bo_size) / desc_bo_size < binding->descriptorCount) {
+         supported = false;
+         break;
+      }
+
+      host_size += binding->descriptorCount * desc_host_size;
+      bo_size += binding->descriptorCount * desc_bo_size;
+   }
+
+   free(bindings);
+
+   pSupport->supported = supported;
+}
.KHR_external_memory_fd = true,
.KHR_maintenance1 = true,
.KHR_maintenance2 = true,
+ .KHR_maintenance3 = true,
#ifdef V3DV_HAS_SURFACE
.KHR_swapchain = true,
#endif
VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES;
break;
}
+ case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES: {
+ VkPhysicalDeviceMaintenance3Properties *props =
+ (VkPhysicalDeviceMaintenance3Properties *)ext;
+ /* We don't really have special restrictions for the maximum
+ * descriptors per set, other than maybe not exceeding the limits
+ * of addressable memory in a single allocation on either the host
+ * or the GPU. This will be a much larger limit than any of the
+ * per-stage limits already available in Vulkan though, so in practice,
+ * it is not expected to limit anything beyond what is already
+ * constrained through per-stage limits.
+ */
+ uint32_t max_host_descriptors =
+ (UINT32_MAX - sizeof(struct v3dv_descriptor_set)) /
+ sizeof(struct v3dv_descriptor);
+ uint32_t max_gpu_descriptors =
+ (UINT32_MAX / v3dv_max_descriptor_bo_size());
+ props->maxPerSetDescriptors =
+ MIN2(max_host_descriptors, max_gpu_descriptors);
+
+ /* Minimum required by the spec */
+ props->maxMemoryAllocationSize = MAX_MEMORY_ALLOCATION_SIZE;
+ break;
+ }
default:
v3dv_debug_ignored_stype(ext->sType);
break;
VkDeviceSize size)
{
/* Our kernel interface is 32-bit */
- if (size > UINT32_MAX)
- return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+ assert(size <= UINT32_MAX);
mem->bo = v3dv_bo_alloc(device, size, "device_alloc", false);
if (!mem->bo)
}
VkResult result = VK_SUCCESS;
- if (wsi_info) {
- result = device_alloc_for_wsi(device, pAllocator, mem,
- pAllocateInfo->allocationSize);
- } else if (fd_info && fd_info->handleType) {
- assert(fd_info->handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT ||
- fd_info->handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
- result = device_import_bo(device, pAllocator,
- fd_info->fd, pAllocateInfo->allocationSize,
- &mem->bo);
- mem->has_bo_ownership = false;
- if (result == VK_SUCCESS)
- close(fd_info->fd);
+
+ /* We always allocate device memory in multiples of a page, so round up
+ * requested size to that.
+ */
+ VkDeviceSize alloc_size = ALIGN(pAllocateInfo->allocationSize, 4096);
+
+ if (unlikely(alloc_size > MAX_MEMORY_ALLOCATION_SIZE)) {
+ result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
} else {
- result = device_alloc(device, mem, pAllocateInfo->allocationSize);
+ if (wsi_info) {
+ result = device_alloc_for_wsi(device, pAllocator, mem, alloc_size);
+ } else if (fd_info && fd_info->handleType) {
+ assert(fd_info->handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT ||
+ fd_info->handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
+ result = device_import_bo(device, pAllocator,
+ fd_info->fd, alloc_size, &mem->bo);
+ mem->has_bo_ownership = false;
+ if (result == VK_SUCCESS)
+ close(fd_info->fd);
+ } else {
+ result = device_alloc(device, mem, alloc_size);
+ }
}
if (result != VK_SUCCESS) {