anv: use malloc for host only descriptor sets
author     Lionel Landwerlin <lionel.g.landwerlin@intel.com>
           Mon, 6 Feb 2023 08:40:39 +0000 (10:40 +0200)
committer  Marge Bot <emma+marge@anholt.net>
           Tue, 7 Feb 2023 07:28:32 +0000 (07:28 +0000)
On integrated products this makes almost no difference, but on discrete
it's pretty important.

Signed-off-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Reviewed-by: Ivan Briano <ivan.briano@intel.com>
Tested-by: Chuansheng Liu <chuansheng.liu@intel.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/21131>
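
For context: the malloc path added below applies to descriptor pools created with VK_DESCRIPTOR_POOL_CREATE_HOST_ONLY_BIT_EXT (from VK_EXT_mutable_descriptor_type). Sets allocated from such a pool are only ever written and copied on the CPU and are never bound on the GPU, so their descriptor memory does not need to live in a GPU buffer; that is what makes a plain host allocation safe and avoids a mapped BO on discrete parts. A minimal application-side sketch of requesting such a pool follows; the helper name, maxSets and the pool sizes are illustrative, not part of this patch.

#include <vulkan/vulkan.h>

/* Hypothetical application-side helper (not from this patch): creates a
 * descriptor pool whose sets live entirely on the host.  Assumes the
 * VkDevice was created with VK_EXT_mutable_descriptor_type (or its VALVE
 * predecessor) enabled.  Pool sizes are arbitrary example values.
 */
static VkResult
create_host_only_descriptor_pool(VkDevice device, VkDescriptorPool *pool)
{
   const VkDescriptorPoolSize sizes[] = {
      { .type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, .descriptorCount = 1024 },
      { .type = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE,  .descriptorCount = 1024 },
   };

   const VkDescriptorPoolCreateInfo info = {
      .sType         = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
      /* This flag is what routes the pool to the malloc-backed path. */
      .flags         = VK_DESCRIPTOR_POOL_CREATE_HOST_ONLY_BIT_EXT,
      .maxSets       = 256,
      .poolSizeCount = 2,
      .pPoolSizes    = sizes,
   };

   return vkCreateDescriptorPool(device, &info, NULL, pool);
}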

src/intel/vulkan/anv_descriptor_set.c
src/intel/vulkan/anv_private.h

diff --git a/src/intel/vulkan/anv_descriptor_set.c b/src/intel/vulkan/anv_descriptor_set.c
index fa0e992..641875a 100644
--- a/src/intel/vulkan/anv_descriptor_set.c
+++ b/src/intel/vulkan/anv_descriptor_set.c
@@ -908,33 +908,40 @@ VkResult anv_CreateDescriptorPool(
       buffer_view_count * sizeof(struct anv_buffer_view) +
       (host_only ? buffer_view_count * ANV_SURFACE_STATE_SIZE : 0);
 
-   pool = vk_object_alloc(&device->vk, pAllocator,
-                          sizeof(*pool) + host_mem_size,
-                          VK_OBJECT_TYPE_DESCRIPTOR_POOL);
+   pool = vk_object_zalloc(&device->vk, pAllocator,
+                           sizeof(*pool) + host_mem_size,
+                           VK_OBJECT_TYPE_DESCRIPTOR_POOL);
    if (!pool)
       return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
 
+   pool->bo_mem_size = descriptor_bo_size;
    pool->host_mem_size = host_mem_size;
    util_vma_heap_init(&pool->host_heap, POOL_HEAP_OFFSET, host_mem_size);
 
    pool->host_only = host_only;
 
-   if (descriptor_bo_size > 0) {
-      VkResult result = anv_device_alloc_bo(device,
-                                            "descriptors",
-                                            descriptor_bo_size,
-                                            ANV_BO_ALLOC_MAPPED |
-                                            ANV_BO_ALLOC_SNOOPED,
-                                            0 /* explicit_address */,
-                                            &pool->bo);
-      if (result != VK_SUCCESS) {
-         vk_object_free(&device->vk, pAllocator, pool);
-         return vk_error(device, VK_ERROR_OUT_OF_DEVICE_MEMORY);
+   if (pool->bo_mem_size > 0) {
+      if (pool->host_only) {
+         pool->host_bo = vk_zalloc(&device->vk.alloc, pool->bo_mem_size, 8,
+                                   VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+         if (pool->host_bo == NULL) {
+            vk_object_free(&device->vk, pAllocator, pool);
+            return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
+         }
+      } else {
+         VkResult result = anv_device_alloc_bo(device,
+                                               "descriptors",
+                                               descriptor_bo_size,
+                                               ANV_BO_ALLOC_MAPPED |
+                                               ANV_BO_ALLOC_SNOOPED,
+                                               0 /* explicit_address */,
+                                               &pool->bo);
+         if (result != VK_SUCCESS) {
+            vk_object_free(&device->vk, pAllocator, pool);
+            return vk_error(device, VK_ERROR_OUT_OF_DEVICE_MEMORY);
+         }
       }
-
-      util_vma_heap_init(&pool->bo_heap, POOL_HEAP_OFFSET, descriptor_bo_size);
-   } else {
-      pool->bo = NULL;
+      util_vma_heap_init(&pool->bo_heap, POOL_HEAP_OFFSET, pool->bo_mem_size);
    }
 
    /* All the surface states allocated by the descriptor pool are internal. We
@@ -968,9 +975,12 @@ void anv_DestroyDescriptorPool(
       anv_descriptor_set_layout_unref(device, set->layout);
    }
 
-   if (pool->bo) {
+   if (pool->bo_mem_size) {
+      if (pool->host_bo)
+         vk_free(&device->vk.alloc, pool->host_bo);
+      if (pool->bo)
+         anv_device_release_bo(device, pool->bo);
       util_vma_heap_finish(&pool->bo_heap);
-      anv_device_release_bo(device, pool->bo);
    }
    anv_state_stream_finish(&pool->surface_state_stream);
 
@@ -994,9 +1004,9 @@ VkResult anv_ResetDescriptorPool(
    util_vma_heap_finish(&pool->host_heap);
    util_vma_heap_init(&pool->host_heap, POOL_HEAP_OFFSET, pool->host_mem_size);
 
-   if (pool->bo) {
+   if (pool->bo_mem_size) {
       util_vma_heap_finish(&pool->bo_heap);
-      util_vma_heap_init(&pool->bo_heap, POOL_HEAP_OFFSET, pool->bo->size);
+      util_vma_heap_init(&pool->bo_heap, POOL_HEAP_OFFSET, pool->bo_mem_size);
    }
 
    anv_state_stream_finish(&pool->surface_state_stream);
@@ -1127,7 +1137,11 @@ anv_descriptor_set_create(struct anv_device *device,
              pool_vma_offset - POOL_HEAP_OFFSET <= INT32_MAX);
       set->desc_mem.offset = pool_vma_offset - POOL_HEAP_OFFSET;
       set->desc_mem.alloc_size = descriptor_buffer_size;
-      set->desc_mem.map = pool->bo->map + set->desc_mem.offset;
+
+      if (pool->host_only)
+         set->desc_mem.map = pool->host_bo + set->desc_mem.offset;
+      else
+         set->desc_mem.map = pool->bo->map + set->desc_mem.offset;
 
       set->desc_addr = (struct anv_address) {
          .bo = pool->bo,
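
Condensing the hunks above: when the pool has descriptor buffer memory to hand out, a host-only pool now backs it with a plain host allocation (pool->host_bo), while GPU-visible pools keep the mapped, snooped BO; both paths suballocate from the same VMA heap, sized by the new bo_mem_size field. A simplified restatement of that decision, with error handling and unrelated setup elided:

/* Simplified restatement of the allocation split in anv_CreateDescriptorPool()
 * above; error handling elided.  pool->bo stays NULL for host-only pools
 * (the pool object is now zero-allocated), which is why reset and destroy
 * key off bo_mem_size instead of the BO pointer.
 */
if (pool->bo_mem_size > 0) {
   if (pool->host_only) {
      /* CPU-only descriptors: a regular host allocation is enough. */
      pool->host_bo = vk_zalloc(&device->vk.alloc, pool->bo_mem_size, 8,
                                VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   } else {
      /* GPU-visible descriptors: keep the mapped, snooped BO. */
      anv_device_alloc_bo(device, "descriptors", pool->bo_mem_size,
                          ANV_BO_ALLOC_MAPPED |
                          ANV_BO_ALLOC_SNOOPED,
                          0 /* explicit_address */,
                          &pool->bo);
   }
   /* Both backings are suballocated through the same VMA heap. */
   util_vma_heap_init(&pool->bo_heap, POOL_HEAP_OFFSET, pool->bo_mem_size);
}
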
diff --git a/src/intel/vulkan/anv_private.h b/src/intel/vulkan/anv_private.h
index 7efb4ab..2ee62ea 100644
--- a/src/intel/vulkan/anv_private.h
+++ b/src/intel/vulkan/anv_private.h
@@ -1865,6 +1865,7 @@ struct anv_descriptor_pool {
    struct vk_object_base base;
 
    struct anv_bo *bo;
+   void *host_bo;
    struct util_vma_heap bo_heap;
 
    struct anv_state_stream surface_state_stream;
@@ -1879,6 +1880,9 @@ struct anv_descriptor_pool {
    /** Allocated size of host_mem */
    uint32_t host_mem_size;
 
+   /** Allocated size of descriptor bo (should be equal to bo->size) */
+   uint32_t bo_mem_size;
+
    /**
     * VK_DESCRIPTOR_POOL_CREATE_HOST_ONLY_BIT_EXT. If set, then
     * surface_state_stream is unused.