anv: link anv_bo to its VMA heap
author     Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Tue, 6 Dec 2022 14:32:49 +0000 (16:32 +0200)
committer  Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Fri, 5 May 2023 11:48:15 +0000 (14:48 +0300)
We want to add more heaps in the future, so not having to do address
checks to find out which heap a BO should be released to is convenient.

Signed-off-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Reviewed-by: Ivan Briano <ivan.briano@intel.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/22847>

src/intel/vulkan/anv_allocator.c
src/intel/vulkan/anv_device.c
src/intel/vulkan/anv_private.h
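
For illustration only (not part of this commit), the following self-contained C
sketch shows the pattern the change adopts: the allocator returns both an
address and a pointer to the heap it came from, the BO stores that pointer, and
the free path uses it directly instead of classifying the address by range. The
names toy_heap, toy_bo, toy_vma_alloc and toy_vma_free are hypothetical
stand-ins for util_vma_heap, anv_bo, anv_vma_alloc and anv_vma_free.

#include <assert.h>
#include <stdint.h>

struct toy_heap {
   uint64_t next;   /* trivial bump allocator standing in for util_vma_heap */
   uint64_t end;
};

struct toy_bo {
   uint64_t offset;
   uint64_t size;
   struct toy_heap *vma_heap;   /* heap the offset was taken from */
};

/* Like the new anv_vma_alloc(): report which heap satisfied the request. */
static uint64_t
toy_vma_alloc(struct toy_heap *heap, uint64_t size,
              struct toy_heap **out_heap)
{
   if (heap->next + size > heap->end)
      return 0;

   uint64_t addr = heap->next;
   heap->next += size;
   *out_heap = heap;
   return addr;
}

/* Like the new anv_vma_free(): no low/CVA/high address-range checks,
 * the caller hands over the heap recorded at allocation time. */
static void
toy_vma_free(struct toy_heap *heap, uint64_t addr, uint64_t size)
{
   assert(heap != NULL);
   (void)addr;
   (void)size;
   /* A real heap would return [addr, addr + size) to its free list here. */
}

int main(void)
{
   struct toy_heap vma_hi = { .next = 0x100000, .end = 0x200000 };
   struct toy_bo bo = { .offset = 0, .size = 4096, .vma_heap = NULL };

   bo.offset = toy_vma_alloc(&vma_hi, bo.size, &bo.vma_heap);
   assert(bo.offset != 0 && bo.vma_heap == &vma_hi);

   /* Release straight to the owning heap, as anv_bo_vma_free() now does. */
   toy_vma_free(bo.vma_heap, bo.offset, bo.size);
   bo.vma_heap = NULL;
   return 0;
}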

diff --git a/src/intel/vulkan/anv_allocator.c b/src/intel/vulkan/anv_allocator.c
index a32319a..40491b2 100644
--- a/src/intel/vulkan/anv_allocator.c
+++ b/src/intel/vulkan/anv_allocator.c
@@ -1380,10 +1380,14 @@ anv_bo_unmap_close(struct anv_device *device, struct anv_bo *bo)
    device->kmd_backend->gem_close(device, bo->gem_handle);
 }
 
-static void anv_bo_vma_free(struct anv_device *device, struct anv_bo *bo)
+static void
+anv_bo_vma_free(struct anv_device *device, struct anv_bo *bo)
 {
-   if (bo->offset != 0 && !bo->has_fixed_address)
-      anv_vma_free(device, bo->offset, bo->size + bo->_ccs_size);
+   if (bo->offset != 0 && !bo->has_fixed_address) {
+      assert(bo->vma_heap != NULL);
+      anv_vma_free(device, bo->vma_heap, bo->offset, bo->size + bo->_ccs_size);
+   }
+   bo->vma_heap = NULL;
 }
 
 static void
@@ -1402,6 +1406,7 @@ anv_bo_vma_alloc_or_close(struct anv_device *device,
                           enum anv_bo_alloc_flags alloc_flags,
                           uint64_t explicit_address)
 {
+   assert(bo->vma_heap == NULL);
    assert(explicit_address == intel_48b_address(explicit_address));
 
    uint32_t align = device->physical->info.mem_alignment;
@@ -1415,7 +1420,8 @@ anv_bo_vma_alloc_or_close(struct anv_device *device,
       bo->offset = explicit_address;
    } else {
       bo->offset = anv_vma_alloc(device, bo->size + bo->_ccs_size,
-                                 align, alloc_flags, explicit_address);
+                                 align, alloc_flags, explicit_address,
+                                 &bo->vma_heap);
       if (bo->offset == 0) {
          anv_bo_unmap_close(device, bo);
          return vk_errorf(device, VK_ERROR_OUT_OF_DEVICE_MEMORY,
diff --git a/src/intel/vulkan/anv_device.c b/src/intel/vulkan/anv_device.c
index f7bfff4..d4636ab 100644
--- a/src/intel/vulkan/anv_device.c
+++ b/src/intel/vulkan/anv_device.c
@@ -3611,24 +3611,39 @@ anv_device_wait(struct anv_device *device, struct anv_bo *bo,
    }
 }
 
+static struct util_vma_heap *
+anv_vma_heap_for_flags(struct anv_device *device,
+                       enum anv_bo_alloc_flags alloc_flags)
+{
+   if (alloc_flags & ANV_BO_ALLOC_CLIENT_VISIBLE_ADDRESS)
+      return &device->vma_cva;
+
+   if (alloc_flags & ANV_BO_ALLOC_32BIT_ADDRESS)
+      return &device->vma_lo;
+
+   return &device->vma_hi;
+}
+
 uint64_t
 anv_vma_alloc(struct anv_device *device,
               uint64_t size, uint64_t align,
               enum anv_bo_alloc_flags alloc_flags,
-              uint64_t client_address)
+              uint64_t client_address,
+              struct util_vma_heap **out_vma_heap)
 {
    pthread_mutex_lock(&device->vma_mutex);
 
    uint64_t addr = 0;
+   *out_vma_heap = anv_vma_heap_for_flags(device, alloc_flags);
 
    if (alloc_flags & ANV_BO_ALLOC_CLIENT_VISIBLE_ADDRESS) {
       if (client_address) {
-         if (util_vma_heap_alloc_addr(&device->vma_cva,
+         if (util_vma_heap_alloc_addr(*out_vma_heap,
                                       client_address, size)) {
             addr = client_address;
          }
       } else {
-         addr = util_vma_heap_alloc(&device->vma_cva, size, align);
+         addr = util_vma_heap_alloc(*out_vma_heap, size, align);
       }
       /* We don't want to fall back to other heaps */
       goto done;
@@ -3636,11 +3651,7 @@ anv_vma_alloc(struct anv_device *device,
 
    assert(client_address == 0);
 
-   if (!(alloc_flags & ANV_BO_ALLOC_32BIT_ADDRESS))
-      addr = util_vma_heap_alloc(&device->vma_hi, size, align);
-
-   if (addr == 0)
-      addr = util_vma_heap_alloc(&device->vma_lo, size, align);
+   addr = util_vma_heap_alloc(*out_vma_heap, size, align);
 
 done:
    pthread_mutex_unlock(&device->vma_mutex);
@@ -3651,22 +3662,18 @@ done:
 
 void
 anv_vma_free(struct anv_device *device,
+             struct util_vma_heap *vma_heap,
              uint64_t address, uint64_t size)
 {
+   assert(vma_heap == &device->vma_lo ||
+          vma_heap == &device->vma_cva ||
+          vma_heap == &device->vma_hi);
+
    const uint64_t addr_48b = intel_48b_address(address);
 
    pthread_mutex_lock(&device->vma_mutex);
 
-   if (addr_48b >= LOW_HEAP_MIN_ADDRESS &&
-       addr_48b <= LOW_HEAP_MAX_ADDRESS) {
-      util_vma_heap_free(&device->vma_lo, addr_48b, size);
-   } else if (addr_48b >= CLIENT_VISIBLE_HEAP_MIN_ADDRESS &&
-              addr_48b <= CLIENT_VISIBLE_HEAP_MAX_ADDRESS) {
-      util_vma_heap_free(&device->vma_cva, addr_48b, size);
-   } else {
-      assert(addr_48b >= HIGH_HEAP_MIN_ADDRESS);
-      util_vma_heap_free(&device->vma_hi, addr_48b, size);
-   }
+   util_vma_heap_free(vma_heap, addr_48b, size);
 
    pthread_mutex_unlock(&device->vma_mutex);
 }
diff --git a/src/intel/vulkan/anv_private.h b/src/intel/vulkan/anv_private.h
index ce533f9..ef22374 100644
--- a/src/intel/vulkan/anv_private.h
+++ b/src/intel/vulkan/anv_private.h
@@ -453,6 +453,12 @@ enum anv_bo_alloc_flags {
 struct anv_bo {
    const char *name;
 
+   /* The VMA heap in anv_device from which this BO takes its offset.
+    *
+    * This can only be NULL when has_fixed_address is true.
+    */
+   struct util_vma_heap *vma_heap;
+
    uint32_t gem_handle;
 
    uint32_t refcount;
@@ -1370,8 +1376,10 @@ int anv_gem_set_caching(struct anv_device *device, uint32_t gem_handle, uint32_t
 uint64_t anv_vma_alloc(struct anv_device *device,
                        uint64_t size, uint64_t align,
                        enum anv_bo_alloc_flags alloc_flags,
-                       uint64_t client_address);
+                       uint64_t client_address,
+                       struct util_vma_heap **out_vma_heap);
 void anv_vma_free(struct anv_device *device,
+                  struct util_vma_heap *vma_heap,
                   uint64_t address, uint64_t size);
 
 struct anv_reloc_list {