device->kmd_backend->gem_close(device, bo->gem_handle);
}
-static void anv_bo_vma_free(struct anv_device *device, struct anv_bo *bo)
+static void
+anv_bo_vma_free(struct anv_device *device, struct anv_bo *bo)
{
- if (bo->offset != 0 && !bo->has_fixed_address)
- anv_vma_free(device, bo->offset, bo->size + bo->_ccs_size);
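+   /* Fixed-address BOs never take their VMA from a heap, and
+    * offset == 0 means no VMA was ever allocated.
+    */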
+ if (bo->offset != 0 && !bo->has_fixed_address) {
+ assert(bo->vma_heap != NULL);
+ anv_vma_free(device, bo->vma_heap, bo->offset, bo->size + bo->_ccs_size);
+ }
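+   /* Whether or not a VMA was freed above, the BO no longer owns one. */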
+ bo->vma_heap = NULL;
}
static void
enum anv_bo_alloc_flags alloc_flags,
uint64_t explicit_address)
{
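+   /* The BO must not already own a VMA. */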
+ assert(bo->vma_heap == NULL);
assert(explicit_address == intel_48b_address(explicit_address));
uint32_t align = device->physical->info.mem_alignment;
bo->offset = explicit_address;
} else {
bo->offset = anv_vma_alloc(device, bo->size + bo->_ccs_size,
- align, alloc_flags, explicit_address);
+ align, alloc_flags, explicit_address,
+ &bo->vma_heap);
if (bo->offset == 0) {
anv_bo_unmap_close(device, bo);
return vk_errorf(device, VK_ERROR_OUT_OF_DEVICE_MEMORY,
}
}
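+/* Map allocation flags to the device VMA heap the address should come
+ * from: client-visible addresses use vma_cva, 32-bit addresses use
+ * vma_lo, and everything else uses vma_hi.
+ */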
+static struct util_vma_heap *
+anv_vma_heap_for_flags(struct anv_device *device,
+ enum anv_bo_alloc_flags alloc_flags)
+{
+ if (alloc_flags & ANV_BO_ALLOC_CLIENT_VISIBLE_ADDRESS)
+ return &device->vma_cva;
+
+ if (alloc_flags & ANV_BO_ALLOC_32BIT_ADDRESS)
+ return &device->vma_lo;
+
+ return &device->vma_hi;
+}
+
uint64_t
anv_vma_alloc(struct anv_device *device,
uint64_t size, uint64_t align,
enum anv_bo_alloc_flags alloc_flags,
- uint64_t client_address)
+ uint64_t client_address,
+ struct util_vma_heap **out_vma_heap)
{
pthread_mutex_lock(&device->vma_mutex);
uint64_t addr = 0;
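+   /* Record the heap so the caller can hand it back to anv_vma_free(). */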
+ *out_vma_heap = anv_vma_heap_for_flags(device, alloc_flags);
if (alloc_flags & ANV_BO_ALLOC_CLIENT_VISIBLE_ADDRESS) {
if (client_address) {
- if (util_vma_heap_alloc_addr(&device->vma_cva,
+ if (util_vma_heap_alloc_addr(*out_vma_heap,
client_address, size)) {
addr = client_address;
}
} else {
- addr = util_vma_heap_alloc(&device->vma_cva, size, align);
+ addr = util_vma_heap_alloc(*out_vma_heap, size, align);
}
/* We don't want to fall back to other heaps */
goto done;
assert(client_address == 0);
- if (!(alloc_flags & ANV_BO_ALLOC_32BIT_ADDRESS))
- addr = util_vma_heap_alloc(&device->vma_hi, size, align);
-
- if (addr == 0)
- addr = util_vma_heap_alloc(&device->vma_lo, size, align);
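+   /* Allocate from the selected heap only; falling back to another heap
+    * would leave *out_vma_heap pointing at the wrong heap.
+    */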
+ addr = util_vma_heap_alloc(*out_vma_heap, size, align);
done:
pthread_mutex_unlock(&device->vma_mutex);
void
anv_vma_free(struct anv_device *device,
+ struct util_vma_heap *vma_heap,
uint64_t address, uint64_t size)
{
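+   /* The heap must be the one returned by anv_vma_alloc(). */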
+ assert(vma_heap == &device->vma_lo ||
+ vma_heap == &device->vma_cva ||
+ vma_heap == &device->vma_hi);
+
const uint64_t addr_48b = intel_48b_address(address);
pthread_mutex_lock(&device->vma_mutex);
- if (addr_48b >= LOW_HEAP_MIN_ADDRESS &&
- addr_48b <= LOW_HEAP_MAX_ADDRESS) {
- util_vma_heap_free(&device->vma_lo, addr_48b, size);
- } else if (addr_48b >= CLIENT_VISIBLE_HEAP_MIN_ADDRESS &&
- addr_48b <= CLIENT_VISIBLE_HEAP_MAX_ADDRESS) {
- util_vma_heap_free(&device->vma_cva, addr_48b, size);
- } else {
- assert(addr_48b >= HIGH_HEAP_MIN_ADDRESS);
- util_vma_heap_free(&device->vma_hi, addr_48b, size);
- }
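+   /* Return the address to the heap it was allocated from. */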
+ util_vma_heap_free(vma_heap, addr_48b, size);
pthread_mutex_unlock(&device->vma_mutex);
}
struct anv_bo {
const char *name;
+   /* The VMA heap in anv_device from which this BO's offset was allocated.
+    *
+    * This can only be NULL when has_fixed_address is true.
+    */
+ struct util_vma_heap *vma_heap;
+
uint32_t gem_handle;
uint32_t refcount;
uint64_t anv_vma_alloc(struct anv_device *device,
uint64_t size, uint64_t align,
enum anv_bo_alloc_flags alloc_flags,
- uint64_t client_address);
+ uint64_t client_address,
+ struct util_vma_heap **out_vma_heap);
void anv_vma_free(struct anv_device *device,
+ struct util_vma_heap *vma_heap,
uint64_t address, uint64_t size);
struct anv_reloc_list {