goto fail_free_name;
}
+ if (device->has_set_iova) {
+ mtx_init(&device->vma_mutex, mtx_plain);
+ util_vma_heap_init(&device->vma, device->va_start,
+ ROUND_DOWN_TO(device->va_size, 4096));
+ }
+
fd_get_driver_uuid(device->driver_uuid);
fd_get_device_uuid(device->device_uuid, &device->dev_id);
&supported_extensions,
&dispatch_table);
if (result != VK_SUCCESS)
- goto fail_free_name;
+ goto fail_free_vma;
device->vk.supported_sync_types = device->sync_types;
if (result != VK_SUCCESS) {
vk_startup_errorf(instance, result, "WSI init failure");
vk_physical_device_finish(&device->vk);
- goto fail_free_name;
+ goto fail_free_vma;
}
#endif
return VK_SUCCESS;
+fail_free_vma:
+ if (device->has_set_iova)
+ util_vma_heap_finish(&device->vma);
fail_free_name:
vk_free(&instance->vk.alloc, (void *)device->name);
return result;
if (device->master_fd != -1)
close(device->master_fd);
+ if (device->has_set_iova)
+ util_vma_heap_finish(&device->vma);
+
vk_free(&device->instance->vk.alloc, (void *)device->name);
vk_physical_device_finish(&device->vk);
return tu_drm_get_param(dev, MSM_PARAM_GMEM_BASE, base);
}
+/* Query the kernel for the userspace-manageable VA range (start and size).
+ * Returns 0 on success, or the tu_drm_get_param error on failure; the
+ * caller treats a failure as "userspace IOVA allocation not supported".
+ */
+static int
+tu_drm_get_va_prop(const struct tu_physical_device *dev,
+                   uint64_t *va_start, uint64_t *va_size)
+{
+   uint64_t val;
+
+   int ret = tu_drm_get_param(dev, MSM_PARAM_VA_START, &val);
+   if (ret)
+      return ret;
+   *va_start = val;
+
+   ret = tu_drm_get_param(dev, MSM_PARAM_VA_SIZE, &val);
+   if (ret)
+      return ret;
+   *va_size = val;
+
+   return 0;
+}
+
int
tu_device_get_gpu_timestamp(struct tu_device *dev, uint64_t *ts)
{
}
static VkResult
+tu_allocate_userspace_iova(struct tu_device *dev,
+                           uint32_t gem_handle,
+                           uint64_t size,
+                           uint64_t *iova)
+{
+   /* Carve a VA range out of the physical device's heap, then tell the
+    * kernel to bind the BO at that address via MSM_INFO_SET_IOVA.
+    */
+   struct tu_physical_device *pdev = dev->physical_device;
+
+   mtx_lock(&pdev->vma_mutex);
+
+   /* Allocate bottom-up, page-aligned. */
+   pdev->vma.alloc_high = false;
+   *iova = util_vma_heap_alloc(&pdev->vma, size, 0x1000);
+
+   mtx_unlock(&pdev->vma_mutex);
+
+   if (!*iova)
+      return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+
+   struct drm_msm_gem_info req = {
+      .handle = gem_handle,
+      .info = MSM_INFO_SET_IOVA,
+      .value = *iova,
+   };
+
+   int ret =
+      drmCommandWriteRead(dev->fd, DRM_MSM_GEM_INFO, &req, sizeof(req));
+   if (ret < 0) {
+      /* Return the range to the heap; otherwise a failed SET_IOVA
+       * permanently leaks VA space, since no caller frees it on this path.
+       */
+      mtx_lock(&pdev->vma_mutex);
+      util_vma_heap_free(&pdev->vma, *iova, size);
+      mtx_unlock(&pdev->vma_mutex);
+      *iova = 0;
+      return VK_ERROR_OUT_OF_HOST_MEMORY;
+   }
+
+   return VK_SUCCESS;
+}
+
+/* Legacy path: the kernel chose the BO's address at allocation time;
+ * just query it. tu_gem_info returns 0 on failure.
+ */
+static VkResult
+tu_allocate_kernel_iova(struct tu_device *dev,
+                        uint32_t gem_handle,
+                        uint64_t *iova)
+{
+   uint64_t addr = tu_gem_info(dev, gem_handle, MSM_INFO_GET_IOVA);
+   *iova = addr;
+   return addr ? VK_SUCCESS : VK_ERROR_OUT_OF_DEVICE_MEMORY;
+}
+
+static VkResult
tu_bo_init(struct tu_device *dev,
struct tu_bo *bo,
uint32_t gem_handle,
uint64_t size,
- bool dump)
+ enum tu_bo_alloc_flags flags)
{
- uint64_t iova = tu_gem_info(dev, gem_handle, MSM_INFO_GET_IOVA);
- if (!iova) {
- tu_gem_close(dev, gem_handle);
- return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+ VkResult result = VK_SUCCESS;
+ uint64_t iova = 0;
+
+ if (dev->physical_device->has_set_iova) {
+ result = tu_allocate_userspace_iova(dev, gem_handle, size, &iova);
+ } else {
+ result = tu_allocate_kernel_iova(dev, gem_handle, &iova);
}
+ if (result != VK_SUCCESS)
+ goto fail_bo_list;
+
mtx_lock(&dev->bo_mutex);
uint32_t idx = dev->bo_count++;
struct drm_msm_gem_submit_bo *new_ptr =
vk_realloc(&dev->vk.alloc, dev->bo_list, new_len * sizeof(*dev->bo_list),
8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
- if (!new_ptr)
+ if (!new_ptr) {
+ result = VK_ERROR_OUT_OF_HOST_MEMORY;
goto fail_bo_list;
+ }
dev->bo_list = new_ptr;
dev->bo_list_size = new_len;
}
+ bool dump = flags & TU_BO_ALLOC_ALLOW_DUMP;
dev->bo_list[idx] = (struct drm_msm_gem_submit_bo) {
.flags = MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE |
COND(dump, MSM_SUBMIT_BO_DUMP),
fail_bo_list:
tu_gem_close(dev, gem_handle);
- return VK_ERROR_OUT_OF_HOST_MEMORY;
+ return result;
}
VkResult
assert(bo && bo->gem_handle == 0);
VkResult result =
- tu_bo_init(dev, bo, req.handle, size, flags & TU_BO_ALLOC_ALLOW_DUMP);
+ tu_bo_init(dev, bo, req.handle, size, flags);
if (result != VK_SUCCESS)
memset(bo, 0, sizeof(*bo));
return VK_SUCCESS;
}
- VkResult result = tu_bo_init(dev, bo, gem_handle, size, false);
+ VkResult result =
+ tu_bo_init(dev, bo, gem_handle, size, TU_BO_ALLOC_NO_FLAGS);
if (result != VK_SUCCESS)
memset(bo, 0, sizeof(*bo));
mtx_unlock(&dev->bo_mutex);
+ if (dev->physical_device->has_set_iova) {
+ mtx_lock(&dev->physical_device->vma_mutex);
+ util_vma_heap_free(&dev->physical_device->vma, bo->iova, bo->size);
+ mtx_unlock(&dev->physical_device->vma_mutex);
+ }
+
/* Our BO structs are stored in a sparse array in the physical device,
* so we don't want to free the BO pointer, instead we want to reset it
* to 0, to signal that array entry as being free.
goto fail;
}
+ device->has_set_iova = !tu_drm_get_va_prop(device, &device->va_start,
+ &device->va_size);
+
struct stat st;
if (stat(primary_path, &st) == 0) {