accel = vk_zalloc2(&device->vk.alloc, pAllocator, sizeof(*accel), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (accel == NULL)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
vk_object_base_init(&device->vk, &accel->base,
VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_KHR);
const VkAccelerationStructureBuildRangeInfoKHR* const* ppBuildRangeInfos)
{
unreachable("Unimplemented");
- return vk_error(VK_ERROR_FEATURE_NOT_PRESENT);
+ return anv_error(VK_ERROR_FEATURE_NOT_PRESENT);
}
VkResult
const VkCopyAccelerationStructureInfoKHR* pInfo)
{
unreachable("Unimplemented");
- return vk_error(VK_ERROR_FEATURE_NOT_PRESENT);
+ return anv_error(VK_ERROR_FEATURE_NOT_PRESENT);
}
VkResult
const VkCopyAccelerationStructureToMemoryInfoKHR* pInfo)
{
unreachable("Unimplemented");
- return vk_error(VK_ERROR_FEATURE_NOT_PRESENT);
+ return anv_error(VK_ERROR_FEATURE_NOT_PRESENT);
}
VkResult
const VkCopyMemoryToAccelerationStructureInfoKHR* pInfo)
{
unreachable("Unimplemented");
- return vk_error(VK_ERROR_FEATURE_NOT_PRESENT);
+ return anv_error(VK_ERROR_FEATURE_NOT_PRESENT);
}
VkResult
size_t stride)
{
unreachable("Unimplemented");
- return vk_error(VK_ERROR_FEATURE_NOT_PRESENT);
+ return anv_error(VK_ERROR_FEATURE_NOT_PRESENT);
}
void
*/
table->fd = os_create_anonymous_file(BLOCK_POOL_MEMFD_SIZE, "state table");
if (table->fd == -1) {
- result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
+ result = anv_error(VK_ERROR_INITIALIZATION_FAILED);
goto fail_fd;
}
if (!u_vector_init(&table->cleanups,
round_to_power_of_two(sizeof(struct anv_state_table_cleanup)),
128)) {
- result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
+ result = anv_error(VK_ERROR_INITIALIZATION_FAILED);
goto fail_fd;
}
/* Make sure that we don't go outside the bounds of the memfd */
if (size > BLOCK_POOL_MEMFD_SIZE)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
cleanup = u_vector_add(&table->cleanups);
if (!cleanup)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
*cleanup = ANV_STATE_TABLE_CLEANUP_INIT;
map = mmap(NULL, size, PROT_READ | PROT_WRITE,
MAP_SHARED | MAP_POPULATE, table->fd, 0);
if (map == MAP_FAILED) {
- return vk_errorf(table->device, &table->device->vk.base,
+ return anv_errorf(table->device, &table->device->vk.base,
VK_ERROR_OUT_OF_HOST_MEMORY, "mmap failed: %m");
}
*/
pool->fd = os_create_anonymous_file(BLOCK_POOL_MEMFD_SIZE, "block pool");
if (pool->fd == -1)
- return vk_error(VK_ERROR_INITIALIZATION_FAILED);
+ return anv_error(VK_ERROR_INITIALIZATION_FAILED);
pool->wrapper_bo = (struct anv_bo) {
.refcount = 1,
if (!u_vector_init(&pool->mmap_cleanups,
round_to_power_of_two(sizeof(struct anv_mmap_cleanup)),
128)) {
- result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
+ result = anv_error(VK_ERROR_INITIALIZATION_FAILED);
goto fail_fd;
}
MAP_SHARED | MAP_POPULATE, pool->fd,
BLOCK_POOL_MEMFD_CENTER - center_bo_offset);
if (map == MAP_FAILED)
- return vk_errorf(pool->device, &pool->device->vk.base,
+ return anv_errorf(pool->device, &pool->device->vk.base,
VK_ERROR_MEMORY_MAP_FAILED, "mmap failed: %m");
struct anv_bo *new_bo;
if (!cleanup) {
munmap(map, size);
anv_device_release_bo(pool->device, new_bo);
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
}
cleanup->map = map;
cleanup->size = size;
if (pthread_mutex_init(&cache->mutex, NULL)) {
util_sparse_array_finish(&cache->bo_map);
- return vk_errorf(NULL, NULL, VK_ERROR_OUT_OF_HOST_MEMORY,
+ return anv_errorf(NULL, NULL, VK_ERROR_OUT_OF_HOST_MEMORY,
"pthread_mutex_init failed: %m");
}
}
if (gem_handle == 0)
- return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
struct anv_bo new_bo = {
.name = name,
new_bo.map = anv_gem_mmap(device, new_bo.gem_handle, 0, size, 0);
if (new_bo.map == MAP_FAILED) {
anv_gem_close(device, new_bo.gem_handle);
- return vk_errorf(device, &device->vk.base,
+ return anv_errorf(device, &device->vk.base,
VK_ERROR_OUT_OF_HOST_MEMORY,
"mmap failed: %m");
}
if (new_bo.map)
anv_gem_munmap(device, new_bo.map, size);
anv_gem_close(device, new_bo.gem_handle);
- return vk_errorf(device, NULL, VK_ERROR_OUT_OF_DEVICE_MEMORY,
+ return anv_errorf(device, NULL, VK_ERROR_OUT_OF_DEVICE_MEMORY,
"failed to allocate virtual address for BO");
}
} else {
uint32_t gem_handle = anv_gem_userptr(device, host_ptr, size);
if (!gem_handle)
- return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
+ return anv_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
pthread_mutex_lock(&cache->mutex);
assert(bo->gem_handle == gem_handle);
if (bo_flags != bo->flags) {
pthread_mutex_unlock(&cache->mutex);
- return vk_errorf(device, NULL, VK_ERROR_INVALID_EXTERNAL_HANDLE,
+ return anv_errorf(device, NULL, VK_ERROR_INVALID_EXTERNAL_HANDLE,
"same host pointer imported two different ways");
}
if (bo->has_client_visible_address !=
((alloc_flags & ANV_BO_ALLOC_CLIENT_VISIBLE_ADDRESS) != 0)) {
pthread_mutex_unlock(&cache->mutex);
- return vk_errorf(device, NULL, VK_ERROR_INVALID_EXTERNAL_HANDLE,
+ return anv_errorf(device, NULL, VK_ERROR_INVALID_EXTERNAL_HANDLE,
"The same BO was imported with and without buffer "
"device address");
}
if (client_address && client_address != intel_48b_address(bo->offset)) {
pthread_mutex_unlock(&cache->mutex);
- return vk_errorf(device, NULL, VK_ERROR_INVALID_EXTERNAL_HANDLE,
+ return anv_errorf(device, NULL, VK_ERROR_INVALID_EXTERNAL_HANDLE,
"The same BO was imported at two different "
"addresses");
}
if (new_bo.offset == 0) {
anv_gem_close(device, new_bo.gem_handle);
pthread_mutex_unlock(&cache->mutex);
- return vk_errorf(device, NULL, VK_ERROR_OUT_OF_DEVICE_MEMORY,
+ return anv_errorf(device, NULL, VK_ERROR_OUT_OF_DEVICE_MEMORY,
"failed to allocate virtual address for BO");
}
} else {
uint32_t gem_handle = anv_gem_fd_to_handle(device, fd);
if (!gem_handle) {
pthread_mutex_unlock(&cache->mutex);
- return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
+ return anv_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
}
struct anv_bo *bo = anv_device_lookup_bo(device, gem_handle);
if ((bo->flags & EXEC_OBJECT_PINNED) !=
(bo_flags & EXEC_OBJECT_PINNED)) {
pthread_mutex_unlock(&cache->mutex);
- return vk_errorf(device, NULL, VK_ERROR_INVALID_EXTERNAL_HANDLE,
+ return anv_errorf(device, NULL, VK_ERROR_INVALID_EXTERNAL_HANDLE,
"The same BO was imported two different ways");
}
(bo->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) !=
(bo_flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS)) {
pthread_mutex_unlock(&cache->mutex);
- return vk_errorf(device, NULL, VK_ERROR_INVALID_EXTERNAL_HANDLE,
+ return anv_errorf(device, NULL, VK_ERROR_INVALID_EXTERNAL_HANDLE,
"The same BO was imported on two different heaps");
}
if (bo->has_client_visible_address !=
((alloc_flags & ANV_BO_ALLOC_CLIENT_VISIBLE_ADDRESS) != 0)) {
pthread_mutex_unlock(&cache->mutex);
- return vk_errorf(device, NULL, VK_ERROR_INVALID_EXTERNAL_HANDLE,
+ return anv_errorf(device, NULL, VK_ERROR_INVALID_EXTERNAL_HANDLE,
"The same BO was imported with and without buffer "
"device address");
}
if (client_address && client_address != intel_48b_address(bo->offset)) {
pthread_mutex_unlock(&cache->mutex);
- return vk_errorf(device, NULL, VK_ERROR_INVALID_EXTERNAL_HANDLE,
+ return anv_errorf(device, NULL, VK_ERROR_INVALID_EXTERNAL_HANDLE,
"The same BO was imported at two different "
"addresses");
}
if (size == (off_t)-1) {
anv_gem_close(device, gem_handle);
pthread_mutex_unlock(&cache->mutex);
- return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
+ return anv_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
}
struct anv_bo new_bo = {
if (new_bo.offset == 0) {
anv_gem_close(device, new_bo.gem_handle);
pthread_mutex_unlock(&cache->mutex);
- return vk_errorf(device, NULL, VK_ERROR_OUT_OF_DEVICE_MEMORY,
+ return anv_errorf(device, NULL, VK_ERROR_OUT_OF_DEVICE_MEMORY,
"failed to allocate virtual address for BO");
}
} else {
int fd = anv_gem_handle_to_fd(device, bo->gem_handle);
if (fd < 0)
- return vk_error(VK_ERROR_TOO_MANY_OBJECTS);
+ return anv_error(VK_ERROR_TOO_MANY_OBJECTS);
*fd_out = fd;
};
if (gralloc_info->handle->numFds != 1) {
- return vk_errorf(device, &device->vk.base,
+ return anv_errorf(device, &device->vk.base,
VK_ERROR_INVALID_EXTERNAL_HANDLE,
"VkNativeBufferANDROID::handle::numFds is %d, "
"expected 1", gralloc_info->handle->numFds);
0 /* client_address */,
&bo);
if (result != VK_SUCCESS) {
- return vk_errorf(device, &device->vk.base, result,
+ return anv_errorf(device, &device->vk.base, result,
"failed to import dma-buf from VkNativeBufferANDROID");
}
anv_info.isl_tiling_flags = ISL_TILING_Y0_BIT;
break;
case -1:
- result = vk_errorf(device, &device->vk.base,
+ result = anv_errorf(device, &device->vk.base,
VK_ERROR_INVALID_EXTERNAL_HANDLE,
"DRM_IOCTL_I915_GEM_GET_TILING failed for "
"VkNativeBufferANDROID");
goto fail_tiling;
default:
- result = vk_errorf(device, &device->vk.base,
+ result = anv_errorf(device, &device->vk.base,
VK_ERROR_INVALID_EXTERNAL_HANDLE,
"DRM_IOCTL_I915_GEM_GET_TILING returned unknown "
"tiling %d for VkNativeBufferANDROID", i915_tiling);
mem_reqs.memoryRequirements.alignment);
if (bo->size < aligned_image_size) {
- result = vk_errorf(device, &device->vk.base,
+ result = anv_errorf(device, &device->vk.base,
VK_ERROR_INVALID_EXTERNAL_HANDLE,
"dma-buf from VkNativeBufferANDROID is too small for "
"VkImage: %"PRIu64"B < %"PRIu64"B",
0 /* client_address */,
&bo);
if (result != VK_SUCCESS) {
- return vk_errorf(device, &device->vk.base, result,
+ return anv_errorf(device, &device->vk.base, result,
"failed to import dma-buf from VkNativeBufferANDROID");
}
uint64_t img_size = image->bindings[ANV_IMAGE_MEMORY_BINDING_MAIN].memory_range.size;
if (img_size < bo->size) {
- result = vk_errorf(device, &device->vk.base, VK_ERROR_INVALID_EXTERNAL_HANDLE,
+ result = anv_errorf(device, &device->vk.base, VK_ERROR_INVALID_EXTERNAL_HANDLE,
"dma-buf from VkNativeBufferANDROID is too small for "
"VkImage: %"PRIu64"B < %"PRIu64"B",
bo->size, img_size);
result = anv_GetPhysicalDeviceImageFormatProperties2(phys_dev_h,
&image_format_info, &image_format_props);
if (result != VK_SUCCESS) {
- return vk_errorf(device, &device->vk.base, result,
+ return anv_errorf(device, &device->vk.base, result,
"anv_GetPhysicalDeviceImageFormatProperties2 failed "
"inside %s", __func__);
}
* gralloc swapchains.
*/
if (imageUsage != 0) {
- return vk_errorf(device, &device->vk.base, VK_ERROR_FORMAT_NOT_SUPPORTED,
+ return anv_errorf(device, &device->vk.base, VK_ERROR_FORMAT_NOT_SUPPORTED,
"unsupported VkImageUsageFlags(0x%x) for gralloc "
"swapchain", imageUsage);
}
VkResult err = (errno == EMFILE) ? VK_ERROR_TOO_MANY_OBJECTS :
VK_ERROR_OUT_OF_HOST_MEMORY;
close(nativeFenceFd);
- return vk_error(err);
+ return anv_error(err);
}
} else if (semaphore_h != VK_NULL_HANDLE) {
semaphore_fd = nativeFenceFd;
vk_alloc(alloc, list->array_length * sizeof(*list->relocs), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (list->relocs == NULL)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
list->reloc_bos =
vk_alloc(alloc, list->array_length * sizeof(*list->reloc_bos), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (list->reloc_bos == NULL) {
vk_free(alloc, list->relocs);
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
}
memcpy(list->relocs, other_list->relocs,
new_length * sizeof(*list->relocs), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (new_relocs == NULL)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
list->relocs = new_relocs;
struct anv_bo **new_reloc_bos =
new_length * sizeof(*list->reloc_bos), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (new_reloc_bos == NULL)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
list->reloc_bos = new_reloc_bos;
list->array_length = new_length;
vk_realloc(alloc, list->deps, new_length * sizeof(BITSET_WORD), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (new_deps == NULL)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
list->deps = new_deps;
/* Zero out the new data */
struct anv_batch_bo *bbo = vk_alloc(&cmd_buffer->pool->alloc, sizeof(*bbo),
8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (bbo == NULL)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool,
size, &bbo->bo);
struct anv_batch_bo *bbo = vk_alloc(&cmd_buffer->pool->alloc, sizeof(*bbo),
8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (bbo == NULL)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
result = anv_bo_pool_alloc(&cmd_buffer->device->batch_bo_pool,
other_bbo->bo->size, &bbo->bo);
struct anv_batch_bo **seen_bbo = u_vector_add(&cmd_buffer->seen_bbos);
if (seen_bbo == NULL) {
anv_batch_bo_destroy(new_bbo, cmd_buffer);
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
}
*seen_bbo = new_bbo;
struct anv_state *bt_block = u_vector_add(&cmd_buffer->bt_block_states);
if (bt_block == NULL) {
anv_batch_set_error(&cmd_buffer->batch, VK_ERROR_OUT_OF_HOST_MEMORY);
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
}
*bt_block = anv_binding_table_pool_alloc(cmd_buffer->device);
list_for_each_entry(struct anv_batch_bo, bbo, list, link) {
struct anv_batch_bo **bbo_ptr = u_vector_add(&cmd_buffer->seen_bbos);
if (bbo_ptr == NULL)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
*bbo_ptr = bbo;
}
struct drm_i915_gem_exec_object2 *new_objects =
vk_alloc(exec->alloc, new_len * sizeof(*new_objects), 8, exec->alloc_scope);
if (new_objects == NULL)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
struct anv_bo **new_bos =
vk_alloc(exec->alloc, new_len * sizeof(*new_bos), 8, exec->alloc_scope);
if (new_bos == NULL) {
vk_free(exec->alloc, new_objects);
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
}
if (exec->objects) {
cmd_buffer = vk_alloc2(&device->vk.alloc, &pool->alloc, sizeof(*cmd_buffer),
8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (cmd_buffer == NULL)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
result = vk_command_buffer_init(&cmd_buffer->vk, &device->vk);
if (result != VK_SUCCESS)
pool = vk_object_alloc(&device->vk, pAllocator, sizeof(*pool),
VK_OBJECT_TYPE_COMMAND_POOL);
if (pool == NULL)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
assert(pCreateInfo->queueFamilyIndex < device->physical->queue.family_count);
pool->queue_family =
if (!vk_object_multizalloc(&device->vk, &ma, NULL,
VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT))
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
set_layout->ref_cnt = 1;
set_layout->binding_count = num_bindings;
layout = vk_object_alloc(&device->vk, pAllocator, sizeof(*layout),
VK_OBJECT_TYPE_PIPELINE_LAYOUT);
if (layout == NULL)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
layout->num_sets = pCreateInfo->setLayoutCount;
pool = vk_object_alloc(&device->vk, pAllocator, total_size,
VK_OBJECT_TYPE_DESCRIPTOR_POOL);
if (!pool)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
pool->size = pool_size;
pool->next = 0;
ANV_UBO_ALIGNMENT);
if (pool_vma_offset == 0) {
anv_descriptor_pool_free_set(pool, set);
- return vk_error(VK_ERROR_FRAGMENTED_POOL);
+ return anv_error(VK_ERROR_FRAGMENTED_POOL);
}
assert(pool_vma_offset >= POOL_HEAP_OFFSET &&
pool_vma_offset - POOL_HEAP_OFFSET <= INT32_MAX);
template = vk_object_alloc(&device->vk, pAllocator, size,
VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE);
if (template == NULL)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
template->bind_point = pCreateInfo->pipelineBindPoint;
intel_i915_query_alloc(fd, DRM_I915_QUERY_MEMORY_REGIONS);
if (mem_regions == NULL) {
if (device->info.has_local_mem) {
- return vk_errorfi(device->instance, NULL,
+ return anv_errorfi(device->instance, NULL,
VK_ERROR_INCOMPATIBLE_DRIVER,
"failed to memory regions: %m");
}
uint64_t total_phys;
if (!os_get_total_physical_memory(&total_phys)) {
- return vk_errorfi(device->instance, NULL,
+ return anv_errorfi(device->instance, NULL,
VK_ERROR_INITIALIZATION_FAILED,
"failed to get total physical memory: %m");
}
"Failed to get I915_CONTEXT_PARAM_GTT_SIZE: %m");
if (intel_get_aperture_size(fd, &device->gtt_size) == -1) {
- return vk_errorfi(device->instance, NULL,
+ return anv_errorfi(device->instance, NULL,
VK_ERROR_INITIALIZATION_FAILED,
"failed to get aperture size: %m");
}
const struct build_id_note *note =
build_id_find_nhdr_for_addr(anv_physical_device_init_uuids);
if (!note) {
- return vk_errorfi(device->instance, NULL,
+ return anv_errorfi(device->instance, NULL,
VK_ERROR_INITIALIZATION_FAILED,
"Failed to find build-id");
}
unsigned build_id_len = build_id_length(note);
if (build_id_len < 20) {
- return vk_errorfi(device->instance, NULL,
+ return anv_errorfi(device->instance, NULL,
VK_ERROR_INITIALIZATION_FAILED,
"build-id too short. It needs to be a SHA");
}
fd = open(path, O_RDWR | O_CLOEXEC);
if (fd < 0) {
if (errno == ENOMEM) {
- return vk_errorfi(instance, NULL, VK_ERROR_OUT_OF_HOST_MEMORY,
+ return anv_errorfi(instance, NULL, VK_ERROR_OUT_OF_HOST_MEMORY,
"Unable to open device %s: out of memory", path);
}
- return vk_errorfi(instance, NULL, VK_ERROR_INCOMPATIBLE_DRIVER,
+ return anv_errorfi(instance, NULL, VK_ERROR_INCOMPATIBLE_DRIVER,
"Unable to open device %s: %m", path);
}
struct intel_device_info devinfo;
if (!intel_get_device_info_from_fd(fd, &devinfo)) {
- result = vk_error(VK_ERROR_INCOMPATIBLE_DRIVER);
+ result = anv_error(VK_ERROR_INCOMPATIBLE_DRIVER);
goto fail_fd;
}
} else if (devinfo.ver >= 8 && devinfo.ver <= 12) {
/* Gfx8-12 fully supported */
} else {
- result = vk_errorfi(instance, NULL, VK_ERROR_INCOMPATIBLE_DRIVER,
+ result = anv_errorfi(instance, NULL, VK_ERROR_INCOMPATIBLE_DRIVER,
"Vulkan not yet supported on %s", devinfo.name);
goto fail_fd;
}
vk_zalloc(&instance->vk.alloc, sizeof(*device), 8,
VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
if (device == NULL) {
- result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ result = anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
goto fail_fd;
}
NULL, /* We set up extensions later */
&dispatch_table);
if (result != VK_SUCCESS) {
- vk_error(result);
+ anv_error(result);
goto fail_alloc;
}
device->instance = instance;
device->cmd_parser_version =
anv_gem_get_param(fd, I915_PARAM_CMD_PARSER_VERSION);
if (device->cmd_parser_version == -1) {
- result = vk_errorfi(device->instance, NULL,
+ result = anv_errorfi(device->instance, NULL,
VK_ERROR_INITIALIZATION_FAILED,
"failed to get command parser version");
goto fail_base;
}
if (!anv_gem_get_param(fd, I915_PARAM_HAS_WAIT_TIMEOUT)) {
- result = vk_errorfi(device->instance, NULL,
+ result = anv_errorfi(device->instance, NULL,
VK_ERROR_INITIALIZATION_FAILED,
"kernel missing gem wait");
goto fail_base;
}
if (!anv_gem_get_param(fd, I915_PARAM_HAS_EXECBUF2)) {
- result = vk_errorfi(device->instance, NULL,
+ result = anv_errorfi(device->instance, NULL,
VK_ERROR_INITIALIZATION_FAILED,
"kernel missing execbuf2");
goto fail_base;
if (!device->info.has_llc &&
anv_gem_get_param(fd, I915_PARAM_MMAP_VERSION) < 1) {
- result = vk_errorfi(device->instance, NULL,
+ result = anv_errorfi(device->instance, NULL,
VK_ERROR_INITIALIZATION_FAILED,
"kernel missing wc mmap");
goto fail_base;
if (device->info.ver >= 8 && !device->info.is_cherryview &&
!anv_gem_get_param(fd, I915_PARAM_HAS_EXEC_SOFTPIN)) {
- result = vk_errorfi(device->instance, NULL,
+ result = anv_errorfi(device->instance, NULL,
VK_ERROR_INITIALIZATION_FAILED,
"kernel missing softpin");
goto fail_alloc;
}
if (!anv_gem_get_param(fd, I915_PARAM_HAS_EXEC_FENCE_ARRAY)) {
- result = vk_errorfi(device->instance, NULL,
+ result = anv_errorfi(device->instance, NULL,
VK_ERROR_INITIALIZATION_FAILED,
"kernel missing syncobj support");
goto fail_base;
device->compiler = brw_compiler_create(NULL, &device->info);
if (device->compiler == NULL) {
- result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ result = anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
goto fail_base;
}
device->compiler->shader_debug_log = compiler_debug_log;
VkExtensionProperties* pProperties)
{
if (pLayerName)
- return vk_error(VK_ERROR_LAYER_NOT_PRESENT);
+ return anv_error(VK_ERROR_LAYER_NOT_PRESENT);
return vk_enumerate_instance_extension_properties(
&instance_extensions, pPropertyCount, pProperties);
instance = vk_alloc(pAllocator, sizeof(*instance), 8,
VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
if (!instance)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
struct vk_instance_dispatch_table dispatch_table;
vk_instance_dispatch_table_from_entrypoints(
&dispatch_table, pCreateInfo, pAllocator);
if (result != VK_SUCCESS) {
vk_free(pAllocator, instance);
- return vk_error(result);
+ return anv_error(result);
}
instance->physical_devices_enumerated = false;
assert(pCreateInfo->queueCreateInfoCount > 0);
for (uint32_t i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
if (pCreateInfo->pQueueCreateInfos[i].flags != 0)
- return vk_error(VK_ERROR_INITIALIZATION_FAILED);
+ return anv_error(VK_ERROR_INITIALIZATION_FAILED);
}
/* Check if client specified queue priority. */
sizeof(*device), 8,
VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
if (!device)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
struct vk_device_dispatch_table dispatch_table;
vk_device_dispatch_table_from_entrypoints(&dispatch_table,
result = vk_device_init(&device->vk, &physical_device->vk,
&dispatch_table, pCreateInfo, pAllocator);
if (result != VK_SUCCESS) {
- vk_error(result);
+ anv_error(result);
goto fail_alloc;
}
/* XXX(chadv): Can we dup() physicalDevice->fd here? */
device->fd = open(physical_device->path, O_RDWR | O_CLOEXEC);
if (device->fd == -1) {
- result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
+ result = anv_error(VK_ERROR_INITIALIZATION_FAILED);
goto fail_device;
}
device->context_id = anv_gem_create_context(device);
}
if (device->context_id == -1) {
- result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
+ result = anv_error(VK_ERROR_INITIALIZATION_FAILED);
goto fail_fd;
}
vk_zalloc(&device->vk.alloc, num_queues * sizeof(*device->queues), 8,
VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
if (device->queues == NULL) {
- result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ result = anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
goto fail_context_id;
}
if (physical_device->use_softpin) {
if (pthread_mutex_init(&device->vma_mutex, NULL) != 0) {
- result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
+ result = anv_error(VK_ERROR_INITIALIZATION_FAILED);
goto fail_queues;
}
I915_CONTEXT_PARAM_PRIORITY,
vk_priority_to_gen(priority));
if (err != 0 && priority > VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_EXT) {
- result = vk_error(VK_ERROR_NOT_PERMITTED_EXT);
+ result = anv_error(VK_ERROR_NOT_PERMITTED_EXT);
goto fail_vmas;
}
}
device->robust_buffer_access = robust_buffer_access;
if (pthread_mutex_init(&device->mutex, NULL) != 0) {
- result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
+ result = anv_error(VK_ERROR_INITIALIZATION_FAILED);
goto fail_queues;
}
pthread_condattr_t condattr;
if (pthread_condattr_init(&condattr) != 0) {
- result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
+ result = anv_error(VK_ERROR_INITIALIZATION_FAILED);
goto fail_mutex;
}
if (pthread_condattr_setclock(&condattr, CLOCK_MONOTONIC) != 0) {
pthread_condattr_destroy(&condattr);
- result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
+ result = anv_error(VK_ERROR_INITIALIZATION_FAILED);
goto fail_mutex;
}
if (pthread_cond_init(&device->queue_submit, &condattr) != 0) {
pthread_condattr_destroy(&condattr);
- result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
+ result = anv_error(VK_ERROR_INITIALIZATION_FAILED);
goto fail_mutex;
}
pthread_condattr_destroy(&condattr);
}
/* None supported at this time */
- return vk_error(VK_ERROR_LAYER_NOT_PRESENT);
+ return anv_error(VK_ERROR_LAYER_NOT_PRESENT);
}
void
for (uint32_t i = 0; i < device->queue_count; i++) {
struct anv_queue *queue = &device->queues[i];
if (queue->lost) {
- __vk_errorf(device->physical->instance, &device->vk.base,
+ __anv_errorf(device->physical->instance, &device->vk.base,
VK_ERROR_DEVICE_LOST,
queue->error_file, queue->error_line,
"%s", queue->error_msg);
device->lost_reported = true;
va_start(ap, msg);
- err = __vk_errorv(device->physical->instance, &device->vk.base,
+ err = __anv_errorv(device->physical->instance, &device->vk.base,
VK_ERROR_DEVICE_LOST, file, line, msg, ap);
va_end(ap);
align_u64(pAllocateInfo->allocationSize, 4096);
if (aligned_alloc_size > MAX_MEMORY_ALLOCATION_SIZE)
- return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
assert(pAllocateInfo->memoryTypeIndex < pdevice->memory.type_count);
struct anv_memory_type *mem_type =
uint64_t mem_heap_used = p_atomic_read(&mem_heap->used);
if (mem_heap_used + aligned_alloc_size > mem_heap->size)
- return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
mem = vk_object_alloc(&device->vk, pAllocator, sizeof(*mem),
VK_OBJECT_TYPE_DEVICE_MEMORY);
if (mem == NULL)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
mem->type = mem_type;
mem->map = NULL;
* this sort of attack but only if it can trust the buffer size.
*/
if (mem->bo->size < aligned_alloc_size) {
- result = vk_errorf(device, &device->vk.base,
+ result = anv_errorf(device, &device->vk.base,
VK_ERROR_INVALID_EXTERNAL_HANDLE,
"aligned allocationSize too large for "
"VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT: "
if (host_ptr_info && host_ptr_info->handleType) {
if (host_ptr_info->handleType ==
VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_MAPPED_FOREIGN_MEMORY_BIT_EXT) {
- result = vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
+ result = anv_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
goto fail;
}
i915_tiling);
if (ret) {
anv_device_release_bo(device, mem->bo);
- result = vk_errorf(device, &device->vk.base,
+ result = anv_errorf(device, &device->vk.base,
VK_ERROR_OUT_OF_DEVICE_MEMORY,
"failed to set BO tiling: %m");
goto fail;
if (mem_heap_used > mem_heap->size) {
p_atomic_add(&mem_heap->used, -mem->bo->size);
anv_device_release_bo(device, mem->bo);
- result = vk_errorf(device, &device->vk.base,
+ result = anv_errorf(device, &device->vk.base,
VK_ERROR_OUT_OF_DEVICE_MEMORY,
"Out of heap memory");
goto fail;
*
* So opaque handle types fall into the default "unsupported" case.
*/
- return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
+ return anv_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
}
}
void *map = anv_gem_mmap(device, mem->bo->gem_handle,
map_offset, map_size, gem_flags);
if (map == MAP_FAILED)
- return vk_error(VK_ERROR_MEMORY_MAP_FAILED);
+ return anv_error(VK_ERROR_MEMORY_MAP_FAILED);
mem->map = map;
mem->map_size = map_size;
if (anv_device_is_lost(queue->device))
return VK_ERROR_DEVICE_LOST;
- return vk_error(VK_ERROR_FEATURE_NOT_PRESENT);
+ return anv_error(VK_ERROR_FEATURE_NOT_PRESENT);
}
// Event functions
event = vk_object_alloc(&device->vk, pAllocator, sizeof(*event),
VK_OBJECT_TYPE_EVENT);
if (event == NULL)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
event->state = anv_state_pool_alloc(&device->dynamic_state_pool,
sizeof(uint64_t), 8);
* allocating a buffer larger than our GTT size.
*/
if (pCreateInfo->size > device->physical->gtt_size)
- return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
buffer = vk_object_alloc(&device->vk, pAllocator, sizeof(*buffer),
VK_OBJECT_TYPE_BUFFER);
if (buffer == NULL)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
buffer->create_flags = pCreateInfo->flags;
buffer->size = pCreateInfo->size;
framebuffer = vk_object_alloc(&device->vk, pAllocator, size,
VK_OBJECT_TYPE_FRAMEBUFFER);
if (framebuffer == NULL)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
framebuffer->width = pCreateInfo->width;
framebuffer->height = pCreateInfo->height;
* non-mipmapped single-sample) 2D images.
*/
if (info->type != VK_IMAGE_TYPE_2D) {
- vk_errorfi(instance, &physical_device->vk.base,
+ anv_errorfi(instance, &physical_device->vk.base,
VK_ERROR_FORMAT_NOT_SUPPORTED,
"VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT "
"requires VK_IMAGE_TYPE_2D");
* and therefore requires explicit memory layout.
*/
if (!tiling_has_explicit_layout) {
- result = vk_errorfi(instance, &physical_device->vk.base,
+ result = anv_errorfi(instance, &physical_device->vk.base,
VK_ERROR_FORMAT_NOT_SUPPORTED,
"VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT "
"requires VK_IMAGE_TILING_LINEAR or "
* and therefore requires explicit memory layout.
*/
if (!tiling_has_explicit_layout) {
- result = vk_errorfi(instance, &physical_device->vk.base,
+ result = anv_errorfi(instance, &physical_device->vk.base,
VK_ERROR_FORMAT_NOT_SUPPORTED,
"VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT "
"requires VK_IMAGE_TILING_LINEAR or "
* vkGetPhysicalDeviceImageFormatProperties2 returns
* VK_ERROR_FORMAT_NOT_SUPPORTED.
*/
- result = vk_errorfi(instance, &physical_device->vk.base,
+ result = anv_errorfi(instance, &physical_device->vk.base,
VK_ERROR_FORMAT_NOT_SUPPORTED,
"unsupported VkExternalMemoryTypeFlagBits 0x%x",
external_info->handleType);
conversion = vk_object_zalloc(&device->vk, pAllocator, sizeof(*conversion),
VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION);
if (!conversion)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
conversion->format = anv_get_format(pCreateInfo->format);
conversion->ycbcr_model = pCreateInfo->ycbcrModel;
* VkImageDrmFormatModifierExplicitCreateInfoEXT.
*/
if (unlikely(!anv_is_aligned(offset, alignment))) {
- return vk_errorf(device, &device->vk.base,
+ return anv_errorf(device, &device->vk.base,
VK_ERROR_INVALID_DRM_FORMAT_MODIFIER_PLANE_LAYOUT_EXT,
"VkImageDrmFormatModifierExplicitCreateInfoEXT::"
"pPlaneLayouts[]::offset is misaligned");
* VkImageDrmFormatModifierExplicitCreateInfoEXT,
*/
if (unlikely(offset < container->size)) {
- return vk_errorf(device, &device->vk.base,
+ return anv_errorf(device, &device->vk.base,
VK_ERROR_INVALID_DRM_FORMAT_MODIFIER_PLANE_LAYOUT_EXT,
"VkImageDrmFormatModifierExplicitCreateInfoEXT::"
"pPlaneLayouts[]::offset is too small");
if (__builtin_add_overflow(offset, size, &container->size)) {
if (has_implicit_offset) {
assert(!"overflow");
- return vk_errorf(device, &device->vk.base,
+ return anv_errorf(device, &device->vk.base,
VK_ERROR_UNKNOWN,
"internal error: overflow in %s", __func__);
} else {
- return vk_errorf(device, &device->vk.base,
+ return anv_errorf(device, &device->vk.base,
VK_ERROR_INVALID_DRM_FORMAT_MODIFIER_PLANE_LAYOUT_EXT,
"VkImageDrmFormatModifierExplicitCreateInfoEXT::"
"pPlaneLayouts[]::offset is too large");
* usage, then we may enable a private aux surface.
*/
if (plane->aux_usage != isl_mod_info->aux_usage) {
- return vk_errorf(device, &image->vk.base, VK_ERROR_UNKNOWN,
+ return anv_errorf(device, &image->vk.base, VK_ERROR_UNKNOWN,
"image with modifier unexpectedly has wrong aux "
"usage");
}
/* Reject special values in the app-provided plane layouts. */
for (uint32_t i = 0; i < mod_plane_count; ++i) {
if (drm_info->pPlaneLayouts[i].rowPitch == 0) {
- return vk_errorf(device, &device->vk.base,
+ return anv_errorf(device, &device->vk.base,
VK_ERROR_INVALID_DRM_FORMAT_MODIFIER_PLANE_LAYOUT_EXT,
"VkImageDrmFormatModifierExplicitCreateInfoEXT::"
"pPlaneLayouts[%u]::rowPitch is 0", i);
}
if (drm_info->pPlaneLayouts[i].offset == ANV_OFFSET_IMPLICIT) {
- return vk_errorf(device, &device->vk.base,
+ return anv_errorf(device, &device->vk.base,
VK_ERROR_INVALID_DRM_FORMAT_MODIFIER_PLANE_LAYOUT_EXT,
"VkImageDrmFormatModifierExplicitCreateInfoEXT::"
"pPlaneLayouts[%u]::offset is %" PRIu64,
vk_object_zalloc(&device->vk, pAllocator, sizeof(*image),
VK_OBJECT_TYPE_IMAGE);
if (!image)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
VkResult result = anv_image_init_from_create_info(device, image,
pCreateInfo);
iview = vk_image_view_create(&device->vk, pCreateInfo,
pAllocator, sizeof(*iview));
if (iview == NULL)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
iview->image = image;
iview->n_planes = anv_image_aspect_get_planes(iview->vk.aspects);
view = vk_object_alloc(&device->vk, pAllocator, sizeof(*view),
VK_OBJECT_TYPE_BUFFER_VIEW);
if (!view)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
/* TODO: Handle the format swizzle? */
if (!vk_object_multizalloc(&device->vk, &ma, pAllocator,
VK_OBJECT_TYPE_RENDER_PASS))
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
/* Clear the subpasses along with the parent pass. This required because
* each array member of anv_subpass must be a valid pointer if not NULL.
config = vk_object_alloc(&device->vk, NULL, sizeof(*config),
VK_OBJECT_TYPE_PERFORMANCE_CONFIGURATION_INTEL);
if (!config)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
if (!(INTEL_DEBUG & DEBUG_NO_OACONFIG)) {
config->register_config =
pipeline_ctx,
&stages[s]);
if (stages[s].nir == NULL) {
- result = vk_error(VK_ERROR_UNKNOWN);
+ result = anv_error(VK_ERROR_UNKNOWN);
goto fail;
}
}
if (stages[s].code == NULL) {
ralloc_free(stage_ctx);
- result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ result = anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
goto fail;
}
xfb_info, &stages[s].bind_map);
if (!bin) {
ralloc_free(stage_ctx);
- result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ result = anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
goto fail;
}
stage.nir = anv_pipeline_stage_get_nir(&pipeline->base, cache, mem_ctx, &stage);
if (stage.nir == NULL) {
ralloc_free(mem_ctx);
- return vk_error(VK_ERROR_UNKNOWN);
+ return anv_error(VK_ERROR_UNKNOWN);
}
NIR_PASS_V(stage.nir, anv_nir_add_base_work_group_id);
stage.code = brw_compile_cs(compiler, mem_ctx, ¶ms);
if (stage.code == NULL) {
ralloc_free(mem_ctx);
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
}
anv_nir_validate_push_layout(&stage.prog_data.base, &stage.bind_map);
NULL, &stage.bind_map);
if (!bin) {
ralloc_free(mem_ctx);
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
}
stage.feedback.duration = os_time_get_nano() - stage_start;
&stage->key.bs, &stage->prog_data.bs, nir,
num_resume_shaders, resume_shaders, stage->stats, NULL);
if (stage->code == NULL)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
/* Ray-tracing shaders don't have a "real" bind map */
struct anv_pipeline_bind_map empty_bind_map = {};
stage->stats, 1,
NULL, &empty_bind_map);
if (bin == NULL)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
/* TODO: Figure out executables for resume shaders */
anv_pipeline_add_executables(&pipeline->base, stage, bin);
pipeline_ctx, &stages[i]);
if (stages[i].nir == NULL) {
ralloc_free(pipeline_ctx);
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
}
anv_pipeline_lower_nir(&pipeline->base, pipeline_ctx, &stages[i], layout);
ralloc_free(tmp_ctx);
if (device->rt_trampoline == NULL)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
}
struct brw_rt_trivial_return {
if (device->rt_trivial_return == NULL) {
anv_shader_bin_unref(device, device->rt_trampoline);
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
}
}
{
ANV_FROM_HANDLE(anv_pipeline, pipeline, _pipeline);
if (pipeline->type != ANV_PIPELINE_RAY_TRACING)
- return vk_error(VK_ERROR_FEATURE_NOT_PRESENT);
+ return anv_error(VK_ERROR_FEATURE_NOT_PRESENT);
struct anv_ray_tracing_pipeline *rt_pipeline =
anv_pipeline_to_ray_tracing(pipeline);
void* pData)
{
unreachable("Unimplemented");
- return vk_error(VK_ERROR_FEATURE_NOT_PRESENT);
+ return anv_error(VK_ERROR_FEATURE_NOT_PRESENT);
}
VkDeviceSize
sizeof(*cache), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (cache == NULL)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
anv_pipeline_cache_init(cache, device,
device->physical->instance->pipeline_cache_enabled,
* propagating errors. Might be useful to plug in a stack trace here.
*/
-VkResult __vk_errorv(struct anv_instance *instance,
+VkResult __anv_errorv(struct anv_instance *instance,
const struct vk_object_base *object, VkResult error,
const char *file, int line, const char *format,
va_list args);
-VkResult __vk_errorf(struct anv_instance *instance,
+VkResult __anv_errorf(struct anv_instance *instance,
const struct vk_object_base *object, VkResult error,
const char *file, int line, const char *format, ...)
anv_printflike(6, 7);
#ifdef DEBUG
-#define vk_error(error) __vk_errorf(NULL, NULL, error, __FILE__, __LINE__, NULL)
-#define vk_errorfi(instance, obj, error, format, ...)\
- __vk_errorf(instance, obj, error,\
+#define anv_error(error) __anv_errorf(NULL, NULL, error, __FILE__, __LINE__, NULL)
+#define anv_errorfi(instance, obj, error, format, ...)\
+ __anv_errorf(instance, obj, error,\
__FILE__, __LINE__, format, ## __VA_ARGS__)
-#define vk_errorf(device, obj, error, format, ...)\
- vk_errorfi(anv_device_instance_or_null(device),\
+#define anv_errorf(device, obj, error, format, ...)\
+ anv_errorfi(anv_device_instance_or_null(device),\
obj, error, format, ## __VA_ARGS__)
#else
-static inline VkResult __dummy_vk_error(VkResult error, UNUSED const void *ignored)
+static inline VkResult __dummy_anv_error(VkResult error, UNUSED const void *ignored)
{
return error;
}
-#define vk_error(error) __dummy_vk_error(error, NULL)
-#define vk_errorfi(instance, obj, error, format, ...) __dummy_vk_error(error, instance)
-#define vk_errorf(device, obj, error, format, ...) __dummy_vk_error(error, device)
+#define anv_error(error) __dummy_anv_error(error, NULL)
+#define anv_errorfi(instance, obj, error, format, ...) __dummy_anv_error(error, instance)
+#define anv_errorf(device, obj, error, format, ...) __dummy_anv_error(error, device)
#endif
/**
vk_zalloc(&device->vk.alloc, sizeof(**point),
8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
if (!(*point))
- result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ result = anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
if (result == VK_SUCCESS) {
result = anv_device_alloc_bo(device, "timeline-semaphore", 4096,
ANV_BO_ALLOC_EXTERNAL |
*/
if (device->has_thread_submit) {
if (pthread_mutex_init(&queue->mutex, NULL) != 0) {
- result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
+ result = anv_error(VK_ERROR_INITIALIZATION_FAILED);
goto fail_queue;
}
if (pthread_cond_init(&queue->cond, NULL) != 0) {
- result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
+ result = anv_error(VK_ERROR_INITIALIZATION_FAILED);
goto fail_mutex;
}
if (pthread_create(&queue->thread, NULL, anv_queue_task, queue)) {
- result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
+ result = anv_error(VK_ERROR_INITIALIZATION_FAILED);
goto fail_cond;
}
}
submit->fence_bos, new_len * sizeof(*submit->fence_bos),
8, submit->alloc_scope);
if (new_fence_bos == NULL)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
submit->fence_bos = new_fence_bos;
submit->fence_bo_array_length = new_len;
new_len * sizeof(*submit->wait_timeline_syncobjs),
8, submit->alloc_scope);
if (new_wait_timeline_syncobjs == NULL)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
submit->wait_timeline_syncobjs = new_wait_timeline_syncobjs;
submit->wait_timeline_values, new_len * sizeof(*submit->wait_timeline_values),
8, submit->alloc_scope);
if (new_wait_timeline_values == NULL)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
submit->wait_timeline_values = new_wait_timeline_values;
submit->wait_timeline_array_length = new_len;
submit->fences, new_len * sizeof(*submit->fences),
8, submit->alloc_scope);
if (new_fences == NULL)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
submit->fences = new_fences;
submit->fence_values, new_len * sizeof(*submit->fence_values),
8, submit->alloc_scope);
if (new_fence_values == NULL)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
submit->fence_values = new_fence_values;
submit->fence_array_length = new_len;
submit->wait_timelines, new_len * sizeof(*submit->wait_timelines),
8, submit->alloc_scope);
if (new_wait_timelines == NULL)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
submit->wait_timelines = new_wait_timelines;
submit->wait_timeline_values, new_len * sizeof(*submit->wait_timeline_values),
8, submit->alloc_scope);
if (new_wait_timeline_values == NULL)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
submit->wait_timeline_values = new_wait_timeline_values;
submit->signal_timelines, new_len * sizeof(*submit->signal_timelines),
8, submit->alloc_scope);
if (new_signal_timelines == NULL)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
submit->signal_timelines = new_signal_timelines;
submit->signal_timeline_values, new_len * sizeof(*submit->signal_timeline_values),
8, submit->alloc_scope);
if (new_signal_timeline_values == NULL)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
submit->signal_timeline_values = new_signal_timeline_values;
struct anv_device *device = queue->device;
struct anv_queue_submit *submit = anv_queue_submit_alloc(device);
if (!submit)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
bool has_syncobj_wait = device->physical->has_syncobj_wait;
VkResult result;
if (has_syncobj_wait) {
syncobj = anv_gem_syncobj_create(device, 0);
if (!syncobj) {
- result = vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
+ result = anv_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
goto err_free_submit;
}
new_len * sizeof(*submit->temporary_semaphores),
8, submit->alloc_scope);
if (new_array == NULL)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
submit->temporary_semaphores = new_array;
submit->temporary_semaphore_array_length = new_len;
submit->cmd_buffers, new_len * sizeof(*submit->cmd_buffers),
8, submit->alloc_scope);
if (new_cmd_buffers == NULL)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
submit->cmd_buffers = new_cmd_buffers;
submit->cmd_buffer_array_length = new_len;
*submit = anv_queue_submit_alloc(queue->device);
if (!*submit)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
return VK_SUCCESS;
}
struct anv_queue_submit *submit = anv_queue_submit_alloc(device);
if (!submit)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
for (uint32_t i = 0; i < submitCount; i++) {
const struct wsi_memory_signal_submit_info *mem_signal_info =
fence = vk_object_zalloc(&device->vk, pAllocator, sizeof(*fence),
VK_OBJECT_TYPE_FENCE);
if (fence == NULL)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
if (device->physical->has_syncobj_wait) {
fence->permanent.type = ANV_FENCE_TYPE_SYNCOBJ;
fence->permanent.syncobj = anv_gem_syncobj_create(device, create_flags);
if (!fence->permanent.syncobj)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
} else {
fence->permanent.type = ANV_FENCE_TYPE_BO;
sizeof(*syncobjs) * fenceCount, 8,
VK_SYSTEM_ALLOCATION_SCOPE_COMMAND);
if (!syncobjs)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
for (uint32_t i = 0; i < fenceCount; i++) {
ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
new_impl.syncobj = anv_gem_syncobj_fd_to_handle(device, fd);
if (!new_impl.syncobj)
- return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
+ return anv_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
break;
new_impl.syncobj = anv_gem_syncobj_create(device, create_flags);
if (!new_impl.syncobj)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
if (fd != -1 &&
anv_gem_syncobj_import_sync_file(device, new_impl.syncobj, fd)) {
anv_gem_syncobj_destroy(device, new_impl.syncobj);
- return vk_errorf(device, NULL, VK_ERROR_INVALID_EXTERNAL_HANDLE,
+ return anv_errorf(device, NULL, VK_ERROR_INVALID_EXTERNAL_HANDLE,
"syncobj sync file import failed: %m");
}
break;
}
default:
- return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
+ return anv_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
}
/* From the Vulkan 1.0.53 spec:
case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT: {
int fd = anv_gem_syncobj_handle_to_fd(device, impl->syncobj);
if (fd < 0)
- return vk_error(VK_ERROR_TOO_MANY_OBJECTS);
+ return anv_error(VK_ERROR_TOO_MANY_OBJECTS);
*pFd = fd;
break;
int fd = anv_gem_syncobj_export_sync_file(device, impl->syncobj);
if (fd < 0)
- return vk_error(VK_ERROR_TOO_MANY_OBJECTS);
+ return anv_error(VK_ERROR_TOO_MANY_OBJECTS);
*pFd = fd;
break;
impl->type = ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ;
impl->syncobj = anv_gem_syncobj_create(device, 0);
if (!impl->syncobj)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
return VK_SUCCESS;
}
impl->type = ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ_TIMELINE;
impl->syncobj = anv_gem_syncobj_create(device, 0);
if (!impl->syncobj)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
if (initial_value) {
if (anv_gem_syncobj_timeline_signal(device,
&impl->syncobj,
&initial_value, 1)) {
anv_gem_syncobj_destroy(device, impl->syncobj);
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
}
}
} else {
semaphore = vk_object_alloc(&device->vk, NULL, sizeof(*semaphore),
VK_OBJECT_TYPE_SEMAPHORE);
if (semaphore == NULL)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
const VkExportSemaphoreCreateInfo *export =
vk_find_struct_const(pCreateInfo->pNext, EXPORT_SEMAPHORE_CREATE_INFO);
semaphore->permanent.syncobj = anv_gem_syncobj_create(device, 0);
if (!semaphore->permanent.syncobj) {
vk_object_free(&device->vk, pAllocator, semaphore);
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
}
} else {
assert(!"Unknown handle type");
vk_object_free(&device->vk, pAllocator, semaphore);
- return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
+ return anv_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
}
semaphore->temporary.type = ANV_SEMAPHORE_TYPE_NONE;
new_impl.syncobj = anv_gem_syncobj_fd_to_handle(device, fd);
if (!new_impl.syncobj)
- return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
+ return anv_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
/* From the Vulkan spec:
*
};
if (!new_impl.syncobj)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
if (fd != -1) {
if (anv_gem_syncobj_import_sync_file(device, new_impl.syncobj, fd)) {
anv_gem_syncobj_destroy(device, new_impl.syncobj);
- return vk_errorf(device, NULL, VK_ERROR_INVALID_EXTERNAL_HANDLE,
+ return anv_errorf(device, NULL, VK_ERROR_INVALID_EXTERNAL_HANDLE,
"syncobj sync file import failed: %m");
}
/* Ownership of the FD is transferred to Anv. Since we don't need it
}
default:
- return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
+ return anv_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
}
if (pImportSemaphoreFdInfo->flags & VK_SEMAPHORE_IMPORT_TEMPORARY_BIT) {
fd = anv_gem_syncobj_handle_to_fd(device, impl->syncobj);
}
if (fd < 0)
- return vk_error(VK_ERROR_TOO_MANY_OBJECTS);
+ return anv_error(VK_ERROR_TOO_MANY_OBJECTS);
*pFd = fd;
break;
assert(pGetFdInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT);
fd = anv_gem_syncobj_handle_to_fd(device, impl->syncobj);
if (fd < 0)
- return vk_error(VK_ERROR_TOO_MANY_OBJECTS);
+ return anv_error(VK_ERROR_TOO_MANY_OBJECTS);
*pFd = fd;
break;
default:
- return vk_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
+ return anv_error(VK_ERROR_INVALID_EXTERNAL_HANDLE);
}
/* From the Vulkan 1.0.53 spec:
if (!vk_multialloc_alloc(&ma, &device->vk.alloc,
VK_SYSTEM_ALLOCATION_SCOPE_COMMAND))
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
uint32_t handle_count = 0;
for (uint32_t i = 0; i < pWaitInfo->semaphoreCount; i++) {
}
VkResult
-__vk_errorv(struct anv_instance *instance,
+__anv_errorv(struct anv_instance *instance,
const struct vk_object_base *object, VkResult error,
const char *file, int line, const char *format, va_list ap)
{
}
VkResult
-__vk_errorf(struct anv_instance *instance,
+__anv_errorf(struct anv_instance *instance,
const struct vk_object_base *object, VkResult error,
const char *file, int line, const char *format, ...)
{
va_list ap;
va_start(ap, format);
- __vk_errorv(instance, object, error, file, line, format, ap);
+ __anv_errorv(instance, object, error, file, line, format, ap);
va_end(ap);
return error;
if (!vk_multialloc_alloc(&ma, &device->vk.alloc,
VK_SYSTEM_ALLOCATION_SCOPE_COMMAND))
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
uint32_t wait_count = 0;
for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; i++) {
vk_free(&device->vk.alloc, values);
if (ret)
- return vk_error(VK_ERROR_DEVICE_LOST);
+ return anv_error(VK_ERROR_DEVICE_LOST);
}
VkResult result = wsi_common_queue_present(&device->physical->wsi_device,
fence = vk_zalloc2(&device->vk.alloc, allocator, sizeof (*fence), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (!fence)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
fence->permanent.type = ANV_FENCE_TYPE_WSI;
const char *function)
{
if (device->physical->cmd_parser_version < required_version) {
- return vk_errorf(device, &device->physical->vk.base,
+ return anv_errorf(device, &device->physical->vk.base,
VK_ERROR_FEATURE_NOT_PRESENT,
"cmd parser version %d is required for %s",
required_version, function);
pipeline = vk_zalloc2(&device->vk.alloc, pAllocator, sizeof(*pipeline), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (pipeline == NULL)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
result = anv_graphics_pipeline_init(pipeline, device, cache,
pCreateInfo, pAllocator);
pipeline = vk_zalloc2(&device->vk.alloc, pAllocator, sizeof(*pipeline), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (pipeline == NULL)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
result = anv_pipeline_init(&pipeline->base, device,
ANV_PIPELINE_COMPUTE, pCreateInfo->flags,
VK_MULTIALLOC_DECL(&ma, struct anv_rt_shader_group, groups, pCreateInfo->groupCount);
if (!vk_multialloc_zalloc2(&ma, &device->vk.alloc, pAllocator,
VK_SYSTEM_ALLOCATION_SCOPE_DEVICE))
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
result = anv_pipeline_init(&pipeline->base, device,
ANV_PIPELINE_RAY_TRACING, pCreateInfo->flags,
if (!vk_object_multialloc(&device->vk, &ma, pAllocator,
VK_OBJECT_TYPE_QUERY_POOL))
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
pool->type = pCreateInfo->queryType;
pool->pipeline_statistics = pipeline_statistics;
res = init_render_queue_state(queue);
break;
default:
- res = vk_error(VK_ERROR_INITIALIZATION_FAILED);
+ res = anv_error(VK_ERROR_INITIALIZATION_FAILED);
break;
}
if (res != VK_SUCCESS)
sampler = vk_object_zalloc(&device->vk, pAllocator, sizeof(*sampler),
VK_OBJECT_TYPE_SAMPLER);
if (!sampler)
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ return anv_error(VK_ERROR_OUT_OF_HOST_MEMORY);
sampler->n_planes = 1;