return VK_ERROR_INVALID_EXTERNAL_HANDLE;
uint64_t alloc_size = 0;
- mem->bo = device->ws->buffer_from_fd(device->ws, dma_buf, priority, &alloc_size);
- if (!mem->bo)
- return VK_ERROR_OUT_OF_HOST_MEMORY;
+ VkResult result =
+ device->ws->buffer_from_fd(device->ws, dma_buf, priority, &mem->bo, &alloc_size);
+ if (result != VK_SUCCESS)
+ return result;
if (mem->image) {
struct radeon_bo_metadata metadata;
radv_cmd_buffer_resize_upload_buf(struct radv_cmd_buffer *cmd_buffer, uint64_t min_needed)
{
uint64_t new_size;
- struct radeon_winsys_bo *bo;
+ struct radeon_winsys_bo *bo = NULL;
struct radv_cmd_buffer_upload *upload;
struct radv_device *device = cmd_buffer->device;
new_size = MAX2(min_needed, 16 * 1024);
new_size = MAX2(new_size, 2 * cmd_buffer->upload.size);
- bo = device->ws->buffer_create(device->ws, new_size, 4096, device->ws->cs_domain(device->ws),
- RADEON_FLAG_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING |
- RADEON_FLAG_32BIT | RADEON_FLAG_GTT_WC,
- RADV_BO_PRIORITY_UPLOAD_BUFFER);
+ VkResult result =
+ device->ws->buffer_create(device->ws, new_size, 4096, device->ws->cs_domain(device->ws),
+ RADEON_FLAG_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING |
+ RADEON_FLAG_32BIT | RADEON_FLAG_GTT_WC,
+ RADV_BO_PRIORITY_UPLOAD_BUFFER, &bo);
- if (!bo) {
- cmd_buffer->record_result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
+ if (result != VK_SUCCESS) {
+ cmd_buffer->record_result = result;
return false;
}
struct radeon_winsys *ws = device->ws;
VkResult result;
- device->trace_bo = ws->buffer_create(
+ result = ws->buffer_create(
ws, TRACE_BO_SIZE, 8, RADEON_DOMAIN_VRAM,
RADEON_FLAG_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING | RADEON_FLAG_ZERO_VRAM,
- RADV_BO_PRIORITY_UPLOAD_BUFFER);
- if (!device->trace_bo)
+ RADV_BO_PRIORITY_UPLOAD_BUFFER, &device->trace_bo);
+ if (result != VK_SUCCESS)
return false;
result = ws->buffer_make_resident(ws, device->trace_bo, true);
if (result != VK_SUCCESS)
return false;
- device->tma_bo = ws->buffer_create(ws, TMA_BO_SIZE, 256, RADEON_DOMAIN_VRAM,
- RADEON_FLAG_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING |
- RADEON_FLAG_ZERO_VRAM | RADEON_FLAG_32BIT,
- RADV_BO_PRIORITY_SCRATCH);
- if (!device->tma_bo)
+ result = ws->buffer_create(ws, TMA_BO_SIZE, 256, RADEON_DOMAIN_VRAM,
+ RADEON_FLAG_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING |
+ RADEON_FLAG_ZERO_VRAM | RADEON_FLAG_32BIT,
+ RADV_BO_PRIORITY_SCRATCH, &device->tma_bo);
+ if (result != VK_SUCCESS)
return false;
result = ws->buffer_make_resident(ws, device->tma_bo, true);
if (bo_size) {
if (!(pCreateInfo->flags & VK_DESCRIPTOR_POOL_CREATE_HOST_ONLY_BIT_VALVE)) {
- pool->bo = device->ws->buffer_create(
+ VkResult result = device->ws->buffer_create(
device->ws, bo_size, 32, RADEON_DOMAIN_VRAM,
RADEON_FLAG_NO_INTERPROCESS_SHARING | RADEON_FLAG_READ_ONLY | RADEON_FLAG_32BIT,
- RADV_BO_PRIORITY_DESCRIPTOR);
- if (!pool->bo) {
+ RADV_BO_PRIORITY_DESCRIPTOR, &pool->bo);
+ if (result != VK_SUCCESS) {
radv_destroy_descriptor_pool(device, pAllocator, pool);
- return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
+ return vk_error(device->instance, result);
}
pool->mapped_ptr = (uint8_t *)device->ws->buffer_map(pool->bo);
if (!pool->mapped_ptr) {
{
VkResult result;
- device->border_color_data.bo = device->ws->buffer_create(
+ result = device->ws->buffer_create(
device->ws, RADV_BORDER_COLOR_BUFFER_SIZE, 4096, RADEON_DOMAIN_VRAM,
RADEON_FLAG_CPU_ACCESS | RADEON_FLAG_READ_ONLY | RADEON_FLAG_NO_INTERPROCESS_SHARING,
- RADV_BO_PRIORITY_SHADER);
+ RADV_BO_PRIORITY_SHADER, &device->border_color_data.bo);
- if (device->border_color_data.bo == NULL)
- return vk_error(device->physical_device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
+ if (result != VK_SUCCESS)
+ return vk_error(device->physical_device->instance, result);
result = device->ws->buffer_make_resident(device->ws, device->border_color_data.bo, true);
if (result != VK_SUCCESS)
unsigned hs_offchip_param = 0;
unsigned tess_offchip_ring_offset;
uint32_t ring_bo_flags = RADEON_FLAG_NO_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING;
+ VkResult result = VK_SUCCESS;
if (!queue->has_tess_rings) {
if (needs_tess_rings)
add_tess_rings = true;
uint32_t scratch_size = scratch_size_per_wave * scratch_waves;
uint32_t queue_scratch_size = queue->scratch_size_per_wave * queue->scratch_waves;
if (scratch_size > queue_scratch_size) {
- scratch_bo =
+ result =
queue->device->ws->buffer_create(queue->device->ws, scratch_size, 4096, RADEON_DOMAIN_VRAM,
- ring_bo_flags, RADV_BO_PRIORITY_SCRATCH);
- if (!scratch_bo)
+ ring_bo_flags, RADV_BO_PRIORITY_SCRATCH, &scratch_bo);
+ if (result != VK_SUCCESS)
goto fail;
} else
scratch_bo = queue->scratch_bo;
uint32_t compute_queue_scratch_size =
queue->compute_scratch_size_per_wave * queue->compute_scratch_waves;
if (compute_scratch_size > compute_queue_scratch_size) {
- compute_scratch_bo = queue->device->ws->buffer_create(queue->device->ws, compute_scratch_size,
- 4096, RADEON_DOMAIN_VRAM, ring_bo_flags,
- RADV_BO_PRIORITY_SCRATCH);
- if (!compute_scratch_bo)
+ result = queue->device->ws->buffer_create(queue->device->ws, compute_scratch_size, 4096,
+ RADEON_DOMAIN_VRAM, ring_bo_flags,
+ RADV_BO_PRIORITY_SCRATCH, &compute_scratch_bo);
+ if (result != VK_SUCCESS)
goto fail;
} else
compute_scratch_bo = queue->compute_scratch_bo;
if (esgs_ring_size > queue->esgs_ring_size) {
- esgs_ring_bo = queue->device->ws->buffer_create(queue->device->ws, esgs_ring_size, 4096,
- RADEON_DOMAIN_VRAM, ring_bo_flags,
- RADV_BO_PRIORITY_SCRATCH);
- if (!esgs_ring_bo)
+ result = queue->device->ws->buffer_create(queue->device->ws, esgs_ring_size, 4096,
+ RADEON_DOMAIN_VRAM, ring_bo_flags,
+ RADV_BO_PRIORITY_SCRATCH, &esgs_ring_bo);
+ if (result != VK_SUCCESS)
goto fail;
} else {
esgs_ring_bo = queue->esgs_ring_bo;
}
if (gsvs_ring_size > queue->gsvs_ring_size) {
- gsvs_ring_bo = queue->device->ws->buffer_create(queue->device->ws, gsvs_ring_size, 4096,
- RADEON_DOMAIN_VRAM, ring_bo_flags,
- RADV_BO_PRIORITY_SCRATCH);
- if (!gsvs_ring_bo)
+ result = queue->device->ws->buffer_create(queue->device->ws, gsvs_ring_size, 4096,
+ RADEON_DOMAIN_VRAM, ring_bo_flags,
+ RADV_BO_PRIORITY_SCRATCH, &gsvs_ring_bo);
+ if (result != VK_SUCCESS)
goto fail;
} else {
gsvs_ring_bo = queue->gsvs_ring_bo;
}
if (add_tess_rings) {
- tess_rings_bo = queue->device->ws->buffer_create(
+ result = queue->device->ws->buffer_create(
queue->device->ws, tess_offchip_ring_offset + tess_offchip_ring_size, 256,
- RADEON_DOMAIN_VRAM, ring_bo_flags, RADV_BO_PRIORITY_SCRATCH);
- if (!tess_rings_bo)
+ RADEON_DOMAIN_VRAM, ring_bo_flags, RADV_BO_PRIORITY_SCRATCH, &tess_rings_bo);
+ if (result != VK_SUCCESS)
goto fail;
} else {
tess_rings_bo = queue->tess_rings_bo;
/* 4 streamout GDS counters.
* We need 256B (64 dw) of GDS, otherwise streamout hangs.
*/
- gds_bo = queue->device->ws->buffer_create(queue->device->ws, 256, 4, RADEON_DOMAIN_GDS,
- ring_bo_flags, RADV_BO_PRIORITY_SCRATCH);
- if (!gds_bo)
+ result = queue->device->ws->buffer_create(queue->device->ws, 256, 4, RADEON_DOMAIN_GDS,
+ ring_bo_flags, RADV_BO_PRIORITY_SCRATCH, &gds_bo);
+ if (result != VK_SUCCESS)
goto fail;
} else {
gds_bo = queue->gds_bo;
if (add_gds_oa) {
assert(queue->device->physical_device->rad_info.chip_class >= GFX10);
- gds_oa_bo = queue->device->ws->buffer_create(queue->device->ws, 4, 1, RADEON_DOMAIN_OA,
- ring_bo_flags, RADV_BO_PRIORITY_SCRATCH);
- if (!gds_oa_bo)
+ result =
+ queue->device->ws->buffer_create(queue->device->ws, 4, 1, RADEON_DOMAIN_OA, ring_bo_flags,
+ RADV_BO_PRIORITY_SCRATCH, &gds_oa_bo);
+ if (result != VK_SUCCESS)
goto fail;
} else {
gds_oa_bo = queue->gds_oa_bo;
} else if (scratch_bo)
size = 8; /* 2 dwords */
- descriptor_bo = queue->device->ws->buffer_create(
+ result = queue->device->ws->buffer_create(
queue->device->ws, size, 4096, RADEON_DOMAIN_VRAM,
RADEON_FLAG_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING | RADEON_FLAG_READ_ONLY,
- RADV_BO_PRIORITY_DESCRIPTOR);
- if (!descriptor_bo)
+ RADV_BO_PRIORITY_DESCRIPTOR, &descriptor_bo);
+ if (result != VK_SUCCESS)
goto fail;
} else
descriptor_bo = queue->descriptor_bo;
struct radeon_cmdbuf *cs = NULL;
cs = queue->device->ws->cs_create(queue->device->ws,
queue->queue_family_index ? RING_COMPUTE : RING_GFX);
- if (!cs)
+ if (!cs) {
+ result = VK_ERROR_OUT_OF_HOST_MEMORY;
goto fail;
+ }
dest_cs[i] = cs;
&sqtt_flush_bits, 0);
}
- if (queue->device->ws->cs_finalize(cs) != VK_SUCCESS)
+ result = queue->device->ws->cs_finalize(cs);
+ if (result != VK_SUCCESS)
goto fail;
}
if (gds_oa_bo && gds_oa_bo != queue->gds_oa_bo)
queue->device->ws->buffer_destroy(queue->device->ws, gds_oa_bo);
- return vk_error(queue->device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
+ return vk_error(queue->device->instance, result);
}
static VkResult
} else if (import_info) {
assert(import_info->handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT ||
import_info->handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
- mem->bo = device->ws->buffer_from_fd(device->ws, import_info->fd, priority, NULL);
- if (!mem->bo) {
- result = VK_ERROR_INVALID_EXTERNAL_HANDLE;
+ result = device->ws->buffer_from_fd(device->ws, import_info->fd, priority, &mem->bo, NULL);
+ if (result != VK_SUCCESS) {
goto fail;
} else {
close(import_info->fd);
}
} else if (host_ptr_info) {
assert(host_ptr_info->handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT);
- mem->bo = device->ws->buffer_from_ptr(device->ws, host_ptr_info->pHostPointer,
- pAllocateInfo->allocationSize, priority);
- if (!mem->bo) {
- result = VK_ERROR_INVALID_EXTERNAL_HANDLE;
+ result = device->ws->buffer_from_ptr(device->ws, host_ptr_info->pHostPointer,
+ pAllocateInfo->allocationSize, priority, &mem->bo);
+ if (result != VK_SUCCESS) {
goto fail;
} else {
mem->user_ptr = host_ptr_info->pHostPointer;
mtx_unlock(&device->overallocation_mutex);
}
- mem->bo = device->ws->buffer_create(device->ws, alloc_size,
- device->physical_device->rad_info.max_alignment, domain,
- flags, priority);
+ result = device->ws->buffer_create(device->ws, alloc_size,
+ device->physical_device->rad_info.max_alignment, domain,
+ flags, priority, &mem->bo);
- if (!mem->bo) {
+ if (result != VK_SUCCESS) {
if (device->overallocation_disallowed) {
mtx_lock(&device->overallocation_mutex);
device->allocated_memory_size[heap_index] -= alloc_size;
mtx_unlock(&device->overallocation_mutex);
}
- result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
goto fail;
}
vk_object_base_init(&device->vk, &event->base, VK_OBJECT_TYPE_EVENT);
- event->bo = device->ws->buffer_create(
+ VkResult result = device->ws->buffer_create(
device->ws, 8, 8, RADEON_DOMAIN_GTT,
RADEON_FLAG_VA_UNCACHED | RADEON_FLAG_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING,
- RADV_BO_PRIORITY_FENCE);
- if (!event->bo) {
+ RADV_BO_PRIORITY_FENCE, &event->bo);
+ if (result != VK_SUCCESS) {
radv_destroy_event(device, pAllocator, event);
- return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
+ return vk_error(device->instance, result);
}
event->map = (uint64_t *)device->ws->buffer_map(event->bo);
vk_find_struct_const(pCreateInfo->pNext, EXTERNAL_MEMORY_BUFFER_CREATE_INFO) != NULL;
if (pCreateInfo->flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT) {
- buffer->bo = device->ws->buffer_create(device->ws, align64(buffer->size, 4096), 4096, 0,
- RADEON_FLAG_VIRTUAL, RADV_BO_PRIORITY_VIRTUAL);
- if (!buffer->bo) {
+ VkResult result =
+ device->ws->buffer_create(device->ws, align64(buffer->size, 4096), 4096, 0,
+ RADEON_FLAG_VIRTUAL, RADV_BO_PRIORITY_VIRTUAL, &buffer->bo);
+ if (result != VK_SUCCESS) {
radv_destroy_buffer(device, pAllocator, buffer);
- return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
+ return vk_error(device->instance, result);
}
}
image->size = align64(image->size, image->alignment);
image->offset = 0;
- image->bo = device->ws->buffer_create(device->ws, image->size, image->alignment, 0,
- RADEON_FLAG_VIRTUAL, RADV_BO_PRIORITY_VIRTUAL);
- if (!image->bo) {
+ result = device->ws->buffer_create(device->ws, image->size, image->alignment, 0,
+ RADEON_FLAG_VIRTUAL, RADV_BO_PRIORITY_VIRTUAL, &image->bo);
+ if (result != VK_SUCCESS) {
radv_destroy_image(device, alloc, image);
- return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
+ return vk_error(device->instance, result);
}
}
image->l2_coherent = radv_image_is_l2_coherent(device, image);
if (pCreateInfo->queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS)
pool->size += 4 * pCreateInfo->queryCount;
- pool->bo =
- device->ws->buffer_create(device->ws, pool->size, 64, RADEON_DOMAIN_GTT,
- RADEON_FLAG_NO_INTERPROCESS_SHARING, RADV_BO_PRIORITY_QUERY_POOL);
- if (!pool->bo) {
+ VkResult result = device->ws->buffer_create(device->ws, pool->size, 64, RADEON_DOMAIN_GTT,
+ RADEON_FLAG_NO_INTERPROCESS_SHARING,
+ RADV_BO_PRIORITY_QUERY_POOL, &pool->bo);
+ if (result != VK_SUCCESS) {
radv_destroy_query_pool(device, pAllocator, pool);
- return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
+ return vk_error(device->instance, result);
}
pool->ptr = device->ws->buffer_map(pool->bo);
const char *(*get_chip_name)(struct radeon_winsys *ws);
- struct radeon_winsys_bo *(*buffer_create)(struct radeon_winsys *ws, uint64_t size,
- unsigned alignment, enum radeon_bo_domain domain,
- enum radeon_bo_flag flags, unsigned priority);
+ VkResult (*buffer_create)(struct radeon_winsys *ws, uint64_t size, unsigned alignment,
+ enum radeon_bo_domain domain, enum radeon_bo_flag flags,
+ unsigned priority, struct radeon_winsys_bo **out_bo);
void (*buffer_destroy)(struct radeon_winsys *ws, struct radeon_winsys_bo *bo);
void *(*buffer_map)(struct radeon_winsys_bo *bo);
- struct radeon_winsys_bo *(*buffer_from_ptr)(struct radeon_winsys *ws, void *pointer,
- uint64_t size, unsigned priority);
+ VkResult (*buffer_from_ptr)(struct radeon_winsys *ws, void *pointer, uint64_t size,
+ unsigned priority, struct radeon_winsys_bo **out_bo);
- struct radeon_winsys_bo *(*buffer_from_fd)(struct radeon_winsys *ws, int fd, unsigned priority,
- uint64_t *alloc_size);
+ VkResult (*buffer_from_fd)(struct radeon_winsys *ws, int fd, unsigned priority,
+ struct radeon_winsys_bo **out_bo, uint64_t *alloc_size);
bool (*buffer_get_fd)(struct radeon_winsys *ws, struct radeon_winsys_bo *bo, int *fd);
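With the new interface, each creation/import entry point returns a VkResult and hands the BO back through a trailing out parameter, so callers can propagate the precise error instead of guessing VK_ERROR_OUT_OF_DEVICE_MEMORY from a NULL pointer. A minimal caller sketch under the new signature (the surrounding `device`, `size`, and `bo` names are illustrative, not part of the patch):

   struct radeon_winsys_bo *bo = NULL;
   VkResult result = device->ws->buffer_create(device->ws, size, 4096, RADEON_DOMAIN_VRAM,
                                               RADEON_FLAG_NO_INTERPROCESS_SHARING,
                                               RADV_BO_PRIORITY_SCRATCH, &bo);
   if (result != VK_SUCCESS)
      return vk_error(device->instance, result); /* propagate the winsys error as-is */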
struct radv_shader_slab *slab = calloc(1, sizeof(struct radv_shader_slab));
slab->size = MAX2(256 * 1024, shader->code_size);
- slab->bo = device->ws->buffer_create(
+ VkResult result = device->ws->buffer_create(
device->ws, slab->size, 256, RADEON_DOMAIN_VRAM,
RADEON_FLAG_NO_INTERPROCESS_SHARING |
(device->physical_device->rad_info.cpdma_prefetch_writes_memory ? 0
: RADEON_FLAG_READ_ONLY),
- RADV_BO_PRIORITY_SHADER);
- if (!slab->bo) {
+ RADV_BO_PRIORITY_SHADER, &slab->bo);
+ if (result != VK_SUCCESS) {
free(slab);
return NULL;
}
size = align64(sizeof(struct ac_thread_trace_info) * max_se, 1 << SQTT_BUFFER_ALIGN_SHIFT);
size += device->thread_trace.buffer_size * (uint64_t)max_se;
- device->thread_trace.bo = ws->buffer_create(
+ struct radeon_winsys_bo *bo = NULL;
+ VkResult result = ws->buffer_create(
ws, size, 4096, RADEON_DOMAIN_VRAM,
RADEON_FLAG_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING | RADEON_FLAG_ZERO_VRAM,
- RADV_BO_PRIORITY_SCRATCH);
- if (!device->thread_trace.bo)
+ RADV_BO_PRIORITY_SCRATCH, &bo);
+ device->thread_trace.bo = bo;
+ if (result != VK_SUCCESS)
return false;
device->thread_trace.ptr = ws->buffer_map(device->thread_trace.bo);
radeon_emit(cs, PKT3_NOP_PAD);
}
- device->gfx_init =
+ VkResult result =
device->ws->buffer_create(device->ws, cs->cdw * 4, 4096, device->ws->cs_domain(device->ws),
RADEON_FLAG_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING |
RADEON_FLAG_READ_ONLY | RADEON_FLAG_GTT_WC,
- RADV_BO_PRIORITY_CS);
- if (!device->gfx_init)
+ RADV_BO_PRIORITY_CS, &device->gfx_init);
+ if (result != VK_SUCCESS)
goto fail;
void *map = device->ws->buffer_map(device->gfx_init);
FREE(bo);
}
-static struct radeon_winsys_bo *
+static VkResult
radv_amdgpu_winsys_bo_create(struct radeon_winsys *_ws, uint64_t size, unsigned alignment,
enum radeon_bo_domain initial_domain, enum radeon_bo_flag flags,
- unsigned priority)
+ unsigned priority, struct radeon_winsys_bo **out_bo)
{
struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
struct radv_amdgpu_winsys_bo *bo;
uint64_t va = 0;
amdgpu_va_handle va_handle;
int r;
+ VkResult result = VK_SUCCESS;
+
+ /* Clear the output first: be robust against callers that use the BO's NULL-ness to decide
+  * whether it needs to be freed. */
+ *out_bo = NULL;
+
bo = CALLOC_STRUCT(radv_amdgpu_winsys_bo);
if (!bo) {
- return NULL;
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
}
unsigned virt_alignment = alignment;
r = amdgpu_va_range_alloc(
ws->dev, amdgpu_gpu_va_range_general, size, virt_alignment, 0, &va, &va_handle,
(flags & RADEON_FLAG_32BIT ? AMDGPU_VA_RANGE_32_BIT : 0) | AMDGPU_VA_RANGE_HIGH);
- if (r)
+ if (r) {
+ result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
goto error_va_alloc;
+ }
bo->base.va = va;
bo->va_handle = va_handle;
if (flags & RADEON_FLAG_VIRTUAL) {
ranges = realloc(NULL, sizeof(struct radv_amdgpu_map_range));
- if (!ranges)
+ if (!ranges) {
+ result = VK_ERROR_OUT_OF_HOST_MEMORY;
goto error_ranges_alloc;
+ }
bo->ranges = ranges;
bo->range_count = 1;
radv_amdgpu_winsys_virtual_map(ws, bo, bo->ranges);
radv_amdgpu_log_bo(ws, bo, false);
- return (struct radeon_winsys_bo *)bo;
+ *out_bo = (struct radeon_winsys_bo *)bo;
+ return VK_SUCCESS;
}
request.alloc_size = size;
fprintf(stderr, "amdgpu: size : %" PRIu64 " bytes\n", size);
fprintf(stderr, "amdgpu: alignment : %u bytes\n", alignment);
fprintf(stderr, "amdgpu: domains : %u\n", initial_domain);
+ result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
goto error_bo_alloc;
}
r = radv_amdgpu_bo_va_op(ws, buf_handle, 0, size, va, flags, 0, AMDGPU_VA_OP_MAP);
- if (r)
+ if (r) {
+ result = VK_ERROR_UNKNOWN;
goto error_va_map;
+ }
bo->bo = buf_handle;
bo->base.initial_domain = initial_domain;
radv_amdgpu_global_bo_list_add(ws, bo);
radv_amdgpu_log_bo(ws, bo, false);
- return (struct radeon_winsys_bo *)bo;
+ *out_bo = (struct radeon_winsys_bo *)bo;
+ return VK_SUCCESS;
error_va_map:
amdgpu_bo_free(buf_handle);
error_va_alloc:
FREE(bo);
- return NULL;
+ return result;
}
static void *
return vm_alignment;
}
-static struct radeon_winsys_bo *
+static VkResult
radv_amdgpu_winsys_bo_from_ptr(struct radeon_winsys *_ws, void *pointer, uint64_t size,
- unsigned priority)
+ unsigned priority, struct radeon_winsys_bo **out_bo)
{
struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
amdgpu_bo_handle buf_handle;
uint64_t va;
amdgpu_va_handle va_handle;
uint64_t vm_alignment;
+ VkResult result = VK_SUCCESS;
+
+ /* Clear the output first: be robust against callers that use the BO's NULL-ness to decide
+  * whether it needs to be freed. */
+ *out_bo = NULL;
bo = CALLOC_STRUCT(radv_amdgpu_winsys_bo);
if (!bo)
- return NULL;
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
- if (amdgpu_create_bo_from_user_mem(ws->dev, pointer, size, &buf_handle))
+ if (amdgpu_create_bo_from_user_mem(ws->dev, pointer, size, &buf_handle)) {
+ result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
goto error;
+ }
/* Using the optimal VM alignment also fixes GPU hangs for buffers that
* are imported.
vm_alignment = radv_amdgpu_get_optimal_vm_alignment(ws, size, ws->info.gart_page_size);
if (amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general, size, vm_alignment, 0, &va,
- &va_handle, AMDGPU_VA_RANGE_HIGH))
+ &va_handle, AMDGPU_VA_RANGE_HIGH)) {
+ result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
goto error_va_alloc;
+ }
- if (amdgpu_bo_va_op(buf_handle, 0, size, va, 0, AMDGPU_VA_OP_MAP))
+ if (amdgpu_bo_va_op(buf_handle, 0, size, va, 0, AMDGPU_VA_OP_MAP)) {
+ result = VK_ERROR_UNKNOWN;
goto error_va_map;
+ }
/* Initialize it */
bo->base.va = va;
radv_amdgpu_global_bo_list_add(ws, bo);
radv_amdgpu_log_bo(ws, bo, false);
- return (struct radeon_winsys_bo *)bo;
+ *out_bo = (struct radeon_winsys_bo *)bo;
+ return VK_SUCCESS;
error_va_map:
amdgpu_va_range_free(va_handle);
error:
FREE(bo);
- return NULL;
+ return result;
}
-static struct radeon_winsys_bo *
+static VkResult
radv_amdgpu_winsys_bo_from_fd(struct radeon_winsys *_ws, int fd, unsigned priority,
- uint64_t *alloc_size)
+ struct radeon_winsys_bo **out_bo, uint64_t *alloc_size)
{
struct radv_amdgpu_winsys *ws = radv_amdgpu_winsys(_ws);
struct radv_amdgpu_winsys_bo *bo;
struct amdgpu_bo_info info = {0};
enum radeon_bo_domain initial = 0;
int r;
+ VkResult vk_result = VK_SUCCESS;
+
+ /* Clear the output first: be robust against callers that use the BO's NULL-ness to decide
+  * whether it needs to be freed. */
+ *out_bo = NULL;
+
bo = CALLOC_STRUCT(radv_amdgpu_winsys_bo);
if (!bo)
- return NULL;
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
r = amdgpu_bo_import(ws->dev, type, fd, &result);
- if (r)
+ if (r) {
+ vk_result = VK_ERROR_INVALID_EXTERNAL_HANDLE;
goto error;
+ }
r = amdgpu_bo_query_info(result.buf_handle, &info);
- if (r)
+ if (r) {
+ vk_result = VK_ERROR_UNKNOWN;
goto error_query;
+ }
if (alloc_size) {
*alloc_size = info.alloc_size;
r = amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general, result.alloc_size, 1 << 20, 0,
&va, &va_handle, AMDGPU_VA_RANGE_HIGH);
- if (r)
+ if (r) {
+ vk_result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
goto error_query;
+ }
r =
radv_amdgpu_bo_va_op(ws, result.buf_handle, 0, result.alloc_size, va, 0, 0, AMDGPU_VA_OP_MAP);
- if (r)
+ if (r) {
+ vk_result = VK_ERROR_UNKNOWN;
goto error_va_map;
+ }
if (info.preferred_heap & AMDGPU_GEM_DOMAIN_VRAM)
initial |= RADEON_DOMAIN_VRAM;
radv_amdgpu_global_bo_list_add(ws, bo);
radv_amdgpu_log_bo(ws, bo, false);
- return (struct radeon_winsys_bo *)bo;
+ *out_bo = (struct radeon_winsys_bo *)bo;
+ return VK_SUCCESS;
error_va_map:
amdgpu_va_range_free(va_handle);
error:
FREE(bo);
- return NULL;
+ return vk_result;
}
static bool
radv_amdgpu_init_cs(cs, ring_type);
if (cs->ws->use_ib_bos) {
- cs->ib_buffer =
+ VkResult result =
ws->buffer_create(ws, ib_size, 0, radv_amdgpu_cs_domain(ws),
RADEON_FLAG_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING |
RADEON_FLAG_READ_ONLY | RADEON_FLAG_GTT_WC,
- RADV_BO_PRIORITY_CS);
- if (!cs->ib_buffer) {
+ RADV_BO_PRIORITY_CS, &cs->ib_buffer);
+ if (result != VK_SUCCESS) {
free(cs);
return NULL;
}
cs->old_ib_buffers[cs->num_old_ib_buffers].bo = cs->ib_buffer;
cs->old_ib_buffers[cs->num_old_ib_buffers++].cdw = cs->base.cdw;
- cs->ib_buffer =
+ VkResult result =
cs->ws->base.buffer_create(&cs->ws->base, ib_size, 0, radv_amdgpu_cs_domain(&cs->ws->base),
RADEON_FLAG_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING |
RADEON_FLAG_READ_ONLY | RADEON_FLAG_GTT_WC,
- RADV_BO_PRIORITY_CS);
+ RADV_BO_PRIORITY_CS, &cs->ib_buffer);
- if (!cs->ib_buffer) {
+ if (result != VK_SUCCESS) {
cs->base.cdw = 0;
- cs->status = VK_ERROR_OUT_OF_DEVICE_MEMORY;
+ cs->status = result;
cs->ib_buffer = cs->old_ib_buffers[--cs->num_old_ib_buffers].bo;
pad_words++;
}
- bos[j] = ws->buffer_create(
+ ws->buffer_create(
ws, 4 * size, 4096, radv_amdgpu_cs_domain(ws),
RADEON_FLAG_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING | RADEON_FLAG_READ_ONLY,
- RADV_BO_PRIORITY_CS);
+ RADV_BO_PRIORITY_CS, &bos[j]);
ptr = ws->buffer_map(bos[j]);
if (needs_preamble) {
}
assert(cnt);
- bos[0] = ws->buffer_create(
+ ws->buffer_create(
ws, 4 * size, 4096, radv_amdgpu_cs_domain(ws),
RADEON_FLAG_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING | RADEON_FLAG_READ_ONLY,
- RADV_BO_PRIORITY_CS);
+ RADV_BO_PRIORITY_CS, &bos[0]);
ptr = ws->buffer_map(bos[0]);
if (preamble_cs) {
ctx->ws = ws;
assert(AMDGPU_HW_IP_NUM * MAX_RINGS_PER_TYPE * sizeof(uint64_t) <= 4096);
- ctx->fence_bo = ws->base.buffer_create(
- &ws->base, 4096, 8, RADEON_DOMAIN_GTT,
- RADEON_FLAG_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING, RADV_BO_PRIORITY_CS);
- if (!ctx->fence_bo) {
- result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
+ result = ws->base.buffer_create(&ws->base, 4096, 8, RADEON_DOMAIN_GTT,
+ RADEON_FLAG_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING,
+ RADV_BO_PRIORITY_CS, &ctx->fence_bo);
+ if (result != VK_SUCCESS) {
goto fail_alloc;
}
#include "radv_null_bo.h"
#include "util/u_memory.h"
-static struct radeon_winsys_bo *
+static VkResult
radv_null_winsys_bo_create(struct radeon_winsys *_ws, uint64_t size, unsigned alignment,
enum radeon_bo_domain initial_domain, enum radeon_bo_flag flags,
- unsigned priority)
+ unsigned priority, struct radeon_winsys_bo **out_bo)
{
struct radv_null_winsys_bo *bo;
+ /* As a courtesy, clear the output first so callers can use NULL to decide whether the BO
+  * needs to be destroyed. */
+ *out_bo = NULL;
+
bo = CALLOC_STRUCT(radv_null_winsys_bo);
if (!bo)
- return NULL;
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
bo->ptr = malloc(size);
if (!bo->ptr)
goto error_ptr_alloc;
- return (struct radeon_winsys_bo *)bo;
+ *out_bo = (struct radeon_winsys_bo *)bo;
+ return VK_SUCCESS;
error_ptr_alloc:
FREE(bo);
- return NULL;
+ return VK_ERROR_OUT_OF_HOST_MEMORY;
}
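Clearing *out_bo before any allocation is what keeps the common cleanup idiom safe: callers may destroy the BO based purely on whether the pointer is non-NULL, even when creation failed partway through a larger setup. A sketch of the caller pattern this protects (hypothetical caller, not from the patch):

   struct radeon_winsys_bo *bo = NULL;
   if (ws->buffer_create(ws, size, 256, RADEON_DOMAIN_GTT, flags,
                         RADV_BO_PRIORITY_CS, &bo) != VK_SUCCESS)
      goto fail;
   /* ... later setup steps that can also jump to fail ... */
fail:
   if (bo) /* safe even on creation failure: *out_bo was cleared up front */
      ws->buffer_destroy(ws, bo);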
static void *