From 68d88f0237cbd743db83c8ad3e313251ab8fcff8 Mon Sep 17 00:00:00 2001
From: Iago Toral Quiroga
Date: Fri, 3 Mar 2017 10:55:19 +0100
Subject: [PATCH] anv: handle failures when growing reloc lists

Growing the reloc list happens through calling anv_reloc_list_add() or
anv_reloc_list_append(). Make sure that we call these through helpers
that check the result and set the batch error status if needed.

v2:
- Handling the crashes is not good enough; we need to keep track of the
  error. To do that, keep track of the errors in the batch instead (Jason).
- Make reloc list growth go through helpers so we can have a central
  place where we can do error tracking (Jason).

v3:
- Callers that need the offset returned by anv_reloc_list_add() can
  compute it themselves, since it is derived from the inputs to the
  function. So change the function to return a VkResult, make
  anv_batch_emit_reloc() also return a VkResult, and let their callers
  do the error management (Topi).

v4:
- Let anv_batch_emit_reloc() return a uint64_t as it originally did;
  there is no real benefit in having it return a VkResult.
- Do not add an is_aux parameter to add_surface_state_reloc(); instead,
  do the error checking for aux in add_image_view_relocs() separately.

Reviewed-by: Topi Pohjolainen
---
 src/intel/vulkan/anv_batch_chain.c | 35 ++++++++++++++++++++++++-----------
 src/intel/vulkan/anv_private.h     |  2 +-
 src/intel/vulkan/genX_blorp_exec.c |  7 +++++--
 src/intel/vulkan/genX_cmd_buffer.c | 21 +++++++++++++--------
 4 files changed, 43 insertions(+), 22 deletions(-)

diff --git a/src/intel/vulkan/anv_batch_chain.c b/src/intel/vulkan/anv_batch_chain.c
index c304d6d..abd0c17 100644
--- a/src/intel/vulkan/anv_batch_chain.c
+++ b/src/intel/vulkan/anv_batch_chain.c
@@ -140,7 +140,7 @@ anv_reloc_list_grow(struct anv_reloc_list *list,
    return VK_SUCCESS;
 }
 
-uint64_t
+VkResult
 anv_reloc_list_add(struct anv_reloc_list *list,
                    const VkAllocationCallbacks *alloc,
                    uint32_t offset, struct anv_bo *target_bo, uint32_t delta)
@@ -151,8 +151,9 @@ anv_reloc_list_add(struct anv_reloc_list *list,
    const uint32_t domain =
       target_bo->is_winsys_bo ? I915_GEM_DOMAIN_RENDER : 0;
 
-   anv_reloc_list_grow(list, alloc, 1);
-   /* TODO: Handle failure */
+   VkResult result = anv_reloc_list_grow(list, alloc, 1);
+   if (result != VK_SUCCESS)
+      return result;
 
    /* XXX: Can we use I915_EXEC_HANDLE_LUT? */
    index = list->num_relocs++;
@@ -166,16 +167,17 @@ anv_reloc_list_add(struct anv_reloc_list *list,
    entry->write_domain = domain;
    VG(VALGRIND_CHECK_MEM_IS_DEFINED(entry, sizeof(*entry)));
 
-   return target_bo->offset + delta;
+   return VK_SUCCESS;
 }
 
-static void
+static VkResult
 anv_reloc_list_append(struct anv_reloc_list *list,
                       const VkAllocationCallbacks *alloc,
                       struct anv_reloc_list *other, uint32_t offset)
 {
-   anv_reloc_list_grow(list, alloc, other->num_relocs);
-   /* TODO: Handle failure */
+   VkResult result = anv_reloc_list_grow(list, alloc, other->num_relocs);
+   if (result != VK_SUCCESS)
+      return result;
 
    memcpy(&list->relocs[list->num_relocs], &other->relocs[0],
           other->num_relocs * sizeof(other->relocs[0]));
@@ -186,6 +188,7 @@ anv_reloc_list_append(struct anv_reloc_list *list,
       list->relocs[i + list->num_relocs].offset += offset;
 
    list->num_relocs += other->num_relocs;
+   return VK_SUCCESS;
 }
 
 /*-----------------------------------------------------------------------*
@@ -215,8 +218,14 @@ uint64_t
 anv_batch_emit_reloc(struct anv_batch *batch,
                      void *location, struct anv_bo *bo, uint32_t delta)
 {
-   return anv_reloc_list_add(batch->relocs, batch->alloc,
-                             location - batch->start, bo, delta);
+   VkResult result = anv_reloc_list_add(batch->relocs, batch->alloc,
+                                        location - batch->start, bo, delta);
+   if (result != VK_SUCCESS) {
+      anv_batch_set_error(batch, result);
+      return 0;
+   }
+
+   return bo->offset + delta;
 }
 
 void
@@ -241,8 +250,12 @@ anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other)
    memcpy(batch->next, other->start, size);
 
    offset = batch->next - batch->start;
-   anv_reloc_list_append(batch->relocs, batch->alloc,
-                         other->relocs, offset);
+   VkResult result = anv_reloc_list_append(batch->relocs, batch->alloc,
+                                           other->relocs, offset);
+   if (result != VK_SUCCESS) {
+      anv_batch_set_error(batch, result);
+      return;
+   }
 
    batch->next += size;
 }
diff --git a/src/intel/vulkan/anv_private.h b/src/intel/vulkan/anv_private.h
index 87538de..a2da041 100644
--- a/src/intel/vulkan/anv_private.h
+++ b/src/intel/vulkan/anv_private.h
@@ -673,7 +673,7 @@ VkResult anv_reloc_list_init(struct anv_reloc_list *list,
 void anv_reloc_list_finish(struct anv_reloc_list *list,
                            const VkAllocationCallbacks *alloc);
 
-uint64_t anv_reloc_list_add(struct anv_reloc_list *list,
+VkResult anv_reloc_list_add(struct anv_reloc_list *list,
                             const VkAllocationCallbacks *alloc,
                             uint32_t offset, struct anv_bo *target_bo,
                             uint32_t delta);
diff --git a/src/intel/vulkan/genX_blorp_exec.c b/src/intel/vulkan/genX_blorp_exec.c
index 7084735..ba834d4 100644
--- a/src/intel/vulkan/genX_blorp_exec.c
+++ b/src/intel/vulkan/genX_blorp_exec.c
@@ -57,8 +57,11 @@ blorp_surface_reloc(struct blorp_batch *batch, uint32_t ss_offset,
                     struct blorp_address address, uint32_t delta)
 {
    struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;
-   anv_reloc_list_add(&cmd_buffer->surface_relocs, &cmd_buffer->pool->alloc,
-                      ss_offset, address.buffer, address.offset + delta);
+   VkResult result =
+      anv_reloc_list_add(&cmd_buffer->surface_relocs, &cmd_buffer->pool->alloc,
+                         ss_offset, address.buffer, address.offset + delta);
+   if (result != VK_SUCCESS)
+      anv_batch_set_error(&cmd_buffer->batch, result);
 }
 
 static void *
diff --git a/src/intel/vulkan/genX_cmd_buffer.c b/src/intel/vulkan/genX_cmd_buffer.c
index e3faa17..0e24000 100644
--- a/src/intel/vulkan/genX_cmd_buffer.c
+++ b/src/intel/vulkan/genX_cmd_buffer.c
@@ -159,8 +159,11 @@ add_surface_state_reloc(struct anv_cmd_buffer *cmd_buffer,
 {
    const struct isl_device *isl_dev = &cmd_buffer->device->isl_dev;
 
-   anv_reloc_list_add(&cmd_buffer->surface_relocs, &cmd_buffer->pool->alloc,
-                      state.offset + isl_dev->ss.addr_offset, bo, offset);
+   VkResult result =
+      anv_reloc_list_add(&cmd_buffer->surface_relocs, &cmd_buffer->pool->alloc,
+                         state.offset + isl_dev->ss.addr_offset, bo, offset);
+   if (result != VK_SUCCESS)
+      anv_batch_set_error(&cmd_buffer->batch, result);
 }
 
 static void
@@ -171,9 +174,7 @@ add_image_view_relocs(struct anv_cmd_buffer *cmd_buffer,
 {
    const struct isl_device *isl_dev = &cmd_buffer->device->isl_dev;
 
-   anv_reloc_list_add(&cmd_buffer->surface_relocs, &cmd_buffer->pool->alloc,
-                      state.offset + isl_dev->ss.addr_offset,
-                      iview->bo, iview->offset);
+   add_surface_state_reloc(cmd_buffer, state, iview->bo, iview->offset);
 
    if (aux_usage != ISL_AUX_USAGE_NONE) {
       uint32_t aux_offset = iview->offset + iview->image->aux_surface.offset;
@@ -186,9 +187,13 @@ add_image_view_relocs(struct anv_cmd_buffer *cmd_buffer,
       uint32_t *aux_addr_dw = state.map + isl_dev->ss.aux_addr_offset;
       aux_offset += *aux_addr_dw & 0xfff;
 
-      anv_reloc_list_add(&cmd_buffer->surface_relocs, &cmd_buffer->pool->alloc,
-                         state.offset + isl_dev->ss.aux_addr_offset,
-                         iview->bo, aux_offset);
+      VkResult result =
+         anv_reloc_list_add(&cmd_buffer->surface_relocs,
+                            &cmd_buffer->pool->alloc,
+                            state.offset + isl_dev->ss.aux_addr_offset,
+                            iview->bo, aux_offset);
+      if (result != VK_SUCCESS)
+         anv_batch_set_error(&cmd_buffer->batch, result);
    }
 }
 
-- 
2.7.4
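For reference, below is a minimal, self-contained sketch of the error-handling
pattern this patch applies everywhere: an operation that can fail returns a
VkResult, and a caller that cannot propagate that result latches the first
failure into the batch and bails out instead of ignoring it. The types and
helpers in the sketch (reloc_list, batch, batch_set_error(), and the
VkResult/VK_* stand-ins) are simplified placeholders, not the real anv/Vulkan
definitions; the actual code paths are anv_reloc_list_grow(),
anv_reloc_list_add() and anv_batch_set_error() as shown in the diff above.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins for VkResult and the anv structures; the real
 * definitions live in the Vulkan headers and anv_private.h. */
typedef int32_t VkResult;
#define VK_SUCCESS                   0
#define VK_ERROR_OUT_OF_HOST_MEMORY -1

struct reloc_list {
   size_t num_relocs;
   size_t array_length;
   uint64_t *relocs;   /* placeholder for the real relocation entries */
};

struct batch {
   VkResult status;    /* sticky error status, tracked in the batch */
   struct reloc_list relocs;
};

/* Record the first failure and keep it (the role anv_batch_set_error()
 * plays in the patch). */
static void
batch_set_error(struct batch *batch, VkResult error)
{
   if (batch->status == VK_SUCCESS)
      batch->status = error;
}

/* Growing the list can fail, so it reports a VkResult instead of the
 * failure being silently ignored. */
static VkResult
reloc_list_grow(struct reloc_list *list, size_t num_additional_relocs)
{
   if (list->num_relocs + num_additional_relocs <= list->array_length)
      return VK_SUCCESS;

   size_t new_length = list->array_length ? list->array_length * 2 : 16;
   while (new_length < list->num_relocs + num_additional_relocs)
      new_length *= 2;

   uint64_t *new_relocs = realloc(list->relocs,
                                  new_length * sizeof(*new_relocs));
   if (new_relocs == NULL)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   list->relocs = new_relocs;
   list->array_length = new_length;
   return VK_SUCCESS;
}

/* Counterpart of the patched anv_reloc_list_add(): propagate the failure
 * to the caller rather than only returning an offset. */
static VkResult
reloc_list_add(struct reloc_list *list, uint64_t entry)
{
   VkResult result = reloc_list_grow(list, 1);
   if (result != VK_SUCCESS)
      return result;

   list->relocs[list->num_relocs++] = entry;
   return VK_SUCCESS;
}

/* Caller-side pattern used throughout the diff: check the result and
 * latch it into the batch when it cannot be returned directly. */
static void
batch_add_reloc(struct batch *batch, uint64_t entry)
{
   VkResult result = reloc_list_add(&batch->relocs, entry);
   if (result != VK_SUCCESS)
      batch_set_error(batch, result);
}

int
main(void)
{
   struct batch batch = { .status = VK_SUCCESS };

   for (uint64_t i = 0; i < 100; i++)
      batch_add_reloc(&batch, i);

   printf("batch status: %d, relocs recorded: %zu\n",
          (int)batch.status, batch.relocs.num_relocs);
   free(batch.relocs.relocs);
   return 0;
}

The design point, per the v2 note above, is that the error lives in the batch
itself: a failed growth is recorded once rather than crashing or being
silently dropped, and the recorded status can be reported back to the
application later.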