From e1a11b81547049588379386d4c045d5de3937c47 Mon Sep 17 00:00:00 2001
From: Iago Toral Quiroga
Date: Wed, 17 Jun 2020 12:15:42 +0200
Subject: [PATCH] v3dv: always map full BOs

Both the API user and the driver may attempt to map a BO, possibly only
partially and using different ranges. This is a problem because we only
have a single map per BO. Fix this by making sure that when a BO is
mapped, we always map its entire range. This way, if a BO has been
mapped before, we know that map is still valid no matter which region
we need to access now.

Part-of:
---
 src/broadcom/vulkan/v3dv_device.c | 32 ++++++++++++++------------------
 src/broadcom/vulkan/v3dv_queue.c  | 27 ++++++---------------------
 2 files changed, 20 insertions(+), 39 deletions(-)

diff --git a/src/broadcom/vulkan/v3dv_device.c b/src/broadcom/vulkan/v3dv_device.c
index f3f9edc..1aa9ae9 100644
--- a/src/broadcom/vulkan/v3dv_device.c
+++ b/src/broadcom/vulkan/v3dv_device.c
@@ -1382,9 +1382,7 @@ device_unmap(struct v3dv_device *device, struct v3dv_device_memory *mem)
 }
 
 static VkResult
-device_map(struct v3dv_device *device,
-           struct v3dv_device_memory *mem,
-           uint32_t size)
+device_map(struct v3dv_device *device, struct v3dv_device_memory *mem)
 {
    assert(mem && mem->bo);
 
@@ -1396,14 +1394,16 @@ device_map(struct v3dv_device *device,
     *
     * We are not concerned with this ourselves (validation layers should
    * catch these errors and warn users), however, the driver may internally
-    * map things (for example for debug CLIF dumps) so by the time the user
-    * call here the buffer might already been mapped internally, so let's just
-    * make sure we unmap if needed.
+    * map things (for example for debug CLIF dumps or some CPU-side operations)
+    * so by the time the user calls here the buffer might already have been
+    * mapped internally by the driver.
     */
-   if (mem->bo->map)
-      device_unmap(device, mem);
+   if (mem->bo->map) {
+      assert(mem->bo->map_size == mem->bo->size);
+      return VK_SUCCESS;
+   }
 
-   bool ok = v3dv_bo_map(device, mem->bo, size);
+   bool ok = v3dv_bo_map(device, mem->bo, mem->bo->size);
    if (!ok)
       return VK_ERROR_MEMORY_MAP_FAILED;
 
@@ -1621,16 +1621,12 @@ v3dv_MapMemory(VkDevice _device,
 
    assert(offset < mem->bo->size);
 
-   /* We always map from the beginning of the region, so if our offset
-    * is not 0 and we are not mapping the entire region, we need to
-    * add the offset to the map size.
+   /* Since the driver can map BOs internally as well and the mapped range
+    * required by the user or the driver might not be the same, we always map
+    * the entire BO and then add the requested offset to the start address
+    * of the mapped region.
     */
-   if (size == VK_WHOLE_SIZE)
-      size = mem->bo->size;
-   else if (offset > 0)
-      size += offset;
-
-   VkResult result = device_map(device, mem, size);
+   VkResult result = device_map(device, mem);
    if (result != VK_SUCCESS)
       return vk_error(device->instance, result);
 
diff --git a/src/broadcom/vulkan/v3dv_queue.c b/src/broadcom/vulkan/v3dv_queue.c
index 267ce46..1200c5f 100644
--- a/src/broadcom/vulkan/v3dv_queue.c
+++ b/src/broadcom/vulkan/v3dv_queue.c
@@ -196,19 +196,10 @@ handle_copy_query_results_cpu_job(struct v3dv_job *job)
 
    assert(info->dst && info->dst->mem && info->dst->mem->bo);
    struct v3dv_bo *bo = info->dst->mem->bo;
-   const uint32_t bo_size = info->dst->size;
-
-   /* Map the entire dst buffer for the CPU copy */
-   bool dst_was_mapped = bo->map != NULL;
-   uint32_t map_size = bo->map_size;
-   bool needs_map = false;
-   if (!dst_was_mapped) {
-      needs_map = true;
-   } else if (map_size < bo_size) {
-      v3dv_bo_unmap(job->device, bo);
-      needs_map = true;
-   }
-   if (needs_map && !v3dv_bo_map(job->device, bo, bo_size))
+
+   /* Map the entire dst buffer for the CPU copy if needed */
+   assert(!bo->map || bo->map_size == bo->size);
+   if (!bo->map && !v3dv_bo_map(job->device, bo, bo->size))
       return vk_error(job->device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
 
    /* FIXME: if flags includes VK_QUERY_RESULT_WAIT_BIT this could trigger a
@@ -223,14 +214,6 @@ handle_copy_query_results_cpu_job(struct v3dv_job *job)
                                      info->stride,
                                      info->flags);
 
-   if (needs_map) {
-      v3dv_bo_unmap(job->device, bo);
-      if (dst_was_mapped) {
-         if (!v3dv_bo_map(job->device, bo, map_size))
-            return vk_error(job->device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
-      }
-   }
-
    return VK_SUCCESS;
 }
 
@@ -403,11 +386,13 @@ handle_copy_buffer_to_image_cpu_job(struct v3dv_job *job)
 
    /* Map BOs */
    struct v3dv_bo *dst_bo = info->image->mem->bo;
+   assert(!dst_bo->map || dst_bo->map_size == dst_bo->size);
    if (!dst_bo->map && !v3dv_bo_map(job->device, dst_bo, dst_bo->size))
       return vk_error(job->device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
    void *dst_ptr = dst_bo->map;
 
    struct v3dv_bo *src_bo = info->buffer->mem->bo;
+   assert(!src_bo->map || src_bo->map_size == src_bo->size);
    if (!src_bo->map && !v3dv_bo_map(job->device, src_bo, src_bo->size))
      return vk_error(job->device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
    void *src_ptr = src_bo->map;
-- 
2.7.4
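
For reference, a minimal standalone C sketch of the pattern the patch adopts: map the whole BO once, reuse any existing map, and derive the caller's pointer by adding the requested offset. The demo_bo struct and the demo_* helpers are hypothetical stand-ins, and plain mmap() is assumed in place of the driver's BO-mapping ioctl; only the diff above reflects the actual v3dv code.

/* Illustrative sketch only: demo_bo and the demo_* helpers are made up for
 * this example, and mmap() stands in for the driver's BO-mapping ioctl. */
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

struct demo_bo {
   int fd;            /* descriptor the buffer can be mapped through */
   uint32_t size;     /* total size of the buffer object */
   void *map;         /* CPU mapping, NULL if not mapped */
   uint32_t map_size; /* size of the current mapping */
};

/* Map the whole BO, reusing an existing map if present. Because every map
 * covers the full range, a prior map is valid for any offset/size a later
 * caller needs. */
static bool
demo_bo_map_full(struct demo_bo *bo)
{
   if (bo->map) {
      assert(bo->map_size == bo->size); /* invariant: maps are always full */
      return true;
   }

   void *ptr = mmap(NULL, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED,
                    bo->fd, 0);
   if (ptr == MAP_FAILED)
      return false;

   bo->map = ptr;
   bo->map_size = bo->size;
   return true;
}

/* What the MapMemory path effectively does now: map everything, then return
 * a pointer offset into that single full mapping. */
static void *
demo_map_at_offset(struct demo_bo *bo, uint32_t offset)
{
   assert(offset < bo->size);
   if (!demo_bo_map_full(bo))
      return NULL;
   return (uint8_t *)bo->map + offset;
}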