}
static VkResult
-device_map(struct v3dv_device *device,
- struct v3dv_device_memory *mem,
- uint32_t size)
+device_map(struct v3dv_device *device, struct v3dv_device_memory *mem)
{
assert(mem && mem->bo);
*
* We are not concerned with this ourselves (validation layers should
* catch these errors and warn users), however, the driver may internally
- * map things (for example for debug CLIF dumps) so by the time the user
- * call here the buffer might already been mapped internally, so let's just
- * make sure we unmap if needed.
+ * map things (for example for debug CLIF dumps or some CPU-side operations)
+ * so by the time the user calls here the buffer might already have been
+ * internally by the driver.
*/
- if (mem->bo->map)
- device_unmap(device, mem);
+ if (mem->bo->map) {
+ assert(mem->bo->map_size == mem->bo->size);
+ return VK_SUCCESS;
+ }
- bool ok = v3dv_bo_map(device, mem->bo, size);
+ bool ok = v3dv_bo_map(device, mem->bo, mem->bo->size);
if (!ok)
return VK_ERROR_MEMORY_MAP_FAILED;
assert(offset < mem->bo->size);
- /* We always map from the beginning of the region, so if our offset
- * is not 0 and we are not mapping the entire region, we need to
- * add the offset to the map size.
+ /* Since the driver can map BOs internally as well and the mapped range
+ * required by the user or the driver might not be the same, we always map
+ * the entire BO and then add the requested offset to the start address
+ * of the mapped region.
*/
- if (size == VK_WHOLE_SIZE)
- size = mem->bo->size;
- else if (offset > 0)
- size += offset;
-
- VkResult result = device_map(device, mem, size);
+ VkResult result = device_map(device, mem);
if (result != VK_SUCCESS)
return vk_error(device->instance, result);
assert(info->dst && info->dst->mem && info->dst->mem->bo);
struct v3dv_bo *bo = info->dst->mem->bo;
- const uint32_t bo_size = info->dst->size;
-
- /* Map the entire dst buffer for the CPU copy */
- bool dst_was_mapped = bo->map != NULL;
- uint32_t map_size = bo->map_size;
- bool needs_map = false;
- if (!dst_was_mapped) {
- needs_map = true;
- } else if (map_size < bo_size) {
- v3dv_bo_unmap(job->device, bo);
- needs_map = true;
- }
- if (needs_map && !v3dv_bo_map(job->device, bo, bo_size))
+
+ /* Map the entire dst buffer for the CPU copy if needed */
+ assert(!bo->map || bo->map_size == bo->size);
+ if (!bo->map && !v3dv_bo_map(job->device, bo, bo->size))
return vk_error(job->device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
/* FIXME: if flags includes VK_QUERY_RESULT_WAIT_BIT this could trigger a
info->stride,
info->flags);
- if (needs_map) {
- v3dv_bo_unmap(job->device, bo);
- if (dst_was_mapped) {
- if (!v3dv_bo_map(job->device, bo, map_size))
- return vk_error(job->device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
- }
- }
-
return VK_SUCCESS;
}
/* Map BOs */
struct v3dv_bo *dst_bo = info->image->mem->bo;
+ assert(!dst_bo->map || dst_bo->map_size == dst_bo->size);
if (!dst_bo->map && !v3dv_bo_map(job->device, dst_bo, dst_bo->size))
return vk_error(job->device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
void *dst_ptr = dst_bo->map;
struct v3dv_bo *src_bo = info->buffer->mem->bo;
+ assert(!src_bo->map || src_bo->map_size == src_bo->size);
if (!src_bo->map && !v3dv_bo_map(job->device, src_bo, src_bo->size))
return vk_error(job->device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
void *src_ptr = src_bo->map;