zink: cache transfer maps
author Mike Blumenkrantz <michael.blumenkrantz@gmail.com>
Tue, 3 Nov 2020 19:28:17 +0000 (14:28 -0500)
committer Marge Bot <eric+marge@anholt.net>
Tue, 23 Mar 2021 14:23:17 +0000 (14:23 +0000)
the vk spec disallows mapping a memory object more than once, but we
always map the full memory range anyway, so we can just refcount the
mapping and cache the pointer on the resource object for reuse, which
avoids any double-mapping here (a rough sketch of the idea follows below)
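
a minimal illustrative sketch of that caching scheme, written against the
public vulkan api with c11 atomics in place of mesa's p_atomic helpers;
struct mapped_mem, map_mem(), and unmap_mem() are hypothetical stand-ins
for the map/map_count fields this patch adds to zink_resource_object, and
the sketch assumes the caller serializes the first map the way gallium
transfer_map does:

    /* sketch only: refcount a single vkMapMemory() of the whole allocation
     * so the same VkDeviceMemory is never host-mapped twice
     */
    #include <stdatomic.h>
    #include <vulkan/vulkan.h>

    struct mapped_mem {
       VkDeviceMemory mem;
       VkDeviceSize offset;
       VkDeviceSize size;
       void *cached_map;       /* pointer returned by the first vkMapMemory() */
       atomic_uint map_count;  /* outstanding maps on this allocation */
    };

    static void *
    map_mem(VkDevice dev, struct mapped_mem *m)
    {
       if (!m->cached_map) {
          /* first map: map the full range once and cache the pointer */
          if (vkMapMemory(dev, m->mem, m->offset, m->size, 0,
                          &m->cached_map) != VK_SUCCESS)
             return NULL;
       }
       atomic_fetch_add(&m->map_count, 1);
       return m->cached_map;
    }

    static void
    unmap_mem(VkDevice dev, struct mapped_mem *m)
    {
       /* only the last unmap actually calls vkUnmapMemory() */
       if (atomic_fetch_sub(&m->map_count, 1) == 1) {
          vkUnmapMemory(dev, m->mem);
          m->cached_map = NULL;
       }
    }

callers always get the same base pointer back and apply their own box
offsets on top, which is what the buffer_transfer_map()/zink_transfer_map()
changes below do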

Reviewed-by: Dave Airlie <airlied@redhat.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/9725>

src/gallium/drivers/zink/zink_resource.c
src/gallium/drivers/zink/zink_resource.h

diff --git a/src/gallium/drivers/zink/zink_resource.c b/src/gallium/drivers/zink/zink_resource.c
index 09b578a..caad9a3 100644
--- a/src/gallium/drivers/zink/zink_resource.c
+++ b/src/gallium/drivers/zink/zink_resource.c
@@ -137,6 +137,7 @@ cache_or_free_mem(struct zink_screen *screen, struct zink_resource_object *obj)
 void
 zink_destroy_resource_object(struct zink_screen *screen, struct zink_resource_object *obj)
 {
+   assert(!obj->map_count);
    if (obj->is_buffer)
       vkDestroyBuffer(screen->dev, obj->buffer, NULL);
    else
@@ -659,7 +660,7 @@ buffer_transfer_map(struct zink_context *ctx, struct zink_resource *res, unsigne
                     const struct pipe_box *box, struct zink_transfer *trans)
 {
    struct zink_screen *screen = zink_screen(ctx->base.screen);
-   void *ptr;
+   void *ptr = NULL;
 
    if (!(usage & PIPE_MAP_UNSYNCHRONIZED)) {
       if (usage & PIPE_MAP_DISCARD_WHOLE_RESOURCE) {
@@ -688,10 +689,14 @@ buffer_transfer_map(struct zink_context *ctx, struct zink_resource *res, unsigne
       }
    }
 
+   if (!trans->staging_res && res->obj->map)
+      ptr = res->obj->map;
 
-   VkResult result = vkMapMemory(screen->dev, res->obj->mem, res->obj->offset, res->obj->size, 0, &ptr);
-   if (result != VK_SUCCESS)
-      return NULL;
+   if (!ptr) {
+      VkResult result = vkMapMemory(screen->dev, res->obj->mem, res->obj->offset, res->obj->size, 0, &ptr);
+      if (result != VK_SUCCESS)
+         return NULL;
+   }
 
 #if defined(__APPLE__)
       if (!(usage & PIPE_MAP_DISCARD_WHOLE_RESOURCE)) {
@@ -712,7 +717,6 @@ buffer_transfer_map(struct zink_context *ctx, struct zink_resource *res, unsigne
       }
 #endif
 
-   ptr = ((uint8_t *)ptr) + box->x;
    if (usage & PIPE_MAP_WRITE)
       util_range_add(&res->base, &res->valid_buffer_range, box->x, box->x + box->width);
    return ptr;
@@ -742,9 +746,10 @@ zink_transfer_map(struct pipe_context *pctx,
    trans->base.usage = usage;
    trans->base.box = *box;
 
-   void *ptr;
+   void *ptr, *base;
    if (pres->target == PIPE_BUFFER) {
-      ptr = buffer_transfer_map(ctx, res, usage, box, trans);
+      base = buffer_transfer_map(ctx, res, usage, box, trans);
+      ptr = ((uint8_t *)base) + box->x;
    } else {
       if (usage & PIPE_MAP_WRITE && !(usage & PIPE_MAP_READ))
          /* this is like a blit, so we can potentially dump some clears or maybe we have to  */
@@ -793,12 +798,14 @@ zink_transfer_map(struct pipe_context *pctx,
 
          VkResult result = vkMapMemory(screen->dev, staging_res->obj->mem,
                                        staging_res->obj->offset,
-                                       staging_res->obj->size, 0, &ptr);
+                                       staging_res->obj->size, 0, &base);
          if (result != VK_SUCCESS)
             return NULL;
+         ptr = base;
 
       } else {
          assert(!res->optimal_tiling);
+
          /* special case compute reads since they aren't handled by zink_fence_wait() */
          if (zink_resource_has_usage(res, ZINK_RESOURCE_ACCESS_READ, ZINK_QUEUE_COMPUTE))
             resource_sync_reads_from_compute(ctx, res);
@@ -808,9 +815,13 @@ zink_transfer_map(struct pipe_context *pctx,
             else
                zink_fence_wait(pctx);
          }
-         VkResult result = vkMapMemory(screen->dev, res->obj->mem, res->obj->offset, res->obj->size, 0, &ptr);
-         if (result != VK_SUCCESS)
-            return NULL;
+         if (res->obj->map)
+            base = res->obj->map;
+         else {
+            VkResult result = vkMapMemory(screen->dev, res->obj->mem, res->obj->offset, res->obj->size, 0, &base);
+            if (result != VK_SUCCESS)
+               return NULL;
+         }
          VkImageSubresource isr = {
             res->aspect,
             level,
@@ -825,12 +836,20 @@ zink_transfer_map(struct pipe_context *pctx,
                            box->z * srl.depthPitch +
                            (box->y / desc->block.height) * srl.rowPitch +
                            (box->x / desc->block.width) * (desc->block.bits / 8);
-         ptr = ((uint8_t *)ptr) + offset;
+         ptr = ((uint8_t *)base) + offset;
       }
    }
    if ((usage & PIPE_MAP_PERSISTENT) && !(usage & PIPE_MAP_COHERENT))
       res->obj->persistent_maps++;
 
+   if (trans->staging_res) {
+      zink_resource(trans->staging_res)->obj->map = base;
+      p_atomic_inc(&zink_resource(trans->staging_res)->obj->map_count);
+   } else {
+      res->obj->map = base;
+      p_atomic_inc(&res->obj->map_count);
+   }
+
    *transfer = &trans->base;
    return ptr;
 }
@@ -869,9 +888,14 @@ zink_transfer_unmap(struct pipe_context *pctx,
    struct zink_transfer *trans = (struct zink_transfer *)ptrans;
    if (trans->staging_res) {
       struct zink_resource *staging_res = zink_resource(trans->staging_res);
-      vkUnmapMemory(screen->dev, staging_res->obj->mem);
-   } else
+      if (p_atomic_dec_zero(&staging_res->obj->map_count)) {
+         vkUnmapMemory(screen->dev, staging_res->obj->mem);
+         staging_res->obj->map = NULL;
+      }
+   } else if (p_atomic_dec_zero(&res->obj->map_count)) {
       vkUnmapMemory(screen->dev, res->obj->mem);
+      res->obj->map = NULL;
+   }
    if ((trans->base.usage & PIPE_MAP_PERSISTENT) && !(trans->base.usage & PIPE_MAP_COHERENT))
       res->obj->persistent_maps--;
    if (!(trans->base.usage & (PIPE_MAP_FLUSH_EXPLICIT | PIPE_MAP_COHERENT))) {
diff --git a/src/gallium/drivers/zink/zink_resource.h b/src/gallium/drivers/zink/zink_resource.h
index 4b6ac60..4fd6f9b 100644
--- a/src/gallium/drivers/zink/zink_resource.h
+++ b/src/gallium/drivers/zink/zink_resource.h
@@ -67,6 +67,8 @@ struct zink_resource_object {
 
    struct zink_batch_usage reads;
    struct zink_batch_usage writes;
+   unsigned map_count;
+   void *map;
    bool is_buffer;
    bool host_visible;
 };