zink: clamp to 500 max batch states on nvidia
author Mike Blumenkrantz <michael.blumenkrantz@gmail.com>
Wed, 17 Nov 2021 19:15:18 +0000 (14:15 -0500)
committer Marge Bot <emma+marge@anholt.net>
Thu, 18 Nov 2021 00:00:16 +0000 (00:00 +0000)
I've been advised that leaving this unclamped will use up all the fds
allotted to a process.

Reviewed-by: Dave Airlie <airlied@redhat.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/13844>
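
For context, the throttle this patch makes tunable works roughly like this: once the number of in-flight batch states exceeds the cap, submission blocks until the batch half a cap behind the current one has signaled, so only about cap/2 states (and whatever fds they hold) stay alive at a time. Below is a minimal standalone C sketch of that heuristic; the struct layouts and the wait_for_batch_id() stub are simplified, hypothetical stand-ins for the real zink types and zink_screen_batch_id_wait(), not the actual Mesa code.

/* Minimal sketch of the batch-state throttle (simplified, hypothetical types;
 * wait_for_batch_id() stands in for zink_screen_batch_id_wait()). */
#include <stdint.h>

struct screen {
   unsigned max_fences;          /* 500 on NVIDIA proprietary, 5000 elsewhere */
};

struct context {
   unsigned batch_states_count;  /* batch states still awaiting completion */
};

static void
wait_for_batch_id(struct screen *screen, uint32_t batch_id)
{
   /* Placeholder: in zink this is zink_screen_batch_id_wait() with
    * PIPE_TIMEOUT_INFINITE. */
   (void)screen;
   (void)batch_id;
}

static void
throttle_batch_states(struct screen *screen, struct context *ctx,
                      uint32_t current_batch_id)
{
   /* Once more than max_fences batch states are in flight, block until the
    * batch half a window behind the current one completes, keeping roughly
    * max_fences / 2 states alive at a time. */
   if (ctx->batch_states_count > screen->max_fences)
      wait_for_batch_id(screen, current_batch_id - screen->max_fences / 2);
}

Waiting on current_batch_id - max_fences / 2 rather than the newest id means the thread only stalls long enough to free half the window instead of draining the whole queue.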

src/gallium/drivers/zink/zink_batch.c
src/gallium/drivers/zink/zink_screen.c
src/gallium/drivers/zink/zink_screen.h

diff --git a/src/gallium/drivers/zink/zink_batch.c b/src/gallium/drivers/zink/zink_batch.c
index ae23a1f..f343364 100644
@@ -339,8 +339,8 @@ post_submit(void *data, void *gdata, int thread_index)
       if (bs->ctx->reset.reset)
          bs->ctx->reset.reset(bs->ctx->reset.data, PIPE_GUILTY_CONTEXT_RESET);
       screen->device_lost = true;
-   } else if (bs->ctx->batch_states_count > 5000) {
-      zink_screen_batch_id_wait(screen, bs->fence.batch_id - 2500, PIPE_TIMEOUT_INFINITE);
+   } else if (bs->ctx->batch_states_count > screen->max_fences) {
+      zink_screen_batch_id_wait(screen, bs->fence.batch_id - (screen->max_fences / 2), PIPE_TIMEOUT_INFINITE);
    }
 }
 
diff --git a/src/gallium/drivers/zink/zink_screen.c b/src/gallium/drivers/zink/zink_screen.c
index 55e8ff8..fa221ce 100644
@@ -2028,6 +2028,15 @@ zink_internal_create_screen(const struct pipe_screen_config *config)
    if (!os_get_total_physical_memory(&screen->total_mem))
       goto fail;
 
+   switch (screen->info.driver_props.driverID) {
+   case VK_DRIVER_ID_NVIDIA_PROPRIETARY:
+      screen->max_fences = 500;
+      break;
+   default:
+      screen->max_fences = 5000;
+      break;
+   }
+
    if (debug_get_bool_option("ZINK_NO_TIMELINES", false))
       screen->info.have_KHR_timeline_semaphore = false;
    if (screen->info.have_KHR_timeline_semaphore)
diff --git a/src/gallium/drivers/zink/zink_screen.h b/src/gallium/drivers/zink/zink_screen.h
index 7df140b..cb64e67 100644
@@ -134,6 +134,7 @@ struct zink_screen {
    uint32_t gfx_queue;
    uint32_t max_queues;
    uint32_t timestamp_valid_bits;
+   unsigned max_fences;
    VkDevice dev;
    VkQueue queue; //gfx+compute
    VkQueue thread_queue; //gfx+compute